+ ./ya make . -T --test-size=small --test-size=medium --stat --test-threads 52 --link-threads 12 -DUSE_EAT_MY_DATA --build release --sanitize=address -DDEBUGINFO_LINES_ONLY --bazel-remote-store --bazel-remote-base-uri http://cachesrv.internal:8081 --bazel-remote-username cache_user --bazel-remote-password-file /tmp/tmp.hS2x1v7dRv --bazel-remote-put --dist-cache-max-file-size=209715200 -A --retest --stat -DCONSISTENT_DEBUG --no-dir-outputs --test-failure-code 0 --build-all --cache-size 2TB --force-build-depends --log-file /home/runner/actions_runner/_work/ydb/ydb/tmp/results/ya_log.txt --evlog-file /home/runner/actions_runner/_work/ydb/ydb/tmp/results/try_1/ya_evlog.jsonl --junit /home/runner/actions_runner/_work/ydb/ydb/tmp/results/try_1/junit.xml --build-results-report /home/runner/actions_runner/_work/ydb/ydb/tmp/results/try_1/report.json --output /home/runner/actions_runner/_work/ydb/ydb/tmp/out
Output root is subdirectory of Arcadia root, this may cause non-idempotent build
Configuring dependencies for platform default-linux-x86_64-release-asan
Configuring dependencies for platform tools
[2 ymakes processing] [8778/8778 modules configured] [1119/5169 modules rendered]
[2 ymakes processing] [8778/8778 modules configured] [5042/5169 modules rendered]
[2 ymakes processing] [8778/8778 modules configured] [5169/5169 modules rendered]
Configuring dependencies for platform test_tool_tc1-global
[0 ymakes processing] [8784/8784 modules configured] [5169/5169 modules rendered]
Configuring tests execution
Configuring local and dist store caches
Configuration done. Preparing for execution
|33.3%| CLEANING SYMRES
| 0.6%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/reader/abstract/libengines-reader-abstract.a
| 2.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/log/tests/ydb-tests-stress-log-tests
| 1.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/loading/libcolumnshard-engines-loading.a
| 4.0%| PREPARE $(VCS)
| 5.5%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/portions/libcolumnshard-engines-portions.a
| 2.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/libreader-common_reader-iterator.global.a
| 4.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/grpc_services/libydb-core-grpc_services.a
| 4.3%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/reader/actor/libengines-reader-actor.a
| 4.7%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/reader/common/libengines-reader-common.a
| 4.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/abstract/read_metadata.cpp
| 4.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tablet_flat/libydb-core-tablet_flat.a
| 5.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/reader/plain_reader/constructor/libreader-plain_reader-constructor.a
| 4.4%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/predicate/libcolumnshard-engines-predicate.a
| 4.5%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/graph/shard/libcore-graph-shard.a
| 4.6%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/olap/helpers/libut-olap-helpers.a
| 4.6%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/tools/fqrun/src/libtools-fqrun-src.a
| 4.7%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/ymq/actor/libcore-ymq-actor.a
| 4.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/reader/common_reader/constructor/libreader-common_reader-constructor.a
| 4.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/libreader-common_reader-iterator.a
| 4.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/reader/sys_view/abstract/libreader-sys_view-abstract.a
| 5.3%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/collections/libsimple_reader-iterator-collections.a
| 5.4%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/reader/plain_reader/constructor/libreader-plain_reader-constructor.global.a
| 5.5%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/reader/plain_reader/iterator/libreader-plain_reader-iterator.a
| 5.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/sync_points/libsimple_reader-iterator-sync_points.a
| 6.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/reader/sys_view/chunks/libreader-sys_view-chunks.global.a
| 6.6%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/public/api/protos/libapi-protos.a
| 6.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/reader/simple_reader/constructor/libreader-simple_reader-constructor.a
| 6.3%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/scheme/libcolumnshard-engines-scheme.a
| 6.3%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/reader/sys_view/granules/libreader-sys_view-granules.global.a
| 6.5%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/scheme_cache/libcore-tx-scheme_cache.a
| 6.5%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/libreader-simple_reader-iterator.a
| 6.6%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/metadata/ds_table/libservices-metadata-ds_table.a
| 6.6%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/scheme/tiering/libengines-scheme-tiering.a
| 6.7%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/kesus/libydb-services-kesus.a
| 6.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/metadata/abstract/libservices-metadata-abstract.a
| 7.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/bloom_ngramm/libstorage-indexes-bloom_ngramm.global.a
| 7.4%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/portions/libstorage-indexes-portions.a
| 7.5%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/planner/liboptimizer-lcbuckets-planner.a
| 7.6%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/export/session/libcolumnshard-export-session.a
| 7.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tracing/libydb-core-tracing.a
| 7.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/libydb-core-tx.a
| 8.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/reader/sys_view/constructor/libreader-sys_view-constructor.a
| 8.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/bloom_ngramm/libstorage-indexes-bloom_ngramm.a
| 8.4%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/writer/buffer/libengines-writer-buffer.a
| 8.5%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/scheme/abstract/libengines-scheme-abstract.a
| 8.6%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/lib/auth/libservices-lib-auth.a
| 8.7%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/deprecated/persqueue_v0/libservices-deprecated-persqueue_v0.a
| 8.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/constructor/liboptimizer-lcbuckets-constructor.global.a
| 8.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/reader/simple_reader/constructor/libreader-simple_reader-constructor.global.a
| 8.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/lbuckets/constructor/liboptimizer-lbuckets-constructor.global.a
| 8.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/storage/chunks/libengines-storage-chunks.a
| 9.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/scheme/indexes/abstract/libscheme-indexes-abstract.a
| 9.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/scheme_board/libcore-tx-scheme_board.a
| 9.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/storage/actualizer/abstract/libstorage-actualizer-abstract.a
| 9.3%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/ymq/http/libcore-ymq-http.a
| 9.5%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/export/actor/libcolumnshard-export-actor.a
| 9.5%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/ext_index/common/libservices-ext_index-common.a
| 9.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/ymq/base/libcore-ymq-base.a
|10.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/planner/liboptimizer-lcbuckets-planner.global.a
|10.6%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/datastreams/libydb-services-datastreams.a
|10.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tools/cfg/k8s_api/libpy3tools-cfg-k8s_api.global.a
|10.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/protos/libpy3library-actors-protos.global.a
|10.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/folder_service/proto/liblibrary-folder_service-proto.a
|10.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/login/protos/libpy3library-login-protos.global.a
|10.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/login/protos/liblibrary-login-protos.a
|11.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/scheme/defaults/protos/libscheme-defaults-protos.a
|11.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/services/libydb-library-services.a
|11.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tools/cfg/libpy3ydb-tools-cfg.global.a
|12.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/actors/protos/libpy3dq-actors-protos.global.a
|12.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/proto/libpy3yql-dq-proto.global.a
|12.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/generic/connector/api/service/protos/libpy3api-service-protos.global.a
|12.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/common/proto/libpy3providers-common-proto.global.a
|12.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tools/cfg/walle/libpy3tools-cfg-walle.global.a
|12.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/ext_index/service/libservices-ext_index-service.a
|13.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/file_storage/proto/libcore-file_storage-proto.a
|14.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/tx_proxy_schemereq.cpp
|14.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/metadata/abstract/request_features.cpp
|14.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/scheme_cache/scheme_cache.h_serialized.cpp
|14.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_stat_table_btree_index_histogram.cpp
|14.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tools/ydbd_slice/libpy3ydbd_slice.global.a
|14.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/ymq/http/xml_builder.cpp
|14.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/ymq/base/events_writer.cpp
|14.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/ymq/base/run_query.cpp
|14.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/proto/libyql-dq-proto.a
|15.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet_flat/flat_table.cpp
|15.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/datastreams/shard_iterator.cpp
|15.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/datastreams/next_token.cpp
|15.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/actors/protos/libdq-actors-protos.a
|15.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/cxxsupp/libcxx/liblibs-cxxsupp-libcxx.a
|15.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/ymq/base/probes.cpp
|16.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/ymq/base/queue_attributes.cpp
|16.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/ymq/base/helpers.cpp
|16.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/ymq/base/dlq_helpers.cpp
|16.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/ymq/base/query_id.h_serialized.cpp
|16.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/common/proto/libproviders-common-proto.a
|16.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/ymq/base/queue_id.cpp
|16.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/ymq/base/secure_protobuf_printer.cpp
|17.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/metadata/abstract/events.cpp
|18.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/scheme_board/subscriber.h_serialized.cpp
|18.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/scheme_board/two_part_description.cpp
|18.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/scheme_board/replica.cpp
|19.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/ymq/base/cloud_enums.h_serialized.cpp
|21.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/engines/scheme/tier_info.cpp
|20.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/engines/scheme/schema_diff.cpp
|29.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/storage/chunks/data.cpp
|28.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/scheme/tiering/tier_info.cpp
|28.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/abstract/abstract.cpp
|30.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/abstract/read_context.cpp
|32.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/health_check/ut/ydb-core-health_check-ut
|31.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/lib/auth/auth_helpers.cpp
|32.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/storage/actualizer/abstract/context.cpp
|33.2%| PREPARE $(LLD_ROOT-3808007503)
|33.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/common_reader/constructor/resolver.cpp
|35.2%| PREPARE $(YMAKE_PYTHON3-4256832079)
|35.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/reader/abstract/read_metadata.h_serialized.cpp
|35.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/predicate/range.cpp
|37.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/scheme_cache/scheme_cache.cpp
|37.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/export/session/session.h_serialized.cpp
|37.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/tx.cpp
|38.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/public/sdk/cpp/tests/unit/client/draft/ydb-public-sdk-cpp-tests-unit-client-draft
|40.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yql/providers/generic/connector/tests/datasource/clickhouse/tests-datasource-clickhouse
|41.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/scheme/snapshot_scheme.cpp
|42.5%| PREPARE $(PYTHON)
|42.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/source.cpp
|42.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/datastreams/grpc_service.cpp
|43.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/writer/buffer/events.cpp
|44.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/storage/indexes/bloom_ngramm/const.cpp
|44.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/columns_set.h_serialized.cpp
|45.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/sys_view/abstract/policy.cpp
|45.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/abstract/common.cpp
|46.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/constructor/one_layer.cpp
|45.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/scheme_board/subscriber.cpp
|46.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/ds_table/registration.cpp
|46.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/columns_set.cpp
|47.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/storage/actualizer/abstract/abstract.cpp
|47.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/plain_reader/iterator/iterator.cpp
|47.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yql/providers/generic/connector/tests/datasource/mysql/connector-tests-datasource-mysql
|48.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/plain_reader/iterator/scanner.cpp
|48.2%| [CF] {default-linux-x86_64, release, asan} $(B)/library/cpp/build_info/build_info.cpp
|48.2%| [CF] {default-linux-x86_64, release, asan} $(B)/library/cpp/build_info/sandbox.cpp
|48.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/sys_view/chunks/chunks.cpp
|48.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/engines/scheme/tiering/common.cpp
|48.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/engines/scheme/abstract/column_ids.cpp
|48.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/ymq/base/action.cpp
|48.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/metadata/abstract/parsing.cpp
|48.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/services/metadata/abstract/kqp_common.h_serialized.cpp
|48.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/metadata/abstract/decoder.cpp
|48.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tracing/http.cpp
|48.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/metadata/ds_table/config.cpp
|48.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/export/session/cursor.cpp
|48.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/message_seqno.cpp
|48.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/engines/scheme/indexes/abstract/header.cpp
|48.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tracing/trace_collection.cpp
|48.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/ymq/base/acl.cpp
|48.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/deprecated/persqueue_v0/grpc_pq_write_actor.cpp
|48.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tracing/trace.cpp
|48.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/planner/counters.cpp
|48.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/engines/scheme/abstract/schema_version.cpp
|48.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/engines/scheme/indexes/abstract/checker.cpp
|48.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/scheme/objects_cache.cpp
|48.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/scheme/indexes/abstract/common.h_serialized.cpp
|48.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/engines/scheme/indexes/abstract/abstract.cpp
|48.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/deprecated/persqueue_v0/persqueue.cpp
|48.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/engines/scheme/indexes/abstract/common.cpp
|48.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/ext_index/common/service.cpp
|48.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/ymq/http/parser.rl6.cpp
|48.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/scheme_board/events.cpp
|48.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/scheme_board/helpers.cpp
|48.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/ds_table/accessor_snapshot_simple.cpp
|48.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/ymq/http/xml.cpp
|48.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/scheme_board/opaque_path_description.cpp
|48.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/ext_index/common/events.cpp
|48.6%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/tx_allocator/libcore-tx-tx_allocator.a
|48.5%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/export/events/libcolumnshard-export-events.a
|48.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/ymq/http/types.cpp
|48.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/threading/cancellation/libcpp-threading-cancellation.a
|48.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/public/issue/protos/libpublic-issue-protos.a
|48.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/datastreams/datastreams_proxy.cpp
|48.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/tracing/usage/libtx-tracing-usage.a
|48.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/colorizer/liblibrary-cpp-colorizer.a
|48.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/comptable/liblibrary-cpp-comptable.a
|48.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/config/liblibrary-cpp-config.a
|48.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/sys_view/constructor/constructor.cpp
|48.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/jaraco.functools/py3/libpy3python-jaraco.functools-py3.global.a
|48.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/jaraco.text/libpy3contrib-python-jaraco.text.global.a
|48.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/jaraco.collections/libpy3contrib-python-jaraco.collections.global.a
|48.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/containers/bitseq/libcpp-containers-bitseq.a
|48.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/containers/2d_array/libcpp-containers-2d_array.a
|48.4%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/test_helper/libtx-columnshard-test_helper.a
|48.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/jsonschema/py3/libpy3python-jsonschema-py3.global.a
|48.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/compproto/liblibrary-cpp-compproto.a
|48.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v0/lexer/libsql-v0-lexer.a
|48.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/jedi/py3/libpy3python-jedi-py3.global.a
|48.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/containers/atomizer/libcpp-containers-atomizer.a
|48.6%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/tx_proxy/libcore-tx-tx_proxy.a
|48.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/matplotlib-inline/libpy3contrib-python-matplotlib-inline.global.a
|48.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/lwtrace/mon/libcpp-lwtrace-mon.a
|48.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/ipython/py3/libpy3python-ipython-py3.global.a
|48.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/cxxsupp/libcxxrt/liblibs-cxxsupp-libcxxrt.a
|48.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/plain_reader/iterator/interval.cpp
|48.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/test_helper/program_constructor.cpp
|48.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/tx_allocator_client/libcore-tx-tx_allocator_client.a
|48.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/constructor/zero_level.cpp
|48.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/abstract/constructor.cpp
|48.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/multidict/libpy3contrib-python-multidict.global.a
|48.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/test_helper/kernels_wrapper.cpp
|48.9%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/containers/intrusive_avl_tree/libcpp-containers-intrusive_avl_tree.a
|48.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/postgresql/ydb-tests-functional-postgresql
|48.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/containers/compact_vector/libcpp-containers-compact_vector.a
|48.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/containers/comptrie/libcpp-containers-comptrie.a
|48.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/pexpect/py3/libpy3python-pexpect-py3.global.a
|48.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/parso/py3/libpy3python-parso-py3.global.a
|48.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/packaging/py3/libpy3python-packaging-py3.global.a
|48.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/oauthlib/libpy3contrib-python-oauthlib.global.a
|48.9%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/containers/disjoint_interval_tree/libcpp-containers-disjoint_interval_tree.a
|49.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/containers/paged_vector/libcpp-containers-paged_vector.a
|49.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/containers/sorted_vector/libcpp-containers-sorted_vector.a
|49.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/containers/intrusive_rb_tree/libcpp-containers-intrusive_rb_tree.a
|48.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/kesus/grpc_service.cpp
|48.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/containers/ring_buffer/libcpp-containers-ring_buffer.a
|48.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/util/evlog/libcore-util-evlog.a
|48.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/containers/stack_vector/libcpp-containers-stack_vector.a
|48.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/containers/stack_array/libcpp-containers-stack_array.a
|48.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/deprecated/persqueue_v0/grpc_pq_write.cpp
|48.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/deprecated/enum_codegen/libcpp-deprecated-enum_codegen.a
|48.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/containers/str_map/libcpp-containers-str_map.a
|48.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/kubernetes/libpy3contrib-python-kubernetes.global.a
|48.9%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/coroutine/engine/libcpp-coroutine-engine.a
|48.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/viewer/libydb-core-viewer.a
|49.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/dbg_output/liblibrary-cpp-dbg_output.a
|49.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/coroutine/listener/libcpp-coroutine-listener.a
|49.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/deprecated/accessors/libcpp-deprecated-accessors.a
|49.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/metadata/secret/accessor/libmetadata-secret-accessor.a
|49.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/viewer/json/libcore-viewer-json.a
|49.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/deprecated/split/libcpp-deprecated-split.a
|49.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/common/provider/libproviders-common-provider.a
|49.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/constructor/constructor.cpp
|49.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/deprecated/kmp/libcpp-deprecated-kmp.a
|49.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/scanner.cpp
|49.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/digest/argonish/libcpp-digest-argonish.a
|49.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/diff/liblibrary-cpp-diff.a
|49.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/plain_reader/iterator/merge.cpp
|49.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/common/schema/expr/libcommon-schema-expr.a
|49.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/pluggy/py3/libpy3python-pluggy-py3.global.a
|49.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/platformdirs/libpy3contrib-python-platformdirs.global.a
|49.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/sys_view/granules/granules.cpp
|49.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/protobuf/py3/libpy3python-protobuf-py3.global.a
|49.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/digest/argonish/internal/proxies/avx2/libinternal-proxies-avx2.a
|49.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/digest/argonish/internal/proxies/ssse3/libinternal-proxies-ssse3.a
|49.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/digest/argonish/internal/proxies/ref/libinternal-proxies-ref.a
|49.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/common/re2_udf.cpp
|49.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/protobuf/py3/libpy3python-protobuf-py3.a
|49.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/prompt-toolkit/py3/libpy3python-prompt-toolkit-py3.global.a
|49.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/common/math_udf.cpp
|49.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/digest/argonish/internal/proxies/sse41/libinternal-proxies-sse41.a
|49.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/datastreams/put_records_actor.cpp
|49.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/digest/murmur/libcpp-digest-murmur.a
|49.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/viewer/yaml/libcore-viewer-yaml.a
|49.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/digest/lower_case/libcpp-digest-lower_case.a
|49.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/common/json2_udf.cpp
|49.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/digest/crc32c/libcpp-digest-crc32c.a
|49.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/common/libkqp-ut-common.a
|49.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/digest/md5/libcpp-digest-md5.a
|49.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/dot_product/liblibrary-cpp-dot_product.a
|49.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/digest/old_crc/libcpp-digest-old_crc.a
|49.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/viewer/libydb-core-viewer.global.a
|49.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/disjoint_sets/liblibrary-cpp-disjoint_sets.a
|49.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/execprofile/liblibrary-cpp-execprofile.a
|49.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/dwarf_backtrace/liblibrary-cpp-dwarf_backtrace.a
|49.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/dns/liblibrary-cpp-dns.a
|49.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/enumbitset/liblibrary-cpp-enumbitset.a
|49.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/constructor/constructor.cpp
|49.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/html/escape/libcpp-html-escape.a
|49.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/wrappers/events/libcore-wrappers-events.a
|49.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/getopt/liblibrary-cpp-getopt.global.a
|49.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/ptyprocess/py3/libpy3python-ptyprocess-py3.global.a
|49.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/pure-eval/libpy3contrib-python-pure-eval.global.a
|49.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/wrappers/libydb-core-wrappers.a
|49.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/pyrsistent/py3/libpy3python-pyrsistent-py3.global.a
|49.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/pyasn1/py3/libpy3python-pyasn1-py3.global.a
|49.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/histogram/hdr/libcpp-histogram-hdr.a
|49.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/plain_reader/iterator/plain_read_data.cpp
|49.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/py/py3/libpy3python-py-py3.global.a
|49.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/common/string_udf.cpp
|49.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/pycparser/py3/libpy3python-pycparser-py3.global.a
|49.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/constructor/read_metadata.cpp
|49.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/scheme/abstract/index_info.cpp
|49.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/backup/ydb-tests-functional-backup
|49.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/requests/py3/libpy3python-requests-py3.global.a
|49.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/common/datetime2_udf.cpp
|49.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/common/transform/libproviders-common-transform.a
|49.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/viewer/protos/libcore-viewer-protos.a
|49.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/requests-oauthlib/libpy3contrib-python-requests-oauthlib.global.a
|49.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/pytest/py3/libpy3python-pytest-py3.global.a
|49.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/storage/optimizer/lbuckets/constructor/constructor.cpp
|49.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/html/pcdata/libcpp-html-pcdata.a
|49.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/ruamel.yaml.clib/py3/libpy3python-ruamel.yaml.clib-py3.global.a
|49.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/python-dateutil/py3/libpy3python-python-dateutil-py3.global.a
|49.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/http/misc/libcpp-http-misc.a
|49.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/ipmath/liblibrary-cpp-ipmath.a
|49.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/http/io/libcpp-http-io.a
|49.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/http/simple/libcpp-http-simple.a
|49.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/common/unicode_udf.cpp
|49.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/MarkupSafe/py3/libpy3python-MarkupSafe-py3.global.a
|49.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/http/server/libcpp-http-server.a
|49.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/int128/liblibrary-cpp-int128.a
|49.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/tenacity/py3/libpy3python-tenacity-py3.global.a
|49.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/sync_points/result.cpp
|49.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/http/fetch/libcpp-http-fetch.a
|49.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/iterator/liblibrary-cpp-iterator.a
|49.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/json/fast_sax/libcpp-json-fast_sax.a
|49.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/json/easy_parse/libcpp-json-easy_parse.a
|49.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/export/actor/export_actor.cpp
|49.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/json/common/libcpp-json-common.a
|49.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/ipv6_address/liblibrary-cpp-ipv6_address.a
|49.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/common_reader/constructor/read_metadata.cpp
|49.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/json/writer/libcpp-json-writer.a
|49.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/typeguard/libpy3contrib-python-typeguard.global.a
|49.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/setuptools/py3/libpy3python-setuptools-py3.global.a
|49.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/typing-extensions/py3/libpy3python-typing-extensions-py3.global.a
|49.5%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/public/api/grpc/libapi-grpc.a
|49.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/traitlets/py3/libpy3python-traitlets-py3.global.a
|49.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/ymq/actor/sha256.cpp
|49.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/json/yson/libcpp-json-yson.a
|49.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/l2_distance/liblibrary-cpp-l2_distance.a
|49.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/ymq/queues/common/libymq-queues-common.a
|49.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/sys_view/abstract/metadata.cpp
|49.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/lfalloc/alloc_profiler/libcpp-lfalloc-alloc_profiler.a
|49.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/sync_points/abstract.cpp
|49.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/lcs/liblibrary-cpp-lcs.a
|49.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/urllib3/py3/libpy3python-urllib3-py3.global.a
|49.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/websocket-client/libpy3contrib-python-websocket-client.global.a
|49.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/lfalloc/dbg_info/libcpp-lfalloc-dbg_info.a
|49.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/lua/liblibrary-cpp-lua.a
|49.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/logger/global/libcpp-logger-global.a
|49.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/yarl/libpy3contrib-python-yarl.a
|49.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/yarl/libpy3contrib-python-yarl.global.a
|49.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/wheel/libpy3contrib-python-wheel.global.a
|49.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/abseil-cpp-tstring/y_absl/container/libabseil-cpp-tstring-y_absl-container.a
|49.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/lwtrace/mon/analytics/liblwtrace-mon-analytics.a
|49.9%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/logger/liblibrary-cpp-logger.global.a
|49.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/sys_view/auth/libcore-sys_view-auth.a
|49.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/abseil-cpp-tstring/y_absl/base/libabseil-cpp-tstring-y_absl-base.a
|49.9%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/logger/liblibrary-cpp-logger.a
|49.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/tools/pq_read/test/ydb-tests-tools-pq_read-test
|49.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/plain_reader/iterator/fetching.cpp
|50.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/ydb/py3/libpy3python-ydb-py3.global.a
|50.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/lwtrace/mon/libcpp-lwtrace-mon.global.a
|50.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/abseil-cpp-tstring/y_absl/hash/libabseil-cpp-tstring-y_absl-hash.a
|50.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/lwtrace/protos/libcpp-lwtrace-protos.a
|50.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/abseil-cpp-tstring/y_absl/numeric/libabseil-cpp-tstring-y_absl-numeric.a
|50.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/libabseil-cpp-tstring-y_absl-debugging.a
|50.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/abseil-cpp-tstring/y_absl/profiling/libabseil-cpp-tstring-y_absl-profiling.a
|50.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/abseil-cpp-tstring/y_absl/random/libabseil-cpp-tstring-y_absl-random.a
|50.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/sync_points/limit.cpp
|50.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/messagebus/actor/libmessagebus_actor.a
|50.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/abseil-cpp-tstring/y_absl/flags/libabseil-cpp-tstring-y_absl-flags.a
|50.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/ymq/proto/libcore-ymq-proto.a
|50.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/messagebus/config/libcpp-messagebus-config.a
|50.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/lwtrace/liblibrary-cpp-lwtrace.a
|50.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/ymq/queues/fifo/libymq-queues-fifo.a
|50.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/libabseil-cpp-tstring-y_absl-synchronization.a
|50.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/messagebus/monitoring/libcpp-messagebus-monitoring.a
|50.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/ymq/actor/user_settings_names.cpp
|50.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/messagebus/protobuf/libmessagebus_protobuf.a
|50.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/json/liblibrary-cpp-json.a
|50.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/messagebus/oldmodule/libcpp-messagebus-oldmodule.a
|50.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/ymq/queues/std/libymq-queues-std.a
|50.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/accessor/libydb-library-accessor.a
|50.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/messagebus/scheduler/libcpp-messagebus-scheduler.a
|50.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/tiering/libcore-tx-tiering.a
|50.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/monlib/deprecated/json/libmonlib-deprecated-json.a
|50.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/aclib/libydb-library-aclib.a
|50.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/monlib/encode/buffered/libmonlib-encode-buffered.a
|50.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/sys_view/abstract/granule_view.cpp
|50.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/messagebus/www/libcpp-messagebus-www.a
|49.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/library/libpy3ydb-tests-library.global.a
|49.9%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/monlib/dynamic_counters/libcpp-monlib-dynamic_counters.a
|50.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/monlib/encode/legacy_protobuf/protos/libencode-legacy_protobuf-protos.a
|50.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/public/api/grpc/libpy3api-grpc.global.a
|50.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/monlib/encode/json/libmonlib-encode-json.a
|50.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/monlib/encode/libcpp-monlib-encode.a
|50.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/monlib/encode/prometheus/libmonlib-encode-prometheus.a
|50.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/scheme/abstract_scheme.cpp
|50.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/monlib/messagebus/libcpp-monlib-messagebus.a
|50.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/messagebus/liblibrary-cpp-messagebus.a
|50.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/planner/optimizer.cpp
|50.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/scheme/column_features.cpp
|50.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/actor_type/liblibrary-actors-actor_type.a
|50.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/monlib/encode/text/libmonlib-encode-text.a
|50.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/monlib/exception/libcpp-monlib-exception.a
|50.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/oltp_workload/tests/ydb-tests-stress-oltp_workload-tests
|50.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/monlib/encode/spack/libmonlib-encode-spack.a
|50.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/aclib/protos/liblibrary-aclib-protos.a
|50.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/monlib/service/libcpp-monlib-service.a
|50.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/monlib/service/pages/resources/libservice-pages-resources.global.a
|50.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/core/harmonizer/libactors-core-harmonizer.a
|50.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/public/api/grpc/a8f74ccfefd66bc6dc846adade_raw.auxcpp
|50.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/proto_parser/antlr3/libv1-proto_parser-antlr3.a
|50.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/monlib/service/pages/tablesorter/libservice-pages-tablesorter.global.a
|50.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/messagebus/www/libcpp-messagebus-www.global.a
|50.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/monlib/service/pages/libmonlib-service-pages.a
|50.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/72c34d62a8e49d016b67b1434e_raw.auxcpp
|50.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/monlib/metrics/libcpp-monlib-metrics.a
|50.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/c57e7c296b4b94e00564af4633_raw.auxcpp
|50.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/c7946d54e0949cdad526004c95_raw.auxcpp
|50.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/public/api/grpc/140c88ca3e90f1923d2b7a0c94_raw.auxcpp
|50.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/scheme_board/cache.cpp
|50.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/44177782c00f54e10c2b7a580b_raw.auxcpp
|50.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/9d521aa1b041663163790033ad_raw.auxcpp
|50.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/openssl/big_integer/libcpp-openssl-big_integer.a
|50.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/client/arrow/fbs/libclient-arrow-fbs.a
|50.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/mime/types/libcpp-mime-types.a
|50.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/openssl/crypto/libcpp-openssl-crypto.a
|50.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/dnscachelib/liblibrary-actors-dnscachelib.a
|50.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/openssl/init/libcpp-openssl-init.a
|50.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/fetching.cpp
|50.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/abseil-cpp-tstring/y_absl/types/libabseil-cpp-tstring-y_absl-types.a
|50.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/openssl/holders/libcpp-openssl-holders.a
|50.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/scheme/indexes/abstract/constructor.cpp
|50.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tracing/tablet_info.cpp
|50.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/plain_reader/iterator/source.cpp
|50.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/portions/extractor/libindexes-portions-extractor.a
|50.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/qplayer/udf_resolver/libcore-qplayer-udf_resolver.a
|50.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/openssl/io/libcpp-openssl-io.a
|50.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/openssl/method/libcpp-openssl-method.a
|50.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/dnsresolver/liblibrary-actors-dnsresolver.a
|50.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/abseil-cpp-tstring/y_absl/strings/libabseil-cpp-tstring-y_absl-strings.a
|50.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/helpers/liblibrary-actors-helpers.a
|50.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/packers/liblibrary-cpp-packers.a
|50.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/packedtypes/liblibrary-cpp-packedtypes.a
|50.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/abseil-cpp-tstring/y_absl/time/libabseil-cpp-tstring-y_absl-time.a
|50.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/public/types/libpy3essentials-public-types.global.a
|50.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/abseil-cpp/absl/hash/libabseil-cpp-absl-hash.a
|50.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/abseil-cpp/absl/flags/libabseil-cpp-absl-flags.a
|50.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/oss/canonical/libpy3tests-oss-canonical.global.a
|50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/protobuf/json/proto/libprotobuf-json-proto.a
|50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/protobuf/interop/libcpp-protobuf-interop.a
|50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/libffi/libcontrib-restricted-libffi.a
|50.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/abseil-cpp/absl/synchronization/libabseil-cpp-absl-synchronization.a
|50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/regex/hyperscan/libcpp-regex-hyperscan.a
|50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/protobuf/util/libcpp-protobuf-util.a
|50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/protobuf/util/proto/libprotobuf-util-proto.a
|50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/abseil-cpp/absl/time/libabseil-cpp-absl-time.a
|50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/regex/pcre/libcpp-regex-pcre.a
|50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/llhttp/libcontrib-restricted-llhttp.a
|50.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/random_provider/liblibrary-cpp-random_provider.a
|50.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/tools/python3/Modules/_sqlite/libpy3python3-Modules-_sqlite.global.a
|50.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/tools/python3/Modules/_sqlite/libpy3python3-Modules-_sqlite.a
|50.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/memory_log/liblibrary-actors-memory_log.a
|50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/prof/liblibrary-actors-prof.a
|50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/retry/liblibrary-cpp-retry.a
|50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/cpuid_check/liblibrary-cpp-cpuid_check.global.a
|50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/retry/protos/libcpp-retry-protos.a
|50.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/containers/absl_flat_hash/libcpp-containers-absl_flat_hash.a
|50.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/interconnect/mock/libactors-interconnect-mock.a
|50.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/malloc/system/libsystem_allocator.a
|50.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/scheme/indexes/abstract/collection.cpp
|50.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/core/liblibrary-actors-core.a
|50.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/sighandler/liblibrary-cpp-sighandler.a
|50.9%| {BAZEL_DOWNLOAD} $(B)/library/cpp/sanitizer/plugin/sanitizer.py.pyplugin
|50.9%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/certifi/libpy3library-python-certifi.global.a
|50.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/http/liblibrary-actors-http.a
|50.9%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/func/libpy3library-python-func.global.a
|50.9%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/cores/libpy3library-python-cores.global.a
|51.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/fs/libpy3library-python-fs.global.a
|50.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/filelock/libpy3library-python-filelock.global.a
|50.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/find_root/libpy3library-python-find_root.global.a
|50.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/import_tracing/constructor/libpy3python-import_tracing-constructor.global.a
|50.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/scheme/filtered_scheme.cpp
|50.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/import_tracing/lib/libpy3python-import_tracing-lib.global.a
|50.9%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/scheme/liblibrary-cpp-scheme.a
|50.9%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/pytest/libpy3library-python-pytest.global.a
|50.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/log_backend/liblibrary-actors-log_backend.a
|51.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/sliding_window/liblibrary-cpp-sliding_window.a
|51.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/pytest/plugins/libpy3python-pytest-plugins.global.a
|51.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/sse/liblibrary-cpp-sse.a
|51.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/runtime_py3/libpy3library-python-runtime_py3.global.a
|50.9%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/skiff/liblibrary-cpp-skiff.a
|50.9%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/streams/bzip2/libcpp-streams-bzip2.a
|50.9%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/strings/libpy3library-python-strings.global.a
|51.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/symbols/libc/libpython-symbols-libc.global.a
|51.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/svn_version/libpy3library-python-svn_version.global.a
|51.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/symbols/python/libpy3cpython-symbols-python.global.a
|51.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/symbols/module/libpy3python-symbols-module.a
|51.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/windows/libpy3library-python-windows.global.a
|51.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/strings/libpy3library-python-strings.a
|51.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/libpy3libs-config-protos.global.a
|51.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/schemeshard/libcore-protos-schemeshard.a
|51.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/iterator.cpp
|50.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/context.cpp
|50.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/sys_view/abstract/filler.cpp
|50.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/testing/yatest_common/libpy3python-testing-yatest_common.global.a
|50.8%| [AR] {BAZEL_DOWNLOAD} $(B)/util/charset/libutil-charset.a
|50.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/common/protos/libcolumnshard-common-protos.a
|50.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/testing/filter/libpy3python-testing-filter.global.a
|50.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/engines/portions/base_with_blobs.cpp
|50.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/storage/indexes/portions/constructor.cpp
|50.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/testlib/common/libactors-testlib-common.a
|51.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/bits_storage/libstorage-indexes-bits_storage.a
|51.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/streams/lzma/libcpp-streams-lzma.a
|50.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/streams/zstd/libcpp-streams-zstd.a
|50.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/streams/brotli/libcpp-streams-brotli.a
|50.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/streams/zc_memory_input/libcpp-streams-zc_memory_input.a
|50.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/string_utils/base64/libcpp-string_utils-base64.a
|50.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/string_utils/csv/libcpp-string_utils-csv.a
|50.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/storage/indexes/bloom_ngramm/constructor.cpp
|50.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/util/liblibrary-actors-util.a
|50.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/interconnect/liblibrary-actors-interconnect.a
|50.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_public/iam/libpy3client-yc_public-iam.global.a
|50.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/testlib/liblibrary-actors-testlib.a
|50.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/config/protos/libcore-config-protos.a
|50.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_public/common/libpy3client-yc_public-common.global.a
|50.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/wilson/liblibrary-actors-wilson.a
|50.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/liblibs-config-protos.a
|50.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/arrow_clickhouse/Common/liblibrary-arrow_clickhouse-Common.a
|49.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/string_utils/parse_size/libcpp-string_utils-parse_size.a
|49.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/arrow_clickhouse/Columns/liblibrary-arrow_clickhouse-Columns.a
|49.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/schemeshard/libpy3core-protos-schemeshard.global.a
|50.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/antlr3_cpp_runtime/libcontrib-libs-antlr3_cpp_runtime.a
|50.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/string_utils/quote/libcpp-string_utils-quote.a
|50.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/scheme/protos/libcore-scheme-protos.a
|50.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/libpy3ydb-core-protos.global.a
|50.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/string_utils/scan/libcpp-string_utils-scan.a
|50.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/string_utils/relaxed_escaper/libcpp-string_utils-relaxed_escaper.a
|50.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/arrow_clickhouse/DataStreams/liblibrary-arrow_clickhouse-DataStreams.a
|50.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/string_utils/ztstrbuf/libcpp-string_utils-ztstrbuf.a
|50.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/terminate_handler/liblibrary-cpp-terminate_handler.a
|50.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/storage/indexes/bloom_ngramm/meta.cpp
|50.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/string_utils/url/libcpp-string_utils-url.a
|50.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/apache/orc-format/liblibs-apache-orc-format.a
|50.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/tdigest/liblibrary-cpp-tdigest.a
|50.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/terminate_handler/liblibrary-cpp-terminate_handler.global.a
|50.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/testing/common/libcpp-testing-common.a
|50.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/ds_table/table_exists.cpp
|50.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/testing/gmock_in_unittest/libcpp-testing-gmock_in_unittest.global.a
|50.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/testing/gmock_in_unittest/libcpp-testing-gmock_in_unittest.a
|50.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/log_backend/ut/ydb-core-log_backend-ut
|50.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/chunks_limiter/libydb-library-chunks_limiter.a
|50.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/arrow_parquet/libydb-library-arrow_parquet.a
|49.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/arrow_kernels/libydb-library-arrow_kernels.a
|49.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/fetch_steps.cpp
|49.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/antlr4_cpp_runtime/libcontrib-libs-antlr4_cpp_runtime.a
|49.9%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/testing/gtest_extensions/libcpp-testing-gtest_extensions.a
|49.9%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/string_utils/levenshtein_diff/libcpp-string_utils-levenshtein_diff.a
|50.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/base/counters.cpp
|50.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/testing/hook/libcpp-testing-hook.a
|50.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/folder_service/libydb-library-folder_service.a
|50.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/lib/libpy3tests-olap-lib.global.a
|50.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/source.cpp
|50.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/constructor.cpp
|50.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/apache/avro/liblibs-apache-avro.a
|50.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/db_pool/protos/liblibrary-db_pool-protos.a
|50.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/conclusion/libydb-library-conclusion.a
|50.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/threading/atomic/libcpp-threading-atomic.a
|50.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/string_utils/indent_text/libcpp-string_utils-indent_text.a
|50.7%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/categories_bloom/libstorage-indexes-categories_bloom.a
|50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/testing/unittest_main/libcpp-testing-unittest_main.a
|50.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/threading/equeue/libcpp-threading-equeue.a
|50.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/threading/blocking_queue/libcpp-threading-blocking_queue.a
|50.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/threading/cron/libcpp-threading-cron.a
|50.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/tools/python3/libcontrib-tools-python3.a
|50.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/storage/indexes/portions/meta.cpp
|50.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/threading/future/libcpp-threading-future.a
|50.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/arrow_clickhouse/libydb-library-arrow_clickhouse.a
|50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/db_pool/libydb-library-db_pool.a
|50.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/draft/libpy3api-grpc-draft.global.a
|50.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/testing/unittest/libcpp-testing-unittest.a
|50.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/olap_workload/tests/ydb-tests-stress-olap_workload-tests
|50.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/formats/arrow/hash/liblibrary-formats-arrow-hash.a
|51.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/folder_service/mock/liblibrary-folder_service-mock.a
|51.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/threading/skip_list/libcpp-threading-skip_list.a
|50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/threading/hot_swap/libcpp-threading-hot_swap.a
|50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/threading/poor_man_openmp/libcpp-threading-poor_man_openmp.a
|50.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/storage/chunks/column.cpp
|50.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/transactions/locks/libcolumnshard-transactions-locks.global.a
|51.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/threading/task_scheduler/libcpp-threading-task_scheduler.a
|51.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/threading/queue/libcpp-threading-queue.a
|51.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/formats/arrow/csv/converter/libarrow-csv-converter.a
|51.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/time_provider/liblibrary-cpp-time_provider.a
|51.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/threading/thread_local/libcpp-threading-thread_local.a
|51.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/deprecated/persqueue_v0/grpc_pq_read_actor.cpp
|51.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/formats/arrow/modifier/liblibrary-formats-arrow-modifier.a
|51.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/common/metrics/libproviders-common-metrics.a
|51.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/formats/arrow/simple_builder/liblibrary-formats-arrow-simple_builder.a
|51.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/unified_agent_client/liblibrary-cpp-unified_agent_client.global.a
|51.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/export/events/events.cpp
|51.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/writer/buffer/actor2.cpp
|51.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/unified_agent_client/liblibrary-cpp-unified_agent_client.a
|51.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/formats/arrow/scalar/liblibrary-formats-arrow-scalar.a
|51.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/type_info/liblibrary-cpp-type_info.a
|51.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/formats/arrow/liblibrary-formats-arrow.a
|51.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/xml/document/libcpp-xml-document.a
|51.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/formats/arrow/validation/liblibrary-formats-arrow-validation.a
|51.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/unified_agent_client/proto/libcpp-unified_agent_client-proto.a
|51.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/grpc/server/actors/libgrpc-server-actors.a
|51.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/secret/accessor/secret_id.cpp
|51.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/writer/libcolumnshard-engines-writer.a
|51.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/formats/arrow/switch/liblibrary-formats-arrow-switch.a
|51.5%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/fyamlcpp/libydb-library-fyamlcpp.a
|51.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/xml/init/libcpp-xml-init.a
|51.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/yt/backtrace/libcpp-yt-backtrace.a
|51.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/yt/cpu_clock/libcpp-yt-cpu_clock.a
|51.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/global_plugins/libydb-library-global_plugins.a
|51.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/yson/json/libcpp-yson-json.a
|51.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/formats/arrow/transformer/liblibrary-formats-arrow-transformer.a
|51.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/formats/arrow/splitter/liblibrary-formats-arrow-splitter.a
|51.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/yt/backtrace/cursors/libunwind/libbacktrace-cursors-libunwind.a
|51.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/yt/malloc/libcpp-yt-malloc.a
|51.3%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/metadata/manager/libservices-metadata-manager.a
|51.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/yt/logging/libcpp-yt-logging.a
|51.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/yt/exception/libcpp-yt-exception.a
|51.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/yt/assert/libcpp-yt-assert.a
|51.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/yt/global/libcpp-yt-global.a
|51.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/yt/logging/plain_text_formatter/libyt-logging-plain_text_formatter.a
|51.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/yt/misc/libcpp-yt-misc.a
|51.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/yson/node/libcpp-yson-node.a
|51.5%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/libydb-core-protos.a
|51.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/yson/liblibrary-cpp-yson.a
|51.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/http_proxy/error/liblibrary-http_proxy-error.a
|51.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/splitter/abstract/libcolumnshard-splitter-abstract.a
|51.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/yt/error/libcpp-yt-error.a
|51.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/yt/memory/libcpp-yt-memory.a
|51.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/scheme/index_info.cpp
|51.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/iterator.cpp
|51.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/engines/writer/put_status.cpp
|51.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/http_proxy/authorization/liblibrary-http_proxy-authorization.a
|51.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/metadata/manager/modification_controller.cpp
|51.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/grpc/server/liblibrary-grpc-server.a
|51.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/metadata/manager/preparation_controller.cpp
|51.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/engines/writer/blob_constructor.cpp
|51.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/metadata/manager/restore_controller.cpp
|51.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/yt/threading/libcpp-yt-threading.a
|51.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/yt/system/libcpp-yt-system.a
|51.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/mixedpy/ydb-tests-stress-mixedpy
|51.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/metadata/manager/fetch_database.cpp
|51.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/metadata/manager/ydb_value_operator.cpp
|51.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/yt/string/libcpp-yt-string.a
|51.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/metadata/manager/table_record.cpp
|51.9%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/yt/yson/libcpp-yt-yson.a
|51.9%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/ytalloc/api/libcpp-ytalloc-api.a
|51.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/ext_index/service/add_index.cpp
|51.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/login/account_lockout/liblibrary-login-account_lockout.a
|51.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/fetched_data.cpp
|51.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/yt/yson_string/libcpp-yt-yson_string.a
|51.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/ds_table/behaviour_registrator_actor.cpp
|51.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/logger/libydb-library-logger.a
|51.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/apps/version/libversion_definition.a
|51.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/login/libydb-library-login.a
|51.5%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/actorlib_impl/libydb-core-actorlib_impl.a
|51.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/login/cache/liblibrary-login-cache.a
|51.5%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/backup/controller/libcore-backup-controller.a
|51.5%| [AR] {BAZEL_DOWNLOAD} $(B)/util/draft/libutil-draft.a
|51.5%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blob_depot/agent/libcore-blob_depot-agent.a
|51.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/backup/common/libcore-backup-common.a
|51.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/audit/libydb-core-audit.a
|52.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/backup/common/proto/libbackup-common-proto.a
|51.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/backup/impl/libcore-backup-impl.a
|51.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/login/password_checker/liblibrary-login-password_checker.a
|51.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blob_depot/libydb-core-blob_depot.a
|51.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/backpressure/libcore-blobstorage-backpressure.a
|51.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/olap/bg_tasks/adapter/libolap-bg_tasks-adapter.a
|51.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/mkql_proto/libydb-library-mkql_proto.a
|51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/actorlib_impl/long_timer.cpp
|51.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/actorlib_impl/actor_tracker.cpp
|51.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/actorlib_impl/name_service_client_protocol.cpp
|51.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/ncloud/impl/liblibrary-ncloud-impl.a
|51.7%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/base/libcore-blobstorage-base.a
|51.3%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/base/generated/libcore-base-generated.a
|51.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/scheme_board/monitoring.cpp
|51.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/actorlib_impl/read_http_reply_protocol.cpp
|51.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/abstract/initialization.cpp
|51.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/schemeshard/common/libtx-schemeshard-common.a
|51.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/backup/impl/table_writer.cpp
|51.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/ds_table/accessor_snapshot_base.cpp
|51.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/base/logoblob.cpp
|51.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/base/local_user_token.cpp
|51.6%|
[CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/base/localdb.cpp |51.6%| PREPARE $(OS_SDK_ROOT-sbr:243881345) |51.7%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/base/libydb-core-base.a |51.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/base/group_stat.cpp |51.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/base/actor_activity_names.cpp |51.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/base/feature_flags_service.cpp |51.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/dsproxy/mock/libblobstorage-dsproxy-mock.a |51.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/export/actor/write.cpp |51.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/plain_read_data.cpp |51.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/base/generated/runtime_feature_flags.cpp |52.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/base/blobstorage_syncstate.cpp |52.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/base/html.cpp |52.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/base/blobstorage_vdiskid.cpp |52.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/deprecated/persqueue_v0/grpc_pq_clusters_updater_actor.cpp |52.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/base/domain.cpp |52.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/ext_index/service/deleting.cpp |52.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/ext_index/service/add_data.cpp |52.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/base/backtrace.cpp |51.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/base/wilson_tracing_control.cpp |51.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/base/counters.cpp |51.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/base/traceid.cpp |51.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/base/auth.cpp |51.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/base/storage_pools.cpp |52.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/base/tx_processing.cpp |52.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/base/tablet_status_checker.cpp |52.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/abstract/fetcher.cpp |52.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/base/tablet_killer.cpp |52.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/base/subdomain.cpp |52.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/scheme_board/load_test.cpp |52.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/base/tablet.cpp |52.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/base/table_index.cpp |52.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/scheme/indexes/abstract/fetcher.cpp |52.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/base/blobstorage.cpp |52.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/sys_view/abstract/iterator.cpp |50.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/base/blobstorage_grouptype.cpp |50.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/base/event_filter.cpp |50.7%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/libcore-tx-schemeshard.a |50.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/base/row_version.cpp |50.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/base/statestorage_event_filter.cpp |50.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/plain_reader/iterator/context.cpp |50.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/base/statestorage.cpp |50.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/base/services_assert.cpp |50.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/base/board_replica.cpp |50.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/base/pool_stats_collector.cpp |51.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/base/path.cpp |51.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/base/statestorage_monitoring.cpp |51.1%| [CC] 
{BAZEL_DOWNLOAD} $(S)/ydb/core/base/statestorage_proxy.cpp |51.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/common/libcore-blobstorage-common.a |51.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/base/memory_controller_iface.h_serialized.cpp |50.7%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/dsproxy/libcore-blobstorage-dsproxy.a |50.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/scheme/schema_version.cpp |50.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/pdisk_io/protos/liblibrary-pdisk_io-protos.a |50.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/olap/indexes/libschemeshard-olap-indexes.a |51.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/groupinfo/libcore-blobstorage-groupinfo.a |51.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/lwtrace_probes/libcore-blobstorage-lwtrace_probes.a |51.6%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/olap/columns/libschemeshard-olap-columns.a |51.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/pdisk_io/libydb-library-pdisk_io.a |51.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/schemeshard/olap/bg_tasks/events/libolap-bg_tasks-events.a |51.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/schemeshard/olap/bg_tasks/protos/libolap-bg_tasks-protos.a |51.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/plain_reader/iterator/fetched_data.cpp |51.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/incrhuge/libcore-blobstorage-incrhuge.a |51.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/scheme_board/populator.cpp |51.6%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/nodewarden/libcore-blobstorage-nodewarden.a |51.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/writer/write_controller.cpp |51.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/transactions/locks/write.cpp |51.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/planner/common_level.cpp |51.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/deprecated/persqueue_v0/grpc_pq_read.cpp |51.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/storage/indexes/categories_bloom/header.cpp |51.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/portions/portion_info.h_serialized.cpp |51.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/ext_index/service/activation.cpp |51.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/fetching.cpp |51.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/ds_table/accessor_refresh.cpp |51.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/ds_table/accessor_subscribe.cpp |51.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/ext_index/service/executor.cpp |51.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/test_helper/shard_writer.cpp |51.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/context.cpp |51.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/portions/constructors.cpp |51.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/backup/controller/tablet.cpp |51.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/actorlib_impl/connect_socket_protocol.cpp |51.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/pdisk/libcore-blobstorage-pdisk.a |51.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/portions/portion_info.cpp |51.9%| [CC] {BAZEL_DOWNLOAD, FAILED} 
$(S)/ydb/core/tx/columnshard/transactions/locks/read_finished.cpp |51.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/ext_index/common/config.cpp |51.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/anubis_osiris/libblobstorage-vdisk-anubis_osiris.a |51.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/other/libcore-blobstorage-other.a |51.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/portions/write_with_blobs.cpp |51.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/actorlib_impl/send_data_protocol.cpp |51.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/portions/compacted.cpp |51.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/persqueue/deprecated/read_batch_converter/libpersqueue-deprecated-read_batch_converter.a |51.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/retention.cpp |51.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/http/http.cpp |51.7%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/persqueue/topic_parser/liblibrary-persqueue-topic_parser.a |51.7%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/huge/libblobstorage-vdisk-huge.a |51.7%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/balance/libblobstorage-vdisk-balance.a |51.7%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/common/libblobstorage-vdisk-common.a |51.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/client/metadata/libcore-client-metadata.a |51.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/pretty_types_print/protobuf/liblibrary-pretty_types_print-protobuf.a |51.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/scheme/indexes/abstract/meta.cpp |51.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/abstract/kqp_common.cpp |52.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk.cpp |52.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/pdisk/mock/libblobstorage-pdisk-mock.a |52.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/portions/data_accessor.cpp |52.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_log_cache.cpp |52.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/portions/index_chunk.cpp |52.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_defs.h_serialized.cpp |52.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_internal_interface.cpp |52.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_logreader.cpp |52.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/olap/bg_tasks/transactions/libolap-bg_tasks-transactions.a |52.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_impl_metadata.cpp |52.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_completion_impl.cpp |52.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_delayed_cost_loop.cpp |52.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_drivemodel_db.cpp |52.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/common/blobstorage_event_filter.cpp |52.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/ds_table/scheme_describe.cpp |52.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/portions/constructor_accessor.cpp |52.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/actorlib_impl/read_data_protocol.cpp |52.2%| [CC] {BAZEL_DOWNLOAD, FAILED} 
$(S)/ydb/core/tx/columnshard/engines/portions/column_record.cpp |52.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/planner/abstract.cpp |52.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/send_message.cpp |52.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_impl_log.cpp |52.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/set_queue_attributes.cpp |52.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/huge/blobstorage_hullhugedefs.cpp |52.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/common/vdisk_histograms.cpp |52.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/transactions/locks/read_start.cpp |52.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/defrag/ut/ydb-core-blobstorage-vdisk-defrag-ut |52.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/backup/controller/tx_init.cpp |52.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/schema.cpp |52.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/common/vdisk_handle_class.cpp |52.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/common/vdisk_costmodel.cpp |52.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/common/vdisk_histogram_latency.cpp |52.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/backup/controller/tx_init_schema.cpp |52.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/predicate/filter.cpp |52.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/persqueue/topic_parser/counters.cpp |52.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/test_helper/columnshard_ut_common.cpp |52.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/common/vdisk_outofspace.cpp |52.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_impl.cpp |52.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/ds_table/service.cpp |52.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/common/vdisk_hugeblobctx.cpp |52.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/planner/zero_level.cpp |52.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/common/vdisk_performance_params.cpp |52.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/writer/buffer/actor.cpp |52.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/common/blobstorage_cost_tracker.cpp |52.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/huge/blobstorage_hullhugeheap.cpp |52.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/test_helper/shard_reader.cpp |52.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/portions/written.cpp |52.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_util_atomicblockcounter.cpp |52.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_params.cpp |51.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_requestimpl.cpp |51.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/untag_queue.cpp |51.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_sectorrestorator.cpp |52.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/service.cpp |52.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_state.h_serialized.cpp |52.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_syslogreader.cpp |52.0%| [CC] {BAZEL_DOWNLOAD, FAILED} 
$(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/fetched_data.cpp |52.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/test_helper/helper.cpp |52.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_writer.cpp |52.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_util_signal_event.cpp |52.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/pdisk/drivedata_serializer.cpp |52.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/portions/meta.cpp |52.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_util_flightcontrol.cpp |52.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/hulldb/barriers/libvdisk-hulldb-barriers.a |52.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/hulldb/bulksst_add/libvdisk-hulldb-bulksst_add.a |52.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/anubis_osiris/blobstorage_anubis_algo.cpp |52.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/test_helper/controllers.cpp |52.3%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/hulldb/base/libvdisk-hulldb-base.a |51.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/common/vdisk_mongroups.cpp |51.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/hulldb/fresh/libvdisk-hulldb-fresh.a |51.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/collections/abstract.cpp |51.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/hulldb/compstrat/libvdisk-hulldb-compstrat.a |51.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/deprecated/persqueue_v0/api/grpc/libapi-grpc-persqueue-deprecated.a |51.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/common/vdisk_config.cpp |51.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/hulldb/cache_block/libvdisk-hulldb-cache_block.a |52.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/olap/common/libschemeshard-olap-common.a |52.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/manager/abstract.cpp |52.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/discovery/libydb-services-discovery.a |52.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/util/ut/ydb-core-util-ut |52.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/olap/column_families/libschemeshard-olap-column_families.a |52.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/unicode/set/libcpp-unicode-set.a |52.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_begin_transaction.cpp |52.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/metadata/manager/abstract.h_serialized.cpp |52.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/hullop/libblobstorage-vdisk-hullop.a |52.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/default_fetching.cpp |52.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/hullop/hullcompdelete/libvdisk-hullop-hullcompdelete.a |52.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/dynamic_config/libydb-services-dynamic_config.a |52.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/persqueue/topic_parser/topic_parser.cpp |52.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/tag_queue.cpp |52.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/manager/modification.cpp |52.2%| [CC] {BAZEL_DOWNLOAD, FAILED} 
$(S)/ydb/core/tx/columnshard/engines/reader/plain_reader/constructor/constructor.cpp |52.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/manager/alter_impl.cpp |52.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/common/blobstorage_vdisk_guids.cpp |52.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/base/statestorage_guardian.cpp |52.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/manager/alter.cpp |52.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/hullop/hullop_entryserialize.cpp |52.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/manager/object.cpp |52.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/collections/limit_sorted.cpp |52.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/unicode/normalization/libcpp-unicode-normalization.a |52.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/public_http/libydb-core-public_http.global.a |52.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/vdisk/ingress/libblobstorage-vdisk-ingress.a |52.3%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/localrecovery/libblobstorage-vdisk-localrecovery.a |51.7%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/public_http/libydb-core-public_http.a |51.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/base/board_lookup.cpp |51.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/ext_index/metadata/extractor/libext_index-metadata-extractor.a |51.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/vdisk/libcore-blobstorage-vdisk.a |51.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/backpressure/queue.cpp |51.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/vdisk/protos/libblobstorage-vdisk-protos.a |51.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/protobuf_printer/libydb-library-protobuf_printer.a |51.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/query/libblobstorage-vdisk-query.a |52.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/scrub/libblobstorage-vdisk-scrub.a |52.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/common/kqp_ut_common.cpp |52.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_request_reporting.cpp |52.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/schlab/mon/liblibrary-schlab-mon.a |52.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/schlab/probes/liblibrary-schlab-probes.a |52.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/get_queue_url.cpp |52.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/base/board_publish.cpp |52.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_mon.cpp |52.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/persqueue/purecalc/libcore-persqueue-purecalc.a |52.3%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/repl/libblobstorage-vdisk-repl.a |52.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/backup/impl/local_partition_reader.cpp |51.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/pdisk/ut/ydb-core-blobstorage-pdisk-ut |51.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_put.cpp |51.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/group_sessions.cpp |51.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_nodemonactor.cpp |51.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/backpressure/event.cpp |51.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/manager/restore.cpp |51.9%| [CC] {BAZEL_DOWNLOAD, FAILED} 
$(S)/ydb/core/blobstorage/dsproxy/dsproxy_multiget.cpp |51.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/manager/generic_manager.cpp |51.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_strategy_base.cpp |51.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_monactor.cpp |51.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_stat.cpp |51.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_discover.cpp |52.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_check_integrity_get.cpp |52.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_put_impl.cpp |52.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_block.cpp |52.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_discover_m3dc.cpp |52.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/base/blobstorage_events.cpp |52.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_collect.cpp |52.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/skeleton/libblobstorage-vdisk-skeleton.a |52.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/localrecovery/localrecovery_defs.cpp |51.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/manager/common.cpp |51.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_multicollect.cpp |52.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/backpressure/unisched.cpp |52.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/writer/compacted_blob_constructor.cpp |52.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/workload_service/libcore-kqp-workload_service.a |52.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/sub_columns_fetching.cpp |52.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/backpressure/queue_backpressure_client.cpp |52.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/collections/full_scan_sorted.cpp |52.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/pq/proto/libproviders-pq-proto.a |52.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_blackboard.cpp |52.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/collections/not_sorted.cpp |52.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/common/vdisk_context.cpp |52.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/base/statestorage_replica.cpp |52.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_indexrestoreget.cpp |52.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/writer/indexed_blob_constructor.cpp |52.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_get.cpp |52.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_range.cpp |52.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yql/providers/dq/provider/ut/ydb-library-yql-providers-dq-provider-ut |51.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_state.cpp |51.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_discover_m3of4.cpp |51.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_get_block.cpp |51.9%| [CC] {BAZEL_DOWNLOAD, FAILED} 
$(S)/ydb/core/blobstorage/vdisk/common/blobstorage_status.cpp |51.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/syncer/libblobstorage-vdisk-syncer.a |52.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/common/columnshard.cpp |52.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_status.cpp |52.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/anubis_osiris/blobstorage_anubisfinder.cpp |52.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/client/minikql_compile/libcore-client-minikql_compile.a |52.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_impl.cpp |52.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_patch.cpp |52.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/common/vdisk_events.cpp |52.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/balance/utils.cpp |52.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_encrypt.cpp |52.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/schlab/mon/liblibrary-schlab-mon.global.a |52.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blockstore/core/libcore-blockstore-core.a |52.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/apache/arrow/liblibs-apache-arrow.a |52.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/change_exchange/libydb-core-change_exchange.a |52.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/balance/handoff_map.cpp |51.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/base/appdata.cpp |51.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/s3/actors/libproviders-s3-actors.a |51.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_mon.cpp |51.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_assimilate.cpp |51.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/common/blobstorage_dblogcutter.cpp |51.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_request.cpp |51.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_get_impl.cpp |51.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/syncer/blobstorage_syncer_broker.cpp |51.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_blockdevice_async.cpp |51.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/client/minikql_compile/compile_context.cpp |51.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/normalizer/abstract/libcolumnshard-normalizer-abstract.a |51.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/export/session/storage/s3/libsession-storage-s3.global.a |51.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/config_shards/libalter-in_store-config_shards.a |52.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/client/minikql_compile/compile_result.cpp |52.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/util/libydb-core-util.a |52.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/olap/operations/alter/abstract/liboperations-alter-abstract.a |52.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/client/minikql_compile/db_key_resolver.cpp |52.3%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/olap/layout/libschemeshard-olap-layout.a |52.4%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/defrag/libblobstorage-vdisk-defrag.a |52.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/anubis_osiris/blobstorage_osiris.cpp |52.2%| [CC] {BAZEL_DOWNLOAD, FAILED} 
$(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_actor.cpp |52.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/anubis_osiris/blobstorage_anubisproxy.cpp |52.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_impl_http.cpp |52.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/huge/blobstorage_hullhugerecovery.cpp |52.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/anubis_osiris/blobstorage_anubis_osiris.cpp |52.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_driveestimator.cpp |52.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_nodemon.cpp |52.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/address_classifier.cpp |52.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/huge/blobstorage_hullhuge.cpp |52.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/backoff.cpp |52.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/intrusive_heap.cpp |52.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/cpuinfo.cpp |52.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_tools.cpp |52.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/ulid.cpp |52.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/hulldb/base/hullbase_barrier.cpp |52.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/balance/sender.cpp |52.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/hulldb/barriers/barriers_essence.cpp |52.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/common/vdisk_recoverylogwriter.cpp |52.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/aws.cpp |52.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/ui64id.cpp |52.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/text.cpp |52.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/stlog.cpp |52.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/cache.cpp |52.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/fragmented_buffer.cpp |52.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/format.cpp |52.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/concurrent_rw_hash.cpp |52.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/fast_tls.cpp |52.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/source_location.cpp |52.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/console.cpp |52.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/balance/deleter.cpp |52.3%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/client/server/libcore-client-server.a |52.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/common/vdisk_response.cpp |51.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/page_map.cpp |51.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/hazard.cpp |51.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/random.cpp |51.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/gen_step.cpp |51.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/hyperlog_counter.cpp |51.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/schlab/schemu/liblibrary-schlab-schemu.a |51.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/schlab/schoot/liblibrary-schlab-schoot.a |51.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/schlab/schine/liblibrary-schlab-schine.a |51.6%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/cms/console/libcore-cms-console.a |51.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/balance/balancing_actor.cpp |51.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/cms/console/util/libcms-console-util.a |52.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/client/server/http_ping.cpp |52.3%| [AR] {BAZEL_DOWNLOAD} 
$(B)/ydb/core/cms/console/validators/libcms-console-validators.a |52.5%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/cms/libydb-core-cms.a |52.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/hulldb/base/blobstorage_hullsatisfactionrank.cpp |52.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/anubis_osiris/blobstorage_anubisrunner.cpp |52.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/client/server/msgbus_server_configdummy.cpp |52.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/hulldb/fresh/fresh_appendix.cpp |52.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/hulldb/barriers/barriers_tree.cpp |52.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/hulldb/barriers/barriers_chain.cpp |52.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/cms/node_checkers.h_serialized.cpp |52.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/hullop/blobstorage_hulllog.cpp |52.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/anubis_osiris/blobstorage_anubis.cpp |52.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/hulldb/barriers/barriers_public.cpp |52.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/hulldb/cache_block/cache_block.cpp |52.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/hulldb/bulksst_add/hulldb_bulksst_add.cpp |52.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/hulldb/fresh/fresh_segment.cpp |52.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/hulldb/fresh/fresh_datasnap.cpp |52.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/scrub/scrub_actor_mon.cpp |52.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/localrecovery/localrecovery_readbulksst.cpp |52.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/common/vdisk_log.cpp |52.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/client/minikql_compile/mkql_compile_service.cpp |52.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/hulldb/base/blobstorage_hulldefs.cpp |52.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/normalizer/abstract/abstract.h_serialized.cpp |52.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/scrub/blob_recovery_request.cpp |52.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/hulldb/compstrat/hulldb_compstrat_defs.cpp |52.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/hullop/hullop_compactfreshappendix.cpp |52.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/hullop/blobstorage_hulllogcutternotify.cpp |52.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/hullop/blobstorage_hull.cpp |52.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/scrub/scrub_actor_huge.cpp |52.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/scrub/blob_recovery_queue.cpp |52.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/hullop/hullcompdelete/blobstorage_hullcompdelete.cpp |52.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/cms/audit_log.cpp |52.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/query/assimilation.cpp |52.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/hulldb/fresh/fresh_data.cpp |52.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/hulldb/compstrat/hulldb_compstrat_selector.cpp |52.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/repl/blobstorage_replbroker.cpp |52.4%| [CC] 
{BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/scrub/scrub_actor_snapshot.cpp |52.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/scrub/blob_recovery.cpp |52.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/query/query_range.cpp |52.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/hullop/blobstorage_hullactor.cpp |52.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/scrub/scrub_actor_unreadable.cpp |52.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/skeleton/blobstorage_skeletonfront.cpp |52.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/localrecovery/localrecovery_logreplay.cpp |52.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/query/query_readactor.cpp |52.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/query/query_public.cpp |52.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/skeleton/blobstorage_monactors.cpp |52.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/scrub/restore_corrupted_blob_actor.cpp |52.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/scrub/scrub_actor.cpp |52.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/util/memory_tracker.cpp |52.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/query/query_readbatch.cpp |52.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/skeleton/skeleton_vmovedpatch_actor.cpp |52.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/repl/blobstorage_hullrepljob.cpp |52.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/query/query_stathuge.cpp |52.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/skeleton/skeleton_vmultiput_actor.cpp |52.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/query/query_extr.cpp |52.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/repl/blobstorage_repl.cpp |52.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/skeleton/skeleton_vpatch_actor.cpp |52.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/client/server/ic_nodes_cache_service.cpp |52.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/skeleton/skeleton_loggedrec.cpp |52.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/query/query_stattablet.cpp |52.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/skeleton/skeleton_overload_handler.cpp |52.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/query/query_barrier.cpp |52.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/repl/blobstorage_replproxy.cpp |52.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/cms/console/configs_config.cpp |52.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/scrub/blob_recovery_process.cpp |52.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/skeleton/skeleton_oos_logic.cpp |52.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/query/query_statdb.cpp |52.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/scrub/scrub_actor_sst.cpp |52.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/syncer/guid_firstrun.cpp |52.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/cms/services.h_serialized.cpp |52.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/skeleton/skeleton_shred.cpp |52.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/repl/blobstorage_replmonhandler.cpp |52.3%| [CC] {BAZEL_DOWNLOAD} 
$(S)/ydb/core/cms/services.cpp |52.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/scrub/scrub_actor_pdisk.cpp |52.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/workload_service/kqp_workload_service.cpp |52.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/skeleton/blobstorage_db.cpp |52.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yql/providers/s3/actors/ut/ydb-library-yql-providers-s3-actors-ut |52.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/skeleton/blobstorage_skeleton.cpp |52.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/skeleton/skeleton_oos_tracker.cpp |52.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/skeleton/skeleton_mon_dbmainpage.cpp |52.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/skeleton/skeleton_block_and_get.cpp |52.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/syncer/guid_recovery.cpp |52.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/syncer/blobstorage_syncer.cpp |52.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/syncer/blobstorage_syncer_recoverlostdata_proxy.cpp |52.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/cms/console/console_audit.cpp |52.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/syncer/guid_proxywrite.cpp |52.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/skeleton/skeleton_compactionstate.cpp |52.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/syncer/blobstorage_syncer_data.cpp |52.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/cms/console/tx_processor.cpp |52.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/localrecovery/localrecovery_public.cpp |52.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/skeleton/blobstorage_syncfullhandler.cpp |52.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/cms/console/modifications_validator.cpp |52.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/cms/console/grpc_library_helper.cpp |52.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/cms/console/util.cpp |52.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/syncer/blobstorage_syncer_scheduler.cpp |52.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/syncer/guid_propagator.cpp |52.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/syncer/syncer_job_task.cpp |52.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/cms/node_checkers.cpp |52.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/syncer/blobstorage_syncer_recoverlostdata.cpp |52.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/skeleton/blobstorage_syncfull.cpp |52.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/syncer/blobstorage_syncer_committer.cpp |52.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/syncer/syncer_job_actor.cpp |52.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/defrag/defrag_quantum.cpp |52.5%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/config/init/libcore-config-init.a |52.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/syncer/guid_proxyobtain.cpp |52.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/util/failure_injection.cpp |52.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/persqueue_v1/actors/libservices-persqueue_v1-actors.a |52.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/control/libydb-core-control.a |52.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/signals/libydb-library-signals.a |52.1%| [AR] 
{BAZEL_DOWNLOAD} $(B)/ydb/core/control/lib/libcore-control-lib.a |52.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/config/validation/libcore-config-validation.a |52.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/debug/libydb-core-debug.a |52.5%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/discovery/libydb-core-discovery.a |52.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/export/session/storage/s3/storage.cpp |52.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/docapi/libydb-core-docapi.a |52.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/cms/libydb-core-cms.global.a |52.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/hulldb/cache_block/ut/ydb-core-blobstorage-vdisk-hulldb-cache_block-ut |52.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/config/init/dummy.cpp |52.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/defrag/defrag_actor.cpp |52.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/client/server/msgbus_server_pq_metacache.cpp |52.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/client/server/msgbus_server.cpp |52.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/signals/private.cpp |52.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/defrag/defrag_rewriter.cpp |52.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/persqueue_v1/actors/persqueue_utils.cpp |52.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/persqueue_v1/actors/codecs.cpp |52.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/client/minikql_compile/yql_expr_minikql.cpp |52.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/persqueue_v1/actors/partition_writer_cache_actor.cpp |52.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/persqueue_v1/actors/partition_writer.cpp |52.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/client/server/grpc_proxy_status.cpp |52.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/client/server/msgbus_server_hive_create_tablet.cpp |52.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/persqueue_v1/actors/helpers.cpp |52.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/sentinel.cpp |52.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/normalizer/abstract/abstract.cpp |52.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/client/server/msgbus_http_server.cpp |52.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/signals/histogram.cpp |52.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/signals/object_counter.cpp |52.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/client/server/msgbus_server_drain_node.cpp |52.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/signals/client.cpp |52.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/cms_tx_remove_permissions.cpp |52.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/configs_cache.cpp |52.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/syncer/blobstorage_syncer_localwriter.cpp |52.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/console__get_log_tail.cpp |52.3%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/driver_lib/cli_utils/libcli_utils.a |52.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/client/server/msgbus_server_node_registration.cpp |52.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/signals/agent.cpp |52.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/workload/abstract/liblibrary-workload-abstract.a |52.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/driver_lib/base_utils/libbase_utils.a |52.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/console__get_yaml_metadata.cpp |52.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/driver_lib/cli_config_base/libcore-driver_lib-cli_config_base.a |52.5%| [LD] {BAZEL_DOWNLOAD, FAILED} 
$(B)/ydb/core/blobstorage/ut_vdisk/ydb-core-blobstorage-ut_vdisk |52.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/driver_lib/cli_utils/cli_cmd_config.cpp |52.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/driver_lib/cli_utils/melancholic_gopher.cpp |52.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/driver_lib/cli_base/libcli_base.a |52.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/console__load_state.cpp |52.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/walle_create_task_adapter.cpp |52.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/cms_tx_remove_request.cpp |52.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/configs_dispatcher.cpp |52.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/console__remove_tenant_done.cpp |52.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/console__update_confirmed_subdomain.cpp |52.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/engine/libydb-core-engine.a |52.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/console.cpp |52.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/console__replace_config_subscriptions.cpp |52.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/client/server/grpc_server.cpp |52.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/client/server/msgbus_server_console.cpp |52.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/console__replace_yaml_config.cpp |52.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/console__remove_computational_units.cpp |52.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/walle_list_tasks_adapter.cpp |52.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/cms_tx_process_notification.cpp |52.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/client/server/msgbus_server_ic_debug.cpp |52.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/console__get_yaml_config.cpp |52.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/client/server/msgbus_blobstorage_config.cpp |52.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/cms_tx_get_log_tail.cpp |52.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/client/server/msgbus_server_cms.cpp |52.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/signals/owner.cpp |52.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/walle_check_task_adapter.cpp |52.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/console__remove_tenant_failed.cpp |52.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/filestore/core/libcore-filestore-core.a |52.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/cms_tx_remove_task.cpp |52.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/workload/kv/liblibrary-workload-kv.global.a |52.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/console__create_tenant.cpp |52.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/external_sources/object_storage/libcore-external_sources-object_storage.a |52.5%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/resharding/libalter-in_store-resharding.a |52.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/accessor/common/liblibrary-formats-arrow-accessor-common.a |52.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/walle_remove_task_adapter.cpp |52.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/accessor/composite/liblibrary-formats-arrow-accessor-composite.a |52.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/accessor/composite_serial/libarrow-accessor-composite_serial.a |52.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/console__configure.cpp |52.6%| [AR] {BAZEL_DOWNLOAD, FAILED} 
$(B)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/liboperations-alter-in_store.a |52.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/external_sources/object_storage/inference/libexternal_sources-object_storage-inference.a |52.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/accessor/abstract/libarrow-accessor-abstract.a |52.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/console__drop_yaml_config.cpp |52.6%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/normalizer/insert_table/libcolumnshard-normalizer-insert_table.global.a |52.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/scheme/defaults/common/libscheme-defaults-common.a |52.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/console__revert_pool_state.cpp |52.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/erasure/libydb-core-erasure.a |52.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/accessor/plain/libarrow-accessor-plain.global.a |52.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/erasure_checkers.cpp |52.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/accessor/plain/libarrow-accessor-plain.a |52.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/transform/libyql-dq-transform.a |52.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/console__update_subdomain_key.cpp |52.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/state/libyql-dq-state.a |52.6%| [CP] {default-linux-x86_64, release, asan} $(B)/common_test.context |52.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/tasks/libyql-dq-tasks.a |52.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/console__cleanup_subscriptions.cpp |52.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/accessor/sparsed/libarrow-accessor-sparsed.a |52.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/downtime.cpp |52.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/accessor/sub_columns/libarrow-accessor-sub_columns.global.a |52.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/console__log_cleanup.cpp |52.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/type_ann/libyql-dq-type_ann.a |52.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/cms_tx_store_permissions.cpp |52.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/cms_tx_remove_expired_notifications.cpp |52.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/cms_tx_reject_notification.cpp |52.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/console__update_tenant_state.cpp |52.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/console__remove_config_subscriptions.cpp |52.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/console__remove_config_subscription.cpp |52.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/accessor/sub_columns/libarrow-accessor-sub_columns.a |52.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/feature_flags_configurator.cpp |52.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/clickhouse/proto/libproviders-clickhouse-proto.a |52.6%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/external_sources/libydb-core-external_sources.a |52.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/jaeger_tracing_configurator.cpp |52.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/hash/libformats-arrow-hash.a |52.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/program/libformats-arrow-program.global.a |52.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/http.cpp |52.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/base/libpublic-lib-base.a |52.7%| [AR] 
{BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/common/arrow/interface/libcommon-arrow-interface.a |52.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/console__update_last_provided_config.cpp |52.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/clickhouse/provider/libproviders-clickhouse-provider.a |52.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/serializer/libformats-arrow-serializer.a |52.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/library/decimal/libsrc-library-decimal.a |52.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/console_configs_subscriber.cpp |52.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/reader/libformats-arrow-reader.a |52.7%| PREPARE $(CLANG_FORMAT-1286082657) |52.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/external_sources/external_source_builder.cpp |52.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/console__init_scheme.cpp |52.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/save_load/libformats-arrow-save_load.a |52.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/rows/libformats-arrow-rows.a |52.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/external_sources/validation_functions.cpp |52.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/cms_tx_init_scheme.cpp |52.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/walle_api_handler.cpp |52.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/external_sources/external_data_source.cpp |52.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/console__update_pool_state.cpp |52.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/external_sources/external_source_factory.cpp |52.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/types/fatal_error_handlers/libclient-types-fatal_error_handlers.a |52.7%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/engine/minikql/libcore-engine-minikql.a |52.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/rate_limiter/utils/liblibs-rate_limiter-utils.a |52.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/serializer/libformats-arrow-serializer.global.a |52.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/transformer/libformats-arrow-transformer.a |52.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/out/libapi-protos-out.a |52.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/library/jwt/libsrc-library-jwt.a |52.7%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/fq/libs/actors/libfq-libs-actors.a |52.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_table_part.cpp |52.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/runtime/libyql-dq-runtime.a |52.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/rate_limiter/events/liblibs-rate_limiter-events.a |52.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/public/sdk/cpp/src/client/config/libsrc-client-config.a |52.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/splitter/libformats-arrow-splitter.a |52.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/libydb-core-formats.a |52.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/cluster_info.cpp |52.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/deprecated/client/liblib-deprecated-client.a |52.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/read_rule/libfq-libs-read_rule.a |52.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/console__set_config.cpp |52.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/row_dispatcher/format_handler/filters/librow_dispatcher-format_handler-filters.a |52.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/checkpoint_storage/events/liblibs-checkpoint_storage-events.a |52.8%| [AR] 
{BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/audit/libfq-libs-audit.a |52.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/deprecated/kicli/liblib-deprecated-kicli.a |52.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/common/token_accessor/client/libcommon-token_accessor-client.a |52.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/library/persqueue/topic_parser_public/libsdk-library-persqueue-topic_parser_public-v3.a |52.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/checkpoint_storage/proto/liblibs-checkpoint_storage-proto.a |52.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/engine/minikql/minikql_engine_host.cpp |52.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/actors/nodes_health_check.cpp |52.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/api_adapters.cpp |52.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/actors/clusters_from_connections.cpp |52.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/row_dispatcher/format_handler/parsers/librow_dispatcher-format_handler-parsers.a |52.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/actors/task_result_write.cpp |52.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/actors/rate_limiter.cpp |52.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/console__add_config_subscription.cpp |52.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/fq/libpublic-lib-fq.a |52.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/actors/task_ping.cpp |52.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/actors/proxy_private.cpp |52.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/ext_index/metadata/libservices-ext_index-metadata.global.a |52.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/actors/task_get.cpp |52.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/program/libformats-arrow-program.a |52.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/actors/rate_limiter_resources.cpp |52.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/checkpointing/libfq-libs-checkpointing.a |52.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/metadata/libydb-services-metadata.a |52.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/libcore-formats-arrow.a |52.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/dictionary/libformats-arrow-dictionary.a |53.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/storage/actualizer/tiering/libstorage-actualizer-tiering.a |52.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/actors/pending_fetcher.cpp |52.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/actors/error.cpp |52.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/actors/result_writer.cpp |52.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/actors/nodes_manager.cpp |53.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/actors/table_bindings_from_bindings.cpp |53.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/checkpointing_common/libfq-libs-checkpointing_common.a |53.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/console_configs_provider.cpp |53.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/conveyor/usage/libtx-conveyor-usage.a |53.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/value/libpublic-lib-value.a |53.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/scheme_types/libpublic-lib-scheme_types.a |53.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/library/uuid/libsrc-library-uuid.a |53.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/shared_resources/interface/liblibs-shared_resources-interface.a |53.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/ydb_cli/commands/command_base/libydb_cli_command_base.a |52.8%| [AR] {BAZEL_DOWNLOAD} 
$(B)/ydb/public/lib/ydb_cli/commands/sdk_core_access/libydb_sdk_core_access.a |52.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/cms_tx_log_cleanup.cpp |52.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/signer/libfq-libs-signer.a |52.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/tasks_packer/libfq-libs-tasks_packer.a |52.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/checkpoint_storage/libfq-libs-checkpoint_storage.a |52.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/json_value/libpublic-lib-json_value.a |52.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/ydb_cli/commands/ydb_discovery/libydb_cli_command_ydb_discovery.a |52.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/experimental/libpublic-lib-experimental.a |53.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/coordinator/public/libtx-coordinator-public.a |53.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/ydb_cli/common/yql_parser/libydb_cli-common-yql_parser.a |53.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/client/server/msgbus_server_resolve_node.cpp |53.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/audit/events/liblibs-audit-events.a |53.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/console_handshake.cpp |53.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/coordinator/libcore-tx-coordinator.a |52.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/public/sdk/cpp/src/client/config/config.cpp |52.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/ydb_cli/dump/files/libydb_cli-dump-files.a |52.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/yson_value/libpublic-lib-yson_value.a |53.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/bg_tasks/abstract/libservices-bg_tasks-abstract.a |53.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/graph/protos/libcore-graph-protos.a |53.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/data_events/libcore-tx-data_events.a |53.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/ydb/libfq-libs-ydb.a |53.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/cloud_audit/libfq-libs-cloud_audit.a |53.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/immediate_controls_configurator.cpp |53.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/cms_tx_update_config.cpp |53.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/ydb_cli/dump/util/libydb_cli-dump-util.a |53.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/adapters/issue/libcpp-adapters-issue.a |53.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/include/ydb-cpp-sdk/client/topic/libydb-cpp-sdk-client-topic.a |53.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/row_dispatcher/purecalc_no_pg_wrapper/liblibs-row_dispatcher-purecalc_no_pg_wrapper.a |53.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/libfq-libs-config.a |53.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/grpc_streaming/libydb-core-grpc_streaming.a |53.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/control_plane_config/events/liblibs-control_plane_config-events.a |53.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/graph/service/libcore-graph-service.a |53.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/common_client/impl/libclient-common_client-impl.a |53.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/compute/ydb/synchronization_service/libcompute-ydb-synchronization_service.a |53.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/common_client/libsrc-client-common_client.a |53.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/http.cpp |53.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/control_plane_config/libfq-libs-control_plane_config.a |53.1%| [AR] {BAZEL_DOWNLOAD} 
$(B)/ydb/core/io_formats/ydb_dump/libcore-io_formats-ydb_dump.a |53.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/control_plane_proxy/events/liblibs-control_plane_proxy-events.a |53.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/logger.cpp |52.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/console__alter_tenant.cpp |52.3%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/keyvalue/libydb-core-keyvalue.a |52.3%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/fq/libs/control_plane_proxy/actors/liblibs-control_plane_proxy-actors.a |52.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/cms_tx_log_and_send.cpp |52.3%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/long_tx_service/libcore-tx-long_tx_service.a |52.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/io_formats/cell_maker/libcore-io_formats-cell_maker.a |52.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/library/operation_id/protos/liblibrary-operation_id-protos.a |52.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/jaeger_tracing/libydb-core-jaeger_tracing.a |52.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/console_tenants_manager.cpp |52.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/console__update_tenant_pool_config.cpp |52.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/limiter/usage/libtx-limiter-usage.a |52.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/fq/libs/control_plane_proxy/libfq-libs-control_plane_proxy.a |52.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/cms/libydb-services-cms.a |52.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/coordinator/coordinator_hooks.cpp |52.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/compute/ydb/liblibs-compute-ydb.a |51.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/control/immediate_control_board_actor.cpp |51.6%| PREPARE $(FLAKE8_PY3-715603131) |52.3%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/libcore-tx-datashard.a |52.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/datashard/change_exchange.h_serialized.cpp |52.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/client/server/msgbus_server_fill_node.cpp |52.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/cms.cpp |53.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/log_settings_configurator.cpp |52.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/cms_tx_store_walle_task.cpp |53.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/config_helpers.cpp |53.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/console__toggle_config_validator.cpp |53.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/draft/libsrc-client-draft.a |53.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/console__remove_tenant.cpp |53.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/info_collector.cpp |53.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/datastreams/libsrc-client-datastreams.a |53.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/console_configs_manager.cpp |53.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/persqueue_v1/actors/read_init_auth_actor.cpp |53.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/erase_rows_condition.cpp |53.0%| PREPARE $(TEST_TOOL_HOST-sbr:8580453620) |53.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/client/server/msgbus_server_test_shard_request.cpp |53.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/cms_tx_update_downtimes.cpp |53.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/logger.cpp |53.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/net_classifier_updater.cpp |53.1%| [CC] 
{BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/client/server/msgbus_server_tablet_state.cpp |53.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/client/server/msgbus_server_types.cpp |53.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/discovery/discovery.cpp |53.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/client/server/msgbus_server_proxy.cpp |53.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/config/init/init_noop.cpp |53.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/cms_tx_load_state.cpp |53.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/datashard/execution_unit_kind.h_serialized.cpp |53.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/client/server/msgbus_server_pq_metarequest.cpp |53.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/extstorage_usage_config.cpp |53.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/external_sources/object_storage.cpp |53.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/coordinator/coordinator__configure.cpp |53.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/config/init/init.h_serialized.cpp |53.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/storage/actualizer/tiering/counters.cpp |53.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/client/server/msgbus_server_scheme_initroot.cpp |53.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/persqueue_v1/actors/direct_read_actor.cpp |53.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/config/init/init.cpp |53.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/client/server/msgbus_server_tx_request.cpp |53.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/engine/minikql/flat_local_tx_factory.cpp |53.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/data_events/shard_writer.cpp |53.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/coordinator/coordinator__check.cpp |53.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/coordinator/coordinator_state.cpp |53.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/key_conflicts.cpp |53.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/client/server/msgbus_server_scheme_request.cpp |53.1%| PREPARE $(CLANG-874354456) |53.0%| PREPARE $(CLANG18-1866954364) |53.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/coordinator/coordinator_impl.cpp |53.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/driver_lib/cli_utils/cli_cmds_config.cpp |53.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/persqueue_v1/actors/distributed_commit_helper.cpp |53.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/coordinator/coordinator__plan_step.cpp |53.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/persqueue_v1/actors/update_offsets_in_transaction_actor.cpp |53.1%| [CC] {default-linux-x86_64, release, asan} $(B)/library/cpp/build_info/build_info.cpp |53.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/coordinator/mediator_queue.cpp |53.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/build_index/kmeans_helper.cpp |53.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/probes.cpp |53.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/persqueue_v1/actors/write_session_actor.cpp |53.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/persqueue_v1/actors/read_session_actor.cpp |53.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/driver_lib/cli_utils/cli_cmds_tablet.cpp |53.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/coordinator/coordinator.cpp |53.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/coordinator/coordinator__stop_guard.cpp |53.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/coordinator/coordinator__read_step_subscriptions.cpp |53.1%| 
[CC] {default-linux-x86_64, release, asan} $(S)/library/cpp/svnversion/svn_interface.c |53.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/client/server/msgbus_server_pq_read_session_info.cpp |53.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/persqueue_v1/actors/partition_actor.cpp |53.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/coordinator/coordinator__acquire_read_step.cpp |53.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/client/server/msgbus_server_persqueue.cpp |53.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/coordinator/coordinator__last_step_subscriptions.cpp |53.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/fq/libs/actors/run_actor.cpp |53.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/range_ops.cpp |53.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/coordinator/coordinator__restore_transaction.cpp |53.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/driver_lib/cli_utils/cli_actorsystem_perftest.cpp |53.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/driver_lib/cli_utils/cli_scheme_initroot.cpp |53.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/build_data_tx_out_rs_unit.cpp |53.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/driver_lib/cli_utils/cli_cmds_cms.cpp |53.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard__progress_resend_rs.cpp |53.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/export_iface.cpp |53.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/export_s3_buffer.cpp |53.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/normalizer/insert_table/broken_dedup.cpp |53.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/coordinator/coordinator__monitoring.cpp |53.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/drop_persistent_snapshot_unit.cpp |53.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/driver_lib/cli_utils/cli.cpp |53.2%| [BI] {default-linux-x86_64, release, asan} $(B)/library/cpp/build_info/buildinfo_data.h |53.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/coordinator/coordinator__restore_params.cpp |53.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/drop_table_unit.cpp |53.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/store_distributed_erase_tx_unit.cpp |53.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/export_scan.cpp |53.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/coordinator/coordinator__mediators_confirmations.cpp |53.2%| [CC] {default-linux-x86_64, release, asan} $(S)/library/cpp/build_info/build_info_static.cpp |53.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/persqueue_v1/actors/commit_offset_actor.cpp |53.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/persqueue_v1/actors/schema_actors.cpp |53.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/driver_lib/cli_utils/cli_cmds_server.cpp |53.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/type_serialization.cpp |53.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/driver_lib/cli_utils/cli_cmds_admin.cpp |53.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/driver_lib/cli_utils/cli_persqueue_cluster_discovery.cpp |53.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/driver_lib/cli_utils/cli_fakeinitshard.cpp |53.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/driver_lib/cli_utils/cli_cmds_console.cpp |53.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/backup_unit.cpp |53.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/service.cpp |53.2%| [CC] {BAZEL_DOWNLOAD, FAILED} 
$(S)/ydb/core/tx/datashard/datashard__get_state_tx.cpp |53.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/execution_unit.h_serialized.cpp |53.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/check_write_unit.cpp |53.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/driver_lib/cli_utils/cli_persqueue.cpp |53.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/driver_lib/cli_utils/cli_scheme_cache_append.cpp |53.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/execute_kqp_scan_tx_unit.cpp |53.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/client/server/msgbus_server_tablet_counters.cpp |53.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_subdomain_path_id.cpp |53.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/memory_state_migration.cpp |53.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/driver_lib/cli_utils/cli_cmds_node.cpp |53.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/driver_lib/cli_utils/cli_cmds_root.cpp |53.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/change_exchange.cpp |53.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/drop_index_notice_unit.cpp |53.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/change_record_body_serializer.cpp |53.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/incr_restore_helpers.cpp |53.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/finalize_plan_tx_unit.cpp |53.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/execute_distributed_erase_tx_unit.cpp |53.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/driver_lib/cli_utils/cli_cmds_genconfig.cpp |53.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/coordinator/coordinator__schema.cpp |53.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/driver_lib/cli_utils/cli_persqueue_stress.cpp |53.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/object.cpp |53.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/coordinator/coordinator__init.cpp |53.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/execute_commit_writes_tx_unit.cpp |53.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/persqueue_v1/actors/read_info_actor.cpp |53.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/build_index/prefix_kmeans.cpp |53.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/execute_write_unit.cpp |53.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/store_commit_writes_tx_unit.cpp |53.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/driver_lib/cli_utils/cli_cmds_tenant.cpp |53.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/direct_tx_unit.cpp |53.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/upload_stats.cpp |53.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/finish_propose_write_unit.cpp |53.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/resharding/update.cpp |53.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/driver_lib/cli_utils/cli_cmds_debug.cpp |53.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/build_index/reshuffle_kmeans.cpp |53.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/load_tx_details_unit.cpp |53.3%| [CC] {default-linux-x86_64, release, asan} $(S)/library/cpp/svnversion/svnversion.cpp |53.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/initiate_build_index_unit.cpp |53.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/build_index/local_kmeans.cpp |53.3%| [CC] 
{BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/import_s3.cpp |53.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard__engine_host.cpp |53.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/volatile_tx.h_serialized.cpp |53.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/operation.h_serialized.cpp |53.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/driver_lib/cli_utils/cli_cmds_disk.cpp |53.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/follower_edge.cpp |53.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/load_write_details_unit.cpp |53.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/prepare_distributed_erase_tx_in_rs_unit.cpp |53.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/driver_lib/cli_utils/cli_cmds_validate_config.cpp |53.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/change_record_cdc_serializer.cpp |53.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/scan_common.cpp |53.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/actor/actor.cpp |53.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/build_index/secondary_index.cpp |53.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/execute_kqp_data_tx_unit.cpp |53.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/prepare_kqp_data_tx_in_rs_unit.cpp |53.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/stream_scan_common.cpp |53.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/make_snapshot_unit.cpp |53.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/plan_queue_unit.cpp |53.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/store_data_tx_unit.cpp |53.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/move_table_unit.cpp |53.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/key_validator.cpp |53.3%| [CC] {default-linux-x86_64, release, asan} $(B)/library/cpp/build_info/sandbox.cpp |53.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/change_sender_async_index.cpp |53.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/store_scheme_tx_unit.cpp |53.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/read_table_scan.cpp |53.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/restore_unit.cpp |53.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_snapshots.cpp |53.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/finish_propose_unit.cpp |53.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/build_write_out_rs_unit.cpp |53.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/storage/actualizer/tiering/tiering.cpp |53.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/change_sender.cpp |53.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/store_snapshot_tx_unit.cpp |53.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/remove_schema_snapshots.cpp |53.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/build_index/sample_k.cpp |53.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/read_op_unit.cpp |53.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/prepare_scheme_tx_in_rs_unit.cpp |53.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/export_s3_uploader.cpp |53.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/driver_lib/cli_utils/cli_cmds_bs.cpp |53.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/receive_snapshot_cleanup_unit.cpp 
|53.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/datashard_failpoints.cpp |53.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/export_common.cpp |53.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/change_collector_cdc_stream.cpp |53.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/execute_data_tx_unit.cpp |53.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/wait_for_stream_clearance_unit.cpp |53.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/build_scheme_tx_out_rs_unit.cpp |53.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/cdc_stream_scan.cpp |53.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/incr_restore_scan.cpp |53.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/wait_for_plan_unit.cpp |53.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/load_and_wait_in_rs_unit.cpp |53.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_kqp_delete_rows.cpp |53.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/operation.cpp |53.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/finalize_build_index_unit.cpp |53.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/volatile_tx_mon.cpp |53.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/store_and_send_out_rs_unit.cpp |53.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/read_table_scan_unit.cpp |53.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/datashard/datashard_s3_upload.h_serialized.cpp |53.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/remove_lock_change_records.cpp |53.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/ext_index/metadata/behaviour.cpp |53.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/change_sender_table_base.cpp |53.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard__data_cleanup.cpp |53.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/change_record.cpp |53.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_write_operation.cpp |53.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/change_collector_async_index.cpp |53.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/change_collector_base.cpp |53.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/control_plane_proxy/config.cpp |53.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/keyvalue/keyvalue_collect_operation.cpp |53.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/datashard.h_serialized.cpp |53.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/keyvalue/keyvalue_helpers.cpp |53.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/move_index_unit.cpp |53.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/make_scan_snapshot_unit.cpp |53.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard__cancel_tx_proposal.cpp |53.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/keyvalue/keyvalue_data.cpp |53.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/store_write_unit.cpp |53.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/keyvalue/keyvalue_index_record.cpp |53.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/remove_locks.cpp |53.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/keyvalue/keyvalue_stored_state_data.cpp |53.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/change_collector.cpp |53.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard__conditional_erase_rows.cpp |53.5%| [CC] {BAZEL_DOWNLOAD, FAILED} 
$(S)/ydb/core/tx/datashard/protect_scheme_echoes_unit.cpp |53.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/keyvalue/keyvalue_storage_read_request.cpp |53.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/create_cdc_stream_unit.cpp |53.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/create_table_unit.cpp |53.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/control_plane_proxy/probes.cpp |53.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard__compaction.cpp |53.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/store_and_send_write_out_rs_unit.cpp |53.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/control_plane_proxy/actors/control_plane_storage_requester_actor.cpp |53.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/check_distributed_erase_tx_unit.cpp |53.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/create_volatile_snapshot_unit.cpp |53.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/control_plane_proxy/actors/ydb_schema_query_actor.h_serialized.cpp |53.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/long_tx_service/lwtrace_probes.cpp |53.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/receive_snapshot_unit.cpp |53.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard__cleanup_borrowed.cpp |53.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/create_incremental_restore_src_unit.cpp |53.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/execution_unit.cpp |53.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard__mon_reset_schema_version.cpp |53.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/long_tx_service/long_tx_service.cpp |53.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/datashard/backup_restore_traits.h_serialized.cpp |53.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/control_plane_proxy/actors/ydb_schema_query_actor.cpp |53.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/backup_restore_traits.cpp |53.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/prepare_data_tx_in_rs_unit.cpp |53.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/data_events/write_data.cpp |53.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_donor/ydb-core-blobstorage-ut_blobstorage-ut_donor |53.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/check_commit_writes_tx_unit.cpp |53.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/complete_data_tx_unit.cpp |53.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/events/libfq-libs-events.a |53.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/prepare_write_tx_in_rs_unit.cpp |53.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/db_schema/libfq-libs-db_schema.a |53.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/datashard/libcore-tx-datashard.global.a |53.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/driver/libsrc-client-driver.a |53.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/gateway/libfq-libs-gateway.a |53.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/hmac/libfq-libs-hmac.a |53.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/graph_params/proto/liblibs-graph_params-proto.a |53.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/grpc/libfq-libs-grpc.a |53.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/grpc_services/rpc_whoami.cpp |53.6%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/fq/libs/logs/libfq-libs-logs.a |53.6%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/fq/libs/init/libfq-libs-init.a |53.6%| [AR] {BAZEL_DOWNLOAD} 
$(B)/ydb/public/sdk/cpp/src/client/extension_common/libsrc-client-extension_common.a |53.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/export/libsrc-client-export.a |53.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_kqp_upsert_rows.cpp |53.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/extensions/solomon_stats/libclient-extensions-solomon_stats.a |53.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/change_sender_incr_restore.cpp |53.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/metrics/libfq-libs-metrics.a |53.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/value/libsrc-client-value.a |53.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/impl/ydb_internal/grpc_connections/libimpl-ydb_internal-grpc_connections.a |53.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard__monitoring.cpp |53.6%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/fq/libs/health/libfq-libs-health.a |53.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/driver_lib/version/libversion.a |53.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/mock/libfq-libs-mock.a |53.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/accessor/sparsed/libarrow-accessor-sparsed.global.a |53.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/quota_manager/events/liblibs-quota_manager-events.a |53.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/completed_operations_unit.cpp |53.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/long_tx_service/public/libtx-long_tx_service-public.a |53.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/limiter/grouped_memory/usage/liblimiter-grouped_memory-usage.a |53.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/quota_manager/libfq-libs-quota_manager.a |53.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/federated_topic/libsrc-client-federated_topic.a |53.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/limiter/grouped_memory/service/liblimiter-grouped_memory-service.a |53.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/private_client/libfq-libs-private_client.a |53.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard__compact_borrowed.cpp |53.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/iam/libsrc-client-iam.a |53.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/check_data_tx_unit.cpp |53.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard.cpp |53.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/protos/libfq-libs-protos.a |53.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_kqp_read_table.cpp |53.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/drop_volatile_snapshot_unit.cpp |53.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/library/issue/libsrc-library-issue.a |53.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/rate_limiter/quoter_service/liblibs-rate_limiter-quoter_service.a |53.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/impl/ydb_internal/common/libimpl-ydb_internal-common.a |53.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/library/operation_id/libsrc-library-operation_id.a |53.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/federated_topic/impl/libclient-federated_topic-impl.a |53.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/impl/ydb_endpoints/libclient-impl-ydb_endpoints.a |53.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/workload/kv/liblibrary-workload-kv.a |53.6%| [AR] {BAZEL_DOWNLOAD, FAILED} 
$(B)/ydb/core/fq/libs/control_plane_storage/libfq-libs-control_plane_storage.a |53.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/volatile_tx.cpp |53.6%| PREPARE $(GDB) |53.6%| [CP] {default-linux-x86_64, release, asan} $(B)/yql/essentials/minikql/comp_nodes/llvm16/yql/essentials/minikql/computation/mkql_computation_node_codegen.h |53.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/control_plane_storage/proto/liblibs-control_plane_storage-proto.a |53.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/row_dispatcher/purecalc_compilation/liblibs-row_dispatcher-purecalc_compilation.a |53.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/conflicts_cache.cpp |53.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard__schema_changed.cpp |53.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/row_dispatcher/format_handler/liblibs-row_dispatcher-format_handler.a |53.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/limiter/service/libtx-limiter-service.a |53.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/db_id_async_resolver_impl/libfq-libs-db_id_async_resolver_impl.a |53.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/change_exchange_split.cpp |53.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/check_read_unit.cpp |53.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/control_plane_storage/config.cpp |53.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard__progress_tx.cpp |53.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/discovery/libsrc-client-discovery.a |53.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/build_and_wait_dependencies_unit.cpp |53.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/control_plane_storage/util.cpp |53.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/check_scheme_tx_unit.cpp |53.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard__cleanup_uncommitted.cpp |53.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/build_distributed_erase_tx_out_rs_unit.cpp |53.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/control_plane_storage/probes.cpp |53.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/cdc_stream_heartbeat.cpp |53.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/library/persqueue/obfuscate/libsdk-library-persqueue-obfuscate-v3.a |53.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/control_plane_storage/control_plane_storage_counters.cpp |53.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/impl/ydb_internal/make_request/libimpl-ydb_internal-make_request.a |53.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/control_plane_storage/request_validators.cpp |53.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/bloom/libstorage-indexes-bloom.global.a |53.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/signal_backtrace/libydb-library-signal_backtrace.a |53.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard__object_storage_listing.cpp |53.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard__read_columns.cpp |53.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/grpc_services/grpc_mon.cpp |53.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/impl/ydb_internal/retry/libimpl-ydb_internal-retry.a |53.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/impl/ydb_internal/plain_status/libimpl-ydb_internal-plain_status.a |53.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/grpc_services/grpc_publisher_service_actor.cpp |53.8%| [AR] {BAZEL_DOWNLOAD} 
$(B)/ydb/services/auth/libydb-services-auth.a |53.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/complete_write_unit.cpp |53.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/datashard_active_transaction.h_serialized.cpp |53.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/common/db_id_async_resolver/libproviders-common-db_id_async_resolver.a |53.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/impl/ydb_internal/session_pool/libimpl-ydb_internal-session_pool.a |53.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard__propose_tx_base.cpp |53.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/common/libalter-in_store-common.a |53.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/impl/ydb_internal/thread_pool/libimpl-ydb_internal-thread_pool.a |53.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard__plan_step.cpp |53.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/grpc_services/cancelation/protos/libgrpc_services-cancelation-protos.a |53.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/grpc_services/audit_log.cpp |53.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_osiris/ydb-core-blobstorage-ut_blobstorage-ut_osiris |53.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/impl/ydb_stats/libclient-impl-ydb_stats.a |53.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/create_persistent_snapshot_unit.cpp |53.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/mediator/libcore-tx-mediator.a |53.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/priorities/usage/libtx-priorities-usage.a |53.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/io_formats/arrow/scheme/libio_formats-arrow-scheme.a |53.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/keyvalue/keyvalue_simple_db_flat.cpp |53.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/row_dispatcher/events/liblibs-row_dispatcher-events.a |53.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard__column_stats.cpp |53.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/params/libsrc-client-params.a |53.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_kqp_effects.cpp |53.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard__cleanup_in_rs.cpp |53.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard__stats.cpp |53.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/operation/libsrc-client-operation.a |54.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard__s3_download_txs.cpp |54.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_direct_transaction.cpp |54.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard__init.cpp |54.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/check_snapshot_tx_unit.cpp |53.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/storage/actualizer/scheme/libstorage-actualizer-scheme.a |53.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard__s3_upload_txs.cpp |53.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_change_sending.cpp |53.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/change_sender_cdc_stream.cpp |53.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard__write.cpp |53.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard__migrate_schemeshard.cpp |53.9%| [AR] {BAZEL_DOWNLOAD} 
$(B)/ydb/core/fq/libs/control_plane_storage/events/liblibs-control_plane_storage-events.a |53.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard__op_rows.cpp |54.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_change_receiving.cpp |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/common/pushdown/libproviders-common-pushdown.a |54.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/locks/libcore-tx-locks.a |54.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_table_committed.cpp |54.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/drop_cdc_stream_unit.cpp |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/priorities/service/libtx-priorities-service.a |54.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard__cleanup_tx.cpp |53.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/locks/range_treap.cpp |53.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/locks/time_counters.cpp |54.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/build_kqp_data_tx_out_rs_unit.cpp |54.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_s3_upload_rows.cpp |54.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard__kqp_scan.cpp |54.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_active_transaction.cpp |54.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_distributed_erase.cpp |54.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_locks_db.cpp |54.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard__snapshot_txs.cpp |54.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_direct_upload.cpp |54.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard__readset.cpp |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/replication/common/libtx-replication-common.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/proto/libsrc-client-proto.a |54.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/long_tx_service/commit_impl.cpp |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/ydb_cli/common/libcommon.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/import/libsrc-client-import.a |54.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_outreadset.cpp |54.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/long_tx_service/acquire_snapshot_impl.cpp |54.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_common_upload.cpp |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/query/impl/libclient-query-impl.a |54.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard__store_scan_state.cpp |54.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_dep_tracker.cpp |53.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/resources/libsrc-client-resources.a |53.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard__read_iterator.cpp |53.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_s3_uploads.cpp |53.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/rate_limiter/libsrc-client-rate_limiter.a |53.7%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/replication/controller/libtx-replication-controller.a |53.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/replication/controller/lag_provider.cpp |53.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/long_tx_service/long_tx_service_impl.cpp |53.9%| [AR] {BAZEL_DOWNLOAD} 
$(B)/ydb/public/sdk/cpp/src/client/query/libsrc-client-query.a |54.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/replication/controller/event_util.cpp |54.1%| PREPARE $(WITH_JDK-sbr:7832760150) |54.1%| PREPARE $(JDK17-472926544) |54.1%| PREPARE $(JDK_DEFAULT-472926544) |54.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_overload.cpp |54.1%| PREPARE $(WITH_JDK17-sbr:7832760150) |54.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_repl_offsets_server.cpp |54.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_change_sender_activation.cpp |54.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/fq/libs/control_plane_proxy/control_plane_proxy.cpp |54.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_user_table.cpp |54.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/replication/controller/session_info.cpp |54.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/replication/controller/nodes_manager.cpp |54.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/replication/controller/stream_remover.cpp |54.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_repl_apply.cpp |54.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/replication/controller/target_with_stream.cpp |54.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/replication/controller/private_events.cpp |54.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/replication/controller/replication.h_serialized.cpp |54.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/replication/controller/stream_consumer_remover.cpp |54.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/replication/controller/replication.cpp |54.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/replication/controller/target_table.cpp |54.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_s3_downloads.cpp |54.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_schema_snapshots.cpp |54.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/replication/controller/sys_params.cpp |54.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/replication/controller/target_base.cpp |54.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/coordinator/coordinator__schema_upgrade.cpp |54.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard__store_table_path.cpp |54.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/replication/controller/target_transfer.cpp |53.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_loans.cpp |53.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_repl_offsets_client.cpp |53.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/replication/controller/tenant_resolver.cpp |53.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/replication/controller/target_discoverer.cpp |54.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/data_events/shards_splitter.cpp |54.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_split_dst.cpp |54.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_split_src.cpp |54.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_direct_erase.cpp |54.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/mediator/mediator__schema_upgrade.cpp |54.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/query/rpc_attach_session.cpp |54.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/fq/libs/control_plane_storage/internal/liblibs-control_plane_storage-internal.a |54.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_vdisk_restart/blobstorage-ut_blobstorage-ut_vdisk_restart |54.1%| [CC] 
[build log, condensed: progress 54.0%–55.4%]
Continuous parallel stream of [CC] compile, [AR] archive and [LD] link steps, the majority tagged {BAZEL_DOWNLOAD, FAILED} and the remainder {BAZEL_DOWNLOAD}. Affected targets span ydb/core (grpc_services, keyvalue, kqp, load_test, local_pgwire, log_backend, memory_controller, mind, mon, mon_alloc, persqueue, tablet_flat, fq/libs, blobstorage/vdisk/synclog, tx/columnshard, tx/datashard, tx/locks, tx/mediator, tx/program, tx/replication, tx/schemeshard), ydb/services (metadata, lib/actors, backup), ydb/public (sdk/cpp, lib/ydb_cli), ydb/library and yql/essentials.
Failed-download [LD] steps in this span include test binaries such as ydb-core-blobstorage-ut_blobstorage, ydb-core-tablet_flat-ut, ydb-core-kqp-ut-query, ydb-core-kqp-runtime-ut, ydb-core-public_http-ut, ydb-core-cms-ut, and several ydb/core/tx/schemeshard ut_* suites (ut_split_merge, ut_extsubdomain, ut_transfer, ut_pq_reboots, ut_reboots, ut_export).
One-off steps interleaved in the stream: PREPARE $(CLANG-1922233694), PREPARE $(CLANG16-1380963495), and a single local compile [CC] {default-linux-x86_64, release, asan} $(S)/ydb/library/yaml_config/static_validator/builders.cpp.
Progress percentages fluctuate non-monotonically (e.g. 54.4% → 53.9% → 54.5%) as parallel steps complete out of order.
{BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/monitoring.cpp |55.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/hive_statics.cpp |55.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/hive_domains.cpp |55.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/mediator/ut/ydb-core-tx-mediator-ut |55.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/update_group_latencies.cpp |55.4%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/gateway/utils/libkqp-gateway-utils.a |55.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/tx__adopt_tablet.cpp |55.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/hive_impl.cpp |55.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/cmds_drive_status.cpp |55.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/shared_handle.cpp |55.3%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/gateway/behaviour/view/libgateway-behaviour-view.a |55.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/export/session/storage/abstract/libsession-storage-abstract.a |55.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/scheme/libydb-core-scheme.a |55.3%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/quoter/libydb-core-quoter.a |55.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/raw_socket/libydb-core-raw_socket.a |55.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/dread_cache_service/caching_service.cpp |55.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/storage_balancer.cpp |55.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/storage_stats_calculator.cpp |55.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/export/common/libcolumnshard-export-common.a |55.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/security/certificate_check/libcore-security-certificate_check.a |55.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/tx__init_scheme.cpp |55.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/s3/credentials/libproviders-s3-credentials.a |55.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/s3/expr_nodes/libproviders-s3-expr_nodes.a |55.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/tx__generate_data_ut.cpp |55.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/tx__cut_tablet_history.cpp |55.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/quoter/debug_info.cpp |55.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/tx__update_tablet_groups.cpp |55.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/scheme_types/libydb-core-scheme_types.a |55.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/export/session/storage/tier/libsession-storage-tier.global.a |55.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/persqueue_cluster_discovery/libydb-services-persqueue_cluster_discovery.a |55.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/s3/actors_factory/libproviders-s3-actors_factory.a |55.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/quoter/probes.cpp |55.3%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/statistics/aggregator/libcore-statistics-aggregator.a |55.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/shred.cpp |55.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_move_reboots/ydb-core-tx-schemeshard-ut_move_reboots |55.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/lexer/antlr4_pure_ansi/libv1-lexer-antlr4_pure_ansi.a |55.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/resources/libsrc-client-resources.global.a |55.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/follower_tablet_info.cpp |55.4%| [CC] 
{BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/storage_pool_info.cpp |55.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/digest/argonish/internal/proxies/sse2/libinternal-proxies-sse2.a |55.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/tablet_move_info.cpp |55.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/clang18-rt/lib/asan/libclang_rt.asan-x86_64.a |55.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/hive_log.cpp |55.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/group_layout_checker.cpp |55.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/statistics/aggregator/schema.cpp |55.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/cmds_host_config.cpp |55.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/load_everything.cpp |55.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/get_group.cpp |55.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/drop_donor.cpp |55.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/cmds_box.cpp |55.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/commit_config.cpp |55.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/tx__tablet_owners_reply.cpp |55.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/read_balancer.cpp |55.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/proxy_service/kqp_proxy_service.cpp |55.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/ydb/ut/ydb-services-ydb-ut |55.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/tx__create_tablet.cpp |55.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/register_node.cpp |55.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/persqueue_cluster_discovery/counters.cpp |55.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/persqueue_cluster_discovery/cluster_discovery_worker.cpp |55.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/s3/events/libproviders-s3-events.a |55.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/layout_helpers.cpp |55.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/migrate.cpp |55.4%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/persqueue/writer/libcore-persqueue-writer.a |55.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/issue/protos/libcore-issue-protos.a |55.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/config_cmd.cpp |55.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/out/libcore-protos-out.a |55.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/config_fit_pdisks.cpp |55.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/public_http/protos/libcore-public_http-protos.a |55.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/group_metrics_exchange.cpp |55.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/hulldb/generic/libvdisk-hulldb-generic.a |55.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/sys_view/nodes/libcore-sys_view-nodes.a |55.3%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/counters/libcore-kqp-counters.a |55.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/helper/libproviders-dq-helper.a |55.3%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/sys_view/libydb-core-sys_view.a |55.3%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/storage/actualizer/index/libstorage-actualizer-index.a |55.3%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/sys_view/pg_tables/libcore-sys_view-pg_tables.a |55.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/s3/path_generator/libproviders-s3-path_generator.a 
|55.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/config/libproviders-dq-config.a |55.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/tx__configure_scale_recommender.cpp |55.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/config.cpp |55.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/tx__block_storage_result.cpp |55.4%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/sys_view/processor/libcore-sys_view-processor.a |55.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/node_report.cpp |55.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/tx__process_pending_operations.cpp |55.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/hulldb/generic/blobstorage_hulloptlsn.cpp |55.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/update_last_seen_ready.cpp |55.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/tx__release_tablets.cpp |55.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/tx__delete_tablet_result.cpp |55.5%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/sys_view/query_stats/libcore-sys_view-query_stats.a |55.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/quoter/quoter_service.cpp |55.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/bsc.cpp |55.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/disk_metrics.cpp |55.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/config_fit_groups.cpp |55.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/pgproxy/protos/libcore-pgproxy-protos.a |55.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/account_read_quoter.cpp |55.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/stat_processor.cpp |55.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/tx__sync_tablets.cpp |55.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/virtual_group.cpp |55.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/monitoring.cpp |55.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/tx__resume_tablet.cpp |55.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/update_seen_operational.cpp |55.5%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/security/libydb-core-security.a |55.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/tx__lock_tablet.cpp |55.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/request_controller_info.cpp |55.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/tx__status.cpp |55.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/sys_view/processor/schema.cpp |55.5%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/sys_view/partition_stats/libcore-sys_view-partition_stats.a |55.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/tx__reassign_groups.cpp |55.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/common/simple/libkqp-common-simple.a |55.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/scrub.cpp |55.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/s3/serializations/libproviders-s3-serializations.a |55.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/cluster_tracker.cpp |55.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/export/session/storage/tier/storage.cpp |55.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/tx__request_tablet_seq.cpp |55.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/quoter/public/libcore-quoter-public.a |55.4%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/sys_view/service/libcore-sys_view-service.a |55.4%| [AR] {BAZEL_DOWNLOAD, FAILED} 
$(B)/ydb/core/sys_view/resource_pools/libcore-sys_view-resource_pools.a |55.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/solomon/common/libproviders-solomon-common.a |55.4%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/sys_view/resource_pool_classifiers/libcore-sys_view-resource_pool_classifiers.a |55.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/select_groups.cpp |55.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/security/login_shared_func.cpp |55.5%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/statistics/database/libcore-statistics-database.a |55.5%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/hulldb/recovery/libvdisk-hulldb-recovery.a |55.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/group_mapper.cpp |55.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/common/shutdown/libkqp-common-shutdown.a |55.5%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/statistics/service/libcore-statistics-service.a |55.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/tx__kill_node.cpp |55.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/tx__reassign_groups_on_decommit.cpp |55.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/tx__load_everything.cpp |55.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/tx__seize_tablets.cpp |55.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/sys_view/service/db_counters.cpp |55.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/sys_view/service/query_interval.cpp |55.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/s3/range_helpers/libproviders-s3-range_helpers.a |55.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/tx__seize_tablets_reply.cpp |55.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/normalizer/portion/libcolumnshard-normalizer-portion.global.a |55.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/tx__stop_tablet.cpp |55.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/folder_service/proto/libpy3library-folder_service-proto.global.a |55.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/normalizer/portion/libcolumnshard-normalizer-portion.a |55.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/sys_view.cpp |55.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/hooks/abstract/libcolumnshard-hooks-abstract.a |55.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/tx__switch_drain.cpp |55.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/grouper.cpp |55.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/tx__release_tablets_reply.cpp |55.3%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/libcore-tx-columnshard.a |55.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/tx__process_boot_queue.cpp |55.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/s3/object_listers/libproviders-s3-object_listers.a |55.3%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/loading/libtx-columnshard-loading.a |55.3%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/compute_actor/libcore-kqp-compute_actor.a |55.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/tx__disconnect_node.cpp |55.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/statistics/service/http_request.cpp |55.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/s3/statistics/libproviders-s3-statistics.a |55.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/propose_group_key.cpp |55.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/resource_pools/libydb-core-resource_pools.a |55.4%| [CC] {BAZEL_DOWNLOAD, FAILED} 
$(S)/ydb/core/quoter/kesus_quoter_proxy.cpp |55.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/tx__start_tablet.cpp |55.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/tx__register_node.cpp |55.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/persqueue_cluster_discovery/grpc_service.cpp |55.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/init_scheme.cpp |55.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/tx__restart_tablet.cpp |55.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/partition_blob_encoder.cpp |55.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/self_heal.cpp |55.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/tx__update_tablets_object.cpp |55.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/tx__response_tablet_seq.cpp |55.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/mirrorer.cpp |55.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/tx__unlock_tablet.cpp |55.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/console_interaction.cpp |55.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/ownerinfo.cpp |55.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/persqueue_cluster_discovery/cluster_discovery_service.cpp |55.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/compute_actor/kqp_compute_events.cpp |55.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/rpc_fq_internal.cpp |55.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/tx__update_tablet_status.cpp |55.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/partition.cpp |55.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/list_all_topics_actor.cpp |55.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/blob.cpp |55.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/partition_scale_manager.cpp |55.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/transaction.cpp |55.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/event_helpers.cpp |55.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/partition_scale_request.cpp |55.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/pq_l2_cache.cpp |55.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/defs.cpp |55.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/processor/tx_interval_metrics.cpp |55.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/service/ext_counters.cpp |55.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/processor/tx_interval_summary.cpp |55.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/processor/processor_impl.cpp |55.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/partition_sourcemanager.cpp |55.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/counters.cpp |55.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/processor/tx_init.cpp |55.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/fetch_request_actor.cpp |55.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/s3/provider/libproviders-s3-provider.a |55.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/write_quoter.cpp |55.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/processor/tx_init_schema.cpp |55.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet_flat/tablet_flat_executor.cpp |55.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/counters/kqp_counters.cpp |55.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/processor/tx_configure.cpp |55.6%| [CC] {BAZEL_DOWNLOAD, FAILED} 
$(S)/ydb/core/persqueue/partition_monitoring.cpp |55.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/gateway/utils/scheme_helpers.cpp |55.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/pq_impl.cpp |55.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/executer_actor/kqp_planner_strategy.cpp |55.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/compute_actor/kqp_scan_events.cpp |55.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/compute_actor/kqp_scan_compute_stat.cpp |55.4%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/sys_view/show_create/libcore-sys_view-show_create.a |55.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/pq_impl_app.cpp |55.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/partition_read.cpp |55.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/processor/processor.cpp |55.4%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/normalizer/granule/libcolumnshard-normalizer-granule.global.a |55.4%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/testlib/libydb-core-testlib.a |55.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/partition_init.cpp |55.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/compute_actor/kqp_compute_state.h_serialized.cpp |55.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/compute_actor/kqp_scan_common.cpp |55.5%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/blobs_action/abstract/libcolumnshard-blobs_action-abstract.a |55.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/processor/tx_aggregate.cpp |55.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/executer_actor/kqp_tasks_graph.cpp |55.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/statistics/aggregator/aggregator.cpp |55.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/solomon/expr_nodes/libproviders-solomon-expr_nodes.a |55.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/sys_view/show_create/formatters_common.cpp |55.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/solomon/proto/libproviders-solomon-proto.a |55.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/deprecated/persqueue_v0/api/protos/libapi-protos-persqueue-deprecated.a |55.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/sys_view/show_create/create_view_formatter.cpp |55.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/processor/tx_top_partitions.cpp |55.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/read_balancer_app.cpp |55.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/read_balancer__balancing_app.cpp |55.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/read_balancer__balancing.cpp |55.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/read_quoter.cpp |55.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/testlib/storage_helpers.cpp |55.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/solomon/actors/libproviders-solomon-actors.a |55.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/statistics/aggregator/tx_analyze.cpp |55.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/statistics/database/database.cpp |55.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/sourceid.cpp |55.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/hulldb/generic/hullds_idx.cpp |55.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/user_info.cpp |55.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/testlib/actor_helpers.cpp |55.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/blobs_action/abstract/common.cpp |55.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/blobs_action/abstract/read.cpp |55.5%| [CC] {BAZEL_DOWNLOAD, FAILED} 
$(S)/ydb/core/sys_view/processor/tx_collect.cpp |55.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/blobs_action/abstract/blob_set.cpp |55.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/blobs_action/abstract/remove.cpp |55.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet_flat/shared_sausagecache.cpp |55.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/blobs_action/abstract/write.cpp |55.6%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/export/session/selector/backup/libsession-selector-backup.global.a |55.6%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/testlib/basics/libcore-testlib-basics.a |55.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/processor/db_counters.cpp |55.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/balance_coverage/libcore-tx-balance_coverage.a |55.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tablet_flat/protos/libcore-tablet_flat-protos.a |55.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/ydb_issue/proto/liblibrary-ydb_issue-proto.a |55.7%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/normalizer/schema_version/libcolumnshard-normalizer-schema_version.global.a |55.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/solomon/events/libproviders-solomon-events.a |55.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/pq.cpp |55.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/executer_actor/kqp_tasks_validate.cpp |55.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/executer_actor/kqp_data_executer.cpp |55.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/solomon/gateway/libproviders-solomon-gateway.a |55.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/executer_actor/kqp_table_resolver.cpp |55.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/executer_actor/kqp_scan_executer.cpp |55.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/statistics/aggregator/tx_analyze_table_delivery_problem.cpp |55.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/executer_actor/kqp_executer_stats.cpp |55.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/executer_actor/kqp_planner.cpp |55.5%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/sys_view/common/libcore-sys_view-common.a |55.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/executer_actor/kqp_partition_helper.cpp |55.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/bg_tasks/events/libcolumnshard-bg_tasks-events.a |55.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/solomon/provider/libproviders-solomon-provider.a |55.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/statistics/aggregator/tx_resolve.cpp |55.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/blob_cache.cpp |55.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/ydb_issue/proto/libpy3library-ydb_issue-proto.global.a |55.6%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/test_tablet/libydb-core-test_tablet.a |55.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/compile_service/kqp_compile_actor.cpp |55.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/partition_write.cpp |55.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/statistics/aggregator/tx_init_schema.cpp |55.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/persqueue/read_balancer__balancing.h_serialized.cpp |55.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/statistics/aggregator/tx_analyze_table_response.cpp |55.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/solomon/solomon_accessor/client/libsolomon-solomon_accessor-client.a |55.7%| [AR] {BAZEL_DOWNLOAD} 
$(B)/ydb/core/security/ldap_auth_provider/libcore-security-ldap_auth_provider.a |55.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/s3transfer/py3/libpy3python-s3transfer-py3.global.a |55.7%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/sys_view/tablets/libcore-sys_view-tablets.a |55.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/solomon/solomon_accessor/grpc/libsolomon-solomon_accessor-grpc.a |55.7%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/blobs_action/bs/libcolumnshard-blobs_action-bs.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/bg_tasks/manager/libcolumnshard-bg_tasks-manager.a |54.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/sys_view/sessions/libcore-sys_view-sessions.a |54.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/service/sysview_service.cpp |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/bg_tasks/transactions/libcolumnshard-bg_tasks-transactions.a |54.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yql/udfs/common/clickhouse/client/libclickhouse_client_udf.global.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/lib/mkql_helpers/libyt-lib-mkql_helpers.a |54.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/blobs_action/local/libcolumnshard-blobs_action-local.a |54.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/hulldb/generic/hullds_sstslice.cpp |54.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/blobs_action/storages_manager/libcolumnshard-blobs_action-storages_manager.a |54.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/blobs_action/events/libcolumnshard-blobs_action-events.a |54.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/testlib/actors/libcore-testlib-actors.a |54.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/sys_view/storage/libcore-sys_view-storage.a |54.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/statistics/aggregator/tx_aggr_stat_response.cpp |54.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/statistics/aggregator/aggregator_impl.cpp |54.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/public/ydb_issue/libyql-public-ydb_issue.a |54.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/subscriber.cpp |54.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/utils/actor_log/libyql-utils-actor_log.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/blobs_action/common/libcolumnshard-blobs_action-common.a |54.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/bg_tasks/abstract/libcolumnshard-bg_tasks-abstract.a |54.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/base/common/demangle.cpp |54.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Columns/ColumnConst.cpp |54.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/PODArray.cpp |54.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/normalizer/portion/special_cleaner.cpp |54.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/writer/source_id_encoding.cpp |55.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/DataTypeEnum.cpp |55.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/hulldb/generic/hulldb_bulksstmngr.cpp |54.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Processors/Formats/Impl/ParquetBlockOutputFormat.cpp |54.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/executer_actor/kqp_scheme_executer.cpp |54.9%| [CC] {BAZEL_DOWNLOAD, FAILED} 
$(S)/ydb/core/kqp/compile_service/kqp_compile_service.cpp |54.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/executer_actor/kqp_partitioned_executer.cpp |55.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Processors/Formats/Impl/ORCBlockInputFormat.cpp |55.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/statistics/aggregator/tx_navigate.cpp |55.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Processors/Formats/Impl/ParquetBlockInputFormat.cpp |55.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/columnshard.h_serialized.cpp |55.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/statistics/aggregator/tx_analyze_deadline.cpp |55.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/testlib/actors/wait_events.cpp |55.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ParserDatabaseOrNone.cpp |55.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/parseIdentifierOrStringLiteral.cpp |55.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/formatSettingName.cpp |55.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/hulldb/generic/hullds_sst.cpp |55.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/hulldb/generic/hullds_sstvec.cpp |55.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Interpreters/ProfileEventsExt.cpp |55.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Interpreters/InternalTextLogsQueue.cpp |55.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/gateway/actors/analyze_actor.cpp |55.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/IO/CompressionMethod.cpp |55.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Interpreters/ClientInfo.cpp |55.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/IO/ThreadPoolReader.cpp |55.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/statistics/aggregator/tx_analyze_table_request.cpp |55.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/IO/DoubleConverter.cpp |55.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/Serializations/SerializationDate.cpp |55.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/IO/AsynchronousReadBufferFromFile.cpp |55.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/Serializations/SerializationDate32.cpp |55.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/DataTypeNothing.cpp |55.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/DataTypeNested.cpp |55.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Core/BaseSettings.cpp |55.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/statistics/aggregator/tx_ack_timeout.cpp |55.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataStreams/materializeBlock.cpp |55.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/createHardLink.cpp |55.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Compression/LZ4_decompress_faster.cpp |55.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/checkStackSize.cpp |55.6%| [CC] {BAZEL_DOWNLOAD} 
$(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/TimerDescriptor.cpp |55.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/ZooKeeper/IKeeper.cpp |55.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/statistics/aggregator/tx_configure.cpp |55.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/Throttler.cpp |55.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/gateway/behaviour/view/manager.cpp |55.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/ThreadStatus.cpp |55.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/IPv6ToBinary.cpp |55.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/FieldVisitorWriteBinary.cpp |55.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/executer_actor/kqp_executer_impl.cpp |55.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/compile_service/kqp_compile_computation_pattern_service.cpp |55.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/hulldb/generic/hullds_idxsnap.cpp |55.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/gateway/utils/metadata_helpers.cpp |55.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/Exception.cpp |55.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/writer/metadata_initializers.cpp |55.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/base/common/getThreadId.cpp |55.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/blobs_action/bs/address.cpp |55.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/statistics/aggregator/tx_schemeshard_stats.cpp |55.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/base/common/getResource.cpp |55.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/base/common/sleep.cpp |55.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Columns/ColumnTuple.cpp |55.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Columns/ColumnNullable.cpp |55.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/base/common/errnoToString.cpp |55.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/base/common/getPageSize.cpp |55.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/base/common/preciseExp10.cpp |55.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/base/common/DateLUTImpl.cpp |55.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/base/common/DateLUT.cpp |55.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/base/common/getFQDNOrHostName.cpp |55.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/base/common/StringRef.cpp |55.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/base/common/JSON.cpp |55.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Columns/ColumnString.cpp |55.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Columns/ColumnAggregateFunction.cpp |55.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Columns/ColumnLowCardinality.cpp |55.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/base/common/mremap.cpp |55.7%| [CC] {BAZEL_DOWNLOAD} 
$(S)/ydb/library/yql/udfs/common/clickhouse/client/base/common/shift10.cpp |55.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/blobs_action/bs/read.cpp |55.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/AggregateFunctions/IAggregateFunction.cpp |55.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/AggregateFunctions/AggregateFunctionCombinatorFactory.cpp |55.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/AggregateFunctions/AggregateFunctionFactory.cpp |55.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/statistics/aggregator/tx_response_tablet_distribution.cpp |55.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Columns/ColumnFixedString.cpp |55.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/gateway/actors/scheme.cpp |55.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/scan.cpp |55.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Columns/ColumnFunction.cpp |55.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/statistics/aggregator/tx_schedule_traversal.cpp |55.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/ClickHouseRevision.cpp |55.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/Epoll.cpp |55.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Columns/ColumnMap.cpp |55.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Columns/ColumnCompressed.cpp |55.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Columns/IColumn.cpp |55.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/columnshard_schema.cpp |55.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Columns/ColumnsCommon.cpp |55.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Columns/FilterDescription.cpp |55.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/CurrentMemoryTracker.cpp |55.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/Config/AbstractConfigurationComparison.cpp |55.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Columns/MaskOperations.cpp |55.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Columns/ColumnArray.cpp |55.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/CurrentMetrics.cpp |55.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Columns/ColumnDecimal.cpp |55.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/CurrentThread.cpp |55.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/FieldVisitorToString.cpp |55.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/AlignedBuffer.cpp |55.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/DNSResolver.cpp |55.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/ErrorCodes.cpp |55.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/FieldVisitorDump.cpp |55.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/TaskStatsInfoGetter.cpp |55.7%| [CC] {BAZEL_DOWNLOAD} 
$(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/Allocator.cpp |55.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/ThreadProfileEvents.cpp |55.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/OpenSSLHelpers.cpp |55.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/executer_actor/kqp_literal_executer.cpp |55.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/IntervalKind.cpp |55.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/MemoryTracker.cpp |55.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/PipeFDs.cpp |55.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/RemoteHostFilter.cpp |55.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/ProcfsMetricsProvider.cpp |55.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/ProfileEvents.cpp |55.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Columns/ColumnVector.cpp |55.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Compression/CompressionCodecNone.cpp |55.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/ThreadPool.cpp |55.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Compression/ICompressionCodec.cpp |55.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/parseAddress.cpp |55.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Compression/CompressionCodecMultiple.cpp |55.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Compression/CompressionCodecLZ4.cpp |55.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Compression/CompressionFactory.cpp |55.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/getNumberOfPhysicalCPUCores.cpp |55.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/quoteString.cpp |55.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/formatIPv6.cpp |55.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/getMultipleKeysFromConfig.cpp |55.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/escapeForFileName.cpp |55.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/isLocalAddress.cpp |55.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/hex.cpp |55.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/formatReadable.cpp |55.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/hasLinuxCapability.cpp |55.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Compression/CompressedReadBuffer.cpp |55.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/randomSeed.cpp |55.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/setThreadName.cpp |55.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Compression/CompressedReadBufferBase.cpp |55.8%| [CC] {BAZEL_DOWNLOAD} 
$(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/thread_local_rng.cpp |55.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/writer/partition_chooser_impl.cpp |55.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Compression/CompressedWriteBuffer.cpp |55.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Compression/CompressedReadBufferFromFile.cpp |55.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataStreams/SizeLimits.cpp |55.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/query_stats/query_metrics.cpp |55.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Core/SettingsFields.cpp |55.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Core/BlockInfo.cpp |55.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Core/Field.cpp |55.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/partition_stats/top_partitions.cpp |55.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Core/ColumnWithTypeAndName.cpp |55.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/executer_actor/kqp_executer.h_serialized.cpp |55.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Core/Block.cpp |55.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataStreams/NativeBlockOutputStream.cpp |55.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Core/SettingsEnums.cpp |55.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Core/NamesAndTypes.cpp |55.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataStreams/BlockStreamProfileInfo.cpp |55.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/DataTypeMap.cpp |55.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/DataTypeArray.cpp |55.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataStreams/IBlockInputStream.cpp |55.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataStreams/ColumnGathererStream.cpp |55.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataStreams/NativeBlockInputStream.cpp |55.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataStreams/ExecutionSpeedLimits.cpp |55.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/DataTypeCustomSimpleAggregateFunction.cpp |55.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/DataTypeAggregateFunction.cpp |55.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/DataTypeDate32.cpp |55.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/DataTypeCustomIPv4AndIPv6.cpp |55.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/DataTypeDateTime.cpp |55.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/DataTypeDate.cpp |55.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/DataTypeCustomGeo.cpp |55.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/DataTypeDecimalBase.cpp |55.9%| [CC] {BAZEL_DOWNLOAD} 
$(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/DataTypeInterval.cpp |55.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/DataTypeDateTime64.cpp |55.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/DataTypeFunction.cpp |55.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/security/ticket_parser.cpp |55.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/DataTypeLowCardinalityHelpers.cpp |55.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/DataTypeFixedString.cpp |55.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/Serializations/SerializationCustomSimpleText.cpp |55.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/normalizer/portion/restore_v1_chunks.cpp |55.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/DataTypeFactory.cpp |55.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/DataTypeNumberBase.cpp |55.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/DataTypeTuple.cpp |55.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/EnumValues.cpp |55.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/DataTypeNullable.cpp |55.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/DataTypeString.cpp |55.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/statistics/aggregator/tx_init.cpp |56.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/DataTypesNumber.cpp |55.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Core/Settings.cpp |55.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/DataTypeLowCardinality.cpp |55.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/DataTypeUUID.cpp |55.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/NestedUtils.cpp |55.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/DataTypesDecimal.cpp |55.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/Serializations/SerializationAggregateFunction.cpp |55.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/Serializations/SerializationArray.cpp |55.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/IDataType.cpp |55.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Functions/extractTimeZoneFromFunctionArguments.cpp |56.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/Serializations/SerializationMap.cpp |56.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/Serializations/ISerialization.cpp |56.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/Serializations/SerializationDateTime64.cpp |56.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/Serializations/SerializationWrapper.cpp |56.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/Serializations/SerializationLowCardinality.cpp 
|56.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/statistics/service/service.cpp |56.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/Serializations/SerializationEnum.cpp |56.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/Serializations/SerializationFixedString.cpp |55.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/Serializations/SerializationUUID.cpp |55.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/Serializations/SerializationString.cpp |55.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/Serializations/SerializationDecimal.cpp |55.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/Serializations/SerializationIP.cpp |55.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/Serializations/SerializationDateTime.cpp |56.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/columnshard_private_events.cpp |56.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/Serializations/SerializationNothing.cpp |56.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/pg_tables/pg_tables.cpp |56.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/Serializations/SerializationDecimalBase.cpp |56.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/Serializations/SerializationTupleElement.cpp |56.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Formats/verbosePrintString.cpp |56.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/statistics/aggregator/tx_finish_trasersal.cpp |56.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/Serializations/SerializationNullable.cpp |56.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Formats/NativeFormat.cpp |56.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/Serializations/SerializationTuple.cpp |56.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/Serializations/SerializationNumber.cpp |56.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Formats/JSONEachRowUtils.cpp |56.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/registerDataTypeDateTime.cpp |56.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/statistics/aggregator/tx_datashard_scan_response.cpp |56.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/security/login_page.cpp |56.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/getLeastSupertype.cpp |56.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/blobs_action/abstract/gc_actor.cpp |56.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/query_stats/query_stats.cpp |56.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Formats/ProtobufReader.cpp |56.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Formats/FormatFactory.cpp |56.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Formats/registerFormats.cpp |56.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Formats/ProtobufWriter.cpp |56.0%| [CC] {BAZEL_DOWNLOAD, 
FAILED} $(S)/ydb/core/tx/columnshard/engines/storage/actualizer/index/index.cpp |56.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/blobs_action/abstract/action.cpp |56.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Functions/IFunction.cpp |56.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/statistics/service/service_impl.cpp |56.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/nodes/nodes.cpp |56.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Functions/FunctionFactory.cpp |56.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Functions/FunctionHelpers.cpp |56.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/IO/MMappedFileDescriptor.cpp |56.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Functions/toFixedString.cpp |56.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/normalizer/portion/restore_v2_chunks.cpp |56.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/IO/MMapReadBufferFromFileWithCache.cpp |56.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/IO/MMappedFile.cpp |56.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/IO/Progress.cpp |56.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/IO/MMapReadBufferFromFile.cpp |56.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/normalizer/portion/clean.cpp |56.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/IO/PeekableReadBuffer.cpp |56.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/hulldb/recovery/hulldb_recovery.cpp |56.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/IO/AsynchronousReadBufferFromFileDescriptor.cpp |56.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/common/schema.cpp |56.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/IO/SynchronousReader.cpp |56.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/IO/OpenedFile.cpp |56.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/IO/ReadBufferFromPocoSocket.cpp |56.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/IO/MMapReadBufferFromFileDescriptor.cpp |56.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/IO/ReadBufferFromFileBase.cpp |56.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/normalizer/portion/broken_blobs.cpp |56.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/normalizer/portion/normalizer.cpp |56.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/IO/ReadBufferFromFileDescriptor.cpp |56.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/IO/WriteBufferFromFileBase.cpp |56.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/IO/ReadBufferFromFile.cpp |56.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/IO/readFloatText.cpp |56.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/IO/ReadSettings.cpp |56.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/IO/ReadHelpers.cpp |56.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/IO/TimeoutSetter.cpp |56.1%| [CC] 
{BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/IO/UseSSL.cpp |56.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/IO/WriteBufferValidUTF8.cpp |56.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/IO/WriteHelpers.cpp |56.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/IO/WriteBufferFromFile.cpp |56.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/IO/ReadBufferFromMemory.cpp |56.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/IO/WriteBufferFromFileDescriptorDiscardOnFailure.cpp |56.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/IO/WriteBufferFromFileDescriptor.cpp |56.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTQueryWithTableAndOutput.cpp |56.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/IO/copyData.cpp |56.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/IO/parseDateTimeBestEffort.cpp |56.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTOrderByElement.cpp |56.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/IO/WriteBufferFromPocoSocket.cpp |56.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/IO/createReadBufferFromFileBase.cpp |56.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTQueryWithOutput.cpp |56.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTDropQuery.cpp |56.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTBackupQuery.cpp |56.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTAsterisk.cpp |56.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Interpreters/QueryThreadLog.cpp |56.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Interpreters/TablesStatus.cpp |56.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTColumnDeclaration.cpp |56.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTDictionaryAttributeDeclaration.cpp |56.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTAlterQuery.cpp |56.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/columnshard__scan.cpp |56.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Interpreters/QueryLog.cpp |56.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTCreateQuery.cpp |56.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTColumnsMatcher.cpp |56.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/normalizer/portion/leaked_blobs.cpp |56.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTConstraintDeclaration.cpp |56.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTColumnsTransformers.cpp |56.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTDatabaseOrNone.cpp |56.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTIndexDeclaration.cpp |56.2%| [CC] 
{BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTDictionary.cpp |56.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTInsertQuery.cpp |56.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTFunctionWithKeyValueArguments.cpp |56.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTExpressionList.cpp |56.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/normalizer/portion/chunks.cpp |56.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTFunction.cpp |56.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTNameTypePair.cpp |56.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/normalizer/portion/portion.cpp |56.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTQueryParameter.cpp |56.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTOptimizeQuery.cpp |56.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/resource_pools/resource_pools.cpp |56.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTProjectionSelectQuery.cpp |56.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTIdentifier.cpp |56.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTLiteral.cpp |56.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTKillQueryQuery.cpp |56.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTQualifiedAsterisk.cpp |56.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTPartition.cpp |56.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTProjectionDeclaration.cpp |56.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/export/session/selector/backup/selector.cpp |56.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTWithAlias.cpp |56.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/partition_stats/partition_stats.cpp |56.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ParserDescribeTableQuery.cpp |56.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTShowGrantsQuery.cpp |56.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTSampleRatio.cpp |56.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTRolesOrUsersSet.cpp |56.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTSetQuery.cpp |56.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTSelectWithUnionQuery.cpp |56.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTSetRoleQuery.cpp |56.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTSettingsProfileElement.cpp |56.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTSelectQuery.cpp |56.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTWindowDefinition.cpp |56.3%| [CC] {BAZEL_DOWNLOAD} 
$(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTSubquery.cpp |56.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTTTLElement.cpp |56.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTSystemQuery.cpp |56.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTUserNameWithHost.cpp |56.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ParserDataType.cpp |56.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTShowTablesQuery.cpp |56.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTTablesInSelectQuery.cpp |56.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ParserBackupQuery.cpp |56.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/CommonParsers.cpp |56.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTWithElement.cpp |56.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/normalizer/granule/clean_granule.cpp |56.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/InsertQuerySettingsPushDownVisitor.cpp |56.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/IParserBase.cpp |56.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/compute_actor/kqp_compute_actor_helpers.cpp |56.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/compute_actor/kqp_compute_state.cpp |56.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ExpressionListParsers.cpp |56.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/IAST.cpp |56.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ParserCase.cpp |56.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ParserCheckQuery.cpp |56.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/Lexer.cpp |56.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet_flat/tablet_flat_executed.cpp |56.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ExpressionElementParsers.cpp |56.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ParserAlterQuery.cpp |56.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/writer/writer.cpp |56.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ParserSelectWithUnionQuery.cpp |56.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ParserSetRoleQuery.cpp |56.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/normalizer/portion/clean_empty.cpp |56.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ParserCreateQuery.cpp |56.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ParserOptimizeQuery.cpp |56.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ParserPartition.cpp |56.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/compute_actor/kqp_pure_compute_actor.cpp |56.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/columnshard__write_index.cpp |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} 
$(S)/ydb/core/tx/columnshard/columnshard__init.cpp |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/background_controller.cpp |56.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ParserExplainQuery.cpp |56.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Functions/FunctionsConversion.cpp |56.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ParserDictionaryAttributeDeclaration.cpp |56.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/normalizer/schema_version/version.cpp |56.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ParserDictionary.cpp |56.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ParserKillQueryQuery.cpp |56.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ParserDropQuery.cpp |56.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ParserInsertQuery.cpp |56.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ParserSampleRatio.cpp |56.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ParserExternalDDLQuery.cpp |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/columnshard__statistics.cpp |56.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ParserSetQuery.cpp |56.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ParserProjectionSelectQuery.cpp |56.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ParserQuery.cpp |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/blobs_action/bs/remove.cpp |56.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ParserRenameQuery.cpp |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/normalizer/granule/normalizer.cpp |56.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ParserTablesInSelectQuery.cpp |56.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ParserSelectQuery.cpp |56.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ParserRolesOrUsersSet.cpp |56.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/formatAST.cpp |56.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ParserShowTablesQuery.cpp |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/hooks/abstract/abstract.cpp |56.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ParserShowGrantsQuery.cpp |56.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ParserSettingsProfileElement.cpp |56.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ParserShowPrivilegesQuery.cpp |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/compute_actor/kqp_compute_actor.cpp |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/normalizer/portion/chunks_v0_meta.cpp |56.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ParserTablePropertiesQuery.cpp |56.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ParserUnionQueryElement.cpp 
|56.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ParserSystemQuery.cpp |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/normalizer/portion/chunks_actualization.cpp |56.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/TokenIterator.cpp |56.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ParserUseQuery.cpp |56.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/QueryWithOutputSettingsPushDownVisitor.cpp |56.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ParserWatchQuery.cpp |56.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ParserUserNameWithHost.cpp |56.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Interpreters/castColumn.cpp |56.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ParserWithElement.cpp |56.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Processors/ConcatProcessor.cpp |56.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Processors/Executors/PollingQueue.cpp |56.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Processors/Formats/Impl/CSVRowInputFormat.cpp |56.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/parseUserName.cpp |56.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/parseDatabaseAndTableName.cpp |56.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Processors/Chunk.cpp |56.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Processors/Formats/IInputFormat.cpp |56.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/loading/stages.cpp |56.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/compute_actor/kqp_scan_compute_manager.cpp |56.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/parseIntervalKind.cpp |56.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/queryToString.cpp |56.3%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tablet/libydb-core-tablet.a |56.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/parseQuery.cpp |56.3%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/blobs_action/transaction/libcolumnshard-blobs_action-transaction.a |56.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Processors/Formats/IOutputFormat.cpp |55.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Processors/Formats/IRowOutputFormat.cpp |56.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/testlib/tablet_flat_dummy.cpp |56.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/blobs_reader/libtx-columnshard-blobs_reader.a |56.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/data_accessor/in_mem/libcolumnshard-data_accessor-in_mem.a |56.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Processors/Formats/Impl/ArrowBlockInputFormat.cpp |56.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Processors/Formats/Impl/CHColumnToArrowColumn.cpp |56.2%| [CC] {BAZEL_DOWNLOAD} 
$(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Processors/Formats/Impl/ArrowBufferedStreams.cpp |56.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Processors/Formats/IRowInputFormat.cpp |56.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/grpc_services/counters/libcore-grpc_services-counters.a |56.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/data_sharing/modification/tasks/libdata_sharing-modification-tasks.a |56.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Processors/Formats/Impl/JSONAsStringRowInputFormat.cpp |56.3%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/counters/libtx-columnshard-counters.a |56.3%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/operations/common/libcolumnshard-operations-common.a |56.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Processors/Formats/Impl/CSVRowOutputFormat.cpp |56.3%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/data_accessor/in_mem/libcolumnshard-data_accessor-in_mem.global.a |56.3%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/operations/batch_builder/libcolumnshard-operations-batch_builder.a |56.3%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/data_accessor/libtx-columnshard-data_accessor.a |56.3%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/data_accessor/local_db/libcolumnshard-data_accessor-local_db.a |56.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Processors/Formats/Impl/ArrowColumnToCHColumn.cpp |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/columnshard__notify_tx_completion.cpp |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/columnshard__plan_step.cpp |56.4%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/blobs_action/tier/libcolumnshard-blobs_action-tier.a |56.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Processors/Formats/Impl/AvroRowInputFormat.cpp |56.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/testlib/actors/block_events.cpp |56.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Processors/Formats/Impl/JSONEachRowRowInputFormat.cpp |56.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Processors/Formats/Impl/JSONEachRowRowOutputFormat.cpp |56.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTQueryWithOnCluster.cpp |56.3%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/data_locks/manager/libcolumnshard-data_locks-manager.a |56.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Processors/IProcessor.cpp |56.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Processors/IAccumulatingTransform.cpp |56.4%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/normalizer/tablet/libcolumnshard-normalizer-tablet.global.a |56.4%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/data_sharing/common/session/libdata_sharing-common-session.a |56.4%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/operations/slice_builder/libcolumnshard-operations-slice_builder.a |56.4%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/data_reader/libtx-columnshard-data_reader.a |56.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Processors/Formats/Impl/TabSeparatedRowInputFormat.cpp |56.4%| [CC] 
{BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Processors/Formats/Impl/RawBLOBRowInputFormat.cpp |56.4%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/blobs_action/libtx-columnshard-blobs_action.a |56.4%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/data_accessor/abstract/libcolumnshard-data_accessor-abstract.a |56.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Functions/CastOverloadResolver.cpp |56.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Processors/Formats/Impl/TSKVRowInputFormat.cpp |56.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Processors/Formats/Impl/TSKVRowOutputFormat.cpp |56.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Processors/Formats/OutputStreamToOutputFormat.cpp |56.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Processors/Formats/RowInputFormatWithDiagnosticInfo.cpp |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/columnshard_impl.h_serialized.cpp |56.4%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/data_locks/locks/libcolumnshard-data_locks-locks.a |56.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/clickhouse_client_udf.cpp |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/resource_pool_classifiers/resource_pool_classifiers.cpp |56.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Processors/Formats/Impl/TabSeparatedRowOutputFormat.cpp |56.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Processors/LimitTransform.cpp |56.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/columnshard__write.cpp |56.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Processors/ISink.cpp |56.5%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/data_sharing/destination/events/libdata_sharing-destination-events.a |56.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Processors/ISimpleTransform.cpp |56.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/file_storage/defs/libcore-file_storage-defs.a |56.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Processors/Port.cpp |56.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Processors/ISource.cpp |56.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Processors/ResizeProcessor.cpp |56.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/utils/actors/libyql-utils-actors.a |56.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/ydb/proto/libproviders-ydb-proto.a |56.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/bg_tasks/protos/libcolumnshard-bg_tasks-protos.a |56.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/ydb/expr_nodes/libproviders-ydb-expr_nodes.a |56.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/abseil-cpp-tstring/y_absl/log/libabseil-cpp-tstring-y_absl-log.a |56.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/tables_manager.cpp |56.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/bg_tasks/session/libcolumnshard-bg_tasks-session.a |56.5%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/data_accessor/local_db/libcolumnshard-data_accessor-local_db.global.a |56.5%| [AR] {BAZEL_DOWNLOAD} 
$(B)/ydb/core/tx/columnshard/blobs_action/counters/libcolumnshard-blobs_action-counters.a |56.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/columnshard.cpp |56.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/protos/libpy3yql-essentials-protos.global.a |56.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/common/libtx-columnshard-common.a |56.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/utils/plan/libyql-utils-plan.a |56.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/columnshard__propose_cancel.cpp |56.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_private/operation/libclient-yc_private-operation.a |56.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/nc_private/accessservice/libclient-nc_private-accessservice.a |56.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_private/accessservice/libclient-yc_private-accessservice.a |56.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/show_create/show_create.cpp |56.5%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/operations/libtx-columnshard-operations.a |56.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/data_sharing/common/context/libdata_sharing-common-context.a |56.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_public/common/libclient-yc_public-common.a |56.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_private/servicecontrol/libclient-yc_private-servicecontrol.a |56.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/protobuf/json/libcpp-protobuf-json.a |56.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/testlib/tenant_runtime.cpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet/private/aggregated_counters.cpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/counters/background_controller.cpp |56.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/blobs_action/tier/adapter.cpp |56.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet/tablet_counters.cpp |56.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet/pipe_tracker.cpp |56.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/blobs_action/tier/read.cpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet/labeled_db_counters.cpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet/labeled_counters_merger.cpp |56.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_public/events/libclient-yc_public-events.a |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/counters/column_tables.cpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/counters/req_tracer.cpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/data_locks/locks/composite.cpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet/private/labeled_db_counters.cpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/data_locks/locks/abstract.cpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/counters/scan.h_serialized.cpp |56.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_private/resourcemanager/libclient-yc_private-resourcemanager.a |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/counters/splitter.cpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/counters/writes_monitor.cpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/grpc_services/counters/proxy_counters.cpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/counters/common_data.cpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/blobs_action/tier/common.cpp |56.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/test_tablet/load_actor_delete.cpp |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} 
$(S)/ydb/core/testlib/basics/appdata.cpp |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/columnshard_view.cpp |56.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/counters/insert_table.cpp |56.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet/tablet_pipecache.cpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet/tablet_req_blockbs.cpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet/tablet_req_findlatest.cpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet/tablet_pipe_client_cache.cpp |56.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_private/iam/libclient-yc_private-iam.a |56.6%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/data_sharing/destination/session/libdata_sharing-destination-session.a |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet/tablet_counters_app.cpp |56.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/testlib/basics/runtime.cpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet/tablet_pipe_server.cpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet/tablet_req_delete.cpp |56.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/show_create/create_table_formatter.cpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet/tablet_req_writelog.cpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet/tablet_req_rebuildhistory.cpp |56.6%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/resource_subscriber/libtx-columnshard-resource_subscriber.a |56.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/ydb/provider/libproviders-ydb-provider.a |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet/tablet_tracing_signals.cpp |56.7%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/data_sharing/destination/transactions/libdata_sharing-destination-transactions.a |56.5%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/data_sharing/manager/libcolumnshard-data_sharing-manager.a |56.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/data_sharing/initiator/controller/libdata_sharing-initiator-controller.a |56.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/blobs_action/protos/libcolumnshard-blobs_action-protos.a |56.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/data_sharing/initiator/status/libdata_sharing-initiator-status.a |56.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/data_sharing/initiator/controller/libdata_sharing-initiator-controller.global.a |56.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/testlib/fake_coordinator.cpp |56.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_public/iam/libclient-yc_public-iam.a |56.6%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/data_sharing/modification/transactions/libdata_sharing-modification-transactions.a |56.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/workload/stock/liblibrary-workload-stock.a |56.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/workload/stock/liblibrary-workload-stock.global.a |56.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/liburing/libcontrib-libs-liburing.a |56.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/resources/libtx-columnshard-resources.a |56.6%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yaml_config/libydb-library-yaml_config.a |56.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/compute_actor/kqp_scan_fetcher_actor.cpp |56.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/testlib/basics/services.cpp |56.6%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/data_sharing/modification/events/libdata_sharing-modification-events.a |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} 
$(S)/ydb/core/testlib/basics/helpers.cpp |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/columnshard_subdomain_path_id.cpp |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/write_actor.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/resource_subscriber/counters.cpp |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/compute_actor/kqp_compute_actor_factory.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/resource_subscriber/container.cpp |56.5%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yaml_config/public/liblibrary-yaml_config-public.a |56.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/testlib/common_helper.cpp |56.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/blobs_action/abstract/storage.cpp |56.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/test_tablet/tx_initialize.cpp |56.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yaml_config/protos/libyaml-config-protos.a |56.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/test_tablet/tx_init_scheme.cpp |56.6%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/splitter/libtx-columnshard-splitter.a |56.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/data_sharing/protos/libcolumnshard-data_sharing-protos.a |56.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/columnshard__propose_transaction.cpp |56.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/test_tablet/load_actor_impl.cpp |56.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/sessions/sessions.cpp |56.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/yaml_config/public/yaml_config.cpp |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/testlib/tablet_helpers.cpp |56.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/tablet/libydb-services-tablet.a |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/columnshard__progress_tx.cpp |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/compute_actor/kqp_scan_compute_actor.cpp |56.7%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/persqueue_v1/libydb-services-persqueue_v1.a |56.7%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/ymq/libydb-services-ymq.a |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/test_tablet/test_shard_context.cpp |56.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/replication/libydb-services-replication.a |56.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yaml_json/libydb-library-yaml_json.a |56.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/rate_limiter/libydb-services-rate_limiter.a |56.6%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/ydb/libydb-services-ydb.a |56.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/view/libydb-services-view.a |56.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/normalizer/portion/clean_deprecated_snapshot.cpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/splitter/settings.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/splitter/chunk_meta.cpp |56.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/ast/serialize/libessentials-ast-serialize.a |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/test_tablet/state_server_interface.cpp |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/blobs_action/abstract/gc.cpp |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/test_tablet/load_actor_write.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/ymq/utils.cpp |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/blobs_action/events/delete_blobs.cpp |56.7%| [AR] {BAZEL_DOWNLOAD} 
$(B)/ydb/library/ycloud/impl/liblibrary-ycloud-impl.a |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/blobs_action/bs/write.cpp |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet/node_whiteboard.cpp |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/inflight_request_tracker.cpp |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/counters/scan.cpp |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/data_accessor/abstract/constructor.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/ydb/ydb_object_storage.cpp |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/test_tablet/tx_load_everything.cpp |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/counters/portion_index.cpp |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet/resource_broker.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/ydb/ydb_export.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/ydb/ydb_scripting.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/ydb/ydb_clickhouse_internal.cpp |56.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/ydb_issue/libydb-library-ydb_issue.a |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/ydb/ydb_import.cpp |56.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/ast/libyql-essentials-ast.a |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/ydb/ydb_operation.cpp |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/counters/indexation.cpp |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/counters/counters.cpp |56.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/arrow_kernels/registry/libcore-arrow_kernels-registry.a |56.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/subscriber/abstract/events/libsubscriber-abstract-events.a |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/ydb/ydb_scheme.cpp |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet/tablet_pipe_client.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/ydb/ydb_debug.cpp |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/testlib/cs_helper.cpp |56.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/ydb_issue/libydb-library-ydb_issue.global.a |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/ydb/ydb_logstore.cpp |56.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/arrow_kernels/request/libcore-arrow_kernels-request.a |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/blobs_action/tier/remove.cpp |56.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/credentials/libessentials-core-credentials.a |56.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/actors/common/libdq-actors-common.a |56.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/dq_integration/libessentials-core-dq_integration.a |56.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/expr_nodes/libessentials-core-expr_nodes.a |56.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/cbo/libessentials-core-cbo.a |56.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/subscriber/abstract/subscriber/libsubscriber-abstract-subscriber.a |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/tablets/tablets.cpp |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/test_tablet/load_actor_mon.cpp |56.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/dq_integration/transform/libcore-dq_integration-transform.a |56.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/common/protos/libpy3columnshard-common-protos.global.a |56.7%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/data_sharing/source/events/libdata_sharing-source-events.a 
|56.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/expr_nodes_gen/libessentials-core-expr_nodes_gen.a |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet/tablet_responsiveness_pinger.cpp |56.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/persqueue/counter_time_keeper/liblibrary-persqueue-counter_time_keeper.a |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/test_tablet/test_tablet.cpp |56.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/common/events/libkqp-common-events.a |56.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/count_min_sketch/libstorage-indexes-count_min_sketch.global.a |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/ydb/ydb_query.cpp |56.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/jaraco.context/libpy3contrib-python-jaraco.context.global.a |56.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/actors/input_transforms/libdq-actors-input_transforms.a |56.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/subscriber/events/tables_erased/libsubscriber-events-tables_erased.a |56.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/operations/batch_builder/merger.cpp |56.6%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/ext_index/metadata/libservices-ext_index-metadata.a |56.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/actors/libyql-dq-actors.a |56.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/file_storage/download/libcore-file_storage-download.a |56.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/protos/libcolumnshard-engines-protos.a |56.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/file_storage/http_download/proto/libfile_storage-http_download-proto.a |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/counters/portions.cpp |56.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/file_storage/http_download/libcore-file_storage-http_download.a |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet/tablet_metrics.cpp |56.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/generic/connector/api/service/protos/libapi-service-protos.a |56.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/facade/libessentials-core-facade.a |56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/test_tablet/load_actor_state.cpp |56.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/actors/task_runner/libdq-actors-task_runner.a |56.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/extract_predicate/libessentials-core-extract_predicate.a |56.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/data_accessor/local_db/constructor.cpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kesus/tablet/events.cpp |56.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/file_storage/libessentials-core-file_storage.a |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/common/kqp_script_executions.cpp |56.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/blobs_action/local/storage.cpp |56.6%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/normalizer/tables/libcolumnshard-normalizer-tables.global.a |56.6%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kesus/tablet/libcore-kesus-tablet.a |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/data_accessor/abstract/manager.cpp |56.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/actors/spilling/libdq-actors-spilling.a |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kesus/tablet/rate_accounting.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kesus/tablet/probes.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/ydb/ydb_table.cpp 
|56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kesus/tablet/quoter_resource_tree.cpp |56.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/tablet/libtx-columnshard-tablet.a |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kesus/tablet/schema.cpp |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/counters/engine_logs.cpp |56.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/common/libcore-kqp-common.a |56.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/actors/compute/libdq-actors-compute.a |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/blobs_action/tier/gc_info.cpp |56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/counters/blobs_manager.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/common/kqp_lwtrace_probes.cpp |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/blobs_action/bs/gc.cpp |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/data_accessor/in_mem/constructor.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/common/kqp.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/common/control.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/common/kqp_event_impl.cpp |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/test_tablet/test_shard_mon.cpp |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/blobs_action/bs/gc_actor.cpp |56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/blobs_reader/events.cpp |56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/data_accessor/request.cpp |56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/data_accessor/in_mem/collector.cpp |56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/blobs_action/bs/storage.cpp |56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/columnshard_impl.cpp |56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/operations/common/context.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/common/kqp_tx_info.h_serialized.cpp |56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/splitter/batch_slice.cpp |56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/testlib/tx_helpers.cpp |56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/data_accessor/events.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/common/kqp_timeouts.cpp |56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/blobs_reader/actor.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/common/kqp_user_request_context.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/common/kqp_types.cpp |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/operations/manager.cpp |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/data_accessor/in_mem/manager.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/common/kqp_yql.cpp |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/blobs_action/bs/blob_manager.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/common/kqp_yql.h_serialized.cpp |56.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/transactions/libtx-columnshard-transactions.a |56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet/tablet_counters_aggregator.cpp |56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/test_tablet/load_actor_read_validate.cpp |56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/data_accessor/local_db/collector.cpp |56.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/protos/libpy3columnshard-engines-protos.global.a |56.9%| [CC] 
{BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/data_accessor/abstract/collector.cpp |56.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/histogram/libessentials-core-histogram.a |56.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/abseil-cpp/absl/types/libabseil-cpp-absl-types.a |56.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/comp_nodes/libyql-dq-comp_nodes.a |56.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/issue/libessentials-core-issue.a |56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/blobs_reader/read_coordinator.cpp |56.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/langver/libessentials-core-langver.a |56.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/transactions/locks/libcolumnshard-transactions-locks.a |56.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/common/libyql-dq-common.a |56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/blobs_reader/task.cpp |56.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/issue/libessentials-core-issue.global.a |56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/resource_subscriber/task.cpp |56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/counters/columnshard.h_serialized.cpp |57.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/minsketch/libessentials-core-minsketch.a |57.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/scheme/defaults/protos/libpy3scheme-defaults-protos.global.a |57.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/qplayer/storage/interface/libqplayer-storage-interface.a |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/normalizer/tablet/gc_counters.cpp |56.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/pg_settings/libessentials-core-pg_settings.a |56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/data_accessor/actor.cpp |56.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/parser/pg_catalog/libessentials-parser-pg_catalog.global.a |56.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/transactions/operators/ev_write/libtransactions-operators-ev_write.global.a |56.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/parser/pg_wrapper/interface/libparser-pg_wrapper-interface.a |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/splitter/blob_info.cpp |57.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/parser/proto_ast/antlr4/libparser-proto_ast-antlr4.a |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/transactions/locks/interaction.cpp |57.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/parser/proto_ast/antlr3/libparser-proto_ast-antlr3.a |57.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/parser/pg_catalog/proto/libparser-pg_catalog-proto.a |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/normalizer/tablet/broken_txs.cpp |56.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/transactions/operators/ev_write/libtransactions-operators-ev_write.a |56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/resource_subscriber/actor.cpp |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/blobs_action/blob_manager_db.cpp |57.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/protos/liblibrary-actors-protos.a |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/data_locks/manager/manager.cpp |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/resource_subscriber/events.cpp |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kesus/tablet/tx_init_schema.cpp |57.0%| [CC] 
{BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet/tablet_resolver.cpp |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/counters/columnshard.cpp |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/testlib/test_client.cpp |57.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/transactions/operators/libcolumnshard-transactions-operators.global.a |57.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/parser/proto_ast/gen/jsonpath/libproto_ast-gen-jsonpath.a |57.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/common/codec/arrow/libcommon-codec-arrow.a |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/data_sharing/destination/events/status.cpp |57.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/common/arrow_resolve/libproviders-common-arrow_resolve.a |57.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/common/activation/libproviders-common-activation.a |57.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/transactions/operators/libcolumnshard-transactions-operators.a |57.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/transactions/protos/libcolumnshard-transactions-protos.a |57.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/common/codec/libproviders-common-codec.a |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/splitter/chunks.cpp |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/storage/groups.cpp |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/counters/counters_manager.cpp |57.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/tx_reader/libtx-columnshard-tx_reader.a |57.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/peephole_opt/libessentials-core-peephole_opt.a |57.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/metadata/common/libservices-metadata-common.a |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet/tablet_list_renderer.cpp |57.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/common/comp_nodes/libproviders-common-comp_nodes.a |57.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/common/dq/libproviders-common-dq.a |57.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/parser/proto_ast/gen/v0/libproto_ast-gen-v0.a |57.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/common/config/libproviders-common-config.a |57.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/common/gateway/libproviders-common-gateway.a |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet/bootstrapper.cpp |57.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/lexer/antlr4/libv1-lexer-antlr4.a |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kesus/tablet/tx_init.cpp |57.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/s3/proto/libproviders-s3-proto.a |57.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/common/gateways_utils/libproviders-common-gateways_utils.a |57.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/lexer/antlr4_ansi/libv1-lexer-antlr4_ansi.a |57.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/scheme/protos/libpy3core-scheme-protos.global.a |57.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/malloc/api/libcpp-malloc-api.a |57.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/lexer/libsql-v1-lexer.a |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kesus/tablet/tx_semaphore_acquire.cpp |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kesus/tablet/tablet_impl.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/storage/pdisks.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} 
$(S)/ydb/core/tx/columnshard/engines/storage/indexes/count_min_sketch/meta.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_comp.cpp |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/data_sharing/manager/shared_blobs.cpp |57.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/proto_parser/antlr4_ansi/libv1-proto_parser-antlr4_ansi.a |57.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/proto_parser/antlr3_ansi/libv1-proto_parser-antlr3_ansi.a |57.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/proto_parser/antlr4/libv1-proto_parser-antlr4.a |57.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/proto_parser/libsql-v1-proto_parser.a |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/blobs_action/tier/gc_actor.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/operations/write_data.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kesus/tablet/tablet_db.cpp |57.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/types/binary_json/libessentials-types-binary_json.a |57.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/transactions/transactions/libcolumnshard-transactions-transactions.a |57.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/utils/backtrace/libessentials-utils-backtrace.a |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_comp_create.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kesus/tablet/tx_semaphore_update.cpp |57.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/types/uuid/libessentials-types-uuid.a |57.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/types/dynumber/libessentials-types-dynumber.a |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/data_accessor/local_db/manager.cpp |57.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/common_opt/libessentials-core-common_opt.a |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/storage/vslots.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kesus/tablet/tx_quoter_resource_update.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet/node_tablet_monitor.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_comp_gen.cpp |57.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/utils/failure_injector/libessentials-utils-failure_injector.a |57.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/utils/fetch/libessentials-utils-fetch.a |57.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/tablet_flat/flat_comp_gen.h_serialized.cpp |57.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/parser/proto_ast/gen/v1_ansi_antlr4/libproto_ast-gen-v1_ansi_antlr4.a |57.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/symbols/registry/libpython-symbols-registry.a |57.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/utils/log/libessentials-utils-log.a |57.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/providers/stat/expr_nodes/libproviders-stat-expr_nodes.a |57.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/utils/threading/libessentials-utils-threading.a |57.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/reader/transaction/libengines-reader-transaction.a |57.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/utils/log/proto/libutils-log-proto.a |57.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/cpp/mapreduce/common/libcpp-mapreduce-common.a |57.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/utils/libyql-essentials-utils.a |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_dbase_apply.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_dbase_scheme.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} 
$(S)/ydb/services/ydb/ydb_dummy.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_exec_broker.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_database.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kesus/tablet/tx_session_destroy.cpp |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kesus/tablet/quoter_runtime.cpp |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/blobs_action/tier/gc.cpp |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/storage/indexes/count_min_sketch/constructor.cpp |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kesus/tablet/tx_config_get.cpp |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kesus/tablet/tx_semaphore_describe.cpp |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kesus/tablet/tx_quoter_resource_describe.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/operations/slice_builder/pack_builder.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kesus/tablet/tx_dummy.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kesus/tablet/tablet_html.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet/tablet_sys.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kesus/tablet/tx_session_attach.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/blobs_action/transaction/tx_write.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/data_sharing/common/session/common.h_serialized.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kesus/tablet/tx_semaphore_delete.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/storage/storage_stats.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kesus/tablet/tx_semaphore_release.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/yaml_config/serialize_deserialize.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/splitter/column_info.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/blobs_action/storages_manager/manager.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/blobs_action/transaction/tx_gc_insert_table.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/blobs_action/tier/storage.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kesus/tablet/tx_quoter_resource_delete.cpp |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/data_locks/locks/snapshot.cpp |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/data_reader/actor.cpp |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kesus/tablet/tx_semaphore_timeout.cpp |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/yaml_config/yaml_config_helpers.cpp |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet/tablet_monitoring_proxy.cpp |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kesus/tablet/tx_self_check.cpp |57.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/cpp/mapreduce/http/libcpp-mapreduce-http.a |57.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/parser/proto_ast/gen/v1_antlr4/libproto_ast-gen-v1_antlr4.a |57.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/parser/proto_ast/gen/v1_ansi/libproto_ast-gen-v1_ansi.a |57.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/lib/config_clusters/libyt-lib-config_clusters.a |57.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/parser/proto_ast/gen/v1/libproto_ast-gen-v1.a |57.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/parser/proto_ast/gen/v0_proto_split/libproto_ast-gen-v0_proto_split.a |57.2%| [AR] {BAZEL_DOWNLOAD} 
$(B)/yt/cpp/mapreduce/interface/logging/libmapreduce-interface-logging.a |57.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/protos/libyql-essentials-protos.a |57.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/reader/sys_view/optimizer/libreader-sys_view-optimizer.global.a |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/storage/storage_pools.cpp |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/yaml_config/console_dumper.cpp |57.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/cpp/mapreduce/library/user_job_statistics/libmapreduce-library-user_job_statistics.a |57.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/cpp/mapreduce/io/libcpp-mapreduce-io.a |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/operations/events.cpp |57.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/actors/events/libdq-actors-events.a |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/yaml_config/yaml_config.cpp |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/data_sharing/source/events/transfer.cpp |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/testlib/actors/test_runtime.cpp |57.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/libyql-essentials-core.a |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/blobs_action/transaction/tx_gc_indexed.cpp |57.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/crypto/chacha_512/libblobstorage-crypto-chacha_512.a |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kesus/tablet/tablet.cpp |57.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/s3/proto/libpy3providers-s3-proto.global.a |57.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/common/compilation/libkqp-common-compilation.a |57.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/naming_conventions/libydb-library-naming_conventions.a |57.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/protobuf/libcontrib-libs-protobuf.global.a |57.2%| [AR] {BAZEL_DOWNLOAD} $(B)/certs/libcerts.global.a |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/yaml_config/yaml_config_parser.cpp |57.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/MarkupSafe/py3/libpy3python-MarkupSafe-py3.a |57.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/codec/codegen/llvm16/libcodec-codegen-llvm16.global.a |57.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/cpp/mapreduce/interface/libcpp-mapreduce-interface.a |57.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/codec/codegen/llvm16/libcodec-codegen-llvm16.a |57.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_executor_counters.cpp |57.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/lib/init_yt_api/libyt-lib-init_yt_api.a |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/transactions/locks/abstract.cpp |57.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/common/buffer/libkqp-common-buffer.a |57.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/lib/hash/libyt-lib-hash.a |57.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/expr_nodes/libproviders-yt-expr_nodes.a |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/data_sharing/common/session/common.cpp |57.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/lib/expr_traits/libyt-lib-expr_traits.a |57.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/crypto/libcore-blobstorage-crypto.a |57.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/public/udf/tz/libpublic-udf-tz.a |57.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/library/undumpable/libyt-library-undumpable.a |57.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/conveyor/service/libtx-conveyor-service.a |57.3%| [AR] 
{BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/lib/graph_reorder/libyt-lib-graph_reorder.a |57.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/data_sharing/modification/transactions/tx_change_blobs_owning.cpp |57.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/lib/key_filter/libyt-lib-key_filter.a |57.3%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kesus/proxy/libcore-kesus-proxy.a |57.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/gateway/qplayer/libyt-gateway-qplayer.a |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/data_sharing/destination/transactions/tx_start_from_initiator.cpp |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/ymq/grpc_service.cpp |57.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/lib/lambda_builder/libyt-lib-lambda_builder.a |57.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/job/libproviders-yt-job.a |57.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/data_sharing/destination/session/destination.cpp |57.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/mkql_dq/libproviders-yt-mkql_dq.a |57.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/lib/infer_schema/libyt-lib-infer_schema.a |57.3%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/storage/granule/libengines-storage-granule.a |57.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/common/batch/libkqp-common-batch.a |57.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/public/udf/support/libpublic-udf-support.a |57.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet/tablet_req_reset.cpp |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/data_locks/locks/list.cpp |57.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/lib/res_pull/libyt-lib-res_pull.a |57.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/services/libessentials-core-services.a |57.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/operations/slice_builder/builder.cpp |57.3%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/scheme/versions/libengines-scheme-versions.a |57.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/cpp/mapreduce/http_client/libcpp-mapreduce-http_client.a |57.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/re2/libcontrib-libs-re2.a |57.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/normalizer/tables/normalizer.cpp |57.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/data_sharing/manager/sessions.cpp |57.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/draft/libapi-grpc-draft.a |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kesus/tablet/tx_session_detach.cpp |57.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/lib/skiff/libyt-lib-skiff.a |57.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/wrappers/ut_helpers/libcore-wrappers-ut_helpers.a |57.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/lib/row_spec/libyt-lib-row_spec.a |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/common/timeout.cpp |57.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/protobuf/builtin_proto/protos_from_protobuf/libpy3protobuf-builtin_proto-protos_from_protobuf.global.a |57.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/xxhash/libcontrib-libs-xxhash.a |57.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/cpp/mapreduce/client/libcpp-mapreduce-client.a |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kesus/tablet/tx_semaphore_create.cpp |57.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/lib/url_mapper/libyt-lib-url_mapper.a |57.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/mkql_proto/protos/libpy3library-mkql_proto-protos.global.a 
|57.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/lib/yson_helpers/libyt-lib-yson_helpers.a |57.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/common/libproviders-yt-common.a |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_fwd_misc.cpp |57.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/lib/log/libyt-lib-log.a |57.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/lib/yt_download/libyt-lib-yt_download.a |57.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/lzma/libcontrib-libs-lzma.a |57.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/lib/schema/libyt-lib-schema.a |57.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/proto/libproviders-yt-proto.a |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/data_sharing/modification/events/change_owning.cpp |57.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/provider/libproviders-yt-provider.global.a |57.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/ytflow/expr_nodes/libproviders-ytflow-expr_nodes.a |57.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/tablet_flat/flat_executor.pb.cc |57.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/common/ss_dialog.cpp |57.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/ytflow/integration/interface/libytflow-integration-interface.a |57.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/operations/write.cpp |57.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/lexer/antlr3/libv1-lexer-antlr3.a |57.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/annotations/libapi-protos-annotations.a |57.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/ytflow/integration/proto/libytflow-integration-proto.a |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kesus/tablet/tx_config_set.cpp |57.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kesus/proxy/events.cpp |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/blobs_action/transaction/tx_draft.cpp |57.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_mem_warm.cpp |57.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/threading/light_rw_lock/libcpp-threading-light_rw_lock.a |57.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/gateway/lib/libyt-gateway-lib.a |57.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/lexer/antlr3_ansi/libv1-lexer-antlr3_ansi.a |57.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/data_sharing/modification/tasks/modification.cpp |57.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/yson_pull/libyson_pull.a |57.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/uri/liblibrary-cpp-uri.a |57.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/double-conversion/libcontrib-libs-double-conversion.a |57.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/codec/libproviders-yt-codec.a |57.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kesus/proxy/proxy_actor.cpp |57.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/keyvalue/protos/libcore-keyvalue-protos.a |57.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/clang18-rt/lib/asan_static/libclang_rt.asan_static-x86_64.a |57.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/opt/libproviders-yt-opt.a |57.3%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/yt/yql/providers/yt/provider/libproviders-yt-provider.a |57.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/grpc/third_party/address_sorting/libgrpc-third_party-address_sorting.a |57.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/libc_compat/libcontrib-libs-libc_compat.a |57.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/blobs_action/abstract/storages_manager.cpp |57.4%| [AR] {BAZEL_DOWNLOAD} 
$(B)/contrib/libs/cxxsupp/libcxxabi-parts/liblibs-cxxsupp-libcxxabi-parts.a |57.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/phy_opt/yql_yt_phy_opt.cpp |57.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/client/arrow/libyt-client-arrow.a |57.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kesus/tablet/tx_sessions_describe.cpp |57.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/blobs_action/transaction/tx_remove_blobs.cpp |57.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/phy_opt/yql_yt_phy_opt_field_subset.cpp |57.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/data_sharing/destination/transactions/tx_data_from_source.cpp |57.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_cbo_helpers.cpp |57.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/operations/batch_builder/restore.cpp |57.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/phy_opt/yql_yt_phy_opt_weak_fields.cpp |57.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet_flat/flat_bio_actor.cpp |57.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/phy_opt/yql_yt_phy_opt_key_range.cpp |57.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_datasink_constraints.cpp |57.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_block_output.cpp |57.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/data_sharing/destination/events/transfer.cpp |57.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/transactions/locks/dependencies.cpp |57.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_block_io_utils.cpp |57.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_datasink.cpp |57.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_block_input.cpp |57.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/phy_opt/yql_yt_phy_opt_ytql.cpp |57.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_load_table_meta.cpp |57.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_datasink_exec.cpp |57.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kesus/tablet/tx_session_timeout.cpp |57.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_key.cpp |57.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/actors/libproviders-dq-actors.a |57.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/operations/batch_builder/builder.cpp |57.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_load_columnar_stats.cpp |57.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Transforms/Scalar/liblib-Transforms-Scalar.a |57.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kesus/tablet/tx_quoter_resource_add.cpp |57.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_provider_context.cpp |57.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_physical_optimize.cpp |57.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_join_reorder.cpp |57.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/phy_opt/yql_yt_phy_opt_push.cpp |57.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/opt/libyql-dq-opt.a |57.4%| [CC] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/provider/yql_yt_op_settings.h_serialized.cpp |57.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_intent_determination.cpp |57.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_provider_impl.cpp |57.4%| [CC] {BAZEL_DOWNLOAD, FAILED} 
$(S)/ydb/core/tx/columnshard/blobs_action/tier/write.cpp |57.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_op_hash.cpp |57.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_io_discovery_walk_folders.cpp |57.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_table_desc.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_datasink_finalize.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_provider.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_peephole.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_forwarding_gateway.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_datasink_trackable.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_op_settings.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_gateway.cpp |57.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/openssl/libcontrib-libs-openssl.a |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/phy_opt/yql_yt_phy_opt_sort.cpp |57.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_mkql_compiler.cpp |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet_flat/flat_boot_lease.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_optimize.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_datasink_type_ann.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/phy_opt/yql_yt_phy_opt_join.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_ytflow_optimize.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/phy_opt/yql_yt_phy_opt_write.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/blobs_action/transaction/tx_blobs_written.cpp |57.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/gateway/native/libyt-gateway-native.a |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_io_discovery.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_datasource_exec.cpp |57.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/phy_opt/yql_yt_phy_opt_content.cpp |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/persqueue_v1/grpc_pq_read.cpp |57.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_epoch.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_ytflow_integration.cpp |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/ext_index/metadata/object.cpp |56.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/util/actorsys_test/libcore-util-actorsys_test.a |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/phy_opt/yql_yt_phy_opt_helper.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_datasource_constraints.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/phy_opt/yql_yt_phy_opt_fuse.cpp |56.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/config/libydb-services-config.a |56.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/protobuf/libcontrib-libs-protobuf.a |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_dq_optimize.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_wide_flow.cpp |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_datasource_type_ann.cpp |57.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/libessentials-sql-v1.a |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} 
$(S)/ydb/core/tablet_flat/flat_exec_commit_mgr.cpp |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_page_label.cpp |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/phy_opt/yql_yt_phy_opt_lambda.cpp |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_datasource.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_part_charge_range.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/phy_opt/yql_yt_phy_opt_merge.cpp |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/data_sharing/destination/events/control.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_dq_integration.cpp |56.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/client/query_tracker_client/libyt-client-query_tracker_client.a |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_block_io_filter.cpp |56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/persqueue_v1/topic.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_part_charge_create.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/phy_opt/yql_yt_phy_opt_map.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_table.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_horizontal_join.cpp |56.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/core/misc/isa_crc64/libisa-l_crc_yt_patch.a |56.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/build/libyt-yt-build.a |56.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/cxxsupp/builtins/liblibs-cxxsupp-builtins.a |56.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/tablet_flat/flat_part_loader.h_serialized.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_part_index_iter_create.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_part_dump.cpp |56.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/library/erasure/libyt-library-erasure.a |56.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/tablet_flat/flat_page_iface.h_serialized.cpp |56.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/clang18-rt/lib/asan_cxx/libclang_rt.asan_cxx-x86_64.a |56.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/comp_nodes/llvm16/libyt-comp_nodes-llvm16.a |56.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/library/auth/libyt-library-auth.a |56.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/library/decimal/libyt-library-decimal.a |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_part_outset.cpp |56.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/library/numeric/libyt-library-numeric.a |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/phy_opt/yql_yt_phy_opt_partition.cpp |56.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/core/https/libyt-core-https.a |56.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/library/procfs/libyt-library-procfs.a |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_part_overlay.cpp |56.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/c-ares/libcontrib-libs-c-ares.a |56.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/library/profiling/libyt-library-profiling.a |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/hydra/version.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/client.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_dq_hybrid.cpp |56.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/library/column_converters/libyt-library-column_converters.a |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/distributed_table_client.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/queue_client/common.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/query_client/query_statistics.cpp |56.8%| [CC] 
{BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/journal_reader.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/phy_opt/yql_yt_phy_opt_misc.cpp |56.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/client_common.cpp |56.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/election/public.cpp |56.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/chunk_client/read_limit.cpp |56.5%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/yt/yt/core/libyt-yt-core.a |56.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/queue_client/config.cpp |56.5%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/yt/yt/client/libyt-yt-client.a |56.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_logical_optimize.cpp |56.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_physical_finalizing.cpp |56.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_join_impl.cpp |56.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/file_client/config.cpp |56.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/cypress_client/public.cpp |56.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet_flat/flat_part_loader.cpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_helpers.cpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/complex_types/uuid_text.cpp |56.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tablet_flat/flat_executor_compaction_logic.h_serialized.cpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/scheduler_thread.cpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/suspendable_action_queue.cpp |56.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/blobs_action/transaction/tx_write_index.cpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/signature/validator.cpp |56.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet_flat/flat_boot_misc.cpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/fiber_manager.cpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/thread_affinity.cpp |56.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/client/formats/libyt-client-formats.a |56.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/loading/stages.cpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/crypto/config.cpp |56.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet_flat/flat_exec_seat.cpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/crypto/crypto.cpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/complex_types/yson_format_conversion.cpp |56.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/data_sharing/destination/transactions/tx_finish_ack_from_initiator.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/crypto/tls.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/execution_stack.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/two_level_fair_share_thread_pool.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/delayed_executor.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/random.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/throughput_throttler.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/ref_counted_tracker.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/public.cpp |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/ext_index/metadata/initializer.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/async_stream_pipe.cpp |56.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/core/libyt-yt-core.global.a |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/process_exit_profiler.cpp |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} 
$(S)/ydb/core/tablet_flat/flat_executor_borrowlogic.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/pool_allocator.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/proc.cpp |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet_flat/flat_executor_tx_env.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/protobuf_helpers.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/parser_helpers.cpp |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet_flat/flat_load_blob_queue.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/pattern_formatter.cpp |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet_flat/flat_executor_snapshot.cpp |56.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/core/http/libyt-core-http.a |56.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/parser/proto_ast/gen/v1_proto_split/libproto_ast-gen-v1_proto_split.a |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/linear_probe.cpp |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/common/kqp_ru_calc.cpp |56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/transactions/tx_controller.h_serialized.cpp |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/tablet/ext_tx_base.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/blob_output.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/schemaless_dynamic_table_writer.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/utf8_decoder.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/checksum.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/error.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/row_buffer.cpp |56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/scheme/versions/filtered_scheme.cpp |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/bloom_filter.cpp |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/data_accessor/manager.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/bitmap.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ypath/stack.cpp |56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet_flat/flat_executor_gclogic.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/schema_serialization_helpers.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/cache_config.cpp |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/peer_discovery.cpp |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/null_channel.cpp |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/local_server.cpp |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/ypath_resolver.cpp |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/message_format.cpp |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/schema.cpp |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/utilex/random.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ypath/token.cpp |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/message.cpp |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/common/kqp_tx_manager.cpp |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/tracing/public.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/data_sharing/source/events/control.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/local_channel.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/threading/thread.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/transaction_client/noop_timestamp_provider.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet_flat/flat_executor.cpp |57.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/ypath/parser_detail.cpp |57.1%| [CC] 
{BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/data_sharing/destination/transactions/tx_finish_from_source.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/transaction_client/timestamp_provider_base.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/persqueue_v1/grpc_pq_write.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/transaction_client/remote_timestamp_provider.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/tablet_client/config.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet_flat/flat_executor_db_mon.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/tablet_client/watermark_runtime_data.cpp |57.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/transaction_client/batching_timestamp_provider.cpp |57.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/ypath_service.cpp |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet_flat/flat_executor_bootlogic.cpp |57.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/transaction_client/config.cpp |57.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/tablet_client/public.cpp |57.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/ypath_detail.cpp |57.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/tablet_client/helpers.cpp |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/persqueue_v1/services_initializer.cpp |57.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet_flat/flat_executor_data_cleanup_logic.cpp |57.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/ypath/rich.cpp |57.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/tablet_client/table_mount_cache_detail.cpp |57.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/timestamped_schema_helpers.cpp |57.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/versioned_row.cpp |57.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/unversioned_value.cpp |57.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/request_complexity_limits.cpp |57.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/static_service_dispatcher.cpp |57.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/scheme/versions/versioned_index.cpp |57.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/writer.cpp |57.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/permission.cpp |57.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/ypath_designated_consumer.cpp |57.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/ypath_filtering_consumer.cpp |57.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/size.cpp |57.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/wire_protocol.cpp |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/ymq/ymq_proxy.cpp |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet_flat/flat_executor_txloglogic.cpp |57.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/syntax_checker.cpp |57.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/unversioned_row.cpp |57.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/token_writer.cpp |57.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/token.cpp |57.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/tokenizer.cpp |57.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/transactions/operators/ev_write/sync.cpp |57.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/protobuf_interop_unknown_fields.cpp |57.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/pull_parser_deserialize.cpp |57.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/common/kqp_resolve.cpp |57.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet_flat/flat_executor_compaction_logic.cpp |57.4%| [CC] {BAZEL_DOWNLOAD, FAILED} 
$(S)/ydb/services/ext_index/metadata/fetcher.cpp |57.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/ext_index/metadata/manager.cpp |57.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/producer.cpp |57.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/lexer.cpp |57.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/transactions/operators/ev_write/abstract.cpp |57.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/async_writer.cpp |57.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/persqueue_v1/persqueue.cpp |57.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/service.cpp |57.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/pull_parser.cpp |57.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/scheme/versions/snapshot_scheme.cpp |57.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/protobuf_interop.cpp |57.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/protobuf_interop_options.cpp |57.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/channel_detail.cpp |57.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/transactions/operators/propose_tx.cpp |57.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/caching_channel_factory.cpp |57.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/client.cpp |57.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/backoff_strategy.cpp |57.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/bus/server.cpp |57.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/dynamic_channel_pool.cpp |57.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/phoenix/schemas.cpp |57.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/logging/stream_log_writer.cpp |57.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/net/dialer.cpp |57.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/net/connection.cpp |57.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/logging/random_access_gzip.cpp |57.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/transactions/operators/ev_write/primary.cpp |57.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/logging/serializable_logger.cpp |57.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/transaction/tx_scan.cpp |57.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/transactions/operators/sharing.cpp |57.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/bus/channel.cpp |57.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/persqueue_v1/grpc_pq_schema.cpp |57.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/key_bound_compressor.cpp |57.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/ext_index/metadata/snapshot.cpp |57.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/transactions/operators/long_tx_write.cpp |57.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/portions/read_with_blobs.cpp |57.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/tablet/write_queue.cpp |57.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/transactions/operators/ev_write/secondary.cpp |57.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/config.cpp |57.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/key_bound.cpp |57.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/common/kqp_tx.cpp |57.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/key.cpp |57.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/composite_compare.cpp |57.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/transactions/transactions/tx_add_sharding_info.cpp |57.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kesus/proxy/proxy.cpp |57.5%| [CC] {BAZEL_DOWNLOAD, FAILED} 
$(S)/ydb/core/tx/columnshard/engines/reader/transaction/tx_internal_scan.cpp |57.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/transactions/operators/backup.cpp |57.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/bus/tcp/packet.cpp |57.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/actions/current_invoker.cpp |57.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet_flat/flat_exec_commit.cpp |57.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/bus/tcp/local_bypass.cpp |57.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/actions/cancelation_token.cpp |57.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/actions/future.cpp |57.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/compression/public.cpp |57.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/actions/cancelable_context.cpp |57.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/logging/config.cpp |57.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/transactions/tx_controller.cpp |57.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/storage/granule/stages.cpp |57.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/columnar_statistics.cpp |57.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/scheme/versions/abstract_scheme.cpp |57.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/storage/granule/storage.cpp |57.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/transactions/operators/ev_write/simple.cpp |57.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/comparator.cpp |57.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/storage/granule/portions_index.cpp |57.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/sys_view/optimizer/optimizer.cpp |57.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/helpers.cpp |57.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/transactions/locks_db.cpp |57.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/storage/granule/granule.cpp |57.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/transactions/transactions/tx_finish_async.cpp |57.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/actions/invoker_pool.cpp |57.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/library/formats/libyt-library-formats.a |57.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/queue_client/queue_rowset.cpp |57.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/bus/tcp/dispatcher_impl.cpp |57.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/actions/codicil_guarded_invoker.cpp |57.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/scheduler/operation_cache.cpp |57.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/transactions/operators/schema.cpp |57.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/queue_client/partition_reader.cpp |57.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/chunk_client/ready_event_reader_base.cpp |57.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/storage/granule/granule.h_serialized.cpp |57.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/queue_client/helpers.cpp |57.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/util/actorsys_test/testactorsys.cpp |57.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/queue_client/producer_client.cpp |57.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/chunk_client/chunk_replica.cpp |57.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/table_partition_reader.cpp |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/util/actorsys_test/single_thread_ic_mock.cpp |57.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/sticky_transaction_pool.cpp |57.2%| 
[CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/row_stream.cpp |57.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/chunk_client/helpers.cpp |57.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/table_client.cpp |57.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/row_batch_writer.cpp |57.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/connection.cpp |57.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/file_writer.cpp |57.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/chunk_client/data_statistics.cpp |57.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/table_reader.cpp |57.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/file_reader.cpp |57.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/table_writer.cpp |57.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/timestamp_provider.cpp |57.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/connection_impl.cpp |57.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/queue_transaction_mixin.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/chaos_client/replication_card.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/transaction.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/public.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/row_batch_reader.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/shuffle_client.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/security_client.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/journal_client.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/etc_client.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/query_tracker_client.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/transaction.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/wire_row_stream.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/public.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/chaos_client/helpers.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/skynet.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/chaos_client/replication_card_serialization.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/journal_writer.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/delegating_transaction.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/config.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/dynamic_table_transaction_mixin.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/internal_client.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/client_cache.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/chunk_client/public.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/delegating_client.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/bus/tcp/dispatcher.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/actions/invoker_detail.cpp |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/bus/tcp/ssl_context.cpp |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/distributed_table_session.cpp |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/operation_client.cpp |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/helpers.cpp |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/compression/brotli.cpp |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/complex_types/merge_complex_types.cpp |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/address_helpers.cpp |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/bus/tcp/ssl_helpers.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} 
$(S)/yt/yt/core/actions/invoker_util.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/compression/codec.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/compression/stream.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/compression/snappy.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rowset.cpp |57.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/library/yaml_config/public/yaml_config.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/json/json_writer.cpp |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/logging/log_writer_detail.cpp |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/column_sort_schema.cpp |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/table_mount_cache.cpp |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/private.cpp |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/persistent_queue.cpp |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/chunk_client/config.cpp |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/logging/formatter.cpp |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/chaos_client/replication_card_cache.cpp |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/helpers.cpp |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/complex_types/infinite_entity.cpp |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/bundle_controller_client/bundle_controller_client.cpp |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/chaos_client/config.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/bus/tcp/config.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/merge_table_schemas.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/scheduler/operation_id_or_alias.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/complex_types/time_text.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/bus/public.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/transaction_impl.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/complex_types/check_type_compatibility.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/compression/dictionary_codec.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/compression/lzma.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/scheduler/spec_patch.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/columnar.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/config.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/bundle_controller_client/bundle_controller_settings.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/config.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/logging/zstd_compression.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/bus/tcp/server.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/logging/stream_output.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/queue_client/consumer_client.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/public.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/adjusted_exponential_moving_average.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/complex_types/check_yson_token.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/name_table.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/client_base.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/bit_packed_unsigned_vector.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/logging/fluent_log.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/compression/bzip2.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/bit_packing.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/bus/tcp/client.cpp |56.8%| [CC] 
{BAZEL_DOWNLOAD} $(S)/yt/yt/core/compression/lz.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/pipe.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/logging/compression.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/logging/logger_owner.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/net/public.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/net/socket.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/json/json_parser.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/net/helpers.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/phoenix/descriptors.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/phoenix/context.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/net/local_address.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/phoenix/type_def.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/net/listener.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/authentication_identity.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/logical_type.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/authenticator.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/phoenix/load.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/arithmetic_formula.cpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/logging/system_log_event_provider.cpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/bus/tcp/connection.cpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/balancing_channel.cpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/request_queue_provider.cpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/server_detail.cpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/public.cpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/protocol_version.cpp |56.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/dispatcher.cpp |56.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/per_key_request_queue_provider.cpp |56.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/response_keeper.cpp |56.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/serialized_channel.cpp |56.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/async_consumer.cpp |56.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/roaming_channel.cpp |56.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/profiling/timing.cpp |56.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/phoenix/type_registry.cpp |56.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/retrying_channel.cpp |56.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/static_channel_factory.cpp |56.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/throttling_channel.cpp |56.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/forwarding_consumer.cpp |56.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ypath/tokenizer.cpp |56.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/consumer.cpp |56.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/viable_peer_registry.cpp |56.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/stream.cpp |56.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/attributes_stripper.cpp |56.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/attribute_consumer.cpp |56.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/parser.cpp |56.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/null_consumer.cpp |56.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/depth_limiting_yson_consumer.cpp |56.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/list_verb_lazy_yson_consumer.cpp |56.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/config.cpp |56.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/string_builder_stream.cpp |56.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/string_merger.cpp |56.0%| [CC] {BAZEL_DOWNLOAD} 
$(S)/yt/yt/core/yson/stream.cpp |56.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/string_filter.cpp |56.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/string.cpp |56.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/service_detail.cpp |56.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/ephemeral_attribute_owner.cpp |56.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/interned_attributes.cpp |56.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytalloc/statistics_producer.cpp |55.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/convert.cpp |55.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/yson_builder.cpp |55.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytalloc/bindings.cpp |55.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/config/grpc_service.cpp |56.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/attributes.cpp |56.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/attribute_consumer.cpp |56.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/node.cpp |56.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/node_detail.cpp |56.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/exception_helpers.cpp |56.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/request_complexity_limiter.cpp |56.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/serialize.cpp |56.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/attribute_filter.cpp |56.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytalloc/config.cpp |56.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/helpers.cpp |56.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/serialize.cpp |55.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/system_attribute_provider.cpp |55.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/tree_visitor.cpp |55.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/tree_builder.cpp |55.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/table_output.cpp |55.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/ephemeral_node_factory.cpp |55.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/table_consumer.cpp |55.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/table_upload_options.cpp |55.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/service_discovery/service_discovery.cpp |55.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/versioned_io_options.cpp |55.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/yson_struct_update.cpp |55.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/threading/spin_wait_slow_path_logger.cpp |55.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/validate_logical_type.cpp |55.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/transaction_client/helpers.cpp |55.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/tablet_client/table_mount_cache.cpp |55.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/value_consumer.cpp |55.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/virtual.cpp |55.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/versioned_reader.cpp |55.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ypath/helpers.cpp |55.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/tracing/allocation_tags.cpp |55.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/unordered_schemaful_reader.cpp |55.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/hedging_channel.cpp |55.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/coro_pipe.cpp |55.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/yson_struct.cpp |55.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/codicil.cpp |55.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/yson_struct_detail.cpp |55.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/service_combiner.cpp |55.7%| [CC] {BAZEL_DOWNLOAD} 
$(S)/yt/yt/core/misc/configurable_singleton_def.cpp |55.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/fair_share_hierarchical_queue.cpp |55.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/ypath_client.cpp |55.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/digest.cpp |55.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/config.cpp |55.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/helpers.cpp |55.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/row_batch.cpp |55.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/statistics.cpp |55.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/zerocopy_output_writer.cpp |55.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/row_base.cpp |55.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/id_generator.cpp |55.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/hedging_manager.cpp |55.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/hazard_ptr.cpp |55.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/histogram.cpp |55.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/schemaless_row_reorderer.cpp |55.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/net/config.cpp |55.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/fs.cpp |55.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/async_rw_lock.cpp |55.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/record_codegen_cpp.cpp |55.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/async_semaphore.cpp |55.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/relaxed_mpsc_queue.cpp |55.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/serialize_dump.cpp |55.5%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/public/api/protos/libapi-protos.a |55.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/net/address.cpp |55.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/async_stream.cpp |55.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/action_queue.cpp |55.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/logging/file_log_writer.cpp |55.5%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/storage/actualizer/counters/libstorage-actualizer-counters.a |55.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/ref_counted_tracker_profiler.cpp |55.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/ref_counted_tracker_statistics_producer.cpp |55.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/statistic_path.cpp |55.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/serialize.cpp |55.6%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/query_actor/libydb-library-query_actor.a |55.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/async_barrier.cpp |55.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/slab_allocator.cpp |55.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/shutdown.cpp |55.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/compression/zlib.cpp |55.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/record_helpers.cpp |55.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/compression/zstd.cpp |55.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/coroutine.cpp |55.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/thread_pool.cpp |55.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/async_looper.cpp |55.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/fair_share_invoker_queue.cpp |55.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/fair_share_queue_scheduler_thread.cpp |55.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/client_impl.cpp |55.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/fair_share_thread_pool.cpp |55.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/fair_share_invoker_pool.cpp |55.5%| [CC] 
{BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/system_invokers.cpp |55.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/fair_share_action_queue.cpp |55.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/json/json_callbacks.cpp |55.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/thread_pool_detail.cpp |55.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/thread_pool_poller.cpp |55.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/dns/dns_resolver.cpp |55.4%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/public/api/protos/libpy3api-protos.global.a |55.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/json/helpers.cpp |55.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/retrying_periodic_executor.cpp |55.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/dns/config.cpp |55.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/json/config.cpp |55.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/invoker_alarm.cpp |55.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/config.cpp |55.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/periodic_executor.cpp |55.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/notify_manager.cpp |55.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/fls.cpp |55.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/dns/ares_dns_resolver.cpp |55.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/fiber_scheduler_thread.cpp |55.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/nonblocking_batcher.cpp |55.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/lease_manager.cpp |55.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/predicate/container.cpp |55.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/pollable_detail.cpp |55.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/invoker_queue.cpp |55.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/profiling_helpers.cpp |55.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/periodic_yielder.cpp |55.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/fair_throttler.cpp |55.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/signature/signature.cpp |55.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/propagating_storage.cpp |55.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/scheduled_executor.cpp |55.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/signature/generator.cpp |55.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/security_client/helpers.cpp |55.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/single_queue_scheduler_thread.cpp |55.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/quantized_executor.cpp |55.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/fiber.cpp |55.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/security_client/access_control.cpp |55.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/security_client/public.cpp |55.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/security_client/acl.cpp |55.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/column_rename_descriptor.cpp |55.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/check_schema_compatibility.cpp |55.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/public/api/protos/ydb_config.pb.cc |55.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/adapters.cpp |55.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/chunk_stripe_statistics.cpp |55.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/misc/method_helpers.cpp |55.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/journal_client/public.cpp |55.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/blob_reader.cpp |55.2%| [CC] {BAZEL_DOWNLOAD, FAILED} 
$(B)/ydb/public/api/protos/00ccceedc2861088d9671c050e_raw.auxcpp |55.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/storage/actualizer/counters/counters.cpp |55.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/query_actor/query_actor.cpp |55.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/job_tracker_client/public.cpp |55.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/kafka/packet.cpp |55.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/job_tracker_client/helpers.cpp |55.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/hive/timestamp_map.cpp |55.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/journal_client/config.cpp |55.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/misc/io_tags.cpp |55.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/kafka/protocol.cpp |55.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/query_client/query_builder.cpp |55.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/misc/config.cpp |55.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/node_tracker_client/public.cpp |55.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/node_tracker_client/helpers.cpp |55.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/kafka/requests.cpp |55.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/object_client/public.cpp |55.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/options.cpp |55.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/object_client/helpers.cpp |55.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_range_cache.cpp |55.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_part_slice.cpp |55.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/annotations/libpy3api-protos-annotations.global.a |55.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/misc/workload.cpp |55.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/library/signals/libyt-library-signals.a |55.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/library/quantile_digest/libyt-library-quantile_digest.a |55.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/tablet_flat/flat_row_eggs.h_serialized.cpp |55.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/library/profiling/resource_tracker/liblibrary-profiling-resource_tracker.global.a |55.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/node_tracker_client/node_directory.cpp |55.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_row_versions.cpp |55.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/library/profiling/resource_tracker/liblibrary-profiling-resource_tracker.a |55.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/library/re2/libyt-library-re2.a |55.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/psutil/py3/libpy3python-psutil-py3.a |55.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_sausage_meta.cpp |55.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/library/tvm/libyt-library-tvm.a |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/library/tracing/libyt-library-tracing.a |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/asttokens/libpy3contrib-python-asttokens.global.a |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/aiosignal/libpy3contrib-python-aiosignal.global.a |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/psutil/py3/libpy3python-psutil-py3.global.a |54.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/graph/shard/backends.cpp |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/more-itertools/py3/libpy3python-more-itertools-py3.global.a |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/PyYAML/py3/libpy3python-PyYAML-py3.a |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/library/ytprof/api/liblibrary-ytprof-api.a |55.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/library/skiff_ext/libyt-library-skiff_ext.a |55.0%| [AR] {BAZEL_DOWNLOAD} 
$(B)/contrib/python/aiohttp/libpy3contrib-python-aiohttp.global.a |55.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/format/libsql-v1-format.global.a |55.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/http_proxy/libydb-core-http_proxy.a |55.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_sausagecache.cpp |55.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/cffi/py3/libpy3python-cffi-py3.global.a |55.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt_proto/yt/formats/libyt_proto-yt-formats.a |55.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/common/metrics/protos/libcommon-metrics-protos.a |55.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_stat_part_group_iter_create.cpp |55.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_stat_table.cpp |55.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/libyql-essentials-sql.a |55.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/pg_dummy/libessentials-sql-pg_dummy.a |55.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/format/libsql-v1-format.a |55.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/google-auth/py3/libpy3python-google-auth-py3.global.a |55.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/iniconfig/libpy3contrib-python-iniconfig.global.a |55.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/bits_storage/libstorage-indexes-bits_storage.global.a |55.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/protobuf/builtin_proto/protos_from_protoc/libpy3protobuf-builtin_proto-protos_from_protoc.global.a |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/python/enable_v3_new_behavior/libpy3sdk-python-enable_v3_new_behavior.global.a |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt_proto/yt/core/libyt_proto-yt-core.a |54.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_stat_table_btree_index.cpp |54.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/engines/predicate/predicate.cpp |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/settings/libessentials-sql-settings.a |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/yaml/libcontrib-libs-yaml.a |55.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/PyYAML/py3/libpy3python-PyYAML-py3.global.a |55.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/zlib/libcontrib-libs-zlib.a |55.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/expat/libcontrib-libs-expat.a |55.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/aiohttp/libpy3contrib-python-aiohttp.a |55.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/draft/field_transformation.pb.cc |55.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/graph/shard/shard_impl.cpp |55.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/draft/persqueue_error_codes.pb.cc |54.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_auth.pb.cc |54.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/draft/persqueue_common.pb.h_serialized.cpp |54.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_export.pb.cc |54.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/draft/ydb_replication.pb.cc |55.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/common/mkql/libproviders-common-mkql.a |55.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_clickhouse_internal.pb.cc |55.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/draft/ydb_tablet.pb.cc |55.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_persqueue_cluster_discovery.pb.cc |55.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/draft/ydb_object_storage.pb.cc |55.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_scripting.pb.cc |54.9%| [CC] {BAZEL_DOWNLOAD} 
$(B)/ydb/public/api/protos/draft/datastreams.pb.h_serialized.cpp |54.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/graph/shard/tx_aggregate_data.cpp |54.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/categories_bloom/libstorage-indexes-categories_bloom.global.a |54.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/draft/ydb_backup.pb.cc |54.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/persqueue_error_codes_v1.pb.cc |54.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/draft/persqueue_common.pb.cc |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/comp_nodes/dq/llvm16/libcomp_nodes-dq-llvm16.a |54.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/draft/ydb_dynamic_config.pb.cc |54.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_query.pb.cc |55.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/zstd/libcontrib-libs-zstd.a |55.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_federation_discovery.pb.cc |55.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_common.pb.cc |55.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_query_stats.pb.cc |55.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_formats.pb.cc |55.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_issue_message.pb.cc |54.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/draft/ydb_maintenance.pb.cc |54.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/draft/ydb_logstore.pb.cc |54.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_scheme.pb.cc |54.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_import.pb.cc |54.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/2fcaa64b3483b34ed81c523b1c_raw.auxcpp |54.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/graph/shard/tx_get_metrics.cpp |54.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/bfd44264fb24e9810ea4265f6e_raw.auxcpp |55.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/84a549b4a1a4fc6cc28608647e_raw.auxcpp |55.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ea4772b2d1f4541bcdc0b8f512_raw.auxcpp |55.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/draft/datastreams.pb.cc |55.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_operation.pb.cc |55.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_keyvalue.pb.cc |55.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/6c33e37410a6c88fdad8622661_raw.auxcpp |55.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/778038d64b7ffacece0e5fd0aa_raw.auxcpp |54.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/0d93498295ce40435c81fa31f1_raw.auxcpp |54.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_monitoring.pb.cc |54.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/draft/ydb_view.pb.cc |54.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_discovery.pb.cc |54.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_cms.pb.cc |54.5%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/yql/essentials/minikql/comp_nodes/llvm16/libminikql-comp_nodes-llvm16.a |54.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_persqueue_cluster_discovery.pb.h_serialized.cpp |54.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_debug.pb.cc |54.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_topic.pb.h_serialized.cpp |54.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/graph/shard/tx_change_backend.cpp |54.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_status_codes.pb.cc |54.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/draft/fq.pb.cc |54.6%| [CC] {BAZEL_DOWNLOAD} 
$(B)/ydb/public/api/protos/draft/ymq.pb.cc |54.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_value.pb.cc |54.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/ipdb/py3/libpy3python-ipdb-py3.global.a |54.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_rate_limiter.pb.cc |54.7%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/lbuckets/planner/liboptimizer-lbuckets-planner.global.a |54.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/multidict/libpy3contrib-python-multidict.a |54.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_table.pb.cc |54.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/lbuckets/planner/liboptimizer-lbuckets-planner.a |54.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/common/schema/libproviders-common-schema.a |54.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_coordination.pb.cc |54.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_persqueue_v1.pb.cc |54.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/common/schema/mkql/libcommon-schema-mkql.a |54.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/common/schema/skiff/libcommon-schema-skiff.a |54.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/common/schema/parser/libcommon-schema-parser.a |54.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/services/libpy3ydb-library-services.global.a |54.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/common/structured_token/libproviders-common-structured_token.a |54.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/metadata/request/libservices-metadata-request.a |54.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/graph/shard/tx_monitoring.cpp |54.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/graph/shard/tx_startup.cpp |54.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/rsa/py3/libpy3python-rsa-py3.global.a |54.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/pyasn1-modules/py3/libpy3python-pyasn1-modules-py3.global.a |54.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/pg/expr_nodes/libproviders-pg-expr_nodes.a |54.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_topic.pb.cc |54.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/result/expr_nodes/libproviders-result-expr_nodes.a |54.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/config/libessentials-providers-config.a |54.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/data_sharing/source/transactions/libdata_sharing-source-transactions.a |54.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/data_sharing/source/session/libdata_sharing-source-session.a |54.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/six/py3/libpy3python-six-py3.global.a |54.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/changes/abstract/libengines-changes-abstract.a |54.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/stack-data/libpy3contrib-python-stack-data.global.a |54.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/graph/shard/tx_init_schema.cpp |54.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/ruamel.yaml/py3/libpy3python-ruamel.yaml-py3.global.a |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/PyJWT/py3/libpy3python-PyJWT-py3.global.a |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/ruamel.yaml.clib/py3/libpy3python-ruamel.yaml.clib-py3.a |54.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/common/token_accessor/grpc/libcommon-token_accessor-grpc.a |54.8%| [AR] {BAZEL_DOWNLOAD} 
$(B)/yql/essentials/providers/common/udf_resolve/libproviders-common-udf_resolve.a |54.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/public/decimal/libessentials-public-decimal.a |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/Jinja2/py3/libpy3python-Jinja2-py3.global.a |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/PyHamcrest/py3/libpy3python-PyHamcrest-py3.global.a |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/wcwidth/py3/libpy3python-wcwidth-py3.global.a |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/public/langver/libessentials-public-langver.a |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/pg/provider/libproviders-pg-provider.a |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v0/libessentials-sql-v0.a |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/grpc/third_party/upb/libgrpc-third_party-upb.a |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/library/clients/libpy3tests-library-clients.global.a |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/public/udf/arrow/libpublic-udf-arrow.a |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/public/udf/service/exception_policy/libudf-service-exception_policy.global.a |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/on_disk/chunks/libcpp-on_disk-chunks.a |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/public/issue/libessentials-public-issue.a |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/public/udf/libessentials-public-udf.a |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt_proto/yt/client/libyt_proto-yt-client.a |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/services/mounts/libcore-services-mounts.global.a |54.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/http_proxy/auth_factory.cpp |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/abseil-cpp-tstring/y_absl/status/libabseil-cpp-tstring-y_absl-status.a |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/public/result_format/libessentials-public-result_format.a |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/abseil-cpp/absl/base/libabseil-cpp-absl-base.a |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/abseil-cpp/absl/container/libabseil-cpp-absl-container.a |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/sql_types/libessentials-core-sql_types.a |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/services/mounts/libcore-services-mounts.a |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/api/grpc/libdq-api-grpc.a |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/abseil-cpp/absl/numeric/libabseil-cpp-absl-numeric.a |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/abseil-cpp/absl/log/libabseil-cpp-absl-log.a |54.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/http_proxy/metrics_actor.cpp |54.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/http_proxy/exceptions_mapping.cpp |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/portions/extractor/libindexes-portions-extractor.global.a |54.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/http_proxy/http_service.cpp |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/getopt/small/libcpp-getopt-small.a |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/abseil-cpp/absl/profiling/libabseil-cpp-absl-profiling.a |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/url_lister/interface/libcore-url_lister-interface.a |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/url_preprocessing/interface/libcore-url_preprocessing-interface.a |54.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/http_proxy/discovery_actor.cpp |54.9%| [AR] {BAZEL_DOWNLOAD} 
$(B)/yql/essentials/providers/result/provider/libproviders-result-provider.a |54.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/http_proxy/grpc_service.cpp |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/user_data/libessentials-core-user_data.a |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/abseil-cpp/absl/strings/libabseil-cpp-absl-strings.a |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/arrow/libessentials-minikql-arrow.a |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/blockcodecs/codecs/zstd/libblockcodecs-codecs-zstd.global.a |54.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/portions/constructor_meta.cpp |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/libbz2/libcontrib-libs-libbz2.a |54.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/storage/indexes/categories_bloom/meta.cpp |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/blockcodecs/core/libcpp-blockcodecs-core.a |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/codegen/llvm16/libminikql-codegen-llvm16.a |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/public/issue/protos/libpy3public-issue-protos.global.a |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/datetime/libessentials-minikql-datetime.a |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/resource/liblibrary-cpp-resource.a |54.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/storage/indexes/categories_bloom/constructor.cpp |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/Pygments/py3/libpy3python-Pygments-py3.global.a |54.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/ymq/actor/attributes_md5.cpp |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/jsonpath/rewrapper/hyperscan/libjsonpath-rewrapper-hyperscan.global.a |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/jsonpath/rewrapper/libminikql-jsonpath-rewrapper.a |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/reservoir_sampling/libpy3library-python-reservoir_sampling.global.a |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/resource/libpy3library-python-resource.global.a |54.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/ymq/actor/auth_mocks.cpp |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/computation/llvm16/libminikql-computation-llvm16.a |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/jsonpath/rewrapper/proto/libjsonpath-rewrapper-proto.a |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/dom/libessentials-minikql-dom.a |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/tools/python3/lib2/py/libpy3python3-lib2-py.global.a |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/jsonpath/libessentials-minikql-jsonpath.a |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/jsonpath/parser/libminikql-jsonpath-parser.a |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/runtime_py3/libpy3library-python-runtime_py3.a |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/jsonpath/rewrapper/re2/libjsonpath-rewrapper-re2.global.a |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/runtime_py3/main/libpython-runtime_py3-main.a |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/library/wardens/libpy3tests-library-wardens.global.a |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/svn_version/libpy3library-python-svn_version.a |54.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/http_proxy/http_req.cpp |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/symbols/module/libpy3python-symbols-module.global.a |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/parser/common/antlr4/libparser-common-antlr4.a |54.9%| [AR] 
{BAZEL_DOWNLOAD} $(B)/library/python/testing/yatest_lib/libpy3python-testing-yatest_lib.global.a |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/tools/python3/Lib/libpy3tools-python3-Lib.global.a |54.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/engines/portions/common.cpp |54.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/ymq/actor/cleanup_queue_data.cpp |55.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/sqlite3/libcontrib-libs-sqlite3.a |55.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/parser/common/libessentials-parser-common.a |55.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/config/protos/libpy3core-config-protos.global.a |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/tools/enum_parser/enum_serialization_runtime/libtools-enum_parser-enum_serialization_runtime.a |55.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/scheme/column/libengines-scheme-column.a |55.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/parser/lexer_common/libessentials-parser-lexer_common.a |55.0%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_addmember.cpp |55.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/auth_factory.cpp |55.0%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_append.cpp |55.0%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_block_agg_factory.cpp |55.0%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_block_exists.cpp |55.0%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_withcontext.cpp |55.0%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_zip.cpp |55.0%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_wide_top_sort.cpp |54.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/actor.cpp |54.9%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_tooptional.cpp |54.9%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_rh_hash.cpp |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/parser/pg_catalog/libessentials-parser-pg_catalog.a |55.0%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_reverse.cpp |55.0%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_guess.cpp |55.0%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_queue.cpp |55.0%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_heap.cpp |55.0%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_hasitems.cpp |55.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/cfg.cpp |55.0%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_fold1.cpp |55.0%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_discard.cpp |55.0%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_group.cpp |54.9%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_wide_combine.cpp |55.0%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_factory.cpp |55.0%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_fold.cpp |55.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/metadata/request/config.cpp |55.0%| [AR] {BAZEL_DOWNLOAD} $(B)/util/libyutil.a |55.0%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_block_agg_count.cpp |55.0%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_block_just.cpp |55.0%| [CC] {BAZEL_DOWNLOAD} 
$(S)/ydb/services/metadata/request/common.cpp |55.0%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_block_logical.cpp |55.0%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_block_container.cpp |55.0%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_block_getelem.cpp |55.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/libyql-essentials-minikql.a |55.0%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_block_func.cpp |55.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/count_queues.cpp |55.0%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_aggrcount.cpp |55.0%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_apply.cpp |55.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/create_queue.cpp |55.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/engines/changes/abstract/compaction_info.cpp |55.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/engines/changes/abstract/settings.cpp |55.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/type_ann/libessentials-core-type_ann.a |55.0%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_callable.cpp |55.0%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_block_map_join.cpp |55.0%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_block_decimal.cpp |55.0%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_dictitems.cpp |55.0%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_block_agg_some.cpp |55.0%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_chain_map.cpp |55.0%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_block_skiptake.cpp |55.0%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_block_compress.cpp |55.0%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_check_args.cpp |55.0%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_contains.cpp |55.0%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_block_if.cpp |55.0%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_chain1_map.cpp |55.0%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_coalesce.cpp |55.0%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_chopper.cpp |55.0%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_block_coalesce.cpp |55.0%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_condense.cpp |55.0%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_combine.cpp |55.0%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_condense1.cpp |55.0%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_exists.cpp |55.0%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_block_agg_sum.cpp |55.0%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_decimal_mul.cpp |55.0%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_ensure.cpp |55.0%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_enumerate.cpp |55.0%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_dynamic_variant.cpp |55.0%| [CC] {BAZEL_DOWNLOAD} 
$(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_extend.cpp |55.0%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_fromyson.cpp |55.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/create_user.cpp |55.1%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_element.cpp |55.1%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_decimal_mod.cpp |55.1%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_decimal_div.cpp |55.0%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_flow.cpp |55.0%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_frombytes.cpp |55.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/changes/actualization/construction/libchanges-actualization-construction.a |55.0%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_fromstring.cpp |55.0%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_filter.cpp |55.0%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_block_agg_minmax.cpp |55.0%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_grace_join_imp.cpp |55.0%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_pickle.cpp |55.0%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_mapnext.cpp |55.0%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_block_top.cpp |55.0%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_grace_join.cpp |55.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/changes/actualization/controller/libchanges-actualization-controller.a |55.1%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_next_value.cpp |55.1%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_lazy_list.cpp |55.1%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_blocks.cpp |55.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/changes/compaction/abstract/libchanges-compaction-abstract.a |55.1%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_iterable.cpp |55.1%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_flatmap.cpp |55.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/delete_message.cpp |54.8%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_invoke.cpp |54.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/changes/compaction/common/libchanges-compaction-common.a |54.8%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_hopping.cpp |54.8%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_join_dict.cpp |54.8%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_ifpresent.cpp |54.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/change_visibility.cpp |54.9%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_iterator.cpp |54.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/changes/compaction/plain/libchanges-compaction-plain.global.a |54.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/abstract/libstorage-optimizer-abstract.a |54.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/metadata/secret/libservices-metadata-secret.a |54.9%| [CC] {BAZEL_DOWNLOAD} 
$(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_logical.cpp |54.9%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_length.cpp |55.0%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_map.cpp |55.0%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_lookup.cpp |55.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/libtx-columnshard-engines.a |55.1%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_match_recognize_list.cpp |55.1%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_multimap.cpp |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/computation/libessentials-minikql-computation.a |54.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/changes/compaction/libengines-changes-compaction.a |55.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/metadata/secret/libservices-metadata-secret.global.a |55.0%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_match_recognize_measure_arg.cpp |55.0%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_null.cpp |55.0%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_match_recognize_rows_formatter.cpp |55.0%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_listfromrange.cpp |55.0%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_multihopping.cpp |55.0%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_nop.cpp |55.0%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_replicate.cpp |55.0%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_now.cpp |55.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/request/request_actor.cpp |55.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kafka_proxy/libydb-core-kafka_proxy.a |55.0%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_random.cpp |55.1%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_prepend.cpp |55.1%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_reduce.cpp |55.1%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_removemember.cpp |55.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/changes/compaction/sparsed/libchanges-compaction-sparsed.global.a |55.1%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_squeeze_state.cpp |55.1%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_seq.cpp |55.0%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_squeeze_to_list.cpp |55.1%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_source.cpp |55.1%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_match_recognize.cpp |55.1%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_safe_circular_buffer.cpp |55.1%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_range.cpp |55.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/changes/compaction/sub_columns/libchanges-compaction-sub_columns.global.a |55.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/changes/compaction/sub_columns/libchanges-compaction-sub_columns.a |55.1%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_map_join.cpp |55.1%| [AR] 
{BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/insert_table/libcolumnshard-engines-insert_table.a |55.1%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_round.cpp |55.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/changes/compaction/plain/libchanges-compaction-plain.a |55.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/changes/libcolumnshard-engines-changes.a |55.1%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_skip.cpp |55.1%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_size.cpp |55.1%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_timezone.cpp |55.1%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_sort.cpp |55.1%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_take.cpp |55.1%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_scalar_apply.cpp |55.1%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_toindexdict.cpp |55.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/changes/abstract/abstract.h_serialized.cpp |55.1%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_wide_chain_map.cpp |55.1%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_unwrap.cpp |55.1%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_wide_map.cpp |55.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/request/request_actor_cb.cpp |55.1%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_tobytes.cpp |55.1%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_time_order_recover.cpp |55.1%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_varitem.cpp |55.1%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_tostring.cpp |55.1%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_way.cpp |55.1%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_weakmember.cpp |55.1%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_switch.cpp |55.1%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_udf.cpp |55.1%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_wide_chopper.cpp |55.2%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_visitall.cpp |55.1%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_if.cpp |55.1%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_wide_condense.cpp |55.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/cachetools/py3/libpy3python-cachetools-py3.global.a |55.1%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_wide_filter.cpp |55.1%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_collect.cpp |55.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/changes/abstract/move_portions.cpp |55.1%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_join.cpp |55.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/attrs/py3/libpy3python-attrs-py3.global.a |55.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/oss/ydb_sdk_import/libpy3tests-oss-ydb_sdk_import.global.a |55.2%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_while.cpp |55.2%| [CC] {BAZEL_DOWNLOAD} 
$(S)/ydb/core/ymq/actor/error.cpp |55.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/persqueue_cluster_discovery/cluster_ordering/libservices-persqueue_cluster_discovery-cluster_ordering.a |55.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/auth_multi_factory.cpp |55.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/base64/avx2/liblibs-base64-avx2.a |55.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/base64/plain32/liblibs-base64-plain32.a |55.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/backtrace/libcontrib-libs-backtrace.a |55.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/base64/plain64/liblibs-base64-plain64.a |55.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/base64/neon64/liblibs-base64-neon64.a |55.2%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_todict.cpp |55.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/base64/ssse3/liblibs-base64-ssse3.a |55.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/brotli/common/liblibs-brotli-common.a |55.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/data_sharing/source/session/cursor.cpp |55.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/brotli/dec/liblibs-brotli-dec.a |55.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/monitoring/libydb-services-monitoring.a |55.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/testing/recipe/libpy3python-testing-recipe.global.a |55.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/crcutil/libcontrib-libs-crcutil.a |55.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/farmhash/arch/sse42_aesni/libfarmhash-arch-sse42_aesni.a |55.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/farmhash/libcontrib-libs-farmhash.a |55.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/farmhash/arch/sse41/libfarmhash-arch-sse41.a |55.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/farmhash/arch/sse42/libfarmhash-arch-sse42.a |55.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/changes/counters/libengines-changes-counters.a |55.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/cctz/tzdata/liblibs-cctz-tzdata.global.a |55.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/cctz/libcontrib-libs-cctz.a |55.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/ymq/actor/infly.cpp |55.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/fastlz/libcontrib-libs-fastlz.a |55.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/cffi/py3/libpy3python-cffi-py3.a |55.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/engines/changes/compaction/common/result.cpp |55.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/brotli/enc/liblibs-brotli-enc.a |55.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/data_sharing/source/transactions/tx_write_source_cursor.cpp |55.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/engines/defs.cpp |55.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/base64/neon32/liblibs-base64-neon32.a |55.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kafka_proxy/actors/txn_actor_response_builder.cpp |55.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/cryptography/py3/libpy3python-cryptography-py3.a |55.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/engines/filter.cpp |55.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kafka_proxy/kafka_messages_int.cpp |55.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kafka_proxy/actors/kafka_api_versions_actor.cpp |55.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/kafka_proxy/kafka.h_serialized.cpp |55.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kafka_proxy/kafka_metrics.cpp |55.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kafka_proxy/actors/kafka_find_coordinator_actor.cpp |55.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/fmt/libcontrib-libs-fmt.a |55.2%| [CC] 
{BAZEL_DOWNLOAD} $(S)/ydb/core/kafka_proxy/actors/kafka_transaction_actor_sql.cpp |55.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/changes/abstract/abstract.cpp |55.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kafka_proxy/kafka_records.cpp |55.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kafka_proxy/kafka_consumer_protocol.cpp |55.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/data_sharing/source/transactions/tx_start_source_cursor.cpp |55.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/engines/insert_table/committed.cpp |55.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/engines/insert_table/meta.cpp |55.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kafka_proxy/actors/kafka_metrics_actor.cpp |55.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/engines/insert_table/user_data.cpp |55.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/apache/orc/liblibs-apache-orc.a |55.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/engines/insert_table/inserted.cpp |55.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/curl/libcontrib-libs-curl.a |55.3%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/health_check/libydb-core-health_check.a |55.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kafka_proxy/actors/kafka_produce_actor.cpp |55.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/flatbuffers/libcontrib-libs-flatbuffers.a |55.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/hdr_histogram/libcontrib-libs-hdr_histogram.a |55.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/common/queue.cpp |55.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/recipes/common/libpy3library-recipes-common.global.a |55.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/libaio/static/liblibs-libaio-static.a |55.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/common/stats.cpp |55.3%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_block_agg.cpp |55.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/libevent/event_openssl/liblibs-libevent-event_openssl.a |55.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/libevent/event_thread/liblibs-libevent-event_thread.a |55.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/libevent/event_core/liblibs-libevent-event_core.a |55.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/libevent/event_extra/liblibs-libevent-event_extra.a |55.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/ymq/actor/local_rate_limiter_allocator.cpp |55.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/liblibs-aws-sdk-cpp-aws-cpp-sdk-core.a |55.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/linuxvdso/original/liblibs-linuxvdso-original.a |55.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kafka_proxy/kafka_messages.cpp |55.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/libidn/static/liblibs-libidn-static.a |55.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/linuxvdso/libcontrib-libs-linuxvdso.a |55.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/charset-normalizer/libpy3contrib-python-charset-normalizer.global.a |55.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/ymq/actor/log.cpp |55.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/libiconv/static/liblibs-libiconv-static.a |55.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/BinaryFormat/libllvm16-lib-BinaryFormat.a |55.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Bitstream/Reader/liblib-Bitstream-Reader.a |55.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/libfyaml/libcontrib-libs-libfyaml.a |55.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/ymq/actor/metering.h_serialized.cpp |55.3%| [CC] {BAZEL_DOWNLOAD} 
$(S)/ydb/core/ymq/actor/message_delay_stats.cpp |55.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/ymq/actor/monitoring.cpp |55.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Bitcode/Writer/liblib-Bitcode-Writer.a |55.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/DebugInfo/MSF/liblib-DebugInfo-MSF.a |55.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/changes/abstract/changes.cpp |55.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/hyperscan/runtime_corei7/liblibs-hyperscan-runtime_corei7.a |55.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/hyperscan/runtime_core2/liblibs-hyperscan-runtime_core2.a |55.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/AsmParser/libllvm16-lib-AsmParser.a |55.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Bitcode/Reader/liblib-Bitcode-Reader.a |55.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Demangle/libllvm16-lib-Demangle.a |55.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/hyperscan/runtime_avx2/liblibs-hyperscan-runtime_avx2.a |55.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/grpc/libcontrib-libs-grpc.a |55.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/ExecutionEngine/Orc/Shared/libExecutionEngine-Orc-Shared.a |55.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/ExecutionEngine/MCJIT/liblib-ExecutionEngine-MCJIT.a |55.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/DebugInfo/Symbolize/liblib-DebugInfo-Symbolize.a |55.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/ExecutionEngine/PerfJITEvents/liblib-ExecutionEngine-PerfJITEvents.a |55.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/ExecutionEngine/Orc/TargetProcess/libExecutionEngine-Orc-TargetProcess.a |55.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/ExecutionEngine/libllvm16-lib-ExecutionEngine.a |55.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/IRPrinter/libllvm16-lib-IRPrinter.a |55.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/IRReader/libllvm16-lib-IRReader.a |55.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/decorator/py3/libpy3python-decorator-py3.global.a |55.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/DebugInfo/CodeView/liblib-DebugInfo-CodeView.a |55.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/idna/py3/libpy3python-idna-py3.global.a |55.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Linker/libllvm16-lib-Linker.a |55.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/MC/MCDisassembler/liblib-MC-MCDisassembler.a |55.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Frontend/OpenMP/liblib-Frontend-OpenMP.a |55.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/libxml/libcontrib-libs-libxml.a |55.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/ExecutionEngine/RuntimeDyld/liblib-ExecutionEngine-RuntimeDyld.a |55.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/future/py3/libpy3python-future-py3.global.a |55.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/storage/optimizer/lbuckets/planner/optimizer.cpp |55.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/DebugInfo/DWARF/liblib-DebugInfo-DWARF.a |55.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/CodeGen/AsmPrinter/liblib-CodeGen-AsmPrinter.a |55.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Target/X86/TargetInfo/libTarget-X86-TargetInfo.a |55.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Target/libllvm16-lib-Target.a |55.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/changes/compaction/abstract/merger.cpp |55.4%| [AR] {BAZEL_DOWNLOAD} 
$(B)/contrib/libs/llvm16/lib/DebugInfo/PDB/liblib-DebugInfo-PDB.a |55.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/CodeGen/GlobalISel/liblib-CodeGen-GlobalISel.a |55.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Target/X86/AsmParser/libTarget-X86-AsmParser.a |55.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Transforms/CFGuard/liblib-Transforms-CFGuard.a |55.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Remarks/libllvm16-lib-Remarks.a |55.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Target/X86/Disassembler/libTarget-X86-Disassembler.a |55.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/MC/MCParser/liblib-MC-MCParser.a |55.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Transforms/AggressiveInstCombine/liblib-Transforms-AggressiveInstCombine.a |55.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/TargetParser/libllvm16-lib-TargetParser.a |55.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/TextAPI/libllvm16-lib-TextAPI.a |55.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/testlib/s3_recipe_helper/liblibrary-testlib-s3_recipe_helper.a |55.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Transforms/ObjCARC/liblib-Transforms-ObjCARC.a |55.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Transforms/Coroutines/liblib-Transforms-Coroutines.a |55.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/lz4/libcontrib-libs-lz4.a |55.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/frozenlist/libpy3contrib-python-frozenlist.a |55.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/executing/libpy3contrib-python-executing.global.a |55.4%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/transfer/libalter-in_store-transfer.a |55.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/nghttp2/libcontrib-libs-nghttp2.a |55.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/nghttp3/libcontrib-libs-nghttp3.a |55.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/cryptography/py3/libpy3python-cryptography-py3.global.a |55.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/ProfileData/libllvm16-lib-ProfileData.a |55.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/lua/libcontrib-libs-lua.a |55.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/openldap/libraries/liblber/libopenldap-libraries-liblber.a |55.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/MC/libllvm16-lib-MC.a |55.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/googleapis-common-protos/libcontrib-libs-googleapis-common-protos.a |55.4%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/olap/operations/alter/standalone/liboperations-alter-standalone.a |55.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/pcre/pcre16/liblibs-pcre-pcre16.a |55.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Target/X86/MCTargetDesc/libTarget-X86-MCTargetDesc.a |55.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/frozenlist/libpy3contrib-python-frozenlist.global.a |55.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/pcre/pcre32/liblibs-pcre-pcre32.a |55.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/openldap/libcontrib-libs-openldap.a |55.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/poco/Crypto/liblibs-poco-Crypto.a |55.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/liblibs-aws-sdk-cpp-aws-cpp-sdk-s3.a |55.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/nayuki_md5/libcontrib-libs-nayuki_md5.a |55.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/lzmasdk/libcontrib-libs-lzmasdk.a |55.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/poco/NetSSL_OpenSSL/liblibs-poco-NetSSL_OpenSSL.a |55.4%| [AR] 
{BAZEL_DOWNLOAD} $(B)/contrib/libs/tcmalloc/malloc_extension/liblibs-tcmalloc-malloc_extension.a |55.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/poco/JSON/liblibs-poco-JSON.a |55.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/t1ha/libcontrib-libs-t1ha.a |55.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/snappy/libcontrib-libs-snappy.a |55.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/pcre/libcontrib-libs-pcre.a |55.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/simdjson/libcontrib-libs-simdjson.a |55.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/yajl/libcontrib-libs-yajl.a |55.4%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/schema/libalter-in_store-schema.a |55.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/sasl/libcontrib-libs-sasl.a |55.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/abseil-cpp/absl/random/libabseil-cpp-absl-random.a |55.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/invoke_builtins/llvm16/libminikql-invoke_builtins-llvm16.a |55.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/poco/Util/liblibs-poco-Util.a |55.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Object/libllvm16-lib-Object.a |55.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/abseil-cpp/absl/status/libabseil-cpp-absl-status.a |55.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/poco/XML/liblibs-poco-XML.a |55.4%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/olap/options/libschemeshard-olap-options.a |55.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/importlib-resources/libpy3contrib-python-importlib-resources.global.a |55.4%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/olap/operations/libschemeshard-olap-operations.a |55.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/utf8proc/libcontrib-libs-utf8proc.a |55.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/aws/aws-c-compression/librestricted-aws-aws-c-compression.a |55.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/yaml-cpp/libcontrib-libs-yaml-cpp.a |55.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/ngtcp2/libcontrib-libs-ngtcp2.a |55.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/opentelemetry-proto/libcontrib-libs-opentelemetry-proto.a |55.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Passes/libllvm16-lib-Passes.a |55.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/actualizer/common/libstorage-actualizer-common.a |55.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/zstd06/libcontrib-libs-zstd06.a |55.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/aws/aws-c-http/librestricted-aws-aws-c-http.a |55.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/data_sharing/source/transactions/tx_finish_ack_to_source.cpp |55.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/libunwind/libcontrib-libs-libunwind.a |55.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/aws/aws-c-auth/librestricted-aws-aws-c-auth.a |55.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/aws/aws-c-io/librestricted-aws-aws-c-io.a |55.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/grpcio/py3/libpy3python-grpcio-py3.global.a |55.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/aws/aws-checksums/librestricted-aws-aws-checksums.a |55.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/aws/aws-c-cal/librestricted-aws-aws-c-cal.a |55.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/boost/exception/librestricted-boost-exception.a |55.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/boost/atomic/librestricted-boost-atomic.a |55.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/poco/Net/liblibs-poco-Net.a |55.5%| [AR] 
|55.5%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/olap/schema/libschemeshard-olap-schema.a
|55.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Transforms/Instrumentation/liblib-Transforms-Instrumentation.a
|55.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/aws/aws-c-common/librestricted-aws-aws-c-common.a
|55.5%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/olap/ttl/libschemeshard-olap-ttl.a
|55.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/boost/random/librestricted-boost-random.a
|55.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/aws/aws-c-sdkutils/librestricted-aws-aws-c-sdkutils.a
|55.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Support/libllvm16-lib-Support.a
|55.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/boost/coroutine/librestricted-boost-coroutine.a
|55.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/boost/context/fcontext_impl/libboost-context-fcontext_impl.a
|55.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/sequenceproxy/public/libtx-sequenceproxy-public.a
|55.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/tiering/abstract/libtx-tiering-abstract.a
|55.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/boost/context/impl_common/libboost-context-impl_common.a
|55.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/boost/container/librestricted-boost-container.a
|55.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Transforms/InstCombine/liblib-Transforms-InstCombine.a
|55.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/sequenceproxy/libcore-tx-sequenceproxy.a
|55.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Transforms/Vectorize/liblib-Transforms-Vectorize.a
|55.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/poco/Foundation/liblibs-poco-Foundation.a
|55.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/boost/regex/librestricted-boost-regex.a
|55.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/boost/context/ucontext_impl/libboost-context-ucontext_impl.a
|55.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/aws/aws-c-event-stream/librestricted-aws-aws-c-event-stream.a
|55.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/boost/chrono/librestricted-boost-chrono.a
|55.5%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/olap/store/libschemeshard-olap-store.a
|55.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/olap/ttl/schema.cpp
|55.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/olap/ttl/update.cpp
|55.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/boost/thread/librestricted-boost-thread.a
|55.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/aws/aws-c-mqtt/librestricted-aws-aws-c-mqtt.a
|55.5%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/sequenceshard/libcore-tx-sequenceshard.a
|55.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/aws/aws-c-s3/librestricted-aws-aws-c-s3.a
|55.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/CodeGen/SelectionDAG/liblib-CodeGen-SelectionDAG.a
|55.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/changes/compaction/common/context.cpp
|55.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/bit_io/liblibrary-cpp-bit_io.a
|55.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/boost/program_options/librestricted-boost-program_options.a
|55.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/uriparser/libcontrib-restricted-uriparser.a
|55.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/binsaver/liblibrary-cpp-binsaver.a
|55.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/boost/iostreams/librestricted-boost-iostreams.a
|55.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/blockcodecs/codecs/brotli/libblockcodecs-codecs-brotli.global.a
|55.5%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/olap/table/libschemeshard-olap-table.a
|55.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/blockcodecs/codecs/bzip/libblockcodecs-codecs-bzip.global.a
|55.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/sequenceshard/schema.cpp
|55.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/aws/aws-crt-cpp/librestricted-aws-aws-crt-cpp.a
|55.6%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/reader/sys_view/portions/libreader-sys_view-portions.global.a
|55.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/boost/locale/librestricted-boost-locale.a
|55.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/boost/graph/librestricted-boost-graph.a
|55.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/changes/abstract/remove_portions.cpp
|55.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/IR/libllvm16-lib-IR.a
|55.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Transforms/Utils/liblib-Transforms-Utils.a
|55.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/boost/serialization/librestricted-boost-serialization.a
|55.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/grpcio/py3/libpy3python-grpcio-py3.a
|55.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/lib/sharding/libservices-lib-sharding.a
|55.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/aws/s2n/librestricted-aws-s2n.a
|55.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/charset/liblibrary-cpp-charset.a
|55.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/cgiparam/liblibrary-cpp-cgiparam.a
|55.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/changes/compaction/sub_columns/logic.cpp
|55.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/cache/liblibrary-cpp-cache.a
|55.6%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/time_cast/libcore-tx-time_cast.a
|55.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/charset/lite/libcpp-charset-lite.a
|55.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/tracing/service/libtx-tracing-service.a
|55.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/changes/compaction/sub_columns/builder.cpp
|55.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/case_insensitive_string/liblibrary-cpp-case_insensitive_string.a
|55.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/dragonbox/libdragonbox.a
|55.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/codecs/greedy_dict/libcpp-codecs-greedy_dict.a
|55.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Transforms/IPO/liblib-Transforms-IPO.a
|55.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Analysis/libllvm16-lib-Analysis.a
|55.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/http-parser/libcontrib-restricted-http-parser.a
|55.5%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/ydb_convert/libydb-core-ydb_convert.a
|55.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/local_discovery/libydb-services-local_discovery.a
|55.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Target/X86/liblib-Target-X86.a
|55.6%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/sharding/libcore-tx-sharding.a
|55.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/sequenceshard/public/libtx-sequenceshard-public.a
|55.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/googletest/googletest/librestricted-googletest-googletest.a
|55.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/archive/liblibrary-cpp-archive.a
|55.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/keyvalue/libydb-services-keyvalue.a
|55.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/data_sharing/source/transactions/tx_data_ack_to_source.cpp
|55.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/maintenance/libydb-services-maintenance.a
|55.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/reader/common/description.h_serialized.cpp
|55.6%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/sharding/libcore-tx-sharding.global.a
|55.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/googletest/googlemock/librestricted-googletest-googlemock.a
|55.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/changes/compaction/plain/column_portion_chunk.cpp
|55.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/changes/compaction/sub_columns/remap.cpp
|55.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/hyperscan/libcontrib-libs-hyperscan.a
|55.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/thrift/libcontrib-restricted-thrift.a
|55.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/sharding/unboxed_reader.cpp
|55.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/changes/compaction/sub_columns/iterator.cpp
|55.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/sharding/hash.cpp
|55.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/ydb_convert/ydb_convert.cpp
|55.6%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/tiering/tier/libtx-tiering-tier.a
|55.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/insert_table/path_info.cpp
|55.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/common/description.cpp
|55.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/data_sharing/source/transactions/tx_start_to_source.cpp
|55.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/ydb_convert/compression.cpp
|55.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/blockcodecs/codecs/legacy_zstd06/libblockcodecs-codecs-legacy_zstd06.global.a
|55.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/changes/compaction/plain/column_cursor.cpp
|55.6%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/skip_index/libstorage-indexes-skip_index.a
|55.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/blockcodecs/codecs/lz4/libblockcodecs-codecs-lz4.global.a
|55.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/tiering/tier/identifier.cpp
|55.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/blockcodecs/codecs/snappy/libblockcodecs-codecs-snappy.global.a
|55.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/tiering/tier/s3_uri.cpp
|55.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/blockcodecs/codecs/fastlz/libblockcodecs-codecs-fastlz.global.a
|55.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/cityhash-1.0.2/libcontrib-restricted-cityhash-1.0.2.a
|55.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/changes/compaction/plain/logic.cpp
|55.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/changes/compaction/plain/merged_column.cpp
|55.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/blockcodecs/liblibrary-cpp-blockcodecs.a
|55.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/blockcodecs/codecs/zlib/libblockcodecs-codecs-zlib.global.a
|55.6%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_common/libtx-datashard-ut_common.a
|55.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/public/types/libessentials-public-types.a
|55.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/blockcodecs/codecs/lzma/libblockcodecs-codecs-lzma.global.a
|55.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/data_sharing/source/session/source.cpp
|55.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/insert_table/insert_table.cpp
|55.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/lib/libpy3tests-datashard-lib.global.a
|55.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/insert_table/rt_insertion.cpp
|55.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/log_backend/json_envelope_ut.cpp
|55.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/delete_queue.cpp
|55.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/codecs/liblibrary-cpp-codecs.a
|55.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/library/test_meta/libpy3tests-library-test_meta.global.a
|55.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/pq_read/test/objcopy_9818d2b70aad7db98a0f9c044c.o
|55.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/tests/functional/backup/backup_ut.cpp
|55.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/common/result.cpp
|55.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/sequenceshard/sequenceshard_impl.cpp
|55.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/sequenceshard/tx_create_sequence.cpp
|55.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/datastreams_helpers/libpy3tests-tools-datastreams_helpers.global.a
|55.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/fq_runner/libpy3tests-tools-fq_runner.global.a
|55.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/pq_read/test/objcopy_0035b673555f394234ae284e25.o
|55.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/common/conveyor_task.cpp
|55.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/pg8000/libpy3contrib-python-pg8000.global.a
|55.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/fq/libydb-services-fq.a
|55.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/oltp_workload/tests/objcopy_49a1ca9559288648fba9cf7b65.o
|55.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/oltp_workload/tests/objcopy_367e2bc5d83faa0907a06d2976.o
|55.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/common/libpy3tests-stress-common.global.a
|55.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/oltp_workload/tests/objcopy_0446f521b26a2e8128f94ac50f.o
|55.6%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/hooks/testing/libcolumnshard-hooks-testing.a
|55.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/sequenceshard/tx_drop_sequence.cpp
|55.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/oltp_workload/workload/libpy3stress-oltp_workload-workload.global.a
|55.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/oltp_workload/workload/type/libpy3oltp_workload-workload-type.global.a
|55.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ydb_convert/topic_description.cpp
|55.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/icu/libcontrib-libs-icu.a
|55.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/sql/lib/libpy3tests-sql-lib.global.a
|55.6%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/workload_service/ut/common/libworkload_service-ut-common.a
|55.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/olap/helpers/get_value.cpp
|55.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/olap/options/schema.cpp
|55.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/retry/libpy3library-python-retry.global.a
|55.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kafka_proxy/kqp_helper.cpp
|55.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ydb_convert/column_families.cpp
|55.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/tests/tools/fqrun/src/actors.cpp
|55.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/ymq/actor/fifo_cleanup.h_serialized.cpp
|55.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/postgresql/objcopy_816e2dba53f55d924139cdb3c5.o
|55.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/tests/tools/fqrun/src/common.cpp
|55.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/sequenceshard/tx_init.cpp
|55.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/olap_workload/workload/libpy3stress-olap_workload-workload.global.a
|55.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/olap_workload/tests/objcopy_e68ca1a2fa9943132c020ae028.o
|55.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/sequenceshard/tx_init_schema.cpp
|55.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/tests/tools/fqrun/src/fq_runner.cpp
|55.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/draft/ymq.pb.cc
|55.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/index_events_processor.cpp
|55.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/column_engine_logs.h_serialized.cpp
|55.6%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_vdisk/lib/libblobstorage-ut_vdisk-lib.a
|55.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/olap/options/update.cpp
|55.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/olap_workload/tests/objcopy_9be8b6745d0fa150928bab4206.o
|55.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kafka_proxy/kafka_transactions_coordinator.cpp
|55.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/olap_workload/tests/objcopy_8e19d47784789c55156c57f816.o
|55.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/antlr4-c3/libcontrib-libs-antlr4-c3.a
|55.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/time_cast/time_cast.cpp
|55.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/draft/field_transformation.pb.cc
|55.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/draft/datastreams.pb.h_serialized.cpp
|55.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kafka_proxy/actors/kafka_create_partitions_actor.cpp
|55.7%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yql/providers/generic/connector/libcpp/ut_helpers/libconnector-libcpp-ut_helpers.a
|55.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/boto3/py3/libpy3python-boto3-py3.global.a
|55.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/olap/schema/schema.cpp
|55.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/delete_user.cpp
|55.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/portions/constructor_portion.cpp
|55.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/draft/persqueue_common.pb.h_serialized.cpp
|55.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/draft/datastreams.pb.cc
|55.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/draft/persqueue_common.pb.cc
|55.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/changes/actualization/controller/controller.cpp
|55.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kafka_proxy/actors/kafka_sasl_handshake_actor.cpp
|55.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_vdisk/lib/http_client.cpp
|55.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/secret/fetcher.cpp
|55.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/sequenceshard/tx_allocate_sequence.cpp
|55.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_vdisk/lib/dataset.cpp
|55.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/draft/fq.pb.cc
|55.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/storage/optimizer/abstract/optimizer.cpp
|55.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/secret/checker_access.cpp
|55.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/providers/generic/connector/libcpp/ut_helpers/defaults.cpp
|55.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/providers/generic/connector/libcpp/ut_helpers/database_resolver_mock.cpp
|55.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/CodeGen/libllvm16-lib-CodeGen.a
|55.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_restore_ut.cpp
|55.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/draft/persqueue_error_codes.pb.cc
|55.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ydb_convert/tx_proxy_status.cpp
|55.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_ut_color_limits.cpp
|55.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/draft/ydb_backup.pb.cc
|55.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/secret/access.cpp
|55.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/sequenceshard/sequenceshard.cpp
|55.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/sequenceshard/tx_update_sequence.cpp
|55.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_log_cache_ut.cpp
|55.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/sharding/hash_slider.cpp
|55.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/sharding/sharding.cpp
|55.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/sequenceshard/tx_freeze_sequence.cpp
|55.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/column_engine_logs.cpp
|55.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/sequenceshard/tx_get_sequence.cpp
|55.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/secret/checker_secret.cpp
|55.7%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stability/tool/libpy3tests-stability-tool.global.a
|55.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/zstandard/py3/libpy3python-zstandard-py3.a
|55.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/zstandard/py3/libpy3python-zstandard-py3.global.a
|55.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/olap/operations/alter/standalone/update.cpp
|55.7%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/apps/etcd_proxy/service/libapps-etcd_proxy-service.a
|55.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/garbage_collector.cpp
|55.7%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/tools/kqprun/runlib/libtools-kqprun-runlib.a
|55.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/olap/schema/update.cpp
|55.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/threading/chunk_queue/libcpp-threading-chunk_queue.a
|55.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_ut_actions.cpp
|55.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_ut.cpp
|55.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/list_queue_tags.cpp
|55.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/storage/indexes/skip_index/constructor.cpp
|55.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stability/tool/objcopy_04f56802b68450abc8421282d0.o
|55.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/olap/table/table.cpp
|55.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/defrag/defrag_actor_ut.cpp
|55.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stability/tool/objcopy_6403bfa5c5e35b29a21c73fb0e.o
|55.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kafka_proxy/actors/kafka_list_offsets_actor.cpp
|55.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/apps/etcd_proxy/service/etcd_shared.cpp
|55.7%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_helpers/libtx-schemeshard-ut_helpers.a
|55.7%| [PY] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stability/tool/objcopy_533f06087e794c7af638ea75dc.o
|55.7%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/persqueue/ut/common/libpersqueue-ut-common.a
|55.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/get_queue_attributes.cpp
|55.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/column_engine.cpp
|55.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/apps/etcd_proxy/service/etcd_base_init.cpp
|55.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/public/sdk/cpp/src/client/topic/ut/ut_utils/libtopic-ut-ut_utils.a
|55.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/changes/compaction/merger.cpp
|55.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kafka_proxy/actors/kafka_alter_configs_actor.cpp
|55.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/apps/etcd_proxy/service/etcd_watch.cpp
|55.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/ymq/actor/events.h_serialized.cpp
|55.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/changes/merge_subset.cpp
|55.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_ut_yard.cpp
|55.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/sharding/hash_intervals.cpp
|55.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_helpers/auditlog_helpers.cpp
|55.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kafka_proxy/actors/kafka_sasl_auth_actor.cpp
|55.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/tiering/tier/object.cpp
|55.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/moto/py3/libpy3python-moto-py3.global.a
|55.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/src/client/topic/ut/ut_utils/managed_executor.cpp
|55.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/botocore/py3/libpy3python-botocore-py3.global.a
|55.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/changes/actualization/construction/context.cpp
|55.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/receive_message.cpp
|55.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/src/client/topic/ut/ut_utils/trace.cpp
|55.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/sharding/random.cpp
|55.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kafka_proxy/actors/kafka_transaction_actor.cpp
|55.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/threading/local_executor/libcpp-threading-local_executor.a
|55.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kafka_proxy/actors/kafka_balancer_actor.cpp
|55.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/db_wrapper.cpp
|55.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/apps/etcd_proxy/service/etcd_grpc.cpp
|55.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_rate_limiter.pb.cc
|55.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_discovery.pb.cc
|55.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/storage/indexes/skip_index/meta.cpp
|55.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/changes/general_compaction.cpp
|55.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/queue_schema.cpp
|55.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/sequenceshard/tx_restore_sequence.cpp
|55.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/googleapis-common-protos/libpy3contrib-libs-googleapis-common-protos.global.a
|55.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/purge_queue.cpp
|55.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/runtime/kqp_scan_data_ut.cpp
|55.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_helpers/ls_checks.cpp
|55.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/queues_list_reader.cpp
|55.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/clickhouse-connect/libpy3contrib-python-clickhouse-connect.global.a
|55.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kafka_proxy/actors/kafka_metadata_actor.cpp
|55.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/changes/indexation.cpp
|55.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/changes/cleanup_tables.cpp
|55.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/python/yt/libpy3yt-python-yt.global.a
|55.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/python/yt/yson/libpy3python-yt-yson.global.a
|55.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/python/yt/type_info/libpy3python-yt-type_info.global.a
|55.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/ymq/actor/queue_schema.h_serialized.cpp
|55.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/list_users.cpp
|55.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_export.pb.cc
|55.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kafka_proxy/actors/kafka_describe_configs_actor.cpp
|55.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/draft/ydb_logstore.pb.cc
|55.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/draft/ydb_dynamic_config.pb.cc
|55.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/health_check/health_check.cpp
|55.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/jmespath/py3/libpy3python-jmespath-py3.global.a
|55.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/draft/ydb_maintenance.pb.cc
|55.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/sharding/hash_modulo.cpp
|55.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_federation_discovery.pb.cc
|55.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/draft/ydb_replication.pb.cc
|55.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/draft/ydb_tablet.pb.cc
|55.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/runtime/kqp_compute_scheduler_ut.cpp
|55.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kafka_proxy/actors/kafka_init_producer_id_actor.cpp
|55.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/generic/connector/tests/datasource/clickhouse/e273c09e9944ed8d4db55cf519_raw.auxcpp
|55.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kafka_proxy/actors/kafka_offset_commit_actor.cpp
|55.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/hulldb/test/libvdisk-hulldb-test.a
|55.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/generic/connector/tests/datasource/clickhouse/objcopy_04bfe236a98e0af88f14e75aff.o
|55.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/generic/connector/tests/common_test_cases/libpy3connector-tests-common_test_cases.global.a
|55.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/asn1crypto/libpy3contrib-python-asn1crypto.global.a
|55.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/secret/access_behaviour.cpp
|55.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kafka_proxy/actors/kafka_create_topics_actor.cpp
|55.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/ut_utils/libpersqueue_public-ut-ut_utils.a
|55.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/sequenceshard/tx_mark_schemeshard_pipe.cpp
|55.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/changes/with_appended.cpp
|55.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/list_queues.cpp
|55.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/generic/connector/tests/datasource/clickhouse/objcopy_0ee10940713087f217114ab4be.o
|55.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/generic/connector/tests/datasource/clickhouse/objcopy_fbab8021d30ec8df368308c49a.o
|55.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_import.pb.cc
|55.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/schema/update.cpp
|55.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/secret/snapshot.cpp
|55.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/generic/connector/tests/utils/clients/libpy3tests-utils-clients.global.a
|55.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/purge.cpp
|55.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kafka_proxy/kafka_transactional_producers_initializers.cpp
|55.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/ut_utils/data_plane_helpers.cpp
|55.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/changes/compaction.cpp
|55.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/generic/connector/tests/utils/run/libpy3tests-utils-run.global.a
|55.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/generic/connector/tests/utils/libpy3connector-tests-utils.global.a
|55.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_formats.pb.cc
|55.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/generic/connector/tests/utils/scenario/libpy3tests-utils-scenario.global.a
|55.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/generic/connector/tests/utils/types/libpy3tests-utils-types.global.a
|55.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_issue_message.pb.cc
|55.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kafka_proxy/actors/kafka_topic_offsets_actor.cpp
|55.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_keyvalue.pb.cc
|55.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_operation.pb.cc
|55.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/shared_cache_switchable_ut.cpp
|55.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_monitoring.pb.cc
|56.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kafka_proxy/kafka_connection.cpp
|55.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/docker/libpy3contrib-python-docker.global.a
|55.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/node_tracker.cpp
|55.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_topic.pb.cc
|55.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/pytz/py3/libpy3python-pytz-py3.global.a
|55.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/public_http/http_router_ut.cpp
|55.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ydb_convert/table_description.cpp
|55.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_value.pb.cc
|56.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_clickhouse_internal.pb.cc
|56.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/list_permissions.cpp
|56.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/tablet_flat/ut/objcopy_9f29b589555ed64086e5eadccf.o
|55.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_topic.pb.h_serialized.cpp
|55.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yaml_config/static_validator/ut/ydb-library-yaml_config-static_validator-ut
|55.9%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/core/erasure/ut_perf/ydb-core-erasure-ut_perf
|55.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/shared_cache_clock_pro_ut.cpp
|55.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/sequenceshard/tx_redirect_sequence.cpp
|55.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kafka_proxy/kafka_consumer_members_metadata_initializers.cpp
|55.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kafka_proxy/actors/kafka_read_session_actor.cpp
|55.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kafka_proxy/kafka_consumer_groups_metadata_initializers.cpp
|56.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/yaml_config/static_validator/ut/test.cpp
|56.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kafka_proxy/actors/kafka_fetch_actor.cpp
|56.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/tests/unit/client/draft/helpers/libclient-draft-helpers.a
|56.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/secret/secret_behaviour.cpp
|56.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/secret/manager.cpp
|55.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_memtable.cpp
|55.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/shared_handle_ut.cpp
|55.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/modify_permissions.cpp
|56.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ydb_convert/table_settings.cpp
|56.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_stat.cpp
|56.0%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/signer/ut/ydb-core-fq-libs-signer-ut
|56.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/changes/compaction/sparsed/logic.cpp
|56.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/Werkzeug/py3/libpy3python-Werkzeug-py3.global.a
|56.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tablet_flat/test/libs/table/libtest-libs-table.a
|56.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/tests/unit/client/draft/ydb_scripting_response_headers_ut.cpp
|56.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/tests/unit/client/draft/ydb_view_ut.cpp
|56.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/olap/operations/alter_store.cpp
|55.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kafka_proxy/actors/kafka_offset_fetch_actor.cpp
|56.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_cxx_database_ut.cpp
|56.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kafka_proxy/actors/kafka_balance_actor_sql.cpp
|56.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/queue_leader.cpp
|56.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_comp_gen.cpp
|56.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_ut_run.cpp
|56.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_self.cpp
|56.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_decimal.cpp
|56.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_charge.cpp
|56.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_table_part_ut.cpp
|56.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_range_cache_ut.cpp
|56.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/olap/operations/drop_store.cpp
|56.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/scramp/libpy3contrib-python-scramp.global.a
|56.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/statistics/ut_common/libcore-statistics-ut_common.a
|55.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/list_dead_letter_source_queues.cpp
|56.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_compaction.cpp
|56.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/reader/sys_view/portions/portions.cpp
|56.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/persqueue_error_codes_v1.pb.cc
|56.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_persqueue_cluster_discovery.pb.h_serialized.cpp
|56.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/simplejson/py3/libpy3python-simplejson-py3.a
|56.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/changes/ttl.cpp
|56.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_sausage.cpp
|56.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/yql_testlib/libydb-core-yql_testlib.a
|56.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/executor.cpp
|56.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/changes/cleanup_portions.cpp
|56.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_db_iface.cpp
|56.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/simplejson/py3/libpy3python-simplejson-py3.global.a
|56.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/testing/gtest/libcpp-testing-gtest.a
|56.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/streams/factory/open_common/libstreams-factory-open_common.a
|56.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/secret/secret.cpp
|56.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_slice.cpp
|56.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_persqueue_cluster_discovery.pb.cc
|56.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/xmltodict/py3/libpy3python-xmltodict-py3.global.a
|56.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_slice_loader.cpp
|56.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/testing/gtest_main/libcpp-testing-gtest_main.a
|56.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_screen.cpp
|56.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_ut_races.cpp
|56.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_persqueue_v1.pb.cc
|56.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_vdisk/lib/test_many.cpp
|56.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yaml_config/validator/ut/validator_builder/yaml_config-validator-ut-validator_builder
|56.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_scripting.pb.cc
|56.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ydb_convert/table_profiles.cpp
|56.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_auth.pb.cc
|56.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/transfer/update.cpp
|56.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/metering.cpp
|56.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/proxy_service.cpp
|56.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yaml_config/validator/ut/validator_builder/validator_builder_ut.cpp
|56.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_vdisk/lib/vdisk_mock.cpp
|56.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/apps/etcd_proxy/service/etcd_impl.cpp
|56.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet_flat/flat_executor_gclogic_ut.cpp
>> TErasurePerfTest::Split
|56.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/olap/store/store.cpp
|56.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/fifo_cleanup.cpp
|56.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_cms.pb.cc
|56.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/providers/s3/actors/ut/yql_arrow_push_down_ut.cpp
|56.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_iterator.cpp
|56.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_db_scheme.cpp
|56.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_compaction_multi.cpp
|56.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_btree_index_nodes.cpp
|56.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_common.pb.cc
|56.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/defrag.cpp
|56.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_vdisk/lib/test_faketablet.cpp
>> TErasurePerfTest::Split [GOOD]
>> TErasurePerfTest::Restore
>> Signer::Basic [GOOD]
|56.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/public/api/protos/ydb_config.pb.cc
|56.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_vdisk/lib/test_dbstat.cpp
|56.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_scheme.pb.cc
|56.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_vdisk/lib/test_bad_blobid.cpp
|56.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_coordination.pb.cc
|56.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_forward.cpp
|56.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_vdisk/lib/test_outofspace.cpp
|56.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/olap/ttl/validator.cpp
|56.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_vdisk/lib/test_repl.cpp
|56.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/public/sdk/cpp/tests/integration/server_restart/public-sdk-cpp-tests-integration-server_restart
|56.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_table.pb.cc
|56.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/allure-pytest/libpy3contrib-python-allure-pytest.global.a
|56.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/proxy_actor.cpp
|56.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/draft/ydb_view.pb.cc
|56.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_vdisk/lib/test_gc.cpp
|56.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/hooks/testing/controller.cpp
|56.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/shared_cache_s3fifo_ut.cpp
|56.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_vdisk/lib/test_brokendevice.cpp
|56.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_status_codes.pb.cc
|56.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/tests/integration/server_restart/main.cpp
|56.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_part.cpp
|56.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/secret/initializer.cpp
|56.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_vdisk/lib/test_huge.cpp
|56.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/hooks/testing/ro_controller.cpp
|56.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_helpers/export_reboots_common.cpp
|56.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet_flat/ut/flat_test_db.cpp
|56.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/olap/operations/create_table.cpp
|56.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_blockdevice_ut.cpp
|56.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_vdisk/lib/test_localrecovery.cpp
|56.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/workload_service/ut/common/kqp_workload_service_ut_common.cpp
|56.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yaml_config/validator/liblibrary-yaml_config-validator.a
|56.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_vdisk/lib/test_defrag.cpp
|56.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_vdisk/lib/test_load.cpp
|56.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/olap/helpers/local.cpp
|56.1%| [TS] {asan, default-linux-x86_64, release} ydb/core/fq/libs/signer/ut/unittest >> Signer::Basic [GOOD]
|56.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/yaml_config/validator/configurators.cpp
|56.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/yaml_config/validator/validator_builder.cpp
|56.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_vdisk/lib/prepare.cpp
|56.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/hulldb/test/testhull_index.cpp
|56.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/yaml_config/validator/validator_checks.cpp
|56.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/olap/operations/alter_table.cpp
|56.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/yaml_config/validator/validator.cpp
|56.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_util_ut.cpp
|56.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/fyamlcpp/ut/ydb-library-fyamlcpp-ut
|56.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/yql/providers/generic/connector/libcpp/ut_helpers/connector_client_mock.cpp
|56.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/olap/operations/drop_table.cpp
|56.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/library/yaml_config/validator/validator_builder.h_serialized.cpp
|56.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/kqp/kqp_query_session/ydb-tests-functional-kqp-kqp_query_session
|56.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/fyamlcpp/fyamlcpp_ut.cpp
|56.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/ut/common/pq_ut_common.cpp
|56.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_bloom.cpp
|56.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/olap/operations/alter/standalone/object.cpp
|56.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet_flat/ut/ut_other.cpp
|56.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_vdisk/lib/helpers.cpp
|56.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_helpers/failing_mtpq.cpp
|56.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/tests/functional/kqp/kqp_query_session/main.cpp
|56.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/draft/ydb_object_storage.pb.cc
|56.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/olap/operations/create_store.cpp
|56.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/olap/helpers/aggregation.cpp
|56.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tablet_flat/test/libs/rows/libtest-libs-rows.a
|56.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_helpers/test_env.cpp
|56.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/fyamlcpp/libfyaml_ut.cpp
|56.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/backup/s3_path_style/ydb-tests-functional-backup-s3_path_style
|56.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet_flat/ut/ut_shared_sausagecache_actor.cpp
|56.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/tools/pq_read/pq_read
|56.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_helpers/data_erasure_helpers.cpp
|56.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/config/utils/libcore-config-utils.a
|56.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/public/sdk/cpp/tests/integration/basic_example/public-sdk-cpp-tests-integration-basic_example
|56.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/config/ut/main.cpp
|56.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_debug.pb.cc
|56.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_vdisk/lib/test_synclog.cpp
|56.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_helpers/helpers.cpp
|56.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/tests/functional/backup/s3_path_style/s3_path_style_backup_ut.cpp
|56.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/driver_lib/version/ut/version_ut.cpp
|56.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/tests/tools/pq_read/main.cpp
|56.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/query/kqp_types_ut.cpp
|56.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/tests/tools/kqprun/runlib/utils.cpp
|56.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/tests/integration/basic_example/basic_example_data.cpp
|56.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/tests/integration/basic_example/main.cpp
|56.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/tests/integration/basic_example/basic_example.cpp
|56.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/config/ut/ydb-core-config-ut
|56.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/ut/common/autoscaling_ut_common.cpp
|56.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_versions.cpp
|56.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/security/ut/util_ut.cpp
|56.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/fq/libs/row_dispatcher/format_handler/ut/common/libformat_handler-ut-common.a
|56.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/skeleton/skeleton_oos_logic_ut.cpp
|56.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet_flat/ut/ut_datetime.cpp
|56.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/public/sdk/cpp/src/client/topic/ut/ut_utils/topic_sdk_test_setup.cpp
|56.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_query.pb.cc
|56.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet_flat/ut/ut_data_cleanup.cpp
|56.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/query/kqp_query_ut.cpp
|56.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_btree_index_iter_charge.cpp
|56.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/tools/kqprun/src/libtools-kqprun-src.a
|56.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_query_stats.pb.cc
|56.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/cblas/libcontrib-libs-cblas.a
|56.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/security/ut/ydb-library-security-ut
|56.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/tests/tools/kqprun/runlib/application.cpp
|56.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_split_merge/ut_split_merge.cpp
|56.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/prctl/libpy3library-python-prctl.global.a
|56.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/ut_utils/ut_utils.cpp
|56.1%| [LD] {BAZEL_DOWNLOAD} $(B)/contrib/python/moto/bin/moto_server
|56.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/tests/tools/fqrun/src/fq_setup.cpp
|56.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_pq_reboots/ut_pq_reboots.cpp
|56.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/skeleton/skeleton_vpatch_actor_ut.cpp
|56.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/kqprun/src/common.h_serialized.cpp
|56.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/replication/ydb-tests-functional-replication
|56.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/testlib/service_mocks/ldap_mock/libtestlib-service_mocks-ldap_mock.a
|56.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/driver_lib/version/ut/ydb-core-driver_lib-version-ut
|56.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/clickhouse-connect/libpy3contrib-python-clickhouse-connect.a
|56.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/get_block.cpp
|56.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_transfer/ut_transfer.cpp
|56.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/allure-python-commons/libpy3contrib-python-allure-python-commons.global.a
|56.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/libf2c/libcontrib-libs-libf2c.a
|56.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/streams/lz/snappy/libstreams-lz-snappy.a
|56.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/tests/functional/replication/transfer.cpp
|56.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_redo.cpp
|56.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/clapack/part1/liblibs-clapack-part1.a
|56.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/prctl/libpy3library-python-prctl.a
|56.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/tests/functional/replication/replication.cpp
|56.1%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/viewer/tests/objcopy_af18efc2f04dd1af5ca802c329.o
|56.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/vdisk_restart.cpp
|56.1%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/viewer/tests/objcopy_87b299e07b15c86f4f50f458ef.o
|56.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/ut_common/datashard_ut_common.cpp
|56.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/assimilation.cpp
|56.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/statistics/ut_common/ut_common.cpp
|56.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/ut_utils/test_server.cpp
|56.1%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/viewer/tests/objcopy_f3c323ef80ada193284f036d44.o
|56.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/mediator/mediator_ut.cpp
|56.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/cluster_info_ut.cpp
|56.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/config/validation/auth_config_validator_ut/auth_config_validator_ut.cpp
|56.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/query/kqp_limits_ut.cpp
|55.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet_flat/ut/ut_shared_sausagecache.cpp
|55.9%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/streams/lz/libcpp-streams-lz.a
|55.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/public/sdk/cpp/tests/unit/client/oauth2_token_exchange/tests-unit-client-oauth2_token_exchange
|55.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/kqprun/recipe/objcopy_dcbdf62672440a626e79a64e14.o
|55.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/kqprun/recipe/libpy3kqprun_recipe.global.a
|55.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/io_formats/arrow/scheme/csv_arrow_ut.cpp
|55.9%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/streams/lz/lz4/libstreams-lz-lz4.a
|55.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_proto.cpp
|55.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_export/ut_export.cpp
|55.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/query/kqp_params_ut.cpp
|55.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/streams/factory/open_by_signature/libstreams-factory-open_by_signature.a
|55.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_pages.cpp
|55.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_part_multi.cpp
|55.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/tests/unit/client/oauth2_token_exchange/jwt_token_source_ut.cpp
|55.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/sqs/merge_split_common_table/libpy3functional-sqs-merge_split_common_table.global.a
|55.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/sqs/merge_split_common_table/std/objcopy_2efdf95387a81f55cf9c81071a.o
|55.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/space_check.cpp
|55.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/tools/kqprun/recipe/kqprun_recipe
|56.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/sqs/merge_split_common_table/std/objcopy_242486256e1af973cd1d5376d1.o
|56.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/clapack/part2/liblibs-clapack-part2.a
|56.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/tests/unit/client/oauth2_token_exchange/credentials_ut.cpp
|56.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/sqs/merge_split_common_table/std/objcopy_5d73baff4bb68923ddbe5f4fcd.o
|55.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/alloc.grpc.pb.cc
|55.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/auth.grpc.pb.cc
|55.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/base.grpc.pb.cc
|55.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_mediator.grpc.pb.cc
|55.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_config.grpc.pb.cc
|55.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/console.pb.cc
|55.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_keyvalue.pb.cc
|55.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_base3.pb.cc
|55.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/bind_channel_storage_pool.grpc.pb.cc
|55.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/backup.pb.cc
|55.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/base.pb.cc
|55.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_kesus.pb.cc
|55.8%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/s3_recipe/s3_recipe
|55.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_keyvalue.grpc.pb.cc
|55.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/query/kqp_stats_ut.cpp
|55.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/backup.grpc.pb.cc
|55.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/feature_flags.pb.cc
|55.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet_flat/flat_executor_leases_ut.cpp
|56.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/libydb-core-protos.a
|56.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/feature_flags.grpc.pb.cc
|56.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/drivemodel.pb.cc
|55.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/http_config.pb.cc
|55.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/statestorage.grpc.pb.cc
|55.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_ut_incremental_restore_scan.cpp
|55.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/minikql_engine.pb.cc
|56.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_schemeshard.pb.cc
|56.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/memory_controller_config.pb.cc
|56.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/mon.pb.cc
|56.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_tx_allocator.grpc.pb.cc
|56.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/minikql_engine.grpc.pb.cc
|56.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/sqs/merge_split_common_table/std/functional-sqs-merge_split_common_table-std
|56.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/mon.grpc.pb.cc
|56.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/labeled_counters.grpc.pb.cc
|56.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_testshard.pb.cc
|56.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_statistics_aggregator.pb.cc
|56.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_sequenceshard.grpc.pb.cc
|56.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_ut_replication.cpp
|56.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_sequenceshard.pb.cc
|56.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_replication.pb.cc
|56.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/scheme_tests/objcopy_5b5c3367c789898aa5a6cae866.o
|56.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/scheme_tests/objcopy_4826ee2207124da1bc398e3bd8.o
|56.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/tests/tools/kqprun/runlib/kikimr_setup.cpp
|56.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_sysview_processor.pb.cc
|56.1%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/scheme_tests/objcopy_8e57113197bb359e3999b04aab.o
|56.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_testshard.grpc.pb.cc
|56.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_sysview_processor.grpc.pb.cc
|56.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/ydb_table_impl.grpc.pb.cc
|56.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/yql_testlib/yql_testlib.cpp
|56.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/ydb_table_impl.pb.cc
|56.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tx_columnshard.grpc.pb.cc
|56.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/kqp_stats.pb.cc
|56.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/ydb_result_set_old.pb.cc
|56.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/library/yaml_config/static_validator/ut/test.cpp
|56.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tx_datashard.grpc.pb.cc
|56.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tx_sequenceshard.grpc.pb.cc
|56.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tx_mediator_timecast.grpc.pb.cc
|56.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_extsubdomain/ut_extsubdomain.cpp
|56.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tx_proxy.pb.cc
|56.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/query_cache/ydb-tests-functional-query_cache
|56.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/query/kqp_analyze_ut.cpp
|56.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tx_proxy.grpc.pb.cc
|56.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/lib/ydb_cli/common/recursive_remove_ut.cpp
|56.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tx_scheme.grpc.pb.cc
|56.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tx_sequenceshard.pb.cc
|56.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tx_datashard.pb.cc
|56.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tx_scheme.pb.cc
|56.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/whiteboard_flags.grpc.pb.cc
|56.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/workload_manager_config.grpc.pb.cc
|56.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/query/kqp_explain_ut.cpp
|56.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/whiteboard_disk_states.grpc.pb.cc
|56.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/ydb_result_set_old.grpc.pb.cc
|56.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/whiteboard_flags.pb.cc
|56.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/workload_manager_config.pb.cc
|56.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/data_events.pb.cc
|56.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/whiteboard_disk_states.pb.cc
|56.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_tx_allocator.pb.cc
|56.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_ut_kqp_errors.cpp
|56.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/yql_translation_settings.pb.cc
|56.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/yql_translation_settings.grpc.pb.cc
[CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/lib/ydb_cli/common/cert_format_converter_ut.cpp |56.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_vdisk/lib/test_simplebs.cpp |56.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/scheme_tests/ydb-tests-functional-scheme_tests |56.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/data_events.grpc.pb.cc |56.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_tx_proxy.pb.cc |56.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/lib/ydb_cli/common/csv_parser_ut.cpp |56.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/lib/ydb_cli/common/pg_dump_parser_ut.cpp |56.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/lib/ydb_cli/common/normalize_path_ut.cpp |56.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tx_mediator_timecast.pb.cc |56.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tx_columnshard.pb.cc |56.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/kqp_stats.grpc.pb.cc |56.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tx.pb.cc |56.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/maintenance.grpc.pb.cc |56.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/memory_controller_config.grpc.pb.cc |56.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/monitoring.cpp |56.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/croaring/libcontrib-libs-croaring.a |56.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/kqp_physical.pb.cc |56.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/load_test.grpc.pb.cc |56.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/local.grpc.pb.cc |56.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/labeled_counters.pb.cc |56.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/long_tx_service.grpc.pb.cc |56.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet_flat/ut/ut_rename_table_column.cpp |56.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/metrics.pb.cc |56.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/memory_stats.pb.cc |56.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/local.pb.cc |56.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/load_test.pb.cc |56.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/metrics.grpc.pb.cc |56.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/memory_stats.grpc.pb.cc |56.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/long_tx_service.pb.cc |56.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/tools/lib/cmds/libpy3tools-lib-cmds.global.a |56.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yaml_config/console_dumper_ut.cpp |56.1%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/public/tools/ydb_recipe/objcopy_c55121179eeb3b5753498290c4.o |56.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/tools/ydb_recipe/libpy3ydb_recipe.global.a |56.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/node_limits.pb.cc |56.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tablet_counters_aggregator.pb.cc |56.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tablet_tx.pb.cc |56.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/sqs.pb.cc |56.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/msgbus_kv.grpc.pb.cc |56.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/cms_ut.cpp |56.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/msgbus_health.pb.cc |56.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/msgbus.grpc.pb.cc |56.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/msgbus_health.grpc.pb.cc |56.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tablet_counters_aggregator.grpc.pb.cc |56.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tablet_database.grpc.pb.cc |56.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/viewer/tests/ydb-core-viewer-tests |56.2%| [CC] {BAZEL_DOWNLOAD} 
$(B)/ydb/core/protos/msgbus_pq.grpc.pb.cc |56.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tablet_tx.grpc.pb.cc |56.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tablet_tracing_signals.pb.cc |56.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tablet_database.pb.cc |56.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/msgbus_kv.pb.cc |56.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tablet_pipe.grpc.pb.cc |56.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/base/ptr_ut.cpp |56.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_move_reboots/ut_move_reboots.cpp |56.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/base/batched_vec_ut.cpp |56.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tablet_tracing_signals.grpc.pb.cc |56.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tablet_pipe.pb.cc |56.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/msgbus.pb.cc |56.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/node_limits.grpc.pb.cc |56.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/base/bufferwithgaps_ut.cpp |56.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/test_shard.pb.cc |56.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/library/yaml_config/validator/validator_checks.cpp |56.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tx.grpc.pb.cc |56.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tenant_pool.pb.cc |56.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tenant_pool.grpc.pb.cc |56.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tenant_slot_broker.grpc.pb.cc |56.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tracing.grpc.pb.cc |56.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tracing.pb.cc |56.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tenant_slot_broker.pb.cc |56.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/highwayhash/arch/sse41/libhighwayhash-arch-sse41.a |56.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/highwayhash/arch/avx2/libhighwayhash-arch-avx2.a |56.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/highwayhash/libcontrib-libs-highwayhash.a |56.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/netclassifier.pb.cc |56.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/netclassifier.grpc.pb.cc |56.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/test_shard.grpc.pb.cc |56.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/sqs.grpc.pb.cc |56.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/node_broker.pb.cc |56.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/profiler.grpc.pb.cc |56.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/node_whiteboard.grpc.pb.cc |56.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/node_broker.grpc.pb.cc |56.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/pdiskfit.pb.cc |56.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/scheme_board_mon.grpc.pb.cc |56.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/pdiskfit.grpc.pb.cc |56.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/query_stats.grpc.pb.cc |56.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/msgbus_pq.pb.cc |56.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yaml_config/deprecated/liblibrary-yaml_config-deprecated.a |56.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/pqconfig.grpc.pb.cc |56.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/s3_settings.pb.cc |56.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/node_whiteboard.pb.cc |56.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/shred.cpp |56.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/pqconfig.pb.cc |56.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/index_restore_get.cpp |56.2%| [CC] {default-linux-x86_64, 
release, asan} $(S)/ydb/library/yaml_config/validator/configurators.cpp |56.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/resource_broker.grpc.pb.cc |56.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/replication.grpc.pb.cc |56.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/cms_maintenance_api_ut.cpp |56.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/query_stats.pb.cc |56.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/resource_broker.pb.cc |56.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/s3_settings.grpc.pb.cc |56.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/replication.pb.cc |56.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/http_api_client/libpy3fq-libs-http_api_client.global.a |56.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/scheduler/libproviders-dq-scheduler.a |56.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/scheme_type_operation.grpc.pb.cc |56.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/scheme_log.grpc.pb.cc |56.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/shared_cache.pb.h_serialized.cpp |56.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/scheme_board.grpc.pb.cc |56.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/scheme_type_metadata.pb.cc |56.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/scheme_board.pb.cc |56.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/gc.cpp |56.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/shared_cache.pb.cc |56.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/scheme_type_metadata.grpc.pb.cc |56.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/scheme_board_mon.pb.cc |56.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/scheme_log.pb.cc |56.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/shared_cache.grpc.pb.cc |56.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/scheme_type_operation.pb.cc |56.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/recovery.cpp |56.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/serverless_proxy_config.grpc.pb.cc |56.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/discover.cpp |56.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/serverless_proxy_config.pb.cc |56.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tablet.grpc.pb.cc |56.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/providers/dq/scheduler/ut/dq_scheduler_ut.cpp |56.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tablet.pb.cc |56.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/stream.pb.cc |56.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/statestorage.pb.cc |56.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/table_service_config.pb.cc |56.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/cms_ut_common.cpp |56.3%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/query_cache/objcopy_e31620202d3ba8df14ff2a18e1.o |56.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/sync.cpp |56.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/public/tools/ydb_recipe/ydb_recipe |56.3%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/query_cache/objcopy_388aef0b6ac03d4f661ae7a30e.o |56.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/config/validation/auth_config_validator_ut/core-config-validation-auth_config_validator_ut |56.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/stream.grpc.pb.cc |56.3%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/query_cache/objcopy_f8b2cbafb1fed0e25bf9683c2d.o |56.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/statistics.grpc.pb.cc |56.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/table_service_config.grpc.pb.cc |56.3%| [CC] {BAZEL_DOWNLOAD} 
$(B)/ydb/core/protos/statistics.pb.cc |56.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/sys_view.grpc.pb.cc |56.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/subdomains.pb.cc |56.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/wardens/objcopy_3db6af291678d4ac330517956a.o |56.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/long_tx_service/public/types_ut.cpp |56.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/vdisk_malfunction.cpp |56.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/subdomains.grpc.pb.cc |56.3%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/wardens/objcopy_1555e67a3dd43a3e7f09bf8eee.o |56.3%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/wardens/objcopy_488333b1ebd4c1d6d8ec5bcb8f.o |56.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/table_stats.pb.cc |56.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/table_stats.grpc.pb.cc |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tablet_counters.pb.cc |56.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/wardens/ydb-tests-functional-wardens |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/grpc_status_proxy.pb.cc |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/maintenance.pb.cc |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/sys_view.pb.cc |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tablet_counters.grpc.pb.cc |56.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/profiler.pb.cc |56.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/grpc_status_proxy.grpc.pb.cc |56.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/grpc_pq_old.pb.cc |56.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/data_integrity_trails.grpc.pb.cc |56.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/health.pb.cc |56.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/health.grpc.pb.cc |56.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/Flask-Cors/py3/libpy3python-Flask-Cors-py3.global.a |56.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/http_config.grpc.pb.cc |56.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/hive.grpc.pb.cc |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/hive.pb.cc |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/external_sources.grpc.pb.cc |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/database_basic_sausage_metainfo.pb.cc |56.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/Flask/py3/libpy3python-Flask-py3.global.a |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/key.grpc.pb.cc |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/kafka.pb.cc |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/import.grpc.pb.cc |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/import.pb.cc |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/index_builder.pb.cc |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/kesus.pb.cc |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/mirror3of4.cpp |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/kesus.grpc.pb.cc |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/kafka.grpc.pb.cc |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_reboots/ut_reboots.cpp |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/database_basic_sausage_metainfo.grpc.pb.cc |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/kqp.grpc.pb.cc |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/key.pb.cc |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/kqp_physical.grpc.pb.cc |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/kqp.pb.cc |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/data_integrity_trails.pb.cc |56.4%| [CC] {BAZEL_DOWNLOAD} 
$(B)/ydb/core/protos/index_builder.grpc.pb.cc |56.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/public/tools/lib/cmds/ut/objcopy_0ade7a5662c6292edc3a8de02f.o |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/get.cpp |56.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/public/tools/lib/cmds/ut/objcopy_e2cd022168ff179d1441f5d3df.o |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/health_check/health_check_ut.cpp |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/group_reconfiguration.cpp |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/db_metadata_cache.grpc.pb.cc |56.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/public/tools/lib/cmds/ut/objcopy_c9ab749ab3188a8582c5cefa5e.o |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/export.pb.cc |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/datashard_load.grpc.pb.cc |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/datashard_backup.pb.cc |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/ut_helpers.cpp |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/datashard_backup.grpc.pb.cc |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/datashard_config.grpc.pb.cc |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/filestore_config.pb.cc |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/datashard_config.pb.cc |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/datashard_load.pb.cc |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/export.grpc.pb.cc |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/drivemodel.grpc.pb.cc |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/multiget.cpp |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/alloc.pb.cc |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blob_depot_config.grpc.pb.cc |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/datashard_load.pb.h_serialized.cpp |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_node_broker.grpc.pb.cc |56.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/apps/ydb/commands/libcommands.a |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/auth.pb.cc |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_statistics_aggregator.grpc.pb.cc |56.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/library/yaml_config/validator/validator_builder.cpp |56.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/public/tools/lib/cmds/ut/ydb-public-tools-lib-cmds-ut |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/filestore_config.grpc.pb.cc |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/follower_group.grpc.pb.cc |56.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/external_sources.pb.cc |56.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/follower_group.pb.cc |56.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/flat_scheme_op.grpc.pb.cc |56.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/flat_tx_scheme.grpc.pb.cc |56.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/flat_tx_scheme.pb.cc |56.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/downtime_ut.cpp |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_tx_proxy.grpc.pb.cc |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/grpc.pb.cc |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/grpc_pq_old.grpc.pb.cc |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/db_metadata_cache.pb.cc |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/snapshots.cpp |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/grpc.grpc.pb.cc |56.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/flat_scheme_op.pb.cc |56.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/apps/ydb/main.cpp |56.5%| [CC] 
{BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet_flat/flat_executor_database_ut.cpp |56.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_pq.pb.cc |56.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/breakpad/src/client/linux/libsrc-client-linux.a |56.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/apps/ydb/objcopy_774cbd1f10ee287899289ecb3f.o |56.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_mediator.pb.cc |56.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_node_broker.pb.cc |56.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_replication.grpc.pb.cc |56.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_schemeshard.grpc.pb.cc |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_pq.grpc.pb.cc |56.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/bind_channel_storage_pool.pb.cc |56.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blob_depot.grpc.pb.cc |56.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yaml_config/tools/dump_ds_init/yaml-to-proto-dump-ds-init |56.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/public/lib/ydb_cli/common/ut/ydb-public-lib-ydb_cli-common-ut |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blob_depot.pb.cc |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_base.pb.cc |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blob_depot_config.pb.cc |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_base.grpc.pb.cc |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_base3.grpc.pb.cc |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/change_exchange.grpc.pb.cc |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_config.pb.cc |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/console_base.grpc.pb.cc |56.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/viewer/json/json_ut.cpp |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_vdisk_config.grpc.pb.cc |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_pdisk_config.grpc.pb.cc |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_disk.grpc.pb.cc |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_disk.pb.cc |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_vdisk_internal.pb.cc |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_pdisk_config.pb.h_serialized.cpp |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_pdisk_config.pb.cc |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/bootstrapper.pb.cc |56.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/bootstrap.grpc.pb.cc |56.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/self_heal.cpp |56.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet_flat/flat_executor_ut.cpp |56.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_vdisk_config.pb.cc |56.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blockstore_config.pb.cc |56.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blockstore_config.grpc.pb.cc |56.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage.pb.cc |56.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/bootstrap.pb.cc |56.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/bootstrapper.grpc.pb.cc |56.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/compile_service_config.pb.cc |56.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/console.grpc.pb.cc |56.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/gc_quorum_3dc.cpp |56.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/cms.grpc.pb.cc |56.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/channel_purpose.pb.cc |56.2%| [CC] {BAZEL_DOWNLOAD} 
$(B)/ydb/core/protos/change_exchange.pb.cc |56.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/breakpad/src/liblibs-breakpad-src.a |56.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/compaction.pb.cc |56.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/sanitize_groups.cpp |56.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/compaction.grpc.pb.cc |56.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/channel_purpose.grpc.pb.cc |56.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/config_units.pb.cc |56.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/config_units.grpc.pb.cc |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/compile_service_config.grpc.pb.cc |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/cms.pb.cc |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/config.grpc.pb.cc |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage.grpc.pb.cc |56.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/library/sqs/libpy3tests-library-sqs.global.a |56.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_blob_depot.grpc.pb.cc |56.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_columnshard.pb.cc |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/console_tenant.grpc.pb.cc |56.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/providers/yt/actors/ut/yql_yt_lookup_actor_ut.cpp |56.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/config.pb.cc |56.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/yt/actors/libproviders-yt-actors.a |56.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/console_config.grpc.pb.cc |56.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/console_base.pb.cc |56.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/library/yaml_config/ut_transform/objcopy_342e8590e41686b18307d054a9.o |56.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/incorrect_queries.cpp |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/extra_block_checks.cpp |56.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/fq/http_api/ydb-tests-fq-http_api |56.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/library/yaml_config/ut_transform/objcopy_b5b36403e069f48d06f8367722.o |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_backup.pb.cc |56.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/library/yaml_config/ut_transform/objcopy_c693478edc1220e7a9143567d1.o |56.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/console_config.pb.cc |56.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters.grpc.pb.cc |56.5%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/federated_query/common/libut-federated_query-common.a |56.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_cms.pb.cc |56.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/console_tenant.pb.cc |56.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_columnshard.grpc.pb.cc |56.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/blobstorage_distributed_config.pb.cc |56.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/http_api/objcopy_7eade8c49389813f8c36b72b5b.o |56.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/base/ut/ydb-core-blobstorage-base-ut |56.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/library/yaml_config/validator/validator.cpp |56.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/block_race.cpp |56.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/http_api/objcopy_7bfd03a31f5e230607792f10cc.o |56.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/http_api/objcopy_1a1e300767b552f4c13c3295d0.o |56.5%| [PY] {BAZEL_DOWNLOAD} 
$(B)/ydb/tests/fq/http_api/objcopy_4f92526e13553482736b942b2c.o |56.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/dynamic_config/dynamic_config_ut.cpp |56.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/http_api/objcopy_3209cda00462f2963f3cbbc912.o |56.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters.pb.cc |56.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_blob_depot.pb.cc |56.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_bs_controller.grpc.pb.cc |56.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_cms.grpc.pb.cc |56.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_vdisk_internal.grpc.pb.cc |56.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_backup.grpc.pb.cc |56.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_bs_controller.pb.cc |56.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_datashard.grpc.pb.cc |56.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_coordinator.grpc.pb.cc |56.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_datashard.pb.cc |56.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_kesus.grpc.pb.cc |56.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_coordinator.pb.cc |56.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_disk_color.pb.cc |56.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_hive.pb.cc |56.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_hive.grpc.pb.cc |56.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_disk_color.grpc.pb.cc |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/intrusive_stack_ut.cpp |56.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/fq/restarts/ydb-tests-fq-restarts |56.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/restarts/objcopy_bf578b7161cc94bf18488d04ca.o |56.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/restarts/objcopy_e7477203b27fa0321cf18fd7ee.o |56.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/restarts/objcopy_f928a40774b17a9d6cd7cabd2c.o |56.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/restarts/849c58233edc33539cbeb93a31_raw.auxcpp |56.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/restarts/objcopy_b8d63b589074145793d63c27a3.o |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/concurrent_rw_hash_ut.cpp |56.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yaml_config/ut/ydb-library-yaml_config-ut |56.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/patch.cpp |56.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yql/providers/dq/scheduler/ut/ydb-library-yql-providers-dq-scheduler-ut |56.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/responses/py3/libpy3python-responses-py3.global.a |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/queue_oneone_inplace_ut.cpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/cache_cache_ut.cpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/hyperlog_counter_ut.cpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/providers/dq/provider/yql_dq_provider_ut.cpp |56.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_vdisk/gen_restarts.cpp |56.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/intrusive_fixed_hash_set_ut.cpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/lf_stack_ut.cpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/event_priority_queue_ut.cpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/hazard_ut.cpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/ui64id_ut.cpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/intrusive_heap_ut.cpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/log_priority_mute_checker_ut.cpp |56.6%| [CC] 
{BAZEL_DOWNLOAD} $(S)/ydb/core/util/lz4_data_generator_ut.cpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/stlog_ut.cpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/simple_cache_ut.cpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/token_bucket_ut.cpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/wildcard_ut.cpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/fast_tls_ut.cpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/ulid_ut.cpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/page_map_ut.cpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/bits_ut.cpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/queue_inplace_ut.cpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_row_versions_ut.cpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/btree_ut.cpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/operation_queue_priority_ut.cpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/circular_queue_ut.cpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/fragmented_buffer_ut.cpp |56.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/library/fyamlcpp/fyamlcpp_ut.cpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/operation_queue_ut.cpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/cache_ut.cpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/btree_cow_ut.cpp |56.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/colorama/py3/libpy3python-colorama-py3.global.a |56.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/codegen/no_llvm/libminikql-codegen-no_llvm.a |56.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/click/py3/libpy3python-click-py3.global.a |56.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/ut_helpers.cpp |56.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/mixedpy/objcopy_d2e759e2d0ff1243166a3bc7d9.o |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_crypto_ut.cpp |56.6%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_blobstorage/lib/libblobstorage-ut_blobstorage-lib.a |56.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/computation/no_llvm/libminikql-computation-no_llvm.a |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_ut_sectormap.cpp |56.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/mixedpy/objcopy_51562f83ff52d1ceaac0c36a08.o |56.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/mixedpy/objcopy_fe9c8c25e6c570097a9d0c06f9.o |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/address_classifier_ut.cpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/interval_set_ut.cpp |56.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/bucket_quoter/liblibrary-cpp-bucket_quoter.a |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_ut_context.cpp |56.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/pq_read/test/objcopy_45b6981aed17dda33d43217f52.o |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_ut_env.cpp |56.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/scrub_fast.cpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/pdisk/mock/pdisk_mock.cpp |56.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/postgres_integrations/library/libpy3tests-postgres_integrations-library.global.a |56.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/patched/replxx/librestricted-patched-replxx.a |56.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/postgres_integrations/library/ut/objcopy_cf5836766ac30ca7ea957ce368.o |56.7%| [PY] {BAZEL_DOWNLOAD} 
$(B)/ydb/tests/postgres_integrations/library/ut/objcopy_899316667b8914fe8ec3af85d9.o |56.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/postgres_integrations/library/ut/objcopy_daba02a22b66dd174e40603586.o |56.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/tbb/libcontrib-libs-tbb.a |56.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/generic/connector/tests/datasource/mysql/364af2d5bcc4d0c488c09257c5_raw.auxcpp |56.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/generic/connector/tests/datasource/mysql/objcopy_41a67a8b373ce2db88d0a50b4b.o |56.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/postgresql/objcopy_e4166f3d104a6751b45e7e712f.o |56.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/generic/connector/tests/datasource/mysql/objcopy_5a23f199ba2ad5114d97d1e863.o |56.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/log/tests/objcopy_2f7ac0f750374152d13c6bfbcf.o |56.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/generic/connector/tests/datasource/mysql/objcopy_71c5c57afe9530748c30b055f8.o |56.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/postgresql/objcopy_b9aaa278b10ed44e5645b3ef2f.o |56.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/postgresql/common/libpy3functional-postgresql-common.global.a |56.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/lz4/py3/libpy3python-lz4-py3.global.a |56.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/log/tests/objcopy_a926d3332cb769ac3e6c9e6e37.o |56.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/log/tests/objcopy_854d6cc7a0cc5cdd793cfc1e6d.o |56.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/lz4/py3/libpy3python-lz4-py3.a |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/validation.cpp |56.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/db_pool.pb.{h, cc} |56.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/http_config.pb.cc |56.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/postgres_integrations/library/ut/ydb-tests-postgres_integrations-library-ut |56.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/task_controller.pb.{h, cc} |56.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/bootstrap.{pb.h ... grpc.pb.h} |56.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_schemeshard.grpc.pb.cc |56.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/health_config.pb.{h, cc} |56.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/node_broker.pb.cc |56.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_base.{pb.h ... grpc.pb.h} |56.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_disk_color.{pb.h ... grpc.pb.h} |56.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/e9ba3ee2f0ee1966e63998b143_raw.auxcpp |56.7%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/crypto/ut/ydb-core-blobstorage-crypto-ut |56.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blob_depot.pb.cc |56.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/memory_controller_config.grpc.pb.cc |56.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/table_service_config.grpc.pb.cc |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_vdisk/vdisk_test.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blockstore_config.pb.cc |56.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/hive.pb.cc |56.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tenant_pool.{pb.h ... 
grpc.pb.h} |56.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yaml_config/ut_transform/ydb-library-yaml_config-ut_transform |56.7%| [PR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/pq/expr_nodes/yql_pq_expr_nodes.{gen.h ... defs.inl.h} |56.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/memory_controller_config.pb.cc |56.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/shared_cache.pb.cc |56.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage.{pb.h ... grpc.pb.h} |56.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/profiler.pb.cc |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/olap/helpers/writer.cpp |56.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/control_plane_storage/internal/ut/objcopy_c96ef635306ccee8a5cf6359f1.o |56.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/io_formats/arrow/scheme/ut/ydb-core-io_formats-arrow-scheme-ut |56.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tenant_slot_broker.{pb.h ... grpc.pb.h} |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/control_plane_storage/internal/ut/utils_ut.cpp |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/olap/helpers/typed_local.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tablet_database.pb.cc |56.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/api/grpc/api.{pb.h ... grpc.pb.h} |56.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/draft/ydb_tablet.pb.{h, cc} |56.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/viewer/json/ut/ydb-core-viewer-json-ut |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/decommit_3dc.cpp |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_vdisk/mon_reregister_ut.cpp |56.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/long_tx_service/public/ut/ydb-core-tx-long_tx_service-public-ut |56.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/common/mkql_simple_file/libproviders-common-mkql_simple_file.a |56.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/cms.{pb.h ... grpc.pb.h} |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/ydb/ydb_object_storage_ut.cpp |56.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tracing.{pb.h ... grpc.pb.h} |56.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/schemeshard/olap/bg_tasks/protos/data.pb.{h, cc} |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/tests/tools/kqprun/src/kqp_runner.cpp |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/config/bsconfig_ut.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/msgbus_health.grpc.pb.cc |56.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/nodes_manager.pb.{h, cc} |56.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/drivemodel.{pb.h ... grpc.pb.h} |56.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/http_config.{pb.h ... grpc.pb.h} |56.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/draft/datastreams.pb.{h, cc} |56.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/kqprun/src/proto/storage_meta.pb.{h, cc} |56.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/config.{pb.h ... grpc.pb.h} |56.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/compile_service_config.{pb.h ... 
grpc.pb.h} |56.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/draft/ydb_maintenance.pb.{h, cc} |56.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/coordinator/protos/events.pb.{h, cc} |56.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/itsdangerous/py3/libpy3python-itsdangerous-py3.global.a |56.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/5a51b30173b13c865a1488561e_raw.auxcpp >> TBlobStorageCrypto::TestMixedStreamCypher [GOOD] >> TBlobStorageCrypto::TestOffsetStreamCypher |56.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_disk.grpc.pb.cc |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/acceleration.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/221e269cd278844526401f2e44_raw.auxcpp |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/persqueue_v1/ut/describes_ut/describe_topic_ut.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_disk.pb.cc |56.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/memory_controller_config.{pb.h ... grpc.pb.h} |56.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/7376f011aca56ff6505ce31aa2_raw.auxcpp |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/cms_tenants_ut.cpp |56.7%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/core/resource_pools/ut/ydb-core-resource_pools-ut |56.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_base3.pb.cc >> TBlobStorageCrypto::TestOffsetStreamCypher [GOOD] >> TBlobStorageCrypto::TestInplaceStreamCypher [GOOD] >> TBlobStorageCrypto::PerfTestStreamCypher |56.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/example/objcopy_2b682e146a665bfa19210b0fd9.o |56.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/public/udf/service/terminate_policy/libudf-service-terminate_policy.global.a |56.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/example/objcopy_e0aef87c4bf15cfdc957f4bdd1.o |56.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/example/objcopy_c623700776b43ee95ec93c56f9.o |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/yaml_config/yaml_config_parser_ut.cpp |56.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/db_pool/protos/config.pb.{h, cc} |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/persqueue_v1/ut/describes_ut/ic_cache_ut.cpp |56.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_table.pb.{h, cc} |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/encryption.cpp >> TBlobStorageCrypto::PerfTestStreamCypher [GOOD] >> TBlobStorageCrypto::UnalignedTestStreamCypher [GOOD] >> TBlobStorageCryptoRope::TestEqualInplaceStreamCypher |56.7%| [PR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/expr_nodes/kqp_expr_nodes.{gen.h ... defs.inl.h} |56.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/services/bg_tasks/protos/container.pb.{h, cc} |56.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tx_sequenceshard.pb.cc |56.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tx_sequenceshard.grpc.pb.cc |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/main.cpp |56.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/memory_stats.{pb.h ... 
grpc.pb.h} |56.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yql/dq/state/ut/ydb-library-yql-dq-state-ut |56.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/codec/codegen/no_llvm/libcodec-codegen-no_llvm.a |56.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/numpy/py3/numpy/random/libpy3py3-numpy-random.global.a |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/dq/state/ut/dq_state_load_plan_ut.cpp |56.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/example/ydb-tests-example |56.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/comp_nodes/no_llvm/libyt-comp_nodes-no_llvm.a |56.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/numpy/py3/libpy3python-numpy-py3.global.a |56.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/invoke_builtins/no_llvm/libminikql-invoke_builtins-no_llvm.a |56.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/backup.{pb.h ... grpc.pb.h} |56.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/bootstrap.{pb.h ... grpc.pb.h} |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/yaml_config/yaml_config_ut.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/mvp/core/mvp_test_runtime.cpp |56.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters.{pb.h ... grpc.pb.h} |56.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/gateway/file/libyt-gateway-file.a |56.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_private/quota/libclient-yc_private-quota.a |56.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/nc_private/iam/libclient-nc_private-iam.a |56.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/mvp/core/protos/libmvp-core-protos.a |56.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/numpy/py3/numpy/random/libpy3py3-numpy-random.a |56.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_private/oauth/libclient-yc_private-oauth.a |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/mvp/core/mvp_tokens.cpp |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/fq/libs/row_dispatcher/format_handler/ut/common/ut_common.cpp |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/yaml_config/yaml_config_proto2yaml_ut.cpp |56.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/mvp/security/simple/libmvp-security-simple.a |56.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/mvp/core/libydb-mvp-core.a |56.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_private/access/libclient-yc_private-access.a |56.8%| [EN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/normalizer/abstract/abstract.h_serialized.cpp |56.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/testing/group_overseer/libblobstorage-testing-group_overseer.a >> TBlobStorageCryptoRope::TestEqualInplaceStreamCypher [GOOD] >> TBlobStorageCryptoRope::TestEqualMixedStreamCypher |56.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/common/http_gateway/mock/libcommon-http_gateway-mock.a |56.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/ydb_debug_v1.grpc.pb.cc |56.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yql/providers/yt/actors/ut/ydb-library-yql-providers-yt-actors-ut |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/yaml_config/tools/dump_ds_init/main.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/workload_manager_config.grpc.pb.cc |56.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_private/servicecontrol/resource.{pb.h ... 
grpc.pb.h} |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/mirror3dc.cpp >> ResourcePoolClassifierTest::SettingsExtracting [GOOD] >> ResourcePoolClassifierTest::IntSettingsParsing [GOOD] >> ResourcePoolTest::SettingsValidation [GOOD] >> ResourcePoolTest::IntSettingsParsing [GOOD] >> ResourcePoolTest::SecondsSettingsParsing [GOOD] >> ResourcePoolTest::SettingsExtracting [GOOD] >> ResourcePoolTest::PercentSettingsParsing [GOOD] >> ResourcePoolClassifierTest::SettingsValidation [GOOD] >> ResourcePoolClassifierTest::StringSettingsParsing [GOOD] |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/counting_events.cpp |56.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_private/accessservice/sensitive.{pb.h ... grpc.pb.h} |56.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_private/servicecontrol/access_service.{pb.h ... grpc.pb.h} |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/client/metadata/ut/functions_metadata_ut.cpp |56.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_private/accessservice/resource.{pb.h ... grpc.pb.h} |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/blobstorage_distributed_config.grpc.pb.cc |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/providers/generic/provider/ut/pushdown/pushdown_ut.cpp |56.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/kqp/kqp_query_svc/ydb-tests-functional-kqp-kqp_query_svc |56.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/ydb_operation_v1.grpc.pb.cc |56.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_private/ydb/v1/libyc_private-ydb-v1.a >> TErasurePerfTest::Restore [GOOD] >> TErasureSmallBlobSizePerfTest::StringErasureMode [GOOD] >> TErasureSmallBlobSizePerfTest::ConvertToRopeMode [GOOD] |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/ydb/ydb_bulk_upsert_ut.cpp |56.7%| [LD] {BAZEL_DOWNLOAD} $(B)/tools/enum_parser/enum_parser/enum_parser |56.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/apps/ydb/ydb |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/tests/functional/kqp/kqp_query_svc/main.cpp |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_ut_helpers.cpp |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/tests/tools/kqprun/src/ydb_setup.cpp |56.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/public/sdk/cpp/tests/unit/client/coordination/ydb-public-sdk-cpp-tests-unit-client-coordination |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_vdisk/huge_migration_ut.cpp |56.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/draft/ydb_tablet_v1.{pb.h ... grpc.pb.h} |56.7%| [TS] {asan, default-linux-x86_64, release} ydb/core/resource_pools/ut/unittest >> ResourcePoolClassifierTest::StringSettingsParsing [GOOD] |56.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/quota_manager/proto/quota_internal.pb.{h, cc} |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/tests/tools/kqprun/src/actors.cpp |56.7%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/library/yaml_config/tools/simple_json_diff/simple_json_diff |56.7%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/tools/solomon_emulator/bin/solomon_emulator |56.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/nc_private/iam/token_service.{pb.h ... 
grpc.pb.h} |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/tests/unit/client/coordination/coordination_ut.cpp |56.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/tornado/tornado-4/libpy3python-tornado-tornado-4.a |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/examples/pagination/pagination.cpp |56.7%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/tools/solomon_emulator/recipe/solomon_recipe |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/metrics/ut/sanitize_label_ut.cpp |56.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/tornado/tornado-4/libpy3python-tornado-tornado-4.global.a |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/metrics/ut/metrics_ut.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/examples/pagination/main.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/examples/pagination/pagination_data.cpp |56.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/fq/libs/metrics/ut/ydb-core-fq-libs-metrics-ut |56.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/mvp/core/protos/mvp.pb.{h, cc} |56.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_rate_limiter.pb.{h, cc} |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/lib/node_warden_mock_pipe.cpp |56.7%| [TS] {asan, default-linux-x86_64, release} ydb/core/erasure/ut_perf/unittest >> TErasureSmallBlobSizePerfTest::ConvertToRopeMode [GOOD] |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/ydb/ydb_login_ut.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/test_connection/ut/test_connection_ut.cpp |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/ydb/ydb_query_ut.cpp |56.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_private/oauth/cloud_user.{pb.h ... grpc.pb.h} |56.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/pq/proto/dq_task_params.pb.{h, cc} |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/external_sources/object_storage/inference/ut/arrow_inference_ut.cpp |56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_private/oauth/claims.{pb.h ... grpc.pb.h} |56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_private/oauth/session_service.{pb.h ... grpc.pb.h} |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/lib/node_warden_mock_bsc.cpp |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/federated_query/common/common.cpp |56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/export/protos/task.pb.{h, cc} |56.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/graph_params/proto/graph_params.pb.{h, cc} |56.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/numpy/py3/libpy3python-numpy-py3.a |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/deadlines.cpp |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/ydb/ydb_ldap_login_ut.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/examples/vector_index/main.cpp |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/mvp/core/mvp_ut.cpp |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/ydb/ydb_coordination_ut.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/examples/vector_index/vector_index.cpp |56.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blob_depot.{pb.h ... grpc.pb.h} |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/osiris.cpp |56.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/datashard_backup.{pb.h ... 
grpc.pb.h} |56.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/ydb_monitoring_v1.grpc.pb.cc |56.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/containers/top_keeper/libcpp-containers-top_keeper.a |56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/export/protos/storage.pb.{h, cc} |56.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/client/metadata/ut/ydb-core-client-metadata-ut |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/ydb/ydb_import_ut.cpp |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/ydb/ydb_stats_ut.cpp |56.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/tools/kqprun/tests/ydb-tests-tools-kqprun-tests |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/hulldb/cache_block/cache_block_ut.cpp |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/ydb/ydb_logstore_ut.cpp |56.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/public/sdk/cpp/examples/vector_index/vector_index |56.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/public/sdk/cpp/tests/unit/client/result/ydb-public-sdk-cpp-tests-unit-client-result |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/ydb/ydb_scripting_ut.cpp |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/ydb/ydb_read_rows_ut.cpp |56.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/kqprun/tests/objcopy_5923b362516b6632b9769a5db2.o |56.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/kqprun/tests/objcopy_278b1a63a14648a80c4b930adb.o |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/ydb/ydb_ut.cpp |56.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/digest/sfh/libcpp-digest-sfh.a |56.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/kqprun/tests/objcopy_6b37760fb6a28054d0feafd61d.o |56.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/audit/daf02fd86bb7e2296f1437ae1f_raw.auxcpp |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard_path_element.cpp |56.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/audit/objcopy_643fa2679e88d9b2d33558b050.o |56.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/public/sdk/cpp/examples/pagination/pagination |56.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/library/fixtures/libpy3tests-library-fixtures.global.a |56.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/audit/objcopy_53073eb93c76466fca8f474c5f.o |56.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/audit/objcopy_fe15eb83a42d9d70d347bbba65.o |56.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/accurate_accumulate/liblibrary-cpp-accurate_accumulate.a |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/tests/unit/client/result/result_ut.cpp |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/ds_proxy_lwtrace.cpp |56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/row_dispatcher/protos/events.pb.{h, cc} |56.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_disk_color.pb.cc |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/donor.cpp |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/ydb/ydb_monitoring_ut.cpp |56.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/mvp/core/ut/ydb-mvp-core-ut |56.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/histogram/adaptive/protos/libhistogram-adaptive-protos.a |56.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stability/tool/objcopy_7406de026bf25e30e96a88517d.o |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/ydb/ydb_index_table_ut.cpp |56.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stability/tool/tool |56.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/histogram/adaptive/libcpp-histogram-adaptive.a |56.8%| [LD] {BAZEL_DOWNLOAD, 
FAILED} $(B)/ydb/tests/functional/audit/ydb-tests-functional-audit |56.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/hyperloglog/liblibrary-cpp-hyperloglog.a |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/olap/helpers/query_executor.cpp |56.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/annotations/validation.pb.{h, cc} |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/ydb/ydb_table_ut.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/codecs/ut/codecs_ut.cpp |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/auth/users.cpp |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/ydb/ydb_bulk_upsert_olap_ut.cpp |56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_common.pb.{h, cc} |56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_issue_message.pb.{h, cc} |56.8%| [PB] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/public/api/protos/ydb_config.pb.{h, cc} |56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_operation.pb.{h, cc} |56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_status_codes.pb.{h, cc} |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/ydb/ydb_olapstore_ut.cpp |56.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/persqueue/codecs/ut/ydb-core-persqueue-codecs-ut |56.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/ydb_rate_limiter_v1.grpc.pb.cc |56.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/load_test.{pb.h ... grpc.pb.h} |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/lib/node_warden_mock_state.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/mvp/meta/meta_cache_ut.cpp |56.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/console.{pb.h ... grpc.pb.h} |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/ydb/ydb_register_node_ut.cpp |56.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/public/sdk/cpp/tests/integration/bulk_upsert/ydb-public-sdk-cpp-tests-integration-bulk_upsert |56.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yql/providers/generic/pushdown/ut/ydb-library-yql-providers-generic-pushdown-ut |56.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/common/libpy3tests-olap-common.global.a |56.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/olap/scenario/ydb-tests-olap-scenario |56.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/mvp/meta/libydb-mvp-meta.a |56.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/kv/tests/objcopy_08f7acdb6eb761b28bf6990862.o |56.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/kv/tests/objcopy_c7c229be41e9b028572ad1aab3.o |56.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/scenario/objcopy_36807918bd7a86c1ea37310c9c.o |56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/console_config.{pb.h ... grpc.pb.h} |56.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yql/providers/generic/provider/ut/pushdown/yql-providers-generic-provider-ut-pushdown |56.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/scenario/objcopy_656baae3c1e24959f5bcc457d7.o |56.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/scenario/objcopy_0ab925f82bbba07bf3b749dc3c.o |56.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/scenario/helpers/libpy3olap-scenario-helpers.global.a |56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/console_tenant.{pb.h ... 
grpc.pb.h} |56.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/kv/tests/ydb-tests-stress-kv-tests |56.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/kv/tests/objcopy_5294a064c14cf5a49516321590.o |56.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/scenario/709f125727d9ea4165df516509_raw.auxcpp |56.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/scenario/objcopy_5992d4831c5055a481712a2a80.o |56.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/column_family/compression/objcopy_1ab2a5a6dd84a6c9ff5d5c50b0.o |56.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/column_family/compression/objcopy_3bdea7737a87c43bfaa0aaf4c3.o |56.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/column_family/compression/objcopy_6887bde1dc99f5c5c2f0922842.o |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/tests/integration/bulk_upsert/main.cpp |56.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/linear_regression/liblibrary-cpp-linear_regression.a |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/tests/integration/bulk_upsert/bulk_upsert.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/providers/generic/pushdown/ut/match_predicate_ut.cpp |56.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/external_sources/object_storage/inference/ut/external_sources-object_storage-inference-ut |56.8%| [LD] {BAZEL_DOWNLOAD} $(B)/contrib/tools/protoc/protoc |56.8%| [EN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/reader/abstract/read_metadata.h_serialized.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/dq/actors/compute/ut/dq_source_watermark_tracker_ut.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/dq/actors/compute/ut/dq_compute_issues_buffer_ut.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/dq/actors/compute/ut/dq_compute_actor_ut.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/dq/actors/compute/ut/dq_compute_actor_async_input_helper_ut.cpp |56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/labeled_counters.{pb.h ... grpc.pb.h} |56.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/objcopy_2cc418e8604751e5b8f9029a81.o |56.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/objcopy_ef822f612b696eb514a5565056.o |56.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/olap/column_family/compression/ydb-tests-olap-column_family-compression |56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/sys_view.{pb.h ... grpc.pb.h} |56.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/olap/ydb-tests-olap |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/apps/ydb/ut/ydb-dump.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/apps/ydb/ut/parse_command_line.cpp |56.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/objcopy_ad84868df819de98481440cf0a.o |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/apps/ydb/ut/workload-transfer-topic-to-table.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/apps/ydb/ut/run_ydb.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/apps/ydb/ut/supported_codecs_fixture.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/apps/ydb/ut/supported_codecs.cpp |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/agent/comm.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/apps/ydb/ut/workload-topic.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/scheme_board/double_indexed_ut.cpp |56.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/regex/pire/libcpp-regex-pire.a |56.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/serverless/13360e4ecdf34efe6c3a817a44_raw.auxcpp |56.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/serverless/objcopy_cf3971576aced18377e99f5367.o |56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/apps/etcd_proxy/proto/kv.{pb.h ... 
grpc.pb.h} >> TBlobStorageCryptoRope::TestEqualMixedStreamCypher [GOOD] >> TBlobStorageCryptoRope::TestMixedStreamCypher |56.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/serverless/objcopy_7c81cbfa6b5ce112674cb0a849.o |56.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/boost/filesystem/librestricted-boost-filesystem.a |56.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tx_mediator_timecast.{pb.h ... grpc.pb.h} |56.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/serverless/objcopy_e2acb41e7099c0db4fe54a1587.o |56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/file_storage/proto/file_storage.pb.{h, cc} |56.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/streams/xz/libcpp-streams-xz.a |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/agent/garbage.cpp |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/sentinel_ut.cpp |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/agent/blocks.cpp |56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/data_sharing/protos/sessions.pb.{h, cc} |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_testshard/main.cpp |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard_build_index__create.cpp |56.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/sql/objcopy_2f0e0ac8198858b9ec9901778e.o >> TBlobStorageCryptoRope::TestMixedStreamCypher [GOOD] >> TBlobStorageCryptoRope::TestOffsetStreamCypher |56.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/sql/objcopy_83efacabe56767ae4f106a6d27.o |56.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/library/ut/objcopy_cd9abca883cad9b25e20bf2f08.o |56.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/sql/objcopy_f738234258cd034cd5383f92ad.o |56.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/library/ut/objcopy_6508d12aaafde6f0a60fe8fff3.o |56.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/testing/benchmark/libcpp-testing-benchmark.a |56.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/serverless/ydb-tests-functional-serverless |56.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/sql/ydb-tests-sql |56.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/mvp/meta/ut/ydb-mvp-meta-ut |56.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/testing/benchmark/main/libtesting-benchmark-main.global.a |56.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/library/ut/objcopy_bd84885c5c24478d181ba9e493.o |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/olap/operations/alter/abstract/update.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/generated/codegen/main.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/protobuf_printer/protobuf_printer_ut.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/library/protobuf_printer/ut/test_proto.pb.cc |56.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/fq/libs/control_plane_storage/internal/ut/core-fq-libs-control_plane_storage-internal-ut >> TBlobStorageCryptoRope::TestOffsetStreamCypher [GOOD] >> TBlobStorageCryptoRope::TestInplaceStreamCypher |56.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/library/ut/ydb-tests-library-ut |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/olap/operations/alter/abstract/context.cpp |56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_private/iam/iam_token.{pb.h ... 
grpc.pb.h} |56.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/library/serializability/libpy3tests-library-serializability.global.a |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_copy_table.cpp |56.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/generated/codegen/codegen >> TBlobStorageCryptoRope::TestInplaceStreamCypher [GOOD] >> TBlobStorageCryptoRope::PerfTestStreamCypher |56.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/tld/liblibrary-cpp-tld.a |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_backup.cpp |56.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/unicode/punycode/libcpp-unicode-punycode.a |56.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/timezone_conversion/liblibrary-cpp-timezone_conversion.a |56.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/protobuf_printer/ut/ydb-library-protobuf_printer-ut |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/memory_controller/memtable_collection_ut.cpp |56.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/ydb_serializable/lib/libpy3tools-ydb_serializable-lib.global.a |56.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/ydb_serializable/replay/objcopy_efd352795aee39d7ac6e163a2d.o |56.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/ydb_serializable/replay/libpy3tools-ydb_serializable-replay.global.a |56.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yaml_config/tools/dump/yaml-to-proto-dump |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/db_id_async_resolver_impl/ut/mdb_endpoint_generator_ut.cpp |56.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/apps/ydb/ut/ydb-apps-ydb-ut |56.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/yt/libydb-core-yt.a >> TBlobStorageCryptoRope::PerfTestStreamCypher [GOOD] >> TBlobStorageCryptoRope::UnalignedTestStreamCypher [GOOD] >> TChaCha::KeystreamTest1 [GOOD] >> TChaCha::KeystreamTest2 [GOOD] >> TChaCha::KeystreamTest3 [GOOD] >> TChaCha::KeystreamTest4 [GOOD] >> TChaCha::KeystreamTest5 [GOOD] >> TChaCha::KeystreamTest6 [GOOD] >> TChaCha::KeystreamTest7 [GOOD] >> TChaCha::KeystreamTest8 [GOOD] >> TChaCha::MultiEncipherOneDecipher [GOOD] >> TChaCha::SecondBlock [GOOD] >> TChaCha512::KeystreamTest1 [GOOD] >> TChaCha512::KeystreamTest2 [GOOD] >> TChaCha512::KeystreamTest3 [GOOD] >> TChaCha512::KeystreamTest4 [GOOD] >> TChaCha512::KeystreamTest5 [GOOD] >> TChaCha512::KeystreamTest6 [GOOD] >> TChaCha512::KeystreamTest7 [GOOD] >> TChaCha512::KeystreamTest8 [GOOD] >> TChaCha512::MultiEncipherOneDecipher [GOOD] >> TChaCha512::SecondBlock [GOOD] >> TChaCha512::CompatibilityTest |56.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_base.grpc.pb.cc |56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_operation.pb.{h, cc} |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/blob_depot.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/providers/s3/provider/yql_s3_listing_strategy_ut.cpp |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/cms_ut_common.cpp |56.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tools/tstool/libpy3tstool.global.a |56.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tools/tstool/objcopy_6077c98b9810fee0e2250a36a4.o |56.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/jinja2cpp/libcontrib-libs-jinja2cpp.a |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/btree_benchmark/main.cpp |56.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/tools/ydb_serializable/replay/replay |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__pq_stats.cpp |56.8%| [PB] {BAZEL_DOWNLOAD} 
$(B)/ydb/core/tx/columnshard/data_sharing/protos/events.pb.{h, cc} |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/blob_depot_event_managers.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/yt/yt_shutdown.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/yt/yt_wrapper.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blob_depot_config.grpc.pb.cc |56.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tools/tstool/tstool |56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/library/cpp/monlib/encode/legacy_protobuf/protos/metric_meta.pb.{h, cc} |56.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/data_quotas/objcopy_89b3e69f7cdba68b4eefcae48c.o |56.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/data_quotas/objcopy_4b2e093abff756c97b675c0a31.o |56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/draft/ydb_view_v1.{pb.h ... grpc.pb.h} |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/blob_depot_test_functions.cpp |56.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/data_quotas/objcopy_a6e393b6d53f4c73feac80b55c.o |56.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/olap/data_quotas/ydb-tests-olap-data_quotas |56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/tiering/manager.cpp |56.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/ydb_table_impl.{pb.h ... grpc.pb.h} |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/tools/dq/dq_cli/main.cpp >> TChaCha512::CompatibilityTest [GOOD] >> TChaChaVec::KeystreamTest1 [GOOD] >> TChaChaVec::KeystreamTest2 [GOOD] >> TChaChaVec::KeystreamTest3 [GOOD] >> TChaChaVec::KeystreamTest4 [GOOD] >> TChaChaVec::KeystreamTest5 [GOOD] >> TChaChaVec::KeystreamTest6 [GOOD] >> TChaChaVec::KeystreamTest7 [GOOD] >> TChaChaVec::KeystreamTest8 [GOOD] >> TChaChaVec::MultiEncipherOneDecipher [GOOD] >> TChaChaVec::SecondBlock [GOOD] >> TChaChaVec::CompatibilityTest |56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/data_sharing/protos/data.pb.{h, cc} |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/olap/layout/layout.cpp |56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/ydb_scripting_v1.{pb.h ... grpc.pb.h} |56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/blobs_action/protos/blobs.pb.{h, cc} |56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/keyvalue/protos/events.pb.{h, cc} |56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/ydb_scheme_v1.{pb.h ... grpc.pb.h} |56.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yql/dq/actors/compute/ut/ydb-library-yql-dq-actors-compute-ut |56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/sqs.{pb.h ... grpc.pb.h} |56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/ydb_keyvalue_v1.{pb.h ... grpc.pb.h} |56.8%| [PR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/s3/expr_nodes/yql_s3_expr_nodes.{gen.h ... defs.inl.h} |56.8%| [PR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/generic/expr_nodes/yql_generic_expr_nodes.{gen.h ... defs.inl.h} |56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_discovery.pb.{h, cc} >> TChaChaVec::CompatibilityTest [GOOD] >> TPoly1305::TestVector1 [GOOD] >> TPoly1305::TestVector2 [GOOD] >> TPoly1305::TestVector3 [GOOD] >> TPoly1305::TestVector4 [GOOD] >> TPoly1305Vec::TestVector1 [GOOD] >> TPoly1305Vec::TestVector2 [GOOD] >> TPoly1305Vec::TestVector3 [GOOD] >> TPoly1305Vec::TestVector4 [GOOD] >> TTest_t1ha::TestZeroInputHashIsNotZero [GOOD] >> TTest_t1ha::PerfTest [GOOD] >> TTest_t1ha::T1haHashResultsStablilityTest [GOOD] |56.7%| [PR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/expr_nodes/yql_yt_expr_nodes.{gen.h ... 
defs.inl.h} |56.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tools/cfg/bin/ydb_configure |56.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/draft/ydb_backup_v1.{pb.h ... grpc.pb.h} |56.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/public/sdk/cpp/tests/unit/client/value/ydb-public-sdk-cpp-tests-unit-client-value |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/yaml_config/tools/dump/main.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/lib/ydb_cli/commands/topic_workload/topic_workload_params_ut.cpp |56.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tools/cfg/bin/libpy3ydb_configure.global.a |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/yt/export_yt.cpp |56.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tools/cfg/bin/objcopy_940b9a794cb8fbc6ebdf926276.o |56.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/rename/ydb-tests-functional-rename |56.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/public/lib/ydb_cli/commands/topic_workload/ut/ydb-public-lib-ydb_cli-commands-topic_workload-ut |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/apps/ydbd/export.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/lib/ydb_cli/commands/topic_workload/ut/topic_workload_writer_producer_ut.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/rename/dc048c91e67372877fc6ad2dfc_raw.auxcpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/tests/unit/client/value/value_ut.cpp |56.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/rename/objcopy_5865a174a6c25ca1a2d6386702.o |56.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/rename/objcopy_c02c3d9f840d02af9fad858a55.o |56.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/rename/objcopy_bfa810e70cd1de18c5d4a18a62.o |56.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/base/generated/codegen/ydb-core-base-generated-codegen |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/metering/time_grid_ut.cpp |56.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/rename/objcopy_5db899a01c2ec6f53648af6840.o |56.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/rename/objcopy_00c87b13e2f685811a9825079d.o |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/base/generated/codegen/main.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/metering/stream_ru_calculator_ut.cpp |56.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/copy_table/objcopy_589315062f5401a368910248f0.o |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/backup/ut/ut.cpp |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/backpressure/ut_client/backpressure_ut.cpp |56.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/copy_table/objcopy_c114cbf6b820d92320c1e2c912.o |56.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/util/btree_benchmark/btree_benchmark |56.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yql/tools/dq/dq_cli/dq_cli |56.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/ut_pdiskfit/lib/libblobstorage-ut_pdiskfit-lib.a |56.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/copy_table/objcopy_61613f0bd98876f149d8574891.o |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/cms/console/validators/validator_bootstrap_ut.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/base/auth_ut.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/cms/console/validators/validator_nameservice_ut.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/cms/console/validators/registry_ut.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/scheme/ut_pg/scheme_tablecell_pg_ut.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_blob_depot.grpc.pb.cc |56.8%| [LD] {BAZEL_DOWNLOAD, FAILED} 
$(B)/ydb/tests/datashard/copy_table/ydb-tests-datashard-copy_table |56.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/kqp.grpc.pb.cc |56.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/metering/ut/ydb-core-metering-ut |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/tablet/rpc_restart_tablet_ut.cpp |56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/ydb_table_v1.{pb.h ... grpc.pb.h} |56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/draft/fq_v1.{pb.h ... grpc.pb.h} |56.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/scheme_board/ut_double_indexed/ydb-core-tx-scheme_board-ut_double_indexed |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/huge/blobstorage_hullhugeheap_ut.cpp |56.8%| [TS] {asan, default-linux-x86_64, release} ydb/core/blobstorage/crypto/ut/unittest >> TTest_t1ha::T1haHashResultsStablilityTest [GOOD] |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/huge/top_ut.cpp |56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/yt/yt_proto/yt/core/yson/proto/protobuf_interop.pb.{h, cc} |56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/yt/yt_proto/yt/core/ytree/proto/ypath.pb.{h, cc} |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/tablet/rpc_change_schema_ut.cpp |56.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/backpressure/ut_client/ydb-core-blobstorage-backpressure-ut_client |56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/yt/yt_proto/yt/core/ytree/proto/request_complexity_limits.pb.{h, cc} |56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/ydb_export_v1.{pb.h ... grpc.pb.h} |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_vdisk2/huge.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/backup/common/encryption_ut.cpp |56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/yt/yt_proto/yt/client/chunk_client/proto/data_statistics.pb.{h, cc} |56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/yt/yt_proto/yt/client/node_tracker_client/proto/node_directory.pb.{h, cc} |56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/yt/yt_proto/yt/client/hive/proto/cluster_directory.pb.{h, cc} |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_pdiskfit/ut/main.cpp |56.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/backup/ut/ydb-library-backup-ut |56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/yt/yt_proto/yt/client/bundle_controller/proto/bundle_controller_service.pb.{h, cc} |56.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/scheme/ut_pg/ydb-core-scheme-ut_pg |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/op_load.cpp |56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/draft/ydb_replication_v1.{pb.h ... grpc.pb.h} |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/federated_query/kqp_federated_query_helpers_ut.cpp |56.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/olap/docs/generator/generator |56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/pdiskfit.{pb.h ... grpc.pb.h} |56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/ydb_topic_v1.{pb.h ... grpc.pb.h} |56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/draft/ydb_dynamic_config_v1.{pb.h ... grpc.pb.h} |56.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/base/ut_auth/ydb-core-base-ut_auth |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_services/tablet/rpc_execute_mkql_ut.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/apps/pgwire/main.cpp |56.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_node_broker.{pb.h ... 
grpc.pb.h} |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/apps/pgwire/pg_ydb_proxy.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/apps/pgwire/pg_ydb_connection.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/persqueue/topic_parser/ut/topic_names_converter_ut.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/apps/pgwire/pgwire.cpp |56.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yql/providers/s3/provider/ut/ydb-library-yql-providers-s3-provider-ut |56.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/docs/generator/libpy3olap-docs-generator.global.a |56.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/docs/generator/objcopy_ac8dbe7f54a2cb7efb6636f75f.o |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_pq.cpp |56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/ydb_query_v1.{pb.h ... grpc.pb.h} |56.8%| [PR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/pg/expr_nodes/yql_pg_expr_nodes.{gen.h ... defs.inl.h} |56.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_scripting.pb.{h, cc} |56.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/api/objcopy_253d734e8c901d319d84fcc6e9.o |56.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/api/objcopy_363b5875cc5c5e5745458b16b8.o |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/memory_controller/memory_controller_ut.cpp |56.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/api/ydb-tests-functional-api |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/export_s3_buffer_ut.cpp |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/fq/libs/row_dispatcher/ut/topic_session_ut.cpp |56.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/sqs/multinode/ydb-tests-functional-sqs-multinode |56.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/api/objcopy_303f7409bfab4277e367bbd11a.o |56.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/api/objcopy_e2a089b95d9316f6e26025d3e3.o |56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/ydb_rate_limiter_v1.{pb.h ... 
grpc.pb.h} |56.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/persqueue/topic_parser/ut/ydb-library-persqueue-topic_parser-ut |56.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tools/ydbd_slice/bin/ydbd_slice |56.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/sqs/multinode/objcopy_afb48e06933bdee6c5245db82e.o |56.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/quota_manager/ut_helpers/liblibs-quota_manager-ut_helpers.a |56.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/sqs/multinode/objcopy_10b0cfa01297f7d7392eb4d9e4.o |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/apps/ydbd/main.cpp |56.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tools/ydbd_slice/bin/objcopy_9509442a50bd9d1393fa0d54e4.o |56.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_vdisk2/ydb-core-blobstorage-ut_vdisk2 |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/view/view_ut.cpp |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/fq/libs/row_dispatcher/ut/coordinator_ut.cpp |56.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/proto/dq_state_load_plan.pb.{h, cc} |56.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/sqs/multinode/objcopy_b306c2955ce13e6db6cae73363.o |56.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/cms/console/validators/ut/ydb-core-cms-console-validators-ut |56.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/tablet_flat/protos/flat_table_part.pb.{h, cc} |56.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/tpc/medium/ydb-tests-functional-tpc-medium |56.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/tpc/medium/objcopy_69005edd0f9166633ccd754c08.o |56.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/tpc/lib/libpy3functional-tpc-lib.global.a |56.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/schemeshard/operations.{pb.h ... grpc.pb.h} |56.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/tpc/medium/objcopy_e5d897582dc0fbda7c578cb53f.o |56.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/load/lib/libpy3olap-load-lib.global.a |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/fq/libs/row_dispatcher/ut/leader_election_ut.cpp |56.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/public/sdk/cpp/tests/unit/library/issue/ydb-public-sdk-cpp-tests-unit-library-issue |56.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/fq/yds/ydb-tests-fq-yds |56.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/tpc/medium/objcopy_d009f62008041e2f09cdbf7def.o |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/fq/libs/row_dispatcher/ut/row_dispatcher_ut.cpp |56.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/sqs/large/ydb-tests-functional-sqs-large |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/config/validation/column_shard_config_validator_ut/column_shard_config_validator_ut.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/yds/38dcacd12926621ca72e30ce1b_raw.auxcpp |56.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/transactions/protos/tx_event.pb.{h, cc} |56.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/no_llvm/libminikql-comp_nodes-no_llvm.a |56.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/yds/objcopy_b08299d456f3448b368e814cb8.o |56.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/yds/objcopy_dae5a42f53b4f98bf1b9fd8118.o |56.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/yds/objcopy_6b8c453743f8fd2c5380af70c6.o |56.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/yds/objcopy_25d3afea4b7778a202a80125cb.o |56.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/olap/ttl_tiering/ydb-tests-olap-ttl_tiering |56.8%| [PY] {BAZEL_DOWNLOAD} 
$(B)/ydb/tests/fq/yds/objcopy_7a185a4b35de7733fde931d298.o |56.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/yds/objcopy_1339ee5ef04af3a5a49d43a6c9.o |56.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/backup/common/ut/ydb-core-backup-common-ut |56.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/public/lib/json_value/ut/ydb-public-lib-json_value-ut |56.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/yds/objcopy_fdd48fc620c42f480ae38b77f5.o |56.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/sqs/large/objcopy_422ca1effff14e5a08952658d0.o |56.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/fq/libs/db_id_async_resolver_impl/ut/ydb-core-fq-libs-db_id_async_resolver_impl-ut |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/huge/blobstorage_hullhugeheap_ctx_ut.cpp |56.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/yds/objcopy_fcc835b175560db56b04f51f44.o |56.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/sqs/large/objcopy_8ac5034640eee44b1cd5fa5253.o |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/tests/unit/library/issue/utf8_ut.cpp |56.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/olap_workload/olap_workload |56.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/sqs/large/objcopy_5f161468ff5322b803d4d0dc79.o |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/huge/blobstorage_hullhuge_ut.cpp |56.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/ttl_tiering/objcopy_6cc8d554301fc8d647fa6e6c7c.o |56.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/yds/objcopy_9f43001a877b9e371fe700c81d.o |56.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/ttl_tiering/objcopy_69bb4174ba5b22bacbabacd799.o |56.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/ttl_tiering/objcopy_0664e2ab2eb37ae9f02538e483.o |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/nodewarden/ut_sequence/dsproxy_config_retrieval.cpp |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/graph/ut/graph_ut.cpp |56.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/olap_workload/libpy3olap_workload.global.a |56.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/ttl_tiering/objcopy_b4d1a41a4041b6372d2a384279.o |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/tests/unit/library/issue/yql_issue_ut.cpp |56.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/olap_workload/objcopy_9de271b22d7bcc64ef77cc3cde.o |56.8%| [PY] {BAZEL_DOWNLOAD} $(B)/yql/essentials/parser/proto_ast/gen/v1_proto_split/SQLv1Parser.pb.{code0.cc ... main.h} |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/lib/json_value/ydb_json_value_ut.cpp |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/splitter/ut/batch_slice.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tablet_tracing_signals.grpc.pb.cc |56.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/test_connection.pb.{h, cc} |56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/private_proxy.pb.{h, cc} |56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/config_units.{pb.h ... grpc.pb.h} |56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/read_actors_factory.pb.{h, cc} |56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/quotas_manager.pb.{h, cc} |56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/cms.{pb.h ... grpc.pb.h} |56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/base.{pb.h ... grpc.pb.h} |56.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/fq/libs/test_connection/ut/ydb-core-fq-libs-test_connection-ut |56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/node_whiteboard.{pb.h ... 
grpc.pb.h} |56.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/draft/persqueue_common.pb.{h, cc} |56.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/drivemodel.{pb.h ... grpc.pb.h} |56.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/filestore_config.{pb.h ... grpc.pb.h} |56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/scheme_board_mon.{pb.h ... grpc.pb.h} |56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/follower_group.{pb.h ... grpc.pb.h} |56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/folder_service/proto/config.pb.{h, cc} |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/balancing.cpp |56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/node_limits.{pb.h ... grpc.pb.h} |56.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/config/validation/column_shard_config_validator_ut/column_shard_config_validator_ut |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/fq/libs/control_plane_proxy/ut/control_plane_proxy_ut.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/hullop/hullop_delayedresp_ut.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/hullop/blobstorage_hullcompactdeferredqueue_ut.cpp |56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/replication.{pb.h ... grpc.pb.h} |56.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_pdiskfit/ut/ydb-core-blobstorage-ut_pdiskfit-ut |56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/msgbus_kv.{pb.h ... grpc.pb.h} |56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/actors/protos/dq_stats.pb.{h, cc} |56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_query.pb.{h, cc} |56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_scheme.pb.{h, cc} |56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/node_broker.{pb.h ... grpc.pb.h} |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/cms_ut_common.cpp |56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_topic.pb.{h, cc} |56.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_testshard/ydb-core-blobstorage-ut_testshard |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_group/main.cpp |56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_table.pb.{h, cc} |56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/statistics.{pb.h ... grpc.pb.h} |56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tablet.{pb.h ... grpc.pb.h} |56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/yql/essentials/protos/clickhouse.pb.{h, cc} |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/sentinel_ut_unstable.cpp |56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tracing.{pb.h ... grpc.pb.h} |56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tenant_slot_broker.{pb.h ... grpc.pb.h} |56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/stream.{pb.h ... grpc.pb.h} |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_ut_column_stats.cpp |56.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/actors/protos/dq_events.pb.{h, cc} |56.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tenant_pool.{pb.h ... grpc.pb.h} |56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/contrib/libs/googleapis-common-protos/google/api/field_behavior.{pb.h ... grpc.pb.h} |56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/whiteboard_flags.{pb.h ... grpc.pb.h} |56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/scheme/protos/pathid.{pb.h ... grpc.pb.h} |56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tablet_database.{pb.h ... 
grpc.pb.h} |56.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/huge/ut/ydb-core-blobstorage-vdisk-huge-ut |56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_vdisk_internal.{pb.h ... grpc.pb.h} |56.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/services/services.{pb.h ... grpc.pb.h} |56.7%| [PB] {BAZEL_DOWNLOAD} $(B)/contrib/libs/googleapis-common-protos/google/api/http.{pb.h ... grpc.pb.h} |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/examples/secondary_index_builtin/secondary_index_create.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/examples/secondary_index_builtin/main.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/examples/secondary_index_builtin/secondary_index_select.cpp |56.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/yql_translation_settings.{pb.h ... grpc.pb.h} |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/examples/secondary_index_builtin/secondary_index_fill.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/examples/secondary_index_builtin/secondary_index_select_join.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/examples/secondary_index_builtin/secondary_index.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/examples/secondary_index_builtin/secondary_index_drop.cpp |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/actorlib_impl/actor_activity_ut.cpp |56.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/http_proxy/ut/objcopy_5fddfa8f171a3216cad65e02ab.o |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/grpc_services/operation_helpers_ut.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/grpc_services/rpc_calls_ut.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/providers/s3/object_listers/yql_s3_path_ut.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/http_proxy/ut/json_proto_conversion_ut.cpp |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/actorlib_impl/actor_tracker_ut.cpp |56.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/public/sdk/cpp/examples/secondary_index_builtin/secondary_index_builtin |56.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yql/providers/s3/object_listers/ut/ydb-library-yql-providers-s3-object_listers-ut |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/actorlib_impl/actor_bootstrapped_ut.cpp |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/actorlib_impl/test_interconnect_ut.cpp |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/splitter/ut/ut_splitter.cpp |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/actorlib_impl/test_protocols_ut.cpp |56.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/fq/generic/streaming/ydb-tests-fq-generic-streaming |56.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/grpc/server/ut/ydb-library-grpc-server-ut |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_ut_data_cleanup.cpp |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/keyvalue/keyvalue_storage_read_request_ut.cpp |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/stop_pdisk.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/generic/streaming/4399546af28cb40e5d74ea4a4b_raw.auxcpp |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/hullop/blobstorage_readbatch_ut.cpp |56.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/generic/streaming/objcopy_49bad8251d240ad7c49d384b91.o |56.8%| [EN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/columnshard.h_serialized.cpp |56.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/generic/streaming/objcopy_181bdcd1743e9a1a78fafe4b60.o |56.8%| [AR] 
{BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/generic/utils/libpy3fq-generic-utils.global.a |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/keyvalue/keyvalue_ut.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/grpc/server/ut/stream_adaptor_ut.cpp |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_ut_background_compaction.cpp |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/keyvalue/keyvalue_collector_ut.cpp |56.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/hullop/ut/ydb-core-blobstorage-vdisk-hullop-ut |56.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/http_proxy/ut/inside_ydb_ut/objcopy_484246668d943fbae3b476ec7d.o |56.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_group/ydb-core-blobstorage-ut_group |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/security/ticket_parser_ut.cpp |56.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_blob_depot/ydb-core-blobstorage-ut_blobstorage-ut_blob_depot |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/opt/kqp_sqlin_ut.cpp |56.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/scheme_shard/ydb-tests-functional-scheme_shard |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/proxy_service/kqp_script_executions_ut.cpp |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/nodewarden/node_warden_pdisk.cpp |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/nodewarden/node_warden_group.cpp |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/idx_test/ydb_index_ut.cpp |56.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/tools/ydb_serializable/ydb_serializable |56.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/federated_query/ut/ydb-core-kqp-federated_query-ut |56.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/config/ydb-tests-functional-config |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/opt/kqp_not_null_ut.cpp |56.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/scheme_shard/objcopy_f93c60b04a0499f2ec6880591a.o |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/opt/kqp_agg_ut.cpp |56.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/breakpad/libydb-library-breakpad.global.a |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/batch_operations/kqp_batch_delete_ut.cpp |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/opt/kqp_returning_ut.cpp |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/fq/libs/actors/ut/database_resolver_ut.cpp |56.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/grpc_streaming/ut/grpc/libgrpc_streaming-ut-grpc.a |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/opt/kqp_ne_ut.cpp |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/proxy_service/kqp_proxy_ut.cpp |56.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/ydb_serializable/objcopy_3fdb568d483b57acc8e627f8c2.o |56.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/keys/libydb-library-keys.a |56.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_export/ydb-core-tx-datashard-ut_export |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/batch_operations/kqp_batch_update_ut.cpp |56.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/scheme_shard/objcopy_8120ef49e7e653ed0601604313.o |56.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/config/objcopy_ae5b9f6e7a00f305f01a3dde87.o |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/opt/kqp_merge_ut.cpp |56.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/config/objcopy_f0c8f68ad8d5be2aa410794898.o |56.7%| [CC] {BAZEL_DOWNLOAD, 
FAILED} $(S)/ydb/core/mind/bscontroller/mv_object_map_ut.cpp |56.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/scheme_shard/objcopy_d3af02c7d57ea2cbbe5d381baa.o |56.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/ydb_serializable/libpy3tests-tools-ydb_serializable.global.a |56.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/config/objcopy_93891caf0b2b82d249b0a98fa8.o |56.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_ut_compaction.cpp |56.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_ut_rs.cpp |56.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/opt/kqp_kv_ut.cpp |56.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/opt/kqp_named_expressions_ut.cpp |56.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/opt/kqp_extract_predicate_unpack_ut.cpp |56.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/restart_pdisk.cpp |56.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/generic/streaming/objcopy_49e9948af399bc60603a7d2db5.o |56.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/olap/oom/ydb-tests-olap-oom |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/common/util_ut.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/grpc/server/ut/grpc_response_ut.cpp |56.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/yql/essentials/tools/sql2yql/sql2yql |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/http_proxy/ut/inside_ydb_ut/inside_ydb_ut.cpp |56.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/opt/kqp_ranges_ut.cpp |56.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/http_proxy/ut/ymq_ut.cpp |56.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/oom/objcopy_df0cb3f315162a3110ee243ecd.o |56.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/oom/objcopy_a0543c2dc30365e9b2ad3d0ca6.o |56.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/quoter/quoter_service_bandwidth_test/main.cpp |56.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/opt/kqp_sort_ut.cpp |56.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/oom/objcopy_e0331f455507fe5ac3b71d0537.o |56.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/grouper_ut.cpp |56.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/lexer/check/libv1-lexer-check.a |56.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/federated_query/generic_ut/kqp_generic_provider_ut.cpp |56.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/quoter/quoter_service_bandwidth_test/server.cpp |56.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/lexer/regex/libv1-lexer-regex.a |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yql/essentials/tools/sql2yql/sql2yql.cpp |56.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/fq/libs/common/entity_id_ut.cpp |56.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/build_index/ut/ut_local_kmeans.cpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/locks/range_treap_ut.cpp |56.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/fq/libs/common/rows_proto_splitter_ut.cpp |56.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/build_index/ut/ut_reshuffle_kmeans.cpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/backup/impl/table_writer_ut.cpp |56.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/pq/gateway/dummy/libpq-gateway-dummy.a |56.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/ut_fat/dsproxy_ut.cpp |56.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/build_index/ut/ut_sample_k.cpp |56.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/ut_fat/blobstorage_node_warden_ut_fat.cpp |56.6%| [CC] {BAZEL_DOWNLOAD, FAILED} 
$(S)/ydb/core/sys_view/ut_kqp.cpp |56.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_ut_keys.cpp |56.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_ut_range_ops.cpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/replication/service/json_change_record_ut.cpp |56.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/huge.cpp |56.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/udfs/common/knn/libknn_udf.global.a |56.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/udfs/common/roaring/libroaring.global.a |56.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/udfs/common/datetime/libdatetime_udf.global.a |56.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/build_index/ut/ut_prefix_kmeans.cpp |56.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/grpc_streaming/grpc_streaming_ut.cpp |56.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/client/ydb_topic/include/libclient-ydb_topic-include.a |56.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/clickhouse/actors/libproviders-clickhouse-actors.a |56.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/quoter/quoter_service_ut.cpp |56.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/stats_collector/libproviders-dq-stats_collector.a |56.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/build_index/ut/ut_secondary_index.cpp |56.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/metrics/libproviders-dq-metrics.a |56.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/ut_common.cpp |56.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/long_tx_service/long_tx_service_ut.cpp |56.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/ut_helpers.cpp |56.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/fq/libs/common/cache_ut.cpp |56.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/ydb/actors/libproviders-ydb-actors.a |56.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/ut_counters.cpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/tools/dq/worker_node/main.cpp |56.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/base/board_subscriber_ut.cpp |56.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/ut_labeled.cpp |56.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/global_worker_manager/libproviders-dq-global_worker_manager.a |56.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/ut_rw/ut_backup.cpp |56.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/ydb/comp_nodes/libproviders-ydb-comp_nodes.a |56.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/replication_huge.cpp |56.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/actors/yt/libdq-actors-yt.a |56.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/fq/libs/common/iceberg_processor_ut.cpp |56.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/federated_query/generic_ut/iceberg_ut_data.cpp |56.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/quoter/quoter_service_bandwidth_test/quota_requester.cpp |56.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/replication/ut_helpers/libtx-replication-ut_helpers.a |56.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/service/libproviders-dq-service.a |56.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/group_mapper_ut.cpp |56.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/splitter/ut/ydb-core-tx-columnshard-splitter-ut |56.5%| [LD] {BAZEL_DOWNLOAD, FAILED} 
$(B)/ydb/core/blobstorage/ut_blobstorage/ut_balancing/ydb-core-blobstorage-ut_blobstorage-ut_balancing |56.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/security/ldap_auth_provider/ldap_utils_ut.cpp |56.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/formats/arrow/protos/accessor.pb.{h, cc} |56.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/persqueue/tests/liblibrary-persqueue-tests.a |56.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/http_proxy/ut/kinesis_ut.cpp |56.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/protos/interconnect.pb.{h, cc} |56.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/ut/cache_eviction_ut.cpp |56.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/netclassifier.{pb.h ... grpc.pb.h} |56.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/quoter/kesus_quoter_ut.cpp |56.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/ut/microseconds_sliding_window_ut.cpp |56.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/ut/quota_tracker_ut.cpp |56.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/replication.cpp |56.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/ut/internals_ut.cpp |56.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/statistics/service/ut/ut_column_statistics.cpp |56.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_bsvolume_reboots/ut_bsvolume_reboots.cpp |56.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/ut/partitiongraph_ut.cpp |56.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/ut/make_config.cpp |56.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/grpc_services/tablet/ut/ydb-core-grpc_services-tablet-ut |56.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_column_stats/ydb-core-tx-datashard-ut_column_stats |56.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/quoter/ut_helpers.cpp |56.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/time_cast/time_cast_ut.cpp |56.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/persqueue/ut/objcopy_1d0482d354dc270d18e7123281.o |56.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/ut/metering_sink_ut.cpp |56.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/scan/kqp_flowcontrol_ut.cpp |56.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_background_compaction/ydb-core-tx-datashard-ut_background_compaction |56.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_stop_pdisk/ydb-core-blobstorage-ut_blobstorage-ut_stop_pdisk |56.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/statistics/service/ut/ut_basic_statistics.cpp |56.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/providers/solomon/actors/ut/dq_solomon_write_actor_ut.cpp |56.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/ut/utils_ut.cpp |56.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/ut/type_codecs_ut.cpp |56.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/ut_rw/ut_normalizer.cpp |56.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/cms/ut_sentinel/ydb-core-cms-ut_sentinel |56.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/control/immediate_control_board_actor_ut.cpp |56.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/ut_rw/ut_columnshard_read_write.cpp |56.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/statistics/service/ut/ut_http_request.cpp |56.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/apps/pgwire/pgwire |56.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/view/ydb-core-kqp-ut-view |56.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_sequence_reboots/ut_sequence_reboots.cpp |56.5%| [CC] {BAZEL_DOWNLOAD, FAILED} 
$(S)/ydb/core/kqp/ut/scan/kqp_scan_ut.cpp |56.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/memory_controller/ut/ydb-core-memory_controller-ut |56.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_restart_pdisk/blobstorage-ut_blobstorage-ut_restart_pdisk |56.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_keyvalue.pb.{h, cc} |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/scan/kqp_split_ut.cpp |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/ut/slow/pq_ut.cpp |56.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/table_service_config.{pb.h ... grpc.pb.h} |56.5%| [PB] {BAZEL_DOWNLOAD} $(B)/yql/essentials/public/types/yql_types.pb.{h, cc} |56.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/nodewarden/group_stat_aggregator.cpp |56.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/ydb_discovery_v1.{pb.h ... grpc.pb.h} |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/indexes/kqp_indexes_prefixed_vector_ut.cpp |56.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/ymq/actor/ut/attributes_md5_ut.cpp |56.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/ymq/actor/ut/message_delay_stats_ut.cpp |56.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/ymq/actor/ut/sha256_ut.cpp |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/ut/slow/autopartitioning_ut.cpp |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/indexes/kqp_indexes_multishard_ut.cpp |56.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_compaction/ydb-core-tx-datashard-ut_compaction |56.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/ymq/actor/ut/infly_ut.cpp |56.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_data_cleanup/ydb-core-tx-datashard-ut_data_cleanup |56.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_ut_minstep.cpp |56.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_ut_ext_blobs_multiple_channels.cpp |56.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_subdomain_reboots/ut_subdomain_reboots.cpp |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/provider/read_attributes_utils_ut.cpp |56.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/idx_test/libpublic-lib-idx_test.a |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/ut/pqtablet_ut.cpp |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/indexes/kqp_indexes_vector_ut.cpp |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/indexes/kqp_indexes_ut.cpp |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/nodewarden/distconf_invoke_common.cpp |56.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tools/stress_tool/lib/libydb_device_test.a |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/statistics/service/ut/ut_aggregation/ut_aggregate_statistics.cpp |56.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tools/stress_tool/proto/libtools-stress_tool-proto.a |56.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/security/ldap_auth_provider/ldap_auth_provider_ut.cpp |56.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/utils/network/libessentials-utils-network.a |56.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/graph/ut/ydb-core-graph-ut |56.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/replication/service/topic_reader_ut.cpp |56.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/provider/yql_kikimr_provider_ut.cpp |56.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/ut_schema/ut_columnshard_schema.cpp |56.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/ut/pq_ut.cpp |56.3%| [PB] {BAZEL_DOWNLOAD} 
$(B)/ydb/library/yql/providers/s3/proto/retry_config.pb.{h, cc} |56.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_user_attributes_reboots/ut_user_attributes_reboots.cpp |56.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_rs/ydb-core-tx-datashard-ut_rs |56.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/nodewarden/node_warden_proxy.cpp |56.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/nodewarden/node_warden_pipe.cpp |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/nodewarden/distconf_invoke_storage_config.cpp |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/ut/fetch_request_ut.cpp |56.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yql/tools/dq/worker_node/worker_node |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_olap_reboots/ut_olap_reboots.cpp |56.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/generic/connector/api/service/connector.{pb.h ... grpc.pb.h} |56.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/draft/fq.pb.{h, cc} |56.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/compress_base/lib/libcommon-compress_base-lib.a |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/yql/providers/solomon/actors/ut/ut_helpers.cpp |56.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/re2/libre2_udf.global.a |56.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/logs/dsv/libdsv_udf.global.a |56.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/histogram/libhistogram_udf.global.a |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/ut/pqtablet_mock.cpp |56.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/json2/libjson2_udf.global.a |56.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/json/libjson_udf.global.a |56.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/compress_base/libcompress_udf.global.a |56.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/yson2/libyson2_udf.global.a |56.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/ip_base/lib/libcommon-ip_base-lib.a |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/ut/partition_ut.cpp |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/query_actor/query_actor_ut.cpp |56.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/set/libset_udf.global.a |56.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/digest/libdigest_udf.global.a |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/nodewarden/distconf_invoke_static_group.cpp |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/ncloud/impl/access_service_ut.cpp |56.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/hyperloglog/libhyperloglog_udf.global.a |56.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/url_base/lib/libcommon-url_base-lib.a |56.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/stat/libstat_udf.global.a |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/ut/list_all_topics_ut.cpp |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/ut/counters_ut.cpp |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/ut/user_info_ut.cpp |56.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/hyperscan/libhyperscan_udf.global.a |56.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/ip_base/libip_udf.global.a |56.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/stat/static/libcommon-stat-static.a |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/keyvalue/grpc_service_ut.cpp 
|56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/ut/pqrb_describes_ut.cpp |56.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/pire/libpire_udf.global.a |56.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/datetime2/libdatetime2_udf.global.a |56.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/nodewarden/distconf_generate.cpp |56.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/topfreq/libtopfreq_udf.global.a |56.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/ut/partition_chooser_ut.cpp |56.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/url_base/liburl_udf.global.a |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/wrappers/s3_wrapper_ut.cpp |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/provider/yql_kikimr_gateway_ut.cpp |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/ut/sourceid_ut.cpp |56.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/topfreq/static/libcommon-topfreq-static.a |56.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/tests/olap/high_load/read_update_write.cpp |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_sequence.cpp |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/nodewarden/distconf_scatter_gather.cpp |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_replication.cpp |56.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/proto/dq_transport.pb.{h, cc} |56.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/olap/columns/schema.cpp |56.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/nodewarden/node_warden_resource.cpp |56.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/generic/connector/api/service/protos/connector.pb.{h, cc} |56.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/replication/ydb_proxy/partition_end_watcher_ut.cpp |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_view.cpp |56.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_billing_helpers.cpp |56.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/proto/dq_tasks.pb.{h, cc} |56.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/keyvalue/ut/ydb-services-keyvalue-ut |56.4%| [PB] {BAZEL_DOWNLOAD} $(B)/library/cpp/messagebus/monitoring/mon_proto.pb.{h, cc} |56.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/build_index/ut/ydb-core-tx-datashard-build_index-ut |56.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/msgbus_pq.{pb.h ... grpc.pb.h} |56.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_pq.{pb.h ... 
grpc.pb.h} |56.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/apps/ydbd/ydbd |56.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/top/libtop_udf.global.a |56.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_pq.cpp |56.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/fq/libs/row_dispatcher/ut/ydb-core-fq-libs-row_dispatcher-ut |56.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/rate_limiter.pb.{h, cc} |56.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_range_ops/ydb-core-tx-datashard-ut_range_ops |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_lock.cpp |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/tools/stress_tool/device_test_tool.cpp |56.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/fq/libs/control_plane_proxy/ut/ydb-core-fq-libs-control_plane_proxy-ut |56.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/nodewarden/ut_sequence/ydb-core-blobstorage-nodewarden-ut_sequence |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_external_data_source.cpp |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_mkdir.cpp |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_solomon.cpp |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_cdc_stream.cpp |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/replication/service/worker_ut.cpp |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_bsv.cpp |56.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tablet_tracing_signals.{pb.h ... grpc.pb.h} |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_backup_collection.cpp |56.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/pqconfig.{pb.h ... grpc.pb.h} |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/ut/metering_ut.cpp |56.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/storage.pb.{h, cc} |56.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_huge/ydb-core-blobstorage-ut_blobstorage-ut_huge |56.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/grpc_status_proxy.{pb.h ... grpc.pb.h} |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_initiate_build_index.cpp |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_solomon.cpp |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_continuous_backup.cpp |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/tools/stress_tool/device_test_tool_ut.cpp |56.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/read_actors_factory.pb.{h, cc} |56.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/tools/stress_tool/proto/device_perf_test.{pb.h ... 
grpc.pb.h} |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_column_build/ut_column_build.cpp |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_move_sequence.cpp |56.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tools/stress_tool/ydb_stress_tool |56.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/task_controller.pb.{h, cc} |56.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/ymq/proto/records.pb.{h, cc} |56.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/quotas_manager.pb.{h, cc} |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_memory_changes.cpp |56.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yql/providers/solomon/actors/ut/ydb-library-yql-providers-solomon-actors-ut |56.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/keyvalue/ut/ydb-core-keyvalue-ut |56.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/scheme/defaults/protos/data.pb.{h, cc} |56.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp |56.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/olap/columns/update.cpp |56.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_move_index.cpp |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/ut_bscontroller/main.cpp |56.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/vdisk/protos/events.pb.{h, cc} |56.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_keys/ydb-core-tx-datashard-ut_keys |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/olap/indexes/schema.cpp |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__unmark_restore_tables.cpp |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_restore_backup_collection.cpp |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/nodewarden/node_warden_mon.cpp |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__upgrade_schema.cpp |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_ut_kqp_scan.cpp |56.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/actorlib_impl/ut/ydb-core-actorlib_impl-ut |56.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/time_cast/ut/ydb-core-tx-time_cast-ut |56.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/long_tx_service/ut/ydb-core-tx-long_tx_service-ut |56.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/idx_test/ydb-core-kqp-ut-idx_test |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_kesus.cpp |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_rtmr.cpp |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_serverless_reboots/ut_serverless_reboots.cpp |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard_types.cpp |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard_export_uploaders.cpp |56.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_bsvolume_reboots/ydb-core-tx-schemeshard-ut_bsvolume_reboots |56.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/security/ut/ydb-core-security-ut |56.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_subdomain_reboots/ydb-core-tx-schemeshard-ut_subdomain_reboots |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_upgrade_subdomain.cpp 
|56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__upgrade_access_database.cpp |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard_audit_log.cpp |56.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/subdomains.{pb.h ... grpc.pb.h} |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/secret/ut/ut_secret.cpp |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__sync_update_tenants.cpp |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/replication/ydb_proxy/ydb_proxy_ut.cpp |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_index.cpp |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_build_index.cpp |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_indexed_table.cpp |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard_bg_tasks__list.cpp |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_side_effects.cpp |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/schemeshard_info_types.h_serialized.cpp |56.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/cms/ut_sentinel_unstable/ydb-core-cms-ut_sentinel_unstable |56.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_formats.pb.{h, cc} |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/data_gc.cpp |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/tests/tools/fqrun/fqrun.cpp |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_split_merge.cpp |56.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_export.pb.{h, cc} |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/operation_queue_timer.h_serialized.cpp |56.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_backup.{pb.h ... grpc.pb.h} |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__serverless_storage_billing.cpp |56.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_coordination.pb.{h, cc} |56.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_sequence_reboots/ydb-core-tx-schemeshard-ut_sequence_reboots |56.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/token_accessor.pb.{h, cc} |56.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tools/stress_tool/ut/ydb-tools-stress_tool-ut |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_external_data_source.cpp |56.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/metadata/secret/ut/ydb-services-metadata-secret-ut |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard_utils.cpp |56.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/tools/fqrun/fqrun |56.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/msgbus.{pb.h ... grpc.pb.h} |56.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/db_pool.pb.{h, cc} |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard_validate_ttl.cpp |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard_export__forget.cpp |56.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/olap/high_load/ydb-tests-olap-high_load |56.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters.{pb.h ... 
grpc.pb.h} |56.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/query_actor/ut/ydb-library-query_actor-ut |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/data_uncertain.cpp |56.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/ut_rw/ydb-core-tx-columnshard-ut_rw |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__conditional_erase.cpp |56.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/quoter/ut/ydb-core-quoter-ut |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/persqueue_cluster_discovery/cluster_discovery_service_ut.cpp |56.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/batch_operations/ydb-core-kqp-ut-batch_operations |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/fyamlcpp/fyamlcpp.cpp |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard_build_index__list.cpp |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard_cdc_stream_scan.cpp |56.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/proxy_service/ut/ydb-core-kqp-proxy_service-ut |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__monitoring.cpp |56.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/opt/ydb-core-kqp-ut-opt |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_external_table.cpp |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tenant_pool.pb.cc |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard_domain_links.cpp |56.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_replication/core-blobstorage-ut_blobstorage-ut_replication |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/ydb_table_impl.pb.cc |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/yql_translation_settings.pb.cc |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_common.cpp |56.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/http_proxy/ut/inside_ydb_ut/ydb-core-http_proxy-ut-inside_ydb_ut |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/datashard_backup.pb.cc |56.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/replication/service/ut_json_change_record/tx-replication-service-ut_json_change_record |56.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/quoter/quoter_service_bandwidth_test/quoter_service_bandwidth_test |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_mediator.pb.cc |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tracing.pb.cc |56.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/persqueue_cluster_discovery/ut/ydb-services-persqueue_cluster_discovery-ut |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tablet_tx.grpc.pb.cc |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard.cpp |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp |56.4%| [EN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/data_sharing/common/session/common.h_serialized.cpp |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__init_root.cpp |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard_export__create.cpp |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tablet_tx.pb.cc |56.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/grpc_services/ut/ydb-core-grpc_services-ut |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/schemeshard_types.h_serialized.cpp |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} 
$(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_cdc_stream.cpp |56.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/common_ut.cpp |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tx_mediator_timecast.pb.cc |56.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/local.{pb.h ... grpc.pb.h} |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard_import__cancel.cpp |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_vdisk_config.pb.cc |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/maintenance.pb.cc |56.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/locks/ut_range_treap/ydb-core-tx-locks-ut_range_treap |56.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yql/public/ydb_issue/ut/ydb-library-yql-public-ydb_issue-ut |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_vdisk_internal.pb.cc |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tx_scheme.pb.cc |56.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/kqprun/src/proto/libkqprun-src-proto.a |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/tests/fq/control_plane_storage/ydb_control_plane_storage_quotas_ut.cpp |56.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_minstep/ydb-core-tx-datashard-ut_minstep |56.4%| [EN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/schemeshard_info_types.h_serialized.cpp |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__delete_tablet_reply.cpp |56.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/wrappers/ut/ydb-core-wrappers-ut |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/change_exchange.pb.cc |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/tests/fq/control_plane_storage/ydb_control_plane_storage_queries_ut.cpp |56.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/http_proxy/ut/ydb-core-http_proxy-ut |56.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/public/ydb_issue/ut/ydb_issue_ut.cpp |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/tests/fq/control_plane_storage/in_memory_control_plane_storage_ut.cpp |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_vdisk_internal.grpc.pb.cc |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/compile_service_config.pb.cc |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/group_metrics_exchange.cpp |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tablet_counters.grpc.pb.cc |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/compaction.pb.cc |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/test_shard.pb.cc |56.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/key.{pb.h ... grpc.pb.h} |56.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tablet_counters_aggregator.pb.cc |56.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/local.pb.cc |56.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/memory_stats.pb.cc |56.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/bootstrapper.grpc.pb.cc |56.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/kqp_physical.pb.cc |56.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/compile_service_config.{pb.h ... 
grpc.pb.h} |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_vdisk_config.grpc.pb.cc |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/node_whiteboard.grpc.pb.cc |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/subdomains.grpc.pb.cc |56.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/node_limits.grpc.pb.cc |56.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/msgbus_kv.pb.cc |56.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/index_builder.{pb.h ... grpc.pb.h} |56.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard_export_flow_proposals.cpp |56.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/replication/service/ut_topic_reader/ydb-core-tx-replication-service-ut_topic_reader |56.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/mind/bscontroller/ut/ydb-core-mind-bscontroller-ut |56.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/table_service_config.pb.cc |56.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/long_tx_service.grpc.pb.cc |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/pqconfig.grpc.pb.cc |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/serverless_proxy_config.pb.cc |56.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/table_stats.pb.cc |56.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/scheme_type_operation.pb.cc |56.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/shared_cache.pb.h_serialized.cpp |56.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__list_users.cpp |56.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/kqp.pb.cc |56.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/workload_manager_config.pb.cc |56.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/minikql_engine.grpc.pb.cc |56.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/metrics.grpc.pb.cc |56.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/7da42a3b6793cbed63d7170b89_raw.auxcpp |56.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/msgbus_kv.grpc.pb.cc |56.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/s3_settings.grpc.pb.cc |56.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/scheme_board.grpc.pb.cc |56.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/node_limits.pb.cc |56.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/tests/fq/control_plane_storage/ydb_control_plane_storage_bindings_ut.cpp |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/tests/fq/control_plane_storage/ydb_control_plane_storage_ut.cpp |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/msgbus_pq.grpc.pb.cc |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/config_units.pb.cc |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/scheme_type_metadata.pb.cc |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/shared_cache.grpc.pb.cc |56.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tablet_pipe.grpc.pb.cc |56.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/federated_query/generic_ut/ydb-core-kqp-ut-federated_query-generic_ut |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard_import__list.cpp |56.4%| [PR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/public/api/protos/00ccceedc2861088d9671c050e_raw.auxcpp |56.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/http_config.grpc.pb.cc |56.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/scheme_log.pb.cc |56.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/console.pb.cc |56.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/ce697fc3b324cb6152c4d7223d_raw.auxcpp |56.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/index_builder.grpc.pb.cc |56.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/health.pb.cc |56.5%| [PB] {BAZEL_DOWNLOAD} 
$(B)/ydb/public/api/client/yc_private/iam/yandex_passport_cookie.{pb.h ... grpc.pb.h} |56.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_ut_stats.cpp |56.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/data_events.{pb.h ... grpc.pb.h} |56.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_ut_kqp_stream_lookup.cpp |56.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/scheme_type_metadata.grpc.pb.cc |56.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/base/generated/runtime_feature_flags_ut.cpp |56.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/a5bc0e0ff9026c4aca1173f708_raw.auxcpp |56.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/grpc.grpc.pb.cc |56.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/scheme_log.grpc.pb.cc |56.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_external_blobs/ydb-core-tx-datashard-ut_external_blobs |56.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_private/iam/iam_token_service_subject.{pb.h ... grpc.pb.h} |56.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/s3_delete.cpp |56.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/follower_group.grpc.pb.cc |56.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/a045b564f46815fdcdce235af1_raw.auxcpp |56.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/scheme_board_mon.grpc.pb.cc |56.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/node_broker.grpc.pb.cc |56.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_sysview_processor.grpc.pb.cc |56.5%| [PY] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/public/api/protos/ydb_config__intpy3___pb2.py.jnwv.yapyc3 |56.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/statistics.pb.cc |56.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/filestore_config.grpc.pb.cc |56.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_statistics_aggregator.grpc.pb.cc |56.5%| [PB] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/public/api/protos/ydb_config__intpy3___pb2.py{, i} |56.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/hive.grpc.pb.cc |56.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/datashard_config.{pb.h ... 
grpc.pb.h} |56.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_cms__intpy3___pb2.py.jnwv.yapyc3 |56.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_clickhouse_internal__intpy3___pb2.py{, i} |56.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_cms__intpy3___pb2.py{, i} |56.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_coordination__intpy3___pb2.py{, i} |56.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_coordination__intpy3___pb2.py.jnwv.yapyc3 |56.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_extsubdomain.cpp |56.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_common__intpy3___pb2.py{, i} |56.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/fq/control_plane_storage/ydb-tests-fq-control_plane_storage |56.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_common__intpy3___pb2.py.jnwv.yapyc3 |56.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_debug__intpy3___pb2.py.jnwv.yapyc3 |56.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_clickhouse_internal__intpy3___pb2.py.jnwv.yapyc3 |56.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_debug__intpy3___pb2.py{, i} |56.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/9d48b9407474d3b0919ccb531a_raw.auxcpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/b04a8a4d880a6ab0d69f34e52c_raw.auxcpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_node_broker.pb.cc |56.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/s3_settings.pb.cc |56.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/serverless_proxy_config.grpc.pb.cc |56.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/datashard_backup.grpc.pb.cc |56.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard_self_pinger.cpp |56.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/kqp_stats.{pb.h ... grpc.pb.h} |56.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__login.cpp |56.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/dbb377e7c1773c184d477494b2_raw.auxcpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/drivemodel.pb.cc |56.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/data_sharing/protos/initiator.pb.{h, cc} |56.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/9adf0628f6ff902780290e87a9_raw.auxcpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/eac755898e5b78b71222fd0f02_raw.auxcpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_backup.grpc.pb.cc |56.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/export.pb.cc |56.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tablet.grpc.pb.cc |56.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_statistics_aggregator.pb.cc |56.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/console_config.grpc.pb.cc |56.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_tx_proxy.grpc.pb.cc |56.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_bs_controller.pb.cc |56.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/yc_search_ut/test_events_writer.cpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/datashard_config.grpc.pb.cc |56.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/memory_controller_config.{pb.h ... 
grpc.pb.h} |56.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_coordinator.pb.cc |56.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/data.cpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/config_units.grpc.pb.cc |56.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/d52903a0693870f83d0bbe0ab8_raw.auxcpp |56.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/fq/libs/common/ut/ydb-core-fq-libs-common-ut |56.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_keyvalue.grpc.pb.cc |56.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/flat_scheme_op.pb.cc |56.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_testshard.pb.cc |56.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_kesus.grpc.pb.cc |56.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/48f915926c5dab20db170ec408_raw.auxcpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/base.grpc.pb.cc |56.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/console_tenant.grpc.pb.cc |56.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/alloc.grpc.pb.cc |56.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/flat_scheme_op.grpc.pb.cc |56.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/msgbus.grpc.pb.cc |56.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/04f3291fff576effedd1d3a510_raw.auxcpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/e6ce42a762195cf7e946ca411e_raw.auxcpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/auth.grpc.pb.cc |56.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_persqueue_cluster_discovery.pb.{h, cc} |56.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/e11564474d0b882ae934f449d8_raw.auxcpp |56.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/tests/fq/control_plane_storage/ydb_control_plane_storage_connections_permissions_ut.cpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/console_config.pb.cc |56.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_replication.grpc.pb.cc |56.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters.grpc.pb.cc |56.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tablet_counters_aggregator.{pb.h ... grpc.pb.h} |56.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/row_dispatcher.pb.{h, cc} |56.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/kafka.pb.cc |56.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/flat_scheme_op.{pb.h ... grpc.pb.h} |56.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_backup.pb.cc |56.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/sys_view/ut/ydb-core-sys_view-ut |56.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_schemeshard.{pb.h ... 
grpc.pb.h} |56.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_kesus.pb.cc |56.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_federation_discovery.pb.{h, cc} |56.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/resource_manager.pb.{h, cc} |56.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_cms.pb.cc |56.6%| [PR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/d52903a0693870f83d0bbe0ab8_raw.auxcpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/replication.pb.cc |56.6%| [PR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/base/generated/runtime_feature_flags.h |56.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage.grpc.pb.cc |56.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/datashard_load.pb.cc |56.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_persqueue_v1.pb.{h, cc} |56.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/0f1c8f1776dfa9603c374693fd_raw.auxcpp |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/op_init_schema.cpp |56.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/nodes_manager.pb.{h, cc} |56.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/rate_limiter.pb.{h, cc} |56.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blockstore_config.{pb.h ... grpc.pb.h} |56.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/s3_write.cpp |56.6%| [PB] {BAZEL_DOWNLOAD} $(B)/contrib/libs/opentelemetry-proto/opentelemetry/proto/trace/v1/trace.{pb.h ... grpc.pb.h} |56.6%| [PB] {BAZEL_DOWNLOAD} $(B)/library/cpp/lwtrace/protos/lwtrace.pb.{h, cc} |56.7%| [PB] {BAZEL_DOWNLOAD} $(B)/contrib/libs/opentelemetry-proto/opentelemetry/proto/resource/v1/resource.{pb.h ... grpc.pb.h} |56.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/node_broker.{pb.h ... grpc.pb.h} |56.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blob_depot.grpc.pb.cc |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/tx_proxy/proxy_ut_helpers.cpp |56.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_disk__intpy3___pb2_grpc.py.p5ju.yapyc3 |56.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/draft/ydb_replication.pb.{h, cc} |56.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_disk_color.{pb.h ... grpc.pb.h} |56.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_disk_color__intpy3___pb2.py.p5ju.yapyc3 |56.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_disk_color__intpy3___pb2_grpc.py.p5ju.yapyc3 |56.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/tests/fq/control_plane_storage/ydb_control_plane_storage_internal_ut.cpp |56.7%| [PY] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/blobstorage_distributed_config__intpy3___pb2.py.p5ju.yapyc3 |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/tests/fq/control_plane_storage/ydb_control_plane_storage_bindings_permissions_ut.cpp |56.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/long_tx_service.{pb.h ... grpc.pb.h} |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/datastreams/datastreams_ut.cpp |56.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_disk_color__intpy3___pb2.py{ ... i} |56.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/compaction.{pb.h ... grpc.pb.h} |56.7%| [PB] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/blobstorage_distributed_config__intpy3___pb2.py{ ... i} |56.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/data_events.{pb.h ... grpc.pb.h} |56.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/index_builder.{pb.h ... 
grpc.pb.h} |56.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/export.{pb.h ... grpc.pb.h} |56.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_pdisk_config__intpy3___pb2.py{ ... i} |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blob_depot/schema.h_serialized.cpp |56.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_disk__intpy3___pb2.py{ ... i} |56.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/channel_purpose.{pb.h ... grpc.pb.h} |56.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/persqueue_error_codes_v1.pb.{h, cc} |56.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/pinger.pb.{h, cc} |56.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_clickhouse_internal.pb.{h, cc} |56.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/hive.{pb.h ... grpc.pb.h} |56.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/library/fyamlcpp/fyamlcpp.cpp |56.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/scheme/protos/type_info.{pb.h ... grpc.pb.h} |56.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/node_limits.{pb.h ... grpc.pb.h} |56.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/base/generated/ut/ydb-core-base-generated-ut |56.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/schemeshard/operations.{pb.h ... grpc.pb.h} |56.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/query_stats.{pb.h ... grpc.pb.h} |56.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/audit.pb.{h, cc} |56.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/table_stats.{pb.h ... grpc.pb.h} |56.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tablet_counters.{pb.h ... grpc.pb.h} |56.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/scheme/protos/key_range.{pb.h ... grpc.pb.h} |56.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/replication.{pb.h ... grpc.pb.h} |56.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/whiteboard_disk_states.{pb.h ... grpc.pb.h} |56.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tx_columnshard.{pb.h ... grpc.pb.h} |56.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/yql_translation_settings.{pb.h ... grpc.pb.h} |56.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/protos/portion_info.pb.{h, cc} |56.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tx_proxy.{pb.h ... grpc.pb.h} |56.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/draft/ydb_dynamic_config.pb.{h, cc} |56.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/draft/dummy.{pb.h ... grpc.pb.h} |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/tests/fq/control_plane_storage/ydb_control_plane_storage_connections_ut.cpp |56.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/scheme/defaults/protos/data.pb.{h, cc} |56.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/mkql_proto/protos/minikql.{pb.h ... grpc.pb.h} |56.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/kafka.{pb.h ... grpc.pb.h} |56.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/actors/protos/dq_stats.pb.{h, cc} |56.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/actors/protos/dq_events.pb.{h, cc} |56.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/ydb_issue/proto/issue_id.{pb.h ... 
grpc.pb.h} |56.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/annotations/validation.pb.{h, cc} |56.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_olap_reboots/ydb-core-tx-schemeshard-ut_olap_reboots |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_backup_incremental_backup_collection.cpp |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/assimilator.cpp |56.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/statistics/service/ut/ydb-core-statistics-service-ut |56.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/draft/persqueue_common.pb.{h, cc} |56.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/grpc_pq_old.{pb.h ... grpc.pb.h} |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/tx_allocator_client/client.cpp |56.7%| [LD] {BAZEL_DOWNLOAD} $(B)/tools/py3cc/py3cc |56.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_cms.pb.{h, cc} |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/agent/storage_range.cpp |56.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_formats.pb.{h, cc} |56.7%| [PY] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/blobstorage_distributed_config__intpy3___pb2_grpc.py.p5ju.yapyc3 |56.7%| [EN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/schemeshard_types.h_serialized.cpp |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ymq/actor/yc_search_ut/index_events_processor_ut.cpp |56.7%| [LD] {BAZEL_DOWNLOAD} $(B)/contrib/tools/protoc/plugins/grpc_python/grpc_python |56.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_export.pb.{h, cc} |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/tx_proxy/mon.cpp |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/data_load.cpp |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/agent/storage_collect_garbage.cpp |56.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/apache/arrow/cpp/src/arrow/python/libpy3src-arrow-python.a |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_resource_pool.cpp |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/agent/storage_patch.cpp |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_common_external_data_source.cpp |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/s3_upload.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/tx_proxy/upload_rows_counters.h_serialized.cpp |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_common_bsv.cpp |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_copy_sequence.cpp |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_ut_kqp.cpp |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_login.cpp |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/tests/fq/control_plane_storage/ydb_control_plane_storage_queries_permissions_ut.cpp |56.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/node_whiteboard.{pb.h ... 
grpc.pb.h} |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/agent/storage_discover.cpp |56.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/kqp/kqp_indexes/ydb-tests-functional-kqp-kqp_indexes |56.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_query.pb.{h, cc} |56.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/ymq/actor/ut/ydb-core-ymq-actor-ut |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard_import_flow_proposals.cpp |56.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_scheme.pb.{h, cc} |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/viewer/wb_filter.cpp |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/agent/metrics.cpp |56.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_user_attributes_reboots/core-tx-schemeshard-ut_user_attributes_reboots |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/tests/functional/kqp/kqp_indexes/main.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/scheme/scheme_types_proto_ut.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/scheme/scheme_ranges_ut.cpp |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/blobstorage_distributed_config.grpc.pb.cc |56.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/services/deprecated/persqueue_v0/api/grpc/persqueue.{pb.h ... grpc.pb.h} |56.7%| [LD] {BAZEL_DOWNLOAD} $(B)/contrib/python/mypy-protobuf/bin/protoc-gen-mypy/protoc-gen-mypy |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/scheme/scheme_borders_ut.cpp |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_common_resource_pool.cpp |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_common_external_table.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/examples/topic_reader/eventloop/main.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/ydb_scheme_v1.pb.cc |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/scheme/scheme_tablecell_ut.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/ydb_auth_v1.pb.cc |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/basic_usage_ut.cpp |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_common_subdomain.cpp |56.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/datastreams/ut/ydb-services-datastreams-ut |56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/yql/essentials/public/issue/protos/issue_severity.pb.{h, cc} |56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/config/protos/marker.pb.{h, cc} |56.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/ydb_operation_v1.pb.cc |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/agent/resolved_value.cpp |56.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/public/sdk/cpp/tests/unit/client/endpoints/ydb-public-sdk-cpp-tests-unit-client-endpoints |56.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/scan/ydb-core-kqp-ut-scan |56.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/ydb_export_v1.grpc.pb.cc |56.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/ydb_keyvalue_v1.pb.cc |56.7%| [PB] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/issue/protos/issue_id.pb.{h, cc} |56.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/ydb_export_v1.pb.cc |56.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/ut_schema/ydb-core-tx-columnshard-ut_schema |56.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/proto/dq_tasks.pb.{h, cc} |56.8%| [BN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stability/tool/cfg |56.8%| [CC] {BAZEL_DOWNLOAD} 
$(B)/ydb/public/api/grpc/ydb_query_v1.grpc.pb.cc |56.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/ydb_scripting_v1.pb.cc |56.8%| [PB] {default-linux-x86_64, release, asan} $(B)/ydb/public/api/protos/ydb_config__intpy3___pb2.py{, i} |56.8%| [BN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stability/tool/statistics_workload |56.8%| [PR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/public/api/grpc/140c88ca3e90f1923d2b7a0c94_raw.auxcpp |56.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/mind/ut_fat/ydb-core-mind-ut_fat |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/viewer/json_handlers_pq.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/ydb_coordination_v1.grpc.pb.cc |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_index_build/ut_index_build.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/tests/unit/client/endpoints/endpoints_ut.cpp |56.7%| [PB] {BAZEL_DOWNLOAD} $(B)/yql/essentials/protos/clickhouse.pb.{h, cc} |56.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_cms.grpc.pb.cc |56.7%| [PR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/public/api/grpc/a8f74ccfefd66bc6dc846adade_raw.auxcpp |56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/common.pb.{h, cc} |56.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_sequenceshard.pb.cc |56.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/ydb_table_v1.grpc.pb.cc |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/agent/storage_block.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/base.pb.cc |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/tx_allocator/txallocator__scheme.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/datashard_config.pb.cc |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/tx_proxy/describe.cpp |56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/ydb_debug_v1__intpy3___pb2.py{ ... i} |56.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/ydb_coordination_v1__intpy3___pb2_grpc.py.gcum.yapyc3 |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/tx_proxy/resolvereq.cpp |56.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/ydb_discovery_v1__intpy3___pb2.py.gcum.yapyc3 |56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/ydb_discovery_v1__intpy3___pb2.py{ ... i} |56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/ydb_coordination_v1__intpy3___pb2.py{ ... i} |56.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/ydb_debug_v1__intpy3___pb2.py.gcum.yapyc3 |56.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/ydb_coordination_v1__intpy3___pb2.py.gcum.yapyc3 |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/replication/service/table_writer_ut.cpp |56.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/ydb_debug_v1__intpy3___pb2_grpc.py.gcum.yapyc3 |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/scheme_board/ut_helpers.cpp |56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/ydb_result_set_old.{pb.h ... 
grpc.pb.h} |56.8%| [PY] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/public/api/grpc/ydb_config_v1__intpy3___pb2_grpc.py.gcum.yapyc3 |56.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/ydb_auth_v1__intpy3___pb2.py.gcum.yapyc3 |56.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/auth.pb.cc |56.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/apps/dstool/libpy3ydb-dstool.global.a |56.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/ydb_cms_v1__intpy3___pb2.py.gcum.yapyc3 |56.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/apps/dstool/objcopy_fca89909cedb628068681e1038.o |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_index_build/ut_vector_index_build.cpp |56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/ydb_cms_v1__intpy3___pb2.py{ ... i} |56.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/apps/dstool/lib/libpy3dstool_lib.global.a |56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/ydb_auth_v1__intpy3___pb2.py{ ... i} |56.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/datashard_load.pb.h_serialized.cpp |56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/pending_fetcher.pb.{h, cc} |56.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_datashard.pb.cc |56.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/ncloud/impl/ut/ydb-library-ncloud-impl-ut |56.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/grpc_pq_old.pb.cc |56.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/46a8ef73ff31f7dc3ac1032a42_raw.auxcpp |56.8%| [PB] {default-linux-x86_64, release, asan} $(B)/ydb/core/protos/blobstorage_distributed_config__intpy3___pb2.py{ ... i} |56.8%| [PY] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/public/api/grpc/ydb_config_v1__intpy3___pb2.py.gcum.yapyc3 |56.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blob_depot_config.pb.cc |56.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/grpc_pq_old.grpc.pb.cc |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_backup_backup_collection.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/kafka.grpc.pb.cc |56.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/51f498bc509c1cf14dcc9e29ea_raw.auxcpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_replication.pb.cc |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/space_monitor.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/key.grpc.pb.cc |56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/gateways.pb.{h, cc} |56.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/85072bb936b0763f4b03040c4c_raw.auxcpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/pdiskfit.pb.cc |56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_pq.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/kqp_stats.grpc.pb.cc |56.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blockstore_config.grpc.pb.cc |56.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/follower_group.pb.cc |56.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/scheme/ut/ydb-core-scheme-ut |56.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/0c63a697118bf491dff02a4ccc_raw.auxcpp |56.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/control/ut/ydb-core-control-ut |56.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/153481dc2b1552fca201c7ebcc_raw.auxcpp |56.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/fq_config.pb.{h, cc} |56.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/21aa68e41e4a19efb7410ed5fe_raw.auxcpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tablet.pb.cc |56.9%| [PY] {default-linux-x86_64, release, asan} 
$(B)/ydb/public/api/protos/ydb_config__intpy3___pb2.py.jnwv.yapyc3 |56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/retry_policy_ut.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/whiteboard_disk_states.grpc.pb.cc |56.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tx_scheme.grpc.pb.cc |56.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/ydb_discovery_v1.pb.cc |56.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tx_columnshard.grpc.pb.cc |56.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tx_mediator_timecast.grpc.pb.cc |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/tx_proxy/proxy.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/whiteboard_flags.pb.cc |56.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/msgbus.pb.cc |56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/resource_broker.{pb.h ... grpc.pb.h} |56.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_base3.grpc.pb.cc |56.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/fq/libs/actors/ut/ydb-core-fq-libs-actors-ut |56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/tx_proxy/proxy_ut.cpp |56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/agent.cpp |56.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/public/sdk/cpp/examples/topic_reader/eventloop/persqueue_reader_eventloop |56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/tx_proxy/commitreq.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/ydb_discovery_v1.grpc.pb.cc |56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/tests/tools/kqprun/kqprun.cpp |56.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/tools/kqprun/kqprun |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/tx_proxy/datareq.cpp |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/public/api/grpc/ydb_config_v1.pb.cc |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/scheme_board/cache_ut.cpp |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/mon_main.cpp |56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/data_mon.cpp |56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/tx_proxy/rpc_long_tx.cpp |56.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/base/ut_board_subscriber/ydb-core-base-ut_board_subscriber |56.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_config.pb.cc |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/user_attributes.cpp |56.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_datashard.{pb.h ... 
grpc.pb.h}
|56.9%| [PB] {BAZEL_DOWNLOAD} $(B)/yql/essentials/protos/yql_mount.pb.{h, cc}
|56.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/control_plane_storage.pb.{h, cc}
|56.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/sqs/messaging/objcopy_7211c23d9494c46f0f60063e9e.o
|56.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/sqs/messaging/objcopy_791e2f78c18891d943ecce5e41.o
|56.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/sqs/messaging/objcopy_48a08121f0a68da2f2666b0341.o
|56.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_base.pb.cc
|56.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/ydb_scheme_v1.grpc.pb.cc
|56.9%| [PB] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/common/proto/udf_resolver.pb.{h, cc}
|56.9%| [PY] {default-linux-x86_64, release, asan} $(B)/ydb/core/protos/blobstorage_distributed_config__intpy3___pb2_grpc.py.p5ju.yapyc3
|56.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/export/protos/selector.pb.{h, cc}
|56.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/ydb_federation_discovery_v1.grpc.pb.cc
|56.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/pyarrow/libpy3contrib-python-pyarrow.global.a
|56.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/tests/tpch/lib/libtests-tpch-lib.global.a
|56.9%| [PY] {default-linux-x86_64, release, asan} $(B)/ydb/core/protos/blobstorage_distributed_config__intpy3___pb2.py.p5ju.yapyc3
|56.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/tests/tpch/lib/libtests-tpch-lib.a
|56.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_hive.{pb.h ... grpc.pb.h}
|56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/providers/s3/credentials/credentials_ut.cpp
|56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/tests/kikimr_tpch/kqp_tpch_ut.cpp
|56.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/sqs/messaging/ydb-tests-functional-sqs-messaging
|56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/read_session_ut.cpp
|56.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_private/iam/reference.{pb.h ... grpc.pb.h}
|56.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/yaml_config/protos/blobstorage_config.pb.{h, cc}
|56.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/apps/dstool/ydb-dstool
|56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_common_pq.cpp
|56.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yql/providers/s3/credentials/ut/ydb-library-yql-providers-s3-credentials-ut
|56.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/grpc_streaming/ut/ydb-core-grpc_streaming-ut
|56.9%| [EN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/export/session/session.h_serialized.cpp
|56.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_sequenceshard.{pb.h ... grpc.pb.h}
|56.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/checkpoint_coordinator.pb.{h, cc}
|56.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/persqueue/ut/slow/ydb-core-persqueue-ut-slow
|56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/compress_executor_ut.cpp
|56.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/ydb_cms_v1.grpc.pb.cc
|56.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/draft/fq.pb.{h, cc}
|56.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_column_build/ydb-core-tx-schemeshard-ut_column_build
|56.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_columnshard.pb.cc
|56.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/pyarrow/libpy3contrib-python-pyarrow.a
|56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/compute.pb.{h, cc}
|56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/viewer/json_wb_req.cpp
|56.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/tests/kikimr_tpch/ydb-core-kqp-tests-kikimr_tpch
|56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/viewer/json_handlers_storage.cpp
|56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/viewer/json_handlers.cpp
|56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/viewer/json_handlers_query.cpp
|56.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/issue_id.pb.{h, cc}
|56.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/data_sharing/protos/links.pb.{h, cc}
|56.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_serverless_reboots/ydb-core-tx-schemeshard-ut_serverless_reboots
|56.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/fq_config.pb.{h, cc}
|56.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/ydb_import_v1.grpc.pb.cc
|56.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/datashard_load.grpc.pb.cc
|56.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/alloc.{pb.h ... grpc.pb.h}
|56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_config.{pb.h ... grpc.pb.h}
|56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/token_accessor.pb.{h, cc}
|56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/auth/groups.cpp
|56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/public_http/protos/fq.pb.{h, cc}
|56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blob_depot_config.{pb.h ... grpc.pb.h}
|56.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/grpc_services/cancelation/protos/event.pb.{h, cc}
|56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/config/validation/validators_ut.cpp
|56.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/public/sdk/cpp/tests/integration/sessions_pool/public-sdk-cpp-tests-integration-sessions_pool
|56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/viewer/json_handlers_pdisk.cpp
|56.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/with_offset_ranges_mode_ut/with_offset_ranges_mode_ut
|56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/viewer/json_handlers_browse.cpp
|56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_base3.{pb.h ... grpc.pb.h}
|56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/examples/bulk_upsert_simple/main.cpp
|56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/auth/permissions.cpp
|56.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/generic/connector/tests/datasource/postgresql/objcopy_0ed2be5b1f8bbcf21c01d97861.o
|56.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/generic/connector/tests/datasource/postgresql/8ba4bc7bbd068d496fd8d38c20_raw.auxcpp
|56.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/generic/connector/tests/datasource/postgresql/objcopy_8a480df96cc6cd49399cfaea66.o
|56.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/generic/connector/tests/datasource/postgresql/objcopy_c6dc9ea6dc9d2c6577817a5fb6.o
|56.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/sdk/cpp/sdk_credprovider/ydb-tests-functional-sdk-cpp-sdk_credprovider
|56.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yql/providers/generic/connector/tests/datasource/postgresql/tests-datasource-postgresql
|56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/tests/integration/sessions_pool/main.cpp
|56.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/stream.{pb.h ... grpc.pb.h}
|56.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_kqp_scan/ydb-core-tx-datashard-ut_kqp_scan
|56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/lib/ydb_cli/common/yql_parser/yql_parser_ut.cpp
|56.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/public/lib/ydb_cli/common/yql_parser/ut/ydb-public-lib-ydb_cli-common-yql_parser-ut
|56.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/security/ldap_auth_provider/ut/ydb-core-security-ldap_auth_provider-ut
|56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/public_http/http_service.cpp
|56.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/api/protos/task_command_executor.pb.{h, cc}
|56.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/yaml_config/protos/config.pb.{h, cc}
|56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/examples/topic_writer/transaction/main.cpp
|56.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/config_units.{pb.h ... grpc.pb.h}
|56.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/control_plane_storage/proto/yq_internal.pb.{h, cc}
|56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/tests/functional/sdk/cpp/sdk_credprovider/dummy_provider_ut.cpp
|56.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/s3/objcopy_8685c3ae88e5169a5acffc7bc4.o
|56.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/s3/objcopy_d191482d8b66f1c03ea8df56d3.o
|56.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/s3/objcopy_ff581f3cff717ab223922f0cd8.o
|56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/providers/dq/runtime/ut/file_cache_ut.cpp
|56.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/public/sdk/cpp/examples/bulk_upsert_simple/bulk_upsert_simple
|56.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/msgbus_pq.pb.cc
|56.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/4e238bfff142d4ed3830568018_raw.auxcpp
|56.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/ydb_operation_v1.{pb.h ... grpc.pb.h}
|56.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/health_config.pb.{h, cc}
|56.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/grpc/fq_private_v1.{pb.h ... grpc.pb.h}
|56.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/generated/codegen/codegen
|56.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/generic/connector/tests/datasource/ms_sql_server/7fc0a944ff3f4c9130511a5804_raw.auxcpp
|56.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/api/protos/service.pb.{h, cc}
|56.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/generic/connector/tests/datasource/ms_sql_server/objcopy_9ec58f723c034c871861783d19.o
|56.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/generic/connector/tests/datasource/ms_sql_server/objcopy_8f7d2de1c8d713e4feeacffe30.o
|56.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/generic/connector/tests/datasource/ms_sql_server/objcopy_b0df339b5cd42be3b946278515.o
|56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/import.{pb.h ... grpc.pb.h}
|56.8%| [AR] {BAZEL_DOWNLOAD} $(B)/certs/libcerts.global.a
|56.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/cxxsupp/builtins/liblibs-cxxsupp-builtins.a
|56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/feature_flags.{pb.h ... grpc.pb.h}
|56.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/double-conversion/libcontrib-libs-double-conversion.a
|56.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/replication/ydb_proxy/ut/ydb-core-tx-replication-ydb_proxy-ut
|56.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yql/providers/generic/connector/tests/datasource/ms_sql_server/datasource-ms_sql_server
|56.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/libunwind/libcontrib-libs-libunwind.a
|56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_coordinator.{pb.h ... grpc.pb.h}
|56.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/services/libydb-library-services.a
|56.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/abseil-cpp-tstring/y_absl/log/libabseil-cpp-tstring-y_absl-log.a
|56.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/abseil-cpp/absl/profiling/libabseil-cpp-absl-profiling.a
|56.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/login/protos/liblibrary-login-protos.a
|56.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/generic/connector/tests/datasource/oracle/objcopy_9f24a29ba641072592b3e37403.o
|56.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/grpc/third_party/upb/libgrpc-third_party-upb.a
|56.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/cxxsupp/libcxxabi-parts/liblibs-cxxsupp-libcxxabi-parts.a
|56.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/replication/service/ut_worker/ydb-core-tx-replication-service-ut_worker
|56.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/grpc/third_party/address_sorting/libgrpc-third_party-address_sorting.a
|56.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/c-ares/libcontrib-libs-c-ares.a
|56.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/cxxsupp/libcxxrt/liblibs-cxxsupp-libcxxrt.a
|56.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/fmt/libcontrib-libs-fmt.a
|56.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/abseil-cpp-tstring/y_absl/numeric/libabseil-cpp-tstring-y_absl-numeric.a
|56.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/abseil-cpp-tstring/y_absl/base/libabseil-cpp-tstring-y_absl-base.a
|56.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/abseil-cpp-tstring/y_absl/flags/libabseil-cpp-tstring-y_absl-flags.a
|56.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/protobuf/libcontrib-libs-protobuf.global.a
|56.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/zstd/libcontrib-libs-zstd.a
|56.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/generic/connector/tests/datasource/oracle/objcopy_d8c1983c83374ff3531b03c654.o
|56.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/cxxsupp/libcxx/liblibs-cxxsupp-libcxx.a
|56.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yql/providers/generic/connector/tests/datasource/oracle/tests-datasource-oracle
|56.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/generic/connector/tests/datasource/oracle/1e2480c2b04be34c00bb78e34e_raw.auxcpp
|56.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/re2/libcontrib-libs-re2.a
|56.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/generic/connector/tests/datasource/oracle/objcopy_158148a8bf02e291fb1e4cb617.o
|56.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/libc_compat/libcontrib-libs-libc_compat.a
|56.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/zlib/libcontrib-libs-zlib.a
|56.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/xxhash/libcontrib-libs-xxhash.a
|56.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/tcmalloc/malloc_extension/liblibs-tcmalloc-malloc_extension.a
|56.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/tcmalloc/no_percpu_cache/liblibs-tcmalloc-no_percpu_cache.a
|56.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/datashard/s3/ydb-tests-datashard-s3
|56.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/dsproxy/ut_fat/ydb-core-blobstorage-dsproxy-ut_fat
|56.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/abseil-cpp-tstring/y_absl/container/libabseil-cpp-tstring-y_absl-container.a
|56.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/libabseil-cpp-tstring-y_absl-debugging.a
|56.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/tcmalloc/no_percpu_cache/liblibs-tcmalloc-no_percpu_cache.global.a
|56.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/abseil-cpp/absl/numeric/libabseil-cpp-absl-numeric.a
|56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/viewer/json_handlers_scheme.cpp
|56.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/protobuf/libcontrib-libs-protobuf.a
|56.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/abseil-cpp-tstring/y_absl/types/libabseil-cpp-tstring-y_absl-types.a
|56.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/abseil-cpp-tstring/y_absl/hash/libabseil-cpp-tstring-y_absl-hash.a
|56.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/abseil-cpp-tstring/y_absl/status/libabseil-cpp-tstring-y_absl-status.a
|56.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/abseil-cpp-tstring/y_absl/profiling/libabseil-cpp-tstring-y_absl-profiling.a
|56.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/jinja2cpp/libcontrib-libs-jinja2cpp.a
|56.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/openssl/libcontrib-libs-openssl.a
|56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard_build_index_tx_base.cpp
|56.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/abseil-cpp-tstring/y_absl/random/libabseil-cpp-tstring-y_absl-random.a
|56.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/abseil-cpp-tstring/y_absl/time/libabseil-cpp-tstring-y_absl-time.a
|56.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/abseil-cpp-tstring/y_absl/synchronization/libabseil-cpp-tstring-y_absl-synchronization.a
|56.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/abseil-cpp/absl/base/libabseil-cpp-absl-base.a
|56.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/abseil-cpp/absl/flags/libabseil-cpp-absl-flags.a
|56.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/abseil-cpp/absl/container/libabseil-cpp-absl-container.a
|56.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/abseil-cpp/absl/log/libabseil-cpp-absl-log.a
|56.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/abseil-cpp/absl/debugging/libabseil-cpp-absl-debugging.a
|56.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/abseil-cpp/absl/hash/libabseil-cpp-absl-hash.a
|56.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/formats/arrow/protos/liblibrary-formats-arrow-protos.a
|56.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/abseil-cpp-tstring/y_absl/strings/libabseil-cpp-tstring-y_absl-strings.a
|56.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/abseil-cpp/absl/types/libabseil-cpp-absl-types.a
|56.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/boost/container/librestricted-boost-container.a
|56.9%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/cpuid_check/liblibrary-cpp-cpuid_check.global.a
|56.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/abseil-cpp/absl/status/libabseil-cpp-absl-status.a
|56.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/abseil-cpp/absl/random/libabseil-cpp-absl-random.a
|56.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/abseil-cpp/absl/strings/libabseil-cpp-absl-strings.a
|56.9%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/malloc/tcmalloc/libcpp-malloc-tcmalloc.a
|56.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/boost/atomic/librestricted-boost-atomic.a
|56.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/abseil-cpp/absl/synchronization/libabseil-cpp-absl-synchronization.a
|56.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/boost/exception/librestricted-boost-exception.a
|56.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/public/sdk/cpp/examples/topic_writer/transaction/topic_writer_transaction
|56.9%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/blockcodecs/codecs/zstd/libblockcodecs-codecs-zstd.global.a
|56.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/abseil-cpp/absl/time/libabseil-cpp-absl-time.a
|56.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/scheme/protos/libcore-scheme-protos.a
|56.9%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/containers/absl_flat_hash/libcpp-containers-absl_flat_hash.a
|56.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/boost/filesystem/librestricted-boost-filesystem.a
|56.9%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/blockcodecs/core/libcpp-blockcodecs-core.a
|56.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/boost/regex/librestricted-boost-regex.a
|57.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/protos/liblibrary-actors-protos.a
|57.0%| [AR] {BAZEL_DOWNLOAD} $(B)/util/charset/libutil-charset.a
|57.0%| [AR] {BAZEL_DOWNLOAD} $(B)/tools/enum_parser/enum_serialization_runtime/libtools-enum_parser-enum_serialization_runtime.a
|57.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/folder_service/proto/liblibrary-folder_service-proto.a
|56.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/schemeshard/libcore-protos-schemeshard.a
|56.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/config/protos/libcore-config-protos.a
|56.9%| [AR] {BAZEL_DOWNLOAD} $(B)/util/libyutil.a
|56.9%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/resource/liblibrary-cpp-resource.a
|57.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/common/protos/libcolumnshard-common-protos.a
|57.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/malloc/api/libcpp-malloc-api.a
|57.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/public/types/libessentials-public-types.a
|57.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/scheme/defaults/protos/libscheme-defaults-protos.a
|57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/generated/codegen/main.cpp
|57.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/liblibs-config-protos.a
|57.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/protos/libcolumnshard-engines-protos.a
|57.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/public/issue/protos/libpublic-issue-protos.a
|57.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/file_storage/proto/libcore-file_storage-proto.a
|57.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/grpc/libcontrib-libs-grpc.a
|57.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/proto/libyql-dq-proto.a
|56.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/ydb_issue/proto/liblibrary-ydb_issue-proto.a
|56.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/mkql_proto/protos/liblibrary-mkql_proto-protos.a
|57.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/indexes/ydb-core-kqp-ut-indexes
|57.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/annotations/libapi-protos-annotations.a
|57.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/benchmarks_init/objcopy_de67ee476035f2cc7c8d34c996.o
|57.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/s3/proto/libproviders-s3-proto.a
|57.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/benchmarks_init/objcopy_287a0728f8b1ad204ac0396eb2.o
|57.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/statistics/service/ut/ut_aggregation/ydb-core-statistics-service-ut-ut_aggregation
|57.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/benchmarks_init/objcopy_c96c333b4f7fc5cb2b98b27907.o
|57.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/issue/protos/libcore-issue-protos.a
|57.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/config.grpc.pb.cc
|57.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/protos/libyql-essentials-protos.a
|57.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/generic/connector/api/service/protos/libapi-service-protos.a
|57.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/actors/protos/libdq-actors-protos.a
|57.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/local_gateway/libproviders-dq-local_gateway.a
|57.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/common/proto/libproviders-common-proto.a
|57.0%| [BN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stability/tool/ydb_cli
|56.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/test_shard.grpc.pb.cc
|56.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/api/protos/dqs.pb.{h, cc}
|56.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/pq/proto/dq_io.pb.{h, cc}
|56.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_blob_depot.{pb.h ... grpc.pb.h}
|56.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/local.{pb.h ... grpc.pb.h}
|57.0%| [PR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/provider/yql_kikimr_expr_nodes.{gen.h ... defs.inl.h}
|57.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_mediator.{pb.h ... grpc.pb.h}
|57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/viewer/viewer_request.cpp
|57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_common_cdc_stream.cpp
|57.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/benchmarks_init/ydb-tests-functional-benchmarks_init
|56.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_testshard.{pb.h ... grpc.pb.h}
|56.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_private/accessservice/access_service.{pb.h ... grpc.pb.h}
|56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/base/blobstorage_grouptype_ut.cpp
|57.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_private/iam/service_account_service.{pb.h ... grpc.pb.h}
|57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/base/localdb_ut.cpp
|57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/sys_view/service/query_history_ut.cpp
|57.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/data_integrity_trails.{pb.h ... grpc.pb.h}
|57.0%| [EN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/config/init/init.h_serialized.cpp
|57.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/olap/s3_import/ydb-tests-olap-s3_import
|57.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/s3_import/objcopy_1dba5118ef0a485f3bf803be50.o
|57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/base/ut/path_ut.cpp
|57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/base/ut/table_index_ut.cpp
|57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/base/statestorage_guardian_impl_ut.cpp
|57.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/s3_import/objcopy_2d296dfaf373f7f15e6312517a.o
|57.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/s3_import/objcopy_938861be99a6cedecb22904193.o
|57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/util_string_ut.cpp
|57.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/ymq/actor/yc_search_ut/ydb-core-ymq-actor-yc_search_ut
|57.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/s3_import/objcopy_a65a4fae8912a32233240d3c51.o
|57.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/s3_import/objcopy_6e536fb2c379a4ebe79c499de8.o
|57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/util_pool_ut.cpp
|57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/base/ut/memory_stats_ut.cpp
|57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/base/statestorage_ut.cpp
|57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/base/logoblob_ut.cpp
|57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/examples/ttl/main.cpp
|57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/examples/ttl/ttl.cpp
|56.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/config/validation/ut/ydb-core-config-validation-ut
|56.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/sql/large/objcopy_d68e1e5b762e412afe6a534487.o
|56.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/sql/large/objcopy_7eab954373d77ffb1fab95ca0d.o
|56.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/sql/large/objcopy_27c0687ceeb7ce4ff5e4cea90a.o
|56.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stability/ydb/objcopy_48e09f84949dd34b82c51f21a3.o
|56.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stability/ydb/ydb-tests-stability-ydb
|56.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stability/ydb/objcopy_3b212908932716bae8a8e38b2c.o
|56.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stability/ydb/objcopy_ce63bab0f89a8715a42271a26a.o
|56.9%| [EN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/datashard.h_serialized.cpp
|56.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/encryption/objcopy_64cecb639c5f85fbf868097a08.o
|56.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/encryption/objcopy_93dc3386250916dfae1ecb9b13.o
|56.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/encryption/objcopy_3d6916930a438b51675ef6dda7.o
|56.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/icu/libcontrib-libs-icu.a
|57.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yql/providers/dq/runtime/ut/ydb-library-yql-providers-dq-runtime-ut
|57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/providers/pq/provider/ut/yql_pq_ut.cpp
|57.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/sql/large/ydb-tests-sql-large
|57.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_stats/ydb-core-tx-datashard-ut_stats
|57.0%| [EN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/columns_set.h_serialized.cpp
|57.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/s3/proto/credentials.pb.{h, cc}
|57.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/encryption/ydb-tests-functional-encryption
|57.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/ydb_auth_v1.grpc.pb.cc
|57.0%| [PB] {BAZEL_DOWNLOAD} $(B)/library/cpp/retry/protos/retry_options.pb.{h, cc}
|57.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/autoconfig/objcopy_44fac4fe441507735704a000ad.o
|57.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/autoconfig/objcopy_994fcbd53c4e2174c302bdb5ab.o
|57.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/autoconfig/objcopy_7c328c2741f9dd7697a2e0e8b1.o
|57.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/public/sdk/cpp/examples/ttl/ttl
|57.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/autoconfig/ydb-tests-functional-autoconfig
|57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/hulldb/base/blobstorage_blob_ut.cpp
|57.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/provider/ut/ydb-core-kqp-provider-ut
|57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/hulldb/base/blobstorage_hullstorageratio_ut.cpp
|57.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/shared_cache.{pb.h ... grpc.pb.h}
|57.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/s3/objcopy_ce073e3cc612363936bdd04210.o
|57.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/s3/objcopy_0c451aebc6dafbdf0d9da2ab02.o
|57.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/s3/objcopy_c43ce24509a50b033fa4050a33.o
|57.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/s3/objcopy_52d3e6a0651990fc997ab40ba2.o
|57.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/s3/objcopy_03f75cad4510fd9d018635026c.o
|57.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/fq/s3/ydb-tests-fq-s3
|57.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/s3/objcopy_64bde13108f9284b2e9f0bbb7a.o
|57.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/s3/objcopy_dc1e8788b8287c02880cfe2814.o
|57.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/backup/impl/ut_table_writer/ydb-core-backup-impl-ut_table_writer
|57.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/s3/objcopy_6cfba3dbee97ec121b2f346459.o
|57.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/s3/c664ef6ca80e747b410e1da324_raw.auxcpp
|57.0%| [EN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/volatile_tx.h_serialized.cpp
|57.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/sqs/common/ydb-tests-functional-sqs-common
|57.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/persqueue/ut/ydb-core-persqueue-ut
|57.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/ydb_rate_limiter_v1.pb.cc
|57.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/key.{pb.h ... grpc.pb.h}
|57.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/services/deprecated/persqueue_v0/api/protos/persqueue.pb.{h, cc}
|57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/cms/cms_ut.cpp
|57.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/base/ut/ydb-core-base-ut
|57.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/blobstorage/ydb-tests-functional-blobstorage
|57.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tx_sequenceshard.{pb.h ... grpc.pb.h}
|57.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/ydb_auth_v1.{pb.h ... grpc.pb.h}
|57.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/sqs/common/objcopy_0a1f127d9343562caddfbacf79.o
|57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/formats/arrow/ut/ut_reader.cpp
|57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/formats/arrow/ut/ut_column_filter.cpp
|57.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/sqs/common/objcopy_f9b0feecd0e36f08cbf5c53562.o
|57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/formats/arrow/ut/ut_hash.cpp
|57.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/sqs/common/objcopy_178e64ce5db822fc6aa8b3e608.o
|57.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/console_base.{pb.h ... grpc.pb.h}
|57.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/blobstorage/objcopy_1c0f807c059fe226699115f242.o
|57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/formats/arrow/ut/ut_dictionary.cpp
|57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/formats/arrow/ut/ut_program_step.cpp
|57.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/blobstorage/objcopy_16842d72ae0dac1856818f841e.o
|57.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/sys_view/service/ut/ydb-core-sys_view-service-ut
|57.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tablet_flat/ut_util/ydb-core-tablet_flat-ut_util
|57.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/ydb_cli/ydb-tests-functional-ydb_cli
|57.0%| [EN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/execution_unit.h_serialized.cpp
|57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/formats/arrow/ut/ut_arrow.cpp
|57.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/ydb_cli/5c5fdf614c3039a8dba94a4f38_raw.auxcpp
|57.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/ydb_cli/objcopy_c77713875cf17988efd8fc0fb3.o
|57.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/ydb_cli/objcopy_c52ec5ba5ab0b788efaa5ed704.o
|57.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/ydb_cli/objcopy_903d4758faea71f1363e296b3f.o
|57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/anubis_osiris/blobstorage_anubis_algo_ut.cpp
|57.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/ydb_cli/objcopy_359d47616c1036f0865eb1e662.o
|57.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/hulldb/barriers/ut/ydb-core-blobstorage-vdisk-hulldb-barriers-ut
|57.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/sqs/common/objcopy_b866963286293af0b6f2139fed.o
|57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut_large/ut_btree_index_large.cpp
|57.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/blobstorage/objcopy_790c6ea4aad5e761d21421b25d.o
|57.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/alloc.pb.cc
|57.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/ydb_table_v1.pb.cc
|57.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/compute.pb.{h, cc}
|57.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/change_exchange.grpc.pb.cc
|57.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yql/providers/s3/compressors/ut/ydb-library-yql-providers-s3-compressors-ut
|57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/providers/s3/compressors/ut/decompressor_ut.cpp
|57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/query/query_spacetracker_ut.cpp
|57.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/actors/protos/dq_status_codes.pb.{h, cc}
|57.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/folder_service/proto/config.pb.{h, cc}
|57.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/graph/shard/protos/counters_shard.pb.{h, cc}
|57.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/memory_stats.{pb.h ... grpc.pb.h}
|57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/olap/column_families/update.cpp
|57.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_private/iam/service_account.{pb.h ... grpc.pb.h}
|57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/repl/blobstorage_replrecoverymachine_ut.cpp
|57.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/draft/ydb_maintenance_v1.{pb.h ... grpc.pb.h}
|57.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/apps/etcd_proxy/proto/rpc.{pb.h ... grpc.pb.h}
|57.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tablet_tracing_signals.pb.cc
|57.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/draft/ydb_logstore.pb.{h, cc}
|56.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/hulldb/base/ut/ydb-core-blobstorage-vdisk-hulldb-base-ut
|57.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/pgproxy/protos/pgproxy.pb.{h, cc}
|57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/hulldb/barriers/barriers_tree_ut.cpp
|57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/repl/blobstorage_hullreplwritesst_ut.cpp
|57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/hulldb/base/hullbase_barrier_ut.cpp
|57.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/c/cyson/liblibrary-c-cyson.a
|57.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/cyson/libpy3library-python-cyson.global.a
|57.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_index_build/ydb-core-tx-schemeshard-ut_index_build
|57.0%| [PB] {BAZEL_DOWNLOAD} $(B)/yt/yt_proto/yt/client/chunk_client/proto/read_limit.pb.{h, cc}
|57.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/draft/ydb_clickhouse_internal_v1.{pb.h ... grpc.pb.h}
|57.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/cyson/libpy3library-python-cyson.a
|57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/ymq/base/ut/action_ut.cpp
|57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/ymq/base/ut/secure_protobuf_printer_ut.cpp
|57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/hulldb/base/hullds_generic_it_ut.cpp
|57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/ymq/base/ut/dlq_helpers_ut.cpp
|57.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_private/operation/operation.{pb.h ... grpc.pb.h}
|57.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/tx_proxy/ut_base_tenant/ydb-core-tx-tx_proxy-ut_base_tenant
|57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/ymq/base/ut/queue_attributes_ut.cpp
|57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/ymq/base/ut/helpers_ut.cpp
|57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/incrhuge/ut/incrhuge_log_merger_ut.cpp
|57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/incrhuge/ut/incrhuge_basic_ut.cpp
|57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/incrhuge/ut/incrhuge_id_dict_ut.cpp
|57.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/graph/protos/graph.pb.{h, cc}
|56.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/cms/ut/ydb-services-cms-ut
|56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/ymq/base/ut/counters_ut.cpp
|56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/hulldb/base/blobstorage_hullsatisfactionrank_ut.cpp
|56.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/ydb_monitoring_v1.{pb.h ... grpc.pb.h}
|56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/hulldb/base/hullds_heap_it_ut.cpp
|57.0%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/hmac/ut/ydb-core-fq-libs-hmac-ut
|57.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/fq/yt/kqp_yt_import/ydb-tests-fq-yt-kqp_yt_import
|57.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/nc_private/accessservice/access.{pb.h ... grpc.pb.h}
|57.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/formats/arrow/ut/ydb-core-formats-arrow-ut
|57.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yql/providers/pq/provider/ut/ydb-library-yql-providers-pq-provider-ut
|57.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/yt/dq_task_preprocessor/libproviders-yt-dq_task_preprocessor.a
|57.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_kqp/ydb-core-tx-datashard-ut_kqp
|57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/tools/dq/service_node/main.cpp
|57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/common/circlebuf_ut.cpp
|57.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/query/ut/ydb-core-blobstorage-vdisk-query-ut
|57.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/replication/service/ut_table_writer/ydb-core-tx-replication-service-ut_table_writer
|57.0%| [EN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/column_engine_logs.h_serialized.cpp
|57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/common/circlebufstream_ut.cpp
|57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/common/circlebufresize_ut.cpp
|57.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/tests/common/test_framework/libpy3tests-common-test_framework.global.a
|57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/common/memusage_ut.cpp
|57.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/yt/kqp_yt_import/objcopy_be85b0beafcfe4a7f6fd6c6dce.o
|57.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/tools/libpy3tests-fq-tools.global.a
|57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/common/vdisk_outofspace_ut.cpp
|57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/common/vdisk_lsnmngr_ut.cpp
|57.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/scheme_board/ut_cache/ydb-core-tx-scheme_board-ut_cache
|57.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/yt/kqp_yt_import/objcopy_2120ba1c181b59ff8129e88f2e.o
|57.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/yt/kqp_yt_import/objcopy_1c95ef09a97797b541386e59f9.o
|57.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/yt/kqp_yt_import/objcopy_72adec4fc4bd293cc59aa677e3.o
|57.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/yt/kqp_yt_import/0de346a5cadde55664f85ed317_raw.auxcpp
|57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/common/vdisk_syncneighbors_ut.cpp
|57.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/anubis_osiris/ut/ydb-core-blobstorage-vdisk-anubis_osiris-ut
|57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/dq/runtime/ut/ut_helper.cpp
|57.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/public/sdk/cpp/tests/unit/client/driver/ydb-public-sdk-cpp-tests-unit-client-driver
|57.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/mind/bscontroller/ut_bscontroller/ydb-core-mind-bscontroller-ut_bscontroller
|57.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/yt/kqp_yt_import/objcopy_646bfdd69de974aac5b70bb33b.o
|57.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_debug.pb.{h, cc}
|56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/public_http/http_router.cpp
|56.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/ydb_debug_v1.{pb.h ... grpc.pb.h}
|56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/dq/runtime/dq_arrow_helpers_ut.cpp
|56.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/olap/load/ydb-tests-olap-load
|57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/dq/runtime/dq_output_channel_ut.cpp
|57.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tablet_flat/ut_large/ydb-core-tablet_flat-ut_large
|57.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/load/objcopy_323a17e94d8d570989807d19d3.o
|57.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/load/objcopy_2e1dd9c9bc385e6efd22b78136.o
|57.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/load/objcopy_ec616740770a3a76d53352e427.o
|57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/tests/unit/client/driver/driver_ut.cpp
|57.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/7879daa613faf01b1b1cb72bdc_raw.auxcpp
|57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_background_cleaning/ut_background_cleaning.cpp
|57.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yql/tools/dqrun/lib/libtools-dqrun-lib.a
|57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/public_http/http_req.cpp
|57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/common/vdisk_pdisk_error_ut.cpp
|57.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/mkql_proto/protos/minikql.{pb.h ... grpc.pb.h}
|57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/engine/kikimr_program_builder_ut.cpp
|57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/engine/mkql_engine_flat_host_ut.cpp
|57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/mvp/meta/bin/main.cpp
|57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/engine/mkql_proto_ut.cpp
|57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet/pipe_tracker_ut.cpp
|57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet/tablet_metrics_ut.cpp
|57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/common/vdisk_config_ut.cpp
|57.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yql/dq/runtime/ut/ydb-library-yql-dq-runtime-ut
|57.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/repl/ut/ydb-core-blobstorage-vdisk-repl-ut
|56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_index/ut_async_index.cpp
|56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet_flat/flat_executor_ut_large.cpp
|56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/hulldb/generic/blobstorage_hullwritesst_ut.cpp
|56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_index/ut_vector_index.cpp
|56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/hulldb/generic/hullds_sst_it_all_ut.cpp
|56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet/tablet_counters_ut.cpp
|57.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/fq/plans/ydb-tests-fq-plans
|57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_index/ut_unique_index.cpp
|57.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/hulldb/generic/ut/ydb-core-blobstorage-vdisk-hulldb-generic-ut
|57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/sharding/ut/ut_sharding.cpp
|57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/ut/dsproxy_counters_ut.cpp
>> HmacSha::HmacSha1 [GOOD]
|56.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/datashard/dml/ydb-tests-datashard-dml
|56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/workload/benchmark_base/state_ut.cpp
|56.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/plans/objcopy_96b8686cd075e874d95d4aa5c5.o
|56.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/plans/objcopy_b031a661ba244dffa03ab0c7ec.o
|56.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/dml/objcopy_8fca143a218b930f297b779e3a.o
|57.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/dml/objcopy_9314464e3560b2511ac931acd9.o
|57.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/plans/objcopy_d0255dda539959b69d421868a2.o
|57.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/plans/objcopy_6a5c78aa9f679a0920be5264fe.o
|57.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/dml/objcopy_8db6616d40f8020d0632222fe3.o
|57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/ut/dsproxy_put_ut.cpp
|57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/ut/dsproxy_quorum_tracker_ut.cpp
|57.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/public/sdk/cpp/examples/basic_example/basic_example
|57.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yql/providers/s3/range_helpers/ut/ydb-library-yql-providers-s3-range_helpers-ut
|57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/ut/dsproxy_get_ut.cpp
|57.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/workload/benchmark_base/ut/ydb-library-workload-benchmark_base-ut
|56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/examples/basic_example/main.cpp
|57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/examples/basic_example/basic_example.cpp
|57.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/sqs/merge_split_common_table/fifo/objcopy_1574e8a5a6c530c7bfd6378c4d.o
|57.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/sqs/merge_split_common_table/fifo/objcopy_504b845d57f1a23561e970de61.o
|57.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/sqs/merge_split_common_table/fifo/objcopy_2aa1916d45dca98014edb3d732.o
|57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/examples/basic_example/basic_example_data.cpp
|57.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/monlib/encode/unistat/libmonlib-encode-unistat.a
|57.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/apps/etcd_proxy/service/libapps-etcd_proxy-service.global.a
|57.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/sqs/merge_split_common_table/fifo/functional-sqs-merge_split_common_table-fifo
|57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/ut/dsproxy_request_reporting_ut.cpp
|57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/ut/dsproxy_patch_ut.cpp
|57.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/common/ut/ydb-core-blobstorage-vdisk-common-ut
|57.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/plans/5a2f230528097042fdaf726fed_raw.auxcpp
|56.9%| [TS] {asan, default-linux-x86_64, release} ydb/core/fq/libs/hmac/ut/unittest >> HmacSha::HmacSha1 [GOOD]
|57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/providers/s3/range_helpers/path_list_reader_ut.cpp
|57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/providers/s3/range_helpers/file_tree_builder_ut.cpp
|57.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/monlib/libpy3library-python-monlib.global.a
|57.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/monlib/libpy3library-python-monlib.a
|57.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/s3_settings.{pb.h ... grpc.pb.h}
|57.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/apps/etcd_proxy/proto/libetcd-grpc.a
|57.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/resource_broker.{pb.h ... grpc.pb.h}
|57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/ut/dsproxy_sequence_ut.cpp
|57.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/query_stats.{pb.h ... grpc.pb.h}
|57.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_value.pb.{h, cc}
|57.0%| [PB] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/common/proto/gateways_config.pb.{h, cc}
|57.0%| [EN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/columnshard_impl.h_serialized.cpp
|56.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/common/protos/snapshot.pb.{h, cc}
|56.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/mvp/meta/bin/mvp_meta
|56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/engine/mkql_engine_flat_ut.cpp
|56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/yql/tools/dqrun/lib/dqrun_lib.cpp
|57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet/bootstrapper_ut.cpp
|57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet/tablet_counters_aggregator_ut.cpp
|57.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/draft/ydb_logstore_v1.{pb.h ... grpc.pb.h}
|57.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/alloc.{pb.h ... grpc.pb.h}
|57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_mirror3of4/main.cpp
|57.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yql/tools/dq/service_node/service_node
|57.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_statistics_aggregator.{pb.h ... grpc.pb.h}
|56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet/tablet_pipe_ut.cpp
|56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/groupinfo/blobstorage_groupinfo_partlayout_ut.cpp
|56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/syncer/blobstorage_syncquorum_ut.cpp
|56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/groupinfo/blobstorage_groupinfo_blobmap_ut.cpp
|56.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/ymq/base/ut/ydb-core-ymq-base-ut
|56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/groupinfo/blobstorage_groupinfo_iter_ut.cpp
|56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/groupinfo/blobstorage_groupinfo_ut.cpp
|57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/syncer/blobstorage_syncer_data_ut.cpp
|57.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/auth.{pb.h ... grpc.pb.h}
|57.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/key.pb.cc
|57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet/tablet_resolver_ut.cpp
|57.0%| [PB] {BAZEL_DOWNLOAD} $(B)/yql/essentials/protos/common.pb.{h, cc}
|56.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/shared_cache.{pb.h ... grpc.pb.h}
|56.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/login/protos/login.pb.{h, cc}
|56.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/sqs/cloud/objcopy_0b6bc206b470900b0b94249ade.o
|56.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/table_service_config.{pb.h ... grpc.pb.h}
|56.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/sqs/cloud/objcopy_c740f52ec3a04fe6a3985bed0b.o
|56.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/sqs/cloud/ydb-tests-functional-sqs-cloud
|56.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/statistics_workload/statistics_workload
|57.0%| [PB] {BAZEL_DOWNLOAD} $(B)/contrib/libs/googleapis-common-protos/google/api/annotations.{pb.h ... grpc.pb.h}
|57.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/tools/nemesis/driver/nemesis
|57.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/incrhuge/ut/ydb-core-blobstorage-incrhuge-ut
|57.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/statistics_workload/objcopy_b4ebb94deb4cea673457b77fcc.o
|56.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/statistics_workload/libpy3statistics_workload.global.a
|56.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/library/flavours/libpy3tests-library-flavours.global.a
|56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/backup/impl/local_partition_reader_ut.cpp
|56.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/backpressure/ut/ydb-core-blobstorage-backpressure-ut
|56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/nodewarden/bind_queue_ut.cpp
|56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet/tablet_req_blockbs_ut.cpp
|56.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/tenants/objcopy_e317764e105a7e9e48b67a7b7e.o
|56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet/resource_broker_ut.cpp
|56.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/tenants/objcopy_5a4a401f33f46c70417a65f584.o
|56.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/nemesis/driver/objcopy_81ae81681ce2388a653cfa5ba3.o
|57.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/tenants/ydb-tests-functional-tenants
|57.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/tenants/objcopy_86ad37399122e504f3e6d8378d.o
|57.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/tenants/aae788a890ddcb1702c659c8aa_raw.auxcpp
|57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/tests/tpch/commands.cpp
|56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/tests/tpch/cmd_drop.cpp
|56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/tests/tpch/main.cpp
|56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/tests/tpch/cmd_run_query.cpp
|56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/tests/tpch/cmd_prepare_scheme.cpp
|56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/tests/tpch/cmd_prepare.cpp
|56.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/tpc/large/objcopy_703c8e1d9a9a2b271b8b995a29.o
|57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/tests/tpch/cmd_run_bench.cpp
|57.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/tpc/large/objcopy_bac05c8b5a79735451f58d9322.o
|57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet/tablet_pipecache_ut.cpp
|57.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/tpc/large/objcopy_52e86d5ee8fadefdbb415ca379.o
|57.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/tpc/large/objcopy_912038ceef7de48e0e15c25307.o
|57.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/sqs/cloud/objcopy_6e0da74b1512d0ffe19c5dc500.o
|57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/backpressure/queue_backpressure_server_ut.cpp
|57.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/nemesis/library/libpy3tools-nemesis-library.global.a
|57.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/tpc/large/objcopy_2194854d9f8cbb3e0ba798b861.o
|57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/keyvalue/keyvalue_ut_trace.cpp
|57.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/tenants/objcopy_951c70889c9404d1662da27090.o
|57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/backpressure/queue_backpressure_client_ut.cpp
|57.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/nemesis/driver/libpy3nemesis.global.a
|56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_base/ut_base.cpp
|56.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/groupinfo/ut/ydb-core-blobstorage-groupinfo-ut
|56.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/tpc/large/ydb-tests-functional-tpc-large
|56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_base/ut_table_pg_types.cpp
|56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/ut_selfheal/main.cpp
|56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_base/ut_info_types.cpp
|56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_base/ut_table_decimal_types.cpp
|57.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/public/sdk/cpp/tests/integration/sessions/ydb-public-sdk-cpp-tests-integration-sessions
|57.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_query_stats.pb.{h, cc}
|57.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_mirror3of4/ydb-core-blobstorage-ut_mirror3of4
|57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/apps/etcd_proxy/service/ut/etcd_service_ut.cpp
|57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/syncer/blobstorage_syncer_broker_ut.cpp
|56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_login/ut_login.cpp
|56.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/cms/objcopy_9ea5b1fb7a4f8e1b0b8d7cf345.o
|56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/syncer/blobstorage_syncer_localwriter_ut.cpp
|56.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/cms/objcopy_a5874452d3dbd6f6e49cd08be6.o
|56.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_sysview_processor.{pb.h ... grpc.pb.h}
|56.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/cms/ydb-tests-functional-cms
|56.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/cms/objcopy_7f9e816a97aaeee837ac316091.o
|56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_base/ut_commit_redo_limit.cpp
|56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/storagepoolmon/ut/storagepoolmon_ut.cpp
|57.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_background_cleaning/ydb-core-tx-schemeshard-ut_background_cleaning
|57.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/cms/objcopy_a38b1580810a6e4b419da99dcf.o
|57.0%| [EN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/metadata/manager/abstract.h_serialized.cpp
|57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/dq/actors/spilling/spilling_file_ut.cpp
|56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/tests/integration/sessions/main.cpp
|56.8%| [EN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/persqueue/read_balancer__balancing.h_serialized.cpp
|56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/nodewarden/distconf_binding.cpp
|56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/ut_selfheal/self_heal_actor_ut.cpp
|56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_move_table_index.cpp
|56.9%| [EN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/provider/yql_kikimr_provider.h_serialized.cpp
|56.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/cms/objcopy_b9fd5c62781ec3b78d111a0ba7.o
|56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/load_test/ut/group_test_ut.cpp
|56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/client/minikql_compile/yql_expr_minikql_compile_ut.cpp
|56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/fq/libs/checkpointing/ut/checkpoint_coordinator_ut.cpp
|56.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yql/dq/actors/spilling/ut/ydb-library-yql-dq-actors-spilling-ut
|56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/workload_service/ut/kqp_workload_service_actors_ut.cpp
|56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/nodewarden/blobstorage_node_warden_ut.cpp
|56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/tx/kqp_snapshot_isolation_ut.cpp
|56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/workload_service/ut/kqp_workload_service_ut.cpp
|56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/tx/kqp_tx_ut.cpp
|56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/nodewarden/node_warden_stat_aggr.cpp
|56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_restore_incremental_backup.cpp
|56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/tx/kqp_sink_locks_ut.cpp
|56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/nodewarden/distconf_mon.cpp
|56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/nodewarden/node_warden_cache.cpp
|56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/nodewarden/node_warden_group_resolver.cpp
|56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/scrub.cpp
|56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/workload_service/ut/kqp_workload_service_tables_ut.cpp
|56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/tx/kqp_sink_mvcc_ut.cpp
|56.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/oltp_workload/oltp_workload
|56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/sysview/kqp_sys_col_ut.cpp
|56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/nodewarden/node_warden_scrub.cpp
|56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_db_changes.cpp
|56.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/tests/tpch/tpch
|56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/tenant_ut_pool.cpp
|56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/scheme_board/subscriber_ut.cpp
|56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/scheme_board/ut_helpers.cpp
|56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/tenant_node_enumeration_ut.cpp
|56.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/sharding/ut/ydb-core-tx-sharding-ut
|56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_subdomain.cpp
|56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/sysview/kqp_sys_view_ut.cpp
|56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/blob_depot_fat.cpp
|56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_restore.cpp
|56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/tenant_ut_local.cpp
|56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/tx/kqp_locks_ut.cpp
|56.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/oltp_workload/objcopy_bcf2142e31bf537964dc063d11.o
|56.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/oltp_workload/libpy3oltp_workload.global.a
|56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/tx/kqp_sink_tx_ut.cpp
|56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/mind/hive/sequencer_ut.cpp
|56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/external_sources/external_data_source_ut.cpp
|56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/perf/kqp_workload_ut.cpp
|56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_indexed_table.cpp
|56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/blob_depot_event_managers.cpp
|56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/tx/kqp_mvcc_ut.cpp
|56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/external_sources/external_source_builder_ut.cpp
|56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/join/kqp_join_order_ut.cpp
|56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/external_sources/object_storage_ut.cpp
|56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/external_sources/iceberg_ddl_ut.cpp
|56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/tx/kqp_locks_tricky_ut.cpp
|56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/blob_depot_test_functions.cpp
|56.9%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/re2/libre2_udf.so
|56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/ydb/ut/ydb_ut.cpp
|56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/node_broker_ut.cpp
|56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/perf/kqp_query_perf_ut.cpp
|56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/protos/unittests.pb.{h, cc}
|56.8%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/udfs/common/datetime/libdatetime_udf.so
|56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/join/kqp_join_ut.cpp
|56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/console_ut_tenants.cpp
|56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/jaeger_tracing_configurator_ut.cpp
|56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/draft/ydb_ymq_v1.{pb.h ... grpc.pb.h}
|56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/scheme_board/ut_helpers.cpp
|56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/service/kqp_service_ut.cpp
|56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/join/kqp_index_lookup_join_ut.cpp
|56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/feature_flags_configurator_ut.cpp
|56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/immediate_controls_configurator_ut.cpp
|56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/configs_dispatcher_ut.cpp
|56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/rm_service/kqp_rm_ut.cpp
|56.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_index/ydb-core-tx-schemeshard-ut_index
|56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/configs_cache_ut.cpp
|56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/join/kqp_flip_join_ut.cpp
|56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/service/kqp_qs_scripts_ut.cpp
|56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/load_test/ut_ycsb.cpp
|56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/storage_pool_info_ut.cpp
|56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/scale_recommender_policy_ut.cpp
|56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/hive_ut.cpp
|56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/hive_impl_ut.cpp
|56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/console_ut_configs.cpp
|56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/net_classifier_updater_ut.cpp
|56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_sequence/ut_sequence.cpp
|56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_backup/ut_backup.cpp
|56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/service/kqp_document_api_ut.cpp
|56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/log_settings_configurator_ut.cpp
|56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/scheme_board/replica_ut.cpp
|56.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/mkql_proto/ut/helpers/libmkql_proto-ut-helpers.a
|56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kesus/tablet/quoter_performance_test/main.cpp
|56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/tiering/ut/ut_object.cpp
|56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_stats/ut_stats.cpp
|56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/ut_sequence/datashard_ut_sequence.cpp
|56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/service/kqp_qs_queries_ut.cpp
|56.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/external_sources/ut/ydb-core-external_sources-ut
|56.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_base/ydb-core-tx-schemeshard-ut_base
|56.8%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/pire/libpire_udf.so
|56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/object_distribution_ut.cpp
|56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/cms/console/modifications_validator_ut.cpp
|56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/scheme_board/populator_ut.cpp
|56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_ut_followers.cpp
|56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/sequenceproxy/sequenceproxy_ut.cpp
|56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/scheme_board/ut_helpers.cpp
|56.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_login/ydb-core-tx-schemeshard-ut_login
|56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/tools/dqrun/dqrun.cpp
|56.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/utils/actor_system/libyql-utils-actor_system.a
|56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/partition_stats/partition_stats_ut.cpp
|56.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/utils/bindings/libyql-utils-bindings.a
|56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kesus/tablet/ut_helpers.cpp
|56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard_build_index__forget.cpp
|56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_move/ut_move.cpp
|56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/gateway/ut/metadata_conversion.cpp
|56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_ru_calculator/ut_ru_calculator.cpp
|56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_filestore_reboots/ut_filestore_reboots.cpp
|56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_external_data_source_reboots/ut_external_data_source_reboots.cpp
|56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_ut_incremental_backup.cpp
|56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_part.cpp
|56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/federated_query/s3/kqp_federated_query_ut.cpp
|56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__root_data_erasure_manager.cpp
|56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/ymq/http/ut/xml_builder_ut.cpp
|56.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/mind/bscontroller/ut_selfheal/ydb-core-mind-bscontroller-ut_selfheal
|56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/sys_view/query_stats/query_stats_ut.cpp
|56.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/cbo/simple/libcore-cbo-simple.a
|56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__table_stats_histogram.cpp
|56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_ut_volatile.cpp
|56.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/draft/persqueue_error_codes.pb.{h, cc}
|56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_table.cpp
|56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_continuous_backup.cpp
|56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_external_table/ut_external_table.cpp
|56.8%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/test/test_import/libtest_import_udf.so
|56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/private_proxy.pb.{h, cc}
|56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_ut_common_pq.cpp
|56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/tiering/ut/ut_tiers.cpp
|56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/tx_allocator_client/ut_helpers.cpp
|56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_backup_collection/ut_backup_collection.cpp
|56.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/pinger.pb.{h, cc}
|56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_fs.cpp
|56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/table_creator/table_creator_ut.cpp
|56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_export_reboots_s3/ut_export_reboots_s3.cpp
|56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/replication/controller/dst_creator_ut.cpp
|56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_subdomain/ut_subdomain.cpp
|56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/federated_query/s3/s3_recipe_ut_helpers.cpp
|56.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/pg_ext/libessentials-core-pg_ext.a
|56.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/qplayer/storage/memory/libqplayer-storage-memory.a
|56.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/url_preprocessing/libessentials-core-url_preprocessing.a
|56.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/url_lister/libessentials-core-url_lister.a
|56.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/qplayer/storage/file/libqplayer-storage-file.a
|56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/tx_allocator/txallocator_ut.cpp
|56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_compaction/ut_compaction.cpp
|56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/federated_query/s3/kqp_federated_scheme_ut.cpp
|56.7%| [PR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/base/generated/runtime_feature_flags.cpp
|56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_continuous_backup/ut_continuous_backup.cpp
|56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/tx_allocator_client/actor_client_ut.cpp
|56.7%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/string/libstring_udf.so
|56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kesus/proxy/ut_helpers.cpp
|56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_cdc_stream/ut_cdc_stream.cpp
|56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/replication/controller/target_discoverer_ut.cpp
|56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kesus/proxy/proxy_actor_ut.cpp
|56.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/test_connection.pb.{h, cc}
|56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_kesus.cpp
|56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_ut_upload_rows.cpp
|56.8%| [CC] {BAZEL_DOWNLOAD, FAILED}
$(S)/ydb/core/tx/schemeshard/ut_data_erasure/ut_data_erasure.cpp |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/tx_allocator/txallocator_ut_helpers.cpp |56.7%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/core/debug_tools/ut/ydb-core-debug_tools-ut |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/federated_query/s3/kqp_s3_plan_ut.cpp |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/tx_proxy/encrypted_storage_ut.cpp |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard_import_scheme_query_executor.cpp |56.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tablet/ut/ydb-core-tablet-ut |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard_import_getters.cpp |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/tx_proxy/proxy_ut_helpers.cpp |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/ycloud/impl/user_account_service_ut.cpp |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_external_table.cpp |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/ycloud/impl/access_service_ut.cpp |56.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_ut_change_collector.cpp |56.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_cms.pb.{h, cc} |56.6%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/services/persqueue_cluster_discovery/cluster_ordering/ut/cluster_ordering-ut |56.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/yql/providers/generic/actors/ut/yql_generic_lookup_actor_ut.cpp |56.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_ut_minikql.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_pdisk_config.grpc.pb.cc |56.7%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/datetime2/libdatetime2_udf.so |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_split_merge_reboots/ut_split_merge_reboots.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/synclog/blobstorage_synclogmem_ut.cpp |56.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/tools/yql_facade_run/libessentials-tools-yql_facade_run.a |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/ycloud/impl/folder_service_ut.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/synclog/blobstorage_synclogmsgimpl_ut.cpp |56.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/kqp_physical.{pb.h ... 
grpc.pb.h} |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/synclog/blobstorage_synclogdsk_ut.cpp |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/tx_proxy/proxy_ut_helpers.cpp |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/ycloud/impl/service_account_service_ut.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/synclog/blobstorage_synclogdata_ut.cpp |56.7%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/solomon/ydb-tests-fq-solomon |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/tx_proxy/proxy_ext_tenant_ut.cpp |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/garbage_collection.cpp |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard_import.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/synclog/codecs_ut.cpp |56.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tracing.grpc.pb.cc |56.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/synclog/blobstorage_synclogmsgwriter_ut.cpp |56.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard_impl.cpp |56.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/scheme_board/ut_subscriber/ydb-core-tx-scheme_board-ut_subscriber |56.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_bsv.cpp |56.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard_svp_migration.cpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tenant_slot_broker.pb.cc |56.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/protos/index.pb.{h, cc} |56.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/ydb_result_set_old.pb.cc |56.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/scheme_board/ut_replica/ydb-core-tx-scheme_board-ut_replica |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard_import__forget.cpp |56.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/coordinator/client/libfmr-coordinator-client.a |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard_path_describer.cpp |56.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/coordinator/impl/libfmr-coordinator-impl.global.a |56.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/job/impl/libfmr-job-impl.a |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/dread_cache_service/ut/caching_proxy_ut.cpp |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/ut_common.cpp |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_login_large/ut_login_large.cpp |56.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/coordinator/interface/libfmr-coordinator-interface.a |56.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/coordinator/impl/libfmr-coordinator-impl.a |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard_build_index__cancel.cpp |56.7%| [EN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/counters/columnshard.h_serialized.cpp |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard_export__list.cpp
>> OperationLog::Size29 [GOOD]
>> OperationLog::Size8 [GOOD]
>> OperationLog::Size1 [GOOD]
>> OperationLog::Size1000
|56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/blocks.cpp |56.7%| [EN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/storage/granule/granule.h_serialized.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/persqueue_v1/ut/kqp_mock.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/persqueue_v1/ut/functions_executor_wrapper.cpp |56.7%| [CC]
{BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard_import__get.cpp |56.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_backup/ydb-core-tx-schemeshard-ut_backup |56.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/lib/secret_masker/dummy/liblib-secret_masker-dummy.a |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_continuous_backup.cpp |56.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/base/generated/codegen/ydb-core-base-generated-codegen |56.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/lib/yt_url_lister/libyt-lib-yt_url_lister.a |56.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/table_data_service/local/libfmr-table_data_service-local.a |56.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/yt_service/interface/libfmr-yt_service-interface.a |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard_export__cancel.cpp |56.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/yt_service/impl/libfmr-yt_service-impl.a |56.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/gateway/fmr/libyt-gateway-fmr.a |56.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/job_factory/impl/libfmr-job_factory-impl.a |56.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/job/interface/libfmr-job-interface.a |56.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/request_options/proto_helpers/libfmr-request_options-proto_helpers.a |56.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/coordinator/interface/proto_helpers/libcoordinator-interface-proto_helpers.a
>> TWeighedOrderingTest::WeighedOrderingTest [GOOD]
>> TWeighedOrderingTest::SimpleSelectionTest [GOOD]
>> TWeighedOrderingTest::WeighedSelectionTest [GOOD]
|56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard_import__create.cpp |56.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/fmr_tool_lib/libyt-fmr-fmr_tool_lib.a |56.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/tools/ytrun/lib/libtools-ytrun-lib.a |56.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/request_options/libyt-fmr-request_options.a |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/base/generated/codegen/main.cpp |56.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/job_factory/interface/libfmr-job_factory-interface.a |56.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_incremental_backup/ydb-core-tx-datashard-ut_incremental_backup |56.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yql/tools/dqrun/dqrun |56.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/yt_service/file/libfmr-yt_service-file.a |56.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/table_data_service/interface/libfmr-table_data_service-interface.a |56.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/worker/impl/libfmr-worker-impl.a |56.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/utils/libyt-fmr-utils.a |56.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/proto/libyt-fmr-proto.a |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/scheme_board/ut_helpers.cpp |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_index.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/channel_purpose.grpc.pb.cc |56.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tx.grpc.pb.cc |56.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tenant_slot_broker.grpc.pb.cc |56.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/http_config.{pb.h ...
grpc.pb.h} |56.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_scrub/ydb-core-blobstorage-ut_blobstorage-ut_scrub |56.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_blob_depot_fat/blobstorage-ut_blobstorage-ut_blob_depot_fat |56.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_private/resourcemanager/folder.{pb.h ... grpc.pb.h}
|56.7%| [TS] {asan, default-linux-x86_64, release} ydb/services/persqueue_cluster_discovery/cluster_ordering/ut/unittest >> TWeighedOrderingTest::WeighedSelectionTest [GOOD]
|56.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/yql_translation_settings.grpc.pb.cc |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_kesus.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_pdisk_config.pb.h_serialized.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/bootstrap.pb.cc |56.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/channel_purpose.pb.cc |56.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/bootstrap.grpc.pb.cc |56.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/long_tx_service.pb.cc |56.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tablet_counters_aggregator.grpc.pb.cc |56.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_sequence/ydb-core-tx-schemeshard-ut_sequence |56.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tablet_pipe.pb.cc |56.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/data_integrity_trails.grpc.pb.cc |56.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/compaction.grpc.pb.cc |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/scheme_board/monitoring_ut.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/mon.pb.cc |56.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_stats/ydb-core-tx-schemeshard-ut_stats
>> OperationLog::Size1000 [GOOD]
>> OperationLog::ConcurrentWrites
|56.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tx_datashard.grpc.pb.cc |56.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_filestore_reboots/ydb-core-tx-schemeshard-ut_filestore_reboots |56.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/scheme_type_operation.grpc.pb.cc |56.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/memory_stats.grpc.pb.cc |56.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/scheme_board_mon.pb.cc |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__fix_bad_paths.cpp |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_resource_pool.cpp |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/data_resolve.cpp |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_sequence.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/stream.grpc.pb.cc |56.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/sys_view.pb.cc |56.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/c6e71ace71af2ca8d4b922a8b7_raw.auxcpp |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__init_schema.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/sqs.pb.cc |56.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/ut_large.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/compile_service_config.grpc.pb.cc |56.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/grpc_status_proxy.grpc.pb.cc |56.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/console.grpc.pb.cc |56.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/9fa27820013d46224e83d7b15c_raw.auxcpp |56.7%| [CC] {BAZEL_DOWNLOAD}
$(B)/ydb/core/protos/config.pb.cc |56.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/database_basic_sausage_metainfo.grpc.pb.cc |56.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_node_broker.grpc.pb.cc
>> OperationLog::ConcurrentWrites [GOOD]
|56.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/profiler.grpc.pb.cc |56.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/resource_broker.pb.cc |56.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/cb24ba84fc65cfce566426e17c_raw.auxcpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/statestorage.pb.cc |56.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/data_events.grpc.pb.cc |56.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_tx_allocator.grpc.pb.cc |56.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/kesus.pb.cc |56.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/kesus.grpc.pb.cc |56.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_tx_proxy.pb.cc |56.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/metadata/initializer/ut/ydb-services-metadata-initializer-ut |56.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/query_stats.pb.cc |56.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_blob_depot.pb.cc |56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/ydb_coordination_v1.{pb.h ... grpc.pb.h} |56.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_disk.{pb.h ... grpc.pb.h} |56.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/database_basic_sausage_metainfo.pb.cc |56.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/24036ddeee78f44ada88a6af81_raw.auxcpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/c7c7ff07e9f9c4958a05360e68_raw.auxcpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/97c7f58baf450747c8b1fbf9ce_raw.auxcpp |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_blob_depot.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/grpc_status_proxy.pb.cc |56.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/feature_flags.grpc.pb.cc |56.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/db_metadata_cache.grpc.pb.cc |56.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/console_base.pb.cc |56.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/3dcc22393238c1e55263500793_raw.auxcpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_columnshard.grpc.pb.cc |56.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/load_test.pb.cc |56.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_sequenceshard.grpc.pb.cc |56.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/console_tenant.pb.cc |56.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/311b492a2703a23add4407a5db_raw.auxcpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_tx_allocator.pb.cc |56.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/ff9bac788a1f26aeea22b55b3b_raw.auxcpp |56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_monitoring.pb.{h, cc} |56.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/external_sources.pb.cc |56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_import.pb.{h, cc}
|56.8%| [TS] {asan, default-linux-x86_64, release} ydb/core/debug_tools/ut/unittest >> OperationLog::ConcurrentWrites [GOOD]
|56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/control_plane_proxy.pb.{h, cc} |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/metadata/initializer/ut/ut_init.cpp |56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/compaction.{pb.h ...
grpc.pb.h} |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_solomon.cpp |56.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_sequence/ydb-core-tx-datashard-ut_sequence |56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_base3.{pb.h ... grpc.pb.h} |56.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_move/ydb-core-tx-schemeshard-ut_move |56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_vdisk_config.{pb.h ... grpc.pb.h} |56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/config.{pb.h ... grpc.pb.h} |56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/bind_channel_storage_pool.{pb.h ... grpc.pb.h} |56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_base.{pb.h ... grpc.pb.h} |56.8%| [EN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tablet_flat/flat_executor_compaction_logic.h_serialized.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/health.grpc.pb.cc |56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_pdisk_config.{pb.h ... grpc.pb.h} |56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/follower_group.{pb.h ... grpc.pb.h} |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_replication.cpp |56.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/engine/ut/ydb-core-engine-ut |56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/bg_tasks/protos/data.pb.{h, cc} |56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/netclassifier.{pb.h ... grpc.pb.h} |56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/msgbus_kv.{pb.h ... grpc.pb.h} |56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/scheme_log.{pb.h ... grpc.pb.h} |56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_issue_message.pb.{h, cc} |56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/aclib/protos/aclib.pb.{h, cc} |56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tablet_tx.{pb.h ... grpc.pb.h} |56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/draft/ydb_object_storage.pb.{h, cc} |56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tablet.{pb.h ... grpc.pb.h} |56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/kqp.{pb.h ... grpc.pb.h} |56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/login/protos/login.pb.{h, cc} |56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/formats/arrow/protos/accessor.pb.{h, cc} |56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/formats/arrow/protos/ssa.pb.{h, cc} |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_data_erasure_reboots/ut_data_erasure_reboots.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/tx_proxy/global.cpp |56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/services/services.{pb.h ... grpc.pb.h} |56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/annotations/sensitive.pb.{h, cc} |56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/draft/persqueue_error_codes.pb.{h, cc} |56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_common.pb.{h, cc} |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/tx_proxy/upload_rows_counters.cpp |56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_private/iam/user_account.{pb.h ... grpc.pb.h} |56.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/sequenceproxy/ut/ydb-core-tx-sequenceproxy-ut |56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tx_datashard.{pb.h ... 
grpc.pb.h} |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/synclog/blobstorage_synclogkeeper_ut.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/tx_proxy/read_table_impl.h_serialized.cpp |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/agent/status.cpp |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_cancel_tx.cpp |56.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/fq/libs/checkpointing/ut/ydb-core-fq-libs-checkpointing-ut |56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_status_codes.pb.{h, cc} |56.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/tx/ydb-core-kqp-ut-tx |56.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/ycloud/impl/ut/ydb-library-ycloud-impl-ut |56.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/load_test/ut/ydb-core-load_test-ut |56.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/keyvalue/ut_trace/ydb-core-keyvalue-ut_trace |56.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/table_creator/ut/ydb-library-table_creator-ut |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/agent/s3.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/ydb_coordination_v1.pb.cc |56.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/scheme_board/ut_populator/ydb-core-tx-scheme_board-ut_populator |56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/audit.pb.{h, cc} |56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/workload_manager_config.{pb.h ... grpc.pb.h} |56.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/ydb_debug_v1.pb.cc |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_ut_locks.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/ydb_federation_discovery_v1.pb.cc |56.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_external_data_source_reboots/schemeshard-ut_external_data_source_reboots |56.8%| [BN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stability/tool/simple_queue |56.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_external_table/ydb-core-tx-schemeshard-ut_external_table |56.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/ydb/backup_ut/ydb-services-ydb-backup_ut |56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/protos/dq_effects.pb.{h, cc} |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/persqueue_v1/ut/pqtablet_mock.cpp |56.8%| [LD] {BAZEL_DOWNLOAD} $(B)/tools/rescompressor/rescompressor |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/public/api/grpc/ydb_config_v1.grpc.pb.cc |56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/common/proto/gateways_config.pb.{h, cc} |56.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/ydb_cms_v1.pb.cc |56.8%| [PB] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/public/api/grpc/ydb_config_v1.{pb.h ... 
grpc.pb.h} |56.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/bind_channel_storage_pool.grpc.pb.cc |56.8%| [EN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/operation_queue_timer.h_serialized.cpp |56.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/executer_actor/ut/ydb-core-kqp-executer_actor-ut |56.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/backup.pb.cc |56.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/bind_channel_storage_pool.pb.cc |56.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_followers/ydb-core-tx-datashard-ut_followers |56.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters.pb.cc |56.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_schemeshard.pb.cc |56.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/netclassifier.pb.cc |56.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/sys_view.grpc.pb.cc |56.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/57b7f33361e7645770379d4a7a_raw.auxcpp |56.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/public/sdk/cpp/tests/unit/client/params/ydb-public-sdk-cpp-tests-unit-client-params |56.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/51f3c6b31da276aa5ff3b1d532_raw.auxcpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/cms.grpc.pb.cc |56.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/50bf539daffd4b04cbbd397d0d_raw.auxcpp |56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/statistics/aggregator/ut/ut_traverse_columnshard.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_disk_color.grpc.pb.cc |56.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_config.grpc.pb.cc |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/agent/query.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/ydb_scripting_v1.grpc.pb.cc |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/persqueue_v1/ut/partition_writer_cache_actor_ut.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/whiteboard_flags.grpc.pb.cc |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/tests/unit/client/params/params_ut.cpp |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/statistics/aggregator/ut/ut_traverse_datashard.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tx_proxy.pb.cc |56.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/pqconfig.pb.cc |56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/persqueue_v1/persqueue_ut.cpp |56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/statistics/aggregator/ut/ut_analyze_columnshard.cpp |56.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/s3/proto/sink.pb.{h, cc} |56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/persqueue_v1/ut/demo_tx.cpp |56.8%| [BN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stability/tool/nemesis |56.8%| [LD] {BAZEL_DOWNLOAD} $(B)/contrib/tools/protoc/plugins/grpc_cpp/grpc_cpp |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/tx_allocator/txallocator__reserve.cpp |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/ydb/backup_ut/ydb_backup_ut.cpp |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__data_erasure_manager.cpp |56.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/scheme/protos/pathid.{pb.h ... 
grpc.pb.h} |56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/agent/storage_get.cpp |56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/tx_proxy/schemereq.cpp |56.9%| [EN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/reader/common/description.h_serialized.cpp |56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/agent/storage_get_block.cpp |56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/statistics/aggregator/ut/ut_analyze_datashard.cpp |56.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_backup_collection/ydb-core-tx-schemeshard-ut_backup_collection |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/ydb_convert/compression_ut.cpp |56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/rate_limiter/rate_limiter_ut.cpp |56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/tx_allocator/txallocator.cpp |56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/tx_allocator_client/actor_client.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/dc1d75c607398802ca53bc60f3_raw.auxcpp |56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_consistent_copy_tables.cpp |56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/persqueue_v1/first_class_src_ids_ut.cpp |56.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tablet_pipe.{pb.h ... grpc.pb.h} |56.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_pdisk_config.{pb.h ... grpc.pb.h} |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/ydb_convert/ydb_convert_ut.cpp |56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/persqueue_v1/persqueue_compat_ut.cpp |56.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/common.pb.{h, cc} |56.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_tx_proxy.{pb.h ... grpc.pb.h} |56.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yaml_config/validator/ut/validator_checks/yaml_config-validator-ut-validator_checks |56.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_vdisk_config.{pb.h ... grpc.pb.h} |56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/yaml_config/validator/ut/validator_checks/validator_checks_ut.cpp |56.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/storage.pb.{h, cc} |56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/persqueue_v1/ut/rate_limiter_test_setup.cpp |56.9%| [PR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/expr_nodes/dq_expr_nodes.{gen.h ... defs.inl.h} |56.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_cms.{pb.h ... grpc.pb.h} |56.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/dsproxy/ut/ydb-core-blobstorage-dsproxy-ut |56.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_auth.pb.{h, cc} |56.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/bind_channel_storage_pool.{pb.h ... grpc.pb.h} |56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/persqueue_v1/ut/topic_service_ut.cpp |56.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blockstore_config.{pb.h ... 
grpc.pb.h} |56.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/rate_limiter/ut/ydb-services-rate_limiter-ut |56.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/synclog/ut/ydb-core-blobstorage-vdisk-synclog-ut |56.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/tx_allocator_client/ut/ydb-core-tx-tx_allocator_client-ut |56.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/persqueue_v1/ut/ydb-services-persqueue_v1-ut |56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/agent/storage_put.cpp |56.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/apps/etcd_proxy/service/ut/ydb-apps-etcd_proxy-service-ut |56.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/subdomains.pb.cc |56.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/minikql_engine.pb.cc |56.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/sysview/ydb-core-kqp-ut-sysview |56.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/change_exchange.{pb.h ... grpc.pb.h} |56.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yql/providers/generic/actors/ut/ydb-library-yql-providers-generic-actors-ut |56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/persqueue_v1/topic_yql_ut.cpp |56.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/datashard_config.{pb.h ... grpc.pb.h} |56.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_keyvalue.{pb.h ... grpc.pb.h} |56.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/ymq/http/ut/ydb-core-ymq-http-ut |56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/persqueue_v1/persqueue_common_ut.cpp |56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/persqueue_v1/ut/partition_writer_cache_actor_fixture.cpp |56.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/ydb_cms_v1.{pb.h ... grpc.pb.h} |56.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_continuous_backup/ydb-core-tx-schemeshard-ut_continuous_backup |56.9%| [EN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/changes/abstract/abstract.h_serialized.cpp |56.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/nc_private/iam/token.{pb.h ... grpc.pb.h} |56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_unsafe.cpp |56.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/test_shard.{pb.h ... 
grpc.pb.h} |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/examples/secondary_index/main.cpp |56.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_cdc_stream/ydb-core-tx-schemeshard-ut_cdc_stream |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/examples/secondary_index/secondary_index_create.cpp |56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard_info_types.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/examples/secondary_index/secondary_index_drop.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/examples/secondary_index/secondary_index.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/examples/secondary_index/secondary_index_update.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/examples/secondary_index/secondary_index_list.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/examples/secondary_index/secondary_index_generate.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/examples/secondary_index/secondary_index_delete.cpp |56.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/tiering/ut/ydb-core-tx-tiering-ut |56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp |56.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_ru_calculator/ydb-core-tx-schemeshard-ut_ru_calculator |56.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_subdomain/ydb-core-tx-schemeshard-ut_subdomain |56.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/draft/field_transformation.pb.{h, cc} |56.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_volatile/ydb-core-tx-datashard-ut_volatile |56.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/public/sdk/cpp/examples/secondary_index/secondary_index |56.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/replication/controller/ut_dst_creator/ydb-core-tx-replication-controller-ut_dst_creator |56.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_export_reboots_s3/ydb-core-tx-schemeshard-ut_export_reboots_s3 |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/public_http/grpc_request_context_wrapper.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tx_columnshard.pb.cc |56.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/kesus.{pb.h ... grpc.pb.h} |56.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_compaction/ydb-core-tx-schemeshard-ut_compaction |56.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/minikql_engine.{pb.h ... grpc.pb.h} |56.9%| [LD] {BAZEL_DOWNLOAD} $(B)/contrib/tools/protoc/plugins/cpp_styleguide/cpp_styleguide |56.9%| [EN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/operation.h_serialized.cpp |56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__notify.cpp |56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/viewer/viewer.cpp |56.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/tx_proxy/ut_encrypted_storage/ydb-core-tx-tx_proxy-ut_encrypted_storage |56.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/backup/impl/ut_local_partition_reader/ydb-core-backup-impl-ut_local_partition_reader |56.9%| [PB] {BAZEL_DOWNLOAD} $(B)/yt/yt_proto/yt/core/misc/proto/error.pb.{h, cc} |56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/ext_index/ut/ut_ext_index.cpp |56.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/protos/actors.pb.{h, cc} |56.9%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/validation/validation |56.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/db_metadata_cache.{pb.h ... 
grpc.pb.h} |56.9%| [PB] {default-linux-x86_64, release, asan} $(B)/ydb/public/api/grpc/ydb_config_v1.{pb.h ... grpc.pb.h} |56.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/draft/ydb_object_storage_v1.{pb.h ... grpc.pb.h} |56.9%| [PB] {BAZEL_DOWNLOAD} $(B)/yt/yt_proto/yt/client/node_tracker_client/proto/node.pb.{h, cc} |56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_apply_build_index.cpp |56.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_private/iam/user_account_service.{pb.h ... grpc.pb.h} |56.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/gateway/ut/ydb-core-kqp-gateway-ut |56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/tx_proxy/upload_rows.cpp |56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/tiering/fetcher.cpp |56.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/maintenance.{pb.h ... grpc.pb.h} |56.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/ydb_import_v1.{pb.h ... grpc.pb.h} |56.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/mon.{pb.h ... grpc.pb.h} |56.9%| [PB] {tool} $(B)/ydb/public/api/protos/ydb_config.pb.{h, cc} |56.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/data_integrity_trails.{pb.h ... grpc.pb.h} |56.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/scheme/protos/type_info.{pb.h ... grpc.pb.h} |56.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/feature_flags.{pb.h ... grpc.pb.h} |56.9%| [PB] {BAZEL_DOWNLOAD} $(B)/contrib/libs/googleapis-common-protos/google/rpc/status.{pb.h ... grpc.pb.h} |56.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tablet_database.{pb.h ... grpc.pb.h} |56.9%| [PB] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/file_storage/proto/file_storage.pb.{h, cc} |56.9%| [EN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/ymq/actor/fifo_cleanup.h_serialized.cpp |56.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/ydb_federation_discovery_v1.{pb.h ... grpc.pb.h} |56.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/load_test/ut_ycsb/ydb-core-load_test-ut_ycsb |56.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_data_erasure/ydb-core-tx-schemeshard-ut_data_erasure |56.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/profiler.{pb.h ... grpc.pb.h} |56.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/statestorage.{pb.h ... grpc.pb.h} |56.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/workload_manager_config.{pb.h ... 
grpc.pb.h} |56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/agent/agent.cpp |56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/ydb_convert/table_description_ut.cpp |56.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_change_collector/ydb-core-tx-datashard-ut_change_collector |56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/auth/owners.cpp |56.9%| [EN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/datashard_active_transaction.h_serialized.cpp |56.9%| [PB] {BAZEL_DOWNLOAD} $(B)/yt/yt_proto/yt/core/misc/proto/guid.pb.{h, cc} |56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/tx_proxy/proxy_impl.cpp |56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/public/sdk/cpp/src/client/federated_topic/ut/basic_usage_ut.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/workload/tpch/ut/queries_ut.cpp |56.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/workload_service/ut/ydb-core-kqp-workload_service-ut |56.9%| [EN] {BAZEL_DOWNLOAD} $(S)/ydb/library/workload/kv/kv.h |56.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/public/sdk/cpp/tests/unit/client/discovery_mutator/sdk-cpp-tests-unit-client-discovery_mutator |56.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/tx_allocator/ut/ydb-core-tx-tx_allocator-ut |56.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/nodewarden/ut/ydb-core-blobstorage-nodewarden-ut |56.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/ymq/proto/events.pb.{h, cc} |56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard_build_index__get.cpp |56.9%| [EN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/ymq/actor/events.h_serialized.cpp |56.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/private_api.pb.{h, cc} |56.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/mind/ut/ydb-core-mind-ut |56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/olap/common/common.cpp |56.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/bootstrapper.{pb.h ... 
grpc.pb.h} |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_effective_acl.cpp |56.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/tools/idx_test/idx_test |56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/viewer/viewer_topic_data.cpp |56.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/perf/ydb-core-kqp-ut-perf |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/tests/unit/client/discovery_mutator/discovery_mutator_ut.cpp |56.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_split_merge_reboots/ydb-core-tx-schemeshard-ut_split_merge_reboots |56.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/resource_manager.pb.{h, cc} |56.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/mind/hive/ut/ydb-core-mind-hive-ut |56.9%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/dwarf_backtrace/registry/libcpp-dwarf_backtrace-registry.global.a |56.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/workload/tpch/ut/ydb-library-workload-tpch-ut |56.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_login_large/ydb-core-tx-schemeshard-ut_login_large |56.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/idx_test/objcopy_2073c82ff4f331dc0428c98194.o |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/tests/tools/idx_test/main.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/maintenance.grpc.pb.cc |56.9%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/tools/solomon_emulator_grpc/solomon_recipe_grpc |56.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/replication/controller/ut_target_discoverer/replication-controller-ut_target_discoverer |56.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_upload_rows/ydb-core-tx-datashard-ut_upload_rows |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/lib/idx_test/ut/idx_test_data_provider_ut.cpp |56.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/service/ydb-core-kqp-ut-service |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/ingress/blobstorage_ingress_matrix_ut.cpp |56.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/scheme_board/ut_monitoring/ydb-core-tx-scheme_board-ut_monitoring |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/ingress/blobstorage_ingress_ut.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tablet_database.grpc.pb.cc |56.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/ydb_table_impl.grpc.pb.cc |56.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/generic/proto/source.pb.{h, cc} |56.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/import.grpc.pb.cc |56.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tenant_pool.grpc.pb.cc |56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/config_shards/update.cpp |56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard_identificators.cpp |56.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/rm_service/ut/ydb-core-kqp-rm_service-ut |56.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/join/ydb-core-kqp-ut-join |56.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/syncer/ut/ydb-core-blobstorage-vdisk-syncer-ut |56.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/ext_index/ut/ydb-services-ext_index-ut |56.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/statistics.grpc.pb.cc |56.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/local.grpc.pb.cc |56.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/labeled_counters.grpc.pb.cc |56.9%| [LD] {BAZEL_DOWNLOAD, FAILED} 
$(B)/ydb/library/yaml_config/validator/ut/validator/ydb-library-yaml_config-validator-ut-validator |56.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/kqp_physical.grpc.pb.cc |56.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/yaml_config/validator/ut/validator/validator_ut.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/load_test.grpc.pb.cc |56.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/cms.pb.cc |56.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_minikql/ydb-core-tx-datashard-ut_minikql |56.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/replication.grpc.pb.cc |56.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/msgbus_health.pb.cc |56.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/scheme_board.pb.cc |56.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/98fc1dffd4f8628627620dee22_raw.auxcpp |57.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/hive.{pb.h ... grpc.pb.h} |57.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_pq.pb.cc |57.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/9c4c70f2d51847454f8f89aa01_raw.auxcpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/console_base.grpc.pb.cc |56.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_testshard.grpc.pb.cc |56.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/kqp_stats.pb.cc |56.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/grpc.pb.cc |56.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/node_whiteboard.pb.cc |57.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_private/iam/iam_token_service.{pb.h ... grpc.pb.h} |57.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/e2b4a11a7ecdc1f7d8ac73e2ad_raw.auxcpp |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_backup_collection.cpp |57.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_pdisk_config.pb.cc |57.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/import.pb.cc |57.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/tests/postgres_integrations/go-libpq/d78d0f74a3f72be1016c0cf8cf_raw.auxcpp |57.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/postgres_integrations/go-libpq/objcopy_4352b8b3e3cf61532c865b371b.o |57.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/postgres_integrations/go-libpq/objcopy_3ddbad334a37a829b3772ddb05.o |56.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_hive.grpc.pb.cc |56.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/public/lib/idx_test/ut/ydb-public-lib-idx_test-ut |56.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/postgres_integrations/go-libpq/objcopy_95b3eecc97c453f0c55c456659.o |57.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_sysview_processor.pb.cc |57.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_keyvalue.pb.cc |57.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/flat_tx_scheme.pb.cc |57.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/data_events.pb.cc |57.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_coordinator.grpc.pb.cc |57.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/b80936acda5492be599ad9f6b6_raw.auxcpp |57.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/postgres_integrations/go-libpq/ydb-tests-postgres_integrations-go-libpq |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/s3_scan.cpp |57.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_bs_controller.grpc.pb.cc |57.0%| [EN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blob_depot/schema.h_serialized.cpp |57.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/checkpoint_coordinator.pb.{h, cc} |57.0%| [LD] {BAZEL_DOWNLOAD, FAILED} 
$(B)/ydb/core/tx/tx_proxy/ut_ext_tenant/ydb-core-tx-tx_proxy-ut_ext_tenant |57.0%| [LD] {BAZEL_DOWNLOAD} $(B)/tools/cpp_style_checker/cpp_style_checker |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/pgproxy/pg_proxy_ut.cpp |57.0%| [PB] {BAZEL_DOWNLOAD} $(B)/contrib/libs/opentelemetry-proto/opentelemetry/proto/common/v1/common.{pb.h ... grpc.pb.h} |57.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/metrics.{pb.h ... grpc.pb.h} |57.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blob_depot_config.{pb.h ... grpc.pb.h} |57.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_disk.{pb.h ... grpc.pb.h} |57.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/public/sdk/cpp/src/client/federated_topic/ut/ydb-public-sdk-cpp-src-client-federated_topic-ut |57.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_config.{pb.h ... grpc.pb.h} |57.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/s3_settings.{pb.h ... grpc.pb.h} |57.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/subdomains.{pb.h ... grpc.pb.h} |57.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tx_scheme.{pb.h ... grpc.pb.h} |57.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/whiteboard_flags.{pb.h ... grpc.pb.h} |57.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/protos/services_common.pb.{h, cc} |56.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/kqp_physical.{pb.h ... grpc.pb.h} |56.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/tools/combiner_perf/bin/main.cpp |56.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/proto/dq_transport.pb.{h, cc} |56.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_coordination.pb.{h, cc} |56.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/viewer/wb_merge.cpp |56.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/nodewarden/distconf_console.cpp |56.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/testing.cpp |56.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_query_stats.pb.{h, cc} |56.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/nodewarden/distconf_fsm.cpp |56.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/ydb_keyvalue_v1.grpc.pb.cc |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/actions/cancelable_context.cpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/actions/codicil_guarded_invoker.cpp |56.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/tools/combiner_perf/libkqp-tools-combiner_perf.a |56.4%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/yt/yt/core/libyt-yt-core.a |56.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/actions/invoker_pool.cpp |56.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/scheme_board.{pb.h ... grpc.pb.h} |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/bus/tcp/local_bypass.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/tree_visitor.cpp |56.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage.{pb.h ... 
grpc.pb.h} |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__describe_scheme.cpp |56.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/tools/combiner_perf/bin/combiner_perf |56.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/nodewarden/distconf_dynamic.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/providers/dq/actors/grouped_issues_ut.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/tree_builder.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/service_discovery/service_discovery.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/providers/dq/actors/actors_ut.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ypath/tokenizer.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/node.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/ypath_client.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/interned_attributes.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/node_detail.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/backoff_strategy.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/retrying_periodic_executor.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/request_complexity_limiter.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/permission.cpp |56.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/ingress/ut/ydb-core-blobstorage-vdisk-ingress-ut |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/action_queue.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/actions/current_invoker.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/protobuf_helpers.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/actions/future.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/actions/cancelation_token.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/bus/public.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/actions/invoker_util.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/actions/invoker_detail.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/virtual.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/bus/tcp/ssl_helpers.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/bus/tcp/client.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/bus/tcp/dispatcher.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/bus/tcp/packet.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/compression/brotli.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/bus/tcp/dispatcher_impl.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/bus/tcp/ssl_context.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/compression/bzip2.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/bus/tcp/config.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/compression/public.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/bus/tcp/server.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/compression/zlib.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/compression/lz.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/notify_manager.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/bus/tcp/connection.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/async_stream_pipe.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/compression/dictionary_codec.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/compression/stream.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/async_barrier.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/compression/codec.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/compression/lzma.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} 
$(S)/yt/yt/core/compression/snappy.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/async_semaphore.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/coroutine.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/compression/zstd.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/async_looper.cpp |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/async_rw_lock.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/delayed_executor.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/fair_share_invoker_queue.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/lease_manager.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/config.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/async_stream.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/fair_share_invoker_pool.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/fiber.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/execution_stack.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/fair_share_queue_scheduler_thread.cpp |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/fair_share_action_queue.cpp |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/invoker_alarm.cpp |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/fair_share_thread_pool.cpp |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/fls.cpp |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_bsvolume/ut_bsvolume.cpp >> common.cpp::clang_format [GOOD] >> common.h::clang_format [GOOD] |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/fiber_manager.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/fair_throttler.cpp |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/nonblocking_batcher.cpp |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/scheduler_thread.cpp |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/pollable_detail.cpp |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/fiber_scheduler_thread.cpp |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/invoker_queue.cpp |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/logging/log_writer_detail.cpp |57.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/public/tools/local_ydb/local_ydb |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/proc.cpp |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/profiling_helpers.cpp |57.0%| [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/federated_query/common/clang_format >> common.h::clang_format [GOOD] |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/periodic_executor.cpp |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/thread_affinity.cpp |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/periodic_yielder.cpp |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/quantized_executor.cpp |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/scheduled_executor.cpp |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/thread_pool.cpp |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/throughput_throttler.cpp |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/propagating_storage.cpp |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/crypto/config.cpp |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/system_invokers.cpp |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/single_queue_scheduler_thread.cpp |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/dns/dns_resolver.cpp |57.0%| [CC] {BAZEL_DOWNLOAD} 
$(S)/yt/yt/core/concurrency/suspendable_action_queue.cpp |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/two_level_fair_share_thread_pool.cpp |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/crypto/tls.cpp |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/thread_pool_detail.cpp |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/json/helpers.cpp |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/json/json_parser.cpp |57.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/cms/console/ut/ydb-core-cms-console-ut |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/thread_pool_poller.cpp |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/logging/formatter.cpp |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/dns/config.cpp |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/logging/logger_owner.cpp |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/json/json_callbacks.cpp |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/crypto/crypto.cpp |57.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tools/tsserver/tsserver |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/json/json_writer.cpp |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/json/config.cpp |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/dns/ares_dns_resolver.cpp |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/logging/system_log_event_provider.cpp |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/logging/fluent_log.cpp |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/logging/file_log_writer.cpp |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/logging/compression.cpp |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/pool_allocator.cpp |57.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_data_erasure_reboots/ydb-core-tx-schemeshard-ut_data_erasure_reboots |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/adjusted_exponential_moving_average.cpp |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/arithmetic_formula.cpp |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/logging/random_access_gzip.cpp |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/logging/stream_log_writer.cpp |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/tools/tsserver/main.cpp |57.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/canonical/objcopy_461999da7ba13deab5689c18ec.o |57.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/canonical/objcopy_17cef60c2dd0eb7ea46181ba87.o |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/logging/stream_output.cpp |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/logging/zstd_compression.cpp |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/logging/serializable_logger.cpp |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/configurable_singleton_def.cpp |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/cache_config.cpp |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/codicil.cpp |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/checksum.cpp |57.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/serializable/ydb-tests-functional-serializable |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/bit_packed_unsigned_vector.cpp |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/bitmap.cpp |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/bit_packing.cpp |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/blob_output.cpp |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/bloom_filter.cpp |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/digest.cpp |57.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/canonical/objcopy_065e9244d685c2b8f0ab66e414.o |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/parser_helpers.cpp |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/hazard_ptr.cpp 
|57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/hedging_manager.cpp |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/logging/config.cpp |57.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__tenant_data_erasure_manager.cpp |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/error.cpp |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/public.cpp |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/coro_pipe.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/config.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/fair_share_hierarchical_queue.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/ref_counted_tracker.cpp |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/linear_probe.cpp |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/histogram.cpp |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/pattern_formatter.cpp |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/shutdown.cpp |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/id_generator.cpp |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/system_attribute_provider.cpp |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/random.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/authentication_identity.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/fs.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/serialize.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/throttling_channel.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/ref_counted_tracker_profiler.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/process_exit_profiler.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__state_changed_reply.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/relaxed_mpsc_queue.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/net/connection.cpp |57.1%| [EN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/transactions/tx_controller.h_serialized.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/ref_counted_tracker_statistics_producer.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/net/address.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/serialize_dump.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/net/listener.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/statistic_path.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/zerocopy_output_writer.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/net/socket.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/utf8_decoder.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/slab_allocator.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/profiling/timing.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/net/helpers.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/statistics.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/phoenix/context.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/net/public.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/authenticator.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/net/dialer.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/net/local_address.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/net/config.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/phoenix/type_def.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/phoenix/type_registry.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/phoenix/load.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/phoenix/descriptors.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/message.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/phoenix/schemas.cpp |57.1%| 
[CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/yson_struct_update.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/balancing_channel.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/bus/channel.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/message_format.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/client.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/yson_struct_detail.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/stream.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/local_server.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/protocol_version.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/caching_channel_factory.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/dispatcher.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/channel_detail.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/hedging_channel.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/serialized_channel.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/bus/server.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/retrying_channel.cpp |57.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/protobuf/dynamic_prototype/libcpp-protobuf-dynamic_prototype.a |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/static_channel_factory.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/null_channel.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/config.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/per_key_request_queue_provider.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/local_channel.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/tracing/public.cpp |57.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/pgproxy/ut/ydb-core-pgproxy-ut |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/response_keeper.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/request_queue_provider.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/viable_peer_registry.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/dynamic_channel_pool.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/helpers.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/public.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/peer_discovery.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut_pg/flat_database_pg_ut.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/service.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ypath/token.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/roaming_channel.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/static_service_dispatcher.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ypath/stack.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/string.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/tracing/allocation_tags.cpp |57.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/threading/spin_wait_slow_path_logger.cpp |57.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ypath/helpers.cpp |57.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/server_detail.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/threading/thread.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/utilex/random.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/forwarding_consumer.cpp |57.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/protobuf/yql/libcpp-protobuf-yql.a |57.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/ypath_resolver.cpp |57.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/yson_struct.cpp |57.2%| [CC] {BAZEL_DOWNLOAD} 
$(S)/yt/yt/core/yson/attribute_consumer.cpp |57.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/async_consumer.cpp |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_lock.cpp |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/coro_tx.cpp |57.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/ypath_service.cpp |57.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/parser.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/string_builder_stream.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/attributes_stripper.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/service_detail.cpp |57.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/null_consumer.cpp |57.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/stream.cpp |57.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/depth_limiting_yson_consumer.cpp |57.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/async_writer.cpp |57.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/hulldb/fresh/snap_vec_ut.cpp |57.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/protobuf_interop.cpp |57.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/consumer.cpp |57.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/config.cpp |57.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/lexer.cpp |57.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/list_verb_lazy_yson_consumer.cpp |57.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/string_merger.cpp |57.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/tokenizer.cpp |57.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/ypath_detail.cpp |57.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/pull_parser_deserialize.cpp |57.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/protobuf_interop_unknown_fields.cpp |57.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/producer.cpp |57.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/protobuf_interop_options.cpp |57.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/attribute_consumer.cpp |57.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/size.cpp |57.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytalloc/statistics_producer.cpp |57.2%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/public/tools/local_ydb/objcopy_8d2ea3c78a255bb4c87c2fc54a.o |57.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/tools/local_ydb/libpy3local_ydb.global.a |57.2%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/serializable/objcopy_51b071d7746089933668451b33.o |57.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/token_writer.cpp |57.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/string_filter.cpp |57.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/token.cpp |57.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/syntax_checker.cpp |57.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/ypath_filtering_consumer.cpp |57.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/pull_parser.cpp |57.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/exception_helpers.cpp |57.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/yson_builder.cpp |57.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/writer.cpp |57.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/ypath_designated_consumer.cpp |57.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytalloc/config.cpp |57.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/library/yaml_config/validator/ut/validator_checks/validator_checks_ut.cpp |57.2%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/serializable/objcopy_445797246443360525d31550d1.o |57.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/canonical/ydb-tests-functional-canonical |57.2%| [CC] {BAZEL_DOWNLOAD} 
$(S)/yt/yt/core/ytalloc/bindings.cpp |57.2%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/serializable/objcopy_3ea8aa67e7c24c4f0e3b0406b9.o |57.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/ephemeral_attribute_owner.cpp |57.2%| [CC] {tool} $(B)/ydb/public/api/protos/ydb_config.pb.cc |57.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/attributes.cpp |57.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/serialize.cpp |57.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/convert.cpp |57.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/request_complexity_limits.cpp |57.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/resource_broker.grpc.pb.cc |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/nodewarden/distconf_validate.cpp |57.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/attribute_filter.cpp |57.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/ephemeral_node_factory.cpp |57.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/external_sources.grpc.pb.cc |57.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/pdiskfit.grpc.pb.cc |57.2%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/library/operation_id/protos/operation_id.pb.{h, cc} |57.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/helpers.cpp |57.2%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/compute/common/ut/objcopy_caf222d14387d4810b5cb3e853.o |57.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/compute/common/ut/config_ut.cpp |57.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/service_combiner.cpp |57.2%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tx.{pb.h ... grpc.pb.h} |57.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_xxport__helpers.cpp |57.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/ydb_monitoring_v1.pb.cc |57.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tx_proxy.grpc.pb.cc |57.2%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/ydb_auth_v1__intpy3___pb2_grpc.py.gcum.yapyc3 |57.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/metrics.pb.cc |57.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/compute/common/ut/utils_ut.cpp |57.2%| [PR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/expr_nodes/yql_expr_nodes.{gen.h ... defs.inl.h} |57.2%| [PR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/include/llvm/IR/Attributes.inc{, .d} |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_external_table.cpp |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/nodewarden/distconf_invoke_state_storage.cpp |57.2%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/pending_fetcher.pb.{h, cc} |57.2%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/draft/ydb_datastreams_v1.{pb.h ... grpc.pb.h} |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/agent/storage_status.cpp |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/tx_allocator/txallocator_impl.cpp |57.2%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/draft/ydb_backup.pb.{h, cc} |57.2%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/serverless_proxy_config.{pb.h ... grpc.pb.h} |57.2%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/export/protos/cursor.pb.{h, cc} |57.2%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/activation.pb.{h, cc} |57.2%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/nc_private/accessservice/access_service.{pb.h ... grpc.pb.h} |57.2%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_columnshard.{pb.h ... 
grpc.pb.h} |57.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/sys_view/partition_stats/ut/ydb-core-sys_view-partition_stats-ut |57.2%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/solomon/proto/dq_solomon_shard.pb.{h, cc} |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/op_commit_blob_seq.cpp |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__background_cleaning.cpp |57.2%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/split_merge/objcopy_5accfe00d45fb7ebcc30e116b2.o |57.3%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/split_merge/objcopy_93665db601a12d4842de4565e2.o |57.2%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/split_merge/objcopy_b783a1a2aacb855daa1e55fad6.o |57.2%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/sqs/with_quotas/objcopy_245adf3e28f56e6467e034d9f2.o |57.2%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/sqs/with_quotas/objcopy_31d605682329607481eb568ed0.o |57.2%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/sqs/with_quotas/objcopy_7648c2519d02b8456f762efc4b.o |57.2%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_schemeshard.{pb.h ... grpc.pb.h} |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__find_subdomain_path_id.cpp |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard_build_index.cpp |57.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/whiteboard_disk_states.pb.cc |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/s3.cpp |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__table_stats.cpp |57.2%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/viewer/protos/viewer.pb.{h, cc} |57.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_fs.cpp |57.3%| [PB] {BAZEL_DOWNLOAD} $(B)/yql/essentials/protos/common.pb.{h, cc} |57.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage.pb.cc |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/tx_proxy/read_table_impl.cpp |57.2%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/script_execution/objcopy_1aeeb50f676472f975830c135d.o |57.2%| [PB] {BAZEL_DOWNLOAD} $(B)/yql/essentials/public/issue/protos/issue_message.pb.{h, cc} |57.2%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/script_execution/objcopy_bcbbd2d8f2367d5f3ed5199234.o |57.2%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_bs_controller.{pb.h ... grpc.pb.h} |57.2%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/script_execution/objcopy_f05ead59375a9db120b95dd730.o |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/agent/read.cpp |57.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_subdomain.cpp |57.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/client/minikql_compile/ut/ydb-core-client-minikql_compile-ut |57.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kesus/tablet/quoter_performance_test/quoter_performance_test |57.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/datashard/split_merge/ydb-tests-datashard-split_merge |57.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_move_tables.cpp |57.3%| [PB] {BAZEL_DOWNLOAD} $(B)/yt/yt_proto/yt/core/misc/proto/protobuf_helpers.pb.{h, cc} |57.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/script_execution/ydb-tests-functional-script_execution |57.2%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/proxy_service/proto/result_set_meta.pb.{h, cc} |57.2%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/datashard_load.{pb.h ... 
grpc.pb.h} |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/runtime/kqp_scan_spilling_ut.cpp |57.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/sqs/with_quotas/ydb-tests-functional-sqs-with_quotas |57.2%| [PB] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/blobstorage_distributed_config.{pb.h ... grpc.pb.h} |57.2%| [EN] {BAZEL_DOWNLOAD} $(S)/ydb/library/workload/stock/stock.h |57.2%| [PB] {BAZEL_DOWNLOAD} $(B)/yt/yt_proto/yt/core/rpc/proto/rpc.pb.{h, cc} |57.2%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/protos/actors.pb.{h, cc} |57.2%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/channel_purpose.{pb.h ... grpc.pb.h} |57.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard_path.cpp |57.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_view.cpp |57.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/tx_proxy/upload_rows_common_impl.cpp |57.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/nodewarden/distconf_persistent_storage.cpp |57.2%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tablet_counters.{pb.h ... grpc.pb.h} |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/olap/operations/alter/abstract/converter.cpp |57.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/fq/libs/ydb/ut/ydb-core-fq-libs-ydb-ut |57.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_kesus.{pb.h ... grpc.pb.h} |57.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/mvp/oidc_proxy/openid_connect.cpp |57.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/annotations/sensitive.pb.{h, cc} |57.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/apps/etcd_proxy/main.cpp |57.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/runtime/kqp_scan_logging_ut.cpp |57.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/examples/topic_reader/transaction/application.cpp |57.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/examples/topic_reader/transaction/options.cpp |57.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/examples/topic_reader/transaction/main.cpp |57.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/apps/etcd_proxy/proxy.cpp |57.2%| [PB] {BAZEL_DOWNLOAD} $(B)/yt/yt_proto/yt/core/tracing/proto/tracing_ext.pb.{h, cc} |57.2%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/checkpoint_storage/proto/graph_description.pb.{h, cc} |57.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/mvp/oidc_proxy/libydb-mvp-oidc_proxy.a |57.2%| [EN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/portions/portion_info.h_serialized.cpp |57.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yql/providers/dq/actors/ut/ydb-library-yql-providers-dq-actors-ut |57.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/dsproxy/ut_strategy/ydb-core-blobstorage-dsproxy-ut_strategy |57.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/storagepoolmon/ut/ydb-core-blobstorage-storagepoolmon-ut |57.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/tools/yqlrun/lib/libtools-yqlrun-lib.a |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/viewer/json_handlers_vdisk.cpp |57.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yql/tools/yqlrun/yqlrun.cpp |57.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/tools/yqlrun/http/libtools-yqlrun-http.a |57.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/sys_view/query_stats/ut/ydb-core-sys_view-query_stats-ut |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/olap/bg_tasks/adapter/adapter.cpp |57.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kafka_proxy/ut/ut_kafka_functions.cpp |57.3%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/core/config/tools/protobuf_plugin/config_proto_plugin |57.3%| 
[CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tx_datashard.pb.cc |57.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kafka_proxy/ut/kafka_test_client.cpp |57.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/sequenceshard/public/ut/ydb-core-tx-sequenceshard-public-ut |57.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kafka_proxy/ut/ut_serialization.cpp |57.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/common/aba998449c2518e3272d8e87fb_raw.auxcpp |57.2%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/common/objcopy_b34c6a8a5501db208eebc5d8e4.o |57.2%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/common/objcopy_e32003454342267c2263935765.o |57.3%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/common/objcopy_9a3dabea847c21e0b4fa4cda26.o |57.3%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/common/objcopy_cca8dcd66462c9ca3c57fcb78e.o |57.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/bootstrapper.pb.cc |57.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/query_stats.grpc.pb.cc |57.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/fq/common/ydb-tests-fq-common |57.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/a43a1b049def7b52107bfe6841_raw.auxcpp |57.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_bsv.cpp |57.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard_export__get.cpp |57.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/flat_tx_scheme.grpc.pb.cc |57.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/statestorage.grpc.pb.cc |57.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/labeled_counters.pb.cc |57.3%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/mem_alloc/objcopy_15e284a8ecb30c90903e842e70.o |57.3%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/mem_alloc/objcopy_12d01741952bd4afa836364d84.o |57.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tablet_flat/ut_pg/ydb-core-tablet_flat-ut_pg |57.3%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/mem_alloc/objcopy_cee1e02beaf827051149b5ca30.o |57.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/hulldb/fresh/fresh_appendix_ut.cpp |57.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/library/yaml_config/validator/ut/validator/validator_ut.cpp |57.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_private/resourcemanager/transitional/folder_service.{pb.h ... grpc.pb.h} |57.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/table_stats.grpc.pb.cc |57.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/common/protos/blob_range.pb.{h, cc} |57.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/netclassifier.grpc.pb.cc |57.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/feature_flags.pb.cc |57.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_mediator.grpc.pb.cc |57.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_hive.pb.cc |57.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/6d8f4fd29ff9c4a1004f1b19e5_raw.auxcpp |57.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/8750dadd4a699d4221d697bf03_raw.auxcpp |57.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/public/sdk/cpp/examples/topic_reader/transaction/read_from_topic_in_transaction |57.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_datashard.grpc.pb.cc |57.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_pq.grpc.pb.cc |57.3%| [PB] {BAZEL_DOWNLOAD} $(B)/yt/yt_proto/yt/core/ytree/proto/attributes.pb.{h, cc} |57.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/protos/interconnect.pb.{h, cc} |57.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/index_builder.pb.cc |57.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/base.{pb.h ... 
grpc.pb.h} |57.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/aacd711c16330b321c41dc634a_raw.auxcpp |57.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/mvp/oidc_proxy/oidc_proxy_ut.cpp |57.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_cansel_build_index.cpp |57.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/actors/protos/dq_status_codes.pb.{h, cc} |57.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/yql/tools/yqlrun/yqlrun |57.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/grpc.{pb.h ... grpc.pb.h} |57.3%| [BN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stability/tool/olap_workload |57.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/ut_strategy/strategy_ut.cpp |57.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yaml_config/static_validator/ut/example_configs/static_validator-ut-example_configs |57.3%| [LD] {BAZEL_DOWNLOAD} $(B)/tools/rescompiler/rescompiler |57.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/hulldb/fresh/ut/ydb-core-blobstorage-vdisk-hulldb-fresh-ut |57.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/federated_query/s3/ydb-core-kqp-ut-federated_query-s3 |57.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/yaml_config/static_validator/ut/example_configs/test.cpp |57.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/pqconfig.{pb.h ... grpc.pb.h} |57.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/blobs_action/protos/events.pb.{h, cc} |57.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/fq/mem_alloc/ydb-tests-fq-mem_alloc |57.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/ydb_topic_v1.pb.cc |57.3%| [PR] {default-linux-x86_64, release, asan} $(B)/ydb/public/api/protos/00ccceedc2861088d9671c050e_raw.auxcpp |57.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/ydb_result_set_old.grpc.pb.cc |57.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/hulldb/fresh/fresh_segment_ut.cpp |57.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/hulldb/fresh/fresh_data_ut.cpp |57.3%| [PR] {default-linux-x86_64, release, asan} $(B)/ydb/core/protos/d52903a0693870f83d0bbe0ab8_raw.auxcpp |57.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/closed_interval_set_ut.cpp |57.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/tiering/common.cpp |57.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/kqp.{pb.h ... grpc.pb.h} |57.3%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/ydb_cms_v1__intpy3___pb2_grpc.py.gcum.yapyc3 |57.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/viewer/json_pipe_req.cpp |57.3%| [PB] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/blobstorage_distributed_config.{pb.h ... grpc.pb.h} |57.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/examples/topic_reader/simple/main.cpp |57.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_locks/ydb-core-tx-datashard-ut_locks |57.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/arrow/kqp_types_arrow_ut.cpp |57.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/dsproxy/ut_ftol/dsproxy_fault_tolerance_ut.cpp |57.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/config/init/init_ut.cpp |57.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/sys_view/ut_large/ydb-core-sys_view-ut_large |57.3%| [PB] {default-linux-x86_64, release, asan} $(B)/ydb/core/protos/blobstorage_distributed_config.{pb.h ... 
grpc.pb.h} |57.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/mvp/oidc_proxy/bin/main.cpp |57.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/mvp/oidc_proxy/ut/ydb-mvp-oidc_proxy-ut |57.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/apps/etcd_proxy/proto/auth.{pb.h ... grpc.pb.h} |57.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/config/init/ut/ydb-core-config-init-ut |57.3%| [PR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/result/expr_nodes/yql_res_expr_nodes.{gen.h ... defs.inl.h} |57.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/auth.{pb.h ... grpc.pb.h} |57.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/nc_private/iam/token_exchange_service.{pb.h ... grpc.pb.h} |57.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tablet_counters.pb.cc |57.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blob_depot/ut/ydb-core-blob_depot-ut |57.3%| [PB] {BAZEL_DOWNLOAD} $(B)/yt/yt_proto/yt/core/misc/proto/hyperloglog.pb.{h, cc} |57.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/233de20735e53f1ba88dbd8fb0_raw.auxcpp |57.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/data_trash.cpp |57.3%| [EN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/ymq/actor/queue_schema.h_serialized.cpp |57.3%| [PB] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/public/api/protos/ydb_config.pb.{h, cc} |57.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/activation.pb.{h, cc} |57.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/public/sdk/cpp/examples/topic_reader/simple/simple_persqueue_reader |57.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/whiteboard_disk_states.{pb.h ... grpc.pb.h} |57.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/draft/ymq.pb.{h, cc} |57.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/row_dispatcher/format_handler/ut/format_handler_ut.cpp |57.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/row_dispatcher/format_handler/ut/topic_filter_ut.cpp |57.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_cdc_stream.cpp |57.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/arrow/kqp_arrow_in_channels_ut.cpp |57.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/row_dispatcher/format_handler/ut/topic_parser_ut.cpp |57.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__backup_collection_common.cpp |57.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/nodewarden/distconf.cpp |57.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/given_id_range_ut.cpp |57.3%| [PB] {default-linux-x86_64, release, asan} $(B)/ydb/public/api/protos/ydb_config.pb.{h, cc} |57.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/mvp/oidc_proxy/bin/mvp_oidc_proxy |57.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/tx_proxy/snapshotreq.cpp |57.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/export.grpc.pb.cc |57.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/data_decommit.cpp |57.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_private/resourcemanager/folder_service.{pb.h ... 
grpc.pb.h} |57.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/persqueue/dread_cache_service/ut/ydb-core-persqueue-dread_cache_service-ut |57.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/viewer/json_handlers_operation.cpp |57.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kesus/proxy/ut/ydb-core-kesus-proxy-ut |57.3%| [EN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/executer_actor/kqp_executer.h_serialized.cpp |57.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/test/tool/surg/main.cpp |57.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/security/certificate_check/cert_utils_ut.cpp |57.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_rmdir.cpp |57.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard_export.cpp |57.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard_build_index__progress.cpp |57.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/security/certificate_check/cert_check_ut.cpp |57.3%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/restarts/objcopy_0359848ae21601186c5b0d9873.o |57.3%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/restarts/objcopy_277b7e8f79021687bec95be8db.o |57.3%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/restarts/objcopy_afdf6d60c4f76ae91a235d460b.o |57.3%| [PB] {BAZEL_DOWNLOAD} $(B)/yql/essentials/utils/log/proto/logger_config.pb.{h, cc} |57.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_extsubdomain.cpp |57.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_replication.cpp |57.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/data_integrity_trails.pb.cc |57.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/ydb_convert/ut/ydb-core-ydb_convert-ut |57.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_assign_bsv.cpp |57.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/common/protos/snapshot.pb.{h, cc} |57.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/proto/dq_state_load_plan.pb.{h, cc} |57.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/backup.grpc.pb.cc |57.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/viewer/wb_aggregate.cpp |57.3%| [PB] {tool} $(B)/ydb/core/protos/blobstorage_distributed_config.{pb.h ... grpc.pb.h} |57.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/olap/column_families/schema.cpp |57.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/agent/request.cpp |57.3%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/multi_plane/objcopy_b8aa61f402be805d2e3e9e75a2.o |57.3%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/multi_plane/objcopy_d23500649301df2a8de48ba70d.o |57.3%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/multi_plane/objcopy_c65a9d5efe13dc05c1466090ba.o |57.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/filestore_config.{pb.h ... grpc.pb.h} |57.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/6beed6347509e595b05df0ee05_raw.auxcpp |57.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/restarts/ydb-tests-functional-restarts |57.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/ydb_query_v1.pb.cc |57.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/driver_lib/run/auto_config_initializer_ut.cpp |57.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_tx_allocator.{pb.h ... grpc.pb.h} |57.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/flat_tx_scheme.{pb.h ... grpc.pb.h} |57.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_replication.{pb.h ... 
grpc.pb.h} |57.3%| [CC] {default-linux-x86_64, release, asan} $(B)/ydb/public/api/protos/00ccceedc2861088d9671c050e_raw.auxcpp |57.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/sqs.grpc.pb.cc |57.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/7cc1936770e12fc0f11609c5f7_raw.auxcpp |57.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/config/protos/marker.pb.{h, cc} |57.3%| [PB] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/public/api/grpc/ydb_config_v1__intpy3___pb2.py{ ... i} |57.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/read_only_pdisk.cpp |57.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/gateways.pb.{h, cc} |57.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/flat_scheme_op.{pb.h ... grpc.pb.h} |57.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/control_plane_storage.pb.{h, cc} |57.3%| [CC] {default-linux-x86_64, release, asan} $(B)/ydb/core/protos/d52903a0693870f83d0bbe0ab8_raw.auxcpp |57.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/db_metadata_cache.pb.cc |57.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/external_sources.{pb.h ... grpc.pb.h} |57.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_topic.pb.{h, cc} |57.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/fq/multi_plane/ydb-tests-fq-multi_plane |57.3%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/dump_restore/objcopy_ce0222bab1634be9f9a52f715d.o |57.3%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/dump_restore/objcopy_da2669c2228a88c83cd32d45da.o |57.3%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/dump_restore/objcopy_ec94bbf9004678001f4c8195e3.o |57.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/table_stats.{pb.h ... grpc.pb.h} |57.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/sys_view/auth/group_members.cpp |57.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__publish_to_scheme_board.cpp |57.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/drivemodel.grpc.pb.cc |57.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kafka_proxy/ut/ut_transaction_coordinator.cpp |57.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/mon.grpc.pb.cc |57.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard_backup.cpp |57.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/protos/blobstorage_distributed_config.pb.cc |57.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kafka_proxy/ut/ut_transaction_actor.cpp |57.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/bad3613215fd255df20e874137_raw.auxcpp |57.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_private/iam/oauth_request.{pb.h ... grpc.pb.h} |57.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_audit_log_fragment.cpp |57.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/ydb_topic_v1.grpc.pb.cc |57.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/ydb_import_v1.pb.cc |57.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/draft/ydb_persqueue_v1.{pb.h ... grpc.pb.h} |57.4%| [PB] {default-linux-x86_64, release, asan} $(B)/ydb/public/api/grpc/ydb_config_v1__intpy3___pb2.py{ ... i} |57.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_value.pb.{h, cc} |57.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/private_api.pb.{h, cc} |57.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/kqp_stats.{pb.h ... 
grpc.pb.h} |57.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/control_plane_proxy.pb.{h, cc} |57.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/formats/arrow/protos/fields.pb.{h, cc} |57.4%| [PR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/expr_nodes/dqs_expr_nodes.{gen.h ... defs.inl.h} |57.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tx.pb.cc |57.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/grpc_streaming/ut/grpc/streaming_service.{pb.h ... grpc.pb.h} |57.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/draft/ydb_view.pb.{h, cc} |57.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/datashard/dump_restore/ydb-tests-datashard-dump_restore |57.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/filestore_config.pb.cc |57.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kafka_proxy/ut/actors_ut.cpp |57.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/statistics/aggregator/ut/ydb-core-statistics-aggregator-ut |57.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_bsvolume/ydb-core-tx-schemeshard-ut_bsvolume |57.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kafka_proxy/ut/ut_protocol.cpp |57.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/ydb/table_split_ut/ydb-services-ydb-table_split_ut |57.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tablet_flat/test/tool/surg/surg |57.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yql/providers/generic/connector/tests/datasource/ydb/connector-tests-datasource-ydb |57.3%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/generic/connector/tests/datasource/ydb/objcopy_c93b2f849b5f6ee8532dd4d6fd.o |57.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/google/benchmark/librestricted-google-benchmark.a |57.3%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/generic/connector/tests/datasource/ydb/objcopy_cf816152ca64b2ca8294df441b.o |57.3%| [EN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/opt/kqp_query_plan.h_serialized.cpp |57.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/ydb/ydb_table_split_ut.cpp |57.3%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/generic/connector/tests/datasource/ydb/objcopy_1e0fb16076b5a3105119e574a8.o |57.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/viewer/json_handlers_viewer.cpp |57.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/generic/connector/tests/datasource/ydb/0e6ff9458826896f7a7b1b2eaf_raw.auxcpp |57.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_sequence.cpp |57.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/ttl/ydb-tests-functional-ttl |57.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/op_apply_config.cpp |57.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/agent/proxy.cpp |57.3%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/ttl/objcopy_965640ca94893d27c182c611e2.o |57.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/given_id_range.cpp |57.3%| [PY] {default-linux-x86_64, release, asan} $(B)/ydb/public/api/grpc/ydb_config_v1__intpy3___pb2_grpc.py.gcum.yapyc3 |57.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_external_data_source.cpp |57.3%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/ttl/objcopy_c068ee86eb127df13256bfbe45.o |57.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/jaeger_tracing/sampler_ut.cpp |57.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/jaeger_tracing/throttler_ut.cpp |57.3%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/ttl/objcopy_0aefef587c181350d3a25f70e0.o |57.3%| [CC] {BAZEL_DOWNLOAD, FAILED} 
$(S)/ydb/core/tx/schemeshard/olap/bg_tasks/transactions/tasks_list.cpp |57.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/providers/s3/common/util_ut.cpp |57.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__borrowed_compaction.cpp |57.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yql/providers/s3/common/ut/ydb-library-yql-providers-s3-common-ut |57.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kesus/tablet/quoter_resource_tree_ut.cpp |57.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/blob_depot.cpp |57.3%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/async_replication/objcopy_08a4b5d38a76e21591db0c3424.o |57.3%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/async_replication/objcopy_f4b44a5d280d0f27f5ffd278e8.o |57.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/datashard/async_replication/ydb-tests-datashard-async_replication |57.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__clean_pathes.cpp |57.3%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/async_replication/objcopy_e2637cea0f2e4db109b364a246.o |57.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/nodewarden/node_warden_impl.cpp |57.2%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/protos/fq_private.pb.{h, cc} |57.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/fq/libs/compute/common/ut/ydb-core-fq-libs-compute-common-ut |57.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/stream.pb.cc |57.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/row_dispatcher.pb.{h, cc} |57.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/jaeger_tracing/ut/ydb-core-jaeger_tracing-ut |57.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/agent/channel_kind.cpp |57.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/simple_queue/tests/ydb-tests-stress-simple_queue-tests |57.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/simple_queue/simple_queue |57.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blob_depot/agent/blob_mapping_cache.cpp |57.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__init_populator.cpp |57.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__init.cpp |57.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__make_access_database_no_inheritable.cpp |57.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yql/providers/generic/connector/tests/join/yql-providers-generic-connector-tests-join |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_pdiskfit/pdiskfit/pdiskfit.cpp |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/client/server/msgbus_server_pq_metarequest_ut.cpp |57.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/tools/nemesis/ut/ydb-tests-tools-nemesis-ut |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard_cdc_stream_common.cpp |57.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/compatibility/ydb-tests-compatibility |57.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_fs.cpp |57.3%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/simple_queue/objcopy_6c8bedcdc8efb835a928b278ce.o |57.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/simple_queue/libpy3simple_queue.global.a |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation.cpp |57.2%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/generic/connector/tests/join/objcopy_7bb4c5cc9026f2b8034570c51c.o |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} 
$(S)/ydb/core/tx/schemeshard/olap/operations/alter/abstract/object.cpp |57.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/hive/ydb-tests-functional-hive |57.2%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/generic/connector/tests/join/objcopy_eff72a5efd2fa66b3363e16886.o |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/yql/kqp_pragma_ut.cpp |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/yql/kqp_yql_ut.cpp |57.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/generic/connector/tests/join/2486a40dc27b3deeed2a20d6d7_raw.auxcpp |57.2%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/nemesis/ut/objcopy_c98e5b95c64b8486a12f10d408.o |57.2%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/nemesis/ut/objcopy_927a1f7611cf94fb1cd21ef8cf.o |57.2%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/nemesis/ut/objcopy_b06d27009e49b9ba3df883a226.o |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard_shard_deleter.cpp |57.2%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/compatibility/objcopy_5a4d32e486cd65c99d0b765c68.o |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/yql/kqp_scripting_ut.cpp |57.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/fq/generic/analytics/ydb-tests-fq-generic-analytics |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_just_reject.cpp |57.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/library/yaml_config/static_validator/ut/example_configs/test.cpp |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/nodewarden/node_warden_vdisk.cpp |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kesus/tablet/tablet_ut.cpp |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kafka_proxy/ut/metarequest_ut.cpp |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/olap/aggregations_ut.cpp |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/ut/ut_with_sdk/mirrorer_ut.cpp |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/cost/kqp_cost_ut.cpp |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/external_sources/s3/ut/s3_aws_credentials_ut.cpp |57.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/hulldb/compstrat/ut/ydb-core-blobstorage-vdisk-hulldb-compstrat-ut |57.2%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/hive/objcopy_48884f6b745ced4d3e78997cb1.o |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kesus/tablet/ut_helpers.cpp |57.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/test/tool/perf/colons.cpp |57.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/balance_coverage/balance_coverage_builder_ut.cpp |57.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tablet_flat/test/tool/perf/table-perf |57.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/generic/analytics/edaf602b2011baa1519a223d63_raw.auxcpp |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_move_table.cpp |57.2%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/generic/connector/tests/join/objcopy_fa785ada0d264f44db0c3df820.o |57.2%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/generic/analytics/objcopy_1007df29dec27b0b7a1587d49f.o |57.2%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/generic/analytics/objcopy_b91160bcee04ad1f57e80af064.o |57.2%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/generic/analytics/objcopy_1326afc143d720f2af434cd836.o |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/olap/tiering_ut.cpp |57.2%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/hive/objcopy_5333c1912ecbac0f64ff97551f.o |57.2%| [LD] {BAZEL_DOWNLOAD, FAILED} 
$(B)/ydb/tests/functional/limits/ydb-tests-functional-limits |57.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/checkpoint_storage/ut/ydb_checkpoint_storage_ut.cpp |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/olap/blobs_sharing_ut.cpp |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/ut/ut_with_sdk/balancing_ut.cpp |57.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/checkpoint_storage/ut/ydb_state_storage_ut.cpp |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/ut/ut_with_sdk/topic_ut.cpp |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/olap/write_ut.cpp |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/olap/compression_ut.cpp |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/ut/ut_with_sdk/autoscaling_ut.cpp |57.2%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/limits/objcopy_d52256d4fa9895f38df6030445.o |57.2%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/limits/objcopy_40779f0570229cef213050a4fa.o |57.2%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/limits/objcopy_14c03c6aecffbe39cb01ddf2ed.o |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/olap/sparsed_ut.cpp |57.1%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/hive/objcopy_aebf7c73fcaf6a54715cc177c8.o |57.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/runtime/ydb-core-kqp-ut-runtime |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/persqueue/ut/ut_with_sdk/commitoffset_ut.cpp |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/olap/indexes_ut.cpp |57.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/testing/gbenchmark/libcpp-testing-gbenchmark.a |57.2%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/simple_queue/tests/objcopy_3df021aac8504049c53286aea0.o |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/olap/optimizer_ut.cpp |57.2%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/compatibility/objcopy_97ec267d3b3cd1125d3d798a91.o |57.2%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/compatibility/objcopy_65ac58c27d43a55d0ea4eda626.o |57.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/test/tool/perf/main.cpp |57.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/simple_queue/workload/libpy3stress-simple_queue-workload.global.a |57.1%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/simple_queue/tests/objcopy_2492aafb6862566a2398c9f27e.o |57.1%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/simple_queue/tests/objcopy_e66920085df69f6f7e41547063.o |57.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_pdiskfit/pdiskfit/pdiskfit |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/olap/datatime64_ut.cpp |57.2%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/tablet_flat/flat_executor.pb.{h, cc} |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/vdisk/hulldb/compstrat/hulldb_compstrat_ut.cpp |57.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/olap/delete_ut.cpp |57.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/olap/decimal_ut.cpp |57.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/olap/locks_ut.cpp |58.0%| [CC] {default-linux-x86_64, release, asan} $(B)/ydb/public/api/grpc/ydb_config_v1.pb.cc |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/data_integrity/kqp_data_integrity_trails_ut.cpp |58.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_read_only_pdisk/ut_blobstorage-ut_read_only_pdisk |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/olap/json_ut.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/replication/controller/stream_creator_ut.cpp |58.1%| [CC] 
{BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/statistics/database/ut/ut_database.cpp |58.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/apps/etcd_proxy/etcd_proxy |58.0%| [PR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/generated/dispatch_op.h |58.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/graph/shard/ut/shard_ut.cpp |58.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/arrow/ydb-core-kqp-ut-arrow |58.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/fq/libs/checkpoint_storage/ut/gc_ut.cpp |58.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/pg/kqp_pg_ut.cpp |58.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/olap/indexes/update.cpp |58.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/olap/kqp_olap_ut.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_base_reboots/ut_base_reboots.cpp |58.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/testlib/actors/test_runtime_ut.cpp |58.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/olap/statistics_ut.cpp |58.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/engines/ut/helper.cpp |58.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_ut_snapshot.cpp |58.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/testlib/actors/ut/ydb-core-testlib-actors-ut |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/tx_proxy/schemereq_ut.cpp |58.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/olap/sys_view_ut.cpp |58.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/fq/libs/result_formatter/result_formatter_ut.cpp |58.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/fq/libs/checkpoint_storage/ut/storage_service_ydb_ut.cpp |58.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/olap/kqp_olap_stats_ut.cpp |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/pg/pg_catalog_ut.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/race.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__background_compaction.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/ut/ut_script.cpp |60.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/olap/clickbench_ut.cpp |61.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_external_table_reboots/ut_external_table_reboots.cpp |61.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/ut/ut_insert_table.cpp |61.3%| [CC] {BAZEL_UPLOAD} $(B)/ydb/public/api/protos/00ccceedc2861088d9671c050e_raw.auxcpp |61.5%| [CC] {BAZEL_UPLOAD} $(B)/ydb/core/protos/d52903a0693870f83d0bbe0ab8_raw.auxcpp |61.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/library/yaml_config/static_validator/builders.cpp |61.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_topic_splitmerge/ut_topic_splitmerge.cpp |61.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_index.cpp |61.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/client/object_storage_listing_ut.cpp |61.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/library/fyamlcpp/fyamlcpp.cpp |61.9%| [CC] {default-linux-x86_64, release, asan} $(B)/ydb/public/api/protos/ydb_config.pb.cc |62.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/library/yaml_config/validator/validator.cpp |62.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_restore/ut_restore.cpp |62.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/library/yaml_config/validator/configurators.cpp |62.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/library/yaml_config/public/yaml_config.cpp |62.2%| [CC] {BAZEL_UPLOAD} 
$(S)/ydb/library/yaml_config/static_validator/ut/test.cpp |62.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/library/yaml_config/validator/validator_builder.cpp |62.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/external_sources/hive_metastore/ut/common.cpp |62.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/ymq/ut/queue_id_ut.cpp |62.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/ymq/ut/params_ut.cpp |62.4%| [CC] {default-linux-x86_64, release, asan} $(B)/ydb/public/api/grpc/ydb_config_v1.grpc.pb.cc |62.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_extsubdomain_reboots/ut_extsubdomain_reboots.cpp |62.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/external_sources/hive_metastore/libcore-external_sources-hive_metastore.a |62.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/ut/ut_program.cpp |62.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_ut_read_iterator_ext_blobs.cpp |62.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/replication/controller/assign_tx_id_ut.cpp |62.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/client/locks_ut.cpp |62.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_ut_read_iterator.cpp |62.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_ut_reassign.cpp |62.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/viewer/ut/ut_utils.cpp |62.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/external_sources/hive_metastore/ut/hive_metastore_client_ut.cpp |62.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_replication_reboots/ut_replication_reboots.cpp |62.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/security/certificate_check/ut/ydb-core-security-certificate_check-ut |63.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/fq/libs/row_dispatcher/format_handler/ut/ydb-core-fq-libs-row_dispatcher-format_handler-ut |63.0%| [CC] {BAZEL_UPLOAD} $(B)/ydb/public/api/grpc/ydb_config_v1.pb.cc |63.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/client/flat_ut.cpp |63.0%| [CC] {BAZEL_UPLOAD} $(B)/ydb/public/api/protos/ydb_config.pb.cc |63.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/library/yaml_config/static_validator/ut/example_configs/test.cpp |63.0%| [CC] {BAZEL_UPLOAD} $(B)/ydb/public/api/grpc/ydb_config_v1.grpc.pb.cc |63.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/library/yaml_config/validator/validator_checks.cpp |63.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/columnshard/engines/ut/ut_logs_engine.cpp |63.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/library/fyamlcpp/fyamlcpp_ut.cpp |63.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/client/cancel_tx_ut.cpp |63.0%| [CC] {BAZEL_UPLOAD} $(B)/ydb/public/api/protos/ydb_config.pb.cc |63.0%| [PY] {BAZEL_UPLOAD} $(B)/ydb/core/protos/blobstorage_distributed_config__intpy3___pb2_grpc.py.p5ju.yapyc3 |63.0%| [PY] {BAZEL_UPLOAD} $(B)/ydb/public/api/grpc/ydb_config_v1__intpy3___pb2_grpc.py.gcum.yapyc3 |63.0%| [PY] {BAZEL_UPLOAD} $(B)/ydb/public/api/protos/ydb_config__intpy3___pb2.py.jnwv.yapyc3 |63.0%| [PR] {BAZEL_UPLOAD} $(B)/ydb/public/api/protos/00ccceedc2861088d9671c050e_raw.auxcpp |63.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/library/yaml_config/validator/ut/validator/validator_ut.cpp |63.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_view/ut_view.cpp |63.0%| [PB] {BAZEL_UPLOAD} $(B)/ydb/public/api/grpc/ydb_config_v1.{pb.h ... 
grpc.pb.h} |63.0%| [PR] {BAZEL_UPLOAD} $(B)/ydb/core/protos/d52903a0693870f83d0bbe0ab8_raw.auxcpp |63.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/library/yaml_config/validator/ut/validator_checks/validator_checks_ut.cpp |63.0%| [PB] {BAZEL_UPLOAD} $(B)/ydb/public/api/protos/ydb_config.pb.{h, cc} |63.1%| [PB] {BAZEL_UPLOAD} $(B)/ydb/public/api/grpc/ydb_config_v1__intpy3___pb2.py{ ... i} |63.1%| [PB] {BAZEL_UPLOAD} $(B)/ydb/core/protos/blobstorage_distributed_config__intpy3___pb2.py{ ... i} |63.1%| [PY] {BAZEL_UPLOAD} $(B)/ydb/core/protos/blobstorage_distributed_config__intpy3___pb2.py.p5ju.yapyc3 |63.0%| [PB] {BAZEL_UPLOAD} $(B)/ydb/public/api/protos/ydb_config__intpy3___pb2.py{, i} |63.0%| [PB] {BAZEL_UPLOAD} $(B)/ydb/public/api/protos/ydb_config.pb.{h, cc} |63.0%| [PB] {BAZEL_UPLOAD} $(B)/ydb/core/protos/blobstorage_distributed_config.{pb.h ... grpc.pb.h} |63.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_index_build_reboots/ut_index_build_reboots.cpp |63.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/protobuf_udf/libessentials-minikql-protobuf_udf.a |63.1%| [PB] {BAZEL_UPLOAD} $(B)/ydb/core/protos/blobstorage_distributed_config.{pb.h ... grpc.pb.h} |63.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/src/client/topic/ut/trace_ut.cpp |63.1%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/topic/ut/objcopy_1406195445f45d950dda89fcd8.o |63.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_ut_change_exchange.cpp |63.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tablet_flat/benchmark/b_part.cpp |63.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_external_data_source/ut_external_data_source.cpp |63.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/read_only_vdisk.cpp |63.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/scheme/kqp_scheme_ut.cpp |63.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kafka_proxy/ut/ydb-core-kafka_proxy-ut |63.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/protobuf/libprotobuf_udf.global.a |63.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_cdc_stream_reboots/ut_cdc_stream_reboots.cpp |62.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/external_sources/hive_metastore/hive_metastore_native/libexternal_sources-hive_metastore-hive_metastore_native.a |63.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/file/libfile_udf.global.a |63.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/address_classification/net_classifier_ut.cpp |63.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/external_sources/hive_metastore/ut/hive_metastore_fetcher_ut.cpp |63.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/tools/query_replay_yt/query_compiler.cpp |63.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_ttl/ut_ttl_utility.cpp |63.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/streaming/libstreaming_udf.global.a |63.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_ut_trace.cpp |63.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/viewer/viewer_ut.cpp |63.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/common_ut.cpp |63.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/effects/kqp_effects_ut.cpp |63.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/effects/kqp_inplace_update_ut.cpp |63.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/scheme/kqp_acl_ut.cpp |63.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/yql/ydb-core-kqp-ut-yql |63.2%| [CC] {BAZEL_DOWNLOAD, FAILED} 
$(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/compress_executor_ut.cpp |63.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/dsproxy/ut_ftol/ydb-core-blobstorage-dsproxy-ut_ftol |63.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/data/kqp_read_null_ut.cpp |63.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_backup_collection_reboots/ut_backup_collection_reboots.cpp |63.8%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/public/api/protos/libpy3api-protos.global.a |63.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tools/query_replay_yt/query_replay_yt |64.0%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/library/yaml_config/validator/liblibrary-yaml_config-validator.a |64.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/tools/query_replay_yt/query_replay.cpp |64.5%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/public/api/grpc/libapi-grpc.a |64.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tools/blobsan/blobsan |64.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_external_data_source/ydb-core-tx-schemeshard-ut_external_data_source |64.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/cost/ydb-core-kqp-ut-cost |64.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/tools/query_replay_yt/main.cpp |65.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/tx_proxy/ut_schemereq/ydb-core-tx-tx_proxy-ut_schemereq |65.1%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/protos/libpy3ydb-core-protos.global.a |65.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/viewer/topic_data_ut.cpp |65.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/fq/ut_integration/ut_utils.cpp |65.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/public/sdk/cpp/src/client/topic/ut/basic_usage_ut.cpp |65.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_snapshot/ydb-core-tx-datashard-ut_snapshot |66.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_ut_write.cpp |66.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/tools/blobsan/main.cpp |66.4%| [AR] {BAZEL_UPLOAD} $(B)/ydb/public/api/protos/libpy3api-protos.global.a |66.7%| [CC] {default-linux-x86_64, release, asan} $(B)/ydb/core/protos/blobstorage_distributed_config.grpc.pb.cc |66.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_serverless/ut_serverless.cpp |66.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/public/sdk/cpp/src/client/topic/ut/topic_to_table_ut.cpp |66.9%| [CC] {BAZEL_UPLOAD} $(B)/ydb/core/protos/blobstorage_distributed_config.grpc.pb.cc |66.9%| [AR] {default-linux-x86_64, release, asan} $(B)/library/cpp/build_info/liblibrary-cpp-build_info.a |67.0%| [AR] {default-linux-x86_64, release, asan} $(B)/library/cpp/svnversion/liblibrary-cpp-svnversion.a |67.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/replication/controller/ut_stream_creator/tx-replication-controller-ut_stream_creator |67.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/balance_coverage/ut/ydb-core-tx-balance_coverage-ut |67.3%| [AR] {BAZEL_UPLOAD} $(B)/ydb/library/yaml_config/validator/liblibrary-yaml_config-validator.a |67.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/basic_usage_ut.cpp |67.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/retry_policy_ut.cpp |67.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/ydb-public-sdk-cpp-src-client-persqueue_public-ut |67.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/effects/kqp_write_ut.cpp |67.4%| [CC] {BAZEL_DOWNLOAD, FAILED} 
$(S)/ydb/core/tx/schemeshard/ut_ttl/ut_ttl.cpp |67.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/public/sdk/cpp/src/client/topic/ut/local_partition_ut.cpp |67.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/statistics/database/ut/ydb-core-statistics-database-ut |67.5%| [AR] {default-linux-x86_64, release, asan} $(B)/yt/yql/providers/yt/provider/libproviders-yt-provider.a |67.5%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/protos/libpy3ydb-core-protos.global.a |67.5%| [AR] {BAZEL_UPLOAD, SKIPPED} $(B)/yt/yql/providers/yt/provider/libproviders-yt-provider.a |67.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_ut_init.cpp |67.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/persqueue_v1/ut/rate_limiter_test_setup.cpp |67.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/driver_lib/run/ut/ydb-core-driver_lib-run-ut |68.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/scheme/kqp_constraints_ut.cpp |68.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_vector_index_build_reboots/ut_vector_index_build_reboots.cpp |68.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/ut/effects/kqp_immediate_effects_ut.cpp |68.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/client/server/ut/ydb-core-client-server-ut |68.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/graph/shard/ut/ydb-core-graph-shard-ut |68.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/replication/controller/ut_assign_tx_id/core-tx-replication-controller-ut_assign_tx_id |68.8%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/library/yaml_config/static_validator/liblibrary-yaml_config-static_validator.a |68.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/public/sdk/cpp/src/client/topic/ut/describe_topic_ut.cpp |68.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_view/ydb-core-tx-schemeshard-ut_view |68.9%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/library/fyamlcpp/libydb-library-fyamlcpp.a |69.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_restore/ydb-core-tx-schemeshard-ut_restore |69.2%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/library/yaml_config/public/liblibrary-yaml_config-public.a |69.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_extsubdomain_reboots/ydb-core-tx-schemeshard-ut_extsubdomain_reboots |69.3%| [AR] {BAZEL_UPLOAD} $(B)/ydb/library/fyamlcpp/libydb-library-fyamlcpp.a |69.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/public/lib/ydb_cli/commands/ydb_storage_config.cpp |69.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/public/sdk/cpp/src/client/config/config.cpp |69.6%| [AR] {BAZEL_UPLOAD} $(B)/ydb/library/yaml_config/static_validator/liblibrary-yaml_config-static_validator.a |69.7%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/public/sdk/cpp/src/client/config/libsrc-client-config.a |69.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_external_table_reboots/ydb-core-tx-schemeshard-ut_external_table_reboots |69.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/public/lib/ydb_cli/commands/ydb_storage_config.cpp |69.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/tests/fq/pq_async_io/ut/dq_pq_write_actor_ut.cpp |70.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/tests/fq/pq_async_io/ut/dq_pq_read_actor_ut.cpp |70.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/tests/fq/pq_async_io/ut/dq_pq_rd_read_actor_ut.cpp |70.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_group_reconfiguration/ut_group_reconfiguration |70.6%| [CC] {BAZEL_DOWNLOAD, FAILED} 
$(S)/ydb/core/tx/schemeshard/ut_user_attributes/ut_user_attributes.cpp |70.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/persqueue_v1/ut/new_schemecache_ut/ydb-services-persqueue_v1-ut-new_schemecache_ut |70.7%| [AR] {BAZEL_UPLOAD} $(B)/ydb/library/yaml_config/public/liblibrary-yaml_config-public.a |70.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_replication_reboots/ydb-core-tx-schemeshard-ut_replication_reboots |70.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/persqueue/ut/ut_with_sdk/ydb-core-persqueue-ut-ut_with_sdk |70.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_ut_object_storage_listing.cpp |71.0%| [AR] {BAZEL_UPLOAD} $(B)/ydb/public/sdk/cpp/src/client/config/libsrc-client-config.a |71.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_cdc_stream_reboots/ydb-core-tx-schemeshard-ut_cdc_stream_reboots |71.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_ut_order.cpp |71.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/public/sdk/cpp/src/client/topic/ut/ydb-public-sdk-cpp-src-client-topic-ut |71.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/public/sdk/cpp/src/client/config/config.cpp |71.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kesus/tablet/ut/ydb-core-kesus-tablet-ut |71.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_ut_erase_rows.cpp |71.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_ut_read_table.cpp |71.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/compression_ut.cpp |71.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/viewer/ut/ydb-core-viewer-ut |71.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_replication/ut_replication.cpp |71.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_index_build_reboots/ydb-core-tx-schemeshard-ut_index_build_reboots |71.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/read_session_ut.cpp |71.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_vector_index_build_reboots/tx-schemeshard-ut_vector_index_build_reboots |71.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_change_exchange/ydb-core-tx-datashard-ut_change_exchange |71.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/persqueue_v1/persqueue_new_schemecache_ut.cpp |71.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/olap/ydb-core-kqp-ut-olap |71.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_base_reboots/ydb-core-tx-schemeshard-ut_base_reboots |71.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/ymq/ut/ydb-core-ymq-ut |71.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_auditsettings/ut_auditsettings.cpp |71.6%| [AR] {BAZEL_UPLOAD} $(B)/ydb/public/api/grpc/libapi-grpc.a |71.7%| [AR] {tool} $(B)/ydb/public/api/protos/libapi-protos.a |71.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_read_iterator/ydb-core-tx-datashard-ut_read_iterator |71.7%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/public/api/protos/libapi-protos.a |71.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/persqueue_v1/persqueue_common_new_schemecache_ut.cpp |71.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/scheme/abstract/index_info.cpp |71.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/coordinator/coordinator_volatile_ut.cpp |71.7%| [LD] {BAZEL_DOWNLOAD, FAILED} 
$(B)/ydb/core/tx/schemeshard/ut_topic_splitmerge/ydb-core-tx-schemeshard-ut_topic_splitmerge |71.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/scheme/abstract/index_info.cpp |71.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/coordinator/coordinator_ut.cpp |71.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_olap/ut_olap.cpp |71.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/lib/ydb_cli/topic/topic_write.cpp |71.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/yaml_config/validator/ut/validator_checks/yaml_config-validator-ut-validator_checks |71.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/lib/ydb_cli/topic/topic_write_ut.cpp |71.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/fq/ut_integration/fq_ut.cpp |71.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/yaml_config/static_validator/ut/ydb-library-yaml_config-static_validator-ut |71.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_reassign/ydb-core-tx-datashard-ut_reassign |71.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/data_integrity/ydb-core-kqp-ut-data_integrity |71.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/fq/ut_integration/ydb-services-fq-ut_integration |71.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/yaml_config/static_validator/ut/example_configs/static_validator-ut-example_configs |71.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/yaml_config/validator/ut/validator_builder/yaml_config-validator-ut-validator_builder |71.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/fyamlcpp/ut/ydb-library-fyamlcpp-ut |71.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/fq/libs/result_formatter/ut/ydb-core-fq-libs-result_formatter-ut |71.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/export/session/storage/tier/storage.cpp |71.6%| [CC] {default-linux-x86_64, release, asan} $(B)/ydb/core/protos/blobstorage_distributed_config.pb.cc |71.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/export/session/storage/tier/storage.cpp |71.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/change_record_cdc_serializer.cpp |71.6%| [CC] {BAZEL_UPLOAD} $(B)/ydb/core/protos/blobstorage_distributed_config.pb.cc |71.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_trace/ydb-core-tx-datashard-ut_trace |71.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/change_record_cdc_serializer.cpp |71.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_backup_collection_reboots/tx-schemeshard-ut_backup_collection_reboots |71.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/tx_proxy/proxy_ut_helpers.cpp |71.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/incr_restore_helpers.cpp |71.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/ut/ydb-core-tx-columnshard-engines-ut |71.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/public/sdk/cpp/tests/unit/client/value/ydb-public-sdk-cpp-tests-unit-client-value |71.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/blobs_action/tier/remove.cpp |71.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/incr_restore_helpers.cpp |71.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/scheme/tiering/tier_info.cpp |71.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/blobs_action/tier/remove.cpp |71.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/scheme/tiering/tier_info.cpp |71.6%| [LD] 
{default-linux-x86_64, release, asan} $(B)/ydb/public/sdk/cpp/tests/unit/library/issue/ydb-public-sdk-cpp-tests-unit-library-issue |71.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/yql/public/ydb_issue/ut/ydb-library-yql-public-ydb_issue-ut |71.6%| [AR] {default-linux-x86_64, release, asan} $(B)/yql/essentials/minikql/comp_nodes/llvm16/libminikql-comp_nodes-llvm16.a |71.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/persqueue/codecs/ut/ydb-core-persqueue-codecs-ut |71.6%| [AR] {BAZEL_UPLOAD, SKIPPED} $(B)/yql/essentials/minikql/comp_nodes/llvm16/libminikql-comp_nodes-llvm16.a |71.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/public/sdk/cpp/tests/unit/client/coordination/ydb-public-sdk-cpp-tests-unit-client-coordination |71.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/public/lib/json_value/ut/ydb-public-lib-json_value-ut |71.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/tx_proxy/storage_tenant_ut.cpp |71.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/public/sdk/cpp/tests/unit/client/draft/ydb-public-sdk-cpp-tests-unit-client-draft |71.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_rtmr_reboots/ut_rtmr_reboots.cpp |71.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/tools/pq_read/pq_read |71.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/olap/bg_tasks/adapter/adapter.cpp |71.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/olap/bg_tasks/adapter/adapter.cpp |71.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/fq/pq_async_io/ut/ydb-tests-fq-pq_async_io-ut |71.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_serverless/ydb-core-tx-schemeshard-ut_serverless |71.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/storage/actualizer/counters/counters.cpp |71.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/external_sources/s3/ut/ydb-core-external_sources-s3-ut |71.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/public/sdk/cpp/tests/unit/client/endpoints/ydb-public-sdk-cpp-tests-unit-client-endpoints |71.5%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/export/session/storage/tier/libsession-storage-tier.global.a |71.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/storage/actualizer/counters/counters.cpp |71.5%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/scheme/tiering/libengines-scheme-tiering.a |71.5%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/export/session/storage/tier/libsession-storage-tier.global.a |71.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/config/grpc_service.cpp |71.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/public/sdk/cpp/tests/unit/client/result/ydb-public-sdk-cpp-tests-unit-client-result |71.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/pg/ydb-core-kqp-ut-pg |71.5%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/scheme/tiering/libengines-scheme-tiering.a |71.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/storage/indexes/categories_bloom/header.cpp |71.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/sequenceshard/sequenceshard_impl.cpp |71.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/public/lib/ydb_cli/commands/ydb_cluster.cpp |71.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/scheme/ut/ydb-core-scheme-ut |71.5%| [CC] {BAZEL_UPLOAD} 
$(S)/ydb/core/tx/columnshard/engines/storage/indexes/categories_bloom/header.cpp |71.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_write/ydb-core-tx-datashard-ut_write |71.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/public/lib/ydb_cli/commands/ydb_cluster.cpp |71.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_erase_rows/ydb-core-tx-datashard-ut_erase_rows |71.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/sequenceshard/sequenceshard_impl.cpp |71.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_ttl/ydb-core-tx-schemeshard-ut_ttl |71.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/public/sdk/cpp/tests/unit/client/driver/ydb-public-sdk-cpp-tests-unit-client-driver |71.5%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/storage/actualizer/counters/libstorage-actualizer-counters.a |71.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/public/sdk/cpp/tests/integration/sessions_pool/public-sdk-cpp-tests-integration-sessions_pool |71.5%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/olap/bg_tasks/adapter/libolap-bg_tasks-adapter.a |71.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/config/grpc_service.cpp |71.5%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/actualizer/counters/libstorage-actualizer-counters.a |71.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_object_storage_listing/ydb-core-tx-datashard-ut_object_storage_listing |71.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/fq/libs/checkpoint_storage/ut/ydb-core-fq-libs-checkpoint_storage-ut |71.5%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/schemeshard/olap/bg_tasks/adapter/libolap-bg_tasks-adapter.a |71.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_user_attributes/ydb-core-tx-schemeshard-ut_user_attributes |71.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/data/ydb-core-kqp-ut-data |71.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/public/lib/ydb_cli/topic/topic_read_ut.cpp |71.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_init/ydb-core-tx-datashard-ut_init |71.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/schemeshard/ut_rtmr/ut_rtmr.cpp |71.5%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/services/config/libydb-services-config.a |71.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_read_only_vdisk/ut_blobstorage-ut_read_only_vdisk |71.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/yql/dq/runtime/ut/ydb-library-yql-dq-runtime-ut |71.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_read_table/ydb-core-tx-datashard-ut_read_table |71.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/public/sdk/cpp/tests/unit/client/params/ydb-public-sdk-cpp-tests-unit-client-params |71.5%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/scheme/abstract/libengines-scheme-abstract.a |71.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/tools/query_replay/main.cpp |71.5%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/scheme/abstract/libengines-scheme-abstract.a |71.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/tools/query_replay/query_compiler.cpp |71.5%| [AR] {BAZEL_UPLOAD} $(B)/ydb/public/api/protos/libapi-protos.a |71.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/tools/query_replay/query_proccessor.cpp |71.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_replication/ydb-core-tx-schemeshard-ut_replication |71.4%| [LD] {BAZEL_DOWNLOAD, FAILED} 
$(B)/ydb/tools/query_replay/ydb_query_replay |71.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/tools/query_replay/query_replay.cpp |71.4%| [AR] {BAZEL_UPLOAD} $(B)/ydb/services/config/libydb-services-config.a |71.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tablet_flat/benchmark/core_tablet_flat_benchmark |71.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_olap/ydb-core-tx-schemeshard-ut_olap |71.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/public/lib/ydb_cli/topic/ut/ydb-public-lib-ydb_cli-topic-ut |71.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/mind/address_classification/ut/ydb-core-mind-address_classification-ut |71.4%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/categories_bloom/libstorage-indexes-categories_bloom.a |71.4%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/categories_bloom/libstorage-indexes-categories_bloom.a |71.4%| [LD] {BAZEL_UPLOAD} $(B)/ydb/library/yaml_config/static_validator/ut/ydb-library-yaml_config-static_validator-ut |71.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_order/ydb-core-tx-datashard-ut_order |71.4%| [LD] {BAZEL_UPLOAD} $(B)/ydb/library/yaml_config/validator/ut/validator_checks/yaml_config-validator-ut-validator_checks |71.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_auditsettings/ydb-core-tx-schemeshard-ut_auditsettings |71.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/public/sdk/cpp/tests/integration/sessions/ydb-public-sdk-cpp-tests-integration-sessions |71.3%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/ydb_cli/commands/interactive/highlight/ut/ydb_cli-commands-interactive-highlight-ut |71.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/effects/ydb-core-kqp-ut-effects |71.4%| [LD] {BAZEL_UPLOAD} $(B)/ydb/library/yaml_config/validator/ut/validator_builder/yaml_config-validator-ut-validator_builder |71.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/tx_proxy/ut_storage_tenant/ydb-core-tx-tx_proxy-ut_storage_tenant |71.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/coordinator/ut/ydb-core-tx-coordinator-ut |71.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/external_sources/hive_metastore/ut/ydb-core-external_sources-hive_metastore-ut |71.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/client/ut/ydb-core-client-ut |71.3%| [LD] {BAZEL_UPLOAD} $(B)/ydb/library/yaml_config/static_validator/ut/example_configs/static_validator-ut-example_configs |71.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_rtmr_reboots/ydb-core-tx-schemeshard-ut_rtmr_reboots |71.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/scheme/ydb-core-kqp-ut-scheme |71.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/splitter/chunks.cpp |71.2%| [AR] {default-linux-x86_64, release, asan} $(B)/yt/yt/client/libyt-yt-client.a |71.2%| [AR] {BAZEL_UPLOAD, SKIPPED} $(B)/yt/yt/client/libyt-yt-client.a |71.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_rtmr/ydb-core-tx-schemeshard-ut_rtmr |71.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/splitter/chunks.cpp |71.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/scheme/versions/filtered_scheme.cpp |71.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/yql/dq/actors/spilling/ut/ydb-library-yql-dq-actors-spilling-ut |71.0%| [LD] {BAZEL_UPLOAD} $(B)/ydb/library/fyamlcpp/ut/ydb-library-fyamlcpp-ut |70.9%| [LD] {default-linux-x86_64, release, asan} 
$(B)/ydb/public/sdk/cpp/tests/unit/client/discovery_mutator/sdk-cpp-tests-unit-client-discovery_mutator |70.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/yql/providers/s3/object_listers/ut/ydb-library-yql-providers-s3-object_listers-ut |70.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/scheme/versions/filtered_scheme.cpp |70.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/workload/benchmark_base/ut/ydb-library-workload-benchmark_base-ut |70.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/public/lib/idx_test/ut/ydb-public-lib-idx_test-ut |70.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/changes/compaction/abstract/merger.cpp |70.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/kqp/kqp_indexes/ydb-tests-functional-kqp-kqp_indexes |70.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/changes/compaction/abstract/merger.cpp |69.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/library/yaml_config/serialize_deserialize.cpp |69.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/fq/yt/kqp_yt_import/ydb-tests-fq-yt-kqp_yt_import |69.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/changes/compaction/plain/column_cursor.cpp |69.8%| RESOURCE $(sbr:4966407557) |69.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/changes/compaction/plain/merged_column.cpp |69.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/yql/providers/dq/runtime/ut/ydb-library-yql-providers-dq-runtime-ut |69.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/library/yaml_config/serialize_deserialize.cpp |69.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/scheme/versions/snapshot_scheme.cpp |69.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/library/yaml_config/yaml_config_helpers.cpp |69.8%| [AR] {RESULT} $(B)/ydb/public/api/protos/libapi-protos.a |69.8%| [LD] {RESULT} $(B)/ydb/library/yaml_config/validator/ut/validator_checks/yaml_config-validator-ut-validator_checks |69.8%| PREPARE $(FLAKE8_PY2-2255386470) |69.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/data_accessor/request.cpp |69.8%| [AR] {RESULT} $(B)/ydb/library/fyamlcpp/libydb-library-fyamlcpp.a |69.8%| [UN] {default-linux-x86_64, release, asan} $(B)/yql/essentials/tests/common/test_framework/udfs_deps/common-test_framework-udfs_deps.pkg.fake |69.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/public/lib/ydb_cli/commands/ydb_dynamic_config.cpp |69.8%| [SB] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/postgresql/psql/psql |69.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/yql/providers/s3/credentials/ut/ydb-library-yql-providers-s3-credentials-ut |69.8%| [AR] {RESULT} $(B)/ydb/library/yaml_config/validator/liblibrary-yaml_config-validator.a |69.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/sdk/cpp/sdk_credprovider/ydb-tests-functional-sdk-cpp-sdk_credprovider |69.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/changes/compaction/plain/column_cursor.cpp |69.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/changes/compaction/plain/merged_column.cpp |69.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/scheme/versions/snapshot_scheme.cpp |69.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/changes/compaction/sub_columns/builder.cpp |69.9%| [CC] 
{default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/export_s3_buffer.cpp |69.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/library/yaml_config/yaml_config_helpers.cpp |69.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kesus/tablet/tx_quoter_resource_add.cpp |69.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/data_accessor/request.cpp |69.8%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/examples/type_inspection/libtype_inspection_udf.so |69.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/splitter/batch_slice.cpp |69.9%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/examples/dummylog/libdummylog.so |69.9%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/hyperloglog/libhyperloglog_udf.so |69.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/changes/compaction/sub_columns/remap.cpp |69.8%| PREPARE $(FLAKE8_LINTER-sbr:6561765464) |69.9%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/examples/callables/libcallables_udf.so |69.9%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/logs/dsv/libdsv_udf.so |69.9%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/streaming/libstreaming_udf.so |69.9%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/examples/dicts/libdicts_udf.so |69.9%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/test/simple/libsimple_udf.so |69.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/config/init/init_noop.cpp |69.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/public/sdk/cpp/tests/integration/basic_example/public-sdk-cpp-tests-integration-basic_example |69.9%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/changes/compaction/abstract/libchanges-compaction-abstract.a |69.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/public/lib/ydb_cli/commands/topic_workload/ut/ydb-public-lib-ydb_cli-commands-topic_workload-ut |69.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/backup/ydb-tests-functional-backup |69.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/changes/compaction/plain/logic.cpp |69.9%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/examples/structs/libstructs_udf.so |69.9%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/vector/libvector_udf.so |69.9%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/math/libmath_udf.so |69.9%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/set/libset_udf.so |69.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/changes/compaction/sub_columns/builder.cpp |69.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/data_accessor/in_mem/collector.cpp |69.9%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/topfreq/libtopfreq_udf.so |69.9%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/examples/lists/liblists_udf.so |69.9%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/core/erasure/ut/ydb-core-erasure-ut |69.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/export_s3_buffer.cpp |69.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kesus/tablet/tx_quoter_resource_add.cpp |69.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/config/init/init_noop.cpp |69.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/changes/compaction/sub_columns/iterator.cpp |69.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/changes/compaction/sub_columns/remap.cpp |69.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/splitter/batch_slice.cpp |69.9%| [CC] {default-linux-x86_64, release, asan} 
$(S)/ydb/core/tx/columnshard/engines/changes/compaction/sparsed/logic.cpp |69.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/public/lib/ydb_cli/commands/ydb_dynamic_config.cpp |69.9%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/changes/compaction/abstract/libchanges-compaction-abstract.a |69.9%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/changes/compaction/plain/libchanges-compaction-plain.global.a |69.9%| PREPARE $(BLACK_LINTER-sbr:8415400280) |69.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/data_accessor/in_mem/collector.cpp |69.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/transactions/locks/write.cpp |69.9%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/digest/libdigest_udf.so |69.9%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/stat/libstat_udf.so |69.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/security/ut/ydb-library-security-ut |69.9%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/yson2/libyson2_udf.so |69.9%| [AR] {RESULT} $(B)/ydb/public/api/grpc/libapi-grpc.a |69.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/changes/compaction/sub_columns/iterator.cpp |69.8%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/changes/compaction/sparsed/libchanges-compaction-sparsed.global.a |69.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/yql/dq/actors/compute/ut/ydb-library-yql-dq-actors-compute-ut |69.8%| [AR] {default-linux-x86_64, release, asan} $(B)/yt/yt/core/libyt-yt-core.a |69.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kesus/tablet/tx_session_detach.cpp |69.8%| [AR] {RESULT} $(B)/ydb/public/api/protos/libapi-protos.a |69.9%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/histogram/libhistogram_udf.so |69.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/yql/providers/generic/pushdown/ut/ydb-library-yql-providers-generic-pushdown-ut |69.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/library/yql/dq/actors/compute/ut/ydb-library-yql-dq-actors-compute-ut |69.9%| [AR] {BAZEL_UPLOAD, SKIPPED} $(B)/yt/yt/core/libyt-yt-core.a |69.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/changes/compaction/plain/column_portion_chunk.cpp |69.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/transactions/locks/write.cpp |69.9%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/changes/compaction/plain/libchanges-compaction-plain.global.a |69.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/transactions/locks/abstract.cpp |69.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/data_accessor/abstract/manager.cpp |69.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kesus/tablet/tx_sessions_describe.cpp |69.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/console/http.cpp |69.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kesus/tablet/tx_semaphore_create.cpp |69.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/data_accessor/in_mem/constructor.cpp |69.9%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/top/libtop_udf.so |69.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/viewer/json_handlers.cpp |69.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/fq/libs/logs/log.cpp |69.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kesus/tablet/tx_session_detach.cpp |69.9%| [CC] {default-linux-x86_64, release, asan} 
$(S)/ydb/public/lib/ydb_cli/commands/ydb_node_config.cpp |69.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/rm_service/kqp_resource_estimation.cpp |69.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/data_accessor/local_db/constructor.cpp |69.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/transactions/locks/abstract.cpp |69.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/counters/portions.cpp |69.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/changes/compaction/plain/column_portion_chunk.cpp |69.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/protobuf_printer/ut/ydb-library-protobuf_printer-ut |69.9%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/changes/compaction/sparsed/libchanges-compaction-sparsed.global.a |69.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/data_accessor/abstract/manager.cpp |69.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/http.cpp |69.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/console/log_settings_configurator.cpp |69.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kesus/tablet/tx_sessions_describe.cpp |69.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/data_accessor/in_mem/constructor.cpp |69.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/counters/portion_index.cpp |69.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/rm_service/kqp_resource_estimation.cpp |69.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kesus/tablet/tx_semaphore_create.cpp |69.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/viewer/json_handlers.cpp |69.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/blobs_action/abstract/action.cpp |69.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/changes/compaction/plain/logic.cpp |69.9%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/core/config/tools/protobuf_plugin/ut/ydb-core-config-tools-protobuf_plugin-ut |69.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/fq/libs/logs/log.cpp |69.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/data_accessor/local_db/constructor.cpp |69.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/counters/portions.cpp |69.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/public/lib/ydb_cli/commands/ydb_node_config.cpp |69.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/public/sdk/cpp/tests/integration/bulk_upsert/ydb-public-sdk-cpp-tests-integration-bulk_upsert |69.9%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/file/libfile_udf.so |69.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/base/blobstorage_events.cpp |69.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/public/lib/ydb_cli/commands/ydb_admin.cpp |69.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/counters/portion_index.cpp |69.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/log_settings_configurator.cpp |69.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/blobs_action/abstract/action.cpp |69.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/base/blobstorage_events.cpp |69.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/public/lib/ydb_cli/commands/ydb_admin.cpp |69.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/data_accessor/abstract/constructor.cpp |69.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/public/lib/ydb_cli/common/yql_parser/ut/ydb-public-lib-ydb_cli-common-yql_parser-ut |69.5%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/changes/compaction/abstract/libchanges-compaction-abstract.a |69.5%| [AR] {RESULT} 
$(B)/ydb/core/tx/columnshard/engines/changes/compaction/plain/libchanges-compaction-plain.global.a |69.4%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/changes/compaction/sparsed/libchanges-compaction-sparsed.global.a |69.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/data_accessor/abstract/constructor.cpp |69.4%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/scheme/abstract/libengines-scheme-abstract.a |69.4%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/scheme/tiering/libengines-scheme-tiering.a |69.2%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/storage/actualizer/counters/libstorage-actualizer-counters.a |69.2%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/categories_bloom/libstorage-indexes-categories_bloom.a |69.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/changes/compaction/sparsed/logic.cpp |69.1%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/export/session/storage/tier/libsession-storage-tier.global.a |69.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/yql/providers/s3/range_helpers/ut/ydb-library-yql-providers-s3-range_helpers-ut |69.1%| [AR] {RESULT} $(B)/ydb/core/tx/schemeshard/olap/bg_tasks/adapter/libolap-bg_tasks-adapter.a |69.1%| [AR] {RESULT} $(B)/ydb/library/yaml_config/public/liblibrary-yaml_config-public.a |69.1%| [AR] {RESULT} $(B)/ydb/services/config/libydb-services-config.a |69.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/grpc/server/ut/ydb-library-grpc-server-ut |69.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/console/jaeger_tracing_configurator.cpp |69.0%| [AR] {RESULT} $(B)/ydb/core/protos/libpy3ydb-core-protos.global.a |69.0%| [AR] {RESULT} $(B)/yt/yt/core/libyt-yt-core.a |69.0%| [LD] {RESULT} $(B)/ydb/library/yaml_config/static_validator/ut/ydb-library-yaml_config-static_validator-ut |69.0%| [LD] {RESULT} $(B)/ydb/tests/functional/backup/ydb-tests-functional-backup |69.0%| [AR] {RESULT} $(B)/ydb/public/api/protos/libpy3api-protos.global.a |69.0%| [AR] {RESULT} $(B)/ydb/library/yaml_config/static_validator/liblibrary-yaml_config-static_validator.a |69.0%| [LD] {RESULT} $(B)/ydb/public/sdk/cpp/tests/integration/sessions_pool/public-sdk-cpp-tests-integration-sessions_pool |68.9%| [AR] {RESULT} $(B)/ydb/public/sdk/cpp/src/client/config/libsrc-client-config.a |68.9%| [LD] {RESULT} $(B)/ydb/library/yaml_config/static_validator/ut/example_configs/static_validator-ut-example_configs |68.9%| [LD] {RESULT} $(B)/ydb/library/workload/benchmark_base/ut/ydb-library-workload-benchmark_base-ut |68.9%| [LD] {RESULT} $(B)/ydb/library/fyamlcpp/ut/ydb-library-fyamlcpp-ut |68.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/library/yaml_config/yaml_config_parser.cpp |68.9%| [LD] {RESULT} $(B)/ydb/library/yaml_config/validator/ut/validator_builder/yaml_config-validator-ut-validator_builder |68.9%| [LD] {RESULT} $(B)/ydb/public/lib/ydb_cli/common/yql_parser/ut/ydb-public-lib-ydb_cli-common-yql_parser-ut |68.9%| [LD] {RESULT} $(B)/ydb/public/lib/ydb_cli/commands/topic_workload/ut/ydb-public-lib-ydb_cli-commands-topic_workload-ut |68.9%| [LD] {RESULT} $(B)/ydb/public/sdk/cpp/tests/unit/client/coordination/ydb-public-sdk-cpp-tests-unit-client-coordination |68.9%| [LD] {RESULT} $(B)/ydb/public/lib/json_value/ut/ydb-public-lib-json_value-ut |68.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/memory_controller/memory_controller.cpp |68.9%| [LD] {RESULT} $(B)/ydb/core/persqueue/codecs/ut/ydb-core-persqueue-codecs-ut |68.9%| [LD] {RESULT} 
$(B)/ydb/tests/tools/pq_read/pq_read |68.9%| [LD] {RESULT} $(B)/ydb/public/sdk/cpp/tests/unit/client/draft/ydb-public-sdk-cpp-tests-unit-client-draft |68.9%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/tools/astdiff/astdiff |68.8%| [LD] {RESULT} $(B)/ydb/public/sdk/cpp/tests/integration/sessions/ydb-public-sdk-cpp-tests-integration-sessions |68.8%| [LD] {RESULT} $(B)/ydb/public/sdk/cpp/tests/unit/client/value/ydb-public-sdk-cpp-tests-unit-client-value |68.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/jaeger_tracing_configurator.cpp |68.8%| [LD] {RESULT} $(B)/ydb/tests/functional/sdk/cpp/sdk_credprovider/ydb-tests-functional-sdk-cpp-sdk_credprovider |68.8%| [LD] {RESULT} $(B)/ydb/tests/fq/yt/kqp_yt_import/ydb-tests-fq-yt-kqp_yt_import |68.9%| [LD] {RESULT} $(B)/ydb/public/sdk/cpp/tests/unit/client/params/ydb-public-sdk-cpp-tests-unit-client-params |68.8%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/data_accessor/in_mem/libcolumnshard-data_accessor-in_mem.global.a |68.8%| [LD] {RESULT} $(B)/ydb/public/sdk/cpp/tests/unit/client/result/ydb-public-sdk-cpp-tests-unit-client-result |68.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/driver_lib/run/config.cpp |68.8%| [LD] {RESULT} $(B)/ydb/library/yql/providers/dq/runtime/ut/ydb-library-yql-providers-dq-runtime-ut |68.8%| [LD] {RESULT} $(B)/ydb/public/sdk/cpp/tests/unit/client/discovery_mutator/sdk-cpp-tests-unit-client-discovery_mutator |68.8%| [AR] {RESULT} $(B)/yql/essentials/minikql/comp_nodes/llvm16/libminikql-comp_nodes-llvm16.a |68.8%| [LD] {RESULT} $(B)/ydb/library/yql/dq/runtime/ut/ydb-library-yql-dq-runtime-ut |68.9%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/data_accessor/local_db/libcolumnshard-data_accessor-local_db.global.a |68.9%| [AR] {RESULT} $(B)/yt/yql/providers/yt/provider/libproviders-yt-provider.a |68.8%| [AR] {RESULT} $(B)/yt/yt/client/libyt-yt-client.a |68.9%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/data_accessor/in_mem/libcolumnshard-data_accessor-in_mem.global.a |68.9%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/data_accessor/local_db/libcolumnshard-data_accessor-local_db.global.a |68.9%| [LD] {RESULT} $(B)/ydb/public/sdk/cpp/tests/unit/library/issue/ydb-public-sdk-cpp-tests-unit-library-issue |68.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/log_backend/log_backend.cpp |68.9%| [LD] {RESULT} $(B)/ydb/library/yql/providers/generic/pushdown/ut/ydb-library-yql-providers-generic-pushdown-ut |68.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/node_broker__update_config_subscription.cpp |68.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/driver_lib/run/config.cpp |68.9%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/data_accessor/in_mem/libcolumnshard-data_accessor-in_mem.global.a |68.9%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/data_accessor/local_db/libcolumnshard-data_accessor-local_db.global.a |68.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/backup/ut/ydb-library-backup-ut |68.9%| [LD] {RESULT} $(B)/ydb/library/protobuf_printer/ut/ydb-library-protobuf_printer-ut |68.9%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/fq/libs/logs/libfq-libs-logs.a |68.9%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/python/python3_small/libpython3_udf.so |68.9%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/memory_controller/libydb-core-memory_controller.a |68.9%| [LD] {RESULT} $(B)/ydb/library/backup/ut/ydb-library-backup-ut |68.9%| [LD] {BAZEL_UPLOAD, SKIPPED} 
$(B)/ydb/library/backup/ut/ydb-library-backup-ut |68.9%| [AR] {RESULT} $(B)/ydb/core/fq/libs/logs/libfq-libs-logs.a |68.9%| [AR] {RESULT} $(B)/ydb/core/memory_controller/libydb-core-memory_controller.a |68.9%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/persqueue/codecs/ut/ydb-core-persqueue-codecs-ut |68.9%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/log_backend/libydb-core-log_backend.a |68.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/library/yaml_config/yaml_config_parser.cpp |68.9%| [AR] {RESULT} $(B)/ydb/core/log_backend/libydb-core-log_backend.a |68.9%| [LD] {BAZEL_UPLOAD} $(B)/ydb/public/sdk/cpp/tests/unit/library/issue/ydb-public-sdk-cpp-tests-unit-library-issue |68.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/node_broker__update_config_subscription.cpp |68.9%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/hyperscan/libhyperscan_udf.so |68.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/node_broker__load_state.cpp |68.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/node_broker__update_config.cpp |68.8%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/fq/libs/logs/libfq-libs-logs.a |68.8%| [LD] {BAZEL_UPLOAD} $(B)/ydb/public/sdk/cpp/tests/unit/client/endpoints/ydb-public-sdk-cpp-tests-unit-client-endpoints |68.7%| [LD] {BAZEL_UPLOAD} $(B)/ydb/public/sdk/cpp/tests/unit/client/value/ydb-public-sdk-cpp-tests-unit-client-value |68.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/node_broker__graceful_shutdown.cpp |68.6%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/log_backend/libydb-core-log_backend.a |68.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/node_broker__load_state.cpp |68.5%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/memory_controller/libydb-core-memory_controller.a |68.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/node_broker__update_config.cpp |68.5%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/changes/compaction/sub_columns/libchanges-compaction-sub_columns.a |68.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/node_broker__graceful_shutdown.cpp |68.4%| [LD] {RESULT} $(B)/ydb/public/sdk/cpp/tests/integration/bulk_upsert/ydb-public-sdk-cpp-tests-integration-bulk_upsert |68.4%| [LD] {RESULT} $(B)/ydb/public/lib/idx_test/ut/ydb-public-lib-idx_test-ut |68.3%| [LD] {RESULT} $(B)/ydb/library/yql/providers/s3/credentials/ut/ydb-library-yql-providers-s3-credentials-ut |68.3%| [LD] {RESULT} $(B)/ydb/tests/functional/kqp/kqp_indexes/ydb-tests-functional-kqp-kqp_indexes |68.2%| [LD] {RESULT} $(B)/ydb/core/scheme/ut/ydb-core-scheme-ut |68.1%| [LD] {RESULT} $(B)/ydb/library/yql/public/ydb_issue/ut/ydb-library-yql-public-ydb_issue-ut |68.1%| [LD] {RESULT} $(B)/ydb/library/grpc/server/ut/ydb-library-grpc-server-ut |68.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/operations/batch_builder/merger.cpp |68.1%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/changes/compaction/sub_columns/libchanges-compaction-sub_columns.a |68.1%| [LD] {RESULT} $(B)/ydb/library/yql/dq/actors/compute/ut/ydb-library-yql-dq-actors-compute-ut |68.1%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/changes/compaction/plain/libchanges-compaction-plain.a |68.1%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/changes/compaction/sub_columns/libchanges-compaction-sub_columns.a |67.8%| [LD] {BAZEL_UPLOAD} $(B)/ydb/library/yql/public/ydb_issue/ut/ydb-library-yql-public-ydb_issue-ut |67.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/node_broker__migrate_state.cpp |67.7%| [CC] 
{default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/storage/indexes/max/meta.cpp |67.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/yql/dq/state/ut/ydb-library-yql-dq-state-ut |67.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/operations/batch_builder/merger.cpp |67.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/memory_controller/memory_controller.cpp |67.7%| [TS] {RESULT} ydb/core/erasure/ut_perf/unittest |67.6%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/changes/compaction/plain/libchanges-compaction-plain.a |67.6%| [LD] {RESULT} $(B)/ydb/library/yql/dq/actors/spilling/ut/ydb-library-yql-dq-actors-spilling-ut |67.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/log_backend/log_backend.cpp |67.6%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/url_base/liburl_udf.so |67.6%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/changes/compaction/plain/libchanges-compaction-plain.a |67.6%| [LD] {RESULT} $(B)/ydb/public/sdk/cpp/tests/unit/client/endpoints/ydb-public-sdk-cpp-tests-unit-client-endpoints |67.6%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/unicode_base/libunicode_udf.so |67.6%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/base/libcore-blobstorage-base.a |67.6%| [LD] {RESULT} $(B)/ydb/library/yql/providers/s3/object_listers/ut/ydb-library-yql-providers-s3-object_listers-ut |67.6%| [LD] {RESULT} $(B)/ydb/public/sdk/cpp/tests/unit/client/driver/ydb-public-sdk-cpp-tests-unit-client-driver |67.6%| [LD] {RESULT} $(B)/ydb/library/security/ut/ydb-library-security-ut |67.6%| [TS] {RESULT} ydb/core/kqp/ut/federated_query/common/clang_format |67.6%| [LD] {RESULT} $(B)/ydb/library/yql/dq/state/ut/ydb-library-yql-dq-state-ut |67.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/node_broker__migrate_state.cpp |67.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/storage/indexes/max/meta.cpp |67.4%| COMPACTING CACHE 13.0GiB |67.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/fq/libs/metrics/ut/ydb-core-fq-libs-metrics-ut |67.4%| [AR] {RESULT} $(B)/ydb/core/blobstorage/base/libcore-blobstorage-base.a |67.5%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/base/libcore-blobstorage-base.a |67.5%| [TS] {RESULT} ydb/core/fq/libs/signer/ut/unittest |67.5%| [LD] {RESULT} $(B)/ydb/public/sdk/cpp/tests/integration/basic_example/public-sdk-cpp-tests-integration-basic_example |67.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/driver_lib/run/config_parser.cpp |67.5%| [TS] {RESULT} ydb/services/persqueue_cluster_discovery/cluster_ordering/ut/unittest |67.5%| [LD] {RESULT} $(B)/ydb/library/yql/providers/s3/range_helpers/ut/ydb-library-yql-providers-s3-range_helpers-ut |67.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/kqp/kqp_query_svc/ydb-tests-functional-kqp-kqp_query_svc |67.5%| [LD] {RESULT} $(B)/ydb/core/fq/libs/metrics/ut/ydb-core-fq-libs-metrics-ut |67.5%| [LD] {RESULT} $(B)/ydb/tests/functional/kqp/kqp_query_svc/ydb-tests-functional-kqp-kqp_query_svc |67.5%| [TS] {RESULT} ydb/core/fq/libs/hmac/ut/unittest |67.5%| [TS] {RESULT} ydb/core/debug_tools/ut/unittest |67.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/console/console__add_config_subscription.cpp |67.5%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/tests/sql/solomon/ydb-library-yql-tests-sql-solomon |67.5%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/json2/libjson2_udf.so |67.5%| [TS] {RESULT} ydb/core/resource_pools/ut/unittest |67.5%| [LD] {BAZEL_DOWNLOAD} 
$(B)/ydb/tests/supp/ydb_supp |67.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/console/console__update_last_provided_config.cpp |67.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/replication/ydb-tests-functional-replication |67.5%| [LD] {RESULT} $(B)/ydb/tests/functional/replication/ydb-tests-functional-replication |67.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/yql/providers/yt/actors/ut/ydb-library-yql-providers-yt-actors-ut |67.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/library/yql/providers/yt/actors/ut/ydb-library-yql-providers-yt-actors-ut |67.5%| [LD] {RESULT} $(B)/ydb/library/yql/providers/yt/actors/ut/ydb-library-yql-providers-yt-actors-ut |67.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/console__add_config_subscription.cpp |67.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/driver_lib/run/config_parser.cpp |67.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/console__update_last_provided_config.cpp |67.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/console/console__drop_yaml_config.cpp |67.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/yql/providers/dq/scheduler/ut/ydb-library-yql-providers-dq-scheduler-ut |67.7%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/protobuf/libprotobuf_udf.so |67.7%| [LD] {RESULT} $(B)/ydb/library/yql/providers/dq/scheduler/ut/ydb-library-yql-providers-dq-scheduler-ut |67.7%| [LD] {BAZEL_UPLOAD} $(B)/ydb/public/lib/json_value/ut/ydb-public-lib-json_value-ut |67.7%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/public/lib/ydb_cli/commands/libclicommands.a |67.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/export_iface.cpp |67.8%| [AR] {RESULT} $(B)/ydb/public/lib/ydb_cli/commands/libclicommands.a |67.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/console__drop_yaml_config.cpp |67.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/export_iface.cpp |67.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/console/console__log_cleanup.cpp |67.9%| [TS] {RESULT} ydb/core/blobstorage/crypto/ut/unittest |68.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/console__log_cleanup.cpp |68.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/console/console__remove_config_subscription.cpp |68.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/console/console__remove_config_subscriptions.cpp |68.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/public/sdk/cpp/tests/integration/server_restart/public-sdk-cpp-tests-integration-server_restart |68.3%| [LD] {RESULT} $(B)/ydb/public/sdk/cpp/tests/integration/server_restart/public-sdk-cpp-tests-integration-server_restart |68.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/console__remove_config_subscription.cpp |68.3%| [LD] {BAZEL_UPLOAD} $(B)/ydb/public/sdk/cpp/tests/unit/client/result/ydb-public-sdk-cpp-tests-unit-client-result |68.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/kqp/kqp_query_session/ydb-tests-functional-kqp-kqp_query_session |68.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/console/console__configure.cpp |68.4%| [LD] {RESULT} $(B)/ydb/tests/functional/kqp/kqp_query_session/ydb-tests-functional-kqp-kqp_query_session |68.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/console__remove_config_subscriptions.cpp |68.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/console__configure.cpp |68.5%| [LD] {default-linux-x86_64, release, asan} 
$(B)/ydb/tests/functional/backup/s3_path_style/ydb-tests-functional-backup-s3_path_style |68.6%| [LD] {RESULT} $(B)/ydb/tests/functional/backup/s3_path_style/ydb-tests-functional-backup-s3_path_style |68.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/console/console__replace_config_subscriptions.cpp |68.7%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/streaming_optimize/ydb-tests-fq-streaming_optimize |68.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/console__replace_config_subscriptions.cpp |68.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/console/console__get_yaml_config.cpp |68.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/yaml_config/validator/ut/validator/ydb-library-yaml_config-validator-ut-validator |68.8%| [LD] {RESULT} $(B)/ydb/library/yaml_config/validator/ut/validator/ydb-library-yaml_config-validator-ut-validator |68.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/console__get_yaml_config.cpp |68.8%| [LD] {BAZEL_UPLOAD} $(B)/ydb/public/sdk/cpp/tests/unit/client/params/ydb-public-sdk-cpp-tests-unit-client-params |68.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/console/console__get_yaml_metadata.cpp |68.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/public/sdk/cpp/tests/unit/client/oauth2_token_exchange/tests-unit-client-oauth2_token_exchange |68.8%| [LD] {RESULT} $(B)/ydb/public/sdk/cpp/tests/unit/client/oauth2_token_exchange/tests-unit-client-oauth2_token_exchange |68.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/console__get_yaml_metadata.cpp |68.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kesus/tablet/tablet.cpp |68.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/locks/locks_db.cpp |68.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/public/lib/ydb_cli/common/ut/ydb-public-lib-ydb_cli-common-ut |68.9%| [LD] {RESULT} $(B)/ydb/public/lib/ydb_cli/common/ut/ydb-public-lib-ydb_cli-common-ut |69.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/yql/providers/dq/provider/ut/ydb-library-yql-providers-dq-provider-ut |69.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kesus/tablet/tablet.cpp |69.0%| [LD] {RESULT} $(B)/ydb/library/yql/providers/dq/provider/ut/ydb-library-yql-providers-dq-provider-ut |69.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/locks/locks_db.cpp |69.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/workload/tpch/ut/ydb-library-workload-tpch-ut |69.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/console/console__get_log_tail.cpp |69.0%| [LD] {RESULT} $(B)/ydb/library/workload/tpch/ut/ydb-library-workload-tpch-ut |69.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/console__get_log_tail.cpp |69.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard_identificators.cpp |69.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_identificators.cpp |69.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard_types.cpp |69.1%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/protos/libydb-core-protos.a |69.1%| [AR] {RESULT} $(B)/ydb/core/protos/libydb-core-protos.a |69.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_types.cpp |69.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/splitter/column_info.cpp |69.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/splitter/column_info.cpp |69.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kesus/tablet/tx_quoter_resource_delete.cpp 
|69.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/tools/idx_test/idx_test |69.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/apps/ydb/ydb |69.1%| [LD] {RESULT} $(B)/ydb/tests/tools/idx_test/idx_test |69.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/apps/ydb/ydb |69.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kesus/tablet/tx_quoter_resource_delete.cpp |69.1%| [LD] {RESULT} $(B)/ydb/apps/ydb/ydb |69.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/yql/providers/generic/connector/tests/join/yql-providers-generic-connector-tests-join |69.1%| [LD] {RESULT} $(B)/ydb/library/yql/providers/generic/connector/tests/join/yql-providers-generic-connector-tests-join |69.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/yql/providers/generic/connector/tests/datasource/clickhouse/tests-datasource-clickhouse |69.1%| [LD] {RESULT} $(B)/ydb/library/yql/providers/generic/connector/tests/datasource/clickhouse/tests-datasource-clickhouse |69.1%| [LD] {BAZEL_UPLOAD} $(B)/ydb/library/workload/benchmark_base/ut/ydb-library-workload-benchmark_base-ut |69.1%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/scheme/ut/ydb-core-scheme-ut |69.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/stream_scan_common.cpp |69.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/writer/write_controller.cpp |69.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kesus/tablet/tx_semaphore_delete.cpp |69.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/stream_scan_common.cpp |69.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/storage/chunks/column.cpp |69.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/writer/write_controller.cpp |69.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kesus/tablet/tx_semaphore_delete.cpp |69.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/storage/chunks/column.cpp |69.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/jaeger_tracing/ut/ydb-core-jaeger_tracing-ut |69.1%| [LD] {BAZEL_UPLOAD} $(B)/ydb/library/yql/providers/s3/object_listers/ut/ydb-library-yql-providers-s3-object_listers-ut |69.1%| [LD] {RESULT} $(B)/ydb/core/jaeger_tracing/ut/ydb-core-jaeger_tracing-ut |69.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/public/sdk/cpp/examples/basic_example/basic_example |69.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/change_record.cpp |69.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kesus/tablet/tx_semaphore_release.cpp |69.1%| [LD] {RESULT} $(B)/ydb/public/sdk/cpp/examples/basic_example/basic_example |69.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/yql/providers/dq/actors/ut/ydb-library-yql-providers-dq-actors-ut |69.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/scheme/abstract_scheme.cpp |69.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/public/sdk/cpp/examples/topic_reader/transaction/read_from_topic_in_transaction |69.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/library/yql/providers/dq/actors/ut/ydb-library-yql-providers-dq-actors-ut |69.1%| [LD] {RESULT} $(B)/ydb/library/yql/providers/dq/actors/ut/ydb-library-yql-providers-dq-actors-ut |69.1%| [LD] {RESULT} $(B)/ydb/public/sdk/cpp/examples/topic_reader/transaction/read_from_topic_in_transaction |69.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/backup/controller/tx_init_schema.cpp |69.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/change_record.cpp |69.2%| [CC] 
{BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/scheme/abstract_scheme.cpp |69.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/yql/providers/generic/connector/tests/datasource/oracle/tests-datasource-oracle |69.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/yql/providers/generic/connector/tests/datasource/ydb/connector-tests-datasource-ydb |69.2%| [LD] {RESULT} $(B)/ydb/library/yql/providers/generic/connector/tests/datasource/oracle/tests-datasource-oracle |69.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/backup/controller/tx_init_schema.cpp |69.2%| [LD] {RESULT} $(B)/ydb/library/yql/providers/generic/connector/tests/datasource/ydb/connector-tests-datasource-ydb |69.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kesus/tablet/tx_semaphore_release.cpp |69.2%| [LD] {default-linux-x86_64, release, asan} $(B)/yql/essentials/tools/sql2yql/sql2yql |69.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/yql/essentials/tools/sql2yql/sql2yql |69.2%| [LD] {RESULT} $(B)/yql/essentials/tools/sql2yql/sql2yql |69.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/viewer/json/ut/ydb-core-viewer-json-ut |69.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/config/ut/ydb-core-config-ut |69.2%| [LD] {RESULT} $(B)/ydb/core/config/ut/ydb-core-config-ut |69.2%| [LD] {RESULT} $(B)/ydb/core/viewer/json/ut/ydb-core-viewer-json-ut |69.2%| [AR] {BAZEL_UPLOAD} $(B)/ydb/public/api/protos/libapi-protos.a |69.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/keyvalue/keyvalue_simple_db_flat.cpp |69.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/driver_lib/version/ut/ydb-core-driver_lib-version-ut |69.2%| [LD] {RESULT} $(B)/ydb/core/driver_lib/version/ut/ydb-core-driver_lib-version-ut |69.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/scheme/indexes/abstract/collection.cpp |69.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kesus/tablet/tx_config_get.cpp |69.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/tests/kikimr_tpch/ydb-core-kqp-tests-kikimr_tpch |69.2%| [LD] {BAZEL_UPLOAD} $(B)/ydb/library/security/ut/ydb-library-security-ut |69.2%| [LD] {RESULT} $(B)/ydb/core/kqp/tests/kikimr_tpch/ydb-core-kqp-tests-kikimr_tpch |69.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kesus/tablet/tx_semaphore_describe.cpp |69.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/keyvalue/keyvalue_simple_db_flat.cpp |69.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/storage/actualizer/abstract/context.cpp |69.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kesus/tablet/tx_config_get.cpp |69.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/scheme/indexes/abstract/collection.cpp |69.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/scheme/column_features.cpp |69.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/backup/controller/tx_init.cpp |69.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kesus/tablet/tx_dummy.cpp |69.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/storage/actualizer/abstract/abstract.cpp |69.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/storage/actualizer/abstract/context.cpp |69.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kesus/tablet/tx_semaphore_describe.cpp |69.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/scheme/column_features.cpp |69.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/backup/controller/tx_init.cpp |69.3%| [CC] {default-linux-x86_64, 
release, asan} $(S)/ydb/core/kesus/tablet/tx_session_attach.cpp |69.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/storage/actualizer/abstract/abstract.cpp |69.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kesus/tablet/tx_dummy.cpp |69.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_kqp_lookup_table.cpp |69.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/base/generated/codegen/ydb-core-base-generated-codegen |69.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/scheme/filtered_scheme.cpp |69.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kesus/tablet/tx_quoter_resource_describe.cpp |69.3%| [LD] {RESULT} $(B)/ydb/core/base/generated/codegen/ydb-core-base-generated-codegen |69.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/transactions/locks/read_start.cpp |69.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kesus/tablet/tx_session_attach.cpp |69.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_kqp_lookup_table.cpp |69.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/scheme/filtered_scheme.cpp |69.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kesus/tablet/tx_quoter_resource_describe.cpp |69.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/generated/codegen/codegen |69.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/public/sdk/cpp/examples/pagination/pagination |69.3%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/generated/codegen/codegen |69.3%| [LD] {RESULT} $(B)/ydb/public/sdk/cpp/examples/pagination/pagination |69.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/transactions/locks/read_start.cpp |69.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_user_table.cpp |69.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard_path_element.cpp |69.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/backup/controller/tablet.cpp |69.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kesus/tablet/tx_session_destroy.cpp |69.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_kqp_effects.cpp |69.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/scheme/objects_cache.cpp |69.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/yql/providers/generic/connector/tests/datasource/ms_sql_server/datasource-ms_sql_server |69.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_path_element.cpp |69.4%| [LD] {RESULT} $(B)/ydb/library/yql/providers/generic/connector/tests/datasource/ms_sql_server/datasource-ms_sql_server |69.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/apps/dstool/ydb-dstool |69.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kesus/tablet/tablet_html.cpp |69.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/scheme/objects_cache.cpp |69.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/mvp/oidc_proxy/bin/mvp_oidc_proxy |69.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/apps/dstool/ydb-dstool |69.4%| [LD] {RESULT} $(B)/ydb/apps/dstool/ydb-dstool |69.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kesus/tablet/tx_session_destroy.cpp |69.4%| [LD] {RESULT} $(B)/ydb/mvp/oidc_proxy/bin/mvp_oidc_proxy |69.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_kqp_effects.cpp |69.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/mvp/oidc_proxy/bin/mvp_oidc_proxy |69.4%| [CC] {BAZEL_UPLOAD} 
$(S)/ydb/core/tx/datashard/datashard_user_table.cpp |69.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/scheme/snapshot_scheme.cpp |69.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/storage/chunks/data.cpp |69.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/storage/indexes/portions/meta.cpp |69.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/scheme/snapshot_scheme.cpp |69.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tools/cfg/bin/ydb_configure |69.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/client/metadata/ut/ydb-core-client-metadata-ut |69.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tools/cfg/bin/ydb_configure |69.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/client/metadata/ut/ydb-core-client-metadata-ut |69.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kesus/tablet/tablet_html.cpp |69.5%| [LD] {RESULT} $(B)/ydb/core/client/metadata/ut/ydb-core-client-metadata-ut |69.5%| [LD] {RESULT} $(B)/ydb/tools/cfg/bin/ydb_configure |69.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/storage/chunks/data.cpp |69.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/reader/common_reader/constructor/resolver.cpp |69.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/public/sdk/cpp/examples/topic_writer/transaction/topic_writer_transaction |69.5%| [LD] {RESULT} $(B)/ydb/public/sdk/cpp/examples/topic_writer/transaction/topic_writer_transaction |69.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/storage/indexes/portions/meta.cpp |69.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/library/yaml_config/console_dumper.cpp |69.6%| [LD] {BAZEL_UPLOAD} $(B)/ydb/library/yql/providers/generic/pushdown/ut/ydb-library-yql-providers-generic-pushdown-ut |69.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/changes/compaction/sub_columns/logic.cpp |69.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kesus/tablet/quoter_runtime.cpp |69.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/common_reader/constructor/resolver.cpp |69.6%| [AR] {BAZEL_UPLOAD} $(B)/ydb/public/lib/ydb_cli/commands/libclicommands.a |69.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/mvp/meta/bin/mvp_meta |69.6%| [LD] {RESULT} $(B)/ydb/mvp/meta/bin/mvp_meta |69.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/mvp/meta/bin/mvp_meta |69.6%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/changes/compaction/sub_columns/libchanges-compaction-sub_columns.global.a |69.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kesus/tablet/tx_quoter_resource_update.cpp |69.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/transactions/locks/read_finished.cpp |69.6%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/changes/compaction/sub_columns/libchanges-compaction-sub_columns.global.a |69.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/backup/controller/tablet.cpp |69.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kesus/tablet/tx_semaphore_update.cpp |69.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/columns_set.cpp |69.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kesus/tablet/quoter_runtime.cpp |69.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kesus/tablet/tx_quoter_resource_update.cpp |69.7%| [CC] {default-linux-x86_64, release, asan} 
$(S)/ydb/core/tx/sequenceshard/tx_mark_schemeshard_pipe.cpp |69.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/library/yaml_config/console_dumper.cpp |69.7%| [ld] {default-linux-x86_64, release, asan} $(B)/tools/flake8_linter/flake8_linter |69.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/columns_set.cpp |69.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kesus/tablet/tx_semaphore_update.cpp |69.8%| [UN] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/postgresql/psql/psql |69.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/public/sdk/cpp/examples/bulk_upsert_simple/bulk_upsert_simple |69.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/sequenceshard/tx_mark_schemeshard_pipe.cpp |69.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/executer_actor/kqp_planner_strategy.cpp |69.8%| [LD] {RESULT} $(B)/ydb/public/sdk/cpp/examples/bulk_upsert_simple/bulk_upsert_simple |69.8%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/changes/compaction/sub_columns/libchanges-compaction-sub_columns.global.a |69.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/executer_actor/kqp_planner_strategy.cpp |69.8%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/storage/actualizer/abstract/libstorage-actualizer-abstract.a |69.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard_import_scheme_query_executor.cpp |69.8%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/storage/actualizer/abstract/libstorage-actualizer-abstract.a |69.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/sequenceshard/tx_redirect_sequence.cpp |69.8%| [ld] {default-linux-x86_64, release, asan} $(B)/tools/black_linter/black_linter |69.9%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/actualizer/abstract/libstorage-actualizer-abstract.a |69.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/sequenceshard/tx_redirect_sequence.cpp |69.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_import_scheme_query_executor.cpp |69.9%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/backup/controller/libcore-backup-controller.a |69.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/transactions/locks/read_finished.cpp |69.9%| [AR] {RESULT} $(B)/ydb/core/backup/controller/libcore-backup-controller.a |69.9%| [LD] {BAZEL_UPLOAD} $(B)/ydb/library/yql/providers/dq/runtime/ut/ydb-library-yql-providers-dq-runtime-ut |70.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/yql/providers/generic/connector/tests/datasource/postgresql/tests-datasource-postgresql |70.0%| [LD] {RESULT} $(B)/ydb/library/yql/providers/generic/connector/tests/datasource/postgresql/tests-datasource-postgresql |70.1%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/backup/controller/libcore-backup-controller.a |70.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/sequenceshard/tx_get_sequence.cpp |70.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/sequenceshard/tx_restore_sequence.cpp |70.1%| [LD] {BAZEL_UPLOAD} $(B)/ydb/library/yaml_config/validator/ut/validator/ydb-library-yaml_config-validator-ut-validator |70.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/changes/compaction/sub_columns/logic.cpp |70.1%| [LD] {BAZEL_UPLOAD} $(B)/ydb/library/protobuf_printer/ut/ydb-library-protobuf_printer-ut |70.1%| [EN] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/schemeshard_types.h_serialized.cpp |70.1%| [LD] {default-linux-x86_64, release, asan} 
$(B)/ydb/core/kqp/tests/tpch/tpch |70.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_kqp_read_table.cpp |70.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/sequenceshard/tx_get_sequence.cpp |70.1%| [LD] {RESULT} $(B)/ydb/core/kqp/tests/tpch/tpch |70.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/tests/tpch/tpch |70.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/sequenceshard/tx_restore_sequence.cpp |70.2%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/storage/chunks/libengines-storage-chunks.a |70.2%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/storage/chunks/libengines-storage-chunks.a |70.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/sequenceshard/tx_update_sequence.cpp |70.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/config/init/init.cpp |70.2%| [EN] {BAZEL_UPLOAD} $(B)/ydb/core/tx/schemeshard/schemeshard_types.h_serialized.cpp |70.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/sequenceshard/tx_freeze_sequence.cpp |70.2%| [PR] {default-linux-x86_64, release, asan} $(B)/ydb/public/api/grpc/140c88ca3e90f1923d2b7a0c94_raw.auxcpp |70.2%| [PK] {default-linux-x86_64, release, asan} $(B)/yql/essentials/tests/common/test_framework/udfs_deps/{common-test_framework-udfs_deps.final.pkg.fake ... yql/essentials/udfs/common/hyperscan/libhyperscan_udf.so} |70.2%| [BN] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stability/tool/cfg |70.2%| [BN] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stability/tool/cfg |70.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/sequenceshard/tx_update_sequence.cpp |70.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_kqp_read_table.cpp |70.2%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/chunks/libengines-storage-chunks.a |70.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tools/tsserver/tsserver |70.2%| [LD] {RESULT} $(B)/ydb/tools/tsserver/tsserver |70.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/sequenceshard/tx_freeze_sequence.cpp |70.2%| [PR] {BAZEL_UPLOAD} $(B)/ydb/public/api/grpc/140c88ca3e90f1923d2b7a0c94_raw.auxcpp |70.3%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/transactions/locks/libcolumnshard-transactions-locks.global.a |70.3%| [PY] {default-linux-x86_64, release, asan} $(B)/ydb/public/api/grpc/ydb_config_v1__intpy3___pb2.py.gcum.yapyc3 |70.3%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/transactions/locks/libcolumnshard-transactions-locks.global.a |70.3%| [PY] {BAZEL_UPLOAD} $(B)/ydb/public/api/grpc/ydb_config_v1__intpy3___pb2.py.gcum.yapyc3 |70.3%| [EN] {default-linux-x86_64, release, asan} $(B)/ydb/core/tablet_flat/flat_executor_compaction_logic.h_serialized.cpp |70.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/sequenceshard/sequenceshard.cpp |70.3%| [BN] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stability/tool/ydb_cli |70.3%| [BN] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stability/tool/ydb_cli |70.3%| [EN] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/portions/portion_info.h_serialized.cpp |70.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/public/sdk/cpp/examples/secondary_index_builtin/secondary_index_builtin |70.3%| [EN] {BAZEL_UPLOAD} $(B)/ydb/core/tablet_flat/flat_executor_compaction_logic.h_serialized.cpp |70.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/storage/indexes/skip_index/meta.cpp |70.3%| [CC] {default-linux-x86_64, release, asan} 
$(S)/ydb/core/tx/datashard/scan_common.cpp |70.3%| [LD] {RESULT} $(B)/ydb/public/sdk/cpp/examples/secondary_index_builtin/secondary_index_builtin |70.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/sequenceshard/sequenceshard.cpp |70.3%| [CC] {default-linux-x86_64, release, asan} $(B)/ydb/public/api/grpc/140c88ca3e90f1923d2b7a0c94_raw.auxcpp |70.3%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/transactions/locks/libcolumnshard-transactions-locks.global.a |70.3%| [EN] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/portions/portion_info.h_serialized.cpp |70.4%| [CC] {tool} $(B)/ydb/core/protos/blobstorage_distributed_config.grpc.pb.cc |70.4%| [CC] {BAZEL_UPLOAD} $(B)/ydb/public/api/grpc/140c88ca3e90f1923d2b7a0c94_raw.auxcpp |70.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/storage/indexes/skip_index/meta.cpp |70.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/scan_common.cpp |70.4%| [CC] {BAZEL_UPLOAD} $(B)/ydb/core/protos/blobstorage_distributed_config.grpc.pb.cc |70.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/sequenceshard/tx_allocate_sequence.cpp |70.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kesus/tablet/tablet_db.cpp >> YdbValue::BuildValueList >> YdbValue::BuildValueIncomplete [GOOD] >> YdbValue::BuildValueEmptyListUnknown [GOOD] |70.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/public/sdk/cpp/examples/topic_reader/simple/simple_persqueue_reader |70.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/config/init/init.cpp >> YdbValue::BuildValueDictEmpty1 [GOOD] >> YdbValue::BuildTypeReuse [GOOD] >> YdbValue::BuildValueDict1 [GOOD] >> YdbValue::BuildValueDict2 [GOOD] >> YdbValue::BuildValueBadCall [GOOD] >> YdbValue::BuildValueList [GOOD] >> YdbValue::BuildValueListEmpty [GOOD] >> YdbValue::BuildValueListEmpty2 [GOOD] |70.4%| [PR] {default-linux-x86_64, release, asan} $(B)/ydb/public/api/grpc/a8f74ccfefd66bc6dc846adade_raw.auxcpp |70.4%| [EN] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/columns_set.h_serialized.cpp |70.4%| [LD] {RESULT} $(B)/ydb/public/sdk/cpp/examples/topic_reader/simple/simple_persqueue_reader >> YdbValue::BuildValueOptional [GOOD] >> YdbValue::BuildValueOptionalMismatch1 [GOOD] |70.4%| [EN] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/reader/common/description.h_serialized.cpp >> YdbValue::BuildValueStruct [GOOD] >> YdbValue::BuildValueNestedOptional [GOOD] >> YdbValue::BuildValueListItemMismatch1 [GOOD] >> YdbValue::BuildValueOptionalMismatch2 [GOOD] >> YdbValue::BuildValueListEmpty3 [GOOD] |70.5%| [LD] {BAZEL_UPLOAD} $(B)/ydb/library/grpc/server/ut/ydb-library-grpc-server-ut >> YdbValue::BuildValueListItemMismatch3 [GOOD] >> YdbValue::BuildValueListItemMismatch2 [GOOD] >> YdbValue::BuildValueListItemMismatch4 [GOOD] |70.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/sequenceshard/tx_allocate_sequence.cpp |70.5%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/unit/client/value/gtest >> YdbValue::BuildValueListEmpty2 [GOOD] |70.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kesus/tablet/tablet_db.cpp |70.5%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/unit/client/value/gtest >> YdbValue::BuildValueBadCall [GOOD] |70.5%| [EN] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/columns_set.h_serialized.cpp |70.5%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/unit/client/value/gtest >> YdbValue::BuildValueOptionalMismatch2 [GOOD] |70.5%| [EN] 
{BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/common/description.h_serialized.cpp >> JsonValueTest::PrimitiveValueUtf8String1 [GOOD] >> JsonValueTest::PrimitiveValueUint8 [GOOD] >> JsonValueTest::PrimitiveValueUtf8String2 [GOOD] |70.5%| [EN] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/normalizer/abstract/abstract.h_serialized.cpp |70.5%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/unit/client/value/gtest >> YdbValue::BuildValueListItemMismatch4 [GOOD] >> JsonValueTest::TaggedValue [GOOD] |70.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/public/sdk/cpp/examples/vector_index/vector_index |70.5%| [PR] {BAZEL_UPLOAD} $(B)/ydb/public/api/grpc/a8f74ccfefd66bc6dc846adade_raw.auxcpp |70.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/sequenceshard/tx_drop_sequence.cpp |70.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/sequenceshard/tx_init_schema.cpp |70.5%| [LD] {RESULT} $(B)/ydb/public/sdk/cpp/examples/vector_index/vector_index |70.6%| [EN] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/normalizer/abstract/abstract.h_serialized.cpp |70.6%| [TM] {asan, default-linux-x86_64, release} ydb/public/lib/json_value/ut/unittest >> JsonValueTest::TaggedValue [GOOD] |70.6%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/public/ydb_issue/ut/unittest |70.6%| [EN] {default-linux-x86_64, release, asan} $(B)/ydb/core/config/init/init.h_serialized.cpp |70.7%| [CC] {default-linux-x86_64, release, asan} $(B)/ydb/public/api/grpc/a8f74ccfefd66bc6dc846adade_raw.auxcpp |70.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/sequenceshard/tx_init_schema.cpp |70.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/sequenceshard/tx_drop_sequence.cpp |70.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/runtime/kqp_scan_data.cpp >> JsonValueTest::PrimitiveValueUint16 [GOOD] >> JsonValueTest::PrimitiveValueUint32 [GOOD] >> JsonValueTest::PrimitiveValueUint64 [GOOD] >> JsonValueTest::PrimitiveValueTimestamp64 [GOOD] |70.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/apps/ydbd/export.cpp >> DqOutputChannelWithStorageTests::Spill [GOOD] >> DqOutputWideChannelTests::Overflow [GOOD] >> DqOutputWideChannelTests::BigRow >> DqUnboxedValueToNativeArrowConversion::VariantOverTupleWithOptionals [GOOD] >> TestArrowBlockSplitter::CheckLargeRows [GOOD] |70.7%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/unit/client/params/gtest |70.7%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/unit/client/params/gtest >> DqUnboxedValueToNativeArrowConversion::VariantOverStruct [GOOD] >> IssueProtoTest::KikimrYqlSameLayout >> TestArrowBlockSplitter::CheckLargeScalarRows [GOOD] >> DqOutputWideChannelTests::BigRow [GOOD] |70.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/blobs_action/tier/gc_info.cpp |70.7%| [TM] {asan, default-linux-x86_64, release} ydb/public/lib/json_value/ut/unittest >> JsonValueTest::PrimitiveValueTimestamp64 [GOOD] |70.7%| [CC] {BAZEL_UPLOAD} $(B)/ydb/public/api/grpc/a8f74ccfefd66bc6dc846adade_raw.auxcpp >> DqOutputWideChannelTests::ChunkSizeLimit [GOOD] >> IssueProtoTest::KikimrYqlSameLayout [GOOD] >> JsonValueTest::CompositeValueDict [GOOD] |70.7%| [LD] {BAZEL_UPLOAD} $(B)/ydb/public/lib/ydb_cli/common/yql_parser/ut/ydb-public-lib-ydb_cli-common-yql_parser-ut >> JsonValueTest::CompositeValueEmptyList [GOOD] >> JsonValueTest::BinaryStringUnicode [GOOD] >> JsonValueTest::BinaryStringBase64 [GOOD] >> 
JsonValueTest::BinaryStringAsciiFollowedByNonAscii [GOOD] >> ConvertUnboxedValueToArrowAndBack::DictUtf8ToInterval |70.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/apps/ydbd/export.cpp |70.7%| [TM] {asan, default-linux-x86_64, release} ydb/library/yql/dq/runtime/ut/unittest >> TestArrowBlockSplitter::CheckLargeScalarRows [GOOD] |70.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/blobs_action/tier/gc_info.cpp |70.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/library/yaml_config/tools/dump/main.cpp |70.7%| [TM] {asan, default-linux-x86_64, release} ydb/library/yql/dq/runtime/ut/unittest >> DqOutputWideChannelTests::ChunkSizeLimit [GOOD] |70.8%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/public/ydb_issue/ut/unittest >> IssueProtoTest::KikimrYqlSameLayout [GOOD] |70.8%| [TM] {asan, default-linux-x86_64, release} ydb/public/lib/json_value/ut/unittest >> JsonValueTest::BinaryStringAsciiFollowedByNonAscii [GOOD] >> DqOutputWideChannelTests::PopAll >> DqOutputWideChannelTests::PartialRead [GOOD] |70.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/library/yaml_config/tools/dump/main.cpp >> DqOutputWideChannelTests::SingleRead [GOOD] >> DqOutputWideChannelTests::PopAll [GOOD] >> JsonValueTest::EmptyBinaryStringUnicode [GOOD] |70.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/yql/providers/generic/connector/tests/datasource/mysql/connector-tests-datasource-mysql |70.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_kqp_upsert_rows.cpp |70.8%| [LD] {RESULT} $(B)/ydb/library/yql/providers/generic/connector/tests/datasource/mysql/connector-tests-datasource-mysql >> DqOutputWideChannelWithStorageTests::Overflow [GOOD] >> JsonValueTest::EmptyList [GOOD] >> JsonValueTest::InvalidJsonToBinaryString1 [GOOD] >> DqOutputWideChannelWithStorageTests::Spill [GOOD] |70.8%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/public/api/grpc/libpy3api-grpc.global.a >> DqUnboxedValueDoNotFitToArrow::DictOptionalToTuple >> JsonValueTest::InvalidJsonToBinaryString2 [GOOD] >> ConvertUnboxedValueToArrowAndBack::VariantOverTupleWithOptionals [GOOD] >> ToStreamTest::ManyIssuesTest [GOOD] >> DqOutputChannelTests::Overflow [GOOD] >> JsonValueTest::PrimitiveValueTimestamp [GOOD] >> DqOutputChannelTests::BigRow |70.8%| [AR] {RESULT} $(B)/ydb/public/api/grpc/libpy3api-grpc.global.a |70.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tablet_flat/flat_store_hotdog.cpp >> JsonValueTest::PrimitiveValueInt64 [GOOD] >> DqOutputChannelTests::BigRow [GOOD] >> JsonValueTest::PrimitiveValueInt8 [GOOD] >> DqOutputChannelTests::ChunkSizeLimit [GOOD] |70.8%| [TM] {asan, default-linux-x86_64, release} ydb/library/yql/dq/runtime/ut/unittest >> DqOutputWideChannelWithStorageTests::Overflow [GOOD] |70.8%| [TM] {asan, default-linux-x86_64, release} ydb/public/lib/json_value/ut/unittest >> JsonValueTest::InvalidJsonToBinaryString2 [GOOD] |70.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/library/yaml_config/tools/dump_ds_init/main.cpp >> DqUnboxedValueToNativeArrowConversion::Struct [GOOD] >> JsonValueTest::PrimitiveValueSimpleString [GOOD] >> DqUnboxedValueToNativeArrowConversion::Tuple >> DqUnboxedValueToNativeArrowConversion::Tuple [GOOD] >> DqUnboxedValueToNativeArrowConversion::DictUtf8ToInterval >> DqUnboxedValueDoNotFitToArrow::DictOptionalToTuple [GOOD] >> DqUnboxedValueDoNotFitToArrow::OptionalOfOptional [GOOD] |70.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/yql/tools/dq/dq_cli/dq_cli |70.8%| [LD] {RESULT} 
$(B)/ydb/library/yql/tools/dq/dq_cli/dq_cli |70.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/node_broker__update_epoch.cpp |70.8%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/public/ydb_issue/ut/unittest >> DqUnboxedValueDoNotFitToArrow::LargeVariant >> SamplingControlTests::Simple [GOOD] >> SamplingControlTests::EdgeCaseUpper [GOOD] >> DqUnboxedValueDoNotFitToArrow::LargeVariant [GOOD] >> ConvertUnboxedValueToArrowAndBack::Struct [GOOD] |70.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_kqp_upsert_rows.cpp |70.8%| [TM] {asan, default-linux-x86_64, release} ydb/public/lib/json_value/ut/unittest >> JsonValueTest::PrimitiveValueSimpleString [GOOD] >> ConvertUnboxedValueToArrowAndBack::Tuple >> ConvertUnboxedValueToArrowAndBack::Tuple [GOOD] >> ConvertUnboxedValueToArrowAndBack::VariantOverStruct [GOOD] |70.8%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/public/ydb_issue/ut/unittest >> ToStreamTest::ManyIssuesTest [GOOD] |70.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/blobs_action/local/storage.cpp |70.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/library/yaml_config/tools/dump_ds_init/main.cpp |70.8%| [AR] {BAZEL_UPLOAD} $(B)/ydb/public/api/grpc/libpy3api-grpc.global.a |70.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tablet_flat/flat_store_hotdog.cpp >> ConvertUnboxedValueToArrowAndBack::OptionalOfOptional [GOOD] >> ThrottlerControlTests::Overflow_2 [GOOD] |70.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/export_s3_buffer_ut.cpp |70.8%| [TM] {asan, default-linux-x86_64, release} ydb/library/yql/dq/runtime/ut/unittest >> DqOutputChannelTests::ChunkSizeLimit [GOOD] |70.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/node_broker__update_epoch.cpp >> JsonValueTest::PrimitiveValueDate32 [GOOD] >> JsonValueTest::PrimitiveValueDatetime [GOOD] >> JsonValueTest::PrimitiveValueDate [GOOD] >> JsonValueTest::PrimitiveValueDatetime64 [GOOD] |70.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/jaeger_tracing/ut/unittest >> SamplingControlTests::Simple [GOOD] |70.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/jaeger_tracing/ut/unittest >> SamplingControlTests::EdgeCaseUpper [GOOD] |70.8%| [TM] {asan, default-linux-x86_64, release} ydb/library/yql/dq/runtime/ut/unittest >> ConvertUnboxedValueToArrowAndBack::OptionalOfOptional [GOOD] |70.8%| [TM] {asan, default-linux-x86_64, release} ydb/library/yql/dq/runtime/ut/unittest >> DqUnboxedValueDoNotFitToArrow::LargeVariant [GOOD] |70.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/runtime/kqp_scan_data.cpp |70.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_kqp_delete_rows.cpp |70.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/sequenceshard/tx_create_sequence.cpp |70.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/blobs_action/local/storage.cpp |70.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tablet_flat/flat_exec_seat.cpp >> SamplingControlTests::EdgeCaseLower [GOOD] >> DqOutputChannelTests::SingleRead [GOOD] >> ThrottlerControlTests::Overflow_1 [GOOD] >> DqOutputChannelTests::PartialRead [GOOD] |70.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/apps/ydb/ut/ydb-apps-ydb-ut |70.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/export_s3_buffer_ut.cpp |70.8%| [TM] {asan, default-linux-x86_64, release} ydb/public/lib/json_value/ut/unittest >> JsonValueTest::PrimitiveValueDatetime64 [GOOD] |70.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/jaeger_tracing/ut/unittest 
>> ThrottlerControlTests::Overflow_2 [GOOD] >> ThrottlerControlTests::MultiThreaded2Threads200Ticks30Init7Step >> DqOutputChannelTests::PopAll [GOOD] >> JsonValueTest::CompositeValueStruct [GOOD] >> ConvertUnboxedValueToArrowAndBack::DictUtf8ToInterval [GOOD] >> TestArrowBlockSplitter::SplitLargeBlock [GOOD] |70.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/node_broker__init_scheme.cpp |70.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tablet_flat/flat_executor_snapshot.cpp |70.8%| [LD] {RESULT} $(B)/ydb/apps/ydb/ut/ydb-apps-ydb-ut >> ConvertUnboxedValueToArrowAndBack::ListOfJsons [GOOD] >> ThrottlerControlTests::MultiThreaded5Threads150Ticks500Init15Step >> DqOutputChannelWithStorageTests::Overflow [GOOD] >> JsonValueTest::CompositeValueIntList [GOOD] >> TestArrowBlockSplitter::SplitWithScalars [GOOD] >> ConvertUnboxedValueToArrowAndBack::DictOptionalToTuple >> ThrottlerControlTests::MultiThreaded10Threads100Ticks1000Init22Step |70.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/sequenceshard/tx_init.cpp |70.8%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/fq/libs/metrics/ut/ydb-core-fq-libs-metrics-ut >> TestArrowBlockSplitter::PassSmallBlock [GOOD] >> JsonValueTest::EmptyBinaryStringBase64 [GOOD] >> ThrottlerControlTests::MultiThreaded5Threads150Ticks500Init15Step [GOOD] |70.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/tools/combiner_perf/bin/combiner_perf |70.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/jaeger_tracing/ut/unittest >> SamplingControlTests::EdgeCaseLower [GOOD] |70.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_kqp_delete_rows.cpp |70.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tablet_flat/flat_executor_gclogic.cpp |70.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tablet_flat/flat_exec_seat.cpp >> ThrottlerControlTests::Simple [GOOD] >> ThrottlerControlTests::MultiThreaded2Threads200Ticks30Init7Step [GOOD] >> JsonValueTest::CompositeValueTuple [GOOD] >> JsonValueTest::PrimitiveValueFloat [GOOD] >> CppGrpcClientSimpleTest::WithoutDiscoveryClientLevel [GOOD] >> ThrottlerControlTests::MultiThreaded10Threads100Ticks1000Init22Step [GOOD] |70.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/sequenceshard/tx_create_sequence.cpp |70.9%| [LD] {RESULT} $(B)/ydb/core/kqp/tools/combiner_perf/bin/combiner_perf |70.9%| [TM] {asan, default-linux-x86_64, release} ydb/library/yql/dq/runtime/ut/unittest >> DqOutputChannelWithStorageTests::Overflow [GOOD] |70.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/tools/combiner_perf/bin/combiner_perf >> ThrottlerControlTests::LongIdle [GOOD] >> JsonValueTest::PrimitiveValueDouble [GOOD] >> CppGrpcClientSimpleTest::ConnectWrongPort [GOOD] |70.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/runtime/kqp_compute_scheduler.cpp |70.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tablet_flat/flat_exec_commit.cpp |70.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/jaeger_tracing/ut/unittest >> ThrottlerControlTests::Overflow_1 [GOOD] >> ConvertUnboxedValueToArrowAndBack::DictOptionalToTuple [GOOD] >> JsonValueTest::PrimitiveValueInt16 [GOOD] >> CppGrpcClientSimpleTest::WithoutDiscoveryDriverLevel [GOOD] |70.9%| [TM] {asan, default-linux-x86_64, release} ydb/library/yql/dq/runtime/ut/unittest >> TestArrowBlockSplitter::PassSmallBlock [GOOD] |70.9%| [TM] {asan, default-linux-x86_64, release} ydb/public/lib/json_value/ut/unittest >> JsonValueTest::CompositeValueTuple [GOOD] >> ConvertUnboxedValueToArrowAndBack::LargeVariant |70.9%| 
[CC] {BAZEL_UPLOAD} $(S)/ydb/core/tablet_flat/flat_executor_snapshot.cpp |70.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/splitter/blob_info.cpp >> JsonValueTest::PrimitiveValueInt32 [GOOD] >> YdbValue::BuildType [GOOD] >> ConvertUnboxedValueToArrowAndBack::LargeVariant [GOOD] |70.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/jaeger_tracing/ut/unittest >> ThrottlerControlTests::Simple [GOOD] |70.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/jaeger_tracing/ut/unittest >> ThrottlerControlTests::MultiThreaded5Threads150Ticks500Init15Step [GOOD] >> YdbValue::ParseType1 [GOOD] >> ToStreamTest::OneMessageTest [GOOD] |70.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/node_broker__init_scheme.cpp |70.9%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/unit/client/driver/unittest >> CppGrpcClientSimpleTest::ConnectWrongPort [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/unit/client/driver/unittest >> CppGrpcClientSimpleTest::WithoutDiscoveryClientLevel [GOOD] Test command err: CreateSession: operation_params { } |70.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/jaeger_tracing/ut/unittest >> ThrottlerControlTests::MultiThreaded2Threads200Ticks30Init7Step [GOOD] >> CppGrpcClientSimpleTest::ConnectWrongPortRetry >> YdbValue::BuildDyNumberValue [GOOD] >> YdbValue::BuildTaggedValue [GOOD] >> YdbValue::BuildTypeIncomplete [GOOD] >> YdbValue::BuildTaggedType [GOOD] >> YdbValue::ParseType2 [GOOD] >> YdbValue::ParseTaggedType [GOOD] >> YdbValue::IncorrectUuid [GOOD] |70.9%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/unit/client/driver/unittest |70.9%| [EN] {BAZEL_UPLOAD} $(B)/ydb/core/config/init/init.h_serialized.cpp |70.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/jaeger_tracing/ut/unittest >> ThrottlerControlTests::LongIdle [GOOD] |70.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/jaeger_tracing/ut/unittest >> ThrottlerControlTests::MultiThreaded10Threads100Ticks1000Init22Step [GOOD] |70.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tablet_flat/flat_exec_commit.cpp |70.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/splitter/blob_info.cpp >> CppGrpcClientSimpleTest::ConnectWrongPortRetry [GOOD] >> CppGrpcClientSimpleTest::TokenCharacters >> Coordination::SessionCancelByDriver >> CppGrpcClientSimpleTest::UsingIpAddresses |70.9%| [TM] {asan, default-linux-x86_64, release} ydb/library/yql/dq/runtime/ut/unittest >> ConvertUnboxedValueToArrowAndBack::LargeVariant [GOOD] |70.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kesus/tablet/tx_init_schema.cpp |70.9%| [TM] {asan, default-linux-x86_64, release} ydb/public/lib/json_value/ut/unittest >> JsonValueTest::PrimitiveValueInt32 [GOOD] |70.9%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/unit/client/value/gtest >> YdbValue::IncorrectUuid [GOOD] |70.9%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/unit/client/coordination/unittest |70.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tablet_flat/flat_executor_gclogic.cpp >> CppGrpcClientSimpleTest::UsingIpAddresses [GOOD] >> Coordination::SessionCancelByDriver [GOOD] >> CppGrpcClientSimpleTest::TokenCharacters [GOOD] >> IssueProtoTest::BinarySerialization [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/unit/client/driver/unittest >> CppGrpcClientSimpleTest::WithoutDiscoveryDriverLevel [GOOD] Test command err: CreateSession: operation_params { } |71.0%| [TM] {asan, 
default-linux-x86_64, release} ydb/public/sdk/cpp/tests/unit/client/value/gtest >> YdbValue::BuildTaggedType [GOOD] |71.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/sequenceshard/tx_init.cpp |71.0%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/unit/client/driver/unittest >> CppGrpcClientSimpleTest::ConnectWrongPortRetry [GOOD] |71.0%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/public/ydb_issue/ut/unittest >> ToStreamTest::OneMessageTest [GOOD] >> ValidationTests::HasReservedPaths [GOOD] |71.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/runtime/kqp_compute_scheduler.cpp ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/unit/client/coordination/unittest >> Coordination::SessionCancelByDriver [GOOD] Test command err: ListEndpoints: database: "/Root/My/DB" Session stream started Session request: session_start { path: "/Some/Path" timeout_millis: 1000 seq_no: 1 protection_key: "T\226\364\031R\371\017\234" } Session state: ATTACHED Session state: DETACHED Close: CLIENT_CANCELLED:
: Error: Client is stopped >> DqUnboxedValueToNativeArrowConversion::DictUtf8ToInterval [GOOD] >> DqUnboxedValueToNativeArrowConversion::ListOfJsons [GOOD] >> ValidationTests::CanCopyTo [GOOD] |71.0%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/unit/client/driver/unittest |71.0%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/unit/client/coordination/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/unit/client/driver/unittest >> CppGrpcClientSimpleTest::UsingIpAddresses [GOOD] Test command err: ListEndpoints: database: "/Root/My/DB" CreateSession: operation_params { } >> Scheme::TSerializedCellMatrix [GOOD] |71.0%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/unit/client/driver/unittest >> CppGrpcClientSimpleTest::TokenCharacters [GOOD] |71.0%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/public/ydb_issue/ut/unittest >> IssueProtoTest::BinarySerialization [GOOD] |71.0%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/unit/client/driver/unittest |71.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/config/tools/protobuf_plugin/ut/unittest >> ValidationTests::HasReservedPaths [GOOD] >> Scheme::OwnedCellVecFromSerialized [GOOD] |71.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kesus/tablet/tx_init_schema.cpp |71.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/config/tools/protobuf_plugin/ut/unittest >> ValidationTests::AdvancedCopyTo [GOOD] >> Scheme::YqlTypesMustBeDefined [GOOD] >> SchemeBorders::Full [GOOD] |71.0%| [TM] {asan, default-linux-x86_64, release} ydb/library/yql/dq/runtime/ut/unittest >> DqUnboxedValueToNativeArrowConversion::ListOfJsons [GOOD] |71.0%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/unit/client/driver/unittest |71.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/config/tools/protobuf_plugin/ut/unittest |71.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/config/tools/protobuf_plugin/ut/unittest >> ValidationTests::CanCopyTo [GOOD] >> ValidationTests::CanDispatchByTag [GOOD] |71.0%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/unit/client/coordination/unittest |71.0%| [TS] {asan, default-linux-x86_64, release} ydb/core/scheme/ut/unittest >> Scheme::OwnedCellVecFromSerialized [GOOD] >> ToOneLineStringTest::ManyIssuesTest [GOOD] >> Coordination::SessionPingTimeout |71.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/config/tools/protobuf_plugin/ut/unittest |71.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/public/sdk/cpp/examples/ttl/ttl |71.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/config/tools/protobuf_plugin/ut/unittest >> ValidationTests::MapType [GOOD] >> Scheme::TSerializedCellVec [GOOD] |71.0%| [TS] {asan, default-linux-x86_64, release} ydb/core/scheme/ut/unittest >> SchemeBorders::Full [GOOD] |71.0%| [LD] {RESULT} $(B)/ydb/public/sdk/cpp/examples/ttl/ttl |71.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/changes/compaction/common/context.cpp >> Scheme::UnsafeAppend [GOOD] >> TypesProto::DecimalNoTypeInfo [GOOD] >> SchemeRanges::CmpBorders [GOOD] >> SchemeBorders::Partial [GOOD] >> SchemeRanges::RangesBorders >> TypesProto::Decimal35 [GOOD] |71.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/config/tools/protobuf_plugin/ut/unittest >> ValidationTests::AdvancedCopyTo [GOOD] |71.0%| [TS] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/unit/library/issue/unittest |71.0%| [CC] 
{default-linux-x86_64, release, asan} $(S)/ydb/core/kesus/tablet/tx_semaphore_acquire.cpp |71.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/config/tools/protobuf_plugin/ut/unittest >> ValidationTests::CanDispatchByTag [GOOD] >> SchemeRanges::RangesBorders [GOOD] >> TErasureTypeTest::TestAllSpecies1of2 >> TypesProto::Decimal22 [GOOD] |71.0%| [TS] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/unit/library/issue/unittest >> ToOneLineStringTest::ManyIssuesTest [GOOD] |71.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/changes/compaction/common/context.cpp >> ErasureBrandNew::Block42_encode >> TErasureTypeTest::TestBlock22LossOfAllPossible2 >> TErasureTypeTest::TestEo [GOOD] >> Scheme::CellVecTryParse [GOOD] |71.0%| [CC] {tool} $(B)/ydb/core/protos/blobstorage_distributed_config.pb.cc |71.0%| [LD] {BAZEL_UPLOAD} $(B)/ydb/library/yql/dq/state/ut/ydb-library-yql-dq-state-ut ------- [TS] {asan, default-linux-x86_64, release} ydb/core/scheme/ut/unittest >> Scheme::UnsafeAppend [GOOD] Test command err: Serialize: 0.001729s Cells constructor: 0.005197s Parse: 0.000770s Copy: 0.000152s Move: 0.000144s |71.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/config/tools/protobuf_plugin/ut/unittest >> ValidationTests::MapType [GOOD] |71.1%| [TS] {asan, default-linux-x86_64, release} ydb/core/scheme/ut/unittest >> TypesProto::Decimal35 [GOOD] >> Scheme::CompareOrder [GOOD] >> Scheme::NullCell [GOOD] >> Scheme::NotEmptyCell [GOOD] >> TErasureTypeTest::TestBlock31LossOfAllPossible1 |71.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/public/sdk/cpp/examples/topic_reader/eventloop/persqueue_reader_eventloop |71.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/config/tools/protobuf_plugin/ut/unittest |71.1%| [TS] {asan, default-linux-x86_64, release} ydb/core/scheme/ut/unittest >> SchemeBorders::Partial [GOOD] |71.1%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/unit/client/coordination/unittest |71.1%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/unit/client/coordination/unittest |71.1%| [LD] {RESULT} $(B)/ydb/public/sdk/cpp/examples/topic_reader/eventloop/persqueue_reader_eventloop |71.1%| [TS] {asan, default-linux-x86_64, release} ydb/core/scheme/ut/unittest >> TypesProto::Decimal22 [GOOD] |71.1%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/unit/client/coordination/unittest >> Scheme::NonEmptyOwnedCellVec [GOOD] >> TErasureTypeTest::TestStripe22LossOfAllPossible2 >> Scheme::EmptyOwnedCellVec [GOOD] |71.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kesus/tablet/tx_semaphore_acquire.cpp |71.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/erasure/ut/unittest >> TErasureTypeTest::TestEo [GOOD] |71.1%| [TS] {asan, default-linux-x86_64, release} ydb/core/scheme/ut/unittest >> Scheme::CompareOrder [GOOD] >> TErasureTypeTest::TestBlockByteOrder [GOOD] >> TErasureTypeTest::TestBlock33LossOfAllPossible3 >> TErasureTypeTest::TestBlock31LossOfAllPossible1 [GOOD] >> TErasureTypeTest::TestBlock23LossOfAllPossible3 |71.1%| [TS] {asan, default-linux-x86_64, release} ydb/core/scheme/ut/unittest >> Scheme::NotEmptyCell [GOOD] >> TErasureTypeTest::TestDifferentCasesInDiffSplitingBlock4Plus2 [GOOD] |71.1%| [TS] {asan, default-linux-x86_64, release} ydb/core/scheme/ut/unittest >> Scheme::EmptyOwnedCellVec [GOOD] |71.1%| [CC] {BAZEL_UPLOAD} $(B)/ydb/core/protos/blobstorage_distributed_config.pb.cc >> YdbValue::ParseValueMaybe [GOOD] >> YdbValue::BuildValueTuplePrimitives [GOOD] >> 
YdbValue::BuildValueStructMissingMember [GOOD] >> YdbValue::ParseValue1 [GOOD] >> YdbValue::BuildValueTupleTypeMismatch [GOOD] >> YdbValue::BuildValueTuple1 [GOOD] >> YdbValue::BuildValueWithType [GOOD] >> YdbValue::CorrectUuid [GOOD] >> YdbValue::ParseValuePg [GOOD] >> YdbValue::BuildValueTuple2 [GOOD] >> TErasureTypeTest::TestBlock22LossOfAllPossible2 [GOOD] >> YdbValue::BuildValueTupleElementsMismatch1 [GOOD] >> YdbValue::BuildValueTupleElementsMismatch2 [GOOD] >> EscapeNonUtf8::Escape [GOOD] >> YdbValue::ParseValue2 [GOOD] |71.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/erasure/ut/unittest >> TErasureTypeTest::TestBlock31LossOfAllPossible1 [GOOD] |71.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/erasure/ut/unittest >> TErasureTypeTest::TestBlockByteOrder [GOOD] |71.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/erasure/ut/unittest >> TErasureTypeTest::TestDifferentCasesInDiffSplitingBlock4Plus2 [GOOD] |71.1%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/unit/client/value/gtest >> YdbValue::BuildValueTupleElementsMismatch2 [GOOD] |71.1%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/unit/client/value/gtest >> YdbValue::ParseValue2 [GOOD] >> TErasureTypeTest::TestBlock43LossOfAllPossible3 >> ErasureBrandNew::Block42_restore >> CppGrpcClientResultSetTest::OptionalDictResultSet [GOOD] >> TErasureTypeTest::TestStripe43LossOfAllPossible3 >> TErasureTypeTest::TestStripe22LossOfAllPossible2 [GOOD] |71.1%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/unit/client/value/gtest >> YdbValue::CorrectUuid [GOOD] >> TErasureTypeTest::TestBlock42PartialRestore1 >> Coordination::SessionStartTimeout |71.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kesus/tablet/tablet_impl.cpp |71.1%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/unit/client/coordination/unittest >> TErasureTypeTest::TestBlock42PartialRestore3 >> TErasureTypeTest::TestMirror3LossOfAllPossible3 >> TErasureTypeTest::TestDifferentCasesInDiffSplitingMirror3Of4 [GOOD] |71.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/erasure/ut/unittest >> TErasureTypeTest::TestBlock22LossOfAllPossible2 [GOOD] |71.1%| [TS] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/unit/library/issue/unittest >> EscapeNonUtf8::Escape [GOOD] |71.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/portions/index_chunk.cpp |71.1%| [LD] {BAZEL_UPLOAD} $(B)/ydb/library/yql/providers/s3/range_helpers/ut/ydb-library-yql-providers-s3-range_helpers-ut |71.1%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/unit/client/result/unittest |71.1%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/unit/client/result/unittest >> CppGrpcClientResultSetTest::OptionalDictResultSet [GOOD] >> Coordination::SessionPingTimeout [GOOD] >> TErasureTypeTest::TestMirror3LossOfAllPossible3 [GOOD] |71.1%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/unit/client/result/unittest |71.1%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/unit/client/result/unittest |71.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/erasure/ut/unittest >> TErasureTypeTest::TestStripe22LossOfAllPossible2 [GOOD] >> TErasureTypeTest::TestStripe31LossOfAllPossible1 >> TUtf8Tests::Simple [GOOD] |71.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/portions/index_chunk.cpp >> Coordination::SessionStartTimeout [GOOD] |71.1%| [TM] {asan, 
default-linux-x86_64, release} ydb/core/erasure/ut/unittest >> TErasureTypeTest::TestDifferentCasesInDiffSplitingMirror3Of4 [GOOD] |71.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tablet_flat/flat_executor_gclogic_ut.cpp >> TErasureTypeTest::TestStripe23LossOfAllPossible3 |71.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/public/tools/ydb_recipe/ydb_recipe >> TErasureTypeTest::TestStripe31LossOfAllPossible1 [GOOD]
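The erasure suites interleaved here (mirror-3, block and stripe 4+2, the LossOfAllPossible* cases) all verify that lost parts can be rebuilt from the redundant ones. As orientation only — a minimal single-parity XOR sketch under invented names, not YDB's actual block-4-2 codec in ydb/core/erasure, which needs two checksum parts to survive two losses:

```cpp
// Hypothetical illustration: one XOR parity over four data parts lets us
// rebuild any single lost part. Real 4+2 schemes add a second, independent
// checksum (e.g. Reed-Solomon) to tolerate two simultaneous losses.
#include <array>
#include <cassert>
#include <cstdint>
#include <vector>

using TPart = std::vector<uint8_t>;

TPart XorParity(const std::array<TPart, 4>& data) {
    TPart parity(data[0].size(), 0);
    for (const TPart& part : data)
        for (size_t i = 0; i < part.size(); ++i)
            parity[i] ^= part[i];
    return parity;
}

TPart RestoreLost(const std::array<const TPart*, 3>& survivors, const TPart& parity) {
    TPart lost = parity; // XOR of the three survivors cancels them out of the parity
    for (const TPart* part : survivors)
        for (size_t i = 0; i < part->size(); ++i)
            lost[i] ^= (*part)[i];
    return lost;
}

int main() {
    std::array<TPart, 4> data = {TPart{1, 2}, TPart{3, 4}, TPart{5, 6}, TPart{7, 8}};
    TPart parity = XorParity(data);
    TPart restored = RestoreLost({&data[0], &data[1], &data[3]}, parity); // data[2] lost
    assert(restored == data[2]);
}
```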
: Error: Session ping request timed out |71.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/public/tools/ydb_recipe/ydb_recipe |71.2%| [LD] {RESULT} $(B)/ydb/public/tools/ydb_recipe/ydb_recipe |71.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/erasure/ut/unittest >> TErasureTypeTest::TestMirror3LossOfAllPossible3 [GOOD] >> TErasureTypeTest::TestStripe32LossOfAllPossible2 >> CppGrpcClientResultSetTest::ListResultSet >> CppGrpcClientResultSetTest::ListResultSet [GOOD] |71.2%| [TS] {asan, default-linux-x86_64, release} ydb/core/persqueue/codecs/ut/unittest |71.2%| [TS] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/unit/library/issue/unittest >> TUtf8Tests::Simple [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/unit/client/coordination/unittest >> Coordination::SessionStartTimeout [GOOD] Test command err: ListEndpoints: database: "/Root/My/DB" Got: TIMEOUT:
------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/unit/client/coordination/unittest >> Coordination::SessionStartTimeout [GOOD] Test command err: ListEndpoints: database: "/Root/My/DB" Got: TIMEOUT:
: Error: Connection request timed out |71.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kesus/tablet/tablet_impl.cpp >> TErasureTypeTest::TestBlock42PartialRestore0 >> TErasureTypeTest::TestBlock32LossOfAllPossible2 |71.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tablet_flat/flat_executor_gclogic_ut.cpp |71.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/erasure/ut/unittest >> TErasureTypeTest::TestStripe31LossOfAllPossible1 [GOOD] |71.2%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/unit/client/result/unittest >> TErasureTypeTest::TestAllSpeciesCrcWhole1of2 >> TErasureTypeTest::TestStripe42LossOfAllPossible2 >> CppGrpcClientResultSetTest::Utf8OptionalResultSet |71.2%| [TS] {asan, default-linux-x86_64, release} ydb/core/persqueue/codecs/ut/unittest |71.2%| [TA] $(B)/ydb/core/jaeger_tracing/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> TErasureTypeTest::TestSplitDiffBlock4Plus2SpecialCase1 [GOOD] >> CppGrpcClientResultSetTest::Utf8OptionalResultSet [GOOD] >> ToOneLineStringTest::OneMessageTest [GOOD] |71.2%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/unit/client/result/unittest >> CppGrpcClientResultSetTest::ListResultSet [GOOD] |71.2%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/unit/client/result/unittest >> TErasureTypeTest::TestStripe33LossOfAllPossible3 >> TErasureTypeTest::TestBlock23LossOfAllPossible3 [GOOD] |71.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/datashard/split_merge/ydb-tests-datashard-split_merge |71.2%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/blobs_action/local/libcolumnshard-blobs_action-local.a |71.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/datashard/split_merge/ydb-tests-datashard-split_merge |71.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/script_execution/ydb-tests-functional-script_execution |71.2%| [LD] {RESULT} $(B)/ydb/tests/datashard/split_merge/ydb-tests-datashard-split_merge |71.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/functional/script_execution/ydb-tests-functional-script_execution |71.2%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/unit/client/result/unittest >> CppGrpcClientResultSetTest::Utf8OptionalResultSet [GOOD] |71.2%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/blobs_action/local/libcolumnshard-blobs_action-local.a |71.2%| [LD] {RESULT} $(B)/ydb/tests/functional/script_execution/ydb-tests-functional-script_execution >> TErasureTypeTest::TestBlock42PartialRestore2 |71.2%| [TS] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/unit/library/issue/unittest >> ToOneLineStringTest::OneMessageTest [GOOD] |71.2%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/blobs_action/local/libcolumnshard-blobs_action-local.a |71.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/erasure/ut/unittest >> TErasureTypeTest::TestSplitDiffBlock4Plus2SpecialCase1 [GOOD] |71.2%| [CC] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/schemeshard_types.h_serialized.cpp |71.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tablet_flat/flat_executor_tx_env.cpp >> TErasureTypeTest::TestStripe32LossOfAllPossible2 [GOOD] >> TErasureTypeTest::isSplittedDataEqualsToOldVerion [GOOD] >> TErasureTypeTest::TestBlock42LossOfAllPossible2 >> ErasureBrandNew::Block42_encode [GOOD] |71.2%| [TS] {asan,
default-linux-x86_64, release} ydb/core/persqueue/codecs/ut/unittest |71.2%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/unit/client/result/unittest |71.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/erasure/ut/unittest >> TErasureTypeTest::TestBlock23LossOfAllPossible3 [GOOD] |71.2%| [CC] {BAZEL_UPLOAD} $(B)/ydb/core/tx/schemeshard/schemeshard_types.h_serialized.cpp |71.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tablet_flat/flat_executor_tx_env.cpp >> TErasureTypeTest::TestBlock32LossOfAllPossible2 [GOOD] >> EndpointElector::DiffOnRemove [GOOD] |71.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tablet_flat/flat_exec_commit_mgr.cpp |71.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/library/yaml_config/yaml_config_parser_ut.cpp >> TErasureTypeTest::TestAllSpeciesCrcWhole2of2 >> EndpointElector::Empty [GOOD] >> TErasureTypeTest::TestBlock33LossOfAllPossible3 [GOOD] >> TErasureTypeTest::TestStripe42LossOfAllPossible2 [GOOD] |71.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/erasure/ut/unittest >> TErasureTypeTest::TestStripe32LossOfAllPossible2 [GOOD] |71.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/erasure/ut/unittest >> TErasureTypeTest::isSplittedDataEqualsToOldVerion [GOOD] |71.2%| [TS] {asan, default-linux-x86_64, release} ydb/core/persqueue/codecs/ut/unittest >> IdxTestDataProvider::1ShardLimit6bitFromRandomUi16 [GOOD] >> TErasureTypeTest::TestStripe23LossOfAllPossible3 [GOOD] >> EndpointElector::GetEndpoint |71.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tablet_flat/flat_exec_commit_mgr.cpp |71.2%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/unit/client/endpoints/unittest >> EndpointElector::DiffOnRemove [GOOD] |71.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/erasure/ut/unittest >> TErasureTypeTest::TestBlock32LossOfAllPossible2 [GOOD] |71.3%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/unit/client/endpoints/unittest >> EndpointElector::Empty [GOOD] >> EndpointElector::GetEndpoint [GOOD] >> IdxTestDataProvider::1ShardLimit6bitFromRandomUi8 [GOOD] >> PersQueueCodecs::ToV1Codec [GOOD] |71.3%| [TA] {RESULT} $(B)/ydb/core/jaeger_tracing/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} |71.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/sqs/with_quotas/ydb-tests-functional-sqs-with_quotas >> IdxTestDataProvider::4ShardsLimit20bitFromRandomUi64 [GOOD] |71.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/functional/sqs/with_quotas/ydb-tests-functional-sqs-with_quotas |71.3%| [LD] {RESULT} $(B)/ydb/tests/functional/sqs/with_quotas/ydb-tests-functional-sqs-with_quotas >> EndpointElector::EndpointAssociationTwoThreadsNoRace |71.3%| [TM] {asan, default-linux-x86_64, release} ydb/public/lib/idx_test/ut/unittest >> IdxTestDataProvider::1ShardLimit6bitFromRandomUi16 [GOOD] |71.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/library/yaml_config/yaml_config_parser_ut.cpp |71.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/erasure/ut/unittest >> TErasureTypeTest::TestStripe42LossOfAllPossible2 [GOOD] |71.3%| [TS] {asan, default-linux-x86_64, release} ydb/core/persqueue/codecs/ut/unittest |71.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/erasure/ut/unittest >> TErasureTypeTest::TestBlock33LossOfAllPossible3 [GOOD] |71.3%| [TS] {asan, default-linux-x86_64, release} ydb/core/persqueue/codecs/ut/unittest >> PersQueueCodecs::ToV1Codec [GOOD] >> CheckUtils::PromiseDefaultCtorNotInitialized [GOOD] |71.3%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/unit/client/endpoints/unittest >> EndpointElector::GetEndpoint [GOOD] |71.3%| [TM] {asan, default-linux-x86_64, release} ydb/public/lib/idx_test/ut/unittest >> IdxTestDataProvider::1ShardLimit6bitFromRandomUi8 [GOOD] |71.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/erasure/ut/unittest >> TErasureTypeTest::TestStripe23LossOfAllPossible3 [GOOD] |71.3%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/jaeger_tracing/ut/ydb-core-jaeger_tracing-ut >> CheckUtils::NewPromiseInitialized [GOOD] >> TErasureTypeTest::TestBlock42LossOfAllPossible2 [GOOD] |71.3%| [TM] {asan, default-linux-x86_64, release} ydb/public/lib/idx_test/ut/unittest |71.3%| [TA] $(B)/ydb/library/yql/dq/runtime/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} |71.3%| [TM] {asan, default-linux-x86_64, release} ydb/public/lib/idx_test/ut/unittest >> IdxTestDataProvider::4ShardsLimit20bitFromRandomUi64 [GOOD] |71.3%| [TM] {asan, default-linux-x86_64, release} ydb/public/lib/idx_test/ut/unittest >> EndpointElector::EndpointAssociationTwoThreadsNoRace [GOOD] >> IdxTestDataProvider::1ShardLimit6bitFromRandomUi32 [GOOD] >> IdxTestDataProvider::1ShardLimit6bitFromRandomUi64 [GOOD] |71.3%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/unit/client/endpoints/unittest >> CheckUtils::PromiseDefaultCtorNotInitialized [GOOD] |71.3%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/unit/client/endpoints/unittest >> CheckUtils::NewPromiseInitialized [GOOD] |71.3%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/changes/compaction/common/libchanges-compaction-common.a >> ToOneLineStringTest::SubIssuesTest [GOOD] |71.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/erasure/ut/unittest >> TErasureTypeTest::TestBlock42LossOfAllPossible2 [GOOD] |71.3%| [TM] {asan, default-linux-x86_64, release} ydb/public/lib/idx_test/ut/unittest |71.3%| [TM] {asan, default-linux-x86_64, release} ydb/public/lib/idx_test/ut/unittest |71.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/portions/constructors.cpp >> IssueTest::Ascii [GOOD] |71.3%| [TA] $(B)/ydb/public/sdk/cpp/tests/unit/client/driver/test-results/unittest/{meta.json ... results_accumulator.log} |71.3%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/unit/client/endpoints/unittest >> EndpointElector::EndpointAssociationTwoThreadsNoRace [GOOD] >> EndpointElector::Election >> EndpointElector::Election [GOOD] |71.3%| [TM] {asan, default-linux-x86_64, release} ydb/public/lib/idx_test/ut/unittest >> IdxTestDataProvider::1ShardLimit6bitFromRandomUi32 [GOOD] |71.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/limits/ydb-tests-functional-limits |71.4%| [TM] {asan, default-linux-x86_64, release} ydb/public/lib/idx_test/ut/unittest >> IdxTestDataProvider::1ShardLimit6bitFromRandomUi64 [GOOD] |71.4%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/changes/compaction/common/libchanges-compaction-common.a |71.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/functional/limits/ydb-tests-functional-limits |71.4%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/changes/compaction/common/libchanges-compaction-common.a |71.4%| [TS] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/unit/library/issue/unittest >> ToOneLineStringTest::SubIssuesTest [GOOD] |71.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/portions/constructors.cpp |71.4%| [TS] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/unit/library/issue/unittest >> IssueTest::Ascii [GOOD] >> EndpointElector::EndpointAssiciationSingleThread [GOOD] |71.4%| [TM] {asan, default-linux-x86_64, release} ydb/public/lib/idx_test/ut/unittest |71.4%| [TS] {asan, default-linux-x86_64, release} ydb/core/persqueue/codecs/ut/unittest >> DiscoveryMutator::Simple |71.4%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/unit/client/endpoints/unittest |71.4%| [TA] $(B)/ydb/core/config/tools/protobuf_plugin/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} |71.4%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/unit/client/endpoints/unittest >> EndpointElector::Election [GOOD] >> DiscoveryMutator::Simple [GOOD] |71.4%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/unit/client/discovery_mutator/unittest |71.4%| [TS] {asan, default-linux-x86_64, release} ydb/core/persqueue/codecs/ut/unittest |71.4%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/unit/client/discovery_mutator/unittest |71.4%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/unit/client/discovery_mutator/unittest |71.4%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/unit/client/discovery_mutator/unittest >> TErasureTypeTest::TestStripe33LossOfAllPossible3 [GOOD] |71.4%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/unit/client/endpoints/unittest >> EndpointElector::EndpointAssiciationSingleThread [GOOD] |71.4%| [TA] {RESULT} $(B)/ydb/public/sdk/cpp/tests/unit/client/driver/test-results/unittest/{meta.json ... results_accumulator.log} |71.4%| [LD] {RESULT} $(B)/ydb/tests/functional/limits/ydb-tests-functional-limits |71.4%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/unit/client/discovery_mutator/unittest >> DiscoveryMutator::Simple [GOOD] |71.4%| [TS] {asan, default-linux-x86_64, release} ydb/core/persqueue/codecs/ut/unittest >> ResponseHeaders::PassHeader |71.4%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/unit/client/discovery_mutator/unittest |71.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tablet_flat/flat_executor_txloglogic.cpp >> DqSpillingFileTests::ReadError |71.4%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/unit/client/discovery_mutator/unittest |71.4%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/unit/client/discovery_mutator/unittest |71.4%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/unit/client/draft/unittest >> TErasureTypeTest::TestBlock43LossOfAllPossible3 [GOOD] >> ResponseHeaders::PassHeader [GOOD] >> DqSpillingFileTests::ReadError [GOOD] >> CppGrpcClientResultSetTest::ListCorruptedResultSet [GOOD] |71.4%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/unit/client/discovery_mutator/unittest >> TErasureTypeTest::TestStripe43LossOfAllPossible3 [GOOD] >> ToStreamTest::SubIssuesTest |71.4%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/unit/client/draft/unittest |71.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/erasure/ut/unittest >> TErasureTypeTest::TestStripe33LossOfAllPossible3 [GOOD] |71.4%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/unit/client/draft/unittest |71.4%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/unit/client/discovery_mutator/unittest >> DqSpillingFileTests::StartError >> DqSpillingFileTests::Simple [GOOD] >> ToStreamTest::SubIssuesTest [GOOD] >> ToMessage::NonUtf8 [GOOD] >> DqSpillingFileTests::MultipleFileParts [GOOD] >> DqSpillingFileTests::StartError [GOOD] >> ErasureBrandNew::Block42_chunked [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/library/yql/dq/actors/spilling/ut/unittest >> DqSpillingFileTests::ReadError [GOOD] Test command err: 2025-05-07T08:06:36.644504Z :KQP_COMPUTE ERROR: spilling_file.cpp:968: [Read async] file: 
/home/runner/.ya/build/build_root/zvgn/0042cb/ydb/library/yql/dq/actors/spilling/ut/test-results/unittest/testing_out_stuff/chunk3/dq_spilling_12554/node_1_cd4bd605-11bb1072-a8e5f335-63f9031a/1_test_0, blobId: 0, offset: 0, error: (Error 2: No such file or directory) util/system/file.cpp:936: can't open "/home/runner/.ya/build/build_root/zvgn/0042cb/ydb/library/yql/dq/actors/spilling/ut/test-results/unittest/testing_out_stuff/chunk3/dq_spilling_12554/node_1_cd4bd605-11bb1072-a8e5f335-63f9031a/1_test_0" with mode RdOnly (0x00000008)
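The ReadError case above checks that the spilling service surfaces an open failure (ENOENT, "Error 2") when a spilled blob's backing file has vanished. A self-contained sketch of that error path, with invented names rather than the actual spilling_file.cpp code:

```cpp
// Hypothetical blob reader: report open(2) failures the way the log does.
#include <cerrno>
#include <cstring>
#include <fcntl.h>
#include <string>
#include <unistd.h>

struct TReadResult {
    bool Ok = false;
    std::string Error;
};

TReadResult ReadBlob(const std::string& path) {
    int fd = ::open(path.c_str(), O_RDONLY); // "mode RdOnly", as in the log
    if (fd < 0) {
        // errno 2 (ENOENT): the blob file was never written or was already removed
        return {false, "(Error " + std::to_string(errno) + ": " + std::strerror(errno)
                           + ") can't open \"" + path + "\""};
    }
    ::close(fd);
    return {true, {}};
}
```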
|71.4%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/unit/client/draft/unittest >> ResponseHeaders::PassHeader [GOOD] >> DqSpillingFileTests::NoSpillingService [GOOD] |71.4%| [TA] {RESULT} $(B)/ydb/core/config/tools/protobuf_plugin/ut/test-results/unittest/{meta.json ... results_accumulator.log} |71.4%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/unit/client/result/unittest >> CppGrpcClientResultSetTest::ListCorruptedResultSet [GOOD] |71.4%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/public/ydb_issue/ut/unittest >> ToStreamTest::SubIssuesTest [GOOD] |71.5%| [TM] {asan, default-linux-x86_64, release} ydb/library/yql/dq/actors/spilling/ut/unittest >> DqSpillingFileTests::Simple [GOOD] >> DqSpillingFileTests::Write_FileSizeLimitExceeded [GOOD] |71.4%| [TS] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/unit/library/issue/unittest >> ToMessage::NonUtf8 [GOOD]
------- [LD] {default-linux-x86_64, release, asan} $(B)/yql/tools/yqlrun/yqlrun
ld.lld: warning: version script assignment of 'global' to symbol '__after_morecore_hook' failed: symbol not defined (the same non-fatal ld.lld warning is repeated for roughly 150 further symbols — glibc hooks, pthread_*, str*/wcs* conversion and wide-character routines, from 'daylight' and 'environ' through 'wmemset' — each ending "failed: symbol not defined"; the yqlrun link itself completes below)
|71.5%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/unit/client/draft/unittest
------- [TM] {asan, default-linux-x86_64, release} ydb/library/yql/dq/actors/spilling/ut/unittest >> DqSpillingFileTests::StartError [GOOD] Test command err: 2025-05-07T08:06:37.013674Z :KQP_COMPUTE ERROR: spilling_file.cpp:239: (TIoSystemError) (Error 13: Permission denied) util/folder/path.cpp:424: could not create directory /nonexistent 2025-05-07T08:06:37.013843Z :KQP_COMPUTE ERROR: spilling_file.cpp:278: Service is broken, send error to client [1:5:2052] 2025-05-07T08:06:37.014143Z :KQP_COMPUTE ERROR: spilling_file.cpp:278: Service is broken, send error to client [1:5:2052] 2025-05-07T08:06:37.014219Z :KQP_COMPUTE ERROR: spilling_file.cpp:278: Service is broken, send error to client [1:5:2052] |71.5%| [TM] {asan, default-linux-x86_64, release} ydb/library/yql/dq/actors/spilling/ut/unittest >> DqSpillingFileTests::MultipleFileParts [GOOD] |71.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/erasure/ut/unittest >> TErasureTypeTest::TestStripe43LossOfAllPossible3 [GOOD] |71.5%| [LD] {BAZEL_UPLOAD,
SKIPPED} $(B)/yql/tools/yqlrun/yqlrun >> DqSpillingFileTests::SingleFilePart [GOOD] >> YdbValue::BuildValueDictTypeMismatch1 [GOOD] >> YdbValue::BuildValueDictTypeMismatch2 [GOOD] |71.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/erasure/ut/unittest >> TErasureTypeTest::TestBlock43LossOfAllPossible3 [GOOD] >> YdbValue::BuildValueDictEmpty2 [GOOD] |71.5%| [LD] {RESULT} $(B)/yql/tools/yqlrun/yqlrun >> YdbValue::BuildValueDictEmptyTypeMismatch [GOOD] >> ToMessage::NonUtf8 [GOOD] |71.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tablet_flat/flat_executor_txloglogic.cpp >> YdbValue::BuildValueDictEmptyNoType [GOOD] |71.5%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/library/yql/udfs/common/clickhouse/client/libclickhouse_client_udf.global.a >> DqSpillingFileTests::ThreadPoolQueueOverflow |71.5%| [AR] {RESULT} $(B)/ydb/library/yql/udfs/common/clickhouse/client/libclickhouse_client_udf.global.a |71.5%| [TM] {asan, default-linux-x86_64, release} ydb/library/yql/dq/actors/spilling/ut/unittest >> DqSpillingFileTests::NoSpillingService [GOOD] |71.5%| [AR] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/library/yql/udfs/common/clickhouse/client/libclickhouse_client_udf.global.a
------- [TM] {asan, default-linux-x86_64, release} ydb/core/erasure/ut/unittest >> ErasureBrandNew::Block42_chunked [GOOD] Test command err: totalSize# 503983619 period1# 2.146566s period2# 0.677254s MB/s1# 234.7859879 MB/s2# 744.1574638 factor# 3.169513949
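For orientation, the ErasureBrandNew::Block42_chunked figures are internally consistent if MB means 10^6 bytes: MB/s1 = totalSize / period1 / 10^6 = 503983619 / 2.146566 / 10^6 ≈ 234.786, MB/s2 = 503983619 / 0.677254 / 10^6 ≈ 744.157, and factor = period1 / period2 = MB/s2 / MB/s1 ≈ 3.1695, i.e. the second measured pass encodes the same data roughly 3.2 times faster (the log does not say what distinguishes the two periods).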
|71.5%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/unit/client/draft/unittest >> DqSpillingFileTests::FdCounterSingleFile [GOOD] |71.5%| [LD] {BAZEL_UPLOAD} $(B)/ydb/public/sdk/cpp/tests/unit/client/oauth2_token_exchange/tests-unit-client-oauth2_token_exchange >> DqSpillingFileTests::Write_TotalSizeLimitExceeded [GOOD] |71.5%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/unit/client/draft/unittest |71.5%| [TM] {asan, default-linux-x86_64, release} ydb/library/yql/dq/actors/spilling/ut/unittest >> DqSpillingFileTests::SingleFilePart [GOOD] >> DqSpillingFileTests::ThreadPoolQueueOverflow [GOOD] >> ViewClient::Basic [GOOD] >> DqSpillingFileTests::FdCounterMultiFile [GOOD] |71.5%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/public/ydb_issue/ut/unittest >> ToMessage::NonUtf8 [GOOD]
------- [TM] {asan, default-linux-x86_64, release} ydb/library/yql/dq/actors/spilling/ut/unittest >> DqSpillingFileTests::Write_FileSizeLimitExceeded [GOOD] Test command err: 2025-05-07T08:06:37.421962Z :KQP_COMPUTE ERROR: spilling_file.cpp:412: [Write] File size limit exceeded. From: [1:5:2052], blobId: 2, bytes: 50 >> ParamsBuilder::BuildWithTypeInfo [GOOD] >> ParamsBuilder::Build [GOOD] |71.5%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/unit/client/value/gtest >> YdbValue::BuildValueDictEmptyNoType [GOOD] |71.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tablet_flat/flat_executor_borrowlogic.cpp |71.5%| [TA] {RESULT} $(B)/ydb/library/yql/dq/runtime/ut/test-results/unittest/{meta.json ... results_accumulator.log} |71.5%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/unit/client/draft/unittest |71.5%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/public/ydb_issue/ut/unittest
------- [TM] {asan, default-linux-x86_64, release} ydb/library/yql/dq/actors/spilling/ut/unittest >> DqSpillingFileTests::Write_TotalSizeLimitExceeded [GOOD] Test command err: 2025-05-07T08:06:37.805391Z :KQP_COMPUTE ERROR: spilling_file.cpp:425: [Write] Total size limit exceeded.
From: [1:5:2052], blobId: 2, bytes: 50 >> ParamsBuilder::IncompleteParam [GOOD] >> ParamsBuilder::TypeMismatchFromValue [GOOD] >> ParamsBuilder::MissingParam [GOOD] >> JsonValueTest::InvalidJsonToBinaryString3 [GOOD] >> ParamsBuilder::BuildFromValue >> JsonValueTest::InvalidJsonToBinaryString4 [GOOD] |71.5%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/unit/client/params/gtest >> ParamsBuilder::BuildWithTypeInfo [GOOD] |71.5%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/unit/client/draft/unittest |71.5%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/unit/client/draft/unittest >> ViewClient::Basic [GOOD] |71.5%| [TM] {asan, default-linux-x86_64, release} ydb/library/yql/dq/actors/spilling/ut/unittest >> DqSpillingFileTests::FdCounterMultiFile [GOOD] >> ParamsBuilder::BuildFromValue [GOOD] |71.5%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/unit/client/params/gtest >> ParamsBuilder::Build [GOOD] |71.5%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/unit/client/params/gtest
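The ParamsBuilder cases above exercise the SDK's typed query-parameter builder; Build and BuildFromValue are the happy paths, while MissingParam and IncompleteParam cover forgetting a parameter or not finishing one. A sketch of the happy path based on the public TParamsBuilder API (the include path and exact signatures vary between SDK layouts, so treat them as approximate):

```cpp
// Approximate use of the YDB C++ SDK params builder; the header location
// differs by SDK layout (shown here as the standalone ydb-cpp-sdk path).
#include <ydb-cpp-sdk/client/params/params.h>

NYdb::TParams BuildParams() {
    NYdb::TParamsBuilder builder;
    builder.AddParam("$id").Uint64(42).Build();       // each param must be finished...
    builder.AddParam("$name").Utf8("answer").Build(); // ...before the final Build()
    return builder.Build();                           // an unfinished param fails here
}
```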
------- [TM] {asan, default-linux-x86_64, release} ydb/library/yql/dq/actors/spilling/ut/unittest >> DqSpillingFileTests::ThreadPoolQueueOverflow [GOOD] Test command err: 2025-05-07T08:06:37.751428Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation (the same [Write] rejection is logged several dozen more times, 2025-05-07T08:06:37.751428Z through 2025-05-07T08:06:37.759748Z) 2025-05-07T08:06:37.760308Z :KQP_COMPUTE ERROR: spilling_file.cpp:357: [CloseFile] Can not run operation (repeated 21 times in total, through 2025-05-07T08:06:37.761519Z)
spilling_file.cpp:476: [Write] Can not run operation 2025-05-07T08:06:37.759383Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation [Write] Can not run operation 2025-05-07T08:06:37.759481Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2025-05-07T08:06:37.759544Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation 2025-05-07T08:06:37.759651Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation [Write] Can not run operation 2025-05-07T08:06:37.759748Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2025-05-07T08:06:37.760308Z :KQP_COMPUTE ERROR: spilling_file.cpp:357: [CloseFile] Can not run operation 2025-05-07T08:06:37.760379Z :KQP_COMPUTE ERROR: spilling_file.cpp:357: [CloseFile] Can not run operation 2025-05-07T08:06:37.760424Z :KQP_COMPUTE ERROR: spilling_file.cpp:357: [CloseFile] Can not run operation 2025-05-07T08:06:37.760457Z :KQP_COMPUTE ERROR: spilling_file.cpp:357: [CloseFile] Can not run operation 2025-05-07T08:06:37.760494Z :KQP_COMPUTE ERROR: spilling_file.cpp:357: [CloseFile] Can not run operation 2025-05-07T08:06:37.760526Z :KQP_COMPUTE ERROR: spilling_file.cpp:357: [CloseFile] Can not run operation 2025-05-07T08:06:37.760557Z :KQP_COMPUTE ERROR: spilling_file.cpp:357: [CloseFile] Can not run operation 2025-05-07T08:06:37.760596Z :KQP_COMPUTE ERROR: spilling_file.cpp:357: [CloseFile] Can not run operation 2025-05-07T08:06:37.760628Z :KQP_COMPUTE ERROR: spilling_file.cpp:357: [CloseFile] Can not run operation 2025-05-07T08:06:37.760660Z :KQP_COMPUTE ERROR: spilling_file.cpp:357: [CloseFile] Can not run operation 2025-05-07T08:06:37.760705Z :KQP_COMPUTE ERROR: spilling_file.cpp:357: [CloseFile] Can not run operation 2025-05-07T08:06:37.760737Z :KQP_COMPUTE ERROR: spilling_file.cpp:357: [CloseFile] Can not run operation 2025-05-07T08:06:37.760843Z :KQP_COMPUTE ERROR: spilling_file.cpp:357: [CloseFile] Can not run operation 2025-05-07T08:06:37.760876Z :KQP_COMPUTE ERROR: spilling_file.cpp:357: [CloseFile] Can not run operation 2025-05-07T08:06:37.760924Z :KQP_COMPUTE ERROR: spilling_file.cpp:357: [CloseFile] Can not run operation 2025-05-07T08:06:37.760964Z :KQP_COMPUTE ERROR: spilling_file.cpp:357: [CloseFile] Can not run operation 2025-05-07T08:06:37.760998Z :KQP_COMPUTE ERROR: spilling_file.cpp:357: [CloseFile] Can not run operation 2025-05-07T08:06:37.761042Z :KQP_COMPUTE ERROR: spilling_file.cpp:357: [CloseFile] Can not run operation 2025-05-07T08:06:37.761088Z :KQP_COMPUTE ERROR: spilling_file.cpp:357: [CloseFile] Can not run operation 2025-05-07T08:06:37.761121Z :KQP_COMPUTE ERROR: spilling_file.cpp:357: [CloseFile] Can not run operation 2025-05-07T08:06:37.761519Z :KQP_COMPUTE ERROR: spilling_file.cpp:357: [CloseFile] Can not run operation >> JsonValueTest::InvalidJsonToBinaryString5 [GOOD] >> JsonValueTest::InvalidJsonToBinaryString6 [GOOD] >> EndpointElector::Pessimization [GOOD] |71.6%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/unit/client/params/gtest >> ParamsBuilder::IncompleteParam [GOOD] |71.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tablet_flat/flat_executor_borrowlogic.cpp |71.6%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/splitter/libtx-columnshard-splitter.a |71.6%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/splitter/libtx-columnshard-splitter.a |71.6%| [TM] {asan, 
default-linux-x86_64, release} ydb/public/sdk/cpp/tests/unit/client/params/gtest >> ParamsBuilder::TypeMismatchFromValue [GOOD] |71.6%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/unit/client/params/gtest >> ParamsBuilder::BuildFromValue [GOOD] |71.6%| [TM] {asan, default-linux-x86_64, release} ydb/public/lib/json_value/ut/unittest >> JsonValueTest::InvalidJsonToBinaryString6 [GOOD] |71.6%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/unit/client/params/gtest >> ParamsBuilder::MissingParam [GOOD] |71.6%| [TS] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/unit/library/issue/unittest >> conftest.py::black [GOOD] >> test_join.py::black [GOOD] |71.6%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/unit/client/endpoints/unittest >> EndpointElector::Pessimization [GOOD] |71.6%| [TA] $(B)/ydb/public/sdk/cpp/tests/unit/client/coordination/test-results/unittest/{meta.json ... results_accumulator.log} |71.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/predicate/range.cpp >> Scheme::EmptyCell [GOOD] >> Scheme::CompareUuidCells [GOOD] >> test.py::py2_flake8 [GOOD] |71.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/portions/constructor_meta.cpp |71.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/restarts/ydb-tests-functional-restarts |71.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/functional/restarts/ydb-tests-functional-restarts |71.6%| [TS] {asan, default-linux-x86_64, release} ydb/tests/fq/generic/streaming/black >> test_join.py::black [GOOD] |71.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/datashard/dump_restore/ydb-tests-datashard-dump_restore |71.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/datashard/dump_restore/ydb-tests-datashard-dump_restore |71.6%| [LD] {RESULT} $(B)/ydb/tests/functional/restarts/ydb-tests-functional-restarts |71.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/predicate/range.cpp |71.6%| [TS] {asan, default-linux-x86_64, release} ydb/core/scheme/ut/unittest >> Scheme::CompareUuidCells [GOOD] |71.6%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/tests/sql/hybrid_file/part1/py2_flake8 >> test.py::py2_flake8 [GOOD] >> test.py::py2_flake8 [GOOD] >> test.py::py2_flake8 [GOOD] |71.6%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/splitter/libtx-columnshard-splitter.a |71.6%| [LD] {BAZEL_UPLOAD} $(B)/ydb/library/yql/providers/dq/scheduler/ut/ydb-library-yql-providers-dq-scheduler-ut |71.6%| [TS] {RESULT} ydb/tests/fq/generic/streaming/black |71.6%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/tests/sql/hybrid_file/part6/py2_flake8 >> test.py::py2_flake8 [GOOD] |71.6%| [LD] {RESULT} $(B)/ydb/tests/datashard/dump_restore/ydb-tests-datashard-dump_restore |71.6%| [TA] {RESULT} $(B)/ydb/public/sdk/cpp/tests/unit/client/coordination/test-results/unittest/{meta.json ... results_accumulator.log} |71.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/portions/constructor_meta.cpp |71.6%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/tests/sql/solomon/py2_flake8 >> test.py::py2_flake8 [GOOD] >> test.py::py2_flake8 [GOOD] |71.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/predicate/container.cpp |71.6%| [TA] $(B)/ydb/public/lib/idx_test/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} >> test_config_with_metadata.py::flake8 [GOOD] >> test_distconf.py::flake8 [GOOD] >> test_generate_dynamic_config.py::flake8 [GOOD] |71.6%| [TS] {RESULT} ydb/library/yql/tests/sql/hybrid_file/part6/py2_flake8 |71.6%| [TS] {RESULT} ydb/library/yql/tests/sql/hybrid_file/part1/py2_flake8 |71.6%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/tests/sql/dq_file/part2/py2_flake8 >> test.py::py2_flake8 [GOOD] >> conftest.py::flake8 [GOOD] |71.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/splitter/ut/batch_slice.cpp >> helpers.py::flake8 [GOOD] >> test_ctas.py::flake8 [GOOD] >> test_yt_reading.py::flake8 [GOOD] >> conftest.py::flake8 [GOOD] |71.6%| [TS] {asan, default-linux-x86_64, release} ydb/tests/functional/config/flake8 >> test_generate_dynamic_config.py::flake8 [GOOD] >> test_insert_restarts.py::flake8 [GOOD] >> test.py::py2_flake8 [GOOD] |71.6%| [TS] {RESULT} ydb/library/yql/tests/sql/solomon/py2_flake8 |71.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/predicate/container.cpp >> test_disk.py::flake8 [GOOD] >> test_tablet.py::flake8 [GOOD] >> test.py::py2_flake8 [GOOD] >> test_alloc_default.py::flake8 [GOOD] >> test_dc_local.py::flake8 [GOOD] >> IssueProtoTest::WrongBinStringException [GOOD] >> test_result_limits.py::flake8 [GOOD] >> test_scheduling.py::flake8 [GOOD] |71.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/splitter/ut/batch_slice.cpp |71.6%| [TS] {asan, default-linux-x86_64, release} ydb/tests/fq/yt/kqp_yt_import/flake8 >> test_yt_reading.py::flake8 [GOOD] >> __main__.py::flake8 [GOOD] >> StaticConfigExamples::MIRROR_3_DC_NODES_IN_MEMORY [GOOD] >> ParamsBuilder::TypeMismatch [GOOD] >> StaticConfigExamples::MIRROR_3_DC_9_NODES [GOOD] >> StaticConfigExamples::SingleNodeWithFile [GOOD] >> StaticConfigExamples::BLOCK42 [GOOD] >> StaticConfigExamples::MIRROR_3_DC_NODES [GOOD] >> StaticConfigExamples::SINGLE_NODE_IN_MEMORY [GOOD] |71.6%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/tests/sql/hybrid_file/part3/py2_flake8 >> test.py::py2_flake8 [GOOD] |71.6%| [TS] {asan, default-linux-x86_64, release} ydb/tests/fq/restarts/flake8 >> test_insert_restarts.py::flake8 [GOOD] |71.6%| [CC] {default-linux-x86_64, release, asan} $(B)/ydb/core/config/init/init.h_serialized.cpp |71.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/hive/ydb-tests-functional-hive >> tstool.py::flake8 [GOOD] |71.6%| [TS] {RESULT} ydb/library/yql/tests/sql/dq_file/part2/py2_flake8 |71.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/functional/hive/ydb-tests-functional-hive |71.7%| [CC] {default-linux-x86_64, release, asan} $(B)/ydb/core/tablet_flat/flat_executor_compaction_logic.h_serialized.cpp |71.6%| [TS] {asan, default-linux-x86_64, release} ydb/tests/tools/nemesis/ut/flake8 >> test_tablet.py::flake8 [GOOD] |71.6%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/tests/sql/hybrid_file/part9/py2_flake8 >> test.py::py2_flake8 [GOOD] |71.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/tools/nemesis/ut/ydb-tests-tools-nemesis-ut |71.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/ttl/ydb-tests-functional-ttl >> test_tpcds.py::flake8 [GOOD] >> test_tpch_spilling.py::flake8 [GOOD] |71.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/tools/nemesis/ut/ydb-tests-tools-nemesis-ut |71.7%| [TS] {asan, default-linux-x86_64, release} ydb/tests/fq/mem_alloc/flake8 >> test_scheduling.py::flake8 [GOOD] |71.7%| [TA] {RESULT} 
$(B)/ydb/public/lib/idx_test/ut/test-results/unittest/{meta.json ... results_accumulator.log} |71.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/functional/ttl/ydb-tests-functional-ttl |71.7%| [TS] {asan, default-linux-x86_64, release} ydb/tools/cfg/bin/flake8 >> __main__.py::flake8 [GOOD] |71.7%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/public/ydb_issue/ut/unittest >> IssueProtoTest::WrongBinStringException [GOOD] >> base.py::flake8 [GOOD] >> data_correctness.py::flake8 [GOOD] >> data_migration_when_alter_ttl.py::flake8 [GOOD] >> tier_delete.py::flake8 [GOOD] >> ttl_delete_s3.py::flake8 [GOOD] >> ttl_unavailable_s3.py::flake8 [GOOD] >> unstable_connection.py::flake8 [GOOD] >> test_compatibility.py::flake8 [GOOD] |71.7%| [LD] {RESULT} $(B)/ydb/tests/functional/hive/ydb-tests-functional-hive |71.7%| [CC] {BAZEL_UPLOAD} $(B)/ydb/core/config/init/init.h_serialized.cpp |71.7%| [TS] {RESULT} ydb/tests/functional/config/flake8 |71.7%| [LD] {BAZEL_UPLOAD} $(B)/ydb/public/sdk/cpp/tests/unit/client/coordination/ydb-public-sdk-cpp-tests-unit-client-coordination |71.7%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/unit/client/params/gtest >> ParamsBuilder::TypeMismatch [GOOD] |71.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/graph/shard/backends.cpp >> test_export_s3.py::flake8 [GOOD] >> test.py::flake8 [GOOD] >> test_followers.py::flake8 [GOOD] >> test_quota_exhaustion.py::flake8 [GOOD] >> test_stress.py::flake8 [GOOD] |71.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tablet_flat/shared_sausagecache.cpp |71.7%| [CC] {BAZEL_UPLOAD} $(B)/ydb/core/tablet_flat/flat_executor_compaction_logic.h_serialized.cpp |71.7%| [TS] {asan, default-linux-x86_64, release} ydb/tools/tstool/flake8 >> tstool.py::flake8 [GOOD] |71.7%| [TS] {asan, default-linux-x86_64, release} ydb/library/yaml_config/static_validator/ut/example_configs/unittest >> StaticConfigExamples::SINGLE_NODE_IN_MEMORY [GOOD] >> test.py::py2_flake8 [GOOD] |71.7%| [TS] {RESULT} ydb/tests/fq/yt/kqp_yt_import/flake8 |71.7%| [TS] {asan, default-linux-x86_64, release} ydb/tests/functional/tpc/large/flake8 >> test_tpch_spilling.py::flake8 [GOOD] |71.7%| [LD] {RESULT} $(B)/ydb/tests/tools/nemesis/ut/ydb-tests-tools-nemesis-ut >> __main__.py::flake8 [GOOD] |71.7%| [TS] {asan, default-linux-x86_64, release} ydb/tests/olap/ttl_tiering/flake8 >> unstable_connection.py::flake8 [GOOD] |71.7%| [TS] {RESULT} ydb/library/yql/tests/sql/hybrid_file/part3/py2_flake8 |71.7%| [TS] {RESULT} ydb/tests/fq/restarts/flake8 |71.7%| [LD] {RESULT} $(B)/ydb/tests/functional/ttl/ydb-tests-functional-ttl |71.7%| [TS] {RESULT} ydb/tests/tools/nemesis/ut/flake8 |71.7%| [TS] {asan, default-linux-x86_64, release} ydb/tests/compatibility/flake8 >> test_stress.py::flake8 [GOOD] |71.7%| [TS] {asan, default-linux-x86_64, release} ydb/tests/olap/data_quotas/flake8 >> test_quota_exhaustion.py::flake8 [GOOD] |71.7%| [TS] {asan, default-linux-x86_64, release} ydb/tests/fq/yt/kqp_yt_file/part8/flake8 >> test.py::flake8 [GOOD] |71.7%| [TS] {RESULT} ydb/library/yql/tests/sql/hybrid_file/part9/py2_flake8 >> test.py::flake8 [GOOD] >> test.py::flake8 [GOOD] |71.7%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/tests/sql/hybrid_file/part0/py2_flake8 >> test.py::py2_flake8 [GOOD] >> PersQueueCodecs::FromV1Codec [GOOD] >> run_tests.py::flake8 [GOOD] >> TCredentials::CheckAws [GOOD] >> TCredentials::CheckToken [GOOD] >> JsonValueTest::PgValue [GOOD] >> JsonValueTest::PrimitiveValueBool [GOOD] >> 
JsonValueTest::NewDatetimeValuesStruct [GOOD] >> JsonValueTest::InvalidJsonToBinaryString7 [GOOD] |71.7%| [TS] {asan, default-linux-x86_64, release} ydb/tests/stability/tool/flake8 >> __main__.py::flake8 [GOOD] |71.7%| [TS] {RESULT} ydb/tests/functional/tpc/large/flake8 |71.7%| [TS] {asan, default-linux-x86_64, release} ydb/tests/fq/yt/kqp_yt_file/part15/flake8 >> test.py::flake8 [GOOD] |71.7%| [TS] {asan, default-linux-x86_64, release} ydb/library/benchmarks/runner/run_tests/flake8 >> run_tests.py::flake8 [GOOD] |71.7%| [TS] {RESULT} ydb/tests/olap/ttl_tiering/flake8 |71.7%| [TS] {RESULT} ydb/tests/fq/mem_alloc/flake8 |71.8%| [TS] {asan, default-linux-x86_64, release} ydb/tests/fq/yt/kqp_yt_file/part19/flake8 >> test.py::flake8 [GOOD] |71.8%| [TS] {RESULT} ydb/tools/cfg/bin/flake8 |71.8%| [TS] {asan, default-linux-x86_64, release} ydb/core/persqueue/codecs/ut/unittest >> PersQueueCodecs::FromV1Codec [GOOD] |71.8%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/providers/s3/credentials/ut/unittest >> TCredentials::CheckToken [GOOD] |71.8%| [TS] {RESULT} ydb/library/yaml_config/static_validator/ut/example_configs/unittest |71.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/graph/shard/backends.cpp |71.8%| [TM] {asan, default-linux-x86_64, release} ydb/public/lib/json_value/ut/unittest >> JsonValueTest::InvalidJsonToBinaryString7 [GOOD] >> test.py::py2_flake8 [GOOD] >> kikimr_config.py::flake8 [GOOD] >> test_split_merge.py::flake8 [GOOD] |71.8%| [TS] {RESULT} ydb/tools/tstool/flake8 >> test.py::py2_flake8 [GOOD] |71.8%| [TS] {RESULT} ydb/tests/compatibility/flake8 |71.8%| [TS] {RESULT} ydb/tests/fq/yt/kqp_yt_file/part15/flake8 |71.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tablet_flat/shared_sausagecache.cpp |71.8%| [TS] {RESULT} ydb/tests/stability/tool/flake8 |71.8%| [TS] {RESULT} ydb/library/benchmarks/runner/run_tests/flake8 |71.8%| [TS] {asan, default-linux-x86_64, release} ydb/tests/library/ut/flake8 >> kikimr_config.py::flake8 [GOOD] >> test_kqprun_recipe.py::flake8 [GOOD] >> test_copy_table.py::flake8 [GOOD] |71.8%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/tests/sql/dq_file/part6/py2_flake8 >> test.py::py2_flake8 [GOOD] |71.8%| [LD] {BAZEL_UPLOAD} $(B)/ydb/library/workload/tpch/ut/ydb-library-workload-tpch-ut |71.8%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/tests/sql/dq_file/part19/py2_flake8 >> test.py::py2_flake8 [GOOD] |71.8%| [TS] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/flake8 >> test_split_merge.py::flake8 [GOOD] >> test_encryption.py::flake8 [GOOD] |71.8%| [TS] {RESULT} ydb/tests/fq/yt/kqp_yt_file/part19/flake8 |71.8%| [TA] $(B)/ydb/public/sdk/cpp/tests/unit/client/discovery_mutator/test-results/unittest/{meta.json ... results_accumulator.log} |71.8%| [TA] $(B)/ydb/public/sdk/cpp/tests/unit/client/result/test-results/unittest/{meta.json ... 
results_accumulator.log} >> test.py::py2_flake8 [GOOD] |71.8%| [TS] {RESULT} ydb/library/yql/providers/s3/credentials/ut/unittest |71.8%| [TS] {RESULT} ydb/tests/olap/data_quotas/flake8 |71.8%| [TS] {RESULT} ydb/library/yql/tests/sql/hybrid_file/part0/py2_flake8 |71.8%| [TS] {RESULT} ydb/tests/fq/yt/kqp_yt_file/part8/flake8 >> test_update_script_tables.py::flake8 [GOOD] |71.8%| [TS] {asan, default-linux-x86_64, release} ydb/tests/tools/kqprun/tests/flake8 >> test_kqprun_recipe.py::flake8 [GOOD] |71.8%| [TS] {asan, default-linux-x86_64, release} ydb/tests/datashard/copy_table/flake8 >> test_copy_table.py::flake8 [GOOD] |71.8%| [TS] {RESULT} ydb/tests/library/ut/flake8 >> runner.py::flake8 [GOOD] |71.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/fq/multi_plane/ydb-tests-fq-multi_plane |71.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/datashard/async_replication/ydb-tests-datashard-async_replication |71.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/fq/multi_plane/ydb-tests-fq-multi_plane |71.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/datashard/async_replication/ydb-tests-datashard-async_replication |71.8%| [TS] {asan, default-linux-x86_64, release} ydb/tests/functional/encryption/flake8 >> test_encryption.py::flake8 [GOOD] |71.8%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/tests/sql/dq_file/part10/py2_flake8 >> test.py::py2_flake8 [GOOD] >> test_schemeshard_limits.py::flake8 [GOOD] >> __main__.py::flake8 [GOOD] >> test.py::py2_flake8 [GOOD] |71.8%| [TS] {RESULT} ydb/library/yql/tests/sql/dq_file/part6/py2_flake8 |71.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/portions/write_with_blobs.cpp |71.8%| [TS] {RESULT} ydb/tests/datashard/split_merge/flake8 |71.8%| [CC] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/normalizer/abstract/abstract.h_serialized.cpp |71.8%| [TS] {RESULT} ydb/library/yql/tests/sql/dq_file/part19/py2_flake8 |71.8%| [TS] {asan, default-linux-x86_64, release} ydb/tests/functional/script_execution/flake8 >> test_update_script_tables.py::flake8 [GOOD] |71.8%| [TA] $(B)/ydb/core/scheme/ut/test-results/unittest/{meta.json ... results_accumulator.log} |71.8%| [LD] {RESULT} $(B)/ydb/tests/fq/multi_plane/ydb-tests-fq-multi_plane >> FYamlCpp::Out [GOOD] >> FYamlCpp::EnumEquals [GOOD] >> FYamlCpp::MapMark [GOOD] >> LibFyamlCore::doc_build_simple [GOOD] >> FYamlCpp::MultilineScalarMark [GOOD] >> FYamlCpp::SimpleScalarMark [GOOD] >> FYamlCpp::SequenceMark [GOOD] >> FYamlCpp::Leak [GOOD] >> LibFyamlCore::doc_build_parse_check >> FYamlCpp::ErrorHandling [GOOD] >> FYamlCpp::Parser [GOOD] >> Util::MaskTicket [GOOD] |71.9%| [TS] {asan, default-linux-x86_64, release} ydb/library/benchmarks/runner/runner/flake8 >> runner.py::flake8 [GOOD] |71.9%| [TA] $(B)/ydb/library/yql/dq/actors/spilling/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} |71.9%| [LD] {RESULT} $(B)/ydb/tests/datashard/async_replication/ydb-tests-datashard-async_replication >> LibFyamlCore::doc_build_parse_check [GOOD] >> Util::SanitizeNebiusTicket [GOOD] >> LibFyamlCore::doc_build_scalar [GOOD] >> Util::MaskNebiusTicket [GOOD] >> LibFyamlCore::doc_build_sequence [GOOD] >> LibFyamlCore::doc_build_mapping [GOOD] |71.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/audit/ydb-tests-functional-audit |71.9%| [TS] {asan, default-linux-x86_64, release} ydb/public/tools/local_ydb/flake8 >> __main__.py::flake8 [GOOD] >> LibFyamlCore::doc_path_access [GOOD] |71.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/functional/audit/ydb-tests-functional-audit |71.9%| [TA] {RESULT} $(B)/ydb/public/sdk/cpp/tests/unit/client/discovery_mutator/test-results/unittest/{meta.json ... results_accumulator.log} |71.9%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/tests/sql/dq_file/part3/py2_flake8 >> test.py::py2_flake8 [GOOD] >> LibFyamlCore::doc_path_node [GOOD] >> overlapping_portions.py::flake8 [GOOD] >> test.py::py2_flake8 [GOOD] >> LibFyamlCore::doc_path_parent [GOOD] >> test.py::py2_flake8 [GOOD] |71.9%| [TS] {asan, default-linux-x86_64, release} ydb/tests/functional/limits/flake8 >> test_schemeshard_limits.py::flake8 [GOOD] |71.9%| [TA] $(B)/ydb/public/sdk/cpp/tests/unit/client/value/test-results/gtest/{meta.json ... results_accumulator.log} |71.9%| [CC] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/normalizer/abstract/abstract.h_serialized.cpp >> LibFyamlCore::doc_short_path [GOOD] >> __main__.py::flake8 [GOOD] >> OldFormat::SameVersion [GOOD] >> OldFormat::DefaultRules [GOOD] >> OldFormat::PrevYear [GOOD] >> OldFormat::Trunk [GOOD] >> OldFormat::UnexpectedTrunk [GOOD] >> OldFormat::TooOld [GOOD] >> OldFormat::OldNbs [GOOD] >> VersionParser::Basic [GOOD] >> __main__.py::flake8 [GOOD] >> YdbVersion::DefaultSameVersion [GOOD] >> LibFyamlCore::doc_scalar_path [GOOD] >> YdbVersion::DefaultPrevMajor [GOOD] >> YdbVersion::DefaultNextMajor [GOOD] >> YdbVersion::DefaultHotfix [GOOD] >> YdbVersion::DefaultCompatible [GOOD] |71.9%| [TS] {RESULT} ydb/tests/tools/kqprun/tests/flake8 >> YdbVersion::DefaultNextYear [GOOD] |71.9%| [TA] {RESULT} $(B)/ydb/public/sdk/cpp/tests/unit/client/result/test-results/unittest/{meta.json ... 
results_accumulator.log} >> YdbVersion::DefaultPrevYear [GOOD] |71.9%| [TS] {RESULT} ydb/tests/datashard/copy_table/flake8 >> YdbVersion::DefaultNewMajor [GOOD] |71.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/portions/write_with_blobs.cpp >> YdbVersion::DefaultOldMajor [GOOD] >> YdbVersion::DefaultDifferentBuild [GOOD] >> YdbVersion::DefaultDifferentBuildIncompatible [GOOD] >> YdbVersion::LimitOld [GOOD] >> YdbVersion::LimitNew [GOOD] >> YdbVersion::CurrentCanLoadFrom [GOOD] >> YdbVersion::CurrentCanLoadFromAllOlder [GOOD] >> YdbVersion::CurrentCanLoadFromIncompatible [GOOD] >> YdbVersion::CurrentStoresReadableBy [GOOD] >> YdbVersion::StoredReadableBy [GOOD] >> YdbVersion::StoredReadableByIncompatible [GOOD] >> YdbVersion::StoredWithRules [GOOD] >> YdbVersion::StoredWithRulesIncompatible [GOOD] >> YdbVersion::OldNbsStored [GOOD] >> YdbVersion::OldNbsIncompatibleStored [GOOD] >> YdbVersion::NewNbsCurrent [GOOD] >> YdbVersion::NewNbsIncompatibleCurrent [GOOD] >> YdbVersion::OneAcceptedVersion [GOOD] >> YdbVersion::ForbiddenMinor [GOOD] >> YdbVersion::DefaultRulesWithExtraForbidden [GOOD] >> YdbVersion::ExtraAndForbidden [GOOD] >> LibFyamlCore::doc_scalar_path_array [GOOD] >> YdbVersion::SomeRulesAndOtherForbidden [GOOD] >> YdbVersion::Component [GOOD] >> YdbVersion::OtherComponent [GOOD] >> YdbVersion::YDBAndNbs [GOOD] >> YdbVersion::DifferentYdbVersionsWithNBSRules [GOOD] >> YdbVersion::TrunkYDBAndNbs [GOOD] >> YdbVersion::TrunkAndStable [GOOD] >> YdbVersion::CompatibleWithSelf [GOOD] >> YdbVersion::PrintCurrentVersionProto [GOOD] >> test_alter_ops.py::flake8 [GOOD] >> test_copy_ops.py::flake8 [GOOD] >> test_scheme_shard_operations.py::flake8 [GOOD] >> LibFyamlCore::doc_nearest_anchor [GOOD] >> test_dml.py::flake8 [GOOD] |71.9%| [TS] {asan, default-linux-x86_64, release} ydb/library/security/ut/unittest >> Util::MaskNebiusTicket [GOOD] |71.9%| [LD] {RESULT} $(B)/ydb/tests/functional/audit/ydb-tests-functional-audit |71.9%| [TS] {RESULT} ydb/library/yql/tests/sql/dq_file/part10/py2_flake8 |71.9%| [TS] {RESULT} ydb/tests/functional/encryption/flake8 |71.9%| [TS] {asan, default-linux-x86_64, release} ydb/tests/olap/oom/flake8 >> overlapping_portions.py::flake8 [GOOD] >> LibFyamlCore::doc_references [GOOD] >> __main__.py::flake8 [GOOD] |71.9%| [TS] {RESULT} ydb/tests/functional/script_execution/flake8 |71.9%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/tests/sql/dq_file/part0/py2_flake8 >> test.py::py2_flake8 [GOOD] |71.9%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/tests/sql/hybrid_file/part7/py2_flake8 >> test.py::py2_flake8 [GOOD] |71.9%| [TS] {RESULT} ydb/library/benchmarks/runner/runner/flake8 >> LibFyamlCore::doc_nearest_child_of [GOOD] |71.9%| [TS] {asan, default-linux-x86_64, release} ydb/tests/tools/nemesis/driver/flake8 >> __main__.py::flake8 [GOOD] |71.9%| [TS] {asan, default-linux-x86_64, release} ydb/tests/stress/oltp_workload/flake8 >> __main__.py::flake8 [GOOD] |71.9%| [TS] {asan, default-linux-x86_64, release} ydb/tests/functional/scheme_shard/flake8 >> test_scheme_shard_operations.py::flake8 [GOOD] |71.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/blobstorage/ydb-tests-functional-blobstorage >> test_stability.py::flake8 [GOOD] >> test.py::flake8 [GOOD] >> LibFyamlCore::doc_create_empty_seq1 [GOOD] >> __main__.py::flake8 [GOOD] >> integrations_test.py::flake8 [GOOD] >> LibFyamlCore::doc_create_empty_seq2 [GOOD] |71.9%| [TA] $(B)/ydb/public/sdk/cpp/tests/unit/client/draft/test-results/unittest/{meta.json 
... results_accumulator.log} |71.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/functional/blobstorage/ydb-tests-functional-blobstorage >> LibFyamlCore::doc_create_empty_map1 [GOOD] >> test.py::flake8 [GOOD] >> LibFyamlCore::doc_create_empty_map2 [GOOD] >> LibFyamlCore::doc_create_test_seq1 [GOOD] >> LibFyamlCore::doc_create_test_map1 [GOOD] |71.9%| [TS] {RESULT} ydb/public/tools/local_ydb/flake8 |71.9%| [TS] {asan, default-linux-x86_64, release} ydb/tests/datashard/dml/flake8 >> test_dml.py::flake8 [GOOD] |71.9%| [TS] {asan, default-linux-x86_64, release} ydb/tests/stress/olap_workload/flake8 >> __main__.py::flake8 [GOOD] |71.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/olap/ttl_tiering/ydb-tests-olap-ttl_tiering >> LibFyamlCore::doc_insert_remove_seq [GOOD] >> LibFyamlCore::doc_insert_remove_map [GOOD] >> LibFyamlCore::doc_sort [GOOD] >> YqlHighlightTests::Invalid [GOOD] >> LibFyamlCore::doc_join_scalar_to_scalar [GOOD] >> YqlHighlightTests::Keyword [GOOD] >> LibFyamlCore::doc_join_scalar_to_map [GOOD] |71.9%| [TS] {RESULT} ydb/library/yql/tests/sql/dq_file/part3/py2_flake8 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/driver_lib/version/ut/unittest >> YdbVersion::PrintCurrentVersionProto [GOOD] >> LibFyamlCore::doc_join_scalar_to_seq [GOOD] Test command err: Application: "ydb" >> YqlHighlightTests::Blank [GOOD] |71.9%| [TS] {asan, default-linux-x86_64, release} ydb/tests/stability/ydb/flake8 >> test_stability.py::flake8 [GOOD] >> LibFyamlCore::doc_join_map_to_scalar [GOOD] |71.9%| [TA] $(B)/ydb/public/sdk/cpp/tests/unit/library/issue/test-results/unittest/{meta.json ... results_accumulator.log} |71.9%| [TS] {asan, default-linux-x86_64, release} ydb/tests/fq/yt/kqp_yt_file/part5/flake8 >> test.py::flake8 [GOOD] |72.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/olap/ttl_tiering/ydb-tests-olap-ttl_tiering |72.0%| [TS] {RESULT} ydb/tests/functional/limits/flake8 >> LibFyamlCore::doc_join_map_to_seq [GOOD] >> YqlHighlightTests::Operation [GOOD] |72.0%| [TS] {asan, default-linux-x86_64, release} ydb/tests/postgres_integrations/library/ut/flake8 >> integrations_test.py::flake8 [GOOD] |72.0%| [TS] {asan, default-linux-x86_64, release} ydb/library/yaml_config/tools/simple_json_diff/flake8 >> __main__.py::flake8 [GOOD] |72.0%| [TA] $(B)/ydb/public/sdk/cpp/tests/unit/client/endpoints/test-results/unittest/{meta.json ... 
results_accumulator.log} |72.0%| [TS] {RESULT} ydb/tests/olap/oom/flake8 |72.0%| [TS] {RESULT} ydb/library/security/ut/unittest |72.0%| [TS] {asan, default-linux-x86_64, release} ydb/tests/fq/yt/kqp_yt_file/part12/flake8 >> test.py::flake8 [GOOD] >> LibFyamlCore::doc_join_map_to_map [GOOD] >> YqlHighlightTests::FunctionIdentifier [GOOD] >> YqlHighlightTests::TypeIdentifier >> LibFyamlCore::doc_join_seq_to_scalar [GOOD] >> conftest.py::flake8 [GOOD] >> test.py::py2_flake8 [GOOD] >> test_auth_system_views.py::flake8 [GOOD] >> YqlHighlightTests::TypeIdentifier [GOOD] >> LibFyamlCore::doc_join_seq_to_seq [GOOD] >> test_create_users.py::flake8 [GOOD] >> YqlHighlightTests::VariableIdentifier [GOOD] >> YqlHighlightTests::QuotedIdentifier [GOOD] >> YqlHighlightTests::String [GOOD] >> YqlHighlightTests::MultilineString [GOOD] >> YqlHighlightTests::TypedString [GOOD] >> YqlHighlightTests::Number [GOOD] >> YqlHighlightTests::SQL [GOOD] >> YqlHighlightTests::Emoji [GOOD] >> YqlHighlightTests::Typing >> test_create_users_strict_acl_checks.py::flake8 [GOOD] >> LibFyamlCore::doc_join_seq_to_map [GOOD] >> test_db_counters.py::flake8 [GOOD] |72.0%| [LD] {RESULT} $(B)/ydb/tests/functional/blobstorage/ydb-tests-functional-blobstorage >> LibFyamlCore::doc_join_tags [GOOD] |72.0%| [TS] {RESULT} ydb/library/yql/tests/sql/dq_file/part0/py2_flake8 >> test_dynamic_tenants.py::flake8 [GOOD] >> test_example.py::flake8 [GOOD] |72.0%| [LD] {RESULT} $(B)/ydb/tests/olap/ttl_tiering/ydb-tests-olap-ttl_tiering >> LibFyamlCore::doc_build_with_tags [GOOD] >> test_publish_into_schemeboard_with_common_ssring.py::flake8 [GOOD] >> LibFyamlCore::doc_attach_check [GOOD] >> test_storage_config.py::flake8 [GOOD] >> YqlHighlightTests::Typing [GOOD] >> LibFyamlCore::manual_scalar_esc [GOOD] >> YqlHighlightTests::Comment [GOOD] >> test_system_views.py::flake8 [GOOD] >> __main__.py::flake8 [GOOD] >> LibFyamlCore::manual_scalar_quoted [GOOD] >> YqlHighlightTests::Multiline [GOOD] >> test_tenants.py::flake8 [GOOD] |72.0%| [TS] {RESULT} ydb/library/yql/tests/sql/hybrid_file/part7/py2_flake8 |72.0%| [TS] {RESULT} ydb/tests/stress/oltp_workload/flake8 |72.0%| [TS] {RESULT} ydb/tests/tools/nemesis/driver/flake8 |72.0%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/tests/sql/dq_file/part9/py2_flake8 >> test.py::py2_flake8 [GOOD] |72.0%| [TS] {RESULT} ydb/tests/functional/scheme_shard/flake8 >> LibFyamlCore::manual_scalar_copy [GOOD] >> YqlHighlightTests::ANSI [GOOD] >> LibFyamlCore::manual_scalarf [GOOD] >> test_user_administration.py::flake8 [GOOD] >> LibFyamlCore::manual_valid_anchor [GOOD] >> test_users_groups_with_acl.py::flake8 [GOOD] >> StaticValidator::HostConfigs [GOOD] >> LibFyamlCore::manual_invalid_anchor [GOOD] |72.0%| [TA] {RESULT} $(B)/ydb/core/scheme/ut/test-results/unittest/{meta.json ... results_accumulator.log} |72.0%| [TA] {RESULT} $(B)/ydb/library/yql/dq/actors/spilling/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} >> LibFyamlCore::manual_anchor_removal [GOOD] >> StaticValidator::Hosts [GOOD] >> tpc_tests.py::flake8 [GOOD] >> LibFyamlCore::manual_block_flow_mix [GOOD] >> StaticValidator::DomainsConfig [GOOD] >> LibFyamlCore::scanf_check [GOOD] >> http_client.py::flake8 [GOOD] |72.0%| [TS] {asan, default-linux-x86_64, release} ydb/tests/example/flake8 >> test_example.py::flake8 [GOOD] |72.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/splitter/ut/ut_splitter.cpp >> query_results.py::flake8 [GOOD] >> test.py::py2_flake8 [GOOD] |72.0%| [TS] {asan, default-linux-x86_64, release} ydb/tests/functional/tenants/flake8 >> test_users_groups_with_acl.py::flake8 [GOOD] |72.0%| [TS] {asan, default-linux-x86_64, release} ydb/tests/stress/simple_queue/flake8 >> __main__.py::flake8 [GOOD] |72.0%| [CC] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/portions/portion_info.h_serialized.cpp >> test_cp_ic.py::flake8 [GOOD] >> test_dispatch.py::flake8 [GOOD] |72.0%| [TS] {asan, default-linux-x86_64, release} ydb/public/lib/ydb_cli/commands/interactive/highlight/ut/unittest >> YqlHighlightTests::ANSI [GOOD] |72.0%| [TS] {RESULT} ydb/tests/stability/ydb/flake8 >> test_retry.py::flake8 [GOOD] >> ConfigProto::ForbidNewRequired >> Validator::MultitypeNodeValidation [GOOD] >> test_retry_high_rate.py::flake8 [GOOD] >> Validator::MapValidation [GOOD] >> Validator::IntValidation [GOOD] >> Validator::IntArrayValidation [GOOD] >> Validator::StringValidation [GOOD] >> Validator::BoolValidation [GOOD] >> Validator::OpaqueMaps [GOOD] >> Validator::Enums [GOOD] >> common.py::flake8 [GOOD] >> conftest.py::flake8 [GOOD] >> test_rename.py::flake8 [GOOD] >> test.py::py2_flake8 [GOOD] >> MatchPredicate::EmptyMatch [GOOD] >> MatchPredicate::EmptyWhere [GOOD] >> MatchPredicate::Between [GOOD] >> MatchPredicate::Less [GOOD] >> MatchPredicate::NotLess [GOOD] >> MatchPredicate::RightColumn [GOOD] |72.0%| [TS] {asan, default-linux-x86_64, release} ydb/library/yaml_config/static_validator/ut/unittest >> StaticValidator::DomainsConfig [GOOD] |72.0%| [TA] {RESULT} $(B)/ydb/public/sdk/cpp/tests/unit/client/draft/test-results/unittest/{meta.json ... results_accumulator.log} |72.0%| [TS] {RESULT} ydb/library/yaml_config/tools/simple_json_diff/flake8 |72.0%| [TS] {asan, default-linux-x86_64, release} ydb/library/benchmarks/runner/flake8 >> tpc_tests.py::flake8 [GOOD] |72.0%| [TS] {asan, default-linux-x86_64, release} ydb/core/fq/libs/http_api_client/flake8 >> query_results.py::flake8 [GOOD] >> test_workload.py::flake8 [GOOD] |72.0%| [TS] {RESULT} ydb/tests/fq/yt/kqp_yt_file/part12/flake8 |72.0%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/tests/sql/dq_file/part7/py2_flake8 >> test.py::py2_flake8 [GOOD]
------- [TS] {asan, default-linux-x86_64, release} ydb/library/fyamlcpp/ut/unittest >> LibFyamlCore::scanf_check [GOOD]
Test command err:
:1:6: error: duplicate tag directive
%TAG !e! tag:example.com,2019:app/
     ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~
[{foo: bar, baz:}]
--- # with anchor
[&foo foo]
(null):1:1: error: cannot set anchor bar (anchor foo already exists)
foo
^~~
--- # without anchor
[&foo foo]
|72.0%| [TS] {RESULT} ydb/tests/fq/yt/kqp_yt_file/part5/flake8 >> ConfigProto::ForbidNewRequired [GOOD] >> column_table_helper.py::flake8 [GOOD] >> allure_utils.py::flake8 [GOOD] |72.0%| [TA] $(B)/ydb/library/yql/public/ydb_issue/ut/test-results/unittest/{meta.json ...
results_accumulator.log} >> range_allocator.py::flake8 [GOOD] |72.0%| [TS] {asan, default-linux-x86_64, release} ydb/tests/fq/multi_plane/flake8 >> test_retry_high_rate.py::flake8 [GOOD] |72.0%| [CC] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/portions/portion_info.h_serialized.cpp |72.0%| [TS] {RESULT} ydb/tests/postgres_integrations/library/ut/flake8 |72.0%| [TS] {asan, default-linux-x86_64, release} ydb/tests/functional/rename/flake8 >> test_rename.py::flake8 [GOOD] >> results_processor.py::flake8 [GOOD] >> s3_client.py::flake8 [GOOD] >> test_clickbench.py::flake8 [GOOD] >> utils.py::flake8 [GOOD] >> test_tpcds.py::flake8 [GOOD] >> test_tpch.py::flake8 [GOOD] |72.1%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/tests/sql/dq_file/part16/py2_flake8 >> test.py::py2_flake8 [GOOD] >> thread_helper.py::flake8 [GOOD] >> time_histogram.py::flake8 [GOOD] >> test_log_scenario.py::flake8 [GOOD] >> ydb_cli.py::flake8 [GOOD] >> zip_bomb.py::flake8 [GOOD] |72.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/reader/common/description.cpp |72.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/library/yaml_config/yaml_config_proto2yaml_ut.cpp |72.1%| [TS] {asan, default-linux-x86_64, release} ydb/library/yaml_config/validator/ut/validator/unittest >> Validator::Enums [GOOD] |72.1%| [TS] {asan, default-linux-x86_64, release} ydb/tests/stress/log/tests/flake8 >> test_workload.py::flake8 [GOOD] >> ydb_cluster.py::flake8 [GOOD] >> utils.py::flake8 [GOOD] >> test.py::py2_flake8 [GOOD] >> ydb_client.py::flake8 [GOOD] |72.1%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/providers/generic/pushdown/ut/unittest >> MatchPredicate::RightColumn [GOOD] |72.1%| [LD] {BAZEL_UPLOAD} $(B)/ydb/library/yql/dq/actors/spilling/ut/ydb-library-yql-dq-actors-spilling-ut |72.1%| [TS] {RESULT} ydb/library/yql/tests/sql/dq_file/part9/py2_flake8 |72.1%| [TS] {RESULT} ydb/tests/datashard/dml/flake8 |72.1%| [TA] {RESULT} $(B)/ydb/public/sdk/cpp/tests/unit/library/issue/test-results/unittest/{meta.json ... results_accumulator.log} >> conftest.py::flake8 [GOOD] >> test_unknown_data_source.py::flake8 [GOOD] >> ValidatorBuilder::CanHaveMultipleType [GOOD] >> ValidatorBuilder::CreateMultitypeNode [GOOD] >> ValidatorBuilder::BuildSimpleValidator [GOOD] >> ValidatorBuilder::CanCreateAllTypesOfNodes [GOOD] >> ValidatorBuilder::CanHaveDuplicateType [GOOD] |72.1%| [TS] {RESULT} ydb/tests/stress/olap_workload/flake8 |72.1%| [TS] {RESULT} ydb/tests/example/flake8 |72.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/splitter/ut/ut_splitter.cpp >> test_workload.py::flake8 [GOOD] >> test.py::py2_flake8 [GOOD] |72.1%| [TS] {RESULT} ydb/tests/stress/simple_queue/flake8 |72.1%| [TS] {asan, default-linux-x86_64, release} ydb/tests/olap/load/flake8 >> test_tpch.py::flake8 [GOOD] |72.1%| [TS] {asan, default-linux-x86_64, release} ydb/core/config/ut/unittest >> ConfigProto::ForbidNewRequired [GOOD] |72.1%| [TA] $(B)/ydb/core/persqueue/codecs/ut/test-results/unittest/{meta.json ... results_accumulator.log} |72.1%| [TS] {asan, default-linux-x86_64, release} ydb/tests/olap/lib/flake8 >> ydb_cluster.py::flake8 [GOOD] >> test.py::flake8 [GOOD] |72.1%| [TS] {asan, default-linux-x86_64, release} ydb/tests/olap/common/flake8 >> ydb_client.py::flake8 [GOOD] |72.1%| [TA] {RESULT} $(B)/ydb/public/sdk/cpp/tests/unit/client/endpoints/test-results/unittest/{meta.json ... 
results_accumulator.log} |72.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/common/description.cpp |72.1%| [TS] {asan, default-linux-x86_64, release} ydb/tests/olap/flake8 >> zip_bomb.py::flake8 [GOOD] |72.1%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/tests/sql/dq_file/part12/py2_flake8 >> test.py::py2_flake8 [GOOD] |72.1%| [TA] $(B)/ydb/public/sdk/cpp/tests/unit/client/params/test-results/gtest/{meta.json ... results_accumulator.log} |72.1%| [TS] {asan, default-linux-x86_64, release} ydb/tests/fq/common/flake8 >> test_unknown_data_source.py::flake8 [GOOD] |72.1%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/tests/sql/hybrid_file/part4/py2_flake8 >> test.py::py2_flake8 [GOOD] |72.1%| [TS] {asan, default-linux-x86_64, release} ydb/tests/stress/simple_queue/tests/flake8 >> test_workload.py::flake8 [GOOD] >> conftest.py::flake8 [GOOD] >> docker_wrapper_test.py::flake8 [GOOD] >> test.py::flake8 [GOOD] |72.1%| [TA] $(B)/ydb/public/lib/json_value/ut/test-results/unittest/{meta.json ... results_accumulator.log} |72.1%| [TS] {asan, default-linux-x86_64, release} ydb/library/yaml_config/validator/ut/validator_builder/unittest >> ValidatorBuilder::CanHaveDuplicateType [GOOD] |72.1%| [TS] {asan, default-linux-x86_64, release} ydb/tests/fq/yt/kqp_yt_file/part16/flake8 >> test.py::flake8 [GOOD] |72.1%| [TS] {RESULT} ydb/core/fq/libs/http_api_client/flake8 >> test_pdisk_format_info.py::flake8 [GOOD] >> test_replication.py::flake8 [GOOD] >> test_self_heal.py::flake8 [GOOD] >> test_tablet_channel_migration.py::flake8 [GOOD] |72.1%| [TS] {RESULT} ydb/library/benchmarks/runner/flake8 |72.1%| [TS] {RESULT} ydb/tests/stress/log/tests/flake8 |72.1%| [TS] {RESULT} ydb/library/yaml_config/static_validator/ut/unittest |72.1%| [TS] {RESULT} ydb/library/yql/tests/sql/dq_file/part7/py2_flake8 >> test_transform.py::flake8 [GOOD] >> collection.py::flake8 [GOOD] >> conftest.py::flake8 [GOOD] >> select_positive.py::flake8 [GOOD] >> test.py::flake8 [GOOD] >> test_quoting.py::flake8 [GOOD] >> Checks::BasicIntChecks [GOOD] |72.1%| [TS] {asan, default-linux-x86_64, release} ydb/tests/postgres_integrations/go-libpq/flake8 >> docker_wrapper_test.py::flake8 [GOOD] >> Checks::ErrorInCheck [GOOD] >> Checks::IntArrayValidation [GOOD] >> Checks::OpaqueMaps [GOOD] >> Checks::MapValidation [GOOD] >> Checks::BasicStringChecks [GOOD] |72.1%| [TS] {asan, default-linux-x86_64, release} ydb/tests/fq/yt/kqp_yt_file/part2/flake8 >> test.py::flake8 [GOOD] |72.1%| [TS] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/flake8 >> test_tablet_channel_migration.py::flake8 [GOOD] |72.1%| [TS] {RESULT} ydb/library/yql/providers/generic/pushdown/ut/unittest |72.2%| [TS] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/flake8 >> test_quoting.py::flake8 [GOOD] |72.2%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/providers/generic/connector/tests/datasource/ydb/flake8 >> test.py::flake8 [GOOD] >> TContinueFromStreamingOffsetsPlanTest::OneToOneMapping [GOOD] >> TContinueFromStreamingOffsetsPlanTest::MultipleTopics [GOOD] >> TContinueFromStreamingOffsetsPlanTest::AllTopicsMustBeUsedInNonForceMode [GOOD] >> test.py::py2_flake8 [GOOD] >> TContinueFromStreamingOffsetsPlanTest::DifferentPartitioning [GOOD] >> TContinueFromStreamingOffsetsPlanTest::ReadPartitionInSeveralPlacesIsOk [GOOD] >> TContinueFromStreamingOffsetsPlanTest::NotMappedAllPartitions [GOOD] >> test_mixed.py::flake8 [GOOD] >> 
TContinueFromStreamingOffsetsPlanTest::MapSeveralReadingsToOneIsAllowedOnlyInForceMode [GOOD] >> TContinueFromStreamingOffsetsPlanTest::Empty [GOOD] |72.2%| [TS] {RESULT} ydb/library/yaml_config/validator/ut/validator/unittest |72.2%| [TS] {RESULT} ydb/tests/fq/multi_plane/flake8 |72.2%| [TS] {RESULT} ydb/tests/olap/lib/flake8 |72.2%| [TS] {RESULT} ydb/tests/olap/common/flake8 |72.2%| [TS] {RESULT} ydb/tests/functional/rename/flake8 |72.2%| [TS] {asan, default-linux-x86_64, release} ydb/library/yaml_config/ut_transform/flake8 >> test_transform.py::flake8 [GOOD] |72.2%| [TS] {RESULT} ydb/public/lib/ydb_cli/commands/interactive/highlight/ut/unittest |72.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/reader/abstract/abstract.cpp |72.2%| [TS] {asan, default-linux-x86_64, release} ydb/library/yaml_config/validator/ut/validator_checks/unittest >> Checks::BasicStringChecks [GOOD] |72.2%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/dq/state/ut/unittest >> TContinueFromStreamingOffsetsPlanTest::Empty [GOOD] >> test.py::py2_flake8 [GOOD] |72.2%| [TA] {RESULT} $(B)/ydb/public/sdk/cpp/tests/unit/client/value/test-results/gtest/{meta.json ... results_accumulator.log} >> test_actorsystem.py::flake8 [GOOD] |72.2%| [LD] {BAZEL_UPLOAD} $(B)/ydb/tests/tools/pq_read/pq_read |72.2%| [TS] {RESULT} ydb/library/yaml_config/validator/ut/validator_builder/unittest |72.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/library/yaml_config/yaml_config_proto2yaml_ut.cpp |72.2%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/tests/sql/dq_file/part5/py2_flake8 >> test.py::py2_flake8 [GOOD] |72.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/abstract/abstract.cpp |72.2%| [TS] {asan, default-linux-x86_64, release} ydb/tests/stress/mixedpy/flake8 >> test_mixed.py::flake8 [GOOD] |72.2%| [TS] {RESULT} ydb/tests/fq/yt/kqp_yt_file/part16/flake8 >> test.py::py2_flake8 [GOOD] >> collection.py::flake8 [GOOD] >> conftest.py::flake8 [GOOD] >> select_datetime.py::flake8 [GOOD] >> test_liveness_wardens.py::flake8 [GOOD] >> select_positive.py::flake8 [GOOD] >> test.py::flake8 [GOOD] >> test.py::flake8 [GOOD] |72.2%| [TS] {RESULT} ydb/tests/olap/flake8 |72.2%| [TS] {RESULT} ydb/tests/fq/common/flake8 |72.2%| [TS] {RESULT} ydb/library/yql/providers/generic/connector/tests/datasource/ydb/flake8 >> test.py::py2_flake8 [GOOD] |72.2%| [TS] {RESULT} ydb/tests/stress/simple_queue/tests/flake8 |72.2%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/tests/sql/dq_file/part15/py2_flake8 >> test.py::py2_flake8 [GOOD] |72.2%| [TS] {RESULT} ydb/library/yql/tests/sql/hybrid_file/part4/py2_flake8 |72.2%| [TS] {asan, default-linux-x86_64, release} ydb/tests/functional/autoconfig/flake8 >> test_actorsystem.py::flake8 [GOOD] |72.2%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/tests/sql/dq_file/part8/py2_flake8 >> test.py::py2_flake8 [GOOD] |72.2%| [TM] {RESULT} ydb/core/driver_lib/version/ut/unittest |72.2%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/providers/generic/connector/tests/datasource/ms_sql_server/flake8 >> test.py::flake8 [GOOD] |72.2%| [TS] {RESULT} ydb/tests/fq/yt/kqp_yt_file/part2/flake8 >> conftest.py::flake8 [GOOD] >> s3_helpers.py::flake8 [GOOD] >> test_bindings_0.py::flake8 [GOOD] >> test_bindings_1.py::flake8 [GOOD] >> test_compressions.py::flake8 [GOOD] >> test.py::flake8 [GOOD] >> test.py::py2_flake8 [GOOD] >> test_early_finish.py::flake8 [GOOD] >> test_explicit_partitioning_0.py::flake8 [GOOD] >> 
test_explicit_partitioning_1.py::flake8 [GOOD] |72.2%| [TS] {asan, default-linux-x86_64, release} ydb/tests/fq/yt/kqp_yt_file/part1/flake8 >> test.py::flake8 [GOOD] |72.2%| [TS] {asan, default-linux-x86_64, release} ydb/tests/functional/wardens/flake8 >> test_liveness_wardens.py::flake8 [GOOD] |72.2%| [TS] {RESULT} ydb/tests/functional/blobstorage/flake8 |72.2%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/tests/sql/hybrid_file/part8/py2_flake8 >> test.py::py2_flake8 [GOOD] |72.2%| [TS] {RESULT} ydb/tests/functional/sqs/with_quotas/flake8 >> test_format_setting.py::flake8 [GOOD] >> test_formats.py::flake8 [GOOD] >> test_inflight.py::flake8 [GOOD] >> collection.py::flake8 [GOOD] >> test_insert.py::flake8 [GOOD] >> test_public_metrics.py::flake8 [GOOD] >> test_push_down.py::flake8 [GOOD] >> conftest.py::flake8 [GOOD] >> select_datetime.py::flake8 [GOOD] >> conftest.py::flake8 [GOOD] >> test_s3_0.py::flake8 [GOOD] |72.2%| [TS] {RESULT} ydb/tests/postgres_integrations/go-libpq/flake8 |72.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/predicate/filter.cpp >> select_positive.py::flake8 [GOOD] >> test.py::flake8 [GOOD] >> test_ttl.py::flake8 [GOOD] >> test_account_actions.py::flake8 [GOOD] >> test_auditlog.py::flake8 [GOOD] >> test.py::py2_flake8 [GOOD] >> test_s3_1.py::flake8 [GOOD] >> test_acl.py::flake8 [GOOD] >> test_counters.py::flake8 [GOOD] >> test_format_without_version.py::flake8 [GOOD] >> test_garbage_collection.py::flake8 [GOOD] >> test_multiplexing_tables_format.py::flake8 [GOOD] >> test_ping.py::flake8 [GOOD] >> test_queue_attributes_validation.py::flake8 [GOOD] >> test_queue_counters.py::flake8 [GOOD] >> test_queue_tags.py::flake8 [GOOD] >> test_queues_managing.py::flake8 [GOOD] >> test_throttling.py::flake8 [GOOD] >> DataGeneratorState::SaveLoad [GOOD] |72.2%| [TS] {RESULT} ydb/library/yaml_config/ut_transform/flake8 |72.3%| [TS] {RESULT} ydb/library/yql/tests/sql/dq_file/part5/py2_flake8 |72.3%| [TS] {RESULT} ydb/tests/stress/mixedpy/flake8 >> test_size_limit.py::flake8 [GOOD] >> DataGeneratorState::PortionProcessing [GOOD] >> test.py::py2_flake8 [GOOD] >> base.py::flake8 [GOOD] >> test_tpch_import.py::flake8 [GOOD] >> test_statistics.py::flake8 [GOOD] >> test_streaming_join.py::flake8 [GOOD] >> test_test_connection.py::flake8 [GOOD] |72.3%| [TS] {RESULT} ydb/library/yql/tests/sql/dq_file/part15/py2_flake8 |72.3%| [TS] {RESULT} ydb/library/yql/tests/sql/dq_file/part12/py2_flake8 |72.3%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/tests/sql/dq_file/part14/py2_flake8 >> test.py::py2_flake8 [GOOD] |72.3%| [TS] {asan, default-linux-x86_64, release} ydb/tests/fq/yt/kqp_yt_file/part6/flake8 >> test.py::flake8 [GOOD] |72.3%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/providers/generic/connector/tests/datasource/clickhouse/flake8 >> test.py::flake8 [GOOD] >> test_validation.py::flake8 [GOOD] >> test_ydb_over_fq.py::flake8 [GOOD] >> test_yq_v2.py::flake8 [GOOD] |72.3%| [TS] {asan, default-linux-x86_64, release} ydb/tests/functional/ttl/flake8 >> test_ttl.py::flake8 [GOOD] |72.3%| [TS] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/flake8 >> test_auditlog.py::flake8 [GOOD] |72.3%| [TS] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/common/flake8 >> test_throttling.py::flake8 [GOOD] >> test_base.py::flake8 [GOOD] >> conftest.py::black [GOOD] >> test_http_api.py::flake8 [GOOD] >> test_clickhouse.py::black [GOOD] >> test_greenplum.py::black [GOOD] >> test_join.py::black [GOOD] >> 
test_mysql.py::black [GOOD] >> test_postgresql.py::black [GOOD] >> test_ydb.py::black [GOOD] |72.3%| [TS] {RESULT} ydb/library/yaml_config/validator/ut/validator_checks/unittest |72.3%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/tests/sql/dq_file/part13/py2_flake8 >> test.py::py2_flake8 [GOOD] |72.3%| [TS] {asan, default-linux-x86_64, release} ydb/tests/fq/s3/flake8 >> test_yq_v2.py::flake8 [GOOD] |72.3%| [TS] {RESULT} ydb/tests/functional/autoconfig/flake8 |72.3%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/tests/sql/dq_file/part18/py2_flake8 >> test.py::py2_flake8 [GOOD] >> test.py::flake8 [GOOD] >> __main__.py::flake8 [GOOD] >> test.py::flake8 [GOOD] |72.3%| [TS] {asan, default-linux-x86_64, release} ydb/library/workload/benchmark_base/ut/unittest >> DataGeneratorState::PortionProcessing [GOOD] |72.3%| [TS] {asan, default-linux-x86_64, release} ydb/tests/olap/s3_import/flake8 >> test_tpch_import.py::flake8 [GOOD] |72.3%| [TS] {asan, default-linux-x86_64, release} ydb/tests/fq/generic/analytics/black >> test_ydb.py::black [GOOD] >> test_workload.py::flake8 [GOOD] >> test.py::py2_flake8 [GOOD] >> __main__.py::flake8 [GOOD] >> test_dump_restore.py::flake8 [GOOD] |72.3%| [TS] {RESULT} ydb/library/yql/tests/sql/dq_file/part8/py2_flake8 |72.3%| [TS] {RESULT} ydb/library/yql/dq/state/ut/unittest |72.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/predicate/filter.cpp |72.3%| [TS] {RESULT} ydb/library/yql/providers/generic/connector/tests/datasource/ms_sql_server/flake8 |72.3%| [TS] {asan, default-linux-x86_64, release} ydb/tests/fq/http_api/flake8 >> test_http_api.py::flake8 [GOOD] |72.3%| [TS] {RESULT} ydb/tests/functional/wardens/flake8 >> helpers.py::flake8 [GOOD] >> test_base.py::flake8 [GOOD] |72.3%| [TA] {RESULT} $(B)/ydb/public/sdk/cpp/tests/unit/client/params/test-results/gtest/{meta.json ... 
results_accumulator.log} |72.3%| [TS] {RESULT} ydb/library/yql/tests/sql/hybrid_file/part8/py2_flake8 |72.3%| [TS] {asan, default-linux-x86_64, release} ydb/tests/tools/ydb_serializable/flake8 >> __main__.py::flake8 [GOOD] >> test_query.py::flake8 [GOOD] >> test_s3.py::flake8 [GOOD] |72.3%| [TS] {asan, default-linux-x86_64, release} ydb/tests/fq/yt/kqp_yt_file/part4/flake8 >> test.py::flake8 [GOOD] |72.3%| [TS] {RESULT} ydb/tests/fq/yt/kqp_yt_file/part1/flake8 |72.3%| [LD] {BAZEL_UPLOAD} $(B)/ydb/library/yql/providers/s3/credentials/ut/ydb-library-yql-providers-s3-credentials-ut |72.3%| [TS] {asan, default-linux-x86_64, release} ydb/tests/fq/yt/kqp_yt_file/part10/flake8 >> test.py::flake8 [GOOD] |72.3%| [TS] {asan, default-linux-x86_64, release} ydb/tests/stress/oltp_workload/tests/flake8 >> test_workload.py::flake8 [GOOD] |72.3%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/tests/sql/dq_file/part11/py2_flake8 >> test.py::py2_flake8 [GOOD] |72.3%| [TS] {RESULT} ydb/library/yql/tests/sql/dq_file/part16/py2_flake8 >> test_common.py::flake8 [GOOD] >> test_yandex_cloud_mode.py::flake8 [GOOD] >> test_yandex_cloud_queue_counters.py::flake8 [GOOD] >> main.py::flake8 [GOOD] |72.3%| [TS] {asan, default-linux-x86_64, release} ydb/public/tools/ydb_recipe/flake8 >> __main__.py::flake8 [GOOD] |72.3%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/base/generated/codegen/ydb-core-base-generated-codegen |72.3%| [TS] {asan, default-linux-x86_64, release} ydb/tests/datashard/dump_restore/flake8 >> test_dump_restore.py::flake8 [GOOD] |72.4%| [TS] {asan, default-linux-x86_64, release} ydb/tests/sql/lib/flake8 >> test_s3.py::flake8 [GOOD] >> test.py::py2_flake8 [GOOD] |72.4%| [TS] {RESULT} ydb/library/yql/tests/sql/dq_file/part14/py2_flake8 |72.4%| [TS] {RESULT} ydb/tests/olap/load/flake8 >> conftest.py::flake8 [GOOD] >> test_stats_mode.py::flake8 [GOOD] |72.4%| [TS] {RESULT} ydb/tests/functional/audit/flake8 |72.4%| [TS] {asan, default-linux-x86_64, release} ydb/apps/dstool/flake8 >> main.py::flake8 [GOOD] |72.4%| [TS] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/cloud/flake8 >> test_yandex_cloud_queue_counters.py::flake8 [GOOD] >> test_clickbench.py::flake8 [GOOD] >> test_diff_processing.py::flake8 [GOOD] >> TestCommon::Empty [GOOD] >> TestCommon::ParseCounterName [GOOD] >> TestCommon::CollectTaskRunnerStatisticsByTask [GOOD] >> TestCommon::CollectTaskRunnerStatisticsByStage [GOOD] >> test_tpch.py::flake8 [GOOD] >> test_restarts.py::flake8 [GOOD] >> test.py::flake8 [GOOD] |72.4%| [TS] {RESULT} ydb/tests/olap/s3_import/flake8 |72.4%| [TS] {RESULT} ydb/tests/functional/sqs/common/flake8 |72.4%| [TS] {RESULT} ydb/tests/fq/generic/analytics/black |72.4%| [TS] {RESULT} ydb/tests/fq/yt/kqp_yt_file/part6/flake8 |72.4%| [TS] {RESULT} ydb/tests/fq/s3/flake8 |72.4%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/tests/sql/hybrid_file/part2/py2_flake8 >> test.py::py2_flake8 [GOOD] >> conftest.py::flake8 [GOOD] >> test_2_selects_limit.py::flake8 [GOOD] >> test_3_selects.py::flake8 [GOOD] >> test_bad_syntax.py::flake8 [GOOD] |72.4%| [TS] {asan, default-linux-x86_64, release} ydb/tests/fq/plans/flake8 >> test_stats_mode.py::flake8 [GOOD] |72.4%| [TS] {RESULT} ydb/library/yql/tests/sql/dq_file/part18/py2_flake8 |72.4%| [TS] {asan, default-linux-x86_64, release} ydb/tests/functional/tpc/medium/flake8 >> test_tpch.py::flake8 [GOOD] >> test_fifo_messaging.py::flake8 [GOOD] >> test_generic_messaging.py::flake8 [GOOD] >> test_polling.py::flake8 [GOOD] >> test_base.py::flake8 [GOOD] >> 
test_big_state.py::flake8 [GOOD] >> test_leader_start_inflight.py::flake8 [GOOD] >> collection.py::flake8 [GOOD] >> test_continue_mode.py::flake8 [GOOD] >> conftest.py::flake8 [GOOD] >> scenario.py::flake8 [GOOD] >> test.py::flake8 [GOOD] >> test_case.py::flake8 [GOOD] >> test_cpu_quota.py::flake8 [GOOD] |72.4%| [TA] {RESULT} $(B)/ydb/public/lib/json_value/ut/test-results/unittest/{meta.json ... results_accumulator.log} |72.4%| [TS] {asan, default-linux-x86_64, release} ydb/tests/functional/restarts/flake8 >> test_restarts.py::flake8 [GOOD] >> test_delete_read_rules_after_abort_by_system.py::flake8 [GOOD] >> test_disposition.py::flake8 [GOOD] >> test_eval.py::flake8 [GOOD] >> test_invalid_consumer.py::flake8 [GOOD] >> test_kill_pq_bill.py::flake8 [GOOD] >> test_mem_alloc.py::flake8 [GOOD] >> test_workload.py::flake8 [GOOD] >> test_metrics_cleanup.py::flake8 [GOOD] >> SecurityPrinterTest::PrintSensitive [GOOD] >> TokenPrinterTest::PrintToken [GOOD] >> FieldSizePrinterTest::PrintRecursiveType [GOOD] |72.4%| [TS] {RESULT} ydb/tests/fq/http_api/flake8 |72.4%| [TS] {asan, default-linux-x86_64, release} ydb/tests/fq/yt/kqp_yt_file/part9/flake8 >> test.py::flake8 [GOOD] |72.4%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/providers/dq/provider/ut/unittest >> TestCommon::CollectTaskRunnerStatisticsByStage [GOOD] >> SecurityPrinterTest::PrintRecursiveType [GOOD] >> PrinterWrapperTest::PrintsToStream [GOOD] >> HideFieldPrinterTest::PrintNoValue [GOOD] >> FieldSizePrinterTest::PrintSuccess [GOOD] >> PrinterWrapperTest::PrintsToString [GOOD] >> test_pq_read_write.py::flake8 [GOOD] >> SanitizeLable::SkipSingleBadSymbol [GOOD] >> test.py::flake8 [GOOD] |72.4%| [TS] {RESULT} ydb/library/yql/tests/sql/dq_file/part11/py2_flake8 |72.4%| [TS] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/flake8 >> test_polling.py::flake8 [GOOD] |72.4%| [TS] {RESULT} ydb/library/workload/benchmark_base/ut/unittest |72.4%| [TS] {RESULT} ydb/public/tools/ydb_recipe/flake8 |72.4%| [TS] {RESULT} ydb/tests/sql/lib/flake8 |72.4%| [TS] {RESULT} ydb/apps/dstool/flake8 >> Metrics::EmptyIssuesList [GOOD] >> test_public_metrics.py::flake8 [GOOD] >> Metrics::SeveralSubItems [GOOD] >> conftest.py::flake8 [GOOD] >> Metrics::MoreThanFiveItems [GOOD] >> Metrics::OnlyOneItem [GOOD] >> SanitizeLable::Empty [GOOD] >> SanitizeLable::Truncate200 [GOOD] >> Metrics::SeveralTopItems [GOOD] >> SanitizeLable::SkipBadSymbols [GOOD] >> Metrics::CombineSubItems [GOOD] >> test_clickhouse.py::flake8 [GOOD] >> test_greenplum.py::flake8 [GOOD] >> test_join.py::flake8 [GOOD] >> test_mysql.py::flake8 [GOOD] >> test_postgresql.py::flake8 [GOOD] >> test_ydb.py::flake8 [GOOD] >> test_read_rules_deletion.py::flake8 [GOOD] >> test.py::py2_flake8 [GOOD] >> test_recovery.py::flake8 [GOOD] >> test_recovery_match_recognize.py::flake8 [GOOD] >> test_recovery_mz.py::flake8 [GOOD] >> test_restart_query.py::flake8 [GOOD] |72.4%| [TS] {RESULT} ydb/tests/datashard/dump_restore/flake8 |72.4%| [TS] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/large/flake8 >> test_leader_start_inflight.py::flake8 [GOOD] |72.4%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/providers/generic/connector/tests/join/flake8 >> test_case.py::flake8 [GOOD] |72.4%| [TS] {asan, default-linux-x86_64, release} ydb/tests/stress/olap_workload/tests/flake8 >> test_workload.py::flake8 [GOOD] |72.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/library/yaml_config/yaml_config_ut.cpp >> test_row_dispatcher.py::flake8 [GOOD] >> 
test_select_1.py::flake8 [GOOD] >> test_select_limit.py::flake8 [GOOD] >> test_select_limit_db_id.py::flake8 [GOOD] >> test_select_timings.py::flake8 [GOOD] >> test_stop.py::flake8 [GOOD] >> gen-report.py::flake8 [GOOD] |72.4%| [TS] {RESULT} ydb/tests/functional/sqs/cloud/flake8 |72.4%| [TS] {asan, default-linux-x86_64, release} ydb/library/protobuf_printer/ut/unittest >> PrinterWrapperTest::PrintsToString [GOOD] |72.4%| [TS] {asan, default-linux-x86_64, release} ydb/tests/fq/yt/kqp_yt_file/part0/flake8 >> test.py::flake8 [GOOD] |72.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/metrics/ut/unittest >> Metrics::CombineSubItems [GOOD] >> test_watermarks.py::flake8 [GOOD] >> test.py::flake8 [GOOD] >> test_yds_bindings.py::flake8 [GOOD] |72.4%| [TS] {asan, default-linux-x86_64, release} ydb/tests/fq/generic/analytics/flake8 >> test_ydb.py::flake8 [GOOD] |72.4%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/tests/sql/dq_file/part1/py2_flake8 >> test.py::py2_flake8 [GOOD] |72.4%| [TS] {RESULT} ydb/library/yql/tests/sql/dq_file/part13/py2_flake8 >> TpchQueries::ScaleFactor >> test_yq_streaming.py::flake8 [GOOD] >> TCommandWorkloadTopicParamsTests::TestRun_StrToBytes_Simple [GOOD] >> TCommandWorkloadTopicParamsTests::TestRun_StrToBytes_Kilo [GOOD] >> TCommandWorkloadTopicParamsTests::TestRun_StrToBytes_Mega [GOOD] >> TCommandWorkloadTopicParamsTests::TestRun_StrToBytes_Giga [GOOD] >> TCommandWorkloadTopicParamsTests::TestRun_StrToBytes_Error [GOOD] >> TTopicWorkloadWriterProducerTests::HandleAckEvent_ShouldSaveStatistics [GOOD] |72.5%| [TS] {RESULT} ydb/tests/fq/yt/kqp_yt_file/part4/flake8 |72.5%| [TS] {asan, default-linux-x86_64, release} ydb/library/benchmarks/runner/result_convert/flake8 >> gen-report.py::flake8 [GOOD] >> TTopicWorkloadWriterProducerTests::Send_ShouldCallWriteMethodOfTheWriteSession [GOOD] >> test.py::flake8 [GOOD] >> TTopicWorkloadWriterProducerTests::WaitForContinuationToken_ShouldExtractContinuationTokenFromEvent [GOOD] >> TpchQueries::ScaleFactor [GOOD] |72.5%| [TS] {asan, default-linux-x86_64, release} ydb/tests/fq/yds/flake8 >> test_yq_streaming.py::flake8 [GOOD] |72.5%| [TS] {asan, default-linux-x86_64, release} ydb/tests/fq/yt/kqp_yt_file/part11/flake8 >> test.py::flake8 [GOOD] |72.5%| [TS] {RESULT} ydb/tests/fq/yt/kqp_yt_file/part10/flake8 >> TTopicWorkloadWriterProducerTests::WaitForContinuationToken_ShouldThrowExceptionIfEventOfTheWrongType [GOOD] >> test.py::flake8 [GOOD] |72.5%| [TS] {RESULT} ydb/tests/tools/ydb_serializable/flake8 >> test_workload.py::flake8 [GOOD] >> test_crud.py::flake8 [GOOD] >> test_inserts.py::flake8 [GOOD] >> test_kv.py::flake8 [GOOD] |72.5%| [TS] {RESULT} ydb/tests/stress/oltp_workload/tests/flake8 |72.5%| [TS] {RESULT} ydb/tests/fq/plans/flake8 |72.5%| [TS] {RESULT} ydb/tests/functional/tpc/medium/flake8 >> conftest.py::flake8 [GOOD] >> test_join.py::flake8 [GOOD] |72.5%| [TS] {RESULT} ydb/tests/fq/yt/kqp_yt_file/part9/flake8 |72.5%| [TS] {RESULT} ydb/tests/functional/restarts/flake8 |72.5%| [TS] {asan, default-linux-x86_64, release} ydb/tests/fq/yt/kqp_yt_file/part18/flake8 >> test.py::flake8 [GOOD] |72.5%| [TS] {asan, default-linux-x86_64, release} ydb/tests/sql/flake8 >> test_kv.py::flake8 [GOOD] >> Json::BasicRendering [GOOD] >> CertFormatConverter::ParseFromPEM >> test.py::flake8 [GOOD] |72.5%| [TS] {asan, default-linux-x86_64, release} ydb/public/lib/ydb_cli/commands/topic_workload/ut/unittest >> TTopicWorkloadWriterProducerTests::WaitForContinuationToken_ShouldThrowExceptionIfEventOfTheWrongType [GOOD] 
|72.5%| [TS] {asan, default-linux-x86_64, release} ydb/tests/fq/yt/kqp_yt_file/part7/flake8 >> test.py::flake8 [GOOD] |72.5%| [TS] {asan, default-linux-x86_64, release} ydb/tests/stress/kv/tests/flake8 >> test_workload.py::flake8 [GOOD] >> CertFormatConverter::ParseFromPEM [GOOD] >> CertFormatConverter::InvalidKey [GOOD] >> CertFormatConverter::InvalidCert [GOOD] >> CertFormatConverter::InvalidKeyTakeFromCert [GOOD] >> NormalizePathTest::TestNormalization [GOOD] >> NormalizePathTest::TestAdjustment [GOOD] >> PgDumpParserTests::RemovePublicScheme [GOOD] >> PgDumpParserTests::PgCatalogAndAlterComment [GOOD] >> PgDumpParserTests::CreateTablePrimaryKeys [GOOD] >> RecursiveRemoveTests::SecondPass [GOOD] >> YdbCliCsvParserTests::IntegerTypesTestParams [GOOD] >> YdbCliCsvParserTests::IntegerTypesTestValue [GOOD] >> YdbCliCsvParserTests::IntegerTypesTestList [GOOD] >> YdbCliCsvParserTests::DateTypesTestParams [GOOD] >> YdbCliCsvParserTests::DateTypesTestValue [GOOD] >> YdbCliCsvParserTests::DateTypesTestBuildList [GOOD] >> YdbCliCsvParserTests::OtherPrimitiveTypeTestParams [GOOD] >> YdbCliCsvParserTests::OtherPrimitiveTypesTestValue [GOOD] >> YdbCliCsvParserTests::OtherPrimitiveTypesTestBuildList [GOOD] >> YdbCliCsvParserTests::EdgeValuesTestParams [GOOD] >> YdbCliCsvParserTests::MultipleFields [GOOD] >> YdbCliCsvParserTests::RepeatedEscaping [GOOD] >> YdbCliCsvParserTests::ShuffledColumns [GOOD] >> YdbCliCsvParserTests::InferTypesNonNull [GOOD] >> YdbCliCsvParserTests::InferTypesWithNulls [GOOD] >> __main__.py::flake8 [GOOD] >> __main__.py::flake8 [GOOD] |72.5%| [TS] {asan, default-linux-x86_64, release} ydb/library/workload/tpch/ut/unittest >> TpchQueries::ScaleFactor [GOOD] |72.5%| [TS] {RESULT} ydb/tests/functional/sqs/messaging/flake8 |72.5%| [CC] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/columns_set.h_serialized.cpp >> tablet_scheme_tests.py::flake8 [GOOD] >> test_generator.py::flake8 [GOOD] |72.5%| [TS] {RESULT} ydb/library/yql/tests/sql/hybrid_file/part2/py2_flake8 |72.5%| [TS] {RESULT} ydb/library/yql/providers/dq/provider/ut/unittest >> test_init.py::flake8 [GOOD] >> test_cms_erasure.py::flake8 [GOOD] >> test_cms_restart.py::flake8 [GOOD] >> test_cms_state_storage.py::flake8 [GOOD] >> utils.py::flake8 [GOOD] >> test.py::flake8 [GOOD] |72.5%| [TS] {asan, default-linux-x86_64, release} ydb/tests/fq/generic/streaming/flake8 >> test_join.py::flake8 [GOOD] |72.5%| [TS] {asan, default-linux-x86_64, release} ydb/tests/fq/yt/kqp_yt_file/part3/flake8 >> test.py::flake8 [GOOD] |72.5%| [TS] {RESULT} ydb/library/yql/providers/generic/connector/tests/datasource/clickhouse/flake8 |72.5%| [TS] {asan, default-linux-x86_64, release} ydb/tests/tools/kqprun/recipe/flake8 >> __main__.py::flake8 [GOOD] |72.5%| [TS] {asan, default-linux-x86_64, release} ydb/core/viewer/json/ut/unittest >> Json::BasicRendering [GOOD] ------- [TS] {asan, default-linux-x86_64, release} ydb/public/lib/ydb_cli/common/ut/unittest >> YdbCliCsvParserTests::InferTypesWithNulls [GOOD] Test command err: -- SELECT pg_catalog.set_config('search_path', '', false); -- ALTER TABLE public.pgbench_accounts OWNER TO root; -- SELECT pg_catalog.set_config('search_path', '', false); -- ALTER TABLE public.pgbench_accounts OWNER TO root; -- ALTER TABLE ONLY public.pgbench_accounts -- ADD CONSTRAINT pgbench_accounts_pkey PRIMARY KEY (aid); -- ALTER TABLE ONLY public.pgbench_branches -- ADD CONSTRAINT pgbench_branches_pkey PRIMARY KEY (bid); -- ALTER TABLE ONLY public.pgbench_accounts -- 
ADD CONSTRAINT c_widget_field_6 FOREIGN KEY (value_sysmapid) REFERENCES public.sysmaps(sysmapid) ON DELETE CASCADE; -- ALTER TABLE ONLY public.pgbench_accounts -- ADD CONSTRAINT pgbench_accounts_pkey PRIMARY KEY (aid); -- ALTER TABLE ONLY public.pgbench_branches -- ADD CONSTRAINT pgbench_branches_pkey PRIMARY KEY (bid); -- ALTER TABLE ONLY public.pgbench_accounts -- ADD CONSTRAINT c_widget_field_6 FOREIGN KEY (value_sysmapid) REFERENCES public.sysmaps(sysmapid) ON DELETE CASCADE; |72.5%| [TS] {asan, default-linux-x86_64, release} ydb/tests/stress/statistics_workload/flake8 >> __main__.py::flake8 [GOOD] |72.5%| [TS] {asan, default-linux-x86_64, release} ydb/tests/functional/scheme_tests/flake8 >> tablet_scheme_tests.py::flake8 [GOOD] |72.5%| [CC] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/columns_set.h_serialized.cpp |72.5%| [TS] {asan, default-linux-x86_64, release} ydb/tests/functional/cms/flake8 >> utils.py::flake8 [GOOD] |72.5%| [TS] {asan, default-linux-x86_64, release} ydb/tests/functional/benchmarks_init/flake8 >> test_init.py::flake8 [GOOD] |72.5%| [TS] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/merge_split_common_table/fifo/flake8 >> test.py::flake8 [GOOD] |72.5%| [TS] {RESULT} ydb/tests/fq/generic/analytics/flake8 |72.5%| [TS] {RESULT} ydb/library/yql/tests/sql/dq_file/part1/py2_flake8 |72.5%| [TS] {RESULT} ydb/library/benchmarks/runner/result_convert/flake8 |72.5%| [TS] {RESULT} ydb/tests/fq/yds/flake8 >> collection.py::flake8 [GOOD] >> conftest.py::flake8 [GOOD] >> select_datetime_with_service_name.py::flake8 [GOOD] >> select_positive_with_service_name.py::flake8 [GOOD] >> test.py::flake8 [GOOD] >> test.py::py2_flake8 [GOOD] >> __main__.py::flake8 [GOOD] >> test_query_cache.py::flake8 [GOOD] >> conftest.py::flake8 [GOOD] >> test_alter_compression.py::flake8 [GOOD] >> test_alter_tiering.py::flake8 [GOOD] >> test_insert.py::flake8 [GOOD] >> test_read_update_write_load.py::flake8 [GOOD] >> test_scheme_load.py::flake8 [GOOD] >> test_simple.py::flake8 [GOOD] |72.6%| [TS] {RESULT} ydb/library/fyamlcpp/ut/unittest |72.6%| [TS] {RESULT} ydb/tests/fq/yt/kqp_yt_file/part11/flake8 |72.6%| [TS] {RESULT} ydb/library/yql/providers/generic/connector/tests/join/flake8 >> collection.py::flake8 [GOOD] >> conftest.py::flake8 [GOOD] >> select_datetime.py::flake8 [GOOD] >> select_positive.py::flake8 [GOOD] >> select_positive_with_schema.py::flake8 [GOOD] >> test.py::flake8 [GOOD] |72.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/library/yaml_config/yaml_config_ut.cpp |72.6%| [TM] {RESULT} ydb/core/fq/libs/metrics/ut/unittest |72.6%| [TS] {asan, default-linux-x86_64, release} ydb/tests/tools/ydb_serializable/replay/flake8 >> __main__.py::flake8 [GOOD] |72.6%| [TS] {RESULT} ydb/tests/stress/olap_workload/tests/flake8 >> test_sql_streaming.py::flake8 [GOOD] >> test.py::flake8 [GOOD] |72.6%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/providers/generic/connector/tests/datasource/oracle/flake8 >> test.py::flake8 [GOOD] |72.6%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/tests/sql/dq_file/part4/py2_flake8 >> test.py::py2_flake8 [GOOD] |72.6%| [TS] {asan, default-linux-x86_64, release} ydb/tests/functional/query_cache/flake8 >> test_query_cache.py::flake8 [GOOD] |72.6%| [TS] {asan, default-linux-x86_64, release} ydb/tests/olap/scenario/flake8 >> test_simple.py::flake8 [GOOD] >> test_commit.py::flake8 [GOOD] >> test_timeout.py::flake8 [GOOD] |72.6%| [TS] {RESULT} ydb/tests/fq/yt/kqp_yt_file/part7/flake8 |72.6%| [TS] {RESULT} 
ydb/tests/fq/generic/streaming/flake8 |72.6%| [AR] {default-linux-x86_64, release, asan, pic} $(B)/yt/yt/core/libyt-yt-core.a |72.6%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/providers/generic/connector/tests/datasource/postgresql/flake8 >> test.py::flake8 [GOOD] |72.6%| [TS] {RESULT} ydb/library/protobuf_printer/ut/unittest |72.6%| [AR] {BAZEL_UPLOAD, SKIPPED} $(B)/yt/yt/core/libyt-yt-core.a |72.6%| [TS] {RESULT} ydb/tests/stress/kv/tests/flake8 |72.6%| [TS] {RESULT} ydb/library/workload/tpch/ut/unittest |72.6%| [TS] {asan, default-linux-x86_64, release} ydb/tests/fq/streaming_optimize/flake8 >> test_sql_streaming.py::flake8 [GOOD] |72.6%| [TS] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/merge_split_common_table/std/flake8 >> test.py::flake8 [GOOD] |72.6%| [TS] {RESULT} ydb/tests/fq/yt/kqp_yt_file/part3/flake8 >> hive_matchers.py::flake8 [GOOD] >> test_create_tablets.py::flake8 [GOOD] >> test_drain.py::flake8 [GOOD] >> test_kill_tablets.py::flake8 [GOOD] >> test.py::flake8 [GOOD] |72.6%| [TS] {asan, default-linux-x86_64, release} ydb/tests/tools/pq_read/test/flake8 >> test_timeout.py::flake8 [GOOD] |72.6%| [LD] {BAZEL_UPLOAD} $(B)/ydb/public/sdk/cpp/tests/unit/client/draft/ydb-public-sdk-cpp-tests-unit-client-draft >> test.py::py2_flake8 [GOOD] >> conftest.py::flake8 [GOOD] >> test_serverless.py::flake8 [GOOD] |72.6%| [TS] {RESULT} ydb/tests/functional/scheme_tests/flake8 |72.6%| [TS] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/flake8 >> test_kill_tablets.py::flake8 [GOOD] |72.6%| [AR] {RESULT} $(B)/yt/yt/core/libyt-yt-core.a >> test.py::flake8 [GOOD] |72.6%| [TS] {RESULT} ydb/tests/functional/cms/flake8 |72.6%| [TS] {RESULT} ydb/tests/stress/statistics_workload/flake8 |72.6%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/tests/sql/hybrid_file/part10/py2_flake8 >> test.py::py2_flake8 [GOOD] |72.6%| [TS] {asan, default-linux-x86_64, release} ydb/tests/fq/yt/kqp_yt_file/part14/flake8 >> test.py::flake8 [GOOD] |72.6%| [TS] {asan, default-linux-x86_64, release} ydb/tests/functional/serverless/flake8 >> test_serverless.py::flake8 [GOOD] >> S3FileTreeBuilderTest::DeserializesTrailingSlash [GOOD] >> S3FileTreeBuilderTest::Simple [GOOD] >> S3FileTreeBuilderTest::Interesting [GOOD] >> PathListReaderTest::ReadsFilesListFromTreeParams [GOOD] >> S3FileTreeBuilderTest::DeserializesLeadingSlash [GOOD] >> S3FileTreeBuilderTest::DeserializesManySlashes [GOOD] >> S3FileTreeBuilderTest::PassesFileWithZeroSize [GOOD] >> S3FileTreeBuilderTest::DeserializesRootSlash [GOOD] |72.6%| [TS] {RESULT} ydb/tests/tools/kqprun/recipe/flake8 |72.6%| [TS] {RESULT} ydb/tests/fq/yt/kqp_yt_file/part0/flake8 >> test.py::flake8 [GOOD] >> test_bulkupserts_tpch.py::flake8 [GOOD] >> test_insert_delete_duplicate_records.py::flake8 [GOOD] >> test_insertinto_selectfrom.py::flake8 [GOOD] >> test_tiering.py::flake8 [GOOD] >> test_workload_manager.py::flake8 [GOOD] >> test.py::flake8 [GOOD] >> test.py::py2_flake8 [GOOD] |72.6%| [TS] {asan, default-linux-x86_64, release} ydb/tests/fq/yt/kqp_yt_file/part17/flake8 >> test.py::flake8 [GOOD] |72.6%| [TS] {RESULT} ydb/core/viewer/json/ut/unittest |72.6%| [TS] {RESULT} ydb/tests/functional/query_cache/flake8 |72.6%| [TS] {RESULT} ydb/tests/tools/ydb_serializable/replay/flake8 |72.7%| [TS] {RESULT} ydb/library/yql/providers/generic/connector/tests/datasource/oracle/flake8 |72.7%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/providers/s3/range_helpers/ut/unittest >> S3FileTreeBuilderTest::DeserializesRootSlash 
[GOOD] |72.7%| [TS] {RESULT} ydb/tests/olap/scenario/flake8 >> test_s3.py::flake8 [GOOD] >> compare.py::flake8 [GOOD] |72.7%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/tx/schemeshard/generated/codegen/codegen |72.7%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/tests/sql/hybrid_file/part5/py2_flake8 >> test.py::py2_flake8 [GOOD] |72.7%| [TS] {asan, default-linux-x86_64, release} ydb/tests/fq/yt/kqp_yt_file/part13/flake8 >> test.py::flake8 [GOOD] >> alter_compression.py::flake8 [GOOD] >> base.py::flake8 [GOOD] >> collection.py::flake8 [GOOD] >> conftest.py::flake8 [GOOD] >> select_datetime.py::flake8 [GOOD] >> select_positive.py::flake8 [GOOD] >> test.py::flake8 [GOOD] |72.7%| [TS] {asan, default-linux-x86_64, release} ydb/tests/sql/large/flake8 >> test_workload_manager.py::flake8 [GOOD] |72.7%| [TS] {asan, default-linux-x86_64, release} ydb/core/viewer/tests/flake8 >> test.py::flake8 [GOOD] |72.7%| [TS] {RESULT} ydb/library/yql/providers/generic/connector/tests/datasource/postgresql/flake8 |72.7%| [TS] {RESULT} ydb/library/yql/tests/sql/dq_file/part4/py2_flake8 >> test_crud.py::flake8 [GOOD] >> test_discovery.py::flake8 [GOOD] >> test_execute_scheme.py::flake8 [GOOD] >> test_indexes.py::flake8 [GOOD] >> test_insert.py::flake8 [GOOD] >> test_isolation.py::flake8 [GOOD] >> test_public_api.py::flake8 [GOOD] >> test_read_table.py::flake8 [GOOD] >> test_session_grace_shutdown.py::flake8 [GOOD] >> test_session_pool.py::flake8 [GOOD] >> __main__.py::flake8 [GOOD] >> parser.py::flake8 [GOOD] |72.7%| [TS] {RESULT} ydb/tests/fq/streaming_optimize/flake8 |72.7%| [TS] {asan, default-linux-x86_64, release} ydb/library/benchmarks/runner/result_compare/flake8 >> compare.py::flake8 [GOOD] |72.7%| [TS] {asan, default-linux-x86_64, release} ydb/tests/datashard/s3/flake8 >> test_s3.py::flake8 [GOOD] |72.7%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/sequenceshard/libcore-tx-sequenceshard.a |72.7%| [TS] {RESULT} ydb/public/lib/ydb_cli/common/ut/unittest >> test_async_replication.py::flake8 [GOOD] >> TPathTests::TestRegexFromWildcards [GOOD] >> TPathTests::NormalizeEmpty [GOOD] >> TPathTests::NormalizeNoSlashes [GOOD] >> TPathTests::NormalizeSlashes [GOOD] >> TPathTests::NormalizeWithSlashes [GOOD] >> test.py::flake8 [GOOD] >> test.py::py2_flake8 [GOOD] >> test_multinode_cluster.py::flake8 [GOOD] >> test_recompiles_requests.py::flake8 [GOOD] |72.7%| [TS] {asan, default-linux-x86_64, release} ydb/tests/olap/column_family/compression/flake8 >> base.py::flake8 [GOOD] |72.7%| [TS] {asan, default-linux-x86_64, release} ydb/tests/functional/api/flake8 >> test_session_pool.py::flake8 [GOOD] |72.7%| [TS] {asan, default-linux-x86_64, release} ydb/tests/olap/docs/generator/flake8 >> parser.py::flake8 [GOOD] |72.7%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/providers/generic/connector/tests/datasource/mysql/flake8 >> test.py::flake8 [GOOD] |72.7%| [TS] {RESULT} ydb/tests/functional/sqs/merge_split_common_table/std/flake8 |72.7%| [TS] {RESULT} ydb/tests/tools/pq_read/test/flake8 |72.7%| [TS] {RESULT} ydb/tests/functional/hive/flake8 |72.7%| [TS] {RESULT} ydb/library/yql/tests/sql/hybrid_file/part10/py2_flake8 >> test.py::flake8 [GOOD] |72.7%| [TS] {RESULT} ydb/tests/fq/yt/kqp_yt_file/part18/flake8 |72.7%| [TS] {RESULT} ydb/tests/functional/serverless/flake8 |72.7%| [TS] {asan, default-linux-x86_64, release} ydb/tests/datashard/async_replication/flake8 >> test_async_replication.py::flake8 [GOOD] |72.7%| [TS] {asan, default-linux-x86_64, release} 
ydb/library/yql/tests/sql/dq_file/part17/py2_flake8 >> test.py::py2_flake8 [GOOD] |72.7%| [TS] {asan, default-linux-x86_64, release} ydb/tests/fq/solomon/flake8 >> test.py::flake8 [GOOD] >> test.py::flake8 [GOOD] |72.7%| [TS] {RESULT} ydb/tests/fq/yt/kqp_yt_file/part14/flake8 |72.7%| [TS] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/multinode/flake8 >> test_recompiles_requests.py::flake8 [GOOD] |72.7%| [TS] {asan, default-linux-x86_64, release} ydb/public/tools/lib/cmds/ut/flake8 >> test.py::flake8 [GOOD] |72.7%| [CC] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/reader/common/description.h_serialized.cpp |72.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/yt/export_yt.cpp |72.7%| [TS] {RESULT} ydb/tests/fq/yt/kqp_yt_file/part17/flake8 |72.7%| [TS] {RESULT} ydb/tests/sql/flake8 |72.7%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/providers/s3/object_listers/ut/unittest >> TPathTests::NormalizeWithSlashes [GOOD] |72.7%| [TS] {asan, default-linux-x86_64, release} ydb/tests/functional/serializable/flake8 >> test.py::flake8 [GOOD] |72.7%| [CC] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/common/description.h_serialized.cpp >> test_postgres.py::flake8 [GOOD] >> test_sql.py::flake8 [GOOD] |72.7%| [AR] {RESULT} $(B)/ydb/core/tx/sequenceshard/libcore-tx-sequenceshard.a |72.7%| [TS] {RESULT} ydb/library/yql/tests/sql/hybrid_file/part5/py2_flake8 |72.8%| [TS] {RESULT} ydb/tests/sql/large/flake8 |72.8%| [TS] {RESULT} ydb/tests/fq/yt/kqp_yt_file/part13/flake8 |72.8%| [TS] {RESULT} ydb/core/viewer/tests/flake8 |72.8%| [TS] {asan, default-linux-x86_64, release} ydb/tests/functional/postgresql/flake8 >> test_postgres.py::flake8 [GOOD] |72.8%| [TS] {RESULT} ydb/public/lib/ydb_cli/commands/topic_workload/ut/unittest |72.8%| [TS] {RESULT} ydb/tests/functional/api/flake8 |72.8%| [TS] {RESULT} ydb/tests/datashard/s3/flake8 |72.8%| [TS] {RESULT} ydb/library/benchmarks/runner/result_compare/flake8 |72.8%| [TS] {asan, default-linux-x86_64, release} ydb/tests/functional/canonical/flake8 >> test_sql.py::flake8 [GOOD] |72.8%| [TS] {RESULT} ydb/library/yql/providers/s3/range_helpers/ut/unittest |72.8%| [TS] {RESULT} ydb/library/yql/providers/generic/connector/tests/datasource/mysql/flake8 >> TYqlParamParserTest::TestCaseInsensitiveTypes [GOOD] >> TYqlParamParserTest::TestStructType [GOOD] >> TYqlParamParserTest::TestListType [GOOD] >> TYqlParamParserTest::TestNestedTypes [GOOD] >> TYqlParamParserTest::TestTupleType [GOOD] >> TYqlParamParserTest::TestMultipleParams [GOOD] >> TYqlParamParserTest::TestOptionalTypes >> TYqlParamParserTest::TestDecimalType [GOOD] >> TYqlParamParserTest::TestDictType [GOOD] >> TYqlParamParserTest::TestBasicTypes [GOOD] >> TestTokenExchange::BadResponseFromConfig >> TestTokenExchange::Exchanges [GOOD] >> TestTokenExchange::BadParamsFromConfig [GOOD] >> TestTokenExchange::ExchangesFromConfig [GOOD] >> TestTokenExchange::BadParams [GOOD] >> TestTokenExchange::BadResponse [GOOD] >> conftest.py::flake8 [GOOD] >> test_ydb_backup.py::flake8 [GOOD] >> test_ydb_flame_graph.py::flake8 [GOOD] >> test_ydb_impex.py::flake8 [GOOD] >> test_ydb_recursive_remove.py::flake8 [GOOD] >> test_ydb_scheme.py::flake8 [GOOD] >> test_ydb_scripting.py::flake8 [GOOD] >> test_ydb_sql.py::flake8 [GOOD] >> TYqlParamParserTest::TestOptionalTypes [GOOD] >> test_ydb_table.py::flake8 [GOOD] >> TYqlParamParserTest::TestInvalidQuery [GOOD] >> TYqlParamParserTest::TestWhitespace [GOOD] >> TYqlParamParserTest::TestComplexQuery [GOOD] >> 
TYqlParamParserTest::TestAllTypes [GOOD] >> TestTokenExchange::BadResponseFromConfig [GOOD] >> TestTokenExchange::UpdatesToken |72.8%| [TS] {RESULT} ydb/tests/olap/docs/generator/flake8 |72.8%| [TS] {RESULT} ydb/tests/olap/column_family/compression/flake8 |72.8%| [TS] {RESULT} ydb/tests/datashard/async_replication/flake8 |72.8%| [TS] {RESULT} ydb/library/yql/tests/sql/dq_file/part17/py2_flake8 |72.8%| [TS] {RESULT} ydb/public/tools/lib/cmds/ut/flake8 |72.8%| [TS] {RESULT} ydb/tests/fq/solomon/flake8 |72.8%| [TS] {RESULT} ydb/tests/functional/serializable/flake8 |72.8%| [TS] {asan, default-linux-x86_64, release} ydb/tests/functional/ydb_cli/flake8 >> test_ydb_table.py::flake8 [GOOD] |72.8%| [TS] {RESULT} ydb/tests/functional/sqs/multinode/flake8 |72.8%| [TS] {RESULT} ydb/tests/functional/ttl/flake8 |72.8%| [TS] {RESULT} ydb/tests/functional/postgresql/flake8 |72.8%| [TS] {asan, default-linux-x86_64, release} ydb/public/lib/ydb_cli/common/yql_parser/ut/unittest >> TYqlParamParserTest::TestAllTypes [GOOD] |72.8%| [TS] {RESULT} ydb/tests/functional/sqs/large/flake8 |72.8%| [TS] {RESULT} ydb/library/yql/providers/s3/object_listers/ut/unittest |72.8%| [TS] {RESULT} ydb/core/config/ut/unittest |72.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/yt/export_yt.cpp |72.9%| [TS] {RESULT} ydb/tests/functional/canonical/flake8 >> TestFileCache::Evict [GOOD] >> TestFileCache::AddAfterRemoveAcquired [GOOD] >> TestFileCache::Acquire [GOOD] >> TestFileCache::Add [GOOD] >> TestFileCache::AcquireRelease [GOOD] >> TestFileCache::Find [GOOD] >> TestFileCache::ContainsReleased [GOOD] >> TestFileCache::AcquireSingleFile2Times [GOOD] >> TestFileCache::Create [GOOD] |72.9%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/predicate/libcolumnshard-engines-predicate.a |72.9%| [TS] {RESULT} ydb/tests/functional/tenants/flake8 |72.9%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/predicate/libcolumnshard-engines-predicate.a |72.9%| [TS] {RESULT} ydb/tests/functional/ydb_cli/flake8 |72.9%| [TS] {RESULT} ydb/tests/functional/benchmarks_init/flake8 |72.9%| [TA] {RESULT} $(B)/ydb/core/persqueue/codecs/ut/test-results/unittest/{meta.json ... results_accumulator.log} |72.9%| [TS] {RESULT} ydb/tests/functional/sqs/merge_split_common_table/fifo/flake8 |72.9%| [TA] {RESULT} $(B)/ydb/library/yql/public/ydb_issue/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} |72.9%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/providers/dq/runtime/ut/unittest >> TestFileCache::Create [GOOD] |72.9%| [TS] {RESULT} ydb/public/lib/ydb_cli/common/yql_parser/ut/unittest >> TestTokenExchange::UpdatesToken [GOOD] >> TestTokenExchange::UpdatesTokenFromConfig >> ResponseTest::UniversalResponseBuf [GOOD] >> ResponseTest::UniversalResponseRefMsg [GOOD] >> ResponseTest::UniversalResponseMsg [GOOD] >> StreamAdaptor::OrderingOneThread >> ResponseTest::UniversalResponseRefBuf [GOOD] |72.9%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/sequenceshard/libcore-tx-sequenceshard.a |72.9%| [TS] {RESULT} ydb/library/yql/providers/dq/runtime/ut/unittest |72.9%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/predicate/libcolumnshard-engines-predicate.a >> StreamAdaptor::OrderingOneThread [GOOD] >> StreamAdaptor::OrderingTwoThreads >> StreamAdaptor::OrderingTwoThreads [GOOD] >> StreamAdaptor::OrderingManyThreads >> StreamAdaptor::OrderingManyThreads [GOOD] >> StreamAdaptor::OrderingOneThreadWithSleep >> BackupToolValuePrintParse::ParseValuesFromString [GOOD] >> BackupToolValuePrintParse::ParseValuesFromFile >> BackupToolValuePrintParse::ParseValuesFromFile [GOOD] >> BackupToolValuePrintParse::ResultSetBoolPrintTest [GOOD] >> BackupToolValuePrintParse::ResultSetInt8PrintTest [GOOD] >> BackupToolValuePrintParse::ResultSetInt16PrintTest [GOOD] >> BackupToolValuePrintParse::ResultSetInt32PrintTest [GOOD] >> BackupToolValuePrintParse::ResultSetInt64PrintTest [GOOD] >> BackupToolValuePrintParse::ResultSetFloatPrintTest [GOOD] >> BackupToolValuePrintParse::ResultSetIntarvalsPrintTest [GOOD] >> BackupToolValuePrintParse::ResultSetStringPrintTest [GOOD] >> BackupToolValuePrintParse::ResultSetUtf8PrintTest [GOOD] >> BackupToolValuePrintParse::ResultSetVoidPrintTest [GOOD] >> BackupToolValuePrintParse::ResultSetDecimalPrintTest [GOOD] >> BackupToolValuePrintParse::ResultSetDyNumberPrintTest [GOOD] >> BackupToolValuePrintParse::ResultSetJsonDocumentPrintTest [GOOD] >> UtilTest::SizeFromStringParsing [GOOD] >> UtilTest::SizeFromStringParsingWithDecimalPrefix [GOOD] >> UtilTest::SizeFromStringParsingWithBinaryPrefix [GOOD] >> UtilTest::SizeFromStringParsingErrors [GOOD] >> UtilTest::PathParseTest [GOOD] >> TFunctionsMetadataTest::Serialization >> TFunctionsMetadataTest::Serialization [GOOD] |72.9%| [TS] {asan, default-linux-x86_64, release} ydb/library/backup/ut/unittest >> UtilTest::PathParseTest [GOOD] >> TestTokenExchange::UpdatesTokenFromConfig [GOOD] >> StreamAdaptor::OrderingOneThreadWithSleep [GOOD] >> StreamAdaptor::OrderingTwoThreadsWithSleep >> TestTokenExchange::UsesCachedToken [GOOD] >> TestTokenExchange::UpdatesTokenInBackgroud >> TComputeActorAsyncInputHelperTest::PollAsyncInput [GOOD] >> TComputeActorTest::Empty [GOOD] >> TComputeActorTest::ReceiveData [GOOD] >> TDqSourceWatermarkTrackerTest::StartWatermark1 [GOOD] >> TDqSourceWatermarkTrackerTest::StartWatermark2 [GOOD] >> TDqSourceWatermarkTrackerTest::StartWatermark3 [GOOD] >> TDqSourceWatermarkTrackerTest::WatermarkMovement1 [GOOD] >> TDqSourceWatermarkTrackerTest::WatermarkMovement2 [GOOD] >> TDqSourceWatermarkTrackerTest::WatermarkMovement3 [GOOD] >> TDqSourceWatermarkTrackerTest::WatermarkMovement4 [GOOD] >> TDqSourceWatermarkTrackerTest::IdleFirstShouldReturnStartWatermark [GOOD] >> TDqSourceWatermarkTrackerTest::Idle1 [GOOD] >> TDqSourceWatermarkTrackerTest::IdleNextCheckAt [GOOD] >> TIssuesBufferTest::TestEmpty [GOOD] >> TIssuesBufferTest::TestSimplePush [GOOD] >> 
TIssuesBufferTest::TestPushWithOverflow [GOOD] >> TIssuesBufferTest::TestSmallBuffer [GOOD] >> TIssuesBufferTest::TestUseAfterDump [GOOD] |72.9%| [TS] {asan, default-linux-x86_64, release} ydb/core/client/metadata/ut/unittest >> TFunctionsMetadataTest::Serialization [GOOD] |72.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/config/init/init_ut.cpp >> StreamAdaptor::OrderingTwoThreadsWithSleep [GOOD] >> StreamAdaptor::OrderingManyThreadsWithSleep ------- [TS] {asan, default-linux-x86_64, release} ydb/library/yql/dq/actors/compute/ut/unittest >> TIssuesBufferTest::TestUseAfterDump [GOOD] Test command err: 2025-05-07T08:06:59.610770Z :Unused ERROR: dq_compute_actor_channels.cpp:133: TxId: TxId, task: 0. Unexpected input channelId: 0 seqNo: 0, expected: 1 |73.0%| [LD] {BAZEL_UPLOAD} $(B)/ydb/tools/tsserver/tsserver >> StreamAdaptor::OrderingManyThreadsWithSleep [GOOD] |73.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/config/init/init_ut.cpp |73.0%| [TM] {asan, default-linux-x86_64, release} ydb/library/grpc/server/ut/unittest >> StreamAdaptor::OrderingManyThreadsWithSleep [GOOD] >> YtLookupActor::Lookup >> TSchedulerTest::ReserveForSmall >> TSchedulerTest::SimpleFifo [GOOD] >> YtLookupActor::Lookup [GOOD] >> TSchedulerTest::ReserveForSmall [GOOD] >> TSchedulerTest::OneUserForCluster [GOOD] >> TSchedulerTest::DoNotReserveForSmall [GOOD] >> TSchedulerTest::NewbieFirst [GOOD] >> TSchedulerTest::FifoAfterOneHour [GOOD] >> TSchedulerTest::HalfWorkersForSmall [GOOD] >> TSchedulerTest::Use75PercentForLargeInNonOverload [GOOD] >> TSchedulerTest::UseOnlyHalfForLargeInOverload [GOOD] |73.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/benchmarks_init/ydb-tests-functional-benchmarks_init |73.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/functional/benchmarks_init/ydb-tests-functional-benchmarks_init |73.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/config/ydb-tests-functional-config ------- [TS] {asan, default-linux-x86_64, release} ydb/library/yql/providers/yt/actors/ut/unittest >> YtLookupActor::Lookup [GOOD] Test command err: 2025-05-07 08:07:01.122 INFO ydb-library-yql-providers-yt-actors-ut(pid=15520, tid=0x00007F47C1190B00) [default] storage.cpp:178: FileStorage initialized in "/home/runner/.ya/build/build_root/zvgn/0015e6/r3tmp/tmpevlHHO/", temporary dir: "/home/runner/.ya/build/build_root/zvgn/0015e6/r3tmp/tmpevlHHO/15520", files: 0, total size: 0 2025-05-07 08:07:01.238 INFO ydb-library-yql-providers-yt-actors-ut(pid=15520, tid=0x00007F47C1190B00) [YT] yql_yt_lookup_actor.cpp:103: New Yt proivider lookup source actor(ActorId=[1:4:2051]) for cluster=Plato, table=Lookup 2025-05-07 08:07:01.240 DEBUG ydb-library-yql-providers-yt-actors-ut(pid=15520, tid=0x00007F47C1190B00) [YT] yql_yt_lookup_actor.cpp:172: ActorId=[1:4:2051] Got LookupRequest for 4 keys |72.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/scheme_shard/ydb-tests-functional-scheme_shard |73.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/functional/config/ydb-tests-functional-config |73.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/functional/scheme_shard/ydb-tests-functional-scheme_shard |73.0%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/providers/dq/scheduler/ut/unittest >> TSchedulerTest::UseOnlyHalfForLargeInOverload [GOOD] |73.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/cms/ydb-tests-functional-cms |73.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/functional/cms/ydb-tests-functional-cms |73.0%| [LD] 
{default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/sqs/messaging/ydb-tests-functional-sqs-messaging |73.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/datashard/s3/ydb-tests-datashard-s3 |73.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/functional/sqs/messaging/ydb-tests-functional-sqs-messaging |73.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/datashard/s3/ydb-tests-datashard-s3 |73.0%| [LD] {RESULT} $(B)/ydb/tests/functional/benchmarks_init/ydb-tests-functional-benchmarks_init |73.0%| [LD] {RESULT} $(B)/ydb/tests/functional/config/ydb-tests-functional-config |73.0%| [LD] {RESULT} $(B)/ydb/tests/functional/sqs/messaging/ydb-tests-functional-sqs-messaging |73.0%| [LD] {RESULT} $(B)/ydb/tests/datashard/s3/ydb-tests-datashard-s3 |73.0%| [LD] {RESULT} $(B)/ydb/tests/functional/scheme_shard/ydb-tests-functional-scheme_shard |73.0%| [LD] {BAZEL_UPLOAD} $(B)/ydb/public/sdk/cpp/tests/integration/sessions_pool/public-sdk-cpp-tests-integration-sessions_pool |73.0%| [TS] {RESULT} ydb/core/client/metadata/ut/unittest |73.0%| [LD] {RESULT} $(B)/ydb/tests/functional/cms/ydb-tests-functional-cms |73.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/olap/oom/ydb-tests-olap-oom |73.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/olap/oom/ydb-tests-olap-oom |73.0%| [LD] {RESULT} $(B)/ydb/tests/olap/oom/ydb-tests-olap-oom >> TestTokenExchange::UpdatesTokenInBackgroud [GOOD] >> TestTokenExchange::UpdatesTokenAndRetriesErrors |73.0%| [TS] {RESULT} ydb/library/yql/providers/yt/actors/ut/unittest >> TestIssuesGrouping::ShouldCountEveryIssue [GOOD] >> TestIssuesGrouping::ShouldRemoveOldIssues [GOOD] >> TestIssuesGrouping::ShouldRemoveIfMoreThanMaxIssues [GOOD] >> TestIssuesGrouping::ShouldRemoveTheOldestIfMoreThanMaxIssues [GOOD] >> TestIssuesGrouping::ShouldSaveSubIssues [GOOD] >> ResultReceiver::ReceiveStatus [GOOD] >> ResultReceiver::ReceiveError [GOOD] >> ResultReceiver::WriteQueue [GOOD] |73.0%| [TM] {RESULT} ydb/library/grpc/server/ut/unittest |73.0%| [TS] {RESULT} ydb/library/yql/providers/dq/scheduler/ut/unittest |73.0%| [TS] {RESULT} ydb/library/backup/ut/unittest |73.0%| [TS] {RESULT} ydb/library/yql/dq/actors/compute/ut/unittest |73.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/rename/ydb-tests-functional-rename |73.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/functional/rename/ydb-tests-functional-rename |73.0%| [LD] {RESULT} $(B)/ydb/tests/functional/rename/ydb-tests-functional-rename |73.0%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/providers/dq/actors/ut/unittest >> ResultReceiver::WriteQueue [GOOD] |73.1%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/config/init/libcore-config-init.a |73.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/tpc/medium/ydb-tests-functional-tpc-medium |73.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/datashard/copy_table/ydb-tests-datashard-copy_table |73.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/functional/tpc/medium/ydb-tests-functional-tpc-medium |73.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/datashard/copy_table/ydb-tests-datashard-copy_table |73.1%| [LD] {RESULT} $(B)/ydb/tests/functional/tpc/medium/ydb-tests-functional-tpc-medium |73.1%| [AR] {RESULT} $(B)/ydb/core/config/init/libcore-config-init.a |73.1%| [LD] {RESULT} $(B)/ydb/tests/datashard/copy_table/ydb-tests-datashard-copy_table |73.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/api/ydb-tests-functional-api |73.1%| [LD] 
{BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/functional/api/ydb-tests-functional-api |73.1%| [LD] {RESULT} $(B)/ydb/tests/functional/api/ydb-tests-functional-api |73.1%| [TS] {RESULT} ydb/library/yql/providers/dq/actors/ut/unittest |73.1%| [LD] {BAZEL_UPLOAD} $(B)/ydb/public/sdk/cpp/tests/unit/client/driver/ydb-public-sdk-cpp-tests-unit-client-driver >> test_init.py::TestClickbenchInit::test_s1_s3 >> test_generator.py::TestTpchGenerator::test_s1_parts >> test_init.py::TestTpchInit::test_s1_s3 >> test_generator.py::TestTpcdsGenerator::test_s1 >> test_generator.py::TestTpchGenerator::test_s1_state_and_parts >> test_generator.py::TestTpchGenerator::test_s1_state |73.1%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/config/init/libcore-config-init.a >> test_generator.py::TestTpcdsGenerator::test_s1_state_and_parts >> test_init.py::TestTpcdsInit::test_s1_s3 >> test_init.py::TestTpchInit::test_s1_column_decimal >> test_generator.py::TestTpcdsGenerator::test_s1_state >> test_init.py::TestClickbenchInit::test_s1_s3 [GOOD] >> test_init.py::TestTpcdsInit::test_s100_column >> test_init.py::TestTpchInit::test_s1_s3 [GOOD] >> test_init.py::TestTpchInit::test_s1_column_decimal_ydb >> test_init.py::TestTpchInit::test_s1_row >> test_init.py::TestTpchInit::test_s1_column_decimal [GOOD] |73.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/benchmarks_init/py3test >> test_init.py::TestTpcdsInit::test_s1_s3 [GOOD] >> test_init.py::TestTpchInit::test_s100_column >> test_init.py::TestTpchInit::test_s1_column_decimal_ydb [GOOD] >> test_init.py::TestTpchInit::test_s1_row [GOOD] >> test_init.py::TestTpcdsInit::test_s100_column [GOOD] >> test_init.py::TestTpchInit::test_s100_column [GOOD] |73.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/benchmarks_init/py3test >> test_init.py::TestTpchInit::test_s1_s3 [GOOD] |73.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/benchmarks_init/py3test >> test_init.py::TestTpchInit::test_s1_column_decimal [GOOD] |73.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/yql/providers/s3/compressors/ut/ydb-library-yql-providers-s3-compressors-ut |73.1%| [LD] {RESULT} $(B)/ydb/library/yql/providers/s3/compressors/ut/ydb-library-yql-providers-s3-compressors-ut |73.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/library/yql/providers/s3/compressors/ut/ydb-library-yql-providers-s3-compressors-ut |73.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/benchmarks_init/py3test >> test_init.py::TestTpchInit::test_s1_row [GOOD] |73.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/benchmarks_init/py3test >> test_init.py::TestTpchInit::test_s1_column_decimal_ydb [GOOD] |73.1%| [LD] {BAZEL_UPLOAD} $(B)/ydb/tests/functional/sdk/cpp/sdk_credprovider/ydb-tests-functional-sdk-cpp-sdk_credprovider |73.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/benchmarks_init/py3test >> test_init.py::TestTpchInit::test_s100_column [GOOD] |73.1%| [LD] {BAZEL_UPLOAD} $(B)/ydb/tests/functional/kqp/kqp_indexes/ydb-tests-functional-kqp-kqp_indexes |73.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/benchmarks_init/py3test >> test_init.py::TestTpcdsInit::test_s100_column [GOOD] |73.1%| [LD] {BAZEL_UPLOAD} $(B)/ydb/public/sdk/cpp/tests/integration/sessions/ydb-public-sdk-cpp-tests-integration-sessions |73.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/sqs/cloud/ydb-tests-functional-sqs-cloud |73.1%| [LD] {BAZEL_UPLOAD, SKIPPED} 
$(B)/ydb/tests/functional/sqs/cloud/ydb-tests-functional-sqs-cloud |73.1%| [LD] {RESULT} $(B)/ydb/tests/functional/sqs/cloud/ydb-tests-functional-sqs-cloud |73.1%| [LD] {BAZEL_UPLOAD} $(B)/ydb/public/sdk/cpp/tests/integration/server_restart/public-sdk-cpp-tests-integration-server_restart |73.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/fq/common/ydb-tests-fq-common |73.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/fq/common/ydb-tests-fq-common |73.1%| [LD] {BAZEL_UPLOAD} $(B)/ydb/tests/fq/yt/kqp_yt_import/ydb-tests-fq-yt-kqp_yt_import |73.1%| [LD] {RESULT} $(B)/ydb/tests/fq/common/ydb-tests-fq-common |73.1%| [LD] {BAZEL_UPLOAD} $(B)/ydb/public/sdk/cpp/tests/unit/client/discovery_mutator/sdk-cpp-tests-unit-client-discovery_mutator |73.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/serverless/ydb-tests-functional-serverless |73.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/functional/serverless/ydb-tests-functional-serverless |73.2%| [LD] {RESULT} $(B)/ydb/tests/functional/serverless/ydb-tests-functional-serverless |73.2%| [LD] {BAZEL_UPLOAD} $(B)/ydb/public/sdk/cpp/tests/integration/basic_example/public-sdk-cpp-tests-integration-basic_example |73.2%| [LD] {BAZEL_UPLOAD} $(B)/ydb/tests/functional/kqp/kqp_query_session/ydb-tests-functional-kqp-kqp_query_session >> TCompressorTests::SuccessLz4 [GOOD] >> TCompressorTests::WrongMagicLz4 [GOOD] >> TCompressorTests::ErrorLz4 [GOOD] |73.2%| [LD] {BAZEL_UPLOAD} $(B)/ydb/public/lib/idx_test/ut/ydb-public-lib-idx_test-ut |73.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/tools/pq_read/test/ydb-tests-tools-pq_read-test |73.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/tools/pq_read/test/ydb-tests-tools-pq_read-test |73.2%| [LD] {RESULT} $(B)/ydb/tests/tools/pq_read/test/ydb-tests-tools-pq_read-test |73.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/postgresql/ydb-tests-functional-postgresql |73.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/functional/postgresql/ydb-tests-functional-postgresql |73.2%| [LD] {RESULT} $(B)/ydb/tests/functional/postgresql/ydb-tests-functional-postgresql |73.2%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/providers/s3/compressors/ut/unittest >> TCompressorTests::ErrorLz4 [GOOD] |73.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/sqs/merge_split_common_table/std/functional-sqs-merge_split_common_table-std |73.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/functional/sqs/merge_split_common_table/std/functional-sqs-merge_split_common_table-std |73.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/tools/kqprun/recipe/kqprun_recipe |73.2%| [LD] {RESULT} $(B)/ydb/tests/functional/sqs/merge_split_common_table/std/functional-sqs-merge_split_common_table-std |73.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/tools/kqprun/recipe/kqprun_recipe |73.2%| [LD] {RESULT} $(B)/ydb/tests/tools/kqprun/recipe/kqprun_recipe |73.2%| [TS] {RESULT} ydb/library/yql/providers/s3/compressors/ut/unittest |73.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stress/simple_queue/tests/ydb-tests-stress-simple_queue-tests |73.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stress/simple_queue/tests/ydb-tests-stress-simple_queue-tests |73.2%| [LD] {RESULT} $(B)/ydb/tests/stress/simple_queue/tests/ydb-tests-stress-simple_queue-tests |73.2%| [LD] {BAZEL_UPLOAD} $(B)/ydb/public/sdk/cpp/tests/integration/bulk_upsert/ydb-public-sdk-cpp-tests-integration-bulk_upsert |73.2%| [LD] {BAZEL_UPLOAD} 
$(B)/ydb/library/yql/dq/runtime/ut/ydb-library-yql-dq-runtime-ut |73.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stress/olap_workload/tests/ydb-tests-stress-olap_workload-tests |73.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stress/olap_workload/tests/ydb-tests-stress-olap_workload-tests |73.2%| [LD] {RESULT} $(B)/ydb/tests/stress/olap_workload/tests/ydb-tests-stress-olap_workload-tests |73.2%| [LD] {BAZEL_UPLOAD} $(B)/ydb/public/sdk/cpp/examples/basic_example/basic_example |73.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/serializable/ydb-tests-functional-serializable |73.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/functional/serializable/ydb-tests-functional-serializable |73.2%| [LD] {RESULT} $(B)/ydb/tests/functional/serializable/ydb-tests-functional-serializable >> test_init.py::TestTpchInit::test_s1_column >> test_init.py::TestTpcdsInit::test_s1_column_decimal_ydb >> test_init.py::TestTpchInit::test_s1_column [GOOD] >> test_generator.py::TestTpcdsGenerator::test_s1_parts |73.2%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/viewer/json/ut/ydb-core-viewer-json-ut |73.2%| [LD] {BAZEL_UPLOAD} $(B)/ydb/tests/functional/replication/ydb-tests-functional-replication |73.2%| [LD] {BAZEL_UPLOAD} $(B)/ydb/library/yql/tools/dq/dq_cli/dq_cli >> test_init.py::TestTpcdsInit::test_s1_column_decimal_ydb [GOOD] >> test_init.py::TestTpcdsInit::test_s1_row |73.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/benchmarks_init/py3test >> test_init.py::TestTpchInit::test_s1_column [GOOD] |73.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stress/oltp_workload/tests/ydb-tests-stress-oltp_workload-tests |73.2%| [LD] {RESULT} $(B)/ydb/tests/stress/oltp_workload/tests/ydb-tests-stress-oltp_workload-tests |73.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stress/oltp_workload/tests/ydb-tests-stress-oltp_workload-tests |73.3%| [LD] {BAZEL_UPLOAD} $(B)/ydb/library/yql/providers/dq/provider/ut/ydb-library-yql-providers-dq-provider-ut >> test_init.py::TestTpcdsInit::test_s1_row [GOOD] |73.3%| [LD] {BAZEL_UPLOAD} $(B)/ydb/tests/functional/kqp/kqp_query_svc/ydb-tests-functional-kqp-kqp_query_svc |73.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/encryption/ydb-tests-functional-encryption >> test_init.py::TestClickbenchInit::test_s1_column |73.3%| [LD] {RESULT} $(B)/ydb/tests/functional/encryption/ydb-tests-functional-encryption |73.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/functional/encryption/ydb-tests-functional-encryption >> test_generator.py::TestTpchGenerator::test_s1 |73.3%| [LD] {BAZEL_UPLOAD} $(B)/ydb/tests/tools/idx_test/idx_test |73.3%| [LD] {BAZEL_UPLOAD} $(B)/ydb/tests/functional/backup/ydb-tests-functional-backup >> test_init.py::TestClickbenchInit::test_s1_column [GOOD] >> test_init.py::TestClickbenchInit::test_s1_row |73.3%| [LD] {BAZEL_UPLOAD} $(B)/ydb/public/lib/ydb_cli/commands/topic_workload/ut/ydb-public-lib-ydb_cli-commands-topic_workload-ut |73.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/benchmarks_init/py3test >> test_init.py::TestTpcdsInit::test_s1_row [GOOD] |73.3%| [LD] {BAZEL_UPLOAD} $(B)/ydb/public/sdk/cpp/examples/topic_reader/transaction/read_from_topic_in_transaction |73.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/benchmarks_init/py3test >> test_init.py::TestClickbenchInit::test_s1_row [GOOD] |73.3%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/tests/kikimr_tpch/ydb-core-kqp-tests-kikimr_tpch |73.3%| [LD] {BAZEL_UPLOAD} 
$(B)/ydb/public/sdk/cpp/examples/pagination/pagination |73.3%| [LD] {BAZEL_UPLOAD} $(B)/ydb/public/sdk/cpp/examples/topic_reader/simple/simple_persqueue_reader |73.3%| [LD] {BAZEL_UPLOAD} $(B)/ydb/public/sdk/cpp/examples/topic_reader/eventloop/persqueue_reader_eventloop |73.3%| [LD] {BAZEL_UPLOAD} $(B)/ydb/public/sdk/cpp/examples/topic_writer/transaction/topic_writer_transaction |73.3%| [LD] {BAZEL_UPLOAD} $(B)/ydb/public/sdk/cpp/examples/bulk_upsert_simple/bulk_upsert_simple |73.3%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/config/ut/ydb-core-config-ut |73.3%| [LD] {BAZEL_UPLOAD} $(B)/ydb/library/yql/providers/generic/connector/tests/datasource/clickhouse/tests-datasource-clickhouse |73.3%| [LD] {BAZEL_UPLOAD} $(B)/ydb/library/yql/providers/generic/connector/tests/join/yql-providers-generic-connector-tests-join |73.3%| [LD] {BAZEL_UPLOAD} $(B)/ydb/public/sdk/cpp/examples/secondary_index_builtin/secondary_index_builtin |73.3%| [LD] {BAZEL_UPLOAD} $(B)/ydb/library/yql/providers/generic/connector/tests/datasource/oracle/tests-datasource-oracle |73.3%| [LD] {BAZEL_UPLOAD} $(B)/ydb/library/yql/providers/generic/connector/tests/datasource/ms_sql_server/datasource-ms_sql_server |73.3%| [LD] {BAZEL_UPLOAD} $(B)/ydb/public/sdk/cpp/examples/vector_index/vector_index |73.3%| [LD] {BAZEL_UPLOAD} $(B)/ydb/library/yql/providers/generic/connector/tests/datasource/ydb/connector-tests-datasource-ydb |73.3%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/protos/libydb-core-protos.a |73.3%| [LD] {BAZEL_UPLOAD} $(B)/ydb/tests/functional/backup/s3_path_style/ydb-tests-functional-backup-s3_path_style |73.3%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/driver_lib/version/ut/ydb-core-driver_lib-version-ut |73.3%| [AR] {tool} $(B)/ydb/core/protos/libydb-core-protos.a |73.3%| [AR] {RESULT} $(B)/ydb/core/protos/libydb-core-protos.a |73.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/benchmarks_init/py3test >> test_init.py::TestClickbenchInit::test_s1_row [GOOD] |73.3%| [LD] {BAZEL_UPLOAD} $(B)/ydb/library/yql/providers/generic/connector/tests/datasource/postgresql/tests-datasource-postgresql |73.3%| [LD] {BAZEL_UPLOAD} $(B)/ydb/public/sdk/cpp/examples/ttl/ttl |73.3%| [LD] {BAZEL_UPLOAD} $(B)/ydb/library/yql/providers/generic/connector/tests/datasource/mysql/connector-tests-datasource-mysql |73.3%| [LD] {BAZEL_UPLOAD} $(B)/ydb/public/lib/ydb_cli/common/ut/ydb-public-lib-ydb_cli-common-ut |73.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/olap/s3_import/ydb-tests-olap-s3_import |73.3%| [LD] {RESULT} $(B)/ydb/tests/olap/s3_import/ydb-tests-olap-s3_import |73.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/olap/s3_import/ydb-tests-olap-s3_import |73.4%| [LD] {BAZEL_UPLOAD} $(B)/ydb/apps/ydb/ut/ydb-apps-ydb-ut |73.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stress/log/tests/ydb-tests-stress-log-tests |73.4%| [LD] {RESULT} $(B)/ydb/tests/stress/log/tests/ydb-tests-stress-log-tests |73.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stress/log/tests/ydb-tests-stress-log-tests |73.4%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/protos/libydb-core-protos.a |73.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/fq/mem_alloc/ydb-tests-fq-mem_alloc |73.4%| [LD] {RESULT} $(B)/ydb/tests/fq/mem_alloc/ydb-tests-fq-mem_alloc |73.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/fq/mem_alloc/ydb-tests-fq-mem_alloc |73.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/sql/ydb-tests-sql |73.4%| [LD] {RESULT} $(B)/ydb/tests/sql/ydb-tests-sql |73.4%| [LD] {BAZEL_UPLOAD, SKIPPED} 
$(B)/ydb/tests/sql/ydb-tests-sql |73.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/scheme_tests/ydb-tests-functional-scheme_tests |73.4%| [LD] {RESULT} $(B)/ydb/tests/functional/scheme_tests/ydb-tests-functional-scheme_tests |73.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/functional/scheme_tests/ydb-tests-functional-scheme_tests |73.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stress/kv/tests/ydb-tests-stress-kv-tests |73.4%| [LD] {RESULT} $(B)/ydb/tests/stress/kv/tests/ydb-tests-stress-kv-tests |73.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stress/kv/tests/ydb-tests-stress-kv-tests |73.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/postgres_integrations/library/ut/ydb-tests-postgres_integrations-library-ut |73.4%| [LD] {RESULT} $(B)/ydb/tests/postgres_integrations/library/ut/ydb-tests-postgres_integrations-library-ut |73.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/postgres_integrations/library/ut/ydb-tests-postgres_integrations-library-ut |73.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/olap/column_family/compression/ydb-tests-olap-column_family-compression |73.4%| [LD] {RESULT} $(B)/ydb/tests/olap/column_family/compression/ydb-tests-olap-column_family-compression |73.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/olap/column_family/compression/ydb-tests-olap-column_family-compression >> test_init.py::TestTpcdsInit::test_s1_column |73.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/library/ut/ydb-tests-library-ut |73.4%| [LD] {RESULT} $(B)/ydb/tests/library/ut/ydb-tests-library-ut |73.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/library/ut/ydb-tests-library-ut >> test_init.py::TestTpcdsInit::test_s1_column [GOOD] >> test_init.py::TestTpcdsInit::test_s1_column_decimal |73.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/tools/kqprun/tests/ydb-tests-tools-kqprun-tests |73.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/tools/kqprun/tests/ydb-tests-tools-kqprun-tests |73.4%| [LD] {RESULT} $(B)/ydb/tests/tools/kqprun/tests/ydb-tests-tools-kqprun-tests |73.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/yaml_config/ut_transform/ydb-library-yaml_config-ut_transform >> test_init.py::TestTpcdsInit::test_s1_column_decimal [GOOD] |73.4%| [LD] {RESULT} $(B)/ydb/library/yaml_config/ut_transform/ydb-library-yaml_config-ut_transform |73.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/library/yaml_config/ut_transform/ydb-library-yaml_config-ut_transform |73.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/olap/scenario/ydb-tests-olap-scenario |73.5%| [LD] {RESULT} $(B)/ydb/tests/olap/scenario/ydb-tests-olap-scenario |73.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/olap/scenario/ydb-tests-olap-scenario |73.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/example/ydb-tests-example |73.5%| [LD] {RESULT} $(B)/ydb/tests/example/ydb-tests-example |73.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/example/ydb-tests-example |73.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/benchmarks_init/py3test >> test_init.py::TestTpcdsInit::test_s1_column_decimal [GOOD] |73.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/wardens/ydb-tests-functional-wardens |73.5%| [LD] {RESULT} $(B)/ydb/tests/functional/wardens/ydb-tests-functional-wardens |73.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/functional/wardens/ydb-tests-functional-wardens |73.5%| [LD] {default-linux-x86_64, release, asan} 
$(B)/ydb/tests/fq/restarts/ydb-tests-fq-restarts |73.5%| [LD] {RESULT} $(B)/ydb/tests/fq/restarts/ydb-tests-fq-restarts |73.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/fq/restarts/ydb-tests-fq-restarts |73.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/fq/http_api/ydb-tests-fq-http_api |73.5%| [LD] {RESULT} $(B)/ydb/tests/fq/http_api/ydb-tests-fq-http_api |73.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/fq/http_api/ydb-tests-fq-http_api |73.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/public/tools/lib/cmds/ut/ydb-public-tools-lib-cmds-ut |73.5%| [LD] {RESULT} $(B)/ydb/public/tools/lib/cmds/ut/ydb-public-tools-lib-cmds-ut |73.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/public/tools/lib/cmds/ut/ydb-public-tools-lib-cmds-ut |73.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/query_cache/ydb-tests-functional-query_cache |73.5%| [LD] {RESULT} $(B)/ydb/tests/functional/query_cache/ydb-tests-functional-query_cache |73.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/functional/query_cache/ydb-tests-functional-query_cache |73.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/tools/ydb_serializable/ydb_serializable |73.6%| [LD] {RESULT} $(B)/ydb/tests/tools/ydb_serializable/ydb_serializable |73.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/tools/ydb_serializable/ydb_serializable |73.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/viewer/tests/ydb-core-viewer-tests |73.6%| [LD] {RESULT} $(B)/ydb/core/viewer/tests/ydb-core-viewer-tests |73.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/viewer/tests/ydb-core-viewer-tests |73.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/fq/generic/streaming/ydb-tests-fq-generic-streaming |73.6%| [LD] {RESULT} $(B)/ydb/tests/fq/generic/streaming/ydb-tests-fq-generic-streaming |73.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/fq/generic/streaming/ydb-tests-fq-generic-streaming |73.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/olap/docs/generator/generator |73.6%| [LD] {RESULT} $(B)/ydb/tests/olap/docs/generator/generator |73.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/olap/docs/generator/generator |73.6%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/yt/libydb-core-yt.a |73.6%| [AR] {RESULT} $(B)/ydb/core/yt/libydb-core-yt.a |73.7%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/yt/libydb-core-yt.a >> TestTokenExchange::UpdatesTokenAndRetriesErrors [GOOD] >> TestTokenExchange::ShutdownWhileRefreshingToken |73.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tools/ydbd_slice/bin/ydbd_slice |73.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tools/ydbd_slice/bin/ydbd_slice |73.7%| [LD] {RESULT} $(B)/ydb/tools/ydbd_slice/bin/ydbd_slice |73.7%| [LD] {tool} $(B)/ydb/core/tx/schemeshard/generated/codegen/codegen |73.7%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/generated/codegen/codegen |73.7%| [PR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/generated/dispatch_op.h |73.7%| [PR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/schemeshard/generated/dispatch_op.h |73.7%| [LD] {tool} $(B)/ydb/core/base/generated/codegen/ydb-core-base-generated-codegen |73.7%| [LD] {RESULT} $(B)/ydb/core/base/generated/codegen/ydb-core-base-generated-codegen |73.7%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/tx/schemeshard/generated/codegen/codegen >> integrations_test.py::test_read_jtest_results[o/OK] [GOOD] |73.7%| [PR] {default-linux-x86_64, release, asan} $(B)/ydb/core/base/generated/runtime_feature_flags.h >> integrations_test.py::test_read_jtest_results[f/failed1] [GOOD] >> 
integrations_test.py::test_read_jtest_results[f/failed2] [GOOD]
>> integrations_test.py::test_read_jtest_results[f/error1] [GOOD]
>> integrations_test.py::test_read_jtest_results[s/skipped1] [GOOD]
>> integrations_test.py::test_read_jtest_results[s/skipped2] [GOOD]
>> integrations_test.py::test_read_jtest_with_one_result [GOOD]
|73.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/olap/load/ydb-tests-olap-load
|73.7%| [LD] {RESULT} $(B)/ydb/tests/olap/load/ydb-tests-olap-load
|73.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/olap/load/ydb-tests-olap-load
|73.7%| [PR] {BAZEL_UPLOAD} $(B)/ydb/core/base/generated/runtime_feature_flags.h
|73.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/olap/data_quotas/ydb-tests-olap-data_quotas
|73.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/olap/data_quotas/ydb-tests-olap-data_quotas
|73.7%| [LD] {RESULT} $(B)/ydb/tests/olap/data_quotas/ydb-tests-olap-data_quotas
>> TestTokenExchange::ShutdownWhileRefreshingToken [GOOD]
>> TestTokenExchange::ExchangesFromFileConfig
>> TestTokenExchange::ExchangesFromFileConfig [GOOD]
>> TestTokenExchange::SkipsUnknownFieldsInConfig [GOOD]
>> TestTokenExchange::JwtTokenSourceInConfig [GOOD]
>> TestTokenExchange::BadConfigParams
>> TestTokenExchange::BadConfigParams [GOOD]
>> JwtTokenSourceTest::Encodes [GOOD]
>> JwtTokenSourceTest::BadParams [GOOD]
------- [TS] {asan, default-linux-x86_64, release} ydb/tests/postgres_integrations/library/ut/py3test >> integrations_test.py::test_read_jtest_with_one_result [GOOD]
Test command err:
/home/runner/.ya/build/build_root/zvgn/0015d8/ydb/tests/postgres_integrations/library/ut/test-results/py3test/ydb/tests/postgres_integrations/library/pytest_integration.py:26: PytestCollectionWarning: cannot collect test class 'TestCase' because it has a __init__ constructor (from: integrations_test.py)
/home/runner/.ya/build/build_root/zvgn/0015d8/ydb/tests/postgres_integrations/library/ut/test-results/py3test/ydb/tests/postgres_integrations/library/pytest_integration.py:20: PytestCollectionWarning: cannot collect test class 'TestState' because it has a __init__ constructor (from: integrations_test.py)
|73.8%| [TS] {RESULT} ydb/tests/postgres_integrations/library/ut/py3test
|73.8%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/base/generated/codegen/ydb-core-base-generated-codegen
------- [TS] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/unit/client/oauth2_token_exchange/unittest >> JwtTokenSourceTest::BadParams [GOOD]
Test command err:
Checked backgroud update on 0 iteration
Checked backgroud update on 1 iteration
Shutdown: 0.004777s
|73.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/ydb_cli/ydb-tests-functional-ydb_cli
|73.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/functional/ydb_cli/ydb-tests-functional-ydb_cli
|73.8%| [TS] {RESULT} ydb/public/sdk/cpp/tests/unit/client/oauth2_token_exchange/unittest
|73.8%| [LD] {RESULT} $(B)/ydb/tests/functional/ydb_cli/ydb-tests-functional-ydb_cli
|73.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/sqs/multinode/ydb-tests-functional-sqs-multinode
|73.8%| [LD] {RESULT} $(B)/ydb/tests/functional/sqs/multinode/ydb-tests-functional-sqs-multinode
|73.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/functional/sqs/multinode/ydb-tests-functional-sqs-multinode
|73.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/datashard/dml/ydb-tests-datashard-dml
|73.8%| [LD] {RESULT} $(B)/ydb/tests/datashard/dml/ydb-tests-datashard-dml
|73.8%| [LD] {BAZEL_UPLOAD, SKIPPED}
$(B)/ydb/tests/datashard/dml/ydb-tests-datashard-dml |73.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/sql/large/ydb-tests-sql-large |73.8%| [LD] {RESULT} $(B)/ydb/tests/sql/large/ydb-tests-sql-large |73.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/sql/large/ydb-tests-sql-large |73.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stress/mixedpy/ydb-tests-stress-mixedpy |73.8%| [LD] {RESULT} $(B)/ydb/tests/stress/mixedpy/ydb-tests-stress-mixedpy |73.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/fq/generic/analytics/ydb-tests-fq-generic-analytics |73.8%| [LD] {RESULT} $(B)/ydb/tests/fq/generic/analytics/ydb-tests-fq-generic-analytics |73.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stress/mixedpy/ydb-tests-stress-mixedpy |73.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/fq/generic/analytics/ydb-tests-fq-generic-analytics |73.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/tenants/ydb-tests-functional-tenants |73.9%| [LD] {RESULT} $(B)/ydb/tests/functional/tenants/ydb-tests-functional-tenants |73.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/functional/tenants/ydb-tests-functional-tenants |73.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/compatibility/ydb-tests-compatibility |73.9%| [LD] {RESULT} $(B)/ydb/tests/compatibility/ydb-tests-compatibility |73.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/compatibility/ydb-tests-compatibility |73.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stress/statistics_workload/statistics_workload |73.9%| [LD] {RESULT} $(B)/ydb/tests/stress/statistics_workload/statistics_workload |73.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stress/statistics_workload/statistics_workload |73.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/public/tools/local_ydb/local_ydb |73.9%| [LD] {RESULT} $(B)/ydb/public/tools/local_ydb/local_ydb |73.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/public/tools/local_ydb/local_ydb |73.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stress/olap_workload/olap_workload |73.9%| [LD] {RESULT} $(B)/ydb/tests/stress/olap_workload/olap_workload |73.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stress/olap_workload/olap_workload >> test.py::test_kikimr_config_generator_generic_connector_config [GOOD] |73.9%| [TS] {asan, default-linux-x86_64, release} ydb/public/tools/lib/cmds/ut/py3test >> test.py::test_kikimr_config_generator_generic_connector_config [GOOD] |73.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/autoconfig/ydb-tests-functional-autoconfig |73.9%| [TS] {RESULT} ydb/public/tools/lib/cmds/ut/py3test |73.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/functional/autoconfig/ydb-tests-functional-autoconfig |73.9%| [LD] {RESULT} $(B)/ydb/tests/functional/autoconfig/ydb-tests-functional-autoconfig |73.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/scheme/ut_pg/ydb-core-scheme-ut_pg |74.0%| [LD] {RESULT} $(B)/ydb/core/scheme/ut_pg/ydb-core-scheme-ut_pg |74.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/scheme/ut_pg/ydb-core-scheme-ut_pg |74.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tools/tstool/tstool |74.0%| [LD] {RESULT} $(B)/ydb/tools/tstool/tstool |74.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tools/tstool/tstool |74.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/sqs/common/ydb-tests-functional-sqs-common |74.0%| [LD] {RESULT} $(B)/ydb/tests/functional/sqs/common/ydb-tests-functional-sqs-common |74.0%| [LD] {BAZEL_UPLOAD, 
SKIPPED} $(B)/ydb/tests/functional/sqs/common/ydb-tests-functional-sqs-common |74.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/canonical/ydb-tests-functional-canonical |74.0%| [LD] {RESULT} $(B)/ydb/tests/functional/canonical/ydb-tests-functional-canonical |74.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/functional/canonical/ydb-tests-functional-canonical |74.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/fq/s3/ydb-tests-fq-s3 |74.0%| [LD] {RESULT} $(B)/ydb/tests/fq/s3/ydb-tests-fq-s3 |74.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/fq/s3/ydb-tests-fq-s3 |74.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stress/oltp_workload/oltp_workload |74.0%| [LD] {RESULT} $(B)/ydb/tests/stress/oltp_workload/oltp_workload |74.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stress/oltp_workload/oltp_workload |74.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stress/simple_queue/simple_queue |74.0%| [LD] {RESULT} $(B)/ydb/tests/stress/simple_queue/simple_queue |74.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stress/simple_queue/simple_queue |74.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/tpc/large/ydb-tests-functional-tpc-large |74.1%| [LD] {RESULT} $(B)/ydb/tests/functional/tpc/large/ydb-tests-functional-tpc-large |74.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/functional/tpc/large/ydb-tests-functional-tpc-large |74.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/public/sdk/cpp/examples/secondary_index/secondary_index |74.1%| [LD] {RESULT} $(B)/ydb/public/sdk/cpp/examples/secondary_index/secondary_index |74.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/tools/ydb_serializable/replay/replay |74.1%| [LD] {RESULT} $(B)/ydb/tests/tools/ydb_serializable/replay/replay |74.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/tools/ydb_serializable/replay/replay |74.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/sqs/merge_split_common_table/fifo/functional-sqs-merge_split_common_table-fifo |74.1%| [LD] {RESULT} $(B)/ydb/tests/functional/sqs/merge_split_common_table/fifo/functional-sqs-merge_split_common_table-fifo |74.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/functional/sqs/merge_split_common_table/fifo/functional-sqs-merge_split_common_table-fifo |74.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/olap/ydb-tests-olap |74.1%| [LD] {RESULT} $(B)/ydb/tests/olap/ydb-tests-olap |74.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/olap/ydb-tests-olap |74.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/fq/plans/ydb-tests-fq-plans |74.1%| [LD] {RESULT} $(B)/ydb/tests/fq/plans/ydb-tests-fq-plans |74.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/fq/plans/ydb-tests-fq-plans |74.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/tools/nemesis/driver/nemesis |74.1%| [LD] {RESULT} $(B)/ydb/tests/tools/nemesis/driver/nemesis |74.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/tools/nemesis/driver/nemesis |74.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/sqs/large/ydb-tests-functional-sqs-large |74.1%| [LD] {RESULT} $(B)/ydb/tests/functional/sqs/large/ydb-tests-functional-sqs-large |74.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/functional/sqs/large/ydb-tests-functional-sqs-large |74.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stability/ydb/ydb-tests-stability-ydb |74.1%| [LD] {RESULT} $(B)/ydb/tests/stability/ydb/ydb-tests-stability-ydb |74.1%| [LD] {BAZEL_UPLOAD, SKIPPED} 
$(B)/ydb/tests/stability/ydb/ydb-tests-stability-ydb |74.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/fq/yds/ydb-tests-fq-yds |74.1%| [LD] {RESULT} $(B)/ydb/tests/fq/yds/ydb-tests-fq-yds |74.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/fq/yds/ydb-tests-fq-yds |74.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/postgres_integrations/go-libpq/ydb-tests-postgres_integrations-go-libpq |74.1%| [LD] {RESULT} $(B)/ydb/tests/postgres_integrations/go-libpq/ydb-tests-postgres_integrations-go-libpq |74.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/postgres_integrations/go-libpq/ydb-tests-postgres_integrations-go-libpq |74.2%| [LD] {BAZEL_UPLOAD} $(B)/ydb/public/sdk/cpp/examples/secondary_index/secondary_index |74.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/tx.cpp |74.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/tx.cpp |74.2%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/libydb-core-tx.a |74.2%| [AR] {RESULT} $(B)/ydb/core/tx/libydb-core-tx.a |74.2%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/libydb-core-tx.a |74.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/metadata/secret/accessor/secret_id.cpp |74.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/secret/accessor/secret_id.cpp |74.2%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/services/metadata/secret/accessor/libmetadata-secret-accessor.a |74.2%| [AR] {RESULT} $(B)/ydb/services/metadata/secret/accessor/libmetadata-secret-accessor.a |74.2%| [AR] {BAZEL_UPLOAD} $(B)/ydb/services/metadata/secret/accessor/libmetadata-secret-accessor.a |74.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/lib/auth/auth_helpers.cpp |74.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/lib/auth/auth_helpers.cpp |74.2%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/services/lib/auth/libservices-lib-auth.a |74.2%| [AR] {RESULT} $(B)/ydb/services/lib/auth/libservices-lib-auth.a |74.2%| [AR] {BAZEL_UPLOAD} $(B)/ydb/services/lib/auth/libservices-lib-auth.a |74.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/library/persqueue/topic_parser/topic_parser.cpp |74.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/library/persqueue/topic_parser/topic_parser.cpp |74.2%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/library/persqueue/topic_parser/liblibrary-persqueue-topic_parser.a |74.2%| [AR] {RESULT} $(B)/ydb/library/persqueue/topic_parser/liblibrary-persqueue-topic_parser.a |74.2%| [AR] {BAZEL_UPLOAD} $(B)/ydb/library/persqueue/topic_parser/liblibrary-persqueue-topic_parser.a |74.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/common/vdisk_context.cpp |74.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/common/vdisk_context.cpp |74.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/hulldb/fresh/fresh_data.cpp |74.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/hulldb/fresh/fresh_data.cpp |74.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/common/blobstorage_dblogcutter.cpp |74.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/base/statestorage_guardian.cpp |74.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/common/blobstorage_dblogcutter.cpp |74.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/base/statestorage_guardian.cpp >> TErasureTypeTest::TestBlock42PartialRestore1 [GOOD] |74.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/base/statestorage_replica.cpp |74.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/base/statestorage_replica.cpp >> 
TErasureTypeTest::TestBlock42PartialRestore0 [GOOD] |74.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_strategy_base.cpp |74.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_strategy_base.cpp |74.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_assimilate.cpp |74.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_assimilate.cpp >> TErasureTypeTest::TestBlock42PartialRestore3 [GOOD] |74.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/erasure/ut/unittest >> TErasureTypeTest::TestBlock42PartialRestore1 [GOOD] |74.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/hulldb/fresh/fresh_appendix.cpp |74.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/hulldb/fresh/fresh_appendix.cpp |74.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/erasure/ut/unittest >> TErasureTypeTest::TestBlock42PartialRestore0 [GOOD] |74.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/skeleton/skeleton_loggedrec.cpp |74.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/skeleton/skeleton_loggedrec.cpp |74.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/erasure/ut/unittest >> TErasureTypeTest::TestBlock42PartialRestore3 [GOOD] >> TErasureTypeTest::TestBlock42PartialRestore2 [GOOD] |74.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/erasure/ut/unittest >> TErasureTypeTest::TestBlock42PartialRestore2 [GOOD] |74.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/metadata/manager/generic_manager.cpp |74.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/manager/generic_manager.cpp |74.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/metadata/ds_table/registration.cpp |74.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/ds_table/registration.cpp |74.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/query/query_stattablet.cpp |74.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/query/query_stattablet.cpp |74.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/skeleton/skeleton_overload_handler.cpp |74.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/skeleton/skeleton_overload_handler.cpp |74.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/datastreams/put_records_actor.cpp |74.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/datastreams/put_records_actor.cpp |74.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/repl/blobstorage_replproxy.cpp |74.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/repl/blobstorage_replproxy.cpp |74.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/scrub/blob_recovery_process.cpp |74.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/ext_index/service/add_data.cpp |74.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/ext_index/service/add_data.cpp |74.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/scrub/blob_recovery_process.cpp |74.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/follower_tablet_info.cpp |74.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/follower_tablet_info.cpp |74.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blob_depot/agent/storage_discover.cpp |74.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blob_depot/agent/storage_discover.cpp |74.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/ext_index/metadata/behaviour.cpp |74.3%| [CC] 
{BAZEL_UPLOAD} $(S)/ydb/services/ext_index/metadata/behaviour.cpp |74.3%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/services/ext_index/metadata/libservices-ext_index-metadata.global.a |74.3%| [AR] {RESULT} $(B)/ydb/services/ext_index/metadata/libservices-ext_index-metadata.global.a |74.3%| [AR] {BAZEL_UPLOAD} $(B)/ydb/services/ext_index/metadata/libservices-ext_index-metadata.global.a |74.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/planner/zero_level.cpp |74.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/planner/zero_level.cpp |74.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blob_depot/s3_upload.cpp |74.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blob_depot/s3_upload.cpp |74.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/tablet_move_info.cpp |74.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tablet_move_info.cpp |74.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/constructor/constructor.cpp |74.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/constructor/constructor.cpp |74.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blob_depot/agent/storage_patch.cpp |74.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blob_depot/agent/storage_patch.cpp |74.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/reader/plain_reader/iterator/merge.cpp |74.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/plain_reader/iterator/merge.cpp |74.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/storage_pool_info.cpp |74.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/storage_pool_info.cpp |74.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/reader/sys_view/granules/granules.cpp |74.4%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/reader/sys_view/granules/libreader-sys_view-granules.global.a |74.3%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/reader/sys_view/granules/libreader-sys_view-granules.global.a |74.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/sys_view/granules/granules.cpp |74.4%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/sys_view/granules/libreader-sys_view-granules.global.a |74.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/remove_lock_change_records.cpp |74.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/remove_lock_change_records.cpp |74.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/read_table_scan_unit.cpp |74.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/read_table_scan_unit.cpp |74.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blob_depot/agent/storage_collect_garbage.cpp |74.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blob_depot/agent/storage_collect_garbage.cpp |74.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/writer/buffer/actor2.cpp |74.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/writer/buffer/actor2.cpp |74.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/writer/buffer/actor.cpp |74.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/writer/buffer/actor.cpp |74.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/tx__cut_tablet_history.cpp |74.4%| [CC] 
{BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__cut_tablet_history.cpp |74.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/bscontroller/shred.cpp |74.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/bscontroller/shred.cpp |74.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/source.cpp |74.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/source.cpp |74.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/tx__update_tablet_groups.cpp |74.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__update_tablet_groups.cpp |74.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/store_and_send_out_rs_unit.cpp |74.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/store_and_send_out_rs_unit.cpp |74.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/query/query_barrier.cpp |74.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/query/query_barrier.cpp |74.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blob_depot/data_load.cpp |74.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blob_depot/data_load.cpp |74.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/tx__generate_data_ut.cpp |74.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__generate_data_ut.cpp |74.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blob_depot/agent/storage_range.cpp |74.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blob_depot/agent/storage_range.cpp |74.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_login.cpp |74.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_login.cpp |74.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_common_external_data_source.cpp |74.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_common_external_data_source.cpp |74.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/tx__init_scheme.cpp |74.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__init_scheme.cpp |74.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_common_bsv.cpp |74.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_common_bsv.cpp |74.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_copy_sequence.cpp |74.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_copy_sequence.cpp |74.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/finalize_build_index_unit.cpp |74.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/finalize_build_index_unit.cpp >> TErasureTypeTest::TestAllSpeciesCrcWhole2of2 [GOOD] |74.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/volatile_tx_mon.cpp |74.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blob_depot/op_init_schema.cpp |74.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/volatile_tx_mon.cpp |74.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blob_depot/op_init_schema.cpp |74.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/erasure/ut/unittest >> TErasureTypeTest::TestAllSpeciesCrcWhole2of2 [GOOD] |74.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blob_depot/s3_write.cpp |74.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blob_depot/s3_write.cpp |74.5%| [CC] {default-linux-x86_64, release, asan} 
$(S)/ydb/core/tx/schemeshard/schemeshard_self_pinger.cpp |74.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_self_pinger.cpp |74.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_resource_pool.cpp |74.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_resource_pool.cpp |74.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/bscontroller/storage_stats_calculator.cpp |74.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/bscontroller/storage_stats_calculator.cpp |74.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/storage_balancer.cpp |74.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/storage_balancer.cpp >> TErasureTypeTest::TestAllSpecies1of2 [GOOD] >> TErasureTypeTest::TestAllSpecies2of2 |74.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/skeleton/skeleton_vmultiput_actor.cpp |74.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/skeleton/skeleton_vmultiput_actor.cpp |74.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/persqueue/dread_cache_service/caching_service.cpp |74.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/dread_cache_service/caching_service.cpp |74.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/bscontroller/cmds_drive_status.cpp |74.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/bscontroller/cmds_drive_status.cpp >> TErasureTypeTest::TestAllSpeciesCrcWhole1of2 [GOOD] |74.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/load_and_wait_in_rs_unit.cpp |74.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/load_and_wait_in_rs_unit.cpp |74.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/erasure/ut/unittest >> TErasureTypeTest::TestAllSpeciesCrcWhole1of2 [GOOD] |74.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/operation.cpp |74.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/operation.cpp |74.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blob_depot/assimilator.cpp |74.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blob_depot/assimilator.cpp |74.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/repl/blobstorage_repl.cpp |74.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/repl/blobstorage_repl.cpp |74.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/hive_domains.cpp |74.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/hive_domains.cpp |74.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blob_depot/s3_delete.cpp |74.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blob_depot/s3_delete.cpp |74.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/skeleton/skeleton_vpatch_actor.cpp |74.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/skeleton/skeleton_vpatch_actor.cpp |74.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/bscontroller/update_group_latencies.cpp |74.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/bscontroller/update_group_latencies.cpp |74.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/tx__adopt_tablet.cpp |74.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__adopt_tablet.cpp |74.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_backup_incremental_backup_collection.cpp |74.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_backup_incremental_backup_collection.cpp |74.6%| [CC] {default-linux-x86_64, release, asan} 
$(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp
|74.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp
|74.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/wait_for_plan_unit.cpp
|74.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/wait_for_plan_unit.cpp
|74.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/hive_statics.cpp
|74.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/hive_statics.cpp
|74.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/storage_group_info.cpp
|74.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/storage_group_info.cpp
|74.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/incr_restore_scan.cpp
|74.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/incr_restore_scan.cpp
|74.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blob_depot/group_metrics_exchange.cpp
|74.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blob_depot/group_metrics_exchange.cpp
|74.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/tx__delete_node.cpp
|74.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__delete_node.cpp
|74.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/query/query_extr.cpp
|74.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/query/query_extr.cpp
|74.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp
|74.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blob_depot/data.cpp
|74.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp
|74.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blob_depot/data.cpp
>> ErasureBrandNew::Block42_restore [GOOD]
>> ErasureBrandNew::Block42_restore_benchmark
|74.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/tablet_info.cpp
|74.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tablet_info.cpp
>> ErasureBrandNew::Block42_restore_benchmark [GOOD]
|74.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/tx__configure_subdomain.cpp
|74.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__configure_subdomain.cpp
|74.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__login.cpp
|74.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__login.cpp
------- [TM] {asan, default-linux-x86_64, release} ydb/core/erasure/ut/unittest >> ErasureBrandNew::Block42_restore_benchmark [GOOD]
Test command err:
totalSize# 498290521 period1# 0.871904s period2# 0.486734s MB/s1# 571.4970008 MB/s2# 1023.742991 factor# 1.791335719
|74.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/bscontroller/cmds_storage_pool.cpp
|74.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/bscontroller/cmds_storage_pool.cpp
>> test_generator.py::TestTpchGenerator::test_s1_parts [GOOD]
|74.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/load_test/service_actor.cpp
|74.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/load_test/service_actor.cpp
|74.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/benchmarks_init/py3test >> test_generator.py::TestTpchGenerator::test_s1_parts [GOOD]
|74.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_extsubdomain.cpp
|74.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_extsubdomain.cpp
|74.6%| [CC] {default-linux-x86_64, release, asan}
$(S)/ydb/core/tx/datashard/wait_for_stream_clearance_unit.cpp |74.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/wait_for_stream_clearance_unit.cpp |74.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/execute_data_tx_unit.cpp |74.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/execute_data_tx_unit.cpp |74.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/build_scheme_tx_out_rs_unit.cpp |74.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/build_scheme_tx_out_rs_unit.cpp |74.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/hive_impl.cpp |74.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/hive_impl.cpp |74.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/cdc_stream_scan.cpp |74.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/cdc_stream_scan.cpp |74.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__list_users.cpp |74.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__list_users.cpp |74.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/monitoring.cpp |74.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/monitoring.cpp |74.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/export_common.cpp |74.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/export_common.cpp |74.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard_export_flow_proposals.cpp |74.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_export_flow_proposals.cpp >> test_generator.py::TestTpchGenerator::test_s1 [GOOD] |74.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/benchmarks_init/py3test >> test_generator.py::TestTpchGenerator::test_s1 [GOOD] |74.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp |74.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp |74.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard_import__cancel.cpp |74.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_import__cancel.cpp |74.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/domain_info.cpp |74.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/domain_info.cpp |74.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard_import__list.cpp |74.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_import__list.cpp |74.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/node_info.cpp |74.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/node_info.cpp |74.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/leader_tablet_info.cpp |74.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/leader_tablet_info.cpp |74.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/proxy_service/kqp_script_executions.cpp |74.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/proxy_service/kqp_script_executions.cpp |74.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard_export__create.cpp |74.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_export__create.cpp |74.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__delete_tablet_reply.cpp |74.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__delete_tablet_reply.cpp |74.7%| [CC] {default-linux-x86_64, 
release, asan} $(S)/ydb/core/tx/schemeshard/olap/indexes/update.cpp |74.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/olap/indexes/update.cpp |74.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/hive.cpp |74.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/hive.cpp |74.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_cdc_stream.cpp |74.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_cdc_stream.cpp >> test_generator.py::TestTpchGenerator::test_s1_state [GOOD] |74.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/benchmarks_init/py3test >> test_generator.py::TestTpchGenerator::test_s1_state [GOOD] |74.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/query/query_stathuge.cpp |74.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/query/query_stathuge.cpp |74.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/change_collector_cdc_stream.cpp |74.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/change_collector_cdc_stream.cpp |74.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard.cpp |74.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard.cpp |74.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blob_depot/data_uncertain.cpp |74.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blob_depot/data_uncertain.cpp |74.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/tx__update_tablet_metrics.cpp |74.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__update_tablet_metrics.cpp |74.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/metadata/manager/abstract.cpp |74.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/manager/abstract.cpp |74.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/export/events/events.cpp |74.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/export/events/events.cpp |74.8%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/export/events/libcolumnshard-export-events.a |74.8%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/export/events/libcolumnshard-export-events.a |74.8%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/export/events/libcolumnshard-export-events.a |74.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp |74.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp |74.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard_validate_ttl.cpp |74.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_validate_ttl.cpp |74.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_index.cpp |74.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_common.cpp |74.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_index.cpp |74.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_common.cpp >> test_generator.py::TestTpchGenerator::test_s1_state_and_parts [GOOD] |74.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard_domain_links.cpp |74.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_domain_links.cpp |74.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/benchmarks_init/py3test >> 
test_generator.py::TestTpchGenerator::test_s1_state_and_parts [GOOD] |74.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blob_depot/data_gc.cpp |74.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blob_depot/data_gc.cpp |74.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__init_root.cpp |74.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__init_root.cpp |74.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/receive_snapshot_cleanup_unit.cpp |74.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/receive_snapshot_cleanup_unit.cpp |74.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/common/vdisk_config.cpp |74.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/common/vdisk_config.cpp |74.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__monitoring.cpp |74.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__monitoring.cpp |74.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard_export__forget.cpp |74.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_export__forget.cpp |74.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard_utils.cpp |74.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_utils.cpp |74.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/gateway/behaviour/resource_pool_classifier/manager.cpp |74.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/gateway/behaviour/resource_pool_classifier/manager.cpp |74.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__background_compaction.cpp |74.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__background_compaction.cpp |74.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/driver_lib/cli_utils/cli_cmds_bs.cpp |74.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/driver_lib/cli_utils/cli_cmds_bs.cpp |74.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/repl/blobstorage_hullrepljob.cpp |74.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/skeleton/skeleton_vmovedpatch_actor.cpp |74.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/repl/blobstorage_hullrepljob.cpp |74.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/skeleton/skeleton_vmovedpatch_actor.cpp |74.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/tx__request_tablet_owners.cpp |74.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__request_tablet_owners.cpp |74.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard_build_index__list.cpp |74.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_build_index__list.cpp |74.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/balancer.cpp |74.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/balancer.cpp |74.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_external_table.cpp |74.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_external_table.cpp |74.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/olap/operations/alter/common/update.cpp |74.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/olap/operations/alter/common/update.cpp |74.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__serverless_storage_billing.cpp 
|74.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__serverless_storage_billing.cpp |74.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/query_data/kqp_prepared_query.cpp |74.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/query_data/kqp_prepared_query.cpp |74.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/export_s3_uploader.cpp |74.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/export_s3_uploader.cpp |74.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard_cdc_stream_scan.cpp |74.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_cdc_stream_scan.cpp |74.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/nodewarden/node_warden_vdisk.cpp |74.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/nodewarden/node_warden_vdisk.cpp |74.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/fill.cpp |74.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/fill.cpp |74.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/prepare_scheme_tx_in_rs_unit.cpp |74.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/prepare_scheme_tx_in_rs_unit.cpp |74.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_external_data_source.cpp |74.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_external_data_source.cpp |74.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__conditional_erase.cpp |74.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__conditional_erase.cpp |74.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/boot_queue.cpp |74.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/boot_queue.cpp |74.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/tx__update_domain.cpp |74.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__update_domain.cpp |74.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/deprecated/persqueue_v0/grpc_pq_read_actor.cpp |74.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/deprecated/persqueue_v0/grpc_pq_read_actor.cpp |74.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard_export_uploaders.cpp |75.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_export_uploaders.cpp |74.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_split_merge.cpp |75.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_split_merge.cpp |75.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blob_depot/agent/channel_kind.cpp |75.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blob_depot/agent/channel_kind.cpp |75.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/read_op_unit.cpp |75.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/read_op_unit.cpp |75.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blob_depot/agent/blob_mapping_cache.cpp |75.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__sync_update_tenants.cpp |75.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blob_depot/agent/blob_mapping_cache.cpp |75.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__sync_update_tenants.cpp |75.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/load_test/ycsb/test_load_actor.cpp |75.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/load_test/ycsb/test_load_actor.cpp |75.0%| [CC] 
{default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/query/query_readbatch.cpp |75.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/query/query_readbatch.cpp |75.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard_bg_tasks__list.cpp |75.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_bg_tasks__list.cpp |75.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/olap/operations/alter/common/object.cpp |75.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/olap/operations/alter/common/object.cpp |75.0%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/olap/operations/alter/common/liboperations-alter-common.a |75.0%| [AR] {RESULT} $(B)/ydb/core/tx/schemeshard/olap/operations/alter/common/liboperations-alter-common.a |75.0%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/schemeshard/olap/operations/alter/common/liboperations-alter-common.a |75.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_side_effects.cpp |75.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_indexed_table.cpp |75.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_side_effects.cpp |75.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_indexed_table.cpp |75.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blob_depot/agent/proxy.cpp |75.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blob_depot/agent/proxy.cpp |75.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_move_table.cpp |75.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_move_table.cpp |75.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_index.cpp |75.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_index.cpp |75.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/session_actor/kqp_query_state.cpp |75.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/session_actor/kqp_query_state.cpp |75.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blob_depot/given_id_range.cpp |75.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blob_depot/given_id_range.cpp |75.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard_shard_deleter.cpp |75.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_shard_deleter.cpp |75.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blob_depot/blob_depot.cpp |75.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blob_depot/op_apply_config.cpp |75.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blob_depot/blob_depot.cpp |75.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blob_depot/op_apply_config.cpp |75.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/olap/column_families/schema.cpp |75.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/olap/column_families/schema.cpp |75.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_just_reject.cpp |75.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_just_reject.cpp |75.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_build_index.cpp |75.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_build_index.cpp |75.1%| [CC] 
{default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/nodewarden/node_warden_mon.cpp |75.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/nodewarden/node_warden_mon.cpp |75.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/build_index/sample_k.cpp |75.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/build_index/sample_k.cpp |75.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard_audit_log.cpp |75.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_audit_log.cpp |75.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/nodewarden/node_warden_impl.cpp |75.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/olap/operations/alter/abstract/object.cpp |75.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/nodewarden/node_warden_impl.cpp |75.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/olap/operations/alter/abstract/object.cpp |75.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__upgrade_access_database.cpp |75.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__upgrade_access_database.cpp |75.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/drain.cpp |75.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/drain.cpp |75.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__init_populator.cpp |75.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__init_populator.cpp |75.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/remove_schema_snapshots.cpp |75.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/remove_schema_snapshots.cpp |75.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/session_actor/kqp_temp_tables_manager.cpp >> TErasureTypeTest::TestAllSpecies2of2 [GOOD] |75.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/session_actor/kqp_temp_tables_manager.cpp |75.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_upgrade_subdomain.cpp |75.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_upgrade_subdomain.cpp |75.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/proxy_service/kqp_proxy_databases_cache.cpp |75.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/proxy_service/kqp_proxy_databases_cache.cpp |75.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__make_access_database_no_inheritable.cpp |75.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__make_access_database_no_inheritable.cpp |75.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blob_depot/agent/request.cpp |75.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blob_depot/agent/request.cpp |75.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_fs.cpp |75.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_fs.cpp |75.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_rtmr.cpp |75.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/erasure/ut/unittest >> TErasureTypeTest::TestAllSpecies2of2 [GOOD] |75.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_rtmr.cpp |75.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__borrowed_compaction.cpp |75.1%| [CC] {BAZEL_UPLOAD} 
$(S)/ydb/core/tx/schemeshard/schemeshard__borrowed_compaction.cpp |75.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/tx_proxy/snapshotreq.cpp |75.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/tx_proxy/snapshotreq.cpp |75.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__clean_pathes.cpp |75.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__clean_pathes.cpp |75.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/common/vdisk_mongroups.cpp |75.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/common/vdisk_mongroups.cpp |75.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/olap/bg_tasks/transactions/tasks_list.cpp |75.2%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/olap/bg_tasks/transactions/libolap-bg_tasks-transactions.a |75.2%| [AR] {RESULT} $(B)/ydb/core/tx/schemeshard/olap/bg_tasks/transactions/libolap-bg_tasks-transactions.a |75.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/olap/bg_tasks/transactions/tasks_list.cpp |75.2%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/schemeshard/olap/bg_tasks/transactions/libolap-bg_tasks-transactions.a |75.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/olap/indexes/schema.cpp |75.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/olap/indexes/schema.cpp |75.2%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/olap/indexes/libschemeshard-olap-indexes.a |75.2%| [AR] {RESULT} $(B)/ydb/core/tx/schemeshard/olap/indexes/libschemeshard-olap-indexes.a |75.2%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/schemeshard/olap/indexes/libschemeshard-olap-indexes.a |75.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/load_test/ycsb/kqp_upsert.cpp |75.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/load_test/ycsb/kqp_upsert.cpp |75.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blob_depot/data_trash.cpp |75.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blob_depot/data_trash.cpp |75.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_sequence.cpp |75.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_sequence.cpp |75.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation.cpp |75.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation.cpp |75.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__publish_to_scheme_board.cpp |75.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__publish_to_scheme_board.cpp |75.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_kesus.cpp |75.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/olap/operations/alter/abstract/converter.cpp |75.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_kesus.cpp |75.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/olap/operations/alter/abstract/converter.cpp |75.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blob_depot/data_decommit.cpp |75.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blob_depot/data_decommit.cpp |75.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/util/memory_tracker.cpp |75.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/util/memory_tracker.cpp |75.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_assign_bsv.cpp 
|75.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_assign_bsv.cpp
|75.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard_backup.cpp
|75.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_backup.cpp
|75.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/nodewarden/distconf.cpp
|75.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/nodewarden/distconf.cpp
|75.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__upgrade_schema.cpp
|75.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__upgrade_schema.cpp
|75.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/olap/columns/update.cpp
|75.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/olap/columns/update.cpp
|75.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/tx__update_dc_followers.cpp
|75.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__update_dc_followers.cpp
|75.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/store_snapshot_tx_unit.cpp
|75.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/store_snapshot_tx_unit.cpp
|75.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/scrub/scrub_actor.cpp
|75.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/scrub/scrub_actor.cpp
|75.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/viewer/json_handlers_operation.cpp
|75.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/viewer/json_handlers_operation.cpp
|75.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_rmdir.cpp
|75.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_rmdir.cpp
|75.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_replication.cpp
|75.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_replication.cpp
|75.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_restore_backup_collection.cpp
|75.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__unmark_restore_tables.cpp
|75.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_restore_backup_collection.cpp
|75.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__unmark_restore_tables.cpp
|75.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/tx_proxy/upload_rows_common_impl.cpp
|75.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/tx_proxy/upload_rows_common_impl.cpp
|75.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_extsubdomain.cpp
|75.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_extsubdomain.cpp
|75.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard_build_index__progress.cpp
|75.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_build_index__progress.cpp
|75.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blob_depot/s3.cpp
|75.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard_export.cpp
|75.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blob_depot/s3.cpp
|75.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_export.cpp
|75.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/change_sender.cpp
|75.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/change_sender.cpp
|75.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/nodewarden/distconf_persistent_storage.cpp
|75.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/nodewarden/distconf_persistent_storage.cpp
|75.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/tx_allocator/txallocator_impl.cpp
|75.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/tx_allocator/txallocator_impl.cpp
|75.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blob_depot/agent/read.cpp
|75.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blob_depot/agent/read.cpp
|75.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/viewer/json_pipe_req.cpp
|75.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/viewer/json_pipe_req.cpp
|75.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_cdc_stream.cpp
|75.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_cdc_stream.cpp
>> test_generator.py::TestTpcdsGenerator::test_s1_parts [GOOD]
|75.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blob_depot/agent/storage_status.cpp
|75.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blob_depot/agent/storage_status.cpp
|75.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__backup_collection_common.cpp
|75.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__backup_collection_common.cpp
|75.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/benchmarks_init/py3test >> test_generator.py::TestTpcdsGenerator::test_s1_parts [GOOD]
|75.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blob_depot/coro_tx.cpp
|75.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blob_depot/coro_tx.cpp
|75.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_move_index.cpp
|75.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blob_depot/op_commit_blob_seq.cpp
|75.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_move_index.cpp
|75.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blob_depot/op_commit_blob_seq.cpp
|75.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp
|75.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp
|75.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/viewer/json_handlers_vdisk.cpp
|75.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/viewer/json_handlers_vdisk.cpp
|75.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/tx_proxy/read_table_impl.cpp
|75.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/tx_proxy/read_table_impl.cpp
|75.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard_export__get.cpp
|75.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_export__get.cpp
|75.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/nodewarden/distconf_invoke_state_storage.cpp
|75.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/nodewarden/distconf_invoke_state_storage.cpp
|75.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_bsv.cpp
|75.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_bsv.cpp
|75.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blob_depot/testing.cpp
|75.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blob_depot/testing.cpp
|75.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/olap/common/common.cpp
|75.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/olap/common/common.cpp
|75.4%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/olap/common/libschemeshard-olap-common.a
|75.4%| [AR] {RESULT} $(B)/ydb/core/tx/schemeshard/olap/common/libschemeshard-olap-common.a
|75.4%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/schemeshard/olap/common/libschemeshard-olap-common.a
|75.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard_build_index.cpp
|75.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_view.cpp
|75.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_build_index.cpp
|75.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_move_tables.cpp
|75.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_view.cpp
|75.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_move_tables.cpp
|75.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_subdomain.cpp
|75.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_subdomain.cpp
|75.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/nodewarden/distconf_validate.cpp
|75.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/nodewarden/distconf_validate.cpp
|75.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard_path.cpp
|75.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_path.cpp
|75.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/nodewarden/distconf_console.cpp
|75.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/nodewarden/distconf_console.cpp
|75.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/nodewarden/distconf_dynamic.cpp
|75.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/nodewarden/distconf_dynamic.cpp
|75.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/load_test/ycsb/kqp_select.cpp
|75.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_fs.cpp
|75.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_fs.cpp
|75.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/load_test/ycsb/kqp_select.cpp
|75.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/session_actor/kqp_session_actor.cpp
|75.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/session_actor/kqp_session_actor.cpp
>> test_generator.py::TestTpcdsGenerator::test_s1 [GOOD]
|75.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__table_stats.cpp
|75.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__table_stats.cpp
|75.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/benchmarks_init/py3test >> test_generator.py::TestTpcdsGenerator::test_s1 [GOOD]
|75.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blob_depot/agent/agent.cpp
|75.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blob_depot/agent/agent.cpp
|75.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blob_depot/s3_scan.cpp
|75.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blob_depot/s3_scan.cpp
|75.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_memory_changes.cpp
|75.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_memory_changes.cpp
|75.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/session_actor/kqp_worker_actor.cpp
|75.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/session_actor/kqp_worker_actor.cpp
|75.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__background_cleaning.cpp
|75.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__background_cleaning.cpp
|75.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__find_subdomain_path_id.cpp
|75.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__find_subdomain_path_id.cpp
>> test_generator.py::TestTpcdsGenerator::test_s1_state [GOOD]
|75.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__state_changed_reply.cpp
|75.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__state_changed_reply.cpp
|75.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/tx__delete_tablet.cpp
|75.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__delete_tablet.cpp
|75.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/benchmarks_init/py3test >> test_generator.py::TestTpcdsGenerator::test_s1_state [GOOD]
|75.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_lock.cpp
|75.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_lock.cpp
|75.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_move_sequence.cpp
|75.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_move_sequence.cpp
|75.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_external_table.cpp
|75.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_external_table.cpp
|75.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/tiering/fetcher.cpp
|75.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/tiering/fetcher.cpp
|75.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/tx_allocator_client/actor_client.cpp
|75.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/tx_allocator_client/actor_client.cpp
|75.5%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/tx_allocator_client/libcore-tx-tx_allocator_client.a
|75.5%| [AR] {RESULT} $(B)/ydb/core/tx/tx_allocator_client/libcore-tx-tx_allocator_client.a
|75.5%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/tx_allocator_client/libcore-tx-tx_allocator_client.a
|75.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/nodewarden/distconf_fsm.cpp
|75.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/nodewarden/distconf_fsm.cpp
|75.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/tx_allocator/txallocator.cpp
|75.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/tx_allocator/txallocator.cpp
|75.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_backup_collection.cpp
|75.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_backup_collection.cpp
|75.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__tenant_data_erasure_manager.cpp
|75.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__tenant_data_erasure_manager.cpp
|75.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/sys_view/auth/owners.cpp
|75.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/sys_view/auth/owners.cpp
|75.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/gateway/behaviour/resource_pool_classifier/fetcher.cpp
|75.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/gateway/behaviour/resource_pool_classifier/fetcher.cpp
|75.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__describe_scheme.cpp
|75.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__describe_scheme.cpp
|75.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/storage/actualizer/tiering/tiering.cpp
|75.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/storage/actualizer/tiering/tiering.cpp
|75.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/tx_proxy/upload_rows.cpp
|75.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blob_depot/agent/storage_put.cpp
|75.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/tx_proxy/upload_rows.cpp
|75.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blob_depot/agent/storage_put.cpp
|75.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__init.cpp
|75.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__init.cpp
|75.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/config_shards/update.cpp
|75.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/config_shards/update.cpp
|75.6%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/config_shards/libalter-in_store-config_shards.a
|75.6%| [AR] {RESULT} $(B)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/config_shards/libalter-in_store-config_shards.a
|75.6%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/config_shards/libalter-in_store-config_shards.a
|75.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/finish_propose_unit.cpp
|75.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/finish_propose_unit.cpp
|75.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp
|75.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp
|75.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/tx_allocator/txallocator__reserve.cpp
|75.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/tx_allocator/txallocator__reserve.cpp
|75.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/viewer/viewer_topic_data.cpp
|75.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/viewer/viewer_topic_data.cpp
|75.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_continuous_backup.cpp
|75.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_continuous_backup.cpp
|75.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blob_depot/agent/storage_get_block.cpp
|75.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blob_depot/agent/storage_get_block.cpp
|75.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_solomon.cpp
|75.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_solomon.cpp
|75.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/skeleton/blobstorage_monactors.cpp
|75.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/skeleton/blobstorage_monactors.cpp
|75.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blob_depot/agent/storage_get.cpp
|75.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blob_depot/agent/storage_get.cpp
|75.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/build_write_out_rs_unit.cpp
|75.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/tx_proxy/proxy_impl.cpp
|75.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/build_write_out_rs_unit.cpp
|75.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/tx_proxy/proxy_impl.cpp
|75.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/viewer/json_handlers_viewer.cpp
|75.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/scrub/restore_corrupted_blob_actor.cpp
|75.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/viewer/json_handlers_viewer.cpp
|75.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/scrub/restore_corrupted_blob_actor.cpp
|75.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/tx_proxy/schemereq.cpp
|75.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/tx_proxy/schemereq.cpp
|75.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/viewer/viewer.cpp
|75.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blob_depot/agent/status.cpp
|75.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/viewer/viewer.cpp
|75.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blob_depot/agent/status.cpp
|75.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/olap/columns/schema.cpp
|75.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/olap/columns/schema.cpp
|75.7%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/olap/columns/libschemeshard-olap-columns.a
|75.7%| [AR] {RESULT} $(B)/ydb/core/tx/schemeshard/olap/columns/libschemeshard-olap-columns.a
|75.7%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/schemeshard/olap/columns/libschemeshard-olap-columns.a
|75.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard_build_index__get.cpp
|75.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_build_index__get.cpp
|75.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/gateway/behaviour/resource_pool_classifier/snapshot.cpp
|75.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/gateway/behaviour/resource_pool_classifier/snapshot.cpp
|75.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard_info_types.cpp
|75.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_info_types.cpp
|75.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_apply_build_index.cpp
|75.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_apply_build_index.cpp
|75.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp
|75.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp
|75.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/run_script_actor/kqp_run_script_actor.cpp
|75.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/run_script_actor/kqp_run_script_actor.cpp
|75.7%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/run_script_actor/libcore-kqp-run_script_actor.a
|75.7%| [AR] {RESULT} $(B)/ydb/core/kqp/run_script_actor/libcore-kqp-run_script_actor.a
|75.7%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/run_script_actor/libcore-kqp-run_script_actor.a
|75.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blob_depot/agent/query.cpp
|75.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blob_depot/agent/query.cpp
|75.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__notify.cpp
|75.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__notify.cpp
|75.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/session_actor/kqp_response.cpp
|75.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/session_actor/kqp_response.cpp
|75.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blob_depot/agent/s3.cpp
|75.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__data_erasure_manager.cpp
|75.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blob_depot/agent/s3.cpp
|75.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__data_erasure_manager.cpp
|75.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_unsafe.cpp
|75.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_unsafe.cpp
|75.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_initiate_build_index.cpp
|75.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_initiate_build_index.cpp
|75.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_backup_collection.cpp
|75.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_backup_collection.cpp
|75.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/session_actor/kqp_worker_common.cpp
|75.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/session_actor/kqp_worker_common.cpp
|75.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_bsv.cpp
|75.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_bsv.cpp
|75.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_cdc_stream.cpp
|75.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_cdc_stream.cpp
|75.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/restore_unit.cpp
|75.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/restore_unit.cpp
|75.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp
|75.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp
|75.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_snapshots.cpp
|75.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_snapshots.cpp
|75.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_solomon.cpp
|75.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_solomon.cpp
|75.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_consistent_copy_tables.cpp
|75.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_consistent_copy_tables.cpp
|75.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/nodewarden/node_warden_resource.cpp
|75.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/nodewarden/node_warden_resource.cpp
|75.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/session_actor/kqp_query_stats.cpp
|75.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/session_actor/kqp_query_stats.cpp
|75.8%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/session_actor/libcore-kqp-session_actor.a
|75.8%| [AR] {RESULT} $(B)/ydb/core/kqp/session_actor/libcore-kqp-session_actor.a
|75.8%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/session_actor/libcore-kqp-session_actor.a
|75.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blob_depot/blocks.cpp
|75.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blob_depot/blocks.cpp
|75.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_external_data_source.cpp
|75.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_external_data_source.cpp
|75.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/nodewarden/distconf_scatter_gather.cpp
|75.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/nodewarden/distconf_scatter_gather.cpp
|75.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/scheme_board/subscriber.cpp
|75.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_mkdir.cpp
|75.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/scheme_board/subscriber.cpp
|75.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_mkdir.cpp
|75.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blob_depot/data_resolve.cpp
|75.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blob_depot/data_resolve.cpp
|75.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_view.cpp
|75.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_view.cpp
|75.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_cancel_tx.cpp
|75.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_cancel_tx.cpp
|75.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/opt/physical/kqp_opt_phy_source.cpp
|75.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/physical/kqp_opt_phy_source.cpp
|75.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blob_depot/garbage_collection.cpp
|75.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blob_depot/garbage_collection.cpp
|75.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/store_scheme_tx_unit.cpp
|75.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/store_scheme_tx_unit.cpp
|75.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_lock.cpp
|75.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_lock.cpp
|75.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_pq.cpp
|75.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_pq.cpp
|75.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/read_table_scan.cpp
|75.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/read_table_scan.cpp
|75.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/query/query_public.cpp
|75.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/query/query_public.cpp
|75.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/opt/peephole/kqp_opt_peephole_wide_read.cpp
|75.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/peephole/kqp_opt_peephole_wide_read.cpp
|75.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/gateway/behaviour/tablestore/behaviour.cpp
|75.9%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/gateway/behaviour/tablestore/libgateway-behaviour-tablestore.global.a
|75.9%| [AR] {RESULT} $(B)/ydb/core/kqp/gateway/behaviour/tablestore/libgateway-behaviour-tablestore.global.a
|75.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/gateway/behaviour/tablestore/behaviour.cpp
|75.9%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/gateway/behaviour/tablestore/libgateway-behaviour-tablestore.global.a
|75.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_replication.cpp
|75.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_replication.cpp
|75.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/query/query_readactor.cpp
|75.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/query/query_readactor.cpp
|75.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/finalize_script_service/kqp_finalize_script_service.cpp
|75.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/finalize_script_service/kqp_finalize_script_service.cpp
|75.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_resource_pool.cpp
|75.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_resource_pool.cpp
|75.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_solomon.cpp
|75.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_solomon.cpp
|75.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/opt/peephole/kqp_opt_peephole.cpp
|75.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/peephole/kqp_opt_peephole.cpp
|75.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/proxy_service/kqp_session_info.cpp
|75.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/proxy_service/kqp_session_info.cpp
|75.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/persqueue/events/events.cpp
|75.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/events/events.cpp
|75.9%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/persqueue/events/libcore-persqueue-events.a
|75.9%| [AR] {RESULT} $(B)/ydb/core/persqueue/events/libcore-persqueue-events.a
|75.9%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/persqueue/events/libcore-persqueue-events.a
|75.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_blob_depot.cpp
|75.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_blob_depot.cpp
|75.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_sequence.cpp
|75.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_replication.cpp
|75.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_sequence.cpp
|75.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_replication.cpp
|75.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__fix_bad_paths.cpp
|75.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__fix_bad_paths.cpp
|75.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_kesus.cpp
|76.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_kesus.cpp
|76.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_index.cpp
|76.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_index.cpp
|76.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/gateway/behaviour/resource_pool_classifier/object.cpp
|76.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/load_test/pdisk_read.cpp
|76.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/gateway/behaviour/resource_pool_classifier/object.cpp
|76.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/load_test/pdisk_read.cpp
|76.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/constructor.cpp
|76.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/constructor.cpp
|76.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/gateway/behaviour/resource_pool_classifier/initializer.cpp
|76.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/gateway/behaviour/resource_pool_classifier/initializer.cpp
|76.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_continuous_backup.cpp
|76.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_continuous_backup.cpp
|76.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/nodewarden/distconf_generate.cpp
|76.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/nodewarden/distconf_generate.cpp
|76.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard_export__cancel.cpp
|76.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_export__cancel.cpp
|76.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/change_sender_async_index.cpp
|76.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/change_sender_async_index.cpp
|76.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard_import__get.cpp
|76.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_import__get.cpp
|76.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/nodewarden/node_warden_pipe.cpp
|76.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/nodewarden/node_warden_pipe.cpp
|76.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/key_validator.cpp
|76.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/key_validator.cpp
|76.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard_import__create.cpp
|76.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/nodewarden/distconf_invoke_static_group.cpp
|76.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard_export__list.cpp
|76.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_import__create.cpp
|76.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/nodewarden/distconf_invoke_static_group.cpp
|76.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_export__list.cpp
|76.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/localrecovery/localrecovery_logreplay.cpp
|76.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/localrecovery/localrecovery_logreplay.cpp
|76.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard_build_index__cancel.cpp
|76.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_build_index__cancel.cpp
|76.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard_import__forget.cpp
|76.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_import__forget.cpp
|76.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/move_table_unit.cpp
|76.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/move_table_unit.cpp
|76.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard_import_getters.cpp
|76.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/scanner.cpp
|76.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_import_getters.cpp
|76.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/scanner.cpp
|76.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/runtime/kqp_transport.cpp
|76.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/runtime/kqp_transport.cpp
|76.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard_svp_migration.cpp
|76.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_svp_migration.cpp
|76.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_sequence.cpp
|76.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_sequence.cpp
|76.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/load_test/vdisk_write.cpp
|76.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/load_test/vdisk_write.cpp
|76.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_bsv.cpp
|76.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_bsv.cpp
|76.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/nodewarden/node_warden_proxy.cpp
|76.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/nodewarden/node_warden_proxy.cpp
|76.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/store_data_tx_unit.cpp
|76.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/store_data_tx_unit.cpp
|76.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_external_table.cpp
|76.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_external_table.cpp
|76.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__init_schema.cpp
|76.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/runtime/kqp_sequencer_actor.cpp
|76.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__init_schema.cpp
|76.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/runtime/kqp_sequencer_actor.cpp
|76.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard_import.cpp
|76.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_import.cpp
|76.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard_path_describer.cpp
|76.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_path_describer.cpp
|76.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/nodewarden/distconf_invoke_storage_config.cpp
|76.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/scrub/scrub_actor_unreadable.cpp
|76.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/nodewarden/distconf_invoke_storage_config.cpp
|76.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/scrub/scrub_actor_unreadable.cpp
|76.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/plan_queue_unit.cpp
|76.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/plan_queue_unit.cpp
|76.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/make_snapshot_unit.cpp
|76.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/make_snapshot_unit.cpp
|76.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/prepare_kqp_data_tx_in_rs_unit.cpp
|76.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/prepare_kqp_data_tx_in_rs_unit.cpp
|76.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/nodewarden/distconf_invoke_common.cpp
|76.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/nodewarden/distconf_invoke_common.cpp
|76.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/gateway/behaviour/resource_pool_classifier/checker.cpp
|76.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/gateway/behaviour/resource_pool_classifier/checker.cpp
|76.1%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/gateway/behaviour/resource_pool_classifier/libgateway-behaviour-resource_pool_classifier.a
|76.1%| [AR] {RESULT} $(B)/ydb/core/kqp/gateway/behaviour/resource_pool_classifier/libgateway-behaviour-resource_pool_classifier.a
|76.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/skeleton/blobstorage_skeletonfront.cpp
|76.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_kesus.cpp
|76.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_kesus.cpp
|76.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/skeleton/blobstorage_skeletonfront.cpp
|76.2%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/gateway/behaviour/resource_pool_classifier/libgateway-behaviour-resource_pool_classifier.a
|76.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/scrub/blob_recovery.cpp
|76.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/scrub/blob_recovery.cpp
|76.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/query/query_range.cpp
|76.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/opt/physical/kqp_opt_phy_sort.cpp
|76.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/query/query_range.cpp
|76.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/physical/kqp_opt_phy_sort.cpp
|76.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/load_test/ycsb/common.cpp
|76.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/load_test/ycsb/common.cpp
|76.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/fetched_data.cpp
|76.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/fetched_data.cpp
|76.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/repl/blobstorage_replbroker.cpp
|76.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/repl/blobstorage_replbroker.cpp
|76.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/ymq/base/counters.cpp
|76.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/scrub/scrub_actor_snapshot.cpp
|76.2%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/ymq/base/libcore-ymq-base.a
|76.2%| [AR] {RESULT} $(B)/ydb/core/ymq/base/libcore-ymq-base.a
|76.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ymq/base/counters.cpp
|76.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/scrub/scrub_actor_snapshot.cpp
|76.2%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/ymq/base/libcore-ymq-base.a
|76.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/load_test/ycsb/test_load_read_iterator.cpp
|76.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/load_test/ycsb/test_load_read_iterator.cpp
|76.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/source.cpp
|76.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/source.cpp
|76.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/load_write_details_unit.cpp
|76.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/load_write_details_unit.cpp
|76.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/execute_kqp_data_tx_unit.cpp
|76.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/execute_kqp_data_tx_unit.cpp
|76.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/opt/peephole/kqp_opt_peephole_write_constraint.cpp
|76.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/peephole/kqp_opt_peephole_write_constraint.cpp
|76.2%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/opt/peephole/libkqp-opt-peephole.a
|76.2%| [AR] {RESULT} $(B)/ydb/core/kqp/opt/peephole/libkqp-opt-peephole.a
|76.2%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/opt/peephole/libkqp-opt-peephole.a
|76.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/test_helper/helper.cpp
|76.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/test_helper/helper.cpp
|76.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/opt/physical/effects/kqp_opt_phy_upsert_defaults.cpp
|76.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/physical/effects/kqp_opt_phy_upsert_defaults.cpp
|76.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mon_alloc/monitor.cpp
|76.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mon_alloc/monitor.cpp
|76.2%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/mon_alloc/libydb-core-mon_alloc.a
|76.2%| [AR] {RESULT} $(B)/ydb/core/mon_alloc/libydb-core-mon_alloc.a
|76.2%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/mon_alloc/libydb-core-mon_alloc.a
|76.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/olap/bg_tasks/tx_chain/actor.cpp
|76.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/olap/bg_tasks/tx_chain/actor.cpp
|76.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/driver_lib/cli_utils/cli_cmds_validate_config.cpp
|76.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/driver_lib/cli_utils/cli_cmds_validate_config.cpp
|76.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/prepare_distributed_erase_tx_in_rs_unit.cpp
|76.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/prepare_distributed_erase_tx_in_rs_unit.cpp
|76.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/olap/bg_tasks/tx_chain/session.cpp
|76.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/olap/bg_tasks/tx_chain/session.cpp
|76.3%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/olap/bg_tasks/tx_chain/libolap-bg_tasks-tx_chain.a
|76.3%| [AR] {RESULT} $(B)/ydb/core/tx/schemeshard/olap/bg_tasks/tx_chain/libolap-bg_tasks-tx_chain.a
|76.3%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/schemeshard/olap/bg_tasks/tx_chain/libolap-bg_tasks-tx_chain.a
|76.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/build_index/secondary_index.cpp
|76.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/build_index/secondary_index.cpp
|76.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/gateway/behaviour/external_data_source/manager.cpp
|76.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/gateway/behaviour/external_data_source/manager.cpp
|76.3%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/gateway/behaviour/external_data_source/libgateway-behaviour-external_data_source.a
|76.3%| [AR] {RESULT} $(B)/ydb/core/kqp/gateway/behaviour/external_data_source/libgateway-behaviour-external_data_source.a
|76.3%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/gateway/behaviour/external_data_source/libgateway-behaviour-external_data_source.a
|76.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/follower_edge.cpp
|76.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/runtime/kqp_tasks_runner.cpp
|76.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/follower_edge.cpp
|76.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/runtime/kqp_tasks_runner.cpp
|76.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_fs.cpp
|76.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_fs.cpp
|76.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/test_helper/controllers.cpp
|76.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/test_helper/controllers.cpp
|76.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_continuous_backup.cpp
|76.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_continuous_backup.cpp
|76.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/runtime/kqp_output_stream.cpp
|76.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/opt/physical/effects/kqp_opt_phy_update.cpp
|76.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/runtime/kqp_output_stream.cpp
|76.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/physical/effects/kqp_opt_phy_update.cpp
|76.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard_build_index__forget.cpp
|76.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_build_index__forget.cpp
|76.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/finalize_script_service/kqp_finalize_script_actor.cpp
|76.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/finalize_script_service/kqp_finalize_script_actor.cpp
|76.3%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/finalize_script_service/libcore-kqp-finalize_script_service.a
|76.3%| [AR] {RESULT} $(B)/ydb/core/kqp/finalize_script_service/libcore-kqp-finalize_script_service.a
|76.3%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/finalize_script_service/libcore-kqp-finalize_script_service.a
|76.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/query/assimilation.cpp
|76.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_table.cpp
|76.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/opt/physical/effects/kqp_opt_phy_indexes.cpp
|76.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/query/assimilation.cpp
|76.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_table.cpp
|76.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/physical/effects/kqp_opt_phy_indexes.cpp
|76.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/opt/physical/kqp_opt_phy_limit.cpp
|76.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/physical/kqp_opt_phy_limit.cpp
|76.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/opt/physical/effects/kqp_opt_phy_insert.cpp
|76.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/physical/effects/kqp_opt_phy_insert.cpp
|76.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/driver_lib/cli_utils/cli_cmds_disk.cpp
|76.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/driver_lib/cli_utils/cli_cmds_disk.cpp
|76.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/gateway/behaviour/tablestore/manager.cpp
|76.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/gateway/behaviour/tablestore/manager.cpp
|76.4%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/gateway/behaviour/tablestore/libgateway-behaviour-tablestore.a
|76.4%| [AR] {RESULT} $(B)/ydb/core/kqp/gateway/behaviour/tablestore/libgateway-behaviour-tablestore.a
|76.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__table_stats_histogram.cpp
|76.4%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/gateway/behaviour/tablestore/libgateway-behaviour-tablestore.a
|76.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__table_stats_histogram.cpp
|76.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/rm_service/kqp_rm_service.cpp
|76.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/rm_service/kqp_rm_service.cpp
|76.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard__engine_host.cpp
|76.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard__engine_host.cpp
|76.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/load_tx_details_unit.cpp
|76.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/load_tx_details_unit.cpp
|76.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__root_data_erasure_manager.cpp
|76.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__root_data_erasure_manager.cpp
|76.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/address_classification/net_classifier.cpp
|76.4%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/mind/address_classification/libcore-mind-address_classification.a
|76.4%| [AR] {RESULT} $(B)/ydb/core/mind/address_classification/libcore-mind-address_classification.a
|76.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/address_classification/net_classifier.cpp
|76.4%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/mind/address_classification/libcore-mind-address_classification.a
|76.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/export/session/selector/abstract/selector.cpp
|76.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/export/session/selector/abstract/selector.cpp
|76.4%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/export/session/selector/abstract/libsession-selector-abstract.a
|76.4%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/export/session/selector/abstract/libsession-selector-abstract.a
|76.4%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/export/session/selector/abstract/libsession-selector-abstract.a
|76.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/proxy_service/kqp_proxy_peer_stats_calculator.cpp
|76.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/proxy_service/kqp_proxy_peer_stats_calculator.cpp
|76.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/opt/physical/kqp_opt_phy_stage_float_up.cpp
|76.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/hulldb/compstrat/hulldb_compstrat_selector.cpp
|76.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/physical/kqp_opt_phy_stage_float_up.cpp
|76.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/hulldb/compstrat/hulldb_compstrat_selector.cpp
|76.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/import_s3.cpp
|76.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/import_s3.cpp
|76.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/opt/physical/kqp_opt_phy_build_stage.cpp
|76.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/physical/kqp_opt_phy_build_stage.cpp
|76.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_part.cpp
|76.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_part.cpp
|76.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/upload_stats.cpp
|76.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/upload_stats.cpp
|76.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/opt/physical/effects/kqp_opt_phy_uniq_helper.cpp
|76.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/physical/effects/kqp_opt_phy_uniq_helper.cpp
|76.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/runtime/kqp_stream_lookup_worker.cpp
|76.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/runtime/kqp_stream_lookup_worker.cpp
|76.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/initiate_build_index_unit.cpp
|76.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/initiate_build_index_unit.cpp
|76.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/runtime/kqp_stream_lookup_actor.cpp
|76.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/runtime/kqp_stream_lookup_actor.cpp
|76.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/hullop/hullcompdelete/blobstorage_hullcompdelete.cpp
|76.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/hullop/hullcompdelete/blobstorage_hullcompdelete.cpp
|76.5%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/vdisk/hullop/hullcompdelete/libvdisk-hullop-hullcompdelete.a
|76.5%| [AR] {RESULT} $(B)/ydb/core/blobstorage/vdisk/hullop/hullcompdelete/libvdisk-hullop-hullcompdelete.a
|76.5%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/vdisk/hullop/hullcompdelete/libvdisk-hullop-hullcompdelete.a
|76.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/nodewarden/node_warden_scrub.cpp
|76.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/nodewarden/node_warden_scrub.cpp
|76.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/scrub/blob_recovery_queue.cpp
|76.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/scrub/blob_recovery_queue.cpp
|76.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/rm_service/kqp_snapshot_manager.cpp
|76.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/rm_service/kqp_snapshot_manager.cpp
|76.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/workload_service/actors/scheme_actors.cpp
|76.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/workload_service/actors/scheme_actors.cpp
|76.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/nodewarden/node_warden_cache.cpp
|76.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/nodewarden/node_warden_stat_aggr.cpp
|76.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/nodewarden/node_warden_cache.cpp
|76.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/nodewarden/node_warden_stat_aggr.cpp
|76.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/constructor/constructor.cpp
|76.5%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/reader/simple_reader/constructor/libreader-simple_reader-constructor.global.a
|76.5%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/reader/simple_reader/constructor/libreader-simple_reader-constructor.global.a
|76.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/constructor/constructor.cpp
|76.5%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/simple_reader/constructor/libreader-simple_reader-constructor.global.a
|76.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/tenant_slot_broker__load_state.cpp
|76.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/tenant_slot_broker__load_state.cpp
|76.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard_impl.cpp
|76.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_impl.cpp
|76.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/nodewarden/node_warden_group_resolver.cpp
|76.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/nodewarden/node_warden_group_resolver.cpp
|76.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/opt/physical/effects/kqp_opt_phy_insert_index.cpp
|76.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/gateway/behaviour/tablestore/operations/upsert_opt.cpp
|76.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/gateway/behaviour/tablestore/operations/upsert_opt.cpp
|76.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/physical/effects/kqp_opt_phy_insert_index.cpp
|76.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/runtime/kqp_write_actor.cpp
|76.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/runtime/kqp_write_actor.cpp
|76.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/finish_propose_write_unit.cpp
|76.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/finish_propose_write_unit.cpp
|76.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/driver_lib/cli_utils/cli_cmds_debug.cpp
|76.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/driver_lib/cli_utils/cli_cmds_debug.cpp
|76.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/load_test/memory.cpp
|76.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/load_test/memory.cpp
|76.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/hullop/blobstorage_hullactor.cpp
|76.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/build_index/reshuffle_kmeans.cpp
|76.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/opt/physical/kqp_opt_phy_olap_filter.cpp
|76.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/hullop/blobstorage_hullactor.cpp
|76.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/build_index/reshuffle_kmeans.cpp
|76.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/physical/kqp_opt_phy_olap_filter.cpp
|76.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/gateway/behaviour/tablestore/operations/drop_column.cpp
|76.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/gateway/behaviour/tablestore/operations/drop_column.cpp
|76.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/build_index/local_kmeans.cpp
|76.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/build_index/local_kmeans.cpp
|76.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/tablet/rpc_execute_mkql.cpp
|76.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/tablet/rpc_execute_mkql.cpp
|76.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/scrub/scrub_actor_huge.cpp
|76.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/scrub/scrub_actor_huge.cpp
|76.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/query_data/kqp_predictor.cpp
|76.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/query_data/kqp_predictor.cpp
|76.6%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/query_data/libcore-kqp-query_data.a
|76.6%| [AR] {RESULT} $(B)/ydb/core/kqp/query_data/libcore-kqp-query_data.a
|76.6%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/query_data/libcore-kqp-query_data.a
|76.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/load_test/group_write.cpp
|76.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/load_test/group_write.cpp
|76.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/opt/physical/kqp_opt_phy_helpers.cpp
|76.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/physical/kqp_opt_phy_helpers.cpp
|76.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/store_commit_writes_tx_unit.cpp
|76.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/store_commit_writes_tx_unit.cpp
|76.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/opt/physical/kqp_opt_phy.cpp
|76.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/physical/kqp_opt_phy.cpp
|76.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/nodewarden/node_warden_pdisk.cpp
|76.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/nodewarden/node_warden_pdisk.cpp
|76.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/nodewarden/distconf_mon.cpp
|76.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/nodewarden/distconf_mon.cpp
|76.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/nodewarden/node_warden_group.cpp
|76.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/nodewarden/node_warden_group.cpp
|76.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/hullop/blobstorage_hulllogcutternotify.cpp
|76.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/hullop/blobstorage_hulllogcutternotify.cpp
|76.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/tablet/rpc_change_schema.cpp
|76.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/tablet/rpc_change_schema.cpp
|76.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/hullop/blobstorage_hull.cpp
|76.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/hullop/blobstorage_hull.cpp
|76.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/workload_service/actors/cpu_load_actors.cpp
|76.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/workload_service/actors/cpu_load_actors.cpp
|76.6%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/workload_service/actors/libkqp-workload_service-actors.a
|76.6%| [AR] {RESULT} $(B)/ydb/core/kqp/workload_service/actors/libkqp-workload_service-actors.a
|76.7%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/workload_service/actors/libkqp-workload_service-actors.a
|76.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/opt/physical/kqp_opt_phy_olap_agg.cpp
|76.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/physical/kqp_opt_phy_olap_agg.cpp
|76.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/federated_query/kqp_federated_query_actors.cpp
|76.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/fetch_steps.cpp
|76.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/federated_query/kqp_federated_query_actors.cpp
|76.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/fetch_steps.cpp
|76.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/load_test/ycsb/bulk_mkql_upsert.cpp
|76.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/load_test/ycsb/bulk_mkql_upsert.cpp
|76.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/opt/physical/effects/kqp_opt_phy_update_index.cpp
|76.7%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/load_test/libydb-core-load_test.a
|76.7%| [AR] {RESULT} $(B)/ydb/core/load_test/libydb-core-load_test.a
|76.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/physical/effects/kqp_opt_phy_update_index.cpp
|76.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/hullop/hullop_compactfreshappendix.cpp
|76.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/hullop/hullop_compactfreshappendix.cpp
|76.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/gateway/behaviour/tablestore/operations/alter_column.cpp
|76.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/gateway/behaviour/tablestore/operations/alter_column.cpp
|76.7%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/load_test/libydb-core-load_test.a
|76.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/direct_tx_unit.cpp
|76.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_indexed_table.cpp
|76.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/direct_tx_unit.cpp
|76.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_indexed_table.cpp
|76.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/metadata/ds_table/table_exists.cpp
|76.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/tablet/rpc_restart_tablet.cpp
|76.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/ds_table/table_exists.cpp
|76.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/tablet/rpc_restart_tablet.cpp
|76.7%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/grpc_services/tablet/libcore-grpc_services-tablet.a
|76.7%| [AR] {RESULT} $(B)/ydb/core/grpc_services/tablet/libcore-grpc_services-tablet.a
|76.7%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/grpc_services/tablet/libcore-grpc_services-tablet.a
|76.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/rm_service/kqp_resource_info_exchanger.cpp
|76.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/rm_service/kqp_resource_info_exchanger.cpp
|76.7%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/rm_service/libcore-kqp-rm_service.a
|76.7%| [AR] {RESULT} $(B)/ydb/core/kqp/rm_service/libcore-kqp-rm_service.a
|76.7%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/rm_service/libcore-kqp-rm_service.a
|76.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/gateway/behaviour/tablestore/operations/drop_index.cpp
|76.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/gateway/behaviour/tablestore/operations/drop_index.cpp
|76.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/runtime/kqp_read_actor.cpp
|76.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/resharding/update.cpp
|76.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/runtime/kqp_read_actor.cpp
|76.7%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/resharding/libalter-in_store-resharding.a
|76.7%| [AR] {RESULT} $(B)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/resharding/libalter-in_store-resharding.a
|76.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/resharding/update.cpp
|76.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_subdomain.cpp
|76.7%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/resharding/libalter-in_store-resharding.a
|76.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_subdomain.cpp
|76.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/coordinator/coordinator__init.cpp
|76.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/coordinator/coordinator__init.cpp
|76.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/metadata/abstract/common.cpp
|76.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/abstract/common.cpp
|76.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/nodewarden/distconf_binding.cpp
|76.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_restore.cpp
|76.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/nodewarden/distconf_binding.cpp
|76.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_restore.cpp
|76.8%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/nodewarden/libcore-blobstorage-nodewarden.a
|76.8%| [AR] {RESULT} $(B)/ydb/core/blobstorage/nodewarden/libcore-blobstorage-nodewarden.a
|76.8%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/nodewarden/libcore-blobstorage-nodewarden.a
|76.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/opt/physical/effects/kqp_opt_phy_returning.cpp
|76.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/physical/effects/kqp_opt_phy_returning.cpp
|76.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/coordinator/coordinator__schema.cpp
|76.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/coordinator/coordinator__schema.cpp
|76.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/opt/physical/effects/kqp_opt_phy_upsert_index.cpp
|76.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/physical/effects/kqp_opt_phy_upsert_index.cpp
|76.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/hulldb/compstrat/hulldb_compstrat_defs.cpp
|76.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/hulldb/compstrat/hulldb_compstrat_defs.cpp
|76.8%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/vdisk/hulldb/compstrat/libvdisk-hulldb-compstrat.a
|76.8%| [AR] {RESULT} $(B)/ydb/core/blobstorage/vdisk/hulldb/compstrat/libvdisk-hulldb-compstrat.a
|76.8%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/vdisk/hulldb/compstrat/libvdisk-hulldb-compstrat.a
|76.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_db_changes.cpp
|76.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_db_changes.cpp
|76.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/gateway/behaviour/tablestore/operations/alter_sharding.cpp
|76.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/gateway/behaviour/tablestore/operations/alter_sharding.cpp
|76.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/opt/physical/kqp_opt_phy_precompute.cpp
|76.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/physical/kqp_opt_phy_precompute.cpp
|76.8%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/opt/physical/libkqp-opt-physical.a
|76.8%| [AR] {RESULT} $(B)/ydb/core/kqp/opt/physical/libkqp-opt-physical.a
|76.8%| [AR] {BAZEL_UPLOAD}
$(B)/ydb/core/kqp/opt/physical/libkqp-opt-physical.a |76.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/gateway/behaviour/tablestore/operations/add_column.cpp |76.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/gateway/behaviour/tablestore/operations/add_column.cpp |76.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/execute_write_unit.cpp |76.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/execute_write_unit.cpp |76.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/driver_lib/cli_utils/cli_cmds_tenant.cpp |76.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/driver_lib/cli_utils/cli_cmds_tenant.cpp |76.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/tenant_slot_broker__check_slot_status.cpp |76.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/tenant_slot_broker__check_slot_status.cpp |76.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/federated_query/kqp_federated_query_helpers.cpp |76.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/federated_query/kqp_federated_query_helpers.cpp |76.8%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/federated_query/libcore-kqp-federated_query.a |76.8%| [AR] {RESULT} $(B)/ydb/core/kqp/federated_query/libcore-kqp-federated_query.a |76.9%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/federated_query/libcore-kqp-federated_query.a |76.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/metadata/ds_table/service.cpp |76.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/execute_commit_writes_tx_unit.cpp |76.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/ds_table/service.cpp |76.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/execute_commit_writes_tx_unit.cpp |76.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blob_depot/op_load.cpp |76.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blob_depot/op_load.cpp |76.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/opt/physical/effects/kqp_opt_phy_delete_index.cpp |76.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_restore_incremental_backup.cpp |76.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/physical/effects/kqp_opt_phy_delete_index.cpp |76.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_restore_incremental_backup.cpp |76.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/tenant_slot_broker__update_slot_status.cpp |76.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/scrub/blob_recovery_request.cpp |76.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/tenant_slot_broker__update_slot_status.cpp |76.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/scrub/blob_recovery_request.cpp |76.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/persqueue_v1/actors/read_info_actor.cpp |76.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/persqueue_v1/actors/read_info_actor.cpp |76.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/tenant_slot_broker__init_scheme.cpp |76.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/tenant_slot_broker__init_scheme.cpp |76.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/olap/column_families/update.cpp |76.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/olap/column_families/update.cpp |76.9%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/olap/column_families/libschemeshard-olap-column_families.a |76.9%| [AR] {RESULT} 
$(B)/ydb/core/tx/schemeshard/olap/column_families/libschemeshard-olap-column_families.a |76.9%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/schemeshard/olap/column_families/libschemeshard-olap-column_families.a |76.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/opt/physical/effects/kqp_opt_phy_effects.cpp |76.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/physical/effects/kqp_opt_phy_effects.cpp |76.9%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/opt/physical/effects/libopt-physical-effects.a |76.9%| [AR] {RESULT} $(B)/ydb/core/kqp/opt/physical/effects/libopt-physical-effects.a |76.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/test_helper/shard_reader.cpp |76.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/test_helper/shard_reader.cpp |76.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_move_table_index.cpp |76.9%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/opt/physical/effects/libopt-physical-effects.a |76.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_move_table_index.cpp |76.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/gateway/behaviour/tablestore/operations/abstract.cpp |76.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/gateway/behaviour/tablestore/operations/abstract.cpp |76.9%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/gateway/behaviour/tablestore/operations/libbehaviour-tablestore-operations.a |76.9%| [AR] {RESULT} $(B)/ydb/core/kqp/gateway/behaviour/tablestore/operations/libbehaviour-tablestore-operations.a |76.9%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/gateway/behaviour/tablestore/operations/libbehaviour-tablestore-operations.a |76.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/replication/ydb_proxy/ydb_proxy.cpp |76.9%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/replication/ydb_proxy/libtx-replication-ydb_proxy.a |76.9%| [AR] {RESULT} $(B)/ydb/core/tx/replication/ydb_proxy/libtx-replication-ydb_proxy.a |76.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/replication/ydb_proxy/ydb_proxy.cpp |76.9%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/replication/ydb_proxy/libtx-replication-ydb_proxy.a |76.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/configured_tablet_bootstrapper.cpp |76.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/configured_tablet_bootstrapper.cpp |76.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/constructor/one_layer.cpp |77.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/constructor/one_layer.cpp |76.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/build_index/prefix_kmeans.cpp |77.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/driver_lib/cli_utils/cli_persqueue_stress.cpp |77.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/build_index/prefix_kmeans.cpp |77.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/driver_lib/cli_utils/cli_persqueue_stress.cpp |77.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/gateway/behaviour/resource_pool/manager.cpp |77.0%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/gateway/behaviour/resource_pool/libgateway-behaviour-resource_pool.a |77.0%| [AR] {RESULT} $(B)/ydb/core/kqp/gateway/behaviour/resource_pool/libgateway-behaviour-resource_pool.a |77.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/gateway/behaviour/resource_pool/manager.cpp |77.0%| [AR] {BAZEL_UPLOAD} 
$(B)/ydb/core/kqp/gateway/behaviour/resource_pool/libgateway-behaviour-resource_pool.a |77.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/tx_proxy/proxy.cpp |77.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/tx_proxy/proxy.cpp |77.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blob_depot/data_mon.cpp |77.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blob_depot/data_mon.cpp |77.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/public_http/http_service.cpp |77.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/public_http/http_service.cpp |77.0%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/public_http/libydb-core-public_http.a |77.0%| [AR] {RESULT} $(B)/ydb/core/public_http/libydb-core-public_http.a |77.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/synclog/blobstorage_synclogkeeper.cpp |77.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mon/mon.cpp |77.0%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/public_http/libydb-core-public_http.a |77.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/synclog/blobstorage_synclogkeeper.cpp |77.0%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/mon/libydb-core-mon.a |77.0%| [AR] {RESULT} $(B)/ydb/core/mon/libydb-core-mon.a |77.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mon/mon.cpp |77.0%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/mon/libydb-core-mon.a |77.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/tx_proxy/commitreq.cpp |77.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/tx_proxy/commitreq.cpp |77.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/tx_proxy/rpc_long_tx.cpp |77.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/tx_proxy/rpc_long_tx.cpp |77.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/deprecated/persqueue_v0/grpc_pq_write.cpp |77.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/deprecated/persqueue_v0/grpc_pq_write.cpp |77.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/tx_allocator/txallocator__scheme.cpp |77.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/finalize_plan_tx_unit.cpp |77.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/tx_allocator/txallocator__scheme.cpp |77.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/finalize_plan_tx_unit.cpp |77.0%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/tx_allocator/libcore-tx-tx_allocator.a |77.0%| [AR] {RESULT} $(B)/ydb/core/tx/tx_allocator/libcore-tx-tx_allocator.a |77.0%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/tx_allocator/libcore-tx-tx_allocator.a |77.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/execute_distributed_erase_tx_unit.cpp |77.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/execute_distributed_erase_tx_unit.cpp |77.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/tx_proxy/resolvereq.cpp |77.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/tx_proxy/resolvereq.cpp |77.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blob_depot/mon_main.cpp |77.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blob_depot/mon_main.cpp |77.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/gateway/behaviour/tablestore/operations/upsert_index.cpp |77.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/object.cpp |77.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/gateway/behaviour/tablestore/operations/upsert_index.cpp |77.1%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/liboperations-alter-in_store.a 
|77.1%| [AR] {RESULT} $(B)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/liboperations-alter-in_store.a |77.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/object.cpp |77.1%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/liboperations-alter-in_store.a |77.1%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/gateway/behaviour/tablestore/operations/libbehaviour-tablestore-operations.global.a |77.1%| [AR] {RESULT} $(B)/ydb/core/kqp/gateway/behaviour/tablestore/operations/libbehaviour-tablestore-operations.global.a |77.1%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/gateway/behaviour/tablestore/operations/libbehaviour-tablestore-operations.global.a |77.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blob_depot/agent.cpp |77.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blob_depot/agent.cpp |77.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/tx_proxy/describe.cpp |77.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/tx_proxy/describe.cpp |77.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_pq.cpp |77.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_pq.cpp |77.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blob_depot/space_monitor.cpp |77.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blob_depot/space_monitor.cpp |77.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/sys_view/auth/permissions.cpp |77.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/sys_view/auth/permissions.cpp |77.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blob_depot/agent/storage_block.cpp |77.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/olap/operations/alter/abstract/context.cpp |77.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blob_depot/agent/storage_block.cpp |77.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/olap/operations/alter/abstract/update.cpp |77.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/olap/operations/alter/abstract/context.cpp |77.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/olap/operations/alter/abstract/update.cpp |77.1%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/olap/operations/alter/abstract/liboperations-alter-abstract.a |77.1%| [AR] {RESULT} $(B)/ydb/core/tx/schemeshard/olap/operations/alter/abstract/liboperations-alter-abstract.a |77.1%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/schemeshard/olap/operations/alter/abstract/liboperations-alter-abstract.a |77.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blob_depot/agent/blocks.cpp |77.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blob_depot/agent/blocks.cpp |77.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blob_depot/agent/resolved_value.cpp |77.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blob_depot/agent/resolved_value.cpp |77.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/sys_view/auth/groups.cpp |77.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/client/minikql_compile/mkql_compile_service.cpp |77.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/sys_view/auth/groups.cpp |77.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/client/minikql_compile/mkql_compile_service.cpp |77.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/hulldb/base/blobstorage_hulldefs.cpp |77.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/hulldb/base/blobstorage_hulldefs.cpp |77.1%| [CC] {default-linux-x86_64, release, asan} 
$(S)/ydb/core/tx/tiering/manager.cpp |77.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/tiering/manager.cpp |77.1%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/tiering/libcore-tx-tiering.a |77.1%| [AR] {RESULT} $(B)/ydb/core/tx/tiering/libcore-tx-tiering.a |77.1%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/tiering/libcore-tx-tiering.a |77.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/olap/layout/layout.cpp |77.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blob_depot/agent/metrics.cpp |77.1%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/olap/layout/libschemeshard-olap-layout.a |77.2%| [AR] {RESULT} $(B)/ydb/core/tx/schemeshard/olap/layout/libschemeshard-olap-layout.a |77.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/olap/layout/layout.cpp |77.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blob_depot/agent/metrics.cpp |77.2%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/schemeshard/olap/layout/libschemeshard-olap-layout.a |77.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/tx_proxy/datareq.cpp |77.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/tx_proxy/datareq.cpp |77.2%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/tx_proxy/libcore-tx-tx_proxy.a |77.2%| [AR] {RESULT} $(B)/ydb/core/tx/tx_proxy/libcore-tx-tx_proxy.a |77.2%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/tx_proxy/libcore-tx-tx_proxy.a |77.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/driver_lib/cli_utils/cli_cmds_genconfig.cpp |77.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/driver_lib/cli_utils/cli_cmds_genconfig.cpp |77.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/viewer/json_handlers_pdisk.cpp |77.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/viewer/json_handlers_pdisk.cpp |77.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/synclog/blobstorage_synclogkeeper_committer.cpp |77.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/synclog/blobstorage_synclogkeeper_committer.cpp |77.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/drop_index_notice_unit.cpp |77.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/drop_index_notice_unit.cpp |77.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/common/vdisk_log.cpp |77.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/common/vdisk_log.cpp |77.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/tenant_slot_broker__update_config.cpp |77.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/tenant_slot_broker__update_config.cpp |77.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__pq_stats.cpp |77.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blob_depot/agent/garbage.cpp |77.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__pq_stats.cpp |77.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blob_depot/agent/garbage.cpp |77.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/viewer/json_handlers_scheme.cpp |77.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/viewer/json_handlers_scheme.cpp |77.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/viewer/json_handlers_browse.cpp |77.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/viewer/json_handlers_browse.cpp |77.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/viewer/json_handlers_query.cpp |77.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/viewer/json_handlers_query.cpp |77.2%| [CC] {default-linux-x86_64, release, asan} 
$(S)/ydb/core/blobstorage/vdisk/localrecovery/localrecovery_readbulksst.cpp |77.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/localrecovery/localrecovery_readbulksst.cpp |77.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_common_cdc_stream.cpp |77.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_common_cdc_stream.cpp |77.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/tenant_slot_broker__update_pool_status.cpp |77.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/tenant_slot_broker__update_pool_status.cpp |77.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/tenant_slot_broker__assign_free_slots.cpp |77.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/tenant_slot_broker__assign_free_slots.cpp |77.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard_build_index_tx_base.cpp |77.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_build_index_tx_base.cpp |77.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_backup.cpp |77.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_backup.cpp |77.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/synclog/blobstorage_synclogkeeper_state.cpp |77.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/synclog/blobstorage_synclogkeeper_state.cpp |77.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/runtime/kqp_read_iterator_common.cpp |77.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/runtime/kqp_read_iterator_common.cpp |77.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/tenant_slot_broker__update_node_location.cpp |77.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blob_depot/agent/comm.cpp |77.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/tenant_slot_broker__update_node_location.cpp |77.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blob_depot/agent/comm.cpp |77.3%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/blob_depot/agent/libcore-blob_depot-agent.a |77.3%| [AR] {RESULT} $(B)/ydb/core/blob_depot/agent/libcore-blob_depot-agent.a |77.3%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/blob_depot/agent/libcore-blob_depot-agent.a |77.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_copy_table.cpp |77.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_copy_table.cpp |77.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/hulldb/fresh/fresh_datasnap.cpp |77.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/hulldb/fresh/fresh_datasnap.cpp |77.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/viewer/json_handlers_storage.cpp |77.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/viewer/json_handlers_storage.cpp |77.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/scrub/scrub_actor_mon.cpp |77.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/scrub/scrub_actor_mon.cpp |77.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/node_broker__extend_lease.cpp |77.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/node_broker__extend_lease.cpp |77.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/viewer/json_handlers_pq.cpp |77.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/tenant_slot_broker__alter_tenant.cpp |77.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/viewer/json_handlers_pq.cpp |77.3%| [CC] 
{BAZEL_UPLOAD} $(S)/ydb/core/mind/tenant_slot_broker__alter_tenant.cpp |77.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/client/server/msgbus_server_tablet_counters.cpp |77.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/client/server/msgbus_server_tablet_counters.cpp |77.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_common_pq.cpp |77.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_common_pq.cpp |77.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_subdomain_path_id.cpp |77.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_subdomain_path_id.cpp |77.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/local.cpp |77.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/local.cpp |77.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/driver_lib/cli_utils/cli_cmds_root.cpp |77.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/driver_lib/cli_utils/cli_cmds_root.cpp |77.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/viewer/json_wb_req.cpp |77.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/storage/indexes/bloom_ngramm/constructor.cpp |77.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/storage/indexes/bloom_ngramm/constructor.cpp |77.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/viewer/json_wb_req.cpp |77.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard_build_index__create.cpp |77.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_build_index__create.cpp |77.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/driver_lib/cli_utils/cli_cmds_node.cpp |77.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/driver_lib/cli_utils/cli_cmds_node.cpp |77.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_common_external_table.cpp |77.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_common_external_table.cpp |77.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/synclog/blobstorage_synclogreader.cpp |77.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/synclog/blobstorage_synclogreader.cpp |77.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/memory_state_migration.cpp |77.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/memory_state_migration.cpp |77.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_common_resource_pool.cpp |77.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_common_resource_pool.cpp |77.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_pq.cpp |77.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/synclog/blobstorage_synclogrecovery.cpp |77.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_pq.cpp |77.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/synclog/blobstorage_synclogrecovery.cpp |77.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/sys_view/auth/users.cpp |77.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/sys_view/auth/users.cpp |77.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/synclog/blobstorage_synclogdata.cpp |77.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/synclog/blobstorage_synclogdata.cpp |77.4%| [CC] {default-linux-x86_64, release, asan} 
$(S)/ydb/core/mind/tenant_slot_broker.cpp |77.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/execute_kqp_scan_tx_unit.cpp |77.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/hulldb/bulksst_add/hulldb_bulksst_add.cpp |77.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/execute_kqp_scan_tx_unit.cpp |77.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/tenant_slot_broker.cpp |77.4%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/vdisk/hulldb/bulksst_add/libvdisk-hulldb-bulksst_add.a |77.4%| [AR] {RESULT} $(B)/ydb/core/blobstorage/vdisk/hulldb/bulksst_add/libvdisk-hulldb-bulksst_add.a |77.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/hulldb/bulksst_add/hulldb_bulksst_add.cpp |77.4%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/vdisk/hulldb/bulksst_add/libvdisk-hulldb-bulksst_add.a |77.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard_import_flow_proposals.cpp |77.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/library/table_creator/table_creator.cpp |77.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_import_flow_proposals.cpp |77.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/library/table_creator/table_creator.cpp |77.4%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/library/table_creator/libydb-library-table_creator.a |77.4%| [AR] {RESULT} $(B)/ydb/library/table_creator/libydb-library-table_creator.a |77.4%| [AR] {BAZEL_UPLOAD} $(B)/ydb/library/table_creator/libydb-library-table_creator.a |77.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_common_subdomain.cpp |77.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_common_subdomain.cpp |77.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/hulldb/fresh/fresh_segment.cpp |77.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/hulldb/fresh/fresh_segment.cpp |77.4%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/vdisk/hulldb/fresh/libvdisk-hulldb-fresh.a |77.4%| [AR] {RESULT} $(B)/ydb/core/blobstorage/vdisk/hulldb/fresh/libvdisk-hulldb-fresh.a |77.4%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/vdisk/hulldb/fresh/libvdisk-hulldb-fresh.a |77.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/metadata/service.cpp |77.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/service.cpp |77.4%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/services/metadata/libydb-services-metadata.a |77.4%| [AR] {RESULT} $(B)/ydb/services/metadata/libydb-services-metadata.a |77.4%| [AR] {BAZEL_UPLOAD} $(B)/ydb/services/metadata/libydb-services-metadata.a |77.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/viewer/viewer_request.cpp |77.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/viewer/viewer_request.cpp |77.4%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/viewer/libydb-core-viewer.a |77.4%| [AR] {RESULT} $(B)/ydb/core/viewer/libydb-core-viewer.a |77.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/check_write_unit.cpp |77.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/check_write_unit.cpp |77.5%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/viewer/libydb-core-viewer.a |77.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/provider/read_attributes_utils.cpp |77.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/provider/read_attributes_utils.cpp |77.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/kesus/grpc_service.cpp |77.5%| 
[AR] {default-linux-x86_64, release, asan} $(B)/ydb/services/kesus/libydb-services-kesus.a |77.5%| [AR] {RESULT} $(B)/ydb/services/kesus/libydb-services-kesus.a |77.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/kesus/grpc_service.cpp |77.5%| [AR] {BAZEL_UPLOAD} $(B)/ydb/services/kesus/libydb-services-kesus.a |77.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/driver_lib/cli_utils/cli_persqueue.cpp |77.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/driver_lib/cli_utils/cli_persqueue.cpp |77.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/hulldb/barriers/barriers_public.cpp |77.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/hulldb/barriers/barriers_public.cpp |77.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/runtime/kqp_write_table.cpp |77.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/labels_maintainer.cpp |77.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/runtime/kqp_write_table.cpp |77.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/synclog/blobstorage_syncloghttp.cpp |77.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/labels_maintainer.cpp |77.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/driver_lib/cli_utils/cli_scheme_cache_append.cpp |77.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/synclog/blobstorage_syncloghttp.cpp |77.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/driver_lib/cli_utils/cli_scheme_cache_append.cpp |77.5%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/runtime/libcore-kqp-runtime.a |77.5%| [AR] {RESULT} $(B)/ydb/core/kqp/runtime/libcore-kqp-runtime.a |77.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard__get_state_tx.cpp |77.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard__get_state_tx.cpp |77.5%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/runtime/libcore-kqp-runtime.a |77.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/storage/indexes/portions/constructor.cpp |77.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/storage/indexes/portions/constructor.cpp |77.5%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/portions/libstorage-indexes-portions.a |77.5%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/portions/libstorage-indexes-portions.a |77.5%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/portions/libstorage-indexes-portions.a |77.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/actorlib_impl/read_data_protocol.cpp |77.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/actorlib_impl/read_data_protocol.cpp |77.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/hulldb/cache_block/cache_block.cpp |77.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/hulldb/cache_block/cache_block.cpp |77.5%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/vdisk/hulldb/cache_block/libvdisk-hulldb-cache_block.a |77.5%| [AR] {RESULT} $(B)/ydb/core/blobstorage/vdisk/hulldb/cache_block/libvdisk-hulldb-cache_block.a |77.5%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/vdisk/hulldb/cache_block/libvdisk-hulldb-cache_block.a |77.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/tenant_node_enumeration.cpp |77.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/tenant_node_enumeration.cpp |77.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/backup_unit.cpp |77.5%| [CC] {BAZEL_UPLOAD} 
$(S)/ydb/core/tx/datashard/backup_unit.cpp |77.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/tenant_pool.cpp |77.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/tenant_pool.cpp |77.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/coordinator/coordinator__mediators_confirmations.cpp |77.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/coordinator/coordinator__mediators_confirmations.cpp

------- [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/benchmarks_init/py3test >> test_generator.py::TestTpcdsGenerator::test_s1_state_and_parts

2025-05-07 08:17:02,059 WARNING devtools.ya.test.programs.test_tool.run_test.run_test: Wrapper execution timed out
2025-05-07 08:17:02,326 WARNING devtools.ya.test.programs.test_tool.run_test.run_test: Wrapper has overrun 600 secs timeout. Process tree before termination:
  pid    rss   ref   pdirt
  15646  4.0G  4.1G  4.0G  ydb-tests-functional-benchmarks_init --basetemp /home/runner/.ya/build/build_root/zvgn/002773/tmp --capture no -c pkg:library.python.pytest:pytest.yatest.ini -p no:factor --doc

Test command err:
  File "library/python/pytest/main.py", line 101, in main
    rc = pytest.main(
  File "contrib/python/pytest/py3/_pytest/config/__init__.py", line 175, in main
    ret: Union[ExitCode, int] = config.hook.pytest_cmdline_main(
  File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 513, in __call__
    return self._hookexec(self.name, self._hookimpls.copy(), kwargs, firstresult)
  File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120, in _hookexec
    return self._inner_hookexec(hook_name, methods, kwargs, firstresult)
  File "contrib/python/pluggy/py3/pluggy/_callers.py", line 103, in _multicall
    res = hook_impl.function(*args)
  File "contrib/python/pytest/py3/_pytest/main.py", line 320, in pytest_cmdline_main
    return wrap_session(config, _main)
  File "contrib/python/pytest/py3/_pytest/main.py", line 273, in wrap_session
    session.exitstatus = doit(config, session) or 0
  File "contrib/python/pytest/py3/_pytest/main.py", line 327, in _main
    config.hook.pytest_runtestloop(session=session)
  File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 513, in __call__
    return self._hookexec(self.name, self._hookimpls.copy(), kwargs, firstresult)
  File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120, in _hookexec
    return self._inner_hookexec(hook_name, methods, kwargs, firstresult)
  File "contrib/python/pluggy/py3/pluggy/_callers.py", line 103, in _multicall
    res = hook_impl.function(*args)
  File "contrib/python/pytest/py3/_pytest/main.py", line 352, in pytest_runtestloop
    item.config.hook.pytest_runtest_protocol(item=item, nextitem=nextitem)
  File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 513, in __call__
    return self._hookexec(self.name, self._hookimpls.copy(), kwargs, firstresult)
  File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120, in _hookexec
    return self._inner_hookexec(hook_name, methods, kwargs, firstresult)
  File "contrib/python/pluggy/py3/pluggy/_callers.py", line 103, in _multicall
    res = hook_impl.function(*args)
  File "contrib/python/pytest/py3/_pytest/runner.py", line 115, in pytest_runtest_protocol
    runtestprotocol(item, nextitem=nextitem)
  File "contrib/python/pytest/py3/_pytest/runner.py", line 134, in runtestprotocol
    reports.append(call_and_report(item, "call", log))
  File "contrib/python/pytest/py3/_pytest/runner.py", line 223, in call_and_report
    call = call_runtest_hook(item, when, **kwds)
  File "contrib/python/pytest/py3/_pytest/runner.py", line 262, in call_runtest_hook
    return CallInfo.from_call(
  File "contrib/python/pytest/py3/_pytest/runner.py", line 342, in from_call
    result: Optional[TResult] = func()
  File "contrib/python/pytest/py3/_pytest/runner.py", line 263, in <lambda>
    lambda: ihook(item=item, **kwds), when=when, reraise=reraise
  File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 513, in __call__
    return self._hookexec(self.name, self._hookimpls.copy(), kwargs, firstresult)
  File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120, in _hookexec
    return self._inner_hookexec(hook_name, methods, kwargs, firstresult)
  File "contrib/python/pluggy/py3/pluggy/_callers.py", line 103, in _multicall
    res = hook_impl.function(*args)
  File "contrib/python/pytest/py3/_pytest/runner.py", line 170, in pytest_runtest_call
    item.runtest()
  File "contrib/python/pytest/py3/_pytest/python.py", line 1844, in runtest
    self.ihook.pytest_pyfunc_call(pyfuncitem=self)
  File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 513, in __call__
    return self._hookexec(self.name, self._hookimpls.copy(), kwargs, firstresult)
  File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120, in _hookexec
    return self._inner_hookexec(hook_name, methods, kwargs, firstresult)
  File "contrib/python/pluggy/py3/pluggy/_callers.py", line 103, in _multicall
    res = hook_impl.function(*args)
  File "library/python/pytest/plugins/ya.py", line 563, in pytest_pyfunc_call
    pyfuncitem.retval = testfunction(**testargs)
  File "ydb/tests/functional/benchmarks_init/test_generator.py", line 191, in test_s1_state_and_parts
    return self.get_cannonical(paths=paths, execs=execs)
  File "ydb/tests/functional/benchmarks_init/test_generator.py", line 107, in get_cannonical
    return self.canonical_result(self.scale_hash(paths), self.tmp_path('s1.hash'))
  File "ydb/tests/functional/benchmarks_init/test_generator.py", line 90, in scale_hash
    t.join()
  File "contrib/tools/python3/Lib/threading.py", line 1149, in join
    self._wait_for_tstate_lock()
  File "contrib/tools/python3/Lib/threading.py", line 1169, in _wait_for_tstate_lock
    if lock.acquire(block, timeout):
  File "library/python/pytest/plugins/ya.py", line 344, in _graceful_shutdown
    traceback.print_stack(file=sys.stderr)

Thread 0x00007fcb4c735640 (most recent call first):
  File "ydb/tests/functional/benchmarks_init/test_generator.py", line 68 in calc_hashes
  File "ydb/tests/functional/benchmarks_init/test_generator.py", line 82 in _calc_hash
  File "contrib/tools/python3/Lib/threading.py", line 1012 in run
  File "contrib/tools/python3/Lib/threading.py", line 1075 in _bootstrap_inner
  File "contrib/tools/python3/Lib/threading.py", line 1032 in _bootstrap

Thread 0x00007fcb5175e640 (most recent call first):
  File "ydb/tests/functional/benchmarks_init/test_generator.py", line 69 in calc_hashes
  File "ydb/tests/functional/benchmarks_init/test_generator.py", line 82 in _calc_hash
  File "contrib/tools/python3/Lib/threading.py", line 1012 in run
  File "contrib/tools/python3/Lib/threading.py", line 1075 in _bootstrap_inner
  File "contrib/tools/python3/Lib/threading.py", line 1032 in _bootstrap

Thread 0x00007fcb5759a640 (most recent call first):
  File "ydb/tests/functional/benchmarks_init/test_generator.py", line 69 in calc_hashes
  File "ydb/tests/functional/benchmarks_init/test_generator.py", line 82 in _calc_hash
  File "contrib/tools/python3/Lib/threading.py", line 1012 in run
  File "contrib/tools/python3/Lib/threading.py", line 1075 in _bootstrap_inner
  File "contrib/tools/python3/Lib/threading.py", line 1032 in _bootstrap

Current thread 0x00007fcb60e77940 (most recent call first):
  File "contrib/tools/python3/Lib/posixpath.py", line 462 in _joinrealpath
  File "contrib/tools/python3/Lib/posixpath.py", line 427 in realpath
  File "contrib/tools/python3/Lib/inspect.py", line 1016 in getmodule
  File "contrib/tools/python3/Lib/inspect.py", line 1090 in findsource
  File "contrib/python/pytest/py3/_pytest/_code/source.py", line 121 in findsource
  File "contrib/python/pytest/py3/_pytest/_code/code.py", line 106 in fullsource
  File "contrib/python/pytest/py3/_pytest/_code/code.py", line 250 in getsource
  File "contrib/python/pytest/py3/_pytest/_code/code.py", line 833 in _getentrysource
  File "contrib/python/pytest/py3/_pytest/_code/code.py", line 931 in repr_traceback_entry
  File "contrib/python/pytest/py3/_pytest/_code/code.py", line 993 in repr_traceback
  File "contrib/python/pytest/py3/_pytest/_code/code.py", line 1063 in repr_excinfo
  File "contrib/python/pytest/py3/_pytest/_code/code.py", line 698 in getrepr
  File "contrib/python/pytest/py3/_pytest/terminal.py", line 893 in pytest_keyboard_interrupt
  File "contrib/python/pluggy/py3/pluggy/_callers.py", line 103 in _multicall
  File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120 in _hookexec
  File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 513 in __call__
  File "contrib/python/pytest/py3/_pytest/main.py", line 287 in wrap_session
  File "contrib/python/pytest/py3/_pytest/main.py", line 320 in pytest_cmdline_main
  File "contrib/python/pluggy/py3/pluggy/_callers.py", line 103 in _multicall
  File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120 in _hookexec
  File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 513 in __call__
  File "contrib/python/pytest/py3/_pytest/config/__init__.py", line 175 in main
  File "library/python/pytest/main.py", line 101 in main

Traceback (most recent call last):
  File "library/python/testing/yatest_common/yatest/common/process.py", line 384, in wait
    wait_for(
  File "library/python/testing/yatest_common/yatest/common/process.py", line 764, in wait_for
    raise TimeoutError(truncate(message, MAX_MESSAGE_LEN))
yatest.common.process.TimeoutError: ...s', '--ya-trace', '/home/runner/.ya/build/build_root/zvgn/002773/ydb/tests/functional/benchmarks_init/test-results/py3test/testing_out_stuff/test_generator/chunk3/ytest.report.trace', '--build-root', '/home/runner/.ya/build/build_root/zvgn/002773', '--source-root', '/home/runner/.ya/build/build_root/zvgn/002773/environment/arcadia', '--output-dir', '/home/runner/.ya/build/build_root/zvgn/002773/ydb/tests/functional/benchmarks_init/test-results/py3test/testing_out_stuff/test_generator/chunk3/testing_out_stuff', '--durations', '0', '--project-path', 'ydb/tests/functional/benchmarks_init', '--test-tool-bin', '/home/runner/.ya/tools/v4/8580453620/test_tool', '--ya-version', '2', '--collect-cores', '--sanitizer-extra-checks', '--build-type', 'release', '--tb', 'short', '--modulo', '10', '--modulo-index', '3', '--partition-mode', 'SEQUENTIAL', '--split-by-tests', '--dep-root', 'ydb/tests/functional/benchmarks_init', '--flags', 'APPLE_SDK_LOCAL=yes', '--flags', 'CFLAGS=-fno-omit-frame-pointer -Wno-unknown-argument', '--flags', 'DEBUGINFO_LINES_ONLY=yes', '--flags', 'DISABLE_FLAKE8_MIGRATIONS=yes', '--flags', 'OPENSOURCE=yes', '--flags', 'SANITIZER_TYPE=address', '--flags', 'TESTS_REQUESTED=yes', '--flags', 'USE_AIO=static', '--flags', 'USE_CLANG_CL=yes', '--flags', 'USE_EAT_MY_DATA=yes', '--flags', 'USE_ICONV=static', '--flags', 'USE_IDN=static', '--flags', 'USE_PREBUILT_TOOLS=no', '--sanitize', 'address', '--test-file-filter', 'test_generator.py']' stopped by 600 seconds timeout

During handling of the above exception, another exception occurred:

Traceback (most recent call last):
  File "devtools/ya/test/programs/test_tool/run_test/run_test.py", line 1738, in main
    res.wait(check_exit_code=False, timeout=run_timeout, on_timeout=timeout_callback)
  File "library/python/testing/yatest_common/yatest/common/process.py", line 398, in wait
    raise ExecutionTimeoutError(self, str(e))
yatest.common.process.ExecutionTimeoutError: (("...s', '--ya-trace', '/home/runner/.ya/build/build_root/zvgn/002773/ydb/tests/functional/benchmarks_init/test-results/py3test/testing_out_stuff/test_generator/chunk3/ytest.report.trace', '--build-root', '/home/runner/.ya/build/build_root/zvgn/002773', '--source-root', '/home/runner/.ya/build/build_root/zvgn/002773/environment/arcadia', '--output-dir', '/home/runner/.ya/build/build_root/zvgn/002773/ydb/tests/functional/benchmarks_init/test-results/py3test/testing_out_stuff/test_generator/chunk3/testing_out_stuff', '--durations', '0', '--project-path', 'ydb/tests/functional/benchmarks_init', '--test-tool-bin', '/home/runner/.ya/tools/v4/8580453620/test_tool', '--ya-version', '2', '--collect-cores', '--sanitizer-extra-checks', '--build-type', 'release', '--tb', 'short', '--modulo', '10', '--modulo-index', '3', '--partition-mode', 'SEQUENTIAL', '--split-by-tests', '--dep-root', 'ydb/tests/functional/benchmarks_init', '--flags', 'APPLE_SDK_LOCAL=yes', '--flags', 'CFLAGS=-fno-omit-frame-pointer -Wno-unknown-argument', '--flags', 'DEBUGINFO_LINES_ONLY=yes', '--flags', 'DISABLE_FLAKE8_MIGRATIONS=yes', '--flags', 'OPENSOURCE=yes', '--flags', 'SANITIZER_TYPE=address', '--flags', 'TESTS_REQUESTED=yes', '--flags', 'USE_AIO=static', '--flags', 'USE_CLANG_CL=yes', '--flags', 'USE_EAT_MY_DATA=yes', '--flags', 'USE_ICONV=static', '--flags', 'USE_IDN=static', '--flags', 'USE_PREBUILT_TOOLS=no', '--sanitize', 'address', '--test-file-filter', 'test_generator.py']' stopped by 600 seconds timeout",), {})

2025-05-07 08:17:33,974 WARNING library.python.cores: Core dump dir doesn't exist: /coredumps
2025-05-07 08:17:33,974 WARNING library.python.cores: Core dump dir doesn't exist: /var/tmp/cores
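For readers triaging this failure: the dumps above show the main test thread parked in t.join() inside scale_hash (test_generator.py:90) while three worker threads are still hashing generator output in calc_hashes, so the 600-second limit is enforced only by the external ya test wrapper, which then kills the whole process tree. Below is a minimal sketch of that pattern; the names scale_hash, _calc_hash and calc_hashes are taken from the traceback, but the bodies, the paths argument and the hashing details are hypothetical stand-ins, not the actual test code.

    # Sketch only: reconstructed from the stack frames above, not the real
    # ydb/tests/functional/benchmarks_init/test_generator.py implementation.
    import hashlib
    import threading

    def calc_hashes(path):
        # Hypothetical worker body: hash one generated data file chunk by chunk.
        digest = hashlib.sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):
                digest.update(chunk)
        return digest.hexdigest()

    def scale_hash(paths):
        results = {}

        def _calc_hash(path):
            results[path] = calc_hashes(path)  # frame seen in each thread dump

        threads = [threading.Thread(target=_calc_hash, args=(p,)) for p in paths]
        for t in threads:
            t.start()
        for t in threads:
            t.join()  # test_generator.py:90 -- blocks with no deadline of its
                      # own; only the wrapper's 600 s watchdog bounds the wait
        return results

One consequence of this shape is visible in the log: because join() carries no deadline, the test never fails cleanly on slow generation; it is the wrapper that raises TimeoutError and then ExecutionTimeoutError while tearing the run down.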
|77.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/test_helper/columnshard_ut_common.cpp |77.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/tests/fq/pq_async_io/ut_helpers.cpp |77.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/test_helper/columnshard_ut_common.cpp |77.6%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/tests/fq/pq_async_io/libtests-fq-pq_async_io.a |77.6%| [AR] {RESULT} $(B)/ydb/tests/fq/pq_async_io/libtests-fq-pq_async_io.a |77.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/tests/fq/pq_async_io/ut_helpers.cpp |77.6%| [AR] {BAZEL_UPLOAD} $(B)/ydb/tests/fq/pq_async_io/libtests-fq-pq_async_io.a |77.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/node_broker.cpp |77.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/node_broker.cpp |77.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/export_scan.cpp |77.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/export_scan.cpp |77.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/anubis_osiris/blobstorage_anubis.cpp |77.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/anubis_osiris/blobstorage_anubis.cpp |77.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/hullop/blobstorage_hulllog.cpp |77.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/provider/yql_kikimr_type_ann_pg.cpp |77.6%| [CC] {BAZEL_UPLOAD}
$(S)/ydb/core/kqp/provider/yql_kikimr_type_ann_pg.cpp
|77.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/hullop/blobstorage_hulllog.cpp
|77.6%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/vdisk/hullop/libblobstorage-vdisk-hullop.a
|77.6%| [AR] {RESULT} $(B)/ydb/core/blobstorage/vdisk/hullop/libblobstorage-vdisk-hullop.a
|77.6%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/vdisk/hullop/libblobstorage-vdisk-hullop.a
|77.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/driver_lib/cli_utils/cli_fakeinitshard.cpp
|77.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/synclog/blobstorage_synclog.cpp
|77.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/driver_lib/cli_utils/cli_fakeinitshard.cpp
|77.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/synclog/blobstorage_synclog.cpp
|77.6%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/vdisk/synclog/libblobstorage-vdisk-synclog.a
|77.6%| [AR] {RESULT} $(B)/ydb/core/blobstorage/vdisk/synclog/libblobstorage-vdisk-synclog.a
|77.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/local_pgwire/local_pgwire_auth_actor.cpp
|77.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/local_pgwire/local_pgwire_auth_actor.cpp
|77.6%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/vdisk/synclog/libblobstorage-vdisk-synclog.a
|77.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/hulldb/barriers/barriers_chain.cpp
|77.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/hulldb/barriers/barriers_chain.cpp
|77.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/anubis_osiris/blobstorage_anubisrunner.cpp
|77.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/local_pgwire/pgwire_kqp_proxy.cpp
|77.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/anubis_osiris/blobstorage_anubisrunner.cpp
|77.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/local_pgwire/pgwire_kqp_proxy.cpp
|77.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/gateway/behaviour/external_data_source/behaviour.cpp
|77.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/gateway/behaviour/external_data_source/behaviour.cpp
|77.6%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/gateway/behaviour/external_data_source/libgateway-behaviour-external_data_source.global.a
|77.6%| [AR] {RESULT} $(B)/ydb/core/kqp/gateway/behaviour/external_data_source/libgateway-behaviour-external_data_source.global.a
|77.6%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/gateway/behaviour/external_data_source/libgateway-behaviour-external_data_source.global.a
|77.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/node_broker__register_node.cpp
|77.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/node_broker__register_node.cpp
|77.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/coordinator/coordinator__restore_params.cpp
|77.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/coordinator/coordinator__restore_params.cpp
|77.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/driver_lib/cli_utils/cli_cmds_console.cpp
|77.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/driver_lib/cli_utils/cli_cmds_console.cpp
|77.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/provider/yql_kikimr_results.cpp
|77.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/provider/yql_kikimr_results.cpp
|77.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/hulldb/barriers/barriers_tree.cpp
|77.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/hulldb/barriers/barriers_tree.cpp
|77.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/dynamic_nameserver_mon.cpp
|77.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/dynamic_nameserver_mon.cpp
|77.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/local_pgwire/local_pgwire_connection.cpp
|77.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/local_pgwire/local_pgwire_connection.cpp
|77.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/query_compiler/kqp_olap_compiler.cpp
|77.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/query_compiler/kqp_olap_compiler.cpp
|77.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/driver_lib/cli_utils/cli_cmds_admin.cpp
|77.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/driver_lib/cli_utils/cli_cmds_admin.cpp
|77.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/local_pgwire/local_pgwire.cpp
|77.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/local_pgwire/local_pgwire.cpp
|77.7%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/local_pgwire/libydb-core-local_pgwire.a
|77.7%| [AR] {RESULT} $(B)/ydb/core/local_pgwire/libydb-core-local_pgwire.a
|77.7%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/local_pgwire/libydb-core-local_pgwire.a
|77.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/planner/abstract.cpp
|77.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/planner/abstract.cpp
|77.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/driver_lib/cli_utils/cli_persqueue_cluster_discovery.cpp
|77.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/driver_lib/cli_utils/cli_persqueue_cluster_discovery.cpp
|77.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/hulldb/base/blobstorage_hullsatisfactionrank.cpp
|77.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/hulldb/base/blobstorage_hullsatisfactionrank.cpp
|77.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/store_distributed_erase_tx_unit.cpp
|77.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/store_distributed_erase_tx_unit.cpp
|77.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/coordinator/coordinator__monitoring.cpp
|77.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/coordinator/coordinator__monitoring.cpp
|77.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/node_service/kqp_node_service.cpp
|77.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/node_service/kqp_node_service.cpp
|77.7%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/node_service/libcore-kqp-node_service.a
|77.7%| [AR] {RESULT} $(B)/ydb/core/kqp/node_service/libcore-kqp-node_service.a
|77.7%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/node_service/libcore-kqp-node_service.a
|77.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/dynamic_nameserver.cpp
|77.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/dynamic_nameserver.cpp
|77.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/provider/yql_kikimr_gateway.cpp
|77.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/provider/yql_kikimr_gateway.cpp
|77.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/driver_lib/cli_utils/cli_cmds_server.cpp
|77.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/driver_lib/cli_utils/cli_cmds_server.cpp
|77.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/common/vdisk_response.cpp
|77.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/common/vdisk_response.cpp
|77.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/drop_table_unit.cpp
|77.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/drop_table_unit.cpp
|77.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/provider/yql_kikimr_exec.cpp
|77.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/provider/yql_kikimr_exec.cpp
|77.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/balance/balancing_actor.cpp
|77.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/balance/balancing_actor.cpp
|77.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/balance/deleter.cpp
|77.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/balance/deleter.cpp
|77.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/provider/yql_kikimr_datasource.cpp
|77.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/provider/yql_kikimr_datasource.cpp
|77.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/context.cpp
|77.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/context.cpp
|77.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/persqueue_v1/actors/commit_offset_actor.cpp
|77.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/persqueue_v1/actors/commit_offset_actor.cpp
|77.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/provider/yql_kikimr_expr_nodes.cpp
|77.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/provider/yql_kikimr_expr_nodes.cpp
|77.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/coordinator/coordinator__last_step_subscriptions.cpp
|77.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/coordinator/coordinator__last_step_subscriptions.cpp
|77.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/drop_persistent_snapshot_unit.cpp
|77.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/drop_persistent_snapshot_unit.cpp
|77.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/provider/yql_kikimr_type_ann.cpp
|77.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/provider/yql_kikimr_type_ann.cpp
|77.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/common/vdisk_recoverylogwriter.cpp
|77.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/common/vdisk_recoverylogwriter.cpp
|77.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/coordinator/coordinator__restore_transaction.cpp
|77.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/coordinator/coordinator__restore_transaction.cpp
|77.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/provider/yql_kikimr_provider.cpp
|77.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/provider/yql_kikimr_provider.cpp
|77.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/opt/logical/kqp_opt_log_ranges.cpp
|77.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/logical/kqp_opt_log_ranges.cpp
|77.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/metadata/ds_table/scheme_describe.cpp
|77.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/ds_table/scheme_describe.cpp
|77.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/persqueue_v1/actors/schema_actors.cpp
|77.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/persqueue_v1/actors/schema_actors.cpp
|77.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/driver_lib/cli_utils/cli.cpp
|77.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard__progress_resend_rs.cpp
|77.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/driver_lib/cli_utils/cli.cpp
|77.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard__progress_resend_rs.cpp
|77.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/normalizer/insert_table/broken_dedup.cpp
|77.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/host/kqp_transform.cpp
|77.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/normalizer/insert_table/broken_dedup.cpp
|77.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/host/kqp_transform.cpp
|77.8%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/normalizer/insert_table/libcolumnshard-normalizer-insert_table.global.a
|77.8%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/normalizer/insert_table/libcolumnshard-normalizer-insert_table.global.a
|77.8%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/normalizer/insert_table/libcolumnshard-normalizer-insert_table.global.a
|77.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/constructor/zero_level.cpp
|77.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/constructor/zero_level.cpp
|77.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/provider/yql_kikimr_opt.cpp
|77.8%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/constructor/liboptimizer-lcbuckets-constructor.global.a
|77.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/provider/yql_kikimr_opt.cpp
|77.8%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/constructor/liboptimizer-lcbuckets-constructor.global.a
|77.9%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/constructor/liboptimizer-lcbuckets-constructor.global.a
|77.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/coordinator/coordinator__acquire_read_step.cpp
|77.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/coordinator/coordinator__acquire_read_step.cpp
|77.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/query_compiler/kqp_mkql_compiler.cpp
|77.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/query_compiler/kqp_mkql_compiler.cpp
|77.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/metadata/abstract/kqp_common.cpp
|77.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/abstract/kqp_common.cpp
|77.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/hulldb/barriers/barriers_essence.cpp
|77.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/hulldb/barriers/barriers_essence.cpp
|77.9%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/vdisk/hulldb/barriers/libvdisk-hulldb-barriers.a
|77.9%| [AR] {RESULT} $(B)/ydb/core/blobstorage/vdisk/hulldb/barriers/libvdisk-hulldb-barriers.a
|77.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/build_data_tx_out_rs_unit.cpp
|77.9%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/vdisk/hulldb/barriers/libvdisk-hulldb-barriers.a
|77.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/build_data_tx_out_rs_unit.cpp
|77.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/lease_holder.cpp
|77.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/lease_holder.cpp
|77.9%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/mind/libydb-core-mind.a
|77.9%| [AR] {RESULT} $(B)/ydb/core/mind/libydb-core-mind.a
|77.9%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/mind/libydb-core-mind.a
|77.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/export/session/session.cpp
|77.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/export/session/session.cpp
|77.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/query_compiler/kqp_query_compiler.cpp
|77.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/query_compiler/kqp_query_compiler.cpp
|77.9%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/query_compiler/libcore-kqp-query_compiler.a
|77.9%| [AR] {RESULT} $(B)/ydb/core/kqp/query_compiler/libcore-kqp-query_compiler.a
|77.9%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/query_compiler/libcore-kqp-query_compiler.a
|77.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/scheme/indexes/abstract/meta.cpp
|77.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/scheme/indexes/abstract/meta.cpp
|77.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/driver_lib/cli_utils/cli_scheme_initroot.cpp
|77.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/driver_lib/cli_utils/cli_scheme_initroot.cpp
|77.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/opt/logical/kqp_opt_cbo.cpp
|77.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/logical/kqp_opt_cbo.cpp
|77.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/reader/sys_view/abstract/filler.cpp
|77.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/sys_view/abstract/filler.cpp
|77.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/driver_lib/cli_utils/cli_actorsystem_perftest.cpp
|77.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/driver_lib/cli_utils/cli_actorsystem_perftest.cpp
|77.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/driver_lib/cli_utils/cli_cmds_cms.cpp
|77.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/driver_lib/cli_utils/cli_cmds_cms.cpp
|77.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/opt/kqp_opt_phase.cpp
|77.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/kqp_opt_phase.cpp
|77.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/coordinator/coordinator__stop_guard.cpp
|77.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/coordinator/coordinator__stop_guard.cpp
|77.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/hulldb/base/hullbase_barrier.cpp
|77.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/hulldb/base/hullbase_barrier.cpp
|77.9%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/vdisk/hulldb/base/libvdisk-hulldb-base.a
|77.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/coordinator/coordinator__read_step_subscriptions.cpp
|77.9%| [AR] {RESULT} $(B)/ydb/core/blobstorage/vdisk/hulldb/base/libvdisk-hulldb-base.a
|78.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/coordinator/coordinator__read_step_subscriptions.cpp
|78.0%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/vdisk/hulldb/base/libvdisk-hulldb-base.a
|78.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/host/kqp_translate.cpp
|78.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/host/kqp_translate.cpp
|78.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/gateway/behaviour/table/behaviour.cpp
|78.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/gateway/behaviour/table/behaviour.cpp
|78.0%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/gateway/behaviour/table/libgateway-behaviour-table.global.a
|78.0%| [AR] {RESULT} $(B)/ydb/core/kqp/gateway/behaviour/table/libgateway-behaviour-table.global.a
|78.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/coordinator/coordinator.cpp
|78.0%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/gateway/behaviour/table/libgateway-behaviour-table.global.a
|78.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/coordinator/coordinator.cpp
|78.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/host/kqp_statement_rewrite.cpp
|78.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/host/kqp_statement_rewrite.cpp
|78.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/opt/kqp_constant_folding_transformer.cpp
|78.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/kqp_constant_folding_transformer.cpp
|78.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/host/kqp_runner.cpp
|78.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/host/kqp_runner.cpp
|78.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/balance/sender.cpp
|78.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/balance/sender.cpp
|78.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/host/kqp_explain_prepared.cpp
|78.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/host/kqp_explain_prepared.cpp
|78.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/export/session/task.cpp
|78.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/export/session/task.cpp
|78.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/opt/kqp_query_blocks_transformer.cpp
|78.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/kqp_query_blocks_transformer.cpp
|78.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/opt/kqp_statistics_transformer.cpp
|78.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/kqp_statistics_transformer.cpp
|78.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/metadata/secret/initializer.cpp
|78.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/secret/initializer.cpp
|78.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/ymq/http/http.cpp
|78.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ymq/http/http.cpp
|78.0%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/ymq/http/libcore-ymq-http.a
|78.0%| [AR] {RESULT} $(B)/ydb/core/ymq/http/libcore-ymq-http.a
|78.0%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/ymq/http/libcore-ymq-http.a
|78.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/client/server/msgbus_server_persqueue.cpp
|78.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/client/server/msgbus_server_persqueue.cpp
|78.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/fq/libs/actors/run_actor.cpp
|78.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/fq/libs/actors/run_actor.cpp
|78.0%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/fq/libs/actors/libfq-libs-actors.a
|78.0%| [AR] {RESULT} $(B)/ydb/core/fq/libs/actors/libfq-libs-actors.a
|78.0%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/fq/libs/actors/libfq-libs-actors.a
|78.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/opt/logical/kqp_opt_log_sqlin.cpp
|78.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/logical/kqp_opt_log_sqlin.cpp
|78.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/olap/operations/alter/standalone/object.cpp
|78.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/olap/operations/alter/standalone/object.cpp
|78.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/opt/kqp_opt_phy_finalize.cpp
|78.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/kqp_opt_phy_finalize.cpp
|78.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/ydb_convert/table_profiles.cpp
|78.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ydb_convert/table_profiles.cpp
|78.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/export/session/control.cpp
|78.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/opt/logical/kqp_opt_log_extract.cpp
|78.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/export/session/control.cpp
|78.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/logical/kqp_opt_log_extract.cpp
|78.1%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/export/session/libcolumnshard-export-session.global.a
|78.1%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/export/session/libcolumnshard-export-session.global.a
|78.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/gateway/kqp_metadata_loader.cpp
|78.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/huge/blobstorage_hullhuge.cpp
|78.1%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/export/session/libcolumnshard-export-session.global.a
|78.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/iterator.cpp
|78.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/gateway/kqp_metadata_loader.cpp
|78.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/huge/blobstorage_hullhuge.cpp
|78.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/iterator.cpp
|78.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/actorlib_impl/send_data_protocol.cpp
|78.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/actorlib_impl/send_data_protocol.cpp
|78.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_tools.cpp
|78.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/olap/operations/create_store.cpp
|78.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_tools.cpp
|78.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/olap/operations/create_store.cpp
|78.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/olap/operations/drop_table.cpp
|78.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/olap/operations/drop_table.cpp
|78.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/opt/kqp_opt_build_txs.cpp
|78.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/kqp_opt_build_txs.cpp
|78.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/coordinator/mediator_queue.cpp
|78.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/coordinator/mediator_queue.cpp
|78.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/opt/logical/kqp_opt_log_indexes.cpp
|78.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/logical/kqp_opt_log_indexes.cpp
|78.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/gateway/local_rpc/helper.cpp
|78.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/gateway/local_rpc/helper.cpp
|78.1%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/gateway/local_rpc/libkqp-gateway-local_rpc.a
|78.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/opt/kqp_opt_phy_check.cpp
|78.1%| [AR] {RESULT} $(B)/ydb/core/kqp/gateway/local_rpc/libkqp-gateway-local_rpc.a
|78.1%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/gateway/local_rpc/libkqp-gateway-local_rpc.a
|78.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/kqp_opt_phy_check.cpp
|78.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/client/server/msgbus_server_pq_read_session_info.cpp
|78.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/client/server/msgbus_server_pq_read_session_info.cpp
|78.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/metadata/secret/secret.cpp
|78.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/secret/secret.cpp
|78.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/olap/operations/alter_table.cpp
|78.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/olap/operations/alter_table.cpp
|78.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/ydb_convert/table_settings.cpp
|78.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ydb_convert/table_settings.cpp
|78.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/host/kqp_type_ann.cpp
|78.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/host/kqp_type_ann.cpp
|78.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/driver_lib/run/main.cpp
|78.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/driver_lib/run/main.cpp
|78.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/persqueue_v1/actors/partition_actor.cpp
|78.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/persqueue_v1/actors/partition_actor.cpp
|78.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/olap/store/store.cpp
|78.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/olap/store/store.cpp
|78.2%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/olap/store/libschemeshard-olap-store.a
|78.2%| [AR] {RESULT} $(B)/ydb/core/tx/schemeshard/olap/store/libschemeshard-olap-store.a
|78.2%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/schemeshard/olap/store/libschemeshard-olap-store.a
|78.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_nodemon.cpp
|78.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/driver_lib/run/run.cpp
|78.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_nodemon.cpp
|78.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/driver_lib/run/run.cpp
|78.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_driveestimator.cpp
|78.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_driveestimator.cpp
|78.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/provider/yql_kikimr_datasink.cpp
|78.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/provider/yql_kikimr_datasink.cpp
|78.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/driver_lib/cli_utils/cli_cmds_tablet.cpp
|78.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/driver_lib/cli_utils/cli_cmds_tablet.cpp
|78.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/storage/indexes/bloom_ngramm/meta.cpp
|78.2%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/bloom_ngramm/libstorage-indexes-bloom_ngramm.global.a
|78.2%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/bloom_ngramm/libstorage-indexes-bloom_ngramm.global.a
|78.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/storage/indexes/bloom_ngramm/meta.cpp
|78.2%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/bloom_ngramm/libstorage-indexes-bloom_ngramm.global.a
|78.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/opt/logical/kqp_opt_log_join.cpp
|78.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/logical/kqp_opt_log_join.cpp
|78.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/coordinator/coordinator__plan_step.cpp
|78.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/coordinator/coordinator__plan_step.cpp
|78.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/olap/operations/create_table.cpp
|78.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/olap/operations/create_table.cpp
|78.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/reader/plain_reader/iterator/source.cpp
|78.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/provider/rewrite_io_utils.cpp
|78.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/plain_reader/iterator/source.cpp
|78.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/opt/logical/kqp_opt_log_effects.cpp
|78.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/provider/rewrite_io_utils.cpp
|78.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/logical/kqp_opt_log_effects.cpp
|78.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/anubis_osiris/blobstorage_anubis_osiris.cpp
|78.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/opt/kqp_opt_kql.cpp
|78.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/anubis_osiris/blobstorage_anubis_osiris.cpp
|78.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/kqp_opt_kql.cpp
|78.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/huge/blobstorage_hullhugerecovery.cpp
|78.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/opt/kqp_column_statistics_requester.cpp
|78.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/huge/blobstorage_hullhugerecovery.cpp
|78.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/kqp_column_statistics_requester.cpp
|78.2%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/vdisk/huge/libblobstorage-vdisk-huge.a
|78.2%| [AR] {RESULT} $(B)/ydb/core/blobstorage/vdisk/huge/libblobstorage-vdisk-huge.a
|78.2%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/vdisk/huge/libblobstorage-vdisk-huge.a
|78.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/olap/ttl/validator.cpp
|78.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/olap/ttl/validator.cpp
|78.2%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/olap/ttl/libschemeshard-olap-ttl.a
|78.3%| [AR] {RESULT} $(B)/ydb/core/tx/schemeshard/olap/ttl/libschemeshard-olap-ttl.a
|78.3%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/schemeshard/olap/ttl/libschemeshard-olap-ttl.a
|78.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/opt/kqp_opt.cpp
|78.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/kqp_opt.cpp
|78.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/build_index/kmeans_helper.cpp
|78.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/build_index/kmeans_helper.cpp
|78.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/ydb_convert/table_description.cpp
|78.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ydb_convert/table_description.cpp
|78.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/opt/kqp_opt_build_phy_query.cpp
|78.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/kqp_opt_build_phy_query.cpp
|78.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/metadata/secret/secret_behaviour.cpp
|78.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/secret/secret_behaviour.cpp
|78.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/persqueue_v1/actors/update_offsets_in_transaction_actor.cpp
|78.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/provider/yql_kikimr_opt_build.cpp
|78.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/persqueue_v1/actors/update_offsets_in_transaction_actor.cpp
|78.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/provider/yql_kikimr_opt_build.cpp
|78.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/coordinator/coordinator_state.cpp
|78.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/coordinator/coordinator_state.cpp
|78.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/opt/logical/kqp_opt_log_sort.cpp
|78.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/logical/kqp_opt_log_sort.cpp
|78.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/coordinator/coordinator_impl.cpp
|78.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/metadata/secret/snapshot.cpp
|78.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/coordinator/coordinator_impl.cpp
|78.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/secret/snapshot.cpp
|78.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/transfer/update.cpp
|78.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/transfer/update.cpp
|78.3%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/transfer/libalter-in_store-transfer.a
|78.3%| [AR] {RESULT} $(B)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/transfer/libalter-in_store-transfer.a
|78.3%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/transfer/libalter-in_store-transfer.a
|78.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/metadata/secret/manager.cpp
|78.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kafka_proxy/kafka_transactional_producers_initializers.cpp
|78.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kafka_proxy/actors/kafka_offset_fetch_actor.cpp
|78.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kafka_proxy/kafka_transactional_producers_initializers.cpp
|78.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/secret/manager.cpp
|78.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kafka_proxy/actors/kafka_offset_fetch_actor.cpp
|78.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/scheme_cache/scheme_cache.cpp
|78.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/scheme_cache/scheme_cache.cpp
|78.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/coordinator/coordinator__check.cpp
|78.3%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/scheme_cache/libcore-tx-scheme_cache.a
|78.3%| [AR] {RESULT} $(B)/ydb/core/tx/scheme_cache/libcore-tx-scheme_cache.a
|78.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/coordinator/coordinator__check.cpp
|78.3%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/scheme_cache/libcore-tx-scheme_cache.a
|78.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/persqueue_v1/actors/distributed_commit_helper.cpp
|78.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/persqueue_v1/actors/distributed_commit_helper.cpp
|78.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/ext_index/common/config.cpp
|78.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/ext_index/common/config.cpp
|78.4%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/services/ext_index/common/libservices-ext_index-common.a
|78.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kafka_proxy/actors/kafka_fetch_actor.cpp
|78.3%| [AR] {RESULT} $(B)/ydb/services/ext_index/common/libservices-ext_index-common.a
|78.4%| [AR] {BAZEL_UPLOAD} $(B)/ydb/services/ext_index/common/libservices-ext_index-common.a
|78.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kafka_proxy/actors/kafka_fetch_actor.cpp
|78.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kafka_proxy/actors/kafka_balance_actor_sql.cpp
|78.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kafka_proxy/actors/kafka_balance_actor_sql.cpp
|78.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/gateway/behaviour/resource_pool/behaviour.cpp
|78.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/gateway/behaviour/resource_pool/behaviour.cpp
|78.4%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/gateway/behaviour/resource_pool/libgateway-behaviour-resource_pool.global.a
|78.4%| [AR] {RESULT} $(B)/ydb/core/kqp/gateway/behaviour/resource_pool/libgateway-behaviour-resource_pool.global.a
|78.4%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/gateway/behaviour/resource_pool/libgateway-behaviour-resource_pool.global.a
|78.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/changes/cleanup_portions.cpp
|78.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/changes/cleanup_portions.cpp
|78.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/reader/sys_view/portions/portions.cpp
|78.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/sys_view/portions/portions.cpp
|78.4%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/reader/sys_view/portions/libreader-sys_view-portions.global.a
|78.4%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/reader/sys_view/portions/libreader-sys_view-portions.global.a
|78.4%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/sys_view/portions/libreader-sys_view-portions.global.a
|78.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/opt/logical/kqp_opt_log_sqlin_compact.cpp
|78.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/logical/kqp_opt_log_sqlin_compact.cpp
|78.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tracing/tablet_info.cpp
|78.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tracing/tablet_info.cpp
|78.4%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tracing/libydb-core-tracing.a
|78.4%| [AR] {RESULT} $(B)/ydb/core/tracing/libydb-core-tracing.a
|78.4%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tracing/libydb-core-tracing.a
|78.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kafka_proxy/actors/kafka_topic_offsets_actor.cpp
|78.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kafka_proxy/actors/kafka_topic_offsets_actor.cpp
|78.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/metadata/secret/access_behaviour.cpp
|78.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/secret/access_behaviour.cpp
|78.4%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/services/metadata/secret/libservices-metadata-secret.global.a
|78.4%| [AR] {RESULT} $(B)/ydb/services/metadata/secret/libservices-metadata-secret.global.a
|78.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/persqueue_v1/actors/read_session_actor.cpp
|78.4%| [AR] {BAZEL_UPLOAD} $(B)/ydb/services/metadata/secret/libservices-metadata-secret.global.a
|78.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/persqueue_v1/actors/read_session_actor.cpp
|78.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/changes/ttl.cpp
|78.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/changes/ttl.cpp
|78.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/olap/operations/drop_store.cpp
|78.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/olap/operations/drop_store.cpp
|78.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/driver_lib/cli_utils/cli_cmds_config.cpp
|78.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/driver_lib/cli_utils/cli_cmds_config.cpp
|78.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/storage/indexes/bloom_ngramm/const.cpp
|78.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/storage/indexes/bloom_ngramm/const.cpp
|78.4%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/bloom_ngramm/libstorage-indexes-bloom_ngramm.a
|78.4%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/bloom_ngramm/libstorage-indexes-bloom_ngramm.a
|78.4%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/bloom_ngramm/libstorage-indexes-bloom_ngramm.a
|78.4%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/driver_lib/cli_utils/libcli_utils.a
|78.4%| [AR] {RESULT} $(B)/ydb/core/driver_lib/cli_utils/libcli_utils.a
|78.4%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/driver_lib/cli_utils/libcli_utils.a
|78.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/sharding/hash_modulo.cpp
|78.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/persqueue_v1/actors/write_session_actor.cpp
|78.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/sharding/hash_modulo.cpp
|78.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/host/kqp_host.cpp
|78.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/persqueue_v1/actors/write_session_actor.cpp
|78.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/host/kqp_host.cpp
|78.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kafka_proxy/actors/kafka_read_session_actor.cpp
|78.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kafka_proxy/actors/kafka_read_session_actor.cpp
|78.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/gateway/kqp_gateway.cpp
|78.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/gateway/kqp_gateway.cpp
|78.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/schema/update.cpp
|78.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kafka_proxy/actors/kafka_create_topics_actor.cpp
|78.5%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/schema/libalter-in_store-schema.a
|78.5%| [AR] {RESULT} $(B)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/schema/libalter-in_store-schema.a
|78.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/schema/update.cpp
|78.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kafka_proxy/actors/kafka_create_topics_actor.cpp
|78.5%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/schema/libalter-in_store-schema.a
|78.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/tiering/tier/object.cpp
|78.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/tiering/tier/object.cpp
|78.5%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/tiering/tier/libtx-tiering-tier.a
|78.5%| [AR] {RESULT} $(B)/ydb/core/tx/tiering/tier/libtx-tiering-tier.a
|78.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/olap/operations/alter_store.cpp
|78.5%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/tiering/tier/libtx-tiering-tier.a
|78.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/olap/operations/alter_store.cpp
|78.5%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/olap/operations/libschemeshard-olap-operations.a
|78.5%| [AR] {RESULT} $(B)/ydb/core/tx/schemeshard/olap/operations/libschemeshard-olap-operations.a
|78.5%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/schemeshard/olap/operations/libschemeshard-olap-operations.a
|78.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_impl_http.cpp
|78.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_impl_http.cpp
|78.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/host/kqp_gateway_proxy.cpp
|78.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/host/kqp_gateway_proxy.cpp
|78.5%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/host/libcore-kqp-host.a
|78.5%| [AR] {RESULT} $(B)/ydb/core/kqp/host/libcore-kqp-host.a
|78.5%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/host/libcore-kqp-host.a
|78.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/data_events/shard_writer.cpp
|78.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/data_events/shard_writer.cpp
|78.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kafka_proxy/kafka_consumer_groups_metadata_initializers.cpp
|78.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/anubis_osiris/blobstorage_anubisproxy.cpp
|78.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kafka_proxy/kafka_consumer_groups_metadata_initializers.cpp
|78.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/anubis_osiris/blobstorage_anubisproxy.cpp
|78.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/client/server/msgbus_server_scheme_request.cpp
|78.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/client/server/msgbus_server_scheme_request.cpp
|78.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/driver_lib/run/kikimr_services_initializers.cpp
|78.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/driver_lib/run/kikimr_services_initializers.cpp
|78.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kafka_proxy/actors/kafka_describe_configs_actor.cpp
|78.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kafka_proxy/actors/kafka_offset_commit_actor.cpp
|78.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kafka_proxy/actors/kafka_describe_configs_actor.cpp
|78.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kafka_proxy/actors/kafka_offset_commit_actor.cpp
|78.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kafka_proxy/kafka_consumer_members_metadata_initializers.cpp
|78.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kafka_proxy/kafka_consumer_members_metadata_initializers.cpp
|78.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/opt/kqp_opt_effects.cpp
|78.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/kqp_opt_effects.cpp
|78.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/sharding/random.cpp
|78.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/sharding/random.cpp
|78.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/storage/actualizer/tiering/counters.cpp
|78.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/storage/actualizer/tiering/counters.cpp
|78.6%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/storage/actualizer/tiering/libstorage-actualizer-tiering.a
|78.6%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/storage/actualizer/tiering/libstorage-actualizer-tiering.a
|78.6%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/actualizer/tiering/libstorage-actualizer-tiering.a
|78.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/opt/logical/kqp_opt_log_helpers.cpp
|78.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/logical/kqp_opt_log_helpers.cpp
|78.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kafka_proxy/kafka_connection.cpp
|78.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kafka_proxy/kafka_connection.cpp
|78.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/opt/logical/kqp_opt_log_ranges_predext.cpp
|78.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/logical/kqp_opt_log_ranges_predext.cpp
|78.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/metadata/initializer/accessor_init.cpp
|78.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/initializer/accessor_init.cpp
|78.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kafka_proxy/actors/kafka_metadata_actor.cpp
|78.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kafka_proxy/actors/kafka_metadata_actor.cpp
|78.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/client/server/msgbus_server_tx_request.cpp
|78.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/client/server/msgbus_server_tx_request.cpp
|78.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/engine/minikql/flat_local_tx_factory.cpp
|78.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/engine/minikql/flat_local_tx_factory.cpp
|78.6%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/engine/minikql/libcore-engine-minikql.a
|78.6%| [AR] {RESULT} $(B)/ydb/core/engine/minikql/libcore-engine-minikql.a
|78.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/gateway/behaviour/resource_pool_classifier/behaviour.cpp
|78.6%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/engine/minikql/libcore-engine-minikql.a
|78.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/gateway/behaviour/resource_pool_classifier/behaviour.cpp
|78.6%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/gateway/behaviour/resource_pool_classifier/libgateway-behaviour-resource_pool_classifier.global.a
|78.6%| [AR] {RESULT} $(B)/ydb/core/kqp/gateway/behaviour/resource_pool_classifier/libgateway-behaviour-resource_pool_classifier.global.a
|78.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/changes/compaction.cpp
|78.6%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/gateway/behaviour/resource_pool_classifier/libgateway-behaviour-resource_pool_classifier.global.a
|78.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/changes/compaction.cpp
|78.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/opt/logical/kqp_opt_log.cpp
|78.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/logical/kqp_opt_log.cpp
|78.6%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/opt/logical/libkqp-opt-logical.a
|78.6%| [AR] {RESULT} $(B)/ydb/core/kqp/opt/logical/libkqp-opt-logical.a
|78.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/anubis_osiris/blobstorage_osiris.cpp
|78.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/anubis_osiris/blobstorage_osiris.cpp
|78.6%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/opt/logical/libkqp-opt-logical.a
|78.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/storage/indexes/skip_index/constructor.cpp
|78.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/storage/indexes/skip_index/constructor.cpp
|78.6%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/skip_index/libstorage-indexes-skip_index.a
|78.6%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/skip_index/libstorage-indexes-skip_index.a
|78.7%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/skip_index/libstorage-indexes-skip_index.a
|78.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/gateway/kqp_ic_gateway.cpp
|78.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kafka_proxy/actors/kafka_init_producer_id_actor.cpp
|78.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kafka_proxy/actors/kafka_init_producer_id_actor.cpp
|78.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/gateway/kqp_ic_gateway.cpp
|78.7%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/gateway/libcore-kqp-gateway.a
|78.7%| [AR] {RESULT} $(B)/ydb/core/kqp/gateway/libcore-kqp-gateway.a
|78.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/actorlib_impl/connect_socket_protocol.cpp
|78.7%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/gateway/libcore-kqp-gateway.a
|78.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/actorlib_impl/connect_socket_protocol.cpp
|78.7%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/actorlib_impl/libydb-core-actorlib_impl.a
|78.7%| [AR] {RESULT} $(B)/ydb/core/actorlib_impl/libydb-core-actorlib_impl.a
|78.7%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/actorlib_impl/libydb-core-actorlib_impl.a
|78.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/sharding/hash_intervals.cpp
|78.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/sharding/hash_intervals.cpp
|78.7%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/sharding/libcore-tx-sharding.global.a
|78.7%| [AR] {RESULT} $(B)/ydb/core/tx/sharding/libcore-tx-sharding.global.a
|78.7%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/sharding/libcore-tx-sharding.global.a
|78.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_request.cpp
|78.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_request.cpp
|78.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/changes/with_appended.cpp
|78.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/ydb_convert/tx_proxy_status.cpp
|78.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/changes/with_appended.cpp
|78.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ydb_convert/tx_proxy_status.cpp
|78.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/gateway/behaviour/view/behaviour.cpp
|78.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/gateway/behaviour/view/behaviour.cpp
|78.7%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/gateway/behaviour/view/libgateway-behaviour-view.global.a
|78.7%| [AR] {RESULT} $(B)/ydb/core/kqp/gateway/behaviour/view/libgateway-behaviour-view.global.a
|78.7%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/gateway/behaviour/view/libgateway-behaviour-view.global.a
|78.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/reader/sys_view/abstract/policy.cpp
|78.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/sys_view/abstract/policy.cpp
|78.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kafka_proxy/actors/kafka_sasl_auth_actor.cpp
|78.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kafka_proxy/actors/kafka_sasl_auth_actor.cpp
|78.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/coordinator/coordinator__configure.cpp
|78.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/coordinator/coordinator__configure.cpp
|78.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/replication/controller/tx_alter_dst_result.cpp
|78.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/replication/controller/tx_alter_dst_result.cpp
|78.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/olap/schema/update.cpp
|78.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/olap/schema/update.cpp
|78.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/driver_lib/run/factories.cpp
|78.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/column_engine.cpp
|78.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/driver_lib/run/factories.cpp
|78.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/column_engine.cpp
|78.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/metadata/initializer/initializer.cpp
|78.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/initializer/initializer.cpp
|78.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/client/server/msgbus_server_scheme_initroot.cpp
|78.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/client/server/msgbus_server_scheme_initroot.cpp
|78.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/replication/service/service.cpp
|78.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/replication/service/service.cpp
|78.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/reader/plain_reader/iterator/interval.cpp
|78.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/plain_reader/iterator/interval.cpp
|78.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/sharding/hash_slider.cpp
|78.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/sharding/hash_slider.cpp
|78.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/lib/actors/pq_schema_actor.cpp
|78.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/db_wrapper.cpp
|78.8%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/services/lib/actors/libservices-lib-actors.a
|78.8%| [AR] {RESULT} $(B)/ydb/services/lib/actors/libservices-lib-actors.a
|78.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/db_wrapper.cpp
|78.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/lib/actors/pq_schema_actor.cpp
|78.8%| [AR] {BAZEL_UPLOAD} $(B)/ydb/services/lib/actors/libservices-lib-actors.a
|78.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/metadata/secret/checker_secret.cpp
|78.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/secret/checker_secret.cpp
|78.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/changes/cleanup_tables.cpp
|78.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/changes/cleanup_tables.cpp
|78.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/scheme/indexes/abstract/constructor.cpp
|78.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/scheme/indexes/abstract/constructor.cpp
|78.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kafka_proxy/actors/kafka_alter_configs_actor.cpp
|78.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kafka_proxy/actors/kafka_alter_configs_actor.cpp
|78.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/external_sources/object_storage.cpp
|78.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/external_sources/object_storage.cpp
|78.8%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/external_sources/libydb-core-external_sources.a
|78.8%| [AR] {RESULT} $(B)/ydb/core/external_sources/libydb-core-external_sources.a
|78.8%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/external_sources/libydb-core-external_sources.a
|78.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/metadata/secret/access.cpp
|78.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/secret/access.cpp
|78.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kafka_proxy/actors/kafka_transaction_actor.cpp
|78.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kafka_proxy/actors/kafka_transaction_actor.cpp
|78.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/metadata/secret/checker_access.cpp
|78.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/secret/checker_access.cpp
|78.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/health_check/health_check.cpp
|78.8%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/health_check/libydb-core-health_check.a
|78.8%| [AR] {RESULT} $(B)/ydb/core/health_check/libydb-core-health_check.a
|78.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/health_check/health_check.cpp
|78.8%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/health_check/libydb-core-health_check.a
|78.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/sharding/sharding.cpp
|78.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/ydb_convert/column_families.cpp
|78.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/sharding/sharding.cpp
|78.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ydb_convert/column_families.cpp
|78.8%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/sharding/libcore-tx-sharding.a
|78.8%| [AR] {RESULT} $(B)/ydb/core/tx/sharding/libcore-tx-sharding.a
|78.8%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/sharding/libcore-tx-sharding.a
|78.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/olap/table/table.cpp
|78.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/time_cast/time_cast.cpp
|78.8%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/olap/table/libschemeshard-olap-table.a
|78.8%| [AR] {RESULT} $(B)/ydb/core/tx/schemeshard/olap/table/libschemeshard-olap-table.a
|78.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/olap/table/table.cpp
|78.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/time_cast/time_cast.cpp
|78.9%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/time_cast/libcore-tx-time_cast.a
|78.9%| [AR] {RESULT} $(B)/ydb/core/tx/time_cast/libcore-tx-time_cast.a
|78.9%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/schemeshard/olap/table/libschemeshard-olap-table.a
|78.9%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/time_cast/libcore-tx-time_cast.a
|78.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/library/yql/providers/common/ut_helpers/dq_fake_ca.cpp
|78.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/library/yql/providers/common/ut_helpers/dq_fake_ca.cpp
|78.9%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/library/yql/providers/common/ut_helpers/libproviders-common-ut_helpers.a
|78.9%| [AR] {RESULT} $(B)/ydb/library/yql/providers/common/ut_helpers/libproviders-common-ut_helpers.a
|78.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/persqueue_v1/actors/direct_read_actor.cpp
|78.9%| [AR] {BAZEL_UPLOAD} $(B)/ydb/library/yql/providers/common/ut_helpers/libproviders-common-ut_helpers.a
|78.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/persqueue_v1/actors/direct_read_actor.cpp
|78.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/changes/merge_subset.cpp
|78.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/changes/merge_subset.cpp
|78.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kafka_proxy/actors/kafka_list_offsets_actor.cpp
|78.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kafka_proxy/actors/kafka_list_offsets_actor.cpp
|78.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_blockdevice_async.cpp
|78.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_blockdevice_async.cpp
|78.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kafka_proxy/actors/kafka_balancer_actor.cpp
|78.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kafka_proxy/actors/kafka_balancer_actor.cpp
|78.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/replication/controller/controller.cpp
|78.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/replication/controller/controller.cpp
|78.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_actor.cpp
|78.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_actor.cpp
|78.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/changes/actualization/construction/context.cpp
|78.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/changes/actualization/construction/context.cpp
|78.9%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/changes/actualization/construction/libchanges-actualization-construction.a
|78.9%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/changes/actualization/construction/libchanges-actualization-construction.a
|78.9%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/changes/actualization/construction/libchanges-actualization-construction.a
|78.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/changes/indexation.cpp
|78.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/ydb_convert/topic_description.cpp
|78.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/changes/indexation.cpp
|78.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ydb_convert/topic_description.cpp
|78.9%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/ydb_convert/libydb-core-ydb_convert.a
|78.9%| [AR] {RESULT} $(B)/ydb/core/ydb_convert/libydb-core-ydb_convert.a
|78.9%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/ydb_convert/libydb-core-ydb_convert.a
|78.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/metadata/secret/fetcher.cpp
|78.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/secret/fetcher.cpp
|78.9%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/services/metadata/secret/libservices-metadata-secret.a
|78.9%| [AR] {RESULT} $(B)/ydb/services/metadata/secret/libservices-metadata-secret.a
|78.9%| [AR] {BAZEL_UPLOAD} $(B)/ydb/services/metadata/secret/libservices-metadata-secret.a
|78.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kafka_proxy/actors/kafka_sasl_handshake_actor.cpp
|78.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kafka_proxy/actors/kafka_sasl_handshake_actor.cpp
|78.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/olap/operations/alter/standalone/update.cpp
|79.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/olap/operations/alter/standalone/update.cpp
|78.9%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/olap/operations/alter/standalone/liboperations-alter-standalone.a
|79.0%| [AR] {RESULT} $(B)/ydb/core/tx/schemeshard/olap/operations/alter/standalone/liboperations-alter-standalone.a
|79.0%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/schemeshard/olap/operations/alter/standalone/liboperations-alter-standalone.a
|79.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/olap/options/update.cpp
|79.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/olap/options/update.cpp
|79.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/metadata/initializer/object.cpp
|79.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/initializer/object.cpp
|79.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/discovery/discovery.cpp
|79.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/discovery/discovery.cpp
|79.0%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/discovery/libydb-core-discovery.a
|79.0%| [AR] {RESULT} $(B)/ydb/core/discovery/libydb-core-discovery.a
|79.0%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/discovery/libydb-core-discovery.a
|79.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/olap/schema/schema.cpp
|79.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/olap/schema/schema.cpp
|79.0%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/olap/schema/libschemeshard-olap-schema.a
|79.0%| [AR] {RESULT} $(B)/ydb/core/tx/schemeshard/olap/schema/libschemeshard-olap-schema.a
|79.0%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/schemeshard/olap/schema/libschemeshard-olap-schema.a
|79.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_mon.cpp
|79.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_mon.cpp
|79.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/olap/options/schema.cpp
|79.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/olap/options/schema.cpp
|79.0%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/olap/options/libschemeshard-olap-options.a
|79.0%| [AR] {RESULT} $(B)/ydb/core/tx/schemeshard/olap/options/libschemeshard-olap-options.a
|79.0%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/schemeshard/olap/options/libschemeshard-olap-options.a
|79.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/replication/controller/tx_create_dst_result.cpp
|79.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/replication/controller/tx_create_dst_result.cpp
|79.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/changes/general_compaction.cpp
|79.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/changes/general_compaction.cpp
|79.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/storage/indexes/max/constructor.cpp
|79.0%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/changes/libcolumnshard-engines-changes.a
|79.0%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/changes/libcolumnshard-engines-changes.a
|79.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/storage/indexes/max/constructor.cpp
|79.0%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/max/libstorage-indexes-max.global.a
|79.0%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/max/libstorage-indexes-max.global.a
|79.0%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/changes/libcolumnshard-engines-changes.a
|79.0%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/max/libstorage-indexes-max.global.a
|79.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_get_impl.cpp
|79.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_get_impl.cpp
|79.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/changes/compaction/merger.cpp
|79.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/changes/compaction/merger.cpp
|79.0%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/changes/compaction/libengines-changes-compaction.a
|79.0%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/changes/compaction/libengines-changes-compaction.a
|79.0%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/changes/compaction/libengines-changes-compaction.a
|79.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/console/net_classifier_updater.cpp
|79.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/net_classifier_updater.cpp
|79.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/base/appdata.cpp
|79.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/base/appdata.cpp
|79.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/metadata/initializer/fetcher.cpp
|79.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/initializer/fetcher.cpp
|79.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/metadata/initializer/snapshot.cpp
|79.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/initializer/snapshot.cpp
|79.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/client/server/msgbus_server_types.cpp
|79.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/client/server/msgbus_server_types.cpp
|79.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/replication/controller/tx_assign_tx_id.cpp
|79.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/replication/controller/tx_assign_tx_id.cpp
|79.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/cms_tx_load_state.cpp
|79.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/cms_tx_load_state.cpp
|79.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/metadata/initializer/common.cpp
|79.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/initializer/common.cpp
|79.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/balance/handoff_map.cpp
|79.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/balance/handoff_map.cpp
|79.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/metadata/initializer/manager.cpp
|79.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/initializer/manager.cpp
|79.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/storage/optimizer/abstract/optimizer.cpp
|79.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/storage/optimizer/abstract/optimizer.cpp
|79.1%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/abstract/libstorage-optimizer-abstract.a
|79.1%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/abstract/libstorage-optimizer-abstract.a
|79.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/replication/controller/tx_alter_replication.cpp
|79.1%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/abstract/libstorage-optimizer-abstract.a
|79.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/replication/controller/tx_alter_replication.cpp
|79.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/replication/controller/tx_resolve_secret_result.cpp
|79.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/replication/controller/tx_resolve_secret_result.cpp
|79.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kafka_proxy/actors/kafka_create_partitions_actor.cpp
|79.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/insert_table/rt_insertion.cpp
|79.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/insert_table/rt_insertion.cpp
|79.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kafka_proxy/actors/kafka_create_partitions_actor.cpp
|79.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kafka_proxy/kqp_helper.cpp
|79.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kafka_proxy/kqp_helper.cpp
|79.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_encrypt.cpp
|79.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_encrypt.cpp
|79.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/metadata/request/request_actor_cb.cpp
|79.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/request/request_actor_cb.cpp
|79.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/metadata/request/request_actor.cpp
|79.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/column_engine_logs.cpp
|79.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/request/request_actor.cpp
|79.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/column_engine_logs.cpp
|79.1%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/services/metadata/request/libservices-metadata-request.a
|79.1%| [AR] {RESULT} $(B)/ydb/services/metadata/request/libservices-metadata-request.a
|79.1%| [AR] {BAZEL_UPLOAD} $(B)/ydb/services/metadata/request/libservices-metadata-request.a
|79.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/client/server/msgbus_server_pq_metarequest.cpp
|79.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/client/server/msgbus_server_pq_metarequest.cpp
|79.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kafka_proxy/kafka_transactions_coordinator.cpp
|79.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kafka_proxy/kafka_transactions_coordinator.cpp
|79.1%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/kafka_proxy/libydb-core-kafka_proxy.a
|79.1%| [AR] {RESULT} $(B)/ydb/core/kafka_proxy/libydb-core-kafka_proxy.a
|79.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/changes/actualization/controller/controller.cpp
|79.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/changes/actualization/controller/controller.cpp
|79.1%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/changes/actualization/controller/libchanges-actualization-controller.a
|79.2%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/changes/actualization/controller/libchanges-actualization-controller.a
|79.2%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/changes/actualization/controller/libchanges-actualization-controller.a
|79.2%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/kafka_proxy/libydb-core-kafka_proxy.a
|79.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/client/server/msgbus_server_tablet_state.cpp
|79.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/client/server/msgbus_server_tablet_state.cpp
|79.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/insert_table/path_info.cpp
|79.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/insert_table/path_info.cpp
|79.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/workload_service/tables/table_queries.cpp
|79.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/workload_service/tables/table_queries.cpp
|79.2%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/workload_service/tables/libkqp-workload_service-tables.a
|79.2%| [AR] {RESULT} $(B)/ydb/core/kqp/workload_service/tables/libkqp-workload_service-tables.a
|79.2%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/workload_service/tables/libkqp-workload_service-tables.a
|79.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/driver_lib/run/service_initializer.cpp |79.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/driver_lib/run/service_initializer.cpp |79.2%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/driver_lib/run/librun.a |79.2%| [AR] {RESULT} $(B)/ydb/core/driver_lib/run/librun.a |79.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/client/server/msgbus_server_proxy.cpp |79.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/client/server/msgbus_server_proxy.cpp |79.2%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/driver_lib/run/librun.a |79.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/insert_table/insert_table.cpp |79.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/insert_table/insert_table.cpp |79.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/replication/controller/logging.cpp |79.2%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/insert_table/libcolumnshard-engines-insert_table.a |79.2%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/insert_table/libcolumnshard-engines-insert_table.a |79.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/replication/controller/logging.cpp |79.2%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/insert_table/libcolumnshard-engines-insert_table.a |79.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/http_proxy/auth_factory.cpp |79.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/http_proxy/auth_factory.cpp |79.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/cms_tx_update_downtimes.cpp |79.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/balance/utils.cpp |79.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/cms_tx_update_downtimes.cpp |79.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/balance/utils.cpp |79.2%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/vdisk/balance/libblobstorage-vdisk-balance.a |79.2%| [AR] {RESULT} $(B)/ydb/core/blobstorage/vdisk/balance/libblobstorage-vdisk-balance.a |79.2%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/vdisk/balance/libblobstorage-vdisk-balance.a |79.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/replication/controller/tx_create_replication.cpp |79.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/replication/controller/tx_create_replication.cpp |79.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/common/vdisk_events.cpp |79.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/common/vdisk_events.cpp |79.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/console/logger.cpp |79.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/logger.cpp |79.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/replication/controller/tx_describe_replication.cpp |79.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/replication/controller/tx_describe_replication.cpp |79.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/opt/kqp_query_plan.cpp |79.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/opt/kqp_query_plan.cpp |79.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/replication/controller/tx_discovery_targets_result.cpp |79.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/replication/controller/tx_discovery_targets_result.cpp |79.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/storage/indexes/categories_bloom/constructor.cpp |79.3%| [CC] {BAZEL_UPLOAD} 
$(S)/ydb/core/tx/columnshard/engines/storage/indexes/categories_bloom/constructor.cpp |79.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/client/server/msgbus_server_test_shard_request.cpp |79.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/client/server/msgbus_server_test_shard_request.cpp |79.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/replication/controller/tx_init_schema.cpp |79.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/replication/controller/tx_init_schema.cpp |79.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/metadata/initializer/behaviour.cpp |79.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/initializer/behaviour.cpp |79.3%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/services/metadata/initializer/libservices-metadata-initializer.a |79.3%| [AR] {RESULT} $(B)/ydb/services/metadata/initializer/libservices-metadata-initializer.a |79.3%| [AR] {BAZEL_UPLOAD} $(B)/ydb/services/metadata/initializer/libservices-metadata-initializer.a |79.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/util/actorsys_test/testactorsys.cpp |79.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/util/actorsys_test/testactorsys.cpp |79.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/replication/controller/secret_resolver.cpp |79.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/replication/controller/secret_resolver.cpp |79.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/context.cpp |79.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/context.cpp |79.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/replication/controller/tx_drop_stream_result.cpp |79.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/replication/controller/tx_drop_stream_result.cpp |79.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/mediator/mediator_impl.cpp |79.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/mediator/mediator_impl.cpp |79.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/fetching.cpp |79.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/fetching.cpp |79.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/persqueue_v1/actors/read_init_auth_actor.cpp |79.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/persqueue_v1/actors/read_init_auth_actor.cpp |79.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/console/config_helpers.cpp |79.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/replication/controller/tx_assign_stream_name.cpp |79.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/config_helpers.cpp |79.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/replication/controller/tx_assign_stream_name.cpp |79.3%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/services/persqueue_v1/actors/libservices-persqueue_v1-actors.a |79.3%| [AR] {RESULT} $(B)/ydb/services/persqueue_v1/actors/libservices-persqueue_v1-actors.a |79.3%| [AR] {BAZEL_UPLOAD} $(B)/ydb/services/persqueue_v1/actors/libservices-persqueue_v1-actors.a |79.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/util/actorsys_test/single_thread_ic_mock.cpp |79.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/util/actorsys_test/single_thread_ic_mock.cpp |79.3%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/util/actorsys_test/libcore-util-actorsys_test.a |79.3%| [AR] {RESULT} $(B)/ydb/core/util/actorsys_test/libcore-util-actorsys_test.a |79.3%| [CC] 
{default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/storage/indexes/categories_bloom/meta.cpp |79.3%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/util/actorsys_test/libcore-util-actorsys_test.a |79.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/storage/indexes/categories_bloom/meta.cpp |79.3%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/categories_bloom/libstorage-indexes-categories_bloom.global.a |79.3%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/categories_bloom/libstorage-indexes-categories_bloom.global.a |79.3%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/categories_bloom/libstorage-indexes-categories_bloom.global.a |79.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/changes/abstract/changes.cpp |79.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/changes/abstract/changes.cpp |79.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/replication/controller/tx_heartbeat.cpp |79.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/replication/controller/tx_heartbeat.cpp |79.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/replication/controller/tx_create_stream_result.cpp |79.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/replication/controller/tx_create_stream_result.cpp |79.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/console/console__remove_tenant.cpp |79.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/library/query_actor/query_actor.cpp |79.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/console__remove_tenant.cpp |79.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/library/query_actor/query_actor.cpp |79.4%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/library/query_actor/libydb-library-query_actor.a |79.4%| [AR] {RESULT} $(B)/ydb/library/query_actor/libydb-library-query_actor.a |79.4%| [AR] {BAZEL_UPLOAD} $(B)/ydb/library/query_actor/libydb-library-query_actor.a |79.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/info_collector.cpp |79.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/storage/optimizer/lbuckets/planner/optimizer.cpp |79.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/data_sharing/source/transactions/tx_start_to_source.cpp |79.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/info_collector.cpp |79.4%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/lbuckets/planner/liboptimizer-lbuckets-planner.global.a |79.4%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/lbuckets/planner/liboptimizer-lbuckets-planner.global.a |79.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/data_sharing/source/transactions/tx_start_to_source.cpp |79.4%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/lbuckets/planner/liboptimizer-lbuckets-planner.global.a |79.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/storage/optimizer/lbuckets/planner/optimizer.cpp |79.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/data_sharing/source/session/source.cpp |79.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/data_sharing/source/session/source.cpp |79.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/ext_index/service/executor.cpp |79.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/ext_index/service/executor.cpp |79.4%| [CC] {default-linux-x86_64, release, asan} 
$(S)/ydb/core/cms/console/console__toggle_config_validator.cpp |79.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/console__toggle_config_validator.cpp |79.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/control/immediate_control_board_actor.cpp |79.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/control/immediate_control_board_actor.cpp |79.4%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/control/libydb-core-control.a |79.4%| [AR] {RESULT} $(B)/ydb/core/control/libydb-core-control.a |79.4%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/control/libydb-core-control.a |79.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/data_sharing/source/session/cursor.cpp |79.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/data_sharing/source/session/cursor.cpp |79.4%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/data_sharing/source/session/libdata_sharing-source-session.a |79.4%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/data_sharing/source/session/libdata_sharing-source-session.a |79.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_impl.cpp |79.4%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/data_sharing/source/session/libdata_sharing-source-session.a |79.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_impl.cpp |79.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/data_sharing/source/transactions/tx_finish_ack_to_source.cpp |79.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/data_sharing/source/transactions/tx_finish_ack_to_source.cpp |79.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kesus/proxy/proxy.cpp |79.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_patch.cpp |79.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kesus/proxy/proxy.cpp |79.4%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/kesus/proxy/libcore-kesus-proxy.a |79.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_patch.cpp |79.4%| [AR] {RESULT} $(B)/ydb/core/kesus/proxy/libcore-kesus-proxy.a |79.4%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/kesus/proxy/libcore-kesus-proxy.a |79.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/data_sharing/source/transactions/tx_data_ack_to_source.cpp |79.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/data_sharing/source/transactions/tx_data_ack_to_source.cpp |79.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/changes/abstract/move_portions.cpp |79.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/changes/abstract/move_portions.cpp |79.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/cms_tx_store_walle_task.cpp |79.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/cms_tx_store_walle_task.cpp |79.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/changes/abstract/remove_portions.cpp |79.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/changes/abstract/remove_portions.cpp |79.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/changes/abstract/abstract.cpp |79.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/changes/abstract/abstract.cpp |79.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/metadata/ds_table/accessor_subscribe.cpp |79.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/ds_table/accessor_subscribe.cpp |79.5%| [CC] {default-linux-x86_64, release, asan} 
$(S)/ydb/core/tx/columnshard/data_sharing/source/transactions/tx_write_source_cursor.cpp |79.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/data_sharing/source/transactions/tx_write_source_cursor.cpp |79.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/console/console_configs_manager.cpp |79.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/console_configs_manager.cpp |79.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/data_sharing/source/transactions/tx_start_source_cursor.cpp |79.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/data_sharing/source/transactions/tx_start_source_cursor.cpp |79.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/reader/sys_view/constructor/constructor.cpp |79.5%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/data_sharing/source/transactions/libdata_sharing-source-transactions.a |79.5%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/data_sharing/source/transactions/libdata_sharing-source-transactions.a |79.5%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/reader/sys_view/constructor/libreader-sys_view-constructor.a |79.5%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/reader/sys_view/constructor/libreader-sys_view-constructor.a |79.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/sys_view/constructor/constructor.cpp |79.5%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/data_sharing/source/transactions/libdata_sharing-source-transactions.a |79.5%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/sys_view/constructor/libreader-sys_view-constructor.a |79.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/replication/controller/tx_init.cpp |79.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/replication/controller/tx_init.cpp |79.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/anubis_osiris/blobstorage_anubisfinder.cpp |79.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/anubis_osiris/blobstorage_anubisfinder.cpp |79.5%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/vdisk/anubis_osiris/libblobstorage-vdisk-anubis_osiris.a |79.5%| [AR] {RESULT} $(B)/ydb/core/blobstorage/vdisk/anubis_osiris/libblobstorage-vdisk-anubis_osiris.a |79.5%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/vdisk/anubis_osiris/libblobstorage-vdisk-anubis_osiris.a |79.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/test_helper/shard_writer.cpp |79.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/test_helper/shard_writer.cpp |79.5%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/test_helper/libtx-columnshard-test_helper.a |79.5%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/test_helper/libtx-columnshard-test_helper.a |79.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_status.cpp |79.5%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/test_helper/libtx-columnshard-test_helper.a |79.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_status.cpp |79.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/scheme_board/cache.cpp |79.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/scheme_board/cache.cpp |79.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/client/server/msgbus_server_fill_node.cpp |79.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/client/server/msgbus_server_fill_node.cpp |79.5%| [CC] {default-linux-x86_64, release, asan} 
$(S)/ydb/services/ext_index/metadata/snapshot.cpp |79.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/ext_index/metadata/snapshot.cpp |79.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/fq/libs/control_plane_storage/internal/task_get.cpp |79.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/fq/libs/control_plane_storage/internal/task_get.cpp |79.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/storage/granule/portions_index.cpp |79.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/storage/granule/portions_index.cpp |79.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/replication/controller/tx_drop_dst_result.cpp |79.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/replication/controller/tx_drop_dst_result.cpp |79.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/scheme/versions/abstract_scheme.cpp |79.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/scheme/versions/abstract_scheme.cpp |79.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/replication/controller/tx_drop_replication.cpp |79.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/replication/controller/tx_drop_replication.cpp |79.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/console/console__update_tenant_pool_config.cpp |79.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/console__update_tenant_pool_config.cpp |79.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/persqueue_v1/grpc_pq_schema.cpp |79.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/persqueue_v1/grpc_pq_schema.cpp |79.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/ext_index/metadata/fetcher.cpp |79.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/ext_index/metadata/fetcher.cpp |79.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/olap/manager/manager.cpp |79.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/olap/manager/manager.cpp |79.6%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/olap/manager/libschemeshard-olap-manager.a |79.6%| [AR] {RESULT} $(B)/ydb/core/tx/schemeshard/olap/manager/libschemeshard-olap-manager.a |79.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/transactions/transactions/tx_finish_async.cpp |79.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/metadata/ds_table/accessor_refresh.cpp |79.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/transactions/transactions/tx_finish_async.cpp |79.6%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/schemeshard/olap/manager/libschemeshard-olap-manager.a |79.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/ds_table/accessor_refresh.cpp |79.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/storage/granule/storage.cpp |79.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/storage/granule/storage.cpp |79.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/transactions/operators/schema.cpp |79.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/transactions/operators/schema.cpp |79.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/cms.cpp |79.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/cms.cpp |79.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/cms_tx_log_and_send.cpp |79.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/cms_tx_log_and_send.cpp |79.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/transactions/locks_db.cpp |79.6%| [CC] {BAZEL_UPLOAD} 
$(S)/ydb/core/tx/columnshard/transactions/locks_db.cpp |79.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/scheme/versions/versioned_index.cpp |79.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/scheme/versions/versioned_index.cpp |79.6%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/scheme/versions/libengines-scheme-versions.a |79.6%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/scheme/versions/libengines-scheme-versions.a |79.6%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/scheme/versions/libengines-scheme-versions.a |79.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/fq/libs/control_plane_storage/internal/task_ping.cpp |79.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/fq/libs/control_plane_storage/internal/task_ping.cpp |79.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/storage/granule/granule.cpp |79.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/storage/granule/granule.cpp |79.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/reader/sys_view/optimizer/optimizer.cpp |79.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/sys_view/optimizer/optimizer.cpp |79.6%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/reader/sys_view/optimizer/libreader-sys_view-optimizer.global.a |79.6%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/reader/sys_view/optimizer/libreader-sys_view-optimizer.global.a |79.6%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/sys_view/optimizer/libreader-sys_view-optimizer.global.a |79.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/common/blobstorage_status.cpp |79.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/common/blobstorage_status.cpp |79.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/storage/granule/stages.cpp |79.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/storage/granule/stages.cpp |79.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/common/kqp_tx.cpp |79.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/common/kqp_tx.cpp |79.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/transactions/operators/ev_write/simple.cpp |79.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/transactions/operators/ev_write/simple.cpp |79.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/console/console__alter_tenant.cpp |79.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/console__alter_tenant.cpp |79.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/logger.cpp |79.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/logger.cpp |79.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/metadata/ds_table/accessor_snapshot_simple.cpp |79.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/ds_table/accessor_snapshot_simple.cpp |79.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/ext_index/metadata/manager.cpp |79.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/ext_index/metadata/manager.cpp |79.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/reader/transaction/tx_internal_scan.cpp |79.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/console/immediate_controls_configurator.cpp |79.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/immediate_controls_configurator.cpp |79.7%| [CC] {BAZEL_UPLOAD} 
$(S)/ydb/core/tx/columnshard/engines/reader/transaction/tx_internal_scan.cpp |79.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/transactions/operators/backup.cpp |79.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/transactions/operators/backup.cpp |79.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/tablet/write_queue.cpp |79.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/tablet/write_queue.cpp |79.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/transactions/transactions/tx_add_sharding_info.cpp |79.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_get_block.cpp |79.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_get_block.cpp |79.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/transactions/transactions/tx_add_sharding_info.cpp |79.7%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/transactions/transactions/libcolumnshard-transactions-transactions.a |79.7%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/transactions/transactions/libcolumnshard-transactions-transactions.a |79.7%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/transactions/transactions/libcolumnshard-transactions-transactions.a |79.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/program/resolver.cpp |79.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/program/resolver.cpp |79.7%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/program/libcore-tx-program.a |79.7%| [AR] {RESULT} $(B)/ydb/core/tx/program/libcore-tx-program.a |79.7%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/program/libcore-tx-program.a |79.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/transactions/operators/ev_write/secondary.cpp |79.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/transactions/operators/ev_write/secondary.cpp |79.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/fq/libs/control_plane_storage/internal/nodes_health_check.cpp |79.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/cms_tx_update_config.cpp |79.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/cms_tx_update_config.cpp |79.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/fq/libs/control_plane_storage/internal/nodes_health_check.cpp |79.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/transactions/tx_controller.cpp |79.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/transactions/tx_controller.cpp |79.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/replication/service/base_table_writer.cpp |79.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/replication/service/base_table_writer.cpp |79.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_discover_m3of4.cpp |79.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_discover_m3of4.cpp |79.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/persqueue_v1/services_initializer.cpp |79.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/persqueue_v1/services_initializer.cpp |79.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/common/update.cpp |79.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/common/update.cpp |79.7%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/common/libalter-in_store-common.a |79.7%| [AR] {RESULT} 
$(B)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/common/libalter-in_store-common.a |79.7%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/common/libalter-in_store-common.a |79.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/transactions/operators/long_tx_write.cpp |79.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/transactions/operators/long_tx_write.cpp |79.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/transactions/operators/propose_tx.cpp |79.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/ext_index/metadata/initializer.cpp |79.8%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/transactions/operators/libcolumnshard-transactions-operators.a |79.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/ext_index/metadata/initializer.cpp |79.8%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/transactions/operators/libcolumnshard-transactions-operators.a |79.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/transactions/operators/propose_tx.cpp |79.8%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/transactions/operators/libcolumnshard-transactions-operators.a |79.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/transactions/operators/ev_write/abstract.cpp |79.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/transactions/operators/ev_write/abstract.cpp |79.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/transactions/operators/sharing.cpp |79.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/transactions/operators/sharing.cpp |79.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kesus/tablet/tx_session_timeout.cpp |79.8%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/transactions/operators/libcolumnshard-transactions-operators.global.a |79.8%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/transactions/operators/libcolumnshard-transactions-operators.global.a |79.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kesus/tablet/tx_session_timeout.cpp |79.8%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/transactions/operators/libcolumnshard-transactions-operators.global.a |79.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/ymq/ymq_proxy.cpp |79.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/ymq/ymq_proxy.cpp |79.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/ext_index/metadata/object.cpp |79.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/ext_index/metadata/object.cpp |79.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/common/kqp_resolve.cpp |79.8%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/services/ext_index/metadata/libservices-ext_index-metadata.a |79.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/common/kqp_resolve.cpp |79.8%| [AR] {RESULT} $(B)/ydb/services/ext_index/metadata/libservices-ext_index-metadata.a |79.8%| [AR] {BAZEL_UPLOAD} $(B)/ydb/services/ext_index/metadata/libservices-ext_index-metadata.a |79.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/console/console_tenants_manager.cpp |79.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/console_tenants_manager.cpp |79.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/transactions/operators/ev_write/primary.cpp |79.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/transactions/operators/ev_write/primary.cpp |79.8%| [AR] {default-linux-x86_64, release, asan} 
$(B)/ydb/core/tx/columnshard/transactions/operators/ev_write/libtransactions-operators-ev_write.global.a |79.8%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/transactions/operators/ev_write/libtransactions-operators-ev_write.global.a |79.8%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/transactions/operators/ev_write/libtransactions-operators-ev_write.global.a |79.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/client/server/msgbus_server_resolve_node.cpp |79.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/client/server/msgbus_server_resolve_node.cpp |79.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/persqueue_v1/grpc_pq_write.cpp |79.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/persqueue_v1/grpc_pq_write.cpp |79.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/reader/transaction/tx_scan.cpp |79.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/transaction/tx_scan.cpp |79.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/transactions/operators/ev_write/sync.cpp |79.8%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/reader/transaction/libengines-reader-transaction.a |79.8%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/reader/transaction/libengines-reader-transaction.a |79.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/transactions/operators/ev_write/sync.cpp |79.8%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/transaction/libengines-reader-transaction.a |79.8%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/transactions/operators/ev_write/libtransactions-operators-ev_write.a |79.8%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/transactions/operators/ev_write/libtransactions-operators-ev_write.a |79.8%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/transactions/operators/ev_write/libtransactions-operators-ev_write.a |79.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/planner/optimizer.cpp |79.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/planner/optimizer.cpp |79.8%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/planner/liboptimizer-lcbuckets-planner.global.a |79.8%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/planner/liboptimizer-lcbuckets-planner.global.a |79.9%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/planner/liboptimizer-lcbuckets-planner.global.a |79.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/mediator/tablet_queue.cpp |79.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/mediator/tablet_queue.cpp |79.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/common/kqp_tx_manager.cpp |79.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/common/kqp_tx_manager.cpp |79.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/console/console_handshake.cpp |79.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/console_handshake.cpp |79.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/fetching.cpp |79.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/fetching.cpp |79.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/data_sharing/source/events/control.cpp |79.9%| [CC] {BAZEL_UPLOAD} 
$(S)/ydb/core/tx/columnshard/data_sharing/source/events/control.cpp |79.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/metadata/common/timeout.cpp |79.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/common/timeout.cpp |79.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/cms_tx_log_cleanup.cpp |79.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/cms_tx_log_cleanup.cpp |79.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/fq/libs/control_plane_storage/internal/rate_limiter_resources.cpp |79.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/fq/libs/control_plane_storage/internal/rate_limiter_resources.cpp |79.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/http.cpp |79.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/metadata/common/ss_dialog.cpp |79.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/http.cpp |79.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/common/ss_dialog.cpp |79.9%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/services/metadata/common/libservices-metadata-common.a |79.9%| [AR] {RESULT} $(B)/ydb/services/metadata/common/libservices-metadata-common.a |79.9%| [AR] {BAZEL_UPLOAD} $(B)/ydb/services/metadata/common/libservices-metadata-common.a |79.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/data_accessor/manager.cpp |79.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/data_accessor/manager.cpp |79.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/replication/controller/tx_worker_error.cpp |79.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/replication/controller/tx_worker_error.cpp |79.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_state.cpp |79.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_state.cpp |79.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/tablet/ext_tx_base.cpp |79.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/tablet/ext_tx_base.cpp |79.9%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/tablet/libtx-columnshard-tablet.a |79.9%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/tablet/libtx-columnshard-tablet.a |79.9%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/tablet/libtx-columnshard-tablet.a |79.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/console/console_configs_provider.cpp |79.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/console_configs_provider.cpp |79.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/ext_index/service/activation.cpp |79.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/ext_index/service/activation.cpp |79.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/data_sharing/destination/transactions/tx_finish_from_source.cpp |79.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/data_sharing/destination/transactions/tx_finish_from_source.cpp |79.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/transactions/locks/dependencies.cpp |79.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/transactions/locks/dependencies.cpp |79.9%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/transactions/locks/libcolumnshard-transactions-locks.a |79.9%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/transactions/locks/libcolumnshard-transactions-locks.a |79.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kesus/tablet/tx_config_set.cpp |79.9%| [AR] {BAZEL_UPLOAD} 
$(B)/ydb/core/tx/columnshard/transactions/locks/libcolumnshard-transactions-locks.a |79.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kesus/tablet/tx_config_set.cpp |79.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/persqueue_v1/persqueue.cpp |80.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/persqueue_v1/persqueue.cpp |80.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tablet/tablet_req_reset.cpp |80.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tablet/tablet_req_reset.cpp |80.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_range.cpp |80.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_range.cpp |80.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/client/scheme_cache_lib/yql_db_scheme_resolver.cpp |80.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/client/scheme_cache_lib/yql_db_scheme_resolver.cpp |80.0%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/client/scheme_cache_lib/libcore-client-scheme_cache_lib.a |80.0%| [AR] {RESULT} $(B)/ydb/core/client/scheme_cache_lib/libcore-client-scheme_cache_lib.a |80.0%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/client/scheme_cache_lib/libcore-client-scheme_cache_lib.a |80.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/datastreams/datastreams_proxy.cpp |80.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/datastreams/datastreams_proxy.cpp |80.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/persqueue_v1/grpc_pq_read.cpp |80.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/persqueue_v1/grpc_pq_read.cpp |80.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/data_sharing/destination/transactions/tx_finish_ack_from_initiator.cpp |80.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/data_sharing/destination/transactions/tx_finish_ack_from_initiator.cpp |80.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/operations/batch_builder/builder.cpp |80.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/operations/batch_builder/builder.cpp |80.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/api_adapters.cpp |80.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/api_adapters.cpp |80.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/blobs_action/abstract/storages_manager.cpp |80.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/blobs_action/abstract/storages_manager.cpp |80.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/library/yaml_config/yaml_config.cpp |80.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/library/yaml_config/yaml_config.cpp |80.0%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/library/yaml_config/libydb-library-yaml_config.a |80.0%| [AR] {RESULT} $(B)/ydb/library/yaml_config/libydb-library-yaml_config.a |80.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/data_sharing/source/events/transfer.cpp |80.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/console/console__set_config.cpp |80.0%| [AR] {BAZEL_UPLOAD} $(B)/ydb/library/yaml_config/libydb-library-yaml_config.a |80.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/data_sharing/source/events/transfer.cpp |80.0%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/data_sharing/source/events/libdata_sharing-source-events.a |80.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/console__set_config.cpp |80.0%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/data_sharing/source/events/libdata_sharing-source-events.a |80.0%| [AR] {BAZEL_UPLOAD} 
$(B)/ydb/core/tx/columnshard/data_sharing/source/events/libdata_sharing-source-events.a |80.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/blobs_action/transaction/tx_write_index.cpp |80.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/blobs_action/transaction/tx_write_index.cpp |80.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/cluster_info.cpp |80.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/data_sharing/destination/events/control.cpp |80.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/cluster_info.cpp |80.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/data_sharing/destination/events/control.cpp |80.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/common/kqp_ru_calc.cpp |80.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/common/kqp_ru_calc.cpp |80.0%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/common/libcore-kqp-common.a |80.0%| [AR] {RESULT} $(B)/ydb/core/kqp/common/libcore-kqp-common.a |80.0%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/common/libcore-kqp-common.a |80.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/common/columnshard.cpp |80.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_get.cpp |80.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/common/columnshard.cpp |80.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_get.cpp |80.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/testlib/actors/test_runtime.cpp |80.0%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/testlib/actors/libcore-testlib-actors.a |80.0%| [AR] {RESULT} $(B)/ydb/core/testlib/actors/libcore-testlib-actors.a |80.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/testlib/actors/test_runtime.cpp |80.1%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/testlib/actors/libcore-testlib-actors.a |80.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kesus/tablet/tx_self_check.cpp |80.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kesus/tablet/tx_self_check.cpp |80.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/walle_api_handler.cpp |80.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/walle_api_handler.cpp |80.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/console/console__update_pool_state.cpp |80.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kesus/tablet/tx_semaphore_timeout.cpp |80.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/console__update_pool_state.cpp |80.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kesus/tablet/tx_semaphore_timeout.cpp |80.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/operations/batch_builder/restore.cpp |80.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/operations/batch_builder/restore.cpp |80.1%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/operations/batch_builder/libcolumnshard-operations-batch_builder.a |80.1%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/operations/batch_builder/libcolumnshard-operations-batch_builder.a |80.1%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/operations/batch_builder/libcolumnshard-operations-batch_builder.a |80.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/blobs_action/transaction/tx_blobs_written.cpp |80.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/blobs_action/transaction/tx_blobs_written.cpp |80.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/reader/sys_view/abstract/granule_view.cpp |80.1%| [CC] 
{BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/sys_view/abstract/granule_view.cpp |80.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tablet/tablet_monitoring_proxy.cpp |80.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/console/console_configs_subscriber.cpp |80.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/console_configs_subscriber.cpp |80.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tablet/tablet_monitoring_proxy.cpp |80.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/data_sharing/destination/events/transfer.cpp |80.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/data_sharing/destination/events/transfer.cpp |80.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/data_sharing/modification/events/change_owning.cpp |80.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/data_sharing/modification/events/change_owning.cpp |80.1%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/data_sharing/modification/events/libdata_sharing-modification-events.a |80.1%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/data_sharing/modification/events/libdata_sharing-modification-events.a |80.1%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/data_sharing/modification/events/libdata_sharing-modification-events.a |80.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/cms_tx_init_scheme.cpp |80.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/cms_tx_init_scheme.cpp |80.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/blobs_action/tier/write.cpp |80.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/blobs_action/tier/write.cpp |80.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/replication/service/worker.cpp |80.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/replication/service/worker.cpp |80.1%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/replication/service/libtx-replication-service.a |80.1%| [AR] {RESULT} $(B)/ydb/core/tx/replication/service/libtx-replication-service.a |80.1%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/replication/service/libtx-replication-service.a |80.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/console/feature_flags_configurator.cpp |80.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/feature_flags_configurator.cpp |80.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/normalizer/tables/normalizer.cpp |80.1%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/normalizer/tables/libcolumnshard-normalizer-tables.global.a |80.1%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/normalizer/tables/libcolumnshard-normalizer-tables.global.a |80.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/normalizer/tables/normalizer.cpp |80.1%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/normalizer/tables/libcolumnshard-normalizer-tables.global.a |80.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/console/console__init_scheme.cpp |80.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/console__init_scheme.cpp |80.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/data_sharing/destination/transactions/tx_data_from_source.cpp |80.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/data_sharing/destination/transactions/tx_data_from_source.cpp |80.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/replication/controller/dst_creator.cpp |80.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/replication/controller/dst_creator.cpp 
|80.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/blobs_action/transaction/tx_remove_blobs.cpp |80.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/blobs_action/transaction/tx_remove_blobs.cpp |80.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_indexrestoreget.cpp |80.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/deprecated/persqueue_v0/grpc_pq_read.cpp |80.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_indexrestoreget.cpp |80.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/deprecated/persqueue_v0/grpc_pq_read.cpp |80.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/keyvalue/keyvalue_storage_request.cpp |80.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/keyvalue/keyvalue_storage_request.cpp |80.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/blobs_action/transaction/tx_draft.cpp |80.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/blobs_action/transaction/tx_draft.cpp |80.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/data_reader/actor.cpp |80.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/data_reader/actor.cpp |80.2%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/data_reader/libtx-columnshard-data_reader.a |80.2%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/data_reader/libtx-columnshard-data_reader.a |80.2%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/data_reader/libtx-columnshard-data_reader.a |80.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/data_sharing/modification/tasks/modification.cpp |80.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/data_sharing/modification/tasks/modification.cpp |80.2%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/data_sharing/modification/tasks/libdata_sharing-modification-tasks.a |80.2%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/data_sharing/modification/tasks/libdata_sharing-modification-tasks.a |80.2%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/data_sharing/modification/tasks/libdata_sharing-modification-tasks.a |80.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/data_locks/locks/list.cpp |80.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/data_locks/locks/list.cpp |80.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/sys_view/storage/storage_pools.cpp |80.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/keyvalue/keyvalue_intermediate.cpp |80.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/sys_view/storage/storage_pools.cpp |80.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/keyvalue/keyvalue_intermediate.cpp |80.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/scheme_board/populator.cpp |80.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/scheme_board/populator.cpp |80.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/operations/write.cpp |80.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/operations/write.cpp |80.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tablet/node_tablet_monitor.cpp |80.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tablet/node_tablet_monitor.cpp |80.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/storage/indexes/count_min_sketch/constructor.cpp |80.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/storage/indexes/count_min_sketch/constructor.cpp |80.2%| [CC] {default-linux-x86_64, release, asan} 
$(S)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/planner/common_level.cpp |80.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/planner/common_level.cpp |80.2%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/planner/liboptimizer-lcbuckets-planner.a |80.2%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/planner/liboptimizer-lcbuckets-planner.a |80.2%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/planner/liboptimizer-lcbuckets-planner.a |80.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tablet/tablet_sys.cpp |80.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/operations/write_data.cpp |80.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/console/console__update_tenant_state.cpp |80.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/operations/write_data.cpp |80.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/console__update_tenant_state.cpp |80.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tablet/tablet_sys.cpp |80.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/data_sharing/modification/transactions/tx_change_blobs_owning.cpp |80.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tablet/bootstrapper.cpp |80.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tablet/bootstrapper.cpp |80.2%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/data_sharing/modification/transactions/libdata_sharing-modification-transactions.a |80.2%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/data_sharing/modification/transactions/libdata_sharing-modification-transactions.a |80.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/data_sharing/modification/transactions/tx_change_blobs_owning.cpp |80.3%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/data_sharing/modification/transactions/libdata_sharing-modification-transactions.a |80.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/operations/slice_builder/builder.cpp |80.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/operations/slice_builder/builder.cpp |80.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/persqueue_v1/topic.cpp |80.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/persqueue_v1/topic.cpp |80.3%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/services/persqueue_v1/libydb-services-persqueue_v1.a |80.3%| [AR] {RESULT} $(B)/ydb/services/persqueue_v1/libydb-services-persqueue_v1.a |80.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/http_proxy/http_req.cpp |80.3%| [AR] {BAZEL_UPLOAD} $(B)/ydb/services/persqueue_v1/libydb-services-persqueue_v1.a |80.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_blackboard.cpp |80.3%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/http_proxy/libydb-core-http_proxy.a |80.3%| [AR] {RESULT} $(B)/ydb/core/http_proxy/libydb-core-http_proxy.a |80.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/storage/actualizer/scheme/scheme.cpp |80.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_blackboard.cpp |80.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/storage/actualizer/scheme/scheme.cpp |80.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/http_proxy/http_req.cpp |80.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/data_sharing/manager/sessions.cpp |80.3%| 
[CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/cms_tx_reject_notification.cpp |80.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/cms_tx_reject_notification.cpp |80.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/data_sharing/manager/sessions.cpp |80.3%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/http_proxy/libydb-core-http_proxy.a |80.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/cms_tx_remove_expired_notifications.cpp |80.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tablet/tablet_list_renderer.cpp |80.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/cms_tx_remove_expired_notifications.cpp |80.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tablet/tablet_list_renderer.cpp |80.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/data_sharing/destination/transactions/tx_start_from_initiator.cpp |80.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/data_sharing/destination/transactions/tx_start_from_initiator.cpp |80.3%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/data_sharing/destination/transactions/libdata_sharing-destination-transactions.a |80.3%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/data_sharing/destination/transactions/libdata_sharing-destination-transactions.a |80.3%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/data_sharing/destination/transactions/libdata_sharing-destination-transactions.a |80.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kesus/tablet/tx_init.cpp |80.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kesus/tablet/tx_init.cpp |80.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/sys_view/storage/storage_stats.cpp |80.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/sys_view/storage/storage_stats.cpp |80.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/fq/libs/control_plane_proxy/actors/query_utils.cpp |80.3%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/kesus/tablet/libcore-kesus-tablet.a |80.3%| [AR] {RESULT} $(B)/ydb/core/kesus/tablet/libcore-kesus-tablet.a |80.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/fq/libs/control_plane_proxy/actors/query_utils.cpp |80.3%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/fq/libs/control_plane_proxy/actors/liblibs-control_plane_proxy-actors.a |80.3%| [AR] {RESULT} $(B)/ydb/core/fq/libs/control_plane_proxy/actors/liblibs-control_plane_proxy-actors.a |80.3%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/fq/libs/control_plane_proxy/actors/liblibs-control_plane_proxy-actors.a |80.3%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/kesus/tablet/libcore-kesus-tablet.a |80.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/data_locks/locks/snapshot.cpp |80.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/data_locks/locks/snapshot.cpp |80.3%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/data_locks/locks/libcolumnshard-data_locks-locks.a |80.3%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/data_locks/locks/libcolumnshard-data_locks-locks.a |80.4%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/data_locks/locks/libcolumnshard-data_locks-locks.a |80.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/reader/plain_reader/iterator/fetched_data.cpp |80.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/plain_reader/iterator/fetched_data.cpp |80.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/data_sharing/destination/events/status.cpp |80.4%| [CC] {BAZEL_UPLOAD} 
$(S)/ydb/core/tx/columnshard/data_sharing/destination/events/status.cpp |80.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/data_sharing/common/session/common.cpp |80.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/data_sharing/destination/session/destination.cpp |80.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/data_sharing/common/session/common.cpp |80.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/data_sharing/destination/session/destination.cpp |80.4%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/data_sharing/destination/session/libdata_sharing-destination-session.a |80.4%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/data_sharing/destination/session/libdata_sharing-destination-session.a |80.4%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/data_sharing/destination/events/libdata_sharing-destination-events.a |80.4%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/data_sharing/destination/events/libdata_sharing-destination-events.a |80.4%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/data_sharing/destination/session/libdata_sharing-destination-session.a |80.4%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/data_sharing/destination/events/libdata_sharing-destination-events.a |80.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/data_accessor/local_db/manager.cpp |80.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/data_accessor/local_db/manager.cpp |80.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/storage/indexes/count_min_sketch/meta.cpp |80.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/storage/indexes/count_min_sketch/meta.cpp |80.4%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/count_min_sketch/libstorage-indexes-count_min_sketch.global.a |80.4%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/count_min_sketch/libstorage-indexes-count_min_sketch.global.a |80.4%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/count_min_sketch/libstorage-indexes-count_min_sketch.global.a |80.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/fq/libs/control_plane_storage/internal/task_result_write.cpp |80.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/fq/libs/control_plane_storage/internal/task_result_write.cpp |80.4%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/fq/libs/control_plane_storage/internal/liblibs-control_plane_storage-internal.a |80.4%| [AR] {RESULT} $(B)/ydb/core/fq/libs/control_plane_storage/internal/liblibs-control_plane_storage-internal.a |80.4%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/fq/libs/control_plane_storage/internal/liblibs-control_plane_storage-internal.a |80.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/blobs_action/transaction/tx_gc_indexed.cpp |80.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/blobs_action/transaction/tx_gc_indexed.cpp |80.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/replication/controller/stream_creator.cpp |80.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/replication/controller/stream_creator.cpp |80.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/ydb/ydb_dummy.cpp |80.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/ydb/ydb_dummy.cpp |80.4%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/services/ydb/libydb-services-ydb.a |80.4%| [AR] {RESULT} $(B)/ydb/services/ydb/libydb-services-ydb.a 
|80.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/operations/events.cpp |80.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/writer/indexed_blob_constructor.cpp |80.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/operations/events.cpp |80.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/writer/indexed_blob_constructor.cpp |80.4%| [AR] {BAZEL_UPLOAD} $(B)/ydb/services/ydb/libydb-services-ydb.a |80.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tablet/tablet_resolver.cpp |80.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tablet/tablet_resolver.cpp |80.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/blobs_action/tier/gc_actor.cpp |80.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/blobs_action/tier/gc_actor.cpp |80.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/cms_tx_store_permissions.cpp |80.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/cms_tx_store_permissions.cpp |80.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/mediator/mediator__schema.cpp |80.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/mediator/mediator__schema.cpp |80.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/blobs_action/transaction/tx_gc_insert_table.cpp |80.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/blobs_action/transaction/tx_gc_insert_table.cpp |80.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/reader/plain_reader/iterator/fetching.cpp |80.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/plain_reader/iterator/fetching.cpp |80.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/sys_view/storage/vslots.cpp |80.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/sys_view/storage/vslots.cpp |80.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/counters/counters_manager.cpp |80.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/counters/counters_manager.cpp |80.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/sync_points/limit.cpp |80.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/sync_points/limit.cpp |80.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/data_sharing/manager/shared_blobs.cpp |80.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/data_sharing/manager/shared_blobs.cpp |80.5%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/data_sharing/manager/libcolumnshard-data_sharing-manager.a |80.5%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/data_sharing/manager/libcolumnshard-data_sharing-manager.a |80.5%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/data_sharing/manager/libcolumnshard-data_sharing-manager.a |80.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/ymq/grpc_service.cpp |80.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/downtime.cpp |80.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/downtime.cpp |80.5%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/services/ymq/libydb-services-ymq.a |80.5%| [AR] {RESULT} $(B)/ydb/services/ymq/libydb-services-ymq.a |80.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/sys_view/storage/pdisks.cpp |80.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/ymq/grpc_service.cpp |80.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/sys_view/storage/pdisks.cpp |80.5%| [AR] {BAZEL_UPLOAD} $(B)/ydb/services/ymq/libydb-services-ymq.a |80.5%| 
[CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/blobs_action/storages_manager/manager.cpp |80.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/blobs_action/storages_manager/manager.cpp |80.5%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/blobs_action/storages_manager/libcolumnshard-blobs_action-storages_manager.a |80.5%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/blobs_action/storages_manager/libcolumnshard-blobs_action-storages_manager.a |80.5%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/blobs_action/storages_manager/libcolumnshard-blobs_action-storages_manager.a |80.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/console/console__update_subdomain_key.cpp |80.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/console__update_subdomain_key.cpp |80.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/fq/libs/health/health.cpp |80.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/fq/libs/health/health.cpp |80.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/mediator/mediator.cpp |80.5%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/fq/libs/health/libfq-libs-health.a |80.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/mediator/mediator.cpp |80.5%| [AR] {RESULT} $(B)/ydb/core/fq/libs/health/libfq-libs-health.a |80.5%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/fq/libs/health/libfq-libs-health.a |80.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/resource_subscriber/events.cpp |80.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/resource_subscriber/events.cpp |80.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/counters/columnshard.cpp |80.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/keyvalue/keyvalue_collector.cpp |80.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/keyvalue/keyvalue_collector.cpp |80.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/counters/columnshard.cpp |80.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/console/console__cleanup_subscriptions.cpp |80.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/console__cleanup_subscriptions.cpp |80.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/blobs_action/transaction/tx_write.cpp |80.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/blobs_action/transaction/tx_write.cpp |80.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/data_events/columnshard_splitter.cpp |80.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/erasure_checkers.cpp |80.5%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/blobs_action/transaction/libcolumnshard-blobs_action-transaction.a |80.5%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/blobs_action/transaction/libcolumnshard-blobs_action-transaction.a |80.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/data_events/columnshard_splitter.cpp |80.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/erasure_checkers.cpp |80.6%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/blobs_action/transaction/libcolumnshard-blobs_action-transaction.a |80.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/fq/libs/control_plane_storage/ydb_control_plane_storage_connections.cpp |80.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/fq/libs/control_plane_storage/ydb_control_plane_storage_connections.cpp |80.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/data_locks/manager/manager.cpp |80.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/data_locks/manager/manager.cpp 
|80.6%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/data_locks/manager/libcolumnshard-data_locks-manager.a |80.6%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/data_locks/manager/libcolumnshard-data_locks-manager.a |80.6%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/data_locks/manager/libcolumnshard-data_locks-manager.a |80.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/fq/libs/control_plane_storage/ydb_control_plane_storage_compute_database.cpp |80.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/fq/libs/control_plane_storage/ydb_control_plane_storage_compute_database.cpp |80.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/resource_subscriber/actor.cpp |80.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/resource_subscriber/actor.cpp |80.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/sys_view/storage/groups.cpp |80.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/sys_view/storage/groups.cpp |80.6%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/sys_view/storage/libcore-sys_view-storage.a |80.6%| [AR] {RESULT} $(B)/ydb/core/sys_view/storage/libcore-sys_view-storage.a |80.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/counters/blobs_manager.cpp |80.6%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/sys_view/storage/libcore-sys_view-storage.a |80.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/counters/blobs_manager.cpp |80.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/backpressure/unisched.cpp |80.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/backpressure/unisched.cpp |80.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/blobs_action/tier/storage.cpp |80.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/walle_remove_task_adapter.cpp |80.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/blobs_action/tier/storage.cpp |80.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/walle_remove_task_adapter.cpp |80.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/mediator/mediator__init.cpp |80.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/mediator/mediator__init.cpp |80.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/blobs_action/tier/gc.cpp |80.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/blobs_action/tier/gc.cpp |80.6%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/blobs_action/tier/libcolumnshard-blobs_action-tier.a |80.6%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/blobs_action/tier/libcolumnshard-blobs_action-tier.a |80.6%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/blobs_action/tier/libcolumnshard-blobs_action-tier.a |80.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/blobs_reader/task.cpp |80.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/data_accessor/actor.cpp |80.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tablet/tablet_metrics.cpp |80.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/blobs_reader/task.cpp |80.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/console/console__revert_pool_state.cpp |80.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/data_accessor/actor.cpp |80.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tablet/tablet_metrics.cpp |80.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/console__revert_pool_state.cpp |80.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/operations/slice_builder/pack_builder.cpp |80.6%| [CC] 
{default-linux-x86_64, release, asan} $(S)/ydb/library/signals/owner.cpp |80.6%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/operations/slice_builder/libcolumnshard-operations-slice_builder.a |80.6%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/operations/slice_builder/libcolumnshard-operations-slice_builder.a |80.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/library/signals/owner.cpp |80.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/operations/slice_builder/pack_builder.cpp |80.6%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/library/signals/libydb-library-signals.a |80.6%| [AR] {RESULT} $(B)/ydb/library/signals/libydb-library-signals.a |80.7%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/operations/slice_builder/libcolumnshard-operations-slice_builder.a |80.7%| [AR] {BAZEL_UPLOAD} $(B)/ydb/library/signals/libydb-library-signals.a |80.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/backpressure/queue_backpressure_client.cpp |80.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/backpressure/queue_backpressure_client.cpp |80.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/resource_subscriber/task.cpp |80.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/resource_subscriber/task.cpp |80.7%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/resource_subscriber/libtx-columnshard-resource_subscriber.a |80.7%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/resource_subscriber/libtx-columnshard-resource_subscriber.a |80.7%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/resource_subscriber/libtx-columnshard-resource_subscriber.a |80.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/data_accessor/abstract/collector.cpp |80.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/data_accessor/abstract/collector.cpp |80.7%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/data_accessor/abstract/libcolumnshard-data_accessor-abstract.a |80.7%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/data_accessor/abstract/libcolumnshard-data_accessor-abstract.a |80.7%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/data_accessor/abstract/libcolumnshard-data_accessor-abstract.a |80.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tablet/tablet_counters_aggregator.cpp |80.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tablet/tablet_responsiveness_pinger.cpp |80.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tablet/tablet_counters_aggregator.cpp |80.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tablet/tablet_responsiveness_pinger.cpp |80.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/blobs_reader/read_coordinator.cpp |80.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/blobs_action/blob_manager_db.cpp |80.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/blobs_reader/read_coordinator.cpp |80.7%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/blobs_action/libtx-columnshard-blobs_action.a |80.7%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/blobs_action/libtx-columnshard-blobs_action.a |80.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/blobs_action/blob_manager_db.cpp |80.7%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/blobs_action/libtx-columnshard-blobs_action.a |80.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/test_tablet/load_actor_read_validate.cpp |80.7%| [CC] {default-linux-x86_64, release, asan} 
$(S)/ydb/core/tx/columnshard/data_accessor/local_db/collector.cpp |80.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/normalizer/tablet/gc_counters.cpp |80.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/test_tablet/load_actor_read_validate.cpp |80.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/normalizer/tablet/gc_counters.cpp |80.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/data_accessor/local_db/collector.cpp |80.7%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/data_accessor/local_db/libcolumnshard-data_accessor-local_db.a |80.7%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/data_accessor/local_db/libcolumnshard-data_accessor-local_db.a |80.7%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/data_accessor/local_db/libcolumnshard-data_accessor-local_db.a |80.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/test_tablet/test_shard_mon.cpp |80.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/test_tablet/test_shard_mon.cpp |80.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/normalizer/tablet/broken_txs.cpp |80.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/normalizer/tablet/broken_txs.cpp |80.7%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/normalizer/tablet/libcolumnshard-normalizer-tablet.global.a |80.7%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/normalizer/tablet/libcolumnshard-normalizer-tablet.global.a |80.7%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/normalizer/tablet/libcolumnshard-normalizer-tablet.global.a |80.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/console/console__create_tenant.cpp |80.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/console__create_tenant.cpp |80.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/data_accessor/events.cpp |80.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/data_accessor/events.cpp |80.7%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/data_accessor/libtx-columnshard-data_accessor.a |80.7%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/data_accessor/libtx-columnshard-data_accessor.a |80.7%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/data_accessor/libtx-columnshard-data_accessor.a |80.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/operations/common/context.cpp |80.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/operations/common/context.cpp |80.7%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/operations/common/libcolumnshard-operations-common.a |80.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/data_accessor/in_mem/manager.cpp |80.8%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/operations/common/libcolumnshard-operations-common.a |80.8%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/operations/common/libcolumnshard-operations-common.a |80.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/data_accessor/in_mem/manager.cpp |80.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/counters/indexation.cpp |80.8%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/data_accessor/in_mem/libcolumnshard-data_accessor-in_mem.a |80.8%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/data_accessor/in_mem/libcolumnshard-data_accessor-in_mem.a |80.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/counters/indexation.cpp |80.8%| [AR] {BAZEL_UPLOAD} 
$(B)/ydb/core/tx/columnshard/data_accessor/in_mem/libcolumnshard-data_accessor-in_mem.a |80.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/blobs_reader/actor.cpp |80.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/blobs_reader/actor.cpp |80.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/keyvalue/keyvalue.cpp |80.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/keyvalue/keyvalue.cpp |80.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/counters/engine_logs.cpp |80.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/counters/engine_logs.cpp |80.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/testlib/tx_helpers.cpp |80.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/testlib/tx_helpers.cpp |80.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/blobs_reader/events.cpp |80.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/blobs_reader/events.cpp |80.8%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/blobs_reader/libtx-columnshard-blobs_reader.a |80.8%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/blobs_reader/libtx-columnshard-blobs_reader.a |80.8%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/blobs_reader/libtx-columnshard-blobs_reader.a |80.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/replication/controller/dst_remover.cpp |80.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/replication/controller/dst_remover.cpp |80.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/cms_tx_remove_task.cpp |80.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/cms_tx_remove_task.cpp |80.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/counters/counters.cpp |80.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/counters/counters.cpp |80.8%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/grpc_services/counters/libcore-grpc_services-counters.a |80.8%| [AR] {RESULT} $(B)/ydb/core/grpc_services/counters/libcore-grpc_services-counters.a |80.8%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/grpc_services/counters/libcore-grpc_services-counters.a |80.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tablet/tablet_pipe_client.cpp |80.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tablet/tablet_pipe_client.cpp |80.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/test_tablet/load_actor_state.cpp |80.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/test_tablet/load_actor_state.cpp |80.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/operations/manager.cpp |80.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/operations/manager.cpp |80.8%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/operations/libtx-columnshard-operations.a |80.8%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/operations/libtx-columnshard-operations.a |80.8%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/operations/libtx-columnshard-operations.a |80.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/mediator/mediator__configure.cpp |80.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/mediator/mediator__configure.cpp |80.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/walle_check_task_adapter.cpp |80.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/walle_check_task_adapter.cpp |80.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/fq/libs/control_plane_storage/ydb_control_plane_storage_quotas.cpp |80.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/blobs_action/bs/blob_manager.cpp 
|80.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/fq/libs/control_plane_storage/ydb_control_plane_storage_quotas.cpp |80.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/blobs_action/bs/blob_manager.cpp |80.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/console/console__remove_tenant_failed.cpp |80.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/console__remove_tenant_failed.cpp |80.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tablet/resource_broker.cpp |80.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tablet/resource_broker.cpp |80.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_multicollect.cpp |80.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_multicollect.cpp |80.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/test_tablet/load_actor_mon.cpp |80.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/test_tablet/load_actor_mon.cpp |80.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/counters/scan.cpp |80.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/replication/controller/dst_alterer.cpp |80.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/counters/scan.cpp |80.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/replication/controller/dst_alterer.cpp |80.9%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/replication/controller/libtx-replication-controller.a |80.9%| [AR] {RESULT} $(B)/ydb/core/tx/replication/controller/libtx-replication-controller.a |80.9%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/replication/controller/libtx-replication-controller.a |80.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/scheme/schema_version.cpp |80.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/scheme/schema_version.cpp |80.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/blobs_action/events/delete_blobs.cpp |80.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/blobs_action/events/delete_blobs.cpp |80.9%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/blobs_action/events/libcolumnshard-blobs_action-events.a |80.9%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/blobs_action/events/libcolumnshard-blobs_action-events.a |80.9%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/blobs_action/events/libcolumnshard-blobs_action-events.a |80.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/reader/plain_reader/iterator/context.cpp |80.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/plain_reader/iterator/context.cpp |80.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/keyvalue/keyvalue_state_collect.cpp |80.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/keyvalue/keyvalue_state_collect.cpp |80.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/test_tablet/tx_load_everything.cpp |80.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/test_tablet/tx_load_everything.cpp |80.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/test_tablet/test_tablet.cpp |80.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/test_tablet/test_tablet.cpp |80.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/localrecovery/localrecovery_defs.cpp |80.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/localrecovery/localrecovery_defs.cpp |80.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/deprecated/persqueue_v0/persqueue.cpp |80.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/deprecated/persqueue_v0/persqueue.cpp |80.9%| [CC] 
{default-linux-x86_64, release, asan} $(S)/ydb/core/tablet/node_whiteboard.cpp |80.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tablet/node_whiteboard.cpp |80.9%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tablet/libydb-core-tablet.a |80.9%| [AR] {RESULT} $(B)/ydb/core/tablet/libydb-core-tablet.a |80.9%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tablet/libydb-core-tablet.a |80.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/storage/indexes/bloom/meta.cpp |80.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/storage/indexes/bloom/meta.cpp |80.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/test_tablet/test_shard_context.cpp |80.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/test_tablet/test_shard_context.cpp |80.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/storage/indexes/bloom/constructor.cpp |80.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/test_tablet/state_server_interface.cpp |80.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/test_tablet/state_server_interface.cpp |80.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/storage/indexes/bloom/constructor.cpp |80.9%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/bloom/libstorage-indexes-bloom.global.a |80.9%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/bloom/libstorage-indexes-bloom.global.a |80.9%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/bloom/libstorage-indexes-bloom.global.a |80.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/test_tablet/load_actor_write.cpp |80.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/test_tablet/load_actor_write.cpp |80.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/writer/buffer/events.cpp |81.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/writer/buffer/events.cpp |80.9%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/writer/buffer/libengines-writer-buffer.a |81.0%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/writer/buffer/libengines-writer-buffer.a |81.0%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/writer/buffer/libengines-writer-buffer.a |81.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/blobs_action/bs/gc_actor.cpp |81.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/blobs_action/bs/gc_actor.cpp |81.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/metadata/manager/common.cpp |81.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/manager/common.cpp |81.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/locks/locks.cpp |81.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/locks/locks.cpp |81.0%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/locks/libcore-tx-locks.a |81.0%| [AR] {RESULT} $(B)/ydb/core/tx/locks/libcore-tx-locks.a |81.0%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/locks/libcore-tx-locks.a |81.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/fq/libs/control_plane_storage/in_memory_control_plane_storage.cpp |81.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/cms_tx_get_log_tail.cpp |81.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/writer/compacted_blob_constructor.cpp |81.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/fq/libs/control_plane_storage/in_memory_control_plane_storage.cpp |81.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/cms_tx_get_log_tail.cpp |81.0%| [CC] 
{BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/writer/compacted_blob_constructor.cpp |81.0%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/writer/libcolumnshard-engines-writer.a |81.0%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/writer/libcolumnshard-engines-writer.a |81.0%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/writer/libcolumnshard-engines-writer.a |81.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/test_tablet/tx_init_scheme.cpp |81.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/test_tablet/tx_init_scheme.cpp |81.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_collect.cpp |81.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_collect.cpp |81.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/client/server/msgbus_server_ic_debug.cpp |81.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/client/server/msgbus_server_ic_debug.cpp |81.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/sys_view/tablets/tablets.cpp |81.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/test_tablet/tx_initialize.cpp |81.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/sys_view/tablets/tablets.cpp |81.0%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/sys_view/tablets/libcore-sys_view-tablets.a |81.0%| [AR] {RESULT} $(B)/ydb/core/sys_view/tablets/libcore-sys_view-tablets.a |81.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/test_tablet/tx_initialize.cpp |81.0%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/sys_view/tablets/libcore-sys_view-tablets.a |81.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/blobs_action/bs/storage.cpp |81.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/fq/libs/control_plane_storage/ydb_control_plane_storage_bindings.cpp |81.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/blobs_action/bs/gc.cpp |81.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/blobs_action/bs/storage.cpp |81.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/fq/libs/control_plane_storage/ydb_control_plane_storage_bindings.cpp |81.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/blobs_action/bs/gc.cpp |81.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/walle_list_tasks_adapter.cpp |81.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/walle_list_tasks_adapter.cpp |81.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/cms_tx_process_notification.cpp |81.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/cms_tx_process_notification.cpp |81.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/client/server/msgbus_blobstorage_config.cpp |81.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/client/server/msgbus_blobstorage_config.cpp |81.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/testlib/basics/runtime.cpp |81.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/testlib/basics/runtime.cpp |81.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_user_db.cpp |81.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_user_db.cpp |81.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/client/server/msgbus_server_cms.cpp |81.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/client/server/msgbus_server_cms.cpp |81.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/testlib/basics/helpers.cpp |81.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/testlib/basics/helpers.cpp |81.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/alter_cdc_stream_unit.cpp |81.1%| [CC] {BAZEL_UPLOAD} 
$(S)/ydb/core/tx/datashard/alter_cdc_stream_unit.cpp |81.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/test_tablet/load_actor_impl.cpp |81.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/blobs_action/abstract/storage.cpp |81.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/blobs_action/abstract/storage.cpp |81.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/test_tablet/load_actor_impl.cpp |81.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/storage/actualizer/scheme/counters.cpp |81.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/storage/actualizer/scheme/counters.cpp |81.1%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/storage/actualizer/scheme/libstorage-actualizer-scheme.a |81.1%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/storage/actualizer/scheme/libstorage-actualizer-scheme.a |81.1%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/actualizer/scheme/libstorage-actualizer-scheme.a |81.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/fq/libs/control_plane_storage/ydb_control_plane_storage_queries.cpp |81.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/fq/libs/control_plane_storage/ydb_control_plane_storage_queries.cpp |81.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/alter_table_unit.cpp |81.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/alter_table_unit.cpp |81.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/console/console__remove_computational_units.cpp |81.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/console__remove_computational_units.cpp |81.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/sync_points/abstract.cpp |81.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/sync_points/abstract.cpp |81.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/reader/sys_view/abstract/iterator.cpp |81.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/console/console__replace_yaml_config.cpp |81.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/sys_view/abstract/iterator.cpp |81.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/normalizer/portion/clean_deprecated_snapshot.cpp |81.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/scheme/indexes/abstract/fetcher.cpp |81.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/scheme/indexes/abstract/fetcher.cpp |81.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/normalizer/portion/clean_deprecated_snapshot.cpp |81.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/console__replace_yaml_config.cpp |81.1%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/scheme/indexes/abstract/libscheme-indexes-abstract.a |81.1%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/scheme/indexes/abstract/libscheme-indexes-abstract.a |81.1%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/scheme/indexes/abstract/libscheme-indexes-abstract.a |81.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/sys_view/sessions/sessions.cpp |81.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/sys_view/sessions/sessions.cpp |81.1%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/sys_view/sessions/libcore-sys_view-sessions.a |81.1%| [AR] {RESULT} $(B)/ydb/core/sys_view/sessions/libcore-sys_view-sessions.a 
|81.1%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/sys_view/sessions/libcore-sys_view-sessions.a |81.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/scheme_board/load_test.cpp |81.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/test_tablet/load_actor_delete.cpp |81.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/scheme_board/load_test.cpp |81.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/test_tablet/load_actor_delete.cpp |81.1%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/test_tablet/libydb-core-test_tablet.a |81.1%| [AR] {RESULT} $(B)/ydb/core/test_tablet/libydb-core-test_tablet.a |81.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_discover_m3dc.cpp |81.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_discover_m3dc.cpp |81.1%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/test_tablet/libydb-core-test_tablet.a |81.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_repl_offsets.cpp |81.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_repl_offsets.cpp |81.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/testlib/basics/appdata.cpp |81.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/testlib/basics/appdata.cpp |81.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/testlib/basics/services.cpp |81.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/testlib/basics/services.cpp |81.1%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/testlib/basics/libcore-testlib-basics.a |81.2%| [AR] {RESULT} $(B)/ydb/core/testlib/basics/libcore-testlib-basics.a |81.2%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/testlib/basics/libcore-testlib-basics.a |81.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/fq/libs/init/init.cpp |81.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/testlib/fake_coordinator.cpp |81.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/testlib/fake_coordinator.cpp |81.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/fq/libs/init/init.cpp |81.2%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/fq/libs/init/libfq-libs-init.a |81.2%| [AR] {RESULT} $(B)/ydb/core/fq/libs/init/libfq-libs-init.a |81.2%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/fq/libs/init/libfq-libs-init.a |81.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/mediator/execute_queue.cpp |81.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/testlib/tablet_helpers.cpp |81.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/mediator/execute_queue.cpp |81.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/testlib/tablet_helpers.cpp |81.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_block.cpp |81.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_block.cpp |81.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/fq/libs/control_plane_storage/validators.cpp |81.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/blobs_action/abstract/gc.cpp |81.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/fq/libs/control_plane_storage/validators.cpp |81.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/blobs_action/abstract/gc.cpp |81.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/blobs_action/bs/write.cpp |81.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/blobs_action/bs/write.cpp |81.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/sys_view/show_create/create_table_formatter.cpp |81.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/testlib/cs_helper.cpp |81.2%| [CC] {BAZEL_UPLOAD} 
$(S)/ydb/core/sys_view/show_create/create_table_formatter.cpp |81.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/testlib/cs_helper.cpp |81.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/inflight_request_tracker.cpp |81.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/inflight_request_tracker.cpp |81.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/client/server/msgbus_server_console.cpp |81.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_put_impl.cpp |81.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/client/server/msgbus_server_console.cpp |81.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_put_impl.cpp |81.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/columnshard__propose_transaction.cpp |81.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/columnshard__propose_transaction.cpp |81.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/columnshard__progress_tx.cpp |81.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/columnshard__progress_tx.cpp |81.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/blobs_action/bs/remove.cpp |81.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/blobs_action/bs/remove.cpp |81.2%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/blobs_action/bs/libcolumnshard-blobs_action-bs.a |81.2%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/blobs_action/bs/libcolumnshard-blobs_action-bs.a |81.2%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/blobs_action/bs/libcolumnshard-blobs_action-bs.a |81.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/testlib/tablet_flat_dummy.cpp |81.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/testlib/tablet_flat_dummy.cpp |81.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/console/console__update_confirmed_subdomain.cpp |81.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/console__update_confirmed_subdomain.cpp |81.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/sys_view/show_create/show_create.cpp |81.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/sys_view/show_create/show_create.cpp |81.2%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/sys_view/show_create/libcore-sys_view-show_create.a |81.2%| [AR] {RESULT} $(B)/ydb/core/sys_view/show_create/libcore-sys_view-show_create.a |81.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/console/console.cpp |81.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/write_actor.cpp |81.2%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/sys_view/show_create/libcore-sys_view-show_create.a |81.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/console.cpp |81.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/write_actor.cpp |81.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/metadata/abstract/fetcher.cpp |81.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/abstract/fetcher.cpp |81.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/mediator/mediator__schema_upgrade.cpp |81.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/mediator/mediator__schema_upgrade.cpp |81.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/reader/sys_view/abstract/metadata.cpp |81.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/sys_view/abstract/metadata.cpp |81.3%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/mediator/libcore-tx-mediator.a |81.3%| [AR] {RESULT} 
$(B)/ydb/core/tx/mediator/libcore-tx-mediator.a
|81.3%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/reader/sys_view/abstract/libreader-sys_view-abstract.a
|81.3%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/mediator/libcore-tx-mediator.a
|81.3%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/reader/sys_view/abstract/libreader-sys_view-abstract.a
|81.3%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/sys_view/abstract/libreader-sys_view-abstract.a
|81.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/columnshard_subdomain_path_id.cpp
|81.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/columnshard_subdomain_path_id.cpp
|81.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_check_integrity_get.cpp
|81.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_check_integrity_get.cpp
|81.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/columnshard_impl.cpp
|81.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/columnshard_impl.cpp
|81.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/testlib/test_client.cpp
|81.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/testlib/test_client.cpp
|81.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/console/console__remove_tenant_done.cpp
|81.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/console__remove_tenant_done.cpp
|81.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/keyvalue/keyvalue_state.cpp
|81.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/keyvalue/keyvalue_state.cpp
|81.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/columnshard_view.cpp
|81.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/columnshard_view.cpp
|81.3%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/keyvalue/libydb-core-keyvalue.a
|81.3%| [AR] {RESULT} $(B)/ydb/core/keyvalue/libydb-core-keyvalue.a
|81.3%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/keyvalue/libydb-core-keyvalue.a
|81.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/fq/libs/control_plane_storage/ydb_control_plane_storage.cpp
|81.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/fq/libs/control_plane_storage/ydb_control_plane_storage.cpp
|81.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/client/server/grpc_server.cpp
|81.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/client/server/grpc_server.cpp
|81.3%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/fq/libs/control_plane_storage/libfq-libs-control_plane_storage.a
|81.3%| [AR] {RESULT} $(B)/ydb/core/fq/libs/control_plane_storage/libfq-libs-control_plane_storage.a
|81.3%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/fq/libs/control_plane_storage/libfq-libs-control_plane_storage.a
|81.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/compute_actor/kqp_compute_actor_factory.cpp
|81.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/compute_actor/kqp_compute_actor_factory.cpp
|81.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_kqp.cpp
|81.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_kqp.cpp
|81.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/compute_actor/kqp_scan_compute_actor.cpp
|81.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/compute_actor/kqp_scan_compute_actor.cpp
|81.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/testlib/tenant_runtime.cpp
|81.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/testlib/common_helper.cpp
|81.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/testlib/tenant_runtime.cpp
|81.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/testlib/common_helper.cpp
|81.3%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/testlib/libydb-core-testlib.a
|81.3%| [AR] {RESULT} $(B)/ydb/core/testlib/libydb-core-testlib.a
|81.4%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/testlib/libydb-core-testlib.a
|81.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/sys_view/resource_pool_classifiers/resource_pool_classifiers.cpp
|81.3%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/sys_view/resource_pool_classifiers/libcore-sys_view-resource_pool_classifiers.a
|81.3%| [AR] {RESULT} $(B)/ydb/core/sys_view/resource_pool_classifiers/libcore-sys_view-resource_pool_classifiers.a
|81.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/sys_view/resource_pool_classifiers/resource_pool_classifiers.cpp
|81.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/columnshard__propose_cancel.cpp
|81.4%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/sys_view/resource_pool_classifiers/libcore-sys_view-resource_pool_classifiers.a
|81.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/columnshard__propose_cancel.cpp
|81.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/compute_actor/kqp_scan_compute_manager.cpp
|81.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/compute_actor/kqp_scan_compute_manager.cpp
|81.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/cms_tx_remove_request.cpp
|81.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/cms_tx_remove_request.cpp
|81.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/normalizer/schema_version/version.cpp
|81.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_trans_queue.cpp
|81.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/normalizer/schema_version/version.cpp
|81.4%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/normalizer/schema_version/libcolumnshard-normalizer-schema_version.global.a
|81.4%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/normalizer/schema_version/libcolumnshard-normalizer-schema_version.global.a
|81.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_trans_queue.cpp
|81.4%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/normalizer/schema_version/libcolumnshard-normalizer-schema_version.global.a
|81.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/normalizer/granule/normalizer.cpp
|81.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/normalizer/granule/normalizer.cpp
|81.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/export/session/selector/backup/selector.cpp
|81.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/export/session/selector/backup/selector.cpp
|81.4%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/export/session/selector/backup/libsession-selector-backup.global.a
|81.4%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/export/session/selector/backup/libsession-selector-backup.global.a
|81.4%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/export/session/selector/backup/libsession-selector-backup.global.a
|81.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_kqp_compute.cpp
|81.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_kqp_compute.cpp
|81.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/normalizer/portion/chunks_actualization.cpp
|81.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/normalizer/portion/chunks_actualization.cpp
|81.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/compute_actor/kqp_scan_fetcher_actor.cpp
|81.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/compute_actor/kqp_scan_fetcher_actor.cpp
|81.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_direct_erase.cpp
|81.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_direct_erase.cpp
|81.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/columnshard__plan_step.cpp
|81.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/columnshard__plan_step.cpp
|81.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/persqueue/writer/writer.cpp
|81.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/writer/writer.cpp
|81.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/console/configs_dispatcher.cpp
|81.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/configs_dispatcher.cpp
|81.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/walle_create_task_adapter.cpp
|81.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/walle_create_task_adapter.cpp
|81.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/columnshard__notify_tx_completion.cpp
|81.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/columnshard__notify_tx_completion.cpp
|81.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/compute_actor/kqp_compute_actor.cpp
|81.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/compute_actor/kqp_compute_actor.cpp
|81.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_discover.cpp
|81.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_discover.cpp
|81.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/data_events/shards_splitter.cpp
|81.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/data_events/shards_splitter.cpp
|81.4%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/data_events/libcore-tx-data_events.a
|81.4%| [AR] {RESULT} $(B)/ydb/core/tx/data_events/libcore-tx-data_events.a
|81.5%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/data_events/libcore-tx-data_events.a
|81.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/columnshard__write.cpp
|81.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/compute_actor/kqp_compute_state.cpp
|81.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/compute_actor/kqp_compute_state.cpp
|81.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/columnshard__write.cpp
|81.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_split_dst.cpp
|81.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_split_dst.cpp
|81.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard_cdc_stream_common.cpp
|81.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_cdc_stream_common.cpp
|81.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/normalizer/portion/chunks_v0_meta.cpp
|81.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/normalizer/portion/chunks_v0_meta.cpp
|81.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/coordinator/coordinator__schema_upgrade.cpp
|81.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_stat.cpp
|81.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/coordinator/coordinator__schema_upgrade.cpp
|81.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_stat.cpp
|81.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/hooks/abstract/abstract.cpp
|81.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/hooks/abstract/abstract.cpp
|81.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_pipeline.cpp
|81.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/normalizer/portion/clean_empty.cpp
|81.5%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/hooks/abstract/libcolumnshard-hooks-abstract.a
|81.5%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/hooks/abstract/libcolumnshard-hooks-abstract.a
|81.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/columnshard.cpp
|81.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/normalizer/portion/clean_empty.cpp
|81.5%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/hooks/abstract/libcolumnshard-hooks-abstract.a
|81.5%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/coordinator/libcore-tx-coordinator.a
|81.5%| [AR] {RESULT} $(B)/ydb/core/tx/coordinator/libcore-tx-coordinator.a
|81.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/columnshard.cpp
|81.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_pipeline.cpp
|81.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/sys_view/auth/group_members.cpp
|81.5%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/coordinator/libcore-tx-coordinator.a
|81.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/sys_view/auth/group_members.cpp
|81.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/syncer/blobstorage_syncer_localwriter.cpp
|81.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/syncer/blobstorage_syncer_localwriter.cpp
|81.5%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/sys_view/auth/libcore-sys_view-auth.a
|81.5%| [AR] {RESULT} $(B)/ydb/core/sys_view/auth/libcore-sys_view-auth.a
|81.5%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/sys_view/auth/libcore-sys_view-auth.a
|81.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_split_src.cpp
|81.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_split_src.cpp
|81.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/background_controller.cpp
|81.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/tables_manager.cpp
|81.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/background_controller.cpp
|81.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/tables_manager.cpp
|81.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_repl_offsets_client.cpp
|81.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_repl_offsets_client.cpp
|81.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_external_data_source.cpp
|81.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_external_data_source.cpp
|81.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/ext_index/service/deleting.cpp
|81.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/ext_index/service/deleting.cpp
|81.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/hulldb/recovery/hulldb_recovery.cpp
|81.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/hulldb/recovery/hulldb_recovery.cpp
|81.5%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/vdisk/hulldb/recovery/libvdisk-hulldb-recovery.a
|81.5%| [AR] {RESULT} $(B)/ydb/core/blobstorage/vdisk/hulldb/recovery/libvdisk-hulldb-recovery.a
|81.6%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/vdisk/hulldb/recovery/libvdisk-hulldb-recovery.a
|81.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/compute_actor/kqp_compute_actor_helpers.cpp
|81.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/compute_actor/kqp_compute_actor_helpers.cpp
|81.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/normalizer/granule/clean_granule.cpp
|81.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/normalizer/granule/clean_granule.cpp
|81.6%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/normalizer/granule/libcolumnshard-normalizer-granule.global.a
|81.6%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/normalizer/granule/libcolumnshard-normalizer-granule.global.a
|81.6%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/normalizer/granule/libcolumnshard-normalizer-granule.global.a
|81.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/console/configs_cache.cpp
|81.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/configs_cache.cpp
|81.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/columnshard__statistics.cpp
|81.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/columnshard__statistics.cpp
|81.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/columnshard__write_index.cpp
|81.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/columnshard__write_index.cpp
|81.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard__store_table_path.cpp
|81.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/datastreams/grpc_service.cpp
|81.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard__store_table_path.cpp
|81.6%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/services/datastreams/libydb-services-datastreams.a
|81.6%| [AR] {RESULT} $(B)/ydb/services/datastreams/libydb-services-datastreams.a
|81.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/loading/stages.cpp
|81.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/datastreams/grpc_service.cpp
|81.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/loading/stages.cpp
|81.6%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/loading/libtx-columnshard-loading.a
|81.6%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/loading/libtx-columnshard-loading.a
|81.6%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/loading/libtx-columnshard-loading.a
|81.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/console/console__load_state.cpp
|81.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_loans.cpp
|81.6%| [AR] {BAZEL_UPLOAD} $(B)/ydb/services/datastreams/libydb-services-datastreams.a
|81.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/console__load_state.cpp
|81.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_loans.cpp
|81.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/blobs_action/abstract/gc_actor.cpp
|81.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/blobs_action/abstract/gc_actor.cpp
|81.6%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/blobs_action/abstract/libcolumnshard-blobs_action-abstract.a
|81.6%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/blobs_action/abstract/libcolumnshard-blobs_action-abstract.a
|81.6%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/blobs_action/abstract/libcolumnshard-blobs_action-abstract.a
|81.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/sys_view/common/schema.cpp
|81.6%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/cms/console/libcore-cms-console.a
|81.6%| [AR] {RESULT} $(B)/ydb/core/cms/console/libcore-cms-console.a
|81.6%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/sys_view/common/libcore-sys_view-common.a
|81.6%| [AR] {RESULT} $(B)/ydb/core/sys_view/common/libcore-sys_view-common.a
|81.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/sys_view/common/schema.cpp
|81.6%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/sys_view/common/libcore-sys_view-common.a
|81.6%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/cms/console/libcore-cms-console.a
|81.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/compute_actor/kqp_pure_compute_actor.cpp
|81.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/compute_actor/kqp_pure_compute_actor.cpp
|81.6%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/compute_actor/libcore-kqp-compute_actor.a
|81.6%| [AR] {RESULT} $(B)/ydb/core/kqp/compute_actor/libcore-kqp-compute_actor.a
|81.6%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/compute_actor/libcore-kqp-compute_actor.a
|81.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/sys_view/resource_pools/resource_pools.cpp
|81.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/sys_view/resource_pools/resource_pools.cpp
|81.6%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/sys_view/resource_pools/libcore-sys_view-resource_pools.a
|81.6%| [AR] {RESULT} $(B)/ydb/core/sys_view/resource_pools/libcore-sys_view-resource_pools.a
|81.7%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/sys_view/resource_pools/libcore-sys_view-resource_pools.a
|81.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/sys_view/partition_stats/partition_stats.cpp
|81.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/sys_view/partition_stats/partition_stats.cpp
|81.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/columnshard__init.cpp
|81.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/columnshard__init.cpp
|81.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_monactor.cpp
|81.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/normalizer/portion/portion.cpp
|81.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_monactor.cpp
|81.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/normalizer/portion/portion.cpp
|81.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/client/server/msgbus_server_node_registration.cpp
|81.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/long_tx_service/long_tx_service_impl.cpp
|81.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/client/server/msgbus_server_node_registration.cpp
|81.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/long_tx_service/long_tx_service_impl.cpp
|81.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/deprecated/persqueue_v0/grpc_pq_clusters_updater_actor.cpp
|81.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/deprecated/persqueue_v0/grpc_pq_clusters_updater_actor.cpp
|81.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_multiget.cpp
|81.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_multiget.cpp
|81.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/security/login_page.cpp
|81.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/security/login_page.cpp
|81.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/client/server/msgbus_http_server.cpp
|81.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/client/server/msgbus_http_server.cpp
|81.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/normalizer/portion/chunks.cpp
|81.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/normalizer/portion/chunks.cpp
|81.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_schema_snapshots.cpp
|81.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_schema_snapshots.cpp
|81.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/normalizer/portion/broken_blobs.cpp
|81.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/normalizer/portion/broken_blobs.cpp
|81.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/statistics/service/service.cpp
|81.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/statistics/service/service.cpp
|81.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_s3_downloads.cpp
|81.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_s3_downloads.cpp
|81.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/cms_tx_remove_permissions.cpp
|81.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/cms_tx_remove_permissions.cpp
|81.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/normalizer/portion/clean.cpp
|81.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/normalizer/portion/clean.cpp
|81.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_change_sender_activation.cpp
|81.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_change_sender_activation.cpp
|81.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_repl_apply.cpp
|81.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_repl_apply.cpp
|81.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/statistics/service/service_impl.cpp
|81.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/statistics/service/service_impl.cpp
|81.7%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/statistics/service/libcore-statistics-service.a
|81.7%| [AR] {RESULT} $(B)/ydb/core/statistics/service/libcore-statistics-service.a
|81.7%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/statistics/service/libcore-statistics-service.a
|81.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/deprecated/persqueue_v0/grpc_pq_write_actor.cpp
|81.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/deprecated/persqueue_v0/grpc_pq_write_actor.cpp
|81.7%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/services/deprecated/persqueue_v0/libservices-deprecated-persqueue_v0.a
|81.7%| [AR] {RESULT} $(B)/ydb/services/deprecated/persqueue_v0/libservices-deprecated-persqueue_v0.a
|81.7%| [AR] {BAZEL_UPLOAD} $(B)/ydb/services/deprecated/persqueue_v0/libservices-deprecated-persqueue_v0.a
|81.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/statistics/aggregator/tx_datashard_scan_response.cpp
|81.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/statistics/aggregator/tx_datashard_scan_response.cpp
|81.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/long_tx_service/acquire_snapshot_impl.cpp
|81.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/statistics/aggregator/tx_finish_trasersal.cpp
|81.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/long_tx_service/acquire_snapshot_impl.cpp
|81.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/statistics/aggregator/tx_finish_trasersal.cpp
|81.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/normalizer/portion/restore_v2_chunks.cpp
|81.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/normalizer/portion/restore_v2_chunks.cpp
|81.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/metadata/manager/restore.cpp
|81.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/manager/restore.cpp
|81.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/columnshard_private_events.cpp
|81.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/client/server/msgbus_server_drain_node.cpp
|81.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/columnshard_private_events.cpp
|81.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/client/server/msgbus_server_drain_node.cpp
|81.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_repl_offsets_server.cpp
|81.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_repl_offsets_server.cpp
|81.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/sys_view/nodes/nodes.cpp
|81.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/sys_view/nodes/nodes.cpp
|81.8%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/sys_view/nodes/libcore-sys_view-nodes.a
|81.8%| [AR] {RESULT} $(B)/ydb/core/sys_view/nodes/libcore-sys_view-nodes.a
|81.8%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/sys_view/nodes/libcore-sys_view-nodes.a
|81.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_overload.cpp
|81.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_overload.cpp
|81.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/reader/common_reader/constructor/read_metadata.cpp
|81.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/common_reader/constructor/read_metadata.cpp
|81.8%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/reader/common_reader/constructor/libreader-common_reader-constructor.a
|81.8%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/reader/common_reader/constructor/libreader-common_reader-constructor.a
|81.8%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/common_reader/constructor/libreader-common_reader-constructor.a
|81.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/security/ticket_parser.cpp
|81.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/security/ticket_parser.cpp
|81.8%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/security/libydb-core-security.a
|81.8%| [AR] {RESULT} $(B)/ydb/core/security/libydb-core-security.a
|81.8%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/security/libydb-core-security.a
|81.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/columnshard__scan.cpp
|81.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/columnshard__scan.cpp
|81.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/storage/actualizer/index/index.cpp
|81.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/storage/actualizer/index/index.cpp
|81.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/normalizer/portion/normalizer.cpp
|81.8%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/storage/actualizer/index/libstorage-actualizer-index.a
|81.8%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/storage/actualizer/index/libstorage-actualizer-index.a
|81.8%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/actualizer/index/libstorage-actualizer-index.a
|81.8%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/normalizer/portion/libcolumnshard-normalizer-portion.a
|81.8%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/normalizer/portion/libcolumnshard-normalizer-portion.a
|81.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/normalizer/portion/normalizer.cpp
|81.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/normalizer/portion/leaked_blobs.cpp
|81.8%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/normalizer/portion/libcolumnshard-normalizer-portion.a
|81.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/normalizer/portion/leaked_blobs.cpp
|81.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/statistics/aggregator/tx_init.cpp
|81.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/statistics/aggregator/tx_init.cpp
|81.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/statistics/aggregator/tx_schedule_traversal.cpp
|81.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/statistics/aggregator/tx_schedule_traversal.cpp
|81.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/backpressure/event.cpp
|81.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/backpressure/event.cpp
|81.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/hulldb/generic/hullds_idxsnap.cpp
|81.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/hulldb/generic/hullds_idxsnap.cpp
|81.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/long_tx_service/commit_impl.cpp
|81.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/long_tx_service/commit_impl.cpp
|81.8%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/long_tx_service/libcore-tx-long_tx_service.a
|81.8%| [AR] {RESULT} $(B)/ydb/core/tx/long_tx_service/libcore-tx-long_tx_service.a
|81.9%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/long_tx_service/libcore-tx-long_tx_service.a
|81.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_dep_tracker.cpp
|81.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_dep_tracker.cpp
|81.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/sys_view/query_stats/query_stats.cpp
|81.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/sys_view/query_stats/query_stats.cpp
|81.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/statistics/aggregator/tx_response_tablet_distribution.cpp
|81.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/hulldb/generic/hullds_sstvec.cpp
|81.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/statistics/aggregator/tx_response_tablet_distribution.cpp
|81.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/hulldb/generic/hullds_sstvec.cpp
|81.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard__store_scan_state.cpp
|81.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/sys_view/pg_tables/pg_tables.cpp
|81.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/persqueue/writer/metadata_initializers.cpp
|81.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard__store_scan_state.cpp
|81.9%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/sys_view/pg_tables/libcore-sys_view-pg_tables.a
|81.9%| [AR] {RESULT} $(B)/ydb/core/sys_view/pg_tables/libcore-sys_view-pg_tables.a
|81.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/writer/metadata_initializers.cpp
|81.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/sys_view/pg_tables/pg_tables.cpp
|81.9%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/sys_view/pg_tables/libcore-sys_view-pg_tables.a
|81.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/sys_view/partition_stats/top_partitions.cpp
|81.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/columnshard_schema.cpp
|81.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/sys_view/partition_stats/top_partitions.cpp
|81.9%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/sys_view/partition_stats/libcore-sys_view-partition_stats.a
|81.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/columnshard_schema.cpp
|81.9%| [AR] {RESULT} $(B)/ydb/core/sys_view/partition_stats/libcore-sys_view-partition_stats.a
|81.9%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/sys_view/partition_stats/libcore-sys_view-partition_stats.a
|81.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_s3_uploads.cpp
|81.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_s3_uploads.cpp
|81.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/client/server/msgbus_server_hive_create_tablet.cpp
|81.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/client/server/msgbus_server_hive_create_tablet.cpp
|81.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/hulldb/generic/hullds_sst.cpp
|81.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/hulldb/generic/hullds_sst.cpp
|81.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/sys_view/query_stats/query_metrics.cpp
|81.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/sys_view/query_stats/query_metrics.cpp
|81.9%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/sys_view/query_stats/libcore-sys_view-query_stats.a
|81.9%| [AR] {RESULT} $(B)/ydb/core/sys_view/query_stats/libcore-sys_view-query_stats.a
|81.9%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/sys_view/query_stats/libcore-sys_view-query_stats.a
|81.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/normalizer/portion/restore_v1_chunks.cpp
|81.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/normalizer/portion/restore_v1_chunks.cpp
|81.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/sentinel.cpp
|81.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/statistics/aggregator/tx_schemeshard_stats.cpp
|81.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/sentinel.cpp
|81.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/statistics/aggregator/tx_schemeshard_stats.cpp
|81.9%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/cms/libydb-core-cms.a
|81.9%| [AR] {RESULT} $(B)/ydb/core/cms/libydb-core-cms.a
|81.9%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/cms/libydb-core-cms.a
|81.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_common_upload.cpp
|81.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_common_upload.cpp
|81.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/sys_view/scan.cpp
|81.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_nodemonactor.cpp
|81.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_nodemonactor.cpp
|81.9%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/sys_view/libydb-core-sys_view.a
|81.9%| [AR] {RESULT} $(B)/ydb/core/sys_view/libydb-core-sys_view.a
|82.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/sys_view/scan.cpp
|82.0%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/sys_view/libydb-core-sys_view.a
|82.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/normalizer/abstract/abstract.cpp
|82.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/normalizer/abstract/abstract.cpp
|82.0%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/normalizer/abstract/libcolumnshard-normalizer-abstract.a
|82.0%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/normalizer/abstract/libcolumnshard-normalizer-abstract.a
|82.0%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/normalizer/abstract/libcolumnshard-normalizer-abstract.a
|82.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/client/server/grpc_proxy_status.cpp
|82.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/client/server/grpc_proxy_status.cpp
|82.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/statistics/aggregator/tx_configure.cpp
|82.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/statistics/aggregator/tx_configure.cpp
|82.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/defrag/defrag_rewriter.cpp
|82.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/defrag/defrag_rewriter.cpp
|82.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_outreadset.cpp
|82.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_outreadset.cpp
|82.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/statistics/aggregator/tx_ack_timeout.cpp
|82.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/statistics/aggregator/tx_ack_timeout.cpp
|82.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard__readset.cpp
|82.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard__readset.cpp
|82.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/dsproxy/group_sessions.cpp
|82.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/dsproxy/group_sessions.cpp
|82.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/hulldb/generic/hulldb_bulksstmngr.cpp
|82.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_direct_upload.cpp
|82.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/hulldb/generic/hulldb_bulksstmngr.cpp
|82.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_direct_upload.cpp
|82.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/statistics/aggregator/tx_analyze_table_request.cpp
|82.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/statistics/aggregator/tx_analyze_table_request.cpp
|82.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/gateway/actors/scheme.cpp
|82.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/gateway/actors/scheme.cpp
|82.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/persqueue/writer/partition_chooser_impl.cpp
|82.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/writer/partition_chooser_impl.cpp
|82.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard__snapshot_txs.cpp
|82.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard__snapshot_txs.cpp
|82.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/plain_read_data.cpp
|82.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/plain_read_data.cpp
|82.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard__read_iterator.cpp
|82.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard__read_iterator.cpp
|82.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/statistics/aggregator/tx_analyze_deadline.cpp
|82.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/statistics/aggregator/tx_analyze_deadline.cpp
|82.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/export/actor/write.cpp
|82.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/export/actor/write.cpp
|82.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/statistics/aggregator/tx_navigate.cpp
|82.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/statistics/aggregator/tx_navigate.cpp
|82.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/gateway/utils/metadata_helpers.cpp
|82.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/gateway/utils/metadata_helpers.cpp
|82.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/hulldb/generic/hullds_sstslice.cpp
|82.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/hulldb/generic/hullds_sstslice.cpp
|82.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_locks_db.cpp
|82.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_locks_db.cpp
|82.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/persqueue/subscriber.cpp
|82.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/subscriber.cpp
|82.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/persqueue/writer/source_id_encoding.cpp
|82.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/client/server/msgbus_server.cpp
|82.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/writer/source_id_encoding.cpp
|82.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/client/server/msgbus_server.cpp
|82.1%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/persqueue/writer/libcore-persqueue-writer.a
|82.1%| [AR] {RESULT} $(B)/ydb/core/persqueue/writer/libcore-persqueue-writer.a
|82.1%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/persqueue/writer/libcore-persqueue-writer.a
|82.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/gateway/actors/analyze_actor.cpp
|82.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/defrag/defrag_actor.cpp
|82.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/gateway/actors/analyze_actor.cpp
|82.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/defrag/defrag_actor.cpp
|82.1%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/gateway/actors/libkqp-gateway-actors.a
|82.1%| [AR] {RESULT} $(B)/ydb/core/kqp/gateway/actors/libkqp-gateway-actors.a
|82.1%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/gateway/actors/libkqp-gateway-actors.a
|82.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/sys_view/service/sysview_service.cpp
|82.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/sys_view/service/sysview_service.cpp
|82.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/blob_cache.cpp
|82.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/blob_cache.cpp
|82.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_distributed_erase.cpp
|82.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_distributed_erase.cpp
|82.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_active_transaction.cpp
|82.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_active_transaction.cpp
|82.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/gateway/behaviour/view/manager.cpp
|82.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/gateway/behaviour/view/manager.cpp
|82.1%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/gateway/behaviour/view/libgateway-behaviour-view.a
|82.1%| [AR] {RESULT} $(B)/ydb/core/kqp/gateway/behaviour/view/libgateway-behaviour-view.a
|82.1%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/gateway/behaviour/view/libgateway-behaviour-view.a
|82.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_s3_upload_rows.cpp
|82.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_s3_upload_rows.cpp
|82.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/export/actor/export_actor.cpp
|82.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/export/actor/export_actor.cpp
|82.1%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/export/actor/libcolumnshard-export-actor.a
|82.1%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/export/actor/libcolumnshard-export-actor.a
|82.1%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/export/actor/libcolumnshard-export-actor.a
|82.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/statistics/aggregator/tx_aggr_stat_response.cpp
|82.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/statistics/aggregator/tx_aggr_stat_response.cpp
|82.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/sys_view/processor/tx_collect.cpp
|82.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/hulldb/generic/hullds_idx.cpp
|82.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/sys_view/processor/tx_collect.cpp
|82.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/hulldb/generic/hullds_idx.cpp
|82.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/statistics/aggregator/tx_init_schema.cpp
|82.1%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/vdisk/hulldb/generic/libvdisk-hulldb-generic.a
|82.1%| [AR] {RESULT} $(B)/ydb/core/blobstorage/vdisk/hulldb/generic/libvdisk-hulldb-generic.a
|82.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/statistics/aggregator/tx_init_schema.cpp
|82.1%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/vdisk/hulldb/generic/libvdisk-hulldb-generic.a
|82.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/compile_service/kqp_compile_computation_pattern_service.cpp
|82.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/compile_service/kqp_compile_computation_pattern_service.cpp
|82.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/statistics/aggregator/tx_analyze_table_response.cpp
|82.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/statistics/aggregator/tx_analyze_table_delivery_problem.cpp
|82.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/statistics/aggregator/tx_analyze_table_response.cpp
|82.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/statistics/aggregator/tx_analyze_table_delivery_problem.cpp
|82.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/statistics/aggregator/tx_resolve.cpp
|82.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard__cleanup_tx.cpp
|82.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/statistics/aggregator/tx_resolve.cpp
|82.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/client/minikql_compile/yql_expr_minikql.cpp
|82.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard__cleanup_tx.cpp
|82.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/client/minikql_compile/yql_expr_minikql.cpp
|82.1%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/client/minikql_compile/libcore-client-minikql_compile.a
|82.2%| [AR] {RESULT} $(B)/ydb/core/client/minikql_compile/libcore-client-minikql_compile.a
|82.2%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/client/minikql_compile/libcore-client-minikql_compile.a
|82.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/normalizer/portion/special_cleaner.cpp
|82.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/normalizer/portion/special_cleaner.cpp
|82.2%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/normalizer/portion/libcolumnshard-normalizer-portion.global.a
|82.2%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/normalizer/portion/libcolumnshard-normalizer-portion.global.a
|82.2%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/normalizer/portion/libcolumnshard-normalizer-portion.global.a
|82.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/sys_view/processor/db_counters.cpp
|82.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/sys_view/processor/db_counters.cpp
|82.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/build_kqp_data_tx_out_rs_unit.cpp
|82.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/statistics/database/database.cpp
|82.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/build_kqp_data_tx_out_rs_unit.cpp
|82.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/statistics/database/database.cpp
|82.2%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/statistics/database/libcore-statistics-database.a
|82.2%| [AR] {RESULT} $(B)/ydb/core/statistics/database/libcore-statistics-database.a
|82.2%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/statistics/database/libcore-statistics-database.a
|82.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/drop_cdc_stream_unit.cpp
|82.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/drop_cdc_stream_unit.cpp
|82.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/sys_view/processor/tx_top_partitions.cpp
|82.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/sys_view/processor/tx_top_partitions.cpp
|82.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard__kqp_scan.cpp
|82.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard__kqp_scan.cpp
|82.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/sys_view/processor/processor.cpp
|82.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/sys_view/processor/processor.cpp
|82.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/executer_actor/kqp_literal_executer.cpp
|82.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/executer_actor/kqp_literal_executer.cpp
|82.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/statistics/aggregator/aggregator_impl.cpp
|82.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/statistics/aggregator/aggregator_impl.cpp
|82.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/sys_view/processor/tx_aggregate.cpp
|82.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/sys_view/processor/tx_aggregate.cpp
|82.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/sys_view/service/ext_counters.cpp
|82.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/persqueue/user_info.cpp
|82.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/sys_view/service/ext_counters.cpp
|82.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/user_info.cpp
|82.2%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/sys_view/service/libcore-sys_view-service.a
|82.2%| [AR] {RESULT} $(B)/ydb/core/sys_view/service/libcore-sys_view-service.a
|82.2%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/sys_view/service/libcore-sys_view-service.a
|82.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/persqueue/sourceid.cpp
|82.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/sourceid.cpp
|82.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_change_receiving.cpp
|82.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_change_receiving.cpp
|82.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/persqueue/read_quoter.cpp
|82.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/read_quoter.cpp
|82.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/statistics/aggregator/tx_analyze.cpp
|82.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/statistics/aggregator/tx_analyze.cpp
|82.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/client/server/msgbus_server_pq_metacache.cpp
|82.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/client/server/msgbus_server_pq_metacache.cpp
|82.2%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/client/server/libcore-client-server.a
|82.2%| [AR] {RESULT} $(B)/ydb/core/client/server/libcore-client-server.a
|82.2%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/client/server/libcore-client-server.a
|82.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/statistics/aggregator/aggregator.cpp
|82.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/statistics/aggregator/aggregator.cpp
|82.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/sys_view/processor/tx_configure.cpp
|82.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/sys_view/processor/tx_configure.cpp
|82.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/fq/libs/control_plane_proxy/control_plane_proxy.cpp
|82.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/persqueue/partition_write.cpp
|82.3%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/fq/libs/control_plane_proxy/libfq-libs-control_plane_proxy.a
|82.3%| [AR] {RESULT} $(B)/ydb/core/fq/libs/control_plane_proxy/libfq-libs-control_plane_proxy.a
|82.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/partition_write.cpp
|82.3%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/fq/libs/control_plane_proxy/libfq-libs-control_plane_proxy.a
|82.3%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/statistics/aggregator/libcore-statistics-aggregator.a
|82.3%| [AR] {RESULT} $(B)/ydb/core/statistics/aggregator/libcore-statistics-aggregator.a
|82.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/fq/libs/control_plane_proxy/control_plane_proxy.cpp
|82.3%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/statistics/aggregator/libcore-statistics-aggregator.a
|82.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/compile_service/kqp_compile_service.cpp
|82.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/compile_service/kqp_compile_service.cpp
|82.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/counters/kqp_counters.cpp
|82.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/counters/kqp_counters.cpp
|82.3%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/counters/libcore-kqp-counters.a
|82.3%| [AR] {RESULT} $(B)/ydb/core/kqp/counters/libcore-kqp-counters.a
|82.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/executer_actor/kqp_executer_impl.cpp
|82.3%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/counters/libcore-kqp-counters.a
|82.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/executer_actor/kqp_executer_impl.cpp
|82.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/sys_view/processor/tx_interval_metrics.cpp
|82.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/sys_view/processor/tx_interval_metrics.cpp
|82.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/sys_view/processor/tx_init_schema.cpp
|82.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/sys_view/processor/tx_init_schema.cpp
|82.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/sys_view/processor/tx_interval_summary.cpp
|82.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/sys_view/processor/tx_interval_summary.cpp
|82.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/persqueue/pq.cpp
|82.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/pq.cpp
|82.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/persqueue_cluster_discovery/cluster_discovery_service.cpp
|82.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/persqueue_cluster_discovery/cluster_discovery_service.cpp
|82.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/persqueue/write_quoter.cpp
|82.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/write_quoter.cpp
|82.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/persqueue/partition_sourcemanager.cpp
|82.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/partition_sourcemanager.cpp
|82.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/sys_view/processor/processor_impl.cpp
|82.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/sys_view/processor/processor_impl.cpp
|82.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/persqueue/event_helpers.cpp
|82.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/event_helpers.cpp
|82.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/persqueue/read_balancer_app.cpp
|82.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/read_balancer_app.cpp
|82.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/persqueue/pq_impl_app.cpp
|82.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/pq_impl_app.cpp
|82.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/gateway/utils/scheme_helpers.cpp
|82.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/gateway/utils/scheme_helpers.cpp
|82.3%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/gateway/utils/libkqp-gateway-utils.a
|82.3%| [AR] {RESULT} $(B)/ydb/core/kqp/gateway/utils/libkqp-gateway-utils.a
|82.3%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/gateway/utils/libkqp-gateway-utils.a
|82.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/persqueue/partition_init.cpp
|82.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/partition_init.cpp
|82.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/persqueue/partition_read.cpp
|82.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/partition_read.cpp
|82.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/persqueue/transaction.cpp
|82.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard__op_rows.cpp
|82.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/transaction.cpp
|82.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard__op_rows.cpp
|82.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/quoter/kesus_quoter_proxy.cpp
|82.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/quoter/kesus_quoter_proxy.cpp
|82.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/persqueue/ownerinfo.cpp
|82.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/ownerinfo.cpp
|82.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/persqueue/read_balancer__balancing_app.cpp
|82.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/persqueue/list_all_topics_actor.cpp
|82.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/compile_service/kqp_compile_actor.cpp
|82.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/read_balancer__balancing_app.cpp
|82.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/list_all_topics_actor.cpp
|82.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/compile_service/kqp_compile_actor.cpp
|82.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/persqueue/partition_scale_request.cpp
|82.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/partition_scale_request.cpp
|82.4%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/compile_service/libcore-kqp-compile_service.a
|82.4%| [AR] {RESULT} $(B)/ydb/core/kqp/compile_service/libcore-kqp-compile_service.a
|82.4%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/compile_service/libcore-kqp-compile_service.a
|82.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/executer_actor/kqp_partitioned_executer.cpp
|82.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/persqueue_cluster_discovery/grpc_service.cpp
|82.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/executer_actor/kqp_partitioned_executer.cpp
|82.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/persqueue_cluster_discovery/grpc_service.cpp
|82.4%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/services/persqueue_cluster_discovery/libydb-services-persqueue_cluster_discovery.a
|82.4%| [AR] {RESULT} $(B)/ydb/services/persqueue_cluster_discovery/libydb-services-persqueue_cluster_discovery.a
|82.4%| [AR] {BAZEL_UPLOAD} $(B)/ydb/services/persqueue_cluster_discovery/libydb-services-persqueue_cluster_discovery.a
|82.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/persqueue/partition_monitoring.cpp
|82.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/persqueue/fetch_request_actor.cpp
|82.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/partition_monitoring.cpp
|82.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/fetch_request_actor.cpp
|82.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/executer_actor/kqp_partition_helper.cpp
|82.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/executer_actor/kqp_partition_helper.cpp
|82.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/executer_actor/kqp_scheme_executer.cpp
|82.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/persqueue/read_balancer__balancing.cpp
|82.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/read_balancer__balancing.cpp
|82.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/executer_actor/kqp_scheme_executer.cpp
|82.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/tx__update_tablet_status.cpp
|82.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__update_tablet_status.cpp
|82.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/persqueue/pq_l2_cache.cpp
|82.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/pq_l2_cache.cpp
|82.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/tx__response_tablet_seq.cpp
|82.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__response_tablet_seq.cpp
|82.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/tx__update_tablets_object.cpp
|82.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__update_tablets_object.cpp
|82.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/tx__unlock_tablet.cpp
|82.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__unlock_tablet.cpp
|82.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/persqueue/partition_blob_encoder.cpp
|82.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/partition_blob_encoder.cpp
|82.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/executer_actor/kqp_executer_stats.cpp
|82.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/executer_actor/kqp_executer_stats.cpp
|82.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/tx__restart_tablet.cpp
|82.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__restart_tablet.cpp
|82.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/persqueue/mirrorer.cpp
|82.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/mirrorer.cpp
|82.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/executer_actor/kqp_tasks_validate.cpp
|82.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/executer_actor/kqp_tasks_validate.cpp
|82.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/tx__register_node.cpp
|82.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__register_node.cpp
|82.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/tx__start_tablet.cpp
|82.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__start_tablet.cpp
|82.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/executer_actor/kqp_table_resolver.cpp
|82.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/executer_actor/kqp_table_resolver.cpp
|82.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/tx__disconnect_node.cpp
|82.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/executer_actor/kqp_planner.cpp
|82.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__disconnect_node.cpp
|82.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/executer_actor/kqp_planner.cpp
|82.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/tx__process_boot_queue.cpp
|82.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__process_boot_queue.cpp
|82.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/persqueue/partition_scale_manager.cpp
|82.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/partition_scale_manager.cpp
|82.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/persqueue/cluster_tracker.cpp
|82.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/bscontroller/console_interaction.cpp
|82.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/cluster_tracker.cpp
|82.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/bscontroller/console_interaction.cpp
|82.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/tx__release_tablets_reply.cpp
|82.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__release_tablets_reply.cpp
|82.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/bscontroller/propose_group_key.cpp
|82.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/bscontroller/propose_group_key.cpp
|82.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/bscontroller/grouper.cpp
|82.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/bscontroller/grouper.cpp
|82.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/tx__switch_drain.cpp
|82.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__switch_drain.cpp
|82.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/tx__seize_tablets_reply.cpp
|82.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__seize_tablets_reply.cpp
|82.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/tx__stop_tablet.cpp
|82.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__stop_tablet.cpp
|82.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/bscontroller/self_heal.cpp
|82.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/bscontroller/self_heal.cpp
|82.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/tx__request_tablet_seq.cpp
|82.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__request_tablet_seq.cpp
|82.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/tx__kill_node.cpp
|82.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__kill_node.cpp
|82.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/tx__reassign_groups_on_decommit.cpp
|82.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__reassign_groups_on_decommit.cpp
|82.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/bscontroller/init_scheme.cpp
|82.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/bscontroller/init_scheme.cpp
|82.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/executer_actor/kqp_tasks_graph.cpp
|82.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/executer_actor/kqp_tasks_graph.cpp
|82.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/quoter/quoter_service.cpp
|82.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/quoter/quoter_service.cpp
|82.5%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/quoter/libydb-core-quoter.a
|82.5%| [AR] {RESULT} $(B)/ydb/core/quoter/libydb-core-quoter.a
|82.5%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/quoter/libydb-core-quoter.a
|82.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/persqueue/partition.cpp
|82.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/partition.cpp
|82.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/tx__seize_tablets.cpp
|82.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__seize_tablets.cpp
|82.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/bscontroller/select_groups.cpp
|82.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/bscontroller/select_groups.cpp
|82.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/tx__reassign_groups.cpp
|82.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__reassign_groups.cpp
|82.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/tx__status.cpp
|82.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__status.cpp
|82.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/bscontroller/sys_view.cpp
|82.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/bscontroller/sys_view.cpp
|82.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/persqueue/account_read_quoter.cpp
|82.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/account_read_quoter.cpp
|82.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/tx__resume_tablet.cpp
|82.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__resume_tablet.cpp
|82.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/bscontroller/group_mapper.cpp
|82.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/bscontroller/group_mapper.cpp
|82.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/bscontroller/request_controller_info.cpp
|82.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/bscontroller/request_controller_info.cpp
|82.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/tx__lock_tablet.cpp
|82.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__lock_tablet.cpp
|82.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/bscontroller/update_seen_operational.cpp
|82.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/bscontroller/update_seen_operational.cpp
|82.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/tx__sync_tablets.cpp
|82.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__sync_tablets.cpp
|82.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/bscontroller/stat_processor.cpp
|82.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/bscontroller/stat_processor.cpp
|82.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/bscontroller/scrub.cpp
|82.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/bscontroller/scrub.cpp
|82.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/tx__process_pending_operations.cpp
|82.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__process_pending_operations.cpp
|82.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/tx__configure_scale_recommender.cpp
|82.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__configure_scale_recommender.cpp
|82.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/tx__delete_tablet_result.cpp
|82.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/tx__release_tablets.cpp
|82.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__delete_tablet_result.cpp
|82.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__release_tablets.cpp
|82.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/tx__block_storage_result.cpp
|82.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__block_storage_result.cpp
|82.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/util/failure_injection.cpp
|82.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/util/failure_injection.cpp
|82.6%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/util/libydb-core-util.a
|82.6%| [AR] {RESULT} $(B)/ydb/core/util/libydb-core-util.a
|82.6%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/util/libydb-core-util.a
|82.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/executer_actor/kqp_scan_executer.cpp
|82.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/executer_actor/kqp_scan_executer.cpp
|82.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/syncer/guid_proxyobtain.cpp
|82.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/bscontroller/disk_metrics.cpp
|82.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/bscontroller/update_last_seen_ready.cpp
|82.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/bscontroller/disk_metrics.cpp
|82.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/syncer/guid_proxyobtain.cpp
|82.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/bscontroller/update_last_seen_ready.cpp
|82.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/bscontroller/group_metrics_exchange.cpp
|82.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/bscontroller/group_metrics_exchange.cpp
|82.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/bscontroller/node_report.cpp
|82.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/bscontroller/node_report.cpp
|82.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/persqueue/pq_impl.cpp
|82.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/pq_impl.cpp
|82.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/backup/impl/local_partition_reader.cpp
|82.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/backup/impl/local_partition_reader.cpp
|82.6%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/backup/impl/libcore-backup-impl.a
|82.6%| [AR] {RESULT} $(B)/ydb/core/backup/impl/libcore-backup-impl.a
|82.7%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/backup/impl/libcore-backup-impl.a
|82.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/hive_log.cpp
|82.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/hive_log.cpp
|82.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/tx__create_tablet.cpp
|82.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__create_tablet.cpp
|82.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/bscontroller/layout_helpers.cpp
|82.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/bscontroller/config_fit_pdisks.cpp
|82.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/bscontroller/config_fit_groups.cpp
|82.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/bscontroller/layout_helpers.cpp
|82.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/bscontroller/config_fit_pdisks.cpp
|82.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/bscontroller/config_fit_groups.cpp
|82.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/bscontroller/config_cmd.cpp
|82.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/bscontroller/config_cmd.cpp
|82.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_put.cpp
|82.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_put.cpp
|82.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/metadata/abstract/initialization.cpp
|82.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/abstract/initialization.cpp
|82.7%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/services/metadata/abstract/libservices-metadata-abstract.a
|82.7%| [AR] {RESULT} $(B)/ydb/services/metadata/abstract/libservices-metadata-abstract.a
|82.7%| [AR] {BAZEL_UPLOAD} $(B)/ydb/services/metadata/abstract/libservices-metadata-abstract.a
|82.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/base/board_publish.cpp
|82.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/base/board_publish.cpp
|82.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/bscontroller/monitoring.cpp
|82.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/syncer/syncer_job_actor.cpp
|82.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/syncer/blobstorage_syncer_committer.cpp
|82.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/syncer/syncer_job_actor.cpp
|82.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/reader/plain_reader/iterator/iterator.cpp
|82.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/syncer/blobstorage_syncer_committer.cpp
|82.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/bscontroller/monitoring.cpp
|82.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/plain_reader/iterator/iterator.cpp
|82.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/bscontroller/bsc.cpp
|82.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/bscontroller/bsc.cpp
|82.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/bscontroller/get_group.cpp
|82.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/bscontroller/group_layout_checker.cpp
|82.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/bscontroller/get_group.cpp
|82.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/bscontroller/migrate.cpp
|82.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/bscontroller/group_layout_checker.cpp
|82.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/bscontroller/migrate.cpp
|82.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/defrag/defrag_quantum.cpp
|82.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/bscontroller/virtual_group.cpp
|82.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/defrag/defrag_quantum.cpp
|82.7%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/vdisk/defrag/libblobstorage-vdisk-defrag.a
|82.7%| [AR] {RESULT} $(B)/ydb/core/blobstorage/vdisk/defrag/libblobstorage-vdisk-defrag.a
|82.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/bscontroller/virtual_group.cpp
|82.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/bscontroller/drop_donor.cpp
|82.7%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/vdisk/defrag/libblobstorage-vdisk-defrag.a
|82.7%| [CC]
{BAZEL_UPLOAD} $(S)/ydb/core/mind/bscontroller/drop_donor.cpp |82.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/change_sender_cdc_stream.cpp |82.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/change_sender_cdc_stream.cpp |82.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/bscontroller/commit_config.cpp |82.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_mon.cpp |82.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/bscontroller/commit_config.cpp |82.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_mon.cpp |82.7%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/pdisk/libcore-blobstorage-pdisk.a |82.7%| [AR] {RESULT} $(B)/ydb/core/blobstorage/pdisk/libcore-blobstorage-pdisk.a |82.7%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/pdisk/libcore-blobstorage-pdisk.a |82.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/syncer/guid_propagator.cpp |82.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/syncer/guid_propagator.cpp |82.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/bscontroller/cmds_box.cpp |82.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/bscontroller/cmds_box.cpp |82.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/bscontroller/cmds_host_config.cpp |82.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/bscontroller/cmds_host_config.cpp |82.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/export/session/storage/s3/storage.cpp |82.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/export/session/storage/s3/storage.cpp |82.8%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/export/session/storage/s3/libsession-storage-s3.global.a |82.8%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/export/session/storage/s3/libsession-storage-s3.global.a |82.8%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/export/session/storage/s3/libsession-storage-s3.global.a |82.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/skeleton/blobstorage_syncfull.cpp |82.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/bscontroller/register_node.cpp |82.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/skeleton/blobstorage_syncfull.cpp |82.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/bscontroller/register_node.cpp |82.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard__migrate_schemeshard.cpp |82.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard__migrate_schemeshard.cpp |82.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_request_reporting.cpp |82.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/dsproxy/dsproxy_request_reporting.cpp |82.8%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/dsproxy/libcore-blobstorage-dsproxy.a |82.8%| [AR] {RESULT} $(B)/ydb/core/blobstorage/dsproxy/libcore-blobstorage-dsproxy.a |82.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/metadata/ds_table/accessor_snapshot_base.cpp |82.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/executer_actor/kqp_data_executer.cpp |82.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/ds_table/accessor_snapshot_base.cpp |82.8%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/dsproxy/libcore-blobstorage-dsproxy.a |82.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/executer_actor/kqp_data_executer.cpp |82.8%| [CC] {default-linux-x86_64, 
release, asan} $(S)/ydb/core/blobstorage/vdisk/syncer/blobstorage_syncer_recoverlostdata.cpp |82.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/syncer/blobstorage_syncer_recoverlostdata.cpp |82.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard__write.cpp |82.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard__write.cpp |82.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/scheme_board/monitoring.cpp |82.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/scheme_board/monitoring.cpp |82.8%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/scheme_board/libcore-tx-scheme_board.a |82.8%| [AR] {RESULT} $(B)/ydb/core/tx/scheme_board/libcore-tx-scheme_board.a |82.8%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/scheme_board/libcore-tx-scheme_board.a |82.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard__s3_upload_txs.cpp |82.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard__s3_upload_txs.cpp |82.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/check_snapshot_tx_unit.cpp |82.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/check_snapshot_tx_unit.cpp |82.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/syncer/syncer_job_task.cpp |82.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/syncer/syncer_job_task.cpp |82.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_direct_transaction.cpp |82.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_direct_transaction.cpp |82.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard__s3_download_txs.cpp |82.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard__s3_download_txs.cpp |82.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_change_sending.cpp |82.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_change_sending.cpp |82.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/syncer/blobstorage_syncer_scheduler.cpp |82.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/syncer/blobstorage_syncer_scheduler.cpp |82.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_backup_backup_collection.cpp |82.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_backup_backup_collection.cpp |82.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/skeleton/skeleton_compactionstate.cpp |82.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/skeleton/skeleton_compactionstate.cpp |82.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/backpressure/queue.cpp |82.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/backpressure/queue.cpp |82.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/syncer/guid_proxywrite.cpp |82.8%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/backpressure/libcore-blobstorage-backpressure.a |82.9%| [AR] {RESULT} $(B)/ydb/core/blobstorage/backpressure/libcore-blobstorage-backpressure.a |82.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/syncer/guid_proxywrite.cpp |82.9%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/backpressure/libcore-blobstorage-backpressure.a |82.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard__column_stats.cpp |82.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard__column_stats.cpp |82.9%| [CC] 
{default-linux-x86_64, release, asan} $(S)/ydb/core/mind/bscontroller/config.cpp |82.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/bscontroller/config.cpp |82.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/sync_points/result.cpp |82.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/sync_points/result.cpp |82.9%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/sync_points/libsimple_reader-iterator-sync_points.a |82.9%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/sync_points/libsimple_reader-iterator-sync_points.a |82.9%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/sync_points/libsimple_reader-iterator-sync_points.a |82.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/create_persistent_snapshot_unit.cpp |82.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/create_persistent_snapshot_unit.cpp |82.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard__stats.cpp |82.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/skeleton/blobstorage_syncfullhandler.cpp |82.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard__stats.cpp |82.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/skeleton/blobstorage_syncfullhandler.cpp |82.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/bscontroller/load_everything.cpp |82.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/bscontroller/load_everything.cpp |82.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/base/board_lookup.cpp |82.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/base/board_lookup.cpp |82.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard__cleanup_in_rs.cpp |82.9%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/base/libydb-core-base.a |82.9%| [AR] {RESULT} $(B)/ydb/core/base/libydb-core-base.a |82.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard__cleanup_in_rs.cpp |82.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard__plan_step.cpp |82.9%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/mind/bscontroller/libcore-mind-bscontroller.a |82.9%| [AR] {RESULT} $(B)/ydb/core/mind/bscontroller/libcore-mind-bscontroller.a |82.9%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/base/libydb-core-base.a |82.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard__plan_step.cpp |82.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/persqueue/read_balancer.cpp |82.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/read_balancer.cpp |82.9%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/mind/bscontroller/libcore-mind-bscontroller.a |82.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/sys_view/processor/tx_init.cpp |82.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/sys_view/processor/tx_init.cpp |82.9%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/sys_view/processor/libcore-sys_view-processor.a |82.9%| [AR] {RESULT} $(B)/ydb/core/sys_view/processor/libcore-sys_view-processor.a |82.9%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/sys_view/processor/libcore-sys_view-processor.a |82.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/syncer/blobstorage_syncer_data.cpp |82.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/syncer/blobstorage_syncer_data.cpp |82.9%| [CC] {default-linux-x86_64, release, 
asan} $(S)/ydb/core/tx/datashard/datashard__propose_tx_base.cpp |82.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard__propose_tx_base.cpp |82.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/skeleton/skeleton_block_and_get.cpp |82.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/syncer/blobstorage_syncer_recoverlostdata_proxy.cpp |82.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/skeleton/skeleton_block_and_get.cpp |82.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/syncer/blobstorage_syncer_recoverlostdata_proxy.cpp |82.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/localrecovery/localrecovery_public.cpp |82.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/localrecovery/localrecovery_public.cpp |82.9%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/vdisk/localrecovery/libblobstorage-vdisk-localrecovery.a |82.9%| [AR] {RESULT} $(B)/ydb/core/blobstorage/vdisk/localrecovery/libblobstorage-vdisk-localrecovery.a |83.0%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/vdisk/localrecovery/libblobstorage-vdisk-localrecovery.a |82.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/syncer/guid_recovery.cpp |83.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/syncer/guid_recovery.cpp |83.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/complete_write_unit.cpp |83.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/complete_write_unit.cpp |83.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard__read_columns.cpp |83.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard__read_columns.cpp |83.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard__init.cpp |83.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard__init.cpp |83.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/syncer/blobstorage_syncer.cpp |83.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/skeleton/skeleton_mon_dbmainpage.cpp |83.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/skeleton/skeleton_mon_dbmainpage.cpp |83.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/syncer/blobstorage_syncer.cpp |83.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/build_distributed_erase_tx_out_rs_unit.cpp |83.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/build_distributed_erase_tx_out_rs_unit.cpp |83.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard__object_storage_listing.cpp |83.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard__object_storage_listing.cpp |83.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/common/blobstorage_vdisk_guids.cpp |83.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/common/blobstorage_vdisk_guids.cpp |83.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/metadata/ds_table/behaviour_registrator_actor.cpp |83.0%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/vdisk/common/libblobstorage-vdisk-common.a |83.0%| [AR] {RESULT} $(B)/ydb/core/blobstorage/vdisk/common/libblobstorage-vdisk-common.a |83.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/ds_table/behaviour_registrator_actor.cpp |83.0%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/vdisk/common/libblobstorage-vdisk-common.a |83.0%| [AR] {default-linux-x86_64, release, asan} 
$(B)/ydb/services/metadata/ds_table/libservices-metadata-ds_table.a |83.0%| [AR] {RESULT} $(B)/ydb/services/metadata/ds_table/libservices-metadata-ds_table.a |83.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/build_and_wait_dependencies_unit.cpp |83.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/build_and_wait_dependencies_unit.cpp |83.0%| [AR] {BAZEL_UPLOAD} $(B)/ydb/services/metadata/ds_table/libservices-metadata-ds_table.a |83.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/skeleton/skeleton_oos_tracker.cpp |83.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/skeleton/skeleton_oos_tracker.cpp |83.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard__progress_tx.cpp |83.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard__progress_tx.cpp |83.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard__cleanup_uncommitted.cpp |83.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard__cleanup_uncommitted.cpp |83.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/cdc_stream_heartbeat.cpp |83.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/cdc_stream_heartbeat.cpp |83.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/check_read_unit.cpp |83.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/check_read_unit.cpp |83.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/reader/sys_view/chunks/chunks.cpp |83.0%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/reader/sys_view/chunks/libreader-sys_view-chunks.global.a |83.0%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/reader/sys_view/chunks/libreader-sys_view-chunks.global.a |83.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/sys_view/chunks/chunks.cpp |83.0%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/sys_view/chunks/libreader-sys_view-chunks.global.a |83.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/completed_operations_unit.cpp |83.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/completed_operations_unit.cpp |83.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard__schema_changed.cpp |83.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard__schema_changed.cpp |83.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/drop_volatile_snapshot_unit.cpp |83.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/drop_volatile_snapshot_unit.cpp |83.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/skeleton/blobstorage_db.cpp |83.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/skeleton/blobstorage_db.cpp |83.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/check_scheme_tx_unit.cpp |83.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/check_scheme_tx_unit.cpp |83.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/check_data_tx_unit.cpp |83.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/check_data_tx_unit.cpp |83.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/conflicts_cache.cpp |83.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/conflicts_cache.cpp |83.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard__compact_borrowed.cpp |83.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard__compact_borrowed.cpp |83.1%| [CC] 
{default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/fetched_data.cpp |83.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/change_exchange_split.cpp |83.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/fetched_data.cpp |83.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/metadata/manager/object.cpp |83.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/change_exchange_split.cpp |83.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/manager/object.cpp |83.1%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/libreader-common_reader-iterator.a |83.1%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/libreader-common_reader-iterator.a |83.1%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/libreader-common_reader-iterator.a |83.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/scrub/scrub_actor_pdisk.cpp |83.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/scrub/scrub_actor_pdisk.cpp |83.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/repl/blobstorage_replmonhandler.cpp |83.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/repl/blobstorage_replmonhandler.cpp |83.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard__monitoring.cpp |83.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard__monitoring.cpp |83.1%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/vdisk/repl/libblobstorage-vdisk-repl.a |83.1%| [AR] {RESULT} $(B)/ydb/core/blobstorage/vdisk/repl/libblobstorage-vdisk-repl.a |83.1%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/vdisk/repl/libblobstorage-vdisk-repl.a |83.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/prepare_write_tx_in_rs_unit.cpp |83.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/prepare_write_tx_in_rs_unit.cpp |83.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/metadata/manager/alter.cpp |83.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/manager/alter.cpp |83.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/change_sender_incr_restore.cpp |83.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/change_sender_incr_restore.cpp |83.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/check_commit_writes_tx_unit.cpp |83.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/check_commit_writes_tx_unit.cpp |83.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/prepare_data_tx_in_rs_unit.cpp |83.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/prepare_data_tx_in_rs_unit.cpp |83.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/storage/optimizer/lbuckets/constructor/constructor.cpp |83.1%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/lbuckets/constructor/liboptimizer-lbuckets-constructor.global.a |83.1%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/lbuckets/constructor/liboptimizer-lbuckets-constructor.global.a |83.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/storage/optimizer/lbuckets/constructor/constructor.cpp |83.1%| [AR] {BAZEL_UPLOAD} 
$(B)/ydb/core/tx/columnshard/engines/storage/optimizer/lbuckets/constructor/liboptimizer-lbuckets-constructor.global.a |83.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/complete_data_tx_unit.cpp |83.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/complete_data_tx_unit.cpp |83.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/syncer/guid_firstrun.cpp |83.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/syncer/guid_firstrun.cpp |83.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/proxy_service/kqp_proxy_service.cpp |83.1%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/vdisk/syncer/libblobstorage-vdisk-syncer.a |83.1%| [AR] {RESULT} $(B)/ydb/core/blobstorage/vdisk/syncer/libblobstorage-vdisk-syncer.a |83.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/proxy_service/kqp_proxy_service.cpp |83.1%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/vdisk/syncer/libblobstorage-vdisk-syncer.a |83.1%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/proxy_service/libcore-kqp-proxy_service.a |83.1%| [AR] {RESULT} $(B)/ydb/core/kqp/proxy_service/libcore-kqp-proxy_service.a |83.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/volatile_tx.cpp |83.1%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/proxy_service/libcore-kqp-proxy_service.a |83.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/volatile_tx.cpp |83.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/skeleton/skeleton_shred.cpp |83.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/skeleton/skeleton_shred.cpp |83.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/ext_index/service/add_index.cpp |83.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/ext_index/service/add_index.cpp |83.1%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/services/ext_index/service/libservices-ext_index-service.a |83.1%| [AR] {RESULT} $(B)/ydb/services/ext_index/service/libservices-ext_index-service.a |83.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/metadata/manager/alter_impl.cpp |83.2%| [AR] {BAZEL_UPLOAD} $(B)/ydb/services/ext_index/service/libservices-ext_index-service.a |83.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/manager/alter_impl.cpp |83.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard__mon_reset_schema_version.cpp |83.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard__mon_reset_schema_version.cpp |83.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/metadata/manager/modification.cpp |83.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/manager/modification.cpp |83.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/execution_unit.cpp |83.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/execution_unit.cpp |83.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/iterator.cpp |83.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/iterator.cpp |83.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/scrub/scrub_actor_sst.cpp |83.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/scrub/scrub_actor_sst.cpp |83.2%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/libreader-simple_reader-iterator.a |83.2%| [AR] {RESULT} 
$(B)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/libreader-simple_reader-iterator.a |83.2%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/libreader-simple_reader-iterator.a |83.2%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/vdisk/scrub/libblobstorage-vdisk-scrub.a |83.2%| [AR] {RESULT} $(B)/ydb/core/blobstorage/vdisk/scrub/libblobstorage-vdisk-scrub.a |83.2%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/vdisk/scrub/libblobstorage-vdisk-scrub.a |83.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/receive_snapshot_unit.cpp |83.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/check_distributed_erase_tx_unit.cpp |83.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/receive_snapshot_unit.cpp |83.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/check_distributed_erase_tx_unit.cpp |83.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/query/query_statdb.cpp |83.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/query/query_statdb.cpp |83.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/create_volatile_snapshot_unit.cpp |83.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/create_volatile_snapshot_unit.cpp |83.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/skeleton/skeleton_oos_logic.cpp |83.2%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/vdisk/query/libblobstorage-vdisk-query.a |83.2%| [AR] {RESULT} $(B)/ydb/core/blobstorage/vdisk/query/libblobstorage-vdisk-query.a |83.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/skeleton/skeleton_oos_logic.cpp |83.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard__cleanup_borrowed.cpp |83.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard__cleanup_borrowed.cpp |83.2%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/vdisk/query/libblobstorage-vdisk-query.a |83.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/create_incremental_restore_src_unit.cpp |83.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/create_incremental_restore_src_unit.cpp |83.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/reader/plain_reader/iterator/scanner.cpp |83.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/plain_reader/iterator/scanner.cpp |83.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/store_and_send_write_out_rs_unit.cpp |83.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/store_and_send_write_out_rs_unit.cpp |83.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard__compaction.cpp |83.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard__compaction.cpp |83.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/create_table_unit.cpp |83.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/create_table_unit.cpp |83.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/workload_service/kqp_workload_service.cpp |83.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/workload_service/kqp_workload_service.cpp |83.2%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/workload_service/libcore-kqp-workload_service.a |83.2%| [AR] {RESULT} $(B)/ydb/core/kqp/workload_service/libcore-kqp-workload_service.a |83.2%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/workload_service/libcore-kqp-workload_service.a |83.2%| [CC] 
{default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/constructor/read_metadata.cpp |83.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/constructor/read_metadata.cpp |83.2%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/reader/simple_reader/constructor/libreader-simple_reader-constructor.a |83.2%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/reader/simple_reader/constructor/libreader-simple_reader-constructor.a |83.2%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/simple_reader/constructor/libreader-simple_reader-constructor.a |83.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/store_write_unit.cpp |83.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/store_write_unit.cpp |83.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tablet_flat/tablet_flat_executed.cpp |83.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tablet_flat/tablet_flat_executed.cpp |83.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard__conditional_erase_rows.cpp |83.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard__conditional_erase_rows.cpp |83.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/tx__load_everything.cpp |83.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__load_everything.cpp |83.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/protect_scheme_echoes_unit.cpp |83.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/protect_scheme_echoes_unit.cpp |83.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/change_collector.cpp |83.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/create_cdc_stream_unit.cpp |83.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/change_collector.cpp |83.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/create_cdc_stream_unit.cpp |83.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/portions/portion_info.cpp |83.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/portions/portion_info.cpp |83.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/tx__tablet_owners_reply.cpp |83.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__tablet_owners_reply.cpp |83.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/graph/shard/tx_store_metrics.cpp |83.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/graph/shard/tx_store_metrics.cpp |83.3%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/mind/hive/libcore-mind-hive.a |83.3%| [AR] {RESULT} $(B)/ydb/core/mind/hive/libcore-mind-hive.a |83.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/remove_locks.cpp |83.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/remove_locks.cpp |83.3%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/mind/hive/libcore-mind-hive.a |83.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/make_scan_snapshot_unit.cpp |83.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/change_collector_base.cpp |83.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/make_scan_snapshot_unit.cpp |83.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/change_collector_base.cpp |83.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard__cancel_tx_proposal.cpp |83.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard__cancel_tx_proposal.cpp |83.3%| [CC] {default-linux-x86_64, release, asan} 
$(S)/ydb/core/grpc_services/query/rpc_attach_session.cpp |83.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/query/rpc_attach_session.cpp |83.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_common/rpc_common_kqp_session.cpp |83.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_common/rpc_common_kqp_session.cpp |83.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/scheme/index_info.cpp |83.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/scheme/index_info.cpp |83.3%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/scheme/libcolumnshard-engines-scheme.a |83.3%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/scheme/libcolumnshard-engines-scheme.a |83.3%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/scheme/libcolumnshard-engines-scheme.a |83.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/move_index_unit.cpp |83.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/move_index_unit.cpp |83.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/skeleton/blobstorage_skeleton.cpp |83.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/skeleton/blobstorage_skeleton.cpp |83.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/reader/plain_reader/iterator/plain_read_data.cpp |83.3%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/vdisk/skeleton/libblobstorage-vdisk-skeleton.a |83.3%| [AR] {RESULT} $(B)/ydb/core/blobstorage/vdisk/skeleton/libblobstorage-vdisk-skeleton.a |83.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/plain_reader/iterator/plain_read_data.cpp |83.3%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/reader/plain_reader/iterator/libreader-plain_reader-iterator.a |83.3%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/reader/plain_reader/iterator/libreader-plain_reader-iterator.a |83.3%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/plain_reader/iterator/libreader-plain_reader-iterator.a |83.3%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/vdisk/skeleton/libblobstorage-vdisk-skeleton.a |83.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/change_collector_async_index.cpp |83.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/change_collector_async_index.cpp |83.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_node_registration.cpp |83.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_node_registration.cpp |83.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/portions/meta.cpp |83.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/portions/meta.cpp |83.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/portions/constructor_accessor.cpp |83.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/change_sender_table_base.cpp |83.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/portions/constructor_accessor.cpp |83.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/change_sender_table_base.cpp |83.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_write_operation.cpp |83.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_write_operation.cpp |83.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard__data_cleanup.cpp |83.4%| [CC] 
{default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/default_fetching.cpp |83.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_begin_transaction.cpp |83.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard__data_cleanup.cpp |83.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/default_fetching.cpp |83.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_begin_transaction.cpp |83.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/ymq/actor/user_settings_reader.cpp |83.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ymq/actor/user_settings_reader.cpp |83.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard.cpp |83.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard.cpp |83.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/ymq/actor/tag_queue.cpp |83.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ymq/actor/tag_queue.cpp |83.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/library/yql/providers/solomon/actors/ut/ut_helpers.cpp |83.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/library/yql/providers/solomon/actors/ut/ut_helpers.cpp |83.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/common/kqp_ut_common.cpp |83.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/common/kqp_ut_common.cpp |83.4%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/ut/common/libkqp-ut-common.a |83.4%| [AR] {RESULT} $(B)/ydb/core/kqp/ut/common/libkqp-ut-common.a |83.4%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/ut/common/libkqp-ut-common.a |83.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/long_tx_service/long_tx_service_ut.cpp |83.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/long_tx_service/long_tx_service_ut.cpp |83.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/portions/written.cpp |83.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/portions/written.cpp |83.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/collections/abstract.cpp |83.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/collections/abstract.cpp |83.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/federated_query/kqp_federated_query_helpers_ut.cpp |83.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/federated_query/kqp_federated_query_helpers_ut.cpp |83.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/mvp/core/mvp_ut.cpp |83.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/mvp/core/mvp_ut.cpp |83.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_vdisk/lib/test_simplebs.cpp |83.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_vdisk/lib/test_simplebs.cpp |83.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/sentinel_ut.cpp |83.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/sentinel_ut.cpp |83.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/ymq/actor/untag_queue.cpp |83.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/ymq/actor/set_queue_attributes.cpp |83.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ymq/actor/set_queue_attributes.cpp |83.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ymq/actor/untag_queue.cpp |83.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/reader/plain_reader/constructor/read_metadata.cpp |83.4%| [CC] {BAZEL_UPLOAD} 
$(S)/ydb/core/tx/columnshard/engines/reader/plain_reader/constructor/read_metadata.cpp |83.4%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/reader/plain_reader/constructor/libreader-plain_reader-constructor.a |83.4%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/reader/plain_reader/constructor/libreader-plain_reader-constructor.a |83.4%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/plain_reader/constructor/libreader-plain_reader-constructor.a |83.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/cms_ut_common.cpp |83.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/sentinel_ut_unstable.cpp |83.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/cms_ut_common.cpp |83.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/sentinel_ut_unstable.cpp |83.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/reader/actor/actor.cpp |83.4%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/reader/actor/libengines-reader-actor.a |83.4%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/reader/actor/libengines-reader-actor.a |83.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/actor/actor.cpp |83.5%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/actor/libengines-reader-actor.a |83.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/external_sources/object_storage/inference/ut/arrow_inference_ut.cpp |83.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/external_sources/object_storage/inference/ut/arrow_inference_ut.cpp |83.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/apps/ydbd/main.cpp |83.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/apps/ydbd/main.cpp |83.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/backpressure/ut_client/backpressure_ut.cpp |83.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/backpressure/ut_client/backpressure_ut.cpp |83.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_kh_snapshots.cpp |83.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_kh_snapshots.cpp |83.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/tablet/rpc_restart_tablet_ut.cpp |83.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/ymq/actor/schema.cpp |83.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/tablet/rpc_restart_tablet_ut.cpp |83.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ymq/actor/schema.cpp |83.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/fq/libs/row_dispatcher/ut/leader_election_ut.cpp |83.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/fq/libs/row_dispatcher/ut/leader_election_ut.cpp |83.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/fq/libs/row_dispatcher/ut/coordinator_ut.cpp |83.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/fq/libs/row_dispatcher/ut/coordinator_ut.cpp |83.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/fq/libs/row_dispatcher/format_handler/ut/common/ut_common.cpp |83.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/fq/libs/row_dispatcher/format_handler/ut/common/ut_common.cpp |83.5%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/fq/libs/row_dispatcher/format_handler/ut/common/libformat_handler-ut-common.a |83.5%| [AR] {RESULT} $(B)/ydb/core/fq/libs/row_dispatcher/format_handler/ut/common/libformat_handler-ut-common.a |83.5%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/fq/libs/row_dispatcher/format_handler/ut/common/libformat_handler-ut-common.a |83.5%| [CC] {default-linux-x86_64, release, asan} 
$(S)/ydb/core/ymq/actor/service.cpp |83.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ymq/actor/service.cpp |83.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/lib/node_warden_mock_state.cpp |83.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/lib/node_warden_mock_state.cpp |83.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/fq/libs/row_dispatcher/ut/row_dispatcher_ut.cpp |83.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/fq/libs/row_dispatcher/ut/row_dispatcher_ut.cpp |83.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_testshard/main.cpp |83.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_testshard/main.cpp |83.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_describe_external_table.cpp |83.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_describe_external_table.cpp |83.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/portions/column_record.cpp |83.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/portions/column_record.cpp |83.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/blob_depot_test_functions.cpp |83.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/blob_depot_test_functions.cpp |83.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/library/query_actor/query_actor_ut.cpp |83.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/library/query_actor/query_actor_ut.cpp |83.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_execute_yql_script.cpp |83.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_execute_yql_script.cpp |83.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/blob_depot_event_managers.cpp |83.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/blob_depot_event_managers.cpp |83.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/keyvalue/grpc_service_ut.cpp |83.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/keyvalue/grpc_service_ut.cpp |83.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/ymq/actor/send_message.cpp |83.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/sequenceshard/ut_helpers.cpp |83.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/sequenceshard/ut_helpers.cpp |83.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ymq/actor/send_message.cpp |83.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/lib/node_warden_mock_bsc.cpp |83.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/lib/node_warden_mock_bsc.cpp |83.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_vdisk/lib/test_synclog.cpp |83.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_vdisk/lib/test_synclog.cpp |83.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_vdisk/lib/test_defrag.cpp |83.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_vdisk/lib/test_defrag.cpp |83.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/fq/libs/row_dispatcher/ut/topic_session_ut.cpp |83.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/fq/libs/row_dispatcher/ut/topic_session_ut.cpp |83.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_vdisk/lib/test_brokendevice.cpp |83.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_vdisk/lib/test_brokendevice.cpp |83.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_get_shard_locations.cpp |83.6%| [CC] {BAZEL_UPLOAD} 
$(S)/ydb/core/grpc_services/rpc_get_shard_locations.cpp |83.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_vdisk/lib/test_load.cpp |83.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_vdisk/lib/test_load.cpp |83.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/time_cast/time_cast_ut.cpp |83.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/time_cast/time_cast_ut.cpp |83.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/reader/plain_reader/constructor/constructor.cpp |83.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/plain_reader/constructor/constructor.cpp |83.6%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/reader/plain_reader/constructor/libreader-plain_reader-constructor.global.a |83.6%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/reader/plain_reader/constructor/libreader-plain_reader-constructor.global.a |83.6%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/plain_reader/constructor/libreader-plain_reader-constructor.global.a |83.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_vdisk/lib/prepare.cpp |83.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_vdisk/lib/prepare.cpp |83.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_vdisk/lib/helpers.cpp |83.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_vdisk/lib/helpers.cpp |83.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/hulldb/test/testhull_index.cpp |83.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/hulldb/test/testhull_index.cpp |83.6%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/vdisk/hulldb/test/libvdisk-hulldb-test.a |83.6%| [AR] {RESULT} $(B)/ydb/core/blobstorage/vdisk/hulldb/test/libvdisk-hulldb-test.a |83.6%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/vdisk/hulldb/test/libvdisk-hulldb-test.a |83.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/tablet/rpc_execute_mkql_ut.cpp |83.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_vdisk/lib/test_gc.cpp |83.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/tablet/rpc_execute_mkql_ut.cpp |83.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_vdisk/lib/test_gc.cpp |83.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_ut_range_ops.cpp |83.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_range_ops.cpp |83.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/tablet/rpc_change_schema_ut.cpp |83.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/tablet/rpc_change_schema_ut.cpp |83.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/tests/tools/kqprun/src/actors.cpp |83.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/tests/tools/kqprun/src/actors.cpp |83.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/build_index/ut/ut_secondary_index.cpp |83.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/build_index/ut/ut_secondary_index.cpp |83.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_vdisk/lib/test_outofspace.cpp |83.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_vdisk/lib/test_outofspace.cpp |83.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_vdisk/lib/test_faketablet.cpp |83.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_vdisk/lib/test_faketablet.cpp |83.6%| [CC] {default-linux-x86_64, release, asan} 
$(S)/ydb/core/blobstorage/ut_vdisk/lib/test_many.cpp |83.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_vdisk/lib/test_many.cpp |83.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_ut_compaction.cpp |83.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_compaction.cpp |83.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_ut_column_stats.cpp |83.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_column_stats.cpp |83.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_ut_keys.cpp |83.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_ut_background_compaction.cpp |83.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_keys.cpp |83.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_background_compaction.cpp |83.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/build_index/ut/ut_sample_k.cpp |83.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/build_index/ut/ut_sample_k.cpp |83.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/build_index/ut/ut_reshuffle_kmeans.cpp |83.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/build_index/ut/ut_reshuffle_kmeans.cpp |83.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_vdisk/lib/vdisk_mock.cpp |83.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_vdisk/lib/vdisk_mock.cpp |83.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/portions/data_accessor.cpp |83.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/portions/data_accessor.cpp |83.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_vdisk/lib/test_repl.cpp |83.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_get_operation.cpp |83.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_vdisk/lib/test_repl.cpp |83.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_get_operation.cpp |83.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/tests/tools/kqprun/src/kqp_runner.cpp |83.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/tests/tools/kqprun/src/kqp_runner.cpp |83.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_export.cpp |83.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_export.cpp |83.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/build_index/ut/ut_prefix_kmeans.cpp |83.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/build_index/ut/ut_prefix_kmeans.cpp |83.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/graph/ut/graph_ut.cpp |83.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/graph/ut/graph_ut.cpp |83.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/federated_query/common/common.cpp |83.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/federated_query/common/common.cpp |83.7%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/ut/federated_query/common/libut-federated_query-common.a |83.7%| [AR] {RESULT} $(B)/ydb/core/kqp/ut/federated_query/common/libut-federated_query-common.a |83.7%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/ut/federated_query/common/libut-federated_query-common.a |83.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/ymq/actor/proxy_actor.cpp |83.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ymq/actor/proxy_actor.cpp |83.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_ut_rs.cpp |83.7%| [CC] 
{BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_rs.cpp |83.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/yql_testlib/yql_testlib.cpp |83.7%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/yql_testlib/libydb-core-yql_testlib.a |83.7%| [AR] {RESULT} $(B)/ydb/core/yql_testlib/libydb-core-yql_testlib.a |83.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/yql_testlib/yql_testlib.cpp |83.7%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/yql_testlib/libydb-core-yql_testlib.a |83.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/build_index/ut/ut_local_kmeans.cpp |83.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/build_index/ut/ut_local_kmeans.cpp |83.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_ut_data_cleanup.cpp |83.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_data_cleanup.cpp |83.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/ymq/actor/fifo_cleanup.cpp |83.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ymq/actor/fifo_cleanup.cpp |83.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/tests/tools/kqprun/runlib/utils.cpp |83.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/tests/tools/kqprun/runlib/utils.cpp |83.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_describe_coordination_node.cpp |83.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_describe_coordination_node.cpp |83.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/ut_utils/test_server.cpp |83.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/ut_utils/test_server.cpp |83.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/tests/tools/kqprun/runlib/kikimr_setup.cpp |83.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/tests/tools/kqprun/runlib/kikimr_setup.cpp |83.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/tests/tools/kqprun/runlib/application.cpp |83.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/tests/tools/kqprun/runlib/application.cpp |83.7%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/tests/tools/kqprun/runlib/libtools-kqprun-runlib.a |83.7%| [AR] {RESULT} $(B)/ydb/tests/tools/kqprun/runlib/libtools-kqprun-runlib.a |83.7%| [AR] {BAZEL_UPLOAD} $(B)/ydb/tests/tools/kqprun/runlib/libtools-kqprun-runlib.a |83.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_helpers/data_erasure_helpers.cpp |83.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_helpers/data_erasure_helpers.cpp |83.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/ymq/actor/executor.cpp |83.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ymq/actor/executor.cpp |83.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/table_settings.cpp |83.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/table_settings.cpp |83.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tablet_flat/flat_part_loader.cpp |83.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tablet_flat/flat_part_loader.cpp |83.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/view/view_ut.cpp |83.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/view/view_ut.cpp |83.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/ymq/actor/retention.cpp |83.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ymq/actor/retention.cpp |83.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/memory_controller/memory_controller_ut.cpp |83.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/memory_controller/memory_controller_ut.cpp |83.7%| [CC] {default-linux-x86_64, release, 
asan} $(S)/ydb/core/grpc_services/rpc_config.cpp |83.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_config.cpp |83.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_helpers/failing_mtpq.cpp |83.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_helpers/failing_mtpq.cpp |83.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/ymq/actor/proxy_service.cpp |83.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ymq/actor/proxy_service.cpp |83.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/ymq/actor/node_tracker.cpp |83.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ymq/actor/node_tracker.cpp |83.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/ymq/actor/purge.cpp |83.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ymq/actor/purge.cpp |83.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/apps/etcd_proxy/service/etcd_impl.cpp |83.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/apps/etcd_proxy/service/etcd_impl.cpp |83.8%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/apps/etcd_proxy/service/libapps-etcd_proxy-service.a |83.8%| [AR] {RESULT} $(B)/ydb/apps/etcd_proxy/service/libapps-etcd_proxy-service.a |83.8%| [AR] {BAZEL_UPLOAD} $(B)/ydb/apps/etcd_proxy/service/libapps-etcd_proxy-service.a |83.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_modify_permissions.cpp |83.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_modify_permissions.cpp |83.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_helpers/export_reboots_common.cpp |83.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_helpers/export_reboots_common.cpp |83.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/ymq/actor/list_permissions.cpp |83.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ymq/actor/list_permissions.cpp |83.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_login.cpp |83.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_login.cpp |83.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/tests/tools/kqprun/src/ydb_setup.cpp |83.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/tests/tools/kqprun/src/ydb_setup.cpp |83.8%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/tests/tools/kqprun/src/libtools-kqprun-src.a |83.8%| [AR] {RESULT} $(B)/ydb/tests/tools/kqprun/src/libtools-kqprun-src.a |83.8%| [AR] {BAZEL_UPLOAD} $(B)/ydb/tests/tools/kqprun/src/libtools-kqprun-src.a |83.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/ymq/actor/list_dead_letter_source_queues.cpp |83.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_ping.cpp |83.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ymq/actor/list_dead_letter_source_queues.cpp |83.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/persqueue/ut/common/pq_ut_common.cpp |83.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_ping.cpp |83.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/ut/common/pq_ut_common.cpp |83.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/portions/compacted.cpp |83.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/portions/compacted.cpp |83.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_rate_limiter_api.cpp |83.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_rate_limiter_api.cpp |83.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/ymq/actor/modify_permissions.cpp |83.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ymq/actor/modify_permissions.cpp |83.8%| [CC] {default-linux-x86_64, release, 
asan} $(S)/ydb/core/grpc_services/rpc_get_scale_recommendation.cpp |83.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_get_scale_recommendation.cpp |83.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/hooks/testing/ro_controller.cpp |83.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/hooks/testing/ro_controller.cpp |83.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/statistics/ut_common/ut_common.cpp |83.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/statistics/ut_common/ut_common.cpp |83.8%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/statistics/ut_common/libcore-statistics-ut_common.a |83.8%| [AR] {RESULT} $(B)/ydb/core/statistics/ut_common/libcore-statistics-ut_common.a |83.8%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/statistics/ut_common/libcore-statistics-ut_common.a |83.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/ymq/actor/queue_leader.cpp |83.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ymq/actor/queue_leader.cpp |83.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/ymq/actor/cfg.cpp |83.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ymq/actor/cfg.cpp |83.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_log_store.cpp |83.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_log_store.cpp |83.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/public/sdk/cpp/src/client/topic/ut/ut_utils/topic_sdk_test_setup.cpp |83.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/ymq/actor/list_users.cpp |83.8%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/public/sdk/cpp/src/client/topic/ut/ut_utils/libtopic-ut-ut_utils.a |83.8%| [AR] {RESULT} $(B)/ydb/public/sdk/cpp/src/client/topic/ut/ut_utils/libtopic-ut-ut_utils.a |83.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ymq/actor/list_users.cpp |83.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/public/sdk/cpp/src/client/topic/ut/ut_utils/topic_sdk_test_setup.cpp |83.9%| [AR] {BAZEL_UPLOAD} $(B)/ydb/public/sdk/cpp/src/client/topic/ut/ut_utils/libtopic-ut-ut_utils.a |83.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_list_operations.cpp |83.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_list_operations.cpp |83.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_rollback_transaction.cpp |83.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_rollback_transaction.cpp |83.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/ymq/actor/queue_schema.cpp |83.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ymq/actor/queue_schema.cpp |83.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/ut_utils/ut_utils.cpp |83.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/ut_utils/ut_utils.cpp |83.9%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/ut_utils/libpersqueue_public-ut-ut_utils.a |83.9%| [AR] {RESULT} $(B)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/ut_utils/libpersqueue_public-ut-ut_utils.a |83.9%| [AR] {BAZEL_UPLOAD} $(B)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/ut_utils/libpersqueue_public-ut-ut_utils.a |83.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/ymq/actor/list_queues.cpp |83.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ymq/actor/list_queues.cpp |83.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_helpers/test_env.cpp |83.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_helpers/test_env.cpp |83.9%| [CC] 
{default-linux-x86_64, release, asan} $(S)/ydb/core/ymq/actor/garbage_collector.cpp |83.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ymq/actor/garbage_collector.cpp |83.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/ymq/actor/receive_message.cpp |83.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ymq/actor/receive_message.cpp |83.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/ymq/actor/purge_queue.cpp |83.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ymq/actor/purge_queue.cpp |83.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/graph/shard/tx_get_metrics.cpp |83.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/graph/shard/tx_get_metrics.cpp |83.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_forget_operation.cpp |83.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_forget_operation.cpp |83.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/ymq/actor/list_queue_tags.cpp |83.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ymq/actor/list_queue_tags.cpp |83.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_helpers/helpers.cpp |83.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_read_table.cpp |83.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_read_table.cpp |83.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/reader/common/conveyor_task.cpp |83.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_helpers/helpers.cpp |83.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/common/conveyor_task.cpp |83.9%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_helpers/libtx-schemeshard-ut_helpers.a |83.9%| [AR] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_helpers/libtx-schemeshard-ut_helpers.a |83.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/graph/shard/tx_change_backend.cpp |83.9%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/schemeshard/ut_helpers/libtx-schemeshard-ut_helpers.a |83.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/graph/shard/tx_change_backend.cpp |83.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/hooks/testing/controller.cpp |83.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/hooks/testing/controller.cpp |83.9%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/hooks/testing/libcolumnshard-hooks-testing.a |83.9%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/hooks/testing/libcolumnshard-hooks-testing.a |83.9%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/hooks/testing/libcolumnshard-hooks-testing.a |83.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/ymq/actor/get_queue_attributes.cpp |83.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ymq/actor/get_queue_attributes.cpp |83.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_read_columns.cpp |83.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_read_columns.cpp |83.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_prepare_data_query.cpp |83.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_prepare_data_query.cpp |83.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/ymq/actor/auth_multi_factory.cpp |83.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ymq/actor/auth_multi_factory.cpp |83.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/reader/common/result.cpp |84.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/common/result.cpp |83.9%| [AR] {default-linux-x86_64, release, asan} 
$(B)/ydb/core/tx/columnshard/engines/reader/common/libengines-reader-common.a |84.0%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/reader/common/libengines-reader-common.a |84.0%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/common/libengines-reader-common.a |84.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/graph/shard/tx_monitoring.cpp |84.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/graph/shard/tx_monitoring.cpp |84.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_keep_alive.cpp |84.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_keep_alive.cpp |84.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/graph/shard/tx_init_schema.cpp |84.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/graph/shard/tx_init_schema.cpp |84.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_scheme_base.cpp |84.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_scheme_base.cpp |84.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tablet_flat/flat_executor_data_cleanup_logic.cpp |84.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/ymq/actor/index_events_processor.cpp |84.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tablet_flat/flat_executor_data_cleanup_logic.cpp |84.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ymq/actor/index_events_processor.cpp |84.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_kqp_base.cpp |84.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_kqp_base.cpp |84.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_vdisk/lib/test_localrecovery.cpp |84.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_vdisk/lib/test_localrecovery.cpp |84.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/persqueue/ut/common/autoscaling_ut_common.cpp |84.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/ut/common/autoscaling_ut_common.cpp |84.0%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/persqueue/ut/common/libpersqueue-ut-common.a |84.0%| [AR] {RESULT} $(B)/ydb/core/persqueue/ut/common/libpersqueue-ut-common.a |84.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/graph/shard/shard_impl.cpp |84.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/graph/shard/shard_impl.cpp |84.0%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/persqueue/ut/common/libpersqueue-ut-common.a |84.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tablet_flat/flat_executor_compaction_logic.cpp |84.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tablet_flat/flat_executor_compaction_logic.cpp |84.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/ymq/actor/delete_user.cpp |84.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ymq/actor/delete_user.cpp |84.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/ymq/actor/create_queue.cpp |84.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ymq/actor/create_queue.cpp |84.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_explain_yql_script.cpp |84.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_explain_yql_script.cpp |84.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/lib/node_warden_mock_pipe.cpp |84.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/graph/shard/tx_startup.cpp |84.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/lib/node_warden_mock_pipe.cpp |84.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/graph/shard/tx_startup.cpp |84.0%| [AR] {default-linux-x86_64, release, asan} 
$(B)/ydb/core/blobstorage/ut_blobstorage/lib/libblobstorage-ut_blobstorage-lib.a |84.0%| [AR] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/lib/libblobstorage-ut_blobstorage-lib.a |84.0%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/ut_blobstorage/lib/libblobstorage-ut_blobstorage-lib.a |84.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tablet_flat/flat_load_blob_queue.cpp |84.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tablet_flat/flat_load_blob_queue.cpp |84.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/portions/constructor_portion.cpp |84.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/portions/constructor_portion.cpp |84.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/workload_service/ut/common/kqp_workload_service_ut_common.cpp |84.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/ymq/actor/actor.cpp |84.0%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/workload_service/ut/common/libworkload_service-ut-common.a |84.0%| [AR] {RESULT} $(B)/ydb/core/kqp/workload_service/ut/common/libworkload_service-ut-common.a |84.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ymq/actor/actor.cpp |84.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/workload_service/ut/common/kqp_workload_service_ut_common.cpp |84.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/ymq/actor/delete_queue.cpp |84.0%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/workload_service/ut/common/libworkload_service-ut-common.a |84.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ymq/actor/delete_queue.cpp |84.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_load_rows.cpp |84.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_stream_execute_scan_query.cpp |84.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/blob_depot.cpp |84.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_stream_execute_scan_query.cpp |84.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_load_rows.cpp |84.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/blob_depot.cpp |84.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/ut_common/datashard_ut_common.cpp |84.0%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_common/libtx-datashard-ut_common.a |84.1%| [AR] {RESULT} $(B)/ydb/core/tx/datashard/ut_common/libtx-datashard-ut_common.a |84.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/ut_common/datashard_ut_common.cpp |84.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/ymq/actor/create_user.cpp |84.1%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/datashard/ut_common/libtx-datashard-ut_common.a |84.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ymq/actor/create_user.cpp |84.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_replication.cpp |84.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_replication.cpp |84.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_stream_execute_yql_script.cpp |84.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_stream_execute_yql_script.cpp |84.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_vdisk/lib/test_huge.cpp |84.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_vdisk/lib/test_dbstat.cpp |84.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_vdisk/lib/test_huge.cpp |84.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_vdisk/lib/test_dbstat.cpp |84.1%| 
[CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/ymq/actor/count_queues.cpp |84.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/ymq/actor/delete_message.cpp |84.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ymq/actor/count_queues.cpp |84.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ymq/actor/delete_message.cpp |84.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_vdisk/lib/test_bad_blobid.cpp |84.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_vdisk/lib/test_bad_blobid.cpp |84.1%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/ut_vdisk/lib/libblobstorage-ut_vdisk-lib.a |84.1%| [AR] {RESULT} $(B)/ydb/core/blobstorage/ut_vdisk/lib/libblobstorage-ut_vdisk-lib.a |84.1%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/ut_vdisk/lib/libblobstorage-ut_vdisk-lib.a |84.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/ymq/actor/change_visibility.cpp |84.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ymq/actor/change_visibility.cpp |84.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_remove_directory.cpp |84.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tablet_flat/flat_bio_actor.cpp |84.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_remove_directory.cpp |84.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tablet_flat/flat_bio_actor.cpp |84.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tablet_flat/flat_executor_db_mon.cpp |84.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tablet_flat/flat_executor_db_mon.cpp |84.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_dynamic_config.cpp |84.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_dynamic_config.cpp |84.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_read_rows.cpp |84.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_read_rows.cpp |84.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/operation_helpers.cpp |84.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/operation_helpers.cpp |84.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_execute_data_query.cpp |84.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_execute_data_query.cpp |84.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_explain_data_query.cpp |84.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_explain_data_query.cpp |84.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_object_storage.cpp |84.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_object_storage.cpp |84.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/grpc_endpoint_publish_actor.cpp |84.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/grpc_endpoint_publish_actor.cpp |84.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/ymq/actor/metering.cpp |84.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tablet_flat/flat_boot_misc.cpp |84.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tablet_flat/flat_boot_misc.cpp |84.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ymq/actor/metering.cpp |84.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tablet_flat/flat_boot_lease.cpp |84.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tablet_flat/flat_boot_lease.cpp |84.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_cms.cpp |84.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_cms.cpp |84.1%| [CC] {default-linux-x86_64, release, asan} 
$(S)/ydb/core/grpc_services/rpc_execute_scheme_query.cpp |84.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_execute_scheme_query.cpp |84.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/query/rpc_execute_script.cpp |84.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_view.cpp |84.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/query/rpc_execute_script.cpp |84.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_view.cpp |84.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/ymq/actor/auth_factory.cpp |84.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ymq/actor/auth_factory.cpp |84.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/portions/read_with_blobs.cpp |84.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/portions/read_with_blobs.cpp |84.2%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/portions/libcolumnshard-engines-portions.a |84.2%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/portions/libcolumnshard-engines-portions.a |84.2%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/portions/libcolumnshard-engines-portions.a |84.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_drop_table.cpp |84.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_drop_table.cpp |84.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_describe_table_options.cpp |84.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_describe_table_options.cpp |84.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_commit_transaction.cpp |84.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_commit_transaction.cpp |84.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_drop_coordination_node.cpp |84.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_drop_coordination_node.cpp |84.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_rename_tables.cpp |84.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_rename_tables.cpp |84.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_make_directory.cpp |84.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_make_directory.cpp |84.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/resolve_local_db_table.cpp |84.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/resolve_local_db_table.cpp |84.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/graph/shard/tx_aggregate_data.cpp |84.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/graph/shard/tx_aggregate_data.cpp |84.2%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/graph/shard/libcore-graph-shard.a |84.2%| [AR] {RESULT} $(B)/ydb/core/graph/shard/libcore-graph-shard.a |84.2%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/graph/shard/libcore-graph-shard.a |84.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/ymq/actor/queues_list_reader.cpp |84.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ymq/actor/queues_list_reader.cpp |84.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_cancel_operation.cpp |84.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_cancel_operation.cpp |84.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_import_data.cpp |84.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_import_data.cpp |84.2%| [CC] {default-linux-x86_64, release, asan} 
$(S)/ydb/core/grpc_services/rpc_kh_describe.cpp |84.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_kh_describe.cpp |84.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_import.cpp |84.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_import.cpp |84.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/ydb_over_fq/create_session.cpp |84.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/ydb_over_fq/create_session.cpp |84.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_describe_table.cpp |84.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_describe_table.cpp |84.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_create_table.cpp |84.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_create_table.cpp |84.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_copy_tables.cpp |84.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_copy_tables.cpp |84.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_alter_coordination_node.cpp |84.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/local_rate_limiter.cpp |84.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_alter_coordination_node.cpp |84.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tablet_flat/tablet_flat_executor.cpp |84.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/local_rate_limiter.cpp |84.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tablet_flat/tablet_flat_executor.cpp |84.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_create_coordination_node.cpp |84.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_create_coordination_node.cpp |84.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/ydb_over_fq/keep_alive.cpp |84.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/ydb_over_fq/keep_alive.cpp |84.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/ydb_over_fq/describe_table.cpp |84.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/ydb_over_fq/describe_table.cpp |84.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tablet_flat/flat_table.cpp |84.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/query/rpc_fetch_script_results.cpp |84.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tablet_flat/flat_table.cpp |84.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/query/rpc_fetch_script_results.cpp |84.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_describe_external_data_source.cpp |84.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_maintenance.cpp |84.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_describe_external_data_source.cpp |84.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_maintenance.cpp |84.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_discovery.cpp |84.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_discovery.cpp |84.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_keyvalue.cpp |84.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_keyvalue.cpp |84.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_alter_table.cpp |84.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_alter_table.cpp |84.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/grpc_request_proxy_simple.cpp |84.3%| 
[CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/grpc_request_proxy_simple.cpp |84.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_backup.cpp |84.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/query/rpc_kqp_tx.cpp |84.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_backup.cpp |84.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_describe_path.cpp |84.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/query/rpc_kqp_tx.cpp |84.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_describe_path.cpp |84.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/ydb_over_fq/execute_data_query.cpp |84.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_monitoring.cpp |84.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/ydb_over_fq/execute_data_query.cpp |84.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_monitoring.cpp |84.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_copy_table.cpp |84.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_copy_table.cpp |84.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/ydb_over_fq/list_directory.cpp |84.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/ydb_over_fq/list_directory.cpp |84.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tablet_flat/flat_executor_bootlogic.cpp |84.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tablet_flat/flat_executor_bootlogic.cpp |84.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/ydb_over_fq/explain_data_query.cpp |84.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/ydb_over_fq/explain_data_query.cpp |84.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/sequenceshard/ut_sequenceshard.cpp |84.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/sequenceshard/ut_sequenceshard.cpp |84.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/repl/blobstorage_hullreplwritesst_ut.cpp |84.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/repl/blobstorage_hullreplwritesst_ut.cpp |84.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/sub_columns_fetching.cpp |84.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_vdisk/mon_reregister_ut.cpp |84.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/sub_columns_fetching.cpp |84.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_vdisk/mon_reregister_ut.cpp |84.3%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/libreader-common_reader-iterator.global.a |84.3%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/libreader-common_reader-iterator.global.a |84.3%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/libreader-common_reader-iterator.global.a |84.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/library/yql/providers/generic/connector/libcpp/ut_helpers/connector_client_mock.cpp |84.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/library/yql/providers/generic/connector/libcpp/ut_helpers/connector_client_mock.cpp |84.3%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/library/yql/providers/generic/connector/libcpp/ut_helpers/libconnector-libcpp-ut_helpers.a |84.3%| [AR] {RESULT} 
$(B)/ydb/library/yql/providers/generic/connector/libcpp/ut_helpers/libconnector-libcpp-ut_helpers.a |84.3%| [AR] {BAZEL_UPLOAD} $(B)/ydb/library/yql/providers/generic/connector/libcpp/ut_helpers/libconnector-libcpp-ut_helpers.a |84.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/syncer/blobstorage_syncer_broker_ut.cpp |84.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/syncer/blobstorage_syncer_broker_ut.cpp |84.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/query/rpc_execute_query.cpp |84.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/query/rpc_execute_query.cpp |84.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_fq_internal.cpp |84.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_fq_internal.cpp |84.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/loading/stages.cpp |84.3%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/loading/libcolumnshard-engines-loading.a |84.3%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/loading/libcolumnshard-engines-loading.a |84.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/loading/stages.cpp |84.4%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/loading/libcolumnshard-engines-loading.a |84.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/nodewarden/ut_sequence/dsproxy_config_retrieval.cpp |84.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/nodewarden/ut_sequence/dsproxy_config_retrieval.cpp |84.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/base/generated/runtime_feature_flags_ut.cpp |84.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/base/generated/runtime_feature_flags_ut.cpp |84.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/ymq/actor/get_queue_url.cpp |84.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/scheme_board/ut_helpers.cpp |84.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ymq/actor/get_queue_url.cpp |84.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/scheme_board/ut_helpers.cpp |84.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/collections/not_sorted.cpp |84.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/collections/not_sorted.cpp |84.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/collections/full_scan_sorted.cpp |84.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/collections/full_scan_sorted.cpp |84.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/tools/query_replay/query_proccessor.cpp |84.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/tools/query_replay/query_proccessor.cpp |84.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/tools/query_replay/query_replay.cpp |84.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/tools/query_replay/query_replay.cpp |84.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/rpc_fq.cpp |84.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/rpc_fq.cpp |84.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/collections/limit_sorted.cpp |84.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/collections/limit_sorted.cpp |84.4%| [AR] {default-linux-x86_64, release, asan} 
$(B)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/collections/libsimple_reader-iterator-collections.a |84.4%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/collections/libsimple_reader-iterator-collections.a |84.4%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/collections/libsimple_reader-iterator-collections.a |84.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/tools/blobsan/main.cpp |84.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/tools/blobsan/main.cpp |84.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tablet_flat/flat_executor_leases_ut.cpp |84.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tablet_flat/flat_executor_leases_ut.cpp |84.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/storage_pool_info_ut.cpp |84.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/storage_pool_info_ut.cpp |84.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_services/grpc_request_proxy.cpp |84.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_services/grpc_request_proxy.cpp |84.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/cms_ut_common.cpp |84.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/cms_ut_common.cpp |84.4%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/grpc_services/libydb-core-grpc_services.a |84.4%| [AR] {RESULT} $(B)/ydb/core/grpc_services/libydb-core-grpc_services.a |84.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_pdiskfit/ut/main.cpp |84.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_pdiskfit/ut/main.cpp |84.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tablet_flat/flat_executor.cpp |84.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tablet_flat/flat_executor.cpp |84.4%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tablet_flat/libydb-core-tablet_flat.a |84.4%| [AR] {RESULT} $(B)/ydb/core/tablet_flat/libydb-core-tablet_flat.a |84.4%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tablet_flat/libydb-core-tablet_flat.a |84.4%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/grpc_services/libydb-core-grpc_services.a |84.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/dsproxy/ut/dsproxy_counters_ut.cpp |84.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/dsproxy/ut/dsproxy_counters_ut.cpp |84.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/console/immediate_controls_configurator_ut.cpp |84.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/immediate_controls_configurator_ut.cpp |84.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/tools/query_replay/main.cpp |84.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/tools/query_replay/main.cpp |84.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/tools/query_replay_yt/query_replay.cpp |84.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/tools/query_replay_yt/query_replay.cpp |84.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/console/feature_flags_configurator_ut.cpp |84.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/feature_flags_configurator_ut.cpp |84.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/encryption.cpp |84.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/encryption.cpp |84.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_external_table/ut_external_table.cpp |84.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_external_table/ut_external_table.cpp |84.5%| [CC] {default-linux-x86_64, release, asan} 
$(S)/ydb/core/tx/schemeshard/ut_auditsettings/ut_auditsettings.cpp |84.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_auditsettings/ut_auditsettings.cpp |84.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/hulldb/generic/blobstorage_hullwritesst_ut.cpp |84.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/hulldb/generic/blobstorage_hullwritesst_ut.cpp |84.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/console/log_settings_configurator_ut.cpp |84.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/log_settings_configurator_ut.cpp |84.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/tx_proxy/proxy_ut_helpers.cpp |84.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/tx_proxy/proxy_ut_helpers.cpp |84.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/fq/libs/result_formatter/result_formatter_ut.cpp |84.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/fq/libs/result_formatter/result_formatter_ut.cpp |84.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/scrub_fast.cpp |84.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/scrub_fast.cpp |84.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/defrag.cpp |84.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/defrag.cpp |84.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kesus/tablet/ut_helpers.cpp |84.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kesus/tablet/ut_helpers.cpp |84.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/sharding/ut/ut_sharding.cpp |84.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/sharding/ut/ut_sharding.cpp |84.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/fq/libs/control_plane_proxy/ut/control_plane_proxy_ut.cpp |84.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/scheme_board/cache_ut.cpp |84.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/fq/libs/control_plane_proxy/ut/control_plane_proxy_ut.cpp |84.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/scheme_board/cache_ut.cpp |84.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/cms_ut.cpp |84.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/cms_ut.cpp |84.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_user_attributes/ut_user_attributes.cpp |84.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_user_attributes/ut_user_attributes.cpp |84.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/console/modifications_validator_ut.cpp |84.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/modifications_validator_ut.cpp |84.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/tests/fq/control_plane_storage/ydb_control_plane_storage_connections_ut.cpp |84.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/tests/fq/control_plane_storage/ydb_control_plane_storage_connections_ut.cpp |84.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/data_integrity/kqp_data_integrity_trails_ut.cpp |84.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/data_integrity/kqp_data_integrity_trails_ut.cpp |84.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/olap/helpers/local.cpp |84.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/olap/helpers/local.cpp |84.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/ydb/ydb_login_ut.cpp |84.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/ydb/ydb_login_ut.cpp |84.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/dynamic_config/dynamic_config_ut.cpp |84.5%| [CC] {BAZEL_UPLOAD} 
$(S)/ydb/services/dynamic_config/dynamic_config_ut.cpp |84.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/coordinator/coordinator_ut.cpp |84.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/coordinator/coordinator_ut.cpp |84.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/read_only_vdisk.cpp |84.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/read_only_vdisk.cpp |84.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/statistics/service/ut/ut_column_statistics.cpp |84.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/persqueue/ut/ut_with_sdk/balancing_ut.cpp |84.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/statistics/service/ut/ut_column_statistics.cpp |84.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/ut/ut_with_sdk/balancing_ut.cpp |84.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_replication_reboots/ut_replication_reboots.cpp |84.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_replication_reboots/ut_replication_reboots.cpp |84.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/hulldb/fresh/fresh_data_ut.cpp |84.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/hulldb/fresh/fresh_data_ut.cpp |84.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/proxy_service/kqp_script_executions_ut.cpp |84.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/proxy_service/kqp_script_executions_ut.cpp |84.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/effects/kqp_write_ut.cpp |84.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/effects/kqp_write_ut.cpp |84.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/tests/fq/control_plane_storage/ydb_control_plane_storage_ut.cpp |84.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/tests/fq/control_plane_storage/ydb_control_plane_storage_ut.cpp |84.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blob_depot/closed_interval_set_ut.cpp |84.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blob_depot/closed_interval_set_ut.cpp |84.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/fq/ut_integration/fq_ut.cpp |84.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_vector_index_build_reboots/ut_vector_index_build_reboots.cpp |84.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/fq/ut_integration/fq_ut.cpp |84.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_vector_index_build_reboots/ut_vector_index_build_reboots.cpp |84.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/tests/fq/control_plane_storage/ydb_control_plane_storage_queries_permissions_ut.cpp |84.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/tests/fq/control_plane_storage/ydb_control_plane_storage_queries_permissions_ut.cpp |84.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_ut_upload_rows.cpp |84.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_upload_rows.cpp |84.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/ut_sequence/datashard_ut_sequence.cpp |84.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/tests/fq/control_plane_storage/ydb_control_plane_storage_quotas_ut.cpp |84.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/tests/fq/control_plane_storage/ydb_control_plane_storage_quotas_ut.cpp |84.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/ut_sequence/datashard_ut_sequence.cpp |84.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/public/sdk/cpp/src/client/topic/ut/describe_topic_ut.cpp |84.6%| [CC] {BAZEL_UPLOAD} 
$(S)/ydb/public/sdk/cpp/src/client/topic/ut/describe_topic_ut.cpp |84.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/tools/query_replay/query_compiler.cpp |84.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/tools/query_replay/query_compiler.cpp |84.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/join/kqp_index_lookup_join_ut.cpp |84.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/join/kqp_index_lookup_join_ut.cpp |84.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/scan/kqp_split_ut.cpp |84.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/scan/kqp_split_ut.cpp |84.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/persqueue/ut/pq_ut.cpp |84.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/ut/pq_ut.cpp |84.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/scan/kqp_flowcontrol_ut.cpp |84.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/scan/kqp_flowcontrol_ut.cpp |84.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_cdc_stream_reboots/ut_cdc_stream_reboots.cpp |84.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_cdc_stream_reboots/ut_cdc_stream_reboots.cpp |84.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/scheme/kqp_acl_ut.cpp |84.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/scheme/kqp_acl_ut.cpp |84.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_serverless/ut_serverless.cpp |84.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_serverless/ut_serverless.cpp |84.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/control/immediate_control_board_actor_ut.cpp |84.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/control/immediate_control_board_actor_ut.cpp |84.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kafka_proxy/ut/ut_transaction_coordinator.cpp |84.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kafka_proxy/ut/ut_transaction_coordinator.cpp |84.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/quoter/ut_helpers.cpp |84.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/quoter/ut_helpers.cpp |84.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/dsproxy/ut/dsproxy_quorum_tracker_ut.cpp |84.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/dsproxy/ut/dsproxy_quorum_tracker_ut.cpp |84.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/retry_policy_ut.cpp |84.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/retry_policy_ut.cpp |84.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/coordinator/coordinator_volatile_ut.cpp |84.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/coordinator/coordinator_volatile_ut.cpp |84.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/opt/kqp_not_null_ut.cpp |84.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/opt/kqp_not_null_ut.cpp |84.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/tenant_ut_local.cpp |84.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/tenant_ut_local.cpp |84.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/viewer/topic_data_ut.cpp |84.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/viewer/topic_data_ut.cpp |84.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/console/configs_cache_ut.cpp |84.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/sequenceproxy/sequenceproxy_ut.cpp |84.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/configs_cache_ut.cpp |84.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/sequenceproxy/sequenceproxy_ut.cpp |84.6%| 
[CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/data/kqp_read_null_ut.cpp |84.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/data/kqp_read_null_ut.cpp |84.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/viewer/viewer_ut.cpp |84.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/viewer/viewer_ut.cpp |84.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/olap/sparsed_ut.cpp |84.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/olap/sparsed_ut.cpp |84.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/metadata/secret/ut/ut_secret.cpp |84.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/secret/ut/ut_secret.cpp |84.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/tests/fq/control_plane_storage/ydb_control_plane_storage_internal_ut.cpp |84.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/tests/fq/control_plane_storage/ydb_control_plane_storage_internal_ut.cpp |84.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_index_build_reboots/ut_index_build_reboots.cpp |84.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_index_build_reboots/ut_index_build_reboots.cpp |84.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kesus/tablet/ut_helpers.cpp |84.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kesus/tablet/ut_helpers.cpp |84.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/tx_proxy/schemereq_ut.cpp |84.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/tx_proxy/schemereq_ut.cpp |84.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/ut_fat/blobstorage_node_warden_ut_fat.cpp |84.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/ut_fat/blobstorage_node_warden_ut_fat.cpp |84.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/client/object_storage_listing_ut.cpp |84.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/client/object_storage_listing_ut.cpp |84.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/persqueue/ut/list_all_topics_ut.cpp |84.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/ut/list_all_topics_ut.cpp |84.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/dsproxy/ut/dsproxy_request_reporting_ut.cpp |84.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/dsproxy/ut/dsproxy_request_reporting_ut.cpp |84.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/bscontroller/mv_object_map_ut.cpp |84.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/bscontroller/mv_object_map_ut.cpp |84.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/olap/kqp_olap_ut.cpp |84.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/olap/kqp_olap_ut.cpp |84.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/ymq/actor/ut/metering_ut.cpp |84.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ymq/actor/ut/metering_ut.cpp |84.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_user_attributes_reboots/ut_user_attributes_reboots.cpp |84.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_user_attributes_reboots/ut_user_attributes_reboots.cpp |84.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_ut_stats.cpp |84.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_stats.cpp |84.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_external_data_source_reboots/ut_external_data_source_reboots.cpp |84.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_external_data_source_reboots/ut_external_data_source_reboots.cpp |84.7%| [CC] {default-linux-x86_64, release, asan} 
$(S)/ydb/core/actorlib_impl/test_protocols_ut.cpp |84.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/actorlib_impl/test_protocols_ut.cpp |84.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/sys_view/ut_large.cpp |84.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/sys_view/ut_large.cpp |84.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/olap/json_ut.cpp |84.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/olap/json_ut.cpp |84.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/statistics/aggregator/ut/ut_analyze_datashard.cpp |84.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/statistics/aggregator/ut/ut_analyze_datashard.cpp |84.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/read_session_ut.cpp |84.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/read_session_ut.cpp |84.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_ut_read_iterator_ext_blobs.cpp |84.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_read_iterator_ext_blobs.cpp |84.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/keyvalue/keyvalue_collector_ut.cpp |84.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/keyvalue/keyvalue_collector_ut.cpp |84.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/library/ycloud/impl/folder_service_ut.cpp |84.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/library/table_creator/table_creator_ut.cpp |84.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/library/ycloud/impl/folder_service_ut.cpp |84.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/library/table_creator/table_creator_ut.cpp |84.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/replication.cpp |84.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/replication.cpp |84.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/repl/blobstorage_replrecoverymachine_ut.cpp |84.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/repl/blobstorage_replrecoverymachine_ut.cpp |84.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/persqueue/ut/pqrb_describes_ut.cpp |84.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/ut/pqrb_describes_ut.cpp |84.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/olap/tiering_ut.cpp |84.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/olap/tiering_ut.cpp |84.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/hulldb/base/hullds_generic_it_ut.cpp |84.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/hulldb/base/hullds_generic_it_ut.cpp |84.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/dsproxy/ut/dsproxy_get_ut.cpp |84.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/dsproxy/ut/dsproxy_get_ut.cpp |84.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/service/kqp_service_ut.cpp |84.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/service/kqp_service_ut.cpp |84.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_ut_kqp_scan.cpp |84.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_kqp_scan.cpp |84.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/fq/libs/common/cache_ut.cpp |84.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/fq/libs/common/cache_ut.cpp |84.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/ut/ut_program.cpp |84.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/ut/ut_program.cpp |84.8%| [CC] {default-linux-x86_64, release, 
asan} $(S)/ydb/core/tx/schemeshard/ut_subdomain/ut_subdomain.cpp |84.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/load_test/ut/group_test_ut.cpp |84.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/load_test/ut/group_test_ut.cpp |84.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_subdomain/ut_subdomain.cpp |84.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/fq/libs/common/entity_id_ut.cpp |84.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/fq/libs/common/entity_id_ut.cpp |84.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/load_test/ut_ycsb.cpp |84.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/load_test/ut_ycsb.cpp |84.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/balancing.cpp |84.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/batch_operations/kqp_batch_update_ut.cpp |84.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/batch_operations/kqp_batch_update_ut.cpp |84.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/balancing.cpp |84.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/ydb/backup_ut/ydb_backup_ut.cpp |84.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/ydb/backup_ut/ydb_backup_ut.cpp |84.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/provider/yql_kikimr_gateway_ut.cpp |84.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/provider/yql_kikimr_gateway_ut.cpp |84.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/service/kqp_qs_scripts_ut.cpp |84.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/service/kqp_qs_scripts_ut.cpp |84.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/tools/stress_tool/device_test_tool_ut.cpp |84.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/tools/stress_tool/device_test_tool_ut.cpp |84.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/race.cpp |84.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/race.cpp |84.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_stats/ut_stats.cpp |84.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_stats/ut_stats.cpp |84.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/dsproxy/ut_ftol/dsproxy_fault_tolerance_ut.cpp |84.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/dsproxy/ut_ftol/dsproxy_fault_tolerance_ut.cpp |84.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/federated_query/generic_ut/kqp_generic_provider_ut.cpp |84.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/federated_query/generic_ut/kqp_generic_provider_ut.cpp |84.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/persqueue/ut/slow/autopartitioning_ut.cpp |84.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/ut/slow/autopartitioning_ut.cpp |84.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/tx_allocator/txallocator_ut_helpers.cpp |84.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/tx_allocator/txallocator_ut_helpers.cpp |84.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/backup/impl/local_partition_reader_ut.cpp |84.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/backup/impl/local_partition_reader_ut.cpp |84.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/replication/controller/stream_creator_ut.cpp |84.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/replication/controller/stream_creator_ut.cpp |84.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/console/console_ut_configs.cpp |84.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/console_ut_configs.cpp |84.8%| [CC] 
{default-linux-x86_64, release, asan} $(S)/ydb/core/persqueue/dread_cache_service/ut/caching_proxy_ut.cpp |84.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/dread_cache_service/ut/caching_proxy_ut.cpp |84.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_ut_read_iterator.cpp |84.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_bsvolume/ut_bsvolume.cpp |84.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_bsvolume/ut_bsvolume.cpp |84.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_read_iterator.cpp |84.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/persqueue_v1/ut/partition_writer_cache_actor_fixture.cpp |84.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/persqueue_v1/ut/partition_writer_cache_actor_fixture.cpp |84.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/graph/shard/ut/shard_ut.cpp |84.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/graph/shard/ut/shard_ut.cpp |84.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/syncer/blobstorage_syncer_localwriter_ut.cpp |84.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/syncer/blobstorage_syncer_localwriter_ut.cpp |84.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/olap/clickbench_ut.cpp |84.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/olap/clickbench_ut.cpp |84.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_data_erasure_reboots/ut_data_erasure_reboots.cpp |84.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_data_erasure_reboots/ut_data_erasure_reboots.cpp |84.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/object_distribution_ut.cpp |84.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/object_distribution_ut.cpp |84.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/yql/kqp_pragma_ut.cpp |84.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/yql/kqp_pragma_ut.cpp |84.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/read_only_pdisk.cpp |84.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_column_build/ut_column_build.cpp |84.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_column_build/ut_column_build.cpp |84.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/read_only_pdisk.cpp |84.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_ut_change_exchange.cpp |84.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_change_exchange.cpp |84.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/apps/etcd_proxy/service/ut/etcd_service_ut.cpp |84.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/apps/etcd_proxy/service/ut/etcd_service_ut.cpp |84.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/scheme_board/ut_helpers.cpp |84.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/scheme_board/ut_helpers.cpp |84.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/olap/delete_ut.cpp |84.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/validation.cpp |84.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/olap/delete_ut.cpp |84.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/validation.cpp |84.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/bscontroller/ut_bscontroller/main.cpp |84.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/bscontroller/ut_bscontroller/main.cpp |84.9%| [CC] {default-linux-x86_64, release, asan} 
$(S)/ydb/core/kqp/ut/olap/decimal_ut.cpp |84.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/olap/decimal_ut.cpp |84.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/persqueue_cluster_discovery/cluster_discovery_service_ut.cpp |84.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/persqueue_cluster_discovery/cluster_discovery_service_ut.cpp |84.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/olap/write_ut.cpp |84.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/olap/write_ut.cpp |84.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/actorlib_impl/actor_bootstrapped_ut.cpp |84.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/actorlib_impl/actor_bootstrapped_ut.cpp |84.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_bsvolume_reboots/ut_bsvolume_reboots.cpp |84.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_bsvolume_reboots/ut_bsvolume_reboots.cpp |84.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/federated_query/s3/s3_recipe_ut_helpers.cpp |84.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/federated_query/s3/s3_recipe_ut_helpers.cpp |84.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/yql/kqp_scripting_ut.cpp |84.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/yql/kqp_scripting_ut.cpp |84.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/replication/ydb_proxy/ydb_proxy_ut.cpp |84.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/ydb/ydb_table_split_ut.cpp |84.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/ydb/ydb_table_split_ut.cpp |84.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/replication/ydb_proxy/ydb_proxy_ut.cpp |84.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/persqueue_v1/first_class_src_ids_ut.cpp |84.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/persqueue_v1/first_class_src_ids_ut.cpp |84.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_continuous_backup/ut_continuous_backup.cpp |84.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_continuous_backup/ut_continuous_backup.cpp |84.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/retry_policy_ut.cpp |84.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/retry_policy_ut.cpp |84.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/persqueue_v1/topic_yql_ut.cpp |84.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/persqueue_v1/topic_yql_ut.cpp |84.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/opt/kqp_ne_ut.cpp |84.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/opt/kqp_ne_ut.cpp |84.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/ydb/ydb_register_node_ut.cpp |84.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/ydb/ydb_register_node_ut.cpp |84.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/fq/libs/common/iceberg_processor_ut.cpp |84.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/fq/libs/common/iceberg_processor_ut.cpp |84.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/decommit_3dc.cpp |84.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/decommit_3dc.cpp |84.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/tx_proxy/proxy_ut.cpp |84.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/tx_proxy/proxy_ut.cpp |84.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/statistics/service/ut/ut_http_request.cpp |84.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/statistics/service/ut/ut_http_request.cpp |84.9%| 
[CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/restart_pdisk.cpp |84.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/restart_pdisk.cpp |84.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/statistics/service/ut/ut_aggregation/ut_aggregate_statistics.cpp |85.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/statistics/service/ut/ut_aggregation/ut_aggregate_statistics.cpp |84.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_ut_snapshot.cpp |85.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_snapshot.cpp |84.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_ut_minstep.cpp |85.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_minstep.cpp |84.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/replication/controller/assign_tx_id_ut.cpp |85.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_backup_collection_reboots/ut_backup_collection_reboots.cpp |85.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_backup_collection_reboots/ut_backup_collection_reboots.cpp |85.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/replication/controller/assign_tx_id_ut.cpp |85.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/ymq/actor/yc_search_ut/index_events_processor_ut.cpp |85.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ymq/actor/yc_search_ut/index_events_processor_ut.cpp |85.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/persqueue/ut/partition_chooser_ut.cpp |85.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/ut/partition_chooser_ut.cpp |85.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_extsubdomain_reboots/ut_extsubdomain_reboots.cpp |85.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_extsubdomain_reboots/ut_extsubdomain_reboots.cpp |85.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/ydb/ydb_index_table_ut.cpp |85.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/ydb/ydb_index_table_ut.cpp |85.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/ydb/ydb_ldap_login_ut.cpp |85.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/ydb/ydb_ldap_login_ut.cpp |85.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/public/sdk/cpp/src/client/federated_topic/ut/basic_usage_ut.cpp |85.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/public/sdk/cpp/src/client/federated_topic/ut/basic_usage_ut.cpp |85.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/persqueue/ut/ut_with_sdk/autoscaling_ut.cpp |85.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/idx_test/ydb_index_ut.cpp |85.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/ut/ut_with_sdk/autoscaling_ut.cpp |85.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/idx_test/ydb_index_ut.cpp |85.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/address_classification/net_classifier_ut.cpp |85.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/address_classification/net_classifier_ut.cpp |85.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/sync.cpp |85.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/sync.cpp |85.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/self_heal.cpp |85.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/self_heal.cpp |85.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/multiget.cpp |85.0%| [CC] {BAZEL_UPLOAD} 
$(S)/ydb/core/blobstorage/ut_blobstorage/multiget.cpp |85.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/ydb/ydb_coordination_ut.cpp |85.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/ydb/ydb_coordination_ut.cpp |85.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_ut_read_table.cpp |85.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_read_table.cpp |85.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/ut/ut_insert_table.cpp |85.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/ut/ut_insert_table.cpp |85.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_rtmr_reboots/ut_rtmr_reboots.cpp |85.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_rtmr_reboots/ut_rtmr_reboots.cpp |85.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/tiering/ut/ut_object.cpp |85.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/tiering/ut/ut_object.cpp |85.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/get.cpp |85.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/get.cpp |85.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/datastreams/datastreams_ut.cpp |85.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/datastreams/datastreams_ut.cpp |85.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/keyvalue/keyvalue_ut.cpp |85.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/space_check.cpp |85.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/keyvalue/keyvalue_ut.cpp |85.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/space_check.cpp |85.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_serverless_reboots/ut_serverless_reboots.cpp |85.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_serverless_reboots/ut_serverless_reboots.cpp |85.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/config/bsconfig_ut.cpp |85.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/config/bsconfig_ut.cpp |85.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tablet_flat/ut/flat_test_db.cpp |85.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tablet_flat/ut/flat_test_db.cpp |85.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/patch.cpp |85.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/tests/tools/kqprun/kqprun.cpp |85.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/patch.cpp |85.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/tests/tools/kqprun/kqprun.cpp |85.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/effects/kqp_inplace_update_ut.cpp |85.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/effects/kqp_inplace_update_ut.cpp |85.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/donor.cpp |85.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/donor.cpp |85.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/reader/abstract/read_metadata.cpp |85.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/abstract/read_metadata.cpp |85.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/ydb/ydb_import_ut.cpp |85.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/ydb/ydb_import_ut.cpp |85.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/hulldb/barriers/barriers_tree_ut.cpp |85.1%| [CC] {BAZEL_UPLOAD} 
$(S)/ydb/core/blobstorage/vdisk/hulldb/barriers/barriers_tree_ut.cpp |85.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/statistics/database/ut/ut_database.cpp |85.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/statistics/database/ut/ut_database.cpp |85.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/block_race.cpp |85.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/block_race.cpp |85.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/get_block.cpp |85.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/tenant_node_enumeration_ut.cpp |85.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/get_block.cpp |85.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/tenant_node_enumeration_ut.cpp |85.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/persqueue_v1/ut/partition_writer_cache_actor_ut.cpp |85.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/persqueue_v1/ut/partition_writer_cache_actor_ut.cpp |85.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/assimilation.cpp |85.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/snapshots.cpp |85.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/assimilation.cpp |85.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/snapshots.cpp |85.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/rm_service/kqp_rm_ut.cpp |85.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/rm_service/kqp_rm_ut.cpp |85.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/mirror3of4.cpp |85.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/mirror3of4.cpp |85.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/olap/helpers/writer.cpp |85.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/olap/helpers/writer.cpp |85.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/index_restore_get.cpp |85.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/index_restore_get.cpp |85.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/olap/compression_ut.cpp |85.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/olap/compression_ut.cpp |85.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/persqueue_v1/ut/describes_ut/describe_topic_ut.cpp |85.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/persqueue_v1/ut/describes_ut/describe_topic_ut.cpp |85.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/hive_impl_ut.cpp |85.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/hive_impl_ut.cpp |85.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/read_session_ut.cpp |85.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/read_session_ut.cpp |85.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_move_reboots/ut_move_reboots.cpp |85.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_move_reboots/ut_move_reboots.cpp |85.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/ydb/ydb_bulk_upsert_ut.cpp |85.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_ut_kqp_errors.cpp |85.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_kqp_errors.cpp |85.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/ydb/ydb_bulk_upsert_ut.cpp |85.1%| [CC] {default-linux-x86_64, release, asan} 
$(S)/ydb/core/statistics/aggregator/ut/ut_analyze_columnshard.cpp |85.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/statistics/aggregator/ut/ut_analyze_columnshard.cpp |85.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/hulldb/base/hullds_heap_it_ut.cpp |85.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/hulldb/base/hullds_heap_it_ut.cpp |85.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/persqueue_v1/persqueue_common_new_schemecache_ut.cpp |85.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/persqueue_v1/persqueue_common_new_schemecache_ut.cpp |85.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_ut_reassign.cpp |85.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_pq_reboots/ut_pq_reboots.cpp |85.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_reassign.cpp |85.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_pq_reboots/ut_pq_reboots.cpp |85.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_ut_incremental_restore_scan.cpp |85.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_incremental_restore_scan.cpp |85.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_vdisk2/huge.cpp |85.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_vdisk2/huge.cpp |85.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/tools/stress_tool/device_test_tool.cpp |85.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/tools/stress_tool/device_test_tool.cpp |85.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_mirror3of4/main.cpp |85.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_mirror3of4/main.cpp |85.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_ut_helpers.cpp |85.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/scheme_board/subscriber_ut.cpp |85.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_ut_helpers.cpp |85.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/scheme_board/subscriber_ut.cpp |85.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_export_reboots_s3/ut_export_reboots_s3.cpp |85.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_export_reboots_s3/ut_export_reboots_s3.cpp |85.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_data_erasure/ut_data_erasure.cpp |85.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_data_erasure/ut_data_erasure.cpp |85.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tablet/tablet_pipe_ut.cpp |85.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tablet/tablet_pipe_ut.cpp |85.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_sequence_reboots/ut_sequence_reboots.cpp |85.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_sequence_reboots/ut_sequence_reboots.cpp |85.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/testlib/actors/test_runtime_ut.cpp |85.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/testlib/actors/test_runtime_ut.cpp |85.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/persqueue_v1/ut/topic_service_ut.cpp |85.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/persqueue_v1/ut/topic_service_ut.cpp |85.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/olap/indexes_ut.cpp |85.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_index_build/ut_index_build.cpp |85.2%| [CC] {BAZEL_UPLOAD} 
$(S)/ydb/core/tx/schemeshard/ut_index_build/ut_index_build.cpp |85.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/olap/indexes_ut.cpp |85.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/indexes/kqp_indexes_ut.cpp |85.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/query/kqp_explain_ut.cpp |85.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/indexes/kqp_indexes_ut.cpp |85.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/statistics/aggregator/ut/ut_traverse_datashard.cpp |85.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/query/kqp_explain_ut.cpp |85.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/statistics/aggregator/ut/ut_traverse_datashard.cpp |85.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/library/yql/tools/dqrun/lib/dqrun_lib.cpp |85.2%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/library/yql/tools/dqrun/lib/libtools-dqrun-lib.a |85.2%| [AR] {RESULT} $(B)/ydb/library/yql/tools/dqrun/lib/libtools-dqrun-lib.a |85.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tablet_flat/ut/ut_shared_sausagecache_actor.cpp |85.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/library/yql/tools/dqrun/lib/dqrun_lib.cpp |85.2%| [AR] {BAZEL_UPLOAD} $(B)/ydb/library/yql/tools/dqrun/lib/libtools-dqrun-lib.a |85.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tablet_flat/ut/ut_shared_sausagecache_actor.cpp |85.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/common/vdisk_config_ut.cpp |85.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/common/vdisk_config_ut.cpp |85.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/persqueue_v1/persqueue_common_ut.cpp |85.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/persqueue_v1/persqueue_common_ut.cpp |85.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/join/kqp_flip_join_ut.cpp |85.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/join/kqp_flip_join_ut.cpp |85.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_vdisk/vdisk_test.cpp |85.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_vdisk/vdisk_test.cpp |85.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_ut_volatile.cpp |85.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_volatile.cpp |85.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/quoter/quoter_service_ut.cpp |85.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/quoter/quoter_service_ut.cpp |85.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/olap/helpers/aggregation.cpp |85.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/olap/helpers/aggregation.cpp |85.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/ydb/ydb_ut.cpp |85.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/ydb/ydb_ut.cpp |85.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/cms/cms_ut.cpp |85.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/cms/cms_ut.cpp |85.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/external_sources/hive_metastore/ut/hive_metastore_fetcher_ut.cpp |85.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/external_sources/hive_metastore/ut/hive_metastore_fetcher_ut.cpp |85.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_vdisk/gen_restarts.cpp |85.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_vdisk/gen_restarts.cpp |85.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/query/kqp_stats_ut.cpp |85.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/query/kqp_stats_ut.cpp |85.2%| [CC] {default-linux-x86_64, release, asan} 
$(S)/ydb/core/tx/schemeshard/ut_index/ut_async_index.cpp |85.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_index/ut_async_index.cpp |85.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/skeleton/skeleton_vpatch_actor_ut.cpp |85.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/skeleton/skeleton_vpatch_actor_ut.cpp |85.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_pdiskfit/pdiskfit/pdiskfit.cpp |85.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_pdiskfit/pdiskfit/pdiskfit.cpp |85.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/sysview/kqp_sys_view_ut.cpp |85.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/sysview/kqp_sys_view_ut.cpp |85.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/persqueue/ut/pqtablet_mock.cpp |85.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/ut/pqtablet_mock.cpp |85.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/sysview/kqp_sys_col_ut.cpp |85.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/sysview/kqp_sys_col_ut.cpp |85.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/extra_block_checks.cpp |85.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/extra_block_checks.cpp |85.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/tx/kqp_tx_ut.cpp |85.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/tenant_ut_pool.cpp |85.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/tx/kqp_tx_ut.cpp |85.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/tenant_ut_pool.cpp |85.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/scheme/kqp_scheme_ut.cpp |85.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/tx/kqp_sink_mvcc_ut.cpp |85.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/tx/kqp_sink_mvcc_ut.cpp |85.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/scheme/kqp_scheme_ut.cpp |85.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tablet/tablet_pipecache_ut.cpp |85.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tablet/tablet_pipecache_ut.cpp |85.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/statistics/aggregator/ut/ut_traverse_columnshard.cpp |85.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/statistics/aggregator/ut/ut_traverse_columnshard.cpp |85.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_ut_kqp_stream_lookup.cpp |85.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_kqp_stream_lookup.cpp |85.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_ut_init.cpp |85.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_init.cpp |85.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/arrow/kqp_arrow_in_channels_ut.cpp |85.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/arrow/kqp_arrow_in_channels_ut.cpp |85.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/bscontroller/group_mapper_ut.cpp |85.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/bscontroller/group_mapper_ut.cpp |85.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/fq/libs/actors/ut/database_resolver_ut.cpp |85.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/fq/libs/actors/ut/database_resolver_ut.cpp |85.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_vdisk/huge_migration_ut.cpp |85.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_vdisk/huge_migration_ut.cpp |85.3%| [CC] {default-linux-x86_64, release, asan} 
$(S)/ydb/core/tx/columnshard/engines/reader/abstract/constructor.cpp |85.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/abstract/constructor.cpp |85.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/nodewarden/blobstorage_node_warden_ut.cpp |85.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/nodewarden/blobstorage_node_warden_ut.cpp |85.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/indexes/kqp_indexes_multishard_ut.cpp |85.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/persqueue/ut/counters_ut.cpp |85.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/indexes/kqp_indexes_multishard_ut.cpp |85.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/ut/counters_ut.cpp |85.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tablet_flat/flat_executor_ut_large.cpp |85.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/library/ycloud/impl/access_service_ut.cpp |85.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tablet_flat/flat_executor_ut_large.cpp |85.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/library/ycloud/impl/access_service_ut.cpp |85.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/olap/optimizer_ut.cpp |85.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/olap/optimizer_ut.cpp |85.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/replication/service/table_writer_ut.cpp |85.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/replication/service/table_writer_ut.cpp |85.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/dsproxy/ut/dsproxy_sequence_ut.cpp |85.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/dsproxy/ut/dsproxy_sequence_ut.cpp |85.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/compress_executor_ut.cpp |85.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/compress_executor_ut.cpp |85.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/persqueue/ut/sourceid_ut.cpp |85.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/ut/sourceid_ut.cpp |85.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tablet_flat/flat_executor_ut.cpp |85.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tablet_flat/flat_executor_ut.cpp |85.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/skeleton/skeleton_oos_logic_ut.cpp |85.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/skeleton/skeleton_oos_logic_ut.cpp |85.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/tiering/ut/ut_tiers.cpp |85.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/tiering/ut/ut_tiers.cpp |85.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/sys_view/ut_common.cpp |85.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/sys_view/ut_common.cpp |85.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_background_cleaning/ut_background_cleaning.cpp |85.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_background_cleaning/ut_background_cleaning.cpp |85.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/ext_index/ut/ut_ext_index.cpp |85.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/ext_index/ut/ut_ext_index.cpp |85.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tablet/tablet_req_blockbs_ut.cpp |85.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tablet/tablet_req_blockbs_ut.cpp |85.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/deadlines.cpp |85.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/deadlines.cpp |85.4%| 
[CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/opt/kqp_extract_predicate_unpack_ut.cpp |85.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/opt/kqp_extract_predicate_unpack_ut.cpp |85.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/tools/query_replay_yt/main.cpp |85.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_ut_erase_rows.cpp |85.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/tools/query_replay_yt/main.cpp |85.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_erase_rows.cpp |85.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_base_reboots/ut_base_reboots.cpp |85.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_base_reboots/ut_base_reboots.cpp |85.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_external_table_reboots/ut_external_table_reboots.cpp |85.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_external_table_reboots/ut_external_table_reboots.cpp |85.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/scheme_board/ut_helpers.cpp |85.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/scheme_board/ut_helpers.cpp |85.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/metadata/initializer/ut/ut_init.cpp |85.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/metadata/initializer/ut/ut_init.cpp |85.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_ut_ext_blobs_multiple_channels.cpp |85.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_ext_blobs_multiple_channels.cpp |85.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_login_large/ut_login_large.cpp |85.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_login_large/ut_login_large.cpp |85.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/sys_view/ut_common.cpp |85.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/sys_view/ut_common.cpp |85.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/ut_helpers.cpp |85.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/ut_helpers.cpp |85.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kafka_proxy/ut/actors_ut.cpp |85.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_ut_locks.cpp |85.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kafka_proxy/ut/actors_ut.cpp |85.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_locks.cpp |85.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/dsproxy/ut/dsproxy_put_ut.cpp |85.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/dsproxy/ut/dsproxy_put_ut.cpp |85.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_base/ut_commit_redo_limit.cpp |85.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_base/ut_commit_redo_limit.cpp |85.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/replication_huge.cpp |85.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/replication_huge.cpp |85.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_filestore_reboots/ut_filestore_reboots.cpp |85.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_filestore_reboots/ut_filestore_reboots.cpp |85.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/persqueue/ut/ut_with_sdk/commitoffset_ut.cpp |85.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/ut/ut_with_sdk/commitoffset_ut.cpp |85.4%| [CC] {default-linux-x86_64, release, asan} 
$(S)/ydb/core/tx/schemeshard/ut_index_build/ut_vector_index_build.cpp |85.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_index_build/ut_vector_index_build.cpp |85.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/ydb/ydb_read_rows_ut.cpp |85.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/indexes/kqp_indexes_vector_ut.cpp |85.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/ydb/ydb_read_rows_ut.cpp |85.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/indexes/kqp_indexes_vector_ut.cpp |85.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/tx_proxy/encrypted_storage_ut.cpp |85.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/tx_proxy/encrypted_storage_ut.cpp |85.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/persqueue/ut/user_info_ut.cpp |85.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/ut/user_info_ut.cpp |85.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/proxy_service/kqp_proxy_ut.cpp |85.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/proxy_service/kqp_proxy_ut.cpp |85.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/ut_rw/ut_normalizer.cpp |85.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/ut_rw/ut_normalizer.cpp |85.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/query/kqp_analyze_ut.cpp |85.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/query/kqp_analyze_ut.cpp |85.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/tx_proxy/proxy_ut_helpers.cpp |85.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/tx_proxy/proxy_ut_helpers.cpp |85.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/hulldb/fresh/fresh_appendix_ut.cpp |85.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/hulldb/fresh/fresh_appendix_ut.cpp |85.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/synclog/blobstorage_synclogkeeper_ut.cpp |85.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/synclog/blobstorage_synclogkeeper_ut.cpp |85.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/vdisk_malfunction.cpp |85.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/vdisk_malfunction.cpp |85.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/counting_events.cpp |85.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/counting_events.cpp |85.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/tx_allocator_client/actor_client_ut.cpp |85.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/tx_allocator_client/actor_client_ut.cpp |85.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/ut_helpers.cpp |85.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/ut_helpers.cpp |85.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/ut_rw/ut_columnshard_read_write.cpp |85.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/ut_rw/ut_columnshard_read_write.cpp |85.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/basic_usage_ut.cpp |85.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/basic_usage_ut.cpp |85.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/replication/controller/target_discoverer_ut.cpp |85.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/replication/controller/target_discoverer_ut.cpp |85.5%| [CC] {default-linux-x86_64, release, asan} 
$(S)/ydb/core/blobstorage/ut_blobstorage/stop_pdisk.cpp |85.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/stop_pdisk.cpp |85.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/rate_limiter/rate_limiter_ut.cpp |85.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/rate_limiter/rate_limiter_ut.cpp |85.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kesus/proxy/proxy_actor_ut.cpp |85.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kesus/proxy/proxy_actor_ut.cpp |85.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blob_depot/given_id_range_ut.cpp |85.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blob_depot/given_id_range_ut.cpp |85.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/opt/kqp_kv_ut.cpp |85.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/scheme_board/populator_ut.cpp |85.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_ut_common_pq.cpp |85.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/scheme_board/populator_ut.cpp |85.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_common_pq.cpp |85.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/opt/kqp_kv_ut.cpp |85.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kesus/tablet/tablet_ut.cpp |85.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kesus/tablet/tablet_ut.cpp |85.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/security/ldap_auth_provider/ldap_auth_provider_ut.cpp |85.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/security/ldap_auth_provider/ldap_auth_provider_ut.cpp |85.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/console/configs_dispatcher_ut.cpp |85.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/configs_dispatcher_ut.cpp |85.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/hullop/blobstorage_readbatch_ut.cpp |85.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/hullop/blobstorage_readbatch_ut.cpp |85.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/scheme_board/monitoring_ut.cpp |85.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/scheme_board/monitoring_ut.cpp |85.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/sys_view/partition_stats/partition_stats_ut.cpp |85.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/sys_view/partition_stats/partition_stats_ut.cpp |85.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_backup_collection/ut_backup_collection.cpp |85.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_backup_collection/ut_backup_collection.cpp |85.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/ydb/ydb_query_ut.cpp |85.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/olap/blobs_sharing_ut.cpp |85.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/ydb/ydb_query_ut.cpp |85.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/olap/blobs_sharing_ut.cpp |85.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/actorlib_impl/actor_activity_ut.cpp |85.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/actorlib_impl/actor_activity_ut.cpp |85.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/ydb/ydb_logstore_ut.cpp |85.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/query/kqp_query_ut.cpp |85.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/hulldb/cache_block/cache_block_ut.cpp |85.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/ydb/ydb_logstore_ut.cpp |85.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/hulldb/cache_block/cache_block_ut.cpp |85.5%| 
[CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/query/kqp_query_ut.cpp |85.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/health_check/health_check_ut.cpp |85.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/health_check/health_check_ut.cpp |85.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/quoter/kesus_quoter_ut.cpp |85.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/quoter/kesus_quoter_ut.cpp |85.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/ydb/ydb_monitoring_ut.cpp |85.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/ydb/ydb_monitoring_ut.cpp |85.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/ydb/ydb_stats_ut.cpp |85.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/ydb/ydb_stats_ut.cpp |85.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/base/board_subscriber_ut.cpp |85.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/base/board_subscriber_ut.cpp |85.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/huge/blobstorage_hullhugeheap_ctx_ut.cpp |85.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/keyvalue/keyvalue_ut_trace.cpp |85.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/huge/blobstorage_hullhugeheap_ctx_ut.cpp |85.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/keyvalue/keyvalue_ut_trace.cpp |85.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/tx_proxy/proxy_ut_helpers.cpp |85.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/cms_tenants_ut.cpp |85.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/tx_proxy/proxy_ut_helpers.cpp |85.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/cms_tenants_ut.cpp |85.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/federated_query/s3/kqp_s3_plan_ut.cpp |85.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/fq/libs/checkpointing/ut/checkpoint_coordinator_ut.cpp |85.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/federated_query/s3/kqp_s3_plan_ut.cpp |85.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/fq/libs/checkpointing/ut/checkpoint_coordinator_ut.cpp |85.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/client/cancel_tx_ut.cpp |85.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/client/cancel_tx_ut.cpp |85.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/scheme_board/ut_helpers.cpp |85.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/scheme_board/ut_helpers.cpp |85.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/huge.cpp |85.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/huge.cpp |85.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/replication/service/worker_ut.cpp |85.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/replication/service/worker_ut.cpp |85.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/persqueue_v1/ut/rate_limiter_test_setup.cpp |85.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/persqueue_v1/ut/rate_limiter_test_setup.cpp |85.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/hulldb/base/blobstorage_hullsatisfactionrank_ut.cpp |85.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/hulldb/base/blobstorage_hullsatisfactionrank_ut.cpp |85.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/persqueue/ut/slow/pq_ut.cpp |85.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/ut/slow/pq_ut.cpp |85.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/library/ycloud/impl/service_account_service_ut.cpp |85.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/library/ycloud/impl/service_account_service_ut.cpp |85.6%| [CC] {default-linux-x86_64, 
release, asan} $(S)/ydb/core/keyvalue/keyvalue_storage_read_request_ut.cpp |85.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/keyvalue/keyvalue_storage_read_request_ut.cpp |85.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/client/locks_ut.cpp |85.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/pg/pg_catalog_ut.cpp |85.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/client/locks_ut.cpp |85.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/pg/pg_catalog_ut.cpp |85.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/federated_query/s3/kqp_federated_scheme_ut.cpp |85.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/federated_query/s3/kqp_federated_scheme_ut.cpp |85.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_subdomain_reboots/ut_subdomain_reboots.cpp |85.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_subdomain_reboots/ut_subdomain_reboots.cpp |85.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/node_broker_ut.cpp |85.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/node_broker_ut.cpp |85.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_ttl/ut_ttl.cpp |85.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_ttl/ut_ttl.cpp |85.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/external_sources/s3/ut/s3_aws_credentials_ut.cpp |85.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/external_sources/s3/ut/s3_aws_credentials_ut.cpp |85.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/scrub.cpp |85.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/scrub.cpp |85.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/compression_ut.cpp |85.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/compression_ut.cpp |85.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/fq/libs/common/rows_proto_splitter_ut.cpp |85.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/fq/libs/common/rows_proto_splitter_ut.cpp |85.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_base/ut_info_types.cpp |85.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_base/ut_info_types.cpp |85.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/ut/ut_script.cpp |85.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/ut/ut_script.cpp |85.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/persqueue/ut/pqtablet_ut.cpp |85.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/ut/pqtablet_ut.cpp |85.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_olap/ut_olap.cpp |85.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_olap/ut_olap.cpp |85.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/basic_usage_ut.cpp |85.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/basic_usage_ut.cpp |85.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/service/kqp_document_api_ut.cpp |85.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/mirror3dc.cpp |85.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/service/kqp_document_api_ut.cpp |85.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/mirror3dc.cpp |85.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/public/sdk/cpp/src/client/topic/ut/basic_usage_ut.cpp |85.7%| [CC] {BAZEL_UPLOAD} 
$(S)/ydb/public/sdk/cpp/src/client/topic/ut/basic_usage_ut.cpp |85.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/replication/controller/dst_creator_ut.cpp |85.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/replication/controller/dst_creator_ut.cpp |85.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_blockdevice_ut.cpp |85.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_ut_change_collector.cpp |85.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_blockdevice_ut.cpp |85.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_change_collector.cpp |85.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_ut_run.cpp |85.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_ut_run.cpp |85.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_sequence/ut_sequence.cpp |85.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_sequence/ut_sequence.cpp |85.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/ut_rw/ut_backup.cpp |85.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/ut_rw/ut_backup.cpp |85.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_ut_incremental_backup.cpp |85.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_incremental_backup.cpp |85.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/ydb/ydb_bulk_upsert_olap_ut.cpp |85.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/ydb/ydb_bulk_upsert_olap_ut.cpp |85.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/perf/kqp_workload_ut.cpp |85.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/perf/kqp_workload_ut.cpp |85.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/tx_proxy/proxy_ut_helpers.cpp |85.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/tx_proxy/proxy_ut_helpers.cpp |85.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/tx/kqp_snapshot_isolation_ut.cpp |85.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/tx/kqp_snapshot_isolation_ut.cpp |85.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/statistics/service/ut/ut_basic_statistics.cpp |85.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/statistics/service/ut/ut_basic_statistics.cpp |85.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/cluster_info_ut.cpp |85.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/huge/blobstorage_hullhuge_ut.cpp |85.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/huge/blobstorage_hullhuge_ut.cpp |85.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/cluster_info_ut.cpp |85.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/opt/kqp_sort_ut.cpp |85.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/opt/kqp_sort_ut.cpp |85.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/actorlib_impl/actor_tracker_ut.cpp |85.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/actorlib_impl/actor_tracker_ut.cpp |85.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/scheme_board/ut_helpers.cpp |85.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/scheme_board/ut_helpers.cpp |85.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tablet_flat/ut/ut_rename_table_column.cpp |85.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tablet_flat/ut/ut_rename_table_column.cpp |85.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/ut_helpers.cpp |85.7%| [CC] 
{BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/ut_helpers.cpp |85.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/grpc_streaming/grpc_streaming_ut.cpp |85.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/grpc_streaming/grpc_streaming_ut.cpp |85.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_group/main.cpp |85.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_group/main.cpp |85.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/gc.cpp |85.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/incorrect_queries.cpp |85.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/gc.cpp |85.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/incorrect_queries.cpp |85.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/opt/kqp_returning_ut.cpp |85.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/opt/kqp_returning_ut.cpp |85.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/bscontroller/grouper_ut.cpp |85.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/bscontroller/grouper_ut.cpp |85.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/opt/kqp_sqlin_ut.cpp |85.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/opt/kqp_sqlin_ut.cpp |85.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/persqueue_v1/ut/describes_ut/ic_cache_ut.cpp |85.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/persqueue_v1/ut/describes_ut/ic_cache_ut.cpp |85.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/opt/kqp_named_expressions_ut.cpp |85.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/opt/kqp_named_expressions_ut.cpp |85.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_view/ut_view.cpp |85.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_view/ut_view.cpp |85.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/main.cpp |85.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/main.cpp |85.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tablet_flat/ut/ut_datetime.cpp |85.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/tests/fq/control_plane_storage/ydb_control_plane_storage_bindings_ut.cpp |85.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tablet_flat/ut/ut_datetime.cpp |85.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/tests/fq/control_plane_storage/ydb_control_plane_storage_bindings_ut.cpp |85.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/group_reconfiguration.cpp |85.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/group_reconfiguration.cpp |85.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/dsproxy/ut/dsproxy_patch_ut.cpp |85.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/dsproxy/ut/dsproxy_patch_ut.cpp |85.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/hulldb/base/hullbase_barrier_ut.cpp |85.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/hulldb/base/hullbase_barrier_ut.cpp |85.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/olap/locks_ut.cpp |85.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/hive_ut.cpp |85.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/olap/locks_ut.cpp |85.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/hive_ut.cpp |85.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/dsproxy/ut_strategy/strategy_ut.cpp |85.8%| [CC] {BAZEL_UPLOAD} 
$(S)/ydb/core/blobstorage/dsproxy/ut_strategy/strategy_ut.cpp |85.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/ut_schema/ut_columnshard_schema.cpp |85.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/ut_schema/ut_columnshard_schema.cpp |85.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/hulldb/generic/hullds_sst_it_all_ut.cpp |85.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/hulldb/generic/hullds_sst_it_all_ut.cpp |85.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_backup/ut_backup.cpp |85.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_backup/ut_backup.cpp |85.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/hive/scale_recommender_policy_ut.cpp |85.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/scale_recommender_policy_ut.cpp |85.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/persqueue/ut/ut_with_sdk/mirrorer_ut.cpp |85.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/ut/ut_with_sdk/mirrorer_ut.cpp |85.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_index/ut_unique_index.cpp |85.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_index/ut_unique_index.cpp |85.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/common/vdisk_pdisk_error_ut.cpp |85.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/common/vdisk_pdisk_error_ut.cpp |85.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/persqueue_v1/ut/demo_tx.cpp |85.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/persqueue_v1/ut/demo_tx.cpp |85.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_extsubdomain/ut_extsubdomain.cpp |85.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_extsubdomain/ut_extsubdomain.cpp |85.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/backpressure/queue_backpressure_server_ut.cpp |85.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/backpressure/queue_backpressure_server_ut.cpp |85.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/fq/libs/checkpoint_storage/ut/gc_ut.cpp |85.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/fq/libs/checkpoint_storage/ut/gc_ut.cpp |85.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/defrag/defrag_actor_ut.cpp |85.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/defrag/defrag_actor_ut.cpp |85.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/ymq/actor/yc_search_ut/test_events_writer.cpp |85.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ymq/actor/yc_search_ut/test_events_writer.cpp |85.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/http_proxy/ut/kinesis_ut.cpp |85.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/http_proxy/ut/kinesis_ut.cpp |85.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/opt/kqp_merge_ut.cpp |85.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/opt/kqp_merge_ut.cpp |85.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/compress_executor_ut.cpp |85.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/compress_executor_ut.cpp |85.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/query/kqp_types_ut.cpp |85.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/query/kqp_types_ut.cpp |85.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/ds_proxy_lwtrace.cpp |85.8%| [CC] {BAZEL_UPLOAD} 
$(S)/ydb/core/blobstorage/ut_blobstorage/ds_proxy_lwtrace.cpp |85.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kafka_proxy/ut/metarequest_ut.cpp |85.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kafka_proxy/ut/metarequest_ut.cpp |85.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/olap/aggregations_ut.cpp |85.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/public/sdk/cpp/src/client/topic/ut/topic_to_table_ut.cpp |85.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/olap/aggregations_ut.cpp |85.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/public/sdk/cpp/src/client/topic/ut/topic_to_table_ut.cpp |85.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/mvp/oidc_proxy/oidc_proxy_ut.cpp |85.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/mvp/oidc_proxy/oidc_proxy_ut.cpp |85.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/cost/kqp_cost_ut.cpp |85.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/cost/kqp_cost_ut.cpp |85.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tablet_flat/flat_executor_database_ut.cpp |85.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tablet_flat/flat_executor_database_ut.cpp |85.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tablet_flat/ut/ut_shared_sausagecache.cpp |85.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tablet_flat/ut/ut_shared_sausagecache.cpp |85.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/recovery.cpp |85.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/recovery.cpp |85.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_transfer/ut_transfer.cpp |85.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_transfer/ut_transfer.cpp |85.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/effects/kqp_immediate_effects_ut.cpp |85.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/tx/kqp_mvcc_ut.cpp |85.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/tx/kqp_mvcc_ut.cpp |85.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/effects/kqp_immediate_effects_ut.cpp |85.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_external_data_source/ut_external_data_source.cpp |85.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_external_data_source/ut_external_data_source.cpp |85.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/hulldb/compstrat/hulldb_compstrat_ut.cpp |85.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/hulldb/compstrat/hulldb_compstrat_ut.cpp |85.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_cdc_stream/ut_cdc_stream.cpp |85.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_cdc_stream/ut_cdc_stream.cpp |85.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/blob_depot_fat.cpp |85.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/blob_depot_fat.cpp |85.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kesus/proxy/ut_helpers.cpp |85.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kesus/proxy/ut_helpers.cpp |85.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_split_merge/ut_split_merge.cpp |85.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/persqueue_v1/ut/pqtablet_mock.cpp |85.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/persqueue_v1/ut/pqtablet_mock.cpp |85.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_split_merge/ut_split_merge.cpp |85.9%| [CC] {default-linux-x86_64, release, asan} 
$(S)/ydb/core/blobstorage/ut_blobstorage/monitoring.cpp |85.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/monitoring.cpp |85.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/olap/helpers/typed_local.cpp |85.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/query/kqp_limits_ut.cpp |85.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/olap/helpers/typed_local.cpp |85.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/query/kqp_limits_ut.cpp |85.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/wrappers/s3_wrapper_ut.cpp |85.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/wrappers/s3_wrapper_ut.cpp |85.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/cms_ut_common.cpp |85.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/cms_ut_common.cpp |85.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/indexes/kqp_indexes_prefixed_vector_ut.cpp |85.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/indexes/kqp_indexes_prefixed_vector_ut.cpp |85.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/backpressure/queue_backpressure_client_ut.cpp |85.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/osiris.cpp |85.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/backpressure/queue_backpressure_client_ut.cpp |85.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/osiris.cpp |85.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/blob_depot_event_managers.cpp |85.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/blob_depot_event_managers.cpp |85.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/quoter/quoter_service_bandwidth_test/main.cpp |85.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/quoter/quoter_service_bandwidth_test/main.cpp |85.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_split_merge_reboots/ut_split_merge_reboots.cpp |85.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_split_merge_reboots/ut_split_merge_reboots.cpp |85.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/cms_maintenance_api_ut.cpp |85.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/cms_maintenance_api_ut.cpp |85.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/acceleration.cpp |85.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/acceleration.cpp |85.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/tx_allocator_client/ut_helpers.cpp |85.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/tx_allocator_client/ut_helpers.cpp |85.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/tests/tools/fqrun/src/fq_setup.cpp |85.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/tests/tools/fqrun/src/fq_setup.cpp |85.9%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/tests/tools/fqrun/src/libtools-fqrun-src.a |85.9%| [AR] {RESULT} $(B)/ydb/tests/tools/fqrun/src/libtools-fqrun-src.a |86.0%| [AR] {BAZEL_UPLOAD} $(B)/ydb/tests/tools/fqrun/src/libtools-fqrun-src.a |85.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/tests/fq/control_plane_storage/ydb_control_plane_storage_bindings_permissions_ut.cpp |86.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/tests/fq/control_plane_storage/ydb_control_plane_storage_bindings_permissions_ut.cpp |86.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/vdisk/hulldb/fresh/fresh_segment_ut.cpp |86.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/vdisk/hulldb/fresh/fresh_segment_ut.cpp |86.0%| [CC] {default-linux-x86_64, 
release, asan} $(S)/ydb/services/ydb/ydb_olapstore_ut.cpp |86.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/ydb/ydb_olapstore_ut.cpp |86.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/library/ncloud/impl/access_service_ut.cpp |86.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/library/ncloud/impl/access_service_ut.cpp |86.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/sanitize_groups.cpp |86.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/sanitize_groups.cpp |86.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/provider/yql_kikimr_provider_ut.cpp |86.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/provider/yql_kikimr_provider_ut.cpp |86.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_base/ut_base.cpp |86.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_base/ut_base.cpp |86.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/quoter/quoter_service_bandwidth_test/server.cpp |86.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/quoter/quoter_service_bandwidth_test/server.cpp |86.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/tests/tools/fqrun/fqrun.cpp |86.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/tests/tools/fqrun/fqrun.cpp |86.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tablet_flat/ut/ut_other.cpp |86.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tablet_flat/ut/ut_other.cpp |86.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/client/flat_ut.cpp |86.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/client/flat_ut.cpp |86.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/tests/fq/control_plane_storage/in_memory_control_plane_storage_ut.cpp |86.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/tests/fq/control_plane_storage/in_memory_control_plane_storage_ut.cpp |86.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_ut_minikql.cpp |86.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_minikql.cpp |86.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/tx_allocator/txallocator_ut.cpp |86.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/tx_allocator/txallocator_ut.cpp |86.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/bscontroller/ut_selfheal/self_heal_actor_ut.cpp |86.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/bscontroller/ut_selfheal/self_heal_actor_ut.cpp |86.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/service/kqp_qs_queries_ut.cpp |86.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/yql/kqp_yql_ut.cpp |86.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/yql/kqp_yql_ut.cpp |86.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/service/kqp_qs_queries_ut.cpp |86.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/security/ticket_parser_ut.cpp |86.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/security/ticket_parser_ut.cpp |86.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/scheme_board/replica_ut.cpp |86.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/scheme_board/replica_ut.cpp |86.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kesus/tablet/quoter_performance_test/main.cpp |86.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kesus/tablet/quoter_performance_test/main.cpp |86.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/vdisk_restart.cpp |86.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/vdisk_restart.cpp |86.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_move/ut_move.cpp |86.0%| [CC] {BAZEL_UPLOAD} 
$(S)/ydb/core/tx/schemeshard/ut_move/ut_move.cpp |86.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/quoter/quoter_service_bandwidth_test/quota_requester.cpp |86.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/quoter/quoter_service_bandwidth_test/quota_requester.cpp |86.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/tx/kqp_sink_tx_ut.cpp |86.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/tx/kqp_sink_tx_ut.cpp |86.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tablet/tablet_resolver_ut.cpp |86.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tablet/tablet_resolver_ut.cpp |86.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tablet/tablet_counters_aggregator_ut.cpp |86.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tablet/tablet_counters_aggregator_ut.cpp |86.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/persqueue_v1/ut/rate_limiter_test_setup.cpp |86.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/persqueue_v1/ut/rate_limiter_test_setup.cpp |86.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tablet/bootstrapper_ut.cpp |86.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tablet/bootstrapper_ut.cpp |86.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/tx/kqp_sink_locks_ut.cpp |86.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/tx/kqp_sink_locks_ut.cpp |86.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kafka_proxy/ut/ut_transaction_actor.cpp |86.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kafka_proxy/ut/ut_transaction_actor.cpp |86.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/downtime_ut.cpp |86.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/downtime_ut.cpp |86.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/discover.cpp |86.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/discover.cpp |86.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/gc_quorum_3dc.cpp |86.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/gateway/ut/metadata_conversion.cpp |86.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/gateway/ut/metadata_conversion.cpp |86.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/gc_quorum_3dc.cpp |86.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tablet_flat/ut/ut_data_cleanup.cpp |86.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tablet_flat/ut/ut_data_cleanup.cpp |86.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/library/ycloud/impl/user_account_service_ut.cpp |86.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/library/ycloud/impl/user_account_service_ut.cpp |86.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/pg/kqp_pg_ut.cpp |86.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/pg/kqp_pg_ut.cpp |86.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/tx/kqp_locks_ut.cpp |86.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/tx/kqp_locks_ut.cpp |86.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/persqueue_v1/persqueue_compat_ut.cpp |86.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/persqueue_v1/persqueue_compat_ut.cpp |86.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_compaction/ut_compaction.cpp |86.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_compaction/ut_compaction.cpp |86.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_olap_reboots/ut_olap_reboots.cpp |86.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_olap_reboots/ut_olap_reboots.cpp |86.1%| [CC] {default-linux-x86_64, 
release, asan} $(S)/ydb/services/ydb/ydb_scripting_ut.cpp |86.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/ydb/ydb_scripting_ut.cpp |86.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/client/server/msgbus_server_pq_metarequest_ut.cpp |86.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/client/server/msgbus_server_pq_metarequest_ut.cpp |86.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/ydb/ydb_object_storage_ut.cpp |86.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/ydb/ydb_object_storage_ut.cpp |86.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/tx_proxy/storage_tenant_ut.cpp |86.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/tx_proxy/storage_tenant_ut.cpp |86.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tablet/resource_broker_ut.cpp |86.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tablet/resource_broker_ut.cpp |86.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/arrow/kqp_types_arrow_ut.cpp |86.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/arrow/kqp_types_arrow_ut.cpp |86.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/ydb_convert/table_description_ut.cpp |86.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/ydb_convert/table_description_ut.cpp |86.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/effects/kqp_effects_ut.cpp |86.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/actorlib_impl/test_interconnect_ut.cpp |86.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/effects/kqp_effects_ut.cpp |86.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/actorlib_impl/test_interconnect_ut.cpp |86.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/sys_view/ut_counters.cpp |86.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/sys_view/ut_counters.cpp |86.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_util_ut.cpp |86.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/olap/statistics_ut.cpp |86.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_util_ut.cpp |86.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/olap/statistics_ut.cpp |86.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/tests/fq/control_plane_storage/ydb_control_plane_storage_queries_ut.cpp |86.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/batch_operations/kqp_batch_delete_ut.cpp |86.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/batch_operations/kqp_batch_delete_ut.cpp |86.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/tests/fq/control_plane_storage/ydb_control_plane_storage_queries_ut.cpp |86.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/olap/helpers/query_executor.cpp |86.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/olap/helpers/query_executor.cpp |86.1%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/ut/olap/helpers/libut-olap-helpers.a |86.1%| [AR] {RESULT} $(B)/ydb/core/kqp/ut/olap/helpers/libut-olap-helpers.a |86.1%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/ut/olap/helpers/libut-olap-helpers.a |86.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_ut_replication.cpp |86.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_replication.cpp |86.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_base/ut_table_pg_types.cpp |86.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/query/kqp_params_ut.cpp |86.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_base/ut_table_pg_types.cpp |86.1%| [EN] {default-linux-x86_64, release, asan} 
$(B)/ydb/core/kqp/provider/yql_kikimr_provider.h_serialized.cpp |86.1%| [EN] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/transactions/tx_controller.h_serialized.cpp |86.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/query/kqp_params_ut.cpp |86.1%| [EN] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/transactions/tx_controller.h_serialized.cpp |86.1%| [EN] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/provider/yql_kikimr_provider.h_serialized.cpp |86.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/http_proxy/ut/ymq_ut.cpp |86.1%| [EN] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/datashard_active_transaction.h_serialized.cpp |86.2%| [EN] {BAZEL_UPLOAD} $(B)/ydb/core/tx/datashard/datashard_active_transaction.h_serialized.cpp |86.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/http_proxy/ut/ymq_ut.cpp |86.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/provider/read_attributes_utils_ut.cpp |86.2%| [EN] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/columnshard.h_serialized.cpp |86.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/provider/read_attributes_utils_ut.cpp |86.2%| [EN] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/columnshard.h_serialized.cpp |86.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_ttl/ut_ttl_utility.cpp |86.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_ttl/ut_ttl_utility.cpp |86.2%| [EN] {default-linux-x86_64, release, asan} $(B)/ydb/core/ymq/actor/queue_schema.h_serialized.cpp |86.2%| [EN] {BAZEL_UPLOAD} $(B)/ydb/core/ymq/actor/queue_schema.h_serialized.cpp |86.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_ut_kqp.cpp |86.2%| [EN] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/opt/kqp_query_plan.h_serialized.cpp |86.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_kqp.cpp |86.2%| [EN] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/opt/kqp_query_plan.h_serialized.cpp |86.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/console/jaeger_tracing_configurator_ut.cpp |86.2%| [EN] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/column_engine_logs.h_serialized.cpp |86.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/jaeger_tracing_configurator_ut.cpp |86.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tablet_flat/benchmark/b_part.cpp |86.2%| [EN] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/column_engine_logs.h_serialized.cpp |86.2%| [EN] {default-linux-x86_64, release, asan} $(B)/ydb/core/persqueue/read_balancer__balancing.h_serialized.cpp |86.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tablet_flat/benchmark/b_part.cpp |86.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_index/ut_vector_index.cpp |86.2%| [EN] {BAZEL_UPLOAD} $(B)/ydb/core/persqueue/read_balancer__balancing.h_serialized.cpp |86.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_index/ut_vector_index.cpp |86.2%| [EN] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/operation.h_serialized.cpp |86.2%| [EN] {BAZEL_UPLOAD} $(B)/ydb/core/tx/datashard/operation.h_serialized.cpp |86.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/runtime/kqp_scan_logging_ut.cpp |86.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/runtime/kqp_scan_logging_ut.cpp |86.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/shred.cpp |86.2%| [EN] {default-linux-x86_64, release, asan} 
$(B)/ydb/core/tx/columnshard/engines/reader/abstract/read_metadata.h_serialized.cpp |86.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/shred.cpp |86.2%| [EN] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/abstract/read_metadata.h_serialized.cpp |86.2%| [TA] $(B)/ydb/tests/functional/benchmarks_init/test-results/py3test/{meta.json ... results_accumulator.log} |86.2%| [TA] {RESULT} $(B)/ydb/tests/functional/benchmarks_init/test-results/py3test/{meta.json ... results_accumulator.log} |86.2%| [BN] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stability/tool/simple_queue |86.2%| [BN] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stability/tool/simple_queue |86.2%| [BN] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stability/tool/olap_workload |86.2%| [BN] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stability/tool/olap_workload |86.2%| [EN] {default-linux-x86_64, release, asan} $(B)/ydb/core/ymq/actor/fifo_cleanup.h_serialized.cpp |86.2%| [EN] {BAZEL_UPLOAD} $(B)/ydb/core/ymq/actor/fifo_cleanup.h_serialized.cpp |86.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/join/kqp_join_order_ut.cpp |86.2%| [EN] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/executer_actor/kqp_executer.h_serialized.cpp |86.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/join/kqp_join_order_ut.cpp |86.2%| [EN] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/executer_actor/kqp_executer.h_serialized.cpp |86.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/tests/fq/control_plane_storage/ydb_control_plane_storage_connections_permissions_ut.cpp |86.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/tests/fq/control_plane_storage/ydb_control_plane_storage_connections_permissions_ut.cpp |86.2%| [TA] $(B)/ydb/core/erasure/ut/test-results/unittest/{meta.json ... results_accumulator.log} |86.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/bscontroller/ut_selfheal/main.cpp |86.2%| [TA] {RESULT} $(B)/ydb/core/erasure/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} |86.2%| [EN] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/storage/granule/granule.h_serialized.cpp |86.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/bscontroller/ut_selfheal/main.cpp |86.2%| [EN] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/datashard.h_serialized.cpp |86.2%| [EN] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/granule/granule.h_serialized.cpp |86.2%| [EN] {BAZEL_UPLOAD} $(B)/ydb/core/tx/datashard/datashard.h_serialized.cpp |86.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/ydb/ydb_table_ut.cpp |86.2%| [EN] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/changes/abstract/abstract.h_serialized.cpp |86.2%| [EN] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/changes/abstract/abstract.h_serialized.cpp |86.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/ydb/ydb_table_ut.cpp |86.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/scan/kqp_scan_ut.cpp |86.2%| [PR] {default-linux-x86_64, release, asan} $(B)/ydb/core/base/generated/runtime_feature_flags.cpp |86.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/scan/kqp_scan_ut.cpp |86.3%| [PR] {BAZEL_UPLOAD} $(B)/ydb/core/base/generated/runtime_feature_flags.cpp |86.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/fq/libs/checkpoint_storage/ut/storage_service_ydb_ut.cpp |86.3%| [EN] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/operation_queue_timer.h_serialized.cpp |86.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/fq/libs/checkpoint_storage/ut/storage_service_ydb_ut.cpp |86.3%| [EN] {BAZEL_UPLOAD} $(B)/ydb/core/tx/schemeshard/operation_queue_timer.h_serialized.cpp |86.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/public/lib/ydb_cli/topic/topic_read_ut.cpp |86.3%| [EN] {default-linux-x86_64, release, asan} $(B)/ydb/core/ymq/actor/events.h_serialized.cpp |86.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/public/lib/ydb_cli/topic/topic_read_ut.cpp |86.3%| [EN] {BAZEL_UPLOAD} $(B)/ydb/core/ymq/actor/events.h_serialized.cpp |86.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/sys_view/ut_labeled.cpp |86.3%| [EN] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/execution_unit.h_serialized.cpp |86.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/sys_view/ut_labeled.cpp |86.3%| [EN] {BAZEL_UPLOAD} $(B)/ydb/core/tx/datashard/execution_unit.h_serialized.cpp |86.3%| [CC] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/columnshard.h_serialized.cpp |86.3%| [CC] {default-linux-x86_64, release, asan} $(B)/ydb/core/base/generated/runtime_feature_flags.cpp |86.3%| [CC] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/columnshard.h_serialized.cpp |86.3%| [CC] {BAZEL_UPLOAD} $(B)/ydb/core/base/generated/runtime_feature_flags.cpp |86.3%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/base/generated/libcore-base-generated.a |86.3%| [EN] {default-linux-x86_64, release, asan} $(B)/ydb/core/blob_depot/schema.h_serialized.cpp |86.3%| [AR] {RESULT} $(B)/ydb/core/base/generated/libcore-base-generated.a |86.3%| [EN] {BAZEL_UPLOAD} $(B)/ydb/core/blob_depot/schema.h_serialized.cpp |86.3%| [BN] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stability/tool/statistics_workload |86.3%| [BN] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stability/tool/statistics_workload |86.3%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/base/generated/libcore-base-generated.a |86.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/tools/query_replay_yt/query_compiler.cpp |86.3%| [EN] 
{default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/volatile_tx.h_serialized.cpp |86.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/tools/query_replay_yt/query_compiler.cpp |86.3%| [EN] {BAZEL_UPLOAD} $(B)/ydb/core/tx/datashard/volatile_tx.h_serialized.cpp |86.3%| [EN] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/data_sharing/common/session/common.h_serialized.cpp |86.3%| [EN] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/data_sharing/common/session/common.h_serialized.cpp |86.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/library/yql/providers/generic/actors/ut/yql_generic_lookup_actor_ut.cpp |86.3%| [EN] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/schemeshard_info_types.h_serialized.cpp |86.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/library/yql/providers/generic/actors/ut/yql_generic_lookup_actor_ut.cpp |86.3%| [EN] {BAZEL_UPLOAD} $(B)/ydb/core/tx/schemeshard/schemeshard_info_types.h_serialized.cpp |86.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/base/ut_auth/ydb-core-base-ut_auth |86.3%| [LD] {RESULT} $(B)/ydb/core/base/ut_auth/ydb-core-base-ut_auth |86.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/persqueue/topic_parser/ut/ydb-library-persqueue-topic_parser-ut |86.3%| [LD] {RESULT} $(B)/ydb/library/persqueue/topic_parser/ut/ydb-library-persqueue-topic_parser-ut |86.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/sys_view/service/ut/ydb-core-sys_view-service-ut |86.3%| [LD] {RESULT} $(B)/ydb/core/sys_view/service/ut/ydb-core-sys_view-service-ut |86.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/vdisk/anubis_osiris/ut/ydb-core-blobstorage-vdisk-anubis_osiris-ut |86.3%| [LD] {RESULT} $(B)/ydb/core/blobstorage/vdisk/anubis_osiris/ut/ydb-core-blobstorage-vdisk-anubis_osiris-ut |86.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/vdisk/query/ut/ydb-core-blobstorage-vdisk-query-ut |86.3%| [LD] {RESULT} $(B)/ydb/core/blobstorage/vdisk/query/ut/ydb-core-blobstorage-vdisk-query-ut |86.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/util/ut/ydb-core-util-ut |86.3%| [LD] {RESULT} $(B)/ydb/core/util/ut/ydb-core-util-ut |86.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/vdisk/hulldb/barriers/ut/ydb-core-blobstorage-vdisk-hulldb-barriers-ut |86.3%| [LD] {RESULT} $(B)/ydb/core/blobstorage/vdisk/hulldb/barriers/ut/ydb-core-blobstorage-vdisk-hulldb-barriers-ut |86.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/base/ut/ydb-core-base-ut |86.3%| [LD] {RESULT} $(B)/ydb/core/base/ut/ydb-core-base-ut |86.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/yaml_config/tools/dump_ds_init/yaml-to-proto-dump-ds-init |86.3%| [LD] {RESULT} $(B)/ydb/library/yaml_config/tools/dump_ds_init/yaml-to-proto-dump-ds-init |86.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/library/yaml_config/tools/dump_ds_init/yaml-to-proto-dump-ds-init |86.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/cms/console/validators/ut/ydb-core-cms-console-validators-ut |86.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/cms/console/validators/ut/ydb-core-cms-console-validators-ut |86.3%| [LD] {RESULT} $(B)/ydb/core/cms/console/validators/ut/ydb-core-cms-console-validators-ut |86.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/vdisk/hullop/ut/ydb-core-blobstorage-vdisk-hullop-ut |86.3%| [LD] {RESULT} $(B)/ydb/core/blobstorage/vdisk/hullop/ut/ydb-core-blobstorage-vdisk-hullop-ut |86.3%| [LD] {default-linux-x86_64, 
release, asan} $(B)/ydb/core/blobstorage/vdisk/hulldb/base/ut/ydb-core-blobstorage-vdisk-hulldb-base-ut |86.3%| [LD] {RESULT} $(B)/ydb/core/blobstorage/vdisk/hulldb/base/ut/ydb-core-blobstorage-vdisk-hulldb-base-ut |86.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/pdisk/ut/ydb-core-blobstorage-pdisk-ut |86.3%| [LD] {RESULT} $(B)/ydb/core/blobstorage/pdisk/ut/ydb-core-blobstorage-pdisk-ut |86.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/pdisk/ut/ydb-core-blobstorage-pdisk-ut |86.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/vdisk/hulldb/compstrat/ut/ydb-core-blobstorage-vdisk-hulldb-compstrat-ut |86.3%| [LD] {RESULT} $(B)/ydb/core/blobstorage/vdisk/hulldb/compstrat/ut/ydb-core-blobstorage-vdisk-hulldb-compstrat-ut |86.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/testlib/actors/ut/ydb-core-testlib-actors-ut |86.3%| [LD] {RESULT} $(B)/ydb/core/testlib/actors/ut/ydb-core-testlib-actors-ut |86.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/testlib/actors/ut/ydb-core-testlib-actors-ut |86.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/blob_depot_test_functions.cpp |86.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/blob_depot_test_functions.cpp |86.3%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/base/ut_auth/ydb-core-base-ut_auth |86.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/reader/abstract/read_context.cpp |86.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/vdisk/ingress/ut/ydb-core-blobstorage-vdisk-ingress-ut |86.3%| [LD] {RESULT} $(B)/ydb/core/blobstorage/vdisk/ingress/ut/ydb-core-blobstorage-vdisk-ingress-ut |86.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/reader/abstract/read_context.cpp |86.3%| [EN] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/export/session/session.h_serialized.cpp |86.3%| [EN] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/export/session/session.h_serialized.cpp |86.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/engine/mkql_engine_flat_ut.cpp |86.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/vdisk/common/ut/ydb-core-blobstorage-vdisk-common-ut |86.3%| [LD] {RESULT} $(B)/ydb/core/blobstorage/vdisk/common/ut/ydb-core-blobstorage-vdisk-common-ut |86.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/vdisk/hulldb/generic/ut/ydb-core-blobstorage-vdisk-hulldb-generic-ut |86.3%| [LD] {RESULT} $(B)/ydb/core/blobstorage/vdisk/hulldb/generic/ut/ydb-core-blobstorage-vdisk-hulldb-generic-ut |86.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/fq/libs/compute/common/ut/ydb-core-fq-libs-compute-common-ut |86.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/fq/libs/compute/common/ut/ydb-core-fq-libs-compute-common-ut |86.4%| [LD] {RESULT} $(B)/ydb/core/fq/libs/compute/common/ut/ydb-core-fq-libs-compute-common-ut |86.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/dsproxy/ut_strategy/ydb-core-blobstorage-dsproxy-ut_strategy |86.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/groupinfo/ut/ydb-core-blobstorage-groupinfo-ut |86.4%| [LD] {RESULT} $(B)/ydb/core/blobstorage/dsproxy/ut_strategy/ydb-core-blobstorage-dsproxy-ut_strategy |86.4%| [LD] {RESULT} $(B)/ydb/core/blobstorage/groupinfo/ut/ydb-core-blobstorage-groupinfo-ut |86.4%| [CC] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/datashard.h_serialized.cpp |86.4%| 
[LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/config/validation/auth_config_validator_ut/core-config-validation-auth_config_validator_ut |86.4%| [LD] {RESULT} $(B)/ydb/core/config/validation/auth_config_validator_ut/core-config-validation-auth_config_validator_ut |86.4%| [CC] {BAZEL_UPLOAD} $(B)/ydb/core/tx/datashard/datashard.h_serialized.cpp |86.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/backpressure/ut/ydb-core-blobstorage-backpressure-ut |86.4%| [LD] {RESULT} $(B)/ydb/core/blobstorage/backpressure/ut/ydb-core-blobstorage-backpressure-ut |86.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tablet_flat/ut_pg/ydb-core-tablet_flat-ut_pg |86.4%| [LD] {RESULT} $(B)/ydb/core/tablet_flat/ut_pg/ydb-core-tablet_flat-ut_pg |86.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tablet_flat/ut_pg/ydb-core-tablet_flat-ut_pg |86.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/engine/mkql_engine_flat_ut.cpp |86.4%| [EN] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/columnshard_impl.h_serialized.cpp |86.4%| [BN] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stability/tool/nemesis |86.4%| [BN] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stability/tool/nemesis |86.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/base/generated/ut/ydb-core-base-generated-ut |86.4%| [CC] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/datashard_active_transaction.h_serialized.cpp |86.4%| [LD] {RESULT} $(B)/ydb/core/base/generated/ut/ydb-core-base-generated-ut |86.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/vdisk/hulldb/fresh/ut/ydb-core-blobstorage-vdisk-hulldb-fresh-ut |86.4%| [LD] {RESULT} $(B)/ydb/core/blobstorage/vdisk/hulldb/fresh/ut/ydb-core-blobstorage-vdisk-hulldb-fresh-ut |86.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/yaml_config/tools/dump/yaml-to-proto-dump |86.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/library/yaml_config/tools/dump/yaml-to-proto-dump |86.4%| [LD] {RESULT} $(B)/ydb/library/yaml_config/tools/dump/yaml-to-proto-dump |86.4%| [EN] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/columnshard_impl.h_serialized.cpp |86.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/config/init/ut/ydb-core-config-init-ut |86.4%| [EN] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/counters/columnshard.h_serialized.cpp |86.4%| [LD] {RESULT} $(B)/ydb/core/config/init/ut/ydb-core-config-init-ut |86.4%| [CC] {BAZEL_UPLOAD} $(B)/ydb/core/tx/datashard/datashard_active_transaction.h_serialized.cpp |86.4%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/sys_view/service/ut/ydb-core-sys_view-service-ut |86.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_restore/ut_restore.cpp |86.4%| [EN] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/counters/columnshard.h_serialized.cpp |86.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/metering/ut/ydb-core-metering-ut |86.4%| [LD] {RESULT} $(B)/ydb/core/metering/ut/ydb-core-metering-ut |86.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/mvp/oidc_proxy/ut/ydb-mvp-oidc_proxy-ut |86.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/mvp/oidc_proxy/ut/ydb-mvp-oidc_proxy-ut |86.4%| [LD] {RESULT} $(B)/ydb/mvp/oidc_proxy/ut/ydb-mvp-oidc_proxy-ut |86.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/olap/sys_view_ut.cpp |86.4%| [TS] {asan, default-linux-x86_64, release} ydb/core/scheme/ut_pg/unittest |86.4%| [CC] {default-linux-x86_64, release, asan} 
$(B)/ydb/core/ymq/actor/fifo_cleanup.h_serialized.cpp |86.4%| [EN] {default-linux-x86_64, release, asan} $(B)/ydb/services/metadata/manager/abstract.h_serialized.cpp |86.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/sequenceshard/public/ut/ydb-core-tx-sequenceshard-public-ut |86.4%| [LD] {BAZEL_UPLOAD} $(B)/ydb/library/persqueue/topic_parser/ut/ydb-library-persqueue-topic_parser-ut |86.4%| [CC] {BAZEL_UPLOAD} $(B)/ydb/core/ymq/actor/fifo_cleanup.h_serialized.cpp |86.4%| [LD] {RESULT} $(B)/ydb/core/tx/sequenceshard/public/ut/ydb-core-tx-sequenceshard-public-ut |86.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/backup/common/ut/ydb-core-backup-common-ut |86.4%| [CC] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/operation.h_serialized.cpp |86.4%| [LD] {RESULT} $(B)/ydb/core/backup/common/ut/ydb-core-backup-common-ut |86.4%| [EN] {BAZEL_UPLOAD} $(B)/ydb/services/metadata/manager/abstract.h_serialized.cpp |86.4%| [CC] {BAZEL_UPLOAD} $(B)/ydb/core/tx/datashard/operation.h_serialized.cpp >> PgTest::DumpStringCells >> PgTest::DumpStringCells [GOOD] >> AuthTokenAllowed::PassOnEmptyListAndToken [GOOD] >> AuthTokenAllowed::PassOnEmptyListAndTokenWithEmptyUserSid [GOOD] |86.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/olap/sys_view_ut.cpp |86.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/io_formats/arrow/scheme/ut/ydb-core-io_formats-arrow-scheme-ut |86.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/io_formats/arrow/scheme/ut/ydb-core-io_formats-arrow-scheme-ut |86.4%| [LD] {RESULT} $(B)/ydb/core/io_formats/arrow/scheme/ut/ydb-core-io_formats-arrow-scheme-ut |86.4%| [TS] {asan, default-linux-x86_64, release} ydb/core/scheme/ut_pg/unittest >> PgTest::DumpStringCells [GOOD] |86.4%| [TS] {asan, default-linux-x86_64, release} ydb/core/scheme/ut_pg/unittest >> SysViewQueryHistory::TopDurationAdd [GOOD] |86.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/base/ut_auth/unittest >> AuthTokenAllowed::PassOnEmptyListAndTokenWithEmptyUserSid [GOOD] |86.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/long_tx_service/public/ut/ydb-core-tx-long_tx_service-public-ut |86.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/sys_view/service/ut/unittest >> SysViewQueryHistory::TopDurationAdd [GOOD] |86.4%| [LD] {RESULT} $(B)/ydb/core/tx/long_tx_service/public/ut/ydb-core-tx-long_tx_service-public-ut |86.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/base/ut/ydb-core-blobstorage-base-ut |86.4%| [LD] {RESULT} $(B)/ydb/core/blobstorage/base/ut/ydb-core-blobstorage-base-ut |86.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_restore/ut_restore.cpp >> TFragmentedBufferTest::TestWriteRead [GOOD] >> TFragmentedBufferTest::TestOverwriteRead [GOOD] >> TFragmentedBufferTest::TestIsNotMonolith [GOOD] >> TFragmentedBufferTest::TestSetMonolith [GOOD] >> TFragmentedBufferTest::TestReplaceWithSetMonolith [GOOD] >> THazardTest::CachedPointers [GOOD] >> THazardTest::AutoProtectedPointers [GOOD] >> THyperLogCounterTest::TestGetSet [GOOD] >> THyperLogCounterTest::TestIncrement [GOOD] >> THyperLogCounterTest::TestAddRandom |86.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/mvp/core/ut/ydb-mvp-core-ut |86.4%| [LD] {RESULT} $(B)/ydb/mvp/core/ut/ydb-mvp-core-ut |86.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/mvp/core/ut/ydb-mvp-core-ut |86.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/mvp/meta/ut/ydb-mvp-meta-ut >> TCacheCacheTest::Random [GOOD] |86.5%| [LD] {RESULT} 
$(B)/ydb/mvp/meta/ut/ydb-mvp-meta-ut >> TCacheTest::TestUnboundedMapCache [GOOD] |86.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/pgproxy/ut/ydb-core-pgproxy-ut >> TCacheTest::EnsureNoLeakAfterUnboundedCacheOnMapDtor [GOOD] >> TCacheTest::TestSizeBasedOverflowCallback [GOOD] >> TCacheTest::TestLruCache [GOOD] >> TCacheTest::EnsureNoLeakAfterLruCacheDtor [GOOD] >> TCacheTest::Test2QCache [GOOD] >> TCacheTest::EnsureNoLeakAfterQ2CacheDtor [GOOD] >> TCacheTest::TestUpdateItemSize [GOOD] >> TCircularOperationQueueTest::CheckOnDoneInflight1 [GOOD] >> TCircularOperationQueueTest::CheckOnDoneInflight2 [GOOD] >> TCircularOperationQueueTest::CheckOnDoneNotExisting [GOOD] >> TCircularOperationQueueTest::CheckRemoveNotRunning [GOOD] >> TCircularOperationQueueTest::CheckRemoveRunning [GOOD] >> TCircularOperationQueueTest::CheckRemoveWaiting [GOOD] >> TCircularOperationQueueTest::CheckRemoveNotExisting [GOOD] >> THyperLogCounterTest::TestAddRandom [GOOD] >> TCircularOperationQueueTest::CheckTimeout [GOOD] >> TCircularOperationQueueTest::CheckTimeoutWhenFirstItemRemoved [GOOD] >> THyperLogCounterTest::TestAddFixed [GOOD] >> TPDiskRaces::KillOwnerWhileDeletingChunk >> TCircularOperationQueueTest::RemoveExistingWhenShuffle [GOOD] >> TCircularOperationQueueTest::BasicRPSCheck [GOOD] >> THyperLogCounterTest::TestHybridIncrement [GOOD] >> TCircularOperationQueueTest::BasicRPSCheckWithRound [GOOD] >> TCircularOperationQueueTest::CheckWakeupAfterStop [GOOD] >> TCircularOperationQueueTest::CheckWakeupWhenRPSExhausted [GOOD] >> THyperLogCounterTest::TestHybridAdd [GOOD] >> TCircularOperationQueueTest::CheckWakeupWhenRPSExhausted2 [GOOD] >> TIntervalSetTest::IntervalMapTestEmpty [GOOD] >> TCircularOperationQueueTest::CheckStartAfterStop [GOOD] >> TIntervalSetTest::IntervalMapTestSpecificAdd [GOOD] |86.5%| [LD] {RESULT} $(B)/ydb/core/pgproxy/ut/ydb-core-pgproxy-ut |86.5%| [CC] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/volatile_tx.h_serialized.cpp >> TIntervalSetTest::IntervalMapTestAdd >> TYardTest::TestInit >> TIntervalSetTest::IntervalMapTestAdd [GOOD] >> TIntervalSetTest::IntervalMapTestAddSubtract [GOOD] >> TIntervalSetTest::IntervalMapTestAddAgainstReference >> TIntervalSetTest::IntervalMapTestAddAgainstReference [GOOD] >> TIntervalSetTest::IntervalMapTestIsSubsetOfAgainstReference |86.5%| [CC] {BAZEL_UPLOAD} $(B)/ydb/core/tx/datashard/volatile_tx.h_serialized.cpp |86.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/config/validation/column_shard_config_validator_ut/column_shard_config_validator_ut |86.5%| [LD] {RESULT} $(B)/ydb/core/config/validation/column_shard_config_validator_ut/column_shard_config_validator_ut |86.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/external_sources/ut/ydb-core-external_sources-ut |86.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/external_sources/ut/ydb-core-external_sources-ut ------- [TM] {asan, default-linux-x86_64, release} ydb/core/util/ut/unittest >> TCircularOperationQueueTest::CheckStartAfterStop [GOOD] Test command err: 0.27657 >> TIntervalSetTest::IntervalMapTestIsSubsetOfAgainstReference [GOOD] >> TIntervalSetTest::IntervalMapIntersection |86.5%| [LD] {RESULT} $(B)/ydb/core/external_sources/ut/ydb-core-external_sources-ut |86.5%| [CC] {default-linux-x86_64, release, asan} $(B)/ydb/core/ymq/actor/queue_schema.h_serialized.cpp |86.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/fq/libs/control_plane_storage/internal/ut/core-fq-libs-control_plane_storage-internal-ut |86.5%| [LD] 
{BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/fq/libs/control_plane_storage/internal/ut/core-fq-libs-control_plane_storage-internal-ut |86.5%| [LD] {RESULT} $(B)/ydb/core/fq/libs/control_plane_storage/internal/ut/core-fq-libs-control_plane_storage-internal-ut |86.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_reboots/ut_reboots.cpp |86.5%| [CC] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/transactions/tx_controller.h_serialized.cpp |86.5%| [CC] {BAZEL_UPLOAD} $(B)/ydb/core/ymq/actor/queue_schema.h_serialized.cpp >> TIntervalSetTest::IntervalMapIntersection [GOOD] >> TIntervalSetTest::IntervalMapIntersectionInplace |86.5%| [CC] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/transactions/tx_controller.h_serialized.cpp >> TYardTest::TestInit [GOOD] >> TYardTest::TestInitOnIncompleteFormat |86.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/log_backend/ut/ydb-core-log_backend-ut |86.5%| [LD] {RESULT} $(B)/ydb/core/log_backend/ut/ydb-core-log_backend-ut >> TPDiskTest::TestThatEveryValueOfEStateEnumKeepsItIntegerValue [GOOD] >> TPDiskTest::TestPDiskOwnerRecreation |86.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_reboots/ut_reboots.cpp >> TIntervalSetTest::IntervalMapIntersectionInplace [GOOD] >> TIntervalSetTest::IntervalMapIntersectionInplaceSelf [GOOD] >> TIntervalSetTest::IntervalMapDifference >> TopicNameConverterTest::LegacyStyleDoubleName [GOOD] >> TopicNameConverterTest::NoTopicName [GOOD] |86.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/yql/providers/s3/provider/ut/ydb-library-yql-providers-s3-provider-ut >> AddressClassifierTest::TestAddressExtraction [GOOD] >> AddressClassifierTest::TestAddressParsing [GOOD] |86.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/library/yql/providers/s3/provider/ut/ydb-library-yql-providers-s3-provider-ut |86.5%| [LD] {RESULT} $(B)/ydb/library/yql/providers/s3/provider/ut/ydb-library-yql-providers-s3-provider-ut >> AddressClassifierTest::TestClassfierWithAllIpTypes [GOOD] >> AddressClassifierTest::TestLabeledClassifier [GOOD] >> AddressClassifierTest::TestLabeledClassifierFromNetData [GOOD] >> TBitsTest::TestNaiveClz [GOOD] >> TBTreeTest::Basics [GOOD] >> TBTreeTest::ClearAndReuse [GOOD] >> TBTreeTest::SeekForwardPermutationsInplace [GOOD] >> TBTreeTest::SeekForwardPermutationsThreadSafe >> TBTreeTest::SeekForwardPermutationsThreadSafe [GOOD] >> TBTreeTest::SeekBackwardPermutationsInplace [GOOD] >> PDiskCompatibilityInfo::OldCompatible >> TBTreeTest::SeekBackwardPermutationsThreadSafe [GOOD] >> TYardTest::TestInitOnIncompleteFormat [GOOD] >> TYardTest::TestInitOwner >> TBTreeTest::RandomInsertInplace |86.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/replication/service/topic_reader_ut.cpp |86.5%| [TS] {asan, default-linux-x86_64, release} ydb/library/persqueue/topic_parser/ut/unittest >> TopicNameConverterTest::NoTopicName [GOOD] >> TYardTest::TestBadDeviceInit >> TYardTest::TestBadDeviceInit [GOOD] >> TYardTest::TestChunkReadRandomOffset >> PDiskCompatibilityInfo::OldCompatible [GOOD] >> TIntervalSetTest::IntervalMapDifference [GOOD] >> PDiskCompatibilityInfo::Incompatible >> TIntervalSetTest::IntervalMapDifferenceInplaceSelf [GOOD] >> TYardTest::TestInitOwner [GOOD] >> TYardTest::TestIncorrectRequests |86.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tablet_flat/ut_util/ydb-core-tablet_flat-ut_util |86.5%| [LD] {RESULT} $(B)/ydb/core/tablet_flat/ut_util/ydb-core-tablet_flat-ut_util |86.5%| [CC] {default-linux-x86_64, release, asan} 
$(S)/ydb/core/tx/schemeshard/ut_export/ut_export.cpp >> PDiskCompatibilityInfo::Incompatible [GOOD] >> PDiskCompatibilityInfo::NewIncompatibleWithDefault |86.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/replication/service/topic_reader_ut.cpp |86.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/opt/kqp_agg_ut.cpp |86.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_topic_splitmerge/ut_topic_splitmerge.cpp >> TYardTest::TestIncorrectRequests [GOOD] >> TYardTest::TestLogWriteRead |86.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/util/ut/unittest >> TIntervalSetTest::IntervalMapDifferenceInplaceSelf [GOOD] >> PDiskCompatibilityInfo::NewIncompatibleWithDefault [GOOD] >> PDiskCompatibilityInfo::Trunk |86.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/opt/kqp_agg_ut.cpp >> TYardTest::TestLogWriteRead [GOOD] >> TYardTest::TestLogWriteReadMedium |86.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_topic_splitmerge/ut_topic_splitmerge.cpp >> TQueryResultSizeTrackerTest::SerializeDeserializeMaxPtotobufSize >> TBlobStorageHullFresh::SimpleBackwardEnd [GOOD] >> TBlobStorageHullFresh::SimpleBackWardMiddle2Times [GOOD] >> PDiskCompatibilityInfo::Trunk [GOOD] >> PDiskCompatibilityInfo::SuppressCompatibilityCheck >> TYardTest::TestEmptyLogRead |86.5%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/vdisk/query/ut/ydb-core-blobstorage-vdisk-query-ut >> TYardTest::TestLogWriteReadMedium [GOOD] >> TYardTest::TestLogWriteReadMediumWithHddSectorMap >> TYardTest::TestEmptyLogRead [GOOD] >> TYardTest::TestChunkWriteRead >> PDiskCompatibilityInfo::SuppressCompatibilityCheck [GOOD] >> PDiskCompatibilityInfo::Migration |86.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/fresh/ut/unittest >> TBlobStorageHullFresh::SimpleBackWardMiddle2Times [GOOD] |86.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/yql/providers/s3/actors/ut/ydb-library-yql-providers-s3-actors-ut |86.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/library/yql/providers/s3/actors/ut/ydb-library-yql-providers-s3-actors-ut |86.5%| [LD] {RESULT} $(B)/ydb/library/yql/providers/s3/actors/ut/ydb-library-yql-providers-s3-actors-ut >> TBlobStorageHullStorageRatio::Test [GOOD] |86.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_export/ut_export.cpp >> TBlobStorageKeyBarrierTest::ParseTest [GOOD] >> TPDiskTest::TestPDiskOwnerRecreation [GOOD] >> TPDiskTest::TestPDiskOwnerRecreationWithStableOwner >> TSTreeTest::Basic [GOOD] >> TSVecTest::Basic [GOOD] >> TPDiskUtil::SectorRestoratorOldNewHash [GOOD] >> TPDiskUtil::TChunkIdFormatter [GOOD] >> TPDiskUtil::TOwnerPrintTest [GOOD] >> TPDiskUtil::TChunkStateEnumPrintTest [GOOD] >> TPDiskUtil::TIoResultEnumPrintTest [GOOD] >> TPDiskUtil::TIoTypeEnumPrintTest [GOOD] >> TPDiskUtil::TestNVMeSerial [GOOD] >> TPDiskUtil::TestDeviceList [GOOD] >> TPDiskUtil::TestBufferPool >> TBTreeTest::RandomInsertInplace [GOOD] |86.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/config/validation/ut/ydb-core-config-validation-ut |86.5%| [LD] {RESULT} $(B)/ydb/core/config/validation/ut/ydb-core-config-validation-ut |86.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_rtmr/ut_rtmr.cpp >> TBTreeTest::RandomInsertThreadSafe >> PDiskCompatibilityInfo::Migration [GOOD] >> ReadOnlyPDisk::SimpleRestartReadOnly |86.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/yql/providers/s3/common/ut/ydb-library-yql-providers-s3-common-ut |86.6%| [LD] {BAZEL_UPLOAD, SKIPPED} 
$(B)/ydb/library/yql/providers/s3/common/ut/ydb-library-yql-providers-s3-common-ut |86.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/base/ut/unittest >> TBlobStorageKeyBarrierTest::ParseTest [GOOD] |86.6%| [LD] {RESULT} $(B)/ydb/library/yql/providers/s3/common/ut/ydb-library-yql-providers-s3-common-ut >> TYardTest::TestLogWriteReadMediumWithHddSectorMap [GOOD] >> TYardTest::TestLogWriteReadLarge |86.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_rtmr/ut_rtmr.cpp |86.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_base/ut_table_decimal_types.cpp |86.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/fresh/ut/unittest >> TSVecTest::Basic [GOOD] >> ReadOnlyPDisk::SimpleRestartReadOnly [GOOD] >> ReadOnlyPDisk::StartReadOnlyUnformattedShouldFail >> TYardTest::TestChunkWriteRead [GOOD] >> TYardTest::TestChunkWriteReadWithHddSectorMap |86.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/vdisk/defrag/ut/ydb-core-blobstorage-vdisk-defrag-ut |86.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/vdisk/defrag/ut/ydb-core-blobstorage-vdisk-defrag-ut |86.6%| [LD] {RESULT} $(B)/ydb/core/blobstorage/vdisk/defrag/ut/ydb-core-blobstorage-vdisk-defrag-ut >> TYardTest::TestLogWriteReadLarge [GOOD] >> TYardTest::TestLogWriteCutEqual |86.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/fq/libs/db_id_async_resolver_impl/ut/ydb-core-fq-libs-db_id_async_resolver_impl-ut >> ReadOnlyPDisk::StartReadOnlyUnformattedShouldFail [GOOD] >> ReadOnlyPDisk::StartReadOnlyZeroedShouldFail |86.6%| [LD] {RESULT} $(B)/ydb/core/fq/libs/db_id_async_resolver_impl/ut/ydb-core-fq-libs-db_id_async_resolver_impl-ut >> ReadOnlyPDisk::StartReadOnlyZeroedShouldFail [GOOD] >> ReadOnlyPDisk::VDiskStartsOnReadOnlyPDisk |86.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_base/ut_table_decimal_types.cpp >> TBlobStorageHullFresh::SimpleBackWardEnd2Times [GOOD] >> TBlobStorageHullFresh::Perf |86.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tablet_flat/ut_large/ydb-core-tablet_flat-ut_large |86.6%| [CC] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/provider/yql_kikimr_provider.h_serialized.cpp |86.6%| [LD] {RESULT} $(B)/ydb/core/tablet_flat/ut_large/ydb-core-tablet_flat-ut_large >> ReadOnlyPDisk::VDiskStartsOnReadOnlyPDisk [GOOD] >> ReadOnlyPDisk::ReadOnlyPDiskEvents |86.6%| [CC] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/provider/yql_kikimr_provider.h_serialized.cpp |86.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tablet_flat/test/tool/surg/surg |86.6%| [LD] {RESULT} $(B)/ydb/core/tablet_flat/test/tool/surg/surg |86.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/yaml_config/ut/ydb-library-yaml_config-ut |86.6%| [LD] {RESULT} $(B)/ydb/library/yaml_config/ut/ydb-library-yaml_config-ut >> AuthDatabaseAdmin::FailOnOwnerAndTokenWithEmptyUserSidAndGroups [GOOD] >> DiscoveryConverterTest::FullLegacyNames [GOOD] >> TPDiskUtil::TestBufferPool [GOOD] >> AuthDatabaseAdmin::FailOnOwnerAndTokenWithEmptyUserSid [GOOD] >> AuthDatabaseAdmin::PassOnOwnerMatchGroupSid [GOOD] >> DiscoveryConverterTest::FirstClass [GOOD] >> TSectorMapPerformance::TestHDD1960GBRead100MBOnFirstSector |86.6%| [CC] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/reader/abstract/read_metadata.h_serialized.cpp |86.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/library/yaml_config/ut/ydb-library-yaml_config-ut >> ReadOnlyPDisk::ReadOnlyPDiskEvents [GOOD] >> 
ShredPDisk::EmptyShred |86.6%| [CC] {default-linux-x86_64, release, asan} $(B)/ydb/core/ymq/actor/events.h_serialized.cpp >> THullDsHeapItTest::HeapAppendixTreeForwardIteratorBenchmark |86.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/query/ut/unittest |86.6%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/util/ut/ydb-core-util-ut |86.6%| [CC] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/abstract/read_metadata.h_serialized.cpp |86.6%| [CC] {BAZEL_UPLOAD} $(B)/ydb/core/ymq/actor/events.h_serialized.cpp >> THullDsHeapItTest::HeapAppendixTreeForwardIteratorBenchmark [GOOD] >> THullDsHeapItTest::HeapAppendixTreeBackwardIteratorBenchmark >> TYardTest::TestChunkWriteReadWithHddSectorMap [GOOD] >> TYardTest::TestChunkWriteReadMultiple |86.6%| [TS] {asan, default-linux-x86_64, release} ydb/library/persqueue/topic_parser/ut/unittest >> DiscoveryConverterTest::FirstClass [GOOD] |86.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/base/ut_auth/unittest >> AuthDatabaseAdmin::PassOnOwnerMatchGroupSid [GOOD] |86.6%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/transactions/libtx-columnshard-transactions.a |86.6%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/transactions/libtx-columnshard-transactions.a >> THullDsHeapItTest::HeapAppendixTreeBackwardIteratorBenchmark [GOOD] |86.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/yql/providers/generic/provider/ut/pushdown/yql-providers-generic-provider-ut-pushdown |86.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/library/yql/providers/generic/provider/ut/pushdown/yql-providers-generic-provider-ut-pushdown |86.6%| [LD] {RESULT} $(B)/ydb/library/yql/providers/generic/provider/ut/pushdown/yql-providers-generic-provider-ut-pushdown >> TBlobStorageHullFresh::Perf [GOOD] >> ShredPDisk::EmptyShred [GOOD] >> ShredPDisk::SimpleShred >> TFreshAppendixTest::IterateForwardAll [GOOD] >> TFreshAppendixTest::IterateBackwardIncluding [GOOD] |86.6%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/base/generated/ut/ydb-core-base-generated-ut |86.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/base/ut/unittest >> THullDsHeapItTest::HeapAppendixTreeBackwardIteratorBenchmark [GOOD] |86.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/columnshard/engines/ut/ut_logs_engine.cpp |86.6%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/vdisk/anubis_osiris/ut/ydb-core-blobstorage-vdisk-anubis_osiris-ut |86.6%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/transactions/libtx-columnshard-transactions.a |86.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/fresh/ut/unittest >> TBlobStorageHullFresh::Perf [GOOD] >> THullDsHeapItTest::HeapLevelSliceForwardIteratorBenchmark >> TBTreeTest::RandomInsertThreadSafe [GOOD] >> TBTreeTest::DuplicateKeysInplace >> TQueryResultSizeTrackerTest::CheckWithoutQueryResult [GOOD] >> TPDiskTest::TestPDiskOwnerRecreationWithStableOwner [GOOD] >> TPDiskTest::TestVDiskMock >> TFreshAppendixTest::IterateBackwardAll [GOOD] >> THullDsHeapItTest::HeapLevelSliceForwardIteratorBenchmark [GOOD] >> TFreshAppendixTest::IterateBackwardExcluding [GOOD] >> THullDsHeapItTest::HeapLevelSliceBackwardIteratorBenchmark |86.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/fresh/ut/unittest >> TFreshAppendixTest::IterateBackwardIncluding [GOOD] >> TBlobStorageHullFreshSegment::PerfAppendix >> TSectorMapPerformance::TestHDD1960GBRead100MBOnFirstSector [GOOD] >> 
TSectorMapPerformance::TestHDD1960GBRead100MBOnLastSector >> THullDsHeapItTest::HeapLevelSliceBackwardIteratorBenchmark [GOOD] |86.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/federated_query/generic_ut/iceberg_ut_data.cpp |86.6%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/metering/ut/ydb-core-metering-ut >> TBlobStorageHullFresh::SolomonStandCrash [GOOD] >> TBlobStorageHullFreshSegment::IteratorTest >> TBTreeTest::DuplicateKeysInplace [GOOD] >> TBTreeTest::DuplicateKeysThreadSafe |86.6%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/tx/sequenceshard/public/ut/ydb-core-tx-sequenceshard-public-ut |86.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/query/ut/unittest >> TQueryResultSizeTrackerTest::CheckWithoutQueryResult [GOOD] >> TPDiskTest::TestVDiskMock [GOOD] >> TPDiskTest::TestRealFile |86.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/fresh/ut/unittest >> TFreshAppendixTest::IterateBackwardExcluding [GOOD] |86.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/federated_query/generic_ut/iceberg_ut_data.cpp |86.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/columnshard/engines/ut/ut_logs_engine.cpp >> TBTreeTest::DuplicateKeysThreadSafe [GOOD] >> TBTreeTest::ShouldCallDtorsInplace |86.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/base/ut/unittest >> THullDsHeapItTest::HeapLevelSliceBackwardIteratorBenchmark [GOOD] >> TBTreeTest::ShouldCallDtorsInplace [GOOD] >> TBTreeTest::ShouldCallDtorsThreadSafe [GOOD] >> TBTreeTest::Concurrent >> TBlobStorageHullFreshSegment::IteratorTest [GOOD] |86.7%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/vdisk/hulldb/barriers/ut/ydb-core-blobstorage-vdisk-hulldb-barriers-ut >> ShredPDisk::SimpleShred [GOOD] >> ShredPDisk::SimpleShredDirtyChunks >> THullDsHeapItTest::HeapForwardIteratorAllEntities >> TBTreeTest::Concurrent [GOOD] >> TBTreeTest::IteratorDestructor [GOOD] >> TCacheCacheTest::MoveToWarm [GOOD] >> TCacheCacheTest::EvictNext [GOOD] >> CompressionTest::lz4_generator_basic [GOOD] >> THullDsHeapItTest::HeapForwardIteratorAllEntities [GOOD] >> CompressionTest::lz4_generator_deflates [GOOD] >> THullDsHeapItTest::HeapBackwardIteratorAllEntities [GOOD] >> StLog::Basic [GOOD] >> TBlobStorageHullFresh::AppendixPerf |86.7%| [CC] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/execution_unit.h_serialized.cpp |86.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/fresh/ut/unittest >> TBlobStorageHullFreshSegment::IteratorTest [GOOD] >> TBlobStorageHullFresh::SimpleForward [GOOD] >> TBlobStorageHullFresh::SimpleBackwardMiddle [GOOD] |86.7%| [CC] {BAZEL_UPLOAD} $(B)/ydb/core/tx/datashard/execution_unit.h_serialized.cpp >> TFreshAppendixTest::IterateForwardIncluding [GOOD] >> TFreshAppendixTest::IterateForwardExcluding [GOOD] >> THullDsGenericNWayIt::ForwardIteration [GOOD] >> THullDsGenericNWayIt::BackwardIteration [GOOD] >> TQueryResultSizeTrackerTest::SerializeDeserializeMaxPtotobufSizeMinusOne |86.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/base/ut/unittest >> THullDsHeapItTest::HeapBackwardIteratorAllEntities [GOOD] |86.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/fresh/ut/unittest >> TBlobStorageHullFresh::SimpleBackwardMiddle [GOOD] >> DiscoveryConverterTest::FullLegacyPath [GOOD] >> DiscoveryConverterTest::FullLegacyNamesWithRootDatabase [GOOD] |86.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/util/btree_benchmark/btree_benchmark 
|86.7%| [LD] {RESULT} $(B)/ydb/core/util/btree_benchmark/btree_benchmark |86.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/formats/arrow/ut/ydb-core-formats-arrow-ut |86.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/base/ut/unittest >> THullDsGenericNWayIt::BackwardIteration [GOOD] |86.7%| [LD] {RESULT} $(B)/ydb/core/formats/arrow/ut/ydb-core-formats-arrow-ut |86.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/formats/arrow/ut/ydb-core-formats-arrow-ut |86.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/fresh/ut/unittest >> TFreshAppendixTest::IterateForwardExcluding [GOOD] >> TBlobStorageDiskBlob::CreateFromDistinctParts [GOOD] >> TBlobStorageDiskBlob::CreateIterate [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/util/ut/unittest >> StLog::Basic [GOOD] Test command err: Producer 0 worked for 0.2717376729 seconds Producer 1 worked for 0.2291169211 seconds Consumer 0 worked for 0.4653419797 seconds Consumer 1 worked for 0.4564745124 seconds Consumer 2 worked for 0.4720535934 seconds Consumer 3 worked for 0.360690455 seconds |86.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_ut_object_storage_listing.cpp |86.7%| [TS] {asan, default-linux-x86_64, release} ydb/library/persqueue/topic_parser/ut/unittest >> DiscoveryConverterTest::FullLegacyNamesWithRootDatabase [GOOD] |86.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tablet_flat/test/tool/perf/table-perf |86.7%| [LD] {RESULT} $(B)/ydb/core/tablet_flat/test/tool/perf/table-perf |86.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/base/ut/unittest >> TBlobStorageDiskBlob::CreateIterate [GOOD] |86.7%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/backpressure/ut/ydb-core-blobstorage-backpressure-ut >> ShredPDisk::SimpleShredDirtyChunks [GOOD] >> ShredPDisk::KillVDiskWhilePreShredding >> TSectorMapPerformance::TestHDD1960GBRead100MBOnLastSector [GOOD] >> TSectorMapPerformance::TestHDD1960GBWrite100MBOnFirstSector |86.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_object_storage_listing.cpp |86.7%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/base/ut/ydb-core-blobstorage-base-ut |86.7%| [TS] {asan, default-linux-x86_64, release} ydb/core/tablet_flat/ut_pg/unittest |86.7%| [TS] {asan, default-linux-x86_64, release} ydb/core/tablet_flat/ut_pg/unittest >> TBlobStorageDiskBlob::Merge [GOOD] >> TBlobStorageHullDecimal::TestMkDecimal [GOOD] |86.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/federated_query/ut/ydb-core-kqp-federated_query-ut |86.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/federated_query/ut/ydb-core-kqp-federated_query-ut |86.7%| [LD] {RESULT} $(B)/ydb/core/kqp/federated_query/ut/ydb-core-kqp-federated_query-ut >> TSectorMapPerformance::TestHDD1960GBWrite100MBOnFirstSector [GOOD] >> TSectorMapPerformance::TestHDD1960GBWrite100MBOnLastSector >> TPDiskTest::TestRealFile [GOOD] >> TPDiskTest::TestSIGSEGVInTUndelivered |86.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/base/ut/unittest >> TBlobStorageHullDecimal::TestMkDecimal [GOOD] |86.7%| [TS] {asan, default-linux-x86_64, release} ydb/core/tablet_flat/ut_pg/unittest |86.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/query/ut/unittest |86.7%| [TS] {asan, default-linux-x86_64, release} ydb/core/tablet_flat/ut_pg/unittest >> TPDiskTest::TestSIGSEGVInTUndelivered [GOOD] >> TPDiskTest::WrongPDiskKey >> 
TBlobStorageHullDecimal::TestMkRatio [GOOD] >> TBlobStorageHullDecimal::TestMult [GOOD] >> TFlatDatabasePgTest::BasicTypes |86.7%| [TS] {asan, default-linux-x86_64, release} ydb/core/tablet_flat/ut_pg/unittest >> TFlatDatabasePgTest::BasicTypes [GOOD] |86.7%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/backup/common/ut/ydb-core-backup-common-ut >> TPDiskTest::WrongPDiskKey [GOOD] >> TPDiskTest::TestStartEncryptedOrPlainAndRestart |86.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/runtime/kqp_scan_spilling_ut.cpp |86.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/base/ut/unittest >> TBlobStorageHullDecimal::TestMult [GOOD] >> TBlobStorageLinearTrackBar::TestLinearTrackBarDouble [GOOD] >> TBlobStorageLinearTrackBar::TestLinearTrackBarWithDecimal [GOOD] >> ShredPDisk::KillVDiskWhilePreShredding [GOOD] >> ShredPDisk::KillVDiskWhileShredding |86.7%| [TS] {asan, default-linux-x86_64, release} ydb/core/tablet_flat/ut_pg/unittest |86.7%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/tablet_flat/ut_util/ydb-core-tablet_flat-ut_util |86.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/runtime/kqp_scan_spilling_ut.cpp |86.7%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/reader/abstract/libengines-reader-abstract.a |86.7%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/reader/abstract/libengines-reader-abstract.a |86.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/base/ut/unittest >> TBlobStorageLinearTrackBar::TestLinearTrackBarWithDecimal [GOOD] |86.7%| [TS] {asan, default-linux-x86_64, release} ydb/core/tablet_flat/ut_pg/unittest >> TFlatDatabasePgTest::BasicTypes [GOOD] |86.7%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/tx/long_tx_service/public/ut/ydb-core-tx-long_tx_service-public-ut |86.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_replication/ut_replication.cpp |86.7%| [TS] {asan, default-linux-x86_64, release} ydb/core/tablet_flat/ut_pg/unittest >> TSectorMapPerformance::TestHDD1960GBWrite100MBOnLastSector [GOOD] >> TSectorMapPerformance::TestSSD1960GBRead100MBOnFirstSector |86.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/sys_view/ut_kqp.cpp |86.8%| [TS] {asan, default-linux-x86_64, release} ydb/core/tablet_flat/ut_pg/unittest |86.8%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/abstract/libengines-reader-abstract.a |86.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_replication/ut_replication.cpp |86.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/backpressure/ut/unittest |86.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/backpressure/ut/unittest >> TSectorMapPerformance::TestSSD1960GBRead100MBOnFirstSector [GOOD] >> TSectorMapPerformance::TestSSD1960GBWrite100MBOnFirstSector |86.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/backpressure/ut/unittest |86.8%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/vdisk/ingress/ut/ydb-core-blobstorage-vdisk-ingress-ut >> TQueueBackpressureTest::CreateDelete [GOOD] >> TBlobStorageQueueTest::TMessageLost [GOOD] >> TSectorMapPerformance::TestSSD1960GBWrite100MBOnFirstSector [GOOD] >> TSectorMapPerformance::TestSSD1960GBRead1000MBOnFirstSector >> TQueueBackpressureTest::IncorrectMessageId [GOOD] >> TQueueBackpressureTest::PerfInFlight >> ShredPDisk::KillVDiskWhileShredding [GOOD] >> ShredPDisk::InitVDiskAfterShredding |86.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/backpressure/ut/unittest >> 
TBlobStorageQueueTest::TMessageLost [GOOD] |86.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/backpressure/ut/unittest >> TQueueBackpressureTest::CreateDelete [GOOD] |86.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/backpressure/ut/unittest >> TQueueBackpressureTest::IncorrectMessageId [GOOD] >> TQueueBackpressureTest::PerfTrivial >> TBlobStorageGroupInfoIterTest::IteratorForwardAndBackward [GOOD] >> TBlobStorageGroupInfoIterTest::PerFailDomainRange [GOOD] |86.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/console/net_classifier_updater_ut.cpp >> TBlobStorageGroupInfoIterTest::Domains [GOOD] >> TBlobStorageGroupInfoIterTest::Indexes [GOOD] |86.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/backpressure/ut/unittest |86.8%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/vdisk/common/ut/ydb-core-blobstorage-vdisk-common-ut >> TSubgroupPartLayoutTest::CountEffectiveReplicas1of4 >> TBlobStorageGroupInfoTest::GroupQuorumCheckerOrdinary >> TBlobStorageGroupInfoIterTest::IteratorForward [GOOD] >> TBlobStorageGroupInfoIterTest::IteratorBackward [GOOD] |86.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/groupinfo/ut/unittest >> TBlobStorageGroupInfoIterTest::PerFailDomainRange [GOOD] |86.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/net_classifier_updater_ut.cpp >> TBlobStorageHullFreshSegment::PerfAppendix [GOOD] >> TBlobStorageHullFreshSegment::PerfSkipList |86.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/sys_view/ut_kqp.cpp |86.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/groupinfo/ut/unittest >> TBlobStorageGroupInfoIterTest::Indexes [GOOD] >> TBlobStorageGroupInfoTest::GroupQuorumCheckerOrdinary [GOOD] >> TBlobStorageGroupInfoTest::GroupQuorumCheckerMirror3dc [GOOD] |86.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/groupinfo/ut/unittest >> TBlobStorageGroupInfoIterTest::IteratorBackward [GOOD] |86.8%| [CC] {default-linux-x86_64, release, asan} $(B)/ydb/core/blob_depot/schema.h_serialized.cpp >> TYardTest::TestChunkWriteReadMultiple [GOOD] >> TYardTest::TestChunkWriteReadMultipleWithHddSectorMap >> TBlobStorageGroupInfoBlobMapTest::CheckCorrectBehaviourWithHashOverlow [GOOD] >> TBlobStorageGroupInfoBlobMapTest::Mirror3dcMapper |86.8%| [CC] {BAZEL_UPLOAD} $(B)/ydb/core/blob_depot/schema.h_serialized.cpp >> TBlobStorageGroupInfoIterTest::PerRealmIterator [GOOD] >> TSubgroupPartLayoutTest::CountEffectiveReplicas3of4 >> TBlobStorageGroupInfoIterTest::WalkFailRealms [GOOD] |86.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/groupinfo/ut/unittest >> TBlobStorageGroupInfoTest::GroupQuorumCheckerMirror3dc [GOOD] >> TBlobStorageGroupInfoBlobMapTest::BelongsToSubgroupBenchmark >> TBlobStorageGroupInfoTest::TestBelongsToSubgroup >> TPDiskTest::TestStartEncryptedOrPlainAndRestart [GOOD] >> TPDiskUtil::AtomicBlockCounterFunctional [GOOD] >> TPDiskUtil::AtomicBlockCounterSeqno >> TPDiskUtil::AtomicBlockCounterSeqno [GOOD] >> TPDiskUtil::Light [GOOD] >> TPDiskUtil::LightOverflow [GOOD] >> TPDiskUtil::DriveEstimator |86.8%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/tablet_flat/ut_large/ydb-core-tablet_flat-ut_large |86.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/groupinfo/ut/unittest >> TBlobStorageGroupInfoIterTest::WalkFailRealms [GOOD] |86.8%| [CC] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/opt/kqp_query_plan.h_serialized.cpp |86.8%| [LD] {BAZEL_UPLOAD} 
$(B)/ydb/core/fq/libs/db_id_async_resolver_impl/ut/ydb-core-fq-libs-db_id_async_resolver_impl-ut >> TBlobStorageGroupInfoTest::TestBelongsToSubgroup [GOOD] >> TBlobStorageGroupInfoTest::SubgroupPartLayout |86.8%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/pgproxy/ut/ydb-core-pgproxy-ut |86.8%| [CC] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/opt/kqp_query_plan.h_serialized.cpp >> ShredPDisk::InitVDiskAfterShredding [GOOD] >> ShredPDisk::ReinitVDiskWhilePreShredding |86.8%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/groupinfo/ut/ydb-core-blobstorage-groupinfo-ut |86.8%| [LD] {BAZEL_UPLOAD} $(B)/ydb/mvp/meta/ut/ydb-mvp-meta-ut |86.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/dsproxy/ut_strategy/unittest >> TQueueBackpressureTest::PerfTrivial [GOOD] >> TBlobStorageHullFreshSegment::PerfSkipList [GOOD] |86.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/dsproxy/ut_strategy/unittest |86.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/dsproxy/ut_strategy/unittest |86.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/backpressure/ut/unittest >> TQueueBackpressureTest::PerfTrivial [GOOD] |86.8%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/tablet_flat/test/tool/surg/surg >> TSectorMapPerformance::TestSSD1960GBRead1000MBOnFirstSector [GOOD] >> TSectorMapPerformance::TestSSD1960GBWrite1000MBOnFirstSector |86.8%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/util/btree_benchmark/btree_benchmark >> DSProxyStrategyTest::Restore_mirror3dc |86.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/yql/providers/pq/provider/ut/ydb-library-yql-providers-pq-provider-ut |86.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/library/yql/providers/pq/provider/ut/ydb-library-yql-providers-pq-provider-ut |86.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/fresh/ut/unittest >> TBlobStorageHullFreshSegment::PerfSkipList [GOOD] >> TPDiskRaces::KillOwnerWhileDeletingChunk [GOOD] >> TPDiskRaces::KillOwnerWhileDeletingChunkWithInflight |86.8%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/config/validation/auth_config_validator_ut/core-config-validation-auth_config_validator_ut |86.8%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/log_backend/ut/ydb-core-log_backend-ut |86.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/backpressure/ut/unittest |86.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/dsproxy/ut_strategy/unittest |86.8%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/base/ut/ydb-core-base-ut |86.8%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/tablet_flat/test/tool/perf/table-perf >> TYardTest::TestChunkReadRandomOffset [GOOD] >> TYardTest::TestChunkWrite20Read02 |86.8%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/config/init/ut/ydb-core-config-init-ut |86.8%| [TS] {asan, default-linux-x86_64, release} ydb/core/tablet_flat/ut_pg/unittest >> TQueueBackpressureTest::PerfInFlight [GOOD] |86.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/dsproxy/ut_strategy/unittest >> TYardTest::TestChunkWrite20Read02 [GOOD] >> TYardTest::TestChunkContinuity2 >> ShredPDisk::ReinitVDiskWhilePreShredding [GOOD] >> ShredPDisk::ReinitVDiskWhileShredding >> StatsFormat::FullStat |86.8%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/vdisk/hullop/ut/ydb-core-blobstorage-vdisk-hullop-ut |86.8%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/provider/libcore-kqp-provider.a |86.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/dsproxy/ut_strategy/unittest >> StatsFormat::FullStat [GOOD] >> TYardTest::TestChunkContinuity2 [GOOD] 
>> TYardTest::TestChunkContinuity3000 |86.9%| [AR] {RESULT} $(B)/ydb/core/kqp/provider/libcore-kqp-provider.a |86.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/backpressure/ut/unittest >> TQueueBackpressureTest::PerfInFlight [GOOD] |86.9%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/vdisk/hulldb/compstrat/ut/ydb-core-blobstorage-vdisk-hulldb-compstrat-ut |86.9%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/vdisk/hulldb/base/ut/ydb-core-blobstorage-vdisk-hulldb-base-ut |86.9%| [CC] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/export/session/session.h_serialized.cpp >> DSProxyStrategyTest::Restore_block42 >> Config::IncludeScope [GOOD] >> FormatTimes::DurationMs [GOOD] |86.9%| [CC] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/export/session/session.h_serialized.cpp |86.9%| [TA] $(B)/ydb/core/tablet_flat/ut_pg/test-results/unittest/{meta.json ... results_accumulator.log} |86.9%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/vdisk/hulldb/generic/ut/ydb-core-blobstorage-vdisk-hulldb-generic-ut >> StatsFormat::AggregateStat [GOOD] |86.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/dsproxy/ut_strategy/unittest |86.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/dsproxy/ut_strategy/unittest |86.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/compute/common/ut/unittest >> StatsFormat::FullStat [GOOD] >> TYardTest::TestChunkContinuity3000 [GOOD] >> TYardTest::TestChunkContinuity9000 |86.9%| [CC] {default-linux-x86_64, release, asan} $(B)/ydb/core/persqueue/read_balancer__balancing.h_serialized.cpp |86.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/compute/common/ut/unittest >> Config::IncludeScope [GOOD] |86.9%| [CC] {BAZEL_UPLOAD} $(B)/ydb/core/persqueue/read_balancer__balancing.h_serialized.cpp |86.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/compute/common/ut/unittest >> FormatTimes::DurationMs [GOOD] |86.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/compute/common/ut/unittest >> StatsFormat::AggregateStat [GOOD] >> TYardTest::TestChunkContinuity9000 [GOOD] >> TYardTest::TestChunkLock |86.9%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/config/validation/column_shard_config_validator_ut/column_shard_config_validator_ut |86.9%| [LD] {RESULT} $(B)/ydb/library/yql/providers/pq/provider/ut/ydb-library-yql-providers-pq-provider-ut |86.9%| [TA] {RESULT} $(B)/ydb/core/tablet_flat/ut_pg/test-results/unittest/{meta.json ... results_accumulator.log} |86.9%| [TA] $(B)/ydb/core/blobstorage/backpressure/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} |86.9%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/config/validation/ut/ydb-core-config-validation-ut |86.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/compute/common/ut/unittest >> TBlobStorageGroupInfoBlobMapTest::Mirror3dcMapper [GOOD] >> FormatTimes::DurationUs [GOOD] >> TYardTest::TestChunkLock [GOOD] >> TYardTest::TestChunkUnlock |86.9%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/provider/libcore-kqp-provider.a |86.9%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/export/session/libcolumnshard-export-session.a |86.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/yql/tools/dq/worker_node/worker_node |86.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/library/yql/tools/dq/worker_node/worker_node |86.9%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/export/session/libcolumnshard-export-session.a |86.9%| [LD] {RESULT} $(B)/ydb/library/yql/tools/dq/worker_node/worker_node |86.9%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/export/session/libcolumnshard-export-session.a >> TSectorMapPerformance::TestSSD1960GBWrite1000MBOnFirstSector [GOOD] >> TYardTest::Test3AsyncLog |86.9%| [TA] {RESULT} $(B)/ydb/core/blobstorage/backpressure/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> ShredPDisk::ReinitVDiskWhileShredding [GOOD] >> ShredPDisk::RetryPreShredCompactError >> Config::ExcludeScope [GOOD] >> TYardTest::TestChunkUnlock [GOOD] >> TYardTest::TestChunkUnlockHarakiri |86.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/yql/tools/dq/service_node/service_node |86.9%| [LD] {RESULT} $(B)/ydb/library/yql/tools/dq/service_node/service_node |86.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/library/yql/tools/dq/service_node/service_node |86.9%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/dsproxy/ut_strategy/ydb-core-blobstorage-dsproxy-ut_strategy |86.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_ut_trace.cpp |86.9%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/vdisk/hulldb/fresh/ut/ydb-core-blobstorage-vdisk-hulldb-fresh-ut |86.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/compute/common/ut/unittest >> FormatTimes::DurationUs [GOOD] |86.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_trace.cpp |86.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/compute/common/ut/unittest >> FormatTimes::ParseDuration [GOOD] |86.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/compute/common/ut/unittest >> Config::ExcludeScope [GOOD] >> TYardTest::TestChunkUnlockHarakiri [GOOD] >> TYardTest::TestChunkUnlockRestart >> TBlobStorageHullWriteSst::LogoBlobOneSstOneIndexWithSmallWriteBlocks [GOOD] >> TBlobStorageHullWriteSst::LogoBlobOneSstOneIndexPartOutbound >> TBlobStorageHullWriteSst::LogoBlobOneSstOneIndexPartOutbound [GOOD] >> TBlobStorageHullWriteSst::BlockOneSstOneIndex [GOOD] >> TBlobStorageHullWriteSst::BlockOneSstMultiIndex ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/groupinfo/ut/unittest >> TBlobStorageGroupInfoBlobMapTest::Mirror3dcMapper [GOOD] Test command err: [0:1:0:3:1]# 173 184 157 167 152 185 195 192 144 [0:1:1:1:1]# 189 195 192 171 157 161 167 155 196 [0:1:3:3:1]# 184 157 182 152 185 157 192 144 189 [0:1:3:4:0]# 148 154 155 158 194 160 156 163 140 [0:1:2:3:2]# 152 177 174 176 154 146 161 170 168 [0:1:1:2:1]# 157 167 152 189 195 192 171 157 161 [0:1:1:0:2]# 158 150 131 167 177 161 177 174 173 [0:1:3:0:1]# 161 155 171 196 154 167 184 157 182 [0:1:0:3:2]# 174 173 152 146 184 176 
168 157 161 [0:1:2:2:0]# 163 140 161 148 162 159 168 178 190 [0:1:0:2:0]# 161 156 163 159 196 148 190 162 168 [0:1:3:2:1]# 152 185 157 192 144 189 161 155 171 [0:1:2:3:1]# 157 182 173 185 157 167 144 189 195 [0:1:3:1:2]# 157 161 170 131 190 158 161 178 167 [0:1:2:0:1]# 155 171 157 154 167 155 157 182 173 [0:1:3:0:2]# 131 190 158 161 178 167 173 152 177 [0:1:2:0:2]# 190 158 150 178 167 177 152 177 174 [0:1:2:4:1]# 154 167 155 157 182 173 185 157 167 [0:1:2:1:2]# 161 170 168 190 158 150 178 167 177 [0:1:2:4:2]# 178 167 177 152 177 174 176 154 146 [0:1:0:2:1]# 167 152 185 195 192 144 157 161 155 [0:1:0:0:0]# 190 162 168 174 148 154 177 158 194 [0:1:3:2:0]# 156 163 140 196 148 162 162 168 178 [0:1:1:0:1]# 171 157 161 167 155 196 182 173 184 [0:1:0:2:2]# 146 184 176 168 157 161 150 131 190 [0:1:1:0:0]# 178 190 162 155 174 148 160 177 158 [0:1:2:3:0]# 194 160 177 163 140 161 148 162 159 [0:1:2:4:0]# 154 155 174 194 160 177 163 140 161 [0:1:1:3:2]# 177 174 173 154 146 184 170 168 157 [0:1:2:1:1]# 144 189 195 155 171 157 154 167 155 [0:1:1:1:0]# 162 159 196 178 190 162 155 174 148 [0:1:1:3:1]# 182 173 184 157 167 152 189 195 192 [0:1:3:4:1]# 196 154 167 184 157 182 152 185 157 [0:1:1:4:2]# 167 177 161 177 174 173 154 146 184 [0:1:0:1:0]# 159 196 148 190 162 168 174 148 154 [0:1:3:4:2]# 161 178 167 173 152 177 184 176 154 [0:1:0:0:1]# 157 161 155 155 196 154 173 184 157 [0:1:1:4:0]# 155 174 148 160 177 158 140 161 156 [0:1:2:1:0]# 148 162 159 168 178 190 154 155 174 [0:1:2:0:0]# 168 178 190 154 155 174 194 160 177 [0:1:3:3:2]# 173 152 177 184 176 154 157 161 170 [0:1:0:4:0]# 174 148 154 177 158 194 161 156 163 [0:1:1:2:0]# 140 161 156 162 159 196 178 190 162 [0:1:0:1:1]# 195 192 144 157 161 155 155 196 154 [0:1:3:0:0]# 162 168 178 148 154 155 158 194 160 [0:1:3:1:1]# 192 144 189 161 155 171 196 154 167 [0:1:0:4:1]# 155 196 154 173 184 157 167 152 185 [0:1:2:2:1]# 185 157 167 144 189 195 155 171 157 [0:1:3:1:0]# 196 148 162 162 168 178 148 154 155 [0:1:2:2:2]# 176 154 146 161 170 168 190 158 150 [0:1:0:3:0]# 177 158 194 161 156 163 159 196 148 [0:1:3:3:0]# 158 194 160 156 163 140 196 148 162 [0:1:0:1:2]# 168 157 161 150 131 190 177 161 178 [0:1:3:2:2]# 184 176 154 157 161 170 131 190 158 [0:1:1:3:0]# 160 177 158 140 161 156 162 159 196 [0:1:1:2:2]# 154 146 184 170 168 157 158 150 131 [0:1:1:4:1]# 167 155 196 182 173 184 157 167 152 [0:1:1:1:2]# 170 168 157 158 150 131 167 177 161 [0:1:0:0:2]# 150 131 190 177 161 178 174 173 152 [0:1:0:4:2]# 177 161 178 174 173 152 146 184 176 mean# 166.6666667 dev# 15.11254078 >> TBlobStorageHullWriteSst::BlockOneSstMultiIndex [GOOD] >> TBlobStorageHullSstIt::TestSstIndexSeekAndIterate [GOOD] >> TBlobStorageHullWriteSst::BlockMultiSstOneIndex >> TYardTest::TestChunkUnlockRestart [GOOD] >> TYardTest::TestChunkReserve >> TBlobStorageHullOrderedSstsIt::TestSeekToFirst [GOOD] >> TBlobStorageHullOrderedSstsIt::TestSeekToLast [GOOD] >> TBlobStorageHullOrderedSstsIt::TestSeekAfterAndPrev [GOOD] |86.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/compute/common/ut/unittest >> FormatTimes::ParseDuration [GOOD] |86.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/scheme_board/ut_double_indexed/ydb-core-tx-scheme_board-ut_double_indexed >> TBlobStorageHullWriteSst::LogoBlobOneSstMultiIndex [GOOD] >> TBlobStorageHullWriteSst::LogoBlobMultiSstOneIndexPartOutbound >> TBlobStorageHullWriteSst::BlockMultiSstOneIndex [GOOD] |86.9%| [LD] {RESULT} $(B)/ydb/core/tx/scheme_board/ut_double_indexed/ydb-core-tx-scheme_board-ut_double_indexed >> 
TBlobStorageHullWriteSst::LogoBlobMultiSstOneIndexPartOutbound [GOOD] |86.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/generic/ut/unittest >> TBlobStorageHullWriteSst::LogoBlobOneSstOneIndexPartOutbound [GOOD] >> TBlobStorageHullWriteSst::LogoBlobMultiSstOneIndex [GOOD] >> TBlobStorageHullWriteSst::LogoBlobMultiSstMultiIndex [GOOD] >> TBlobStorageHullSstIt::TestSeekExactAndNext [GOOD] >> TBlobStorageHullSstIt::TestSeekBefore |86.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/generic/ut/unittest >> TBlobStorageHullWriteSst::BlockOneSstMultiIndex [GOOD] |86.9%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/opt/libcore-kqp-opt.a >> TBlobStorageHullSstIt::TestSeekBefore [GOOD] >> TBlobStorageHullSstIt::TestSeekAfterAndPrev [GOOD] >> TYardTest::TestChunkReserve [GOOD] >> TYardTest::TestCheckSpace |86.9%| [AR] {RESULT} $(B)/ydb/core/kqp/opt/libcore-kqp-opt.a >> ShredPDisk::RetryPreShredCompactError [GOOD] >> ShredPDisk::RetryShredError >> TYardTest::Test3AsyncLog [GOOD] >> TYardTest::Test3HugeAsyncLog >> TCircleBufStringStreamTest::TestNotAligned [GOOD] >> TCircleBufStringStreamTest::TestOverflow [GOOD] >> TCircleBufTest::EmptyTest [GOOD] >> TCircleBufTest::OverflowTest [GOOD] >> TBlobStorageHullSstIt::TestSeekToLast |86.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/generic/ut/unittest >> TBlobStorageHullWriteSst::LogoBlobMultiSstMultiIndex [GOOD] |86.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/generic/ut/unittest >> TBlobStorageHullWriteSst::BlockMultiSstOneIndex [GOOD] >> TBlobStorageHullSstIt::TestSeekToLast [GOOD] >> TBlobStorageHullSstIt::TestSstIndexSaveLoad [GOOD] >> TBlobStorageSyncNeighborsTest::IterateOverAllDisks [GOOD] >> TBlobStorageSyncNeighborsTest::SerDes [GOOD] >> TBlobStorageSyncNeighborsTest::CheckVDiskIterators [GOOD] >> TCircleBufStringStreamTest::TestAligned [GOOD] >> TYardTest::TestCheckSpace [GOOD] >> TYardTest::TestBootingState |86.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/generic/ut/unittest >> TBlobStorageHullOrderedSstsIt::TestSeekAfterAndPrev [GOOD] |86.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/generic/ut/unittest >> TBlobStorageHullWriteSst::LogoBlobMultiSstOneIndexPartOutbound [GOOD] |86.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/generic/ut/unittest >> TBlobStorageHullSstIt::TestSstIndexSaveLoad [GOOD] |87.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/generic/ut/unittest >> TBlobStorageHullSstIt::TestSeekAfterAndPrev [GOOD] |87.0%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/opt/libcore-kqp-opt.a |87.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/common/ut/unittest >> TCircleBufTest::OverflowTest [GOOD] >> TCircleBufTest::SimpleTest [GOOD] >> TCircleBufTest::PtrTest [GOOD] >> TLsnAllocTrackerTests::Test1 [GOOD] >> TLsnMngrTests::AllocLsnForLocalUse |87.0%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/tx/scheme_board/ut_double_indexed/ydb-core-tx-scheme_board-ut_double_indexed |87.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/compute/common/ut/unittest |87.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/common/ut/unittest >> TCircleBufStringStreamTest::TestAligned [GOOD] >> TBlobStorageHullWriteSst::LogoBlobOneSstOneIndex [GOOD] >> TBlobStorageHullWriteSst::LogoBlobOneSstMultiIndexPartOutbound [GOOD] >> 
TTrackable::TVector [GOOD] >> TTrackable::TList [GOOD] >> TTrackable::TString [GOOD] >> TPDiskErrorStateTests::Basic [GOOD] >> TPDiskErrorStateTests::Basic2 [GOOD] >> TPDiskErrorStateTests::BasicErrorReason [GOOD] >> TLsnMngrTests::AllocLsnForLocalUse2Threads |87.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/ut_mirror3of4/ydb-core-blobstorage-ut_mirror3of4 |87.0%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_mirror3of4/ydb-core-blobstorage-ut_mirror3of4 |87.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/ut_mirror3of4/ydb-core-blobstorage-ut_mirror3of4 |87.0%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/blob_depot/libydb-core-blob_depot.a |87.0%| [AR] {RESULT} $(B)/ydb/core/blob_depot/libydb-core-blob_depot.a |87.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/common/ut/unittest >> TTrackable::TString [GOOD] |87.0%| [TA] $(B)/ydb/core/fq/libs/compute/common/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> ShredPDisk::RetryShredError [GOOD] |87.0%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/persqueue/libydb-core-persqueue.a |87.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/generic/ut/unittest >> TBlobStorageHullWriteSst::LogoBlobOneSstMultiIndexPartOutbound [GOOD] |87.0%| [AR] {RESULT} $(B)/ydb/core/persqueue/libydb-core-persqueue.a |87.0%| [TS] {asan, default-linux-x86_64, release} ydb/core/scheme/ut_pg/unittest |87.0%| [TA] {RESULT} $(B)/ydb/core/fq/libs/compute/common/ut/test-results/unittest/{meta.json ... results_accumulator.log} |87.0%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/ymq/actor/libcore-ymq-actor.a |87.0%| [AR] {RESULT} $(B)/ydb/core/ymq/actor/libcore-ymq-actor.a |87.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/ut_vdisk2/ydb-core-blobstorage-ut_vdisk2 |87.0%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_vdisk2/ydb-core-blobstorage-ut_vdisk2 |87.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/ut_vdisk2/ydb-core-blobstorage-ut_vdisk2 >> AuthTokenAllowed::PassOnEmptyListAndTokenWithEmptyUserSidAndGroups [GOOD] >> AuthTokenAllowed::PassOnListMatchGroupSid [GOOD] |87.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/common/ut/unittest >> TPDiskErrorStateTests::BasicErrorReason [GOOD] |87.0%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/blob_depot/libydb-core-blob_depot.a >> TopicNameConverterTest::Paths [GOOD] >> TopicNameConverterTest::PathFromDiscoveryConverter [GOOD] >> TBlobStorageHullSstIt::TestSeekToFirst [GOOD] >> TBlobStorageHullSstIt::TestSeekExactAndPrev >> TBlobStorageHullSstIt::TestSeekExactAndPrev [GOOD] >> TBlobStorageHullSstIt::TestSeekNotExactBefore [GOOD] >> TLsnMngrTests::AllocLsnForLocalUse [GOOD] |87.0%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/persqueue/libydb-core-persqueue.a ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/pdisk/ut/unittest >> ShredPDisk::RetryShredError [GOOD] Test command err: /home/runner/actions_runner/_work/ydb/ydb/ydb/core/blobstorage/pdisk/blobstorage_pdisk_ut_env.h:379 /home/runner/actions_runner/_work/ydb/ydb/ydb/core/blobstorage/pdisk/blobstorage_pdisk_ut_env.h:379 /home/runner/actions_runner/_work/ydb/ydb/ydb/core/blobstorage/pdisk/blobstorage_pdisk_ut_env.h:379 /home/runner/actions_runner/_work/ydb/ydb/ydb/core/blobstorage/pdisk/blobstorage_pdisk_ut_env.h:379 /home/runner/actions_runner/_work/ydb/ydb/ydb/core/blobstorage/pdisk/blobstorage_pdisk_ut_env.h:379 
/home/runner/actions_runner/_work/ydb/ydb/ydb/core/blobstorage/pdisk/blobstorage_pdisk_ut_env.h:379 /home/runner/actions_runner/_work/ydb/ydb/ydb/core/blobstorage/pdisk/blobstorage_pdisk_ut_env.h:379 /home/runner/actions_runner/_work/ydb/ydb/ydb/core/blobstorage/pdisk/blobstorage_pdisk_ut_env.h:379 |87.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/query/ut/unittest >> TVDiskConfigTest::JustConfig [GOOD] >> TVDiskConfigTest::Basic [GOOD] >> TVDiskConfigTest::NoMoneyNoHoney [GOOD] >> TVDiskConfigTest::RtmrProblem1 [GOOD] >> TVDiskConfigTest::RtmrProblem2 [GOOD] >> TVDiskConfigTest::ThreeLevels [GOOD] |87.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/base/ut_auth/unittest >> AuthTokenAllowed::PassOnListMatchGroupSid [GOOD] |87.0%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/ymq/actor/libcore-ymq-actor.a |87.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/generic/ut/unittest >> TBlobStorageHullSstIt::TestSeekNotExactBefore [GOOD] >> TLsnMngrTests::AllocLsnForLocalUse2Threads [GOOD] >> TLsnMngrTests::AllocLsnForLocalUse10Threads |87.0%| [TS] {asan, default-linux-x86_64, release} ydb/library/persqueue/topic_parser/ut/unittest >> TopicNameConverterTest::PathFromDiscoveryConverter [GOOD] |87.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/common/ut/unittest >> TLsnMngrTests::AllocLsnForLocalUse [GOOD] |87.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/ut_group/ydb-core-blobstorage-ut_group |87.0%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_group/ydb-core-blobstorage-ut_group >> TBlobStorageSyncNeighborsTest::CheckRevLookup [GOOD] >> TBlobStorageSyncNeighborsTest::CheckIsMyDomain [GOOD] >> TBlobStorageSyncNeighborsTest::CheckFailDomainsIterators [GOOD] >> TBlobStorageSyncNeighborsTest::CheckVDiskDistance [GOOD] >> TBlobStorageHullDecimal::TestRoundToInt [GOOD] >> TBlobStorageHullDecimal::TestToUi64 [GOOD] |87.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/backpressure/ut_client/ydb-core-blobstorage-backpressure-ut_client |87.0%| [LD] {RESULT} $(B)/ydb/core/blobstorage/backpressure/ut_client/ydb-core-blobstorage-backpressure-ut_client |87.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/backpressure/ut_client/ydb-core-blobstorage-backpressure-ut_client |87.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/common/ut/unittest >> TVDiskConfigTest::NoMoneyNoHoney [GOOD] |87.0%| [TA] $(B)/ydb/core/blobstorage/vdisk/hulldb/generic/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> TYardTest::Test3HugeAsyncLog [GOOD] >> TYardTest::TestAllocateAllChunks |87.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/ymq/base/ut/ydb-core-ymq-base-ut |87.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/ymq/base/ut/ydb-core-ymq-base-ut |87.0%| [TA] {RESULT} $(B)/ydb/core/blobstorage/vdisk/hulldb/generic/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} >> TYardTest::TestBootingState [GOOD] >> TYardTest::TestChunkRecommit |87.0%| [LD] {RESULT} $(B)/ydb/core/ymq/base/ut/ydb-core-ymq-base-ut |87.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/common/ut/unittest >> TVDiskConfigTest::ThreeLevels [GOOD] >> TBlobStorageIngress::IngressCreateFromRepl [GOOD] >> TBlobStorageIngress::IngressGetMainReplica [GOOD] >> TBlobStorageIngress::IngressHandoffPartsDelete [GOOD] |87.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/common/ut/unittest >> TBlobStorageSyncNeighborsTest::CheckVDiskDistance [GOOD] >> TBlobStorageIngressMatrix::VectorTestEmpty [GOOD] >> TBlobStorageIngressMatrix::VectorTestBitwiseComplement2 [GOOD] |87.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/base/ut/unittest >> TBlobStorageHullDecimal::TestToUi64 [GOOD] >> TBlobStorageIngressMatrix::VectorTestBitwiseAnd [GOOD] >> TBlobStorageIngressMatrix::VectorTestBitwiseComplement1 [GOOD] >> TBlobStorageIngressMatrix::VectorTestBitsBefore2 [GOOD] >> TBlobStorageIngressMatrix::VectorTestIterator1 [GOOD] >> TBlobStorageIngressMatrix::VectorTestIterator2 [GOOD] >> TYardTest::TestChunkRecommit [GOOD] >> TYardTest::TestChunkRestartRecommit >> TYardTest::TestAllocateAllChunks [GOOD] |87.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/ingress/ut/unittest >> TBlobStorageIngressMatrix::VectorTestBitwiseComplement2 [GOOD] >> TBlobStorageIngress::BarrierIngressQuorumBasicMirror3_4_2 [GOOD] >> TBlobStorageIngress::BarrierIngressQuorumBasic4Plus2_8_1 [GOOD] >> TBlobStorageIngress::BarrierIngressQuorumMirror3 [GOOD] >> TBlobStorageIngressMatrix::VectorTest [GOOD] >> TBlobStorageIngressMatrix::VectorTestBitsBefore1 [GOOD] >> TBlobStorageIngressMatrix::ShiftedMainBitVec [GOOD] >> TResizableCircleBufTest::Test1 [GOOD] >> TResizableCircleBufTest::Test2 [GOOD] >> TTrackable::TBuffer [GOOD] |87.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/ingress/ut/unittest >> TBlobStorageIngress::IngressHandoffPartsDelete [GOOD] |87.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/fq/libs/test_connection/ut/ydb-core-fq-libs-test_connection-ut >> TLsnMngrTests::AllocLsnForLocalUse10Threads [GOOD] >> TOutOfSpaceStateTests::TestLocal [GOOD] >> TOutOfSpaceStateTests::TestGlobal [GOOD] >> TBlobStorageIngressMatrix::VectorTestMinus [GOOD] >> TBlobStorageIngressMatrix::VectorTestIterator3 [GOOD] |87.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/fq/libs/test_connection/ut/ydb-core-fq-libs-test_connection-ut |87.1%| [LD] {RESULT} $(B)/ydb/core/fq/libs/test_connection/ut/ydb-core-fq-libs-test_connection-ut |87.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/ingress/ut/unittest >> TBlobStorageIngressMatrix::VectorTestIterator2 [GOOD] |87.1%| [TA] $(B)/ydb/core/blobstorage/vdisk/hulldb/base/ut/test-results/unittest/{meta.json ... results_accumulator.log} |87.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/ingress/ut/unittest >> TBlobStorageIngressMatrix::VectorTestBitsBefore2 [GOOD] |87.1%| [TA] {RESULT} $(B)/ydb/core/blobstorage/vdisk/hulldb/base/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} |87.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/external_sources/object_storage/inference/ut/external_sources-object_storage-inference-ut |87.1%| [LD] {RESULT} $(B)/ydb/core/external_sources/object_storage/inference/ut/external_sources-object_storage-inference-ut |87.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/external_sources/object_storage/inference/ut/external_sources-object_storage-inference-ut |87.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/ingress/ut/unittest >> TBlobStorageIngress::BarrierIngressQuorumMirror3 [GOOD] |87.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/ingress/ut/unittest >> TBlobStorageIngressMatrix::ShiftedMainBitVec [GOOD] |87.1%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/ut_group/ydb-core-blobstorage-ut_group >> TYardTest::TestChunkRestartRecommit [GOOD] >> TYardTest::TestChunkDelete |87.1%| [CC] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/operation_queue_timer.h_serialized.cpp |87.1%| [CC] {BAZEL_UPLOAD} $(B)/ydb/core/tx/schemeshard/operation_queue_timer.h_serialized.cpp |87.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/ingress/ut/unittest >> TBlobStorageIngressMatrix::VectorTestIterator3 [GOOD] |87.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/common/ut/unittest >> TOutOfSpaceStateTests::TestGlobal [GOOD] >> TBlobStorageIngressMatrix::MatrixTest [GOOD] >> TBlobStorageIngressMatrix::ShiftedBitVecBase [GOOD] >> TBlobStorageIngressMatrix::ShiftedHandoffBitVec [GOOD] |87.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/common/ut/unittest >> TTrackable::TBuffer [GOOD] |87.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hullop/ut/unittest |87.1%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/libcore-tx-datashard.a |87.1%| [AR] {RESULT} $(B)/ydb/core/tx/datashard/libcore-tx-datashard.a |87.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/vdisk/repl/ut/ydb-core-blobstorage-vdisk-repl-ut |87.1%| [LD] {RESULT} $(B)/ydb/core/blobstorage/vdisk/repl/ut/ydb-core-blobstorage-vdisk-repl-ut |87.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/vdisk/repl/ut/ydb-core-blobstorage-vdisk-repl-ut |87.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/ingress/ut/unittest >> TBlobStorageIngressMatrix::ShiftedHandoffBitVec [GOOD] >> TYardTest::TestChunkDelete [GOOD] >> TYardTest::TestChunkForget |87.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/vdisk/hulldb/cache_block/ut/ydb-core-blobstorage-vdisk-hulldb-cache_block-ut |87.1%| [LD] {RESULT} $(B)/ydb/core/blobstorage/vdisk/hulldb/cache_block/ut/ydb-core-blobstorage-vdisk-hulldb-cache_block-ut |87.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/vdisk/hulldb/cache_block/ut/ydb-core-blobstorage-vdisk-hulldb-cache_block-ut |87.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blob_depot/ut/ydb-core-blob_depot-ut |87.1%| [LD] {RESULT} $(B)/ydb/core/blob_depot/ut/ydb-core-blob_depot-ut |87.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/pdisk/ut/unittest >> TYardTest::TestAllocateAllChunks [GOOD] |87.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/incrhuge/ut/ydb-core-blobstorage-incrhuge-ut |87.1%| [LD] {RESULT} $(B)/ydb/core/blobstorage/incrhuge/ut/ydb-core-blobstorage-incrhuge-ut |87.1%| [LD] {BAZEL_UPLOAD, SKIPPED} 
$(B)/ydb/core/blobstorage/incrhuge/ut/ydb-core-blobstorage-incrhuge-ut |87.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/vdisk/huge/ut/ydb-core-blobstorage-vdisk-huge-ut |87.1%| [LD] {RESULT} $(B)/ydb/core/blobstorage/vdisk/huge/ut/ydb-core-blobstorage-vdisk-huge-ut |87.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/vdisk/huge/ut/ydb-core-blobstorage-vdisk-huge-ut |87.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_vdisk2/unittest >> VDiskTest::HugeBlobWrite |87.1%| [TA] $(B)/ydb/core/blobstorage/vdisk/common/ut/test-results/unittest/{meta.json ... results_accumulator.log} |87.1%| [TA] {RESULT} $(B)/ydb/core/blobstorage/vdisk/common/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> TYardTest::TestChunkForget [GOOD] >> TYardTest::TestChunkFlushReboot |87.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/query/ut/unittest |87.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_vdisk2/unittest |87.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tools/blobsan/blobsan |87.1%| [LD] {RESULT} $(B)/ydb/tools/blobsan/blobsan |87.1%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/blob_depot/ut/ydb-core-blob_depot-ut |87.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_vdisk2/unittest >> TDelayedResponsesTests::Test |87.1%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/datashard/libcore-tx-datashard.a >> TDelayedResponsesTests::Test [GOOD] |87.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hullop/ut/unittest >> TYardTest::TestChunkFlushReboot [GOOD] >> TYardTest::TestChunkDeletionWhileWriting |87.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_vdisk2/unittest |87.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_vdisk2/unittest |87.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_vdisk2/unittest >> TSubgroupPartLayoutTest::CountEffectiveReplicas1of4 [GOOD] >> TSubgroupPartLayoutTest::CountEffectiveReplicas2of4 >> TBlobStorageIngress::IngressPartsWeMustHaveLocally [GOOD] >> TBlobStorageIngress::IngressLocalParts [GOOD] >> TBlobStorageIngress::IngressPrintDistribution [GOOD] >> TYardTest::TestChunkDeletionWhileWriting [GOOD] >> TYardTest::TestChunkPriorityBlock |87.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hullop/ut/unittest >> TDelayedResponsesTests::Test [GOOD] |87.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_vdisk2/unittest >> TBlobStorageIngress::Ingress [GOOD] >> TBlobStorageIngress::IngressCacheMirror3 [GOOD] >> TBlobStorageIngress::IngressCache4Plus2 [GOOD] >> TPDiskRaces::KillOwnerWhileDeletingChunkWithInflight [GOOD] >> TPDiskRaces::KillOwnerWhileDeletingChunkWithInflightMock |87.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/vdisk/synclog/ut/ydb-core-blobstorage-vdisk-synclog-ut |87.2%| [LD] {RESULT} $(B)/ydb/core/blobstorage/vdisk/synclog/ut/ydb-core-blobstorage-vdisk-synclog-ut |87.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/vdisk/synclog/ut/ydb-core-blobstorage-vdisk-synclog-ut |87.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/ingress/ut/unittest >> TBlobStorageIngress::IngressPrintDistribution [GOOD] |87.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/ut_vdisk/ydb-core-blobstorage-ut_vdisk |87.2%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_vdisk/ydb-core-blobstorage-ut_vdisk |87.2%| [LD] {BAZEL_UPLOAD, SKIPPED} 
$(B)/ydb/core/blobstorage/ut_vdisk/ydb-core-blobstorage-ut_vdisk >> TYardTest::TestChunkWriteReadMultipleWithHddSectorMap [GOOD] >> TYardTest::TestChunkWriteReadWhole >> TYardTest::TestChunkPriorityBlock [GOOD] |87.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_vdisk2/unittest |87.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/federated_query/s3/kqp_federated_query_ut.cpp >> TActorTest::TestWaitForFirstEvent >> TQueryResultSizeTrackerTest::CheckOnlyQueryResult [GOOD] |87.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/ingress/ut/unittest >> TBlobStorageIngress::IngressCache4Plus2 [GOOD] >> TopicNameConverterForCPTest::BadLegacyTopics [GOOD] >> TopicNameConverterForCPTest::BadModernTopics [GOOD] |87.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/federated_query/s3/kqp_federated_query_ut.cpp |87.2%| [LD] {BAZEL_UPLOAD} $(B)/ydb/tools/blobsan/blobsan >> TActorTest::TestWaitForFirstEvent [GOOD] >> TActorTest::TestWaitFuture [GOOD] >> TActorTest::TestStateSwitch [GOOD] >> TSubgroupPartLayoutTest::CountEffectiveReplicas2of4 [GOOD] |87.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_vdisk2/unittest |87.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/query/ut/unittest >> TQueryResultSizeTrackerTest::CheckOnlyQueryResult [GOOD] |87.2%| [TS] {asan, default-linux-x86_64, release} ydb/library/persqueue/topic_parser/ut/unittest >> TopicNameConverterForCPTest::BadModernTopics [GOOD] >> TActorTest::TestCreateChildActor >> ReadBatcher::Range |87.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/repl/ut/unittest >> TActorTest::TestCreateChildActor [GOOD] >> TActorTest::TestBlockEvents |87.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/ut_pdiskfit/ut/ydb-core-blobstorage-ut_pdiskfit-ut |87.2%| [TA] $(B)/ydb/core/blobstorage/vdisk/ingress/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} |87.2%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_pdiskfit/ut/ydb-core-blobstorage-ut_pdiskfit-ut |87.2%| [CC] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/column_engine_logs.h_serialized.cpp >> TActorTest::TestBlockEvents [GOOD] >> TBlobStorageHullHugeChain::HeapAllocSmall [GOOD] >> TBlobStorageHullHugeHeap::AllocateAllFromOneChunk [GOOD] |87.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/pdisk/ut/unittest >> TYardTest::TestChunkPriorityBlock [GOOD] |87.2%| [CC] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/column_engine_logs.h_serialized.cpp ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/groupinfo/ut/unittest >> TSubgroupPartLayoutTest::CountEffectiveReplicas2of4 [GOOD] Test command err: testing erasure none main# 0 main# 1 Checked 2 cases, took 3390 us testing erasure block-4-2 main# 0 main# 1 main# 2 main# 3 main# 4 main# 5 main# 6 main# 7 main# 8 main# 9 main# 10 main# 11 main# 12 main# 13 main# 14 main# 15 main# 16 main# 17 main# 18 main# 19 main# 20 main# 21 main# 22 main# 23 main# 24 main# 25 main# 26 main# 27 main# 28 main# 29 main# 30 main# 31 main# 32 main# 33 main# 34 main# 35 main# 36 main# 37 main# 38 main# 39 main# 40 main# 41 main# 42 main# 43 main# 44 main# 45 main# 46 main# 47 main# 48 main# 49 main# 50 main# 51 main# 52 main# 53 main# 54 main# 55 main# 56 main# 57 main# 58 main# 59 main# 60 main# 61 main# 62 main# 63 Checked 262144 cases, took 1759508 us testing erasure mirror-3-2 main# 0 main# 1 main# 2 main# 3 main# 4 main# 5 main# 6 main# 7 Checked 512 cases, took 134 us testing erasure block-2-2 main# 0 main# 1 main# 2 main# 3 main# 4 main# 5 main# 6 main# 7 main# 8 main# 9 main# 10 main# 11 main# 12 main# 13 main# 14 main# 15 Checked 4096 cases, took 547695 us testing erasure mirror-3 main# 0 main# 1 main# 2 main# 3 main# 4 main# 5 main# 6 main# 7 Checked 64 cases, took 25 us testing erasure block-3-2 main# 0 main# 1 main# 2 main# 3 main# 4 main# 5 main# 6 main# 7 main# 8 main# 9 main# 10 main# 11 main# 12 main# 13 main# 14 main# 15 main# 16 main# 17 main# 18 main# 19 main# 20 main# 21 main# 22 main# 23 main# 24 main# 25 main# 26 main# 27 main# 28 main# 29 main# 30 main# 31 Checked 32768 cases, took 1456880 us testing erasure stripe-2-2 main# 0 main# 1 main# 2 main# 3 main# 4 main# 5 main# 6 main# 7 main# 8 main# 9 main# 10 main# 11 main# 12 main# 13 main# 14 main# 15 Checked 4096 cases, took 226054 us |87.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/testlib/actors/ut/unittest >> TActorTest::TestStateSwitch [GOOD] |87.2%| [TA] {RESULT} $(B)/ydb/core/blobstorage/vdisk/ingress/ut/test-results/unittest/{meta.json ... results_accumulator.log} |87.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/ut_pdiskfit/pdiskfit/pdiskfit >> TYardTest::TestChunkWriteReadWhole [GOOD] >> TYardTest::TestChunkWriteReadWholeWithHddSectorMap |87.2%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_pdiskfit/pdiskfit/pdiskfit |87.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/ut_pdiskfit/pdiskfit/pdiskfit |87.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/testlib/actors/ut/unittest >> TActorTest::TestWaitFuture [GOOD] |87.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/ut_pdiskfit/ut/ydb-core-blobstorage-ut_pdiskfit-ut ------- [TM] {asan, default-linux-x86_64, release} ydb/core/testlib/actors/ut/unittest >> TActorTest::TestWaitForFirstEvent [GOOD] Test command err: ... 
waiting for NKikimr::NTestSuiteTActorTest::TTestCaseTestWaitForFirstEvent::Execute_(NUnitTest::TTestContext&)::TEvTrigger ... waiting for NKikimr::NTestSuiteTActorTest::TTestCaseTestWaitForFirstEvent::Execute_(NUnitTest::TTestContext&)::TEvTrigger (done) ... waiting for NKikimr::NTestSuiteTActorTest::TTestCaseTestWaitForFirstEvent::Execute_(NUnitTest::TTestContext&)::TEvTrigger ... waiting for NKikimr::NTestSuiteTActorTest::TTestCaseTestWaitForFirstEvent::Execute_(NUnitTest::TTestContext&)::TEvTrigger (done) >> TBlobStorageHullHugeChain::HeapAllocLargeStandard [GOOD] >> TBlobStorageHullHugeChain::HeapAllocLargeNonStandard [GOOD] |87.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/huge/ut/unittest >> TBlobStorageHullHugeHeap::AllocateAllFromOneChunk [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/testlib/actors/ut/unittest >> TActorTest::TestBlockEvents [GOOD] Test command err: ... waiting for blocked 3 events ... blocking NKikimr::NTestSuiteTActorTest::TTestCaseTestBlockEvents::Execute_(NUnitTest::TTestContext&)::TEvTrigger from NKikimr::NTestSuiteTActorTest::TTestCaseTestBlockEvents::Execute_(NUnitTest::TTestContext&)::TSourceActor to NKikimr::NTestSuiteTActorTest::TTestCaseTestBlockEvents::Execute_(NUnitTest::TTestContext&)::TTargetActor cookie 0 ... blocking NKikimr::NTestSuiteTActorTest::TTestCaseTestBlockEvents::Execute_(NUnitTest::TTestContext&)::TEvTrigger from NKikimr::NTestSuiteTActorTest::TTestCaseTestBlockEvents::Execute_(NUnitTest::TTestContext&)::TSourceActor to NKikimr::NTestSuiteTActorTest::TTestCaseTestBlockEvents::Execute_(NUnitTest::TTestContext&)::TTargetActor cookie 0 ... blocking NKikimr::NTestSuiteTActorTest::TTestCaseTestBlockEvents::Execute_(NUnitTest::TTestContext&)::TEvTrigger from NKikimr::NTestSuiteTActorTest::TTestCaseTestBlockEvents::Execute_(NUnitTest::TTestContext&)::TSourceActor to NKikimr::NTestSuiteTActorTest::TTestCaseTestBlockEvents::Execute_(NUnitTest::TTestContext&)::TTargetActor cookie 0 ... waiting for blocked 3 events (done) ... unblocking NKikimr::NTestSuiteTActorTest::TTestCaseTestBlockEvents::Execute_(NUnitTest::TTestContext&)::TEvTrigger from NKikimr::NTestSuiteTActorTest::TTestCaseTestBlockEvents::Execute_(NUnitTest::TTestContext&)::TSourceActor to NKikimr::NTestSuiteTActorTest::TTestCaseTestBlockEvents::Execute_(NUnitTest::TTestContext&)::TTargetActor ... unblocking NKikimr::NTestSuiteTActorTest::TTestCaseTestBlockEvents::Execute_(NUnitTest::TTestContext&)::TEvTrigger from NKikimr::NTestSuiteTActorTest::TTestCaseTestBlockEvents::Execute_(NUnitTest::TTestContext&)::TSourceActor to NKikimr::NTestSuiteTActorTest::TTestCaseTestBlockEvents::Execute_(NUnitTest::TTestContext&)::TTargetActor ... waiting for blocked 1 more event ... blocking NKikimr::NTestSuiteTActorTest::TTestCaseTestBlockEvents::Execute_(NUnitTest::TTestContext&)::TEvTrigger from NKikimr::NTestSuiteTActorTest::TTestCaseTestBlockEvents::Execute_(NUnitTest::TTestContext&)::TSourceActor to NKikimr::NTestSuiteTActorTest::TTestCaseTestBlockEvents::Execute_(NUnitTest::TTestContext&)::TTargetActor cookie 0 ... waiting for blocked 1 more event (done) ... waiting for processed 2 more events ... waiting for processed 2 more events (done) ... 
unblocking NKikimr::NTestSuiteTActorTest::TTestCaseTestBlockEvents::Execute_(NUnitTest::TTestContext&)::TEvTrigger from NKikimr::NTestSuiteTActorTest::TTestCaseTestBlockEvents::Execute_(NUnitTest::TTestContext&)::TSourceActor to NKikimr::NTestSuiteTActorTest::TTestCaseTestBlockEvents::Execute_(NUnitTest::TTestContext&)::TTargetActor ... unblocking NKikimr::NTestSuiteTActorTest::TTestCaseTestBlockEvents::Execute_(NUnitTest::TTestContext&)::TEvTrigger from NKikimr::NTestSuiteTActorTest::TTestCaseTestBlockEvents::Execute_(NUnitTest::TTestContext&)::TSourceActor to NKikimr::NTestSuiteTActorTest::TTestCaseTestBlockEvents::Execute_(NUnitTest::TTestContext&)::TTargetActor ... waiting for processed 3 more events ... waiting for processed 3 more events (done) >> TBlobStorageHullHugeHeap::AllocateAllReleaseAll [GOOD] >> TBlobStorageHullHugeHeap::AllocateAllSerializeDeserializeReleaseAll [GOOD] >> TBlobStorageHullHugeChain::AllocFreeAllocTest [GOOD] >> TBlobStorageHullHugeChain::AllocFreeRestartAllocTest [GOOD] >> TopTest::Test1 [GOOD] |87.2%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/libtx-columnshard-engines.a |87.2%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/libtx-columnshard-engines.a |87.2%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/libtx-columnshard-engines.a |87.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/huge/ut/unittest >> TBlobStorageHullHugeHeap::AllocateAllSerializeDeserializeReleaseAll [GOOD] |87.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/huge/ut/unittest >> TBlobStorageHullHugeChain::HeapAllocLargeNonStandard [GOOD] >> TBlobStorageHullHugeHeap::RecoveryMode [GOOD] >> TBlobStorageHullHugeHeap::BorderValues [GOOD] >> TBlobStorageHullHugeHeap::WriteRestore [GOOD] >> TBlobStorageHullHugeKeeperPersState::SerializeParse [GOOD] |87.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/huge/ut/unittest >> TBlobStorageHullHugeChain::AllocFreeRestartAllocTest [GOOD] >> TYardTest::TestChunkWriteReadWholeWithHddSectorMap [GOOD] |87.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/huge/ut/unittest >> TopTest::Test1 [GOOD] |87.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/huge/ut/unittest >> TBlobStorageHullHugeHeap::BorderValues [GOOD] >> TYardTest::TestHttpInfo >> THugeHeapCtxTests::Basic [GOOD] >> TopTest::Test2 [GOOD] >> TYardTest::TestHttpInfo [GOOD] >> TYardTest::TestHttpInfoFileDoesntExist >> TBsVDiskRange::Simple3PutRangeGetNothingForwardFresh >> TYardTest::TestHttpInfoFileDoesntExist [GOOD] >> TYardTest::TestFirstRecordToKeep >> TBlobStorageSyncLogDsk::SeveralChunks [GOOD] >> TBlobStorageSyncLogDsk::OverlappingPages_OnePageIndexed [GOOD] >> TBlobStorageSyncLogDsk::OverlappingPages_SeveralPagesIndexed [GOOD] >> TBlobStorageSyncLogDsk::TrimLog [GOOD] |87.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/huge/ut/unittest >> THugeHeapCtxTests::Basic [GOOD] |87.2%| [CC] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/data_sharing/common/session/common.h_serialized.cpp >> TChainLayoutBuilder::TestProdConf [GOOD] >> TChainLayoutBuilder::TestMilestoneId [GOOD] >> TBsVDiskRangeHuge::Simple3PutRangeGetAllForwardFresh >> TBsVDiskGC::TGCManyVPutsDelTabletTest |87.2%| [CC] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/data_sharing/common/session/common.h_serialized.cpp |87.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/huge/ut/unittest >> 
TBlobStorageHullHugeKeeperPersState::SerializeParse [GOOD] >> TIncrHugeBlobIdDict::Basic [GOOD] >> TBsVDiskRepl1::ReplProxyKeepBits |87.2%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/data_sharing/common/session/libdata_sharing-common-session.a |87.2%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/data_sharing/common/session/libdata_sharing-common-session.a |87.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/huge/ut/unittest >> TopTest::Test2 [GOOD] |87.2%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/data_sharing/common/session/libdata_sharing-common-session.a >> TBsVDiskManyPutGet::ManyPutGetWaitCompaction >> TBsVDiskExtreme::SimpleGetFromEmptyDB >> TBsVDiskOutOfSpace::WriteUntilOrangeZone [GOOD] >> TBsVDiskOutOfSpace::WriteUntilYellowZone |87.2%| [CC] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/executer_actor/kqp_executer.h_serialized.cpp >> TBsVDiskRange::Simple3PutRangeGetAllForwardFresh |87.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/synclog/ut/unittest >> TBlobStorageSyncLogDsk::TrimLog [GOOD] |87.3%| [CC] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/executer_actor/kqp_executer.h_serialized.cpp |87.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/huge/ut/unittest >> TChainLayoutBuilder::TestMilestoneId [GOOD] >> TBsLocalRecovery::WriteRestartReadHuge >> TBsVDiskExtremeHuge::Simple3Put3GetFresh >> TYardTest::TestFirstRecordToKeep [GOOD] >> TYardTest::TestHugeChunkAndLotsOfTinyAsyncLogOrder |87.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/incrhuge/ut/unittest >> TIncrHugeBlobIdDict::Basic [GOOD] >> TBsVDiskBadBlobId::PutBlobWithBadId >> TBsLocalRecovery::StartStopNotEmptyDB >> TBsVDiskRange::Simple3PutRangeGetNothingForwardFresh [GOOD] >> TBsVDiskRange::Simple3PutRangeGetNothingForwardCompaction |87.3%| [TA] $(B)/ydb/core/blobstorage/vdisk/huge/ut/test-results/unittest/{meta.json ... results_accumulator.log} |87.3%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/executer_actor/libcore-kqp-executer_actor.a >> TBsVDiskRangeHuge::Simple3PutRangeGetAllForwardFresh [GOOD] >> TBsVDiskRangeHuge::Simple3PutRangeGetAllForwardCompaction >> TBsDbStat::ChaoticParallelWrite_DbStat |87.3%| [TA] {RESULT} $(B)/ydb/core/blobstorage/vdisk/huge/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} |87.3%| [AR] {RESULT} $(B)/ydb/core/kqp/executer_actor/libcore-kqp-executer_actor.a >> TBsVDiskRepl3::SyncLogTest >> TBsVDiskExtreme::Simple3Put1SeqGetAllFresh >> TBsVDiskRange::Simple3PutRangeGetAllForwardFresh [GOOD] >> TBsVDiskRange::Simple3PutRangeGetAllForwardCompaction >> TBsVDiskExtreme::SimpleGetFromEmptyDB [GOOD] >> TBsVDiskExtremeHandoff::SimpleHnd6Put1SeqGetFresh |87.3%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/executer_actor/libcore-kqp-executer_actor.a >> TBsVDiskExtremeHuge::Simple3Put3GetFresh [GOOD] >> TBsVDiskExtremeHuge::Simple3Put3GetCompaction >> TBsVDiskExtremeHandoffHuge::SimpleHndPut1SeqGetFresh >> TBsVDiskManyPutGet::ManyPutGetWaitCompaction [GOOD] >> TBsVDiskManyPutGet::ManyPutRangeGetFreshIndexOnly >> TBsVDiskExtreme::Simple3Put3GetFresh >> TBsVDiskBadBlobId::PutBlobWithBadId [GOOD] >> TBsVDiskBrokenPDisk::WriteUntilDeviceDeath >> TBsVDiskGC::GCPutKeepIntoEmptyDB >> TBsVDiskRangeHuge::Simple3PutRangeGetNothingBackwardFresh >> TBsVDiskGC::TGCManyVPutsDelTabletTest [GOOD] >> TBsVDiskManyPutGet::ManyPutGet |87.3%| [CC] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/counters/columnshard.h_serialized.cpp |87.3%| [CC] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/counters/columnshard.h_serialized.cpp >> TBsVDiskRange::Simple3PutRangeGetNothingForwardCompaction [GOOD] >> TBsVDiskRange::Simple3PutRangeGetNothingBackwardFresh >> TBsVDiskRangeHuge::Simple3PutRangeGetAllForwardCompaction [GOOD] >> TBsVDiskRangeHuge::Simple3PutRangeGetAllBackwardFresh |87.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/olap/kqp_olap_stats_ut.cpp |87.3%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/counters/libtx-columnshard-counters.a |87.3%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/counters/libtx-columnshard-counters.a >> TBsVDiskRepl1::ReplProxyKeepBits [GOOD] >> TBsVDiskRepl2::ReplEraseDiskRestoreWOOneDisk |87.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/olap/kqp_olap_stats_ut.cpp |87.3%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/counters/libtx-columnshard-counters.a >> CodecsTest::Basic [GOOD] >> CodecsTest::NaturalNumbersAndZero [GOOD] >> CodecsTest::LargeAndRepeated >> TBsVDiskExtremeHandoff::SimpleHnd6Put1SeqGetFresh [GOOD] >> TBsVDiskExtremeHandoff::SimpleHnd6Put1SeqGetCompaction >> CodecsTest::LargeAndRepeated [GOOD] >> NaiveFragmentWriterTest::Basic [GOOD] >> TBsVDiskExtreme::Simple3Put1SeqGetAllFresh [GOOD] >> TBsVDiskExtreme::Simple3Put1SeqGetAllCompaction >> TBsVDiskRepl3::SyncLogTest [GOOD] >> THugeMigration::ExtendMap_HugeBlobs >> TBsVDiskRangeHuge::Simple3PutRangeGetNothingForwardFresh >> TBsVDiskExtremeHuge::Simple3Put3GetCompaction [GOOD] >> TBsVDiskExtremeHuge::Simple3Put1SeqSubsOkFresh |87.3%| [CC] {default-linux-x86_64, release, asan} $(B)/ydb/services/metadata/manager/abstract.h_serialized.cpp >> TBsVDiskBrokenPDisk::WriteUntilDeviceDeath [GOOD] >> TBsVDiskDefrag::DefragEmptyDB >> TBsVDiskRange::Simple3PutRangeGetAllForwardCompaction [GOOD] >> TBsVDiskRange::Simple3PutRangeGetMiddleForwardCompaction |87.3%| [CC] {BAZEL_UPLOAD} $(B)/ydb/services/metadata/manager/abstract.h_serialized.cpp |87.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/synclog/ut/unittest >> NaiveFragmentWriterTest::Basic [GOOD] >> TBsVDiskExtreme::Simple3Put3GetFresh [GOOD] >> TBsVDiskExtreme::Simple3Put3GetCompaction >> TBsVDiskGC::GCPutKeepIntoEmptyDB [GOOD] >> TBsVDiskGC::GCPutBarrierVDisk0NoSync >> TBsVDiskExtremeHandoffHuge::SimpleHndPut1SeqGetFresh [GOOD] >> 
TBsVDiskExtremeHandoffHuge::SimpleHnd2Put1GetFresh >> TBsVDiskManyPutGet::ManyPutRangeGetFreshIndexOnly [GOOD] >> TBsVDiskManyPutGet::ManyPutRangeGetCompactionIndexOnly |87.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/mediator/mediator_ut.cpp |87.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/mediator/mediator_ut.cpp >> TYardTest::TestHugeChunkAndLotsOfTinyAsyncLogOrder [GOOD] >> TYardTest::TestDamagedFirstRecordToKeep |87.3%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/services/metadata/manager/libservices-metadata-manager.a >> TBsVDiskRangeHuge::Simple3PutRangeGetNothingBackwardFresh [GOOD] >> TBsVDiskRangeHuge::Simple3PutRangeGetNothingBackwardCompaction >> TBsVDiskRange::Simple3PutRangeGetNothingBackwardFresh [GOOD] >> TBsVDiskRange::Simple3PutRangeGetNothingBackwardCompaction |87.3%| [AR] {RESULT} $(B)/ydb/services/metadata/manager/libservices-metadata-manager.a |87.3%| [AR] {BAZEL_UPLOAD} $(B)/ydb/services/metadata/manager/libservices-metadata-manager.a >> TBsVDiskRangeHuge::Simple3PutRangeGetAllBackwardFresh [GOOD] >> TBsVDiskRangeHuge::Simple3PutRangeGetAllBackwardCompaction >> NaiveFragmentWriterTest::Long >> TBlobStorageSyncLogKeeper::CutLog_EntryPointNewFormat [GOOD] >> TBlobStorageSyncLogMem::EmptyMemRecLog [GOOD] >> TBlobStorageSyncLogMem::FilledIn1 [GOOD] >> TBlobStorageSyncLogMem::EmptyMemRecLogPutAfterSnapshot [GOOD] >> TBsVDiskRangeHuge::Simple3PutRangeGetNothingForwardFresh [GOOD] >> TBsVDiskRangeHuge::Simple3PutRangeGetNothingForwardCompaction >> TBsVDiskExtremeHandoff::SimpleHnd6Put1SeqGetCompaction [GOOD] >> TBsVDiskExtremeHandoff::SimpleHnd2Put1GetFresh >> TBsVDiskDefrag::DefragEmptyDB [GOOD] >> TBsVDiskDefrag::Defrag50PercentGarbage >> TBsVDiskExtremeHuge::Simple3Put1SeqSubsOkFresh [GOOD] >> TBsVDiskExtremeHuge::Simple3Put1SeqSubsOkCompaction >> TBsVDiskExtremeHandoffHuge::SimpleHnd2Put1GetFresh [GOOD] >> TBsVDiskExtremeHuge::Simple3Put1SeqGetAllFresh |87.3%| [CC] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/schemeshard_info_types.h_serialized.cpp |87.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/synclog/ut/unittest >> TBlobStorageSyncLogMem::EmptyMemRecLogPutAfterSnapshot [GOOD] >> TBlobStorageSyncLogMem::FilledIn1PutAfterSnapshot [GOOD] >> TBlobStorageSyncLogMem::ManyLogoBlobsPerf |87.3%| [CC] {BAZEL_UPLOAD} $(B)/ydb/core/tx/schemeshard/schemeshard_info_types.h_serialized.cpp >> SemiSortedDeltaAndVarLengthCodec::Random32 >> TBsVDiskExtreme::Simple3Put1SeqGetAllCompaction [GOOD] >> TBsVDiskExtreme::Simple3Put1SeqGet2Fresh >> TBsVDiskExtreme::Simple3Put3GetCompaction [GOOD] >> TBsVDiskExtreme::Simple3Put1SeqSubsOkFresh >> NaiveFragmentWriterTest::Long [GOOD] >> ReorderCodecTest::Basic [GOOD] >> RunLengthCodec::BasicTest32 [GOOD] >> RunLengthCodec::BasicTest64 [GOOD] >> TBsVDiskRange::Simple3PutRangeGetMiddleForwardCompaction [GOOD] >> TBsVDiskRange::Simple3PutRangeGetMiddleBackwardFresh >> TBsVDiskManyPutGet::ManyPutGet [GOOD] >> TBsVDiskManyPutGet::ManyMultiSinglePutGet >> TBlobStorageSyncLogData::SerializeParseEmpty1_Proto [GOOD] >> TBlobStorageSyncLogData::SerializeParseEmpty2_Proto [GOOD] >> SemiSortedDeltaCodec::Random32 |87.3%| [CC] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/storage/granule/granule.h_serialized.cpp >> SemiSortedDeltaAndVarLengthCodec::Random32 [GOOD] >> SemiSortedDeltaAndVarLengthCodec::Random64 >> TBsVDiskGC::GCPutBarrierVDisk0NoSync [GOOD] >> TBsVDiskGC::GCPutBarrierSync >> TQueryResultSizeTrackerTest::SerializeDeserializeMaxPtotobufSize 
[GOOD] >> SemiSortedDeltaCodec::Random32 [GOOD] >> SemiSortedDeltaCodec::Random64 >> TBsVDiskRangeHuge::Simple3PutRangeGetNothingBackwardCompaction [GOOD] >> TBsVDiskRangeHuge::Simple3PutRangeGetMiddleForwardFresh |87.3%| [CC] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/changes/abstract/abstract.h_serialized.cpp >> SemiSortedDeltaAndVarLengthCodec::Random64 [GOOD] >> TYardTest::TestDamagedFirstRecordToKeep [GOOD] >> TYardTest::TestDamageAtTheBoundary >> SemiSortedDeltaCodec::BasicTest32 [GOOD] >> SemiSortedDeltaCodec::BasicTest64 [GOOD] >> SemiSortedDeltaCodec::Random64 [GOOD] |87.3%| [CC] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/changes/abstract/abstract.h_serialized.cpp |87.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/synclog/ut/unittest >> RunLengthCodec::BasicTest64 [GOOD] >> TBsVDiskRangeHuge::Simple3PutRangeGetAllBackwardCompaction [GOOD] >> TBsVDiskRangeHuge::Simple3PutRangeGetMiddleBackwardCompaction |87.3%| [CC] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/granule/granule.h_serialized.cpp |87.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/query/ut/unittest >> TQueryResultSizeTrackerTest::SerializeDeserializeMaxPtotobufSize [GOOD] >> TBsVDiskRange::Simple3PutRangeGetNothingBackwardCompaction [GOOD] >> TBsVDiskRange::Simple3PutRangeGetMiddleForwardFresh |87.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/cms/console/console_ut_tenants.cpp |87.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/tx_proxy/proxy_ext_tenant_ut.cpp |87.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/cms/console/console_ut_tenants.cpp |87.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/tx_proxy/proxy_ext_tenant_ut.cpp >> TBsVDiskExtremeHandoff::SimpleHnd2Put1GetFresh [GOOD] >> TBsVDiskExtremeHandoff::SimpleHnd2Put1GetCompaction >> THugeMigration::ExtendMap_HugeBlobs [GOOD] >> THugeMigration::ExtendMap_SmallBlobsBecameHuge |87.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/synclog/ut/unittest >> SemiSortedDeltaCodec::BasicTest64 [GOOD] >> TBsVDiskRangeHuge::Simple3PutRangeGetNothingForwardCompaction [GOOD] >> TBsVDiskRepl1::ReplProxyData |87.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/synclog/ut/unittest >> SemiSortedDeltaCodec::Random64 [GOOD] |87.3%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/storage/granule/libengines-storage-granule.a |87.3%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/engines/storage/granule/libengines-storage-granule.a |87.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/yql/tools/dqrun/dqrun >> VarLengthIntCodec::BasicTest64 [GOOD] >> VarLengthIntCodec::Random32 >> TBsVDiskExtremeHuge::Simple3Put1SeqGetAllFresh [GOOD] >> TBsVDiskExtremeHuge::Simple3Put1SeqGetAllCompaction >> VarLengthIntCodec::Random32 [GOOD] >> VarLengthIntCodec::Random64 |87.3%| [LD] {RESULT} $(B)/ydb/library/yql/tools/dqrun/dqrun |87.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/library/yql/tools/dqrun/dqrun |87.3%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/granule/libengines-storage-granule.a |87.3%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/changes/abstract/libengines-changes-abstract.a >> TBsVDiskExtreme::Simple3Put1SeqGet2Fresh [GOOD] >> TBsVDiskExtreme::Simple3Put1SeqGet2Compaction >> TBsVDiskExtremeHuge::Simple3Put1SeqSubsOkCompaction [GOOD] >> TBsVDiskExtremeHuge::Simple3Put1SeqSubsErrorFresh |87.3%| [AR] {RESULT} 
$(B)/ydb/core/tx/columnshard/engines/changes/abstract/libengines-changes-abstract.a |87.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_ut_followers.cpp >> TBsVDiskRange::Simple3PutRangeGetMiddleBackwardFresh [GOOD] >> TBsVDiskRange::Simple3PutRangeGetMiddleBackwardCompaction >> VarLengthIntCodec::Random64 [GOOD] >> TBsVDiskManyPutGet::ManyPutRangeGetCompactionIndexOnly [GOOD] >> TBsVDiskExtreme::Simple3Put1SeqSubsOkFresh [GOOD] >> TBsVDiskManyPutGet::ManyPutRangeGet2ChannelsIndexOnly >> TBsVDiskExtreme::Simple3Put1SeqSubsOkCompaction |87.3%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/engines/changes/abstract/libengines-changes-abstract.a |87.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_followers.cpp >> RunLengthCodec::Random32 >> RunLengthCodec::Random32 [GOOD] >> RunLengthCodec::Random64 >> RunLengthCodec::Random64 [GOOD] >> SemiSortedDeltaAndVarLengthCodec::BasicTest32 [GOOD] >> SemiSortedDeltaAndVarLengthCodec::BasicTest64 [GOOD] |87.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/incrhuge/ut/unittest >> TBsVDiskRangeHuge::Simple3PutRangeGetMiddleForwardFresh [GOOD] >> TBsVDiskRangeHuge::Simple3PutRangeGetMiddleForwardCompaction |87.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/synclog/ut/unittest >> VarLengthIntCodec::Random64 [GOOD] >> TBsVDiskRange::Simple3PutRangeGetMiddleForwardFresh [GOOD] >> TIncrHugeBasicTest::WriteReadDeleteEnum [GOOD] >> TBsVDiskRangeHuge::Simple3PutRangeGetMiddleBackwardCompaction [GOOD] >> TBlobStorageSyncLogDsk::AddByOne [GOOD] >> TBlobStorageSyncLogDsk::AddFive [GOOD] >> TBlobStorageSyncLogDsk::ComplicatedSerializeWithOverlapping |87.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/synclog/ut/unittest >> SemiSortedDeltaAndVarLengthCodec::BasicTest64 [GOOD] >> TIncrHugeBasicTest::WriteReadDeleteEnumRecover [GOOD] >> TBlobStorageSyncLogDsk::ComplicatedSerializeWithOverlapping [GOOD] >> TBlobStorageSyncLogDsk::DeleteChunks [GOOD] >> TBsVDiskRepl1::ReplProxyData [GOOD] >> TBsVDiskRepl1::ReplEraseDiskRestore >> TBsVDiskExtremeHuge::Simple3Put1SeqGetAllCompaction [GOOD] >> TBsVDiskExtremeHuge::Simple3Put1SeqGet2Fresh >> TBlobStorageSyncLogMem::ManyLogoBlobsPerf [GOOD] >> TBlobStorageSyncLogMem::ManyLogoBlobsBuildSwapSnapshot >> TBsLocalRecovery::WriteRestartReadHuge [GOOD] >> TBsLocalRecovery::WriteRestartReadHugeIncreased |87.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/incrhuge/ut/unittest |87.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_vdisk/unittest >> TBsVDiskRange::Simple3PutRangeGetMiddleForwardFresh [GOOD] |87.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/incrhuge/ut/unittest >> TIncrHugeBasicTest::WriteReadDeleteEnum [GOOD] >> TBsVDiskExtremeHuge::Simple3Put1SeqSubsErrorFresh [GOOD] >> TBsVDiskExtremeHuge::Simple3Put1SeqSubsErrorCompaction >> TBlobStorageSyncLogMem::ManyLogoBlobsBuildSwapSnapshot [GOOD] >> VarLengthIntCodec::BasicTest32 [GOOD] >> TBsVDiskManyPutGet::ManyMultiSinglePutGet [GOOD] >> TBsVDiskManyPutGet::ManyMultiPutGet >> TBsVDiskExtremeHandoff::SimpleHnd2Put1GetCompaction [GOOD] >> TBsVDiskExtremeHandoffHuge::SimpleHnd2Put1GetCompaction >> TBsVDiskExtreme::Simple3Put1SeqGet2Compaction [GOOD] >> TBsVDiskExtreme::Simple3Put1GetMissingPartFresh |87.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/incrhuge/ut/unittest >> TBsVDiskExtreme::Simple3Put1SeqSubsOkCompaction [GOOD] >> 
TBsVDiskExtreme::Simple3Put1SeqSubsErrorFresh >> TIncrHugeBasicTest::Recovery [GOOD] >> THugeMigration::ExtendMap_SmallBlobsBecameHuge [GOOD] >> THugeMigration::RollbackMap_HugeBlobs >> TBsVDiskRange::Simple3PutRangeGetMiddleBackwardCompaction [GOOD] |87.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/incrhuge/ut/unittest >> TIncrHugeBasicTest::WriteReadDeleteEnumRecover [GOOD] |87.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_vdisk/unittest >> TBsVDiskRangeHuge::Simple3PutRangeGetMiddleBackwardCompaction [GOOD] |87.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/synclog/ut/unittest >> TBlobStorageSyncLogDsk::DeleteChunks [GOOD] >> TIncrHugeBasicTest::Defrag |87.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/incrhuge/ut/unittest >> TBlobStorageBlocksCacheTest::DeepInFlight [GOOD] >> TBsVDiskGC::GCPutBarrierSync [GOOD] >> TBsVDiskGC::GCPutKeepBarrierSync |87.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/synclog/ut/unittest >> VarLengthIntCodec::BasicTest32 [GOOD] |87.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/persqueue/ut/ut_with_sdk/topic_ut.cpp |87.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/incrhuge/ut/unittest >> TIncrHugeBasicTest::Recovery [GOOD] >> TBsVDiskRangeHuge::Simple3PutRangeGetMiddleForwardCompaction [GOOD] >> TBsVDiskRangeHuge::Simple3PutRangeGetMiddleBackwardFresh |87.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_vdisk/unittest >> TBsVDiskRange::Simple3PutRangeGetMiddleBackwardCompaction [GOOD] |87.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/ut/ut_with_sdk/topic_ut.cpp |87.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/perf/kqp_query_perf_ut.cpp |87.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/incrhuge/ut/unittest |87.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/cache_block/ut/unittest >> TBsVDiskExtremeHuge::Simple3Put1SeqGet2Fresh [GOOD] >> TBsVDiskExtremeHuge::Simple3Put1SeqGet2Compaction |87.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/perf/kqp_query_perf_ut.cpp |87.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/cache_block/ut/unittest >> TBlobStorageBlocksCacheTest::DeepInFlight [GOOD] >> TBsVDiskExtreme::Simple3Put1SeqSubsErrorFresh [GOOD] >> TBsVDiskExtreme::Simple3Put1SeqSubsErrorCompaction >> TBsVDiskExtreme::Simple3Put1GetMissingPartFresh [GOOD] >> TBsVDiskExtreme::Simple3Put1GetMissingPartCompaction >> TBlobStorageBlocksCacheTest::Repeat [GOOD] >> TBsVDiskExtremeHandoffHuge::SimpleHnd2Put1GetCompaction [GOOD] >> TBsVDiskExtremeHuge::Simple3Put1SeqSubsErrorCompaction [GOOD] >> TBlobStorageBlocksCacheTest::PutDeepIntoPast [GOOD] |87.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/cache_block/ut/unittest >> TQueryResultSizeTrackerTest::SerializeDeserializeMaxPtotobufSizeMinusOne [GOOD] >> TBsVDiskManyPutGet::ManyMultiPutGet [GOOD] >> TBsVDiskManyPutGet::ManyMultiPutGetWithLargeBatch >> TBlobStorageBlocksCacheTest::PutIntoPast [GOOD] >> TBlobStorageBlocksCacheTest::MultipleTables [GOOD] >> TBlobStorageBlocksCacheTest::LegacyAndModern [GOOD] |87.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/cache_block/ut/unittest >> TBlobStorageBlocksCacheTest::Repeat [GOOD] >> TPDiskRaces::KillOwnerWhileDeletingChunkWithInflightMock [GOOD] >> TPDiskRaces::KillOwnerWhileDecommitting |87.4%| [TM] {asan, 
default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/cache_block/ut/unittest |87.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hullop/ut/unittest |87.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/opt/kqp_ranges_ut.cpp |87.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/opt/kqp_ranges_ut.cpp |87.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/cache_block/ut/unittest >> TBlobStorageBlocksCacheTest::MultipleTables [GOOD] >> TBsVDiskRangeHuge::Simple3PutRangeGetMiddleBackwardFresh [GOOD] |87.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_vdisk/unittest >> TBsVDiskExtremeHandoffHuge::SimpleHnd2Put1GetCompaction [GOOD] |87.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/cache_block/ut/unittest >> TBlobStorageBlocksCacheTest::PutIntoPast [GOOD] |87.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/query/ut/unittest >> TQueryResultSizeTrackerTest::SerializeDeserializeMaxPtotobufSizeMinusOne [GOOD] |87.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_vdisk/unittest >> TBsVDiskExtremeHuge::Simple3Put1SeqSubsErrorCompaction [GOOD] |87.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/cache_block/ut/unittest >> TBlobStorageBlocksCacheTest::LegacyAndModern [GOOD] |87.4%| [TA] $(B)/ydb/core/blobstorage/vdisk/synclog/ut/test-results/unittest/{meta.json ... results_accumulator.log} |87.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/cache_block/ut/unittest >> TBlobStorageBlocksCacheTest::PutDeepIntoPast [GOOD] |87.4%| [TA] {RESULT} $(B)/ydb/core/blobstorage/vdisk/synclog/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> HullReplWriteSst::Basic >> TBsVDiskManyPutGet::ManyPutRangeGet2ChannelsIndexOnly [GOOD] >> TBsVDiskManyPutGetCheckSize::ManyPutGetCheckSize |87.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/repl/ut/unittest |87.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/repl/ut/unittest >> TBlobStorageReplRecoveryMachine::BasicFunctionality |87.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/repl/ut/unittest |87.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/cache_block/ut/unittest |87.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_vdisk/unittest >> TBsVDiskRangeHuge::Simple3PutRangeGetMiddleBackwardFresh [GOOD] |87.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/repl/ut/unittest >> THugeMigration::RollbackMap_HugeBlobs [GOOD] >> TMonitoring::ReregisterTest |87.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/repl/ut/unittest >> TBsVDiskExtremeHuge::Simple3Put1SeqGet2Compaction [GOOD] >> TBsVDiskExtreme::Simple3Put1GetMissingPartCompaction [GOOD] >> TMonitoring::ReregisterTest [GOOD] >> TBlobStorageReplRecoveryMachine::BasicFunctionality [GOOD] >> TActorTest::TestSendEvent >> TActorTest::TestSendEvent [GOOD] >> TActorTest::TestSendAfterDelay >> TActorTest::TestDie >> TBsVDiskExtreme::Simple3Put1SeqSubsErrorCompaction [GOOD] >> TActorTest::TestSendAfterDelay [GOOD] >> TActorTest::TestDie [GOOD] >> TActorTest::TestFilteredGrab |87.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/repl/ut/unittest |87.4%| [TA] $(B)/ydb/core/blobstorage/vdisk/hulldb/cache_block/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} >> TBsVDiskManyPutGet::ManyMultiPutGetWithLargeBatch [GOOD] >> TActorTest::TestFilteredGrab [GOOD] |87.4%| [TA] {RESULT} $(B)/ydb/core/blobstorage/vdisk/hulldb/cache_block/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> TActorTest::TestSendFromAnotherThread |87.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_vdisk/unittest >> TBsVDiskExtreme::Simple3Put1GetMissingPartCompaction [GOOD] >> TYardTest::TestDamageAtTheBoundary [GOOD] >> TYardTest::TestDestroySystem >> TActorTest::TestHandleEvent [GOOD] >> TActorTest::TestGetCtxTime [GOOD] >> TActorTest::TestScheduleEvent >> TBlobStorageCompStrat::Test1 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_vdisk/unittest >> TMonitoring::ReregisterTest [GOOD] Test command err: RUN TEST SendData iteration SendData iteration SendData iteration SendData iteration SendData iteration SendData iteration SendData iteration SendData iteration SendData iteration SendData iteration |87.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_vdisk/unittest >> TBsVDiskExtreme::Simple3Put1SeqSubsErrorCompaction [GOOD] |87.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/compstrat/ut/unittest |87.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_vdisk/unittest >> TBsVDiskExtremeHuge::Simple3Put1SeqGet2Compaction [GOOD] >> TActorTest::TestScheduleEvent [GOOD] >> TActorTest::TestScheduleReaction [GOOD] |87.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/repl/ut/unittest >> TBlobStorageReplRecoveryMachine::BasicFunctionality [GOOD] |87.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/testlib/actors/ut/unittest >> TActorTest::TestSendAfterDelay [GOOD] |87.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/compstrat/ut/unittest |87.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/compstrat/ut/unittest |87.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/testlib/actors/ut/unittest >> TActorTest::TestFilteredGrab [GOOD] >> TBlobStorageGroupInfoTest::SubgroupPartLayout [GOOD] |87.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_vdisk/unittest >> TBsVDiskManyPutGet::ManyMultiPutGetWithLargeBatch [GOOD] |87.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/compstrat/ut/unittest >> TActorTest::TestWaitFor >> TBsVDiskGC::GCPutKeepBarrierSync [GOOD] >> TBsVDiskGC::GCPutManyBarriersNoSync >> TActorTest::TestWaitFor [GOOD] |87.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/join/kqp_join_ut.cpp |87.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/compstrat/ut/unittest |87.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/repl/ut/unittest |87.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/compstrat/ut/unittest >> TBlobStorageCompStrat::Test1 [GOOD] >> ReadBatcher::Range [GOOD] |87.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/testlib/actors/ut/unittest >> TActorTest::TestGetCtxTime [GOOD] |87.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/compstrat/ut/unittest >> TActorTest::TestSendFromAnotherThread [GOOD] |87.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/join/kqp_join_ut.cpp |87.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/compstrat/ut/unittest |87.5%| [TM] {asan, 
default-linux-x86_64, release} ydb/core/testlib/actors/ut/unittest >> TActorTest::TestScheduleReaction [GOOD] >> TBsVDiskRepl2::ReplEraseDiskRestoreWOOneDisk [GOOD] >> TBsVDiskRepl3::ReplEraseDiskRestoreMultipart |87.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/groupinfo/ut/unittest >> TBlobStorageGroupInfoTest::SubgroupPartLayout [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/testlib/actors/ut/unittest >> TActorTest::TestWaitFor [GOOD] Test command err: ... waiting for value = 42 ... waiting for value = 42 (done) |87.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/compstrat/ut/unittest >> TYardTest::TestDestroySystem [GOOD] >> TYardTest::TestCutMultipleLogChunks >> TBlobStoragePDiskCrypto::TestMixedStreamCypher >> TYardTest::TestWholeLogRead >> TBlobStoragePDiskCrypto::TestMixedStreamCypher [GOOD] >> TBlobStoragePDiskCrypto::TestInplaceStreamCypher [GOOD] >> TBlockDeviceTest::TestDeviceWithSubmitGetThread |87.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/compstrat/ut/unittest >> TBlobStorageCompStrat::Test1 [GOOD] |87.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/testlib/actors/ut/unittest >> TActorTest::TestSendFromAnotherThread [GOOD] >> AuthDatabaseAdmin::FailOnEmptyOwnerAndTokenWithEmptyUserSidAndGroups [GOOD] >> AuthDatabaseAdmin::FailOnOwnerAndEmptyToken [GOOD] >> AuthDatabaseAdmin::FailOnOwnerAndNoToken [GOOD] >> TQueryResultSizeTrackerTest::SerializeDeserializeMaxPtotobufSizePlusOne >> TBlockDeviceTest::TestDeviceWithSubmitGetThread [GOOD] >> TBlockDeviceTest::TestWriteSectorMapAllTypes >> DiscoveryConverterTest::MinimalName [GOOD] >> DiscoveryConverterTest::WithLogbrokerPath [GOOD] >> TQueryResultSizeTrackerTest::CheckAll [GOOD] >> TPDiskTest::TestAbstractPDiskInterface >> TPDiskTest::TestAbstractPDiskInterface [GOOD] >> TPDiskTest::TestPDiskActorErrorState >> TBsVDiskGC::GCPutManyBarriersNoSync [GOOD] >> TBsVDiskGC::TGCManyVPutsCompactGCAllTest >> TYardTest::TestWholeLogRead [GOOD] >> TYardTest::TestSysLogReordering |87.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hullop/ut/unittest |87.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/base/ut_auth/unittest >> AuthDatabaseAdmin::FailOnOwnerAndNoToken [GOOD] >> TBlobStorageHullCompactDeferredQueueTest::Basic >> DiscoveryConverterTest::AccountDatabase [GOOD] >> DiscoveryConverterTest::CmWay [GOOD] >> NameserviceConfigValidatorTests::TestLongWalleDC [GOOD] >> NameserviceConfigValidatorTests::TestModifyClusterUUID [GOOD] >> NameserviceConfigValidatorTests::TestModifyIdForAddrPort [GOOD] >> NameserviceConfigValidatorTests::TestModifyHost [GOOD] >> TPDiskTest::TestPDiskActorErrorState [GOOD] >> TPDiskTest::TestPDiskActorPDiskStopStart |87.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hullop/ut/unittest |87.5%| [TA] $(B)/ydb/core/blobstorage/vdisk/hulldb/compstrat/ut/test-results/unittest/{meta.json ... results_accumulator.log} |87.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hullop/ut/unittest >> ReadBatcher::Range [GOOD] |87.5%| [TA] {RESULT} $(B)/ydb/core/blobstorage/vdisk/hulldb/compstrat/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} >> TBlobStorageAnubisAlgo::Mirror3 [GOOD] >> TYardTest::TestCutMultipleLogChunks [GOOD] >> TYardTest::TestDestructionWhileWritingChunk |87.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hullop/ut/unittest |87.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/anubis_osiris/ut/unittest >> TRegistryTests::TestAddGet [GOOD] >> TRegistryTests::TestCheckConfig [GOOD] >> ResourceBrokerConfigValidatorTests::TestZeroQueueWeight [GOOD] >> ResourceBrokerConfigValidatorTests::TestZeroDefaultDuration [GOOD] |87.5%| [TS] {asan, default-linux-x86_64, release} ydb/core/scheme/ut_pg/unittest |87.5%| [TA] $(B)/ydb/core/testlib/actors/ut/test-results/unittest/{meta.json ... results_accumulator.log} |87.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/anubis_osiris/ut/unittest |87.5%| [TS] {asan, default-linux-x86_64, release} ydb/core/scheme/ut_pg/unittest |87.5%| [TS] {asan, default-linux-x86_64, release} ydb/library/persqueue/topic_parser/ut/unittest >> DiscoveryConverterTest::WithLogbrokerPath [GOOD] |87.5%| [TS] {asan, default-linux-x86_64, release} ydb/library/persqueue/topic_parser/ut/unittest >> DiscoveryConverterTest::CmWay [GOOD] >> ReadBatcher::ReadBatcher >> NameserviceConfigValidatorTests::TestModifyIdForHostPort [GOOD] >> NameserviceConfigValidatorTests::TestModifyIdForResolveHostPort [GOOD] >> NameserviceConfigValidatorTests::TestModifyResolveHost [GOOD] >> NameserviceConfigValidatorTests::TestModifyPort [GOOD] >> AuthDatabaseAdmin::PassOnOwnerMatchUserSid |87.5%| [TA] {RESULT} $(B)/ydb/core/testlib/actors/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> AuthDatabaseAdmin::PassOnOwnerMatchUserSid [GOOD] >> AuthDatabaseAdmin::PassOnOwnerMatchUserSidWithGroup [GOOD] >> AuthTokenAllowed::FailOnListAndEmptyToken [GOOD] >> TopicNameConverterForCPTest::CorrectLegacyTopics [GOOD] >> TopicNameConverterForCPTest::CorrectModernTopics >> TPDiskTest::TestPDiskActorPDiskStopStart [GOOD] >> TPDiskTest::TestPDiskActorPDiskStopBroken >> TopicNameConverterForCPTest::CorrectModernTopics [GOOD] |87.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/query/ut/unittest >> TQueryResultSizeTrackerTest::CheckAll [GOOD] >> NameserviceConfigValidatorTests::TestEmptyConfig [GOOD] >> NameserviceConfigValidatorTests::TestDuplicatingId [GOOD] >> NameserviceConfigValidatorTests::TestDuplicatingResolveHostPort [GOOD] >> NameserviceConfigValidatorTests::TestEmptyAddresses [GOOD] >> NameserviceConfigValidatorTests::TestRemoveTooMany [GOOD] >> ResourceBrokerConfigValidatorTests::TestEmptyConfig [GOOD] |87.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/cms/console/validators/ut/unittest >> NameserviceConfigValidatorTests::TestModifyHost [GOOD] |87.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/anubis_osiris/ut/unittest >> TBlobStorageAnubisAlgo::Mirror3 [GOOD] >> ResourceBrokerConfigValidatorTests::TestEmptyQueueName [GOOD] >> ResourceBrokerConfigValidatorTests::TestEmptyTaskName [GOOD] >> TYardTest::TestDestructionWhileWritingChunk [GOOD] >> TYardTest::TestDestructionWhileReadingChunk >> TPDiskTest::TestPDiskActorPDiskStopBroken [GOOD] >> TPDiskTest::TestPDiskActorPDiskStopUninitialized |87.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/cms/console/validators/ut/unittest >> ResourceBrokerConfigValidatorTests::TestZeroDefaultDuration [GOOD] |87.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/cms/console/validators/ut/unittest >> 
NameserviceConfigValidatorTests::TestModifyPort [GOOD] |87.5%| [TS] {asan, default-linux-x86_64, release} ydb/library/persqueue/topic_parser/ut/unittest >> TopicNameConverterForCPTest::CorrectModernTopics [GOOD] |87.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/base/ut_auth/unittest >> AuthTokenAllowed::FailOnListAndEmptyToken [GOOD] |87.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/cms/console/validators/ut/unittest >> NameserviceConfigValidatorTests::TestEmptyAddresses [GOOD] >> TPDiskTest::TestPDiskActorPDiskStopUninitialized [GOOD] >> TPDiskTest::TestChunkWriteRelease >> TYardTest::TestDestructionWhileReadingChunk [GOOD] >> TYardTest::TestDestructionWhileReadingLog |87.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/cms/console/validators/ut/unittest >> ResourceBrokerConfigValidatorTests::TestEmptyTaskName [GOOD] >> ResourceBrokerConfigValidatorTests::TestRepeatedTaskName [GOOD] >> ResourceBrokerConfigValidatorTests::TestUnknownQueue [GOOD] >> ResourceBrokerConfigValidatorTests::TestUnlimitedResource [GOOD] >> ResourceBrokerConfigValidatorTests::TestUnusedQueue [GOOD] |87.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/anubis_osiris/ut/unittest >> TopicNameConverterTest::LegacyStyle [GOOD] >> TopicNameConverterTest::FirstClass [GOOD] >> TYardTest::TestDestructionWhileReadingLog [GOOD] >> TYardTest::TestFormatInfo >> TLogoBlobIdHashTest::SimpleTestWithDifferentTabletId [GOOD] >> TLogoBlobIdHashTest::SimpleTestWithDifferentSteps [GOOD] >> TLogoBlobTest::LogoBlobParse >> Path::Name_EnglishAlphabet [GOOD] >> Path::Name_RussianAlphabet [GOOD] >> Path::Name_RussianAlphabet_SetLocale_C [GOOD] >> Path::Name_ExtraSymbols [GOOD] >> ResourceBrokerConfigValidatorTests::TestMinConfig |87.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/anubis_osiris/ut/unittest >> TLogoBlobTest::LogoBlobParse [GOOD] >> TLogoBlobTest::LogoBlobCompare [GOOD] >> TableIndex::CompatibleSecondaryIndex [GOOD] >> TableIndex::NotCompatibleSecondaryIndex [GOOD] >> BootstrapTabletsValidatorTests::TestNoNodeForTablet [GOOD] >> TableIndex::CompatibleVectorIndex [GOOD] >> TableIndex::NotCompatibleVectorIndex [GOOD] >> BootstrapTabletsValidatorTests::TestRequiredTablet [GOOD] >> BootstrapTabletsValidatorTests::TestImportantTablet [GOOD] >> BootstrapTabletsValidatorTests::TestCompactionBroker [GOOD] >> ResourceBrokerConfigValidatorTests::TestMinConfig [GOOD] >> ResourceBrokerConfigValidatorTests::TestRepeatedQueueName [GOOD] >> ResourceBrokerConfigValidatorTests::TestNoDefaultQueue [GOOD] >> ResourceBrokerConfigValidatorTests::TestNoUnknownTask [GOOD] >> BootstrapTabletsValidatorTests::TestUnknownNodeForTablet [GOOD] >> NameserviceConfigValidatorTests::TestAddNewNode [GOOD] >> NameserviceConfigValidatorTests::TestDuplicatingHostPort [GOOD] >> NameserviceConfigValidatorTests::TestDuplicatingAddrPort [GOOD] >> TYardTest::TestFormatInfo [GOOD] >> TYardTest::TestEnormousDisk >> ReadBatcher::ReadBatcher [GOOD] |87.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/anubis_osiris/ut/unittest |87.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/anubis_osiris/ut/unittest |87.5%| [PY] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stability/tool/objcopy_533f06087e794c7af638ea75dc.o |87.5%| [PY] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stability/tool/objcopy_533f06087e794c7af638ea75dc.o |87.5%| [TS] {asan, default-linux-x86_64, release} ydb/library/persqueue/topic_parser/ut/unittest >> 
TopicNameConverterTest::FirstClass [GOOD] |87.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/base/ut/unittest >> TableIndex::NotCompatibleVectorIndex [GOOD] |87.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/anubis_osiris/ut/unittest |87.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/cms/console/validators/ut/unittest >> ResourceBrokerConfigValidatorTests::TestUnusedQueue [GOOD] |87.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/base/ut/unittest >> TLogoBlobTest::LogoBlobCompare [GOOD] |87.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/cms/console/validators/ut/unittest >> ResourceBrokerConfigValidatorTests::TestNoUnknownTask [GOOD] |87.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/cms/console/validators/ut/unittest >> BootstrapTabletsValidatorTests::TestCompactionBroker [GOOD] |87.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/cms/console/validators/ut/unittest >> NameserviceConfigValidatorTests::TestDuplicatingAddrPort [GOOD] |87.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/base/ut/unittest >> Path::Name_ExtraSymbols [GOOD] >> DiscoveryConverterTest::DiscoveryConverter [GOOD] >> DiscoveryConverterTest::EmptyModern [GOOD] |87.6%| [TS] {asan, default-linux-x86_64, release} ydb/core/scheme/ut_pg/unittest >> TBlockDeviceTest::TestWriteSectorMapAllTypes [GOOD] >> TBlockDeviceTest::WriteReadRestart >> TBsVDiskGC::TGCManyVPutsCompactGCAllTest [GOOD] >> TBlobStorageGroupTypeTest::TestCorrectLayout [GOOD] >> TGuardianImpl::FollowerTracker [GOOD] >> TGuardianImpl::FollowerTrackerDuplicates [GOOD] >> TLocalDbTest::BackupTaskNameChangedAtLoadTime [GOOD] >> TLogoBlobIdHashTest::SimpleTest [GOOD] >> TLogoBlobIdHashTest::SimpleTestPartIdDoesNotMatter [GOOD] >> TLogoBlobIdHashTest::SimpleTestBlobSizeDoesNotMatter [GOOD] >> TLogoBlobIdHashTest::SimpleTestWithDifferentChannel [GOOD] >> TRegistryTests::TestLock [GOOD] >> TRegistryTests::TestClasses [GOOD] >> TRegistryTests::TestDisableEnable [GOOD] >> TBlobStorageGroupTypeTest::OutputInfoAboutErasureSpecies [GOOD] >> Path::CanonizeOld [GOOD] >> Path::CanonizeFast [GOOD] >> Path::CanonizedStringIsSame1 [GOOD] >> Path::CanonizedStringIsSame2 [GOOD] >> Path::Name_AllSymbols [GOOD] >> Path::Name_RussianAlphabet_SetLocale_C_UTF8 [GOOD] >> Path::Name_WeirdLocale_RegularName [GOOD] >> Path::Name_WeirdLocale_WeirdName [GOOD] |87.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hullop/ut/unittest >> ReadBatcher::ReadBatcher [GOOD] |87.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/anubis_osiris/ut/unittest >> TMemoryStatsAggregator::Aggregate_Summarize_ExternalConsumption_DifferentHosts [GOOD] >> TMemoryStatsAggregator::Aggregate_Summarize_NoExternalConsumption_DifferentHosts [GOOD] >> TMemoryStatsAggregator::Aggregate_Summarize_ExternalConsumption_OneHost [GOOD] >> TMemoryStatsAggregator::Aggregate_Summarize_NoExternalConsumption_OneHost [GOOD] |87.6%| [TS] {asan, default-linux-x86_64, release} ydb/library/persqueue/topic_parser/ut/unittest >> DiscoveryConverterTest::EmptyModern [GOOD] |87.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/persqueue/ut/partition_ut.cpp |87.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/base/ut/unittest >> Path::Name_AllSymbols [GOOD] >> TLogoBlobTest::LogoBlobSort [GOOD] >> TMemoryStatsAggregator::Aggregate_Empty [GOOD] >> TMemoryStatsAggregator::Aggregate_Single [GOOD] >> TMemoryStatsAggregator::Aggregate_ExternalConsumption_CollidingHosts [GOOD] |87.6%| [CC] 
{BAZEL_UPLOAD} $(S)/ydb/core/persqueue/ut/partition_ut.cpp |87.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/base/ut/unittest >> TLocalDbTest::BackupTaskNameChangedAtLoadTime [GOOD] |87.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/cms/console/validators/ut/unittest >> TRegistryTests::TestDisableEnable [GOOD] |87.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/base/ut/unittest >> TLogoBlobIdHashTest::SimpleTestWithDifferentChannel [GOOD] |87.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/base/ut/unittest >> Path::Name_WeirdLocale_WeirdName [GOOD] >> TBlobStorageBarriersTreeTest::Tree [GOOD] |87.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_vdisk/unittest >> TBsVDiskGC::TGCManyVPutsCompactGCAllTest [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/base/ut/unittest >> TMemoryStatsAggregator::Aggregate_Summarize_NoExternalConsumption_OneHost [GOOD] Test command err: AnonRss: 11 CGroupLimit: 21 MemTotal: 31 MemAvailable: 41 AllocatedMemory: 51 AllocatorCachesMemory: 61 HardLimit: 71 SoftLimit: 81 TargetUtilization: 91 ExternalConsumption: 101 SharedCacheConsumption: 111 SharedCacheLimit: 121 MemTableConsumption: 131 MemTableLimit: 141 QueryExecutionConsumption: 151 QueryExecutionLimit: 161 AnonRss: 12 CGroupLimit: 22 MemTotal: 32 MemAvailable: 42 AllocatedMemory: 52 AllocatorCachesMemory: 62 HardLimit: 72 SoftLimit: 82 TargetUtilization: 92 ExternalConsumption: 102 SharedCacheConsumption: 112 SharedCacheLimit: 122 MemTableConsumption: 132 MemTableLimit: 142 QueryExecutionConsumption: 152 QueryExecutionLimit: 162 AnonRss: 13 CGroupLimit: 23 MemTotal: 33 MemAvailable: 43 AllocatedMemory: 53 AllocatorCachesMemory: 63 HardLimit: 73 SoftLimit: 83 TargetUtilization: 93 ExternalConsumption: 103 SharedCacheConsumption: 113 SharedCacheLimit: 123 MemTableConsumption: 133 MemTableLimit: 143 QueryExecutionConsumption: 153 QueryExecutionLimit: 163 AnonRss: 36 CGroupLimit: 66 MemTotal: 96 MemAvailable: 126 AllocatedMemory: 156 AllocatorCachesMemory: 186 HardLimit: 216 SoftLimit: 246 TargetUtilization: 276 ExternalConsumption: 306 SharedCacheConsumption: 336 SharedCacheLimit: 366 MemTableConsumption: 396 MemTableLimit: 426 QueryExecutionConsumption: 456 QueryExecutionLimit: 486 AnonRss: 11 CGroupLimit: 21 MemTotal: 31 MemAvailable: 41 AllocatedMemory: 51 AllocatorCachesMemory: 61 HardLimit: 71 SoftLimit: 81 TargetUtilization: 91 SharedCacheConsumption: 111 SharedCacheLimit: 121 MemTableConsumption: 131 MemTableLimit: 141 QueryExecutionConsumption: 151 QueryExecutionLimit: 161 AnonRss: 12 CGroupLimit: 22 MemTotal: 32 MemAvailable: 42 AllocatedMemory: 52 AllocatorCachesMemory: 62 HardLimit: 72 SoftLimit: 82 TargetUtilization: 92 SharedCacheConsumption: 112 SharedCacheLimit: 122 MemTableConsumption: 132 MemTableLimit: 142 QueryExecutionConsumption: 152 QueryExecutionLimit: 162 AnonRss: 13 CGroupLimit: 23 MemTotal: 33 MemAvailable: 43 AllocatedMemory: 53 AllocatorCachesMemory: 63 HardLimit: 73 SoftLimit: 83 TargetUtilization: 93 SharedCacheConsumption: 113 SharedCacheLimit: 123 MemTableConsumption: 133 MemTableLimit: 143 QueryExecutionConsumption: 153 QueryExecutionLimit: 163 AnonRss: 36 CGroupLimit: 66 MemTotal: 96 MemAvailable: 126 AllocatedMemory: 156 AllocatorCachesMemory: 186 HardLimit: 216 SoftLimit: 246 TargetUtilization: 276 SharedCacheConsumption: 336 SharedCacheLimit: 366 MemTableConsumption: 396 MemTableLimit: 426 QueryExecutionConsumption: 456 QueryExecutionLimit: 486 AnonRss: 11 CGroupLimit: 21 MemTotal: 31 MemAvailable: 
41 AllocatedMemory: 51 AllocatorCachesMemory: 61 HardLimit: 71 SoftLimit: 81 TargetUtilization: 91 ExternalConsumption: 101 SharedCacheConsumption: 111 SharedCacheLimit: 121 MemTableConsumption: 131 MemTableLimit: 141 QueryExecutionConsumption: 151 QueryExecutionLimit: 161 AnonRss: 12 CGroupLimit: 22 MemTotal: 32 MemAvailable: 42 AllocatedMemory: 52 AllocatorCachesMemory: 62 HardLimit: 72 SoftLimit: 82 TargetUtilization: 92 ExternalConsumption: 102 SharedCacheConsumption: 112 SharedCacheLimit: 122 MemTableConsumption: 132 MemTableLimit: 142 QueryExecutionConsumption: 152 QueryExecutionLimit: 162 AnonRss: 13 CGroupLimit: 23 MemTotal: 33 MemAvailable: 43 AllocatedMemory: 53 AllocatorCachesMemory: 63 HardLimit: 73 SoftLimit: 83 TargetUtilization: 93 ExternalConsumption: 103 SharedCacheConsumption: 113 SharedCacheLimit: 123 MemTableConsumption: 133 MemTableLimit: 143 QueryExecutionConsumption: 153 QueryExecutionLimit: 163 AnonRss: 36 CGroupLimit: 66 MemTotal: 33 MemAvailable: 43 AllocatedMemory: 156 AllocatorCachesMemory: 186 HardLimit: 73 SoftLimit: 83 TargetUtilization: 93 ExternalConsumption: 80 SharedCacheConsumption: 336 SharedCacheLimit: 366 MemTableConsumption: 396 MemTableLimit: 426 QueryExecutionConsumption: 456 QueryExecutionLimit: 486 AnonRss: 11 CGroupLimit: 21 MemTotal: 31 MemAvailable: 41 AllocatedMemory: 51 AllocatorCachesMemory: 61 HardLimit: 71 SoftLimit: 81 TargetUtilization: 91 ExternalConsumption: 101 SharedCacheConsumption: 111 SharedCacheLimit: 121 MemTableConsumption: 131 MemTableLimit: 141 QueryExecutionConsumption: 151 QueryExecutionLimit: 161 AnonRss: 12 CGroupLimit: 22 MemTotal: 32 MemAvailable: 42 AllocatedMemory: 52 AllocatorCachesMemory: 62 HardLimit: 72 SoftLimit: 82 TargetUtilization: 92 ExternalConsumption: 102 SharedCacheConsumption: 112 SharedCacheLimit: 122 MemTableConsumption: 132 MemTableLimit: 142 QueryExecutionConsumption: 152 QueryExecutionLimit: 162 AnonRss: 13 CGroupLimit: 23 MemTotal: 33 MemAvailable: 43 AllocatedMemory: 53 AllocatorCachesMemory: 63 HardLimit: 73 SoftLimit: 83 TargetUtilization: 93 ExternalConsumption: 103 SharedCacheConsumption: 113 SharedCacheLimit: 123 MemTableConsumption: 133 MemTableLimit: 143 QueryExecutionConsumption: 153 QueryExecutionLimit: 163 AnonRss: 36 CGroupLimit: 66 MemTotal: 96 MemAvailable: 126 AllocatedMemory: 156 AllocatorCachesMemory: 186 HardLimit: 216 SoftLimit: 246 TargetUtilization: 276 SharedCacheConsumption: 336 SharedCacheLimit: 366 MemTableConsumption: 396 MemTableLimit: 426 QueryExecutionConsumption: 456 QueryExecutionLimit: 486 |87.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/barriers/ut/unittest >> TCircularQueueTest::ShouldPush |87.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/barriers/ut/unittest >> TIntervalSetTest::IntervalVecTestSpecificAdd >> TIntervalSetTest::IntervalVecTestEmpty [GOOD] >> TCircularQueueTest::ShouldPush [GOOD] >> TCircularQueueTest::ShouldNotPushTwice [GOOD] >> TCircularQueueTest::ShouldRemove [GOOD] >> TCircularQueueTest::ShouldNotRemoveMissing [GOOD] >> TCircularQueueTest::ShouldRemoveCurrent [GOOD] >> TCircularQueueTest::ShouldRemoveCurrentLast [GOOD] >> TConcurrentRWHashTest::TEmptyGetTest [GOOD] >> TConcurrentRWHashTest::TInsertTest [GOOD] >> TConcurrentRWHashTest::TInsertIfAbsentTest |87.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/scheme/kqp_constraints_ut.cpp |87.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/barriers/ut/unittest >> 
TCowBTreeTest::SeekForwardPermutationsInplace [GOOD] >> TCowBTreeTest::SeekForwardPermutationsThreadSafe >> TIntervalSetTest::IntervalVecTestSpecificAdd [GOOD] >> TIntervalSetTest::IntervalVecTestAdd >> TConcurrentRWHashTest::TInsertIfAbsentTest [GOOD] >> TConcurrentRWHashTest::TInsertIfAbsentTestFunc [GOOD] >> TConcurrentRWHashTest::TRemoveTest [GOOD] >> TConcurrentRWHashTest::TEraseTest [GOOD] >> TCowBTreeTest::Empty [GOOD] >> TCowBTreeTest::Basics [GOOD] >> TCowBTreeTest::ClearAndReuse >> TBlobStorageBarriersTreeTest::MemViewSnapshots [GOOD] >> TStateStorageConfig::TestReplicaSelection >> TCowBTreeTest::SeekForwardPermutationsThreadSafe [GOOD] >> TCowBTreeTest::SeekBackwardPermutationsInplace [GOOD] >> TCowBTreeTest::SeekBackwardPermutationsThreadSafe |87.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/barriers/ut/unittest |87.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/barriers/ut/unittest |87.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/scheme/kqp_constraints_ut.cpp >> TIntervalSetTest::IntervalVecTestAdd [GOOD] >> TIntervalSetTest::IntervalVecTestAddSubtract [GOOD] >> TIntervalSetTest::IntervalVecTestSubtract [GOOD] >> TIntervalSetTest::IntervalVecTestSubtractAgainstReference >> TCowBTreeTest::SeekBackwardPermutationsThreadSafe [GOOD] >> TCowBTreeTest::RandomInsertInplace >> TCowBTreeTest::ClearAndReuse [GOOD] >> TCowBTreeTest::MultipleSnapshots |87.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/barriers/ut/unittest >> TBlobStorageBarriersTreeTest::Tree [GOOD] |87.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/barriers/ut/unittest >> TIntervalSetTest::IntervalVecTestSubtractAgainstReference [GOOD] >> TIntervalSetTest::IntervalVecTestAddAgainstReference >> TCircularOperationQueueTest::ShouldStartInflight100 [GOOD] >> TCircularOperationQueueTest::ShouldStartInflightEnqueue1 [GOOD] >> TCircularOperationQueueTest::ShouldStartInflight3 [GOOD] >> TCircularOperationQueueTest::ShouldScheduleWakeupWhenHasWaitingAndStart [GOOD] >> TCircularOperationQueueTest::ShouldNotStartUntilStart [GOOD] >> TCircularOperationQueueTest::ShouldReturnExecTime [GOOD] >> TCircularOperationQueueTest::ShouldStartInflightEnqueue100 [GOOD] >> TCircularOperationQueueTest::ShouldStartInflight1 [GOOD] >> TCircularOperationQueueTest::ShouldStartEmpty [GOOD] >> TCircularOperationQueueTest::ShouldStartInflight10 [GOOD] >> TCircularOperationQueueTest::ShouldStartInflight2 [GOOD] >> TCircularOperationQueueTest::ShouldStartInflightEnqueue10 [GOOD] >> TCircularOperationQueueTest::ShouldScheduleWakeupWhenNothingStarted [GOOD] >> TCircularOperationQueueTest::ShouldStartInflightEnqueue3 [GOOD] >> TCircularOperationQueueTest::ShouldStartInflightEnqueue2 [GOOD] >> TCircularOperationQueueTest::ShouldTryToStartAnotherOneWhenStartFails [GOOD] >> TCircularOperationQueueTest::ShouldShuffle >> TCircularOperationQueueTest::UseMinOperationRepeatDelayWhenTimeout [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/base/ut/unittest >> TMemoryStatsAggregator::Aggregate_ExternalConsumption_CollidingHosts [GOOD] Test command err: AnonRss: 11 CGroupLimit: 21 MemTotal: 31 MemAvailable: 41 AllocatedMemory: 51 AllocatorCachesMemory: 61 HardLimit: 71 SoftLimit: 81 TargetUtilization: 91 ExternalConsumption: 101 SharedCacheConsumption: 111 SharedCacheLimit: 121 MemTableConsumption: 131 MemTableLimit: 141 QueryExecutionConsumption: 151 QueryExecutionLimit: 161 AnonRss: 11 CGroupLimit: 21 MemTotal: 31 
MemAvailable: 41 AllocatedMemory: 51 AllocatorCachesMemory: 61 HardLimit: 71 SoftLimit: 81 TargetUtilization: 91 ExternalConsumption: 101 SharedCacheConsumption: 111 SharedCacheLimit: 121 MemTableConsumption: 131 MemTableLimit: 141 QueryExecutionConsumption: 151 QueryExecutionLimit: 161 AnonRss: 11 CGroupLimit: 21 MemTotal: 31 MemAvailable: 41 AllocatedMemory: 51 AllocatorCachesMemory: 61 HardLimit: 71 SoftLimit: 81 TargetUtilization: 91 ExternalConsumption: 101 SharedCacheConsumption: 111 SharedCacheLimit: 121 MemTableConsumption: 131 MemTableLimit: 141 QueryExecutionConsumption: 151 QueryExecutionLimit: 161 AnonRss: 12 CGroupLimit: 22 MemTotal: 32 MemAvailable: 42 AllocatedMemory: 52 AllocatorCachesMemory: 62 HardLimit: 72 SoftLimit: 82 TargetUtilization: 92 ExternalConsumption: 102 SharedCacheConsumption: 112 SharedCacheLimit: 122 MemTableConsumption: 132 MemTableLimit: 142 QueryExecutionConsumption: 152 QueryExecutionLimit: 162 AnonRss: 13 CGroupLimit: 23 MemTotal: 33 MemAvailable: 43 AllocatedMemory: 53 AllocatorCachesMemory: 63 HardLimit: 73 SoftLimit: 83 TargetUtilization: 93 ExternalConsumption: 103 SharedCacheConsumption: 113 SharedCacheLimit: 123 MemTableConsumption: 133 MemTableLimit: 143 QueryExecutionConsumption: 153 QueryExecutionLimit: 163 AnonRss: 36 CGroupLimit: 66 MemTotal: 65 MemAvailable: 85 AllocatedMemory: 156 AllocatorCachesMemory: 186 HardLimit: 145 SoftLimit: 165 TargetUtilization: 185 ExternalConsumption: 194 SharedCacheConsumption: 336 SharedCacheLimit: 366 MemTableConsumption: 396 MemTableLimit: 426 QueryExecutionConsumption: 456 QueryExecutionLimit: 486 >> TCircularOperationQueueTest::ShouldShuffle [GOOD] >> TCircularOperationQueueTest::RemoveNonExistingWhenShuffle [GOOD] >> TCircularOperationQueueTest::ShouldTolerateInaccurateTimer [GOOD] >> TCircularQueueTest::Empty [GOOD] >> TCircularQueueTest::ShouldNextSingleItem [GOOD] >> TCircularQueueTest::ShouldNextMulti [GOOD] >> TCircularQueueTest::ShouldGetQueue [GOOD] >> TBsLocalRecovery::WriteRestartReadHugeIncreased [GOOD] >> TBsLocalRecovery::WriteRestartReadHugeDecreased >> TPriorityOperationQueueTest::ShouldStartEmpty [GOOD] >> TPriorityOperationQueueTest::ShouldStartByPriority [GOOD] >> TPriorityOperationQueueTest::ShouldStartByPriorityWithRemove [GOOD] >> TIntervalSetTest::IntervalVecTestAddAgainstReference [GOOD] >> TIntervalSetTest::IntervalVecTestIsSubsetOfAgainstReference [GOOD] >> TIntervalSetTest::IntervalVecTestToStringAgainstReference [GOOD] >> TIntervalSetTest::IntervalVecUnion |87.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/barriers/ut/unittest |87.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/barriers/ut/unittest >> TPriorityOperationQueueTest::ShouldUpdatePriorityReadyQueue [GOOD] >> TPriorityOperationQueueTest::ShouldUpdatePriorityWaitingQueue [GOOD] >> TPriorityOperationQueueTest::ShouldReturnExecTimeWhenUpdateRunningPriority [GOOD] >> TPriorityOperationQueueTest::UpdateNonExistingShouldReturnFalse [GOOD] >> TPriorityQueueTest::TestOrder [GOOD] >> TQueueInplaceTests::TestSimpleInplace [GOOD] >> TQueueInplaceTests::CleanInDestructor [GOOD] >> TSimpleCacheTest::TestSimpleCache [GOOD] >> TSimpleCacheTest::TestNotSoSimpleCache [GOOD] >> TStrongTypeTest::DefaultConstructorDeleted [GOOD] >> TStrongTypeTest::DefaultConstructorValue [GOOD] >> TTokenBucketTest::Unlimited [GOOD] >> TTokenBucketTest::Limited [GOOD] >> TTokenBucketTest::DelayCalculation [GOOD] >> TULID::ParseAndFormat [GOOD] >> TULID::HeadByteOrder [GOOD] >> 
TULID::TailByteOrder [GOOD] >> TULID::EveryBitOrder [GOOD] >> TULID::Generate [GOOD] >> TWildcardTest::TestWildcard [GOOD] >> TWildcardTest::TestWildcards [GOOD] |87.6%| [TA] $(B)/ydb/library/persqueue/topic_parser/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> TIntrusiveStackTest::TestEmptyPop [GOOD] >> TIntrusiveStackTest::TestPushPop [GOOD] >> TLockFreeIntrusiveStackTest::ConcurrentRefCountNeverEmpty >> TIntervalSetTest::IntervalVecUnion [GOOD] >> TIntervalSetTest::IntervalVecUnionInplace |87.6%| [TA] {RESULT} $(B)/ydb/library/persqueue/topic_parser/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> AuthTokenAllowed::FailOnListAndTokenWithEmptyUserSid [GOOD] >> AuthTokenAllowed::FailOnListAndTokenWithEmptyUserSidAndGroups [GOOD] >> AuthTokenAllowed::FailOnListAndNoToken [GOOD] >> TIntervalSetTest::IntervalSetTestSpecificAdd [GOOD] >> TIntervalSetTest::IntervalSetTestAdd >> TIntervalSetTest::IntervalSetTestEmpty [GOOD] |87.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/barriers/ut/unittest >> TBlobStorageBarriersTreeTest::MemViewSnapshots [GOOD] >> SysViewQueryHistory::StableMerge [GOOD] >> TIntervalSetTest::IntervalVecUnionInplace [GOOD] >> TIntervalSetTest::IntervalVecUnionInplaceSelf [GOOD] >> TIntervalSetTest::IntervalVecIntersection >> PgTest::DumpIntCells >> TIntervalSetTest::IntervalSetTestAdd [GOOD] >> TIntervalSetTest::IntervalSetTestAddSubtract [GOOD] >> TIntervalSetTest::IntervalMapTestSubtract [GOOD] >> TIntervalSetTest::IntervalSetTestSubtract [GOOD] >> TIntervalSetTest::IntervalMapTestSubtractAgainstReference >> TBsVDiskRepl1::ReplEraseDiskRestore [GOOD] >> TBsVDiskRepl1::ReadOnly |87.6%| [TS] {asan, default-linux-x86_64, release} ydb/core/scheme/ut_pg/unittest >> PgTest::DumpIntCells [GOOD] |87.6%| [TA] $(B)/ydb/core/cms/console/validators/ut/test-results/unittest/{meta.json ... results_accumulator.log} |87.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/base/ut_auth/unittest >> AuthTokenAllowed::FailOnListAndNoToken [GOOD] >> SysViewQueryHistory::AddDedup [GOOD] >> SysViewQueryHistory::AddDedup2 >> TIntervalSetTest::IntervalVecIntersection [GOOD] |87.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/util/ut/unittest >> TCircularQueueTest::ShouldGetQueue [GOOD] >> TIntervalSetTest::IntervalVecIntersectionInplace >> AuthDatabaseAdmin::FailOnEmptyOwnerAndEmptyToken [GOOD] >> AuthDatabaseAdmin::FailOnEmptyOwnerAndTokenWithEmptyUserSid [GOOD] >> AuthDatabaseAdmin::FailOnEmptyOwnerAndNoToken [GOOD] >> SysViewQueryHistory::AddDedup2 [GOOD] >> TIntervalSetTest::IntervalMapTestSubtractAgainstReference [GOOD] >> TIntervalSetTest::IntervalSetTestSubtractAgainstReference |87.6%| [TA] {RESULT} $(B)/ydb/core/cms/console/validators/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} >> TBsVDiskDefrag::Defrag50PercentGarbage [GOOD] >> TBsVDiskExtreme::Simple3Put1GetMissingKeyFresh >> TIntervalSetTest::IntervalSetTestSubtractAgainstReference [GOOD] >> TIntervalSetTest::IntervalSetTestAddAgainstReference >> TBsVDiskRepl3::ReplEraseDiskRestoreMultipart [GOOD] >> TBsVDiskRepl3::AnubisTest [GOOD] >> TBsVDiskRepl3::ReplPerf >> SysViewQueryHistory::AddDedupRandom >> TIntervalSetTest::IntervalSetTestAddAgainstReference [GOOD] >> TIntervalSetTest::IntervalSetTestIsSubsetOfAgainstReference >> TIntervalSetTest::IntervalVecIntersectionInplace [GOOD] >> TIntervalSetTest::IntervalVecIntersectionInplaceSelf [GOOD] >> TIntervalSetTest::IntervalVecDifference >> SysViewQueryHistory::ServiceQueryHistoryAdd >> AuthTokenAllowed::PassOnEmptyListAndNoToken [GOOD] >> AuthTokenAllowed::PassOnEmptyListAndInvalidTokenSerialized [GOOD] |87.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/olap/datatime64_ut.cpp |87.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/util/ut/unittest >> TWildcardTest::TestWildcards [GOOD] >> SysViewQueryHistory::AddDedupRandom [GOOD] >> TIntervalSetTest::IntervalSetTestIsSubsetOfAgainstReference [GOOD] >> TIntervalSetTest::IntervalMapTestToStringAgainstReference [GOOD] >> SysViewQueryHistory::ServiceQueryHistoryAdd [GOOD] >> AuthTokenAllowed::PassOnEmptyListAndEmptyToken [GOOD] >> AuthTokenAllowed::FailOnListMatchGroupSid [GOOD] >> SysViewQueryHistory::StableMerge2 [GOOD] |87.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/sys_view/service/ut/unittest >> SysViewQueryHistory::StableMerge [GOOD] |87.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/olap/datatime64_ut.cpp >> TIntervalSetTest::IntervalSetTestToStringAgainstReference [GOOD] >> TIntervalSetTest::IntervalMapUnion >> AuthTokenAllowed::PassOnListMatchUserSid [GOOD] >> AuthTokenAllowed::PassOnListMatchUserSidWithGroup [GOOD] >> TIntervalSetTest::IntervalVecDifference [GOOD] >> TIntervalSetTest::IntervalVecDifferenceInplaceSelf [GOOD] >> TIntrusiveFixedHashSetTest::TestEmptyFind [GOOD] >> TIntrusiveFixedHashSetTest::TestPushFindClear [GOOD] >> TIntrusiveHeapTest::TestEmpty [GOOD] >> TIntrusiveHeapTest::TestAddRemove [GOOD] >> TIntrusiveHeapTest::TestUpdateNoChange [GOOD] >> TIntrusiveHeapTest::TestUpdateIncrease [GOOD] >> TIntrusiveHeapTest::TestUpdateDecrease [GOOD] |87.6%| [TS] {asan, default-linux-x86_64, release} ydb/core/scheme/ut_pg/unittest >> PgTest::DumpIntCells [GOOD] |87.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/anubis_osiris/ut/unittest |87.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/base/ut_auth/unittest >> AuthDatabaseAdmin::FailOnEmptyOwnerAndNoToken [GOOD] >> TStateStorageConfig::TestReplicaSelection [GOOD] >> TStateStorageConfig::TestMultiReplicaFailDomains |87.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/schemeshard/ut_login/ut_login.cpp >> SysViewQueryHistory::AggrMerge [GOOD] >> SysViewQueryHistory::AggrMergeDedup [GOOD] >> SysViewQueryHistory::ScanQueryHistoryMerge [GOOD] |87.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/base/ut_auth/unittest >> AuthTokenAllowed::PassOnEmptyListAndInvalidTokenSerialized [GOOD] |87.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/sys_view/service/ut/unittest >> SysViewQueryHistory::AddDedup2 [GOOD] |87.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/sys_view/service/ut/unittest >> SysViewQueryHistory::AddDedupRandom [GOOD] |87.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/schemeshard/ut_login/ut_login.cpp |87.7%| [TM] {asan, 
default-linux-x86_64, release} ydb/core/sys_view/service/ut/unittest >> SysViewQueryHistory::StableMerge2 [GOOD] |87.7%| [TA] $(B)/ydb/core/blobstorage/vdisk/hulldb/barriers/ut/test-results/unittest/{meta.json ... results_accumulator.log} |87.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/sys_view/service/ut/unittest >> SysViewQueryHistory::ServiceQueryHistoryAdd [GOOD] |87.7%| [TA] {RESULT} $(B)/ydb/core/blobstorage/vdisk/hulldb/barriers/ut/test-results/unittest/{meta.json ... results_accumulator.log} |87.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/base/ut_auth/unittest >> AuthTokenAllowed::FailOnListMatchGroupSid [GOOD] |87.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/sys_view/service/ut/unittest >> SysViewQueryHistory::ScanQueryHistoryMerge [GOOD] |87.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/base/ut_auth/unittest >> AuthTokenAllowed::PassOnListMatchUserSidWithGroup [GOOD] >> TIntervalSetTest::IntervalMapUnion [GOOD] >> TIntervalSetTest::IntervalSetUnion |87.7%| [TS] {asan, default-linux-x86_64, release} ydb/core/scheme/ut_pg/unittest |87.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/sys_view/service/ut/unittest >> SysViewQueryHistory::AggrMergeDedup [GOOD] |87.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/sys_view/service/ut/unittest >> SysViewQueryHistory::AggrMerge [GOOD] |87.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/util/ut/unittest >> TIntrusiveHeapTest::TestUpdateDecrease [GOOD] >> TBsVDiskRepl1::ReadOnly [GOOD] >> TCowBTreeTest::RandomInsertInplace [GOOD] >> TCowBTreeTest::RandomInsertThreadSafe >> TIntervalSetTest::IntervalSetUnion [GOOD] >> TIntervalSetTest::IntervalMapUnionInplace >> TBsVDiskExtreme::Simple3Put1GetMissingKeyFresh [GOOD] >> TBsVDiskExtreme::Simple3Put1GetMissingKeyCompaction >> TPDiskUtil::DriveEstimator [GOOD] >> TPDiskUtil::OffsetParsingCorrectness >> SysViewQueryHistory::TopReadBytesAdd [GOOD] >> TPDiskUtil::OffsetParsingCorrectness [GOOD] >> TPDiskUtil::PayloadParsingTest [GOOD] >> TPDiskUtil::SectorRestorator [GOOD] >> TPDiskUtil::SectorPrint [GOOD] >> TPDiskUtil::SectorMap >> TStreamRequestUnitsCalculatorTest::Basic [GOOD] >> TTimeGridTest::TimeGrid [GOOD] >> TPDiskUtil::SectorMap [GOOD] >> TPDiskUtil::FormatSectorMap ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_vdisk/unittest >> TBsVDiskRepl1::ReadOnly [GOOD] Test command err: 2025-05-07T08:45:50.490437Z :BS_SYNCER ERROR: guid_recovery.cpp:714: PDiskId# 4 VDISK[0:_:0:1:1]: (0) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2025-05-07T08:45:50.662294Z :BS_SYNCER ERROR: guid_recovery.cpp:767: PDiskId# 4 VDISK[0:_:0:1:1]: (0) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 6901257814107392838] 2025-05-07T08:45:51.750846Z :BS_SYNCER ERROR: blobstorage_osiris.cpp:203: PDiskId# 4 VDISK[0:_:0:1:1]: (0) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0 >> TPDiskUtil::FormatSectorMap [GOOD] >> TPDiskUtil::SectorMapStoreLoadFromFile >> TPDiskUtil::SectorMapStoreLoadFromFile [GOOD] >> TIntervalSetTest::IntervalMapUnionInplace [GOOD] >> TIntervalSetTest::IntervalSetUnionInplace >> Init::TWithDefaultParser [GOOD] |87.7%| [TS] {asan, default-linux-x86_64, release} ydb/core/metering/ut/unittest >> TTimeGridTest::TimeGrid [GOOD] |87.7%| [TA] $(B)/ydb/core/scheme/ut_pg/test-results/unittest/{meta.json ... 
results_accumulator.log} >> TPDiskTest::TestChunkWriteRelease [GOOD] >> TPDiskTest::TestPDiskManyOwnersInitiation |87.7%| [TS] {RESULT} ydb/core/metering/ut/unittest |87.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/sys_view/service/ut/unittest >> SysViewQueryHistory::TopReadBytesAdd [GOOD] >> TIntervalSetTest::IntervalSetUnionInplace [GOOD] >> TIntervalSetTest::IntervalMapUnionInplaceSelf [GOOD] >> TIntervalSetTest::IntervalSetUnionInplaceSelf [GOOD] >> TIntervalSetTest::IntervalSetIntersection |87.7%| [TA] $(B)/ydb/core/blobstorage/vdisk/anubis_osiris/ut/test-results/unittest/{meta.json ... results_accumulator.log} |87.7%| [TS] {asan, default-linux-x86_64, release} ydb/core/tx/sequenceshard/public/ut/unittest |87.7%| [TA] $(B)/ydb/core/base/ut_auth/test-results/unittest/{meta.json ... results_accumulator.log} >> JsonEnvelopeTest::BinaryData [GOOD] >> JsonEnvelopeTest::ArrayItem [GOOD] >> JsonEnvelopeTest::NoReplace [GOOD] >> JsonEnvelopeTest::Simple [GOOD] >> JsonEnvelopeTest::Escape [GOOD] >> TBlockDeviceTest::WriteReadRestart [GOOD] >> TColorLimitsTest::Colors [GOOD] >> TColorLimitsTest::OwnerFreeSpaceShare [GOOD] >> TLogCache::Simple [GOOD] >> TLogCache::EraseRangeOnEmpty [GOOD] >> TLogCache::EraseRangeOutsideOfData [GOOD] >> TLogCache::EraseRangeSingleMinElement [GOOD] >> TLogCache::EraseRangeSingleMidElement [GOOD] >> TLogCache::EraseRangeSingleMaxElement [GOOD] >> TLogCache::EraseRangeSample [GOOD] >> TLogCache::EraseRangeAllExact [GOOD] >> TLogCache::EraseRangeAllAmple [GOOD] >> TPDiskRaces::Decommit |87.7%| [TS] {asan, default-linux-x86_64, release} ydb/core/config/init/ut/unittest >> Init::TWithDefaultParser [GOOD] |87.7%| [TA] {RESULT} $(B)/ydb/core/blobstorage/vdisk/anubis_osiris/ut/test-results/unittest/{meta.json ... results_accumulator.log} |87.7%| [TS] {RESULT} ydb/core/tx/sequenceshard/public/ut/unittest |87.7%| [TA] {RESULT} $(B)/ydb/core/scheme/ut_pg/test-results/unittest/{meta.json ... 
results_accumulator.log} |87.7%| [TS] {RESULT} ydb/core/config/init/ut/unittest >> EncryptedFileSerializerTest::WrongParametersForDeserializer [GOOD] >> EncryptedFileSerializerTest::WrongParametersForSerializer [GOOD] >> EncryptedFileSerializerTest::SerializeWholeFileAtATime [GOOD] >> EncryptedFileSerializerTest::SplitOnBlocks |87.7%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stability/tool/libpy3tests-stability-tool.global.a >> TLockFreeIntrusiveStackTest::ConcurrentRefCountNeverEmpty [GOOD] >> TLockFreeIntrusiveStackTest::ConcurrentRefCountHeavyContention >> TIntervalSetTest::IntervalSetIntersection [GOOD] >> TIntervalSetTest::IntervalSetIntersectionInplace |87.7%| [AR] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stability/tool/libpy3tests-stability-tool.global.a |87.7%| [AR] {RESULT} $(B)/ydb/tests/stability/tool/libpy3tests-stability-tool.global.a ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/pdisk/ut/unittest >> TPDiskUtil::SectorMapStoreLoadFromFile [GOOD] Test command err: Path# /home/runner/.ya/build/build_root/zvgn/002bab/r3tmp/tmpUdbqL2//pdisk/data.bin testCase# 0 plainDataChunk# 0 all chunk reads are received all chunk writes are received all log writes are received testCase# 1 plainDataChunk# 1 all chunk reads are received all chunk writes are received all log writes are received testCase# 2 plainDataChunk# 0 restart all chunk reads are received all chunk writes are received all log writes are received testCase# 3 plainDataChunk# 1 restart all chunk reads are received all chunk writes are received all log writes are received reformat testCase# 0 plainDataChunk# 0 all chunk reads are received all chunk writes are received all log writes are received testCase# 1 plainDataChunk# 1 all chunk reads are received all chunk writes are received all log writes are received testCase# 2 plainDataChunk# 0 restart all chunk reads are received all chunk writes are received all log writes are received testCase# 3 plainDataChunk# 1 restart all chunk reads are received all chunk writes are received all log writes are received reformat |87.7%| [TS] {asan, default-linux-x86_64, release} ydb/core/log_backend/ut/unittest >> JsonEnvelopeTest::Escape [GOOD] |87.7%| [TA] {RESULT} $(B)/ydb/core/base/ut_auth/test-results/unittest/{meta.json ... 
results_accumulator.log} >> EncryptedFileSerializerTest::SplitOnBlocks [GOOD] >> EncryptedFileSerializerTest::EmptyFile [GOOD] >> EncryptedFileSerializerTest::ReadPartial [GOOD] >> EncryptedFileSerializerTest::DeleteLastByte [GOOD] >> EncryptedFileSerializerTest::AddByte [GOOD] >> EncryptedFileSerializerTest::RemoveLastBlock [GOOD] >> EncryptedFileSerializerTest::ChangeAnyByte |87.7%| [TS] {RESULT} ydb/core/log_backend/ut/unittest >> Mvp::TokenatorGetMetadataTokenGood >> EncryptedFileSerializerTest::ChangeAnyByte [GOOD] >> EncryptedFileSerializerTest::BigHeaderSize [GOOD] >> EncryptedFileSerializerTest::BigBlockSize [GOOD] >> EncryptedFileSerializerTest::RestoreFromState >> TBsVDiskExtreme::Simple3Put1GetMissingKeyCompaction [GOOD] >> Mvp::TokenatorGetMetadataTokenGood [GOOD] >> Mvp::TokenatorRefreshMetadataTokenGood >> EncryptedFileSerializerTest::RestoreFromState [GOOD] >> EncryptedFileSerializerTest::IVSerialization [GOOD] >> FormatCSV::Instants [GOOD] >> FormatCSV::EmptyData >> TPGTest::TestLogin >> FormatCSV::EmptyData [GOOD] >> FormatCSV::Common >> TPGTest::TestLogin [GOOD] >> FormatCSV::Common [GOOD] >> FormatCSV::Strings [GOOD] >> FormatCSV::Nulls [GOOD] >> TIntervalSetTest::IntervalSetIntersectionInplace [GOOD] >> TIntervalSetTest::IntervalSetIntersectionInplaceSelf [GOOD] >> TIntervalSetTest::IntervalSetDifference >> LongTxServicePublicTypes::LongTxId [GOOD] >> LongTxServicePublicTypes::Snapshot [GOOD] >> LongTxServicePublicTypes::SnapshotMaxTxId [GOOD] >> LongTxServicePublicTypes::SnapshotReadOnly [GOOD] >> TCowBTreeTest::MultipleSnapshots [GOOD] >> TCowBTreeTest::MultipleSnapshotsWithGc >> TMemoryPoolTest::AppendString [GOOD] >> TMemoryPoolTest::AllocOneByte [GOOD] >> TMemoryPoolTest::Transactions [GOOD] >> TMemoryPoolTest::TransactionsWithAlignment |87.7%| [TA] $(B)/ydb/core/sys_view/service/ut/test-results/unittest/{meta.json ... results_accumulator.log} ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_vdisk/unittest >> TBsVDiskExtreme::Simple3Put1GetMissingKeyCompaction [GOOD] Test command err: 2025-05-07T08:45:25.968678Z :BS_VDISK_PUT ERROR: blobstorage_skeleton.cpp:559: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVPut: TabletID cannot be empty; id# [0:1:10:0:0:10:1] Marker# BSVS43 2025-05-07T08:45:27.680839Z :BS_VDISK_OTHER ERROR: vdisk_context.h:143: PDiskId# 1 VDISK[0:_:0:0:0]: (0) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'PDiskId# 1 TEvLog error because PDisk State# Error, there is a terminal internal error in PDisk. Did you check EvYardInit result? Marker# BSY07 StateErrorReason# PDisk is in StateError, reason# Received TEvYardControl::Brake' 2025-05-07T08:45:27.680942Z :BS_SKELETON ERROR: blobstorage_skeletonfront.cpp:1749: PDiskId# 1 VDISK[0:_:0:0:0]: (0) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# PDiskId# 1 TEvLog error because PDisk State# Error, there is a terminal internal error in PDisk. Did you check EvYardInit result? 
Marker# BSY07 StateErrorReason# PDisk is in StateError, reason# Received TEvYardControl::Brake Marker# BSVSF03 >> ClosedIntervalSet::Union >> TPDiskRaces::KillOwnerWhileDecommitting [GOOD] >> TPDiskRaces::KillOwnerWhileDecommittingWithInflight >> TMemoryPoolTest::TransactionsWithAlignment [GOOD] >> TMemoryPoolTest::LongRollback [GOOD] >> UtilString::ShrinkToFit [GOOD] |87.7%| [TS] {asan, default-linux-x86_64, release} ydb/core/backup/common/ut/unittest >> EncryptedFileSerializerTest::IVSerialization [GOOD] |87.7%| [TS] {RESULT} ydb/core/backup/common/ut/unittest >> TIntervalSetTest::IntervalSetDifference [GOOD] >> TIntervalSetTest::IntervalSetDifferenceInplaceSelf [GOOD] >> TIntervalSetTest::IntervalSetTestIterator [GOOD] >> ArrowTest::BatchBuilder |87.7%| [TA] {RESULT} $(B)/ydb/core/sys_view/service/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> ArrowTest::BatchBuilder [GOOD] >> ArrowTest::ArrowToYdbConverter ------- [TS] {asan, default-linux-x86_64, release} ydb/core/io_formats/arrow/scheme/ut/unittest >> FormatCSV::Nulls [GOOD] Test command err: 12000000 Cannot read CSV: no columns specified Cannot read CSV: Invalid: Empty CSV file d'Artagnan '"' Jeanne d'Arc "'" 'd'Artagnan' ''"'' 'Jeanne d'Arc' '"'"' d'Artagnan '"' Jeanne d'Arc "'" src: ,"","" ,"","" ,, parsed: ᴺᵁᴸᴸ,, ᴺᵁᴸᴸ,, ᴺᵁᴸᴸ,ᴺᵁᴸᴸ,ᴺᵁᴸᴸ src: ,"","" ,"","" ,, parsed: ᴺᵁᴸᴸ,, ᴺᵁᴸᴸ,, ᴺᵁᴸᴸ,ᴺᵁᴸᴸ,ᴺᵁᴸᴸ src: \N,"","" \N,"\N","\N" \N,\N,\N parsed: ᴺᵁᴸᴸ,, ᴺᵁᴸᴸ,\N,\N ᴺᵁᴸᴸ,ᴺᵁᴸᴸ,ᴺᵁᴸᴸ src: NULL,"","" NULL,"NULL","NULL" NULL,NULL,NULL parsed: ᴺᵁᴸᴸ,, ᴺᵁᴸᴸ,NULL,NULL ᴺᵁᴸᴸ,ᴺᵁᴸᴸ,ᴺᵁᴸᴸ >> ArrowTest::ArrowToYdbConverter [GOOD] >> ArrowTest::SortWithCompositeKey [GOOD] >> ArrowTest::MergingSortedInputStream [GOOD] >> ArrowTest::MergingSortedInputStreamReversed [GOOD] >> ArrowTest::MergingSortedInputStreamReplace |87.7%| [TS] {RESULT} ydb/core/io_formats/arrow/scheme/ut/unittest |87.7%| [TS] {asan, default-linux-x86_64, release} ydb/core/tx/long_tx_service/public/ut/unittest >> LongTxServicePublicTypes::SnapshotReadOnly [GOOD] |87.7%| [TS] {RESULT} ydb/core/tx/long_tx_service/public/ut/unittest >> TPDiskTest::TestPDiskManyOwnersInitiation [GOOD] >> TPDiskTest::TestLogWriteReadWithRestarts >> ArrowTest::MergingSortedInputStreamReplace [GOOD] >> ColumnFilter::MergeFilters [GOOD] >> ColumnFilter::CombineFilters [GOOD] >> ColumnFilter::FilterSlice [GOOD] >> ColumnFilter::FilterCheckSlice [GOOD] >> ColumnFilter::FilterSlice1 [GOOD] >> ColumnFilter::CutFilter1 [GOOD] >> ColumnFilter::CutFilter2 [GOOD] >> Dictionary::Simple ------- [TS] {asan, default-linux-x86_64, release} ydb/core/pgproxy/ut/unittest >> TPGTest::TestLogin [GOOD] Test command err: 2025-05-07T08:45:59.240176Z :PGWIRE INFO: sock_listener.cpp:66: Listening on [::]:22319 2025-05-07T08:45:59.253038Z :PGWIRE DEBUG: pg_connection.cpp:61: (#13,[::1]:57630) incoming connection opened 2025-05-07T08:45:59.253239Z :PGWIRE DEBUG: pg_connection.cpp:241: (#13,[::1]:57630) -> [1] 'i' "Initial" Size(15) protocol(0x00000300) user=user 2025-05-07T08:45:59.253590Z :PGWIRE DEBUG: pg_connection.cpp:241: (#13,[::1]:57630) <- [1] 'R' "Auth" Size(4) OK |87.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/public/sdk/cpp/src/client/topic/ut/local_partition_ut.cpp |87.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/tablet_flat/ut_large/unittest |87.8%| [TS] {RESULT} ydb/core/pgproxy/ut/unittest |87.8%| [TM] {RESULT} ydb/core/tablet_flat/ut_large/unittest |87.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/public/sdk/cpp/src/client/topic/ut/local_partition_ut.cpp |87.8%| [TM] {asan, default-linux-x86_64, 
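The TPGTest::TestLogin trace above is a complete PostgreSQL v3 handshake: the client's startup packet ('i' "Initial", 15-byte body, user=user) and the proxy's 'R' reply with a 4-byte body, which is AuthenticationOk (a single int32 0). A startup body is a big-endian protocol word (version 3.0 is 0x00030000 on the wire; the 0x00000300 above is pgproxy's own rendering) followed by NUL-terminated key/value pairs and a final NUL, so 15 bytes = 4 + "user\0" + "user\0" + 1. The parser below is a self-contained sketch of that public wire layout, not the ydb/core/pgproxy implementation.

    // Sketch: decode the body of a PostgreSQL v3 startup packet.
    #include <arpa/inet.h> // ntohl
    #include <cstdint>
    #include <cstring>
    #include <map>
    #include <string>

    std::map<std::string, std::string> ParseStartupBody(const uint8_t* body, size_t len) {
        std::map<std::string, std::string> params;
        if (len < 4) {
            return params;
        }
        uint32_t proto;
        std::memcpy(&proto, body, 4);
        proto = ntohl(proto); // expect 0x00030000: major 3, minor 0
        size_t pos = 4;
        while (pos < len && body[pos] != '\0') {
            std::string key(reinterpret_cast<const char*>(body + pos));
            pos += key.size() + 1;
            if (pos >= len) {
                break; // malformed: key without value; real parsers must bound-check
            }
            std::string value(reinterpret_cast<const char*>(body + pos));
            pos += value.size() + 1;
            params[key] = value; // e.g. params["user"] == "user"
        }
        return params;
    }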
release} ydb/core/tablet_flat/ut_util/unittest >> UtilString::ShrinkToFit [GOOD] >> TStateStorageConfig::TestMultiReplicaFailDomains [GOOD] >> TStateStorageConfig::TestReplicaSelectionUniqueCombinations >> TestS3UrlEscape::EscapeEscapedForce [GOOD] >> TestS3UrlEscape::EscapeUnescapeForceRet [GOOD] >> TestS3UrlEscape::EscapeAdditionalSymbols [GOOD] >> TestUrlBuilder::UriOnly [GOOD] >> TestUrlBuilder::Basic [GOOD] >> TestUrlBuilder::BasicWithEncoding [GOOD] >> TestUrlBuilder::BasicWithAdditionalEncoding [GOOD] |87.8%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/libcore-tx-schemeshard.a |87.8%| [AR] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/libcore-tx-schemeshard.a |87.8%| [AR] {RESULT} $(B)/ydb/core/tx/schemeshard/libcore-tx-schemeshard.a >> ConsoleDumper::Basic [GOOD] >> ConsoleDumper::CoupleMerge [GOOD] >> ConsoleDumper::CoupleOverwrite |87.8%| [TM] {RESULT} ydb/core/tablet_flat/ut_util/unittest >> ConsoleDumper::CoupleOverwrite [GOOD] >> ConsoleDumper::CoupleMergeOverwriteRepeated [GOOD] >> ConsoleDumper::ReverseMerge [GOOD] >> ConsoleDumper::ReverseOverwrite [GOOD] >> ConsoleDumper::ReverseMergeOverwriteRepeated [GOOD] >> ConsoleDumper::Different [GOOD] >> ConsoleDumper::SimpleNode >> ConsoleDumper::SimpleNode [GOOD] >> ConsoleDumper::JoinSimilar [GOOD] >> ConsoleDumper::DontJoinDifferent [GOOD] >> ConsoleDumper::SimpleTenant [GOOD] >> ConsoleDumper::SimpleNodeTenant [GOOD] >> ConsoleDumper::SimpleHostId [GOOD] >> ConsoleDumper::SimpleNodeId |87.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/util/ut/unittest >> TIntervalSetTest::IntervalSetTestIterator [GOOD] >> ConsoleDumper::SimpleNodeId [GOOD] >> ConsoleDumper::DontJoinNodeTenant [GOOD] >> ConsoleDumper::JoinMultipleSimple [GOOD] >> ConsoleDumper::MergeNode [GOOD] >> ConsoleDumper::MergeOverwriteRepeatedNode [GOOD] >> ConsoleDumper::Ordering [GOOD] >> ConsoleDumper::IgnoreUnmanagedItems [GOOD] >> YamlConfig::CollectLabels [GOOD] >> YamlConfig::MaterializeSpecificConfig >> TBsLocalRecovery::WriteRestartReadHugeDecreased [GOOD] >> TBsOther1::PoisonPill >> YamlConfig::MaterializeSpecificConfig [GOOD] >> YamlConfig::MaterializeAllConfigSimple [GOOD] >> YamlConfig::MaterializeAllConfigs |87.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/ut/tx/kqp_locks_tricky_ut.cpp >> TCollectingS3ListingStrategyTests::IfNoIssuesOccursShouldReturnCollectedPaths [GOOD] >> TCollectingS3ListingStrategyTests::IfThereAreMoreRecordsThanSpecifiedByLimitShouldReturnError [GOOD] >> TCollectingS3ListingStrategyTests::IfAnyIterationReturnIssueThanWholeStrategyShouldReturnIt [GOOD] >> TCollectingS3ListingStrategyTests::IfExceptionIsReturnedFromIteratorThanItShouldCovertItToIssue [GOOD] |87.8%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/providers/s3/common/ut/unittest >> TestUrlBuilder::BasicWithAdditionalEncoding [GOOD] |87.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/ut/tx/kqp_locks_tricky_ut.cpp >> YamlConfig::MaterializeAllConfigs [GOOD] >> YamlConfig::AppendVolatileConfig [GOOD] >> YamlConfig::AppendAndResolve >> Backpressure::MonteCarlo >> MdbEndpoingGenerator::Legacy [GOOD] >> MdbEndpoingGenerator::Generic_NoTransformHost [GOOD] >> MdbEndpoingGenerator::Generic_WithTransformHost [GOOD] >> YamlConfig::AppendAndResolve [GOOD] >> YamlConfig::GetMetadata [GOOD] >> YamlConfig::ReplaceMetadata [GOOD] >> YamlConfigParser::Iterate [GOOD] >> YamlConfigParser::ProtoBytesFieldDoesNotDecodeBase64 [GOOD] >> YamlConfigParser::PdiskCategoryFromString [GOOD] >> 
YamlConfigParser::AllowDefaultHostConfigId [GOOD] >> YamlConfigParser::IncorrectHostConfigIdFails >> ConfigValidation::SameStaticGroup [GOOD] >> ConfigValidation::StaticGroupSizesGrow [GOOD] >> ConfigValidation::StaticGroupSizesShrink [GOOD] >> ConfigValidation::VDiskChanged [GOOD] >> ConfigValidation::TooManyVDiskChanged [GOOD] >> DatabaseConfigValidation::AllowedFields >> ExternalDataSourceTest::ValidateName [GOOD] >> ExternalDataSourceTest::ValidatePack [GOOD] >> ExternalDataSourceTest::ValidateAuth [GOOD] >> ExternalDataSourceTest::ValidateParameters [GOOD] >> ExternalDataSourceTest::ValidateHasExternalTable [GOOD] >> ExternalDataSourceTest::ValidateProperties [GOOD] >> ExternalDataSourceTest::ValidateLocation [GOOD] >> ExternalSourceBuilderTest::ValidateName [GOOD] >> ExternalSourceBuilderTest::ValidateAuthWithoutCondition [GOOD] >> ExternalSourceBuilderTest::ValidateAuthWithCondition [GOOD] >> ExternalSourceBuilderTest::ValidateUnsupportedField [GOOD] >> ExternalSourceBuilderTest::ValidateNonRequiredField [GOOD] >> ExternalSourceBuilderTest::ValidateRequiredField [GOOD] >> ExternalSourceBuilderTest::ValidateNonRequiredFieldValues [GOOD] >> ExternalSourceBuilderTest::ValidateRequiredFieldValues [GOOD] >> ExternalSourceBuilderTest::ValidateRequiredFieldOnCondition [GOOD] >> IcebergDdlTest::HiveCatalogWithS3Test [GOOD] >> BufferWithGaps::IsReadable [GOOD] >> PtrTest::Test1 [GOOD] >> BufferWithGaps::Basic [GOOD] >> TBatchedVecTest::TestOutputTOutputType [GOOD] >> TBatchedVecTest::TestToStringInt [GOOD] >> YamlConfigParser::IncorrectHostConfigIdFails [GOOD] >> YamlConfigParser::NoMixedHostConfigIds [GOOD] >> YamlConfigProto2Yaml::StorageConfig [GOOD] >> DatabaseConfigValidation::AllowedFields [GOOD] >> DatabaseConfigValidation::NotAllowedFields [GOOD] >> IcebergDdlTest::HadoopCatalogWithS3Test [GOOD] >> ObjectStorageTest::SuccessValidation [GOOD] >> ObjectStorageTest::FailedCreate [GOOD] >> ObjectStorageTest::FailedValidation [GOOD] >> ObjectStorageTest::FailedJsonListValidation >> ObjectStorageTest::FailedJsonListValidation [GOOD] >> ObjectStorageTest::FailedOptionalTypeValidation [GOOD] >> ObjectStorageTest::WildcardsValidation [GOOD] |87.8%| [TS] {asan, default-linux-x86_64, release} ydb/core/fq/libs/test_connection/ut/unittest |87.8%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/providers/s3/provider/ut/unittest >> TCollectingS3ListingStrategyTests::IfExceptionIsReturnedFromIteratorThanItShouldCovertItToIssue [GOOD] |87.8%| [TS] {asan, default-linux-x86_64, release} ydb/core/fq/libs/db_id_async_resolver_impl/ut/unittest >> MdbEndpoingGenerator::Generic_WithTransformHost [GOOD] >> ArrowInferenceTest::csv_simple [GOOD] >> ArrowInferenceTest::tsv_simple |87.8%| [TS] {asan, default-linux-x86_64, release} ydb/core/blobstorage/base/ut/gtest >> TBatchedVecTest::TestToStringInt [GOOD] |87.8%| [TS] {asan, default-linux-x86_64, release} ydb/core/config/validation/ut/unittest >> DatabaseConfigValidation::NotAllowedFields [GOOD] >> ArrowInferenceTest::tsv_simple [GOOD] >> ArrowInferenceTest::tsv_empty [GOOD] >> ArrowInferenceTest::broken_json [GOOD] >> ArrowInferenceTest::empty_json_each_row [GOOD] >> ArrowInferenceTest::empty_json_list [GOOD] >> ArrowInferenceTest::broken_json_list [GOOD] >> TLockFreeIntrusiveStackTest::ConcurrentRefCountHeavyContention [GOOD] >> TLockFreeIntrusiveStackTest::ConcurrentAutoNeverEmpty ------- [TS] {asan, default-linux-x86_64, release} ydb/library/yaml_config/ut/unittest >> YamlConfigProto2Yaml::StorageConfig [GOOD] Test command err: host_config: 
"[{\"drive\":[{\"type\":\"NVME\",\"path\":\"\\/dev\\/disk\\/by-partlabel\\/kikimr_nvme_01\"},{\"type\":\"NVME\",\"path\":\"\\/dev\\/disk\\/by-partlabel\\/kikimr_nvme_02\"}],\"host_config_id\":1},{\"drive\":[{\"type\":\"SSD\",\"path\":\"\\/dev\\/disk\\/by-partlabel\\/kikimr_nvme_01\"}],\"host_config_id\":2}]" "\/dev\/disk\/by-partlabel\/kikimr_nvme_02" host_config: "[{\"drive\":[{\"type\":\"NVME\",\"path\":\"\\/dev\\/disk\\/by-partlabel\\/kikimr_nvme_01\"},{\"type\":\"NVME\",\"path\":\"\\/dev\\/disk\\/by-partlabel\\/kikimr_nvme_02\"}],\"host_config_id\":1},{\"drive\":[{\"type\":\"SSD\",\"path\":\"\\/dev\\/disk\\/by-partlabel\\/kikimr_nvme_01\"}],\"host_config_id\":2}]" host_configs: - host_config_id: 1 drive: - path: /dev/disk/by-partlabel/kikimr_nvme_01 type: NVME expected_slot_count: 9 - path: /dev/disk/by-partlabel/kikimr_nvme_02 type: NVME expected_slot_count: 9 - host_config_id: 2 drive: - path: /dev/disk/by-partlabel/kikimr_nvme_01 type: SSD expected_slot_count: 9 hosts: - host: sas8-6954.search.yandex.net port: 19000 host_config_id: 1 - host: sas8-6955.search.yandex.net port: 19000 host_config_id: 2 item_config_generation: 0 |87.8%| [TS] {asan, default-linux-x86_64, release} ydb/core/external_sources/ut/unittest >> ObjectStorageTest::WildcardsValidation [GOOD] >> Mvp::TokenatorRefreshMetadataTokenGood [GOOD] >> Mvp::OpenIdConnectRequestWithIamTokenYandex [GOOD] >> Mvp::OpenIdConnectRequestWithIamTokenNebius [GOOD] >> Mvp::OpenIdConnectNonAuthorizeRequestWithOptionMethodYandex [GOOD] >> Mvp::OpenIdConnectNonAuthorizeRequestWithOptionMethodNebius >> Mvp::OpenIdConnectNonAuthorizeRequestWithOptionMethodNebius [GOOD] >> Mvp::OpenIdConnectSessionServiceCheckValidCookieYandex [GOOD] >> Mvp::OpenIdConnectSessionServiceCheckValidCookieNebius >> Mvp::OpenIdConnectSessionServiceCheckValidCookieNebius [GOOD] >> Mvp::OpenIdConnectProxyOnHttpsHost ------- [TS] {asan, default-linux-x86_64, release} ydb/core/external_sources/object_storage/inference/ut/gtest >> ArrowInferenceTest::broken_json_list [GOOD] Test command err: {
<main>: Error: couldn't open csv/tsv file, check format and compression parameters: empty file, code: 1001 } {
<main>: Error: couldn't open json file, check format and compression parameters: empty file, code: 1001 } {
<main>: Error: couldn't open json file, check format and compression parameters: empty file, code: 1001 } 2025-05-07T08:46:03.327197Z 1 00h00m00.000000s :OBJECT_STORAGE_INFERENCINATOR DEBUG: TArrowInferencinator: [1:6:6]. HandleFileError: {
<main>: Error: couldn't run arrow json chunker for /path/is/neither/real: Invalid: straddling object straddles two block boundaries (try to increase block size?), code: 1001 } {
<main>: Error: couldn't run arrow json chunker for /path/is/neither/real: Invalid: straddling object straddles two block boundaries (try to increase block size?), code: 1001 } {
<main>: Error: couldn't open json file, check format and compression parameters: Invalid: JSON parse error: Invalid value. in row 0, code: 1001 } >> Mvp::OpenIdConnectProxyOnHttpsHost [GOOD] >> Mvp::OpenIdConnectFixLocationHeader >> TArrowPushDown::SimplePushDown [GOOD] >> TArrowPushDown::FilterEverything [GOOD] >> TArrowPushDown::MatchSeveralRowGroups [GOOD] >> Mvp::OpenIdConnectFixLocationHeader [GOOD] >> Mvp::OpenIdConnectExchangeNebius >> PushdownTest::NoFilter >> Mvp::OpenIdConnectExchangeNebius [GOOD] >> Mvp::OpenIdConnectSessionServiceCheckAuthorizationFail >> PushdownTest::NoFilter [GOOD] >> TBlobStorageHullCompactDeferredQueueTest::Basic [GOOD] >> Mvp::OpenIdConnectSessionServiceCheckAuthorizationFail [GOOD] >> Mvp::OpenIdConnectFullAuthorizationFlow >> PushdownTest::Equal [GOOD] ------- [TS] {asan, default-linux-x86_64, release} ydb/mvp/core/ut/unittest >> Mvp::TokenatorRefreshMetadataTokenGood [GOOD] Test command err: 2025-05-07T08:45:58.913602Z :MVP DEBUG: mvp_tokens.cpp:77: Refreshing token metadataTokenName 2025-05-07T08:45:58.913878Z :MVP DEBUG: mvp_tokens.cpp:217: Updating metadata token 2025-05-07T08:45:58.999280Z :MVP DEBUG: mvp_tokens.cpp:77: Refreshing token metadataTokenName 2025-05-07T08:45:58.999602Z :MVP DEBUG: mvp_tokens.cpp:217: Updating metadata token 2025-05-07T08:46:04.002170Z :MVP DEBUG: mvp_tokens.cpp:77: Refreshing token metadataTokenName 2025-05-07T08:46:04.002481Z :MVP DEBUG: mvp_tokens.cpp:217: Updating metadata token >> RuntimeFeatureFlags::DefaultValues >> Mvp::OpenIdConnectFullAuthorizationFlow [GOOD] >> Mvp::OpenIdConnectFullAuthorizationFlowAjax >> RuntimeFeatureFlags::DefaultValues [GOOD] >> RuntimeFeatureFlags::ConversionToProto [GOOD] >> RuntimeFeatureFlags::ConversionFromProto [GOOD] >> RuntimeFeatureFlags::UpdatingRuntimeFlags [GOOD] >> PushdownTest::NotEqualInt32Int64 [GOOD] >> Mvp::OpenIdConnectWrongStateAuthorizationFlow >> TVDiskDefrag::HugeHeapDefragmentationRequired [GOOD] >> PushdownTest::TrueCoalesce [GOOD] >> Mvp::OpenIdConnectWrongStateAuthorizationFlow [GOOD] >> Mvp::OpenIdConnectWrongStateAuthorizationFlowAjax >> TPDiskTest::TestLogWriteReadWithRestarts [GOOD] >> TPDiskTest::TestLogSpliceNonceJump |87.8%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/providers/s3/actors/ut/unittest >> TArrowPushDown::MatchSeveralRowGroups [GOOD] >> GroupStress::Test [GOOD] >> PushdownTest::CmpInt16AndInt32 [GOOD] >> AuthConfigValidation::AcceptValidPasswordComplexity [GOOD] >> AuthConfigValidation::CannotAcceptInvalidPasswordComplexity [GOOD] >> AuthConfigValidation::AcceptValidAccountLockoutConfig [GOOD] >> AuthConfigValidation::CannotAcceptInvalidAccountLockoutConfig [GOOD] >> Mvp::OpenIdConnectWrongStateAuthorizationFlowAjax [GOOD] >> Mvp::OpenIdConnectSessionServiceCreateAuthorizationFail >> PushdownTest::PartialAnd >> Mvp::OpenIdConnectSessionServiceCreateAuthorizationFail [GOOD] >> PushdownTest::PartialAnd [GOOD] >> Mvp::OpenIdConnectSessionServiceCreateAccessTokenInvalid >> PushdownTest::PartialAndOneBranchPushdownable >> Mvp::OpenIdConnectSessionServiceCreateAccessTokenInvalid [GOOD] >> Mvp::OpenIdConnectSessionServiceCreateAccessTokenInvalidAjax |87.8%| [TS] {asan, default-linux-x86_64, release} ydb/core/base/generated/ut/unittest >> RuntimeFeatureFlags::UpdatingRuntimeFlags [GOOD] >> test.py::test[solomon-BadDownsamplingAggregation-] >> PushdownTest::PartialAndOneBranchPushdownable [GOOD] >> PushdownTest::NotNull [GOOD] >>
Mvp::OpenIdConnectSessionServiceCreateAccessTokenInvalidAjax [GOOD] >> Mvp::OpenIdConnectSessionServiceCreateOpenIdScopeMissed [GOOD] >> Mvp::OpenIdConnectAllowedHostsList >> TCowBTreeTest::RandomInsertThreadSafe [GOOD] >> TCowBTreeTest::SnapshotCascade [GOOD] >> TCowBTreeTest::SnapshotRollback >> PushdownTest::NotNullForDatetime |87.8%| [TS] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/defrag/ut/unittest >> TVDiskDefrag::HugeHeapDefragmentationRequired [GOOD] >> TSubgroupPartLayoutTest::CountEffectiveReplicas3of4 [GOOD] >> TSubgroupPartLayoutTest::CountEffectiveReplicas4of4 >> Mvp::OpenIdConnectAllowedHostsList [GOOD] >> Mvp::OpenIdConnectHandleNullResponseFromProtectedResource [GOOD] >> Mvp::OpenIdConnectSessionServiceCreateNotFoundCookie >> PushdownTest::NotNullForDatetime [GOOD] >> ParseStats::ParseWithSources >> TBlobStorageHullFresh::AppendixPerf [GOOD] >> TBlobStorageHullFresh::AppendixPerf_Tune >> Mvp::OpenIdConnectSessionServiceCreateNotFoundCookie [GOOD] >> Mvp::OpenIdConnectSessionServiceCreateGetWrongStateAndWrongCookie [GOOD] >> Mvp::OidcImpersonationStartFlow >> PushdownTest::IsNull [GOOD] >> TCowBTreeTest::MultipleSnapshotsWithGc [GOOD] >> TCowBTreeTest::MultipleSnapshotsWithClear >> ParseStats::ParseWithSources [GOOD] >> ParseStats::ParseJustOutput [GOOD] >> ParseStats::ParseMultipleGraphsV1 [GOOD] >> ParseStats::ParseMultipleGraphsV2 [GOOD] >> Mvp::OidcImpersonationStartFlow [GOOD] >> Mvp::OidcImpersonationStartNeedServiceAccountId [GOOD] >> Mvp::OidcImpersonationStopFlow [GOOD] |87.8%| [TS] {asan, default-linux-x86_64, release} ydb/core/config/validation/auth_config_validator_ut/unittest >> AuthConfigValidation::CannotAcceptInvalidAccountLockoutConfig [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hullop/ut/unittest >> TBlobStorageHullCompactDeferredQueueTest::Basic [GOOD] Test command err: STEP 1 STEP 2 StringToId# 63 numItems# 110271 |87.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_group/unittest >> GroupStress::Test [GOOD] >> PushdownTest::StringFieldsNotSupported [GOOD] >> Mvp::OidcImpersonatedAccessToProtectedResource [GOOD] >> Mvp::OidcImpersonatedAccessNotAuthorized [GOOD] >> PushdownTest::StringFieldsNotSupported2 [GOOD] |87.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/persqueue_v1/persqueue_ut.cpp |87.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stability/tool/tool |87.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stability/tool/tool >> MetaCache::BasicForwarding |87.8%| [TS] {RESULT} ydb/library/yql/providers/s3/common/ut/unittest >> TPDiskTest::TestLogSpliceNonceJump [GOOD] >> TPDiskTest::TestMultipleLogSpliceNonceJump >> MetaCache::BasicForwarding [GOOD] >> MetaCache::TimeoutFallback [GOOD] |87.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/control_plane_storage/internal/ut/unittest >> ParseStats::ParseMultipleGraphsV2 [GOOD] |87.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/persqueue_v1/persqueue_new_schemecache_ut.cpp |87.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/persqueue_v1/persqueue_ut.cpp ------- [TS] {asan, default-linux-x86_64, release} ydb/mvp/oidc_proxy/ut/unittest >> Mvp::OidcImpersonatedAccessNotAuthorized [GOOD] Test command err: 2025-05-07T08:46:03.981519Z :MVP DEBUG: oidc_protected_page.cpp:83: Forward user request bypass OIDC 2025-05-07T08:46:03.982161Z :MVP DEBUG: oidc_protected_page.cpp:37: Incoming response for protected resource: 200 2025-05-07T08:46:04.001865Z :MVP DEBUG: 
oidc_protected_page.cpp:83: Forward user request bypass OIDC 2025-05-07T08:46:04.002275Z :MVP DEBUG: oidc_protected_page.cpp:37: Incoming response for protected resource: 200 2025-05-07T08:46:04.009737Z :MVP DEBUG: oidc_protected_page.cpp:83: Forward user request bypass OIDC 2025-05-07T08:46:04.010038Z :MVP DEBUG: oidc_protected_page.cpp:37: Incoming response for protected resource: 204 2025-05-07T08:46:04.042555Z :MVP DEBUG: oidc_protected_page.cpp:83: Forward user request bypass OIDC 2025-05-07T08:46:04.042924Z :MVP DEBUG: oidc_protected_page.cpp:37: Incoming response for protected resource: 204 2025-05-07T08:46:04.115248Z :MVP DEBUG: oidc_protected_page.cpp:83: Forward user request bypass OIDC 2025-05-07T08:46:04.122154Z :MVP DEBUG: oidc_protected_page.cpp:37: Incoming response for protected resource: 204 2025-05-07T08:46:04.204606Z :MVP DEBUG: oidc_protected_page.cpp:83: Forward user request bypass OIDC 2025-05-07T08:46:04.205050Z :MVP DEBUG: oidc_protected_page.cpp:37: Incoming response for protected resource: 204 2025-05-07T08:46:04.462065Z :MVP DEBUG: oidc_protected_page_yandex.cpp:25: SessionService.Check(): OK 2025-05-07T08:46:04.462168Z :MVP DEBUG: oidc_protected_page.cpp:83: Forward user request bypass OIDC 2025-05-07T08:46:04.462492Z :MVP DEBUG: oidc_protected_page.cpp:37: Incoming response for protected resource: 400 2025-05-07T08:46:04.462549Z :MVP DEBUG: oidc_protected_page.cpp:178: Try to send request to HTTPS port 2025-05-07T08:46:04.462590Z :MVP DEBUG: oidc_protected_page.cpp:83: Forward user request bypass OIDC 2025-05-07T08:46:04.462803Z :MVP DEBUG: oidc_protected_page.cpp:37: Incoming response for protected resource: 200 2025-05-07T08:46:04.475021Z :MVP DEBUG: oidc_protected_page_yandex.cpp:25: SessionService.Check(): OK 2025-05-07T08:46:04.475098Z :MVP DEBUG: oidc_protected_page.cpp:83: Forward user request bypass OIDC 2025-05-07T08:46:04.475346Z :MVP DEBUG: oidc_protected_page.cpp:37: Incoming response for protected resource: 400 2025-05-07T08:46:04.660583Z :MVP DEBUG: oidc_protected_page_yandex.cpp:25: SessionService.Check(): OK 2025-05-07T08:46:04.660685Z :MVP DEBUG: oidc_protected_page.cpp:83: Forward user request bypass OIDC 2025-05-07T08:46:04.660960Z :MVP DEBUG: oidc_protected_page.cpp:37: Incoming response for protected resource: 307 2025-05-07T08:46:04.676929Z :MVP DEBUG: oidc_protected_page_yandex.cpp:25: SessionService.Check(): OK 2025-05-07T08:46:04.677028Z :MVP DEBUG: oidc_protected_page.cpp:83: Forward user request bypass OIDC 2025-05-07T08:46:04.677278Z :MVP DEBUG: oidc_protected_page.cpp:37: Incoming response for protected resource: 302 2025-05-07T08:46:04.693372Z :MVP DEBUG: oidc_protected_page_yandex.cpp:25: SessionService.Check(): OK 2025-05-07T08:46:04.693447Z :MVP DEBUG: oidc_protected_page.cpp:83: Forward user request bypass OIDC 2025-05-07T08:46:04.693667Z :MVP DEBUG: oidc_protected_page.cpp:37: Incoming response for protected resource: 302 2025-05-07T08:46:04.711180Z :MVP DEBUG: oidc_protected_page_yandex.cpp:25: SessionService.Check(): OK 2025-05-07T08:46:04.711264Z :MVP DEBUG: oidc_protected_page.cpp:83: Forward user request bypass OIDC 2025-05-07T08:46:04.711503Z :MVP DEBUG: oidc_protected_page.cpp:37: Incoming response for protected resource: 302 2025-05-07T08:46:04.727421Z :MVP DEBUG: oidc_protected_page_yandex.cpp:25: SessionService.Check(): OK 2025-05-07T08:46:04.727504Z :MVP DEBUG: oidc_protected_page.cpp:83: Forward user request bypass OIDC 2025-05-07T08:46:04.727732Z :MVP DEBUG: oidc_protected_page.cpp:37: Incoming response for 
protected resource: 302 2025-05-07T08:46:04.808771Z :MVP DEBUG: oidc_protected_page_nebius.cpp:21: Start OIDC process 2025-05-07T08:46:04.809366Z :MVP DEBUG: openid_connect.cpp:256: Using cookie (__Host_session_cookie_79632E6F617574682E7964622D766965776572: c2Vz****aWU= (CE0CB168)) 2025-05-07T08:46:04.809430Z :MVP DEBUG: oidc_protected_page_nebius.cpp:93: Exchange session token 2025-05-07T08:46:04.809805Z :MVP DEBUG: oidc_protected_page_nebius.cpp:50: Getting access token: 200 OK 2025-05-07T08:46:04.809879Z :MVP DEBUG: oidc_protected_page.cpp:83: Forward user request bypass OIDC 2025-05-07T08:46:04.810052Z :MVP DEBUG: oidc_protected_page.cpp:37: Incoming response for protected resource: 200 2025-05-07T08:46:05.158094Z :MVP DEBUG: oidc_protected_page_yandex.cpp:33: SessionService.Check(): 401 2025-05-07T08:46:05.428944Z :MVP DEBUG: oidc_protected_page_yandex.cpp:33: SessionService.Check(): 400 2025-05-07T08:46:05.429730Z :MVP DEBUG: oidc_session_create.cpp:21: Restore oidc session 2025-05-07T08:46:05.430287Z :MVP DEBUG: oidc_session_create.cpp:71: Incoming response from authorization server: 200 2025-05-07T08:46:05.444343Z :MVP DEBUG: oidc_session_create_yandex.cpp:69: SessionService.Create(): OK 2025-05-07T08:46:05.458067Z :MVP DEBUG: oidc_protected_page_yandex.cpp:25: SessionService.Check(): OK 2025-05-07T08:46:05.458169Z :MVP DEBUG: oidc_protected_page.cpp:83: Forward user request bypass OIDC 2025-05-07T08:46:05.458436Z :MVP DEBUG: oidc_protected_page.cpp:37: Incoming response for protected resource: 200 2025-05-07T08:46:05.637795Z :MVP DEBUG: oidc_protected_page_yandex.cpp:33: SessionService.Check(): 400 2025-05-07T08:46:05.638793Z :MVP DEBUG: oidc_session_create.cpp:21: Restore oidc session 2025-05-07T08:46:05.639465Z :MVP DEBUG: oidc_session_create.cpp:71: Incoming response from authorization server: 200 2025-05-07T08:46:05.656160Z :MVP DEBUG: oidc_session_create_yandex.cpp:69: SessionService.Create(): OK 2025-05-07T08:46:05.662113Z :MVP DEBUG: oidc_protected_page_yandex.cpp:25: SessionService.Check(): OK 2025-05-07T08:46:05.662203Z :MVP DEBUG: oidc_protected_page.cpp:83: Forward user request bypass OIDC 2025-05-07T08:46:05.662453Z :MVP DEBUG: oidc_protected_page.cpp:37: Incoming response for protected resource: 200 2025-05-07T08:46:05.780578Z :MVP DEBUG: oidc_session_create.cpp:21: Restore oidc session 2025-05-07T08:46:05.780828Z :MVP DEBUG: oidc_session_create.cpp:51: Check state failed: Calculated digest is not equal expected digest 2025-05-07T08:46:05.938093Z :MVP DEBUG: oidc_session_create.cpp:21: Restore oidc session 2025-05-07T08:46:05.938320Z :MVP DEBUG: oidc_session_create.cpp:51: Check state failed: Calculated digest is not equal expected digest 2025-05-07T08:46:06.218627Z :MVP DEBUG: oidc_session_create.cpp:21: Restore oidc session 2025-05-07T08:46:06.219377Z :MVP DEBUG: oidc_session_create.cpp:71: Incoming response from authorization server: 200 2025-05-07T08:46:06.284077Z :MVP DEBUG: oidc_session_create_yandex.cpp:79: SessionService.Create(): 401 2025-05-07T08:46:06.360977Z :MVP DEBUG: oidc_session_create.cpp:21: Restore oidc session 2025-05-07T08:46:06.361643Z :MVP DEBUG: oidc_session_create.cpp:71: Incoming response from authorization server: 200 2025-05-07T08:46:06.494124Z :MVP DEBUG: oidc_session_create_yandex.cpp:79: SessionService.Create(): 400 2025-05-07T08:46:06.630214Z :MVP DEBUG: oidc_session_create.cpp:21: Restore oidc session 2025-05-07T08:46:06.646593Z :MVP DEBUG: oidc_session_create.cpp:71: Incoming response from authorization server: 200 
2025-05-07T08:46:06.692183Z :MVP DEBUG: oidc_session_create_yandex.cpp:79: SessionService.Create(): 400 2025-05-07T08:46:06.754236Z :MVP DEBUG: oidc_session_create.cpp:21: Restore oidc session 2025-05-07T08:46:06.754985Z :MVP DEBUG: oidc_session_create.cpp:71: Incoming response from authorization server: 200 2025-05-07T08:46:06.760368Z :MVP DEBUG: oidc_session_create_yandex.cpp:79: SessionService.Create(): 412 2025-05-07T08:46:06.817696Z :MVP DEBUG: oidc_protected_page_yandex.cpp:33: SessionService.Check(): 400 2025-05-07T08:46:06.829775Z :MVP DEBUG: oidc_protected_page_yandex.cpp:33: SessionService.Check(): 400 2025-05-07T08:46:06.837981Z :MVP DEBUG: oidc_protected_page_yandex.cpp:33: SessionService.Check(): 400 2025-05-07T08:46:06.857409Z :MVP DEBUG: oidc_protected_page.cpp:83: Forward user request bypass OIDC 2025-05-07T08:46:06.857737Z :MVP DEBUG: oidc_protected_page.cpp:51: Can not process request to protected resource: GET /counters HTTP/1.1 Host: ydb.viewer.page Accept: */* Accept-Encoding: deflate Authorization: 2025-05-07T08:46:06.898859Z :MVP DEBUG: oidc_session_create.cpp:21: Restore oidc session 2025-05-07T08:46:06.899077Z :MVP DEBUG: oidc_session_create.cpp:43: Restore oidc context failed: Cannot find cookie ydb_oidc_cookie 2025-05-07T08:46:06.937765Z :MVP DEBUG: oidc_session_create.cpp:21: Restore oidc session 2025-05-07T08:46:06.938016Z :MVP DEBUG: oidc_session_create.cpp:51: Check state failed: Calculated digest is not equal expected digest 2025-05-07T08:46:06.969158Z :MVP DEBUG: oidc_impersonate_start_page_nebius.cpp:23: Start impersonation process 2025-05-07T08:46:06.969301Z :MVP DEBUG: openid_connect.cpp:256: Using cookie (__Host_session_cookie_636C69656E745F6964: c2Vz****aWU= (CE0CB168)) 2025-05-07T08:46:06.969341Z :MVP DEBUG: oidc_impersonate_start_page_nebius.cpp:49: Request impersonated token 2025-05-07T08:46:06.969671Z :MVP DEBUG: oidc_impersonate_start_page_nebius.cpp:100: Incoming response from authorization server: 200 2025-05-07T08:46:06.969783Z :MVP DEBUG: oidc_impersonate_start_page_nebius.cpp:89: Set impersonated cookie: (__Host_impersonated_cookie_636C69656E745F6964: aW1w****bg== (B126DD61)) 2025-05-07T08:46:07.004371Z :MVP DEBUG: oidc_impersonate_start_page_nebius.cpp:23: Start impersonation process 2025-05-07T08:46:07.004452Z :MVP DEBUG: openid_connect.cpp:256: Using cookie (__Host_session_cookie_636C69656E745F6964: c2Vz****aWU= (CE0CB168)) 2025-05-07T08:46:07.044574Z :MVP DEBUG: oidc_cleanup_page.cpp:20: Clear cookie: (__Host_impersonated_cookie_636C69656E745F6964) 2025-05-07T08:46:07.101185Z :MVP DEBUG: oidc_protected_page_nebius.cpp:21: Start OIDC process 2025-05-07T08:46:07.101298Z :MVP DEBUG: openid_connect.cpp:256: Using cookie (__Host_session_cookie_636C69656E745F6964: c2Vz****aWU= (CE0CB168)) 2025-05-07T08:46:07.101354Z :MVP DEBUG: openid_connect.cpp:256: Using cookie (__Host_impersonated_cookie_636C69656E745F6964: aW1w****ZQ== (1A20D8C0)) 2025-05-07T08:46:07.101392Z :MVP DEBUG: oidc_protected_page_nebius.cpp:104: Exchange impersonated token 2025-05-07T08:46:07.101670Z :MVP DEBUG: oidc_protected_page_nebius.cpp:50: Getting access token: 200 OK 2025-05-07T08:46:07.101736Z :MVP DEBUG: oidc_protected_page.cpp:83: Forward user request bypass OIDC 2025-05-07T08:46:07.101887Z :MVP DEBUG: oidc_protected_page.cpp:37: Incoming response for protected resource: 200 2025-05-07T08:46:07.130779Z :MVP DEBUG: oidc_protected_page_nebius.cpp:21: Start OIDC process 2025-05-07T08:46:07.130880Z :MVP DEBUG: openid_connect.cpp:256: Using cookie 
(__Host_session_cookie_636C69656E745F6964: c2Vz****aWU= (CE0CB168)) 2025-05-07T08:46:07.130961Z :MVP DEBUG: openid_connect.cpp:256: Using cookie (__Host_impersonated_cookie_636C69656E745F6964: aW1w****ZQ== (1A20D8C0)) 2025-05-07T08:46:07.131000Z :MVP DEBUG: oidc_protected_page_nebius.cpp:104: Exchange impersonated token 2025-05-07T08:46:07.131506Z :MVP DEBUG: oidc_protected_page_nebius.cpp:50: Getting access token: 401 OK 2025-05-07T08:46:07.131554Z :MVP DEBUG: oidc_protected_page_nebius.cpp:62: Getting access token: {"error": "bad_token"} 2025-05-07T08:46:07.131605Z :MVP DEBUG: oidc_protected_page_nebius.cpp:118: Clear impersonated cookie (__Host_impersonated_cookie_636C69656E745F6964) and retry |87.8%| [TS] {RESULT} ydb/core/external_sources/object_storage/inference/ut/gtest |87.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/persqueue_v1/persqueue_new_schemecache_ut.cpp ------- [TS] {asan, default-linux-x86_64, release} ydb/library/yql/providers/generic/provider/ut/pushdown/unittest >> PushdownTest::StringFieldsNotSupported2 [GOOD] Test command err: Initial program: ( (let $data_source (DataSource '"generic" '"test_cluster")) (let $empty_lambda (lambda '($arg) (Bool '"true"))) (let $table (MrTableConcat (Key '('table (String '"test_table")))) ) (let $read (Read! world $data_source $table)) (let $map_lambda (lambda '($row) (OptionalIf (Bool '"true") $row ) )) (let $filtered_data (FlatMap (Right! $read) $map_lambda)) (let $resulte_data_sink (DataSink '"result")) (let $result (ResWrite! (Left! $read) $resulte_data_sink (Key) $filtered_data '('('type)))) (return (Commit! $result $resulte_data_sink)) ) 2025-05-07 08:46:04.927 DEBUG yql-providers-generic-provider-ut-pushdown(pid=90651, tid=0x00007FBDF53A8F00) [core] yql_out_transformers.cpp:62: Expr: ( (let $1 (Read! world (DataSource '"generic" '"test_cluster") (MrTableConcat (Key '('table (String '"test_table")))))) (let $2 (DataSink '"result")) (let $3 (ResWrite! (Left! $1) $2 (Key) (FlatMap (Right! $1) (lambda '($4) (OptionalIf (Bool '"true") $4))) '('('type)))) (return (Commit! $3 $2)) ) 2025-05-07 08:46:04.929 DEBUG yql-providers-generic-provider-ut-pushdown(pid=90651, tid=0x00007FBDF53A8F00) [core] yql_out_transformers.cpp:62: Expr: ( (let $1 (Read! world (DataSource '"generic" '"test_cluster") (MrTableConcat (Key '('table (String '"test_table")))))) (let $2 (DataSink '"result")) (let $3 (ResWrite! (Left! $1) $2 (Key) (FlatMap (Right! $1) (lambda '($4) (OptionalIf (Bool '"true") $4))) '('('type)))) (return (Commit! $3 $2)) ) 2025-05-07 08:46:04.929 DEBUG yql-providers-generic-provider-ut-pushdown(pid=90651, tid=0x00007FBDF53A8F00) [generic] yql_generic_io_discovery.cpp:55: discovered cluster name: test_cluster 2025-05-07 08:46:04.929 INFO yql-providers-generic-provider-ut-pushdown(pid=90651, tid=0x00007FBDF53A8F00) [generic] yql_generic_load_meta.cpp:91: Loading table meta for: `test_cluster`.`test_table` 2025-05-07 08:46:04.932 DEBUG yql-providers-generic-provider-ut-pushdown(pid=90651, tid=0x00007FBDF53A8F00) [core] yql_out_transformers.cpp:62: Expr: ( (let $1 (GenReadTable! world (DataSource '"generic" '"test_cluster") (GenTable '"test_table") (Void) (lambda '($4) (Bool '"true")))) (let $2 (DataSink '"result")) (let $3 (ResWrite! (Left! $1) $2 (Key) (FlatMap (Right! $1) (lambda '($5) (OptionalIf (Bool '"true") $5))) '('('type)))) (return (Commit! $3 $2)) ) 2025-05-07 08:46:04.944 DEBUG yql-providers-generic-provider-ut-pushdown(pid=90651, tid=0x00007FBDF53A8F00) [core] yql_out_transformers.cpp:62: Expr: ( (let $1 (GenReadTable! 
world (DataSource '"generic" '"test_cluster") (GenTable '"test_table") (Void) (lambda '($4) (Bool '"true")))) (let $2 (DataSink '"result")) (let $3 (ResWrite! (Left! $1) $2 (Key) (FlatMap (Right! $1) (lambda '($5) (OptionalIf (Bool '"true") $5))) '('('type)))) (return (Commit! $3 $2)) ) 2025-05-07 08:46:04.945 DEBUG yql-providers-generic-provider-ut-pushdown(pid=90651, tid=0x00007FBDF53A8F00) [core] yql_out_transformers.cpp:62: Expr: ( (let $1 (GenReadTable! world (DataSource '"generic" '"test_cluster") (GenTable '"test_table") (Void) (lambda '($4) (Bool '"true")))) (let $2 (DataSink '"result")) (let $3 (ResWrite! (Left! $1) $2 (Key) (FlatMap (Right! $1) (lambda '($5) (OptionalIf (Bool '"true") $5))) '('('type)))) (return (Commit! $3 $2)) ) 2025-05-07 08:46:04.946 DEBUG yql-providers-generic-provider-ut-pushdown(pid=90651, tid=0x00007FBDF53A8F00) [core] yql_out_transformers.cpp:62: Expr to optimize: ( (let $1 (Bool '"true")) (let $2 (GenReadTable! world (DataSource '"generic" '"test_cluster") (GenTable '"test_table") (Void) (lambda '($5) $1))) (let $3 (DataSink '"result")) (let $4 (ResWrite! (Left! $2) $3 (Key) (FlatMap (Right! $2) (lambda '($6) (OptionalIf $1 $6))) '('('type)))) (return (Commit! $4 $3)) ) 2025-05-07 08:46:04.947 DEBUG yql-providers-generic-provider-ut-pushdown(pid=90651, tid=0x00007FBDF53A8F00) [core] yql_co_simple1.cpp:986: OptionalIf over Bool 'true 2025-05-07 08:46:04.947 DEBUG yql-providers-generic-provider-ut-pushdown(pid=90651, tid=0x00007FBDF53A8F00) [core] yql_out_transformers.cpp:62: Expr: ( (let $1 (GenReadTable! world (DataSource '"generic" '"test_cluster") (GenTable '"test_table") (Void) (lambda '($4) (Bool '"true")))) (let $2 (DataSink '"result")) (let $3 (ResWrite! (Left! $1) $2 (Key) (FlatMap (Right! $1) (lambda '($5) (Just $5))) '('('type)))) (return (Commit! $3 $2)) ) 2025-05-07 08:46:04.948 DEBUG yql-providers-generic-provider-ut-pushdown(pid=90651, tid=0x00007FBDF53A8F00) [core] yql_out_transformers.cpp:62: Expr: ( (let $1 (GenReadTable! world (DataSource '"generic" '"test_cluster") (GenTable '"test_table") (Void) (lambda '($4) (Bool '"true")))) (let $2 (DataSink '"result")) (let $3 (ResWrite! (Left! $1) $2 (Key) (FlatMap (Right! $1) (lambda '($5) (Just $5))) '('('type)))) (return (Commit! $3 $2)) ) 2025-05-07 08:46:04.948 DEBUG yql-providers-generic-provider-ut-pushdown(pid=90651, tid=0x00007FBDF53A8F00) [core] yql_out_transformers.cpp:62: Expr to optimize: ( (let $1 (GenReadTable! world (DataSource '"generic" '"test_cluster") (GenTable '"test_table") (Void) (lambda '($4) (Bool '"true")))) (let $2 (DataSink '"result")) (let $3 (ResWrite! (Left! $1) $2 (Key) (FlatMap (Right! $1) (lambda '($5) (Just $5))) '('('type)))) (return (Commit! $3 $2)) ) 2025-05-07 08:46:04.948 DEBUG yql-providers-generic-provider-ut-pushdown(pid=90651, tid=0x00007FBDF53A8F00) [core] yql_co_simple1.cpp:2040: FlatMap with Just 2025-05-07 08:46:04.949 DEBUG yql-providers-generic-provider-ut-pushdown(pid=90651, tid=0x00007FBDF53A8F00) [core] yql_out_transformers.cpp:62: Expr: ( (let $1 (GenReadTable! world (DataSource '"generic" '"test_cluster") (GenTable '"test_table") (Void) (lambda '($4) (Bool '"true")))) (let $2 (DataSink '"result")) (let $3 (ResWrite! (Left! $1) $2 (Key) (Right! $1) '('('type)))) (return (Commit! $3 $2)) ) 2025-05-07 08:46:04.949 DEBUG yql-providers-generic-provider-ut-pushdown(pid=90651, tid=0x00007FBDF53A8F00) [core] yql_out_transformers.cpp:62: Expr to optimize: ( (let $1 (GenReadTable! 
world (DataSource '"generic" '"test_cluster") (GenTable '"test_table") (Void) (lambda '($4) (Bool '"true")))) (let $2 (DataSink '"result")) (let $3 (ResWrite! (Left! $1) $2 (Key) (Right! $1) '('('type)))) (return (Commit! $3 $2)) ) 2025-05-07 08:46:04.951 INFO yql-providers-generic-provider-ut-pushdown(pid=90651, tid=0x00007FBDF53A8F00) [generic] yql_optimize.cpp:135: PhysicalOptimizer-TrimReadWorld 2025-05-07 08:46:04.951 DEBUG yql-providers-generic-provider-ut-pushdown(pid=90651, tid=0x00007FBDF53A8F00) [core] yql_out_transformers.cpp:62: Expr: ( (let $1 (DataSink '"result")) (let $2 (ResWrite! world $1 (Key) (Right! (GenReadTable! world (DataSource '"generic" '"test_cluster") (GenTable '"test_table") (Void) (lambda '($3) (Bool '"true")))) '('('type)))) (return (Commit! $2 $1)) ) 2025-05-07 08:46:04.951 DEBUG yql-providers-generic-provider-ut-pushdown(pid=90651, tid=0x00007FBDF53A8F00) [core] yql_out_transformers.cpp:62: Expr to optimize: ( (let $1 (DataSink '"result")) (let $2 (ResWrite! world $1 (Key) (Right! (GenReadTable! world (DataSource '"generic" '"test_cluster") (GenTable '"test_table") (Void) (lambda '($3) (Bool '"true")))) '('('type)))) (return (Commit! $2 $1)) ) 2025-05-07 08:46:04.952 INFO yql-providers-generic-provider-ut-pushdown(pid=90651, tid=0x00007FBDF53A8F00) [RESULT] yql_result_provider.cpp:773: ResPull 2025-05-07 08:46:04.952 DEBUG yql-providers-generic-provider-ut-pushdown(pid=90651, tid=0x00007FBDF53A8F00) [core] yql_out_transformers.cpp:62: Expr: ( (let $1 (DataSink '"result")) (let $2 (ResPull! world $1 (Key) (Right! (GenReadTable! world (DataSource '"generic" '"test_cluster") (GenTable '"test_table") (Void) (lambda '($3) (Bool '"true")))) '('('type)) '"generic")) (return (Commit! $2 $1)) ) 2025-05-07 08:46:04.953 DEBUG yql-providers-generic-provider-ut-pushdown(pid=90651, tid=0x00007FBDF53A8F00) [core] yql_out_transformers.cpp:62: Expr to optimize: ( (let $1 (DataSink '"result")) (let $2 (ResPull! world $1 (Key) (Right! (GenReadTable! world (DataSource '"generic" '"test_cluster") (GenTable '"test_table") (Void) (lambda '($3) (Bool '"true")))) '('('type)) '"generic")) (return (Commit! $2 $1)) ) 2025-05-07 08:46:04.954 DEBUG yql-providers-generic-provider-ut-pushdown(pid=90651, tid=0x00007FBDF53A8F00) [core] yql_out_transformers.cpp:62: Optimized expr: ( (let $1 (DataSink '"result")) (let $2 (ResPull! world $1 (Key) (Right! (GenReadTable! world (DataSource '"generic" '"test_cluster") (GenTable '"test_table") (Void) (lambda '($3) (Bool '"true")))) '('('type)) '"generic")) (return (Commit! 
$2 $1)) ) 2025-05-07 08:46:04.955 INFO yql-providers-generic-provider-ut-pushdown(pid=90651, tid=0x00007FBDF53A8F00) [generic] yql_generic_dq_integration.cpp:191: Filling source settings: cluster: test_cluster, table: test_table, endpoint: host: "host" port: 42 2025-05-07 08:46:04.964 INFO yql-providers-generic-provider-ut-pushdown(pid=90651, tid=0x00007FBDF53A8F00) [generic] yql_optimize.cpp:135: BuildGenericDqSourceSettings 2025-05-07 08:46:04.967 DEBUG yql-providers-generic-provider-ut-pushdown(pid=90651, tid=0x00007FBDF53A8F00) [core] yql_out_transformers.cpp:62: Built settings: ( (let $1 (DataSink '"result")) (let $2 '('"col_bool" '"col_date" '"col_datetime" '"col_double" '"col_dynumber" '"col_float" '"col_int16" '"col_int32" '"col_int64" '"col_int8" '"col_interval" '"col_json" '"col_json_document" '"col_optional_bool" '"col_optional_date" '"col_optional_datetime" '"col_optional_double" '"col_optional_dynumber" '"col_optional_float" '"col_optional_int16" '"col_optional_int32" '"col_optional_int64" '"col_optional_int8" '"col_optional_interval" '"col_optional_json" '"col_optional_json_document" '"col_optional_string" '"col_optional_timestamp" '"col_optional_tz_date" '"col_optional_tz_datetime" '"col_optional_tz_timestamp" '"col_optional_uint16" '"col_optional_uint32" '"col_optional_uint64" '"col_optional_uint8" '"col_optional_utf8" '"col_optional_uuid" '"col_optional_yson" '"col_string" '"col_timestamp" '"col_tz_date" '"col_tz_datetime" '"col_tz_timestamp" '"col_uint16" '"col_uint32" '"col_uint64" '"col_uint8" '"col_utf8" '"col_uuid" '"col_yson")) (let $3 (GenSourceSettings world '"test_cluster" '"test_table" (SecureParam '"cluster:default_test_cluster") $2 (lambda '($32) (Bool '"true")))) (let $4 (DataType 'Bool)) (let $5 (DataType 'Date)) (let $6 (DataType 'Datetime)) (let $7 (DataType 'Double)) (let $8 (DataType 'DyNumber)) (let $9 (DataType 'Float)) (let $10 (DataType 'Int16)) (let $11 (DataType 'Int32)) (let $12 (DataType 'Int64)) (let $13 (DataType 'Int8)) (let $14 (DataType 'Interval)) (let $15 (DataType 'Json)) (let $16 (DataType 'JsonDocument)) (let $17 (DataType 'String)) (let $18 (DataType 'Timestamp)) (let $19 (DataType 'TzDate)) (let $20 (DataType 'TzDatetime)) (let $21 (DataType 'TzTimestamp)) (let $22 (DataType 'Uint16)) (let $23 (DataType 'Uint32)) (let $24 (DataType 'Uint64)) (let $25 (DataType 'Uint8)) (let $26 (DataType 'Utf8)) (let $27 (DataType 'Uuid)) (let $28 (DataType 'Yson)) (let $29 (StructType '('"col_bool" $4) '('"col_date" $5) '('"col_datetime" $6) '('"col_double" $7) '('"col_dynumber" $8) '('"col_float" $9) '('"col_int16" $10) '('"col_int32" $11) '('"col_int64" $12) '('"col_int8" $13) '('"col_interval" $14) '('"col_json" $15) '('"col_json_document" $16) '('"col_optional_bool" (OptionalType $4)) '('"col_optional_date" (OptionalType $5)) '('"col_optional_datetime" (OptionalType $6)) '('"col_optional_double" (OptionalType $7)) '('"col_optional_dynumber" (OptionalType $8)) '('"col_optional_float" (OptionalType $9)) '('"col_optional_int16" (OptionalType $10)) '('"col_optional_int32" (OptionalType $11)) '('"col_optional_int64" (OptionalType $12)) '('"col_optional_int8" (OptionalType $13)) '('"col_optional_interval" (OptionalType $14)) '('"col_optional_json" (OptionalType $15)) '('"col_optional_json_document" (OptionalType $16)) '('"col_optional_string" (OptionalType $17)) '('"col_optional_timestamp" (OptionalType $18)) '('"col_optional_tz_date" (OptionalT ... 
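The two rewrite rules reported in the trace above (yql_co_simple1.cpp:986 "OptionalIf over Bool 'true" and yql_co_simple1.cpp:2040 "FlatMap with Just") are plain algebraic identities: a constant-true residual filter (OptionalIf (Bool '"true") $x) degenerates to (Just $x), and a FlatMap whose lambda is Just is the identity, which is why the optimized expression feeds (Right! $1) straight into ResWrite! with no FlatMap left, after which PhysicalOptimizer-TrimReadWorld drops the read from the world dependency. A minimal self-contained C++ sketch of the two identities (illustrative only, standard library types standing in for YQL nodes; this is not YDB code):

#include <cassert>
#include <optional>
#include <vector>

// OptionalIf: wrap a row in an optional iff the predicate holds,
// mirroring (OptionalIf pred row) from the expressions above.
std::optional<int> OptionalIf(bool pred, int row) {
    return pred ? std::optional<int>(row) : std::nullopt;
}

// FlatMap over an optional-returning lambda: keep only engaged results.
template <typename F>
std::vector<int> FlatMap(const std::vector<int>& xs, F f) {
    std::vector<int> out;
    for (int x : xs)
        if (auto r = f(x)) out.push_back(*r);
    return out;
}

int main() {
    const std::vector<int> rows{1, 2, 3};
    // Rule 1: with a constant-true predicate, OptionalIf(true, x) is Just(x).
    // Rule 2: FlatMap with Just is the identity, so the FlatMap disappears.
    auto filtered = FlatMap(rows, [](int x) { return OptionalIf(true, x); });
    assert(filtered == rows);
}

With the residual filter reduced away, BuildGenericDqSourceSettings has only the trivial (lambda '($32) (Bool '"true")) predicate to carry into the source settings, as the Built settings dump above shows; contrast this with the col_string != "value" case later in the log, where the non-trivial lambda survives and is serialized into the Dq source filter settings.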
right_value { column: "col_optional_utf8" } } } 2025-05-07 08:46:07.139 INFO yql-providers-generic-provider-ut-pushdown(pid=90651, tid=0x00007FBDF53A8F00) [generic] yql_generic_settings.cpp:38: GenericConfiguration::AddCluster: name = test_cluster, kind = POSTGRESQL, database name = database, database id = , endpoint = { host: "host" port: 42 }, use tls = 0, protocol = NATIVE Initial program: ( (let $data_source (DataSource '"generic" '"test_cluster")) (let $empty_lambda (lambda '($arg) (Bool '"true"))) (let $table (MrTableConcat (Key '('table (String '"test_table")))) ) (let $read (Read! world $data_source $table)) (let $map_lambda (lambda '($row) (OptionalIf (!= (Member $row '"col_string") (String '"value") ) $row ) )) (let $filtered_data (FlatMap (Right! $read) $map_lambda)) (let $resulte_data_sink (DataSink '"result")) (let $result (ResWrite! (Left! $read) $resulte_data_sink (Key) $filtered_data '('('type)))) (return (Commit! $result $resulte_data_sink)) ) 2025-05-07 08:46:07.141 DEBUG yql-providers-generic-provider-ut-pushdown(pid=90651, tid=0x00007FBDF53A8F00) [core] yql_out_transformers.cpp:62: Expr: ( (let $1 (Read! world (DataSource '"generic" '"test_cluster") (MrTableConcat (Key '('table (String '"test_table")))))) (let $2 (DataSink '"result")) (let $3 (ResWrite! (Left! $1) $2 (Key) (FlatMap (Right! $1) (lambda '($4) (OptionalIf (!= (Member $4 '"col_string") (String '"value")) $4))) '('('type)))) (return (Commit! $3 $2)) ) 2025-05-07 08:46:07.143 DEBUG yql-providers-generic-provider-ut-pushdown(pid=90651, tid=0x00007FBDF53A8F00) [core] yql_out_transformers.cpp:62: Expr: ( (let $1 (Read! world (DataSource '"generic" '"test_cluster") (MrTableConcat (Key '('table (String '"test_table")))))) (let $2 (DataSink '"result")) (let $3 (ResWrite! (Left! $1) $2 (Key) (FlatMap (Right! $1) (lambda '($4) (OptionalIf (!= (Member $4 '"col_string") (String '"value")) $4))) '('('type)))) (return (Commit! $3 $2)) ) 2025-05-07 08:46:07.144 DEBUG yql-providers-generic-provider-ut-pushdown(pid=90651, tid=0x00007FBDF53A8F00) [generic] yql_generic_io_discovery.cpp:55: discovered cluster name: test_cluster 2025-05-07 08:46:07.144 INFO yql-providers-generic-provider-ut-pushdown(pid=90651, tid=0x00007FBDF53A8F00) [generic] yql_generic_load_meta.cpp:91: Loading table meta for: `test_cluster`.`test_table` 2025-05-07 08:46:07.146 DEBUG yql-providers-generic-provider-ut-pushdown(pid=90651, tid=0x00007FBDF53A8F00) [core] yql_out_transformers.cpp:62: Expr: ( (let $1 (GenReadTable! world (DataSource '"generic" '"test_cluster") (GenTable '"test_table") (Void) (lambda '($4) (Bool '"true")))) (let $2 (DataSink '"result")) (let $3 (ResWrite! (Left! $1) $2 (Key) (FlatMap (Right! $1) (lambda '($5) (OptionalIf (!= (Member $5 '"col_string") (String '"value")) $5))) '('('type)))) (return (Commit! $3 $2)) ) 2025-05-07 08:46:07.148 DEBUG yql-providers-generic-provider-ut-pushdown(pid=90651, tid=0x00007FBDF53A8F00) [core] yql_out_transformers.cpp:62: Expr: ( (let $1 (GenReadTable! world (DataSource '"generic" '"test_cluster") (GenTable '"test_table") (Void) (lambda '($4) (Bool '"true")))) (let $2 (DataSink '"result")) (let $3 (ResWrite! (Left! $1) $2 (Key) (FlatMap (Right! $1) (lambda '($5) (OptionalIf (!= (Member $5 '"col_string") (String '"value")) $5))) '('('type)))) (return (Commit! $3 $2)) ) 2025-05-07 08:46:07.148 DEBUG yql-providers-generic-provider-ut-pushdown(pid=90651, tid=0x00007FBDF53A8F00) [core] yql_out_transformers.cpp:62: Expr: ( (let $1 (GenReadTable! 
world (DataSource '"generic" '"test_cluster") (GenTable '"test_table") (Void) (lambda '($4) (Bool '"true")))) (let $2 (DataSink '"result")) (let $3 (ResWrite! (Left! $1) $2 (Key) (FlatMap (Right! $1) (lambda '($5) (OptionalIf (!= (Member $5 '"col_string") (String '"value")) $5))) '('('type)))) (return (Commit! $3 $2)) ) 2025-05-07 08:46:07.149 DEBUG yql-providers-generic-provider-ut-pushdown(pid=90651, tid=0x00007FBDF53A8F00) [core] yql_out_transformers.cpp:62: Expr to optimize: ( (let $1 (GenReadTable! world (DataSource '"generic" '"test_cluster") (GenTable '"test_table") (Void) (lambda '($4) (Bool '"true")))) (let $2 (DataSink '"result")) (let $3 (ResWrite! (Left! $1) $2 (Key) (FlatMap (Right! $1) (lambda '($5) (OptionalIf (!= (Member $5 '"col_string") (String '"value")) $5))) '('('type)))) (return (Commit! $3 $2)) ) 2025-05-07 08:46:07.151 INFO yql-providers-generic-provider-ut-pushdown(pid=90651, tid=0x00007FBDF53A8F00) [generic] yql_optimize.cpp:135: PhysicalOptimizer-TrimReadWorld 2025-05-07 08:46:07.151 INFO yql-providers-generic-provider-ut-pushdown(pid=90651, tid=0x00007FBDF53A8F00) [default] physical_opt.cpp:76: Push filter lambda: ( (return (lambda '($1) (!= (Member $1 '"col_string") (String '"value")))) ) 2025-05-07 08:46:07.152 INFO yql-providers-generic-provider-ut-pushdown(pid=90651, tid=0x00007FBDF53A8F00) [generic] yql_optimize.cpp:135: PhysicalOptimizer-PushFilterToReadTable 2025-05-07 08:46:07.152 DEBUG yql-providers-generic-provider-ut-pushdown(pid=90651, tid=0x00007FBDF53A8F00) [core] yql_out_transformers.cpp:62: Expr: ( (let $1 (DataSink '"result")) (let $2 (String '"value")) (let $3 (ResWrite! world $1 (Key) (FlatMap (Right! (GenReadTable! world (DataSource '"generic" '"test_cluster") (GenTable '"test_table") (Void) (lambda '($4) (!= (Member $4 '"col_string") $2)))) (lambda '($5) (OptionalIf (!= (Member $5 '"col_string") $2) $5))) '('('type)))) (return (Commit! $3 $1)) ) 2025-05-07 08:46:07.153 DEBUG yql-providers-generic-provider-ut-pushdown(pid=90651, tid=0x00007FBDF53A8F00) [core] yql_out_transformers.cpp:62: Expr: ( (let $1 (DataSink '"result")) (let $2 (String '"value")) (let $3 (ResWrite! world $1 (Key) (FlatMap (Right! (GenReadTable! world (DataSource '"generic" '"test_cluster") (GenTable '"test_table") (Void) (lambda '($4) (!= (Member $4 '"col_string") $2)))) (lambda '($5) (OptionalIf (!= (Member $5 '"col_string") $2) $5))) '('('type)))) (return (Commit! $3 $1)) ) 2025-05-07 08:46:07.154 DEBUG yql-providers-generic-provider-ut-pushdown(pid=90651, tid=0x00007FBDF53A8F00) [core] yql_out_transformers.cpp:62: Expr to optimize: ( (let $1 (DataSink '"result")) (let $2 (String '"value")) (let $3 (ResWrite! world $1 (Key) (FlatMap (Right! (GenReadTable! world (DataSource '"generic" '"test_cluster") (GenTable '"test_table") (Void) (lambda '($4) (!= (Member $4 '"col_string") $2)))) (lambda '($5) (OptionalIf (!= (Member $5 '"col_string") $2) $5))) '('('type)))) (return (Commit! $3 $1)) ) 2025-05-07 08:46:07.155 TRACE yql-providers-generic-provider-ut-pushdown(pid=90651, tid=0x00007FBDF53A8F00) [generic] yql_generic_physical_opt.cpp:138: Push filter. Lambda is already not empty 2025-05-07 08:46:07.156 DEBUG yql-providers-generic-provider-ut-pushdown(pid=90651, tid=0x00007FBDF53A8F00) [core] yql_out_transformers.cpp:62: Optimized expr: ( (let $1 (DataSink '"result")) (let $2 (String '"value")) (let $3 (ResWrite! world $1 (Key) (FlatMap (Right! (GenReadTable! 
world (DataSource '"generic" '"test_cluster") (GenTable '"test_table") (Void) (lambda '($4) (!= (Member $4 '"col_string") $2)))) (lambda '($5) (OptionalIf (!= (Member $5 '"col_string") $2) $5))) '('('type)))) (return (Commit! $3 $1)) ) 2025-05-07 08:46:07.157 INFO yql-providers-generic-provider-ut-pushdown(pid=90651, tid=0x00007FBDF53A8F00) [generic] yql_generic_dq_integration.cpp:191: Filling source settings: cluster: test_cluster, table: test_table, endpoint: host: "host" port: 42 2025-05-07 08:46:07.166 INFO yql-providers-generic-provider-ut-pushdown(pid=90651, tid=0x00007FBDF53A8F00) [generic] yql_optimize.cpp:135: BuildGenericDqSourceSettings 2025-05-07 08:46:07.168 DEBUG yql-providers-generic-provider-ut-pushdown(pid=90651, tid=0x00007FBDF53A8F00) [core] yql_out_transformers.cpp:62: Built settings: ( (let $1 (DataSink '"result")) (let $2 '('"col_bool" '"col_date" '"col_datetime" '"col_double" '"col_dynumber" '"col_float" '"col_int16" '"col_int32" '"col_int64" '"col_int8" '"col_interval" '"col_json" '"col_json_document" '"col_optional_bool" '"col_optional_date" '"col_optional_datetime" '"col_optional_double" '"col_optional_dynumber" '"col_optional_float" '"col_optional_int16" '"col_optional_int32" '"col_optional_int64" '"col_optional_int8" '"col_optional_interval" '"col_optional_json" '"col_optional_json_document" '"col_optional_string" '"col_optional_timestamp" '"col_optional_tz_date" '"col_optional_tz_datetime" '"col_optional_tz_timestamp" '"col_optional_uint16" '"col_optional_uint32" '"col_optional_uint64" '"col_optional_uint8" '"col_optional_utf8" '"col_optional_uuid" '"col_optional_yson" '"col_string" '"col_timestamp" '"col_tz_date" '"col_tz_datetime" '"col_tz_timestamp" '"col_uint16" '"col_uint32" '"col_uint64" '"col_uint8" '"col_utf8" '"col_uuid" '"col_yson")) (let $3 (String '"value")) (let $4 (GenSourceSettings world '"test_cluster" '"test_table" (SecureParam '"cluster:default_test_cluster") $2 (lambda '($33) (!= (Member $33 '"col_string") $3)))) (let $5 (DataType 'Bool)) (let $6 (DataType 'Date)) (let $7 (DataType 'Datetime)) (let $8 (DataType 'Double)) (let $9 (DataType 'DyNumber)) (let $10 (DataType 'Float)) (let $11 (DataType 'Int16)) (let $12 (DataType 'Int32)) (let $13 (DataType 'Int64)) (let $14 (DataType 'Int8)) (let $15 (DataType 'Interval)) (let $16 (DataType 'Json)) (let $17 (DataType 'JsonDocument)) (let $18 (DataType 'String)) (let $19 (DataType 'Timestamp)) (let $20 (DataType 'TzDate)) (let $21 (DataType 'TzDatetime)) (let $22 (DataType 'TzTimestamp)) (let $23 (DataType 'Uint16)) (let $24 (DataType 'Uint32)) (let $25 (DataType 'Uint64)) (let $26 (DataType 'Uint8)) (let $27 (DataType 'Utf8)) (let $28 (DataType 'Uuid)) (let $29 (DataType 'Yson)) (let $30 (StructType '('"col_bool" $5) '('"col_date" $6) '('"col_datetime" $7) '('"col_double" $8) '('"col_dynumber" $9) '('"col_float" $10) '('"col_int16" $11) '('"col_int32" $12) '('"col_int64" $13) '('"col_int8" $14) '('"col_interval" $15) '('"col_json" $16) '('"col_json_document" $17) '('"col_optional_bool" (OptionalType $5)) '('"col_optional_date" (OptionalType $6)) '('"col_optional_datetime" (OptionalType $7)) '('"col_optional_double" (OptionalType $8)) '('"col_optional_dynumber" (OptionalType $9)) '('"col_optional_float" (OptionalType $10)) '('"col_optional_int16" (OptionalType $11)) '('"col_optional_int32" (OptionalType $12)) '('"col_optional_int64" (OptionalType $13)) '('"col_optional_int8" (OptionalType $14)) '('"col_optional_interval" (OptionalType $15)) '('"col_optional_json" (OptionalType $16)) 
'('"col_optional_json_document" (OptionalType $17)) '('"col_optional_string" (OptionalType $18)) '('"col_optional_timestamp" (OptionalType $19)) '('"col_optional_tz_date" (OptionalType $20)) '('"col_optional_tz_datetime" (OptionalType $21)) '('"col_optional_tz_timestamp" (OptionalType $22)) '('"col_optional_uint16" (OptionalType $23)) '('"col_optional_uint32" (OptionalType $24)) '('"col_optional_uint64" (OptionalType $25)) '('"col_optional_uint8" (OptionalType $26)) '('"col_optional_utf8" (OptionalType $27)) '('"col_optional_uuid" (OptionalType $28)) '('"col_optional_yson" (OptionalType $29)) '('"col_string" $18) '('"col_timestamp" $19) '('"col_tz_date" $20) '('"col_tz_datetime" $21) '('"col_tz_timestamp" $22) '('"col_uint16" $23) '('"col_uint32" $24) '('"col_uint64" $25) '('"col_uint8" $26) '('"col_utf8" $27) '('"col_uuid" $28) '('"col_yson" $29))) (let $31 (DqSourceWrap $4 (DataSource '"generic" '"test_cluster") $30)) (let $32 (ResWrite! world $1 (Key) (FlatMap $31 (lambda '($34) (OptionalIf (!= (Member $34 '"col_string") $3) $34))) '('('type)))) (return (Commit! $32 $1)) ) Dq source filter settings: filter_typed { comparison { operation: NE left_value { column: "col_string" } right_value { typed_value { type { type_id: STRING } value { bytes_value: "value" } } } } } >> ColumnShardConfigValidation::AcceptDefaultCompression [GOOD] >> ColumnShardConfigValidation::NotAcceptDefaultCompression [GOOD] >> ColumnShardConfigValidation::CorrectPlainCompression [GOOD] >> ColumnShardConfigValidation::NotCorrectPlainCompression [GOOD] >> ColumnShardConfigValidation::CorrectLZ4Compression [GOOD] >> ColumnShardConfigValidation::NotCorrectLZ4Compression [GOOD] >> ColumnShardConfigValidation::CorrectZSTDCompression [GOOD] >> ColumnShardConfigValidation::NotCorrectZSTDCompression [GOOD] |87.8%| [TS] {RESULT} ydb/core/blobstorage/vdisk/defrag/ut/unittest ------- [TS] {asan, default-linux-x86_64, release} ydb/mvp/meta/ut/unittest >> MetaCache::TimeoutFallback [GOOD] Test command err: 2025-05-07T08:46:07.495813Z :HTTP INFO: http_proxy_acceptor.cpp:88: Listening on http://[::]:7526 2025-05-07T08:46:07.496427Z :HTTP INFO: http_proxy_acceptor.cpp:88: Listening on http://[::]:15502 2025-05-07T08:46:07.496946Z :HTTP DEBUG: http_proxy.cpp:22: Connection created [1:14:2061] 2025-05-07T08:46:07.497014Z :HTTP DEBUG: http_proxy_outgoing.cpp:131: resolving 127.0.0.1:7526 2025-05-07T08:46:07.497140Z :HTTP DEBUG: http_proxy_outgoing.cpp:101: connecting to 127.0.0.1:7526 2025-05-07T08:46:07.497485Z :HTTP DEBUG: http_proxy_outgoing.cpp:248: (#11,127.0.0.1:7526) outgoing connection opened 2025-05-07T08:46:07.497576Z :HTTP DEBUG: http_proxy_outgoing.cpp:250: (#11,127.0.0.1:7526) <- (GET /server) 2025-05-07T08:46:07.497927Z :HTTP DEBUG: http_proxy_incoming.cpp:83: (#12,[::ffff:127.0.0.1]:48202) incoming connection opened 2025-05-07T08:46:07.498126Z :HTTP DEBUG: http_proxy_incoming.cpp:145: (#12,[::ffff:127.0.0.1]:48202) -> (GET /server) 2025-05-07T08:46:07.498311Z :HTTP DEBUG: meta_cache.cpp:231: Updating ownership http://127.0.0.1:15502 with deadline 2025-05-07T08:47:07.498268Z 2025-05-07T08:46:07.498371Z :HTTP DEBUG: meta_cache.cpp:237: SetRefreshTime "/server" to 2025-05-07T08:47:07.498268Z (+1746607627.498268s) 2025-05-07T08:46:07.498459Z :HTTP DEBUG: meta_cache.cpp:198: IncomingForward /server to http://127.0.0.1:15502 timeout 30.000000s 2025-05-07T08:46:07.498668Z :HTTP DEBUG: http_proxy.cpp:22: Connection created [1:16:2063] 2025-05-07T08:46:07.498730Z :HTTP DEBUG: http_proxy_outgoing.cpp:131: resolving 127.0.0.1:15502 
2025-05-07T08:46:07.498814Z :HTTP DEBUG: http_proxy_outgoing.cpp:101: connecting to 127.0.0.1:15502 2025-05-07T08:46:07.498987Z :HTTP DEBUG: http_proxy_outgoing.cpp:248: (#13,127.0.0.1:15502) outgoing connection opened 2025-05-07T08:46:07.499026Z :HTTP DEBUG: http_proxy_outgoing.cpp:250: (#13,127.0.0.1:15502) <- (GET /server) 2025-05-07T08:46:07.499217Z :HTTP DEBUG: http_proxy_incoming.cpp:83: (#14,[::ffff:127.0.0.1]:47706) incoming connection opened 2025-05-07T08:46:07.499397Z :HTTP DEBUG: http_proxy_incoming.cpp:145: (#14,[::ffff:127.0.0.1]:47706) -> (GET /server) 2025-05-07T08:46:07.499768Z :HTTP DEBUG: http_proxy_incoming.cpp:243: (#14,[::ffff:127.0.0.1]:47706) <- (200 Found) 2025-05-07T08:46:07.499888Z :HTTP DEBUG: http_proxy_incoming.cpp:296: (#14,[::ffff:127.0.0.1]:47706) connection closed 2025-05-07T08:46:07.500249Z :HTTP DEBUG: http_proxy_outgoing.cpp:58: (#13,127.0.0.1:15502) -> (200 Found) 2025-05-07T08:46:07.500359Z :HTTP DEBUG: http_proxy_outgoing.cpp:64: (#13,127.0.0.1:15502) connection closed 2025-05-07T08:46:07.500719Z :HTTP DEBUG: meta_cache.cpp:146: Cache received successfull (200) response for /server 2025-05-07T08:46:07.500944Z :HTTP DEBUG: http_proxy_incoming.cpp:243: (#12,[::ffff:127.0.0.1]:48202) <- (200 Found) 2025-05-07T08:46:07.501071Z :HTTP DEBUG: http_proxy_incoming.cpp:296: (#12,[::ffff:127.0.0.1]:48202) connection closed 2025-05-07T08:46:07.501218Z :HTTP DEBUG: http_proxy.cpp:141: Connection closed [1:16:2063] 2025-05-07T08:46:07.501374Z :HTTP DEBUG: http_proxy_outgoing.cpp:58: (#11,127.0.0.1:7526) -> (200 Found) 2025-05-07T08:46:07.501441Z :HTTP DEBUG: http_proxy_outgoing.cpp:64: (#11,127.0.0.1:7526) connection closed 2025-05-07T08:46:07.501962Z :HTTP DEBUG: http_proxy.cpp:141: Connection closed [1:14:2061] 2025-05-07T08:46:07.527892Z :HTTP INFO: http_proxy_acceptor.cpp:88: Listening on http://[::]:12498 2025-05-07T08:46:07.528380Z :HTTP INFO: http_proxy_acceptor.cpp:88: Listening on http://[::]:13917 2025-05-07T08:46:07.528853Z :HTTP DEBUG: http_proxy.cpp:22: Connection created [2:14:2061] 2025-05-07T08:46:07.528929Z :HTTP DEBUG: http_proxy_outgoing.cpp:131: resolving 127.0.0.1:12498 2025-05-07T08:46:07.529056Z :HTTP DEBUG: http_proxy_outgoing.cpp:101: connecting to 127.0.0.1:12498 2025-05-07T08:46:07.529338Z :HTTP DEBUG: http_proxy_outgoing.cpp:248: (#11,127.0.0.1:12498) outgoing connection opened 2025-05-07T08:46:07.529466Z :HTTP DEBUG: http_proxy_outgoing.cpp:250: (#11,127.0.0.1:12498) <- (GET /server) 2025-05-07T08:46:07.529773Z :HTTP DEBUG: http_proxy_incoming.cpp:83: (#12,[::ffff:127.0.0.1]:60300) incoming connection opened 2025-05-07T08:46:07.529926Z :HTTP DEBUG: http_proxy_incoming.cpp:145: (#12,[::ffff:127.0.0.1]:60300) -> (GET /server) 2025-05-07T08:46:07.530128Z :HTTP DEBUG: meta_cache.cpp:231: Updating ownership http://127.0.0.1:13917 with deadline 2025-05-07T08:56:07.530071Z 2025-05-07T08:46:07.530180Z :HTTP DEBUG: meta_cache.cpp:237: SetRefreshTime "/server" to 2025-05-07T08:56:07.530071Z (+1746608167.530071s) 2025-05-07T08:46:07.530266Z :HTTP DEBUG: meta_cache.cpp:198: IncomingForward /server to http://127.0.0.1:13917 timeout 30.000000s 2025-05-07T08:46:07.530428Z :HTTP DEBUG: http_proxy.cpp:22: Connection created [2:16:2063] 2025-05-07T08:46:07.530477Z :HTTP DEBUG: http_proxy_outgoing.cpp:131: resolving 127.0.0.1:13917 2025-05-07T08:46:07.530611Z :HTTP DEBUG: http_proxy_outgoing.cpp:101: connecting to 127.0.0.1:13917 2025-05-07T08:46:07.530806Z :HTTP DEBUG: http_proxy_outgoing.cpp:248: (#13,127.0.0.1:13917) outgoing connection opened 
2025-05-07T08:46:07.530848Z :HTTP DEBUG: http_proxy_outgoing.cpp:250: (#13,127.0.0.1:13917) <- (GET /server) 2025-05-07T08:46:07.530988Z :HTTP DEBUG: http_proxy_incoming.cpp:83: (#14,[::ffff:127.0.0.1]:60462) incoming connection opened 2025-05-07T08:46:07.531175Z :HTTP DEBUG: http_proxy_incoming.cpp:145: (#14,[::ffff:127.0.0.1]:60462) -> (GET /server) 2025-05-07T08:46:07.541634Z :HTTP ERROR: http_proxy_outgoing.cpp:75: (#13,127.0.0.1:13917) connection closed with error: Connection timed out 2025-05-07T08:46:07.542788Z :HTTP DEBUG: http_proxy_incoming.cpp:178: (#14,[::ffff:127.0.0.1]:60462) connection closed 2025-05-07T08:46:07.543189Z :HTTP WARN: meta_cache.cpp:151: Cache received failed response with error "Connection timed out" for /server - retrying locally 2025-05-07T08:46:07.543285Z :HTTP DEBUG: http_proxy.cpp:141: Connection closed [2:16:2063] 2025-05-07T08:46:07.553887Z :HTTP DEBUG: http_proxy_incoming.cpp:243: (#12,[::ffff:127.0.0.1]:60300) <- (200 Found) 2025-05-07T08:46:07.554115Z :HTTP DEBUG: http_proxy_incoming.cpp:296: (#12,[::ffff:127.0.0.1]:60300) connection closed 2025-05-07T08:46:07.554478Z :HTTP DEBUG: http_proxy_outgoing.cpp:58: (#11,127.0.0.1:12498) -> (200 Found) 2025-05-07T08:46:07.554614Z :HTTP DEBUG: http_proxy_outgoing.cpp:64: (#11,127.0.0.1:12498) connection closed 2025-05-07T08:46:07.555063Z :HTTP DEBUG: http_proxy.cpp:141: Connection closed [2:14:2061] |87.8%| [TM] {RESULT} ydb/core/blobstorage/ut_group/unittest >> TBsOther1::PoisonPill [GOOD] >> TBsOther1::ChaoticParallelWrite |87.8%| [TS] {asan, default-linux-x86_64, release} ydb/core/config/validation/column_shard_config_validator_ut/unittest >> ColumnShardConfigValidation::NotCorrectZSTDCompression [GOOD] |87.8%| [TS] {RESULT} ydb/core/config/validation/auth_config_validator_ut/unittest >> Mirror3of4::ReplicationSmall >> TLockFreeIntrusiveStackTest::ConcurrentAutoNeverEmpty [GOOD] >> TLockFreeIntrusiveStackTest::ConcurrentAutoHeavyContention |87.8%| [TS] {RESULT} ydb/mvp/core/ut/unittest >> TBlobStorageHullFresh::AppendixPerf_Tune [GOOD] |87.8%| [TS] {RESULT} ydb/library/yql/providers/s3/actors/ut/unittest |87.8%| [TS] {RESULT} ydb/core/base/generated/ut/unittest |87.8%| [TA] $(B)/ydb/core/blobstorage/vdisk/hullop/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} >> ActionParsingTest::ToAndFromStringAreConsistent [GOOD] >> ActionParsingTest::ActionsForQueueTest [GOOD] >> ActionParsingTest::BatchActionTest [GOOD] >> ActionParsingTest::ActionsForMessageTest [GOOD] >> ActionParsingTest::FastActionsTest [GOOD] >> HttpCountersTest::CountersAggregationTest [GOOD] >> LazyCounterTest::LazyCounterTest [GOOD] >> LazyCounterTest::AggregationLazyTest [GOOD] >> LazyCounterTest::AggregationNonLazyTest [GOOD] >> LazyCounterTest::HistogramAggregationTest [GOOD] >> MessageAttributeValidationTest::MessageAttributeValidationTest [GOOD] >> MessageBodyValidationTest::MessageBodyValidationTest [GOOD] >> MeteringCountersTest::CountersAggregationTest [GOOD] >> NameValidationTest::NameValidationTest [GOOD] >> QueueAttributes::BasicStdTest [GOOD] >> QueueAttributes::BasicFifoTest [GOOD] >> QueueAttributes::BasicClampTest [GOOD] >> QueueCountersTest::InsertCountersTest [GOOD] >> QueueCountersTest::RemoveQueueCountersNonLeaderWithoutFolderTest [GOOD] >> QueueCountersTest::RemoveQueueCountersLeaderWithoutFolderTest [GOOD] >> QueueCountersTest::RemoveQueueCountersNonLeaderWithFolderTest [GOOD] >> QueueCountersTest::RemoveQueueCountersLeaderWithFolderTest [GOOD] >> QueueCountersTest::CountersAggregationTest >> DoubleIndexedTests::TestMerge [GOOD] >> DoubleIndexedTests::TestFind [GOOD] >> DoubleIndexedTests::TestUpsertByBothKeys [GOOD] >> DoubleIndexedTests::TestErase [GOOD] >> DoubleIndexedTests::TestUpsertBySingleKey [GOOD] |87.8%| [TS] {RESULT} ydb/library/yql/providers/generic/provider/ut/pushdown/unittest >> QueueCountersTest::CountersAggregationTest [GOOD] >> QueueCountersTest::CountersAggregationCloudTest [GOOD] >> RedrivePolicy::RedrivePolicyValidationTest [GOOD] >> RedrivePolicy::RedrivePolicyToJsonTest [GOOD] >> RedrivePolicy::RedrivePolicyArnValidationTest [GOOD] >> SecureProtobufPrinterTest::MessageBody [GOOD] >> SecureProtobufPrinterTest::Tokens [GOOD] >> StringValidationTest::IsAlphaNumAndPunctuationTest [GOOD] >> UserCountersTest::DisableCountersTest [GOOD] >> UserCountersTest::RemoveUserCountersTest [GOOD] >> UserCountersTest::CountersAggregationTest [GOOD] >> TPDiskTest::TestMultipleLogSpliceNonceJump [GOOD] >> TPDiskTest::TestFakeErrorPDiskManyLogWrite |87.8%| [TS] {RESULT} ydb/mvp/meta/ut/unittest |87.8%| [TS] {RESULT} ydb/core/config/validation/column_shard_config_validator_ut/unittest |87.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/fresh/ut/unittest >> TBlobStorageHullFresh::AppendixPerf_Tune [GOOD] >> ClosedIntervalSet::Union [GOOD] >> ClosedIntervalSet::Difference |87.8%| [TA] {RESULT} $(B)/ydb/core/blobstorage/vdisk/hullop/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} |87.9%| [TS] {RESULT} ydb/core/fq/libs/test_connection/ut/unittest >> TestFederatedQueryHelpers::TestCheckNestingDepth [GOOD] >> TestFederatedQueryHelpers::TestTruncateIssues [GOOD] >> TestFederatedQueryHelpers::TestValidateResultSetColumns [GOOD] |87.9%| [TS] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_double_indexed/unittest >> DoubleIndexedTests::TestUpsertBySingleKey [GOOD] |87.9%| [TS] {RESULT} ydb/core/blobstorage/base/ut/gtest |87.9%| [TS] {RESULT} ydb/library/yaml_config/ut/unittest |87.9%| [TS] {RESULT} ydb/core/tx/scheme_board/ut_double_indexed/unittest >> test_transform.py::TestYamlConfigTransformations::test_basic[args0-dump] >> TStateStorageConfig::TestReplicaSelectionUniqueCombinations [GOOD] >> TStateStorageConfig::UniformityTest |87.9%| [TS] {asan, default-linux-x86_64, release} ydb/core/ymq/base/ut/unittest >> UserCountersTest::CountersAggregationTest [GOOD] |87.9%| [TS] {RESULT} ydb/core/external_sources/ut/unittest |87.9%| [TS] {RESULT} ydb/core/config/validation/ut/unittest |87.9%| [TS] {RESULT} ydb/core/ymq/base/ut/unittest |87.9%| [TS] {RESULT} ydb/library/yql/providers/s3/provider/ut/unittest |87.9%| [TS] {RESULT} ydb/core/fq/libs/db_id_async_resolver_impl/ut/unittest |87.9%| [LD] {RESULT} $(B)/ydb/tests/stability/tool/tool >> TPDiskTest::TestFakeErrorPDiskManyLogWrite [GOOD] >> TPDiskTest::TestFakeErrorPDiskLogRead |87.9%| [TS] {RESULT} ydb/mvp/oidc_proxy/ut/unittest |87.9%| [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/federated_query/ut/unittest >> TestFederatedQueryHelpers::TestValidateResultSetColumns [GOOD] |87.9%| [TM] {RESULT} ydb/core/fq/libs/control_plane_storage/internal/ut/unittest |87.9%| [TS] {RESULT} ydb/core/kqp/federated_query/ut/unittest >> TCowBTreeTest::MultipleSnapshotsWithClear [GOOD] >> TCowBTreeTest::MultipleSnapshotsWithClearWithGc >> TCowBTreeTest::SnapshotRollback [GOOD] >> TCowBTreeTest::SnapshotRollbackEarlyErase |87.9%| [TA] $(B)/ydb/core/blobstorage/vdisk/hulldb/fresh/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} >> TPDiskTest::TestFakeErrorPDiskLogRead [GOOD] >> TPDiskTest::TestFakeErrorPDiskSysLogRead >> TPDiskTest::TestFakeErrorPDiskSysLogRead [GOOD] >> TPDiskTest::TestFakeErrorPDiskManyChunkRead >> TCowBTreeTest::SnapshotRollbackEarlyErase [GOOD] >> TCowBTreeTest::ShouldCallDtorsInplace [GOOD] >> TCowBTreeTest::ShouldCallDtorsThreadSafe |87.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_ut_order.cpp >> TCowBTreeTest::ShouldCallDtorsThreadSafe [GOOD] >> TEventPriorityQueueTest::TestPriority [GOOD] >> TFastTlsTest::IterationAfterThreadDeath >> TFastTlsTest::IterationAfterThreadDeath [GOOD] >> TFastTlsTest::ManyThreadLocals |87.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_order.cpp >> TFastTlsTest::ManyThreadLocals [GOOD] >> TFastTlsTest::ManyConcurrentKeys >> TLockFreeIntrusiveStackTest::ConcurrentAutoHeavyContention [GOOD] >> TLogPriorityMuteTests::MuteUntilTest [GOOD] >> TLogPriorityMuteTests::AtomicMuteUntilTest [GOOD] >> TLogPriorityMuteTests::UnmuteTest [GOOD] >> TLogPriorityMuteTests::AtomicUnmuteTest [GOOD] >> TLogPriorityMuteTests::CheckPriorityWithSetMuteTest [GOOD] >> TLogPriorityMuteTests::AtomicCheckPriorityWithSetMuteTest [GOOD] >> TLogPriorityMuteTests::CheckPriorityWithSetMuteDurationTest [GOOD] >> TLogPriorityMuteTests::AtomicCheckPriorityWithSetMuteDurationTest [GOOD] >> TOneOneQueueTests::TestSimpleEnqueueDequeue [GOOD] >> TOneOneQueueTests::CleanInDestructor [GOOD] >> TOneOneQueueTests::ReadIterator [GOOD] >> TPageMapTest::TestResize [GOOD] >> TPageMapTest::TestRandom >> test.py::test[solomon-BadDownsamplingAggregation-] [GOOD] >> test.py::test[solomon-BadDownsamplingDisabled-] >> TFastTlsTest::ManyConcurrentKeys [GOOD] >> TFifoQueueTest::ShouldPushPop [GOOD] >> TFragmentedBufferTest::TestIntersectedWriteRead [GOOD] >> TFragmentedBufferTest::TestIntersectedWriteRead2 [GOOD] >> TFragmentedBufferTest::TestIntersectedWriteRead3 [GOOD] >> TFragmentedBufferTest::Test3WriteRead [GOOD] >> TFragmentedBufferTest::Test5WriteRead [GOOD] >> TFragmentedBufferTest::TestGetMonolith [GOOD] >> TFragmentedBufferTest::CopyFrom [GOOD] >> TFragmentedBufferTest::ReadWriteRandom >> TCowBTreeTest::MultipleSnapshotsWithClearWithGc [GOOD] >> TCowBTreeTest::DuplicateKeysInplace >> TCowBTreeTest::DuplicateKeysInplace [GOOD] >> TCowBTreeTest::DuplicateKeysThreadSafe >> TBsLocalRecovery::StartStopNotEmptyDB [GOOD] >> TBsLocalRecovery::WriteRestartRead >> TPDiskTest::TestFakeErrorPDiskManyChunkRead [GOOD] >> TPDiskTest::TestFakeErrorPDiskManyChunkWrite |87.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_vdisk_restart/blobstorage-ut_blobstorage-ut_vdisk_restart |87.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_vdisk_restart/blobstorage-ut_blobstorage-ut_vdisk_restart |87.9%| [TA] {RESULT} $(B)/ydb/core/blobstorage/vdisk/hulldb/fresh/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} >> test_transform.py::TestYamlConfigTransformations::test_basic[args0-dump] [GOOD] >> test_transform.py::TestYamlConfigTransformations::test_basic[args0-dump_ds_init] |87.9%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_vdisk_restart/blobstorage-ut_blobstorage-ut_vdisk_restart >> TPDiskRaces::Decommit [GOOD] >> TPDiskRaces::DecommitWithInflight >> TCowBTreeTest::DuplicateKeysThreadSafe [GOOD] >> TCowBTreeTest::IteratorDestructor [GOOD] >> TCowBTreeTest::Concurrent >> TCowBTreeTest::Concurrent [GOOD] >> TCowBTreeTest::Alignment [GOOD] |87.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/ymq/http/ut/ydb-core-ymq-http-ut >> TPDiskTest::TestFakeErrorPDiskManyChunkWrite [GOOD] >> TPDiskTest::TestLogSpliceChunkReserve >> TFragmentedBufferTest::ReadWriteRandom [GOOD] |87.9%| [LD] {RESULT} $(B)/ydb/core/ymq/http/ut/ydb-core-ymq-http-ut |87.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/ymq/http/ut/ydb-core-ymq-http-ut >> TPDiskRaces::KillOwnerWhileDecommittingWithInflight [GOOD] >> TPDiskRaces::KillOwnerWhileDecommittingWithInflightMock >> VDiskRestart::Simple [GOOD] >> TBsVDiskRepl3::ReplPerf [GOOD] |87.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_vdisk_restart/unittest |87.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_vdisk_restart/unittest |87.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_vdisk_restart/unittest >> TStateStorageConfig::UniformityTest [GOOD] |87.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_vdisk_restart/unittest |87.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_vdisk_restart/unittest |87.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_vdisk_restart/unittest |87.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_vdisk_restart/unittest |87.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_vdisk_restart/unittest >> VDiskRestart::Simple [GOOD] |87.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_vdisk_restart/unittest |87.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/dsproxy/ut_fat/dsproxy_ut.cpp >> Dictionary::Simple [GOOD] >> Dictionary::ComparePayloadAndFull |87.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_ru_calculator/ydb-core-tx-schemeshard-ut_ru_calculator |87.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_ru_calculator/ydb-core-tx-schemeshard-ut_ru_calculator |87.9%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_ru_calculator/ydb-core-tx-schemeshard-ut_ru_calculator |87.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_vdisk_restart/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_vdisk/unittest >> TBsVDiskRepl3::ReplPerf [GOOD] Test command err: 2025-05-07T08:45:41.214728Z :BS_SYNCER ERROR: guid_recovery.cpp:714: PDiskId# 4 VDISK[0:_:0:1:1]: (0) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2025-05-07T08:45:41.250436Z :BS_SYNCER ERROR: guid_recovery.cpp:767: PDiskId# 4 VDISK[0:_:0:1:1]: (0) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 15903353189860024919] 2025-05-07T08:45:41.963591Z :BS_SYNCER ERROR: blobstorage_osiris.cpp:203: PDiskId# 4 VDISK[0:_:0:1:1]: (0) THullOsirisActor: FINISH: 
BlobsResurrected# 0 PartsResurrected# 0 2025-05-07T08:45:52.752783Z :BS_SYNCER ERROR: guid_recovery.cpp:714: PDiskId# 4 VDISK[0:_:0:3:0]: (0) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2025-05-07T08:45:52.946442Z :BS_SYNCER ERROR: guid_recovery.cpp:767: PDiskId# 4 VDISK[0:_:0:3:0]: (0) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 2519209789532235097] 2025-05-07T08:45:53.159068Z :BS_SYNCER ERROR: blobstorage_osiris.cpp:203: PDiskId# 4 VDISK[0:_:0:3:0]: (0) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0 2025-05-07T08:46:11.002227Z :BS_SYNCER ERROR: guid_recovery.cpp:714: PDiskId# 4 VDISK[0:_:0:1:1]: (0) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2025-05-07T08:46:11.130300Z :BS_SYNCER ERROR: guid_recovery.cpp:767: PDiskId# 4 VDISK[0:_:0:1:1]: (0) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 1439524912524330535] 2025-05-07T08:46:12.239814Z :BS_SYNCER ERROR: blobstorage_osiris.cpp:203: PDiskId# 4 VDISK[0:_:0:1:1]: (0) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0 |87.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/control/ut/ydb-core-control-ut |87.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/base/ut/unittest >> TStateStorageConfig::UniformityTest [GOOD] |87.9%| [LD] {RESULT} $(B)/ydb/core/control/ut/ydb-core-control-ut |87.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/control/ut/ydb-core-control-ut |87.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/dsproxy/ut_fat/dsproxy_ut.cpp |87.9%| [CC] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/columnshard_impl.h_serialized.cpp |87.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/util/ut/unittest >> TFragmentedBufferTest::ReadWriteRandom [GOOD] |87.9%| [CC] {BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/columnshard_impl.h_serialized.cpp |87.9%| [TA] $(B)/ydb/core/blobstorage/ut_blobstorage/ut_vdisk_restart/test-results/unittest/{meta.json ... results_accumulator.log} >> TBsLocalRecovery::WriteRestartRead [GOOD] >> TBsLocalRecovery::MultiPutWriteRestartRead >> test.py::test[solomon-BadDownsamplingDisabled-] [GOOD] >> XmlBuilderTest::WritesProperly [GOOD] >> test.py::test[solomon-BadDownsamplingFill-] >> XmlBuilderTest::MacroBuilder [GOOD] |87.9%| [TA] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_vdisk_restart/test-results/unittest/{meta.json ... 
results_accumulator.log} ------- [TM] {asan, default-linux-x86_64, release} ydb/core/util/ut/unittest >> TCowBTreeTest::Alignment [GOOD] Test command err: Producer 0 worked for 0.1905157382 seconds Producer 1 worked for 0.3538885971 seconds Consumer 0 worked for 0.3780483748 seconds on a snapshot of size 20000 Consumer 1 worked for 0.3811635702 seconds on a snapshot of size 40000 Consumer 2 worked for 0.5753531846 seconds on a snapshot of size 60000 Consumer 3 worked for 0.7832314762 seconds on a snapshot of size 80000 Consumers had 1199974 successful seeks |87.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_restart_pdisk/blobstorage-ut_blobstorage-ut_restart_pdisk |87.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_restart_pdisk/blobstorage-ut_blobstorage-ut_restart_pdisk |88.0%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_restart_pdisk/blobstorage-ut_blobstorage-ut_restart_pdisk >> TPDiskTest::TestLogSpliceChunkReserve [GOOD] >> TPDiskTest::SpaceColor [GOOD] >> TPDiskTest::TestPDiskOnDifferentKeys >> test_transform.py::TestYamlConfigTransformations::test_basic[args0-dump_ds_init] [GOOD] >> test_transform.py::TestYamlConfigTransformations::test_basic[args1-dump] |88.0%| [TA] $(B)/ydb/core/base/ut/test-results/unittest/{meta.json ... results_accumulator.log} |88.0%| [TS] {asan, default-linux-x86_64, release} ydb/core/ymq/http/ut/unittest >> XmlBuilderTest::MacroBuilder [GOOD] |88.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/public_http/ut/ydb-core-public_http-ut |88.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/public_http/ut/ydb-core-public_http-ut >> TPDiskTest::TestPDiskOnDifferentKeys [GOOD] >> TPDiskTest::SuprisinglySmallDisk |88.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/splitter/ut/ydb-core-tx-columnshard-splitter-ut |88.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/control/ut/unittest |88.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/columnshard/splitter/ut/ydb-core-tx-columnshard-splitter-ut |88.0%| [TA] {RESULT} $(B)/ydb/core/base/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} |88.0%| [LD] {RESULT} $(B)/ydb/core/public_http/ut/ydb-core-public_http-ut |88.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/control/ut/unittest |88.0%| [TS] {RESULT} ydb/core/ymq/http/ut/unittest |88.0%| [LD] {RESULT} $(B)/ydb/core/tx/columnshard/splitter/ut/ydb-core-tx-columnshard-splitter-ut |88.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/control/ut/unittest |88.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_read_only_pdisk/ut_blobstorage-ut_read_only_pdisk |88.0%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_read_only_pdisk/ut_blobstorage-ut_read_only_pdisk >> TRUCalculatorTests::TestReadTable [GOOD] >> TRUCalculatorTests::TestBulkUpsert [GOOD] |88.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_read_only_pdisk/ut_blobstorage-ut_read_only_pdisk |88.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/control/ut/unittest |88.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_donor/ydb-core-blobstorage-ut_blobstorage-ut_donor |88.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_donor/ydb-core-blobstorage-ut_blobstorage-ut_donor |88.0%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_donor/ydb-core-blobstorage-ut_blobstorage-ut_donor |88.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/control/ut/unittest >> IcbAsActorTests::TestHttpGetResponse |88.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/control/ut/unittest >> IcbAsActorTests::TestHttpGetResponse [GOOD] |88.0%| [TS] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ru_calculator/unittest >> TRUCalculatorTests::TestBulkUpsert [GOOD] |88.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/mind/bscontroller/ut_selfheal/ydb-core-mind-bscontroller-ut_selfheal |88.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/mind/bscontroller/ut_selfheal/ydb-core-mind-bscontroller-ut_selfheal |88.0%| [TS] {RESULT} ydb/core/tx/schemeshard/ut_ru_calculator/unittest |88.0%| [LD] {RESULT} $(B)/ydb/core/mind/bscontroller/ut_selfheal/ydb-core-mind-bscontroller-ut_selfheal |88.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/control/ut/unittest |88.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/control/ut/unittest >> TPDiskTest::SuprisinglySmallDisk [GOOD] >> TPDiskTest::TestChunkWriteCrossOwner >> IcbAsActorTests::TestHttpPostReaction |88.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_balancing/ydb-core-blobstorage-ut_blobstorage-ut_balancing |88.0%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_balancing/ydb-core-blobstorage-ut_blobstorage-ut_balancing |88.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_balancing/ydb-core-blobstorage-ut_blobstorage-ut_balancing >> IcbAsActorTests::TestHttpPostReaction [GOOD] |88.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/control/ut/unittest >> IcbAsActorTests::TestHttpGetResponse [GOOD] >> TBsOther1::ChaoticParallelWrite [GOOD] >> TBsOther2::ChaoticParallelWrite_SkeletonFrontQueuesOverload >> TPDiskTest::TestChunkWriteCrossOwner [GOOD] >> TBsLocalRecovery::MultiPutWriteRestartRead [GOOD] >> TBsLocalRecovery::MultiPutWriteRestartReadHuge |88.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_restart_pdisk/unittest >> BSCRestartPDisk::RestartOneByOneWithReconnects >> BSCRestartPDisk::RestartBrokenDiskInBrokenGroup |88.0%| [CC] 
{default-linux-x86_64, release, asan} $(S)/ydb/core/persqueue/ut/fetch_request_ut.cpp |88.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_restart_pdisk/unittest >> Dictionary::ComparePayloadAndFull [GOOD] >> Hash::ScalarBinaryHash [GOOD] >> Hash::ScalarCTypeHash [GOOD] >> Hash::ScalarCompositeHash [GOOD] >> ProgramStep::Round0 [GOOD] >> ProgramStep::Round1 >> ProgramStep::Round1 [GOOD] >> ProgramStep::Filter [GOOD] >> ProgramStep::Add [GOOD] >> ProgramStep::Substract >> HttpRouter::Basic [GOOD] >> ProgramStep::Substract [GOOD] >> ProgramStep::Multiply [GOOD] >> ProgramStep::Divide [GOOD] >> ProgramStep::Gcd [GOOD] >> ProgramStep::Lcm [GOOD] >> ProgramStep::Mod [GOOD] >> ProgramStep::ModOrZero [GOOD] >> ProgramStep::Abs |88.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/control/ut/unittest >> IcbAsActorTests::TestHttpPostReaction [GOOD] |88.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_restart_pdisk/unittest |88.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/persqueue/ut/fetch_request_ut.cpp >> TBsDbStat::ChaoticParallelWrite_DbStat [GOOD] >> TBsHuge::Simple >> BSCRestartPDisk::RestartNotAllowed >> ProgramStep::Abs [GOOD] >> ProgramStep::Negate [GOOD] >> ProgramStep::Compares [GOOD] >> ProgramStep::Logic0 [GOOD] >> ProgramStep::Logic1 [GOOD] >> ProgramStep::StartsWith [GOOD] >> ProgramStep::EndsWith [GOOD] >> ProgramStep::MatchSubstring [GOOD] >> ProgramStep::StartsWithIgnoreCase [GOOD] >> ProgramStep::EndsWithIgnoreCase [GOOD] >> BSCRestartPDisk::RestartOneByOne >> ProgramStep::MatchSubstringIgnoreCase [GOOD] >> ProgramStep::ScalarTest [GOOD] >> ProgramStep::TestValueFromNull [GOOD] >> ProgramStep::MergeFilterSimple |88.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_restart_pdisk/unittest >> BSCReadOnlyPDisk::SetGoodDiskInBrokenGroupReadOnlyNotAllowed >> ProgramStep::MergeFilterSimple [GOOD] >> ProgramStep::Projection [GOOD] >> ProgramStep::MinMax [GOOD] >> ProgramStep::Sum [GOOD] >> ProgramStep::SumGroupBy [GOOD] >> ProgramStep::SumGroupByNotNull |88.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/pdisk/ut/unittest >> TPDiskTest::TestChunkWriteCrossOwner [GOOD] |88.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_restart_pdisk/unittest >> ProgramStep::SumGroupByNotNull [GOOD] >> ProgramStep::MinMaxSomeGroupBy [GOOD] >> ProgramStep::MinMaxSomeGroupByNotNull >> BSCReadOnlyPDisk::ReadOnlySlay >> ProgramStep::MinMaxSomeGroupByNotNull [GOOD] >> BSCRestartPDisk::RestartGoodDiskInBrokenGroupNotAllowed >> SortableBatchPosition::FindPosition [GOOD] >> Splitter::Simple >> Donor::ContinueWithFaultyDonor |88.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_read_only_pdisk/unittest >> BSCRestartPDisk::RestartBrokenDiskInBrokenGroup [GOOD] >> BSCReadOnlyPDisk::SetBrokenDiskInBrokenGroupReadOnly >> BSCReadOnlyPDisk::ReadOnlyOneByOne |88.0%| [TS] {asan, default-linux-x86_64, release} ydb/core/public_http/ut/unittest >> HttpRouter::Basic [GOOD] |88.1%| [TS] {RESULT} ydb/core/public_http/ut/unittest >> Donor::SkipBadDonor >> BSCReadOnlyPDisk::ReadOnlyNotAllowed >> BSCReadOnlyPDisk::RestartAndReadOnlyConsecutive >> Splitter::Simple [GOOD] >> Splitter::Small [GOOD] >> Splitter::Minimal [GOOD] >> Splitter::Trivial |88.1%| [TA] $(B)/ydb/core/control/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} >> Splitter::Trivial [GOOD] >> Splitter::BigAndSmall >> TBsLocalRecovery::MultiPutWriteRestartReadHuge [GOOD] >> BSCReadOnlyPDisk::SetGoodDiskInBrokenGroupReadOnlyNotAllowed [GOOD] >> TBsLocalRecovery::ChaoticWriteRestartHugeXXX |88.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_donor/unittest |88.1%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/libcore-tx-columnshard.a |88.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_read_only_pdisk/unittest >> BSCRestartPDisk::RestartGoodDiskInBrokenGroupNotAllowed [GOOD] >> Splitter::BigAndSmall [GOOD] |88.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_stop_pdisk/ydb-core-blobstorage-ut_blobstorage-ut_stop_pdisk |88.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_read_only_pdisk/unittest >> Splitter::CritSmallPortions >> Donor::SlayAfterWiping |88.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_stop_pdisk/ydb-core-blobstorage-ut_blobstorage-ut_stop_pdisk |88.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut_selfheal/unittest >> TBsHuge::Simple [GOOD] >> TBsHuge::SimpleErasureNone ------- [TS] {asan, default-linux-x86_64, release} ydb/core/formats/arrow/ut/unittest >> SortableBatchPosition::FindPosition [GOOD] Test command err: Process: 100000d;/100000; 10000d;/10000; NO_CODEC(poolsize=1024;keylen=1) 0.2021203448 0.2210911404 NO_CODEC(poolsize=1024;keylen=10) 0.1534132783 0.2482180533 NO_CODEC(poolsize=1024;keylen=16) 0.1104676508 0.2045372848 NO_CODEC(poolsize=1024;keylen=32) 0.06592569055 0.1591802296 NO_CODEC(poolsize=1024;keylen=64) 0.03972180035 0.1324717476 NO_CODEC(poolsize=128;keylen=1) 0.2016566193 0.2164784476 NO_CODEC(poolsize=128;keylen=10) 0.07304169975 0.08752922393 NO_CODEC(poolsize=128;keylen=16) 0.05151637558 0.06514358749 NO_CODEC(poolsize=128;keylen=32) 0.02919093319 0.04189888314 NO_CODEC(poolsize=128;keylen=64) 0.01605694811 0.02821124922 NO_CODEC(poolsize=16;keylen=1) 0.2010010074 0.2099570542 NO_CODEC(poolsize=16;keylen=10) 0.0719219365 0.07635285397 NO_CODEC(poolsize=16;keylen=16) 0.05039654131 0.05396013899 NO_CODEC(poolsize=16;keylen=32) 0.02807102527 0.03070808446 NO_CODEC(poolsize=16;keylen=64) 0.01493699686 0.01701612239 NO_CODEC(poolsize=1;keylen=1) 0.2008730831 0.2086845872 NO_CODEC(poolsize=1;keylen=10) 0.07177339648 0.07487027428 NO_CODEC(poolsize=1;keylen=16) 0.0502445638 0.05244238527 NO_CODEC(poolsize=1;keylen=32) 0.02791992658 0.0291982148 NO_CODEC(poolsize=1;keylen=64) 0.01478641518 0.01551089526 NO_CODEC(poolsize=512;keylen=1) 0.2021203448 0.2210911404 NO_CODEC(poolsize=512;keylen=10) 0.1482943606 0.1971260763 NO_CODEC(poolsize=512;keylen=16) 0.1053484084 0.1534129488 NO_CODEC(poolsize=512;keylen=32) 0.0608061115 0.1080222928 NO_CODEC(poolsize=512;keylen=64) 0.03460202321 0.08129402495 NO_CODEC(poolsize=64;keylen=1) 0.2013687897 0.2136153969 NO_CODEC(poolsize=64;keylen=10) 0.07240183504 0.08114272681 NO_CODEC(poolsize=64;keylen=16) 0.05087647028 0.05875304549 NO_CODEC(poolsize=64;keylen=32) 0.02855098581 0.03550414104 NO_CODEC(poolsize=64;keylen=64) 0.01541697597 0.02181403389 lz4(poolsize=1024;keylen=1) 0.006629768257 0.05541610349 lz4(poolsize=1024;keylen=10) 0.04233951498 0.3344832994 lz4(poolsize=1024;keylen=16) 0.05657489465 0.404264214 lz4(poolsize=1024;keylen=32) 0.09037137941 0.5318074361 lz4(poolsize=1024;keylen=64) 0.01074936154 0.1063492063 
lz4(poolsize=128;keylen=1) 0.003831111821 0.02881389382 lz4(poolsize=128;keylen=10) 0.00718182175 0.06087121933 lz4(poolsize=128;keylen=16) 0.008735936466 0.07523964551 lz4(poolsize=128;keylen=32) 0.01375268158 0.117441454 lz4(poolsize=128;keylen=64) 0.02262360212 0.1850289108 lz4(poolsize=16;keylen=1) 0.00273442178 0.01820340324 lz4(poolsize=16;keylen=10) 0.003078137332 0.02169239789 lz4(poolsize=16;keylen=16) 0.003266503667 0.02356577168 lz4(poolsize=16;keylen=32) 0.003742685614 0.02844311377 lz4(poolsize=16;keylen=64) 0.004937163375 0.03979647465 lz4(poolsize=1;keylen=1) 0.00251497006 0.01603325416 lz4(poolsize=1;keylen=10) 0.002531395234 0.01628089447 lz4(poolsize=1;keylen=16) 0.002515970516 0.01617933723 lz4(poolsize=1;keylen=32) 0.00251450677 0.01630226314 lz4(poolsize=1;keylen=64) 0.002511620933 0.01653353149 lz4(poolsize=512;keylen=1) 0.005362411291 0.04359726295 lz4(poolsize=512;keylen=10) 0.02347472854 0.1933066062 lz4(poolsize=512;keylen=16) 0.03056053336 0.2426853056 lz4(poolsize=512;keylen=32) 0.04856356058 0.3467897492 lz4(poolsize=512;keylen=64) 0.04102771881 0.3228658321 lz4(poolsize=64;keylen=1) 0.003312844256 0.02372010279 lz4(poolsize=64;keylen=10) 0.004839661617 0.03863241259 lz4(poolsize=64;keylen=16) 0.005715507689 0.04687204687 lz4(poolsize=64;keylen=32) 0.007821957352 0.06669044223 lz4(poolsize=64;keylen=64) 0.01258912656 0.1073551894 zstd(poolsize=1024;keylen=1) 0.007324840764 0.0754840827 zstd(poolsize=1024;keylen=10) 0.04506846012 0.3776978417 zstd(poolsize=1024;keylen=16) 0.0655640205 0.4694540288 zstd(poolsize=1024;keylen=32) 0.1110720087 0.6098141264 zstd(poolsize=1024;keylen=64) 0.1914108287 0.7447345433 zstd(poolsize=128;keylen=1) 0.003769847609 0.04002713704 zstd(poolsize=128;keylen=10) 0.007456731695 0.07809798271 zstd(poolsize=128;keylen=16) 0.0102539786 0.1029455519 zstd(poolsize=128;keylen=32) 0.01677217062 0.1578947368 zstd(poolsize=128;keylen=64) 0.03005940945 0.2517949988 zstd(poolsize=16;keylen=1) 0.002620896858 0.02794819359 zstd(poolsize=16;keylen=10) 0.002816201441 0.03048416019 zstd(poolsize=16;keylen=16) 0.003368308096 0.03570300158 zstd(poolsize=16;keylen=32) 0.004159808469 0.0434375 zstd(poolsize=16;keylen=64) 0.005779996974 0.05875115349 zstd(poolsize=1;keylen=1) 0.002461243407 0.02626193724 zstd(poolsize=1;keylen=10) 0.002154636612 0.0234375 zstd(poolsize=1;keylen=16) 0.002356872222 0.02519132653 zstd(poolsize=1;keylen=32) 0.002427911996 0.02573879886 zstd(poolsize=1;keylen=64) 0.00258021431 0.02699269609 zstd(poolsize=512;keylen=1) 0.005583027596 0.05848930481 zstd(poolsize=512;keylen=10) 0.0236929438 0.2237078941 zstd(poolsize=512;keylen=16) 0.03443366072 0.2936507937 zstd(poolsize=512;keylen=32) 0.05917328099 0.4212765957 zstd(poolsize=512;keylen=64) 0.1058929843 0.5749553837 zstd(poolsize=64;keylen=1) 0.00319560285 0.03401360544 zstd(poolsize=64;keylen=10) 0.004852093844 0.05176470588 zstd(poolsize=64;keylen=16) 0.00633344236 0.06557881773 zstd(poolsize=64;keylen=32) 0.009647738439 0.09619952494 zstd(poolsize=64;keylen=64) 0.01626771323 0.1514644351 NO_CODEC --1000 ----1 ------1 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=5168;columns=1; --------5168 / 5296 = 2.416918429% ------10 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=14168;columns=1; --------14168 / 14296 = 0.8953553442% ------16 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=20168;columns=1; --------20168 / 20296 = 0.6306661411% 
------32 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=36168;columns=1; --------36168 / 36296 = 0.35265594% ------64 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=68168;columns=1; --------68168 / 68296 = 0.1874194682% ----16 ------1 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=5168;columns=1; --------5168 / 5296 = 2.416918429% ------10 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=14168;columns=1; --------14168 / 14296 = 0.8953553442% ------16 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=20168;columns=1; --------20168 / 20296 = 0.6306661411% ------32 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=36168;columns=1; --------36168 / 36296 = 0.35265594% ------64 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=68168;columns=1; --------68168 / 68296 = 0.1874194682% ----64 ------1 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=5168;columns=1; --------5168 / 5296 = 2.416918429% ------10 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=14168;columns=1; --------14168 / 14296 = 0.8953553442% ------16 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=20168;columns=1; --------20168 / 20296 = 0.6306661411% ------32 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=36168;columns=1; --------36168 / 36296 = 0.35265594% ------64 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=68168;columns=1; --------68168 / 68296 = 0.1874194682% ----128 ------1 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=5168;columns=1; --------5168 / 5296 = 2.416918429% ------10 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=14168;columns=1; --------14168 / 14296 = 0.8953553442% ------16 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=20168;columns=1; --------20168 / 20296 = 0.6306661411% ------32 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=36168;columns=1; --------36168 / 36296 = 0.35265594% ------64 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=68168;columns=1; --------68168 / 68296 = 0.1874194682% ----512 ------1 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=5168;columns=1; --------5168 / 5296 = 2.416918429% ------10 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=14168;columns=1; --------14168 / 14296 = 0.8953553442% ------16 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=20168;columns=1; --------20168 / 20296 = 0.6306661411% ------32 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=36168;columns=1; --------36168 / 36296 = 0.35265594% ------64 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=68168;columns=1; --------68168 / 68296 = 0.1874194682% ----1024 ------1 
FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=5168;columns=1; --------5168 / 5296 = 2.416918429% ------10 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=14168;columns=1; --------14168 / 14296 = 0.8953553442% ------16 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=20168;columns=1; --------20168 / 20296 = 0.6306661411% ------32 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=36168;columns=1; --------36168 / 36296 = 0.35265594% ------64 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=68168;columns=1; --------68168 / 68296 = 0.1874194682% --10000 ---- ... "N2(9):{\"i\":\"1\",\"p\":{\"address\":{\"name\":\"x\",\"id\":1}},\"o\":\"1\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N6 -> N2[label="1"]; N4[shape=box, label="N3(9):{\"i\":\"2\",\"p\":{\"address\":{\"name\":\"y\",\"id\":2}},\"o\":\"2\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N6 -> N4[label="1"]; N5[shape=box, label="N5(36):{\"i\":\"1,2,3,4\",\"t\":\"Projection\"}\n",style=filled,color="#FFAAAA"]; N2 -> N5[label="1"]; N4 -> N5[label="2"]; N0 -> N5[label="3"]; N6[shape=box, label="N1(4):{\"i\":\"0\",\"p\":{\"data\":[{\"name\":\"x\",\"id\":1},{\"name\":\"y\",\"id\":2}]},\"o\":\"1,2\",\"t\":\"FetchOriginalData\"}\n",style=filled,color="#FFFF88"]; N7 -> N6[label="1"]; N7->N6->N2->N4->N0->N5[color=red]; }; FALLBACK_ACTOR_LOGGING;priority=INFO;component=2100;fline=simple_arrays_cache.h:65;event=slice_from_cache;key=int16;records=0;count=0; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2101;fline=graph_execute.cpp:162;graph_constructed=digraph program {N7[shape=box, label="N0(0):{\"p\":{\"data\":[{\"name\":\"x\",\"id\":1},{\"name\":\"y\",\"id\":2}]},\"o\":\"0\",\"t\":\"ReserveMemory\"}\n"]; N0[shape=box, label="N4(18):{\"i\":\"1,2\",\"o\":\"3,4\",\"t\":\"Aggregation\"}\nREMOVE:2"]; N2 -> N0[label="1"]; N4 -> N0[label="2"]; N2[shape=box, label="N2(9):{\"i\":\"1\",\"p\":{\"address\":{\"name\":\"x\",\"id\":1}},\"o\":\"1\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N6 -> N2[label="1"]; N4[shape=box, label="N3(9):{\"i\":\"2\",\"p\":{\"address\":{\"name\":\"y\",\"id\":2}},\"o\":\"2\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N6 -> N4[label="1"]; N5[shape=box, label="N5(27):{\"i\":\"1,3,4\",\"t\":\"Projection\"}\n",style=filled,color="#FFAAAA"]; N2 -> N5[label="1"]; N0 -> N5[label="2"]; N6[shape=box, label="N1(4):{\"i\":\"0\",\"p\":{\"data\":[{\"name\":\"x\",\"id\":1},{\"name\":\"y\",\"id\":2}]},\"o\":\"1,2\",\"t\":\"FetchOriginalData\"}\n",style=filled,color="#FFFF88"]; N7 -> N6[label="1"]; N7->N6->N2->N4->N0->N5[color=red]; }; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=192;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=192;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=192;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=192;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2101;fline=graph_execute.cpp:162;graph_constructed=digraph program {N7[shape=box, label="N0(0):{\"p\":{\"data\":[{\"name\":\"x\",\"id\":1},{\"name\":\"y\",\"id\":2}]},\"o\":\"0\",\"t\":\"ReserveMemory\"}\n"]; N0[shape=box, 
label="N4(18):{\"i\":\"1,2\",\"o\":\"3,4\",\"t\":\"Aggregation\"}\nREMOVE:2"]; N2 -> N0[label="1"]; N4 -> N0[label="2"]; N2[shape=box, label="N2(9):{\"i\":\"1\",\"p\":{\"address\":{\"name\":\"x\",\"id\":1}},\"o\":\"1\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N6 -> N2[label="1"]; N4[shape=box, label="N3(9):{\"i\":\"2\",\"p\":{\"address\":{\"name\":\"y\",\"id\":2}},\"o\":\"2\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N6 -> N4[label="1"]; N5[shape=box, label="N5(27):{\"i\":\"1,3,4\",\"t\":\"Projection\"}\n",style=filled,color="#FFAAAA"]; N2 -> N5[label="1"]; N0 -> N5[label="2"]; N6[shape=box, label="N1(4):{\"i\":\"0\",\"p\":{\"data\":[{\"name\":\"x\",\"id\":1},{\"name\":\"y\",\"id\":2}]},\"o\":\"1,2\",\"t\":\"FetchOriginalData\"}\n",style=filled,color="#FFFF88"]; N7 -> N6[label="1"]; N7->N6->N2->N4->N0->N5[color=red]; }; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=192;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=216;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=192;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=216;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2101;fline=graph_execute.cpp:162;graph_constructed=digraph program {N7[shape=box, label="N0(0):{\"p\":{\"data\":[{\"name\":\"x\",\"id\":1},{\"name\":\"y\",\"id\":2}]},\"o\":\"0\",\"t\":\"ReserveMemory\"}\n"]; N0[shape=box, label="N4(18):{\"i\":\"1,2\",\"o\":\"3,4\",\"t\":\"Aggregation\"}\nREMOVE:2"]; N2 -> N0[label="1"]; N4 -> N0[label="2"]; N2[shape=box, label="N2(9):{\"i\":\"1\",\"p\":{\"address\":{\"name\":\"x\",\"id\":1}},\"o\":\"1\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N6 -> N2[label="1"]; N4[shape=box, label="N3(9):{\"i\":\"2\",\"p\":{\"address\":{\"name\":\"y\",\"id\":2}},\"o\":\"2\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N6 -> N4[label="1"]; N5[shape=box, label="N5(27):{\"i\":\"1,3,4\",\"t\":\"Projection\"}\n",style=filled,color="#FFAAAA"]; N2 -> N5[label="1"]; N0 -> N5[label="2"]; N6[shape=box, label="N1(4):{\"i\":\"0\",\"p\":{\"data\":[{\"name\":\"x\",\"id\":1},{\"name\":\"y\",\"id\":2}]},\"o\":\"1,2\",\"t\":\"FetchOriginalData\"}\n",style=filled,color="#FFFF88"]; N7 -> N6[label="1"]; N7->N6->N2->N4->N0->N5[color=red]; }; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=192;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=216;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=192;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=216;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2101;fline=graph_execute.cpp:162;graph_constructed=digraph program {N7[shape=box, label="N0(0):{\"p\":{\"data\":[{\"name\":\"x\",\"id\":1},{\"name\":\"y\",\"id\":2}]},\"o\":\"0\",\"t\":\"ReserveMemory\"}\n"]; N0[shape=box, label="N4(18):{\"i\":\"1,2\",\"o\":\"3,4\",\"t\":\"Aggregation\"}\nREMOVE:2"]; N2 -> N0[label="1"]; N4 -> N0[label="2"]; N2[shape=box, label="N2(9):{\"i\":\"1\",\"p\":{\"address\":{\"name\":\"x\",\"id\":1}},\"o\":\"1\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N6 -> N2[label="1"]; N4[shape=box, 
label="N3(9):{\"i\":\"2\",\"p\":{\"address\":{\"name\":\"y\",\"id\":2}},\"o\":\"2\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N6 -> N4[label="1"]; N5[shape=box, label="N5(27):{\"i\":\"1,3,4\",\"t\":\"Projection\"}\n",style=filled,color="#FFAAAA"]; N2 -> N5[label="1"]; N0 -> N5[label="2"]; N6[shape=box, label="N1(4):{\"i\":\"0\",\"p\":{\"data\":[{\"name\":\"x\",\"id\":1},{\"name\":\"y\",\"id\":2}]},\"o\":\"1,2\",\"t\":\"FetchOriginalData\"}\n",style=filled,color="#FFFF88"]; N7 -> N6[label="1"]; N7->N6->N2->N4->N0->N5[color=red]; }; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=192;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=216;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=192;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=216;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2101;fline=graph_execute.cpp:162;graph_constructed=digraph program {N7[shape=box, label="N0(0):{\"p\":{\"data\":[{\"name\":\"x\",\"id\":1},{\"name\":\"y\",\"id\":2}]},\"o\":\"0\",\"t\":\"ReserveMemory\"}\n"]; N0[shape=box, label="N4(18):{\"i\":\"1,2\",\"o\":\"3,4\",\"t\":\"Aggregation\"}\nREMOVE:2"]; N2 -> N0[label="1"]; N4 -> N0[label="2"]; N2[shape=box, label="N2(9):{\"i\":\"1\",\"p\":{\"address\":{\"name\":\"x\",\"id\":1}},\"o\":\"1\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N6 -> N2[label="1"]; N4[shape=box, label="N3(9):{\"i\":\"2\",\"p\":{\"address\":{\"name\":\"y\",\"id\":2}},\"o\":\"2\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N6 -> N4[label="1"]; N5[shape=box, label="N5(27):{\"i\":\"1,3,4\",\"t\":\"Projection\"}\n",style=filled,color="#FFAAAA"]; N2 -> N5[label="1"]; N0 -> N5[label="2"]; N6[shape=box, label="N1(4):{\"i\":\"0\",\"p\":{\"data\":[{\"name\":\"x\",\"id\":1},{\"name\":\"y\",\"id\":2}]},\"o\":\"1,2\",\"t\":\"FetchOriginalData\"}\n",style=filled,color="#FFFF88"]; N7 -> N6[label="1"]; N7->N6->N2->N4->N0->N5[color=red]; }; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=192;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=192;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=192;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=192;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2101;fline=graph_execute.cpp:162;graph_constructed=digraph program {N7[shape=box, label="N0(0):{\"p\":{\"data\":[{\"name\":\"x\",\"id\":1},{\"name\":\"y\",\"id\":2}]},\"o\":\"0\",\"t\":\"ReserveMemory\"}\n"]; N0[shape=box, label="N4(18):{\"i\":\"1,2\",\"o\":\"3,4\",\"t\":\"Aggregation\"}\nREMOVE:2"]; N2 -> N0[label="1"]; N4 -> N0[label="2"]; N2[shape=box, label="N2(9):{\"i\":\"1\",\"p\":{\"address\":{\"name\":\"x\",\"id\":1}},\"o\":\"1\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N6 -> N2[label="1"]; N4[shape=box, label="N3(9):{\"i\":\"2\",\"p\":{\"address\":{\"name\":\"y\",\"id\":2}},\"o\":\"2\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N6 -> N4[label="1"]; N5[shape=box, label="N5(27):{\"i\":\"1,3,4\",\"t\":\"Projection\"}\n",style=filled,color="#FFAAAA"]; N2 -> N5[label="1"]; N0 -> N5[label="2"]; N6[shape=box, 
label="N1(4):{\"i\":\"0\",\"p\":{\"data\":[{\"name\":\"x\",\"id\":1},{\"name\":\"y\",\"id\":2}]},\"o\":\"1,2\",\"t\":\"FetchOriginalData\"}\n",style=filled,color="#FFFF88"]; N7 -> N6[label="1"]; N7->N6->N2->N4->N0->N5[color=red]; }; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=192;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=192;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=192;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=192;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2101;fline=graph_execute.cpp:162;graph_constructed=digraph program {N7[shape=box, label="N0(0):{\"p\":{\"data\":[{\"name\":\"x\",\"id\":1},{\"name\":\"y\",\"id\":2}]},\"o\":\"0\",\"t\":\"ReserveMemory\"}\n"]; N0[shape=box, label="N4(18):{\"i\":\"1,2\",\"o\":\"3,4\",\"t\":\"Aggregation\"}\nREMOVE:2"]; N2 -> N0[label="1"]; N4 -> N0[label="2"]; N2[shape=box, label="N2(9):{\"i\":\"1\",\"p\":{\"address\":{\"name\":\"x\",\"id\":1}},\"o\":\"1\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N6 -> N2[label="1"]; N4[shape=box, label="N3(9):{\"i\":\"2\",\"p\":{\"address\":{\"name\":\"y\",\"id\":2}},\"o\":\"2\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N6 -> N4[label="1"]; N5[shape=box, label="N5(27):{\"i\":\"1,3,4\",\"t\":\"Projection\"}\n",style=filled,color="#FFAAAA"]; N2 -> N5[label="1"]; N0 -> N5[label="2"]; N6[shape=box, label="N1(4):{\"i\":\"0\",\"p\":{\"data\":[{\"name\":\"x\",\"id\":1},{\"name\":\"y\",\"id\":2}]},\"o\":\"1,2\",\"t\":\"FetchOriginalData\"}\n",style=filled,color="#FFFF88"]; N7 -> N6[label="1"]; N7->N6->N2->N4->N0->N5[color=red]; }; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=192;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=192;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=192;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=192;columns=1; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_restart_pdisk/unittest >> BSCRestartPDisk::RestartBrokenDiskInBrokenGroup [GOOD] Test command err: RandomSeed# 11498432963995645352 2025-05-07T08:46:28.602692Z 1 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-05-07T08:46:28.602853Z 2 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-05-07T08:46:28.602932Z 3 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-05-07T08:46:28.603039Z 4 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-05-07T08:46:28.603099Z 5 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 
VDISK[82000000:_:0:4:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-05-07T08:46:28.603163Z 6 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-05-07T08:46:28.603226Z 7 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-05-07T08:46:28.603289Z 8 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-05-07T08:46:28.604211Z 1 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-05-07T08:46:28.604290Z 2 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-05-07T08:46:28.604346Z 3 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-05-07T08:46:28.604416Z 4 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-05-07T08:46:28.604469Z 5 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-05-07T08:46:28.604519Z 6 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-05-07T08:46:28.604569Z 7 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-05-07T08:46:28.604617Z 8 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-05-07T08:46:28.604710Z 1 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-05-07T08:46:28.604766Z 6 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-05-07T08:46:28.604802Z 7 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-05-07T08:46:28.604840Z 8 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: 
(2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-05-07T08:46:28.604893Z 2 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-05-07T08:46:28.604938Z 3 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-05-07T08:46:28.604969Z 4 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-05-07T08:46:28.605001Z 5 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-05-07T08:46:28.606927Z 1 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-05-07T08:46:28.607007Z 6 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-05-07T08:46:28.607057Z 7 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-05-07T08:46:28.607117Z 8 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-05-07T08:46:28.607172Z 2 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-05-07T08:46:28.607230Z 3 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-05-07T08:46:28.607279Z 4 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-05-07T08:46:28.607324Z 5 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 >> SelfHealActorTest::SingleErrorDisk |88.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/ut_testshard/ydb-core-blobstorage-ut_testshard |88.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/ut_testshard/ydb-core-blobstorage-ut_testshard |88.1%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_stop_pdisk/ydb-core-blobstorage-ut_blobstorage-ut_stop_pdisk >> SelfHealActorTest::SingleErrorDisk [GOOD] |88.1%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_testshard/ydb-core-blobstorage-ut_testshard |88.1%| [AR] {RESULT} $(B)/ydb/core/tx/columnshard/libcore-tx-columnshard.a |88.1%| [TA] 
{RESULT} $(B)/ydb/core/control/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> VDiskBalancing::TestStopOneNode_Block42 >> test_transform.py::TestYamlConfigTransformations::test_basic[args1-dump] [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_restart_pdisk/unittest >> BSCRestartPDisk::RestartGoodDiskInBrokenGroupNotAllowed [GOOD] Test command err: RandomSeed# 4928592257149043986 2025-05-07T08:46:29.450625Z 1 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-05-07T08:46:29.450816Z 2 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-05-07T08:46:29.450921Z 3 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-05-07T08:46:29.450994Z 4 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-05-07T08:46:29.451056Z 5 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-05-07T08:46:29.451132Z 6 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-05-07T08:46:29.451211Z 7 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-05-07T08:46:29.452193Z 1 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-05-07T08:46:29.452281Z 2 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-05-07T08:46:29.452334Z 3 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-05-07T08:46:29.452383Z 4 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-05-07T08:46:29.452430Z 5 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-05-07T08:46:29.452475Z 6 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-05-07T08:46:29.452521Z 7 
00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-05-07T08:46:29.452609Z 1 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-05-07T08:46:29.452687Z 6 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-05-07T08:46:29.452737Z 7 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-05-07T08:46:29.452813Z 2 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-05-07T08:46:29.452854Z 3 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-05-07T08:46:29.452888Z 4 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-05-07T08:46:29.452922Z 5 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-05-07T08:46:29.459712Z 1 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-05-07T08:46:29.459816Z 6 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-05-07T08:46:29.459867Z 7 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-05-07T08:46:29.459944Z 2 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-05-07T08:46:29.460008Z 3 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-05-07T08:46:29.460090Z 4 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-05-07T08:46:29.460154Z 5 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 >> BsControllerTest::TestLocalSelfHeal >> 
test_transform.py::TestYamlConfigTransformations::test_basic[args1-dump_ds_init] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_read_only_pdisk/unittest >> BSCReadOnlyPDisk::SetGoodDiskInBrokenGroupReadOnlyNotAllowed [GOOD] Test command err: RandomSeed# 7341105299215542400 2025-05-07T08:46:29.225856Z 1 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-05-07T08:46:29.234235Z 2 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-05-07T08:46:29.234404Z 3 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-05-07T08:46:29.234491Z 4 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-05-07T08:46:29.234576Z 5 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-05-07T08:46:29.234654Z 6 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-05-07T08:46:29.234731Z 7 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-05-07T08:46:29.236018Z 1 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-05-07T08:46:29.236128Z 2 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-05-07T08:46:29.236192Z 3 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-05-07T08:46:29.236257Z 4 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-05-07T08:46:29.236322Z 5 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-05-07T08:46:29.236379Z 6 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-05-07T08:46:29.236436Z 7 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) SkeletonFront: got 
TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-05-07T08:46:29.236535Z 1 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-05-07T08:46:29.236592Z 6 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-05-07T08:46:29.236626Z 7 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-05-07T08:46:29.236699Z 2 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-05-07T08:46:29.236739Z 3 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-05-07T08:46:29.236773Z 4 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-05-07T08:46:29.236806Z 5 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-05-07T08:46:29.238818Z 1 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-05-07T08:46:29.238899Z 6 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-05-07T08:46:29.238972Z 7 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-05-07T08:46:29.239043Z 2 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-05-07T08:46:29.239099Z 3 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-05-07T08:46:29.239167Z 4 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-05-07T08:46:29.239220Z 5 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 |88.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut_selfheal/unittest >> SelfHealActorTest::SingleErrorDisk [GOOD] |88.1%| [AR] 
{BAZEL_UPLOAD} $(B)/ydb/core/tx/columnshard/libcore-tx-columnshard.a >> VDiskBalancing::TestStopOneNode_Mirror3dc >> TPageMapTest::TestRandom [GOOD] >> TPageMapTest::TestIntrusive [GOOD] >> TPageMapTest::TestSimplePointer [GOOD] >> TPageMapTest::TestSharedPointer [GOOD] >> TPageMapTest::TestSimplePointerFull >> VDiskBalancing::TestRandom_Block42 |88.1%| [TS] {RESULT} ydb/core/formats/arrow/ut/unittest >> TPageMapTest::TestSimplePointerFull [GOOD] >> TPriorityOperationQueueTest::ShouldNotStartUntilStart [GOOD] >> VDiskBalancing::TwoPartsOnOneNodeTest_Block42_HugeBlob >> test.py::test[solomon-BadDownsamplingFill-] [GOOD] >> test.py::test[solomon-BadDownsamplingInterval-] |88.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_balancing/unittest >> BSCReadOnlyPDisk::SetBrokenDiskInBrokenGroupReadOnly [GOOD] >> Donor::ContinueWithFaultyDonor [GOOD] >> TQueryResultSizeTrackerTest::SerializeDeserializeMaxPtotobufSizePlusOne [GOOD] >> TBsHuge::SimpleErasureNone [GOOD] >> TBsLocalRecovery::ChaoticWriteRestart >> VDiskBalancing::TestStopOneNode_Mirror3dc_HugeBlob >> VDiskBalancing::TestRandom_Mirror3dc ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_read_only_pdisk/unittest >> BSCReadOnlyPDisk::SetBrokenDiskInBrokenGroupReadOnly [GOOD] Test command err: RandomSeed# 17732741884284581133 2025-05-07T08:46:31.187035Z 1 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-05-07T08:46:31.187220Z 2 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-05-07T08:46:31.187302Z 3 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-05-07T08:46:31.187401Z 4 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-05-07T08:46:31.187474Z 5 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-05-07T08:46:31.187539Z 6 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-05-07T08:46:31.187620Z 7 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-05-07T08:46:31.187701Z 8 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-05-07T08:46:31.188706Z 1 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# 
BSVSF03 2025-05-07T08:46:31.188793Z 2 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-05-07T08:46:31.188846Z 3 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-05-07T08:46:31.188902Z 4 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-05-07T08:46:31.188956Z 5 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-05-07T08:46:31.189018Z 6 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-05-07T08:46:31.189071Z 7 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-05-07T08:46:31.189118Z 8 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-05-07T08:46:31.189195Z 1 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-05-07T08:46:31.189283Z 6 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-05-07T08:46:31.189341Z 7 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-05-07T08:46:31.189384Z 8 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-05-07T08:46:31.189430Z 2 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-05-07T08:46:31.189484Z 3 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-05-07T08:46:31.189533Z 4 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-05-07T08:46:31.189571Z 5 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 
2025-05-07T08:46:31.199810Z 1 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-05-07T08:46:31.199913Z 6 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-05-07T08:46:31.199962Z 7 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-05-07T08:46:31.200015Z 8 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-05-07T08:46:31.200063Z 2 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-05-07T08:46:31.200114Z 3 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-05-07T08:46:31.200163Z 4 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-05-07T08:46:31.200210Z 5 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-05-07T08:46:31.729674Z 1 00h01m30.011024s :BS_LOCALRECOVERY CRIT: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) LocalRecovery FINISHED: {RecoveryDuration# INPROGRESS RecoveredLogStartLsn# 0 SuccessfulRecovery# false EmptyLogoBlobsDb# true EmptyBlocksDb# true EmptyBarriersDb# true EmptySyncLog# true EmptySyncer# true EmptyHuge# true LogRecLogoBlob# 0 LogRecBlock# 0 LogRecGC# 0 LogRecSyncLogIdx# 0 LogRecLogoBlobsDB# 0 LogRecBlocksDB# 0 LogRecBarriersDB# 0 LogRecCutLog# 0 LogRecLocalSyncData# 0 LogRecSyncerState# 0 LogRecHandoffDel# 0 LogRecHugeBlobAllocChunk# 0 LogRecHugeBlobFreeChunk# 0 LogRecHugeBlobEntryPoint# 0 LogRecHugeLogoBlob# 0 LogRecLogoBlobOpt# 0 LogRecPhantomBlob# 0 LogRecAnubisOsirisPut# 0 LogRecAddBulkSst# 0 LogoBlobFreshApply# 0 LogoBlobFreshSkip# 0 LogoBlobsBatchFreshApply# 0 LogoBlobsBatchFreshSkip#0 LogoBlobSyncLogApply# 0 LogoBlobSyncLogSkip# 0 HugeLogoBlobFreshApply# 0 HugeLogoBlobFreshSkip# 0 HugeLogoBlobSyncLogApply# 0 HugeLogoBlobSyncLogSkip# 0 BlockFreshApply# 0 BlockFreshSkip# 0 BlocksBatchFreshApply# 0 BlocksBatchFreshSkip# 0 BlockSyncLogApply# 0 BlockSyncLogSkip# 0 BarrierFreshApply# 0 BarrierFreshSkip# 0 BarriersBatchFreshApply# 0 BarriersBatchFreshSkip# 0 BarrierSyncLogApply# 0 BarrierSyncLogSkip# 0 GCBarrierFreshApply# 0 GCBarrierFreshSkip# 0 GCLogoBlobFreshApply# 0 GCLogoBlobFreshSkip# 0 GCSyncLogApply# 0 GCSyncLogSkip# 0 TryPutLogoBlobSyncData# 0 TryPutBlockSyncData# 0 TryPutBarrierSyncData# 0 HandoffDelFreshApply# 0 HandoffDelFreshSkip# 0 HugeBlobAllocChunkApply# 0 HugeBlobAllocChunkSkip# 0 HugeBlobFreeChunkApply# 0 HugeBlobFreeChunkSkip# 0 HugeLogoBlobToHeapApply# 0 HugeLogoBlobToHeapSkip# 0 HugeSlotsDelGenericApply# 0 HugeSlotsDelGenericSkip# 0 TryPutLogoBlobPhantom# 0 
RecoveryLogDiapason# [18446744073709551615 0] StartingPoints# {} ReadLogReplies# {}} reason# Yard::Init failed, errorReason# "Some error reason" status# CORRUPTED;VDISK LOCAL RECOVERY FAILURE DUE TO LOGICAL ERROR >> VDiskBalancing::TestStopOneNode_Block42_HugeBlob |88.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut_selfheal/unittest |88.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/query/ut/unittest >> TQueryResultSizeTrackerTest::SerializeDeserializeMaxPtotobufSizePlusOne [GOOD] >> VDiskBalancing::TwoPartsOnOneNodeTest_Block42 >> Donor::SkipBadDonor [GOOD] >> BsControllerTest::TestLocalBrokenRelocation ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_donor/unittest >> Donor::ContinueWithFaultyDonor [GOOD] Test command err: RandomSeed# 1672263296665848776 2025-05-07T08:46:30.885304Z 1 00h01m14.361024s :BS_SYNCER ERROR: PDiskId# 1001 VDISK[82000000:_:0:0:0]: (2181038080) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2025-05-07T08:46:30.887352Z 1 00h01m14.361024s :BS_SYNCER ERROR: PDiskId# 1001 VDISK[82000000:_:0:0:0]: (2181038080) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 16031378204040996573] 2025-05-07T08:46:30.925388Z 1 00h01m14.361024s :BS_SYNCER ERROR: PDiskId# 1001 VDISK[82000000:_:0:0:0]: (2181038080) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0 |88.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_stop_pdisk/unittest |88.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/util/ut/unittest >> TPriorityOperationQueueTest::ShouldNotStartUntilStart [GOOD] >> Splitter::CritSmallPortions [GOOD] >> Splitter::Crit |88.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_stop_pdisk/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_donor/unittest >> Donor::SkipBadDonor [GOOD] Test command err: RandomSeed# 17619720906566091890 2025-05-07T08:46:31.816655Z 1 00h01m14.361024s :BS_SYNCER ERROR: PDiskId# 1001 VDISK[82000000:_:0:0:0]: (2181038080) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2025-05-07T08:46:31.822687Z 1 00h01m14.361024s :BS_SYNCER ERROR: PDiskId# 1001 VDISK[82000000:_:0:0:0]: (2181038080) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 6748263096675760473] 2025-05-07T08:46:31.875866Z 1 00h01m14.361024s :BS_SYNCER ERROR: PDiskId# 1001 VDISK[82000000:_:0:0:0]: (2181038080) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0 >> TBsOther2::ChaoticParallelWrite_SkeletonFrontQueuesOverload [GOOD] |88.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_stop_pdisk/unittest >> VDiskBalancing::TestStopOneNode_Block42 [GOOD] |88.1%| [TA] $(B)/ydb/core/blobstorage/vdisk/query/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} |88.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_stop_pdisk/unittest >> VDiskBalancing::TwoPartsOnOneNodeTest_Block42_HugeBlob [GOOD] >> test_transform.py::TestYamlConfigTransformations::test_basic[args1-dump_ds_init] [GOOD] |88.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_stop_pdisk/unittest |88.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_stop_pdisk/unittest >> test_transform.py::TestYamlConfigTransformations::test_simplified[dump] |88.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_vdisk/unittest >> TBsOther2::ChaoticParallelWrite_SkeletonFrontQueuesOverload [GOOD] >> Donor::SlayAfterWiping [GOOD] >> BsControllerTest::SelfHealBlock4Plus2 |88.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_stop_pdisk/unittest |88.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_stop_pdisk/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_balancing/unittest >> VDiskBalancing::TestStopOneNode_Block42 [GOOD] Test command err: RandomSeed# 3373723585116168064 SEND TEvPut with key [1:1:1:0:0:100:0] TEvPutResult: TEvPutResult {Id# [1:1:1:0:0:100:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:2:0:0:100:0] 2025-05-07T08:46:32.846104Z 3 00h01m00.010512s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [3:188:17] ServerId# [1:296:58] TabletId# 72057594037932033 PipeClientId# [3:188:17] 2025-05-07T08:46:32.846378Z 8 00h01m00.010512s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [8:223:17] ServerId# [1:301:63] TabletId# 72057594037932033 PipeClientId# [8:223:17] 2025-05-07T08:46:32.846510Z 6 00h01m00.010512s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [6:209:17] ServerId# [1:299:61] TabletId# 72057594037932033 PipeClientId# [6:209:17] 2025-05-07T08:46:32.846602Z 5 00h01m00.010512s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [5:202:17] ServerId# [1:298:60] TabletId# 72057594037932033 PipeClientId# [5:202:17] 2025-05-07T08:46:32.846696Z 4 00h01m00.010512s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [4:195:17] ServerId# [1:297:59] TabletId# 72057594037932033 PipeClientId# [4:195:17] 2025-05-07T08:46:32.846838Z 2 00h01m00.010512s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [2:181:17] ServerId# [1:295:57] TabletId# 72057594037932033 PipeClientId# [2:181:17] 2025-05-07T08:46:32.846998Z 7 00h01m00.010512s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [7:216:17] ServerId# [1:300:62] TabletId# 72057594037932033 PipeClientId# [7:216:17] TEvPutResult: TEvPutResult {Id# [1:1:2:0:0:100:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Start compaction Finish compaction ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_balancing/unittest >> VDiskBalancing::TwoPartsOnOneNodeTest_Block42_HugeBlob [GOOD] Test command err: RandomSeed# 4407126669419482421 SEND TEvPut with key [1:1:1:0:0:3201024:0] 2025-05-07T08:46:34.013848Z 1 00h01m00.010512s :PIPE_SERVER ERROR: 
[72057594037932033] NodeDisconnected NodeId# 6 2025-05-07T08:46:34.014379Z 1 00h01m00.010512s :PIPE_SERVER ERROR: [72057594037932033] NodeDisconnected NodeId# 5 TEvPutResult: TEvPutResult {Id# [1:1:1:0:0:3201024:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Node 0: 4 Node 1: 5 Node 2: 6 Node 3: 1 Node 4: Node 5: Node 6: 2 Node 7: 3 2025-05-07T08:46:34.111764Z 1 00h01m00.011024s :PIPE_SERVER ERROR: [72057594037932033] NodeDisconnected NodeId# 7 Node 0: 4 Node 1: 5 Node 2: 6 Node 3: 1 2 Node 4: Node 5: 1 Node 6: Node 7: 3 Start compaction 1 Finish compaction 1 >> BsControllerTest::TestLocalSelfHeal [GOOD] >> VDiskBalancing::TestStopOneNode_Mirror3dc [GOOD] >> VDiskBalancing::TwoPartsOnOneNodeTest_Block42 [GOOD] >> BSCStopPDisk::PDiskStop >> TPDiskRaces::DecommitWithInflight [GOOD] >> TPDiskRaces::DecommitWithInflightMock ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_donor/unittest >> Donor::SlayAfterWiping [GOOD] Test command err: RandomSeed# 14427462778458637925 2025-05-07T08:46:33.081473Z 1 00h01m14.361024s :BS_SYNCER ERROR: PDiskId# 1001 VDISK[82000000:_:0:0:0]: (2181038080) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2025-05-07T08:46:33.083267Z 1 00h01m14.361024s :BS_SYNCER ERROR: PDiskId# 1001 VDISK[82000000:_:0:0:0]: (2181038080) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 15072467615346047742] 2025-05-07T08:46:33.102560Z 1 00h01m14.361024s :BS_SYNCER ERROR: PDiskId# 1001 VDISK[82000000:_:0:0:0]: (2181038080) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0 >> BsControllerTest::SelfHealMirror3dc |88.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_stop_pdisk/unittest |88.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/ymq/ut/ydb-core-ymq-ut |88.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/ymq/ut/ydb-core-ymq-ut >> VDiskBalancing::TestDontSendToReadOnlyTest_Block42 |88.1%| [TA] {RESULT} $(B)/ydb/core/blobstorage/vdisk/query/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} |88.1%| [LD] {RESULT} $(B)/ydb/core/ymq/ut/ydb-core-ymq-ut >> BsControllerTest::DecommitRejected >> VDiskBalancing::TestStopOneNode_Block42_HugeBlob [GOOD] |88.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_donor/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_balancing/unittest >> VDiskBalancing::TwoPartsOnOneNodeTest_Block42 [GOOD] Test command err: RandomSeed# 17373994193122079306 SEND TEvPut with key [1:1:1:0:0:100:0] 2025-05-07T08:46:34.980785Z 1 00h01m00.010512s :PIPE_SERVER ERROR: [72057594037932033] NodeDisconnected NodeId# 6 2025-05-07T08:46:34.981285Z 1 00h01m00.010512s :PIPE_SERVER ERROR: [72057594037932033] NodeDisconnected NodeId# 5 TEvPutResult: TEvPutResult {Id# [1:1:1:0:0:100:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Node 0: 4 Node 1: 5 Node 2: 6 Node 3: 1 Node 4: Node 5: Node 6: 2 Node 7: 3 2025-05-07T08:46:35.059759Z 1 00h01m00.011024s :PIPE_SERVER ERROR: [72057594037932033] NodeDisconnected NodeId# 7 Node 0: 4 Node 1: 5 Node 2: 6 Node 3: 1 2 Node 4: Node 5: 1 Node 6: Node 7: 3 Start compaction 1 Finish compaction 1 |88.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_blob_depot/ydb-core-blobstorage-ut_blobstorage-ut_blob_depot |88.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_blob_depot/ydb-core-blobstorage-ut_blobstorage-ut_blob_depot |88.1%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_blob_depot/ydb-core-blobstorage-ut_blobstorage-ut_blob_depot ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_balancing/unittest >> VDiskBalancing::TestStopOneNode_Mirror3dc [GOOD] Test command err: RandomSeed# 9482855339378472493 SEND TEvPut with key [1:1:1:0:0:100:0] TEvPutResult: TEvPutResult {Id# [1:1:1:0:0:100:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:2:0:0:100:0] 2025-05-07T08:46:33.855755Z 1 00h01m00.010512s :PIPE_SERVER ERROR: [72057594037932033] NodeDisconnected NodeId# 2 TEvPutResult: TEvPutResult {Id# [1:1:2:0:0:100:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Start compaction Finish compaction >> BSCStopPDisk::PDiskStop [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut_selfheal/unittest >> BsControllerTest::TestLocalSelfHeal [GOOD] Test command err: 2025-05-07T08:46:31.334525Z 1 00h00m00.000000s :BS_NODE DEBUG: [1] Bootstrap 2025-05-07T08:46:31.334580Z 1 00h00m00.000000s :BS_NODE DEBUG: [1] Connect 2025-05-07T08:46:31.334667Z 2 00h00m00.000000s :BS_NODE DEBUG: [2] Bootstrap 2025-05-07T08:46:31.334691Z 2 00h00m00.000000s :BS_NODE DEBUG: [2] Connect 2025-05-07T08:46:31.334739Z 3 00h00m00.000000s :BS_NODE DEBUG: [3] Bootstrap 2025-05-07T08:46:31.334777Z 3 00h00m00.000000s :BS_NODE DEBUG: [3] Connect 2025-05-07T08:46:31.334816Z 4 00h00m00.000000s :BS_NODE DEBUG: [4] Bootstrap 2025-05-07T08:46:31.334844Z 4 00h00m00.000000s :BS_NODE DEBUG: [4] Connect 2025-05-07T08:46:31.334878Z 5 00h00m00.000000s :BS_NODE DEBUG: [5] Bootstrap 2025-05-07T08:46:31.334897Z 5 00h00m00.000000s :BS_NODE DEBUG: [5] Connect 2025-05-07T08:46:31.334927Z 6 00h00m00.000000s :BS_NODE DEBUG: [6] Bootstrap 2025-05-07T08:46:31.334947Z 6 00h00m00.000000s :BS_NODE DEBUG: [6] Connect 2025-05-07T08:46:31.334992Z 7 00h00m00.000000s :BS_NODE DEBUG: [7] Bootstrap 2025-05-07T08:46:31.335012Z 7 00h00m00.000000s :BS_NODE DEBUG: [7] Connect 
2025-05-07T08:46:31.335043Z 8 00h00m00.000000s :BS_NODE DEBUG: [8] Bootstrap 2025-05-07T08:46:31.335063Z 8 00h00m00.000000s :BS_NODE DEBUG: [8] Connect 2025-05-07T08:46:31.335092Z 9 00h00m00.000000s :BS_NODE DEBUG: [9] Bootstrap 2025-05-07T08:46:31.335112Z 9 00h00m00.000000s :BS_NODE DEBUG: [9] Connect 2025-05-07T08:46:31.335149Z 10 00h00m00.000000s :BS_NODE DEBUG: [10] Bootstrap 2025-05-07T08:46:31.335173Z 10 00h00m00.000000s :BS_NODE DEBUG: [10] Connect 2025-05-07T08:46:31.335215Z 11 00h00m00.000000s :BS_NODE DEBUG: [11] Bootstrap 2025-05-07T08:46:31.335244Z 11 00h00m00.000000s :BS_NODE DEBUG: [11] Connect 2025-05-07T08:46:31.335291Z 12 00h00m00.000000s :BS_NODE DEBUG: [12] Bootstrap 2025-05-07T08:46:31.335311Z 12 00h00m00.000000s :BS_NODE DEBUG: [12] Connect 2025-05-07T08:46:31.335360Z 13 00h00m00.000000s :BS_NODE DEBUG: [13] Bootstrap 2025-05-07T08:46:31.335384Z 13 00h00m00.000000s :BS_NODE DEBUG: [13] Connect 2025-05-07T08:46:31.335416Z 14 00h00m00.000000s :BS_NODE DEBUG: [14] Bootstrap 2025-05-07T08:46:31.335436Z 14 00h00m00.000000s :BS_NODE DEBUG: [14] Connect 2025-05-07T08:46:31.335468Z 15 00h00m00.000000s :BS_NODE DEBUG: [15] Bootstrap 2025-05-07T08:46:31.335488Z 15 00h00m00.000000s :BS_NODE DEBUG: [15] Connect 2025-05-07T08:46:31.335524Z 16 00h00m00.000000s :BS_NODE DEBUG: [16] Bootstrap 2025-05-07T08:46:31.335543Z 16 00h00m00.000000s :BS_NODE DEBUG: [16] Connect 2025-05-07T08:46:31.335572Z 17 00h00m00.000000s :BS_NODE DEBUG: [17] Bootstrap 2025-05-07T08:46:31.335591Z 17 00h00m00.000000s :BS_NODE DEBUG: [17] Connect 2025-05-07T08:46:31.335621Z 18 00h00m00.000000s :BS_NODE DEBUG: [18] Bootstrap 2025-05-07T08:46:31.335642Z 18 00h00m00.000000s :BS_NODE DEBUG: [18] Connect 2025-05-07T08:46:31.335688Z 19 00h00m00.000000s :BS_NODE DEBUG: [19] Bootstrap 2025-05-07T08:46:31.335717Z 19 00h00m00.000000s :BS_NODE DEBUG: [19] Connect 2025-05-07T08:46:31.335771Z 20 00h00m00.000000s :BS_NODE DEBUG: [20] Bootstrap 2025-05-07T08:46:31.335792Z 20 00h00m00.000000s :BS_NODE DEBUG: [20] Connect 2025-05-07T08:46:31.335823Z 21 00h00m00.000000s :BS_NODE DEBUG: [21] Bootstrap 2025-05-07T08:46:31.335845Z 21 00h00m00.000000s :BS_NODE DEBUG: [21] Connect 2025-05-07T08:46:31.335875Z 22 00h00m00.000000s :BS_NODE DEBUG: [22] Bootstrap 2025-05-07T08:46:31.335895Z 22 00h00m00.000000s :BS_NODE DEBUG: [22] Connect 2025-05-07T08:46:31.335942Z 23 00h00m00.000000s :BS_NODE DEBUG: [23] Bootstrap 2025-05-07T08:46:31.335975Z 23 00h00m00.000000s :BS_NODE DEBUG: [23] Connect 2025-05-07T08:46:31.336009Z 24 00h00m00.000000s :BS_NODE DEBUG: [24] Bootstrap 2025-05-07T08:46:31.336028Z 24 00h00m00.000000s :BS_NODE DEBUG: [24] Connect 2025-05-07T08:46:31.336059Z 25 00h00m00.000000s :BS_NODE DEBUG: [25] Bootstrap 2025-05-07T08:46:31.336078Z 25 00h00m00.000000s :BS_NODE DEBUG: [25] Connect 2025-05-07T08:46:31.336113Z 26 00h00m00.000000s :BS_NODE DEBUG: [26] Bootstrap 2025-05-07T08:46:31.336133Z 26 00h00m00.000000s :BS_NODE DEBUG: [26] Connect 2025-05-07T08:46:31.336178Z 27 00h00m00.000000s :BS_NODE DEBUG: [27] Bootstrap 2025-05-07T08:46:31.336207Z 27 00h00m00.000000s :BS_NODE DEBUG: [27] Connect 2025-05-07T08:46:31.336248Z 28 00h00m00.000000s :BS_NODE DEBUG: [28] Bootstrap 2025-05-07T08:46:31.336269Z 28 00h00m00.000000s :BS_NODE DEBUG: [28] Connect 2025-05-07T08:46:31.336301Z 29 00h00m00.000000s :BS_NODE DEBUG: [29] Bootstrap 2025-05-07T08:46:31.336321Z 29 00h00m00.000000s :BS_NODE DEBUG: [29] Connect 2025-05-07T08:46:31.336354Z 30 00h00m00.000000s :BS_NODE DEBUG: [30] Bootstrap 2025-05-07T08:46:31.336373Z 30 00h00m00.000000s 
:BS_NODE DEBUG: [30] Connect 2025-05-07T08:46:31.336402Z 31 00h00m00.000000s :BS_NODE DEBUG: [31] Bootstrap 2025-05-07T08:46:31.336422Z 31 00h00m00.000000s :BS_NODE DEBUG: [31] Connect 2025-05-07T08:46:31.336455Z 32 00h00m00.000000s :BS_NODE DEBUG: [32] Bootstrap 2025-05-07T08:46:31.336473Z 32 00h00m00.000000s :BS_NODE DEBUG: [32] Connect 2025-05-07T08:46:31.336518Z 33 00h00m00.000000s :BS_NODE DEBUG: [33] Bootstrap 2025-05-07T08:46:31.336540Z 33 00h00m00.000000s :BS_NODE DEBUG: [33] Connect 2025-05-07T08:46:31.336580Z 34 00h00m00.000000s :BS_NODE DEBUG: [34] Bootstrap 2025-05-07T08:46:31.336605Z 34 00h00m00.000000s :BS_NODE DEBUG: [34] Connect 2025-05-07T08:46:31.336651Z 35 00h00m00.000000s :BS_NODE DEBUG: [35] Bootstrap 2025-05-07T08:46:31.336683Z 35 00h00m00.000000s :BS_NODE DEBUG: [35] Connect 2025-05-07T08:46:31.336721Z 36 00h00m00.000000s :BS_NODE DEBUG: [36] Bootstrap 2025-05-07T08:46:31.336741Z 36 00h00m00.000000s :BS_NODE DEBUG: [36] Connect 2025-05-07T08:46:31.372665Z 1 00h00m00.000000s :BS_NODE DEBUG: [1] ClientConnected Sender# [1:2713:53] Status# ERROR ClientId# [1:2713:53] ServerId# [0:0:0] PipeClient# [1:2713:53] 2025-05-07T08:46:31.373947Z 2 00h00m00.000000s :BS_NODE DEBUG: [2] ClientConnected Sender# [2:2714:41] Status# ERROR ClientId# [2:2714:41] ServerId# [0:0:0] PipeClient# [2:2714:41] 2025-05-07T08:46:31.374023Z 3 00h00m00.000000s :BS_NODE DEBUG: [3] ClientConnected Sender# [3:2715:41] Status# ERROR ClientId# [3:2715:41] ServerId# [0:0:0] PipeClient# [3:2715:41] 2025-05-07T08:46:31.374062Z 4 00h00m00.000000s :BS_NODE DEBUG: [4] ClientConnected Sender# [4:2716:41] Status# ERROR ClientId# [4:2716:41] ServerId# [0:0:0] PipeClient# [4:2716:41] 2025-05-07T08:46:31.374111Z 5 00h00m00.000000s :BS_NODE DEBUG: [5] ClientConnected Sender# [5:2717:41] Status# ERROR ClientId# [5:2717:41] ServerId# [0:0:0] PipeClient# [5:2717:41] 2025-05-07T08:46:31.374147Z 6 00h00m00.000000s :BS_NODE DEBUG: [6] ClientConnected Sender# [6:2718:41] Status# ERROR ClientId# [6:2718:41] ServerId# [0:0:0] PipeClient# [6:2718:41] 2025-05-07T08:46:31.374188Z 7 00h00m00.000000s :BS_NODE DEBUG: [7] ClientConnected Sender# [7:2719:41] Status# ERROR ClientId# [7:2719:41] ServerId# [0:0:0] PipeClient# [7:2719:41] 2025-05-07T08:46:31.374235Z 8 00h00m00.000000s :BS_NODE DEBUG: [8] ClientConnected Sender# [8:2720:41] Status# ERROR ClientId# [8:2720:41] ServerId# [0:0:0] PipeClient# [8:2720:41] 2025-05-07T08:46:31.374272Z 9 00h00m00.000000s :BS_NODE DEBUG: [9] ClientConnected Sender# [9:2721:41] Status# ERROR ClientId# [9:2721:41] ServerId# [0:0:0] PipeClient# [9:2721:41] 2025-05-07T08:46:31.374312Z 10 00h00m00.000000s :BS_NODE DEBUG: [10] ClientConnected Sender# [10:2722:41] Status# ERROR ClientId# [10:2722:41] ServerId# [0:0:0] PipeClient# [10:2722:41] 2025-05-07T08:46:31.374360Z 11 00h00m00.000000s :BS_NODE DEBUG: [11] ClientConnected Sender# [11:2723:41] Status# ERROR ClientId# [11:2723:41] ServerId# [0:0:0] PipeClient# [11:2723:41] 2025-05-07T08:46:31.374414Z 12 00h00m00.000000s :BS_NODE DEBUG: [12] ClientConnected Sender# [12:2724:41] Status# ERROR ClientId# [12:2724:41] ServerId# [0:0:0] PipeClient# [12:2724:41] 2025-05-07T08:46:31.374449Z 13 00h00m00.000000s :BS_NODE DEBUG: [13] ClientConnected Sender# [13:2725:41] Status# ERROR ClientId# [13:2725:41] ServerId# [0:0:0] PipeClient# [13:2725:41] 2025-05-07T08:46:31.374499Z 14 00h00m00.000000s :BS_NODE DEBUG: [14] ClientConnected Sender# [14:2726:41] Status# ERROR ClientId# [14:2726:41] ServerId# [0:0:0] PipeClient# [14:2726:41] 2025-05-07T08:46:31.374539Z 15 
00h00m00.000000s :BS_NODE DEBUG: [15] ClientConnected Sender# [15:2727:41] Status# ERROR ClientId# [15:2727:41] ServerId# [0:0:0] PipeClient# [15:2727:41] 2025-05-07T08:46:31.374578Z 16 00h00m00.000000s :BS_NODE DEBUG: [16] ClientConnected Sender# [16:2728:41] Status# ERROR ClientId# [16:2728:41] ServerId# [0:0:0] PipeClient# [16:2728:41] 2025-05-07T08:46:31.374613Z 17 00h00m00.000000s :BS_NODE DEBUG: [17] ClientConnected Sender# [17:2729:41] Status# ERROR ClientId# [17:2729:41] ServerId# [0:0:0] PipeClient# [17:2729:41] 2025-05-07T08:46:31.374649Z 18 00h00m00.000000s :BS_NODE DEBUG: [18] ClientConnected Sender# [18:2730:41] Status# ERROR ClientId# [18:2730:41] ServerId# [0:0:0] PipeClient# [18:2730:41] 2025-05-07T08:46:31.374684Z 19 00h00m00.000000s :BS_NODE DEBUG: [19] ClientConnected Sender# [19:2731:41] Status# ERROR ClientId# [19:2731:41] ServerId# [0:0:0] PipeClient# [19:2731:41] 2025-05-07T08:46:31.374724Z 20 00h00m00.000000s :BS_NODE DEBUG: [20] ClientConnected Sender# [20:2732:41] Status# ERROR ClientId# [20:2732:41] ServerId# [0:0:0] PipeClient# [20:2732:41] 2025-05-07T08:46:31.374785Z 21 00h00m00.000000s :BS_NODE DEBUG: [21] ClientConnected Sender# [21:2733:41] Status# ERROR ClientId# [21:2733:41] ServerId# [0:0:0] PipeClient# [21:2733:41] 2025-05-07T08:46:31.374824Z 22 00h00m00.000000s :BS_NODE DEBUG: [22] ClientConnected Sender# [22:2734:41] Status# ERROR ClientId# [22:2734:41] ServerId# [0:0:0] PipeClient# [22:2734:41] 2025-05-07T08:46:31.374864Z 23 00h00m00.000000s :BS_NODE DEBUG: [23] ClientConnected Sender# [23:2735:41] Status# ERROR ClientId# [23:2735:41] ServerId# [0:0:0] PipeClient# [23:2735:41] 2025-05-07T08:46:31.374918Z 24 00h00m00.000000s :BS_NODE DEBUG: [24] ClientConnected Sender# [24:2736:41] Status# ERROR ClientId# [24:2736:41] ServerId# [0:0:0] PipeClient# [24:2736:41] 2025-05-07T08:46:31.374954Z 25 00h00m00.000000s :BS_NODE DEBUG: [25] ClientConnected Sender# [25:2737:41] Status# ERROR ClientId# [25:2737:41] ServerId# [0:0:0] PipeClient# [25:2737:41] 2025-05-07T08:46:31.374992Z 26 00h00m00.000000s :BS_NODE DEBUG: [26] ClientConnected Sender# [26:2738:41] Status# ERROR ClientId# [26:2738:41] ServerId# [0:0:0] PipeClient# [26:2738:41] 2025-05-07T08:46:31.375028Z 27 00h00m00.000000s :BS_NODE DEBUG: [27] ClientConnected Sender# [27:2739:41] Status# ERROR ClientId# [27:2739:41] ServerId# [0:0:0] PipeClient# [27:2739:41] 2025-05-07T08:46:31.375063Z 28 00h00m00.000000s :BS_NODE DEBUG: [28] ClientConnected Sender# [28:2740:41] Status# ERROR ClientId# [28:2740:41] ServerId# [0:0:0] PipeClient# [28:2740:41] 2025-05-07T08:46:31.375102Z 29 00h00m00.000000s :BS_NODE DEBUG: [29] ClientConnected Sender# [29:2741:41] Status# ERROR ClientId# [29:2741:41] ServerId# [0:0:0] PipeClient# [29:2741:41] 2025-05-07T08:46:31.375138Z 30 00h00m00.000000s :BS_NODE DEBUG: [30] ClientConnected Sender# [30:2742:41] Status# ERROR ClientId# [30:2742:41] ServerId# [0:0:0] PipeClient# [30:2742:41] 2025-05-07T08:46:31.375175Z 31 00h00m00.000000s :BS_NODE DEBUG: [31] ClientConnected Sender# [31:2743:41] Status# ERROR ClientId# [31:2743:41] ServerId# [0:0:0] PipeClient# [31:2743:41] 2025-05-07T08:46:31.375211Z 32 00h00m00.000000s :BS_NODE DEBUG: [32] ClientConnected Sender# [32:2744:41] Status# ERROR ClientId# [32:2744:41] ServerId# [0:0:0] PipeClient# [32:2744:41] 2025-05-07T08:46:31.375252Z 33 00h00m00.000000s :BS_NODE DEBUG: [33] ClientConnected Sender# [33:2745:41] Status# ERROR ClientId# [33:2745:41] ServerId# [0:0:0] PipeClient# [33:2745:41] 2025-05-07T08:46:31.375291Z 34 00h00m00.000000s 
:BS_NODE DEBUG: [34] ClientConnected Sender# [34:2746:41] Status# ERROR ClientId# [34:2746:41] ServerId# [0:0:0] PipeClient# [34:2746:41] 2025-05-07T08:46:31.375338Z 35 00h00m00.000000s :BS_NODE DEBUG: [35] ClientConnected Sender# [35:2747:41] Status# ERROR ClientId# [35:2747:41 ... true Replicated# true 2025-05-07T08:46:35.198476Z 1 00h05m00.104096s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:96} Reassigner ProcessVDiskReply GroupId# 2147483666 VDiskId# [80000012:1:2:1:0] DiskIsOk# true 2025-05-07T08:46:35.198513Z 1 00h05m00.104096s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:111} Reassigner TEvVStatusResult GroupId# 2147483666 Status# OK JoinedGroup# true Replicated# true 2025-05-07T08:46:35.198563Z 1 00h05m00.104096s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:96} Reassigner ProcessVDiskReply GroupId# 2147483666 VDiskId# [80000012:1:2:2:0] DiskIsOk# true 2025-05-07T08:46:35.211992Z 1 00h05m00.104608s :BS_NODE DEBUG: [1] NodeServiceSetUpdate 2025-05-07T08:46:35.212085Z 1 00h05m00.104608s :BS_NODE DEBUG: [1] VDiskId# [80000012:1:0:2:0] -> [80000012:2:0:2:0] 2025-05-07T08:46:35.212673Z 1 00h05m00.104608s :BS_SELFHEAL INFO: {BSSH09@self_heal.cpp:207} Reassigner succeeded GroupId# 2147483666 Items# [80000012:1:1:1:0]: 22:1001:1001 -> 22:1000:1010 ConfigTxSeqNo# 48 2025-05-07T08:46:35.212720Z 1 00h05m00.104608s :BS_SELFHEAL DEBUG: {BSSH08@self_heal.cpp:218} Reassigner finished GroupId# 2147483666 Success# true 2025-05-07T08:46:35.212900Z 34 00h05m00.104608s :BS_NODE DEBUG: [34] NodeServiceSetUpdate 2025-05-07T08:46:35.212963Z 34 00h05m00.104608s :BS_NODE DEBUG: [34] VDiskId# [80000012:1:2:1:0] -> [80000012:2:2:1:0] 2025-05-07T08:46:35.213080Z 19 00h05m00.104608s :BS_NODE DEBUG: [19] NodeServiceSetUpdate 2025-05-07T08:46:35.213127Z 19 00h05m00.104608s :BS_NODE DEBUG: [19] VDiskId# [80000012:1:1:0:0] -> [80000012:2:1:0:0] 2025-05-07T08:46:35.213237Z 22 00h05m00.104608s :BS_NODE DEBUG: [22] NodeServiceSetUpdate 2025-05-07T08:46:35.213283Z 22 00h05m00.104608s :BS_NODE DEBUG: [22] VDiskId# [80000012:2:1:1:0] PDiskId# 1000 VSlotId# 1010 created 2025-05-07T08:46:35.213354Z 22 00h05m00.104608s :BS_NODE DEBUG: [22] VDiskId# [80000012:2:1:1:0] status changed to INIT_PENDING 2025-05-07T08:46:35.213458Z 7 00h05m00.104608s :BS_NODE DEBUG: [7] NodeServiceSetUpdate 2025-05-07T08:46:35.213508Z 7 00h05m00.104608s :BS_NODE DEBUG: [7] VDiskId# [80000012:1:0:0:0] -> [80000012:2:0:0:0] 2025-05-07T08:46:35.213606Z 25 00h05m00.104608s :BS_NODE DEBUG: [25] NodeServiceSetUpdate 2025-05-07T08:46:35.213652Z 25 00h05m00.104608s :BS_NODE DEBUG: [25] VDiskId# [80000012:1:2:2:0] -> [80000012:2:2:2:0] 2025-05-07T08:46:35.213732Z 10 00h05m00.104608s :BS_NODE DEBUG: [10] NodeServiceSetUpdate 2025-05-07T08:46:35.213775Z 10 00h05m00.104608s :BS_NODE DEBUG: [10] VDiskId# [80000012:1:0:1:0] -> [80000012:2:0:1:0] 2025-05-07T08:46:35.213872Z 13 00h05m00.104608s :BS_NODE DEBUG: [13] NodeServiceSetUpdate 2025-05-07T08:46:35.213926Z 13 00h05m00.104608s :BS_NODE DEBUG: [13] VDiskId# [80000012:1:1:2:0] -> [80000012:2:1:2:0] 2025-05-07T08:46:35.222166Z 31 00h05m00.104608s :BS_NODE DEBUG: [31] NodeServiceSetUpdate 2025-05-07T08:46:35.222277Z 31 00h05m00.104608s :BS_NODE DEBUG: [31] VDiskId# [80000012:1:2:0:0] -> [80000012:2:2:0:0] 2025-05-07T08:46:35.222683Z 1 00h05m00.104608s :BS_SELFHEAL DEBUG: {BSSH01@self_heal.cpp:71} Reassigner starting GroupId# 2147483650 2025-05-07T08:46:35.223347Z 1 00h05m00.104608s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:111} Reassigner TEvVStatusResult GroupId# 2147483650 Status# OK JoinedGroup# true Replicated# 
true 2025-05-07T08:46:35.223397Z 1 00h05m00.104608s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:96} Reassigner ProcessVDiskReply GroupId# 2147483650 VDiskId# [80000002:1:0:2:0] DiskIsOk# true 2025-05-07T08:46:35.223710Z 1 00h05m00.104608s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:111} Reassigner TEvVStatusResult GroupId# 2147483650 Status# OK JoinedGroup# true Replicated# true 2025-05-07T08:46:35.223741Z 1 00h05m00.104608s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:96} Reassigner ProcessVDiskReply GroupId# 2147483650 VDiskId# [80000002:1:0:0:0] DiskIsOk# true 2025-05-07T08:46:35.223770Z 1 00h05m00.104608s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:111} Reassigner TEvVStatusResult GroupId# 2147483650 Status# OK JoinedGroup# true Replicated# true 2025-05-07T08:46:35.223797Z 1 00h05m00.104608s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:96} Reassigner ProcessVDiskReply GroupId# 2147483650 VDiskId# [80000002:1:0:1:0] DiskIsOk# true 2025-05-07T08:46:35.223824Z 1 00h05m00.104608s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:111} Reassigner TEvVStatusResult GroupId# 2147483650 Status# OK JoinedGroup# true Replicated# true 2025-05-07T08:46:35.223861Z 1 00h05m00.104608s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:96} Reassigner ProcessVDiskReply GroupId# 2147483650 VDiskId# [80000002:1:1:0:0] DiskIsOk# true 2025-05-07T08:46:35.223891Z 1 00h05m00.104608s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:111} Reassigner TEvVStatusResult GroupId# 2147483650 Status# OK JoinedGroup# true Replicated# true 2025-05-07T08:46:35.223918Z 1 00h05m00.104608s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:96} Reassigner ProcessVDiskReply GroupId# 2147483650 VDiskId# [80000002:1:1:2:0] DiskIsOk# true 2025-05-07T08:46:35.223959Z 1 00h05m00.104608s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:111} Reassigner TEvVStatusResult GroupId# 2147483650 Status# OK JoinedGroup# true Replicated# true 2025-05-07T08:46:35.223998Z 1 00h05m00.104608s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:96} Reassigner ProcessVDiskReply GroupId# 2147483650 VDiskId# [80000002:1:2:0:0] DiskIsOk# true 2025-05-07T08:46:35.224031Z 1 00h05m00.104608s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:111} Reassigner TEvVStatusResult GroupId# 2147483650 Status# OK JoinedGroup# true Replicated# true 2025-05-07T08:46:35.224056Z 1 00h05m00.104608s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:96} Reassigner ProcessVDiskReply GroupId# 2147483650 VDiskId# [80000002:1:2:1:0] DiskIsOk# true 2025-05-07T08:46:35.224087Z 1 00h05m00.104608s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:111} Reassigner TEvVStatusResult GroupId# 2147483650 Status# OK JoinedGroup# true Replicated# true 2025-05-07T08:46:35.224128Z 1 00h05m00.104608s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:96} Reassigner ProcessVDiskReply GroupId# 2147483650 VDiskId# [80000002:1:2:2:0] DiskIsOk# true 2025-05-07T08:46:35.237327Z 1 00h05m00.105120s :BS_NODE DEBUG: [1] NodeServiceSetUpdate 2025-05-07T08:46:35.237411Z 1 00h05m00.105120s :BS_NODE DEBUG: [1] VDiskId# [80000002:1:0:2:0] -> [80000002:2:0:2:0] 2025-05-07T08:46:35.246048Z 1 00h05m00.105120s :BS_SELFHEAL INFO: {BSSH09@self_heal.cpp:207} Reassigner succeeded GroupId# 2147483650 Items# [80000002:1:1:1:0]: 22:1001:1000 -> 22:1002:1010 ConfigTxSeqNo# 49 2025-05-07T08:46:35.246138Z 1 00h05m00.105120s :BS_SELFHEAL DEBUG: {BSSH08@self_heal.cpp:218} Reassigner finished GroupId# 2147483650 Success# true 2025-05-07T08:46:35.246381Z 34 00h05m00.105120s :BS_NODE DEBUG: [34] NodeServiceSetUpdate 2025-05-07T08:46:35.246468Z 34 00h05m00.105120s :BS_NODE DEBUG: [34] VDiskId# [80000002:1:2:1:0] -> 
[80000002:2:2:1:0] 2025-05-07T08:46:35.246625Z 19 00h05m00.105120s :BS_NODE DEBUG: [19] NodeServiceSetUpdate 2025-05-07T08:46:35.246673Z 19 00h05m00.105120s :BS_NODE DEBUG: [19] VDiskId# [80000002:1:1:0:0] -> [80000002:2:1:0:0] 2025-05-07T08:46:35.246800Z 22 00h05m00.105120s :BS_NODE DEBUG: [22] NodeServiceSetUpdate 2025-05-07T08:46:35.246845Z 22 00h05m00.105120s :BS_NODE DEBUG: [22] VDiskId# [80000002:2:1:1:0] PDiskId# 1002 VSlotId# 1010 created 2025-05-07T08:46:35.246933Z 22 00h05m00.105120s :BS_NODE DEBUG: [22] VDiskId# [80000002:2:1:1:0] status changed to INIT_PENDING 2025-05-07T08:46:35.247037Z 7 00h05m00.105120s :BS_NODE DEBUG: [7] NodeServiceSetUpdate 2025-05-07T08:46:35.247094Z 7 00h05m00.105120s :BS_NODE DEBUG: [7] VDiskId# [80000002:1:0:0:0] -> [80000002:2:0:0:0] 2025-05-07T08:46:35.247194Z 25 00h05m00.105120s :BS_NODE DEBUG: [25] NodeServiceSetUpdate 2025-05-07T08:46:35.247241Z 25 00h05m00.105120s :BS_NODE DEBUG: [25] VDiskId# [80000002:1:2:2:0] -> [80000002:2:2:2:0] 2025-05-07T08:46:35.247324Z 10 00h05m00.105120s :BS_NODE DEBUG: [10] NodeServiceSetUpdate 2025-05-07T08:46:35.247365Z 10 00h05m00.105120s :BS_NODE DEBUG: [10] VDiskId# [80000002:1:0:1:0] -> [80000002:2:0:1:0] 2025-05-07T08:46:35.247467Z 13 00h05m00.105120s :BS_NODE DEBUG: [13] NodeServiceSetUpdate 2025-05-07T08:46:35.247518Z 13 00h05m00.105120s :BS_NODE DEBUG: [13] VDiskId# [80000002:1:1:2:0] -> [80000002:2:1:2:0] 2025-05-07T08:46:35.247602Z 31 00h05m00.105120s :BS_NODE DEBUG: [31] NodeServiceSetUpdate 2025-05-07T08:46:35.247647Z 31 00h05m00.105120s :BS_NODE DEBUG: [31] VDiskId# [80000002:1:2:0:0] -> [80000002:2:2:0:0] 2025-05-07T08:46:35.248902Z 22 00h05m02.411048s :BS_NODE DEBUG: [22] VDiskId# [80000062:2:1:1:0] status changed to REPLICATING 2025-05-07T08:46:35.249769Z 22 00h05m02.735536s :BS_NODE DEBUG: [22] VDiskId# [80000072:2:1:1:0] status changed to REPLICATING 2025-05-07T08:46:35.250725Z 22 00h05m02.957560s :BS_NODE DEBUG: [22] VDiskId# [80000052:2:1:1:0] status changed to REPLICATING 2025-05-07T08:46:35.251705Z 22 00h05m03.329608s :BS_NODE DEBUG: [22] VDiskId# [80000012:2:1:1:0] status changed to REPLICATING 2025-05-07T08:46:35.252564Z 22 00h05m03.482584s :BS_NODE DEBUG: [22] VDiskId# [80000032:2:1:1:0] status changed to REPLICATING 2025-05-07T08:46:35.253421Z 22 00h05m04.020120s :BS_NODE DEBUG: [22] VDiskId# [80000002:2:1:1:0] status changed to REPLICATING 2025-05-07T08:46:35.262638Z 22 00h05m04.293096s :BS_NODE DEBUG: [22] VDiskId# [80000022:2:1:1:0] status changed to REPLICATING 2025-05-07T08:46:35.264475Z 22 00h05m05.403072s :BS_NODE DEBUG: [22] VDiskId# [80000042:2:1:1:0] status changed to REPLICATING 2025-05-07T08:46:35.265394Z 22 00h05m09.244584s :BS_NODE DEBUG: [22] VDiskId# [80000032:2:1:1:0] status changed to READY 2025-05-07T08:46:35.275418Z 22 00h05m09.245096s :BS_NODE DEBUG: [22] NodeServiceSetUpdate 2025-05-07T08:46:35.275513Z 22 00h05m09.245096s :BS_NODE DEBUG: [22] VDiskId# [80000032:1:1:1:0] destroyed 2025-05-07T08:46:35.276783Z 22 00h05m20.982536s :BS_NODE DEBUG: [22] VDiskId# [80000072:2:1:1:0] status changed to READY 2025-05-07T08:46:35.286849Z 22 00h05m20.983048s :BS_NODE DEBUG: [22] NodeServiceSetUpdate 2025-05-07T08:46:35.286923Z 22 00h05m20.983048s :BS_NODE DEBUG: [22] VDiskId# [80000072:1:1:1:0] destroyed 2025-05-07T08:46:35.287167Z 22 00h05m23.102608s :BS_NODE DEBUG: [22] VDiskId# [80000012:2:1:1:0] status changed to READY 2025-05-07T08:46:35.288835Z 22 00h05m23.103120s :BS_NODE DEBUG: [22] NodeServiceSetUpdate 2025-05-07T08:46:35.288894Z 22 00h05m23.103120s :BS_NODE DEBUG: [22] 
VDiskId# [80000012:1:1:1:0] destroyed 2025-05-07T08:46:35.289104Z 22 00h05m23.893072s :BS_NODE DEBUG: [22] VDiskId# [80000042:2:1:1:0] status changed to READY 2025-05-07T08:46:35.290752Z 22 00h05m23.893584s :BS_NODE DEBUG: [22] NodeServiceSetUpdate 2025-05-07T08:46:35.290842Z 22 00h05m23.893584s :BS_NODE DEBUG: [22] VDiskId# [80000042:1:1:1:0] destroyed 2025-05-07T08:46:35.291707Z 22 00h05m25.906560s :BS_NODE DEBUG: [22] VDiskId# [80000052:2:1:1:0] status changed to READY 2025-05-07T08:46:35.293407Z 22 00h05m25.907072s :BS_NODE DEBUG: [22] NodeServiceSetUpdate 2025-05-07T08:46:35.293461Z 22 00h05m25.907072s :BS_NODE DEBUG: [22] VDiskId# [80000052:1:1:1:0] destroyed 2025-05-07T08:46:35.294436Z 22 00h05m32.692120s :BS_NODE DEBUG: [22] VDiskId# [80000002:2:1:1:0] status changed to READY 2025-05-07T08:46:35.296029Z 22 00h05m32.692632s :BS_NODE DEBUG: [22] NodeServiceSetUpdate 2025-05-07T08:46:35.296079Z 22 00h05m32.692632s :BS_NODE DEBUG: [22] VDiskId# [80000002:1:1:1:0] destroyed 2025-05-07T08:46:35.296442Z 22 00h05m35.731048s :BS_NODE DEBUG: [22] VDiskId# [80000062:2:1:1:0] status changed to READY 2025-05-07T08:46:35.298023Z 22 00h05m35.731560s :BS_NODE DEBUG: [22] NodeServiceSetUpdate 2025-05-07T08:46:35.298072Z 22 00h05m35.731560s :BS_NODE DEBUG: [22] VDiskId# [80000062:1:1:1:0] destroyed 2025-05-07T08:46:35.298491Z 22 00h05m37.128096s :BS_NODE DEBUG: [22] VDiskId# [80000022:2:1:1:0] status changed to READY 2025-05-07T08:46:35.300092Z 22 00h05m37.128608s :BS_NODE DEBUG: [22] NodeServiceSetUpdate 2025-05-07T08:46:35.300137Z 22 00h05m37.128608s :BS_NODE DEBUG: [22] VDiskId# [80000022:1:1:1:0] destroyed >> SelfHealActorTest::NoMoreThanOneReplicating [GOOD] >> BsControllerTest::DecommitRejected [GOOD] |88.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_donor/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_balancing/unittest >> VDiskBalancing::TestStopOneNode_Block42_HugeBlob [GOOD] Test command err: RandomSeed# 15312022641802697333 SEND TEvPut with key [1:1:1:0:0:3201024:0] TEvPutResult: TEvPutResult {Id# [1:1:1:0:0:3201024:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:2:0:0:3201024:0] 2025-05-07T08:46:35.249819Z 3 00h01m00.010512s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [3:188:17] ServerId# [1:296:58] TabletId# 72057594037932033 PipeClientId# [3:188:17] 2025-05-07T08:46:35.250084Z 8 00h01m00.010512s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [8:223:17] ServerId# [1:301:63] TabletId# 72057594037932033 PipeClientId# [8:223:17] 2025-05-07T08:46:35.250226Z 6 00h01m00.010512s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [6:209:17] ServerId# [1:299:61] TabletId# 72057594037932033 PipeClientId# [6:209:17] 2025-05-07T08:46:35.250353Z 5 00h01m00.010512s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [5:202:17] ServerId# [1:298:60] TabletId# 72057594037932033 PipeClientId# [5:202:17] 2025-05-07T08:46:35.250502Z 4 00h01m00.010512s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [4:195:17] ServerId# [1:297:59] TabletId# 72057594037932033 PipeClientId# [4:195:17] 2025-05-07T08:46:35.250632Z 2 00h01m00.010512s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) 
ClientId# [2:181:17] ServerId# [1:295:57] TabletId# 72057594037932033 PipeClientId# [2:181:17] 2025-05-07T08:46:35.250752Z 7 00h01m00.010512s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [7:216:17] ServerId# [1:300:62] TabletId# 72057594037932033 PipeClientId# [7:216:17] TEvPutResult: TEvPutResult {Id# [1:1:2:0:0:3201024:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Start compaction Finish compaction >> Mirror3of4::ReplicationSmall [GOOD] >> Mirror3of4::ReplicationHuge >> TBsVDiskOutOfSpace::WriteUntilYellowZone [GOOD] >> TBsVDiskRange::RangeGetFromEmptyDB |88.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/gateway/ut/ydb-core-kqp-gateway-ut >> TPDiskRaces::KillOwnerWhileDecommittingWithInflightMock [GOOD] >> TPDiskRaces::OwnerRecreationRaces |88.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/gateway/ut/ydb-core-kqp-gateway-ut |88.2%| [LD] {RESULT} $(B)/ydb/core/kqp/gateway/ut/ydb-core-kqp-gateway-ut |88.2%| [TA] $(B)/ydb/core/util/ut/test-results/unittest/{meta.json ... results_accumulator.log} ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_stop_pdisk/unittest >> BSCStopPDisk::PDiskStop [GOOD] Test command err: RandomSeed# 2152553838387843791 |88.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut_selfheal/unittest >> VDiskBalancing::TestStopOneNode_Mirror3dc_HugeBlob [GOOD] >> Donor::ConsistentWritesWhenSwitchingToDonorMode >> Donor::CheckOnlineReadRequestToDonor >> test.py::test[solomon-BadDownsamplingInterval-] [GOOD] |88.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut_selfheal/unittest >> SelfHealActorTest::NoMoreThanOneReplicating [GOOD] >> test.py::test[solomon-Basic-default.txt] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut_selfheal/unittest >> BsControllerTest::DecommitRejected [GOOD] Test command err: 2025-05-07T08:46:39.149871Z 1 00h00m00.000000s :BS_NODE DEBUG: [1] Bootstrap 2025-05-07T08:46:39.149924Z 1 00h00m00.000000s :BS_NODE DEBUG: [1] Connect 2025-05-07T08:46:39.150008Z 2 00h00m00.000000s :BS_NODE DEBUG: [2] Bootstrap 2025-05-07T08:46:39.150039Z 2 00h00m00.000000s :BS_NODE DEBUG: [2] Connect 2025-05-07T08:46:39.150089Z 3 00h00m00.000000s :BS_NODE DEBUG: [3] Bootstrap 2025-05-07T08:46:39.150120Z 3 00h00m00.000000s :BS_NODE DEBUG: [3] Connect 2025-05-07T08:46:39.150157Z 4 00h00m00.000000s :BS_NODE DEBUG: [4] Bootstrap 2025-05-07T08:46:39.150178Z 4 00h00m00.000000s :BS_NODE DEBUG: [4] Connect 2025-05-07T08:46:39.150207Z 5 00h00m00.000000s :BS_NODE DEBUG: [5] Bootstrap 2025-05-07T08:46:39.150226Z 5 00h00m00.000000s :BS_NODE DEBUG: [5] Connect 2025-05-07T08:46:39.150266Z 6 00h00m00.000000s :BS_NODE DEBUG: [6] Bootstrap 2025-05-07T08:46:39.150289Z 6 00h00m00.000000s :BS_NODE DEBUG: [6] Connect 2025-05-07T08:46:39.150324Z 7 00h00m00.000000s :BS_NODE DEBUG: [7] Bootstrap 2025-05-07T08:46:39.150347Z 7 00h00m00.000000s :BS_NODE DEBUG: [7] Connect 2025-05-07T08:46:39.150374Z 8 00h00m00.000000s :BS_NODE DEBUG: [8] Bootstrap 2025-05-07T08:46:39.150394Z 8 00h00m00.000000s :BS_NODE DEBUG: [8] Connect 2025-05-07T08:46:39.150442Z 9 00h00m00.000000s :BS_NODE DEBUG: [9] Bootstrap 2025-05-07T08:46:39.150461Z 9 00h00m00.000000s :BS_NODE DEBUG: [9] Connect 2025-05-07T08:46:39.150492Z 10 00h00m00.000000s :BS_NODE DEBUG: [10] Bootstrap 2025-05-07T08:46:39.150510Z 10 00h00m00.000000s :BS_NODE DEBUG: [10] Connect 2025-05-07T08:46:39.150552Z 11 00h00m00.000000s :BS_NODE 
DEBUG: [11] Bootstrap 2025-05-07T08:46:39.150580Z 11 00h00m00.000000s :BS_NODE DEBUG: [11] Connect 2025-05-07T08:46:39.150629Z 12 00h00m00.000000s :BS_NODE DEBUG: [12] Bootstrap 2025-05-07T08:46:39.150650Z 12 00h00m00.000000s :BS_NODE DEBUG: [12] Connect 2025-05-07T08:46:39.150692Z 13 00h00m00.000000s :BS_NODE DEBUG: [13] Bootstrap 2025-05-07T08:46:39.150722Z 13 00h00m00.000000s :BS_NODE DEBUG: [13] Connect 2025-05-07T08:46:39.150753Z 14 00h00m00.000000s :BS_NODE DEBUG: [14] Bootstrap 2025-05-07T08:46:39.150771Z 14 00h00m00.000000s :BS_NODE DEBUG: [14] Connect 2025-05-07T08:46:39.150814Z 15 00h00m00.000000s :BS_NODE DEBUG: [15] Bootstrap 2025-05-07T08:46:39.150834Z 15 00h00m00.000000s :BS_NODE DEBUG: [15] Connect 2025-05-07T08:46:39.162723Z 1 00h00m00.000000s :BS_NODE DEBUG: [1] ClientConnected Sender# [1:508:32] Status# ERROR ClientId# [1:508:32] ServerId# [0:0:0] PipeClient# [1:508:32] 2025-05-07T08:46:39.163292Z 2 00h00m00.000000s :BS_NODE DEBUG: [2] ClientConnected Sender# [2:509:20] Status# ERROR ClientId# [2:509:20] ServerId# [0:0:0] PipeClient# [2:509:20] 2025-05-07T08:46:39.163340Z 3 00h00m00.000000s :BS_NODE DEBUG: [3] ClientConnected Sender# [3:510:20] Status# ERROR ClientId# [3:510:20] ServerId# [0:0:0] PipeClient# [3:510:20] 2025-05-07T08:46:39.163397Z 4 00h00m00.000000s :BS_NODE DEBUG: [4] ClientConnected Sender# [4:511:20] Status# ERROR ClientId# [4:511:20] ServerId# [0:0:0] PipeClient# [4:511:20] 2025-05-07T08:46:39.163441Z 5 00h00m00.000000s :BS_NODE DEBUG: [5] ClientConnected Sender# [5:512:20] Status# ERROR ClientId# [5:512:20] ServerId# [0:0:0] PipeClient# [5:512:20] 2025-05-07T08:46:39.163498Z 6 00h00m00.000000s :BS_NODE DEBUG: [6] ClientConnected Sender# [6:513:20] Status# ERROR ClientId# [6:513:20] ServerId# [0:0:0] PipeClient# [6:513:20] 2025-05-07T08:46:39.163534Z 7 00h00m00.000000s :BS_NODE DEBUG: [7] ClientConnected Sender# [7:514:20] Status# ERROR ClientId# [7:514:20] ServerId# [0:0:0] PipeClient# [7:514:20] 2025-05-07T08:46:39.163568Z 8 00h00m00.000000s :BS_NODE DEBUG: [8] ClientConnected Sender# [8:515:20] Status# ERROR ClientId# [8:515:20] ServerId# [0:0:0] PipeClient# [8:515:20] 2025-05-07T08:46:39.163614Z 9 00h00m00.000000s :BS_NODE DEBUG: [9] ClientConnected Sender# [9:516:20] Status# ERROR ClientId# [9:516:20] ServerId# [0:0:0] PipeClient# [9:516:20] 2025-05-07T08:46:39.163661Z 10 00h00m00.000000s :BS_NODE DEBUG: [10] ClientConnected Sender# [10:517:20] Status# ERROR ClientId# [10:517:20] ServerId# [0:0:0] PipeClient# [10:517:20] 2025-05-07T08:46:39.163753Z 11 00h00m00.000000s :BS_NODE DEBUG: [11] ClientConnected Sender# [11:518:20] Status# ERROR ClientId# [11:518:20] ServerId# [0:0:0] PipeClient# [11:518:20] 2025-05-07T08:46:39.163791Z 12 00h00m00.000000s :BS_NODE DEBUG: [12] ClientConnected Sender# [12:519:20] Status# ERROR ClientId# [12:519:20] ServerId# [0:0:0] PipeClient# [12:519:20] 2025-05-07T08:46:39.163838Z 13 00h00m00.000000s :BS_NODE DEBUG: [13] ClientConnected Sender# [13:520:20] Status# ERROR ClientId# [13:520:20] ServerId# [0:0:0] PipeClient# [13:520:20] 2025-05-07T08:46:39.163875Z 14 00h00m00.000000s :BS_NODE DEBUG: [14] ClientConnected Sender# [14:521:20] Status# ERROR ClientId# [14:521:20] ServerId# [0:0:0] PipeClient# [14:521:20] 2025-05-07T08:46:39.163910Z 15 00h00m00.000000s :BS_NODE DEBUG: [15] ClientConnected Sender# [15:522:20] Status# ERROR ClientId# [15:522:20] ServerId# [0:0:0] PipeClient# [15:522:20] 2025-05-07T08:46:39.262276Z 1 00h00m00.100000s :BS_NODE DEBUG: [1] Connect 2025-05-07T08:46:39.262351Z 2 00h00m00.100000s :BS_NODE 
DEBUG: [2] Connect 2025-05-07T08:46:39.262388Z 3 00h00m00.100000s :BS_NODE DEBUG: [3] Connect 2025-05-07T08:46:39.262428Z 4 00h00m00.100000s :BS_NODE DEBUG: [4] Connect 2025-05-07T08:46:39.262463Z 5 00h00m00.100000s :BS_NODE DEBUG: [5] Connect 2025-05-07T08:46:39.262514Z 6 00h00m00.100000s :BS_NODE DEBUG: [6] Connect 2025-05-07T08:46:39.262557Z 7 00h00m00.100000s :BS_NODE DEBUG: [7] Connect 2025-05-07T08:46:39.262619Z 8 00h00m00.100000s :BS_NODE DEBUG: [8] Connect 2025-05-07T08:46:39.262656Z 9 00h00m00.100000s :BS_NODE DEBUG: [9] Connect 2025-05-07T08:46:39.262691Z 10 00h00m00.100000s :BS_NODE DEBUG: [10] Connect 2025-05-07T08:46:39.262731Z 11 00h00m00.100000s :BS_NODE DEBUG: [11] Connect 2025-05-07T08:46:39.262765Z 12 00h00m00.100000s :BS_NODE DEBUG: [12] Connect 2025-05-07T08:46:39.262816Z 13 00h00m00.100000s :BS_NODE DEBUG: [13] Connect 2025-05-07T08:46:39.262884Z 14 00h00m00.100000s :BS_NODE DEBUG: [14] Connect 2025-05-07T08:46:39.262928Z 15 00h00m00.100000s :BS_NODE DEBUG: [15] Connect 2025-05-07T08:46:39.275740Z 1 00h00m00.100000s :BS_NODE DEBUG: [1] ClientConnected Sender# [1:581:60] Status# OK ClientId# [1:581:60] ServerId# [1:610:61] PipeClient# [1:581:60] 2025-05-07T08:46:39.275804Z 1 00h00m00.100000s :BS_NODE DEBUG: [1] State switched from 0 to 1 2025-05-07T08:46:39.291801Z 2 00h00m00.100000s :BS_NODE DEBUG: [2] ClientConnected Sender# [2:582:21] Status# OK ClientId# [2:582:21] ServerId# [1:611:62] PipeClient# [2:582:21] 2025-05-07T08:46:39.291872Z 2 00h00m00.100000s :BS_NODE DEBUG: [2] State switched from 0 to 1 2025-05-07T08:46:39.291937Z 3 00h00m00.100000s :BS_NODE DEBUG: [3] ClientConnected Sender# [3:583:21] Status# OK ClientId# [3:583:21] ServerId# [1:612:63] PipeClient# [3:583:21] 2025-05-07T08:46:39.291975Z 3 00h00m00.100000s :BS_NODE DEBUG: [3] State switched from 0 to 1 2025-05-07T08:46:39.292015Z 4 00h00m00.100000s :BS_NODE DEBUG: [4] ClientConnected Sender# [4:584:21] Status# OK ClientId# [4:584:21] ServerId# [1:613:64] PipeClient# [4:584:21] 2025-05-07T08:46:39.292039Z 4 00h00m00.100000s :BS_NODE DEBUG: [4] State switched from 0 to 1 2025-05-07T08:46:39.292074Z 5 00h00m00.100000s :BS_NODE DEBUG: [5] ClientConnected Sender# [5:585:21] Status# OK ClientId# [5:585:21] ServerId# [1:614:65] PipeClient# [5:585:21] 2025-05-07T08:46:39.292098Z 5 00h00m00.100000s :BS_NODE DEBUG: [5] State switched from 0 to 1 2025-05-07T08:46:39.292132Z 6 00h00m00.100000s :BS_NODE DEBUG: [6] ClientConnected Sender# [6:586:21] Status# OK ClientId# [6:586:21] ServerId# [1:615:66] PipeClient# [6:586:21] 2025-05-07T08:46:39.292172Z 6 00h00m00.100000s :BS_NODE DEBUG: [6] State switched from 0 to 1 2025-05-07T08:46:39.292211Z 7 00h00m00.100000s :BS_NODE DEBUG: [7] ClientConnected Sender# [7:587:21] Status# OK ClientId# [7:587:21] ServerId# [1:616:67] PipeClient# [7:587:21] 2025-05-07T08:46:39.292237Z 7 00h00m00.100000s :BS_NODE DEBUG: [7] State switched from 0 to 1 2025-05-07T08:46:39.292286Z 8 00h00m00.100000s :BS_NODE DEBUG: [8] ClientConnected Sender# [8:588:21] Status# OK ClientId# [8:588:21] ServerId# [1:617:68] PipeClient# [8:588:21] 2025-05-07T08:46:39.292345Z 8 00h00m00.100000s :BS_NODE DEBUG: [8] State switched from 0 to 1 2025-05-07T08:46:39.292391Z 9 00h00m00.100000s :BS_NODE DEBUG: [9] ClientConnected Sender# [9:589:21] Status# OK ClientId# [9:589:21] ServerId# [1:618:69] PipeClient# [9:589:21] 2025-05-07T08:46:39.292416Z 9 00h00m00.100000s :BS_NODE DEBUG: [9] State switched from 0 to 1 2025-05-07T08:46:39.292452Z 10 00h00m00.100000s :BS_NODE DEBUG: [10] ClientConnected Sender# 
[10:590:21] Status# OK ClientId# [10:590:21] ServerId# [1:619:70] PipeClient# [10:590:21] 2025-05-07T08:46:39.292485Z 10 00h00m00.100000s :BS_NODE DEBUG: [10] State switched from 0 to 1 2025-05-07T08:46:39.292533Z 11 00h00m00.100000s :BS_NODE DEBUG: [11] ClientConnected Sender# [11:591:21] Status# OK ClientId# [11:591:21] ServerId# [1:620:71] PipeClient# [11:591:21] 2025-05-07T08:46:39.292559Z 11 00h00m00.100000s :BS_NODE DEBUG: [11] State switched from 0 to 1 2025-05-07T08:46:39.292607Z 12 00h00m00.100000s :BS_NODE DEBUG: [12] ClientConnected Sender# [12:592:21] Status# OK ClientId# [12:592:21] ServerId# [1:621:72] PipeClient# [12:592:21] 2025-05-07T08:46:39.292636Z 12 00h00m00.100000s :BS_NODE DEBUG: [12] State switched from 0 to 1 2025-05-07T08:46:39.292679Z 13 00h00m00.100000s :BS_NODE DEBUG: [13] ClientConnected Sender# [13:593:21] Status# OK ClientId# [13:593:21] ServerId# [1:622:73] PipeClient# [13:593:21] 2025-05-07T08:46:39.292706Z 13 00h00m00.100000s :BS_NODE DEBUG: [13] State switched from 0 to 1 2025-05-07T08:46:39.292740Z 14 00h00m00.100000s :BS_NODE DEBUG: [14] ClientConnected Sender# [14:594:21] Status# OK ClientId# [14:594:21] ServerId# [1:623:74] PipeClient# [14:594:21] 2025-05-07T08:46:39.292763Z 14 00h00m00.100000s :BS_NODE DEBUG: [14] State switched from 0 to 1 2025-05-07T08:46:39.292798Z 15 00h00m00.100000s :BS_NODE DEBUG: [15] ClientConnected Sender# [15:595:21] Status# OK ClientId# [15:595:21] ServerId# [1:624:75] PipeClient# [15:595:21] 2025-05-07T08:46:39.292888Z 15 00h00m00.100000s :BS_NODE DEBUG: [15] State switched from 0 to 1 2025-05-07T08:46:39.301126Z 1 00h00m00.100512s :BS_NODE DEBUG: [1] NodeServiceSetUpdate 2025-05-07T08:46:39.301204Z 1 00h00m00.100512s :BS_NODE DEBUG: [1] VDiskId# [80000000:1:0:0:0] PDiskId# 1000 VSlotId# 1000 created 2025-05-07T08:46:39.344987Z 1 00h00m00.100512s :BS_NODE DEBUG: [1] VDiskId# [80000000:1:0:0:0] status changed to INIT_PENDING 2025-05-07T08:46:39.346064Z 2 00h00m00.100512s :BS_NODE DEBUG: [2] NodeServiceSetUpdate 2025-05-07T08:46:39.346126Z 2 00h00m00.100512s :BS_NODE DEBUG: [2] VDiskId# [80000000:1:0:1:0] PDiskId# 1000 VSlotId# 1000 created 2025-05-07T08:46:39.346200Z 2 00h00m00.100512s :BS_NODE DEBUG: [2] VDiskId# [80000000:1:0:1:0] status changed to INIT_PENDING 2025-05-07T08:46:39.346303Z 3 00h00m00.100512s :BS_NODE DEBUG: [3] NodeServiceSetUpdate 2025-05-07T08:46:39.346335Z 3 00h00m00.100512s :BS_NODE DEBUG: [3] VDiskId# [80000000:1:0:2:0] PDiskId# 1000 VSlotId# 1000 created 2025-05-07T08:46:39.346398Z 3 00h00m00.100512s :BS_NODE DEBUG: [3] VDiskId# [80000000:1:0:2:0] status changed to INIT_PENDING 2025-05-07T08:46:39.346525Z 4 00h00m00.100512s :BS_NODE DEBUG: [4] NodeServiceSetUpdate 2025-05-07T08:46:39.346561Z 4 00h00m00.100512s :BS_NODE DEBUG: [4] VDiskId# [80000000:1:1:0:0] PDiskId# 1000 VSlotId# 1000 created 2025-05-07T08:46:39.346602Z 4 00h00m00.100512s :BS_NODE DEBUG: [4] VDiskId# [80000000:1:1:0:0] status changed to INIT_PENDING 2025-05-07T08:46:39.346705Z 5 00h00m00.100512s :BS_NODE DEBUG: [5] NodeServiceSetUpdate 2025-05-07T08:46:39.346739Z 5 00h00m00.100512s :BS_NODE DEBUG: [5] VDiskId# [80000000:1:1:1:0] PDiskId# 1000 VSlotId# 1000 created 2025-05-07T08:46:39.346791Z 5 00h00m00.100512s :BS_NODE DEBUG: [5] VDiskId# [80000000:1:1:1:0] status changed to INIT_PENDING 2025-05-07T0 ... 
# 2147483648 2025-05-07T08:46:39.789452Z 3 00h01m05.469512s :BS_NODE DEBUG: [3] VDiskId# [80000001:1:2:2:0] status changed to REPLICATING 2025-05-07T08:46:39.789843Z 1 00h01m05.469512s :BS_SELFHEAL INFO: {BSSH11@self_heal.cpp:709} group can't be reassigned right now [{[80000000:3:0:0:0] Ready},{[80000000:3:0:1:0] Ready},{[80000000:3:0:2:0] Ready},{[80000000:3:1:0:0] Ready},{[80000000:3:1:1:0] Ready},{[80000000:3:1:2:0] Ready},{[80000000:3:2:0:0] NotReady},{[80000000:3:2:1:0] NotReady},{[80000000:3:2:2:0] Ready RequiresReassignment Decommitted}] GroupId# 2147483648 2025-05-07T08:46:39.798195Z 15 00h01m05.636512s :BS_NODE DEBUG: [15] VDiskId# [80000001:1:1:2:0] status changed to REPLICATING 2025-05-07T08:46:39.798612Z 1 00h01m05.636512s :BS_SELFHEAL INFO: {BSSH11@self_heal.cpp:709} group can't be reassigned right now [{[80000000:3:0:0:0] Ready},{[80000000:3:0:1:0] Ready},{[80000000:3:0:2:0] Ready},{[80000000:3:1:0:0] Ready},{[80000000:3:1:1:0] Ready},{[80000000:3:1:2:0] Ready},{[80000000:3:2:0:0] NotReady},{[80000000:3:2:1:0] NotReady},{[80000000:3:2:2:0] Ready RequiresReassignment Decommitted}] GroupId# 2147483648 2025-05-07T08:46:39.798884Z 11 00h01m06.185512s :BS_NODE DEBUG: [11] VDiskId# [80000001:1:0:1:0] status changed to REPLICATING 2025-05-07T08:46:39.799244Z 1 00h01m06.185512s :BS_SELFHEAL INFO: {BSSH11@self_heal.cpp:709} group can't be reassigned right now [{[80000000:3:0:0:0] Ready},{[80000000:3:0:1:0] Ready},{[80000000:3:0:2:0] Ready},{[80000000:3:1:0:0] Ready},{[80000000:3:1:1:0] Ready},{[80000000:3:1:2:0] Ready},{[80000000:3:2:0:0] NotReady},{[80000000:3:2:1:0] NotReady},{[80000000:3:2:2:0] Ready RequiresReassignment Decommitted}] GroupId# 2147483648 2025-05-07T08:46:39.799556Z 1 00h01m10.000000s :BS_SELFHEAL INFO: {BSSH11@self_heal.cpp:709} group can't be reassigned right now [{[80000000:3:0:0:0] Ready},{[80000000:3:0:1:0] Ready},{[80000000:3:0:2:0] Ready},{[80000000:3:1:0:0] Ready},{[80000000:3:1:1:0] Ready},{[80000000:3:1:2:0] Ready},{[80000000:3:2:0:0] NotReady},{[80000000:3:2:1:0] NotReady},{[80000000:3:2:2:0] Ready RequiresReassignment Decommitted}] GroupId# 2147483648 2025-05-07T08:46:39.799945Z 14 00h01m12.010536s :BS_NODE DEBUG: [14] VDiskId# [80000000:3:2:1:0] status changed to READY 2025-05-07T08:46:39.800334Z 1 00h01m12.010536s :BS_SELFHEAL INFO: {BSSH11@self_heal.cpp:709} group can't be reassigned right now [{[80000000:3:0:0:0] Ready},{[80000000:3:0:1:0] Ready},{[80000000:3:0:2:0] Ready},{[80000000:3:1:0:0] Ready},{[80000000:3:1:1:0] Ready},{[80000000:3:1:2:0] Ready},{[80000000:3:2:0:0] NotReady},{[80000000:3:2:1:0] NotReady},{[80000000:3:2:2:0] Ready RequiresReassignment Decommitted}] GroupId# 2147483648 2025-05-07T08:46:39.800973Z 8 00h01m12.011048s :BS_NODE DEBUG: [8] NodeServiceSetUpdate 2025-05-07T08:46:39.801031Z 8 00h01m12.011048s :BS_NODE DEBUG: [8] VDiskId# [80000000:2:2:1:0] destroyed 2025-05-07T08:46:39.801166Z 10 00h01m12.895512s :BS_NODE DEBUG: [10] VDiskId# [80000001:1:0:0:0] status changed to READY 2025-05-07T08:46:39.801436Z 1 00h01m12.895512s :BS_SELFHEAL INFO: {BSSH11@self_heal.cpp:709} group can't be reassigned right now [{[80000000:3:0:0:0] Ready},{[80000000:3:0:1:0] Ready},{[80000000:3:0:2:0] Ready},{[80000000:3:1:0:0] Ready},{[80000000:3:1:1:0] Ready},{[80000000:3:1:2:0] Ready},{[80000000:3:2:0:0] NotReady},{[80000000:3:2:1:0] NotReady},{[80000000:3:2:2:0] Ready RequiresReassignment Decommitted}] GroupId# 2147483648 2025-05-07T08:46:39.801619Z 13 00h01m13.330512s :BS_NODE DEBUG: [13] VDiskId# [80000001:1:1:0:0] status changed to READY 
2025-05-07T08:46:39.802003Z 1 00h01m13.330512s :BS_SELFHEAL INFO: {BSSH11@self_heal.cpp:709} group can't be reassigned right now [{[80000000:3:0:0:0] Ready},{[80000000:3:0:1:0] Ready},{[80000000:3:0:2:0] Ready},{[80000000:3:1:0:0] Ready},{[80000000:3:1:1:0] Ready},{[80000000:3:1:2:0] Ready},{[80000000:3:2:0:0] NotReady},{[80000000:3:2:1:0] NotReady},{[80000000:3:2:2:0] Ready RequiresReassignment Decommitted}] GroupId# 2147483648 2025-05-07T08:46:39.802574Z 14 00h01m15.478512s :BS_NODE DEBUG: [14] VDiskId# [80000001:1:1:1:0] status changed to READY 2025-05-07T08:46:39.802971Z 1 00h01m15.478512s :BS_SELFHEAL INFO: {BSSH11@self_heal.cpp:709} group can't be reassigned right now [{[80000000:3:0:0:0] Ready},{[80000000:3:0:1:0] Ready},{[80000000:3:0:2:0] Ready},{[80000000:3:1:0:0] Ready},{[80000000:3:1:1:0] Ready},{[80000000:3:1:2:0] Ready},{[80000000:3:2:0:0] NotReady},{[80000000:3:2:1:0] NotReady},{[80000000:3:2:2:0] Ready RequiresReassignment Decommitted}] GroupId# 2147483648 2025-05-07T08:46:39.803150Z 2 00h01m16.198512s :BS_NODE DEBUG: [2] VDiskId# [80000001:1:2:1:0] status changed to READY 2025-05-07T08:46:39.803501Z 1 00h01m16.198512s :BS_SELFHEAL INFO: {BSSH11@self_heal.cpp:709} group can't be reassigned right now [{[80000000:3:0:0:0] Ready},{[80000000:3:0:1:0] Ready},{[80000000:3:0:2:0] Ready},{[80000000:3:1:0:0] Ready},{[80000000:3:1:1:0] Ready},{[80000000:3:1:2:0] Ready},{[80000000:3:2:0:0] NotReady},{[80000000:3:2:1:0] NotReady},{[80000000:3:2:2:0] Ready RequiresReassignment Decommitted}] GroupId# 2147483648 2025-05-07T08:46:39.803710Z 13 00h01m17.509024s :BS_NODE DEBUG: [13] VDiskId# [80000000:3:2:0:0] status changed to READY 2025-05-07T08:46:39.804112Z 1 00h01m17.509024s :BS_SELFHEAL DEBUG: {BSSH01@self_heal.cpp:71} Reassigner starting GroupId# 2147483648 2025-05-07T08:46:39.804779Z 1 00h01m17.509024s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:111} Reassigner TEvVStatusResult GroupId# 2147483648 Status# OK JoinedGroup# true Replicated# true 2025-05-07T08:46:39.804827Z 1 00h01m17.509024s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:96} Reassigner ProcessVDiskReply GroupId# 2147483648 VDiskId# [80000000:3:0:0:0] DiskIsOk# true 2025-05-07T08:46:39.805157Z 1 00h01m17.509024s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:111} Reassigner TEvVStatusResult GroupId# 2147483648 Status# OK JoinedGroup# true Replicated# true 2025-05-07T08:46:39.805214Z 1 00h01m17.509024s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:96} Reassigner ProcessVDiskReply GroupId# 2147483648 VDiskId# [80000000:3:0:1:0] DiskIsOk# true 2025-05-07T08:46:39.805251Z 1 00h01m17.509024s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:111} Reassigner TEvVStatusResult GroupId# 2147483648 Status# OK JoinedGroup# true Replicated# true 2025-05-07T08:46:39.805280Z 1 00h01m17.509024s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:96} Reassigner ProcessVDiskReply GroupId# 2147483648 VDiskId# [80000000:3:0:2:0] DiskIsOk# true 2025-05-07T08:46:39.805309Z 1 00h01m17.509024s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:111} Reassigner TEvVStatusResult GroupId# 2147483648 Status# OK JoinedGroup# true Replicated# true 2025-05-07T08:46:39.805337Z 1 00h01m17.509024s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:96} Reassigner ProcessVDiskReply GroupId# 2147483648 VDiskId# [80000000:3:1:0:0] DiskIsOk# true 2025-05-07T08:46:39.805364Z 1 00h01m17.509024s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:111} Reassigner TEvVStatusResult GroupId# 2147483648 Status# OK JoinedGroup# true Replicated# true 2025-05-07T08:46:39.805390Z 1 00h01m17.509024s :BS_SELFHEAL DEBUG: 
{BSSH02@self_heal.cpp:96} Reassigner ProcessVDiskReply GroupId# 2147483648 VDiskId# [80000000:3:1:1:0] DiskIsOk# true 2025-05-07T08:46:39.805417Z 1 00h01m17.509024s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:111} Reassigner TEvVStatusResult GroupId# 2147483648 Status# OK JoinedGroup# true Replicated# true 2025-05-07T08:46:39.805452Z 1 00h01m17.509024s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:96} Reassigner ProcessVDiskReply GroupId# 2147483648 VDiskId# [80000000:3:1:2:0] DiskIsOk# true 2025-05-07T08:46:39.805487Z 1 00h01m17.509024s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:111} Reassigner TEvVStatusResult GroupId# 2147483648 Status# OK JoinedGroup# true Replicated# true 2025-05-07T08:46:39.805525Z 1 00h01m17.509024s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:96} Reassigner ProcessVDiskReply GroupId# 2147483648 VDiskId# [80000000:3:2:0:0] DiskIsOk# true 2025-05-07T08:46:39.805570Z 1 00h01m17.509024s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:111} Reassigner TEvVStatusResult GroupId# 2147483648 Status# OK JoinedGroup# true Replicated# true 2025-05-07T08:46:39.805599Z 1 00h01m17.509024s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:96} Reassigner ProcessVDiskReply GroupId# 2147483648 VDiskId# [80000000:3:2:1:0] DiskIsOk# true 2025-05-07T08:46:39.812317Z 1 00h01m17.509536s :BS_NODE DEBUG: [1] NodeServiceSetUpdate 2025-05-07T08:46:39.812397Z 1 00h01m17.509536s :BS_NODE DEBUG: [1] VDiskId# [80000000:3:0:0:0] -> [80000000:4:0:0:0] 2025-05-07T08:46:39.813017Z 1 00h01m17.509536s :BS_SELFHEAL INFO: {BSSH09@self_heal.cpp:207} Reassigner succeeded GroupId# 2147483648 Items# [80000000:3:2:2:0]: 9:1000:1000 -> 15:1000:1001 ConfigTxSeqNo# 23 2025-05-07T08:46:39.813053Z 1 00h01m17.509536s :BS_SELFHEAL DEBUG: {BSSH08@self_heal.cpp:218} Reassigner finished GroupId# 2147483648 Success# true 2025-05-07T08:46:39.813188Z 7 00h01m17.509536s :BS_NODE DEBUG: [7] NodeServiceSetUpdate 2025-05-07T08:46:39.813231Z 7 00h01m17.509536s :BS_NODE DEBUG: [7] VDiskId# [80000000:1:2:0:0] destroyed 2025-05-07T08:46:39.813334Z 2 00h01m17.509536s :BS_NODE DEBUG: [2] NodeServiceSetUpdate 2025-05-07T08:46:39.813384Z 2 00h01m17.509536s :BS_NODE DEBUG: [2] VDiskId# [80000000:3:0:1:0] -> [80000000:4:0:1:0] 2025-05-07T08:46:39.813499Z 3 00h01m17.509536s :BS_NODE DEBUG: [3] NodeServiceSetUpdate 2025-05-07T08:46:39.813546Z 3 00h01m17.509536s :BS_NODE DEBUG: [3] VDiskId# [80000000:3:0:2:0] -> [80000000:4:0:2:0] 2025-05-07T08:46:39.813625Z 4 00h01m17.509536s :BS_NODE DEBUG: [4] NodeServiceSetUpdate 2025-05-07T08:46:39.813665Z 4 00h01m17.509536s :BS_NODE DEBUG: [4] VDiskId# [80000000:3:1:0:0] -> [80000000:4:1:0:0] 2025-05-07T08:46:39.813766Z 5 00h01m17.509536s :BS_NODE DEBUG: [5] NodeServiceSetUpdate 2025-05-07T08:46:39.813811Z 5 00h01m17.509536s :BS_NODE DEBUG: [5] VDiskId# [80000000:3:1:1:0] -> [80000000:4:1:1:0] 2025-05-07T08:46:39.813885Z 6 00h01m17.509536s :BS_NODE DEBUG: [6] NodeServiceSetUpdate 2025-05-07T08:46:39.813935Z 6 00h01m17.509536s :BS_NODE DEBUG: [6] VDiskId# [80000000:3:1:2:0] -> [80000000:4:1:2:0] 2025-05-07T08:46:39.814054Z 9 00h01m17.509536s :BS_NODE DEBUG: [9] NodeServiceSetUpdate 2025-05-07T08:46:39.814132Z 13 00h01m17.509536s :BS_NODE DEBUG: [13] NodeServiceSetUpdate 2025-05-07T08:46:39.814180Z 13 00h01m17.509536s :BS_NODE DEBUG: [13] VDiskId# [80000000:3:2:0:0] -> [80000000:4:2:0:0] 2025-05-07T08:46:39.814274Z 14 00h01m17.509536s :BS_NODE DEBUG: [14] NodeServiceSetUpdate 2025-05-07T08:46:39.814321Z 14 00h01m17.509536s :BS_NODE DEBUG: [14] VDiskId# [80000000:3:2:1:0] -> [80000000:4:2:1:0] 2025-05-07T08:46:39.814401Z 15 
00h01m17.509536s :BS_NODE DEBUG: [15] NodeServiceSetUpdate 2025-05-07T08:46:39.814443Z 15 00h01m17.509536s :BS_NODE DEBUG: [15] VDiskId# [80000000:4:2:2:0] PDiskId# 1000 VSlotId# 1001 created 2025-05-07T08:46:39.814514Z 15 00h01m17.509536s :BS_NODE DEBUG: [15] VDiskId# [80000000:4:2:2:0] status changed to INIT_PENDING 2025-05-07T08:46:39.816105Z 11 00h01m19.996512s :BS_NODE DEBUG: [11] VDiskId# [80000001:1:0:1:0] status changed to READY 2025-05-07T08:46:39.816804Z 3 00h01m21.339512s :BS_NODE DEBUG: [3] VDiskId# [80000001:1:2:2:0] status changed to READY 2025-05-07T08:46:39.817389Z 15 00h01m23.294536s :BS_NODE DEBUG: [15] VDiskId# [80000000:4:2:2:0] status changed to REPLICATING 2025-05-07T08:46:39.827433Z 15 00h01m30.962536s :BS_NODE DEBUG: [15] VDiskId# [80000000:4:2:2:0] status changed to READY 2025-05-07T08:46:39.828402Z 9 00h01m30.963048s :BS_NODE DEBUG: [9] NodeServiceSetUpdate 2025-05-07T08:46:39.828471Z 9 00h01m30.963048s :BS_NODE DEBUG: [9] VDiskId# [80000000:3:2:2:0] destroyed 2025-05-07T08:46:39.829109Z 15 00h01m33.101512s :BS_NODE DEBUG: [15] VDiskId# [80000001:1:1:2:0] status changed to READY 2025-05-07T08:46:39.829796Z 12 00h01m35.668512s :BS_NODE DEBUG: [12] VDiskId# [80000001:1:0:2:0] status changed to READY 2025-05-07T08:46:39.838908Z 1 00h01m39.260512s :BS_NODE DEBUG: [1] VDiskId# [80000001:1:2:0:0] status changed to READY >> BlobDepotWithTestShard::PlainGroup [GOOD] >> TGenerateQueueIdTests::MakeQueueIdBasic [GOOD] >> TParseParamsTests::CreateUser [GOOD] >> TParseParamsTests::ChangeMessageVisibilityBatchRequest [GOOD] >> TParseParamsTests::DeleteMessageBatchRequest [GOOD] >> TParseParamsTests::MessageBody [GOOD] >> TParseParamsTests::SendMessageBatchRequest [GOOD] >> TParseParamsTests::DeleteQueueBatchRequest [GOOD] >> TParseParamsTests::PurgeQueueBatchRequest [GOOD] >> TParseParamsTests::GetQueueAttributesBatchRequest [GOOD] >> TParseParamsTests::UnnumberedAttribute [GOOD] >> TParseParamsTests::UnnumberedAttributeName [GOOD] >> TParseParamsTests::FailsOnInvalidDeduplicationId [GOOD] >> TParseParamsTests::FailsOnInvalidGroupId >> TParseParamsTests::FailsOnInvalidGroupId [GOOD] >> TParseParamsTests::FailsOnInvalidReceiveRequestAttemptId [GOOD] >> TParseParamsTests::FailsOnInvalidMaxNumberOfMessages [GOOD] >> TParseParamsTests::FailsOnInvalidWaitTime [GOOD] >> TParseParamsTests::FailsOnInvalidDelaySeconds [GOOD] |88.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_read_only_pdisk/unittest |88.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_testshard/unittest >> BlobDepotWithTestShard::PlainGroup [GOOD] |88.2%| [TA] $(B)/ydb/core/blobstorage/ut_blobstorage/ut_stop_pdisk/test-results/unittest/{meta.json ... 
results_accumulator.log} |88.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_donor/unittest >> Splitter::Crit [GOOD] >> Splitter::CritSimple >> BlobDepot::BasicPutAndGet ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_balancing/unittest >> VDiskBalancing::TestStopOneNode_Mirror3dc_HugeBlob [GOOD] Test command err: RandomSeed# 8713231994486203090 SEND TEvPut with key [1:1:1:0:0:533504:0] TEvPutResult: TEvPutResult {Id# [1:1:1:0:0:533504:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:2:0:0:533504:0] 2025-05-07T08:46:35.516462Z 1 00h01m00.010512s :PIPE_SERVER ERROR: [72057594037932033] NodeDisconnected NodeId# 2 TEvPutResult: TEvPutResult {Id# [1:1:2:0:0:533504:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Start compaction Finish compaction |88.2%| [TS] {asan, default-linux-x86_64, release} ydb/core/ymq/ut/unittest >> TParseParamsTests::FailsOnInvalidDelaySeconds [GOOD] >> TBsVDiskRange::RangeGetFromEmptyDB [GOOD] >> TBsVDiskRange::Simple3PutRangeGetAllBackwardFresh >> Donor::MultipleEvicts >> MetadataConversion::MakeAuthTest [GOOD] >> MetadataConversion::ConvertingExternalSourceMetadata [GOOD] >> BsControllerTest::TestLocalBrokenRelocation [GOOD] >> Donor::CheckOnlineReadRequestToDonor [GOOD] |88.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/apps/etcd_proxy/etcd_proxy |88.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/apps/etcd_proxy/etcd_proxy >> BSCReadOnlyPDisk::ReadOnlyNotAllowed [GOOD] |88.2%| [TA] {RESULT} $(B)/ydb/core/util/ut/test-results/unittest/{meta.json ... results_accumulator.log} |88.2%| [TM] {RESULT} ydb/core/blobstorage/ut_testshard/unittest |88.2%| [TA] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_stop_pdisk/test-results/unittest/{meta.json ... 
results_accumulator.log} |88.2%| [TS] {RESULT} ydb/core/ymq/ut/unittest |88.2%| [LD] {RESULT} $(B)/ydb/apps/etcd_proxy/etcd_proxy >> TBsVDiskRange::Simple3PutRangeGetAllBackwardFresh [GOOD] >> TBsVDiskRange::Simple3PutRangeGetAllBackwardCompaction >> BSCRestartPDisk::RestartNotAllowed [GOOD] >> BSCReadOnlyPDisk::ReadOnlySlay [GOOD] |88.2%| [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/gateway/ut/gtest >> MetadataConversion::ConvertingExternalSourceMetadata [GOOD] |88.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_scrub/ydb-core-blobstorage-ut_blobstorage-ut_scrub |88.2%| [TS] {RESULT} ydb/core/kqp/gateway/ut/gtest |88.2%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_scrub/ydb-core-blobstorage-ut_blobstorage-ut_scrub |88.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_scrub/ydb-core-blobstorage-ut_blobstorage-ut_scrub >> BlobDepot::BasicPutAndGet [GOOD] >> BlobDepot::TestBlockedEvGetRequest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_read_only_pdisk/unittest >> BSCReadOnlyPDisk::ReadOnlyNotAllowed [GOOD] Test command err: RandomSeed# 7620286956543772039 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut_selfheal/unittest >> BsControllerTest::TestLocalBrokenRelocation [GOOD] Test command err: 2025-05-07T08:46:33.847197Z 1 00h00m00.000000s :BS_NODE DEBUG: [1] Bootstrap 2025-05-07T08:46:33.847247Z 1 00h00m00.000000s :BS_NODE DEBUG: [1] Connect 2025-05-07T08:46:33.847350Z 2 00h00m00.000000s :BS_NODE DEBUG: [2] Bootstrap 2025-05-07T08:46:33.847379Z 2 00h00m00.000000s :BS_NODE DEBUG: [2] Connect 2025-05-07T08:46:33.847436Z 3 00h00m00.000000s :BS_NODE DEBUG: [3] Bootstrap 2025-05-07T08:46:33.847466Z 3 00h00m00.000000s :BS_NODE DEBUG: [3] Connect 2025-05-07T08:46:33.847515Z 4 00h00m00.000000s :BS_NODE DEBUG: [4] Bootstrap 2025-05-07T08:46:33.847546Z 4 00h00m00.000000s :BS_NODE DEBUG: [4] Connect 2025-05-07T08:46:33.847581Z 5 00h00m00.000000s :BS_NODE DEBUG: [5] Bootstrap 2025-05-07T08:46:33.847603Z 5 00h00m00.000000s :BS_NODE DEBUG: [5] Connect 2025-05-07T08:46:33.847636Z 6 00h00m00.000000s :BS_NODE DEBUG: [6] Bootstrap 2025-05-07T08:46:33.847658Z 6 00h00m00.000000s :BS_NODE DEBUG: [6] Connect 2025-05-07T08:46:33.847714Z 7 00h00m00.000000s :BS_NODE DEBUG: [7] Bootstrap 2025-05-07T08:46:33.847738Z 7 00h00m00.000000s :BS_NODE DEBUG: [7] Connect 2025-05-07T08:46:33.847776Z 8 00h00m00.000000s :BS_NODE DEBUG: [8] Bootstrap 2025-05-07T08:46:33.847798Z 8 00h00m00.000000s :BS_NODE DEBUG: [8] Connect 2025-05-07T08:46:33.847832Z 9 00h00m00.000000s :BS_NODE DEBUG: [9] Bootstrap 2025-05-07T08:46:33.847854Z 9 00h00m00.000000s :BS_NODE DEBUG: [9] Connect 2025-05-07T08:46:33.847895Z 10 00h00m00.000000s :BS_NODE DEBUG: [10] Bootstrap 2025-05-07T08:46:33.847918Z 10 00h00m00.000000s :BS_NODE DEBUG: [10] Connect 2025-05-07T08:46:33.847970Z 11 00h00m00.000000s :BS_NODE DEBUG: [11] Bootstrap 2025-05-07T08:46:33.848004Z 11 00h00m00.000000s :BS_NODE DEBUG: [11] Connect 2025-05-07T08:46:33.848052Z 12 00h00m00.000000s :BS_NODE DEBUG: [12] Bootstrap 2025-05-07T08:46:33.848083Z 12 00h00m00.000000s :BS_NODE DEBUG: [12] Connect 2025-05-07T08:46:33.848129Z 13 00h00m00.000000s :BS_NODE DEBUG: [13] Bootstrap 2025-05-07T08:46:33.848152Z 13 00h00m00.000000s :BS_NODE DEBUG: [13] Connect 2025-05-07T08:46:33.848186Z 14 00h00m00.000000s :BS_NODE DEBUG: [14] Bootstrap 2025-05-07T08:46:33.848211Z 14 00h00m00.000000s :BS_NODE DEBUG: [14] Connect 
2025-05-07T08:46:33.848246Z 15 00h00m00.000000s :BS_NODE DEBUG: [15] Bootstrap 2025-05-07T08:46:33.848269Z 15 00h00m00.000000s :BS_NODE DEBUG: [15] Connect 2025-05-07T08:46:33.848311Z 16 00h00m00.000000s :BS_NODE DEBUG: [16] Bootstrap 2025-05-07T08:46:33.848333Z 16 00h00m00.000000s :BS_NODE DEBUG: [16] Connect 2025-05-07T08:46:33.848367Z 17 00h00m00.000000s :BS_NODE DEBUG: [17] Bootstrap 2025-05-07T08:46:33.848388Z 17 00h00m00.000000s :BS_NODE DEBUG: [17] Connect 2025-05-07T08:46:33.848518Z 18 00h00m00.000000s :BS_NODE DEBUG: [18] Bootstrap 2025-05-07T08:46:33.848540Z 18 00h00m00.000000s :BS_NODE DEBUG: [18] Connect 2025-05-07T08:46:33.848609Z 19 00h00m00.000000s :BS_NODE DEBUG: [19] Bootstrap 2025-05-07T08:46:33.848651Z 19 00h00m00.000000s :BS_NODE DEBUG: [19] Connect 2025-05-07T08:46:33.848705Z 20 00h00m00.000000s :BS_NODE DEBUG: [20] Bootstrap 2025-05-07T08:46:33.848730Z 20 00h00m00.000000s :BS_NODE DEBUG: [20] Connect 2025-05-07T08:46:33.848767Z 21 00h00m00.000000s :BS_NODE DEBUG: [21] Bootstrap 2025-05-07T08:46:33.848793Z 21 00h00m00.000000s :BS_NODE DEBUG: [21] Connect 2025-05-07T08:46:33.848829Z 22 00h00m00.000000s :BS_NODE DEBUG: [22] Bootstrap 2025-05-07T08:46:33.848855Z 22 00h00m00.000000s :BS_NODE DEBUG: [22] Connect 2025-05-07T08:46:33.848923Z 23 00h00m00.000000s :BS_NODE DEBUG: [23] Bootstrap 2025-05-07T08:46:33.848958Z 23 00h00m00.000000s :BS_NODE DEBUG: [23] Connect 2025-05-07T08:46:33.848997Z 24 00h00m00.000000s :BS_NODE DEBUG: [24] Bootstrap 2025-05-07T08:46:33.849019Z 24 00h00m00.000000s :BS_NODE DEBUG: [24] Connect 2025-05-07T08:46:33.849054Z 25 00h00m00.000000s :BS_NODE DEBUG: [25] Bootstrap 2025-05-07T08:46:33.849076Z 25 00h00m00.000000s :BS_NODE DEBUG: [25] Connect 2025-05-07T08:46:33.849116Z 26 00h00m00.000000s :BS_NODE DEBUG: [26] Bootstrap 2025-05-07T08:46:33.849138Z 26 00h00m00.000000s :BS_NODE DEBUG: [26] Connect 2025-05-07T08:46:33.849196Z 27 00h00m00.000000s :BS_NODE DEBUG: [27] Bootstrap 2025-05-07T08:46:33.849229Z 27 00h00m00.000000s :BS_NODE DEBUG: [27] Connect 2025-05-07T08:46:33.849274Z 28 00h00m00.000000s :BS_NODE DEBUG: [28] Bootstrap 2025-05-07T08:46:33.849296Z 28 00h00m00.000000s :BS_NODE DEBUG: [28] Connect 2025-05-07T08:46:33.849335Z 29 00h00m00.000000s :BS_NODE DEBUG: [29] Bootstrap 2025-05-07T08:46:33.849358Z 29 00h00m00.000000s :BS_NODE DEBUG: [29] Connect 2025-05-07T08:46:33.849391Z 30 00h00m00.000000s :BS_NODE DEBUG: [30] Bootstrap 2025-05-07T08:46:33.849413Z 30 00h00m00.000000s :BS_NODE DEBUG: [30] Connect 2025-05-07T08:46:33.849446Z 31 00h00m00.000000s :BS_NODE DEBUG: [31] Bootstrap 2025-05-07T08:46:33.849469Z 31 00h00m00.000000s :BS_NODE DEBUG: [31] Connect 2025-05-07T08:46:33.849511Z 32 00h00m00.000000s :BS_NODE DEBUG: [32] Bootstrap 2025-05-07T08:46:33.849534Z 32 00h00m00.000000s :BS_NODE DEBUG: [32] Connect 2025-05-07T08:46:33.849589Z 33 00h00m00.000000s :BS_NODE DEBUG: [33] Bootstrap 2025-05-07T08:46:33.849616Z 33 00h00m00.000000s :BS_NODE DEBUG: [33] Connect 2025-05-07T08:46:33.849667Z 34 00h00m00.000000s :BS_NODE DEBUG: [34] Bootstrap 2025-05-07T08:46:33.849690Z 34 00h00m00.000000s :BS_NODE DEBUG: [34] Connect 2025-05-07T08:46:33.849743Z 35 00h00m00.000000s :BS_NODE DEBUG: [35] Bootstrap 2025-05-07T08:46:33.849773Z 35 00h00m00.000000s :BS_NODE DEBUG: [35] Connect 2025-05-07T08:46:33.849820Z 36 00h00m00.000000s :BS_NODE DEBUG: [36] Bootstrap 2025-05-07T08:46:33.849843Z 36 00h00m00.000000s :BS_NODE DEBUG: [36] Connect 2025-05-07T08:46:33.866877Z 1 00h00m00.000000s :BS_NODE DEBUG: [1] ClientConnected Sender# [1:2713:53] Status# ERROR 
ClientId# [1:2713:53] ServerId# [0:0:0] PipeClient# [1:2713:53] 2025-05-07T08:46:33.868280Z 2 00h00m00.000000s :BS_NODE DEBUG: [2] ClientConnected Sender# [2:2714:41] Status# ERROR ClientId# [2:2714:41] ServerId# [0:0:0] PipeClient# [2:2714:41] 2025-05-07T08:46:33.868341Z 3 00h00m00.000000s :BS_NODE DEBUG: [3] ClientConnected Sender# [3:2715:41] Status# ERROR ClientId# [3:2715:41] ServerId# [0:0:0] PipeClient# [3:2715:41] 2025-05-07T08:46:33.868384Z 4 00h00m00.000000s :BS_NODE DEBUG: [4] ClientConnected Sender# [4:2716:41] Status# ERROR ClientId# [4:2716:41] ServerId# [0:0:0] PipeClient# [4:2716:41] 2025-05-07T08:46:33.868441Z 5 00h00m00.000000s :BS_NODE DEBUG: [5] ClientConnected Sender# [5:2717:41] Status# ERROR ClientId# [5:2717:41] ServerId# [0:0:0] PipeClient# [5:2717:41] 2025-05-07T08:46:33.868482Z 6 00h00m00.000000s :BS_NODE DEBUG: [6] ClientConnected Sender# [6:2718:41] Status# ERROR ClientId# [6:2718:41] ServerId# [0:0:0] PipeClient# [6:2718:41] 2025-05-07T08:46:33.868769Z 7 00h00m00.000000s :BS_NODE DEBUG: [7] ClientConnected Sender# [7:2719:41] Status# ERROR ClientId# [7:2719:41] ServerId# [0:0:0] PipeClient# [7:2719:41] 2025-05-07T08:46:33.868844Z 8 00h00m00.000000s :BS_NODE DEBUG: [8] ClientConnected Sender# [8:2720:41] Status# ERROR ClientId# [8:2720:41] ServerId# [0:0:0] PipeClient# [8:2720:41] 2025-05-07T08:46:33.868884Z 9 00h00m00.000000s :BS_NODE DEBUG: [9] ClientConnected Sender# [9:2721:41] Status# ERROR ClientId# [9:2721:41] ServerId# [0:0:0] PipeClient# [9:2721:41] 2025-05-07T08:46:33.868942Z 10 00h00m00.000000s :BS_NODE DEBUG: [10] ClientConnected Sender# [10:2722:41] Status# ERROR ClientId# [10:2722:41] ServerId# [0:0:0] PipeClient# [10:2722:41] 2025-05-07T08:46:33.869003Z 11 00h00m00.000000s :BS_NODE DEBUG: [11] ClientConnected Sender# [11:2723:41] Status# ERROR ClientId# [11:2723:41] ServerId# [0:0:0] PipeClient# [11:2723:41] 2025-05-07T08:46:33.869051Z 12 00h00m00.000000s :BS_NODE DEBUG: [12] ClientConnected Sender# [12:2724:41] Status# ERROR ClientId# [12:2724:41] ServerId# [0:0:0] PipeClient# [12:2724:41] 2025-05-07T08:46:33.869092Z 13 00h00m00.000000s :BS_NODE DEBUG: [13] ClientConnected Sender# [13:2725:41] Status# ERROR ClientId# [13:2725:41] ServerId# [0:0:0] PipeClient# [13:2725:41] 2025-05-07T08:46:33.869149Z 14 00h00m00.000000s :BS_NODE DEBUG: [14] ClientConnected Sender# [14:2726:41] Status# ERROR ClientId# [14:2726:41] ServerId# [0:0:0] PipeClient# [14:2726:41] 2025-05-07T08:46:33.869192Z 15 00h00m00.000000s :BS_NODE DEBUG: [15] ClientConnected Sender# [15:2727:41] Status# ERROR ClientId# [15:2727:41] ServerId# [0:0:0] PipeClient# [15:2727:41] 2025-05-07T08:46:33.869236Z 16 00h00m00.000000s :BS_NODE DEBUG: [16] ClientConnected Sender# [16:2728:41] Status# ERROR ClientId# [16:2728:41] ServerId# [0:0:0] PipeClient# [16:2728:41] 2025-05-07T08:46:33.869275Z 17 00h00m00.000000s :BS_NODE DEBUG: [17] ClientConnected Sender# [17:2729:41] Status# ERROR ClientId# [17:2729:41] ServerId# [0:0:0] PipeClient# [17:2729:41] 2025-05-07T08:46:33.869312Z 18 00h00m00.000000s :BS_NODE DEBUG: [18] ClientConnected Sender# [18:2730:41] Status# ERROR ClientId# [18:2730:41] ServerId# [0:0:0] PipeClient# [18:2730:41] 2025-05-07T08:46:33.869353Z 19 00h00m00.000000s :BS_NODE DEBUG: [19] ClientConnected Sender# [19:2731:41] Status# ERROR ClientId# [19:2731:41] ServerId# [0:0:0] PipeClient# [19:2731:41] 2025-05-07T08:46:33.869394Z 20 00h00m00.000000s :BS_NODE DEBUG: [20] ClientConnected Sender# [20:2732:41] Status# ERROR ClientId# [20:2732:41] ServerId# [0:0:0] PipeClient# 
[20:2732:41] 2025-05-07T08:46:33.869450Z 21 00h00m00.000000s :BS_NODE DEBUG: [21] ClientConnected Sender# [21:2733:41] Status# ERROR ClientId# [21:2733:41] ServerId# [0:0:0] PipeClient# [21:2733:41] 2025-05-07T08:46:33.869496Z 22 00h00m00.000000s :BS_NODE DEBUG: [22] ClientConnected Sender# [22:2734:41] Status# ERROR ClientId# [22:2734:41] ServerId# [0:0:0] PipeClient# [22:2734:41] 2025-05-07T08:46:33.869534Z 23 00h00m00.000000s :BS_NODE DEBUG: [23] ClientConnected Sender# [23:2735:41] Status# ERROR ClientId# [23:2735:41] ServerId# [0:0:0] PipeClient# [23:2735:41] 2025-05-07T08:46:33.869594Z 24 00h00m00.000000s :BS_NODE DEBUG: [24] ClientConnected Sender# [24:2736:41] Status# ERROR ClientId# [24:2736:41] ServerId# [0:0:0] PipeClient# [24:2736:41] 2025-05-07T08:46:33.869634Z 25 00h00m00.000000s :BS_NODE DEBUG: [25] ClientConnected Sender# [25:2737:41] Status# ERROR ClientId# [25:2737:41] ServerId# [0:0:0] PipeClient# [25:2737:41] 2025-05-07T08:46:33.869674Z 26 00h00m00.000000s :BS_NODE DEBUG: [26] ClientConnected Sender# [26:2738:41] Status# ERROR ClientId# [26:2738:41] ServerId# [0:0:0] PipeClient# [26:2738:41] 2025-05-07T08:46:33.869711Z 27 00h00m00.000000s :BS_NODE DEBUG: [27] ClientConnected Sender# [27:2739:41] Status# ERROR ClientId# [27:2739:41] ServerId# [0:0:0] PipeClient# [27:2739:41] 2025-05-07T08:46:33.869751Z 28 00h00m00.000000s :BS_NODE DEBUG: [28] ClientConnected Sender# [28:2740:41] Status# ERROR ClientId# [28:2740:41] ServerId# [0:0:0] PipeClient# [28:2740:41] 2025-05-07T08:46:33.869793Z 29 00h00m00.000000s :BS_NODE DEBUG: [29] ClientConnected Sender# [29:2741:41] Status# ERROR ClientId# [29:2741:41] ServerId# [0:0:0] PipeClient# [29:2741:41] 2025-05-07T08:46:33.869834Z 30 00h00m00.000000s :BS_NODE DEBUG: [30] ClientConnected Sender# [30:2742:41] Status# ERROR ClientId# [30:2742:41] ServerId# [0:0:0] PipeClient# [30:2742:41] 2025-05-07T08:46:33.869872Z 31 00h00m00.000000s :BS_NODE DEBUG: [31] ClientConnected Sender# [31:2743:41] Status# ERROR ClientId# [31:2743:41] ServerId# [0:0:0] PipeClient# [31:2743:41] 2025-05-07T08:46:33.869910Z 32 00h00m00.000000s :BS_NODE DEBUG: [32] ClientConnected Sender# [32:2744:41] Status# ERROR ClientId# [32:2744:41] ServerId# [0:0:0] PipeClient# [32:2744:41] 2025-05-07T08:46:33.869951Z 33 00h00m00.000000s :BS_NODE DEBUG: [33] ClientConnected Sender# [33:2745:41] Status# ERROR ClientId# [33:2745:41] ServerId# [0:0:0] PipeClient# [33:2745:41] 2025-05-07T08:46:33.870018Z 34 00h00m00.000000s :BS_NODE DEBUG: [34] ClientConnected Sender# [34:2746:41] Status# ERROR ClientId# [34:2746:41] ServerId# [0:0:0] PipeClient# [34:2746:41] 2025-05-07T08:46:33.870085Z 35 00h00m00.000000s :BS_NODE DEBUG: [35] ClientConnected Sender# [35:2747:41] Status# ERROR ClientId# [35:2747:41 ... 
25m00.102560s :BS_NODE DEBUG: [28] VDiskId# [80000001:2:2:2:0] -> [80000001:3:2:2:0] 2025-05-07T08:46:40.785141Z 28 01h25m00.102560s :BS_NODE DEBUG: [28] VDiskId# [80000021:2:2:2:0] -> [80000021:3:2:2:0] 2025-05-07T08:46:40.785207Z 28 01h25m00.102560s :BS_NODE DEBUG: [28] VDiskId# [80000031:2:2:2:0] -> [80000031:3:2:2:0] 2025-05-07T08:46:40.785249Z 28 01h25m00.102560s :BS_NODE DEBUG: [28] VDiskId# [80000051:2:2:2:0] -> [80000051:3:2:2:0] 2025-05-07T08:46:40.785305Z 28 01h25m00.102560s :BS_NODE DEBUG: [28] VDiskId# [80000061:2:2:2:0] -> [80000061:3:2:2:0] 2025-05-07T08:46:40.785940Z 13 01h25m00.102560s :BS_NODE DEBUG: [13] NodeServiceSetUpdate 2025-05-07T08:46:40.786015Z 13 01h25m00.102560s :BS_NODE DEBUG: [13] VDiskId# [80000010:2:1:0:0] -> [80000010:3:1:0:0] 2025-05-07T08:46:40.786057Z 13 01h25m00.102560s :BS_NODE DEBUG: [13] VDiskId# [80000040:2:1:0:0] -> [80000040:3:1:0:0] 2025-05-07T08:46:40.786097Z 13 01h25m00.102560s :BS_NODE DEBUG: [13] VDiskId# [80000070:2:1:0:0] -> [80000070:3:1:0:0] 2025-05-07T08:46:40.786136Z 13 01h25m00.102560s :BS_NODE DEBUG: [13] VDiskId# [80000001:2:1:1:0] -> [80000001:3:1:1:0] 2025-05-07T08:46:40.786177Z 13 01h25m00.102560s :BS_NODE DEBUG: [13] VDiskId# [80000021:2:1:1:0] -> [80000021:3:1:1:0] 2025-05-07T08:46:40.786229Z 13 01h25m00.102560s :BS_NODE DEBUG: [13] VDiskId# [80000031:2:1:1:0] -> [80000031:3:1:1:0] 2025-05-07T08:46:40.786275Z 13 01h25m00.102560s :BS_NODE DEBUG: [13] VDiskId# [80000051:2:1:1:0] -> [80000051:3:1:1:0] 2025-05-07T08:46:40.786317Z 13 01h25m00.102560s :BS_NODE DEBUG: [13] VDiskId# [80000061:2:1:1:0] -> [80000061:3:1:1:0] 2025-05-07T08:46:40.786374Z 13 01h25m00.102560s :BS_NODE DEBUG: [13] VDiskId# [80000002:1:1:2:0] -> [80000002:2:1:2:0] 2025-05-07T08:46:40.794058Z 13 01h25m00.102560s :BS_NODE DEBUG: [13] VDiskId# [80000012:1:1:2:0] -> [80000012:2:1:2:0] 2025-05-07T08:46:40.794161Z 13 01h25m00.102560s :BS_NODE DEBUG: [13] VDiskId# [80000022:1:1:2:0] -> [80000022:2:1:2:0] 2025-05-07T08:46:40.794232Z 13 01h25m00.102560s :BS_NODE DEBUG: [13] VDiskId# [80000032:1:1:2:0] -> [80000032:2:1:2:0] 2025-05-07T08:46:40.794293Z 13 01h25m00.102560s :BS_NODE DEBUG: [13] VDiskId# [80000042:1:1:2:0] -> [80000042:2:1:2:0] 2025-05-07T08:46:40.794363Z 13 01h25m00.102560s :BS_NODE DEBUG: [13] VDiskId# [80000052:1:1:2:0] -> [80000052:2:1:2:0] 2025-05-07T08:46:40.794413Z 13 01h25m00.102560s :BS_NODE DEBUG: [13] VDiskId# [80000062:1:1:2:0] -> [80000062:2:1:2:0] 2025-05-07T08:46:40.794455Z 13 01h25m00.102560s :BS_NODE DEBUG: [13] VDiskId# [80000072:1:1:2:0] -> [80000072:2:1:2:0] 2025-05-07T08:46:40.795146Z 31 01h25m00.102560s :BS_NODE DEBUG: [31] NodeServiceSetUpdate 2025-05-07T08:46:40.795213Z 31 01h25m00.102560s :BS_NODE DEBUG: [31] VDiskId# [80000010:2:2:2:0] -> [80000010:3:2:2:0] 2025-05-07T08:46:40.795259Z 31 01h25m00.102560s :BS_NODE DEBUG: [31] VDiskId# [80000040:2:2:2:0] -> [80000040:3:2:2:0] 2025-05-07T08:46:40.795322Z 31 01h25m00.102560s :BS_NODE DEBUG: [31] VDiskId# [80000070:2:2:2:0] -> [80000070:3:2:2:0] 2025-05-07T08:46:40.795376Z 31 01h25m00.102560s :BS_NODE DEBUG: [31] VDiskId# [80000002:1:2:0:0] -> [80000002:2:2:0:0] 2025-05-07T08:46:40.795423Z 31 01h25m00.102560s :BS_NODE DEBUG: [31] VDiskId# [80000012:1:2:0:0] -> [80000012:2:2:0:0] 2025-05-07T08:46:40.795466Z 31 01h25m00.102560s :BS_NODE DEBUG: [31] VDiskId# [80000022:1:2:0:0] -> [80000022:2:2:0:0] 2025-05-07T08:46:40.795528Z 31 01h25m00.102560s :BS_NODE DEBUG: [31] VDiskId# [80000032:1:2:0:0] -> [80000032:2:2:0:0] 2025-05-07T08:46:40.795572Z 31 01h25m00.102560s :BS_NODE DEBUG: [31] 
VDiskId# [80000042:1:2:0:0] -> [80000042:2:2:0:0] 2025-05-07T08:46:40.795616Z 31 01h25m00.102560s :BS_NODE DEBUG: [31] VDiskId# [80000052:1:2:0:0] -> [80000052:2:2:0:0] 2025-05-07T08:46:40.795682Z 31 01h25m00.102560s :BS_NODE DEBUG: [31] VDiskId# [80000062:1:2:0:0] -> [80000062:2:2:0:0] 2025-05-07T08:46:40.795738Z 31 01h25m00.102560s :BS_NODE DEBUG: [31] VDiskId# [80000072:1:2:0:0] -> [80000072:2:2:0:0] 2025-05-07T08:46:40.796187Z 16 01h25m00.102560s :BS_NODE DEBUG: [16] NodeServiceSetUpdate 2025-05-07T08:46:40.796241Z 16 01h25m00.102560s :BS_NODE DEBUG: [16] VDiskId# [80000010:2:1:1:0] -> [80000010:3:1:1:0] 2025-05-07T08:46:40.796286Z 16 01h25m00.102560s :BS_NODE DEBUG: [16] VDiskId# [80000040:2:1:1:0] -> [80000040:3:1:1:0] 2025-05-07T08:46:40.796341Z 16 01h25m00.102560s :BS_NODE DEBUG: [16] VDiskId# [80000070:2:1:1:0] -> [80000070:3:1:1:0] 2025-05-07T08:46:40.796386Z 16 01h25m00.102560s :BS_NODE DEBUG: [16] VDiskId# [80000001:2:1:2:0] -> [80000001:3:1:2:0] 2025-05-07T08:46:40.796441Z 16 01h25m00.102560s :BS_NODE DEBUG: [16] VDiskId# [80000021:2:1:2:0] -> [80000021:3:1:2:0] 2025-05-07T08:46:40.796490Z 16 01h25m00.102560s :BS_NODE DEBUG: [16] VDiskId# [80000031:2:1:2:0] -> [80000031:3:1:2:0] 2025-05-07T08:46:40.796546Z 16 01h25m00.102560s :BS_NODE DEBUG: [16] VDiskId# [80000051:2:1:2:0] -> [80000051:3:1:2:0] 2025-05-07T08:46:40.796598Z 16 01h25m00.102560s :BS_NODE DEBUG: [16] VDiskId# [80000061:2:1:2:0] -> [80000061:3:1:2:0] 2025-05-07T08:46:40.807630Z 4 01h25m01.200560s :BS_NODE DEBUG: [4] VDiskId# [80000022:2:0:2:0] status changed to REPLICATING 2025-05-07T08:46:40.808179Z 7 01h25m01.438560s :BS_NODE DEBUG: [7] VDiskId# [80000001:3:0:1:0] status changed to REPLICATING 2025-05-07T08:46:40.808597Z 5 01h25m01.512560s :BS_NODE DEBUG: [5] VDiskId# [80000072:2:0:2:0] status changed to REPLICATING 2025-05-07T08:46:40.809013Z 4 01h25m01.838560s :BS_NODE DEBUG: [4] VDiskId# [80000002:2:0:2:0] status changed to REPLICATING 2025-05-07T08:46:40.809434Z 10 01h25m02.221560s :BS_NODE DEBUG: [10] VDiskId# [80000070:3:0:0:0] status changed to REPLICATING 2025-05-07T08:46:40.809920Z 10 01h25m02.360560s :BS_NODE DEBUG: [10] VDiskId# [80000010:3:0:0:0] status changed to REPLICATING 2025-05-07T08:46:40.818854Z 2 01h25m02.814560s :BS_NODE DEBUG: [2] VDiskId# [80000042:2:0:2:0] status changed to REPLICATING 2025-05-07T08:46:40.819401Z 7 01h25m03.088560s :BS_NODE DEBUG: [7] VDiskId# [80000021:3:0:1:0] status changed to REPLICATING 2025-05-07T08:46:40.819871Z 4 01h25m03.254560s :BS_NODE DEBUG: [4] VDiskId# [80000012:2:0:2:0] status changed to REPLICATING 2025-05-07T08:46:40.820289Z 8 01h25m03.863560s :BS_NODE DEBUG: [8] VDiskId# [80000061:3:0:1:0] status changed to REPLICATING 2025-05-07T08:46:40.820715Z 2 01h25m04.181560s :BS_NODE DEBUG: [2] VDiskId# [80000062:2:0:2:0] status changed to REPLICATING 2025-05-07T08:46:40.830922Z 4 01h25m05.169560s :BS_NODE DEBUG: [4] VDiskId# [80000032:2:0:2:0] status changed to REPLICATING 2025-05-07T08:46:40.831565Z 10 01h25m05.254560s :BS_NODE DEBUG: [10] VDiskId# [80000040:3:0:0:0] status changed to REPLICATING 2025-05-07T08:46:40.832088Z 7 01h25m05.351560s :BS_NODE DEBUG: [7] VDiskId# [80000031:3:0:1:0] status changed to REPLICATING 2025-05-07T08:46:40.832542Z 5 01h25m05.796560s :BS_NODE DEBUG: [5] VDiskId# [80000052:2:0:2:0] status changed to REPLICATING 2025-05-07T08:46:40.832940Z 7 01h25m05.812560s :BS_NODE DEBUG: [7] VDiskId# [80000051:3:0:1:0] status changed to REPLICATING 2025-05-07T08:46:40.833764Z 7 01h25m11.299560s :BS_NODE DEBUG: [7] VDiskId# [80000001:3:0:1:0] 
status changed to READY 2025-05-07T08:46:40.843123Z 1 01h25m11.300072s :BS_NODE DEBUG: [1] NodeServiceSetUpdate 2025-05-07T08:46:40.843207Z 1 01h25m11.300072s :BS_NODE DEBUG: [1] VDiskId# [80000001:2:0:1:0] destroyed 2025-05-07T08:46:40.843433Z 10 01h25m14.868560s :BS_NODE DEBUG: [10] VDiskId# [80000010:3:0:0:0] status changed to READY 2025-05-07T08:46:40.844412Z 1 01h25m14.869072s :BS_NODE DEBUG: [1] NodeServiceSetUpdate 2025-05-07T08:46:40.844466Z 1 01h25m14.869072s :BS_NODE DEBUG: [1] VDiskId# [80000010:2:0:0:0] destroyed 2025-05-07T08:46:40.845345Z 4 01h25m15.236560s :BS_NODE DEBUG: [4] VDiskId# [80000002:2:0:2:0] status changed to READY 2025-05-07T08:46:40.854580Z 1 01h25m15.237072s :BS_NODE DEBUG: [1] NodeServiceSetUpdate 2025-05-07T08:46:40.854662Z 1 01h25m15.237072s :BS_NODE DEBUG: [1] VDiskId# [80000002:1:0:2:0] destroyed 2025-05-07T08:46:40.854878Z 7 01h25m16.829560s :BS_NODE DEBUG: [7] VDiskId# [80000051:3:0:1:0] status changed to READY 2025-05-07T08:46:40.855977Z 1 01h25m16.830072s :BS_NODE DEBUG: [1] NodeServiceSetUpdate 2025-05-07T08:46:40.856032Z 1 01h25m16.830072s :BS_NODE DEBUG: [1] VDiskId# [80000051:2:0:1:0] destroyed 2025-05-07T08:46:40.856195Z 10 01h25m19.981560s :BS_NODE DEBUG: [10] VDiskId# [80000070:3:0:0:0] status changed to READY 2025-05-07T08:46:40.857053Z 1 01h25m19.982072s :BS_NODE DEBUG: [1] NodeServiceSetUpdate 2025-05-07T08:46:40.857100Z 1 01h25m19.982072s :BS_NODE DEBUG: [1] VDiskId# [80000070:2:0:0:0] destroyed 2025-05-07T08:46:40.857561Z 5 01h25m20.195560s :BS_NODE DEBUG: [5] VDiskId# [80000052:2:0:2:0] status changed to READY 2025-05-07T08:46:40.866660Z 1 01h25m20.196072s :BS_NODE DEBUG: [1] NodeServiceSetUpdate 2025-05-07T08:46:40.866767Z 1 01h25m20.196072s :BS_NODE DEBUG: [1] VDiskId# [80000052:1:0:2:0] destroyed 2025-05-07T08:46:40.867002Z 7 01h25m24.614560s :BS_NODE DEBUG: [7] VDiskId# [80000031:3:0:1:0] status changed to READY 2025-05-07T08:46:40.867917Z 1 01h25m24.615072s :BS_NODE DEBUG: [1] NodeServiceSetUpdate 2025-05-07T08:46:40.867967Z 1 01h25m24.615072s :BS_NODE DEBUG: [1] VDiskId# [80000031:2:0:1:0] destroyed 2025-05-07T08:46:40.868370Z 4 01h25m25.764560s :BS_NODE DEBUG: [4] VDiskId# [80000032:2:0:2:0] status changed to READY 2025-05-07T08:46:40.869132Z 1 01h25m25.765072s :BS_NODE DEBUG: [1] NodeServiceSetUpdate 2025-05-07T08:46:40.869177Z 1 01h25m25.765072s :BS_NODE DEBUG: [1] VDiskId# [80000032:1:0:2:0] destroyed 2025-05-07T08:46:40.869260Z 4 01h25m25.775560s :BS_NODE DEBUG: [4] VDiskId# [80000022:2:0:2:0] status changed to READY 2025-05-07T08:46:40.869897Z 1 01h25m25.776072s :BS_NODE DEBUG: [1] NodeServiceSetUpdate 2025-05-07T08:46:40.869963Z 1 01h25m25.776072s :BS_NODE DEBUG: [1] VDiskId# [80000022:1:0:2:0] destroyed 2025-05-07T08:46:40.880345Z 5 01h25m31.813560s :BS_NODE DEBUG: [5] VDiskId# [80000072:2:0:2:0] status changed to READY 2025-05-07T08:46:40.881382Z 1 01h25m31.814072s :BS_NODE DEBUG: [1] NodeServiceSetUpdate 2025-05-07T08:46:40.881496Z 1 01h25m31.814072s :BS_NODE DEBUG: [1] VDiskId# [80000072:1:0:2:0] destroyed 2025-05-07T08:46:40.881920Z 4 01h25m32.962560s :BS_NODE DEBUG: [4] VDiskId# [80000012:2:0:2:0] status changed to READY 2025-05-07T08:46:40.887951Z 1 01h25m32.963072s :BS_NODE DEBUG: [1] NodeServiceSetUpdate 2025-05-07T08:46:40.888018Z 1 01h25m32.963072s :BS_NODE DEBUG: [1] VDiskId# [80000012:1:0:2:0] destroyed 2025-05-07T08:46:40.888163Z 2 01h25m33.022560s :BS_NODE DEBUG: [2] VDiskId# [80000062:2:0:2:0] status changed to READY 2025-05-07T08:46:40.888969Z 1 01h25m33.023072s :BS_NODE DEBUG: [1] NodeServiceSetUpdate 
2025-05-07T08:46:40.889022Z 1 01h25m33.023072s :BS_NODE DEBUG: [1] VDiskId# [80000062:1:0:2:0] destroyed 2025-05-07T08:46:40.889134Z 7 01h25m34.401560s :BS_NODE DEBUG: [7] VDiskId# [80000021:3:0:1:0] status changed to READY 2025-05-07T08:46:40.889826Z 1 01h25m34.402072s :BS_NODE DEBUG: [1] NodeServiceSetUpdate 2025-05-07T08:46:40.889873Z 1 01h25m34.402072s :BS_NODE DEBUG: [1] VDiskId# [80000021:2:0:1:0] destroyed 2025-05-07T08:46:40.899151Z 10 01h25m35.276560s :BS_NODE DEBUG: [10] VDiskId# [80000040:3:0:0:0] status changed to READY 2025-05-07T08:46:40.900142Z 1 01h25m35.277072s :BS_NODE DEBUG: [1] NodeServiceSetUpdate 2025-05-07T08:46:40.900200Z 1 01h25m35.277072s :BS_NODE DEBUG: [1] VDiskId# [80000040:2:0:0:0] destroyed 2025-05-07T08:46:40.900347Z 8 01h25m37.324560s :BS_NODE DEBUG: [8] VDiskId# [80000061:3:0:1:0] status changed to READY 2025-05-07T08:46:40.901145Z 1 01h25m37.325072s :BS_NODE DEBUG: [1] NodeServiceSetUpdate 2025-05-07T08:46:40.901194Z 1 01h25m37.325072s :BS_NODE DEBUG: [1] VDiskId# [80000061:2:0:1:0] destroyed 2025-05-07T08:46:40.901319Z 2 01h25m37.515560s :BS_NODE DEBUG: [2] VDiskId# [80000042:2:0:2:0] status changed to READY 2025-05-07T08:46:40.907569Z 1 01h25m37.516072s :BS_NODE DEBUG: [1] NodeServiceSetUpdate 2025-05-07T08:46:40.907647Z 1 01h25m37.516072s :BS_NODE DEBUG: [1] VDiskId# [80000042:1:0:2:0] destroyed ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_donor/unittest >> Donor::CheckOnlineReadRequestToDonor [GOOD] Test command err: RandomSeed# 10686580394400123079 2025-05-07T08:46:43.182234Z 7 00h01m11.311024s :BS_SYNCER ERROR: PDiskId# 1001 VDISK[82000000:_:0:6:0]: (2181038080) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2025-05-07T08:46:43.184594Z 7 00h01m11.311024s :BS_SYNCER ERROR: PDiskId# 1001 VDISK[82000000:_:0:6:0]: (2181038080) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 5141564385442957663] 2025-05-07T08:46:43.229271Z 7 00h01m11.311024s :BS_SYNCER ERROR: PDiskId# 1001 VDISK[82000000:_:0:6:0]: (2181038080) THullOsirisActor: RESURRECT: id# [1:1:0:0:0:2097152:1] 2025-05-07T08:46:43.229547Z 7 00h01m11.311024s :BS_SYNCER ERROR: PDiskId# 1001 VDISK[82000000:_:0:6:0]: (2181038080) THullOsirisActor: FINISH: BlobsResurrected# 1 PartsResurrected# 1 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_restart_pdisk/unittest >> BSCRestartPDisk::RestartNotAllowed [GOOD] Test command err: RandomSeed# 11537555588270060275 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_read_only_pdisk/unittest >> BSCReadOnlyPDisk::ReadOnlySlay [GOOD] Test command err: RandomSeed# 10748337788436592645 2025-05-07T08:46:30.937755Z 1 00h01m14.361536s :BS_SYNCER ERROR: PDiskId# 1001 VDISK[82000000:_:0:0:0]: (2181038080) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2025-05-07T08:46:30.940164Z 1 00h01m14.361536s :BS_SYNCER ERROR: PDiskId# 1001 VDISK[82000000:_:0:0:0]: (2181038080) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 4925598403533277821] 2025-05-07T08:46:30.972665Z 1 00h01m14.361536s :BS_SYNCER ERROR: PDiskId# 1001 VDISK[82000000:_:0:0:0]: (2181038080) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0 >> BSCReadOnlyPDisk::RestartAndReadOnlyConsecutive [GOOD] >> TBsLocalRecovery::ChaoticWriteRestartHugeXXX [GOOD] >> TBsLocalRecovery::ChaoticWriteRestartHugeIncreased >> TBsVDiskRange::Simple3PutRangeGetAllBackwardCompaction [GOOD] >> 
TBsLocalRecovery::ChaoticWriteRestart [GOOD] >> TBsLocalRecovery::ChaoticWriteRestartHuge [GOOD] >> TBsLocalRecovery::ChaoticWriteRestartHugeDecreased >> BlobDepot::TestBlockedEvGetRequest [GOOD] >> BlobDepot::BasicRange >> test_transform.py::TestYamlConfigTransformations::test_simplified[dump] [GOOD] >> test_transform.py::TestYamlConfigTransformations::test_simplified[dump_ds_init] >> TBsVDiskManyPutGetCheckSize::ManyPutGetCheckSize [GOOD] >> Donor::MultipleEvicts [GOOD] |88.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_vdisk/unittest >> TBsVDiskRange::Simple3PutRangeGetAllBackwardCompaction [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_read_only_pdisk/unittest >> BSCReadOnlyPDisk::RestartAndReadOnlyConsecutive [GOOD] Test command err: RandomSeed# 18124886900238200806 >> VDiskBalancing::TestDontSendToReadOnlyTest_Block42 [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_vdisk/unittest >> TBsVDiskManyPutGetCheckSize::ManyPutGetCheckSize [GOOD] Test command err: 2025-05-07T08:46:46.705987Z :BS_VDISK_GET CRIT: query_base.h:102: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVGetResult: Result message is too large; size# 67108001 orig# {ExtrQuery# [5000:1:0:0:0:100000:1] sh# 257 sz# 99743 c# 0}{ExtrQuery# [5000:1:1:0:0:100000:1] sh# 257 sz# 99743 c# 1}{ExtrQuery# [5000:1:2:0:0:100000:1] sh# 257 sz# 99743 c# 2}{ExtrQuery# [5000:1:3:0:0:100000:1] sh# 257 sz# 99743 c# 3}{ExtrQuery# [5000:1:4:0:0:100000:1] sh# 257 sz# 99743 c# 4}{ExtrQuery# [5000:1:5:0:0:100000:1] sh# 257 sz# 99743 c# 5}{ExtrQuery# [5000:1:6:0:0:100000:1] sh# 257 sz# 99743 c# 6}{ExtrQuery# [5000:1:7:0:0:100000:1] sh# 257 sz# 99743 c# 7}{ExtrQuery# [5000:1:8:0:0:100000:1] sh# 257 sz# 99743 c# 8}{ExtrQuery# [5000:1:9:0:0:100000:1] sh# 257 sz# 99743 c# 9}{ExtrQuery# [5000:1:10:0:0:100000:1] sh# 257 sz# 99743 c# 10}{ExtrQuery# [5000:1:11:0:0:100000:1] sh# 257 sz# 99743 c# 11}{ExtrQuery# [5000:1:12:0:0:100000:1] sh# 257 sz# 99743 c# 12}{ExtrQuery# [5000:1:13:0:0:100000:1] sh# 257 sz# 99743 c# 13}{ExtrQuery# [5000:1:14:0:0:100000:1] sh# 257 sz# 99743 c# 14}{ExtrQuery# [5000:1:15:0:0:100000:1] sh# 257 sz# 99743 c# 15}{ExtrQuery# [5000:1:16:0:0:100000:1] sh# 257 sz# 99743 c# 16}{ExtrQuery# [5000:1:17:0:0:100000:1] sh# 257 sz# 99743 c# 17}{ExtrQuery# [5000:1:18:0:0:100000:1] sh# 257 sz# 99743 c# 18}{ExtrQuery# [5000:1:19:0:0:100000:1] sh# 257 sz# 99743 c# 19}{ExtrQuery# [5000:1:20:0:0:100000:1] sh# 257 sz# 99743 c# 20}{ExtrQuery# [5000:1:21:0:0:100000:1] sh# 257 sz# 99743 c# 21}{ExtrQuery# [5000:1:22:0:0:100000:1] sh# 257 sz# 99743 c# 22}{ExtrQuery# [5000:1:23:0:0:100000:1] sh# 257 sz# 99743 c# 23}{ExtrQuery# [5000:1:24:0:0:100000:1] sh# 257 sz# 99743 c# 24}{ExtrQuery# [5000:1:25:0:0:100000:1] sh# 257 sz# 99743 c# 25}{ExtrQuery# [5000:1:26:0:0:100000:1] sh# 257 sz# 99743 c# 26}{ExtrQuery# [5000:1:27:0:0:100000:1] sh# 257 sz# 99743 c# 27}{ExtrQuery# [5000:1:28:0:0:100000:1] sh# 257 sz# 99743 c# 28}{ExtrQuery# [5000:1:29:0:0:100000:1] sh# 257 sz# 99743 c# 29}{ExtrQuery# [5000:1:30:0:0:100000:1] sh# 257 sz# 99743 c# 30}{ExtrQuery# [5000:1:31:0:0:100000:1] sh# 257 sz# 99743 c# 31}{ExtrQuery# [5000:1:32:0:0:100000:1] sh# 257 sz# 99743 c# 32}{ExtrQuery# [5000:1:33:0:0:100000:1] sh# 257 sz# 99743 c# 33}{ExtrQuery# [5000:1:34:0:0:100000:1] sh# 257 sz# 99743 c# 34}{ExtrQuery# [5000:1:35:0:0:100000:1] sh# 257 sz# 99743 c# 35}{ExtrQuery# [5000:1:36:0:0:100000:1] sh# 257 sz# 99743 c# 36}{ExtrQuery# [5000:1:37:0:0:100000:1] sh# 257 sz# 99743 c# 
37}{ExtrQuery# [5000:1:38:0:0:100000:1] sh# 257 sz# 99743 c# 38}{ExtrQuery# [5000:1:39:0:0:100000:1] sh# 257 sz# 99743 c# 39}{ExtrQuery# [5000:1:40:0:0:100000:1] sh# 257 sz# 99743 c# 40}{ExtrQuery# [5000:1:41:0:0:100000:1] sh# 257 sz# 99743 c# 41}{ExtrQuery# [5000:1:42:0:0:100000:1] sh# 257 sz# 99743 c# 42}{ExtrQuery# [5000:1:43:0:0:100000:1] sh# 257 sz# 99743 c# 43}{ExtrQuery# [5000:1:44:0:0:100000:1] sh# 257 sz# 99743 c# 44}{ExtrQuery# [5000:1:45:0:0:100000:1] sh# 257 sz# 99743 c# 45}{ExtrQuery# [5000:1:46:0:0:100000:1] sh# 257 sz# 99743 c# 46}{ExtrQuery# [5000:1:47:0:0:100000:1] sh# 257 sz# 99743 c# 47}{ExtrQuery# [5000:1:48:0:0:100000:1] sh# 257 sz# 99743 c# 48}{ExtrQuery# [5000:1:49:0:0:100000:1] sh# 257 sz# 99743 c# 49}{ExtrQuery# [5000:1:50:0:0:100000:1] sh# 257 sz# 99743 c# 50}{ExtrQuery# [5000:1:51:0:0:100000:1] sh# 257 sz# 99743 c# 51}{ExtrQuery# [5000:1:52:0:0:100000:1] sh# 257 sz# 99743 c# 52}{ExtrQuery# [5000:1:53:0:0:100000:1] sh# 257 sz# 99743 c# 53}{ExtrQuery# [5000:1:54:0:0:100000:1] sh# 257 sz# 99743 c# 54}{ExtrQuery# [5000:1:55:0:0:100000:1] sh# 257 sz# 99743 c# 55}{ExtrQuery# [5000:1:56:0:0:100000:1] sh# 257 sz# 99743 c# 56}{ExtrQuery# [5000:1:57:0:0:100000:1] sh# 257 sz# 99743 c# 57}{ExtrQuery# [5000:1:58:0:0:100000:1] sh# 257 sz# 99743 c# 58}{ExtrQuery# [5000:1:59:0:0:100000:1] sh# 257 sz# 99743 c# 59}{ExtrQuery# [5000:1:60:0:0:100000:1] sh# 257 sz# 99743 c# 60}{ExtrQuery# [5000:1:61:0:0:100000:1] sh# 257 sz# 99743 c# 61}{ExtrQuery# [5000:1:62:0:0:100000:1] sh# 257 sz# 99743 c# 62}{ExtrQuery# [5000:1:63:0:0:100000:1] sh# 257 sz# 99743 c# 63}{ExtrQuery# [5000:1:64:0:0:100000:1] sh# 257 sz# 99743 c# 64}{ExtrQuery# [5000:1:65:0:0:100000:1] sh# 257 sz# 99743 c# 65}{ExtrQuery# [5000:1:66:0:0:100000:1] sh# 257 sz# 99743 c# 66}{ExtrQuery# [5000:1:67:0:0:100000:1] sh# 257 sz# 99743 c# 67}{ExtrQuery# [5000:1:68:0:0:100000:1] sh# 257 sz# 99743 c# 68}{ExtrQuery# [5000:1:69:0:0:100000:1] sh# 257 sz# 99743 c# 69}{ExtrQuery# [5000:1:70:0:0:100000:1] sh# 257 sz# 99743 c# 70}{ExtrQuery# [5000:1:71:0:0:100000:1] sh# 257 sz# 99743 c# 71}{ExtrQuery# [5000:1:72:0:0:100000:1] sh# 257 sz# 99743 c# 72}{ExtrQuery# [5000:1:73:0:0:100000:1] sh# 257 sz# 99743 c# 73}{ExtrQuery# [5000:1:74:0:0:100000:1] sh# 257 sz# 99743 c# 74}{ExtrQuery# [5000:1:75:0:0:100000:1] sh# 257 sz# 99743 c# 75}{ExtrQuery# [5000:1:76:0:0:100000:1] sh# 257 sz# 99743 c# 76}{ExtrQuery# [5000:1:77:0:0:100000:1] sh# 257 sz# 99743 c# 77}{ExtrQuery# [5000:1:78:0:0:100000:1] sh# 257 sz# 99743 c# 78}{ExtrQuery# [5000:1:79:0:0:100000:1] sh# 257 sz# 99743 c# 79}{ExtrQuery# [5000:1:80:0:0:100000:1] sh# 257 sz# 99743 c# 80}{ExtrQuery# [5000:1:81:0:0:100000:1] sh# 257 sz# 99743 c# 81}{ExtrQuery# [5000:1:82:0:0:100000:1] sh# 257 sz# 99743 c# 82}{ExtrQuery# [5000:1:83:0:0:100000:1] sh# 257 sz# 99743 c# 83}{ExtrQuery# [5000:1:84:0:0:100000:1] sh# 257 sz# 99743 c# 84}{ExtrQuery# [5000:1:85:0:0:100000:1] sh# 257 sz# 99743 c# 85}{ExtrQuery# [5000:1:86:0:0:100000:1] sh# 257 sz# 99743 c# 86}{ExtrQuery# [5000:1:87:0:0:100000:1] sh# 257 sz# 99743 c# 87}{ExtrQuery# [5000:1:88:0:0:100000:1] sh# 257 sz# 99743 c# 88}{ExtrQuery# [5000:1:89:0:0:100000:1] sh# 257 sz# 99743 c# 89}{ExtrQuery# [5000:1:90:0:0:100000:1] sh# 257 sz# 99743 c# 90}{ExtrQuery# [5000:1:91:0:0:100000:1] sh# 257 sz# 99743 c# 91}{ExtrQuery# [5000:1:92:0:0:100000:1] sh# 257 sz# 99743 c# 92}{ExtrQuery# [5000:1:93:0:0:100000:1] sh# 257 sz# 99743 c# 93}{ExtrQuery# [5000:1:94:0:0:100000:1] sh# 257 sz# 99743 c# 94}{ExtrQuery# [5000:1:95:0:0:100000:1] sh# 257 sz# 99743 c# 95}{ExtrQuery# 
[5000:1:96:0:0:100000:1] sh# 257 sz# 99743 c# 96}{ExtrQuery# [5000:1:97:0:0:100000:1] sh# 257 sz# 99743 c# 97}{ExtrQuery# [5000:1:98:0:0:100000:1] sh# 257 sz# 99743 c# 98}{ExtrQuery# [5000:1:99:0:0:100000:1] sh# 257 sz# 99743 c# 99}{ExtrQuery# [5000:1:100:0:0:100000:1] sh# 257 sz# 99743 c# 100}{ExtrQuery# [5000:1:101:0:0:100000:1] sh# 257 sz# 99743 c# 101}{ExtrQuery# [5000:1:102:0:0:100000:1] sh# 257 sz# 99743 c# 102}{ExtrQuery# [5000:1:103:0:0:100000:1] sh# 257 sz# 99743 c# 103}{ExtrQuery# [5000:1:104:0:0:100000:1] sh# 257 sz# 99743 c# 104}{ExtrQuery# [5000:1:105:0:0:100000:1] sh# 257 sz# 99743 c# 105}{ExtrQuery# [5000:1:106:0:0:100000:1] sh# 257 sz# 99743 c# 106}{ExtrQuery# [5000:1:107:0:0:100000:1] sh# 257 sz# 99743 c# 107}{ExtrQuery# [5000:1:108:0:0:100000:1] sh# 257 sz# 99743 c# 108}{ExtrQuery# [5000:1:109:0:0:100000:1] sh# 257 sz# 99743 c# 109}{ExtrQuery# [5000:1:110:0:0:100000:1] sh# 257 sz# 99743 c# 110}{ExtrQuery# [5000:1:111:0:0:100000:1] sh# 257 sz# 99743 c# 111}{ExtrQuery# [5000:1:112:0:0:100000:1] sh# 257 sz# 99743 c# 112}{ExtrQuery# [5000:1:113:0:0:100000:1] sh# 257 sz# 99743 c# 113}{ExtrQuery# [5000:1:114:0:0:100000:1] sh# 257 sz# 99743 c# 114}{ExtrQuery# [5000:1:115:0:0:100000:1] sh# 257 sz# 99743 c# 115}{ExtrQuery# [5000:1:116:0:0:100000:1] sh# 257 sz# 99743 c# 116}{ExtrQuery# [5000:1:117:0:0:100000:1] sh# 257 sz# 99743 c# 117}{ExtrQuery# [5000:1:118:0:0:100000:1] sh# 257 sz# 99743 c# 118}{ExtrQuery# [5000:1:119:0:0:100000:1] sh# 257 sz# 99743 c# 119}{ExtrQuery# [5000:1:120:0:0:100000:1] sh# 257 sz# 99743 c# 120}{ExtrQuery# [5000:1:121:0:0:100000:1] sh# 257 sz# 99743 c# 121}{ExtrQuery# [5000:1:122:0:0:100000:1] sh# 257 sz# 99743 c# 122}{ExtrQuery# [5000:1:123:0:0:100000:1] sh# 257 sz# 99743 c# 123}{ExtrQuery# [5000:1:124:0:0:100000:1] sh# 257 sz# 99743 c# 124}{ExtrQuery# [5000:1:125:0:0:100000:1] sh# 257 sz# 99743 c# 125}{ExtrQuery# [5000:1:126:0:0:100000:1] sh# 257 sz# 99743 c# 126}{ExtrQuery# [5000:1:127:0:0:100000:1] sh# 257 sz# 99743 c# 127}{ExtrQuery# [5000:1:128:0:0:100000:1] sh# 257 sz# 99743 c# 128}{ExtrQuery# [5000:1:129:0:0:100000:1] sh# 257 sz# 99743 c# 129}{ExtrQuery# [5000:1:130:0:0:100000:1] sh# 257 sz# 99743 c# 130}{ExtrQuery# [5000:1:131:0:0:100000:1] sh# 257 sz# 99743 c# 131}{ExtrQuery# [5000:1:132:0:0:100000:1] sh# 257 sz# 99743 c# 132}{ExtrQuery# [5000:1:133:0:0:100000:1] sh# 257 sz# 99743 c# 133}{ExtrQuery# [5000:1:134:0:0:100000:1] sh# 257 sz# 99743 c# 134}{ExtrQuery# [5000:1:135:0:0:100000:1] sh# 257 sz# 99743 c# 135}{ExtrQuery# [5000:1:136:0:0:100000:1] sh# 257 sz# 99743 c# 136}{ExtrQuery# [5000:1:137:0:0:100000:1] sh# 257 sz# 99743 c# 137}{ExtrQuery# [5000:1:138:0:0:100000:1] sh# 257 sz# 99743 c# 138}{ExtrQuery# [5000:1:139:0:0:100000:1] sh# 257 sz# 99743 c# 139}{ExtrQuery# [5000:1:140:0:0:100000:1] sh# 257 sz# 99743 c# 140}{ExtrQuery# [5000:1:141:0:0:100000:1] sh# 257 sz# 99743 c# 141}{ExtrQuery# [5000:1:142:0:0:100000:1] sh# 257 sz# 99743 c# 142}{ExtrQuery# [5000:1:143:0:0:100000:1] sh# 257 sz# 99743 c# 143}{ExtrQuery# [5000:1:144:0:0:100000:1] sh# 257 sz# 99743 c# 144}{ExtrQuery# [5000:1:145:0:0:100000:1] sh# 257 sz# 99743 c# 145}{ExtrQuery# [5000:1:146:0:0:100000:1] sh# 257 sz# 99743 c# 146}{ExtrQuery# [5000:1:147:0:0:100000:1] sh# 257 sz# 99743 c# 147}{ExtrQuery# [5000:1:148:0:0:100000:1] sh# 257 sz# 99743 c# 148}{ExtrQuery# [5000:1:149:0:0:100000:1] sh# 257 sz# 99743 c# 149}{ExtrQuery# [5000:1:150:0:0:100000:1] sh# 257 sz# 99743 c# 150}{ExtrQuery# [5000:1:151:0:0:100000:1] sh# 257 sz# 99743 c# 151}{ExtrQuery# [5000:1:152:0:0:100000:1] sh# 257 
sz# 99743 c# 152}{ExtrQuery# [5000:1:153:0:0:100000:1] sh# 257 sz# 99743 c# 153}{ExtrQuery# [5000:1:154:0:0:100000:1] sh# 257 sz# 99743 c# 154}{ExtrQuery# [5000:1:155:0:0:100000:1] sh# 257 sz# 99743 c# 155}{ExtrQuery# [5000:1:156:0:0:100000:1] sh# 257 sz# 99743 c# 156}{ExtrQuery# [5000:1:157:0:0:100000:1] sh# 257 sz# 99743 c# 157}{ExtrQuery# [5000:1:158:0:0:100000:1] sh# 257 sz# 99743 c# 158}{ExtrQuery# [5000:1:159:0:0:100000:1] sh# 257 sz# 99743 c# 159}{ExtrQuery# [5000:1:160:0:0:100000:1] sh# 257 sz# 99743 c# 160}{ExtrQuery# [5000:1:161:0:0:100000:1] sh# 257 sz# 99743 c# 161}{ExtrQuery# [5000:1:162:0:0:100000:1] sh# 257 sz# 99743 c# 162}{ExtrQuery# [5000:1:163:0:0:100000:1] sh# 257 sz# 99743 c# 163}{ExtrQuery# [5000:1:164:0:0:100000:1] sh# 257 sz# 99743 c# 164}{ExtrQuery# [5000:1:165:0:0:100000:1] sh# 257 sz# 99743 c# 165}{ExtrQuery# [5000:1:166:0:0:100000:1] sh# 257 sz# 99743 c# 166}{ExtrQuery# [5000:1:167:0:0:100000:1] sh# 257 sz# 99743 c# 167}{ExtrQuery# [5000:1:168:0:0:100000:1] sh# 257 sz# 99743 c# 168}{ExtrQuery# [5000:1:169:0:0:100000:1] sh# 257 sz# 99743 c# 169}{ExtrQuery# [5000:1:170:0:0:100000:1] sh# 257 sz# 99743 c# 170}{ExtrQuery# [5000:1:171:0:0:100000:1] sh# 257 sz# 99743 c# 171}{ExtrQuery# [5000:1:172:0:0:100000:1] sh# 257 sz# 99743 c# 172}{ExtrQuery# [5000:1:173:0:0:100000:1] sh# 257 sz# 99743 c# 173}{ExtrQuery# [5000:1:174:0:0:100000:1] sh# 257 sz# 99743 c# 174}{ExtrQuery# [5000:1:175:0:0:100000:1] sh# 257 sz# 99743 c# 175}{ExtrQuery# [5000:1:176:0:0:100000:1] sh# 257 sz# 99743 c# 176}{ExtrQuery# [5000:1:177:0:0:100000:1] sh# 257 sz# 99743 c# 177}{ExtrQuery# [5000:1:178:0:0:100000:1] sh# 257 sz# 99743 c# 178}{ExtrQuery# [5000:1:179:0:0:100000:1] sh# 257 sz# 99743 c# 179}{ExtrQuery# [5000:1:180:0:0:100000:1] sh# 257 sz# 99743 c# 180}{ExtrQuery# [5000:1:181:0:0:100000:1] sh# 257 sz# 99743 c# 181}{ExtrQuery# [5000:1:182:0:0:100000:1] sh# 257 sz# 99743 c# 182}{ExtrQuery# [5000:1:183:0:0:100000:1] sh# 257 sz# 99743 c# 183}{ExtrQuery# [5000:1:184:0:0:100000:1] sh# 257 sz# 99743 c# 184}{ExtrQuery# [5000:1:185:0:0:100000:1] sh# 257 sz# 99743 c# 185}{ExtrQuery# [5000:1:186:0:0:100000:1] sh# 257 sz# 99743 c# 186}{ExtrQuery# [5000:1:187:0:0:100000:1] sh# 257 sz# 99743 c# 187}{ExtrQuery# [5000:1:188:0:0:100000:1] sh# 257 sz# 99743 c# 188}{ExtrQuery# [5000:1:189:0:0:100000:1] sh# 257 sz# 99743 c# 189}{ExtrQuery# [5000:1:190:0:0:100000:1] sh# 257 sz# 99743 c# 190}{ExtrQuery# [5000:1:191 ... 
sz# 99743 c# 484}{ExtrQuery# [5000:1:485:0:0:100000:1] sh# 257 sz# 99743 c# 485}{ExtrQuery# [5000:1:486:0:0:100000:1] sh# 257 sz# 99743 c# 486}{ExtrQuery# [5000:1:487:0:0:100000:1] sh# 257 sz# 99743 c# 487}{ExtrQuery# [5000:1:488:0:0:100000:1] sh# 257 sz# 99743 c# 488}{ExtrQuery# [5000:1:489:0:0:100000:1] sh# 257 sz# 99743 c# 489}{ExtrQuery# [5000:1:490:0:0:100000:1] sh# 257 sz# 99743 c# 490}{ExtrQuery# [5000:1:491:0:0:100000:1] sh# 257 sz# 99743 c# 491}{ExtrQuery# [5000:1:492:0:0:100000:1] sh# 257 sz# 99743 c# 492}{ExtrQuery# [5000:1:493:0:0:100000:1] sh# 257 sz# 99743 c# 493}{ExtrQuery# [5000:1:494:0:0:100000:1] sh# 257 sz# 99743 c# 494}{ExtrQuery# [5000:1:495:0:0:100000:1] sh# 257 sz# 99743 c# 495}{ExtrQuery# [5000:1:496:0:0:100000:1] sh# 257 sz# 99743 c# 496}{ExtrQuery# [5000:1:497:0:0:100000:1] sh# 257 sz# 99743 c# 497}{ExtrQuery# [5000:1:498:0:0:100000:1] sh# 257 sz# 99743 c# 498}{ExtrQuery# [5000:1:499:0:0:100000:1] sh# 257 sz# 99743 c# 499}{ExtrQuery# [5000:1:500:0:0:100000:1] sh# 257 sz# 99743 c# 500}{ExtrQuery# [5000:1:501:0:0:100000:1] sh# 257 sz# 99743 c# 501}{ExtrQuery# [5000:1:502:0:0:100000:1] sh# 257 sz# 99743 c# 502}{ExtrQuery# [5000:1:503:0:0:100000:1] sh# 257 sz# 99743 c# 503}{ExtrQuery# [5000:1:504:0:0:100000:1] sh# 257 sz# 99743 c# 504}{ExtrQuery# [5000:1:505:0:0:100000:1] sh# 257 sz# 99743 c# 505}{ExtrQuery# [5000:1:506:0:0:100000:1] sh# 257 sz# 99743 c# 506}{ExtrQuery# [5000:1:507:0:0:100000:1] sh# 257 sz# 99743 c# 507}{ExtrQuery# [5000:1:508:0:0:100000:1] sh# 257 sz# 99743 c# 508}{ExtrQuery# [5000:1:509:0:0:100000:1] sh# 257 sz# 99743 c# 509}{ExtrQuery# [5000:1:510:0:0:100000:1] sh# 257 sz# 99743 c# 510}{ExtrQuery# [5000:1:511:0:0:100000:1] sh# 257 sz# 99743 c# 511}{ExtrQuery# [5000:1:512:0:0:100000:1] sh# 257 sz# 99743 c# 512}{ExtrQuery# [5000:1:513:0:0:100000:1] sh# 257 sz# 99743 c# 513}{ExtrQuery# [5000:1:514:0:0:100000:1] sh# 257 sz# 99743 c# 514}{ExtrQuery# [5000:1:515:0:0:100000:1] sh# 257 sz# 99743 c# 515}{ExtrQuery# [5000:1:516:0:0:100000:1] sh# 257 sz# 99743 c# 516}{ExtrQuery# [5000:1:517:0:0:100000:1] sh# 257 sz# 99743 c# 517}{ExtrQuery# [5000:1:518:0:0:100000:1] sh# 257 sz# 99743 c# 518}{ExtrQuery# [5000:1:519:0:0:100000:1] sh# 257 sz# 99743 c# 519}{ExtrQuery# [5000:1:520:0:0:100000:1] sh# 257 sz# 99743 c# 520}{ExtrQuery# [5000:1:521:0:0:100000:1] sh# 257 sz# 99743 c# 521}{ExtrQuery# [5000:1:522:0:0:100000:1] sh# 257 sz# 99743 c# 522}{ExtrQuery# [5000:1:523:0:0:100000:1] sh# 257 sz# 99743 c# 523}{ExtrQuery# [5000:1:524:0:0:100000:1] sh# 257 sz# 99743 c# 524}{ExtrQuery# [5000:1:525:0:0:100000:1] sh# 257 sz# 99743 c# 525}{ExtrQuery# [5000:1:526:0:0:100000:1] sh# 257 sz# 99743 c# 526}{ExtrQuery# [5000:1:527:0:0:100000:1] sh# 257 sz# 99743 c# 527}{ExtrQuery# [5000:1:528:0:0:100000:1] sh# 257 sz# 99743 c# 528}{ExtrQuery# [5000:1:529:0:0:100000:1] sh# 257 sz# 99743 c# 529}{ExtrQuery# [5000:1:530:0:0:100000:1] sh# 257 sz# 99743 c# 530}{ExtrQuery# [5000:1:531:0:0:100000:1] sh# 257 sz# 99743 c# 531}{ExtrQuery# [5000:1:532:0:0:100000:1] sh# 257 sz# 99743 c# 532}{ExtrQuery# [5000:1:533:0:0:100000:1] sh# 257 sz# 99743 c# 533}{ExtrQuery# [5000:1:534:0:0:100000:1] sh# 257 sz# 99743 c# 534}{ExtrQuery# [5000:1:535:0:0:100000:1] sh# 257 sz# 99743 c# 535}{ExtrQuery# [5000:1:536:0:0:100000:1] sh# 257 sz# 99743 c# 536}{ExtrQuery# [5000:1:537:0:0:100000:1] sh# 257 sz# 99743 c# 537}{ExtrQuery# [5000:1:538:0:0:100000:1] sh# 257 sz# 99743 c# 538}{ExtrQuery# [5000:1:539:0:0:100000:1] sh# 257 sz# 99743 c# 539}{ExtrQuery# [5000:1:540:0:0:100000:1] sh# 257 sz# 99743 c# 
540}{ExtrQuery# [5000:1:541:0:0:100000:1] sh# 257 sz# 99743 c# 541}{ExtrQuery# [5000:1:542:0:0:100000:1] sh# 257 sz# 99743 c# 542}{ExtrQuery# [5000:1:543:0:0:100000:1] sh# 257 sz# 99743 c# 543}{ExtrQuery# [5000:1:544:0:0:100000:1] sh# 257 sz# 99743 c# 544}{ExtrQuery# [5000:1:545:0:0:100000:1] sh# 257 sz# 99743 c# 545}{ExtrQuery# [5000:1:546:0:0:100000:1] sh# 257 sz# 99743 c# 546}{ExtrQuery# [5000:1:547:0:0:100000:1] sh# 257 sz# 99743 c# 547}{ExtrQuery# [5000:1:548:0:0:100000:1] sh# 257 sz# 99743 c# 548}{ExtrQuery# [5000:1:549:0:0:100000:1] sh# 257 sz# 99743 c# 549}{ExtrQuery# [5000:1:550:0:0:100000:1] sh# 257 sz# 99743 c# 550}{ExtrQuery# [5000:1:551:0:0:100000:1] sh# 257 sz# 99743 c# 551}{ExtrQuery# [5000:1:552:0:0:100000:1] sh# 257 sz# 99743 c# 552}{ExtrQuery# [5000:1:553:0:0:100000:1] sh# 257 sz# 99743 c# 553}{ExtrQuery# [5000:1:554:0:0:100000:1] sh# 257 sz# 99743 c# 554}{ExtrQuery# [5000:1:555:0:0:100000:1] sh# 257 sz# 99743 c# 555}{ExtrQuery# [5000:1:556:0:0:100000:1] sh# 257 sz# 99743 c# 556}{ExtrQuery# [5000:1:557:0:0:100000:1] sh# 257 sz# 99743 c# 557}{ExtrQuery# [5000:1:558:0:0:100000:1] sh# 257 sz# 99743 c# 558}{ExtrQuery# [5000:1:559:0:0:100000:1] sh# 257 sz# 99743 c# 559}{ExtrQuery# [5000:1:560:0:0:100000:1] sh# 257 sz# 99743 c# 560}{ExtrQuery# [5000:1:561:0:0:100000:1] sh# 257 sz# 99743 c# 561}{ExtrQuery# [5000:1:562:0:0:100000:1] sh# 257 sz# 99743 c# 562}{ExtrQuery# [5000:1:563:0:0:100000:1] sh# 257 sz# 99743 c# 563}{ExtrQuery# [5000:1:564:0:0:100000:1] sh# 257 sz# 99743 c# 564}{ExtrQuery# [5000:1:565:0:0:100000:1] sh# 257 sz# 99743 c# 565}{ExtrQuery# [5000:1:566:0:0:100000:1] sh# 257 sz# 99743 c# 566}{ExtrQuery# [5000:1:567:0:0:100000:1] sh# 257 sz# 99743 c# 567}{ExtrQuery# [5000:1:568:0:0:100000:1] sh# 257 sz# 99743 c# 568}{ExtrQuery# [5000:1:569:0:0:100000:1] sh# 257 sz# 99743 c# 569}{ExtrQuery# [5000:1:570:0:0:100000:1] sh# 257 sz# 99743 c# 570}{ExtrQuery# [5000:1:571:0:0:100000:1] sh# 257 sz# 99743 c# 571}{ExtrQuery# [5000:1:572:0:0:100000:1] sh# 257 sz# 99743 c# 572}{ExtrQuery# [5000:1:573:0:0:100000:1] sh# 257 sz# 99743 c# 573}{ExtrQuery# [5000:1:574:0:0:100000:1] sh# 257 sz# 99743 c# 574}{ExtrQuery# [5000:1:575:0:0:100000:1] sh# 257 sz# 99743 c# 575}{ExtrQuery# [5000:1:576:0:0:100000:1] sh# 257 sz# 99743 c# 576}{ExtrQuery# [5000:1:577:0:0:100000:1] sh# 257 sz# 99743 c# 577}{ExtrQuery# [5000:1:578:0:0:100000:1] sh# 257 sz# 99743 c# 578}{ExtrQuery# [5000:1:579:0:0:100000:1] sh# 257 sz# 99743 c# 579}{ExtrQuery# [5000:1:580:0:0:100000:1] sh# 257 sz# 99743 c# 580}{ExtrQuery# [5000:1:581:0:0:100000:1] sh# 257 sz# 99743 c# 581}{ExtrQuery# [5000:1:582:0:0:100000:1] sh# 257 sz# 99743 c# 582}{ExtrQuery# [5000:1:583:0:0:100000:1] sh# 257 sz# 99743 c# 583}{ExtrQuery# [5000:1:584:0:0:100000:1] sh# 257 sz# 99743 c# 584}{ExtrQuery# [5000:1:585:0:0:100000:1] sh# 257 sz# 99743 c# 585}{ExtrQuery# [5000:1:586:0:0:100000:1] sh# 257 sz# 99743 c# 586}{ExtrQuery# [5000:1:587:0:0:100000:1] sh# 257 sz# 99743 c# 587}{ExtrQuery# [5000:1:588:0:0:100000:1] sh# 257 sz# 99743 c# 588}{ExtrQuery# [5000:1:589:0:0:100000:1] sh# 257 sz# 99743 c# 589}{ExtrQuery# [5000:1:590:0:0:100000:1] sh# 257 sz# 99743 c# 590}{ExtrQuery# [5000:1:591:0:0:100000:1] sh# 257 sz# 99743 c# 591}{ExtrQuery# [5000:1:592:0:0:100000:1] sh# 257 sz# 99743 c# 592}{ExtrQuery# [5000:1:593:0:0:100000:1] sh# 257 sz# 99743 c# 593}{ExtrQuery# [5000:1:594:0:0:100000:1] sh# 257 sz# 99743 c# 594}{ExtrQuery# [5000:1:595:0:0:100000:1] sh# 257 sz# 99743 c# 595}{ExtrQuery# [5000:1:596:0:0:100000:1] sh# 257 sz# 99743 c# 596}{ExtrQuery# 
[5000:1:597:0:0:100000:1] sh# 257 sz# 99743 c# 597}{ExtrQuery# [5000:1:598:0:0:100000:1] sh# 257 sz# 99743 c# 598}{ExtrQuery# [5000:1:599:0:0:100000:1] sh# 257 sz# 99743 c# 599}{ExtrQuery# [5000:1:600:0:0:100000:1] sh# 257 sz# 99743 c# 600}{ExtrQuery# [5000:1:601:0:0:100000:1] sh# 257 sz# 99743 c# 601}{ExtrQuery# [5000:1:602:0:0:100000:1] sh# 257 sz# 99743 c# 602}{ExtrQuery# [5000:1:603:0:0:100000:1] sh# 257 sz# 99743 c# 603}{ExtrQuery# [5000:1:604:0:0:100000:1] sh# 257 sz# 99743 c# 604}{ExtrQuery# [5000:1:605:0:0:100000:1] sh# 257 sz# 99743 c# 605}{ExtrQuery# [5000:1:606:0:0:100000:1] sh# 257 sz# 99743 c# 606}{ExtrQuery# [5000:1:607:0:0:100000:1] sh# 257 sz# 99743 c# 607}{ExtrQuery# [5000:1:608:0:0:100000:1] sh# 257 sz# 99743 c# 608}{ExtrQuery# [5000:1:609:0:0:100000:1] sh# 257 sz# 99743 c# 609}{ExtrQuery# [5000:1:610:0:0:100000:1] sh# 257 sz# 99743 c# 610}{ExtrQuery# [5000:1:611:0:0:100000:1] sh# 257 sz# 99743 c# 611}{ExtrQuery# [5000:1:612:0:0:100000:1] sh# 257 sz# 99743 c# 612}{ExtrQuery# [5000:1:613:0:0:100000:1] sh# 257 sz# 99743 c# 613}{ExtrQuery# [5000:1:614:0:0:100000:1] sh# 257 sz# 99743 c# 614}{ExtrQuery# [5000:1:615:0:0:100000:1] sh# 257 sz# 99743 c# 615}{ExtrQuery# [5000:1:616:0:0:100000:1] sh# 257 sz# 99743 c# 616}{ExtrQuery# [5000:1:617:0:0:100000:1] sh# 257 sz# 99743 c# 617}{ExtrQuery# [5000:1:618:0:0:100000:1] sh# 257 sz# 99743 c# 618}{ExtrQuery# [5000:1:619:0:0:100000:1] sh# 257 sz# 99743 c# 619}{ExtrQuery# [5000:1:620:0:0:100000:1] sh# 257 sz# 99743 c# 620}{ExtrQuery# [5000:1:621:0:0:100000:1] sh# 257 sz# 99743 c# 621}{ExtrQuery# [5000:1:622:0:0:100000:1] sh# 257 sz# 99743 c# 622}{ExtrQuery# [5000:1:623:0:0:100000:1] sh# 257 sz# 99743 c# 623}{ExtrQuery# [5000:1:624:0:0:100000:1] sh# 257 sz# 99743 c# 624}{ExtrQuery# [5000:1:625:0:0:100000:1] sh# 257 sz# 99743 c# 625}{ExtrQuery# [5000:1:626:0:0:100000:1] sh# 257 sz# 99743 c# 626}{ExtrQuery# [5000:1:627:0:0:100000:1] sh# 257 sz# 99743 c# 627}{ExtrQuery# [5000:1:628:0:0:100000:1] sh# 257 sz# 99743 c# 628}{ExtrQuery# [5000:1:629:0:0:100000:1] sh# 257 sz# 99743 c# 629}{ExtrQuery# [5000:1:630:0:0:100000:1] sh# 257 sz# 99743 c# 630}{ExtrQuery# [5000:1:631:0:0:100000:1] sh# 257 sz# 99743 c# 631}{ExtrQuery# [5000:1:632:0:0:100000:1] sh# 257 sz# 99743 c# 632}{ExtrQuery# [5000:1:633:0:0:100000:1] sh# 257 sz# 99743 c# 633}{ExtrQuery# [5000:1:634:0:0:100000:1] sh# 257 sz# 99743 c# 634}{ExtrQuery# [5000:1:635:0:0:100000:1] sh# 257 sz# 99743 c# 635}{ExtrQuery# [5000:1:636:0:0:100000:1] sh# 257 sz# 99743 c# 636}{ExtrQuery# [5000:1:637:0:0:100000:1] sh# 257 sz# 99743 c# 637}{ExtrQuery# [5000:1:638:0:0:100000:1] sh# 257 sz# 99743 c# 638}{ExtrQuery# [5000:1:639:0:0:100000:1] sh# 257 sz# 99743 c# 639}{ExtrQuery# [5000:1:640:0:0:100000:1] sh# 257 sz# 99743 c# 640}{ExtrQuery# [5000:1:641:0:0:100000:1] sh# 257 sz# 99743 c# 641}{ExtrQuery# [5000:1:642:0:0:100000:1] sh# 257 sz# 99743 c# 642}{ExtrQuery# [5000:1:643:0:0:100000:1] sh# 257 sz# 99743 c# 643}{ExtrQuery# [5000:1:644:0:0:100000:1] sh# 257 sz# 99743 c# 644}{ExtrQuery# [5000:1:645:0:0:100000:1] sh# 257 sz# 99743 c# 645}{ExtrQuery# [5000:1:646:0:0:100000:1] sh# 257 sz# 99743 c# 646}{ExtrQuery# [5000:1:647:0:0:100000:1] sh# 257 sz# 99743 c# 647}{ExtrQuery# [5000:1:648:0:0:100000:1] sh# 257 sz# 99743 c# 648}{ExtrQuery# [5000:1:649:0:0:100000:1] sh# 257 sz# 99743 c# 649}{ExtrQuery# [5000:1:650:0:0:100000:1] sh# 257 sz# 99743 c# 650}{ExtrQuery# [5000:1:651:0:0:100000:1] sh# 257 sz# 99743 c# 651}{ExtrQuery# [5000:1:652:0:0:100000:1] sh# 257 sz# 99743 c# 652}{ExtrQuery# [5000:1:653:0:0:100000:1] 
sh# 257 sz# 99743 c# 653}{ExtrQuery# [5000:1:654:0:0:100000:1] sh# 257 sz# 99743 c# 654}{ExtrQuery# [5000:1:655:0:0:100000:1] sh# 257 sz# 99743 c# 655}{ExtrQuery# [5000:1:656:0:0:100000:1] sh# 257 sz# 99743 c# 656}{ExtrQuery# [5000:1:657:0:0:100000:1] sh# 257 sz# 99743 c# 657}{ExtrQuery# [5000:1:658:0:0:100000:1] sh# 257 sz# 99743 c# 658}{ExtrQuery# [5000:1:659:0:0:100000:1] sh# 257 sz# 99743 c# 659}{ExtrQuery# [5000:1:660:0:0:100000:1] sh# 257 sz# 99743 c# 660}{ExtrQuery# [5000:1:661:0:0:100000:1] sh# 257 sz# 99743 c# 661}{ExtrQuery# [5000:1:662:0:0:100000:1] sh# 257 sz# 99743 c# 662}{ExtrQuery# [5000:1:663:0:0:100000:1] sh# 257 sz# 99743 c# 663}{ExtrQuery# [5000:1:664:0:0:100000:1] sh# 257 sz# 99743 c# 664}{ExtrQuery# [5000:1:665:0:0:100000:1] sh# 257 sz# 99743 c# 665}{ExtrQuery# [5000:1:666:0:0:100000:1] sh# 257 sz# 99743 c# 666}{ExtrQuery# [5000:1:667:0:0:100000:1] sh# 257 sz# 99743 c# 667}{ExtrQuery# [5000:1:668:0:0:100000:1] sh# 257 sz# 99743 c# 668}{ExtrQuery# [5000:1:669:0:0:100000:1] sh# 257 sz# 99743 c# 669}{ExtrQuery# [5000:1:670:0:0:100000:1] sh# 257 sz# 99743 c# 670}{ExtrQuery# [5000:1:671:0:0:100000:1] sh# 257 sz# 99743 c# 671}{ExtrQuery# [5000:1:672:0:0:17027:1] sh# 257 sz# 16770 c# 672} {MsgQoS} Notify# 0 Internals# 0 TabletId# 0 AcquireBlockedGeneration# 0 ForceBlockedGeneration# 0}; VDISK CAN NOT REPLY ON TEvVGet REQUEST ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_donor/unittest >> Donor::MultipleEvicts [GOOD] Test command err: RandomSeed# 13304977808972444522 0 donors: 2025-05-07T08:46:45.615080Z 26 00h00m20.011024s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:2:1:0]: (2181038080) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2025-05-07T08:46:45.615292Z 26 00h00m20.011024s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:2:1:0]: (2181038080) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 2436257518083643530] 2025-05-07T08:46:45.637304Z 26 00h00m20.011024s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:2:1:0]: (2181038080) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0 1 donors: 24:1000 2025-05-07T08:46:45.725279Z 24 00h00m20.012048s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:2:1:0]: (2181038080) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2025-05-07T08:46:45.725473Z 24 00h00m20.012048s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:2:1:0]: (2181038080) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 2436257518083643530] 2025-05-07T08:46:45.744257Z 24 00h00m20.012048s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:2:1:0]: (2181038080) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0 1 donors: 26:1000 2025-05-07T08:46:45.809466Z 26 00h00m20.013072s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:2:1:0]: (2181038080) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2025-05-07T08:46:45.809668Z 26 00h00m20.013072s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:2:1:0]: (2181038080) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 2436257518083643530] 2025-05-07T08:46:45.821839Z 26 00h00m20.013072s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:2:1:0]: (2181038080) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0 1 donors: 24:1000 2025-05-07T08:46:45.887118Z 24 00h00m20.014096s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:2:1:0]: (2181038080) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 
2025-05-07T08:46:45.887316Z 24 00h00m20.014096s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:2:1:0]: (2181038080) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 2436257518083643530] 2025-05-07T08:46:45.899854Z 24 00h00m20.014096s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:2:1:0]: (2181038080) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0 1 donors: 26:1000 2025-05-07T08:46:45.966539Z 26 00h00m20.015120s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:2:1:0]: (2181038080) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2025-05-07T08:46:45.966737Z 26 00h00m20.015120s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:2:1:0]: (2181038080) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 2436257518083643530] 2025-05-07T08:46:45.979032Z 26 00h00m20.015120s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:2:1:0]: (2181038080) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0 1 donors: 24:1000 2025-05-07T08:46:46.128335Z 24 00h00m20.016144s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:2:1:0]: (2181038080) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2025-05-07T08:46:46.128577Z 24 00h00m20.016144s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:2:1:0]: (2181038080) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 2436257518083643530] 2025-05-07T08:46:46.153754Z 24 00h00m20.016144s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:2:1:0]: (2181038080) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0 1 donors: 26:1000 2025-05-07T08:46:46.361722Z 26 00h00m20.017168s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:2:1:0]: (2181038080) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2025-05-07T08:46:46.361951Z 26 00h00m20.017168s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:2:1:0]: (2181038080) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 2436257518083643530] 2025-05-07T08:46:46.401992Z 26 00h00m20.017168s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:2:1:0]: (2181038080) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0 1 donors: 24:1000 2025-05-07T08:46:46.597388Z 24 00h00m20.018192s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:2:1:0]: (2181038080) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2025-05-07T08:46:46.597627Z 24 00h00m20.018192s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:2:1:0]: (2181038080) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 2436257518083643530] 2025-05-07T08:46:46.627883Z 24 00h00m20.018192s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:2:1:0]: (2181038080) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0 1 donors: 26:1000 2025-05-07T08:46:46.805804Z 26 00h00m20.019216s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:2:1:0]: (2181038080) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2025-05-07T08:46:46.806079Z 26 00h00m20.019216s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:2:1:0]: (2181038080) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 2436257518083643530] 2025-05-07T08:46:46.829678Z 26 00h00m20.019216s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:2:1:0]: (2181038080) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0 1 donors: 24:1000 >> TIncrHugeBasicTest::Defrag [GOOD] >> Splitter::CritSimple [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_balancing/unittest >> 
VDiskBalancing::TestDontSendToReadOnlyTest_Block42 [GOOD] Test command err: RandomSeed# 9460880571633885705 SEND TEvPut with key [1:1:1:0:0:100:0] TEvPutResult: TEvPutResult {Id# [1:1:1:0:0:100:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Setting VDisk read-only to 1 for position 0 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:0:0] SEND TEvPut with key [1:1:2:0:0:100:0] 2025-05-07T08:46:42.346555Z 1 00h01m30.060512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:6333:830] TEvPutResult: TEvPutResult {Id# [1:1:2:0:0:100:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Setting VDisk read-only to 0 for position 0 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:0:0] Start compaction Finish compaction |88.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kafka_proxy/ut/ut_protocol.cpp >> BlobDepot::BasicRange [GOOD] >> BlobDepot::BasicDiscover |88.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kafka_proxy/ut/ut_protocol.cpp |88.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_group_reconfiguration/ut_group_reconfiguration |88.2%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_group_reconfiguration/ut_group_reconfiguration |88.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_group_reconfiguration/ut_group_reconfiguration |88.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_huge/ydb-core-blobstorage-ut_blobstorage-ut_huge |88.2%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_huge/ydb-core-blobstorage-ut_blobstorage-ut_huge |88.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_huge/ydb-core-blobstorage-ut_blobstorage-ut_huge ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/incrhuge/ut/unittest >> TIncrHugeBasicTest::Defrag [GOOD] Test command err: 2025-05-07T08:45:37.782316Z :BS_INCRHUGE DEBUG: incrhuge_keeper.cpp:71: BlockSize# 8128 BlocksInChunk# 2304 BlocksInMinBlob# 65 MaxBlobsPerChunk# 35 BlocksInDataSection# 2303 BlocksInIndexSection# 1 2025-05-07T08:45:37.782413Z :BS_INCRHUGE INFO: incrhuge_keeper_recovery.cpp:152: [PDisk# 000000001 Recovery] [IncrHugeKeeper PDisk# 000000001] starting ReadLog 2025-05-07T08:45:37.782874Z :BS_INCRHUGE INFO: incrhuge_keeper_recovery.cpp:161: [PDisk# 000000001 Recovery] [IncrHugeKeeper PDisk# 000000001] finished ReadLog 2025-05-07T08:45:37.782911Z :BS_INCRHUGE DEBUG: incrhuge_keeper_recovery.cpp:200: [PDisk# 000000001 Recovery] ApplyReadLog Chunks# [] Deletes# [] Owners# {} CurrentSerNum# 0 NextLsn# 1 2025-05-07T08:45:37.782952Z :BS_INCRHUGE INFO: incrhuge_keeper_recovery.cpp:515: [PDisk# 000000001 Recovery] [IncrHugeKeeper PDisk# 000000001] ready 2025-05-07T08:45:37.782979Z :TEST DEBUG: test_actor_concurrent.h:153: finished Init Reference# [] Enumerated# [] InFlightDeletes# [] 2025-05-07T08:45:37.782993Z :TEST DEBUG: test_actor_concurrent.h:209: ActionsTaken# 1 2025-05-07T08:45:37.783003Z :TEST DEBUG: test_actor_concurrent.h:213: GetNumRequestsInFlight# 0 InFlightWritesSize# 0 2025-05-07T08:45:37.784517Z :TEST DEBUG: test_actor_concurrent.h:295: sent Write LogoBlobId# [1:1:1:0:811717:0:0] Lsn# 0 NumReq# 0 2025-05-07T08:45:37.786136Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:130: [PDisk# 000000001 Writer] QueryId# 0 HandleWrite Lsn# 0 DataSize# 811717 WriteQueueSize# 1 WriteInProgressItemsSize# 0 2025-05-07T08:45:37.786174Z :BS_INCRHUGE DEBUG: 
incrhuge_keeper_write.cpp:174: [PDisk# 000000001 Writer] WriteQueueSize# 1 WriteInProgressItemsSize# 0 2025-05-07T08:45:37.786238Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:192: [PDisk# 000000001 Writer] QueryId# 0 ProcessWriteItem entry 2025-05-07T08:45:37.786256Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:230: [PDisk# 000000001 Writer] QueryId# 0 ProcessWriteItem no free chunks 2025-05-07T08:45:37.786602Z :TEST DEBUG: test_actor_concurrent.h:213: GetNumRequestsInFlight# 1 InFlightWritesSize# 1 2025-05-07T08:45:37.807904Z :TEST DEBUG: test_actor_concurrent.h:295: sent Write LogoBlobId# [1:1:1:0:1745495:1:0] Lsn# 1 NumReq# 1 2025-05-07T08:45:37.810050Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:130: [PDisk# 000000001 Writer] QueryId# 1 HandleWrite Lsn# 1 DataSize# 1745495 WriteQueueSize# 2 WriteInProgressItemsSize# 0 2025-05-07T08:45:37.810073Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:174: [PDisk# 000000001 Writer] WriteQueueSize# 2 WriteInProgressItemsSize# 0 2025-05-07T08:45:37.810088Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:192: [PDisk# 000000001 Writer] QueryId# 0 ProcessWriteItem entry 2025-05-07T08:45:37.810105Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:230: [PDisk# 000000001 Writer] QueryId# 0 ProcessWriteItem no free chunks 2025-05-07T08:45:37.815529Z :TEST DEBUG: test_actor_concurrent.h:213: GetNumRequestsInFlight# 2 InFlightWritesSize# 2 2025-05-07T08:45:37.816231Z :TEST DEBUG: test_actor_concurrent.h:295: sent Write LogoBlobId# [1:1:1:0:602037:2:0] Lsn# 2 NumReq# 2 2025-05-07T08:45:37.817161Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:130: [PDisk# 000000001 Writer] QueryId# 2 HandleWrite Lsn# 2 DataSize# 602037 WriteQueueSize# 3 WriteInProgressItemsSize# 0 2025-05-07T08:45:37.817181Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:174: [PDisk# 000000001 Writer] WriteQueueSize# 3 WriteInProgressItemsSize# 0 2025-05-07T08:45:37.817204Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:192: [PDisk# 000000001 Writer] QueryId# 0 ProcessWriteItem entry 2025-05-07T08:45:37.817224Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:230: [PDisk# 000000001 Writer] QueryId# 0 ProcessWriteItem no free chunks 2025-05-07T08:45:37.822555Z :TEST DEBUG: test_actor_concurrent.h:213: GetNumRequestsInFlight# 3 InFlightWritesSize# 3 2025-05-07T08:45:37.824009Z :TEST DEBUG: test_actor_concurrent.h:295: sent Write LogoBlobId# [1:1:1:0:1287465:3:0] Lsn# 3 NumReq# 3 2025-05-07T08:45:37.826177Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:130: [PDisk# 000000001 Writer] QueryId# 3 HandleWrite Lsn# 3 DataSize# 1287465 WriteQueueSize# 4 WriteInProgressItemsSize# 0 2025-05-07T08:45:37.826209Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:174: [PDisk# 000000001 Writer] WriteQueueSize# 4 WriteInProgressItemsSize# 0 2025-05-07T08:45:37.826233Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:192: [PDisk# 000000001 Writer] QueryId# 0 ProcessWriteItem entry 2025-05-07T08:45:37.826249Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:230: [PDisk# 000000001 Writer] QueryId# 0 ProcessWriteItem no free chunks 2025-05-07T08:45:37.826952Z :TEST DEBUG: test_actor_concurrent.h:213: GetNumRequestsInFlight# 4 InFlightWritesSize# 4 2025-05-07T08:45:37.828774Z :TEST DEBUG: test_actor_concurrent.h:295: sent Write LogoBlobId# [1:1:1:0:1501676:4:0] Lsn# 4 NumReq# 4 2025-05-07T08:45:37.830194Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:130: [PDisk# 000000001 Writer] QueryId# 4 HandleWrite Lsn# 4 DataSize# 1501676 WriteQueueSize# 5 WriteInProgressItemsSize# 0 2025-05-07T08:45:37.830213Z :BS_INCRHUGE DEBUG: 
incrhuge_keeper_write.cpp:174: [PDisk# 000000001 Writer] WriteQueueSize# 5 WriteInProgressItemsSize# 0 2025-05-07T08:45:37.830230Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:192: [PDisk# 000000001 Writer] QueryId# 0 ProcessWriteItem entry 2025-05-07T08:45:37.830248Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:230: [PDisk# 000000001 Writer] QueryId# 0 ProcessWriteItem no free chunks 2025-05-07T08:45:37.835911Z :TEST DEBUG: test_actor_concurrent.h:213: GetNumRequestsInFlight# 5 InFlightWritesSize# 5 2025-05-07T08:45:37.836670Z :TEST DEBUG: test_actor_concurrent.h:295: sent Write LogoBlobId# [1:1:1:0:687721:5:0] Lsn# 5 NumReq# 5 2025-05-07T08:45:37.837881Z :BS_INCRHUGE DEBUG: incrhuge_keeper_log.cpp:460: [PDisk# 000000001 Logger] ApplyLogChunkItem Lsn# 1 Status# OK 2025-05-07T08:45:37.837924Z :BS_INCRHUGE DEBUG: incrhuge_keeper_alloc.cpp:64: [PDisk# 000000001 Allocator] ChunkIdx# 2 ChunkSerNum# 1000 2025-05-07T08:45:37.837955Z :BS_INCRHUGE DEBUG: incrhuge_keeper_alloc.cpp:64: [PDisk# 000000001 Allocator] ChunkIdx# 3 ChunkSerNum# 1001 2025-05-07T08:45:37.837985Z :BS_INCRHUGE DEBUG: incrhuge_keeper_alloc.cpp:64: [PDisk# 000000001 Allocator] ChunkIdx# 4 ChunkSerNum# 1002 2025-05-07T08:45:37.838018Z :BS_INCRHUGE DEBUG: incrhuge_keeper_alloc.cpp:64: [PDisk# 000000001 Allocator] ChunkIdx# 5 ChunkSerNum# 1003 2025-05-07T08:45:37.838030Z :BS_INCRHUGE DEBUG: incrhuge_keeper_alloc.cpp:64: [PDisk# 000000001 Allocator] ChunkIdx# 6 ChunkSerNum# 1004 2025-05-07T08:45:37.838054Z :BS_INCRHUGE DEBUG: incrhuge_keeper_alloc.cpp:64: [PDisk# 000000001 Allocator] ChunkIdx# 7 ChunkSerNum# 1005 2025-05-07T08:45:37.838068Z :BS_INCRHUGE DEBUG: incrhuge_keeper_alloc.cpp:64: [PDisk# 000000001 Allocator] ChunkIdx# 8 ChunkSerNum# 1006 2025-05-07T08:45:37.838079Z :BS_INCRHUGE DEBUG: incrhuge_keeper_alloc.cpp:64: [PDisk# 000000001 Allocator] ChunkIdx# 9 ChunkSerNum# 1007 2025-05-07T08:45:37.838099Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:174: [PDisk# 000000001 Writer] WriteQueueSize# 5 WriteInProgressItemsSize# 0 2025-05-07T08:45:37.838118Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:192: [PDisk# 000000001 Writer] QueryId# 0 ProcessWriteItem entry 2025-05-07T08:45:37.838870Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:319: [PDisk# 000000001 Writer] QueryId# 0 ProcessWriteItem OffsetInBlocks# 0 IndexInsideChunk# 0 SizeInBlocks# 100 SizeInBytes# 812800 Offset# 0 Size# 812800 End# 812800 Id# 0000000000000000 ChunkIdx# 2 ChunkSerNum# 1000 Defrag# false 2025-05-07T08:45:37.838908Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:192: [PDisk# 000000001 Writer] QueryId# 1 ProcessWriteItem entry 2025-05-07T08:45:37.839261Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:319: [PDisk# 000000001 Writer] QueryId# 1 ProcessWriteItem OffsetInBlocks# 100 IndexInsideChunk# 1 SizeInBlocks# 215 SizeInBytes# 1747520 Offset# 812800 Size# 1747520 End# 2560320 Id# 0000000000000001 ChunkIdx# 2 ChunkSerNum# 1000 Defrag# false 2025-05-07T08:45:37.839277Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:192: [PDisk# 000000001 Writer] QueryId# 2 ProcessWriteItem entry 2025-05-07T08:45:37.839424Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:319: [PDisk# 000000001 Writer] QueryId# 2 ProcessWriteItem OffsetInBlocks# 315 IndexInsideChunk# 2 SizeInBlocks# 75 SizeInBytes# 609600 Offset# 2560320 Size# 609600 End# 3169920 Id# 0000000000000002 ChunkIdx# 2 ChunkSerNum# 1000 Defrag# false 2025-05-07T08:45:37.839434Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:192: [PDisk# 000000001 Writer] QueryId# 3 ProcessWriteItem entry 2025-05-07T08:45:37.839707Z 
:BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:319: [PDisk# 000000001 Writer] QueryId# 3 ProcessWriteItem OffsetInBlocks# 390 IndexInsideChunk# 3 SizeInBlocks# 159 SizeInBytes# 1292352 Offset# 3169920 Size# 1292352 End# 4462272 Id# 0000000000000003 ChunkIdx# 2 ChunkSerNum# 1000 Defrag# false 2025-05-07T08:45:37.839732Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:192: [PDisk# 000000001 Writer] QueryId# 4 ProcessWriteItem entry 2025-05-07T08:45:37.840048Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:319: [PDisk# 000000001 Writer] QueryId# 4 ProcessWriteItem OffsetInBlocks# 549 IndexInsideChunk# 4 SizeInBlocks# 185 SizeInBytes# 1503680 Offset# 4462272 Size# 1503680 End# 5965952 Id# 0000000000000004 ChunkIdx# 2 ChunkSerNum# 1000 Defrag# false 2025-05-07T08:45:37.840104Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:130: [PDisk# 000000001 Writer] QueryId# 5 HandleWrite Lsn# 5 DataSize# 687721 WriteQueueSize# 1 WriteInProgressItemsSize# 5 2025-05-07T08:45:37.840117Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:174: [PDisk# 000000001 Writer] WriteQueueSize# 1 WriteInProgressItemsSize# 5 2025-05-07T08:45:37.847110Z :TEST DEBUG: test_actor_concurrent.h:213: GetNumRequestsInFlight# 6 InFlightWritesSize# 6 2025-05-07T08:45:37.849326Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:130: [PDisk# 000000001 Writer] QueryId# 6 HandleWrite Lsn# 6 DataSize# 1957662 WriteQueueSize# 2 WriteInProgressItemsSize# 5 2025-05-07T08:45:37.849344Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:174: [PDisk# 000000001 Writer] WriteQueueSize# 2 WriteInProgressItemsSize# 5 2025-05-07T08:45:37.854325Z :TEST DEBUG: test_actor_concurrent.h:295: sent Write LogoBlobId# [1:1:1:0:1957662:6:0] Lsn# 6 NumReq# 6 2025-05-07T08:45:37.858369Z :TEST DEBUG: test_actor_concurrent.h:213: GetNumRequestsInFlight# 7 InFlightWritesSize# 7 2025-05-07T08:45:37.883677Z :TEST DEBUG: test_actor_concurrent.h:295: sent Write LogoBlobId# [1:1:1:0:1824284:7:0] Lsn# 7 NumReq# 7 2025-05-07T08:45:37.886064Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:344: [PDisk# 000000001 Writer] QueryId# 0 ApplyBlobWrite Status# OK 2025-05-07T08:45:37.886201Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:174: [PDisk# 000000001 Writer] WriteQueueSize# 2 WriteInProgressItemsSize# 4 2025-05-07T08:45:37.886227Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:192: [PDisk# 000000001 Writer] QueryId# 5 ProcessWriteItem entry 2025-05-07T08:45:37.886438Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:319: [PDisk# 000000001 Writer] QueryId# 5 ProcessWriteItem OffsetInBlocks# 734 IndexInsideChunk# 5 SizeInBlocks# 85 SizeInBytes# 690880 Offset# 5965952 Size# 690880 End# 6656832 Id# 0000000000000005 ChunkIdx# 2 ChunkSerNum# 1000 Defrag# false 2025-05-07T08:45:37.886487Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:130: [PDisk# 000000001 Writer] QueryId# 7 HandleWrite Lsn# 7 DataSize# 1824284 WriteQueueSize# 2 WriteInProgressItemsSize# 5 2025-05-07T08:45:37.886501Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:174: [PDisk# 000000001 Writer] WriteQueueSize# 2 WriteInProgressItemsSize# 5 2025-05-07T08:45:37.889479Z :TEST DEBUG: test_actor_concurrent.h:308: finished Write Id# 0000000000000000 LogoBlobId# [1:1:1:0:811717:0:0] Lsn# 0 2025-05-07T08:45:37.889507Z :TEST INFO: test_actor_concurrent.h:320: BytesWritten# 0 MB ElapsedTime# 0.288316s Speed# 0.00 MB/s 2025-05-07 ... 
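The ProcessWriteItem records above expose the keeper's chunk geometry and sizing arithmetic: BlocksInDataSection# 2303 plus BlocksInIndexSection# 1 gives BlocksInChunk# 2304; MaxBlobsPerChunk# 35 is the 2303 data blocks divided by BlocksInMinBlob# 65, rounded down; every blob is rounded up to whole 8128-byte blocks (DataSize# 811717 becomes SizeInBlocks# 100, SizeInBytes# 812800); and blobs are laid out back to back, each Offset equal to the previous blob's End, with WriteInProgressItemsSize never exceeding 5. A minimal C++ sketch of that rounding, checked against values taken verbatim from this log (the helper is ours, not from the YDB sources):

    #include <cassert>
    #include <cstdint>

    // Geometry reported by the keeper at startup.
    constexpr uint64_t BlockSize = 8128;
    constexpr uint64_t BlocksInChunk = 2304;   // 2303 data blocks + 1 index block
    constexpr uint64_t BlocksInMinBlob = 65;

    // A blob occupies whole blocks: round DataSize up to the block boundary.
    constexpr uint64_t SizeInBlocks(uint64_t dataSize) {
        return (dataSize + BlockSize - 1) / BlockSize;
    }

    int main() {
        // MaxBlobsPerChunk# 35 == data blocks in a chunk / minimal blob size.
        assert((BlocksInChunk - 1) / BlocksInMinBlob == 35);
        // Values from the ProcessWriteItem records above.
        assert(SizeInBlocks(811717) == 100 && 100 * BlockSize == 812800);
        assert(SizeInBlocks(1745495) == 215 && 215 * BlockSize == 1747520);
        assert(SizeInBlocks(602037) == 75 && 75 * BlockSize == 609600);
        // Back-to-back layout: each blob's End is the next blob's Offset.
        assert(812800 + 1747520 == 2560320);
        assert(2560320 + 609600 == 3169920);
        return 0;
    }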
000000001 Writer] QueryId# 523 ProcessWriteItem OffsetInBlocks# 1428 IndexInsideChunk# 10 SizeInBlocks# 115 SizeInBytes# 934720 Offset# 11606784 Size# 934720 End# 12541504 Id# 000000000000001a ChunkIdx# 33 ChunkSerNum# 1115 Defrag# false 2025-05-07T08:46:48.395926Z :TEST DEBUG: test_actor_concurrent.h:213: GetNumRequestsInFlight# 41 InFlightWritesSize# 23 2025-05-07T08:46:48.397581Z :TEST DEBUG: test_actor_concurrent.h:295: sent Write LogoBlobId# [1:2:1:0:1579253:1189:0] Lsn# 1189 NumReq# 41 2025-05-07T08:46:48.398028Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:344: [PDisk# 000000001 Writer] QueryId# 519 ApplyBlobWrite Status# OK 2025-05-07T08:46:48.398223Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:174: [PDisk# 000000001 Writer] WriteQueueSize# 7 WriteInProgressItemsSize# 4 2025-05-07T08:46:48.398247Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:192: [PDisk# 000000001 Writer] QueryId# 524 ProcessWriteItem entry 2025-05-07T08:46:48.398431Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:319: [PDisk# 000000001 Writer] QueryId# 524 ProcessWriteItem OffsetInBlocks# 1543 IndexInsideChunk# 11 SizeInBlocks# 79 SizeInBytes# 642112 Offset# 12541504 Size# 642112 End# 13183616 Id# 0000000000000026 ChunkIdx# 33 ChunkSerNum# 1115 Defrag# false 2025-05-07T08:46:48.398463Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:130: [PDisk# 000000001 Writer] QueryId# 531 HandleWrite Lsn# 1189 DataSize# 1579253 WriteQueueSize# 7 WriteInProgressItemsSize# 5 2025-05-07T08:46:48.398479Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:174: [PDisk# 000000001 Writer] WriteQueueSize# 7 WriteInProgressItemsSize# 5 2025-05-07T08:46:48.401222Z :TEST DEBUG: test_actor_concurrent.h:213: GetNumRequestsInFlight# 42 InFlightWritesSize# 24 2025-05-07T08:46:48.401931Z :TEST DEBUG: test_actor_concurrent.h:295: sent Write LogoBlobId# [1:2:1:0:584806:1190:0] Lsn# 1190 NumReq# 42 2025-05-07T08:46:48.402021Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:130: [PDisk# 000000001 Writer] QueryId# 532 HandleWrite Lsn# 1190 DataSize# 584806 WriteQueueSize# 8 WriteInProgressItemsSize# 5 2025-05-07T08:46:48.402035Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:174: [PDisk# 000000001 Writer] WriteQueueSize# 8 WriteInProgressItemsSize# 5 2025-05-07T08:46:48.402064Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:344: [PDisk# 000000001 Writer] QueryId# 520 ApplyBlobWrite Status# OK 2025-05-07T08:46:48.402505Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:174: [PDisk# 000000001 Writer] WriteQueueSize# 8 WriteInProgressItemsSize# 4 2025-05-07T08:46:48.402526Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:192: [PDisk# 000000001 Writer] QueryId# 525 ProcessWriteItem entry 2025-05-07T08:46:48.402784Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:319: [PDisk# 000000001 Writer] QueryId# 525 ProcessWriteItem OffsetInBlocks# 1622 IndexInsideChunk# 12 SizeInBlocks# 129 SizeInBytes# 1048512 Offset# 13183616 Size# 1048512 End# 14232128 Id# 000000000000000c ChunkIdx# 33 ChunkSerNum# 1115 Defrag# false 2025-05-07T08:46:48.402819Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:344: [PDisk# 000000001 Writer] QueryId# 521 ApplyBlobWrite Status# OK 2025-05-07T08:46:48.402916Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:174: [PDisk# 000000001 Writer] WriteQueueSize# 7 WriteInProgressItemsSize# 4 2025-05-07T08:46:48.402934Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:192: [PDisk# 000000001 Writer] QueryId# 526 ProcessWriteItem entry 2025-05-07T08:46:48.403116Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:319: [PDisk# 000000001 Writer] QueryId# 526 
ProcessWriteItem OffsetInBlocks# 1751 IndexInsideChunk# 13 SizeInBlocks# 88 SizeInBytes# 715264 Offset# 14232128 Size# 715264 End# 14947392 Id# 0000000000000006 ChunkIdx# 33 ChunkSerNum# 1115 Defrag# false 2025-05-07T08:46:48.411127Z :TEST DEBUG: test_actor_concurrent.h:213: GetNumRequestsInFlight# 43 InFlightWritesSize# 25 2025-05-07T08:46:48.412082Z :TEST DEBUG: test_actor_concurrent.h:295: sent Write LogoBlobId# [1:2:1:0:831121:1191:0] Lsn# 1191 NumReq# 43 2025-05-07T08:46:48.413712Z :TEST DEBUG: test_actor_concurrent.h:213: GetNumRequestsInFlight# 44 InFlightWritesSize# 26 2025-05-07T08:46:48.414047Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:130: [PDisk# 000000001 Writer] QueryId# 533 HandleWrite Lsn# 1191 DataSize# 831121 WriteQueueSize# 7 WriteInProgressItemsSize# 5 2025-05-07T08:46:48.414068Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:174: [PDisk# 000000001 Writer] WriteQueueSize# 7 WriteInProgressItemsSize# 5 2025-05-07T08:46:48.414467Z :TEST DEBUG: test_actor_concurrent.h:295: sent Write LogoBlobId# [1:2:1:0:562722:1192:0] Lsn# 1192 NumReq# 44 2025-05-07T08:46:48.415596Z :TEST DEBUG: test_actor_concurrent.h:213: GetNumRequestsInFlight# 45 InFlightWritesSize# 27 2025-05-07T08:46:48.416390Z :TEST DEBUG: test_actor_concurrent.h:295: sent Write LogoBlobId# [1:2:1:0:632239:1193:0] Lsn# 1193 NumReq# 45 2025-05-07T08:46:48.417636Z :TEST DEBUG: test_actor_concurrent.h:213: GetNumRequestsInFlight# 46 InFlightWritesSize# 28 2025-05-07T08:46:48.418044Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:130: [PDisk# 000000001 Writer] QueryId# 534 HandleWrite Lsn# 1192 DataSize# 562722 WriteQueueSize# 8 WriteInProgressItemsSize# 5 2025-05-07T08:46:48.418063Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:174: [PDisk# 000000001 Writer] WriteQueueSize# 8 WriteInProgressItemsSize# 5 2025-05-07T08:46:48.418084Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:130: [PDisk# 000000001 Writer] QueryId# 535 HandleWrite Lsn# 1193 DataSize# 632239 WriteQueueSize# 9 WriteInProgressItemsSize# 5 2025-05-07T08:46:48.418097Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:174: [PDisk# 000000001 Writer] WriteQueueSize# 9 WriteInProgressItemsSize# 5 2025-05-07T08:46:48.418122Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:344: [PDisk# 000000001 Writer] QueryId# 522 ApplyBlobWrite Status# OK 2025-05-07T08:46:48.429259Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:174: [PDisk# 000000001 Writer] WriteQueueSize# 9 WriteInProgressItemsSize# 4 2025-05-07T08:46:48.429299Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:192: [PDisk# 000000001 Writer] QueryId# 527 ProcessWriteItem entry 2025-05-07T08:46:48.429768Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:319: [PDisk# 000000001 Writer] QueryId# 527 ProcessWriteItem OffsetInBlocks# 1839 IndexInsideChunk# 14 SizeInBlocks# 258 SizeInBytes# 2097024 Offset# 14947392 Size# 2097024 End# 17044416 Id# 0000000000000012 ChunkIdx# 33 ChunkSerNum# 1115 Defrag# false 2025-05-07T08:46:48.429822Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:344: [PDisk# 000000001 Writer] QueryId# 523 ApplyBlobWrite Status# OK 2025-05-07T08:46:48.430641Z :TEST DEBUG: test_actor_concurrent.h:295: sent Write LogoBlobId# [1:2:1:0:941510:1194:0] Lsn# 1194 NumReq# 46 2025-05-07T08:46:48.432495Z :TEST DEBUG: test_actor_concurrent.h:213: GetNumRequestsInFlight# 47 InFlightWritesSize# 29 2025-05-07T08:46:48.438015Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:174: [PDisk# 000000001 Writer] WriteQueueSize# 8 WriteInProgressItemsSize# 4 2025-05-07T08:46:48.438050Z :BS_INCRHUGE DEBUG: 
incrhuge_keeper_write.cpp:192: [PDisk# 000000001 Writer] QueryId# 528 ProcessWriteItem entry 2025-05-07T08:46:48.438085Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:534: [PDisk# 000000001 Writer] IndexWrite chunkIdx# 33 offset# 17044416 size# 1682496 end# 18726912 2025-05-07T08:46:48.438557Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:319: [PDisk# 000000001 Writer] QueryId# 528 ProcessWriteItem OffsetInBlocks# 0 IndexInsideChunk# 0 SizeInBlocks# 252 SizeInBytes# 2048256 Offset# 0 Size# 2048256 End# 2048256 Id# 0000000000000022 ChunkIdx# 34 ChunkSerNum# 1116 Defrag# false 2025-05-07T08:46:48.438594Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:344: [PDisk# 000000001 Writer] QueryId# 524 ApplyBlobWrite Status# OK 2025-05-07T08:46:48.438715Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:174: [PDisk# 000000001 Writer] WriteQueueSize# 7 WriteInProgressItemsSize# 4 2025-05-07T08:46:48.438730Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:192: [PDisk# 000000001 Writer] QueryId# 529 ProcessWriteItem entry 2025-05-07T08:46:48.439139Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:319: [PDisk# 000000001 Writer] QueryId# 529 ProcessWriteItem OffsetInBlocks# 252 IndexInsideChunk# 1 SizeInBlocks# 224 SizeInBytes# 1820672 Offset# 2048256 Size# 1820672 End# 3868928 Id# 000000000000002d ChunkIdx# 34 ChunkSerNum# 1116 Defrag# false 2025-05-07T08:46:48.439171Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:130: [PDisk# 000000001 Writer] QueryId# 536 HandleWrite Lsn# 1194 DataSize# 941510 WriteQueueSize# 7 WriteInProgressItemsSize# 5 2025-05-07T08:46:48.439188Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:174: [PDisk# 000000001 Writer] WriteQueueSize# 7 WriteInProgressItemsSize# 5 2025-05-07T08:46:48.439205Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:344: [PDisk# 000000001 Writer] QueryId# 525 ApplyBlobWrite Status# OK 2025-05-07T08:46:48.439363Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:174: [PDisk# 000000001 Writer] WriteQueueSize# 7 WriteInProgressItemsSize# 4 2025-05-07T08:46:48.439378Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:192: [PDisk# 000000001 Writer] QueryId# 530 ProcessWriteItem entry 2025-05-07T08:46:48.439726Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:319: [PDisk# 000000001 Writer] QueryId# 530 ProcessWriteItem OffsetInBlocks# 476 IndexInsideChunk# 2 SizeInBlocks# 193 SizeInBytes# 1568704 Offset# 3868928 Size# 1568704 End# 5437632 Id# 000000000000003b ChunkIdx# 34 ChunkSerNum# 1116 Defrag# false 2025-05-07T08:46:48.439750Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:344: [PDisk# 000000001 Writer] QueryId# 526 ApplyBlobWrite Status# OK 2025-05-07T08:46:48.439870Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:174: [PDisk# 000000001 Writer] WriteQueueSize# 6 WriteInProgressItemsSize# 4 2025-05-07T08:46:48.439885Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:192: [PDisk# 000000001 Writer] QueryId# 531 ProcessWriteItem entry 2025-05-07T08:46:48.440229Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:319: [PDisk# 000000001 Writer] QueryId# 531 ProcessWriteItem OffsetInBlocks# 669 IndexInsideChunk# 3 SizeInBlocks# 195 SizeInBytes# 1584960 Offset# 5437632 Size# 1584960 End# 7022592 Id# 000000000000003f ChunkIdx# 34 ChunkSerNum# 1116 Defrag# false 2025-05-07T08:46:48.440526Z :TEST DEBUG: test_actor_concurrent.h:295: sent Write LogoBlobId# [1:2:1:0:1618971:1195:0] Lsn# 1195 NumReq# 47 2025-05-07T08:46:48.443716Z :TEST DEBUG: test_actor_concurrent.h:213: GetNumRequestsInFlight# 48 InFlightWritesSize# 30 2025-05-07T08:46:48.445839Z :TEST DEBUG: test_actor_concurrent.h:295: sent Write 
LogoBlobId# [1:2:1:0:2045677:1196:0] Lsn# 1196 NumReq# 48 2025-05-07T08:46:48.450042Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:130: [PDisk# 000000001 Writer] QueryId# 537 HandleWrite Lsn# 1195 DataSize# 1618971 WriteQueueSize# 6 WriteInProgressItemsSize# 5 2025-05-07T08:46:48.450067Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:174: [PDisk# 000000001 Writer] WriteQueueSize# 6 WriteInProgressItemsSize# 5 2025-05-07T08:46:48.450146Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:130: [PDisk# 000000001 Writer] QueryId# 538 HandleWrite Lsn# 1196 DataSize# 2045677 WriteQueueSize# 7 WriteInProgressItemsSize# 5 2025-05-07T08:46:48.450157Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:174: [PDisk# 000000001 Writer] WriteQueueSize# 7 WriteInProgressItemsSize# 5 2025-05-07T08:46:48.457808Z :TEST DEBUG: test_actor_concurrent.h:213: GetNumRequestsInFlight# 49 InFlightWritesSize# 31 2025-05-07T08:46:48.458813Z :TEST DEBUG: test_actor_concurrent.h:295: sent Write LogoBlobId# [1:2:1:0:826134:1197:0] Lsn# 1197 NumReq# 49 2025-05-07T08:46:48.461055Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:130: [PDisk# 000000001 Writer] QueryId# 539 HandleWrite Lsn# 1197 DataSize# 826134 WriteQueueSize# 8 WriteInProgressItemsSize# 5 2025-05-07T08:46:48.461076Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:174: [PDisk# 000000001 Writer] WriteQueueSize# 8 WriteInProgressItemsSize# 5 >> BlobDepot::BasicDiscover [GOOD] >> BlobDepot::BasicBlock ------- [TS] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/splitter/ut/unittest >> Splitter::CritSimple [GOOD] Test command err: FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=5163264;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=5163264;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=5163264;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=5163264;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=5163264;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=5163264;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=5163264;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=5163264;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=5163264;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=5163264;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=5163264;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=5163264;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=5163264;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=5163264;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=5163264;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=5163264;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=280384;columns=1; 
FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=280384;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=280384;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=280384;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=280384;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=280384;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=280384;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=280336;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=280384;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=280384;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=280384;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=280384;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=280384;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=280384;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=280384;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=280336;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=2088936;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=2088936;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=5184936;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=5184936;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=5163264;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=50240;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=5163264;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=50240;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=5163264;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=50240;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=5163264;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=50240;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=5163264;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=50240;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=5163264;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=50240;columns=1; 
FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=5163264;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=50240;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=5163264;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=50200;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=5163264;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=5163264;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=5163264;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=5163264;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=5163264;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=5163264;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=5163264;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=5163264;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=50240;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=50240;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=50240;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=50240;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=50240;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=50240;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=50240;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=50200;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7124168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=132168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7124168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=132168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7124168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=132168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7124168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=132168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7124168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=132168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7124168;columns=1; 
FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=132168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7124168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=132168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7124168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=132168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7124168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=132168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7124168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=132168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7124168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=132168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7124168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=132168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7124168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=132168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7124168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=132168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7124168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=132168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7124168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=132168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7124168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=132168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7124168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=132168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7124168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=132168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=seria ... 
82944;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=8905200;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=8905200;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=8905200;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=8905200;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=8905200;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=8905200;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=8905200;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=8947912;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7964832;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=71282912;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=8905200;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=8905200;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=8905200;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=8905200;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=8905200;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=8905200;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=8905200;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=8947912;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7914944;columns=1; 
FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7964800;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7964832;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7964832;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7964832;columns=1; 
FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7964832;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7964832;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7964832;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; 
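The serialize/parsing records above are flat semicolon-delimited key=value lines (priority, component, fline, event, size, columns), and a run like this emits thousands of them. A quick way to read such output is to tally record counts and bytes per event; a sketch assuming one record per line on stdin (this helper is ours, not part of the test harness):

    #include <cstdint>
    #include <iostream>
    #include <map>
    #include <sstream>
    #include <string>

    // Tally FALLBACK_ACTOR_LOGGING records, e.g.
    // "...;event=serialize;size=5163264;columns=1;" -> serialize += 5163264.
    int main() {
        std::map<std::string, uint64_t> bytesPerEvent;
        std::map<std::string, uint64_t> countPerEvent;
        std::string line;
        while (std::getline(std::cin, line)) {
            std::istringstream rec(line);
            std::string field, event;
            uint64_t size = 0;
            while (std::getline(rec, field, ';')) {
                const auto eq = field.find('=');
                if (eq == std::string::npos) continue;
                const auto key = field.substr(0, eq);
                const auto value = field.substr(eq + 1);
                if (key == "event") event = value;
                else if (key == "size") size = std::stoull(value);
            }
            if (!event.empty()) {
                bytesPerEvent[event] += size;
                ++countPerEvent[event];
            }
        }
        for (const auto& [event, bytes] : bytesPerEvent)
            std::cout << event << ": " << countPerEvent[event]
                      << " records, " << bytes << " bytes\n";
        return 0;
    }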
FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7964832;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7964800;columns=1; >> test.py::test[solomon-Basic-default.txt] [GOOD] >> test.py::test[solomon-Downsampling-default.txt] |88.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_ut_write.cpp |88.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_write.cpp |88.2%| [TA] $(B)/ydb/core/blobstorage/incrhuge/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> BlobDepot::BasicBlock [GOOD] >> BlobDepot::BasicCollectGarbage >> test_transform.py::TestYamlConfigTransformations::test_simplified[dump_ds_init] [GOOD] |88.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/ut_blobstorage/ydb-core-blobstorage-ut_blobstorage |88.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/ut_blobstorage/ydb-core-blobstorage-ut_blobstorage |88.2%| [TS] {RESULT} ydb/core/tx/columnshard/splitter/ut/unittest |88.2%| [TA] {RESULT} $(B)/ydb/core/blobstorage/incrhuge/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} |88.2%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ydb-core-blobstorage-ut_blobstorage >> BsControllerTest::SelfHealBlock4Plus2 [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/library/yaml_config/ut_transform/py3test >> test_transform.py::TestYamlConfigTransformations::test_simplified[dump_ds_init] [GOOD] 2025-05-07 08:46:55,232 ERROR devtools.ya.test.canon.compare: Cannot calculate diff: Traceback (most recent call last): File "devtools/ya/test/canon/compare.py", line 402, in _get_file_diff_via_diff raise Exception( Exception: 'ydb/library/yaml_config/tools/simple_json_diff/simple_json_diff' has finished unexpectedly with rc = 1 stdout: stderr: ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut_selfheal/unittest >> BsControllerTest::SelfHealBlock4Plus2 [GOOD] Test command err: 2025-05-07T08:46:37.710896Z 1 00h00m00.000000s :BS_NODE DEBUG: [1] Bootstrap 2025-05-07T08:46:37.710966Z 1 00h00m00.000000s :BS_NODE DEBUG: [1] Connect 2025-05-07T08:46:37.711136Z 2 00h00m00.000000s :BS_NODE DEBUG: [2] Bootstrap 2025-05-07T08:46:37.711172Z 2 00h00m00.000000s :BS_NODE DEBUG: [2] Connect 2025-05-07T08:46:37.711230Z 3 00h00m00.000000s :BS_NODE DEBUG: [3] Bootstrap 2025-05-07T08:46:37.711252Z 3 00h00m00.000000s :BS_NODE DEBUG: [3] Connect 2025-05-07T08:46:37.711287Z 4 00h00m00.000000s :BS_NODE DEBUG: [4] Bootstrap 2025-05-07T08:46:37.711309Z 4 00h00m00.000000s :BS_NODE DEBUG: [4] Connect 2025-05-07T08:46:37.711342Z 5 00h00m00.000000s :BS_NODE DEBUG: [5] Bootstrap 2025-05-07T08:46:37.711366Z 5 00h00m00.000000s :BS_NODE DEBUG: [5] Connect 2025-05-07T08:46:37.711406Z 6 00h00m00.000000s :BS_NODE DEBUG: [6] Bootstrap 2025-05-07T08:46:37.711427Z 6 00h00m00.000000s :BS_NODE DEBUG: [6] Connect 2025-05-07T08:46:37.711457Z 7 00h00m00.000000s :BS_NODE DEBUG: [7] Bootstrap 2025-05-07T08:46:37.711478Z 7 00h00m00.000000s :BS_NODE DEBUG: [7] Connect 2025-05-07T08:46:37.711512Z 8 00h00m00.000000s :BS_NODE DEBUG: [8] Bootstrap 2025-05-07T08:46:37.711530Z 8 00h00m00.000000s :BS_NODE DEBUG: [8] Connect 2025-05-07T08:46:37.711570Z 9 00h00m00.000000s :BS_NODE DEBUG: [9] Bootstrap 2025-05-07T08:46:37.711592Z 9 00h00m00.000000s :BS_NODE DEBUG: [9] Connect 2025-05-07T08:46:37.711630Z 10 00h00m00.000000s :BS_NODE DEBUG: [10] Bootstrap 2025-05-07T08:46:37.711657Z 10 00h00m00.000000s :BS_NODE DEBUG: [10] Connect 2025-05-07T08:46:37.711720Z 11 00h00m00.000000s :BS_NODE DEBUG: [11] Bootstrap 2025-05-07T08:46:37.711743Z 11 00h00m00.000000s :BS_NODE DEBUG: [11] Connect 2025-05-07T08:46:37.711799Z 12 00h00m00.000000s :BS_NODE DEBUG: [12] Bootstrap 2025-05-07T08:46:37.711820Z 12 00h00m00.000000s :BS_NODE DEBUG: [12] Connect 2025-05-07T08:46:37.711852Z 13 00h00m00.000000s :BS_NODE DEBUG: [13] Bootstrap 2025-05-07T08:46:37.711873Z 13 00h00m00.000000s :BS_NODE DEBUG: [13] Connect 2025-05-07T08:46:37.711949Z 14 00h00m00.000000s :BS_NODE DEBUG: [14] Bootstrap 2025-05-07T08:46:37.711972Z 14 00h00m00.000000s :BS_NODE DEBUG: [14] Connect 2025-05-07T08:46:37.712004Z 15 00h00m00.000000s :BS_NODE DEBUG: [15] Bootstrap 2025-05-07T08:46:37.712023Z 15 00h00m00.000000s :BS_NODE DEBUG: [15] Connect 2025-05-07T08:46:37.712058Z 16 00h00m00.000000s :BS_NODE DEBUG: [16] Bootstrap 2025-05-07T08:46:37.712079Z 16 00h00m00.000000s :BS_NODE DEBUG: [16] Connect 2025-05-07T08:46:37.712116Z 17 00h00m00.000000s :BS_NODE DEBUG: [17] Bootstrap 2025-05-07T08:46:37.712137Z 17 00h00m00.000000s :BS_NODE DEBUG: [17] Connect 2025-05-07T08:46:37.712173Z 18 00h00m00.000000s :BS_NODE DEBUG: [18] 
Bootstrap 2025-05-07T08:46:37.712203Z 18 00h00m00.000000s :BS_NODE DEBUG: [18] Connect 2025-05-07T08:46:37.712275Z 19 00h00m00.000000s :BS_NODE DEBUG: [19] Bootstrap 2025-05-07T08:46:37.712299Z 19 00h00m00.000000s :BS_NODE DEBUG: [19] Connect 2025-05-07T08:46:37.712330Z 20 00h00m00.000000s :BS_NODE DEBUG: [20] Bootstrap 2025-05-07T08:46:37.712350Z 20 00h00m00.000000s :BS_NODE DEBUG: [20] Connect 2025-05-07T08:46:37.712398Z 21 00h00m00.000000s :BS_NODE DEBUG: [21] Bootstrap 2025-05-07T08:46:37.712424Z 21 00h00m00.000000s :BS_NODE DEBUG: [21] Connect 2025-05-07T08:46:37.712463Z 22 00h00m00.000000s :BS_NODE DEBUG: [22] Bootstrap 2025-05-07T08:46:37.712498Z 22 00h00m00.000000s :BS_NODE DEBUG: [22] Connect 2025-05-07T08:46:37.712537Z 23 00h00m00.000000s :BS_NODE DEBUG: [23] Bootstrap 2025-05-07T08:46:37.712559Z 23 00h00m00.000000s :BS_NODE DEBUG: [23] Connect 2025-05-07T08:46:37.712606Z 24 00h00m00.000000s :BS_NODE DEBUG: [24] Bootstrap 2025-05-07T08:46:37.712630Z 24 00h00m00.000000s :BS_NODE DEBUG: [24] Connect 2025-05-07T08:46:37.712669Z 25 00h00m00.000000s :BS_NODE DEBUG: [25] Bootstrap 2025-05-07T08:46:37.712703Z 25 00h00m00.000000s :BS_NODE DEBUG: [25] Connect 2025-05-07T08:46:37.712742Z 26 00h00m00.000000s :BS_NODE DEBUG: [26] Bootstrap 2025-05-07T08:46:37.712770Z 26 00h00m00.000000s :BS_NODE DEBUG: [26] Connect 2025-05-07T08:46:37.712830Z 27 00h00m00.000000s :BS_NODE DEBUG: [27] Bootstrap 2025-05-07T08:46:37.712880Z 27 00h00m00.000000s :BS_NODE DEBUG: [27] Connect 2025-05-07T08:46:37.712919Z 28 00h00m00.000000s :BS_NODE DEBUG: [28] Bootstrap 2025-05-07T08:46:37.712939Z 28 00h00m00.000000s :BS_NODE DEBUG: [28] Connect 2025-05-07T08:46:37.712970Z 29 00h00m00.000000s :BS_NODE DEBUG: [29] Bootstrap 2025-05-07T08:46:37.712991Z 29 00h00m00.000000s :BS_NODE DEBUG: [29] Connect 2025-05-07T08:46:37.713026Z 30 00h00m00.000000s :BS_NODE DEBUG: [30] Bootstrap 2025-05-07T08:46:37.713047Z 30 00h00m00.000000s :BS_NODE DEBUG: [30] Connect 2025-05-07T08:46:37.713079Z 31 00h00m00.000000s :BS_NODE DEBUG: [31] Bootstrap 2025-05-07T08:46:37.713099Z 31 00h00m00.000000s :BS_NODE DEBUG: [31] Connect 2025-05-07T08:46:37.713133Z 32 00h00m00.000000s :BS_NODE DEBUG: [32] Bootstrap 2025-05-07T08:46:37.713159Z 32 00h00m00.000000s :BS_NODE DEBUG: [32] Connect 2025-05-07T08:46:37.764782Z 1 00h00m00.000000s :BS_NODE DEBUG: [1] ClientConnected Sender# [1:2157:49] Status# ERROR ClientId# [1:2157:49] ServerId# [0:0:0] PipeClient# [1:2157:49] 2025-05-07T08:46:37.770122Z 2 00h00m00.000000s :BS_NODE DEBUG: [2] ClientConnected Sender# [2:2158:37] Status# ERROR ClientId# [2:2158:37] ServerId# [0:0:0] PipeClient# [2:2158:37] 2025-05-07T08:46:37.770225Z 3 00h00m00.000000s :BS_NODE DEBUG: [3] ClientConnected Sender# [3:2159:37] Status# ERROR ClientId# [3:2159:37] ServerId# [0:0:0] PipeClient# [3:2159:37] 2025-05-07T08:46:37.770277Z 4 00h00m00.000000s :BS_NODE DEBUG: [4] ClientConnected Sender# [4:2160:37] Status# ERROR ClientId# [4:2160:37] ServerId# [0:0:0] PipeClient# [4:2160:37] 2025-05-07T08:46:37.770331Z 5 00h00m00.000000s :BS_NODE DEBUG: [5] ClientConnected Sender# [5:2161:37] Status# ERROR ClientId# [5:2161:37] ServerId# [0:0:0] PipeClient# [5:2161:37] 2025-05-07T08:46:37.770366Z 6 00h00m00.000000s :BS_NODE DEBUG: [6] ClientConnected Sender# [6:2162:37] Status# ERROR ClientId# [6:2162:37] ServerId# [0:0:0] PipeClient# [6:2162:37] 2025-05-07T08:46:37.770402Z 7 00h00m00.000000s :BS_NODE DEBUG: [7] ClientConnected Sender# [7:2163:37] Status# ERROR ClientId# [7:2163:37] ServerId# [0:0:0] PipeClient# [7:2163:37] 
2025-05-07T08:46:37.770435Z 8 00h00m00.000000s :BS_NODE DEBUG: [8] ClientConnected Sender# [8:2164:37] Status# ERROR ClientId# [8:2164:37] ServerId# [0:0:0] PipeClient# [8:2164:37] 2025-05-07T08:46:37.770517Z 9 00h00m00.000000s :BS_NODE DEBUG: [9] ClientConnected Sender# [9:2165:37] Status# ERROR ClientId# [9:2165:37] ServerId# [0:0:0] PipeClient# [9:2165:37] 2025-05-07T08:46:37.770570Z 10 00h00m00.000000s :BS_NODE DEBUG: [10] ClientConnected Sender# [10:2166:37] Status# ERROR ClientId# [10:2166:37] ServerId# [0:0:0] PipeClient# [10:2166:37] 2025-05-07T08:46:37.770624Z 11 00h00m00.000000s :BS_NODE DEBUG: [11] ClientConnected Sender# [11:2167:37] Status# ERROR ClientId# [11:2167:37] ServerId# [0:0:0] PipeClient# [11:2167:37] 2025-05-07T08:46:37.770661Z 12 00h00m00.000000s :BS_NODE DEBUG: [12] ClientConnected Sender# [12:2168:37] Status# ERROR ClientId# [12:2168:37] ServerId# [0:0:0] PipeClient# [12:2168:37] 2025-05-07T08:46:37.770705Z 13 00h00m00.000000s :BS_NODE DEBUG: [13] ClientConnected Sender# [13:2169:37] Status# ERROR ClientId# [13:2169:37] ServerId# [0:0:0] PipeClient# [13:2169:37] 2025-05-07T08:46:37.770746Z 14 00h00m00.000000s :BS_NODE DEBUG: [14] ClientConnected Sender# [14:2170:37] Status# ERROR ClientId# [14:2170:37] ServerId# [0:0:0] PipeClient# [14:2170:37] 2025-05-07T08:46:37.770797Z 15 00h00m00.000000s :BS_NODE DEBUG: [15] ClientConnected Sender# [15:2171:37] Status# ERROR ClientId# [15:2171:37] ServerId# [0:0:0] PipeClient# [15:2171:37] 2025-05-07T08:46:37.770841Z 16 00h00m00.000000s :BS_NODE DEBUG: [16] ClientConnected Sender# [16:2172:37] Status# ERROR ClientId# [16:2172:37] ServerId# [0:0:0] PipeClient# [16:2172:37] 2025-05-07T08:46:37.770880Z 17 00h00m00.000000s :BS_NODE DEBUG: [17] ClientConnected Sender# [17:2173:37] Status# ERROR ClientId# [17:2173:37] ServerId# [0:0:0] PipeClient# [17:2173:37] 2025-05-07T08:46:37.770920Z 18 00h00m00.000000s :BS_NODE DEBUG: [18] ClientConnected Sender# [18:2174:37] Status# ERROR ClientId# [18:2174:37] ServerId# [0:0:0] PipeClient# [18:2174:37] 2025-05-07T08:46:37.770958Z 19 00h00m00.000000s :BS_NODE DEBUG: [19] ClientConnected Sender# [19:2175:37] Status# ERROR ClientId# [19:2175:37] ServerId# [0:0:0] PipeClient# [19:2175:37] 2025-05-07T08:46:37.770994Z 20 00h00m00.000000s :BS_NODE DEBUG: [20] ClientConnected Sender# [20:2176:37] Status# ERROR ClientId# [20:2176:37] ServerId# [0:0:0] PipeClient# [20:2176:37] 2025-05-07T08:46:37.771052Z 21 00h00m00.000000s :BS_NODE DEBUG: [21] ClientConnected Sender# [21:2177:37] Status# ERROR ClientId# [21:2177:37] ServerId# [0:0:0] PipeClient# [21:2177:37] 2025-05-07T08:46:37.771108Z 22 00h00m00.000000s :BS_NODE DEBUG: [22] ClientConnected Sender# [22:2178:37] Status# ERROR ClientId# [22:2178:37] ServerId# [0:0:0] PipeClient# [22:2178:37] 2025-05-07T08:46:37.771154Z 23 00h00m00.000000s :BS_NODE DEBUG: [23] ClientConnected Sender# [23:2179:37] Status# ERROR ClientId# [23:2179:37] ServerId# [0:0:0] PipeClient# [23:2179:37] 2025-05-07T08:46:37.771197Z 24 00h00m00.000000s :BS_NODE DEBUG: [24] ClientConnected Sender# [24:2180:37] Status# ERROR ClientId# [24:2180:37] ServerId# [0:0:0] PipeClient# [24:2180:37] 2025-05-07T08:46:37.771234Z 25 00h00m00.000000s :BS_NODE DEBUG: [25] ClientConnected Sender# [25:2181:37] Status# ERROR ClientId# [25:2181:37] ServerId# [0:0:0] PipeClient# [25:2181:37] 2025-05-07T08:46:37.771329Z 26 00h00m00.000000s :BS_NODE DEBUG: [26] ClientConnected Sender# [26:2182:37] Status# ERROR ClientId# [26:2182:37] ServerId# [0:0:0] PipeClient# [26:2182:37] 2025-05-07T08:46:37.771377Z 27 
00h00m00.000000s :BS_NODE DEBUG: [27] ClientConnected Sender# [27:2183:37] Status# ERROR ClientId# [27:2183:37] ServerId# [0:0:0] PipeClient# [27:2183:37] 2025-05-07T08:46:37.771418Z 28 00h00m00.000000s :BS_NODE DEBUG: [28] ClientConnected Sender# [28:2184:37] Status# ERROR ClientId# [28:2184:37] ServerId# [0:0:0] PipeClient# [28:2184:37] 2025-05-07T08:46:37.771457Z 29 00h00m00.000000s :BS_NODE DEBUG: [29] ClientConnected Sender# [29:2185:37] Status# ERROR ClientId# [29:2185:37] ServerId# [0:0:0] PipeClient# [29:2185:37] 2025-05-07T08:46:37.771495Z 30 00h00m00.000000s :BS_NODE DEBUG: [30] ClientConnected Sender# [30:2186:37] Status# ERROR ClientId# [30:2186:37] ServerId# [0:0:0] PipeClient# [30:2186:37] 2025-05-07T08:46:37.771549Z 31 00h00m00.000000s :BS_NODE DEBUG: [31] ClientConnected Sender# [31:2187:37] Status# ERROR ClientId# [31:2187:37] ServerId# [0:0:0] PipeClient# [31:2187:37] 2025-05-07T08:46:37.771600Z 32 00h00m00.000000s :BS_NODE DEBUG: [32] ClientConnected Sender# [32:2188:37] Status# ERROR ClientId# [32:2188:37] ServerId# [0:0:0] PipeClient# [32:2188:37] 2025-05-07T08:46:38.151699Z 1 00h00m00.002048s :BS_CONTROLLER ERROR: {BSC07@impl.h:2175} ProcessControllerEvent event processing took too much time Type# 268637706 Duration# 0.294222s 2025-05-07T08:46:38.151883Z 1 00h00m00.002048s :BS_CONTROLLER ERROR: {BSC00@bsc.cpp:666} StateWork event processing took too much time Type# 2146435078 Duration# 0.294425s 2025-05-07T08:46:38.198134Z 1 00h00m00.002560s :BS_NODE DEBUG: [1] CheckState from [1:2257:73] expected 1 current 0 2025-05-07T08:46:38.198234Z 2 00h00m00.002560s :BS_NODE DEBUG: [2] CheckState from [2:2258:38] expected 1 current 0 2025-05-07T08:46:38.198272Z 3 00h00m00.002560s :BS_NODE DEBUG: [3] CheckState from [3:2259:38] expected 1 current 0 2025-05-07T08:46:38.198307Z 4 00h00m00.002560s :BS_NODE DEBUG: [4] CheckState from [4:2260:38] expected 1 current 0 2025-05-07T08:46:38.198356Z 5 00h00m00.002560s :BS_NODE DEBUG: [5] CheckState from [5:2261:38] expected 1 current 0 2025-05-07T08:46:38.198389Z 6 00h00m00.002560s :BS_NODE DEBUG: [6] CheckState from [6:2262:38] expected 1 current 0 2025-05-07T08:46:38.198444Z 7 00h00m00.002560s :BS_NODE DEBUG: [7] CheckState from [7 ... 
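In the self-heal records that follow, group IDs pair decimal and hex renderings of the same value: GroupId# 2147483668 is 0x80000014, which is how it appears inside VDisk IDs like [80000014:2:0:2:0] (likewise "(2181038080)" and VDISK[82000000:_:0:0:0] earlier in this log). A successful reassign bumps the group generation, renaming every member from [80000014:2:...] to [80000014:3:...]. A small sketch of that rendering, with our reading of the ID fields as [GroupIdHex:Generation:FailRealm:FailDomain:VDisk] (an assumption, not confirmed by this log):

    #include <cassert>
    #include <cstdint>
    #include <cstdio>
    #include <string>

    // Render a group ID the way VDisk IDs print it: zero-padded hex.
    std::string GroupIdHex(uint32_t groupId) {
        char buf[9];
        std::snprintf(buf, sizeof(buf), "%08x", groupId);
        return buf;
    }

    int main() {
        // Decimal GroupId# values from this log and their hex forms.
        assert(GroupIdHex(2147483668u) == "80000014");
        assert(GroupIdHex(2147483652u) == "80000004");
        assert(GroupIdHex(2181038080u) == "82000000");
        // After the reassigner succeeds, generation 2 becomes 3
        // ([80000014:2:0:0:0] -> [80000014:3:0:0:0]) and the moved slot
        // goes from node 23 (pdisk 1000, vslot 1016) to node 20
        // (pdisk 1000, vslot 1014), per the records below.
        return 0;
    }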
kId# [80000014:2:0:5:0] DiskIsOk# true 2025-05-07T08:46:54.003543Z 1 05h15m00.120992s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:111} Reassigner TEvVStatusResult GroupId# 2147483668 Status# OK JoinedGroup# true Replicated# true 2025-05-07T08:46:54.003573Z 1 05h15m00.120992s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:96} Reassigner ProcessVDiskReply GroupId# 2147483668 VDiskId# [80000014:2:0:6:0] DiskIsOk# true 2025-05-07T08:46:54.003605Z 1 05h15m00.120992s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:111} Reassigner TEvVStatusResult GroupId# 2147483668 Status# OK JoinedGroup# true Replicated# true 2025-05-07T08:46:54.003635Z 1 05h15m00.120992s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:96} Reassigner ProcessVDiskReply GroupId# 2147483668 VDiskId# [80000014:2:0:7:0] DiskIsOk# true 2025-05-07T08:46:54.011678Z 1 05h15m00.121504s :BS_NODE DEBUG: [1] NodeServiceSetUpdate 2025-05-07T08:46:54.011774Z 1 05h15m00.121504s :BS_NODE DEBUG: [1] VDiskId# [80000014:2:0:0:0] -> [80000014:3:0:0:0] 2025-05-07T08:46:54.012352Z 1 05h15m00.121504s :BS_SELFHEAL INFO: {BSSH09@self_heal.cpp:207} Reassigner succeeded GroupId# 2147483668 Items# [80000014:2:0:2:0]: 23:1000:1016 -> 20:1000:1014 ConfigTxSeqNo# 505 2025-05-07T08:46:54.012396Z 1 05h15m00.121504s :BS_SELFHEAL DEBUG: {BSSH08@self_heal.cpp:218} Reassigner finished GroupId# 2147483668 Success# true 2025-05-07T08:46:54.012584Z 2 05h15m00.121504s :BS_NODE DEBUG: [2] NodeServiceSetUpdate 2025-05-07T08:46:54.012649Z 2 05h15m00.121504s :BS_NODE DEBUG: [2] VDiskId# [80000014:2:0:1:0] -> [80000014:3:0:1:0] 2025-05-07T08:46:54.012756Z 20 05h15m00.121504s :BS_NODE DEBUG: [20] NodeServiceSetUpdate 2025-05-07T08:46:54.012802Z 20 05h15m00.121504s :BS_NODE DEBUG: [20] VDiskId# [80000014:3:0:2:0] PDiskId# 1000 VSlotId# 1014 created 2025-05-07T08:46:54.012877Z 20 05h15m00.121504s :BS_NODE DEBUG: [20] VDiskId# [80000014:3:0:2:0] status changed to INIT_PENDING 2025-05-07T08:46:54.012975Z 4 05h15m00.121504s :BS_NODE DEBUG: [4] NodeServiceSetUpdate 2025-05-07T08:46:54.013024Z 4 05h15m00.121504s :BS_NODE DEBUG: [4] VDiskId# [80000014:2:0:3:0] -> [80000014:3:0:3:0] 2025-05-07T08:46:54.013108Z 5 05h15m00.121504s :BS_NODE DEBUG: [5] NodeServiceSetUpdate 2025-05-07T08:46:54.013155Z 5 05h15m00.121504s :BS_NODE DEBUG: [5] VDiskId# [80000014:2:0:4:0] -> [80000014:3:0:4:0] 2025-05-07T08:46:54.013216Z 23 05h15m00.121504s :BS_NODE DEBUG: [23] NodeServiceSetUpdate 2025-05-07T08:46:54.013301Z 6 05h15m00.121504s :BS_NODE DEBUG: [6] NodeServiceSetUpdate 2025-05-07T08:46:54.013353Z 6 05h15m00.121504s :BS_NODE DEBUG: [6] VDiskId# [80000014:2:0:5:0] -> [80000014:3:0:5:0] 2025-05-07T08:46:54.013434Z 7 05h15m00.121504s :BS_NODE DEBUG: [7] NodeServiceSetUpdate 2025-05-07T08:46:54.013482Z 7 05h15m00.121504s :BS_NODE DEBUG: [7] VDiskId# [80000014:2:0:6:0] -> [80000014:3:0:6:0] 2025-05-07T08:46:54.013567Z 8 05h15m00.121504s :BS_NODE DEBUG: [8] NodeServiceSetUpdate 2025-05-07T08:46:54.013615Z 8 05h15m00.121504s :BS_NODE DEBUG: [8] VDiskId# [80000014:2:0:7:0] -> [80000014:3:0:7:0] 2025-05-07T08:46:54.013911Z 1 05h15m00.121504s :BS_SELFHEAL DEBUG: {BSSH01@self_heal.cpp:71} Reassigner starting GroupId# 2147483652 2025-05-07T08:46:54.018598Z 1 05h15m00.121504s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:111} Reassigner TEvVStatusResult GroupId# 2147483652 Status# OK JoinedGroup# true Replicated# true 2025-05-07T08:46:54.018665Z 1 05h15m00.121504s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:96} Reassigner ProcessVDiskReply GroupId# 2147483652 VDiskId# [80000004:2:0:0:0] DiskIsOk# true 2025-05-07T08:46:54.018961Z 1 
05h15m00.121504s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:111} Reassigner TEvVStatusResult GroupId# 2147483652 Status# OK JoinedGroup# true Replicated# true 2025-05-07T08:46:54.019001Z 1 05h15m00.121504s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:96} Reassigner ProcessVDiskReply GroupId# 2147483652 VDiskId# [80000004:2:0:1:0] DiskIsOk# true 2025-05-07T08:46:54.019036Z 1 05h15m00.121504s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:111} Reassigner TEvVStatusResult GroupId# 2147483652 Status# OK JoinedGroup# true Replicated# true 2025-05-07T08:46:54.019066Z 1 05h15m00.121504s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:96} Reassigner ProcessVDiskReply GroupId# 2147483652 VDiskId# [80000004:2:0:3:0] DiskIsOk# true 2025-05-07T08:46:54.019098Z 1 05h15m00.121504s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:111} Reassigner TEvVStatusResult GroupId# 2147483652 Status# OK JoinedGroup# true Replicated# true 2025-05-07T08:46:54.019125Z 1 05h15m00.121504s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:96} Reassigner ProcessVDiskReply GroupId# 2147483652 VDiskId# [80000004:2:0:4:0] DiskIsOk# true 2025-05-07T08:46:54.019158Z 1 05h15m00.121504s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:111} Reassigner TEvVStatusResult GroupId# 2147483652 Status# OK JoinedGroup# true Replicated# true 2025-05-07T08:46:54.019186Z 1 05h15m00.121504s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:96} Reassigner ProcessVDiskReply GroupId# 2147483652 VDiskId# [80000004:2:0:5:0] DiskIsOk# true 2025-05-07T08:46:54.019216Z 1 05h15m00.121504s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:111} Reassigner TEvVStatusResult GroupId# 2147483652 Status# OK JoinedGroup# true Replicated# true 2025-05-07T08:46:54.019244Z 1 05h15m00.121504s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:96} Reassigner ProcessVDiskReply GroupId# 2147483652 VDiskId# [80000004:2:0:6:0] DiskIsOk# true 2025-05-07T08:46:54.019278Z 1 05h15m00.121504s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:111} Reassigner TEvVStatusResult GroupId# 2147483652 Status# OK JoinedGroup# true Replicated# true 2025-05-07T08:46:54.019305Z 1 05h15m00.121504s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:96} Reassigner ProcessVDiskReply GroupId# 2147483652 VDiskId# [80000004:2:0:7:0] DiskIsOk# true 2025-05-07T08:46:54.027304Z 1 05h15m00.122016s :BS_NODE DEBUG: [1] NodeServiceSetUpdate 2025-05-07T08:46:54.027399Z 1 05h15m00.122016s :BS_NODE DEBUG: [1] VDiskId# [80000004:2:0:0:0] -> [80000004:3:0:0:0] 2025-05-07T08:46:54.027969Z 1 05h15m00.122016s :BS_SELFHEAL INFO: {BSSH09@self_heal.cpp:207} Reassigner succeeded GroupId# 2147483652 Items# [80000004:2:0:2:0]: 23:1000:1017 -> 20:1000:1015 ConfigTxSeqNo# 506 2025-05-07T08:46:54.028013Z 1 05h15m00.122016s :BS_SELFHEAL DEBUG: {BSSH08@self_heal.cpp:218} Reassigner finished GroupId# 2147483652 Success# true 2025-05-07T08:46:54.028192Z 2 05h15m00.122016s :BS_NODE DEBUG: [2] NodeServiceSetUpdate 2025-05-07T08:46:54.028253Z 2 05h15m00.122016s :BS_NODE DEBUG: [2] VDiskId# [80000004:2:0:1:0] -> [80000004:3:0:1:0] 2025-05-07T08:46:54.028353Z 20 05h15m00.122016s :BS_NODE DEBUG: [20] NodeServiceSetUpdate 2025-05-07T08:46:54.028400Z 20 05h15m00.122016s :BS_NODE DEBUG: [20] VDiskId# [80000004:3:0:2:0] PDiskId# 1000 VSlotId# 1015 created 2025-05-07T08:46:54.028475Z 20 05h15m00.122016s :BS_NODE DEBUG: [20] VDiskId# [80000004:3:0:2:0] status changed to INIT_PENDING 2025-05-07T08:46:54.028572Z 4 05h15m00.122016s :BS_NODE DEBUG: [4] NodeServiceSetUpdate 2025-05-07T08:46:54.028626Z 4 05h15m00.122016s :BS_NODE DEBUG: [4] VDiskId# [80000004:2:0:3:0] -> [80000004:3:0:3:0] 
2025-05-07T08:46:54.028706Z 5 05h15m00.122016s :BS_NODE DEBUG: [5] NodeServiceSetUpdate 2025-05-07T08:46:54.028753Z 5 05h15m00.122016s :BS_NODE DEBUG: [5] VDiskId# [80000004:2:0:4:0] -> [80000004:3:0:4:0] 2025-05-07T08:46:54.028818Z 23 05h15m00.122016s :BS_NODE DEBUG: [23] NodeServiceSetUpdate 2025-05-07T08:46:54.028904Z 6 05h15m00.122016s :BS_NODE DEBUG: [6] NodeServiceSetUpdate 2025-05-07T08:46:54.028952Z 6 05h15m00.122016s :BS_NODE DEBUG: [6] VDiskId# [80000004:2:0:5:0] -> [80000004:3:0:5:0] 2025-05-07T08:46:54.029026Z 7 05h15m00.122016s :BS_NODE DEBUG: [7] NodeServiceSetUpdate 2025-05-07T08:46:54.029077Z 7 05h15m00.122016s :BS_NODE DEBUG: [7] VDiskId# [80000004:2:0:6:0] -> [80000004:3:0:6:0] 2025-05-07T08:46:54.029156Z 8 05h15m00.122016s :BS_NODE DEBUG: [8] NodeServiceSetUpdate 2025-05-07T08:46:54.029202Z 8 05h15m00.122016s :BS_NODE DEBUG: [8] VDiskId# [80000004:2:0:7:0] -> [80000004:3:0:7:0] 2025-05-07T08:46:54.034444Z 20 05h15m02.127480s :BS_NODE DEBUG: [20] VDiskId# [80000034:3:0:2:0] status changed to REPLICATING 2025-05-07T08:46:54.035217Z 20 05h15m04.260456s :BS_NODE DEBUG: [20] VDiskId# [8000001c:3:0:2:0] status changed to REPLICATING 2025-05-07T08:46:54.035900Z 20 05h15m04.516016s :BS_NODE DEBUG: [20] VDiskId# [80000004:3:0:2:0] status changed to REPLICATING 2025-05-07T08:46:54.036551Z 20 05h15m04.553992s :BS_NODE DEBUG: [20] VDiskId# [80000024:3:0:2:0] status changed to REPLICATING 2025-05-07T08:46:54.037207Z 23 05h15m04.605920s :BS_NODE DEBUG: [23] VDiskId# [8000002a:5:0:3:0] status changed to REPLICATING 2025-05-07T08:46:54.038073Z 20 05h15m04.930432s :BS_NODE DEBUG: [20] VDiskId# [8000003c:3:0:2:0] status changed to REPLICATING 2025-05-07T08:46:54.039458Z 20 05h15m05.121504s :BS_NODE DEBUG: [20] VDiskId# [80000014:3:0:2:0] status changed to REPLICATING 2025-05-07T08:46:54.040105Z 20 05h15m05.851944s :BS_NODE DEBUG: [20] VDiskId# [8000002c:3:0:2:0] status changed to REPLICATING 2025-05-07T08:46:54.040699Z 20 05h15m06.024968s :BS_NODE DEBUG: [20] VDiskId# [8000000c:3:0:2:0] status changed to REPLICATING 2025-05-07T08:46:54.041488Z 20 05h15m11.174504s :BS_NODE DEBUG: [20] VDiskId# [80000014:3:0:2:0] status changed to READY 2025-05-07T08:46:54.042745Z 23 05h15m11.175016s :BS_NODE DEBUG: [23] NodeServiceSetUpdate 2025-05-07T08:46:54.042824Z 23 05h15m11.175016s :BS_NODE DEBUG: [23] VDiskId# [80000014:2:0:2:0] destroyed 2025-05-07T08:46:54.043022Z 20 05h15m14.625944s :BS_NODE DEBUG: [20] VDiskId# [8000002c:3:0:2:0] status changed to READY 2025-05-07T08:46:54.044159Z 23 05h15m14.626456s :BS_NODE DEBUG: [23] NodeServiceSetUpdate 2025-05-07T08:46:54.044214Z 23 05h15m14.626456s :BS_NODE DEBUG: [23] VDiskId# [8000002c:2:0:2:0] destroyed 2025-05-07T08:46:54.044944Z 20 05h15m19.052016s :BS_NODE DEBUG: [20] VDiskId# [80000004:3:0:2:0] status changed to READY 2025-05-07T08:46:54.046106Z 23 05h15m19.052528s :BS_NODE DEBUG: [23] NodeServiceSetUpdate 2025-05-07T08:46:54.046159Z 23 05h15m19.052528s :BS_NODE DEBUG: [23] VDiskId# [80000004:2:0:2:0] destroyed 2025-05-07T08:46:54.046470Z 20 05h15m21.563480s :BS_NODE DEBUG: [20] VDiskId# [80000034:3:0:2:0] status changed to READY 2025-05-07T08:46:54.047559Z 23 05h15m21.563992s :BS_NODE DEBUG: [23] NodeServiceSetUpdate 2025-05-07T08:46:54.047604Z 23 05h15m21.563992s :BS_NODE DEBUG: [23] VDiskId# [80000034:2:0:2:0] destroyed 2025-05-07T08:46:54.048220Z 23 05h15m27.999920s :BS_NODE DEBUG: [23] VDiskId# [8000002a:5:0:3:0] status changed to READY 2025-05-07T08:46:54.049521Z 23 05h15m28.000432s :BS_NODE DEBUG: [23] NodeServiceSetUpdate 
2025-05-07T08:46:54.049572Z 23 05h15m28.000432s :BS_NODE DEBUG: [23] VDiskId# [8000002a:4:0:3:0] destroyed
2025-05-07T08:46:54.050423Z 20 05h15m32.681432s :BS_NODE DEBUG: [20] VDiskId# [8000003c:3:0:2:0] status changed to READY
2025-05-07T08:46:54.051447Z 23 05h15m32.681944s :BS_NODE DEBUG: [23] NodeServiceSetUpdate
2025-05-07T08:46:54.051494Z 23 05h15m32.681944s :BS_NODE DEBUG: [23] VDiskId# [8000003c:2:0:2:0] destroyed
2025-05-07T08:46:54.051836Z 20 05h15m34.944992s :BS_NODE DEBUG: [20] VDiskId# [80000024:3:0:2:0] status changed to READY
2025-05-07T08:46:54.052733Z 23 05h15m34.945504s :BS_NODE DEBUG: [23] NodeServiceSetUpdate
2025-05-07T08:46:54.052779Z 23 05h15m34.945504s :BS_NODE DEBUG: [23] VDiskId# [80000024:2:0:2:0] destroyed
2025-05-07T08:46:54.053044Z 20 05h15m36.025456s :BS_NODE DEBUG: [20] VDiskId# [8000001c:3:0:2:0] status changed to READY
2025-05-07T08:46:54.053945Z 23 05h15m36.025968s :BS_NODE DEBUG: [23] NodeServiceSetUpdate
2025-05-07T08:46:54.054042Z 23 05h15m36.025968s :BS_NODE DEBUG: [23] VDiskId# [8000001c:2:0:2:0] destroyed
2025-05-07T08:46:54.054179Z 20 05h15m36.103968s :BS_NODE DEBUG: [20] VDiskId# [8000000c:3:0:2:0] status changed to READY
2025-05-07T08:46:54.055196Z 23 05h15m36.104480s :BS_NODE DEBUG: [23] NodeServiceSetUpdate
2025-05-07T08:46:54.055245Z 23 05h15m36.104480s :BS_NODE DEBUG: [23] VDiskId# [8000000c:2:0:2:0] destroyed
>> TSubgroupPartLayoutTest::CountEffectiveReplicas4of4 [GOOD]
>> BlobDepot::BasicCollectGarbage [GOOD]
>> BlobDepot::VerifiedRandom
------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/groupinfo/ut/unittest >> TSubgroupPartLayoutTest::CountEffectiveReplicas4of4 [GOOD]
Test command err: testing erasure block-3-1 main# 0 main# 1 ... main# 15 Checked 256 cases, took 81 us
testing erasure stripe-4-2 main# 0 main# 1 ... main# 63 Checked 262144 cases, took 1350006 us
testing erasure block-2-3 main# 0 main# 1 ... main# 31 Checked 1048576 cases, took 3385444 us
testing erasure stripe-3-1 main# 0 main# 1 ... main# 15 Checked 256 cases, took 25519 us
testing erasure stripe-3-2 main# 0 main# 1 ... main# 31 Checked 32768 cases, took 48475 us
testing erasure stripe-2-3 main# 0 main# 1 ... main# 31 Checked 1048576 cases, took 2654428 us
>> TPDiskRaces::DecommitWithInflightMock [GOOD]
>> ShredPDisk::SimpleShredRepeat
|88.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/sharding/ut/ydb-core-tx-sharding-ut
|88.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/sharding/ut/ydb-core-tx-sharding-ut
|88.3%| [TM] {RESULT} ydb/library/yaml_config/ut_transform/py3test
|88.3%| [LD] {RESULT} $(B)/ydb/core/tx/sharding/ut/ydb-core-tx-sharding-ut
>> TBsLocalRecovery::ChaoticWriteRestartHugeIncreased [GOOD]
>> ShredPDisk::SimpleShredRepeat [GOOD]
>> ShredPDisk::SimpleShredRepeatAfterPDiskRestart
>> test.py::test[solomon-Downsampling-default.txt] [GOOD]
>> test.py::test[solomon-DownsamplingValidSettings-default.txt]
------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_vdisk/unittest >> TBsLocalRecovery::ChaoticWriteRestartHugeIncreased [GOOD]
Test command err: 2025-05-07T08:46:29.315634Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:622: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:370:0:0:66560:1] Marker# BSVS08
2025-05-07T08:46:29.315658Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:622: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:531:0:0:66560:1] Marker# BSVS08
2025-05-07T08:46:29.315677Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:622: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:720:0:0:66560:1] Marker# BSVS08
2025-05-07T08:46:29.315695Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:622: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:298:0:0:66560:1] Marker# BSVS08
2025-05-07T08:46:29.315719Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:622: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:905:0:0:66560:1] Marker# BSVS08
2025-05-07T08:46:29.315739Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:622: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:337:0:0:66560:1] Marker# BSVS08
2025-05-07T08:46:29.315757Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:622: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:113:0:0:66560:1] Marker# BSVS08
2025-05-07T08:46:29.315776Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:622: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:463:0:0:66560:1] Marker# BSVS08
2025-05-07T08:46:29.315794Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:622: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:205:0:0:66560:1] Marker# BSVS08
2025-05-07T08:46:29.315813Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:622: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:915:0:0:66560:1] Marker# BSVS08
2025-05-07T08:46:29.316468Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:622: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:225:0:0:66560:1] Marker# BSVS08
2025-05-07T08:46:29.316487Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:622: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:108:0:0:66560:1] Marker# BSVS08
2025-05-07T08:46:29.316508Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:622: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:502:0:0:66560:1] Marker# BSVS08
2025-05-07T08:46:29.316528Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:622: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:672:0:0:66560:1] Marker# BSVS08 2025-05-07T08:46:29.316546Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:622: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:924:0:0:66560:1] Marker# BSVS08 2025-05-07T08:46:29.316563Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:622: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:871:0:0:66560:1] Marker# BSVS08 2025-05-07T08:46:29.316582Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:622: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:191:0:0:66560:1] Marker# BSVS08 2025-05-07T08:46:29.316600Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:622: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:521:0:0:66560:1] Marker# BSVS08 2025-05-07T08:46:29.316616Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:622: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:847:0:0:66560:1] Marker# BSVS08 2025-05-07T08:46:29.316634Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:622: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:837:0:0:66560:1] Marker# BSVS08 2025-05-07T08:46:29.317131Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:622: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:696:0:0:66560:1] Marker# BSVS08 2025-05-07T08:46:29.317152Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:622: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:16:0:0:66560:1] Marker# BSVS08 2025-05-07T08:46:29.317170Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:622: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:939:0:0:66560:1] Marker# BSVS08 2025-05-07T08:46:29.317190Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:622: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:609:0:0:66560:1] Marker# BSVS08 2025-05-07T08:46:29.317208Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:622: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:779:0:0:66560:1] Marker# BSVS08 2025-05-07T08:46:29.317227Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:622: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:954:0:0:66560:1] Marker# BSVS08 2025-05-07T08:46:29.317246Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:622: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:65:0:0:66560:1] Marker# BSVS08 2025-05-07T08:46:29.317263Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:622: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:832:0:0:66560:1] Marker# BSVS08 2025-05-07T08:46:29.317281Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:622: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:949:0:0:66560:1] Marker# BSVS08 2025-05-07T08:46:29.317298Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:622: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:361:0:0:66560:1] Marker# BSVS08 2025-05-07T08:46:29.317843Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:622: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:64:0:0:66560:1] Marker# BSVS08 2025-05-07T08:46:29.317875Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:622: 
PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:103:0:0:66560:1] Marker# BSVS08 2025-05-07T08:46:29.317895Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:622: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:31:0:0:66560:1] Marker# BSVS08 2025-05-07T08:46:29.317916Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:622: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:667:0:0:66560:1] Marker# BSVS08 2025-05-07T08:46:29.317934Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:622: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:45:0:0:66560:1] Marker# BSVS08 2025-05-07T08:46:29.317952Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:622: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:395:0:0:66560:1] Marker# BSVS08 2025-05-07T08:46:29.318225Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:622: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:512:0:0:66560:1] Marker# BSVS08 2025-05-07T08:46:29.318252Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:622: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:784:0:0:66560:1] Marker# BSVS08 2025-05-07T08:46:29.318271Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:622: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:614:0:0:66560:1] Marker# BSVS08 2025-05-07T08:46:29.318289Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:622: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:21:0:0:66560:1] Marker# BSVS08 2025-05-07T08:46:29.318775Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:622: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:701:0:0:66560:1] Marker# BSVS08 2025-05-07T08:46:29.318800Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:622: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:30:0:0:66560:1] Marker# BSVS08 2025-05-07T08:46:29.318818Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:622: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:346:0:0:66560:1] Marker# BSVS08 2025-05-07T08:46:29.318838Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:622: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:74:0:0:66560:1] Marker# BSVS08 2025-05-07T08:46:29.318858Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:622: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:652:0:0:66560:1] Marker# BSVS08 2025-05-07T08:46:29.318876Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:622: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:920:0:0:66560:1] Marker# BSVS08 2025-05-07T08:46:29.318895Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:622: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:278:0:0:66560:1] Marker# BSVS08 2025-05-07T08:46:29.318914Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:622: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:681:0:0:66560:1] Marker# BSVS08 2025-05-07T08:46:29.318938Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:622: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:895:0:0:66560:1] Marker# BSVS08 2025-05-07T08:46:29.318957Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:622: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# 
[5000:1:439:0:0:66560:1] Marker# BSVS08 2025-05-07T08:46:29.319469Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:622: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:963:0:0:66560:1] Marker# BSVS08 2025-05-07T08:46:29.319490Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:622: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:764:0:0:66560:1] Marker# BSVS08 2025-05-07T08:46:29.319511Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:622: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:98:0:0:66560:1] Marker# BSVS08 2025-05-07T08:46:29.319529Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:622: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:424:0:0:66560:1] Marker# BSVS08 2025-05-07T08:46:29.319547Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:622: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:308:0:0:66560:1] Marker# BSVS08 2025-05-07T08:46:29.319565Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:622: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:711:0:0:66560:1] Marker# BSVS08 2025-05-07T08:46:29.319587Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:622: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:551:0:0:66560:1] Marker# BSVS08 2025-05-07T08:46:29.319610Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:622: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:716:0:0:66560:1] Marker# BSVS08 2025-05-07T08:46:29.319627Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:622: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:862:0:0:66560:1] Marker# BSVS08 2025-05-07T08:46:29.319645Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:622: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:264:0:0:66560:1] Marker# BSVS08 2025-05-07T08:46:29.320213Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:622: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:404:0:0:66560:1] Marker# BSVS08 2025-05-07T08:46:29.320233Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:622: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:619:0:0:66560:1] Marker# BSVS08 2025-05-07T08:46:29.320251Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:622: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:842:0:0:66560:1] Marker# BSVS08 2025-05-07T08:46:29.320271Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:622: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:774:0:0:66560:1] Marker# BSVS08 2025-05-07T08:46:29.320292Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:622: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:239:0:0:66560:1] Marker# BSVS08 2025-05-07T08:46:29.320310Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:622: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:618:0:0:66560:1] Marker# BSVS08 2025-05-07T08:46:29.320327Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:622: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:891:0:0:66560:1] Marker# BSVS08 2025-05-07T08:46:29.320347Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:622: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:210:0:0:66560:1] Marker# BSVS08 2025-05-07T08:46:29.320369Z :BS_VDISK_PUT 
CRIT: blobstorage_skeleton.cpp:622: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:506:0:0:66560:1] Marker# BSVS08 2025-05-07T08:46:29.320388Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:622: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:409:0:0:66560:1] Marker# BSVS08 2025-05-07T08:46:29.320941Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:622: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:900:0:0:66560:1] Marker# BSVS08 2025-05-07T08:46:29.320960Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:622: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:341:0:0:66560:1] Marker# BSVS08 2025-05-07T08:46:29.320981Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:622: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:958:0:0:66560:1] Marker# BSVS08 2025-05-07T08:46:29.320999Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:622: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:133:0:0:66560:1] Marker# BSVS08 2025-05-07T08:46:29.321018Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:622: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:274:0:0:66560:1] Marker# BSVS08 2025-05-07T08:46:29.321035Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:622: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:750:0:0:66560:1] Marker# BSVS08 2025-05-07T08:46:29.321057Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:622: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:492:0:0:66560:1] Marker# BSVS08 2025-05-07T08:46:29.321074Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:622: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:866:0:0:66560:1] Marker# BSVS08 2025-05-07T08:46:29.321091Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:622: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:99:0:0:66560:1] Marker# BSVS08 2025-05-07T08:46:29.321109Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:622: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:890:0:0:66560:1] Marker# BSVS08 2025-05-07T08:46:29.321576Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:622: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:195:0:0:66560:1] Marker# BSVS08 2025-05-07T08:46:29.321593Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:622: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:511:0:0:66560:1] Marker# BSVS08 2025-05-07T08:46:29.321612Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:622: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:857:0:0:66560:1] Marker# BSVS08 2025-05-07T08:46:29.321628Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:622: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:852:0:0:66560:1] Marker# BSVS08 2025-05-07T08:46:29.321647Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:622: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:584:0:0:66560:1] Marker# BSVS08 2025-05-07T08:46:29.321664Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:622: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:448:0:0:66560:1] Marker# BSVS08 2025-05-07T08:46:29.321682Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:622: PDiskId# 1 VDISK[0:_:0:0:0]: (0) 
TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:438:0:0:66560:1] Marker# BSVS08 2025-05-07T08:46:29.321702Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:622: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:472:0:0:66560:1] Marker# BSVS08 2025-05-07T08:46:29.321719Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:622: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:234:0:0:66560:1] Marker# BSVS08 2025-05-07T08:46:29.321736Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:622: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:89:0:0:66560:1] Marker# BSVS08 2025-05-07T08:46:29.322443Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:622: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:322:0:0:66560:1] Marker# BSVS08 2025-05-07T08:46:29.322470Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:622: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:997:0:0:66560:1] Marker# BSVS08 >> TPDiskRaces::OwnerRecreationRaces [GOOD] >> TPDiskRaces::OwnerKilledWhileReadingLog >> Sharding::XXUsage |88.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/sharding/ut/unittest >> Sharding::XXUsage [GOOD] |88.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/sharding/ut/unittest |88.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/sharding/ut/unittest |88.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/sharding/ut/unittest |88.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/fq/libs/result_formatter/ut/ydb-core-fq-libs-result_formatter-ut |88.3%| [LD] {RESULT} $(B)/ydb/core/fq/libs/result_formatter/ut/ydb-core-fq-libs-result_formatter-ut |88.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/fq/libs/result_formatter/ut/ydb-core-fq-libs-result_formatter-ut ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/sharding/ut/unittest >> Sharding::XXUsage [GOOD] Test command err: 2249711912916349199 14128560377008527040 12397953179328192791 17694494215937709953 1872815163066860186 3170449520789037051 14622926658150614197 12314329038445481291 5564808542296965746 7490997836052935571 17910175907345450837 14067028440501841369 2533506780328216104 13315168724817516363 10729167219108080623 18360376621195574120 2171123380985302522 2307105957780217354 7804600681699460456 8147992680589615819 2249711912916349199 17574882580004205161 11850823497802868122 2843691324881506469 2694153416073032636 11372880007360554020 7042253895866463862 16182985011390653875 16690939209458224500 16203155722579386984 7944457645031362548 3045359197365204624 15788011526683064516 8430512097865963947 12684858236165163369 12707101569171351542 10129051729567413941 899513940907705782 14473575222822286324 10982368081227004432 2244420788148980662 13516971036624691295 3002441759881847279 663948167809706381 18031424873183308977 1913801087416390615 12400276400372726031 3235945262335863659 11167889508109265837 8041530036315243205 1705245180426504302 18218664116835391090 1137957930684371653 8133448837872893786 3073832941606689708 12443352586669282769 12384538132321982192 6230220690702183793 9451246951547870551 14054543652223408776 17099506110185566985 12216522614935259457 17961663409317450767 6415070140291540491 4695285906668647313 113807010628818720 7961948724288627077 12688653334437526429 11407006516559426728 11459313411139019623 15196879830957199926 10444450089884289385 11388265142670171270 3173273054396710214 11888212676030210051 10479268907202572796 
1266505237589642670 15365442027201585491 3092646397001199421 9977796976981828320 17721168523505783535 17324063948110670587 4113332255663558032 9294864432315236533 8721988621106478663 1221091403911486062 10256632859904521013 843956212818407690 17360572843019887681 14646865722209736804 >> ShredPDisk::SimpleShredRepeatAfterPDiskRestart [GOOD] |88.3%| [TA] $(B)/ydb/core/tx/sharding/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> BsControllerTest::SelfHealMirror3dc [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/pdisk/ut/unittest >> ShredPDisk::SimpleShredRepeatAfterPDiskRestart [GOOD] Test command err: GREEN 0.5025125628 0 CYAN 0.8623115578 0.862 LIGHT_YELLOW 0.8934673367 0.893 YELLOW 0.9145728643 0.914 LIGHT_ORANGE 0.9306532663 0.93 PRE_ORANGE 0.9467336683 0.946 ORANGE 0.9668341709 0.966 RED 0.9879396985 0.987 BLACK 0.9979899497 0.997 /home/runner/actions_runner/_work/ydb/ydb/ydb/core/blobstorage/pdisk/blobstorage_pdisk_ut_env.h:379 /home/runner/actions_runner/_work/ydb/ydb/ydb/core/blobstorage/pdisk/blobstorage_pdisk_ut_env.h:379 >> TYardTest::TestSysLogReordering [GOOD] >> TYardTest::TestStartingPoints >> ClosedIntervalSet::Difference [GOOD] >> ClosedIntervalSet::Contains >> ClosedIntervalSet::Contains [GOOD] >> ClosedIntervalSet::EnumInRange >> ResultFormatter::List >> ResultFormatter::List [GOOD] >> ResultFormatter::Null [GOOD] >> ResultFormatter::EmptyResultSet [GOOD] >> ResultFormatter::EmptyList [GOOD] >> ResultFormatter::EmptyTuple [GOOD] >> ResultFormatter::Utf8WithQuotes [GOOD] >> ResultFormatter::VariantStruct [GOOD] >> ResultFormatter::EmptyDict [GOOD] >> ResultFormatter::Dict [GOOD] >> ResultFormatter::Decimal [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut_selfheal/unittest >> BsControllerTest::SelfHealMirror3dc [GOOD] Test command err: 2025-05-07T08:46:38.941420Z 1 00h00m00.000000s :BS_NODE DEBUG: [1] Bootstrap 2025-05-07T08:46:38.941474Z 1 00h00m00.000000s :BS_NODE DEBUG: [1] Connect 2025-05-07T08:46:38.941577Z 2 00h00m00.000000s :BS_NODE DEBUG: [2] Bootstrap 2025-05-07T08:46:38.941618Z 2 00h00m00.000000s :BS_NODE DEBUG: [2] Connect 2025-05-07T08:46:38.941676Z 3 00h00m00.000000s :BS_NODE DEBUG: [3] Bootstrap 2025-05-07T08:46:38.941709Z 3 00h00m00.000000s :BS_NODE DEBUG: [3] Connect 2025-05-07T08:46:38.941761Z 4 00h00m00.000000s :BS_NODE DEBUG: [4] Bootstrap 2025-05-07T08:46:38.941791Z 4 00h00m00.000000s :BS_NODE DEBUG: [4] Connect 2025-05-07T08:46:38.941834Z 5 00h00m00.000000s :BS_NODE DEBUG: [5] Bootstrap 2025-05-07T08:46:38.941855Z 5 00h00m00.000000s :BS_NODE DEBUG: [5] Connect 2025-05-07T08:46:38.941893Z 6 00h00m00.000000s :BS_NODE DEBUG: [6] Bootstrap 2025-05-07T08:46:38.941916Z 6 00h00m00.000000s :BS_NODE DEBUG: [6] Connect 2025-05-07T08:46:38.941955Z 7 00h00m00.000000s :BS_NODE DEBUG: [7] Bootstrap 2025-05-07T08:46:38.941997Z 7 00h00m00.000000s :BS_NODE DEBUG: [7] Connect 2025-05-07T08:46:38.942033Z 8 00h00m00.000000s :BS_NODE DEBUG: [8] Bootstrap 2025-05-07T08:46:38.942055Z 8 00h00m00.000000s :BS_NODE DEBUG: [8] Connect 2025-05-07T08:46:38.942111Z 9 00h00m00.000000s :BS_NODE DEBUG: [9] Bootstrap 2025-05-07T08:46:38.942137Z 9 00h00m00.000000s :BS_NODE DEBUG: [9] Connect 2025-05-07T08:46:38.942180Z 10 00h00m00.000000s :BS_NODE DEBUG: [10] Bootstrap 2025-05-07T08:46:38.942201Z 10 00h00m00.000000s :BS_NODE DEBUG: [10] Connect 2025-05-07T08:46:38.942252Z 11 00h00m00.000000s :BS_NODE DEBUG: [11] Bootstrap 2025-05-07T08:46:38.942285Z 11 00h00m00.000000s :BS_NODE DEBUG: [11] Connect 
2025-05-07T08:46:38.942332Z 12 00h00m00.000000s :BS_NODE DEBUG: [12] Bootstrap 2025-05-07T08:46:38.942355Z 12 00h00m00.000000s :BS_NODE DEBUG: [12] Connect 2025-05-07T08:46:38.942403Z 13 00h00m00.000000s :BS_NODE DEBUG: [13] Bootstrap 2025-05-07T08:46:38.942429Z 13 00h00m00.000000s :BS_NODE DEBUG: [13] Connect 2025-05-07T08:46:38.942467Z 14 00h00m00.000000s :BS_NODE DEBUG: [14] Bootstrap 2025-05-07T08:46:38.942490Z 14 00h00m00.000000s :BS_NODE DEBUG: [14] Connect 2025-05-07T08:46:38.942532Z 15 00h00m00.000000s :BS_NODE DEBUG: [15] Bootstrap 2025-05-07T08:46:38.942553Z 15 00h00m00.000000s :BS_NODE DEBUG: [15] Connect 2025-05-07T08:46:38.942593Z 16 00h00m00.000000s :BS_NODE DEBUG: [16] Bootstrap 2025-05-07T08:46:38.942617Z 16 00h00m00.000000s :BS_NODE DEBUG: [16] Connect 2025-05-07T08:46:38.942651Z 17 00h00m00.000000s :BS_NODE DEBUG: [17] Bootstrap 2025-05-07T08:46:38.942673Z 17 00h00m00.000000s :BS_NODE DEBUG: [17] Connect 2025-05-07T08:46:38.942713Z 18 00h00m00.000000s :BS_NODE DEBUG: [18] Bootstrap 2025-05-07T08:46:38.942737Z 18 00h00m00.000000s :BS_NODE DEBUG: [18] Connect 2025-05-07T08:46:38.942815Z 19 00h00m00.000000s :BS_NODE DEBUG: [19] Bootstrap 2025-05-07T08:46:38.942850Z 19 00h00m00.000000s :BS_NODE DEBUG: [19] Connect 2025-05-07T08:46:38.942909Z 20 00h00m00.000000s :BS_NODE DEBUG: [20] Bootstrap 2025-05-07T08:46:38.942946Z 20 00h00m00.000000s :BS_NODE DEBUG: [20] Connect 2025-05-07T08:46:38.942992Z 21 00h00m00.000000s :BS_NODE DEBUG: [21] Bootstrap 2025-05-07T08:46:38.943018Z 21 00h00m00.000000s :BS_NODE DEBUG: [21] Connect 2025-05-07T08:46:38.943055Z 22 00h00m00.000000s :BS_NODE DEBUG: [22] Bootstrap 2025-05-07T08:46:38.943079Z 22 00h00m00.000000s :BS_NODE DEBUG: [22] Connect 2025-05-07T08:46:38.943119Z 23 00h00m00.000000s :BS_NODE DEBUG: [23] Bootstrap 2025-05-07T08:46:38.943143Z 23 00h00m00.000000s :BS_NODE DEBUG: [23] Connect 2025-05-07T08:46:38.943202Z 24 00h00m00.000000s :BS_NODE DEBUG: [24] Bootstrap 2025-05-07T08:46:38.943226Z 24 00h00m00.000000s :BS_NODE DEBUG: [24] Connect 2025-05-07T08:46:38.943285Z 25 00h00m00.000000s :BS_NODE DEBUG: [25] Bootstrap 2025-05-07T08:46:38.943312Z 25 00h00m00.000000s :BS_NODE DEBUG: [25] Connect 2025-05-07T08:46:38.943348Z 26 00h00m00.000000s :BS_NODE DEBUG: [26] Bootstrap 2025-05-07T08:46:38.943369Z 26 00h00m00.000000s :BS_NODE DEBUG: [26] Connect 2025-05-07T08:46:38.943424Z 27 00h00m00.000000s :BS_NODE DEBUG: [27] Bootstrap 2025-05-07T08:46:38.943456Z 27 00h00m00.000000s :BS_NODE DEBUG: [27] Connect 2025-05-07T08:46:38.943775Z 28 00h00m00.000000s :BS_NODE DEBUG: [28] Bootstrap 2025-05-07T08:46:38.943806Z 28 00h00m00.000000s :BS_NODE DEBUG: [28] Connect 2025-05-07T08:46:38.943846Z 29 00h00m00.000000s :BS_NODE DEBUG: [29] Bootstrap 2025-05-07T08:46:38.943869Z 29 00h00m00.000000s :BS_NODE DEBUG: [29] Connect 2025-05-07T08:46:38.943905Z 30 00h00m00.000000s :BS_NODE DEBUG: [30] Bootstrap 2025-05-07T08:46:38.943927Z 30 00h00m00.000000s :BS_NODE DEBUG: [30] Connect 2025-05-07T08:46:38.943964Z 31 00h00m00.000000s :BS_NODE DEBUG: [31] Bootstrap 2025-05-07T08:46:38.943984Z 31 00h00m00.000000s :BS_NODE DEBUG: [31] Connect 2025-05-07T08:46:38.944025Z 32 00h00m00.000000s :BS_NODE DEBUG: [32] Bootstrap 2025-05-07T08:46:38.944048Z 32 00h00m00.000000s :BS_NODE DEBUG: [32] Connect 2025-05-07T08:46:38.944103Z 33 00h00m00.000000s :BS_NODE DEBUG: [33] Bootstrap 2025-05-07T08:46:38.944129Z 33 00h00m00.000000s :BS_NODE DEBUG: [33] Connect 2025-05-07T08:46:38.944172Z 34 00h00m00.000000s :BS_NODE DEBUG: [34] Bootstrap 2025-05-07T08:46:38.944207Z 34 
00h00m00.000000s :BS_NODE DEBUG: [34] Connect 2025-05-07T08:46:38.944257Z 35 00h00m00.000000s :BS_NODE DEBUG: [35] Bootstrap 2025-05-07T08:46:38.944290Z 35 00h00m00.000000s :BS_NODE DEBUG: [35] Connect 2025-05-07T08:46:38.944338Z 36 00h00m00.000000s :BS_NODE DEBUG: [36] Bootstrap 2025-05-07T08:46:38.944361Z 36 00h00m00.000000s :BS_NODE DEBUG: [36] Connect 2025-05-07T08:46:38.963984Z 1 00h00m00.000000s :BS_NODE DEBUG: [1] ClientConnected Sender# [1:2713:53] Status# ERROR ClientId# [1:2713:53] ServerId# [0:0:0] PipeClient# [1:2713:53] 2025-05-07T08:46:38.965628Z 2 00h00m00.000000s :BS_NODE DEBUG: [2] ClientConnected Sender# [2:2714:41] Status# ERROR ClientId# [2:2714:41] ServerId# [0:0:0] PipeClient# [2:2714:41] 2025-05-07T08:46:38.965710Z 3 00h00m00.000000s :BS_NODE DEBUG: [3] ClientConnected Sender# [3:2715:41] Status# ERROR ClientId# [3:2715:41] ServerId# [0:0:0] PipeClient# [3:2715:41] 2025-05-07T08:46:38.965764Z 4 00h00m00.000000s :BS_NODE DEBUG: [4] ClientConnected Sender# [4:2716:41] Status# ERROR ClientId# [4:2716:41] ServerId# [0:0:0] PipeClient# [4:2716:41] 2025-05-07T08:46:38.965845Z 5 00h00m00.000000s :BS_NODE DEBUG: [5] ClientConnected Sender# [5:2717:41] Status# ERROR ClientId# [5:2717:41] ServerId# [0:0:0] PipeClient# [5:2717:41] 2025-05-07T08:46:38.965895Z 6 00h00m00.000000s :BS_NODE DEBUG: [6] ClientConnected Sender# [6:2718:41] Status# ERROR ClientId# [6:2718:41] ServerId# [0:0:0] PipeClient# [6:2718:41] 2025-05-07T08:46:38.965952Z 7 00h00m00.000000s :BS_NODE DEBUG: [7] ClientConnected Sender# [7:2719:41] Status# ERROR ClientId# [7:2719:41] ServerId# [0:0:0] PipeClient# [7:2719:41] 2025-05-07T08:46:38.966036Z 8 00h00m00.000000s :BS_NODE DEBUG: [8] ClientConnected Sender# [8:2720:41] Status# ERROR ClientId# [8:2720:41] ServerId# [0:0:0] PipeClient# [8:2720:41] 2025-05-07T08:46:38.966084Z 9 00h00m00.000000s :BS_NODE DEBUG: [9] ClientConnected Sender# [9:2721:41] Status# ERROR ClientId# [9:2721:41] ServerId# [0:0:0] PipeClient# [9:2721:41] 2025-05-07T08:46:38.966133Z 10 00h00m00.000000s :BS_NODE DEBUG: [10] ClientConnected Sender# [10:2722:41] Status# ERROR ClientId# [10:2722:41] ServerId# [0:0:0] PipeClient# [10:2722:41] 2025-05-07T08:46:38.966185Z 11 00h00m00.000000s :BS_NODE DEBUG: [11] ClientConnected Sender# [11:2723:41] Status# ERROR ClientId# [11:2723:41] ServerId# [0:0:0] PipeClient# [11:2723:41] 2025-05-07T08:46:38.966275Z 12 00h00m00.000000s :BS_NODE DEBUG: [12] ClientConnected Sender# [12:2724:41] Status# ERROR ClientId# [12:2724:41] ServerId# [0:0:0] PipeClient# [12:2724:41] 2025-05-07T08:46:38.966328Z 13 00h00m00.000000s :BS_NODE DEBUG: [13] ClientConnected Sender# [13:2725:41] Status# ERROR ClientId# [13:2725:41] ServerId# [0:0:0] PipeClient# [13:2725:41] 2025-05-07T08:46:38.966405Z 14 00h00m00.000000s :BS_NODE DEBUG: [14] ClientConnected Sender# [14:2726:41] Status# ERROR ClientId# [14:2726:41] ServerId# [0:0:0] PipeClient# [14:2726:41] 2025-05-07T08:46:38.966489Z 15 00h00m00.000000s :BS_NODE DEBUG: [15] ClientConnected Sender# [15:2727:41] Status# ERROR ClientId# [15:2727:41] ServerId# [0:0:0] PipeClient# [15:2727:41] 2025-05-07T08:46:38.966543Z 16 00h00m00.000000s :BS_NODE DEBUG: [16] ClientConnected Sender# [16:2728:41] Status# ERROR ClientId# [16:2728:41] ServerId# [0:0:0] PipeClient# [16:2728:41] 2025-05-07T08:46:38.966593Z 17 00h00m00.000000s :BS_NODE DEBUG: [17] ClientConnected Sender# [17:2729:41] Status# ERROR ClientId# [17:2729:41] ServerId# [0:0:0] PipeClient# [17:2729:41] 2025-05-07T08:46:38.966648Z 18 00h00m00.000000s :BS_NODE DEBUG: [18] 
ClientConnected Sender# [18:2730:41] Status# ERROR ClientId# [18:2730:41] ServerId# [0:0:0] PipeClient# [18:2730:41] 2025-05-07T08:46:38.966700Z 19 00h00m00.000000s :BS_NODE DEBUG: [19] ClientConnected Sender# [19:2731:41] Status# ERROR ClientId# [19:2731:41] ServerId# [0:0:0] PipeClient# [19:2731:41] 2025-05-07T08:46:38.966757Z 20 00h00m00.000000s :BS_NODE DEBUG: [20] ClientConnected Sender# [20:2732:41] Status# ERROR ClientId# [20:2732:41] ServerId# [0:0:0] PipeClient# [20:2732:41] 2025-05-07T08:46:38.966856Z 21 00h00m00.000000s :BS_NODE DEBUG: [21] ClientConnected Sender# [21:2733:41] Status# ERROR ClientId# [21:2733:41] ServerId# [0:0:0] PipeClient# [21:2733:41] 2025-05-07T08:46:38.966999Z 22 00h00m00.000000s :BS_NODE DEBUG: [22] ClientConnected Sender# [22:2734:41] Status# ERROR ClientId# [22:2734:41] ServerId# [0:0:0] PipeClient# [22:2734:41] 2025-05-07T08:46:38.967042Z 23 00h00m00.000000s :BS_NODE DEBUG: [23] ClientConnected Sender# [23:2735:41] Status# ERROR ClientId# [23:2735:41] ServerId# [0:0:0] PipeClient# [23:2735:41] 2025-05-07T08:46:38.967114Z 24 00h00m00.000000s :BS_NODE DEBUG: [24] ClientConnected Sender# [24:2736:41] Status# ERROR ClientId# [24:2736:41] ServerId# [0:0:0] PipeClient# [24:2736:41] 2025-05-07T08:46:38.967158Z 25 00h00m00.000000s :BS_NODE DEBUG: [25] ClientConnected Sender# [25:2737:41] Status# ERROR ClientId# [25:2737:41] ServerId# [0:0:0] PipeClient# [25:2737:41] 2025-05-07T08:46:38.967200Z 26 00h00m00.000000s :BS_NODE DEBUG: [26] ClientConnected Sender# [26:2738:41] Status# ERROR ClientId# [26:2738:41] ServerId# [0:0:0] PipeClient# [26:2738:41] 2025-05-07T08:46:38.967238Z 27 00h00m00.000000s :BS_NODE DEBUG: [27] ClientConnected Sender# [27:2739:41] Status# ERROR ClientId# [27:2739:41] ServerId# [0:0:0] PipeClient# [27:2739:41] 2025-05-07T08:46:38.967280Z 28 00h00m00.000000s :BS_NODE DEBUG: [28] ClientConnected Sender# [28:2740:41] Status# ERROR ClientId# [28:2740:41] ServerId# [0:0:0] PipeClient# [28:2740:41] 2025-05-07T08:46:38.967324Z 29 00h00m00.000000s :BS_NODE DEBUG: [29] ClientConnected Sender# [29:2741:41] Status# ERROR ClientId# [29:2741:41] ServerId# [0:0:0] PipeClient# [29:2741:41] 2025-05-07T08:46:38.967364Z 30 00h00m00.000000s :BS_NODE DEBUG: [30] ClientConnected Sender# [30:2742:41] Status# ERROR ClientId# [30:2742:41] ServerId# [0:0:0] PipeClient# [30:2742:41] 2025-05-07T08:46:38.967403Z 31 00h00m00.000000s :BS_NODE DEBUG: [31] ClientConnected Sender# [31:2743:41] Status# ERROR ClientId# [31:2743:41] ServerId# [0:0:0] PipeClient# [31:2743:41] 2025-05-07T08:46:38.967493Z 32 00h00m00.000000s :BS_NODE DEBUG: [32] ClientConnected Sender# [32:2744:41] Status# ERROR ClientId# [32:2744:41] ServerId# [0:0:0] PipeClient# [32:2744:41] 2025-05-07T08:46:38.967535Z 33 00h00m00.000000s :BS_NODE DEBUG: [33] ClientConnected Sender# [33:2745:41] Status# ERROR ClientId# [33:2745:41] ServerId# [0:0:0] PipeClient# [33:2745:41] 2025-05-07T08:46:38.967574Z 34 00h00m00.000000s :BS_NODE DEBUG: [34] ClientConnected Sender# [34:2746:41] Status# ERROR ClientId# [34:2746:41] ServerId# [0:0:0] PipeClient# [34:2746:41] 2025-05-07T08:46:38.967613Z 35 00h00m00.000000s :BS_NODE DEBUG: [35] ClientConnected Sender# [35:2747:41] Status# ERROR ClientId# [35:2747:41 ... 
:BS_SELFHEAL INFO: {BSSH09@self_heal.cpp:207} Reassigner succeeded GroupId# 2147483670 Items# [80000016:2:1:2:0]: 14:1001:1001 -> 16:1001:1015 ConfigTxSeqNo# 505 2025-05-07T08:47:01.764848Z 1 05h45m00.123040s :BS_SELFHEAL DEBUG: {BSSH08@self_heal.cpp:218} Reassigner finished GroupId# 2147483670 Success# true 2025-05-07T08:47:01.765026Z 35 05h45m00.123040s :BS_NODE DEBUG: [35] NodeServiceSetUpdate 2025-05-07T08:47:01.765100Z 35 05h45m00.123040s :BS_NODE DEBUG: [35] VDiskId# [80000016:2:2:1:0] -> [80000016:3:2:1:0] 2025-05-07T08:47:01.765214Z 2 05h45m00.123040s :BS_NODE DEBUG: [2] NodeServiceSetUpdate 2025-05-07T08:47:01.765263Z 2 05h45m00.123040s :BS_NODE DEBUG: [2] VDiskId# [80000016:2:0:2:0] -> [80000016:3:0:2:0] 2025-05-07T08:47:01.765351Z 20 05h45m00.123040s :BS_NODE DEBUG: [20] NodeServiceSetUpdate 2025-05-07T08:47:01.765403Z 20 05h45m00.123040s :BS_NODE DEBUG: [20] VDiskId# [80000016:2:1:0:0] -> [80000016:3:1:0:0] 2025-05-07T08:47:01.765489Z 23 05h45m00.123040s :BS_NODE DEBUG: [23] NodeServiceSetUpdate 2025-05-07T08:47:01.765537Z 23 05h45m00.123040s :BS_NODE DEBUG: [23] VDiskId# [80000016:2:1:1:0] -> [80000016:3:1:1:0] 2025-05-07T08:47:01.765626Z 8 05h45m00.123040s :BS_NODE DEBUG: [8] NodeServiceSetUpdate 2025-05-07T08:47:01.765674Z 8 05h45m00.123040s :BS_NODE DEBUG: [8] VDiskId# [80000016:2:0:0:0] -> [80000016:3:0:0:0] 2025-05-07T08:47:01.765760Z 26 05h45m00.123040s :BS_NODE DEBUG: [26] NodeServiceSetUpdate 2025-05-07T08:47:01.765807Z 26 05h45m00.123040s :BS_NODE DEBUG: [26] VDiskId# [80000016:2:2:2:0] -> [80000016:3:2:2:0] 2025-05-07T08:47:01.765894Z 11 05h45m00.123040s :BS_NODE DEBUG: [11] NodeServiceSetUpdate 2025-05-07T08:47:01.765941Z 11 05h45m00.123040s :BS_NODE DEBUG: [11] VDiskId# [80000016:2:0:1:0] -> [80000016:3:0:1:0] 2025-05-07T08:47:01.766025Z 14 05h45m00.123040s :BS_NODE DEBUG: [14] NodeServiceSetUpdate 2025-05-07T08:47:01.766109Z 32 05h45m00.123040s :BS_NODE DEBUG: [32] NodeServiceSetUpdate 2025-05-07T08:47:01.766160Z 32 05h45m00.123040s :BS_NODE DEBUG: [32] VDiskId# [80000016:2:2:0:0] -> [80000016:3:2:0:0] 2025-05-07T08:47:01.766252Z 16 05h45m00.123040s :BS_NODE DEBUG: [16] NodeServiceSetUpdate 2025-05-07T08:47:01.766298Z 16 05h45m00.123040s :BS_NODE DEBUG: [16] VDiskId# [80000016:3:1:2:0] PDiskId# 1001 VSlotId# 1015 created 2025-05-07T08:47:01.766379Z 16 05h45m00.123040s :BS_NODE DEBUG: [16] VDiskId# [80000016:3:1:2:0] status changed to INIT_PENDING 2025-05-07T08:47:01.766721Z 1 05h45m00.123040s :BS_SELFHEAL DEBUG: {BSSH01@self_heal.cpp:71} Reassigner starting GroupId# 2147483654 2025-05-07T08:47:01.767593Z 1 05h45m00.123040s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:111} Reassigner TEvVStatusResult GroupId# 2147483654 Status# OK JoinedGroup# true Replicated# true 2025-05-07T08:47:01.767649Z 1 05h45m00.123040s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:96} Reassigner ProcessVDiskReply GroupId# 2147483654 VDiskId# [80000006:2:0:0:0] DiskIsOk# true 2025-05-07T08:47:01.767691Z 1 05h45m00.123040s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:111} Reassigner TEvVStatusResult GroupId# 2147483654 Status# OK JoinedGroup# true Replicated# true 2025-05-07T08:47:01.767722Z 1 05h45m00.123040s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:96} Reassigner ProcessVDiskReply GroupId# 2147483654 VDiskId# [80000006:2:0:1:0] DiskIsOk# true 2025-05-07T08:47:01.767752Z 1 05h45m00.123040s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:111} Reassigner TEvVStatusResult GroupId# 2147483654 Status# OK JoinedGroup# true Replicated# true 2025-05-07T08:47:01.767782Z 1 05h45m00.123040s :BS_SELFHEAL DEBUG: 
{BSSH02@self_heal.cpp:96} Reassigner ProcessVDiskReply GroupId# 2147483654 VDiskId# [80000006:2:0:2:0] DiskIsOk# true 2025-05-07T08:47:01.767812Z 1 05h45m00.123040s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:111} Reassigner TEvVStatusResult GroupId# 2147483654 Status# OK JoinedGroup# true Replicated# true 2025-05-07T08:47:01.767842Z 1 05h45m00.123040s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:96} Reassigner ProcessVDiskReply GroupId# 2147483654 VDiskId# [80000006:2:1:0:0] DiskIsOk# true 2025-05-07T08:47:01.767873Z 1 05h45m00.123040s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:111} Reassigner TEvVStatusResult GroupId# 2147483654 Status# OK JoinedGroup# true Replicated# true 2025-05-07T08:47:01.767903Z 1 05h45m00.123040s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:96} Reassigner ProcessVDiskReply GroupId# 2147483654 VDiskId# [80000006:2:1:1:0] DiskIsOk# true 2025-05-07T08:47:01.767934Z 1 05h45m00.123040s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:111} Reassigner TEvVStatusResult GroupId# 2147483654 Status# OK JoinedGroup# true Replicated# true 2025-05-07T08:47:01.767963Z 1 05h45m00.123040s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:96} Reassigner ProcessVDiskReply GroupId# 2147483654 VDiskId# [80000006:2:2:0:0] DiskIsOk# true 2025-05-07T08:47:01.767992Z 1 05h45m00.123040s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:111} Reassigner TEvVStatusResult GroupId# 2147483654 Status# OK JoinedGroup# true Replicated# true 2025-05-07T08:47:01.768023Z 1 05h45m00.123040s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:96} Reassigner ProcessVDiskReply GroupId# 2147483654 VDiskId# [80000006:2:2:1:0] DiskIsOk# true 2025-05-07T08:47:01.768054Z 1 05h45m00.123040s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:111} Reassigner TEvVStatusResult GroupId# 2147483654 Status# OK JoinedGroup# true Replicated# true 2025-05-07T08:47:01.768084Z 1 05h45m00.123040s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:96} Reassigner ProcessVDiskReply GroupId# 2147483654 VDiskId# [80000006:2:2:2:0] DiskIsOk# true 2025-05-07T08:47:01.774017Z 1 05h45m00.123552s :BS_SELFHEAL INFO: {BSSH09@self_heal.cpp:207} Reassigner succeeded GroupId# 2147483654 Items# [80000006:2:1:2:0]: 14:1001:1000 -> 16:1001:1016 ConfigTxSeqNo# 506 2025-05-07T08:47:01.774077Z 1 05h45m00.123552s :BS_SELFHEAL DEBUG: {BSSH08@self_heal.cpp:218} Reassigner finished GroupId# 2147483654 Success# true 2025-05-07T08:47:01.774260Z 35 05h45m00.123552s :BS_NODE DEBUG: [35] NodeServiceSetUpdate 2025-05-07T08:47:01.774332Z 35 05h45m00.123552s :BS_NODE DEBUG: [35] VDiskId# [80000006:2:2:1:0] -> [80000006:3:2:1:0] 2025-05-07T08:47:01.774437Z 2 05h45m00.123552s :BS_NODE DEBUG: [2] NodeServiceSetUpdate 2025-05-07T08:47:01.774486Z 2 05h45m00.123552s :BS_NODE DEBUG: [2] VDiskId# [80000006:2:0:2:0] -> [80000006:3:0:2:0] 2025-05-07T08:47:01.774576Z 20 05h45m00.123552s :BS_NODE DEBUG: [20] NodeServiceSetUpdate 2025-05-07T08:47:01.774623Z 20 05h45m00.123552s :BS_NODE DEBUG: [20] VDiskId# [80000006:2:1:0:0] -> [80000006:3:1:0:0] 2025-05-07T08:47:01.774708Z 23 05h45m00.123552s :BS_NODE DEBUG: [23] NodeServiceSetUpdate 2025-05-07T08:47:01.774754Z 23 05h45m00.123552s :BS_NODE DEBUG: [23] VDiskId# [80000006:2:1:1:0] -> [80000006:3:1:1:0] 2025-05-07T08:47:01.774863Z 8 05h45m00.123552s :BS_NODE DEBUG: [8] NodeServiceSetUpdate 2025-05-07T08:47:01.774912Z 8 05h45m00.123552s :BS_NODE DEBUG: [8] VDiskId# [80000006:2:0:0:0] -> [80000006:3:0:0:0] 2025-05-07T08:47:01.775000Z 26 05h45m00.123552s :BS_NODE DEBUG: [26] NodeServiceSetUpdate 2025-05-07T08:47:01.775050Z 26 05h45m00.123552s :BS_NODE DEBUG: [26] VDiskId# 
[80000006:2:2:2:0] -> [80000006:3:2:2:0] 2025-05-07T08:47:01.775134Z 11 05h45m00.123552s :BS_NODE DEBUG: [11] NodeServiceSetUpdate 2025-05-07T08:47:01.775181Z 11 05h45m00.123552s :BS_NODE DEBUG: [11] VDiskId# [80000006:2:0:1:0] -> [80000006:3:0:1:0] 2025-05-07T08:47:01.775265Z 30 05h45m00.123552s :BS_NODE DEBUG: [30] NodeServiceSetUpdate 2025-05-07T08:47:01.775315Z 30 05h45m00.123552s :BS_NODE DEBUG: [30] VDiskId# [80000006:2:2:0:0] -> [80000006:3:2:0:0] 2025-05-07T08:47:01.775377Z 14 05h45m00.123552s :BS_NODE DEBUG: [14] NodeServiceSetUpdate 2025-05-07T08:47:01.775460Z 16 05h45m00.123552s :BS_NODE DEBUG: [16] NodeServiceSetUpdate 2025-05-07T08:47:01.775501Z 16 05h45m00.123552s :BS_NODE DEBUG: [16] VDiskId# [80000006:3:1:2:0] PDiskId# 1001 VSlotId# 1016 created 2025-05-07T08:47:01.775590Z 16 05h45m00.123552s :BS_NODE DEBUG: [16] VDiskId# [80000006:3:1:2:0] status changed to INIT_PENDING 2025-05-07T08:47:01.776827Z 16 05h45m01.257968s :BS_NODE DEBUG: [16] VDiskId# [80000066:4:1:2:0] status changed to REPLICATING 2025-05-07T08:47:01.777599Z 16 05h45m01.259480s :BS_NODE DEBUG: [16] VDiskId# [80000056:3:1:2:0] status changed to REPLICATING 2025-05-07T08:47:01.778348Z 16 05h45m01.460992s :BS_NODE DEBUG: [16] VDiskId# [80000046:3:1:2:0] status changed to REPLICATING 2025-05-07T08:47:01.779071Z 16 05h45m02.112456s :BS_NODE DEBUG: [16] VDiskId# [80000076:3:1:2:0] status changed to REPLICATING 2025-05-07T08:47:01.779813Z 16 05h45m03.158552s :BS_NODE DEBUG: [16] VDiskId# [80000006:3:1:2:0] status changed to REPLICATING 2025-05-07T08:47:01.780521Z 16 05h45m03.288040s :BS_NODE DEBUG: [16] VDiskId# [80000016:3:1:2:0] status changed to REPLICATING 2025-05-07T08:47:01.781226Z 23 05h45m03.310528s :BS_NODE DEBUG: [23] VDiskId# [80000027:5:1:2:0] status changed to REPLICATING 2025-05-07T08:47:01.781781Z 16 05h45m04.635016s :BS_NODE DEBUG: [16] VDiskId# [80000026:3:1:2:0] status changed to REPLICATING 2025-05-07T08:47:01.782580Z 16 05h45m04.683504s :BS_NODE DEBUG: [16] VDiskId# [80000036:4:1:2:0] status changed to REPLICATING 2025-05-07T08:47:01.785239Z 16 05h45m15.399968s :BS_NODE DEBUG: [16] VDiskId# [80000066:4:1:2:0] status changed to READY 2025-05-07T08:47:01.786740Z 14 05h45m15.400480s :BS_NODE DEBUG: [14] NodeServiceSetUpdate 2025-05-07T08:47:01.786806Z 14 05h45m15.400480s :BS_NODE DEBUG: [14] VDiskId# [80000066:3:1:2:0] destroyed 2025-05-07T08:47:01.787004Z 16 05h45m17.885992s :BS_NODE DEBUG: [16] VDiskId# [80000046:3:1:2:0] status changed to READY 2025-05-07T08:47:01.788301Z 14 05h45m17.886504s :BS_NODE DEBUG: [14] NodeServiceSetUpdate 2025-05-07T08:47:01.788361Z 14 05h45m17.886504s :BS_NODE DEBUG: [14] VDiskId# [80000046:2:1:2:0] destroyed 2025-05-07T08:47:01.788518Z 16 05h45m18.092016s :BS_NODE DEBUG: [16] VDiskId# [80000026:3:1:2:0] status changed to READY 2025-05-07T08:47:01.789739Z 14 05h45m18.092528s :BS_NODE DEBUG: [14] NodeServiceSetUpdate 2025-05-07T08:47:01.789793Z 14 05h45m18.092528s :BS_NODE DEBUG: [14] VDiskId# [80000026:2:1:2:0] destroyed 2025-05-07T08:47:01.790315Z 16 05h45m20.909456s :BS_NODE DEBUG: [16] VDiskId# [80000076:3:1:2:0] status changed to READY 2025-05-07T08:47:01.791553Z 14 05h45m20.909968s :BS_NODE DEBUG: [14] NodeServiceSetUpdate 2025-05-07T08:47:01.791603Z 14 05h45m20.909968s :BS_NODE DEBUG: [14] VDiskId# [80000076:2:1:2:0] destroyed 2025-05-07T08:47:01.791753Z 16 05h45m22.033480s :BS_NODE DEBUG: [16] VDiskId# [80000056:3:1:2:0] status changed to READY 2025-05-07T08:47:01.792829Z 14 05h45m22.033992s :BS_NODE DEBUG: [14] NodeServiceSetUpdate 2025-05-07T08:47:01.792874Z 
14 05h45m22.033992s :BS_NODE DEBUG: [14] VDiskId# [80000056:2:1:2:0] destroyed 2025-05-07T08:47:01.792995Z 23 05h45m22.214528s :BS_NODE DEBUG: [23] VDiskId# [80000027:5:1:2:0] status changed to READY 2025-05-07T08:47:01.793787Z 14 05h45m22.215040s :BS_NODE DEBUG: [14] NodeServiceSetUpdate 2025-05-07T08:47:01.793836Z 14 05h45m22.215040s :BS_NODE DEBUG: [14] VDiskId# [80000027:4:1:2:0] destroyed 2025-05-07T08:47:01.794342Z 16 05h45m25.446040s :BS_NODE DEBUG: [16] VDiskId# [80000016:3:1:2:0] status changed to READY 2025-05-07T08:47:01.795590Z 14 05h45m25.446552s :BS_NODE DEBUG: [14] NodeServiceSetUpdate 2025-05-07T08:47:01.795643Z 14 05h45m25.446552s :BS_NODE DEBUG: [14] VDiskId# [80000016:2:1:2:0] destroyed 2025-05-07T08:47:01.797495Z 16 05h45m35.728504s :BS_NODE DEBUG: [16] VDiskId# [80000036:4:1:2:0] status changed to READY 2025-05-07T08:47:01.798701Z 14 05h45m35.729016s :BS_NODE DEBUG: [14] NodeServiceSetUpdate 2025-05-07T08:47:01.798755Z 14 05h45m35.729016s :BS_NODE DEBUG: [14] VDiskId# [80000036:3:1:2:0] destroyed 2025-05-07T08:47:01.799159Z 16 05h45m36.164552s :BS_NODE DEBUG: [16] VDiskId# [80000006:3:1:2:0] status changed to READY 2025-05-07T08:47:01.800412Z 14 05h45m36.165064s :BS_NODE DEBUG: [14] NodeServiceSetUpdate 2025-05-07T08:47:01.800464Z 14 05h45m36.165064s :BS_NODE DEBUG: [14] VDiskId# [80000006:2:1:2:0] destroyed >> ResultFormatter::Optional [GOOD] >> ResultFormatter::Pg [GOOD] >> ResultFormatter::Tuple [GOOD] >> ResultFormatter::Tagged [GOOD] >> ResultFormatter::Primitive [GOOD] >> ResultFormatter::Struct [GOOD] >> ResultFormatter::FormatEmptySchema [GOOD] >> ResultFormatter::FormatNonEmptySchema [GOOD] >> ResultFormatter::StructWithNoFields [GOOD] >> ResultFormatter::StructTypeNameAsString [GOOD] |88.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/result_formatter/ut/unittest >> ResultFormatter::Null [GOOD] |88.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/result_formatter/ut/unittest >> ResultFormatter::Decimal [GOOD] |88.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/result_formatter/ut/unittest >> ResultFormatter::VariantStruct [GOOD] |88.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/result_formatter/ut/unittest >> ResultFormatter::EmptyTuple [GOOD] |88.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/result_formatter/ut/unittest >> ResultFormatter::Tagged [GOOD] >> ResultFormatter::Void [GOOD] >> ResultFormatter::VariantTuple [GOOD] |88.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/result_formatter/ut/unittest >> ResultFormatter::StructTypeNameAsString [GOOD] |88.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/result_formatter/ut/unittest >> ResultFormatter::FormatNonEmptySchema [GOOD] >> TYardTest::TestStartingPoints [GOOD] >> TYardTest::TestWhiteboard |88.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/result_formatter/ut/unittest >> ResultFormatter::Struct [GOOD] |88.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/result_formatter/ut/unittest >> ResultFormatter::Pg [GOOD] |88.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_login/ydb-core-tx-schemeshard-ut_login |88.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_login/ydb-core-tx-schemeshard-ut_login |88.3%| [TA] {RESULT} $(B)/ydb/core/tx/sharding/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} |88.3%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_login/ydb-core-tx-schemeshard-ut_login |88.3%| [TA] $(B)/ydb/core/mind/bscontroller/ut_selfheal/test-results/unittest/{meta.json ... results_accumulator.log} |88.3%| [TA] {RESULT} $(B)/ydb/core/mind/bscontroller/ut_selfheal/test-results/unittest/{meta.json ... results_accumulator.log} |88.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/result_formatter/ut/unittest >> ResultFormatter::VariantTuple [GOOD] >> TBsLocalRecovery::ChaoticWriteRestartHugeDecreased [GOOD] |88.3%| [TA] $(B)/ydb/core/fq/libs/result_formatter/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> TYardTest::TestWhiteboard [GOOD] >> TYardTest::TestMultiYardLogLatency ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_vdisk/unittest >> TBsLocalRecovery::ChaoticWriteRestartHugeDecreased [GOOD] Test command err: 2025-05-07T08:46:29.649186Z :BS_PDISK ERROR: {BPD01@blobstorage_pdisk_impl.cpp:2935} PDiskId# 1 ownerId# 4 invalid OwnerRound, got# 101 expected# 151 error in TLogWrite for ownerId# 4 ownerRound# 101 lsn# 12 PDiskId# 1 2025-05-07T08:46:31.911445Z :BS_PDISK ERROR: {BPD01@blobstorage_pdisk_impl.cpp:2935} PDiskId# 1 ownerId# 7 invalid OwnerRound, got# 101 expected# 151 error in TLogWrite for ownerId# 7 ownerRound# 101 lsn# 12 PDiskId# 1 >> BlobDepot::VerifiedRandom [GOOD] >> BlobDepot::LoadPutAndRead >> Mirror3of4::ReplicationHuge [GOOD] >> TSchemeShardLoginTest::RemoveUser_NonExisting-StrictAclCheck-false >> TSchemeShardLoginTest::RemoveUser-StrictAclCheck-false |88.3%| [TA] $(B)/ydb/core/blobstorage/ut_vdisk/test-results/unittest/{meta.json ... results_accumulator.log} >> TWebLoginService::AuditLogLoginSuccess >> ClosedIntervalSet::EnumInRange [GOOD] >> ClosedIntervalSet::EnumInRangeReverse >> TSchemeShardLoginTest::BanUnbanUser >> TSchemeShardLoginTest::RemoveUser_Owner-StrictAclCheck-true >> TWebLoginService::AuditLogEmptySIDsLoginSuccess >> TSchemeShardLoginTest::UserLogin >> TSchemeShardLoginTest::RemoveGroup_NonExisting-StrictAclCheck-false >> test.py::test[solomon-DownsamplingValidSettings-default.txt] [GOOD] >> test.py::test[solomon-HistResponse-default.txt] |88.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/tools/fqrun/fqrun |88.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/tools/fqrun/fqrun |88.3%| [TA] {RESULT} $(B)/ydb/core/fq/libs/result_formatter/ut/test-results/unittest/{meta.json ... results_accumulator.log} |88.3%| [TA] {RESULT} $(B)/ydb/core/blobstorage/ut_vdisk/test-results/unittest/{meta.json ... 
results_accumulator.log} >> TSchemeShardLoginTest::AddAccess_NonExisting-StrictAclCheck-false >> TSchemeShardLoginTest::RemoveGroup-StrictAclCheck-false |88.3%| [LD] {RESULT} $(B)/ydb/tests/tools/fqrun/fqrun |88.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/runtime/ut/ydb-core-kqp-runtime-ut |88.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/runtime/ut/ydb-core-kqp-runtime-ut |88.3%| [LD] {RESULT} $(B)/ydb/core/kqp/runtime/ut/ydb-core-kqp-runtime-ut |88.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_topic_splitmerge/ydb-core-tx-schemeshard-ut_topic_splitmerge |88.3%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_topic_splitmerge/ydb-core-tx-schemeshard-ut_topic_splitmerge |88.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_topic_splitmerge/ydb-core-tx-schemeshard-ut_topic_splitmerge ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_mirror3of4/unittest >> Mirror3of4::ReplicationHuge [GOOD] Test command err: 2025-05-07T08:46:08.571109Z 1 00h00m00.000000s :BS_SKELETON INFO: PDiskId# 1 VDISK[0:_:0:0:0]: (0) SKELETON START Marker# BSVS37 2025-05-07T08:46:08.571393Z 2 00h00m00.000000s :BS_SKELETON INFO: PDiskId# 1 VDISK[0:_:0:1:0]: (0) SKELETON START Marker# BSVS37 2025-05-07T08:46:08.571556Z 3 00h00m00.000000s :BS_SKELETON INFO: PDiskId# 1 VDISK[0:_:0:2:0]: (0) SKELETON START Marker# BSVS37 2025-05-07T08:46:08.571701Z 4 00h00m00.000000s :BS_SKELETON INFO: PDiskId# 1 VDISK[0:_:0:3:0]: (0) SKELETON START Marker# BSVS37 2025-05-07T08:46:08.571848Z 5 00h00m00.000000s :BS_SKELETON INFO: PDiskId# 1 VDISK[0:_:0:4:0]: (0) SKELETON START Marker# BSVS37 2025-05-07T08:46:08.572031Z 6 00h00m00.000000s :BS_SKELETON INFO: PDiskId# 1 VDISK[0:_:0:5:0]: (0) SKELETON START Marker# BSVS37 2025-05-07T08:46:08.572170Z 7 00h00m00.000000s :BS_SKELETON INFO: PDiskId# 1 VDISK[0:_:0:6:0]: (0) SKELETON START Marker# BSVS37 2025-05-07T08:46:08.572320Z 8 00h00m00.000000s :BS_SKELETON INFO: PDiskId# 1 VDISK[0:_:0:7:0]: (0) SKELETON START Marker# BSVS37 2025-05-07T08:46:08.572707Z 1 00h00m00.000000s :BS_LOCALRECOVERY NOTICE: PDiskId# 1 VDISK[0:_:0:0:0]: (0) LocalRecovery START 2025-05-07T08:46:08.572794Z 1 00h00m00.000000s :BS_LOCALRECOVERY DEBUG: PDiskId# 1 VDISK[0:_:0:0:0]: (0) Sending TEvYardInit: pdiskGuid# 7307817969340904081 skeletonid# [1:139:13] selfid# [1:155:22] delay 0.000000 sec 2025-05-07T08:46:08.572845Z 2 00h00m00.000000s :BS_LOCALRECOVERY NOTICE: PDiskId# 1 VDISK[0:_:0:1:0]: (0) LocalRecovery START 2025-05-07T08:46:08.572879Z 2 00h00m00.000000s :BS_LOCALRECOVERY DEBUG: PDiskId# 1 VDISK[0:_:0:1:0]: (0) Sending TEvYardInit: pdiskGuid# 10633576183498828400 skeletonid# [2:140:11] selfid# [2:156:12] delay 0.000000 sec 2025-05-07T08:46:08.572909Z 3 00h00m00.000000s :BS_LOCALRECOVERY NOTICE: PDiskId# 1 VDISK[0:_:0:2:0]: (0) LocalRecovery START 2025-05-07T08:46:08.572952Z 3 00h00m00.000000s :BS_LOCALRECOVERY DEBUG: PDiskId# 1 VDISK[0:_:0:2:0]: (0) Sending TEvYardInit: pdiskGuid# 1512513924185914238 skeletonid# [3:141:11] selfid# [3:157:12] delay 0.000000 sec 2025-05-07T08:46:08.572983Z 4 00h00m00.000000s :BS_LOCALRECOVERY NOTICE: PDiskId# 1 VDISK[0:_:0:3:0]: (0) LocalRecovery START 2025-05-07T08:46:08.573018Z 4 00h00m00.000000s :BS_LOCALRECOVERY DEBUG: PDiskId# 1 VDISK[0:_:0:3:0]: (0) Sending TEvYardInit: pdiskGuid# 16720102877521480380 skeletonid# [4:142:11] selfid# [4:158:12] delay 0.000000 sec 2025-05-07T08:46:08.573048Z 5 00h00m00.000000s :BS_LOCALRECOVERY NOTICE: PDiskId# 1 VDISK[0:_:0:4:0]: (0) LocalRecovery START 
2025-05-07T08:46:08.573082Z 5 00h00m00.000000s :BS_LOCALRECOVERY DEBUG: PDiskId# 1 VDISK[0:_:0:4:0]: (0) Sending TEvYardInit: pdiskGuid# 4293102461296763370 skeletonid# [5:143:11] selfid# [5:159:12] delay 0.000000 sec 2025-05-07T08:46:08.573124Z 6 00h00m00.000000s :BS_LOCALRECOVERY NOTICE: PDiskId# 1 VDISK[0:_:0:5:0]: (0) LocalRecovery START 2025-05-07T08:46:08.573156Z 6 00h00m00.000000s :BS_LOCALRECOVERY DEBUG: PDiskId# 1 VDISK[0:_:0:5:0]: (0) Sending TEvYardInit: pdiskGuid# 15167338153439158746 skeletonid# [6:144:11] selfid# [6:160:12] delay 0.000000 sec 2025-05-07T08:46:08.573184Z 7 00h00m00.000000s :BS_LOCALRECOVERY NOTICE: PDiskId# 1 VDISK[0:_:0:6:0]: (0) LocalRecovery START 2025-05-07T08:46:08.573211Z 7 00h00m00.000000s :BS_LOCALRECOVERY DEBUG: PDiskId# 1 VDISK[0:_:0:6:0]: (0) Sending TEvYardInit: pdiskGuid# 5050522864644018197 skeletonid# [7:145:11] selfid# [7:161:12] delay 0.000000 sec 2025-05-07T08:46:08.573246Z 8 00h00m00.000000s :BS_LOCALRECOVERY NOTICE: PDiskId# 1 VDISK[0:_:0:7:0]: (0) LocalRecovery START 2025-05-07T08:46:08.573287Z 8 00h00m00.000000s :BS_LOCALRECOVERY DEBUG: PDiskId# 1 VDISK[0:_:0:7:0]: (0) Sending TEvYardInit: pdiskGuid# 11583489370390328331 skeletonid# [8:146:11] selfid# [8:162:12] delay 0.000000 sec 2025-05-07T08:46:08.573662Z 1 00h00m00.000000s :BS_PDISK NOTICE: {PDM01@pdisk_mock.cpp:453} PDiskMock[1:1] received TEvYardInit Msg# {EvYardInit ownerRound# 2 VDisk# [0:4294967295:0:0:0] PDiskGuid# 7307817969340904081 CutLogID# [1:139:13] WhiteboardProxyId# [1:122:10]} 2025-05-07T08:46:08.574360Z 1 00h00m00.000000s :BS_PDISK INFO: {PDM02@pdisk_mock.cpp:488} PDiskMock[1:1] sending TEvYardInitResult Msg# {EvYardInitResult Status# OK ErrorReason# "" StatusFlags# None PDiskParams# {{TPDiskParams ownerId# 1 ownerRound# 2 ChunkSize# 134217728 AppendBlockSize# 4096 RecommendedReadSize# 45056 SeekTimeUs# 40 ReadSpeedBps# 1048576000 WriteSpeedBps# 1048576000 ReadBlockSize# 65536 WriteBlockSize# 65536 BulkWriteBlockSize# 65536 PrefetchSizeBytes# 209715 GlueRequestDistanceBytes# 41943}} OwnedChunks# {}} Created# true 2025-05-07T08:46:08.574463Z 2 00h00m00.000000s :BS_PDISK NOTICE: {PDM01@pdisk_mock.cpp:453} PDiskMock[2:1] received TEvYardInit Msg# {EvYardInit ownerRound# 2 VDisk# [0:4294967295:0:1:0] PDiskGuid# 10633576183498828400 CutLogID# [2:140:11] WhiteboardProxyId# [2:124:10]} 2025-05-07T08:46:08.574530Z 2 00h00m00.000000s :BS_PDISK INFO: {PDM02@pdisk_mock.cpp:488} PDiskMock[2:1] sending TEvYardInitResult Msg# {EvYardInitResult Status# OK ErrorReason# "" StatusFlags# None PDiskParams# {{TPDiskParams ownerId# 1 ownerRound# 2 ChunkSize# 134217728 AppendBlockSize# 4096 RecommendedReadSize# 45056 SeekTimeUs# 40 ReadSpeedBps# 1048576000 WriteSpeedBps# 1048576000 ReadBlockSize# 65536 WriteBlockSize# 65536 BulkWriteBlockSize# 65536 PrefetchSizeBytes# 209715 GlueRequestDistanceBytes# 41943}} OwnedChunks# {}} Created# true 2025-05-07T08:46:08.574589Z 3 00h00m00.000000s :BS_PDISK NOTICE: {PDM01@pdisk_mock.cpp:453} PDiskMock[3:1] received TEvYardInit Msg# {EvYardInit ownerRound# 2 VDisk# [0:4294967295:0:2:0] PDiskGuid# 1512513924185914238 CutLogID# [3:141:11] WhiteboardProxyId# [3:126:10]} 2025-05-07T08:46:08.574650Z 3 00h00m00.000000s :BS_PDISK INFO: {PDM02@pdisk_mock.cpp:488} PDiskMock[3:1] sending TEvYardInitResult Msg# {EvYardInitResult Status# OK ErrorReason# "" StatusFlags# None PDiskParams# {{TPDiskParams ownerId# 1 ownerRound# 2 ChunkSize# 134217728 AppendBlockSize# 4096 RecommendedReadSize# 45056 SeekTimeUs# 40 ReadSpeedBps# 1048576000 WriteSpeedBps# 1048576000 
ReadBlockSize# 65536 WriteBlockSize# 65536 BulkWriteBlockSize# 65536 PrefetchSizeBytes# 209715 GlueRequestDistanceBytes# 41943}} OwnedChunks# {}} Created# true 2025-05-07T08:46:08.574705Z 4 00h00m00.000000s :BS_PDISK NOTICE: {PDM01@pdisk_mock.cpp:453} PDiskMock[4:1] received TEvYardInit Msg# {EvYardInit ownerRound# 2 VDisk# [0:4294967295:0:3:0] PDiskGuid# 16720102877521480380 CutLogID# [4:142:11] WhiteboardProxyId# [4:128:10]} 2025-05-07T08:46:08.574770Z 4 00h00m00.000000s :BS_PDISK INFO: {PDM02@pdisk_mock.cpp:488} PDiskMock[4:1] sending TEvYardInitResult Msg# {EvYardInitResult Status# OK ErrorReason# "" StatusFlags# None PDiskParams# {{TPDiskParams ownerId# 1 ownerRound# 2 ChunkSize# 134217728 AppendBlockSize# 4096 RecommendedReadSize# 45056 SeekTimeUs# 40 ReadSpeedBps# 1048576000 WriteSpeedBps# 1048576000 ReadBlockSize# 65536 WriteBlockSize# 65536 BulkWriteBlockSize# 65536 PrefetchSizeBytes# 209715 GlueRequestDistanceBytes# 41943}} OwnedChunks# {}} Created# true 2025-05-07T08:46:08.574826Z 5 00h00m00.000000s :BS_PDISK NOTICE: {PDM01@pdisk_mock.cpp:453} PDiskMock[5:1] received TEvYardInit Msg# {EvYardInit ownerRound# 2 VDisk# [0:4294967295:0:4:0] PDiskGuid# 4293102461296763370 CutLogID# [5:143:11] WhiteboardProxyId# [5:130:10]} 2025-05-07T08:46:08.574873Z 5 00h00m00.000000s :BS_PDISK INFO: {PDM02@pdisk_mock.cpp:488} PDiskMock[5:1] sending TEvYardInitResult Msg# {EvYardInitResult Status# OK ErrorReason# "" StatusFlags# None PDiskParams# {{TPDiskParams ownerId# 1 ownerRound# 2 ChunkSize# 134217728 AppendBlockSize# 4096 RecommendedReadSize# 45056 SeekTimeUs# 40 ReadSpeedBps# 1048576000 WriteSpeedBps# 1048576000 ReadBlockSize# 65536 WriteBlockSize# 65536 BulkWriteBlockSize# 65536 PrefetchSizeBytes# 209715 GlueRequestDistanceBytes# 41943}} OwnedChunks# {}} Created# true 2025-05-07T08:46:08.574926Z 6 00h00m00.000000s :BS_PDISK NOTICE: {PDM01@pdisk_mock.cpp:453} PDiskMock[6:1] received TEvYardInit Msg# {EvYardInit ownerRound# 2 VDisk# [0:4294967295:0:5:0] PDiskGuid# 15167338153439158746 CutLogID# [6:144:11] WhiteboardProxyId# [6:132:10]} 2025-05-07T08:46:08.574973Z 6 00h00m00.000000s :BS_PDISK INFO: {PDM02@pdisk_mock.cpp:488} PDiskMock[6:1] sending TEvYardInitResult Msg# {EvYardInitResult Status# OK ErrorReason# "" StatusFlags# None PDiskParams# {{TPDiskParams ownerId# 1 ownerRound# 2 ChunkSize# 134217728 AppendBlockSize# 4096 RecommendedReadSize# 45056 SeekTimeUs# 40 ReadSpeedBps# 1048576000 WriteSpeedBps# 1048576000 ReadBlockSize# 65536 WriteBlockSize# 65536 BulkWriteBlockSize# 65536 PrefetchSizeBytes# 209715 GlueRequestDistanceBytes# 41943}} OwnedChunks# {}} Created# true 2025-05-07T08:46:08.575016Z 7 00h00m00.000000s :BS_PDISK NOTICE: {PDM01@pdisk_mock.cpp:453} PDiskMock[7:1] received TEvYardInit Msg# {EvYardInit ownerRound# 2 VDisk# [0:4294967295:0:6:0] PDiskGuid# 5050522864644018197 CutLogID# [7:145:11] WhiteboardProxyId# [7:134:10]} 2025-05-07T08:46:08.575076Z 7 00h00m00.000000s :BS_PDISK INFO: {PDM02@pdisk_mock.cpp:488} PDiskMock[7:1] sending TEvYardInitResult Msg# {EvYardInitResult Status# OK ErrorReason# "" StatusFlags# None PDiskParams# {{TPDiskParams ownerId# 1 ownerRound# 2 ChunkSize# 134217728 AppendBlockSize# 4096 RecommendedReadSize# 45056 SeekTimeUs# 40 ReadSpeedBps# 1048576000 WriteSpeedBps# 1048576000 ReadBlockSize# 65536 WriteBlockSize# 65536 BulkWriteBlockSize# 65536 PrefetchSizeBytes# 209715 GlueRequestDistanceBytes# 41943}} OwnedChunks# {}} Created# true 2025-05-07T08:46:08.575120Z 8 00h00m00.000000s :BS_PDISK NOTICE: {PDM01@pdisk_mock.cpp:453} PDiskMock[8:1] received 
TEvYardInit Msg# {EvYardInit ownerRound# 2 VDisk# [0:4294967295:0:7:0] PDiskGuid# 11583489370390328331 CutLogID# [8:146:11] WhiteboardProxyId# [8:136:10]} 2025-05-07T08:46:08.575176Z 8 00h00m00.000000s :BS_PDISK INFO: {PDM02@pdisk_mock.cpp:488} PDiskMock[8:1] sending TEvYardInitResult Msg# {EvYardInitResult Status# OK ErrorReason# "" StatusFlags# None PDiskParams# {{TPDiskParams ownerId# 1 ownerRound# 2 ChunkSize# 134217728 AppendBlockSize# 4096 RecommendedReadSize# 45056 SeekTimeUs# 40 ReadSpeedBps# 1048576000 WriteSpeedBps# 1048576000 ReadBlockSize# 65536 WriteBlockSize# 65536 BulkWriteBlockSize# 65536 PrefetchSizeBytes# 209715 GlueRequestDistanceBytes# 41943}} OwnedChunks# {}} Created# true 2025-05-07T08:46:08.576635Z 1 00h00m00.000000s :BS_LOCALRECOVERY NOTICE: PDiskId# 1 VDISK[0:_:0:0:0]: (0) MAX LSNS: LogoBlobs# [ExplicitlySet# true Derived# false Lsn# NotSet] Blocks# [ExplicitlySet# true Derived# false Lsn# NotSet] Barriers# [ExplicitlySet# true Derived# false Lsn# NotSet] SyncLog# 0 2025-05-07T08:46:08.577703Z 2 00h00m00.000000s :BS_LOCALRECOVERY NOTICE: PDiskId# 1 VDISK[0:_:0:1:0]: (0) MAX LSNS: LogoBlobs# [ExplicitlySet# true Derived# false Lsn# NotSet] Blocks# [ExplicitlySet# true Derived# false Lsn# NotSet] Barriers# [ExplicitlySet# true Derived# false Lsn# NotSet] SyncLog# 0 2025-05-07T08:46:08.578783Z 3 00h00m00.000000s :BS_LOCALRECOVERY NOTICE: PDiskId# 1 VDISK[0:_:0:2:0]: (0) MAX LSNS: LogoBlobs# [ExplicitlySet# true Derived# false Lsn# NotSet] Blocks# [ExplicitlySet# true Derived# false Lsn# NotSet] Barriers# [ExplicitlySet# true Derived# false Lsn# NotSet] SyncLog# 0 2025-05-07T08:46:08.579830Z 4 00h00m00.000000s :BS_LOCALRECOVERY NOTICE: PDiskId# 1 VDISK[0:_:0:3:0]: (0) MAX LSNS: LogoBlobs# [ExplicitlySet# true Derived# false Lsn# NotSet] Blocks# [ExplicitlySet# true Derived# false Lsn# NotSet] Barriers# [ExplicitlySet# true Derived# false Lsn# NotSet] SyncLog# 0 2025-05-07T08:46:08.580813Z 5 00h00m00.000000s :BS_LOCALRECOVERY NOTICE: PDiskId# 1 VDISK[0:_:0:4:0]: (0) MAX LSNS: LogoBlobs# [ExplicitlySet# true Derived# false Lsn# NotSet] Blocks# [ExplicitlySet# true Derived# false Lsn# NotSet] Barriers# [ExplicitlySet# true Derived# false Lsn# NotSet] SyncLog# 0 2025-05-07T08:46:08.581787Z 6 00h00m00.000000s :BS_LOCALRECOVERY NOTICE: PDiskId# 1 VDISK[0:_:0:5:0]: (0) MAX LSNS: LogoBlobs# [ExplicitlySet# true D ... 
PDISK DEBUG: {PDM12@pdisk_mock.cpp:647} PDiskMock[7:1] sending TEvLogResult Msg# {EvLogResult Status# OK ErrorReason# "" StatusFlags# None LogChunkCount# 0{Lsn# 25 Cookie# 0}} Recipient# [7:345:29] 2025-05-07T08:47:07.008580Z 8 00h00m00.000000s :BS_PDISK DEBUG: {PDM11@pdisk_mock.cpp:585} PDiskMock[8:1] received TEvLog Msg# {EvLog ownerId# 1 ownerRound# 2 Signature# 138 DataSize# 580 Lsn# 25 LsnSegmentStart# 25 Cookie# 0{CommitRecord FirstLsnToKeep# 0 IsStartingPoint# 1 DeleteToDecommitted# 0 CommitChunks# [] DeleteChunks# [] DirtyChunks# []}} VDiskId# [0:4294967295:0:7:0] 2025-05-07T08:47:07.008636Z 8 00h00m00.000000s :BS_PDISK DEBUG: {PDM12@pdisk_mock.cpp:647} PDiskMock[8:1] sending TEvLogResult Msg# {EvLogResult Status# OK ErrorReason# "" StatusFlags# None LogChunkCount# 0{Lsn# 25 Cookie# 0}} Recipient# [8:355:29] 2025-05-07T08:47:07.011970Z 7 00h00m00.000000s :BS_PDISK DEBUG: {PDM11@pdisk_mock.cpp:585} PDiskMock[7:1] received TEvLog Msg# {EvLog ownerId# 1 ownerRound# 2 Signature# 138 DataSize# 581 Lsn# 26 LsnSegmentStart# 26 Cookie# 0{CommitRecord FirstLsnToKeep# 0 IsStartingPoint# 1 DeleteToDecommitted# 0 CommitChunks# [] DeleteChunks# [] DirtyChunks# []}} VDiskId# [0:4294967295:0:6:0] 2025-05-07T08:47:07.012066Z 7 00h00m00.000000s :BS_PDISK DEBUG: {PDM12@pdisk_mock.cpp:647} PDiskMock[7:1] sending TEvLogResult Msg# {EvLogResult Status# OK ErrorReason# "" StatusFlags# None LogChunkCount# 0{Lsn# 26 Cookie# 0}} Recipient# [7:345:29] 2025-05-07T08:47:07.012193Z 8 00h00m00.000000s :BS_PDISK DEBUG: {PDM11@pdisk_mock.cpp:585} PDiskMock[8:1] received TEvLog Msg# {EvLog ownerId# 1 ownerRound# 2 Signature# 138 DataSize# 580 Lsn# 26 LsnSegmentStart# 26 Cookie# 0{CommitRecord FirstLsnToKeep# 0 IsStartingPoint# 1 DeleteToDecommitted# 0 CommitChunks# [] DeleteChunks# [] DirtyChunks# []}} VDiskId# [0:4294967295:0:7:0] 2025-05-07T08:47:07.012251Z 8 00h00m00.000000s :BS_PDISK DEBUG: {PDM12@pdisk_mock.cpp:647} PDiskMock[8:1] sending TEvLogResult Msg# {EvLogResult Status# OK ErrorReason# "" StatusFlags# None LogChunkCount# 0{Lsn# 26 Cookie# 0}} Recipient# [8:355:29] 2025-05-07T08:47:07.012631Z 2 00h00m00.000000s :BS_VDISK_GET DEBUG: PDiskId# 1 VDISK[0:_:0:1:0]: (0) TEvVGet: {ExtrQuery# [1:1:1:0:0:1048576:0] sh# 0 sz# 0} {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 } Cost# 1680000 ExtQueueId# GetFastRead IntQueueId# IntGetFast CostSettings# { SeekTimeUs# 40 ReadSpeedBps# 1048576000 WriteSpeedBps# 1048576000 ReadBlockSize# 65536 WriteBlockSize# 65536 MinHugeBlobInBytes# 65537} SendMeCostSettings# 1} Notify# 0 Internals# 1 TabletId# 0 AcquireBlockedGeneration# 0 ForceBlockedGeneration# 0} Marker# BSVS14 2025-05-07T08:47:07.013059Z 7 00h00m00.000000s :BS_PDISK DEBUG: {PDM11@pdisk_mock.cpp:585} PDiskMock[7:1] received TEvLog Msg# {EvLog ownerId# 1 ownerRound# 2 Signature# 138 DataSize# 581 Lsn# 27 LsnSegmentStart# 27 Cookie# 0{CommitRecord FirstLsnToKeep# 0 IsStartingPoint# 1 DeleteToDecommitted# 0 CommitChunks# [] DeleteChunks# [] DirtyChunks# []}} VDiskId# [0:4294967295:0:6:0] 2025-05-07T08:47:07.013122Z 7 00h00m00.000000s :BS_PDISK DEBUG: {PDM12@pdisk_mock.cpp:647} PDiskMock[7:1] sending TEvLogResult Msg# {EvLogResult Status# OK ErrorReason# "" StatusFlags# None LogChunkCount# 0{Lsn# 27 Cookie# 0}} Recipient# [7:345:29] 2025-05-07T08:47:07.013196Z 2 00h00m00.000000s :BS_VDISK_GET DEBUG: PDiskId# 1 VDISK[0:_:0:1:0]: (0) GLUEREAD(0x5110008f0600): {EvChunkRead chunkIdx# 1 Offset# 5 Size# 1048576 ownerId# 1 ownerRound# 2 PriorityClass# 2 Cookie# 89335320035904} 2025-05-07T08:47:07.013280Z 8 00h00m00.000000s 
:BS_PDISK DEBUG: {PDM11@pdisk_mock.cpp:585} PDiskMock[8:1] received TEvLog Msg# {EvLog ownerId# 1 ownerRound# 2 Signature# 138 DataSize# 580 Lsn# 27 LsnSegmentStart# 27 Cookie# 0{CommitRecord FirstLsnToKeep# 0 IsStartingPoint# 1 DeleteToDecommitted# 0 CommitChunks# [] DeleteChunks# [] DirtyChunks# []}} VDiskId# [0:4294967295:0:7:0] 2025-05-07T08:47:07.013333Z 8 00h00m00.000000s :BS_PDISK DEBUG: {PDM12@pdisk_mock.cpp:647} PDiskMock[8:1] sending TEvLogResult Msg# {EvLogResult Status# OK ErrorReason# "" StatusFlags# None LogChunkCount# 0{Lsn# 27 Cookie# 0}} Recipient# [8:355:29] 2025-05-07T08:47:07.013440Z 2 00h00m00.000000s :BS_PDISK DEBUG: {PDM13@pdisk_mock.cpp:737} PDiskMock[2:1] received TEvChunkRead Msg# {EvChunkRead chunkIdx# 1 Offset# 5 Size# 1048576 ownerId# 1 ownerRound# 2 PriorityClass# 2 Cookie# 89335320035904} VDiskId# [0:4294967295:0:1:0] 2025-05-07T08:47:07.014559Z 2 00h00m00.000000s :BS_PDISK DEBUG: {PDM14@pdisk_mock.cpp:777} PDiskMock[2:1] sending TEvChunkReadResult Msg# {EvChunkReadres Status# OK ErrorReason# "" chunkIdx# 1 Offset# 5 DataSize# 1048576 Cookie# 89335320035904 StatusFlags# None} 2025-05-07T08:47:07.014745Z 2 00h00m00.000000s :BS_VDISK_GET DEBUG: PDiskId# 1 VDISK[0:_:0:1:0]: (0) GLUEREAD FINISHED(0x5110008f0600): actualReadN# 1 origReadN# 1 2025-05-07T08:47:07.015178Z 2 00h00m00.000000s :BS_VDISK_GET DEBUG: PDiskId# 1 VDISK[0:_:0:1:0]: (0) TEvVGetResult: {EvVGetResult QueryResult Status# OK {[1:1:1:0:0:1048576:2] OK Size# 1048576 FullDataSize# 1048576 PayloadId# 0 Data# 1048576b Ingress# 1369701526376808448} BlockedGeneration# 0} 2025-05-07T08:47:07.019738Z 3 00h00m00.000000s :BS_VDISK_GET DEBUG: PDiskId# 1 VDISK[0:_:0:2:0]: (0) TEvVGet: {ExtrQuery# [1:1:1:0:0:1048576:0] sh# 0 sz# 0} {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 } Cost# 1680000 ExtQueueId# GetFastRead IntQueueId# IntGetFast CostSettings# { SeekTimeUs# 40 ReadSpeedBps# 1048576000 WriteSpeedBps# 1048576000 ReadBlockSize# 65536 WriteBlockSize# 65536 MinHugeBlobInBytes# 65537} SendMeCostSettings# 1} Notify# 0 Internals# 1 TabletId# 0 AcquireBlockedGeneration# 0 ForceBlockedGeneration# 0} Marker# BSVS14 2025-05-07T08:47:07.020829Z 3 00h00m00.000000s :BS_VDISK_GET DEBUG: PDiskId# 1 VDISK[0:_:0:2:0]: (0) GLUEREAD(0x5110009f5780): {EvChunkRead chunkIdx# 1 Offset# 5 Size# 1048576 ownerId# 1 ownerRound# 2 PriorityClass# 2 Cookie# 89335320053312} 2025-05-07T08:47:07.021327Z 3 00h00m00.000000s :BS_PDISK DEBUG: {PDM13@pdisk_mock.cpp:737} PDiskMock[3:1] received TEvChunkRead Msg# {EvChunkRead chunkIdx# 1 Offset# 5 Size# 1048576 ownerId# 1 ownerRound# 2 PriorityClass# 2 Cookie# 89335320053312} VDiskId# [0:4294967295:0:2:0] 2025-05-07T08:47:07.022504Z 3 00h00m00.000000s :BS_PDISK DEBUG: {PDM14@pdisk_mock.cpp:777} PDiskMock[3:1] sending TEvChunkReadResult Msg# {EvChunkReadres Status# OK ErrorReason# "" chunkIdx# 1 Offset# 5 DataSize# 1048576 Cookie# 89335320053312 StatusFlags# None} 2025-05-07T08:47:07.022674Z 3 00h00m00.000000s :BS_VDISK_GET DEBUG: PDiskId# 1 VDISK[0:_:0:2:0]: (0) GLUEREAD FINISHED(0x5110009f5780): actualReadN# 1 origReadN# 1 2025-05-07T08:47:07.022799Z 3 00h00m00.000000s :BS_VDISK_GET DEBUG: PDiskId# 1 VDISK[0:_:0:2:0]: (0) TEvVGetResult: {EvVGetResult QueryResult Status# OK {[1:1:1:0:0:1048576:1] OK Size# 1048576 FullDataSize# 1048576 PayloadId# 0 Data# 1048576b Ingress# 2522623030983655424} BlockedGeneration# 0} 2025-05-07T08:47:07.025783Z 4 00h00m00.000000s :BS_VDISK_GET DEBUG: PDiskId# 1 VDISK[0:_:0:3:0]: (0) TEvVGet: {ExtrQuery# [1:1:1:0:0:1048576:0] sh# 0 sz# 0} {MsgQoS MsgId# { SequenceId: 1 
MsgId: 0 } Cost# 1680000 ExtQueueId# GetFastRead IntQueueId# IntGetFast CostSettings# { SeekTimeUs# 40 ReadSpeedBps# 1048576000 WriteSpeedBps# 1048576000 ReadBlockSize# 65536 WriteBlockSize# 65536 MinHugeBlobInBytes# 65537} SendMeCostSettings# 1} Notify# 0 Internals# 1 TabletId# 0 AcquireBlockedGeneration# 0 ForceBlockedGeneration# 0} Marker# BSVS14 2025-05-07T08:47:07.026120Z 4 00h00m00.000000s :BS_VDISK_GET DEBUG: PDiskId# 1 VDISK[0:_:0:3:0]: (0) TEvVGetResult: {EvVGetResult QueryResult Status# OK {[1:1:1:0:0:1048576:3] OK Size# 0 FullDataSize# 1048576 BufferData# Ingress# 793240774073384960} BlockedGeneration# 0} 2025-05-07T08:47:07.027036Z 5 00h00m00.000000s :BS_VDISK_GET DEBUG: PDiskId# 1 VDISK[0:_:0:4:0]: (0) TEvVGet: {ExtrQuery# [1:1:1:0:0:1048576:0] sh# 0 sz# 0} {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 } Cost# 1680000 ExtQueueId# GetFastRead IntQueueId# IntGetFast CostSettings# { SeekTimeUs# 40 ReadSpeedBps# 1048576000 WriteSpeedBps# 1048576000 ReadBlockSize# 65536 WriteBlockSize# 65536 MinHugeBlobInBytes# 65537} SendMeCostSettings# 1} Notify# 0 Internals# 1 TabletId# 0 AcquireBlockedGeneration# 0 ForceBlockedGeneration# 0} Marker# BSVS14 2025-05-07T08:47:07.027271Z 5 00h00m00.000000s :BS_VDISK_GET DEBUG: PDiskId# 1 VDISK[0:_:0:4:0]: (0) TEvVGetResult: {EvVGetResult QueryResult Status# OK {[1:1:1:0:0:1048576:3] OK Size# 0 FullDataSize# 1048576 BufferData# Ingress# 793240774073384960} BlockedGeneration# 0} 2025-05-07T08:47:07.028004Z 6 00h00m00.000000s :BS_VDISK_GET DEBUG: PDiskId# 1 VDISK[0:_:0:5:0]: (0) TEvVGet: {ExtrQuery# [1:1:1:0:0:1048576:0] sh# 0 sz# 0} {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 } Cost# 1680000 ExtQueueId# GetFastRead IntQueueId# IntGetFast CostSettings# { SeekTimeUs# 40 ReadSpeedBps# 1048576000 WriteSpeedBps# 1048576000 ReadBlockSize# 65536 WriteBlockSize# 65536 MinHugeBlobInBytes# 65537} SendMeCostSettings# 1} Notify# 0 Internals# 1 TabletId# 0 AcquireBlockedGeneration# 0 ForceBlockedGeneration# 0} Marker# BSVS14 2025-05-07T08:47:07.028256Z 6 00h00m00.000000s :BS_VDISK_GET DEBUG: PDiskId# 1 VDISK[0:_:0:5:0]: (0) GLUEREAD(0x51100099c680): {EvChunkRead chunkIdx# 1 Offset# 5 Size# 1048576 ownerId# 1 ownerRound# 2 PriorityClass# 2 Cookie# 89335320008256} 2025-05-07T08:47:07.028346Z 6 00h00m00.000000s :BS_PDISK DEBUG: {PDM13@pdisk_mock.cpp:737} PDiskMock[6:1] received TEvChunkRead Msg# {EvChunkRead chunkIdx# 1 Offset# 5 Size# 1048576 ownerId# 1 ownerRound# 2 PriorityClass# 2 Cookie# 89335320008256} VDiskId# [0:4294967295:0:5:0] 2025-05-07T08:47:07.029445Z 6 00h00m00.000000s :BS_PDISK DEBUG: {PDM14@pdisk_mock.cpp:777} PDiskMock[6:1] sending TEvChunkReadResult Msg# {EvChunkReadres Status# OK ErrorReason# "" chunkIdx# 1 Offset# 5 DataSize# 1048576 Cookie# 89335320008256 StatusFlags# None} 2025-05-07T08:47:07.029528Z 6 00h00m00.000000s :BS_VDISK_GET DEBUG: PDiskId# 1 VDISK[0:_:0:5:0]: (0) GLUEREAD FINISHED(0x51100099c680): actualReadN# 1 origReadN# 1 2025-05-07T08:47:07.029660Z 6 00h00m00.000000s :BS_VDISK_GET DEBUG: PDiskId# 1 VDISK[0:_:0:5:0]: (0) TEvVGetResult: {EvVGetResult QueryResult Status# OK {[1:1:1:0:0:1048576:2] OK Size# 1048576 FullDataSize# 1048576 PayloadId# 0 Data# 1048576b Ingress# 1946162278680231936} {[1:1:1:0:0:1048576:3] OK Size# 0 FullDataSize# 1048576 BufferData# Ingress# 1946162278680231936} BlockedGeneration# 0} 2025-05-07T08:47:07.032321Z 7 00h00m00.000000s :BS_VDISK_GET DEBUG: PDiskId# 1 VDISK[0:_:0:6:0]: (0) TEvVGet: {ExtrQuery# [1:1:1:0:0:1048576:0] sh# 0 sz# 0} {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 } Cost# 1680000 ExtQueueId# GetFastRead 
IntQueueId# IntGetFast CostSettings# { SeekTimeUs# 40 ReadSpeedBps# 1048576000 WriteSpeedBps# 1048576000 ReadBlockSize# 65536 WriteBlockSize# 65536 MinHugeBlobInBytes# 65537} SendMeCostSettings# 1} Notify# 0 Internals# 1 TabletId# 0 AcquireBlockedGeneration# 0 ForceBlockedGeneration# 0} Marker# BSVS14 2025-05-07T08:47:07.032578Z 7 00h00m00.000000s :BS_VDISK_GET DEBUG: PDiskId# 1 VDISK[0:_:0:6:0]: (0) TEvVGetResult: {EvVGetResult QueryResult Status# OK {[1:1:1:0:0:1048576:0] NODATA Ingress# 216780021769961472} BlockedGeneration# 0} 2025-05-07T08:47:07.033382Z 8 00h00m00.000000s :BS_VDISK_GET DEBUG: PDiskId# 1 VDISK[0:_:0:7:0]: (0) TEvVGet: {ExtrQuery# [1:1:1:0:0:1048576:0] sh# 0 sz# 0} {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 } Cost# 1680000 ExtQueueId# GetFastRead IntQueueId# IntGetFast CostSettings# { SeekTimeUs# 40 ReadSpeedBps# 1048576000 WriteSpeedBps# 1048576000 ReadBlockSize# 65536 WriteBlockSize# 65536 MinHugeBlobInBytes# 65537} SendMeCostSettings# 1} Notify# 0 Internals# 1 TabletId# 0 AcquireBlockedGeneration# 0 ForceBlockedGeneration# 0} Marker# BSVS14 2025-05-07T08:47:07.033580Z 8 00h00m00.000000s :BS_VDISK_GET DEBUG: PDiskId# 1 VDISK[0:_:0:7:0]: (0) TEvVGetResult: {EvVGetResult QueryResult Status# OK {[1:1:1:0:0:1048576:0] NODATA Ingress# 216780021769961472} BlockedGeneration# 0} >> TYardTest::TestMultiYardLogLatency [GOOD] >> TYardTest::TestMultiYardStartingPoints |88.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_base/ydb-core-tx-schemeshard-ut_base |88.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_base/ydb-core-tx-schemeshard-ut_base >> TWebLoginService::AuditLogLoginSuccess [GOOD] >> TSchemeShardLoginTest::RemoveUser_NonExisting-StrictAclCheck-false [GOOD] >> TWebLoginService::AuditLogLoginBadPassword >> TWebLoginService::AuditLogEmptySIDsLoginSuccess [GOOD] >> TSchemeShardLoginTest::RemoveUser_NonExisting-StrictAclCheck-true >> TWebLoginService::AuditLogLdapLoginBadPassword >> TSchemeShardLoginTest::RemoveGroup_NonExisting-StrictAclCheck-false [GOOD] >> TSchemeShardLoginTest::BanUnbanUser [GOOD] >> TSchemeShardLoginTest::BanUserWithWaiting >> TSchemeShardLoginTest::RemoveGroup_NonExisting-StrictAclCheck-true >> TSchemeShardLoginTest::AddAccess_NonExisting-StrictAclCheck-false [GOOD] >> TSchemeShardLoginTest::RemoveUser-StrictAclCheck-false [GOOD] >> TSchemeShardLoginTest::RemoveUser_Owner-StrictAclCheck-true [GOOD] >> TSchemeShardLoginTest::RemoveUser-StrictAclCheck-true >> TSchemeShardLoginTest::TestExternalLogin >> TSchemeShardLoginTest::AddAccess_NonExisting-StrictAclCheck-true >> TSchemeShardLoginTest::RemoveGroup-StrictAclCheck-false [GOOD] >> TSchemeShardLoginTest::RemoveGroup-StrictAclCheck-true >> TSchemeShardLoginTest::UserLogin [GOOD] >> TSchemeShardLoginTest::UserStayLockedOutIfEnterValidPassword |88.3%| [TM] {RESULT} ydb/core/blobstorage/ut_mirror3of4/unittest |88.3%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_base/ydb-core-tx-schemeshard-ut_base |88.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/health_check/ut/ydb-core-health_check-ut |88.3%| [LD] {RESULT} $(B)/ydb/core/health_check/ut/ydb-core-health_check-ut |88.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/health_check/ut/ydb-core-health_check-ut >> TSchemeShardLoginTest::RemoveGroup_NonExisting-StrictAclCheck-true [GOOD] >> TSchemeShardLoginTest::RemoveGroup_Owner-StrictAclCheck-false >> TWebLoginService::AuditLogLdapLoginBadPassword [GOOD] >> TWebLoginService::AuditLogLdapLoginBadUser >> TSchemeShardLoginTest::TestExternalLogin [GOOD] >> 
TYardTest::TestMultiYardStartingPoints [GOOD] >> TYardTest::TestMultiYardLogMultipleWriteRead >> TSchemeShardLoginTest::TestExternalLoginWithIncorrectLdapDomain >> TWebLoginService::AuditLogLoginBadPassword [GOOD] >> TWebLoginService::AuditLogLdapLoginSuccess >> TSchemeShardLoginTest::UserStayLockedOutIfEnterValidPassword [GOOD] >> TWebLoginService::AuditLogAdminLoginSuccess >> TSchemeShardLoginTest::RemoveUser_NonExisting-StrictAclCheck-true [GOOD] >> TSchemeShardLoginTest::RemoveUser_Groups-StrictAclCheck-false >> TComputeScheduler::TTotalLimits [GOOD] >> TComputeScheduler::ResourceWeight [GOOD] >> TSchemeShardLoginTest::RemoveGroup-StrictAclCheck-true [GOOD] >> TSchemeShardLoginTest::DisableBuiltinAuthMechanism >> TKqpScanData::EmptyColumnsAndNonEmptyArrowBatch >> TComputeScheduler::QueryLimits [GOOD] >> TKqpScanData::FailOnUnsupportedPgType >> TSchemeShardLoginTest::RemoveUser-StrictAclCheck-true [GOOD] >> TSchemeShardLoginTest::RemoveUser_Acl-StrictAclCheck-false >> TKqpScanData::FailOnUnsupportedPgType [GOOD] >> TKqpScanData::EmptyColumnsAndNonEmptyArrowBatch [GOOD] >> TSchemeShardLoginTest::AddAccess_NonExisting-StrictAclCheck-true [GOOD] >> TSchemeShardLoginTest::AddAccess_NonYdb-StrictAclCheck-false >> TKqpScanData::ArrowToUnboxedValueConverter >> TWebLoginService::AuditLogLdapLoginBadUser [GOOD] >> TWebLoginService::AuditLogLdapLoginBadBind >> TKqpScanData::UnboxedValueSize >> TSchemeShardTopicSplitMergeTest::SplitWithWrongBoundary >> TSchemeShardLoginTest::RemoveGroup_Owner-StrictAclCheck-false [GOOD] >> TSchemeShardTopicSplitMergeTest::MargeInactivePartitions >> TSchemeShardLoginTest::RemoveGroup_Acl-StrictAclCheck-false ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/runtime/ut/unittest >> TComputeScheduler::TTotalLimits [GOOD] Test command err: 1610 1600 1610 1600 >> TKqpScanData::ArrowToUnboxedValueConverter [GOOD] >> TSchemeShardTopicSplitMergeTest::CreateTopicWithOnePartition >> TSchemeShardLoginTest::TestExternalLoginWithIncorrectLdapDomain [GOOD] >> TSchemeShardLoginTest::ResetFailedAttemptCount >> TKqpScanData::UnboxedValueSize [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/runtime/ut/unittest >> TComputeScheduler::ResourceWeight [GOOD] Test command err: 510 500 1510 1500 990 1000 1000 1000 >> TSchemeShardTopicSplitMergeTest::Boot >> TSchemeShardTopicSplitMergeTest::SplitWithOnePartition >> TWebLoginService::AuditLogLdapLoginSuccess [GOOD] >> TWebLoginService::AuditLogLogout |88.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/runtime/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/runtime/ut/unittest >> TComputeScheduler::QueryLimits [GOOD] Test command err: 800 800 800 800 >> TKqpScanData::EmptyColumns >> TSchemeShardTopicSplitMergeTest::SplitWithWrongPartition >> TKqpScanData::DifferentNumberOfInputAndResultColumns >> TSchemeShardTopicSplitMergeTest::MargePartitions >> TSchemeShardTopicSplitMergeTest::MargeUnorderedPartitions >> TKqpScanData::DifferentNumberOfInputAndResultColumns [GOOD] >> TKqpScanData::EmptyColumns [GOOD] |88.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/runtime/ut/unittest >> TKqpScanData::FailOnUnsupportedPgType [GOOD] >> test_sql_streaming.py::test[suites-GroupByHoppingWindowTimeExtractorUnusedColumns-default.txt] >> ClosedIntervalSet::EnumInRangeReverse [GOOD] >> GivenIdRange::IssueNewRange [GOOD] >> GivenIdRange::Trim >> test_sql_streaming.py::test[suites-ReadTopicWithMetadataWithFilter-default.txt] |88.4%| [TM] {asan, default-linux-x86_64, release} 
ydb/core/kqp/runtime/ut/unittest >> TKqpScanData::EmptyColumnsAndNonEmptyArrowBatch [GOOD] |88.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/runtime/ut/unittest >> TKqpScanData::ArrowToUnboxedValueConverter [GOOD] >> TSchemeShardLoginTest::DisableBuiltinAuthMechanism [GOOD] >> TSchemeShardLoginTest::FailedLoginUserUnderNameOfGroup >> TWebLoginService::AuditLogAdminLoginSuccess [GOOD] >> TWebLoginService::AuditLogCreateModifyUser |88.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/runtime/ut/unittest >> TKqpScanData::UnboxedValueSize [GOOD] >> TSchemeShardLoginTest::RemoveUser_Groups-StrictAclCheck-false [GOOD] >> TSchemeShardLoginTest::RemoveUser_Groups-StrictAclCheck-true >> TSchemeShardLoginTest::RemoveUser_Acl-StrictAclCheck-false [GOOD] >> TSchemeShardLoginTest::RemoveUser_Acl-StrictAclCheck-true >> TSchemeShardLoginTest::RemoveGroup_Acl-StrictAclCheck-false [GOOD] >> TSchemeShardLoginTest::RemoveGroup_Acl-StrictAclCheck-true >> TWebLoginService::AuditLogLdapLoginBadBind [GOOD] |88.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/runtime/ut/unittest >> TKqpScanData::EmptyColumns [GOOD] >> TWebLoginService::AuditLogLogout [GOOD] |88.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/runtime/ut/unittest >> TKqpScanData::DifferentNumberOfInputAndResultColumns [GOOD] >> test_sql_streaming.py::test[suites-GroupByHop-default.txt] >> GivenIdRange::Trim [GOOD] >> GivenIdRange::Subtract >> TSchemeShardTopicSplitMergeTest::Boot [GOOD] >> TSchemeShardTopicSplitMergeTest::CreateTopicWithManyPartition >> TSchemeShardTopicSplitMergeTest::SplitTwoPartitions >> TSchemeShardLoginTest::FailedLoginUserUnderNameOfGroup [GOOD] >> TSchemeShardLoginTest::FailedLoginWithInvalidUser >> GivenIdRange::Subtract [GOOD] >> GivenIdRange::Points >> TWebLoginService::AuditLogCreateModifyUser [GOOD] >> TSchemeShardTopicSplitMergeTest::SplitWithWrongBoundary [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_login/unittest >> TWebLoginService::AuditLogLdapLoginBadBind [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2141] Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:127:2058] recipient: [1:109:2141] 2025-05-07T08:47:07.781186Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:47:07.781276Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:47:07.781321Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:47:07.781352Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:47:07.781381Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:47:07.781401Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 
10000 2025-05-07T08:47:07.781451Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:47:07.781499Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:47:07.782061Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:47:07.782321Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:47:07.845406Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:47:07.845462Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:47:07.859454Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:47:07.859655Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:47:07.859815Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:47:07.865783Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:47:07.866107Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:47:07.866725Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:47:07.866935Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:47:07.869789Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:47:07.871100Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:47:07.871162Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:47:07.871232Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:47:07.871296Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:47:07.871352Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:47:07.871566Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:47:07.878199Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:125:2151] 
sender: [1:239:2058] recipient: [1:15:2062] 2025-05-07T08:47:07.984430Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:47:07.984661Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:07.984886Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:47:07.985145Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:47:07.985212Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:07.987520Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:47:07.987758Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:47:07.987982Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:07.988040Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:47:07.988083Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:47:07.988116Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:47:07.989909Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:07.989992Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:47:07.990047Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:47:07.991878Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:07.991933Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:07.991982Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:47:07.992033Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:47:07.995634Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:47:07.997938Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:47:07.998171Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:47:07.999258Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:47:07.999406Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 134 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:47:07.999457Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:47:07.999721Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:47:07.999770Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:47:07.999937Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:47:08.000033Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:47:08.002481Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:47:08.002526Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:47:08.002700Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:47:08.002744Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 
type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:11.778784Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:47:11.778959Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:47:11.779123Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:11.779187Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:47:11.779230Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:47:11.779266Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:47:11.786755Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:11.786836Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:47:11.786880Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:47:11.790781Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:11.790855Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:11.790912Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:47:11.790968Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:47:11.791118Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:47:11.802695Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:47:11.802898Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:47:11.803711Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: 
TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:47:11.803828Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 134 RawX2: 17179871341 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:47:11.803883Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:47:11.804132Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:47:11.804198Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:47:11.804373Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:47:11.804441Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:47:11.810916Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:47:11.810981Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:47:11.811168Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:47:11.811220Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [4:207:2209], at schemeshard: 72057594046678944, txId: 1, path id: 1 2025-05-07T08:47:11.811529Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:11.811586Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046678944] TDone opId# 1:0 ProgressState 2025-05-07T08:47:11.811708Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#1:0 progress is 1/1 2025-05-07T08:47:11.811752Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-05-07T08:47:11.811804Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#1:0 progress is 1/1 2025-05-07T08:47:11.811839Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-05-07T08:47:11.811880Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 1, ready parts: 1/1, is published: false 2025-05-07T08:47:11.811928Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-05-07T08:47:11.811976Z node 4 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 1:0 2025-05-07T08:47:11.812012Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 1:0 2025-05-07T08:47:11.812093Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-05-07T08:47:11.812141Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 1, publications: 1, subscribers: 0 2025-05-07T08:47:11.812178Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 1, [OwnerId: 72057594046678944, LocalPathId: 1], 3 2025-05-07T08:47:11.812757Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-05-07T08:47:11.812893Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-05-07T08:47:11.812949Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1 2025-05-07T08:47:11.812995Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 3 2025-05-07T08:47:11.813040Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:47:11.813146Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1, subscribers: 0 2025-05-07T08:47:11.830207Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1 2025-05-07T08:47:11.830690Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1, at schemeshard: 72057594046678944 2025-05-07T08:47:11.831742Z node 4 :HTTP WARN: login_page.cpp:102: 127.0.0.1:0 POST /login 2025-05-07T08:47:11.831921Z node 4 :TX_PROXY DEBUG: proxy_impl.cpp:434: actor# [4:270:2261] Bootstrap 2025-05-07T08:47:11.858963Z node 4 :TX_PROXY DEBUG: proxy_impl.cpp:453: actor# [4:270:2261] Become StateWork (SchemeCache [4:279:2270]) 2025-05-07T08:47:11.859214Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:16922, port: 16922 2025-05-07T08:47:11.859307Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-05-07T08:47:11.886793Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:201: Could not perform initial LDAP bind for dn cn=robouser,dc=search,dc=yandex,dc=net on server ldap://localhost:16922. 
Invalid credentials 2025-05-07T08:47:11.887297Z node 4 :HTTP ERROR: login_page.cpp:209: Login fail for user1@ldap: Could not login via LDAP 2025-05-07T08:47:11.887944Z node 4 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [4:270:2261] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-05-07T08:47:11.894756Z node 4 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 AUDIT LOG buffer(2): 2025-05-07T08:47:11.778915Z: component=schemeshard, tx_id=1, remote_address={none}, subject={none}, sanitized_token={none}, database={none}, operation=ALTER DATABASE, paths=[//MyRoot], status=SUCCESS, detailed_status=StatusAccepted 2025-05-07T08:47:11.887058Z: component=grpc-login, remote_address=localhost, database=/MyRoot, operation=LOGIN, status=ERROR, detailed_status=UNAUTHORIZED, reason=Could not login via LDAP: Could not perform initial LDAP bind for dn cn=robouser,dc=search,dc=yandex,dc=net on server ldap://localhost:16922. Invalid credentials, login_user=user1@ldap, sanitized_token={none} AUDIT LOG checked line: 2025-05-07T08:47:11.887058Z: component=grpc-login, remote_address=localhost, database=/MyRoot, operation=LOGIN, status=ERROR, detailed_status=UNAUTHORIZED, reason=Could not login via LDAP: Could not perform initial LDAP bind for dn cn=robouser,dc=search,dc=yandex,dc=net on server ldap://localhost:16922. Invalid credentials, login_user=user1@ldap, sanitized_token={none} >> TSchemeShardTopicSplitMergeTest::SplitWithOnePartition [GOOD] >> TYardTest::TestMultiYardLogMultipleWriteRead [GOOD] >> TYardTest::TestSysLogOverwrite >> TSchemeShardTopicSplitMergeTest::SplitWithManyPartition >> TSchemeShardLoginTest::RemoveUser_Acl-StrictAclCheck-true [GOOD] >> TSchemeShardLoginTest::RemoveGroup_Owner-StrictAclCheck-true >> TSchemeShardLoginTest::AddAccess_NonYdb-StrictAclCheck-false [GOOD] >> TSchemeShardLoginTest::AddAccess_NonYdb-StrictAclCheck-true |88.4%| [TA] $(B)/ydb/core/kqp/runtime/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_login/unittest >> TWebLoginService::AuditLogLogout [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2141] Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:127:2058] recipient: [1:109:2141] 2025-05-07T08:47:07.777103Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:47:07.777208Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:47:07.777251Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:47:07.777289Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:47:07.777335Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:47:07.777367Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:47:07.777439Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:47:07.777529Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:47:07.778378Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:47:07.778684Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:47:07.845463Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:47:07.845539Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:47:07.860454Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:47:07.860627Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:47:07.860765Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:47:07.866171Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:47:07.866404Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:47:07.867059Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 
72057594046678944 2025-05-07T08:47:07.867208Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:47:07.871611Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:47:07.872921Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:47:07.872987Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:47:07.873057Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:47:07.873111Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:47:07.873166Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:47:07.873388Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:47:07.880118Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:239:2058] recipient: [1:15:2062] 2025-05-07T08:47:08.041723Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:47:08.042011Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:08.042302Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:47:08.042588Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:47:08.042671Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:08.046367Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:47:08.046639Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:47:08.046978Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at 
schemeshard: 72057594046678944 2025-05-07T08:47:08.047070Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:47:08.047123Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:47:08.047179Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:47:08.050345Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:08.050424Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:47:08.050489Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:47:08.053387Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:08.054670Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:08.054739Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:47:08.054843Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:47:08.059826Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:47:08.062733Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:47:08.062972Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:47:08.064002Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:47:08.064145Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 134 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:47:08.064212Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:47:08.064517Z node 
1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:47:08.064571Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:47:08.064750Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:47:08.064835Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:47:08.067651Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:47:08.067706Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:47:08.067870Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:47:08.067920Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... xId: 101 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:47:11.975462Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 101:1, propose status:StatusSuccess, reason: , at schemeshard: 72057594046678944 2025-05-07T08:47:11.975621Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#101:0 progress is 1/1 2025-05-07T08:47:11.975668Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-05-07T08:47:11.975714Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#101:0 progress is 1/1 2025-05-07T08:47:11.975749Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-05-07T08:47:11.975808Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:47:11.975872Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 101, ready parts: 1/1, is published: false 2025-05-07T08:47:11.975911Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-05-07T08:47:11.975949Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 101:0 2025-05-07T08:47:11.975987Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 101, publications: 1, subscribers: 0 2025-05-07T08:47:11.976023Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 1], 4 2025-05-07T08:47:11.977074Z node 4 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [4:270:2261] 
HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-05-07T08:47:11.980875Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 101, response: Status: StatusSuccess TxId: 101 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:47:11.980987Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 101, database: /MyRoot, subject: , status: StatusSuccess, operation: CREATE USER, path: /MyRoot 2025-05-07T08:47:11.981241Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:47:11.981288Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 101, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:47:11.981495Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:47:11.981541Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [4:207:2209], at schemeshard: 72057594046678944, txId: 101, path id: 1 2025-05-07T08:47:11.982243Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 4 PathOwnerId: 72057594046678944, cookie: 101 2025-05-07T08:47:11.982357Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 4 PathOwnerId: 72057594046678944, cookie: 101 2025-05-07T08:47:11.982403Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 101 2025-05-07T08:47:11.982814Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 4 2025-05-07T08:47:11.982890Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:47:11.983005Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 101, subscribers: 0 2025-05-07T08:47:11.983361Z node 4 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 2025-05-07T08:47:11.985046Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 TestModificationResult got TxId: 101, wait until txId: 101 2025-05-07T08:47:11.985449Z node 4 :HTTP WARN: login_page.cpp:102: 127.0.0.1:0 POST /login 2025-05-07T08:47:11.987397Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:43: TTxLogin Execute at schemeshard: 72057594046678944 2025-05-07T08:47:11.987452Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:46: TTxLogin RotateKeys at schemeshard: 72057594046678944 
2025-05-07T08:47:12.018292Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:85: TTxLogin Complete, result: Token: "eyJhbGciOiJQUzI1NiIsImtpZCI6IjEifQ.eyJhdWQiOlsiXC9NeVJvb3QiXSwiZXhwIjoxNzQ2NjUwODMyLCJpYXQiOjE3NDY2MDc2MzIsInN1YiI6InVzZXIxIn0.moMHwBkR4Z-bGQr4ld9LdkMkoD6iVzyGNnCn85U9yycQAjou6Iu3h0pIfzOdqLE9uGmejV5N_lZdyvdawrFguzNX3jEa-svLTa2GWk2J4_-x9-mDsMVDE7pEmekSEtPXmnWPy-b51XP8BQAq-9ULvnllnbuicDZLHnGTYWmZ3BuN8CTzyylfnKbnDNXlS9KVfD_Hkyj_NqI7qauyo3iQXJhxnc4oSRterMyQgXGUEWcMfOpEGEo73mntAUj7O7ih6Buv8hA3V70ZcmQ5my1VmSQdBaHdG9yoAQB2f274DTmlchBY_Rg1xvtDnWr73OmBszp0WzZk9UpgVFSLEN89XA" SanitizedToken: "eyJhbGciOiJQUzI1NiIsImtpZCI6IjEifQ.eyJhdWQiOlsiXC9NeVJvb3QiXSwiZXhwIjoxNzQ2NjUwODMyLCJpYXQiOjE3NDY2MDc2MzIsInN1YiI6InVzZXIxIn0.**" IsAdmin: true, at schemeshard: 72057594046678944 2025-05-07T08:47:12.018669Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:47:12.018716Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 0, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:47:12.018929Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:47:12.018977Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [4:207:2209], at schemeshard: 72057594046678944, txId: 0, path id: 1 2025-05-07T08:47:12.020110Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 0 2025-05-07T08:47:12.020875Z node 4 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T08:47:12.021071Z node 4 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 240us result status StatusSuccess 2025-05-07T08:47:12.021586Z node 4 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 1 SecurityStateVersion: 2 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { 
TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { PublicKeys { KeyId: 1 KeyDataPEM: "-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAxsL+zvK2PiR7SfmxH9Ix\nqPTkmd72mRcXkP8nabiYmYw4Mrp5KXXteorKUqdrfD1AnZxtW1UZdE+tmu2ZojtH\ny9EMaH1Q/eAElX92YHHSf0iLmlolyUvM402BV+9uhwSp/dgwh/4tZl9foP5c2Tka\nfN3Luk8LanrS+8K8JBAVBk+oAPdY0XnPTupgO6JdyYoQBjg42DlkI3p38OKDMuW2\nhhNvG3MWhY5BAKR/KCBFQ4tuBl62J071+eER9lk89vvGsEbOy0EpZty8gXBHI3ip\nFl78cUD1J1stUMJXTXRs+yWLrdCgPawiuJ5yQwWIyM45/GsZj94mnYQ8BDpp7nfh\nnwIDAQAB\n-----END PUBLIC KEY-----\n" ExpiresAt: 1746694032007 } Sids { Name: "user1" Type: USER } Audience: "/MyRoot" } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:47:12.022231Z node 4 :HTTP WARN: login_page.cpp:248: 127.0.0.1:0 POST /logout 2025-05-07T08:47:12.022293Z node 4 :HTTP ERROR: login_page.cpp:326: Logout: No ydb_session_id cookie 2025-05-07T08:47:12.022723Z node 4 :HTTP WARN: login_page.cpp:248: 127.0.0.1:0 POST /logout 2025-05-07T08:47:12.023527Z node 4 :TICKET_PARSER ERROR: ticket_parser_impl.h:969: Ticket **** (589A015B): Token is not in correct format 2025-05-07T08:47:12.023612Z node 4 :HTTP ERROR: login_page.cpp:326: Logout: Token is not in correct format 2025-05-07T08:47:12.023965Z node 4 :HTTP WARN: login_page.cpp:248: 127.0.0.1:0 POST /logout AUDIT LOG buffer(4): 2025-05-07T08:47:11.931473Z: component=schemeshard, tx_id=1, remote_address={none}, subject={none}, sanitized_token={none}, database={none}, operation=ALTER DATABASE, paths=[//MyRoot], status=SUCCESS, detailed_status=StatusAccepted 2025-05-07T08:47:11.975343Z: component=schemeshard, tx_id=101, remote_address={none}, subject={none}, sanitized_token={none}, database=/MyRoot, operation=CREATE USER, paths=[/MyRoot], status=SUCCESS, detailed_status=StatusSuccess, login_user_level=admin, login_user=user1 2025-05-07T08:47:12.018478Z: component=grpc-login, remote_address=localhost, database=/MyRoot, operation=LOGIN, status=SUCCESS, login_user=user1, sanitized_token=eyJhbGciOiJQUzI1NiIsImtpZCI6IjEifQ.eyJhdWQiOlsiXC9NeVJvb3QiXSwiZXhwIjoxNzQ2NjUwODMyLCJpYXQiOjE3NDY2MDc2MzIsInN1YiI6InVzZXIxIn0.**, login_user_level=admin 2025-05-07T08:47:12.025108Z: component=web-login, remote_address=127.0.0.1, subject=user1, sanitized_token=eyJhbGciOiJQUzI1NiIsImtpZCI6IjEifQ.eyJhdWQiOlsiXC9NeVJvb3QiXSwiZXhwIjoxNzQ2NjUwODMyLCJpYXQiOjE3NDY2MDc2MzIsInN1YiI6InVzZXIxIn0.**, operation=LOGOUT, status=SUCCESS AUDIT LOG checked line: 2025-05-07T08:47:12.025108Z: component=web-login, remote_address=127.0.0.1, subject=user1, sanitized_token=eyJhbGciOiJQUzI1NiIsImtpZCI6IjEifQ.eyJhdWQiOlsiXC9NeVJvb3QiXSwiZXhwIjoxNzQ2NjUwODMyLCJpYXQiOjE3NDY2MDc2MzIsInN1YiI6InVzZXIxIn0.**, operation=LOGOUT, status=SUCCESS >> GivenIdRange::Points [GOOD] >> GivenIdRange::Runs >> TSchemeShardLoginTest::RemoveGroup_Acl-StrictAclCheck-true [GOOD] >> TSchemeShardTopicSplitMergeTest::MargeUnorderedPartitions [GOOD] >> TSchemeShardTopicSplitMergeTest::MargePartitions2 >> GivenIdRange::Runs [GOOD] >> TSchemeShardTopicSplitMergeTest::CreateTopicWithOnePartition [GOOD] >> TSchemeShardTopicSplitMergeTest::DisableSplitMerge >> GivenIdRange::Allocate >> GivenIdRange::Allocate [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_login/unittest >> TWebLoginService::AuditLogCreateModifyUser [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: 
[1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140] 2025-05-07T08:47:07.863543Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:47:07.863647Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:47:07.863707Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:47:07.863750Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:47:07.863793Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:47:07.863824Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:47:07.863878Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:47:07.863954Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:47:07.864732Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:47:07.865075Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:47:07.933740Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:47:07.933816Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:47:07.957513Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:47:07.957863Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:47:07.958087Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:47:07.966105Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:47:07.966407Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:47:07.966972Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:47:07.967116Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 
2025-05-07T08:47:07.971006Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:47:07.972829Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:47:07.972910Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:47:07.973004Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:47:07.973064Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:47:07.973126Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:47:07.973389Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:47:07.980822Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T08:47:08.156751Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:47:08.157026Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:08.157289Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:47:08.157559Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:47:08.157637Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:08.160664Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:47:08.160816Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:47:08.161012Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:08.161110Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 
2025-05-07T08:47:08.161167Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:47:08.161219Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:47:08.165462Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:08.165562Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:47:08.165606Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:47:08.171752Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:08.171868Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:08.171931Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:47:08.172009Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:47:08.176071Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:47:08.179585Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:47:08.179887Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:47:08.181101Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:47:08.181277Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:47:08.181342Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:47:08.181656Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:47:08.181723Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose 
HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:47:08.181922Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:47:08.183039Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:47:08.190281Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:47:08.190380Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:47:08.190597Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:47:08.190650Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... peration: MODIFY USER, path: /MyRoot 2025-05-07T08:47:12.851617Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:47:12.851663Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 105, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:47:12.851844Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:47:12.851888Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [4:207:2209], at schemeshard: 72057594046678944, txId: 105, path id: 1 2025-05-07T08:47:12.852448Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 8 PathOwnerId: 72057594046678944, cookie: 105 2025-05-07T08:47:12.852558Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 8 PathOwnerId: 72057594046678944, cookie: 105 2025-05-07T08:47:12.852617Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 105 2025-05-07T08:47:12.852664Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 105, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 8 2025-05-07T08:47:12.852709Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:47:12.852819Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 105, subscribers: 0 
2025-05-07T08:47:12.854737Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 105 AUDIT LOG buffer(6): 2025-05-07T08:47:12.751191Z: component=schemeshard, tx_id=1, remote_address={none}, subject={none}, sanitized_token={none}, database={none}, operation=ALTER DATABASE, paths=[//MyRoot], status=SUCCESS, detailed_status=StatusAccepted 2025-05-07T08:47:12.797915Z: component=schemeshard, tx_id=101, remote_address={none}, subject={none}, sanitized_token={none}, database=/MyRoot, operation=CREATE USER, paths=[/MyRoot], status=SUCCESS, detailed_status=StatusSuccess, login_user_level=admin, login_user=user1 2025-05-07T08:47:12.816152Z: component=schemeshard, tx_id=102, remote_address={none}, subject={none}, sanitized_token={none}, database=/MyRoot, operation=MODIFY USER, paths=[/MyRoot], status=SUCCESS, detailed_status=StatusSuccess, login_user_level=admin, login_user=user1, login_user_change=[password] 2025-05-07T08:47:12.827072Z: component=schemeshard, tx_id=103, remote_address={none}, subject={none}, sanitized_token={none}, database=/MyRoot, operation=MODIFY USER, paths=[/MyRoot], status=SUCCESS, detailed_status=StatusSuccess, login_user_level=admin, login_user=user1, login_user_change=[blocking] 2025-05-07T08:47:12.837188Z: component=schemeshard, tx_id=104, remote_address={none}, subject={none}, sanitized_token={none}, database=/MyRoot, operation=MODIFY USER, paths=[/MyRoot], status=SUCCESS, detailed_status=StatusSuccess, login_user_level=admin, login_user=user1, login_user_change=[unblocking] 2025-05-07T08:47:12.847927Z: component=schemeshard, tx_id=105, remote_address={none}, subject={none}, sanitized_token={none}, database=/MyRoot, operation=MODIFY USER, paths=[/MyRoot], status=SUCCESS, detailed_status=StatusSuccess, login_user_level=admin, login_user=user1, login_user_change=[password] AUDIT LOG checked line: 2025-05-07T08:47:12.847927Z: component=schemeshard, tx_id=105, remote_address={none}, subject={none}, sanitized_token={none}, database=/MyRoot, operation=MODIFY USER, paths=[/MyRoot], status=SUCCESS, detailed_status=StatusSuccess, login_user_level=admin, login_user=user1, login_user_change=[password] 2025-05-07T08:47:12.858115Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpAlterLogin AlterLogin { ModifyUser { User: "user1" Password: "password1" CanLogin: false } } } TxId: 106 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:47:12.863569Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 106:1, propose status:StatusSuccess, reason: , at schemeshard: 72057594046678944 2025-05-07T08:47:12.863718Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#106:0 progress is 1/1 2025-05-07T08:47:12.863779Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 106 ready parts: 1/1 2025-05-07T08:47:12.863833Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#106:0 progress is 1/1 2025-05-07T08:47:12.863868Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 106 ready parts: 1/1 2025-05-07T08:47:12.863936Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path 
for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:47:12.864004Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 106, ready parts: 1/1, is published: false 2025-05-07T08:47:12.864068Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 106 ready parts: 1/1 2025-05-07T08:47:12.864110Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 106:0 2025-05-07T08:47:12.864152Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 106, publications: 1, subscribers: 0 2025-05-07T08:47:12.864289Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 106, [OwnerId: 72057594046678944, LocalPathId: 1], 9 2025-05-07T08:47:12.866920Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 106, response: Status: StatusSuccess TxId: 106 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:47:12.867063Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 106, database: /MyRoot, subject: , status: StatusSuccess, operation: MODIFY USER, path: /MyRoot 2025-05-07T08:47:12.867294Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:47:12.867357Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 106, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:47:12.867536Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:47:12.867582Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [4:207:2209], at schemeshard: 72057594046678944, txId: 106, path id: 1 2025-05-07T08:47:12.868126Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 9 PathOwnerId: 72057594046678944, cookie: 106 2025-05-07T08:47:12.868235Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 9 PathOwnerId: 72057594046678944, cookie: 106 2025-05-07T08:47:12.868281Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 106 2025-05-07T08:47:12.868442Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 106, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 9 2025-05-07T08:47:12.868511Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:47:12.868617Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 106, 
subscribers: 0 2025-05-07T08:47:12.870344Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 106 AUDIT LOG buffer(7): 2025-05-07T08:47:12.751191Z: component=schemeshard, tx_id=1, remote_address={none}, subject={none}, sanitized_token={none}, database={none}, operation=ALTER DATABASE, paths=[//MyRoot], status=SUCCESS, detailed_status=StatusAccepted 2025-05-07T08:47:12.797915Z: component=schemeshard, tx_id=101, remote_address={none}, subject={none}, sanitized_token={none}, database=/MyRoot, operation=CREATE USER, paths=[/MyRoot], status=SUCCESS, detailed_status=StatusSuccess, login_user_level=admin, login_user=user1 2025-05-07T08:47:12.816152Z: component=schemeshard, tx_id=102, remote_address={none}, subject={none}, sanitized_token={none}, database=/MyRoot, operation=MODIFY USER, paths=[/MyRoot], status=SUCCESS, detailed_status=StatusSuccess, login_user_level=admin, login_user=user1, login_user_change=[password] 2025-05-07T08:47:12.827072Z: component=schemeshard, tx_id=103, remote_address={none}, subject={none}, sanitized_token={none}, database=/MyRoot, operation=MODIFY USER, paths=[/MyRoot], status=SUCCESS, detailed_status=StatusSuccess, login_user_level=admin, login_user=user1, login_user_change=[blocking] 2025-05-07T08:47:12.837188Z: component=schemeshard, tx_id=104, remote_address={none}, subject={none}, sanitized_token={none}, database=/MyRoot, operation=MODIFY USER, paths=[/MyRoot], status=SUCCESS, detailed_status=StatusSuccess, login_user_level=admin, login_user=user1, login_user_change=[unblocking] 2025-05-07T08:47:12.847927Z: component=schemeshard, tx_id=105, remote_address={none}, subject={none}, sanitized_token={none}, database=/MyRoot, operation=MODIFY USER, paths=[/MyRoot], status=SUCCESS, detailed_status=StatusSuccess, login_user_level=admin, login_user=user1, login_user_change=[password] 2025-05-07T08:47:12.863434Z: component=schemeshard, tx_id=106, remote_address={none}, subject={none}, sanitized_token={none}, database=/MyRoot, operation=MODIFY USER, paths=[/MyRoot], status=SUCCESS, detailed_status=StatusSuccess, login_user_level=admin, login_user=user1, login_user_change=[password, blocking] AUDIT LOG checked line: 2025-05-07T08:47:12.863434Z: component=schemeshard, tx_id=106, remote_address={none}, subject={none}, sanitized_token={none}, database=/MyRoot, operation=MODIFY USER, paths=[/MyRoot], status=SUCCESS, detailed_status=StatusSuccess, login_user_level=admin, login_user=user1, login_user_change=[password, blocking] >> TSchemeShardLoginTest::RemoveUser_Groups-StrictAclCheck-true [GOOD] >> TSchemeShardLoginTest::RemoveUser_Owner-StrictAclCheck-false >> TSchemeShardTopicSplitMergeTest::MargeInactivePartitions [GOOD] >> TSchemeShardTopicSplitMergeTest::EnableSplitMerge >> TSchemeShardTopicSplitMergeTest::MargePartitions [GOOD] >> TSchemeShardTopicSplitMergeTest::MargeNotAdjacentRangePartitions >> TSchemeShardLoginTest::BanUserWithWaiting [GOOD] >> TSchemeShardLoginTest::FailedLoginWithInvalidUser [GOOD] >> TSchemeShardLoginTest::ChangeAcceptablePasswordParameters ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_topic_splitmerge/unittest >> TSchemeShardTopicSplitMergeTest::SplitWithWrongBoundary [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 
72057594046678944 is [1:125:2151] sender: [1:128:2058] recipient: [1:108:2140] 2025-05-07T08:47:12.073417Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:47:12.073518Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:47:12.073557Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:47:12.073598Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:47:12.073644Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:47:12.073669Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:47:12.073722Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:47:12.073790Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:47:12.074555Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:47:12.074877Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:47:12.156607Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:47:12.156678Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:47:12.179189Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:47:12.179306Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:47:12.179455Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:47:12.195067Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:47:12.196080Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:47:12.196759Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:47:12.197110Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:47:12.199824Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 
2025-05-07T08:47:12.201291Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:47:12.201357Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:47:12.201409Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:47:12.201452Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:47:12.201487Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:47:12.201631Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:47:12.215087Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:239:2058] recipient: [1:15:2062] 2025-05-07T08:47:12.445682Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:47:12.445915Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:12.446160Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:47:12.446411Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:47:12.446471Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:12.451107Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:47:12.451260Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:47:12.451470Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:12.451539Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:47:12.451586Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do 
next state 2025-05-07T08:47:12.451619Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:47:12.453523Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:12.453590Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:47:12.453646Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:47:12.455442Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:12.455497Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:12.455553Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:47:12.455603Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:47:12.459469Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:47:12.461243Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:47:12.461399Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:47:12.462440Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:47:12.462585Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 130 RawX2: 4294969450 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:47:12.462644Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:47:12.462940Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:47:12.462997Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:47:12.463165Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: 
IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:47:12.463240Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:47:12.465082Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:47:12.465126Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:47:12.465325Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:47:12.465381Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... satisfy waiter [1:645:2569] TestWaitNotification: OK eventTxId 105 >>>>> Name: "Topic1" PQTabletConfig { PartitionConfig { } } Split { Partition: 1 SplitBoundary: "\001" } TestModificationResults wait txId: 106 2025-05-07T08:47:12.982967Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/USER_1" OperationType: ESchemeOpAlterPersQueueGroup AlterPersQueueGroup { Name: "Topic1" PQTabletConfig { PartitionConfig { } } Split { Partition: 1 SplitBoundary: "\001" } } } TxId: 106 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:47:12.983358Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_pq.cpp:509: TAlterPQ Propose, path: /MyRoot/USER_1/Topic1, pathId: , opId: 106:0, at schemeshard: 72057594046678944 2025-05-07T08:47:12.983577Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 106:1, propose status:StatusInvalidParameter, reason: Split boundary less or equals FromBound of partition: '01' <= '55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 54', at schemeshard: 72057594046678944 2025-05-07T08:47:12.987377Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 106, response: Status: StatusInvalidParameter Reason: "Split boundary less or equals FromBound of partition: \'01\' <= \'55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 54\'" TxId: 106 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:47:12.987565Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 106, database: /MyRoot/USER_1, subject: , status: StatusInvalidParameter, reason: Split boundary less or equals FromBound of partition: '01' <= '55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 54', operation: ALTER PERSISTENT QUEUE, path: /MyRoot/USER_1/Topic1 TestModificationResult got TxId: 106, wait until txId: 106 TestWaitNotification wait txId: 106 2025-05-07T08:47:12.987895Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 106: send EvNotifyTxCompletion 2025-05-07T08:47:12.987935Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 106 2025-05-07T08:47:12.988344Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown 
transaction, txId: 106, at schemeshard: 72057594046678944 2025-05-07T08:47:12.988438Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 106: got EvNotifyTxCompletionResult 2025-05-07T08:47:12.988477Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 106: satisfy waiter [1:652:2576] TestWaitNotification: OK eventTxId 106 >>>>> Name: "Topic1" PQTabletConfig { PartitionConfig { } } Split { Partition: 1 SplitBoundary: "UUUUUUUUUUUUUUUT" } TestModificationResults wait txId: 107 2025-05-07T08:47:12.991553Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/USER_1" OperationType: ESchemeOpAlterPersQueueGroup AlterPersQueueGroup { Name: "Topic1" PQTabletConfig { PartitionConfig { } } Split { Partition: 1 SplitBoundary: "UUUUUUUUUUUUUUUT" } } } TxId: 107 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:47:12.992054Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_pq.cpp:509: TAlterPQ Propose, path: /MyRoot/USER_1/Topic1, pathId: , opId: 107:0, at schemeshard: 72057594046678944 2025-05-07T08:47:12.992273Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 107:1, propose status:StatusInvalidParameter, reason: Split boundary less or equals FromBound of partition: '55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 54' <= '55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 54', at schemeshard: 72057594046678944 2025-05-07T08:47:12.994289Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 107, response: Status: StatusInvalidParameter Reason: "Split boundary less or equals FromBound of partition: \'55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 54\' <= \'55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 54\'" TxId: 107 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:47:12.994631Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 107, database: /MyRoot/USER_1, subject: , status: StatusInvalidParameter, reason: Split boundary less or equals FromBound of partition: '55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 54' <= '55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 54', operation: ALTER PERSISTENT QUEUE, path: /MyRoot/USER_1/Topic1 TestModificationResult got TxId: 107, wait until txId: 107 TestWaitNotification wait txId: 107 2025-05-07T08:47:12.995025Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 107: send EvNotifyTxCompletion 2025-05-07T08:47:12.995079Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 107 2025-05-07T08:47:12.995478Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 107, at schemeshard: 72057594046678944 2025-05-07T08:47:12.995575Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 107: got EvNotifyTxCompletionResult 2025-05-07T08:47:12.995615Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 107: satisfy waiter [1:659:2583] TestWaitNotification: OK eventTxId 107 >>>>> Name: "Topic1" PQTabletConfig { PartitionConfig { } } Split { Partition: 1 SplitBoundary: "\255" } TestModificationResults wait txId: 108 2025-05-07T08:47:12.998770Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/USER_1" OperationType: ESchemeOpAlterPersQueueGroup AlterPersQueueGroup { Name: "Topic1" PQTabletConfig { PartitionConfig { } } Split { Partition: 1 SplitBoundary: "\255" } } } TxId: 108 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:47:12.999020Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_pq.cpp:509: TAlterPQ Propose, path: /MyRoot/USER_1/Topic1, pathId: , opId: 108:0, at schemeshard: 72057594046678944 2025-05-07T08:47:12.999232Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 108:1, propose status:StatusInvalidParameter, reason: Split boundary greate or equals ToBound of partition: 'AD' >= 'AA AA AA AA AA AA AA AA AA AA AA AA AA AA AA A9' (FromBound is '55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 54'), at schemeshard: 72057594046678944 2025-05-07T08:47:13.001326Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 108, response: Status: StatusInvalidParameter Reason: "Split boundary greate or equals ToBound of partition: \'AD\' >= \'AA AA AA AA AA AA AA AA AA AA AA AA AA AA AA A9\' (FromBound is \'55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 54\')" TxId: 108 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:47:13.001499Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 108, database: /MyRoot/USER_1, subject: , status: StatusInvalidParameter, reason: Split boundary greate or equals ToBound of partition: 'AD' >= 'AA AA AA AA AA AA AA AA AA AA AA AA AA AA AA A9' (FromBound is '55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 54'), operation: ALTER PERSISTENT QUEUE, path: /MyRoot/USER_1/Topic1 TestModificationResult got TxId: 108, wait until txId: 108 TestWaitNotification wait txId: 108 2025-05-07T08:47:13.001811Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 108: send EvNotifyTxCompletion 2025-05-07T08:47:13.001867Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 108 2025-05-07T08:47:13.002506Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 108, at schemeshard: 72057594046678944 2025-05-07T08:47:13.002587Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 108: got EvNotifyTxCompletionResult 2025-05-07T08:47:13.002624Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 108: satisfy waiter [1:666:2590] TestWaitNotification: OK eventTxId 108 >>>>> Name: "Topic1" PQTabletConfig { PartitionConfig { } } Split { Partition: 1 SplitBoundary: "\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\251" } TestModificationResults wait txId: 109 2025-05-07T08:47:13.006167Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/USER_1" OperationType: ESchemeOpAlterPersQueueGroup AlterPersQueueGroup { Name: "Topic1" PQTabletConfig { PartitionConfig { } } Split { Partition: 1 SplitBoundary: "\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\251" } } } TxId: 109 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:47:13.006383Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_pq.cpp:509: 
TAlterPQ Propose, path: /MyRoot/USER_1/Topic1, pathId: , opId: 109:0, at schemeshard: 72057594046678944
2025-05-07T08:47:13.006611Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 109:1, propose status:StatusInvalidParameter, reason: Split boundary greate or equals ToBound of partition: 'AA AA AA AA AA AA AA AA AA AA AA AA AA AA AA A9' >= 'AA AA AA AA AA AA AA AA AA AA AA AA AA AA AA A9' (FromBound is '55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 54'), at schemeshard: 72057594046678944
2025-05-07T08:47:13.008926Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 109, response: Status: StatusInvalidParameter Reason: "Split boundary greate or equals ToBound of partition: \'AA AA AA AA AA AA AA AA AA AA AA AA AA AA AA A9\' >= \'AA AA AA AA AA AA AA AA AA AA AA AA AA AA AA A9\' (FromBound is \'55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 54\')" TxId: 109 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944
2025-05-07T08:47:13.009077Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 109, database: /MyRoot/USER_1, subject: , status: StatusInvalidParameter, reason: Split boundary greate or equals ToBound of partition: 'AA AA AA AA AA AA AA AA AA AA AA AA AA AA AA A9' >= 'AA AA AA AA AA AA AA AA AA AA AA AA AA AA AA A9' (FromBound is '55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 54'), operation: ALTER PERSISTENT QUEUE, path: /MyRoot/USER_1/Topic1
TestModificationResult got TxId: 109, wait until txId: 109
TestWaitNotification wait txId: 109
2025-05-07T08:47:13.009416Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 109: send EvNotifyTxCompletion
2025-05-07T08:47:13.009455Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 109
2025-05-07T08:47:13.010238Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 109, at schemeshard: 72057594046678944
2025-05-07T08:47:13.013112Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 109: got EvNotifyTxCompletionResult
2025-05-07T08:47:13.013174Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 109: satisfy waiter [1:673:2597]
TestWaitNotification: OK eventTxId 109
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_topic_splitmerge/unittest >> TSchemeShardTopicSplitMergeTest::SplitWithOnePartition [GOOD]
Test command err:
Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140]
IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140]
Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:128:2058] recipient: [1:108:2140]
2025-05-07T08:47:11.953964Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1
2025-05-07T08:47:11.954099Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10
2025-05-07T08:47:11.954142Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config:
StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:47:11.954178Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:47:11.954221Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:47:11.954251Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:47:11.954300Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:47:11.954365Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:47:11.955096Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:47:11.955426Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:47:12.043588Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:47:12.043650Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:47:12.058523Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:47:12.058645Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:47:12.058783Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:47:12.066553Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:47:12.067105Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:47:12.067889Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:47:12.068212Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:47:12.070258Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:47:12.071826Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:47:12.071889Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:47:12.071955Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:47:12.071999Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 
72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:47:12.072036Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:47:12.072187Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:47:12.078619Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:239:2058] recipient: [1:15:2062] 2025-05-07T08:47:12.218101Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:47:12.218330Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:12.218558Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:47:12.218792Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:47:12.218878Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:12.220850Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:47:12.221014Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:47:12.221189Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:12.221259Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:47:12.221316Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:47:12.221350Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:47:12.223179Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:12.223230Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:47:12.223273Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:47:12.224839Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:12.224884Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:12.224936Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:47:12.224991Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:47:12.228754Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:47:12.230561Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:47:12.230739Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:47:12.231705Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:47:12.231847Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 130 RawX2: 4294969450 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:47:12.231910Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:47:12.232195Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:47:12.232249Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:47:12.232428Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:47:12.232498Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:47:12.234514Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:47:12.234563Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at 
schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:47:12.234752Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:47:12.234848Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 000000s, InflightLimit# 10 2025-05-07T08:47:12.963795Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:47:12.964531Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:47:12.964880Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:47:12.978316Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:47:12.979495Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:47:12.979652Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:47:12.979830Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:47:12.979863Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:47:12.979960Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:47:12.980621Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1383: TTxInit for Paths, read records: 3, at schemeshard: 72057594046678944 2025-05-07T08:47:12.980701Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:319: AttachChild: child attached as only one child to the parent, parent id: [OwnerId: 72057594046678944, LocalPathId: 1], parent name: MyRoot, child name: USER_1, child id: [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-05-07T08:47:12.980739Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:319: AttachChild: child attached as only one child to the parent, parent id: [OwnerId: 72057594046678944, LocalPathId: 2], parent name: USER_1, child name: Topic1, child id: [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-05-07T08:47:12.980814Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1457: TTxInit for UserAttributes, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:47:12.980877Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1483: TTxInit for UserAttributesAlterData, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:47:12.981088Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-05-07T08:47:12.981331Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1785: TTxInit for Tables, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:47:12.981400Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__root_data_erasure_manager.cpp:450: [RootDataErasureManager] Restore: Generation# 0, Status# 0, WakeupInterval# 604800 s, 
NumberDataErasureTenantsInRunning# 0 2025-05-07T08:47:12.981581Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2011: TTxInit for Columns, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:47:12.981665Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2071: TTxInit for ColumnsAlters, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:47:12.981777Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2129: TTxInit for Shards, read records: 4, at schemeshard: 72057594046678944 2025-05-07T08:47:12.981820Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-05-07T08:47:12.981847Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-05-07T08:47:12.981879Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 0 2025-05-07T08:47:12.981902Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-05-07T08:47:12.982041Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2215: TTxInit for TablePartitions, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:47:12.982112Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2281: TTxInit for TableShardPartitionConfigs, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:47:12.982342Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2431: TTxInit for ChannelsBinding, read records: 14, at schemeshard: 72057594046678944 2025-05-07T08:47:12.982508Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-05-07T08:47:12.982814Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2810: TTxInit for TableIndexes, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:47:12.982960Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2889: TTxInit for TableIndexKeys, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:47:12.983342Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3387: TTxInit for KesusInfos, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:47:12.983407Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3423: TTxInit for KesusAlters, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:47:12.983601Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3637: TTxInit for TxShards, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:47:12.983698Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3782: TTxInit for ShardToDelete, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:47:12.983797Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3799: TTxInit for BackupSettings, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:47:12.983978Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3959: TTxInit for ShardBackupStatus, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:47:12.984071Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3975: TTxInit for CompletedBackup, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:47:12.984245Z node 1 :FLAT_TX_SCHEMESHARD 
NOTICE: schemeshard__init.cpp:4260: TTxInit for Publications, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:47:12.984473Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4558: IndexBuild , records: 0, at schemeshard: 72057594046678944 2025-05-07T08:47:12.984624Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4705: SnapshotTables: snapshots: 0 tables: 0, at schemeshard: 72057594046678944 2025-05-07T08:47:12.984670Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4732: SnapshotSteps: snapshots: 0, at schemeshard: 72057594046678944 2025-05-07T08:47:12.984715Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4759: LongLocks: records: 0, at schemeshard: 72057594046678944 2025-05-07T08:47:13.003550Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:47:13.003614Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:47:13.004145Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:47:13.004215Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:47:13.004267Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:47:13.005137Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594046678944 is [1:750:2663] sender: [1:809:2058] recipient: [1:15:2062] 2025-05-07T08:47:13.078814Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_1/Topic1" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-05-07T08:47:13.079099Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_1/Topic1" took 302us result status StatusSuccess 2025-05-07T08:47:13.079669Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_1/Topic1" PathDescription { Self { Name: "Topic1" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 104 CreateStep: 150 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 2 } ChildrenExist: false BalancerTabletID: 72075186233409549 } PersQueueGroup { Name: "Topic1" PathId: 3 TotalGroupCount: 3 PartitionPerTablet: 7 PQTabletConfig { PartitionConfig { LifetimeSeconds: 3600 } YdbDatabasePath: "/MyRoot" PartitionStrategy { PartitionStrategyType: CAN_SPLIT_AND_MERGE } } Partitions { PartitionId: 0 TabletId: 72075186233409548 Status: Inactive ChildPartitionIds: 1 ChildPartitionIds: 2 } Partitions { PartitionId: 1 TabletId: 72075186233409548 
KeyRange { ToBound: "\177" } Status: Active ParentPartitionIds: 0 } Partitions { PartitionId: 2 TabletId: 72075186233409548 KeyRange { FromBound: "\177" } Status: Active ParentPartitionIds: 0 } AlterVersion: 2 BalancerTabletID: 72075186233409549 NextPartitionId: 3 Allocate { Name: "Topic1" AlterVersion: 2 TotalGroupCount: 3 NextPartitionId: 3 PartitionPerTablet: 7 PQTabletConfig { PartitionConfig { LifetimeSeconds: 3600 } YdbDatabasePath: "/MyRoot" PartitionStrategy { PartitionStrategyType: CAN_SPLIT_AND_MERGE } } Partitions { PartitionId: 0 GroupId: 1 TabletId: 72075186233409548 OwnerId: 72057594046678944 ShardId: 3 Status: Inactive } Partitions { PartitionId: 1 GroupId: 2 TabletId: 72075186233409548 OwnerId: 72057594046678944 ShardId: 3 Status: Active ParentPartitionIds: 0 KeyRange { ToBound: "\177" } } Partitions { PartitionId: 2 GroupId: 3 TabletId: 72075186233409548 OwnerId: 72057594046678944 ShardId: 3 Status: Active ParentPartitionIds: 0 KeyRange { FromBound: "\177" } } BalancerTabletID: 72075186233409549 BalancerOwnerId: 72057594046678944 BalancerShardId: 4 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 3 PQPartitionsLimit: 1000000 } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944
>> TSchemeShardTest::DropIndexedTableAndForceDropSimultaneously
>> TSchemeShardTopicSplitMergeTest::CreateTopicWithManyPartition [GOOD]
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_login/unittest >> TSchemeShardLoginTest::RemoveGroup_Acl-StrictAclCheck-true [GOOD]
Test command err:
Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140]
IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140]
Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140]
2025-05-07T08:47:07.848064Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1
2025-05-07T08:47:07.848152Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10
2025-05-07T08:47:07.848221Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s
2025-05-07T08:47:07.848266Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration
2025-05-07T08:47:07.848305Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000
2025-05-07T08:47:07.848335Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type
TxSplitTablePartition, limit 10000 2025-05-07T08:47:07.848407Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:47:07.848489Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:47:07.849292Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:47:07.849652Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:47:07.937281Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:47:07.937344Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:47:07.954854Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:47:07.955057Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:47:07.955231Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:47:07.961613Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:47:07.961938Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:47:07.962649Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:47:07.962877Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:47:07.965990Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:47:07.967411Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:47:07.967474Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:47:07.967549Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:47:07.967599Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:47:07.967641Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:47:07.967867Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:47:07.974634Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 
72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T08:47:08.135115Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:47:08.135337Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:08.135547Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:47:08.135807Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:47:08.135884Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:08.138272Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:47:08.138408Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:47:08.138597Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:08.138663Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:47:08.138721Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:47:08.138765Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:47:08.140865Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:08.140931Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:47:08.140977Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:47:08.143026Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:08.143083Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:08.143132Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 
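The recurring "Change state for txid 1:0 2 -> 3" and "3 -> 128" entries above, and the "128 -> 240" just below, are the schemeshard suboperation walking a fixed state ladder: create parts, configure parts, propose to the coordinator, done. A minimal, compilable C++ sketch of that ladder follows; the state names and the NextState mapping are illustrative assumptions (the real enum lives in the schemeshard sources and has many more states), only the numeric values are taken from this trace.

    #include <cstdio>

    // Hypothetical stand-ins for the numeric tx states seen in the trace
    // ("Change state for txid 1:0 2 -> 3", "3 -> 128", "128 -> 240").
    // Assumption: the real schemeshard enum is richer; only the values
    // 2, 3, 128 and 240 are taken from the log above.
    enum ETxState {
        CreateParts    = 2,   // create/alter the shards the operation needs
        ConfigureParts = 3,   // push configuration to those shards
        Propose        = 128, // hand the tx to the coordinator for planning
        Done           = 240  // operation complete, ready to notify waiters
    };

    static ETxState NextState(ETxState s) {
        switch (s) {
            case CreateParts:    return ConfigureParts;
            case ConfigureParts: return Propose;
            case Propose:        return Done;
            default:             return Done;
        }
    }

    int main() {
        // Replays the transitions logged for operation 1:0 above.
        ETxState s = CreateParts;
        while (s != Done) {
            ETxState next = NextState(s);
            std::printf("Change state for txid 1:0 %d -> %d\n", s, next);
            s = next;
        }
        return 0;
    }
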
2025-05-07T08:47:08.143219Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:47:08.147264Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:47:08.149683Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:47:08.149913Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:47:08.151017Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:47:08.151190Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:47:08.151246Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:47:08.151516Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:47:08.151569Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:47:08.151746Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:47:08.151831Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:47:08.154369Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:47:08.154442Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:47:08.154633Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:47:08.154676Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 
DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-05-07T08:47:13.360308Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 8 PathOwnerId: 72057594046678944, cookie: 105 2025-05-07T08:47:13.360415Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 8 PathOwnerId: 72057594046678944, cookie: 105 2025-05-07T08:47:13.360455Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 105 2025-05-07T08:47:13.360490Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 105, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 8 2025-05-07T08:47:13.360528Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-05-07T08:47:13.360619Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 105, subscribers: 0 2025-05-07T08:47:13.368375Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 105 2025-05-07T08:47:13.369051Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 105 TestModificationResult got TxId: 105, wait until txId: 105 2025-05-07T08:47:13.369718Z node 5 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Dir1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T08:47:13.369951Z node 5 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Dir1" took 252us result status StatusSuccess 2025-05-07T08:47:13.370333Z node 5 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Dir1" PathDescription { Self { Name: "Dir1" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 102 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 2 EffectiveACLVersion: 2 UserAttrsVersion: 1 ChildrenVersion: 2 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 
AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 TestModificationResults wait txId: 106 2025-05-07T08:47:13.383633Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpAlterLogin AlterLogin { RemoveGroup { Group: "group1" } } } TxId: 106 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:47:13.383849Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5180: ExamineTreeVFS visit path id [OwnerId: 72057594046678944, LocalPathId: 1] name: MyRoot type: EPathTypeDir state: EPathStateNoChanges stepDropped: 0 droppedTxId: 0 parent: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:47:13.383902Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5196: ExamineTreeVFS run path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:47:13.383952Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5180: ExamineTreeVFS visit path id [OwnerId: 72057594046678944, LocalPathId: 2] name: Dir1 type: EPathTypeDir state: EPathStateNoChanges stepDropped: 0 droppedTxId: 0 parent: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:47:13.383993Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5196: ExamineTreeVFS run path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-05-07T08:47:13.384203Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 106:1, propose status:StatusSuccess, reason: , at schemeshard: 72057594046678944 2025-05-07T08:47:13.384314Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#106:0 progress is 1/1 2025-05-07T08:47:13.384352Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 106 ready parts: 1/1 2025-05-07T08:47:13.384393Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#106:0 progress is 1/1 2025-05-07T08:47:13.384434Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 106 ready parts: 1/1 2025-05-07T08:47:13.384493Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:47:13.384552Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 106, ready parts: 1/1, is published: false 2025-05-07T08:47:13.384615Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 106 ready parts: 1/1 2025-05-07T08:47:13.384657Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 106:0 2025-05-07T08:47:13.384706Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 106, publications: 1, subscribers: 0 2025-05-07T08:47:13.384746Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 106, [OwnerId: 72057594046678944, LocalPathId: 1], 9 2025-05-07T08:47:13.396921Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 106, response: Status: StatusSuccess TxId: 106 SchemeshardId: 72057594046678944, at 
schemeshard: 72057594046678944 2025-05-07T08:47:13.397055Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 106, database: /MyRoot, subject: , status: StatusSuccess, operation: REMOVE GROUP, path: /MyRoot 2025-05-07T08:47:13.397271Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:47:13.397328Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 106, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:47:13.397537Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:47:13.397590Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [5:203:2205], at schemeshard: 72057594046678944, txId: 106, path id: 1 2025-05-07T08:47:13.398175Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 9 PathOwnerId: 72057594046678944, cookie: 106 2025-05-07T08:47:13.398314Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 9 PathOwnerId: 72057594046678944, cookie: 106 2025-05-07T08:47:13.398371Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 106 2025-05-07T08:47:13.398416Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 106, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 9 2025-05-07T08:47:13.398465Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-05-07T08:47:13.398583Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 106, subscribers: 0 2025-05-07T08:47:13.403202Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 106 TestModificationResult got TxId: 106, wait until txId: 106 2025-05-07T08:47:13.403907Z node 5 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T08:47:13.404130Z node 5 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 247us result status StatusSuccess 2025-05-07T08:47:13.404553Z node 5 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 
1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 9 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 9 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 2 } ChildrenExist: true } Children { Name: "Dir1" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 102 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TSchemeShardLoginTest::AddAccess_NonYdb-StrictAclCheck-true [GOOD] >> TSchemeShardLoginTest::AccountLockoutAndAutomaticallyUnlock >> TSchemeShardTopicSplitMergeTest::SplitWithWrongPartition [GOOD] >> TSchemeShardLoginTest::RemoveGroup_Owner-StrictAclCheck-true [GOOD] >> THealthCheckTest::Basic >> TSchemeShardTest::CacheEffectiveACL [GOOD] >> TSchemeShardTest::ConsistentCopyTable >> test_sql_streaming.py::test[suites-GroupByHopWithDataWatermarks-default.txt] >> THealthCheckTest::Issues100Groups100VCardListing >> TSchemeShardLoginTest::RemoveUser_Owner-StrictAclCheck-false [GOOD] >> THealthCheckTest::OrangeGroupIssueWhenDegradedGroupStatus |88.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/blob_depot/ut/unittest >> GivenIdRange::Allocate [GOOD] >> TSchemeShardTopicSplitMergeTest::MargePartitions2 [GOOD] >> THealthCheckTest::Issues100GroupsListing >> TSchemeShardTopicSplitMergeTest::SplitTwoPartitions [GOOD] >> TSchemeShardTopicSplitMergeTest::SplitInactivePartition ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_login/unittest >> TSchemeShardLoginTest::FailedLoginWithInvalidUser [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140] 2025-05-07T08:47:07.989624Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:47:07.989736Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:47:07.989799Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:47:07.989845Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:47:07.989904Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:47:07.989940Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:47:07.990026Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:47:07.990125Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:47:07.990965Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:47:07.991345Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:47:08.073119Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:47:08.073190Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:47:08.097866Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:47:08.098128Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:47:08.098321Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:47:08.105917Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:47:08.106338Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:47:08.107081Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:47:08.107296Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:47:08.110869Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:47:08.112483Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:47:08.112566Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:47:08.112652Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:47:08.112705Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:47:08.112749Z node 1 :FLAT_TX_SCHEMESHARD 
DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:47:08.113014Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:47:08.121753Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T08:47:08.268416Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:47:08.268673Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:08.268919Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:47:08.269166Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:47:08.269237Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:08.275382Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:47:08.275558Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:47:08.275795Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:08.275872Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:47:08.275913Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:47:08.275949Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:47:08.279033Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:08.279113Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:47:08.279184Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:47:08.282228Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 
2025-05-07T08:47:08.282312Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:08.282393Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:47:08.282473Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:47:08.287448Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:47:08.293792Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:47:08.294092Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:47:08.295274Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:47:08.295737Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:47:08.295828Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:47:08.296174Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:47:08.296254Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:47:08.296461Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:47:08.296549Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:47:08.299602Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:47:08.299683Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 
2025-05-07T08:47:08.299906Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:47:08.299951Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... HARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:13.998812Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046678944] TDone opId# 1:0 ProgressState 2025-05-07T08:47:13.998954Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#1:0 progress is 1/1 2025-05-07T08:47:13.998997Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-05-07T08:47:13.999043Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#1:0 progress is 1/1 2025-05-07T08:47:13.999081Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-05-07T08:47:13.999129Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 1, ready parts: 1/1, is published: false 2025-05-07T08:47:13.999174Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-05-07T08:47:13.999219Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 1:0 2025-05-07T08:47:13.999265Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 1:0 2025-05-07T08:47:13.999348Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-05-07T08:47:13.999390Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 1, publications: 1, subscribers: 0 2025-05-07T08:47:13.999431Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 1, [OwnerId: 72057594046678944, LocalPathId: 1], 3 2025-05-07T08:47:14.000608Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-05-07T08:47:14.000736Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-05-07T08:47:14.000779Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1 2025-05-07T08:47:14.000822Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 3 2025-05-07T08:47:14.000874Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 
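The TEvUpdateAck records above show how publication completion is tracked: each transaction keeps an in-flight set of (pathId, version) pairs it published to the scheme board, every matching ack retires one entry, and when nothing remains in flight the waiting subscribers are notified ("Publication complete, notify & remove"). The sketch below captures that bookkeeping under assumed semantics; it is an illustration of the pattern in these log lines, not YDB's implementation.

```cpp
// Illustrative sketch of the publication bookkeeping visible in the log.
// Assumed semantics for illustration only.
#include <cstdint>
#include <iostream>
#include <map>
#include <string>
#include <vector>

struct TPublicationTracker {
    std::map<uint64_t, uint64_t> InFlight;  // pathId -> version to publish
    std::vector<std::string> Subscribers;   // actors waiting on completion

    void OnUpdateAck(uint64_t pathId, uint64_t version) {
        // Mirrors "Publication in-flight, count: N" (logged before retiring).
        std::cout << "Publication in-flight, count: " << InFlight.size() << "\n";
        auto it = InFlight.find(pathId);
        if (it == InFlight.end() || version < it->second)
            return;                          // stale or unknown ack: ignore
        std::cout << "AckPublish pathId " << pathId
                  << " version " << version << "\n";
        InFlight.erase(it);
        if (InFlight.empty()) {
            std::cout << "Publication complete, notify & remove, subscribers: "
                      << Subscribers.size() << "\n";
        }
    }
};

int main() {
    // txId 1 in the log publishes one path (LocalPathId: 1) at version 3.
    TPublicationTracker tx1;
    tx1.InFlight[1] = 3;
    tx1.OnUpdateAck(/*pathId=*/1, /*version=*/3);
}
```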
2025-05-07T08:47:14.000980Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1, subscribers: 0 2025-05-07T08:47:14.015097Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1 2025-05-07T08:47:14.015730Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1, at schemeshard: 72057594046678944 2025-05-07T08:47:14.016352Z node 5 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T08:47:14.016558Z node 5 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 251us result status StatusSuccess 2025-05-07T08:47:14.016954Z node 5 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:47:14.017110Z node 5 :TX_PROXY DEBUG: proxy_impl.cpp:434: actor# [5:266:2257] Bootstrap 2025-05-07T08:47:14.034461Z node 5 :TX_PROXY DEBUG: proxy_impl.cpp:453: actor# [5:266:2257] Become StateWork (SchemeCache [5:271:2262]) 2025-05-07T08:47:14.035073Z node 5 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [5:266:2257] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-05-07T08:47:14.037589Z node 5 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 
ChildrenVersion: 1 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } } } PathId: 1 PathOwnerId: 72057594046678944 2025-05-07T08:47:14.038469Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:43: TTxLogin Execute at schemeshard: 72057594046678944 2025-05-07T08:47:14.038523Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:46: TTxLogin RotateKeys at schemeshard: 72057594046678944 2025-05-07T08:47:14.179679Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:85: TTxLogin Complete, result: Error: "Cannot find user: user1", at schemeshard: 72057594046678944 2025-05-07T08:47:14.179811Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:47:14.179864Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 0, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:47:14.180091Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:47:14.180154Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [5:203:2205], at schemeshard: 72057594046678944, txId: 0, path id: 1 2025-05-07T08:47:14.180713Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 4 PathOwnerId: 72057594046678944, cookie: 0 2025-05-07T08:47:14.181108Z node 5 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T08:47:14.181301Z node 5 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 212us result status StatusSuccess 2025-05-07T08:47:14.181746Z node 5 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 1 SecurityStateVersion: 1 } ChildrenExist: 
false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { PublicKeys { KeyId: 1 KeyDataPEM: "-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAu8lUsRUtMoULvXAp0+vB\nTyO0ruZNfn18YVt87BEXAwrkCvG8TiBB6LH+uLt4vaTzDI4G5+cMmgfe94jWNhnS\nmOJ55uiN/fW6fx4SQwUG+zBMHMAQSBy5GLXw18+q0sy6btGEP4A1Buaz0KQceYSd\npeRmMdKUBQqWTdsD9LyBbl4+kd5beZwUHsYL1KGFt9nbzIkVwW0eP8yA+ZnoF1SF\nSYr4T2+UiaQrT9Y1hWhfMqi4ZvnjHqMAz46BGIspOUg6UWw4RB7yG5UMy6V1puZe\nQV0img3+ER900yRRERhgeXxa5z1qlXiieisFd/DENeLZ84jAOF0iY2D8puVzBCga\nxwIDAQAB\n-----END PUBLIC KEY-----\n" ExpiresAt: 1746694034176 } Audience: "/MyRoot" } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> THealthCheckTest::StaticGroupIssue >> THealthCheckTest::YellowGroupIssueWhenPartialGroupStatus >> THealthCheckTest::StorageLimit95 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_topic_splitmerge/unittest >> TSchemeShardTopicSplitMergeTest::SplitWithWrongPartition [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:128:2058] recipient: [1:108:2140] 2025-05-07T08:47:13.301364Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:47:13.301462Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:47:13.301500Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:47:13.301533Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:47:13.301572Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:47:13.301602Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:47:13.301647Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:47:13.301715Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 
604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:47:13.302490Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:47:13.302839Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:47:13.388707Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:47:13.388762Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:47:13.406624Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:47:13.406723Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:47:13.406890Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:47:13.440345Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:47:13.440914Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:47:13.441584Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:47:13.441886Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:47:13.444135Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:47:13.445720Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:47:13.445790Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:47:13.445848Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:47:13.445897Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:47:13.445933Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:47:13.446114Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:47:13.459247Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:239:2058] recipient: [1:15:2062] 2025-05-07T08:47:13.671981Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 
, at schemeshard: 72057594046678944 2025-05-07T08:47:13.672232Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:13.672469Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:47:13.672711Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:47:13.672770Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:13.684036Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:47:13.684200Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:47:13.684399Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:13.684463Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:47:13.684503Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:47:13.684537Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:47:13.687563Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:13.687634Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:47:13.687681Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:47:13.690359Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:13.690419Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:13.690473Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:47:13.690522Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:47:13.696438Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 
IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:47:13.701417Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:47:13.701764Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:47:13.702698Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:47:13.702866Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 130 RawX2: 4294969450 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:47:13.702917Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:47:13.703194Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:47:13.703241Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:47:13.703406Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:47:13.703485Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:47:13.705437Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:47:13.705482Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:47:13.705656Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:47:13.705701Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 
46678944 2025-05-07T08:47:14.570502Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_pq.cpp:754: NPQState::TPropose operationId# 104:0 can't persist state: ShardsInProgress is not empty, remain: 1 2025-05-07T08:47:14.612385Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation FindRelatedPartByTabletId, TxId: 104, tablet: 72075186233409548, partId: 0 2025-05-07T08:47:14.612600Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 104:0, at schemeshard: 72057594046678944, message: Origin: 72075186233409548 Status: COMPLETE TxId: 104 Step: 150 2025-05-07T08:47:14.612688Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_pq.cpp:624: NPQState::TPropose operationId# 104:0 HandleReply TEvProposeTransactionResult triggers early, at schemeshard: 72057594046678944 message# Origin: 72075186233409548 Status: COMPLETE TxId: 104 Step: 150 2025-05-07T08:47:14.612773Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_pq.cpp:265: CollectPQConfigChanged accept TEvPersQueue::TEvProposeTransactionResult, operationId: 104:0, shardIdx: 72057594046678944:3, shard: 72075186233409548, left await: 0, txState.State: Propose, txState.ReadyForNotifications: 0, at schemeshard: 72057594046678944 2025-05-07T08:47:14.612839Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_pq.cpp:629: NPQState::TPropose operationId# 104:0 HandleReply TEvProposeTransactionResult CollectPQConfigChanged: true 2025-05-07T08:47:14.613089Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 104:0 128 -> 240 2025-05-07T08:47:14.613324Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-05-07T08:47:14.613405Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4 2025-05-07T08:47:14.617363Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 104:0, at schemeshard: 72057594046678944 2025-05-07T08:47:14.617657Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:47:14.617705Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 104, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-05-07T08:47:14.617912Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 104, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-05-07T08:47:14.618175Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:47:14.618228Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:206:2208], at schemeshard: 72057594046678944, txId: 104, path id: 2 2025-05-07T08:47:14.618286Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:206:2208], at schemeshard: 72057594046678944, txId: 104, path id: 3 2025-05-07T08:47:14.619277Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 104:0, at schemeshard: 72057594046678944 2025-05-07T08:47:14.619344Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046678944] TDone opId# 104:0 ProgressState 2025-05-07T08:47:14.619461Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#104:0 progress is 1/1 2025-05-07T08:47:14.619505Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-05-07T08:47:14.619555Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#104:0 progress is 1/1 2025-05-07T08:47:14.619616Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-05-07T08:47:14.619662Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 104, ready parts: 1/1, is published: false 2025-05-07T08:47:14.619739Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-05-07T08:47:14.619791Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 104:0 2025-05-07T08:47:14.619830Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 104:0 2025-05-07T08:47:14.619994Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 5 2025-05-07T08:47:14.620043Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 104, publications: 2, subscribers: 1 2025-05-07T08:47:14.620081Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 104, [OwnerId: 72057594046678944, LocalPathId: 2], 5 2025-05-07T08:47:14.620113Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 104, [OwnerId: 72057594046678944, LocalPathId: 3], 2 2025-05-07T08:47:14.621058Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 5 PathOwnerId: 72057594046678944, cookie: 104 2025-05-07T08:47:14.621177Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 5 PathOwnerId: 72057594046678944, cookie: 104 2025-05-07T08:47:14.621236Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 104 2025-05-07T08:47:14.621338Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 104, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 5 2025-05-07T08:47:14.621389Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5 2025-05-07T08:47:14.622752Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at 
schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 104 2025-05-07T08:47:14.622890Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 104 2025-05-07T08:47:14.622932Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 104 2025-05-07T08:47:14.622965Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 104, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 2 2025-05-07T08:47:14.623050Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4 2025-05-07T08:47:14.623125Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 104, subscribers: 1 2025-05-07T08:47:14.623209Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:212: TTxAckPublishToSchemeBoard Notify send TEvNotifyTxCompletionResult, at schemeshard: 72057594046678944, to actorId: [1:413:2380] 2025-05-07T08:47:14.630870Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 2025-05-07T08:47:14.631101Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 2025-05-07T08:47:14.631222Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 104: got EvNotifyTxCompletionResult 2025-05-07T08:47:14.631261Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 104: satisfy waiter [1:551:2486] TestWaitNotification: OK eventTxId 104 >>>>> Name: "Topic1" PQTabletConfig { PartitionConfig { } } Split { Partition: 7 SplitBoundary: "W" } TestModificationResults wait txId: 105 2025-05-07T08:47:14.642386Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/USER_1" OperationType: ESchemeOpAlterPersQueueGroup AlterPersQueueGroup { Name: "Topic1" PQTabletConfig { PartitionConfig { } } Split { Partition: 7 SplitBoundary: "W" } } } TxId: 105 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:47:14.642679Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_pq.cpp:509: TAlterPQ Propose, path: /MyRoot/USER_1/Topic1, pathId: , opId: 105:0, at schemeshard: 72057594046678944 2025-05-07T08:47:14.642933Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 105:1, propose status:StatusInvalidParameter, reason: Splitting partition does not exists: 7, at schemeshard: 72057594046678944 2025-05-07T08:47:14.645506Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 105, response: Status: StatusInvalidParameter Reason: "Splitting partition does not exists: 7" TxId: 105 SchemeshardId: 72057594046678944, at schemeshard: 
72057594046678944 2025-05-07T08:47:14.645701Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 105, database: /MyRoot/USER_1, subject: , status: StatusInvalidParameter, reason: Splitting partition does not exists: 7, operation: ALTER PERSISTENT QUEUE, path: /MyRoot/USER_1/Topic1 TestModificationResult got TxId: 105, wait until txId: 105 TestWaitNotification wait txId: 105 2025-05-07T08:47:14.646080Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 105: send EvNotifyTxCompletion 2025-05-07T08:47:14.646129Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 105 2025-05-07T08:47:14.646703Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 105, at schemeshard: 72057594046678944 2025-05-07T08:47:14.646821Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 105: got EvNotifyTxCompletionResult 2025-05-07T08:47:14.646881Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 105: satisfy waiter [1:645:2569] TestWaitNotification: OK eventTxId 105 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_topic_splitmerge/unittest >> TSchemeShardTopicSplitMergeTest::CreateTopicWithManyPartition [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:128:2058] recipient: [1:108:2140] 2025-05-07T08:47:12.253435Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:47:12.253654Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:47:12.253695Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:47:12.253732Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:47:12.253776Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:47:12.253803Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:47:12.253853Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:47:12.253917Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:47:12.254639Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources 
configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:47:12.254981Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:47:12.354296Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:47:12.354355Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:47:12.372679Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:47:12.372797Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:47:12.372927Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:47:12.383706Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:47:12.386250Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:47:12.386938Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:47:12.387254Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:47:12.389385Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:47:12.390874Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:47:12.390933Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:47:12.390987Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:47:12.391046Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:47:12.391085Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:47:12.391239Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:47:12.397622Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:239:2058] recipient: [1:15:2062] 2025-05-07T08:47:12.544878Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:47:12.545091Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 
72057594046678944 2025-05-07T08:47:12.545278Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:47:12.545495Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:47:12.545544Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:12.551165Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:47:12.551311Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:47:12.551501Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:12.551551Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:47:12.551587Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:47:12.551620Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:47:12.556130Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:12.556187Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:47:12.556238Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:47:12.558203Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:12.558249Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:12.558304Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:47:12.558348Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:47:12.561685Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:47:12.564980Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 
from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:47:12.565299Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:47:12.566165Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:47:12.566286Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 130 RawX2: 4294969450 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:47:12.566337Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:47:12.566587Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:47:12.566638Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:47:12.566792Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:47:12.566877Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:47:12.568834Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:47:12.568876Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:47:12.569037Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:47:12.569078Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 
800.000000s, IsManualStartup# false 2025-05-07T08:47:14.418985Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:47:14.419271Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:47:14.433416Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:47:14.442867Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:47:14.443090Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:47:14.443356Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:47:14.443397Z node 2 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:47:14.443514Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:47:14.444200Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1383: TTxInit for Paths, read records: 3, at schemeshard: 72057594046678944 2025-05-07T08:47:14.444286Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:319: AttachChild: child attached as only one child to the parent, parent id: [OwnerId: 72057594046678944, LocalPathId: 1], parent name: MyRoot, child name: USER_1, child id: [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-05-07T08:47:14.444326Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:319: AttachChild: child attached as only one child to the parent, parent id: [OwnerId: 72057594046678944, LocalPathId: 2], parent name: USER_1, child name: Topic1, child id: [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-05-07T08:47:14.444394Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1457: TTxInit for UserAttributes, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:47:14.444455Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1483: TTxInit for UserAttributesAlterData, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:47:14.444660Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-05-07T08:47:14.444891Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1785: TTxInit for Tables, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:47:14.444964Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__root_data_erasure_manager.cpp:450: [RootDataErasureManager] Restore: Generation# 0, Status# 0, WakeupInterval# 604800 s, NumberDataErasureTenantsInRunning# 0 2025-05-07T08:47:14.445148Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2011: TTxInit for Columns, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:47:14.445224Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2071: TTxInit for ColumnsAlters, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:47:14.445319Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2129: TTxInit for Shards, read records: 4, at schemeshard: 72057594046678944 2025-05-07T08:47:14.445356Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-05-07T08:47:14.445383Z node 2 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-05-07T08:47:14.445405Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 0 2025-05-07T08:47:14.445427Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-05-07T08:47:14.445517Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2215: TTxInit for TablePartitions, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:47:14.445586Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2281: TTxInit for TableShardPartitionConfigs, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:47:14.445788Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2431: TTxInit for ChannelsBinding, read records: 14, at schemeshard: 72057594046678944 2025-05-07T08:47:14.445951Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-05-07T08:47:14.450594Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2810: TTxInit for TableIndexes, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:47:14.450736Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2889: TTxInit for TableIndexKeys, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:47:14.451141Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3387: TTxInit for KesusInfos, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:47:14.451221Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3423: TTxInit for KesusAlters, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:47:14.451423Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3637: TTxInit for TxShards, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:47:14.451503Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3782: TTxInit for ShardToDelete, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:47:14.451574Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3799: TTxInit for BackupSettings, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:47:14.451737Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3959: TTxInit for ShardBackupStatus, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:47:14.451821Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3975: TTxInit for CompletedBackup, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:47:14.452019Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4260: TTxInit for Publications, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:47:14.452266Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4558: IndexBuild , records: 0, at schemeshard: 72057594046678944 2025-05-07T08:47:14.452455Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4705: SnapshotTables: snapshots: 0 tables: 0, at schemeshard: 72057594046678944 2025-05-07T08:47:14.452517Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4732: SnapshotSteps: snapshots: 0, at schemeshard: 72057594046678944 2025-05-07T08:47:14.452572Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4759: LongLocks: records: 0, at schemeshard: 72057594046678944 
2025-05-07T08:47:14.470559Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:47:14.470636Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:47:14.470711Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:47:14.470763Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:47:14.470804Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:47:14.473380Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594046678944 is [2:637:2558] sender: [2:697:2058] recipient: [2:15:2062] 2025-05-07T08:47:14.534805Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_1/Topic1" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-05-07T08:47:14.535116Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_1/Topic1" took 336us result status StatusSuccess 2025-05-07T08:47:14.535714Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_1/Topic1" PathDescription { Self { Name: "Topic1" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 104 CreateStep: 150 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 1 } ChildrenExist: false BalancerTabletID: 72075186233409549 } PersQueueGroup { Name: "Topic1" PathId: 3 TotalGroupCount: 3 PartitionPerTablet: 7 PQTabletConfig { PartitionConfig { LifetimeSeconds: 3600 WriteSpeedInBytesPerSecond: 1024 } YdbDatabasePath: "/MyRoot" PartitionStrategy { PartitionStrategyType: CAN_SPLIT_AND_MERGE } } Partitions { PartitionId: 0 TabletId: 72075186233409548 KeyRange { ToBound: "UUUUUUUUUUUUUUUT" } Status: Active } Partitions { PartitionId: 1 TabletId: 72075186233409548 KeyRange { FromBound: "UUUUUUUUUUUUUUUT" ToBound: "\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\251" } Status: Active } Partitions { PartitionId: 2 TabletId: 72075186233409548 KeyRange { FromBound: "\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\251" } Status: Active } AlterVersion: 1 BalancerTabletID: 72075186233409549 NextPartitionId: 3 Allocate { Name: "Topic1" AlterVersion: 1 TotalGroupCount: 3 NextPartitionId: 3 PartitionPerTablet: 7 PQTabletConfig { PartitionConfig { LifetimeSeconds: 3600 WriteSpeedInBytesPerSecond: 1024 } YdbDatabasePath: "/MyRoot" PartitionStrategy { PartitionStrategyType: CAN_SPLIT_AND_MERGE } } Partitions { PartitionId: 0 GroupId: 1 
TabletId: 72075186233409548 OwnerId: 72057594046678944 ShardId: 3 Status: Active KeyRange { ToBound: "UUUUUUUUUUUUUUUT" } } Partitions { PartitionId: 1 GroupId: 2 TabletId: 72075186233409548 OwnerId: 72057594046678944 ShardId: 3 Status: Active KeyRange { FromBound: "UUUUUUUUUUUUUUUT" ToBound: "\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\251" } } Partitions { PartitionId: 2 GroupId: 3 TabletId: 72075186233409548 OwnerId: 72057594046678944 ShardId: 3 Status: Active KeyRange { FromBound: "\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\251" } } BalancerTabletID: 72075186233409549 BalancerOwnerId: 72057594046678944 BalancerShardId: 4 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 3 PQPartitionsLimit: 1000000 } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944
>> BlobDepot::LoadPutAndRead [GOOD]
>> BlobDepot::DecommitPutAndRead
>> test.py::test[solomon-HistResponse-default.txt] [GOOD]
>> test.py::test[solomon-InvalidProject-]
>> TSchemeShardTopicSplitMergeTest::MargeNotAdjacentRangePartitions [GOOD]
>> THealthCheckTest::ShardsLimit999
>> TYardTest::TestSysLogOverwrite [GOOD]
>> TSchemeShardTopicSplitMergeTest::SplitWithManyPartition [GOOD]
>> TYardTest::TestUpsAndDownsAtTheBoundary
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_login/unittest >> TSchemeShardLoginTest::RemoveGroup_Owner-StrictAclCheck-true [GOOD]
Test command err:
Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140]
IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140]
Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140]
2025-05-07T08:47:07.760340Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:47:07.760443Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:47:07.760508Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:47:07.760556Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:47:07.760602Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:47:07.760626Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:47:07.760716Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508:
BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:47:07.760806Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:47:07.761580Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:47:07.762000Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:47:07.851302Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:47:07.851367Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:47:07.872635Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:47:07.872848Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:47:07.873052Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:47:07.880183Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:47:07.880572Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:47:07.881272Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:47:07.881515Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:47:07.885467Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:47:07.887050Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:47:07.887142Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:47:07.887247Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:47:07.887303Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:47:07.887348Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:47:07.887653Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:47:07.895497Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T08:47:08.056579Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:47:08.056831Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:08.057093Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:47:08.057389Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:47:08.057471Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:08.060582Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:47:08.060770Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:47:08.061033Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:08.061108Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:47:08.061157Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:47:08.061196Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:47:08.063656Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:08.063728Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:47:08.063776Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:47:08.066183Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:08.066260Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:08.066327Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:47:08.066405Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation 
IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:47:08.070684Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:47:08.073254Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:47:08.073493Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:47:08.074717Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:47:08.074900Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:47:08.074979Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:47:08.075315Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:47:08.075377Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:47:08.075566Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:47:08.075726Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:47:08.078471Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:47:08.078571Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:47:08.078793Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:47:08.078884Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 
DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-05-07T08:47:14.665739Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 8 PathOwnerId: 72057594046678944, cookie: 105 2025-05-07T08:47:14.665827Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 8 PathOwnerId: 72057594046678944, cookie: 105 2025-05-07T08:47:14.665859Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 105 2025-05-07T08:47:14.665890Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 105, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 8 2025-05-07T08:47:14.665926Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-05-07T08:47:14.666340Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 105, subscribers: 0 2025-05-07T08:47:14.682533Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 105 2025-05-07T08:47:14.683223Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 105
TestModificationResult got TxId: 105, wait until txId: 105
2025-05-07T08:47:14.683857Z node 5 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Dir1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T08:47:14.684094Z node 5 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Dir1" took 271us result status StatusSuccess 2025-05-07T08:47:14.684491Z node 5 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Dir1" PathDescription { Self { Name: "Dir1" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 102 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 4 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944
TestModificationResults wait txId: 106
2025-05-07T08:47:14.687995Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpAlterLogin AlterLogin { RemoveGroup { Group: "group1" } } } TxId: 106 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:47:14.688247Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5180: ExamineTreeVFS visit path id [OwnerId: 72057594046678944, LocalPathId: 1] name: MyRoot type: EPathTypeDir state: EPathStateNoChanges stepDropped: 0 droppedTxId: 0 parent: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:47:14.688291Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5196: ExamineTreeVFS run path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:47:14.688347Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5180: ExamineTreeVFS visit path id [OwnerId: 72057594046678944, LocalPathId: 2] name: Dir1 type: EPathTypeDir state: EPathStateNoChanges stepDropped: 0 droppedTxId: 0 parent: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:47:14.688389Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5196: ExamineTreeVFS run path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-05-07T08:47:14.688640Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 106:1, propose status:StatusSuccess, reason: , at schemeshard: 72057594046678944 2025-05-07T08:47:14.688769Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#106:0 progress is 1/1 2025-05-07T08:47:14.688819Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 106 ready parts: 1/1 2025-05-07T08:47:14.688878Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#106:0 progress is 1/1 2025-05-07T08:47:14.688928Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 106 ready parts: 1/1 2025-05-07T08:47:14.689008Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:47:14.689083Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 106, ready parts: 1/1, is published: false 2025-05-07T08:47:14.689135Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 106 ready parts: 1/1 2025-05-07T08:47:14.689183Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 106:0 2025-05-07T08:47:14.689234Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 106, publications: 1, subscribers: 0 2025-05-07T08:47:14.689294Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 106, [OwnerId: 72057594046678944, LocalPathId: 1], 9 2025-05-07T08:47:14.707058Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 106, response: Status: StatusSuccess TxId: 106 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944
2025-05-07T08:47:14.707222Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 106, database: /MyRoot, subject: , status: StatusSuccess, operation: REMOVE GROUP, path: /MyRoot 2025-05-07T08:47:14.707480Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:47:14.707540Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 106, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:47:14.707780Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:47:14.707848Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [5:203:2205], at schemeshard: 72057594046678944, txId: 106, path id: 1 2025-05-07T08:47:14.708492Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 9 PathOwnerId: 72057594046678944, cookie: 106 2025-05-07T08:47:14.708619Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 9 PathOwnerId: 72057594046678944, cookie: 106 2025-05-07T08:47:14.708672Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 106 2025-05-07T08:47:14.708720Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 106, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 9 2025-05-07T08:47:14.708766Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-05-07T08:47:14.708889Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 106, subscribers: 0 2025-05-07T08:47:14.718955Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 106
TestModificationResult got TxId: 106, wait until txId: 106
2025-05-07T08:47:14.719712Z node 5 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T08:47:14.719948Z node 5 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 273us result status StatusSuccess 2025-05-07T08:47:14.720491Z node 5 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 9 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 9 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 2 } ChildrenExist: true } Children { Name: "Dir1" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 102 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_login/unittest >> TSchemeShardLoginTest::RemoveUser_Owner-StrictAclCheck-false [GOOD]
Test command err:
Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140]
IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140]
Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140]
2025-05-07T08:47:07.728588Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:47:07.728694Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:47:07.728757Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:47:07.728798Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:47:07.728842Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:47:07.728890Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:47:07.728955Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:47:07.729035Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false
2025-05-07T08:47:07.729818Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:47:07.730187Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:47:07.821990Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:47:07.822065Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:47:07.840765Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:47:07.840961Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:47:07.841136Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:47:07.847453Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:47:07.847759Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:47:07.848414Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:47:07.848618Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:47:07.851413Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:47:07.852784Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:47:07.852851Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:47:07.852926Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:47:07.852975Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:47:07.853018Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:47:07.853251Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:47:07.859977Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T08:47:08.009537Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:47:08.009784Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:08.010056Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:47:08.010308Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:47:08.010376Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:08.013291Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:47:08.013463Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:47:08.013646Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:08.013716Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:47:08.013760Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:47:08.013797Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:47:08.016048Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:08.016115Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:47:08.016163Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:47:08.018287Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:08.018359Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:08.018413Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:47:08.018756Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:47:08.023111Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:47:08.025462Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:47:08.025709Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:47:08.026753Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:47:08.026934Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:47:08.027005Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:47:08.027334Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:47:08.027398Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:47:08.027575Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:47:08.027664Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:47:08.030126Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:47:08.030192Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:47:08.030409Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:47:08.030464Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 
ode 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 105, response: Status: StatusSuccess TxId: 105 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:47:15.239726Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 105, database: /MyRoot, subject: , status: StatusSuccess, operation: MODIFY ACL, path: /MyRoot/Dir1/DirSub1, set owner:user2 2025-05-07T08:47:15.239978Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:47:15.240036Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 105, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-05-07T08:47:15.240212Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 105, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-05-07T08:47:15.240354Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:47:15.240415Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [5:203:2205], at schemeshard: 72057594046678944, txId: 105, path id: 3 2025-05-07T08:47:15.240476Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [5:203:2205], at schemeshard: 72057594046678944, txId: 105, path id: 2 2025-05-07T08:47:15.241181Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 5 PathOwnerId: 72057594046678944, cookie: 105 2025-05-07T08:47:15.241324Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 5 PathOwnerId: 72057594046678944, cookie: 105 2025-05-07T08:47:15.241374Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 105 2025-05-07T08:47:15.241426Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 105, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 5 2025-05-07T08:47:15.241488Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-05-07T08:47:15.242102Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 7 PathOwnerId: 72057594046678944, cookie: 105 2025-05-07T08:47:15.242190Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 7 PathOwnerId: 72057594046678944, cookie: 105 2025-05-07T08:47:15.242226Z node 5 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 105 2025-05-07T08:47:15.242256Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 105, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 7 2025-05-07T08:47:15.242295Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-05-07T08:47:15.242381Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 105, subscribers: 0 2025-05-07T08:47:15.245757Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 105 2025-05-07T08:47:15.246113Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 105 TestModificationResult got TxId: 105, wait until txId: 105 TestModificationResults wait txId: 106 2025-05-07T08:47:15.249401Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpAlterLogin AlterLogin { RemoveUser { User: "user1" } } } TxId: 106 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:47:15.249902Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 106:1, propose status:StatusSuccess, reason: , at schemeshard: 72057594046678944 2025-05-07T08:47:15.250118Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#106:0 progress is 1/1 2025-05-07T08:47:15.250167Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 106 ready parts: 1/1 2025-05-07T08:47:15.250218Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#106:0 progress is 1/1 2025-05-07T08:47:15.250256Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 106 ready parts: 1/1 2025-05-07T08:47:15.250332Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:47:15.250408Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 106, ready parts: 1/1, is published: false 2025-05-07T08:47:15.250467Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 106 ready parts: 1/1 2025-05-07T08:47:15.250523Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 106:0 2025-05-07T08:47:15.250571Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 106, publications: 1, subscribers: 0 2025-05-07T08:47:15.250612Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 106, [OwnerId: 72057594046678944, LocalPathId: 1], 10 2025-05-07T08:47:15.253456Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, 
txId: 106, response: Status: StatusSuccess TxId: 106 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:47:15.253587Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 106, database: /MyRoot, subject: , status: StatusSuccess, operation: REMOVE USER, path: /MyRoot 2025-05-07T08:47:15.253866Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:47:15.253920Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 106, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:47:15.254172Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:47:15.254227Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [5:203:2205], at schemeshard: 72057594046678944, txId: 106, path id: 1 2025-05-07T08:47:15.254822Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 10 PathOwnerId: 72057594046678944, cookie: 106 2025-05-07T08:47:15.254960Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 10 PathOwnerId: 72057594046678944, cookie: 106 2025-05-07T08:47:15.255013Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 106 2025-05-07T08:47:15.255068Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 106, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 10 2025-05-07T08:47:15.255130Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-05-07T08:47:15.255243Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 106, subscribers: 0 2025-05-07T08:47:15.257571Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 106 TestModificationResult got TxId: 106, wait until txId: 106 2025-05-07T08:47:15.258292Z node 5 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Dir1/DirSub1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T08:47:15.258517Z node 5 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Dir1/DirSub1" took 265us result status StatusSuccess 2025-05-07T08:47:15.258900Z node 5 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Dir1/DirSub1" PathDescription { Self { Name: "DirSub1" 
PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 103 CreateStep: 5000002 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "user2" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 4 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:47:15.259497Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:43: TTxLogin Execute at schemeshard: 72057594046678944 2025-05-07T08:47:15.259626Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:85: TTxLogin Complete, result: Error: "Cannot find user: user1", at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_topic_splitmerge/unittest >> TSchemeShardTopicSplitMergeTest::MargePartitions2 [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:128:2058] recipient: [1:108:2140] 2025-05-07T08:47:12.464397Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:47:12.464488Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:47:12.464527Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:47:12.464562Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:47:12.464605Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:47:12.464631Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:47:12.464679Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:47:12.464763Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# 
false 2025-05-07T08:47:12.465470Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:47:12.465758Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:47:12.547379Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:47:12.547427Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:47:12.566977Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:47:12.567092Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:47:12.567220Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:47:12.580570Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:47:12.586291Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:47:12.586959Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:47:12.587302Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:47:12.589543Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:47:12.591140Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:47:12.591210Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:47:12.591268Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:47:12.591315Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:47:12.591354Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:47:12.591525Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:47:12.606675Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:239:2058] recipient: [1:15:2062] 2025-05-07T08:47:12.796545Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:47:12.796781Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:12.797018Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:47:12.797260Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:47:12.797316Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:12.802866Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:47:12.803017Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:47:12.803228Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:12.803290Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:47:12.803328Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:47:12.803362Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:47:12.805032Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:12.805084Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:47:12.805122Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:47:12.806690Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:12.806736Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:12.806788Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:47:12.806857Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:47:12.810761Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:47:12.812483Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:47:12.812650Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:47:12.813608Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:47:12.813741Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 130 RawX2: 4294969450 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:47:12.813783Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:47:12.814078Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:47:12.814133Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:47:12.814303Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:47:12.814398Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:47:12.816407Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:47:12.816453Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:47:12.816638Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:47:12.816679Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation FindRelatedPartByTabletId, TxId: 105, tablet: 72075186233409548, partId: 0 2025-05-07T08:47:15.269264Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 105:0, at schemeshard: 72057594046678944, message: Origin: 72075186233409548 Status: COMPLETE TxId: 105 Step: 200 2025-05-07T08:47:15.269350Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_pq.cpp:624: NPQState::TPropose operationId# 105:0 HandleReply TEvProposeTransactionResult triggers early, at schemeshard: 72057594046678944 message# Origin: 72075186233409548 Status: COMPLETE TxId: 105 Step: 200 2025-05-07T08:47:15.269415Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_pq.cpp:265: CollectPQConfigChanged accept TEvPersQueue::TEvProposeTransactionResult, operationId: 105:0, shardIdx: 72057594046678944:3, shard: 72075186233409548, left await: 0, txState.State: Propose, txState.ReadyForNotifications: 0, at schemeshard: 72057594046678944 2025-05-07T08:47:15.269460Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_pq.cpp:629: NPQState::TPropose operationId# 105:0 HandleReply TEvProposeTransactionResult CollectPQConfigChanged: true 2025-05-07T08:47:15.269641Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 105:0 128 -> 240 2025-05-07T08:47:15.269827Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4 2025-05-07T08:47:15.282871Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 105:0, at schemeshard: 72057594046678944 2025-05-07T08:47:15.283648Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:47:15.283699Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 105, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-05-07T08:47:15.284046Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:47:15.284093Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [2:205:2207], at schemeshard: 72057594046678944, txId: 105, path id: 3 2025-05-07T08:47:15.284201Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 105:0, at schemeshard: 72057594046678944 2025-05-07T08:47:15.284246Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046678944] TDone opId# 105:0 ProgressState 2025-05-07T08:47:15.284362Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#105:0 progress is 1/1 2025-05-07T08:47:15.284405Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 105 ready parts: 1/1 2025-05-07T08:47:15.284449Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#105:0 progress is 1/1 2025-05-07T08:47:15.284479Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 105 ready parts: 1/1 
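A note on the publication bookkeeping that recurs throughout this output: after an operation part completes, schemeshard publishes the affected path versions to the scheme board and counts down TEvUpdateAck replies ("Publication in-flight, count: 2", then "count: 1", then "Publication complete, notify & remove"). The following is a minimal standalone C++ sketch of that counting only; it is not YDB's actual code, and the function and type names are invented for illustration:

#include <cstdio>
#include <map>

std::map<unsigned, int> inflight; // txId -> publications not yet acknowledged

void StartPublication(unsigned txId, int paths) { inflight[txId] = paths; }

void HandleUpdateAck(unsigned txId) {
    auto it = inflight.find(txId);
    if (it == inflight.end()) return; // late or duplicate ack: nothing to do
    std::printf("Publication in-flight, count: %d, txId: %u\n", it->second, txId);
    if (--it->second == 0) {          // every published path version acked
        std::printf("Publication complete, notify & remove, txId: %u\n", txId);
        inflight.erase(it);           // the real schemeshard notifies subscribers here
    }
}

int main() {
    StartPublication(105, 2); // txId 105 above publishes path ids 3 and 2
    HandleUpdateAck(105);     // TEvUpdateAck for LocalPathId 3
    HandleUpdateAck(105);     // TEvUpdateAck for LocalPathId 2
}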
2025-05-07T08:47:15.284522Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 105, ready parts: 1/1, is published: false 2025-05-07T08:47:15.284568Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 105 ready parts: 1/1 2025-05-07T08:47:15.284611Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 105:0 2025-05-07T08:47:15.284666Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 105:0 2025-05-07T08:47:15.284801Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 5 2025-05-07T08:47:15.284847Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 105, publications: 1, subscribers: 1 2025-05-07T08:47:15.284884Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 105, [OwnerId: 72057594046678944, LocalPathId: 3], 3 2025-05-07T08:47:15.285807Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 3 PathOwnerId: 72057594046678944, cookie: 105 2025-05-07T08:47:15.285911Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 3 PathOwnerId: 72057594046678944, cookie: 105 2025-05-07T08:47:15.285959Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 105 2025-05-07T08:47:15.287761Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 105, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 3 2025-05-07T08:47:15.287820Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4 2025-05-07T08:47:15.287912Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 105, subscribers: 1 2025-05-07T08:47:15.287970Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:212: TTxAckPublishToSchemeBoard Notify send TEvNotifyTxCompletionResult, at schemeshard: 72057594046678944, to actorId: [2:410:2376] 2025-05-07T08:47:15.294047Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 105 2025-05-07T08:47:15.294377Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 105: got EvNotifyTxCompletionResult 2025-05-07T08:47:15.294426Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 105: satisfy waiter [2:679:2600] TestWaitNotification: OK eventTxId 105 2025-05-07T08:47:15.312825Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_1/Topic1" Options { ReturnPartitioningInfo: true 
ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-05-07T08:47:15.313126Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_1/Topic1" took 341us result status StatusSuccess 2025-05-07T08:47:15.313890Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_1/Topic1" PathDescription { Self { Name: "Topic1" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 104 CreateStep: 150 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 2 } ChildrenExist: false BalancerTabletID: 72075186233409549 } PersQueueGroup { Name: "Topic1" PathId: 3 TotalGroupCount: 5 PartitionPerTablet: 7 PQTabletConfig { PartitionConfig { LifetimeSeconds: 3600 } YdbDatabasePath: "/MyRoot" PartitionStrategy { PartitionStrategyType: CAN_SPLIT_AND_MERGE } } Partitions { PartitionId: 0 TabletId: 72075186233409548 KeyRange { ToBound: "?\377\377\377\377\377\377\377\377\377\377\377\377\377\377\376" } Status: Active } Partitions { PartitionId: 1 TabletId: 72075186233409548 KeyRange { FromBound: "?\377\377\377\377\377\377\377\377\377\377\377\377\377\377\376" ToBound: "\177\377\377\377\377\377\377\377\377\377\377\377\377\377\377\375" } Status: Inactive ChildPartitionIds: 4 } Partitions { PartitionId: 2 TabletId: 72075186233409548 KeyRange { FromBound: "\177\377\377\377\377\377\377\377\377\377\377\377\377\377\377\375" ToBound: "\277\377\377\377\377\377\377\377\377\377\377\377\377\377\377\374" } Status: Inactive ChildPartitionIds: 4 } Partitions { PartitionId: 3 TabletId: 72075186233409548 KeyRange { FromBound: "\277\377\377\377\377\377\377\377\377\377\377\377\377\377\377\374" } Status: Active } Partitions { PartitionId: 4 TabletId: 72075186233409548 KeyRange { FromBound: "?\377\377\377\377\377\377\377\377\377\377\377\377\377\377\376" ToBound: "\277\377\377\377\377\377\377\377\377\377\377\377\377\377\377\374" } Status: Active ParentPartitionIds: 1 ParentPartitionIds: 2 } AlterVersion: 2 BalancerTabletID: 72075186233409549 NextPartitionId: 5 Allocate { Name: "Topic1" AlterVersion: 2 TotalGroupCount: 5 NextPartitionId: 5 PartitionPerTablet: 7 PQTabletConfig { PartitionConfig { LifetimeSeconds: 3600 } YdbDatabasePath: "/MyRoot" PartitionStrategy { PartitionStrategyType: CAN_SPLIT_AND_MERGE } } Partitions { PartitionId: 1 GroupId: 2 TabletId: 72075186233409548 OwnerId: 72057594046678944 ShardId: 3 Status: Inactive KeyRange { FromBound: "?\377\377\377\377\377\377\377\377\377\377\377\377\377\377\376" ToBound: "\177\377\377\377\377\377\377\377\377\377\377\377\377\377\377\375" } } Partitions { PartitionId: 2 GroupId: 3 TabletId: 72075186233409548 OwnerId: 72057594046678944 ShardId: 3 Status: Inactive KeyRange { FromBound: "\177\377\377\377\377\377\377\377\377\377\377\377\377\377\377\375" ToBound: "\277\377\377\377\377\377\377\377\377\377\377\377\377\377\377\374" } } Partitions { PartitionId: 3 GroupId: 4 TabletId: 72075186233409548 OwnerId: 72057594046678944 ShardId: 3 Status: Active KeyRange { FromBound: "\277\377\377\377\377\377\377\377\377\377\377\377\377\377\377\374" } } Partitions { PartitionId: 0 GroupId: 1 TabletId: 
72075186233409548 OwnerId: 72057594046678944 ShardId: 3 Status: Active KeyRange { ToBound: "?\377\377\377\377\377\377\377\377\377\377\377\377\377\377\376" } } Partitions { PartitionId: 4 GroupId: 5 TabletId: 72075186233409548 OwnerId: 72057594046678944 ShardId: 3 Status: Active ParentPartitionIds: 1 ParentPartitionIds: 2 KeyRange { FromBound: "?\377\377\377\377\377\377\377\377\377\377\377\377\377\377\376" ToBound: "\277\377\377\377\377\377\377\377\377\377\377\377\377\377\377\374" } } BalancerTabletID: 72075186233409549 BalancerOwnerId: 72057594046678944 BalancerShardId: 4 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 5 PQPartitionsLimit: 1000000 } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TSchemeShardLoginTest::ResetFailedAttemptCount [GOOD] >> TSchemeShardLoginTest::ResetFailedAttemptCountAfterModifyUser >> TSchemeShardLoginTest::ChangeAcceptablePasswordParameters [GOOD] >> TSchemeShardLoginTest::ChangeAccountLockoutParameters >> THealthCheckTest::RedGroupIssueWhenDisintegratedGroupStatus >> TSchemeShardTest::CreateTable >> TSchemeShardTopicSplitMergeTest::DisableSplitMerge [GOOD] |88.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/services/fq/ut_integration/ydb-services-fq-ut_integration |88.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/services/fq/ut_integration/ydb-services-fq-ut_integration |88.4%| [TA] {RESULT} $(B)/ydb/core/kqp/runtime/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} |88.4%| [TM] {RESULT} ydb/core/blob_depot/ut/unittest |88.4%| [LD] {RESULT} $(B)/ydb/services/fq/ut_integration/ydb-services-fq-ut_integration ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_topic_splitmerge/unittest >> TSchemeShardTopicSplitMergeTest::SplitWithManyPartition [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:128:2058] recipient: [1:108:2140] 2025-05-07T08:47:14.451720Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:47:14.451809Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:47:14.451847Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:47:14.451884Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:47:14.451931Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:47:14.451958Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:47:14.452011Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:47:14.452087Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:47:14.452842Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:47:14.453175Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:47:14.550144Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:47:14.550207Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:47:14.566539Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:47:14.566654Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:47:14.566780Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:47:14.587208Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:47:14.587860Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners 
number: 0 2025-05-07T08:47:14.588510Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:47:14.588826Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:47:14.592952Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:47:14.596109Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:47:14.596209Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:47:14.596278Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:47:14.596326Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:47:14.596366Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:47:14.596539Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:47:14.630846Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:239:2058] recipient: [1:15:2062] 2025-05-07T08:47:14.849618Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:47:14.849892Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:14.850178Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:47:14.850506Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:47:14.850574Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:14.857267Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:47:14.857449Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER 
DATABASE, path: //MyRoot 2025-05-07T08:47:14.857665Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:14.857738Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:47:14.857783Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:47:14.857817Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:47:14.868882Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:14.868982Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:47:14.869041Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:47:14.872636Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:14.872702Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:14.872769Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:47:14.872828Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:47:14.887354Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:47:14.891034Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:47:14.891277Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:47:14.892374Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:47:14.892574Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 130 RawX2: 4294969450 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:47:14.892644Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:47:14.892971Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:47:14.893030Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:47:14.893232Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:47:14.893312Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:47:14.907166Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:47:14.907233Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:47:14.907448Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:47:14.907509Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... ionId: 2 GroupId: 3 TabletId: 72075186233409548 OwnerId: 72057594046678944 ShardId: 3 Status: Active KeyRange { FromBound: "\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\251" } } Partitions { PartitionId: 0 GroupId: 1 TabletId: 72075186233409548 OwnerId: 72057594046678944 ShardId: 3 Status: Active KeyRange { ToBound: "UUUUUUUUUUUUUUUT" } } Partitions { PartitionId: 3 GroupId: 4 TabletId: 72075186233409548 OwnerId: 72057594046678944 ShardId: 3 Status: Active ParentPartitionIds: 1 KeyRange { FromBound: "UUUUUUUUUUUUUUUT" ToBound: "\177" } } Partitions { PartitionId: 4 GroupId: 5 TabletId: 72075186233409548 OwnerId: 72057594046678944 ShardId: 3 Status: Active ParentPartitionIds: 1 KeyRange { FromBound: "\177" ToBound: "\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\251" } } BalancerTabletID: 72075186233409549 BalancerOwnerId: 72057594046678944 BalancerShardId: 4 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 5 PQPartitionsLimit: 1000000 } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:47:15.525332Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:763:2058] recipient: [1:102:2137] Leader 
for TabletID 72057594046678944 is [1:125:2151] sender: [1:766:2058] recipient: [1:15:2062] Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:767:2058] recipient: [1:765:2674] Leader for TabletID 72057594046678944 is [1:768:2675] sender: [1:769:2058] recipient: [1:765:2674] 2025-05-07T08:47:15.581348Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:47:15.581465Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:47:15.581508Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:47:15.581554Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:47:15.581596Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:47:15.581634Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:47:15.581688Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:47:15.581797Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:47:15.582691Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:47:15.583125Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:47:15.599884Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:47:15.601669Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:47:15.601886Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:47:15.602167Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:47:15.602220Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:47:15.602340Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:47:15.603206Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1383: TTxInit for Paths, read records: 3, at schemeshard: 72057594046678944 2025-05-07T08:47:15.603314Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:319: AttachChild: child attached as only one child to the parent, parent id: [OwnerId: 72057594046678944, LocalPathId: 1], parent name: MyRoot, child name: USER_1, child id: [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 
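The "Change state for txid ... 2 -> 3", "3 -> 128", and "128 -> 240" lines above trace a sub-operation through its state machine: create parts, configure parts, wait for the coordinator's plan step, then finish and publish. Below is a toy model of those transitions, assuming only what the log itself shows; the numeric state values and comments come from the log lines, while the surrounding code is hypothetical and not YDB's implementation:

#include <cstdio>

enum ETxState {
    CreateParts    = 2,   // TCreateParts: create shards (none needed here)
    ConfigureParts = 3,   // TConfigureParts: configure the created shards
    Propose        = 128, // TPropose: wait for a plan step from the coordinator
    Done           = 240  // publish the result to the scheme board and finish
};

ETxState Next(ETxState s) {
    switch (s) {
        case CreateParts:    return ConfigureParts; // "no shards to create, do next state"
        case ConfigureParts: return Propose;
        case Propose:        return Done;           // on HandleReply TEvOperationPlan
        default:             return Done;
    }
}

int main() {
    for (ETxState s = CreateParts; s != Done; s = Next(s))
        std::printf("Change state %d -> %d\n", s, Next(s)); // 2 -> 3, 3 -> 128, 128 -> 240
}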
2025-05-07T08:47:15.603481Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:319: AttachChild: child attached as only one child to the parent, parent id: [OwnerId: 72057594046678944, LocalPathId: 2], parent name: USER_1, child name: Topic1, child id: [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-05-07T08:47:15.603562Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1457: TTxInit for UserAttributes, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:47:15.603634Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1483: TTxInit for UserAttributesAlterData, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:47:15.603885Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-05-07T08:47:15.604178Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1785: TTxInit for Tables, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:47:15.604311Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__root_data_erasure_manager.cpp:450: [RootDataErasureManager] Restore: Generation# 0, Status# 0, WakeupInterval# 604800 s, NumberDataErasureTenantsInRunning# 0 2025-05-07T08:47:15.604531Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2011: TTxInit for Columns, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:47:15.604625Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2071: TTxInit for ColumnsAlters, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:47:15.604760Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2129: TTxInit for Shards, read records: 4, at schemeshard: 72057594046678944 2025-05-07T08:47:15.604810Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-05-07T08:47:15.604844Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-05-07T08:47:15.604867Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 0 2025-05-07T08:47:15.604890Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-05-07T08:47:15.605015Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2215: TTxInit for TablePartitions, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:47:15.605087Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2281: TTxInit for TableShardPartitionConfigs, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:47:15.606135Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2431: TTxInit for ChannelsBinding, read records: 14, at schemeshard: 72057594046678944 2025-05-07T08:47:15.606884Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-05-07T08:47:15.609049Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2810: TTxInit for TableIndexes, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:47:15.609780Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2889: TTxInit for TableIndexKeys, read records: 0, at schemeshard: 72057594046678944 
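The KeyRange bounds in the describe results earlier in this output (for example ToBound: "?\377...\376" or FromBound: "UUUUUUUUUUUUUUUT") are raw byte strings rendered in protobuf text format, where non-printable bytes appear as three-digit octal escapes ("\377" is 0xFF, "\252" is 0xAA). A small standalone decoder, offered only as a reading aid and not part of YDB, makes them legible as hex:

#include <cstdio>
#include <string>

// Decode protobuf text-format octal escapes into raw bytes.
std::string DecodeOctalEscapes(const std::string& s) {
    std::string out;
    for (size_t i = 0; i < s.size(); ++i) {
        if (s[i] == '\\' && i + 3 < s.size()) { // escape like \377 takes 4 chars
            int v = (s[i + 1] - '0') * 64 + (s[i + 2] - '0') * 8 + (s[i + 3] - '0');
            out.push_back(static_cast<char>(v));
            i += 3;
        } else {
            out.push_back(s[i]);
        }
    }
    return out;
}

int main() {
    // First ToBound of Topic1 above: '?' is 0x3F, so the bound decodes to
    // 0x3FFF...FE, roughly the 1/4 point of a 16-byte partition keyspace.
    std::string bound = DecodeOctalEscapes(
        "?\\377\\377\\377\\377\\377\\377\\377\\377\\377\\377\\377\\377\\377\\377\\376");
    for (unsigned char c : bound)
        std::printf("%02X", c);
    std::printf("\n"); // prints 3FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFE
}

Decoded this way, the bounds land near the 1/4, 1/2, and 3/4 points (or, in the SplitWithManyPartition test, the 1/3 and 2/3 points, 0x55... and 0xAA...) of the 128-bit keyspace, consistent with the even partition splits these tests request.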
2025-05-07T08:47:15.611848Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3387: TTxInit for KesusInfos, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:47:15.612325Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3423: TTxInit for KesusAlters, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:47:15.612955Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3637: TTxInit for TxShards, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:47:15.613233Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3782: TTxInit for ShardToDelete, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:47:15.613351Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3799: TTxInit for BackupSettings, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:47:15.613577Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3959: TTxInit for ShardBackupStatus, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:47:15.613667Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3975: TTxInit for CompletedBackup, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:47:15.613943Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4260: TTxInit for Publications, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:47:15.614239Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4558: IndexBuild , records: 0, at schemeshard: 72057594046678944 2025-05-07T08:47:15.614424Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4705: SnapshotTables: snapshots: 0 tables: 0, at schemeshard: 72057594046678944 2025-05-07T08:47:15.614487Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4732: SnapshotSteps: snapshots: 0, at schemeshard: 72057594046678944 2025-05-07T08:47:15.614542Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4759: LongLocks: records: 0, at schemeshard: 72057594046678944 2025-05-07T08:47:15.643705Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:47:15.643809Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:47:15.644080Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:47:15.644163Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:47:15.644211Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:47:15.644401Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 >> TSchemeShardTest::RmDirTwice >> TSchemeShardTopicSplitMergeTest::EnableSplitMerge [GOOD] >> TSchemeShardTopicSplitMergeTest::SplitInactivePartition [GOOD] >> TSchemeShardTest::DropIndexedTableAndForceDropSimultaneously [GOOD] >> TSchemeShardTest::DependentOps >> TYardTest::TestLogWriteCutEqual [GOOD] >> TYardTest::TestLogWriteCutEqualRandomWait ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_topic_splitmerge/unittest >> 
TSchemeShardTopicSplitMergeTest::MargeNotAdjacentRangePartitions [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:128:2058] recipient: [1:108:2140] 2025-05-07T08:47:12.258311Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:47:12.258418Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:47:12.258458Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:47:12.258501Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:47:12.258550Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:47:12.258576Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:47:12.258625Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:47:12.258770Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:47:12.259501Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:47:12.259820Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:47:12.411833Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:47:12.411882Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:47:12.429049Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:47:12.429172Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:47:12.429307Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:47:12.436839Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:47:12.437339Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:47:12.438010Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:47:12.438350Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:47:12.440572Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:47:12.442080Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:47:12.442144Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:47:12.442196Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:47:12.442242Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:47:12.442279Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:47:12.442445Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:47:12.448921Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:239:2058] recipient: [1:15:2062] 2025-05-07T08:47:12.625381Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:47:12.625676Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:12.625883Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:47:12.626242Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:47:12.626299Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:12.633905Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:47:12.634076Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:47:12.634278Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:12.634325Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:47:12.634361Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:47:12.634389Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:47:12.636163Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:12.636217Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:47:12.636256Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:47:12.637837Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:12.637880Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:12.637940Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:47:12.638001Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:47:12.641821Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:47:12.654655Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:47:12.654857Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:47:12.655787Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:47:12.655930Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 130 RawX2: 4294969450 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:47:12.655983Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:47:12.656244Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change 
state for txid 1:0 128 -> 240 2025-05-07T08:47:12.656298Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:47:12.656486Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:47:12.656566Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:47:12.658558Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:47:12.658600Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:47:12.658774Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:47:12.658816Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... -07T08:47:15.742143Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_pq.cpp:754: NPQState::TPropose operationId# 104:0 can't persist state: ShardsInProgress is not empty, remain: 1 2025-05-07T08:47:15.812272Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation FindRelatedPartByTabletId, TxId: 104, tablet: 72075186233409548, partId: 0 2025-05-07T08:47:15.812474Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 104:0, at schemeshard: 72057594046678944, message: Origin: 72075186233409548 Status: COMPLETE TxId: 104 Step: 150 2025-05-07T08:47:15.812564Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_pq.cpp:624: NPQState::TPropose operationId# 104:0 HandleReply TEvProposeTransactionResult triggers early, at schemeshard: 72057594046678944 message# Origin: 72075186233409548 Status: COMPLETE TxId: 104 Step: 150 2025-05-07T08:47:15.812631Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_pq.cpp:265: CollectPQConfigChanged accept TEvPersQueue::TEvProposeTransactionResult, operationId: 104:0, shardIdx: 72057594046678944:3, shard: 72075186233409548, left await: 0, txState.State: Propose, txState.ReadyForNotifications: 0, at schemeshard: 72057594046678944 2025-05-07T08:47:15.812678Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_pq.cpp:629: NPQState::TPropose operationId# 104:0 HandleReply TEvProposeTransactionResult CollectPQConfigChanged: true 2025-05-07T08:47:15.812884Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 104:0 128 -> 240 2025-05-07T08:47:15.813085Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-05-07T08:47:15.813160Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4 2025-05-07T08:47:15.816785Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 104:0, at schemeshard: 72057594046678944 2025-05-07T08:47:15.817450Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:47:15.817507Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 104, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-05-07T08:47:15.817719Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 104, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-05-07T08:47:15.817941Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:47:15.818009Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [2:206:2208], at schemeshard: 72057594046678944, txId: 104, path id: 2 2025-05-07T08:47:15.818055Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [2:206:2208], at schemeshard: 72057594046678944, txId: 104, path id: 3 2025-05-07T08:47:15.818530Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 104:0, at schemeshard: 72057594046678944 2025-05-07T08:47:15.818581Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046678944] TDone opId# 104:0 ProgressState 2025-05-07T08:47:15.818678Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#104:0 progress is 1/1 2025-05-07T08:47:15.818709Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-05-07T08:47:15.818752Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#104:0 progress is 1/1 2025-05-07T08:47:15.818787Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-05-07T08:47:15.818842Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 104, ready parts: 1/1, is published: false 2025-05-07T08:47:15.818884Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-05-07T08:47:15.818923Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 104:0 2025-05-07T08:47:15.818953Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 104:0 2025-05-07T08:47:15.819104Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 5 2025-05-07T08:47:15.819147Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 104, publications: 2, subscribers: 1 2025-05-07T08:47:15.819182Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 104, [OwnerId: 72057594046678944, LocalPathId: 2], 5 2025-05-07T08:47:15.819212Z node 2 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 104, [OwnerId: 72057594046678944, LocalPathId: 3], 2 2025-05-07T08:47:15.820054Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 5 PathOwnerId: 72057594046678944, cookie: 104 2025-05-07T08:47:15.820152Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 5 PathOwnerId: 72057594046678944, cookie: 104 2025-05-07T08:47:15.820193Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 104 2025-05-07T08:47:15.820235Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 104, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 5 2025-05-07T08:47:15.820274Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5 2025-05-07T08:47:15.827628Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 104 2025-05-07T08:47:15.827745Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 104 2025-05-07T08:47:15.827781Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 104 2025-05-07T08:47:15.827812Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 104, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 2 2025-05-07T08:47:15.827846Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4 2025-05-07T08:47:15.827931Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 104, subscribers: 1 2025-05-07T08:47:15.827991Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:212: TTxAckPublishToSchemeBoard Notify send TEvNotifyTxCompletionResult, at schemeshard: 72057594046678944, to actorId: [2:404:2371] 2025-05-07T08:47:15.837871Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 2025-05-07T08:47:15.842191Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 2025-05-07T08:47:15.842318Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 104: got EvNotifyTxCompletionResult 2025-05-07T08:47:15.842364Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: 
test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 104: satisfy waiter [2:538:2474] TestWaitNotification: OK eventTxId 104 >>>>> Name: "Topic1" PQTabletConfig { PartitionConfig { } } Merge { Partition: 0 AdjacentPartition: 2 } TestModificationResults wait txId: 105 2025-05-07T08:47:15.876390Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/USER_1" OperationType: ESchemeOpAlterPersQueueGroup AlterPersQueueGroup { Name: "Topic1" PQTabletConfig { PartitionConfig { } } Merge { Partition: 0 AdjacentPartition: 2 } } } TxId: 105 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:47:15.876603Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_pq.cpp:509: TAlterPQ Propose, path: /MyRoot/USER_1/Topic1, pathId: , opId: 105:0, at schemeshard: 72057594046678944 2025-05-07T08:47:15.876811Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 105:1, propose status:StatusInvalidParameter, reason: You cannot merge non-contiguous partitions, at schemeshard: 72057594046678944 2025-05-07T08:47:15.887156Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 105, response: Status: StatusInvalidParameter Reason: "You cannot merge non-contiguous partitions" TxId: 105 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:47:15.887385Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 105, database: /MyRoot/USER_1, subject: , status: StatusInvalidParameter, reason: You cannot merge non-contiguous partitions, operation: ALTER PERSISTENT QUEUE, path: /MyRoot/USER_1/Topic1 TestModificationResult got TxId: 105, wait until txId: 105 TestWaitNotification wait txId: 105 2025-05-07T08:47:15.887726Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 105: send EvNotifyTxCompletion 2025-05-07T08:47:15.887790Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 105 2025-05-07T08:47:15.888283Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 105, at schemeshard: 72057594046678944 2025-05-07T08:47:15.888397Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 105: got EvNotifyTxCompletionResult 2025-05-07T08:47:15.888438Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 105: satisfy waiter [2:634:2559] TestWaitNotification: OK eventTxId 105 >> THealthCheckTest::OneIssueListing >> TSchemeShardTest::CreateIndexedTable >> TSchemeShardCheckProposeSize::CopyTable ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_topic_splitmerge/unittest >> TSchemeShardTopicSplitMergeTest::DisableSplitMerge [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:128:2058] recipient: [1:108:2140] 2025-05-07T08:47:12.106210Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, 
MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:47:12.106338Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:47:12.106375Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:47:12.106410Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:47:12.106454Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:47:12.106481Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:47:12.106575Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:47:12.106667Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:47:12.107462Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:47:12.107835Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:47:12.192030Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:47:12.192095Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:47:12.206909Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:47:12.207044Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:47:12.207180Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:47:12.215992Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:47:12.216584Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:47:12.217259Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:47:12.217605Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:47:12.220259Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:47:12.221898Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:47:12.221986Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 
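The MargeNotAdjacentRangePartitions result above ends with the schemeshard rejecting Merge { Partition: 0 AdjacentPartition: 2 } as StatusInvalidParameter ("You cannot merge non-contiguous partitions"): topic partitions own half-open key ranges, and only ranges that share a boundary can be merged. A minimal sketch of that contiguity test, with the range representation invented for illustration:

#include <iostream>
#include <optional>
#include <string>

// Illustrative partition descriptor: a half-open key range [FromBound, ToBound),
// where an empty optional means unbounded. Not YDB's actual representation.
struct TPartition {
    uint32_t Id;
    std::optional<std::string> FromBound;
    std::optional<std::string> ToBound;
};

// Two partitions are mergeable only if one range ends exactly where the other begins.
bool Contiguous(const TPartition& a, const TPartition& b) {
    return (a.ToBound && b.FromBound && *a.ToBound == *b.FromBound) ||
           (b.ToBound && a.FromBound && *b.ToBound == *a.FromBound);
}

int main() {
    // The split in these tests produced children 1: (-inf, "\010") and 2: ["\010", +inf).
    TPartition p1{1, std::nullopt, std::string("\010")};
    TPartition p2{2, std::string("\010"), std::nullopt};
    TPartition p0{0, std::nullopt, std::nullopt}; // inactive full-range parent

    std::cout << (Contiguous(p1, p2) ? "merge accepted\n"
                                     : "You cannot merge non-contiguous partitions\n");
    std::cout << (Contiguous(p0, p2) ? "merge accepted\n"
                                     : "You cannot merge non-contiguous partitions\n");
    return 0;
}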
2025-05-07T08:47:12.222058Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:47:12.222105Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:47:12.222147Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:47:12.222304Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:47:12.229549Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:239:2058] recipient: [1:15:2062] 2025-05-07T08:47:12.366866Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:47:12.367131Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:12.367387Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:47:12.367642Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:47:12.367707Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:12.371184Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:47:12.371364Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:47:12.371539Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:12.371604Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:47:12.371644Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:47:12.371680Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:47:12.373642Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:12.373693Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:47:12.373735Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:47:12.375834Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:12.375886Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:12.375947Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:47:12.375994Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:47:12.379973Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:47:12.381961Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:47:12.382294Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:47:12.383295Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:47:12.383463Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 130 RawX2: 4294969450 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:47:12.383522Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:47:12.383827Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:47:12.383885Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:47:12.384078Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:47:12.384160Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 
72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:47:12.386091Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:47:12.386148Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:47:12.386331Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:47:12.386366Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... ation_side_effects.cpp:982: Publication still in progress, tx: 105, publications: 1, subscribers: 0 2025-05-07T08:47:15.877328Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 105, [OwnerId: 72057594046678944, LocalPathId: 3], 3 2025-05-07T08:47:15.878093Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 3 PathOwnerId: 72057594046678944, cookie: 105 2025-05-07T08:47:15.907116Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 3 PathOwnerId: 72057594046678944, cookie: 105 2025-05-07T08:47:15.907224Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 105 2025-05-07T08:47:15.907271Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 105, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 3 2025-05-07T08:47:15.907319Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4 2025-05-07T08:47:15.907434Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 105, subscribers: 0 2025-05-07T08:47:15.916378Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 105 TestModificationResult got TxId: 105, wait until txId: 105 TestWaitNotification wait txId: 105 2025-05-07T08:47:15.940743Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 105: send EvNotifyTxCompletion 2025-05-07T08:47:15.940806Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 105 2025-05-07T08:47:15.941268Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 105, at schemeshard: 72057594046678944 2025-05-07T08:47:15.941378Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 105: got EvNotifyTxCompletionResult 2025-05-07T08:47:15.941418Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 105: satisfy waiter [2:750:2663] 
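The publication bookkeeping visible around txId 104 and 105 follows one pattern: a completed operation publishes updated path versions to the scheme board, each TEvUpdateAck drops an in-flight counter, and when it reaches zero any subscribers waiting on the transaction are notified ("Publication complete, notify & remove", then "satisfy waiter"). A stripped-down model of that ack counting, with all names chosen for illustration only:

#include <cstdint>
#include <functional>
#include <iostream>
#include <map>

// Toy model of per-txId scheme-board publication tracking; the real logic
// lives in schemeshard__publish_to_scheme_board.cpp.
class TPublicationTracker {
public:
    void Publish(uint64_t txId, uint32_t paths, std::function<void()> onDone) {
        InFlight[txId] = {paths, std::move(onDone)};
    }
    // Called once per TEvUpdateAck for a published path version.
    void Ack(uint64_t txId) {
        auto& st = InFlight.at(txId);
        std::cout << "Publication in-flight, count: " << st.Remaining << "\n";
        if (--st.Remaining == 0) {
            std::cout << "Publication complete, notify & remove\n";
            st.OnDone();            // satisfy the waiting subscriber
            InFlight.erase(txId);
        }
    }
private:
    struct TState { uint32_t Remaining; std::function<void()> OnDone; };
    std::map<uint64_t, TState> InFlight;
};

int main() {
    TPublicationTracker tracker;
    tracker.Publish(104, 2, [] { std::cout << "TestWaitNotification: OK\n"; });
    tracker.Ack(104); // pathId 2 acked, one publication left
    tracker.Ack(104); // pathId 3 acked, subscriber satisfied
    return 0;
}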
TestWaitNotification: OK eventTxId 105 2025-05-07T08:47:16.602621Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: PathId: 3 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:47:16.602954Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:44: Tablet 72057594046678944 describe pathId 3 took 347us result status StatusSuccess 2025-05-07T08:47:16.603814Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_1/Topic1" PathDescription { Self { Name: "Topic1" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 104 CreateStep: 150 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 2 } ChildrenExist: false BalancerTabletID: 72075186233409549 } PersQueueGroup { Name: "Topic1" PathId: 3 TotalGroupCount: 3 PartitionPerTablet: 7 PQTabletConfig { PartitionConfig { LifetimeSeconds: 3600 } YdbDatabasePath: "/MyRoot" PartitionStrategy { PartitionStrategyType: CAN_SPLIT_AND_MERGE } } Partitions { PartitionId: 0 TabletId: 72075186233409548 Status: Inactive ChildPartitionIds: 1 ChildPartitionIds: 2 } Partitions { PartitionId: 1 TabletId: 72075186233409548 KeyRange { ToBound: "\010" } Status: Active ParentPartitionIds: 0 } Partitions { PartitionId: 2 TabletId: 72075186233409548 KeyRange { FromBound: "\010" } Status: Active ParentPartitionIds: 0 } AlterVersion: 2 BalancerTabletID: 72075186233409549 NextPartitionId: 3 Allocate { Name: "Topic1" AlterVersion: 2 TotalGroupCount: 3 NextPartitionId: 3 PartitionPerTablet: 7 PQTabletConfig { PartitionConfig { LifetimeSeconds: 3600 } YdbDatabasePath: "/MyRoot" PartitionStrategy { PartitionStrategyType: CAN_SPLIT_AND_MERGE } } Partitions { PartitionId: 0 GroupId: 1 TabletId: 72075186233409548 OwnerId: 72057594046678944 ShardId: 3 Status: Inactive } Partitions { PartitionId: 1 GroupId: 2 TabletId: 72075186233409548 OwnerId: 72057594046678944 ShardId: 3 Status: Active ParentPartitionIds: 0 KeyRange { ToBound: "\010" } } Partitions { PartitionId: 2 GroupId: 3 TabletId: 72075186233409548 OwnerId: 72057594046678944 ShardId: 3 Status: Active ParentPartitionIds: 0 KeyRange { FromBound: "\010" } } BalancerTabletID: 72075186233409549 BalancerOwnerId: 72057594046678944 BalancerShardId: 4 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 3 PQPartitionsLimit: 1000000 } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:47:16.686755Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_1/Topic1" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false 
ReturnBoundaries: true ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-05-07T08:47:16.687069Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_1/Topic1" took 352us result status StatusSuccess 2025-05-07T08:47:16.687665Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_1/Topic1" PathDescription { Self { Name: "Topic1" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 104 CreateStep: 150 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 2 } ChildrenExist: false BalancerTabletID: 72075186233409549 } PersQueueGroup { Name: "Topic1" PathId: 3 TotalGroupCount: 3 PartitionPerTablet: 7 PQTabletConfig { PartitionConfig { LifetimeSeconds: 3600 } YdbDatabasePath: "/MyRoot" PartitionStrategy { PartitionStrategyType: CAN_SPLIT_AND_MERGE } } Partitions { PartitionId: 0 TabletId: 72075186233409548 Status: Inactive ChildPartitionIds: 1 ChildPartitionIds: 2 } Partitions { PartitionId: 1 TabletId: 72075186233409548 KeyRange { ToBound: "\010" } Status: Active ParentPartitionIds: 0 } Partitions { PartitionId: 2 TabletId: 72075186233409548 KeyRange { FromBound: "\010" } Status: Active ParentPartitionIds: 0 } AlterVersion: 2 BalancerTabletID: 72075186233409549 NextPartitionId: 3 Allocate { Name: "Topic1" AlterVersion: 2 TotalGroupCount: 3 NextPartitionId: 3 PartitionPerTablet: 7 PQTabletConfig { PartitionConfig { LifetimeSeconds: 3600 } YdbDatabasePath: "/MyRoot" PartitionStrategy { PartitionStrategyType: CAN_SPLIT_AND_MERGE } } Partitions { PartitionId: 0 GroupId: 1 TabletId: 72075186233409548 OwnerId: 72057594046678944 ShardId: 3 Status: Inactive } Partitions { PartitionId: 1 GroupId: 2 TabletId: 72075186233409548 OwnerId: 72057594046678944 ShardId: 3 Status: Active ParentPartitionIds: 0 KeyRange { ToBound: "\010" } } Partitions { PartitionId: 2 GroupId: 3 TabletId: 72075186233409548 OwnerId: 72057594046678944 ShardId: 3 Status: Active ParentPartitionIds: 0 KeyRange { FromBound: "\010" } } BalancerTabletID: 72075186233409549 BalancerOwnerId: 72057594046678944 BalancerShardId: 4 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 3 PQPartitionsLimit: 1000000 } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >>>>> Name: "Topic1" PQTabletConfig { PartitionConfig { } PartitionStrategy { PartitionStrategyType: DISABLED } } TestModificationResults wait txId: 106 2025-05-07T08:47:16.691094Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/USER_1" OperationType: ESchemeOpAlterPersQueueGroup AlterPersQueueGroup { Name: "Topic1" 
PQTabletConfig { PartitionConfig { } PartitionStrategy { PartitionStrategyType: DISABLED } } } } TxId: 106 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:47:16.691325Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_pq.cpp:509: TAlterPQ Propose, path: /MyRoot/USER_1/Topic1, pathId: , opId: 106:0, at schemeshard: 72057594046678944 2025-05-07T08:47:16.691486Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 106:1, propose status:StatusInvalidParameter, reason: Can`t disable auto partitioning., at schemeshard: 72057594046678944 2025-05-07T08:47:16.696432Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 106, response: Status: StatusInvalidParameter Reason: "Can`t disable auto partitioning." TxId: 106 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:47:16.696627Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 106, database: /MyRoot/USER_1, subject: , status: StatusInvalidParameter, reason: Can`t disable auto partitioning., operation: ALTER PERSISTENT QUEUE, path: /MyRoot/USER_1/Topic1 TestModificationResult got TxId: 106, wait until txId: 106 TestWaitNotification wait txId: 106 2025-05-07T08:47:16.697234Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 106: send EvNotifyTxCompletion 2025-05-07T08:47:16.697283Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 106 2025-05-07T08:47:16.697757Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 106, at schemeshard: 72057594046678944 2025-05-07T08:47:16.697855Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 106: got EvNotifyTxCompletionResult 2025-05-07T08:47:16.697893Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 106: satisfy waiter [2:765:2677] TestWaitNotification: OK eventTxId 106 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_topic_splitmerge/unittest >> TSchemeShardTopicSplitMergeTest::SplitInactivePartition [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:128:2058] recipient: [1:108:2140] 2025-05-07T08:47:13.848416Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:47:13.848500Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:47:13.848547Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:47:13.848591Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:47:13.848640Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:47:13.848672Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:47:13.848730Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:47:13.848810Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:47:13.849584Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:47:13.849937Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:47:14.029293Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:47:14.029362Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:47:14.045925Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:47:14.046075Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:47:14.046218Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:47:14.055708Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:47:14.056311Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:47:14.057003Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:47:14.057361Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:47:14.059753Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:47:14.061411Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:47:14.061477Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:47:14.061531Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:47:14.061578Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:47:14.061617Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:47:14.061759Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: 
Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:47:14.068012Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:239:2058] recipient: [1:15:2062] 2025-05-07T08:47:14.222017Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:47:14.222327Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:14.222602Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:47:14.222907Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:47:14.222978Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:14.227382Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:47:14.227545Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:47:14.227797Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:14.227856Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:47:14.227897Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:47:14.227931Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:47:14.231231Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:14.231309Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:47:14.231375Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:47:14.233632Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:14.233706Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 
72057594046678944 2025-05-07T08:47:14.233764Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:47:14.233826Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:47:14.246244Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:47:14.248573Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:47:14.248787Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:47:14.249866Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:47:14.250059Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 130 RawX2: 4294969450 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:47:14.250112Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:47:14.250417Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:47:14.250470Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:47:14.250648Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:47:14.250723Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:47:14.259563Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:47:14.259630Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:47:14.259840Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:47:14.259885Z 
node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... AT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_pq.cpp:648: NPQState::TPropose operationId# 105:0 HandleReply TEvProposeTransactionAttachResult CollectPQConfigChanged: false 2025-05-07T08:47:16.913225Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_pq.cpp:754: NPQState::TPropose operationId# 105:0 can't persist state: ShardsInProgress is not empty, remain: 1 2025-05-07T08:47:16.915383Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 105:0, at schemeshard: 72057594046678944 TestModificationResult got TxId: 105, wait until txId: 105 TestWaitNotification wait txId: 105 2025-05-07T08:47:16.915684Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 105: send EvNotifyTxCompletion 2025-05-07T08:47:16.915736Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 105 2025-05-07T08:47:16.916200Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 105, at schemeshard: 72057594046678944 2025-05-07T08:47:16.916253Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 105, ready parts: 0/1, is published: true 2025-05-07T08:47:16.916302Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 105, at schemeshard: 72057594046678944 2025-05-07T08:47:16.952284Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 200, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:47:16.952451Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 105 AckTo { RawX1: 0 RawX2: 0 } } Step: 200 MediatorID: 72075186233409547 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:47:16.952517Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_pq.cpp:662: NPQState::TPropose operationId# 105:0 HandleReply TEvOperationPlan, step: 200, at tablet: 72057594046678944 2025-05-07T08:47:16.952583Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_pq.cpp:754: NPQState::TPropose operationId# 105:0 can't persist state: ShardsInProgress is not empty, remain: 1 2025-05-07T08:47:16.989145Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation FindRelatedPartByTabletId, TxId: 105, tablet: 72075186233409548, partId: 0 2025-05-07T08:47:16.989365Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 105:0, at schemeshard: 72057594046678944, message: Origin: 72075186233409548 Status: COMPLETE TxId: 105 Step: 200 2025-05-07T08:47:16.989459Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_pq.cpp:624: NPQState::TPropose operationId# 105:0 HandleReply TEvProposeTransactionResult triggers early, at schemeshard: 72057594046678944 message# Origin: 72075186233409548 Status: COMPLETE TxId: 105 Step: 200 2025-05-07T08:47:16.989531Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_pq.cpp:265: CollectPQConfigChanged accept TEvPersQueue::TEvProposeTransactionResult, operationId: 105:0, shardIdx: 72057594046678944:3, shard: 72075186233409548, left await: 0, txState.State: 
Propose, txState.ReadyForNotifications: 0, at schemeshard: 72057594046678944 2025-05-07T08:47:16.989578Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_pq.cpp:629: NPQState::TPropose operationId# 105:0 HandleReply TEvProposeTransactionResult CollectPQConfigChanged: true 2025-05-07T08:47:16.989783Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 105:0 128 -> 240 2025-05-07T08:47:16.990034Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4 2025-05-07T08:47:16.993313Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 105:0, at schemeshard: 72057594046678944 2025-05-07T08:47:16.994047Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:47:16.994138Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 105, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-05-07T08:47:16.994462Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:47:16.994525Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [2:206:2208], at schemeshard: 72057594046678944, txId: 105, path id: 3 2025-05-07T08:47:16.994952Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 105:0, at schemeshard: 72057594046678944 2025-05-07T08:47:16.995005Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046678944] TDone opId# 105:0 ProgressState 2025-05-07T08:47:16.995140Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#105:0 progress is 1/1 2025-05-07T08:47:16.995188Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 105 ready parts: 1/1 2025-05-07T08:47:16.995234Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#105:0 progress is 1/1 2025-05-07T08:47:16.995279Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 105 ready parts: 1/1 2025-05-07T08:47:16.995334Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 105, ready parts: 1/1, is published: false 2025-05-07T08:47:16.995383Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 105 ready parts: 1/1 2025-05-07T08:47:16.995435Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 105:0 2025-05-07T08:47:16.995470Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 105:0 2025-05-07T08:47:16.995645Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 5 2025-05-07T08:47:16.995703Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 105, publications: 1, subscribers: 1 
2025-05-07T08:47:16.995745Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 105, [OwnerId: 72057594046678944, LocalPathId: 3], 3 2025-05-07T08:47:16.996401Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 3 PathOwnerId: 72057594046678944, cookie: 105 2025-05-07T08:47:16.996495Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 3 PathOwnerId: 72057594046678944, cookie: 105 2025-05-07T08:47:16.996535Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 105 2025-05-07T08:47:16.996571Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 105, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 3 2025-05-07T08:47:16.996617Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4 2025-05-07T08:47:16.996710Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 105, subscribers: 1 2025-05-07T08:47:16.996770Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:212: TTxAckPublishToSchemeBoard Notify send TEvNotifyTxCompletionResult, at schemeshard: 72057594046678944, to actorId: [2:404:2371] 2025-05-07T08:47:17.001747Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 105 2025-05-07T08:47:17.001863Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 105: got EvNotifyTxCompletionResult 2025-05-07T08:47:17.001897Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 105: satisfy waiter [2:663:2587] TestWaitNotification: OK eventTxId 105 >>>>> Name: "Topic1" PQTabletConfig { PartitionConfig { } } Split { Partition: 1 SplitBoundary: "W" } TestModificationResults wait txId: 106 2025-05-07T08:47:17.012047Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/USER_1" OperationType: ESchemeOpAlterPersQueueGroup AlterPersQueueGroup { Name: "Topic1" PQTabletConfig { PartitionConfig { } } Split { Partition: 1 SplitBoundary: "W" } } } TxId: 106 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:47:17.012255Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_pq.cpp:509: TAlterPQ Propose, path: /MyRoot/USER_1/Topic1, pathId: , opId: 106:0, at schemeshard: 72057594046678944 2025-05-07T08:47:17.012452Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 106:1, propose status:StatusInvalidParameter, reason: Invalid partition status: 2, at schemeshard: 72057594046678944 2025-05-07T08:47:17.014757Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 106, response: Status: StatusInvalidParameter Reason: "Invalid 
partition status: 2" TxId: 106 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:47:17.014999Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 106, database: /MyRoot/USER_1, subject: , status: StatusInvalidParameter, reason: Invalid partition status: 2, operation: ALTER PERSISTENT QUEUE, path: /MyRoot/USER_1/Topic1 TestModificationResult got TxId: 106, wait until txId: 106 TestWaitNotification wait txId: 106 2025-05-07T08:47:17.015377Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 106: send EvNotifyTxCompletion 2025-05-07T08:47:17.015429Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 106 2025-05-07T08:47:17.015901Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 106, at schemeshard: 72057594046678944 2025-05-07T08:47:17.016018Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 106: got EvNotifyTxCompletionResult 2025-05-07T08:47:17.016060Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 106: satisfy waiter [2:752:2664] TestWaitNotification: OK eventTxId 106 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_topic_splitmerge/unittest >> TSchemeShardTopicSplitMergeTest::EnableSplitMerge [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:128:2058] recipient: [1:108:2140] 2025-05-07T08:47:11.834487Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:47:11.834595Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:47:11.834637Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:47:11.834680Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:47:11.834730Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:47:11.834759Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:47:11.834855Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:47:11.834941Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:47:11.835710Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:47:11.836215Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:47:11.960430Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:47:11.960496Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:47:11.977049Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:47:11.977182Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:47:11.977318Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:47:11.987482Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:47:11.991641Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:47:11.992308Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:47:11.992703Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:47:12.000728Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:47:12.002445Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:47:12.002531Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:47:12.002590Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:47:12.002633Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:47:12.002673Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:47:12.002856Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:47:12.010376Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:239:2058] recipient: [1:15:2062] 2025-05-07T08:47:12.316440Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:47:12.316703Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: 
TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:12.316943Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:47:12.317253Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:47:12.317316Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:12.319977Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:47:12.320136Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:47:12.320372Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:12.320426Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:47:12.320464Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:47:12.320496Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:47:12.322699Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:12.322765Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:47:12.322812Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:47:12.325026Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:12.325083Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:12.325137Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:47:12.325194Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:47:12.328943Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:47:12.335234Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:47:12.335648Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:47:12.336708Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:47:12.336872Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 130 RawX2: 4294969450 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:47:12.336919Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:47:12.337255Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:47:12.337315Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:47:12.337519Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:47:12.337609Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:47:12.346944Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:47:12.347009Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:47:12.347236Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:47:12.347277Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 
eshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [2:205:2207], at schemeshard: 72057594046678944, txId: 105, path id: 3 2025-05-07T08:47:16.442059Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 105:0, at schemeshard: 72057594046678944 2025-05-07T08:47:16.442111Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046678944] TDone opId# 105:0 ProgressState 2025-05-07T08:47:16.442240Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#105:0 progress is 1/1 2025-05-07T08:47:16.442284Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 105 ready parts: 1/1 2025-05-07T08:47:16.442330Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#105:0 progress is 1/1 2025-05-07T08:47:16.442371Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 105 ready parts: 1/1 2025-05-07T08:47:16.442417Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 105, ready parts: 1/1, is published: false 2025-05-07T08:47:16.442472Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 105 ready parts: 1/1 2025-05-07T08:47:16.442522Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 105:0 2025-05-07T08:47:16.442558Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 105:0 2025-05-07T08:47:16.442715Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 5 2025-05-07T08:47:16.442764Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 105, publications: 1, subscribers: 0 2025-05-07T08:47:16.442810Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 105, [OwnerId: 72057594046678944, LocalPathId: 3], 3 2025-05-07T08:47:16.444137Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 3 PathOwnerId: 72057594046678944, cookie: 105 2025-05-07T08:47:16.444235Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 3 PathOwnerId: 72057594046678944, cookie: 105 2025-05-07T08:47:16.444274Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 105 2025-05-07T08:47:16.444326Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 105, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 3 2025-05-07T08:47:16.444376Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4 2025-05-07T08:47:16.444473Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: 
Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 105, subscribers: 0 2025-05-07T08:47:16.464188Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 105 TestModificationResult got TxId: 105, wait until txId: 105 TestWaitNotification wait txId: 105 2025-05-07T08:47:16.478858Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 105: send EvNotifyTxCompletion 2025-05-07T08:47:16.478949Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 105 2025-05-07T08:47:16.479444Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 105, at schemeshard: 72057594046678944 2025-05-07T08:47:16.479580Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 105: got EvNotifyTxCompletionResult 2025-05-07T08:47:16.479640Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 105: satisfy waiter [2:758:2669] TestWaitNotification: OK eventTxId 105 2025-05-07T08:47:17.042511Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: PathId: 3 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:47:17.042820Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:44: Tablet 72057594046678944 describe pathId 3 took 347us result status StatusSuccess 2025-05-07T08:47:17.043507Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_1/Topic1" PathDescription { Self { Name: "Topic1" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 104 CreateStep: 150 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 2 } ChildrenExist: false BalancerTabletID: 72075186233409549 } PersQueueGroup { Name: "Topic1" PathId: 3 TotalGroupCount: 3 PartitionPerTablet: 7 PQTabletConfig { PartitionConfig { LifetimeSeconds: 3600 } YdbDatabasePath: "/MyRoot" PartitionStrategy { PartitionStrategyType: CAN_SPLIT_AND_MERGE } } Partitions { PartitionId: 0 TabletId: 72075186233409548 KeyRange { ToBound: "UUUUUUUUUUUUUUUT" } Status: Active } Partitions { PartitionId: 1 TabletId: 72075186233409548 KeyRange { FromBound: "UUUUUUUUUUUUUUUT" ToBound: "\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\251" } Status: Active } Partitions { PartitionId: 2 TabletId: 72075186233409548 KeyRange { FromBound: "\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\251" } Status: Active } AlterVersion: 2 BalancerTabletID: 72075186233409549 NextPartitionId: 3 Allocate { Name: "Topic1" AlterVersion: 2 TotalGroupCount: 3 NextPartitionId: 3 PartitionPerTablet: 7 PQTabletConfig { PartitionConfig { LifetimeSeconds: 3600 } YdbDatabasePath: "/MyRoot" PartitionStrategy { PartitionStrategyType: CAN_SPLIT_AND_MERGE } } Partitions { PartitionId: 1 GroupId: 2 TabletId: 72075186233409548 OwnerId: 72057594046678944 ShardId: 3 Status: Active KeyRange { FromBound: "UUUUUUUUUUUUUUUT" ToBound: 
"\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\251" } } Partitions { PartitionId: 2 GroupId: 3 TabletId: 72075186233409548 OwnerId: 72057594046678944 ShardId: 3 Status: Active KeyRange { FromBound: "\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\251" } } Partitions { PartitionId: 0 GroupId: 1 TabletId: 72075186233409548 OwnerId: 72057594046678944 ShardId: 3 Status: Active KeyRange { ToBound: "UUUUUUUUUUUUUUUT" } } BalancerTabletID: 72075186233409549 BalancerOwnerId: 72057594046678944 BalancerShardId: 4 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 3 PQPartitionsLimit: 1000000 } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:47:17.126800Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_1/Topic1" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-05-07T08:47:17.127130Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_1/Topic1" took 362us result status StatusSuccess 2025-05-07T08:47:17.127760Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_1/Topic1" PathDescription { Self { Name: "Topic1" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 104 CreateStep: 150 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 2 } ChildrenExist: false BalancerTabletID: 72075186233409549 } PersQueueGroup { Name: "Topic1" PathId: 3 TotalGroupCount: 3 PartitionPerTablet: 7 PQTabletConfig { PartitionConfig { LifetimeSeconds: 3600 } YdbDatabasePath: "/MyRoot" PartitionStrategy { PartitionStrategyType: CAN_SPLIT_AND_MERGE } } Partitions { PartitionId: 0 TabletId: 72075186233409548 KeyRange { ToBound: "UUUUUUUUUUUUUUUT" } Status: Active } Partitions { PartitionId: 1 TabletId: 72075186233409548 KeyRange { FromBound: "UUUUUUUUUUUUUUUT" ToBound: "\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\251" } Status: Active } Partitions { PartitionId: 2 TabletId: 72075186233409548 KeyRange { FromBound: "\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\251" } Status: Active } AlterVersion: 2 BalancerTabletID: 72075186233409549 NextPartitionId: 3 Allocate { Name: "Topic1" AlterVersion: 2 TotalGroupCount: 3 NextPartitionId: 3 PartitionPerTablet: 7 PQTabletConfig { PartitionConfig { LifetimeSeconds: 3600 } YdbDatabasePath: "/MyRoot" PartitionStrategy { PartitionStrategyType: CAN_SPLIT_AND_MERGE } } Partitions { PartitionId: 1 GroupId: 2 TabletId: 72075186233409548 OwnerId: 
72057594046678944 ShardId: 3 Status: Active KeyRange { FromBound: "UUUUUUUUUUUUUUUT" ToBound: "\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\251" } } Partitions { PartitionId: 2 GroupId: 3 TabletId: 72075186233409548 OwnerId: 72057594046678944 ShardId: 3 Status: Active KeyRange { FromBound: "\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\251" } } Partitions { PartitionId: 0 GroupId: 1 TabletId: 72075186233409548 OwnerId: 72057594046678944 ShardId: 3 Status: Active KeyRange { ToBound: "UUUUUUUUUUUUUUUT" } } BalancerTabletID: 72075186233409549 BalancerOwnerId: 72057594046678944 BalancerShardId: 4 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 3 PQPartitionsLimit: 1000000 } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >>>>> Verify partition 0 >>>>> Verify partition 1 >>>>> Verify partition 2 >> TSchemeShardLoginTest::ResetFailedAttemptCountAfterModifyUser [GOOD] >> TSchemeShardTest::DependentOps [GOOD] >> TSchemeShardTest::DefaultColumnFamiliesWithNonCanonicName >> TSchemeShardPgTypesInTables::CreateTableWithPgTypeColumn-EnableTablePgTypes-false |88.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_move/ydb-core-tx-schemeshard-ut_move |88.4%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_move/ydb-core-tx-schemeshard-ut_move |88.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_move/ydb-core-tx-schemeshard-ut_move |88.4%| [TA] $(B)/ydb/core/tx/schemeshard/ut_topic_splitmerge/test-results/unittest/{meta.json ... results_accumulator.log} >> TSchemeShardTest::RmDirTwice [GOOD] >> TSchemeShardTest::TopicMeteringMode |88.4%| [TA] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_topic_splitmerge/test-results/unittest/{meta.json ... 
results_accumulator.log} >> TSchemeShardTest::InitRootAgain >> TSchemeShardLoginTest::AccountLockoutAndAutomaticallyUnlock [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_login/unittest >> TSchemeShardLoginTest::ResetFailedAttemptCountAfterModifyUser [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140] 2025-05-07T08:47:07.798540Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:47:07.798654Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:47:07.798719Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:47:07.798763Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:47:07.798813Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:47:07.798871Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:47:07.798925Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:47:07.798994Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:47:07.799748Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:47:07.800102Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:47:07.888845Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:47:07.888920Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:47:07.906543Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:47:07.906767Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:47:07.906977Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:47:07.913242Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:47:07.913580Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:47:07.914291Z node 
1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:47:07.914484Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:47:07.917429Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:47:07.918895Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:47:07.918964Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:47:07.919045Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:47:07.919096Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:47:07.919143Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:47:07.919360Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:47:07.926362Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T08:47:08.078032Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:47:08.078305Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:08.078560Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:47:08.078812Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:47:08.078904Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:08.081427Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:47:08.081572Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 
2025-05-07T08:47:08.081784Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:08.081864Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:47:08.081910Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:47:08.081998Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:47:08.084463Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:08.084535Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:47:08.084585Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:47:08.086733Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:08.086796Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:08.086876Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:47:08.086948Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:47:08.091228Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:47:08.093503Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:47:08.093752Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:47:08.094910Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:47:08.095066Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:47:08.095123Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:47:08.095424Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:47:08.095484Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:47:08.095663Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:47:08.095777Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:47:08.098164Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:47:08.098227Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:47:08.098440Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:47:08.098499Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 46678944 Generation: 3 LocalPathId: 1 Version: 8 PathOwnerId: 72057594046678944, cookie: 103 2025-05-07T08:47:18.420990Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 103 2025-05-07T08:47:18.421037Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 103, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 8 2025-05-07T08:47:18.421091Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:47:18.421216Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 103, subscribers: 0 2025-05-07T08:47:18.423444Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-05-07T08:47:18.424753Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 Leader for TabletID 72057594046678944 is [5:307:2294] sender: [5:400:2058] recipient: [5:102:2137] Leader for TabletID 72057594046678944 is [5:307:2294] sender: [5:403:2058] recipient: [5:15:2062] Leader for TabletID 72057594046678944 is [5:307:2294] sender: [5:404:2058] recipient: [5:402:2373] Leader for TabletID 72057594046678944 is [5:405:2374] sender: [5:406:2058] recipient: [5:402:2373] 2025-05-07T08:47:18.516832Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, 
WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:47:18.516961Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:47:18.517024Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:47:18.517074Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:47:18.517122Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:47:18.517164Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:47:18.517222Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:47:18.517320Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:47:18.518248Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:47:18.518601Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:47:18.570150Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:47:18.571906Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:47:18.572123Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:47:18.572308Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:47:18.572368Z node 5 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:47:18.572529Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:47:18.573374Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1383: TTxInit for Paths, read records: 1, at schemeshard: 72057594046678944 2025-05-07T08:47:18.573514Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1457: TTxInit for UserAttributes, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:47:18.573610Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1483: TTxInit for UserAttributesAlterData, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:47:18.574121Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1785: TTxInit for Tables, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:47:18.574216Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__root_data_erasure_manager.cpp:450: [RootDataErasureManager] Restore: Generation# 0, Status# 0, WakeupInterval# 604800 s, NumberDataErasureTenantsInRunning# 0 2025-05-07T08:47:18.574449Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__init.cpp:2011: TTxInit for Columns, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:47:18.574554Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2071: TTxInit for ColumnsAlters, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:47:18.574638Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2129: TTxInit for Shards, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:47:18.574760Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2215: TTxInit for TablePartitions, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:47:18.574872Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2281: TTxInit for TableShardPartitionConfigs, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:47:18.575028Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2431: TTxInit for ChannelsBinding, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:47:18.575358Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2810: TTxInit for TableIndexes, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:47:18.575498Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2889: TTxInit for TableIndexKeys, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:47:18.575926Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3387: TTxInit for KesusInfos, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:47:18.576008Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3423: TTxInit for KesusAlters, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:47:18.576196Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3637: TTxInit for TxShards, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:47:18.576295Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3782: TTxInit for ShardToDelete, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:47:18.576398Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3799: TTxInit for BackupSettings, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:47:18.576704Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3959: TTxInit for ShardBackupStatus, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:47:18.576803Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3975: TTxInit for CompletedBackup, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:47:18.576950Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4260: TTxInit for Publications, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:47:18.577205Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4558: IndexBuild , records: 0, at schemeshard: 72057594046678944 2025-05-07T08:47:18.577409Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4705: SnapshotTables: snapshots: 0 tables: 0, at schemeshard: 72057594046678944 2025-05-07T08:47:18.577469Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4732: SnapshotSteps: snapshots: 0, at schemeshard: 72057594046678944 2025-05-07T08:47:18.577528Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4759: LongLocks: records: 0, at schemeshard: 72057594046678944 2025-05-07T08:47:18.585086Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:47:18.585166Z node 5 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:47:18.585306Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:47:18.585361Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:47:18.585408Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:47:18.586554Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594046678944 is [5:405:2374] sender: [5:462:2058] recipient: [5:15:2062] 2025-05-07T08:47:18.635498Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:43: TTxLogin Execute at schemeshard: 72057594046678944 2025-05-07T08:47:18.635559Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:46: TTxLogin RotateKeys at schemeshard: 72057594046678944 2025-05-07T08:47:18.677156Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:85: TTxLogin Complete, result: Token: "eyJhbGciOiJQUzI1NiIsImtpZCI6IjMifQ.eyJhdWQiOlsiXC9NeVJvb3QiXSwiZXhwIjoxNzQ2NjUwODM4LCJpYXQiOjE3NDY2MDc2MzgsInN1YiI6InVzZXIxIn0.BtWbUXcbVDjB42GJtrlpWSdTLljqESGCOXme-QveDXmGQCKlXS0xni4mORx6cGwa3ekExdHJ1hyuRqXN7aQAowAR0HGynoyyXlO2pqyxBVZsaudQPt_hGScp_MJ6mAI2XDbS_QV4d9SaBn-36zgS9FUaUZnLd26l8cB7b27nhjDUrmgd8n1zNMADUTr6FGf4Qhg7TPUqqYQKkwUfD8s6BF7LTvTDqW0xmDW_vrs-T2QgXh_eIyCYfEKB8NMnJjZdM0yzOMit4Hb-6M4fCqNeDAtTfHGg53hEqiYAiJZdQuiRN0TOqU2qDA4fdvmW5yjeyUX6hUsF6wqr8MVh_z5RRg" SanitizedToken: "eyJhbGciOiJQUzI1NiIsImtpZCI6IjMifQ.eyJhdWQiOlsiXC9NeVJvb3QiXSwiZXhwIjoxNzQ2NjUwODM4LCJpYXQiOjE3NDY2MDc2MzgsInN1YiI6InVzZXIxIn0.**" IsAdmin: true, at schemeshard: 72057594046678944 2025-05-07T08:47:18.677335Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:47:18.677393Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 0, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:47:18.677596Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:47:18.677645Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [5:454:2412], at schemeshard: 72057594046678944, txId: 0, path id: 1 2025-05-07T08:47:18.678243Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 4 LocalPathId: 1 Version: 9 PathOwnerId: 72057594046678944, cookie: 0 >> Yq_1::DescribeJob >> TSchemeShardTest::MkRmDir >> TSchemeShardTest::Boot |88.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/mind/bscontroller/ut_bscontroller/ydb-core-mind-bscontroller-ut_bscontroller |88.4%| [LD] {RESULT} $(B)/ydb/core/mind/bscontroller/ut_bscontroller/ydb-core-mind-bscontroller-ut_bscontroller |88.4%| [LD] {BAZEL_UPLOAD, SKIPPED} 
$(B)/ydb/core/mind/bscontroller/ut_bscontroller/ydb-core-mind-bscontroller-ut_bscontroller >> TSchemeShardTest::ConsistentCopyTable [GOOD] >> TSchemeShardTest::ConsistentCopyTableAwait >> TSchemeShardPgTypesInTables::CreateTableWithPgTypeColumn-EnableTablePgTypes-false [GOOD] >> TSchemeShardPgTypesInTables::CreateTableWithPgTypeColumn-EnableTablePgTypes-true >> TSchemeShardTest::DefaultColumnFamiliesWithNonCanonicName [GOOD] >> TSchemeShardTest::DropPQ >> Yq_1::CreateConnection_With_Existing_Name |88.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_index/ydb-core-tx-schemeshard-ut_index |88.4%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_index/ydb-core-tx-schemeshard-ut_index |88.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_index/ydb-core-tx-schemeshard-ut_index ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_login/unittest >> TSchemeShardLoginTest::AccountLockoutAndAutomaticallyUnlock [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140] 2025-05-07T08:47:07.860291Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:47:07.860396Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:47:07.860452Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:47:07.860492Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:47:07.860535Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:47:07.860564Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:47:07.860614Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:47:07.860687Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:47:07.861450Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:47:07.861870Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:47:07.949840Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:47:07.949915Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 
2025-05-07T08:47:07.968430Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:47:07.968640Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:47:07.968826Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:47:07.975122Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:47:07.975443Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:47:07.976107Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:47:07.976321Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:47:07.979547Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:47:07.981071Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:47:07.981140Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:47:07.981215Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:47:07.981268Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:47:07.981308Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:47:07.981564Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:47:07.988731Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T08:47:08.131958Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:47:08.132202Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:08.132436Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:47:08.132737Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 
72057594046678944 2025-05-07T08:47:08.132796Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:08.135563Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:47:08.135707Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:47:08.135933Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:08.135998Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:47:08.136051Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:47:08.136098Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:47:08.138064Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:08.138128Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:47:08.138203Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:47:08.140189Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:08.140252Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:08.140311Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:47:08.140385Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:47:08.144529Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:47:08.146986Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:47:08.147220Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 
FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:47:08.148332Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:47:08.148486Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:47:08.148535Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:47:08.148824Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:47:08.148876Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:47:08.149045Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:47:08.149142Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:47:08.151714Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:47:08.151783Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:47:08.152026Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:47:08.152080Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 
peration: CREATE USER, path: /MyRoot 2025-05-07T08:47:15.624113Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:47:15.624184Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 101, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:47:15.624420Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:47:15.624485Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [5:203:2205], at schemeshard: 72057594046678944, txId: 101, path id: 1 2025-05-07T08:47:15.625135Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 4 PathOwnerId: 72057594046678944, cookie: 101 2025-05-07T08:47:15.625278Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 4 PathOwnerId: 72057594046678944, cookie: 101 2025-05-07T08:47:15.625332Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 101 2025-05-07T08:47:15.625384Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 4 2025-05-07T08:47:15.625441Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:47:15.625570Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 101, subscribers: 0 2025-05-07T08:47:15.635008Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 TestModificationResult got TxId: 101, wait until txId: 101 2025-05-07T08:47:15.635498Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:43: TTxLogin Execute at schemeshard: 72057594046678944 2025-05-07T08:47:15.635544Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:46: TTxLogin RotateKeys at schemeshard: 72057594046678944 2025-05-07T08:47:15.863512Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:85: TTxLogin Complete, result: Error: "Invalid password", at schemeshard: 72057594046678944 2025-05-07T08:47:15.863669Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:47:15.863724Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 0, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:47:15.863939Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 
2025-05-07T08:47:15.863990Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [5:203:2205], at schemeshard: 72057594046678944, txId: 0, path id: 1 2025-05-07T08:47:15.864555Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 0 2025-05-07T08:47:15.864885Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:43: TTxLogin Execute at schemeshard: 72057594046678944 2025-05-07T08:47:15.878939Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:85: TTxLogin Complete, result: Error: "Invalid password", at schemeshard: 72057594046678944 2025-05-07T08:47:15.879373Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:43: TTxLogin Execute at schemeshard: 72057594046678944 2025-05-07T08:47:15.887098Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:85: TTxLogin Complete, result: Error: "Invalid password", at schemeshard: 72057594046678944 2025-05-07T08:47:15.887524Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:43: TTxLogin Execute at schemeshard: 72057594046678944 2025-05-07T08:47:15.899477Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:85: TTxLogin Complete, result: Error: "Invalid password", at schemeshard: 72057594046678944 2025-05-07T08:47:15.899989Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:43: TTxLogin Execute at schemeshard: 72057594046678944 2025-05-07T08:47:15.900228Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:85: TTxLogin Complete, result: Error: "User user1 login denied: too many failed password attempts", at schemeshard: 72057594046678944 2025-05-07T08:47:15.900650Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:43: TTxLogin Execute at schemeshard: 72057594046678944 2025-05-07T08:47:15.900780Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:85: TTxLogin Complete, result: Error: "User user1 login denied: too many failed password attempts", at schemeshard: 72057594046678944 2025-05-07T08:47:15.901311Z node 5 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T08:47:15.901539Z node 5 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 281us result status StatusSuccess 2025-05-07T08:47:15.902061Z node 5 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 1 SecurityStateVersion: 2 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 
72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { PublicKeys { KeyId: 1 KeyDataPEM: "-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAxz0lq22/j2r5dXSGmVwK\nCdaHpJOAKS8/Rk709gTn4iCa4Lb2h7kpPEBHTBfAoHKovP1IeHO7rzuF6+ov8qLa\nrH2OUnos5mANuV2ZJz+VI20C8tjHOuDf+z5cMJ9riOTuYEEpY/L9t0KFfAqRMhKP\nXFwOaAF67njkGEMl68k3AeXhdeECuf5qxnawJZ8V5ejH0zr+brUNN+3K1P0eGlvW\nCwlMxRkYTi10Um8/eJcyxDEqr+cYcyn+3TMWuejuUjBs5RdCzaYQMzbiHUbbe3hI\nEXXi5SQbw8+ofTAWY5dfAg0pbm7w98r0SdhlfktHgTunnO8etHevfPkbVvMwIomg\n0wIDAQAB\n-----END PUBLIC KEY-----\n" ExpiresAt: 1746694035854 } Sids { Name: "user1" Type: USER } Audience: "/MyRoot" } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:47:19.906565Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:43: TTxLogin Execute at schemeshard: 72057594046678944 2025-05-07T08:47:19.931030Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:85: TTxLogin Complete, result: Error: "Invalid password", at schemeshard: 72057594046678944 2025-05-07T08:47:19.931598Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:43: TTxLogin Execute at schemeshard: 72057594046678944 2025-05-07T08:47:19.971929Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:85: TTxLogin Complete, result: Token: "eyJhbGciOiJQUzI1NiIsImtpZCI6IjEifQ.eyJhdWQiOlsiXC9NeVJvb3QiXSwiZXhwIjoxNzQ2NjUwODM5LCJpYXQiOjE3NDY2MDc2MzksInN1YiI6InVzZXIxIn0.i3iKsmAuWdQVxtVR3GuCkCLuZYHWqq4zzFSUoyCB3l6dq6vYo9uEJ-PG7h0wfsGzUkdJpzZuGp9_MJ0-ocEBDj3nQ07Lnhg3hxUa5wqT7bvSRz7DusgCi7XnPUO5EoQb9c5WH9UxdhxB94sYyBRTehar9anqN1siD16IKO3Tf6z4uMQBaenk7CDb0D-7_WxPjzmOLbfw10Kz7M_o9sl54Gvx_jiP-c71axgjW-f_8lIAjk-KEtwqTa5Lwip5ZISs3taso_YTWBLb4O1qDrkt7PQQCOi8HCvEom5G1WfJiCrwHn24RGrYqOSRKOqKpNPyRmSDWNVAyLLSE6oE-dstlw" SanitizedToken: "eyJhbGciOiJQUzI1NiIsImtpZCI6IjEifQ.eyJhdWQiOlsiXC9NeVJvb3QiXSwiZXhwIjoxNzQ2NjUwODM5LCJpYXQiOjE3NDY2MDc2MzksInN1YiI6InVzZXIxIn0.**" IsAdmin: true, at schemeshard: 72057594046678944 2025-05-07T08:47:19.972611Z node 5 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T08:47:19.972863Z node 5 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 251us result status StatusSuccess 2025-05-07T08:47:19.973384Z node 5 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 1 SecurityStateVersion: 2 } ChildrenExist: false } 
DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { PublicKeys { KeyId: 1 KeyDataPEM: "-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAxz0lq22/j2r5dXSGmVwK\nCdaHpJOAKS8/Rk709gTn4iCa4Lb2h7kpPEBHTBfAoHKovP1IeHO7rzuF6+ov8qLa\nrH2OUnos5mANuV2ZJz+VI20C8tjHOuDf+z5cMJ9riOTuYEEpY/L9t0KFfAqRMhKP\nXFwOaAF67njkGEMl68k3AeXhdeECuf5qxnawJZ8V5ejH0zr+brUNN+3K1P0eGlvW\nCwlMxRkYTi10Um8/eJcyxDEqr+cYcyn+3TMWuejuUjBs5RdCzaYQMzbiHUbbe3hI\nEXXi5SQbw8+ofTAWY5dfAg0pbm7w98r0SdhlfktHgTunnO8etHevfPkbVvMwIomg\n0wIDAQAB\n-----END PUBLIC KEY-----\n" ExpiresAt: 1746694035854 } Sids { Name: "user1" Type: USER } Audience: "/MyRoot" } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> THealthCheckTest::Basic [GOOD] >> THealthCheckTest::BasicNodeCheckRequest >> Yq_1::Basic_Null >> Yq_1::DescribeConnection >> TSchemeShardTest::InitRootAgain [GOOD] >> TSchemeShardTest::InitRootWithOwner >> TSchemeShardTest::CreateTable [GOOD] >> TSchemeShardTest::CreateTableWithDate >> TSchemeShardTest::TopicMeteringMode [GOOD] >> TSchemeShardTest::Restart >> PrivateApi::PingTask >> TSchemeShardCheckProposeSize::CopyTable [GOOD] >> TSchemeShardCheckProposeSize::CopyTables >> Yq_1::Basic >> TSchemeShardPgTypesInTables::CreateTableWithPgTypeColumn-EnableTablePgTypes-true [GOOD] >> TSchemeShardTest::AlterTableAndConcurrentSplit >> TSchemeShardTest::ConsistentCopyTableAwait [GOOD] >> TSchemeShardTest::ConsistentCopyTableRejects >> TPDiskRaces::OwnerKilledWhileReadingLog [GOOD] >> TPDiskRaces::OwnerKilledWhileReadingLogAndThenKillLastOwner >> TSchemeShardTest::Boot [GOOD] >> TSchemeShardTest::AlterTableKeyColumns >> Yq_1::CreateQuery_With_Idempotency >> TSchemeShardTest::InitRootWithOwner [GOOD] >> TSchemeShardTest::DropTableTwice >> BlobDepot::DecommitPutAndRead [GOOD] >> BlobDepot::DecommitVerifiedRandom >> Yq_1::ListConnections >> BSCRestartPDisk::RestartOneByOneWithReconnects [GOOD] >> TSchemeShardTest::MkRmDir [GOOD] >> TSchemeShardTest::PathName >> TSchemeShardTest::CreateIndexedTable [GOOD] >> TSchemeShardTest::CreateAlterTableWithCodec >> TSchemeShardTest::Restart [GOOD] >> TSchemeShardTest::SchemeErrors ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_restart_pdisk/unittest >> BSCRestartPDisk::RestartOneByOneWithReconnects [GOOD] Test command err: RandomSeed# 3883421661371197409 >> TSchemeShardLoginTest::ChangeAccountLockoutParameters [GOOD] >> TSchemeShardLoginTest::CheckThatLockedOutParametersIsRestoredFromLocalDb >> THealthCheckTest::BasicNodeCheckRequest [GOOD] >> THealthCheckTest::BlueGroupIssueWhenPartialGroupStatusAndReplicationDisks >> TSchemeShardTest::DropTableTwice [GOOD] >> TSchemeShardTest::IgnoreUserColumnIds >> TSchemeShardTest::AlterTableKeyColumns [GOOD] >> TSchemeShardTest::AlterTableFollowers >> TYardTest::TestUpsAndDownsAtTheBoundary [GOOD] >> TYardTest::TestUnflushedChunk >> 
TSchemeShardTest::AlterTableAndConcurrentSplit [GOOD] >> TSchemeShardTest::AlterTable >> Yq_1::ModifyConnections >> TSchemeShardTest::PathName [GOOD] >> TSchemeShardTest::PathName_SetLocale >> TSchemeShardTest::SchemeErrors [GOOD] >> TSchemeShardTest::SerializedCellVec >> TSchemeShardTest::SerializedCellVec [GOOD] >> TSchemeShardTest::UpdateChannelsBindingSolomonShouldNotUpdate >> TSchemeShardTest::CreateAlterTableWithCodec [GOOD] >> TSchemeShardTest::CreateTableWithDate [GOOD] >> TSchemeShardTest::CreateIndexedTableRejects >> TSchemeShardTest::CopyTableTwiceSimultaneously |88.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/client/ut/ydb-core-client-ut >> TSchemeShardTest::PathName_SetLocale [GOOD] >> TSchemeShardTest::ModifyACL |88.4%| [LD] {RESULT} $(B)/ydb/core/client/ut/ydb-core-client-ut |88.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/client/ut/ydb-core-client-ut >> TSchemeShardTest::AlterTable [GOOD] >> TSchemeShardTest::AlterTableDropColumnReCreateSplit >> TSchemeShardTest::UpdateChannelsBindingSolomonShouldNotUpdate [GOOD] >> TSchemeShardTest::UpdateChannelsBindingSolomonShouldUpdate >> TSchemeShardTest::ConsistentCopyTableRejects [GOOD] >> TSchemeShardTest::ConsistentCopyTableToDeletedPath >> TSchemeShardTest::ModifyACL [GOOD] >> TSchemeShardTest::NameFormat >> TSchemeShardTest::IgnoreUserColumnIds [GOOD] >> TSchemeShardTest::DropTableAndConcurrentSplit >> TSchemeShardMoveTest::MoveIndexSameDst >> TSchemeShardTest::DropPQ [GOOD] >> TSchemeShardTest::DropBlockStoreVolume >> TSchemeShardLoginTest::CheckThatLockedOutParametersIsRestoredFromLocalDb [GOOD] >> TSchemeShardTest::UpdateChannelsBindingSolomonShouldUpdate [GOOD] >> TSchemeShardTest::UpdateChannelsBindingSolomonStorageConfig >> TSchemeShardTest::AlterTableFollowers [GOOD] >> TSchemeShardTest::AlterTableSizeToSplit >> BSCRestartPDisk::RestartOneByOne [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_login/unittest >> TSchemeShardLoginTest::CheckThatLockedOutParametersIsRestoredFromLocalDb [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140] 2025-05-07T08:47:07.723156Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:47:07.723278Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:47:07.723347Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:47:07.723401Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:47:07.723451Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:47:07.723485Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 
2025-05-07T08:47:07.723554Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:47:07.723645Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:47:07.724518Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:47:07.724972Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:47:07.820203Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:47:07.820294Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:47:07.840304Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:47:07.840559Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:47:07.840786Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:47:07.848092Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:47:07.848483Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:47:07.849270Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:47:07.849489Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:47:07.853010Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:47:07.854386Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:47:07.854453Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:47:07.854542Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:47:07.854601Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:47:07.854659Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:47:07.854896Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:47:07.861658Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2150] sender: 
[1:238:2058] recipient: [1:15:2062] 2025-05-07T08:47:08.043033Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:47:08.043259Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:08.043466Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:47:08.043698Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:47:08.043757Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:08.046867Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:47:08.047005Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:47:08.047187Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:08.047250Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:47:08.047310Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:47:08.047353Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:47:08.049856Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:08.049931Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:47:08.049997Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:47:08.051958Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:08.052019Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:08.052076Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:47:08.052139Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:47:08.055771Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:47:08.057923Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:47:08.058182Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:47:08.059162Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:47:08.059299Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:47:08.059344Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:47:08.059696Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:47:08.059748Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:47:08.059920Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:47:08.059992Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:47:08.062032Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:47:08.062093Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:47:08.062299Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:47:08.062341Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 
5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:47:41.403789Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:47:41.403834Z node 5 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:47:41.404965Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:47:41.410929Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1383: TTxInit for Paths, read records: 1, at schemeshard: 72057594046678944 2025-05-07T08:47:41.411906Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1457: TTxInit for UserAttributes, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:47:41.412197Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1483: TTxInit for UserAttributesAlterData, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:47:41.412699Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1785: TTxInit for Tables, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:47:41.412809Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__root_data_erasure_manager.cpp:450: [RootDataErasureManager] Restore: Generation# 0, Status# 0, WakeupInterval# 604800 s, NumberDataErasureTenantsInRunning# 0 2025-05-07T08:47:41.413065Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2011: TTxInit for Columns, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:47:41.413213Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2071: TTxInit for ColumnsAlters, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:47:41.413319Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2129: TTxInit for Shards, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:47:41.413460Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2215: TTxInit for TablePartitions, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:47:41.413566Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2281: TTxInit for TableShardPartitionConfigs, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:47:41.413759Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2431: TTxInit for ChannelsBinding, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:47:41.414117Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2810: TTxInit for TableIndexes, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:47:41.414282Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2889: TTxInit for TableIndexKeys, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:47:41.414721Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3387: TTxInit for KesusInfos, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:47:41.414816Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3423: TTxInit for KesusAlters, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:47:41.414999Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3637: TTxInit for TxShards, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:47:41.415113Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3782: TTxInit for ShardToDelete, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:47:41.415226Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3799: TTxInit for BackupSettings, read records: 0, at 
schemeshard: 72057594046678944 2025-05-07T08:47:41.415510Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3959: TTxInit for ShardBackupStatus, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:47:41.415618Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3975: TTxInit for CompletedBackup, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:47:41.415782Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4260: TTxInit for Publications, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:47:41.416067Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4558: IndexBuild , records: 0, at schemeshard: 72057594046678944 2025-05-07T08:47:41.416279Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4705: SnapshotTables: snapshots: 0 tables: 0, at schemeshard: 72057594046678944 2025-05-07T08:47:41.416350Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4732: SnapshotSteps: snapshots: 0, at schemeshard: 72057594046678944 2025-05-07T08:47:41.416426Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4759: LongLocks: records: 0, at schemeshard: 72057594046678944 2025-05-07T08:47:41.471793Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:47:41.472244Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:47:41.477244Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:47:41.477649Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:47:41.478043Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:47:41.478879Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594046678944 is [5:369:2338] sender: [5:426:2058] recipient: [5:15:2062] 2025-05-07T08:47:41.547223Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:43: TTxLogin Execute at schemeshard: 72057594046678944 2025-05-07T08:47:41.547615Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:46: TTxLogin RotateKeys at schemeshard: 72057594046678944 2025-05-07T08:47:41.674164Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:85: TTxLogin Complete, result: Error: "User user1 login denied: too many failed password attempts", at schemeshard: 72057594046678944 2025-05-07T08:47:41.674945Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:47:41.675373Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 0, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:47:41.677221Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:47:41.690249Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [5:420:2378], at schemeshard: 72057594046678944, txId: 0, path id: 1 2025-05-07T08:47:41.693854Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 4 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 0 2025-05-07T08:47:43.707997Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:43: TTxLogin Execute at schemeshard: 72057594046678944 2025-05-07T08:47:43.979384Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:85: TTxLogin Complete, result: Token: "eyJhbGciOiJQUzI1NiIsImtpZCI6IjMifQ.eyJhdWQiOlsiXC9NeVJvb3QiXSwiZXhwIjoxNzQ2NjUwODYzLCJpYXQiOjE3NDY2MDc2NjMsInN1YiI6InVzZXIxIn0.WW6LyW901Hac64TFer-WQmZ7IFLgRMMDSQbgscQ5wNudTzTuLJazPQzTdvFFU7LtnRKDb1erqNMnjEUwOcM5NE0xOrZITZgTmW9ecIAPiaSiCvdMY2LqYHjmeSGIpUYNRyvUl8b8aKzMQiz8SJdjbV5pHKNvxfXcky1tnWosHVz_JAIOQJm_4XEcPPibi4z574dIbLeVI2_7lKK4cFfVqkNCZfEVGzPETO8Rgtu0e5bMxUD5Ug0QzRJG46ukbXjtV2VoQT60rv8_7ugN9GFj7CuU_bVdhhjdZHboUFAgRFWFi3FEHUQovnugpJ5WuOcjz5V6HxOPcT3FsIXRGp6fgA" SanitizedToken: "eyJhbGciOiJQUzI1NiIsImtpZCI6IjMifQ.eyJhdWQiOlsiXC9NeVJvb3QiXSwiZXhwIjoxNzQ2NjUwODYzLCJpYXQiOjE3NDY2MDc2NjMsInN1YiI6InVzZXIxIn0.**" IsAdmin: true, at schemeshard: 72057594046678944 2025-05-07T08:47:43.996697Z node 5 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T08:47:43.998399Z node 5 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 2.85ms result status StatusSuccess 2025-05-07T08:47:44.000741Z node 5 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 1 SecurityStateVersion: 4 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { PublicKeys { KeyId: 1 KeyDataPEM: "-----BEGIN PUBLIC 
KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAoy8ZKsfDbv0UIEtrVQvx\nSOBOJ1wawWBi43JD0P0Q6PZ81dHPZocinqMFeshG6b/tBLfo/Xd2uszmLEjFCYkB\n6f4hbDCnl7IBVjI32K/petZbRccbrieQUORes5vQuNaq2mf3XI9eEfYArqE8Penk\nQALpQXIs4rfQ3Bm3BtSMcrd2yEjDpeg+vwsTpq/vSdzdgnLixdeYvulrnITMIvu8\nEqzfeJZY8I41HTrnV4lLF5aY646Jq3eRpS2WN1reXm+6TYToxdVml792cSfUEDDt\nQ6q/1mjginkQQmekwFCg8wLyZQvl2fThnW2DVl3ZnOQLi0dya6WSXH6a92VNIncI\ncQIDAQAB\n-----END PUBLIC KEY-----\n" ExpiresAt: 1746694058271 } PublicKeys { KeyId: 2 KeyDataPEM: "-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA5iVUAY2SzBS3UsFXMR+F\n6h6lE8y2k5gl1gOrrkzi2TY0aFX+B7+lhgkI1aWn8318EImCrefFYu0Sf0MM0YIR\nEpPGmi6LLenQEGFeBAl+nTED5OxFVFL85hjex0NnVgZOIiY3J796xx2M38DwCNpZ\ny3Rb9lOX8eoQBKD8XXmObXVq2zck6wpB6HHYkqAo6C800bneHT6xWRmxSX29Qd41\nwCCD3bSrQzpeE70TdIuCABVbWRUj/ve/58GnFDPtOsqkqKIcLzvRb8/nmbWpKQmW\nl7ObbKb382dYUJJMQSLEADiC3eIktnZ/POtlKNBxD4eBkz6Gs2WdrQ3KLQkg4BW2\n9QIDAQAB\n-----END PUBLIC KEY-----\n" ExpiresAt: 1746694058723 } PublicKeys { KeyId: 3 KeyDataPEM: "-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA01Yz6Wx1YIshnSq0rARH\nQdz1win/Ysj7NRhuUvCA6cfBGS2m5jvRH+pyFF+upKcLA7LpicwcmBr1oW2HW35b\nbebAnlLfcPR9U96zIfwgRn5OduZKtBGdoiWI0dh45Mdgoi5O6210dpadk7fBib2J\nX5TeEj118sE3oY4QPkGsoTqBgxBtTdpGqtFAYYmJJDZ6Kdqma7vGGF9qBa3TZh47\ncpyGvRFz72rt8gETz8nxKx6m4SCe5SS3xJonoxL6ajO3lPunNNWJ5xrOkLBWvdjF\nU127GbiWedZ/wFrPOvatbBf0BNhox02N0ccl32h+PcGwOg6EJ5RiE/ngm25d6JMe\nEQIDAQAB\n-----END PUBLIC KEY-----\n" ExpiresAt: 1746694061635 } Sids { Name: "user1" Type: USER } Audience: "/MyRoot" } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TSchemeShardTest::CopyTableTwiceSimultaneously [GOOD] >> TSchemeShardTest::CopyTableWithAlterConfig >> test.py::test[solomon-InvalidProject-] [GOOD] >> test.py::test[solomon-LabelColumns-default.txt] ------- [TS] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_blob_depot/unittest >> BlobDepot::DecommitVerifiedRandom 2025-05-07 08:47:40,147 WARNING devtools.ya.test.programs.test_tool.run_test.run_test: Wrapper execution timed out 2025-05-07 08:47:40,439 WARNING devtools.ya.test.programs.test_tool.run_test.run_test: Wrapper has overrun 60 secs timeout. 
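The two WARNING lines above come from the test wrapper's watchdog: the test binary exceeded its 60-second budget, so the wrapper logs the overrun and dumps the process tree below before terminating it. A minimal sketch of this kind of poll-with-deadline loop, for orientation only — the names here (`poll_fn`, `sleep_time`) are assumptions, not yatest's actual API; the real implementation lives in `yatest/common/process.py`, as the traceback further down shows:

```python
import time

def wait_for(poll_fn, timeout, message, sleep_time=0.1):
    """Poll poll_fn until it returns truthy or the deadline expires."""
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        if poll_fn():
            return True
        time.sleep(sleep_time)
    # Deadline hit: report the overrun, as in the WARNING lines above.
    raise TimeoutError("%d second(s) wait timeout has expired: %s" % (timeout, message))
```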
Process tree before termination:
  pid    rss    ref    pdirt
  99161  46.2M  46.2M  23.3M  test_tool run_ut @/home/runner/.ya/build/build_root/zvgn/0016a8/ydb/core/blobstorage/ut_blobstorage/ut_blob_depot/test-results/unittest/testing_out_stuff/test_tool.args
  99412  818M   805M   645M   └─ ydb-core-blobstorage-ut_blobstorage-ut_blob_depot --trace-path-append /home/runner/.ya/build/build_root/zvgn/0016a8/ydb/core/blobstorage/ut_blobstorage/ut_blob_depot/test
Test command err: Mersenne random seed 4151772416 RandomSeed# 75611806994464188 Mersenne random seed 1739054480 Mersenne random seed 3112435384 Mersenne random seed 2231235466 Mersenne random seed 378135982 2025-05-07T08:46:52.366143Z 1 00h00m25.012048s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) TEvVPut: failed to pass the Hull check; id# [15:1:1:0:1:100:1] status# {Status# BLOCKED} Marker# BSVS03 2025-05-07T08:46:52.366324Z 3 00h00m25.012048s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) TEvVPut: failed to pass the Hull check; id# [15:1:1:0:1:100:3] status# {Status# BLOCKED} Marker# BSVS03 2025-05-07T08:46:52.366387Z 4 00h00m25.012048s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) TEvVPut: failed to pass the Hull check; id# [15:1:1:0:1:100:3] status# {Status# BLOCKED} Marker# BSVS03 2025-05-07T08:46:52.366449Z 7 00h00m25.012048s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) TEvVPut: failed to pass the Hull check; id# [15:1:1:0:1:100:3] status# {Status# BLOCKED} Marker# BSVS03 2025-05-07T08:46:52.366510Z 8 00h00m25.012048s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) TEvVPut: failed to pass the Hull check; id# [15:1:1:0:1:100:3] status# {Status# BLOCKED} Marker# BSVS03 2025-05-07T08:46:52.366569Z 2 00h00m25.012048s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) TEvVPut: failed to pass the Hull check; id# [15:1:1:0:1:100:2] status# {Status# BLOCKED} Marker# BSVS03 2025-05-07T08:46:52.366647Z 6 00h00m25.012048s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) TEvVPut: failed to pass the Hull check; id# [15:1:1:0:1:100:1] status# {Status# BLOCKED} Marker# BSVS03 2025-05-07T08:46:52.366710Z 5 00h00m25.012048s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) TEvVPut: failed to pass the Hull check; id# [15:1:1:0:1:100:1] status# {Status# BLOCKED} Marker# BSVS03 2025-05-07T08:46:52.367035Z 1 00h00m25.012048s :BS_PROXY_PUT ERROR: [1f4a8c593b3d084f] Result# TEvPutResult {Id# [15:1:1:0:1:100:0] Status# BLOCKED StatusFlags# { } ErrorReason# "Got VPutResult status# BLOCKED from VDiskId# [82000000:1:0:0:0]" ApproximateFreeSpaceShare# 0} GroupId# 2181038080 Marker# BPP12 2025-05-07T08:46:52.368224Z 1 00h00m25.012048s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) TEvVPut: failed to pass the Hull check; id# [15:3:1:0:1:100:2] status# {Status# BLOCKED} Marker# BSVS03 2025-05-07T08:46:52.368379Z 2 00h00m25.012048s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) TEvVPut: failed to pass the Hull check; id# [15:3:1:0:1:100:3] status# {Status# BLOCKED} Marker# BSVS03 2025-05-07T08:46:52.368435Z 3 00h00m25.012048s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) TEvVPut: failed to pass the Hull check; id# [15:3:1:0:1:100:3] status# {Status# BLOCKED} Marker# BSVS03 2025-05-07T08:46:52.368489Z 6 00h00m25.012048s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) TEvVPut: failed
to pass the Hull check; id# [15:3:1:0:1:100:3] status# {Status# BLOCKED} Marker# BSVS03 2025-05-07T08:46:52.368541Z 7 00h00m25.012048s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) TEvVPut: failed to pass the Hull check; id# [15:3:1:0:1:100:3] status# {Status# BLOCKED} Marker# BSVS03 2025-05-07T08:46:52.368606Z 5 00h00m25.012048s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) TEvVPut: failed to pass the Hull check; id# [15:3:1:0:1:100:1] status# {Status# BLOCKED} Marker# BSVS03 2025-05-07T08:46:52.368662Z 8 00h00m25.012048s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) TEvVPut: failed to pass the Hull check; id# [15:3:1:0:1:100:1] status# {Status# BLOCKED} Marker# BSVS03 2025-05-07T08:46:52.368718Z 4 00h00m25.012048s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) TEvVPut: failed to pass the Hull check; id# [15:3:1:0:1:100:1] status# {Status# BLOCKED} Marker# BSVS03 2025-05-07T08:46:52.406330Z 1 00h00m25.012048s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) TEvVPut: failed to pass the Hull check; id# [16:2:2:0:2:100:3] status# {Status# BLOCKED} Marker# BSVS03 2025-05-07T08:46:52.406574Z 5 00h00m25.012048s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) TEvVPut: failed to pass the Hull check; id# [16:2:2:0:2:100:3] status# {Status# BLOCKED} Marker# BSVS03 2025-05-07T08:46:52.406638Z 6 00h00m25.012048s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) TEvVPut: failed to pass the Hull check; id# [16:2:2:0:2:100:3] status# {Status# BLOCKED} Marker# BSVS03 2025-05-07T08:46:52.406711Z 4 00h00m25.012048s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) TEvVPut: failed to pass the Hull check; id# [16:2:2:0:2:100:2] status# {Status# BLOCKED} Marker# BSVS03 2025-05-07T08:46:52.406767Z 3 00h00m25.012048s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) TEvVPut: failed to pass the Hull check; id# [16:2:2:0:2:100:1] status# {Status# BLOCKED} Marker# BSVS03 2025-05-07T08:46:52.406835Z 8 00h00m25.012048s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) TEvVPut: failed to pass the Hull check; id# [16:2:2:0:2:100:1] status# {Status# BLOCKED} Marker# BSVS03 2025-05-07T08:46:52.406896Z 7 00h00m25.012048s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) TEvVPut: failed to pass the Hull check; id# [16:2:2:0:2:100:1] status# {Status# BLOCKED} Marker# BSVS03 2025-05-07T08:46:52.406950Z 2 00h00m25.012048s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) TEvVPut: failed to pass the Hull check; id# [16:2:2:0:2:100:3] status# {Status# BLOCKED} Marker# BSVS03 2025-05-07T08:46:52.407201Z 1 00h00m25.012048s :BS_PROXY_PUT ERROR: [3f15f228afeea273] Result# TEvPutResult {Id# [16:2:2:0:2:100:0] Status# BLOCKED StatusFlags# { } ErrorReason# "Got VPutResult status# BLOCKED from VDiskId# [82000000:1:0:0:0]" ApproximateFreeSpaceShare# 0} GroupId# 2181038080 Marker# BPP12 Mersenne random seed 1381717989 Read over the barrier, blob id# [15:1:1:0:1:100:0] Read over the barrier, blob id# [15:1:2:0:1:100:0] 2025-05-07T08:46:55.136537Z 1 00h00m25.012048s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [15 0 2 1 soft] barrier# 1:2 new key# [15 0 2 2 soft] barrier# 1:1 2025-05-07T08:46:55.136958Z 2 00h00m25.012048s :BS_HULLRECS CRIT: PDiskId# 1000 
VDISK[82000000:_:0:1:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [15 0 2 1 soft] barrier# 1:2 new key# [15 0 2 2 soft] barrier# 1:1 2025-05-07T08:46:55.137077Z 3 00h00m25.012048s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [15 0 2 1 soft] barrier# 1:2 new key# [15 0 2 2 soft] barrier# 1:1 2025-05-07T08:46:55.137170Z 4 00h00m25.012048s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [15 0 2 1 soft] barrier# 1:2 new key# [15 0 2 2 soft] barrier# 1:1 2025-05-07T08:46:55.137280Z 5 00h00m25.012048s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [15 0 2 1 soft] barrier# 1:2 new key# [15 0 2 2 soft] barrier# 1:1 2025-05-07T08:46:55.137382Z 6 00h00m25.012048s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [15 0 2 1 soft] barrier# 1:2 new key# [15 0 2 2 soft] barrier# 1:1 2025-05-07T08:46:55.137486Z 7 00h00m25.012048s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [15 0 2 1 soft] barrier# 1:2 new key# [15 0 2 2 soft] barrier# 1:1 2025-05-07T08:46:55.137567Z 8 00h00m25.012048s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [15 0 2 1 soft] barrier# 1:2 new key# [15 0 2 2 soft] barrier# 1:1 Put over the barrier, blob id# [15:1:1:0:99:100:0] Put over the barrier, blob id# [15:1:3:0:99:100:0] 2025-05-07T08:46:55.232556Z 1 00h00m25.012048s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [15 0 2 3 hard] barrier# 1:3 new key# [15 0 2 4 hard] barrier# 1:1 2025-05-07T08:46:55.232877Z 2 00h00m25.012048s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [15 0 2 3 hard] barrier# 1:3 new key# [15 0 2 4 hard] barrier# 1:1 2025-05-07T08:46:55.232960Z 3 00h00m25.012048s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [15 0 2 3 hard] barrier# 1:3 new key# [15 0 2 4 hard] barrier# 1:1 2025-05-07T08:46:55.233046Z 4 00h00m25.012048s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [15 0 2 3 hard] barrier# 1:3 new key# [15 0 2 4 hard] barrier# 1:1 2025-05-07T08:46:55.233124Z 5 00h00m25.012048s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [15 0 2 3 hard] barrier# 1:3 new key# [15 0 2 4 hard] barrier# 1:1 2025-05-07T08:46:55.233210Z 6 00h00m25.012048s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [15 0 2 3 hard] barrier# 1:3 new key# [15 0 2 4 hard] barrier# 1:1 2025-05-07T08:46:55.233292Z 7 00h00m25.012048s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [15 0 2 3 hard] barrier# 1:3 new key# [15 0 2 4 hard] barrier# 1:1 2025-05-07T08:46:55.233366Z 8 00h00m25.012048s :BS_HULLRECS CRIT: PDiskId# 
1000 VDISK[82000000:_:0:7:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [15 0 2 3 hard] barrier# 1:3 new key# [15 0 2 4 hard] barrier# 1:1 Read over the barrier, blob id# [15:1:5:0:1:100:0] Read over the barrier, blob id# [15:1:6:0:1:100:0] Read over the barrier, blob id# [15:1:19:0:1:100:0] Read over the barrier, blob id# [15:2:1:0:1:100:0] Read over the barrier, blob id# [15:2:2:0:1:100:0] TEvRange returned collected blob with id# [15:1:17:0:1:100:0] TEvRange returned collected blob with id# [15:1:19:0:1:100:0] TEvRange returned collected blob with id# [15:2:1:0:1:100:0] TEvRange returned collected blob with id# [15:2:2:0:1:100:0] TEvRange returned collected blob with id# [15:2:3:0:1:100:0] TEvRange returned collected blob with id# [15:2:4:0:1:100:0] TEvRange returned collected blob with id# [15:2:5:0:1:100:0] TEvRange returned collected blob with id# [15:2:6:0:1:100:0] Read over the barrier, blob id# [100:1:3:0:1:100:0] Read over the barrier, blob id# [100:1:5:0:1:100:0] Read over the barrier, blob id# [100:1:6:0:1:100:0] Read over the barrier, blob id# [100:2:1:0:1:100:0] Read over the barrier, blob id# [100:2:2:0:1:100:0] TEvRange returned collected blob with id# [100:2:2:0:1:100:0] TEvRange returned collected blob with id# [100:2:3:0:1:100:0] TEvRange returned collected blob with id# [100:2:4:0:1:100:0] TEvRange returned collected blob with id# [100:2:5:0:1:100:0] TEvRange returned collected blob with id# [100:2:6:0:1:100:0] Mersenne random seed 1403659207 Read over the barrier, blob id# [101:2:6:1:2511094:181:0] Read over the barrier, blob id# [100:1:2:1:10733157:88:0] TEvRange returned collected blob with id# [101:2:6:1:2511094:181:0] Read over the barrier, blob id# [101:2:5:0:6491076:958:0] TEvRange returned collected blob with id# ... 
idateGCCmd: decreasing barrier: existing key# [16 1 3 0 hard] barrier# 3:0 new key# [16 1 9 1 hard] barrier# 2:0 2025-05-07T08:47:38.438310Z 8 00h00m25.012048s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [16 1 3 0 hard] barrier# 3:0 new key# [16 1 9 1 hard] barrier# 2:0 TEvRange returned collected blob with id# [16:2:5:2:6580849:169:0] TEvRange returned collected blob with id# [16:2:5:2:12393544:709:0] 2025-05-07T08:47:38.760146Z 2 00h00m25.012048s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Db# Barriers ValidateGCCmd: incorrect collect cmd: tabletID# 17 key# [17 2 12 0 hard] existing barrier# 0:2 new barrier# 0:1 2025-05-07T08:47:38.761013Z 1 00h00m25.012048s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Db# Barriers ValidateGCCmd: incorrect collect cmd: tabletID# 17 key# [17 2 12 0 hard] existing barrier# 0:2 new barrier# 0:1 2025-05-07T08:47:38.761162Z 3 00h00m25.012048s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Db# Barriers ValidateGCCmd: incorrect collect cmd: tabletID# 17 key# [17 2 12 0 hard] existing barrier# 0:2 new barrier# 0:1 2025-05-07T08:47:38.761272Z 4 00h00m25.012048s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) Db# Barriers ValidateGCCmd: incorrect collect cmd: tabletID# 17 key# [17 2 12 0 hard] existing barrier# 0:2 new barrier# 0:1 2025-05-07T08:47:38.761378Z 5 00h00m25.012048s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) Db# Barriers ValidateGCCmd: incorrect collect cmd: tabletID# 17 key# [17 2 12 0 hard] existing barrier# 0:2 new barrier# 0:1 2025-05-07T08:47:38.761487Z 6 00h00m25.012048s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) Db# Barriers ValidateGCCmd: incorrect collect cmd: tabletID# 17 key# [17 2 12 0 hard] existing barrier# 0:2 new barrier# 0:1 2025-05-07T08:47:38.761607Z 7 00h00m25.012048s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) Db# Barriers ValidateGCCmd: incorrect collect cmd: tabletID# 17 key# [17 2 12 0 hard] existing barrier# 0:2 new barrier# 0:1 2025-05-07T08:47:38.761730Z 8 00h00m25.012048s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Db# Barriers ValidateGCCmd: incorrect collect cmd: tabletID# 17 key# [17 2 12 0 hard] existing barrier# 0:2 new barrier# 0:1 Read over the barrier, blob id# [16:2:5:1:2266807:420:0] Read over the barrier, blob id# [16:2:3:1:7190714:391:0] Read over the barrier, blob id# [16:2:3:1:7190714:391:0] Read over the barrier, blob id# [16:2:3:1:7982151:646:0] Read over the barrier, blob id# [16:2:4:1:195488:427:0] Read over the barrier, blob id# [16:2:3:1:7982151:646:0] Read over the barrier, blob id# [16:2:3:1:7190714:391:0] Read over the barrier, blob id# [16:2:4:1:195488:427:0] Read over the barrier, blob id# [16:2:5:2:12393544:709:0] Read over the barrier, blob id# [15:1:1:2:7863605:740:0] 2025-05-07T08:47:39.020654Z 7 00h00m25.012048s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [16 1 3 0 hard] barrier# 3:0 new key# [16 1 15 1 hard] barrier# 2:1 2025-05-07T08:47:39.020920Z 1 00h00m25.012048s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [16 1 3 0 hard] barrier# 3:0 new key# [16 1 15 1 hard] barrier# 2:1 2025-05-07T08:47:39.021042Z 2 00h00m25.012048s 
:BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [16 1 3 0 hard] barrier# 3:0 new key# [16 1 15 1 hard] barrier# 2:1 2025-05-07T08:47:39.021179Z 3 00h00m25.012048s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [16 1 3 0 hard] barrier# 3:0 new key# [16 1 15 1 hard] barrier# 2:1 2025-05-07T08:47:39.021297Z 4 00h00m25.012048s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [16 1 3 0 hard] barrier# 3:0 new key# [16 1 15 1 hard] barrier# 2:1 2025-05-07T08:47:39.021423Z 5 00h00m25.012048s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [16 1 3 0 hard] barrier# 3:0 new key# [16 1 15 1 hard] barrier# 2:1 2025-05-07T08:47:39.021539Z 6 00h00m25.012048s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [16 1 3 0 hard] barrier# 3:0 new key# [16 1 15 1 hard] barrier# 2:1 2025-05-07T08:47:39.021663Z 8 00h00m25.012048s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [16 1 3 0 hard] barrier# 3:0 new key# [16 1 15 1 hard] barrier# 2:1 TEvRange returned collected blob with id# [16:2:5:2:6580849:169:0] TEvRange returned collected blob with id# [16:2:5:2:12393544:709:0] Read over the barrier, blob id# [16:2:5:1:2266807:420:0] Read over the barrier, blob id# [16:2:3:1:7982151:646:0] Read over the barrier, blob id# [16:2:3:1:7982151:646:0] TEvRange returned collected blob with id# [17:1:1:2:14804488:309:0] 2025-05-07T08:47:39.930218Z 1 00h00m25.013072s :HIVE ERROR: HIVE#72057594037927937 THive::TTxUpdateTabletGroups::Execute{88923003494304}: tablet 72075186224037888 could not find a group for channel 0 pool test 2025-05-07T08:47:39.930576Z 1 00h00m25.013072s :HIVE ERROR: HIVE#72057594037927937 THive::TTxUpdateTabletGroups::Execute{88923003494304}: tablet 72075186224037888 could not find a group for channel 1 pool test 2025-05-07T08:47:39.930616Z 1 00h00m25.013072s :HIVE ERROR: HIVE#72057594037927937 THive::TTxUpdateTabletGroups::Execute{88923003494304}: tablet 72075186224037888 could not find a group for channel 2 pool test 2025-05-07T08:47:39.930650Z 1 00h00m25.013072s :HIVE ERROR: HIVE#72057594037927937 THive::TTxUpdateTabletGroups::Execute{88923003494304}: tablet 72075186224037888 could not find a group for channel 3 pool test Read over the barrier, blob id# [15:1:1:2:7863605:740:0] Read over the barrier, blob id# [16:2:5:2:12393544:709:0] Read over the barrier, blob id# [16:2:5:1:2266807:420:0] Read over the barrier, blob id# [16:2:5:2:6580849:169:0] Read over the barrier, blob id# [17:1:2:1:12961278:553:0] 2025-05-07T08:47:40.216922Z 5 00h00m25.013072s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [15 0 17 1 hard] barrier# 1:2 new key# [15 0 22 0 hard] barrier# 0:4 2025-05-07T08:47:40.218627Z 1 00h00m25.013072s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [15 0 17 1 hard] barrier# 1:2 new key# [15 0 22 0 hard] barrier# 0:4 2025-05-07T08:47:40.218758Z 2 00h00m25.013072s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:1:0]: 
(2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [15 0 17 1 hard] barrier# 1:2 new key# [15 0 22 0 hard] barrier# 0:4 2025-05-07T08:47:40.219232Z 3 00h00m25.013072s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [15 0 17 1 hard] barrier# 1:2 new key# [15 0 22 0 hard] barrier# 0:4 2025-05-07T08:47:40.219356Z 4 00h00m25.013072s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [15 0 17 1 hard] barrier# 1:2 new key# [15 0 22 0 hard] barrier# 0:4 2025-05-07T08:47:40.219764Z 6 00h00m25.013072s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [15 0 17 1 hard] barrier# 1:2 new key# [15 0 22 0 hard] barrier# 0:4 2025-05-07T08:47:40.219880Z 7 00h00m25.013072s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [15 0 17 1 hard] barrier# 1:2 new key# [15 0 22 0 hard] barrier# 0:4 2025-05-07T08:47:40.219992Z 8 00h00m25.013072s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [15 0 17 1 hard] barrier# 1:2 new key# [15 0 22 0 hard] barrier# 0:4 2025-05-07T08:47:40.332043Z 4 00h00m25.013072s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [17 1 18 2 hard] barrier# 2:1 new key# [17 1 20 3 hard] barrier# 2:0 2025-05-07T08:47:40.338705Z 1 00h00m25.013072s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [17 1 18 2 hard] barrier# 2:1 new key# [17 1 20 3 hard] barrier# 2:0 2025-05-07T08:47:40.339237Z 2 00h00m25.013072s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [17 1 18 2 hard] barrier# 2:1 new key# [17 1 20 3 hard] barrier# 2:0 2025-05-07T08:47:40.339379Z 3 00h00m25.013072s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [17 1 18 2 hard] barrier# 2:1 new key# [17 1 20 3 hard] barrier# 2:0 2025-05-07T08:47:40.339534Z 5 00h00m25.013072s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [17 1 18 2 hard] barrier# 2:1 new key# [17 1 20 3 hard] barrier# 2:0 2025-05-07T08:47:40.340029Z 6 00h00m25.013072s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [17 1 18 2 hard] barrier# 2:1 new key# [17 1 20 3 hard] barrier# 2:0 2025-05-07T08:47:40.340196Z 7 00h00m25.013072s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [17 1 18 2 hard] barrier# 2:1 new key# [17 1 20 3 hard] barrier# 2:0 2025-05-07T08:47:40.340316Z 8 00h00m25.013072s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [17 1 18 2 hard] barrier# 2:1 new key# [17 1 20 3 hard] barrier# 2:0 TEvRange returned collected blob with id# [16:2:5:2:6580849:169:0] TEvRange returned collected blob with id# [16:2:5:2:12393544:709:0] Traceback (most recent call last): 
File "library/python/testing/yatest_common/yatest/common/process.py", line 384, in wait wait_for( File "library/python/testing/yatest_common/yatest/common/process.py", line 764, in wait_for raise TimeoutError(truncate(message, MAX_MESSAGE_LEN)) yatest.common.process.TimeoutError: 60 second(s) wait timeout has expired: Command '['/home/runner/.ya/tools/v4/8580453620/test_tool', 'run_ut', '@/home/runner/.ya/build/build_root/zvgn/0016a8/ydb/core/blobstorage/ut_blobstorage/ut_blob_depot/test-results/unittest/testing_out_stuff/test_tool.args']' stopped by 60 seconds timeout During handling of the above exception, another exception occurred: Traceback (most recent call last): File "devtools/ya/test/programs/test_tool/run_test/run_test.py", line 1738, in main res.wait(check_exit_code=False, timeout=run_timeout, on_timeout=timeout_callback) File "library/python/testing/yatest_common/yatest/common/process.py", line 398, in wait raise ExecutionTimeoutError(self, str(e)) yatest.common.process.ExecutionTimeoutError: (("60 second(s) wait timeout has expired: Command '['/home/runner/.ya/tools/v4/8580453620/test_tool', 'run_ut', '@/home/runner/.ya/build/build_root/zvgn/0016a8/ydb/core/blobstorage/ut_blobstorage/ut_blob_depot/test-results/unittest/testing_out_stuff/test_tool.args']' stopped by 60 seconds timeout",), {}) ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_restart_pdisk/unittest >> BSCRestartPDisk::RestartOneByOne [GOOD] Test command err: RandomSeed# 4268565115116032236 >> THealthCheckTest::Issues100Groups100VCardListing [GOOD] >> THealthCheckTest::GreenStatusWhenInitPending |88.4%| [TA] $(B)/ydb/core/tx/schemeshard/ut_login/test-results/unittest/{meta.json ... results_accumulator.log} >> TSchemeShardTest::AlterTableDropColumnReCreateSplit [GOOD] >> TSchemeShardTest::AlterTableDropColumnSplitThenReCreate |88.5%| [TA] $(B)/ydb/core/blobstorage/ut_blobstorage/ut_restart_pdisk/test-results/unittest/{meta.json ... 
------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_restart_pdisk/unittest >> BSCRestartPDisk::RestartOneByOne [GOOD]
Test command err: RandomSeed# 4268565115116032236
>> THealthCheckTest::Issues100Groups100VCardListing [GOOD]
>> THealthCheckTest::GreenStatusWhenInitPending
|88.4%| [TA] $(B)/ydb/core/tx/schemeshard/ut_login/test-results/unittest/{meta.json ... results_accumulator.log}
>> TSchemeShardTest::AlterTableDropColumnReCreateSplit [GOOD]
>> TSchemeShardTest::AlterTableDropColumnSplitThenReCreate
|88.5%| [TA] $(B)/ydb/core/blobstorage/ut_blobstorage/ut_restart_pdisk/test-results/unittest/{meta.json ... results_accumulator.log}
>> THealthCheckTest::YellowGroupIssueWhenPartialGroupStatus [GOOD]
>> THealthCheckTest::TestTabletIsDead
>> TSchemeShardTest::DropBlockStoreVolume [GOOD]
>> TSchemeShardTest::DropBlockStoreVolumeWithNonReplicatedPartitions
>> THealthCheckTest::RedGroupIssueWhenDisintegratedGroupStatus [GOOD]
>> THealthCheckTest::ProtobufUnderLimitFor70LargeVdisksIssues
>> THealthCheckTest::OrangeGroupIssueWhenDegradedGroupStatus [GOOD]
>> THealthCheckTest::OnlyDiskIssueOnSpaceIssues
>> THealthCheckTest::ShardsLimit999 [GOOD]
>> THealthCheckTest::ShardsLimit995
>> TYardTest::TestUnflushedChunk [GOOD]
>> TYardTest::TestRedZoneSurvivability
>> THealthCheckTest::StaticGroupIssue [GOOD]
>> THealthCheckTest::StorageLimit50
>> THealthCheckTest::StorageLimit95 [GOOD]
>> THealthCheckTest::StorageLimit87
>> TSchemeShardTest::DropTableAndConcurrentSplit [GOOD]
>> TSchemeShardTest::DropTable
>> THealthCheckTest::Issues100GroupsListing [GOOD]
>> THealthCheckTest::Issues100VCardListing
>> TSchemeShardTest::UpdateChannelsBindingSolomonStorageConfig [GOOD]
>> TSchemeShardTest::RejectAlterSolomon
>> TSchemeShardTest::DropBlockStoreVolumeWithNonReplicatedPartitions [GOOD]
>> TSchemeShardTest::DropBlockStoreVolume2
>> TSchemeShardTest::AlterTableSizeToSplit [GOOD]
>> TSchemeShardTest::AlterTableSplitSchema
>> TSchemeShardMoveTest::MoveIndexSameDst [GOOD]
>> TSchemeShardMoveTest::MoveIntoBuildingIndex
>> BsControllerConfig::AddDriveSerial
>> TSchemeShardTest::CreateIndexedTableRejects [GOOD]
>> TSchemeShardTest::CreateIndexedTableAndForceDrop
>> TSchemeShardTest::NameFormat [GOOD]
>> TSchemeShardTest::ParallelCreateTable
|88.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_read_iterator/ydb-core-tx-datashard-ut_read_iterator
|88.4%| [TS] {RESULT} ydb/core/blobstorage/ut_blobstorage/ut_blob_depot/unittest
|88.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_read_iterator/ydb-core-tx-datashard-ut_read_iterator
|88.5%| [TA] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_login/test-results/unittest/{meta.json ... results_accumulator.log}
|88.5%| [TA] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_restart_pdisk/test-results/unittest/{meta.json ... results_accumulator.log}
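Between result blocks, the runner interleaves two kinds of bookkeeping lines visible above and below: `>> Suite::Test` marks a test being scheduled, `>> Suite::Test [GOOD]`/`[FAIL]` marks a completion, and `|NN.N%| [STAGE] ...` reports build/link progress. A small sketch that tallies such lines; the regexes are inferred from this log's visible format, not from a documented schema:

```python
import re
from collections import Counter

STATUS_RE = re.compile(r">> (\S+)(?: \[(\w+)\])?$")      # >> Suite::Test [GOOD]
PROGRESS_RE = re.compile(r"\|\s*([\d.]+)%\|\s*\[(\w+)\]")  # |88.4%| [TA] ...

def tally(lines):
    counts = Counter()
    for line in lines:
        m = STATUS_RE.match(line.strip())
        if m and m.group(2):          # completed test: [GOOD], [FAIL], ...
            counts[m.group(2)] += 1
        elif PROGRESS_RE.match(line.strip()):
            counts["progress"] += 1
    return counts

print(tally([">> BSCRestartPDisk::RestartOneByOne [GOOD]",
             ">> THealthCheckTest::GreenStatusWhenInitPending",
             "|88.4%| [TA] $(B)/ydb/core/tx/schemeshard/ut_login/..."]))
# Counter({'GOOD': 1, 'progress': 1})
```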
|88.5%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_read_iterator/ydb-core-tx-datashard-ut_read_iterator
>> TAsyncIndexTests::SplitMainWithReboots[PipeResets]
>> TSchemeShardTest::ConsistentCopyTableToDeletedPath [GOOD]
>> TSchemeShardTest::CopyIndexedTable
|88.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/mind/ut_fat/ydb-core-mind-ut_fat
|88.5%| [LD] {RESULT} $(B)/ydb/core/mind/ut_fat/ydb-core-mind-ut_fat
|88.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/mind/ut_fat/ydb-core-mind-ut_fat
>> TSchemeShardTest::AlterTableDropColumnSplitThenReCreate [GOOD]
>> TSchemeShardTest::AlterTableById
>> TSchemeShardTest::DropBlockStoreVolume2 [GOOD]
>> TSchemeShardTest::DropBlockStoreVolumeWithFillGeneration
>> TSchemeShardTest::CopyTableWithAlterConfig [GOOD]
>> TSchemeShardTest::CopyTableOmitFollowers
>> TSchemeShardMoveTest::MoveIntoBuildingIndex [GOOD]
>> TSchemeShardTest::RejectAlterSolomon [GOOD]
>> TSchemeShardTest::SimultaneousDropForceDrop
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_move/unittest >> TSchemeShardMoveTest::MoveIntoBuildingIndex [GOOD]
Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140] 2025-05-07T08:47:57.072376Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:47:57.072495Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:47:57.072550Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:47:57.072613Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:47:57.072664Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:47:57.072695Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:47:57.072754Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:47:57.072843Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:47:57.073701Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:47:57.074128Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:47:57.490922Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe
to console configs 2025-05-07T08:47:57.490992Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:47:57.513580Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:47:57.513823Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:47:57.514004Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:47:57.527239Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:47:57.527598Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:47:57.528308Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:47:57.528531Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:47:57.534631Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:47:57.536045Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:47:57.536118Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:47:57.536199Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:47:57.536255Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:47:57.536307Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:47:57.536611Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:47:57.545672Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T08:47:57.682910Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:47:57.683167Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:57.683405Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:47:57.683631Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:47:57.683688Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:57.686116Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:47:57.686282Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:47:57.686492Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:57.686557Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:47:57.686604Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:47:57.686649Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:47:57.688678Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:57.688836Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:47:57.688932Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:47:57.691068Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:57.691122Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:57.691198Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:47:57.691246Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:47:57.703410Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:47:57.709001Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:47:57.709232Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 
FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:47:57.710552Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:47:57.710722Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:47:57.710783Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:47:57.711114Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:47:57.711178Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:47:57.711356Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:47:57.711429Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:47:57.714028Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:47:57.714093Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:47:57.714316Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:47:57.714357Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 
tablet strongly msg operationId: 281474976710760:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:281474976710760 msg type: 269090816 2025-05-07T08:48:01.858006Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 281474976710760, partId: 4294967295, tablet: 72057594046316545 2025-05-07T08:48:01.858543Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 281474976710760, at schemeshard: 72057594046678944 2025-05-07T08:48:01.858577Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 281474976710760, ready parts: 0/1, is published: true 2025-05-07T08:48:01.859003Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 281474976710760, at schemeshard: 72057594046678944 FAKE_COORDINATOR: Add transaction: 281474976710760 at step: 5000006 FAKE_COORDINATOR: advance: minStep5000006 State->FrontStep: 5000005 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710760 at step: 5000006 2025-05-07T08:48:01.860247Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000006, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:48:01.860674Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 281474976710760 Coordinator: 72057594046316545 AckTo { RawX1: 131 RawX2: 8589936747 } } Step: 5000006 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:48:01.861694Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_lock.cpp:44: [72057594046678944] TDropLock TPropose opId# 281474976710760:0 HandleReply TEvOperationPlan: step# 5000006 2025-05-07T08:48:01.872470Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 281474976710760:0 128 -> 240 2025-05-07T08:48:01.879993Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 281474976710760:0, at schemeshard: 72057594046678944 2025-05-07T08:48:01.880054Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046678944] TDone opId# 281474976710760:0 ProgressState 2025-05-07T08:48:01.880135Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976710760:0 progress is 1/1 2025-05-07T08:48:01.880476Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 281474976710760 ready parts: 1/1 2025-05-07T08:48:01.880514Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976710760:0 progress is 1/1 2025-05-07T08:48:01.880544Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 281474976710760 ready parts: 1/1 2025-05-07T08:48:01.880576Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 281474976710760, ready parts: 1/1, is published: true 2025-05-07T08:48:01.880641Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [2:125:2151] message: TxId: 281474976710760 2025-05-07T08:48:01.881316Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 281474976710760 ready parts: 1/1 2025-05-07T08:48:01.881348Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 281474976710760:0 2025-05-07T08:48:01.881678Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 281474976710760:0 2025-05-07T08:48:01.881746Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5 FAKE_COORDINATOR: Erasing txId 281474976710760 2025-05-07T08:48:01.886148Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6706: Handle: TEvNotifyTxCompletionResult: txId# 281474976710760 2025-05-07T08:48:01.886216Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6708: Message: TxId: 281474976710760 2025-05-07T08:48:01.886275Z node 2 :BUILD_INDEX INFO: schemeshard_build_index__progress.cpp:2321: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxReply : TEvNotifyTxCompletionResult, txId# 281474976710760, buildInfoId: 102 2025-05-07T08:48:01.886351Z node 2 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:2324: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxReply : TEvNotifyTxCompletionResult, txId# 281474976710760, buildInfo: TBuildInfo{ IndexBuildId: 102, Uid: , DomainPathId: [OwnerId: 72057594046678944, LocalPathId: 1], TablePathId: [OwnerId: 72057594046678944, LocalPathId: 2], IndexType: EIndexTypeGlobal, IndexName: Sync, IndexColumn: value0, State: Unlocking, IsCancellationRequested: 0, Issue: , SubscribersCount: 1, CreateSender: [2:454:2415], AlterMainTableTxId: 0, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 281474976710757, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976710758, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 5000004, ApplyTxId: 281474976710759, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976710760, UnlockTxStatus: StatusAccepted, UnlockTxDone: 0, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }, Billed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }} 2025-05-07T08:48:01.895436Z node 2 :BUILD_INDEX INFO: schemeshard_build_index__progress.cpp:1105: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 102 Unlocking TBuildInfo{ IndexBuildId: 102, Uid: , DomainPathId: [OwnerId: 72057594046678944, LocalPathId: 1], TablePathId: [OwnerId: 72057594046678944, LocalPathId: 2], IndexType: EIndexTypeGlobal, IndexName: Sync, IndexColumn: value0, State: Unlocking, IsCancellationRequested: 0, Issue: , SubscribersCount: 1, CreateSender: [2:454:2415], AlterMainTableTxId: 0, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 281474976710757, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976710758, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 5000004, ApplyTxId: 281474976710759, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976710760, UnlockTxStatus: StatusAccepted, UnlockTxDone: 1, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }, Billed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }} 2025-05-07T08:48:01.895802Z node 2 :BUILD_INDEX INFO: schemeshard_build_index_tx_base.cpp:25: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: 
Change state from Unlocking to Done 2025-05-07T08:48:01.907262Z node 2 :BUILD_INDEX INFO: schemeshard_build_index__progress.cpp:1105: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 102 Done TBuildInfo{ IndexBuildId: 102, Uid: , DomainPathId: [OwnerId: 72057594046678944, LocalPathId: 1], TablePathId: [OwnerId: 72057594046678944, LocalPathId: 2], IndexType: EIndexTypeGlobal, IndexName: Sync, IndexColumn: value0, State: Done, IsCancellationRequested: 0, Issue: , SubscribersCount: 1, CreateSender: [2:454:2415], AlterMainTableTxId: 0, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 281474976710757, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976710758, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 5000004, ApplyTxId: 281474976710759, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976710760, UnlockTxStatus: StatusAccepted, UnlockTxDone: 1, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }, Billed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }} 2025-05-07T08:48:01.907323Z node 2 :BUILD_INDEX TRACE: schemeshard_build_index_tx_base.cpp:325: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TIndexBuildInfo SendNotifications: : id# 102, subscribers count# 1 2025-05-07T08:48:01.908711Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-05-07T08:48:01.909458Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [2:632:2581] TestWaitNotification: OK eventTxId 102 2025-05-07T08:48:01.915375Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T08:48:01.917305Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table" took 1.94ms result status StatusSuccess 2025-05-07T08:48:01.918794Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Table" PathDescription { Self { Name: "Table" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 6 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 6 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 TableSchemaVersion: 3 TablePartitionVersion: 1 } ChildrenExist: true } Table { Name: "Table" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value0" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "value1" Type: "Utf8" TypeId: 4608 Id: 3 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableIndexes { Name: "SomeIndex" LocalPathId: 3 Type: EIndexTypeGlobal State: EIndexStateReady KeyColumnNames: "value1" SchemaVersion: 1 PathOwnerId: 72057594046678944 DataSize: 0 IndexImplTableDescriptions { } } TableIndexes { Name: "Sync" LocalPathId: 5 Type: EIndexTypeGlobal State: EIndexStateReady 
KeyColumnNames: "value0" SchemaVersion: 2 PathOwnerId: 72057594046678944 DataSize: 0 IndexImplTableDescriptions { } } TableSchemaVersion: 3 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 5 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TSchemeShardTest::DropTable [GOOD] >> TSchemeShardTest::DropTableById >> TSchemeShardMoveTest::ResetCachedPath >> TSchemeShardTest::ParallelCreateTable [GOOD] >> TSchemeShardTest::ParallelCreateSameTable >> TSchemeShardTest::AlterTableSplitSchema [GOOD] >> TSchemeShardTest::AlterTableSettings >> TSchemeShardTest::CreateIndexedTableAndForceDrop [GOOD] >> TSchemeShardTest::CreateIndexedTableAndForceDropSimultaneously >> BsControllerConfig::AddDriveSerial [GOOD] >> BsControllerConfig::AddDriveSerialMassive >> THealthCheckTest::OneIssueListing [GOOD] >> THealthCheckTest::NoStoragePools >> test_sql_streaming.py::test[suites-GroupByHoppingWindowTimeExtractorUnusedColumns-default.txt] [FAIL] >> test_sql_streaming.py::test[suites-GroupByHoppingWithDataWatermarks-default.txt] >> TSchemeShardTest::CopyTableOmitFollowers [GOOD] >> TSchemeShardTest::CreateIndexedTableAfterBackup >> TSchemeShardTest::DropBlockStoreVolumeWithFillGeneration [GOOD] >> TSchemeShardTest::CreateWithIntermediateDirs >> TSchemeShardTest::AlterTableById [GOOD] >> TSchemeShardTest::AlterTableConfig >> TSchemeShardTest::DropTableById [GOOD] >> TSchemeShardTest::DropPQFail >> TSchemeShardTest::CopyIndexedTable [GOOD] >> TSchemeShardTest::CopyTable >> THealthCheckTest::BlueGroupIssueWhenPartialGroupStatusAndReplicationDisks [GOOD] >> THealthCheckTest::GreenStatusWhenCreatingGroup >> TSchemeShardTest::SimultaneousDropForceDrop [GOOD] >> TSchemeShardTest::RejectSystemViewPath >> TSchemeShardTest::ParallelCreateSameTable [GOOD] >> TSchemeShardTest::MultipleColumnFamilies >> TSchemeShardTest::CreateIndexedTableAndForceDropSimultaneously [GOOD] >> TSchemeShardTest::CreateTableWithUniformPartitioning >> test_sql_streaming.py::test[suites-GroupByHopWithDataWatermarks-default.txt] [FAIL] >> test_sql_streaming.py::test[suites-GroupByHoppingWindow-default.txt] >> test_sql_streaming.py::test[suites-ReadTopicWithMetadataWithFilter-default.txt] [FAIL] >> test_sql_streaming.py::test[suites-ReadTopicWithSchema-default.txt] >> TVectorIndexTests::CreateTableWithError >> TSchemeShardTest::CreateIndexedTableAfterBackup [GOOD] >> TSchemeShardTest::CreateFinishedInDescription >> TSchemeShardTest::CopyTable [GOOD] 
>> TSchemeShardTest::CopyTableAndConcurrentChanges >> TSchemeShardTest::AlterTableSettings [GOOD] >> TSchemeShardTest::AssignBlockStoreVolume >> TSchemeShardTest::CreateWithIntermediateDirs [GOOD] >> TSchemeShardTest::DocumentApiVersion >> TYardTest::TestRedZoneSurvivability [GOOD] >> TYardTest::TestSlay >> TSchemeShardTest::RejectSystemViewPath [GOOD] >> TSchemeShardTest::SplitKey [GOOD] >> TSchemeShardTest::SplitAlterCopy >> TSchemeShardMoveTest::ResetCachedPath [GOOD] >> TSchemeShardTest::AlterTableConfig [GOOD] >> TSchemeShardTest::AlterTableCompactionPolicy >> TSchemeShardTest::CreateFinishedInDescription [GOOD] >> TSchemeShardTest::CreateBlockStoreVolume >> THealthCheckTest::StorageLimit87 [GOOD] >> THealthCheckTest::StorageLimit80 |88.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_background_cleaning/ydb-core-tx-schemeshard-ut_background_cleaning >> TPDiskRaces::OwnerKilledWhileReadingLogAndThenKillLastOwner [GOOD] >> TPDiskTest::PDiskRestart >> test_sql_streaming.py::test[suites-GroupByHop-default.txt] [FAIL] >> test_sql_streaming.py::test[suites-GroupByHopByStringKey-default.txt] |88.5%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_background_cleaning/ydb-core-tx-schemeshard-ut_background_cleaning |88.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_background_cleaning/ydb-core-tx-schemeshard-ut_background_cleaning >> TVectorIndexTests::CreateTableWithError [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_move/unittest >> TSchemeShardMoveTest::ResetCachedPath [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140] 2025-05-07T08:48:20.204164Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:48:20.204260Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:48:20.204300Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:48:20.204338Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:48:20.204377Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:48:20.204406Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:48:20.204455Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:48:20.204549Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, 
CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:48:20.205315Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:48:20.205650Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:48:20.293239Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:48:20.293308Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:48:20.321926Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:48:20.322234Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:48:20.322446Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:48:20.346133Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:48:20.346523Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:48:20.347295Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:48:20.347538Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:48:20.351240Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:48:20.352736Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:48:20.352818Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:48:20.352910Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:48:20.352967Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:48:20.353041Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:48:20.353303Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:48:20.364945Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T08:48:20.553187Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 
2025-05-07T08:48:20.553450Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:48:20.553676Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:48:20.553933Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:48:20.554097Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:48:20.558471Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:48:20.558637Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:48:20.558884Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:48:20.558957Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:48:20.559019Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:48:20.559060Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:48:20.562192Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:48:20.562271Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:48:20.562322Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:48:20.565513Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:48:20.565578Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:48:20.565651Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:48:20.565721Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:48:20.570531Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 
72057594046316545 2025-05-07T08:48:20.574928Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:48:20.575154Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:48:20.576303Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:48:20.576482Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:48:20.576534Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:48:20.576855Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:48:20.576927Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:48:20.577163Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:48:20.577257Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:48:20.591284Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:48:20.591363Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:48:20.591603Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:48:20.591647Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 
meshard: 72057594046678944 2025-05-07T08:48:21.720144Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_table.cpp:418: TAlterTable TPropose operationId# 105:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:48:21.720248Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 105 ready parts: 1/1 2025-05-07T08:48:21.720410Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } AffectedSet { TabletId: 72075186233409549 Flags: 2 } ExecLevel: 0 TxId: 105 MinStep: 1 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:48:21.723127Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 105:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:105 msg type: 269090816 2025-05-07T08:48:21.723281Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 105, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 105 at step: 5000004 FAKE_COORDINATOR: advance: minStep5000004 State->FrontStep: 5000003 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 105 at step: 5000004 FAKE_COORDINATOR: Send Plan to tablet 72075186233409549 for txId: 105 at step: 5000004 2025-05-07T08:48:21.723752Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000004, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:48:21.723882Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 105 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000004 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:48:21.723954Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_table.cpp:359: TAlterTable TPropose operationId# 105:0 HandleReply TEvOperationPlan, operationId: 105:0, stepId: 5000004, at schemeshard: 72057594046678944 2025-05-07T08:48:21.724283Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 105:0 128 -> 129 2025-05-07T08:48:21.724429Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 FAKE_COORDINATOR: advance: minStep5000004 State->FrontStep: 5000004 2025-05-07T08:48:21.731822Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:48:21.731882Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 105, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-05-07T08:48:21.732187Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:48:21.732286Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:205:2207], at schemeshard: 72057594046678944, txId: 105, path id: 3 2025-05-07T08:48:21.733004Z 
node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 105:0, at schemeshard: 72057594046678944 2025-05-07T08:48:21.733066Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1036: NTableState::TProposedWaitParts operationId# 105:0 ProgressState at tablet: 72057594046678944 2025-05-07T08:48:21.734572Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 8 PathOwnerId: 72057594046678944, cookie: 105 2025-05-07T08:48:21.734679Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 8 PathOwnerId: 72057594046678944, cookie: 105 2025-05-07T08:48:21.734727Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 105 2025-05-07T08:48:21.734769Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 105, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 8 2025-05-07T08:48:21.734847Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4 2025-05-07T08:48:21.734968Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 105, ready parts: 0/1, is published: true FAKE_COORDINATOR: Erasing txId 105 2025-05-07T08:48:21.735907Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6245: Handle TEvProposeTransactionResult, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409549 Status: COMPLETE TxId: 105 Step: 5000004 OrderId: 105 ExecLatency: 0 ProposeLatency: 3 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409549 CpuTimeUsec: 1383 } } 2025-05-07T08:48:21.735950Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation FindRelatedPartByTabletId, TxId: 105, tablet: 72075186233409549, partId: 0 2025-05-07T08:48:21.736077Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 105:0, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409549 Status: COMPLETE TxId: 105 Step: 5000004 OrderId: 105 ExecLatency: 0 ProposeLatency: 3 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409549 CpuTimeUsec: 1383 } } 2025-05-07T08:48:21.736170Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_part.cpp:108: HandleReply TEvDataShard::TEvProposeTransactionResult Ignore message: tablet# 72057594046678944, ev# TxKind: TX_KIND_SCHEME Origin: 72075186233409549 Status: COMPLETE TxId: 105 Step: 5000004 OrderId: 105 ExecLatency: 0 ProposeLatency: 3 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409549 CpuTimeUsec: 1383 } } 2025-05-07T08:48:21.736778Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5472: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 672 RawX2: 4294969907 } Origin: 72075186233409549 State: 2 TxId: 105 Step: 0 Generation: 2 2025-05-07T08:48:21.736832Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:1752: TOperation FindRelatedPartByTabletId, TxId: 105, tablet: 72075186233409549, partId: 0 2025-05-07T08:48:21.736976Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 105:0, at schemeshard: 72057594046678944, message: Source { RawX1: 672 RawX2: 4294969907 } Origin: 72075186233409549 State: 2 TxId: 105 Step: 0 Generation: 2 2025-05-07T08:48:21.737026Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1005: NTableState::TProposedWaitParts operationId# 105:0 HandleReply TEvSchemaChanged at tablet: 72057594046678944 2025-05-07T08:48:21.737133Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1009: NTableState::TProposedWaitParts operationId# 105:0 HandleReply TEvSchemaChanged at tablet: 72057594046678944 message: Source { RawX1: 672 RawX2: 4294969907 } Origin: 72075186233409549 State: 2 TxId: 105 Step: 0 Generation: 2 2025-05-07T08:48:21.737195Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:655: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 105:0, shardIdx: 72057594046678944:4, datashard: 72075186233409549, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-05-07T08:48:21.737240Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:674: all shard schema changes has been received, operationId: 105:0, at schemeshard: 72057594046678944 2025-05-07T08:48:21.737275Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:686: send schema changes ack message, operation: 105:0, datashard: 72075186233409549, at schemeshard: 72057594046678944 2025-05-07T08:48:21.737313Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 105:0 129 -> 240 2025-05-07T08:48:21.740334Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 105 2025-05-07T08:48:21.741817Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 105:0, at schemeshard: 72057594046678944 2025-05-07T08:48:21.741952Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 105:0, at schemeshard: 72057594046678944 2025-05-07T08:48:21.742375Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 105:0, at schemeshard: 72057594046678944 2025-05-07T08:48:21.742431Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046678944] TDone opId# 105:0 ProgressState 2025-05-07T08:48:21.742546Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#105:0 progress is 1/1 2025-05-07T08:48:21.742580Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 105 ready parts: 1/1 2025-05-07T08:48:21.742621Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#105:0 progress is 1/1 2025-05-07T08:48:21.742652Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 105 ready parts: 1/1 2025-05-07T08:48:21.742692Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 105, ready parts: 1/1, is published: true 2025-05-07T08:48:21.742771Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:334:2313] message: TxId: 105 2025-05-07T08:48:21.742824Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 105 ready parts: 1/1 2025-05-07T08:48:21.742889Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 105:0 2025-05-07T08:48:21.742927Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 105:0 2025-05-07T08:48:21.743071Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-05-07T08:48:21.745078Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 105: got EvNotifyTxCompletionResult 2025-05-07T08:48:21.745128Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 105: satisfy waiter [1:839:2759] TestWaitNotification: OK eventTxId 105 >> TYardTest::TestSlay [GOOD] >> TYardTest::TestSlayRace >> TPDiskTest::PDiskRestart [GOOD] >> TPDiskTest::PDiskRestartManyLogWrites >> TSchemeShardTest::AssignBlockStoreVolume [GOOD] >> TSchemeShardTest::AssignBlockStoreVolumeDuringAlter >> BsControllerConfig::AddDriveSerialMassive [GOOD] >> test.py::test[solomon-LabelColumns-default.txt] [GOOD] >> test.py::test[solomon-Subquery-default.txt] >> TYardTest::TestSlayRace [GOOD] >> TYardTest::TestSlayRecreate >> TPDiskTest::PDiskRestartManyLogWrites [GOOD] >> TPDiskTest::CommitDeleteChunks >> TSchemeShardTest::CreateTableWithUniformPartitioning [GOOD] >> TSchemeShardTest::CreateTableWithSplitBoundaries >> TSchemeShardTest::CreateBlockStoreVolume [GOOD] >> TSchemeShardTest::CreateBlockStoreVolumeWithVolumeChannelsProfiles >> TSchemeShardTest::DocumentApiVersion [GOOD] >> TSchemeShardTest::DisablePublicationsOfDropping_Dir ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest >> TVectorIndexTests::CreateTableWithError [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:128:2058] recipient: [1:108:2140] 2025-05-07T08:48:21.996124Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:48:21.996233Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:48:21.996270Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:48:21.996306Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:48:21.996368Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:48:21.996402Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:48:21.996455Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:48:21.996522Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:48:21.997213Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:48:21.997609Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:48:22.080275Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:48:22.080338Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:48:22.097300Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:48:22.097421Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:48:22.097583Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:48:22.119632Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:48:22.121766Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:48:22.122571Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:48:22.122994Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:48:22.125481Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:48:22.126654Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:48:22.126700Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:48:22.126734Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:48:22.126764Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:48:22.126838Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:48:22.127003Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:48:22.132396Z node 1 :HIVE INFO: 
tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:239:2058] recipient: [1:15:2062] 2025-05-07T08:48:22.288079Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:48:22.288357Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:48:22.288583Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:48:22.288789Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:48:22.288840Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:48:22.291722Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:48:22.291879Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:48:22.292071Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:48:22.292123Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:48:22.292180Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:48:22.292213Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:48:22.295577Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:48:22.295644Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:48:22.295681Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:48:22.298048Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:48:22.298099Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:48:22.298152Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: 
NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:48:22.298205Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:48:22.302120Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:48:22.304495Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:48:22.304690Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:48:22.305717Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:48:22.305864Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 130 RawX2: 4294969450 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:48:22.305915Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:48:22.306228Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:48:22.306280Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:48:22.306446Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:48:22.306510Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:48:22.310687Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:48:22.310747Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:48:22.310953Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:48:22.311016Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: 
[1:206:2208], at schemeshard: 72057594046678944, txId: 1, path id: 1 2025-05-07T08:48:22.311102Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:48:22.311145Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046678944] TDone opId# 1:0 ProgressState 2025-05-07T08:48:22.311235Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#1:0 progress is 1/1 2025-05-07T08:48:22.311282Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-05-07T08:48:22.311320Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#1:0 progress is 1/1 2025-05-07T08:48:22.311346Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-05-07T08:48:22.311392Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 1, ready parts: 1/1, is published: false 2025-05-07T08:48:22.311437Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-05-07T08:48:22.311471Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 1:0 2025-05-07T08:48:22.311499Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 1:0 2025-05-07T08:48:22.311590Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-05-07T08:48:22.311630Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 1, publications: 1, subscribers: 0 2025-05-07T08:48:22.311670Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 1, [OwnerId: 72057594046678944, LocalPathId: 1], 3 2025-05-07T08:48:22.314546Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-05-07T08:48:22.314657Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-05-07T08:48:22.314710Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1 2025-05-07T08:48:22.314744Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 3 2025-05-07T08:48:22.314804Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:48:22.314939Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1, subscribers: 0 
2025-05-07T08:48:22.317803Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1 2025-05-07T08:48:22.318337Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1, at schemeshard: 72057594046678944 TestModificationResults wait txId: 101 2025-05-07T08:48:22.320326Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:434: actor# [1:269:2260] Bootstrap 2025-05-07T08:48:22.335649Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:453: actor# [1:269:2260] Become StateWork (SchemeCache [1:274:2265]) 2025-05-07T08:48:22.338422Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateIndexedTable CreateIndexedTable { TableDescription { Name: "vectors" Columns { Name: "id" Type: "Uint64" } Columns { Name: "__ydb_parent" Type: "String" } KeyColumnNames: "id" } IndexDescription { Name: "idx_vector" KeyColumnNames: "__ydb_parent" Type: EIndexTypeGlobalVectorKmeansTree VectorIndexKmeansTreeDescription { Settings { settings { metric: DISTANCE_COSINE vector_type: VECTOR_TYPE_FLOAT vector_dimension: 1024 } } } } } } TxId: 101 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:48:22.338907Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_create_indexed_table.cpp:101: TCreateTableIndex construct operation table path: /MyRoot/vectors domain path id: [OwnerId: 72057594046678944, LocalPathId: 1] domain path: /MyRoot shardsToCreate: 2 GetShardsInside: 0 MaxShards: 200000 2025-05-07T08:48:22.339077Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_just_reject.cpp:47: TReject Propose, opId: 101:0, explain: index key column shouldn't have a reserved name, at schemeshard: 72057594046678944 2025-05-07T08:48:22.339130Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 101:1, propose status:StatusInvalidParameter, reason: index key column shouldn't have a reserved name, at schemeshard: 72057594046678944 2025-05-07T08:48:22.340430Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:269:2260] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-05-07T08:48:22.351552Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 101, response: Status: StatusInvalidParameter Reason: "index key column shouldn\'t have a reserved name" TxId: 101 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:48:22.351738Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 101, database: /MyRoot, subject: , status: StatusInvalidParameter, reason: index key column shouldn't have a reserved name, operation: CREATE TABLE WITH INDEXES, path: /MyRoot/vectors 2025-05-07T08:48:22.352563Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 TestModificationResult got TxId: 101, wait until txId: 101 TestModificationResults wait txId: 102 2025-05-07T08:48:22.356420Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateIndexedTable CreateIndexedTable { TableDescription { Name: "vectors" Columns { Name: "id" Type: "Uint64" } Columns { Name: "embedding" Type: 
"String" } KeyColumnNames: "id" } IndexDescription { Name: "idx_vector" KeyColumnNames: "embedding" Type: EIndexTypeGlobalVectorKmeansTree DataColumnNames: "id" VectorIndexKmeansTreeDescription { Settings { settings { metric: DISTANCE_COSINE vector_type: VECTOR_TYPE_FLOAT vector_dimension: 1024 } } } } } } TxId: 102 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:48:22.356851Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_create_indexed_table.cpp:101: TCreateTableIndex construct operation table path: /MyRoot/vectors domain path id: [OwnerId: 72057594046678944, LocalPathId: 1] domain path: /MyRoot shardsToCreate: 2 GetShardsInside: 0 MaxShards: 200000 2025-05-07T08:48:22.357013Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_just_reject.cpp:47: TReject Propose, opId: 102:0, explain: the same column can't be used as key and data column for one index, for example id, at schemeshard: 72057594046678944 2025-05-07T08:48:22.357063Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 102:1, propose status:StatusInvalidParameter, reason: the same column can't be used as key and data column for one index, for example id, at schemeshard: 72057594046678944 2025-05-07T08:48:22.362823Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 102, response: Status: StatusInvalidParameter Reason: "the same column can\'t be used as key and data column for one index, for example id" TxId: 102 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:48:22.363122Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 102, database: /MyRoot, subject: , status: StatusInvalidParameter, reason: the same column can't be used as key and data column for one index, for example id, operation: CREATE TABLE WITH INDEXES, path: /MyRoot/vectors TestModificationResult got TxId: 102, wait until txId: 102 >> THealthCheckTest::OnlyDiskIssueOnSpaceIssues [GOOD] >> THealthCheckTest::OnlyDiskIssueOnFaultyPDisks >> TPDiskTest::CommitDeleteChunks [GOOD] >> TPDiskTest::DeviceHaltTooLong >> TYardTest::TestSlayRecreate [GOOD] >> TYardTest::TestSlayLogWriteRaceActor |88.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/ut_schema/ydb-core-tx-columnshard-ut_schema |88.5%| [LD] {RESULT} $(B)/ydb/core/tx/columnshard/ut_schema/ydb-core-tx-columnshard-ut_schema |88.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/columnshard/ut_schema/ydb-core-tx-columnshard-ut_schema |88.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/services/metadata/secret/ut/ydb-services-metadata-secret-ut |88.5%| [LD] {RESULT} $(B)/ydb/services/metadata/secret/ut/ydb-services-metadata-secret-ut |88.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/services/metadata/secret/ut/ydb-services-metadata-secret-ut >> TSchemeShardTest::AlterTableCompactionPolicy [GOOD] >> TSchemeShardTest::AlterPersQueueGroup >> Yq_1::DescribeJob [FAIL] >> Yq_1::DescribeQuery ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut_bscontroller/unittest >> BsControllerConfig::AddDriveSerialMassive [GOOD] Test command err: Leader for TabletID 72057594037932033 is [0:0:0] sender: [1:214:2066] recipient: [1:202:2076] IGNORE Leader for TabletID 72057594037932033 is [0:0:0] sender: [1:214:2066] recipient: [1:202:2076] Leader for TabletID 72057594037932033 is [1:216:2078] sender: [1:217:2066] recipient: [1:202:2076] 2025-05-07T08:48:01.363394Z node 1 
:BS_CONTROLLER DEBUG: {BSC05@impl.h:2051} StateInit event Type# 268828672 Event# NKikimr::TEvTablet::TEvBoot 2025-05-07T08:48:01.531040Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2051} StateInit event Type# 268828673 Event# NKikimr::TEvTablet::TEvRestored 2025-05-07T08:48:01.542049Z node 1 :BS_CONTROLLER DEBUG: {BSC22@console_interaction.cpp:14} Console interaction started 2025-05-07T08:48:01.628245Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2051} StateInit event Type# 268828684 Event# NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-05-07T08:48:01.628762Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2051} StateInit event Type# 268639244 Event# NKikimr::TEvNodeWardenStorageConfig 2025-05-07T08:48:01.632412Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2051} StateInit event Type# 131082 Event# NActors::TEvInterconnect::TEvNodesInfo 2025-05-07T08:48:01.632739Z node 1 :BS_CONTROLLER DEBUG: {BSC01@bsc.cpp:498} Handle TEvInterconnect::TEvNodesInfo 2025-05-07T08:48:01.637855Z node 1 :BS_CONTROLLER DEBUG: {BSCTXIS01@init_scheme.cpp:17} TTxInitScheme Execute 2025-05-07T08:48:01.846152Z node 1 :BS_CONTROLLER DEBUG: {BSCTXIS03@init_scheme.cpp:44} TTxInitScheme Complete 2025-05-07T08:48:01.847228Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM01@migrate.cpp:190} Execute tx 2025-05-07T08:48:01.848844Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM02@migrate.cpp:251} Complete tx IncompatibleData# false 2025-05-07T08:48:01.849829Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-05-07T08:48:01.859025Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-05-07T08:48:01.859822Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion Leader for TabletID 72057594037932033 is [1:216:2078] sender: [1:239:2066] recipient: [1:20:2067] 2025-05-07T08:48:01.900103Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion 2025-05-07T08:48:01.901908Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-05-07T08:48:01.935055Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-05-07T08:48:01.935816Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-05-07T08:48:01.936170Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-05-07T08:48:01.938041Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-05-07T08:48:01.938753Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-05-07T08:48:01.938816Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# 
NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-05-07T08:48:01.938849Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-05-07T08:48:01.939082Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-05-07T08:48:01.954532Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-05-07T08:48:01.954635Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-05-07T08:48:01.971889Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-05-07T08:48:01.973164Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE01@load_everything.cpp:19} TTxLoadEverything Execute 2025-05-07T08:48:02.020524Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE03@load_everything.cpp:546} TTxLoadEverything Complete 2025-05-07T08:48:02.021528Z node 1 :BS_CONTROLLER DEBUG: {BSC09@impl.h:2182} LoadFinished 2025-05-07T08:48:02.027218Z node 1 :BS_CONTROLLER DEBUG: {BSC18@console_interaction.cpp:31} Console connection service started 2025-05-07T08:48:02.028310Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE04@load_everything.cpp:551} TTxLoadEverything InitQueue processed 2025-05-07T08:48:02.260707Z node 1 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:398} Execute TEvControllerConfigRequest Request# {Command { AddDriveSerial { Serial: "SN_123" BoxId: 1 } } } 2025-05-07T08:48:02.273902Z node 1 :BS_CONTROLLER ERROR: {BSC07@impl.h:2175} ProcessControllerEvent event processing took too much time Type# 268637706 Duration# 0.235063s 2025-05-07T08:48:02.276213Z node 1 :BS_CONTROLLER ERROR: {BSC00@bsc.cpp:666} StateWork event processing took too much time Type# 2146435078 Duration# 0.237687s 2025-05-07T08:48:02.289088Z node 1 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:398} Execute TEvControllerConfigRequest Request# {Command { AddDriveSerial { Serial: "SN_123" BoxId: 1 } } } 2025-05-07T08:48:02.302548Z node 1 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:398} Execute TEvControllerConfigRequest Request# {Command { AddDriveSerial { Serial: "SN_123" BoxId: 1 } } } Leader for TabletID 72057594037932033 is [0:0:0] sender: [11:214:2066] recipient: [11:202:2076] IGNORE Leader for TabletID 72057594037932033 is [0:0:0] sender: [11:214:2066] recipient: [11:202:2076] Leader for TabletID 72057594037932033 is [11:216:2078] sender: [11:217:2066] recipient: [11:202:2076] 2025-05-07T08:48:08.937488Z node 11 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2051} StateInit event Type# 268828672 Event# NKikimr::TEvTablet::TEvBoot 2025-05-07T08:48:08.952970Z node 11 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2051} StateInit event Type# 268828673 Event# NKikimr::TEvTablet::TEvRestored 2025-05-07T08:48:08.954875Z node 11 :BS_CONTROLLER DEBUG: {BSC22@console_interaction.cpp:14} Console interaction started 2025-05-07T08:48:08.957124Z node 11 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2051} StateInit event Type# 268828684 Event# NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-05-07T08:48:08.957557Z node 11 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2051} StateInit event Type# 268639244 Event# 
NKikimr::TEvNodeWardenStorageConfig 2025-05-07T08:48:08.967339Z node 11 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2051} StateInit event Type# 131082 Event# NActors::TEvInterconnect::TEvNodesInfo 2025-05-07T08:48:08.967389Z node 11 :BS_CONTROLLER DEBUG: {BSC01@bsc.cpp:498} Handle TEvInterconnect::TEvNodesInfo 2025-05-07T08:48:08.967999Z node 11 :BS_CONTROLLER DEBUG: {BSCTXIS01@init_scheme.cpp:17} TTxInitScheme Execute 2025-05-07T08:48:09.231301Z node 11 :BS_CONTROLLER DEBUG: {BSCTXIS03@init_scheme.cpp:44} TTxInitScheme Complete 2025-05-07T08:48:09.231406Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM01@migrate.cpp:190} Execute tx 2025-05-07T08:48:09.231659Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM02@migrate.cpp:251} Complete tx IncompatibleData# false 2025-05-07T08:48:09.231743Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-05-07T08:48:09.231852Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-05-07T08:48:09.231920Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion Leader for TabletID 72057594037932033 is [11:216:2078] sender: [11:239:2066] recipient: [11:20:2067] 2025-05-07T08:48:09.246385Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion 2025-05-07T08:48:09.248116Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-05-07T08:48:09.265390Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-05-07T08:48:09.265487Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-05-07T08:48:09.265827Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-05-07T08:48:09.270651Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-05-07T08:48:09.271400Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-05-07T08:48:09.272037Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-05-07T08:48:09.272375Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-05-07T08:48:09.272413Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-05-07T08:48:09.288402Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# 
NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-05-07T08:48:09.288518Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-05-07T08:48:09.304687Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-05-07T08:48:09.305577Z node 11 :BS_CONTROLLER DEBUG: {BSCTXLE01@load_everything.cpp:19} TTxLoadEverything Execute 2025-05-07T08:48:09.372389Z node 11 :BS_CONTROLLER DEBUG: {BSCTXLE03@load_everything.cpp:546} TTxLoadEverything Complete 2025-05-07T08:48:09.373181Z node 11 :BS_CONTROLLER DEBUG: {BSC09@impl.h:2182} LoadFinished 2025-05-07T08:48:09.383864Z node 11 :BS_CONTROLLER DEBUG: {BSC18@console_interaction.cpp:31} Console connection service started 2025-05-07T08:48:09.384583Z node 11 :BS_CONTROLLER DEBUG: {BSCTXLE04@load_everything.cpp:551} TTxLoadEverything InitQueue proc ... ommand { AddDriveSerial { Serial: "SN_5" BoxId: 1 } } } 2025-05-07T08:48:18.794984Z node 21 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:398} Execute TEvControllerConfigRequest Request# {Command { AddDriveSerial { Serial: "SN_6" BoxId: 1 } } } 2025-05-07T08:48:18.817422Z node 21 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:398} Execute TEvControllerConfigRequest Request# {Command { AddDriveSerial { Serial: "SN_7" BoxId: 1 } } } 2025-05-07T08:48:18.847577Z node 21 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:398} Execute TEvControllerConfigRequest Request# {Command { AddDriveSerial { Serial: "SN_8" BoxId: 1 } } } 2025-05-07T08:48:18.861839Z node 21 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:398} Execute TEvControllerConfigRequest Request# {Command { AddDriveSerial { Serial: "SN_9" BoxId: 1 } } } 2025-05-07T08:48:18.948863Z node 21 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:398} Execute TEvControllerConfigRequest Request# {Command { RemoveDriveSerial { Serial: "SN_0" } } } 2025-05-07T08:48:18.971667Z node 21 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:398} Execute TEvControllerConfigRequest Request# {Command { RemoveDriveSerial { Serial: "SN_1" } } } 2025-05-07T08:48:18.994809Z node 21 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:398} Execute TEvControllerConfigRequest Request# {Command { RemoveDriveSerial { Serial: "SN_2" } } } 2025-05-07T08:48:19.020517Z node 21 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:398} Execute TEvControllerConfigRequest Request# {Command { RemoveDriveSerial { Serial: "SN_3" } } } 2025-05-07T08:48:19.034209Z node 21 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:398} Execute TEvControllerConfigRequest Request# {Command { RemoveDriveSerial { Serial: "SN_4" } } } 2025-05-07T08:48:19.059259Z node 21 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:398} Execute TEvControllerConfigRequest Request# {Command { RemoveDriveSerial { Serial: "SN_5" } } } 2025-05-07T08:48:19.085913Z node 21 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:398} Execute TEvControllerConfigRequest Request# {Command { RemoveDriveSerial { Serial: "SN_6" } } } 2025-05-07T08:48:19.120215Z node 21 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:398} Execute TEvControllerConfigRequest Request# {Command { RemoveDriveSerial { Serial: "SN_7" } } } 2025-05-07T08:48:19.149173Z node 21 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:398} Execute TEvControllerConfigRequest Request# {Command { RemoveDriveSerial { Serial: "SN_8" } } } 
2025-05-07T08:48:19.169316Z node 21 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:398} Execute TEvControllerConfigRequest Request# {Command { RemoveDriveSerial { Serial: "SN_9" } } } Leader for TabletID 72057594037932033 is [0:0:0] sender: [31:214:2066] recipient: [31:190:2076] IGNORE Leader for TabletID 72057594037932033 is [0:0:0] sender: [31:214:2066] recipient: [31:190:2076] Leader for TabletID 72057594037932033 is [31:216:2078] sender: [31:217:2066] recipient: [31:190:2076] 2025-05-07T08:48:21.362146Z node 31 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2051} StateInit event Type# 268828672 Event# NKikimr::TEvTablet::TEvBoot 2025-05-07T08:48:21.363142Z node 31 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2051} StateInit event Type# 268828673 Event# NKikimr::TEvTablet::TEvRestored 2025-05-07T08:48:21.363394Z node 31 :BS_CONTROLLER DEBUG: {BSC22@console_interaction.cpp:14} Console interaction started 2025-05-07T08:48:21.364964Z node 31 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2051} StateInit event Type# 268828684 Event# NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-05-07T08:48:21.365194Z node 31 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2051} StateInit event Type# 268639244 Event# NKikimr::TEvNodeWardenStorageConfig 2025-05-07T08:48:21.365841Z node 31 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2051} StateInit event Type# 131082 Event# NActors::TEvInterconnect::TEvNodesInfo 2025-05-07T08:48:21.365875Z node 31 :BS_CONTROLLER DEBUG: {BSC01@bsc.cpp:498} Handle TEvInterconnect::TEvNodesInfo 2025-05-07T08:48:21.366171Z node 31 :BS_CONTROLLER DEBUG: {BSCTXIS01@init_scheme.cpp:17} TTxInitScheme Execute 2025-05-07T08:48:21.388587Z node 31 :BS_CONTROLLER DEBUG: {BSCTXIS03@init_scheme.cpp:44} TTxInitScheme Complete 2025-05-07T08:48:21.388726Z node 31 :BS_CONTROLLER DEBUG: {BSCTXM01@migrate.cpp:190} Execute tx 2025-05-07T08:48:21.388866Z node 31 :BS_CONTROLLER DEBUG: {BSCTXM02@migrate.cpp:251} Complete tx IncompatibleData# false 2025-05-07T08:48:21.388989Z node 31 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-05-07T08:48:21.389096Z node 31 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-05-07T08:48:21.389172Z node 31 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion Leader for TabletID 72057594037932033 is [31:216:2078] sender: [31:239:2066] recipient: [31:20:2067] 2025-05-07T08:48:21.400558Z node 31 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion 2025-05-07T08:48:21.400730Z node 31 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-05-07T08:48:21.414525Z node 31 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-05-07T08:48:21.414675Z node 31 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-05-07T08:48:21.414757Z node 31 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# 
NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-05-07T08:48:21.414889Z node 31 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-05-07T08:48:21.415002Z node 31 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-05-07T08:48:21.415072Z node 31 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-05-07T08:48:21.415117Z node 31 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-05-07T08:48:21.415186Z node 31 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-05-07T08:48:21.426150Z node 31 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-05-07T08:48:21.426328Z node 31 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-05-07T08:48:21.437152Z node 31 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-05-07T08:48:21.437311Z node 31 :BS_CONTROLLER DEBUG: {BSCTXLE01@load_everything.cpp:19} TTxLoadEverything Execute 2025-05-07T08:48:21.438673Z node 31 :BS_CONTROLLER DEBUG: {BSCTXLE03@load_everything.cpp:546} TTxLoadEverything Complete 2025-05-07T08:48:21.438746Z node 31 :BS_CONTROLLER DEBUG: {BSC09@impl.h:2182} LoadFinished 2025-05-07T08:48:21.438990Z node 31 :BS_CONTROLLER DEBUG: {BSC18@console_interaction.cpp:31} Console connection service started 2025-05-07T08:48:21.439034Z node 31 :BS_CONTROLLER DEBUG: {BSCTXLE04@load_everything.cpp:551} TTxLoadEverything InitQueue processed 2025-05-07T08:48:21.439739Z node 31 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:398} Execute TEvControllerConfigRequest Request# {Command { AddDriveSerial { Serial: "SN_0" BoxId: 1 } } } 2025-05-07T08:48:21.440981Z node 31 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:398} Execute TEvControllerConfigRequest Request# {Command { AddDriveSerial { Serial: "SN_1" BoxId: 1 } } } 2025-05-07T08:48:21.441647Z node 31 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:398} Execute TEvControllerConfigRequest Request# {Command { AddDriveSerial { Serial: "SN_2" BoxId: 1 } } } 2025-05-07T08:48:21.442664Z node 31 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:398} Execute TEvControllerConfigRequest Request# {Command { AddDriveSerial { Serial: "SN_3" BoxId: 1 } } } 2025-05-07T08:48:21.443292Z node 31 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:398} Execute TEvControllerConfigRequest Request# {Command { AddDriveSerial { Serial: "SN_4" BoxId: 1 } } } 2025-05-07T08:48:21.443902Z node 31 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:398} Execute TEvControllerConfigRequest Request# {Command { AddDriveSerial { Serial: "SN_5" BoxId: 1 } } } 2025-05-07T08:48:21.444567Z node 31 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:398} Execute TEvControllerConfigRequest Request# {Command { 
AddDriveSerial { Serial: "SN_6" BoxId: 1 } } } 2025-05-07T08:48:21.446023Z node 31 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:398} Execute TEvControllerConfigRequest Request# {Command { AddDriveSerial { Serial: "SN_7" BoxId: 1 } } } 2025-05-07T08:48:21.446665Z node 31 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:398} Execute TEvControllerConfigRequest Request# {Command { AddDriveSerial { Serial: "SN_8" BoxId: 1 } } } 2025-05-07T08:48:21.447389Z node 31 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:398} Execute TEvControllerConfigRequest Request# {Command { AddDriveSerial { Serial: "SN_9" BoxId: 1 } } } 2025-05-07T08:48:21.448189Z node 31 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:398} Execute TEvControllerConfigRequest Request# {Command { RemoveDriveSerial { Serial: "SN_0" } } } 2025-05-07T08:48:21.448911Z node 31 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:398} Execute TEvControllerConfigRequest Request# {Command { RemoveDriveSerial { Serial: "SN_1" } } } 2025-05-07T08:48:21.449577Z node 31 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:398} Execute TEvControllerConfigRequest Request# {Command { RemoveDriveSerial { Serial: "SN_2" } } } 2025-05-07T08:48:21.450417Z node 31 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:398} Execute TEvControllerConfigRequest Request# {Command { RemoveDriveSerial { Serial: "SN_3" } } } 2025-05-07T08:48:21.451165Z node 31 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:398} Execute TEvControllerConfigRequest Request# {Command { RemoveDriveSerial { Serial: "SN_4" } } } 2025-05-07T08:48:21.451862Z node 31 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:398} Execute TEvControllerConfigRequest Request# {Command { RemoveDriveSerial { Serial: "SN_5" } } } 2025-05-07T08:48:21.452627Z node 31 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:398} Execute TEvControllerConfigRequest Request# {Command { RemoveDriveSerial { Serial: "SN_6" } } } 2025-05-07T08:48:21.453327Z node 31 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:398} Execute TEvControllerConfigRequest Request# {Command { RemoveDriveSerial { Serial: "SN_7" } } } 2025-05-07T08:48:21.454120Z node 31 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:398} Execute TEvControllerConfigRequest Request# {Command { RemoveDriveSerial { Serial: "SN_8" } } } 2025-05-07T08:48:21.455119Z node 31 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:398} Execute TEvControllerConfigRequest Request# {Command { RemoveDriveSerial { Serial: "SN_9" } } } >> THealthCheckTest::Issues100VCardListing [GOOD] >> THealthCheckTest::Issues100GroupsMerging >> TSchemeShardTest::CopyTableAndConcurrentChanges [GOOD] >> TSchemeShardTest::CopyTableAndConcurrentSplit >> TSchemeShardTest::AssignBlockStoreVolumeDuringAlter [GOOD] >> TSchemeShardTest::AssignBlockStoreCheckVersionInAlter >> THealthCheckTest::ProtobufUnderLimitFor70LargeVdisksIssues [GOOD] >> THealthCheckTest::ServerlessBadTablets >> TSchemeShardTest::SplitAlterCopy [GOOD] >> TSchemeShardTest::TopicReserveSize >> TSchemeShardTest::DropPQFail [GOOD] >> TSchemeShardTest::DropPQAbort |88.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/engine/ut/ydb-core-engine-ut |88.5%| [LD] {RESULT} $(B)/ydb/core/engine/ut/ydb-core-engine-ut |88.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/engine/ut/ydb-core-engine-ut >> TSchemeShardTest::MultipleColumnFamilies [GOOD] >> TSchemeShardTest::MultipleColumnFamiliesWithStorage >> THealthCheckTest::ShardsLimit995 [GOOD] >> THealthCheckTest::ShardsLimit905 >> TSchemeShardTest::DisablePublicationsOfDropping_Dir [GOOD] >> 
TSchemeShardTest::DisablePublicationsOfDropping_Table >> TBlobStorageGroupInfoBlobMapTest::BelongsToSubgroupBenchmark [GOOD] >> TBlobStorageGroupInfoBlobMapTest::BasicChecks >> TSchemeShardTest::CreateBlockStoreVolumeWithVolumeChannelsProfiles [GOOD] >> TSchemeShardTest::CreateBlockStoreVolumeWithNonReplicatedPartitions >> THealthCheckTest::StorageLimit50 [GOOD] >> THealthCheckTest::SpecificServerless >> THealthCheckTest::GreenStatusWhenInitPending [GOOD] >> THealthCheckTest::IgnoreOtherGenerations >> TSchemeShardTest::CreateTableWithSplitBoundaries [GOOD] >> TSchemeShardTest::CreateTableWithConfig >> TSchemeShardTest::AssignBlockStoreCheckVersionInAlter [GOOD] >> TSchemeShardTest::AssignBlockStoreCheckFillGenerationInAlter >> PrivateApi::PingTask [GOOD] >> PrivateApi::GetTask >> TAsyncIndexTests::DropTableWithInflightChanges[PipeResets] >> TSchemeShardTest::CopyTableAndConcurrentSplit [GOOD] >> THealthCheckTest::NoStoragePools [GOOD] >> THealthCheckTest::NoBscResponse >> TSchemeShardTest::CopyTableAndConcurrentMerge >> TSchemeShardTest::MultipleColumnFamiliesWithStorage [GOOD] >> TSchemeShardTest::ParallelModifying >> TAsyncIndexTests::MergeIndexWithReboots[TabletReboots] >> TSchemeShardTest::DisablePublicationsOfDropping_Table [GOOD] >> TSchemeShardTest::DisablePublicationsOfDropping_IndexedTable |88.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/wrappers/ut/ydb-core-wrappers-ut |88.5%| [LD] {RESULT} $(B)/ydb/core/wrappers/ut/ydb-core-wrappers-ut |88.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/wrappers/ut/ydb-core-wrappers-ut >> TSchemeShardTest::CreateBlockStoreVolumeWithNonReplicatedPartitions [GOOD] >> TSchemeShardTest::CreateAlterBlockStoreVolumeWithInvalidPoolKinds >> TSchemeShardTest::AssignBlockStoreCheckFillGenerationInAlter [GOOD] >> TSchemeShardTest::BlockStoreVolumeLimits >> TBlobStorageGroupInfoBlobMapTest::BasicChecks [GOOD] >> TFlatTest::SelectRangeNullArgs3 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/groupinfo/ut/unittest >> TBlobStorageGroupInfoBlobMapTest::BasicChecks [GOOD] Test command err: None domains 1 new (ns): 427.1092805 None domains 1 old (ns): 193.1346978 None domains 9 new (ns): 265.7794293 None domains 9 old (ns): 99.79514823 Mirror3 domains 4 new (ns): 189.3662895 Mirror3 domains 4 old (ns): 120.9521887 Mirror3 domains 9 new (ns): 158.7436936 Mirror3 domains 9 old (ns): 127.7588914 4Plus2Block domains 8 new (ns): 280.8670904 4Plus2Block domains 8 old (ns): 141.7636207 4Plus2Block domains 9 new (ns): 127.7203291 4Plus2Block domains 9 old (ns): 84.36670661 ErasureMirror3of4 domains 8 new (ns): 161.8875494 ErasureMirror3of4 domains 8 old (ns): 111.056498 ErasureMirror3of4 domains 9 new (ns): 247.6515415 ErasureMirror3of4 domains 9 old (ns): 97.14559685 >> Yq_1::Basic_Null [FAIL] >> Yq_1::Basic_TaggedLiteral >> Yq_1::Basic [FAIL] >> Yq_1::Basic_EmptyList >> TSchemeShardTest::CreateTableWithConfig [GOOD] >> TSchemeShardTest::CreateTableWithNamedConfig >> TSchemeShardTest::TopicReserveSize [GOOD] >> TSchemeShardTest::TopicWithAutopartitioningReserveSize |88.5%| [TA] $(B)/ydb/core/blobstorage/groupinfo/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} >> TSchemeShardTest::CreateAlterBlockStoreVolumeWithInvalidPoolKinds [GOOD] >> TSchemeShardTest::CreateDropKesus >> THealthCheckTest::TestTabletIsDead [GOOD] >> THealthCheckTest::TestReBootingTabletIsDead >> THealthCheckTest::ServerlessBadTablets [GOOD] >> THealthCheckTest::ServerlessWhenTroublesWithSharedNodes >> Yq_1::CreateQuery_With_Idempotency [FAIL] >> Yq_1::CreateQuery_Without_Connection >> TSchemeShardTest::BlockStoreVolumeLimits [GOOD] >> TSchemeShardTest::BlockStoreNonreplVolumeLimits >> TSchemeShardTest::DisablePublicationsOfDropping_IndexedTable [GOOD] >> TSchemeShardTest::DisablePublicationsOfDropping_Pq >> TSchemeShardTest::CreateDropKesus [GOOD] >> TSchemeShardTest::CreateAlterKesus >> TSchemeShardTest::CopyTableAndConcurrentMerge [GOOD] >> TSchemeShardTest::CopyTableAndConcurrentSplitMerge >> TSchemeShardTest::CreateTableWithNamedConfig [GOOD] >> TSchemeShardTest::CreateTableWithUnknownNamedConfig >> test.py::test[solomon-Subquery-default.txt] [GOOD] >> test.py::test[solomon-UnknownSetting-] >> Yq_1::CreateConnection_With_Existing_Name [GOOD] >> Yq_1::CreateConnections_With_Idempotency >> TSchemeShardTest::TopicWithAutopartitioningReserveSize [GOOD] >> TLocksTest::SetLockFail |88.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/ut/opt/ydb-core-kqp-ut-opt |88.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/opt/ydb-core-kqp-ut-opt |88.5%| [TA] {RESULT} $(B)/ydb/core/blobstorage/groupinfo/ut/test-results/unittest/{meta.json ... results_accumulator.log} |88.5%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/opt/ydb-core-kqp-ut-opt >> TSchemeShardTest::BlockStoreNonreplVolumeLimits [GOOD] >> TSchemeShardTest::BlockStoreSystemVolumeLimits >> THealthCheckTest::StorageLimit80 [GOOD] >> THealthCheckTest::StorageNoQuota >> TSchemeShardTest::CreateAlterKesus [GOOD] >> TSchemeShardTest::CreateDropSolomon >> TSchemeShardCheckProposeSize::CopyTables [GOOD] >> TSchemeShardDecimalTypesInTables::Parameterless >> TSchemeShardTest::CreateTableWithUnknownNamedConfig [GOOD] >> TSchemeShardTest::CreatePersQueueGroup >> THealthCheckTest::GreenStatusWhenCreatingGroup [GOOD] >> THealthCheckTest::DontIgnoreServerlessWithExclusiveNodesWhenNotSpecific |88.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/tx_proxy/ut_ext_tenant/ydb-core-tx-tx_proxy-ut_ext_tenant |88.5%| [LD] {RESULT} $(B)/ydb/core/tx/tx_proxy/ut_ext_tenant/ydb-core-tx-tx_proxy-ut_ext_tenant |88.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/tx_proxy/ut_ext_tenant/ydb-core-tx-tx_proxy-ut_ext_tenant >> TSchemeShardTest::DisablePublicationsOfDropping_Pq [GOOD] >> TSchemeShardTest::DisablePublicationsOfDropping_Solomon >> THealthCheckTest::Issues100GroupsMerging [GOOD] >> THealthCheckTest::Issues100VCardMerging >> Yq_1::DescribeConnection [GOOD] >> Yq_1::DeleteQuery |88.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/services/dynamic_config/ut/ydb-services-dynamic_config-ut |88.5%| [LD] {RESULT} $(B)/ydb/services/dynamic_config/ut/ydb-services-dynamic_config-ut |88.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/services/dynamic_config/ut/ydb-services-dynamic_config-ut ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_base/unittest >> TSchemeShardTest::TopicWithAutopartitioningReserveSize [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:125:2151] 
sender: [1:128:2058] recipient: [1:108:2140] 2025-05-07T08:47:18.867444Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:47:18.867529Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:47:18.867582Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:47:18.867633Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:47:18.867683Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:47:18.867715Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:47:18.867790Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:47:18.867885Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:47:18.868605Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:47:18.868937Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:47:18.970649Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:47:18.970716Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:47:18.991944Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:47:18.992072Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:47:18.992255Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:47:19.013006Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:47:19.014301Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:47:19.015012Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:47:19.015364Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:47:19.023860Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:47:19.025541Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:47:19.025627Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:47:19.025706Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:47:19.025762Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:47:19.025876Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:47:19.026098Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:47:19.055350Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:239:2058] recipient: [1:15:2062] 2025-05-07T08:47:19.219261Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:47:19.219507Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:19.219707Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:47:19.219914Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:47:19.219987Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:19.222373Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:47:19.222496Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:47:19.222680Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:19.222783Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:47:19.222852Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 
2025-05-07T08:47:19.222886Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:47:19.224976Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:19.225045Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:47:19.225098Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:47:19.227038Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:19.227085Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:19.227147Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:47:19.227199Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:47:19.237327Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:47:19.239719Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:47:19.239906Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:47:19.240824Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:47:19.240965Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 130 RawX2: 4294969450 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:47:19.241012Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:47:19.241286Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:47:19.241335Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:47:19.241491Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: 
IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:47:19.241578Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:47:19.243726Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:47:19.243778Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:47:19.243950Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:47:19.243987Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 35Z node 13 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_pq.cpp:265: CollectPQConfigChanged accept TEvPersQueue::TEvProposeTransactionResult, operationId: 104:0, shardIdx: 72057594046678944:5, shard: 72075186233409550, left await: 1, txState.State: Propose, txState.ReadyForNotifications: 0, at schemeshard: 72057594046678944 2025-05-07T08:48:42.972768Z node 13 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_pq.cpp:629: NPQState::TPropose operationId# 104:0 HandleReply TEvProposeTransactionResult CollectPQConfigChanged: false 2025-05-07T08:48:42.972799Z node 13 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_pq.cpp:754: NPQState::TPropose operationId# 104:0 can't persist state: ShardsInProgress is not empty, remain: 1 2025-05-07T08:48:42.973415Z node 13 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation FindRelatedPartByTabletId, TxId: 104, tablet: 72075186233409551, partId: 0 2025-05-07T08:48:42.973503Z node 13 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 104:0, at schemeshard: 72057594046678944, message: Origin: 72075186233409551 Status: COMPLETE TxId: 104 Step: 5000005 2025-05-07T08:48:42.973553Z node 13 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_pq.cpp:624: NPQState::TPropose operationId# 104:0 HandleReply TEvProposeTransactionResult triggers early, at schemeshard: 72057594046678944 message# Origin: 72075186233409551 Status: COMPLETE TxId: 104 Step: 5000005 2025-05-07T08:48:42.973590Z node 13 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_pq.cpp:265: CollectPQConfigChanged accept TEvPersQueue::TEvProposeTransactionResult, operationId: 104:0, shardIdx: 72057594046678944:6, shard: 72075186233409551, left await: 0, txState.State: Propose, txState.ReadyForNotifications: 0, at schemeshard: 72057594046678944 2025-05-07T08:48:42.973620Z node 13 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_pq.cpp:629: NPQState::TPropose operationId# 104:0 HandleReply TEvProposeTransactionResult CollectPQConfigChanged: true 2025-05-07T08:48:42.973820Z node 13 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 104:0 128 -> 240 2025-05-07T08:48:42.974235Z node 13 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 9 
2025-05-07T08:48:42.983883Z node 13 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 104:0, at schemeshard: 72057594046678944 2025-05-07T08:48:42.984033Z node 13 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 104:0, at schemeshard: 72057594046678944 2025-05-07T08:48:42.984123Z node 13 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 104:0, at schemeshard: 72057594046678944 2025-05-07T08:48:42.984240Z node 13 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 104:0, at schemeshard: 72057594046678944 2025-05-07T08:48:42.984431Z node 13 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:48:42.984467Z node 13 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 104, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-05-07T08:48:42.984741Z node 13 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:48:42.984779Z node 13 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [13:205:2207], at schemeshard: 72057594046678944, txId: 104, path id: 2 2025-05-07T08:48:42.984854Z node 13 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 104:0, at schemeshard: 72057594046678944 2025-05-07T08:48:42.984905Z node 13 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046678944] TDone opId# 104:0 ProgressState 2025-05-07T08:48:42.985108Z node 13 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#104:0 progress is 1/1 2025-05-07T08:48:42.985162Z node 13 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-05-07T08:48:42.985224Z node 13 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#104:0 progress is 1/1 2025-05-07T08:48:42.985278Z node 13 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-05-07T08:48:42.985343Z node 13 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 104, ready parts: 1/1, is published: false 2025-05-07T08:48:42.985407Z node 13 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-05-07T08:48:42.985471Z node 13 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 104:0 2025-05-07T08:48:42.985525Z node 13 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 104:0 2025-05-07T08:48:42.985779Z node 13 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 10 2025-05-07T08:48:42.985848Z node 13 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 104, publications: 1, subscribers: 0 2025-05-07T08:48:42.985898Z node 13 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation_side_effects.cpp:989: Publication details: tx: 104, [OwnerId: 72057594046678944, LocalPathId: 2], 5 2025-05-07T08:48:42.986901Z node 13 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 5 PathOwnerId: 72057594046678944, cookie: 104 2025-05-07T08:48:42.986999Z node 13 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 5 PathOwnerId: 72057594046678944, cookie: 104 2025-05-07T08:48:42.987045Z node 13 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 104 2025-05-07T08:48:42.987135Z node 13 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 104, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 5 2025-05-07T08:48:42.987203Z node 13 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 9 2025-05-07T08:48:42.987313Z node 13 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 104, subscribers: 0 2025-05-07T08:48:42.997243Z node 13 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 TestModificationResult got TxId: 104, wait until txId: 104 TestWaitNotification wait txId: 104 2025-05-07T08:48:43.025289Z node 13 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 104: send EvNotifyTxCompletion 2025-05-07T08:48:43.025371Z node 13 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 104 2025-05-07T08:48:43.025992Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 104, at schemeshard: 72057594046678944 2025-05-07T08:48:43.026122Z node 13 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 104: got EvNotifyTxCompletionResult 2025-05-07T08:48:43.026188Z node 13 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 104: satisfy waiter [13:1476:3276] TestWaitNotification: OK eventTxId 104 2025-05-07T08:48:43.026904Z node 13 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Topic1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T08:48:43.027151Z node 13 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Topic1" took 326us result status StatusSuccess 2025-05-07T08:48:43.027830Z node 13 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Topic1" PathDescription { Self { Name: "Topic1" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 
PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 4 } ChildrenExist: false BalancerTabletID: 72075186233409547 } PersQueueGroup { Name: "Topic1" PathId: 2 TotalGroupCount: 6 PartitionPerTablet: 1 PQTabletConfig { PartitionConfig { LifetimeSeconds: 13 WriteSpeedInBytesPerSecond: 19 } YdbDatabasePath: "/MyRoot" MeteringMode: METERING_MODE_RESERVED_CAPACITY PartitionStrategy { MinPartitionCount: 1 MaxPartitionCount: 7 PartitionStrategyType: CAN_SPLIT_AND_MERGE } } Partitions { PartitionId: 0 TabletId: 72075186233409546 Status: Inactive ChildPartitionIds: 1 ChildPartitionIds: 2 } Partitions { PartitionId: 1 TabletId: 72075186233409548 KeyRange { ToBound: "A" } Status: Inactive ParentPartitionIds: 0 ChildPartitionIds: 3 ChildPartitionIds: 4 } Partitions { PartitionId: 2 TabletId: 72075186233409549 KeyRange { FromBound: "A" } Status: Inactive ParentPartitionIds: 0 ChildPartitionIds: 5 } Partitions { PartitionId: 3 TabletId: 72075186233409550 KeyRange { ToBound: "0" } Status: Active ParentPartitionIds: 1 } Partitions { PartitionId: 4 TabletId: 72075186233409551 KeyRange { FromBound: "0" ToBound: "A" } Status: Inactive ParentPartitionIds: 1 ChildPartitionIds: 5 } Partitions { PartitionId: 5 TabletId: 72075186233409552 KeyRange { FromBound: "0" } Status: Active ParentPartitionIds: 2 ParentPartitionIds: 4 } AlterVersion: 4 BalancerTabletID: 72075186233409547 NextPartitionId: 6 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 7 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 494 AccountSize: 494 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 6 PQPartitionsLimit: 1000000 } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944
>> TSchemeShardTest::CopyTableAndConcurrentSplitMerge [GOOD]
>> TSchemeShardTest::CopyTableForBackup
>> TSchemeShardTest::BlockStoreSystemVolumeLimits [GOOD]
>> TSchemeShardTest::AlterTableWithCompactionStrategies
>> TSchemeShardTest::AlterPersQueueGroup [GOOD]
>> TSchemeShardTest::AlterPersQueueGroupWithKeySchema
>> TSchemeShardDecimalTypesInTables::Parameterless [GOOD]
>> TSchemeShardDecimalTypesInTables::Parameters_22_9-EnableParameterizedDecimal-false
>> TSchemeShardTest::ParallelModifying [GOOD]
>> TSchemeShardTest::PQGroupExplicitChannels
>> THealthCheckTest::OnlyDiskIssueOnFaultyPDisks [GOOD]
>> THealthCheckTest::ProtobufBelowLimitFor10VdisksIssues
>> TSchemeShardTest::DisablePublicationsOfDropping_Solomon [GOOD]
>> TSchemeShardTest::CreateDropSolomon [GOOD]
>> TSchemeShardTest::CreateAlterDropSolomon
>> TSchemeShardTest::AlterTableWithCompactionStrategies [GOOD]
>> TSchemeShardTest::BackupBackupCollection-WithIncremental-false
>> TSchemeShardTest::AlterPersQueueGroupWithKeySchema [GOOD]
>> TSchemeShardTest::AlterBlockStoreVolume
>> THealthCheckTest::ServerlessWhenTroublesWithSharedNodes [GOOD]
>> THealthCheckTest::ServerlessWithExclusiveNodesWhenTroublesWithSharedNodes
>> THealthCheckTest::SpecificServerless [GOOD]
>> THealthCheckTest::SpecificServerlessWithExclusiveNodes
>> Yq_1::ListConnections [GOOD]
>> Yq_1::ListConnectionsOnEmptyConnectionsTable
>> TSchemeShardDecimalTypesInTables::Parameters_22_9-EnableParameterizedDecimal-false [GOOD]
>> TSchemeShardDecimalTypesInTables::Parameters_22_9-EnableParameterizedDecimal-true
>> TSchemeShardTest::CreatePersQueueGroup [GOOD]
>> TSchemeShardTest::CreatePersQueueGroupWithKeySchema
>> TSchemeShardTest::CreateAlterDropSolomon [GOOD]
>> TFlatTest::CopyCopiedTableAndRead
>> TSchemeShardTest::PQGroupExplicitChannels [GOOD]
>> TSchemeShardTest::ReadOnlyMode
>> TLocksTest::SetLockFail [GOOD]
>> TLocksTest::SetEraseSet
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_base/unittest >> TSchemeShardTest::DisablePublicationsOfDropping_Solomon [GOOD]
Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:128:2058] recipient: [1:108:2140] 2025-05-07T08:47:15.697938Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:47:15.698048Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:47:15.698093Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:47:15.698141Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:47:15.698188Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:47:15.698217Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:47:15.698299Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:47:15.698408Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:47:15.704905Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:47:15.705395Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:47:15.838760Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:47:15.838820Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:47:15.859111Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:47:15.859295Z node 1 :FLAT_TX_SCHEMESHARD DEBUG:
schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:47:15.859467Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:47:15.880251Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:47:15.886514Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:47:15.887283Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:47:15.887670Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:47:15.890640Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:47:15.892236Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:47:15.892299Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:47:15.892366Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:47:15.892418Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:47:15.892562Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:47:15.892745Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:47:15.910417Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:239:2058] recipient: [1:15:2062] 2025-05-07T08:47:16.115518Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:47:16.115771Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:16.115997Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:47:16.116153Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:47:16.116219Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:16.119009Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:47:16.119116Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:47:16.119305Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:16.119358Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:47:16.119408Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:47:16.119447Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:47:16.121760Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:16.121834Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:47:16.121879Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:47:16.123712Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:16.123752Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:16.123795Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:47:16.123840Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:47:16.126797Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:47:16.128756Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:47:16.128903Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:47:16.129564Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep 
Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:47:16.129691Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 130 RawX2: 4294969450 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:47:16.129735Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:47:16.129982Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:47:16.130025Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:47:16.130152Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:47:16.130232Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:47:16.132205Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:47:16.132251Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:47:16.132451Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:47:16.132480Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 
07T08:48:46.610817Z node 15 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000005, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:48:46.611063Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 104 Coordinator: 72057594046316545 AckTo { RawX1: 132 RawX2: 64424511595 } } Step: 5000005 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:48:46.611176Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_solomon.cpp:47: TDropSolomon TPropose operationId# 104:0 HandleReply TEvOperationPlan, step: 5000005, at schemeshard: 72057594046678944 2025-05-07T08:48:46.611286Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5180: ExamineTreeVFS visit path id [OwnerId: 72057594046678944, LocalPathId: 3] name: Obj type: EPathTypeSolomonVolume state: EPathStateDrop stepDropped: 0 droppedTxId: 104 parent: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:48:46.611357Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5196: ExamineTreeVFS run path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-05-07T08:48:46.611545Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-05-07T08:48:46.611674Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 104:0 128 -> 130 2025-05-07T08:48:46.611882Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:48:46.611987Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-05-07T08:48:46.613271Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 2025-05-07T08:48:46.615011Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 FAKE_COORDINATOR: Erasing txId 104 2025-05-07T08:48:46.616824Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:48:46.616886Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 104, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:48:46.617051Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 104, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-05-07T08:48:46.617249Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:48:46.617287Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [15:447:2407], at schemeshard: 72057594046678944, txId: 104, path id: 1 2025-05-07T08:48:46.617337Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: 
TTxPublishToSchemeBoard Send, to populator: [15:447:2407], at schemeshard: 72057594046678944, txId: 104, path id: 3 2025-05-07T08:48:46.617420Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 104:0, at schemeshard: 72057594046678944 2025-05-07T08:48:46.617487Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:416: [72057594046678944] TDeleteParts opId# 104:0 ProgressState 2025-05-07T08:48:46.617590Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#104:0 progress is 1/1 2025-05-07T08:48:46.617697Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-05-07T08:48:46.617754Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#104:0 progress is 1/1 2025-05-07T08:48:46.617816Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-05-07T08:48:46.617889Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 104, ready parts: 1/1, is published: false 2025-05-07T08:48:46.617988Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-05-07T08:48:46.618069Z node 15 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 104:0 2025-05-07T08:48:46.618139Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 104:0 2025-05-07T08:48:46.620036Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-05-07T08:48:46.620196Z node 15 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 104, publications: 2, subscribers: 0 2025-05-07T08:48:46.620282Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 104, [OwnerId: 72057594046678944, LocalPathId: 1], 11 2025-05-07T08:48:46.620344Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 104, [OwnerId: 72057594046678944, LocalPathId: 3], 18446744073709551615 2025-05-07T08:48:46.621334Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 3 LocalPathId: 3 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 104 2025-05-07T08:48:46.621448Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 3 LocalPathId: 3 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 104 2025-05-07T08:48:46.621498Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 104 2025-05-07T08:48:46.621567Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 104, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 18446744073709551615 2025-05-07T08:48:46.621654Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-05-07T08:48:46.622954Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 3 LocalPathId: 1 Version: 11 PathOwnerId: 72057594046678944, cookie: 104 2025-05-07T08:48:46.623054Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 3 LocalPathId: 1 Version: 11 PathOwnerId: 72057594046678944, cookie: 104 2025-05-07T08:48:46.623114Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 104 2025-05-07T08:48:46.623160Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 104, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 11 2025-05-07T08:48:46.623203Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-05-07T08:48:46.623293Z node 15 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 104, subscribers: 0 2025-05-07T08:48:46.625996Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:2 hive 72057594037968897 at ss 72057594046678944 2025-05-07T08:48:46.628195Z node 15 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 2 TxId_Deprecated: 2 TabletID: 72075186233409547 Forgetting tablet 72075186233409547 2025-05-07T08:48:46.634459Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5898: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 2 ShardOwnerId: 72057594046678944 ShardLocalIdx: 2, at schemeshard: 72057594046678944 2025-05-07T08:48:46.635017Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-05-07T08:48:46.635516Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 2025-05-07T08:48:46.635854Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-05-07T08:48:46.635936Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-05-07T08:48:46.636053Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:48:46.636839Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 2025-05-07T08:48:46.638754Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2 
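Note: the TEvUpdateAck sequence above shows the scheme-board publication bookkeeping for txId 104 — two publications ([LocalPathId: 1] at version 11 and [LocalPathId: 3] at version 18446744073709551615, the uint64 max that marks a dropped path), an in-flight count that is reported on each ack (2, then 1), and a final "Publication complete, notify & remove" once both acks arrive. The following is a minimal illustrative sketch of that counting logic, assuming a toy in-memory model; it is not the schemeshard's actual TTxAckPublishToSchemeBoard implementation.

#include <cstdint>
#include <iostream>
#include <map>

// Simplified model: each txId waits for one ack per published pathId.
struct TPublicationTracker {
    // txId -> (pathId -> expected version)
    std::map<uint64_t, std::map<uint64_t, uint64_t>> InFlight;

    void Publish(uint64_t txId, uint64_t pathId, uint64_t version) {
        InFlight[txId][pathId] = version;
    }

    // Returns true when the last expected ack for txId has arrived,
    // mirroring "Publication complete, notify & remove" in the log.
    bool Ack(uint64_t txId, uint64_t pathId) {
        auto it = InFlight.find(txId);
        if (it == InFlight.end()) return false;
        // The log reports the count before the ack is applied.
        std::cout << "Publication in-flight, count: " << it->second.size()
                  << ", txId: " << txId << "\n";
        it->second.erase(pathId);
        if (!it->second.empty()) return false;
        InFlight.erase(it);
        return true;
    }
};

int main() {
    TPublicationTracker t;
    t.Publish(104, 1, 11);          // root path, version 11
    t.Publish(104, 3, UINT64_MAX);  // dropped path marker
    t.Ack(104, 3);                  // prints "count: 2"
    if (t.Ack(104, 1))              // prints "count: 1", then completes
        std::cout << "Publication complete, notify & remove, txId: 104\n";
}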
2025-05-07T08:48:46.638902Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:2 tabletId 72075186233409547 2025-05-07T08:48:46.640610Z node 15 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 104, wait until txId: 104 TestWaitNotification wait txId: 104 2025-05-07T08:48:46.640981Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 104: send EvNotifyTxCompletion 2025-05-07T08:48:46.641060Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 104 2025-05-07T08:48:46.641659Z node 15 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 104, at schemeshard: 72057594046678944 2025-05-07T08:48:46.641829Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 104: got EvNotifyTxCompletionResult 2025-05-07T08:48:46.641908Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 104: satisfy waiter [15:578:2520] TestWaitNotification: OK eventTxId 104 >> Yq_1::ModifyConnections [GOOD] >> Yq_1::ModifyQuery ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_base/unittest >> TSchemeShardTest::CreateAlterDropSolomon [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:128:2058] recipient: [1:108:2140] 2025-05-07T08:47:21.142887Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:47:21.142963Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:47:21.143003Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:47:21.143046Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:47:21.143088Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:47:21.143119Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:47:21.143211Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:47:21.143292Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, 
IsManualStartup# false 2025-05-07T08:47:21.143954Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:47:21.144294Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:47:21.565952Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:47:21.566294Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:47:21.754375Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:47:21.755586Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:47:21.757080Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:47:21.899320Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:47:21.912490Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:47:21.917124Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:47:21.926534Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:47:21.970870Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:47:21.972308Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:47:21.972366Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:47:21.972426Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:47:21.972468Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:47:21.972559Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:47:21.972699Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:47:22.012079Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:239:2058] recipient: [1:15:2062] 2025-05-07T08:47:23.548515Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:47:23.548819Z node 1 :FLAT_TX_SCHEMESHARD 
NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:23.549055Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:47:23.549325Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:47:23.549425Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:23.586106Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:47:23.589766Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:47:23.592794Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:23.593718Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:47:23.603073Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:47:23.603664Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:47:23.649001Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:23.649638Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:47:23.651246Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:47:23.689291Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:23.689340Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:23.698162Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:47:23.699134Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:47:23.787910Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:47:23.825860Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:47:23.839483Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:47:23.862282Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:47:23.862458Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 130 RawX2: 4294969450 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:47:23.862505Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:47:23.862824Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:47:23.862891Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:47:23.863090Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:47:23.863168Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:47:23.870997Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:47:23.871050Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:47:23.872745Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:47:23.873709Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 
HEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 103 2025-05-07T08:48:48.160917Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 103, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 7 2025-05-07T08:48:48.160952Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-05-07T08:48:48.161037Z node 15 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 103, subscribers: 0 2025-05-07T08:48:48.167961Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:3 hive 72057594037968897 at ss 72057594046678944 2025-05-07T08:48:48.168035Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:1 hive 72057594037968897 at ss 72057594046678944 2025-05-07T08:48:48.168059Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:4 hive 72057594037968897 at ss 72057594046678944 2025-05-07T08:48:48.168082Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:2 hive 72057594037968897 at ss 72057594046678944 2025-05-07T08:48:48.169198Z node 15 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 3 TxId_Deprecated: 3 TabletID: 72075186233409548 Forgetting tablet 72075186233409548 2025-05-07T08:48:48.169689Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5898: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 3 ShardOwnerId: 72057594046678944 ShardLocalIdx: 3, at schemeshard: 72057594046678944 2025-05-07T08:48:48.170076Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-05-07T08:48:48.171265Z node 15 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 1 TxId_Deprecated: 1 TabletID: 72075186233409546 2025-05-07T08:48:48.171570Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5898: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 1 ShardOwnerId: 72057594046678944 ShardLocalIdx: 1, at schemeshard: 72057594046678944 2025-05-07T08:48:48.171903Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 Forgetting tablet 72075186233409546 2025-05-07T08:48:48.173347Z node 15 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 4 TxId_Deprecated: 4 TabletID: 72075186233409549 2025-05-07T08:48:48.173539Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5898: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 4 ShardOwnerId: 72057594046678944 ShardLocalIdx: 4, at schemeshard: 72057594046678944 2025-05-07T08:48:48.173768Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] 
was 2 2025-05-07T08:48:48.174440Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-05-07T08:48:48.175068Z node 15 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 2 TxId_Deprecated: 2 TabletID: 72075186233409547 Forgetting tablet 72075186233409549 2025-05-07T08:48:48.175988Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 Forgetting tablet 72075186233409547 2025-05-07T08:48:48.176172Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5898: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 2 ShardOwnerId: 72057594046678944 ShardLocalIdx: 2, at schemeshard: 72057594046678944 2025-05-07T08:48:48.176417Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-05-07T08:48:48.176979Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-05-07T08:48:48.177064Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-05-07T08:48:48.177191Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:48:48.179795Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:3 2025-05-07T08:48:48.179899Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:3 tabletId 72075186233409548 2025-05-07T08:48:48.180431Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:1 2025-05-07T08:48:48.180470Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:1 tabletId 72075186233409546 2025-05-07T08:48:48.182390Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:4 2025-05-07T08:48:48.182445Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:4 tabletId 72075186233409549 2025-05-07T08:48:48.182563Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2 2025-05-07T08:48:48.182625Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:2 tabletId 72075186233409547 2025-05-07T08:48:48.182916Z node 15 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 103, wait until txId: 103 TestWaitNotification wait txId: 103 2025-05-07T08:48:48.183282Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 103: send EvNotifyTxCompletion 
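Note: the IncrementPathDbRefCount / DecrementPathDbRefCount lines above trace a per-path reference count, with each log record reporting the value before the change ("was N"); references come from publications, transaction state, and live shards, and TTxCleanDroppedPaths only persists the removal of a dropped path ("PersistRemovePath") once every reference has been released. A toy sketch of that pattern follows; it is a simplified illustration (in particular, it treats any path reaching zero as a removal candidate), not the actual schemeshard code.

#include <cstdint>
#include <iostream>
#include <map>
#include <vector>

class TPathRefCounter {
    std::map<uint64_t, int> Refs;
    std::vector<uint64_t> CleanQueue; // paths whose refs drained to zero
public:
    void Increment(uint64_t pathId, const char* reason) {
        int was = Refs[pathId]++; // log reports the value before the change
        std::cout << "IncrementPathDbRefCount reason " << reason
                  << " for pathId " << pathId << " was " << was << "\n";
    }
    void Decrement(uint64_t pathId, const char* reason) {
        int was = Refs[pathId]--;
        std::cout << "DecrementPathDbRefCount reason " << reason
                  << " for pathId " << pathId << " was " << was << "\n";
        if (Refs[pathId] == 0)
            CleanQueue.push_back(pathId); // candidate for removal
    }
    void CleanDroppedPaths() {
        std::cout << "TTxCleanDroppedPaths Execute, " << CleanQueue.size()
                  << " paths in candidate queue\n";
        for (uint64_t p : CleanQueue)
            std::cout << "PersistRemovePath for PathId# " << p << "\n";
        CleanQueue.clear();
    }
};

int main() {
    TPathRefCounter c;
    c.Increment(3, "publish path");     // was 0
    c.Decrement(3, "remove publishing"); // was 1, drains to 0
    c.CleanDroppedPaths();
}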
2025-05-07T08:48:48.183362Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 103 2025-05-07T08:48:48.184027Z node 15 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 103, at schemeshard: 72057594046678944 2025-05-07T08:48:48.184904Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-05-07T08:48:48.184988Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [15:537:2491] TestWaitNotification: OK eventTxId 103 2025-05-07T08:48:48.185869Z node 15 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Solomon" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T08:48:48.186235Z node 15 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Solomon" took 412us result status StatusPathDoesNotExist 2025-05-07T08:48:48.186482Z node 15 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/Solomon\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/Solomon" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 wait until 72075186233409546 is deleted wait until 72075186233409547 is deleted wait until 72075186233409548 is deleted wait until 72075186233409549 is deleted 2025-05-07T08:48:48.187189Z node 15 :HIVE INFO: tablet_helpers.cpp:1476: [72057594037968897] TEvSubscribeToTabletDeletion, 72075186233409546 2025-05-07T08:48:48.187284Z node 15 :HIVE INFO: tablet_helpers.cpp:1476: [72057594037968897] TEvSubscribeToTabletDeletion, 72075186233409547 2025-05-07T08:48:48.187341Z node 15 :HIVE INFO: tablet_helpers.cpp:1476: [72057594037968897] TEvSubscribeToTabletDeletion, 72075186233409548 2025-05-07T08:48:48.187409Z node 15 :HIVE INFO: tablet_helpers.cpp:1476: [72057594037968897] TEvSubscribeToTabletDeletion, 72075186233409549 Deleted tabletId 72075186233409546 Deleted tabletId 72075186233409547 Deleted tabletId 72075186233409548 Deleted tabletId 72075186233409549 2025-05-07T08:48:48.188112Z node 15 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T08:48:48.188406Z node 15 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 319us result status StatusSuccess 2025-05-07T08:48:48.188947Z node 15 :SCHEMESHARD_DESCRIBE DEBUG: 
schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 |88.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_incremental_restore_scan/ydb-core-tx-datashard-ut_incremental_restore_scan >> TSchemeShardTest::AlterBlockStoreVolume [GOOD] >> TSchemeShardTest::AlterBlockStoreVolumeWithNonReplicatedPartitions |88.5%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_incremental_restore_scan/ydb-core-tx-datashard-ut_incremental_restore_scan |88.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_incremental_restore_scan/ydb-core-tx-datashard-ut_incremental_restore_scan >> test.py::test[solomon-UnknownSetting-] [GOOD] >> TSchemeShardDecimalTypesInTables::Parameters_22_9-EnableParameterizedDecimal-true [GOOD] >> TSchemeShardDecimalTypesInTables::Parameters_35_6-EnableParameterizedDecimal-false >> TSchemeShardTest::BackupBackupCollection-WithIncremental-false [GOOD] >> TSchemeShardTest::BackupBackupCollection-WithIncremental-true >> TYardTest::TestSlayLogWriteRaceActor [GOOD] >> TYardTest::TestMultiYardHarakiri >> THealthCheckTest::IgnoreOtherGenerations [GOOD] >> THealthCheckTest::IgnoreServerlessWhenNotSpecific |88.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_trace/ydb-core-tx-datashard-ut_trace |88.5%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_trace/ydb-core-tx-datashard-ut_trace |88.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_trace/ydb-core-tx-datashard-ut_trace >> TSchemeShardTest::ReadOnlyMode [GOOD] >> TSchemeShardTest::PathErrors >> TSchemeShardTest::CreatePersQueueGroupWithKeySchema [GOOD] >> TSchemeShardTest::CreateTableWithCompactionStrategies >> THealthCheckTest::ShardsLimit905 [GOOD] >> THealthCheckTest::ShardsLimit800 >> TSchemeShardTest::AlterBlockStoreVolumeWithNonReplicatedPartitions [GOOD] >> TSchemeShardTest::AdoptDropSolomon >> TFlatTest::SelectRangeNullArgs4 [GOOD] >> TSchemeShardDecimalTypesInTables::Parameters_35_6-EnableParameterizedDecimal-false [GOOD] >> TSchemeShardDecimalTypesInTables::Parameters_35_6-EnableParameterizedDecimal-true >> TBlobStorageWardenTest::TestCreatePDiskAndEncryptedGroup >> TObjectStorageListingTest::CornerCases >> 
THealthCheckTest::DontIgnoreServerlessWithExclusiveNodesWhenNotSpecific [GOOD] >> TSchemeShardTest::PathErrors [GOOD] >> TSchemeShardTest::NestedDirs >> TSchemeShardTest::CreateTableWithCompactionStrategies [GOOD] >> TSchemeShardTest::CreateTopicOverDiskSpaceQuotas >> TSchemeShardTest::AdoptDropSolomon [GOOD] >> TSchemeShardTest::AlterTableAndAfterSplit >> TSchemeShardDecimalTypesInTables::Parameters_35_6-EnableParameterizedDecimal-true [GOOD] >> TSchemeShardDecimalTypesInTables::CopyTableShouldNotFailOnDisabledFeatureFlag >> TFlatTest::CopyCopiedTableAndRead [GOOD] >> TFlatTest::CopyTableAndAddFollowers |88.5%| [TM] {asan, default-linux-x86_64, pic, release} ydb/library/yql/tests/sql/solomon/pytest >> test.py::test[solomon-UnknownSetting-] [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TFlatTest::SelectRangeNullArgs4 [GOOD] Test command err: 2025-05-07T08:48:40.465496Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501623037405676138:2199];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:48:40.473485Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/004a1c/r3tmp/tmpuNlqki/pdisk_1.dat 2025-05-07T08:48:41.106653Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:48:41.113660Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:48:41.113763Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:48:41.118428Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:21667 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-05-07T08:48:41.638415Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... waiting... 
2025-05-07T08:48:41.694701Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-05-07T08:48:41.710730Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:48:46.529325Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7501623064372529186:2170];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/004a1c/r3tmp/tmpz3h9Fh/pdisk_1.dat 2025-05-07T08:48:46.652614Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-05-07T08:48:46.829946Z node 2 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:48:46.861961Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:48:46.868371Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:48:46.870873Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:7515 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-05-07T08:48:47.192612Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-05-07T08:48:47.219140Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 
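Note: the "WaitRootIsUp 'dc-1'..." / "WaitRootIsUp 'dc-1' success." markers in the test output above come from the test client polling the scheme root with Ls requests until the root path resolves. The sketch below illustrates that poll-until-ready shape under a hypothetical checkRoot callback standing in for the real TClient::Ls call; the helper's signature is assumed for illustration and is not the actual YDB test API.

#include <chrono>
#include <functional>
#include <iostream>
#include <string>
#include <thread>

// Poll until the root path is visible, printing the same progress markers
// seen in the log. checkRoot is a stand-in for the real Ls request.
bool WaitRootIsUp(const std::string& root,
                  const std::function<bool(const std::string&)>& checkRoot,
                  int maxAttempts = 10) {
    std::cout << "WaitRootIsUp '" << root << "'...\n";
    for (int i = 0; i < maxAttempts; ++i) {
        if (checkRoot(root)) {
            std::cout << "WaitRootIsUp '" << root << "' success.\n";
            return true;
        }
        std::this_thread::sleep_for(std::chrono::milliseconds(100));
    }
    return false;
}

int main() {
    int calls = 0;
    // Pretend the root becomes visible on the third Ls request.
    WaitRootIsUp("dc-1", [&](const std::string&) { return ++calls >= 3; });
}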
------- [TM] {asan, default-linux-x86_64, release} ydb/core/health_check/ut/unittest >> THealthCheckTest::DontIgnoreServerlessWithExclusiveNodesWhenNotSpecific [GOOD] Test command err: 2025-05-07T08:47:15.215450Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501622672681937434:2060];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:47:15.215486Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003da4/r3tmp/tmpv5uK62/pdisk_1.dat 2025-05-07T08:47:16.037996Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:47:16.050690Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:47:16.050822Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:47:16.055768Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 25395, node 1 2025-05-07T08:47:16.152032Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:47:16.152061Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:47:16.152093Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:47:16.152227Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:28621 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:47:16.614859Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T08:47:16.646344Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T08:47:24.350654Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7501622713043919062:2058];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:47:24.379277Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003da4/r3tmp/tmpp2df57/pdisk_1.dat 2025-05-07T08:47:26.375955Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:27.930663Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:28.355092Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:47:28.355161Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:47:28.386611Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:47:28.388085Z node 2 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 20213, node 2 2025-05-07T08:47:29.340389Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7501622713043919062:2058];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:47:29.346794Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:47:29.426295Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:47:29.426602Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:47:29.426607Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:47:29.426692Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:25911 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:47:31.337341Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:47:33.796833Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T08:48:04.496822Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [3:700:2413], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:48:04.504104Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:48:04.504420Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:48:04.504766Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [4:697:2355], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:48:04.505094Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:48:04.505581Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003da4/r3tmp/tmpBcVXCW/pdisk_1.dat 2025-05-07T08:48:08.857963Z node 3 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 4493, node 3 TClient is connected to server localhost:65169 2025-05-07T08:48:13.901522Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:48:13.902007Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:48:13.902421Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:48:13.905282Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-05-07T08:48:40.609525Z node 5 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [5:698:2413], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:48:40.610195Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:48:40.610595Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:48:40.611315Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [6:695:2355], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:48:40.611479Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:48:40.611649Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003da4/r3tmp/tmpG5Vxcx/pdisk_1.dat 2025-05-07T08:48:41.247582Z node 5 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 9486, node 5 TClient is connected to server localhost:17510 2025-05-07T08:48:42.347095Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:48:42.347166Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:48:42.347213Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:48:42.347799Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration self_check_result: GOOD issue_log { id: "YELLOW-f489-1231c6b1" status: YELLOW message: "Database has compute issues" location { database { name: "/Root" } } reason: "YELLOW-1ba8-1231c6b1" type: "DATABASE" level: 1 } issue_log { id: "YELLOW-1ba8-1231c6b1" status: YELLOW message: "Compute is overloaded" location { database { name: "/Root" } } reason: "YELLOW-e9e2-1231c6b1-5" reason: "YELLOW-e9e2-1231c6b1-6" type: "COMPUTE" level: 2 } issue_log { id: "YELLOW-e9e2-1231c6b1-5" status: YELLOW message: "LoadAverage above 100%" location { compute { node { id: 5 host: "::1" port: 12001 } } database { name: "/Root" } } type: "LOAD_AVERAGE" level: 4 } issue_log { id: "YELLOW-e9e2-1231c6b1-6" status: YELLOW message: "LoadAverage above 100%" location { compute { node { id: 6 host: "::1" port: 12002 } } database { name: "/Root" } } type: "LOAD_AVERAGE" level: 4 } location { id: 5 host: "::1" port: 12001 } 2025-05-07T08:48:49.805161Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [7:530:2416], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:48:49.805739Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:48:49.805938Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003da4/r3tmp/tmpLvr2tW/pdisk_1.dat 2025-05-07T08:48:50.296818Z node 7 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 26524, node 7 TClient is connected to server localhost:26050 2025-05-07T08:48:50.930446Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:48:50.930524Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:48:50.930591Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:48:50.931008Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration >> PrivateApi::GetTask [GOOD] >> PrivateApi::Nodes >> TSchemeShardTest::CreateTopicOverDiskSpaceQuotas [GOOD] >> TSchemeShardTest::CreateSystemColumn >> THealthCheckTest::StorageNoQuota [GOOD] >> TSchemeShardTest::BackupBackupCollection-WithIncremental-true [GOOD] >> THealthCheckTest::TestBootingTabletIsNotDead >> THealthCheckTest::NoBscResponse [GOOD] >> THealthCheckTest::LayoutIncorrect >> TSchemeShardTest::NestedDirs [GOOD] >> TSchemeShardTest::NewOwnerOnDatabase >> test_sql_streaming.py::test[suites-GroupByHoppingWithDataWatermarks-default.txt] [FAIL] >> test_sql_streaming.py::test[suites-ReadTopic-default.txt] >> TSchemeShardTest::CreateSystemColumn [GOOD] >> TSchemeShardDecimalTypesInTables::CopyTableShouldNotFailOnDisabledFeatureFlag [GOOD] >> TSchemeShardDecimalTypesInTables::CreateWithWrongParameters >> THealthCheckTest::SpecificServerlessWithExclusiveNodes [GOOD] >> THealthCheckTest::SharedWhenTroublesWithExclusiveNodes >> test_sql_streaming.py::test[suites-GroupByHoppingWindow-default.txt] [FAIL] >> test_sql_streaming.py::test[suites-GroupByHoppingWindowByStringKey-default.txt] >> TObjectStorageListingTest::CornerCases [GOOD] >> TObjectStorageListingTest::Decimal >> TSchemeShardTest::NewOwnerOnDatabase [GOOD] >> TLocksTest::SetEraseSet [GOOD] >> THealthCheckTest::Issues100VCardMerging [GOOD] >> THealthCheckTest::Issues100Groups100VCardMerging ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_base/unittest >> TSchemeShardTest::BackupBackupCollection-WithIncremental-true [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:128:2058] recipient: [1:108:2140] 2025-05-07T08:47:23.124497Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, 
MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:47:23.125365Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:47:23.125654Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:47:23.126140Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:47:23.126418Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:47:23.126449Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:47:23.126525Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:47:23.126920Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:47:23.137686Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:47:23.143729Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:47:24.234668Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:47:24.234726Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:47:24.347715Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:47:24.348668Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:47:24.351812Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:47:24.489087Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:47:24.511625Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:47:24.518557Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:47:24.520397Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:47:24.543560Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:47:24.568682Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:47:24.569017Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 
2025-05-07T08:47:24.569380Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:47:24.574033Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:47:24.586256Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:47:24.588195Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:47:24.670477Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:239:2058] recipient: [1:15:2062] 2025-05-07T08:47:25.574574Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:47:25.574759Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:25.574950Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:47:25.575145Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:47:25.575229Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:25.590874Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:47:25.593286Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:47:25.606530Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:25.607066Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:47:25.607808Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:47:25.608076Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:47:25.661076Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:25.661401Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:47:25.666478Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:47:25.709516Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:25.709576Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:25.710255Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:47:25.710619Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:47:25.834778Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:47:25.876965Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:47:25.886890Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:47:25.929840Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:47:25.935822Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 130 RawX2: 4294969450 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:47:25.936199Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:47:25.951689Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:47:25.952019Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:47:25.968235Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:47:25.969013Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 
72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:47:25.998021Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:47:25.998371Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:47:26.010697Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:47:26.011385Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... essingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 32 PathsLimit: 10000 ShardsInside: 18 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 3 PQPartitionsLimit: 1000000 } UserAttributes { Key: "__incremental_backup" Value: "{}" } UserAttributes { Key: "__async_replica" Value: "true" } } PathId: 31 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:48:54.347069Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/.backups/collections/MyCollection1/19700101000000Z_incremental/DirA" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T08:48:54.348294Z node 16 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/.backups/collections/MyCollection1/19700101000000Z_incremental/DirA" took 1.24ms result status StatusSuccess 2025-05-07T08:48:54.348894Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/.backups/collections/MyCollection1/19700101000000Z_incremental/DirA" PathDescription { Self { Name: "DirA" PathId: 29 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 109 CreateStep: 5000010 ParentPathId: 28 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 8 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 8 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 7 } ChildrenExist: true } Children { Name: "DirB" PathId: 30 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 109 CreateStep: 5000010 ParentPathId: 29 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: true } Children { Name: "Table2" PathId: 32 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 109 CreateStep: 5000010 ParentPathId: 29 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 
PathId: 1 } PathsInside: 32 PathsLimit: 10000 ShardsInside: 18 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 3 PQPartitionsLimit: 1000000 } } PathId: 29 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:48:54.350570Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/.backups/collections/MyCollection1/19700101000000Z_incremental/DirA/Table2" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T08:48:54.352250Z node 16 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/.backups/collections/MyCollection1/19700101000000Z_incremental/DirA/Table2" took 1.72ms result status StatusSuccess 2025-05-07T08:48:54.353000Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/.backups/collections/MyCollection1/19700101000000Z_incremental/DirA/Table2" PathDescription { Self { Name: "Table2" PathId: 32 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 109 CreateStep: 5000010 ParentPathId: 29 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table2" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value0" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "value1" Type: "Utf8" TypeId: 4608 Id: 3 NotNull: false IsBuildInProgress: false } Columns { Name: "__ydb_incrBackupImpl_deleted" Type: "Bool" TypeId: 6 Id: 4 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 1 IsBackup: false ReplicationConfig { Mode: REPLICATION_MODE_READ_ONLY ConsistencyLevel: CONSISTENCY_LEVEL_ROW } IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 32 PathsLimit: 10000 ShardsInside: 18 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 3 PQPartitionsLimit: 1000000 
} UserAttributes { Key: "__incremental_backup" Value: "{}" } UserAttributes { Key: "__async_replica" Value: "true" } } PathId: 32 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:48:54.354621Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/.backups/collections/MyCollection1/19700101000000Z_incremental/DirA/DirB" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T08:48:54.355040Z node 16 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/.backups/collections/MyCollection1/19700101000000Z_incremental/DirA/DirB" took 386us result status StatusSuccess 2025-05-07T08:48:54.355619Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/.backups/collections/MyCollection1/19700101000000Z_incremental/DirA/DirB" PathDescription { Self { Name: "DirB" PathId: 30 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 109 CreateStep: 5000010 ParentPathId: 29 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 4 } ChildrenExist: true } Children { Name: "Table3" PathId: 33 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 109 CreateStep: 5000010 ParentPathId: 30 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 32 PathsLimit: 10000 ShardsInside: 18 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 3 PQPartitionsLimit: 1000000 } } PathId: 30 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:48:54.356889Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/.backups/collections/MyCollection1/19700101000000Z_incremental/DirA/DirB/Table3" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T08:48:54.357406Z node 16 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/.backups/collections/MyCollection1/19700101000000Z_incremental/DirA/DirB/Table3" took 534us result status StatusSuccess 2025-05-07T08:48:54.358077Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/.backups/collections/MyCollection1/19700101000000Z_incremental/DirA/DirB/Table3" PathDescription { Self { Name: "Table3" PathId: 33 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 109 CreateStep: 5000010 
ParentPathId: 30 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table3" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value0" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "__ydb_incrBackupImpl_deleted" Type: "Bool" TypeId: 6 Id: 3 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 1 IsBackup: false ReplicationConfig { Mode: REPLICATION_MODE_READ_ONLY ConsistencyLevel: CONSISTENCY_LEVEL_ROW } IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 32 PathsLimit: 10000 ShardsInside: 18 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 3 PQPartitionsLimit: 1000000 } UserAttributes { Key: "__incremental_backup" Value: "{}" } UserAttributes { Key: "__async_replica" Value: "true" } } PathId: 33 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TSchemeShardDecimalTypesInTables::CreateWithWrongParameters [GOOD] >> TSchemeShardDecimalTypesInTables::AlterWithWrongParameters >> TSchemeShardTest::AlterTableAndAfterSplit [GOOD] >> TSchemeShardTest::AlterIndexTableDirectly ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_base/unittest >> TSchemeShardTest::CreateSystemColumn [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:128:2058] recipient: [1:108:2140] 2025-05-07T08:47:17.831427Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:47:17.831513Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:47:17.831555Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 
2025-05-07T08:47:17.831590Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:47:17.831640Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:47:17.831668Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:47:17.831746Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:47:17.831871Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:47:17.832610Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:47:17.832995Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:47:18.132146Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:47:18.132468Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:47:18.269477Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:47:18.270407Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:47:18.271659Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:47:18.332472Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:47:18.333083Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:47:18.333689Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:47:18.333990Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:47:18.336225Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:47:18.337683Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:47:18.337745Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:47:18.337810Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:47:18.337856Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 
2025-05-07T08:47:18.337955Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:47:18.338137Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:47:18.344568Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:239:2058] recipient: [1:15:2062] 2025-05-07T08:47:18.588147Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:47:18.589342Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:18.590090Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:47:18.590454Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:47:18.590530Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:18.595524Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:47:18.596363Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:47:18.597127Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:18.597393Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:47:18.597639Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:47:18.597799Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:47:18.601498Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:18.601554Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:47:18.601592Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:47:18.606820Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 
1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:18.606984Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:18.607126Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:47:18.607511Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:47:18.629959Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:47:18.638442Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:47:18.639345Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:47:18.641789Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:47:18.641940Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 130 RawX2: 4294969450 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:47:18.642014Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:47:18.642288Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:47:18.642350Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:47:18.642502Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:47:18.642584Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:47:18.644493Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:47:18.644532Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 
72057594046678944, LocalPathId: 1] 2025-05-07T08:47:18.644697Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:47:18.644735Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... peration_copy_table.cpp:383: TCopyTable Propose, path: /MyRoot/SystemColumnInCopyAllowed, opId: 103:0, at schemeshard: 72057594046678944 2025-05-07T08:48:55.684196Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:319: AttachChild: child attached as only one child to the parent, parent id: [OwnerId: 72057594046678944, LocalPathId: 1], parent name: MyRoot, child name: SystemColumnInCopyAllowed, child id: [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-05-07T08:48:55.684297Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 0 2025-05-07T08:48:55.684369Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction source path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-05-07T08:48:55.684452Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason new shard created for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-05-07T08:48:55.684585Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-05-07T08:48:55.684831Z node 15 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 103:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:48:55.685536Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-05-07T08:48:55.685626Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-05-07T08:48:55.689195Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 103, response: Status: StatusAccepted TxId: 103 SchemeshardId: 72057594046678944 PathId: 3, at schemeshard: 72057594046678944 2025-05-07T08:48:55.689447Z node 15 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 103, database: /MyRoot, subject: , status: StatusAccepted, operation: CREATE TABLE, path: /MyRoot/SystemColumnInCopyAllowed 2025-05-07T08:48:55.689833Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:48:55.689900Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 103, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:48:55.690213Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 103, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-05-07T08:48:55.690347Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard 
DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:48:55.690423Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [15:206:2208], at schemeshard: 72057594046678944, txId: 103, path id: 1 2025-05-07T08:48:55.690507Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [15:206:2208], at schemeshard: 72057594046678944, txId: 103, path id: 3 2025-05-07T08:48:55.691128Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 103:0, at schemeshard: 72057594046678944 2025-05-07T08:48:55.691231Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 103:0 ProgressState, operation type: TxCopyTable, at tablet# 72057594046678944 2025-05-07T08:48:55.691638Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:357: TCreateParts opId# 103:0 CreateRequest Event to Hive: 72057594037968897 msg: Owner: 72057594046678944 OwnerIdx: 2 TabletType: DataShard ObjectDomain { SchemeShard: 72057594046678944 PathId: 1 } ObjectId: 3 BindedChannels { StoragePoolName: "pool-1" } BindedChannels { StoragePoolName: "pool-1" } BindedChannels { StoragePoolName: "pool-1" } AllowedDomains { SchemeShard: 72057594046678944 PathId: 1 } 2025-05-07T08:48:55.692901Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 6 PathOwnerId: 72057594046678944, cookie: 103 2025-05-07T08:48:55.693081Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 6 PathOwnerId: 72057594046678944, cookie: 103 2025-05-07T08:48:55.693153Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 103 2025-05-07T08:48:55.693228Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 103, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 6 2025-05-07T08:48:55.693324Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-05-07T08:48:55.694349Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 1 PathOwnerId: 72057594046678944, cookie: 103 2025-05-07T08:48:55.694446Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 1 PathOwnerId: 72057594046678944, cookie: 103 2025-05-07T08:48:55.694477Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 103 2025-05-07T08:48:55.694510Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 103, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 1 2025-05-07T08:48:55.694543Z node 15 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4 2025-05-07T08:48:55.694627Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 103, ready parts: 0/1, is published: true 2025-05-07T08:48:55.697913Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 103:0 from tablet: 72057594046678944 to tablet: 72057594037968897 cookie: 72057594046678944:2 msg type: 268697601 2025-05-07T08:48:55.698167Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 103, partId: 0, tablet: 72057594037968897 2025-05-07T08:48:55.698253Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1767: TOperation RegisterRelationByShardIdx, TxId: 103, shardIdx: 72057594046678944:2, partId: 0 2025-05-07T08:48:55.699555Z node 15 :HIVE INFO: tablet_helpers.cpp:1181: [72057594037968897] TEvCreateTablet, msg: Owner: 72057594046678944 OwnerIdx: 2 TabletType: DataShard ObjectDomain { SchemeShard: 72057594046678944 PathId: 1 } ObjectId: 3 BindedChannels { StoragePoolName: "pool-1" } BindedChannels { StoragePoolName: "pool-1" } BindedChannels { StoragePoolName: "pool-1" } AllowedDomains { SchemeShard: 72057594046678944 PathId: 1 } 2025-05-07T08:48:55.699899Z node 15 :HIVE INFO: tablet_helpers.cpp:1245: [72057594037968897] TEvCreateTablet, Owner 72057594046678944, OwnerIdx 2, type DataShard, boot OK, tablet id 72075186233409547 2025-05-07T08:48:55.700112Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5827: Handle TEvCreateTabletReply at schemeshard: 72057594046678944 message: Status: OK Owner: 72057594046678944 OwnerIdx: 2 TabletID: 72075186233409547 Origin: 72057594037968897 2025-05-07T08:48:55.700184Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1781: TOperation FindRelatedPartByShardIdx, TxId: 103, shardIdx: 72057594046678944:2, partId: 0 2025-05-07T08:48:55.700384Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 103:0, at schemeshard: 72057594046678944, message: Status: OK Owner: 72057594046678944 OwnerIdx: 2 TabletID: 72075186233409547 Origin: 72057594037968897 2025-05-07T08:48:55.700475Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:175: TCreateParts opId# 103:0 HandleReply TEvCreateTabletReply, at tabletId: 72057594046678944 2025-05-07T08:48:55.700593Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:178: TCreateParts opId# 103:0 HandleReply TEvCreateTabletReply, message: Status: OK Owner: 72057594046678944 OwnerIdx: 2 TabletID: 72075186233409547 Origin: 72057594037968897 2025-05-07T08:48:55.700750Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 103:0 2 -> 3 2025-05-07T08:48:55.702504Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-05-07T08:48:55.707748Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-05-07T08:48:55.712624Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 103:0, at schemeshard: 72057594046678944 2025-05-07T08:48:55.713407Z node 15 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 103:0, at schemeshard: 72057594046678944 2025-05-07T08:48:55.713525Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_copy_table.cpp:70: TCopyTable TConfigureParts operationId# 103:0 ProgressState at tablet# 72057594046678944 2025-05-07T08:48:55.713635Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_copy_table.cpp:102: TCopyTable TConfigureParts operationId# 103:0 Propose modify scheme on dstDatashard# 72075186233409547 idx# 72057594046678944:2 srcDatashard# 72075186233409546 idx# 72057594046678944:1 operationId# 103:0 seqNo# 2:2 at tablet# 72057594046678944 2025-05-07T08:48:55.718912Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 103:0 from tablet: 72057594046678944 to tablet: 72075186233409547 cookie: 72057594046678944:2 msg type: 269549568 2025-05-07T08:48:55.719124Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 103:0 from tablet: 72057594046678944 to tablet: 72075186233409546 cookie: 72057594046678944:1 msg type: 269549568 2025-05-07T08:48:55.719223Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 103, partId: 0, tablet: 72075186233409547 2025-05-07T08:48:55.719261Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 103, partId: 0, tablet: 72075186233409546 TestModificationResult got TxId: 103, wait until txId: 103 >> THealthCheckTest::IgnoreServerlessWhenNotSpecific [GOOD] >> THealthCheckTest::HealthCheckConfigUpdate >> THealthCheckTest::ServerlessWithExclusiveNodesWhenTroublesWithSharedNodes [GOOD] >> TFlatTest::CopyTableAndAddFollowers [GOOD] >> TFlatTest::CopyCopiedTableAndDropFirstCopy >> TSchemeShardDecimalTypesInTables::AlterWithWrongParameters [GOOD] >> TSchemeShardInfoTypesTest::EmptyFamilies [GOOD] >> TSchemeShardInfoTypesTest::LostId [GOOD] >> TSchemeShardInfoTypesTest::DeduplicationOrder [GOOD] >> TSchemeShardInfoTypesTest::MultipleDeduplications [GOOD] >> TSchemeShardPgTypesInTables::AlterTableAddPgTypeColumn-EnableTablePgTypes-false ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_base/unittest >> TSchemeShardTest::NewOwnerOnDatabase [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140] 2025-05-07T08:47:26.246064Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:47:26.246972Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:47:26.247284Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:47:26.247577Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: 
OperationsProcessing config: using default configuration 2025-05-07T08:47:26.247891Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:47:26.248439Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:47:26.249046Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:47:26.249651Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:47:26.275600Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:47:26.280308Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:47:27.458609Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:47:27.458967Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:47:27.612416Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:47:27.613544Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:47:27.615397Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:47:27.690789Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:47:27.695890Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:47:27.709306Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:47:27.710543Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:47:27.767003Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:47:27.772223Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:47:27.772304Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:47:27.772399Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:47:27.772440Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:47:27.773029Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: 
TTxServerlessStorageBilling.Complete 2025-05-07T08:47:27.773275Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:47:27.835670Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T08:47:29.392804Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:47:29.414677Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:29.416576Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:47:29.445195Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:47:29.445892Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:29.492752Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:47:29.495004Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:47:29.497023Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:29.497668Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:47:29.497997Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:47:29.498567Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:47:29.517674Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:29.517735Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:47:29.518350Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:47:29.526945Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:29.527009Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:29.527059Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:47:29.527109Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:47:29.539929Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:47:29.560928Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:47:29.570940Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:47:29.585872Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:47:29.586023Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:47:29.587016Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:47:29.598366Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:47:29.598721Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:47:29.602015Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:47:29.602702Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:47:29.642093Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:47:29.642472Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:47:29.643890Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:47:29.644149Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 1P\nX\200\200\001`nh\000p\000Rb\010\001\020\200\200\200\024\030\005 \020(\200\200\200\200\0020\377\377\377\377\0178\000B$\010e\020d\031\000\000\000\000\000\000\360?*\025background_compactionJ\017compaction_gen2P\nX\200\200\001`nh\200\200\200\004p\200\200\200\004Rc\010\002\020\200\200\200\310\001\030\005 \020(\200\200\200\200@0\377\377\377\377\0178\000B$\010e\020d\031\000\000\000\000\000\000\360?*\025background_compactionJ\017compaction_gen3P\nX\200\200\001`nh\200\200\200(p\200\200\200(X\001`\005j$\010e\020d\031\000\000\000\000\000\000\360?*\025background_compactionr\017compaction_gen0z\017compaction_gen0\202\001\004scan\210\001\200\200\200\010\220\001\364\003\230\0012\270\001\2008\300\001\006R\002\020\001J\r/MyRoot/Table\242\001\006\001\000\000\000\000\200\252\001\000\260\001\001\270\001\000\210\002\001\222\002\013\t\240\207\205\000\000\000\000\001\020\005:\004\010\002\020\001" TxId: 107 ExecLevel: 0 Flags: 0 SchemeShardId: 72057594046678944 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } SubDomainPathId: 1 2025-05-07T08:48:56.933196Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 107:0 from tablet: 72057594046678944 to tablet: 72075186233409549 cookie: 72057594046678944:4 msg type: 269549568 2025-05-07T08:48:56.933447Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 107, partId: 0, tablet: 72075186233409549 TestModificationResult got TxId: 107, wait until txId: 107 TestModificationResults wait txId: 108 2025-05-07T08:48:57.029030Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpModifyACL ModifyACL { Name: "Table" NewOwner: "user1" } ApplyIf { PathTypes: EPathTypeSubDomain PathTypes: EPathTypeExtSubDomain } } TxId: 108 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:48:57.029356Z node 15 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_modify_acl.cpp:33: TModifyACL Propose, path: /MyRoot/Table, operationId: 108:0, at schemeshard: 72057594046678944 2025-05-07T08:48:57.029562Z node 15 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 108:1, propose status:StatusPreconditionFailed, reason: fail in ApplyIf section: wrong Path type. Expected types: EPathTypeSubDomain, EPathTypeExtSubDomain; But actual Path type is EPathTypeTable, at schemeshard: 72057594046678944 2025-05-07T08:48:57.032702Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 108, response: Status: StatusPreconditionFailed Reason: "fail in ApplyIf section: wrong Path type. Expected types: EPathTypeSubDomain, EPathTypeExtSubDomain; But actual Path type is EPathTypeTable" TxId: 108 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:48:57.032967Z node 15 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 108, database: /MyRoot, subject: , status: StatusPreconditionFailed, reason: fail in ApplyIf section: wrong Path type. 
Expected types: EPathTypeSubDomain, EPathTypeExtSubDomain; But actual Path type is EPathTypeTable, operation: MODIFY ACL, path: /MyRoot/Table, set owner:user1 TestModificationResult got TxId: 108, wait until txId: 108 TestModificationResults wait txId: 109 2025-05-07T08:48:57.036935Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpModifyACL ModifyACL { Name: "Table" NewOwner: "user1" } } TxId: 109 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:48:57.037182Z node 15 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_modify_acl.cpp:33: TModifyACL Propose, path: /MyRoot/Table, operationId: 109:0, at schemeshard: 72057594046678944 2025-05-07T08:48:57.037326Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5180: ExamineTreeVFS visit path id [OwnerId: 72057594046678944, LocalPathId: 5] name: Table type: EPathTypeTable state: EPathStateCreate stepDropped: 0 droppedTxId: 0 parent: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:48:57.037389Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5196: ExamineTreeVFS run path id: [OwnerId: 72057594046678944, LocalPathId: 5] 2025-05-07T08:48:57.037622Z node 15 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 109:1, propose status:StatusSuccess, reason: , at schemeshard: 72057594046678944 2025-05-07T08:48:57.037693Z node 15 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 109:0, at schemeshard: 72057594046678944 2025-05-07T08:48:57.037824Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#109:0 progress is 1/1 2025-05-07T08:48:57.037881Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 109 ready parts: 1/1 2025-05-07T08:48:57.037955Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#109:0 progress is 1/1 2025-05-07T08:48:57.038038Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 109 ready parts: 1/1 2025-05-07T08:48:57.038132Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 3 2025-05-07T08:48:57.038248Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 4 2025-05-07T08:48:57.038301Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 109, ready parts: 1/1, is published: false 2025-05-07T08:48:57.038373Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 5], at schemeshard: 72057594046678944 2025-05-07T08:48:57.038437Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 109 ready parts: 1/1 2025-05-07T08:48:57.038496Z node 15 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 109:0 2025-05-07T08:48:57.038566Z node 15 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication 
still in progress, tx: 109, publications: 2, subscribers: 0 2025-05-07T08:48:57.038618Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 109, [OwnerId: 72057594046678944, LocalPathId: 1], 14 2025-05-07T08:48:57.038666Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 109, [OwnerId: 72057594046678944, LocalPathId: 5], 2 2025-05-07T08:48:57.041663Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 109, response: Status: StatusSuccess TxId: 109 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:48:57.041931Z node 15 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 109, database: /MyRoot, subject: , status: StatusSuccess, operation: MODIFY ACL, path: /MyRoot/Table, set owner:user1 2025-05-07T08:48:57.042271Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:48:57.042343Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 109, path id: [OwnerId: 72057594046678944, LocalPathId: 5] 2025-05-07T08:48:57.042531Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 109, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:48:57.042764Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:48:57.042839Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [15:206:2208], at schemeshard: 72057594046678944, txId: 109, path id: 5 2025-05-07T08:48:57.042935Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [15:206:2208], at schemeshard: 72057594046678944, txId: 109, path id: 1 2025-05-07T08:48:57.043718Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 5 Version: 2 PathOwnerId: 72057594046678944, cookie: 109 2025-05-07T08:48:57.043883Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 5 Version: 2 PathOwnerId: 72057594046678944, cookie: 109 2025-05-07T08:48:57.043952Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 109 2025-05-07T08:48:57.044023Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 109, pathId: [OwnerId: 72057594046678944, LocalPathId: 5], version: 2 2025-05-07T08:48:57.044096Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 4 2025-05-07T08:48:57.044714Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 
72057594046678944 Generation: 2 LocalPathId: 1 Version: 14 PathOwnerId: 72057594046678944, cookie: 109 2025-05-07T08:48:57.044795Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 14 PathOwnerId: 72057594046678944, cookie: 109 2025-05-07T08:48:57.044823Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 109 2025-05-07T08:48:57.044853Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 109, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 14 2025-05-07T08:48:57.044884Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 5 2025-05-07T08:48:57.044966Z node 15 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 109, subscribers: 0 2025-05-07T08:48:57.047986Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 109 2025-05-07T08:48:57.048391Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 109 TestModificationResult got TxId: 109, wait until txId: 109 >> test_sql_streaming.py::test[suites-ReadTopicWithSchema-default.txt] [FAIL] >> test_sql_streaming.py::test[suites-ReadTwoTopics-default.txt] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TLocksTest::SetEraseSet [GOOD] Test command err: 2025-05-07T08:48:43.886907Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501623050343431018:2126];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:48:43.886955Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/004a3e/r3tmp/tmpgPRnur/pdisk_1.dat 2025-05-07T08:48:45.075881Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:48:45.075963Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:48:45.076727Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:48:45.078024Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:48:45.103564Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TClient is connected to server localhost:8865 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-05-07T08:48:45.454294Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-05-07T08:48:45.508450Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-05-07T08:48:45.514045Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:48:45.759341Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:48:45.864064Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T08:48:45.958653Z node 1 :TX_DATASHARD ERROR: datashard_pipeline.cpp:1570: Shard 72075186224037888 cannot parse tx 281474976710662: Validate (783): Key validation status: 3 2025-05-07T08:48:45.962388Z node 1 :TX_PROXY ERROR: datareq.cpp:1873: Actor# [1:7501623058933366437:2497] txid# 281474976710662 HANDLE Prepare TEvProposeTransactionResult TDataReq TabletStatus# StatusWait GetStatus# ERROR shard id 72075186224037888 read size 0 out readset size 0 marker# P6 2025-05-07T08:48:45.962481Z node 1 :TX_PROXY ERROR: datareq.cpp:2071: Actor# [1:7501623058933366437:2497] txid# 281474976710662 HANDLE PrepareErrors TEvProposeTransactionResult TDataReq TabletStatus# StatusWait shard id 72075186224037888 2025-05-07T08:48:45.962517Z node 1 :TX_PROXY ERROR: datareq.cpp:1274: Actor# [1:7501623058933366437:2497] txid# 281474976710662 invalidateDistCache: 1 DIE TDataReq MarkShardError TabletsLeft# 1 2025-05-07T08:48:45.965758Z node 1 :TX_DATASHARD ERROR: datashard_pipeline.cpp:1570: Shard 72075186224037888 cannot parse tx 281474976710663: Validate (783): Key validation status: 3 2025-05-07T08:48:45.966030Z node 1 :TX_PROXY ERROR: datareq.cpp:1873: Actor# [1:7501623058933366459:2504] txid# 281474976710663 HANDLE Prepare TEvProposeTransactionResult TDataReq TabletStatus# StatusWait GetStatus# ERROR shard id 72075186224037888 read size 0 out readset size 0 marker# P6 2025-05-07T08:48:45.966105Z node 1 :TX_PROXY ERROR: datareq.cpp:2071: Actor# [1:7501623058933366459:2504] txid# 281474976710663 HANDLE PrepareErrors TEvProposeTransactionResult TDataReq TabletStatus# StatusWait shard id 72075186224037888 2025-05-07T08:48:45.966139Z node 1 :TX_PROXY ERROR: datareq.cpp:1274: Actor# [1:7501623058933366459:2504] txid# 281474976710663 invalidateDistCache: 1 DIE TDataReq MarkShardError TabletsLeft# 1 DataShardErrors: [SCHEME_ERROR] Validate (783): Key validation status: 3 proxy error code: ProxyShardNotAvailable 2025-05-07T08:48:45.974957Z node 1 :TX_DATASHARD ERROR: datashard_pipeline.cpp:1570: Shard 72075186224037888 cannot parse tx 281474976710664: Validate (783): Key validation status: 3 2025-05-07T08:48:45.975237Z node 1 :TX_PROXY ERROR: datareq.cpp:1873: Actor# [1:7501623058933366466:2508] txid# 281474976710664 HANDLE Prepare TEvProposeTransactionResult TDataReq TabletStatus# StatusWait GetStatus# ERROR shard id 72075186224037888 read size 0 out readset size 0 marker# P6 2025-05-07T08:48:45.975300Z node 1 :TX_PROXY ERROR: datareq.cpp:2071: Actor# [1:7501623058933366466:2508] txid# 281474976710664 HANDLE PrepareErrors TEvProposeTransactionResult TDataReq TabletStatus# StatusWait shard id 72075186224037888 2025-05-07T08:48:45.975330Z node 1 :TX_PROXY ERROR: datareq.cpp:1274: Actor# [1:7501623058933366466:2508] txid# 281474976710664 invalidateDistCache: 1 DIE TDataReq MarkShardError TabletsLeft# 1 2025-05-07T08:48:45.978167Z node 1 :TX_DATASHARD ERROR: datashard_pipeline.cpp:1570: Shard 72075186224037888 cannot parse tx 281474976710665: Validate (783): Key validation status: 3 DataShardErrors: [SCHEME_ERROR] Validate (783): Key validation status: 3 proxy error code: ProxyShardNotAvailable 2025-05-07T08:48:45.978493Z node 1 :TX_PROXY ERROR: datareq.cpp:1873: Actor# [1:7501623058933366472:2511] txid# 281474976710665 HANDLE Prepare TEvProposeTransactionResult TDataReq TabletStatus# StatusWait GetStatus# ERROR shard id 72075186224037888 read size 0 out readset size 0 marker# P6 2025-05-07T08:48:45.978565Z node 1 :TX_PROXY ERROR: datareq.cpp:2071: Actor# [1:7501623058933366472:2511] txid# 281474976710665 HANDLE 
PrepareErrors TEvProposeTransactionResult TDataReq TabletStatus# StatusWait shard id 72075186224037888 2025-05-07T08:48:45.978591Z node 1 :TX_PROXY ERROR: datareq.cpp:1274: Actor# [1:7501623058933366472:2511] txid# 281474976710665 invalidateDistCache: 1 DIE TDataReq MarkShardError TabletsLeft# 1 2025-05-07T08:48:49.122916Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7501623077681992631:2063];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:48:49.122984Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/004a3e/r3tmp/tmpILbMCW/pdisk_1.dat 2025-05-07T08:48:49.300303Z node 2 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:48:49.320161Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:48:49.320260Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:48:49.323547Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:17976 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-05-07T08:48:49.553869Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-05-07T08:48:49.573091Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:48:49.676382Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:48:49.751613Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T08:48:53.209386Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7501623095404154633:2211];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/004a3e/r3tmp/tmpZRwhGQ/pdisk_1.dat 2025-05-07T08:48:53.224770Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-05-07T08:48:53.309475Z node 3 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:48:53.321780Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:48:53.321865Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:48:53.323631Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:25387 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-05-07T08:48:53.634199Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:48:53.638923Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:48:53.655191Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:48:53.745149Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:48:53.825586Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 
------- [TM] {asan, default-linux-x86_64, release} ydb/core/health_check/ut/unittest >> THealthCheckTest::ServerlessWithExclusiveNodesWhenTroublesWithSharedNodes [GOOD] Test command err: 2025-05-07T08:47:46.196839Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:698:2413], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:47:46.197460Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:46.197683Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:47:46.214810Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:695:2355], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:47:46.217432Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:46.217481Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003d2a/r3tmp/tmpjxqGjQ/pdisk_1.dat 2025-05-07T08:47:51.596883Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 7260, node 1 TClient is connected to server localhost:6412 2025-05-07T08:47:55.381750Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:47:55.382104Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:47:55.382411Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:47:55.388019Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-05-07T08:48:21.926095Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [3:700:2413], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:48:21.926773Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:48:21.927085Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:48:21.927474Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [4:697:2355], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:48:21.927814Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:48:21.927956Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003d2a/r3tmp/tmpfMok7g/pdisk_1.dat 2025-05-07T08:48:22.300729Z node 3 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 4294, node 3 TClient is connected to server localhost:27632 2025-05-07T08:48:22.863092Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:48:22.863149Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:48:22.863187Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:48:22.863424Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-05-07T08:48:37.480577Z node 5 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [5:451:2413], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:48:37.482385Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:48:37.483347Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003d2a/r3tmp/tmpLBzTY0/pdisk_1.dat 2025-05-07T08:48:39.196805Z node 5 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 20888, node 5 TClient is connected to server localhost:62022 2025-05-07T08:48:40.214419Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:48:40.214484Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:48:40.214523Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:48:40.215036Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-05-07T08:48:45.937108Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [7:326:2368], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:48:45.937723Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:48:45.937832Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003d2a/r3tmp/tmpsThWLY/pdisk_1.dat TServer::EnableGrpc on GrpcPort 25608, node 7 TClient is connected to server localhost:12315 2025-05-07T08:48:54.876115Z node 8 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [8:451:2413], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:48:54.876508Z node 8 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:48:54.876647Z node 8 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003d2a/r3tmp/tmpxuE72v/pdisk_1.dat 2025-05-07T08:48:55.527333Z node 8 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 1886, node 8 TClient is connected to server localhost:20896 2025-05-07T08:48:56.989931Z node 8 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:48:56.994213Z node 8 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:48:56.994301Z node 8 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:48:56.995602Z node 8 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration >> THealthCheckTest::ProtobufBelowLimitFor10VdisksIssues [GOOD] >> THealthCheckTest::ProtobufUnderLimitFor100LargeVdisksIssues |88.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/ut/ydb-core-tx-columnshard-engines-ut |88.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/columnshard/engines/ut/ydb-core-tx-columnshard-engines-ut >> TSchemeShardPgTypesInTables::AlterTableAddPgTypeColumn-EnableTablePgTypes-false [GOOD] >> TSchemeShardPgTypesInTables::AlterTableAddPgTypeColumn-EnableTablePgTypes-true |88.5%| [TM] {RESULT} ydb/library/yql/tests/sql/solomon/pytest |88.5%| [LD] {RESULT} $(B)/ydb/core/tx/columnshard/engines/ut/ydb-core-tx-columnshard-engines-ut >> THealthCheckTest::ShardsLimit800 [GOOD] >> THealthCheckTest::ShardsNoLimit >> TObjectStorageListingTest::Decimal [GOOD] |88.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/replication/service/ut_table_writer/ydb-core-tx-replication-service-ut_table_writer |88.5%| [LD] {RESULT} $(B)/ydb/core/tx/replication/service/ut_table_writer/ydb-core-tx-replication-service-ut_table_writer |88.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/replication/service/ut_table_writer/ydb-core-tx-replication-service-ut_table_writer |88.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/services/persqueue_v1/ut/describes_ut/ydb-services-persqueue_v1-ut-describes_ut |88.5%| [LD] {RESULT} $(B)/ydb/services/persqueue_v1/ut/describes_ut/ydb-services-persqueue_v1-ut-describes_ut |88.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/services/persqueue_v1/ut/describes_ut/ydb-services-persqueue_v1-ut-describes_ut >> test_sql_streaming.py::test[suites-GroupByHopByStringKey-default.txt] [FAIL] >> test_sql_streaming.py::test[suites-GroupByHopExprKey-default.txt] >> Yq_1::CreateConnections_With_Idempotency [GOOD] >> TSchemeShardPgTypesInTables::AlterTableAddPgTypeColumn-EnableTablePgTypes-true [GOOD] |88.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_minikql/ydb-core-tx-datashard-ut_minikql |88.6%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_minikql/ydb-core-tx-datashard-ut_minikql |88.6%| [LD] 
{BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_minikql/ydb-core-tx-datashard-ut_minikql |88.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/scheme_board/ut_cache/ydb-core-tx-scheme_board-ut_cache |88.6%| [LD] {RESULT} $(B)/ydb/core/tx/scheme_board/ut_cache/ydb-core-tx-scheme_board-ut_cache |88.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/scheme_board/ut_cache/ydb-core-tx-scheme_board-ut_cache >> TBlobStorageWardenTest::TestCreatePDiskAndEncryptedGroup [GOOD] >> Yq_1::ListConnectionsOnEmptyConnectionsTable [GOOD] >> TFlatTest::CopyCopiedTableAndDropFirstCopy [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TObjectStorageListingTest::Decimal [GOOD] Test command err: 2025-05-07T08:48:51.923620Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501623084463609629:2069];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:48:51.924620Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/004a3c/r3tmp/tmpfWx42U/pdisk_1.dat 2025-05-07T08:48:52.753653Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:48:52.760879Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:48:52.761017Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:48:52.763851Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 23435, node 1 2025-05-07T08:48:52.955960Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:48:52.955985Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:48:52.955991Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:48:52.956117Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:1175 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 
2025-05-07T08:48:53.355302Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-05-07T08:48:53.388673Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:48:56.882376Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7501623107487949972:2191];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:48:56.883152Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/004a3c/r3tmp/tmpXfX5ZS/pdisk_1.dat 2025-05-07T08:48:57.328539Z node 2 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:48:57.375858Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:48:57.375940Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:48:57.379109Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 9693, node 2 2025-05-07T08:48:57.598619Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:48:57.598644Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:48:57.598651Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:48:57.598757Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:25186 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-05-07T08:48:58.019194Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T08:48:58.024386Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:48:58.045545Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-05-07T08:48:58.057936Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... >> TYardTest::TestMultiYardHarakiri [GOOD] >> TYardTest::TestStartingPointReboots >> THealthCheckTest::LayoutIncorrect [GOOD] >> THealthCheckTest::LayoutCorrect ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut_fat/unittest >> TBlobStorageWardenTest::TestCreatePDiskAndEncryptedGroup [GOOD] Test command err: 2025-05-07T08:48:56.131841Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[3e000000:_:0:1:0]: (1040187392) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [3e000000:1:0:2:0] targetVDisk# [3e000000:1:0:1:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-05-07T08:48:56.133173Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[3e000000:_:0:1:0]: (1040187392) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [3e000000:1:0:3:0] targetVDisk# [3e000000:1:0:1:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-05-07T08:48:56.175084Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[3e000000:_:0:2:0]: (1040187392) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [3e000000:1:0:3:0] targetVDisk# [3e000000:1:0:2:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-05-07T08:48:58.620882Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[3e000000:_:0:1:0]: (1040187392) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [3e000000:1:0:0:0] targetVDisk# [3e000000:1:0:1:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-05-07T08:48:58.621001Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[3e000000:_:0:2:0]: (1040187392) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [3e000000:1:0:0:0] targetVDisk# [3e000000:1:0:2:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-05-07T08:48:58.621061Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[3e000000:_:0:3:0]: (1040187392) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [3e000000:1:0:0:0] targetVDisk# [3e000000:1:0:3:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-05-07T08:48:59.576883Z node 1 :BS_CONTROLLER ERROR: {BSC07@impl.h:2175} ProcessControllerEvent event processing took too much time Type# 268637706 Duration# 0.133019s 2025-05-07T08:48:59.577034Z node 1 :BS_CONTROLLER ERROR: {BSC00@bsc.cpp:666} StateWork event processing took too much time Type# 2146435078 Duration# 0.133207s 2025-05-07T08:48:59.965602Z node 1 :BS_CONTROLLER ERROR: {BSC07@impl.h:2175} ProcessControllerEvent event processing took too much time Type# 268637706 Duration# 0.134865s 2025-05-07T08:48:59.965728Z node 1 :BS_CONTROLLER ERROR: {BSC00@bsc.cpp:666} StateWork event processing took too much time Type# 2146435078 Duration# 0.135019s Sending TEvPut Sending TEvGet Sending TEvVGet Sending TEvPut Sending TEvGet ------- [TM] {asan, default-linux-x86_64, release} ydb/services/fq/ut_integration/unittest >> Yq_1::CreateConnections_With_Idempotency [GOOD] 
Test command err: 2025-05-07T08:47:32.120202Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501622744973756961:2279];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:47:32.120367Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-05-07T08:47:37.126321Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501622744973756961:2279];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:47:37.134441Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; E0507 08:47:37.399019259 107762 dns_resolver.cc:162] no server name supplied in dns URI E0507 08:47:37.399157051 107762 channel.cc:120] channel stack builder failed: UNKNOWN: the target uri is not valid: dns:/// 2025-05-07T08:47:37.417194Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:37.698951Z node 1 :YQ_CONTROL_PLANE_STORAGE ERROR: schema.cpp:160: Create directory "Root/yq" error: TRANSPORT_UNAVAILABLE [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:6579: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:6579 } ] 2025-05-07T08:47:38.141548Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:38.423588Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:39.122508Z node 1 :YQL_NODES_MANAGER ERROR: nodes_manager.cpp:364: ydb/core/fq/libs/actors/nodes_manager.cpp:322: TRANSPORT_UNAVAILABLE
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:6579: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint localhost:6579 2025-05-07T08:47:39.169261Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:39.232961Z node 1 :YQ_CONTROL_PLANE_STORAGE ERROR: schema.cpp:160: Create directory "Root/yq" error: TRANSPORT_UNAVAILABLE [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:6579: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:6579 } ] 2025-05-07T08:47:39.458903Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:40.194108Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:40.454300Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:40.947880Z node 1 :YQ_CONTROL_PLANE_STORAGE ERROR: schema.cpp:160: Create directory "Root/yq" error: TRANSPORT_UNAVAILABLE [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:6579: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:6579 } ] 2025-05-07T08:47:41.222271Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:41.499216Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:42.235869Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; E0507 08:47:42.501447624 108311 dns_resolver.cc:162] no server name supplied in dns URI E0507 08:47:42.503047650 108311 channel.cc:120] channel stack builder failed: UNKNOWN: the target uri is not valid: dns:/// 2025-05-07T08:47:42.540793Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:43.240972Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:43.547003Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:43.762251Z node 1 :YQ_CONTROL_PLANE_STORAGE ERROR: schema.cpp:160: Create directory "Root/yq" error: TRANSPORT_UNAVAILABLE [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:6579: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:6579 } ] 2025-05-07T08:47:44.789997Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:44.828754Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:45.796127Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:45.853706Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:46.806481Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:46.858537Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; E0507 08:47:47.874431038 108311 dns_resolver.cc:162] no server name supplied in dns URI E0507 08:47:47.874894898 108311 channel.cc:120] channel stack builder failed: UNKNOWN: the target uri is not valid: dns:/// 2025-05-07T08:47:47.888758Z node 1 :YQL_NODES_MANAGER ERROR: nodes_manager.cpp:364: ydb/core/fq/libs/actors/nodes_manager.cpp:322: TRANSPORT_UNAVAILABLE
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:6579: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint localhost:6579 2025-05-07T08:47:47.894328Z node 1 :YQL_NODES_MANAGER ERROR: nodes_manager.cpp:364: ydb/core/fq/libs/actors/nodes_manager.cpp:322: TRANSPORT_UNAVAILABLE
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:6579: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint localhost:6579 2025-05-07T08:47:47.908625Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:47.908652Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:47.988065Z node 1 :YQ_CONTROL_PLANE_STORAGE ERROR: schema.cpp:160: Create directory "Root/yq" error: TRANSPORT_UNAVAILABLE [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:6579: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:6579 } ] 2025-05-07T08:47:48.905925Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:48.927195Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:50.034132Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:50.034156Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:51.069357Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:51.079498Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:52.056004Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:52.078874Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; E0507 08:47:53.003188223 108309 dns_resolver.cc:162] no server name supplied in dns URI E0507 08:47:53.004602078 108309 channel.cc:120] channel stack builder failed: UNKNOWN: the target uri is not valid: dns:/// 2025-05-07T08:47:53.066409Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 202 ... 
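[Annotation: the recurring dns_resolver.cc / channel.cc pair above ("no server name supplied in dns URI" / "the target uri is not valid: dns:///") is the generic gRPC C-core symptom of creating a channel from an empty target string: C-core canonicalizes "" against the default dns resolver to "dns:///", which has no host component. A minimal standalone C++ sketch of the symptom, under the assumption that an endpoint string arrives empty from configuration; this is illustrative, not YDB's channel-setup code:]

    #include <grpcpp/grpcpp.h>
    #include <iostream>
    #include <string>

    int main() {
      std::string endpoint;  // hypothetical: would normally come from config; empty here
      // An empty target is canonicalized to "dns:///", which has no authority/host,
      // so the DNS resolver logs "no server name supplied in dns URI" and the
      // channel stack builder fails with "the target uri is not valid: dns:///".
      auto broken = grpc::CreateChannel(endpoint, grpc::InsecureChannelCredentials());
      // CreateChannel never returns null -- it hands back a "lame" channel on
      // which every RPC fails -- so guard the endpoint before dialing:
      if (endpoint.empty()) {
        std::cerr << "endpoint not configured, refusing to dial\n";
        return 1;
      }
      auto channel = grpc::CreateChannel(endpoint, grpc::InsecureChannelCredentials());
      return 0;
    }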
pp:648: SyncQuota finished with error: 2025-05-07T08:49:00.624934Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: [the same FQ_QUOTA_SERVICE record repeats roughly a hundred more times with consecutive timestamps through 2025-05-07T08:49:00.633622Z; the repeats are elided here]
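[Annotation: judging by the timestamps, the burst condensed above packs on the order of a hundred identical SyncQuota failures into roughly ten milliseconds, which is characteristic of a retry or per-request fan-out path with no backoff while the control-plane endpoint is down. A generic sketch of a capped-exponential-backoff sync loop; names such as TrySyncQuota are illustrative stand-ins, not the quota_manager implementation:]

    #include <algorithm>
    #include <chrono>
    #include <iostream>
    #include <thread>

    // Hypothetical stand-in for the failing sync call against control-plane storage.
    bool TrySyncQuota() { return false; }

    int main() {
      using namespace std::chrono;
      milliseconds delay{50};
      const milliseconds maxDelay{5000};
      for (int attempt = 1; attempt <= 8; ++attempt) {
        if (TrySyncQuota()) {
          std::cout << "quota synced on attempt " << attempt << "\n";
          return 0;
        }
        // Log once per attempt, then wait; doubling the delay with a cap keeps
        // a dead endpoint from producing hundreds of errors per second.
        std::cerr << "SyncQuota failed, retrying in " << delay.count() << "ms\n";
        std::this_thread::sleep_for(delay);
        delay = std::min(delay * 2, maxDelay);
      }
      std::cerr << "giving up after 8 attempts\n";
      return 1;
    }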
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_base/unittest >> TSchemeShardPgTypesInTables::AlterTableAddPgTypeColumn-EnableTablePgTypes-true [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140] 2025-05-07T08:47:20.897694Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:47:20.897762Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:47:20.897794Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:47:20.897843Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:47:20.897884Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:47:20.897911Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:47:20.897955Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:47:20.898035Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:47:20.902319Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: 
ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:47:20.902682Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:47:21.221046Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:47:21.221266Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:47:21.272282Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:47:21.272551Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:47:21.272748Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:47:21.290416Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:47:21.290867Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:47:21.291607Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:47:21.291833Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:47:21.310616Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:47:21.312300Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:47:21.312380Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:47:21.312502Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:47:21.312562Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:47:21.312672Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:47:21.312908Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:47:21.328437Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T08:47:22.004234Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:47:22.006447Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at 
schemeshard: 72057594046678944 2025-05-07T08:47:22.008557Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:47:22.027994Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:47:22.029054Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:22.056049Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:47:22.062182Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:47:22.063929Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:22.064301Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:47:22.065026Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:47:22.065354Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:47:22.089635Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:22.090646Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:47:22.091263Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:47:22.110010Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:22.110077Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:22.110993Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:47:22.111587Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:47:22.159061Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:47:22.170795Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 
1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:47:22.170991Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:47:22.172863Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:47:22.174224Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:47:22.174491Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:47:22.177041Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:47:22.177644Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:47:22.184036Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:47:22.184955Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:47:22.203012Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:47:22.203074Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:47:22.203261Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:47:22.203299Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 
LAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_table.cpp:418: TAlterTable TPropose operationId# 102:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:49:02.166650Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 102 ready parts: 1/1 2025-05-07T08:49:02.166986Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } AffectedSet { TabletId: 72075186233409546 Flags: 2 } ExecLevel: 0 TxId: 102 MinStep: 5000003 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:49:02.175580Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 102:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:102 msg type: 269090816 2025-05-07T08:49:02.175823Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 102, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 102 at step: 5000003 FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000002 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 102 at step: 5000003 FAKE_COORDINATOR: Send Plan to tablet 72075186233409546 for txId: 102 at step: 5000003 2025-05-07T08:49:02.176736Z node 12 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000003, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:49:02.176918Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 102 Coordinator: 72057594046316545 AckTo { RawX1: 123 RawX2: 51539609701 } } Step: 5000003 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:49:02.177010Z node 12 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_table.cpp:359: TAlterTable TPropose operationId# 102:0 HandleReply TEvOperationPlan, operationId: 102:0, stepId: 5000003, at schemeshard: 72057594046678944 2025-05-07T08:49:02.179592Z node 12 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 102:0 128 -> 129 2025-05-07T08:49:02.179963Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000003 2025-05-07T08:49:02.192316Z node 12 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:49:02.192459Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 102, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-05-07T08:49:02.193095Z node 12 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:49:02.193222Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [12:204:2206], at schemeshard: 72057594046678944, txId: 102, path id: 2 2025-05-07T08:49:02.193814Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-05-07T08:49:02.193925Z node 12 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1036: NTableState::TProposedWaitParts operationId# 102:0 ProgressState at tablet: 72057594046678944 2025-05-07T08:49:02.195304Z node 12 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 4 PathOwnerId: 72057594046678944, cookie: 102 2025-05-07T08:49:02.195512Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 4 PathOwnerId: 72057594046678944, cookie: 102 2025-05-07T08:49:02.195594Z node 12 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 102 2025-05-07T08:49:02.195675Z node 12 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 4 2025-05-07T08:49:02.195775Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-05-07T08:49:02.195950Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 102, ready parts: 0/1, is published: true FAKE_COORDINATOR: Erasing txId 102 2025-05-07T08:49:02.198335Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6245: Handle TEvProposeTransactionResult, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 102 Step: 5000003 OrderId: 102 ExecLatency: 0 ProposeLatency: 2 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 2128 } } 2025-05-07T08:49:02.198392Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation FindRelatedPartByTabletId, TxId: 102, tablet: 72075186233409546, partId: 0 2025-05-07T08:49:02.198605Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 102:0, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 102 Step: 5000003 OrderId: 102 ExecLatency: 0 ProposeLatency: 2 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 2128 } } 2025-05-07T08:49:02.198784Z node 12 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_part.cpp:108: HandleReply TEvDataShard::TEvProposeTransactionResult Ignore message: tablet# 72057594046678944, ev# TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 102 Step: 5000003 OrderId: 102 ExecLatency: 0 ProposeLatency: 2 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 2128 } } 2025-05-07T08:49:02.201554Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5472: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 305 RawX2: 51539609844 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 2025-05-07T08:49:02.201645Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:1752: TOperation FindRelatedPartByTabletId, TxId: 102, tablet: 72075186233409546, partId: 0 2025-05-07T08:49:02.201929Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 102:0, at schemeshard: 72057594046678944, message: Source { RawX1: 305 RawX2: 51539609844 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 2025-05-07T08:49:02.202074Z node 12 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1005: NTableState::TProposedWaitParts operationId# 102:0 HandleReply TEvSchemaChanged at tablet: 72057594046678944 2025-05-07T08:49:02.202250Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1009: NTableState::TProposedWaitParts operationId# 102:0 HandleReply TEvSchemaChanged at tablet: 72057594046678944 message: Source { RawX1: 305 RawX2: 51539609844 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 2025-05-07T08:49:02.202411Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:655: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 102:0, shardIdx: 72057594046678944:1, datashard: 72075186233409546, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-05-07T08:49:02.202476Z node 12 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:674: all shard schema changes has been received, operationId: 102:0, at schemeshard: 72057594046678944 2025-05-07T08:49:02.202528Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:686: send schema changes ack message, operation: 102:0, datashard: 72075186233409546, at schemeshard: 72057594046678944 2025-05-07T08:49:02.202576Z node 12 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 102:0 129 -> 240 2025-05-07T08:49:02.206610Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-05-07T08:49:02.207103Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-05-07T08:49:02.208614Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-05-07T08:49:02.209078Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-05-07T08:49:02.209145Z node 12 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046678944] TDone opId# 102:0 ProgressState 2025-05-07T08:49:02.209369Z node 12 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#102:0 progress is 1/1 2025-05-07T08:49:02.209432Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-05-07T08:49:02.209504Z node 12 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#102:0 progress is 1/1 2025-05-07T08:49:02.209574Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-05-07T08:49:02.209641Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: true 
2025-05-07T08:49:02.209747Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [12:333:2312] message: TxId: 102 2025-05-07T08:49:02.209857Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-05-07T08:49:02.209946Z node 12 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 102:0 2025-05-07T08:49:02.210029Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 102:0 2025-05-07T08:49:02.210236Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-05-07T08:49:02.215212Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-05-07T08:49:02.215306Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [12:393:2365] TestWaitNotification: OK eventTxId 102 >> Yq_1::Basic_TaggedLiteral [GOOD] >> THealthCheckTest::SharedWhenTroublesWithExclusiveNodes [GOOD] >> Yq_1::Basic_EmptyList [GOOD] >> Yq_1::Basic_EmptyDict >> THealthCheckTest::TestReBootingTabletIsDead [GOOD] >> THealthCheckTest::UnknowPDiskState >> THealthCheckTest::HealthCheckConfigUpdate [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TFlatTest::CopyCopiedTableAndDropFirstCopy [GOOD] Test command err: 2025-05-07T08:48:49.054489Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501623076267937139:2286];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:48:49.054541Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/004a3d/r3tmp/tmplRfhP9/pdisk_1.dat 2025-05-07T08:48:49.491615Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:48:49.491748Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:48:49.494465Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:48:49.495501Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:4042 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-05-07T08:48:49.802406Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:48:49.817350Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:48:49.843194Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:48:50.120918Z node 1 :OPS_COMPACT INFO: Compact{72075186224037888.1.11, eph 1} end=0, 4 blobs 3r (max 3), put Spent{time=0.002s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 2 +0, (1265 647 2154)b }, ecr=1.000 2025-05-07T08:48:50.151053Z node 1 :OPS_COMPACT INFO: Compact{72075186224037889.1.11, eph 1} end=0, 4 blobs 3r (max 3), put Spent{time=0.009s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 2 +0, (1139 521 2626)b }, ecr=1.000 2025-05-07T08:48:50.207116Z node 1 :OPS_COMPACT INFO: Compact{72075186224037888.1.16, eph 2} end=0, 4 blobs 6r (max 6), put Spent{time=0.013s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 5 +0, (1573 647 6413)b }, ecr=1.000 2025-05-07T08:48:50.214330Z node 1 :OPS_COMPACT INFO: Compact{72075186224037889.1.16, eph 2} end=0, 4 blobs 6r (max 6), put Spent{time=0.003s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 4 +0, (2326 1432 5183)b }, ecr=1.000 Copy TableOld to Table 2025-05-07T08:48:50.588511Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/dc-1/Dir" OperationType: ESchemeOpCreateTable CreateTable { Name: "Table" CopyFromTable: "/dc-1/Dir/TableOld" } } TxId: 281474976710676 TabletId: 72057594046644480 PeerName: "" , at schemeshard: 72057594046644480 2025-05-07T08:48:50.588826Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_copy_table.cpp:383: TCopyTable Propose, path: /dc-1/Dir/Table, opId: 281474976710676:0, at schemeshard: 72057594046644480 2025-05-07T08:48:50.589338Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:319: AttachChild: child attached as only one child to the parent, parent id: [OwnerId: 72057594046644480, LocalPathId: 2], parent name: Dir, child name: Table, child id: [OwnerId: 72057594046644480, LocalPathId: 4], at schemeshard: 72057594046644480 2025-05-07T08:48:50.589382Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 4] was 0 2025-05-07T08:48:50.589395Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction source path for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 3 2025-05-07T08:48:50.589442Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason new shard created for pathId [OwnerId: 72057594046644480, LocalPathId: 4] was 1 2025-05-07T08:48:50.589474Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason new shard created for pathId [OwnerId: 72057594046644480, LocalPathId: 4] was 2 2025-05-07T08:48:50.589633Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046644480, LocalPathId: 4] was 3 2025-05-07T08:48:50.589783Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 281474976710676:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-05-07T08:48:50.591682Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 2 2025-05-07T08:48:50.591737Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046644480, LocalPathId: 4] was 4 waiting... 2025-05-07T08:48:50.593012Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 281474976710676, response: Status: StatusAccepted TxId: 281474976710676 SchemeshardId: 72057594046644480 PathId: 4, at schemeshard: 72057594046644480 2025-05-07T08:48:50.593191Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976710676, database: /dc-1, subject: , status: StatusAccepted, operation: CREATE TABLE, path: /dc-1/Dir/Table 2025-05-07T08:48:50.593429Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046644480 2025-05-07T08:48:50.593447Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046644480, txId: 281474976710676, path id: [OwnerId: 72057594046644480, LocalPathId: 2] 2025-05-07T08:48:50.593576Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046644480, txId: 281474976710676, path id: [OwnerId: 72057594046644480, LocalPathId: 4] 2025-05-07T08:48:50.593640Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046644480 2025-05-07T08:48:50.593655Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:7501623076267937401:2244], at schemeshard: 72057594046644480, txId: 281474976710676, path id: 2 2025-05-07T08:48:50.593684Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:7501623076267937401:2244], at schemeshard: 72057594046644480, txId: 281474976710676, path id: 4 2025-05-07T08:48:50.593722Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, 
operationId: 281474976710676:0, at schemeshard: 72057594046644480 2025-05-07T08:48:50.593759Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 281474976710676:0 ProgressState, operation type: TxCopyTable, at tablet# 72057594046644480 2025-05-07T08:48:50.594195Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:357: TCreateParts opId# 281474976710676:0 CreateRequest Event to Hive: 72057594037968897 msg: Owner: 72057594046644480 OwnerIdx: 3 TabletType: DataShard FollowerCount: 0 ObjectDomain { SchemeShard: 72057594046644480 PathId: 1 } ObjectId: 4 BindedChannels { StoragePoolName: "/dc-1:test" } BindedChannels { StoragePoolName: "/dc-1:test" } BindedChannels { StoragePoolName: "/dc-1:test" } AllowedDomains { SchemeShard: 72057594046644480 PathId: 1 } 2025-05-07T08:48:50.594327Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:357: TCreateParts opId# 281474976710676:0 CreateRequest Event to Hive: 72057594037968897 msg: Owner: 72057594046644480 OwnerIdx: 4 TabletType: DataShard FollowerCount: 0 ObjectDomain { SchemeShard: 72057594046644480 PathId: 1 } ObjectId: 4 BindedChannels { StoragePoolName: "/dc-1:test" } BindedChannels { StoragePoolName: "/dc-1:test" } BindedChannels { StoragePoolName: "/dc-1:test" } AllowedDomains { SchemeShard: 72057594046644480 PathId: 1 } 2025-05-07T08:48:50.595925Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 281474976710676:0 from tablet: 72057594046644480 to tablet: 72057594037968897 cookie: 72057594046644480:3 msg type: 268697601 2025-05-07T08:48:50.595999Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 281474976710676:0 from tablet: 72057594046644480 to tablet: 72057594037968897 cookie: 72057594046644480:4 msg type: 268697601 2025-05-07T08:48:50.596060Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 281474976710676, partId: 0, tablet: 72057594037968897 2025-05-07T08:48:50.596080Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1767: TOperation RegisterRelationByShardIdx, TxId: 281474976710676, shardIdx: 72057594046644480:3, partId: 0 2025-05-07T08:48:50.596090Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1767: TOperation RegisterRelationByShardIdx, TxId: 281474976710676, shardIdx: 72057594046644480:4, partId: 0 2025-05-07T08:48:50.597951Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 Generation: 2 LocalPathId: 2 Version: 6 PathOwnerId: 72057594046644480, cookie: 281474976710676 2025-05-07T08:48:50.600819Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 Generation: 2 LocalPathId: 2 Version: 6 PathOwnerId: 72057594046644480, cookie: 281474976710676 2025-05-07T08:48:50.600842Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046644480, txId: 281474976710676 2025-05-07T08:48:50.600866Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046644480, txId: 281474976710676, pathId: [OwnerId: 72057594046644480, LocalPathId: 2], version: 6 2025-05-07T08:48:50. ... 
wX2: 4503612512274683 } TabletId: 72075186224037888 State: 4 2025-05-07T08:49:00.259492Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__state_changed_reply.cpp:78: TTxShardStateChanged DoExecute, datashard informs about state changing, datashardId: 72075186224037888, state: Offline, at schemeshard: 72057594046644480 2025-05-07T08:49:00.259656Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046644480:2 hive 72057594037968897 at ss 72057594046644480 2025-05-07T08:49:00.259713Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046644480:1 hive 72057594037968897 at ss 72057594046644480 2025-05-07T08:49:00.260033Z node 3 :TX_DATASHARD INFO: datashard.cpp:257: OnTabletDead: 72075186224037893 2025-05-07T08:49:00.260129Z node 3 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186224037893 2025-05-07T08:49:00.261170Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5898: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 2 ShardOwnerId: 72057594046644480 ShardLocalIdx: 2, at schemeshard: 72057594046644480 2025-05-07T08:49:00.261350Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 2 2025-05-07T08:49:00.261490Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5898: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 1 ShardOwnerId: 72057594046644480 ShardLocalIdx: 1, at schemeshard: 72057594046644480 2025-05-07T08:49:00.261525Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3635: Client pipe to tablet 72075186224037893 from 72075186224037891 is reset 2025-05-07T08:49:00.261544Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3635: Client pipe to tablet 72075186224037892 from 72075186224037890 is reset 2025-05-07T08:49:00.261560Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:2962: Handle TEvStateChangedResult datashard 72075186224037889 state Offline 2025-05-07T08:49:00.261576Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:2962: Handle TEvStateChangedResult datashard 72075186224037888 state Offline 2025-05-07T08:49:00.261616Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 1 2025-05-07T08:49:00.261706Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046644480 2025-05-07T08:49:00.261717Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046644480, LocalPathId: 3], at schemeshard: 72057594046644480 2025-05-07T08:49:00.261815Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 2 2025-05-07T08:49:00.261836Z node 3 :TX_DATASHARD INFO: datashard.cpp:257: OnTabletDead: 72075186224037892 2025-05-07T08:49:00.261890Z node 3 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186224037892 2025-05-07T08:49:00.263138Z node 3 :TX_DATASHARD INFO: datashard.cpp:197: OnTabletStop: 72075186224037889 reason = ReasonStop 2025-05-07T08:49:00.263156Z node 3 :TX_DATASHARD INFO: datashard.cpp:197: OnTabletStop: 72075186224037888 reason = ReasonStop 2025-05-07T08:49:00.263578Z node 3 :FLAT_TX_SCHEMESHARD 
DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:2 2025-05-07T08:49:00.263593Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:2 tabletId 72075186224037889 2025-05-07T08:49:00.263637Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:1 2025-05-07T08:49:00.263653Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:1 tabletId 72075186224037888 2025-05-07T08:49:00.263667Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037889 not found 2025-05-07T08:49:00.263684Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046644480 2025-05-07T08:49:00.263752Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5509: Handle TEvStateChanged, at schemeshard: 72057594046644480, message: Source { RawX1: 7501623120162766312 RawX2: 4503612512274740 } TabletId: 72075186224037891 State: 4 2025-05-07T08:49:00.263782Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__state_changed_reply.cpp:78: TTxShardStateChanged DoExecute, datashard informs about state changing, datashardId: 72075186224037891, state: Offline, at schemeshard: 72057594046644480 2025-05-07T08:49:00.263910Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5509: Handle TEvStateChanged, at schemeshard: 72057594046644480, message: Source { RawX1: 7501623120162766313 RawX2: 4503612512274741 } TabletId: 72075186224037890 State: 4 2025-05-07T08:49:00.263940Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__state_changed_reply.cpp:78: TTxShardStateChanged DoExecute, datashard informs about state changing, datashardId: 72075186224037890, state: Offline, at schemeshard: 72057594046644480 2025-05-07T08:49:00.264014Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037888 not found 2025-05-07T08:49:00.264379Z node 3 :TX_DATASHARD INFO: datashard.cpp:257: OnTabletDead: 72075186224037888 2025-05-07T08:49:00.264433Z node 3 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186224037888 2025-05-07T08:49:00.265099Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046644480:4 hive 72057594037968897 at ss 72057594046644480 2025-05-07T08:49:00.265162Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046644480:3 hive 72057594037968897 at ss 72057594046644480 2025-05-07T08:49:00.265900Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:2962: Handle TEvStateChangedResult datashard 72075186224037891 state Offline 2025-05-07T08:49:00.265926Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:2962: Handle TEvStateChangedResult datashard 72075186224037890 state Offline 2025-05-07T08:49:00.266218Z node 3 :TX_DATASHARD INFO: datashard.cpp:257: OnTabletDead: 72075186224037889 2025-05-07T08:49:00.266280Z node 3 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186224037889 2025-05-07T08:49:00.266535Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5898: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 4 ShardOwnerId: 72057594046644480 ShardLocalIdx: 4, at schemeshard: 
72057594046644480 2025-05-07T08:49:00.266705Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 4] was 2 2025-05-07T08:49:00.266829Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5898: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 3 ShardOwnerId: 72057594046644480 ShardLocalIdx: 3, at schemeshard: 72057594046644480 2025-05-07T08:49:00.266948Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 4] was 1 2025-05-07T08:49:00.267064Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046644480 2025-05-07T08:49:00.267076Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046644480, LocalPathId: 4], at schemeshard: 72057594046644480 2025-05-07T08:49:00.267109Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 1 2025-05-07T08:49:00.267409Z node 3 :TX_DATASHARD INFO: datashard.cpp:197: OnTabletStop: 72075186224037891 reason = ReasonStop 2025-05-07T08:49:00.267428Z node 3 :TX_DATASHARD INFO: datashard.cpp:197: OnTabletStop: 72075186224037890 reason = ReasonStop 2025-05-07T08:49:00.267642Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:4 2025-05-07T08:49:00.267654Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:4 tabletId 72075186224037891 2025-05-07T08:49:00.267695Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:3 2025-05-07T08:49:00.267709Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:3 tabletId 72075186224037890 2025-05-07T08:49:00.267734Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046644480 2025-05-07T08:49:00.268044Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037891 not found 2025-05-07T08:49:00.268063Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037890 not found 2025-05-07T08:49:00.268978Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037890, clientId# [3:7501623120162766394:2565], serverId# [3:7501623120162766398:2569], sessionId# [0:0:0] 2025-05-07T08:49:00.269192Z node 3 :TX_DATASHARD INFO: datashard.cpp:257: OnTabletDead: 72075186224037890 2025-05-07T08:49:00.269249Z node 3 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186224037890 2025-05-07T08:49:00.270352Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037891, clientId# [3:7501623120162766403:2574], serverId# [3:7501623120162766406:2577], sessionId# [0:0:0] 2025-05-07T08:49:00.270586Z node 3 :TX_DATASHARD INFO: datashard.cpp:257: OnTabletDead: 
72075186224037891 2025-05-07T08:49:00.270635Z node 3 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186224037891 2025-05-07T08:49:00.554600Z node 3 :HIVE WARN: hive_impl.cpp:1929: HIVE#72057594037968897 Can't find the tablet from RequestHiveInfo(TabletID=72075186224037892) Check that tablet 72075186224037893 was deleted 2025-05-07T08:49:00.555219Z node 3 :HIVE WARN: hive_impl.cpp:1929: HIVE#72057594037968897 Can't find the tablet from RequestHiveInfo(TabletID=72075186224037893) Check that tablet 72075186224037888 was deleted 2025-05-07T08:49:00.555704Z node 3 :HIVE WARN: hive_impl.cpp:1929: HIVE#72057594037968897 Can't find the tablet from RequestHiveInfo(TabletID=72075186224037888) Check that tablet 72075186224037889 was deleted 2025-05-07T08:49:00.556180Z node 3 :HIVE WARN: hive_impl.cpp:1929: HIVE#72057594037968897 Can't find the tablet from RequestHiveInfo(TabletID=72075186224037889) Check that tablet 72075186224037890 was deleted 2025-05-07T08:49:00.556672Z node 3 :HIVE WARN: hive_impl.cpp:1929: HIVE#72057594037968897 Can't find the tablet from RequestHiveInfo(TabletID=72075186224037890) Check that tablet 72075186224037891 was deleted 2025-05-07T08:49:00.557100Z node 3 :HIVE WARN: hive_impl.cpp:1929: HIVE#72057594037968897 Can't find the tablet from RequestHiveInfo(TabletID=72075186224037891) ------- [TM] {asan, default-linux-x86_64, release} ydb/services/fq/ut_integration/unittest >> Yq_1::ListConnectionsOnEmptyConnectionsTable [GOOD] Test command err: 2025-05-07T08:47:37.969180Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501622768075856260:2142];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:47:37.970216Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-05-07T08:47:44.138538Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501622768075856260:2142];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:47:44.215762Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:47:44.574712Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501622776665791029:2293];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:47:44.575487Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; E0507 08:47:44.879067135 108790 dns_resolver.cc:162] no server name supplied in dns URI E0507 08:47:44.879181281 108790 channel.cc:120] channel stack builder failed: UNKNOWN: the target uri is not valid: dns:/// 2025-05-07T08:47:45.662200Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:45.677199Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:45.717037Z node 1 :METADATA_PROVIDER 
ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:46.680579Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:46.681277Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:46.717304Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:47.630256Z node 1 :YQ_CONTROL_PLANE_STORAGE ERROR: schema.cpp:160: Create directory "Root/yq" error: TRANSPORT_UNAVAILABLE [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:17753: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:17753 } ] 2025-05-07T08:47:47.700066Z node 1 :YQL_NODES_MANAGER ERROR: nodes_manager.cpp:364: ydb/core/fq/libs/actors/nodes_manager.cpp:322: TRANSPORT_UNAVAILABLE
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:17753: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint localhost:17753 2025-05-07T08:47:47.730795Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:47.760181Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:47.775292Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:48.734663Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:48.770717Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:48.798310Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:49.760678Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:49.838696Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:49.850346Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; E0507 08:47:50.026289478 109262 dns_resolver.cc:162] no server name supplied in dns URI E0507 08:47:50.026762214 109262 channel.cc:120] channel stack builder failed: UNKNOWN: the target uri is not valid: dns:/// 2025-05-07T08:47:50.122243Z node 1 :YQ_CONTROL_PLANE_STORAGE ERROR: schema.cpp:160: Create directory "Root/yq" error: TRANSPORT_UNAVAILABLE [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:17753: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:17753 } ] 2025-05-07T08:47:50.792056Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:50.841434Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:50.854342Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:51.806529Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:51.843594Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:51.860084Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:52.835265Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:52.858492Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:52.866660Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:53.815586Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:53.856791Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:53.867755Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:54.080509Z node 1 :YQL_NODES_MANAGER ERROR: nodes_manager.cpp:364: ydb/core/fq/libs/actors/nodes_manager.cpp:322: TRANSPORT_UNAVAILABLE
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:17753: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint localhost:17753 2025-05-07T08:47:54.170037Z node 1 :YQ_CONTROL_PLANE_STORAGE ERROR: schema.cpp:160: Create directory "Root/yq" error: TRANSPORT_UNAVAILABLE [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:17753: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:17753 } ] 2025-05-07T08:47:54.870484Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:54.899143Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:54.914318Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; E0507 08:47:55.162173179 109259 dns_resolver.cc:162] no server name supplied in dns URI E0507 08:47:55.165859132 109259 channel.cc:120] channel stack builder failed: UNKNOWN: the target uri is not valid: dns:/// 2025-05-07T08:47:55.886354Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:55.911010Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:55.926576Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:56.889382Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T0 ... Id: 281474976715694, task: 1, CA Id [4:7501623123397942578:2574]. enter getasyncinputdata results size 0, freeSpace 8388608 2025-05-07T08:49:00.267343Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1424: TxId: 281474976715694, task: 1, CA Id [4:7501623123397942578:2574]. returned async data processed rows 0 left freeSpace 8388608 received rows 0 running reads 1 pending shards 0 finished = 0 has limit 0 limit reached 0 2025-05-07T08:49:00.267384Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:143: SelfId: [4:7501623123397942578:2574], TxId: 281474976715694, task: 1. Ctx: { SessionId : ydb://session/3?node_id=4&id=NmQ1NGNmMjEtMWNkODk4Mi0yNGNhOWY5Yi0zMDNlM2E4OA==. TraceId : 01jtmyy4zs8p4jhs0gz39s8m9m. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. CA StateFunc 271646926 2025-05-07T08:49:00.267451Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:1074: SelfId: [4:7501623123397942578:2574], TxId: 281474976715694, task: 1. Ctx: { SessionId : ydb://session/3?node_id=4&id=NmQ1NGNmMjEtMWNkODk4Mi0yNGNhOWY5Yi0zMDNlM2E4OA==. TraceId : 01jtmyy4zs8p4jhs0gz39s8m9m. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. 
Received channels info: Update { Id: 1 TransportVersion: DATA_TRANSPORT_OOB_PICKLE_1_0 SrcTaskId: 1 DstTaskId: 2 SrcEndpoint { ActorId { RawX1: 7501623123397942578 RawX2: 4503616807242254 } } DstEndpoint { ActorId { RawX1: 7501623123397942579 RawX2: 4503616807242255 } } InMemory: true DstStageId: 1 } 2025-05-07T08:49:00.267469Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1328: TxId: 281474976715694, task: 1, CA Id [4:7501623123397942578:2574]. enter getasyncinputdata results size 0, freeSpace 8388608 2025-05-07T08:49:00.267480Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1424: TxId: 281474976715694, task: 1, CA Id [4:7501623123397942578:2574]. returned async data processed rows 0 left freeSpace 8388608 received rows 0 running reads 1 pending shards 0 finished = 0 has limit 0 limit reached 0 2025-05-07T08:49:00.267506Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:143: SelfId: [4:7501623123397942578:2574], TxId: 281474976715694, task: 1. Ctx: { SessionId : ydb://session/3?node_id=4&id=NmQ1NGNmMjEtMWNkODk4Mi0yNGNhOWY5Yi0zMDNlM2E4OA==. TraceId : 01jtmyy4zs8p4jhs0gz39s8m9m. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. CA StateFunc 271646922 2025-05-07T08:49:00.267522Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1328: TxId: 281474976715694, task: 1, CA Id [4:7501623123397942578:2574]. enter getasyncinputdata results size 0, freeSpace 8388608 2025-05-07T08:49:00.267539Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1424: TxId: 281474976715694, task: 1, CA Id [4:7501623123397942578:2574]. returned async data processed rows 0 left freeSpace 8388608 received rows 0 running reads 1 pending shards 0 finished = 0 has limit 0 limit reached 0 2025-05-07T08:49:00.269012Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:959: TxId: 281474976715694, task: 1, CA Id [4:7501623123397942578:2574]. Recv TEvReadResult from ShardID=72075186224037896, ReadId=0, Status=SUCCESS, Finished=1, RowCount=0, TxLocks= , BrokenTxLocks= 2025-05-07T08:49:00.269033Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1047: TxId: 281474976715694, task: 1, CA Id [4:7501623123397942578:2574]. Taken 0 locks 2025-05-07T08:49:00.269066Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1061: TxId: 281474976715694, task: 1, CA Id [4:7501623123397942578:2574]. new data for read #0 seqno = 1 finished = 1 2025-05-07T08:49:00.269089Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:143: SelfId: [4:7501623123397942578:2574], TxId: 281474976715694, task: 1. Ctx: { SessionId : ydb://session/3?node_id=4&id=NmQ1NGNmMjEtMWNkODk4Mi0yNGNhOWY5Yi0zMDNlM2E4OA==. TraceId : 01jtmyy4zs8p4jhs0gz39s8m9m. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. CA StateFunc 276037645 2025-05-07T08:49:00.269110Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:143: SelfId: [4:7501623123397942578:2574], TxId: 281474976715694, task: 1. Ctx: { SessionId : ydb://session/3?node_id=4&id=NmQ1NGNmMjEtMWNkODk4Mi0yNGNhOWY5Yi0zMDNlM2E4OA==. TraceId : 01jtmyy4zs8p4jhs0gz39s8m9m. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. CA StateFunc 271646922 2025-05-07T08:49:00.269130Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1328: TxId: 281474976715694, task: 1, CA Id [4:7501623123397942578:2574]. enter getasyncinputdata results size 1, freeSpace 8388608 2025-05-07T08:49:00.269148Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1224: TxId: 281474976715694, task: 1, CA Id [4:7501623123397942578:2574]. 
enter pack cells method shardId: 72075186224037896 processedRows: 0 packed rows: 0 freeSpace: 8388608 2025-05-07T08:49:00.269165Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1305: TxId: 281474976715694, task: 1, CA Id [4:7501623123397942578:2574]. exit pack cells method shardId: 72075186224037896 processedRows: 0 packed rows: 0 freeSpace: 8388608 2025-05-07T08:49:00.269176Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1362: TxId: 281474976715694, task: 1, CA Id [4:7501623123397942578:2574]. returned 0 rows; processed 0 rows 2025-05-07T08:49:00.269237Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1399: TxId: 281474976715694, task: 1, CA Id [4:7501623123397942578:2574]. dropping batch for read #0 2025-05-07T08:49:00.269247Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:459: TxId: 281474976715694, task: 1, CA Id [4:7501623123397942578:2574]. effective maxinflight 1 sorted 1 2025-05-07T08:49:00.269259Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:481: TxId: 281474976715694, task: 1, CA Id [4:7501623123397942578:2574]. Scheduled table scans, in flight: 0 shards. pending shards to read: 0, 2025-05-07T08:49:00.269278Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1424: TxId: 281474976715694, task: 1, CA Id [4:7501623123397942578:2574]. returned async data processed rows 0 left freeSpace 8388608 received rows 0 running reads 0 pending shards 0 finished = 1 has limit 0 limit reached 0 2025-05-07T08:49:00.269375Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:502: SelfId: [4:7501623123397942578:2574], TxId: 281474976715694, task: 1. Ctx: { SessionId : ydb://session/3?node_id=4&id=NmQ1NGNmMjEtMWNkODk4Mi0yNGNhOWY5Yi0zMDNlM2E4OA==. TraceId : 01jtmyy4zs8p4jhs0gz39s8m9m. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. Continue execution, either output buffers are not empty or not all channels are ready, hasDataToSend: 1, channelsReady: 1 2025-05-07T08:49:00.269407Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:143: SelfId: [4:7501623123397942579:2575], TxId: 281474976715694, task: 2. Ctx: { TraceId : 01jtmyy4zs8p4jhs0gz39s8m9m. CustomerSuppliedId : . SessionId : ydb://session/3?node_id=4&id=NmQ1NGNmMjEtMWNkODk4Mi0yNGNhOWY5Yi0zMDNlM2E4OA==. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. CA StateFunc 271646923 2025-05-07T08:49:00.269425Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:163: TxId: 281474976715694, task: 2. Finish input channelId: 1, from: [4:7501623123397942578:2574] 2025-05-07T08:49:00.269462Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:143: SelfId: [4:7501623123397942579:2575], TxId: 281474976715694, task: 2. Ctx: { TraceId : 01jtmyy4zs8p4jhs0gz39s8m9m. CustomerSuppliedId : . SessionId : ydb://session/3?node_id=4&id=NmQ1NGNmMjEtMWNkODk4Mi0yNGNhOWY5Yi0zMDNlM2E4OA==. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. CA StateFunc 271646922 2025-05-07T08:49:00.269514Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:502: SelfId: [4:7501623123397942579:2575], TxId: 281474976715694, task: 2. Ctx: { TraceId : 01jtmyy4zs8p4jhs0gz39s8m9m. CustomerSuppliedId : . SessionId : ydb://session/3?node_id=4&id=NmQ1NGNmMjEtMWNkODk4Mi0yNGNhOWY5Yi0zMDNlM2E4OA==. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. 
Continue execution, either output buffers are not empty or not all channels are ready, hasDataToSend: 1, channelsReady: 1 2025-05-07T08:49:00.269526Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:143: SelfId: [4:7501623123397942578:2574], TxId: 281474976715694, task: 1. Ctx: { SessionId : ydb://session/3?node_id=4&id=NmQ1NGNmMjEtMWNkODk4Mi0yNGNhOWY5Yi0zMDNlM2E4OA==. TraceId : 01jtmyy4zs8p4jhs0gz39s8m9m. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. CA StateFunc 271646927 2025-05-07T08:49:00.269553Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:143: SelfId: [4:7501623123397942578:2574], TxId: 281474976715694, task: 1. Ctx: { SessionId : ydb://session/3?node_id=4&id=NmQ1NGNmMjEtMWNkODk4Mi0yNGNhOWY5Yi0zMDNlM2E4OA==. TraceId : 01jtmyy4zs8p4jhs0gz39s8m9m. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. CA StateFunc 271646922 2025-05-07T08:49:00.269575Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:675: TxId: 281474976715694, task: 1. Tasks execution finished 2025-05-07T08:49:00.269588Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:510: SelfId: [4:7501623123397942578:2574], TxId: 281474976715694, task: 1. Ctx: { SessionId : ydb://session/3?node_id=4&id=NmQ1NGNmMjEtMWNkODk4Mi0yNGNhOWY5Yi0zMDNlM2E4OA==. TraceId : 01jtmyy4zs8p4jhs0gz39s8m9m. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. Compute state finished. All channels and sinks finished 2025-05-07T08:49:00.269692Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:494: TxId: 281474976715694, task: 1. pass away 2025-05-07T08:49:00.269810Z node 4 :KQP_COMPUTE DEBUG: log.cpp:784: fline=kqp_compute_actor_factory.cpp:66;problem=finish_compute_actor;tx_id=281474976715694;task_id=1;success=1;message={
: Error: COMPUTE_STATE_FINISHED }; 2025-05-07T08:49:00.270253Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:143: SelfId: [4:7501623123397942579:2575], TxId: 281474976715694, task: 2. Ctx: { TraceId : 01jtmyy4zs8p4jhs0gz39s8m9m. CustomerSuppliedId : . SessionId : ydb://session/3?node_id=4&id=NmQ1NGNmMjEtMWNkODk4Mi0yNGNhOWY5Yi0zMDNlM2E4OA==. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. CA StateFunc 271646922 2025-05-07T08:49:00.270290Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976715694, task: 2. Tasks execution finished, don't wait for ack delivery in input channelId: 1, seqNo: [1] 2025-05-07T08:49:00.270301Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:675: TxId: 281474976715694, task: 2. Tasks execution finished 2025-05-07T08:49:00.270311Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:510: SelfId: [4:7501623123397942579:2575], TxId: 281474976715694, task: 2. Ctx: { TraceId : 01jtmyy4zs8p4jhs0gz39s8m9m. CustomerSuppliedId : . SessionId : ydb://session/3?node_id=4&id=NmQ1NGNmMjEtMWNkODk4Mi0yNGNhOWY5Yi0zMDNlM2E4OA==. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. Compute state finished. All channels and sinks finished 2025-05-07T08:49:00.270376Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:494: TxId: 281474976715694, task: 2. pass away 2025-05-07T08:49:00.270434Z node 4 :KQP_COMPUTE DEBUG: log.cpp:784: fline=kqp_compute_actor_factory.cpp:66;problem=finish_compute_actor;tx_id=281474976715694;task_id=2;success=1;message={
: Error: COMPUTE_STATE_FINISHED }; 2025-05-07T08:49:00.726418Z node 4 :FQ_PENDING_FETCHER ERROR: pending_fetcher.cpp:259: Error with GetTask:
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv6:%5B::%5D:27483: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint [::]:27483 >> TSchemeShardTest::CopyTableForBackup [GOOD] >> TSchemeShardTest::ConsistentCopyTablesForBackup ------- [TM] {asan, default-linux-x86_64, release} ydb/core/health_check/ut/unittest >> THealthCheckTest::SharedWhenTroublesWithExclusiveNodes [GOOD] Test command err: 2025-05-07T08:47:41.890545Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:698:2413], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:47:41.895115Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:41.900556Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:47:41.913051Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:695:2355], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:47:41.916987Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:41.917036Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003d6d/r3tmp/tmp8sMFPG/pdisk_1.dat 2025-05-07T08:47:46.132698Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 3109, node 1 TClient is connected to server localhost:22083 2025-05-07T08:47:52.224710Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:47:52.225121Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:47:52.225454Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:47:52.231713Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration self_check_result: DEGRADED issue_log { id: "YELLOW-70fb-1231c6b1" status: YELLOW message: "Database has multiple issues" location { database { name: "/Root" } } reason: "YELLOW-1ba8-1231c6b1" reason: "YELLOW-5321-1231c6b1" type: "DATABASE" level: 1 } issue_log { id: "YELLOW-1ba8-1231c6b1" status: YELLOW message: "Compute is overloaded" location { database { name: "/Root" } } reason: "YELLOW-e9e2-1231c6b1-1" reason: "YELLOW-e9e2-1231c6b1-2" type: "COMPUTE" level: 2 } issue_log { id: "YELLOW-e9e2-1231c6b1-1" status: YELLOW message: "LoadAverage above 100%" location { compute { node { id: 1 host: "::1" port: 12001 } } database { name: "/Root" } } type: "LOAD_AVERAGE" level: 4 } issue_log { id: "YELLOW-e9e2-1231c6b1-2" status: YELLOW message: "LoadAverage above 100%" location { compute { node { id: 2 host: "::1" port: 12002 } } database { name: "/Root" } } type: "LOAD_AVERAGE" level: 4 } issue_log { id: "YELLOW-5321-1231c6b1" status: YELLOW message: "Storage degraded" location { database { name: "/Root" } } reason: "YELLOW-595f-1231c6b1-80c02825" type: "STORAGE" level: 2 } issue_log { id: "YELLOW-595f-1231c6b1-80c02825" status: YELLOW message: "Pool degraded" location { storage { pool { name: "static" } } database { name: "/Root" } } reason: "YELLOW-ef3e-1231c6b1-0" type: "STORAGE_POOL" level: 3 } issue_log { id: "RED-4847-1231c6b1-1-0-3-55-0-55" status: RED message: "VDisk is not available" location { storage { node { id: 1 host: "::1" port: 12001 } pool { name: "static" group { vdisk { id: "0-3-55-0-55" } } } } database { name: "/Root" } } type: "VDISK" level: 5 } issue_log { id: "YELLOW-ef3e-1231c6b1-0" status: YELLOW message: "Group degraded" location { storage { pool { name: "static" group { id: "0" } } } database { name: "/Root" } } reason: "RED-4847-1231c6b1-1-0-3-55-0-55" type: "STORAGE_GROUP" level: 4 } location { id: 1 host: "::1" port: 12001 } 2025-05-07T08:48:22.203303Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [3:700:2413], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:48:22.203920Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:48:22.204240Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:48:22.204591Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [4:697:2355], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:48:22.204910Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:48:22.205058Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003d6d/r3tmp/tmpiQjFME/pdisk_1.dat 2025-05-07T08:48:22.618037Z node 3 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 11648, node 3 TClient is connected to server localhost:13617 2025-05-07T08:48:23.222963Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:48:23.223028Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:48:23.223120Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:48:23.223408Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-05-07T08:48:44.724374Z node 5 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [5:446:2410], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:48:44.724764Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:48:44.724925Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003d6d/r3tmp/tmpYHyfvF/pdisk_1.dat 2025-05-07T08:48:45.670584Z node 5 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 20649, node 5 TClient is connected to server localhost:27443 2025-05-07T08:48:46.623619Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:48:46.623703Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:48:46.623745Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:48:46.626816Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-05-07T08:48:52.834588Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [7:526:2414], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:48:52.834963Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:48:52.835130Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003d6d/r3tmp/tmpo94v3T/pdisk_1.dat 2025-05-07T08:48:53.403129Z node 7 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 7979, node 7 TClient is connected to server localhost:24375 2025-05-07T08:48:54.267672Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:48:54.267739Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:48:54.267780Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:48:54.268847Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-05-07T08:49:02.160981Z node 10 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [10:455:2415], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:49:02.161575Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:49:02.161799Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003d6d/r3tmp/tmpkjcx0T/pdisk_1.dat 2025-05-07T08:49:02.590449Z node 10 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 29025, node 10 TClient is connected to server localhost:25790 2025-05-07T08:49:03.295165Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:49:03.295250Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:49:03.295301Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:49:03.295681Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration >> TSchemeshardBackgroundCleaningTest::SchemeshardBackgroundCleaningTestCreateCleanWithRetry >> TSchemeshardBackgroundCleaningTest::SchemeshardBackgroundCleaningTestSimpleDrop >> BSCReadOnlyPDisk::ReadOnlyOneByOne [GOOD] |88.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/cms/ut/ydb-core-cms-ut |88.6%| [LD] {RESULT} $(B)/ydb/core/cms/ut/ydb-core-cms-ut |88.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/cms/ut/ydb-core-cms-ut ------- [TM] {asan, default-linux-x86_64, release} ydb/core/health_check/ut/unittest >> THealthCheckTest::HealthCheckConfigUpdate [GOOD] Test command err: 2025-05-07T08:47:39.436070Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:698:2413], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:47:39.436469Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:39.436667Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:47:39.443374Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:695:2355], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:47:39.445907Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:39.445994Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003dca/r3tmp/tmp3c0Ymp/pdisk_1.dat 2025-05-07T08:47:43.558001Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 4861, node 1 TClient is connected to server localhost:3636 2025-05-07T08:47:50.417232Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:47:50.417284Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:47:50.417313Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:47:50.417754Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-05-07T08:48:22.733537Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [3:700:2413], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:48:22.742471Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:48:22.742912Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:48:22.743380Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [4:697:2355], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:48:22.743775Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:48:22.743928Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003dca/r3tmp/tmp1H7dAd/pdisk_1.dat 2025-05-07T08:48:23.189551Z node 3 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 4158, node 3 TClient is connected to server localhost:7766 2025-05-07T08:48:23.690957Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:48:23.691056Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:48:23.691149Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:48:23.691472Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration self_check_result: GOOD issue_log { id: "YELLOW-f489-1231c6b1" status: YELLOW message: "Database has compute issues" location { database { name: "/Root" } } reason: "YELLOW-1ba8-1231c6b1" type: "DATABASE" level: 1 } issue_log { id: "YELLOW-1ba8-1231c6b1" status: YELLOW message: "Compute is overloaded" location { database { name: "/Root" } } reason: "YELLOW-e9e2-1231c6b1-3" reason: "YELLOW-e9e2-1231c6b1-4" type: "COMPUTE" level: 2 } issue_log { id: "YELLOW-e9e2-1231c6b1-3" status: YELLOW message: "LoadAverage above 100%" location { compute { node { id: 3 host: "::1" port: 12001 } } database { name: "/Root" } } type: "LOAD_AVERAGE" level: 4 } issue_log { id: "YELLOW-e9e2-1231c6b1-4" status: YELLOW message: "LoadAverage above 100%" location { compute { node { id: 4 host: "::1" port: 12002 } } database { name: "/Root" } } type: "LOAD_AVERAGE" level: 4 } location { id: 3 host: "::1" port: 12001 } 2025-05-07T08:48:47.778765Z node 5 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [5:491:2410], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:48:47.779315Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:48:47.779647Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:48:47.780729Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [6:486:2155], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:48:47.781117Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:48:47.781252Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003dca/r3tmp/tmpDZ8EKV/pdisk_1.dat 2025-05-07T08:48:48.269118Z node 5 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 11168, node 5 TClient is connected to server localhost:12096 2025-05-07T08:48:48.892451Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:48:48.892534Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:48:48.892584Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:48:48.893401Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration self_check_result: GOOD issue_log { id: "YELLOW-f489-1231c6b1" status: YELLOW message: "Database has compute issues" location { database { name: "/Root" } } reason: "YELLOW-1ba8-1231c6b1" type: "DATABASE" level: 1 } issue_log { id: "YELLOW-1ba8-1231c6b1" status: YELLOW message: "Compute is overloaded" location { database { name: "/Root" } } reason: "YELLOW-e9e2-1231c6b1-5" reason: "YELLOW-e9e2-1231c6b1-6" type: "COMPUTE" level: 2 } issue_log { id: "YELLOW-e9e2-1231c6b1-5" status: YELLOW message: "LoadAverage above 100%" location { compute { node { id: 5 host: "::1" port: 12001 } } database { name: "/Root" } } type: "LOAD_AVERAGE" level: 4 } issue_log { id: "YELLOW-e9e2-1231c6b1-6" status: YELLOW message: "LoadAverage above 100%" location { compute { node { id: 6 host: "::1" port: 12002 } } database { name: "/Root" } } type: "LOAD_AVERAGE" level: 4 } location { id: 5 host: "::1" port: 12001 } 2025-05-07T08:48:55.974144Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [7:254:2218], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:48:55.974616Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:48:55.974720Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003dca/r3tmp/tmp70tVAE/pdisk_1.dat 2025-05-07T08:48:56.493259Z node 7 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 11603, node 7 TClient is connected to server localhost:19040 2025-05-07T08:48:57.432485Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:48:57.432574Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:48:57.432621Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:48:57.433150Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-05-07T08:49:03.459328Z node 9 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [9:451:2413], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:49:03.459747Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:49:03.459922Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003dca/r3tmp/tmpyD2u1a/pdisk_1.dat 2025-05-07T08:49:04.047880Z node 9 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 29215, node 9 TClient is connected to server localhost:27534 2025-05-07T08:49:04.774471Z node 9 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:49:04.774557Z node 9 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:49:04.774611Z node 9 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:49:04.807057Z node 9 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration
>> TSchemeShardTest::ConsistentCopyTablesForBackup [GOOD]
>> TSchemeShardTest::CopyLockedTableForBackup
------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_read_only_pdisk/unittest >> BSCReadOnlyPDisk::ReadOnlyOneByOne [GOOD]
Test command err: RandomSeed# 4868099963527042257
>> THealthCheckTest::Issues100Groups100VCardMerging [GOOD]
>> Yq_1::DescribeQuery [GOOD]
>> PrivateApi::Nodes [GOOD]
|88.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_auditsettings/ydb-core-tx-schemeshard-ut_auditsettings
|88.6%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_auditsettings/ydb-core-tx-schemeshard-ut_auditsettings
|88.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/ut/effects/ydb-core-kqp-ut-effects
|88.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_auditsettings/ydb-core-tx-schemeshard-ut_auditsettings
|88.6%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/effects/ydb-core-kqp-ut-effects
|88.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/effects/ydb-core-kqp-ut-effects
|88.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/ut/tx/ydb-core-kqp-ut-tx
|88.6%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/tx/ydb-core-kqp-ut-tx
|88.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/tx/ydb-core-kqp-ut-tx
|88.6%| [TA] $(B)/ydb/core/blobstorage/ut_blobstorage/ut_read_only_pdisk/test-results/unittest/{meta.json ... results_accumulator.log}
|88.6%| [TA] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_read_only_pdisk/test-results/unittest/{meta.json ... results_accumulator.log}
|88.6%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest
|88.6%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest
|88.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/viewer/ut/ydb-core-viewer-ut
|88.6%| [LD] {RESULT} $(B)/ydb/core/viewer/ut/ydb-core-viewer-ut
|88.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/viewer/ut/ydb-core-viewer-ut
>> TSchemeShardTest::CopyLockedTableForBackup [GOOD]
>> TSchemeShardTest::ConfigColumnFamily
>> TCacheTest::SystemView
------- [TM] {asan, default-linux-x86_64, release} ydb/core/health_check/ut/unittest >> THealthCheckTest::Issues100Groups100VCardMerging [GOOD]
Test command err: 2025-05-07T08:47:38.680693Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:698:2413], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:47:38.681009Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:38.681197Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:47:38.683942Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:695:2355], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:47:38.684248Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:38.684290Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003d47/r3tmp/tmpTb48go/pdisk_1.dat 2025-05-07T08:47:44.982799Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 22133, node 1 TClient is connected to server localhost:28206 2025-05-07T08:47:53.676304Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:47:53.676663Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:47:53.677356Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:47:53.679888Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-05-07T08:48:21.537143Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [3:698:2413], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:48:21.537503Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:48:21.537706Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:48:21.540143Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [4:695:2355], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:48:21.540570Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:48:21.540623Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003d47/r3tmp/tmpy09dya/pdisk_1.dat 2025-05-07T08:48:21.960529Z node 3 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 23644, node 3 TClient is connected to server localhost:6127 2025-05-07T08:48:22.504731Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:48:22.504800Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:48:22.504834Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:48:22.505429Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-05-07T08:48:42.291699Z node 5 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [5:491:2410], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:48:42.292204Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:48:42.292524Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:48:42.293513Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [6:486:2155], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:48:42.293842Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:48:42.294018Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003d47/r3tmp/tmpC8kFMP/pdisk_1.dat 2025-05-07T08:48:42.784690Z node 5 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 11845, node 5 TClient is connected to server localhost:27365 2025-05-07T08:48:43.403628Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:48:43.403706Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:48:43.403748Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:48:43.404244Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-05-07T08:48:54.385317Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [7:698:2413], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:48:54.385936Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:48:54.386357Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:48:54.386933Z node 8 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [8:695:2355], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:48:54.387366Z node 8 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:48:54.387446Z node 8 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003d47/r3tmp/tmpgewDUk/pdisk_1.dat 2025-05-07T08:48:54.838922Z node 7 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 4764, node 7 TClient is connected to server localhost:61593 2025-05-07T08:48:55.530488Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:48:55.530561Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:48:55.530603Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:48:55.531558Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-05-07T08:49:06.613356Z node 9 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [9:698:2413], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:49:06.613552Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:49:06.613748Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:49:06.615184Z node 10 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [10:695:2355], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:49:06.615796Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:49:06.615999Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003d47/r3tmp/tmplK3lSM/pdisk_1.dat 2025-05-07T08:49:07.133013Z node 9 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 22786, node 9 TClient is connected to server localhost:10884 2025-05-07T08:49:07.789721Z node 9 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:49:07.789812Z node 9 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:49:07.789867Z node 9 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:49:07.790267Z node 9 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration
>> THealthCheckTest::TestBootingTabletIsNotDead [GOOD]
------- [TM] {asan, default-linux-x86_64, release} ydb/services/fq/ut_integration/unittest >> PrivateApi::Nodes [GOOD]
Test command err: 2025-05-07T08:47:37.096109Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501622768119051751:2094];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:47:37.096815Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-05-07T08:47:38.381352Z node 1 :YQ_CONTROL_PLANE_STORAGE ERROR: schema.cpp:160: Create directory "Root/yq" error: TRANSPORT_UNAVAILABLE [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:1299: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:1299 } ] E0507 08:47:38.394365410 108335 dns_resolver.cc:162] no server name supplied in dns URI E0507 08:47:38.394522295 108335 channel.cc:120] channel stack builder failed: UNKNOWN: the target uri is not valid: dns:/// 2025-05-07T08:47:38.404772Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:39.404876Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:39.695040Z node 1 :YQL_NODES_MANAGER ERROR: nodes_manager.cpp:364: ydb/core/fq/libs/actors/nodes_manager.cpp:322: TRANSPORT_UNAVAILABLE
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:1299: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint localhost:1299 2025-05-07T08:47:39.842950Z node 1 :YQ_CONTROL_PLANE_STORAGE ERROR: schema.cpp:160: Create directory "Root/yq" error: TRANSPORT_UNAVAILABLE [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:1299: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:1299 } ] 2025-05-07T08:47:40.414720Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:41.384359Z node 1 :YQ_CONTROL_PLANE_STORAGE ERROR: schema.cpp:160: Create directory "Root/yq" error: TRANSPORT_UNAVAILABLE [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:1299: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:1299 } ] 2025-05-07T08:47:41.418394Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:42.114486Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501622768119051751:2094];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:47:42.115196Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:47:42.461756Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:43.128056Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:43.470333Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; E0507 08:47:43.484061167 108907 dns_resolver.cc:162] no server name supplied in dns URI E0507 08:47:43.488419272 108907 channel.cc:120] channel stack builder failed: UNKNOWN: the target uri is not valid: dns:/// 2025-05-07T08:47:44.129163Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:44.281663Z node 1 :YQL_NODES_MANAGER ERROR: nodes_manager.cpp:364: ydb/core/fq/libs/actors/nodes_manager.cpp:322: TRANSPORT_UNAVAILABLE
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:1299: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint localhost:1299 2025-05-07T08:47:44.496338Z node 1 :YQ_CONTROL_PLANE_STORAGE ERROR: schema.cpp:160: Create directory "Root/yq" error: TRANSPORT_UNAVAILABLE [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:1299: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:1299 } ] 2025-05-07T08:47:44.528448Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:45.148820Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:45.538286Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:46.153799Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:46.267543Z node 1 :YQ_CONTROL_PLANE_STORAGE ERROR: schema.cpp:160: Create directory "Root/yq" error: TRANSPORT_UNAVAILABLE [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:1299: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:1299 } ] 2025-05-07T08:47:46.552577Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:47.154406Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:47.555376Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:47.769845Z node 1 :YQ_CONTROL_PLANE_STORAGE ERROR: schema.cpp:160: Create directory "Root/yq" error: TRANSPORT_UNAVAILABLE [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:1299: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:1299 } ] 2025-05-07T08:47:48.212018Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:48.558555Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; E0507 08:47:48.637637357 108907 dns_resolver.cc:162] no server name supplied in dns URI E0507 08:47:48.641772247 108907 channel.cc:120] channel stack builder failed: UNKNOWN: the target uri is not valid: dns:/// 2025-05-07T08:47:49.678061Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:49.699052Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:50.720063Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:50.746082Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:51.350671Z node 1 :YQ_CONTROL_PLANE_STORAGE WARN: schema.cpp:297: Create table "Root/yq/tenants". Create session error: [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:1299: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:1299 } ] 2025-05-07T08:47:51.350747Z node 1 :YQ_CONTROL_PLANE_STORAGE WARN: schema.cpp:297: Create table "Root/yq/tenant_acks". Create session error: [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:1299: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:1299 } ] 2025-05-07T08:47:51.476879Z node 1 :YQ_CONTROL_PLANE_STORAGE WARN: schema.cpp:297: Create table "Root/yq/queries". Create session error: [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:1299: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:1299 } ] 2025-05-07T08:47:51.476943Z node 1 :YQ_CONTROL_PLANE_STORAGE WARN: schema.cpp:297: Create table "Root/yq/idempotency_keys". Create session error: [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:1299: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:1299 } ] 2025-05-07T08:47:51.477309Z node 1 :YQ_CONTROL_PLANE_STORAGE WARN: schema.cpp:297: Create table "Root/yq/result_sets". Create session error: [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:1299: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:1299 } ] 2025-05-07T08:47:51.486402Z node 1 :YQ_CONTROL_PLANE_STORAGE WARN: schema.cpp:297: Create table "Root/yq/mappings". Create session error: [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:1299: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:1299 } ] 2025-05-07T08:47:51.486473Z node 1 :YQ_CONTROL_PLANE_STORAGE WARN: schema.cpp:297: Create table "Root/yq/jobs". C ... s 0 running reads 1 pending shards 0 finished = 0 has limit 0 limit reached 0 2025-05-07T08:49:06.590431Z node 7 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:143: SelfId: [7:7501623151371226389:2543], TxId: 281474976715685, task: 1. Ctx: { SessionId : ydb://session/3?node_id=7&id=YjBlNDYyMzktOGM1NDhiNGEtYmUzYzU4NzItNjExYmE0YzY=. TraceId : 01jtmyybgcb5n0mscd52b7grj9. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. CA StateFunc 271646926 2025-05-07T08:49:06.590491Z node 7 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:1074: SelfId: [7:7501623151371226389:2543], TxId: 281474976715685, task: 1. Ctx: { SessionId : ydb://session/3?node_id=7&id=YjBlNDYyMzktOGM1NDhiNGEtYmUzYzU4NzItNjExYmE0YzY=. TraceId : 01jtmyybgcb5n0mscd52b7grj9. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. Received channels info: Update { Id: 1 TransportVersion: DATA_TRANSPORT_OOB_PICKLE_1_0 SrcTaskId: 1 DstTaskId: 2 SrcEndpoint { ActorId { RawX1: 7501623151371226389 RawX2: 4503629692144111 } } DstEndpoint { ActorId { RawX1: 7501623151371226390 RawX2: 4503629692144112 } } InMemory: true DstStageId: 1 } 2025-05-07T08:49:06.590501Z node 7 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1328: TxId: 281474976715685, task: 1, CA Id [7:7501623151371226389:2543]. enter getasyncinputdata results size 0, freeSpace 8388608 2025-05-07T08:49:06.590512Z node 7 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1424: TxId: 281474976715685, task: 1, CA Id [7:7501623151371226389:2543]. returned async data processed rows 0 left freeSpace 8388608 received rows 0 running reads 1 pending shards 0 finished = 0 has limit 0 limit reached 0 2025-05-07T08:49:06.590518Z node 7 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:143: SelfId: [7:7501623151371226390:2544], TxId: 281474976715685, task: 2. Ctx: { TraceId : 01jtmyybgcb5n0mscd52b7grj9. SessionId : ydb://session/3?node_id=7&id=YjBlNDYyMzktOGM1NDhiNGEtYmUzYzU4NzItNjExYmE0YzY=. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. CA StateFunc 271646926 2025-05-07T08:49:06.590547Z node 7 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:143: SelfId: [7:7501623151371226389:2543], TxId: 281474976715685, task: 1. Ctx: { SessionId : ydb://session/3?node_id=7&id=YjBlNDYyMzktOGM1NDhiNGEtYmUzYzU4NzItNjExYmE0YzY=. TraceId : 01jtmyybgcb5n0mscd52b7grj9. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. CA StateFunc 271646922 2025-05-07T08:49:06.590562Z node 7 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1328: TxId: 281474976715685, task: 1, CA Id [7:7501623151371226389:2543]. enter getasyncinputdata results size 0, freeSpace 8388608 2025-05-07T08:49:06.590602Z node 7 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1424: TxId: 281474976715685, task: 1, CA Id [7:7501623151371226389:2543]. returned async data processed rows 0 left freeSpace 8388608 received rows 0 running reads 1 pending shards 0 finished = 0 has limit 0 limit reached 0 2025-05-07T08:49:06.590717Z node 7 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:1074: SelfId: [7:7501623151371226390:2544], TxId: 281474976715685, task: 2. Ctx: { TraceId : 01jtmyybgcb5n0mscd52b7grj9. SessionId : ydb://session/3?node_id=7&id=YjBlNDYyMzktOGM1NDhiNGEtYmUzYzU4NzItNjExYmE0YzY=. CustomerSuppliedId : . CurrentExecutionId : . 
DatabaseId : /Root. PoolId : default. Database : . }. Received channels info: Update { Id: 1 TransportVersion: DATA_TRANSPORT_OOB_PICKLE_1_0 SrcTaskId: 1 DstTaskId: 2 SrcEndpoint { ActorId { RawX1: 7501623151371226389 RawX2: 4503629692144111 } } DstEndpoint { ActorId { RawX1: 7501623151371226390 RawX2: 4503629692144112 } } InMemory: true DstStageId: 1 } Update { Id: 2 TransportVersion: DATA_TRANSPORT_OOB_PICKLE_1_0 SrcTaskId: 2 SrcEndpoint { ActorId { RawX1: 7501623151371226390 RawX2: 4503629692144112 } } DstEndpoint { ActorId { RawX1: 7501623151371226385 RawX2: 4503629692143970 } } InMemory: true } 2025-05-07T08:49:06.590732Z node 7 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:1081: SelfId: [7:7501623151371226390:2544], TxId: 281474976715685, task: 2. Ctx: { TraceId : 01jtmyybgcb5n0mscd52b7grj9. SessionId : ydb://session/3?node_id=7&id=YjBlNDYyMzktOGM1NDhiNGEtYmUzYzU4NzItNjExYmE0YzY=. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. Update input channelId: 1, peer: [7:7501623151371226389:2543] 2025-05-07T08:49:06.590814Z node 7 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1944: ActorId: [7:7501623151371226385:2402] TxId: 281474976715685. Ctx: { TraceId: 01jtmyybgcb5n0mscd52b7grj9, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=YjBlNDYyMzktOGM1NDhiNGEtYmUzYzU4NzItNjExYmE0YzY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ABORTED: {
: Error: Client lost } 2025-05-07T08:49:06.590823Z node 7 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:143: SelfId: [7:7501623151371226390:2544], TxId: 281474976715685, task: 2. Ctx: { TraceId : 01jtmyybgcb5n0mscd52b7grj9. SessionId : ydb://session/3?node_id=7&id=YjBlNDYyMzktOGM1NDhiNGEtYmUzYzU4NzItNjExYmE0YzY=. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. CA StateFunc 271646926 2025-05-07T08:49:06.590962Z node 7 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:1074: SelfId: [7:7501623151371226390:2544], TxId: 281474976715685, task: 2. Ctx: { TraceId : 01jtmyybgcb5n0mscd52b7grj9. SessionId : ydb://session/3?node_id=7&id=YjBlNDYyMzktOGM1NDhiNGEtYmUzYzU4NzItNjExYmE0YzY=. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. Received channels info: Update { Id: 1 TransportVersion: DATA_TRANSPORT_OOB_PICKLE_1_0 SrcTaskId: 1 DstTaskId: 2 SrcEndpoint { ActorId { RawX1: 7501623151371226389 RawX2: 4503629692144111 } } DstEndpoint { ActorId { RawX1: 7501623151371226390 RawX2: 4503629692144112 } } InMemory: true DstStageId: 1 } Update { Id: 2 TransportVersion: DATA_TRANSPORT_OOB_PICKLE_1_0 SrcTaskId: 2 SrcEndpoint { ActorId { RawX1: 7501623151371226390 RawX2: 4503629692144112 } } DstEndpoint { ActorId { RawX1: 7501623151371226385 RawX2: 4503629692143970 } } InMemory: true } 2025-05-07T08:49:06.590995Z node 7 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:143: SelfId: [7:7501623151371226390:2544], TxId: 281474976715685, task: 2. Ctx: { TraceId : 01jtmyybgcb5n0mscd52b7grj9. SessionId : ydb://session/3?node_id=7&id=YjBlNDYyMzktOGM1NDhiNGEtYmUzYzU4NzItNjExYmE0YzY=. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. CA StateFunc 271646922 2025-05-07T08:49:06.591017Z node 7 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:143: SelfId: [7:7501623151371226390:2544], TxId: 281474976715685, task: 2. Ctx: { TraceId : 01jtmyybgcb5n0mscd52b7grj9. SessionId : ydb://session/3?node_id=7&id=YjBlNDYyMzktOGM1NDhiNGEtYmUzYzU4NzItNjExYmE0YzY=. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. CA StateFunc 271646735 2025-05-07T08:49:06.591106Z node 7 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1216: SelfId: [7:7501623151371226390:2544], TxId: 281474976715685, task: 2. Ctx: { TraceId : 01jtmyybgcb5n0mscd52b7grj9. SessionId : ydb://session/3?node_id=7&id=YjBlNDYyMzktOGM1NDhiNGEtYmUzYzU4NzItNjExYmE0YzY=. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. Handle abort execution event from: [7:7501623151371226385:2402], status: ABORTED, reason: {
: Error: Terminate execution } 2025-05-07T08:49:06.591222Z node 7 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:494: TxId: 281474976715685, task: 2. pass away 2025-05-07T08:49:06.591282Z node 7 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:143: SelfId: [7:7501623151371226389:2543], TxId: 281474976715685, task: 1. Ctx: { SessionId : ydb://session/3?node_id=7&id=YjBlNDYyMzktOGM1NDhiNGEtYmUzYzU4NzItNjExYmE0YzY=. TraceId : 01jtmyybgcb5n0mscd52b7grj9. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. CA StateFunc 271646735 2025-05-07T08:49:06.591329Z node 7 :KQP_COMPUTE DEBUG: log.cpp:784: fline=kqp_compute_actor_factory.cpp:66;problem=finish_compute_actor;tx_id=281474976715685;task_id=2;success=0;message={
: Error: COMPUTE_STATE_FAILURE }; 2025-05-07T08:49:06.591377Z node 7 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1216: SelfId: [7:7501623151371226389:2543], TxId: 281474976715685, task: 1. Ctx: { SessionId : ydb://session/3?node_id=7&id=YjBlNDYyMzktOGM1NDhiNGEtYmUzYzU4NzItNjExYmE0YzY=. TraceId : 01jtmyybgcb5n0mscd52b7grj9. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. Handle abort execution event from: [7:7501623151371226385:2402], status: ABORTED, reason: {
: Error: Terminate execution } 2025-05-07T08:49:06.591499Z node 7 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:494: TxId: 281474976715685, task: 1. pass away 2025-05-07T08:49:06.591577Z node 7 :KQP_COMPUTE DEBUG: log.cpp:784: fline=kqp_compute_actor_factory.cpp:66;problem=finish_compute_actor;tx_id=281474976715685;task_id=1;success=0;message={
: Error: COMPUTE_STATE_FAILURE }; 2025-05-07T08:49:06.591987Z node 7 :KQP_SESSION WARN: kqp_session_actor.cpp:2578: SessionId: ydb://session/3?node_id=7&id=YjBlNDYyMzktOGM1NDhiNGEtYmUzYzU4NzItNjExYmE0YzY=, ActorId: [7:7501623142781290228:2402], ActorState: ExecuteState, TraceId: 01jtmyybgcb5n0mscd52b7grj9, Create QueryResponse for error on request, msg: 2025-05-07T08:49:06.593226Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715686. Ctx: { TraceId: 01jtmyybgcb5n0mscd52b7grj9, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=YjBlNDYyMzktOGM1NDhiNGEtYmUzYzU4NzItNjExYmE0YzY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:49:06.601171Z node 7 :YQ_CONTROL_PLANE_STORAGE WARN: ydb_control_plane_storage.cpp:561: DB Error, Status: CLIENT_CANCELLED, Issues: [ {
: Error: GRpc error: (1): Cancelled on the server side } {
: Error: Grpc error response on endpoint localhost:4102 } ], Query: --!syntax_v1
-- Query name: NodesHealthCheck(read)
PRAGMA TablePathPrefix("Root/yq");
DECLARE $now as Timestamp;
DECLARE $tenant as String;
SELECT `node_id`, `instance_id`, `hostname`, `active_workers`, `memory_limit`, `memory_allocated`, `interconnect_port`, `node_address`, `data_center` FROM `nodes`
WHERE `tenant` = $tenant AND `expire_at` >= $now;
2025-05-07T08:49:06.602079Z node 7 :YQ_CONTROL_PLANE_STORAGE WARN: ydb_control_plane_storage_impl.h:768: NodesHealthCheckRequest - NodesHealthCheckResult: {tenant: "TestTenant" node { node_id: 7 instance_id: "8e70700b-4334b69b-13fc6b91-853d988" hostname: "ghrun-sykirh5vua" node_address: "127.0.1.1" } } ERROR: [ {
: Error: GRpc error: (1): Cancelled on the server side } {
: Error: Grpc error response on endpoint localhost:4102 } ] 2025-05-07T08:49:06.602488Z node 7 :YQL_NODES_MANAGER ERROR: nodes_health_check.cpp:65: Failed with code: INTERNAL_ERROR Details:
: Error: Can't do NodesHealthCheck: (yexception) ydb/core/fq/libs/actors/nodes_health_check.cpp:95:
: Error: GRpc error: (1): Cancelled on the server side
: Error: Grpc error response on endpoint localhost:4102 2025-05-07T08:49:06.606702Z node 7 :YQL_NODES_MANAGER ERROR: nodes_manager.cpp:364: ydb/core/fq/libs/actors/nodes_manager.cpp:322: CLIENT_CANCELLED
: Error: GRpc error: (1): Cancelled on the server side
: Error: Grpc error response on endpoint [::]:4102 2025-05-07T08:49:07.334722Z node 7 :FQ_PENDING_FETCHER ERROR: pending_fetcher.cpp:259: Error with GetTask:
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv6:%5B::%5D:4102: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint [::]:4102
|88.6%| [TM] {asan, default-linux-x86_64, release} ydb/services/dynamic_config/ut/unittest
|88.6%| [TM] {asan, default-linux-x86_64, release} ydb/services/dynamic_config/ut/unittest
>> TCacheTest::SystemView [GOOD]
>> TCacheTest::TableSchemaVersion
>> THealthCheckTest::ShardsNoLimit [GOOD]
|88.6%| [TM] {asan, default-linux-x86_64, release} ydb/services/dynamic_config/ut/unittest
>> THealthCheckTest::LayoutCorrect [GOOD]
>> Yq_1::CreateQuery_Without_Connection [GOOD]
>> TCacheTest::TableSchemaVersion [GOOD]
>> TCacheTest::Recreate
>> TCacheTest::List
>> TSchemeShardTest::ConfigColumnFamily [GOOD]
>> TSchemeShardTest::ConsistentCopyAfterDropIndexes
------- [TM] {asan, default-linux-x86_64, release} ydb/core/health_check/ut/unittest >> THealthCheckTest::ShardsNoLimit [GOOD]
Test command err: 2025-05-07T08:47:38.250737Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:698:2413], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:47:38.253373Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:38.257371Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:47:38.266137Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:695:2355], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:47:38.268200Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:38.268238Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003d5c/r3tmp/tmpPzSDPI/pdisk_1.dat 2025-05-07T08:47:41.509737Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:47:42.941336Z node 1 :BS_CONTROLLER ERROR: {BSC07@impl.h:2175} ProcessControllerEvent event processing took too much time Type# 268637706 Duration# 0.140184s 2025-05-07T08:47:42.950525Z node 1 :BS_CONTROLLER ERROR: {BSC00@bsc.cpp:666} StateWork event processing took too much time Type# 2146435078 Duration# 0.149349s TServer::EnableGrpc on GrpcPort 11547, node 1 TClient is connected to server localhost:30479 2025-05-07T08:47:51.488781Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:47:51.488836Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:47:51.488866Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:47:51.489260Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-05-07T08:48:22.358504Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [3:700:2413], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:48:22.359216Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:48:22.359509Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:48:22.359905Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [4:697:2355], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:48:22.360323Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:48:22.360474Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003d5c/r3tmp/tmpoRTh2q/pdisk_1.dat 2025-05-07T08:48:22.814073Z node 3 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 13225, node 3 TClient is connected to server localhost:1785 2025-05-07T08:48:23.238657Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:48:23.238708Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:48:23.238734Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:48:23.238981Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-05-07T08:48:48.604100Z node 5 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [5:491:2410], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:48:48.604611Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:48:48.604913Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:48:48.605940Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [6:486:2155], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:48:48.608323Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:48:48.608518Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003d5c/r3tmp/tmpK6KGFT/pdisk_1.dat 2025-05-07T08:48:49.151814Z node 5 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 24734, node 5 TClient is connected to server localhost:7874 2025-05-07T08:48:49.705834Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:48:49.705890Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:48:49.705921Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:48:49.706478Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-05-07T08:48:58.905461Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [7:698:2413], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:48:58.906002Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:48:58.908182Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:48:58.908806Z node 8 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [8:695:2355], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:48:58.909174Z node 8 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:48:58.909367Z node 8 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003d5c/r3tmp/tmpejVLCq/pdisk_1.dat 2025-05-07T08:48:59.316069Z node 7 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 31892, node 7 TClient is connected to server localhost:18254 2025-05-07T08:48:59.733668Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:48:59.733722Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:48:59.733756Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:48:59.734489Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-05-07T08:49:09.236186Z node 9 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [9:698:2413], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:49:09.236324Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:49:09.236479Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:49:09.237955Z node 10 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [10:695:2355], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:49:09.238587Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:49:09.238794Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003d5c/r3tmp/tmpmo1IAr/pdisk_1.dat 2025-05-07T08:49:09.619487Z node 9 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 28527, node 9 TClient is connected to server localhost:9773 2025-05-07T08:49:10.041095Z node 9 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:49:10.041163Z node 9 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:49:10.041197Z node 9 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:49:10.041502Z node 9 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_cache/unittest >> TCacheTest::TableSchemaVersion [GOOD]
Test command err: 2025-05-07T08:49:11.250087Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:49:11.250151Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TestModificationResults wait txId: 1 2025-05-07T08:49:11.422006Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 FAKE_COORDINATOR: Erasing txId 1 TestModificationResult got TxId: 1, wait until txId: 1 2025-05-07T08:49:11.776480Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:49:11.776562Z node 2 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TestModificationResults wait txId: 1 2025-05-07T08:49:11.829763Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 FAKE_COORDINATOR: Erasing txId 1 TestModificationResult got TxId: 1, wait until txId: 1 TestModificationResults wait txId: 101 2025-05-07T08:49:11.869406Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 101:0, at schemeshard: 72057594046678944 TestModificationResult got TxId: 101, wait until txId: 101 TestWaitNotification wait txId: 101 FAKE_COORDINATOR: Add transaction: 101 at step: 5000002 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000001 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 101 at step: 5000002 FAKE_COORDINATOR: Send Plan to tablet 72075186233409546 for txId: 101 at step: 5000002 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Erasing txId 101 TestWaitNotification: OK eventTxId 101 TestModificationResults wait txId: 102 2025-05-07T08:49:12.051723Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 102:0, at schemeshard: 72057594046678944 TestModificationResult got TxId: 102, wait until txId: 102 TestWaitNotification wait txId: 102 FAKE_COORDINATOR: Add transaction: 102 at step: 5000003 FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000002 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 102 at step: 5000003 FAKE_COORDINATOR: Send Plan to tablet 72075186233409546 for txId: 102 at step: 5000003 FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000003 FAKE_COORDINATOR: Erasing txId 102 TestWaitNotification: OK eventTxId 102
>> TCacheTest::List [GOOD]
>> TCacheTest::MigrationCommit
>> TCacheTest::MigrationCommon
------- [TM] {asan, default-linux-x86_64, release} ydb/core/health_check/ut/unittest >> THealthCheckTest::TestBootingTabletIsNotDead [GOOD]
Test command err: 2025-05-07T08:47:41.321371Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:698:2413], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:47:41.321765Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:41.321930Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:47:41.328624Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:695:2355], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:47:41.330258Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:41.330499Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003d3f/r3tmp/tmp1UPO0E/pdisk_1.dat 2025-05-07T08:47:47.484984Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 6008, node 1 TClient is connected to server localhost:9329 2025-05-07T08:47:56.201468Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:47:56.201833Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:47:56.210808Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:47:56.213383Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-05-07T08:48:19.434097Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [3:700:2413], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:48:19.435145Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:48:19.435655Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:48:19.436273Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [4:697:2355], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:48:19.437477Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:48:19.437607Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003d3f/r3tmp/tmpiRvMcq/pdisk_1.dat 2025-05-07T08:48:20.303471Z node 3 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 28410, node 3 TClient is connected to server localhost:2793 2025-05-07T08:48:20.880040Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:48:20.880095Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:48:20.880122Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:48:20.881225Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-05-07T08:48:41.607094Z node 5 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [5:700:2413], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:48:41.607888Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:48:41.608133Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:48:41.609448Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [6:697:2355], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:48:41.609566Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:48:41.609614Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003d3f/r3tmp/tmpxpNkIX/pdisk_1.dat 2025-05-07T08:48:41.987538Z node 5 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 22664, node 5 TClient is connected to server localhost:8466 2025-05-07T08:48:42.481435Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:48:42.481493Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:48:42.481525Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:48:42.482431Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-05-07T08:48:52.402620Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [7:693:2413], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:48:52.403041Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:48:52.403190Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:48:52.405848Z node 8 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [8:690:2352], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:48:52.406251Z node 8 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:48:52.406365Z node 8 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003d3f/r3tmp/tmpOzowRw/pdisk_1.dat 2025-05-07T08:48:52.806933Z node 7 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 16299, node 7 TClient is connected to server localhost:28984 2025-05-07T08:48:53.436612Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:48:53.436678Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:48:53.436709Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:48:53.437514Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-05-07T08:49:03.974621Z node 9 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [9:355:2216], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:49:03.975181Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:49:03.975430Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:49:03.976303Z node 10 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [10:775:2356], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:49:03.976763Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:49:03.976951Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003d3f/r3tmp/tmpnuU4Uo/pdisk_1.dat 2025-05-07T08:49:04.598561Z node 9 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 9349, node 9 TClient is connected to server localhost:9550 2025-05-07T08:49:09.312838Z node 9 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:49:09.312914Z node 9 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:49:09.312960Z node 9 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:49:09.313954Z node 9 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-05-07T08:49:09.344121Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(11, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:49:09.344290Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(11, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:49:09.393272Z node 9 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 11 Cookie 11 2025-05-07T08:49:09.393944Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(11, (0,0,0,0)) VolatileState: Connecting -> Connected self_check_result: GOOD issue_log { id: "YELLOW-f489-1231c6b1" status: YELLOW message: "Database has compute issues" location { database { name: "/Root" } } reason: "YELLOW-1ba8-1231c6b1" type: "DATABASE" level: 1 } issue_log { id: "YELLOW-1ba8-1231c6b1" status: YELLOW message: "Compute is overloaded" location { database { name: "/Root" } } reason: "YELLOW-e9e2-1231c6b1-10" reason: "YELLOW-e9e2-1231c6b1-11" reason: "YELLOW-e9e2-1231c6b1-9" type: "COMPUTE" level: 2 } issue_log { id: "YELLOW-e9e2-1231c6b1-9" status: YELLOW message: "LoadAverage above 100%" location { compute { node { id: 9 host: "::1" port: 12001 } } database { name: "/Root" } } type: "LOAD_AVERAGE" level: 4 } issue_log { id: "YELLOW-e9e2-1231c6b1-10" status: YELLOW message: "LoadAverage above 100%" location { compute { node { id: 10 host: "::1" port: 12002 } } database { name: "/Root" } } type: "LOAD_AVERAGE" level: 4 } issue_log { id: "YELLOW-e9e2-1231c6b1-11" status: YELLOW message: "LoadAverage above 100%" location { compute { node { id: 11 host: "::1" port: 12003 } } database { name: "/Root" } } type: "LOAD_AVERAGE" level: 4 } location { id: 9 host: "::1" port: 12001 } >> THealthCheckTest::ProtobufUnderLimitFor100LargeVdisksIssues [GOOD] >> TCacheTest::Recreate [GOOD] >> TCacheTest::SysLocks ------- [TM] {asan, default-linux-x86_64, release} ydb/core/health_check/ut/unittest >> THealthCheckTest::LayoutCorrect [GOOD] Test command err: 2025-05-07T08:47:57.642547Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] 
[TCleanupTablesActor] ActorId: [1:698:2413], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:47:57.643064Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:57.643250Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:47:57.645153Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:695:2355], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:47:57.645506Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:57.645557Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003d37/r3tmp/tmpiByONk/pdisk_1.dat 2025-05-07T08:47:58.600106Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 12493, node 1 TClient is connected to server localhost:4019 2025-05-07T08:48:05.011793Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:48:05.012243Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:48:05.012268Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:48:05.021748Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-05-07T08:48:26.510951Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [3:451:2413], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:48:26.511311Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:48:26.511859Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003d37/r3tmp/tmpyYcP5O/pdisk_1.dat 2025-05-07T08:48:28.787077Z node 3 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 20051, node 3 TClient is connected to server localhost:29672 2025-05-07T08:48:32.572565Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:48:32.572645Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:48:32.572698Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:48:32.573388Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-05-07T08:48:44.430816Z node 5 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [5:453:2413], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:48:44.431178Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:48:44.431328Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003d37/r3tmp/tmpwvTj4H/pdisk_1.dat 2025-05-07T08:48:45.005928Z node 5 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 10912, node 5 TClient is connected to server localhost:6282 2025-05-07T08:48:46.152895Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:48:46.152965Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:48:46.153005Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:48:46.153431Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-05-07T08:48:46.255851Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:48:46.256007Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:48:46.279588Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:49:01.784613Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [7:254:2218], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:49:01.785081Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:49:01.785171Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003d37/r3tmp/tmpGni4K5/pdisk_1.dat 2025-05-07T08:49:02.210297Z node 7 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 15313, node 7 TClient is connected to server localhost:1975 2025-05-07T08:49:02.863451Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:49:02.863571Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:49:02.863629Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:49:02.864312Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-05-07T08:49:09.930415Z node 9 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [9:451:2413], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:49:09.930739Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:49:09.930835Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003d37/r3tmp/tmpoCkFS1/pdisk_1.dat 2025-05-07T08:49:10.375052Z node 9 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 5716, node 9 TClient is connected to server localhost:11111 2025-05-07T08:49:10.804972Z node 9 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:49:10.805030Z node 9 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:49:10.805064Z node 9 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:49:10.805348Z node 9 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration >> TCmsTenatsTest::TestClusterLimit >> TCacheTest::Attributes >> TCacheTest::Navigate >> TCacheTest::SysLocks [GOOD] >> TCacheTest::RacyRecreateAndSync >> THealthCheckTest::UnknowPDiskState [GOOD] >> TS3WrapperTests::AbortMultipartUpload >> TCacheTest::Navigate [GOOD] >> TCacheTest::PathBelongsToDomain >> TCacheTest::Attributes [GOOD] >> TCacheTest::CheckAccess >> TCacheTest::MigrationCommit [GOOD] >> TCacheTest::RacyRecreateAndSync [GOOD] >> TCacheTest::RacyCreateAndSync ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_cache/unittest >> TCacheTest::SysLocks [GOOD] Test command err: 2025-05-07T08:49:12.797960Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:49:12.798057Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TestModificationResults wait txId: 1 2025-05-07T08:49:12.958738Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 FAKE_COORDINATOR: Erasing txId 1 TestModificationResult got TxId: 1, wait until txId: 1 TestModificationResults wait txId: 101 FAKE_COORDINATOR: Add transaction: 101 at step: 5000002 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000001 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 101 at step: 5000002 FAKE_COORDINATOR: Erasing txId 101 TestModificationResult got TxId: 101, wait until txId: 101 TestWaitNotification wait txId: 101 2025-05-07T08:49:12.976921Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 101 TestModificationResults wait txId: 102 2025-05-07T08:49:12.979149Z node 1 :FLAT_TX_SCHEMESHARD WARN: 
schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpRmDir, opId: 102:0, at schemeshard: 72057594046678944 FAKE_COORDINATOR: Add transaction: 102 at step: 5000003 FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000002 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 102 at step: 5000003 FAKE_COORDINATOR: Erasing txId 102 TestModificationResult got TxId: 102, wait until txId: 102 TestWaitNotification wait txId: 102 2025-05-07T08:49:13.025696Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 102 TestModificationResults wait txId: 103 FAKE_COORDINATOR: Add transaction: 103 at step: 5000004 FAKE_COORDINATOR: advance: minStep5000004 State->FrontStep: 5000003 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 103 at step: 5000004 FAKE_COORDINATOR: Erasing txId 103 TestModificationResult got TxId: 103, wait until txId: 103 TestWaitNotification wait txId: 103 2025-05-07T08:49:13.039340Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 103, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 103 2025-05-07T08:49:13.724254Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:49:13.724341Z node 2 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TestModificationResults wait txId: 1 2025-05-07T08:49:13.789165Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 FAKE_COORDINATOR: Erasing txId 1 TestModificationResult got TxId: 1, wait until txId: 1 >> TCacheTest::CheckAccess [GOOD] >> TCacheTest::PathBelongsToDomain [GOOD] |88.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_olap/ydb-core-tx-schemeshard-ut_olap |88.6%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_olap/ydb-core-tx-schemeshard-ut_olap |88.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_olap/ydb-core-tx-schemeshard-ut_olap >> TMaintenanceApiTest::SingleCompositeActionGroup >> TCacheTest::MigrationCommon [GOOD] >> TCacheTest::MigrationDeletedPathNavigate >> TCacheTest::RacyCreateAndSync [GOOD] >> TCmsTest::RequestRestartServicesOk ------- [TM] {asan, default-linux-x86_64, release} ydb/core/health_check/ut/unittest >> THealthCheckTest::ProtobufUnderLimitFor100LargeVdisksIssues [GOOD] Test command err: 2025-05-07T08:47:39.141204Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:698:2413], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:47:39.158394Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:39.159855Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:47:39.164440Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:695:2355], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:47:39.164897Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:39.164965Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003d8f/r3tmp/tmpz7tRHn/pdisk_1.dat 2025-05-07T08:47:43.638439Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 18513, node 1 TClient is connected to server localhost:17364 2025-05-07T08:47:52.446939Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:47:52.447002Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:47:52.447038Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:47:52.447660Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-05-07T08:48:21.093507Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [3:700:2413], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:48:21.094233Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:48:21.094536Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:48:21.094988Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [4:697:2355], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:48:21.095419Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:48:21.095575Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003d8f/r3tmp/tmpiLC30d/pdisk_1.dat 2025-05-07T08:48:21.500361Z node 3 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 3360, node 3 TClient is connected to server localhost:21645 2025-05-07T08:48:22.023361Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:48:22.023427Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:48:22.023466Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:48:22.024411Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration self_check_result: GOOD issue_log { id: "YELLOW-f489-1231c6b1" status: YELLOW message: "Database has compute issues" location { database { name: "/Root" } } reason: "YELLOW-1ba8-1231c6b1" type: "DATABASE" level: 1 } issue_log { id: "YELLOW-1ba8-1231c6b1" status: YELLOW message: "Compute is overloaded" location { database { name: "/Root" } } reason: "YELLOW-e9e2-1231c6b1-3" reason: "YELLOW-e9e2-1231c6b1-4" type: "COMPUTE" level: 2 } issue_log { id: "YELLOW-e9e2-1231c6b1-3" status: YELLOW message: "LoadAverage above 100%" location { compute { node { id: 3 host: "::1" port: 12001 } } database { name: "/Root" } } type: "LOAD_AVERAGE" level: 4 } issue_log { id: "YELLOW-e9e2-1231c6b1-4" status: YELLOW message: "LoadAverage above 100%" location { compute { node { id: 4 host: "::1" port: 12002 } } database { name: "/Root" } } type: "LOAD_AVERAGE" level: 4 } issue_log { id: "YELLOW-e463-3-3-42" status: YELLOW message: "Available size is less than 12%" location { storage { node { id: 3 host: "::1" port: 12001 } pool { group { vdisk { pdisk { id: "3-42" path: "/home/runner/.ya/build/build_root/zvgn/003d8f/r3tmp/tmpiLC30d/pdisk_1.dat" } } } } } } type: "PDISK" level: 6 } issue_log { id: "YELLOW-e463-3-3-43" status: YELLOW message: "Available size is less than 12%" location { storage { node { id: 3 host: "::1" port: 12001 } pool { group { vdisk { pdisk { id: "3-43" path: "/home/runner/.ya/build/build_root/zvgn/003d8f/r3tmp/tmpiLC30d/pdisk_1.dat" } } } } } } type: "PDISK" level: 6 } issue_log { id: "YELLOW-e463-3-3-44" status: YELLOW message: "Available size is less than 12%" location { storage { node { id: 3 host: "::1" port: 12001 } pool { group { vdisk { pdisk { id: "3-44" path: "/home/runner/.ya/build/build_root/zvgn/003d8f/r3tmp/tmpiLC30d/pdisk_1.dat" } } } } } } type: "PDISK" level: 6 } location { id: 3 host: "::1" port: 12001 } 2025-05-07T08:48:42.610408Z node 5 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [5:698:2413], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:48:42.610996Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:48:42.611322Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:48:42.611847Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [6:695:2355], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:48:42.611989Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:48:42.612113Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003d8f/r3tmp/tmpcwBgDw/pdisk_1.dat 2025-05-07T08:48:42.999320Z node 5 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 11666, node 5 TClient is connected to server localhost:9043 2025-05-07T08:48:44.071492Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:48:44.071567Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:48:44.071615Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:48:44.072278Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration self_check_result: GOOD issue_log { id: "YELLOW-f489-1231c6b1" status: YELLOW message: "Database has compute issues" location { database { name: "/Root" } } reason: "YELLOW-1ba8-1231c6b1" type: "DATABASE" level: 1 } issue_log { id: "YELLOW-1ba8-1231c6b1" status: YELLOW message: "Compute is overloaded" location { database { name: "/Root" } } reason: "YELLOW-e9e2-1231c6b1-5" reason: "YELLOW-e9e2-1231c6b1-6" type: "COMPUTE" level: 2 } issue_log { id: "YELLOW-e9e2-1231c6b1-5" status: YELLOW message: "LoadAverage above 100%" location { compute { node { id: 5 host: "::1" port: 12001 } } database { name: "/Root" } } type: "LOAD_AVERAGE" level: 4 } issue_log { id: "YELLOW-e9e2-1231c6b1-6" status: YELLOW message: "LoadAverage above 100%" location { compute { node { id: 6 host: "::1" port: 12002 } } database { name: "/Root" } } type: "LOAD_AVERAGE" level: 4 } issue_log { id: "YELLOW-a594-5-5-42" status: YELLOW message: "PDisk state is FAULTY" location { storage { node { id: 5 host: "::1" port: 12001 } pool { group { vdisk { pdisk { id: "5-42" path: "/home/runner/.ya/build/build_root/zvgn/003d8f/r3tmp/tmpcwBgDw/pdisk_1.dat" } } } } } } type: "PDISK" level: 6 } issue_log { id: "YELLOW-a594-5-5-43" status: YELLOW message: "PDisk state is FAULTY" location { storage { node { id: 5 host: "::1" port: 12001 } pool { group { vdisk { pdisk { id: "5-43" path: "/home/runner/.ya/build/build_root/zvgn/003d8f/r3tmp/tmpcwBgDw/pdisk_1.dat" } } } } } } type: "PDISK" level: 6 } issue_log { id: "YELLOW-a594-5-5-44" status: YELLOW message: "PDisk state is FAULTY" location { storage { node { id: 5 host: "::1" port: 12001 } pool { group { vdisk { pdisk { id: "5-44" path: "/home/runner/.ya/build/build_root/zvgn/003d8f/r3tmp/tmpcwBgDw/pdisk_1.dat" } } } } } } type: "PDISK" level: 6 } location { id: 5 host: "::1" port: 12001 } 2025-05-07T08:48:57.093094Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [7:698:2413], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:48:57.093681Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:48:57.111445Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:48:57.112255Z node 8 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [8:695:2355], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:48:57.112582Z node 8 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:48:57.112740Z node 8 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003d8f/r3tmp/tmp0JKnr7/pdisk_1.dat 2025-05-07T08:48:57.827418Z node 7 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 64687, node 7 TClient is connected to server localhost:29212 2025-05-07T08:48:58.736244Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:48:58.736329Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:48:58.736372Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:48:58.737234Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-05-07T08:49:10.589631Z node 9 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [9:698:2413], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:49:10.589819Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:49:10.590057Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:49:10.591494Z node 10 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [10:695:2355], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:49:10.592138Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:49:10.592330Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003d8f/r3tmp/tmpVooyTh/pdisk_1.dat 2025-05-07T08:49:10.977185Z node 9 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 19548, node 9 TClient is connected to server localhost:18768 2025-05-07T08:49:11.648330Z node 9 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:49:11.648418Z node 9 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:49:11.648474Z node 9 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:49:11.648806Z node 9 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_cache/unittest >> TCacheTest::MigrationCommit [GOOD] Test command err: 2025-05-07T08:49:12.827471Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:49:12.827554Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TestModificationResults wait txId: 1 2025-05-07T08:49:12.997396Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 FAKE_COORDINATOR: Erasing txId 1 TestModificationResult got TxId: 1, wait until txId: 1 TestModificationResults wait txId: 101 FAKE_COORDINATOR: Add transaction: 101 at step: 5000002 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000001 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 101 at step: 5000002 FAKE_COORDINATOR: Erasing txId 101 TestModificationResult got TxId: 101, wait until txId: 101 TestModificationResults wait txId: 102 FAKE_COORDINATOR: Add transaction: 102 at step: 5000003 FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000002 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 102 at step: 5000003 FAKE_COORDINATOR: Erasing txId 102 TestModificationResult got TxId: 102, wait until txId: 102 TestModificationResults wait txId: 103 FAKE_COORDINATOR: Add transaction: 103 at step: 5000004 FAKE_COORDINATOR: advance: minStep5000004 State->FrontStep: 5000003 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 103 at step: 5000004 FAKE_COORDINATOR: Erasing txId 103 TestModificationResult got TxId: 103, wait until txId: 103 TestWaitNotification wait txId: 101 TestWaitNotification wait txId: 102 TestWaitNotification wait txId: 103 2025-05-07T08:49:13.033692Z node 1 
:FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 2025-05-07T08:49:13.033864Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 2025-05-07T08:49:13.034197Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 103, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 101 TestWaitNotification: OK eventTxId 102 TestWaitNotification: OK eventTxId 103 2025-05-07T08:49:13.358416Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:49:13.358494Z node 2 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TestModificationResults wait txId: 1 2025-05-07T08:49:13.475124Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 FAKE_COORDINATOR: Erasing txId 1 TestModificationResult got TxId: 1, wait until txId: 1 Leader for TabletID 72057594046678944 is [2:70:2109] sender: [2:175:2067] recipient: [2:46:2093] Leader for TabletID 72057594046678944 is [2:70:2109] sender: [2:178:2067] recipient: [2:24:2071] Leader for TabletID 72057594046678944 is [2:70:2109] sender: [2:179:2067] recipient: [2:177:2171] Leader for TabletID 72057594046678944 is [2:180:2172] sender: [2:181:2067] recipient: [2:177:2171] 2025-05-07T08:49:13.594461Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:49:13.594572Z node 2 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TestModificationResults wait txId: 101 Leader for TabletID 72057594046678944 is [2:180:2172] sender: [2:211:2067] recipient: [2:24:2071] 2025-05-07T08:49:13.635593Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateSubDomain, opId: 101:0, at schemeshard: 72057594046678944 FAKE_COORDINATOR: Add transaction: 101 at step: 5000002 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000001 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 101 at step: 5000002 FAKE_COORDINATOR: Erasing txId 101 TestModificationResult got TxId: 101, wait until txId: 101 TestModificationResults wait txId: 102 2025-05-07T08:49:13.662201Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 102:0, at schemeshard: 72057594046678944 Leader for TabletID 72075186233409546 is [0:0:0] sender: [2:247:2067] recipient: [2:238:2213] IGNORE Leader for TabletID 72075186233409546 is [0:0:0] sender: [2:247:2067] recipient: [2:238:2213] Leader for TabletID 72075186233409547 is [0:0:0] sender: [2:248:2067] recipient: [2:241:2215] IGNORE Leader for TabletID 72075186233409547 is [0:0:0] sender: [2:248:2067] recipient: [2:241:2215] Leader for TabletID 72075186233409546 is [0:0:0] sender: [2:251:2067] recipient: [2:24:2071] IGNORE Leader for TabletID 72075186233409546 is 
[0:0:0] sender: [2:251:2067] recipient: [2:24:2071] Leader for TabletID 72075186233409547 is [0:0:0] sender: [2:252:2067] recipient: [2:24:2071] IGNORE Leader for TabletID 72075186233409547 is [0:0:0] sender: [2:252:2067] recipient: [2:24:2071] Leader for TabletID 72075186233409546 is [2:250:2219] sender: [2:253:2067] recipient: [2:238:2213] Leader for TabletID 72075186233409547 is [2:255:2221] sender: [2:256:2067] recipient: [2:241:2215] TestModificationResult got TxId: 102, wait until txId: 102 TestWaitNotification wait txId: 101 TestWaitNotification wait txId: 102 2025-05-07T08:49:13.747137Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 101 Leader for TabletID 72075186233409546 is [2:250:2219] sender: [2:289:2067] recipient: [2:24:2071] Leader for TabletID 72075186233409547 is [2:255:2221] sender: [2:290:2067] recipient: [2:24:2071] FAKE_COORDINATOR: Add transaction: 102 at step: 5000003 FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000002 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 102 at step: 5000003 FAKE_COORDINATOR: Erasing txId 102 TestWaitNotification: OK eventTxId 102 TestModificationResults wait txId: 103 TestModificationResult got TxId: 103, wait until txId: 103 TestModificationResults wait txId: 104 2025-05-07T08:49:13.974832Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 104:0, at schemeshard: 72057594046678944 TestModificationResult got TxId: 104, wait until txId: 104 TestWaitNotification wait txId: 103 TestWaitNotification wait txId: 104 Leader for TabletID 72075186233409548 is [0:0:0] sender: [2:340:2067] recipient: [2:336:2285] IGNORE Leader for TabletID 72075186233409548 is [0:0:0] sender: [2:340:2067] recipient: [2:336:2285] Leader for TabletID 72075186233409548 is [0:0:0] sender: [2:341:2067] recipient: [2:24:2071] IGNORE Leader for TabletID 72075186233409548 is [0:0:0] sender: [2:341:2067] recipient: [2:24:2071] Leader for TabletID 72075186233409548 is [2:343:2289] sender: [2:344:2067] recipient: [2:336:2285] Leader for TabletID 72075186233409548 is [2:343:2289] sender: [2:345:2067] recipient: [2:24:2071] TestWaitNotification: OK eventTxId 103 TestWaitNotification: OK eventTxId 104 TestModificationResults wait txId: 105 2025-05-07T08:49:14.310348Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpUpgradeSubDomain, opId: 105:0, at schemeshard: 72057594046678944 Leader for TabletID 72075186233409549 is [0:0:0] sender: [2:420:2067] recipient: [2:415:2333] IGNORE Leader for TabletID 72075186233409549 is [0:0:0] sender: [2:420:2067] recipient: [2:415:2333] Leader for TabletID 72075186233409549 is [0:0:0] sender: [2:422:2067] recipient: [2:24:2071] IGNORE Leader for TabletID 72075186233409549 is [0:0:0] sender: [2:422:2067] recipient: [2:24:2071] Leader for TabletID 72075186233409549 is [2:423:2337] sender: [2:424:2067] recipient: [2:415:2333] 2025-05-07T08:49:14.374403Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:49:14.374544Z node 2 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TestModificationResult got TxId: 105, wait until txId: 105 TestWaitNotification wait txId: 105 
Leader for TabletID 72075186233409549 is [2:423:2337] sender: [2:451:2067] recipient: [2:24:2071] TestWaitNotification: OK eventTxId 105 { Path: Root/USER_0/DirA TableId: [72057594046678944:3:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046678944, LocalPathId: 2] ResourcesDomainKey: [OwnerId: 72057594046678944, LocalPathId: 2] Params { Version: 3 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 SchemeShard: 72075186233409549 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } TestModificationResults wait txId: 106 2025-05-07T08:49:14.435126Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:5351: Mark as Migrated path id [OwnerId: 72057594046678944, LocalPathId: 3] 2025-05-07T08:49:14.435255Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:5351: Mark as Migrated path id [OwnerId: 72057594046678944, LocalPathId: 4] 2025-05-07T08:49:14.435658Z node 2 :FLAT_TX_SCHEMESHARD ERROR: schemeshard__operation_upgrade_subdomain.cpp:1464: TWait ProgressState, dependent transaction: 106, parent transaction: 105, at schemeshard: 72057594046678944 2025-05-07T08:49:14.435811Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpUpgradeSubDomainDecision, opId: 106:0, at schemeshard: 72057594046678944 TestModificationResult got TxId: 106, wait until txId: 106 TestWaitNotification wait txId: 106 2025-05-07T08:49:14.455249Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:5812: Got TEvUpdateAck for unknown txId 105, at schemeshard: 72057594046678944 2025-05-07T08:49:14.455705Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:5812: Got TEvUpdateAck for unknown txId 105, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 106 { Path: Root/USER_0/DirA TableId: [72057594046678944:3:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046678944, LocalPathId: 2] ResourcesDomainKey: [OwnerId: 72057594046678944, LocalPathId: 2] Params { Version: 3 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 SchemeShard: 72075186233409549 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } Leader for TabletID 72057594046678944 is [2:180:2172] sender: [2:512:2067] recipient: [2:46:2093] Leader for TabletID 72057594046678944 is [2:180:2172] sender: [2:514:2067] recipient: [2:24:2071] Leader for TabletID 72057594046678944 is [2:180:2172] sender: [2:516:2067] recipient: [2:515:2408] Leader for TabletID 72057594046678944 is [2:517:2409] sender: [2:518:2067] recipient: [2:515:2408] 2025-05-07T08:49:14.509950Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:49:14.510040Z node 2 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded Leader for TabletID 72057594046678944 is [2:517:2409] sender: [2:545:2067] recipient: [2:24:2071] { Path: Root/USER_0/DirA TableId: [72057594046678944:3:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046678944, LocalPathId: 2] ResourcesDomainKey: [OwnerId: 
72057594046678944, LocalPathId: 2] Params { Version: 3 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 SchemeShard: 72075186233409549 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } >> Yq_1::DeleteQuery [GOOD] >> KqpSnapshotRead::ReadWriteTxFailsOnConcurrentWrite2-withSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_cache/unittest >> TCacheTest::PathBelongsToDomain [GOOD] Test command err: 2025-05-07T08:49:14.175647Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:49:14.175742Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TestModificationResults wait txId: 1 2025-05-07T08:49:14.344181Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 FAKE_COORDINATOR: Erasing txId 1 TestModificationResult got TxId: 1, wait until txId: 1 TestModificationResults wait txId: 101 FAKE_COORDINATOR: Add transaction: 101 at step: 5000002 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000001 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 101 at step: 5000002 FAKE_COORDINATOR: Erasing txId 101 TestModificationResult got TxId: 101, wait until txId: 101 TestWaitNotification wait txId: 101 2025-05-07T08:49:14.377795Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 101 2025-05-07T08:49:14.820935Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:49:14.820999Z node 2 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TestModificationResults wait txId: 1 2025-05-07T08:49:14.867631Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 FAKE_COORDINATOR: Erasing txId 1 TestModificationResult got TxId: 1, wait until txId: 1 TestModificationResults wait txId: 101 2025-05-07T08:49:14.873026Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateSubDomain, opId: 101:0, at schemeshard: 72057594046678944 FAKE_COORDINATOR: Add transaction: 101 at step: 5000002 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000001 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 101 at step: 5000002 FAKE_COORDINATOR: Erasing txId 101 TestModificationResult got TxId: 101, wait until txId: 101 TestWaitNotification wait txId: 101 2025-05-07T08:49:14.878794Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 TestWaitNotification: OK 
eventTxId 101 TestModificationResults wait txId: 102 FAKE_COORDINATOR: Add transaction: 102 at step: 5000003 FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000002 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 102 at step: 5000003 FAKE_COORDINATOR: Erasing txId 102 TestModificationResult got TxId: 102, wait until txId: 102 2025-05-07T08:49:14.889679Z node 2 :TX_PROXY_SCHEME_CACHE WARN: cache.cpp:286: Path does not belong to the specified domain: self# [2:226:2204], domain# [OwnerId: 72057594046678944, LocalPathId: 1], path's domain# [OwnerId: 72057594046678944, LocalPathId: 2] 2025-05-07T08:49:14.890080Z node 2 :TX_PROXY_SCHEME_CACHE WARN: cache.cpp:286: Path does not belong to the specified domain: self# [2:228:2206], domain# [OwnerId: 72057594046678944, LocalPathId: 1], path's domain# [OwnerId: 72057594046678944, LocalPathId: 2] >> TYardTest::TestStartingPointReboots [GOOD] >> TYardTest::TestRestartAtNonceJump >> KqpSinkMvcc::OltpMultiSinksNoSinks ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_cache/unittest >> TCacheTest::CheckAccess [GOOD] Test command err: 2025-05-07T08:49:13.984325Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:49:13.984395Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TestModificationResults wait txId: 1 2025-05-07T08:49:14.339325Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 FAKE_COORDINATOR: Erasing txId 1 TestModificationResult got TxId: 1, wait until txId: 1 TestModificationResults wait txId: 101 FAKE_COORDINATOR: Add transaction: 101 at step: 5000002 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000001 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 101 at step: 5000002 FAKE_COORDINATOR: Erasing txId 101 TestModificationResult got TxId: 101, wait until txId: 101 TestWaitNotification wait txId: 101 2025-05-07T08:49:14.364123Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 101 2025-05-07T08:49:14.832786Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:49:14.832863Z node 2 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TestModificationResults wait txId: 1 2025-05-07T08:49:14.889920Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 FAKE_COORDINATOR: Erasing txId 1 TestModificationResult got TxId: 1, wait until txId: 1 TestModificationResults wait txId: 101 FAKE_COORDINATOR: Add transaction: 101 at step: 5000002 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000001 FAKE_COORDINATOR: Send Plan to tablet 
72057594046678944 for txId: 101 at step: 5000002 FAKE_COORDINATOR: Erasing txId 101 TestModificationResult got TxId: 101, wait until txId: 101 TestModificationResults wait txId: 102 2025-05-07T08:49:14.903950Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 102:0, at schemeshard: 72057594046678944 TestModificationResult got TxId: 102, wait until txId: 102 2025-05-07T08:49:14.908365Z node 2 :TX_PROXY_SCHEME_CACHE WARN: cache.cpp:303: Access denied: self# [2:197:2187], for# user1@builtin, access# DescribeSchema 2025-05-07T08:49:14.908896Z node 2 :TX_PROXY_SCHEME_CACHE WARN: cache.cpp:303: Access denied: self# [2:201:2191], for# user1@builtin, access# DescribeSchema ------- [TM] {asan, default-linux-x86_64, release} ydb/core/health_check/ut/unittest >> THealthCheckTest::UnknowPDiskState [GOOD] Test command err: 2025-05-07T08:47:38.015998Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:698:2413], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:47:38.016866Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:38.017068Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:47:38.019040Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:695:2355], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:47:38.019371Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:38.019417Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003d75/r3tmp/tmpEZgfXx/pdisk_1.dat 2025-05-07T08:47:41.236163Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 17514, node 1 TClient is connected to server localhost:6355 2025-05-07T08:47:49.435014Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:47:49.435062Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:47:49.435089Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:47:49.436877Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-05-07T08:48:22.740699Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [3:566:2411], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:48:22.741429Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:48:22.741664Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:48:22.743463Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [4:561:2156], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:48:22.743800Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:48:22.744111Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003d75/r3tmp/tmpLesvZl/pdisk_1.dat 2025-05-07T08:48:23.155045Z node 3 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 3743, node 3 TClient is connected to server localhost:1768 2025-05-07T08:48:37.700430Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:48:37.700502Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:48:37.700541Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:48:37.721421Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-05-07T08:48:37.923652Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:48:37.924648Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:48:38.039037Z node 3 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 5 Cookie 5 2025-05-07T08:48:38.050089Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:48:38.710388Z node 3 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 5 2025-05-07T08:48:38.714450Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connected -> Disconnected self_check_result: EMERGENCY issue_log { id: "RED-f489-1231c6b1" status: RED message: "Database has compute issues" location { database { name: "/Root" } } reason: "RED-6fa7-1231c6b1" reason: "YELLOW-1ba8-1231c6b1" type: "DATABASE" level: 1 } issue_log { id: "RED-6fa7-1231c6b1" status: RED message: "Compute has issues with tablets" location { database { name: "/Root" } } reason: "RED-e5e3-1231c6b1-PersQueue" type: "COMPUTE" level: 2 } issue_log { id: "YELLOW-1ba8-1231c6b1" status: YELLOW message: "Compute is overloaded" location { database { name: "/Root" } } reason: "YELLOW-e9e2-1231c6b1-3" reason: "YELLOW-e9e2-1231c6b1-4" reason: "YELLOW-e9e2-1231c6b1-5" type: "COMPUTE" level: 2 } issue_log { id: "YELLOW-e9e2-1231c6b1-3" status: YELLOW message: "LoadAverage above 100%" location { compute { node { id: 3 host: "::1" port: 12001 } } database { name: "/Root" } } type: "LOAD_AVERAGE" level: 4 } issue_log { id: "YELLOW-e9e2-1231c6b1-4" status: YELLOW message: "LoadAverage above 100%" location { compute { node { id: 4 host: "::1" port: 12002 } } database { name: "/Root" } } type: "LOAD_AVERAGE" level: 4 } issue_log { id: "YELLOW-e9e2-1231c6b1-5" status: YELLOW message: "LoadAverage above 100%" location { compute { node { id: 5 host: 
"::1" port: 12003 } } database { name: "/Root" } } type: "LOAD_AVERAGE" level: 4 } issue_log { id: "RED-e5e3-1231c6b1-PersQueue" status: RED message: "Tablets are dead" location { compute { tablet { type: "PersQueue" id: "72075186224037888" count: 1 } } database { name: "/Root" } node { } } type: "TABLET" level: 4 } location { id: 3 host: "::1" port: 12001 } 2025-05-07T08:48:50.723557Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [6:851:2415], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:48:50.724082Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:48:50.724340Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:48:50.727136Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [7:848:2358], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:48:50.727763Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:48:50.728200Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003d75/r3tmp/tmpEE0I3C/pdisk_1.dat 2025-05-07T08:48:51.122547Z node 6 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 10761, node 6 TClient is connected to server localhost:22153 2025-05-07T08:48:58.448534Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:48:58.448600Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:48:58.448637Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:48:58.449518Z node 6 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-05-07T08:48:58.451047Z node 6 :HIVE TRACE: hive_impl.cpp:114: HIVE#72057594037968897 Handle TEvTabletPipe::TEvServerConnected([6:1337:2702]) [6:1602:2707] 2025-05-07T08:48:58.451277Z node 6 :HIVE DEBUG: hive_impl.cpp:34: HIVE#72057594037968897 Handle TEvHive::TEvCreateTablet(PersQueue(72057594046578946,0)) 2025-05-07T08:48:58.464758Z node 6 :HIVE DEBUG: tx__create_tablet.cpp:200: HIVE#72057594037968897 THive::TTxCreateTablet::Execute Owner: 72057594046578946 OwnerIdx: 0 TabletType: PersQueue BindedChannels { StoragePoolName: "/Root:test" } BindedChannels { StoragePoolName: "/Root:test" } BindedChannels { StoragePoolName: "/Root:test" } 2025-05-07T08:48:58.464869Z node 6 :HIVE DEBUG: tx__create_tablet.cpp:348: HIVE#72057594037968897 Hive 72057594037968897 allocated TabletId 72075186224037888 from TabletIdIndex 65536 2025-05-07T08:48:58.465147Z node 6 :HIVE DEBUG: tx__create_tablet.cpp:440: HIVE#72057594037968897 THive::TTxCreateTablet::Execute; Default resources after merge for type PersQueue: {} 2025-05-07T08:48:58.465230Z node 6 :HIVE DEBUG: tx__create_tablet.cpp:447: HIVE#72057594037968897 THive::TTxCreateTablet::Execute; Default resources after merge for profile 'default': {Memory: 1048576} 2025-05-07T08:48:58.465477Z node 6 :HIVE DEBUG: hive_impl.cpp:2791: HIVE#72057594037968897 CreateTabletFollowers Tablet PersQueue.72075186224037888.Leader.0 2025-05-07T08:48:58.465561Z node 6 :HIVE DEBUG: tx__create_tablet.cpp:173: HIVE#72057594037968897 THive::TTxCreateTablet::Execute TabletId: 72075186224037888 Status: OK 2025-05-07T08:48:58.465710Z node 6 :HIVE DEBUG: hive_impl.cpp:1065: HIVE#72057594037968897 THive::AssignTabletGroups TEvControllerSelectGroups tablet 72075186224037888 GroupParameters { StoragePoolSpecifier { Name: "/Root:test" } } ReturnAllMatchingGroups: true 2025-05-07T08:48:58.467557Z node 6 :HIVE DEBUG: hive_impl.cpp:72: HIVE#72057594037968897 Connected to tablet 72057594037932033 from tablet 72057594037968897 2025-05-07T08:48:58.476258Z node 6 :HIVE TRACE: hive_impl.cpp:114: HIVE#72057594037968897 Handle TEvTabletPipe::TEvServerConnected([8:1583:2366]) [6:1644:2713] 
2025-05-07T08:48:58.512006Z node 6 :HIVE DEBUG: hive_impl.cpp:141: HIVE#72057594037968897 Handle TEvLocal::TEvRegisterNode from [8:1582:2366] HiveId: 72057594037968897 ServicedDomains { SchemeShard: 72057594046644480 PathId: 1 } TabletAvailability { Type: Mediator Priority: 0 } TabletAvailability { Type: Dummy Priority ... " } Channels { Channel: 2 ChannelType: 0 History { FromGeneration: 0 GroupID: 2181038080 } StoragePool: "/Root:test" } TabletType: PersQueue Version: 1 TenantIdOwner: 72057594046644480 TenantIdLocalId: 1 } SuggestedGeneration: 1 BootMode: BOOT_MODE_LEADER FollowerId: 0} 2025-05-07T08:48:58.604028Z node 6 :HIVE TRACE: hive_impl.cpp:775: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected (duplicate), NodeId 8 Cookie 72075186224037888 2025-05-07T08:48:58.724680Z node 6 :HIVE DEBUG: hive_impl.cpp:480: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus, TabletId: 72075186224037888 2025-05-07T08:48:58.724846Z node 6 :HIVE DEBUG: tx__update_tablet_status.cpp:77: HIVE#72057594037968897 THive::TTxUpdateTabletStatus::Execute for tablet PersQueue.72075186224037888.Leader.1 status 0 generation 1 follower 0 from local [8:1582:2366] 2025-05-07T08:48:58.724936Z node 6 :HIVE DEBUG: tablet_info.cpp:123: HIVE#72057594037968897 Tablet(PersQueue.72075186224037888.Leader.1) VolatileState: Starting -> Running (Node 8) 2025-05-07T08:48:58.725009Z node 6 :HIVE TRACE: node_info.cpp:112: HIVE#72057594037968897 Node(8, (0,1048576,0,0)->(0,0,0,0)) 2025-05-07T08:48:58.725163Z node 6 :HIVE TRACE: hive_impl.cpp:2541: HIVE#72057594037968897 UpdateTotalResources: ObjectId (72057594046578946,0): {Memory: 1048576} -> {} 2025-05-07T08:48:58.725286Z node 6 :HIVE TRACE: hive_impl.cpp:2547: HIVE#72057594037968897 UpdateTotalResources: Type PersQueue: {Memory: 1048576} -> {} 2025-05-07T08:48:58.725364Z node 6 :HIVE TRACE: node_info.cpp:112: HIVE#72057594037968897 Node(8, (0,0,0,0)->(0,1048576,0,0)) 2025-05-07T08:48:58.725476Z node 6 :HIVE TRACE: hive_impl.cpp:2541: HIVE#72057594037968897 UpdateTotalResources: ObjectId (72057594046578946,0): {} -> {Memory: 1048576} 2025-05-07T08:48:58.725582Z node 6 :HIVE TRACE: hive_impl.cpp:2547: HIVE#72057594037968897 UpdateTotalResources: Type PersQueue: {} -> {Memory: 1048576} 2025-05-07T08:48:58.725718Z node 6 :HIVE DEBUG: hive_impl.cpp:342: HIVE#72057594037968897 ProcessBootQueue (0) 2025-05-07T08:48:58.725786Z node 6 :HIVE TRACE: hive_impl.cpp:344: HIVE#72057594037968897 ProcessBootQueue - sending 2025-05-07T08:48:58.726096Z node 6 :HIVE TRACE: hive_impl.cpp:328: HIVE#72057594037968897 ProcessBootQueue - executing 2025-05-07T08:48:58.726180Z node 6 :HIVE DEBUG: tx__process_boot_queue.cpp:18: HIVE#72057594037968897 THive::TTxProcessBootQueue()::Execute 2025-05-07T08:48:58.726234Z node 6 :HIVE DEBUG: hive_impl.cpp:222: HIVE#72057594037968897 Handle ProcessBootQueue (size: 0) 2025-05-07T08:48:58.726277Z node 6 :HIVE DEBUG: hive_impl.cpp:302: HIVE#72057594037968897 ProcessBootQueue - BootQueue empty (WaitQueue: 0) 2025-05-07T08:48:58.741444Z node 6 :HIVE DEBUG: tx__update_tablet_status.cpp:216: HIVE#72057594037968897 THive::TTxUpdateTabletStatus::Complete TabletId: 72075186224037888 SideEffects: {Notifications: 0x10040207 [6:1336:2701] {EvTabletCreationResult Status: OK TabletID: 72075186224037888}} 2025-05-07T08:48:58.741711Z node 6 :HIVE DEBUG: tx__process_boot_queue.cpp:26: HIVE#72057594037968897 THive::TTxProcessBootQueue()::Complete 2025-05-07T08:49:03.323348Z node 6 :HIVE DEBUG: hive_impl.cpp:721: HIVE#72057594037968897 Handle TEvLocal::TEvStatus for 
Node 8: Status: 2 2025-05-07T08:49:03.323496Z node 6 :HIVE DEBUG: tx__status.cpp:22: HIVE#72057594037968897 THive::TTxStatus(8)::Execute 2025-05-07T08:49:03.323565Z node 6 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 8 2025-05-07T08:49:03.323702Z node 6 :HIVE DEBUG: tx__status.cpp:65: HIVE#72057594037968897 THive::TTxStatus(8)::Complete 2025-05-07T08:49:03.324586Z node 6 :HIVE DEBUG: tx__restart_tablet.cpp:32: HIVE#72057594037968897 THive::TTxRestartTablet(PersQueue.72075186224037888.Leader.1)::Execute 2025-05-07T08:49:03.324722Z node 6 :HIVE DEBUG: tablet_info.cpp:123: HIVE#72057594037968897 Tablet(PersQueue.72075186224037888.Leader.1) VolatileState: Running -> Stopped (Node 8) 2025-05-07T08:49:03.324787Z node 6 :HIVE TRACE: node_info.cpp:112: HIVE#72057594037968897 Node(8, (0,1048576,0,0)->(0,0,0,0)) 2025-05-07T08:49:03.324916Z node 6 :HIVE TRACE: hive_impl.cpp:2541: HIVE#72057594037968897 UpdateTotalResources: ObjectId (72057594046578946,0): {Memory: 1048576} -> {} 2025-05-07T08:49:03.325014Z node 6 :HIVE TRACE: hive_impl.cpp:2547: HIVE#72057594037968897 UpdateTotalResources: Type PersQueue: {Memory: 1048576} -> {} 2025-05-07T08:49:03.325090Z node 6 :HIVE DEBUG: tablet_info.cpp:523: HIVE#72057594037968897 Sending TEvStopTablet(PersQueue.72075186224037888.Leader.1 gen 1) to node 8 2025-05-07T08:49:03.325180Z node 6 :HIVE DEBUG: tablet_info.cpp:125: HIVE#72057594037968897 Tablet(PersQueue.72075186224037888.Leader.1) VolatileState: Stopped -> Booting 2025-05-07T08:49:03.325244Z node 6 :HIVE DEBUG: hive_impl.cpp:342: HIVE#72057594037968897 ProcessBootQueue (1) 2025-05-07T08:49:03.325303Z node 6 :HIVE TRACE: hive_impl.cpp:344: HIVE#72057594037968897 ProcessBootQueue - sending 2025-05-07T08:49:03.325568Z node 6 :HIVE DEBUG: tx__kill_node.cpp:22: HIVE#72057594037968897 THive::TTxKillNode(8)::Execute 2025-05-07T08:49:03.325678Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-05-07T08:49:03.325739Z node 6 :HIVE TRACE: hive_domains.cpp:16: Node(8) DeregisterInDomains (72057594046644480:1) : 1 -> 0 2025-05-07T08:49:03.325802Z node 6 :HIVE DEBUG: hive_impl.cpp:2778: HIVE#72057594037968897 RemoveRegisteredDataCentersNode(3, 8) 2025-05-07T08:49:03.325863Z node 6 :HIVE TRACE: tx__kill_node.cpp:50: HIVE#72057594037968897 THive::TTxKillNode - killing pipe server [6:1644:2713] 2025-05-07T08:49:03.325912Z node 6 :HIVE DEBUG: hive_impl.cpp:105: HIVE#72057594037968897 TryToDeleteNode(8): waiting 3600.000000s 2025-05-07T08:49:03.336295Z node 6 :HIVE TRACE: hive_impl.cpp:122: HIVE#72057594037968897 Handle TEvTabletPipe::TEvServerDisconnected([8:1583:2366]) [6:1644:2713] 2025-05-07T08:49:03.368444Z node 6 :HIVE TRACE: hive_impl.cpp:114: HIVE#72057594037968897 Handle TEvTabletPipe::TEvServerConnected([6:2025:2731]) [6:2026:2736] 2025-05-07T08:49:03.408006Z node 6 :HIVE TRACE: hive_impl.cpp:122: HIVE#72057594037968897 Handle TEvTabletPipe::TEvServerDisconnected([6:2025:2731]) [6:2026:2736] 2025-05-07T08:49:03.420258Z node 6 :HIVE TRACE: hive_impl.cpp:114: HIVE#72057594037968897 Handle TEvTabletPipe::TEvServerConnected([9:2001:2365]) [6:2063:2740] 2025-05-07T08:49:03.472778Z node 6 :HIVE DEBUG: hive_impl.cpp:141: HIVE#72057594037968897 Handle TEvLocal::TEvRegisterNode from [9:2000:2365] HiveId: 72057594037968897 ServicedDomains { SchemeShard: 72057594046644480 PathId: 1 } TabletAvailability { Type: Mediator Priority: 0 } TabletAvailability { Type: Dummy Priority: 0 } 
TabletAvailability { Type: KeyValue Priority: 0 } TabletAvailability { Type: Coordinator Priority: 0 } TabletAvailability { Type: Hive Priority: 0 } TabletAvailability { Type: SchemeShard Priority: 0 } TabletAvailability { Type: DataShard Priority: 0 } TabletAvailability { Type: PersQueue Priority: 0 } TabletAvailability { Type: PersQueueReadBalancer Priority: 0 } TabletAvailability { Type: Kesus Priority: 0 } TabletAvailability { Type: SysViewProcessor Priority: 0 } TabletAvailability { Type: ColumnShard Priority: 0 } TabletAvailability { Type: SequenceShard Priority: 0 } TabletAvailability { Type: ReplicationController Priority: 0 } TabletAvailability { Type: StatisticsAggregator Priority: 0 } 2025-05-07T08:49:03.472916Z node 6 :HIVE DEBUG: tx__register_node.cpp:21: HIVE#72057594037968897 THive::TTxRegisterNode(9)::Execute 2025-05-07T08:49:03.473057Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:49:03.473116Z node 6 :HIVE DEBUG: hive_impl.cpp:361: HIVE#72057594037968897 ProcessWaitQueue (0) 2025-05-07T08:49:03.473167Z node 6 :HIVE DEBUG: hive_impl.cpp:342: HIVE#72057594037968897 ProcessBootQueue (1) 2025-05-07T08:49:03.473222Z node 6 :HIVE DEBUG: hive_impl.cpp:361: HIVE#72057594037968897 ProcessWaitQueue (0) 2025-05-07T08:49:03.473277Z node 6 :HIVE DEBUG: hive_impl.cpp:342: HIVE#72057594037968897 ProcessBootQueue (1) 2025-05-07T08:49:03.473386Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:49:03.473931Z node 6 :HIVE DEBUG: hive_impl.cpp:798: HIVE#72057594037968897 TEvInterconnect::TEvNodeInfo NodeId 9 Location DataCenter: "4" Module: "4" Rack: "4" Unit: "4" self_check_result: EMERGENCY issue_log { id: "RED-f489-1231c6b1" status: RED message: "Database has compute issues" location { database { name: "/Root" } } reason: "RED-6fa7-1231c6b1" reason: "YELLOW-1ba8-1231c6b1" type: "DATABASE" level: 1 } issue_log { id: "RED-6fa7-1231c6b1" status: RED message: "Compute has issues with tablets" location { database { name: "/Root" } } reason: "RED-e5e3-1231c6b1-PersQueue" type: "COMPUTE" level: 2 } issue_log { id: "YELLOW-1ba8-1231c6b1" status: YELLOW message: "Compute is overloaded" location { database { name: "/Root" } } reason: "YELLOW-e9e2-1231c6b1-6" reason: "YELLOW-e9e2-1231c6b1-7" reason: "YELLOW-e9e2-1231c6b1-8" reason: "YELLOW-e9e2-1231c6b1-9" type: "COMPUTE" level: 2 } issue_log { id: "YELLOW-e9e2-1231c6b1-6" status: YELLOW message: "LoadAverage above 100%" location { compute { node { id: 6 host: "::1" port: 12001 } } database { name: "/Root" } } type: "LOAD_AVERAGE" level: 4 } issue_log { id: "YELLOW-e9e2-1231c6b1-7" status: YELLOW message: "LoadAverage above 100%" location { compute { node { id: 7 host: "::1" port: 12002 } } database { name: "/Root" } } type: "LOAD_AVERAGE" level: 4 } issue_log { id: "YELLOW-e9e2-1231c6b1-8" status: YELLOW message: "LoadAverage above 100%" location { compute { node { id: 8 host: "::1" port: 12003 } } database { name: "/Root" } } type: "LOAD_AVERAGE" level: 4 } issue_log { id: "YELLOW-e9e2-1231c6b1-9" status: YELLOW message: "LoadAverage above 100%" location { compute { node { id: 9 host: "::1" port: 12004 } } database { name: "/Root" } } type: "LOAD_AVERAGE" level: 4 } issue_log { id: "RED-e5e3-1231c6b1-PersQueue" status: RED message: "Tablets are dead" location { compute { tablet { type: "PersQueue" id: "72075186224037888" count: 1 } } database { name: "/Root" } node { } } type: 
"TABLET" level: 4 } location { id: 6 host: "::1" port: 12001 } 2025-05-07T08:49:11.405316Z node 10 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [10:455:2415], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:49:11.405777Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:49:11.405923Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003d75/r3tmp/tmpcTR6rl/pdisk_1.dat 2025-05-07T08:49:11.828043Z node 10 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 9904, node 10 TClient is connected to server localhost:10100 2025-05-07T08:49:12.458529Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:49:12.458601Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:49:12.458653Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:49:12.459033Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_cache/unittest >> TCacheTest::RacyCreateAndSync [GOOD] Test command err: 2025-05-07T08:49:14.472624Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:49:14.472707Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TestModificationResults wait txId: 1 2025-05-07T08:49:14.623563Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 FAKE_COORDINATOR: Erasing txId 1 TestModificationResult got TxId: 1, wait until txId: 1 TestModificationResults wait txId: 101 FAKE_COORDINATOR: Add transaction: 101 at step: 5000002 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000001 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 101 at step: 5000002 FAKE_COORDINATOR: Erasing txId 101 TestModificationResult got TxId: 101, wait until txId: 101 TestWaitNotification wait txId: 101 2025-05-07T08:49:14.644023Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 101 TestModificationResults wait txId: 102 2025-05-07T08:49:14.646705Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpRmDir, opId: 102:0, at schemeshard: 72057594046678944 FAKE_COORDINATOR: Add transaction: 102 at step: 5000003 FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000002 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 102 at step: 5000003 FAKE_COORDINATOR: Erasing txId 102 TestModificationResult got TxId: 102, wait until 
txId: 102 TestWaitNotification wait txId: 102 2025-05-07T08:49:14.697402Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 102 TestModificationResults wait txId: 103 FAKE_COORDINATOR: Add transaction: 103 at step: 5000004 FAKE_COORDINATOR: advance: minStep5000004 State->FrontStep: 5000003 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 103 at step: 5000004 FAKE_COORDINATOR: Erasing txId 103 TestModificationResult got TxId: 103, wait until txId: 103 TestWaitNotification wait txId: 103 2025-05-07T08:49:14.720116Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 103, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 103 2025-05-07T08:49:15.105318Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:49:15.105374Z node 2 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TestModificationResults wait txId: 1 2025-05-07T08:49:15.159729Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 FAKE_COORDINATOR: Erasing txId 1 TestModificationResult got TxId: 1, wait until txId: 1 TestModificationResults wait txId: 101 FAKE_COORDINATOR: Add transaction: 101 at step: 5000002 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000001 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 101 at step: 5000002 FAKE_COORDINATOR: Erasing txId 101 TestModificationResult got TxId: 101, wait until txId: 101 TestWaitNotification wait txId: 101 2025-05-07T08:49:15.172651Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 101 >> TS3WrapperTests::AbortMultipartUpload [GOOD] >> TSchemeShardTest::ConsistentCopyAfterDropIndexes [GOOD] >> TCmsTenatsTest::TestClusterLimit [GOOD] >> TCmsTenatsTest::RequestShutdownHost |88.7%| [TA] $(B)/ydb/core/health_check/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} ------- [TS] {asan, default-linux-x86_64, release} ydb/core/wrappers/ut/unittest >> TS3WrapperTests::AbortMultipartUpload [GOOD] Test command err: 2025-05-07T08:49:15.659241Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:75: Request: uuid# 7552CB40-5401-4F48-BD83-D6F382B85E47, request# CreateMultipartUpload { Bucket: TEST Key: key } REQUEST: POST /TEST/key?uploads HTTP/1.1 HEADERS: Host: localhost:1418 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: A64CCCB9-38F0-42D8-98A9-78D24E432C89 amz-sdk-request: attempt=1 content-length: 0 content-type: application/xml user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-api-version: 2006-03-01 x-amz-storage-class: STANDARD S3_MOCK::HttpServeAction: 4 / /TEST/key / uploads= 2025-05-07T08:49:15.919396Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:63: Response: uuid# 7552CB40-5401-4F48-BD83-D6F382B85E47, response# CreateMultipartUploadResult { Bucket: Key: TEST/key UploadId: 1 } 2025-05-07T08:49:15.920313Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:75: Request: uuid# DB15E192-8AEA-43ED-BA8D-7EF0FAE84504, request# AbortMultipartUpload { Bucket: TEST Key: key UploadId: 1 } REQUEST: DELETE /TEST/key?uploadId=1 HTTP/1.1 HEADERS: Host: localhost:1418 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 61150F6A-5A3E-4E73-ADC6-94F229C4497E amz-sdk-request: attempt=1 content-type: application/xml user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-api-version: 2006-03-01 S3_MOCK::HttpServeAction: 6 / /TEST/key / uploadId=1 2025-05-07T08:49:15.925555Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:63: Response: uuid# DB15E192-8AEA-43ED-BA8D-7EF0FAE84504, response# AbortMultipartUploadResult { } 2025-05-07T08:49:15.936888Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:75: Request: uuid# 3F0A3D19-47CF-4790-B449-C7B0B961DD3D, request# HeadObject { Bucket: TEST Key: key } REQUEST: HEAD /TEST/key HTTP/1.1 HEADERS: Host: localhost:1418 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 4F5ED722-C858-466A-B8A2-D6EC18DF6963 amz-sdk-request: attempt=1 content-type: application/xml user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-api-version: 2006-03-01 2025-05-07T08:49:15.943459Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:63: Response: uuid# 3F0A3D19-47CF-4790-B449-C7B0B961DD3D, response# No response body. 
>> test_sql_streaming.py::test[suites-ReadTopic-default.txt] [FAIL] >> test_sql_streaming.py::test[suites-ReadTopicGroupWriteToSolomon-default.txt] |88.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_order/ydb-core-tx-datashard-ut_order |88.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_order/ydb-core-tx-datashard-ut_order ------- [TM] {asan, default-linux-x86_64, release} ydb/services/fq/ut_integration/unittest >> Yq_1::DeleteQuery [GOOD] Test command err: 2025-05-07T08:47:33.411713Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501622749532209920:2075];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:47:33.427775Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; E0507 08:47:37.448126961 107930 dns_resolver.cc:162] no server name supplied in dns URI E0507 08:47:37.448270396 107930 channel.cc:120] channel stack builder failed: UNKNOWN: the target uri is not valid: dns:/// 2025-05-07T08:47:38.266971Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:38.440602Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501622749532209920:2075];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:47:38.447274Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:47:39.183029Z node 1 :YQL_NODES_MANAGER ERROR: nodes_manager.cpp:364: ydb/core/fq/libs/actors/nodes_manager.cpp:322: TRANSPORT_UNAVAILABLE
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:18903: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint localhost:18903 2025-05-07T08:47:39.229996Z node 1 :YQ_CONTROL_PLANE_STORAGE ERROR: schema.cpp:160: Create directory "Root/yq" error: TRANSPORT_UNAVAILABLE [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:18903: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:18903 } ] 2025-05-07T08:47:39.399426Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:39.722178Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:40.426856Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:40.751714Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:41.149997Z node 1 :YQ_CONTROL_PLANE_STORAGE ERROR: schema.cpp:160: Create directory "Root/yq" error: TRANSPORT_UNAVAILABLE [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:18903: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:18903 } ] 2025-05-07T08:47:41.453761Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:41.771132Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:42.457089Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; E0507 08:47:42.506712769 108501 dns_resolver.cc:162] no server name supplied in dns URI E0507 08:47:42.506825689 108501 channel.cc:120] channel stack builder failed: UNKNOWN: the target uri is not valid: dns:/// 2025-05-07T08:47:42.790771Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:43.458091Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:43.774758Z node 1 :YQL_NODES_MANAGER ERROR: nodes_manager.cpp:364: ydb/core/fq/libs/actors/nodes_manager.cpp:322: TRANSPORT_UNAVAILABLE
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:18903: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint localhost:18903 2025-05-07T08:47:43.803474Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:43.911837Z node 1 :YQ_CONTROL_PLANE_STORAGE ERROR: schema.cpp:160: Create directory "Root/yq" error: TRANSPORT_UNAVAILABLE [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:18903: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:18903 } ] 2025-05-07T08:47:44.459388Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:44.802495Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:45.557821Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:45.882464Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:46.623450Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:46.886766Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; E0507 08:47:47.669732054 108489 dns_resolver.cc:162] no server name supplied in dns URI E0507 08:47:47.714442976 108489 channel.cc:120] channel stack builder failed: UNKNOWN: the target uri is not valid: dns:/// 2025-05-07T08:47:47.870733Z node 1 :YQ_CONTROL_PLANE_STORAGE ERROR: schema.cpp:160: Create directory "Root/yq" error: TRANSPORT_UNAVAILABLE [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:18903: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:18903 } ] 2025-05-07T08:47:48.278562Z node 1 :YQL_NODES_MANAGER ERROR: nodes_manager.cpp:364: ydb/core/fq/libs/actors/nodes_manager.cpp:322: TRANSPORT_UNAVAILABLE
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:18903: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint localhost:18903 2025-05-07T08:47:50.391703Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:50.391731Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:51.402891Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:51.402924Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:52.407600Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:52.407635Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; E0507 08:47:52.770878527 108489 dns_resolver.cc:162] no server name supplied in dns URI E0507 08:47:52.770974819 108489 channel.cc:120] channel stack builder failed: UNKNOWN: the target uri is not valid: dns:/// 2025-05-07T08:47:53.414676Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:53.414712Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:54.415273Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:54.415310Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:55.440292Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:55.440332Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:55.518984Z node 1 :YQ_CONTROL_PLANE_STORAGE ERROR: schema.cpp:160: Create directory "Root/yq" error: TRANSPORT_UNAVAILABLE [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:18903: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:18903 } ] 2025-05-07T08:47:55.578025Z node 1 :YQL_NODES_MANAGER ERROR: nodes_manager.cpp:364: ydb/core/fq/libs/actors/nodes_manager.cpp:322: TRANSPORT_UNAVAILABLE
: Error: GRpc error: (14): failed to connect to all addresses; l ... ards 0 finished = 0 has limit 0 limit reached 0 2025-05-07T08:49:12.350588Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:143: SelfId: [4:7501623175460899871:3080], TxId: 281474976715818, task: 1. Ctx: { CustomerSuppliedId : . TraceId : 01jtmyygp6c3aqgy39y6wxfqez. SessionId : ydb://session/3?node_id=4&id=ODdjMDc1NjgtNjkwNzZjZmUtZTQ2ZTU1NGMtYjE3N2Fi. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. CA StateFunc 271646926 2025-05-07T08:49:12.350653Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:1074: SelfId: [4:7501623175460899871:3080], TxId: 281474976715818, task: 1. Ctx: { CustomerSuppliedId : . TraceId : 01jtmyygp6c3aqgy39y6wxfqez. SessionId : ydb://session/3?node_id=4&id=ODdjMDc1NjgtNjkwNzZjZmUtZTQ2ZTU1NGMtYjE3N2Fi. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. Received channels info: Update { Id: 1 TransportVersion: DATA_TRANSPORT_OOB_PICKLE_1_0 SrcTaskId: 1 DstTaskId: 2 SrcEndpoint { ActorId { RawX1: 7501623175460899871 RawX2: 4503616807242760 } } DstEndpoint { ActorId { RawX1: 7501623175460899872 RawX2: 4503616807242761 } } InMemory: true DstStageId: 1 } 2025-05-07T08:49:12.350663Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1328: TxId: 281474976715818, task: 1, CA Id [4:7501623175460899871:3080]. enter getasyncinputdata results size 0, freeSpace 8388608 2025-05-07T08:49:12.350677Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1424: TxId: 281474976715818, task: 1, CA Id [4:7501623175460899871:3080]. returned async data processed rows 0 left freeSpace 8388608 received rows 0 running reads 1 pending shards 0 finished = 0 has limit 0 limit reached 0 2025-05-07T08:49:12.350701Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:143: SelfId: [4:7501623175460899871:3080], TxId: 281474976715818, task: 1. Ctx: { CustomerSuppliedId : . TraceId : 01jtmyygp6c3aqgy39y6wxfqez. SessionId : ydb://session/3?node_id=4&id=ODdjMDc1NjgtNjkwNzZjZmUtZTQ2ZTU1NGMtYjE3N2Fi. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. CA StateFunc 271646922 2025-05-07T08:49:12.350717Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1328: TxId: 281474976715818, task: 1, CA Id [4:7501623175460899871:3080]. enter getasyncinputdata results size 0, freeSpace 8388608 2025-05-07T08:49:12.350732Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1424: TxId: 281474976715818, task: 1, CA Id [4:7501623175460899871:3080]. returned async data processed rows 0 left freeSpace 8388608 received rows 0 running reads 1 pending shards 0 finished = 0 has limit 0 limit reached 0 2025-05-07T08:49:12.351537Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:959: TxId: 281474976715818, task: 1, CA Id [4:7501623175460899871:3080]. Recv TEvReadResult from ShardID=72075186224037890, ReadId=0, Status=SUCCESS, Finished=1, RowCount=0, TxLocks= , BrokenTxLocks= 2025-05-07T08:49:12.351571Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1047: TxId: 281474976715818, task: 1, CA Id [4:7501623175460899871:3080]. Taken 0 locks 2025-05-07T08:49:12.351593Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1061: TxId: 281474976715818, task: 1, CA Id [4:7501623175460899871:3080]. new data for read #0 seqno = 1 finished = 1 2025-05-07T08:49:12.351621Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:143: SelfId: [4:7501623175460899871:3080], TxId: 281474976715818, task: 1. Ctx: { CustomerSuppliedId : . TraceId : 01jtmyygp6c3aqgy39y6wxfqez. 
SessionId : ydb://session/3?node_id=4&id=ODdjMDc1NjgtNjkwNzZjZmUtZTQ2ZTU1NGMtYjE3N2Fi. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. CA StateFunc 276037645 2025-05-07T08:49:12.351643Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:143: SelfId: [4:7501623175460899871:3080], TxId: 281474976715818, task: 1. Ctx: { CustomerSuppliedId : . TraceId : 01jtmyygp6c3aqgy39y6wxfqez. SessionId : ydb://session/3?node_id=4&id=ODdjMDc1NjgtNjkwNzZjZmUtZTQ2ZTU1NGMtYjE3N2Fi. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. CA StateFunc 271646922 2025-05-07T08:49:12.351663Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1328: TxId: 281474976715818, task: 1, CA Id [4:7501623175460899871:3080]. enter getasyncinputdata results size 1, freeSpace 8388608 2025-05-07T08:49:12.351682Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1224: TxId: 281474976715818, task: 1, CA Id [4:7501623175460899871:3080]. enter pack cells method shardId: 72075186224037890 processedRows: 0 packed rows: 0 freeSpace: 8388608 2025-05-07T08:49:12.351704Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1305: TxId: 281474976715818, task: 1, CA Id [4:7501623175460899871:3080]. exit pack cells method shardId: 72075186224037890 processedRows: 0 packed rows: 0 freeSpace: 8388608 2025-05-07T08:49:12.351718Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1362: TxId: 281474976715818, task: 1, CA Id [4:7501623175460899871:3080]. returned 0 rows; processed 0 rows 2025-05-07T08:49:12.351764Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1399: TxId: 281474976715818, task: 1, CA Id [4:7501623175460899871:3080]. dropping batch for read #0 2025-05-07T08:49:12.351777Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:459: TxId: 281474976715818, task: 1, CA Id [4:7501623175460899871:3080]. effective maxinflight 1024 sorted 0 2025-05-07T08:49:12.351790Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:481: TxId: 281474976715818, task: 1, CA Id [4:7501623175460899871:3080]. Scheduled table scans, in flight: 0 shards. pending shards to read: 0, 2025-05-07T08:49:12.351807Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1424: TxId: 281474976715818, task: 1, CA Id [4:7501623175460899871:3080]. returned async data processed rows 0 left freeSpace 8388608 received rows 0 running reads 0 pending shards 0 finished = 1 has limit 0 limit reached 0 2025-05-07T08:49:12.351914Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:502: SelfId: [4:7501623175460899871:3080], TxId: 281474976715818, task: 1. Ctx: { CustomerSuppliedId : . TraceId : 01jtmyygp6c3aqgy39y6wxfqez. SessionId : ydb://session/3?node_id=4&id=ODdjMDc1NjgtNjkwNzZjZmUtZTQ2ZTU1NGMtYjE3N2Fi. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. Continue execution, either output buffers are not empty or not all channels are ready, hasDataToSend: 1, channelsReady: 1 2025-05-07T08:49:12.351963Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:143: SelfId: [4:7501623175460899872:3081], TxId: 281474976715818, task: 2. Ctx: { CustomerSuppliedId : . TraceId : 01jtmyygp6c3aqgy39y6wxfqez. SessionId : ydb://session/3?node_id=4&id=ODdjMDc1NjgtNjkwNzZjZmUtZTQ2ZTU1NGMtYjE3N2Fi. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. CA StateFunc 271646923 2025-05-07T08:49:12.351984Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:163: TxId: 281474976715818, task: 2. 
Finish input channelId: 1, from: [4:7501623175460899871:3080] 2025-05-07T08:49:12.352013Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:143: SelfId: [4:7501623175460899872:3081], TxId: 281474976715818, task: 2. Ctx: { CustomerSuppliedId : . TraceId : 01jtmyygp6c3aqgy39y6wxfqez. SessionId : ydb://session/3?node_id=4&id=ODdjMDc1NjgtNjkwNzZjZmUtZTQ2ZTU1NGMtYjE3N2Fi. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. CA StateFunc 271646922 2025-05-07T08:49:12.352069Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:502: SelfId: [4:7501623175460899872:3081], TxId: 281474976715818, task: 2. Ctx: { CustomerSuppliedId : . TraceId : 01jtmyygp6c3aqgy39y6wxfqez. SessionId : ydb://session/3?node_id=4&id=ODdjMDc1NjgtNjkwNzZjZmUtZTQ2ZTU1NGMtYjE3N2Fi. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. Continue execution, either output buffers are not empty or not all channels are ready, hasDataToSend: 1, channelsReady: 1 2025-05-07T08:49:12.352082Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:143: SelfId: [4:7501623175460899871:3080], TxId: 281474976715818, task: 1. Ctx: { CustomerSuppliedId : . TraceId : 01jtmyygp6c3aqgy39y6wxfqez. SessionId : ydb://session/3?node_id=4&id=ODdjMDc1NjgtNjkwNzZjZmUtZTQ2ZTU1NGMtYjE3N2Fi. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. CA StateFunc 271646927 2025-05-07T08:49:12.352100Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:143: SelfId: [4:7501623175460899871:3080], TxId: 281474976715818, task: 1. Ctx: { CustomerSuppliedId : . TraceId : 01jtmyygp6c3aqgy39y6wxfqez. SessionId : ydb://session/3?node_id=4&id=ODdjMDc1NjgtNjkwNzZjZmUtZTQ2ZTU1NGMtYjE3N2Fi. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. CA StateFunc 271646922 2025-05-07T08:49:12.352126Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:675: TxId: 281474976715818, task: 1. Tasks execution finished 2025-05-07T08:49:12.352142Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:510: SelfId: [4:7501623175460899871:3080], TxId: 281474976715818, task: 1. Ctx: { CustomerSuppliedId : . TraceId : 01jtmyygp6c3aqgy39y6wxfqez. SessionId : ydb://session/3?node_id=4&id=ODdjMDc1NjgtNjkwNzZjZmUtZTQ2ZTU1NGMtYjE3N2Fi. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. Compute state finished. All channels and sinks finished 2025-05-07T08:49:12.352266Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:494: TxId: 281474976715818, task: 1. pass away 2025-05-07T08:49:12.352382Z node 4 :KQP_COMPUTE DEBUG: log.cpp:784: fline=kqp_compute_actor_factory.cpp:66;problem=finish_compute_actor;tx_id=281474976715818;task_id=1;success=1;message={
: Error: COMPUTE_STATE_FINISHED }; 2025-05-07T08:49:12.352787Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:143: SelfId: [4:7501623175460899872:3081], TxId: 281474976715818, task: 2. Ctx: { CustomerSuppliedId : . TraceId : 01jtmyygp6c3aqgy39y6wxfqez. SessionId : ydb://session/3?node_id=4&id=ODdjMDc1NjgtNjkwNzZjZmUtZTQ2ZTU1NGMtYjE3N2Fi. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. CA StateFunc 271646922 2025-05-07T08:49:12.352828Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976715818, task: 2. Tasks execution finished, don't wait for ack delivery in input channelId: 1, seqNo: [1] 2025-05-07T08:49:12.352840Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:675: TxId: 281474976715818, task: 2. Tasks execution finished 2025-05-07T08:49:12.352852Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:510: SelfId: [4:7501623175460899872:3081], TxId: 281474976715818, task: 2. Ctx: { CustomerSuppliedId : . TraceId : 01jtmyygp6c3aqgy39y6wxfqez. SessionId : ydb://session/3?node_id=4&id=ODdjMDc1NjgtNjkwNzZjZmUtZTQ2ZTU1NGMtYjE3N2Fi. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. Compute state finished. All channels and sinks finished 2025-05-07T08:49:12.352931Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:494: TxId: 281474976715818, task: 2. pass away 2025-05-07T08:49:12.352992Z node 4 :KQP_COMPUTE DEBUG: log.cpp:784: fline=kqp_compute_actor_factory.cpp:66;problem=finish_compute_actor;tx_id=281474976715818;task_id=2;success=1;message={
: Error: COMPUTE_STATE_FINISHED }; 2025-05-07T08:49:12.363270Z node 4 :YQ_CONTROL_PLANE_STORAGE WARN: ydb_control_plane_storage_impl.h:768: DescribeQueryRequest - DescribeQueryResult: {query_id: "utquebf0p1fueqpdmrjt" } ERROR: {
: Error: (NYql::TCodeLineException) ydb/core/fq/libs/control_plane_storage/ydb_control_plane_storage_queries.cpp:664: Query does not exist or permission denied. Please check the id of the query or your access rights, code: 1000 } 2025-05-07T08:49:13.088109Z node 4 :FQ_PENDING_FETCHER ERROR: pending_fetcher.cpp:259: Error with GetTask:
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv6:%5B::%5D:6662: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint [::]:6662 |88.7%| [TA] {RESULT} $(B)/ydb/core/health_check/ut/test-results/unittest/{meta.json ... results_accumulator.log} |88.7%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_order/ydb-core-tx-datashard-ut_order >> TCmsTest::RequestRestartServicesOk [GOOD] >> TCmsTest::RequestRestartServicesReject >> test_sql_streaming.py::test[suites-GroupByHoppingWindowByStringKey-default.txt] [FAIL] >> test_sql_streaming.py::test[suites-GroupByHoppingWindowExprKey-default.txt] >> TYardTest::TestRestartAtNonceJump [GOOD] >> TYardTest::TestRestartAtChunkEnd >> TAsyncIndexTests::DropTableWithInflightChanges[PipeResets] [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_base/unittest >> TSchemeShardTest::ConsistentCopyAfterDropIndexes [GOOD] Test command err: canonic: ACE { AccessType: 0 AccessRight: 59391 SID: "2@staff" InheritanceType: 1 } ACE { AccessType: 0 AccessRight: 521 SID: "4@staff" InheritanceType: 4 } ACE { AccessType: 0 AccessRight: 521 SID: "5@staff" InheritanceType: 5 } ACE { AccessType: 1 AccessRight: 59391 SID: "0@staff" InheritanceType: 3 } ACE { AccessType: 1 AccessRight: 32768 SID: "1@staff" InheritanceType: 0 } ACE { AccessType: 1 AccessRight: 59391 SID: "3@staff" InheritanceType: 2 } ACE { AccessType: 1 AccessRight: 521 SID: "6@staff" InheritanceType: 6 } ACE { AccessType: 1 AccessRight: 521 SID: "7@staff" InheritanceType: 7 } result: ACE { AccessType: 0 AccessRight: 59391 SID: "2@staff" InheritanceType: 1 } ACE { AccessType: 0 AccessRight: 521 SID: "4@staff" InheritanceType: 4 } ACE { AccessType: 0 AccessRight: 521 SID: "5@staff" InheritanceType: 5 } ACE { AccessType: 1 AccessRight: 59391 SID: "0@staff" InheritanceType: 3 } ACE { AccessType: 1 AccessRight: 32768 SID: "1@staff" InheritanceType: 0 } ACE { AccessType: 1 AccessRight: 59391 SID: "3@staff" InheritanceType: 2 } ACE { AccessType: 1 AccessRight: 521 SID: "6@staff" InheritanceType: 6 } ACE { AccessType: 1 AccessRight: 521 SID: "7@staff" InheritanceType: 7 } canonic: ACE { AccessType: 0 AccessRight: 59391 SID: "2@staff" InheritanceType: 1 } ACE { AccessType: 0 AccessRight: 521 SID: "4@staff" InheritanceType: 4 } ACE { AccessType: 0 AccessRight: 521 SID: "5@staff" InheritanceType: 5 } ACE { AccessType: 1 AccessRight: 59391 SID: "0@staff" InheritanceType: 3 } ACE { AccessType: 1 AccessRight: 32768 SID: "1@staff" InheritanceType: 0 } ACE { AccessType: 1 AccessRight: 59391 SID: "3@staff" InheritanceType: 2 } ACE { AccessType: 1 AccessRight: 521 SID: "6@staff" InheritanceType: 6 } ACE { AccessType: 1 AccessRight: 521 SID: "7@staff" InheritanceType: 7 } result: ACE { AccessType: 0 AccessRight: 59391 SID: "2@staff" InheritanceType: 1 } ACE { AccessType: 0 AccessRight: 521 SID: "4@staff" InheritanceType: 4 } ACE { AccessType: 0 AccessRight: 521 SID: "5@staff" InheritanceType: 5 } ACE { AccessType: 1 AccessRight: 59391 SID: "0@staff" InheritanceType: 3 } ACE { AccessType: 1 AccessRight: 32768 SID: "1@staff" InheritanceType: 0 } ACE { AccessType: 1 AccessRight: 59391 SID: "3@staff" InheritanceType: 2 } ACE { AccessType: 1 AccessRight: 521 SID: "6@staff" InheritanceType: 6 } ACE { AccessType: 1 AccessRight: 521 SID: "7@staff" InheritanceType: 7 } canonic: ACE { AccessType: 1 AccessRight: 59391 SID: "0@staff" InheritanceType: 3 Inherited: true } ACE { AccessType: 1 AccessRight: 59391 SID: "3@staff" InheritanceType: 2 Inherited: true } ACE { AccessType: 1 AccessRight: 521 SID: "6@staff" InheritanceType: 2 Inherited: true } ACE { AccessType: 1 AccessRight: 521 SID: 
"7@staff" InheritanceType: 3 Inherited: true } result: ACE { AccessType: 1 AccessRight: 59391 SID: "0@staff" InheritanceType: 3 Inherited: true } ACE { AccessType: 1 AccessRight: 59391 SID: "3@staff" InheritanceType: 2 Inherited: true } ACE { AccessType: 1 AccessRight: 521 SID: "6@staff" InheritanceType: 2 Inherited: true } ACE { AccessType: 1 AccessRight: 521 SID: "7@staff" InheritanceType: 3 Inherited: true } canonic: ACE { AccessType: 0 AccessRight: 59391 SID: "2@staff" InheritanceType: 1 Inherited: true } ACE { AccessType: 0 AccessRight: 521 SID: "5@staff" InheritanceType: 1 Inherited: true } ACE { AccessType: 1 AccessRight: 59391 SID: "0@staff" InheritanceType: 3 Inherited: true } ACE { AccessType: 1 AccessRight: 521 SID: "7@staff" InheritanceType: 3 Inherited: true } result: ACE { AccessType: 0 AccessRight: 59391 SID: "2@staff" InheritanceType: 1 Inherited: true } ACE { AccessType: 0 AccessRight: 521 SID: "5@staff" InheritanceType: 1 Inherited: true } ACE { AccessType: 1 AccessRight: 59391 SID: "0@staff" InheritanceType: 3 Inherited: true } ACE { AccessType: 1 AccessRight: 521 SID: "7@staff" InheritanceType: 3 Inherited: true } canonic: ACE { AccessType: 0 AccessRight: 59391 SID: "22@staff" InheritanceType: 1 } ACE { AccessType: 0 AccessRight: 521 SID: "44@staff" InheritanceType: 4 } ACE { AccessType: 0 AccessRight: 521 SID: "55@staff" InheritanceType: 5 } ACE { AccessType: 1 AccessRight: 59391 SID: "0@staff" InheritanceType: 3 Inherited: true } ACE { AccessType: 1 AccessRight: 59391 SID: "3@staff" InheritanceType: 2 Inherited: true } ACE { AccessType: 1 AccessRight: 521 SID: "6@staff" InheritanceType: 2 Inherited: true } ACE { AccessType: 1 AccessRight: 521 SID: "7@staff" InheritanceType: 3 Inherited: true } ACE { AccessType: 1 AccessRight: 59391 SID: "00@staff" InheritanceType: 3 } ACE { AccessType: 1 AccessRight: 32768 SID: "11@staff" InheritanceType: 0 } ACE { AccessType: 1 AccessRight: 59391 SID: "33@staff" InheritanceType: 2 } ACE { AccessType: 1 AccessRight: 521 SID: "66@staff" InheritanceType: 6 } ACE { AccessType: 1 AccessRight: 521 SID: "77@staff" InheritanceType: 7 } result: ACE { AccessType: 0 AccessRight: 59391 SID: "22@staff" InheritanceType: 1 } ACE { AccessType: 0 AccessRight: 521 SID: "44@staff" InheritanceType: 4 } ACE { AccessType: 0 AccessRight: 521 SID: "55@staff" InheritanceType: 5 } ACE { AccessType: 1 AccessRight: 59391 SID: "0@staff" InheritanceType: 3 Inherited: true } ACE { AccessType: 1 AccessRight: 59391 SID: "3@staff" InheritanceType: 2 Inherited: true } ACE { AccessType: 1 AccessRight: 521 SID: "6@staff" InheritanceType: 2 Inherited: true } ACE { AccessType: 1 AccessRight: 521 SID: "7@staff" InheritanceType: 3 Inherited: true } ACE { AccessType: 1 AccessRight: 59391 SID: "00@staff" InheritanceType: 3 } ACE { AccessType: 1 AccessRight: 32768 SID: "11@staff" InheritanceType: 0 } ACE { AccessType: 1 AccessRight: 59391 SID: "33@staff" InheritanceType: 2 } ACE { AccessType: 1 AccessRight: 521 SID: "66@staff" InheritanceType: 6 } ACE { AccessType: 1 AccessRight: 521 SID: "77@staff" InheritanceType: 7 } canonic: ACE { AccessType: 1 AccessRight: 59391 SID: "0@staff" InheritanceType: 3 Inherited: true } ACE { AccessType: 1 AccessRight: 59391 SID: "3@staff" InheritanceType: 2 Inherited: true } ACE { AccessType: 1 AccessRight: 521 SID: "6@staff" InheritanceType: 2 Inherited: true } ACE { AccessType: 1 AccessRight: 521 SID: "7@staff" InheritanceType: 3 Inherited: true } ACE { AccessType: 1 AccessRight: 59391 SID: "00@staff" InheritanceType: 3 Inherited: true } ACE 
{ AccessType: 1 AccessRight: 59391 SID: "33@staff" InheritanceType: 2 Inherited: true } ACE { AccessType: 1 AccessRight: 521 SID: "66@staff" InheritanceType: 2 Inherited: true } ACE { AccessType: 1 AccessRight: 521 SID: "77@staff" InheritanceType: 3 Inherited: true } result: ACE { AccessType: 1 AccessRight: 59391 SID: "0@staff" InheritanceType: 3 Inherited: true } ACE { AccessType: 1 AccessRight: 59391 SID: "3@staff" InheritanceType: 2 Inherited: true } ACE { AccessType: 1 AccessRight: 521 SID: "6@staff" InheritanceType: 2 Inherited: true } ACE { AccessType: 1 AccessRight: 521 SID: "7@staff" InheritanceType: 3 Inherited: true } ACE { AccessType: 1 AccessRight: 59391 SID: "00@staff" InheritanceType: 3 Inherited: true } ACE { AccessType: 1 AccessRight: 59391 SID: "33@staff" InheritanceType: 2 Inherited: true } ACE { AccessType: 1 AccessRight: 521 SID: "66@staff" InheritanceType: 2 Inherited: true } ACE { AccessType: 1 AccessRight: 521 SID: "77@staff" InheritanceType: 3 Inherited: true } canonic: ACE { AccessType: 0 AccessRight: 59391 SID: "22@staff" InheritanceType: 1 Inherited: true } ACE { AccessType: 0 AccessRight: 521 SID: "55@staff" InheritanceType: 1 Inherited: true } ACE { AccessType: 1 AccessRight: 59391 SID: "0@staff" InheritanceType: 3 Inherited: true } ACE { AccessType: 1 AccessRight: 521 SID: "7@staff" InheritanceType: 3 Inherited: true } ACE { AccessType: 1 AccessRight: 59391 SID: "00@staff" InheritanceType: 3 Inherited: true } ACE { AccessType: 1 AccessRight: 521 SID: "77@staff" InheritanceType: 3 Inherited: true } result: ACE { AccessType: 0 AccessRight: 59391 SID: "22@staff" InheritanceType: 1 Inherited: true } ACE { AccessType: 0 AccessRight: 521 SID: "55@staff" InheritanceType: 1 Inherited: true } ACE { AccessType: 1 AccessRight: 59391 SID: "0@staff" InheritanceType: 3 Inherited: true } ACE { AccessType: 1 AccessRight: 521 SID: "7@staff" InheritanceType: 3 Inherited: true } ACE { AccessType: 1 AccessRight: 59391 SID: "00@staff" InheritanceType: 3 Inherited: true } ACE { AccessType: 1 AccessRight: 521 SID: "77@staff" InheritanceType: 3 Inherited: true } canonic: ACE { AccessType: 0 AccessRight: 59391 SID: "2@staff" InheritanceType: 1 Inherited: true } ACE { AccessType: 0 AccessRight: 521 SID: "5@staff" InheritanceType: 1 Inherited: true } ACE { AccessType: 0 AccessRight: 59391 SID: "22@staff" InheritanceType: 1 } ACE { AccessType: 0 AccessRight: 521 SID: "44@staff" InheritanceType: 4 } ACE { AccessType: 0 AccessRight: 521 SID: "55@staff" InheritanceType: 5 } ACE { AccessType: 1 AccessRight: 59391 SID: "0@staff" InheritanceType: 3 Inherited: true } ACE { AccessType: 1 AccessRight: 521 SID: "7@staff" InheritanceType: 3 Inherited: true } ACE { AccessType: 1 AccessRight: 59391 SID: "00@staff" InheritanceType: 3 } ACE { AccessType: 1 AccessRight: 32768 SID: "11@staff" InheritanceType: 0 } ACE { AccessType: 1 AccessRight: 59391 SID: "33@staff" InheritanceType: 2 } ACE { AccessType: 1 AccessRight: 521 SID: "66@staff" InheritanceType: 6 } ACE { AccessType: 1 AccessRight: 521 SID: "77@staff" InheritanceType: 7 } result: ACE { AccessType: 0 AccessRight: 59391 SID: "2@staff" InheritanceType: 1 Inherited: true } ACE { AccessType: 0 AccessRight: 521 SID: "5@staff" InheritanceType: 1 Inherited: true } ACE { AccessType: 0 AccessRight: 59391 SID: "22@staff" InheritanceType: 1 } ACE { AccessType: 0 AccessRight: 521 SID: "44@staff" InheritanceType: 4 } ACE { AccessType: 0 AccessRight: 521 SID: "55@staff" InheritanceType: 5 } ACE { AccessType: 1 AccessRight: 59391 SID: "0@staff" 
InheritanceType: 3 Inherited: true } ACE { AccessType: 1 AccessRight: 521 SID: "7@staff" InheritanceType: 3 Inherited: true } ACE { AccessType: 1 AccessRight: 59391 SID: "00@staff" InheritanceType: 3 } ACE { AccessType: 1 AccessRight: 32768 SID: "11@staff" InheritanceType: 0 } ACE { AccessType: 1 AccessRight: 59391 SID: "33@staff" InheritanceType: 2 } ACE { AccessType: 1 AccessRight: 521 SID: "66@staff" InheritanceType: 6 } ACE { AccessType: 1 AccessRight: 521 SID: "77@staff" InheritanceType: 7 } canonic: ACE { AccessType: 1 AccessRight: 59391 SID: "0@staff" InheritanceType: 3 Inherited: true } ACE { AccessType: 1 AccessRight: 521 SID: "7@staff" InheritanceType: 3 Inherited: true } ACE { AccessType: 1 AccessRight: 59391 SID: "00@staff" InheritanceType: 3 Inherited: true } ACE { AccessType: 1 AccessRight: 59391 SID: "33@staff" InheritanceType: 2 Inherited: true } ACE { AccessType: 1 AccessRight: 521 SID: "66@staff" InheritanceType: 2 Inherited: true } ACE { AccessType: 1 AccessRight: 521 SID: "77@sta ... " ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 8 PathsLimit: 10000 ShardsInside: 7 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:49:15.806607Z node 15 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T08:49:15.807074Z node 15 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table1" took 486us result status StatusSuccess 2025-05-07T08:49:15.807684Z node 15 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Table1" PathDescription { Self { Name: "Table1" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 8 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 8 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 2 TableSchemaVersion: 4 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table1" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 4 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 
InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 8 PathsLimit: 10000 ShardsInside: 7 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:49:15.808964Z node 15 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Copy1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T08:49:15.809370Z node 15 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Copy1" took 426us result status StatusSuccess 2025-05-07T08:49:15.810177Z node 15 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Copy1" PathDescription { Self { Name: "Copy1" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 103 CreateStep: 5000007 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: true } Table { Name: "Copy1" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableIndexes { Name: "Sync" LocalPathId: 6 Type: EIndexTypeGlobal State: EIndexStateReady KeyColumnNames: "value" SchemaVersion: 1 PathOwnerId: 72057594046678944 DataSize: 0 IndexImplTableDescriptions { } } TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 8 PathsLimit: 10000 ShardsInside: 7 ShardsLimit: 200000 
ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 5 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:49:15.811556Z node 15 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Copy2" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T08:49:15.811949Z node 15 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Copy2" took 404us result status StatusSuccess 2025-05-07T08:49:15.812468Z node 15 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Copy2" PathDescription { Self { Name: "Copy2" PathId: 8 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 105 CreateStep: 5000009 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Copy2" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 8 PathsLimit: 10000 ShardsInside: 7 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 8 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:49:15.813911Z node 15 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Copy3" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T08:49:15.814360Z node 15 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Copy3" took 461us result status StatusSuccess 
2025-05-07T08:49:15.815051Z node 15 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Copy3" PathDescription { Self { Name: "Copy3" PathId: 9 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 105 CreateStep: 5000009 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: true } Table { Name: "Copy3" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableIndexes { Name: "Sync" LocalPathId: 10 Type: EIndexTypeGlobal State: EIndexStateReady KeyColumnNames: "value" SchemaVersion: 1 PathOwnerId: 72057594046678944 DataSize: 0 IndexImplTableDescriptions { } } TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 8 PathsLimit: 10000 ShardsInside: 7 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 9 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TMaintenanceApiTest::SingleCompositeActionGroup [GOOD] >> TMaintenanceApiTest::SimplifiedMirror3DC >> Yq_1::ModifyQuery [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest >> TAsyncIndexTests::DropTableWithInflightChanges[PipeResets] [GOOD] Test command err: =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2141] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2141] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:116:2058] recipient: [1:111:2142] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:116:2058] recipient: [1:111:2142] Leader for TabletID 72057594046678944 is [1:123:2149] sender: [1:126:2058] recipient: [1:108:2140] Leader for TabletID 72057594046447617 is [1:129:2154] sender: [1:131:2058] recipient: [1:109:2141] Leader for TabletID 72057594046316545 is [1:134:2157] 
sender: [1:136:2058] recipient: [1:111:2142] 2025-05-07T08:48:40.447643Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:48:40.447762Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:48:40.447822Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:48:40.447887Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:48:40.447937Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:48:40.447964Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:48:40.448027Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:48:40.448116Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:48:40.448908Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:48:40.449371Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:48:40.585844Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7610: Got new config: QueryServiceConfig { AvailableExternalDataSources: "ObjectStorage" AvailableExternalDataSources: "ClickHouse" AvailableExternalDataSources: "PostgreSQL" AvailableExternalDataSources: "MySQL" AvailableExternalDataSources: "Ydb" AvailableExternalDataSources: "YT" AvailableExternalDataSources: "Greenplum" AvailableExternalDataSources: "MsSQLServer" AvailableExternalDataSources: "Oracle" AvailableExternalDataSources: "Logging" AvailableExternalDataSources: "Solomon" } 2025-05-07T08:48:40.586006Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:48:40.586925Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# ObjectStorage, ClickHouse, PostgreSQL, MySQL, Ydb, YT, Greenplum, MsSQLServer, Oracle, Logging, Solomon Leader for TabletID 72057594046447617 is [1:129:2154] sender: [1:175:2058] recipient: [1:15:2062] 2025-05-07T08:48:40.606315Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:48:40.606511Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:48:40.606713Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:48:40.614140Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:48:40.614405Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:48:40.615105Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:48:40.615358Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:48:40.617743Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:48:40.619261Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:48:40.619338Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:48:40.619516Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:48:40.619566Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:48:40.619688Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:48:40.619847Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:213:2058] recipient: [1:211:2212] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:213:2058] recipient: [1:211:2212] Leader for TabletID 72057594037968897 is [1:217:2216] sender: [1:218:2058] recipient: [1:211:2212] 2025-05-07T08:48:40.628024Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:123:2149] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T08:48:40.762792Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:48:40.763060Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:48:40.763309Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:48:40.763562Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:48:40.763631Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is 
undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:48:40.766264Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:48:40.766428Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:48:40.766646Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:48:40.766701Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:48:40.766737Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:48:40.766773Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:48:40.769243Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:48:40.769311Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:48:40.769361Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:48:40.771530Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:48:40.771600Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:48:40.771664Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:48:40.771718Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:48:40.775591Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:48:40.780823Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:48:40.781098Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:134:2157] sender: [1:253:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at 
step: 5000001 2025-05-07T08:48:40.782263Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:48:40.782417Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 134 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 ... 594046678944 Generation: 2 LocalPathId: 5 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 1003 2025-05-07T08:49:17.815612Z node 26 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 1003 2025-05-07T08:49:17.815655Z node 26 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1003, pathId: [OwnerId: 72057594046678944, LocalPathId: 5], version: 18446744073709551615 2025-05-07T08:49:17.815704Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 3 2025-05-07T08:49:17.815828Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 1003, ready parts: 2/3, is published: true 2025-05-07T08:49:17.823433Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1003:0, at schemeshard: 72057594046678944 2025-05-07T08:49:17.823528Z node 26 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_table.cpp:414: TDropTable TProposedDeletePart operationId: 1003:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:49:17.823829Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove table for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4 2025-05-07T08:49:17.823974Z node 26 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#1003:0 progress is 3/3 2025-05-07T08:49:17.824015Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 1003 ready parts: 3/3 2025-05-07T08:49:17.824063Z node 26 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#1003:0 progress is 3/3 2025-05-07T08:49:17.824097Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 1003 ready parts: 3/3 2025-05-07T08:49:17.824137Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 1003, ready parts: 3/3, is published: true 2025-05-07T08:49:17.824180Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 1003 ready parts: 3/3 2025-05-07T08:49:17.824226Z node 26 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 1003:0 2025-05-07T08:49:17.824262Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 1003:0 2025-05-07T08:49:17.824377Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-05-07T08:49:17.824437Z node 26 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the 
parts is done, operation id: 1003:1 2025-05-07T08:49:17.824466Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 1003:1 2025-05-07T08:49:17.824506Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 2 2025-05-07T08:49:17.824536Z node 26 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 1003:2 2025-05-07T08:49:17.824567Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 1003:2 2025-05-07T08:49:17.824618Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 2 2025-05-07T08:49:17.825421Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 2025-05-07T08:49:17.830826Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 2025-05-07T08:49:17.833097Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 2025-05-07T08:49:17.833174Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 2025-05-07T08:49:17.833438Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 2025-05-07T08:49:17.836010Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 2025-05-07T08:49:17.838583Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5509: Handle TEvStateChanged, at schemeshard: 72057594046678944, message: Source { RawX1: 345 RawX2: 111669152025 } TabletId: 72075186233409546 State: 4 2025-05-07T08:49:17.838679Z node 26 :FLAT_TX_SCHEMESHARD INFO: schemeshard__state_changed_reply.cpp:78: TTxShardStateChanged DoExecute, datashard informs about state changing, datashardId: 72075186233409546, state: Offline, at schemeshard: 72057594046678944 2025-05-07T08:49:17.840547Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:2 hive 72057594037968897 at ss 72057594046678944 2025-05-07T08:49:17.841025Z node 26 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 2 TxId_Deprecated: 2 TabletID: 72075186233409546 Forgetting tablet 72075186233409546 2025-05-07T08:49:17.841207Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5898: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 2 ShardOwnerId: 72057594046678944 ShardLocalIdx: 2, at schemeshard: 72057594046678944 2025-05-07T08:49:17.841456Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 1 2025-05-07T08:49:17.841768Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate 
queue, at schemeshard: 72057594046678944 2025-05-07T08:49:17.841808Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 5], at schemeshard: 72057594046678944 2025-05-07T08:49:17.841868Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 1 2025-05-07T08:49:17.841909Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 4], at schemeshard: 72057594046678944 2025-05-07T08:49:17.841949Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-05-07T08:49:17.847412Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2 2025-05-07T08:49:17.847481Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:2 tabletId 72075186233409546 2025-05-07T08:49:17.848008Z node 26 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 2 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 1003, wait until txId: 1003 TestWaitNotification wait txId: 1003 2025-05-07T08:49:17.848343Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 1003: send EvNotifyTxCompletion 2025-05-07T08:49:17.848382Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 1003 2025-05-07T08:49:17.849131Z node 26 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1003, at schemeshard: 72057594046678944 2025-05-07T08:49:17.849217Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 1003: got EvNotifyTxCompletionResult 2025-05-07T08:49:17.849251Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 1003: satisfy waiter [26:628:2554] 2025-05-07T08:49:17.855907Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5509: Handle TEvStateChanged, at schemeshard: 72057594046678944, message: Source { RawX1: 346 RawX2: 111669152026 } TabletId: 72075186233409547 State: 4 2025-05-07T08:49:17.856046Z node 26 :FLAT_TX_SCHEMESHARD INFO: schemeshard__state_changed_reply.cpp:78: TTxShardStateChanged DoExecute, datashard informs about state changing, datashardId: 72075186233409547, state: Offline, at schemeshard: 72057594046678944 2025-05-07T08:49:17.858833Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:1 hive 72057594037968897 at ss 72057594046678944 2025-05-07T08:49:17.859505Z node 26 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 1 TxId_Deprecated: 1 TabletID: 72075186233409547 Forgetting tablet 72075186233409547 2025-05-07T08:49:17.859746Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5898: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 1 ShardOwnerId: 72057594046678944 ShardLocalIdx: 1, at schemeshard: 72057594046678944 
2025-05-07T08:49:17.860077Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-05-07T08:49:17.860559Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-05-07T08:49:17.860610Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-05-07T08:49:17.860690Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-05-07T08:49:17.868991Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:1 2025-05-07T08:49:17.869093Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:1 tabletId 72075186233409547 2025-05-07T08:49:17.870003Z node 26 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 1003 wait until 72075186233409546 is deleted wait until 72075186233409547 is deleted 2025-05-07T08:49:17.870522Z node 26 :HIVE INFO: tablet_helpers.cpp:1476: [72057594037968897] TEvSubscribeToTabletDeletion, 72075186233409546 2025-05-07T08:49:17.870611Z node 26 :HIVE INFO: tablet_helpers.cpp:1476: [72057594037968897] TEvSubscribeToTabletDeletion, 72075186233409547 Deleted tabletId 72075186233409546 Deleted tabletId 72075186233409547 >> test_sql_streaming.py::test[suites-ReadTwoTopics-default.txt] [FAIL] >> test_sql_streaming.py::test[suites-ReadWriteSameTopic-default.txt] >> TMaintenanceApiTest::SimplifiedMirror3DC [GOOD] >> TMaintenanceApiTest::RequestReplaceDevicePDisk >> TCmsTest::RequestRestartServicesReject [GOOD] >> TCmsTest::RequestRestartServicesPartial >> TYardTest::TestRestartAtChunkEnd [GOOD] >> TYardTestRestore::TestRestore15 >> TCmsTenatsTest::RequestShutdownHost [GOOD] >> TCmsTenatsTest::RequestShutdownHostWithTenantPolicy >> TYardTestRestore::TestRestore15 [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/services/fq/ut_integration/unittest >> Yq_1::ModifyQuery [GOOD] Test command err: 2025-05-07T08:47:47.861994Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501622812577716340:2073];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:47:47.862606Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-05-07T08:47:55.252589Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501622812577716340:2073];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:47:55.269360Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; E0507 08:47:57.544790339 109639 dns_resolver.cc:162] no server name supplied in dns URI E0507 08:47:57.545699944 109639 channel.cc:120] 
channel stack builder failed: UNKNOWN: the target uri is not valid: dns:/// 2025-05-07T08:47:57.782192Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:57.790414Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:58.790263Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:58.794424Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:59.761538Z node 1 :YQ_CONTROL_PLANE_STORAGE ERROR: schema.cpp:160: Create directory "Root/yq" error: TRANSPORT_UNAVAILABLE [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:4831: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:4831 } ]
2025-05-07T08:47:59.777495Z node 1 :YQL_NODES_MANAGER ERROR: nodes_manager.cpp:364: ydb/core/fq/libs/actors/nodes_manager.cpp:322: TRANSPORT_UNAVAILABLE
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:4831: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint localhost:4831 2025-05-07T08:47:59.830792Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:59.850477Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:48:00.847086Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:48:00.864244Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:48:01.295604Z node 1 :YQ_CONTROL_PLANE_STORAGE ERROR: schema.cpp:160: Create directory "Root/yq" error: TRANSPORT_UNAVAILABLE [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:4831: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:4831 } ] 2025-05-07T08:48:01.962080Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:48:01.962120Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; E0507 08:48:02.602691429 110220 dns_resolver.cc:162] no server name supplied in dns URI E0507 08:48:02.605095512 110220 channel.cc:120] channel stack builder failed: UNKNOWN: the target uri is not valid: dns:/// 2025-05-07T08:48:02.963502Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:48:02.963529Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:48:03.975063Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:48:03.975085Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:48:04.978906Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:48:04.979264Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:48:05.974208Z node 1 :YQ_CONTROL_PLANE_STORAGE ERROR: schema.cpp:160: Create directory "Root/yq" error: TRANSPORT_UNAVAILABLE [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:4831: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:4831 } ]
2025-05-07T08:48:06.000183Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError;
2025-05-07T08:48:06.066129Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError;
2025-05-07T08:48:06.399337Z node 1 :YQL_NODES_MANAGER ERROR: nodes_manager.cpp:364: ydb/core/fq/libs/actors/nodes_manager.cpp:322: TRANSPORT_UNAVAILABLE
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:4831: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint localhost:4831 2025-05-07T08:48:07.006207Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:48:07.098372Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; E0507 08:48:07.746256679 110220 dns_resolver.cc:162] no server name supplied in dns URI E0507 08:48:07.746369681 110220 channel.cc:120] channel stack builder failed: UNKNOWN: the target uri is not valid: dns:/// 2025-05-07T08:48:08.006654Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:48:08.091012Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:48:09.034273Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:48:09.094823Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:48:10.035718Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:48:10.098694Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:48:11.038616Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:48:11.102286Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:48:12.151600Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:48:12.174660Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; E0507 08:48:12.852073274 110216 dns_resolver.cc:162] no server name supplied in dns URI E0507 08:48:12.853184606 110216 channel.cc:120] channel stack builder failed: UNKNOWN: the target uri is not valid: dns:/// 2025-05-07T08:48:13.194496Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 
2025-05-07T08:48:13.214313Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:48:13.587744Z node 1 :YQ_CONTROL_PLANE_STORAGE ERROR: schema.cpp:160: Create directory "Root/yq" error: TRANSPORT_UNAVAILABLE [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:4831: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:4831 } ]
2025-05-07T08:48:13.636382Z node 1 :YQL_NODES_MANAGER ERROR: nodes_manager.cpp:364: ydb/core/fq/libs/actors/nodes_manager.cpp:322: TRANSPORT_UNAVAILABLE
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:4831: Failed to connect to rem ... : [4:7501623194614675075:3124], TxId: 281474976715827, task: 2. Ctx: { SessionId : ydb://session/3?node_id=4&id=ODYzY2M4ZTYtM2RmMDg0MzEtMmQzNWY5MC1lYjQxY2FiYw==. CustomerSuppliedId : . TraceId : 01jtmyynep822j2t0q5zq7kcfg. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. Received channels info: Update { Id: 1 TransportVersion: DATA_TRANSPORT_OOB_PICKLE_1_0 SrcTaskId: 1 DstTaskId: 2 SrcEndpoint { ActorId { RawX1: 7501623194614675074 RawX2: 4503616807242803 } } DstEndpoint { ActorId { RawX1: 7501623194614675075 RawX2: 4503616807242804 } } InMemory: true DstStageId: 1 } Update { Id: 2 TransportVersion: DATA_TRANSPORT_OOB_PICKLE_1_0 SrcTaskId: 2 SrcEndpoint { ActorId { RawX1: 7501623194614675075 RawX2: 4503616807242804 } } DstEndpoint { ActorId { RawX1: 7501623194614675070 RawX2: 4503616807242373 } } InMemory: true } 2025-05-07T08:49:16.763555Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:143: SelfId: [4:7501623194614675075:3124], TxId: 281474976715827, task: 2. Ctx: { SessionId : ydb://session/3?node_id=4&id=ODYzY2M4ZTYtM2RmMDg0MzEtMmQzNWY5MC1lYjQxY2FiYw==. CustomerSuppliedId : . TraceId : 01jtmyynep822j2t0q5zq7kcfg. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. CA StateFunc 271646922 2025-05-07T08:49:16.764327Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:959: TxId: 281474976715827, task: 1, CA Id [4:7501623194614675074:3123]. Recv TEvReadResult from ShardID=72075186224037890, ReadId=0, Status=SUCCESS, Finished=1, RowCount=1, TxLocks= , BrokenTxLocks= 2025-05-07T08:49:16.764349Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1047: TxId: 281474976715827, task: 1, CA Id [4:7501623194614675074:3123]. Taken 0 locks 2025-05-07T08:49:16.764367Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1061: TxId: 281474976715827, task: 1, CA Id [4:7501623194614675074:3123]. new data for read #0 seqno = 1 finished = 1 2025-05-07T08:49:16.764387Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:143: SelfId: [4:7501623194614675074:3123], TxId: 281474976715827, task: 1. Ctx: { TraceId : 01jtmyynep822j2t0q5zq7kcfg. CustomerSuppliedId : . SessionId : ydb://session/3?node_id=4&id=ODYzY2M4ZTYtM2RmMDg0MzEtMmQzNWY5MC1lYjQxY2FiYw==. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. CA StateFunc 276037645 2025-05-07T08:49:16.764407Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:143: SelfId: [4:7501623194614675074:3123], TxId: 281474976715827, task: 1. Ctx: { TraceId : 01jtmyynep822j2t0q5zq7kcfg. CustomerSuppliedId : . SessionId : ydb://session/3?node_id=4&id=ODYzY2M4ZTYtM2RmMDg0MzEtMmQzNWY5MC1lYjQxY2FiYw==. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. CA StateFunc 271646922 2025-05-07T08:49:16.764424Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1328: TxId: 281474976715827, task: 1, CA Id [4:7501623194614675074:3123]. enter getasyncinputdata results size 1, freeSpace 8388608 2025-05-07T08:49:16.764443Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1224: TxId: 281474976715827, task: 1, CA Id [4:7501623194614675074:3123]. enter pack cells method shardId: 72075186224037890 processedRows: 0 packed rows: 0 freeSpace: 8388608 2025-05-07T08:49:16.764477Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1305: TxId: 281474976715827, task: 1, CA Id [4:7501623194614675074:3123]. 
exit pack cells method shardId: 72075186224037890 processedRows: 0 packed rows: 1 freeSpace: 8388517 2025-05-07T08:49:16.764498Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1362: TxId: 281474976715827, task: 1, CA Id [4:7501623194614675074:3123]. returned 1 rows; processed 1 rows 2025-05-07T08:49:16.764543Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1399: TxId: 281474976715827, task: 1, CA Id [4:7501623194614675074:3123]. dropping batch for read #0 2025-05-07T08:49:16.764557Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:459: TxId: 281474976715827, task: 1, CA Id [4:7501623194614675074:3123]. effective maxinflight 1024 sorted 0 2025-05-07T08:49:16.764569Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:481: TxId: 281474976715827, task: 1, CA Id [4:7501623194614675074:3123]. Scheduled table scans, in flight: 0 shards. pending shards to read: 0, 2025-05-07T08:49:16.764586Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1424: TxId: 281474976715827, task: 1, CA Id [4:7501623194614675074:3123]. returned async data processed rows 1 left freeSpace 8388517 received rows 1 running reads 0 pending shards 0 finished = 1 has limit 0 limit reached 0 2025-05-07T08:49:16.764877Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:502: SelfId: [4:7501623194614675074:3123], TxId: 281474976715827, task: 1. Ctx: { TraceId : 01jtmyynep822j2t0q5zq7kcfg. CustomerSuppliedId : . SessionId : ydb://session/3?node_id=4&id=ODYzY2M4ZTYtM2RmMDg0MzEtMmQzNWY5MC1lYjQxY2FiYw==. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. Continue execution, either output buffers are not empty or not all channels are ready, hasDataToSend: 1, channelsReady: 1 2025-05-07T08:49:16.764914Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:143: SelfId: [4:7501623194614675074:3123], TxId: 281474976715827, task: 1. Ctx: { TraceId : 01jtmyynep822j2t0q5zq7kcfg. CustomerSuppliedId : . SessionId : ydb://session/3?node_id=4&id=ODYzY2M4ZTYtM2RmMDg0MzEtMmQzNWY5MC1lYjQxY2FiYw==. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. CA StateFunc 271646922 2025-05-07T08:49:16.764980Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:670: TxId: 281474976715827, task: 1. Tasks execution finished, waiting for chunk delivery in output channelId: 1, seqNo: [1] 2025-05-07T08:49:16.765022Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:143: SelfId: [4:7501623194614675075:3124], TxId: 281474976715827, task: 2. Ctx: { SessionId : ydb://session/3?node_id=4&id=ODYzY2M4ZTYtM2RmMDg0MzEtMmQzNWY5MC1lYjQxY2FiYw==. CustomerSuppliedId : . TraceId : 01jtmyynep822j2t0q5zq7kcfg. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. CA StateFunc 271646923 2025-05-07T08:49:16.765073Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:163: TxId: 281474976715827, task: 2. Finish input channelId: 1, from: [4:7501623194614675074:3123] 2025-05-07T08:49:16.765154Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:143: SelfId: [4:7501623194614675075:3124], TxId: 281474976715827, task: 2. Ctx: { SessionId : ydb://session/3?node_id=4&id=ODYzY2M4ZTYtM2RmMDg0MzEtMmQzNWY5MC1lYjQxY2FiYw==. CustomerSuppliedId : . TraceId : 01jtmyynep822j2t0q5zq7kcfg. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. CA StateFunc 271646922 2025-05-07T08:49:16.765313Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:143: SelfId: [4:7501623194614675074:3123], TxId: 281474976715827, task: 1. Ctx: { TraceId : 01jtmyynep822j2t0q5zq7kcfg. CustomerSuppliedId : . 
SessionId : ydb://session/3?node_id=4&id=ODYzY2M4ZTYtM2RmMDg0MzEtMmQzNWY5MC1lYjQxY2FiYw==. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. CA StateFunc 271646927 2025-05-07T08:49:16.765353Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:143: SelfId: [4:7501623194614675074:3123], TxId: 281474976715827, task: 1. Ctx: { TraceId : 01jtmyynep822j2t0q5zq7kcfg. CustomerSuppliedId : . SessionId : ydb://session/3?node_id=4&id=ODYzY2M4ZTYtM2RmMDg0MzEtMmQzNWY5MC1lYjQxY2FiYw==. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. CA StateFunc 271646922 2025-05-07T08:49:16.765376Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:675: TxId: 281474976715827, task: 1. Tasks execution finished 2025-05-07T08:49:16.765396Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:510: SelfId: [4:7501623194614675074:3123], TxId: 281474976715827, task: 1. Ctx: { TraceId : 01jtmyynep822j2t0q5zq7kcfg. CustomerSuppliedId : . SessionId : ydb://session/3?node_id=4&id=ODYzY2M4ZTYtM2RmMDg0MzEtMmQzNWY5MC1lYjQxY2FiYw==. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. Compute state finished. All channels and sinks finished 2025-05-07T08:49:16.765512Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:494: TxId: 281474976715827, task: 1. pass away 2025-05-07T08:49:16.765634Z node 4 :KQP_COMPUTE DEBUG: log.cpp:784: fline=kqp_compute_actor_factory.cpp:66;problem=finish_compute_actor;tx_id=281474976715827;task_id=1;success=1;message={
: Error: COMPUTE_STATE_FINISHED }; 2025-05-07T08:49:16.768115Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:502: SelfId: [4:7501623194614675075:3124], TxId: 281474976715827, task: 2. Ctx: { SessionId : ydb://session/3?node_id=4&id=ODYzY2M4ZTYtM2RmMDg0MzEtMmQzNWY5MC1lYjQxY2FiYw==. CustomerSuppliedId : . TraceId : 01jtmyynep822j2t0q5zq7kcfg. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. Continue execution, either output buffers are not empty or not all channels are ready, hasDataToSend: 1, channelsReady: 1 2025-05-07T08:49:16.768267Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:143: SelfId: [4:7501623194614675075:3124], TxId: 281474976715827, task: 2. Ctx: { SessionId : ydb://session/3?node_id=4&id=ODYzY2M4ZTYtM2RmMDg0MzEtMmQzNWY5MC1lYjQxY2FiYw==. CustomerSuppliedId : . TraceId : 01jtmyynep822j2t0q5zq7kcfg. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. CA StateFunc 271646922 2025-05-07T08:49:16.768306Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976715827, task: 2. Tasks execution finished, don't wait for ack delivery in input channelId: 1, seqNo: [1] 2025-05-07T08:49:16.768318Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:675: TxId: 281474976715827, task: 2. Tasks execution finished 2025-05-07T08:49:16.768333Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:510: SelfId: [4:7501623194614675075:3124], TxId: 281474976715827, task: 2. Ctx: { SessionId : ydb://session/3?node_id=4&id=ODYzY2M4ZTYtM2RmMDg0MzEtMmQzNWY5MC1lYjQxY2FiYw==. CustomerSuppliedId : . TraceId : 01jtmyynep822j2t0q5zq7kcfg. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. Compute state finished. All channels and sinks finished 2025-05-07T08:49:16.768415Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:494: TxId: 281474976715827, task: 2. pass away 2025-05-07T08:49:16.768499Z node 4 :KQP_COMPUTE DEBUG: log.cpp:784: fline=kqp_compute_actor_factory.cpp:66;problem=finish_compute_actor;tx_id=281474976715827;task_id=2;success=1;message={
: Error: COMPUTE_STATE_FINISHED }; 2025-05-07T08:49:16.784264Z node 4 :FQ_PENDING_FETCHER ERROR: pending_fetcher.cpp:259: Error with GetTask:
: Error: GRpc error: (1): Cancelled on the server side
: Error: Grpc error response on endpoint [::]:9066 2025-05-07T08:49:16.790283Z node 4 :YQ_CONTROL_PLANE_STORAGE WARN: ydb_control_plane_storage.cpp:561: DB Error, Status: TRANSPORT_UNAVAILABLE, Issues: [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:9066: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:9066 } ], Query: --!syntax_v1 -- Query name: GetTask(read) PRAGMA TablePathPrefix("Root/yq"); DECLARE $tenant as String; DECLARE $scope as String; DECLARE $query_id as String; DECLARE $now as Timestamp; SELECT `generation`, `internal`, `query` FROM `queries` WHERE `scope` = $scope AND `query_id` = $query_id; SELECT `assigned_until` FROM `pending_small` WHERE `tenant` = $tenant AND `scope` = $scope AND `query_id` = $query_id AND `assigned_until` < $now; 2025-05-07T08:49:17.762276Z node 4 :FQ_PENDING_FETCHER ERROR: pending_fetcher.cpp:259: Error with GetTask:
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv6:%5B::%5D:9066: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint [::]:9066 >> TOlap::CreateTableWithNullableKeysNotAllowed >> TCmsTest::RequestRestartServicesPartial [GOOD] >> TCmsTest::RequestRestartServicesNoUser |88.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/pdisk/ut/unittest >> TYardTestRestore::TestRestore15 [GOOD] >> test_sql_streaming.py::test[suites-GroupByHopExprKey-default.txt] [FAIL] >> test_sql_streaming.py::test[suites-GroupByHopListKey-default.txt] >> Backpressure::MonteCarlo [GOOD] >> TOlap::CreateTableWithNullableKeysNotAllowed [GOOD] >> TOlap::CreateTableWithNullableKeys >> TCmsTest::RequestRestartServicesNoUser [GOOD] >> TCmsTenatsTest::RequestShutdownHostWithTenantPolicy [GOOD] >> TCmsTenatsTest::TestClusterLimitForceRestartMode >> TAsyncIndexTests::SplitMainWithReboots[PipeResets] [GOOD] >> TOlapNaming::AlterColumnTableOk >> TOlap::CreateDropStandaloneTable >> TMaintenanceApiTest::RequestReplaceDevicePDisk [GOOD] |88.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/cms/ut/unittest >> TCmsTest::RequestRestartServicesNoUser [GOOD] >> TDataShardTrace::TestTraceWriteImmediateOnShard |88.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/tx_proxy/ut_storage_tenant/ydb-core-tx-tx_proxy-ut_storage_tenant |88.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/tx_proxy/ut_storage_tenant/ydb-core-tx-tx_proxy-ut_storage_tenant |88.7%| [LD] {RESULT} $(B)/ydb/core/tx/tx_proxy/ut_storage_tenant/ydb-core-tx-tx_proxy-ut_storage_tenant ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/backpressure/ut_client/unittest >> Backpressure::MonteCarlo [GOOD] Test command err: Clock# 1970-01-01T00:00:00.000000Z elapsed# 0.000030s EventsProcessed# 0 clients.size# 0 Clock# 1970-01-01T00:00:13.281066Z elapsed# 0.000164s EventsProcessed# 2 clients.size# 0 Clock# 1970-01-01T00:00:26.082218Z elapsed# 0.000189s EventsProcessed# 2 clients.size# 0 Clock# 1970-01-01T00:00:45.644651Z elapsed# 0.000215s EventsProcessed# 2 clients.size# 0 Clock# 1970-01-01T00:00:57.726089Z elapsed# 0.000237s EventsProcessed# 2 clients.size# 0 Clock# 1970-01-01T00:01:08.516937Z elapsed# 0.000259s EventsProcessed# 2 clients.size# 0 Clock# 1970-01-01T00:01:20.042747Z elapsed# 0.000280s EventsProcessed# 2 clients.size# 0 Clock# 1970-01-01T00:01:34.596058Z elapsed# 0.000306s EventsProcessed# 2 clients.size# 0 Clock# 1970-01-01T00:01:47.063903Z elapsed# 0.000330s EventsProcessed# 2 clients.size# 0 Clock# 1970-01-01T00:02:02.760418Z elapsed# 0.000351s EventsProcessed# 2 clients.size# 0 Clock# 1970-01-01T00:02:19.504091Z elapsed# 0.000372s EventsProcessed# 2 clients.size# 0 Clock# 1970-01-01T00:02:39.145908Z elapsed# 0.000393s EventsProcessed# 2 clients.size# 0 Clock# 1970-01-01T00:02:58.841289Z elapsed# 0.000417s EventsProcessed# 2 clients.size# 0 Clock# 1970-01-01T00:03:09.789507Z elapsed# 0.000455s EventsProcessed# 2 clients.size# 0 Clock# 1970-01-01T00:03:22.768799Z elapsed# 0.000477s EventsProcessed# 2 clients.size# 0 Clock# 1970-01-01T00:03:40.144672Z elapsed# 0.000499s EventsProcessed# 2 clients.size# 0 Clock# 1970-01-01T00:03:58.195779Z elapsed# 0.000525s EventsProcessed# 2 clients.size# 0 Clock# 1970-01-01T00:04:14.977234Z elapsed# 0.000547s EventsProcessed# 2 clients.size# 0 Clock# 1970-01-01T00:04:34.799632Z elapsed# 0.049325s EventsProcessed# 2315 clients.size# 1 Clock# 1970-01-01T00:04:50.699621Z elapsed# 0.078692s EventsProcessed# 4165 clients.size# 1 Clock# 1970-01-01T00:05:07.644334Z elapsed# 0.106378s EventsProcessed# 6079 clients.size# 1 Clock# 1970-01-01T00:05:20.280461Z 
elapsed# 0.120333s EventsProcessed# 7542 clients.size# 1 Clock# 1970-01-01T00:05:31.940710Z elapsed# 0.132980s EventsProcessed# 8925 clients.size# 1 Clock# 1970-01-01T00:05:49.212611Z elapsed# 0.180224s EventsProcessed# 13158 clients.size# 2 Clock# 1970-01-01T00:06:05.394272Z elapsed# 0.244591s EventsProcessed# 18975 clients.size# 3 Clock# 1970-01-01T00:06:16.773966Z elapsed# 0.293387s EventsProcessed# 23150 clients.size# 3 Clock# 1970-01-01T00:06:29.860767Z elapsed# 0.344166s EventsProcessed# 27872 clients.size# 3 Clock# 1970-01-01T00:06:46.497905Z elapsed# 0.424959s EventsProcessed# 33951 clients.size# 3 Clock# 1970-01-01T00:07:02.960943Z elapsed# 0.486149s EventsProcessed# 39671 clients.size# 3 Clock# 1970-01-01T00:07:17.827502Z elapsed# 0.571285s EventsProcessed# 44840 clients.size# 3 Clock# 1970-01-01T00:07:33.362440Z elapsed# 0.653098s EventsProcessed# 50237 clients.size# 3 Clock# 1970-01-01T00:07:50.092754Z elapsed# 0.709833s EventsProcessed# 56315 clients.size# 3 Clock# 1970-01-01T00:08:02.403611Z elapsed# 0.754326s EventsProcessed# 60706 clients.size# 3 Clock# 1970-01-01T00:08:13.333104Z elapsed# 0.792259s EventsProcessed# 64736 clients.size# 3 Clock# 1970-01-01T00:08:24.220560Z elapsed# 0.828516s EventsProcessed# 68626 clients.size# 3 Clock# 1970-01-01T00:08:35.840598Z elapsed# 0.871071s EventsProcessed# 72805 clients.size# 3 Clock# 1970-01-01T00:08:52.807703Z elapsed# 0.932129s EventsProcessed# 78744 clients.size# 3 Clock# 1970-01-01T00:09:07.432372Z elapsed# 1.028541s EventsProcessed# 83934 clients.size# 3 Clock# 1970-01-01T00:09:26.336531Z elapsed# 1.126326s EventsProcessed# 90558 clients.size# 3 Clock# 1970-01-01T00:09:42.364375Z elapsed# 1.234156s EventsProcessed# 96291 clients.size# 3 Clock# 1970-01-01T00:10:00.787094Z elapsed# 1.391729s EventsProcessed# 102961 clients.size# 3 Clock# 1970-01-01T00:10:14.131764Z elapsed# 1.542615s EventsProcessed# 107671 clients.size# 3 Clock# 1970-01-01T00:10:28.464584Z elapsed# 1.642076s EventsProcessed# 112874 clients.size# 3 Clock# 1970-01-01T00:10:39.580920Z elapsed# 1.744880s EventsProcessed# 116933 clients.size# 3 Clock# 1970-01-01T00:10:58.859664Z elapsed# 1.909182s EventsProcessed# 123942 clients.size# 3 Clock# 1970-01-01T00:11:15.380428Z elapsed# 2.058142s EventsProcessed# 129713 clients.size# 3 Clock# 1970-01-01T00:11:31.406828Z elapsed# 2.255045s EventsProcessed# 137257 clients.size# 4 Clock# 1970-01-01T00:11:45.333480Z elapsed# 2.393593s EventsProcessed# 143894 clients.size# 4 Clock# 1970-01-01T00:12:01.501039Z elapsed# 2.507108s EventsProcessed# 149679 clients.size# 3 Clock# 1970-01-01T00:12:12.594618Z elapsed# 2.639527s EventsProcessed# 155023 clients.size# 4 Clock# 1970-01-01T00:12:24.769679Z elapsed# 2.748910s EventsProcessed# 160838 clients.size# 4 Clock# 1970-01-01T00:12:42.578795Z elapsed# 2.955011s EventsProcessed# 169184 clients.size# 4 Clock# 1970-01-01T00:12:59.133137Z elapsed# 3.094495s EventsProcessed# 177020 clients.size# 4 Clock# 1970-01-01T00:13:10.914465Z elapsed# 3.150708s EventsProcessed# 182535 clients.size# 4 Clock# 1970-01-01T00:13:25.709496Z elapsed# 3.222843s EventsProcessed# 189450 clients.size# 4 Clock# 1970-01-01T00:13:37.309245Z elapsed# 3.277833s EventsProcessed# 194872 clients.size# 4 Clock# 1970-01-01T00:13:47.979282Z elapsed# 3.332039s EventsProcessed# 199884 clients.size# 4 Clock# 1970-01-01T00:14:03.738582Z elapsed# 3.449646s EventsProcessed# 207324 clients.size# 4 Clock# 1970-01-01T00:14:18.304969Z elapsed# 3.619719s EventsProcessed# 214367 clients.size# 4 Clock# 1970-01-01T00:14:36.500211Z 
elapsed# 3.815513s EventsProcessed# 222938 clients.size# 4 Clock# 1970-01-01T00:14:47.174162Z elapsed# 3.959567s EventsProcessed# 229222 clients.size# 5 Clock# 1970-01-01T00:15:03.332516Z elapsed# 4.179359s EventsProcessed# 238748 clients.size# 5 Clock# 1970-01-01T00:15:21.750772Z elapsed# 4.371836s EventsProcessed# 251838 clients.size# 6 Clock# 1970-01-01T00:15:37.687992Z elapsed# 4.544211s EventsProcessed# 263373 clients.size# 6 Clock# 1970-01-01T00:15:57.639689Z elapsed# 4.817357s EventsProcessed# 275195 clients.size# 5 Clock# 1970-01-01T00:16:15.499002Z elapsed# 4.932602s EventsProcessed# 285796 clients.size# 5 Clock# 1970-01-01T00:16:33.066191Z elapsed# 5.048743s EventsProcessed# 296431 clients.size# 5 Clock# 1970-01-01T00:16:48.338364Z elapsed# 5.150829s EventsProcessed# 305341 clients.size# 5 Clock# 1970-01-01T00:17:06.641659Z elapsed# 5.275991s EventsProcessed# 316301 clients.size# 5 Clock# 1970-01-01T00:17:22.146869Z elapsed# 5.380769s EventsProcessed# 325423 clients.size# 5 Clock# 1970-01-01T00:17:35.079168Z elapsed# 5.471900s EventsProcessed# 333146 clients.size# 5 Clock# 1970-01-01T00:17:50.235257Z elapsed# 5.581712s EventsProcessed# 342245 clients.size# 5 Clock# 1970-01-01T00:18:08.558404Z elapsed# 5.735868s EventsProcessed# 353198 clients.size# 5 Clock# 1970-01-01T00:18:26.348666Z elapsed# 5.840950s EventsProcessed# 363937 clients.size# 5 Clock# 1970-01-01T00:18:42.124669Z elapsed# 5.954490s EventsProcessed# 375406 clients.size# 6 Clock# 1970-01-01T00:18:57.810723Z elapsed# 6.101988s EventsProcessed# 386632 clients.size# 6 Clock# 1970-01-01T00:19:14.730824Z elapsed# 6.217209s EventsProcessed# 398496 clients.size# 6 Clock# 1970-01-01T00:19:34.368327Z elapsed# 6.381799s EventsProcessed# 415266 clients.size# 7 Clock# 1970-01-01T00:19:45.318662Z elapsed# 6.495840s EventsProcessed# 424355 clients.size# 7 Clock# 1970-01-01T00:20:04.258242Z elapsed# 6.648328s EventsProcessed# 437837 clients.size# 6 Clock# 1970-01-01T00:20:16.994621Z elapsed# 6.764610s EventsProcessed# 448522 clients.size# 7 Clock# 1970-01-01T00:20:33.375488Z elapsed# 6.928098s EventsProcessed# 462019 clients.size# 7 Clock# 1970-01-01T00:20:46.820443Z elapsed# 7.070364s EventsProcessed# 474756 clients.size# 8 Clock# 1970-01-01T00:21:01.991908Z elapsed# 7.223223s EventsProcessed# 489210 clients.size# 8 Clock# 1970-01-01T00:21:18.754595Z elapsed# 7.507954s EventsProcessed# 505023 clients.size# 8 Clock# 1970-01-01T00:21:29.821365Z elapsed# 7.630113s EventsProcessed# 515479 clients.size# 8 Clock# 1970-01-01T00:21:48.814662Z elapsed# 7.869138s EventsProcessed# 531284 clients.size# 7 Clock# 1970-01-01T00:22:07.719320Z elapsed# 8.057620s EventsProcessed# 546866 clients.size# 7 Clock# 1970-01-01T00:22:27.330185Z elapsed# 8.269578s EventsProcessed# 563228 clients.size# 7 Clock# 1970-01-01T00:22:45.767160Z elapsed# 8.585959s EventsProcessed# 578492 clients.size# 7 Clock# 1970-01-01T00:23:01.236081Z elapsed# 8.911214s EventsProcessed# 589548 clients.size# 6 Clock# 1970-01-01T00:23:17.074996Z elapsed# 9.115382s EventsProcessed# 600872 clients.size# 6 Clock# 1970-01-01T00:23:36.041886Z elapsed# 9.279044s EventsProcessed# 614472 clients.size# 6 Clock# 1970-01-01T00:23:50.906057Z elapsed# 9.483267s EventsProcessed# 624964 clients.size# 6 Clock# 1970-01-01T00:24:02.486673Z elapsed# 9.648909s EventsProcessed# 633080 clients.size# 6 Clock# 1970-01-01T00:24:15.269486Z elapsed# 9.764971s EventsProcessed# 642144 clients.size# 6 Clock# 1970-01-01T00:24:34.030699Z elapsed# 10.012898s EventsProcessed# 655741 clients.size# 6 Clock# 
1970-01-01T00:24:48.981143Z elapsed# 10.119712s EventsProcessed# 666523 clients.size# 6 Clock# 1970-01-01T00:25:03.573775Z elapsed# 10.238703s EventsProcessed# 676908 clients.size# 6 Clock# 1970-01-01T00:25:16.677427Z elapsed# 10.411806s EventsProcessed# 686299 clients.size# 6 Clock# 1970-01-01T00:25:35.458681Z elapsed# 10.706766s EventsProcessed# 699742 clients.size# 6 Clock# 1970-01-01T00:25:54.514296Z elapsed# 11.073474s EventsProcessed# 713368 clients.size# 6 Clock# 1970-01-01T00:26:08.975013Z elapsed# 11.429767s EventsProcessed# 723569 clients.size# 6 Clock# 1970-01-01T00:26:24.393044Z elapsed# 11.719783s EventsProcessed# 734544 clients.size# 6 Clock# 1970-01-01T00:26:41.109142Z elapsed# 11.876085s EventsProcessed# 746637 clients.size# 6 Clock# 1970-01-01T00:26:52.985910Z elapsed# 12.016863s EventsProcessed# 753608 clients.size# 5 Clock# 1970-01-01T00:27:12.766809Z elapsed# 12.164247s EventsProcessed# 765194 clients.size# 5 Clock# 1970-01-01T00:27:25.079112Z elapsed# 12.240057s EventsProcessed# 772418 clients.size# 5 Clock# 1970-01-01T00:27:38.849555Z elapsed# 12.317065s EventsProcessed# 780512 clients.size# 5 Clock# 1970-01-01T00:27:51.445271Z elapsed# 12.388578s EventsProcessed# 788001 clients.size# 5 Clock# 1970-01-01T00:28:05.767163Z elapsed# 12.542046s EventsProcessed# 796523 clients.size# 5 Clock# 1970-01-01T00:28:25.519583Z elapsed# 12.826112s EventsProcessed# 808336 clients.size# 5 Clock# 1970-01-01T00:28:45.432022Z elapsed# 13.167487s EventsProcessed# 819866 clients.size# 5 Clock# 1970-01-01T00:29:05.241736Z elapsed# 13.446989s EventsProcessed# 831544 clients.size# 5 Clock# 1970-01-01T00:29:20.528346Z elapsed# 13.576533s EventsProcessed# 840585 clients.size# 5 Clock# 1970-01-01T00:29:36.734988Z elapsed# 13.703536s EventsProcessed# 850332 clients.size# 5 Clock# 1970-01-01T00:29:53.414202Z elapsed# 13.858686s EventsProcessed# 860084 clients.size# 5 Clock# 1970-01-01T00:30:09.417298Z elapsed# 14.009657s EventsProcessed# 869605 clients.size# 5 Clock# 1970-01-01T00:30:23.326024Z elapsed# 14.124424s EventsProcessed# 877831 clients.size# 5 Clock# 1970-01-01T00:30:34.695992Z elapsed# 14.249831s EventsProcessed# 884521 clients.size# 5 Clock# 1970-01-01T00:30:52.033227Z elapsed# 14.653691s EventsProcessed# 894806 clients.size# 5 Clock# 1970-01-01T00:31:11.528977Z elapsed# 15.043088s EventsProcessed# 906293 clients.size# 5 Clock# 1970-01-01T00:31:25.261350Z elapsed# 15.327318s EventsProcessed# 916000 clients.size# 6 Clock# 1970-01-01T00:31:44.496957Z elapsed# 15.529612s EventsProcessed# 929903 clients.size# 6 Clock# 1970-01-01T00:31:59.794230Z elapsed# 15.807562s EventsProcessed# 940898 clients.size# 6 Clock# 1970-01-01T00:32:14.778946Z elapsed# 15.916378s EventsProcessed# 951592 clients.size# 6 Clock# 1970-01-01T00:32:25.161659Z elapsed# 16.129691s EventsProcessed# 958981 clients.size# 6 Clock# 1970-01-01T00:32:41.498540Z elapsed# 16.347059s EventsProcessed# 970750 clients.size# 6 Clock# 1970-01-01T00:32:55.780839Z elapsed# 16.616613 ... 
s EventsProcessed# 10577776 clients.size# 0 Clock# 1970-01-01T05:29:55.947267Z elapsed# 190.471820s EventsProcessed# 10577776 clients.size# 0 Clock# 1970-01-01T05:30:11.694880Z elapsed# 190.471843s EventsProcessed# 10577776 clients.size# 0 Clock# 1970-01-01T05:30:31.509563Z elapsed# 190.471865s EventsProcessed# 10577776 clients.size# 0 Clock# 1970-01-01T05:30:47.367767Z elapsed# 190.471886s EventsProcessed# 10577776 clients.size# 0 Clock# 1970-01-01T05:31:03.313145Z elapsed# 190.471907s EventsProcessed# 10577776 clients.size# 0 Clock# 1970-01-01T05:31:16.556481Z elapsed# 190.471929s EventsProcessed# 10577776 clients.size# 0 Clock# 1970-01-01T05:31:33.737299Z elapsed# 190.471949s EventsProcessed# 10577776 clients.size# 0 Clock# 1970-01-01T05:31:48.255204Z elapsed# 190.493388s EventsProcessed# 10579510 clients.size# 1 Clock# 1970-01-01T05:31:59.353203Z elapsed# 190.509099s EventsProcessed# 10580837 clients.size# 1 Clock# 1970-01-01T05:32:09.622044Z elapsed# 190.523766s EventsProcessed# 10582051 clients.size# 1 Clock# 1970-01-01T05:32:28.733410Z elapsed# 190.550966s EventsProcessed# 10584317 clients.size# 1 Clock# 1970-01-01T05:32:46.700227Z elapsed# 190.575587s EventsProcessed# 10586387 clients.size# 1 Clock# 1970-01-01T05:32:56.701562Z elapsed# 190.589473s EventsProcessed# 10587559 clients.size# 1 Clock# 1970-01-01T05:33:12.495672Z elapsed# 190.610709s EventsProcessed# 10589346 clients.size# 1 Clock# 1970-01-01T05:33:26.240026Z elapsed# 190.630796s EventsProcessed# 10591061 clients.size# 1 Clock# 1970-01-01T05:33:37.582719Z elapsed# 190.646648s EventsProcessed# 10592494 clients.size# 1 Clock# 1970-01-01T05:33:49.274915Z elapsed# 190.661662s EventsProcessed# 10593885 clients.size# 1 Clock# 1970-01-01T05:34:04.576353Z elapsed# 190.682852s EventsProcessed# 10595692 clients.size# 1 Clock# 1970-01-01T05:34:15.630823Z elapsed# 190.697848s EventsProcessed# 10596977 clients.size# 1 Clock# 1970-01-01T05:34:34.376800Z elapsed# 190.761305s EventsProcessed# 10601606 clients.size# 2 Clock# 1970-01-01T05:34:53.744157Z elapsed# 190.880342s EventsProcessed# 10608411 clients.size# 3 Clock# 1970-01-01T05:35:11.399651Z elapsed# 190.957084s EventsProcessed# 10614777 clients.size# 3 Clock# 1970-01-01T05:35:25.011413Z elapsed# 191.013685s EventsProcessed# 10619769 clients.size# 3 Clock# 1970-01-01T05:35:41.303004Z elapsed# 191.078696s EventsProcessed# 10625510 clients.size# 3 Clock# 1970-01-01T05:35:59.532564Z elapsed# 191.154230s EventsProcessed# 10632068 clients.size# 3 Clock# 1970-01-01T05:36:14.617387Z elapsed# 191.223940s EventsProcessed# 10637468 clients.size# 3 Clock# 1970-01-01T05:36:30.673647Z elapsed# 191.399081s EventsProcessed# 10645158 clients.size# 4 Clock# 1970-01-01T05:36:47.665968Z elapsed# 191.532543s EventsProcessed# 10653054 clients.size# 4 Clock# 1970-01-01T05:37:04.495306Z elapsed# 191.637997s EventsProcessed# 10661044 clients.size# 4 Clock# 1970-01-01T05:37:15.469113Z elapsed# 191.713185s EventsProcessed# 10666044 clients.size# 4 Clock# 1970-01-01T05:37:29.580328Z elapsed# 191.851081s EventsProcessed# 10672713 clients.size# 4 Clock# 1970-01-01T05:37:41.088848Z elapsed# 191.929326s EventsProcessed# 10678051 clients.size# 4 Clock# 1970-01-01T05:37:58.803123Z elapsed# 192.125946s EventsProcessed# 10686488 clients.size# 4 Clock# 1970-01-01T05:38:15.615698Z elapsed# 192.224831s EventsProcessed# 10694415 clients.size# 4 Clock# 1970-01-01T05:38:27.566298Z elapsed# 192.294264s EventsProcessed# 10700036 clients.size# 4 Clock# 1970-01-01T05:38:43.071000Z elapsed# 192.381744s EventsProcessed# 
10707220 clients.size# 4 Clock# 1970-01-01T05:38:57.698082Z elapsed# 192.469134s EventsProcessed# 10714195 clients.size# 4 Clock# 1970-01-01T05:39:09.098702Z elapsed# 192.598196s EventsProcessed# 10721025 clients.size# 5 Clock# 1970-01-01T05:39:24.185027Z elapsed# 192.708678s EventsProcessed# 10730097 clients.size# 5 Clock# 1970-01-01T05:39:42.060467Z elapsed# 192.837973s EventsProcessed# 10740843 clients.size# 5 Clock# 1970-01-01T05:39:57.730278Z elapsed# 192.948426s EventsProcessed# 10750128 clients.size# 5 Clock# 1970-01-01T05:40:10.103859Z elapsed# 193.058992s EventsProcessed# 10757441 clients.size# 5 Clock# 1970-01-01T05:40:20.446947Z elapsed# 193.116648s EventsProcessed# 10763478 clients.size# 5 Clock# 1970-01-01T05:40:40.439273Z elapsed# 193.230648s EventsProcessed# 10775427 clients.size# 5 Clock# 1970-01-01T05:40:58.256020Z elapsed# 193.336812s EventsProcessed# 10786386 clients.size# 5 Clock# 1970-01-01T05:41:15.402527Z elapsed# 193.481028s EventsProcessed# 10796748 clients.size# 5 Clock# 1970-01-01T05:41:30.660745Z elapsed# 193.590811s EventsProcessed# 10806045 clients.size# 5 Clock# 1970-01-01T05:41:42.762604Z elapsed# 193.658929s EventsProcessed# 10813018 clients.size# 5 Clock# 1970-01-01T05:41:58.957836Z elapsed# 193.761164s EventsProcessed# 10822452 clients.size# 5 Clock# 1970-01-01T05:42:14.520957Z elapsed# 193.909372s EventsProcessed# 10831614 clients.size# 5 Clock# 1970-01-01T05:42:33.463584Z elapsed# 194.051388s EventsProcessed# 10842931 clients.size# 5 Clock# 1970-01-01T05:42:47.404038Z elapsed# 194.143856s EventsProcessed# 10851088 clients.size# 5 Clock# 1970-01-01T05:43:04.412087Z elapsed# 194.270888s EventsProcessed# 10861191 clients.size# 5 Clock# 1970-01-01T05:43:19.728756Z elapsed# 194.432336s EventsProcessed# 10870225 clients.size# 5 Clock# 1970-01-01T05:43:30.129742Z elapsed# 194.492637s EventsProcessed# 10874937 clients.size# 4 Clock# 1970-01-01T05:43:49.136603Z elapsed# 194.602102s EventsProcessed# 10883852 clients.size# 4 Clock# 1970-01-01T05:44:07.487292Z elapsed# 194.706947s EventsProcessed# 10892701 clients.size# 4 Clock# 1970-01-01T05:44:18.992315Z elapsed# 194.772213s EventsProcessed# 10898153 clients.size# 4 Clock# 1970-01-01T05:44:32.068083Z elapsed# 194.880407s EventsProcessed# 10904260 clients.size# 4 Clock# 1970-01-01T05:44:42.985068Z elapsed# 194.934301s EventsProcessed# 10909465 clients.size# 4 Clock# 1970-01-01T05:44:55.014704Z elapsed# 194.987796s EventsProcessed# 10915155 clients.size# 4 Clock# 1970-01-01T05:45:10.879845Z elapsed# 195.060250s EventsProcessed# 10922570 clients.size# 4 Clock# 1970-01-01T05:45:27.851361Z elapsed# 195.155778s EventsProcessed# 10930703 clients.size# 4 Clock# 1970-01-01T05:45:42.576512Z elapsed# 195.267998s EventsProcessed# 10937748 clients.size# 4 Clock# 1970-01-01T05:45:59.350759Z elapsed# 195.361892s EventsProcessed# 10945740 clients.size# 4 Clock# 1970-01-01T05:46:14.794405Z elapsed# 195.438222s EventsProcessed# 10952804 clients.size# 4 Clock# 1970-01-01T05:46:32.045350Z elapsed# 195.527166s EventsProcessed# 10961143 clients.size# 4 Clock# 1970-01-01T05:46:44.197955Z elapsed# 195.585493s EventsProcessed# 10966867 clients.size# 4 Clock# 1970-01-01T05:47:01.033814Z elapsed# 195.697769s EventsProcessed# 10972702 clients.size# 3 Clock# 1970-01-01T05:47:14.598745Z elapsed# 195.780894s EventsProcessed# 10979315 clients.size# 4 Clock# 1970-01-01T05:47:33.456010Z elapsed# 195.882307s EventsProcessed# 10988218 clients.size# 4 Clock# 1970-01-01T05:47:52.196153Z elapsed# 196.001926s EventsProcessed# 10999488 clients.size# 5 
Clock# 1970-01-01T05:48:06.852467Z elapsed# 196.092631s EventsProcessed# 11008264 clients.size# 5 Clock# 1970-01-01T05:48:18.581498Z elapsed# 196.207437s EventsProcessed# 11015176 clients.size# 5 Clock# 1970-01-01T05:48:32.550499Z elapsed# 196.279902s EventsProcessed# 11023302 clients.size# 5 Clock# 1970-01-01T05:48:45.783174Z elapsed# 196.353094s EventsProcessed# 11031111 clients.size# 5 Clock# 1970-01-01T05:49:03.502764Z elapsed# 196.470744s EventsProcessed# 11041325 clients.size# 5 Clock# 1970-01-01T05:49:16.428158Z elapsed# 196.607391s EventsProcessed# 11049009 clients.size# 5 Clock# 1970-01-01T05:49:32.790306Z elapsed# 196.725169s EventsProcessed# 11058729 clients.size# 5 Clock# 1970-01-01T05:49:46.631564Z elapsed# 196.823653s EventsProcessed# 11066990 clients.size# 5 Clock# 1970-01-01T05:49:59.161240Z elapsed# 196.914340s EventsProcessed# 11074487 clients.size# 5 Clock# 1970-01-01T05:50:18.816392Z elapsed# 197.087935s EventsProcessed# 11085927 clients.size# 5 Clock# 1970-01-01T05:50:37.271929Z elapsed# 197.217304s EventsProcessed# 11096676 clients.size# 5 Clock# 1970-01-01T05:50:53.725185Z elapsed# 197.332033s EventsProcessed# 11106307 clients.size# 5 Clock# 1970-01-01T05:51:07.062970Z elapsed# 197.425121s EventsProcessed# 11114165 clients.size# 5 Clock# 1970-01-01T05:51:21.763671Z elapsed# 197.568413s EventsProcessed# 11122738 clients.size# 5 Clock# 1970-01-01T05:51:38.149406Z elapsed# 197.658923s EventsProcessed# 11130516 clients.size# 4 Clock# 1970-01-01T05:51:49.732620Z elapsed# 197.720828s EventsProcessed# 11136036 clients.size# 4 Clock# 1970-01-01T05:52:04.715646Z elapsed# 197.803385s EventsProcessed# 11143314 clients.size# 4 Clock# 1970-01-01T05:52:16.177543Z elapsed# 197.865226s EventsProcessed# 11148855 clients.size# 4 Clock# 1970-01-01T05:52:30.969054Z elapsed# 198.005915s EventsProcessed# 11155781 clients.size# 4 Clock# 1970-01-01T05:52:46.708249Z elapsed# 198.102726s EventsProcessed# 11163358 clients.size# 4 Clock# 1970-01-01T05:53:00.218204Z elapsed# 198.246601s EventsProcessed# 11169859 clients.size# 4 Clock# 1970-01-01T05:53:13.296438Z elapsed# 198.485037s EventsProcessed# 11176016 clients.size# 4 Clock# 1970-01-01T05:53:24.777308Z elapsed# 198.688643s EventsProcessed# 11181397 clients.size# 4 Clock# 1970-01-01T05:53:37.561077Z elapsed# 198.923117s EventsProcessed# 11187634 clients.size# 4 Clock# 1970-01-01T05:53:55.605077Z elapsed# 199.095456s EventsProcessed# 11196216 clients.size# 4 Clock# 1970-01-01T05:54:08.998100Z elapsed# 199.195210s EventsProcessed# 11202661 clients.size# 4 Clock# 1970-01-01T05:54:23.106824Z elapsed# 199.319476s EventsProcessed# 11209346 clients.size# 4 Clock# 1970-01-01T05:54:37.040648Z elapsed# 199.441119s EventsProcessed# 11215971 clients.size# 4 Clock# 1970-01-01T05:54:50.863491Z elapsed# 199.557641s EventsProcessed# 11220852 clients.size# 3 Clock# 1970-01-01T05:55:09.232350Z elapsed# 199.766853s EventsProcessed# 11229537 clients.size# 4 Clock# 1970-01-01T05:55:28.376856Z elapsed# 199.904089s EventsProcessed# 11238447 clients.size# 4 Clock# 1970-01-01T05:55:40.849040Z elapsed# 200.021753s EventsProcessed# 11245849 clients.size# 5 Clock# 1970-01-01T05:55:51.124885Z elapsed# 200.115791s EventsProcessed# 11252191 clients.size# 5 Clock# 1970-01-01T05:56:06.305239Z elapsed# 200.255489s EventsProcessed# 11261196 clients.size# 5 Clock# 1970-01-01T05:56:17.666750Z elapsed# 200.360067s EventsProcessed# 11267960 clients.size# 5 Clock# 1970-01-01T05:56:33.646630Z elapsed# 200.459675s EventsProcessed# 11277316 clients.size# 5 Clock# 
1970-01-01T05:56:44.522838Z elapsed# 200.548486s EventsProcessed# 11283896 clients.size# 5 Clock# 1970-01-01T05:56:56.833304Z elapsed# 200.630715s EventsProcessed# 11291063 clients.size# 5 Clock# 1970-01-01T05:57:13.930638Z elapsed# 200.779648s EventsProcessed# 11301275 clients.size# 5 Clock# 1970-01-01T05:57:27.002780Z elapsed# 200.916435s EventsProcessed# 11308911 clients.size# 5 Clock# 1970-01-01T05:57:38.266362Z elapsed# 201.009767s EventsProcessed# 11315634 clients.size# 5 Clock# 1970-01-01T05:57:49.230438Z elapsed# 201.130776s EventsProcessed# 11323570 clients.size# 6 Clock# 1970-01-01T05:58:06.310933Z elapsed# 201.340733s EventsProcessed# 11335931 clients.size# 6 Clock# 1970-01-01T05:58:16.946509Z elapsed# 201.454784s EventsProcessed# 11343570 clients.size# 6 Clock# 1970-01-01T05:58:33.388373Z elapsed# 201.615382s EventsProcessed# 11355236 clients.size# 6 Clock# 1970-01-01T05:58:49.747954Z elapsed# 201.734946s EventsProcessed# 11364917 clients.size# 5 Clock# 1970-01-01T05:59:08.649739Z elapsed# 201.918756s EventsProcessed# 11376130 clients.size# 5 Clock# 1970-01-01T05:59:21.470940Z elapsed# 202.017531s EventsProcessed# 11383781 clients.size# 5 Clock# 1970-01-01T05:59:39.864001Z elapsed# 202.162030s EventsProcessed# 11394628 clients.size# 5 Clock# 1970-01-01T05:59:59.715109Z elapsed# 202.384412s EventsProcessed# 11406314 clients.size# 5 >> TDataShardTrace::TestTraceDistributedSelectViaReadActors >> TDataShardTrace::TestTraceDistributedSelect >> TDataShardTrace::TestTraceDistributedUpsert+UseSink >> TDataShardTrace::TestTraceDistributedUpsert-UseSink >> KqpSnapshotRead::ReadWriteTxFailsOnConcurrentWrite2-withSink [GOOD] >> KqpSnapshotRead::ReadWriteTxFailsOnConcurrentWrite3+withSink |88.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/cms/ut/unittest >> TMaintenanceApiTest::RequestReplaceDevicePDisk [GOOD] >> Yq_1::Basic_EmptyDict [FAIL] >> TSchemeShardTest::DropPQAbort [GOOD] >> TSchemeShardTest::ManyDirs >> TOlap::CreateTableWithNullableKeys [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest >> TAsyncIndexTests::SplitMainWithReboots[PipeResets] [GOOD] Test command err: =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2141] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2141] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:116:2058] recipient: [1:111:2142] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:116:2058] recipient: [1:111:2142] Leader for TabletID 72057594046678944 is [1:123:2149] sender: [1:126:2058] recipient: [1:108:2140] Leader for TabletID 72057594046447617 is [1:129:2154] sender: [1:131:2058] recipient: [1:109:2141] Leader for TabletID 72057594046316545 is [1:134:2157] sender: [1:136:2058] recipient: [1:111:2142] 2025-05-07T08:48:11.213269Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:48:11.222485Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, 
Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:48:11.223240Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:48:11.224271Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:48:11.224690Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:48:11.225093Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:48:11.225931Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:48:11.234787Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:48:11.282780Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:48:11.297011Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:48:13.021496Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7610: Got new config: QueryServiceConfig { AvailableExternalDataSources: "ObjectStorage" AvailableExternalDataSources: "ClickHouse" AvailableExternalDataSources: "PostgreSQL" AvailableExternalDataSources: "MySQL" AvailableExternalDataSources: "Ydb" AvailableExternalDataSources: "YT" AvailableExternalDataSources: "Greenplum" AvailableExternalDataSources: "MsSQLServer" AvailableExternalDataSources: "Oracle" AvailableExternalDataSources: "Logging" AvailableExternalDataSources: "Solomon" } 2025-05-07T08:48:13.022264Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:48:13.100687Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# ObjectStorage, ClickHouse, PostgreSQL, MySQL, Ydb, YT, Greenplum, MsSQLServer, Oracle, Logging, Solomon Leader for TabletID 72057594046447617 is [1:129:2154] sender: [1:175:2058] recipient: [1:15:2062] 2025-05-07T08:48:13.284891Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:48:13.285098Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:48:13.285309Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:48:13.318208Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:48:13.318539Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:48:13.319484Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:48:13.319802Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot 
DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:48:13.328209Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:48:13.330112Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:48:13.330210Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:48:13.330453Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:48:13.330522Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:48:13.330642Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:48:13.330818Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:213:2058] recipient: [1:211:2212] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:213:2058] recipient: [1:211:2212] Leader for TabletID 72057594037968897 is [1:217:2216] sender: [1:218:2058] recipient: [1:211:2212] 2025-05-07T08:48:13.352655Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:123:2149] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T08:48:14.678012Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:48:14.679710Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:48:14.680510Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:48:14.684649Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:48:14.685385Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:48:14.712985Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:48:14.713504Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER 
DATABASE, path: //MyRoot 2025-05-07T08:48:14.716565Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:48:14.717302Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:48:14.717685Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:48:14.718173Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:48:14.750633Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:48:14.750722Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:48:14.751061Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:48:14.780737Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:48:14.780825Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:48:14.781990Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:48:14.782440Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:48:14.847314Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:48:14.872254Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:48:14.878833Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:134:2157] sender: [1:253:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:48:14.879990Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:48:14.880142Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 134 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 ... 
askPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 1 } } SplitBoundary { KeyPrefix { Tuple { Optional { Uint32: 50 } } } } TableIndexes { Name: "UserDefinedIndex" LocalPathId: 4 Type: EIndexTypeGlobalAsync State: EIndexStateReady KeyColumnNames: "indexed" SchemaVersion: 1 PathOwnerId: 72057594046678944 DataSize: 0 IndexImplTableDescriptions { PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 } } } } TableSchemaVersion: 1 IsBackup: false IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "\001\000\004\000\000\0002\000\000\000" IsPoint: false IsInclusive: false DatashardId: 72075186233409548 } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186233409549 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 
0 RowReads: 0 RangeReads: 0 PartCount: 2 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:49:24.283954Z node 24 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/UserDefinedIndex/indexImplTable" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-05-07T08:49:24.284233Z node 24 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/UserDefinedIndex/indexImplTable" took 317us result status StatusSuccess 2025-05-07T08:49:24.284966Z node 24 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Table/UserDefinedIndex/indexImplTable" PathDescription { Self { Name: "indexImplTable" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 1002 CreateStep: 5000003 ParentPathId: 4 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeAsyncIndexImplTable Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "indexImplTable" Columns { Name: "indexed" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "indexed" KeyColumnNames: "key" KeyColumnIds: 1 KeyColumnIds: 2 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 
TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 } } TableSchemaVersion: 1 IsBackup: false IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186233409546 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 5 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:49:24.297370Z node 24 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.cpp:78: [TableChangeSenderShard][72075186233409548:2][72075186233409546][24:782:2619] Handshake NKikimrChangeExchange.TEvStatus Status: STATUS_OK LastRecordOrder: 0 2025-05-07T08:49:24.297481Z node 24 :CHANGE_EXCHANGE DEBUG: change_sender_async_index.cpp:239: [AsyncIndexChangeSenderMain][72075186233409548:2][24:723:2619] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186233409546 } 2025-05-07T08:49:24.297638Z node 24 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.cpp:123: [TableChangeSenderShard][72075186233409548:2][72075186233409546][24:782:2619] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRecords { Records [{ Order: 1 Group: 1746607764245811 Step: 5000003 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046678944, LocalPathId: 4] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: 
[OwnerId: 72057594046678944, LocalPathId: 3] SchemaVersion: 1 LockId: 0 LockOffset: 0 },{ Order: 2 Group: 1746607764245811 Step: 5000003 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046678944, LocalPathId: 4] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046678944, LocalPathId: 3] SchemaVersion: 1 LockId: 0 LockOffset: 0 },{ Order: 3 Group: 1746607764245811 Step: 5000003 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046678944, LocalPathId: 4] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046678944, LocalPathId: 3] SchemaVersion: 1 LockId: 0 LockOffset: 0 }] } 2025-05-07T08:49:24.302188Z node 24 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.cpp:200: [TableChangeSenderShard][72075186233409548:2][72075186233409546][24:782:2619] Handle NKikimrChangeExchange.TEvStatus Status: STATUS_OK RecordStatuses { Order: 1 Status: STATUS_OK Reason: REASON_NONE } RecordStatuses { Order: 2 Status: STATUS_OK Reason: REASON_NONE } RecordStatuses { Order: 3 Status: STATUS_OK Reason: REASON_NONE } LastRecordOrder: 3 2025-05-07T08:49:24.302342Z node 24 :CHANGE_EXCHANGE DEBUG: change_sender_async_index.cpp:239: [AsyncIndexChangeSenderMain][72075186233409548:2][24:723:2619] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186233409546 } |88.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/services/metadata/initializer/ut/ydb-services-metadata-initializer-ut |88.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/services/metadata/initializer/ut/ydb-services-metadata-initializer-ut |88.7%| [TM] {RESULT} ydb/core/blobstorage/backpressure/ut_client/unittest |88.7%| [LD] {RESULT} $(B)/ydb/services/metadata/initializer/ut/ydb-services-metadata-initializer-ut ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_olap/unittest >> TOlap::CreateTableWithNullableKeys [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:128:2058] recipient: [1:108:2140] 2025-05-07T08:49:23.606686Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:49:23.606778Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:49:23.606818Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:49:23.606908Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:49:23.606965Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:49:23.606995Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:49:23.607057Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, 
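The change-exchange handshake traced above is a cumulative-ack delivery scheme: the shard first answers the handshake with LastRecordOrder: 0, the sender ships TEvRecords tagged Order 1..3, and the shard acknowledges them all at once with LastRecordOrder: 3, after which the sender reports TEvReady. The sketch below models just that bookkeeping — the sender retains every record past the last acknowledged order so the tail can be resent after a reconnect. It is a minimal standalone illustration; the class and member names are invented for this note, not YDB's real NChangeExchange types.

    #include <algorithm>
    #include <cstdint>
    #include <deque>
    #include <iostream>
    #include <string>
    #include <vector>

    struct ChangeRecord {
        uint64_t Order;     // monotonically increasing sequence number
        std::string Body;   // opaque payload ("Body: 28b" in the log)
    };

    class OrderedChangeSender {
    public:
        void Enqueue(ChangeRecord rec) { Pending.push_back(std::move(rec)); }

        // Everything not yet acknowledged is eligible for (re)send.
        std::vector<ChangeRecord> RecordsToSend() const {
            std::vector<ChangeRecord> out;
            for (const auto& r : Pending)
                if (r.Order > LastAcked)
                    out.push_back(r);
            return out;
        }

        // A cumulative ack (LastRecordOrder) releases every record up to it.
        void HandleStatus(uint64_t lastRecordOrder) {
            LastAcked = std::max(LastAcked, lastRecordOrder);
            while (!Pending.empty() && Pending.front().Order <= LastAcked)
                Pending.pop_front();
        }

    private:
        std::deque<ChangeRecord> Pending;
        uint64_t LastAcked = 0;  // the handshake above starts at LastRecordOrder: 0
    };

    int main() {
        OrderedChangeSender sender;
        for (uint64_t i = 1; i <= 3; ++i)
            sender.Enqueue({i, "async-index row delta"});
        std::cout << sender.RecordsToSend().size() << " in flight\n";  // 3
        sender.HandleStatus(3);  // shard answered LastRecordOrder: 3
        std::cout << sender.RecordsToSend().size() << " in flight\n";  // 0
    }

One consequence of the cumulative ack visible in the trace: a single TEvStatus with per-record RecordStatuses can retire a whole batch, so the sender never needs to track individual in-flight acknowledgments.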
Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:49:23.607128Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:49:23.607939Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:49:23.608319Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:49:23.723175Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:49:23.723236Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:49:23.739089Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:49:23.739213Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:49:23.739387Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:49:23.747992Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:49:23.748619Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:49:23.749273Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:49:23.749559Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:49:23.751882Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:49:23.753456Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:49:23.753515Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:49:23.753580Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:49:23.753637Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:49:23.753735Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:49:23.753924Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:49:23.760494Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:239:2058] recipient: [1:15:2062] 2025-05-07T08:49:24.054408Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: 
TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:49:24.054688Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:49:24.054984Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:49:24.055252Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:49:24.055314Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:49:24.066088Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:49:24.066264Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:49:24.066567Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:49:24.066628Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:49:24.066669Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:49:24.066705Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:49:24.239198Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:49:24.239299Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:49:24.239343Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:49:24.247355Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:49:24.247422Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:49:24.247470Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:49:24.247520Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:49:24.252007Z 
node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:49:24.258749Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:49:24.259130Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:49:24.260262Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:49:24.260402Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 130 RawX2: 4294969450 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:49:24.260454Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:49:24.260824Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:49:24.260900Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:49:24.261247Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:49:24.261386Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:49:24.271129Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:49:24.271205Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:49:24.271397Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:49:24.271439Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 
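The trace up to this point walks one schemeshard operation (txId 1) through its whole lifecycle: the propose is accepted, TCreateParts moves the tx from state 2 to 3, TConfigureParts to 128, DoPropose hands it to the coordinator, and TEvOperationPlan completes it at 240, after which the result is published to the scheme board. The numeric codes below are exactly what the log prints ("2 -> 3", "3 -> 128", "128 -> 240"); the enum names are inferred from the surrounding messages and are not the real schemeshard definitions.

    #include <cstdint>
    #include <iostream>

    enum class ETxState : uint8_t {
        CreateParts    = 2,    // TCreateParts ProgressState (shards created here, if any)
        ConfigureParts = 3,    // NSubDomainState::TConfigureParts
        Propose        = 128,  // DoPropose -> coordinator assigns a plan step
        Done           = 240,  // TEvOperationPlan handled, publish to scheme board
    };

    ETxState Next(ETxState s) {
        switch (s) {
            case ETxState::CreateParts:    return ETxState::ConfigureParts;
            case ETxState::ConfigureParts: return ETxState::Propose;
            case ETxState::Propose:        return ETxState::Done;
            case ETxState::Done:           return ETxState::Done;
        }
        return ETxState::Done;  // unreachable; keeps compilers quiet
    }

    int main() {
        // Prints 2, 3, 128, 240 -- the same progression the log reports.
        for (ETxState s = ETxState::CreateParts;; s = Next(s)) {
            std::cout << static_cast<int>(s) << '\n';
            if (s == ETxState::Done) break;
        }
    }

Note how the plan step (FAKE_COORDINATOR in these unit tests) is the only point where a global order is assigned; everything before it is local preparation, everything after is replay of an already-ordered decision.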
n: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 104 2025-05-07T08:49:25.904320Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 104 2025-05-07T08:49:25.904374Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 104, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], version: 18446744073709551615 2025-05-07T08:49:25.904421Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 4 2025-05-07T08:49:25.905455Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 9 PathOwnerId: 72057594046678944, cookie: 104 2025-05-07T08:49:25.905552Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 9 PathOwnerId: 72057594046678944, cookie: 104 2025-05-07T08:49:25.905582Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 104 2025-05-07T08:49:25.905613Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 104, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 9 2025-05-07T08:49:25.905660Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-05-07T08:49:25.910505Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 7 PathOwnerId: 72057594046678944, cookie: 104 2025-05-07T08:49:25.910642Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 7 PathOwnerId: 72057594046678944, cookie: 104 2025-05-07T08:49:25.910687Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 104 2025-05-07T08:49:25.910730Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 104, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 7 2025-05-07T08:49:25.910775Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-05-07T08:49:25.910909Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 104, ready parts: 0/1, is published: true 2025-05-07T08:49:25.912643Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 104:0 from tablet: 72057594046678944 to tablet: 72075186233409547 cookie: 72057594046678944:2 msg type: 275382275 2025-05-07T08:49:25.914530Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: 
TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 2025-05-07T08:49:25.914993Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 2025-05-07T08:49:25.916089Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 2025-05-07T08:49:25.928412Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6106: Handle TEvNotifyTxCompletionResult, at schemeshard: 72057594046678944, message: Origin: 72075186233409547 TxId: 104 2025-05-07T08:49:25.928484Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation FindRelatedPartByTabletId, TxId: 104, tablet: 72075186233409547, partId: 0 2025-05-07T08:49:25.928636Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 104:0, at schemeshard: 72057594046678944, message: Origin: 72075186233409547 TxId: 104 2025-05-07T08:49:25.928692Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 104:0 129 -> 130 FAKE_COORDINATOR: Erasing txId 104 2025-05-07T08:49:25.930954Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 104:0, at schemeshard: 72057594046678944 2025-05-07T08:49:25.931157Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 104:0, at schemeshard: 72057594046678944 2025-05-07T08:49:25.931221Z node 2 :FLAT_TX_SCHEMESHARD INFO: drop_table.cpp:315: TDropColumnTable TProposedDeleteParts operationId# 104:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:49:25.931316Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 3 2025-05-07T08:49:25.931451Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#104:0 progress is 1/1 2025-05-07T08:49:25.931494Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-05-07T08:49:25.931538Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#104:0 progress is 1/1 2025-05-07T08:49:25.931579Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-05-07T08:49:25.931625Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 104, ready parts: 1/1, is published: true 2025-05-07T08:49:25.931726Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [2:371:2350] message: TxId: 104 2025-05-07T08:49:25.931793Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-05-07T08:49:25.931840Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 104:0 2025-05-07T08:49:25.931887Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 104:0 2025-05-07T08:49:25.932027Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, 
LocalPathId: 4] was 2 2025-05-07T08:49:25.934247Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:2 hive 72057594037968897 at ss 72057594046678944 2025-05-07T08:49:25.934384Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 104: got EvNotifyTxCompletionResult 2025-05-07T08:49:25.934430Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 104: satisfy waiter [2:608:2568] 2025-05-07T08:49:25.934973Z node 2 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 2 TxId_Deprecated: 2 TabletID: 72075186233409547 2025-05-07T08:49:25.935691Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186233409547;self_id=[2:473:2442];ev=NKikimr::TEvTablet::TEvTabletDead;fline=columnshard_impl.cpp:1153;event=tablet_die; Forgetting tablet 72075186233409547 2025-05-07T08:49:25.939560Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5898: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 2 ShardOwnerId: 72057594046678944 ShardLocalIdx: 2, at schemeshard: 72057594046678944 2025-05-07T08:49:25.940701Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 1 2025-05-07T08:49:25.941389Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-05-07T08:49:25.941450Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 4], at schemeshard: 72057594046678944 2025-05-07T08:49:25.941541Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-05-07T08:49:25.944295Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2 2025-05-07T08:49:25.944378Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:2 tabletId 72075186233409547 2025-05-07T08:49:25.945512Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 104 2025-05-07T08:49:25.946199Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/MyDir/MyTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T08:49:25.946438Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/MyDir/MyTable" took 255us result status StatusPathDoesNotExist 2025-05-07T08:49:25.946623Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/MyDir/MyTable\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot/MyDir\' (id: [OwnerId: 72057594046678944, LocalPathId: 3])" Path: 
"/MyRoot/MyDir/MyTable" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot/MyDir" LastExistedPrefixPathId: 3 LastExistedPrefixDescription { Self { Name: "MyDir" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 102 CreateStep: 5000003 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-05-07T08:49:25.947306Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: PathId: 4 SchemeshardId: 72057594046678944 Options { }, at schemeshard: 72057594046678944 2025-05-07T08:49:25.947408Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:44: Tablet 72057594046678944 describe pathId 4 took 109us result status StatusPathDoesNotExist 2025-05-07T08:49:25.947493Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'\', error: path is empty" Path: "" PathId: 4 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TOlap::CreateStore >> TOlapNaming::CreateColumnTableOk >> TCmsTenatsTest::TestClusterLimitForceRestartMode [GOOD] >> TOlap::CreateDropStandaloneTable [GOOD] >> TOlap::AlterStore >> TOlapNaming::CreateColumnStoreOk |88.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/cms/ut/unittest >> TCmsTenatsTest::TestClusterLimitForceRestartMode [GOOD] >> TOlap::CustomDefaultPresets |88.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/ut/data_integrity/ydb-core-kqp-ut-data_integrity |88.7%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/data_integrity/ydb-core-kqp-ut-data_integrity |88.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/data_integrity/ydb-core-kqp-ut-data_integrity |88.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/public/lib/ydb_cli/topic/ut/ydb-public-lib-ydb_cli-topic-ut |88.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/public/lib/ydb_cli/topic/ut/ydb-public-lib-ydb_cli-topic-ut |88.7%| [LD] {RESULT} $(B)/ydb/public/lib/ydb_cli/topic/ut/ydb-public-lib-ydb_cli-topic-ut >> TOlap::AlterStore [GOOD] >> TOlap::AlterTtl >> TOlap::CreateStore [GOOD] >> TOlap::CreateDropTable >> DataShardTxOrder::ImmediateBetweenOnline_Init_oo8 >> Viewer::PDiskMerging >> TOlapNaming::CreateColumnStoreOk [GOOD] >> TOlapNaming::CreateColumnStoreFailed >> Viewer::LevenshteinDistance [GOOD] >> Viewer::JsonStorageListingV2 >> Viewer::PDiskMerging [GOOD] >> Viewer::SelectStringWithBase64Encoding >> Viewer::JsonAutocompleteEmpty >> Viewer::TabletMerging >> Viewer::JsonAutocompleteStartOfDatabaseName >> Viewer::SelectStringWithNoBase64Encoding >> TOlapNaming::CreateColumnStoreFailed [GOOD] >> TOlap::CustomDefaultPresets [GOOD] >> TOlap::Decimal >> Viewer::Cluster10000Tablets >> TOlap::CreateDropTable [GOOD] >> TOlap::CreateDropStandaloneTableDefaultSharding >> DataShardOutOfOrder::TestPlannedTimeoutSplit >> TOlap::AlterTtl [GOOD] >> KqpSinkMvcc::OltpMultiSinksNoSinks [GOOD] >> KqpSinkMvcc::OltpMultiSinks ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_olap/unittest >> TOlapNaming::CreateColumnStoreFailed [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 
72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140] 2025-05-07T08:49:28.236681Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:49:28.236775Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:49:28.236815Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:49:28.236852Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:49:28.236909Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:49:28.236945Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:49:28.237003Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:49:28.237073Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:49:28.237916Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:49:28.238296Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:49:28.335098Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:49:28.335184Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:49:28.355048Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:49:28.355326Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:49:28.355523Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:49:28.362647Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:49:28.363106Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:49:28.363867Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:49:28.364096Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:49:28.367540Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 
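A recurring pattern in the drop-table teardown above is IncrementPathDbRefCount / DecrementPathDbRefCount: pending publications, tx states that target the path, and still-live shards each pin the path, and TTxCleanDroppedPaths may only PersistRemovePath once the count drains to zero (the log shows the count for LocalPathId 4 stepping down through "was 3", "was 2", "was 1" before the cleanup runs). Below is a small hand-rolled model of that bookkeeping, illustrative only and not schemeshard code; the pin reasons in the comments paraphrase the log messages.

    #include <cstdint>
    #include <iostream>
    #include <unordered_map>

    class PathRefCounts {
    public:
        void Pin(uint64_t pathId) { ++Refs[pathId]; }

        void Unpin(uint64_t pathId) {
            auto it = Refs.find(pathId);
            if (it == Refs.end()) return;      // unbalanced unpin: ignore in this sketch
            if (--it->second == 0) Refs.erase(it);
        }

        bool CanErase(uint64_t pathId) const { return Refs.count(pathId) == 0; }

    private:
        std::unordered_map<uint64_t, uint32_t> Refs;
    };

    int main() {
        PathRefCounts rc;
        const uint64_t path = 4;   // stands in for LocalPathId: 4 above
        rc.Pin(path);              // publication in flight
        rc.Pin(path);              // tx state targets the path
        rc.Pin(path);              // shard still owned by the path
        rc.Unpin(path);            // "remove publishing for pathId"
        rc.Unpin(path);            // "remove txstate target path"
        std::cout << rc.CanErase(path) << '\n';  // 0: shard not yet deleted
        rc.Unpin(path);            // "shard deleted for pathId"
        std::cout << rc.CanErase(path) << '\n';  // 1: TTxCleanDroppedPaths may remove it
    }

The design choice this models: deletion is never a single atomic step; it is a drain, so a late TEvUpdateAck or a slow hive tablet-delete reply cannot race the path out from under a reader.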
2025-05-07T08:49:28.369314Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:49:28.369389Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:49:28.369481Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:49:28.369535Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:49:28.369654Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:49:28.369933Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:49:28.377790Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T08:49:28.549328Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:49:28.549588Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:49:28.549861Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:49:28.550164Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:49:28.550230Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:49:28.553107Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:49:28.553262Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:49:28.553481Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:49:28.553546Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:49:28.553598Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do 
next state 2025-05-07T08:49:28.553638Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:49:28.556048Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:49:28.556132Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:49:28.556196Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:49:28.558658Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:49:28.558723Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:49:28.558768Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:49:28.558854Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:49:28.563182Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:49:28.565601Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:49:28.565857Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:49:28.567005Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:49:28.567159Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:49:28.567474Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:49:28.567826Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:49:28.567887Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:49:28.568080Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: 
IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:49:28.568163Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:49:28.570741Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:49:28.570794Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:49:28.571006Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:49:28.571051Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... hemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:49:29.600690Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:49:29.600823Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:49:29.601983Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:49:29.602114Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:49:29.602806Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:49:29.602907Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 131 RawX2: 8589936747 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:49:29.602944Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:49:29.603123Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:49:29.603160Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:49:29.603282Z node 2 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:49:29.603336Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:49:29.604874Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:49:29.604911Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:49:29.605042Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:49:29.605072Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [2:206:2208], at schemeshard: 72057594046678944, txId: 1, path id: 1 2025-05-07T08:49:29.605292Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:49:29.605328Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046678944] TDone opId# 1:0 ProgressState 2025-05-07T08:49:29.605404Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#1:0 progress is 1/1 2025-05-07T08:49:29.605434Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-05-07T08:49:29.605463Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#1:0 progress is 1/1 2025-05-07T08:49:29.605489Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-05-07T08:49:29.605519Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 1, ready parts: 1/1, is published: false 2025-05-07T08:49:29.605547Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-05-07T08:49:29.605576Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 1:0 2025-05-07T08:49:29.605601Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 1:0 2025-05-07T08:49:29.605659Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-05-07T08:49:29.605701Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 1, publications: 1, subscribers: 0 2025-05-07T08:49:29.605737Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 1, [OwnerId: 72057594046678944, LocalPathId: 1], 3 2025-05-07T08:49:29.606211Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 
Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-05-07T08:49:29.606313Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-05-07T08:49:29.606357Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1 2025-05-07T08:49:29.606397Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 3 2025-05-07T08:49:29.606433Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:49:29.606504Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1, subscribers: 0 2025-05-07T08:49:29.608582Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1 2025-05-07T08:49:29.608965Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1, at schemeshard: 72057594046678944 TestModificationResults wait txId: 101 2025-05-07T08:49:29.609576Z node 2 :TX_PROXY DEBUG: proxy_impl.cpp:434: actor# [2:269:2260] Bootstrap 2025-05-07T08:49:29.620745Z node 2 :TX_PROXY DEBUG: proxy_impl.cpp:453: actor# [2:269:2260] Become StateWork (SchemeCache [2:274:2265]) 2025-05-07T08:49:29.623014Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateColumnStore CreateColumnStore { Name: "OlapStore" ColumnShardCount: 1 SchemaPresets { Name: "default" Schema { Columns { Name: "timestamp" Type: "Timestamp" NotNull: true } Columns { Name: "data" Type: "Utf8" } Columns { Name: "mess age" Type: "Utf8" } KeyColumnNames: "timestamp" } } } } TxId: 101 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:49:29.623319Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: create_store.cpp:331: TCreateOlapStore Propose, path: /MyRoot/OlapStore, opId: 101:0, at schemeshard: 72057594046678944 2025-05-07T08:49:29.623472Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 101:1, propose status:StatusSchemeError, reason: Invalid name for column 'mess age', at schemeshard: 72057594046678944 2025-05-07T08:49:29.624647Z node 2 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [2:269:2260] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-05-07T08:49:29.626738Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 101, response: Status: StatusSchemeError Reason: "Invalid name for column \'mess age\'" TxId: 101 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:49:29.626918Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 101, database: /MyRoot, subject: , status: StatusSchemeError, reason: Invalid name for column 'mess age', operation: CREATE COLUMN STORE, path: /MyRoot/OlapStore 2025-05-07T08:49:29.627917Z 
node 2 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 TestModificationResult got TxId: 101, wait until txId: 101 TestWaitNotification wait txId: 101 2025-05-07T08:49:29.628093Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 101: send EvNotifyTxCompletion 2025-05-07T08:49:29.628129Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 101 2025-05-07T08:49:29.628404Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 2025-05-07T08:49:29.628472Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-05-07T08:49:29.628500Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [2:284:2275] TestWaitNotification: OK eventTxId 101 2025-05-07T08:49:29.628797Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/OlapStore" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T08:49:29.628947Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/OlapStore" took 173us result status StatusPathDoesNotExist 2025-05-07T08:49:29.629084Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/OlapStore\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/OlapStore" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 >> TOlap::Decimal [GOOD] |88.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/ut/sysview/ydb-core-kqp-ut-sysview |88.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/sysview/ydb-core-kqp-ut-sysview |88.7%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/sysview/ydb-core-kqp-ut-sysview >> IncrementalRestoreScan::ChangeSenderSimple >> IncrementalRestoreScan::Empty >> IncrementalRestoreScan::ChangeSenderEmpty >> Viewer::TabletMergingPacked ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_olap/unittest >> TOlap::AlterTtl [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140] 2025-05-07T08:49:25.792433Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, 
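The CreateColumnStoreFailed run above is rejected at propose time with StatusSchemeError because of the column name "mess age"; no shards are created, and the subsequent describe of /MyRoot/OlapStore confirms the path never came into existence. A plausible stand-in for the identifier check is sketched below — the real rule lives in schemeshard and may accept a different character set, so treat this as an approximation of the behavior the test demonstrates, not the actual validator.

    #include <cctype>
    #include <iostream>
    #include <string>

    // Approximate column-name rule: non-empty, no leading digit, and only
    // [A-Za-z0-9_] characters. The space in "mess age" fails this check.
    bool IsValidColumnName(const std::string& name) {
        if (name.empty() || std::isdigit(static_cast<unsigned char>(name[0])))
            return false;
        for (unsigned char c : name)
            if (!std::isalnum(c) && c != '_')
                return false;
        return true;
    }

    int main() {
        std::cout << IsValidColumnName("timestamp") << '\n';  // 1: accepted
        std::cout << IsValidColumnName("mess age")  << '\n';  // 0 -> StatusSchemeError
    }

Because the check fails during IgniteOperation, the failure is cheap: the audit record is written, the proxy gets the error synchronously, and nothing has to be rolled back.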
WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:49:25.792540Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:49:25.792586Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:49:25.792635Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:49:25.792693Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:49:25.792735Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:49:25.792793Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:49:25.792873Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:49:25.793768Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:49:25.794320Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:49:25.887559Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:49:25.887624Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:49:25.905570Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:49:25.905835Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:49:25.906174Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:49:25.914146Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:49:25.914480Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:49:25.915210Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:49:25.915389Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:49:25.918974Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:49:25.920417Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:49:25.920481Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:49:25.920555Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:49:25.920602Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:49:25.920699Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:49:25.920932Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:49:25.927522Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T08:49:26.084024Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:49:26.084256Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:49:26.084507Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:49:26.084754Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:49:26.084822Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:49:26.092216Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:49:26.092392Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:49:26.092642Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:49:26.092734Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:49:26.092787Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:49:26.092828Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:49:26.097549Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: 
TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:49:26.097627Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:49:26.097684Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:49:26.100773Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:49:26.100837Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:49:26.100884Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:49:26.100947Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:49:26.105092Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:49:26.109824Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:49:26.110088Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:49:26.111293Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:49:26.111442Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:49:26.111497Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:49:26.111809Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:49:26.111871Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:49:26.112062Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:49:26.112192Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant 
no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:49:26.115027Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:49:26.115079Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:49:26.115260Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:49:26.115302Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... NKikimr::TEvColumnShard::TEvProposeTransactionResult> complete, operationId: 106:0, at schemeshard: 72057594046678944 2025-05-07T08:49:29.786906Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 106:0, at schemeshard: 72057594046678944 2025-05-07T08:49:29.786953Z node 3 :FLAT_TX_SCHEMESHARD INFO: alter_table.cpp:148: TAlterColumnTable TPropose operationId# 106:0 HandleReply ProgressState at tablet: 72057594046678944 2025-05-07T08:49:29.787023Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 106 ready parts: 1/1 2025-05-07T08:49:29.787175Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } AffectedSet { TabletId: 72075186233409546 Flags: 2 } ExecLevel: 0 TxId: 106 MinStep: 5000006 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:49:29.788786Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 106:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:106 msg type: 269090816 2025-05-07T08:49:29.788927Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 106, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 106 at step: 5000007 FAKE_COORDINATOR: advance: minStep5000007 State->FrontStep: 5000006 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 106 at step: 5000007 FAKE_COORDINATOR: Send Plan to tablet 72075186233409546 for txId: 106 at step: 5000007 2025-05-07T08:49:29.789224Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000007, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:49:29.789318Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 106 Coordinator: 72057594046316545 AckTo { RawX1: 125 RawX2: 12884904039 } } Step: 5000007 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:49:29.789393Z node 3 :FLAT_TX_SCHEMESHARD INFO: alter_table.cpp:109: TAlterColumnTable TPropose operationId# 106:0 HandleReply TEvOperationPlan at tablet: 72057594046678944, stepId: 5000007 2025-05-07T08:49:29.790104Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 
106:0 128 -> 129 2025-05-07T08:49:29.790315Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-05-07T08:49:29.790371Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-05-07T08:49:29.798144Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186233409546;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=106;fline=tx_controller.cpp:214;event=finished_tx;tx_id=106; FAKE_COORDINATOR: advance: minStep5000007 State->FrontStep: 5000007 2025-05-07T08:49:29.801430Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:49:29.801493Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 106, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-05-07T08:49:29.801707Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 106, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-05-07T08:49:29.801882Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:49:29.801935Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [3:209:2211], at schemeshard: 72057594046678944, txId: 106, path id: 2 2025-05-07T08:49:29.802041Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [3:209:2211], at schemeshard: 72057594046678944, txId: 106, path id: 3 2025-05-07T08:49:29.802387Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 106:0, at schemeshard: 72057594046678944 2025-05-07T08:49:29.802448Z node 3 :FLAT_TX_SCHEMESHARD INFO: alter_table.cpp:199: TAlterColumnTable TProposedWaitParts operationId# 106:0 ProgressState at tablet: 72057594046678944 2025-05-07T08:49:29.802526Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: alter_table.cpp:222: TAlterColumnTable TProposedWaitParts operationId# 106:0 ProgressState wait for NotifyTxCompletionResult tabletId: 72075186233409546 2025-05-07T08:49:29.803281Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 8 PathOwnerId: 72057594046678944, cookie: 106 2025-05-07T08:49:29.803410Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 8 PathOwnerId: 72057594046678944, cookie: 106 2025-05-07T08:49:29.803460Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 106 2025-05-07T08:49:29.803507Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 106, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 8 2025-05-07T08:49:29.803565Z node 3 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-05-07T08:49:29.805082Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 14 PathOwnerId: 72057594046678944, cookie: 106 2025-05-07T08:49:29.805181Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 14 PathOwnerId: 72057594046678944, cookie: 106 2025-05-07T08:49:29.805216Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 106 2025-05-07T08:49:29.805252Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 106, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 14 2025-05-07T08:49:29.805289Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-05-07T08:49:29.805366Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 106, ready parts: 0/1, is published: true 2025-05-07T08:49:29.808655Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 106:0 from tablet: 72057594046678944 to tablet: 72075186233409546 cookie: 72057594046678944:1 msg type: 275382275 2025-05-07T08:49:29.814582Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 106 2025-05-07T08:49:29.815270Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 106 2025-05-07T08:49:29.815679Z node 3 :TX_TIERING ERROR: log.cpp:784: fline=manager.cpp:158;error=cannot_read_secrets;reason=Can't read access key: No such secret: SId:secret; 2025-05-07T08:49:29.828111Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6106: Handle TEvNotifyTxCompletionResult, at schemeshard: 72057594046678944, message: Origin: 72075186233409546 TxId: 106 2025-05-07T08:49:29.828193Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation FindRelatedPartByTabletId, TxId: 106, tablet: 72075186233409546, partId: 0 2025-05-07T08:49:29.828349Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 106:0, at schemeshard: 72057594046678944, message: Origin: 72075186233409546 TxId: 106 FAKE_COORDINATOR: Erasing txId 106 2025-05-07T08:49:29.830526Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 106:0, at schemeshard: 72057594046678944 2025-05-07T08:49:29.830689Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 106:0, at schemeshard: 72057594046678944 2025-05-07T08:49:29.830748Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046678944] TDone opId# 106:0 ProgressState 2025-05-07T08:49:29.830923Z node 3 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__operation_side_effects.cpp:906: Part operation is done id#106:0 progress is 1/1 2025-05-07T08:49:29.830987Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 106 ready parts: 1/1 2025-05-07T08:49:29.831037Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#106:0 progress is 1/1 2025-05-07T08:49:29.831084Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 106 ready parts: 1/1 2025-05-07T08:49:29.831133Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 106, ready parts: 1/1, is published: true 2025-05-07T08:49:29.831216Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [3:342:2321] message: TxId: 106 2025-05-07T08:49:29.831278Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 106 ready parts: 1/1 2025-05-07T08:49:29.831326Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 106:0 2025-05-07T08:49:29.831371Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 106:0 2025-05-07T08:49:29.831531Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-05-07T08:49:29.833301Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 106: got EvNotifyTxCompletionResult 2025-05-07T08:49:29.833364Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 106: satisfy waiter [3:549:2520] TestWaitNotification: OK eventTxId 106 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/MyRoot/Tier1' stopped at tablet 72075186233409546 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/MyRoot/Tier1' stopped at tablet 72075186233409546 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_olap/unittest >> TOlap::Decimal [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140] 2025-05-07T08:49:28.773036Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:49:28.773141Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:49:28.773199Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:49:28.773236Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:49:28.773294Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: 
OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:49:28.773333Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:49:28.773394Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:49:28.773475Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:49:28.774427Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:49:28.774824Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:49:28.867197Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:49:28.867269Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:49:28.891722Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:49:28.892027Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:49:28.892235Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:49:28.916285Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:49:28.916703Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:49:28.917418Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:49:28.917637Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:49:28.921158Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:49:28.922746Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:49:28.922846Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:49:28.922940Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:49:28.922999Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:49:28.923114Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:49:28.923385Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: 
Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:49:28.930686Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T08:49:29.093916Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:49:29.094733Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:49:29.095034Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:49:29.095357Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:49:29.095422Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:49:29.097999Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:49:29.098142Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:49:29.098361Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:49:29.098427Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:49:29.098469Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:49:29.098509Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:49:29.100620Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:49:29.100699Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:49:29.100768Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:49:29.102603Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:49:29.102653Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 
2025-05-07T08:49:29.102699Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:49:29.102762Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:49:29.106605Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:49:29.108804Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:49:29.109041Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:49:29.110105Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:49:29.110260Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:49:29.110315Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:49:29.110645Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:49:29.110706Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:49:29.110912Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:49:29.110991Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:49:29.113329Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:49:29.113378Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:49:29.113607Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:49:29.113656Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 00002 2025-05-07T08:49:30.425740Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 101:0 128 -> 129 2025-05-07T08:49:30.425893Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:49:30.425989Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-05-07T08:49:30.427115Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186233409546;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=101;fline=tx_controller.cpp:214;event=finished_tx;tx_id=101; FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 2025-05-07T08:49:30.438638Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:49:30.438691Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 101, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:49:30.438908Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 101, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-05-07T08:49:30.439084Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:49:30.439130Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [2:206:2208], at schemeshard: 72057594046678944, txId: 101, path id: 1 2025-05-07T08:49:30.439184Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [2:206:2208], at schemeshard: 72057594046678944, txId: 101, path id: 2 2025-05-07T08:49:30.439827Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 101:0, at schemeshard: 72057594046678944 2025-05-07T08:49:30.439896Z node 2 :FLAT_TX_SCHEMESHARD INFO: create_store.cpp:245: TCreateOlapStore TProposedWaitParts operationId# 101:0 ProgressState at tablet: 72057594046678944 2025-05-07T08:49:30.439966Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: create_store.cpp:268: TCreateOlapStore TProposedWaitParts operationId# 101:0 ProgressState wait for NotifyTxCompletionResult tabletId: 72075186233409546 2025-05-07T08:49:30.440939Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 101 2025-05-07T08:49:30.441060Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 101 2025-05-07T08:49:30.441101Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 101 2025-05-07T08:49:30.441159Z node 2 
:FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 5 2025-05-07T08:49:30.441202Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-05-07T08:49:30.441835Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 101 2025-05-07T08:49:30.441892Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 101 2025-05-07T08:49:30.441911Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 101 2025-05-07T08:49:30.441932Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 3 2025-05-07T08:49:30.442000Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-05-07T08:49:30.442067Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 101, ready parts: 0/1, is published: true 2025-05-07T08:49:30.444185Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 101:0 from tablet: 72057594046678944 to tablet: 72075186233409546 cookie: 72057594046678944:1 msg type: 275382275 2025-05-07T08:49:30.447626Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-05-07T08:49:30.447771Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-05-07T08:49:30.470362Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6106: Handle TEvNotifyTxCompletionResult, at schemeshard: 72057594046678944, message: Origin: 72075186233409546 TxId: 101 2025-05-07T08:49:30.470436Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation FindRelatedPartByTabletId, TxId: 101, tablet: 72075186233409546, partId: 0 2025-05-07T08:49:30.470589Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 101:0, at schemeshard: 72057594046678944, message: Origin: 72075186233409546 TxId: 101 FAKE_COORDINATOR: Erasing txId 101 2025-05-07T08:49:30.472750Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-05-07T08:49:30.472955Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 101:0, at schemeshard: 72057594046678944 2025-05-07T08:49:30.473001Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046678944] TDone opId# 101:0 ProgressState 2025-05-07T08:49:30.473122Z 
node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#101:0 progress is 1/1 2025-05-07T08:49:30.473163Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-05-07T08:49:30.473206Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#101:0 progress is 1/1 2025-05-07T08:49:30.473261Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-05-07T08:49:30.473303Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 101, ready parts: 1/1, is published: true 2025-05-07T08:49:30.473410Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [2:341:2320] message: TxId: 101 2025-05-07T08:49:30.473476Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-05-07T08:49:30.473518Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 101:0 2025-05-07T08:49:30.473550Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 101:0 2025-05-07T08:49:30.473711Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-05-07T08:49:30.475620Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-05-07T08:49:30.475676Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [2:342:2321] TestWaitNotification: OK eventTxId 101 2025-05-07T08:49:30.476223Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/OlapStore" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T08:49:30.476472Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/OlapStore" took 281us result status StatusSuccess 2025-05-07T08:49:30.477095Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/OlapStore" PathDescription { Self { Name: "OlapStore" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeColumnStore CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 ColumnStoreVersion: 1 } ChildrenExist: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { 
CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 1 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } ColumnStoreDescription { Name: "OlapStore" ColumnShardCount: 1 ColumnShards: 72075186233409546 SchemaPresets { Id: 1 Name: "default" Schema { Columns { Id: 1 Name: "timestamp" Type: "Timestamp" TypeId: 50 NotNull: true StorageId: "" DefaultValue { } } Columns { Id: 2 Name: "data" Type: "Decimal(35,9)" TypeId: 4865 TypeInfo { DecimalPrecision: 35 DecimalScale: 9 } NotNull: false StorageId: "" DefaultValue { } } KeyColumnNames: "timestamp" NextColumnId: 3 Version: 1 Options { SchemeNeedActualization: false } NextColumnFamilyId: 1 } } NextSchemaPresetId: 2 NextTtlSettingsPresetId: 1 } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> Viewer::JsonAutocompleteSimilarDatabaseName >> Viewer::FuzzySearcherLimit3OutOf4 [GOOD] >> Viewer::FuzzySearcherLimit4OutOf4 [GOOD] >> Viewer::FuzzySearcherLongWord [GOOD] >> Viewer::FuzzySearcherPriority [GOOD] >> Viewer::JsonAutocompleteColumns >> KqpSnapshotRead::ReadWriteTxFailsOnConcurrentWrite3+withSink [GOOD] >> DataShardOutOfOrder::UncommittedReads >> Viewer::TabletMergingPacked [GOOD] >> Viewer::VDiskMerging >> DataShardScan::ScanFollowedByUpdate >> TOlapNaming::AlterColumnTableOk [GOOD] >> TOlapNaming::AlterColumnTableFailed >> DataShardOutOfOrder::TestSnapshotReadAfterStuckRW ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/tx/unittest >> KqpSnapshotRead::ReadWriteTxFailsOnConcurrentWrite3+withSink [GOOD] Test command err: Trying to start YDB, gRPC: 28310, MsgBus: 29349 2025-05-07T08:49:15.786579Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501623190842923551:2196];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:49:15.787041Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003718/r3tmp/tmpwPvRRH/pdisk_1.dat 2025-05-07T08:49:16.275112Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:49:16.313510Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:49:16.313650Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:49:16.316943Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 28310, node 1 2025-05-07T08:49:16.421204Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:49:16.421232Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: 
will try to initialize from file: (empty maybe) 2025-05-07T08:49:16.421250Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:49:16.421451Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:29349 TClient is connected to server localhost:29349 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:49:17.037623Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:17.060876Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T08:49:17.078460Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:17.222906Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:17.398744Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:17.510483Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:19.403544Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623208022794255:2406], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:19.403736Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:19.700379Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T08:49:19.728255Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T08:49:19.752976Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T08:49:19.783784Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T08:49:19.816175Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T08:49:19.851997Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T08:49:19.895131Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T08:49:19.988202Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623208022794916:2470], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:19.988309Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:19.988643Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623208022794921:2473], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:19.993047Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-05-07T08:49:20.003516Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501623208022794923:2474], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T08:49:20.114081Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501623212317762270:3422] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:49:20.784220Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501623190842923551:2196];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:49:20.806565Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:49:24.911725Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2578: SessionId: ydb://session/3?node_id=1&id=ODlkZDA2NDEtNTdlY2FhN2MtNGEzNDYzYjAtYjVkOWZiNzg=, ActorId: [1:7501623216612729803:2507], ActorState: ExecuteState, TraceId: 01jtmyyx0677sx8aw7e8hgm75f, Create QueryResponse for error on request, msg: tx has deferred effects, but locks are broken Trying to start YDB, gRPC: 14648, MsgBus: 31973 2025-05-07T08:49:25.967605Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7501623232491485744:2060];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:49:25.967678Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003718/r3tmp/tmp4SYWtD/pdisk_1.dat 2025-05-07T08:49:26.099840Z node 2 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:49:26.126278Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:49:26.126370Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:49:26.128061Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 14648, node 2 2025-05-07T08:49:26.178739Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:49:26.178769Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:49:26.178777Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:49:26.178947Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:31973 TClient is connected to server localhost:31973 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:49:26.661083Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:26.674362Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-05-07T08:49:26.687591Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:26.766027Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-05-07T08:49:26.993531Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:27.075278Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 2025-05-07T08:49:29.655772Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7501623249671356578:2406], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:29.655891Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:29.707086Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-05-07T08:49:29.742643Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-05-07T08:49:29.776363Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-05-07T08:49:29.809727Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-05-07T08:49:29.841611Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-05-07T08:49:29.879386Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-05-07T08:49:29.915402Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-05-07T08:49:30.011751Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7501623253966324535:2470], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:30.011880Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:30.011975Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7501623253966324540:2473], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:30.016585Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-05-07T08:49:30.030302Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7501623253966324542:2474], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-05-07T08:49:30.090267Z node 2 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [2:7501623253966324593:3416] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:49:30.968233Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7501623232491485744:2060];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:49:30.968325Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:49:31.966400Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2578: SessionId: ydb://session/3?node_id=2&id=ODY5ZjdhYTEtZmJiZWJjNGItY2EwODFiMWYtMzc2NjI5OGY=, ActorId: [2:7501623258261292160:2513], ActorState: ExecuteState, TraceId: 01jtmyz4832prbnamde9j88bah, Create QueryResponse for error on request, msg: tx has deferred effects, but locks are broken >> Viewer::VDiskMerging [GOOD] >> Viewer::TenantInfo5kkTablets >> DataShardTxOrder::ImmediateBetweenOnline_Init_oo8 [GOOD] >> TOlapNaming::CreateColumnTableOk [GOOD] >> TOlapNaming::CreateColumnTableFailed >> DataShardOutOfOrder::TestOutOfOrderNonConflictingWrites+EvWrite >> TOlapNaming::CreateColumnTableFailed [GOOD] >> DataShardScan::ScanFollowedByUpdate [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_order/unittest >> DataShardTxOrder::ImmediateBetweenOnline_Init_oo8 [GOOD] Test command err: 2025-05-07T08:49:29.457487Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3098: StateInit, received event# 268828672, Sender [1:108:2140], Recipient [1:133:2155]: NKikimr::TEvTablet::TEvBoot 2025-05-07T08:49:29.508592Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:49:29.508682Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:49:29.519875Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3098: StateInit, received event# 268828673, Sender [1:108:2140], Recipient [1:133:2155]: NKikimr::TEvTablet::TEvRestored 2025-05-07T08:49:29.520480Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 9437184 actor [1:133:2155] 2025-05-07T08:49:29.520757Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-05-07T08:49:29.566948Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3111: StateInactive, received event# 268828684, Sender [1:108:2140], Recipient [1:133:2155]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-05-07T08:49:29.578650Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-05-07T08:49:29.579338Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-05-07T08:49:29.581171Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 9437184 2025-05-07T08:49:29.581262Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 9437184 2025-05-07T08:49:29.581314Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 9437184 2025-05-07T08:49:29.581743Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 
2025-05-07T08:49:29.581988Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-05-07T08:49:29.582084Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 9437184 persisting started state actor id [1:198:2155] in generation 2 2025-05-07T08:49:29.655174Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-05-07T08:49:29.688626Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 9437184 2025-05-07T08:49:29.688822Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 9437184 not sending time cast registration request in state WaitScheme: missing processing params 2025-05-07T08:49:29.688915Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 9437184, actorId: [1:213:2211] 2025-05-07T08:49:29.688942Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 9437184 2025-05-07T08:49:29.688968Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 9437184, state: WaitScheme 2025-05-07T08:49:29.688991Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-05-07T08:49:29.689207Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 2146435072, Sender [1:133:2155], Recipient [1:133:2155]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-05-07T08:49:29.689257Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3155: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-05-07T08:49:29.689459Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 9437184 2025-05-07T08:49:29.689544Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 9437184 2025-05-07T08:49:29.689606Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269877761, Sender [1:209:2208], Recipient [1:133:2155]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-05-07T08:49:29.689644Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3166: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-05-07T08:49:29.689686Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:207:2207], serverId# [1:209:2208], sessionId# [0:0:0] 2025-05-07T08:49:29.689740Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-05-07T08:49:29.689771Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 0 2025-05-07T08:49:29.689799Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 9437184 2025-05-07T08:49:29.689827Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437184 has no attached operations 2025-05-07T08:49:29.689859Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 9437184 2025-05-07T08:49:29.689886Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 9437184 TxInFly 0 2025-05-07T08:49:29.689913Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-05-07T08:49:29.691837Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269549568, Sender [1:99:2134], Recipient [1:133:2155]: 
NKikimrTxDataShard.TEvProposeTransaction TxKind: TX_KIND_SCHEME SourceDeprecated { RawX1: 99 RawX2: 4294969430 } TxBody: "\nI\n\006table1\020\r\032\t\n\003key\030\002 \"\032\014\n\005value\030\200$ 8\032\n\n\004uint\030\002 9(\":\010Z\006\010\010\030\001(\000J\014/Root/table1" TxId: 1 ExecLevel: 0 Flags: 0 SchemeShardId: 4200 ProcessingParams { } 2025-05-07T08:49:29.691892Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3136: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-05-07T08:49:29.691970Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-05-07T08:49:29.692099Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit CheckSchemeTx 2025-05-07T08:49:29.692133Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 9437184 txId 1 ssId 4200 seqNo 0:0 2025-05-07T08:49:29.692188Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 1 at tablet 9437184 2025-05-07T08:49:29.692221Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is ExecutedNoMoreRestarts 2025-05-07T08:49:29.692248Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit CheckSchemeTx 2025-05-07T08:49:29.692275Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit StoreSchemeTx 2025-05-07T08:49:29.692299Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit StoreSchemeTx 2025-05-07T08:49:29.692594Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayCompleteNoMoreRestarts 2025-05-07T08:49:29.692638Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit StoreSchemeTx 2025-05-07T08:49:29.692667Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit FinishPropose 2025-05-07T08:49:29.692707Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit FinishPropose 2025-05-07T08:49:29.692738Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayComplete 2025-05-07T08:49:29.692759Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit FinishPropose 2025-05-07T08:49:29.692780Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit WaitForPlan 2025-05-07T08:49:29.692803Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit WaitForPlan 2025-05-07T08:49:29.692820Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:1] at 9437184 is not ready to execute on unit WaitForPlan 2025-05-07T08:49:29.705109Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 9437184 2025-05-07T08:49:29.705189Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit StoreSchemeTx 2025-05-07T08:49:29.705222Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit FinishPropose 2025-05-07T08:49:29.705262Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 1 at tablet 
9437184 send to client, exec latency: 0 ms, propose latency: 1 ms, status: PREPARED 2025-05-07T08:49:29.705314Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 9437184 not sending time cast registration request in state WaitScheme 2025-05-07T08:49:29.705865Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269877761, Sender [1:219:2217], Recipient [1:133:2155]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-05-07T08:49:29.705938Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3166: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-05-07T08:49:29.705993Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:218:2216], serverId# [1:219:2217], sessionId# [0:0:0] 2025-05-07T08:49:29.706157Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269287424, Sender [1:99:2134], Recipient [1:133:2155]: {TEvPlanStep step# 2 MediatorId# 0 TabletID 9437184} 2025-05-07T08:49:29.706189Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3148: StateWork, processing event TEvTxProcessing::TEvPlanStep 2025-05-07T08:49:29.706305Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1790: Trying to execute [2:1] at 9437184 on unit WaitForPlan 2025-05-07T08:49:29.706336Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1805: Execution status for [2:1] at 9437184 is Executed 2025-05-07T08:49:29.706363Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [2:1] at 9437184 executing on unit WaitForPlan 2025-05-07T08:49:29.706392Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [2:1] at 9437184 to execution unit PlanQueue 2025-05-07T08:49:29.709504Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 1 at step 2 at tablet 9437184 { Transactions { TxId: 1 AckTo { RawX1: 99 RawX2: 4294969430 } } Step: 2 MediatorID: 0 TabletID: 9437184 } 2025-05-07T08:49:29.709587Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-05-07T08:49:29.709797Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 2146435072, Sender [1:133:2155], Recipient [1:133:2155]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-05-07T08:49:29.709834Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3155: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-05-07T08:49:29.709879Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-05-07T08:49:29.709918Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 1 2025-05-07T08:49:29.709956Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437184 2025-05-07T08:49:29.710010Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [2:1] in PlanQueue unit at 9437184 2025-05-07T08:49:29.710046Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [2:1] at 9437184 on unit PlanQueue 2025-05-07T08:49:29.7100 ... 
7186 consumer 9437186 txId 140 2025-05-07T08:49:35.017290Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269287938, Sender [1:455:2397], Recipient [1:232:2225]: {TEvReadSet step# 6 txid# 119 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 39} 2025-05-07T08:49:35.017325Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3150: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-05-07T08:49:35.017364Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 119 2025-05-07T08:49:35.017467Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269287938, Sender [1:455:2397], Recipient [1:232:2225]: {TEvReadSet step# 6 txid# 143 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 47} 2025-05-07T08:49:35.017516Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3150: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-05-07T08:49:35.017561Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 143 2025-05-07T08:49:35.017701Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269287938, Sender [1:455:2397], Recipient [1:232:2225]: {TEvReadSet step# 6 txid# 146 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 48} 2025-05-07T08:49:35.017731Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3150: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-05-07T08:49:35.017756Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 146 2025-05-07T08:49:35.021366Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269287938, Sender [1:455:2397], Recipient [1:232:2225]: {TEvReadSet step# 6 txid# 149 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 49} 2025-05-07T08:49:35.021423Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3150: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-05-07T08:49:35.021458Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 149 2025-05-07T08:49:35.021570Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269287938, Sender [1:455:2397], Recipient [1:232:2225]: {TEvReadSet step# 6 txid# 152 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 50} 2025-05-07T08:49:35.021601Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3150: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-05-07T08:49:35.021645Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 152 2025-05-07T08:49:35.021708Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269287938, Sender [1:455:2397], Recipient [1:232:2225]: {TEvReadSet step# 6 txid# 122 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 40} 2025-05-07T08:49:35.021735Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3150: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-05-07T08:49:35.021760Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 122 
2025-05-07T08:49:35.021893Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269287938, Sender [1:455:2397], Recipient [1:232:2225]: {TEvReadSet step# 6 txid# 125 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 41} 2025-05-07T08:49:35.021928Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3150: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-05-07T08:49:35.021953Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 125 2025-05-07T08:49:35.022089Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269287938, Sender [1:455:2397], Recipient [1:232:2225]: {TEvReadSet step# 6 txid# 128 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 42} 2025-05-07T08:49:35.022133Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3150: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-05-07T08:49:35.022159Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 128 2025-05-07T08:49:35.022230Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269287938, Sender [1:455:2397], Recipient [1:232:2225]: {TEvReadSet step# 6 txid# 131 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 43} 2025-05-07T08:49:35.022255Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3150: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-05-07T08:49:35.022294Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 131 2025-05-07T08:49:35.022384Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269287938, Sender [1:455:2397], Recipient [1:232:2225]: {TEvReadSet step# 6 txid# 134 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 44} 2025-05-07T08:49:35.022414Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3150: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-05-07T08:49:35.022442Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 134 2025-05-07T08:49:35.022532Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-05-07T08:49:35.022574Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [6:149] at 9437184 on unit CompleteOperation 2025-05-07T08:49:35.022647Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [6 : 149] from 9437184 at tablet 9437184 send result to client [1:99:2134], exec latency: 1 ms, propose latency: 2 ms 2025-05-07T08:49:35.022711Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:563: Send delayed Ack RS Ack at 9437184 {TEvReadSet step# 6 txid# 149 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 97} 2025-05-07T08:49:35.022761Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-05-07T08:49:35.022928Z node 1 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 9437184 2025-05-07T08:49:35.022970Z node 1 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 9437184 2025-05-07T08:49:35.022993Z node 1 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 9437184 
2025-05-07T08:49:35.023035Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-05-07T08:49:35.023065Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [6:151] at 9437184 on unit CompleteOperation 2025-05-07T08:49:35.023105Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [6 : 151] from 9437184 at tablet 9437184 send result to client [1:99:2134], exec latency: 1 ms, propose latency: 2 ms 2025-05-07T08:49:35.023157Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:563: Send delayed Ack RS Ack at 9437184 {TEvReadSet step# 6 txid# 151 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 98} 2025-05-07T08:49:35.023184Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-05-07T08:49:35.023294Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-05-07T08:49:35.023326Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [6:152] at 9437184 on unit CompleteOperation 2025-05-07T08:49:35.023362Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [6 : 152] from 9437184 at tablet 9437184 send result to client [1:99:2134], exec latency: 1 ms, propose latency: 2 ms 2025-05-07T08:49:35.023401Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:563: Send delayed Ack RS Ack at 9437184 {TEvReadSet step# 6 txid# 152 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 99} 2025-05-07T08:49:35.023487Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-05-07T08:49:35.023595Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-05-07T08:49:35.023638Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [6:154] at 9437184 on unit CompleteOperation 2025-05-07T08:49:35.023692Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [6 : 154] from 9437184 at tablet 9437184 send result to client [1:99:2134], exec latency: 1 ms, propose latency: 2 ms 2025-05-07T08:49:35.023742Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:563: Send delayed Ack RS Ack at 9437184 {TEvReadSet step# 6 txid# 154 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 100} 2025-05-07T08:49:35.023770Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-05-07T08:49:35.024029Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269287938, Sender [1:232:2225], Recipient [1:343:2311]: {TEvReadSet step# 6 txid# 149 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 97} 2025-05-07T08:49:35.024087Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3150: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-05-07T08:49:35.024139Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 149 2025-05-07T08:49:35.024249Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269287938, Sender [1:232:2225], Recipient [1:343:2311]: {TEvReadSet step# 6 txid# 151 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 98} 2025-05-07T08:49:35.024280Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3150: StateWork, processing event 
TEvTxProcessing::TEvReadSetAck 2025-05-07T08:49:35.024313Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 151 2025-05-07T08:49:35.024390Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269287938, Sender [1:232:2225], Recipient [1:343:2311]: {TEvReadSet step# 6 txid# 152 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 99} 2025-05-07T08:49:35.024430Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3150: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-05-07T08:49:35.024477Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 152 2025-05-07T08:49:35.024583Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269287938, Sender [1:232:2225], Recipient [1:343:2311]: {TEvReadSet step# 6 txid# 154 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 100} 2025-05-07T08:49:35.024617Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3150: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-05-07T08:49:35.024641Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 154 >> DataShardOutOfOrder::TestPlannedTimeoutSplit [GOOD] >> DataShardOutOfOrder::TestPlannedHalfOverloadedSplit-UseSink >> test_sql_streaming.py::test[suites-ReadTopicGroupWriteToSolomon-default.txt] [FAIL] >> test_sql_streaming.py::test[suites-ReadTopicWithMetadata-default.txt] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_order/unittest >> DataShardScan::ScanFollowedByUpdate [GOOD] Test command err: 2025-05-07T08:49:33.573427Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3098: StateInit, received event# 268828672, Sender [1:109:2140], Recipient [1:131:2154]: NKikimr::TEvTablet::TEvBoot 2025-05-07T08:49:33.574894Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:49:33.574944Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:49:33.585230Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3098: StateInit, received event# 268828673, Sender [1:109:2140], Recipient [1:131:2154]: NKikimr::TEvTablet::TEvRestored 2025-05-07T08:49:33.585709Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 9437184 actor [1:131:2154] 2025-05-07T08:49:33.586005Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-05-07T08:49:33.633109Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3111: StateInactive, received event# 268828684, Sender [1:109:2140], Recipient [1:131:2154]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-05-07T08:49:33.643269Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-05-07T08:49:33.643919Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-05-07T08:49:33.645743Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 9437184 2025-05-07T08:49:33.645826Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 9437184 2025-05-07T08:49:33.645944Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 9437184 2025-05-07T08:49:33.646415Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: 
TDataShard::TTxInit::Complete 2025-05-07T08:49:33.646641Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-05-07T08:49:33.646718Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 9437184 persisting started state actor id [1:198:2154] in generation 2 2025-05-07T08:49:33.710439Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-05-07T08:49:33.767965Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 9437184 2025-05-07T08:49:33.768176Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 9437184 not sending time cast registration request in state WaitScheme: missing processing params 2025-05-07T08:49:33.768289Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 9437184, actorId: [1:213:2211] 2025-05-07T08:49:33.768326Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 9437184 2025-05-07T08:49:33.768358Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 9437184, state: WaitScheme 2025-05-07T08:49:33.768391Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-05-07T08:49:33.768631Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 2146435072, Sender [1:131:2154], Recipient [1:131:2154]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-05-07T08:49:33.768699Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3155: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-05-07T08:49:33.768950Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 9437184 2025-05-07T08:49:33.769070Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 9437184 2025-05-07T08:49:33.769160Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-05-07T08:49:33.769201Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 0 2025-05-07T08:49:33.769239Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 9437184 2025-05-07T08:49:33.769273Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437184 has no attached operations 2025-05-07T08:49:33.769306Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 9437184 2025-05-07T08:49:33.769334Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 9437184 TxInFly 0 2025-05-07T08:49:33.769369Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-05-07T08:49:33.769459Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269877761, Sender [1:209:2208], Recipient [1:131:2154]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-05-07T08:49:33.769496Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3166: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-05-07T08:49:33.769552Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:207:2207], serverId# [1:209:2208], sessionId# [0:0:0] 2025-05-07T08:49:33.772283Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269549568, Sender [1:99:2134], Recipient 
[1:131:2154]: NKikimrTxDataShard.TEvProposeTransaction TxKind: TX_KIND_SCHEME SourceDeprecated { RawX1: 99 RawX2: 4294969430 } TxBody: "\nK\n\006table1\020\r\032\t\n\003key\030\002 \"\032\014\n\005value\030\200$ 8\032\n\n\004uint\030\002 9(\":\n \000Z\006\010\010\030\001(\000J\014/Root/table1" TxId: 1 ExecLevel: 0 Flags: 0 SchemeShardId: 4200 ProcessingParams { } 2025-05-07T08:49:33.772343Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3136: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-05-07T08:49:33.772435Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-05-07T08:49:33.772591Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit CheckSchemeTx 2025-05-07T08:49:33.772634Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 9437184 txId 1 ssId 4200 seqNo 0:0 2025-05-07T08:49:33.772690Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 1 at tablet 9437184 2025-05-07T08:49:33.772739Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is ExecutedNoMoreRestarts 2025-05-07T08:49:33.772775Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit CheckSchemeTx 2025-05-07T08:49:33.772830Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit StoreSchemeTx 2025-05-07T08:49:33.772866Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit StoreSchemeTx 2025-05-07T08:49:33.773167Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayCompleteNoMoreRestarts 2025-05-07T08:49:33.773220Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit StoreSchemeTx 2025-05-07T08:49:33.773260Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit FinishPropose 2025-05-07T08:49:33.773298Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit FinishPropose 2025-05-07T08:49:33.773338Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayComplete 2025-05-07T08:49:33.773370Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit FinishPropose 2025-05-07T08:49:33.773402Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit WaitForPlan 2025-05-07T08:49:33.773433Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit WaitForPlan 2025-05-07T08:49:33.773466Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:1] at 9437184 is not ready to execute on unit WaitForPlan 2025-05-07T08:49:33.785684Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 9437184 2025-05-07T08:49:33.785756Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit StoreSchemeTx 2025-05-07T08:49:33.785788Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit FinishPropose 2025-05-07T08:49:33.785829Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 
1 at tablet 9437184 send to client, exec latency: 0 ms, propose latency: 1 ms, status: PREPARED 2025-05-07T08:49:33.785901Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 9437184 not sending time cast registration request in state WaitScheme 2025-05-07T08:49:33.786439Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269877761, Sender [1:219:2217], Recipient [1:131:2154]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-05-07T08:49:33.786500Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3166: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-05-07T08:49:33.786550Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:218:2216], serverId# [1:219:2217], sessionId# [0:0:0] 2025-05-07T08:49:33.786697Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269287424, Sender [1:99:2134], Recipient [1:131:2154]: {TEvPlanStep step# 1000001 MediatorId# 0 TabletID 9437184} 2025-05-07T08:49:33.786727Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3148: StateWork, processing event TEvTxProcessing::TEvPlanStep 2025-05-07T08:49:33.786868Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1790: Trying to execute [1000001:1] at 9437184 on unit WaitForPlan 2025-05-07T08:49:33.786906Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1805: Execution status for [1000001:1] at 9437184 is Executed 2025-05-07T08:49:33.786959Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000001:1] at 9437184 executing on unit WaitForPlan 2025-05-07T08:49:33.787003Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000001:1] at 9437184 to execution unit PlanQueue 2025-05-07T08:49:33.790729Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 1 at step 1000001 at tablet 9437184 { Transactions { TxId: 1 AckTo { RawX1: 99 RawX2: 4294969430 } } Step: 1000001 MediatorID: 0 TabletID: 9437184 } 2025-05-07T08:49:33.790812Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-05-07T08:49:33.791077Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 2146435072, Sender [1:131:2154], Recipient [1:131:2154]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-05-07T08:49:33.791119Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3155: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-05-07T08:49:33.791192Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-05-07T08:49:33.791238Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 1 2025-05-07T08:49:33.791289Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437184 2025-05-07T08:49:33.791331Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000001:1] in PlanQueue unit at 9437184 2025-05-07T08:49:33.791380Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000001 ... 
ction::Execute at 9437185 2025-05-07T08:49:35.952976Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437185 active 1 active planned 1 immediate 0 planned 1 2025-05-07T08:49:35.953010Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [1000006:36] at 9437185 for ReadTableScan 2025-05-07T08:49:35.953054Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000006:36] at 9437185 on unit ReadTableScan 2025-05-07T08:49:35.953090Z node 1 :TX_DATASHARD TRACE: read_table_scan_unit.cpp:158: ReadTable scan complete for [1000006:36] at 9437185 error: , IsFatalError: 0 2025-05-07T08:49:35.953140Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000006:36] at 9437185 is Executed 2025-05-07T08:49:35.953166Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000006:36] at 9437185 executing on unit ReadTableScan 2025-05-07T08:49:35.953191Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000006:36] at 9437185 to execution unit CompleteOperation 2025-05-07T08:49:35.953214Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000006:36] at 9437185 on unit CompleteOperation 2025-05-07T08:49:35.953405Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000006:36] at 9437185 is DelayComplete 2025-05-07T08:49:35.953442Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000006:36] at 9437185 executing on unit CompleteOperation 2025-05-07T08:49:35.953474Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000006:36] at 9437185 to execution unit CompletedOperations 2025-05-07T08:49:35.953506Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000006:36] at 9437185 on unit CompletedOperations 2025-05-07T08:49:35.953537Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000006:36] at 9437185 is Executed 2025-05-07T08:49:35.953571Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000006:36] at 9437185 executing on unit CompletedOperations 2025-05-07T08:49:35.953596Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [1000006:36] at 9437185 has finished 2025-05-07T08:49:35.953627Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437185 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-05-07T08:49:35.953650Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437185 2025-05-07T08:49:35.953686Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437185 has no attached operations 2025-05-07T08:49:35.953726Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 9437185 2025-05-07T08:49:35.953901Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 2146435072, Sender [1:453:2395], Recipient [1:453:2395]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-05-07T08:49:35.953940Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3155: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-05-07T08:49:35.953996Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437186 2025-05-07T08:49:35.954022Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437186 active 1 active planned 1 
immediate 0 planned 1 2025-05-07T08:49:35.954050Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [1000006:36] at 9437186 for ReadTableScan 2025-05-07T08:49:35.954073Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000006:36] at 9437186 on unit ReadTableScan 2025-05-07T08:49:35.954111Z node 1 :TX_DATASHARD TRACE: read_table_scan_unit.cpp:158: ReadTable scan complete for [1000006:36] at 9437186 error: , IsFatalError: 0 2025-05-07T08:49:35.954140Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000006:36] at 9437186 is Executed 2025-05-07T08:49:35.954163Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000006:36] at 9437186 executing on unit ReadTableScan 2025-05-07T08:49:35.954188Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000006:36] at 9437186 to execution unit CompleteOperation 2025-05-07T08:49:35.954216Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000006:36] at 9437186 on unit CompleteOperation 2025-05-07T08:49:35.954360Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000006:36] at 9437186 is DelayComplete 2025-05-07T08:49:35.954402Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000006:36] at 9437186 executing on unit CompleteOperation 2025-05-07T08:49:35.954429Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000006:36] at 9437186 to execution unit CompletedOperations 2025-05-07T08:49:35.954452Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000006:36] at 9437186 on unit CompletedOperations 2025-05-07T08:49:35.954494Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000006:36] at 9437186 is Executed 2025-05-07T08:49:35.954519Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000006:36] at 9437186 executing on unit CompletedOperations 2025-05-07T08:49:35.954538Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [1000006:36] at 9437186 has finished 2025-05-07T08:49:35.954567Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437186 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-05-07T08:49:35.954598Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437186 2025-05-07T08:49:35.954622Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437186 has no attached operations 2025-05-07T08:49:35.954648Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 9437186 2025-05-07T08:49:35.954760Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 2146435072, Sender [1:233:2226], Recipient [1:233:2226]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-05-07T08:49:35.954792Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3155: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-05-07T08:49:35.954857Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-05-07T08:49:35.954883Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 1 active planned 1 immediate 0 planned 1 2025-05-07T08:49:35.954910Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [1000006:36] at 9437184 for 
ReadTableScan 2025-05-07T08:49:35.954947Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000006:36] at 9437184 on unit ReadTableScan 2025-05-07T08:49:35.954973Z node 1 :TX_DATASHARD TRACE: read_table_scan_unit.cpp:158: ReadTable scan complete for [1000006:36] at 9437184 error: , IsFatalError: 0 2025-05-07T08:49:35.955001Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000006:36] at 9437184 is Executed 2025-05-07T08:49:35.955022Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000006:36] at 9437184 executing on unit ReadTableScan 2025-05-07T08:49:35.955057Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000006:36] at 9437184 to execution unit CompleteOperation 2025-05-07T08:49:35.955080Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000006:36] at 9437184 on unit CompleteOperation 2025-05-07T08:49:35.955202Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000006:36] at 9437184 is DelayComplete 2025-05-07T08:49:35.955246Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000006:36] at 9437184 executing on unit CompleteOperation 2025-05-07T08:49:35.955281Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000006:36] at 9437184 to execution unit CompletedOperations 2025-05-07T08:49:35.955305Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000006:36] at 9437184 on unit CompletedOperations 2025-05-07T08:49:35.955332Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000006:36] at 9437184 is Executed 2025-05-07T08:49:35.955353Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000006:36] at 9437184 executing on unit CompletedOperations 2025-05-07T08:49:35.955374Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [1000006:36] at 9437184 has finished 2025-05-07T08:49:35.955396Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-05-07T08:49:35.955415Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437184 2025-05-07T08:49:35.955439Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437184 has no attached operations 2025-05-07T08:49:35.955463Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 9437184 2025-05-07T08:49:35.968417Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437186 2025-05-07T08:49:35.968492Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437186 2025-05-07T08:49:35.968523Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000006:36] at 9437186 on unit CompleteOperation 2025-05-07T08:49:35.968588Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000006 : 36] from 9437186 at tablet 9437186 send result to client [1:99:2134], exec latency: 3 ms, propose latency: 4 ms 2025-05-07T08:49:35.968656Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437186 2025-05-07T08:49:35.968808Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-05-07T08:49:35.968833Z node 1 :TX_DATASHARD DEBUG: 
datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-05-07T08:49:35.968854Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000006:36] at 9437184 on unit CompleteOperation 2025-05-07T08:49:35.968884Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000006 : 36] from 9437184 at tablet 9437184 send result to client [1:99:2134], exec latency: 3 ms, propose latency: 4 ms 2025-05-07T08:49:35.968910Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-05-07T08:49:35.969088Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437185 2025-05-07T08:49:35.969123Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437185 2025-05-07T08:49:35.969151Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000006:36] at 9437185 on unit CompleteOperation 2025-05-07T08:49:35.969191Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000006 : 36] from 9437185 at tablet 9437185 send result to client [1:99:2134], exec latency: 3 ms, propose latency: 4 ms 2025-05-07T08:49:35.969249Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437185 >> test_sql_streaming.py::test[suites-GroupByHoppingWindowExprKey-default.txt] [FAIL] >> test_sql_streaming.py::test[suites-GroupByHoppingWindowListKey-default.txt] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_olap/unittest >> TOlapNaming::CreateColumnTableFailed [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140] 2025-05-07T08:49:28.108757Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:49:28.108848Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:49:28.108890Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:49:28.108927Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:49:28.108979Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:49:28.109016Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:49:28.109078Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:49:28.109140Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, 
DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:49:28.109824Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:49:28.110095Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:49:28.177820Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:49:28.177865Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:49:28.192563Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:49:28.192791Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:49:28.192963Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:49:28.199643Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:49:28.199973Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:49:28.200599Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:49:28.200799Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:49:28.203590Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:49:28.204867Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:49:28.204932Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:49:28.205021Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:49:28.205068Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:49:28.205165Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:49:28.205394Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:49:28.213797Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T08:49:28.367166Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 
TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:49:28.367413Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:49:28.367696Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:49:28.367966Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:49:28.368036Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:49:28.372631Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:49:28.372780Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:49:28.372989Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:49:28.373049Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:49:28.373090Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:49:28.373126Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:49:28.375378Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:49:28.375461Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:49:28.375515Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:49:28.379285Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:49:28.379350Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:49:28.379399Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:49:28.379452Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:49:28.383372Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 
MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:49:28.386284Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:49:28.386546Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:49:28.387637Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:49:28.387792Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:49:28.387847Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:49:28.388237Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:49:28.388306Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:49:28.388502Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:49:28.388588Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:49:28.391376Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:49:28.391429Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:49:28.391621Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:49:28.391664Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 
node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:49:36.037715Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:49:36.037862Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 131 RawX2: 8589936747 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:49:36.037922Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:49:36.038223Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:49:36.038297Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:49:36.038495Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:49:36.038574Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:49:36.040608Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:49:36.040663Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:49:36.040874Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:49:36.040918Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [2:206:2208], at schemeshard: 72057594046678944, txId: 1, path id: 1 2025-05-07T08:49:36.041260Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:49:36.041318Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046678944] TDone opId# 1:0 ProgressState 2025-05-07T08:49:36.041429Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#1:0 progress is 1/1 2025-05-07T08:49:36.041474Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-05-07T08:49:36.041522Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#1:0 progress is 1/1 2025-05-07T08:49:36.041561Z node 2 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-05-07T08:49:36.041603Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 1, ready parts: 1/1, is published: false 2025-05-07T08:49:36.041649Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-05-07T08:49:36.041688Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 1:0 2025-05-07T08:49:36.041729Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 1:0 2025-05-07T08:49:36.041808Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-05-07T08:49:36.041848Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 1, publications: 1, subscribers: 0 2025-05-07T08:49:36.041886Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 1, [OwnerId: 72057594046678944, LocalPathId: 1], 3 2025-05-07T08:49:36.042505Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-05-07T08:49:36.042613Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-05-07T08:49:36.042662Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1 2025-05-07T08:49:36.042704Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 3 2025-05-07T08:49:36.042748Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:49:36.042869Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1, subscribers: 0 2025-05-07T08:49:36.045998Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1 2025-05-07T08:49:36.046517Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1, at schemeshard: 72057594046678944 TestModificationResults wait txId: 101 2025-05-07T08:49:36.047429Z node 2 :TX_PROXY DEBUG: proxy_impl.cpp:434: actor# [2:269:2260] Bootstrap 2025-05-07T08:49:36.063177Z node 2 :TX_PROXY DEBUG: proxy_impl.cpp:453: actor# [2:269:2260] Become StateWork (SchemeCache [2:274:2265]) 2025-05-07T08:49:36.066396Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateColumnTable CreateColumnTable { Name: "TestTable" Schema { Columns 
{ Name: "Id" Type: "Int32" NotNull: true } Columns { Name: "mess age" Type: "Utf8" } KeyColumnNames: "Id" } } } TxId: 101 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:49:36.066783Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: create_table.cpp:593: TCreateColumnTable Propose, path: /MyRoot/TestTable, opId: 101:0, at schemeshard: 72057594046678944 2025-05-07T08:49:36.067061Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 101:1, propose status:StatusSchemeError, reason: Invalid name for column 'mess age', at schemeshard: 72057594046678944 2025-05-07T08:49:36.068386Z node 2 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [2:269:2260] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-05-07T08:49:36.070936Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 101, response: Status: StatusSchemeError Reason: "Invalid name for column \'mess age\'" TxId: 101 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:49:36.071113Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 101, database: /MyRoot, subject: , status: StatusSchemeError, reason: Invalid name for column 'mess age', operation: CREATE COLUMN TABLE, path: /MyRoot/ 2025-05-07T08:49:36.072473Z node 2 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 TestModificationResult got TxId: 101, wait until txId: 101 TestWaitNotification wait txId: 101 2025-05-07T08:49:36.072704Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 101: send EvNotifyTxCompletion 2025-05-07T08:49:36.072754Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 101 2025-05-07T08:49:36.073189Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 2025-05-07T08:49:36.073299Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-05-07T08:49:36.073345Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [2:284:2275] TestWaitNotification: OK eventTxId 101 TestModificationResults wait txId: 102 2025-05-07T08:49:36.076646Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateColumnTable CreateColumnTable { Name: "TestTable" Schema { Columns { Name: "Id" Type: "Int32" NotNull: true } Columns { Name: "~!@#$%^&*()+=asdfa" Type: "Utf8" } KeyColumnNames: "Id" } } } TxId: 102 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:49:36.076928Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: create_table.cpp:593: TCreateColumnTable Propose, path: /MyRoot/TestTable, opId: 102:0, at schemeshard: 72057594046678944 2025-05-07T08:49:36.077116Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 102:1, propose status:StatusSchemeError, reason: Invalid name for column '~!@#$%^&*()+=asdfa', at schemeshard: 72057594046678944 2025-05-07T08:49:36.079544Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 102, response: Status: 
StatusSchemeError Reason: "Invalid name for column \'~!@#$%^&*()+=asdfa\'" TxId: 102 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:49:36.079681Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 102, database: /MyRoot, subject: , status: StatusSchemeError, reason: Invalid name for column '~!@#$%^&*()+=asdfa', operation: CREATE COLUMN TABLE, path: /MyRoot/ TestModificationResult got TxId: 102, wait until txId: 102 TestWaitNotification wait txId: 102 2025-05-07T08:49:36.079940Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2025-05-07T08:49:36.079977Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 2025-05-07T08:49:36.080293Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 2025-05-07T08:49:36.080379Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-05-07T08:49:36.080411Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [2:291:2282] TestWaitNotification: OK eventTxId 102 >> Viewer::JsonAutocompleteStartOfDatabaseName [GOOD] >> Viewer::JsonStorageListingV1 >> Viewer::JsonAutocompleteEmpty [GOOD] >> Viewer::JsonAutocompleteEndOfDatabaseName >> TStorageTenantTest::CreateTableInsideSubDomain >> TDataShardTrace::TestTraceWriteImmediateOnShard [GOOD] >> TOlap::CreateDropStandaloneTableDefaultSharding [GOOD] >> DataShardOutOfOrder::UncommittedReads [GOOD] >> IncrementalRestoreScan::Empty [GOOD] >> TStorageTenantTest::LsLs >> TStorageTenantTest::CreateSolomonInsideSubDomain >> test_sql_streaming.py::test[suites-ReadWriteSameTopic-default.txt] [FAIL] >> test_sql_streaming.py::test[suites-ReadWriteTopic-default.txt] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_trace/unittest >> TDataShardTrace::TestTraceWriteImmediateOnShard [GOOD] Test command err: 2025-05-07T08:49:31.657364Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:105:2151], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:49:31.668722Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:49:31.669146Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/001de7/r3tmp/tmpVczNkx/pdisk_1.dat 2025-05-07T08:49:34.455593Z node 1 :BS_CONTROLLER ERROR: {BSC07@impl.h:2175} ProcessControllerEvent event processing took too much time Type# 268637706 Duration# 0.243732s 2025-05-07T08:49:34.455738Z node 1 :BS_CONTROLLER ERROR: {BSC00@bsc.cpp:666} StateWork event processing took too much time Type# 2146435078 Duration# 0.243893s 2025-05-07T08:49:34.492792Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-05-07T08:49:34.703487Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:49:34.836676Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:49:34.836834Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:49:34.851211Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:49:35.226769Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480
>> Viewer::JsonAutocompleteSimilarDatabaseName [GOOD]
>> Viewer::JsonAutocompleteSimilarDatabaseNameWithLimit
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_order/unittest >> DataShardOutOfOrder::UncommittedReads [GOOD]
Test command err:
2025-05-07T08:49:36.066193Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:105:2151], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:49:36.066380Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:49:36.066693Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/004513/r3tmp/tmpZtNo62/pdisk_1.dat 2025-05-07T08:49:36.449991Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-05-07T08:49:36.495391Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:49:36.545922Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:49:36.546142Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:49:36.557698Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:49:36.642937Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-05-07T08:49:36.687849Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3098: StateInit, received event# 268828672, Sender [1:656:2563], Recipient [1:665:2569]: NKikimr::TEvTablet::TEvBoot 2025-05-07T08:49:36.689123Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3098: StateInit, received event# 268828673, Sender [1:656:2563], Recipient [1:665:2569]: NKikimr::TEvTablet::TEvRestored 2025-05-07T08:49:36.689573Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:665:2569] 2025-05-07T08:49:36.689997Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-05-07T08:49:36.700536Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3111: StateInactive, received event# 268828684, Sender [1:656:2563], Recipient [1:665:2569]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-05-07T08:49:36.738599Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-05-07T08:49:36.738839Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-05-07T08:49:36.741219Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-05-07T08:49:36.741328Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-05-07T08:49:36.741394Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-05-07T08:49:36.744022Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-05-07T08:49:36.744240Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-05-07T08:49:36.744373Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state 
actor id [1:681:2569] in generation 1 2025-05-07T08:49:36.744966Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-05-07T08:49:36.773539Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-05-07T08:49:36.773820Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-05-07T08:49:36.774007Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:683:2579] 2025-05-07T08:49:36.774055Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-05-07T08:49:36.774100Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-05-07T08:49:36.774141Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T08:49:36.774420Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 2146435072, Sender [1:665:2569], Recipient [1:665:2569]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-05-07T08:49:36.774519Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3155: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-05-07T08:49:36.774911Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-05-07T08:49:36.775033Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-05-07T08:49:36.775147Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T08:49:36.775206Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-05-07T08:49:36.775277Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-05-07T08:49:36.775337Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-05-07T08:49:36.775393Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-05-07T08:49:36.775439Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-05-07T08:49:36.775492Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T08:49:36.776006Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269877761, Sender [1:672:2573], Recipient [1:665:2569]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-05-07T08:49:36.776055Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3166: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-05-07T08:49:36.776110Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:661:2566], serverId# [1:672:2573], sessionId# [0:0:0] 2025-05-07T08:49:36.776200Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269549568, Sender [1:410:2405], Recipient [1:672:2573] 2025-05-07T08:49:36.776252Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3136: StateWork, processing 
event TEvDataShard::TEvProposeTransaction 2025-05-07T08:49:36.776393Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-05-07T08:49:36.776649Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-05-07T08:49:36.776736Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-05-07T08:49:36.776854Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-05-07T08:49:36.776913Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-05-07T08:49:36.776960Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx 2025-05-07T08:49:36.777005Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx 2025-05-07T08:49:36.777055Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-05-07T08:49:36.777420Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayCompleteNoMoreRestarts 2025-05-07T08:49:36.777467Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit StoreSchemeTx 2025-05-07T08:49:36.777524Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit FinishPropose 2025-05-07T08:49:36.777573Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-05-07T08:49:36.777629Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayComplete 2025-05-07T08:49:36.777667Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit FinishPropose 2025-05-07T08:49:36.777729Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit WaitForPlan 2025-05-07T08:49:36.777774Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit WaitForPlan 2025-05-07T08:49:36.777805Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:281474976715657] at 72075186224037888 is not ready to execute on unit WaitForPlan 2025-05-07T08:49:36.778839Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-05-07T08:49:36.778903Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-05-07T08:49:36.778943Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-05-07T08:49:36.779004Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 
281474976715657 at tablet 72075186224037888 send to client, exec latency: 0 ms, propose latency: 0 ms, status: PREPARED 2025-05-07T08:49:36.779071Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-05-07T08:49:36.781623Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269746185, Sender [1:684:2580], Recipient [1:665:2569]: NKikimr::TEvTxProxySchemeCache::TEvWatchNotifyUpdated 2025-05-07T08:49:36.781700Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-05-07T08:49:36.935726Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269877761, Sender [1:699:2589], Recipient [1:665:2569]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-05-07T08:49:36.935815Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3166: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-05-07T08:49:36.935865Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075 ... arsing write transaction for 0 at 72075186224037888, record: Operations { Type: OPERATION_UPSERT TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } ColumnIds: 1 ColumnIds: 2 PayloadIndex: 0 PayloadFormat: FORMAT_CELLVEC } TxMode: MODE_IMMEDIATE 2025-05-07T08:49:38.213051Z node 1 :TX_DATASHARD TRACE: datashard_write_operation.cpp:190: Table /Root/table-1, shard: 72075186224037888, write point (Uint32 : 4) 2025-05-07T08:49:38.213111Z node 1 :TX_DATASHARD TRACE: key_validator.cpp:54: -- AddWriteRange: (Uint32 : 4) table: [72057594046644480:2:1] 2025-05-07T08:49:38.213225Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:6] at 72075186224037888 on unit CheckWrite 2025-05-07T08:49:38.213283Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:6] at 72075186224037888 is Executed 2025-05-07T08:49:38.213321Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:6] at 72075186224037888 executing on unit CheckWrite 2025-05-07T08:49:38.213420Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:6] at 72075186224037888 to execution unit BuildAndWaitDependencies 2025-05-07T08:49:38.213458Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:6] at 72075186224037888 on unit BuildAndWaitDependencies 2025-05-07T08:49:38.213504Z node 1 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 72075186224037888 CompleteEdge# v2500/0 IncompleteEdge# v{min} UnprotectedReadEdge# v2000/18446744073709551615 ImmediateWriteEdge# v2500/18446744073709551615 ImmediateWriteEdgeReplied# v2500/18446744073709551615 2025-05-07T08:49:38.213579Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:6] at 72075186224037888 2025-05-07T08:49:38.213616Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:6] at 72075186224037888 is Executed 2025-05-07T08:49:38.213642Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:6] at 72075186224037888 executing on unit BuildAndWaitDependencies 2025-05-07T08:49:38.213666Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:6] at 72075186224037888 to execution unit ExecuteWrite 2025-05-07T08:49:38.213701Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:6] at 
72075186224037888 on unit ExecuteWrite 2025-05-07T08:49:38.213734Z node 1 :TX_DATASHARD DEBUG: execute_write_unit.cpp:245: Executing write operation for [0:6] at 72075186224037888 2025-05-07T08:49:38.213790Z node 1 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 72075186224037888 CompleteEdge# v2500/0 IncompleteEdge# v{min} UnprotectedReadEdge# v2000/18446744073709551615 ImmediateWriteEdge# v2500/18446744073709551615 ImmediateWriteEdgeReplied# v2500/18446744073709551615 2025-05-07T08:49:38.213922Z node 1 :TX_DATASHARD DEBUG: execute_write_unit.cpp:410: Executed write operation for [0:6] at 72075186224037888, row count=1 2025-05-07T08:49:38.213966Z node 1 :TX_DATASHARD TRACE: execute_write_unit.cpp:47: add locks to result: 0 2025-05-07T08:49:38.214048Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:6] at 72075186224037888 is ExecutedNoMoreRestarts 2025-05-07T08:49:38.214101Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:6] at 72075186224037888 executing on unit ExecuteWrite 2025-05-07T08:49:38.214152Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:6] at 72075186224037888 to execution unit FinishProposeWrite 2025-05-07T08:49:38.214195Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:6] at 72075186224037888 on unit FinishProposeWrite 2025-05-07T08:49:38.214232Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:6] at 72075186224037888 is DelayComplete 2025-05-07T08:49:38.214259Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:6] at 72075186224037888 executing on unit FinishProposeWrite 2025-05-07T08:49:38.214293Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:6] at 72075186224037888 to execution unit CompletedOperations 2025-05-07T08:49:38.214326Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:6] at 72075186224037888 on unit CompletedOperations 2025-05-07T08:49:38.214370Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:6] at 72075186224037888 is Executed 2025-05-07T08:49:38.214390Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:6] at 72075186224037888 executing on unit CompletedOperations 2025-05-07T08:49:38.214416Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:6] at 72075186224037888 has finished ... blocked commit for tablet 72075186224037888 2025-05-07T08:49:38.330524Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715665. Ctx: { TraceId: 01jtmyzad79wfkfnpjdvvt7f9x, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YzU0MmI2MmMtNWZiYTZmNzQtZGZiOWE1YjItZTA3ZGQ1NDI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-05-07T08:49:38.331897Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269553215, Sender [1:967:2761], Recipient [1:665:2569]: NKikimrTxDataShard.TEvRead ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 ResultFormat: FORMAT_CELLVEC MaxRows: 1001 MaxBytes: 5242880 Reverse: false TotalRowsLimit: 1001 RangesSize: 1 2025-05-07T08:49:38.332067Z node 1 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2435: TTxReadViaPipeline execute: at tablet# 72075186224037888, FollowerId 0 2025-05-07T08:49:38.332116Z node 1 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 72075186224037888 CompleteEdge# v2500/0 IncompleteEdge# v{min} UnprotectedReadEdge# v2000/18446744073709551615 ImmediateWriteEdge# v2500/18446744073709551615 ImmediateWriteEdgeReplied# v2500/18446744073709551615 2025-05-07T08:49:38.332158Z node 1 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2538: 72075186224037888 changed HEAD read to non-repeatable v2500/18446744073709551615 2025-05-07T08:49:38.332201Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:7] at 72075186224037888 on unit CheckRead 2025-05-07T08:49:38.332267Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:7] at 72075186224037888 is Executed 2025-05-07T08:49:38.332293Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:7] at 72075186224037888 executing on unit CheckRead 2025-05-07T08:49:38.332318Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:7] at 72075186224037888 to execution unit BuildAndWaitDependencies 2025-05-07T08:49:38.332343Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:7] at 72075186224037888 on unit BuildAndWaitDependencies 2025-05-07T08:49:38.332400Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:7] at 72075186224037888 2025-05-07T08:49:38.332430Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:7] at 72075186224037888 is Executed 2025-05-07T08:49:38.332445Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:7] at 72075186224037888 executing on unit BuildAndWaitDependencies 2025-05-07T08:49:38.332458Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:7] at 72075186224037888 to execution unit ExecuteRead 2025-05-07T08:49:38.332485Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:7] at 72075186224037888 on unit ExecuteRead 2025-05-07T08:49:38.332567Z node 1 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1566: 72075186224037888 Execute read# 1, request: { ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 ResultFormat: FORMAT_CELLVEC MaxRows: 1001 MaxBytes: 5242880 Reverse: false TotalRowsLimit: 1001 } 2025-05-07T08:49:38.332744Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:7] at 72075186224037888 is DelayComplete 2025-05-07T08:49:38.332773Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:7] at 72075186224037888 executing on unit ExecuteRead 2025-05-07T08:49:38.332796Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:7] at 72075186224037888 to execution unit CompletedOperations 2025-05-07T08:49:38.332818Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:7] at 72075186224037888 on unit 
CompletedOperations 2025-05-07T08:49:38.332850Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:7] at 72075186224037888 is Executed 2025-05-07T08:49:38.332875Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:7] at 72075186224037888 executing on unit CompletedOperations 2025-05-07T08:49:38.332896Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:7] at 72075186224037888 has finished 2025-05-07T08:49:38.332923Z node 1 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2670: TTxReadViaPipeline(69) Execute with status# Executed at tablet# 72075186224037888 2025-05-07T08:49:38.448374Z node 1 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:692: Actor# [1:24:2071] HANDLE NKikimrTxMediatorTimecast.TEvGranularUpdate Mediator: 72057594046382081 Bucket: 0 SubscriptionId: 1 LatestStep: 3000 2025-05-07T08:49:38.448487Z node 1 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:625: Actor# [1:24:2071] HANDLE {TEvUpdate Mediator# 72057594046382081 Bucket# 0 TimeBarrier# 3000} 2025-05-07T08:49:38.599333Z node 1 :TX_DATASHARD TRACE: datashard__write.cpp:150: TTxWrite complete: at tablet# 72075186224037888 2025-05-07T08:49:38.599446Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:6] at 72075186224037888 on unit FinishProposeWrite 2025-05-07T08:49:38.599502Z node 1 :TX_DATASHARD TRACE: finish_propose_write_unit.cpp:163: Propose transaction complete txid 6 at tablet 72075186224037888 send to client, propose latency: 1000 ms, status: STATUS_COMPLETED 2025-05-07T08:49:38.599622Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T08:49:38.599732Z node 1 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2719: TTxReadViaPipeline(69) Complete: at tablet# 72075186224037888 2025-05-07T08:49:38.599790Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:7] at 72075186224037888 on unit ExecuteRead 2025-05-07T08:49:38.599847Z node 1 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2146: 72075186224037888 Complete read# {[1:967:2761], 0} after executionsCount# 1 2025-05-07T08:49:38.599892Z node 1 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2120: 72075186224037888 read iterator# {[1:967:2761], 0} sends rowCount# 4, bytes# 128, quota rows left# 997, quota bytes left# 5242752, hasUnreadQueries# 0, total queries# 1, firstUnprocessed# 0 2025-05-07T08:49:38.599984Z node 1 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2171: 72075186224037888 read iterator# {[1:967:2761], 0} finished in read 2025-05-07T08:49:38.601998Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269553219, Sender [1:967:2761], Recipient [1:665:2569]: NKikimrTxDataShard.TEvReadCancel ReadId: 0 2025-05-07T08:49:38.602089Z node 1 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3392: 72075186224037888 ReadCancel: { ReadId: 0 } { items { uint32_value: 1 } items { uint32_value: 1 } }, { items { uint32_value: 2 } items { uint32_value: 2 } }, { items { uint32_value: 3 } items { uint32_value: 3 } }, { items { uint32_value: 4 } items { uint32_value: 4 } }
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_olap/unittest >> TOlap::CreateDropStandaloneTableDefaultSharding [GOOD]
Test command err:
Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058]
recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140] 2025-05-07T08:49:27.979126Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:49:27.979212Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:49:27.979252Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:49:27.979310Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:49:27.979358Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:49:27.979387Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:49:27.979439Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:49:27.979506Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:49:27.980253Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:49:27.980571Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:49:28.054113Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:49:28.054166Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:49:28.067267Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:49:28.067439Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:49:28.067585Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:49:28.073549Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:49:28.073875Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:49:28.074579Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:49:28.074768Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:49:28.077750Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, 
at schemeshard: 72057594046678944 2025-05-07T08:49:28.079290Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:49:28.079357Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:49:28.079432Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:49:28.079483Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:49:28.079633Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:49:28.079888Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:49:28.087021Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T08:49:28.221757Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:49:28.221930Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:49:28.222227Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:49:28.222555Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:49:28.222638Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:49:28.225394Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:49:28.225537Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:49:28.225791Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:49:28.225862Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:49:28.225910Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 
ProgressState no shards to create, do next state 2025-05-07T08:49:28.225953Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:49:28.228595Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:49:28.228663Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:49:28.228732Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:49:28.230596Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:49:28.230661Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:49:28.230714Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:49:28.230774Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:49:28.241228Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:49:28.243674Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:49:28.243927Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:49:28.245248Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:49:28.245415Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:49:28.245492Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:49:28.245835Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:49:28.245903Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:49:28.246124Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:49:28.246208Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:49:28.249219Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:49:28.249281Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:49:28.249520Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:49:28.249576Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... :180: Close pipe to deleted shardIdx 72057594046678944:59 tabletId 72075186233409604 2025-05-07T08:49:38.274939Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:6 2025-05-07T08:49:38.274968Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:6 tabletId 72075186233409551 2025-05-07T08:49:38.276193Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2 2025-05-07T08:49:38.276237Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:2 tabletId 72075186233409547 2025-05-07T08:49:38.276383Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:4 2025-05-07T08:49:38.276409Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:4 tabletId 72075186233409549 2025-05-07T08:49:38.277894Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:23 2025-05-07T08:49:38.277933Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:23 tabletId 72075186233409568 2025-05-07T08:49:38.278055Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:21 2025-05-07T08:49:38.278084Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:21 tabletId 72075186233409566 2025-05-07T08:49:38.278377Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:19 2025-05-07T08:49:38.278409Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:19 tabletId 72075186233409564 2025-05-07T08:49:38.278492Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:17 2025-05-07T08:49:38.278518Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:17 tabletId 72075186233409562 
2025-05-07T08:49:38.279155Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:15 2025-05-07T08:49:38.279192Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:15 tabletId 72075186233409560 2025-05-07T08:49:38.279837Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:13 2025-05-07T08:49:38.279876Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:13 tabletId 72075186233409558 2025-05-07T08:49:38.281402Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:11 2025-05-07T08:49:38.281441Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:11 tabletId 72075186233409556 2025-05-07T08:49:38.281564Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:9 2025-05-07T08:49:38.281592Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:9 tabletId 72075186233409554 2025-05-07T08:49:38.284379Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:36 2025-05-07T08:49:38.284424Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:36 tabletId 72075186233409581 2025-05-07T08:49:38.284582Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:38 2025-05-07T08:49:38.284612Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:38 tabletId 72075186233409583 2025-05-07T08:49:38.284841Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:40 2025-05-07T08:49:38.284873Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:40 tabletId 72075186233409585 2025-05-07T08:49:38.285078Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:32 2025-05-07T08:49:38.285108Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:32 tabletId 72075186233409577 2025-05-07T08:49:38.285215Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:34 2025-05-07T08:49:38.285242Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:34 tabletId 72075186233409579 2025-05-07T08:49:38.285362Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:28 2025-05-07T08:49:38.285391Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:28 tabletId 72075186233409573 2025-05-07T08:49:38.285453Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:30 2025-05-07T08:49:38.285477Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 
72057594046678944:30 tabletId 72075186233409575 2025-05-07T08:49:38.285554Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:26 2025-05-07T08:49:38.285581Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:26 tabletId 72075186233409571 2025-05-07T08:49:38.285676Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:24 2025-05-07T08:49:38.285702Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:24 tabletId 72075186233409569 2025-05-07T08:49:38.294460Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:57 2025-05-07T08:49:38.294538Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:57 tabletId 72075186233409602 2025-05-07T08:49:38.294669Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:53 2025-05-07T08:49:38.294699Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:53 tabletId 72075186233409598 2025-05-07T08:49:38.294803Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:55 2025-05-07T08:49:38.294852Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:55 tabletId 72075186233409600 2025-05-07T08:49:38.294932Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:49 2025-05-07T08:49:38.294962Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:49 tabletId 72075186233409594 2025-05-07T08:49:38.295036Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:47 2025-05-07T08:49:38.295066Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:47 tabletId 72075186233409592 2025-05-07T08:49:38.295160Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:51 2025-05-07T08:49:38.295191Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:51 tabletId 72075186233409596 2025-05-07T08:49:38.295259Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:45 2025-05-07T08:49:38.295287Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:45 tabletId 72075186233409590 2025-05-07T08:49:38.295352Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:43 2025-05-07T08:49:38.295379Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:43 tabletId 72075186233409588 2025-05-07T08:49:38.298922Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:41 2025-05-07T08:49:38.299019Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:41 tabletId 72075186233409586 2025-05-07T08:49:38.299220Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 105 2025-05-07T08:49:38.300382Z node 3 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/MyDir/ColumnTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T08:49:38.300662Z node 3 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/MyDir/ColumnTable" took 302us result status StatusPathDoesNotExist 2025-05-07T08:49:38.300851Z node 3 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/MyDir/ColumnTable\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot/MyDir\' (id: [OwnerId: 72057594046678944, LocalPathId: 2])" Path: "/MyRoot/MyDir/ColumnTable" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot/MyDir" LastExistedPrefixPathId: 2 LastExistedPrefixDescription { Self { Name: "MyDir" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-05-07T08:49:38.301627Z node 3 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: PathId: 4 SchemeshardId: 72057594046678944 Options { }, at schemeshard: 72057594046678944 2025-05-07T08:49:38.301736Z node 3 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:44: Tablet 72057594046678944 describe pathId 4 took 117us result status StatusPathDoesNotExist 2025-05-07T08:49:38.301830Z node 3 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'\', error: path is empty" Path: "" PathId: 4 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_incremental_restore_scan/unittest >> IncrementalRestoreScan::Empty [GOOD] Test command err: 2025-05-07T08:49:36.196723Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:105:2151], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:49:36.216788Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:49:36.217259Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/004885/r3tmp/tmpB1J4Kl/pdisk_1.dat 2025-05-07T08:49:38.516425Z node 1 :CHANGE_EXCHANGE DEBUG: incr_restore_scan.cpp:177: [TIncrementalRestoreScan][1337][OwnerId: 1, LocalPathId: 2][OwnerId: 3, LocalPathId: 4][1:598:2522] Exhausted 2025-05-07T08:49:38.516589Z node 1 :CHANGE_EXCHANGE DEBUG: incr_restore_scan.cpp:126: [TIncrementalRestoreScan][1337][OwnerId: 1, LocalPathId: 2][OwnerId: 3, LocalPathId: 4][1:598:2522] Handle TEvIncrementalRestoreScan::TEvFinished NKikimr::NDataShard::TEvIncrementalRestoreScan::TEvFinished 2025-05-07T08:49:38.516638Z node 1 :CHANGE_EXCHANGE DEBUG: incr_restore_scan.cpp:190: [TIncrementalRestoreScan][1337][OwnerId: 1, LocalPathId: 2][OwnerId: 3, LocalPathId: 4][1:598:2522] Finish 0 >> DataShardOutOfOrder::TestSnapshotReadAfterStuckRW [GOOD] >> IncrementalRestoreScan::ChangeSenderEmpty [GOOD] >> IncrementalRestoreScan::ChangeSenderSimple [GOOD] >> Viewer::JsonAutocompleteColumns [GOOD] >> TOlapNaming::AlterColumnTableFailed [GOOD] |88.7%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest >> test_sql_streaming.py::test[suites-GroupByHopListKey-default.txt] [FAIL] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_order/unittest >> DataShardOutOfOrder::TestSnapshotReadAfterStuckRW [GOOD] >> test_sql_streaming.py::test[suites-GroupByHopNoKey-default.txt] Test command err: 2025-05-07T08:49:36.660131Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:105:2151], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:49:36.660286Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:49:36.660580Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/004504/r3tmp/tmpKTkMrs/pdisk_1.dat 2025-05-07T08:49:37.049703Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-05-07T08:49:37.091200Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:49:37.144718Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:49:37.144860Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:49:37.156547Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:49:37.240565Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-05-07T08:49:37.278677Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3098: StateInit, received event# 268828672, Sender [1:656:2563], Recipient [1:665:2569]: NKikimr::TEvTablet::TEvBoot 2025-05-07T08:49:37.279629Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3098: StateInit, received event# 268828673, Sender [1:656:2563], Recipient [1:665:2569]: NKikimr::TEvTablet::TEvRestored 2025-05-07T08:49:37.279973Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:665:2569] 2025-05-07T08:49:37.280174Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-05-07T08:49:37.288218Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3111: StateInactive, received event# 268828684, Sender [1:656:2563], Recipient [1:665:2569]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-05-07T08:49:37.315967Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-05-07T08:49:37.316097Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-05-07T08:49:37.317359Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-05-07T08:49:37.317424Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-05-07T08:49:37.317469Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-05-07T08:49:37.317728Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-05-07T08:49:37.317837Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-05-07T08:49:37.317892Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state 
actor id [1:681:2569] in generation 1 2025-05-07T08:49:37.328649Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-05-07T08:49:37.373994Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-05-07T08:49:37.374255Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-05-07T08:49:37.374391Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:683:2579] 2025-05-07T08:49:37.374436Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-05-07T08:49:37.374475Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-05-07T08:49:37.374509Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T08:49:37.374754Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 2146435072, Sender [1:665:2569], Recipient [1:665:2569]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-05-07T08:49:37.374841Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3155: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-05-07T08:49:37.375191Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-05-07T08:49:37.375293Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-05-07T08:49:37.375380Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T08:49:37.375430Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-05-07T08:49:37.375492Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-05-07T08:49:37.375542Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-05-07T08:49:37.375587Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-05-07T08:49:37.375622Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-05-07T08:49:37.375668Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T08:49:37.375804Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269877761, Sender [1:672:2573], Recipient [1:665:2569]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-05-07T08:49:37.375844Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3166: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-05-07T08:49:37.375886Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:661:2566], serverId# [1:672:2573], sessionId# [0:0:0] 2025-05-07T08:49:37.376372Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269549568, Sender [1:410:2405], Recipient [1:672:2573] 2025-05-07T08:49:37.376435Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3136: StateWork, processing 
event TEvDataShard::TEvProposeTransaction 2025-05-07T08:49:37.376554Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-05-07T08:49:37.376786Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-05-07T08:49:37.376835Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-05-07T08:49:37.376923Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-05-07T08:49:37.376978Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-05-07T08:49:37.377019Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx 2025-05-07T08:49:37.377055Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx 2025-05-07T08:49:37.377093Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-05-07T08:49:37.377318Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayCompleteNoMoreRestarts 2025-05-07T08:49:37.377347Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit StoreSchemeTx 2025-05-07T08:49:37.377373Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit FinishPropose 2025-05-07T08:49:37.377407Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-05-07T08:49:37.377457Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayComplete 2025-05-07T08:49:37.377488Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit FinishPropose 2025-05-07T08:49:37.377524Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit WaitForPlan 2025-05-07T08:49:37.377551Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit WaitForPlan 2025-05-07T08:49:37.377592Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:281474976715657] at 72075186224037888 is not ready to execute on unit WaitForPlan 2025-05-07T08:49:37.378863Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269746185, Sender [1:684:2580], Recipient [1:665:2569]: NKikimr::TEvTxProxySchemeCache::TEvWatchNotifyUpdated 2025-05-07T08:49:37.378910Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-05-07T08:49:37.389692Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-05-07T08:49:37.389783Z node 1 
:TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-05-07T08:49:37.389819Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-05-07T08:49:37.389866Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 281474976715657 at tablet 72075186224037888 send to client, exec latency: 0 ms, propose latency: 0 ms, status: PREPARED 2025-05-07T08:49:37.389927Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-05-07T08:49:37.541498Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269877761, Sender [1:699:2589], Recipient [1:665:2569]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-05-07T08:49:37.541568Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3166: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-05-07T08:49:37.541603Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075 ... d_outreadset.cpp:150: Receive RS Ack at 72075186224037888 source 72075186224037888 dest 72075186224037889 consumer 72075186224037889 txId 281474976715664 ... performing the first select 2025-05-07T08:49:40.067980Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715665. Ctx: { TraceId: 01jtmyzbsw6dq8p523b94rnxsv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=M2JkOTRlNmItYjBiOTY1Y2YtZTZjMjU1MjktMTIzNTVhZmU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:49:40.073028Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269553215, Sender [1:1074:2844], Recipient [1:665:2569]: NKikimrTxDataShard.TEvRead ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 Snapshot { Step: 4000 TxId: 18446744073709551615 } LockTxId: 281474976715665 ResultFormat: FORMAT_CELLVEC MaxRows: 1001 MaxBytes: 5242880 Reverse: false LockNodeId: 1 TotalRowsLimit: 1001 LockMode: OPTIMISTIC KeysSize: 1 2025-05-07T08:49:40.073362Z node 1 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2435: TTxReadViaPipeline execute: at tablet# 72075186224037888, FollowerId 0 2025-05-07T08:49:40.073464Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:5] at 72075186224037888 on unit CheckRead 2025-05-07T08:49:40.073568Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:5] at 72075186224037888 is Executed 2025-05-07T08:49:40.073621Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:5] at 72075186224037888 executing on unit CheckRead 2025-05-07T08:49:40.073684Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:5] at 72075186224037888 to execution unit BuildAndWaitDependencies 2025-05-07T08:49:40.073736Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:5] at 72075186224037888 on unit BuildAndWaitDependencies 2025-05-07T08:49:40.073795Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:5] at 72075186224037888 2025-05-07T08:49:40.073847Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:5] at 72075186224037888 is Executed 2025-05-07T08:49:40.073890Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for 
[0:5] at 72075186224037888 executing on unit BuildAndWaitDependencies 2025-05-07T08:49:40.073926Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:5] at 72075186224037888 to execution unit ExecuteRead 2025-05-07T08:49:40.073953Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:5] at 72075186224037888 on unit ExecuteRead 2025-05-07T08:49:40.074148Z node 1 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1566: 72075186224037888 Execute read# 1, request: { ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 Snapshot { Step: 4000 TxId: 18446744073709551615 } LockTxId: 281474976715665 ResultFormat: FORMAT_CELLVEC MaxRows: 1001 MaxBytes: 5242880 Reverse: false LockNodeId: 1 TotalRowsLimit: 1001 LockMode: OPTIMISTIC } 2025-05-07T08:49:40.074479Z node 1 :TX_DATASHARD DEBUG: datashard__read_iterator.cpp:2410: 72075186224037888 Acquired lock# 281474976715665, counter# 1 for [OwnerId: 72057594046644480, LocalPathId: 2] 2025-05-07T08:49:40.074544Z node 1 :TX_DATASHARD TRACE: datashard.cpp:2476: PromoteImmediatePostExecuteEdges at 72075186224037888 promoting UnprotectedReadEdge to v4000/18446744073709551615 2025-05-07T08:49:40.074600Z node 1 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2146: 72075186224037888 Complete read# {[1:1074:2844], 0} after executionsCount# 1 2025-05-07T08:49:40.074673Z node 1 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2120: 72075186224037888 read iterator# {[1:1074:2844], 0} sends rowCount# 1, bytes# 32, quota rows left# 1000, quota bytes left# 5242848, hasUnreadQueries# 0, total queries# 1, firstUnprocessed# 0 2025-05-07T08:49:40.074805Z node 1 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2171: 72075186224037888 read iterator# {[1:1074:2844], 0} finished in read 2025-05-07T08:49:40.074923Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:5] at 72075186224037888 is Executed 2025-05-07T08:49:40.074968Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:5] at 72075186224037888 executing on unit ExecuteRead 2025-05-07T08:49:40.074997Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:5] at 72075186224037888 to execution unit CompletedOperations 2025-05-07T08:49:40.075028Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:5] at 72075186224037888 on unit CompletedOperations 2025-05-07T08:49:40.075102Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:5] at 72075186224037888 is Executed 2025-05-07T08:49:40.075132Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:5] at 72075186224037888 executing on unit CompletedOperations 2025-05-07T08:49:40.075174Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:5] at 72075186224037888 has finished 2025-05-07T08:49:40.075221Z node 1 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2670: TTxReadViaPipeline(69) Execute with status# Executed at tablet# 72075186224037888 2025-05-07T08:49:40.075347Z node 1 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2719: TTxReadViaPipeline(69) Complete: at tablet# 72075186224037888 2025-05-07T08:49:40.075839Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 275709965, Sender [1:61:2108], Recipient [1:665:2569]: NKikimrLongTxService.TEvLockStatus LockId: 281474976715665 LockNode: 1 Status: STATUS_SUBSCRIBED 2025-05-07T08:49:40.076031Z node 1 :TX_DATASHARD TRACE: 
datashard_impl.h:3130: StateWork, received event# 269553215, Sender [1:1076:2845], Recipient [1:750:2628]: NKikimrTxDataShard.TEvRead ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 3 SchemaVersion: 1 } Columns: 1 Columns: 2 Snapshot { Step: 4000 TxId: 18446744073709551615 } LockTxId: 281474976715665 ResultFormat: FORMAT_CELLVEC MaxRows: 1001 MaxBytes: 5242880 Reverse: false LockNodeId: 1 TotalRowsLimit: 1001 LockMode: OPTIMISTIC KeysSize: 1 2025-05-07T08:49:40.076229Z node 1 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2435: TTxReadViaPipeline execute: at tablet# 72075186224037889, FollowerId 0 2025-05-07T08:49:40.076294Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:5] at 72075186224037889 on unit CheckRead 2025-05-07T08:49:40.076367Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:5] at 72075186224037889 is Executed 2025-05-07T08:49:40.076403Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:5] at 72075186224037889 executing on unit CheckRead 2025-05-07T08:49:40.076430Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:5] at 72075186224037889 to execution unit BuildAndWaitDependencies 2025-05-07T08:49:40.076457Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:5] at 72075186224037889 on unit BuildAndWaitDependencies 2025-05-07T08:49:40.076499Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:5] at 72075186224037889 2025-05-07T08:49:40.076552Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:5] at 72075186224037889 is Executed 2025-05-07T08:49:40.076598Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:5] at 72075186224037889 executing on unit BuildAndWaitDependencies 2025-05-07T08:49:40.076625Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:5] at 72075186224037889 to execution unit ExecuteRead 2025-05-07T08:49:40.076649Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:5] at 72075186224037889 on unit ExecuteRead 2025-05-07T08:49:40.076746Z node 1 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1566: 72075186224037889 Execute read# 1, request: { ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 3 SchemaVersion: 1 } Columns: 1 Columns: 2 Snapshot { Step: 4000 TxId: 18446744073709551615 } LockTxId: 281474976715665 ResultFormat: FORMAT_CELLVEC MaxRows: 1001 MaxBytes: 5242880 Reverse: false LockNodeId: 1 TotalRowsLimit: 1001 LockMode: OPTIMISTIC } 2025-05-07T08:49:40.076992Z node 1 :TX_DATASHARD DEBUG: datashard__read_iterator.cpp:2410: 72075186224037889 Acquired lock# 281474976715665, counter# 1 for [OwnerId: 72057594046644480, LocalPathId: 3] 2025-05-07T08:49:40.077033Z node 1 :TX_DATASHARD TRACE: datashard.cpp:2476: PromoteImmediatePostExecuteEdges at 72075186224037889 promoting UnprotectedReadEdge to v4000/18446744073709551615 2025-05-07T08:49:40.077073Z node 1 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2146: 72075186224037889 Complete read# {[1:1076:2845], 0} after executionsCount# 1 2025-05-07T08:49:40.077120Z node 1 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2120: 72075186224037889 read iterator# {[1:1076:2845], 0} sends rowCount# 1, bytes# 32, quota rows left# 1000, quota bytes left# 5242848, hasUnreadQueries# 0, total queries# 1, firstUnprocessed# 0 2025-05-07T08:49:40.077207Z node 1 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2171: 72075186224037889 read 
iterator# {[1:1076:2845], 0} finished in read 2025-05-07T08:49:40.077348Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:5] at 72075186224037889 is Executed 2025-05-07T08:49:40.077388Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:5] at 72075186224037889 executing on unit ExecuteRead 2025-05-07T08:49:40.077416Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:5] at 72075186224037889 to execution unit CompletedOperations 2025-05-07T08:49:40.077443Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:5] at 72075186224037889 on unit CompletedOperations 2025-05-07T08:49:40.077491Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:5] at 72075186224037889 is Executed 2025-05-07T08:49:40.077515Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:5] at 72075186224037889 executing on unit CompletedOperations 2025-05-07T08:49:40.077558Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:5] at 72075186224037889 has finished 2025-05-07T08:49:40.077590Z node 1 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2670: TTxReadViaPipeline(69) Execute with status# Executed at tablet# 72075186224037889 2025-05-07T08:49:40.077682Z node 1 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2719: TTxReadViaPipeline(69) Complete: at tablet# 72075186224037889 2025-05-07T08:49:40.077955Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 275709965, Sender [1:61:2108], Recipient [1:750:2628]: NKikimrLongTxService.TEvLockStatus LockId: 281474976715665 LockNode: 1 Status: STATUS_SUBSCRIBED 2025-05-07T08:49:40.079266Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269553219, Sender [1:1074:2844], Recipient [1:665:2569]: NKikimrTxDataShard.TEvReadCancel ReadId: 0 2025-05-07T08:49:40.079342Z node 1 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3392: 72075186224037888 ReadCancel: { ReadId: 0 } 2025-05-07T08:49:40.081362Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269553219, Sender [1:1076:2845], Recipient [1:750:2628]: NKikimrTxDataShard.TEvReadCancel ReadId: 0 2025-05-07T08:49:40.081443Z node 1 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3392: 72075186224037889 ReadCancel: { ReadId: 0 } { items { uint32_value: 1 } items { uint32_value: 1 } }, { items { uint32_value: 2 } items { uint32_value: 2 } } |88.8%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest >> TSchemeShardTest::AlterIndexTableDirectly [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_incremental_restore_scan/unittest >> IncrementalRestoreScan::ChangeSenderEmpty [GOOD] Test command err: 2025-05-07T08:49:36.195389Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:105:2151], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:49:36.216817Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:49:36.217290Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/004888/r3tmp/tmp1L9n4i/pdisk_1.dat 2025-05-07T08:49:38.695231Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "Root" StoragePools { Name: "/Root:test" Kind: "test" } } } TxId: 1 TabletId: 72057594046644480 , at schemeshard: 72057594046644480 2025-05-07T08:49:38.695555Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //Root, opId: 1:0, at schemeshard: 72057594046644480 2025-05-07T08:49:38.696904Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 0 2025-05-07T08:49:38.703372Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-05-07T08:49:38.703536Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-05-07T08:49:38.717361Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046644480 PathId: 1, at schemeshard: 72057594046644480 2025-05-07T08:49:38.768684Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //Root 2025-05-07T08:49:38.769090Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046644480 2025-05-07T08:49:38.769168Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046644480 2025-05-07T08:49:38.769213Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:49:38.769254Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:49:38.770085Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046644480 2025-05-07T08:49:38.770150Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046644480 2025-05-07T08:49:38.770196Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:49:38.782072Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046644480 2025-05-07T08:49:38.782148Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046644480 2025-05-07T08:49:38.782204Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046644480 2025-05-07T08:49:38.782255Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:49:38.786171Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046644480 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:49:38.786896Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046644480 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:49:38.795721Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 2025-05-07T08:49:38.797078Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 1, at schemeshard: 72057594046644480 2025-05-07T08:49:38.797140Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 1, ready parts: 0/1, is published: true 2025-05-07T08:49:38.797180Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 1, at schemeshard: 72057594046644480 2025-05-07T08:49:38.913273Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7610: Got new config: QueryServiceConfig { AvailableExternalDataSources: "ObjectStorage" } 2025-05-07T08:49:38.913348Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:49:38.914047Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# ObjectStorage 2025-05-07T08:49:39.004405Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:59:2106] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-05-07T08:49:39.005370Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 2025-05-07T08:49:39.005675Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:49:39.011844Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:49:39.034757Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:49:39.167338Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 500, transactions count in step: 1, at schemeshard: 72057594046644480 2025-05-07T08:49:39.167607Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 AckTo { RawX1: 0 RawX2: 0 } } Step: 500 MediatorID: 72057594046382081 TabletID: 72057594046644480, at schemeshard: 72057594046644480 2025-05-07T08:49:39.167698Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046644480 2025-05-07T08:49:39.168002Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:49:39.168060Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046644480 2025-05-07T08:49:39.168278Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 1 2025-05-07T08:49:39.168367Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046644480, LocalPathId: 1], at schemeshard: 72057594046644480 2025-05-07T08:49:39.169643Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046644480 2025-05-07T08:49:39.169704Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046644480, txId: 1, path id: [OwnerId: 72057594046644480, LocalPathId: 1] 2025-05-07T08:49:39.169886Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046644480 2025-05-07T08:49:39.169926Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:573:2500], at schemeshard: 72057594046644480, txId: 1, path id: 1 2025-05-07T08:49:39.170312Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046644480 2025-05-07T08:49:39.170369Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046644480] TDone opId# 1:0 ProgressState 2025-05-07T08:49:39.170480Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#1:0 progress is 1/1 2025-05-07T08:49:39.170520Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-05-07T08:49:39.170562Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#1:0 progress is 1/1 2025-05-07T08:49:39.170602Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-05-07T08:49:39.170656Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 1, ready parts: 1/1, is published: false 2025-05-07T08:49:39.170728Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-05-07T08:49:39.170795Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 1:0 2025-05-07T08:49:39.170855Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 1:0 
2025-05-07T08:49:39.170955Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 2 2025-05-07T08:49:39.171007Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 1, publications: 1, subscribers: 1 2025-05-07T08:49:39.171044Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 1, [OwnerId: 72057594046644480, LocalPathId: 1], 3 2025-05-07T08:49:39.176002Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046644480, cookie: 1 2025-05-07T08:49:39.176156Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046644480, cookie: 1 2025-05-07T08:49:39.176211Z node 1 :FLAT_TX_SCHEMES ... veACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" 
BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 1 } ColumnFamilies { Id: 0 Name: "default" } } TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 2 PathOwnerId: 72057594046644480 2025-05-07T08:49:40.417581Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:59:2106] Handle TEvNavigate describe path /Root/IncrBackupTable 2025-05-07T08:49:40.417691Z node 1 :TX_PROXY DEBUG: describe.cpp:227: Actor# [1:819:2671] HANDLE EvNavigateScheme /Root/IncrBackupTable 2025-05-07T08:49:40.418184Z node 1 :TX_PROXY DEBUG: describe.cpp:311: Actor# [1:819:2671] HANDLE EvNavigateKeySetResult TDescribeReq marker# P5 ErrorCount# 0 2025-05-07T08:49:40.418275Z node 1 :TX_PROXY DEBUG: describe.cpp:389: Actor# [1:819:2671] SEND to# 72057594046644480 shardToRequest NKikimrSchemeOp.TDescribePath Path: "/Root/IncrBackupTable" 2025-05-07T08:49:40.419326Z node 1 :TX_PROXY DEBUG: describe.cpp:402: Actor# [1:819:2671] Handle TEvDescribeSchemeResult Forward to# [1:594:2519] Cookie: 0 TEvDescribeSchemeResult: NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 20 Record# Status: StatusSuccess Path: "/Root/IncrBackupTable" PathDescription { Self { Name: "IncrBackupTable" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1500 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "IncrBackupTable" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "__ydb_incrBackupImpl_deleted" Type: "Bool" TypeId: 6 Id: 3 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 
ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 1 } ColumnFamilies { Id: 0 Name: "default" } } TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 3 PathOwnerId: 72057594046644480 2025-05-07T08:49:40.433584Z node 1 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.h:65: [IncrRestoreChangeSenderMain][[OwnerId: 72057594046644480, LocalPathId: 2]][1:821:2673] HandleUserTable TEvTxProxySchemeCache::TEvNavigateKeySetResult: result# { ErrorCount: 0 
DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/IncrBackupTable TableId: [72057594046644480:3:1] RequestType: ByTableId Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Ok Kind: KindTable DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-05-07T08:49:40.447415Z node 1 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.h:131: [IncrRestoreChangeSenderMain][[OwnerId: 72057594046644480, LocalPathId: 2]][1:821:2673] HandleTargetTable TEvTxProxySchemeCache::TEvNavigateKeySetResult: result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Table TableId: [72057594046644480:2:1] RequestType: ByTableId Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Ok Kind: KindTable DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-05-07T08:49:40.447892Z node 1 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.h:227: [IncrRestoreChangeSenderMain][[OwnerId: 72057594046644480, LocalPathId: 2]][1:821:2673] HandleKeys TEvTxProxySchemeCache::TEvResolveKeySetResult: result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 ResultSet [{ TableId: [OwnerId: 72057594046644480, LocalPathId: 2] Access: 0 SyncVersion: false Status: OkData Kind: KindRegularTable PartitionsCount: 1 DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } From: (Uint32 : NULL) IncFrom: 1 To: () IncTo: 0 }] } 2025-05-07T08:49:40.448076Z node 1 :CHANGE_EXCHANGE DEBUG: change_sender_incr_restore.cpp:176: [IncrRestoreChangeSenderMain][[OwnerId: 72057594046644480, LocalPathId: 2]][1:821:2673] Handle NKikimr::NDataShard::TEvIncrementalRestoreScan::TEvNoMoreData >> TDataShardTrace::TestTraceDistributedUpsert+UseSink [GOOD] >> TDataShardTrace::TestTraceDistributedUpsert-UseSink [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_incremental_restore_scan/unittest >> IncrementalRestoreScan::ChangeSenderSimple [GOOD] Test command err: 2025-05-07T08:49:36.195373Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:105:2151], Scheduled retry for error: {
<main>
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:49:36.216803Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:49:36.217330Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/004873/r3tmp/tmpwbXVpb/pdisk_1.dat 2025-05-07T08:49:38.672624Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "Root" StoragePools { Name: "/Root:test" Kind: "test" } } } TxId: 1 TabletId: 72057594046644480 , at schemeshard: 72057594046644480 2025-05-07T08:49:38.692769Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //Root, opId: 1:0, at schemeshard: 72057594046644480 2025-05-07T08:49:38.696928Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 0 2025-05-07T08:49:38.703363Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-05-07T08:49:38.703492Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-05-07T08:49:38.717352Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046644480 PathId: 1, at schemeshard: 72057594046644480 2025-05-07T08:49:38.768182Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //Root 2025-05-07T08:49:38.768577Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046644480 2025-05-07T08:49:38.768657Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046644480 2025-05-07T08:49:38.768701Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:49:38.768740Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:49:38.769598Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046644480 2025-05-07T08:49:38.769641Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046644480 2025-05-07T08:49:38.769678Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:49:38.782085Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046644480 2025-05-07T08:49:38.782157Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046644480 2025-05-07T08:49:38.782216Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046644480 2025-05-07T08:49:38.782268Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:49:38.786518Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046644480 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:49:38.787228Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046644480 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:49:38.795734Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 2025-05-07T08:49:38.797085Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 1, at schemeshard: 72057594046644480 2025-05-07T08:49:38.797139Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 1, ready parts: 0/1, is published: true 2025-05-07T08:49:38.797188Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 1, at schemeshard: 72057594046644480 2025-05-07T08:49:38.913329Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7610: Got new config: QueryServiceConfig { AvailableExternalDataSources: "ObjectStorage" } 2025-05-07T08:49:38.913402Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:49:38.914226Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# ObjectStorage 2025-05-07T08:49:39.004405Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:59:2106] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-05-07T08:49:39.005367Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 2025-05-07T08:49:39.005670Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:49:39.011766Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:49:39.033898Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:49:39.167326Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 500, transactions count in step: 1, at schemeshard: 72057594046644480 2025-05-07T08:49:39.167550Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 AckTo { RawX1: 0 RawX2: 0 } } Step: 500 MediatorID: 72057594046382081 TabletID: 72057594046644480, at schemeshard: 72057594046644480 2025-05-07T08:49:39.167629Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046644480 2025-05-07T08:49:39.167991Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:49:39.168069Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046644480 2025-05-07T08:49:39.168262Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 1 2025-05-07T08:49:39.168361Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046644480, LocalPathId: 1], at schemeshard: 72057594046644480 2025-05-07T08:49:39.169638Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046644480 2025-05-07T08:49:39.169705Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046644480, txId: 1, path id: [OwnerId: 72057594046644480, LocalPathId: 1] 2025-05-07T08:49:39.169885Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046644480 2025-05-07T08:49:39.169925Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:573:2500], at schemeshard: 72057594046644480, txId: 1, path id: 1 2025-05-07T08:49:39.170341Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046644480 2025-05-07T08:49:39.170396Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046644480] TDone opId# 1:0 ProgressState 2025-05-07T08:49:39.170494Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#1:0 progress is 1/1 2025-05-07T08:49:39.170534Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-05-07T08:49:39.170579Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#1:0 progress is 1/1 2025-05-07T08:49:39.170613Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-05-07T08:49:39.170668Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 1, ready parts: 1/1, is published: false 2025-05-07T08:49:39.170752Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-05-07T08:49:39.170799Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 1:0 2025-05-07T08:49:39.170858Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 1:0 
2025-05-07T08:49:39.170951Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 2 2025-05-07T08:49:39.171003Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 1, publications: 1, subscribers: 1 2025-05-07T08:49:39.171041Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 1, [OwnerId: 72057594046644480, LocalPathId: 1], 3 2025-05-07T08:49:39.183917Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046644480, cookie: 1 2025-05-07T08:49:39.184087Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046644480, cookie: 1 2025-05-07T08:49:39.184142Z node 1 :FLAT_TX_SCHEMES ... -07T08:49:40.379048Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 281474976715658 ready parts: 1/1 2025-05-07T08:49:40.379094Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 281474976715658:0 2025-05-07T08:49:40.379127Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 281474976715658:0 2025-05-07T08:49:40.379280Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 3 2025-05-07T08:49:40.379606Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715658 datashard 72075186224037889 state Ready 2025-05-07T08:49:40.379674Z node 1 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037889 Got TEvSchemaChangedResult from SS at 72075186224037889 2025-05-07T08:49:40.380333Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:59:2106] Handle TEvNavigate describe path /Root/IncrBackupTable 2025-05-07T08:49:40.380452Z node 1 :TX_PROXY DEBUG: describe.cpp:227: Actor# [1:807:2665] HANDLE EvNavigateScheme /Root/IncrBackupTable 2025-05-07T08:49:40.382391Z node 1 :TX_PROXY DEBUG: describe.cpp:311: Actor# [1:807:2665] HANDLE EvNavigateKeySetResult TDescribeReq marker# P5 ErrorCount# 0 2025-05-07T08:49:40.382539Z node 1 :TX_PROXY DEBUG: describe.cpp:389: Actor# [1:807:2665] SEND to# 72057594046644480 shardToRequest NKikimrSchemeOp.TDescribePath Path: "/Root/IncrBackupTable" Options { ShowPrivateTable: true } 2025-05-07T08:49:40.383789Z node 1 :TX_PROXY DEBUG: describe.cpp:402: Actor# [1:807:2665] Handle TEvDescribeSchemeResult Forward to# [1:594:2519] Cookie: 0 TEvDescribeSchemeResult: NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 20 Record# Status: StatusSuccess Path: "/Root/IncrBackupTable" PathDescription { Self { Name: "IncrBackupTable" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1500 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 
ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "IncrBackupTable" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "__ydb_incrBackupImpl_deleted" Type: "Bool" TypeId: 6 Id: 3 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 1 } ColumnFamilies { Id: 0 Name: "default" } } TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } 
DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 3 PathOwnerId: 72057594046644480 2025-05-07T08:49:40.385006Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [1:817:2669], serverId# [1:818:2670], sessionId# [0:0:0] 2025-05-07T08:49:40.430894Z node 1 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.h:65: [IncrRestoreChangeSenderMain][[OwnerId: 72057594046644480, LocalPathId: 2]][1:819:2671] HandleUserTable TEvTxProxySchemeCache::TEvNavigateKeySetResult: result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/IncrBackupTable TableId: [72057594046644480:3:1] RequestType: ByTableId Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Ok Kind: KindTable DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-05-07T08:49:40.447359Z node 1 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.h:131: [IncrRestoreChangeSenderMain][[OwnerId: 72057594046644480, LocalPathId: 2]][1:819:2671] HandleTargetTable TEvTxProxySchemeCache::TEvNavigateKeySetResult: result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Table TableId: [72057594046644480:2:1] RequestType: ByTableId Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Ok Kind: KindTable DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-05-07T08:49:40.447815Z node 1 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.h:227: [IncrRestoreChangeSenderMain][[OwnerId: 72057594046644480, LocalPathId: 2]][1:819:2671] HandleKeys TEvTxProxySchemeCache::TEvResolveKeySetResult: result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 ResultSet [{ TableId: [OwnerId: 72057594046644480, LocalPathId: 2] Access: 0 SyncVersion: false Status: OkData Kind: KindRegularTable PartitionsCount: 1 DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } From: (Uint32 : NULL) IncFrom: 1 To: () IncTo: 0 }] } 2025-05-07T08:49:40.448125Z node 1 :CHANGE_EXCHANGE DEBUG: change_sender_incr_restore.cpp:139: [IncrRestoreChangeSenderMain][[OwnerId: 72057594046644480, LocalPathId: 2]][1:819:2671] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvEnqueueRecords { Records [{ Order: 0 PathId: [OwnerId: 72057594046644480, LocalPathId: 2] BodySize: 18 }] } 2025-05-07T08:49:40.448290Z node 1 :CHANGE_EXCHANGE 
DEBUG: change_sender_incr_restore.cpp:144: [IncrRestoreChangeSenderMain][[OwnerId: 72057594046644480, LocalPathId: 2]][1:819:2671] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRecords { Records [{ Order: 0 Group: 0 Step: 0 TxId: 0 PathId: [OwnerId: 72057594046644480, LocalPathId: 2] Kind: IncrementalRestore Source: InitialScan Body: 18b TableId: [OwnerId: 72057594046644480, LocalPathId: 3] SchemaVersion: 0 LockId: 0 LockOffset: 0 }] } 2025-05-07T08:49:40.448558Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:392: actor# [1:59:2106] Handle TEvGetProxyServicesRequest 2025-05-07T08:49:40.448648Z node 1 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.cpp:40: [TableChangeSenderShard][0:0][72075186224037888][1:823:2671] Handle NKikimr::TEvTxUserProxy::TEvGetProxyServicesResponse 2025-05-07T08:49:40.470409Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:824:2675], serverId# [1:825:2676], sessionId# [0:0:0] 2025-05-07T08:49:40.524676Z node 1 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.cpp:78: [TableChangeSenderShard][0:0][72075186224037888][1:823:2671] Handshake NKikimrChangeExchange.TEvStatus Status: STATUS_OK LastRecordOrder: 0 2025-05-07T08:49:40.524831Z node 1 :CHANGE_EXCHANGE DEBUG: change_sender_incr_restore.cpp:154: [IncrRestoreChangeSenderMain][[OwnerId: 72057594046644480, LocalPathId: 2]][1:819:2671] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186224037888 } 2025-05-07T08:49:40.542538Z node 1 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.cpp:123: [TableChangeSenderShard][0:0][72075186224037888][1:823:2671] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRecords { Records [{ Order: 0 Group: 0 Step: 0 TxId: 0 PathId: [OwnerId: 72057594046644480, LocalPathId: 2] Kind: IncrementalRestore Source: InitialScan Body: 18b TableId: [OwnerId: 72057594046644480, LocalPathId: 3] SchemaVersion: 0 LockId: 0 LockOffset: 0 }] } 2025-05-07T08:49:40.542676Z node 1 :CHANGE_EXCHANGE DEBUG: change_sender_incr_restore.cpp:154: [IncrRestoreChangeSenderMain][[OwnerId: 72057594046644480, LocalPathId: 2]][1:819:2671] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186224037888 } 2025-05-07T08:49:40.542873Z node 1 :CHANGE_EXCHANGE DEBUG: change_sender_incr_restore.cpp:176: [IncrRestoreChangeSenderMain][[OwnerId: 72057594046644480, LocalPathId: 2]][1:819:2671] Handle NKikimr::NDataShard::TEvIncrementalRestoreScan::TEvNoMoreData >> KqpSystemView::PartitionStatsRanges >> TTopicWriterTests::TestEnterMessage_OnlyDelimiters [GOOD] >> TTopicWriterTests::TestEnterMessage_SomeBinaryData [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/viewer/ut/unittest >> Viewer::JsonAutocompleteColumns [GOOD] Test command err: 2025-05-07T08:49:38.498807Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:337:2380], Scheduled retry for error: {
<main>
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:49:38.499137Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:49:38.499182Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] TServer::EnableGrpc on GrpcPort 9528, node 1 TClient is connected to server localhost:26412 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_olap/unittest >> TOlapNaming::AlterColumnTableFailed [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:128:2058] recipient: [1:108:2140] 2025-05-07T08:49:25.612537Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:49:25.612616Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:49:25.612653Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:49:25.612688Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:49:25.612731Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:49:25.612754Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:49:25.612800Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:49:25.612871Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:49:25.613577Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:49:25.614086Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:49:25.685854Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:49:25.685919Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:49:25.697718Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 
2025-05-07T08:49:25.697837Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:49:25.698018Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:49:25.706018Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:49:25.706681Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:49:25.707412Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:49:25.707717Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:49:25.712389Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:49:25.714045Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:49:25.714113Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:49:25.714162Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:49:25.714241Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:49:25.714379Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:49:25.714589Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:49:25.720842Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:239:2058] recipient: [1:15:2062] 2025-05-07T08:49:25.860056Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:49:25.860325Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:49:25.860621Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:49:25.860888Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:49:25.860962Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part 
proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:49:25.864435Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:49:25.864586Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:49:25.864831Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:49:25.864908Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:49:25.864963Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:49:25.865007Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:49:25.867577Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:49:25.867659Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:49:25.867712Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:49:25.870042Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:49:25.870116Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:49:25.870172Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:49:25.870223Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:49:25.874330Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:49:25.876580Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:49:25.876830Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:49:25.877841Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:49:25.878018Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 130 RawX2: 4294969450 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:49:25.878080Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:49:25.878408Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:49:25.878479Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:49:25.878755Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:49:25.878905Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:49:25.881431Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:49:25.881501Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:49:25.881695Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:49:25.881740Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 
2025-05-07T08:49:40.462123Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-05-07T08:49:40.462197Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-05-07T08:49:40.462281Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-05-07T08:49:40.462344Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-05-07T08:49:40.462434Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-05-07T08:49:40.462502Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-05-07T08:49:40.469033Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-05-07T08:49:40.469225Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-05-07T08:49:40.469376Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-05-07T08:49:40.469481Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-05-07T08:49:40.469576Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-05-07T08:49:40.469690Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-05-07T08:49:40.469763Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-05-07T08:49:40.469854Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-05-07T08:49:40.473193Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-05-07T08:49:40.473391Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-05-07T08:49:40.473525Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-05-07T08:49:40.473589Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-05-07T08:49:40.473680Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-05-07T08:49:40.473770Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 
2025-05-07T08:49:40.473840Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-05-07T08:49:40.473931Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-05-07T08:49:40.476000Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-05-07T08:49:40.476276Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-05-07T08:49:40.476370Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-05-07T08:49:40.476576Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-05-07T08:49:40.476687Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-05-07T08:49:40.476747Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-05-07T08:49:40.476827Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-05-07T08:49:40.476904Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-05-07T08:49:40.476966Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-05-07T08:49:40.477075Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 101:0, at schemeshard: 72057594046678944 2025-05-07T08:49:40.477128Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046678944] TDone opId# 101:0 ProgressState 2025-05-07T08:49:40.477265Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#101:0 progress is 1/1 2025-05-07T08:49:40.477309Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-05-07T08:49:40.477360Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#101:0 progress is 1/1 2025-05-07T08:49:40.477398Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-05-07T08:49:40.477443Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 101, ready parts: 1/1, is published: true 2025-05-07T08:49:40.477533Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [2:2774:4039] message: TxId: 101 2025-05-07T08:49:40.477589Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-05-07T08:49:40.477677Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation 
id: 101:0 2025-05-07T08:49:40.477729Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 101:0 2025-05-07T08:49:40.479034Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 66 2025-05-07T08:49:40.482877Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-05-07T08:49:40.482946Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [2:2775:4040] TestWaitNotification: OK eventTxId 101 TestModificationResults wait txId: 102 2025-05-07T08:49:40.486520Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpAlterColumnTable AlterColumnTable { Name: "TestTable" AlterSchema { AddColumns { Name: "New Column" Type: "Int32" } } } } TxId: 102 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:49:40.486754Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: alter_table.cpp:282: TAlterColumnTable Propose, path: /MyRoot/TestTable, opId: 102:0, at schemeshard: 72057594046678944 2025-05-07T08:49:40.487021Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 102:1, propose status:StatusSchemeError, reason: update parse error: Invalid name for column 'New Column'. in alter constructor STANDALONE_UPDATE, at schemeshard: 72057594046678944 2025-05-07T08:49:40.489827Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 102, response: Status: StatusSchemeError Reason: "update parse error: Invalid name for column \'New Column\'. in alter constructor STANDALONE_UPDATE" TxId: 102 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:49:40.490036Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 102, database: /MyRoot, subject: , status: StatusSchemeError, reason: update parse error: Invalid name for column 'New Column'. 
in alter constructor STANDALONE_UPDATE, operation: ALTER COLUMN TABLE, path: /MyRoot/TestTable TestModificationResult got TxId: 102, wait until txId: 102 TestWaitNotification wait txId: 102 2025-05-07T08:49:40.490370Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2025-05-07T08:49:40.490417Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 2025-05-07T08:49:40.490890Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 2025-05-07T08:49:40.491020Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-05-07T08:49:40.491063Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [2:3584:4778] TestWaitNotification: OK eventTxId 102 >> KqpSinkMvcc::OltpMultiSinks [GOOD] >> DataShardOutOfOrder::TestOutOfOrderNonConflictingWrites+EvWrite [GOOD] >> DataShardOutOfOrder::TestOutOfOrderNonConflictingWrites-EvWrite >> DataShardOutOfOrder::TestPlannedHalfOverloadedSplit-UseSink [GOOD] |88.8%| [TM] {asan, default-linux-x86_64, release} ydb/public/lib/ydb_cli/topic/ut/unittest >> TTopicWriterTests::TestEnterMessage_SomeBinaryData [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_trace/unittest >> TDataShardTrace::TestTraceDistributedUpsert-UseSink [GOOD] Test command err: 2025-05-07T08:49:31.656795Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:105:2151], Scheduled retry for error: {
<main>
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:49:31.668725Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:49:31.669166Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/001d92/r3tmp/tmp8NiVHP/pdisk_1.dat 2025-05-07T08:49:34.453774Z node 1 :BS_CONTROLLER ERROR: {BSC07@impl.h:2175} ProcessControllerEvent event processing took too much time Type# 268637706 Duration# 0.243670s 2025-05-07T08:49:34.453904Z node 1 :BS_CONTROLLER ERROR: {BSC00@bsc.cpp:666} StateWork event processing took too much time Type# 2146435078 Duration# 0.243827s 2025-05-07T08:49:34.506391Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-05-07T08:49:34.703718Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:49:34.836643Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:49:34.836828Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:49:34.850519Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:49:35.226529Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-05-07T08:49:38.519325Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:927:2769], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>
: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T08:49:38.519467Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:938:2774], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T08:49:38.536739Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T08:49:38.587279Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480
2025-05-07T08:49:38.649380Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037888 not found
2025-05-07T08:49:38.825753Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:941:2777], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking }
2025-05-07T08:49:38.917470Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:1003:2819] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 }
2025-05-07T08:49:41.622845Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715661. Ctx: { TraceId: 01jtmyzap28s1zeg0279dhrrs9, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NWM4MWExNTYtYmEwNjcwMjctZWIyN2U3OGItMjlmOTM5OWU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root
Trace: (Session.query.QUERY_ACTION_EXECUTE -> [(CompileService -> [(CompileActor)]) , (LiteralExecuter) , (DataExecuter -> [(WaitForTableResolve) , (RunTasks) , (Datashard.Transaction -> [(Tablet.Transaction -> [(Tablet.Transaction.Execute -> [(Datashard.Unit) , (Datashard.Unit) , (Datashard.Unit)]) , (Tablet.Transaction.Complete)]) , (Datashard.SendWithConfirmedReadOnlyLease) , (Tablet.Transaction -> [(Tablet.Transaction.Execute -> [(Datashard.Unit) , (Datashard.Unit) , (Datashard.Unit) , (Datashard.Unit) , (Datashard.Unit) , (Datashard.Unit)]) , (Tablet.WriteLog -> [(Tablet.WriteLog.LogEntry)]) , (Tablet.Transaction.Complete)]) , (Datashard.SendResult)]) , (Datashard.Transaction -> [(Tablet.Transaction -> [(Tablet.Transaction.Execute -> [(Datashard.Unit) , (Datashard.Unit) , (Datashard.Unit)]) , (Tablet.Transaction.Complete)]) , (Datashard.SendWithConfirmedReadOnlyLease) , (Tablet.Transaction -> [(Tablet.Transaction.Execute -> [(Datashard.Unit) , (Datashard.Unit) , (Datashard.Unit) , (Datashard.Unit) , (Datashard.Unit) , (Datashard.Unit)]) , (Tablet.WriteLog -> [(Tablet.WriteLog.LogEntry)]) , (Tablet.Transaction.Complete)]) , (Datashard.SendResult)])])])
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_trace/unittest >> TDataShardTrace::TestTraceDistributedUpsert+UseSink [GOOD]
Test command err:
2025-05-07T08:49:31.657563Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:105:2151], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests }
2025-05-07T08:49:31.669110Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError;
2025-05-07T08:49:31.669494Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError;
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/001db4/r3tmp/tmp98Y0rS/pdisk_1.dat
2025-05-07T08:49:34.455252Z node 1 :BS_CONTROLLER ERROR: {BSC07@impl.h:2175} ProcessControllerEvent event processing took too much time Type# 268637706 Duration# 0.247472s
2025-05-07T08:49:34.455374Z node 1 :BS_CONTROLLER ERROR: {BSC00@bsc.cpp:666} StateWork event processing took too much time Type# 2146435078 Duration# 0.247631s
2025-05-07T08:49:34.504078Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480
2025-05-07T08:49:34.704135Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded
2025-05-07T08:49:34.837399Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2025-05-07T08:49:34.837542Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting
2025-05-07T08:49:34.850916Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected
2025-05-07T08:49:35.227946Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480
2025-05-07T08:49:38.519457Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:927:2769], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T08:49:38.519591Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:938:2774], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T08:49:38.537015Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T08:49:38.583610Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480
2025-05-07T08:49:38.649464Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037888 not found
2025-05-07T08:49:38.820762Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:941:2777], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-05-07T08:49:38.917528Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:1003:2819] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:49:41.498406Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715661. Ctx: { TraceId: 01jtmyzap217wdphtn94at7jwy, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZmMzYzRhODUtODY0ODk3Y2YtZjEzMjFiYTEtYTE3YmM0MDU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root Trace: (Session.query.QUERY_ACTION_EXECUTE -> [(CompileService -> [(CompileActor)]) , (DataExecuter -> [(WaitForTableResolve) , (ComputeActor -> [(ForwardWriteActor)]) , (RunTasks) , (Commit -> [(Datashard.WriteTransaction -> [(Tablet.Transaction -> [(Tablet.Transaction.Execute -> [(Datashard.Unit) , (Datashard.Unit) , (Datashard.Unit)]) , (Tablet.Transaction.Complete)]) , (Datashard.SendWithConfirmedReadOnlyLease) , (Tablet.Transaction -> [(Tablet.Transaction.Execute -> [(Datashard.Unit) , (Datashard.Unit) , (Datashard.Unit) , (Datashard.Unit) , (Datashard.Unit) , (Datashard.Unit)]) , (Tablet.WriteLog -> [(Tablet.WriteLog.LogEntry)]) , (Tablet.Transaction.Complete)]) , (Datashard.SendWriteResult)]) , (Datashard.WriteTransaction -> [(Tablet.Transaction -> [(Tablet.Transaction.Execute -> [(Datashard.Unit) , (Datashard.Unit) , (Datashard.Unit)]) , (Tablet.Transaction.Complete)]) , (Datashard.SendWithConfirmedReadOnlyLease) , (Tablet.Transaction -> [(Tablet.Transaction.Execute -> [(Datashard.Unit) , (Datashard.Unit) , (Datashard.Unit) , (Datashard.Unit) , (Datashard.Unit) , (Datashard.Unit)]) , (Tablet.WriteLog -> [(Tablet.WriteLog.LogEntry)]) , (Tablet.Transaction.Complete)]) , (Datashard.SendWriteResult)])])])]) >> TDataShardTrace::TestTraceDistributedSelectViaReadActors [GOOD] >> KqpSystemView::PartitionStatsRange3 |88.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> KqpSystemView::Join ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_base/unittest >> TSchemeShardTest::AlterIndexTableDirectly [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:128:2058] recipient: [1:108:2140] 2025-05-07T08:47:20.212001Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:47:20.212100Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:47:20.212159Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:47:20.212193Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default 
configuration 2025-05-07T08:47:20.212236Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:47:20.212267Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:47:20.212341Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:47:20.212434Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:47:20.213120Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:47:20.213477Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:47:20.440402Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:47:20.440475Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:47:20.467318Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:47:20.467437Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:47:20.467598Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:47:20.489006Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:47:20.489654Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:47:20.490312Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:47:20.490642Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:47:20.492965Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:47:20.494435Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:47:20.494496Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:47:20.494551Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:47:20.494604Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:47:20.494706Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 
2025-05-07T08:47:20.494897Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:47:20.501350Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:239:2058] recipient: [1:15:2062] 2025-05-07T08:47:20.660618Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:47:20.660849Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:20.661069Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:47:20.661323Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:47:20.661410Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:20.666418Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:47:20.666611Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:47:20.666850Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:20.666924Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:47:20.666972Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:47:20.667006Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:47:20.669808Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:20.669884Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:47:20.669939Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:47:20.672629Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:20.672685Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: 
NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:47:20.672760Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:47:20.672808Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:47:20.692266Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:47:20.695769Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:47:20.695957Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:47:20.696903Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:47:20.697017Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 130 RawX2: 4294969450 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:47:20.697068Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:47:20.697334Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:47:20.697395Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:47:20.697574Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:47:20.697643Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:47:20.703614Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:47:20.703666Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:47:20.703898Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard 
DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:47:20.703951Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... refix: "" IsPoint: false IsInclusive: false DatashardId: 72075186233409552 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 3 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 1592 DataSize: 1592 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } StoragePoolsUsage { PoolKind: "pool-kind-1" TotalSize: 0 DataSize: 0 IndexSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 4 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:49:41.365298Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/table/indexByValue" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-05-07T08:49:41.365783Z node 16 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/table/indexByValue" took 518us result status StatusSuccess 2025-05-07T08:49:41.369381Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/table/indexByValue" PathDescription { Self { Name: "indexByValue" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeTableIndex CreateFinished: true CreateTxId: 281474976710758 CreateStep: 5000004 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableIndexVersion: 3 } ChildrenExist: true } Children { Name: "indexImplTable" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710758 CreateStep: 5000004 ParentPathId: 3 PathState: EPathStateAlter Owner: "root@builtin" ACL: "" PathSubType: EPathSubTypeSyncIndexImplTable Version { GeneralVersion: 6 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 3 TablePartitionVersion: 2 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 4 
ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 1592 DataSize: 1592 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } StoragePoolsUsage { PoolKind: "pool-kind-1" TotalSize: 0 DataSize: 0 IndexSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } TableIndex { Name: "indexByValue" LocalPathId: 3 Type: EIndexTypeGlobal State: EIndexStateReady KeyColumnNames: "value" SchemaVersion: 3 PathOwnerId: 72057594046678944 DataSize: 0 IndexImplTableDescriptions { Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "value" KeyColumnNames: "key" KeyColumnIds: 1 KeyColumnIds: 2 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 KeepEraseMarkers: false MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 100500 MinPartitionsCount: 1 FastSplitSettings { SizeThreshold: 100500 RowCountThreshold: 100500 } } } } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:49:41.371556Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/table/indexByValue/indexImplTable" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false 
ReturnBoundaries: true ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-05-07T08:49:41.372028Z node 16 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/table/indexByValue/indexImplTable" took 490us result status StatusSuccess 2025-05-07T08:49:41.373153Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/table/indexByValue/indexImplTable" PathDescription { Self { Name: "indexImplTable" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710758 CreateStep: 5000004 ParentPathId: 3 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 8 PathSubType: EPathSubTypeSyncIndexImplTable Version { GeneralVersion: 8 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 3 TablePartitionVersion: 4 } ChildrenExist: false } Table { Name: "indexImplTable" Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "value" KeyColumnNames: "key" KeyColumnIds: 1 KeyColumnIds: 2 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 KeepEraseMarkers: false MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy 
{ SizeToSplit: 100500 MinPartitionsCount: 1 FastSplitSettings { SizeThreshold: 100500 RowCountThreshold: 100500 } } } TableSchemaVersion: 3 IsBackup: false IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186233409552 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 3 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 1592 DataSize: 1592 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } StoragePoolsUsage { PoolKind: "pool-kind-1" TotalSize: 0 DataSize: 0 IndexSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 4 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TDataShardTrace::TestTraceDistributedSelect [GOOD] >> KqpSysColV0::SelectRowById |88.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/tx/unittest >> KqpSinkMvcc::OltpMultiSinks [GOOD] Test command err: Trying to start YDB, gRPC: 11671, MsgBus: 12173 2025-05-07T08:49:15.999334Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501623189668163952:2061];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:49:15.999391Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00370b/r3tmp/tmpqv2N77/pdisk_1.dat 2025-05-07T08:49:16.531135Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:49:16.548667Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:49:16.548755Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:49:16.551326Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 11671, node 1 2025-05-07T08:49:16.649800Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:49:16.649827Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:49:16.649839Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:49:16.650218Z node 1 :NET_CLASSIFIER ERROR: 
net_classifier.cpp:228: got bad distributable configuration
TClient is connected to server localhost:12173
TClient is connected to server localhost:12173
WaitRootIsUp 'Root'...
TClient::Ls request: Root
TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED)
WaitRootIsUp 'Root' success.
2025-05-07T08:49:17.283135Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480
waiting...
2025-05-07T08:49:19.490891Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623206848033799:2330], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T08:49:19.490897Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623206848033808:2333], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T08:49:19.491012Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T08:49:19.494477Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480
2025-05-07T08:49:19.503394Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501623206848033813:2334], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-05-07T08:49:19.572970Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501623206848033864:2334] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:49:19.945214Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 2025-05-07T08:49:20.110204Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 2025-05-07T08:49:21.807460Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501623189668163952:2061];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:49:21.847690Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:49:22.325671Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 WAIT_INDEXATION: 0 WAIT_INDEXATION: 0 WAIT_INDEXATION: 0 WAIT_INDEXATION: 0 WAIT_INDEXATION: 0 Trying to start YDB, gRPC: 5465, MsgBus: 28685 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00370b/r3tmp/tmpcxrxwU/pdisk_1.dat 2025-05-07T08:49:30.470865Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:49:30.536830Z node 2 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 5465, node 2 2025-05-07T08:49:30.568515Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:49:30.568604Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:49:30.571856Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:49:30.618037Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:49:30.618062Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:49:30.618072Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:49:30.618207Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:28685 TClient is connected to server localhost:28685 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:49:31.061671Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:31.069093Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-05-07T08:49:33.703167Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7501623264249947154:2330], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T08:49:33.703251Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7501623264249947162:2333], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T08:49:33.703330Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T08:49:33.707647Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480
2025-05-07T08:49:33.716867Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7501623264249947168:2334], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking }
2025-05-07T08:49:33.783365Z node 2 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [2:7501623264249947219:2333] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 }
2025-05-07T08:49:33.831553Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480
2025-05-07T08:49:33.866360Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480
2025-05-07T08:49:35.015264Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480
WAIT_INDEXATION: 0
WAIT_INDEXATION: 0
WAIT_INDEXATION: 0
WAIT_INDEXATION: 0
WAIT_INDEXATION: 0
>> TCacheTest::MigrationDeletedPathNavigate [GOOD]
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_order/unittest >> DataShardOutOfOrder::TestPlannedHalfOverloadedSplit-UseSink [GOOD]
Test command err:
2025-05-07T08:49:33.284123Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:105:2151], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:49:33.284280Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:49:33.284528Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/004515/r3tmp/tmpQAkyum/pdisk_1.dat 2025-05-07T08:49:33.682266Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-05-07T08:49:33.730030Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:49:33.780685Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:49:33.780822Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:49:33.792298Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:49:33.873108Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-05-07T08:49:33.911692Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3098: StateInit, received event# 268828672, Sender [1:656:2563], Recipient [1:665:2569]: NKikimr::TEvTablet::TEvBoot 2025-05-07T08:49:33.912813Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3098: StateInit, received event# 268828673, Sender [1:656:2563], Recipient [1:665:2569]: NKikimr::TEvTablet::TEvRestored 2025-05-07T08:49:33.913294Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:665:2569] 2025-05-07T08:49:33.913562Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-05-07T08:49:33.923727Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3111: StateInactive, received event# 268828684, Sender [1:656:2563], Recipient [1:665:2569]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-05-07T08:49:33.962974Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-05-07T08:49:33.963099Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-05-07T08:49:33.964856Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-05-07T08:49:33.964963Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-05-07T08:49:33.965044Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-05-07T08:49:33.965438Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-05-07T08:49:33.965590Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-05-07T08:49:33.965672Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state 
actor id [1:681:2569] in generation 1 2025-05-07T08:49:33.976413Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-05-07T08:49:34.002615Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-05-07T08:49:34.002836Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-05-07T08:49:34.002951Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:683:2579] 2025-05-07T08:49:34.002986Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-05-07T08:49:34.003022Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-05-07T08:49:34.003055Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T08:49:34.003258Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 2146435072, Sender [1:665:2569], Recipient [1:665:2569]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-05-07T08:49:34.003314Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3155: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-05-07T08:49:34.003615Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-05-07T08:49:34.003700Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-05-07T08:49:34.003821Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T08:49:34.003869Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-05-07T08:49:34.003920Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-05-07T08:49:34.003956Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-05-07T08:49:34.004004Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-05-07T08:49:34.004037Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-05-07T08:49:34.004080Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T08:49:34.004195Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269877761, Sender [1:672:2573], Recipient [1:665:2569]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-05-07T08:49:34.004225Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3166: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-05-07T08:49:34.004270Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:661:2566], serverId# [1:672:2573], sessionId# [0:0:0] 2025-05-07T08:49:34.004621Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269549568, Sender [1:410:2405], Recipient [1:672:2573] 2025-05-07T08:49:34.004668Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3136: StateWork, processing 
event TEvDataShard::TEvProposeTransaction 2025-05-07T08:49:34.004801Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-05-07T08:49:34.005022Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-05-07T08:49:34.005075Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-05-07T08:49:34.005188Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-05-07T08:49:34.005229Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-05-07T08:49:34.005268Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx 2025-05-07T08:49:34.005302Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx 2025-05-07T08:49:34.005338Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-05-07T08:49:34.005611Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayCompleteNoMoreRestarts 2025-05-07T08:49:34.005651Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit StoreSchemeTx 2025-05-07T08:49:34.005695Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit FinishPropose 2025-05-07T08:49:34.005730Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-05-07T08:49:34.005775Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayComplete 2025-05-07T08:49:34.005820Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit FinishPropose 2025-05-07T08:49:34.005853Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit WaitForPlan 2025-05-07T08:49:34.005901Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit WaitForPlan 2025-05-07T08:49:34.005935Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:281474976715657] at 72075186224037888 is not ready to execute on unit WaitForPlan 2025-05-07T08:49:34.007384Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269746185, Sender [1:684:2580], Recipient [1:665:2569]: NKikimr::TEvTxProxySchemeCache::TEvWatchNotifyUpdated 2025-05-07T08:49:34.007436Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-05-07T08:49:34.018290Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-05-07T08:49:34.018365Z node 1 
:TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-05-07T08:49:34.018405Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-05-07T08:49:34.018469Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 281474976715657 at tablet 72075186224037888 send to client, exec latency: 0 ms, propose latency: 0 ms, status: PREPARED 2025-05-07T08:49:34.018551Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-05-07T08:49:34.170247Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269877761, Sender [1:699:2589], Recipient [1:665:2569]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-05-07T08:49:34.170431Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3166: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-05-07T08:49:34.170475Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075 ... E: datashard_impl.h:3130: StateWork, received event# 270270976, Sender [2:24:2071], Recipient [2:1012:2808]: {TEvRegisterTabletResult TabletId# 72075186224037892 Entry# 2000} 2025-05-07T08:49:41.826735Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvMediatorTimecast::TEvRegisterTabletResult 2025-05-07T08:49:41.826782Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037892 time 2000 2025-05-07T08:49:41.826850Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037892 2025-05-07T08:49:41.826918Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037892 2025-05-07T08:49:41.826964Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037892 active 0 active planned 0 immediate 0 planned 0 2025-05-07T08:49:41.827031Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037892 2025-05-07T08:49:41.827075Z node 2 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037892 has no attached operations 2025-05-07T08:49:41.827117Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037892 2025-05-07T08:49:41.827158Z node 2 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037892 TxInFly 0 2025-05-07T08:49:41.827213Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037892 2025-05-07T08:49:41.827406Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269877764, Sender [2:1167:2919], Recipient [2:1012:2808]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-05-07T08:49:41.827446Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3167: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-05-07T08:49:41.827543Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037892, clientId# [2:1165:2917], serverId# [2:1167:2919], sessionId# [0:0:0] 2025-05-07T08:49:41.827959Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 270270978, Sender [2:24:2071], Recipient [2:1012:2808]: 
NKikimr::TEvMediatorTimecast::TEvSubscribeReadStepResult{ CoordinatorId# 72057594046316545 LastReadStep# 0 NextReadStep# 2000 ReadStep# 2000 } 2025-05-07T08:49:41.828017Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3169: StateWork, processing event TEvMediatorTimecast::TEvSubscribeReadStepResult 2025-05-07T08:49:41.828075Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037892 coordinator 72057594046316545 last step 0 next step 2000 2025-05-07T08:49:41.828192Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2812: CheckMediatorStateRestored at 72075186224037892: waitStep# 2000 readStep# 2000 observedStep# 2000 2025-05-07T08:49:41.828266Z node 2 :TX_DATASHARD TRACE: datashard.cpp:2846: CheckMediatorStateRestored at 72075186224037892 promoting UnprotectedReadEdge to v2000/18446744073709551615 2025-05-07T08:49:41.839376Z node 2 :TX_DATASHARD DEBUG: datashard_split_dst.cpp:304: 72075186224037893 ack snapshot OpId 281474976715665 2025-05-07T08:49:41.839537Z node 2 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state Ready tabletId 72075186224037893 2025-05-07T08:49:41.839674Z node 2 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037893 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-05-07T08:49:41.839763Z node 2 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186224037893 2025-05-07T08:49:41.839828Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037893, actorId: [2:1174:2926] 2025-05-07T08:49:41.839862Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037893 2025-05-07T08:49:41.839908Z node 2 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037893 2025-05-07T08:49:41.839943Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037893 2025-05-07T08:49:41.840197Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269553157, Sender [2:1015:2810], Recipient [2:750:2628]: NKikimrTxDataShard.TEvSplitTransferSnapshotAck TabletId: 72075186224037893 OperationCookie: 281474976715665 2025-05-07T08:49:41.840267Z node 2 :TX_DATASHARD DEBUG: datashard_split_src.cpp:461: 72075186224037889 Received snapshot Ack from dst 72075186224037893 for split OpId 281474976715665 2025-05-07T08:49:41.840575Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 2146435072, Sender [2:1015:2810], Recipient [2:1015:2810]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-05-07T08:49:41.840615Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3155: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-05-07T08:49:41.840947Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269877763, Sender [2:1166:2918], Recipient [2:750:2628]: NKikimr::TEvTabletPipe::TEvClientDestroyed { TabletId: 72075186224037893 ClientId: [2:1166:2918] ServerId: [2:1168:2920] } 2025-05-07T08:49:41.840988Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3164: StateWork, processing event TEvTabletPipe::TEvClientDestroyed 2025-05-07T08:49:41.841315Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 270270976, Sender [2:24:2071], Recipient [2:1015:2810]: {TEvRegisterTabletResult TabletId# 72075186224037893 Entry# 2000} 2025-05-07T08:49:41.841352Z 
node 2 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvMediatorTimecast::TEvRegisterTabletResult 2025-05-07T08:49:41.841387Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037893 time 2000 2025-05-07T08:49:41.841421Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037893 2025-05-07T08:49:41.841554Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037893 2025-05-07T08:49:41.841589Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037893 active 0 active planned 0 immediate 0 planned 0 2025-05-07T08:49:41.841622Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037893 2025-05-07T08:49:41.841652Z node 2 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037893 has no attached operations 2025-05-07T08:49:41.841683Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037893 2025-05-07T08:49:41.841718Z node 2 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037893 TxInFly 0 2025-05-07T08:49:41.841759Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037893 2025-05-07T08:49:41.841849Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269877764, Sender [2:1168:2920], Recipient [2:1015:2810]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-05-07T08:49:41.841885Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3167: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-05-07T08:49:41.841925Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037893, clientId# [2:1166:2918], serverId# [2:1168:2920], sessionId# [0:0:0] 2025-05-07T08:49:41.843135Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 270270978, Sender [2:24:2071], Recipient [2:1015:2810]: NKikimr::TEvMediatorTimecast::TEvSubscribeReadStepResult{ CoordinatorId# 72057594046316545 LastReadStep# 0 NextReadStep# 2000 ReadStep# 2000 } 2025-05-07T08:49:41.843175Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3169: StateWork, processing event TEvMediatorTimecast::TEvSubscribeReadStepResult 2025-05-07T08:49:41.843202Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037893 coordinator 72057594046316545 last step 0 next step 2000 2025-05-07T08:49:41.843237Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2812: CheckMediatorStateRestored at 72075186224037893: waitStep# 2000 readStep# 2000 observedStep# 2000 2025-05-07T08:49:41.843280Z node 2 :TX_DATASHARD TRACE: datashard.cpp:2846: CheckMediatorStateRestored at 72075186224037893 promoting UnprotectedReadEdge to v2000/18446744073709551615 2025-05-07T08:49:41.854158Z node 2 :TX_DATASHARD DEBUG: datashard_split_src.cpp:485: 72075186224037889 ack split to schemeshard 281474976715665 2025-05-07T08:49:41.857476Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269553158, Sender [2:409:2404], Recipient [2:755:2630] 2025-05-07T08:49:41.857579Z node 2 :TX_DATASHARD DEBUG: datashard_split_src.cpp:565: Got TEvSplitPartitioningChanged: opId: 281474976715665, at datashard: 72075186224037889, state: SplitSrcWaitForPartitioningChanged 
2025-05-07T08:49:41.860816Z node 2 :TX_DATASHARD DEBUG: datashard_split_src.cpp:532: 72075186224037889 ack split partitioning changed to schemeshard 281474976715665 2025-05-07T08:49:41.860913Z node 2 :TX_DATASHARD DEBUG: datashard_loans.cpp:220: 72075186224037889 in PreOffline state HasSharedBobs: 1 SchemaOperations: [ ] OutReadSets count: 0 ChangesQueue size: 0 ChangeExchangeSplit: 1 siblings to be activated: wait to activation from: 2025-05-07T08:49:41.861038Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 268828683, Sender [2:742:2623], Recipient [2:750:2628]: NKikimr::TEvTablet::TEvFollowerGcApplied 2025-05-07T08:49:42.463806Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269549568, Sender [2:962:2670], Recipient [2:664:2568]: NKikimrTxDataShard.TEvProposeTransaction TxKind: TX_KIND_DATA SourceDeprecated { RawX1: 962 RawX2: 8589937262 } TxBody: " \0008\000`\200\200\200\005j\324\006\010\001\022\225\006\010\001\022\024\n\022\t\302\003\000\000\000\000\000\000\021n\n\000\000\002\000\000\000\032\256\002\010\240\215\006\022\207\002\037\002\022KqpEffect\005\205\006\213\000\205\002\206\205\004\207\203\004?\004\014key\024valueh%kqp%tx_result_binding_0_1\204\214\002\030Inputs(Parameters\034Program\013?\000)\251\000?\n\014Arg\000\002)\211\002?\016\204\214\002(KqpEffects\000)\211\010?\032\213\010\203\010\203\010\203\005@\203\010\204?\006\210\203\004\203\004\203\0144KqpUpsertRows\000\013?&\003?\036\177\000\001\205\000\000\000\000\001\003? \004\003?\"\000\003?$\002\017)\211\002?(?\010 Iterator\000)\211\004?\010?\n\203\004\030Member\000?\026\003?@\000\002\004\000\006\010\002?.\003\203\004\004\003\203\004\002\003\003?0\000\r\010\000\n\001/\032\0369\000\000\000\000\000\000\000@i\000\000\000\000\000\000\360?q\000\000\000\000\ 2025-05-07T08:49:42.463940Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3136: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-05-07T08:49:42.464085Z node 2 :TX_DATASHARD NOTICE: datashard.cpp:3097: Rejecting data TxId 281474976715663 because datashard 72075186224037888: is in a pre/offline state assuming this is due to a finished split (wrong shard state) 2025-05-07T08:49:42.464654Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715664, at schemeshard: 72057594046644480 2025-05-07T08:49:42.465717Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715665, at schemeshard: 72057594046644480 >> KqpSysColV1::SelectRowAsterisk >> TStorageTenantTest::CreateTableInsideSubDomain [GOOD] |88.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> TStorageTenantTest::LsLs [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_trace/unittest >> TDataShardTrace::TestTraceDistributedSelectViaReadActors [GOOD] Test command err: 2025-05-07T08:49:31.660131Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:105:2151], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:49:31.668794Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:49:31.669110Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/001d65/r3tmp/tmpqtS5hF/pdisk_1.dat 2025-05-07T08:49:34.454454Z node 1 :BS_CONTROLLER ERROR: {BSC07@impl.h:2175} ProcessControllerEvent event processing took too much time Type# 268637706 Duration# 0.245557s 2025-05-07T08:49:34.454580Z node 1 :BS_CONTROLLER ERROR: {BSC00@bsc.cpp:666} StateWork event processing took too much time Type# 2146435078 Duration# 0.245694s 2025-05-07T08:49:34.513471Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-05-07T08:49:34.704892Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:49:34.836985Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:49:34.837246Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:49:34.849014Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:49:35.224536Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-05-07T08:49:38.519376Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:927:2769], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:38.519483Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:938:2774], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:38.537433Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:38.583623Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480 2025-05-07T08:49:38.651965Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037888 not found 2025-05-07T08:49:38.821366Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:941:2777], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-05-07T08:49:38.917636Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:1003:2819] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:49:41.498571Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715661. Ctx: { TraceId: 01jtmyzap2bdrnz1ym6kf48rdv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZGFmM2ZhMS1mNDBlMTg5ZS0zYzk4MTE4NS04YjA1OTExMA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:49:41.891975Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715662. Ctx: { TraceId: 01jtmyzdx01y5gq8pg0ht4rsqr, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OTc2MTc0OGItOGMyMjE4MjItN2M3ZTMwNTgtODIzODc0NjQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:49:42.960245Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715663. Ctx: { TraceId: 01jtmyze0t82m367bzvysps6nh, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ODc5ZmFkNDUtNDI2ZDRjZjctMmU5NDFhMDItZWQ1MjljMTE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_trace/unittest >> TDataShardTrace::TestTraceDistributedSelect [GOOD] Test command err: 2025-05-07T08:49:31.657895Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:105:2151], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:49:31.670289Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:49:31.670625Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/001d5b/r3tmp/tmpnf90Sq/pdisk_1.dat 2025-05-07T08:49:34.458424Z node 1 :BS_CONTROLLER ERROR: {BSC07@impl.h:2175} ProcessControllerEvent event processing took too much time Type# 268637706 Duration# 0.249158s 2025-05-07T08:49:34.458578Z node 1 :BS_CONTROLLER ERROR: {BSC00@bsc.cpp:666} StateWork event processing took too much time Type# 2146435078 Duration# 0.249331s 2025-05-07T08:49:34.496421Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-05-07T08:49:34.703444Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:49:34.836643Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:49:34.836834Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:49:34.848956Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:49:35.222230Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-05-07T08:49:38.521129Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:927:2769], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:38.521246Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:938:2774], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:38.537223Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:38.583582Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480 2025-05-07T08:49:38.647769Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037888 not found 2025-05-07T08:49:38.819812Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:941:2777], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-05-07T08:49:38.917470Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:1003:2819] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:49:41.498140Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715661. Ctx: { TraceId: 01jtmyzap26t5v693e2zccjfvw, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OGVlNDUyMTItOTNmNzhlMjYtZGRhNmM2NDItYWYwMTU4OTQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:49:41.859586Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715662. Ctx: { TraceId: 01jtmyzdww645w4qthh2dbpyb9, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MjJjOTY0Y2YtOGJhNGM3YzEtOGY1ZjViOC05NjY1OWYwMA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:49:43.344410Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715663. Ctx: { TraceId: 01jtmyzed4fyyae5q5gtvb8b9j, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OTkxZDk2ZTMtNmYyMDA2N2YtMWYzYzVkNzQtMmM4ZGExZjc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_cache/unittest >> TCacheTest::MigrationDeletedPathNavigate [GOOD] Test command err: 2025-05-07T08:49:14.121335Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:49:14.121412Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TestModificationResults wait txId: 1 2025-05-07T08:49:14.346281Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 FAKE_COORDINATOR: Erasing txId 1 TestModificationResult got TxId: 1, wait until txId: 1 2025-05-07T08:49:14.359051Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 65543, Sender [1:174:2170], Recipient [1:70:2109]: NActors::TEvents::TEvPoison 2025-05-07T08:49:14.359820Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 Leader for TabletID 72057594046678944 is [1:70:2109] sender: [1:175:2067] recipient: [1:46:2093] Leader for TabletID 72057594046678944 is [1:70:2109] sender: [1:178:2067] recipient: [1:24:2071] Leader for TabletID 72057594046678944 is [1:70:2109] sender: [1:179:2067] recipient: [1:177:2171] Leader for TabletID 72057594046678944 is [1:180:2172] sender: [1:181:2067] recipient: [1:177:2171] 2025-05-07T08:49:14.367013Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4779: StateInit, received event# 268828672, Sender [1:177:2171], Recipient [1:180:2172]: NKikimr::TEvTablet::TEvBoot 2025-05-07T08:49:14.386355Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4779: StateInit, received event# 268828673, Sender [1:177:2171], 
Recipient [1:180:2172]: NKikimr::TEvTablet::TEvRestored 2025-05-07T08:49:14.386549Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4779: StateInit, received event# 268828684, Sender [1:177:2171], Recipient [1:180:2172]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-05-07T08:49:14.391980Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:49:14.392139Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:49:14.392184Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:49:14.392224Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:49:14.392290Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:49:14.392327Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:49:14.392417Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:49:14.392505Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:49:14.393301Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:49:14.393739Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:49:14.409169Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:49:14.410488Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:49:14.410667Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:49:14.410881Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4779: StateInit, received event# 65542, Sender [1:7238242728502259555:7369577], Recipient [1:180:2172]: TSystem::Undelivered 2025-05-07T08:49:14.410921Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4781: StateInit, processing event TEvents::TEvUndelivered 2025-05-07T08:49:14.410978Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:49:14.411014Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:49:14.411236Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__root_data_erasure_manager.cpp:92: [RootDataErasureManager] Clear operation queue and active pipes 2025-05-07T08:49:14.411273Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 
2025-05-07T08:49:14.412018Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1383: TTxInit for Paths, read records: 1, at schemeshard: 72057594046678944 2025-05-07T08:49:14.412140Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1457: TTxInit for UserAttributes, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:49:14.412248Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1483: TTxInit for UserAttributesAlterData, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:49:14.412653Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1785: TTxInit for Tables, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:49:14.412787Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__root_data_erasure_manager.cpp:450: [RootDataErasureManager] Restore: Generation# 0, Status# 0, WakeupInterval# 604800 s, NumberDataErasureTenantsInRunning# 0 2025-05-07T08:49:14.413037Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2011: TTxInit for Columns, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:49:14.413117Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2071: TTxInit for ColumnsAlters, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:49:14.413323Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2129: TTxInit for Shards, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:49:14.413449Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2215: TTxInit for TablePartitions, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:49:14.413520Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2281: TTxInit for TableShardPartitionConfigs, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:49:14.413703Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2431: TTxInit for ChannelsBinding, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:49:14.414018Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2810: TTxInit for TableIndexes, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:49:14.414137Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2889: TTxInit for TableIndexKeys, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:49:14.414524Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3387: TTxInit for KesusInfos, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:49:14.414607Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3423: TTxInit for KesusAlters, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:49:14.414783Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3637: TTxInit for TxShards, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:49:14.414894Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3782: TTxInit for ShardToDelete, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:49:14.415009Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3799: TTxInit for BackupSettings, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:49:14.415206Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3959: TTxInit for ShardBackupStatus, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:49:14.415318Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3975: TTxInit for CompletedBackup, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:49:14.415427Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4260: 
TTxInit for Publications, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:49:14.415683Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4558: IndexBuild , records: 0, at schemeshard: 72057594046678944 2025-05-07T08:49:14.415899Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4705: SnapshotTables: snapshots: 0 tables: 0, at schemeshard: 72057594046678944 2025-05-07T08:49:14.415964Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4732: SnapshotSteps: snapshots: 0, at schemeshard: 72057594046678944 2025-05-07T08:49:14.416028Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4759: LongLocks: records: 0, at schemeshard: 72057594046678944 2025-05-07T08:49:14.416292Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046678944 2025-05-07T08:49:14.417563Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-05-07T08:49:14.417694Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-05-07T08:49:14.418221Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 2146435083, Sender [1:180:2172], Recipient [1:180:2172]: NKikimr::NSchemeShard::TEvPrivate::TEvServerlessStorageBilling 2025-05-07T08:49:14.418272Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4903: StateWork, processing event TEvPrivate::TEvServerlessStorageBilling 2025-05-07T08:49:14.418960Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:49:14.419019Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:49:14.419133Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:49:14.419190Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:49:14.419232Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:49:14.419263Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-05-07T08:49:14.419570Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 274399233, Sender [1:196:2172], Recipient [1:180:2172]: NKikimr::TEvTxAllocatorClient::TEvAllocateResult 2025-05-07T08:49:14.419610Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4993: StateWork, processing event TEvTxA ... 
inPathId: 2 TabletID: 72075186233409549 Generation: 2 EffectiveACLVersion: 0 SubdomainVersion: 3 UserAttributesVersion: 1 TenantHive: 18446744073709551615 TenantSysViewProcessor: 18446744073709551615 TenantRootACL: "" TenantStatisticsAggregator: 18446744073709551615 TenantGraphShard: 18446744073709551615 2025-05-07T08:49:15.175270Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__sync_update_tenants.cpp:26: TTxSyncTenant DoExecute, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-05-07T08:49:15.175328Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046678944 2025-05-07T08:49:15.175404Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:567: DoUpdateTenant no hasChanges, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], tenantLink: TSubDomainsLinks::TLink { DomainKey: [OwnerId: 72057594046678944, LocalPathId: 2], Generation: 2, ActorId:[1:424:2337], EffectiveACLVersion: 0, SubdomainVersion: 3, UserAttributesVersion: 1, TenantHive: 18446744073709551615, TenantSysViewProcessor: 18446744073709551615, TenantStatisticsAggregator: 18446744073709551615, TenantGraphShard: 18446744073709551615, TenantRootACL: }, subDomain->GetVersion(): 3, actualEffectiveACLVersion: 0, actualUserAttrsVersion: 1, tenantHive: 18446744073709551615, tenantSysViewProcessor: 18446744073709551615, at schemeshard: 72057594046678944 2025-05-07T08:49:15.175470Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__sync_update_tenants.cpp:36: TTxSyncTenant DoComplete, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-05-07T08:49:15.175498Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 { Path: Root/USER_0/DirA TableId: [72057594046678944:3:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046678944, LocalPathId: 2] ResourcesDomainKey: [OwnerId: 72057594046678944, LocalPathId: 2] Params { Version: 3 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 SchemeShard: 72075186233409549 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-05-07T08:49:15.447216Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:49:15.447280Z node 2 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TestModificationResults wait txId: 1 2025-05-07T08:49:15.502084Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 FAKE_COORDINATOR: Erasing txId 1 TestModificationResult got TxId: 1, wait until txId: 1 Leader for TabletID 72057594046678944 is [2:70:2109] sender: [2:175:2067] recipient: [2:46:2093] Leader for TabletID 72057594046678944 is [2:70:2109] sender: [2:178:2067] recipient: [2:24:2071] Leader for TabletID 72057594046678944 is [2:70:2109] sender: [2:179:2067] recipient: [2:177:2171] Leader for TabletID 72057594046678944 is 
[2:180:2172] sender: [2:181:2067] recipient: [2:177:2171] 2025-05-07T08:49:15.549613Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:49:15.549667Z node 2 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TestModificationResults wait txId: 101 Leader for TabletID 72057594046678944 is [2:180:2172] sender: [2:211:2067] recipient: [2:24:2071] 2025-05-07T08:49:15.577783Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateSubDomain, opId: 101:0, at schemeshard: 72057594046678944 FAKE_COORDINATOR: Add transaction: 101 at step: 5000002 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000001 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 101 at step: 5000002 FAKE_COORDINATOR: Erasing txId 101 TestModificationResult got TxId: 101, wait until txId: 101 TestModificationResults wait txId: 102 2025-05-07T08:49:15.585763Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 102:0, at schemeshard: 72057594046678944 Leader for TabletID 72075186233409546 is [0:0:0] sender: [2:247:2067] recipient: [2:238:2213] IGNORE Leader for TabletID 72075186233409546 is [0:0:0] sender: [2:247:2067] recipient: [2:238:2213] Leader for TabletID 72075186233409547 is [0:0:0] sender: [2:248:2067] recipient: [2:241:2215] IGNORE Leader for TabletID 72075186233409547 is [0:0:0] sender: [2:248:2067] recipient: [2:241:2215] Leader for TabletID 72075186233409546 is [0:0:0] sender: [2:251:2067] recipient: [2:24:2071] IGNORE Leader for TabletID 72075186233409546 is [0:0:0] sender: [2:251:2067] recipient: [2:24:2071] Leader for TabletID 72075186233409547 is [0:0:0] sender: [2:252:2067] recipient: [2:24:2071] IGNORE Leader for TabletID 72075186233409547 is [0:0:0] sender: [2:252:2067] recipient: [2:24:2071] Leader for TabletID 72075186233409546 is [2:250:2219] sender: [2:253:2067] recipient: [2:238:2213] Leader for TabletID 72075186233409547 is [2:255:2221] sender: [2:256:2067] recipient: [2:241:2215] TestModificationResult got TxId: 102, wait until txId: 102 TestWaitNotification wait txId: 101 TestWaitNotification wait txId: 102 2025-05-07T08:49:15.632356Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 101 Leader for TabletID 72075186233409546 is [2:250:2219] sender: [2:289:2067] recipient: [2:24:2071] Leader for TabletID 72075186233409547 is [2:255:2221] sender: [2:290:2067] recipient: [2:24:2071] FAKE_COORDINATOR: Add transaction: 102 at step: 5000003 FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000002 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 102 at step: 5000003 FAKE_COORDINATOR: Erasing txId 102 TestWaitNotification: OK eventTxId 102 TestModificationResults wait txId: 103 TestModificationResult got TxId: 103, wait until txId: 103 TestModificationResults wait txId: 104 2025-05-07T08:49:15.694104Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 104:0, at schemeshard: 72057594046678944 TestModificationResult got TxId: 104, wait until txId: 104 TestWaitNotification wait txId: 103 
TestWaitNotification wait txId: 104 Leader for TabletID 72075186233409548 is [0:0:0] sender: [2:340:2067] recipient: [2:336:2285] IGNORE Leader for TabletID 72075186233409548 is [0:0:0] sender: [2:340:2067] recipient: [2:336:2285] Leader for TabletID 72075186233409548 is [0:0:0] sender: [2:341:2067] recipient: [2:24:2071] IGNORE Leader for TabletID 72075186233409548 is [0:0:0] sender: [2:341:2067] recipient: [2:24:2071] Leader for TabletID 72075186233409548 is [2:343:2289] sender: [2:344:2067] recipient: [2:336:2285] Leader for TabletID 72075186233409548 is [2:343:2289] sender: [2:345:2067] recipient: [2:24:2071] TestWaitNotification: OK eventTxId 103 TestWaitNotification: OK eventTxId 104 TestModificationResults wait txId: 105 2025-05-07T08:49:15.918271Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpUpgradeSubDomain, opId: 105:0, at schemeshard: 72057594046678944 Leader for TabletID 72075186233409549 is [0:0:0] sender: [2:420:2067] recipient: [2:415:2333] IGNORE Leader for TabletID 72075186233409549 is [0:0:0] sender: [2:420:2067] recipient: [2:415:2333] Leader for TabletID 72075186233409549 is [0:0:0] sender: [2:422:2067] recipient: [2:24:2071] IGNORE Leader for TabletID 72075186233409549 is [0:0:0] sender: [2:422:2067] recipient: [2:24:2071] Leader for TabletID 72075186233409549 is [2:423:2337] sender: [2:424:2067] recipient: [2:415:2333] 2025-05-07T08:49:15.980740Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:49:15.980819Z node 2 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TestModificationResult got TxId: 105, wait until txId: 105 TestWaitNotification wait txId: 105 Leader for TabletID 72075186233409549 is [2:423:2337] sender: [2:451:2067] recipient: [2:24:2071] TestWaitNotification: OK eventTxId 105 TestModificationResults wait txId: 106 2025-05-07T08:49:16.047495Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:5351: Mark as Migrated path id [OwnerId: 72057594046678944, LocalPathId: 3] 2025-05-07T08:49:16.047588Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:5351: Mark as Migrated path id [OwnerId: 72057594046678944, LocalPathId: 4] 2025-05-07T08:49:16.048027Z node 2 :FLAT_TX_SCHEMESHARD ERROR: schemeshard__operation_upgrade_subdomain.cpp:1464: TWait ProgressState, dependent transaction: 106, parent transaction: 105, at schemeshard: 72057594046678944 2025-05-07T08:49:16.048186Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpUpgradeSubDomainDecision, opId: 106:0, at schemeshard: 72057594046678944 TestModificationResult got TxId: 106, wait until txId: 106 TestWaitNotification wait txId: 106 2025-05-07T08:49:16.071768Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:5812: Got TEvUpdateAck for unknown txId 105, at schemeshard: 72057594046678944 2025-05-07T08:49:16.072538Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:5812: Got TEvUpdateAck for unknown txId 105, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 106 TestModificationResults wait txId: 107 TestModificationResult got TxId: 107, wait until txId: 107 TestWaitNotification wait txId: 107 TestWaitNotification: OK eventTxId 107 TestModificationResults wait txId: 108 2025-05-07T08:49:16.127092Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation 
part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 108:0, at schemeshard: 72075186233409549 Leader for TabletID 72075186233409550 is [0:0:0] sender: [2:556:2067] recipient: [2:552:2441] IGNORE Leader for TabletID 72075186233409550 is [0:0:0] sender: [2:556:2067] recipient: [2:552:2441] Leader for TabletID 72075186233409550 is [0:0:0] sender: [2:557:2067] recipient: [2:24:2071] IGNORE Leader for TabletID 72075186233409550 is [0:0:0] sender: [2:557:2067] recipient: [2:24:2071] Leader for TabletID 72075186233409550 is [2:559:2445] sender: [2:560:2067] recipient: [2:552:2441] Leader for TabletID 72075186233409550 is [2:559:2445] sender: [2:561:2067] recipient: [2:24:2071] TestModificationResult got TxId: 108, wait until txId: 108 TestWaitNotification wait txId: 108 Forgetting tablet 72075186233409548 TestWaitNotification: OK eventTxId 108 2025-05-07T08:49:18.650269Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7261: Cannot get console configs 2025-05-07T08:49:18.650346Z node 2 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:49:18.726280Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7261: Cannot get console configs 2025-05-07T08:49:18.726346Z node 2 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded >> KqpSystemView::PartitionStatsRange1 >> KqpSystemView::ReadSuccess >> KqpSysColV1::SelectRange >> TStorageTenantTest::CreateSolomonInsideSubDomain [GOOD] |88.8%| [TA] $(B)/ydb/core/tx/datashard/ut_trace/test-results/unittest/{meta.json ... results_accumulator.log} |88.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> KqpSystemView::QueryStatsScan ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_proxy/ut_storage_tenant/unittest >> TStorageTenantTest::CreateTableInsideSubDomain [GOOD] Test command err: 2025-05-07T08:49:38.705460Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501623288610457670:2074];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:49:38.705530Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/004982/r3tmp/tmpfBZczF/pdisk_1.dat 2025-05-07T08:49:39.132892Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:49:39.141068Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:49:39.141200Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:49:39.159408Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:9176 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 2025-05-07T08:49:39.346123Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7501623288610457907:2140] Handle TEvNavigate describe path dc-1 2025-05-07T08:49:39.346210Z node 1 :TX_PROXY DEBUG: describe.cpp:227: Actor# [1:7501623292905425640:2442] HANDLE EvNavigateScheme dc-1 2025-05-07T08:49:39.346346Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2663: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7501623288610458000:2190], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-05-07T08:49:39.346419Z node 1 :TX_PROXY_SCHEME_CACHE TRACE: cache.cpp:2283: Create subscriber: self# [1:7501623288610458000:2190], path# /dc-1, domainOwnerId# 72057594046644480 2025-05-07T08:49:39.346621Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:960: [main][1:7501623292905425641:2443][/dc-1] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-05-07T08:49:39.353783Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7501623288610457548:2051] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7501623292905425645:2443] 2025-05-07T08:49:39.353868Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7501623288610457548:2051] Subscribe: subscriber# [1:7501623292905425645:2443], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-05-07T08:49:39.353955Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7501623288610457554:2057] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7501623292905425647:2443] 2025-05-07T08:49:39.353993Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7501623288610457554:2057] Subscribe: subscriber# [1:7501623292905425647:2443], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-05-07T08:49:39.354062Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7501623292905425645:2443][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7501623288610457548:2051] 2025-05-07T08:49:39.354096Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7501623292905425647:2443][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7501623288610457554:2057] 2025-05-07T08:49:39.354145Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:807: [main][1:7501623292905425641:2443][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7501623292905425642:2443] 2025-05-07T08:49:39.354174Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:807: [main][1:7501623292905425641:2443][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7501623292905425644:2443] 2025-05-07T08:49:39.354248Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:836: [main][1:7501623292905425641:2443][/dc-1] Set up state: owner# [1:7501623288610458000:2190], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 
72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-05-07T08:49:39.354416Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7501623292905425645:2443][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7501623292905425642:2443], cookie# 1 2025-05-07T08:49:39.354449Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7501623292905425646:2443][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7501623292905425643:2443], cookie# 1 2025-05-07T08:49:39.354468Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7501623292905425647:2443][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7501623292905425644:2443], cookie# 1 2025-05-07T08:49:39.358041Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7501623288610457551:2054] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7501623292905425646:2443] 2025-05-07T08:49:39.358114Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7501623288610457551:2054] Subscribe: subscriber# [1:7501623292905425646:2443], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-05-07T08:49:39.358212Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7501623288610457551:2054] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7501623292905425646:2443], cookie# 1 2025-05-07T08:49:39.358270Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7501623288610457548:2051] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7501623292905425645:2443] 2025-05-07T08:49:39.358294Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7501623288610457548:2051] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7501623292905425645:2443], cookie# 1 2025-05-07T08:49:39.358320Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7501623288610457554:2057] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7501623292905425647:2443] 2025-05-07T08:49:39.358365Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7501623288610457554:2057] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7501623292905425647:2443], cookie# 1 2025-05-07T08:49:39.358402Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7501623292905425646:2443][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7501623288610457551:2054] 2025-05-07T08:49:39.358437Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7501623292905425646:2443][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7501623288610457551:2054], cookie# 1 2025-05-07T08:49:39.358452Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7501623292905425645:2443][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7501623288610457548:2051], cookie# 1 2025-05-07T08:49:39.358467Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7501623292905425647:2443][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7501623288610457554:2057], cookie# 1 2025-05-07T08:49:39.358513Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:807: 
[main][1:7501623292905425641:2443][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7501623292905425643:2443] 2025-05-07T08:49:39.358585Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:854: [main][1:7501623292905425641:2443][/dc-1] Path was already updated: owner# [1:7501623288610458000:2190], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-05-07T08:49:39.358618Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:892: [main][1:7501623292905425641:2443][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7501623292905425643:2443], cookie# 1 2025-05-07T08:49:39.358700Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:932: [main][1:7501623292905425641:2443][/dc-1] Sync is in progress: cookie# 1, size# 3, half# 1, successes# 1, faulires# 0 2025-05-07T08:49:39.358720Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:892: [main][1:7501623292905425641:2443][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7501623292905425642:2443], cookie# 1 2025-05-07T08:49:39.358742Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:946: [main][1:7501623292905425641:2443][/dc-1] Sync is done: cookie# 1, size# 3, half# 1, successes# 2, faulires# 0, partial# 0 2025-05-07T08:49:39.358765Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:892: [main][1:7501623292905425641:2443][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7501623292905425644:2443], cookie# 1 2025-05-07T08:49:39.358781Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:906: [main][1:7501623292905425641:2443][/dc-1] Unexpected sync response: sender# [1:7501623292905425644:2443], cookie# 1 2025-05-07T08:49:39.358797Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7501623288610457551:2054] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7501623292905425646:2443] 2025-05-07T08:49:39.431722Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2550: HandleNotify: self# [1:7501623288610458000:2190], notify# NKikimr::TSchemeBoardEvents::TEvNotifyUpdate { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DescribeSchemeResult: Status: StatusSuccess Path: "/dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 
72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLi ... hemeBoard.TEvSubscribe { Path: /dc-1/USER_0/.metadata/initialization/migrations DomainOwnerId: 72057594046644480 }: sender# [3:7501623302304020121:2350] 2025-05-07T08:49:41.238682Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [1:7501623288610457551:2054] Upsert description: path# /dc-1/USER_0/.metadata/initialization/migrations 2025-05-07T08:49:41.238707Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7501623288610457554:2057] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1/USER_0/.metadata/initialization/migrations DomainOwnerId: 72057594046644480 }: sender# [3:7501623302304020122:2350] 2025-05-07T08:49:41.238716Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [1:7501623288610457554:2057] Upsert description: path# /dc-1/USER_0/.metadata/initialization/migrations 2025-05-07T08:49:41.238730Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7501623288610457551:2054] Subscribe: subscriber# [3:7501623302304020121:2350], path# /dc-1/USER_0/.metadata/initialization/migrations, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-05-07T08:49:41.238747Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7501623288610457554:2057] Subscribe: subscriber# [3:7501623302304020122:2350], path# /dc-1/USER_0/.metadata/initialization/migrations, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-05-07T08:49:41.239430Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7501623288610457548:2051] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [3:7501623302304020120:2350] 2025-05-07T08:49:41.239436Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7501623288610457551:2054] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [3:7501623302304020121:2350] 2025-05-07T08:49:41.239461Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7501623288610457554:2057] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [3:7501623302304020122:2350] 2025-05-07T08:49:41.238977Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][3:7501623302304020120:2350][/dc-1/USER_0/.metadata/initialization/migrations] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/USER_0/.metadata/initialization/migrations Version: 0 }: sender# [1:7501623288610457548:2051] 2025-05-07T08:49:41.239011Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][3:7501623302304020121:2350][/dc-1/USER_0/.metadata/initialization/migrations] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/USER_0/.metadata/initialization/migrations Version: 0 }: sender# [1:7501623288610457551:2054] 2025-05-07T08:49:41.239049Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][3:7501623302304020122:2350][/dc-1/USER_0/.metadata/initialization/migrations] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/USER_0/.metadata/initialization/migrations Version: 0 }: sender# [1:7501623288610457554:2057] 2025-05-07T08:49:41.239085Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:807: [main][3:7501623302304020116:2350][/dc-1/USER_0/.metadata/initialization/migrations] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/USER_0/.metadata/initialization/migrations Version: 0 }: sender# [3:7501623302304020117:2350] 2025-05-07T08:49:41.239151Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: 
subscriber.cpp:807: [main][3:7501623302304020116:2350][/dc-1/USER_0/.metadata/initialization/migrations] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/USER_0/.metadata/initialization/migrations Version: 0 }: sender# [3:7501623302304020118:2350] 2025-05-07T08:49:41.239187Z node 3 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:836: [main][3:7501623302304020116:2350][/dc-1/USER_0/.metadata/initialization/migrations] Set up state: owner# [3:7501623298009052445:2108], state# { Deleted: 1 Strong: 1 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-05-07T08:49:41.239218Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:807: [main][3:7501623302304020116:2350][/dc-1/USER_0/.metadata/initialization/migrations] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/USER_0/.metadata/initialization/migrations Version: 0 }: sender# [3:7501623302304020119:2350] 2025-05-07T08:49:41.239239Z node 3 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:854: [main][3:7501623302304020116:2350][/dc-1/USER_0/.metadata/initialization/migrations] Ignore empty state: owner# [3:7501623298009052445:2108], state# { Deleted: 1 Strong: 1 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 1 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-05-07T08:49:41.239321Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2550: HandleNotify: self# [3:7501623298009052445:2108], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/USER_0/.metadata/initialization/migrations PathId: Strong: 1 } 2025-05-07T08:49:41.239445Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2425: ResolveCacheItem: self# [3:7501623298009052445:2108], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/USER_0/.metadata/initialization/migrations PathId: Strong: 1 }, by path# { Subscriber: { Subscriber: [3:7501623302304020116:2350] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 0 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-05-07T08:49:41.239547Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1818: FillEntry for TNavigate: self# [3:7501623298009052445:2108], cacheItem# { Subscriber: { Subscriber: [3:7501623302304020116:2350] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-05-07T08:49:41.239631Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:264: Send result: self# [3:7501623302304020123:2351], recipient# [3:7501623302304020106:2324], result# { ErrorCount: 1 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-05-07T08:49:41.258007Z node 1 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 3 
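[editor's note] The subscriber trace above shows the scheme board's majority-sync protocol in action: a sync with cookie# 1 fans out to size# 3 replicas, is reported "Sync is in progress" while successes are at or below half# (1 of 3), becomes "Sync is done" once successes exceed half# (2 of 3), and the third reply, arriving after the quorum was already reached, is logged as an "Unexpected sync response". Below is a minimal illustrative sketch of that quorum accounting; TSyncState and OnReply are assumed names for illustration, not the actual NKikimr implementation.

#include <cstddef>
#include <iostream>

// Hypothetical sketch of majority-quorum sync accounting, modeled on the
// "Sync is in progress / Sync is done / Unexpected sync response" log lines.
struct TSyncState {
    std::size_t Size;           // replicas polled (size# in the log)
    std::size_t Half;           // Size / 2 (half# in the log)
    std::size_t Successes = 0;
    std::size_t Failures = 0;
    bool Done = false;

    void OnReply(bool ok) {
        if (Done) {
            // A reply after the quorum was reached is ignored, matching
            // "Unexpected sync response" in the trace above.
            std::cout << "Unexpected sync response\n";
            return;
        }
        ok ? ++Successes : ++Failures;
        if (Successes > Half) {         // 2 of 3 replicas answered -> done
            Done = true;
            std::cout << "Sync is done: successes# " << Successes << "\n";
        } else if (Failures > Half) {   // majority failed -> partial sync
            Done = true;
            std::cout << "Sync failed (partial)\n";
        } else {
            std::cout << "Sync is in progress: successes# " << Successes << "\n";
        }
    }
};

int main() {
    TSyncState sync{/*Size=*/3, /*Half=*/1};
    sync.OnReply(true);  // first replica: still in progress
    sync.OnReply(true);  // second replica: quorum reached, sync done
    sync.OnReply(true);  // third replica: unexpected, ignored
}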
2025-05-07T08:49:41.258474Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1075: [1:7501623288610457548:2051] Handle NKikimrSchemeBoard.TEvUnsubscribe { Path: /dc-1/USER_0 }: sender# [3:7501623298009052438:2106] 2025-05-07T08:49:41.258631Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:662: [1:7501623288610457548:2051] Unsubscribe: subscriber# [3:7501623298009052438:2106], path# /dc-1/USER_0 2025-05-07T08:49:41.258714Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1075: [1:7501623288610457551:2054] Handle NKikimrSchemeBoard.TEvUnsubscribe { Path: /dc-1/USER_0 }: sender# [3:7501623298009052439:2106] 2025-05-07T08:49:41.258734Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:662: [1:7501623288610457551:2054] Unsubscribe: subscriber# [3:7501623298009052439:2106], path# /dc-1/USER_0 2025-05-07T08:49:41.258766Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1075: [1:7501623288610457554:2057] Handle NKikimrSchemeBoard.TEvUnsubscribe { Path: /dc-1/USER_0 }: sender# [3:7501623298009052440:2106] 2025-05-07T08:49:41.258775Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:662: [1:7501623288610457554:2057] Unsubscribe: subscriber# [3:7501623298009052440:2106], path# /dc-1/USER_0 2025-05-07T08:49:41.259537Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-05-07T08:49:42.242453Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2663: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [3:7501623298009052445:2108], request# { ErrorCount: 0 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-05-07T08:49:42.242627Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1818: FillEntry for TNavigate: self# [3:7501623298009052445:2108], cacheItem# { Subscriber: { Subscriber: [3:7501623302304020116:2350] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-05-07T08:49:42.242730Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:264: Send result: self# [3:7501623306598987468:2355], recipient# [3:7501623306598987467:2325], result# { ErrorCount: 1 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-05-07T08:49:43.242640Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2663: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [3:7501623298009052445:2108], request# { ErrorCount: 0 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: 
true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-05-07T08:49:43.242804Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1818: FillEntry for TNavigate: self# [3:7501623298009052445:2108], cacheItem# { Subscriber: { Subscriber: [3:7501623302304020116:2350] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-05-07T08:49:43.242953Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:264: Send result: self# [3:7501623310893954766:2356], recipient# [3:7501623310893954765:2326], result# { ErrorCount: 1 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } >> KqpSysColV1::InnerJoinTables ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_proxy/ut_storage_tenant/unittest >> TStorageTenantTest::LsLs [GOOD] Test command err: 2025-05-07T08:49:39.409184Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501623290335389354:2074];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:49:39.409242Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-05-07T08:49:39.476821Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7501623293187169295:2072];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:49:39.476950Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00497b/r3tmp/tmpOC8LEq/pdisk_1.dat 2025-05-07T08:49:39.885527Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:49:39.905719Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:49:39.905834Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:49:39.907115Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:49:39.907224Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:49:39.920580Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-05-07T08:49:39.920706Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 
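[editor's note] The HIVE warnings above walk each node through its volatile-state lifecycle: Unknown -> Disconnected -> Connecting -> Connected on startup, and back to Disconnected when a node is killed (as with node 3 earlier in the log). The following is a small illustrative sketch of such a transition table; EVolatileState and Transition are assumed names, not the real Hive types.

#include <cstdio>

// Illustrative sketch of the node lifecycle visible in the HIVE warnings:
// Unknown -> Disconnected -> Connecting -> Connected, then Disconnected
// again when the node is lost.
enum class EVolatileState { Unknown, Disconnected, Connecting, Connected };

const char* ToString(EVolatileState s) {
    switch (s) {
        case EVolatileState::Unknown:      return "Unknown";
        case EVolatileState::Disconnected: return "Disconnected";
        case EVolatileState::Connecting:   return "Connecting";
        case EVolatileState::Connected:    return "Connected";
    }
    return "?";
}

void Transition(EVolatileState& state, EVolatileState next) {
    // Mirrors the log format "VolatileState: <from> -> <to>".
    std::printf("VolatileState: %s -> %s\n", ToString(state), ToString(next));
    state = next;
}

int main() {
    EVolatileState node = EVolatileState::Unknown;
    Transition(node, EVolatileState::Disconnected); // node first registered
    Transition(node, EVolatileState::Connecting);   // interconnect handshake
    Transition(node, EVolatileState::Connected);    // TEvNodeConnected received
    Transition(node, EVolatileState::Disconnected); // node killed / lost
}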
2025-05-07T08:49:39.921710Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:4691 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 2025-05-07T08:49:40.226222Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7501623290335389583:2141] Handle TEvNavigate describe path dc-1 2025-05-07T08:49:40.226294Z node 1 :TX_PROXY DEBUG: describe.cpp:227: Actor# [1:7501623294630357326:2449] HANDLE EvNavigateScheme dc-1 2025-05-07T08:49:40.226451Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2663: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7501623290335389606:2154], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-05-07T08:49:40.226483Z node 1 :TX_PROXY_SCHEME_CACHE TRACE: cache.cpp:2283: Create subscriber: self# [1:7501623290335389606:2154], path# /dc-1, domainOwnerId# 72057594046644480 2025-05-07T08:49:40.226718Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:960: [main][1:7501623294630357327:2450][/dc-1] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-05-07T08:49:40.229649Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7501623290335389223:2052] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7501623294630357331:2450] 2025-05-07T08:49:40.229670Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7501623290335389226:2055] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7501623294630357332:2450] 2025-05-07T08:49:40.229729Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7501623290335389223:2052] Subscribe: subscriber# [1:7501623294630357331:2450], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-05-07T08:49:40.229732Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7501623290335389226:2055] Subscribe: subscriber# [1:7501623294630357332:2450], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-05-07T08:49:40.229831Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7501623290335389229:2058] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7501623294630357333:2450] 2025-05-07T08:49:40.229848Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7501623294630357332:2450][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7501623290335389226:2055] 2025-05-07T08:49:40.229875Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7501623290335389229:2058] Subscribe: subscriber# [1:7501623294630357333:2450], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-05-07T08:49:40.229882Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7501623294630357331:2450][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7501623290335389223:2052] 2025-05-07T08:49:40.229909Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7501623294630357333:2450][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 
PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7501623290335389229:2058] 2025-05-07T08:49:40.229914Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7501623290335389226:2055] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7501623294630357332:2450] 2025-05-07T08:49:40.229937Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7501623290335389223:2052] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7501623294630357331:2450] 2025-05-07T08:49:40.229959Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7501623290335389229:2058] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7501623294630357333:2450] 2025-05-07T08:49:40.230000Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:807: [main][1:7501623294630357327:2450][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7501623294630357329:2450] 2025-05-07T08:49:40.230056Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:807: [main][1:7501623294630357327:2450][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7501623294630357328:2450] 2025-05-07T08:49:40.230145Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:836: [main][1:7501623294630357327:2450][/dc-1] Set up state: owner# [1:7501623290335389606:2154], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-05-07T08:49:40.230267Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:807: [main][1:7501623294630357327:2450][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7501623294630357330:2450] 2025-05-07T08:49:40.230319Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:854: [main][1:7501623294630357327:2450][/dc-1] Path was already updated: owner# [1:7501623290335389606:2154], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-05-07T08:49:40.230366Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7501623294630357331:2450][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7501623294630357328:2450], cookie# 1 2025-05-07T08:49:40.230389Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7501623294630357332:2450][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7501623294630357329:2450], cookie# 1 2025-05-07T08:49:40.230403Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7501623294630357333:2450][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7501623294630357330:2450], cookie# 1 2025-05-07T08:49:40.230797Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7501623290335389223:2052] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7501623294630357331:2450], cookie# 1 2025-05-07T08:49:40.230864Z node 
1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7501623290335389226:2055] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7501623294630357332:2450], cookie# 1 2025-05-07T08:49:40.230882Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7501623290335389229:2058] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7501623294630357333:2450], cookie# 1 2025-05-07T08:49:40.230928Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7501623294630357331:2450][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7501623290335389223:2052], cookie# 1 2025-05-07T08:49:40.230975Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7501623294630357332:2450][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7501623290335389226:2055], cookie# 1 2025-05-07T08:49:40.230997Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7501623294630357333:2450][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7501623290335389229:2058], cookie# 1 2025-05-07T08:49:40.231046Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:892: [main][1:7501623294630357327:2450][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7501623294630357328:2450], cookie# 1 2025-05-07T08:49:40.231075Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:932: [main][1:7501623294630357327:2450][/dc-1] Sync is in progress: cookie# 1, size# 3, half# 1, successes# 1, faulires# 0 2025-05-07T08:49:40.231091Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:892: [main][1:7501623294630357327:2450][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7501623294630357329:2450], cookie# 1 2025-05-07T08:49:40.231122Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:946: [main][1:7501623294630357327:2450][/dc-1] Sync is done: cookie# 1, size# 3, half# 1, successes# 2, faulires# 0, partial# 0 2025-05-07T08:49:40.231154Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:892: [main][1:7501623294630357327:2450][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7501623294630357330:2450], cookie# 1 2025-05-07T08:49:40.231198Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:906: [main][1:7501623294630357327:2450][/dc-1] Unexpected sync response: sender# [1:7501623294630357330:2450], cookie# 1 2025-05-07T08:49:40.317450Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2550: HandleNotify: self# [1:7501623290335389606:2154], notify# NKikimr::TSchemeBoardEvents::TEvNotifyUpdate { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DescribeSchemeResult: Status: StatusSuccess Path: "/dc-1" PathDescripti ... 
RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-05-07T08:49:43.531871Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1818: FillEntry for TNavigate: self# [2:7501623293187169536:2107], cacheItem# { Subscriber: { Subscriber: [2:7501623310367038776:2117] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-05-07T08:49:43.531925Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1818: FillEntry for TNavigate: self# [2:7501623293187169536:2107], cacheItem# { Subscriber: { Subscriber: [2:7501623310367038777:2118] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-05-07T08:49:43.532037Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:264: Send result: self# [2:7501623310367038797:2123], recipient# [2:7501623310367038775:2306], result# { ErrorCount: 2 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo },{ Path: dc-1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo }] } 2025-05-07T08:49:43.532454Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:7501623310367038775:2306], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:49:43.823914Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2663: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [2:7501623293187169536:2107], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo },{ Path: dc-1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-05-07T08:49:43.824079Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1818: FillEntry for TNavigate: self# [2:7501623293187169536:2107], cacheItem# { Subscriber: { Subscriber: [2:7501623310367038776:2117] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-05-07T08:49:43.824166Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1818: FillEntry for TNavigate: self# [2:7501623293187169536:2107], cacheItem# { Subscriber: { Subscriber: [2:7501623310367038777:2118] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-05-07T08:49:43.824270Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:264: Send result: self# [2:7501623310367038798:2124], recipient# [2:7501623310367038775:2306], result# { ErrorCount: 2 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo },{ Path: dc-1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo }] } 2025-05-07T08:49:43.824424Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:7501623310367038775:2306], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:49:44.269752Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2663: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [2:7501623293187169536:2107], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-05-07T08:49:44.269923Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1818: FillEntry for TNavigate: self# [2:7501623293187169536:2107], cacheItem# { Subscriber: { Subscriber: [2:7501623310367038760:2116] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-05-07T08:49:44.270058Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:264: Send result: self# [2:7501623314662006096:2125], recipient# [2:7501623314662006095:2309], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo }] } 2025-05-07T08:49:44.273674Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:49:44.436543Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2663: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [2:7501623293187169536:2107], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo },{ Path: dc-1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-05-07T08:49:44.436703Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1818: FillEntry for TNavigate: self# [2:7501623293187169536:2107], cacheItem# { Subscriber: { Subscriber: [2:7501623310367038776:2117] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true 
ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-05-07T08:49:44.436757Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1818: FillEntry for TNavigate: self# [2:7501623293187169536:2107], cacheItem# { Subscriber: { Subscriber: [2:7501623310367038777:2118] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-05-07T08:49:44.436882Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:264: Send result: self# [2:7501623314662006097:2126], recipient# [2:7501623310367038775:2306], result# { ErrorCount: 2 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo },{ Path: dc-1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo }] } 2025-05-07T08:49:44.437339Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:7501623310367038775:2306], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:49:44.477888Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7501623293187169295:2072];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:49:44.477947Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=timeout; |88.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> KqpSystemView::PartitionStatsRange2 >> TSchemeshardBackgroundCleaningTest::SchemeshardBackgroundCleaningTestSimpleDrop [GOOD] >> TSchemeshardBackgroundCleaningTest::SchemeshardBackgroundCleaningTestSimpleDropIndex >> TSchemeshardBackgroundCleaningTest::SchemeshardBackgroundCleaningTestCreateCleanWithRetry [GOOD] >> TSchemeshardBackgroundCleaningTest::SchemeshardBackgroundCleaningTestCreateCleanManyTables >> Viewer::Cluster10000Tablets [GOOD] >> Viewer::FuzzySearcherLimit1OutOf4 [GOOD] >> Viewer::FuzzySearcherLimit2OutOf4 [GOOD] >> Viewer::ExecuteQueryDoesntExecuteSchemeOperationsInsideTransation ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_proxy/ut_storage_tenant/unittest >> TStorageTenantTest::CreateSolomonInsideSubDomain [GOOD] Test command err: 2025-05-07T08:49:39.741413Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501623291271117242:2073];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:49:39.741549Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00496e/r3tmp/tmpaHasK2/pdisk_1.dat 2025-05-07T08:49:40.294705Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:49:40.294827Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:49:40.304799Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:49:40.339132Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TClient is connected to server localhost:5642 WaitRootIsUp 'dc-1'... 
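[editor's note] The KQP workload service lines above show a cleanup actor repeatedly rescheduling itself while path resolution keeps returning Status: LookupError ("Scheduled retry for error: Retry LookupError for table .metadata/workload_manager/delayed_requests"), until the table either resolves or the actor times out. A hedged sketch of that retry loop follows; ResolveTable, EStatus, and the exponential backoff policy are assumptions for illustration, not the actual actor logic.

#include <chrono>
#include <iostream>
#include <string>
#include <thread>

// Hypothetical retry loop implied by the repeated "Scheduled retry" lines:
// re-resolve the path until the scheme cache stops answering LookupError,
// backing off between attempts.
enum class EStatus { Ok, LookupError };

EStatus ResolveTable(const std::string& path, int attempt) {
    // Stand-in for a scheme cache navigation; pretend it recovers on the
    // third attempt.
    return attempt < 3 ? EStatus::LookupError : EStatus::Ok;
}

int main() {
    const std::string path = ".metadata/workload_manager/delayed_requests";
    auto delay = std::chrono::milliseconds(100);
    for (int attempt = 1;; ++attempt) {
        if (ResolveTable(path, attempt) == EStatus::Ok) {
            std::cout << "resolved " << path << " on attempt " << attempt << "\n";
            break;
        }
        std::cout << "Scheduled retry for error: Retry LookupError for table "
                  << path << "\n";
        std::this_thread::sleep_for(delay);
        delay *= 2; // exponential backoff between retries (assumed policy)
    }
}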
TClient::Ls request: dc-1 2025-05-07T08:49:40.525448Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7501623291271117471:2140] Handle TEvNavigate describe path dc-1 2025-05-07T08:49:40.525500Z node 1 :TX_PROXY DEBUG: describe.cpp:227: Actor# [1:7501623295566085211:2444] HANDLE EvNavigateScheme dc-1 2025-05-07T08:49:40.525631Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2663: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7501623291271117494:2153], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-05-07T08:49:40.525656Z node 1 :TX_PROXY_SCHEME_CACHE TRACE: cache.cpp:2283: Create subscriber: self# [1:7501623291271117494:2153], path# /dc-1, domainOwnerId# 72057594046644480 2025-05-07T08:49:40.525814Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:960: [main][1:7501623295566085212:2445][/dc-1] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-05-07T08:49:40.529593Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7501623291271117113:2051] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7501623295566085216:2445] 2025-05-07T08:49:40.529625Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7501623291271117116:2054] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7501623295566085217:2445] 2025-05-07T08:49:40.529677Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7501623291271117113:2051] Subscribe: subscriber# [1:7501623295566085216:2445], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-05-07T08:49:40.529689Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7501623291271117116:2054] Subscribe: subscriber# [1:7501623295566085217:2445], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-05-07T08:49:40.529779Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7501623291271117119:2057] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7501623295566085218:2445] 2025-05-07T08:49:40.529844Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7501623295566085216:2445][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7501623291271117113:2051] 2025-05-07T08:49:40.529880Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7501623295566085217:2445][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7501623291271117116:2054] 2025-05-07T08:49:40.529935Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:807: [main][1:7501623295566085212:2445][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7501623295566085213:2445] 2025-05-07T08:49:40.529987Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:807: [main][1:7501623295566085212:2445][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7501623295566085214:2445] 2025-05-07T08:49:40.530044Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: 
subscriber.cpp:836: [main][1:7501623295566085212:2445][/dc-1] Set up state: owner# [1:7501623291271117494:2153], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-05-07T08:49:40.530206Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7501623295566085216:2445][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7501623295566085213:2445], cookie# 1 2025-05-07T08:49:40.530240Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7501623295566085217:2445][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7501623295566085214:2445], cookie# 1 2025-05-07T08:49:40.530256Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7501623295566085218:2445][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7501623295566085215:2445], cookie# 1 2025-05-07T08:49:40.530296Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7501623291271117113:2051] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7501623295566085216:2445] 2025-05-07T08:49:40.530322Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7501623291271117113:2051] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7501623295566085216:2445], cookie# 1 2025-05-07T08:49:40.530347Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7501623291271117116:2054] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7501623295566085217:2445] 2025-05-07T08:49:40.530363Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7501623291271117116:2054] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7501623295566085217:2445], cookie# 1 2025-05-07T08:49:40.532749Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7501623291271117119:2057] Subscribe: subscriber# [1:7501623295566085218:2445], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-05-07T08:49:40.532880Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7501623291271117119:2057] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7501623295566085218:2445], cookie# 1 2025-05-07T08:49:40.532968Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7501623295566085216:2445][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7501623291271117113:2051], cookie# 1 2025-05-07T08:49:40.533032Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7501623295566085217:2445][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7501623291271117116:2054], cookie# 1 2025-05-07T08:49:40.533071Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7501623295566085218:2445][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7501623291271117119:2057] 2025-05-07T08:49:40.533092Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7501623295566085218:2445][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7501623291271117119:2057], cookie# 1 2025-05-07T08:49:40.533143Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:892: 
[main][1:7501623295566085212:2445][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7501623295566085213:2445], cookie# 1 2025-05-07T08:49:40.533178Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:932: [main][1:7501623295566085212:2445][/dc-1] Sync is in progress: cookie# 1, size# 3, half# 1, successes# 1, faulires# 0 2025-05-07T08:49:40.533208Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:892: [main][1:7501623295566085212:2445][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7501623295566085214:2445], cookie# 1 2025-05-07T08:49:40.533234Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:946: [main][1:7501623295566085212:2445][/dc-1] Sync is done: cookie# 1, size# 3, half# 1, successes# 2, faulires# 0, partial# 0 2025-05-07T08:49:40.533305Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:807: [main][1:7501623295566085212:2445][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7501623295566085215:2445] 2025-05-07T08:49:40.533368Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:854: [main][1:7501623295566085212:2445][/dc-1] Path was already updated: owner# [1:7501623291271117494:2153], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-05-07T08:49:40.533399Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:892: [main][1:7501623295566085212:2445][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7501623295566085215:2445], cookie# 1 2025-05-07T08:49:40.533426Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:906: [main][1:7501623295566085212:2445][/dc-1] Unexpected sync response: sender# [1:7501623295566085215:2445], cookie# 1 2025-05-07T08:49:40.533456Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7501623291271117119:2057] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7501623295566085218:2445] 2025-05-07T08:49:40.585922Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2550: HandleNotify: self# [1:7501623291271117494:2153], notify# NKikimr::TSchemeBoardEvents::TEvNotifyUpdate { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DescribeSchemeResult: Status: StatusSuccess Path: "/dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 
72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLi ... PathId: 2] was 6 2025-05-07T08:49:42.146470Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5898: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 7 ShardOwnerId: 72057594046644480 ShardLocalIdx: 7, at schemeshard: 72057594046644480 2025-05-07T08:49:42.146717Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 4 2025-05-07T08:49:42.146852Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5898: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 4 ShardOwnerId: 72057594046644480 ShardLocalIdx: 4, at schemeshard: 72057594046644480 2025-05-07T08:49:42.146982Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 5 2025-05-07T08:49:42.147123Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5898: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 1 ShardOwnerId: 72057594046644480 ShardLocalIdx: 1, at schemeshard: 72057594046644480 2025-05-07T08:49:42.147281Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 4 2025-05-07T08:49:42.147395Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5898: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 6 ShardOwnerId: 72057594046644480 ShardLocalIdx: 6, at schemeshard: 72057594046644480 2025-05-07T08:49:42.147557Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 3 2025-05-07T08:49:42.147682Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5898: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 3 ShardOwnerId: 72057594046644480 ShardLocalIdx: 3, at schemeshard: 72057594046644480 2025-05-07T08:49:42.147824Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 3 2025-05-07T08:49:42.147934Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5898: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 8 ShardOwnerId: 72057594046644480 ShardLocalIdx: 8, at schemeshard: 72057594046644480 2025-05-07T08:49:42.148059Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 2 2025-05-07T08:49:42.148255Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5898: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 5 ShardOwnerId: 72057594046644480 ShardLocalIdx: 5, at schemeshard: 72057594046644480 2025-05-07T08:49:42.148405Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 1 2025-05-07T08:49:42.148566Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate 
queue, at schemeshard: 72057594046644480 2025-05-07T08:49:42.148588Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046644480, LocalPathId: 3], at schemeshard: 72057594046644480 2025-05-07T08:49:42.148683Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 2 2025-05-07T08:49:42.148958Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:205: TTxCleanDroppedSubDomains Execute, 1 paths in candidate queue, at schemeshard: 72057594046644480 2025-05-07T08:49:42.148998Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: PersistRemoveSubDomain for PathId# [OwnerId: 72057594046644480, LocalPathId: 2], at schemeshard: 72057594046644480 2025-05-07T08:49:42.149118Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 1 2025-05-07T08:49:42.153199Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:2 2025-05-07T08:49:42.153230Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:2 tabletId 72075186224037889 2025-05-07T08:49:42.153305Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:7 2025-05-07T08:49:42.153315Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:7 tabletId 72075186224037894 2025-05-07T08:49:42.153340Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:4 2025-05-07T08:49:42.153346Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:4 tabletId 72075186224037891 2025-05-07T08:49:42.153362Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:1 2025-05-07T08:49:42.153368Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:1 tabletId 72075186224037888 2025-05-07T08:49:42.153401Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:6 2025-05-07T08:49:42.153408Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:6 tabletId 72075186224037893 2025-05-07T08:49:42.153428Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:3 2025-05-07T08:49:42.153434Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:3 tabletId 72075186224037890 2025-05-07T08:49:42.153472Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:8 2025-05-07T08:49:42.153483Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:8 tabletId 72075186224037895 2025-05-07T08:49:42.153566Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:5 2025-05-07T08:49:42.153594Z node 1 :FLAT_TX_SCHEMESHARD 
DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:5 tabletId 72075186224037892 2025-05-07T08:49:42.153631Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 1 candidates, at schemeshard: 72057594046644480 2025-05-07T08:49:42.153681Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046644480 2025-05-07T08:49:42.153755Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046644480 2025-05-07T08:49:42.153816Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046644480, LocalPathId: 2], at schemeshard: 72057594046644480 2025-05-07T08:49:42.153894Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 1 2025-05-07T08:49:42.158086Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046644480 2025-05-07T08:49:42.604839Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2663: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [3:7501623301693682444:2107], request# { ErrorCount: 0 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-05-07T08:49:42.605060Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1818: FillEntry for TNavigate: self# [3:7501623301693682444:2107], cacheItem# { Subscriber: { Subscriber: [3:7501623301693682499:2132] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-05-07T08:49:42.605184Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:264: Send result: self# [3:7501623305988650069:2308], recipient# [3:7501623305988650068:2318], result# { ErrorCount: 1 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-05-07T08:49:43.606212Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2663: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [3:7501623301693682444:2107], request# { ErrorCount: 0 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/initialization/migrations TableId: 
[18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-05-07T08:49:43.606346Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1818: FillEntry for TNavigate: self# [3:7501623301693682444:2107], cacheItem# { Subscriber: { Subscriber: [3:7501623301693682499:2132] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-05-07T08:49:43.606430Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:264: Send result: self# [3:7501623310283617367:2309], recipient# [3:7501623310283617366:2319], result# { ErrorCount: 1 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } >> KqpSystemView::PartitionStatsParametricRanges |88.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> KqpSystemView::PartitionStatsFollower >> KqpSysColV1::SelectRowById >> Donor::ConsistentWritesWhenSwitchingToDonorMode [GOOD] >> Viewer::JsonAutocompleteEndOfDatabaseName [GOOD] >> Viewer::JsonAutocompleteScheme >> KqpSysColV0::SelectRowAsterisk |88.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> KqpSysColV1::InnerJoinSelectAsterisk >> KqpSysColV0::InnerJoinTables >> KqpSysColV1::InnerJoinSelect >> KqpSysColV0::InnerJoinSelectAsterisk >> Viewer::JsonAutocompleteSimilarDatabaseNameWithLimit [GOOD] >> Viewer::JsonAutocompleteSimilarDatabaseNamePOST ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_donor/unittest >> Donor::ConsistentWritesWhenSwitchingToDonorMode [GOOD] Test command err: RandomSeed# 3281072012776108501 Reassign# 2 -- VSlotId { NodeId: 3 PDiskId: 1000 VSlotId: 1000 } GroupId: 2181038080 GroupGeneration: 1 VDiskKind: "Default" FailDomainIdx: 2 VDiskMetrics { SatisfactionRank: 0 VSlotId { NodeId: 3 PDiskId: 1000 VSlotId: 1000 } State: OK Replicated: true DiskSpace: Green } Status: "READY" Ready: true Put# [1:1:1:0:0:91:0] Put# [1:1:2:0:0:73:0] Put# [1:1:3:0:0:11:0] 2025-05-07T08:46:41.815166Z 9 00h00m20.011024s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2025-05-07T08:46:41.817328Z 9 00h00m20.011024s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 8316865191760166136] 2025-05-07T08:46:41.844136Z 9 00h00m20.011024s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) THullOsirisActor: RESURRECT: id# [1:1:1:0:0:91:6] 2025-05-07T08:46:41.844396Z 9 00h00m20.011024s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) THullOsirisActor: FINISH: BlobsResurrected# 1 PartsResurrected# 1 Put# [1:1:4:0:0:30:0] Put# 
[1:1:5:0:0:44:0] Put# [1:1:6:0:0:41:0] Put# [1:1:7:0:0:45:0] Put# [1:1:8:0:0:36:0] Put# [1:1:9:0:0:77:0] Put# [1:1:10:0:0:82:0] Put# [1:1:11:0:0:42:0] Put# [1:1:12:0:0:48:0] Put# [1:1:13:0:0:46:0] Put# [1:1:14:0:0:83:0] Put# [1:1:15:0:0:12:0] Put# [1:1:16:0:0:81:0] Put# [1:1:17:0:0:26:0] Put# [1:1:18:0:0:31:0] Put# [1:1:19:0:0:85:0] Put# [1:1:20:0:0:70:0] Put# [1:1:21:0:0:61:0] Put# [1:1:22:0:0:21:0] Put# [1:1:23:0:0:39:0] Put# [1:1:24:0:0:53:0] Put# [1:1:25:0:0:45:0] Put# [1:1:26:0:0:39:0] Put# [1:1:27:0:0:62:0] Put# [1:1:28:0:0:98:0] Put# [1:1:29:0:0:58:0] Put# [1:1:30:0:0:52:0] Put# [1:1:31:0:0:26:0] Put# [1:1:32:0:0:99:0] Put# [1:1:33:0:0:33:0] Put# [1:1:34:0:0:65:0] Put# [1:1:35:0:0:44:0] Put# [1:1:36:0:0:73:0] Put# [1:1:37:0:0:42:0] Put# [1:1:38:0:0:47:0] Put# [1:1:39:0:0:83:0] Put# [1:1:40:0:0:87:0] Put# [1:1:41:0:0:47:0] Put# [1:1:42:0:0:29:0] Put# [1:1:43:0:0:97:0] Put# [1:1:44:0:0:16:0] Put# [1:1:45:0:0:80:0] Put# [1:1:46:0:0:76:0] Put# [1:1:47:0:0:28:0] Put# [1:1:48:0:0:50:0] Put# [1:1:49:0:0:83:0] Put# [1:1:50:0:0:13:0] Put# [1:1:51:0:0:75:0] Put# [1:1:52:0:0:54:0] Put# [1:1:53:0:0:81:0] Put# [1:1:54:0:0:100:0] Put# [1:1:55:0:0:41:0] Put# [1:1:56:0:0:27:0] Put# [1:1:57:0:0:87:0] Put# [1:1:58:0:0:74:0] Put# [1:1:59:0:0:85:0] Put# [1:1:60:0:0:28:0] Put# [1:1:61:0:0:21:0] Put# [1:1:62:0:0:62:0] Put# [1:1:63:0:0:22:0] Put# [1:1:64:0:0:88:0] Put# [1:1:65:0:0:81:0] Put# [1:1:66:0:0:53:0] Put# [1:1:67:0:0:55:0] Put# [1:1:68:0:0:26:0] Put# [1:1:69:0:0:71:0] Put# [1:1:70:0:0:26:0] Put# [1:1:71:0:0:16:0] Put# [1:1:72:0:0:16:0] Put# [1:1:73:0:0:78:0] Put# [1:1:74:0:0:79:0] Put# [1:1:75:0:0:100:0] Put# [1:1:76:0:0:23:0] Put# [1:1:77:0:0:9:0] Put# [1:1:78:0:0:8:0] Put# [1:1:79:0:0:79:0] Put# [1:1:80:0:0:35:0] Put# [1:1:81:0:0:98:0] Put# [1:1:82:0:0:63:0] Put# [1:1:83:0:0:39:0] Put# [1:1:84:0:0:64:0] Put# [1:1:85:0:0:64:0] Put# [1:1:86:0:0:6:0] Put# [1:1:87:0:0:43:0] Put# [1:1:88:0:0:87:0] Put# [1:1:89:0:0:87:0] Put# [1:1:90:0:0:63:0] Put# [1:1:91:0:0:37:0] Put# [1:1:92:0:0:56:0] Put# [1:1:93:0:0:18:0] Put# [1:1:94:0:0:48:0] Put# [1:1:95:0:0:23:0] Put# [1:1:96:0:0:78:0] Put# [1:1:97:0:0:23:0] Put# [1:1:98:0:0:96:0] Put# [1:1:99:0:0:35:0] Put# [1:1:100:0:0:28:0] Put# [1:1:101:0:0:30:0] Put# [1:1:102:0:0:21:0] Put# [1:1:103:0:0:35:0] Put# [1:1:104:0:0:32:0] Put# [1:1:105:0:0:4:0] Put# [1:1:106:0:0:73:0] Put# [1:1:107:0:0:27:0] Put# [1:1:108:0:0:23:0] Put# [1:1:109:0:0:62:0] Put# [1:1:110:0:0:79:0] Put# [1:1:111:0:0:86:0] Put# [1:1:112:0:0:49:0] Put# [1:1:113:0:0:73:0] Put# [1:1:114:0:0:7:0] Put# [1:1:115:0:0:51:0] Put# [1:1:116:0:0:58:0] Put# [1:1:117:0:0:67:0] Put# [1:1:118:0:0:55:0] Put# [1:1:119:0:0:100:0] Put# [1:1:120:0:0:68:0] Put# [1:1:121:0:0:46:0] Put# [1:1:122:0:0:43:0] Put# [1:1:123:0:0:30:0] Put# [1:1:124:0:0:53:0] Put# [1:1:125:0:0:9:0] Put# [1:1:126:0:0:16:0] Put# [1:1:127:0:0:44:0] Put# [1:1:128:0:0:9:0] Put# [1:1:129:0:0:61:0] Put# [1:1:130:0:0:93:0] Put# [1:1:131:0:0:92:0] Put# [1:1:132:0:0:68:0] Put# [1:1:133:0:0:5:0] Put# [1:1:134:0:0:8:0] Put# [1:1:135:0:0:100:0] Put# [1:1:136:0:0:90:0] Put# [1:1:137:0:0:65:0] Put# [1:1:138:0:0:8:0] Put# [1:1:139:0:0:100:0] Put# [1:1:140:0:0:67:0] Put# [1:1:141:0:0:22:0] Put# [1:1:142:0:0:9:0] Put# [1:1:143:0:0:56:0] Put# [1:1:144:0:0:64:0] Put# [1:1:145:0:0:85:0] Put# [1:1:146:0:0:64:0] Put# [1:1:147:0:0:46:0] Put# [1:1:148:0:0:50:0] Put# [1:1:149:0:0:10:0] Put# [1:1:150:0:0:61:0] Put# [1:1:151:0:0:52:0] Put# [1:1:152:0:0:64:0] Put# [1:1:153:0:0:98:0] Put# [1:1:154:0:0:84:0] Put# [1:1:155:0:0:97:0] Put# [1:1:156:0:0:16:0] Put# 
[1:1:157:0:0:4:0] Put# [1:1:158:0:0:17:0] Put# [1:1:159:0:0:12:0] Put# [1:1:160:0:0:13:0] Put# [1:1:161:0:0:91:0] Put# [1:1:162:0:0:39:0] Put# [1:1:163:0:0:75:0] Put# [1:1:164:0:0:88:0] Put# [1:1:165:0:0:11:0] Put# [1:1:166:0:0:97:0] Put# [1:1:167:0:0:84:0] Put# [1:1:168:0:0:97:0] Put# [1:1:169:0:0:32:0] Put# [1:1:170:0:0:79:0] Put# [1:1:171:0:0:99:0] Put# [1:1:172:0:0:90:0] Put# [1:1:173:0:0:48:0] Put# [1:1:174:0:0:32:0] Put# [1:1:175:0:0:24:0] Put# [1:1:176:0:0:79:0] Put# [1:1:177:0:0:93:0] Put# [1:1:178:0:0:73:0] Put# [1:1:179:0:0:3:0] Put# [1:1:180:0:0:57:0] Put# [1:1:181:0:0:37:0] Put# [1:1:182:0:0:36:0] Put# [1:1:183:0:0:8:0] Put# [1:1:184:0:0:88:0] Put# [1:1:185:0:0:12:0] Put# [1:1:186:0:0:90:0] Put# [1:1:187:0:0:72:0] Put# [1:1:188:0:0:50:0] Put# [1:1:189:0:0:60:0] Put# [1:1:190:0:0:25:0] Put# [1:1:191:0:0:87:0] Put# [1:1:192:0:0:37:0] Put# [1:1:193:0:0:84:0] Put# [1:1:194:0:0:94:0] Put# [1:1:195:0:0:78:0] Put# [1:1:196:0:0:34:0] Put# [1:1:197:0:0:99:0] Put# [1:1:198:0:0:57:0] Put# [1:1:199:0:0:73:0] Put# [1:1:200:0:0:96:0] Put# [1:1:201:0:0:31:0] Put# [1:1:202:0:0:31:0] Put# [1:1:203:0:0:43:0] Put# [1:1:204:0:0:81:0] Put# [1:1:205:0:0:87:0] Put# [1:1:206:0:0:51:0] Put# [1:1:207:0:0:87:0] Put# [1:1:208:0:0:79:0] Put# [1:1:209:0:0:89:0] Put# [1:1:210:0:0:46:0] Put# [1:1:211:0:0:83:0] Put# [1:1:212:0:0:11:0] Put# [1:1:213:0:0:49:0] Put# [1:1:214:0:0:41:0] Put# [1:1:215:0:0:45:0] Put# [1:1:216:0:0:52:0] Put# [1:1:217:0:0:33:0] Put# [1:1:218:0:0:2:0] Put# [1:1:219:0:0:81:0] Put# [1:1:220:0:0:2:0] Put# [1:1:221:0:0:23:0] Put# [1:1:222:0:0:74:0] Put# [1:1:223:0:0:5:0] Put# [1:1:224:0:0:91:0] Put# [1:1:225:0:0:81:0] Put# [1:1:226:0:0:54:0] Put# [1:1:227:0:0:71:0] Put# [1:1:228:0:0:89:0] Put# [1:1:229:0:0:53:0] Put# [1:1:230:0:0:89:0] Put# [1:1:231:0:0:11:0] Put# [1:1:232:0:0:17:0] Put# [1:1:233:0:0:72:0] Put# [1:1:234:0:0:88:0] Put# [1:1:235:0:0:20:0] Put# [1:1:236:0:0:19:0] Put# [1:1:237:0:0:76:0] Put# [1:1:238:0:0:87:0] Put# [1:1:239:0:0:14:0] Put# [1:1:240:0:0:92:0] Put# [1:1:241:0:0:76:0] Put# [1:1:242:0:0:34:0] Put# [1:1:243:0:0:31:0] Put# [1:1:244:0:0:61:0] Put# [1:1:245:0:0:36:0] Put# [1:1:246:0:0:46:0] Put# [1:1:247:0:0:29:0] Put# [1:1:248:0:0:3:0] Put# [1:1:249:0:0:80:0] Put# [1:1:250:0:0:55:0] Put# [1:1:251:0:0:86:0] Put# [1:1:252:0:0:17:0] Put# [1:1:253:0:0:94:0] Put# [1:1:254:0:0:40:0] Put# [1:1:255:0:0:98:0] Put# [1:1:256:0:0:45:0] Put# [1:1:257:0:0:69:0] Put# [1:1:258:0:0:1:0] Put# [1:1:259:0:0:70:0] Put# [1:1:260:0:0:14:0] Put# [1:1:261:0:0:7:0] Put# [1:1:262:0:0:63:0] Put# [1:1:263:0:0:57:0] Put# [1:1:264:0:0:43:0] Put# [1:1:265:0:0:77:0] Put# [1:1:266:0:0:15:0] Put# [1:1:267:0:0:65:0] Put# [1:1:268:0:0:83:0] Put# [1:1:269:0:0:85:0] Put# [1:1:270:0:0:80:0] Put# [1:1:271:0:0:64:0] Put# [1:1:272:0:0:62:0] Put# [1:1:273:0:0:26:0] Put# [1:1:274:0:0:2:0] Put# [1:1:275:0:0:72:0] Put# [1:1:276:0:0:92:0] Put# [1:1:277:0:0:22:0] Put# [1:1:278:0:0:58:0] Put# [1:1:279:0:0:39:0] Put# [1:1:280:0:0:78:0] Put# [1:1:281:0:0:13:0] Put# [1:1:282:0:0:20:0] Put# [1:1:283:0:0:94:0] Put# [1:1:284:0:0:91:0] Put# [1:1:285:0:0:95:0] Put# [1:1:286:0:0:29:0] Put# [1:1:287:0:0:17:0] Put# [1:1:288:0:0:14:0] Put# [1:1:289:0:0:54:0] Put# [1:1:290:0:0:21:0] Put# [1:1:291:0:0:67:0] Put# [1:1:292:0:0:93:0] Put# [1:1:293:0:0:31:0] Put# [1:1:294:0:0:35:0] Put# [1:1:295:0:0:75:0] Put# [1:1:296:0:0:9:0] Put# [1:1:297:0:0:8:0] Put# [1:1:298:0:0:100:0] Put# [1:1:299:0:0:26:0] Put# [1:1:300:0:0:98:0] Put# [1:1:301:0:0:9:0] Put# [1:1:302:0:0:75:0] Put# [1:1:303:0:0:61:0] Put# [1:1:304:0:0:83:0] Put# 
[1:1:305:0:0:18:0] Put# [1:1:306:0:0:54:0] Put# [1:1:307:0:0:72:0] Put# [1:1:308:0:0:43:0] Put# [1:1:309:0:0:41:0] Put# [1:1:310:0:0:25:0] Put# [1:1:311:0:0:80:0] Put# [1:1:312:0:0:81:0] Put# [1:1:313:0:0:41:0] Put# [1:1:314:0:0:42:0] Put# [1:1:315:0:0:5:0] Put# [1:1:316:0:0:7:0] Put# [1:1:317:0:0:72:0] Put# [1:1:318:0:0:74:0] Put# [1:1:319:0:0:56:0] Put# [1:1:320:0:0:74:0] Put# [1:1:321:0:0:65:0] Put# [1:1:322:0:0:22:0] Put# [1:1:323:0:0:15:0] Put# [1:1:324:0:0:51:0] Put# [1:1:325:0:0:61:0] Put# [1:1:326:0:0:7:0] Put# [1:1:327:0:0:34:0] Put# [1:1:328:0:0:71:0] Put# [1:1:329:0:0:94:0] Put# [1:1:330:0:0:35:0] Put# [1:1:331:0:0:34:0] Put# [1:1:332:0:0:55:0] Put# [1:1:333:0:0:59:0] Put# [1:1:334:0:0:75:0] Put# [1:1:335:0:0:14:0] Put# [1:1:336:0:0:59:0] Put# [1:1:337:0:0:24:0] Put# [1:1:338:0:0:80:0] Put# [1:1:339:0:0:72:0] Put# [1:1:340:0:0:39:0] Put# [1:1:341:0:0:87:0] Put# [1:1:342:0:0:54:0] Put# [1:1:343:0:0:88:0] Put# [1:1:344:0:0:52:0] Put# [1:1:345:0:0:20:0] Put# [1:1:346:0:0:81:0] Put# [1:1:347:0:0:32:0] Put# [1:1:348:0:0:20:0] Put# [1:1:349:0:0:40:0] Put# [1:1:350:0:0:28:0] Put# [1:1:351:0:0:26:0] Put# [1:1:352:0:0:49:0] Put# [1:1:353:0:0:57:0] Put# [1:1:354:0:0:65:0] Put# [1:1:355:0:0:39:0] Put# [1:1:356:0:0:9:0] Put# [1:1:357:0:0:11:0] Put# [1:1:358:0:0:33:0] Put# [1:1:359:0:0:73:0] Put# [1:1:360:0:0:37:0] Put# [1:1:361:0:0:9:0] Put# [1:1:362:0:0:14:0] Put# [1:1:363:0:0:26:0] Put# [1:1:364:0:0:96:0] Put# [1:1:365:0:0:92:0] Put# [1:1:366:0:0:30:0] Put# [1:1:367:0:0:4:0] Put# [1:1:368:0:0:20:0] Put# [1:1:369:0:0:99:0] Put# [1:1:370:0:0:46:0] Put# [1:1:371:0:0:100:0] Put# [1:1:372:0:0:68:0] Put# [1:1:373:0:0:100:0] Put# [1:1:374:0:0:20:0] Put# [1:1:375:0:0:43:0] Put# [1:1:376:0:0:78:0] Put# [1:1:377:0:0:60:0] Put# [1:1:378:0:0:7:0] Put# [1:1:379:0:0:85:0] Put# [1:1:380:0:0:78:0] Put# [1:1:381:0:0:65:0] Put# [1:1:382:0:0:5:0] Put# [1:1:383:0:0:7:0] Put# [1:1:384:0:0:93:0] Put# [1:1:385:0:0:100:0] Put# [1:1:386:0:0:68:0] Put# [1:1:387:0:0:84:0] Put# [1:1:388:0:0:79:0] Put# [1:1:389:0:0:20:0] Put# [1:1:390:0:0:37:0] Put# [1:1:391:0:0:35:0] Put# [1:1:392:0:0:31:0] Put# [1:1:393:0:0:54:0] Put# [1:1:394:0:0:1:0] Put# [1:1:395:0:0:82:0] Put# [1:1:396:0:0:51:0] Put# [1:1:397:0:0:37:0] Put# [1:1:398:0:0:70:0] Put# [1:1:399:0:0:34:0] Put# [1:1:400:0:0:91:0] Put# [1:1:401:0:0:51:0] Put# [1:1:402:0:0:51:0] Put# [1:1:403:0:0:53:0] Put# [1:1:404:0:0:16:0] Put# [1:1:405:0:0:74:0] Put# [1:1:406:0:0:15:0] Put# [1:1:407:0:0:82:0] Put# [1:1:408:0:0:82:0] Put# [1:1:409:0:0:49:0] Put# [1:1:410:0:0:73:0] Put# [1:1:411:0:0:89:0] Put# [1:1:412:0:0:41:0] Put# [1:1:413:0:0:64:0] Put# [1:1:414:0:0:14:0] Put# [1:1:415:0:0:49:0] Put# [1:1:416:0:0:3:0] Put# [1:1:417:0:0:13:0] Put# [1:1:418:0:0:97:0] Put# [1:1:419:0:0:64:0] Put# [1:1:420:0:0:90:0] Put# [1:1:421:0:0:30:0] Put# [1:1:422:0:0:76:0] Put# [1:1:423:0:0:14:0] Put# [1:1:424:0:0:44:0] Put# [1:1:425:0:0:16:0] Put# [1:1:426:0:0:10:0] Put# [1:1:427:0:0:28:0] Put# [1:1:428:0:0:35:0] Put# [1:1:429:0:0:80:0] Put# [1:1:430:0:0:88:0] Put# [1:1:431:0:0:34:0] Put# [1:1:432:0:0:38:0] Put# [1:1:433:0:0:31:0] Put# [1:1:434:0:0:32:0] Put# [1:1:435:0:0:80:0] Put# [1:1:436:0:0:93:0] Put# [1:1:437:0:0:6:0] Put# [1:1:438:0:0:10:0] Put# [1:1:439:0:0:27:0] Put# [1:1:440:0:0:85:0] Put# [1:1:441:0:0:43:0] Put# [1:1:442:0:0:17:0] Put# [1:1:443:0:0:75:0] Put# [1:1:444:0:0:75:0] Put# [1:1:445:0:0:60:0] Put# [1:1:446:0:0:23:0] Put# [1:1:447:0:0:49:0] Put# [1:1:448:0:0:10:0] Put# [1:1:449:0:0:22:0] Put# [1:1:450:0:0:68:0] Put# [1:1:451:0:0:7:0] Put# [1:1:452:0:0:71:0] Put# 
[1:1:453:0:0:35:0] Put# [1:1:454:0:0:11:0] Put# [1:1:455:0:0:7:0] Put# [1:1:456:0:0:38:0] Put# [1:1:457:0:0:32:0] Put# [1:1:458:0:0:76:0] Put# [1:1:459:0:0:94:0] Put# [1:1:460:0:0:14:0] Put# [1:1:461:0:0:79:0 ... 15:0] Put# [1:3:2526:0:0:72:0] Put# [1:3:2527:0:0:12:0] Put# [1:3:2528:0:0:21:0] Put# [1:3:2529:0:0:32:0] Put# [1:3:2530:0:0:57:0] Put# [1:3:2531:0:0:33:0] Put# [1:3:2532:0:0:89:0] Put# [1:3:2533:0:0:40:0] Put# [1:3:2534:0:0:18:0] Put# [1:3:2535:0:0:25:0] Put# [1:3:2536:0:0:17:0] Put# [1:3:2537:0:0:93:0] Put# [1:3:2538:0:0:81:0] Put# [1:3:2539:0:0:16:0] Put# [1:3:2540:0:0:91:0] Put# [1:3:2541:0:0:58:0] Put# [1:3:2542:0:0:25:0] Put# [1:3:2543:0:0:88:0] Put# [1:3:2544:0:0:65:0] Put# [1:3:2545:0:0:39:0] Put# [1:3:2546:0:0:20:0] Put# [1:3:2547:0:0:52:0] Put# [1:3:2548:0:0:37:0] Put# [1:3:2549:0:0:28:0] Put# [1:3:2550:0:0:58:0] Put# [1:3:2551:0:0:57:0] Put# [1:3:2552:0:0:23:0] Put# [1:3:2553:0:0:77:0] Put# [1:3:2554:0:0:55:0] Put# [1:3:2555:0:0:3:0] Put# [1:3:2556:0:0:56:0] Put# [1:3:2557:0:0:85:0] Put# [1:3:2558:0:0:60:0] Put# [1:3:2559:0:0:67:0] Put# [1:3:2560:0:0:36:0] Put# [1:3:2561:0:0:46:0] Put# [1:3:2562:0:0:80:0] Put# [1:3:2563:0:0:92:0] Put# [1:3:2564:0:0:4:0] Put# [1:3:2565:0:0:16:0] Put# [1:3:2566:0:0:20:0] Put# [1:3:2567:0:0:36:0] Put# [1:3:2568:0:0:9:0] Put# [1:3:2569:0:0:21:0] Put# [1:3:2570:0:0:38:0] Put# [1:3:2571:0:0:48:0] Put# [1:3:2572:0:0:48:0] Put# [1:3:2573:0:0:22:0] Put# [1:3:2574:0:0:72:0] Put# [1:3:2575:0:0:68:0] Put# [1:3:2576:0:0:4:0] Put# [1:3:2577:0:0:70:0] Put# [1:3:2578:0:0:83:0] Put# [1:3:2579:0:0:86:0] Put# [1:3:2580:0:0:85:0] Put# [1:3:2581:0:0:28:0] Put# [1:3:2582:0:0:38:0] Put# [1:3:2583:0:0:82:0] Put# [1:3:2584:0:0:97:0] Put# [1:3:2585:0:0:84:0] Put# [1:3:2586:0:0:64:0] Put# [1:3:2587:0:0:2:0] Put# [1:3:2588:0:0:30:0] Put# [1:3:2589:0:0:91:0] Put# [1:3:2590:0:0:99:0] Put# [1:3:2591:0:0:1:0] Put# [1:3:2592:0:0:69:0] Put# [1:3:2593:0:0:38:0] Put# [1:3:2594:0:0:98:0] Put# [1:3:2595:0:0:21:0] Put# [1:3:2596:0:0:40:0] Put# [1:3:2597:0:0:69:0] Put# [1:3:2598:0:0:10:0] Put# [1:3:2599:0:0:2:0] Put# [1:3:2600:0:0:6:0] Put# [1:3:2601:0:0:77:0] Put# [1:3:2602:0:0:99:0] Put# [1:3:2603:0:0:86:0] Put# [1:3:2604:0:0:68:0] Put# [1:3:2605:0:0:52:0] Put# [1:3:2606:0:0:52:0] Put# [1:3:2607:0:0:77:0] Put# [1:3:2608:0:0:92:0] Put# [1:3:2609:0:0:25:0] Put# [1:3:2610:0:0:11:0] Put# [1:3:2611:0:0:85:0] Put# [1:3:2612:0:0:51:0] Put# [1:3:2613:0:0:97:0] Put# [1:3:2614:0:0:37:0] Put# [1:3:2615:0:0:76:0] Put# [1:3:2616:0:0:19:0] Put# [1:3:2617:0:0:10:0] Put# [1:3:2618:0:0:57:0] Put# [1:3:2619:0:0:48:0] Put# [1:3:2620:0:0:72:0] Put# [1:3:2621:0:0:72:0] Put# [1:3:2622:0:0:33:0] Put# [1:3:2623:0:0:6:0] Put# [1:3:2624:0:0:39:0] Put# [1:3:2625:0:0:30:0] Put# [1:3:2626:0:0:37:0] Put# [1:3:2627:0:0:47:0] Put# [1:3:2628:0:0:55:0] Put# [1:3:2629:0:0:82:0] Put# [1:3:2630:0:0:86:0] Put# [1:3:2631:0:0:94:0] Put# [1:3:2632:0:0:35:0] Put# [1:3:2633:0:0:57:0] Put# [1:3:2634:0:0:27:0] Put# [1:3:2635:0:0:100:0] Put# [1:3:2636:0:0:67:0] Put# [1:3:2637:0:0:18:0] Put# [1:3:2638:0:0:95:0] Put# [1:3:2639:0:0:93:0] Put# [1:3:2640:0:0:4:0] Put# [1:3:2641:0:0:73:0] Put# [1:3:2642:0:0:56:0] Put# [1:3:2643:0:0:17:0] Put# [1:3:2644:0:0:60:0] Put# [1:3:2645:0:0:7:0] Put# [1:3:2646:0:0:64:0] Put# [1:3:2647:0:0:77:0] Put# [1:3:2648:0:0:87:0] Put# [1:3:2649:0:0:95:0] Put# [1:3:2650:0:0:22:0] Put# [1:3:2651:0:0:77:0] Put# [1:3:2652:0:0:29:0] Put# [1:3:2653:0:0:75:0] Put# [1:3:2654:0:0:91:0] Put# [1:3:2655:0:0:57:0] Put# [1:3:2656:0:0:66:0] Put# [1:3:2657:0:0:12:0] Put# [1:3:2658:0:0:17:0] Put# 
[1:3:2659:0:0:67:0] Put# [1:3:2660:0:0:28:0] Put# [1:3:2661:0:0:5:0] Put# [1:3:2662:0:0:62:0] Put# [1:3:2663:0:0:50:0] Put# [1:3:2664:0:0:35:0] Put# [1:3:2665:0:0:51:0] Put# [1:3:2666:0:0:21:0] Put# [1:3:2667:0:0:27:0] Put# [1:3:2668:0:0:86:0] Put# [1:3:2669:0:0:30:0] Put# [1:3:2670:0:0:4:0] Put# [1:3:2671:0:0:72:0] Put# [1:3:2672:0:0:40:0] Put# [1:3:2673:0:0:47:0] Put# [1:3:2674:0:0:18:0] Put# [1:3:2675:0:0:78:0] Put# [1:3:2676:0:0:58:0] Put# [1:3:2677:0:0:44:0] Put# [1:3:2678:0:0:56:0] Put# [1:3:2679:0:0:67:0] Put# [1:3:2680:0:0:75:0] Put# [1:3:2681:0:0:16:0] Put# [1:3:2682:0:0:37:0] Put# [1:3:2683:0:0:44:0] Put# [1:3:2684:0:0:68:0] Put# [1:3:2685:0:0:75:0] Put# [1:3:2686:0:0:34:0] Put# [1:3:2687:0:0:21:0] Put# [1:3:2688:0:0:75:0] Put# [1:3:2689:0:0:71:0] Put# [1:3:2690:0:0:39:0] Put# [1:3:2691:0:0:43:0] Put# [1:3:2692:0:0:26:0] Put# [1:3:2693:0:0:38:0] Put# [1:3:2694:0:0:92:0] Put# [1:3:2695:0:0:80:0] Put# [1:3:2696:0:0:34:0] Put# [1:3:2697:0:0:39:0] Put# [1:3:2698:0:0:27:0] Put# [1:3:2699:0:0:50:0] Put# [1:3:2700:0:0:43:0] Put# [1:3:2701:0:0:36:0] Put# [1:3:2702:0:0:59:0] Put# [1:3:2703:0:0:40:0] Put# [1:3:2704:0:0:14:0] Put# [1:3:2705:0:0:60:0] Put# [1:3:2706:0:0:94:0] Put# [1:3:2707:0:0:76:0] Put# [1:3:2708:0:0:77:0] Put# [1:3:2709:0:0:51:0] Put# [1:3:2710:0:0:78:0] Put# [1:3:2711:0:0:75:0] Put# [1:3:2712:0:0:34:0] Put# [1:3:2713:0:0:23:0] Put# [1:3:2714:0:0:30:0] Put# [1:3:2715:0:0:96:0] Put# [1:3:2716:0:0:6:0] Put# [1:3:2717:0:0:83:0] Put# [1:3:2718:0:0:41:0] Put# [1:3:2719:0:0:18:0] Put# [1:3:2720:0:0:99:0] Put# [1:3:2721:0:0:51:0] Put# [1:3:2722:0:0:63:0] Put# [1:3:2723:0:0:74:0] Put# [1:3:2724:0:0:70:0] Put# [1:3:2725:0:0:12:0] Put# [1:3:2726:0:0:31:0] Put# [1:3:2727:0:0:19:0] Put# [1:3:2728:0:0:26:0] Put# [1:3:2729:0:0:62:0] Put# [1:3:2730:0:0:82:0] Put# [1:3:2731:0:0:29:0] Put# [1:3:2732:0:0:88:0] Put# [1:3:2733:0:0:37:0] Put# [1:3:2734:0:0:47:0] Put# [1:3:2735:0:0:35:0] Put# [1:3:2736:0:0:11:0] Put# [1:3:2737:0:0:14:0] Put# [1:3:2738:0:0:64:0] Put# [1:3:2739:0:0:73:0] Put# [1:3:2740:0:0:17:0] Put# [1:3:2741:0:0:74:0] Put# [1:3:2742:0:0:28:0] Put# [1:3:2743:0:0:82:0] Put# [1:3:2744:0:0:75:0] Put# [1:3:2745:0:0:8:0] Put# [1:3:2746:0:0:86:0] Put# [1:3:2747:0:0:20:0] Put# [1:3:2748:0:0:56:0] Put# [1:3:2749:0:0:80:0] Put# [1:3:2750:0:0:24:0] Put# [1:3:2751:0:0:33:0] Put# [1:3:2752:0:0:37:0] Put# [1:3:2753:0:0:57:0] Put# [1:3:2754:0:0:20:0] Put# [1:3:2755:0:0:72:0] Put# [1:3:2756:0:0:83:0] Put# [1:3:2757:0:0:29:0] Put# [1:3:2758:0:0:43:0] Put# [1:3:2759:0:0:34:0] Put# [1:3:2760:0:0:39:0] Put# [1:3:2761:0:0:70:0] Put# [1:3:2762:0:0:81:0] Put# [1:3:2763:0:0:41:0] Put# [1:3:2764:0:0:3:0] Put# [1:3:2765:0:0:82:0] Put# [1:3:2766:0:0:11:0] Put# [1:3:2767:0:0:93:0] Put# [1:3:2768:0:0:17:0] Put# [1:3:2769:0:0:10:0] Put# [1:3:2770:0:0:80:0] Put# [1:3:2771:0:0:31:0] Put# [1:3:2772:0:0:82:0] Put# [1:3:2773:0:0:97:0] Put# [1:3:2774:0:0:66:0] Put# [1:3:2775:0:0:54:0] Put# [1:3:2776:0:0:89:0] Put# [1:3:2777:0:0:83:0] Put# [1:3:2778:0:0:13:0] Put# [1:3:2779:0:0:55:0] Put# [1:3:2780:0:0:3:0] Put# [1:3:2781:0:0:29:0] Put# [1:3:2782:0:0:44:0] Put# [1:3:2783:0:0:2:0] Put# [1:3:2784:0:0:52:0] Put# [1:3:2785:0:0:34:0] Put# [1:3:2786:0:0:56:0] Put# [1:3:2787:0:0:65:0] Put# [1:3:2788:0:0:76:0] Put# [1:3:2789:0:0:6:0] Put# [1:3:2790:0:0:93:0] Put# [1:3:2791:0:0:18:0] Put# [1:3:2792:0:0:16:0] Put# [1:3:2793:0:0:65:0] Put# [1:3:2794:0:0:66:0] Put# [1:3:2795:0:0:30:0] Put# [1:3:2796:0:0:74:0] Put# [1:3:2797:0:0:83:0] Put# [1:3:2798:0:0:92:0] Put# [1:3:2799:0:0:67:0] Put# [1:3:2800:0:0:11:0] Put# 
[1:3:2801:0:0:98:0] Put# [1:3:2802:0:0:39:0] Put# [1:3:2803:0:0:6:0] Put# [1:3:2804:0:0:48:0] Put# [1:3:2805:0:0:20:0] Put# [1:3:2806:0:0:69:0] Put# [1:3:2807:0:0:19:0] Put# [1:3:2808:0:0:29:0] Put# [1:3:2809:0:0:33:0] Put# [1:3:2810:0:0:87:0] Put# [1:3:2811:0:0:76:0] Put# [1:3:2812:0:0:13:0] Put# [1:3:2813:0:0:88:0] Put# [1:3:2814:0:0:53:0] Put# [1:3:2815:0:0:97:0] Put# [1:3:2816:0:0:85:0] Put# [1:3:2817:0:0:19:0] Put# [1:3:2818:0:0:6:0] Put# [1:3:2819:0:0:89:0] Put# [1:3:2820:0:0:33:0] Put# [1:3:2821:0:0:42:0] Put# [1:3:2822:0:0:26:0] Put# [1:3:2823:0:0:95:0] Put# [1:3:2824:0:0:2:0] Put# [1:3:2825:0:0:55:0] Put# [1:3:2826:0:0:86:0] Put# [1:3:2827:0:0:81:0] Put# [1:3:2828:0:0:50:0] Put# [1:3:2829:0:0:73:0] Put# [1:3:2830:0:0:19:0] Put# [1:3:2831:0:0:39:0] Put# [1:3:2832:0:0:20:0] Put# [1:3:2833:0:0:72:0] Put# [1:3:2834:0:0:57:0] Put# [1:3:2835:0:0:9:0] Put# [1:3:2836:0:0:79:0] Put# [1:3:2837:0:0:39:0] Put# [1:3:2838:0:0:1:0] Put# [1:3:2839:0:0:33:0] Put# [1:3:2840:0:0:72:0] Put# [1:3:2841:0:0:84:0] Put# [1:3:2842:0:0:19:0] Put# [1:3:2843:0:0:97:0] Put# [1:3:2844:0:0:10:0] Put# [1:3:2845:0:0:37:0] Put# [1:3:2846:0:0:85:0] Put# [1:3:2847:0:0:8:0] Put# [1:3:2848:0:0:63:0] Put# [1:3:2849:0:0:82:0] Put# [1:3:2850:0:0:95:0] Put# [1:3:2851:0:0:30:0] Put# [1:3:2852:0:0:31:0] Put# [1:3:2853:0:0:39:0] Put# [1:3:2854:0:0:69:0] Put# [1:3:2855:0:0:87:0] Put# [1:3:2856:0:0:56:0] Put# [1:3:2857:0:0:68:0] Put# [1:3:2858:0:0:64:0] Put# [1:3:2859:0:0:95:0] Put# [1:3:2860:0:0:43:0] Put# [1:3:2861:0:0:89:0] Put# [1:3:2862:0:0:42:0] Put# [1:3:2863:0:0:55:0] Put# [1:3:2864:0:0:50:0] Put# [1:3:2865:0:0:41:0] Put# [1:3:2866:0:0:92:0] Put# [1:3:2867:0:0:85:0] Put# [1:3:2868:0:0:36:0] Put# [1:3:2869:0:0:18:0] Put# [1:3:2870:0:0:60:0] Put# [1:3:2871:0:0:66:0] Put# [1:3:2872:0:0:49:0] Put# [1:3:2873:0:0:74:0] Put# [1:3:2874:0:0:39:0] Put# [1:3:2875:0:0:47:0] Put# [1:3:2876:0:0:96:0] Put# [1:3:2877:0:0:64:0] Put# [1:3:2878:0:0:68:0] Put# [1:3:2879:0:0:12:0] Put# [1:3:2880:0:0:27:0] Put# [1:3:2881:0:0:54:0] Put# [1:3:2882:0:0:13:0] Put# [1:3:2883:0:0:86:0] Put# [1:3:2884:0:0:30:0] Put# [1:3:2885:0:0:5:0] Put# [1:3:2886:0:0:5:0] Put# [1:3:2887:0:0:6:0] Put# [1:3:2888:0:0:37:0] Put# [1:3:2889:0:0:36:0] Put# [1:3:2890:0:0:46:0] Put# [1:3:2891:0:0:99:0] Put# [1:3:2892:0:0:31:0] Put# [1:3:2893:0:0:50:0] Put# [1:3:2894:0:0:31:0] Put# [1:3:2895:0:0:70:0] Put# [1:3:2896:0:0:83:0] Put# [1:3:2897:0:0:71:0] Put# [1:3:2898:0:0:87:0] Put# [1:3:2899:0:0:61:0] Put# [1:3:2900:0:0:94:0] Put# [1:3:2901:0:0:98:0] Put# [1:3:2902:0:0:66:0] Put# [1:3:2903:0:0:43:0] Put# [1:3:2904:0:0:13:0] Put# [1:3:2905:0:0:82:0] Put# [1:3:2906:0:0:60:0] Put# [1:3:2907:0:0:83:0] Put# [1:3:2908:0:0:3:0] Put# [1:3:2909:0:0:89:0] Put# [1:3:2910:0:0:95:0] Put# [1:3:2911:0:0:77:0] Put# [1:3:2912:0:0:40:0] Put# [1:3:2913:0:0:72:0] Put# [1:3:2914:0:0:33:0] Put# [1:3:2915:0:0:27:0] Put# [1:3:2916:0:0:78:0] Put# [1:3:2917:0:0:16:0] Put# [1:3:2918:0:0:98:0] Put# [1:3:2919:0:0:7:0] Put# [1:3:2920:0:0:37:0] Put# [1:3:2921:0:0:66:0] Put# [1:3:2922:0:0:28:0] Put# [1:3:2923:0:0:51:0] Put# [1:3:2924:0:0:40:0] Put# [1:3:2925:0:0:10:0] Put# [1:3:2926:0:0:25:0] Put# [1:3:2927:0:0:7:0] Put# [1:3:2928:0:0:27:0] Put# [1:3:2929:0:0:1:0] Put# [1:3:2930:0:0:53:0] Put# [1:3:2931:0:0:3:0] Put# [1:3:2932:0:0:83:0] Put# [1:3:2933:0:0:9:0] Put# [1:3:2934:0:0:61:0] Put# [1:3:2935:0:0:62:0] Put# [1:3:2936:0:0:37:0] Put# [1:3:2937:0:0:10:0] Put# [1:3:2938:0:0:27:0] Put# [1:3:2939:0:0:90:0] Put# [1:3:2940:0:0:20:0] Put# [1:3:2941:0:0:16:0] Put# [1:3:2942:0:0:22:0] Put# [1:3:2943:0:0:57:0] 
Put# [1:3:2944:0:0:25:0] Put# [1:3:2945:0:0:46:0] Put# [1:3:2946:0:0:30:0] Put# [1:3:2947:0:0:82:0] Put# [1:3:2948:0:0:100:0] Put# [1:3:2949:0:0:86:0] Put# [1:3:2950:0:0:47:0] Put# [1:3:2951:0:0:21:0] Put# [1:3:2952:0:0:89:0] Put# [1:3:2953:0:0:10:0] Put# [1:3:2954:0:0:87:0] Put# [1:3:2955:0:0:55:0] Put# [1:3:2956:0:0:29:0] Put# [1:3:2957:0:0:69:0] Put# [1:3:2958:0:0:81:0] Put# [1:3:2959:0:0:51:0] Put# [1:3:2960:0:0:46:0] Put# [1:3:2961:0:0:29:0] Put# [1:3:2962:0:0:20:0] Put# [1:3:2963:0:0:85:0] Put# [1:3:2964:0:0:5:0] Put# [1:3:2965:0:0:87:0] Put# [1:3:2966:0:0:52:0] Put# [1:3:2967:0:0:96:0] Put# [1:3:2968:0:0:72:0] Put# [1:3:2969:0:0:16:0] Put# [1:3:2970:0:0:87:0] Put# [1:3:2971:0:0:15:0] Put# [1:3:2972:0:0:64:0] Put# [1:3:2973:0:0:91:0] Put# [1:3:2974:0:0:24:0] Put# [1:3:2975:0:0:69:0] Put# [1:3:2976:0:0:29:0] Put# [1:3:2977:0:0:37:0] Put# [1:3:2978:0:0:87:0] Put# [1:3:2979:0:0:73:0] Put# [1:3:2980:0:0:37:0] Put# [1:3:2981:0:0:80:0] Put# [1:3:2982:0:0:65:0] Put# [1:3:2983:0:0:14:0] Put# [1:3:2984:0:0:42:0] Put# [1:3:2985:0:0:37:0] Put# [1:3:2986:0:0:44:0] Put# [1:3:2987:0:0:40:0] Put# [1:3:2988:0:0:94:0] Put# [1:3:2989:0:0:85:0] Put# [1:3:2990:0:0:86:0] Put# [1:3:2991:0:0:29:0] Put# [1:3:2992:0:0:3:0] Reassign# 3 -- VSlotId { NodeId: 5 PDiskId: 1000 VSlotId: 1000 } GroupId: 2181038080 GroupGeneration: 3 VDiskKind: "Default" FailDomainIdx: 4 VDiskMetrics { SatisfactionRank: 12 VSlotId { NodeId: 5 PDiskId: 1000 VSlotId: 1000 } State: OK Replicated: true DiskSpace: Green } Status: "READY" Ready: true Put# [1:3:2993:0:0:26:0] Put# [1:3:2994:0:0:96:0] >> KqpSystemView::PartitionStatsRanges [GOOD] >> DataShardOutOfOrder::TestOutOfOrderNonConflictingWrites-EvWrite [GOOD] >> KqpSystemView::Sessions >> KqpSystemView::PartitionStatsRange3 [GOOD] >> KqpSysColV0::SelectRowById [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> KqpSystemView::PartitionStatsRanges [GOOD] Test command err: Trying to start YDB, gRPC: 8414, MsgBus: 30248 2025-05-07T08:49:42.597619Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501623306565897945:2074];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:49:42.598388Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003200/r3tmp/tmp2KjT6y/pdisk_1.dat 2025-05-07T08:49:42.958784Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 8414, node 1 2025-05-07T08:49:43.003832Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:49:43.004125Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:49:43.007810Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:49:43.061346Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:49:43.061368Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:49:43.061373Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: 
(empty maybe) 2025-05-07T08:49:43.061487Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:30248 TClient is connected to server localhost:30248 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:49:43.622063Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:43.642044Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T08:49:43.656650Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:43.830301Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:44.006764Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:44.089101Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:46.061956Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623323745768790:2406], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:46.062070Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:46.464261Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T08:49:46.495574Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T08:49:46.531435Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T08:49:46.609237Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T08:49:46.655290Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T08:49:46.716442Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T08:49:46.768729Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T08:49:46.858665Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623323745769452:2470], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:46.858776Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:46.859195Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623323745769457:2473], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:46.863254Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-05-07T08:49:46.874007Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501623323745769459:2474], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T08:49:46.969773Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501623323745769510:3430] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:49:47.598184Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501623306565897945:2074];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:49:47.618214Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:49:48.757280Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1746607788746, txId: 281474976710672] shutting down ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_order/unittest >> DataShardOutOfOrder::TestOutOfOrderNonConflictingWrites-EvWrite [GOOD] Test command err: 2025-05-07T08:49:39.252403Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:105:2151], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:49:39.252564Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:49:39.252817Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0044fe/r3tmp/tmpGqAzEY/pdisk_1.dat 2025-05-07T08:49:39.631615Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-05-07T08:49:39.679733Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:49:39.735623Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:49:39.735776Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:49:39.747469Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:49:39.833214Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-05-07T08:49:39.888741Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3098: StateInit, received event# 268828672, Sender [1:656:2563], Recipient [1:665:2569]: NKikimr::TEvTablet::TEvBoot 2025-05-07T08:49:39.890133Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3098: StateInit, received event# 268828673, Sender [1:656:2563], Recipient [1:665:2569]: NKikimr::TEvTablet::TEvRestored 2025-05-07T08:49:39.890741Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:665:2569] 2025-05-07T08:49:39.891111Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-05-07T08:49:39.902026Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3111: StateInactive, received event# 268828684, Sender [1:656:2563], Recipient [1:665:2569]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-05-07T08:49:39.946678Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-05-07T08:49:39.946876Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-05-07T08:49:39.948864Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-05-07T08:49:39.948992Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-05-07T08:49:39.949088Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-05-07T08:49:39.949483Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-05-07T08:49:39.949640Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-05-07T08:49:39.949711Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state 
actor id [1:681:2569] in generation 1 2025-05-07T08:49:39.960756Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-05-07T08:49:40.007989Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-05-07T08:49:40.008355Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-05-07T08:49:40.008537Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:683:2579] 2025-05-07T08:49:40.008586Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-05-07T08:49:40.008651Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-05-07T08:49:40.008713Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T08:49:40.009003Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 2146435072, Sender [1:665:2569], Recipient [1:665:2569]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-05-07T08:49:40.009067Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3155: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-05-07T08:49:40.009540Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-05-07T08:49:40.009655Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-05-07T08:49:40.009747Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T08:49:40.009801Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-05-07T08:49:40.009857Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-05-07T08:49:40.009904Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-05-07T08:49:40.009955Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-05-07T08:49:40.010052Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-05-07T08:49:40.010122Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T08:49:40.010267Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269877761, Sender [1:672:2573], Recipient [1:665:2569]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-05-07T08:49:40.010308Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3166: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-05-07T08:49:40.010379Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:661:2566], serverId# [1:672:2573], sessionId# [0:0:0] 2025-05-07T08:49:40.010894Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269549568, Sender [1:410:2405], Recipient [1:672:2573] 2025-05-07T08:49:40.010945Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3136: StateWork, processing 
event TEvDataShard::TEvProposeTransaction 2025-05-07T08:49:40.011079Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-05-07T08:49:40.011327Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-05-07T08:49:40.011414Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-05-07T08:49:40.011543Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-05-07T08:49:40.011624Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-05-07T08:49:40.011701Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx 2025-05-07T08:49:40.011745Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx 2025-05-07T08:49:40.011788Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-05-07T08:49:40.012137Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayCompleteNoMoreRestarts 2025-05-07T08:49:40.012180Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit StoreSchemeTx 2025-05-07T08:49:40.012239Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit FinishPropose 2025-05-07T08:49:40.012286Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-05-07T08:49:40.012348Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayComplete 2025-05-07T08:49:40.012386Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit FinishPropose 2025-05-07T08:49:40.012431Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit WaitForPlan 2025-05-07T08:49:40.012475Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit WaitForPlan 2025-05-07T08:49:40.012512Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:281474976715657] at 72075186224037888 is not ready to execute on unit WaitForPlan 2025-05-07T08:49:40.016490Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269746185, Sender [1:684:2580], Recipient [1:665:2569]: NKikimr::TEvTxProxySchemeCache::TEvWatchNotifyUpdated 2025-05-07T08:49:40.016576Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-05-07T08:49:40.028115Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-05-07T08:49:40.028202Z node 1 
:TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-05-07T08:49:40.028247Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-05-07T08:49:40.028301Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 281474976715657 at tablet 72075186224037888 send to client, exec latency: 0 ms, propose latency: 0 ms, status: PREPARED 2025-05-07T08:49:40.028385Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-05-07T08:49:40.200127Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269877761, Sender [1:699:2589], Recipient [1:665:2569]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-05-07T08:49:40.200194Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3166: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-05-07T08:49:40.200252Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075 ... 1746607789089 } MaxMemoryUsage: 1048576 } 2025-05-07T08:49:49.119670Z node 2 :KQP_EXECUTER INFO: kqp_planner.cpp:688: TxId: 281474976715667. Ctx: { TraceId: 01jtmyzmkf1a1xp07g9hmv1m93, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YzUxNWE2NTktMjAzY2U2YWQtZWExODUzN2UtZTZjNmVkM2Y=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Compute actor has finished execution: [2:1068:2860] 2025-05-07T08:49:49.119742Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:644: ActorId: [2:1061:2842] TxId: 281474976715667. Ctx: { TraceId: 01jtmyzmkf1a1xp07g9hmv1m93, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YzUxNWE2NTktMjAzY2U2YWQtZWExODUzN2UtZTZjNmVkM2Y=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Waiting for: CA [2:1072:2864], CA [2:1069:2861], CA [2:1073:2865], CA [2:1070:2862], CA [2:1071:2863], 2025-05-07T08:49:49.119781Z node 2 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:157: ActorId: [2:1061:2842] TxId: 281474976715667. Ctx: { TraceId: 01jtmyzmkf1a1xp07g9hmv1m93, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YzUxNWE2NTktMjAzY2U2YWQtZWExODUzN2UtZTZjNmVkM2Y=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, waiting for 5 compute actor(s) and 0 datashard(s): CA [2:1072:2864], CA [2:1069:2861], CA [2:1073:2865], CA [2:1070:2862], CA [2:1071:2863], 2025-05-07T08:49:49.120529Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:433: ActorId: [2:1061:2842] TxId: 281474976715667. Ctx: { TraceId: 01jtmyzmkf1a1xp07g9hmv1m93, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YzUxNWE2NTktMjAzY2U2YWQtZWExODUzN2UtZTZjNmVkM2Y=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, got execution state from compute actor: [2:1069:2861], task: 2, state: COMPUTE_STATE_FINISHED, stats: { CpuTimeUs: 558 DurationUs: 1000 Tasks { TaskId: 2 StageId: 1 CpuTimeUs: 322 FinishTimeMs: 1746607789089 InputRows: 1 InputBytes: 5 OutputRows: 1 OutputBytes: 5 ComputeCpuTimeUs: 286 BuildCpuTimeUs: 36 HostName: "ghrun-sykirh5vua" NodeId: 2 StartTimeMs: 1746607789088 CreateTimeMs: 1746607789082 UpdateTimeMs: 1746607789089 } MaxMemoryUsage: 1048576 } 2025-05-07T08:49:49.120590Z node 2 :KQP_EXECUTER INFO: kqp_planner.cpp:688: TxId: 281474976715667. 
Ctx: { TraceId: 01jtmyzmkf1a1xp07g9hmv1m93, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YzUxNWE2NTktMjAzY2U2YWQtZWExODUzN2UtZTZjNmVkM2Y=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Compute actor has finished execution: [2:1069:2861] 2025-05-07T08:49:49.120627Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:644: ActorId: [2:1061:2842] TxId: 281474976715667. Ctx: { TraceId: 01jtmyzmkf1a1xp07g9hmv1m93, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YzUxNWE2NTktMjAzY2U2YWQtZWExODUzN2UtZTZjNmVkM2Y=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Waiting for: CA [2:1072:2864], CA [2:1073:2865], CA [2:1070:2862], CA [2:1071:2863], 2025-05-07T08:49:49.120659Z node 2 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:157: ActorId: [2:1061:2842] TxId: 281474976715667. Ctx: { TraceId: 01jtmyzmkf1a1xp07g9hmv1m93, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YzUxNWE2NTktMjAzY2U2YWQtZWExODUzN2UtZTZjNmVkM2Y=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, waiting for 4 compute actor(s) and 0 datashard(s): CA [2:1072:2864], CA [2:1073:2865], CA [2:1070:2862], CA [2:1071:2863], 2025-05-07T08:49:49.120727Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:433: ActorId: [2:1061:2842] TxId: 281474976715667. Ctx: { TraceId: 01jtmyzmkf1a1xp07g9hmv1m93, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YzUxNWE2NTktMjAzY2U2YWQtZWExODUzN2UtZTZjNmVkM2Y=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, got execution state from compute actor: [2:1070:2862], task: 4, state: COMPUTE_STATE_FINISHED, stats: { CpuTimeUs: 422 DurationUs: 2000 Tasks { TaskId: 4 StageId: 3 CpuTimeUs: 214 FinishTimeMs: 1746607789090 InputRows: 1 InputBytes: 5 OutputRows: 1 OutputBytes: 5 ComputeCpuTimeUs: 194 BuildCpuTimeUs: 20 HostName: "ghrun-sykirh5vua" NodeId: 2 StartTimeMs: 1746607789088 CreateTimeMs: 1746607789082 UpdateTimeMs: 1746607789090 } MaxMemoryUsage: 1048576 } 2025-05-07T08:49:49.120762Z node 2 :KQP_EXECUTER INFO: kqp_planner.cpp:688: TxId: 281474976715667. Ctx: { TraceId: 01jtmyzmkf1a1xp07g9hmv1m93, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YzUxNWE2NTktMjAzY2U2YWQtZWExODUzN2UtZTZjNmVkM2Y=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Compute actor has finished execution: [2:1070:2862] 2025-05-07T08:49:49.120788Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:644: ActorId: [2:1061:2842] TxId: 281474976715667. Ctx: { TraceId: 01jtmyzmkf1a1xp07g9hmv1m93, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YzUxNWE2NTktMjAzY2U2YWQtZWExODUzN2UtZTZjNmVkM2Y=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Waiting for: CA [2:1072:2864], CA [2:1073:2865], CA [2:1071:2863], 2025-05-07T08:49:49.120814Z node 2 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:157: ActorId: [2:1061:2842] TxId: 281474976715667. Ctx: { TraceId: 01jtmyzmkf1a1xp07g9hmv1m93, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YzUxNWE2NTktMjAzY2U2YWQtZWExODUzN2UtZTZjNmVkM2Y=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, waiting for 3 compute actor(s) and 0 datashard(s): CA [2:1072:2864], CA [2:1073:2865], CA [2:1071:2863], 2025-05-07T08:49:49.120877Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:433: ActorId: [2:1061:2842] TxId: 281474976715667. 
Ctx: { TraceId: 01jtmyzmkf1a1xp07g9hmv1m93, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YzUxNWE2NTktMjAzY2U2YWQtZWExODUzN2UtZTZjNmVkM2Y=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, got execution state from compute actor: [2:1071:2863], task: 5, state: COMPUTE_STATE_FINISHED, stats: { CpuTimeUs: 554 DurationUs: 30000 Tasks { TaskId: 5 StageId: 4 CpuTimeUs: 322 FinishTimeMs: 1746607789119 InputRows: 2 InputBytes: 10 OutputRows: 2 OutputBytes: 7 ComputeCpuTimeUs: 281 BuildCpuTimeUs: 41 HostName: "ghrun-sykirh5vua" NodeId: 2 StartTimeMs: 1746607789089 CreateTimeMs: 1746607789082 UpdateTimeMs: 1746607789119 } MaxMemoryUsage: 1048576 } 2025-05-07T08:49:49.120910Z node 2 :KQP_EXECUTER INFO: kqp_planner.cpp:688: TxId: 281474976715667. Ctx: { TraceId: 01jtmyzmkf1a1xp07g9hmv1m93, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YzUxNWE2NTktMjAzY2U2YWQtZWExODUzN2UtZTZjNmVkM2Y=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Compute actor has finished execution: [2:1071:2863] 2025-05-07T08:49:49.120934Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:644: ActorId: [2:1061:2842] TxId: 281474976715667. Ctx: { TraceId: 01jtmyzmkf1a1xp07g9hmv1m93, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YzUxNWE2NTktMjAzY2U2YWQtZWExODUzN2UtZTZjNmVkM2Y=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Waiting for: CA [2:1072:2864], CA [2:1073:2865], 2025-05-07T08:49:49.120958Z node 2 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:157: ActorId: [2:1061:2842] TxId: 281474976715667. Ctx: { TraceId: 01jtmyzmkf1a1xp07g9hmv1m93, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YzUxNWE2NTktMjAzY2U2YWQtZWExODUzN2UtZTZjNmVkM2Y=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, waiting for 2 compute actor(s) and 0 datashard(s): CA [2:1072:2864], CA [2:1073:2865], 2025-05-07T08:49:49.121103Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:433: ActorId: [2:1061:2842] TxId: 281474976715667. Ctx: { TraceId: 01jtmyzmkf1a1xp07g9hmv1m93, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YzUxNWE2NTktMjAzY2U2YWQtZWExODUzN2UtZTZjNmVkM2Y=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, got execution state from compute actor: [2:1072:2864], task: 6, state: COMPUTE_STATE_FINISHED, stats: { CpuTimeUs: 330 Tasks { TaskId: 6 StageId: 5 CpuTimeUs: 142 FinishTimeMs: 1746607789120 InputRows: 2 InputBytes: 7 OutputRows: 2 OutputBytes: 7 ComputeCpuTimeUs: 99 BuildCpuTimeUs: 43 HostName: "ghrun-sykirh5vua" NodeId: 2 CreateTimeMs: 1746607789082 UpdateTimeMs: 1746607789120 } MaxMemoryUsage: 1048576 } 2025-05-07T08:49:49.121135Z node 2 :KQP_EXECUTER INFO: kqp_planner.cpp:688: TxId: 281474976715667. Ctx: { TraceId: 01jtmyzmkf1a1xp07g9hmv1m93, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YzUxNWE2NTktMjAzY2U2YWQtZWExODUzN2UtZTZjNmVkM2Y=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Compute actor has finished execution: [2:1072:2864] 2025-05-07T08:49:49.121157Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:644: ActorId: [2:1061:2842] TxId: 281474976715667. Ctx: { TraceId: 01jtmyzmkf1a1xp07g9hmv1m93, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YzUxNWE2NTktMjAzY2U2YWQtZWExODUzN2UtZTZjNmVkM2Y=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Waiting for: CA [2:1073:2865], 2025-05-07T08:49:49.121177Z node 2 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:157: ActorId: [2:1061:2842] TxId: 281474976715667. Ctx: { TraceId: 01jtmyzmkf1a1xp07g9hmv1m93, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YzUxNWE2NTktMjAzY2U2YWQtZWExODUzN2UtZTZjNmVkM2Y=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, waiting for 1 compute actor(s) and 0 datashard(s): CA [2:1073:2865], 2025-05-07T08:49:49.121415Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:433: ActorId: [2:1061:2842] TxId: 281474976715667. Ctx: { TraceId: 01jtmyzmkf1a1xp07g9hmv1m93, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YzUxNWE2NTktMjAzY2U2YWQtZWExODUzN2UtZTZjNmVkM2Y=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, got execution state from compute actor: [2:1073:2865], task: 7, state: COMPUTE_STATE_FINISHED, stats: { CpuTimeUs: 399 DurationUs: 1000 Tasks { TaskId: 7 StageId: 6 CpuTimeUs: 192 FinishTimeMs: 1746607789121 InputRows: 2 InputBytes: 7 OutputRows: 2 OutputBytes: 7 ResultRows: 2 ResultBytes: 7 ComputeCpuTimeUs: 155 BuildCpuTimeUs: 37 HostName: "ghrun-sykirh5vua" NodeId: 2 StartTimeMs: 1746607789120 CreateTimeMs: 1746607789082 UpdateTimeMs: 1746607789121 } MaxMemoryUsage: 1048576 } 2025-05-07T08:49:49.121481Z node 2 :KQP_EXECUTER INFO: kqp_planner.cpp:688: TxId: 281474976715667. Ctx: { TraceId: 01jtmyzmkf1a1xp07g9hmv1m93, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YzUxNWE2NTktMjAzY2U2YWQtZWExODUzN2UtZTZjNmVkM2Y=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Compute actor has finished execution: [2:1073:2865] 2025-05-07T08:49:49.121712Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:2140: ActorId: [2:1061:2842] TxId: 281474976715667. Ctx: { TraceId: 01jtmyzmkf1a1xp07g9hmv1m93, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YzUxNWE2NTktMjAzY2U2YWQtZWExODUzN2UtZTZjNmVkM2Y=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. terminate execution. 2025-05-07T08:49:49.121792Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:838: ActorId: [2:1061:2842] TxId: 281474976715667. Ctx: { TraceId: 01jtmyzmkf1a1xp07g9hmv1m93, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YzUxNWE2NTktMjAzY2U2YWQtZWExODUzN2UtZTZjNmVkM2Y=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Resource usage for last stat interval: ComputeTime: 0.003776s ReadRows: 2 ReadBytes: 16 ru: 2 rate limiter was not found force flag: 1 { items { uint32_value: 3 } items { uint32_value: 2 } }, { items { uint32_value: 4 } items { uint32_value: 2 } } >> KqpSystemView::FailResolve >> KqpSysColV1::SelectRowAsterisk [GOOD] |88.8%| [TA] $(B)/ydb/core/blobstorage/ut_blobstorage/ut_donor/test-results/unittest/{meta.json ... 
results_accumulator.log} >> KqpSystemView::ReadSuccess [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> KqpSystemView::PartitionStatsRange3 [GOOD] Test command err: Trying to start YDB, gRPC: 11978, MsgBus: 5194 2025-05-07T08:49:43.909012Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501623309208095734:2128];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:49:43.910333Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0031bc/r3tmp/tmp33c1W8/pdisk_1.dat 2025-05-07T08:49:44.265353Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 11978, node 1 2025-05-07T08:49:44.323145Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:49:44.324499Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:49:44.334478Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:49:44.380227Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:49:44.380253Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:49:44.380260Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:49:44.380406Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:5194 TClient is connected to server localhost:5194 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:49:45.010449Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:45.052752Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... waiting... 
2025-05-07T08:49:45.187133Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 2025-05-07T08:49:45.338637Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-05-07T08:49:45.408611Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 2025-05-07T08:49:47.367908Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623326387966495:2406], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:47.368026Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:47.764515Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T08:49:47.802564Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T08:49:47.879477Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T08:49:47.975142Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T08:49:48.006141Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T08:49:48.035774Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T08:49:48.076056Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T08:49:48.160882Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623330682934454:2470], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:48.160962Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:48.161130Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623330682934459:2473], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:48.165552Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-05-07T08:49:48.176531Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501623330682934461:2474], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T08:49:48.259536Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501623330682934512:3420] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:49:48.944817Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501623309208095734:2128];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:49:48.944927Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:49:49.644863Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1746607789625, txId: 281474976710672] shutting down >> KqpSystemView::PartitionStatsRange1 [GOOD] >> KqpSysColV1::SelectRange [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> KqpSysColV0::SelectRowById [GOOD] Test command err: Trying to start YDB, gRPC: 21775, MsgBus: 28357 2025-05-07T08:49:44.271519Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501623312411147365:2263];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:49:44.271928Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0031a0/r3tmp/tmpJHAHqX/pdisk_1.dat 2025-05-07T08:49:44.710437Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:49:44.728745Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:49:44.728885Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:49:44.732023Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 21775, node 1 2025-05-07T08:49:44.829620Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:49:44.829648Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:49:44.829658Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:49:44.829790Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:28357 TClient is connected to server localhost:28357 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:49:45.399391Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:45.427536Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:45.603142Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:45.789631Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:45.866970Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:47.851046Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623325296050709:2406], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:47.851203Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:48.312759Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T08:49:48.348573Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T08:49:48.377722Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T08:49:48.450540Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T08:49:48.480699Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T08:49:48.556384Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T08:49:48.635136Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T08:49:48.700257Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623329591018671:2470], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:48.700319Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:48.700458Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623329591018676:2473], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:48.704315Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-05-07T08:49:48.714495Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501623329591018678:2474], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T08:49:48.801643Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501623329591018729:3427] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:49:49.270914Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501623312411147365:2263];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:49:49.271014Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> KqpSysColV1::SelectRowAsterisk [GOOD] Test command err: Trying to start YDB, gRPC: 23721, MsgBus: 13946 2025-05-07T08:49:44.773822Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501623313793542050:2199];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:49:44.814318Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00317d/r3tmp/tmpBdHQv4/pdisk_1.dat 2025-05-07T08:49:45.175964Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 23721, node 1 2025-05-07T08:49:45.227511Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:49:45.227665Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:49:45.230653Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:49:45.297902Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:49:45.297927Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:49:45.297936Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:49:45.298092Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:13946 TClient is connected to server localhost:13946 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:49:45.897737Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:45.916120Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T08:49:45.928733Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:46.118367Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:46.269917Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:46.366284Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:48.514289Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623330973412750:2406], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:48.514409Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:48.815203Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T08:49:48.849541Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T08:49:48.881151Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T08:49:48.909531Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T08:49:48.945531Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T08:49:49.010154Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T08:49:49.058286Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T08:49:49.147288Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623335268380706:2470], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:49.147389Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:49.147560Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623335268380711:2473], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:49.151684Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-05-07T08:49:49.164378Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501623335268380713:2474], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T08:49:49.223763Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501623335268380764:3423] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:49:49.768019Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501623313793542050:2199];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:49:49.768162Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> KqpSystemView::PartitionStatsRange2 [GOOD] |88.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> KqpSystemView::ReadSuccess [GOOD] Test command err: Trying to start YDB, gRPC: 19750, MsgBus: 22725 2025-05-07T08:49:45.427457Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501623317398427686:2068];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:49:45.438073Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00316b/r3tmp/tmpHe4lNZ/pdisk_1.dat 2025-05-07T08:49:45.865196Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:49:45.865450Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:49:45.867208Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:49:45.886356Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 19750, node 1 2025-05-07T08:49:45.978792Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:49:45.978833Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:49:45.978845Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:49:45.978982Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:22725 TClient is connected to server localhost:22725 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:49:46.585544Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:46.606429Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:46.800501Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:46.970438Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:47.053566Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:49.045964Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623334578298503:2406], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:49.046123Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:49.428157Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T08:49:49.465486Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T08:49:49.500554Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T08:49:49.552918Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T08:49:49.587984Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T08:49:49.633935Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T08:49:49.672388Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T08:49:49.745698Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623334578299162:2470], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:49.745783Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:49.746156Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623334578299167:2473], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:49.750929Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-05-07T08:49:49.763890Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501623334578299169:2474], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T08:49:49.830051Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501623334578299220:3420] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:49:50.427683Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501623317398427686:2068];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:49:50.427755Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:49:50.796581Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710672:0, at schemeshard: 72057594046644480 2025-05-07T08:49:50.947329Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710674. Ctx: { TraceId: 01jtmyzpq76ye4ejt1amp533vq, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZTNiOTRmMTQtMzBmMDExNTktNDMyM2IyYmMtMTdlYTdkNDE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:49:50.954843Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1746607790945, txId: 281474976710673] shutting down >> KqpSysColV1::StreamInnerJoinSelect >> KqpSystemView::QueryStatsScan [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> KqpSystemView::PartitionStatsRange1 [GOOD] Test command err: Trying to start YDB, gRPC: 21495, MsgBus: 6009 2025-05-07T08:49:45.410284Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501623318862251910:2063];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:49:45.410371Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003169/r3tmp/tmpERwNjN/pdisk_1.dat 2025-05-07T08:49:45.766929Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:49:45.820453Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:49:45.820556Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 21495, node 1 2025-05-07T08:49:45.827354Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:49:45.897404Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:49:45.897442Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:49:45.897452Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 
2025-05-07T08:49:45.898763Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6009 TClient is connected to server localhost:6009 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:49:46.463272Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:46.497923Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:46.647186Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:46.815102Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:46.905496Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:48.992779Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623331747155472:2406], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:48.992895Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:49.383391Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T08:49:49.422215Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T08:49:49.491077Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T08:49:49.546418Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T08:49:49.582340Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T08:49:49.658245Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T08:49:49.701941Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T08:49:49.782365Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623336042123431:2470], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:49.782528Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:49.783797Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623336042123436:2473], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:49.787876Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-05-07T08:49:49.798425Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501623336042123438:2474], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T08:49:49.876027Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501623336042123489:3421] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:49:50.411866Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501623318862251910:2063];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:49:50.411961Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:49:51.172371Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1746607791164, txId: 281474976710672] shutting down ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> KqpSysColV1::SelectRange [GOOD] Test command err: Trying to start YDB, gRPC: 31948, MsgBus: 19032 2025-05-07T08:49:45.422553Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501623316293347988:2065];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:49:45.422596Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003175/r3tmp/tmpkMhLCQ/pdisk_1.dat 2025-05-07T08:49:45.856372Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:49:45.856484Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:49:45.858777Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:49:45.859094Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 31948, node 1 2025-05-07T08:49:45.977537Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:49:45.977563Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:49:45.977574Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:49:45.977743Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:19032 TClient is connected to server localhost:19032 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:49:46.550977Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:46.571019Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:46.578790Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 2025-05-07T08:49:46.782045Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:46.975875Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:47.057028Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:49.128072Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623333473218819:2406], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:49.128198Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:49.484379Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T08:49:49.562701Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T08:49:49.637312Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T08:49:49.670733Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T08:49:49.715203Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T08:49:49.756022Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T08:49:49.838163Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T08:49:49.902490Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623333473219486:2470], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:49.902562Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:49.902727Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623333473219491:2473], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:49.907106Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-05-07T08:49:49.919997Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501623333473219493:2474], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T08:49:50.009405Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501623337768186840:3421] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:49:50.423050Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501623316293347988:2065];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:49:50.423136Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> KqpSysColV1::InnerJoinTables [GOOD] >> Viewer::ExecuteQueryDoesntExecuteSchemeOperationsInsideTransation [GOOD] >> Viewer::FloatPointJsonQuery >> KqpSysColV1::StreamSelectRowAsterisk >> KqpSysColV1::SelectRowById [GOOD] >> KqpSystemView::PartitionStatsParametricRanges [GOOD] |88.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> test_sql_streaming.py::test[suites-ReadTopicWithMetadata-default.txt] [FAIL] >> test_sql_streaming.py::test[suites-ReadTopicWithMetadataInsideFilter-default.txt] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> KqpSystemView::PartitionStatsRange2 [GOOD] Test command err: Trying to start YDB, gRPC: 14105, MsgBus: 61813 2025-05-07T08:49:46.377920Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501623321104358148:2205];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:49:46.378186Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003153/r3tmp/tmpATWMWo/pdisk_1.dat 2025-05-07T08:49:46.849265Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:49:46.849396Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:49:46.852610Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:49:46.913076Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 14105, node 1 2025-05-07T08:49:46.992770Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:49:46.992800Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:49:46.992816Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:49:46.993009Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:61813 TClient is connected to server localhost:61813 WaitRootIsUp 'Root'... 
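Every stderr entry in the blocks above follows one fixed shape: an ISO-8601 timestamp, "node <id>", a ":COMPONENT LEVEL:" tag (TX_PROXY, METADATA_PROVIDER, FLAT_TX_SCHEMESHARD, KQP_WORKLOAD_SERVICE, ...), a source location such as schemereq.cpp:540, and a free-form message. Because consecutive entries are run together on single physical lines in this capture, scanning with a multi-match regex is more reliable than splitting on newlines. Below is a minimal parsing sketch; the regex and field names are editorial, derived only from the entries shown here and not from any YDB or ya tooling.

import re

# Entry shape observed above, e.g.:
# 2025-05-07T08:49:49.876027Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: <message>
# The message is taken lazily up to the next entry's timestamp, so several
# entries concatenated onto one physical line are still split correctly.
LOG_ENTRY = re.compile(
    r"(?P<ts>\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d+Z) "
    r"node (?P<node>\d+) "
    r":(?P<component>[A-Z_]+) (?P<level>WARN|ERROR): "
    r"(?P<message>.*?)"
    r"(?=\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d+Z|$)",
    re.DOTALL,
)

def entries(text):
    """Yield one dict per WARN/ERROR entry found anywhere in the blob;
    non-matching runner output (progress bars, verdict markers) is skipped."""
    for m in LOG_ENTRY.finditer(text):
        yield m.groupdict()

Only WARN and ERROR appear in this excerpt, so the sketch matches just those two levels; widening the alternation would be needed for fuller logs.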
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:49:47.555976Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:47.584551Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-05-07T08:49:47.719435Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 2025-05-07T08:49:47.891498Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:48.083210Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:49.945592Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623333989261527:2406], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:49.945762Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:50.308585Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T08:49:50.345171Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T08:49:50.398353Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T08:49:50.431100Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T08:49:50.467656Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T08:49:50.540657Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T08:49:50.610752Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T08:49:50.662360Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623338284229490:2470], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:50.662470Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:50.663101Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623338284229495:2473], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:50.666907Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-05-07T08:49:50.681540Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501623338284229497:2474], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T08:49:50.765774Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501623338284229548:3421] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:49:51.370210Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501623321104358148:2205];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:49:51.370296Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:49:52.092854Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1746607792083, txId: 281474976710672] shutting down ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> KqpSystemView::QueryStatsScan [GOOD] Test command err: Trying to start YDB, gRPC: 1362, MsgBus: 6159 2025-05-07T08:49:46.060862Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501623321190492714:2256];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:49:46.062193Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003162/r3tmp/tmpqEi4XL/pdisk_1.dat 2025-05-07T08:49:46.461628Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:49:46.467376Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:49:46.467476Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:49:46.471380Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 1362, node 1 2025-05-07T08:49:46.580914Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:49:46.580967Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:49:46.581001Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:49:46.581113Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6159 TClient is connected to server localhost:6159 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:49:47.123688Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:47.153809Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:47.332551Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-05-07T08:49:47.521915Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 2025-05-07T08:49:47.609726Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:49.637849Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623334075396055:2406], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:49.637987Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:49.988095Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T08:49:50.025728Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T08:49:50.058517Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T08:49:50.102579Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T08:49:50.151586Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T08:49:50.224856Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T08:49:50.260177Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T08:49:50.325289Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623338370364014:2470], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:50.325381Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:50.325666Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623338370364019:2473], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:50.329776Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-05-07T08:49:50.344944Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501623338370364021:2474], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T08:49:50.411026Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501623338370364072:3420] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:49:51.059989Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501623321190492714:2256];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:49:51.060108Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:49:52.204121Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1746607791761, txId: 281474976710672] shutting down 2025-05-07T08:49:52.308968Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1746607792302, txId: 281474976710675] shutting down >> Viewer::SelectStringWithBase64Encoding [GOOD] >> Viewer::QueryExecuteScript >> KqpSysColV0::SelectRowAsterisk [GOOD] |88.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> KqpSysColV1::InnerJoinTables [GOOD] Test command err: Trying to start YDB, gRPC: 26247, MsgBus: 23887 2025-05-07T08:49:46.298902Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501623322315064862:2069];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:49:46.319504Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003154/r3tmp/tmpeRJpiO/pdisk_1.dat 2025-05-07T08:49:46.777780Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:49:46.794769Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:49:46.794894Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:49:46.803674Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 26247, node 1 2025-05-07T08:49:46.931702Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:49:46.931728Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:49:46.931735Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:49:46.931914Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23887 TClient is connected to server localhost:23887 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:49:47.568562Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:47.595995Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:47.741922Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:48.034763Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:48.127250Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:49.884826Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623335199968381:2406], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:49.884983Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:50.235539Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T08:49:50.274253Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T08:49:50.308117Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T08:49:50.355010Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T08:49:50.396092Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T08:49:50.490991Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T08:49:50.562973Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T08:49:50.620235Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623339494936346:2470], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:50.620329Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:50.620343Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623339494936351:2473], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:50.624116Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-05-07T08:49:50.633413Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501623339494936353:2474], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T08:49:50.701008Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501623339494936404:3423] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:49:51.299012Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501623322315064862:2069];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:49:51.299130Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> KqpSystemView::QueryStatsSimple >> KqpSysColV1::StreamSelectRowById >> Viewer::SelectStringWithNoBase64Encoding [GOOD] >> Viewer::ServerlessNodesPage ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> KqpSysColV1::SelectRowById [GOOD] Test command err: Trying to start YDB, gRPC: 14151, MsgBus: 4653 2025-05-07T08:49:47.391583Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501623325675411225:2065];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:49:47.392333Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003146/r3tmp/tmp0K7Pqg/pdisk_1.dat 2025-05-07T08:49:47.861578Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:49:47.861684Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:49:47.866560Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:49:47.866801Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 14151, node 1 2025-05-07T08:49:48.099105Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:49:48.099126Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:49:48.099148Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:49:48.099271Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:4653 TClient is connected to server localhost:4653 WaitRootIsUp 'Root'... 
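The same startup cascade repeats in every "Test command err:" block above, always under a test that still ends [GOOD]: KQP_WORKLOAD_SERVICE warns NOT_FOUND for pool "default", the schemeshard runs an ESchemeOpCreateResourcePool suboperation, the pool creator schedules a retry on "Transaction ... completed, doublechecking", TX_PROXY reports "path exist, request accepts it" for /Root/.metadata/workload_manager/pools/default, and METADATA_PROVIDER times out on //Root/.metadata/initialization/migrations. Reading this as each fresh test server racing to create its default resource pool is an editorial inference from this log alone, not a statement from YDB documentation; under that assumption, the sketch below filters the repeated chatter so only unexpected diagnostics remain.

from collections import Counter

# Substrings of the bootstrap chatter shown above. Classifying them as
# benign is an assumption based on their co-occurrence with [GOOD]
# verdicts in this log, nothing more.
BOOTSTRAP_NOISE = (
    "Resource pool default not found or you don't have access permissions",
    "Failed to fetch pool default",
    "completed, doublechecking",
    "path exist, request accepts it",
    "cannot detect path existence",
    "Table profiles were not loaded",
)

def surprising(raw_entries):
    """Count WARN/ERROR lines that fall outside the repeated bootstrap
    pattern; raw_entries is any iterable of log strings."""
    counts = Counter()
    for entry in raw_entries:
        if ("ERROR:" in entry or "WARN:" in entry) and not any(
            noise in entry for noise in BOOTSTRAP_NOISE
        ):
            counts[entry.strip()] += 1
    return counts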
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:49:48.741410Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:48.761491Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T08:49:48.769987Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:48.907769Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:49.077587Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-05-07T08:49:49.157655Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 2025-05-07T08:49:50.998395Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623338560314755:2406], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:50.998515Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:51.302650Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T08:49:51.333884Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T08:49:51.369156Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T08:49:51.401285Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T08:49:51.430734Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T08:49:51.468253Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T08:49:51.507557Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T08:49:51.600772Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623342855282714:2470], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:51.600885Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:51.600962Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623342855282719:2473], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:51.604725Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-05-07T08:49:51.613893Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501623342855282721:2474], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T08:49:51.700801Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501623342855282772:3421] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:49:52.391333Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501623325675411225:2065];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:49:52.391412Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> KqpSystemView::PartitionStatsParametricRanges [GOOD] Test command err: Trying to start YDB, gRPC: 20596, MsgBus: 27983 2025-05-07T08:49:46.884393Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501623322081315980:2062];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:49:46.884462Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00314b/r3tmp/tmpmR7LTd/pdisk_1.dat 2025-05-07T08:49:47.355162Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:49:47.382824Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:49:47.382938Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 20596, node 1 2025-05-07T08:49:47.390331Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:49:47.452992Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:49:47.453023Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:49:47.453063Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:49:47.453211Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:27983 TClient is connected to server localhost:27983 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:49:48.213027Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-05-07T08:49:48.238226Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 2025-05-07T08:49:48.420547Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:48.603058Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-05-07T08:49:48.691571Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 2025-05-07T08:49:50.539636Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623339261186810:2406], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:50.539817Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:50.894821Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T08:49:50.925131Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T08:49:50.956724Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T08:49:50.988534Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T08:49:51.021589Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T08:49:51.054116Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T08:49:51.123846Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T08:49:51.209627Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623343556154768:2470], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:51.209731Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623343556154773:2473], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:51.209746Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:51.213289Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-05-07T08:49:51.223258Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501623343556154775:2474], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T08:49:51.320884Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501623343556154826:3423] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:49:51.884484Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501623322081315980:2062];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:49:51.884560Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:49:52.858444Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1746607792846, txId: 281474976710672] shutting down >> KqpSysColV1::StreamSelectRange >> KqpSysColV1::StreamInnerJoinSelectAsterisk >> KqpSysColV0::InnerJoinTables [GOOD] >> KqpSysColV1::InnerJoinSelect [GOOD] >> test_sql_streaming.py::test[suites-GroupByHoppingWindowListKey-default.txt] [FAIL] >> test_sql_streaming.py::test[suites-GroupByHoppingWindowNoKey-default.txt] >> KqpSysColV1::InnerJoinSelectAsterisk [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> KqpSysColV0::SelectRowAsterisk [GOOD] Test command err: Trying to start YDB, gRPC: 15896, MsgBus: 23740 2025-05-07T08:49:48.234755Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501623328868515920:2080];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:49:48.239746Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00313d/r3tmp/tmpU58p6F/pdisk_1.dat 2025-05-07T08:49:48.710127Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:49:48.732335Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:49:48.732443Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:49:48.733940Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 15896, node 1 2025-05-07T08:49:48.804384Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:49:48.804431Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:49:48.804442Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:49:48.804582Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23740 TClient is connected to server localhost:23740 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:49:49.412034Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:49.432472Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T08:49:49.438144Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:49.647256Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:49.838578Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:49.909297Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:51.897669Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623341753419420:2406], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:51.897754Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:52.326077Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T08:49:52.358025Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T08:49:52.412655Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T08:49:52.441811Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T08:49:52.472211Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T08:49:52.502313Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T08:49:52.531087Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T08:49:52.586770Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623346048387375:2470], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:52.586869Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:52.586884Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623346048387380:2473], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:52.590510Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-05-07T08:49:52.599732Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501623346048387382:2474], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T08:49:52.684836Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501623346048387433:3418] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:49:53.238091Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501623328868515920:2080];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:49:53.238202Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> KqpSysColV1::UpdateAndDelete >> KqpSysColV0::InnerJoinSelectAsterisk [GOOD] >> KqpSysColV1::StreamInnerJoinTables |88.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> KqpSystemView::NodesRange2 |88.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> KqpSysColV0::InnerJoinTables [GOOD] Test command err: Trying to start YDB, gRPC: 25092, MsgBus: 7362 2025-05-07T08:49:48.609103Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501623330524129768:2096];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:49:48.609159Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003120/r3tmp/tmpqfGOKv/pdisk_1.dat 2025-05-07T08:49:49.009692Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 25092, node 1 2025-05-07T08:49:49.094481Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:49:49.094665Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:49:49.105201Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:49:49.152840Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:49:49.152881Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:49:49.152889Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:49:49.153001Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:7362 TClient is connected to server localhost:7362 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:49:49.739833Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:49.752814Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T08:49:49.768402Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:49.933566Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:50.111623Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:50.204921Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:52.192889Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623347704000566:2406], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:52.192982Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:52.572714Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T08:49:52.600316Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T08:49:52.631030Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T08:49:52.662524Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T08:49:52.692235Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T08:49:52.722088Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T08:49:52.793391Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T08:49:52.878651Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623347704001234:2470], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:52.878761Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:52.878864Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623347704001239:2473], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:52.882930Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-05-07T08:49:52.892351Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501623347704001241:2474], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T08:49:52.979972Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501623347704001292:3416] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:49:53.609476Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501623330524129768:2096];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:49:53.609543Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> KqpSysColV1::InnerJoinSelect [GOOD] Test command err: Trying to start YDB, gRPC: 18049, MsgBus: 23672 2025-05-07T08:49:48.900001Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501623332379019295:2062];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:49:48.900375Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0030b7/r3tmp/tmpcZRG1Z/pdisk_1.dat 2025-05-07T08:49:49.353898Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:49:49.398324Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:49:49.398444Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 18049, node 1 2025-05-07T08:49:49.413817Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:49:49.505535Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:49:49.505569Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:49:49.505581Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:49:49.505713Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23672 TClient is connected to server localhost:23672 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:49:50.089476Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:50.121527Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:50.280106Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-05-07T08:49:50.459393Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 2025-05-07T08:49:50.542689Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:52.313834Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623349558890147:2406], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:52.313933Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:52.658973Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T08:49:52.687707Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T08:49:52.713426Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T08:49:52.742032Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T08:49:52.773350Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T08:49:52.804706Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T08:49:52.837039Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T08:49:52.913067Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623349558890806:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:52.913138Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:52.913241Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623349558890811:2472], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:52.917273Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-05-07T08:49:52.926854Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501623349558890813:2473], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T08:49:52.987420Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501623349558890866:3424] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:49:53.899756Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501623332379019295:2062];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:49:53.899833Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> KqpSysColV1::InnerJoinSelectAsterisk [GOOD] Test command err: Trying to start YDB, gRPC: 29405, MsgBus: 2294 2025-05-07T08:49:48.573776Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501623329336636562:2064];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:49:48.573831Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00312c/r3tmp/tmpXJXLhB/pdisk_1.dat 2025-05-07T08:49:48.984862Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 29405, node 1 2025-05-07T08:49:49.054507Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:49:49.054743Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:49:49.059226Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:49:49.114640Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:49:49.114668Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:49:49.114674Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:49:49.114887Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:2294 TClient is connected to server localhost:2294 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:49:49.751259Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:49.765339Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-05-07T08:49:49.776677Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:49.925190Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:50.106443Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:50.197411Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:52.013206Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623346516507419:2406], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:52.013368Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:52.403881Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-05-07T08:49:52.433008Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-05-07T08:49:52.460763Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-05-07T08:49:52.491665Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-05-07T08:49:52.520562Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-05-07T08:49:52.589964Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-05-07T08:49:52.663583Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-05-07T08:49:52.716683Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623346516508084:2470], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:52.716755Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:52.716855Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623346516508089:2473], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:52.720004Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-05-07T08:49:52.734909Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501623346516508091:2474], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-05-07T08:49:52.796291Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501623346516508142:3424] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:49:53.573952Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501623329336636562:2064];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:49:53.574034Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> KqpSystemView::PartitionStatsSimple >> KqpSysColV0::SelectRange ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> KqpSysColV0::InnerJoinSelectAsterisk [GOOD] Test command err: Trying to start YDB, gRPC: 19077, MsgBus: 29533 2025-05-07T08:49:49.036095Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501623336398936182:2063];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:49:49.036378Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0030b6/r3tmp/tmpDqCmih/pdisk_1.dat 2025-05-07T08:49:49.441944Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:49:49.468940Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:49:49.469056Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 19077, node 1 2025-05-07T08:49:49.471193Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:49:49.547670Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:49:49.547690Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:49:49.547702Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:49:49.547795Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:29533 TClient is connected to server localhost:29533 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:49:50.236460Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:50.255182Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T08:49:50.267276Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:50.439283Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:50.592557Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-05-07T08:49:50.670695Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 2025-05-07T08:49:52.453429Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623349283839720:2406], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:52.453561Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:52.757873Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T08:49:52.782385Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T08:49:52.808422Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T08:49:52.832715Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T08:49:52.858276Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T08:49:52.885241Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T08:49:52.952562Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T08:49:53.012850Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623353578807675:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:53.012929Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:53.012984Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623353578807680:2472], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:53.016790Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-05-07T08:49:53.025911Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501623353578807682:2473], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T08:49:53.083432Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501623353578807734:3418] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:49:54.036415Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501623336398936182:2063];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:49:54.037239Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> test_sql_streaming.py::test[suites-ReadWriteTopic-default.txt] [FAIL] >> test_sql_streaming.py::test[suites-ReadWriteTopicWithSchema-default.txt] >> Viewer::JsonAutocompleteScheme [GOOD] >> Viewer::JsonAutocompleteEmptyColumns >> TYardTest::TestEnormousDisk [GOOD] >> KqpSystemView::FailResolve [GOOD] >> KqpSystemView::NodesSimple |88.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> KqpSystemView::NodesRange1 >> KqpSysColV0::InnerJoinSelect >> KqpSysColV0::UpdateAndDelete ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> KqpSystemView::FailResolve [GOOD] Test command err: Trying to start YDB, gRPC: 61714, MsgBus: 1773 2025-05-07T08:49:51.235833Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501623342523863339:2061];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:49:51.235903Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003077/r3tmp/tmpRKGiXR/pdisk_1.dat 2025-05-07T08:49:51.607714Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 61714, node 1 2025-05-07T08:49:51.640622Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:49:51.640765Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:49:51.642601Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:49:51.687090Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:49:51.687117Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:49:51.687135Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:49:51.687309Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:1773 TClient is connected to server localhost:1773 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:49:52.188865Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-05-07T08:49:52.215106Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 2025-05-07T08:49:52.345339Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:52.489472Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:52.566341Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:54.348948Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623355408766899:2406], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:54.349112Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:54.675937Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T08:49:54.710016Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T08:49:54.745754Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T08:49:54.780975Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T08:49:54.815604Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T08:49:54.846497Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T08:49:54.882416Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T08:49:54.962334Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623355408767558:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:54.962397Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:54.962533Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623355408767563:2472], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:54.966617Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-05-07T08:49:54.976731Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501623355408767565:2473], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T08:49:55.069087Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501623359703734912:3423] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:49:56.018492Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710672:0, at schemeshard: 72057594046644480 2025-05-07T08:49:56.210622Z node 1 :TX_PROXY_SCHEME_CACHE WARN: cache.cpp:303: Access denied: self# [1:7501623363998702525:3628], for# user0@builtin, access# SelectRow 2025-05-07T08:49:56.211402Z node 1 :KQP_EXECUTER ERROR: kqp_table_resolver.cpp:275: TxId: 281474976710674. Error resolving keys for entry: { TableId: [OwnerId: 72057594046644480, LocalPathId: 1] Access: 1 SyncVersion: false Status: AccessDenied Kind: KindUnknown PartitionsCount: 0 DomainInfo From: (Uint64 : NULL, Uint64 : NULL, Uint64 : NULL, Uint32 : NULL) IncFrom: 1 To: () IncTo: 0 } 2025-05-07T08:49:56.221287Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2578: SessionId: ydb://session/3?node_id=1&id=ZjhiYjg0MDUtYzc4ODI0LTRmM2Q3ODA4LTZjY2FkYTVm, ActorId: [1:7501623363998702505:2516], ActorState: ExecuteState, TraceId: 01jtmyzvtr2dq22bvs6xe7drrj, Create QueryResponse for error on request, msg: 2025-05-07T08:49:56.221682Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1746607796208, txId: 281474976710673] shutting down 2025-05-07T08:49:56.222191Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710675. Ctx: { TraceId: 01jtmyzvtr2dq22bvs6xe7drrj, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZjhiYjg0MDUtYzc4ODI0LTRmM2Q3ODA4LTZjY2FkYTVm, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:49:56.238083Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501623342523863339:2061];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:49:56.238170Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> KqpSystemView::FailNavigate |88.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/pdisk/ut/unittest >> TYardTest::TestEnormousDisk [GOOD] >> Viewer::JsonAutocompleteSimilarDatabaseNamePOST [GOOD] >> Viewer::JsonAutocompleteSimilarDatabaseNameLowerCase >> DSProxyStrategyTest::Restore_mirror3dc [GOOD] >> test_sql_streaming.py::test[suites-GroupByHopNoKey-default.txt] [FAIL] >> test_sql_streaming.py::test[suites-GroupByHopPercentile-default.txt] >> TTopicWriterTests::TestEnterMessage_1KiB_No_Delimiter [GOOD] >> TTopicWriterTests::TestEnterMessage_Custom_Delimiter_Delimited [GOOD] >> KqpSystemView::Join [GOOD] |88.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/ut_rw/ydb-core-tx-columnshard-ut_rw |88.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/columnshard/ut_rw/ydb-core-tx-columnshard-ut_rw |88.9%| [TA] {RESULT} $(B)/ydb/core/tx/datashard/ut_trace/test-results/unittest/{meta.json ... results_accumulator.log} |88.9%| [TA] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_donor/test-results/unittest/{meta.json ... results_accumulator.log} |88.9%| [LD] {RESULT} $(B)/ydb/core/tx/columnshard/ut_rw/ydb-core-tx-columnshard-ut_rw >> KqpSysColV1::StreamSelectRowAsterisk [GOOD] >> KqpSysColV1::StreamInnerJoinSelect [GOOD] |88.9%| [TM] {asan, default-linux-x86_64, release} ydb/public/lib/ydb_cli/topic/ut/unittest >> TTopicWriterTests::TestEnterMessage_Custom_Delimiter_Delimited [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/dsproxy/ut_strategy/unittest >> DSProxyStrategyTest::Restore_mirror3dc [GOOD] Test command err: diskMask# 287 nonWorkingDomain# 0 31656 diskMask# 287 nonWorkingDomain# 1 42024 diskMask# 288 nonWorkingDomain# 0 613440 diskMask# 289 nonWorkingDomain# 0 192960 diskMask# 289 nonWorkingDomain# 1 336960 diskMask# 290 nonWorkingDomain# 0 192960 >> TSchemeshardBackgroundCleaningTest::SchemeshardBackgroundCleaningTestCreateCleanManyTables [GOOD] >> TSchemeshardBackgroundCleaningTest::CreateTableInTemp >> TTopicWriterTests::TestTopicWriterParams_No_Delimiter [GOOD] >> TTopicWriterTests::TestTopicWriterParams_InvalidDelimiter [GOOD] >> KqpSystemView::Sessions [GOOD] >> Viewer::FloatPointJsonQuery [GOOD] >> Viewer::AuthorizeYdbTokenWithDatabaseAttributes >> TTopicWriterTests::TestTopicWriterParams_Format_NewlineDelimited [GOOD] >> TTopicWriterTests::TestTopicWriterParams_Format_Concatenated [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> KqpSystemView::Join [GOOD] Test command err: Trying to start YDB, gRPC: 17827, MsgBus: 13023 2025-05-07T08:49:43.969465Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501623308586291771:2063];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:49:43.969549Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0031b5/r3tmp/tmpkJr34v/pdisk_1.dat 2025-05-07T08:49:44.366258Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:49:44.409520Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:49:44.409665Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:49:44.411095Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 17827, node 1 2025-05-07T08:49:44.480687Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:49:44.480715Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:49:44.480721Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:49:44.480837Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:13023 TClient is connected to server localhost:13023 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:49:45.126386Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:45.157664Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:45.357099Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:45.516745Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:45.596026Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:47.499379Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623325766162604:2406], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:47.499564Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:47.859138Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T08:49:47.936967Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T08:49:47.979254Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T08:49:48.050932Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T08:49:48.096271Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T08:49:48.180244Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T08:49:48.222236Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T08:49:48.293782Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623330061130560:2470], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:48.293888Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:48.294195Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623330061130565:2473], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:48.298307Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-05-07T08:49:48.309215Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501623330061130567:2474], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T08:49:48.406436Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501623330061130618:3420] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:49:48.974097Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501623308586291771:2063];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:49:48.974188Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:49:49.712239Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1746607789698, txId: 281474976710672] shutting down waiting... 2025-05-07T08:49:50.901091Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1746607790882, txId: 281474976710674] shutting down waiting... 2025-05-07T08:49:52.102267Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1746607792096, txId: 281474976710676] shutting down waiting... 2025-05-07T08:49:53.280362Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1746607793274, txId: 281474976710678] shutting down waiting... 2025-05-07T08:49:54.479381Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1746607794474, txId: 281474976710680] shutting down waiting... 2025-05-07T08:49:55.652916Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1746607795646, txId: 281474976710682] shutting down waiting... 2025-05-07T08:49:56.856319Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1746607796840, txId: 281474976710684] shutting down waiting... 
2025-05-07T08:49:58.028340Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1746607798021, txId: 281474976710686] shutting down 2025-05-07T08:49:58.509840Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1746607798478, txId: 281474976710688] shutting down |88.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_read_only_vdisk/ut_blobstorage-ut_read_only_vdisk >> KqpSysColV1::StreamSelectRowById [GOOD] |88.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_read_only_vdisk/ut_blobstorage-ut_read_only_vdisk |88.9%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_read_only_vdisk/ut_blobstorage-ut_read_only_vdisk ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> KqpSysColV1::StreamSelectRowAsterisk [GOOD] Test command err: Trying to start YDB, gRPC: 28248, MsgBus: 64939 2025-05-07T08:49:53.621526Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501623352359152014:2139];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:49:53.625837Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00305b/r3tmp/tmpzBSwZs/pdisk_1.dat 2025-05-07T08:49:53.922840Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 28248, node 1 2025-05-07T08:49:53.989305Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:49:53.989478Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:49:53.991540Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:49:54.000406Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:49:54.000429Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:49:54.000462Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:49:54.000674Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:64939 TClient is connected to server localhost:64939 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:49:54.483501Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:54.515148Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:54.648758Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:54.786377Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:54.867090Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:56.598782Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623365244055473:2405], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:56.598944Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:56.884114Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T08:49:56.919148Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T08:49:56.991026Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T08:49:57.035969Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T08:49:57.074292Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T08:49:57.117882Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T08:49:57.160621Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T08:49:57.248200Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623369539023426:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:57.248263Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:57.248538Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623369539023431:2472], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:57.252273Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-05-07T08:49:57.267480Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501623369539023433:2473], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T08:49:57.345348Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501623369539023484:3415] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:49:58.614049Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501623352359152014:2139];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:49:58.614116Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:49:58.628983Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1746607798663, txId: 281474976710672] shutting down >> TTopicWriterTests::TestEnterMessage_1KiB_Newline_Delimiter [GOOD] >> TTopicWriterTests::TestEnterMessage_1KiB_Newline_Delimited_With_Two_Delimiters_In_A_Row [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> KqpSysColV1::StreamInnerJoinSelect [GOOD] Test command err: Trying to start YDB, gRPC: 22469, MsgBus: 6553 2025-05-07T08:49:53.032455Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501623353203223386:2061];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:49:53.032633Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00305d/r3tmp/tmplrVTQT/pdisk_1.dat 2025-05-07T08:49:53.349161Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 22469, node 1 2025-05-07T08:49:53.400343Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:49:53.400494Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:49:53.402359Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:49:53.444773Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:49:53.444824Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:49:53.444837Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:49:53.445032Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6553 TClient is connected to server localhost:6553 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:49:53.966309Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-05-07T08:49:53.990611Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 2025-05-07T08:49:54.120888Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:54.272751Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:54.350827Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:56.259937Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623366088126943:2406], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:56.260037Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:56.605419Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T08:49:56.637582Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T08:49:56.673868Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T08:49:56.718502Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T08:49:56.796349Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T08:49:56.833173Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T08:49:56.873522Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T08:49:56.963485Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623366088127608:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:56.963564Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:56.963744Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623366088127613:2472], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:56.967492Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-05-07T08:49:56.978208Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501623366088127615:2473], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T08:49:57.047456Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501623370383094962:3423] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:49:58.042256Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501623353203223386:2061];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:49:58.044433Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:49:58.780222Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1746607798803, txId: 281474976710672] shutting down |88.9%| [TM] {asan, default-linux-x86_64, release} ydb/public/lib/ydb_cli/topic/ut/unittest >> TTopicWriterTests::TestTopicWriterParams_InvalidDelimiter [GOOD] >> KqpSysColV1::StreamSelectRange [GOOD] |88.9%| [TM] {asan, default-linux-x86_64, release} ydb/public/lib/ydb_cli/topic/ut/unittest >> TTopicWriterTests::TestTopicWriterParams_Format_Concatenated [GOOD] |88.9%| [TM] {asan, default-linux-x86_64, release} ydb/public/lib/ydb_cli/topic/ut/unittest >> TTopicWriterTests::TestEnterMessage_1KiB_Newline_Delimited_With_Two_Delimiters_In_A_Row [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> KqpSystemView::Sessions [GOOD] Test command err: Trying to start YDB, gRPC: 27515, MsgBus: 7531 2025-05-07T08:49:50.138687Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501623337444362221:2263];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:49:50.141327Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00309b/r3tmp/tmpQOWUIi/pdisk_1.dat 2025-05-07T08:49:50.550793Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 27515, node 1 2025-05-07T08:49:50.592238Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:49:50.592388Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:49:50.593924Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:49:50.606550Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:49:50.606576Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:49:50.606589Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:49:50.606727Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:7531 
TClient is connected to server localhost:7531 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:49:51.135405Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:51.158002Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710658:0, at schemeshard: 72057594046644480 2025-05-07T08:49:51.164107Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:51.318139Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:51.452907Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:51.519989Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:53.217154Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623350329265583:2407], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:53.217289Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:53.541748Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T08:49:53.571757Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T08:49:53.600282Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T08:49:53.636914Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T08:49:53.665383Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T08:49:53.710202Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T08:49:53.746733Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480 2025-05-07T08:49:53.822268Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623350329266239:2470], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:53.822340Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:53.822480Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623350329266244:2473], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:53.826110Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710670:3, at schemeshard: 72057594046644480 2025-05-07T08:49:53.836104Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501623350329266246:2474], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710670 completed, doublechecking } 2025-05-07T08:49:53.905808Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501623350329266297:3424] txid# 281474976710671, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:49:55.137310Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501623337444362221:2263];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:49:55.137408Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 1 ydb-cpp-sdk/dev 2025-05-07T08:49:59.335679Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1746607799326, txId: 281474976710685] shutting down >> KqpSysColV1::StreamInnerJoinSelectAsterisk [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> KqpSysColV1::StreamSelectRowById [GOOD] Test command err: Trying to start YDB, gRPC: 23548, MsgBus: 22656 2025-05-07T08:49:54.719791Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501623356225846626:2062];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:49:54.719865Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00303a/r3tmp/tmpIJDJkl/pdisk_1.dat 2025-05-07T08:49:55.050486Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 23548, node 1 2025-05-07T08:49:55.074210Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:49:55.074317Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:49:55.076006Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:49:55.130702Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:49:55.130735Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:49:55.130744Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:49:55.130888Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:22656 TClient is connected to server localhost:22656 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:49:55.636454Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:55.658855Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:55.801107Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:55.971148Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:56.049142Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:57.887820Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623369110750169:2406], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:57.887951Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:58.179988Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T08:49:58.221908Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T08:49:58.255864Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T08:49:58.289912Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T08:49:58.329199Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T08:49:58.392308Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T08:49:58.446196Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T08:49:58.537188Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623373405718122:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:58.537268Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:58.547180Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623373405718127:2472], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:58.558761Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-05-07T08:49:58.572565Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501623373405718129:2473], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T08:49:58.678357Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501623373405718180:3418] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:49:59.721418Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501623356225846626:2062];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:49:59.721519Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:49:59.871247Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1746607799902, txId: 281474976710672] shutting down >> TTopicWriterTests::TestEnterMessage_EmptyInput [GOOD] >> TTopicWriterTests::TestEnterMessage_No_Base64_Transform [GOOD] >> TTopicReaderTests::TestRun_ReadOneMessage |88.9%| [TM] {asan, default-linux-x86_64, release} ydb/public/lib/ydb_cli/topic/ut/unittest >> TTopicWriterTests::TestEnterMessage_No_Base64_Transform [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> KqpSysColV1::StreamSelectRange [GOOD] Test command err: Trying to start YDB, gRPC: 12821, MsgBus: 12972 2025-05-07T08:49:55.110218Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501623361468672777:2061];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:49:55.110527Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003022/r3tmp/tmpJRc8AX/pdisk_1.dat 2025-05-07T08:49:55.495085Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:49:55.532191Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:49:55.532334Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 12821, node 1 2025-05-07T08:49:55.538205Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:49:55.594556Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:49:55.594586Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:49:55.594607Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:49:55.594757Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:12972 TClient is connected to server localhost:12972 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:49:56.167742Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:56.191158Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:56.332837Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:56.513863Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-05-07T08:49:56.575166Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 2025-05-07T08:49:58.423672Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623374353576325:2406], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:58.423842Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:58.771383Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T08:49:58.810335Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T08:49:58.847699Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T08:49:58.886001Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T08:49:58.923943Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T08:49:59.001715Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T08:49:59.039330Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T08:49:59.128315Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623378648544282:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:59.128401Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:59.128668Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623378648544287:2472], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:59.135008Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-05-07T08:49:59.151401Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501623378648544289:2473], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T08:49:59.235553Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501623378648544342:3414] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:50:00.111131Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501623361468672777:2061];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:50:00.111217Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:50:00.451469Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1746607800483, txId: 281474976710672] shutting down >> TTopicReaderTests::TestRun_ReadMoreMessagesThanLimit_Without_Wait_NewlineDelimited >> KqpSysColV1::StreamInnerJoinTables [GOOD] >> TTopicWriterTests::TestEnterMessage_With_Base64_Transform_Invalid_Encode [GOOD] >> TTopicWriterTests::TestEnterMessage_With_Base64_Transform [GOOD] >> KqpSysColV0::SelectRange [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> KqpSysColV1::StreamInnerJoinSelectAsterisk [GOOD] Test command err: Trying to start YDB, gRPC: 1263, MsgBus: 21354 2025-05-07T08:49:55.200481Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501623358697135901:2079];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:49:55.201451Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00302e/r3tmp/tmpHRktTo/pdisk_1.dat 2025-05-07T08:49:55.546979Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 1263, node 1 2025-05-07T08:49:55.630880Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:49:55.631028Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:49:55.636998Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:49:55.673857Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:49:55.673914Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:49:55.673932Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:49:55.674094Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:21354 TClient is connected to server localhost:21354 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:49:56.268406Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:56.291710Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:56.452612Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:56.614188Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:56.693067Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:58.507719Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623371582039394:2406], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:58.507834Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:58.872745Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-05-07T08:49:58.908980Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-05-07T08:49:58.943642Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-05-07T08:49:58.987120Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-05-07T08:49:59.022750Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-05-07T08:49:59.105183Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-05-07T08:49:59.181911Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-05-07T08:49:59.244923Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623375877007354:2470], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:59.245002Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:59.245189Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623375877007359:2473], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:59.249215Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-05-07T08:49:59.262322Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501623375877007361:2474], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-05-07T08:49:59.343445Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501623375877007412:3416] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:50:00.202127Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501623358697135901:2079];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:50:00.202223Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:50:01.071528Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1746607801099, txId: 281474976715672] shutting down >> TTopicWriterTests::TestEnterMessage_ZeroSymbol_Delimited [GOOD] >> TTopicWriterTests::TestEnterMessage_With_Base64_Transform_NewlineDelimited [GOOD] >> KqpSystemView::PartitionStatsSimple [GOOD] >> TColumnShardTestReadWrite::CompactionInGranule_PKInt64_Reboot |88.9%| [TM] {asan, default-linux-x86_64, release} ydb/public/lib/ydb_cli/topic/ut/unittest >> TTopicWriterTests::TestEnterMessage_With_Base64_Transform [GOOD] |88.9%| [TM] {asan, default-linux-x86_64, release} ydb/public/lib/ydb_cli/topic/ut/unittest >> TTopicWriterTests::TestEnterMessage_With_Base64_Transform_NewlineDelimited [GOOD] >> KqpSysColV1::UpdateAndDelete [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> KqpSysColV1::StreamInnerJoinTables [GOOD] Test command err: Trying to start YDB, gRPC: 61833, MsgBus: 3288 2025-05-07T08:49:55.967573Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501623360701621098:2061];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:49:55.967648Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00301a/r3tmp/tmpHIbBBa/pdisk_1.dat 2025-05-07T08:49:56.409924Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:49:56.439632Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:49:56.439768Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 61833, node 1 2025-05-07T08:49:56.445626Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:49:56.518072Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:49:56.518099Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:49:56.518124Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:49:56.518234Z node 1 :NET_CLASSIFIER ERROR: 
net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3288 TClient is connected to server localhost:3288 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:49:57.206108Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:57.237392Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T08:49:57.254388Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:57.418395Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:57.583236Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:57.663005Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:59.505763Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623377881491945:2406], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:59.505921Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:59.917027Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T08:49:59.950765Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T08:50:00.000540Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T08:50:00.042842Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T08:50:00.078329Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T08:50:00.120394Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T08:50:00.166081Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T08:50:00.245562Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623382176459895:2470], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:50:00.245669Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:50:00.248522Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623382176459900:2473], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:50:00.252807Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-05-07T08:50:00.268984Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501623382176459902:2474], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T08:50:00.327200Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501623382176459953:3417] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:50:00.969704Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501623360701621098:2061];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:50:01.002903Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:50:02.003996Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1746607802009, txId: 281474976710672] shutting down [[[108u];["One"];[8];["Value5"];[108u];["One"];#;["Value31"]]] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> KqpSysColV0::SelectRange [GOOD] Test command err: Trying to start YDB, gRPC: 8218, MsgBus: 1771 2025-05-07T08:49:56.822678Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501623366096061491:2067];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:49:56.823663Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002f2c/r3tmp/tmpipZbiN/pdisk_1.dat 2025-05-07T08:49:57.198705Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:49:57.199326Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:49:57.199431Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:49:57.204449Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 8218, node 1 2025-05-07T08:49:57.282732Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:49:57.282767Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:49:57.282782Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:49:57.282965Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:1771 TClient is connected to server localhost:1771 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:49:57.908915Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:57.946646Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:58.105476Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:58.310100Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:58.408401Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:50:00.102722Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623383275932316:2406], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:50:00.102867Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:50:00.451034Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T08:50:00.484644Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T08:50:00.525062Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T08:50:00.556210Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T08:50:00.599403Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T08:50:00.641478Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T08:50:00.683507Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T08:50:00.793130Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623383275932979:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:50:00.793205Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:50:00.793383Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623383275932984:2472], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:50:00.796829Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-05-07T08:50:00.807014Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501623383275932986:2473], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T08:50:00.910694Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501623383275933039:3416] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:50:01.828419Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501623366096061491:2067];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:50:01.832346Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> Backup::ProposeBackup >> TColumnShardTestReadWrite::CompactionSplitGranule_PKTimestamp ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> KqpSystemView::PartitionStatsSimple [GOOD] Test command err: Trying to start YDB, gRPC: 13978, MsgBus: 27008 2025-05-07T08:49:56.844347Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501623366539348992:2132];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:49:56.844428Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002ec6/r3tmp/tmpnDMSGQ/pdisk_1.dat 2025-05-07T08:49:57.401392Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:49:57.411123Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:49:57.411232Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:49:57.413283Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 13978, node 1 2025-05-07T08:49:57.525175Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:49:57.525197Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:49:57.525208Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:49:57.525341Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:27008 TClient is connected to server localhost:27008 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:49:58.141591Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:58.157639Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T08:49:58.169007Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:58.364954Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:58.602307Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:58.714003Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:50:00.604370Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623383719219746:2406], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:50:00.604503Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:50:00.989308Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T08:50:01.069495Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T08:50:01.121615Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T08:50:01.154160Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T08:50:01.230687Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T08:50:01.301052Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T08:50:01.336406Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T08:50:01.411510Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623388014187709:2470], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:50:01.411611Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:50:01.411853Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623388014187714:2473], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:50:01.418782Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-05-07T08:50:01.433013Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501623388014187716:2474], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T08:50:01.528346Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501623388014187767:3424] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:50:01.846058Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501623366539348992:2132];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:50:01.846128Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:50:02.648359Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1746607802638, txId: 281474976710672] shutting down |88.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_read_only_vdisk/unittest >> KqpSystemView::FailNavigate [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> KqpSysColV1::UpdateAndDelete [GOOD] Test command err: Trying to start YDB, gRPC: 28921, MsgBus: 25609 2025-05-07T08:49:55.697880Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501623358981154833:2060];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:49:55.697947Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00301e/r3tmp/tmp3Rq6pA/pdisk_1.dat 2025-05-07T08:49:56.119211Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 28921, node 1 2025-05-07T08:49:56.129010Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:49:56.129138Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:49:56.130892Z node 1 :GRPC_SERVER WARN: grpc_request_proxy.cpp:509: SchemeBoardDelete /Root Strong=0 2025-05-07T08:49:56.130936Z node 1 :GRPC_SERVER WARN: grpc_request_proxy.cpp:509: SchemeBoardDelete /Root Strong=0 2025-05-07T08:49:56.133657Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:49:56.189948Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:49:56.189984Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:49:56.189991Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:49:56.190107Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:25609 TClient is connected to server localhost:25609 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:49:56.808712Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:56.837225Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T08:49:56.857176Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:56.993382Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:57.158307Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:57.241609Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:59.224293Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623376161025662:2406], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:59.224523Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:59.555398Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T08:49:59.595747Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T08:49:59.635069Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T08:49:59.663519Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T08:49:59.700682Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T08:49:59.742322Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T08:49:59.785765Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T08:49:59.878955Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623376161026324:2470], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:59.879032Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:59.879250Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623376161026329:2473], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:59.883063Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-05-07T08:49:59.892406Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501623376161026331:2474], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T08:49:59.948291Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501623376161026382:3414] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:50:00.698086Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501623358981154833:2060];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:50:00.698222Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> ReadOnlyVDisk::TestWrites >> ReadOnlyVDisk::TestSync >> KqpSysColV0::InnerJoinSelect [GOOD] >> ReadOnlyVDisk::TestReads >> ReadOnlyVDisk::TestDiscover >> Backup::ProposeBackup [GOOD] >> EvWrite::AbortInTransaction ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> KqpSystemView::FailNavigate [GOOD] Test command err: Trying to start YDB, gRPC: 11516, MsgBus: 29309 2025-05-07T08:49:58.673817Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501623374644012339:2062];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:49:58.673870Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002e93/r3tmp/tmprznFZr/pdisk_1.dat 2025-05-07T08:49:59.156079Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:49:59.156175Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:49:59.157774Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:49:59.197488Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 11516, node 1 2025-05-07T08:49:59.296253Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:49:59.296286Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:49:59.296295Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:49:59.296443Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:29309 TClient is connected to server localhost:29309 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:49:59.965256Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:59.984359Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T08:49:59.991181Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710658:0, at schemeshard: 72057594046644480 2025-05-07T08:49:59.995882Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:50:00.144031Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:50:00.303620Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:50:00.398606Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:50:02.214805Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623391823883195:2407], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:50:02.214912Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:50:02.579076Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T08:50:02.611589Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T08:50:02.641368Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T08:50:02.686367Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T08:50:02.730411Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T08:50:02.779829Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T08:50:02.825901Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480 2025-05-07T08:50:02.889995Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623391823883851:2471], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:50:02.890081Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623391823883856:2474], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:50:02.890083Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:50:02.892896Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710670:3, at schemeshard: 72057594046644480 2025-05-07T08:50:02.903572Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501623391823883858:2475], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710670 completed, doublechecking } 2025-05-07T08:50:02.968035Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501623391823883909:3426] txid# 281474976710671, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:50:03.674725Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501623374644012339:2062];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:50:03.674864Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:50:04.129686Z node 1 :TX_PROXY_SCHEME_CACHE WARN: cache.cpp:303: Access denied: self# [1:7501623400413818781:3603], for# user0@builtin, access# DescribeSchema 2025-05-07T08:50:04.129721Z node 1 :TX_PROXY_SCHEME_CACHE WARN: cache.cpp:303: Access denied: self# [1:7501623400413818781:3603], for# user0@builtin, access# DescribeSchema 2025-05-07T08:50:04.145166Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7501623400413818778:2518], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:2:13: Error: At function: KiReadTable!
:2:13: Error: Cannot find table 'db.[/Root/.sys/partition_stats]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-05-07T08:50:04.146213Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2147: SessionId: ydb://session/3?node_id=1&id=NTBiNWQ3MDgtZTFhN2QwNjItOWEzY2JjNzAtMjU0MDg1MjM=, ActorId: [1:7501623400413818771:2514], ActorState: ExecuteState, TraceId: 01jtmz03nf9epm3gk6ap4e154v, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: >> ReadOnlyVDisk::TestGarbageCollect |88.9%| [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data_integrity/unittest |88.9%| [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data_integrity/unittest |88.9%| [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data_integrity/unittest |88.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_read_only_vdisk/unittest |88.9%| [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data_integrity/unittest >> KqpSysColV0::UpdateAndDelete [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> KqpSysColV0::InnerJoinSelect [GOOD] Test command err: Trying to start YDB, gRPC: 10890, MsgBus: 61486 2025-05-07T08:49:58.410669Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501623373773584971:2211];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:49:58.411010Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002ea7/r3tmp/tmp0KCFsU/pdisk_1.dat 2025-05-07T08:49:58.920049Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:49:58.923849Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:49:58.923988Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:49:58.927155Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 10890, node 1 2025-05-07T08:49:59.067600Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:49:59.067668Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:49:59.067684Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:49:59.067822Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:61486 TClient is connected to server localhost:61486 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:49:59.636028Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:59.671494Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-05-07T08:49:59.838087Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:50:00.023073Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 2025-05-07T08:50:00.123017Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:50:02.083024Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623390953455663:2406], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:50:02.083152Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:50:02.461033Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T08:50:02.494717Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T08:50:02.526246Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T08:50:02.571062Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T08:50:02.604371Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T08:50:02.663451Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T08:50:02.700717Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T08:50:02.758483Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623390953456322:2470], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:50:02.758564Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:50:02.758947Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623390953456327:2473], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:50:02.763537Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-05-07T08:50:02.777088Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501623390953456329:2474], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T08:50:02.858511Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501623390953456380:3425] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:50:03.408336Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501623373773584971:2211];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:50:03.408424Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> EvWrite::AbortInTransaction [GOOD] >> ReadOnlyVDisk::TestStorageLoad >> KqpSystemView::NodesSimple [GOOD] >> Viewer::JsonAutocompleteEmptyColumns [GOOD] >> Viewer::JsonAutocompleteColumnsPOST ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> EvWrite::AbortInTransaction [GOOD] Test command err: 2025-05-07T08:50:04.357064Z node 1 :BLOB_CACHE NOTICE: ctor_logger.h:56: MaxCacheDataSize: 20971520 InFlightDataSize: 0 2025-05-07T08:50:04.455381Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:138:2170];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-05-07T08:50:04.486314Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:138:2170];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-05-07T08:50:04.486625Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-05-07T08:50:04.494555Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:138:2170];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-05-07T08:50:04.494837Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:138:2170];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-05-07T08:50:04.495095Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:138:2170];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-05-07T08:50:04.495227Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:138:2170];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-05-07T08:50:04.495359Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:138:2170];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-05-07T08:50:04.495484Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:138:2170];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanInsertionDedup; 2025-05-07T08:50:04.495609Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:138:2170];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-05-07T08:50:04.495724Z node 1 
:TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:138:2170];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-05-07T08:50:04.495832Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:138:2170];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-05-07T08:50:04.495969Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:138:2170];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-05-07T08:50:04.496093Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:138:2170];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-05-07T08:50:04.496214Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:138:2170];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-05-07T08:50:04.529285Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-05-07T08:50:04.529948Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:137;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=11;current_normalizer=CLASS_NAME=Granules; 2025-05-07T08:50:04.530031Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-05-07T08:50:04.530240Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-05-07T08:50:04.530406Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-05-07T08:50:04.530474Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-05-07T08:50:04.530521Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-05-07T08:50:04.530630Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-05-07T08:50:04.530694Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-05-07T08:50:04.530754Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-05-07T08:50:04.530802Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-05-07T08:50:04.530965Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-05-07T08:50:04.531027Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-05-07T08:50:04.531072Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-05-07T08:50:04.531119Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-05-07T08:50:04.531223Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-05-07T08:50:04.531285Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-05-07T08:50:04.531328Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanInsertionDedup;id=CleanInsertionDedup; 2025-05-07T08:50:04.531355Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=8;type=CleanInsertionDedup; 2025-05-07T08:50:04.531445Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanInsertionDedup;id=8; 2025-05-07T08:50:04.531494Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-05-07T08:50:04.531528Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-05-07T08:50:04.531588Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-05-07T08:50:04.531626Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-05-07T08:50:04.531671Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-05-07T08:50:04.531891Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-05-07T08:50:04.531960Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-05-07T08:50:04.531993Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-05-07T08:50:04.532199Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-05-07T08:50:04.532244Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-05-07T08:50:04.532290Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-05-07T08:50:04.532421Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-05-07T08:50:04.532472Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-05-07T08:50:04.532514Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-05-07T08:50:04.532598Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-05-07T08:50:04.532666Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-05-07T08:50:04.532704Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-05-07T08:50:04.532730Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-05-07T08:50:04.533105Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=46; 2025-05-07T08:50:04.533218Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=47; ... 
784: tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=10;fline=tx_controller.cpp:214;event=finished_tx;tx_id=10; 2025-05-07T08:50:06.783434Z node 2 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxPlanStep[2] complete at tablet 9437184 2025-05-07T08:50:06.783617Z node 2 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Complete;fline=columnshard_impl.cpp:785;event=start_indexation_tasks;insert_overload_size=0; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=229592;columns=2; 2025-05-07T08:50:06.805167Z node 2 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;parent=[2:108:2140];fline=actor.cpp:22;event=flush_writing;size=229592;count=1; 2025-05-07T08:50:06.808786Z node 2 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Write (record) into pathId 1 writeId 1 at tablet 9437184 2025-05-07T08:50:06.810830Z node 2 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager on execute at tablet 9437184 Save Batch GenStep: 2:1 Blob count: 1 2025-05-07T08:50:06.828261Z node 2 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Save Batch GenStep: 2:1 Blob count: 1 2025-05-07T08:50:06.828451Z node 2 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;local_tx_no=4;method=complete;tx_info=TTxWrite;tablet_id=9437184;tx_state=complete;fline=columnshard_impl.cpp:785;event=start_indexation_tasks;insert_overload_size=0; 2025-05-07T08:50:06.828941Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=9437184;self_id=[2:108:2140];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=9437184;event=TEvWrite;fline=manager.cpp:116;event=abort;tx_id=222;problem=finished; 2025-05-07T08:50:06.829030Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=9437184;self_id=[2:108:2140];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=9437184;event=TEvWrite;fline=manager.cpp:134;event=abort;tx_id=222;problem=finished; 2025-05-07T08:50:06.829285Z node 2 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: PlanStep 1746607807140 at tablet 9437184, mediator 0 2025-05-07T08:50:06.829350Z node 2 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxPlanStep[5] execute at tablet 9437184 2025-05-07T08:50:06.829411Z node 2 :TX_COLUMNSHARD ERROR: ctor_logger.h:56: TxPlanStep[5] Ignore old txIds [112] for step 1746607807140 last planned step 1746607807140 at tablet 9437184 2025-05-07T08:50:06.829476Z node 2 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxPlanStep[5] complete at tablet 9437184 2025-05-07T08:50:06.829829Z node 2 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: EvScan txId: 18446744073709551615 scanId: 0 version: {1746607807140:max} readable: {1746607807140:max} at tablet 9437184 2025-05-07T08:50:06.829948Z node 2 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TTxScan prepare txId: 18446744073709551615 scanId: 0 at tablet 9437184 2025-05-07T08:50:06.835782Z node 2 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[2:108:2140];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1746607807140:max};tablet=9437184;timeout=0.000000s;fline=program.cpp:33;event=parse_program;program=Command { Projection { Columns { Id: 1 } Columns { Id: 2 } } } ; 2025-05-07T08:50:06.835938Z node 2 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[2:108:2140];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1746607807140:max};tablet=9437184;timeout=0.000000s;fline=program.cpp:102;parse_proto_program=Command { Projection { Columns { Id: 1 } Columns { Id: 2 } } } 
; 2025-05-07T08:50:06.836968Z node 2 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[2:108:2140];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1746607807140:max};tablet=9437184;timeout=0.000000s;fline=program.cpp:51;event=program_parsed;result={"edges":[{"owner_id":0,"inputs":[{"from":2},{"from":4}]},{"owner_id":2,"inputs":[{"from":5}]},{"owner_id":4,"inputs":[{"from":5}]},{"owner_id":5,"inputs":[{"from":6}]},{"owner_id":6,"inputs":[]}],"nodes":{"2":{"p":{"i":"1","p":{"address":{"name":"key","id":1}},"o":"1","t":"AssembleOriginalData"},"w":9,"id":2},"6":{"p":{"p":{"data":[{"name":"key","id":1},{"name":"field","id":2}]},"o":"0","t":"ReserveMemory"},"w":0,"id":6},"5":{"p":{"i":"0","p":{"data":[{"name":"key","id":1},{"name":"field","id":2}]},"o":"1,2","t":"FetchOriginalData"},"w":4,"id":5},"4":{"p":{"i":"2","p":{"address":{"name":"field","id":2}},"o":"2","t":"AssembleOriginalData"},"w":9,"id":4},"0":{"p":{"i":"1,2","t":"Projection"},"w":18,"id":0}}}; 2025-05-07T08:50:06.837143Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: tablet_id=9437184;self_id=[2:108:2140];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1746607807140:max};tablet=9437184;timeout=0.000000s;fline=read_metadata.h:136;filter_limit_not_detected= range{ from {+Inf} to {-Inf}}; 2025-05-07T08:50:06.837832Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: tablet_id=9437184;self_id=[2:108:2140];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1746607807140:max};tablet=9437184;timeout=0.000000s;fline=tx_scan.cpp:166;event=TTxScan started;actor_id=[2:169:2187];trace_detailed=; 2025-05-07T08:50:06.839473Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: fline=context.cpp:84;ff_first=(column_ids=1,2;column_names=field,key;);; 2025-05-07T08:50:06.839721Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: fline=context.cpp:99;columns_context_info=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1;column_names=key;);;ff=(column_ids=1,2;column_names=field,key;);;program_input=(column_ids=1,2;column_names=field,key;);;; 2025-05-07T08:50:06.840075Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[2:169:2187];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:104;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-05-07T08:50:06.840211Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[2:169:2187];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:187;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1;column_names=key;);;ff=(column_ids=1,2;column_names=field,key;);;program_input=(column_ids=1,2;column_names=field,key;);;;); 2025-05-07T08:50:06.840347Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[2:169:2187];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1;column_names=key;);;ff=(column_ids=1,2;column_names=field,key;);;program_input=(column_ids=1,2;column_names=field,key;);;;); 2025-05-07T08:50:06.840393Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: 
actor.cpp:409: Scan [2:169:2187] finished for tablet 9437184 2025-05-07T08:50:06.840803Z node 2 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[2:169:2187];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:415;event=scan_finish;compute_actor_id=[2:168:2186];stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["l_bootstrap","f_ack","l_ack","f_processing","l_processing","f_ProduceResults","l_ProduceResults","f_Finish","l_Finish"],"t":0.002}],"full":{"a":1746607806837724,"name":"_full_task","f":1746607806837724,"d_finished":0,"c":0,"l":1746607806840456,"d":2732},"events":[{"name":"bootstrap","f":1746607806838008,"d_finished":1893,"c":1,"l":1746607806839901,"d":1893},{"a":1746607806840043,"name":"ack","f":1746607806840043,"d_finished":0,"c":0,"l":1746607806840456,"d":413},{"a":1746607806840020,"name":"processing","f":1746607806840020,"d_finished":0,"c":0,"l":1746607806840456,"d":436},{"name":"ProduceResults","f":1746607806839878,"d_finished":289,"c":2,"l":1746607806840377,"d":289},{"a":1746607806840380,"name":"Finish","f":1746607806840380,"d_finished":0,"c":0,"l":1746607806840456,"d":76}],"id":"9437184::2"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1;column_names=key;);;ff=(column_ids=1,2;column_names=field,key;);;program_input=(column_ids=1,2;column_names=field,key;);;;); 2025-05-07T08:50:06.840883Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[2:169:2187];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:365;event=send_data;compute_actor_id=[2:168:2186];bytes=0;rows=0;faults=0;finished=1;fault=0;schema=; 2025-05-07T08:50:06.841255Z node 2 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[2:169:2187];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:370;event=scan_finished;compute_actor_id=[2:168:2186];packs_sum=0;bytes=0;bytes_sum=0;rows=0;rows_sum=0;faults=0;finished=1;fault=0;stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["l_bootstrap","f_ack","f_processing","f_ProduceResults","l_ProduceResults","f_Finish"],"t":0.002},{"events":["l_ack","l_processing","l_Finish"],"t":0.003}],"full":{"a":1746607806837724,"name":"_full_task","f":1746607806837724,"d_finished":0,"c":0,"l":1746607806840928,"d":3204},"events":[{"name":"bootstrap","f":1746607806838008,"d_finished":1893,"c":1,"l":1746607806839901,"d":1893},{"a":1746607806840043,"name":"ack","f":1746607806840043,"d_finished":0,"c":0,"l":1746607806840928,"d":885},{"a":1746607806840020,"name":"processing","f":1746607806840020,"d_finished":0,"c":0,"l":1746607806840928,"d":908},{"name":"ProduceResults","f":1746607806839878,"d_finished":289,"c":2,"l":1746607806840377,"d":289},{"a":1746607806840380,"name":"Finish","f":1746607806840380,"d_finished":0,"c":0,"l":1746607806840928,"d":548}],"id":"9437184::2"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1;column_names=key;);;ff=(column_ids=1,2;column_names=field,key;);;program_input=(column_ids=1,2;column_names=field,key;);;;); 2025-05-07T08:50:06.841338Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: 
SelfId=[2:169:2187];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=stats.cpp:8;event=statistic;begin=2025-05-07T08:50:06.837096Z;index_granules=0;index_portions=0;index_batches=0;committed_batches=0;schema_columns=2;filter_columns=0;additional_columns=0;compacted_portions_bytes=0;inserted_portions_bytes=0;committed_portions_bytes=0;data_filter_bytes=0;data_additional_bytes=0;delta_bytes=0;selected_rows=0; 2025-05-07T08:50:06.841378Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[2:169:2187];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=read_context.h:189;event=scan_aborted;reason=unexpected on destructor; 2025-05-07T08:50:06.841488Z node 2 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[2:169:2187];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=context.h:81;fetching=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1;column_names=key;);;ff=(column_ids=1,2;column_names=field,key;);;program_input=(column_ids=1,2;column_names=field,key;);;; FALLBACK_ACTOR_LOGGING;priority=INFO;component=2100;fline=simple_arrays_cache.h:65;event=slice_from_cache;key=uint64;records=0;count=100; FALLBACK_ACTOR_LOGGING;priority=INFO;component=2100;fline=simple_arrays_cache.h:49;event=insert_to_cache;key=string;records=0;size=0; FALLBACK_ACTOR_LOGGING;priority=INFO;component=2100;fline=simple_arrays_cache.h:65;event=slice_from_cache;key=string;records=0;count=0; >> Viewer::AuthorizeYdbTokenWithDatabaseAttributes [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> KqpSysColV0::UpdateAndDelete [GOOD] Test command err: Trying to start YDB, gRPC: 15380, MsgBus: 30074 2025-05-07T08:49:58.426946Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501623373032707250:2065];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:49:58.426992Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002ea0/r3tmp/tmpaj1cO1/pdisk_1.dat 2025-05-07T08:49:58.923919Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:49:58.966773Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:49:58.966882Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 15380, node 1 2025-05-07T08:49:58.968728Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:49:59.050809Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:49:59.050868Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:49:59.050882Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:49:59.051032Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:30074 TClient is connected to server localhost:30074 
WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:49:59.608509Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:59.643126Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:59.825619Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:50:00.019156Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:50:00.113019Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:50:02.115066Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623390212578109:2406], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:50:02.115208Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:50:02.468355Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T08:50:02.505435Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T08:50:02.534723Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T08:50:02.567630Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T08:50:02.635207Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T08:50:02.714063Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T08:50:02.751471Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T08:50:02.816512Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623390212578769:2470], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:50:02.816612Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:50:02.817002Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623390212578774:2473], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:50:02.821207Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-05-07T08:50:02.832463Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501623390212578776:2474], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T08:50:02.939189Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501623390212578827:3422] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:50:03.431235Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501623373032707250:2065];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:50:03.444188Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> ReadOnlyVDisk::TestGetWithMustRestoreFirst >> Viewer::ServerlessNodesPage [GOOD] >> Viewer::ServerlessWithExclusiveNodes >> TColumnShardTestReadWrite::CompactionInGranule_PKString_Reboot |88.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_read_only_vdisk/unittest >> TColumnShardTestReadWrite::Write >> TColumnShardTestReadWrite::ReadAggregate >> TColumnShardTestReadWrite::CompactionSplitGranule_PKInt64 >> KqpSystemView::PartitionStatsFollower [GOOD] >> TColumnShardTestReadWrite::RebootWriteReadStandalone ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> KqpSystemView::NodesSimple [GOOD] Test command err: Trying to start YDB, gRPC: 27851, MsgBus: 9869 2025-05-07T08:49:57.835625Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501623368527582303:2072];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:49:57.835669Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-05-07T08:49:57.897462Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7501623370567087945:2121];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:49:57.898218Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-05-07T08:49:57.919646Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7501623371245220645:2143];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:49:57.922131Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002ec0/r3tmp/tmpJR2cjr/pdisk_1.dat 2025-05-07T08:49:58.608986Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:49:58.618811Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:49:58.618907Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:49:58.619210Z node 1 :HIVE WARN: node_info.cpp:25: 
HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:49:58.619254Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:49:58.621738Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:49:58.621797Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:49:58.624903Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 3 Cookie 3 2025-05-07T08:49:58.625552Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:49:58.626473Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:49:58.631756Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-05-07T08:49:58.633500Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 27851, node 1 2025-05-07T08:49:58.798074Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:49:58.798107Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:49:58.798116Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:49:58.798254Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:9869 TClient is connected to server localhost:9869 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:49:59.731878Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:59.783314Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T08:50:00.060017Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:50:00.395553Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:50:00.548498Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:50:02.607767Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623390002420810:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:50:02.607916Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:50:02.836113Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501623368527582303:2072];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:50:02.836226Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:50:02.897204Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7501623370567087945:2121];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:50:02.897273Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:50:02.914399Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7501623371245220645:2143];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:50:02.914469Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:50:02.997054Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T08:50:03.118183Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T08:50:03.184701Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T08:50:03.253758Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T08:50:03.325563Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T08:50:03.379538Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T08:50:03.489821Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T08:50:03.708791Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623394297388940:2409], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:50:03.708885Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:50:03.709079Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623394297388945:2412], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:50:03.714448Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-05-07T08:50:03.740897Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501623394297388947:2413], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T08:50:03.842632Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501623394297389024:4246] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:50:05.085509Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1746607805066, txId: 281474976710672] shutting down |88.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_index_build/ydb-core-tx-schemeshard-ut_index_build |88.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_index_build/ydb-core-tx-schemeshard-ut_index_build |88.9%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_index_build/ydb-core-tx-schemeshard-ut_index_build >> TColumnShardTestReadWrite::WriteReadNoCompression >> Viewer::JsonAutocompleteSimilarDatabaseNameLowerCase [GOOD] >> Viewer::JsonAutocompleteSchemePOST >> Viewer::QueryExecuteScript [FAIL] >> Viewer::Plan2SvgOK ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> KqpSystemView::PartitionStatsFollower [GOOD] Test command err: Trying to start YDB, gRPC: 5548, MsgBus: 14632 2025-05-07T08:49:47.241258Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501623327400216931:2093];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:49:47.241319Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003144/r3tmp/tmpFesYnC/pdisk_1.dat 2025-05-07T08:49:47.697250Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:49:47.697372Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:49:47.703044Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:49:47.707125Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 5548, node 1 2025-05-07T08:49:47.805323Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:49:47.805353Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:49:47.805363Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:49:47.805562Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:14632 TClient is connected to server localhost:14632 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:49:48.559521Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:48.709537Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:7501623327400217321:2199]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T08:49:48.709593Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4848: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T08:49:48.709697Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271124999, Sender [1:7501623327400217321:2199], Recipient [1:7501623327400217321:2199]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T08:49:48.709713Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4847: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T08:49:49.710250Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:7501623327400217321:2199]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T08:49:49.710290Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4848: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T08:49:49.710341Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271124999, Sender [1:7501623327400217321:2199], Recipient [1:7501623327400217321:2199]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T08:49:49.710360Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4847: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T08:49:50.606876Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623340285119438:2331], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:50.607007Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:50.710553Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:7501623327400217321:2199]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T08:49:50.710604Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4848: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T08:49:50.710685Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271124999, Sender [1:7501623327400217321:2199], Recipient [1:7501623327400217321:2199]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T08:49:50.710702Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4847: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T08:49:50.886662Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269877761, Sender [1:7501623340285119463:2307], Recipient [1:7501623327400217321:2199]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-05-07T08:49:50.886707Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4937: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-05-07T08:49:50.886727Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5713: Pipe server connected, at tablet: 72057594046644480 2025-05-07T08:49:50.886779Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271122432, Sender [1:7501623340285119459:2304], Recipient [1:7501623327400217321:2199]: {TEvModifySchemeTransaction txid# 281474976710658 TabletId# 72057594046644480} 2025-05-07T08:49:50.886794Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4851: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-05-07T08:49:50.956415Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/Root" OperationType: ESchemeOpCreateTable CreateTable { Name: "Followers" Columns { Name: "Key" Type: "Uint64" NotNull: false } Columns { Name: "Value" Type: "String" NotNull: false } KeyColumnNames: "Key" PartitionConfig { ColumnFamilies { Id: 0 StorageConfig { SysLog { PreferredPoolKind: "test" } Log { PreferredPoolKind: "test" } Data { PreferredPoolKind: "test" } } } FollowerGroups { FollowerCount: 3 RequireAllDataCenters: false } } Temporary: false } } TxId: 281474976710658 TabletId: 72057594046644480 PeerName: "" , at schemeshard: 72057594046644480 2025-05-07T08:49:50.956879Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_table.cpp:426: TCreateTable Propose, path: /Root/Followers, opId: 281474976710658:0, at schemeshard: 72057594046644480 2025-05-07T08:49:50.957050Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_create_table.cpp:433: TCreateTable Propose, path: /Root/Followers, opId: 281474976710658:0, schema: Name: "Followers" Columns { Name: "Key" Type: "Uint64" NotNull: false } Columns { Name: "Value" Type: "String" NotNull: false } KeyColumnNames: "Key" PartitionConfig { ColumnFamilies { Id: 0 StorageConfig { SysLog { PreferredPoolKind: "test" } Log { PreferredPoolKind: "test" } Data { PreferredPoolKind: "test" } } } FollowerGroups { FollowerCount: 3 RequireAllDataCenters: false } } Temporary: false, at schemeshard: 72057594046644480 2025-05-07T08:49:50.957576Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard_impl.cpp:319: AttachChild: child attached as only one child to the parent, parent id: [OwnerId: 72057594046644480, LocalPathId: 1], parent name: Root, child name: Followers, child id: [OwnerId: 72057594046644480, LocalPathId: 2], at schemeshard: 72057594046644480 2025-05-07T08:49:50.957609Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 0 2025-05-07T08:49:50.957646Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason new shard created for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 1 2025-05-07T08:49:50.957759Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason new path created for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 2 2025-05-07T08:49:50.957790Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 281474976710658:0 1 -> 2 2025-05-07T08:49:50.957799Z node 1 :SYSTEM_VIEWS TRACE: partition_stats.cpp:83: TEvSysView::TEvSetPartitioning: domainKey [OwnerId: 72057594046644480, LocalPathId: 1] pathId [OwnerId: 72057594046644480, LocalPathId: 2] path /Root/Followers ShardIndices size 1 2025-05-07T08:49:50.958675Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_create_table.cpp:744: TCreateTable Propose creating new table opId# 281474976710658:0 path# /Root/Followers pathId# [OwnerId: 72057594046644480, LocalPathId: 2] schemeshard# 72057594046644480 tx# WorkingDir: "/Root" OperationType: ESchemeOpCreateTable CreateTable { Name: "Followers" Columns { Name: "Key" Type: "Uint64" NotNull: false } Columns { Name: "Value" Type: "String" NotNull: false } KeyColumnNames: "Key" PartitionConfig { ColumnFamilies { Id: 0 StorageConfig { SysLog { PreferredPoolKind: "test" } Log { PreferredPoolKind: "test" } Data { PreferredPoolKind: "test" } } } FollowerGroups { FollowerCount: 3 RequireAllDataCenters: false } } Temporary: false } FailOnExist: false 2025-05-07T08:49:50.958833Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 281474976710658:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-05-07T08:49:50.958848Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 2025-05-07T08:49:50.958924Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-05-07T08:49:50.958948Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path f ... 
0 RowReads: 1 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 1 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 934 Memory: 123960 } ShardState: 2 UserTablePartOwners: 72075186224037888 NodeId: 1 StartTime: 1746607790966 TableOwnerId: 72057594046644480 FollowerId: 0 2025-05-07T08:50:06.004977Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4877: StateWork, processing event TEvDataShard::TEvPeriodicTableStats 2025-05-07T08:50:06.005011Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:561: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037888 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 2] state 'Ready' dataSize 800 rowCount 4 cpuUsage 0.0934 2025-05-07T08:50:06.005096Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:568: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037888 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 2] raw table stats: DataSize: 800 RowCount: 4 IndexSize: 0 InMemSize: 800 LastAccessTime: 1746607791422 LastUpdateTime: 1746607791298 ImmediateTxCompleted: 1 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 4 RowDeletes: 0 RowReads: 1 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 1 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 2025-05-07T08:50:06.005118Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:608: Will delay TTxStoreTableStats on# 0.099995s, queue# 1 2025-05-07T08:50:06.007553Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 2146435073, Sender [0:0:0], Recipient [1:7501623340285119482:2336]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvCleanupTransaction 2025-05-07T08:50:06.007593Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3156: StateWork, processing event TEvPrivate::TEvCleanupTransaction 2025-05-07T08:50:06.007689Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:214: No cleanup at 72075186224037888 outdated step 1746607806000 last cleanup 0 2025-05-07T08:50:06.007780Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_tx.cpp:54: Removed expired snapshots at 72075186224037888 2025-05-07T08:50:06.007806Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-05-07T08:50:06.007828Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 72075186224037888 2025-05-07T08:50:06.007842Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-05-07T08:50:06.007856Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 72075186224037888 2025-05-07T08:50:06.008686Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3266: StateWorkAsFollower, received event# 268828680, Sender [1:7501623344580086800:2335], Recipient [1:7501623344580086830:2338]: NKikimr::TEvTablet::TEvFUpdate 2025-05-07T08:50:06.008884Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3266: StateWorkAsFollower, received event# 268828680, Sender [1:7501623344580086801:2336], Recipient [1:7501623344580086832:2339]: NKikimr::TEvTablet::TEvFUpdate 2025-05-07T08:50:06.008965Z node 1 :TX_DATASHARD TRACE: 
datashard_impl.h:3266: StateWorkAsFollower, received event# 268828680, Sender [1:7501623344580086802:2337], Recipient [1:7501623344580086833:2340]: NKikimr::TEvTablet::TEvFUpdate 2025-05-07T08:50:06.023962Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3266: StateWorkAsFollower, received event# 2146435079, Sender [0:0:0], Recipient [1:7501623344580086832:2339]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvPeriodicWakeup 2025-05-07T08:50:06.024003Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3282: StateWorkAsFollower, processing event TEvPrivate::TEvPeriodicWakeup 2025-05-07T08:50:06.024121Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3266: StateWorkAsFollower, received event# 2146435079, Sender [0:0:0], Recipient [1:7501623344580086830:2338]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvPeriodicWakeup 2025-05-07T08:50:06.024134Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3282: StateWorkAsFollower, processing event TEvPrivate::TEvPeriodicWakeup 2025-05-07T08:50:06.030021Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3266: StateWorkAsFollower, received event# 2146435079, Sender [0:0:0], Recipient [1:7501623344580086833:2340]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvPeriodicWakeup 2025-05-07T08:50:06.030062Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3282: StateWorkAsFollower, processing event TEvPrivate::TEvPeriodicWakeup 2025-05-07T08:50:06.104790Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 2146435092, Sender [0:0:0], Recipient [1:7501623327400217321:2199]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats 2025-05-07T08:50:06.104828Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5015: StateWork, processing event TEvPrivate::TEvPersistTableStats 2025-05-07T08:50:06.104846Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:588: Started TEvPersistStats at tablet 72057594046644480, queue size# 1 2025-05-07T08:50:06.104909Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:599: Will execute TTxStoreStats, queue# 1 2025-05-07T08:50:06.104926Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:608: Will delay TTxStoreTableStats on# 0.000189s, queue# 1 2025-05-07T08:50:06.104981Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 2 shard idx 72057594046644480:1 data size 800 row count 4 2025-05-07T08:50:06.105034Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037888 maps to shardIdx: 72057594046644480:1 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 2], pathId map=Followers, is column=0, is olap=0, RowCount 4, DataSize 800 2025-05-07T08:50:06.105046Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037888, followerId 0 2025-05-07T08:50:06.105125Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:1 with partCount# 0, rowCount# 4, searchHeight# 1, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-05-07T08:50:06.105183Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:483: Do not want to split tablet 72075186224037888 2025-05-07T08:50:06.105241Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 
2025-05-07T08:50:06.105484Z node 1 :SYSTEM_VIEWS TRACE: partition_stats.cpp:152: TEvSysView::TEvSendPartitionStats: domainKey [OwnerId: 72057594046644480, LocalPathId: 1] pathId [OwnerId: 72057594046644480, LocalPathId: 2] shardIdx 72057594046644480 1 followerId 0 stats DataSize: 800 RowCount: 4 IndexSize: 0 CPUCores: 0.000934 TabletId: 72075186224037888 NodeId: 1 StartTime: 1746607790966 AccessTime: 1746607791422 UpdateTime: 1746607791298 InFlightTxCount: 0 RowUpdates: 4 RowDeletes: 0 RowReads: 1 RangeReads: 0 RangeReadRows: 0 ImmediateTxCompleted: 1 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 ByKeyFilterSize: 0 FollowerId: 0 LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 2025-05-07T08:50:06.106014Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 2146435092, Sender [0:0:0], Recipient [1:7501623327400217321:2199]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats 2025-05-07T08:50:06.106036Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5015: StateWork, processing event TEvPrivate::TEvPersistTableStats 2025-05-07T08:50:06.106050Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:588: Started TEvPersistStats at tablet 72057594046644480, queue size# 0 2025-05-07T08:50:06.723357Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:7501623327400217321:2199]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T08:50:06.723405Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4848: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T08:50:06.723453Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271124999, Sender [1:7501623327400217321:2199], Recipient [1:7501623327400217321:2199]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T08:50:06.723468Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4847: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime ... 
SELECT from partition_stats, attempt 3 2025-05-07T08:50:07.725002Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:7501623327400217321:2199]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T08:50:07.725043Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4848: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T08:50:07.725097Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271124999, Sender [1:7501623327400217321:2199], Recipient [1:7501623327400217321:2199]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T08:50:07.725111Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4847: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T08:50:07.788941Z node 1 :SYSTEM_VIEWS INFO: sysview_service.cpp:886: Navigate by database succeeded: service id# [1:7501623327400217038:2199], database# /Root, no sysview processor 2025-05-07T08:50:07.928776Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:45: Scan started, actor: [1:7501623413299563948:2447], owner: [1:7501623413299563944:2445], scan id: 0, table id: [72057594046644480:1:0:partition_stats] 2025-05-07T08:50:07.930331Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:321: Scan prepared, actor: [1:7501623413299563948:2447], schemeshard id: 72057594046644480, hive id: 72057594037968897, database: /Root, database owner: root@builtin, domain key: [OwnerId: 72057594046644480, LocalPathId: 1], database node count: 1 2025-05-07T08:50:07.930658Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 274595843, Sender [1:7501623413299563948:2447], Recipient [1:7501623327400217321:2199]: NKikimrSysView.TEvGetPartitionStats DomainKeyOwnerId: 72057594046644480 DomainKeyPathId: 1 From { } FromInclusive: true To { } ToInclusive: false IncludePathColumn: true 2025-05-07T08:50:07.930690Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4905: StateWork, processing event NSysView::TEvSysView::TEvGetPartitionStats 2025-05-07T08:50:07.930836Z node 1 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:65: Sending scan batch, actor: [1:7501623413299563948:2447], row count: 2, finished: 1 2025-05-07T08:50:07.930883Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:120: Scan finished, actor: [1:7501623413299563948:2447], owner: [1:7501623413299563944:2445], scan id: 0, table id: [72057594046644480:1:0:partition_stats] 2025-05-07T08:50:07.935554Z node 1 :SYSTEM_VIEWS TRACE: sysview_service.cpp:900: Collect query stats: service id# [1:7501623327400217038:2199], database# /Root, query hash# 14960494650040056739, cpu time# 347766 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/viewer/ut/unittest >> Viewer::AuthorizeYdbTokenWithDatabaseAttributes [GOOD] Test command err: 2025-05-07T08:49:41.260272Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:445:2362], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:49:41.260875Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:49:41.261109Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:49:41.262098Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:738:2373], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:49:41.262538Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:49:41.262759Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-05-07T08:49:41.736013Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:49:41.900149Z node 1 :BS_NODE WARN: {NW89@node_warden_pdisk.cpp:106} Can't write new MockDevicesConfig to file Path# /Berkanavt/kikimr/testing/mock_devices.txt 2025-05-07T08:49:41.927514Z node 1 :BS_PDISK WARN: {BSP01@blobstorage_pdisk_actor.cpp:422} Magic sector is present on disk, now going to format device PDiskId# 1000 2025-05-07T08:49:42.461751Z node 1 :BS_PDISK WARN: {BSP01@blobstorage_pdisk_actor.cpp:362} Device formatting done PDiskId# 1000 TServer::EnableGrpc on GrpcPort 19698, node 1 TClient is connected to server localhost:26793 2025-05-07T08:49:42.797583Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:49:42.797651Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:49:42.797691Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:49:42.798444Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-05-07T08:49:46.519968Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7501623320715408286:2060];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:49:46.520032Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-05-07T08:49:46.767611Z node 3 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:49:46.774496Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:49:46.774614Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:49:46.776322Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 4240, node 3 2025-05-07T08:49:46.874919Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:49:46.874952Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:49:46.874971Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:49:46.875171Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23552 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:49:47.267243Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:47.287583Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-05-07T08:49:47.311846Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715659:0, at schemeshard: 72057594046644480 2025-05-07T08:49:47.319841Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:47.325963Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715660, at schemeshard: 72057594046644480 2025-05-07T08:49:49.955230Z node 3 :TICKET_PARSER INFO: viewer_ut.cpp:437: Ticket parser: got TEvAuthorizeTicket event: test_ydb_token /Root 1 2025-05-07T08:49:49.955311Z node 3 :TICKET_PARSER INFO: viewer_ut.cpp:496: Send TEvAuthorizeTicketResult success 2025-05-07T08:49:50.437117Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7501623337895278165:2344], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:50.437651Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:50.438255Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7501623337895278177:2347], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:50.444555Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715661:3, at schemeshard: 72057594046644480 2025-05-07T08:49:50.464484Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7501623337895278179:2348], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715661 completed, doublechecking } 2025-05-07T08:49:50.533747Z node 3 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [3:7501623337895278230:2353] txid# 281474976715662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:49:51.185790Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2578: SessionId: ydb://session/3?node_id=3&id=ZGJhYTE2OTktNWM2Y2JmM2MtNDU5ZmZkYjAtYTQyNGI5OTY=, ActorId: [3:7501623337895278154:2342], ActorState: ExecuteState, TraceId: 01jtmyzpb33a0tmgrk38n4qve9, Create QueryResponse for error on request, msg: Scheme operations cannot be executed inside transaction 2025-05-07T08:49:53.571043Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7501623350433242835:2066];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:49:53.571133Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-05-07T08:49:53.733945Z node 4 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:49:53.761272Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:49:53.761361Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:49:53.763129Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 15198, node 4 2025-05-07T08:49:53.842344Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:49:53.842379Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:49:53.842387Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:49:53.842529Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:24217 2025-05-07T08:49:54.233195Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710658:0, at schemeshard: 72057594046644480 2025-05-07T08:49:54.236629Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:56.914447Z node 4 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:575: Got grpc request# request auth and check internal request, traceId# undef, sdkBuildInfo# undef, state# AS_NOT_PERFORMED, database# /Root, peer# , grpcInfo# undef, timeout# 9.999922s 2025-05-07T08:49:56.914600Z node 4 :TICKET_PARSER INFO: viewer_ut.cpp:437: Ticket parser: got TEvAuthorizeTicket event: test_ydb_token /Root 1 2025-05-07T08:49:56.914640Z node 4 :TICKET_PARSER INFO: viewer_ut.cpp:496: Send TEvAuthor ... 
node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0000ed980] received request Name# GetScaleRecommendation ok# false data# peer# 2025-05-07T08:50:05.413526Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0000eee80] received request Name# ListEndpoints ok# false data# peer# 2025-05-07T08:50:05.413751Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0000ee080] received request Name# WhoAmI ok# false data# peer# 2025-05-07T08:50:05.413818Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0000ee780] received request Name# NodeRegistration ok# false data# peer# 2025-05-07T08:50:05.414043Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0000ef580] received request Name# Scan ok# false data# peer# 2025-05-07T08:50:05.414149Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0000efc80] received request Name# GetShardLocations ok# false data# peer# 2025-05-07T08:50:05.414350Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0000f0380] received request Name# DescribeTable ok# false data# peer# 2025-05-07T08:50:05.414430Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0000f0a80] received request Name# CreateSnapshot ok# false data# peer# 2025-05-07T08:50:05.414658Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0000f1180] received request Name# RefreshSnapshot ok# false data# peer# 2025-05-07T08:50:05.414699Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0000f1880] received request Name# DiscardSnapshot ok# false data# peer# 2025-05-07T08:50:05.414958Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0000f1f80] received request Name# List ok# false data# peer# 2025-05-07T08:50:05.414999Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0000f2680] received request Name# RateLimiter/CreateResource ok# false data# peer# 2025-05-07T08:50:05.415245Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0000f2d80] received request Name# RateLimiter/AlterResource ok# false data# peer# 2025-05-07T08:50:05.415281Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0000f3480] received request Name# RateLimiter/DropResource ok# false data# peer# 2025-05-07T08:50:05.415528Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0000f3b80] received request Name# RateLimiter/ListResources ok# false data# peer# 2025-05-07T08:50:05.415542Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0000f4280] received request Name# RateLimiter/DescribeResource ok# false data# peer# 2025-05-07T08:50:05.415812Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0000f4980] received request Name# RateLimiter/AcquireResource ok# false data# peer# 2025-05-07T08:50:05.415874Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0000f5780] received request Name# CreateStream ok# false data# peer# 2025-05-07T08:50:05.416084Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0000f5e80] received request Name# ListStreams ok# false data# peer# 2025-05-07T08:50:05.416164Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000166c80] received request Name# DeleteStream ok# false data# peer# 2025-05-07T08:50:05.416376Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0000f5080] received request Name# DescribeStream ok# false data# peer# 2025-05-07T08:50:05.416510Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000167380] received request Name# ListShards ok# false data# peer# 2025-05-07T08:50:05.416699Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0001f2c80] received request Name# SetWriteQuota ok# false data# peer# 2025-05-07T08:50:05.416819Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0001f2580] received request Name# UpdateStream ok# false data# peer# 
2025-05-07T08:50:05.416946Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000167a80] received request Name# PutRecord ok# false data# peer# 2025-05-07T08:50:05.417092Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000168180] received request Name# PutRecords ok# false data# peer# 2025-05-07T08:50:05.417200Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000168880] received request Name# GetRecords ok# false data# peer# 2025-05-07T08:50:05.417378Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000168f80] received request Name# GetShardIterator ok# false data# peer# 2025-05-07T08:50:05.417479Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000169680] received request Name# SubscribeToShard ok# false data# peer# 2025-05-07T08:50:05.417652Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b00016a480] received request Name# DescribeLimits ok# false data# peer# 2025-05-07T08:50:05.417736Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b00016ab80] received request Name# DescribeStreamSummary ok# false data# peer# 2025-05-07T08:50:05.417937Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b00016b280] received request Name# DecreaseStreamRetentionPeriod ok# false data# peer# 2025-05-07T08:50:05.418128Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b00016b980] received request Name# IncreaseStreamRetentionPeriod ok# false data# peer# 2025-05-07T08:50:05.418377Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b00016c080] received request Name# UpdateShardCount ok# false data# peer# 2025-05-07T08:50:05.418649Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0001ec380] received request Name# UpdateStreamMode ok# false data# peer# 2025-05-07T08:50:05.418714Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0001eca80] received request Name# RegisterStreamConsumer ok# false data# peer# 2025-05-07T08:50:05.418948Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0001ed180] received request Name# DeregisterStreamConsumer ok# false data# peer# 2025-05-07T08:50:05.419012Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0001ed880] received request Name# DescribeStreamConsumer ok# false data# peer# 2025-05-07T08:50:05.419203Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0001edf80] received request Name# ListStreamConsumers ok# false data# peer# 2025-05-07T08:50:05.419273Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0001ee680] received request Name# AddTagsToStream ok# false data# peer# 2025-05-07T08:50:05.419482Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0001eed80] received request Name# DisableEnhancedMonitoring ok# false data# peer# 2025-05-07T08:50:05.419531Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0001ef480] received request Name# EnableEnhancedMonitoring ok# false data# peer# 2025-05-07T08:50:05.419751Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0001efb80] received request Name# ListTagsForStream ok# false data# peer# 2025-05-07T08:50:05.419796Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0001f0280] received request Name# MergeShards ok# false data# peer# 2025-05-07T08:50:05.420008Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0001f0980] received request Name# RemoveTagsFromStream ok# false data# peer# 2025-05-07T08:50:05.420066Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0001f1080] received request Name# SplitShard ok# false data# peer# 2025-05-07T08:50:05.420270Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0001f1780] received request Name# StartStreamEncryption ok# false data# peer# 2025-05-07T08:50:05.420361Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: 
[0x51b0001f1e80] received request Name# StopStreamEncryption ok# false data# peer# 2025-05-07T08:50:05.420650Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0001f3380] received request Name# SelfCheck ok# false data# peer# 2025-05-07T08:50:05.420916Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0000f6580] received request Name# NodeCheck ok# false data# peer# 2025-05-07T08:50:05.421228Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0000f8180] received request Name# CreateSession ok# false data# peer# 2025-05-07T08:50:05.421466Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0000f8880] received request Name# DeleteSession ok# false data# peer# 2025-05-07T08:50:05.421511Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0000f8f80] received request Name# AttachSession ok# false data# peer# 2025-05-07T08:50:05.421773Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0000f9d80] received request Name# BeginTransaction ok# false data# peer# 2025-05-07T08:50:05.421812Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0000fa480] received request Name# CommitTransaction ok# false data# peer# 2025-05-07T08:50:05.422084Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0000fab80] received request Name# RollbackTransaction ok# false data# peer# 2025-05-07T08:50:05.422120Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0000f6c80] received request Name# ExecuteQuery ok# false data# peer# 2025-05-07T08:50:05.422352Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000350580] received request Name# ExecuteScript ok# false data# peer# 2025-05-07T08:50:05.422444Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0000f7a80] received request Name# FetchScriptResults ok# false data# peer# 2025-05-07T08:50:05.422620Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0000fce80] received request Name# ExecuteTabletMiniKQL ok# false data# peer# 2025-05-07T08:50:05.422684Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0000fb280] received request Name# ChangeTabletSchema ok# false data# peer# 2025-05-07T08:50:05.422891Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0000fb980] received request Name# RestartTablet ok# false data# peer# 2025-05-07T08:50:05.422971Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0000fc080] received request Name# CreateLogStore ok# false data# peer# 2025-05-07T08:50:05.423136Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0000fc780] received request Name# DescribeLogStore ok# false data# peer# 2025-05-07T08:50:05.423222Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0000fd580] received request Name# DropLogStore ok# false data# peer# 2025-05-07T08:50:05.423374Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0000fdc80] received request Name# AlterLogStore ok# false data# peer# 2025-05-07T08:50:05.423485Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0000fe380] received request Name# CreateLogTable ok# false data# peer# 2025-05-07T08:50:05.423626Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0000fff80] received request Name# DescribeLogTable ok# false data# peer# 2025-05-07T08:50:05.423756Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000100680] received request Name# DropLogTable ok# false data# peer# 2025-05-07T08:50:05.423895Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000110980] received request Name# AlterLogTable ok# false data# peer# 2025-05-07T08:50:05.424032Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000111080] received request Name# Login ok# false data# peer# 2025-05-07T08:50:05.424187Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000111780] 
received request Name# DescribeReplication ok# false data# peer# 2025-05-07T08:50:05.424306Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000111e80] received request Name# DescribeView ok# false data# peer# 2025-05-07T08:50:05.458968Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[5:7501623382514476381:2060];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:50:05.459081Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> EvWrite::WriteWithSplit >> Normalizers::ColumnChunkNormalizer >> KqpSystemView::NodesRange2 [GOOD] >> TColumnShardTestReadWrite::WriteStandaloneExoticTypes >> ReadOnlyVDisk::TestWrites [GOOD] >> test_sql_streaming.py::test[suites-ReadTopicWithMetadataInsideFilter-default.txt] [FAIL] >> test_sql_streaming.py::test[suites-ReadTopicWithMetadataNestedDeep-default.txt] >> VDiskBalancing::TestRandom_Mirror3dc [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_read_only_vdisk/unittest >> ReadOnlyVDisk::TestWrites [GOOD] Test command err: RandomSeed# 17471997715254311413 === Trying to put and get a blob === SEND TEvPut with key [1:1:0:0:0:131072:0] TEvPutResult: TEvPutResult {Id# [1:1:0:0:0:131072:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} === Read all 1 blob(s) === SEND TEvGet with key [1:1:0:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:0:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} === Putting VDisk #0 to read-only === Setting VDisk read-only to 1 for position 0 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:0:0] === Write 10 blobs, expect some VDisks refuse parts but writes go through === SEND TEvPut with key [1:1:1:0:0:32768:0] 2025-05-07T08:50:06.789917Z 1 00h01m30.060512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5308:698] TEvPutResult: TEvPutResult {Id# [1:1:1:0:0:32768:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:2:0:0:131072:0] 2025-05-07T08:50:06.801024Z 1 00h01m30.060512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5308:698] TEvPutResult: TEvPutResult {Id# [1:1:2:0:0:131072:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:3:0:0:32768:0] 2025-05-07T08:50:06.806386Z 1 00h01m30.060512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5308:698] TEvPutResult: TEvPutResult {Id# [1:1:3:0:0:32768:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:4:0:0:131072:0] 2025-05-07T08:50:06.809137Z 1 00h01m30.060512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5308:698] TEvPutResult: TEvPutResult {Id# [1:1:4:0:0:131072:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:5:0:0:32768:0] TEvPutResult: TEvPutResult {Id# [1:1:5:0:0:32768:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:6:0:0:131072:0] TEvPutResult: TEvPutResult {Id# [1:1:6:0:0:131072:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:7:0:0:32768:0] 2025-05-07T08:50:06.817269Z 1 00h01m30.060512s 
:BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5308:698] TEvPutResult: TEvPutResult {Id# [1:1:7:0:0:32768:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:8:0:0:131072:0] 2025-05-07T08:50:06.820201Z 1 00h01m30.060512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5308:698] TEvPutResult: TEvPutResult {Id# [1:1:8:0:0:131072:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:9:0:0:32768:0] 2025-05-07T08:50:06.823378Z 1 00h01m30.060512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5308:698] TEvPutResult: TEvPutResult {Id# [1:1:9:0:0:32768:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:10:0:0:131072:0] 2025-05-07T08:50:06.826343Z 1 00h01m30.060512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5308:698] TEvPutResult: TEvPutResult {Id# [1:1:10:0:0:131072:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} === Read all 11 blob(s) === SEND TEvGet with key [1:1:0:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:0:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:1:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:1:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:2:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:2:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:3:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:3:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:4:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:4:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:5:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:5:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:6:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:6:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:7:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:7:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:8:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:8:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:9:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:9:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:10:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:10:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} === Put 2 more VDisks to read-only === Setting VDisk read-only to 1 for position 1 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:1:0] Setting VDisk read-only to 1 for position 2 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:2:0] === Write 10 more blobs, expect errors === SEND TEvPut with key [1:1:11:0:0:32768:0] 2025-05-07T08:50:07.929892Z 1 00h03m30.110512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5308:698] 2025-05-07T08:50:07.930071Z 3 00h03m30.110512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) 
Unavailable in read-only Sender# [1:5322:712] 2025-05-07T08:50:07.930235Z 2 00h03m30.110512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5315:705] 2025-05-07T08:50:07.931367Z 1 00h03m30.110512s :BS_PROXY_PUT ERROR: [9edc6be91323e6de] Result# TEvPutResult {Id# [1:1:11:0:0:32768:0] Status# ERROR StatusFlags# { } ErrorReason# "TRestoreStrategy saw optimisticState# EBS_DISINTEGRATED GroupId# 2181038080 BlobId# [1:1:11:0:0:32768:0] Reported ErrorReasons# [ { OrderNumber# 0 VDiskId# [82000000:1:0:0:0] NodeId# 1 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 1 VDiskId# [82000000:1:0:1:0] NodeId# 2 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 2 VDiskId# [82000000:1:0:2:0] NodeId# 3 ErrorReasons# [ "VDisk is in read-only mode", ] } ] Part situations# [ { OrderNumber# 5 Situations# SUUUUU } { OrderNumber# 6 Situations# USUUUU } { OrderNumber# 7 Situations# UUSUUU } { OrderNumber# 0 Situations# UUUEUU } { OrderNumber# 1 Situations# UUUUEU } { OrderNumber# 2 Situations# UUUUUE } { OrderNumber# 3 Situations# UUUSUU } { OrderNumber# 4 Situations# UUUUUS } ] " ApproximateFreeSpaceShare# 0.999988} GroupId# 2181038080 Marker# BPP12 TEvPutResult: TEvPutResult {Id# [1:1:11:0:0:32768:0] Status# ERROR StatusFlags# { } ErrorReason# "TRestoreStrategy saw optimisticState# EBS_DISINTEGRATED GroupId# 2181038080 BlobId# [1:1:11:0:0:32768:0] Reported ErrorReasons# [ { OrderNumber# 0 VDiskId# [82000000:1:0:0:0] NodeId# 1 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 1 VDiskId# [82000000:1:0:1:0] NodeId# 2 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 2 VDiskId# [82000000:1:0:2:0] NodeId# 3 ErrorReasons# [ "VDisk is in read-only mode", ] } ] Part situations# [ { OrderNumber# 5 Situations# SUUUUU } { OrderNumber# 6 Situations# USUUUU } { OrderNumber# 7 Situations# UUSUUU } { OrderNumber# 0 Situations# UUUEUU } { OrderNumber# 1 Situations# UUUUEU } { OrderNumber# 2 Situations# UUUUUE } { OrderNumber# 3 Situations# UUUSUU } { OrderNumber# 4 Situations# UUUUUS } ] " ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:12:0:0:131072:0] 2025-05-07T08:50:07.933460Z 1 00h03m30.110512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5308:698] 2025-05-07T08:50:07.933648Z 2 00h03m30.110512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5315:705] 2025-05-07T08:50:07.935275Z 3 00h03m30.110512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5322:712] TEvPutResult: TEvPutResult {Id# [1:1:12:0:0:131072:0] Status# ERROR StatusFlags# { } ErrorReason# "TRestoreStrategy saw optimisticState# EBS_DISINTEGRATED GroupId# 2181038080 BlobId# [1:1:12:0:0:131072:0] Reported ErrorReasons# [ { OrderNumber# 0 VDiskId# [82000000:1:0:0:0] NodeId# 1 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 1 VDiskId# [82000000:1:0:1:0] NodeId# 2 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 2 VDiskId# [82000000:1:0:2:0] NodeId# 3 ErrorReasons# [ "VDisk is in read-only mode", ] } ] Part situations# [ { OrderNumber# 4 Situations# SUUUUU } { OrderNumber# 5 Situations# USUUUU } { OrderNumber# 6 Situations# UUSUUU } { OrderNumber# 7 Situations# UUUSUU } { OrderNumber# 0 Situations# UUUUEU } { OrderNumber# 1 Situations# UUUUUE } { OrderNumber# 2 Situations# UUUUEU } { OrderNumber# 3 Situations# UUUUUS } ] 
" ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:13:0:0:32768:0] 2025-05-07T08:50:07.937562Z 1 00h03m30.110512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5308:698] 2025-05-07T08:50:07.938668Z 2 00h03m30.110512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5315:705] 2025-05-07T08:50:07.939770Z 3 00h03m30.110512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5322:712] TEvPutResult: TEvPutResult {Id# [1:1:13:0:0:32768:0] Status# ERROR StatusFlags# { } ErrorReason# "TRestoreStrategy saw optimisticState# EBS_DISINTEGRATED GroupId# 2181038080 BlobId# [1:1:13:0:0:32768:0] Reported ErrorReasons# [ { OrderNumber# 0 VDiskId# [82000000:1:0:0:0] NodeId# 1 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 1 VDiskId# [82000000:1:0:1:0] NodeId# 2 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 2 VDiskId# [82000000:1:0:2:0] NodeId# 3 ErrorReasons# [ "VDisk is in read-only mode", ] } ] Part situations# [ { OrderNumber# 3 Situations# PUUUUU } { OrderNumber# 4 Situations# UPUUUU } { OrderNumber# 5 Situations# UUPUUU } { OrderNumber# 6 Situations# UUUPUU } { OrderNumber# 7 Situations# UUUUPU } { OrderNumber# 0 Situations# UUUUUE } { OrderNumber# 1 Situations# UUUUUE } { OrderNumber# 2 Situations# UUUUUE } ] " ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:14:0:0:131072:0] 2025-05-07T08:50:07.941460Z 3 00h03m30.110512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5322:712] 2025-05-07T08:50:07.942902Z 1 00h03m30.110512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5308:698] 2025-05-07T08:50:07.943661Z 2 00h03m30.110512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5315:705] TEvPutResult: TEvPutResult {Id# [1:1:14:0:0:131072:0] Status# ERROR StatusFlags# { } ErrorReason# "TRestoreStrategy saw optimisticState# EBS_DISINTEGRATED GroupId# 2181038080 BlobId# [1:1:14:0:0:131072:0] Reported ErrorReasons# [ { OrderNumber# 2 VDiskId# [82000000:1:0:2:0] NodeId# 3 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 0 VDiskId# [82000000:1:0:0:0] NodeId# 1 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 1 VDiskId# [82000000:1:0:1:0] NodeId# 2 ErrorReasons# [ "VDisk is in read-only m ... 
ey [1:1:5:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:5:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:6:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:6:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:7:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:7:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:8:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:8:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:9:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:9:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:10:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:10:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:11:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:11:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:12:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:12:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:13:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:13:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:14:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:14:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:15:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:15:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:16:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:16:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:17:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:17:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:18:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:18:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:19:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:19:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:20:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:20:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} === Restoring to normal VDisk #0 === Setting VDisk read-only to 0 for position 0 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:0:0] === Write 10 blobs, expect some VDisks refuse parts but the writes still go through === SEND TEvPut with key [1:1:21:0:0:32768:0] 2025-05-07T08:50:10.381997Z 3 00h08m00.161536s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5322:712] 2025-05-07T08:50:10.386324Z 2 00h08m00.161536s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5315:705] TEvPutResult: TEvPutResult {Id# [1:1:21:0:0:32768:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999963} SEND TEvPut with key [1:1:22:0:0:131072:0] 2025-05-07T08:50:10.395086Z 2 00h08m00.161536s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5315:705] 2025-05-07T08:50:10.396982Z 3 00h08m00.161536s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only 
Sender# [1:5322:712] TEvPutResult: TEvPutResult {Id# [1:1:22:0:0:131072:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999963} SEND TEvPut with key [1:1:23:0:0:32768:0] TEvPutResult: TEvPutResult {Id# [1:1:23:0:0:32768:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999963} SEND TEvPut with key [1:1:24:0:0:131072:0] 2025-05-07T08:50:10.403031Z 3 00h08m00.161536s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5322:712] TEvPutResult: TEvPutResult {Id# [1:1:24:0:0:131072:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999963} SEND TEvPut with key [1:1:25:0:0:32768:0] 2025-05-07T08:50:10.406424Z 3 00h08m00.161536s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5322:712] 2025-05-07T08:50:10.406547Z 2 00h08m00.161536s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5315:705] TEvPutResult: TEvPutResult {Id# [1:1:25:0:0:32768:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999963} SEND TEvPut with key [1:1:26:0:0:131072:0] 2025-05-07T08:50:10.409436Z 3 00h08m00.161536s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5322:712] 2025-05-07T08:50:10.409516Z 2 00h08m00.161536s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5315:705] TEvPutResult: TEvPutResult {Id# [1:1:26:0:0:131072:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999963} SEND TEvPut with key [1:1:27:0:0:32768:0] 2025-05-07T08:50:10.412374Z 3 00h08m00.161536s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5322:712] 2025-05-07T08:50:10.412451Z 2 00h08m00.161536s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5315:705] TEvPutResult: TEvPutResult {Id# [1:1:27:0:0:32768:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999963} SEND TEvPut with key [1:1:28:0:0:131072:0] 2025-05-07T08:50:10.414700Z 2 00h08m00.161536s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5315:705] 2025-05-07T08:50:10.414936Z 3 00h08m00.161536s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5322:712] TEvPutResult: TEvPutResult {Id# [1:1:28:0:0:131072:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999963} SEND TEvPut with key [1:1:29:0:0:32768:0] 2025-05-07T08:50:10.417179Z 3 00h08m00.161536s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5322:712] 2025-05-07T08:50:10.417260Z 2 00h08m00.161536s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5315:705] TEvPutResult: TEvPutResult {Id# [1:1:29:0:0:32768:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999963} SEND TEvPut with key [1:1:30:0:0:131072:0] 2025-05-07T08:50:10.419993Z 3 00h08m00.161536s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5322:712] 2025-05-07T08:50:10.420125Z 2 00h08m00.161536s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5315:705] TEvPutResult: TEvPutResult {Id# [1:1:30:0:0:131072:0] Status# OK StatusFlags# { } 
ApproximateFreeSpaceShare# 0.999963} === Read all 31 blob(s) === SEND TEvGet with key [1:1:0:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:0:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:1:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:1:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:2:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:2:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:3:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:3:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:4:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:4:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:5:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:5:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:6:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:6:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:7:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:7:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:8:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:8:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:9:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:9:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:10:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:10:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:11:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:11:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:12:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:12:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:13:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:13:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:14:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:14:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:15:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:15:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:16:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:16:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:17:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:17:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:18:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:18:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:19:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:19:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:20:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:20:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:21:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:21:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:22:0:0:131072:0] 
TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:22:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:23:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:23:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:24:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:24:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:25:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:25:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:26:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:26:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:27:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:27:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:28:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:28:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:29:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:29:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:30:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:30:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} >> TColumnShardTestReadWrite::WriteReadStandaloneExoticTypes >> TColumnShardTestReadWrite::CompactionSplitGranuleStrKey_PKUtf8 >> ReadOnlyVDisk::TestDiscover [GOOD] >> ReadOnlyVDisk::TestGetWithMustRestoreFirst [GOOD] |88.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_replication/ydb-core-tx-schemeshard-ut_replication |89.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_replication/ydb-core-tx-schemeshard-ut_replication |89.0%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_replication/ydb-core-tx-schemeshard-ut_replication >> KqpSystemView::NodesRange1 [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> KqpSystemView::NodesRange2 [GOOD] Test command err: Trying to start YDB, gRPC: 1937, MsgBus: 27180 2025-05-07T08:49:56.552785Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501623363126370966:2075];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:49:56.552842Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-05-07T08:49:56.606122Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7501623366668610621:2073];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:49:56.606174Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-05-07T08:49:56.631237Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7501623363382752550:2112];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:49:56.642386Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7501623363704618814:2081];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:49:56.642476Z node 4 
:METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-05-07T08:49:56.694074Z node 5 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[5:7501623364997198404:2145];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:49:56.725302Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-05-07T08:49:56.737713Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002fee/r3tmp/tmpWgmTkd/pdisk_1.dat 2025-05-07T08:49:57.653160Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:49:57.662821Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:49:57.680314Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:49:57.753019Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:49:57.753145Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:49:57.997024Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:49:58.042620Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:49:58.042804Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:49:58.043201Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:49:58.043271Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:49:58.043452Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:49:58.043500Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:49:58.043640Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:49:58.043682Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:49:58.056025Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle 
TEvInterconnect::TEvNodeConnected, NodeId 3 Cookie 3 2025-05-07T08:49:58.056070Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-05-07T08:49:58.056092Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 4 Cookie 4 2025-05-07T08:49:58.056114Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 5 Cookie 5 2025-05-07T08:49:58.057849Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:49:58.058342Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:49:58.061281Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:49:58.062933Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:49:58.098708Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:49:58.098868Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:49:58.103293Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 1937, node 1 2025-05-07T08:49:58.354980Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:49:58.356906Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:49:58.356927Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:49:58.357104Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:27180 TClient is connected to server localhost:27180 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:50:00.449168Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T08:50:00.688277Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:50:00.973564Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:50:01.499447Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:50:01.558900Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501623363126370966:2075];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:50:01.558955Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:50:01.610053Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7501623366668610621:2073];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:50:01.631603Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7501623363382752550:2112];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:50:01.614102Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:50:01.643637Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7501623363704618814:2081];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:50:01.643690Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:50:01.631666Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:50:01.679673Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[5:7501623364997198404:2145];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:50:01.679749Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:50:01.797697Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:50:04.100144Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623397486111277:2364], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:50:04.100271Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:50:04.521369Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T08:50:04.617214Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T08:50:04.700730Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T08:50:04.760700Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T08:50:04.826891Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T08:50:04.987420Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T08:50:05.113186Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T08:50:05.250112Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623401781079364:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:50:05.250271Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:50:05.251943Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623401781079369:2406], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:50:05.255860Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-05-07T08:50:05.287047Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501623401781079372:2407], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
<main>: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T08:50:05.357441Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501623401781079444:4157] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:50:06.967734Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1746607806947, txId: 281474976710672] shutting down >> test_sql_streaming.py::test[suites-GroupByHoppingWindowNoKey-default.txt] [FAIL] >> test_sql_streaming.py::test[suites-GroupByHoppingWindowPercentile-default.txt] >> TColumnShardTestReadWrite::ReadAggregate [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_read_only_vdisk/unittest >> ReadOnlyVDisk::TestDiscover [GOOD] Test command err: RandomSeed# 10717309661802234439 SEND TEvPut with key [1:1:0:0:0:131072:0] TEvPutResult: TEvPutResult {Id# [1:1:0:0:0:131072:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:1:0:0:32768:0] TEvPutResult: TEvPutResult {Id# [1:1:1:0:0:32768:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:2:0:0:131072:0] TEvPutResult: TEvPutResult {Id# [1:1:2:0:0:131072:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} === Read all 3 blob(s) === SEND TEvGet with key [1:1:0:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:0:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:1:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:1:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:2:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:2:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} Setting VDisk read-only to 1 for position 0 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:0:0] SEND TEvPut with key [1:1:3:0:0:32768:0] 2025-05-07T08:50:07.494506Z 1 00h01m30.060512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5309:698] TEvPutResult: TEvPutResult {Id# [1:1:3:0:0:32768:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Setting VDisk read-only to 1 for position 1 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:1:0] SEND TEvPut with key [1:1:4:0:0:131072:0] 2025-05-07T08:50:07.793704Z 1 00h02m00.100000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5309:698] 2025-05-07T08:50:07.795126Z 2 00h02m00.100000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5316:705] TEvPutResult: TEvPutResult {Id# [1:1:4:0:0:131072:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Setting VDisk read-only to 1 for position 2 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:2:0] SEND TEvPut with key [1:1:5:0:0:32768:0] 2025-05-07T08:50:08.087756Z 3 00h02m30.110512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5323:712] 2025-05-07T08:50:08.088960Z 1 00h02m30.110512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5309:698]
2025-05-07T08:50:08.089767Z 2 00h02m30.110512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5316:705] 2025-05-07T08:50:08.090114Z 1 00h02m30.110512s :BS_PROXY_PUT ERROR: [9d4923c84f32c9c6] Result# TEvPutResult {Id# [1:1:5:0:0:32768:0] Status# ERROR StatusFlags# { } ErrorReason# "TRestoreStrategy saw optimisticState# EBS_DISINTEGRATED GroupId# 2181038080 BlobId# [1:1:5:0:0:32768:0] Reported ErrorReasons# [ { OrderNumber# 2 VDiskId# [82000000:1:0:2:0] NodeId# 3 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 0 VDiskId# [82000000:1:0:0:0] NodeId# 1 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 1 VDiskId# [82000000:1:0:1:0] NodeId# 2 ErrorReasons# [ "VDisk is in read-only mode", ] } ] Part situations# [ { OrderNumber# 2 Situations# EUUUUU } { OrderNumber# 3 Situations# UPUUUU } { OrderNumber# 4 Situations# UUPUUU } { OrderNumber# 5 Situations# UUUPUU } { OrderNumber# 6 Situations# UUUUPU } { OrderNumber# 7 Situations# UUUUUP } { OrderNumber# 0 Situations# EUUUUU } { OrderNumber# 1 Situations# EUUUUU } ] " ApproximateFreeSpaceShare# 0.999988} GroupId# 2181038080 Marker# BPP12 TEvPutResult: TEvPutResult {Id# [1:1:5:0:0:32768:0] Status# ERROR StatusFlags# { } ErrorReason# "TRestoreStrategy saw optimisticState# EBS_DISINTEGRATED GroupId# 2181038080 BlobId# [1:1:5:0:0:32768:0] Reported ErrorReasons# [ { OrderNumber# 2 VDiskId# [82000000:1:0:2:0] NodeId# 3 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 0 VDiskId# [82000000:1:0:0:0] NodeId# 1 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 1 VDiskId# [82000000:1:0:1:0] NodeId# 2 ErrorReasons# [ "VDisk is in read-only mode", ] } ] Part situations# [ { OrderNumber# 2 Situations# EUUUUU } { OrderNumber# 3 Situations# UPUUUU } { OrderNumber# 4 Situations# UUPUUU } { OrderNumber# 5 Situations# UUUPUU } { OrderNumber# 6 Situations# UUUUPU } { OrderNumber# 7 Situations# UUUUUP } { OrderNumber# 0 Situations# EUUUUU } { OrderNumber# 1 Situations# EUUUUU } ] " ApproximateFreeSpaceShare# 0.999988} === Putting VDisk #3 to read-only === Setting VDisk read-only to 1 for position 3 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:3:0] === Read all 6 blob(s) === SEND TEvGet with key [1:1:0:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:0:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:1:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:1:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:2:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:2:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:3:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:3:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:4:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:4:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:5:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:5:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} === Putting VDisk #4 to read-only === Setting VDisk read-only to 1 for position 4 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:4:0] === Read all 6 blob(s) === SEND TEvGet with key [1:1:0:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:0:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:1:0:0:32768:0] 
TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:1:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:2:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:2:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:3:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:3:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:4:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:4:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:5:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:5:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} === Putting VDisk #5 to read-only === Setting VDisk read-only to 1 for position 5 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:5:0] === Read all 6 blob(s) === SEND TEvGet with key [1:1:0:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:0:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:1:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:1:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:2:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:2:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:3:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:3:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:4:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:4:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:5:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:5:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} === Putting VDisk #6 to read-only === Setting VDisk read-only to 1 for position 6 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:6:0] === Read all 6 blob(s) === SEND TEvGet with key [1:1:0:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:0:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:1:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:1:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:2:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:2:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:3:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:3:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:4:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:4:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:5:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:5:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} === Putting VDisk #0 to normal === Setting VDisk read-only to 0 for position 0 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:0:0] === Putting VDisk #1 to normal === Setting VDisk read-only to 0 for position 1 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:1:0] === Putting VDisk #2 to normal === Setting VDisk read-only to 0 for position 2 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:2:0] === Putting VDisk #3 to normal === Setting VDisk read-only to 0 for position 3 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:3:0] === Putting VDisk #4 to normal === Setting VDisk read-only to 0 for position 4 Invoking 
SetVDiskReadOnly for vdisk [82000000:1:0:4:0] === Putting VDisk #5 to normal === Setting VDisk read-only to 0 for position 5 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:5:0] === Putting VDisk #6 to normal === Setting VDisk read-only to 0 for position 6 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:6:0] SEND TEvPut with key [1:1:6:0:0:131072:0] TEvPutResult: TEvPutResult {Id# [1:1:6:0:0:131072:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_read_only_vdisk/unittest >> ReadOnlyVDisk::TestGetWithMustRestoreFirst [GOOD] Test command err: RandomSeed# 10710363045291555244 === Trying to put and get a blob === SEND TEvPut with key [1:1:0:0:0:131072:0] TEvPutResult: TEvPutResult {Id# [1:1:0:0:0:131072:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} === Read all 1 blob(s) === SEND TEvGet with key [1:1:0:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:0:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} === Putting VDisk #0 to read-only === Setting VDisk read-only to 1 for position 0 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:0:0] === Write 10 blobs, expect some VDisks refuse parts but writes go through === SEND TEvPut with key [1:1:1:0:0:32768:0] 2025-05-07T08:50:09.722717Z 1 00h01m30.060512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5309:698] TEvPutResult: TEvPutResult {Id# [1:1:1:0:0:32768:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:2:0:0:131072:0] 2025-05-07T08:50:09.735996Z 1 00h01m30.060512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5309:698] TEvPutResult: TEvPutResult {Id# [1:1:2:0:0:131072:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:3:0:0:32768:0] 2025-05-07T08:50:09.741843Z 1 00h01m30.060512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5309:698] TEvPutResult: TEvPutResult {Id# [1:1:3:0:0:32768:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:4:0:0:131072:0] 2025-05-07T08:50:09.744856Z 1 00h01m30.060512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5309:698] TEvPutResult: TEvPutResult {Id# [1:1:4:0:0:131072:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:5:0:0:32768:0] TEvPutResult: TEvPutResult {Id# [1:1:5:0:0:32768:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:6:0:0:131072:0] TEvPutResult: TEvPutResult {Id# [1:1:6:0:0:131072:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:7:0:0:32768:0] 2025-05-07T08:50:09.753556Z 1 00h01m30.060512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5309:698] TEvPutResult: TEvPutResult {Id# [1:1:7:0:0:32768:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:8:0:0:131072:0] 2025-05-07T08:50:09.756458Z 1 00h01m30.060512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5309:698] TEvPutResult: TEvPutResult {Id# [1:1:8:0:0:131072:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} 
SEND TEvPut with key [1:1:9:0:0:32768:0] 2025-05-07T08:50:09.759431Z 1 00h01m30.060512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5309:698] TEvPutResult: TEvPutResult {Id# [1:1:9:0:0:32768:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:10:0:0:131072:0] 2025-05-07T08:50:09.762285Z 1 00h01m30.060512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5309:698] TEvPutResult: TEvPutResult {Id# [1:1:10:0:0:131072:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} === Read all 11 blob(s) === SEND TEvGet with key [1:1:0:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:0:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:1:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:1:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:2:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:2:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:3:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:3:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:4:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:4:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:5:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:5:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:6:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:6:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:7:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:7:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:8:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:8:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:9:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:9:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:10:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:10:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} === Put 2 more VDisks to read-only === Setting VDisk read-only to 1 for position 1 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:1:0] Setting VDisk read-only to 1 for position 2 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:2:0] === Write 10 more blobs, expect errors === SEND TEvPut with key [1:1:11:0:0:32768:0] 2025-05-07T08:50:11.845768Z 1 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5309:698] 2025-05-07T08:50:11.845912Z 3 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5323:712] 2025-05-07T08:50:11.846076Z 2 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5316:705] 2025-05-07T08:50:11.846993Z 1 00h05m30.160512s :BS_PROXY_PUT ERROR: [739995be8173adae] Result# TEvPutResult {Id# [1:1:11:0:0:32768:0] Status# ERROR StatusFlags# { } ErrorReason# "TRestoreStrategy saw optimisticState# EBS_DISINTEGRATED GroupId# 2181038080 BlobId# [1:1:11:0:0:32768:0] Reported ErrorReasons# [ { OrderNumber# 0 VDiskId# 
[82000000:1:0:0:0] NodeId# 1 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 1 VDiskId# [82000000:1:0:1:0] NodeId# 2 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 2 VDiskId# [82000000:1:0:2:0] NodeId# 3 ErrorReasons# [ "VDisk is in read-only mode", ] } ] Part situations# [ { OrderNumber# 5 Situations# SUUUUU } { OrderNumber# 6 Situations# USUUUU } { OrderNumber# 7 Situations# UUSUUU } { OrderNumber# 0 Situations# UUUEUU } { OrderNumber# 1 Situations# UUUUEU } { OrderNumber# 2 Situations# UUUUUE } { OrderNumber# 3 Situations# UUUSUU } { OrderNumber# 4 Situations# UUUUUS } ] " ApproximateFreeSpaceShare# 0.999988} GroupId# 2181038080 Marker# BPP12 TEvPutResult: TEvPutResult {Id# [1:1:11:0:0:32768:0] Status# ERROR StatusFlags# { } ErrorReason# "TRestoreStrategy saw optimisticState# EBS_DISINTEGRATED GroupId# 2181038080 BlobId# [1:1:11:0:0:32768:0] Reported ErrorReasons# [ { OrderNumber# 0 VDiskId# [82000000:1:0:0:0] NodeId# 1 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 1 VDiskId# [82000000:1:0:1:0] NodeId# 2 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 2 VDiskId# [82000000:1:0:2:0] NodeId# 3 ErrorReasons# [ "VDisk is in read-only mode", ] } ] Part situations# [ { OrderNumber# 5 Situations# SUUUUU } { OrderNumber# 6 Situations# USUUUU } { OrderNumber# 7 Situations# UUSUUU } { OrderNumber# 0 Situations# UUUEUU } { OrderNumber# 1 Situations# UUUUEU } { OrderNumber# 2 Situations# UUUUUE } { OrderNumber# 3 Situations# UUUSUU } { OrderNumber# 4 Situations# UUUUUS } ] " ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:12:0:0:131072:0] 2025-05-07T08:50:11.848725Z 1 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5309:698] 2025-05-07T08:50:11.849143Z 2 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5316:705] 2025-05-07T08:50:11.850460Z 3 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5323:712] TEvPutResult: TEvPutResult {Id# [1:1:12:0:0:131072:0] Status# ERROR StatusFlags# { } ErrorReason# "TRestoreStrategy saw optimisticState# EBS_DISINTEGRATED GroupId# 2181038080 BlobId# [1:1:12:0:0:131072:0] Reported ErrorReasons# [ { OrderNumber# 0 VDiskId# [82000000:1:0:0:0] NodeId# 1 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 1 VDiskId# [82000000:1:0:1:0] NodeId# 2 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 2 VDiskId# [82000000:1:0:2:0] NodeId# 3 ErrorReasons# [ "VDisk is in read-only mode", ] } ] Part situations# [ { OrderNumber# 4 Situations# SUUUUU } { OrderNumber# 5 Situations# USUUUU } { OrderNumber# 6 Situations# UUSUUU } { OrderNumber# 7 Situations# UUUSUU } { OrderNumber# 0 Situations# UUUUEU } { OrderNumber# 1 Situations# UUUUUE } { OrderNumber# 2 Situations# UUUUEU } { OrderNumber# 3 Situations# UUUUUS } ] " ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:13:0:0:32768:0] 2025-05-07T08:50:11.852514Z 1 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5309:698] 2025-05-07T08:50:11.853339Z 2 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5316:705] 2025-05-07T08:50:11.854353Z 3 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in 
read-only Sender# [1:5323:712] TEvPutResult: TEvPutResult {Id# [1:1:13:0:0:32768:0] Status# ERROR StatusFlags# { } ErrorReason# "TRestoreStrategy saw optimisticState# EBS_DISINTEGRATED GroupId# 2181038080 BlobId# [1:1:13:0:0:32768:0] Reported ErrorReasons# [ { OrderNumber# 0 VDiskId# [82000000:1:0:0:0] NodeId# 1 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 1 VDiskId# [82000000:1:0:1:0] NodeId# 2 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 2 VDiskId# [82000000:1:0:2:0] NodeId# 3 ErrorReasons# [ "VDisk is in read-only mode", ] } ] Part situations# [ { OrderNumber# 3 Situations# PUUUUU } { OrderNumber# 4 Situations# UPUUUU } { OrderNumber# 5 Situations# UUPUUU } { OrderNumber# 6 Situations# UUUPUU } { OrderNumber# 7 Situations# UUUUPU } { OrderNumber# 0 Situations# UUUUUE } { OrderNumber# 1 Situations# UUUUUE } { OrderNumber# 2 Situations# UUUUUE } ] " ApproximateFreeSpaceShare# 0.999963} SEND TEvPut with key [1:1:14:0:0:131072:0] 2025-05-07T08:50:11.855735Z 3 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5323:712] 2025-05-07T08:50:11.856935Z 1 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5309:698] 2025-05-07T08:50:11.857626Z 2 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5316:705] TEvPutResult: TEvPutResult {Id# [1:1:14:0:0:131072:0] Status# ERROR StatusFlags# { } ErrorReason# "TRestoreStrategy saw optimisticState# EBS_DISINTEGRATED GroupId# 2181038080 BlobId# [1:1:14:0:0:131072:0] Reported ErrorReasons# [ { OrderNumber# 2 VDiskId# [82000000:1:0:2:0] NodeId# 3 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 0 VDiskId# [82000000:1:0:0:0] NodeId# 1 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 1 VDiskId# [82000000:1:0:1:0] NodeId# 2 ErrorReasons# [ "VDisk is in read-only mode", ] } ] Part situations# [ { OrderNumber# 2 Situations# EUUUUU } { OrderNumber# 3 Situations# UPUUUU } { OrderNumber# 4 Situations# UUPUUU } { OrderNumber# 5 Situations# UUUPUU } { OrderNumber# 6 Situations# UUUUPU } { OrderNumber# 7 Situations# UUUUUP } { OrderNumber# 0 Situations# EUUUUU } { OrderNumber# 1 Situations# EUUUUU } ] " ApproximateFreeSpaceShare# 0.999963} SEND TEvPut with key [1:1:15:0:0:32768:0] 2025-05-07T08:50:11.858990Z 3 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5323:712] 2025-05-07T08:50:11.859085Z 2 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5316:705] 2025-05-07T08:50:11.860001Z 1 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5309:698] TEvPutResult: TEvPutResult {Id# [1:1:15:0:0:32768:0] Status# ERROR StatusFlags# { } ErrorReason# "TRestoreStrategy saw optimisticState# EBS_DISINTEGRATED GroupId# 2181038080 BlobId# [1:1:15:0:0:32768:0] Reported ErrorReasons# [ { OrderNumber# 1 VDiskId# [82000000:1:0:1:0] NodeId# 2 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 2 VDiskId# [82000000:1:0:2:0] NodeId# 3 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 0 VDiskId# [82000000:1:0:0:0] NodeId# 1 ErrorReasons# [ "VDisk is in read-only mode", ] } ] Part situations# [ { OrderNumber# 1 Situations# EUUUUU } { OrderNumber# 2 
Situations# UEUUUU } { OrderNumber# 3 Situations# UUSUUU } { OrderNumber# 4 Situations# UUUSUU } { OrderNumber# 5 Situations# UUUUSU } { OrderNumber# 6 Situations# UUUUUS } { OrderNumber# 7 Situations# USUUUU } { OrderNumber# 0 Situations# EUUUUU } ] " ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:16:0:0:131072:0] 2025-05-07T08:50:11.861824Z 3 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5323:712] 2025-05-07T08:50:11.861914Z 2 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5316:705] 2025-05-07T08:50:11.862982Z 1 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5309:698] TEvPutResult: TEvPutResult {Id# [1:1:16:0:0:131072:0] Status# ERROR StatusFlags# { } ErrorReason# "TRestoreStrategy saw optimisticState# EBS_DISINTEGRATED GroupId# 2181038080 BlobId# [1:1:16:0:0:131072:0] Reported ErrorReasons# [ { OrderNumber# 1 VDiskId# [82000000:1:0:1:0] NodeId# 2 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 2 VDiskId# [82000000:1:0:2:0] NodeId# 3 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 0 VDiskId# [82000000:1:0:0:0] NodeId# 1 ErrorReasons# [ "VDisk is in read-only mode", ] } ] Part situations# [ { OrderNumber# 1 Situations# EUUUUU } { OrderNumber# 2 Situations# UEUUUU } { OrderNumber# 3 Situations# UUSUUU } { OrderNumber# 4 Situations# UUUSUU } { OrderNumber# 5 Situations# UUUUSU } { OrderNumber# 6 Situations# UUUUUS } { OrderNumber# 7 Situations# USUUUU } { OrderNumber# 0 Situations# EUUUUU } ] " ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:17:0:0:32768:0] 2025-05-07T08:50:11.864669Z 1 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5309:698] 2025-05-07T08:50:11.864911Z 3 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5323:712] 2025-05-07T08:50:11.864976Z 2 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5316:705] TEvPutResult: TEvPutResult {Id# [1:1:17:0:0:32768:0] Status# ERROR StatusFlags# { } ErrorReason# "TRestoreStrategy saw optimisticState# EBS_DISINTEGRATED GroupId# 2181038080 BlobId# [1:1:17:0:0:32768:0] Reported ErrorReasons# [ { OrderNumber# 0 VDiskId# [82000000:1:0:0:0] NodeId# 1 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 1 VDiskId# [82000000:1:0:1:0] NodeId# 2 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 2 VDiskId# [82000000:1:0:2:0] NodeId# 3 ErrorReasons# [ "VDisk is in read-only mode", ] } ] Part situations# [ { OrderNumber# 0 Situations# EUUUUU } { OrderNumber# 1 Situations# UEUUUU } { OrderNumber# 2 Situations# UUEUUU } { OrderNumber# 3 Situations# UUUSUU } { OrderNumber# 4 Situations# UUUUSU } { OrderNumber# 5 Situations# UUUUUS } { OrderNumber# 6 Situations# SUUUUU } { OrderNumber# 7 Situations# UUSUUU } ] " ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:18:0:0:131072:0] 2025-05-07T08:50:11.867184Z 1 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5309:698] 2025-05-07T08:50:11.867411Z 2 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# 
[1:5316:705] 2025-05-07T08:50:11.867533Z 3 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5323:712] TEvPutResult: TEvPutResult {Id# [1:1:18:0:0:131072:0] Status# ERROR StatusFlags# { } ErrorReason# "TRestoreStrategy saw optimisticState# EBS_DISINTEGRATED GroupId# 2181038080 BlobId# [1:1:18:0:0:131072:0] Reported ErrorReasons# [ { OrderNumber# 0 VDiskId# [82000000:1:0:0:0] NodeId# 1 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 1 VDiskId# [82000000:1:0:1:0] NodeId# 2 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 2 VDiskId# [82000000:1:0:2:0] NodeId# 3 ErrorReasons# [ "VDisk is in read-only mode", ] } ] Part situations# [ { OrderNumber# 7 Situations# SUUUUU } { OrderNumber# 0 Situations# UEUUUU } { OrderNumber# 1 Situations# UUEUUU } { OrderNumber# 2 Situations# UUUEUU } { OrderNumber# 3 Situations# UUUUSU } { OrderNumber# 4 Situations# UUUUUS } { OrderNumber# 5 Situations# USUUUU } { OrderNumber# 6 Situations# UUSUUU } ] " ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:19:0:0:32768:0] 2025-05-07T08:50:11.869999Z 1 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5309:698] 2025-05-07T08:50:11.870255Z 3 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5323:712] 2025-05-07T08:50:11.870358Z 2 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5316:705] TEvPutResult: TEvPutResult {Id# [1:1:19:0:0:32768:0] Status# ERROR StatusFlags# { } ErrorReason# "TRestoreStrategy saw optimisticState# EBS_DISINTEGRATED GroupId# 2181038080 BlobId# [1:1:19:0:0:32768:0] Reported ErrorReasons# [ { OrderNumber# 0 VDiskId# [82000000:1:0:0:0] NodeId# 1 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 1 VDiskId# [82000000:1:0:1:0] NodeId# 2 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 2 VDiskId# [82000000:1:0:2:0] NodeId# 3 ErrorReasons# [ "VDisk is in read-only mode", ] } ] Part situations# [ { OrderNumber# 6 Situations# SUUUUU } { OrderNumber# 7 Situations# USUUUU } { OrderNumber# 0 Situations# UUEUUU } { OrderNumber# 1 Situations# UUUEUU } { OrderNumber# 2 Situations# UUUUEU } { OrderNumber# 3 Situations# UUUUUS } { OrderNumber# 4 Situations# UUSUUU } { OrderNumber# 5 Situations# UUUUSU } ] " ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:20:0:0:131072:0] 2025-05-07T08:50:11.872755Z 1 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5309:698] 2025-05-07T08:50:11.872883Z 3 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5323:712] 2025-05-07T08:50:11.873022Z 2 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5316:705] TEvPutResult: TEvPutResult {Id# [1:1:20:0:0:131072:0] Status# ERROR StatusFlags# { } ErrorReason# "TRestoreStrategy saw optimisticState# EBS_DISINTEGRATED GroupId# 2181038080 BlobId# [1:1:20:0:0:131072:0] Reported ErrorReasons# [ { OrderNumber# 0 VDiskId# [82000000:1:0:0:0] NodeId# 1 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 1 VDiskId# [82000000:1:0:1:0] NodeId# 2 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 2 VDiskId# 
[82000000:1:0:2:0] NodeId# 3 ErrorReasons# [ "VDisk is in read-only mode", ] } ] Part situations# [ { OrderNumber# 5 Situations# SUUUUU } { OrderNumber# 6 Situations# USUUUU } { OrderNumber# 7 Situations# UUSUUU } { OrderNumber# 0 Situations# UUUEUU } { OrderNumber# 1 Situations# UUUUEU } { OrderNumber# 2 Situations# UUUUUE } { OrderNumber# 3 Situations# UUUSUU } { OrderNumber# 4 Situations# UUUUUS } ] " ApproximateFreeSpaceShare# 0.999988} SEND TEvGet with key [1:1:11:0:0:32768:0] 2025-05-07T08:50:11.879422Z 1 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5310:699] 2025-05-07T08:50:11.879641Z 2 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5317:706] 2025-05-07T08:50:11.879711Z 3 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5324:713] 2025-05-07T08:50:11.880414Z 1 00h05m30.160512s :BS_PROXY_GET ERROR: [d48294b7284f6c8d] Response# TEvGetResult {Status# ERROR ResponseSz# 1 {[1:1:11:0:0:32768:0] ERROR Size# 0 RequestedSize# 32768} ErrorReason# "TStrategyBase saw optimisticState# EBS_DISINTEGRATED GroupId# 2181038080 BlobId# [1:1:11:0:0:32768:0] Reported ErrorReasons# [ { OrderNumber# 0 VDiskId# [82000000:1:0:0:0] NodeId# 1 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 1 VDiskId# [82000000:1:0:1:0] NodeId# 2 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 2 VDiskId# [82000000:1:0:2:0] NodeId# 3 ErrorReasons# [ "VDisk is in read-only mode", ] } ] Part situations# [ { OrderNumber# 5 Situations# PUUUUU } { OrderNumber# 6 Situations# UPUUUU } { OrderNumber# 7 Situations# UUPUUU } { OrderNumber# 0 Situations# UUUEUU } { OrderNumber# 1 Situations# UUUUEU } { OrderNumber# 2 Situations# UUUUUE } { OrderNumber# 3 Situations# AAAPAA } { OrderNumber# 4 Situations# AAAAAA } ] "} Marker# BPG29 2025-05-07T08:50:11.880575Z 2 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5317:706] 2025-05-07T08:50:11.880653Z 3 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5324:713] TEvGetResult: TEvGetResult {Status# ERROR ResponseSz# 1 {[1:1:11:0:0:32768:0] ERROR Size# 0 RequestedSize# 32768} ErrorReason# "TStrategyBase saw optimisticState# EBS_DISINTEGRATED GroupId# 2181038080 BlobId# [1:1:11:0:0:32768:0] Reported ErrorReasons# [ { OrderNumber# 0 VDiskId# [82000000:1:0:0:0] NodeId# 1 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 1 VDiskId# [82000000:1:0:1:0] NodeId# 2 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 2 VDiskId# [82000000:1:0:2:0] NodeId# 3 ErrorReasons# [ "VDisk is in read-only mode", ] } ] Part situations# [ { OrderNumber# 5 Situations# PUUUUU } { OrderNumber# 6 Situations# UPUUUU } { OrderNumber# 7 Situations# UUPUUU } { OrderNumber# 0 Situations# UUUEUU } { OrderNumber# 1 Situations# UUUUEU } { OrderNumber# 2 Situations# UUUUUE } { OrderNumber# 3 Situations# AAAPAA } { OrderNumber# 4 Situations# AAAAAA } ] "} >> TColumnShardTestReadWrite::WriteOverload+InStore-WithWritePortionsOnInsert ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_balancing/unittest >> VDiskBalancing::TestRandom_Mirror3dc [GOOD] Test command err: RandomSeed# 9638440708679657801 Step = 0 SEND TEvPut with key [1:1:0:0:0:51943:0] 
TEvPutResult: TEvPutResult {Id# [1:1:0:0:0:51943:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Step = 1 SEND TEvPut with key [1:1:1:0:0:37868:0] TEvPutResult: TEvPutResult {Id# [1:1:1:0:0:37868:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Step = 2 SEND TEvPut with key [1:1:2:0:0:85877:0] TEvPutResult: TEvPutResult {Id# [1:1:2:0:0:85877:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Step = 3 SEND TEvPut with key [1:1:3:0:0:192081:0] TEvPutResult: TEvPutResult {Id# [1:1:3:0:0:192081:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Step = 4 SEND TEvPut with key [1:1:4:0:0:267203:0] TEvPutResult: TEvPutResult {Id# [1:1:4:0:0:267203:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Stop node 3 2025-05-07T08:46:36.503476Z 1 00h01m00.010512s :PIPE_SERVER ERROR: [72057594037932033] NodeDisconnected NodeId# 4 Step = 5 SEND TEvPut with key [1:1:5:0:0:502135:0] TEvPutResult: TEvPutResult {Id# [1:1:5:0:0:502135:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Step = 6 SEND TEvPut with key [1:1:6:0:0:377427:0] TEvPutResult: TEvPutResult {Id# [1:1:6:0:0:377427:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Stop node 4 2025-05-07T08:46:36.777361Z 1 00h01m10.060512s :PIPE_SERVER ERROR: [72057594037932033] NodeDisconnected NodeId# 5 Step = 7 SEND TEvPut with key [1:1:7:0:0:48850:0] TEvPutResult: TEvPutResult {Id# [1:1:7:0:0:48850:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Step = 8 SEND TEvPut with key [1:1:8:0:0:411812:0] TEvPutResult: TEvPutResult {Id# [1:1:8:0:0:411812:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Step = 9 SEND TEvPut with key [1:1:9:0:0:293766:0] TEvPutResult: TEvPutResult {Id# [1:1:9:0:0:293766:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Start node 3 Step = 10 SEND TEvPut with key [1:1:10:0:0:127358:0] TEvPutResult: TEvPutResult {Id# [1:1:10:0:0:127358:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Step = 11 SEND TEvPut with key [1:1:11:0:0:282945:0] TEvPutResult: TEvPutResult {Id# [1:1:11:0:0:282945:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999963} Step = 12 SEND TEvPut with key [1:1:12:0:0:34864:0] TEvPutResult: TEvPutResult {Id# [1:1:12:0:0:34864:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Step = 13 SEND TEvPut with key [1:1:13:0:0:363096:0] TEvPutResult: TEvPutResult {Id# [1:1:13:0:0:363096:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Step = 14 SEND TEvPut with key [1:1:14:0:0:179270:0] TEvPutResult: TEvPutResult {Id# [1:1:14:0:0:179270:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Step = 15 SEND TEvPut with key [1:1:15:0:0:358611:0] TEvPutResult: TEvPutResult {Id# [1:1:15:0:0:358611:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Step = 16 SEND TEvPut with key [1:1:16:0:0:136892:0] TEvPutResult: TEvPutResult {Id# [1:1:16:0:0:136892:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Step = 17 SEND TEvPut with key [1:1:17:0:0:517733:0] TEvPutResult: TEvPutResult {Id# [1:1:17:0:0:517733:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Step = 18 SEND TEvPut with key [1:1:18:0:0:250802:0] TEvPutResult: TEvPutResult {Id# [1:1:18:0:0:250802:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Step = 19 SEND TEvPut with key [1:1:19:0:0:199490:0] TEvPutResult: TEvPutResult {Id# [1:1:19:0:0:199490:0] 
Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Step = 20 SEND TEvPut with key [1:1:20:0:0:244269:0] TEvPutResult: TEvPutResult {Id# [1:1:20:0:0:244269:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999963} Step = 21 SEND TEvPut with key [1:1:21:0:0:329606:0] TEvPutResult: TEvPutResult {Id# [1:1:21:0:0:329606:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Step = 22 SEND TEvPut with key [1:1:22:0:0:322338:0] TEvPutResult: TEvPutResult {Id# [1:1:22:0:0:322338:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999963} Step = 23 SEND TEvPut with key [1:1:23:0:0:519258:0] TEvPutResult: TEvPutResult {Id# [1:1:23:0:0:519258:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Step = 24 SEND TEvPut with key [1:1:24:0:0:56036:0] TEvPutResult: TEvPutResult {Id# [1:1:24:0:0:56036:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999963} Step = 25 SEND TEvPut with key [1:1:25:0:0:514591:0] TEvPutResult: TEvPutResult {Id# [1:1:25:0:0:514591:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999963} Stop node 7 2025-05-07T08:46:38.521037Z 1 00h01m30.100512s :PIPE_SERVER ERROR: [72057594037932033] NodeDisconnected NodeId# 8 Step = 26 SEND TEvPut with key [1:1:26:0:0:5927:0] TEvPutResult: TEvPutResult {Id# [1:1:26:0:0:5927:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999939} Step = 27 SEND TEvPut with key [1:1:27:0:0:148482:0] TEvPutResult: TEvPutResult {Id# [1:1:27:0:0:148482:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999939} Step = 28 SEND TEvPut with key [1:1:28:0:0:6043:0] TEvPutResult: TEvPutResult {Id# [1:1:28:0:0:6043:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999939} Step = 29 SEND TEvPut with key [1:1:29:0:0:265170:0] TEvPutResult: TEvPutResult {Id# [1:1:29:0:0:265170:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999939} Step = 30 SEND TEvPut with key [1:1:30:0:0:264716:0] TEvPutResult: TEvPutResult {Id# [1:1:30:0:0:264716:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999939} Compact vdisk 3 Step = 31 SEND TEvPut with key [1:1:31:0:0:168116:0] TEvPutResult: TEvPutResult {Id# [1:1:31:0:0:168116:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999915} Step = 32 SEND TEvPut with key [1:1:32:0:0:444749:0] TEvPutResult: TEvPutResult {Id# [1:1:32:0:0:444749:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999927} Step = 33 SEND TEvPut with key [1:1:33:0:0:350254:0] TEvPutResult: TEvPutResult {Id# [1:1:33:0:0:350254:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999915} Step = 34 SEND TEvPut with key [1:1:34:0:0:145950:0] TEvPutResult: TEvPutResult {Id# [1:1:34:0:0:145950:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999927} Step = 35 SEND TEvPut with key [1:1:35:0:0:358611:0] TEvPutResult: TEvPutResult {Id# [1:1:35:0:0:358611:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999915} Step = 36 SEND TEvPut with key [1:1:36:0:0:139148:0] TEvPutResult: TEvPutResult {Id# [1:1:36:0:0:139148:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999915} Step = 37 SEND TEvPut with key [1:1:37:0:0:200198:0] TEvPutResult: TEvPutResult {Id# [1:1:37:0:0:200198:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999927} Step = 38 SEND TEvPut with key [1:1:38:0:0:185170:0] TEvPutResult: TEvPutResult {Id# [1:1:38:0:0:185170:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999915} Step = 39 SEND TEvPut with key [1:1:39:0:0:297271:0] TEvPutResult: TEvPutResult {Id# 
[1:1:39:0:0:297271:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999927} Step = 40 SEND TEvPut with key [1:1:40:0:0:419670:0] TEvPutResult: TEvPutResult {Id# [1:1:40:0:0:419670:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999915} Step = 41 SEND TEvPut with key [1:1:41:0:0:218956:0] TEvPutResult: TEvPutResult {Id# [1:1:41:0:0:218956:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999927} Step = 42 SEND TEvPut with key [1:1:42:0:0:154723:0] TEvPutResult: TEvPutResult {Id# [1:1:42:0:0:154723:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999927} Step = 43 SEND TEvPut with key [1:1:43:0:0:13332:0] TEvPutResult: TEvPutResult {Id# [1:1:43:0:0:13332:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999915} Step = 44 SEND TEvPut with key [1:1:44:0:0:448892:0] TEvPutResult: TEvPutResult {Id# [1:1:44:0:0:448892:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999927} Step = 45 SEND TEvPut with key [1:1:45:0:0:103231:0] TEvPutResult: TEvPutResult {Id# [1:1:45:0:0:103231:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999915} Step = 46 SEND TEvPut with key [1:1:46:0:0:295973:0] TEvPutResult: TEvPutResult {Id# [1:1:46:0:0:295973:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999915} Step = 47 SEND TEvPut with key [1:1:47:0:0:402799:0] TEvPutResult: TEvPutResult {Id# [1:1:47:0:0:402799:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999915} Step = 48 SEND TEvPut with key [1:1:48:0:0:165045:0] TEvPutResult: TEvPutResult {Id# [1:1:48:0:0:165045:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999915} Step = 49 SEND TEvPut with key [1:1:49:0:0:360099:0] TEvPutResult: TEvPutResult {Id# [1:1:49:0:0:360099:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999915} Step = 50 SEND TEvPut with key [1:1:50:0:0:97222:0] TEvPutResult: TEvPutResult {Id# [1:1:50:0:0:97222:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999915} Step = 51 SEND TEvPut with key [1:1:51:0:0:303396:0] TEvPutResult: TEvPutResult {Id# [1:1:51:0:0:303396:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999915} Step = 52 SEND TEvPut with key [1:1:52:0:0:304876:0] TEvPutResult: TEvPutResult {Id# [1:1:52:0:0:304876:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999927} Step = 53 SEND TEvPut with key [1:1:53:0:0:375063:0] TEvPutResult: TEvPutResult {Id# [1:1:53:0:0:375063:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999927} Start node 4 Step = 54 SEND TEvPut with key [1:1:54:0:0:288044:0] TEvPutResult: TEvPutResult {Id# [1:1:54:0:0:288044:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999866} Step = 55 SEND TEvPut with key [1:1:55:0:0:181559:0] TEvPutResult: TEvPutResult {Id# [1:1:55:0:0:181559:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999915} Step = 56 SEND TEvPut with key [1:1:56:0:0:42993:0] TEvPutResult: TEvPutResult {Id# [1:1:56:0:0:42993:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999866} Step = 57 SEND TEvPut with key [1:1:57:0:0:424399:0] TEvPutResult: TEvPutResult {Id# [1:1:57:0:0:424399:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999915} Step = 58 SEND TEvPut with key [1:1:58:0:0:169341:0] TEvPutResult: TEvPutResult {Id# [1:1:58:0:0:169341:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999902} Step = 59 SEND TEvPut with key [1:1:59:0:0:405932:0] TEvPutResult: TEvPutResult {Id# [1:1:59:0:0:405932:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999902} Step = 60 SEND 
TEvPut with key [1:1:60:0:0:190148:0] TEvPutResult: TEvPutResult {Id# [1:1:60:0:0:190148:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999915} Stop node 3 2025-05-07T08:46:41.964125Z 1 00h02m00.150512s :PIPE_SERVER ERROR: [72057594037932033] NodeDisconnected NodeId# 4 Wipe node 0 2025-05-07T08:46:42.246254Z 1 00h02m10.161024s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2025-05-07T08:46:42.248407Z 1 00h02m10.161024s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 12730265189654500628] Step = 61 SEND TEvPut with key [1:1:61:0:0:500240:0] 2025-05-07T08:46:44.731383Z 1 00h03m50.161024s :BS_PROXY ERROR: Group# 2181038080 StateEstablishingSessions Wakeup TIMEOUT Marker# DSP12 TEvPutResult: TEvPutResult {Id# [1:1:61:0:0:500240:0] Status# ERROR StatusFlags# { } ErrorReason# "Timeout while establishing sessions (DSPE4)." ApproximateFreeSpaceShare# 0} Step = 62 SEND TEvPut with key [1:1:62:0:0:354994:0] TEvPutResult: TEvPutResult {Id# [1:1:62:0:0:354994:0] Status# ERROR StatusFlags# { } ErrorReason# "Timeout while establishing sessions (DSPE4)." ApproximateFreeSpac ... :945:0:0:76599:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999707} Step = 946 SEND TEvPut with key [1:1:946:0:0:24822:0] TEvPutResult: TEvPutResult {Id# [1:1:946:0:0:24822:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999756} Compact vdisk 2 Step = 947 SEND TEvPut with key [1:1:947:0:0:100167:0] TEvPutResult: TEvPutResult {Id# [1:1:947:0:0:100167:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999707} Step = 948 SEND TEvPut with key [1:1:948:0:0:112126:0] TEvPutResult: TEvPutResult {Id# [1:1:948:0:0:112126:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999695} Step = 949 SEND TEvPut with key [1:1:949:0:0:525378:0] TEvPutResult: TEvPutResult {Id# [1:1:949:0:0:525378:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999731} Step = 950 SEND TEvPut with key [1:1:950:0:0:410875:0] TEvPutResult: TEvPutResult {Id# [1:1:950:0:0:410875:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999707} Step = 951 SEND TEvPut with key [1:1:951:0:0:113503:0] TEvPutResult: TEvPutResult {Id# [1:1:951:0:0:113503:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999695} Step = 952 SEND TEvPut with key [1:1:952:0:0:431140:0] TEvPutResult: TEvPutResult {Id# [1:1:952:0:0:431140:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999731} Step = 953 SEND TEvPut with key [1:1:953:0:0:509293:0] TEvPutResult: TEvPutResult {Id# [1:1:953:0:0:509293:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999695} Stop node 3 2025-05-07T08:49:42.414674Z 1 00h28m00.953072s :PIPE_SERVER ERROR: [72057594037932033] NodeDisconnected NodeId# 4 Step = 954 SEND TEvPut with key [1:1:954:0:0:286395:0] TEvPutResult: TEvPutResult {Id# [1:1:954:0:0:286395:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999731} Stop node 1 2025-05-07T08:49:42.762523Z 1 00h28m10.961024s :PIPE_SERVER ERROR: [72057594037932033] NodeDisconnected NodeId# 2 Step = 955 SEND TEvPut with key [1:1:955:0:0:219270:0] TEvPutResult: TEvPutResult {Id# [1:1:955:0:0:219270:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999805} Start node 1 Step = 956 SEND TEvPut with key [1:1:956:0:0:274971:0] TEvPutResult: TEvPutResult {Id# [1:1:956:0:0:274971:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 
0.999805} Step = 957 SEND TEvPut with key [1:1:957:0:0:487884:0] TEvPutResult: TEvPutResult {Id# [1:1:957:0:0:487884:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999731} Start node 3 Step = 958 SEND TEvPut with key [1:1:958:0:0:327302:0] TEvPutResult: TEvPutResult {Id# [1:1:958:0:0:327302:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999707} Step = 959 SEND TEvPut with key [1:1:959:0:0:385917:0] TEvPutResult: TEvPutResult {Id# [1:1:959:0:0:385917:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999719} Step = 960 SEND TEvPut with key [1:1:960:0:0:200998:0] TEvPutResult: TEvPutResult {Id# [1:1:960:0:0:200998:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999707} Step = 961 SEND TEvPut with key [1:1:961:0:0:61147:0] TEvPutResult: TEvPutResult {Id# [1:1:961:0:0:61147:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999707} Step = 962 SEND TEvPut with key [1:1:962:0:0:237906:0] TEvPutResult: TEvPutResult {Id# [1:1:962:0:0:237906:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999719} Step = 963 SEND TEvPut with key [1:1:963:0:0:347273:0] TEvPutResult: TEvPutResult {Id# [1:1:963:0:0:347273:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999707} Step = 964 SEND TEvPut with key [1:1:964:0:0:181317:0] TEvPutResult: TEvPutResult {Id# [1:1:964:0:0:181317:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999817} Step = 965 SEND TEvPut with key [1:1:965:0:0:456096:0] TEvPutResult: TEvPutResult {Id# [1:1:965:0:0:456096:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999707} Step = 966 SEND TEvPut with key [1:1:966:0:0:93776:0] TEvPutResult: TEvPutResult {Id# [1:1:966:0:0:93776:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999817} Step = 967 SEND TEvPut with key [1:1:967:0:0:447659:0] TEvPutResult: TEvPutResult {Id# [1:1:967:0:0:447659:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999817} Step = 968 SEND TEvPut with key [1:1:968:0:0:14298:0] TEvPutResult: TEvPutResult {Id# [1:1:968:0:0:14298:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999707} Step = 969 SEND TEvPut with key [1:1:969:0:0:92781:0] TEvPutResult: TEvPutResult {Id# [1:1:969:0:0:92781:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999817} Step = 970 SEND TEvPut with key [1:1:970:0:0:334566:0] TEvPutResult: TEvPutResult {Id# [1:1:970:0:0:334566:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999719} Stop node 0 2025-05-07T08:49:44.342087Z 9 00h28m40.983984s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [9:127548:350] ServerId# [1:128578:167] TabletId# 72057594037932033 PipeClientId# [9:127548:350] 2025-05-07T08:49:44.342305Z 8 00h28m40.983984s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [8:158241:17] ServerId# [1:158248:4106] TabletId# 72057594037932033 PipeClientId# [8:158241:17] 2025-05-07T08:49:44.342466Z 7 00h28m40.983984s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [7:157165:17] ServerId# [1:157172:3977] TabletId# 72057594037932033 PipeClientId# [7:157165:17] 2025-05-07T08:49:44.342610Z 6 00h28m40.983984s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [6:134225:17] ServerId# [1:134232:1014] TabletId# 72057594037932033 PipeClientId# [6:134225:17] 2025-05-07T08:49:44.342781Z 5 00h28m40.983984s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} 
Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [5:154282:17] ServerId# [1:154290:3599] TabletId# 72057594037932033 PipeClientId# [5:154282:17] 2025-05-07T08:49:44.342925Z 4 00h28m40.983984s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [4:163149:17] ServerId# [1:163159:4704] TabletId# 72057594037932033 PipeClientId# [4:163149:17] 2025-05-07T08:49:44.343054Z 3 00h28m40.983984s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [3:153174:17] ServerId# [1:153184:3474] TabletId# 72057594037932033 PipeClientId# [3:153174:17] 2025-05-07T08:49:44.343185Z 2 00h28m40.983984s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [2:162177:17] ServerId# [1:162184:4594] TabletId# 72057594037932033 PipeClientId# [2:162177:17] Step = 971 SEND TEvPut with key [1:1:971:0:0:439384:0] TEvPutResult: TEvPutResult {Id# [1:1:971:0:0:439384:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999792} Step = 972 SEND TEvPut with key [1:1:972:0:0:252551:0] TEvPutResult: TEvPutResult {Id# [1:1:972:0:0:252551:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999695} Step = 973 SEND TEvPut with key [1:1:973:0:0:39982:0] TEvPutResult: TEvPutResult {Id# [1:1:973:0:0:39982:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999695} Stop node 2 Step = 974 SEND TEvPut with key [1:1:974:0:0:526796:0] TEvPutResult: TEvPutResult {Id# [1:1:974:0:0:526796:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999719} Start node 0 Step = 975 SEND TEvPut with key [1:1:975:0:0:337763:0] TEvPutResult: TEvPutResult {Id# [1:1:975:0:0:337763:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999695} Stop node 2 Step = 976 SEND TEvPut with key [1:1:976:0:0:475740:0] TEvPutResult: TEvPutResult {Id# [1:1:976:0:0:475740:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683} Step = 977 SEND TEvPut with key [1:1:977:0:0:169780:0] TEvPutResult: TEvPutResult {Id# [1:1:977:0:0:169780:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683} Step = 978 SEND TEvPut with key [1:1:978:0:0:481535:0] TEvPutResult: TEvPutResult {Id# [1:1:978:0:0:481535:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683} Step = 979 SEND TEvPut with key [1:1:979:0:0:24668:0] TEvPutResult: TEvPutResult {Id# [1:1:979:0:0:24668:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683} Step = 980 SEND TEvPut with key [1:1:980:0:0:159890:0] TEvPutResult: TEvPutResult {Id# [1:1:980:0:0:159890:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683} Step = 981 SEND TEvPut with key [1:1:981:0:0:111300:0] TEvPutResult: TEvPutResult {Id# [1:1:981:0:0:111300:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683} Step = 982 SEND TEvPut with key [1:1:982:0:0:355914:0] TEvPutResult: TEvPutResult {Id# [1:1:982:0:0:355914:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999792} Step = 983 SEND TEvPut with key [1:1:983:0:0:399106:0] TEvPutResult: TEvPutResult {Id# [1:1:983:0:0:399106:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683} Step = 984 SEND TEvPut with key [1:1:984:0:0:347759:0] TEvPutResult: TEvPutResult {Id# [1:1:984:0:0:347759:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683} Step = 985 SEND TEvPut with key [1:1:985:0:0:261994:0] TEvPutResult: TEvPutResult {Id# [1:1:985:0:0:261994:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999792} Step = 986 SEND TEvPut 
with key [1:1:986:0:0:101043:0] TEvPutResult: TEvPutResult {Id# [1:1:986:0:0:101043:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683} Step = 987 SEND TEvPut with key [1:1:987:0:0:138774:0] TEvPutResult: TEvPutResult {Id# [1:1:987:0:0:138774:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999792} Step = 988 SEND TEvPut with key [1:1:988:0:0:441913:0] TEvPutResult: TEvPutResult {Id# [1:1:988:0:0:441913:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683} Step = 989 SEND TEvPut with key [1:1:989:0:0:134469:0] TEvPutResult: TEvPutResult {Id# [1:1:989:0:0:134469:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999792} Step = 990 SEND TEvPut with key [1:1:990:0:0:123825:0] TEvPutResult: TEvPutResult {Id# [1:1:990:0:0:123825:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999792} Step = 991 SEND TEvPut with key [1:1:991:0:0:40387:0] TEvPutResult: TEvPutResult {Id# [1:1:991:0:0:40387:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683} Step = 992 SEND TEvPut with key [1:1:992:0:0:193000:0] TEvPutResult: TEvPutResult {Id# [1:1:992:0:0:193000:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999792} Stop node 7 2025-05-07T08:49:46.535700Z 1 00h29m21.003072s :PIPE_SERVER ERROR: [72057594037932033] NodeDisconnected NodeId# 8 Step = 993 SEND TEvPut with key [1:1:993:0:0:455894:0] TEvPutResult: TEvPutResult {Id# [1:1:993:0:0:455894:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999658} Compact vdisk 0 Step = 994 SEND TEvPut with key [1:1:994:0:0:54378:0] TEvPutResult: TEvPutResult {Id# [1:1:994:0:0:54378:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999707} Compact vdisk 6 Step = 995 SEND TEvPut with key [1:1:995:0:0:487669:0] TEvPutResult: TEvPutResult {Id# [1:1:995:0:0:487669:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999829} Step = 996 SEND TEvPut with key [1:1:996:0:0:194641:0] TEvPutResult: TEvPutResult {Id# [1:1:996:0:0:194641:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999695} Step = 997 SEND TEvPut with key [1:1:997:0:0:74188:0] TEvPutResult: TEvPutResult {Id# [1:1:997:0:0:74188:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999707} Step = 998 SEND TEvPut with key [1:1:998:0:0:136082:0] TEvPutResult: TEvPutResult {Id# [1:1:998:0:0:136082:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999695} Step = 999 SEND TEvPut with key [1:1:999:0:0:145518:0] TEvPutResult: TEvPutResult {Id# [1:1:999:0:0:145518:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999707} Starting nodes Start compaction 1 Start checking ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> TColumnShardTestReadWrite::ReadAggregate [GOOD] Test command err: FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=8328;columns=19; 2025-05-07T08:50:08.600596Z node 1 :BLOB_CACHE NOTICE: ctor_logger.h:56: MaxCacheDataSize: 20971520 InFlightDataSize: 0 2025-05-07T08:50:08.728928Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-05-07T08:50:08.757368Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-05-07T08:50:08.757676Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-05-07T08:50:08.765659Z node 1 
:TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-05-07T08:50:08.765923Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-05-07T08:50:08.766301Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-05-07T08:50:08.766430Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-05-07T08:50:08.766568Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-05-07T08:50:08.766694Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanInsertionDedup; 2025-05-07T08:50:08.766832Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-05-07T08:50:08.766963Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-05-07T08:50:08.767095Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-05-07T08:50:08.767207Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-05-07T08:50:08.767314Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-05-07T08:50:08.767443Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-05-07T08:50:08.806033Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-05-07T08:50:08.806215Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:137;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=11;current_normalizer=CLASS_NAME=Granules; 2025-05-07T08:50:08.806318Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-05-07T08:50:08.806534Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-05-07T08:50:08.806692Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-05-07T08:50:08.806799Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-05-07T08:50:08.806850Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-05-07T08:50:08.806955Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-05-07T08:50:08.807026Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-05-07T08:50:08.807073Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-05-07T08:50:08.807120Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-05-07T08:50:08.807309Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-05-07T08:50:08.807389Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-05-07T08:50:08.807433Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-05-07T08:50:08.807474Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-05-07T08:50:08.807604Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-05-07T08:50:08.807668Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-05-07T08:50:08.807712Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanInsertionDedup;id=CleanInsertionDedup; 2025-05-07T08:50:08.807745Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=8;type=CleanInsertionDedup; 2025-05-07T08:50:08.807850Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanInsertionDedup;id=8; 2025-05-07T08:50:08.807892Z node 1 :TX_COLUMNSHARD 
WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-05-07T08:50:08.807924Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-05-07T08:50:08.807997Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-05-07T08:50:08.808043Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-05-07T08:50:08.808072Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-05-07T08:50:08.808283Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-05-07T08:50:08.808356Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-05-07T08:50:08.808397Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-05-07T08:50:08.808629Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-05-07T08:50:08.808673Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-05-07T08:50:08.808721Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-05-07T08:50:08.808850Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-05-07T08:50:08.808895Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-05-07T08:50:08.808937Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-05-07T08:50:08.809024Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-05-07T08:50:08.809117Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-05-07T08:50:08.809163Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-05-07T08:50:08.809192Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-05-07T08:50:08.809647Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=48; 2025-05-07T08:50:08.809767Z node 1 :TX_COLUMNSHARD INFO: log. ... 4;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=actor.cpp:84;event=TEvTaskProcessedResult; 2025-05-07T08:50:12.384388Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[2:435:2453];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=merge.cpp:74;event=DoApply;interval_idx=0; 2025-05-07T08:50:12.384428Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[2:435:2453];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=scanner.cpp:21;event=interval_result_received;interval_idx=0;intervalId=76; 2025-05-07T08:50:12.384491Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[2:435:2453];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=scanner.cpp:47;event=interval_result;interval_idx=0;count=1;merger=0;interval_id=76; 2025-05-07T08:50:12.384532Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[2:435:2453];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=scanner.cpp:65;event=intervals_finished; 2025-05-07T08:50:12.384623Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[2:435:2453];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:187;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=4;column_names=i32;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=i16,i32,i8,ts;);;ff=(column_ids=4,19;column_names=i32,jsondoc;);;program_input=(column_ids=4,19;column_names=i32,jsondoc;);;;); 2025-05-07T08:50:12.384658Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[2:435:2453];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:73;event=DoExtractReadyResults;result=1;count=1;finished=1; 2025-05-07T08:50:12.384700Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[2:435:2453];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:198;stage=limit exhausted;limit=limits:(bytes=0;chunks=0);; 2025-05-07T08:50:12.385237Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[2:435:2453];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=actor.cpp:104;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-05-07T08:50:12.385386Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[2:435:2453];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:187;stage=start;iterator=ready_results:(count:1;records_count:1;schema=100: binary 101: binary 102: binary 103: uint64;);indexed_data:(ef=(column_ids=4;column_names=i32;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=i16,i32,i8,ts;);;ff=(column_ids=4,19;column_names=i32,jsondoc;);;program_input=(column_ids=4,19;column_names=i32,jsondoc;);;;); 2025-05-07T08:50:12.385427Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: 
SelfId=[2:435:2453];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:73;event=DoExtractReadyResults;result=0;count=0;finished=1; 2025-05-07T08:50:12.385530Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[2:435:2453];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:229;stage=ready result;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=4;column_names=i32;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=i16,i32,i8,ts;);;ff=(column_ids=4,19;column_names=i32,jsondoc;);;program_input=(column_ids=4,19;column_names=i32,jsondoc;);;;);columns=4;rows=1; 2025-05-07T08:50:12.385597Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[2:435:2453];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:249;stage=data_format;batch_size=26;num_rows=1;batch_columns=100,101,102,103; 2025-05-07T08:50:12.385870Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[2:435:2453];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:365;event=send_data;compute_actor_id=[2:434:2452];bytes=26;rows=1;faults=0;finished=0;fault=0;schema=100: binary 101: binary 102: binary 103: uint64; 2025-05-07T08:50:12.386014Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[2:435:2453];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:269;stage=finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=4;column_names=i32;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=i16,i32,i8,ts;);;ff=(column_ids=4,19;column_names=i32,jsondoc;);;program_input=(column_ids=4,19;column_names=i32,jsondoc;);;;); 2025-05-07T08:50:12.386130Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[2:435:2453];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:187;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=4;column_names=i32;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=i16,i32,i8,ts;);;ff=(column_ids=4,19;column_names=i32,jsondoc;);;program_input=(column_ids=4,19;column_names=i32,jsondoc;);;;); 2025-05-07T08:50:12.386221Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[2:435:2453];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=4;column_names=i32;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=i16,i32,i8,ts;);;ff=(column_ids=4,19;column_names=i32,jsondoc;);;program_input=(column_ids=4,19;column_names=i32,jsondoc;);;;); 2025-05-07T08:50:12.386498Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[2:435:2453];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=actor.cpp:104;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-05-07T08:50:12.386607Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[2:435:2453];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce 
result;fline=actor.cpp:187;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=4;column_names=i32;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=i16,i32,i8,ts;);;ff=(column_ids=4,19;column_names=i32,jsondoc;);;program_input=(column_ids=4,19;column_names=i32,jsondoc;);;;); 2025-05-07T08:50:12.386702Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[2:435:2453];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=4;column_names=i32;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=i16,i32,i8,ts;);;ff=(column_ids=4,19;column_names=i32,jsondoc;);;program_input=(column_ids=4,19;column_names=i32,jsondoc;);;;); 2025-05-07T08:50:12.386743Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: actor.cpp:409: Scan [2:435:2453] finished for tablet 9437184 2025-05-07T08:50:12.387255Z node 2 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[2:435:2453];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=actor.cpp:415;event=scan_finish;compute_actor_id=[2:434:2452];stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.001},{"events":["l_bootstrap","f_processing","f_task_result"],"t":0.003},{"events":["f_ack","l_task_result"],"t":0.013},{"events":["l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.015}],"full":{"a":1746607812371571,"name":"_full_task","f":1746607812371571,"d_finished":0,"c":0,"l":1746607812386827,"d":15256},"events":[{"name":"bootstrap","f":1746607812371834,"d_finished":2737,"c":1,"l":1746607812374571,"d":2737},{"a":1746607812386476,"name":"ack","f":1746607812385213,"d_finished":1032,"c":1,"l":1746607812386245,"d":1383},{"a":1746607812386463,"name":"processing","f":1746607812375520,"d_finished":7425,"c":10,"l":1746607812386247,"d":7789},{"name":"ProduceResults","f":1746607812373507,"d_finished":2733,"c":13,"l":1746607812386727,"d":2733},{"a":1746607812386730,"name":"Finish","f":1746607812386730,"d_finished":0,"c":0,"l":1746607812386827,"d":97},{"name":"task_result","f":1746607812375534,"d_finished":6246,"c":9,"l":1746607812384780,"d":6246}],"id":"9437184::76"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=4;column_names=i32;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=i16,i32,i8,ts;);;ff=(column_ids=4,19;column_names=i32,jsondoc;);;program_input=(column_ids=4,19;column_names=i32,jsondoc;);;;); 2025-05-07T08:50:12.387342Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[2:435:2453];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=actor.cpp:365;event=send_data;compute_actor_id=[2:434:2452];bytes=0;rows=0;faults=0;finished=1;fault=0;schema=; 2025-05-07T08:50:12.387764Z node 2 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: 
SelfId=[2:435:2453];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=actor.cpp:370;event=scan_finished;compute_actor_id=[2:434:2452];packs_sum=0;bytes=0;bytes_sum=0;rows=0;rows_sum=0;faults=0;finished=1;fault=0;stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.001},{"events":["l_bootstrap","f_processing","f_task_result"],"t":0.003},{"events":["f_ack","l_task_result"],"t":0.013},{"events":["l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.015}],"full":{"a":1746607812371571,"name":"_full_task","f":1746607812371571,"d_finished":0,"c":0,"l":1746607812387386,"d":15815},"events":[{"name":"bootstrap","f":1746607812371834,"d_finished":2737,"c":1,"l":1746607812374571,"d":2737},{"a":1746607812386476,"name":"ack","f":1746607812385213,"d_finished":1032,"c":1,"l":1746607812386245,"d":1942},{"a":1746607812386463,"name":"processing","f":1746607812375520,"d_finished":7425,"c":10,"l":1746607812386247,"d":8348},{"name":"ProduceResults","f":1746607812373507,"d_finished":2733,"c":13,"l":1746607812386727,"d":2733},{"a":1746607812386730,"name":"Finish","f":1746607812386730,"d_finished":0,"c":0,"l":1746607812387386,"d":656},{"name":"task_result","f":1746607812375534,"d_finished":6246,"c":9,"l":1746607812384780,"d":6246}],"id":"9437184::76"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=4;column_names=i32;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=i16,i32,i8,ts;);;ff=(column_ids=4,19;column_names=i32,jsondoc;);;program_input=(column_ids=4,19;column_names=i32,jsondoc;);;;); 2025-05-07T08:50:12.387860Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[2:435:2453];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=stats.cpp:8;event=statistic;begin=2025-05-07T08:50:12.371001Z;index_granules=0;index_portions=1;index_batches=2;committed_batches=0;schema_columns=2;filter_columns=0;additional_columns=0;compacted_portions_bytes=0;inserted_portions_bytes=16001;committed_portions_bytes=0;data_filter_bytes=0;data_additional_bytes=0;delta_bytes=16001;selected_rows=0; 2025-05-07T08:50:12.387906Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[2:435:2453];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=read_context.h:189;event=scan_aborted;reason=unexpected on destructor; 2025-05-07T08:50:12.388198Z node 2 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[2:435:2453];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=context.h:81;fetching=ef=(column_ids=4;column_names=i32;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=i16,i32,i8,ts;);;ff=(column_ids=4,19;column_names=i32,jsondoc;);;program_input=(column_ids=4,19;column_names=i32,jsondoc;);;; >> TSchemeshardBackgroundCleaningTest::CreateTableInTemp [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> KqpSystemView::NodesRange1 [GOOD] Test command err: Trying to start YDB, gRPC: 7905, MsgBus: 62342 2025-05-07T08:49:58.926725Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501623372567485109:2234];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:49:58.934359Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-05-07T08:49:59.018375Z node 2 :METADATA_PROVIDER WARN: 
log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7501623376070704900:2071];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:49:59.018417Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-05-07T08:49:59.036452Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7501623376280263020:2069];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:49:59.036512Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-05-07T08:49:59.126480Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7501623376434376015:2074];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:49:59.126509Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-05-07T08:49:59.276003Z node 5 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[5:7501623376111241166:2290];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002eb6/r3tmp/tmpLmuE2r/pdisk_1.dat 2025-05-07T08:50:00.049113Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-05-07T08:50:00.170127Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:50:00.272425Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:50:00.274620Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:50:00.289585Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:50:01.035014Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:50:01.035208Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:50:01.035394Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:50:01.035446Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:50:01.035585Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 
2025-05-07T08:50:01.035644Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:50:01.038606Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:50:01.038676Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:50:01.038842Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:50:01.038878Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:50:01.052440Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 5 Cookie 5 2025-05-07T08:50:01.052472Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 4 Cookie 4 2025-05-07T08:50:01.052610Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:50:01.053427Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:50:01.055947Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:50:01.057081Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 3 Cookie 3 2025-05-07T08:50:01.057120Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-05-07T08:50:01.057865Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:50:01.060620Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:50:01.178343Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:50:01.186329Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:50:01.209743Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 7905, node 1 2025-05-07T08:50:01.276055Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:50:01.279299Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:50:01.363263Z node 1 :GRPC_SERVER WARN: grpc_request_proxy.cpp:509: SchemeBoardDelete /Root Strong=0 2025-05-07T08:50:01.425767Z node 1 :GRPC_SERVER WARN: grpc_request_proxy.cpp:509: SchemeBoardDelete /Root Strong=0 2025-05-07T08:50:01.425838Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path 
existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:50:01.460788Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:50:01.460827Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:50:01.460839Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:50:01.460983Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:62342 TClient is connected to server localhost:62342 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:50:02.599824Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:50:02.685452Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-05-07T08:50:03.123080Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 2025-05-07T08:50:03.580728Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:50:03.894839Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T08:50:03.923861Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501623372567485109:2234];send_to=[0:7307199536658146131:7762515];
2025-05-07T08:50:03.925788Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout;
2025-05-07T08:50:04.040205Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7501623376280263020:2069];send_to=[0:7307199536658146131:7762515];
2025-05-07T08:50:04.040282Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout;
2025-05-07T08:50:04.052405Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7501623376070704900:2071];send_to=[0:7307199536658146131:7762515];
2025-05-07T08:50:04.052478Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout;
2025-05-07T08:50:04.130149Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7501623376434376015:2074];send_to=[0:7307199536658146131:7762515];
2025-05-07T08:50:04.130204Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout;
2025-05-07T08:50:04.276087Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[5:7501623376111241166:2290];send_to=[0:7307199536658146131:7762515];
2025-05-07T08:50:04.276183Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout;
2025-05-07T08:50:06.290263Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623406927225255:2363], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T08:50:06.292610Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T08:50:06.767507Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480
2025-05-07T08:50:06.864354Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480
2025-05-07T08:50:07.002937Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480
2025-05-07T08:50:07.099394Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480
2025-05-07T08:50:07.176164Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480
2025-05-07T08:50:07.248270Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480
2025-05-07T08:50:07.371464Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480
2025-05-07T08:50:07.487545Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623411222193334:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T08:50:07.487623Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T08:50:07.487790Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623411222193339:2406], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T08:50:07.492578Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480
2025-05-07T08:50:07.523089Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501623411222193341:2407], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking }
2025-05-07T08:50:07.645379Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501623411222193413:4158] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 }
2025-05-07T08:50:09.182292Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1746607809172, txId: 281474976710672] shutting down
>> TColumnShardTestReadWrite::WriteOverload-InStore+WithWritePortionsOnInsert
>> Normalizers::EmptyTablesNormalizer
>> TColumnShardTestReadWrite::WriteRead
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_background_cleaning/unittest >> TSchemeshardBackgroundCleaningTest::CreateTableInTemp [GOOD]
Test command err:
Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:224:2060] recipient: [1:218:2142]
IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:224:2060] recipient: [1:218:2142]
Leader for TabletID 72057594046678944 is [1:233:2151] sender: [1:234:2060] recipient: [1:218:2142]
2025-05-07T08:49:11.774322Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1
2025-05-07T08:49:11.774482Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10
2025-05-07T08:49:11.774536Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s
2025-05-07T08:49:11.784688Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration
2025-05-07T08:49:11.786755Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000
2025-05-07T08:49:11.786834Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000
2025-05-07T08:49:11.790319Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10
2025-05-07T08:49:11.790425Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false
2025-05-07T08:49:11.791337Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources#
2025-05-07T08:49:11.802688Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute
2025-05-07T08:49:12.229792Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs
2025-05-07T08:49:12.229863Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded
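The KQP_WORKLOAD_SERVICE exchange above is a create-if-missing handshake: fetching the default resource pool returns NOT_FOUND, a TPoolCreatorActor proposes ESchemeOpCreateResourcePool, schedules a retry once the transaction completes ("doublechecking"), and the duplicate create that races in is answered with "path exist, request accepts it", which counts as success. A sketch of that idempotent pattern with a stand-in client; the class and method names here are hypothetical and only mirror the observed sequence, not YDB's actual interfaces:

import time

class SchemeClient:
    # Hypothetical stand-in for a scheme-shard client.
    def __init__(self):
        self.paths = set()
    def fetch(self, path):
        return path in self.paths
    def create(self, path):
        if path in self.paths:
            return "ALREADY_EXISTS"  # "path exist, request accepts it"
        self.paths.add(path)
        return "SUCCESS"

def ensure_default_pool(client, path=".metadata/workload_manager/pools/default",
                        retries=3, backoff=0.1):
    # Fetch first; NOT_FOUND is expected on a fresh database.
    if client.fetch(path):
        return
    for attempt in range(retries):
        status = client.create(path)
        # Both outcomes mean the pool now exists; a concurrent creator
        # racing us produces ALREADY_EXISTS, which is accepted.
        if status in ("SUCCESS", "ALREADY_EXISTS") and client.fetch(path):
            return  # doublecheck passed
        time.sleep(backoff * (attempt + 1))
    raise RuntimeError("resource pool was not created")

ensure_default_pool(SchemeClient())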
2025-05-07T08:49:12.259536Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:49:12.272020Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:49:12.286404Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:49:12.340623Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:49:12.370136Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:49:12.390829Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:49:12.434034Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:49:12.527672Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:49:12.666212Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:49:12.666320Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:49:12.683169Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:49:12.683283Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:49:12.683364Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:49:12.683588Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:49:12.712032Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:233:2151] sender: [1:349:2060] recipient: [1:17:2064] 2025-05-07T08:49:12.913222Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:49:12.913430Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:49:12.913621Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:49:12.928204Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 
72057594046678944 2025-05-07T08:49:12.928343Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:49:12.930865Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:49:12.947848Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:49:12.948262Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:49:12.948336Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:49:12.948383Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:49:12.948421Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:49:12.962042Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:49:12.962137Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:49:12.962189Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:49:12.977180Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:49:12.977273Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:49:12.982153Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:49:13.016241Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:49:13.032390Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:49:13.044748Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:49:13.060058Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 
FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:49:13.068031Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:49:13.068190Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 244 RawX2: 4294969455 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:49:13.068243Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:49:13.078447Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:49:13.078560Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:49:13.078791Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:49:13.078900Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:49:13.083525Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:49:13.083612Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:49:13.083839Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:49:13.083887Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 
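The "Change state for txid 1:0" entries above trace one suboperation through the numeric states 2 -> 3 -> 128 -> 240; going by the surrounding TCreateParts, TConfigureParts, TPropose, and TDone messages, those codes line up with create-parts, configure-parts, propose, and done phases. A toy table-driven walk of that progression (the numeric codes are taken from the log; the phase labels are inferred and may not match the real enum):

# Codes observed in the log, paired with phase names suggested by the
# neighbouring messages (inferred labels, not the actual enum).
TRANSITIONS = {
    2: (3, "TCreateParts: no shards to create, do next state"),
    3: (128, "TConfigureParts: configuration sent"),
    128: (240, "TPropose: plan step received"),
    240: (None, "TDone: operation part finished"),
}

def walk(state=2):
    while state is not None:
        nxt, note = TRANSITIONS[state]
        print(f"state {state:>3}: {note}")
        state = nxt

walk()  # prints the 2 -> 3 -> 128 -> 240 progression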
ARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 104 datashard 72075186233409551 state Ready 2025-05-07T08:50:13.059716Z node 7 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186233409551 Got TEvSchemaChangedResult from SS at 72075186233409551 2025-05-07T08:50:13.059906Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 2146435072, Sender [7:235:2153], Recipient [7:235:2153]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-05-07T08:50:13.059953Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4857: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-05-07T08:50:13.060027Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 104:2, at schemeshard: 72057594046678944 2025-05-07T08:50:13.060081Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046678944] TDone opId# 104:2 ProgressState 2025-05-07T08:50:13.060233Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046678944 2025-05-07T08:50:13.060270Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#104:2 progress is 2/3 2025-05-07T08:50:13.060318Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 104 ready parts: 2/3 2025-05-07T08:50:13.060363Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#104:2 progress is 2/3 2025-05-07T08:50:13.060405Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 104 ready parts: 2/3 2025-05-07T08:50:13.060478Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 104, ready parts: 2/3, is published: true 2025-05-07T08:50:13.060931Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 2146435072, Sender [7:235:2153], Recipient [7:235:2153]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-05-07T08:50:13.060980Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4857: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-05-07T08:50:13.061052Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 104:0, at schemeshard: 72057594046678944 2025-05-07T08:50:13.061107Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046678944] TDone opId# 104:0 ProgressState 2025-05-07T08:50:13.061189Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046678944 2025-05-07T08:50:13.061221Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#104:0 progress is 3/3 2025-05-07T08:50:13.061252Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 104 ready parts: 3/3 2025-05-07T08:50:13.061285Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#104:0 progress is 3/3 2025-05-07T08:50:13.061325Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 104 ready parts: 3/3 2025-05-07T08:50:13.061360Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 104, ready 
parts: 3/3, is published: true 2025-05-07T08:50:13.061435Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [7:577:2403] message: TxId: 104 2025-05-07T08:50:13.061504Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 104 ready parts: 3/3 2025-05-07T08:50:13.061574Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 104:0 2025-05-07T08:50:13.061626Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 104:0 2025-05-07T08:50:13.061777Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 10] was 4 2025-05-07T08:50:13.061843Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 104:1 2025-05-07T08:50:13.061876Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 104:1 2025-05-07T08:50:13.061915Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 11] was 3 2025-05-07T08:50:13.061941Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 104:2 2025-05-07T08:50:13.061964Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 104:2 2025-05-07T08:50:13.062046Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 12] was 3 2025-05-07T08:50:13.065442Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-05-07T08:50:13.065638Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-05-07T08:50:13.065767Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:630: Send to actor: [7:577:2403] msg type: 271124998 msg: NKikimrScheme.TEvNotifyTxCompletionResult TxId: 104 at schemeshard: 72057594046678944 2025-05-07T08:50:13.065948Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 104: got EvNotifyTxCompletionResult 2025-05-07T08:50:13.066040Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 104: satisfy waiter [7:1030:2783] 2025-05-07T08:50:13.066348Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269877764, Sender [7:1032:2785], Recipient [7:235:2153]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-05-07T08:50:13.066401Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4938: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-05-07T08:50:13.066436Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5761: Server pipe is reset, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 104 TestModificationResults wait txId: 105 2025-05-07T08:50:13.067628Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271122432, Sender [8:553:2102], Recipient [7:235:2153] 2025-05-07T08:50:13.067697Z node 7 
:FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4851: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-05-07T08:50:13.070909Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/tmp" OperationType: ESchemeOpCreateIndexedTable CreateIndexedTable { TableDescription { Name: "NotTempTable" Columns { Name: "key" Type: "Uint64" } Columns { Name: "value" Type: "Utf8" } KeyColumnNames: "key" } IndexDescription { Name: "ValueIndex" KeyColumnNames: "value" } } AllowCreateInTempDir: false } TxId: 105 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:50:13.073151Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_just_reject.cpp:47: TReject Propose, opId: 105:0, explain: Check failed: path: '/MyRoot/tmp', error: path is temporary (id: [OwnerId: 72057594046678944, LocalPathId: 3], type: EPathTypeDir, state: EPathStateNoChanges), at schemeshard: 72057594046678944 2025-05-07T08:50:13.073248Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 105:1, propose status:StatusPreconditionFailed, reason: Check failed: path: '/MyRoot/tmp', error: path is temporary (id: [OwnerId: 72057594046678944, LocalPathId: 3], type: EPathTypeDir, state: EPathStateNoChanges), at schemeshard: 72057594046678944 2025-05-07T08:50:13.127394Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046678944 2025-05-07T08:50:13.131068Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 105, response: Status: StatusPreconditionFailed Reason: "Check failed: path: \'/MyRoot/tmp\', error: path is temporary (id: [OwnerId: 72057594046678944, LocalPathId: 3], type: EPathTypeDir, state: EPathStateNoChanges)" TxId: 105 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:50:13.131330Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 105, database: /MyRoot, subject: , status: StatusPreconditionFailed, reason: Check failed: path: '/MyRoot/tmp', error: path is temporary (id: [OwnerId: 72057594046678944, LocalPathId: 3], type: EPathTypeDir, state: EPathStateNoChanges), operation: CREATE TABLE WITH INDEXES, path: /MyRoot/tmp/NotTempTable 2025-05-07T08:50:13.131427Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 TestModificationResult got TxId: 105, wait until txId: 105 TestWaitNotification wait txId: 105 2025-05-07T08:50:13.131968Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 105: send EvNotifyTxCompletion 2025-05-07T08:50:13.132034Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 105 2025-05-07T08:50:13.132522Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269877761, Sender [7:1100:2853], Recipient [7:235:2153]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-05-07T08:50:13.132591Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4937: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-05-07T08:50:13.132635Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5713: Pipe server connected, at tablet: 72057594046678944 2025-05-07T08:50:13.132813Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: 
StateWork, received event# 271124996, Sender [7:577:2403], Recipient [7:235:2153]: NKikimrScheme.TEvNotifyTxCompletion TxId: 105 2025-05-07T08:50:13.132848Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4853: StateWork, processing event TEvSchemeShard::TEvNotifyTxCompletion 2025-05-07T08:50:13.132937Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 105, at schemeshard: 72057594046678944 2025-05-07T08:50:13.133080Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 105: got EvNotifyTxCompletionResult 2025-05-07T08:50:13.133126Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 105: satisfy waiter [7:1098:2851] 2025-05-07T08:50:13.133336Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269877764, Sender [7:1100:2853], Recipient [7:235:2153]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-05-07T08:50:13.133375Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4938: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-05-07T08:50:13.133422Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5761: Server pipe is reset, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 105 >> test_sql_streaming.py::test[suites-ReadWriteTopicWithSchema-default.txt] [FAIL] >> test_sql_streaming.py::test[suites-WriteTwoTopics-default.txt] >> TColumnShardTestReadWrite::Write [GOOD] >> TColumnShardTestReadWrite::ReadSomePrograms >> TColumnShardTestReadWrite::CompactionSplitGranuleStrKey_PKString >> Normalizers::ColumnChunkNormalizer [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> TColumnShardTestReadWrite::Write [GOOD] Test command err: 2025-05-07T08:50:08.520417Z node 1 :BLOB_CACHE NOTICE: ctor_logger.h:56: MaxCacheDataSize: 20971520 InFlightDataSize: 0 2025-05-07T08:50:08.654696Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-05-07T08:50:08.678502Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-05-07T08:50:08.678826Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-05-07T08:50:08.687229Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-05-07T08:50:08.687459Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-05-07T08:50:08.687767Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-05-07T08:50:08.687917Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-05-07T08:50:08.688051Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-05-07T08:50:08.688226Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanInsertionDedup; 2025-05-07T08:50:08.688346Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-05-07T08:50:08.688467Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-05-07T08:50:08.688570Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-05-07T08:50:08.688699Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-05-07T08:50:08.688827Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-05-07T08:50:08.688954Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-05-07T08:50:08.716662Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-05-07T08:50:08.716844Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:137;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=11;current_normalizer=CLASS_NAME=Granules; 2025-05-07T08:50:08.716897Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-05-07T08:50:08.717070Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-05-07T08:50:08.717268Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-05-07T08:50:08.717366Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-05-07T08:50:08.717411Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-05-07T08:50:08.717513Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-05-07T08:50:08.717583Z node 1 
:TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-05-07T08:50:08.717636Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-05-07T08:50:08.717674Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-05-07T08:50:08.717838Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-05-07T08:50:08.717903Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-05-07T08:50:08.717941Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-05-07T08:50:08.717999Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-05-07T08:50:08.718100Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-05-07T08:50:08.718157Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-05-07T08:50:08.718206Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanInsertionDedup;id=CleanInsertionDedup; 2025-05-07T08:50:08.718238Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=8;type=CleanInsertionDedup; 2025-05-07T08:50:08.718323Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanInsertionDedup;id=8; 2025-05-07T08:50:08.718390Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-05-07T08:50:08.718421Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-05-07T08:50:08.718480Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-05-07T08:50:08.718516Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-05-07T08:50:08.718544Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-05-07T08:50:08.718774Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-05-07T08:50:08.718845Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-05-07T08:50:08.718878Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-05-07T08:50:08.719090Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-05-07T08:50:08.719771Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-05-07T08:50:08.719806Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-05-07T08:50:08.719960Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-05-07T08:50:08.720010Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-05-07T08:50:08.720045Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-05-07T08:50:08.720135Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-05-07T08:50:08.720218Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-05-07T08:50:08.720280Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-05-07T08:50:08.720311Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-05-07T08:50:08.720756Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=52; 2025-05-07T08:50:08.720839Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=38; ... 
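Both columnshard boots in this chunk replay the same normalizer pipeline: eleven normalizers are registered, then executed strictly in sequence, each logging normalizer_init, a "0 chunks found" scan result, and normalizer_finished before the tablet switches to the next one. A compact sketch of that sequential-migration pattern; the names come from the log, while the driver itself is illustrative rather than the TX_COLUMNSHARD implementation:

NORMALIZERS = [
    "Granules", "Chunks", "TablesCleaner", "CleanGranuleId",
    "CleanInsertionDedup", "GCCountersNormalizer", "SyncPortionFromChunks",
    "RestoreV1Chunks_V2", "RestoreV2Chunks", "CleanDeprecatedSnapshot",
    "RestoreV0ChunksMeta",
]

def run_normalizers(find_broken_chunks):
    # Run each migration step in order; a step that finds nothing to fix
    # still reports completion before the next one may start.
    for name in NORMALIZERS:
        broken = find_broken_chunks(name)
        print(f"normalizer_init;type={name}")
        print(f"normalizer={name};message={len(broken)} chunks found")
        # repair would happen here; the logs above show the clean case
        print(f"normalizer_finished;description=CLASS_NAME={name}")

run_normalizers(lambda name: [])  # clean tablet: every step finds 0 chunks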
:{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"21,21,21,21,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"22,22,22,22,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"23,23,23,23,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"24,24,24,24,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"25,25,25,25,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"26,26,26,26,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"27,27,27,27,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"28,28,28,28,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"29,29,29,29,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"30,30,30,30,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"31,31,31,31,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"32,32,32,32,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"33,33,33,33,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"34,34,34,34,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"35,35,35,35,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"36,36,36,36,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"37,37,37,37,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"38,38,38,38,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"39,39,39,39,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"40,40,40,40,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"41,41,41,41,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"42,42,42,42,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"43,43,43,43,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"44,44,44,44,"}},{"i":{"txs
":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"45,45,45,45,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"46,46,46,46,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"47,47,47,47,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"48,48,48,48,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"49,49,49,49,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"50,50,50,50,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"51,51,51,51,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"52,52,52,52,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"53,53,53,53,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"54,54,54,54,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"55,55,55,55,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"56,56,56,56,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"57,57,57,57,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"58,58,58,58,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"59,59,59,59,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"60,60,60,60,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"61,61,61,61,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"62,62,62,62,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"63,63,63,63,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"64,64,64,64,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"65,65,65,65,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"66,66,66,66,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"67,67,67,67,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"68,68
,68,68,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"69,69,69,69,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"70,70,70,70,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"71,71,71,71,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"72,72,72,72,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"73,73,73,73,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"74,74,74,74,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"75,75,75,75,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"76,76,76,76,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"77,77,77,77,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"78,78,78,78,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"79,79,79,79,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"80,80,80,80,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"81,81,81,81,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"82,82,82,82,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"83,83,83,83,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"84,84,84,84,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"85,85,85,85,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"86,86,86,86,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"87,87,87,87,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"88,88,88,88,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"89,89,89,89,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"90,90,90,90,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"91,91,91,91,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"i
nclude":0,"pk":"92,92,92,92,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"93,93,93,93,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"94,94,94,94,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"95,95,95,95,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"96,96,96,96,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"97,97,97,97,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"98,98,98,98,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"99,99,99,99,"}}]}; 2025-05-07T08:50:14.441177Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;local_tx_no=30;method=complete;tx_info=TTxWrite;tablet_id=9437184;tx_state=complete;fline=columnshard_impl.cpp:785;event=start_indexation_tasks;insert_overload_size=0; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> Normalizers::ColumnChunkNormalizer [GOOD] Test command err: 2025-05-07T08:50:10.634588Z node 1 :BLOB_CACHE NOTICE: ctor_logger.h:56: MaxCacheDataSize: 20971520 InFlightDataSize: 0 2025-05-07T08:50:10.798422Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:138:2170];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-05-07T08:50:10.824923Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:138:2170];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-05-07T08:50:10.825200Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-05-07T08:50:10.836405Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:138:2170];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=NO_VALUE_OPTIONAL; 2025-05-07T08:50:10.836679Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:138:2170];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-05-07T08:50:10.836986Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:138:2170];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-05-07T08:50:10.837127Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:138:2170];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-05-07T08:50:10.837260Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:138:2170];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-05-07T08:50:10.837389Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:138:2170];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanInsertionDedup; 
2025-05-07T08:50:10.837526Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:138:2170];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-05-07T08:50:10.837637Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:138:2170];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-05-07T08:50:10.837768Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:138:2170];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-05-07T08:50:10.837889Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:138:2170];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-05-07T08:50:10.838043Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:138:2170];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-05-07T08:50:10.838178Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:138:2170];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-05-07T08:50:10.866575Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-05-07T08:50:10.867225Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:137;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=11;current_normalizer=CLASS_NAME=Granules; 2025-05-07T08:50:10.867300Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=1;type=Granules; 2025-05-07T08:50:10.867549Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-05-07T08:50:10.867750Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-05-07T08:50:10.867831Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-05-07T08:50:10.867882Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=2;type=Chunks; 2025-05-07T08:50:10.867990Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-05-07T08:50:10.868081Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-05-07T08:50:10.868141Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 
2025-05-07T08:50:10.868178Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=4;type=TablesCleaner; 2025-05-07T08:50:10.868359Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-05-07T08:50:10.868433Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-05-07T08:50:10.868489Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-05-07T08:50:10.868528Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=6;type=CleanGranuleId; 2025-05-07T08:50:10.868648Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-05-07T08:50:10.868718Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-05-07T08:50:10.868770Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanInsertionDedup;id=CleanInsertionDedup; 2025-05-07T08:50:10.868800Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=8;type=CleanInsertionDedup; 2025-05-07T08:50:10.868875Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanInsertionDedup;id=8; 2025-05-07T08:50:10.868917Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-05-07T08:50:10.868954Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=9;type=GCCountersNormalizer; 2025-05-07T08:50:10.869020Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-05-07T08:50:10.869061Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-05-07T08:50:10.869123Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=11;type=SyncPortionFromChunks; 2025-05-07T08:50:10.869364Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-05-07T08:50:10.869431Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-05-07T08:50:10.869485Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=15;type=RestoreV1Chunks_V2; 2025-05-07T08:50:10.869725Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-05-07T08:50:10.869777Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-05-07T08:50:10.869815Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=16;type=RestoreV2Chunks; 2025-05-07T08:50:10.869960Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-05-07T08:50:10.870051Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-05-07T08:50:10.870083Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=17;type=CleanDeprecatedSnapshot; 2025-05-07T08:50:10.870173Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-05-07T08:50:10.870254Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-05-07T08:50:10.870298Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-05-07T08:50:10.870329Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=18;type=RestoreV0ChunksMeta; 2025-05-07T08:50:10.870821Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;Tab ... 
cessedResult; 2025-05-07T08:50:15.108256Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:479:2484];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=merge.cpp:74;event=DoApply;interval_idx=0; 2025-05-07T08:50:15.108318Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:479:2484];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=scanner.cpp:21;event=interval_result_received;interval_idx=0;intervalId=2; 2025-05-07T08:50:15.108409Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:479:2484];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=scanner.cpp:47;event=interval_result;interval_idx=0;count=20048;merger=0;interval_id=2; 2025-05-07T08:50:15.108483Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:479:2484];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=scanner.cpp:65;event=intervals_finished; 2025-05-07T08:50:15.108601Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:479:2484];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:187;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;;); 2025-05-07T08:50:15.108647Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:479:2484];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:73;event=DoExtractReadyResults;result=1;count=20048;finished=1; 2025-05-07T08:50:15.108730Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:479:2484];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:198;stage=limit exhausted;limit=limits:(bytes=0;chunks=0);; 2025-05-07T08:50:15.109021Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:479:2484];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=actor.cpp:104;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-05-07T08:50:15.109224Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:479:2484];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:187;stage=start;iterator=ready_results:(count:1;records_count:20048;schema=key1: uint64 key2: uint64 field: string;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;;); 2025-05-07T08:50:15.109282Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:479:2484];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:73;event=DoExtractReadyResults;result=0;count=0;finished=1; 2025-05-07T08:50:15.109454Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:479:2484];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:229;stage=ready 
result;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;;);columns=3;rows=20048; 2025-05-07T08:50:15.109551Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:479:2484];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:249;stage=data_format;batch_size=2405760;num_rows=20048;batch_columns=key1,key2,field; 2025-05-07T08:50:15.109812Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:479:2484];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:365;event=send_data;compute_actor_id=[1:477:2483];bytes=2405760;rows=20048;faults=0;finished=0;fault=0;schema=key1: uint64 key2: uint64 field: string; 2025-05-07T08:50:15.110031Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:479:2484];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:269;stage=finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;;); 2025-05-07T08:50:15.110182Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:479:2484];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:187;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;;); 2025-05-07T08:50:15.110321Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:479:2484];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;;); 2025-05-07T08:50:15.111488Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:479:2484];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=actor.cpp:104;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-05-07T08:50:15.111685Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:479:2484];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:187;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;;); 2025-05-07T08:50:15.111836Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:479:2484];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=scan iterator is 
finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;;); 2025-05-07T08:50:15.111893Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: actor.cpp:409: Scan [1:479:2484] finished for tablet 9437184 2025-05-07T08:50:15.112476Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[1:479:2484];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=actor.cpp:415;event=scan_finish;compute_actor_id=[1:477:2483];stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.002},{"events":["l_bootstrap"],"t":0.003},{"events":["f_processing","f_task_result"],"t":0.007},{"events":["f_ack","l_task_result"],"t":0.34},{"events":["l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.343}],"full":{"a":1746607814768314,"name":"_full_task","f":1746607814768314,"d_finished":0,"c":0,"l":1746607815111984,"d":343670},"events":[{"name":"bootstrap","f":1746607814768472,"d_finished":3386,"c":1,"l":1746607814771858,"d":3386},{"a":1746607815111448,"name":"ack","f":1746607815108988,"d_finished":1368,"c":1,"l":1746607815110356,"d":1904},{"a":1746607815111421,"name":"processing","f":1746607814775381,"d_finished":185973,"c":9,"l":1746607815110360,"d":186536},{"name":"ProduceResults","f":1746607814770543,"d_finished":3381,"c":12,"l":1746607815111872,"d":3381},{"a":1746607815111877,"name":"Finish","f":1746607815111877,"d_finished":0,"c":0,"l":1746607815111984,"d":107},{"name":"task_result","f":1746607814775404,"d_finished":184369,"c":8,"l":1746607815108824,"d":184369}],"id":"9437184::2"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;;); 2025-05-07T08:50:15.112592Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:479:2484];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=actor.cpp:365;event=send_data;compute_actor_id=[1:477:2483];bytes=0;rows=0;faults=0;finished=1;fault=0;schema=; 2025-05-07T08:50:15.113133Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: 
SelfId=[1:479:2484];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=actor.cpp:370;event=scan_finished;compute_actor_id=[1:477:2483];packs_sum=0;bytes=0;bytes_sum=0;rows=0;rows_sum=0;faults=0;finished=1;fault=0;stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.002},{"events":["l_bootstrap"],"t":0.003},{"events":["f_processing","f_task_result"],"t":0.007},{"events":["f_ack","l_task_result"],"t":0.34},{"events":["l_ProduceResults","f_Finish"],"t":0.343},{"events":["l_ack","l_processing","l_Finish"],"t":0.344}],"full":{"a":1746607814768314,"name":"_full_task","f":1746607814768314,"d_finished":0,"c":0,"l":1746607815112652,"d":344338},"events":[{"name":"bootstrap","f":1746607814768472,"d_finished":3386,"c":1,"l":1746607814771858,"d":3386},{"a":1746607815111448,"name":"ack","f":1746607815108988,"d_finished":1368,"c":1,"l":1746607815110356,"d":2572},{"a":1746607815111421,"name":"processing","f":1746607814775381,"d_finished":185973,"c":9,"l":1746607815110360,"d":187204},{"name":"ProduceResults","f":1746607814770543,"d_finished":3381,"c":12,"l":1746607815111872,"d":3381},{"a":1746607815111877,"name":"Finish","f":1746607815111877,"d_finished":0,"c":0,"l":1746607815112652,"d":775},{"name":"task_result","f":1746607814775404,"d_finished":184369,"c":8,"l":1746607815108824,"d":184369}],"id":"9437184::2"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;;); 2025-05-07T08:50:15.113238Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:479:2484];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=stats.cpp:8;event=statistic;begin=2025-05-07T08:50:14.767794Z;index_granules=0;index_portions=1;index_batches=939;committed_batches=0;schema_columns=3;filter_columns=0;additional_columns=0;compacted_portions_bytes=0;inserted_portions_bytes=2589264;committed_portions_bytes=0;data_filter_bytes=0;data_additional_bytes=0;delta_bytes=2589264;selected_rows=0; 2025-05-07T08:50:15.113294Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:479:2484];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=read_context.h:189;event=scan_aborted;reason=unexpected on destructor; 2025-05-07T08:50:15.113651Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[1:479:2484];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=context.h:81;fetching=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;; >> TColumnShardTestReadWrite::CompactionInGranule_PKInt64 >> VectorIndexBuildTest::VectorIndexDescriptionIsPersisted >> TTopicReaderTests::TestRun_ReadOneMessage [GOOD] >> TTopicReaderTests::TestRun_ReadTwoMessages_With_Limit_1 >> TColumnShardTestReadWrite::ReadSomePrograms [GOOD] >> test_sql_streaming.py::test[suites-GroupByHopPercentile-default.txt] [FAIL] >> test_sql_streaming.py::test[suites-GroupByHopTimeExtractorUnusedColumns-default.txt] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> TColumnShardTestReadWrite::ReadSomePrograms [GOOD] Test command err: 2025-05-07T08:50:15.658824Z node 1 :BLOB_CACHE NOTICE: ctor_logger.h:56: MaxCacheDataSize: 20971520 
InFlightDataSize: 0 2025-05-07T08:50:15.801745Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-05-07T08:50:15.831210Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-05-07T08:50:15.831508Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-05-07T08:50:15.840734Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-05-07T08:50:15.840979Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-05-07T08:50:15.841293Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-05-07T08:50:15.841456Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-05-07T08:50:15.841580Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-05-07T08:50:15.841692Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanInsertionDedup; 2025-05-07T08:50:15.841824Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-05-07T08:50:15.841938Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-05-07T08:50:15.842138Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-05-07T08:50:15.842310Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-05-07T08:50:15.842435Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-05-07T08:50:15.842549Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-05-07T08:50:15.884002Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at 
tablet 9437184 2025-05-07T08:50:15.884196Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:137;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=11;current_normalizer=CLASS_NAME=Granules; 2025-05-07T08:50:15.884255Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-05-07T08:50:15.884414Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-05-07T08:50:15.884559Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-05-07T08:50:15.884650Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-05-07T08:50:15.884717Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-05-07T08:50:15.884813Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-05-07T08:50:15.884882Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-05-07T08:50:15.884945Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-05-07T08:50:15.884988Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-05-07T08:50:15.885176Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-05-07T08:50:15.885247Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-05-07T08:50:15.885290Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-05-07T08:50:15.885332Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-05-07T08:50:15.885430Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-05-07T08:50:15.885485Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-05-07T08:50:15.885532Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanInsertionDedup;id=CleanInsertionDedup; 2025-05-07T08:50:15.885569Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=8;type=CleanInsertionDedup; 2025-05-07T08:50:15.885644Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanInsertionDedup;id=8; 2025-05-07T08:50:15.885692Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-05-07T08:50:15.885727Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-05-07T08:50:15.885828Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-05-07T08:50:15.885879Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-05-07T08:50:15.885914Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-05-07T08:50:15.886211Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-05-07T08:50:15.886275Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-05-07T08:50:15.886313Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-05-07T08:50:15.886553Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-05-07T08:50:15.886602Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-05-07T08:50:15.886650Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-05-07T08:50:15.886817Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-05-07T08:50:15.886869Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-05-07T08:50:15.886910Z node 1 :TX_COLUMNSHARD NOTICE: 
log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-05-07T08:50:15.887001Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-05-07T08:50:15.887079Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-05-07T08:50:15.887153Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-05-07T08:50:15.887194Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-05-07T08:50:15.887627Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=50; 2025-05-07T08:50:15.887727Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=41; ... 9437184 Save Batch GenStep: 2:1 Blob count: 1 2025-05-07T08:50:16.573191Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;local_tx_no=4;method=complete;tx_info=TTxWrite;tablet_id=9437184;tx_state=complete;fline=columnshard_impl.cpp:785;event=start_indexation_tasks;insert_overload_size=0; 2025-05-07T08:50:16.590899Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: PlanStep 1746607816762 at tablet 9437184, mediator 0 2025-05-07T08:50:16.591002Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxPlanStep[6] execute at tablet 9437184 2025-05-07T08:50:16.591277Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=100;fline=abstract.h:83;progress_tx_id=100;lock_id=1;broken=0; 2025-05-07T08:50:16.591473Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=100;fline=tx_controller.cpp:214;event=finished_tx;tx_id=100; 2025-05-07T08:50:16.604458Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxPlanStep[6] complete at tablet 9437184 2025-05-07T08:50:16.604595Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Complete;fline=abstract.h:93;progress_tx_id=100;lock_id=1;broken=0; 2025-05-07T08:50:16.604759Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Complete;fline=columnshard_impl.cpp:785;event=start_indexation_tasks;insert_overload_size=3384; 2025-05-07T08:50:16.608729Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Complete;fline=manager.cpp:10;event=lock;process_id=CS::INDEXATION::4c361ee0-2b2011f0-a31819be-1f133d77; 2025-05-07T08:50:16.608817Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Complete;fline=ro_controller.cpp:45;event=CS::INDEXATION;tablet_id=9437184; 2025-05-07T08:50:16.608945Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Complete;fline=columnshard_impl.cpp:754;event=indexation;bytes=3384;blobs_count=1;max_limit=251658240;has_more=0;external_task_id=4c361ee0-2b2011f0-a31819be-1f133d77; 2025-05-07T08:50:16.609173Z node 1 :TX_COLUMNSHARD 
DEBUG: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Complete;fline=columnshard_impl.cpp:620;event=start_changes;type=CS::INDEXATION;task_id=4c361ee0-2b2011f0-a31819be-1f133d77; 2025-05-07T08:50:16.609593Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;parent=[1:139:2171];ev_type=NKikimr::NOlap::NResourceBroker::NSubscribe::TEvStartTask;fline=actor.cpp:38;event=ask_resources;task=cpu=0;mem=3035;external_task_id=4c361ee0-2b2011f0-a31819be-1f133d77;type=CS::INDEXATION;priority=0;; 2025-05-07T08:50:16.609836Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;parent=[1:139:2171];ev_type=NKikimr::NResourceBroker::TEvResourceBroker::TEvResourceAllocated;fline=actor.cpp:29;event=result_resources;task_id=1;task=cpu=0;mem=3035;external_task_id=4c361ee0-2b2011f0-a31819be-1f133d77;type=CS::INDEXATION;priority=0;; 2025-05-07T08:50:16.609881Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;parent=[1:139:2171];ev_type=NKikimr::NResourceBroker::TEvResourceBroker::TEvResourceAllocated;fline=task.cpp:9;event=resource_allocated;external_task_id=4c361ee0-2b2011f0-a31819be-1f133d77;mem=3035;cpu=0; 2025-05-07T08:50:16.610077Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;parent=[1:139:2171];ev_type=NKikimr::NResourceBroker::TEvResourceBroker::TEvResourceAllocated;fline=task.cpp:40;event=allocate_resources;external_task_id=4c361ee0-2b2011f0-a31819be-1f133d77;task_id=1;mem=3035;cpu=0; 2025-05-07T08:50:16.610237Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: external_task_id=4c361ee0-2b2011f0-a31819be-1f133d77;fline=task.cpp:110;event=OnDataReady;task=agents_waiting=0;additional_info=();;external_task_id=4c361ee0-2b2011f0-a31819be-1f133d77; 2025-05-07T08:50:16.615256Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: external_task_id=4c361ee0-2b2011f0-a31819be-1f133d77;fline=actor.cpp:48;task=agents_waiting=0;additional_info=();; 2025-05-07T08:50:16.615509Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=columnshard__write_index.cpp:50;event=TEvWriteIndex;count=1; 2025-05-07T08:50:16.617266Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: WriteIndex at tablet 9437184 2025-05-07T08:50:16.617440Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=constructor_meta.cpp:54;memory_size=60;data_size=20;sum=60;count=1; 2025-05-07T08:50:16.617545Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=constructor_meta.cpp:75;memory_size=220;data_size=196;sum=220;count=2;size_of_meta=144; 2025-05-07T08:50:16.617658Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=constructor_portion.cpp:40;memory_size=292;data_size=268;sum=292;count=1;size_of_portion=216; 2025-05-07T08:50:16.617927Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxWriteIndex[8] (CS::INDEXATION) apply at tablet 9437184 2025-05-07T08:50:16.618953Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager on execute at tablet 9437184 Save Batch GenStep: 2:2 Blob count: 1 2025-05-07T08:50:16.619070Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Index: tables 0 inserted {blob_bytes=0;raw_bytes=0;count=0;records=0} compacted {blob_bytes=0;raw_bytes=0;count=0;records=0} s-compacted {blob_bytes=0;raw_bytes=0;count=0;records=0} inactive 
{blob_bytes=0;raw_bytes=0;count=0;records=0} evicted {blob_bytes=0;raw_bytes=0;count=0;records=0} at tablet 9437184 2025-05-07T08:50:16.619650Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;fline=columnshard.cpp:239;event=TEvPrivate::TEvPeriodicWakeup::MANUAL;tablet_id=9437184; 2025-05-07T08:50:16.619713Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:516;event=EnqueueBackgroundActivities;periodic=0; 2025-05-07T08:50:16.619805Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:769;event=skip_indexation;reason=in_progress;count=1;insert_overload_size=3384;indexing_debug={task_ids=4c361ee0-2b2011f0-a31819be-1f133d77,;}; 2025-05-07T08:50:16.619905Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:246;event=StartCleanup;portions_count=0; 2025-05-07T08:50:16.620174Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:321;event=StartCleanup;portions_count=0;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-05-07T08:50:16.620245Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:1061;background=cleanup;skip_reason=no_changes; 2025-05-07T08:50:16.620302Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:1093;background=cleanup;skip_reason=no_changes; 2025-05-07T08:50:16.620490Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:1002;background=ttl;skip_reason=no_changes; 2025-05-07T08:50:16.620928Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: EvScan txId: 100 scanId: 0 version: {1746607816762:100} readable: {1746607816762:max} at tablet 9437184 2025-05-07T08:50:16.633506Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=4c361ee0-2b2011f0-a31819be-1f133d77;fline=abstract.cpp:53;event=WriteIndexComplete;type=CS::INDEXATION;success=1; 2025-05-07T08:50:16.633581Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=4c361ee0-2b2011f0-a31819be-1f133d77;fline=with_appended.cpp:65;portions=1,;task_id=4c361ee0-2b2011f0-a31819be-1f133d77; 2025-05-07T08:50:16.633851Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=4c361ee0-2b2011f0-a31819be-1f133d77;fline=manager.cpp:15;event=unlock;process_id=CS::INDEXATION::4c361ee0-2b2011f0-a31819be-1f133d77; 2025-05-07T08:50:16.633908Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=4c361ee0-2b2011f0-a31819be-1f133d77;tablet_id=9437184;fline=columnshard_impl.cpp:516;event=EnqueueBackgroundActivities;periodic=0; 2025-05-07T08:50:16.633995Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: 
tablet_id=9437184;task_id=4c361ee0-2b2011f0-a31819be-1f133d77;tablet_id=9437184;fline=columnshard_impl.cpp:785;event=start_indexation_tasks;insert_overload_size=0; 2025-05-07T08:50:16.634077Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=4c361ee0-2b2011f0-a31819be-1f133d77;tablet_id=9437184;fline=column_engine_logs.cpp:246;event=StartCleanup;portions_count=0; 2025-05-07T08:50:16.634178Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=4c361ee0-2b2011f0-a31819be-1f133d77;tablet_id=9437184;fline=column_engine_logs.cpp:321;event=StartCleanup;portions_count=0;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-05-07T08:50:16.634246Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=4c361ee0-2b2011f0-a31819be-1f133d77;tablet_id=9437184;fline=columnshard_impl.cpp:1061;background=cleanup;skip_reason=no_changes; 2025-05-07T08:50:16.634298Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=4c361ee0-2b2011f0-a31819be-1f133d77;tablet_id=9437184;fline=columnshard_impl.cpp:1093;background=cleanup;skip_reason=no_changes; 2025-05-07T08:50:16.634403Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=4c361ee0-2b2011f0-a31819be-1f133d77;tablet_id=9437184;queue=ttl;external_count=0;fline=granule.cpp:168;event=skip_actualization;waiting=0.998500s; 2025-05-07T08:50:16.634474Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=4c361ee0-2b2011f0-a31819be-1f133d77;tablet_id=9437184;fline=columnshard_impl.cpp:1002;background=ttl;skip_reason=no_changes; 2025-05-07T08:50:16.634613Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:2:1:3:0:3384:0] 2025-05-07T08:50:16.634707Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Save Batch GenStep: 2:2 Blob count: 1 2025-05-07T08:50:16.634864Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: fline=task.cpp:21;event=free_resources;task_id=1;external_task_id=4c361ee0-2b2011f0-a31819be-1f133d77;mem=3035;cpu=0; 2025-05-07T08:50:16.635037Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Complete;fline=columnshard_impl.cpp:785;event=start_indexation_tasks;insert_overload_size=0; 2025-05-07T08:50:16.635231Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TTxScan prepare txId: 100 scanId: 0 at tablet 9437184 2025-05-07T08:50:16.635409Z node 1 :TX_COLUMNSHARD_SCAN WARN: log.cpp:784: tx_id=100;scan_id=0;gen=0;table=;snapshot={1746607816762:100};tablet=9437184;timeout=0.000000s;fline=tx_scan.cpp:14;event=TTxScan failed;problem=cannot parse program;details=Can't parse SsaProgram: Can't parse TOlapProgram protobuf; >> IndexBuildTest::BaseCase >> TColumnShardTestReadWrite::RebootWriteReadStandalone [GOOD] |89.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kesus/tablet/ut/ydb-core-kesus-tablet-ut |89.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kesus/tablet/ut/ydb-core-kesus-tablet-ut |89.0%| [LD] {RESULT} $(B)/ydb/core/kesus/tablet/ut/ydb-core-kesus-tablet-ut >> IndexBuildTest::CheckLimitWithDroppedIndex >> ReadOnlyVDisk::TestGarbageCollect [GOOD] >> TColumnShardTestReadWrite::WriteReadStandaloneExoticTypes [GOOD] >> ReadOnlyVDisk::TestReads [GOOD] >> TSchemeshardBackgroundCleaningTest::SchemeshardBackgroundCleaningTestSimpleDropIndex [GOOD] >> TSchemeshardBackgroundCleaningTest::TempInTemp |89.0%| [LD] {default-linux-x86_64, release, asan} 
$(B)/ydb/core/tx/datashard/ut_kqp/ydb-core-tx-datashard-ut_kqp |89.0%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_kqp/ydb-core-tx-datashard-ut_kqp |89.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_kqp/ydb-core-tx-datashard-ut_kqp >> TTopicReaderTests::TestRun_ReadMoreMessagesThanLimit_Without_Wait_NewlineDelimited [GOOD] >> TTopicReaderTests::TestRun_ReadMoreMessagesThanLimit_Without_Wait_NoDelimiter >> Viewer::Plan2SvgOK [GOOD] >> Viewer::Plan2SvgBad |89.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/fq/libs/checkpoint_storage/ut/ydb-core-fq-libs-checkpoint_storage-ut |89.0%| [LD] {RESULT} $(B)/ydb/core/fq/libs/checkpoint_storage/ut/ydb-core-fq-libs-checkpoint_storage-ut |89.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/fq/libs/checkpoint_storage/ut/ydb-core-fq-libs-checkpoint_storage-ut ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_read_only_vdisk/unittest >> ReadOnlyVDisk::TestGarbageCollect [GOOD] Test command err: RandomSeed# 11496225307477682420 SEND TEvPut with key [1:1:0:0:0:131072:0] TEvPutResult: TEvPutResult {Id# [1:1:0:0:0:131072:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:1:0:0:32768:0] TEvPutResult: TEvPutResult {Id# [1:1:1:0:0:32768:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} === Read all 2 blob(s) === SEND TEvGet with key [1:1:0:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:0:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:1:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:1:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:1:0:0:1:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:1:0:0:1:0] NODATA Size# 0}} Setting VDisk read-only to 1 for position 0 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:0:0] SEND TEvPut with key [1:1:2:0:0:131072:0] 2025-05-07T08:50:08.090528Z 1 00h01m40.100000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5308:699] TEvPutResult: TEvPutResult {Id# [1:1:2:0:0:131072:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} 2025-05-07T08:50:08.095812Z 1 00h01m40.100000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5308:699] SEND TEvGet with key [1:1:2:0:0:1:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:2:0:0:1:0] NODATA Size# 0}} Setting VDisk read-only to 1 for position 1 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:1:0] SEND TEvPut with key [1:1:3:0:0:32768:0] 2025-05-07T08:50:09.089228Z 1 00h03m20.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5308:699] 2025-05-07T08:50:09.090051Z 2 00h03m20.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5315:706] TEvPutResult: TEvPutResult {Id# [1:1:3:0:0:32768:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} 2025-05-07T08:50:09.654361Z 1 00h04m20.161024s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5308:699] 2025-05-07T08:50:09.654597Z 2 00h04m20.161024s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5315:706] SEND TEvGet with key [1:1:3:0:0:1:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 
{[1:1:3:0:0:1:0] NODATA Size# 0}} Setting VDisk read-only to 1 for position 2 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:2:0] SEND TEvPut with key [1:1:4:0:0:131072:0] 2025-05-07T08:50:10.094250Z 1 00h05m00.200000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5308:699] 2025-05-07T08:50:10.095837Z 2 00h05m00.200000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5315:706] 2025-05-07T08:50:10.097327Z 3 00h05m00.200000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5322:713] 2025-05-07T08:50:10.097805Z 1 00h05m00.200000s :BS_PROXY_PUT ERROR: [ebf02712e1a9aceb] Result# TEvPutResult {Id# [1:1:4:0:0:131072:0] Status# ERROR StatusFlags# { } ErrorReason# "TRestoreStrategy saw optimisticState# EBS_DISINTEGRATED GroupId# 2181038080 BlobId# [1:1:4:0:0:131072:0] Reported ErrorReasons# [ { OrderNumber# 0 VDiskId# [82000000:1:0:0:0] NodeId# 1 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 1 VDiskId# [82000000:1:0:1:0] NodeId# 2 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 2 VDiskId# [82000000:1:0:2:0] NodeId# 3 ErrorReasons# [ "VDisk is in read-only mode", ] } ] Part situations# [ { OrderNumber# 3 Situations# PUUUUU } { OrderNumber# 4 Situations# UPUUUU } { OrderNumber# 5 Situations# UUPUUU } { OrderNumber# 6 Situations# UUUPUU } { OrderNumber# 7 Situations# UUUUPU } { OrderNumber# 0 Situations# UUUUUE } { OrderNumber# 1 Situations# UUUUUE } { OrderNumber# 2 Situations# UUUUUE } ] " ApproximateFreeSpaceShare# 0.999988} GroupId# 2181038080 Marker# BPP12 TEvPutResult: TEvPutResult {Id# [1:1:4:0:0:131072:0] Status# ERROR StatusFlags# { } ErrorReason# "TRestoreStrategy saw optimisticState# EBS_DISINTEGRATED GroupId# 2181038080 BlobId# [1:1:4:0:0:131072:0] Reported ErrorReasons# [ { OrderNumber# 0 VDiskId# [82000000:1:0:0:0] NodeId# 1 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 1 VDiskId# [82000000:1:0:1:0] NodeId# 2 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 2 VDiskId# [82000000:1:0:2:0] NodeId# 3 ErrorReasons# [ "VDisk is in read-only mode", ] } ] Part situations# [ { OrderNumber# 3 Situations# PUUUUU } { OrderNumber# 4 Situations# UPUUUU } { OrderNumber# 5 Situations# UUPUUU } { OrderNumber# 6 Situations# UUUPUU } { OrderNumber# 7 Situations# UUUUPU } { OrderNumber# 0 Situations# UUUUUE } { OrderNumber# 1 Situations# UUUUUE } { OrderNumber# 2 Situations# UUUUUE } ] " ApproximateFreeSpaceShare# 0.999988} 2025-05-07T08:50:10.761140Z 1 00h06m00.210512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5308:699] 2025-05-07T08:50:10.761441Z 2 00h06m00.210512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5315:706] 2025-05-07T08:50:10.761511Z 3 00h06m00.210512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5322:713] === Putting VDisk #3 to read-only === Setting VDisk read-only to 1 for position 3 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:3:0] 2025-05-07T08:50:11.675410Z 1 00h07m40.260512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5308:699] 2025-05-07T08:50:11.675662Z 2 00h07m40.260512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# 
[1:5315:706]
2025-05-07T08:50:11.675730Z 3 00h07m40.260512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5322:713]
2025-05-07T08:50:11.675799Z 4 00h07m40.260512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) Unavailable in read-only Sender# [1:5329:720]
=== Putting VDisk #4 to read-only ===
Setting VDisk read-only to 1 for position 4
Invoking SetVDiskReadOnly for vdisk [82000000:1:0:4:0]
2025-05-07T08:50:12.062077Z 1 00h08m20.262048s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5308:699]
2025-05-07T08:50:12.062344Z 2 00h08m20.262048s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5315:706]
2025-05-07T08:50:12.062417Z 3 00h08m20.262048s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5322:713]
2025-05-07T08:50:12.062477Z 4 00h08m20.262048s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) Unavailable in read-only Sender# [1:5329:720]
2025-05-07T08:50:12.062536Z 5 00h08m20.262048s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) Unavailable in read-only Sender# [1:5336:727]
=== Putting VDisk #5 to read-only ===
Setting VDisk read-only to 1 for position 5
Invoking SetVDiskReadOnly for vdisk [82000000:1:0:5:0]
2025-05-07T08:50:12.454884Z 1 00h09m00.310512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5308:699]
2025-05-07T08:50:12.455136Z 2 00h09m00.310512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5315:706]
2025-05-07T08:50:12.455201Z 3 00h09m00.310512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5322:713]
2025-05-07T08:50:12.455259Z 4 00h09m00.310512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) Unavailable in read-only Sender# [1:5329:720]
2025-05-07T08:50:12.455317Z 5 00h09m00.310512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) Unavailable in read-only Sender# [1:5336:727]
2025-05-07T08:50:12.455371Z 6 00h09m00.310512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) Unavailable in read-only Sender# [1:5343:734]
=== Putting VDisk #6 to read-only ===
Setting VDisk read-only to 1 for position 6
Invoking SetVDiskReadOnly for vdisk [82000000:1:0:6:0]
2025-05-07T08:50:12.760570Z 1 00h09m40.312048s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5308:699]
2025-05-07T08:50:12.760834Z 2 00h09m40.312048s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5315:706]
2025-05-07T08:50:12.760906Z 3 00h09m40.312048s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5322:713]
2025-05-07T08:50:12.760971Z 4 00h09m40.312048s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) Unavailable in read-only Sender# [1:5329:720]
2025-05-07T08:50:12.761042Z 5 00h09m40.312048s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) Unavailable in read-only Sender# [1:5336:727]
2025-05-07T08:50:12.761107Z 6 00h09m40.312048s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) Unavailable in read-only Sender# [1:5343:734]
2025-05-07T08:50:12.761170Z 7 00h09m40.312048s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) Unavailable in read-only Sender# [1:5350:741]
=== Putting VDisk #0 to normal ===
Setting VDisk read-only to 0 for position 0
Invoking SetVDiskReadOnly for vdisk [82000000:1:0:0:0]
2025-05-07T08:50:13.069791Z 2 00h10m20.360512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5315:706]
2025-05-07T08:50:13.069934Z 3 00h10m20.360512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5322:713]
2025-05-07T08:50:13.070030Z 4 00h10m20.360512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) Unavailable in read-only Sender# [1:5329:720]
2025-05-07T08:50:13.070094Z 5 00h10m20.360512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) Unavailable in read-only Sender# [1:5336:727]
2025-05-07T08:50:13.070160Z 6 00h10m20.360512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) Unavailable in read-only Sender# [1:5343:734]
2025-05-07T08:50:13.070225Z 7 00h10m20.360512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) Unavailable in read-only Sender# [1:5350:741]
=== Putting VDisk #1 to normal ===
Setting VDisk read-only to 0 for position 1
Invoking SetVDiskReadOnly for vdisk [82000000:1:0:1:0]
2025-05-07T08:50:13.435226Z 3 00h11m00.400000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5322:713]
2025-05-07T08:50:13.435343Z 4 00h11m00.400000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) Unavailable in read-only Sender# [1:5329:720]
2025-05-07T08:50:13.435410Z 5 00h11m00.400000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) Unavailable in read-only Sender# [1:5336:727]
2025-05-07T08:50:13.435473Z 6 00h11m00.400000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) Unavailable in read-only Sender# [1:5343:734]
2025-05-07T08:50:13.435533Z 7 00h11m00.400000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) Unavailable in read-only Sender# [1:5350:741]
=== Putting VDisk #2 to normal ===
Setting VDisk read-only to 0 for position 2
Invoking SetVDiskReadOnly for vdisk [82000000:1:0:2:0]
2025-05-07T08:50:13.830448Z 4 00h11m40.410512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) Unavailable in read-only Sender# [1:5329:720]
2025-05-07T08:50:13.830532Z 5 00h11m40.410512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) Unavailable in read-only Sender# [1:5336:727]
2025-05-07T08:50:13.830578Z 6 00h11m40.410512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) Unavailable in read-only Sender# [1:5343:734]
2025-05-07T08:50:13.830621Z 7 00h11m40.410512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) Unavailable in read-only Sender# [1:5350:741]
=== Putting VDisk #3 to normal ===
Setting VDisk read-only to 0 for position 3
Invoking SetVDiskReadOnly for vdisk [82000000:1:0:3:0]
2025-05-07T08:50:14.308692Z 5 00h12m20.450512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) Unavailable in read-only Sender# [1:5336:727]
2025-05-07T08:50:14.308807Z 6 00h12m20.450512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) Unavailable in read-only Sender# [1:5343:734]
2025-05-07T08:50:14.308869Z 7 00h12m20.450512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) Unavailable in read-only Sender# [1:5350:741]
Setting VDisk read-only to 0 for position 4
Invoking SetVDiskReadOnly for vdisk [82000000:1:0:4:0]
2025-05-07T08:50:15.721712Z 6 00h14m00.461536s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) Unavailable in read-only Sender# [1:5343:734]
2025-05-07T08:50:15.721836Z 7 00h14m00.461536s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) Unavailable in read-only Sender# [1:5350:741]
SEND TEvGet with key [1:1:3:0:0:1:0]
TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:3:0:0:1:0] NODATA Size# 0}}
Setting VDisk read-only to 0 for position 5
Invoking SetVDiskReadOnly for vdisk [82000000:1:0:5:0]
2025-05-07T08:50:16.279628Z 7 00h14m40.500000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) Unavailable in read-only Sender# [1:5350:741]
SEND TEvGet with key [1:1:3:0:0:1:0]
TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:3:0:0:1:0] NODATA Size# 0}}
Setting VDisk read-only to 0 for position 6
Invoking SetVDiskReadOnly for vdisk [82000000:1:0:6:0]
SEND TEvGet with key [1:1:3:0:0:1:0]
TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:3:0:0:1:0] NODATA Size# 0}}
SEND TEvPut with key [1:1:4:0:0:131072:0]
TEvPutResult: TEvPutResult {Id# [1:1:4:0:0:131072:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999976}
SEND TEvGet with key [1:1:4:0:0:1:0]
TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:4:0:0:1:0] NODATA Size# 0}}
>> Normalizers::EmptyTablesNormalizer [GOOD]
>> TColumnShardTestReadWrite::WriteStandaloneExoticTypes [GOOD]
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> TColumnShardTestReadWrite::RebootWriteReadStandalone [GOOD]
Test command err:
2025-05-07T08:50:09.094599Z node 1 :BLOB_CACHE NOTICE: ctor_logger.h:56: MaxCacheDataSize: 20971520 InFlightDataSize: 0
2025-05-07T08:50:09.204829Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor;
2025-05-07T08:50:09.230297Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished;
2025-05-07T08:50:09.230652Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184
2025-05-07T08:50:09.239595Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0;
2025-05-07T08:50:09.239878Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules;
2025-05-07T08:50:09.240136Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks;
2025-05-07T08:50:09.240268Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner;
2025-05-07T08:50:09.240403Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784:
tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-05-07T08:50:09.240523Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanInsertionDedup; 2025-05-07T08:50:09.240641Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-05-07T08:50:09.240803Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-05-07T08:50:09.240946Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-05-07T08:50:09.241101Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-05-07T08:50:09.241216Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-05-07T08:50:09.241325Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-05-07T08:50:09.271333Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-05-07T08:50:09.271526Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:137;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=11;current_normalizer=CLASS_NAME=Granules; 2025-05-07T08:50:09.271617Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-05-07T08:50:09.271839Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-05-07T08:50:09.272046Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-05-07T08:50:09.272154Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-05-07T08:50:09.272207Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-05-07T08:50:09.272375Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-05-07T08:50:09.272452Z node 1 
:TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-05-07T08:50:09.272528Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-05-07T08:50:09.272570Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-05-07T08:50:09.272787Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-05-07T08:50:09.272869Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-05-07T08:50:09.272918Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-05-07T08:50:09.272964Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-05-07T08:50:09.273065Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-05-07T08:50:09.273132Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-05-07T08:50:09.273178Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanInsertionDedup;id=CleanInsertionDedup; 2025-05-07T08:50:09.273215Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=8;type=CleanInsertionDedup; 2025-05-07T08:50:09.273319Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanInsertionDedup;id=8; 2025-05-07T08:50:09.273378Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-05-07T08:50:09.273413Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-05-07T08:50:09.273486Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-05-07T08:50:09.273531Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-05-07T08:50:09.273567Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-05-07T08:50:09.273857Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-05-07T08:50:09.273929Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-05-07T08:50:09.274021Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-05-07T08:50:09.274275Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-05-07T08:50:09.274337Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-05-07T08:50:09.274373Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-05-07T08:50:09.274526Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-05-07T08:50:09.274578Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-05-07T08:50:09.274619Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-05-07T08:50:09.274754Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-05-07T08:50:09.274884Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-05-07T08:50:09.274941Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-05-07T08:50:09.274981Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-05-07T08:50:09.275465Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=61; 2025-05-07T08:50:09.275574Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=43; ... 
equest_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-05-07T08:50:17.897024Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:1056:2926];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:73;event=DoExtractReadyResults;result=0;count=0;finished=1; 2025-05-07T08:50:17.897202Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:1056:2926];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:229;stage=ready result;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;);columns=10;rows=31; 2025-05-07T08:50:17.897322Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:1056:2926];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:249;stage=data_format;batch_size=1984;num_rows=31;batch_columns=timestamp,resource_type,resource_id,uid,level,message,json_payload,ingested_at,saved_at,request_id; 2025-05-07T08:50:17.897695Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:1056:2926];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:365;event=send_data;compute_actor_id=[1:1055:2925];bytes=1984;rows=31;faults=0;finished=0;fault=0;schema=timestamp: timestamp[us] resource_type: string resource_id: string uid: string level: int32 message: string json_payload: string ingested_at: timestamp[us] saved_at: timestamp[us] request_id: string; 2025-05-07T08:50:17.897888Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:1056:2926];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:269;stage=finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-05-07T08:50:17.898056Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:1056:2926];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce 
result;fline=actor.cpp:187;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-05-07T08:50:17.898209Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:1056:2926];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-05-07T08:50:17.898488Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:1056:2926];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=actor.cpp:104;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-05-07T08:50:17.898657Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:1056:2926];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:187;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-05-07T08:50:17.898829Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:1056:2926];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-05-07T08:50:17.898877Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: actor.cpp:409: Scan [1:1056:2926] finished for tablet 9437184 2025-05-07T08:50:17.899524Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: 
TEST_STEP=11;SelfId=[1:1056:2926];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=actor.cpp:415;event=scan_finish;compute_actor_id=[1:1055:2925];stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.002},{"events":["l_bootstrap"],"t":0.004},{"events":["f_processing","f_task_result"],"t":0.007},{"events":["f_ack","l_task_result"],"t":0.019},{"events":["l_ProduceResults","f_Finish"],"t":0.021},{"events":["l_ack","l_processing","l_Finish"],"t":0.022}],"full":{"a":1746607817876935,"name":"_full_task","f":1746607817876935,"d_finished":0,"c":0,"l":1746607817898950,"d":22015},"events":[{"name":"bootstrap","f":1746607817877226,"d_finished":4400,"c":1,"l":1746607817881626,"d":4400},{"a":1746607817898455,"name":"ack","f":1746607817896651,"d_finished":1593,"c":1,"l":1746607817898244,"d":2088},{"a":1746607817898438,"name":"processing","f":1746607817884467,"d_finished":8379,"c":10,"l":1746607817898247,"d":8891},{"name":"ProduceResults","f":1746607817879932,"d_finished":4393,"c":13,"l":1746607817898861,"d":4393},{"a":1746607817898864,"name":"Finish","f":1746607817898864,"d_finished":0,"c":0,"l":1746607817898950,"d":86},{"name":"task_result","f":1746607817884492,"d_finished":6588,"c":9,"l":1746607817896420,"d":6588}],"id":"9437184::12"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-05-07T08:50:17.899631Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:1056:2926];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=actor.cpp:365;event=send_data;compute_actor_id=[1:1055:2925];bytes=0;rows=0;faults=0;finished=1;fault=0;schema=; 2025-05-07T08:50:17.900161Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: 
TEST_STEP=11;SelfId=[1:1056:2926];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=actor.cpp:370;event=scan_finished;compute_actor_id=[1:1055:2925];packs_sum=0;bytes=0;bytes_sum=0;rows=0;rows_sum=0;faults=0;finished=1;fault=0;stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.002},{"events":["l_bootstrap"],"t":0.004},{"events":["f_processing","f_task_result"],"t":0.007},{"events":["f_ack","l_task_result"],"t":0.019},{"events":["l_ProduceResults","f_Finish"],"t":0.021},{"events":["l_ack","l_processing","l_Finish"],"t":0.022}],"full":{"a":1746607817876935,"name":"_full_task","f":1746607817876935,"d_finished":0,"c":0,"l":1746607817899689,"d":22754},"events":[{"name":"bootstrap","f":1746607817877226,"d_finished":4400,"c":1,"l":1746607817881626,"d":4400},{"a":1746607817898455,"name":"ack","f":1746607817896651,"d_finished":1593,"c":1,"l":1746607817898244,"d":2827},{"a":1746607817898438,"name":"processing","f":1746607817884467,"d_finished":8379,"c":10,"l":1746607817898247,"d":9630},{"name":"ProduceResults","f":1746607817879932,"d_finished":4393,"c":13,"l":1746607817898861,"d":4393},{"a":1746607817898864,"name":"Finish","f":1746607817898864,"d_finished":0,"c":0,"l":1746607817899689,"d":825},{"name":"task_result","f":1746607817884492,"d_finished":6588,"c":9,"l":1746607817896420,"d":6588}],"id":"9437184::12"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-05-07T08:50:17.900258Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:1056:2926];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=stats.cpp:8;event=statistic;begin=2025-05-07T08:50:17.876129Z;index_granules=0;index_portions=1;index_batches=2;committed_batches=0;schema_columns=10;filter_columns=0;additional_columns=0;compacted_portions_bytes=0;inserted_portions_bytes=10280;committed_portions_bytes=0;data_filter_bytes=0;data_additional_bytes=0;delta_bytes=10280;selected_rows=0; 2025-05-07T08:50:17.900313Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:1056:2926];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=read_context.h:189;event=scan_aborted;reason=unexpected on destructor; 2025-05-07T08:50:17.900780Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: TEST_STEP=11;SelfId=[1:1056:2926];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=context.h:81;fetching=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;; >> TColumnShardTestReadWrite::WriteReadNoCompression [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> 
TColumnShardTestReadWrite::WriteReadStandaloneExoticTypes [GOOD] Test command err: 2025-05-07T08:50:11.906377Z node 1 :BLOB_CACHE NOTICE: ctor_logger.h:56: MaxCacheDataSize: 20971520 InFlightDataSize: 0 2025-05-07T08:50:12.018079Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-05-07T08:50:12.042956Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-05-07T08:50:12.043217Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-05-07T08:50:12.050246Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-05-07T08:50:12.050492Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-05-07T08:50:12.050807Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-05-07T08:50:12.050949Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-05-07T08:50:12.051102Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-05-07T08:50:12.051228Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanInsertionDedup; 2025-05-07T08:50:12.051342Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-05-07T08:50:12.051454Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-05-07T08:50:12.051590Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-05-07T08:50:12.051762Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-05-07T08:50:12.051880Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-05-07T08:50:12.051983Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-05-07T08:50:12.082044Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-05-07T08:50:12.082223Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:137;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=11;current_normalizer=CLASS_NAME=Granules; 2025-05-07T08:50:12.082279Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-05-07T08:50:12.082508Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-05-07T08:50:12.082692Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-05-07T08:50:12.082793Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-05-07T08:50:12.082843Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-05-07T08:50:12.082937Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-05-07T08:50:12.083004Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-05-07T08:50:12.083057Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-05-07T08:50:12.083095Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-05-07T08:50:12.083283Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-05-07T08:50:12.083361Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-05-07T08:50:12.083422Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-05-07T08:50:12.083464Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-05-07T08:50:12.083559Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-05-07T08:50:12.083631Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-05-07T08:50:12.083676Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanInsertionDedup;id=CleanInsertionDedup; 2025-05-07T08:50:12.083708Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=8;type=CleanInsertionDedup; 2025-05-07T08:50:12.083785Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanInsertionDedup;id=8; 2025-05-07T08:50:12.083825Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-05-07T08:50:12.083853Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-05-07T08:50:12.083915Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-05-07T08:50:12.083953Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-05-07T08:50:12.083984Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-05-07T08:50:12.084233Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-05-07T08:50:12.084301Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-05-07T08:50:12.084347Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-05-07T08:50:12.084576Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-05-07T08:50:12.084634Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-05-07T08:50:12.084666Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-05-07T08:50:12.084813Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-05-07T08:50:12.084861Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-05-07T08:50:12.084898Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-05-07T08:50:12.084981Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-05-07T08:50:12.085052Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-05-07T08:50:12.085091Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-05-07T08:50:12.085122Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-05-07T08:50:12.085550Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=59; 2025-05-07T08:50:12.085641Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=37; ... quest_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-05-07T08:50:17.997569Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:423:2438];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:73;event=DoExtractReadyResults;result=0;count=0;finished=1; 2025-05-07T08:50:17.997737Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:423:2438];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:229;stage=ready result;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;);columns=10;rows=31; 2025-05-07T08:50:17.997845Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:423:2438];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:249;stage=data_format;batch_size=2759;num_rows=31;batch_columns=timestamp,resource_type,resource_id,uid,level,message,json_payload,ingested_at,saved_at,request_id; 2025-05-07T08:50:17.998257Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:423:2438];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce 
result;fline=actor.cpp:365;event=send_data;compute_actor_id=[1:422:2437];bytes=2759;rows=31;faults=0;finished=0;fault=0;schema=timestamp: timestamp[us] resource_type: string resource_id: string uid: string level: int32 message: binary json_payload: binary ingested_at: timestamp[us] saved_at: timestamp[us] request_id: binary; 2025-05-07T08:50:17.998458Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:423:2438];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:269;stage=finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-05-07T08:50:17.998644Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:423:2438];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:187;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-05-07T08:50:17.998836Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:423:2438];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-05-07T08:50:17.999147Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:423:2438];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=actor.cpp:104;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-05-07T08:50:17.999316Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:423:2438];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce 
result;fline=actor.cpp:187;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-05-07T08:50:17.999473Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:423:2438];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-05-07T08:50:17.999530Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: actor.cpp:409: Scan [1:423:2438] finished for tablet 9437184 2025-05-07T08:50:18.000147Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: TEST_STEP=11;SelfId=[1:423:2438];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=actor.cpp:415;event=scan_finish;compute_actor_id=[1:422:2437];stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.002},{"events":["l_bootstrap"],"t":0.004},{"events":["f_processing","f_task_result"],"t":0.006},{"events":["l_task_result"],"t":0.016},{"events":["f_ack"],"t":0.017},{"events":["l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.019}],"full":{"a":1746607817980043,"name":"_full_task","f":1746607817980043,"d_finished":0,"c":0,"l":1746607817999602,"d":19559},"events":[{"name":"bootstrap","f":1746607817980311,"d_finished":4097,"c":1,"l":1746607817984408,"d":4097},{"a":1746607817999095,"name":"ack","f":1746607817997193,"d_finished":1688,"c":1,"l":1746607817998881,"d":2195},{"a":1746607817999078,"name":"processing","f":1746607817986134,"d_finished":7705,"c":10,"l":1746607817998884,"d":8229},{"name":"ProduceResults","f":1746607817982725,"d_finished":4032,"c":13,"l":1746607817999509,"d":4032},{"a":1746607817999514,"name":"Finish","f":1746607817999514,"d_finished":0,"c":0,"l":1746607817999602,"d":88},{"name":"task_result","f":1746607817986154,"d_finished":5858,"c":9,"l":1746607817996972,"d":5858}],"id":"9437184::12"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-05-07T08:50:18.000261Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: 
TEST_STEP=11;SelfId=[1:423:2438];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=actor.cpp:365;event=send_data;compute_actor_id=[1:422:2437];bytes=0;rows=0;faults=0;finished=1;fault=0;schema=; 2025-05-07T08:50:18.000816Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: TEST_STEP=11;SelfId=[1:423:2438];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=actor.cpp:370;event=scan_finished;compute_actor_id=[1:422:2437];packs_sum=0;bytes=0;bytes_sum=0;rows=0;rows_sum=0;faults=0;finished=1;fault=0;stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.002},{"events":["l_bootstrap"],"t":0.004},{"events":["f_processing","f_task_result"],"t":0.006},{"events":["l_task_result"],"t":0.016},{"events":["f_ack"],"t":0.017},{"events":["l_ProduceResults","f_Finish"],"t":0.019},{"events":["l_ack","l_processing","l_Finish"],"t":0.02}],"full":{"a":1746607817980043,"name":"_full_task","f":1746607817980043,"d_finished":0,"c":0,"l":1746607818000313,"d":20270},"events":[{"name":"bootstrap","f":1746607817980311,"d_finished":4097,"c":1,"l":1746607817984408,"d":4097},{"a":1746607817999095,"name":"ack","f":1746607817997193,"d_finished":1688,"c":1,"l":1746607817998881,"d":2906},{"a":1746607817999078,"name":"processing","f":1746607817986134,"d_finished":7705,"c":10,"l":1746607817998884,"d":8940},{"name":"ProduceResults","f":1746607817982725,"d_finished":4032,"c":13,"l":1746607817999509,"d":4032},{"a":1746607817999514,"name":"Finish","f":1746607817999514,"d_finished":0,"c":0,"l":1746607818000313,"d":799},{"name":"task_result","f":1746607817986154,"d_finished":5858,"c":9,"l":1746607817996972,"d":5858}],"id":"9437184::12"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-05-07T08:50:18.000916Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:423:2438];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=stats.cpp:8;event=statistic;begin=2025-05-07T08:50:17.979233Z;index_granules=0;index_portions=1;index_batches=2;committed_batches=0;schema_columns=10;filter_columns=0;additional_columns=0;compacted_portions_bytes=0;inserted_portions_bytes=13240;committed_portions_bytes=0;data_filter_bytes=0;data_additional_bytes=0;delta_bytes=13240;selected_rows=0; 2025-05-07T08:50:18.000992Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:423:2438];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=read_context.h:189;event=scan_aborted;reason=unexpected on destructor; 2025-05-07T08:50:18.001444Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: 
TEST_STEP=11;SelfId=[1:423:2438];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=context.h:81;fetching=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;
------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_read_only_vdisk/unittest >> ReadOnlyVDisk::TestReads [GOOD]
Test command err:
RandomSeed# 11002893752350407516
=== Trying to put and get a blob ===
SEND TEvPut with key [1:1:0:0:0:131072:0]
TEvPutResult: TEvPutResult {Id# [1:1:0:0:0:131072:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988}
=== Read all 1 blob(s) ===
SEND TEvGet with key [1:1:0:0:0:131072:0]
TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:0:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}}
=== Putting VDisk #0 to read-only ===
Setting VDisk read-only to 1 for position 0
Invoking SetVDiskReadOnly for vdisk [82000000:1:0:0:0]
=== Read all 1 blob(s) ===
SEND TEvGet with key [1:1:0:0:0:131072:0]
TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:0:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}}
=== Putting VDisk #1 to read-only ===
Setting VDisk read-only to 1 for position 1
Invoking SetVDiskReadOnly for vdisk [82000000:1:0:1:0]
=== Read all 1 blob(s) ===
SEND TEvGet with key [1:1:0:0:0:131072:0]
TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:0:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}}
=== Putting VDisk #2 to read-only ===
Setting VDisk read-only to 1 for position 2
Invoking SetVDiskReadOnly for vdisk [82000000:1:0:2:0]
=== Read all 1 blob(s) ===
SEND TEvGet with key [1:1:0:0:0:131072:0]
TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:0:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}}
=== Putting VDisk #3 to read-only ===
Setting VDisk read-only to 1 for position 3
Invoking SetVDiskReadOnly for vdisk [82000000:1:0:3:0]
=== Read all 1 blob(s) ===
SEND TEvGet with key [1:1:0:0:0:131072:0]
TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:0:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}}
=== Putting VDisk #4 to read-only ===
Setting VDisk read-only to 1 for position 4
Invoking SetVDiskReadOnly for vdisk [82000000:1:0:4:0]
=== Read all 1 blob(s) ===
SEND TEvGet with key [1:1:0:0:0:131072:0]
TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:0:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}}
=== Putting VDisk #5 to read-only ===
Setting VDisk read-only to 1 for position 5
Invoking SetVDiskReadOnly for vdisk [82000000:1:0:5:0]
=== Read all 1 blob(s) ===
SEND TEvGet with key [1:1:0:0:0:131072:0]
TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:0:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}}
=== Putting VDisk #6 to read-only ===
Setting VDisk read-only to 1 for position 6
Invoking SetVDiskReadOnly for vdisk [82000000:1:0:6:0]
=== Read all 1 blob(s) ===
SEND TEvGet with key [1:1:0:0:0:131072:0]
TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:0:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}}
=== Restoring to normal VDisk #0 ===
Setting VDisk read-only to 0 for position 0
Invoking SetVDiskReadOnly for vdisk [82000000:1:0:0:0]
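The cycle above repeats one invariant per position: flip a VDisk to read-only, re-read the blob, and expect TEvGetResult OK, since read-only VDisks still serve reads. Below is a minimal, self-contained model of that loop; the types and the write-quorum threshold are illustrative assumptions, not the actual YDB test harness.

// A hypothetical stand-in for the toggle-and-verify loop in this transcript;
// the real harness lives in ydb/core/blobstorage/ut_blobstorage/ut_read_only_vdisk.
#include <algorithm>
#include <array>
#include <cstdio>

int main() {
    constexpr int kVDisks = 7;             // positions 0..6 in the transcript
    std::array<bool, kVDisks> readOnly{};  // all writable at the start
    const bool blobStored = true;          // the initial TEvPut succeeded

    // Read-only VDisks still serve reads, so a stored blob stays readable
    // at every step of the cycle.
    auto getOk = [&]() { return blobStored; };
    // Writes need enough writable disks; the threshold here is an assumed
    // illustrative quorum, not the real erasure-specific value.
    auto putOk = [&]() {
        return std::count(readOnly.begin(), readOnly.end(), false) >= 6;
    };

    for (int pos = 0; pos < kVDisks; ++pos) {
        std::printf("=== Putting VDisk #%d to read-only ===\n", pos);
        readOnly[pos] = true;              // SetVDiskReadOnly(vdisk, 1)
        std::printf("TEvGetResult: %s\n", getOk() ? "OK" : "ERROR");
    }
    for (int pos = 0; pos < kVDisks; ++pos) {
        std::printf("=== Restoring to normal VDisk #%d ===\n", pos);
        readOnly[pos] = false;             // SetVDiskReadOnly(vdisk, 0)
        std::printf("TEvGetResult: %s\n", getOk() ? "OK" : "ERROR");
    }
    std::printf("TEvPutResult: %s\n", putOk() ? "OK" : "ERROR");
    return 0;
}

The design point the transcript demonstrates is that read availability survives even with every VDisk in the group switched to read-only, while the BS_SKELETON "Unavailable in read-only" errors in the surrounding logs appear to reflect the write path being rejected until disks are restored.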
=== Read all 1 blob(s) ===
SEND TEvGet with key [1:1:0:0:0:131072:0]
TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:0:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}}
=== Restoring to normal VDisk #1 ===
Setting VDisk read-only to 0 for position 1
Invoking SetVDiskReadOnly for vdisk [82000000:1:0:1:0]
=== Read all 1 blob(s) ===
SEND TEvGet with key [1:1:0:0:0:131072:0]
TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:0:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}}
=== Restoring to normal VDisk #2 ===
Setting VDisk read-only to 0 for position 2
Invoking SetVDiskReadOnly for vdisk [82000000:1:0:2:0]
=== Read all 1 blob(s) ===
SEND TEvGet with key [1:1:0:0:0:131072:0]
TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:0:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}}
=== Restoring to normal VDisk #3 ===
Setting VDisk read-only to 0 for position 3
Invoking SetVDiskReadOnly for vdisk [82000000:1:0:3:0]
=== Read all 1 blob(s) ===
SEND TEvGet with key [1:1:0:0:0:131072:0]
TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:0:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}}
=== Restoring to normal VDisk #4 ===
Setting VDisk read-only to 0 for position 4
Invoking SetVDiskReadOnly for vdisk [82000000:1:0:4:0]
=== Read all 1 blob(s) ===
SEND TEvGet with key [1:1:0:0:0:131072:0]
TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:0:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}}
=== Restoring to normal VDisk #5 ===
Setting VDisk read-only to 0 for position 5
Invoking SetVDiskReadOnly for vdisk [82000000:1:0:5:0]
=== Read all 1 blob(s) ===
SEND TEvGet with key [1:1:0:0:0:131072:0]
TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:0:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}}
=== Restoring to normal VDisk #6 ===
Setting VDisk read-only to 0 for position 6
Invoking SetVDiskReadOnly for vdisk [82000000:1:0:6:0]
=== Read all 1 blob(s) ===
SEND TEvGet with key [1:1:0:0:0:131072:0]
TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:0:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}}
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> Normalizers::EmptyTablesNormalizer [GOOD]
Test command err:
2025-05-07T08:50:14.693106Z node 1 :BLOB_CACHE NOTICE: ctor_logger.h:56: MaxCacheDataSize: 20971520 InFlightDataSize: 0
2025-05-07T08:50:14.821517Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:138:2170];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor;
2025-05-07T08:50:14.850536Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:138:2170];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished;
2025-05-07T08:50:14.850868Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184
2025-05-07T08:50:14.863736Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:138:2170];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=PortionsCleaner;
2025-05-07T08:50:14.864151Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:138:2170];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=NO_VALUE_OPTIONAL;
2025-05-07T08:50:14.864351Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784:
tablet_id=9437184;self_id=[1:138:2170];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-05-07T08:50:14.864555Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:138:2170];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-05-07T08:50:14.864711Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:138:2170];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-05-07T08:50:14.864813Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:138:2170];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-05-07T08:50:14.864891Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:138:2170];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanInsertionDedup; 2025-05-07T08:50:14.864957Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:138:2170];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-05-07T08:50:14.865024Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:138:2170];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-05-07T08:50:14.865114Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:138:2170];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-05-07T08:50:14.865200Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:138:2170];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-05-07T08:50:14.865304Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:138:2170];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-05-07T08:50:14.865382Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:138:2170];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-05-07T08:50:14.899627Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-05-07T08:50:14.901423Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:137;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=12;current_normalizer=CLASS_NAME=PortionsCleaner; 2025-05-07T08:50:14.901500Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=NO_VALUE_OPTIONAL;type=NO_VALUE_OPTIONAL; 2025-05-07T08:50:14.902080Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=111; 2025-05-07T08:50:14.902277Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=49; 2025-05-07T08:50:14.902393Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=40; 2025-05-07T08:50:14.902486Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=42; 2025-05-07T08:50:14.902668Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=PortionsCleaner;id=NO_VALUE_OPTIONAL; 2025-05-07T08:50:14.902748Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Granules;id=Granules; 2025-05-07T08:50:14.902827Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=1;type=Granules; 2025-05-07T08:50:14.902987Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-05-07T08:50:14.903079Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-05-07T08:50:14.903147Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-05-07T08:50:14.903241Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=2;type=Chunks; 2025-05-07T08:50:14.903393Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-05-07T08:50:14.903468Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-05-07T08:50:14.903515Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-05-07T08:50:14.903549Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=4;type=TablesCleaner; 2025-05-07T08:50:14.903730Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-05-07T08:50:14.903799Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-05-07T08:50:14.903858Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-05-07T08:50:14.903896Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=6;type=CleanGranuleId; 2025-05-07T08:50:14.904011Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-05-07T08:50:14.904088Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-05-07T08:50:14.904129Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanInsertionDedup;id=CleanInsertionDedup; 2025-05-07T08:50:14.904167Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=8;type=CleanInsertionDedup; 2025-05-07T08:50:14.904281Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanInsertionDedup;id=8; 2025-05-07T08:50:14.904337Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-05-07T08:50:14.904384Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=9;type=GCCountersNormalizer; 2025-05-07T08:50:14.904459Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-05-07T08:50:14.904508Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-05-07T08:50:14.904538Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=11;type=SyncPortionFromChunks; 2025-05-07T08:50:14.904770Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-05-07T08:50:14.904837Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-05-07T08:50:14.904874Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=15;type=RestoreV1Chunks_V2; 2025-05-07T08:50:14.905098Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-05-07T08:50:14.905145Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 
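
The normalizer pass traced above follows a fixed cycle: each normalizer is registered under a CLASS_NAME, initialized against the last persisted seq_id, executed (reporting "N chunks found"), and marked finished before the chain switches to the next one. The following is a minimal sketch of that cycle; INormalizer, TNoopNormalizer, and everything else here are hypothetical names for illustration, not YDB's actual interface.

#include <iostream>
#include <memory>
#include <optional>
#include <string>
#include <vector>

// Hypothetical stand-in for one schema normalizer; not YDB's real interface.
struct INormalizer {
    virtual ~INormalizer() = default;
    virtual std::string Name() const = 0;
    virtual int SeqId() const = 0;
    virtual std::size_t Run() = 0;  // returns the number of chunks it touched
};

struct TNoopNormalizer final : INormalizer {
    std::string ClassName;
    int Seq;
    TNoopNormalizer(std::string name, int seq) : ClassName(std::move(name)), Seq(seq) {}
    std::string Name() const override { return ClassName; }
    int SeqId() const override { return Seq; }
    std::size_t Run() override { return 0; }  // "0 chunks found", as in the log
};

int main() {
    // Registration order mirrors the CLASS_NAME sequence in the log; the
    // seq_id gaps (1, 2, 4, 6, ...) are copied from it as-is.
    std::vector<std::unique_ptr<INormalizer>> chain;
    chain.emplace_back(std::make_unique<TNoopNormalizer>("Granules", 1));
    chain.emplace_back(std::make_unique<TNoopNormalizer>("Chunks", 2));
    chain.emplace_back(std::make_unique<TNoopNormalizer>("TablesCleaner", 4));
    chain.emplace_back(std::make_unique<TNoopNormalizer>("CleanGranuleId", 6));

    std::optional<int> lastSavedId;  // empty == NO_VALUE_OPTIONAL (first boot)
    for (const auto& n : chain) {
        if (lastSavedId && n->SeqId() <= *lastSavedId) {
            continue;  // already applied before a restart; skip it
        }
        std::cout << "normalizer_init;type=" << n->Name() << ";seq_id=" << n->SeqId() << "\n";
        std::cout << "normalizer=" << n->Name() << ";message=" << n->Run() << " chunks found\n";
        std::cout << "normalizer_finished;id=" << n->SeqId() << "\n";
        lastSavedId = n->SeqId();  // persisted in the real tablet
    }
    return 0;
}

On a first boot there is no persisted id, so every normalizer runs; afterwards the saved id lets a restarted tablet skip the ones already applied. That matches the last=NO_VALUE_OPTIONAL seen here versus the last=0 reported by the WriteStandaloneExoticTypes and WriteReadNoCompression runs further down.
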
2025-05-07T08:50:14.905177Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=16;type=RestoreV2Chunks; 2025-05-07T08:50:14.905311Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784 ... ;tablet_id=9437184; 2025-05-07T08:50:18.619470Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Send periodic stats. 2025-05-07T08:50:18.619514Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Disabled periodic stats at tablet 9437184 2025-05-07T08:50:18.619575Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:402:2415];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:516;event=EnqueueBackgroundActivities;periodic=0; 2025-05-07T08:50:18.619661Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:402:2415];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:785;event=start_indexation_tasks;insert_overload_size=0; 2025-05-07T08:50:18.619745Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:402:2415];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:246;event=StartCleanup;portions_count=0; 2025-05-07T08:50:18.619830Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:402:2415];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:321;event=StartCleanup;portions_count=0;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-05-07T08:50:18.619900Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:402:2415];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:1061;background=cleanup;skip_reason=no_changes; 2025-05-07T08:50:18.619957Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:402:2415];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:1093;background=cleanup;skip_reason=no_changes; 2025-05-07T08:50:18.620068Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:402:2415];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:1002;background=ttl;skip_reason=no_changes; 2025-05-07T08:50:18.826185Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: EvScan txId: 111 scanId: 0 version: {1746607815775:111} readable: {1746607815775:max} at tablet 9437184 2025-05-07T08:50:18.826369Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TTxScan prepare txId: 111 scanId: 0 at tablet 9437184 2025-05-07T08:50:18.826601Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:402:2415];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=111;scan_id=0;gen=0;table=;snapshot={1746607815775:111};tablet=9437184;timeout=0.000000s;fline=program.cpp:33;event=parse_program;program=Command { Projection { Columns { Id: 1 } Columns { Id: 2 } Columns { Id: 3 } } } ; 2025-05-07T08:50:18.826705Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:402:2415];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=111;scan_id=0;gen=0;table=;snapshot={1746607815775:111};tablet=9437184;timeout=0.000000s;fline=program.cpp:102;parse_proto_program=Command { Projection { Columns { Id: 1 } Columns { Id: 2 } Columns { Id: 3 } } } ; 2025-05-07T08:50:18.827801Z node 1 :TX_COLUMNSHARD 
DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:402:2415];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=111;scan_id=0;gen=0;table=;snapshot={1746607815775:111};tablet=9437184;timeout=0.000000s;fline=program.cpp:51;event=program_parsed;result={"edges":[{"owner_id":7,"inputs":[{"from":8}]},{"owner_id":0,"inputs":[{"from":2},{"from":4},{"from":6}]},{"owner_id":8,"inputs":[]},{"owner_id":2,"inputs":[{"from":7}]},{"owner_id":4,"inputs":[{"from":7}]},{"owner_id":6,"inputs":[{"from":7}]}],"nodes":{"8":{"p":{"p":{"data":[{"name":"key1","id":1},{"name":"key2","id":2},{"name":"field","id":3}]},"o":"0","t":"ReserveMemory"},"w":0,"id":8},"2":{"p":{"i":"1","p":{"address":{"name":"key1","id":1}},"o":"1","t":"AssembleOriginalData"},"w":11,"id":2},"6":{"p":{"i":"3","p":{"address":{"name":"field","id":3}},"o":"3","t":"AssembleOriginalData"},"w":11,"id":6},"7":{"p":{"i":"0","p":{"data":[{"name":"key1","id":1},{"name":"key2","id":2},{"name":"field","id":3}]},"o":"1,2,3","t":"FetchOriginalData"},"w":6,"id":7},"4":{"p":{"i":"2","p":{"address":{"name":"key2","id":2}},"o":"2","t":"AssembleOriginalData"},"w":11,"id":4},"0":{"p":{"i":"1,2,3","t":"Projection"},"w":33,"id":0}}}; 2025-05-07T08:50:18.827965Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:402:2415];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=111;scan_id=0;gen=0;table=;snapshot={1746607815775:111};tablet=9437184;timeout=0.000000s;fline=read_metadata.h:136;filter_limit_not_detected= range{ from {+Inf} to {-Inf}}; 2025-05-07T08:50:18.828844Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:402:2415];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=111;scan_id=0;gen=0;table=;snapshot={1746607815775:111};tablet=9437184;timeout=0.000000s;fline=tx_scan.cpp:166;event=TTxScan started;actor_id=[1:463:2468];trace_detailed=; 2025-05-07T08:50:18.829597Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: fline=context.cpp:84;ff_first=(column_ids=1,2,3;column_names=field,key1,key2;);; 2025-05-07T08:50:18.829859Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: fline=context.cpp:99;columns_context_info=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;; 2025-05-07T08:50:18.830308Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:463:2468];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=actor.cpp:104;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-05-07T08:50:18.830490Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:463:2468];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:187;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;;); 2025-05-07T08:50:18.830641Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:463:2468];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=scan iterator is 
finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;;); 2025-05-07T08:50:18.830687Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: actor.cpp:409: Scan [1:463:2468] finished for tablet 9437184 2025-05-07T08:50:18.831207Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[1:463:2468];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=actor.cpp:415;event=scan_finish;compute_actor_id=[1:461:2467];stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["l_bootstrap","f_ack","f_processing","f_ProduceResults"],"t":0.001},{"events":["l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.002}],"full":{"a":1746607818828588,"name":"_full_task","f":1746607818828588,"d_finished":0,"c":0,"l":1746607818830788,"d":2200},"events":[{"name":"bootstrap","f":1746607818828980,"d_finished":1086,"c":1,"l":1746607818830066,"d":1086},{"a":1746607818830277,"name":"ack","f":1746607818830277,"d_finished":0,"c":0,"l":1746607818830788,"d":511},{"a":1746607818830250,"name":"processing","f":1746607818830250,"d_finished":0,"c":0,"l":1746607818830788,"d":538},{"name":"ProduceResults","f":1746607818830045,"d_finished":337,"c":2,"l":1746607818830670,"d":337},{"a":1746607818830673,"name":"Finish","f":1746607818830673,"d_finished":0,"c":0,"l":1746607818830788,"d":115}],"id":"9437184::2"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;;); 2025-05-07T08:50:18.831296Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:463:2468];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=actor.cpp:365;event=send_data;compute_actor_id=[1:461:2467];bytes=0;rows=0;faults=0;finished=1;fault=0;schema=; 2025-05-07T08:50:18.831777Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: 
SelfId=[1:463:2468];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=actor.cpp:370;event=scan_finished;compute_actor_id=[1:461:2467];packs_sum=0;bytes=0;bytes_sum=0;rows=0;rows_sum=0;faults=0;finished=1;fault=0;stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["l_bootstrap","f_ack","f_processing","f_ProduceResults"],"t":0.001},{"events":["l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.002}],"full":{"a":1746607818828588,"name":"_full_task","f":1746607818828588,"d_finished":0,"c":0,"l":1746607818831349,"d":2761},"events":[{"name":"bootstrap","f":1746607818828980,"d_finished":1086,"c":1,"l":1746607818830066,"d":1086},{"a":1746607818830277,"name":"ack","f":1746607818830277,"d_finished":0,"c":0,"l":1746607818831349,"d":1072},{"a":1746607818830250,"name":"processing","f":1746607818830250,"d_finished":0,"c":0,"l":1746607818831349,"d":1099},{"name":"ProduceResults","f":1746607818830045,"d_finished":337,"c":2,"l":1746607818830670,"d":337},{"a":1746607818830673,"name":"Finish","f":1746607818830673,"d_finished":0,"c":0,"l":1746607818831349,"d":676}],"id":"9437184::2"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;;); 2025-05-07T08:50:18.831883Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:463:2468];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=stats.cpp:8;event=statistic;begin=2025-05-07T08:50:18.827926Z;index_granules=0;index_portions=0;index_batches=0;committed_batches=0;schema_columns=3;filter_columns=0;additional_columns=0;compacted_portions_bytes=0;inserted_portions_bytes=0;committed_portions_bytes=0;data_filter_bytes=0;data_additional_bytes=0;delta_bytes=0;selected_rows=0; 2025-05-07T08:50:18.831968Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:463:2468];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=read_context.h:189;event=scan_aborted;reason=unexpected on destructor; 2025-05-07T08:50:18.832088Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[1:463:2468];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=context.h:81;fetching=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;; FALLBACK_ACTOR_LOGGING;priority=INFO;component=2100;fline=simple_arrays_cache.h:65;event=slice_from_cache;key=uint64;records=0;count=20048; FALLBACK_ACTOR_LOGGING;priority=INFO;component=2100;fline=simple_arrays_cache.h:65;event=slice_from_cache;key=uint64;records=0;count=20048; FALLBACK_ACTOR_LOGGING;priority=INFO;component=2100;fline=simple_arrays_cache.h:49;event=insert_to_cache;key=string;records=0;size=0; FALLBACK_ACTOR_LOGGING;priority=INFO;component=2100;fline=simple_arrays_cache.h:65;event=slice_from_cache;key=string;records=0;count=0; >> TReplicationTests::Create ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> TColumnShardTestReadWrite::WriteStandaloneExoticTypes [GOOD] Test command err: 2025-05-07T08:50:11.004493Z node 1 :BLOB_CACHE NOTICE: ctor_logger.h:56: MaxCacheDataSize: 20971520 InFlightDataSize: 0 2025-05-07T08:50:11.123302Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;self_id=[1:139:2171];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-05-07T08:50:11.149000Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-05-07T08:50:11.149307Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-05-07T08:50:11.157452Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-05-07T08:50:11.157685Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-05-07T08:50:11.157935Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-05-07T08:50:11.159271Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-05-07T08:50:11.159477Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-05-07T08:50:11.159631Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanInsertionDedup; 2025-05-07T08:50:11.159752Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-05-07T08:50:11.159893Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-05-07T08:50:11.160043Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-05-07T08:50:11.160201Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-05-07T08:50:11.160368Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-05-07T08:50:11.160491Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-05-07T08:50:11.188270Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-05-07T08:50:11.188469Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:137;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=11;current_normalizer=CLASS_NAME=Granules; 2025-05-07T08:50:11.188525Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-05-07T08:50:11.188699Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-05-07T08:50:11.188879Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-05-07T08:50:11.188968Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-05-07T08:50:11.189014Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-05-07T08:50:11.189094Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-05-07T08:50:11.189159Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-05-07T08:50:11.189209Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-05-07T08:50:11.189241Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-05-07T08:50:11.189384Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-05-07T08:50:11.189447Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-05-07T08:50:11.189487Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-05-07T08:50:11.189518Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-05-07T08:50:11.189648Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-05-07T08:50:11.189707Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-05-07T08:50:11.189775Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanInsertionDedup;id=CleanInsertionDedup; 
2025-05-07T08:50:11.189815Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=8;type=CleanInsertionDedup; 2025-05-07T08:50:11.189912Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanInsertionDedup;id=8; 2025-05-07T08:50:11.189959Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-05-07T08:50:11.190010Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-05-07T08:50:11.190087Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-05-07T08:50:11.190127Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-05-07T08:50:11.190175Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-05-07T08:50:11.190386Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-05-07T08:50:11.190432Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-05-07T08:50:11.190465Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-05-07T08:50:11.190671Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-05-07T08:50:11.190716Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-05-07T08:50:11.190747Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-05-07T08:50:11.190879Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-05-07T08:50:11.190925Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-05-07T08:50:11.190948Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 
2025-05-07T08:50:11.191029Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-05-07T08:50:11.191095Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-05-07T08:50:11.191124Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-05-07T08:50:11.191147Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-05-07T08:50:11.191501Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=50; 2025-05-07T08:50:11.191582Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=35; ... :{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"21,21,21,21,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"22,22,22,22,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"23,23,23,23,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"24,24,24,24,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"25,25,25,25,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"26,26,26,26,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"27,27,27,27,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"28,28,28,28,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"29,29,29,29,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"30,30,30,30,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"31,31,31,31,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"32,32,32,32,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"33,33,33,33,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"34,34,34,34,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"35,35,35,35,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"36,36,36,36,"}},{"i":
{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"37,37,37,37,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"38,38,38,38,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"39,39,39,39,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"40,40,40,40,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"41,41,41,41,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"42,42,42,42,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"43,43,43,43,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"44,44,44,44,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"45,45,45,45,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"46,46,46,46,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"47,47,47,47,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"48,48,48,48,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"49,49,49,49,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"50,50,50,50,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"51,51,51,51,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"52,52,52,52,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"53,53,53,53,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"54,54,54,54,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"55,55,55,55,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"56,56,56,56,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"57,57,57,57,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"58,58,58,58,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"59,59,59,59,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"
60,60,60,60,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"61,61,61,61,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"62,62,62,62,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"63,63,63,63,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"64,64,64,64,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"65,65,65,65,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"66,66,66,66,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"67,67,67,67,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"68,68,68,68,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"69,69,69,69,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"70,70,70,70,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"71,71,71,71,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"72,72,72,72,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"73,73,73,73,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"74,74,74,74,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"75,75,75,75,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"76,76,76,76,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"77,77,77,77,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"78,78,78,78,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"79,79,79,79,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"80,80,80,80,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"81,81,81,81,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"82,82,82,82,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"83,83,83,83,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p
":{"include":0,"pk":"84,84,84,84,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"85,85,85,85,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"86,86,86,86,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"87,87,87,87,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"88,88,88,88,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"89,89,89,89,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"90,90,90,90,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"91,91,91,91,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"92,92,92,92,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"93,93,93,93,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"94,94,94,94,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"95,95,95,95,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"96,96,96,96,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"97,97,97,97,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"98,98,98,98,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"99,99,99,99,"}}]}; 2025-05-07T08:50:18.773556Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;local_tx_no=30;method=complete;tx_info=TTxWrite;tablet_id=9437184;tx_state=complete;fline=columnshard_impl.cpp:785;event=start_indexation_tasks;insert_overload_size=0; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> TColumnShardTestReadWrite::WriteReadNoCompression [GOOD] Test command err: 2025-05-07T08:50:09.741567Z node 1 :BLOB_CACHE NOTICE: ctor_logger.h:56: MaxCacheDataSize: 20971520 InFlightDataSize: 0 2025-05-07T08:50:09.860075Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-05-07T08:50:09.887411Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-05-07T08:50:09.887794Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-05-07T08:50:09.895848Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 
2025-05-07T08:50:09.896099Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-05-07T08:50:09.896356Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-05-07T08:50:09.896485Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-05-07T08:50:09.896630Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-05-07T08:50:09.896753Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanInsertionDedup; 2025-05-07T08:50:09.896846Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-05-07T08:50:09.896973Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-05-07T08:50:09.897201Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-05-07T08:50:09.897389Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-05-07T08:50:09.897511Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-05-07T08:50:09.897612Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-05-07T08:50:09.929054Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-05-07T08:50:09.929249Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:137;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=11;current_normalizer=CLASS_NAME=Granules; 2025-05-07T08:50:09.929306Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-05-07T08:50:09.929537Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-05-07T08:50:09.929724Z node 1 :TX_COLUMNSHARD WARN: 
log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-05-07T08:50:09.929817Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-05-07T08:50:09.929864Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-05-07T08:50:09.930007Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-05-07T08:50:09.930083Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-05-07T08:50:09.930141Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-05-07T08:50:09.930193Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-05-07T08:50:09.930430Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-05-07T08:50:09.930528Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-05-07T08:50:09.930580Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-05-07T08:50:09.930624Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-05-07T08:50:09.930741Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-05-07T08:50:09.930837Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-05-07T08:50:09.930883Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanInsertionDedup;id=CleanInsertionDedup; 2025-05-07T08:50:09.930925Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=8;type=CleanInsertionDedup; 2025-05-07T08:50:09.931021Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanInsertionDedup;id=8; 2025-05-07T08:50:09.931075Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 
2025-05-07T08:50:09.931107Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-05-07T08:50:09.931186Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-05-07T08:50:09.931227Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-05-07T08:50:09.931266Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-05-07T08:50:09.931497Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-05-07T08:50:09.931569Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-05-07T08:50:09.931633Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-05-07T08:50:09.931897Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-05-07T08:50:09.931951Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-05-07T08:50:09.931986Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-05-07T08:50:09.932124Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-05-07T08:50:09.932168Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-05-07T08:50:09.932207Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-05-07T08:50:09.932307Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-05-07T08:50:09.932390Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-05-07T08:50:09.932441Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-05-07T08:50:09.932477Z node 1 
:TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-05-07T08:50:09.932905Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=48; 2025-05-07T08:50:09.932998Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=39; ... ;;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-05-07T08:50:18.875253Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:1055:2926];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:73;event=DoExtractReadyResults;result=0;count=0;finished=1; 2025-05-07T08:50:18.875412Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:1055:2926];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:229;stage=ready result;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;);columns=10;rows=31; 2025-05-07T08:50:18.875523Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:1055:2926];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:249;stage=data_format;batch_size=1984;num_rows=31;batch_columns=timestamp,resource_type,resource_id,uid,level,message,json_payload,ingested_at,saved_at,request_id; 2025-05-07T08:50:18.875866Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:1055:2926];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:365;event=send_data;compute_actor_id=[1:1054:2925];bytes=1984;rows=31;faults=0;finished=0;fault=0;schema=timestamp: timestamp[us] resource_type: string resource_id: string uid: string level: int32 message: string json_payload: string ingested_at: timestamp[us] saved_at: timestamp[us] request_id: string; 2025-05-07T08:50:18.876057Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:1055:2926];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:269;stage=finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 
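
Each scan_finish event above carries a stats= JSON blob with per-stage timings. The fields are consistent with "f"/"l" being first/last timestamps in epoch microseconds, "d" the bracket width (for the EmptyTablesNormalizer blob, full d=2200 equals l-f: 1746607818830788 - 1746607818828588), "d_finished" the time actually spent inside a stage, and "c" its call count. A sketch of breaking such a blob into stage shares is below; it again assumes nlohmann/json, and keeps only the fields it reads from a trimmed copy of the blob that follows.

#include <nlohmann/json.hpp>  // assumed dependency, as in the previous sketch
#include <iostream>
#include <string>

int main() {
    // Trimmed copy of a stats= blob from a scan_finish event in this log;
    // only the fields this sketch reads are kept.
    const std::string raw = R"({"full":{"name":"_full_task","d":19346},
        "events":[{"name":"bootstrap","d_finished":4156,"c":1},
                  {"name":"ack","d_finished":1620,"c":1},
                  {"name":"processing","d_finished":7220,"c":10},
                  {"name":"ProduceResults","d_finished":3936,"c":13}]})";
    const nlohmann::json stats = nlohmann::json::parse(raw);
    const double total = stats.at("full").at("d").get<double>();  // microseconds (inferred)
    for (const auto& ev : stats.at("events")) {
        const double d = ev.at("d_finished").get<double>();
        std::cout << ev.at("name").get<std::string>() << ": " << d << " us, "
                  << 100.0 * d / total << "% of the task, "
                  << ev.at("c").get<int>() << " call(s)\n";
    }
    return 0;
}

Stage brackets overlap (e.g. "processing" encloses "ProduceResults" calls), so the shares are a profiling aid rather than a partition that sums to 100%.
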
2025-05-07T08:50:18.876214Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:1055:2926];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:187;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-05-07T08:50:18.876405Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:1055:2926];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-05-07T08:50:18.876699Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:1055:2926];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=actor.cpp:104;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-05-07T08:50:18.876872Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:1055:2926];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:187;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-05-07T08:50:18.877023Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:1055:2926];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-05-07T08:50:18.877070Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: actor.cpp:409: Scan [1:1055:2926] finished for tablet 9437184 2025-05-07T08:50:18.877788Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: 
TEST_STEP=11;SelfId=[1:1055:2926];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=actor.cpp:415;event=scan_finish;compute_actor_id=[1:1054:2925];stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.002},{"events":["l_bootstrap","f_processing","f_task_result"],"t":0.004},{"events":["f_ack","l_task_result"],"t":0.016},{"events":["l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.019}],"full":{"a":1746607818857941,"name":"_full_task","f":1746607818857941,"d_finished":0,"c":0,"l":1746607818877287,"d":19346},"events":[{"name":"bootstrap","f":1746607818858240,"d_finished":4156,"c":1,"l":1746607818862396,"d":4156},{"a":1746607818876667,"name":"ack","f":1746607818874834,"d_finished":1620,"c":1,"l":1746607818876454,"d":2240},{"a":1746607818876652,"name":"processing","f":1746607818862532,"d_finished":7220,"c":10,"l":1746607818876458,"d":7855},{"name":"ProduceResults","f":1746607818860627,"d_finished":3936,"c":13,"l":1746607818877051,"d":3936},{"a":1746607818877055,"name":"Finish","f":1746607818877055,"d_finished":0,"c":0,"l":1746607818877287,"d":232},{"name":"task_result","f":1746607818862553,"d_finished":5417,"c":9,"l":1746607818874586,"d":5417}],"id":"9437184::12"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-05-07T08:50:18.877883Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:1055:2926];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=actor.cpp:365;event=send_data;compute_actor_id=[1:1054:2925];bytes=0;rows=0;faults=0;finished=1;fault=0;schema=; 2025-05-07T08:50:18.878409Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: 
TEST_STEP=11;SelfId=[1:1055:2926];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=actor.cpp:370;event=scan_finished;compute_actor_id=[1:1054:2925];packs_sum=0;bytes=0;bytes_sum=0;rows=0;rows_sum=0;faults=0;finished=1;fault=0;stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.002},{"events":["l_bootstrap","f_processing","f_task_result"],"t":0.004},{"events":["f_ack","l_task_result"],"t":0.016},{"events":["l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.019}],"full":{"a":1746607818857941,"name":"_full_task","f":1746607818857941,"d_finished":0,"c":0,"l":1746607818877940,"d":19999},"events":[{"name":"bootstrap","f":1746607818858240,"d_finished":4156,"c":1,"l":1746607818862396,"d":4156},{"a":1746607818876667,"name":"ack","f":1746607818874834,"d_finished":1620,"c":1,"l":1746607818876454,"d":2893},{"a":1746607818876652,"name":"processing","f":1746607818862532,"d_finished":7220,"c":10,"l":1746607818876458,"d":8508},{"name":"ProduceResults","f":1746607818860627,"d_finished":3936,"c":13,"l":1746607818877051,"d":3936},{"a":1746607818877055,"name":"Finish","f":1746607818877055,"d_finished":0,"c":0,"l":1746607818877940,"d":885},{"name":"task_result","f":1746607818862553,"d_finished":5417,"c":9,"l":1746607818874586,"d":5417}],"id":"9437184::12"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-05-07T08:50:18.878512Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:1055:2926];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=stats.cpp:8;event=statistic;begin=2025-05-07T08:50:18.857134Z;index_granules=0;index_portions=1;index_batches=2;committed_batches=0;schema_columns=10;filter_columns=0;additional_columns=0;compacted_portions_bytes=0;inserted_portions_bytes=10280;committed_portions_bytes=0;data_filter_bytes=0;data_additional_bytes=0;delta_bytes=10280;selected_rows=0; 2025-05-07T08:50:18.878563Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:1055:2926];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=read_context.h:189;event=scan_aborted;reason=unexpected on destructor; 2025-05-07T08:50:18.879026Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: TEST_STEP=11;SelfId=[1:1055:2926];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=context.h:81;fetching=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;; >> Viewer::JsonAutocompleteColumnsPOST [GOOD] >> TReplicationTests::CreateSequential |89.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_stats/ydb-core-tx-schemeshard-ut_stats |89.0%| [LD] {RESULT} 
$(B)/ydb/core/tx/schemeshard/ut_stats/ydb-core-tx-schemeshard-ut_stats |89.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_stats/ydb-core-tx-schemeshard-ut_stats >> IndexBuildTest::MergeIndexTableShardsOnlyWhenReady >> VectorIndexBuildTest::BaseCase >> IndexBuildTest::CheckLimitWithDroppedIndex [GOOD] >> IndexBuildTest::DropIndex |89.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_user_attributes/ydb-core-tx-schemeshard-ut_user_attributes |89.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_user_attributes/ydb-core-tx-schemeshard-ut_user_attributes |89.0%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_user_attributes/ydb-core-tx-schemeshard-ut_user_attributes >> TColumnShardTestReadWrite::WriteRead [GOOD] >> IndexBuildTest::ShadowDataNotAllowedByDefault >> TReplicationTests::Create [GOOD] >> TReplicationTests::ConsistencyLevel >> IndexBuildTest::RejectsCreate >> VectorIndexBuildTest::VectorIndexDescriptionIsPersisted [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/viewer/ut/unittest >> Viewer::JsonAutocompleteColumnsPOST [GOOD] Test command err: 2025-05-07T08:49:35.536994Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:337:2380], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:49:35.537466Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:49:35.537546Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] TServer::EnableGrpc on GrpcPort 27076, node 1 TClient is connected to server localhost:30333 2025-05-07T08:49:44.854400Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:322:2365], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:49:44.854573Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:49:44.854797Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] TServer::EnableGrpc on GrpcPort 31507, node 2 TClient is connected to server localhost:7271 2025-05-07T08:49:54.544160Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [3:340:2383], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:49:54.544390Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:49:54.544485Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] TServer::EnableGrpc on GrpcPort 29079, node 3 TClient is connected to server localhost:4866 2025-05-07T08:50:04.669384Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [4:339:2381], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:50:04.669738Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:50:04.670038Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] TServer::EnableGrpc on GrpcPort 8542, node 4 TClient is connected to server localhost:13773 2025-05-07T08:50:16.763781Z node 5 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [5:320:2363], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:50:16.764427Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:50:16.764608Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] TServer::EnableGrpc on GrpcPort 10667, node 5 TClient is connected to server localhost:5162 >> IndexBuildTest::CancellationNotEnoughRetries >> IndexBuildTest::Lock ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> TColumnShardTestReadWrite::WriteRead [GOOD] Test command err: 2025-05-07T08:50:14.977021Z node 1 :BLOB_CACHE NOTICE: ctor_logger.h:56: MaxCacheDataSize: 20971520 InFlightDataSize: 0 2025-05-07T08:50:15.146476Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-05-07T08:50:15.189895Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-05-07T08:50:15.201683Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-05-07T08:50:15.215255Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-05-07T08:50:15.215582Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-05-07T08:50:15.215916Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-05-07T08:50:15.216093Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-05-07T08:50:15.216279Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-05-07T08:50:15.216442Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanInsertionDedup; 2025-05-07T08:50:15.216588Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-05-07T08:50:15.216749Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-05-07T08:50:15.216928Z node 1 
:TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-05-07T08:50:15.217108Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-05-07T08:50:15.217295Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-05-07T08:50:15.217445Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-05-07T08:50:15.254635Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-05-07T08:50:15.254847Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:137;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=11;current_normalizer=CLASS_NAME=Granules; 2025-05-07T08:50:15.254907Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-05-07T08:50:15.255125Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-05-07T08:50:15.255338Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-05-07T08:50:15.255432Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-05-07T08:50:15.255477Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-05-07T08:50:15.255574Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-05-07T08:50:15.255647Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-05-07T08:50:15.255716Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-05-07T08:50:15.255761Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-05-07T08:50:15.255976Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-05-07T08:50:15.256045Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-05-07T08:50:15.256100Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-05-07T08:50:15.256143Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-05-07T08:50:15.256233Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-05-07T08:50:15.256294Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-05-07T08:50:15.256344Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanInsertionDedup;id=CleanInsertionDedup; 2025-05-07T08:50:15.256381Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=8;type=CleanInsertionDedup; 2025-05-07T08:50:15.256490Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanInsertionDedup;id=8; 2025-05-07T08:50:15.256534Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-05-07T08:50:15.256564Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-05-07T08:50:15.256634Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-05-07T08:50:15.256674Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-05-07T08:50:15.256707Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-05-07T08:50:15.256923Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-05-07T08:50:15.256998Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-05-07T08:50:15.257037Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-05-07T08:50:15.257307Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-05-07T08:50:15.257374Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-05-07T08:50:15.257415Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-05-07T08:50:15.257560Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-05-07T08:50:15.257614Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-05-07T08:50:15.257644Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-05-07T08:50:15.257732Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-05-07T08:50:15.257796Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-05-07T08:50:15.257836Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-05-07T08:50:15.257866Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-05-07T08:50:15.258387Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=97; 2025-05-07T08:50:15.258475Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=34; ... 
ource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-05-07T08:50:21.618141Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:423:2438];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:73;event=DoExtractReadyResults;result=0;count=0;finished=1; 2025-05-07T08:50:21.618279Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:423:2438];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:229;stage=ready result;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;);columns=10;rows=31; 2025-05-07T08:50:21.618361Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:423:2438];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:249;stage=data_format;batch_size=1984;num_rows=31;batch_columns=timestamp,resource_type,resource_id,uid,level,message,json_payload,ingested_at,saved_at,request_id; 2025-05-07T08:50:21.618646Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:423:2438];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:365;event=send_data;compute_actor_id=[1:422:2437];bytes=1984;rows=31;faults=0;finished=0;fault=0;schema=timestamp: timestamp[us] resource_type: string resource_id: string uid: string level: int32 message: string json_payload: string ingested_at: timestamp[us] saved_at: timestamp[us] request_id: string; 2025-05-07T08:50:21.618836Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:423:2438];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:269;stage=finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-05-07T08:50:21.618975Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:423:2438];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce 
result;fline=actor.cpp:187;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-05-07T08:50:21.619103Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:423:2438];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-05-07T08:50:21.619339Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:423:2438];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=actor.cpp:104;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-05-07T08:50:21.619506Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:423:2438];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:187;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-05-07T08:50:21.619638Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:423:2438];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-05-07T08:50:21.619675Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: actor.cpp:409: Scan [1:423:2438] finished for tablet 9437184 2025-05-07T08:50:21.620130Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: 
TEST_STEP=11;SelfId=[1:423:2438];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=actor.cpp:415;event=scan_finish;compute_actor_id=[1:422:2437];stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.002},{"events":["l_bootstrap"],"t":0.003},{"events":["f_processing","f_task_result"],"t":0.005},{"events":["l_task_result"],"t":0.015},{"events":["f_ack"],"t":0.016},{"events":["l_ProduceResults","f_Finish"],"t":0.017},{"events":["l_ack","l_processing","l_Finish"],"t":0.018}],"full":{"a":1746607821601676,"name":"_full_task","f":1746607821601676,"d_finished":0,"c":0,"l":1746607821619729,"d":18053},"events":[{"name":"bootstrap","f":1746607821601947,"d_finished":3522,"c":1,"l":1746607821605469,"d":3522},{"a":1746607821619315,"name":"ack","f":1746607821617777,"d_finished":1357,"c":1,"l":1746607821619134,"d":1771},{"a":1746607821619296,"name":"processing","f":1746607821607103,"d_finished":7045,"c":10,"l":1746607821619138,"d":7478},{"name":"ProduceResults","f":1746607821604177,"d_finished":3616,"c":13,"l":1746607821619662,"d":3616},{"a":1746607821619666,"name":"Finish","f":1746607821619666,"d_finished":0,"c":0,"l":1746607821619729,"d":63},{"name":"task_result","f":1746607821607132,"d_finished":5518,"c":9,"l":1746607821617561,"d":5518}],"id":"9437184::12"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-05-07T08:50:21.620216Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:423:2438];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=actor.cpp:365;event=send_data;compute_actor_id=[1:422:2437];bytes=0;rows=0;faults=0;finished=1;fault=0;schema=; 2025-05-07T08:50:21.620616Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: 
TEST_STEP=11;SelfId=[1:423:2438];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=actor.cpp:370;event=scan_finished;compute_actor_id=[1:422:2437];packs_sum=0;bytes=0;bytes_sum=0;rows=0;rows_sum=0;faults=0;finished=1;fault=0;stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.002},{"events":["l_bootstrap"],"t":0.003},{"events":["f_processing","f_task_result"],"t":0.005},{"events":["l_task_result"],"t":0.015},{"events":["f_ack"],"t":0.016},{"events":["l_ProduceResults","f_Finish"],"t":0.017},{"events":["l_ack","l_processing","l_Finish"],"t":0.018}],"full":{"a":1746607821601676,"name":"_full_task","f":1746607821601676,"d_finished":0,"c":0,"l":1746607821620265,"d":18589},"events":[{"name":"bootstrap","f":1746607821601947,"d_finished":3522,"c":1,"l":1746607821605469,"d":3522},{"a":1746607821619315,"name":"ack","f":1746607821617777,"d_finished":1357,"c":1,"l":1746607821619134,"d":2307},{"a":1746607821619296,"name":"processing","f":1746607821607103,"d_finished":7045,"c":10,"l":1746607821619138,"d":8014},{"name":"ProduceResults","f":1746607821604177,"d_finished":3616,"c":13,"l":1746607821619662,"d":3616},{"a":1746607821619666,"name":"Finish","f":1746607821619666,"d_finished":0,"c":0,"l":1746607821620265,"d":599},{"name":"task_result","f":1746607821607132,"d_finished":5518,"c":9,"l":1746607821617561,"d":5518}],"id":"9437184::12"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-05-07T08:50:21.620679Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:423:2438];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=stats.cpp:8;event=statistic;begin=2025-05-07T08:50:21.600878Z;index_granules=0;index_portions=1;index_batches=2;committed_batches=0;schema_columns=10;filter_columns=0;additional_columns=0;compacted_portions_bytes=0;inserted_portions_bytes=10280;committed_portions_bytes=0;data_filter_bytes=0;data_additional_bytes=0;delta_bytes=10280;selected_rows=0; 2025-05-07T08:50:21.620713Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:423:2438];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=read_context.h:189;event=scan_aborted;reason=unexpected on destructor; 2025-05-07T08:50:21.621072Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: TEST_STEP=11;SelfId=[1:423:2438];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=context.h:81;fetching=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;; |89.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_read_table/ydb-core-tx-datashard-ut_read_table |89.0%| [LD] {BAZEL_UPLOAD, SKIPPED} 
$(B)/ydb/core/tx/datashard/ut_read_table/ydb-core-tx-datashard-ut_read_table |89.0%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_read_table/ydb-core-tx-datashard-ut_read_table >> TReplicationTests::CreateSequential [GOOD] >> TReplicationTests::CreateInParallel >> TReplicationTests::ConsistencyLevel [GOOD] >> TReplicationTests::Alter >> Viewer::JsonAutocompleteSchemePOST [GOOD] |89.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/tx_proxy/ut_base_tenant/ydb-core-tx-tx_proxy-ut_base_tenant >> IndexBuildTest::DropIndex [GOOD] >> IndexBuildTest::ShadowDataNotAllowedByDefault [GOOD] >> IndexBuildTest::ShadowDataEdgeCases |89.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/tx_proxy/ut_base_tenant/ydb-core-tx-tx_proxy-ut_base_tenant |89.0%| [LD] {RESULT} $(B)/ydb/core/tx/tx_proxy/ut_base_tenant/ydb-core-tx-tx_proxy-ut_base_tenant ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index_build/unittest >> VectorIndexBuildTest::VectorIndexDescriptionIsPersisted [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140] 2025-05-07T08:50:17.461523Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:50:17.461632Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:50:17.461679Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:50:17.461719Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:50:17.461804Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:50:17.461837Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:50:17.461894Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:50:17.462020Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:50:17.462837Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:50:17.463342Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:50:17.571619Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:50:17.571696Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not 
loaded 2025-05-07T08:50:17.601593Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:50:17.601867Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:50:17.603604Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:50:17.629685Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:50:17.630084Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:50:17.630940Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:50:17.631192Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:50:17.634700Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:50:17.636330Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:50:17.636418Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:50:17.636501Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:50:17.636553Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:50:17.636674Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:50:17.636943Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:50:17.644677Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T08:50:17.810298Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:50:17.810559Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:50:17.810863Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:50:17.811128Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 
72057594046678944 2025-05-07T08:50:17.811191Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:50:17.813832Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:50:17.814017Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:50:17.814238Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:50:17.814313Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:50:17.814361Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:50:17.814399Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:50:17.816650Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:50:17.816727Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:50:17.816778Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:50:17.818732Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:50:17.818812Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:50:17.818860Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:50:17.818924Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:50:17.822992Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:50:17.825331Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:50:17.825591Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 
FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:50:17.826704Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:50:17.826896Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:50:17.826974Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:50:17.827317Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:50:17.827375Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:50:17.827590Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:50:17.827673Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:50:17.830066Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:50:17.830134Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:50:17.830344Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:50:17.830392Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 
:50:22.167147Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3782: TTxInit for ShardToDelete, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:50:22.167270Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3799: TTxInit for BackupSettings, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:50:22.167525Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3959: TTxInit for ShardBackupStatus, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:50:22.167601Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3975: TTxInit for CompletedBackup, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:50:22.167901Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4260: TTxInit for Publications, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:50:22.168285Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4558: IndexBuild , records: 1, at schemeshard: 72057594046678944 2025-05-07T08:50:22.168500Z node 1 :BUILD_INDEX DEBUG: schemeshard_info_types.h:3694: AddShardStatus id# 102 shard 72057594046678944:11 2025-05-07T08:50:22.168632Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4705: SnapshotTables: snapshots: 0 tables: 0, at schemeshard: 72057594046678944 2025-05-07T08:50:22.168983Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4732: SnapshotSteps: snapshots: 0, at schemeshard: 72057594046678944 2025-05-07T08:50:22.169058Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4759: LongLocks: records: 4, at schemeshard: 72057594046678944 2025-05-07T08:50:22.176971Z node 1 :BUILD_INDEX INFO: schemeshard_build_index__progress.cpp:1105: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 102 Done TBuildInfo{ IndexBuildId: 102, Uid: , DomainPathId: [OwnerId: 72057594046678944, LocalPathId: 1], TablePathId: [OwnerId: 72057594046678944, LocalPathId: 2], IndexType: EIndexTypeGlobalVectorKmeansTree, IndexName: by_embedding, IndexColumn: embedding, DataColumns: covered, State: Done, IsCancellationRequested: 0, Issue: , SubscribersCount: 0, CreateSender: [0:0:0], AlterMainTableTxId: 0, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 281474976710757, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976710758, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 0, ApplyTxId: 281474976720769, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976720770, UnlockTxStatus: StatusAccepted, UnlockTxDone: 1, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }, Billed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }} 2025-05-07T08:50:22.177032Z node 1 :BUILD_INDEX TRACE: schemeshard_build_index_tx_base.cpp:325: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TIndexBuildInfo SendNotifications: : id# 102, subscribers count# 0 2025-05-07T08:50:22.179663Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:50:22.179733Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:50:22.180670Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:50:22.180730Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:50:22.180787Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:50:22.183142Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594046678944 is [1:3198:4997] sender: [1:3260:2058] recipient: [1:15:2062] 2025-05-07T08:50:22.218986Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/vectors/by_embedding" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-05-07T08:50:22.219266Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/vectors/by_embedding" took 327us result status StatusSuccess 2025-05-07T08:50:22.220312Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/vectors/by_embedding" PathDescription { Self { Name: "by_embedding" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeTableIndex CreateFinished: true CreateTxId: 281474976710758 CreateStep: 5000004 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 8 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 8 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 TableIndexVersion: 2 } ChildrenExist: true } Children { Name: "indexImplLevelTable" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710758 CreateStep: 5000004 ParentPathId: 3 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" PathSubType: EPathSubTypeVectorKmeansTreeIndexImplTable Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 1 } ChildrenExist: false } Children { Name: "indexImplPostingTable" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710758 CreateStep: 5000004 ParentPathId: 3 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" PathSubType: EPathSubTypeVectorKmeansTreeIndexImplTable Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 7 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } TableIndex { Name: "by_embedding" LocalPathId: 3 Type: EIndexTypeGlobalVectorKmeansTree State: EIndexStateReady KeyColumnNames: "embedding" 
SchemaVersion: 2 PathOwnerId: 72057594046678944 DataColumnNames: "covered" DataSize: 0 IndexImplTableDescriptions { PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 KeepEraseMarkers: false MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 3 MaxPartitionsCount: 3 } } } IndexImplTableDescriptions { PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 
ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 KeepEraseMarkers: false MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 3 MaxPartitionsCount: 3 } } } VectorIndexKmeansTreeDescription { Settings { settings { metric: DISTANCE_COSINE vector_type: VECTOR_TYPE_FLOAT vector_dimension: 1024 } clusters: 4 levels: 5 } } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index_build/unittest >> IndexBuildTest::DropIndex [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140] 2025-05-07T08:50:19.061847Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:50:19.061952Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:50:19.062038Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:50:19.062089Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:50:19.062159Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:50:19.062197Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:50:19.062277Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:50:19.062382Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 
2025-05-07T08:50:19.063279Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:50:19.063761Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:50:19.159765Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:50:19.159836Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:50:19.179217Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:50:19.179498Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:50:19.179709Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:50:19.186703Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:50:19.187143Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:50:19.187939Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:50:19.188180Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:50:19.191834Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:50:19.194403Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:50:19.194486Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:50:19.194572Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:50:19.194624Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:50:19.194748Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:50:19.195041Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:50:19.203335Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T08:50:19.380886Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:50:19.381157Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:50:19.381435Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:50:19.381767Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:50:19.381840Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:50:19.384871Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:50:19.385062Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:50:19.385304Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:50:19.385371Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:50:19.385420Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:50:19.385461Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:50:19.387918Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:50:19.387998Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:50:19.388047Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:50:19.390001Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:50:19.390062Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:50:19.390110Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:50:19.390360Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:50:19.395368Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:50:19.397870Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:50:19.398157Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:50:19.399338Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:50:19.399498Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:50:19.399565Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:50:19.399950Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:50:19.400010Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:50:19.400241Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:50:19.400322Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:50:19.402719Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:50:19.402813Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:50:19.403198Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:50:19.403247Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 
5 PathOwnerId: 72057594046678944, cookie: 105 2025-05-07T08:50:23.150095Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 105 2025-05-07T08:50:23.150149Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 105, pathId: [OwnerId: 72057594046678944, LocalPathId: 7], version: 18446744073709551615 2025-05-07T08:50:23.150194Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 7] was 5 2025-05-07T08:50:23.150702Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 8 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 105 2025-05-07T08:50:23.150781Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 8 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 105 2025-05-07T08:50:23.150807Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 105 2025-05-07T08:50:23.150831Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 105, pathId: [OwnerId: 72057594046678944, LocalPathId: 8], version: 18446744073709551615 2025-05-07T08:50:23.150853Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 8] was 3 2025-05-07T08:50:23.152656Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 15 PathOwnerId: 72057594046678944, cookie: 105 2025-05-07T08:50:23.152728Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 15 PathOwnerId: 72057594046678944, cookie: 105 2025-05-07T08:50:23.152749Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 105 2025-05-07T08:50:23.152773Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 105, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 15 2025-05-07T08:50:23.152798Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-05-07T08:50:23.153392Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 7 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 105 2025-05-07T08:50:23.153446Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 
LocalPathId: 7 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 105 2025-05-07T08:50:23.153468Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 105 2025-05-07T08:50:23.154010Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 105:0, at schemeshard: 72057594046678944 2025-05-07T08:50:23.154062Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_table.cpp:414: TDropTable TProposedDeletePart operationId: 105:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:50:23.154330Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove table for pathId [OwnerId: 72057594046678944, LocalPathId: 7] was 4 2025-05-07T08:50:23.154477Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#105:0 progress is 2/3 2025-05-07T08:50:23.154521Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 105 ready parts: 2/3 2025-05-07T08:50:23.154562Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#105:0 progress is 2/3 2025-05-07T08:50:23.154599Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 105 ready parts: 2/3 2025-05-07T08:50:23.154639Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 105, ready parts: 2/3, is published: false 2025-05-07T08:50:23.155715Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 8 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 105 2025-05-07T08:50:23.155789Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 8 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 105 2025-05-07T08:50:23.155811Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 105 2025-05-07T08:50:23.156708Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 9 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 105 2025-05-07T08:50:23.156793Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 9 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 105 2025-05-07T08:50:23.156822Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 105 2025-05-07T08:50:23.156852Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 105, pathId: [OwnerId: 72057594046678944, LocalPathId: 9], version: 18446744073709551615 2025-05-07T08:50:23.156890Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId 
[OwnerId: 72057594046678944, LocalPathId: 9] was 4 2025-05-07T08:50:23.157000Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 105, ready parts: 2/3, is published: true 2025-05-07T08:50:23.158702Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 105:2, at schemeshard: 72057594046678944 2025-05-07T08:50:23.158755Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_table.cpp:414: TDropTable TProposedDeletePart operationId: 105:2 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:50:23.158999Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove table for pathId [OwnerId: 72057594046678944, LocalPathId: 9] was 3 2025-05-07T08:50:23.159088Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#105:2 progress is 3/3 2025-05-07T08:50:23.159113Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 105 ready parts: 3/3 2025-05-07T08:50:23.159142Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#105:2 progress is 3/3 2025-05-07T08:50:23.159162Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 105 ready parts: 3/3 2025-05-07T08:50:23.159187Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 105, ready parts: 3/3, is published: true 2025-05-07T08:50:23.159254Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [2:412:2369] message: TxId: 105 2025-05-07T08:50:23.159301Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 105 ready parts: 3/3 2025-05-07T08:50:23.159352Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 105:0 2025-05-07T08:50:23.159393Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 105:0 2025-05-07T08:50:23.159483Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 7] was 3 2025-05-07T08:50:23.159522Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 105:1 2025-05-07T08:50:23.159545Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 105:1 2025-05-07T08:50:23.159583Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 8] was 2 2025-05-07T08:50:23.159601Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 105:2 2025-05-07T08:50:23.159617Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 105:2 2025-05-07T08:50:23.159648Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 9] was 2 2025-05-07T08:50:23.159956Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 
72057594046678944, cookie: 105 2025-05-07T08:50:23.160313Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 105 2025-05-07T08:50:23.161430Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 105 2025-05-07T08:50:23.161475Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 105 2025-05-07T08:50:23.161576Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 105 2025-05-07T08:50:23.162751Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 105 2025-05-07T08:50:23.164007Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 105: got EvNotifyTxCompletionResult 2025-05-07T08:50:23.164054Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 105: satisfy waiter [2:927:2851] TestWaitNotification: OK eventTxId 105 >> TReplicationTests::Alter [GOOD] >> TReplicationTests::CannotAddReplicationConfig >> TColumnShardTestReadWrite::WriteReadDuplicate |89.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_object_storage_listing/ydb-core-tx-datashard-ut_object_storage_listing |89.0%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_object_storage_listing/ydb-core-tx-datashard-ut_object_storage_listing |89.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_object_storage_listing/ydb-core-tx-datashard-ut_object_storage_listing >> IndexBuildTest::WithFollowers >> TReplicationTests::CreateInParallel [GOOD] >> TReplicationTests::CreateDropRecreate >> IndexBuildTest::Lock [GOOD] >> IndexBuildTest::IndexPartitioningIsPersisted >> IndexBuildTest::ShadowDataEdgeCases [GOOD] |89.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_backup/ydb-core-tx-schemeshard-ut_backup |89.0%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_backup/ydb-core-tx-schemeshard-ut_backup |89.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_backup/ydb-core-tx-schemeshard-ut_backup ------- [TM] {asan, default-linux-x86_64, release} ydb/core/viewer/ut/unittest >> Viewer::JsonAutocompleteSchemePOST [GOOD] Test command err: 2025-05-07T08:49:37.479524Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:337:2380], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:49:37.479959Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:49:37.480041Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] TServer::EnableGrpc on GrpcPort 22979, node 1 TClient is connected to server localhost:2470 2025-05-07T08:49:46.863043Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:322:2365], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:49:46.863226Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:49:46.863438Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] TServer::EnableGrpc on GrpcPort 5257, node 2 TClient is connected to server localhost:8908 2025-05-07T08:49:56.254603Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [3:340:2383], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:49:56.254863Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:49:56.254995Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] TServer::EnableGrpc on GrpcPort 6212, node 3 TClient is connected to server localhost:7363 2025-05-07T08:50:06.522010Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [4:339:2381], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:50:06.522371Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:50:06.522657Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] TServer::EnableGrpc on GrpcPort 7681, node 4 TClient is connected to server localhost:15892 2025-05-07T08:50:19.144373Z node 5 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [5:320:2363], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:50:19.145006Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:50:19.145174Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] TServer::EnableGrpc on GrpcPort 8226, node 5 TClient is connected to server localhost:28729 >> IndexBuildTest::RejectsCreate [GOOD] >> IndexBuildTest::RejectsDropIndex >> Viewer::ServerlessWithExclusiveNodes [GOOD] >> Viewer::SharedDoesntShowExclusiveNodes >> TReplicationTests::CannotAddReplicationConfig [GOOD] >> TReplicationTests::CannotSetAsyncReplicaAttribute >> KqpStreamLookup::ReadTableWithIndexDuringSplit >> TColumnShardTestReadWrite::WriteOverload+InStore-WithWritePortionsOnInsert [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index_build/unittest >> IndexBuildTest::ShadowDataEdgeCases [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140] 2025-05-07T08:50:22.430350Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:50:22.430448Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:50:22.430500Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:50:22.430556Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:50:22.430609Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:50:22.430640Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:50:22.430692Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:50:22.430786Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:50:22.431543Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:50:22.432111Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:50:22.523558Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:50:22.523622Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:50:22.550830Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:50:22.551102Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:50:22.551306Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:50:22.564693Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:50:22.565046Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:50:22.565789Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:50:22.566042Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:50:22.569467Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:50:22.571054Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:50:22.571124Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:50:22.571201Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:50:22.571252Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:50:22.571373Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:50:22.571642Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:50:22.578063Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T08:50:22.724722Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:50:22.724973Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:50:22.725222Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount 
reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:50:22.725400Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:50:22.725462Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:50:22.728361Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:50:22.728526Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:50:22.728730Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:50:22.728788Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:50:22.728827Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:50:22.728861Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:50:22.731144Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:50:22.731232Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:50:22.731281Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:50:22.733311Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:50:22.733373Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:50:22.733417Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:50:22.733475Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:50:22.745846Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:50:22.747988Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:50:22.748239Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:50:22.749307Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:50:22.749471Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:50:22.749561Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:50:22.749869Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:50:22.749921Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:50:22.750145Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:50:22.750216Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:50:22.752448Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:50:22.752512Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:50:22.752708Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:50:22.752748Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 
d: 72057594046678944 2025-05-07T08:50:24.805297Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_table.cpp:418: TAlterTable TPropose operationId# 109:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:50:24.805382Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 109 ready parts: 1/1 2025-05-07T08:50:24.805528Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } AffectedSet { TabletId: 72075186233409548 Flags: 2 } ExecLevel: 0 TxId: 109 MinStep: 5000008 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:50:24.813015Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 109:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:109 msg type: 269090816 2025-05-07T08:50:24.813149Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 109, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 109 at step: 5000008 FAKE_COORDINATOR: advance: minStep5000008 State->FrontStep: 5000007 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 109 at step: 5000008 FAKE_COORDINATOR: Send Plan to tablet 72075186233409548 for txId: 109 at step: 5000008 2025-05-07T08:50:24.813727Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000008, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:50:24.813846Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 109 Coordinator: 72057594046316545 AckTo { RawX1: 131 RawX2: 8589936747 } } Step: 5000008 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:50:24.813901Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_table.cpp:359: TAlterTable TPropose operationId# 109:0 HandleReply TEvOperationPlan, operationId: 109:0, stepId: 5000008, at schemeshard: 72057594046678944 2025-05-07T08:50:24.814145Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 109:0 128 -> 129 2025-05-07T08:50:24.814306Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 3 FAKE_COORDINATOR: advance: minStep5000008 State->FrontStep: 5000008 2025-05-07T08:50:24.820332Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:50:24.820391Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 109, path id: [OwnerId: 72057594046678944, LocalPathId: 4] 2025-05-07T08:50:24.820662Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:50:24.820770Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [2:206:2208], at schemeshard: 72057594046678944, txId: 109, path id: 4 2025-05-07T08:50:24.821746Z 
node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 109:0, at schemeshard: 72057594046678944 2025-05-07T08:50:24.821805Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1036: NTableState::TProposedWaitParts operationId# 109:0 ProgressState at tablet: 72057594046678944 2025-05-07T08:50:24.822617Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 4 PathOwnerId: 72057594046678944, cookie: 109 2025-05-07T08:50:24.822723Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 4 PathOwnerId: 72057594046678944, cookie: 109 2025-05-07T08:50:24.822777Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 109 2025-05-07T08:50:24.822839Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 109, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], version: 4 2025-05-07T08:50:24.822887Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 4 2025-05-07T08:50:24.822989Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 109, ready parts: 0/1, is published: true FAKE_COORDINATOR: Erasing txId 109 2025-05-07T08:50:24.829665Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 109 2025-05-07T08:50:24.831704Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6245: Handle TEvProposeTransactionResult, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409548 Status: COMPLETE TxId: 109 Step: 5000008 OrderId: 109 ExecLatency: 3 ProposeLatency: 5 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409548 CpuTimeUsec: 1268 } } 2025-05-07T08:50:24.831784Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation FindRelatedPartByTabletId, TxId: 109, tablet: 72075186233409548, partId: 0 2025-05-07T08:50:24.831935Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 109:0, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409548 Status: COMPLETE TxId: 109 Step: 5000008 OrderId: 109 ExecLatency: 3 ProposeLatency: 5 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409548 CpuTimeUsec: 1268 } } 2025-05-07T08:50:24.832054Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_part.cpp:108: HandleReply TEvDataShard::TEvProposeTransactionResult Ignore message: tablet# 72057594046678944, ev# TxKind: TX_KIND_SCHEME Origin: 72075186233409548 Status: COMPLETE TxId: 109 Step: 5000008 OrderId: 109 ExecLatency: 3 ProposeLatency: 5 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409548 CpuTimeUsec: 1268 } } 2025-05-07T08:50:24.833213Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5472: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 
72057594046678944, message: Source { RawX1: 673 RawX2: 8589937220 } Origin: 72075186233409548 State: 2 TxId: 109 Step: 0 Generation: 2 2025-05-07T08:50:24.833269Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation FindRelatedPartByTabletId, TxId: 109, tablet: 72075186233409548, partId: 0 2025-05-07T08:50:24.833429Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 109:0, at schemeshard: 72057594046678944, message: Source { RawX1: 673 RawX2: 8589937220 } Origin: 72075186233409548 State: 2 TxId: 109 Step: 0 Generation: 2 2025-05-07T08:50:24.833493Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1005: NTableState::TProposedWaitParts operationId# 109:0 HandleReply TEvSchemaChanged at tablet: 72057594046678944 2025-05-07T08:50:24.833602Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1009: NTableState::TProposedWaitParts operationId# 109:0 HandleReply TEvSchemaChanged at tablet: 72057594046678944 message: Source { RawX1: 673 RawX2: 8589937220 } Origin: 72075186233409548 State: 2 TxId: 109 Step: 0 Generation: 2 2025-05-07T08:50:24.833672Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:655: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 109:0, shardIdx: 72057594046678944:3, datashard: 72075186233409548, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-05-07T08:50:24.833716Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:674: all shard schema changes has been received, operationId: 109:0, at schemeshard: 72057594046678944 2025-05-07T08:50:24.833750Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:686: send schema changes ack message, operation: 109:0, datashard: 72075186233409548, at schemeshard: 72057594046678944 2025-05-07T08:50:24.833793Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 109:0 129 -> 240 2025-05-07T08:50:24.836994Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 109:0, at schemeshard: 72057594046678944 2025-05-07T08:50:24.837530Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 109:0, at schemeshard: 72057594046678944 2025-05-07T08:50:24.837834Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 109:0, at schemeshard: 72057594046678944 2025-05-07T08:50:24.837899Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046678944] TDone opId# 109:0 ProgressState 2025-05-07T08:50:24.838052Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#109:0 progress is 1/1 2025-05-07T08:50:24.838097Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 109 ready parts: 1/1 2025-05-07T08:50:24.838141Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#109:0 progress is 1/1 2025-05-07T08:50:24.838174Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 109 ready parts: 1/1 2025-05-07T08:50:24.838212Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 109, ready parts: 1/1, is published: true 2025-05-07T08:50:24.838290Z node 2 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [2:333:2312] message: TxId: 109 2025-05-07T08:50:24.838346Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 109 ready parts: 1/1 2025-05-07T08:50:24.838391Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 109:0 2025-05-07T08:50:24.838425Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 109:0 2025-05-07T08:50:24.838595Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 3 2025-05-07T08:50:24.843434Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 109: got EvNotifyTxCompletionResult 2025-05-07T08:50:24.843516Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 109: satisfy waiter [2:781:2726] TestWaitNotification: OK eventTxId 109
>> TReplicationTests::CreateDropRecreate [GOOD]
>> TReplicationTests::CreateWithoutCredentials
>> KqpStreamLookup::ReadTableDuringSplit
>> IndexBuildTest::IndexPartitioningIsPersisted [GOOD]
>> IndexBuildTest::RejectsDropIndex [GOOD]
>> TReplicationTests::CannotSetAsyncReplicaAttribute [GOOD]
>> TReplicationTests::AlterReplicatedTable
>> IndexBuildTest::WithFollowers [GOOD]
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> TColumnShardTestReadWrite::WriteOverload+InStore-WithWritePortionsOnInsert [GOOD]
Test command err:
2025-05-07T08:50:13.600319Z node 1 :BLOB_CACHE NOTICE: ctor_logger.h:56: MaxCacheDataSize: 20971520 InFlightDataSize: 0 2025-05-07T08:50:13.719536Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-05-07T08:50:13.746497Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-05-07T08:50:13.746858Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-05-07T08:50:13.755699Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-05-07T08:50:13.756012Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-05-07T08:50:13.756296Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-05-07T08:50:13.756449Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-05-07T08:50:13.756572Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-05-07T08:50:13.756710Z node 1
:TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanInsertionDedup; 2025-05-07T08:50:13.756829Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-05-07T08:50:13.756948Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-05-07T08:50:13.757072Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-05-07T08:50:13.757215Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-05-07T08:50:13.757370Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-05-07T08:50:13.757491Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-05-07T08:50:13.788862Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-05-07T08:50:13.789045Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:137;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=11;current_normalizer=CLASS_NAME=Granules; 2025-05-07T08:50:13.789112Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-05-07T08:50:13.789336Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-05-07T08:50:13.789521Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-05-07T08:50:13.789619Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-05-07T08:50:13.789673Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-05-07T08:50:13.789791Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-05-07T08:50:13.789862Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-05-07T08:50:13.789922Z node 1 :TX_COLUMNSHARD 
WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-05-07T08:50:13.789989Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-05-07T08:50:13.790238Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-05-07T08:50:13.790319Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-05-07T08:50:13.790370Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-05-07T08:50:13.790432Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-05-07T08:50:13.790531Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-05-07T08:50:13.790594Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-05-07T08:50:13.790639Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanInsertionDedup;id=CleanInsertionDedup; 2025-05-07T08:50:13.790673Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=8;type=CleanInsertionDedup; 2025-05-07T08:50:13.790801Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanInsertionDedup;id=8; 2025-05-07T08:50:13.790865Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-05-07T08:50:13.790900Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-05-07T08:50:13.790965Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-05-07T08:50:13.791008Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-05-07T08:50:13.791040Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-05-07T08:50:13.791298Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-05-07T08:50:13.791362Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-05-07T08:50:13.791400Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-05-07T08:50:13.791624Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-05-07T08:50:13.791672Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-05-07T08:50:13.791721Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-05-07T08:50:13.791868Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-05-07T08:50:13.791929Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-05-07T08:50:13.791968Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-05-07T08:50:13.792096Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-05-07T08:50:13.792172Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-05-07T08:50:13.792236Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-05-07T08:50:13.792270Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-05-07T08:50:13.792745Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=65; 2025-05-07T08:50:13.792860Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=55; ... 
=columnshard_impl.cpp:785;event=start_indexation_tasks;insert_overload_size=0; 2025-05-07T08:50:24.682828Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Write (record) into pathId 1 writeId 6 at tablet 9437184 2025-05-07T08:50:24.683140Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager on execute at tablet 9437184 Save Batch GenStep: 2:6 Blob count: 1 2025-05-07T08:50:24.695303Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Save Batch GenStep: 2:6 Blob count: 1 2025-05-07T08:50:24.695466Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;local_tx_no=9;method=complete;tx_info=TTxWrite;tablet_id=9437184;tx_state=complete;fline=columnshard_impl.cpp:785;event=start_indexation_tasks;insert_overload_size=0; 2025-05-07T08:50:24.709144Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Write (record) into pathId 1 writeId 7 at tablet 9437184 2025-05-07T08:50:24.709509Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager on execute at tablet 9437184 Save Batch GenStep: 2:7 Blob count: 1 2025-05-07T08:50:24.722473Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Save Batch GenStep: 2:7 Blob count: 1 2025-05-07T08:50:24.722653Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;local_tx_no=10;method=complete;tx_info=TTxWrite;tablet_id=9437184;tx_state=complete;fline=columnshard_impl.cpp:785;event=start_indexation_tasks;insert_overload_size=0; 2025-05-07T08:50:24.724507Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Write (record) into pathId 1 writeId 8 at tablet 9437184 2025-05-07T08:50:24.724821Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager on execute at tablet 9437184 Save Batch GenStep: 2:8 Blob count: 1 2025-05-07T08:50:24.737602Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Save Batch GenStep: 2:8 Blob count: 1 2025-05-07T08:50:24.737778Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;local_tx_no=11;method=complete;tx_info=TTxWrite;tablet_id=9437184;tx_state=complete;fline=columnshard_impl.cpp:785;event=start_indexation_tasks;insert_overload_size=0; 2025-05-07T08:50:24.739581Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Write (record) into pathId 1 writeId 9 at tablet 9437184 2025-05-07T08:50:24.739908Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager on execute at tablet 9437184 Save Batch GenStep: 2:9 Blob count: 1 2025-05-07T08:50:24.752549Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Save Batch GenStep: 2:9 Blob count: 1 2025-05-07T08:50:24.752713Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;local_tx_no=12;method=complete;tx_info=TTxWrite;tablet_id=9437184;tx_state=complete;fline=columnshard_impl.cpp:785;event=start_indexation_tasks;insert_overload_size=0; 2025-05-07T08:50:24.763337Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Write (record) into pathId 1 writeId 10 at tablet 9437184 2025-05-07T08:50:24.763697Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager on execute at tablet 9437184 Save Batch GenStep: 2:10 Blob count: 1 2025-05-07T08:50:24.776033Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Save Batch GenStep: 2:10 Blob count: 1 2025-05-07T08:50:24.776204Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;local_tx_no=13;method=complete;tx_info=TTxWrite;tablet_id=9437184;tx_state=complete;fline=columnshard_impl.cpp:785;event=start_indexation_tasks;insert_overload_size=0; 2025-05-07T08:50:24.777903Z 
node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Write (record) into pathId 1 writeId 11 at tablet 9437184 2025-05-07T08:50:24.778254Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager on execute at tablet 9437184 Save Batch GenStep: 2:11 Blob count: 1 2025-05-07T08:50:24.790905Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Save Batch GenStep: 2:11 Blob count: 1 2025-05-07T08:50:24.791074Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;local_tx_no=14;method=complete;tx_info=TTxWrite;tablet_id=9437184;tx_state=complete;fline=columnshard_impl.cpp:785;event=start_indexation_tasks;insert_overload_size=0; 2025-05-07T08:50:24.792697Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Write (record) into pathId 1 writeId 12 at tablet 9437184 2025-05-07T08:50:24.792986Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager on execute at tablet 9437184 Save Batch GenStep: 2:12 Blob count: 1 2025-05-07T08:50:24.806307Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Save Batch GenStep: 2:12 Blob count: 1 2025-05-07T08:50:24.806472Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;local_tx_no=15;method=complete;tx_info=TTxWrite;tablet_id=9437184;tx_state=complete;fline=columnshard_impl.cpp:785;event=start_indexation_tasks;insert_overload_size=0; 2025-05-07T08:50:24.827346Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Write (record) into pathId 1 writeId 13 at tablet 9437184 2025-05-07T08:50:24.827699Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager on execute at tablet 9437184 Save Batch GenStep: 2:13 Blob count: 1 2025-05-07T08:50:24.840911Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Save Batch GenStep: 2:13 Blob count: 1 2025-05-07T08:50:24.841077Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;local_tx_no=16;method=complete;tx_info=TTxWrite;tablet_id=9437184;tx_state=complete;fline=columnshard_impl.cpp:785;event=start_indexation_tasks;insert_overload_size=0; 2025-05-07T08:50:24.842485Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Write (record) into pathId 1 writeId 14 at tablet 9437184 2025-05-07T08:50:24.842744Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager on execute at tablet 9437184 Save Batch GenStep: 2:14 Blob count: 1 2025-05-07T08:50:24.854741Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Save Batch GenStep: 2:14 Blob count: 1 2025-05-07T08:50:24.854886Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;local_tx_no=17;method=complete;tx_info=TTxWrite;tablet_id=9437184;tx_state=complete;fline=columnshard_impl.cpp:785;event=start_indexation_tasks;insert_overload_size=0; 2025-05-07T08:50:24.856497Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Write (record) into pathId 1 writeId 15 at tablet 9437184 2025-05-07T08:50:24.856765Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager on execute at tablet 9437184 Save Batch GenStep: 2:15 Blob count: 1 2025-05-07T08:50:24.868738Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Save Batch GenStep: 2:15 Blob count: 1 2025-05-07T08:50:24.868880Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;local_tx_no=18;method=complete;tx_info=TTxWrite;tablet_id=9437184;tx_state=complete;fline=columnshard_impl.cpp:785;event=start_indexation_tasks;insert_overload_size=0; 2025-05-07T08:50:24.877707Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Write (record) into pathId 1 writeId 16 at 
tablet 9437184 2025-05-07T08:50:24.878149Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager on execute at tablet 9437184 Save Batch GenStep: 2:16 Blob count: 1 2025-05-07T08:50:24.891494Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Save Batch GenStep: 2:16 Blob count: 1 2025-05-07T08:50:24.891677Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;local_tx_no=19;method=complete;tx_info=TTxWrite;tablet_id=9437184;tx_state=complete;fline=columnshard_impl.cpp:785;event=start_indexation_tasks;insert_overload_size=0; 2025-05-07T08:50:24.893229Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Write (record) into pathId 1 writeId 17 at tablet 9437184 2025-05-07T08:50:24.893514Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager on execute at tablet 9437184 Save Batch GenStep: 2:17 Blob count: 1 2025-05-07T08:50:24.908972Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Save Batch GenStep: 2:17 Blob count: 1 2025-05-07T08:50:24.909136Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;local_tx_no=20;method=complete;tx_info=TTxWrite;tablet_id=9437184;tx_state=complete;fline=columnshard_impl.cpp:785;event=start_indexation_tasks;insert_overload_size=0; 2025-05-07T08:50:24.910699Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Write (record) into pathId 1 writeId 18 at tablet 9437184 2025-05-07T08:50:24.911024Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager on execute at tablet 9437184 Save Batch GenStep: 2:18 Blob count: 1 2025-05-07T08:50:24.928371Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Save Batch GenStep: 2:18 Blob count: 1 2025-05-07T08:50:24.928568Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;local_tx_no=21;method=complete;tx_info=TTxWrite;tablet_id=9437184;tx_state=complete;fline=columnshard_impl.cpp:785;event=start_indexation_tasks;insert_overload_size=0; 2025-05-07T08:50:24.941455Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Write (record) into pathId 1 writeId 19 at tablet 9437184 2025-05-07T08:50:24.941712Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager on execute at tablet 9437184 Save Batch GenStep: 2:19 Blob count: 1 2025-05-07T08:50:24.955589Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Save Batch GenStep: 2:19 Blob count: 1 2025-05-07T08:50:24.955752Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;local_tx_no=22;method=complete;tx_info=TTxWrite;tablet_id=9437184;tx_state=complete;fline=columnshard_impl.cpp:785;event=start_indexation_tasks;insert_overload_size=0; 2025-05-07T08:50:24.957358Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Write (record) into pathId 1 writeId 20 at tablet 9437184 2025-05-07T08:50:24.957647Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager on execute at tablet 9437184 Save Batch GenStep: 2:20 Blob count: 1 2025-05-07T08:50:24.974238Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Save Batch GenStep: 2:20 Blob count: 1 2025-05-07T08:50:24.974411Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;local_tx_no=23;method=complete;tx_info=TTxWrite;tablet_id=9437184;tx_state=complete;fline=columnshard_impl.cpp:785;event=start_indexation_tasks;insert_overload_size=0; 2025-05-07T08:50:24.976358Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Write (record) into pathId 1 writeId 21 at tablet 9437184 2025-05-07T08:50:24.976683Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: 
BlobManager on execute at tablet 9437184 Save Batch GenStep: 2:21 Blob count: 1 2025-05-07T08:50:24.990619Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Save Batch GenStep: 2:21 Blob count: 1 2025-05-07T08:50:24.990810Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;local_tx_no=24;method=complete;tx_info=TTxWrite;tablet_id=9437184;tx_state=complete;fline=columnshard_impl.cpp:785;event=start_indexation_tasks;insert_overload_size=0; 2025-05-07T08:50:25.376623Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;parent=[1:139:2171];fline=actor.cpp:22;event=flush_writing;size=6330728;count=1; 2025-05-07T08:50:25.451145Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Write (record) into pathId 1 writeId 22 at tablet 9437184 2025-05-07T08:50:25.451506Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager on execute at tablet 9437184 Save Batch GenStep: 2:22 Blob count: 1 2025-05-07T08:50:25.464344Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Save Batch GenStep: 2:22 Blob count: 1 2025-05-07T08:50:25.464542Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;local_tx_no=25;method=complete;tx_info=TTxWrite;tablet_id=9437184;tx_state=complete;fline=columnshard_impl.cpp:785;event=start_indexation_tasks;insert_overload_size=0;
>> TReplicationTests::CreateWithoutCredentials [GOOD]
>> TReplicationTests::Describe
>> ReadOnlyVDisk::TestSync [GOOD]
>> THDRRQuoterResourceTreeRuntimeTest::TestAllocateResource [GOOD]
>> THDRRQuoterResourceTreeRuntimeTest::TestAllocationGranularity [GOOD]
>> THDRRQuoterResourceTreeRuntimeTest::TestAmountIsLessThanEpsilon [GOOD]
>> THDRRQuoterResourceTreeRuntimeTest::TestActiveSessionDisconnectsAndThenConnectsAgain [GOOD]
>> THDRRQuoterResourceTreeRuntimeTest::TestActiveMultiresourceSessionDisconnectsAndThenConnectsAgain [GOOD]
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index_build/unittest >> IndexBuildTest::WithFollowers [GOOD]
Test command err:
Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:128:2058] recipient: [1:108:2140] 2025-05-07T08:50:25.186549Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:50:25.186685Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:50:25.186738Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:50:25.186795Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:50:25.186843Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:50:25.186871Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:50:25.186926Z node 1
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:50:25.187005Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:50:25.187585Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:50:25.187869Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:50:25.270840Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:50:25.270912Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:50:25.309447Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:50:25.309601Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:50:25.309792Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:50:25.320558Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:50:25.321211Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:50:25.322030Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:50:25.322425Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:50:25.327742Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:50:25.329578Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:50:25.329657Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:50:25.329718Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:50:25.329772Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:50:25.329925Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:50:25.330204Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:50:25.338498Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:239:2058] recipient: 
[1:15:2062] 2025-05-07T08:50:25.477513Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:50:25.477760Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:50:25.478063Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:50:25.478301Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:50:25.478345Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:50:25.480721Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:50:25.480897Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:50:25.481119Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:50:25.481197Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:50:25.481239Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:50:25.481274Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:50:25.483638Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:50:25.483716Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:50:25.483758Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:50:25.485957Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:50:25.486051Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:50:25.486105Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:50:25.486153Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:50:25.490084Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:50:25.492385Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:50:25.492659Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:50:25.493807Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:50:25.493995Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 130 RawX2: 4294969450 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:50:25.494069Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:50:25.494404Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:50:25.494462Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:50:25.494666Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:50:25.494774Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:50:25.497285Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:50:25.497349Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:50:25.497593Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:50:25.497657Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 
:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-05-07T08:50:26.263633Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 104 2025-05-07T08:50:26.263917Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 104 2025-05-07T08:50:26.263949Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 104 2025-05-07T08:50:26.263991Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 104, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 7 2025-05-07T08:50:26.264024Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-05-07T08:50:26.264859Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 8 PathOwnerId: 72057594046678944, cookie: 104 2025-05-07T08:50:26.264962Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 8 PathOwnerId: 72057594046678944, cookie: 104 2025-05-07T08:50:26.265006Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 104 2025-05-07T08:50:26.265045Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 104, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 8 2025-05-07T08:50:26.265103Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5 2025-05-07T08:50:26.266704Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 104 2025-05-07T08:50:26.266800Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 104 2025-05-07T08:50:26.266829Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 104 2025-05-07T08:50:26.267530Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 104 2025-05-07T08:50:26.267613Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 104 2025-05-07T08:50:26.267644Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 104 2025-05-07T08:50:26.267687Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 104, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], version: 18446744073709551615 2025-05-07T08:50:26.267722Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 4 2025-05-07T08:50:26.267796Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 104, ready parts: 2/3, is published: true 2025-05-07T08:50:26.268392Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 104:2, at schemeshard: 72057594046678944 2025-05-07T08:50:26.268448Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_table.cpp:414: TDropTable TProposedDeletePart operationId: 104:2 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:50:26.268795Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove table for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 3 2025-05-07T08:50:26.268930Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#104:2 progress is 3/3 2025-05-07T08:50:26.268959Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 104 ready parts: 3/3 2025-05-07T08:50:26.268994Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#104:2 progress is 3/3 2025-05-07T08:50:26.269022Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 104 ready parts: 3/3 2025-05-07T08:50:26.269053Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 104, ready parts: 3/3, is published: true 2025-05-07T08:50:26.269115Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:337:2316] message: TxId: 104 2025-05-07T08:50:26.269196Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 104 ready parts: 3/3 2025-05-07T08:50:26.269245Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 104:0 2025-05-07T08:50:26.269281Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 104:0 2025-05-07T08:50:26.269378Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-05-07T08:50:26.269432Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 104:1 2025-05-07T08:50:26.269455Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 104:1 2025-05-07T08:50:26.269502Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-05-07T08:50:26.269525Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 104:2 2025-05-07T08:50:26.269545Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 104:2 2025-05-07T08:50:26.269581Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 2 2025-05-07T08:50:26.271739Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 2025-05-07T08:50:26.271878Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 2025-05-07T08:50:26.274174Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 2025-05-07T08:50:26.274243Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 2025-05-07T08:50:26.274330Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 2025-05-07T08:50:26.274510Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 104: got EvNotifyTxCompletionResult 2025-05-07T08:50:26.274548Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 104: satisfy waiter [1:703:2661] TestWaitNotification: OK eventTxId 104 2025-05-07T08:50:26.275328Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/WithFollowers" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T08:50:26.275633Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/WithFollowers" took 287us result status StatusSuccess 2025-05-07T08:50:26.276089Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/WithFollowers" PathDescription { Self { Name: "WithFollowers" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 8 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 8 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 2 TableSchemaVersion: 4 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "WithFollowers" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value0" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "value1" Type: "Utf8" TypeId: 4608 Id: 3 NotNull: false IsBuildInProgress: false } Columns { Name:
"valueFloat" Type: "Float" TypeId: 33 Id: 4 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 4 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index_build/unittest >> IndexBuildTest::IndexPartitioningIsPersisted [GOOD]
Test command err:
Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:128:2058] recipient: [1:108:2140] 2025-05-07T08:50:23.283482Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:50:23.283603Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:50:23.283660Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:50:23.283696Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:50:23.283744Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:50:23.283772Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:50:23.283827Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:50:23.283919Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false
2025-05-07T08:50:23.284684Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:50:23.285052Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:50:23.373193Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:50:23.373260Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:50:23.391100Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:50:23.391237Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:50:23.391424Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:50:23.401570Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:50:23.402223Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:50:23.402924Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:50:23.403272Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:50:23.406362Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:50:23.408061Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:50:23.408149Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:50:23.408219Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:50:23.408267Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:50:23.408363Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:50:23.408533Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:50:23.419588Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:239:2058] recipient: [1:15:2062] 2025-05-07T08:50:23.562376Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:50:23.562620Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:50:23.562937Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:50:23.563167Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:50:23.563254Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:50:23.566254Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:50:23.566435Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:50:23.566653Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:50:23.566704Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:50:23.566748Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:50:23.566804Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:50:23.569270Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:50:23.569351Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:50:23.569403Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:50:23.573121Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:50:23.573189Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:50:23.573271Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:50:23.573337Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:50:23.577722Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:50:23.590522Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:50:23.590799Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:50:23.591980Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:50:23.592144Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 130 RawX2: 4294969450 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:50:23.592220Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:50:23.592583Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:50:23.592661Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:50:23.592866Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:50:23.592952Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:50:23.596053Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:50:23.596112Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:50:23.596339Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:50:23.596391Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 
Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableIndexes { Name: "Index" LocalPathId: 3 Type: EIndexTypeGlobal State: EIndexStateReady KeyColumnNames: "value" SchemaVersion: 2 PathOwnerId: 72057594046678944 DataSize: 0 IndexImplTableDescriptions { } } TableSchemaVersion: 3 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:50:26.175897Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/Index" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-05-07T08:50:26.176113Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/Index" took 242us result status StatusSuccess 2025-05-07T08:50:26.176758Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Table/Index" PathDescription { Self { Name: "Index" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeTableIndex CreateFinished: true CreateTxId: 281474976710758 CreateStep: 5000004 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableIndexVersion: 2 } ChildrenExist: true } Children { Name: "indexImplTable" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710758 CreateStep: 5000004 ParentPathId: 3 PathState: EPathStateAlter Owner: "root@builtin" ACL: "" PathSubType: EPathSubTypeSyncIndexImplTable Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 
72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } TableIndex { Name: "Index" LocalPathId: 3 Type: EIndexTypeGlobal State: EIndexStateReady KeyColumnNames: "value" SchemaVersion: 2 PathOwnerId: 72057594046678944 DataSize: 0 IndexImplTableDescriptions { PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 KeepEraseMarkers: false MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 3 MaxPartitionsCount: 3 } } } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:50:26.177442Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/Index/indexImplTable" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-05-07T08:50:26.177792Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/Index/indexImplTable" took 364us result status StatusSuccess 2025-05-07T08:50:26.178855Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Table/Index/indexImplTable" 
PathDescription { Self { Name: "indexImplTable" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710758 CreateStep: 5000004 ParentPathId: 3 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeSyncIndexImplTable Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "indexImplTable" Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "value" KeyColumnNames: "key" KeyColumnIds: 1 KeyColumnIds: 2 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 KeepEraseMarkers: false MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 3 MaxPartitionsCount: 3 } } SplitBoundary { KeyPrefix { Tuple { Optional { Text: "alice" } } Tuple { } } } SplitBoundary { KeyPrefix { Tuple { Optional { Text: "bob" } } Tuple { } } } TableSchemaVersion: 2 IsBackup: false IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "\002\000\005\000\000\000alice\000\000\000\200" IsPoint: false IsInclusive: false DatashardId: 72075186233409547 } TablePartitions { EndOfRangeKeyPrefix: "\002\000\003\000\000\000bob\000\000\000\200" IsPoint: false 
IsInclusive: false DatashardId: 72075186233409548 } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186233409549 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 3 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 4 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index_build/unittest >> IndexBuildTest::RejectsDropIndex [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140] 2025-05-07T08:50:22.882253Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:50:22.882334Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:50:22.882376Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:50:22.882414Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:50:22.882468Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:50:22.882507Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:50:22.882561Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:50:22.882630Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:50:22.883369Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:50:22.883705Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:50:22.973652Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:50:22.973704Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:50:22.991894Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:50:22.992149Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:50:22.992347Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:50:22.998807Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:50:22.999164Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:50:22.999913Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:50:23.000129Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:50:23.003591Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:50:23.005082Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:50:23.005141Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:50:23.005209Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:50:23.005256Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:50:23.005361Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:50:23.005584Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:50:23.013349Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T08:50:23.160691Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:50:23.160934Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: 
TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:50:23.161187Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:50:23.161450Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:50:23.161567Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:50:23.164007Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:50:23.164147Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:50:23.164339Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:50:23.164392Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:50:23.164429Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:50:23.164460Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:50:23.166333Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:50:23.166408Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:50:23.166447Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:50:23.168206Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:50:23.168256Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:50:23.168298Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:50:23.168349Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:50:23.172026Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:50:23.174382Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:50:23.174648Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:50:23.175923Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:50:23.176091Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:50:23.176160Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:50:23.176505Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:50:23.176574Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:50:23.176774Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:50:23.176854Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:50:23.179364Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:50:23.179433Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:50:23.179665Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:50:23.179709Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 
xStats { PerShardStats { ShardId: 72075186233409547 CpuTimeUsec: 703 } } 2025-05-07T08:50:26.276683Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation FindRelatedPartByTabletId, TxId: 107, tablet: 72075186233409547, partId: 0 2025-05-07T08:50:26.276814Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 107:0, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409547 Status: COMPLETE TxId: 107 Step: 5000004 OrderId: 107 ExecLatency: 0 ProposeLatency: 2 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409547 CpuTimeUsec: 703 } } 2025-05-07T08:50:26.276919Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_part.cpp:108: HandleReply TEvDataShard::TEvProposeTransactionResult Ignore message: tablet# 72057594046678944, ev# TxKind: TX_KIND_SCHEME Origin: 72075186233409547 Status: COMPLETE TxId: 107 Step: 5000004 OrderId: 107 ExecLatency: 0 ProposeLatency: 2 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409547 CpuTimeUsec: 703 } } 2025-05-07T08:50:26.278034Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5472: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 324 RawX2: 8589936899 } Origin: 72075186233409547 State: 5 TxId: 107 Step: 0 Generation: 2 2025-05-07T08:50:26.278096Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation FindRelatedPartByTabletId, TxId: 107, tablet: 72075186233409547, partId: 0 2025-05-07T08:50:26.278234Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 107:0, at schemeshard: 72057594046678944, message: Source { RawX1: 324 RawX2: 8589936899 } Origin: 72075186233409547 State: 5 TxId: 107 Step: 0 Generation: 2 2025-05-07T08:50:26.278299Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_table.cpp:332: TDropTable TDeleteTableBarrier operationId: 107:0 HandleReply TEvDataShard::TEvSchemaChanged, save it, at schemeshard: 72057594046678944 2025-05-07T08:50:26.280537Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 107:0, at schemeshard: 72057594046678944 2025-05-07T08:50:26.280619Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_table.cpp:368: TDropTable TDeleteTableBarrier operationId: 107:0 ProgressState, operation type: TxDropTable, at tablet# 72057594046678944 2025-05-07T08:50:26.280684Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:1059: Set barrier, OperationId: 107:0, name: RenamePathBarrier, done: 0, blocked: 1, parts count: 1 2025-05-07T08:50:26.280729Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:1103: All parts have reached barrier, tx: 107, done: 0, blocked: 1 2025-05-07T08:50:26.280825Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_table.cpp:344: TDropTable TDeleteTableBarrier operationId: 107:0 HandleReply TEvPrivate::TEvCompleteBarrier, msg: NKikimr::NSchemeShard::TEvPrivate::TEvCompleteBarrier { TxId: 107 Name: RenamePathBarrier }, at tablet# 72057594046678944 2025-05-07T08:50:26.280969Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 107:0 137 -> 129 2025-05-07T08:50:26.281106Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 
72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:50:26.281226Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-05-07T08:50:26.282209Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 107:0, at schemeshard: 72057594046678944 2025-05-07T08:50:26.284026Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 107:0, at schemeshard: 72057594046678944 2025-05-07T08:50:26.286182Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:50:26.286254Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 107, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:50:26.286431Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 107, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-05-07T08:50:26.286601Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:50:26.286650Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [2:205:2207], at schemeshard: 72057594046678944, txId: 107, path id: 1 2025-05-07T08:50:26.286707Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [2:205:2207], at schemeshard: 72057594046678944, txId: 107, path id: 2 2025-05-07T08:50:26.287103Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 107:0, at schemeshard: 72057594046678944 2025-05-07T08:50:26.287169Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1036: NTableState::TProposedWaitParts operationId# 107:0 ProgressState at tablet: 72057594046678944 2025-05-07T08:50:26.287283Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:674: all shard schema changes has been received, operationId: 107:0, at schemeshard: 72057594046678944 2025-05-07T08:50:26.287332Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:686: send schema changes ack message, operation: 107:0, datashard: 72075186233409547, at schemeshard: 72057594046678944 2025-05-07T08:50:26.287382Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 107:0 129 -> 240 2025-05-07T08:50:26.288393Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 9 PathOwnerId: 72057594046678944, cookie: 107 2025-05-07T08:50:26.288521Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 9 PathOwnerId: 72057594046678944, cookie: 107 2025-05-07T08:50:26.288570Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 107 2025-05-07T08:50:26.288618Z 
node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 107, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 9 2025-05-07T08:50:26.288677Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-05-07T08:50:26.289443Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 107 2025-05-07T08:50:26.289532Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 107 2025-05-07T08:50:26.289564Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 107 2025-05-07T08:50:26.289601Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 107, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 18446744073709551615 2025-05-07T08:50:26.289647Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5 2025-05-07T08:50:26.289730Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 107, ready parts: 0/1, is published: true 2025-05-07T08:50:26.293020Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 107:0, at schemeshard: 72057594046678944 2025-05-07T08:50:26.293095Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_table.cpp:414: TDropTable TProposedDeletePart operationId: 107:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:50:26.293401Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove table for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-05-07T08:50:26.293538Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#107:0 progress is 1/1 2025-05-07T08:50:26.293590Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 107 ready parts: 1/1 2025-05-07T08:50:26.293644Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#107:0 progress is 1/1 2025-05-07T08:50:26.293684Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 107 ready parts: 1/1 2025-05-07T08:50:26.293730Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 107, ready parts: 1/1, is published: true 2025-05-07T08:50:26.293810Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [2:376:2344] message: TxId: 107 2025-05-07T08:50:26.293863Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 107 ready parts: 1/1 2025-05-07T08:50:26.293907Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 107:0 2025-05-07T08:50:26.293948Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 107:0 2025-05-07T08:50:26.294079Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-05-07T08:50:26.295914Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 107 2025-05-07T08:50:26.296235Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 107 2025-05-07T08:50:26.297257Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 107: got EvNotifyTxCompletionResult 2025-05-07T08:50:26.297315Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 107: satisfy waiter [2:579:2539] TestWaitNotification: OK eventTxId 107 >> TColumnShardTestReadWrite::WriteOverload-InStore+WithWritePortionsOnInsert [GOOD] |89.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/kesus/tablet/ut/unittest >> THDRRQuoterResourceTreeRuntimeTest::TestActiveMultiresourceSessionDisconnectsAndThenConnectsAgain [GOOD] >> TReplicationTests::AlterReplicatedTable [GOOD] >> TReplicationTests::AlterReplicatedIndexTable >> THDRRQuoterResourceTreeRuntimeTest::TestCreateInactiveSession [GOOD] >> THDRRQuoterResourceTreeRuntimeTest::TestDeleteResourceSessions [GOOD] >> THDRRQuoterResourceTreeRuntimeTest::TestDistributeResourcesBetweenConsumers [GOOD] >> THDRRQuoterResourceTreeRuntimeTest::TestEffectiveProps [GOOD] >> THDRRQuoterResourceTreeRuntimeTest::TestDeleteResourceWithActiveChildren [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_read_only_vdisk/unittest >> ReadOnlyVDisk::TestSync [GOOD] Test command err: RandomSeed# 13551914121005873207 Setting VDisk read-only to 1 for position 0 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:0:0] Setting VDisk read-only to 1 for position 1 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:1:0] SEND TEvPut with key [1:1:0:0:0:131072:0] 2025-05-07T08:50:07.119092Z 1 00h02m00.100000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:8809:940] 2025-05-07T08:50:07.119520Z 2 00h02m00.100000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:8816:947] TEvPutResult: TEvPutResult {Id# [1:1:0:0:0:131072:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Setting VDisk read-only to 0 for position 0 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:0:0] Setting VDisk read-only to 0 for position 1 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:1:0] Setting VDisk read-only to 1 for position 1 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:1:0] Setting VDisk read-only to 1 for position 2 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:2:0] SEND TEvPut with key [1:1:1:0:0:32768:0] 2025-05-07T08:50:09.534382Z 3 00h06m00.210512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:8823:954] 2025-05-07T08:50:09.534549Z 2 00h06m00.210512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in 
read-only Sender# [1:8816:947] TEvPutResult: TEvPutResult {Id# [1:1:1:0:0:32768:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Setting VDisk read-only to 0 for position 1 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:1:0] Setting VDisk read-only to 0 for position 2 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:2:0] Setting VDisk read-only to 1 for position 2 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:2:0] Setting VDisk read-only to 1 for position 3 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:3:0] SEND TEvPut with key [1:1:2:0:0:131072:0] TEvPutResult: TEvPutResult {Id# [1:1:2:0:0:131072:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Setting VDisk read-only to 0 for position 2 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:2:0] Setting VDisk read-only to 0 for position 3 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:3:0] Setting VDisk read-only to 1 for position 3 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:3:0] Setting VDisk read-only to 1 for position 4 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:4:0] SEND TEvPut with key [1:1:3:0:0:32768:0] 2025-05-07T08:50:14.463842Z 5 00h14m00.361536s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) Unavailable in read-only Sender# [1:8837:968] 2025-05-07T08:50:14.463950Z 4 00h14m00.361536s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) Unavailable in read-only Sender# [1:8830:961] TEvPutResult: TEvPutResult {Id# [1:1:3:0:0:32768:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Setting VDisk read-only to 0 for position 3 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:3:0] Setting VDisk read-only to 0 for position 4 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:4:0] Setting VDisk read-only to 1 for position 4 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:4:0] Setting VDisk read-only to 1 for position 5 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:5:0] SEND TEvPut with key [1:1:4:0:0:131072:0] 2025-05-07T08:50:17.437509Z 6 00h18m00.412560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) Unavailable in read-only Sender# [1:8844:975] 2025-05-07T08:50:17.437616Z 5 00h18m00.412560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) Unavailable in read-only Sender# [1:8837:968] TEvPutResult: TEvPutResult {Id# [1:1:4:0:0:131072:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Setting VDisk read-only to 0 for position 4 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:4:0] Setting VDisk read-only to 0 for position 5 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:5:0] Setting VDisk read-only to 1 for position 5 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:5:0] Setting VDisk read-only to 1 for position 6 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:6:0] SEND TEvPut with key [1:1:5:0:0:32768:0] 2025-05-07T08:50:20.858985Z 7 00h22m00.500000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) Unavailable in read-only Sender# [1:8851:982] 2025-05-07T08:50:20.859127Z 6 00h22m00.500000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) Unavailable in read-only Sender# [1:8844:975] TEvPutResult: TEvPutResult {Id# [1:1:5:0:0:32768:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Setting VDisk read-only to 0 for position 5 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:5:0] Setting VDisk read-only to 0 for position 6 Invoking SetVDiskReadOnly for vdisk 
[82000000:1:0:6:0] Setting VDisk read-only to 1 for position 6 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:6:0] Setting VDisk read-only to 1 for position 0 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:0:0] SEND TEvPut with key [1:1:6:0:0:131072:0] 2025-05-07T08:50:24.127768Z 7 00h26m00.561536s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) Unavailable in read-only Sender# [1:8851:982] TEvPutResult: TEvPutResult {Id# [1:1:6:0:0:131072:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Setting VDisk read-only to 0 for position 6 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:6:0] Setting VDisk read-only to 0 for position 0 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:0:0] === Read all 7 blob(s) === SEND TEvGet with key [1:1:0:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:0:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:1:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:1:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:2:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:2:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:3:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:3:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:4:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:4:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:5:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:5:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:6:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:6:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} >> TReplicationTests::Describe [GOOD] >> TReplicationTests::CreateReplicatedTable ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> TColumnShardTestReadWrite::WriteOverload-InStore+WithWritePortionsOnInsert [GOOD] Test command err: 2025-05-07T08:50:14.652102Z node 1 :BLOB_CACHE NOTICE: ctor_logger.h:56: MaxCacheDataSize: 20971520 InFlightDataSize: 0 2025-05-07T08:50:14.774151Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-05-07T08:50:14.799949Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-05-07T08:50:14.800238Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-05-07T08:50:14.808318Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-05-07T08:50:14.808524Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-05-07T08:50:14.808728Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-05-07T08:50:14.808820Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-05-07T08:50:14.808911Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-05-07T08:50:14.808992Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanInsertionDedup; 2025-05-07T08:50:14.809072Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-05-07T08:50:14.809152Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-05-07T08:50:14.809226Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-05-07T08:50:14.809308Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-05-07T08:50:14.809418Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-05-07T08:50:14.809489Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-05-07T08:50:14.838825Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-05-07T08:50:14.839034Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:137;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=11;current_normalizer=CLASS_NAME=Granules; 2025-05-07T08:50:14.839110Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-05-07T08:50:14.839336Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-05-07T08:50:14.839533Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-05-07T08:50:14.839631Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-05-07T08:50:14.839714Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-05-07T08:50:14.839820Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-05-07T08:50:14.839887Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-05-07T08:50:14.839947Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-05-07T08:50:14.839988Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-05-07T08:50:14.840201Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-05-07T08:50:14.840276Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-05-07T08:50:14.840335Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-05-07T08:50:14.840385Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-05-07T08:50:14.840488Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-05-07T08:50:14.840555Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-05-07T08:50:14.840607Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanInsertionDedup;id=CleanInsertionDedup; 2025-05-07T08:50:14.840649Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=8;type=CleanInsertionDedup; 2025-05-07T08:50:14.840767Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanInsertionDedup;id=8; 2025-05-07T08:50:14.840831Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-05-07T08:50:14.840865Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-05-07T08:50:14.840939Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 
2025-05-07T08:50:14.840983Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-05-07T08:50:14.841016Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-05-07T08:50:14.841262Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-05-07T08:50:14.841329Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-05-07T08:50:14.841369Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-05-07T08:50:14.841598Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-05-07T08:50:14.841653Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-05-07T08:50:14.841693Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-05-07T08:50:14.841839Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-05-07T08:50:14.841914Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-05-07T08:50:14.841958Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-05-07T08:50:14.842119Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-05-07T08:50:14.842207Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-05-07T08:50:14.842260Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-05-07T08:50:14.842297Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-05-07T08:50:14.842790Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=58; 2025-05-07T08:50:14.842913Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=50; ... ;size_of_meta=144; 2025-05-07T08:50:26.532723Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=18;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=constructor_portion.cpp:40;memory_size=326;data_size=316;sum=4890;count=15;size_of_portion=216; 2025-05-07T08:50:26.533694Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager on execute at tablet 9437184 Save Batch GenStep: 2:15 Blob count: 1 2025-05-07T08:50:26.546362Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Save Batch GenStep: 2:15 Blob count: 1 2025-05-07T08:50:26.548576Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=19;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=constructor_meta.cpp:54;memory_size=94;data_size=68;sum=1504;count=31; 2025-05-07T08:50:26.548666Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=19;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=constructor_meta.cpp:75;memory_size=254;data_size=244;sum=4064;count=32;size_of_meta=144; 2025-05-07T08:50:26.548731Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=19;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=constructor_portion.cpp:40;memory_size=326;data_size=316;sum=5216;count=16;size_of_portion=216; 2025-05-07T08:50:26.549584Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager on execute at tablet 9437184 Save Batch GenStep: 2:16 Blob count: 1 2025-05-07T08:50:26.562405Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Save Batch GenStep: 2:16 Blob count: 1 2025-05-07T08:50:26.574562Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=20;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=constructor_meta.cpp:54;memory_size=94;data_size=68;sum=1598;count=33; 2025-05-07T08:50:26.574689Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=20;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=constructor_meta.cpp:75;memory_size=254;data_size=244;sum=4318;count=34;size_of_meta=144; 2025-05-07T08:50:26.574788Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: 
tablet_id=9437184;self_id=[1:139:2171];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=20;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=constructor_portion.cpp:40;memory_size=326;data_size=316;sum=5542;count=17;size_of_portion=216; 2025-05-07T08:50:26.575782Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager on execute at tablet 9437184 Save Batch GenStep: 2:17 Blob count: 1 2025-05-07T08:50:26.590538Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Save Batch GenStep: 2:17 Blob count: 1 2025-05-07T08:50:26.592864Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=21;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=constructor_meta.cpp:54;memory_size=94;data_size=68;sum=1692;count=35; 2025-05-07T08:50:26.592960Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=21;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=constructor_meta.cpp:75;memory_size=254;data_size=244;sum=4572;count=36;size_of_meta=144; 2025-05-07T08:50:26.593032Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=21;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=constructor_portion.cpp:40;memory_size=326;data_size=316;sum=5868;count=18;size_of_portion=216; 2025-05-07T08:50:26.594133Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager on execute at tablet 9437184 Save Batch GenStep: 2:18 Blob count: 1 2025-05-07T08:50:26.609075Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Save Batch GenStep: 2:18 Blob count: 1 2025-05-07T08:50:26.611095Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=22;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=constructor_meta.cpp:54;memory_size=94;data_size=68;sum=1786;count=37; 2025-05-07T08:50:26.611200Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=22;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=constructor_meta.cpp:75;memory_size=254;data_size=244;sum=4826;count=38;size_of_meta=144; 2025-05-07T08:50:26.611278Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: 
tablet_id=9437184;self_id=[1:139:2171];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=22;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=constructor_portion.cpp:40;memory_size=326;data_size=316;sum=6194;count=19;size_of_portion=216; 2025-05-07T08:50:26.612350Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager on execute at tablet 9437184 Save Batch GenStep: 2:19 Blob count: 1 2025-05-07T08:50:26.625307Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Save Batch GenStep: 2:19 Blob count: 1 2025-05-07T08:50:26.638474Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=23;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=constructor_meta.cpp:54;memory_size=94;data_size=68;sum=1880;count=39; 2025-05-07T08:50:26.638582Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=23;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=constructor_meta.cpp:75;memory_size=254;data_size=244;sum=5080;count=40;size_of_meta=144; 2025-05-07T08:50:26.638661Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=23;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=constructor_portion.cpp:40;memory_size=326;data_size=316;sum=6520;count=20;size_of_portion=216; 2025-05-07T08:50:26.639616Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager on execute at tablet 9437184 Save Batch GenStep: 2:20 Blob count: 1 2025-05-07T08:50:26.652771Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Save Batch GenStep: 2:20 Blob count: 1 2025-05-07T08:50:26.654880Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=24;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=constructor_meta.cpp:54;memory_size=94;data_size=68;sum=1974;count=41; 2025-05-07T08:50:26.655010Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=24;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=constructor_meta.cpp:75;memory_size=254;data_size=244;sum=5334;count=42;size_of_meta=144; 2025-05-07T08:50:26.655088Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: 
tablet_id=9437184;self_id=[1:139:2171];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=24;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=constructor_portion.cpp:40;memory_size=326;data_size=316;sum=6846;count=21;size_of_portion=216; 2025-05-07T08:50:26.657985Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager on execute at tablet 9437184 Save Batch GenStep: 2:21 Blob count: 1 2025-05-07T08:50:26.670239Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Save Batch GenStep: 2:21 Blob count: 1 2025-05-07T08:50:27.101699Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=25;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=constructor_meta.cpp:54;memory_size=94;data_size=68;sum=2068;count=43; 2025-05-07T08:50:27.101847Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=25;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=constructor_meta.cpp:75;memory_size=254;data_size=244;sum=5588;count=44;size_of_meta=144; 2025-05-07T08:50:27.101958Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=25;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=constructor_portion.cpp:40;memory_size=326;data_size=316;sum=7172;count=22;size_of_portion=216; 2025-05-07T08:50:27.103254Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager on execute at tablet 9437184 Save Batch GenStep: 2:22 Blob count: 1 2025-05-07T08:50:27.115897Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Save Batch GenStep: 2:22 Blob count: 1 |89.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/security/ut/ydb-core-security-ut |89.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/security/ut/ydb-core-security-ut |89.1%| [LD] {RESULT} $(B)/ydb/core/security/ut/ydb-core-security-ut |89.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/kesus/tablet/ut/unittest >> THDRRQuoterResourceTreeRuntimeTest::TestDeleteResourceWithActiveChildren [GOOD] >> test_sql_streaming.py::test[suites-ReadTopicWithMetadataNestedDeep-default.txt] [FAIL] >> Viewer::Plan2SvgBad [GOOD] >> TKesusTest::TestAcquireWaiterDowngrade >> TReplicationTests::AlterReplicatedIndexTable [GOOD] >> TReplicationTests::CopyReplicatedTable >> TKesusTest::TestAcquireWaiterDowngrade [GOOD] >> TKesusTest::TestAcquireWaiterUpgrade >> TKesusTest::TestAttachNewSessions >> TSchemeShardUserAttrsTest::VariousUse |89.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_user_attributes/unittest >> TKesusTest::TestReleaseLockFailure >> TKesusTest::TestAcquireWaiterUpgrade [GOOD] >> TKesusTest::TestAcquireWaiterChangeTimeoutToZero >> test_sql_streaming.py::test[suites-GroupByHoppingWindowPercentile-default.txt] [FAIL] >> TKesusTest::TestAttachNewSessions [GOOD] >> TKesusTest::TestAttachMissingSession >> 
TSubDomainTest::Boot
>> TKesusTest::TestReleaseLockFailure [GOOD]
>> TKesusTest::TestReleaseSemaphore
------- [TM] {asan, default-linux-x86_64, release} ydb/services/fq/ut_integration/unittest >> Yq_1::Basic_TaggedLiteral [GOOD]
Test command err:
2025-05-07T08:47:33.666737Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501622749828275606:2075];send_to=[0:7307199536658146131:7762515];
2025-05-07T08:47:33.666781Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message;
E0507 08:47:36.597495754 107843 dns_resolver.cc:162] no server name supplied in dns URI
E0507 08:47:36.619206529 107843 channel.cc:120] channel stack builder failed: UNKNOWN: the target uri is not valid: dns:///
2025-05-07T08:47:37.494384Z node 1 :YQ_CONTROL_PLANE_STORAGE ERROR: schema.cpp:160: Create directory "Root/yq" error: TRANSPORT_UNAVAILABLE [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:21782: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:21782 } ]
2025-05-07T08:47:37.921525Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError;
2025-05-07T08:47:38.670410Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501622749828275606:2075];send_to=[0:7307199536658146131:7762515];
2025-05-07T08:47:38.670464Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout;
2025-05-07T08:47:38.941088Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError;
2025-05-07T08:47:39.569637Z node 1 :YQ_CONTROL_PLANE_STORAGE ERROR: schema.cpp:160: Create directory "Root/yq" error: TRANSPORT_UNAVAILABLE [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:21782: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:21782 } ] 2025-05-07T08:47:39.595387Z node 1 :YQL_NODES_MANAGER ERROR: nodes_manager.cpp:364: ydb/core/fq/libs/actors/nodes_manager.cpp:322: TRANSPORT_UNAVAILABLE
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:21782: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint localhost:21782 2025-05-07T08:47:39.703692Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:39.953050Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:40.711671Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:40.962077Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; E0507 08:47:41.623567965 108416 dns_resolver.cc:162] no server name supplied in dns URI E0507 08:47:41.626552082 108416 channel.cc:120] channel stack builder failed: UNKNOWN: the target uri is not valid: dns:/// 2025-05-07T08:47:41.730467Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:41.972395Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:42.401144Z node 1 :YQ_CONTROL_PLANE_STORAGE ERROR: schema.cpp:160: Create directory "Root/yq" error: TRANSPORT_UNAVAILABLE [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:21782: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:21782 } ] 2025-05-07T08:47:42.747685Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:42.991160Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:43.771409Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:44.000569Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:44.933031Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:45.020494Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:45.946311Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:46.027807Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; E0507 08:47:46.810883711 108416 dns_resolver.cc:162] no server name supplied in dns URI E0507 08:47:46.811314551 108416 channel.cc:120] channel stack builder failed: UNKNOWN: the target uri is not valid: dns:/// 2025-05-07T08:47:46.980906Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:47.066315Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:47.278180Z node 1 :YQL_NODES_MANAGER ERROR: nodes_manager.cpp:364: ydb/core/fq/libs/actors/nodes_manager.cpp:322: TRANSPORT_UNAVAILABLE
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:21782: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint localhost:21782 2025-05-07T08:47:47.279117Z node 1 :YQL_NODES_MANAGER ERROR: nodes_manager.cpp:364: ydb/core/fq/libs/actors/nodes_manager.cpp:322: TRANSPORT_UNAVAILABLE
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:21782: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint localhost:21782 2025-05-07T08:47:47.283622Z node 1 :YQ_CONTROL_PLANE_STORAGE ERROR: schema.cpp:160: Create directory "Root/yq" error: TRANSPORT_UNAVAILABLE [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:21782: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:21782 } ] 2025-05-07T08:47:48.013104Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:48.080201Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:49.017767Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:49.077997Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:50.018923Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:50.106599Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:51.020976Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:51.138639Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; E0507 08:47:51.881390601 108425 dns_resolver.cc:162] no server name supplied in dns URI E0507 08:47:51.906847603 108425 channel.cc:120] channel stack builder failed: UNKNOWN: the target uri is not valid: dns:/// 2025-05-07T08:47:52.023557Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:52.141517Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:53.035465Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:53.163638Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:53.631476Z node 1 :YQL_NODES_MANAGER ERROR: nodes_manager.cpp:364: ydb/core/fq/libs/actors/nodes_manager.cpp:322: TRANSPORT_UNAVAILABLE
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:21782: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint lo ... "type.googleapis.com/NKikimrKqp.TKqpTableSinkSettings" value: "\032\036\n\016Root/yq/quotas\020\200\202\224\204\200\200\200\200\001\030\003(\001\"\023\n\014subject_type\020\001 \201 \"\021\n\nsubject_id\020\002 \201 \"\022\n\013metric_name\020\003 \201 *\026\n\020limit_updated_at\020\005 2*\022\n\014metric_limit\020\004 \004*\022\n\013metric_name\020\003 \201 *\022\n\014metric_usage\020\006 \004*\021\n\nsubject_id\020\002 \201 *\023\n\014subject_type\020\001 \201 *\026\n\020usage_updated_at\020\007 20\222\250\200\200\200\200@8\004@\000H\001R\022\t\306l\274g~\036\033h\021X\t\000\000\004\000\020\000X\000`\000h\004h\003h\002h\005h\001h\000h\006x\000" } } } 2025-05-07T08:49:02.018158Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:143: SelfId: [4:7501623132570807500:2392], TxId: 281474976715798, task: 1. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=4&id=YTU4OTY0MzktZjEyYzM5NDEtZTY1Y2UwMTktNzMwZThhOTg=. TraceId : 01jtmyy71b5xqth1e687cwhchj. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. CA StateFunc 271646926 2025-05-07T08:49:02.018185Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:1074: SelfId: [4:7501623132570807500:2392], TxId: 281474976715798, task: 1. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=4&id=YTU4OTY0MzktZjEyYzM5NDEtZTY1Y2UwMTktNzMwZThhOTg=. TraceId : 01jtmyy71b5xqth1e687cwhchj. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. Received channels info: 2025-05-07T08:49:02.018256Z node 4 :KQP_COMPUTE DEBUG: dq_sync_compute_actor_base.h:357: SelfId: [4:7501623132570807500:2392], TxId: 281474976715798, task: 1. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=4&id=YTU4OTY0MzktZjEyYzM5NDEtZTY1Y2UwMTktNzMwZThhOTg=. TraceId : 01jtmyy71b5xqth1e687cwhchj. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. About to drain async output 0. FreeSpace: 67108864, allowedOvercommit: 4194304, toSend: 71303168, finished: 0 2025-05-07T08:49:02.029863Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:3094: TxId: 281474976715798, task: 1. Add data: 78 / 78 2025-05-07T08:49:02.029949Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:3063: TxId: 281474976715798, task: 1. Send data=78, closed=1, bufferActorId=[4:7501623132570807494:2392] 2025-05-07T08:49:02.029997Z node 4 :KQP_COMPUTE DEBUG: dq_sync_compute_actor_base.h:371: SelfId: [4:7501623132570807500:2392], TxId: 281474976715798, task: 1. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=4&id=YTU4OTY0MzktZjEyYzM5NDEtZTY1Y2UwMTktNzMwZThhOTg=. TraceId : 01jtmyy71b5xqth1e687cwhchj. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. Drain async output 0. Free space decreased: -9223372036787666944, sent data from buffer: 78 2025-05-07T08:49:02.030023Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:675: TxId: 281474976715798, task: 1. Tasks execution finished 2025-05-07T08:49:02.030038Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:1593: SelfId: [4:7501623132570807500:2392], TxId: 281474976715798, task: 1. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=4&id=YTU4OTY0MzktZjEyYzM5NDEtZTY1Y2UwMTktNzMwZThhOTg=. TraceId : 01jtmyy71b5xqth1e687cwhchj. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. 
Waiting finish of sink[0] 2025-05-07T08:49:02.030078Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:143: SelfId: [4:7501623132570807500:2392], TxId: 281474976715798, task: 1. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=4&id=YTU4OTY0MzktZjEyYzM5NDEtZTY1Y2UwMTktNzMwZThhOTg=. TraceId : 01jtmyy71b5xqth1e687cwhchj. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. CA StateFunc 271646926 2025-05-07T08:49:02.030125Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:1074: SelfId: [4:7501623132570807500:2392], TxId: 281474976715798, task: 1. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=4&id=YTU4OTY0MzktZjEyYzM5NDEtZTY1Y2UwMTktNzMwZThhOTg=. TraceId : 01jtmyy71b5xqth1e687cwhchj. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. Received channels info: 2025-05-07T08:49:02.030154Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:675: TxId: 281474976715798, task: 1. Tasks execution finished 2025-05-07T08:49:02.030163Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:1593: SelfId: [4:7501623132570807500:2392], TxId: 281474976715798, task: 1. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=4&id=YTU4OTY0MzktZjEyYzM5NDEtZTY1Y2UwMTktNzMwZThhOTg=. TraceId : 01jtmyy71b5xqth1e687cwhchj. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. Waiting finish of sink[0] 2025-05-07T08:49:02.030201Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:143: SelfId: [4:7501623132570807500:2392], TxId: 281474976715798, task: 1. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=4&id=YTU4OTY0MzktZjEyYzM5NDEtZTY1Y2UwMTktNzMwZThhOTg=. TraceId : 01jtmyy71b5xqth1e687cwhchj. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. CA StateFunc 271646922 2025-05-07T08:49:02.030222Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:675: TxId: 281474976715798, task: 1. Tasks execution finished 2025-05-07T08:49:02.030234Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:1593: SelfId: [4:7501623132570807500:2392], TxId: 281474976715798, task: 1. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=4&id=YTU4OTY0MzktZjEyYzM5NDEtZTY1Y2UwMTktNzMwZThhOTg=. TraceId : 01jtmyy71b5xqth1e687cwhchj. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. Waiting finish of sink[0] 2025-05-07T08:49:02.030290Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:563: SelfId: [4:7501623132570807498:2543], Table: `Root/yq/quotas` ([72057594046644480:3:1]), SessionActorId: [4:7501623085326164489:2543]Recv EvWriteResult from ShardID=72075186224037890, Status=STATUS_COMPLETED, TxId=55, Locks= , Cookie=1 2025-05-07T08:49:02.030301Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:1802: SelfId: [4:7501623132570807494:2392], SessionActorId: [4:7501623081031195635:2392], Create new TableWriteActor for table `Root/yq/quotas` ([72057594046644480:3:1]). 
lockId=281474976715794 [4:7501623132570807505:2392] 2025-05-07T08:49:02.030321Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:815: SelfId: [4:7501623132570807498:2543], Table: `Root/yq/quotas` ([72057594046644480:3:1]), SessionActorId: [4:7501623085326164489:2543]Got completed result TxId=55, TabletId=72075186224037890, Cookie=1, Mode=3, Locks= 2025-05-07T08:49:02.030339Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:327: Table: `Root/yq/quotas` ([72057594046644480:3:1]), SessionActorId: [4:7501623081031195635:2392]Open: token=0 2025-05-07T08:49:02.030363Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:1853: SelfId: [4:7501623132570807494:2392], SessionActorId: [4:7501623081031195635:2392], ProcessRequestQueue [72057594046644480:3:1] NOT READY queue=1 2025-05-07T08:49:02.030372Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:2759: SelfId: [4:7501623128275840189:2543], SessionActorId: [4:7501623085326164489:2543], Committed TxId=0 2025-05-07T08:49:02.030418Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:334: SelfId: [4:7501623132570807505:2392], Table: `Root/yq/quotas` ([72057594046644480:3:1]), SessionActorId: [4:7501623081031195635:2392]Write: token=0 2025-05-07T08:49:02.030547Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:342: SelfId: [4:7501623132570807505:2392], Table: `Root/yq/quotas` ([72057594046644480:3:1]), SessionActorId: [4:7501623081031195635:2392]Close: token=0 2025-05-07T08:49:02.030633Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:2987: SelfId: [4:7501623132570807502:2392], TxId: 281474976715798, task: 1. TKqpForwardWriteActor recieve EvBufferWriteResult from [4:7501623132570807494:2392] 2025-05-07T08:49:02.030649Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:3005: SelfId: [4:7501623132570807502:2392], TxId: 281474976715798, task: 1. Finished 2025-05-07T08:49:02.030670Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:143: SelfId: [4:7501623132570807500:2392], TxId: 281474976715798, task: 1. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=4&id=YTU4OTY0MzktZjEyYzM5NDEtZTY1Y2UwMTktNzMwZThhOTg=. TraceId : 01jtmyy71b5xqth1e687cwhchj. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. CA StateFunc 271646922 2025-05-07T08:49:02.030688Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:675: TxId: 281474976715798, task: 1. Tasks execution finished 2025-05-07T08:49:02.030704Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:510: SelfId: [4:7501623132570807500:2392], TxId: 281474976715798, task: 1. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=4&id=YTU4OTY0MzktZjEyYzM5NDEtZTY1Y2UwMTktNzMwZThhOTg=. TraceId : 01jtmyy71b5xqth1e687cwhchj. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. Compute state finished. All channels and sinks finished 2025-05-07T08:49:02.030792Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:494: TxId: 281474976715798, task: 1. pass away 2025-05-07T08:49:02.030891Z node 4 :KQP_COMPUTE DEBUG: log.cpp:784: fline=kqp_compute_actor_factory.cpp:66;problem=finish_compute_actor;tx_id=281474976715798;task_id=1;success=1;message={
: Error: COMPUTE_STATE_FINISHED }; 2025-05-07T08:49:02.031248Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:1965: SelfId: [4:7501623132570807494:2392], SessionActorId: [4:7501623081031195635:2392], Start immediate commit 2025-05-07T08:49:02.031262Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:869: SelfId: [4:7501623132570807505:2392], Table: `Root/yq/quotas` ([72057594046644480:3:1]), SessionActorId: [4:7501623081031195635:2392]SetImmediateCommit 2025-05-07T08:49:02.031277Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:1911: SelfId: [4:7501623132570807494:2392], SessionActorId: [4:7501623081031195635:2392], Flush data 2025-05-07T08:49:02.031445Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:989: SelfId: [4:7501623132570807505:2392], Table: `Root/yq/quotas` ([72057594046644480:3:1]), SessionActorId: [4:7501623081031195635:2392]Send EvWrite to ShardID=72075186224037890, isPrepare=0, isImmediateCommit=1, TxId=0, LockTxId=0, LockNodeId=0, Locks= LockId: 281474976715794 DataShard: 72075186224037890 Generation: 1 Counter: 25 SchemeShard: 72057594046644480 PathId: 3, Size=136, Cookie=1, OperationsCount=1, IsFinal=1, Attempts=0, Mode=3 2025-05-07T08:49:02.037410Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:563: SelfId: [4:7501623132570807505:2392], Table: `Root/yq/quotas` ([72057594046644480:3:1]), SessionActorId: [4:7501623081031195635:2392]Recv EvWriteResult from ShardID=72075186224037890, Status=STATUS_COMPLETED, TxId=56, Locks= , Cookie=1 2025-05-07T08:49:02.037450Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:815: SelfId: [4:7501623132570807505:2392], Table: `Root/yq/quotas` ([72057594046644480:3:1]), SessionActorId: [4:7501623081031195635:2392]Got completed result TxId=56, TabletId=72075186224037890, Cookie=1, Mode=3, Locks= 2025-05-07T08:49:02.037495Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:2759: SelfId: [4:7501623132570807494:2392], SessionActorId: [4:7501623081031195635:2392], Committed TxId=0 2025-05-07T08:49:02.806910Z node 4 :FQ_PENDING_FETCHER ERROR: pending_fetcher.cpp:259: Error with GetTask:
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv6:%5B::%5D:1495: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint [::]:1495 2025-05-07T08:49:02.878698Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7261: Cannot get console configs 2025-05-07T08:49:02.878732Z node 4 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded ------- [TM] {asan, default-linux-x86_64, release} ydb/services/fq/ut_integration/unittest >> Yq_1::DescribeQuery [GOOD] Test command err: 2025-05-07T08:47:27.854792Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501622725784822374:2073];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:47:27.854900Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-05-07T08:47:33.186450Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501622725784822374:2073];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:47:33.188044Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; E0507 08:47:34.854852986 107312 dns_resolver.cc:162] no server name supplied in dns URI E0507 08:47:34.873341481 107312 channel.cc:120] channel stack builder failed: UNKNOWN: the target uri is not valid: dns:/// 2025-05-07T08:47:35.675931Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:35.687138Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:37.191693Z node 1 :YQ_CONTROL_PLANE_STORAGE ERROR: schema.cpp:160: Create directory "Root/yq" error: TRANSPORT_UNAVAILABLE [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:29739: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:29739 } ] 2025-05-07T08:47:37.192198Z node 1 :YQL_NODES_MANAGER ERROR: nodes_manager.cpp:364: ydb/core/fq/libs/actors/nodes_manager.cpp:322: TRANSPORT_UNAVAILABLE
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:29739: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint localhost:29739 2025-05-07T08:47:37.194193Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:37.202335Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:38.219339Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:38.219367Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:39.239652Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:39.239686Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:39.422892Z node 1 :YQ_CONTROL_PLANE_STORAGE ERROR: schema.cpp:160: Create directory "Root/yq" error: TRANSPORT_UNAVAILABLE [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:29739: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:29739 } ] E0507 08:47:39.965303432 107924 dns_resolver.cc:162] no server name supplied in dns URI E0507 08:47:39.965413389 107924 channel.cc:120] channel stack builder failed: UNKNOWN: the target uri is not valid: dns:/// 2025-05-07T08:47:40.315483Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:40.315862Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:41.310168Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:41.310218Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:42.315046Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:42.315673Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:43.503068Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:43.548588Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:44.533197Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:44.556330Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; E0507 08:47:44.985716657 107924 dns_resolver.cc:162] no server name supplied in dns URI E0507 08:47:44.986710932 107924 channel.cc:120] channel stack builder failed: UNKNOWN: the target uri is not valid: dns:/// 2025-05-07T08:47:45.536431Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:45.562952Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:46.539492Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 
2025-05-07T08:47:46.591048Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:46.848907Z node 1 :YQ_CONTROL_PLANE_STORAGE ERROR: schema.cpp:160: Create directory "Root/yq" error: TRANSPORT_UNAVAILABLE [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:29739: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:29739 } ] 2025-05-07T08:47:47.160508Z node 1 :YQL_NODES_MANAGER ERROR: nodes_manager.cpp:364: ydb/core/fq/libs/actors/nodes_manager.cpp:322: TRANSPORT_UNAVAILABLE
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:29739: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint localhost:29739 2025-05-07T08:47:47.160642Z node 1 :YQL_NODES_MANAGER ERROR: nodes_manager.cpp:364: ydb/core/fq/libs/actors/nodes_manager.cpp:322: TRANSPORT_UNAVAILABLE
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:29739: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint localhost:29739 2025-05-07T08:47:47.540174Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:47.592673Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:48.542432Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:48.639714Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:49.553768Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:49.643450Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; E0507 08:47:50.083131831 107923 dns_resolver.cc:162] no server name supplied in dns URI E0507 08:47:50.084197507 107923 channel.cc:120] channel stack builder failed: UNKNOWN: the target uri is not valid: dns:/// 2025-05-07T08:47:50.559735Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:50.647695Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:51.663844Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:51.668666Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:52.674141Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:52.682085Z node 1 :MET ... int { ActorId { RawX1: 7501623152156971012 RawX2: 4503616807242120 } } InMemory: true } 2025-05-07T08:49:06.525668Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:1081: SelfId: [4:7501623152156971017:3079], TxId: 281474976710811, task: 2. Ctx: { SessionId : ydb://session/3?node_id=4&id=YzU5MzMzNzUtNTNjMmFkY2UtNDYyZmY5ZjQtZDE5ZTM5YzI=. CustomerSuppliedId : . TraceId : 01jtmyyb3ad35mjjege233f3z5. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. 
Update input channelId: 1, peer: [4:7501623152156971016:3078] 2025-05-07T08:49:06.525714Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:143: SelfId: [4:7501623152156971017:3079], TxId: 281474976710811, task: 2. Ctx: { SessionId : ydb://session/3?node_id=4&id=YzU5MzMzNzUtNTNjMmFkY2UtNDYyZmY5ZjQtZDE5ZTM5YzI=. CustomerSuppliedId : . TraceId : 01jtmyyb3ad35mjjege233f3z5. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. CA StateFunc 271646926 2025-05-07T08:49:06.525794Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:1074: SelfId: [4:7501623152156971017:3079], TxId: 281474976710811, task: 2. Ctx: { SessionId : ydb://session/3?node_id=4&id=YzU5MzMzNzUtNTNjMmFkY2UtNDYyZmY5ZjQtZDE5ZTM5YzI=. CustomerSuppliedId : . TraceId : 01jtmyyb3ad35mjjege233f3z5. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. Received channels info: Update { Id: 1 TransportVersion: DATA_TRANSPORT_OOB_PICKLE_1_0 SrcTaskId: 1 DstTaskId: 2 SrcEndpoint { ActorId { RawX1: 7501623152156971016 RawX2: 4503616807242758 } } DstEndpoint { ActorId { RawX1: 7501623152156971017 RawX2: 4503616807242759 } } InMemory: true DstStageId: 1 } Update { Id: 2 TransportVersion: DATA_TRANSPORT_OOB_PICKLE_1_0 SrcTaskId: 2 SrcEndpoint { ActorId { RawX1: 7501623152156971017 RawX2: 4503616807242759 } } DstEndpoint { ActorId { RawX1: 7501623152156971012 RawX2: 4503616807242120 } } InMemory: true } 2025-05-07T08:49:06.525815Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:143: SelfId: [4:7501623152156971017:3079], TxId: 281474976710811, task: 2. Ctx: { SessionId : ydb://session/3?node_id=4&id=YzU5MzMzNzUtNTNjMmFkY2UtNDYyZmY5ZjQtZDE5ZTM5YzI=. CustomerSuppliedId : . TraceId : 01jtmyyb3ad35mjjege233f3z5. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. CA StateFunc 271646922 2025-05-07T08:49:06.526723Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:959: TxId: 281474976710811, task: 1, CA Id [4:7501623152156971016:3078]. Recv TEvReadResult from ShardID=72075186224037888, ReadId=0, Status=SUCCESS, Finished=1, RowCount=1, TxLocks= , BrokenTxLocks= 2025-05-07T08:49:06.526752Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1047: TxId: 281474976710811, task: 1, CA Id [4:7501623152156971016:3078]. Taken 0 locks 2025-05-07T08:49:06.526769Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1061: TxId: 281474976710811, task: 1, CA Id [4:7501623152156971016:3078]. new data for read #0 seqno = 1 finished = 1 2025-05-07T08:49:06.526792Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:143: SelfId: [4:7501623152156971016:3078], TxId: 281474976710811, task: 1. Ctx: { SessionId : ydb://session/3?node_id=4&id=YzU5MzMzNzUtNTNjMmFkY2UtNDYyZmY5ZjQtZDE5ZTM5YzI=. CustomerSuppliedId : . TraceId : 01jtmyyb3ad35mjjege233f3z5. CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. CA StateFunc 276037645 2025-05-07T08:49:06.526812Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:143: SelfId: [4:7501623152156971016:3078], TxId: 281474976710811, task: 1. Ctx: { SessionId : ydb://session/3?node_id=4&id=YzU5MzMzNzUtNTNjMmFkY2UtNDYyZmY5ZjQtZDE5ZTM5YzI=. CustomerSuppliedId : . TraceId : 01jtmyyb3ad35mjjege233f3z5. CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. CA StateFunc 271646922 2025-05-07T08:49:06.526831Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1328: TxId: 281474976710811, task: 1, CA Id [4:7501623152156971016:3078]. 
enter getasyncinputdata results size 1, freeSpace 8388608 2025-05-07T08:49:06.526864Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1224: TxId: 281474976710811, task: 1, CA Id [4:7501623152156971016:3078]. enter pack cells method shardId: 72075186224037888 processedRows: 0 packed rows: 0 freeSpace: 8388608 2025-05-07T08:49:06.526894Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1305: TxId: 281474976710811, task: 1, CA Id [4:7501623152156971016:3078]. exit pack cells method shardId: 72075186224037888 processedRows: 0 packed rows: 1 freeSpace: 8386365 2025-05-07T08:49:06.526914Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1362: TxId: 281474976710811, task: 1, CA Id [4:7501623152156971016:3078]. returned 1 rows; processed 1 rows 2025-05-07T08:49:06.526950Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1399: TxId: 281474976710811, task: 1, CA Id [4:7501623152156971016:3078]. dropping batch for read #0 2025-05-07T08:49:06.526963Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:459: TxId: 281474976710811, task: 1, CA Id [4:7501623152156971016:3078]. effective maxinflight 1024 sorted 0 2025-05-07T08:49:06.526975Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:481: TxId: 281474976710811, task: 1, CA Id [4:7501623152156971016:3078]. Scheduled table scans, in flight: 0 shards. pending shards to read: 0, 2025-05-07T08:49:06.526995Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1424: TxId: 281474976710811, task: 1, CA Id [4:7501623152156971016:3078]. returned async data processed rows 1 left freeSpace 8386365 received rows 1 running reads 0 pending shards 0 finished = 1 has limit 0 limit reached 0 2025-05-07T08:49:06.527213Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:502: SelfId: [4:7501623152156971016:3078], TxId: 281474976710811, task: 1. Ctx: { SessionId : ydb://session/3?node_id=4&id=YzU5MzMzNzUtNTNjMmFkY2UtNDYyZmY5ZjQtZDE5ZTM5YzI=. CustomerSuppliedId : . TraceId : 01jtmyyb3ad35mjjege233f3z5. CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. Continue execution, either output buffers are not empty or not all channels are ready, hasDataToSend: 1, channelsReady: 1 2025-05-07T08:49:06.527231Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:143: SelfId: [4:7501623152156971016:3078], TxId: 281474976710811, task: 1. Ctx: { SessionId : ydb://session/3?node_id=4&id=YzU5MzMzNzUtNTNjMmFkY2UtNDYyZmY5ZjQtZDE5ZTM5YzI=. CustomerSuppliedId : . TraceId : 01jtmyyb3ad35mjjege233f3z5. CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. CA StateFunc 271646922 2025-05-07T08:49:06.527261Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:670: TxId: 281474976710811, task: 1. Tasks execution finished, waiting for chunk delivery in output channelId: 1, seqNo: [1] 2025-05-07T08:49:06.527275Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:143: SelfId: [4:7501623152156971017:3079], TxId: 281474976710811, task: 2. Ctx: { SessionId : ydb://session/3?node_id=4&id=YzU5MzMzNzUtNTNjMmFkY2UtNDYyZmY5ZjQtZDE5ZTM5YzI=. CustomerSuppliedId : . TraceId : 01jtmyyb3ad35mjjege233f3z5. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. CA StateFunc 271646923 2025-05-07T08:49:06.527301Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:163: TxId: 281474976710811, task: 2. Finish input channelId: 1, from: [4:7501623152156971016:3078] 2025-05-07T08:49:06.527334Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:143: SelfId: [4:7501623152156971017:3079], TxId: 281474976710811, task: 2. 
Ctx: { SessionId : ydb://session/3?node_id=4&id=YzU5MzMzNzUtNTNjMmFkY2UtNDYyZmY5ZjQtZDE5ZTM5YzI=. CustomerSuppliedId : . TraceId : 01jtmyyb3ad35mjjege233f3z5. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. CA StateFunc 271646922 2025-05-07T08:49:06.527507Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:502: SelfId: [4:7501623152156971017:3079], TxId: 281474976710811, task: 2. Ctx: { SessionId : ydb://session/3?node_id=4&id=YzU5MzMzNzUtNTNjMmFkY2UtNDYyZmY5ZjQtZDE5ZTM5YzI=. CustomerSuppliedId : . TraceId : 01jtmyyb3ad35mjjege233f3z5. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. Continue execution, either output buffers are not empty or not all channels are ready, hasDataToSend: 1, channelsReady: 1 2025-05-07T08:49:06.527522Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:143: SelfId: [4:7501623152156971016:3078], TxId: 281474976710811, task: 1. Ctx: { SessionId : ydb://session/3?node_id=4&id=YzU5MzMzNzUtNTNjMmFkY2UtNDYyZmY5ZjQtZDE5ZTM5YzI=. CustomerSuppliedId : . TraceId : 01jtmyyb3ad35mjjege233f3z5. CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. CA StateFunc 271646927 2025-05-07T08:49:06.527548Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:143: SelfId: [4:7501623152156971016:3078], TxId: 281474976710811, task: 1. Ctx: { SessionId : ydb://session/3?node_id=4&id=YzU5MzMzNzUtNTNjMmFkY2UtNDYyZmY5ZjQtZDE5ZTM5YzI=. CustomerSuppliedId : . TraceId : 01jtmyyb3ad35mjjege233f3z5. CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. CA StateFunc 271646922 2025-05-07T08:49:06.527567Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:675: TxId: 281474976710811, task: 1. Tasks execution finished 2025-05-07T08:49:06.527580Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:510: SelfId: [4:7501623152156971016:3078], TxId: 281474976710811, task: 1. Ctx: { SessionId : ydb://session/3?node_id=4&id=YzU5MzMzNzUtNTNjMmFkY2UtNDYyZmY5ZjQtZDE5ZTM5YzI=. CustomerSuppliedId : . TraceId : 01jtmyyb3ad35mjjege233f3z5. CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. Compute state finished. All channels and sinks finished 2025-05-07T08:49:06.527689Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:494: TxId: 281474976710811, task: 1. pass away 2025-05-07T08:49:06.527776Z node 4 :KQP_COMPUTE DEBUG: log.cpp:784: fline=kqp_compute_actor_factory.cpp:66;problem=finish_compute_actor;tx_id=281474976710811;task_id=1;success=1;message={
: Error: COMPUTE_STATE_FINISHED }; 2025-05-07T08:49:06.528124Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:143: SelfId: [4:7501623152156971017:3079], TxId: 281474976710811, task: 2. Ctx: { SessionId : ydb://session/3?node_id=4&id=YzU5MzMzNzUtNTNjMmFkY2UtNDYyZmY5ZjQtZDE5ZTM5YzI=. CustomerSuppliedId : . TraceId : 01jtmyyb3ad35mjjege233f3z5. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. CA StateFunc 271646922 2025-05-07T08:49:06.528156Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976710811, task: 2. Tasks execution finished, don't wait for ack delivery in input channelId: 1, seqNo: [1] 2025-05-07T08:49:06.528165Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:675: TxId: 281474976710811, task: 2. Tasks execution finished 2025-05-07T08:49:06.528175Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:510: SelfId: [4:7501623152156971017:3079], TxId: 281474976710811, task: 2. Ctx: { SessionId : ydb://session/3?node_id=4&id=YzU5MzMzNzUtNTNjMmFkY2UtNDYyZmY5ZjQtZDE5ZTM5YzI=. CustomerSuppliedId : . TraceId : 01jtmyyb3ad35mjjege233f3z5. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. Compute state finished. All channels and sinks finished 2025-05-07T08:49:06.528217Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:494: TxId: 281474976710811, task: 2. pass away 2025-05-07T08:49:06.528261Z node 4 :KQP_COMPUTE DEBUG: log.cpp:784: fline=kqp_compute_actor_factory.cpp:66;problem=finish_compute_actor;tx_id=281474976710811;task_id=2;success=1;message={
: Error: COMPUTE_STATE_FINISHED }; 2025-05-07T08:49:07.443973Z node 4 :FQ_PENDING_FETCHER ERROR: pending_fetcher.cpp:259: Error with GetTask:
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv6:%5B::%5D:21359: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint [::]:21359 >> TReplicationTests::CreateReplicatedTable [GOOD] >> TReplicationTests::DropReplicationWithInvalidCredentials >> TKesusTest::TestAcquireWaiterChangeTimeoutToZero [GOOD] >> TKesusTest::TestAcquireWaiterRelease ------- [TM] {asan, default-linux-x86_64, release} ydb/services/fq/ut_integration/unittest >> Yq_1::Basic_EmptyDict [FAIL] Test command err: 2025-05-07T08:47:37.188119Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501622766271473478:2140];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:47:37.188170Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; E0507 08:47:38.744602400 108548 dns_resolver.cc:162] no server name supplied in dns URI E0507 08:47:38.751637399 108548 channel.cc:120] channel stack builder failed: UNKNOWN: the target uri is not valid: dns:/// 2025-05-07T08:47:38.959545Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:40.034609Z node 1 :YQ_CONTROL_PLANE_STORAGE ERROR: schema.cpp:160: Create directory "Root/yq" error: TRANSPORT_UNAVAILABLE [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:13438: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:13438 } ] 2025-05-07T08:47:40.062414Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:40.322759Z node 1 :YQL_NODES_MANAGER ERROR: nodes_manager.cpp:364: ydb/core/fq/libs/actors/nodes_manager.cpp:322: TRANSPORT_UNAVAILABLE
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:13438: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint localhost:13438 2025-05-07T08:47:41.092440Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:42.095649Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:42.202589Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501622766271473478:2140];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:47:42.202997Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:47:43.099201Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:43.212438Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; E0507 08:47:43.723158154 108946 dns_resolver.cc:162] no server name supplied in dns URI E0507 08:47:43.723290400 108946 channel.cc:120] channel stack builder failed: UNKNOWN: the target uri is not valid: dns:/// 2025-05-07T08:47:44.103599Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:44.214814Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:44.474000Z node 1 :YQ_CONTROL_PLANE_STORAGE ERROR: schema.cpp:160: Create directory "Root/yq" error: TRANSPORT_UNAVAILABLE [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:13438: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:13438 } ] 2025-05-07T08:47:44.788652Z node 1 :YQL_NODES_MANAGER ERROR: nodes_manager.cpp:364: ydb/core/fq/libs/actors/nodes_manager.cpp:322: TRANSPORT_UNAVAILABLE
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:13438: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint localhost:13438 2025-05-07T08:47:45.114500Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:45.221126Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:46.140027Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:46.240388Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:47.158834Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:47.302185Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:48.191155Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:48.322766Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:48.587221Z node 1 :YQ_CONTROL_PLANE_STORAGE ERROR: schema.cpp:160: Create directory "Root/yq" error: TRANSPORT_UNAVAILABLE [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:13438: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:13438 } ] E0507 08:47:48.963673418 108946 dns_resolver.cc:162] no server name supplied in dns URI E0507 08:47:48.982748958 108946 channel.cc:120] channel stack builder failed: UNKNOWN: the target uri is not valid: dns:/// 2025-05-07T08:47:49.203060Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:49.332066Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:50.239468Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:50.360182Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:51.586237Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:51.586279Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:52.605269Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:52.670413Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:53.690646Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:53.764659Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; E0507 08:47:54.327242858 108928 dns_resolver.cc:162] no server name supplied in dns URI E0507 08:47:54.329045683 108928 channel.cc:120] channel stack builder failed: UNKNOWN: the target uri is not valid: dns:/// 2025-05-07T08:47:54.692126Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:54.765532Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:54.914493Z node 1 :YQ_CONTROL_PLANE_STORAGE ERROR: schema.cpp:160: Create directory "Root/yq" error: TRANSPORT_UNAVAILABLE [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:13438: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:13438 } ] 2025-05-07T08:47:54.946124Z node 1 :YQL_NODES_MANAGER ERROR: nodes_manager.cpp:364: ydb/core/fq/libs/actors/nodes_manager.cpp:322: TRANSPORT_UNAVAILABLE
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:13438: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint localhost:13438 2025-05-07T08:47:54.946246Z node 1 :YQL_NODES_MANAGER ERROR: nodes_manager.cpp:364: ydb/core/fq/libs/actors/nodes_manager.cpp:322: TRANSPORT_UNAVAILABLE
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:13438: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint localhost:13438 2025-05-07T08:47:55.721300Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:55.767704Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: ... _cloud, metrics name: yq.analyticsQuery.count, issues: 2025-05-07T08:49:24.370211Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:690: TQuotaUsageResponse error for subject type: cloud, subject id: mock_cloud, metrics name: yq.streamingQuery.count, issues: 2025-05-07T08:49:24.370238Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-05-07T08:49:24.370368Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:690: TQuotaUsageResponse error for subject type: cloud, subject id: mock_cloud, metrics name: yq.analyticsQuery.count, issues: 2025-05-07T08:49:24.370382Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:690: TQuotaUsageResponse error for subject type: cloud, subject id: mock_cloud, metrics name: yq.streamingQuery.count, issues: 2025-05-07T08:49:24.370393Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-05-07T08:49:24.370506Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-05-07T08:49:24.370546Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:690: TQuotaUsageResponse error for subject type: cloud, subject id: mock_cloud, metrics name: yq.analyticsQuery.count, issues: 2025-05-07T08:49:24.370562Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:690: TQuotaUsageResponse error for subject type: cloud, subject id: mock_cloud, metrics name: yq.streamingQuery.count, issues: 2025-05-07T08:49:24.370605Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-05-07T08:49:24.370725Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:690: TQuotaUsageResponse error for subject type: cloud, subject id: mock_cloud, metrics name: yq.analyticsQuery.count, issues: 2025-05-07T08:49:24.370753Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:690: TQuotaUsageResponse error for subject type: cloud, subject id: mock_cloud, metrics name: yq.streamingQuery.count, issues: 2025-05-07T08:49:24.370779Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-05-07T08:49:24.371005Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-05-07T08:49:24.371068Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:690: TQuotaUsageResponse error for subject type: cloud, subject id: mock_cloud, metrics name: yq.analyticsQuery.count, issues: 2025-05-07T08:49:24.371082Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:690: TQuotaUsageResponse error for subject type: cloud, subject id: mock_cloud, metrics name: yq.streamingQuery.count, issues: 2025-05-07T08:49:24.371189Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-05-07T08:49:24.371430Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-05-07T08:49:24.371467Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:690: TQuotaUsageResponse error for subject type: cloud, subject id: mock_cloud, metrics name: yq.analyticsQuery.count, issues: 
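Aside: the repeated TRANSPORT_UNAVAILABLE / "Connection refused" entries above come from dialing the control-plane endpoint (localhost:13438) before it has finished bootstrapping. The sketch below shows the capped-exponential-backoff retry loop such bootstrap-time calls typically need; TStatus, the Transient flag, and RetryWithBackoff are hypothetical stand-ins, not YDB or gRPC APIs.

```cpp
#include <algorithm>
#include <chrono>
#include <functional>
#include <thread>

// Hypothetical sketch, not a YDB API: retry a bootstrap-time call (like the
// "Create directory Root/yq" attempts above) while the target endpoint is
// still coming up. TStatus and its Transient flag are illustrative stand-ins.
struct TStatus {
    bool Ok = false;
    bool Transient = false; // e.g. gRPC (14) UNAVAILABLE / "Connection refused"
};

TStatus RetryWithBackoff(const std::function<TStatus()>& op, int maxAttempts = 10) {
    using namespace std::chrono;
    milliseconds delay{100};
    TStatus st;
    for (int attempt = 0; attempt < maxAttempts; ++attempt) {
        st = op();
        if (st.Ok || !st.Transient) {
            return st; // success, or a permanent error worth surfacing as-is
        }
        std::this_thread::sleep_for(delay);
        delay = std::min(delay * 2, milliseconds{5000}); // capped exponential backoff
    }
    return st; // budget exhausted: report the last transient failure
}
```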
2025-05-07T08:49:24.371482Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:690: TQuotaUsageResponse error for subject type: cloud, subject id: mock_cloud, metrics name: yq.streamingQuery.count, issues: ... 2025-05-07T08:49:24.374470Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:690: TQuotaUsageResponse error for subject type: cloud, subject id: 
mock_cloud, metrics name: yq.analyticsQuery.count, issues: 2025-05-07T08:49:24.374489Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:690: TQuotaUsageResponse error for subject type: cloud, subject id: mock_cloud, metrics name: yq.streamingQuery.count, issues: assertion failed at ydb/services/fq/ut_integration/fq_ut.cpp:57, TString (anonymous namespace)::CreateNewHistoryAndWaitFinish(const TString &, NYdb::NFq::TClient &, const TString &, const FederatedQuery::QueryMeta::ComputeStatus &): (result.GetStatus() == EStatus::SUCCESS) failed: (BAD_REQUEST != SUCCESS)
: Error: Control Plane is not ready yet. Please retry later., code: 1007 , with diff: (BAD_REQ|S)U(|CC)ES(T|S) TBackTrace::Capture()+28 (0x192A7F0C) NUnitTest::NPrivate::RaiseError(char const*, TBasicString> const&, bool)+592 (0x197650D0) ??+0 (0x18E60E38) NTestSuiteYq_1::TTestCaseBasic_EmptyDict::Execute_(NUnitTest::TTestContext&)+2141 (0x18E6C1AD) std::__y1::__function::__func, void ()>::operator()()+280 (0x18ED4008) TColoredProcessor::Run(std::__y1::function, TBasicString> const&, char const*, bool)+534 (0x1979C2D6) NUnitTest::TTestBase::Run(std::__y1::function, TBasicString> const&, char const*, bool)+505 (0x1976BC59) NTestSuiteYq_1::TCurrentTest::Execute()+1237 (0x18ED2F75) NUnitTest::TTestFactory::Execute()+2438 (0x1976D526) NUnitTest::RunMain(int, char**)+5213 (0x1979684D) ??+0 (0x7F5D89EA4D90) __libc_start_main+128 (0x7F5D89EA4E40) _start+41 (0x167EA029) ------- [TM] {asan, default-linux-x86_64, release} ydb/services/fq/ut_integration/unittest >> Yq_1::CreateQuery_Without_Connection [GOOD] Test command err: 2025-05-07T08:47:37.522047Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501622766892388749:2143];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:47:37.522398Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; E0507 08:47:42.289683973 108557 dns_resolver.cc:162] no server name supplied in dns URI E0507 08:47:42.382687274 108557 channel.cc:120] channel stack builder failed: UNKNOWN: the target uri is not valid: dns:/// 2025-05-07T08:47:43.596142Z node 1 :YQ_CONTROL_PLANE_STORAGE ERROR: schema.cpp:160: Create directory "Root/yq" error: TRANSPORT_UNAVAILABLE [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:16298: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:16298 } ] 2025-05-07T08:47:46.412321Z node 1 :YQ_CONTROL_PLANE_STORAGE ERROR: schema.cpp:160: Create directory "Root/yq" error: TRANSPORT_UNAVAILABLE [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:16298: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:16298 } ] 2025-05-07T08:47:46.573123Z node 1 :YQL_NODES_MANAGER ERROR: nodes_manager.cpp:364: ydb/core/fq/libs/actors/nodes_manager.cpp:322: TRANSPORT_UNAVAILABLE
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:16298: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint localhost:16298 E0507 08:47:46.757294046 109115 dns_resolver.cc:162] no server name supplied in dns URI E0507 08:47:46.758737412 109115 channel.cc:120] channel stack builder failed: UNKNOWN: the target uri is not valid: dns:/// 2025-05-07T08:47:49.678343Z node 1 :YQ_CONTROL_PLANE_STORAGE ERROR: schema.cpp:160: Create directory "Root/yq" error: TRANSPORT_UNAVAILABLE [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:16298: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:16298 } ] E0507 08:47:52.090574817 109114 dns_resolver.cc:162] no server name supplied in dns URI E0507 08:47:52.091349814 109114 channel.cc:120] channel stack builder failed: UNKNOWN: the target uri is not valid: dns:/// 2025-05-07T08:47:53.800628Z node 1 :YQL_NODES_MANAGER ERROR: nodes_manager.cpp:364: ydb/core/fq/libs/actors/nodes_manager.cpp:322: TRANSPORT_UNAVAILABLE
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:16298: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint localhost:16298 2025-05-07T08:47:53.806093Z node 1 :YQ_CONTROL_PLANE_STORAGE ERROR: schema.cpp:160: Create directory "Root/yq" error: TRANSPORT_UNAVAILABLE [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:16298: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:16298 } ] 2025-05-07T08:47:56.742810Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501622766892388749:2143];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:47:56.742971Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:47:56.765853Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; E0507 08:47:57.178345007 109115 dns_resolver.cc:162] no server name supplied in dns URI E0507 08:47:57.179016998 109115 channel.cc:120] channel stack builder failed: UNKNOWN: the target uri is not valid: dns:/// 2025-05-07T08:47:57.745577Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:57.768783Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:58.754274Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:58.770712Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:59.759510Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:47:59.771324Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:48:00.643582Z node 1 :YQ_CONTROL_PLANE_STORAGE ERROR: schema.cpp:160: Create directory "Root/yq" error: TRANSPORT_UNAVAILABLE [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:16298: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:16298 } ] 2025-05-07T08:48:00.681653Z node 1 :YQL_NODES_MANAGER ERROR: nodes_manager.cpp:364: ydb/core/fq/libs/actors/nodes_manager.cpp:322: TRANSPORT_UNAVAILABLE
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:16298: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint localhost:16298 2025-05-07T08:48:00.696704Z node 1 :YQL_NODES_MANAGER ERROR: nodes_manager.cpp:364: ydb/core/fq/libs/actors/nodes_manager.cpp:322: TRANSPORT_UNAVAILABLE
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:16298: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint localhost:16298 2025-05-07T08:48:00.766277Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:48:00.876578Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:48:01.767826Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:48:01.873558Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; E0507 08:48:02.249569167 109114 dns_resolver.cc:162] no server name supplied in dns URI E0507 08:48:02.262124746 109114 channel.cc:120] channel stack builder failed: UNKNOWN: the target uri is not valid: dns:/// 2025-05-07T08:48:02.818314Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:48:02.923630Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:48:03.819686Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:48:03.922677Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:48:04.826512Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:48:04.927541Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:48:05.838354Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:48:05.930349Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; E0507 08:48:07.345002752 109115 dns_resolver.cc:162] no server name supplied in dns URI E0507 08:48:07.345455048 109115 channel.cc:120] channel stack builder failed: UNKNOWN: the target uri is not valid: dns:/// 2025-05-07T08:48:09.004729Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 
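Aside: the Yq_1::Basic_EmptyDict failure above asserts EStatus::SUCCESS on the first CreateQuery attempt and instead gets BAD_REQUEST with "Control Plane is not ready yet. Please retry later., code: 1007"; runs that pass simply reach the endpoint after bootstrap completes. A deadline-bounded poll along the lines below removes that race. This is a hypothetical helper, not the actual fq_ut.cpp code; EStatus and WaitForControlPlane are illustrative names.

```cpp
#include <chrono>
#include <functional>
#include <thread>

// Hypothetical sketch, not the actual fq_ut.cpp helper: keep re-issuing the
// request while the control plane answers "not ready yet" (code 1007 above),
// and only fail the test on a real error or on deadline expiry.
enum class EStatus { SUCCESS, BAD_REQUEST, TIMEOUT };

EStatus WaitForControlPlane(const std::function<EStatus()>& createQuery,
                            std::chrono::seconds deadline = std::chrono::seconds(30)) {
    const auto start = std::chrono::steady_clock::now();
    while (std::chrono::steady_clock::now() - start < deadline) {
        const EStatus st = createQuery();
        if (st != EStatus::BAD_REQUEST) {
            return st; // SUCCESS, or an error the test's assertion should really see
        }
        std::this_thread::sleep_for(std::chrono::milliseconds(250)); // still bootstrapping
    }
    return EStatus::TIMEOUT;
}
```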
2025-05-07T08:48:09.004757Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:48:10.008776Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:48:10.008801Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:48:11.024704Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:48:11.024732Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:48:11.498760Z node 1 :YQL_NODES_MANAGER ERROR: nodes_manager.cpp:364: ydb/core/fq/libs/actors/nodes_manager.cpp:322: TRANSPORT_UNAVAILABLE
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:16298: Failed to conne ... .252005Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-05-07T08:49:09.252040Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-05-07T08:49:09.252089Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-05-07T08:49:09.252125Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-05-07T08:49:09.252166Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-05-07T08:49:09.252225Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-05-07T08:49:09.252272Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-05-07T08:49:09.252310Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-05-07T08:49:09.252349Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-05-07T08:49:09.252386Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-05-07T08:49:09.252421Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-05-07T08:49:09.252494Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-05-07T08:49:09.252576Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-05-07T08:49:09.252698Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-05-07T08:49:09.252800Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-05-07T08:49:09.252875Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-05-07T08:49:09.253026Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-05-07T08:49:09.253107Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-05-07T08:49:09.253138Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-05-07T08:49:09.253206Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-05-07T08:49:09.253258Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-05-07T08:49:09.253317Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-05-07T08:49:09.253376Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-05-07T08:49:09.253416Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-05-07T08:49:09.253481Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-05-07T08:49:09.253513Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-05-07T08:49:09.253566Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-05-07T08:49:09.253604Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-05-07T08:49:09.253683Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-05-07T08:49:09.253734Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-05-07T08:49:09.253776Z 
node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: ... 2025-05-07T08:49:09.257255Z node 1 :FQ_QUOTA_SERVICE
ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-05-07T08:49:09.257277Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-05-07T08:49:09.257324Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-05-07T08:49:09.257363Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-05-07T08:49:09.257440Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-05-07T08:49:09.257476Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-05-07T08:49:09.257531Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-05-07T08:49:09.257565Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-05-07T08:49:09.257639Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-05-07T08:49:09.257665Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-05-07T08:49:09.257728Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-05-07T08:49:09.257807Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: [good] Yq_1::CreateQuery_Without_Connection >> TKesusTest::TestAttachMissingSession [GOOD] >> TKesusTest::TestAttachOldGeneration >> TReplicationTests::CopyReplicatedTable [GOOD] >> TSubDomainTest::CreateDummyTabletsInDifferentDomains >> TSchemeShardUserAttrsTest::VariousUse [GOOD] >> TKesusTest::TestReleaseSemaphore [GOOD] >> TKesusTest::TestSemaphoreData >> TKesusTest::TestAcquireWaiterRelease [GOOD] >> TKesusTest::TestAllocatesResources >> TKesusTest::TestAttachOldGeneration [GOOD] >> TKesusTest::TestAttachFastPath >> TSchemeshardBackgroundCleaningTest::TempInTemp [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_replication/unittest >> TReplicationTests::CopyReplicatedTable [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:128:2058] recipient: [1:108:2140] 2025-05-07T08:50:20.805656Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:50:20.805778Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:50:20.805820Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:50:20.805860Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:50:20.805910Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:50:20.805942Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 
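Aside: the quota-manager spam above ("SyncQuota finished with error:" repeated hundreds of times within a few milliseconds) is a case where collapsing runs of identical messages keeps a log readable. A minimal sketch, assuming callers pass the message body with the timestamp already stripped so consecutive repeats compare equal; TDedupWriter is an illustrative name, not YDB's logging API.

```cpp
#include <cstdint>
#include <iostream>
#include <string>

// Illustrative sketch, not YDB's logger: collapse runs of identical messages.
// The first occurrence is printed; the rest of a run becomes a single counter
// line once a different message arrives (or Flush() is called at shutdown).
class TDedupWriter {
public:
    void Write(const std::string& msg) {
        if (msg == Last_) {
            ++Repeats_; // same body as the previous message: just count it
            return;
        }
        Flush();
        Last_ = msg;
        std::cout << msg << '\n';
    }

    void Flush() {
        if (Repeats_ > 0) {
            std::cout << "(previous message repeated " << Repeats_ << " more times)\n";
            Repeats_ = 0;
        }
    }

private:
    std::string Last_;
    uint64_t Repeats_ = 0;
};
```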
2025-05-07T08:50:20.806111Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:50:20.806207Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:50:20.806981Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:50:20.807373Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:50:20.902605Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:50:20.902687Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:50:20.920975Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:50:20.921104Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:50:20.921281Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:50:20.931986Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:50:20.932930Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:50:20.933708Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:50:20.934102Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:50:20.937248Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:50:20.939062Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:50:20.939142Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:50:20.939223Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:50:20.939278Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:50:20.939340Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:50:20.939570Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:50:20.957252Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:125:2151] sender: 
[1:239:2058] recipient: [1:15:2062] 2025-05-07T08:50:21.112923Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:50:21.113174Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:50:21.113411Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:50:21.113710Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:50:21.113769Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:50:21.123065Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:50:21.123259Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:50:21.123494Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:50:21.123550Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:50:21.123587Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:50:21.123623Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:50:21.126241Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:50:21.126313Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:50:21.126356Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:50:21.128536Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:50:21.128598Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:50:21.128684Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:50:21.128740Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:50:21.132372Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:50:21.134702Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:50:21.134959Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:50:21.135976Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:50:21.136120Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 130 RawX2: 4294969450 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:50:21.136174Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:50:21.136489Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:50:21.136543Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:50:21.136720Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:50:21.136806Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:50:21.139309Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:50:21.139359Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:50:21.139574Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:50:21.139618Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 
TxStats { PerShardStats { ShardId: 72075186233409547 CpuTimeUsec: 1831 } } 2025-05-07T08:50:30.900538Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_part.cpp:108: HandleReply TEvDataShard::TEvProposeTransactionResult Ignore message: tablet# 72057594046678944, ev# TxKind: TX_KIND_SCHEME Origin: 72075186233409547 Status: COMPLETE TxId: 102 Step: 5000003 OrderId: 102 ExecLatency: 3 ProposeLatency: 4 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409547 CpuTimeUsec: 1831 } } 2025-05-07T08:50:30.901292Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5472: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 412 RawX2: 34359740749 } Origin: 72075186233409547 State: 2 TxId: 102 Step: 0 Generation: 2 2025-05-07T08:50:30.901359Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation FindRelatedPartByTabletId, TxId: 102, tablet: 72075186233409547, partId: 0 2025-05-07T08:50:30.901509Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 102:0, at schemeshard: 72057594046678944, message: Source { RawX1: 412 RawX2: 34359740749 } Origin: 72075186233409547 State: 2 TxId: 102 Step: 0 Generation: 2 2025-05-07T08:50:30.901573Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1005: NTableState::TProposedWaitParts operationId# 102:0 HandleReply TEvSchemaChanged at tablet: 72057594046678944 2025-05-07T08:50:30.901690Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1009: NTableState::TProposedWaitParts operationId# 102:0 HandleReply TEvSchemaChanged at tablet: 72057594046678944 message: Source { RawX1: 412 RawX2: 34359740749 } Origin: 72075186233409547 State: 2 TxId: 102 Step: 0 Generation: 2 2025-05-07T08:50:30.901773Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:655: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 102:0, shardIdx: 72057594046678944:2, datashard: 72075186233409547, left await: 1, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-05-07T08:50:30.901835Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1014: NTableState::TProposedWaitParts operationId# 102:0 HandleReply TEvSchemaChanged CollectSchemaChanged: false 2025-05-07T08:50:30.906162Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-05-07T08:50:30.906389Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-05-07T08:50:30.919212Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5472: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 309 RawX2: 34359740664 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 2025-05-07T08:50:30.919281Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation FindRelatedPartByTabletId, TxId: 102, tablet: 72075186233409546, partId: 0 2025-05-07T08:50:30.919406Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 102:0, at schemeshard: 72057594046678944, message: Source { RawX1: 309 RawX2: 34359740664 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 
2025-05-07T08:50:30.919450Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1005: NTableState::TProposedWaitParts operationId# 102:0 HandleReply TEvSchemaChanged at tablet: 72057594046678944 2025-05-07T08:50:30.919531Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1009: NTableState::TProposedWaitParts operationId# 102:0 HandleReply TEvSchemaChanged at tablet: 72057594046678944 message: Source { RawX1: 309 RawX2: 34359740664 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 2025-05-07T08:50:30.919590Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:655: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 102:0, shardIdx: 72057594046678944:1, datashard: 72075186233409546, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-05-07T08:50:30.919651Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:674: all shard schema changes has been received, operationId: 102:0, at schemeshard: 72057594046678944 2025-05-07T08:50:30.919707Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:686: send schema changes ack message, operation: 102:0, datashard: 72075186233409546, at schemeshard: 72057594046678944 2025-05-07T08:50:30.919758Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:686: send schema changes ack message, operation: 102:0, datashard: 72075186233409547, at schemeshard: 72057594046678944 2025-05-07T08:50:30.919790Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 102:0 129 -> 240 2025-05-07T08:50:30.922014Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-05-07T08:50:30.922642Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-05-07T08:50:30.922722Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_copy_table.cpp:306: TCopyTable TCopyTableBarrier operationId: 102:0ProgressState, operation type TxCopyTable 2025-05-07T08:50:30.922817Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:1059: Set barrier, OperationId: 102:0, name: CopyTableBarrier, done: 0, blocked: 1, parts count: 1 2025-05-07T08:50:30.922874Z node 8 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:1103: All parts have reached barrier, tx: 102, done: 0, blocked: 1 2025-05-07T08:50:30.922978Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_copy_table.cpp:289: TCopyTable TCopyTableBarrier operationId: 102:0 HandleReply TEvPrivate::TEvCompleteBarrier, msg: NKikimr::NSchemeShard::TEvPrivate::TEvCompleteBarrier { TxId: 102 Name: CopyTableBarrier }, at tablet# 72057594046678944 2025-05-07T08:50:30.923028Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 102:0 240 -> 240 2025-05-07T08:50:30.929014Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-05-07T08:50:30.929098Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046678944] TDone opId# 102:0 ProgressState 2025-05-07T08:50:30.929287Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#102:0 progress is 1/1 2025-05-07T08:50:30.929341Z node 
8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-05-07T08:50:30.929394Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#102:0 progress is 1/1 2025-05-07T08:50:30.929448Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-05-07T08:50:30.929497Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: true 2025-05-07T08:50:30.929596Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [8:337:2316] message: TxId: 102 2025-05-07T08:50:30.929689Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-05-07T08:50:30.929746Z node 8 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 102:0 2025-05-07T08:50:30.929792Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 102:0 2025-05-07T08:50:30.930003Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-05-07T08:50:30.930064Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate source path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-05-07T08:50:30.939165Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-05-07T08:50:30.939254Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [8:440:2401] TestWaitNotification: OK eventTxId 102 2025-05-07T08:50:30.939922Z node 8 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/CopyTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T08:50:30.940225Z node 8 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/CopyTable" took 329us result status StatusSuccess 2025-05-07T08:50:30.940681Z node 8 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/CopyTable" PathDescription { Self { Name: "CopyTable" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 102 CreateStep: 5000003 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "CopyTable" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Uint64" TypeId: 4 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 
LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944
|89.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/fq/streaming_optimize/py3test >> test_sql_streaming.py::test[suites-ReadTopicWithMetadataNestedDeep-default.txt] [FAIL]
>> TKesusTest::TestAllocatesResources [GOOD]
>> TReplicationTests::DropReplicationWithInvalidCredentials [GOOD]
>> TReplicationTests::DropReplicationWithUnknownSecret
>> TBackupTests::ShouldSucceedOnSingleShardTable[Raw]
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_user_attributes/unittest >> TSchemeShardUserAttrsTest::VariousUse [GOOD]
Test command err:
Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140]
IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140]
Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:128:2058] recipient: [1:108:2140]
2025-05-07T08:50:30.297702Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1
2025-05-07T08:50:30.297797Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10
2025-05-07T08:50:30.297839Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s
2025-05-07T08:50:30.297870Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration
2025-05-07T08:50:30.297906Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000
2025-05-07T08:50:30.297927Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000
2025-05-07T08:50:30.298016Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10
2025-05-07T08:50:30.298085Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600,
Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:50:30.298702Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:50:30.299063Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:50:30.384072Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:50:30.384127Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:50:30.401593Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:50:30.401743Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:50:30.401885Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:50:30.420192Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:50:30.422590Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:50:30.423526Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:50:30.423980Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:50:30.431197Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:50:30.433305Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:50:30.433401Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:50:30.433464Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:50:30.433534Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:50:30.433593Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:50:30.433792Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:50:30.443992Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:239:2058] recipient: [1:15:2062] 2025-05-07T08:50:30.624622Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: 
"pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:50:30.624903Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:50:30.625207Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:50:30.625478Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:50:30.625550Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:50:30.629366Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:50:30.629557Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:50:30.629802Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:50:30.629894Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:50:30.629942Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:50:30.630003Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:50:30.633370Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:50:30.633452Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:50:30.633521Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:50:30.636163Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:50:30.636255Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:50:30.636315Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:50:30.636410Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:50:30.640798Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 
0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:50:30.644957Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:50:30.645224Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:50:30.646361Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:50:30.646550Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 130 RawX2: 4294969450 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:50:30.646612Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:50:30.647021Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:50:30.647093Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:50:30.647352Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:50:30.647449Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:50:30.650674Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:50:30.650768Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:50:30.650991Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:50:30.651061Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 
chemeshard__operation_side_effects.cpp:989: Publication details: tx: 112, [OwnerId: 72057594046678944, LocalPathId: 4], 18446744073709551615 2025-05-07T08:50:31.233471Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 112 2025-05-07T08:50:31.248453Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 112 2025-05-07T08:50:31.255261Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:50:31.255330Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 112, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:50:31.255512Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 112, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-05-07T08:50:31.255594Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 112, path id: [OwnerId: 72057594046678944, LocalPathId: 4] 2025-05-07T08:50:31.255741Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:50:31.255795Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:206:2208], at schemeshard: 72057594046678944, txId: 112, path id: 1 2025-05-07T08:50:31.255846Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:206:2208], at schemeshard: 72057594046678944, txId: 112, path id: 3 2025-05-07T08:50:31.255895Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:206:2208], at schemeshard: 72057594046678944, txId: 112, path id: 4 FAKE_COORDINATOR: Erasing txId 112 2025-05-07T08:50:31.256768Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 9 PathOwnerId: 72057594046678944, cookie: 112 2025-05-07T08:50:31.256875Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 9 PathOwnerId: 72057594046678944, cookie: 112 2025-05-07T08:50:31.256913Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 3, at schemeshard: 72057594046678944, txId: 112 2025-05-07T08:50:31.256957Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 112, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 9 2025-05-07T08:50:31.257007Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-05-07T08:50:31.257599Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 7 PathOwnerId: 72057594046678944, cookie: 112 2025-05-07T08:50:31.257684Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 7 PathOwnerId: 72057594046678944, cookie: 112 2025-05-07T08:50:31.257728Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 112 2025-05-07T08:50:31.257765Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 112, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 7 2025-05-07T08:50:31.257812Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-05-07T08:50:31.266480Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 112 2025-05-07T08:50:31.266668Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 112 2025-05-07T08:50:31.266723Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 112 2025-05-07T08:50:31.266800Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 112, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], version: 18446744073709551615 2025-05-07T08:50:31.266846Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 1 2025-05-07T08:50:31.266969Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 112, subscribers: 0 2025-05-07T08:50:31.267433Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-05-07T08:50:31.267491Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 4], at schemeshard: 72057594046678944 2025-05-07T08:50:31.267593Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-05-07T08:50:31.269839Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 112 2025-05-07T08:50:31.270474Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 112 
2025-05-07T08:50:31.272651Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 112 2025-05-07T08:50:31.272765Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 112, wait until txId: 112 TestWaitNotification wait txId: 112 2025-05-07T08:50:31.273284Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 112: send EvNotifyTxCompletion 2025-05-07T08:50:31.273334Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 112 2025-05-07T08:50:31.273996Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 112, at schemeshard: 72057594046678944 2025-05-07T08:50:31.274117Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 112: got EvNotifyTxCompletionResult 2025-05-07T08:50:31.274160Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 112: satisfy waiter [1:499:2490] TestWaitNotification: OK eventTxId 112 2025-05-07T08:50:31.275077Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/DirB" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T08:50:31.275350Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/DirB" took 236us result status StatusSuccess 2025-05-07T08:50:31.275746Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/DirB" PathDescription { Self { Name: "DirB" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 109 CreateStep: 5000008 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 6 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } UserAttributes { Key: "AttrB1" Value: "ValB1" } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 TestModificationResults wait txId: 113 2025-05-07T08:50:31.291860Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpRmDir Drop { Name: "DirB" } ApplyIf { PathId: 2 PathVersion: 8 } ApplyIf { PathId: 3 PathVersion: 7 } ApplyIf { PathId: 4 
PathVersion: 3 } } TxId: 113 TabletId: 72057594046678944 , at schemeshard: 72057594046678944
2025-05-07T08:50:31.292125Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_rmdir.cpp:29: TRmDir Propose, path: /MyRoot/DirB, pathId: 0, opId: 113:0, at schemeshard: 72057594046678944
2025-05-07T08:50:31.292266Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 113:1, propose status:StatusPreconditionFailed, reason: fail user constraint: ApplyIf section: no path with id [OwnerId: 72057594046678944, LocalPathId: 4], at schemeshard: 72057594046678944
2025-05-07T08:50:31.295244Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 113, response: Status: StatusPreconditionFailed Reason: "fail user constraint: ApplyIf section: no path with id [OwnerId: 72057594046678944, LocalPathId: 4]" TxId: 113 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944
2025-05-07T08:50:31.295437Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 113, database: /MyRoot, subject: , status: StatusPreconditionFailed, reason: fail user constraint: ApplyIf section: no path with id [OwnerId: 72057594046678944, LocalPathId: 4], operation: DROP DIRECTORY, path: /MyRoot/DirB
TestModificationResult got TxId: 113, wait until txId: 113
>> TKesusTest::TestSemaphoreData [GOOD]
>> TKesusTest::TestSemaphoreReleaseReacquire
>> TKesusTest::TestAttachFastPath [GOOD]
>> TKesusTest::TestAttachFastPathBlocked
>> test_sql_streaming.py::test[suites-WriteTwoTopics-default.txt] [FAIL]
|89.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_backup/unittest
------- [TM] {asan, default-linux-x86_64, release} ydb/core/kesus/tablet/ut/unittest >> TKesusTest::TestAllocatesResources [GOOD]
Test command err:
2025-05-07T08:50:29.140790Z node 1 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937
2025-05-07T08:50:29.140983Z node 1 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute
2025-05-07T08:50:29.160825Z node 1 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete
2025-05-07T08:50:29.160982Z node 1 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute
2025-05-07T08:50:29.180471Z node 1 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete
2025-05-07T08:50:29.181177Z node 1 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[1:130:2156], cookie=14799927457981077463, session=0, seqNo=0)
2025-05-07T08:50:29.181377Z node 1 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 1
2025-05-07T08:50:29.212644Z node 1 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[1:130:2156], cookie=14799927457981077463, session=1)
2025-05-07T08:50:29.213025Z node 1 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[1:130:2156], cookie=5407699823355864694, session=0, seqNo=0)
2025-05-07T08:50:29.213192Z node 1 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 2
2025-05-07T08:50:29.229562Z node 1 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[1:130:2156], cookie=5407699823355864694, session=2)
2025-05-07T08:50:29.229993Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937]
TTxSemaphoreAcquire::Execute (sender=[1:130:2156], cookie=111, session=1, semaphore="Lock1" count=1) 2025-05-07T08:50:29.230171Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:126: [72057594037927937] Created new ephemeral semaphore 1 "Lock1" 2025-05-07T08:50:29.230266Z node 1 :KESUS_TABLET DEBUG: tablet_db.cpp:152: [72057594037927937] Processing semaphore 1 "Lock1" queue: next order #1 session 1 2025-05-07T08:50:29.243876Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[1:130:2156], cookie=111) 2025-05-07T08:50:29.244270Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[1:130:2156], cookie=222, session=2, semaphore="Lock1" count=18446744073709551615) 2025-05-07T08:50:29.244628Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[1:130:2156], cookie=333, session=2, semaphore="Lock1" count=1) 2025-05-07T08:50:29.244736Z node 1 :KESUS_TABLET DEBUG: tablet_db.cpp:152: [72057594037927937] Processing semaphore 1 "Lock1" queue: next order #2 session 2 2025-05-07T08:50:29.259869Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[1:130:2156], cookie=222) 2025-05-07T08:50:29.259970Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[1:130:2156], cookie=333) 2025-05-07T08:50:29.260771Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[1:146:2170], cookie=4158355297186280654, name="Lock1") 2025-05-07T08:50:29.260896Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[1:146:2170], cookie=4158355297186280654) 2025-05-07T08:50:29.796650Z node 2 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-05-07T08:50:29.796769Z node 2 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-05-07T08:50:29.837646Z node 2 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-05-07T08:50:29.837871Z node 2 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-05-07T08:50:29.863347Z node 2 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-05-07T08:50:29.863955Z node 2 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[2:132:2158], cookie=12701126335169939328, session=0, seqNo=0) 2025-05-07T08:50:29.864122Z node 2 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 1 2025-05-07T08:50:29.879067Z node 2 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[2:132:2158], cookie=12701126335169939328, session=1) 2025-05-07T08:50:29.879441Z node 2 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[2:132:2158], cookie=10096822708001021868, session=0, seqNo=0) 2025-05-07T08:50:29.879615Z node 2 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 2 2025-05-07T08:50:29.895594Z node 2 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[2:132:2158], cookie=10096822708001021868, session=2) 2025-05-07T08:50:29.895944Z node 2 :KESUS_TABLET 
DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[2:132:2158], cookie=111, session=1, semaphore="Lock1" count=18446744073709551615) 2025-05-07T08:50:29.896090Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:126: [72057594037927937] Created new ephemeral semaphore 1 "Lock1" 2025-05-07T08:50:29.896201Z node 2 :KESUS_TABLET DEBUG: tablet_db.cpp:152: [72057594037927937] Processing semaphore 1 "Lock1" queue: next order #1 session 1 2025-05-07T08:50:29.910559Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[2:132:2158], cookie=111) 2025-05-07T08:50:29.910928Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[2:132:2158], cookie=222, session=2, semaphore="Lock1" count=1) 2025-05-07T08:50:29.911306Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[2:132:2158], cookie=333, session=2, semaphore="Lock1" count=18446744073709551615) 2025-05-07T08:50:29.926611Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[2:132:2158], cookie=222) 2025-05-07T08:50:29.926701Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[2:132:2158], cookie=333) 2025-05-07T08:50:29.927297Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[2:148:2172], cookie=9179445705292871908, name="Lock1") 2025-05-07T08:50:29.927388Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[2:148:2172], cookie=9179445705292871908) 2025-05-07T08:50:29.927832Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[2:151:2175], cookie=9327504915355289937, name="Lock1") 2025-05-07T08:50:29.927915Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[2:151:2175], cookie=9327504915355289937) 2025-05-07T08:50:30.502295Z node 3 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-05-07T08:50:30.502419Z node 3 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-05-07T08:50:30.526695Z node 3 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-05-07T08:50:30.527298Z node 3 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-05-07T08:50:30.551680Z node 3 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-05-07T08:50:30.552235Z node 3 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[3:132:2158], cookie=12418320617011375888, session=0, seqNo=0) 2025-05-07T08:50:30.552415Z node 3 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 1 2025-05-07T08:50:30.564619Z node 3 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[3:132:2158], cookie=12418320617011375888, session=1) 2025-05-07T08:50:30.564975Z node 3 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[3:132:2158], cookie=10921599048982111529, session=0, seqNo=0) 2025-05-07T08:50:30.565107Z node 3 :KESUS_TABLET DEBUG: 
tx_session_attach.cpp:118: [72057594037927937] Created new session 2 2025-05-07T08:50:30.577206Z node 3 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[3:132:2158], cookie=10921599048982111529, session=2) 2025-05-07T08:50:30.578015Z node 3 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[3:132:2158], cookie=111, session=1, semaphore="Lock1" count=18446744073709551615) 2025-05-07T08:50:30.578188Z node 3 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:126: [72057594037927937] Created new ephemeral semaphore 1 "Lock1" 2025-05-07T08:50:30.578282Z node 3 :KESUS_TABLET DEBUG: tablet_db.cpp:152: [72057594037927937] Processing semaphore 1 "Lock1" queue: next order #1 session 1 2025-05-07T08:50:30.590643Z node 3 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[3:132:2158], cookie=111) 2025-05-07T08:50:30.591021Z node 3 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[3:132:2158], cookie=222, session=2, semaphore="Lock1" count=1) 2025-05-07T08:50:30.591396Z node 3 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[3:132:2158], cookie=333, session=2, semaphore="Lock1" count=1) 2025-05-07T08:50:30.591478Z node 3 :KESUS_TABLET DEBUG: tablet_db.cpp:124: [72057594037927937] Deleting session 2 / semaphore 1 "Lock1" waiter link 2025-05-07T08:50:30.604317Z node 3 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[3:132:2158], cookie=222) 2025-05-07T08:50:30.604407Z node 3 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[3:132:2158], cookie=333) 2025-05-07T08:50:30.605060Z node 3 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[3:151:2175], cookie=2591435310805334621, name="Lock1") 2025-05-07T08:50:30.605162Z node 3 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[3:151:2175], cookie=2591435310805334621) 2025-05-07T08:50:30.605653Z node 3 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[3:154:2178], cookie=13752092850302103431, name="Lock1") 2025-05-07T08:50:30.605729Z node 3 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[3:154:2178], cookie=13752092850302103431) 2025-05-07T08:50:30.621333Z node 3 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-05-07T08:50:30.621482Z node 3 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-05-07T08:50:30.622079Z node 3 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-05-07T08:50:30.622735Z node 3 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-05-07T08:50:30.671187Z node 3 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-05-07T08:50:30.671429Z node 3 :KESUS_TABLET DEBUG: tablet_db.cpp:152: [72057594037927937] Processing semaphore 1 "Lock1" queue: next order #1 session 1 2025-05-07T08:50:30.671858Z node 3 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[3:194:2208], 
cookie=11317696560793436393, name="Lock1") 2025-05-07T08:50:30.671960Z node 3 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[3:194:2208], cookie=11317696560793436393) 2025-05-07T08:50:30.672608Z node 3 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[3:202:2215], cookie=12810690614262569501, name="Lock1") 2025-05-07T08:50:30.672686Z node 3 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[3:202:2215], cookie=12810690614262569501) 2025-05-07T08:50:31.503817Z node 4 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-05-07T08:50:31.503980Z node 4 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-05-07T08:50:31.524359Z node 4 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-05-07T08:50:31.524494Z node 4 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-05-07T08:50:31.539510Z node 4 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-05-07T08:50:31.540014Z node 4 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[4:132:2158], cookie=960689534810555548, session=0, seqNo=0) 2025-05-07T08:50:31.540190Z node 4 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 1 2025-05-07T08:50:31.563286Z node 4 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[4:132:2158], cookie=960689534810555548, session=1) 2025-05-07T08:50:31.563702Z node 4 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[4:132:2158], cookie=11729616621578251542, session=0, seqNo=0) 2025-05-07T08:50:31.563869Z node 4 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 2 2025-05-07T08:50:31.577364Z node 4 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[4:132:2158], cookie=11729616621578251542, session=2) 2025-05-07T08:50:31.577716Z node 4 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[4:132:2158], cookie=111, session=1, semaphore="Lock1" count=18446744073709551615) 2025-05-07T08:50:31.577908Z node 4 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:126: [72057594037927937] Created new ephemeral semaphore 1 "Lock1" 2025-05-07T08:50:31.578035Z node 4 :KESUS_TABLET DEBUG: tablet_db.cpp:152: [72057594037927937] Processing semaphore 1 "Lock1" queue: next order #1 session 1 2025-05-07T08:50:31.590891Z node 4 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[4:132:2158], cookie=111) 2025-05-07T08:50:31.591323Z node 4 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[4:132:2158], cookie=222, session=2, semaphore="Lock1" count=1) 2025-05-07T08:50:31.591756Z node 4 :KESUS_TABLET DEBUG: tx_semaphore_release.cpp:37: [72057594037927937] TTxSemaphoreRelease::Execute (sender=[4:132:2158], cookie=333, name="Lock1") 2025-05-07T08:50:31.591873Z node 4 :KESUS_TABLET DEBUG: tablet_db.cpp:124: [72057594037927937] Deleting session 2 / semaphore 1 "Lock1" waiter link 2025-05-07T08:50:31.606111Z node 4 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: 
[72057594037927937] TTxSemaphoreAcquire::Complete (sender=[4:132:2158], cookie=222) 2025-05-07T08:50:31.606212Z node 4 :KESUS_TABLET DEBUG: tx_semaphore_release.cpp:93: [72057594037927937] TTxSemaphoreRelease::Complete (sender=[4:132:2158], cookie=333) 2025-05-07T08:50:32.094299Z node 5 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-05-07T08:50:32.094437Z node 5 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-05-07T08:50:32.116415Z node 5 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-05-07T08:50:32.116581Z node 5 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-05-07T08:50:32.144753Z node 5 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-05-07T08:50:32.153060Z node 5 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:36: [72057594037927937] TTxQuoterResourceAdd::Execute (sender=[5:132:2158], cookie=5229463296691557657, path="/Root", config={ MaxUnitsPerSecond: 100 }) 2025-05-07T08:50:32.153368Z node 5 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:76: [72057594037927937] Created new quoter resource 1 "Root" 2025-05-07T08:50:32.168065Z node 5 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:85: [72057594037927937] TTxQuoterResourceAdd::Complete (sender=[5:132:2158], cookie=5229463296691557657) 2025-05-07T08:50:32.168831Z node 5 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:36: [72057594037927937] TTxQuoterResourceAdd::Execute (sender=[5:141:2165], cookie=4409274268265827999, path="/Root/Res", config={ }) 2025-05-07T08:50:32.169141Z node 5 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:76: [72057594037927937] Created new quoter resource 2 "Root/Res" 2025-05-07T08:50:32.181636Z node 5 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:85: [72057594037927937] TTxQuoterResourceAdd::Complete (sender=[5:141:2165], cookie=4409274268265827999) 2025-05-07T08:50:32.183723Z node 5 :KESUS_TABLET TRACE: quoter_runtime.cpp:145: [72057594037927937] Send TEvSubscribeOnResourcesResult to [5:146:2170]. Cookie: 8811479191505035595. Data: { Results { ResourceId: 2 Error { Status: SUCCESS } EffectiveProps { ResourceId: 2 ResourcePath: "Root/Res" HierarchicalDRRResourceConfig { MaxUnitsPerSecond: 100 MaxBurstSizeCoefficient: 1 Weight: 1 } AccountingConfig { ReportPeriodMs: 5000 AccountPeriodMs: 1000 CollectPeriodSec: 30 ProvisionedCoefficient: 60 OvershootCoefficient: 1.1 Provisioned { BillingPeriodSec: 60 } OnDemand { BillingPeriodSec: 60 } Overshoot { BillingPeriodSec: 60 } } } } ProtocolVersion: 1 } 2025-05-07T08:50:32.183814Z node 5 :KESUS_TABLET DEBUG: quoter_runtime.cpp:150: [72057594037927937] Subscribe on quoter resources (sender=[5:146:2170], cookie=8811479191505035595) 2025-05-07T08:50:32.184398Z node 5 :KESUS_TABLET TRACE: quoter_runtime.cpp:193: [72057594037927937] Send TEvUpdateConsumptionStateAck to [5:146:2170]. Cookie: 10635266690294329393. Data: { } 2025-05-07T08:50:32.184450Z node 5 :KESUS_TABLET DEBUG: quoter_runtime.cpp:198: [72057594037927937] Update quoter resources consumption state (sender=[5:146:2170], cookie=10635266690294329393) 2025-05-07T08:50:32.227681Z node 5 :KESUS_TABLET TRACE: quoter_runtime.cpp:93: [72057594037927937] Send TEvResourcesAllocated to [5:146:2170]. Cookie: 0. 
Data: { ResourcesInfo { ResourceId: 2 Amount: 10 StateNotification { Status: SUCCESS } } } 2025-05-07T08:50:32.270590Z node 5 :KESUS_TABLET TRACE: quoter_runtime.cpp:93: [72057594037927937] Send TEvResourcesAllocated to [5:146:2170]. Cookie: 0. Data: { ResourcesInfo { ResourceId: 2 Amount: 10 StateNotification { Status: SUCCESS } } } 2025-05-07T08:50:32.302014Z node 5 :KESUS_TABLET TRACE: quoter_runtime.cpp:93: [72057594037927937] Send TEvResourcesAllocated to [5:146:2170]. Cookie: 0. Data: { ResourcesInfo { ResourceId: 2 Amount: 10 StateNotification { Status: SUCCESS } } } 2025-05-07T08:50:32.338492Z node 5 :KESUS_TABLET TRACE: quoter_runtime.cpp:93: [72057594037927937] Send TEvResourcesAllocated to [5:146:2170]. Cookie: 0. Data: { ResourcesInfo { ResourceId: 2 Amount: 10 StateNotification { Status: SUCCESS } } } 2025-05-07T08:50:32.381271Z node 5 :KESUS_TABLET TRACE: quoter_runtime.cpp:93: [72057594037927937] Send TEvResourcesAllocated to [5:146:2170]. Cookie: 0. Data: { ResourcesInfo { ResourceId: 2 Amount: 10 StateNotification { Status: SUCCESS } } } >> TKesusTest::TestAttachFastPathBlocked [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_background_cleaning/unittest >> TSchemeshardBackgroundCleaningTest::TempInTemp [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:224:2060] recipient: [1:218:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:224:2060] recipient: [1:218:2142] Leader for TabletID 72057594046678944 is [1:234:2152] sender: [1:235:2060] recipient: [1:218:2142] 2025-05-07T08:49:11.774901Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:49:11.775005Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:49:11.775061Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:49:11.784908Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:49:11.786953Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:49:11.787021Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:49:11.791408Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:49:11.791556Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:49:11.793136Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:49:11.803459Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:49:12.189630Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:49:12.189714Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:49:12.260218Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:49:12.274905Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:49:12.288851Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:49:12.347596Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:49:12.370125Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:49:12.390305Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:49:12.434204Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:49:12.527782Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:49:12.666211Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:49:12.666323Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:49:12.683168Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:49:12.683280Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:49:12.683361Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:49:12.683588Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:49:12.710871Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:234:2152] sender: [1:348:2060] recipient: [1:17:2064] 2025-05-07T08:49:12.906136Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:49:12.906427Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:49:12.906689Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount 
reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:49:12.927327Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:49:12.927478Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:49:12.939226Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:49:12.948154Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:49:12.948521Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:49:12.948597Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:49:12.948641Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:49:12.948750Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:49:12.960922Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:49:12.961028Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:49:12.961074Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:49:12.968732Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:49:12.968817Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:49:12.982076Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:49:13.016723Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:49:13.038209Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:49:13.044085Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:49:13.058780Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:49:13.066825Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:49:13.067042Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 243 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:49:13.067100Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:49:13.077498Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:49:13.077631Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:49:13.077826Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:49:13.077915Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:49:13.085660Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:49:13.085727Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:49:13.085930Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:49:13.086002Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 
46678944 TestModificationResult got TxId: 106, wait until txId: 106 TestWaitNotification wait txId: 106 2025-05-07T08:50:31.634813Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 106: send EvNotifyTxCompletion 2025-05-07T08:50:31.634869Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 106 2025-05-07T08:50:31.639153Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269877761, Sender [7:678:2504], Recipient [7:234:2152]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-05-07T08:50:31.639262Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4937: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-05-07T08:50:31.639310Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5713: Pipe server connected, at tablet: 72057594046678944 2025-05-07T08:50:31.639583Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271124996, Sender [7:575:2401], Recipient [7:234:2152]: NKikimrScheme.TEvNotifyTxCompletion TxId: 106 2025-05-07T08:50:31.639623Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4853: StateWork, processing event TEvSchemeShard::TEvNotifyTxCompletion 2025-05-07T08:50:31.639717Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 106, at schemeshard: 72057594046678944 2025-05-07T08:50:31.639854Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 106: got EvNotifyTxCompletionResult 2025-05-07T08:50:31.639902Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 106: satisfy waiter [7:676:2502] 2025-05-07T08:50:31.640135Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269877764, Sender [7:678:2504], Recipient [7:234:2152]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-05-07T08:50:31.640179Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4938: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-05-07T08:50:31.640220Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5761: Server pipe is reset, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 106 TestModificationResults wait txId: 107 2025-05-07T08:50:31.640722Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271122432, Sender [8:551:2102], Recipient [7:234:2152] 2025-05-07T08:50:31.640774Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4851: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-05-07T08:50:31.643530Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/test/tmp/a/b" OperationType: ESchemeOpMkDir MkDir { Name: "tmp2" } TempDirOwnerActorId { RawX1: 551 RawX2: 34359740470 } AllowCreateInTempDir: false } TxId: 107 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:50:31.643979Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_mkdir.cpp:115: TMkDir Propose, path: /MyRoot/test/tmp/a/b/tmp2, operationId: 107:0, at schemeshard: 72057594046678944 2025-05-07T08:50:31.644146Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 107:1, propose status:StatusPreconditionFailed, reason: Check failed: path: '/MyRoot/test/tmp/a/b', error: path is 
temporary (id: [OwnerId: 72057594046678944, LocalPathId: 5], type: EPathTypeDir, state: EPathStateNoChanges), at schemeshard: 72057594046678944 2025-05-07T08:50:31.644364Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046678944 2025-05-07T08:50:31.646915Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 107, response: Status: StatusPreconditionFailed Reason: "Check failed: path: \'/MyRoot/test/tmp/a/b\', error: path is temporary (id: [OwnerId: 72057594046678944, LocalPathId: 5], type: EPathTypeDir, state: EPathStateNoChanges)" TxId: 107 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:50:31.647237Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 107, database: /MyRoot, subject: , status: StatusPreconditionFailed, reason: Check failed: path: '/MyRoot/test/tmp/a/b', error: path is temporary (id: [OwnerId: 72057594046678944, LocalPathId: 5], type: EPathTypeDir, state: EPathStateNoChanges), operation: CREATE DIRECTORY, path: /MyRoot/test/tmp/a/b/tmp2 2025-05-07T08:50:31.647310Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 TestModificationResult got TxId: 107, wait until txId: 107 TestWaitNotification wait txId: 107 2025-05-07T08:50:31.647778Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 107: send EvNotifyTxCompletion 2025-05-07T08:50:31.647830Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 107 2025-05-07T08:50:31.648367Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269877761, Sender [7:684:2510], Recipient [7:234:2152]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-05-07T08:50:31.648433Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4937: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-05-07T08:50:31.648476Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5713: Pipe server connected, at tablet: 72057594046678944 2025-05-07T08:50:31.648599Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271124996, Sender [7:575:2401], Recipient [7:234:2152]: NKikimrScheme.TEvNotifyTxCompletion TxId: 107 2025-05-07T08:50:31.648633Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4853: StateWork, processing event TEvSchemeShard::TEvNotifyTxCompletion 2025-05-07T08:50:31.648721Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 107, at schemeshard: 72057594046678944 2025-05-07T08:50:31.648829Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 107: got EvNotifyTxCompletionResult 2025-05-07T08:50:31.648872Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 107: satisfy waiter [7:682:2508] 2025-05-07T08:50:31.649090Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269877764, Sender [7:684:2510], Recipient [7:234:2152]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-05-07T08:50:31.649189Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4938: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-05-07T08:50:31.649236Z node 7 :FLAT_TX_SCHEMESHARD TRACE: 
schemeshard_impl.cpp:5761: Server pipe is reset, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 107 TestModificationResults wait txId: 108 2025-05-07T08:50:31.649726Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271122432, Sender [8:551:2102], Recipient [7:234:2152] 2025-05-07T08:50:31.649783Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4851: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-05-07T08:50:31.652583Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/test/tmp/a/b" OperationType: ESchemeOpMkDir MkDir { Name: "tmp2" } TempDirOwnerActorId { RawX1: 551 RawX2: 34359740470 } AllowCreateInTempDir: true } TxId: 108 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:50:31.652917Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_mkdir.cpp:115: TMkDir Propose, path: /MyRoot/test/tmp/a/b/tmp2, operationId: 108:0, at schemeshard: 72057594046678944 2025-05-07T08:50:31.652997Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 108:1, propose status:StatusPreconditionFailed, reason: Can't create temporary directory while flag AllowCreateInTempDir is set. Temporary directory can't be created in another temporary directory., at schemeshard: 72057594046678944 2025-05-07T08:50:31.653216Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046678944 2025-05-07T08:50:31.656298Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 108, response: Status: StatusPreconditionFailed Reason: "Can\'t create temporary directory while flag AllowCreateInTempDir is set. Temporary directory can\'t be created in another temporary directory." TxId: 108 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:50:31.656580Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 108, database: /MyRoot, subject: , status: StatusPreconditionFailed, reason: Can't create temporary directory while flag AllowCreateInTempDir is set. 
Temporary directory can't be created in another temporary directory., operation: CREATE DIRECTORY, path: /MyRoot/test/tmp/a/b/tmp2 2025-05-07T08:50:31.656667Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 TestModificationResult got TxId: 108, wait until txId: 108 TestWaitNotification wait txId: 108 2025-05-07T08:50:31.657075Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 108: send EvNotifyTxCompletion 2025-05-07T08:50:31.657112Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 108 2025-05-07T08:50:31.657422Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269877761, Sender [7:690:2516], Recipient [7:234:2152]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-05-07T08:50:31.657477Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4937: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-05-07T08:50:31.657517Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5713: Pipe server connected, at tablet: 72057594046678944 2025-05-07T08:50:31.657631Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271124996, Sender [7:575:2401], Recipient [7:234:2152]: NKikimrScheme.TEvNotifyTxCompletion TxId: 108 2025-05-07T08:50:31.657658Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4853: StateWork, processing event TEvSchemeShard::TEvNotifyTxCompletion 2025-05-07T08:50:31.657720Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 108, at schemeshard: 72057594046678944 2025-05-07T08:50:31.657820Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 108: got EvNotifyTxCompletionResult 2025-05-07T08:50:31.657852Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 108: satisfy waiter [7:688:2514] 2025-05-07T08:50:31.658021Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269877764, Sender [7:690:2516], Recipient [7:234:2152]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-05-07T08:50:31.658051Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4938: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-05-07T08:50:31.658082Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5761: Server pipe is reset, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 108 >> IndexBuildTest::BaseCase [GOOD] >> IndexBuildTest::CancelBuild >> TKesusTest::TestSemaphoreReleaseReacquire [GOOD] >> TKesusTest::TestSemaphoreSessionFailures >> TBackupTests::ShouldSucceedOnMultiShardTable[Raw] |89.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/fq/streaming_optimize/py3test >> test_sql_streaming.py::test[suites-GroupByHoppingWindowPercentile-default.txt] [FAIL] >> TBackupTests::ShouldSucceedOnSingleShardTable[Zstd] >> TBackupTests::ShouldSucceedOnLargeData_MinWriteBatch >> TBackupTests::ShouldSucceedOnSingleShardTable[Raw] [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kesus/tablet/ut/unittest >> TKesusTest::TestAttachFastPathBlocked [GOOD] Test command err: 2025-05-07T08:50:30.221917Z node 1 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-05-07T08:50:30.222076Z node 1 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: 
[72057594037927937] TTxInitSchema::Execute 2025-05-07T08:50:30.247722Z node 1 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-05-07T08:50:30.247848Z node 1 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-05-07T08:50:30.262618Z node 1 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-05-07T08:50:30.263323Z node 1 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[1:130:2156], cookie=12794494096264779755, session=0, seqNo=0) 2025-05-07T08:50:30.263510Z node 1 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 1 2025-05-07T08:50:30.293193Z node 1 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[1:130:2156], cookie=12794494096264779755, session=1) 2025-05-07T08:50:30.293696Z node 1 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[1:130:2156], cookie=15780805724178324418, session=0, seqNo=0) 2025-05-07T08:50:30.293891Z node 1 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 2 2025-05-07T08:50:30.310906Z node 1 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[1:130:2156], cookie=15780805724178324418, session=2) 2025-05-07T08:50:30.943273Z node 2 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-05-07T08:50:30.943385Z node 2 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-05-07T08:50:30.963200Z node 2 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-05-07T08:50:30.963510Z node 2 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-05-07T08:50:30.992020Z node 2 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-05-07T08:50:30.992548Z node 2 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[2:132:2158], cookie=1885130557347043205, session=1, seqNo=0) 2025-05-07T08:50:31.014682Z node 2 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[2:132:2158], cookie=1885130557347043205, session=1) 2025-05-07T08:50:31.801642Z node 3 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-05-07T08:50:31.801756Z node 3 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-05-07T08:50:31.823169Z node 3 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-05-07T08:50:31.823704Z node 3 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-05-07T08:50:31.852328Z node 3 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-05-07T08:50:31.853168Z node 3 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[3:132:2158], cookie=13378546370198781968, session=0, seqNo=0) 2025-05-07T08:50:31.853312Z node 3 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 1 2025-05-07T08:50:31.866888Z node 3 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[3:132:2158], cookie=13378546370198781968, session=1) 2025-05-07T08:50:32.536054Z node 4 :KESUS_TABLET INFO: tablet_impl.cpp:71: 
OnActivateExecutor: 72057594037927937 2025-05-07T08:50:32.536180Z node 4 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-05-07T08:50:32.582979Z node 4 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-05-07T08:50:32.583119Z node 4 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-05-07T08:50:32.614663Z node 4 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-05-07T08:50:32.615014Z node 4 :KESUS_TABLET DEBUG: tx_config_set.cpp:28: [72057594037927937] TTxConfigSet::Execute (sender=[4:132:2158], cookie=17995863630739252908, path="") 2025-05-07T08:50:32.647377Z node 4 :KESUS_TABLET DEBUG: tx_config_set.cpp:94: [72057594037927937] TTxConfigSet::Complete (sender=[4:132:2158], cookie=17995863630739252908, status=SUCCESS) 2025-05-07T08:50:32.648387Z node 4 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[4:141:2165], cookie=5145645372674779110, session=0, seqNo=0) 2025-05-07T08:50:32.648518Z node 4 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 1 2025-05-07T08:50:32.670589Z node 4 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[4:141:2165], cookie=5145645372674779110, session=1) 2025-05-07T08:50:32.671471Z node 4 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[4:142:2166], cookie=111, session=0, seqNo=0) 2025-05-07T08:50:32.671596Z node 4 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 2 2025-05-07T08:50:32.671751Z node 4 :KESUS_TABLET DEBUG: tx_session_attach.cpp:262: [72057594037927937] Fast-path attach session=1 to sender=[4:142:2166], cookie=222, seqNo=0 2025-05-07T08:50:32.690731Z node 4 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[4:142:2166], cookie=111, session=2) 2025-05-07T08:50:33.605812Z node 5 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-05-07T08:50:33.605940Z node 5 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-05-07T08:50:33.652219Z node 5 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-05-07T08:50:33.652396Z node 5 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-05-07T08:50:33.680204Z node 5 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-05-07T08:50:33.680570Z node 5 :KESUS_TABLET DEBUG: tx_config_set.cpp:28: [72057594037927937] TTxConfigSet::Execute (sender=[5:132:2158], cookie=12147192043747554574, path="") 2025-05-07T08:50:33.695845Z node 5 :KESUS_TABLET DEBUG: tx_config_set.cpp:94: [72057594037927937] TTxConfigSet::Complete (sender=[5:132:2158], cookie=12147192043747554574, status=SUCCESS) 2025-05-07T08:50:33.697068Z node 5 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[5:141:2165], cookie=13951242659660018090, session=0, seqNo=0) 2025-05-07T08:50:33.697214Z node 5 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 1 2025-05-07T08:50:33.712126Z node 5 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[5:141:2165], cookie=13951242659660018090, session=1) 2025-05-07T08:50:33.712991Z node 5 :KESUS_TABLET 
DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[5:141:2165], cookie=123, session=1, semaphore="Lock1" count=18446744073709551615) 2025-05-07T08:50:33.713162Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:126: [72057594037927937] Created new ephemeral semaphore 1 "Lock1" 2025-05-07T08:50:33.713258Z node 5 :KESUS_TABLET DEBUG: tablet_db.cpp:152: [72057594037927937] Processing semaphore 1 "Lock1" queue: next order #1 session 1 2025-05-07T08:50:33.713640Z node 5 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[5:142:2166], cookie=111, session=0, seqNo=0) 2025-05-07T08:50:33.713731Z node 5 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 2 2025-05-07T08:50:33.713858Z node 5 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[5:142:2166], cookie=222, session=1, seqNo=0) 2025-05-07T08:50:33.727890Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[5:141:2165], cookie=123) 2025-05-07T08:50:33.728006Z node 5 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[5:142:2166], cookie=111, session=2) 2025-05-07T08:50:33.728062Z node 5 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[5:142:2166], cookie=222, session=1) >> TReplicationTests::DropReplicationWithUnknownSecret [GOOD] >> TBackupTests::BackupUuidColumn[Zstd] >> TKesusTest::TestSemaphoreSessionFailures [GOOD] >> TTopicReaderTests::TestRun_ReadTwoMessages_With_Limit_1 [GOOD] >> TTopicReaderTests::TestRun_Read_Less_Messages_Than_Sent >> TTicketParserTest::NebiusAuthenticationUnavailable >> TTicketParserTest::LoginRefreshGroupsWithError >> TSubDomainTest::Boot [GOOD] >> TSubDomainTest::CheckAccessCopyTable ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_replication/unittest >> TReplicationTests::DropReplicationWithUnknownSecret [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:128:2058] recipient: [1:108:2140] 2025-05-07T08:50:21.869264Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:50:21.869360Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:50:21.869406Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:50:21.869444Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:50:21.869495Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:50:21.869525Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: 
OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:50:21.869581Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:50:21.869655Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:50:21.870484Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:50:21.870881Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:50:21.966949Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:50:21.967017Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:50:21.990329Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:50:21.990470Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:50:21.990684Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:50:22.008107Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:50:22.008918Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:50:22.009754Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:50:22.010183Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:50:22.018733Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:50:22.020189Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:50:22.020252Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:50:22.020303Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:50:22.020361Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:50:22.020397Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:50:22.020532Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:50:22.027012Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary 
subdomain 0:0 Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:239:2058] recipient: [1:15:2062] 2025-05-07T08:50:22.182555Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:50:22.182878Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:50:22.183172Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:50:22.183483Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:50:22.183572Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:50:22.186850Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:50:22.187041Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:50:22.187299Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:50:22.187365Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:50:22.187411Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:50:22.187452Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:50:22.190211Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:50:22.190293Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:50:22.190341Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:50:22.193229Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:50:22.193297Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:50:22.193359Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, 
at tablet# 72057594046678944 2025-05-07T08:50:22.193420Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:50:22.197609Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:50:22.200355Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:50:22.200617Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:50:22.201768Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:50:22.201946Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 130 RawX2: 4294969450 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:50:22.202028Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:50:22.202385Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:50:22.202456Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:50:22.202671Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:50:22.202811Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:50:22.205739Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:50:22.205798Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:50:22.206093Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:50:22.206149Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 
ToDone TxId: 102 ready parts: 1/1 2025-05-07T08:50:34.406377Z node 9 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#102:0 progress is 1/1 2025-05-07T08:50:34.406433Z node 9 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-05-07T08:50:34.406521Z node 9 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: false 2025-05-07T08:50:34.406607Z node 9 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-05-07T08:50:34.406689Z node 9 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 102:0 2025-05-07T08:50:34.406772Z node 9 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 102:0 2025-05-07T08:50:34.406994Z node 9 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-05-07T08:50:34.407072Z node 9 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 102, publications: 2, subscribers: 0 2025-05-07T08:50:34.407137Z node 9 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 102, [OwnerId: 72057594046678944, LocalPathId: 1], 7 2025-05-07T08:50:34.407194Z node 9 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 102, [OwnerId: 72057594046678944, LocalPathId: 2], 18446744073709551615 2025-05-07T08:50:34.409261Z node 9 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 274137603, Sender [9:206:2208], Recipient [9:125:2151]: NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 7 } 2025-05-07T08:50:34.409327Z node 9 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4924: StateWork, processing event NSchemeBoard::NSchemeshardEvents::TEvUpdateAck 2025-05-07T08:50:34.409432Z node 9 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 102 2025-05-07T08:50:34.409540Z node 9 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 102 2025-05-07T08:50:34.409592Z node 9 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 102 2025-05-07T08:50:34.409673Z node 9 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 7 2025-05-07T08:50:34.409754Z node 9 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-05-07T08:50:34.409891Z node 9 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046678944 
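The schemeshard traces above for txId 1 (ESchemeOpAlterSubDomain) and txId 102 (the replication drop) walk the same sub-operation lifecycle, visible in the "Change state for txid N:0 A -> B" lines: 2 -> 3 once TCreateParts has nothing left to create, 3 -> 128 after TConfigureParts, and 128 -> 240 when TPropose handles TEvOperationPlan from coordinator 72057594046316545, after which the result is published to the scheme board and acknowledged via TEvUpdateAck. Below is a minimal C++ sketch of that state walk reconstructed purely from these trace lines; the numeric values come from the log, while the stage names are assumed labels for readability, not YDB's internal identifiers.

    // Toy reconstruction of the sub-operation state walk traced above.
    // Numeric values come from the "Change state for txid" lines; the
    // stage names are assumed labels, not YDB's internal identifiers.
    #include <cstdio>

    enum class EToyTxState : int {
        CreateParts    = 2,   // "TCreateParts ... ProgressState no shards to create"
        ConfigureParts = 3,   // "NSubDomainState::TConfigureParts ... ProgressState"
        Propose        = 128, // "NSubDomainState::TPropose ProgressState": waits for the coordinator
        Done           = 240  // "TOperation IsReadyToDone", results published to the scheme board
    };

    static EToyTxState Next(EToyTxState s) {
        switch (s) {
            case EToyTxState::CreateParts:    return EToyTxState::ConfigureParts;
            case EToyTxState::ConfigureParts: return EToyTxState::Propose;
            case EToyTxState::Propose:        return EToyTxState::Done; // on TEvOperationPlan
            default:                          return EToyTxState::Done;
        }
    }

    int main() {
        // Replays the transitions logged as "Change state for txid 1:0 A -> B".
        for (EToyTxState s = EToyTxState::CreateParts; s != EToyTxState::Done; s = Next(s)) {
            std::printf("Change state: %d -> %d\n", static_cast<int>(s), static_cast<int>(Next(s)));
        }
        return 0;
    }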
2025-05-07T08:50:34.419279Z node 9 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 274137603, Sender [9:206:2208], Recipient [9:125:2151]: NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 18446744073709551615 } 2025-05-07T08:50:34.419361Z node 9 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4924: StateWork, processing event NSchemeBoard::NSchemeshardEvents::TEvUpdateAck 2025-05-07T08:50:34.419469Z node 9 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 102 2025-05-07T08:50:34.419585Z node 9 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 102 2025-05-07T08:50:34.419628Z node 9 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 102 2025-05-07T08:50:34.419668Z node 9 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 18446744073709551615 2025-05-07T08:50:34.419715Z node 9 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-05-07T08:50:34.419876Z node 9 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 102, subscribers: 0 2025-05-07T08:50:34.419956Z node 9 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046678944 2025-05-07T08:50:34.420324Z node 9 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 2146435084, Sender [9:125:2151], Recipient [9:125:2151]: NKikimr::NSchemeShard::TEvPrivate::TEvCleanDroppedPaths 2025-05-07T08:50:34.420386Z node 9 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5011: StateWork, processing event TEvPrivate::TEvCleanDroppedPaths 2025-05-07T08:50:34.420477Z node 9 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-05-07T08:50:34.420559Z node 9 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-05-07T08:50:34.420659Z node 9 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:50:34.429948Z node 9 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-05-07T08:50:34.432551Z node 9 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-05-07T08:50:34.432632Z node 9 :FLAT_TX_SCHEMESHARD TRACE: 
schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-05-07T08:50:34.433392Z node 9 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-05-07T08:50:34.433440Z node 9 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-05-07T08:50:34.433564Z node 9 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 102, wait until txId: 102 TestWaitNotification wait txId: 102 2025-05-07T08:50:34.433882Z node 9 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2025-05-07T08:50:34.433952Z node 9 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 2025-05-07T08:50:34.434520Z node 9 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269877761, Sender [9:444:2399], Recipient [9:125:2151]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-05-07T08:50:34.434604Z node 9 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4937: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-05-07T08:50:34.434664Z node 9 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5713: Pipe server connected, at tablet: 72057594046678944 2025-05-07T08:50:34.434911Z node 9 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271124996, Sender [9:362:2341], Recipient [9:125:2151]: NKikimrScheme.TEvNotifyTxCompletion TxId: 102 2025-05-07T08:50:34.434965Z node 9 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4853: StateWork, processing event TEvSchemeShard::TEvNotifyTxCompletion 2025-05-07T08:50:34.435089Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 2025-05-07T08:50:34.435239Z node 9 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-05-07T08:50:34.435293Z node 9 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [9:442:2397] 2025-05-07T08:50:34.435543Z node 9 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269877764, Sender [9:444:2399], Recipient [9:125:2151]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-05-07T08:50:34.435590Z node 9 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4938: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-05-07T08:50:34.435644Z node 9 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5761: Server pipe is reset, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 102 2025-05-07T08:50:34.436196Z node 9 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271122945, Sender [9:445:2400], Recipient [9:125:2151]: NKikimrSchemeOp.TDescribePath Path: "/MyRoot/Replication" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false } 2025-05-07T08:50:34.436290Z node 9 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4852: StateWork, processing event TEvSchemeShard::TEvDescribeScheme 
2025-05-07T08:50:34.436455Z node 9 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Replication" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944
2025-05-07T08:50:34.436722Z node 9 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Replication" took 282us result status StatusPathDoesNotExist
2025-05-07T08:50:34.436928Z node 9 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/Replication\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/Replication" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944
>> TBackupTests::ShouldSucceedOnMultiShardTable[Zstd]
|89.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/fq/streaming_optimize/py3test >> test_sql_streaming.py::test[suites-WriteTwoTopics-default.txt] [FAIL]
>> TBackupTests::BackupUuidColumn[Raw]
>> TBackupTests::ShouldSucceedOnLargeData[Zstd]
>> TTicketParserTest::TicketFromCertificateWithValidationGood
------- [TM] {asan, default-linux-x86_64, release} ydb/core/kesus/tablet/ut/unittest >> TKesusTest::TestSemaphoreSessionFailures [GOOD]
Test command err:
2025-05-07T08:50:30.445250Z node 1 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937
2025-05-07T08:50:30.445388Z node 1 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute
2025-05-07T08:50:30.467037Z node 1 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete
2025-05-07T08:50:30.467188Z node 1 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute
2025-05-07T08:50:30.498350Z node 1 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete
2025-05-07T08:50:30.498863Z node 1 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[1:132:2158], cookie=18410018285877289256, session=0, seqNo=0)
2025-05-07T08:50:30.499072Z node 1 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 1
2025-05-07T08:50:30.511277Z node 1 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[1:132:2158], cookie=18410018285877289256, session=1)
2025-05-07T08:50:30.511577Z node 1 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[1:132:2158], cookie=7254419630490656878, session=0, seqNo=0)
2025-05-07T08:50:30.511693Z node 1 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 2
2025-05-07T08:50:30.529735Z node 1 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[1:132:2158], cookie=7254419630490656878, session=2)
2025-05-07T08:50:30.530125Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_release.cpp:37: [72057594037927937] TTxSemaphoreRelease::Execute (sender=[1:132:2158], cookie=111, name="Lock1")
2025-05-07T08:50:30.546539Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_release.cpp:93: [72057594037927937] TTxSemaphoreRelease::Complete (sender=[1:132:2158], cookie=111)
2025-05-07T08:50:30.546963Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[1:132:2158], cookie=222, session=1, semaphore="Lock1" count=18446744073709551615)
2025-05-07T08:50:30.547164Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:126: [72057594037927937] Created new ephemeral semaphore 1 "Lock1"
2025-05-07T08:50:30.547267Z node 1 :KESUS_TABLET DEBUG: tablet_db.cpp:152: [72057594037927937] Processing semaphore 1 "Lock1" queue: next order #1 session 1
2025-05-07T08:50:30.563087Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[1:132:2158], cookie=222)
2025-05-07T08:50:30.563420Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_release.cpp:37: [72057594037927937] TTxSemaphoreRelease::Execute (sender=[1:132:2158], cookie=333, name="Lock1")
2025-05-07T08:50:30.576680Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_release.cpp:93: [72057594037927937] TTxSemaphoreRelease::Complete (sender=[1:132:2158], cookie=333)
2025-05-07T08:50:31.371492Z node 2 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937
2025-05-07T08:50:31.371637Z node 2 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute
2025-05-07T08:50:31.394565Z node 2 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete
2025-05-07T08:50:31.394857Z node 2 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute
2025-05-07T08:50:31.430811Z node 2 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete
2025-05-07T08:50:31.431420Z node 2 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[2:132:2158], cookie=10542926388524291997, session=0, seqNo=0)
2025-05-07T08:50:31.431593Z node 2 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 1
2025-05-07T08:50:31.450766Z node 2 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[2:132:2158], cookie=10542926388524291997, session=1)
2025-05-07T08:50:31.451228Z node 2 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[2:132:2158], cookie=6491337714151994124, session=0, seqNo=0)
2025-05-07T08:50:31.451383Z node 2 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 2
2025-05-07T08:50:31.464158Z node 2 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[2:132:2158], cookie=6491337714151994124, session=2)
2025-05-07T08:50:31.464832Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_create.cpp:32: [72057594037927937] TTxSemaphoreCreate::Execute (sender=[2:143:2167], cookie=4361531154869656469, name="Sem1", limit=1)
2025-05-07T08:50:31.465023Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_create.cpp:104: [72057594037927937] Created new semaphore 1 "Sem1"
2025-05-07T08:50:31.478534Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_create.cpp:112: [72057594037927937] TTxSemaphoreCreate::Complete (sender=[2:143:2167], cookie=4361531154869656469)
2025-05-07T08:50:31.478995Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[2:132:2158], cookie=111, session=1, semaphore="Sem1" count=1)
2025-05-07T08:50:31.479186Z node 2 :KESUS_TABLET DEBUG: tablet_db.cpp:152: [72057594037927937] Processing semaphore 1 "Sem1" queue: next order #1 session 1
2025-05-07T08:50:31.479428Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[2:132:2158], cookie=222, session=2, semaphore="Sem1" count=1)
2025-05-07T08:50:31.494687Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[2:132:2158], cookie=111)
2025-05-07T08:50:31.494802Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[2:132:2158], cookie=222)
2025-05-07T08:50:31.495509Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[2:151:2175], cookie=2115163890826598186, name="Sem1")
2025-05-07T08:50:31.495635Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[2:151:2175], cookie=2115163890826598186)
2025-05-07T08:50:31.496154Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[2:154:2178], cookie=6710770043674797106, name="Sem1")
2025-05-07T08:50:31.496233Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[2:154:2178], cookie=6710770043674797106)
2025-05-07T08:50:31.496481Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_release.cpp:37: [72057594037927937] TTxSemaphoreRelease::Execute (sender=[2:132:2158], cookie=333, name="Sem1")
2025-05-07T08:50:31.496604Z node 2 :KESUS_TABLET DEBUG: tablet_db.cpp:124: [72057594037927937] Deleting session 2 / semaphore 1 "Sem1" waiter link
2025-05-07T08:50:31.509094Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_release.cpp:93: [72057594037927937] TTxSemaphoreRelease::Complete (sender=[2:132:2158], cookie=333)
2025-05-07T08:50:31.509811Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[2:159:2183], cookie=17538603610563314196, name="Sem1")
2025-05-07T08:50:31.509925Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[2:159:2183], cookie=17538603610563314196)
2025-05-07T08:50:31.510452Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[2:162:2186], cookie=5106999165093783111, name="Sem1")
2025-05-07T08:50:31.510526Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[2:162:2186], cookie=5106999165093783111)
2025-05-07T08:50:31.510876Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_release.cpp:37: [72057594037927937] TTxSemaphoreRelease::Execute (sender=[2:132:2158], cookie=444, name="Sem1")
2025-05-07T08:50:31.510989Z node 2 :KESUS_TABLET DEBUG: tablet_db.cpp:98: [72057594037927937] Deleting session 1 / semaphore 1 "Sem1" owner link
2025-05-07T08:50:31.524682Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_release.cpp:93: [72057594037927937] TTxSemaphoreRelease::Complete (sender=[2:132:2158], cookie=444)
2025-05-07T08:50:31.525428Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[2:167:2191], cookie=3816708320374119503, name="Sem1")
2025-05-07T08:50:31.525520Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[2:167:2191], cookie=3816708320374119503)
2025-05-07T08:50:31.526164Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[2:170:2194], cookie=2027329036885744271, name="Sem1")
2025-05-07T08:50:31.526244Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[2:170:2194], cookie=2027329036885744271)
2025-05-07T08:50:32.159235Z node 3 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937
2025-05-07T08:50:32.159350Z node 3 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute
2025-05-07T08:50:32.186462Z node 3 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete
2025-05-07T08:50:32.187259Z node 3 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute
2025-05-07T08:50:32.220003Z node 3 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete
2025-05-07T08:50:32.220355Z node 3 :KESUS_TABLET DEBUG: tx_semaphore_create.cpp:32: [72057594037927937] TTxSemaphoreCreate::Execute (sender=[3:132:2158], cookie=17640513991266322162, name="Sem1", limit=1)
2025-05-07T08:50:32.220545Z node 3 :KESUS_TABLET DEBUG: tx_semaphore_create.cpp:104: [72057594037927937] Created new semaphore 1 "Sem1"
2025-05-07T08:50:32.233923Z node 3 :KESUS_TABLET DEBUG: tx_semaphore_create.cpp:112: [72057594037927937] TTxSemaphoreCreate::Complete (sender=[3:132:2158], cookie=17640513991266322162)
2025-05-07T08:50:32.234579Z node 3 :KESUS_TABLET DEBUG: tx_semaphore_create.cpp:32: [72057594037927937] TTxSemaphoreCreate::Execute (sender=[3:141:2165], cookie=11825727680743540791, name="Sem2", limit=1)
2025-05-07T08:50:32.234760Z node 3 :KESUS_TABLET DEBUG: tx_semaphore_create.cpp:104: [72057594037927937] Created new semaphore 2 "Sem2"
2025-05-07T08:50:32.253863Z node 3 :KESUS_TABLET DEBUG: tx_semaphore_create.cpp:112: [72057594037927937] TTxSemaphoreCreate::Complete (sender=[3:141:2165], cookie=11825727680743540791)
2025-05-07T08:50:32.254516Z node 3 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[3:146:2170], cookie=2629653621157435900, name="Sem1")
2025-05-07T08:50:32.254616Z node 3 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[3:146:2170], cookie=2629653621157435900)
2025-05-07T08:50:32.255080Z node 3 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[3:149:2173], cookie=4012477929324329253, name="Sem2")
2025-05-07T08:50:32.255153Z node 3 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[3:149:2173], cookie=4012477929324329253)
2025-05-07T08:50:32.277097Z node 3 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937
2025-05-07T08:50:32.277232Z node 3 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execu ...
TxSemaphoreCreate::Complete (sender=[4:244:2267], cookie=15803471566054858479)
2025-05-07T08:50:33.902903Z node 4 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[4:132:2158], cookie=111, session=1, semaphore="Sem1" count=1)
2025-05-07T08:50:33.903108Z node 4 :KESUS_TABLET DEBUG: tablet_db.cpp:152: [72057594037927937] Processing semaphore 11 "Sem1" queue: next order #1 session 1
2025-05-07T08:50:33.916643Z node 4 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[4:132:2158], cookie=111)
2025-05-07T08:50:33.917396Z node 4 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[4:132:2158], cookie=222, session=2, semaphore="Sem1" count=1)
2025-05-07T08:50:33.947237Z node 4 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[4:132:2158], cookie=222)
2025-05-07T08:50:33.947972Z node 4 :KESUS_TABLET DEBUG: tx_semaphore_release.cpp:37: [72057594037927937] TTxSemaphoreRelease::Execute (sender=[4:132:2158], cookie=333, name="Sem1")
2025-05-07T08:50:33.948140Z node 4 :KESUS_TABLET DEBUG: tablet_db.cpp:124: [72057594037927937] Deleting session 2 / semaphore 11 "Sem1" waiter link
2025-05-07T08:50:33.962905Z node 4 :KESUS_TABLET DEBUG: tx_semaphore_release.cpp:93: [72057594037927937] TTxSemaphoreRelease::Complete (sender=[4:132:2158], cookie=333)
2025-05-07T08:50:33.963679Z node 4 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[4:132:2158], cookie=444, session=2, semaphore="Sem1" count=1)
2025-05-07T08:50:33.980855Z node 4 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[4:132:2158], cookie=444)
2025-05-07T08:50:33.981623Z node 4 :KESUS_TABLET DEBUG: tx_semaphore_release.cpp:37: [72057594037927937] TTxSemaphoreRelease::Execute (sender=[4:132:2158], cookie=555, name="Sem1")
2025-05-07T08:50:33.981766Z node 4 :KESUS_TABLET DEBUG: tablet_db.cpp:98: [72057594037927937] Deleting session 1 / semaphore 11 "Sem1" owner link
2025-05-07T08:50:33.981847Z node 4 :KESUS_TABLET DEBUG: tablet_db.cpp:152: [72057594037927937] Processing semaphore 11 "Sem1" queue: next order #3 session 2
2025-05-07T08:50:34.005524Z node 4 :KESUS_TABLET DEBUG: tx_semaphore_release.cpp:93: [72057594037927937] TTxSemaphoreRelease::Complete (sender=[4:132:2158], cookie=555)
2025-05-07T08:50:34.794305Z node 5 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937
2025-05-07T08:50:34.794462Z node 5 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute
2025-05-07T08:50:34.832738Z node 5 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete
2025-05-07T08:50:34.832959Z node 5 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute
2025-05-07T08:50:34.870861Z node 5 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete
2025-05-07T08:50:34.871556Z node 5 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[5:132:2158], cookie=12709294911587330891, session=0, seqNo=0)
2025-05-07T08:50:34.871784Z node 5 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 1
2025-05-07T08:50:34.891191Z node 5 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[5:132:2158], cookie=12709294911587330891, session=1)
2025-05-07T08:50:34.891630Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_create.cpp:32: [72057594037927937] TTxSemaphoreCreate::Execute (sender=[5:132:2158], cookie=112, name="Sem1", limit=5)
2025-05-07T08:50:34.891897Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_create.cpp:104: [72057594037927937] Created new semaphore 1 "Sem1"
2025-05-07T08:50:34.908426Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_create.cpp:112: [72057594037927937] TTxSemaphoreCreate::Complete (sender=[5:132:2158], cookie=112)
2025-05-07T08:50:34.908946Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_update.cpp:28: [72057594037927937] TTxSemaphoreUpdate::Execute (sender=[5:132:2158], cookie=113, name="Sem1")
2025-05-07T08:50:34.927917Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_update.cpp:84: [72057594037927937] TTxSemaphoreUpdate::Complete (sender=[5:132:2158], cookie=113)
2025-05-07T08:50:34.928383Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_delete.cpp:28: [72057594037927937] TTxSemaphoreDelete::Execute (sender=[5:132:2158], cookie=114, name="Sem1", force=0)
2025-05-07T08:50:34.928513Z node 5 :KESUS_TABLET DEBUG: tablet_db.cpp:58: [72057594037927937] Deleting semaphore 1 "Sem1"
2025-05-07T08:50:34.947094Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_delete.cpp:95: [72057594037927937] TTxSemaphoreDelete::Complete (sender=[5:132:2158], cookie=114)
2025-05-07T08:50:34.947527Z node 5 :KESUS_TABLET DEBUG: tx_session_detach.cpp:100: [72057594037927937] Fast-path detach session=1 from sender=[5:132:2158], cookie=5201267822175480192
2025-05-07T08:50:34.948024Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_create.cpp:32: [72057594037927937] TTxSemaphoreCreate::Execute (sender=[5:132:2158], cookie=115, name="Sem1", limit=5)
2025-05-07T08:50:34.974999Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_create.cpp:112: [72057594037927937] TTxSemaphoreCreate::Complete (sender=[5:132:2158], cookie=115)
2025-05-07T08:50:34.975447Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_update.cpp:28: [72057594037927937] TTxSemaphoreUpdate::Execute (sender=[5:132:2158], cookie=116, name="Sem1")
2025-05-07T08:50:34.995099Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_update.cpp:84: [72057594037927937] TTxSemaphoreUpdate::Complete (sender=[5:132:2158], cookie=116)
2025-05-07T08:50:34.995618Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_delete.cpp:28: [72057594037927937] TTxSemaphoreDelete::Execute (sender=[5:132:2158], cookie=117, name="Sem1", force=0)
2025-05-07T08:50:35.008498Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_delete.cpp:95: [72057594037927937] TTxSemaphoreDelete::Complete (sender=[5:132:2158], cookie=117)
2025-05-07T08:50:35.008938Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[5:132:2158], cookie=118, session=1, semaphore="Sem1" count=1)
2025-05-07T08:50:35.024500Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[5:132:2158], cookie=118)
2025-05-07T08:50:35.025014Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_release.cpp:37: [72057594037927937] TTxSemaphoreRelease::Execute (sender=[5:132:2158], cookie=119, name="Sem1")
2025-05-07T08:50:35.042258Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_release.cpp:93: [72057594037927937] TTxSemaphoreRelease::Complete (sender=[5:132:2158], cookie=119)
2025-05-07T08:50:35.042690Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[5:132:2158], cookie=120, name="Sem1")
2025-05-07T08:50:35.042815Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[5:132:2158], cookie=120)
2025-05-07T08:50:35.043101Z node 5 :KESUS_TABLET DEBUG: tx_session_destroy.cpp:37: [72057594037927937] TTxSessionDestroy::Execute (sender=[5:132:2158], cookie=7500493445224589255, session=1)
2025-05-07T08:50:35.043216Z node 5 :KESUS_TABLET DEBUG: tablet_db.cpp:32: [72057594037927937] Deleting session 1
2025-05-07T08:50:35.062889Z node 5 :KESUS_TABLET DEBUG: tx_session_destroy.cpp:75: [72057594037927937] TTxSessionDestroy::Complete (sender=[5:132:2158], cookie=7500493445224589255)
2025-05-07T08:50:35.063336Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_create.cpp:32: [72057594037927937] TTxSemaphoreCreate::Execute (sender=[5:132:2158], cookie=121, name="Sem1", limit=5)
2025-05-07T08:50:35.079039Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_create.cpp:112: [72057594037927937] TTxSemaphoreCreate::Complete (sender=[5:132:2158], cookie=121)
2025-05-07T08:50:35.079520Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_update.cpp:28: [72057594037927937] TTxSemaphoreUpdate::Execute (sender=[5:132:2158], cookie=122, name="Sem1")
2025-05-07T08:50:35.099043Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_update.cpp:84: [72057594037927937] TTxSemaphoreUpdate::Complete (sender=[5:132:2158], cookie=122)
2025-05-07T08:50:35.099498Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_delete.cpp:28: [72057594037927937] TTxSemaphoreDelete::Execute (sender=[5:132:2158], cookie=123, name="Sem1", force=0)
2025-05-07T08:50:35.119082Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_delete.cpp:95: [72057594037927937] TTxSemaphoreDelete::Complete (sender=[5:132:2158], cookie=123)
2025-05-07T08:50:35.119541Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[5:132:2158], cookie=124, session=1, semaphore="Sem1" count=1)
2025-05-07T08:50:35.138929Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[5:132:2158], cookie=124)
2025-05-07T08:50:35.139353Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_release.cpp:37: [72057594037927937] TTxSemaphoreRelease::Execute (sender=[5:132:2158], cookie=125, name="Sem1")
2025-05-07T08:50:35.169528Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_release.cpp:93: [72057594037927937] TTxSemaphoreRelease::Complete (sender=[5:132:2158], cookie=125)
2025-05-07T08:50:35.169935Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[5:132:2158], cookie=126, name="Sem1")
2025-05-07T08:50:35.170070Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[5:132:2158], cookie=126)
2025-05-07T08:50:35.170887Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_create.cpp:32: [72057594037927937] TTxSemaphoreCreate::Execute (sender=[5:132:2158], cookie=127, name="Sem1", limit=5)
2025-05-07T08:50:35.170981Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_create.cpp:112: [72057594037927937] TTxSemaphoreCreate::Complete (sender=[5:132:2158], cookie=127)
2025-05-07T08:50:35.171256Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_update.cpp:28: [72057594037927937] TTxSemaphoreUpdate::Execute (sender=[5:132:2158], cookie=128, name="Sem1")
2025-05-07T08:50:35.171379Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_update.cpp:84: [72057594037927937] TTxSemaphoreUpdate::Complete (sender=[5:132:2158], cookie=128)
2025-05-07T08:50:35.171694Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_delete.cpp:28: [72057594037927937] TTxSemaphoreDelete::Execute (sender=[5:132:2158], cookie=129, name="Sem1", force=0)
2025-05-07T08:50:35.171764Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_delete.cpp:95: [72057594037927937] TTxSemaphoreDelete::Complete (sender=[5:132:2158], cookie=129)
2025-05-07T08:50:35.172249Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[5:132:2158], cookie=130, session=1, semaphore="Sem1" count=1)
2025-05-07T08:50:35.172353Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[5:132:2158], cookie=130)
2025-05-07T08:50:35.172659Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_release.cpp:37: [72057594037927937] TTxSemaphoreRelease::Execute (sender=[5:132:2158], cookie=131, name="Sem1")
2025-05-07T08:50:35.172739Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_release.cpp:93: [72057594037927937] TTxSemaphoreRelease::Complete (sender=[5:132:2158], cookie=131)
2025-05-07T08:50:35.172973Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[5:132:2158], cookie=132, name="Sem1")
2025-05-07T08:50:35.173038Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[5:132:2158], cookie=132)
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_backup/unittest >> TBackupTests::ShouldSucceedOnSingleShardTable[Raw] [GOOD]
Test command err:
Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140]
IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140]
Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140]
2025-05-07T08:50:33.408023Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1
2025-05-07T08:50:33.408122Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10
2025-05-07T08:50:33.408165Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s
2025-05-07T08:50:33.408202Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration
2025-05-07T08:50:33.408266Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000
2025-05-07T08:50:33.408307Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000
2025-05-07T08:50:33.408383Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10
2025-05-07T08:50:33.408483Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false
2025-05-07T08:50:33.409263Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources#
2025-05-07T08:50:33.409618Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute
2025-05-07T08:50:33.504567Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs
2025-05-07T08:50:33.504631Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded
2025-05-07T08:50:33.531598Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete
2025-05-07T08:50:33.531869Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute
2025-05-07T08:50:33.532032Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944
2025-05-07T08:50:33.546390Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete
2025-05-07T08:50:33.546768Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0
2025-05-07T08:50:33.547439Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944
2025-05-07T08:50:33.547681Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944
2025-05-07T08:50:33.558945Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944
2025-05-07T08:50:33.560416Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944
2025-05-07T08:50:33.560510Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944
2025-05-07T08:50:33.560610Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute
2025-05-07T08:50:33.560686Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1]
2025-05-07T08:50:33.560755Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete
2025-05-07T08:50:33.561011Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944
2025-05-07T08:50:33.568182Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0
Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062]
2025-05-07T08:50:33.704756Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944
2025-05-07T08:50:33.706475Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944
2025-05-07T08:50:33.706784Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0
2025-05-07T08:50:33.707079Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944
2025-05-07T08:50:33.707163Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944
2025-05-07T08:50:33.715331Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944
2025-05-07T08:50:33.715525Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot
2025-05-07T08:50:33.715775Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944
2025-05-07T08:50:33.715847Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944
2025-05-07T08:50:33.715901Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state
2025-05-07T08:50:33.715935Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3
2025-05-07T08:50:33.718459Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944
2025-05-07T08:50:33.718523Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944
2025-05-07T08:50:33.718573Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128
2025-05-07T08:50:33.720670Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944
2025-05-07T08:50:33.720731Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944
2025-05-07T08:50:33.720792Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944
2025-05-07T08:50:33.720852Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1
2025-05-07T08:50:33.724662Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545
2025-05-07T08:50:33.730935Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816
2025-05-07T08:50:33.731190Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545
FAKE_COORDINATOR: Add transaction: 1 at step: 5000001
FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0
FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001
2025-05-07T08:50:33.732274Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944
2025-05-07T08:50:33.732425Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944
2025-05-07T08:50:33.732469Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944
2025-05-07T08:50:33.732793Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240
2025-05-07T08:50:33.732848Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944
2025-05-07T08:50:33.733024Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1
2025-05-07T08:50:33.733120Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944
FAKE_COORDINATOR: Erasing txId 1
2025-05-07T08:50:33.735562Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944
2025-05-07T08:50:33.735619Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1]
2025-05-07T08:50:33.735817Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944
2025-05-07T08:50:33.735865Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ...
schemeshard: 72057594046678944
2025-05-07T08:50:34.125556Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:412: TBackup TPropose, opId: 102:0 HandleReply TEvOperationPlan, stepId: 5000003, at schemeshard: 72057594046678944
2025-05-07T08:50:34.125698Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 102:0 128 -> 129
2025-05-07T08:50:34.125831Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3
2025-05-07T08:50:34.136545Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:783: [Export] [s3] Bootstrap: self# [1:414:2385], attempt# 0
2025-05-07T08:50:34.281594Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:427: [Export] [s3] Handle TEvExportScan::TEvReady: self# [1:414:2385], sender# [1:413:2383]
FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000003
2025-05-07T08:50:34.290030Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944
2025-05-07T08:50:34.290152Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 102, path id: [OwnerId: 72057594046678944, LocalPathId: 2]
2025-05-07T08:50:34.290583Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944
2025-05-07T08:50:34.290684Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:205:2207], at schemeshard: 72057594046678944, txId: 102, path id: 2
2025-05-07T08:50:34.290833Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944
2025-05-07T08:50:34.290894Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:258: TBackup TProposedWaitParts, opId: 102:0 ProgressState, at schemeshard: 72057594046678944
FAKE_COORDINATOR: Erasing txId 102
2025-05-07T08:50:34.292105Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 102
2025-05-07T08:50:34.292287Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 102
2025-05-07T08:50:34.292357Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 102
2025-05-07T08:50:34.292410Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 3
2025-05-07T08:50:34.292464Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4
2025-05-07T08:50:34.292603Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 102, ready parts: 0/1, is published: true
REQUEST: PUT /metadata.json HTTP/1.1
HEADERS:
Host: localhost:6182
Accept: */*
Connection: Upgrade, HTTP2-Settings
Upgrade: h2c
HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA
amz-sdk-invocation-id: 4347E976-E5CE-4685-957E-26CBA4CB7873
amz-sdk-request: attempt=1
content-length: 61
content-md5: 5ZuHSMjV1bVKZhThhMGD5g==
content-type: binary/octet-stream
user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8
x-amz-storage-class: STANDARD
S3_MOCK::HttpServeWrite: /metadata.json / / 61
2025-05-07T08:50:34.308758Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:387: [Export] [s3] HandleMetadata TEvExternalStorage::TEvPutObjectResponse: self# [1:414:2385], result# PutObjectResult { ETag: e59b8748c8d5d5b54a6614e184c183e6 }
2025-05-07T08:50:34.314561Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102
REQUEST: PUT /scheme.pb HTTP/1.1
HEADERS:
Host: localhost:6182
Accept: */*
Connection: Upgrade, HTTP2-Settings
Upgrade: h2c
HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA
amz-sdk-invocation-id: 464A8BC8-40DC-43A9-A28C-3834A7957633
amz-sdk-request: attempt=1
content-length: 357
content-md5: csvC5nqNTZsSLy4ymlp0/Q==
content-type: binary/octet-stream
user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8
x-amz-storage-class: STANDARD
S3_MOCK::HttpServeWrite: /scheme.pb / / 357
2025-05-07T08:50:34.319719Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:294: [Export] [s3] HandleScheme TEvExternalStorage::TEvPutObjectResponse: self# [1:414:2385], result# PutObjectResult { ETag: 72cbc2e67a8d4d9b122f2e329a5a74fd }
2025-05-07T08:50:34.319834Z node 1 :DATASHARD_BACKUP DEBUG: export_scan.cpp:130: [Export] [scanner] Handle TEvExportScan::TEvFeed: self# [1:413:2383]
2025-05-07T08:50:34.319959Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:445: [Export] [s3] Handle TEvExportScan::TEvBuffer: self# [1:414:2385], sender# [1:413:2383], msg# NKikimr::NDataShard::TEvExportScan::TEvBuffer { Last: 1 Checksum: }
REQUEST: PUT /data_00.csv HTTP/1.1
HEADERS:
Host: localhost:6182
Accept: */*
Connection: Upgrade, HTTP2-Settings
Upgrade: h2c
HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA
amz-sdk-invocation-id: B9104691-A618-43D7-9199-3590FB8536EA
amz-sdk-request: attempt=1
content-length: 11
content-md5: bj4KQf2rit2DOGLxvSlUww==
content-type: binary/octet-stream
user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8
x-amz-storage-class: STANDARD
S3_MOCK::HttpServeWrite: /data_00.csv / / 11
2025-05-07T08:50:34.327560Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:487: [Export] [s3] HandleData TEvExternalStorage::TEvPutObjectResponse: self# [1:414:2385], result# PutObjectResult { ETag: 6e3e0a41fdab8add833862f1bd2954c3 }
2025-05-07T08:50:34.327645Z node 1 :DATASHARD_BACKUP INFO: export_s3_uploader.cpp:702: [Export] [s3] Finish: self# [1:414:2385], success# 1, error# , multipart# 0, uploadId# (empty maybe)
2025-05-07T08:50:34.327843Z node 1 :DATASHARD_BACKUP DEBUG: export_scan.cpp:144: [Export] [scanner] Handle TEvExportScan::TEvFinish: self# [1:413:2383], msg# NKikimr::NDataShard::TEvExportScan::TEvFinish { Success: 1 Error: }
2025-05-07T08:50:34.363376Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5472: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 305 RawX2: 4294969588 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10 RowsProcessed: 1 }
2025-05-07T08:50:34.363457Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation FindRelatedPartByTabletId, TxId: 102, tablet: 72075186233409546, partId: 0
2025-05-07T08:50:34.363649Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 102:0, at schemeshard: 72057594046678944, message: Source { RawX1: 305 RawX2: 4294969588 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10 RowsProcessed: 1 }
2025-05-07T08:50:34.363772Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:233: TBackup TProposedWaitParts, opId: 102:0 HandleReply TEvSchemaChanged at tablet# 72057594046678944 message# Source { RawX1: 305 RawX2: 4294969588 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10 RowsProcessed: 1 }
2025-05-07T08:50:34.363857Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:655: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 102:0, shardIdx: 72057594046678944:1, datashard: 72075186233409546, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944
2025-05-07T08:50:34.363905Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:674: all shard schema changes has been received, operationId: 102:0, at schemeshard: 72057594046678944
2025-05-07T08:50:34.363968Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:686: send schema changes ack message, operation: 102:0, datashard: 72075186233409546, at schemeshard: 72057594046678944
2025-05-07T08:50:34.364016Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 102:0 129 -> 240
2025-05-07T08:50:34.364210Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:116: Unable to make a bill: kind# TBackup, opId# 102:0, reason# domain is not a serverless db, domain# /MyRoot, domainPathId# [OwnerId: 72057594046678944, LocalPathId: 1], IsDomainSchemeShard: 1, ParentDomainId: [OwnerId: 72057594046678944, LocalPathId: 1], ResourcesDomainId: [OwnerId: 72057594046678944, LocalPathId: 1]
2025-05-07T08:50:34.366758Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944
2025-05-07T08:50:34.367345Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944
2025-05-07T08:50:34.367432Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046678944] TDone opId# 102:0 ProgressState
2025-05-07T08:50:34.367620Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#102:0 progress is 1/1
2025-05-07T08:50:34.367663Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 102 ready parts: 1/1
2025-05-07T08:50:34.367705Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#102:0 progress is 1/1
2025-05-07T08:50:34.367744Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 102 ready parts: 1/1
2025-05-07T08:50:34.367792Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: true
2025-05-07T08:50:34.367896Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:334:2313] message: TxId: 102
2025-05-07T08:50:34.367953Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 102 ready parts: 1/1
2025-05-07T08:50:34.367996Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 102:0
2025-05-07T08:50:34.368030Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 102:0
2025-05-07T08:50:34.368170Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3
2025-05-07T08:50:34.370653Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult
2025-05-07T08:50:34.370734Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:397:2369]
TestWaitNotification: OK eventTxId 102
|89.1%| [TA] $(B)/ydb/core/tx/schemeshard/ut_replication/test-results/unittest/{meta.json ... results_accumulator.log}
>> TBackupTests::ShouldSucceedOnSingleShardTable[Zstd] [GOOD]
>> TBackupTests::ShouldSucceedOnMultiShardTable[Raw] [GOOD]
|89.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/scheme_board/ut_subscriber/ydb-core-tx-scheme_board-ut_subscriber
|89.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/scheme_board/ut_subscriber/ydb-core-tx-scheme_board-ut_subscriber
|89.1%| [TA] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_replication/test-results/unittest/{meta.json ... results_accumulator.log}
results_accumulator.log} >> TTicketParserTest::AuthenticationWithUserAccount >> TTopicReaderTests::TestRun_ReadMoreMessagesThanLimit_Without_Wait_NoDelimiter [GOOD] >> TTopicReaderTests::TestRun_ReadMessages_Output_Base64 |89.1%| [LD] {RESULT} $(B)/ydb/core/tx/scheme_board/ut_subscriber/ydb-core-tx-scheme_board-ut_subscriber >> DSProxyStrategyTest::Restore_block42 [GOOD] >> TBackupTests::BackupUuidColumn[Zstd] [GOOD] >> Viewer::TabletMerging [GOOD] >> Viewer::StorageGroupOutputWithoutFilterNoDepends >> TTicketParserTest::AuthorizationRetryError >> test_sql_streaming.py::test[suites-GroupByHopTimeExtractorUnusedColumns-default.txt] [FAIL] |89.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/driver_lib/run/ut/ydb-core-driver_lib-run-ut |89.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/driver_lib/run/ut/ydb-core-driver_lib-run-ut |89.1%| [LD] {RESULT} $(B)/ydb/core/driver_lib/run/ut/ydb-core-driver_lib-run-ut |89.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/dsproxy/ut_strategy/unittest >> DSProxyStrategyTest::Restore_block42 [GOOD] >> TBackupTests::BackupUuidColumn[Raw] [GOOD] >> TBackupTests::ShouldSucceedOnMultiShardTable[Zstd] [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_backup/unittest >> TBackupTests::ShouldSucceedOnMultiShardTable[Raw] [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:128:2058] recipient: [1:108:2140] 2025-05-07T08:50:35.342263Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:50:35.342361Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:50:35.342401Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:50:35.342528Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:50:35.342607Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:50:35.342647Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:50:35.342716Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:50:35.342821Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:50:35.343534Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 
2025-05-07T08:50:35.343968Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute
2025-05-07T08:50:35.441608Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs
2025-05-07T08:50:35.441705Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded
2025-05-07T08:50:35.463313Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete
2025-05-07T08:50:35.463451Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute
2025-05-07T08:50:35.463674Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944
2025-05-07T08:50:35.474574Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete
2025-05-07T08:50:35.475163Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0
2025-05-07T08:50:35.475901Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944
2025-05-07T08:50:35.476238Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944
2025-05-07T08:50:35.478485Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944
2025-05-07T08:50:35.479870Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944
2025-05-07T08:50:35.479938Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944
2025-05-07T08:50:35.479992Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute
2025-05-07T08:50:35.480038Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1]
2025-05-07T08:50:35.480084Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete
2025-05-07T08:50:35.480294Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944
2025-05-07T08:50:35.486905Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0
Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:239:2058] recipient: [1:15:2062]
2025-05-07T08:50:35.656060Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944
2025-05-07T08:50:35.656376Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944
2025-05-07T08:50:35.656631Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0
2025-05-07T08:50:35.656872Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944
2025-05-07T08:50:35.657037Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944
2025-05-07T08:50:35.659759Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944
2025-05-07T08:50:35.659955Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot
2025-05-07T08:50:35.660172Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944
2025-05-07T08:50:35.660234Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944
2025-05-07T08:50:35.660466Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state
2025-05-07T08:50:35.660596Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3
2025-05-07T08:50:35.662960Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944
2025-05-07T08:50:35.663035Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944
2025-05-07T08:50:35.663084Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128
2025-05-07T08:50:35.665402Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944
2025-05-07T08:50:35.665459Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944
2025-05-07T08:50:35.665507Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944
2025-05-07T08:50:35.665574Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1
2025-05-07T08:50:35.669922Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545
2025-05-07T08:50:35.672508Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816
2025-05-07T08:50:35.672712Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545
FAKE_COORDINATOR: Add transaction: 1 at step: 5000001
FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0
FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001
2025-05-07T08:50:35.673904Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944
2025-05-07T08:50:35.674098Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 130 RawX2: 4294969450 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944
2025-05-07T08:50:35.674197Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944
2025-05-07T08:50:35.674491Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240
2025-05-07T08:50:35.674569Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944
2025-05-07T08:50:35.674805Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1
2025-05-07T08:50:35.674903Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944
FAKE_COORDINATOR: Erasing txId 1
2025-05-07T08:50:35.677277Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944
2025-05-07T08:50:35.677328Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1]
2025-05-07T08:50:35.677560Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944
2025-05-07T08:50:35.677610Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ...
DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5
2025-05-07T08:50:36.275993Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 102, ready parts: 0/1, is published: true
REQUEST: PUT /metadata.json HTTP/1.1
HEADERS:
Host: localhost:22605
Accept: */*
Connection: Upgrade, HTTP2-Settings
Upgrade: h2c
HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA
amz-sdk-invocation-id: 95F46EA4-697B-4A8D-8F07-37267F463CB8
amz-sdk-request: attempt=1
content-length: 61
content-md5: 5ZuHSMjV1bVKZhThhMGD5g==
content-type: binary/octet-stream
user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8
x-amz-storage-class: STANDARD
S3_MOCK::HttpServeWrite: /metadata.json / / 61
2025-05-07T08:50:36.278249Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:387: [Export] [s3] HandleMetadata TEvExternalStorage::TEvPutObjectResponse: self# [1:480:2439], result# PutObjectResult { ETag: e59b8748c8d5d5b54a6614e184c183e6 }
2025-05-07T08:50:36.280173Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:487: [Export] [s3] HandleData TEvExternalStorage::TEvPutObjectResponse: self# [1:482:2440], result# PutObjectResult { ETag: 8ec321cb31fe732aef669066d1d41519 }
2025-05-07T08:50:36.280244Z node 1 :DATASHARD_BACKUP INFO: export_s3_uploader.cpp:702: [Export] [s3] Finish: self# [1:482:2440], success# 1, error# , multipart# 0, uploadId# (empty maybe)
2025-05-07T08:50:36.280474Z node 1 :DATASHARD_BACKUP DEBUG: export_scan.cpp:144: [Export] [scanner] Handle TEvExportScan::TEvFinish: self# [1:481:2438], msg# NKikimr::NDataShard::TEvExportScan::TEvFinish { Success: 1 Error: }
REQUEST: PUT /scheme.pb HTTP/1.1
HEADERS:
Host: localhost:22605
Accept: */*
Connection: Upgrade, HTTP2-Settings
Upgrade: h2c
HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA
amz-sdk-invocation-id: 0D14FB0C-20EE-48D5-A7F4-2C1AC082C32C
amz-sdk-request: attempt=1
content-length: 638
content-md5: Myp3UygaBNGp6+7AMgyRnQ==
content-type: binary/octet-stream
user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8
x-amz-storage-class: STANDARD
S3_MOCK::HttpServeWrite: /scheme.pb / / 638
2025-05-07T08:50:36.291235Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:294: [Export] [s3] HandleScheme TEvExternalStorage::TEvPutObjectResponse: self# [1:480:2439], result# PutObjectResult { ETag: 332a7753281a04d1a9ebeec0320c919d }
2025-05-07T08:50:36.291339Z node 1 :DATASHARD_BACKUP DEBUG: export_scan.cpp:130: [Export] [scanner] Handle TEvExportScan::TEvFeed: self# [1:479:2437]
2025-05-07T08:50:36.291419Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:445: [Export] [s3] Handle TEvExportScan::TEvBuffer: self# [1:480:2439], sender# [1:479:2437], msg# NKikimr::NDataShard::TEvExportScan::TEvBuffer { Last: 1 Checksum: }
2025-05-07T08:50:36.297941Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102
REQUEST: PUT /data_00.csv HTTP/1.1
HEADERS:
Host: localhost:22605
Accept: */*
Connection: Upgrade, HTTP2-Settings
Upgrade: h2c
HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA
amz-sdk-invocation-id: 344EACA7-D01F-4736-82DA-C00D77813C37
amz-sdk-request: attempt=1
content-length: 11
content-md5: bj4KQf2rit2DOGLxvSlUww==
content-type: binary/octet-stream
user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8
x-amz-storage-class: STANDARD
S3_MOCK::HttpServeWrite: /data_00.csv / / 11
2025-05-07T08:50:36.303193Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:487: [Export] [s3] HandleData TEvExternalStorage::TEvPutObjectResponse: self# [1:480:2439], result# PutObjectResult { ETag: 6e3e0a41fdab8add833862f1bd2954c3 }
2025-05-07T08:50:36.303261Z node 1 :DATASHARD_BACKUP INFO: export_s3_uploader.cpp:702: [Export] [s3] Finish: self# [1:480:2439], success# 1, error# , multipart# 0, uploadId# (empty maybe)
2025-05-07T08:50:36.303442Z node 1 :DATASHARD_BACKUP DEBUG: export_scan.cpp:144: [Export] [scanner] Handle TEvExportScan::TEvFinish: self# [1:479:2437], msg# NKikimr::NDataShard::TEvExportScan::TEvFinish { Success: 1 Error: }
2025-05-07T08:50:36.336253Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5472: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 320 RawX2: 4294969600 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10 RowsProcessed: 1 }
2025-05-07T08:50:36.336332Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation FindRelatedPartByTabletId, TxId: 102, tablet: 72075186233409546, partId: 0
2025-05-07T08:50:36.336490Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 102:0, at schemeshard: 72057594046678944, message: Source { RawX1: 320 RawX2: 4294969600 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10 RowsProcessed: 1 }
2025-05-07T08:50:36.336639Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:233: TBackup TProposedWaitParts, opId: 102:0 HandleReply TEvSchemaChanged at tablet# 72057594046678944 message# Source { RawX1: 320 RawX2: 4294969600 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10 RowsProcessed: 1 }
2025-05-07T08:50:36.336719Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:655: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 102:0, shardIdx: 72057594046678944:1, datashard: 72075186233409546, left await: 1, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944
2025-05-07T08:50:36.336863Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:116: Unable to make a bill: kind# TBackup, opId# 102:0, reason# domain is not a serverless db, domain# /MyRoot, domainPathId# [OwnerId: 72057594046678944, LocalPathId: 1], IsDomainSchemeShard: 1, ParentDomainId: [OwnerId: 72057594046678944, LocalPathId: 1], ResourcesDomainId: [OwnerId: 72057594046678944, LocalPathId: 1]
2025-05-07T08:50:36.337361Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5472: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 325 RawX2: 4294969603 } Origin: 72075186233409547 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10 RowsProcessed: 1 }
2025-05-07T08:50:36.337402Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation FindRelatedPartByTabletId, TxId: 102, tablet: 72075186233409547, partId: 0
2025-05-07T08:50:36.337531Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 102:0, at schemeshard: 72057594046678944, message: Source { RawX1: 325 RawX2: 4294969603 } Origin: 72075186233409547 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10 RowsProcessed: 1 }
2025-05-07T08:50:36.337623Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:233: TBackup TProposedWaitParts, opId: 102:0 HandleReply TEvSchemaChanged at tablet# 72057594046678944 message# Source { RawX1: 325 RawX2: 4294969603 } Origin: 72075186233409547 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10 RowsProcessed: 1 }
2025-05-07T08:50:36.337669Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:655: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 102:0, shardIdx: 72057594046678944:2, datashard: 72075186233409547, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944
2025-05-07T08:50:36.337722Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:674: all shard schema changes has been received, operationId: 102:0, at schemeshard: 72057594046678944
2025-05-07T08:50:36.337808Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:686: send schema changes ack message, operation: 102:0, datashard: 72075186233409546, at schemeshard: 72057594046678944
2025-05-07T08:50:36.337863Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:686: send schema changes ack message, operation: 102:0, datashard: 72075186233409547, at schemeshard: 72057594046678944
2025-05-07T08:50:36.337892Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 102:0 129 -> 240
2025-05-07T08:50:36.338108Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:116: Unable to make a bill: kind# TBackup, opId# 102:0, reason# domain is not a serverless db, domain# /MyRoot, domainPathId# [OwnerId: 72057594046678944, LocalPathId: 1], IsDomainSchemeShard: 1, ParentDomainId: [OwnerId: 72057594046678944, LocalPathId: 1], ResourcesDomainId: [OwnerId: 72057594046678944, LocalPathId: 1]
2025-05-07T08:50:36.350567Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944
2025-05-07T08:50:36.351390Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944
2025-05-07T08:50:36.351878Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944
2025-05-07T08:50:36.351946Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046678944] TDone opId# 102:0 ProgressState
2025-05-07T08:50:36.352257Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#102:0 progress is 1/1
2025-05-07T08:50:36.352307Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 102 ready parts: 1/1
2025-05-07T08:50:36.352355Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#102:0 progress is 1/1
2025-05-07T08:50:36.352397Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 102 ready parts: 1/1
2025-05-07T08:50:36.352441Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: true
2025-05-07T08:50:36.352532Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:374:2341] message: TxId: 102
2025-05-07T08:50:36.352620Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 102 ready parts: 1/1
2025-05-07T08:50:36.352679Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 102:0
2025-05-07T08:50:36.352731Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 102:0
2025-05-07T08:50:36.352925Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4
2025-05-07T08:50:36.357069Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult
2025-05-07T08:50:36.357143Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:457:2417]
TestWaitNotification: OK eventTxId 102
>> TTicketParserTest::LoginBad
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_backup/unittest >> TBackupTests::ShouldSucceedOnSingleShardTable[Zstd] [GOOD]
Test command err:
Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140]
IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140]
Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:128:2058] recipient: [1:108:2140]
2025-05-07T08:50:35.334110Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1
2025-05-07T08:50:35.334218Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10
2025-05-07T08:50:35.334263Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s
2025-05-07T08:50:35.334296Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration
2025-05-07T08:50:35.334365Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000
2025-05-07T08:50:35.334399Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000
2025-05-07T08:50:35.334477Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10
2025-05-07T08:50:35.334546Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false
2025-05-07T08:50:35.335231Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources#
2025-05-07T08:50:35.335610Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute
2025-05-07T08:50:35.443201Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs
2025-05-07T08:50:35.443292Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded
2025-05-07T08:50:35.462021Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete
2025-05-07T08:50:35.462180Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute
2025-05-07T08:50:35.462387Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944
2025-05-07T08:50:35.475831Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete
2025-05-07T08:50:35.476506Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0
2025-05-07T08:50:35.477353Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944
2025-05-07T08:50:35.477982Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944
2025-05-07T08:50:35.481026Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944
2025-05-07T08:50:35.482786Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944
2025-05-07T08:50:35.482867Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944
2025-05-07T08:50:35.482929Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute
2025-05-07T08:50:35.482981Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1]
2025-05-07T08:50:35.483024Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete
2025-05-07T08:50:35.483254Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944
2025-05-07T08:50:35.492217Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0
Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:239:2058] recipient: [1:15:2062]
2025-05-07T08:50:35.648245Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944
2025-05-07T08:50:35.648615Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944
2025-05-07T08:50:35.648889Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0
2025-05-07T08:50:35.649145Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944
2025-05-07T08:50:35.649232Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944
2025-05-07T08:50:35.655251Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944
2025-05-07T08:50:35.655466Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot
2025-05-07T08:50:35.655724Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944
2025-05-07T08:50:35.655784Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944
2025-05-07T08:50:35.655852Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state
2025-05-07T08:50:35.655905Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3
2025-05-07T08:50:35.661349Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944
2025-05-07T08:50:35.661447Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944
2025-05-07T08:50:35.661516Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128
2025-05-07T08:50:35.674217Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944
2025-05-07T08:50:35.674305Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944
2025-05-07T08:50:35.674362Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944
2025-05-07T08:50:35.674446Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1
2025-05-07T08:50:35.693510Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545
2025-05-07T08:50:35.700339Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816
2025-05-07T08:50:35.700616Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545
FAKE_COORDINATOR: Add transaction: 1 at step: 5000001
FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0
FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001
2025-05-07T08:50:35.701697Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944
2025-05-07T08:50:35.701888Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 130 RawX2: 4294969450 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944
2025-05-07T08:50:35.701942Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944
2025-05-07T08:50:35.702302Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240
2025-05-07T08:50:35.702365Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944
2025-05-07T08:50:35.702572Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1
2025-05-07T08:50:35.702655Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944
FAKE_COORDINATOR: Erasing txId 1
2025-05-07T08:50:35.709328Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944
2025-05-07T08:50:35.709400Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1]
2025-05-07T08:50:35.709657Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944
2025-05-07T08:50:35.709712Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ...
: 72057594046678944
2025-05-07T08:50:36.192221Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:412: TBackup TPropose, opId: 102:0 HandleReply TEvOperationPlan, stepId: 5000003, at schemeshard: 72057594046678944
2025-05-07T08:50:36.192382Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 102:0 128 -> 129
2025-05-07T08:50:36.192527Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3
2025-05-07T08:50:36.221148Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:783: [Export] [s3] Bootstrap: self# [1:418:2389], attempt# 0
2025-05-07T08:50:36.271211Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:427: [Export] [s3] Handle TEvExportScan::TEvReady: self# [1:418:2389], sender# [1:417:2386]
FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000003
2025-05-07T08:50:36.279044Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944
2025-05-07T08:50:36.279119Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 102, path id: [OwnerId: 72057594046678944, LocalPathId: 2]
2025-05-07T08:50:36.279523Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944
2025-05-07T08:50:36.279812Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:206:2208], at schemeshard: 72057594046678944, txId: 102, path id: 2
2025-05-07T08:50:36.280583Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944
2025-05-07T08:50:36.280668Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:258: TBackup TProposedWaitParts, opId: 102:0 ProgressState, at schemeshard: 72057594046678944
FAKE_COORDINATOR: Erasing txId 102
2025-05-07T08:50:36.281439Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 102
2025-05-07T08:50:36.281583Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 102
2025-05-07T08:50:36.281636Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 102
2025-05-07T08:50:36.281701Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 3
2025-05-07T08:50:36.281777Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4
2025-05-07T08:50:36.281869Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 102, ready parts: 0/1, is published: true
REQUEST: PUT /metadata.json HTTP/1.1
HEADERS:
Host: localhost:23287
Accept: */*
Connection: Upgrade, HTTP2-Settings
Upgrade: h2c
HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA
amz-sdk-invocation-id: 7590E4BA-BB2A-46A8-90FC-658D81791447
amz-sdk-request: attempt=1
content-length: 61
content-md5: 5ZuHSMjV1bVKZhThhMGD5g==
content-type: binary/octet-stream
user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8
x-amz-storage-class: STANDARD
S3_MOCK::HttpServeWrite: /metadata.json / / 61
2025-05-07T08:50:36.284655Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:387: [Export] [s3] HandleMetadata TEvExternalStorage::TEvPutObjectResponse: self# [1:418:2389], result# PutObjectResult { ETag: e59b8748c8d5d5b54a6614e184c183e6 }
2025-05-07T08:50:36.300376Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102
REQUEST: PUT /scheme.pb HTTP/1.1
HEADERS:
Host: localhost:23287
Accept: */*
Connection: Upgrade, HTTP2-Settings
Upgrade: h2c
HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA
amz-sdk-invocation-id: 63EA7626-2428-4E4E-B278-B24DEF007107
amz-sdk-request: attempt=1
content-length: 357
content-md5: csvC5nqNTZsSLy4ymlp0/Q==
content-type: binary/octet-stream
user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8
x-amz-storage-class: STANDARD
S3_MOCK::HttpServeWrite: /scheme.pb / / 357
2025-05-07T08:50:36.310116Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:294: [Export] [s3] HandleScheme TEvExternalStorage::TEvPutObjectResponse: self# [1:418:2389], result# PutObjectResult { ETag: 72cbc2e67a8d4d9b122f2e329a5a74fd }
2025-05-07T08:50:36.310233Z node 1 :DATASHARD_BACKUP DEBUG: export_scan.cpp:130: [Export] [scanner] Handle TEvExportScan::TEvFeed: self# [1:417:2386]
2025-05-07T08:50:36.310491Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:445: [Export] [s3] Handle TEvExportScan::TEvBuffer: self# [1:418:2389], sender# [1:417:2386], msg# NKikimr::NDataShard::TEvExportScan::TEvBuffer { Last: 1 Checksum: }
REQUEST: PUT /data_00.csv.zst HTTP/1.1
HEADERS:
Host: localhost:23287
Accept: */*
Connection: Upgrade, HTTP2-Settings
Upgrade: h2c
HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA
amz-sdk-invocation-id: D4A23515-F490-4323-B5A2-2ABBBA87E44B
amz-sdk-request: attempt=1
content-length: 20
content-md5: 2qFn9G0TW8wfvJ9C+A5Jbw==
content-type: binary/octet-stream
user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8
x-amz-storage-class: STANDARD
S3_MOCK::HttpServeWrite: /data_00.csv.zst / / 20
2025-05-07T08:50:36.315161Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:487: [Export] [s3] HandleData TEvExternalStorage::TEvPutObjectResponse: self# [1:418:2389], result# PutObjectResult { ETag: daa167f46d135bcc1fbc9f42f80e496f }
2025-05-07T08:50:36.315245Z node 1 :DATASHARD_BACKUP INFO: export_s3_uploader.cpp:702: [Export] [s3] Finish: self# [1:418:2389], success# 1, error# , multipart# 0, uploadId# (empty maybe)
2025-05-07T08:50:36.315469Z node 1 :DATASHARD_BACKUP DEBUG: export_scan.cpp:144: [Export] [scanner] Handle TEvExportScan::TEvFinish: self# [1:417:2386], msg# NKikimr::NDataShard::TEvExportScan::TEvFinish { Success: 1 Error: }
2025-05-07T08:50:36.347710Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5472: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 309 RawX2: 4294969592 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10 RowsProcessed: 1 }
2025-05-07T08:50:36.347790Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation FindRelatedPartByTabletId, TxId: 102, tablet: 72075186233409546, partId: 0
2025-05-07T08:50:36.347974Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 102:0, at schemeshard: 72057594046678944, message: Source { RawX1: 309 RawX2: 4294969592 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10 RowsProcessed: 1 }
2025-05-07T08:50:36.348084Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:233: TBackup TProposedWaitParts, opId: 102:0 HandleReply TEvSchemaChanged at tablet# 72057594046678944 message# Source { RawX1: 309 RawX2: 4294969592 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10 RowsProcessed: 1 }
2025-05-07T08:50:36.348159Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:655: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 102:0, shardIdx: 72057594046678944:1, datashard: 72075186233409546, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944
2025-05-07T08:50:36.348210Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:674: all shard schema changes has been received, operationId: 102:0, at schemeshard: 72057594046678944
2025-05-07T08:50:36.348252Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:686: send schema changes ack message, operation: 102:0, datashard: 72075186233409546, at schemeshard: 72057594046678944
2025-05-07T08:50:36.348318Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 102:0 129 -> 240
2025-05-07T08:50:36.348558Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:116: Unable to make a bill: kind# TBackup, opId# 102:0, reason# domain is not a serverless db, domain# /MyRoot, domainPathId# [OwnerId: 72057594046678944, LocalPathId: 1], IsDomainSchemeShard: 1, ParentDomainId: [OwnerId: 72057594046678944, LocalPathId: 1], ResourcesDomainId: [OwnerId: 72057594046678944, LocalPathId: 1]
2025-05-07T08:50:36.351202Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944
2025-05-07T08:50:36.351626Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944
2025-05-07T08:50:36.351677Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046678944] TDone opId# 102:0 ProgressState
2025-05-07T08:50:36.351790Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#102:0 progress is 1/1
2025-05-07T08:50:36.351846Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 102 ready parts: 1/1
2025-05-07T08:50:36.351890Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#102:0 progress is 1/1
2025-05-07T08:50:36.351930Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 102 ready parts: 1/1
2025-05-07T08:50:36.351973Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: true
2025-05-07T08:50:36.352054Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:337:2316] message: TxId: 102
2025-05-07T08:50:36.352123Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 102 ready parts: 1/1
2025-05-07T08:50:36.352181Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 102:0
2025-05-07T08:50:36.352217Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 102:0
2025-05-07T08:50:36.352332Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3
2025-05-07T08:50:36.354732Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult
2025-05-07T08:50:36.354814Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:401:2373]
TestWaitNotification: OK eventTxId 102
>> TTicketParserTest::BulkAuthorizationRetryError
>> TTicketParserTest::LoginGood
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_backup/unittest >> TBackupTests::BackupUuidColumn[Zstd] [GOOD]
Test command err:
Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140]
IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140]
Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140]
2025-05-07T08:50:35.824327Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1
2025-05-07T08:50:35.824433Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10
2025-05-07T08:50:35.824473Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s
2025-05-07T08:50:35.824513Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration
2025-05-07T08:50:35.824574Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000
2025-05-07T08:50:35.824633Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000
2025-05-07T08:50:35.824717Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10
2025-05-07T08:50:35.824784Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false
2025-05-07T08:50:35.825604Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources#
2025-05-07T08:50:35.826284Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute
2025-05-07T08:50:35.926919Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs
2025-05-07T08:50:35.927002Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded
2025-05-07T08:50:35.956288Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete
2025-05-07T08:50:35.956553Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute
2025-05-07T08:50:35.956741Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944
2025-05-07T08:50:35.965504Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete
2025-05-07T08:50:35.965853Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0
2025-05-07T08:50:35.966594Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944
2025-05-07T08:50:35.966823Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944
2025-05-07T08:50:35.970266Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944
2025-05-07T08:50:35.971719Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944
2025-05-07T08:50:35.971786Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944
2025-05-07T08:50:35.971863Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute
2025-05-07T08:50:35.971946Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1]
2025-05-07T08:50:35.971994Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete
2025-05-07T08:50:35.972220Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944
2025-05-07T08:50:35.980626Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0
Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062]
2025-05-07T08:50:36.147452Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944
2025-05-07T08:50:36.147699Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944
2025-05-07T08:50:36.147951Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0
2025-05-07T08:50:36.148232Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944
2025-05-07T08:50:36.148304Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944
2025-05-07T08:50:36.151838Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944
2025-05-07T08:50:36.152008Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot
2025-05-07T08:50:36.152224Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944
2025-05-07T08:50:36.152289Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944
2025-05-07T08:50:36.152330Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state
2025-05-07T08:50:36.152377Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3
2025-05-07T08:50:36.154612Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944
2025-05-07T08:50:36.154679Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944
2025-05-07T08:50:36.154754Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128
2025-05-07T08:50:36.156747Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944
2025-05-07T08:50:36.156811Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944
2025-05-07T08:50:36.156858Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944
2025-05-07T08:50:36.156916Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1
2025-05-07T08:50:36.176487Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545
2025-05-07T08:50:36.178789Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816
2025-05-07T08:50:36.178997Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545
FAKE_COORDINATOR: Add transaction: 1 at step: 5000001
FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0
FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001
2025-05-07T08:50:36.180133Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944
2025-05-07T08:50:36.180299Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944
2025-05-07T08:50:36.180341Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944
2025-05-07T08:50:36.180657Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240
2025-05-07T08:50:36.180709Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944
2025-05-07T08:50:36.180859Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1
2025-05-07T08:50:36.180945Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944
FAKE_COORDINATOR: Erasing txId 1
2025-05-07T08:50:36.183392Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944
2025-05-07T08:50:36.183466Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1]
2025-05-07T08:50:36.183692Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944
2025-05-07T08:50:36.183753Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ...
ard: 72057594046678944
2025-05-07T08:50:36.555986Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:412: TBackup TPropose, opId: 102:0 HandleReply TEvOperationPlan, stepId: 5000003, at schemeshard: 72057594046678944
2025-05-07T08:50:36.556149Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 102:0 128 -> 129
2025-05-07T08:50:36.556314Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3
2025-05-07T08:50:36.564912Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:783: [Export] [s3] Bootstrap: self# [1:414:2385], attempt# 0
2025-05-07T08:50:36.664615Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:427: [Export] [s3] Handle TEvExportScan::TEvReady: self# [1:414:2385], sender# [1:413:2383]
FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000003
2025-05-07T08:50:36.675813Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944
2025-05-07T08:50:36.675899Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 102, path id: [OwnerId: 72057594046678944, LocalPathId: 2]
2025-05-07T08:50:36.676213Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944
2025-05-07T08:50:36.676263Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:205:2207], at schemeshard: 72057594046678944, txId: 102, path id: 2
2025-05-07T08:50:36.676361Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944
2025-05-07T08:50:36.676414Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:258: TBackup TProposedWaitParts, opId: 102:0 ProgressState, at schemeshard: 72057594046678944
FAKE_COORDINATOR: Erasing txId 102
2025-05-07T08:50:36.677502Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 102
2025-05-07T08:50:36.677641Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 102
2025-05-07T08:50:36.677708Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 102
2025-05-07T08:50:36.677758Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 3
2025-05-07T08:50:36.677809Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4
2025-05-07T08:50:36.677915Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 102, ready parts: 0/1, is published: true
REQUEST: PUT /metadata.json HTTP/1.1
HEADERS:
Host: localhost:5414
Accept: */*
Connection: Upgrade, HTTP2-Settings
Upgrade: h2c
HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA
amz-sdk-invocation-id: 93C38E19-0D38-4135-9CA7-8E39EA4AE12B
amz-sdk-request: attempt=1
content-length: 61
content-md5: 5ZuHSMjV1bVKZhThhMGD5g==
content-type: binary/octet-stream
user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8
x-amz-storage-class: STANDARD
S3_MOCK::HttpServeWrite: /metadata.json / / 61
2025-05-07T08:50:36.682209Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102
2025-05-07T08:50:36.686208Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:387: [Export] [s3] HandleMetadata TEvExternalStorage::TEvPutObjectResponse: self# [1:414:2385], result# PutObjectResult { ETag: e59b8748c8d5d5b54a6614e184c183e6 }
REQUEST: PUT /scheme.pb HTTP/1.1
HEADERS:
Host: localhost:5414
Accept: */*
Connection: Upgrade, HTTP2-Settings
Upgrade: h2c
HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA
amz-sdk-invocation-id: 7198D8B1-2506-44D6-9D45-A08008638093
amz-sdk-request: attempt=1
content-length: 357
content-md5: IxJB3qM/y2xlsv8qcwTF7g==
content-type: binary/octet-stream
user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8
x-amz-storage-class: STANDARD
S3_MOCK::HttpServeWrite: /scheme.pb / / 357
2025-05-07T08:50:36.691858Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:294: [Export] [s3] HandleScheme TEvExternalStorage::TEvPutObjectResponse: self# [1:414:2385], result# PutObjectResult { ETag: 231241dea33fcb6c65b2ff2a7304c5ee }
2025-05-07T08:50:36.691969Z node 1 :DATASHARD_BACKUP DEBUG: export_scan.cpp:130: [Export] [scanner] Handle TEvExportScan::TEvFeed: self# [1:413:2383]
2025-05-07T08:50:36.692193Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:445: [Export] [s3] Handle TEvExportScan::TEvBuffer: self# [1:414:2385], sender# [1:413:2383], msg# NKikimr::NDataShard::TEvExportScan::TEvBuffer { Last: 1 Checksum: }
REQUEST: PUT /data_00.csv.zst HTTP/1.1
HEADERS:
Host: localhost:5414
Accept: */*
Connection: Upgrade, HTTP2-Settings
Upgrade: h2c
HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA
amz-sdk-invocation-id: 75A81AD2-D8E5-46EF-94B4-269142727381
amz-sdk-request: attempt=1
content-length: 40
content-md5: LXbLDYru8NmFsYXNSXjnpQ==
content-type: binary/octet-stream
user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8
x-amz-storage-class: STANDARD
S3_MOCK::HttpServeWrite: /data_00.csv.zst / / 40
2025-05-07T08:50:36.695707Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:487: [Export] [s3] HandleData TEvExternalStorage::TEvPutObjectResponse: self# [1:414:2385], result# PutObjectResult { ETag: 2d76cb0d8aeef0d985b185cd4978e7a5 }
2025-05-07T08:50:36.695770Z node 1 :DATASHARD_BACKUP INFO: export_s3_uploader.cpp:702: [Export] [s3] Finish: self# [1:414:2385], success# 1, error# , multipart# 0, uploadId# (empty maybe)
2025-05-07T08:50:36.695937Z node 1 :DATASHARD_BACKUP DEBUG: export_scan.cpp:144: [Export] [scanner] Handle TEvExportScan::TEvFinish: self# [1:413:2383], msg# NKikimr::NDataShard::TEvExportScan::TEvFinish { Success: 1 Error: }
2025-05-07T08:50:36.719336Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5472: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 305 RawX2: 4294969588 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 20 RowsProcessed: 1 }
2025-05-07T08:50:36.719413Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation FindRelatedPartByTabletId, TxId: 102, tablet: 72075186233409546, partId: 0
2025-05-07T08:50:36.719632Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 102:0, at schemeshard: 72057594046678944, message: Source { RawX1: 305 RawX2: 4294969588 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 20 RowsProcessed: 1 }
2025-05-07T08:50:36.719764Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:233: TBackup TProposedWaitParts, opId: 102:0 HandleReply TEvSchemaChanged at tablet# 72057594046678944 message# Source { RawX1: 305 RawX2: 4294969588 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 20 RowsProcessed: 1 }
2025-05-07T08:50:36.719839Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:655: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 102:0, shardIdx: 72057594046678944:1, datashard: 72075186233409546, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944
2025-05-07T08:50:36.719902Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:674: all shard schema changes has been received, operationId: 102:0, at schemeshard: 72057594046678944
2025-05-07T08:50:36.719958Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:686: send schema changes ack message, operation: 102:0, datashard: 72075186233409546, at schemeshard: 72057594046678944
2025-05-07T08:50:36.720014Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 102:0 129 -> 240
2025-05-07T08:50:36.720204Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:116: Unable to make a bill: kind# TBackup, opId# 102:0, reason# domain is not a serverless db, domain# /MyRoot, domainPathId# [OwnerId: 72057594046678944, LocalPathId: 1], IsDomainSchemeShard: 1, ParentDomainId: [OwnerId: 72057594046678944, LocalPathId: 1], ResourcesDomainId: [OwnerId: 72057594046678944, LocalPathId: 1]
2025-05-07T08:50:36.726793Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944
2025-05-07T08:50:36.727260Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944
2025-05-07T08:50:36.727327Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046678944] TDone opId# 102:0 ProgressState
2025-05-07T08:50:36.727464Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#102:0 progress is 1/1
2025-05-07T08:50:36.727508Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 102 ready parts: 1/1
2025-05-07T08:50:36.727582Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#102:0 progress is 1/1
2025-05-07T08:50:36.727621Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 102 ready parts: 1/1
2025-05-07T08:50:36.727666Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation
IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: true 2025-05-07T08:50:36.727762Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:334:2313] message: TxId: 102 2025-05-07T08:50:36.727846Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-05-07T08:50:36.727892Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 102:0 2025-05-07T08:50:36.727926Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 102:0 2025-05-07T08:50:36.728063Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-05-07T08:50:36.735017Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-05-07T08:50:36.735093Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:397:2369] TestWaitNotification: OK eventTxId 102
>> IndexBuildTest::CancelBuild [GOOD]
>> TTicketParserTest::TicketFromCertificateCheckIssuerGood
>> HullReplWriteSst::Basic [GOOD]
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_backup/unittest >> TBackupTests::BackupUuidColumn[Raw] [GOOD]
Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:128:2058] recipient: [1:108:2140] 2025-05-07T08:50:36.925501Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:50:36.925623Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:50:36.925669Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:50:36.925715Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:50:36.925768Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:50:36.925815Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:50:36.925885Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:50:36.927029Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false
2025-05-07T08:50:36.927824Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:50:36.928315Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:50:37.035243Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:50:37.035455Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:50:37.069772Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:50:37.070056Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:50:37.070359Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:50:37.086807Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:50:37.087723Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:50:37.088881Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:50:37.089287Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:50:37.092519Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:50:37.094542Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:50:37.094622Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:50:37.094687Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:50:37.094780Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:50:37.094835Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:50:37.095088Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:50:37.110184Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:239:2058] recipient: [1:15:2062] 2025-05-07T08:50:37.266953Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:50:37.267288Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:50:37.267548Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:50:37.267836Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:50:37.267938Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:50:37.272071Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:50:37.272320Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:50:37.272559Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:50:37.272636Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:50:37.272682Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:50:37.272717Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:50:37.281263Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:50:37.281357Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:50:37.281407Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:50:37.291366Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:50:37.291469Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:50:37.291521Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:50:37.291582Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:50:37.296067Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:50:37.303106Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:50:37.303367Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:50:37.304517Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:50:37.304732Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 130 RawX2: 4294969450 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:50:37.304787Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:50:37.305108Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:50:37.305180Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:50:37.305382Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:50:37.305478Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:50:37.321034Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:50:37.321106Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:50:37.321374Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:50:37.321464Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 
emeshard: 72057594046678944 2025-05-07T08:50:37.891755Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:412: TBackup TPropose, opId: 102:0 HandleReply TEvOperationPlan, stepId: 5000003, at schemeshard: 72057594046678944 2025-05-07T08:50:37.891903Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 102:0 128 -> 129 2025-05-07T08:50:37.892045Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-05-07T08:50:37.970167Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:783: [Export] [s3] Bootstrap: self# [1:418:2389], attempt# 0 2025-05-07T08:50:38.057866Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:427: [Export] [s3] Handle TEvExportScan::TEvReady: self# [1:418:2389], sender# [1:417:2386] REQUEST: PUT /metadata.json HTTP/1.1 HEADERS: Host: localhost:20895 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: B83A1E7E-415B-4B8D-817E-580BB96B66E0 amz-sdk-request: attempt=1 content-length: 61 content-md5: 5ZuHSMjV1bVKZhThhMGD5g== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-storage-class: STANDARD S3_MOCK::HttpServeWrite: /metadata.json / / 61 2025-05-07T08:50:38.067680Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:387: [Export] [s3] HandleMetadata TEvExternalStorage::TEvPutObjectResponse: self# [1:418:2389], result# PutObjectResult { ETag: e59b8748c8d5d5b54a6614e184c183e6 } FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000003 2025-05-07T08:50:38.072777Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:50:38.072840Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 102, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-05-07T08:50:38.073191Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:50:38.073242Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:206:2208], at schemeshard: 72057594046678944, txId: 102, path id: 2 2025-05-07T08:50:38.073886Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-05-07T08:50:38.073952Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:258: TBackup TProposedWaitParts, opId: 102:0 ProgressState, at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 102 2025-05-07T08:50:38.074901Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 102 2025-05-07T08:50:38.075059Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 102 2025-05-07T08:50:38.075158Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 102 2025-05-07T08:50:38.075231Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 3 2025-05-07T08:50:38.075278Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-05-07T08:50:38.075392Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 102, ready parts: 0/1, is published: true REQUEST: PUT /scheme.pb HTTP/1.1 HEADERS: Host: localhost:20895 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 7CB74901-EF2C-423A-B31F-3A2B98607CED amz-sdk-request: attempt=1 content-length: 357 content-md5: IxJB3qM/y2xlsv8qcwTF7g== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-storage-class: STANDARD S3_MOCK::HttpServeWrite: /scheme.pb / / 357 2025-05-07T08:50:38.078429Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:294: [Export] [s3] HandleScheme TEvExternalStorage::TEvPutObjectResponse: self# [1:418:2389], result# PutObjectResult { ETag: 231241dea33fcb6c65b2ff2a7304c5ee } 2025-05-07T08:50:38.079489Z node 1 :DATASHARD_BACKUP DEBUG: export_scan.cpp:130: [Export] [scanner] Handle TEvExportScan::TEvFeed: self# [1:417:2386] 2025-05-07T08:50:38.079626Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:445: [Export] [s3] Handle TEvExportScan::TEvBuffer: self# [1:418:2389], sender# [1:417:2386], msg# NKikimr::NDataShard::TEvExportScan::TEvBuffer { Last: 1 Checksum: } 2025-05-07T08:50:38.083574Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 REQUEST: PUT /data_00.csv HTTP/1.1 HEADERS: Host: localhost:20895 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 028025A7-4BD6-4DAB-AFD7-7246CCE416D0 amz-sdk-request: attempt=1 content-length: 39 content-md5: GLX1nc5/cKhlAfxBHlykQA== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-storage-class: STANDARD S3_MOCK::HttpServeWrite: /data_00.csv / / 39 2025-05-07T08:50:38.084508Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:487: [Export] [s3] HandleData TEvExternalStorage::TEvPutObjectResponse: self# [1:418:2389], result# PutObjectResult { ETag: 18b5f59dce7f70a86501fc411e5ca440 } 2025-05-07T08:50:38.084568Z node 1 :DATASHARD_BACKUP INFO: export_s3_uploader.cpp:702: [Export] [s3] Finish: self# [1:418:2389], success# 1, error# , multipart# 0, uploadId# (empty maybe) 2025-05-07T08:50:38.084770Z node 1 :DATASHARD_BACKUP DEBUG: export_scan.cpp:144: [Export] [scanner] Handle TEvExportScan::TEvFinish: self# [1:417:2386], msg# NKikimr::NDataShard::TEvExportScan::TEvFinish { Success: 1 Error: } 2025-05-07T08:50:38.095276Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5472: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 309 RawX2: 4294969592 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true 
Explain: "" BytesProcessed: 20 RowsProcessed: 1 } 2025-05-07T08:50:38.095355Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation FindRelatedPartByTabletId, TxId: 102, tablet: 72075186233409546, partId: 0 2025-05-07T08:50:38.095529Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 102:0, at schemeshard: 72057594046678944, message: Source { RawX1: 309 RawX2: 4294969592 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 20 RowsProcessed: 1 } 2025-05-07T08:50:38.095665Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:233: TBackup TProposedWaitParts, opId: 102:0 HandleReply TEvSchemaChanged at tablet# 72057594046678944 message# Source { RawX1: 309 RawX2: 4294969592 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 20 RowsProcessed: 1 } 2025-05-07T08:50:38.095751Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:655: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 102:0, shardIdx: 72057594046678944:1, datashard: 72075186233409546, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-05-07T08:50:38.095792Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:674: all shard schema changes has been received, operationId: 102:0, at schemeshard: 72057594046678944 2025-05-07T08:50:38.095836Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:686: send schema changes ack message, operation: 102:0, datashard: 72075186233409546, at schemeshard: 72057594046678944 2025-05-07T08:50:38.095882Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 102:0 129 -> 240 2025-05-07T08:50:38.096102Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:116: Unable to make a bill: kind# TBackup, opId# 102:0, reason# domain is not a serverless db, domain# /MyRoot, domainPathId# [OwnerId: 72057594046678944, LocalPathId: 1], IsDomainSchemeShard: 1, ParentDomainId: [OwnerId: 72057594046678944, LocalPathId: 1], ResourcesDomainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:50:38.100851Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-05-07T08:50:38.101124Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-05-07T08:50:38.101190Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046678944] TDone opId# 102:0 ProgressState 2025-05-07T08:50:38.101316Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#102:0 progress is 1/1 2025-05-07T08:50:38.101354Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-05-07T08:50:38.101400Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#102:0 progress is 1/1 2025-05-07T08:50:38.101436Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-05-07T08:50:38.101477Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: 
TOperation IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: true 2025-05-07T08:50:38.101572Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:337:2316] message: TxId: 102 2025-05-07T08:50:38.101632Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-05-07T08:50:38.101690Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 102:0 2025-05-07T08:50:38.101752Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 102:0 2025-05-07T08:50:38.101917Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-05-07T08:50:38.106883Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-05-07T08:50:38.106975Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:401:2373] TestWaitNotification: OK eventTxId 102
|89.1%| [TA] $(B)/ydb/core/blobstorage/dsproxy/ut_strategy/test-results/unittest/{meta.json ... results_accumulator.log}
|89.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/fq/streaming_optimize/py3test >> test_sql_streaming.py::test[suites-GroupByHopTimeExtractorUnusedColumns-default.txt] [FAIL]
>> Viewer::JsonStorageListingV2 [GOOD]
>> Viewer::JsonStorageListingV2GroupIdFilter
>> TTicketParserTest::AccessServiceAuthenticationOk
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_backup/unittest >> TBackupTests::ShouldSucceedOnMultiShardTable[Zstd] [GOOD]
Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140] 2025-05-07T08:50:36.792271Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:50:36.792382Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:50:36.792436Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:50:36.792484Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:50:36.792544Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:50:36.792592Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:50:36.792697Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s,
InflightLimit# 10 2025-05-07T08:50:36.792788Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:50:36.793699Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:50:36.798355Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:50:37.051021Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:50:37.051168Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:50:37.097934Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:50:37.098284Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:50:37.098532Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:50:37.115372Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:50:37.115816Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:50:37.116766Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:50:37.117054Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:50:37.128926Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:50:37.131035Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:50:37.131156Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:50:37.131254Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:50:37.131333Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:50:37.131398Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:50:37.131702Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:50:37.151404Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T08:50:37.372663Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { 
WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:50:37.372978Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:50:37.373253Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:50:37.373564Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:50:37.373667Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:50:37.391044Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:50:37.391287Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:50:37.391524Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:50:37.391602Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:50:37.391654Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:50:37.391700Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:50:37.398837Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:50:37.398918Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:50:37.398966Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:50:37.407100Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:50:37.407186Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:50:37.407253Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:50:37.407325Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:50:37.424981Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:50:37.434250Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:50:37.434530Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:50:37.435739Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:50:37.435917Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:50:37.435970Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:50:37.436349Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:50:37.436416Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:50:37.436622Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:50:37.436720Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:50:37.439596Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:50:37.439657Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:50:37.439862Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:50:37.439918Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 
8: TBackup TProposedWaitParts, opId: 102:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:50:38.151360Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:294: [Export] [s3] HandleScheme TEvExternalStorage::TEvPutObjectResponse: self# [1:472:2432], result# PutObjectResult { ETag: 332a7753281a04d1a9ebeec0320c919d } 2025-05-07T08:50:38.151475Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:487: [Export] [s3] HandleData TEvExternalStorage::TEvPutObjectResponse: self# [1:479:2437], result# PutObjectResult { ETag: f0d3871f5c9cc0f5c2e4afaffb7eeef2 } 2025-05-07T08:50:38.151519Z node 1 :DATASHARD_BACKUP INFO: export_s3_uploader.cpp:702: [Export] [s3] Finish: self# [1:479:2437], success# 1, error# , multipart# 0, uploadId# (empty maybe) 2025-05-07T08:50:38.151722Z node 1 :DATASHARD_BACKUP DEBUG: export_scan.cpp:130: [Export] [scanner] Handle TEvExportScan::TEvFeed: self# [1:471:2430] 2025-05-07T08:50:38.151956Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:445: [Export] [s3] Handle TEvExportScan::TEvBuffer: self# [1:472:2432], sender# [1:471:2430], msg# NKikimr::NDataShard::TEvExportScan::TEvBuffer { Last: 1 Checksum: } 2025-05-07T08:50:38.152070Z node 1 :DATASHARD_BACKUP DEBUG: export_scan.cpp:144: [Export] [scanner] Handle TEvExportScan::TEvFinish: self# [1:478:2435], msg# NKikimr::NDataShard::TEvExportScan::TEvFinish { Success: 1 Error: } 2025-05-07T08:50:38.160676Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 102 2025-05-07T08:50:38.160822Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 102 2025-05-07T08:50:38.160970Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 102 2025-05-07T08:50:38.161019Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 3 2025-05-07T08:50:38.161076Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5 2025-05-07T08:50:38.161195Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 102, ready parts: 0/1, is published: true FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000003 REQUEST: PUT /data_00.csv.zst HTTP/1.1 HEADERS: Host: localhost:32133 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: C878B2DE-83D1-4450-AFC0-A31F3487BBAD amz-sdk-request: attempt=1 content-length: 20 content-md5: 2qFn9G0TW8wfvJ9C+A5Jbw== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-storage-class: STANDARD S3_MOCK::HttpServeWrite: /data_00.csv.zst / / 20 2025-05-07T08:50:38.163632Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:487: [Export] [s3] HandleData TEvExternalStorage::TEvPutObjectResponse: self# [1:472:2432], result# PutObjectResult { ETag: daa167f46d135bcc1fbc9f42f80e496f } 
2025-05-07T08:50:38.163689Z node 1 :DATASHARD_BACKUP INFO: export_s3_uploader.cpp:702: [Export] [s3] Finish: self# [1:472:2432], success# 1, error# , multipart# 0, uploadId# (empty maybe) FAKE_COORDINATOR: Erasing txId 102 2025-05-07T08:50:38.163887Z node 1 :DATASHARD_BACKUP DEBUG: export_scan.cpp:144: [Export] [scanner] Handle TEvExportScan::TEvFinish: self# [1:471:2430], msg# NKikimr::NDataShard::TEvExportScan::TEvFinish { Success: 1 Error: } 2025-05-07T08:50:38.178035Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-05-07T08:50:38.215899Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5472: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 317 RawX2: 4294969597 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10 RowsProcessed: 1 } 2025-05-07T08:50:38.215977Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation FindRelatedPartByTabletId, TxId: 102, tablet: 72075186233409546, partId: 0 2025-05-07T08:50:38.216141Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 102:0, at schemeshard: 72057594046678944, message: Source { RawX1: 317 RawX2: 4294969597 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10 RowsProcessed: 1 } 2025-05-07T08:50:38.216255Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:233: TBackup TProposedWaitParts, opId: 102:0 HandleReply TEvSchemaChanged at tablet# 72057594046678944 message# Source { RawX1: 317 RawX2: 4294969597 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10 RowsProcessed: 1 } 2025-05-07T08:50:38.216352Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:655: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 102:0, shardIdx: 72057594046678944:1, datashard: 72075186233409546, left await: 1, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-05-07T08:50:38.216558Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:116: Unable to make a bill: kind# TBackup, opId# 102:0, reason# domain is not a serverless db, domain# /MyRoot, domainPathId# [OwnerId: 72057594046678944, LocalPathId: 1], IsDomainSchemeShard: 1, ParentDomainId: [OwnerId: 72057594046678944, LocalPathId: 1], ResourcesDomainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:50:38.217053Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5472: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 319 RawX2: 4294969598 } Origin: 72075186233409547 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10 RowsProcessed: 1 } 2025-05-07T08:50:38.217095Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation FindRelatedPartByTabletId, TxId: 102, tablet: 72075186233409547, partId: 0 2025-05-07T08:50:38.217223Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 102:0, at schemeshard: 72057594046678944, message: Source { RawX1: 319 RawX2: 4294969598 } Origin: 
72075186233409547 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10 RowsProcessed: 1 } 2025-05-07T08:50:38.217318Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:233: TBackup TProposedWaitParts, opId: 102:0 HandleReply TEvSchemaChanged at tablet# 72057594046678944 message# Source { RawX1: 319 RawX2: 4294969598 } Origin: 72075186233409547 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10 RowsProcessed: 1 } 2025-05-07T08:50:38.217363Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:655: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 102:0, shardIdx: 72057594046678944:2, datashard: 72075186233409547, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-05-07T08:50:38.217399Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:674: all shard schema changes has been received, operationId: 102:0, at schemeshard: 72057594046678944 2025-05-07T08:50:38.217441Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:686: send schema changes ack message, operation: 102:0, datashard: 72075186233409546, at schemeshard: 72057594046678944 2025-05-07T08:50:38.217498Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:686: send schema changes ack message, operation: 102:0, datashard: 72075186233409547, at schemeshard: 72057594046678944 2025-05-07T08:50:38.217529Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 102:0 129 -> 240 2025-05-07T08:50:38.217655Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:116: Unable to make a bill: kind# TBackup, opId# 102:0, reason# domain is not a serverless db, domain# /MyRoot, domainPathId# [OwnerId: 72057594046678944, LocalPathId: 1], IsDomainSchemeShard: 1, ParentDomainId: [OwnerId: 72057594046678944, LocalPathId: 1], ResourcesDomainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:50:38.227475Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-05-07T08:50:38.227695Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-05-07T08:50:38.228233Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-05-07T08:50:38.228291Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046678944] TDone opId# 102:0 ProgressState 2025-05-07T08:50:38.228414Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#102:0 progress is 1/1 2025-05-07T08:50:38.228466Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-05-07T08:50:38.228512Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#102:0 progress is 1/1 2025-05-07T08:50:38.228553Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-05-07T08:50:38.228594Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: true 
2025-05-07T08:50:38.228686Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:369:2337] message: TxId: 102 2025-05-07T08:50:38.228767Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-05-07T08:50:38.228816Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 102:0 2025-05-07T08:50:38.228867Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 102:0 2025-05-07T08:50:38.229016Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-05-07T08:50:38.243073Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-05-07T08:50:38.243168Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:450:2411] TestWaitNotification: OK eventTxId 102
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index_build/unittest >> IndexBuildTest::CancelBuild [GOOD]
Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140] 2025-05-07T08:50:18.861433Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:50:18.861547Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:50:18.861605Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:50:18.861657Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:50:18.861710Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:50:18.861755Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:50:18.861820Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:50:18.861901Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:50:18.862861Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:50:18.863337Z node 1
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:50:18.960821Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:50:18.960890Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:50:18.979102Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:50:18.979344Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:50:18.979548Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:50:18.986653Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:50:18.986987Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:50:18.987686Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:50:18.987884Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:50:18.991185Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:50:18.992818Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:50:18.992892Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:50:18.992971Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:50:18.993025Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:50:18.993138Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:50:18.993399Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:50:19.005417Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T08:50:19.150788Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:50:19.151101Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:50:19.151411Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:50:19.151696Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:50:19.151805Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:50:19.154680Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:50:19.154852Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:50:19.155081Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:50:19.155141Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:50:19.155185Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:50:19.155224Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:50:19.157593Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:50:19.157691Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:50:19.157740Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:50:19.160196Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:50:19.160265Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:50:19.160325Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:50:19.160396Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:50:19.164565Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:50:19.167074Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 
269090816 2025-05-07T08:50:19.167352Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:50:19.168518Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:50:19.168727Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:50:19.168796Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:50:19.169163Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:50:19.169238Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:50:19.169479Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:50:19.169574Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:50:19.172200Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:50:19.172308Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:50:19.172527Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:50:19.172573Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 
524025Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 281474976710760:0 128 -> 240 2025-05-07T08:50:39.528314Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 281474976710760:0, at schemeshard: 72057594046678944 2025-05-07T08:50:39.528372Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046678944] TDone opId# 281474976710760:0 ProgressState 2025-05-07T08:50:39.528460Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976710760:0 progress is 1/1 2025-05-07T08:50:39.528483Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 281474976710760 ready parts: 1/1 2025-05-07T08:50:39.528521Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976710760:0 progress is 1/1 2025-05-07T08:50:39.528542Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 281474976710760 ready parts: 1/1 2025-05-07T08:50:39.528571Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 281474976710760, ready parts: 1/1, is published: true 2025-05-07T08:50:39.528638Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [2:123:2149] message: TxId: 281474976710760 2025-05-07T08:50:39.528694Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 281474976710760 ready parts: 1/1 2025-05-07T08:50:39.528724Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 281474976710760:0 2025-05-07T08:50:39.528742Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 281474976710760:0 2025-05-07T08:50:39.528810Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 13 FAKE_COORDINATOR: Erasing txId 281474976710760 2025-05-07T08:50:39.531327Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6706: Handle: TEvNotifyTxCompletionResult: txId# 281474976710760 2025-05-07T08:50:39.531423Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6708: Message: TxId: 281474976710760 2025-05-07T08:50:39.531496Z node 2 :BUILD_INDEX INFO: schemeshard_build_index__progress.cpp:2321: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxReply : TEvNotifyTxCompletionResult, txId# 281474976710760, buildInfoId: 102 2025-05-07T08:50:39.531583Z node 2 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:2324: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxReply : TEvNotifyTxCompletionResult, txId# 281474976710760, buildInfo: TBuildInfo{ IndexBuildId: 102, Uid: , DomainPathId: [OwnerId: 72057594046678944, LocalPathId: 1], TablePathId: [OwnerId: 72057594046678944, LocalPathId: 2], IndexType: EIndexTypeGlobal, IndexName: index1, IndexColumn: index, State: Cancellation_Unlocking, IsCancellationRequested: 1, Issue: , SubscribersCount: 1, CreateSender: [2:1169:3022], AlterMainTableTxId: 0, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 281474976710757, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976710758, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 0, 
ApplyTxId: 281474976710759, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976710760, UnlockTxStatus: StatusAccepted, UnlockTxDone: 0, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }, Billed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }} 2025-05-07T08:50:39.534245Z node 2 :BUILD_INDEX INFO: schemeshard_build_index__progress.cpp:1105: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 102 Cancellation_Unlocking TBuildInfo{ IndexBuildId: 102, Uid: , DomainPathId: [OwnerId: 72057594046678944, LocalPathId: 1], TablePathId: [OwnerId: 72057594046678944, LocalPathId: 2], IndexType: EIndexTypeGlobal, IndexName: index1, IndexColumn: index, State: Cancellation_Unlocking, IsCancellationRequested: 1, Issue: , SubscribersCount: 1, CreateSender: [2:1169:3022], AlterMainTableTxId: 0, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 281474976710757, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976710758, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 0, ApplyTxId: 281474976710759, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976710760, UnlockTxStatus: StatusAccepted, UnlockTxDone: 1, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }, Billed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }} 2025-05-07T08:50:39.534364Z node 2 :BUILD_INDEX INFO: schemeshard_build_index_tx_base.cpp:25: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: Change state from Cancellation_Unlocking to Cancelled 2025-05-07T08:50:39.537171Z node 2 :BUILD_INDEX INFO: schemeshard_build_index__progress.cpp:1105: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 102 Cancelled TBuildInfo{ IndexBuildId: 102, Uid: , DomainPathId: [OwnerId: 72057594046678944, LocalPathId: 1], TablePathId: [OwnerId: 72057594046678944, LocalPathId: 2], IndexType: EIndexTypeGlobal, IndexName: index1, IndexColumn: index, State: Cancelled, IsCancellationRequested: 1, Issue: , SubscribersCount: 1, CreateSender: [2:1169:3022], AlterMainTableTxId: 0, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 281474976710757, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976710758, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 0, ApplyTxId: 281474976710759, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976710760, UnlockTxStatus: StatusAccepted, UnlockTxDone: 1, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }, Billed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }} 2025-05-07T08:50:39.537240Z node 2 :BUILD_INDEX TRACE: schemeshard_build_index_tx_base.cpp:325: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TIndexBuildInfo SendNotifications: : id# 102, subscribers count# 1 2025-05-07T08:50:39.537477Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-05-07T08:50:39.537537Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [2:1265:3107] TestWaitNotification: OK eventTxId 102 2025-05-07T08:50:39.540536Z node 2 :BUILD_INDEX DEBUG: schemeshard_build_index__get.cpp:19: TIndexBuilder::TXTYPE_GET_INDEX_BUILD: DoExecute DatabaseName: "/MyRoot" IndexBuildId: 102 
2025-05-07T08:50:39.540840Z node 2 :BUILD_INDEX DEBUG: schemeshard_build_index_tx_base.h:93: TIndexBuilder::TXTYPE_GET_INDEX_BUILD: Reply Status: SUCCESS IndexBuild { Id: 102 State: STATE_CANCELLED Settings { source_path: "/MyRoot/Table" index { name: "index1" index_columns: "index" global_index { } } max_shards_in_flight: 2 ScanSettings { MaxBatchRows: 1 } } Progress: 0 } BUILDINDEX RESPONSE Get: NKikimrIndexBuilder.TEvGetResponse Status: SUCCESS IndexBuild { Id: 102 State: STATE_CANCELLED Settings { source_path: "/MyRoot/Table" index { name: "index1" index_columns: "index" global_index { } } max_shards_in_flight: 2 ScanSettings { MaxBatchRows: 1 } } Progress: 0 } 2025-05-07T08:50:39.544458Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T08:50:39.544758Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table" took 341us result status StatusSuccess 2025-05-07T08:50:39.545209Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Table" PathDescription { Self { Name: "Table" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 6 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 6 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 TableSchemaVersion: 3 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "index" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 3 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 3 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 10 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 11 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:50:39.551571Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: 
schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/index1" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-05-07T08:50:39.551893Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/index1" took 377us result status StatusPathDoesNotExist 2025-05-07T08:50:39.552125Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/Table/index1\', error: path has been deleted (id: [OwnerId: 72057594046678944, LocalPathId: 3], type: EPathTypeTableIndex, state: EPathStateNotExist), drop stepId: 5000005, drop txId: 281474976710759" Path: "/MyRoot/Table/index1" PathId: 3 LastExistedPrefixPath: "/MyRoot/Table" LastExistedPrefixPathId: 2 LastExistedPrefixDescription { Self { Name: "Table" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/repl/ut/unittest >> HullReplWriteSst::Basic [GOOD] Test command err: commit chunk# 1 {ChunkIdx: 1 Offset: 101191680 Size: 33023592} 750534 commit chunk# 2 {ChunkIdx: 2 Offset: 101220352 Size: 32996136} 749910 commit chunk# 3 {ChunkIdx: 3 Offset: 101232640 Size: 32981088} 749568 commit chunk# 4 {ChunkIdx: 4 Offset: 101216256 Size: 32997676} 749945 commit chunk# 5 {ChunkIdx: 5 Offset: 101236736 Size: 32979592} 749534 commit chunk# 6 {ChunkIdx: 6 Offset: 101228544 Size: 32989184} 749752 commit chunk# 7 {ChunkIdx: 7 Offset: 101212160 Size: 33003880} 750086 commit chunk# 8 {ChunkIdx: 8 Offset: 101240832 Size: 32974400} 749416 commit chunk# 9 {ChunkIdx: 9 Offset: 101216256 Size: 32999744} 749992 commit chunk# 10 {ChunkIdx: 10 Offset: 101220352 Size: 32995300} 749891 commit chunk# 11 {ChunkIdx: 11 Offset: 101208064 Size: 33008764} 750197 commit chunk# 12 {ChunkIdx: 12 Offset: 101236736 Size: 32979284} 749527 commit chunk# 13 {ChunkIdx: 13 Offset: 101191680 Size: 33026012} 750589 commit chunk# 14 {ChunkIdx: 14 Offset: 101253120 Size: 32963928} 749178 commit chunk# 15 {ChunkIdx: 15 Offset: 101195776 Size: 33019808} 750448 commit chunk# 16 {ChunkIdx: 16 Offset: 101228544 Size: 32989184} 749752 commit chunk# 17 {ChunkIdx: 17 Offset: 101249024 Size: 32967360} 749256 commit chunk# 18 {ChunkIdx: 18 Offset: 101236736 Size: 32980956} 749565 commit chunk# 19 {ChunkIdx: 19 Offset: 101224448 Size: 32993012} 749839 commit chunk# 20 {ChunkIdx: 20 Offset: 101220352 Size: 32996972} 749929 commit chunk# 21 {ChunkIdx: 21 Offset: 101261312 Size: 32954820} 748971 commit chunk# 22 {ChunkIdx: 22 Offset: 101228544 Size: 32988964} 749747 commit chunk# 23 {ChunkIdx: 23 Offset: 101253120 Size: 32960848} 749108 commit chunk# 24 {ChunkIdx: 24 Offset: 101216256 Size: 32998996} 749975 commit chunk# 25 {ChunkIdx: 25 Offset: 101232640 Size: 32985048} 749658 commit chunk# 26 {ChunkIdx: 26 Offset: 101228544 Size: 32987336} 749710 commit chunk# 27 {ChunkIdx: 27 Offset: 101212160 Size: 33003968} 750088 commit chunk# 28 {ChunkIdx: 28 Offset: 101216256 Size: 33001460} 750031 >> 
ObjectStorageListingTest::ListingNoFilter
>> TTicketParserTest::AuthenticationWithUserAccount [GOOD]
>> TTicketParserTest::AuthenticationUnsupported
>> TTicketParserTest::NebiusAuthenticationUnavailable [GOOD]
>> TTicketParserTest::NebiusAuthorizationRetryError
>> TTicketParserTest::TicketFromCertificateWithValidationGood [GOOD]
>> TTicketParserTest::TicketFromCertificateWithValidationDifferentIssuersGood
|89.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_object_storage_listing/unittest
|89.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_object_storage_listing/unittest
|89.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_object_storage_listing/unittest
>> KqpStreamLookup::ReadTableDuringSplit [GOOD]
|89.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_object_storage_listing/unittest
>> TBackupTests::ShouldSucceedOnLargeData[Raw]
|89.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_object_storage_listing/unittest
|89.2%| [TA] $(B)/ydb/core/blobstorage/vdisk/repl/ut/test-results/unittest/{meta.json ... results_accumulator.log}
|89.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_backup/unittest
|89.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_rtmr/ydb-core-tx-schemeshard-ut_rtmr
|89.2%| [TA] {RESULT} $(B)/ydb/core/blobstorage/dsproxy/ut_strategy/test-results/unittest/{meta.json ... results_accumulator.log}
|89.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_rtmr/ydb-core-tx-schemeshard-ut_rtmr
|89.2%| [TA] {RESULT} $(B)/ydb/core/blobstorage/vdisk/repl/ut/test-results/unittest/{meta.json ... results_accumulator.log}
|89.2%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_rtmr/ydb-core-tx-schemeshard-ut_rtmr
>> AutoConfig::GetServicePoolsWith2CPUs [GOOD]
>> TTicketParserTest::LoginBad [GOOD]
>> TTicketParserTest::BulkAuthorizationWithRequiredPermissions
>> AutoConfig::GetServicePoolsWith4AndMoreCPUs [GOOD]
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_kqp/unittest >> KqpStreamLookup::ReadTableDuringSplit [GOOD]
Test command err: 2025-05-07T08:50:30.662023Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:105:2151], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:50:30.662180Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:50:30.662431Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002e6b/r3tmp/tmpF1VpvI/pdisk_1.dat 2025-05-07T08:50:31.110353Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-05-07T08:50:31.179274Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:50:31.234942Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:50:31.235115Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:50:31.247432Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:50:31.343416Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-05-07T08:50:31.744285Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:733:2615], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:50:31.744448Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:744:2620], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:50:31.744554Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:50:31.750976Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480 2025-05-07T08:50:31.944840Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:747:2623], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-05-07T08:50:32.047767Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:817:2662] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:50:42.889582Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715660. Ctx: { TraceId: 01jtmz0ynxar7jxds2hpe5pgdy, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MjE2OGRhYTMtZjM0ZmRjYTgtODdhNWRlNWUtY2ZkMjVlMjg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:50:42.951757Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jtmz0ynxar7jxds2hpe5pgdy", SessionId: ydb://session/3?node_id=1&id=MjE2OGRhYTMtZjM0ZmRjYTgtODdhNWRlNWUtY2ZkMjVlMjg=, Slow query, duration: 11.209550s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "UPSERT INTO `/Root/TestTable` (key, value) VALUES (0, 00), (1, 11), (2, 22), (3, 33), (4, 44), (5, 55), (6, 66), (7, 77), (8, 88), (9, 99), (10, 1010), (11, 1111), (12, 1212), (13, 1313), (14, 1414), (15, 1515), (16, 1616), (17, 1717), (18, 1818), (19, 1919), (20, 2020), (21, 2121), (22, 2222), (23, 2323), (24, 2424), (25, 2525), (26, 2626), (27, 2727), (28, 2828), (29, 2929), (30, 3030), (31, 3131), (32, 3232), (33, 3333), (34, 3434), (35, 3535), (36, 3636), (37, 3737), (38, 3838), (39, 3939), (40, 4040), (41, 4141), (42, 4242), (43, 4343), (44, 4444), (45, 4545), (46, 4646), (47, 4747), (48, 4848), (49, 4949), (50, 5050), (51, 5151), (52, 5252), (53, 5353), (54, 5454), (55, 5555), (56, 5656), (57, 5757), (58, 5858), (59, 5959), (60, 6060), (61, 6161), (62, 6262), (63, 6363), (64, 6464), (65, 6565), (66, 6666), (67, 6767), (68, 6868), (69, 6969), (70, 7070), (71, 7171), (72, 7272), (73, 7373), (74, 7474), (75, 7575), (76, 7676), (77, 7777), (78, 7878), (79, 7979), (80, 8080), (81, 8181), (82, 8282), (83, 8383), (84, 8484), (85, 8585), (86, 8686), (87, 8787), (88, 8888), (89, 8989), (90, 9090), (91, 9191), (92, 9292), (93, 9393), (94, 9494), (95, 9595), (96, 9696), (97, 9797), (98, 9898), (99, 9999), (100, 100100), (101, 101101), (102, 102102), (103, 103103), (104, 104104), (105, 105105), (106, 106106), (107, 107107), (108, 108108), (109, 109109), (110, 110110), (111, 111111), (112, 112112), (113, 113113), (114, 114114), (115, 115115), (116, 116116), (117, 117117), (118, 118118), (119, 119119), (120, 120120), (121, 121121), (122, 122122), (123, 123123), (124, 124124), (125, 125125), (126, 126126), (127, 127127), (128, 128128), (129, 129129), (130, 130130), (131, 131131), (132, 132132), (133, 133133), (134, 134134), (135, 135135), (136, 136136), (137, 137137), (138, 138138), (139, 139139), (140, 140140), (141, 141141), (142, 142142), (143, 143143), (144, 144144), (145, 145145), (146, 146146), (147, 147147), (148, 148148), (149, 149149), (150, 150150), (151, 151151), (152, 152152), (153, 153153), (154, 154154), (155, 155155), (156, 156156), (157, 157157), (158, 158158), (159, 159159), (160, 160160), (161, 161161), (162, 162162), (163, 163163), (164, 164164), (165, 165165), (166, 166166), (167, 167167), (168, 168168), (169, 169169), (170, 170170), (171, 171171), (172, 172172), (173, 173173), (174, 174174), (175, 175175), (176, 176176), (177, 177177), (178, 178178), (179, 179179), (180, 180180), (181, 
181181), (182, 182182), (183, 183183), (184, 184184), (185, 185185), (186, 186186), (187, 187187), (188, 188188), (189, 189189), (190, 190190), (191, 191191), (192, 192192), (193, 193193), (194, 194194), (195, 195195), (196, 196196), (197, 197197), (198, 198198), (199, 199199), (200, 200200), (201, 201201), (202, 202202), (203, 203203), (204, 204204), (205, 205205), (206, 206206), (207, 207207), (208, 208208), (209, 209209), (210, 210210), (211, 211211), (212, 212212), (213, 213213), (214, 214214), (215, 215215), (216, 216216), (217, 217217), (218, 218218), (219, 219219), (220, 220220), (221, 221221), (222, 222222), (223, 223223), (224, 224224), (225, 225225), (226, 226226), (227, 227227), (228, 228228), (229, 229229), (230, 230230), (231, 231231), (232, 232232), (233, 233233), (234, 234234), (235, 235235), (236, 236236), (237, 237237), (238, 238238), (239, 239239), (240, 240240), (241, 241241), (242, 242242), (243, 243243), (244, 244244), (245, 245245), (246, 246246), (247, 247247), (248, 248248), (249, 249249), (250, 250250), (251, 251251), (252, 252252), (253, 253253), (254, 254254), (255, 255255), (256, 256256), (257, 257257), (258, 258258), (259, 259259), (260, 260260), (261, 261261), (262, 262262), (263, 263263), (264, 264264), (265, 265265), (266, 266266), (267, 267267), (268, 268268), (269, 269269), (270, 270270), (271, 271271), (272, 272272), (273, 273273), (274, 274274), (275, 275275), (276, 276276), (277, 277277), (278, 278278), (279, 279279), (280, 280280), (281, 281281), (282, 282282), (283, 283283), (284, 284284), (285, 285285), (286, 286286), (287, 287287), (288, 288288), (289, 289289), (290, 290290), (291, 291291), (292, 292292), (293, 293293), (294, 294294), (295, 295295), (296, 296296), (297, 297297), (298, 298298), (299, 299299), (300, 300300), (301, 301301), (302, 302302), (303, 303303), (304, 304304), (305, 305305), (306, 306306), (307, 307307), (308, 308308), (309, 309309), (310, 310310), (311, 311311), (312, 312312), (313, 313313), (314, 314314), (315, 315315), (316, 316316), (317, 317317), (318, 318318), (319, 319319), (320, 320320), (321, 321321), (322, 322322), (323, 323323), (324, 324324), (325, 325325), (326, 326326), (327, 327327), (328, 328328), (329, 329329), (330, 330330), (331, 331331), (332, 332332), (333, 333333), (334, 334334), (335, 335335), (336, 336336), (337, 337337), (338, 338338), (339, 339339), (340, 340340), (341, 341341), (342, 342342), (343, 343343), (344, 344344), (345, 345345), (346, 346346), (347, 347347), (348, 348348), (349, 349349), (350, 350350), (351, 351351), (352, 352352), (353, 353353), (354, 354354), (355, 355355), (356, 356356), (357, 357357), (358, 358358), (359, 359359), (360, 360360), (361, 361361), (362, 362362), (363, 363363), (364, 364364), (365, 365365), (366, 366366), (367, 367367), (368, 368368), (369, 369369), (370, 370370), (371, 371371), (372, 372372), (373, 373373), (374, 374374), (375, 375375), (376, 376376), (377, 377377), (378, 378378), (379, 379379), (380, 380380), (381, 381381), (382, 382382), (383, 383383), (384, 384384), (385, 385385), (386, 386386), (387, 387387), (388, 388388), (389, 389389), (390, 390390), (391, 391391), (392, 392392), (393, 393393), (394, 394394), (395, 395395), (396, 396396), (397, 397397), (398, 398398), (399, 399399), (400, 400400), (401, 401401), (402, 402402), (403, 403403), (404, 404404), (405, 405405), (406, 406406), (407, 407407), (408, 408408), (409, 409409), (410, 410410), (411, 411411), (412, 412412), (413, 413413), (414, 414414), (415, 415415), (416, 416416), (417, 417417), (418, 
418418), (419, 419419), (420, 420420), (421, 421421), (422, 422422), (423, 423423), (424, 424424), (425, 425425), (426, 426426), (427, 427427), (428, 428428), (429, 429429), (430, 430430), (431, 431431), (432, 432432), (433, 433433), (434, 434434), (435, 435435), (436, 436436), (437, 437437), (438, 438438), (439, 439439), (440, 440440), (441, 441441), (442, 442442), (443, 443443), (444, 444444), (445, 445445), (446, 446446), (447, 447447), (448, 448448), (449, 449449), (450, 450450), (451, 451451), (452, 452452), (453, 453453), (454, 454454), (455, 455455), (456, 456456), (457, 457457), (458, 458458), (459, 459459), (460, 460460), (461, 461461), (462, 462462), (463, 463463), (464, 464464), (465, 465465), (466, 466466), (467, 467467), (468, 468468), (469, 469469), (470, 470470), (471, 471471), (472, 472472), (473, 473473), (474, 474474), (475, 475475), (476, 476476), (477, 477477), (478, 478478), (479, 479479), (480, 480480), (481, 481481), (482, 482482), (483, 483483), (484, 484484), (485, 485485), (486, 486486), (487, 487487), (488, 488488), (489, 489489), (490, 490490), (491, 491491), (492, 492492), (493, 493493), (494, 494494), (495, 495495), (496, 496496), (497, 497497), (498, 498498), (499, 499499), (500, 500500), (501, 501501), (502, 502502), (503, 503503), (504, 504504), (505, 505505), (506, 506506), (507, 507507), (508, 508508), (509, 509509), (510, 510510), (511, 511511), (512, 512512), (513, 513513), (514, 514514), (515, 515515), (516, 516516), (517, 517517), (518, 518518), (519, 519519), (520, 520520), (521, 521521), (522, 522522), (523, 523523), (524, 524524), (525, 525525), (526, 526526), (527, 527527), (528, 528528), (529, 529529), (530, 530530), (531, 531531), (532, 532532), (533, 533533), (534, 534534), (535, 535535), (536, 536536), (537, 537537), (538, 538538), (539, 539539), (540, 540540), (541, 541541), (542, 542542), (543, 543543), (544, 544544), (545, 545545), (546, 546546), (547, 547547), (548, 548548), (549, 549549), (550, 550550), (551, 551551), (552, 552552), (553, 553553), (554, 554554), (555, 555555), (556, 556556), (557, 557557), (558, 558558), (559, 559559), (560, 560560), (561, 561561), (562, 562562), (563, 563563), (564, 564564), (565, 565565), (566, 566566), (567, 567567), (568, 568568), (569, 569569), (570, 570570), (571, 571571), (572, 572572), (573, 573573), (574, 574574), (575, 575575), (576, 576576), (577, 577577), (578, 578578), (579, 579579), (580, 580580), (581, 581581), (582, 582582), (583, 583583), (584, 584584), (585, 585585), (586, 586586), (587, 587587), (588, 588588), (589, 589589), (590, 590590), (591, 591591), (592, 592592), (593, 593593), (594, 594594), (595, 595595), (596, 596596), (597, 597597), (598, 598598), (599, 599599), (600, 600600), (601, 601601), (602, 602602), (603, 603603), (604, 604604), (605, 605605), (606, 606606), (607, 607607), (608, 608608), (609, 609609), (610, 610610), (611, 611611), (612, 612612), (613, 613613), (614, 614614), (615, 615615), (616, 616616), (617, 617617), (618, 618618), (619, 619619), (620, 620620), (621, 621621), (622, 622622), (623, 623623), (624, 624624), (625, 625625), (626, 626626), (627, 627627), (628, 628628), (629, 629629), (630, 630630), (631, 631631), (632, 632632), (633, 633633), (634, 634634), (635, 635635), (636, 636636), (637, 637637), (638, 638638), (639, 639639), (640, 640640), (641, 641641), (642, 642642), (643, 643643), (644, 644644), (645, 645645), (646, 646646), (647, 647647), (648, 648648), (649, 649649), (650, 650650), (651, 651651), (652, 652652), (653, 653653), (654, 654654), (655, 
655655), (656, 656656), (657, 657657), (658, 658658), (659, 659659), (660, 660660), (661, 661661), (662, 662662), (663, 663663), (664, 664664), (665, 665665), (666, 666666), (667, 667667), (668, 668668), (669, 669669), (670, 670670), (671, 671671), (672, 672672), (673, 673673), (674, 674674), (675, 675675), (676, 676676), (677, 677677), (678, 678678), (679, 679679), (680, 680680), (681, 681681), (682, 682682), (683, 683683), (684, 684684), (685, 685685), (686, 686686), (687, 687687), (688, 688688), (689, 689689), (690, 690690), (691, 691691), (692, 692692), (693, 693693), (694, 694694), (695, 695695), (696, 696696), (697, 697697), (698, 698698), (699, 699699), (700, 700700), (701, 701701), (702, 702702), (703, 703703), (704, 704704), (705, 705705), (706, 706706), (707, 707707), (708, 708708), (709, 709709), (710, 710710), (711, 711711), (712, 712712), (713, 713713), (714, 714714), (715, 715715), (716, 716716), (717, 717717), (718, 718718), (719, 719719), (720, 720720), (721, 721721), (722, 722722), (723, 723723), (724, 724724), (725, 725725), (726, 726726), (727, 727727), (728, 728728), (729, 729729), (730, 730730), (731, 731731), (732, 732732), (733, 733733), (734, 734734), (735, 735735), (736, 736736), (737, 737737), (738, 738738), (739, 739739), (740, 740740), (741, 741741), (742, 742742), (743, 743743), (744, 744744), (745, 745745), (746, 746746), (747, 747747), (748, 748748), (749, 749749), (750, 750750), (751, 751751), (752, 752752), (753, 753753), (754, 754754), (755, 755755), (756, 756756), (757, 757757), (758, 758758), (759, 759759), (760, 760760), (761, 761761), (762, 762762), (763, 763763), (764, 764764), (765, 765765), (766, 766766), (767, 767767), (768, 768768), (769, 769769), (770, 770770), (771, 771771), (772, 772772), (773, 773773), (774, 774774), (775, 775775), (776, 776776), (777, 777777), (778, 778778), (779, 779779), (780, 780780), (781, 781781), (782, 782782), (783, 783783), (784, 784784), (785, 785785), (786, 786786), (787, 787787), (788, 788788), (789, 789789), (790, 790790), (791, 791791), (792, 792792), (793, 793793), (794, 794794), (795, 795795), (796, 796796), (797, 797797), (798, 798798), (799, 799799), (800, 800800), (801, 801801), (802, 802802), (803, 803803), (804, 804804), (805, 805805), (806, 806806), (807, 807807), (808, 808808), (809, 809809), (810, 810810), (811, 811811), (812, 812812), (813, 813813), (814, 814814), (815, 815815), (816, 816816), (817, 817817), (818, 818818), (819, 819819), (820, 820820), (821, 821821), (822, 822822), (823, 823823), (824, 824824), (825, 825825), (826, 826826), (827, 827827), (828, 828828), (829, 829829), (830, 830830), (831, 831831), (832, 832832), (833, 833833), (834, 834834), (835, 835835), (836, 836836), (837, 837837), (838, 838838), (839, 839839), (840, 840840), (841, 841841), (842, 842842), (843, 843843), (844, 844844), (845, 845845), (846, 846846), (847, 847847), (848, 848848), (849, 849849), (850, 850850), (851, 851851), (852, 852852), (853, 853853), (854, 854854), (855, 855855), (856, 856856), (857, 857857), (858, 858858), (859, 859859), (860, 860860), (861, 861861), (862, 862862), (863, 863863), (864, 864864), (865, 865865), (866, 866866), (867, 867867), (868, 868868), (869, 869869), (870, 870870), (871, 871871), (872, 872872), (873, 873873), (874, 874874), (875, 875875), (876, 876876), (877, 877877), (878, 878878), (879, 879879), (880, 880880), (881, 881881), (882, 882882), (883, 883883), (884, 884884), (885, 885885), (886, 886886), (887, 887887), (888, 888888), (889, 889889), (890, 890890), (891, 891891), (892, 
892892), (893, 893893), (894, 894894), (895, 895895), (896, 896896), (897, 897897), (898, 898898), (899, 899899), (900, 900900), (901, 901901), (902, 902902), (903, 903903), (904, 904904), (905, 905905), (906, 906906), (907, 907907), (908, 908908), (909, 909909), (910, 910910), (911, 911911), (912, 912912), (913, 913913), (914, 914914), (915, 915915), (916, 916916), (917, 917917), (918, 918918), (919, 919919), (920, 920920), (921, 921921), (922, 922922), (923, 923923), (924, 924924), (925, 925925), (926, 926926), (927, 927927), (928, 928928), (929, 929929), (930, 930930), (931, 931931), (932, 932932), (933, 933933), (934, 934934), (935, 935935), (936, 936936), (937, 937937), (938, 938938), (939, 939939), (940, 940940), (941, 941941), (942, 942942), (943, 943943), (944, 944944), (945, 945945), (946, 946946), (947, 947947), (948, 948948), (949, 949949), (950, 950950), (951, 951951), (952, 952952), (953, 953953), (954, 954954), (955, 955955), (956, 956956), (957, 957957), (958, 958958), (959, 959959), (960, 960960), (961, 961961), (962, 962962), (963, 963963), (964, 964964), (965, 965965), (966, 966966), (967, 967967), (968, 968968), (969, 969969), (970, 970970), (971, 971971), (972, 972972), (973, 973973), (974, 974974), (975, 975975), (976, 976976), (977, 977977), (978, 978978), (979, 979979), (980, 980980), (981, 981981), (982, 982982), (983, 983983), (984, 984984), (985, 985985), (986, 986986), (987, 987987), (988, 988988), (989, 989989), (990, 990990), (991, 991991), (992, 992992), (993, 993993), (994, 994994), (995, 995995), (996, 996996), (997, 997997), (998, 998998), (999, 999999), (10000, 10000);", parameters: 0b 2025-05-07T08:50:43.412987Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715661. Ctx: { TraceId: 01jtmz19mq5rf9q7mjh31kpvjn, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YmVlYTM3YTYtZDQ4Nzk3NzMtNjI3ZDJmNTMtMjQzMWY5ODY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root Captured TEvDataShard::TEvRead from KQP_SOURCE_READ_ACTOR to TX_DATASHARD_ACTOR 2025-05-07T08:50:43.437531Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715662. Ctx: { TraceId: 01jtmz19mq5rf9q7mjh31kpvjn, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YmVlYTM3YTYtZDQ4Nzk3NzMtNjI3ZDJmNTMtMjQzMWY5ODY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root
Captured TEvDataShard::TEvRead from KQP_STREAM_LOOKUP_ACTOR to TX_DATASHARD_ACTOR
--- split started ---
--- split finished ---
Captured TEvDataShard::TEvRead from KQP_STREAM_LOOKUP_ACTOR to TX_DATASHARD_ACTOR
Captured TEvDataShard::TEvRead from KQP_STREAM_LOOKUP_ACTOR to TX_DATASHARD_ACTOR
>> KqpStreamLookup::ReadTableWithIndexDuringSplit [GOOD]
>> AutoConfig::GetASPoolsWith2CPUs [GOOD]
>> TTicketParserTest::LoginGood [GOOD]
>> TTicketParserTest::LoginGoodWithGroups
>> VectorIndexBuildTest::BaseCase [GOOD]
>> AutoConfig::GetServicePoolsWith3CPUs [GOOD]
>> TSubDomainTest::CheckAccessCopyTable [GOOD]
>> TSubDomainTest::ConsistentCopyTable
|89.2%| [TS] {asan, default-linux-x86_64, release} ydb/core/driver_lib/run/ut/unittest
>> AutoConfig::GetServicePoolsWith2CPUs [GOOD]
|89.2%| [TS] {asan, default-linux-x86_64, release} ydb/core/driver_lib/run/ut/unittest
>> AutoConfig::GetServicePoolsWith4AndMoreCPUs [GOOD]
>> TTicketParserTest::AccessServiceAuthenticationOk [GOOD]
>> TTicketParserTest::AccessServiceAuthenticationApiKeyOk
|89.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/persqueue/ut/ydb-core-persqueue-ut
|89.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/persqueue/ut/ydb-core-persqueue-ut
|89.2%| [LD] {RESULT} $(B)/ydb/core/persqueue/ut/ydb-core-persqueue-ut
|89.2%| [TS] {asan, default-linux-x86_64, release} ydb/core/driver_lib/run/ut/unittest
>> AutoConfig::GetASPoolsWith2CPUs [GOOD]
|89.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/ut/pg/ydb-core-kqp-ut-pg
|89.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/pg/ydb-core-kqp-ut-pg
|89.2%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/pg/ydb-core-kqp-ut-pg
>> TTicketParserTest::TicketFromCertificateCheckIssuerGood [GOOD]
>> TTicketParserTest::TicketFromCertificateCheckIssuerBad
|89.2%| [TS] {asan, default-linux-x86_64, release} ydb/core/driver_lib/run/ut/unittest
>> AutoConfig::GetServicePoolsWith3CPUs [GOOD]
|89.2%| [TS] {asan, default-linux-x86_64, release} ydb/core/driver_lib/run/ut/unittest
>> TSubDomainTest::CreateDummyTabletsInDifferentDomains [GOOD]
>> TSubDomainTest::CoordinatorRunAtSubdomainNodeWhenAvailable
>> Viewer::SharedDoesntShowExclusiveNodes [GOOD]
>> Viewer::ServerlessWithExclusiveNodesCheckTable
>> TTicketParserTest::AuthenticationUnsupported [GOOD]
>> TTicketParserTest::AuthenticationUnknown
>> AutoConfig::GetServicePoolsWith1CPU [GOOD]
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_kqp/unittest >> KqpStreamLookup::ReadTableWithIndexDuringSplit [GOOD]
Test command err: 2025-05-07T08:50:29.115930Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:105:2151], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:50:29.116088Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:50:29.116361Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002e88/r3tmp/tmpe6Pfyo/pdisk_1.dat 2025-05-07T08:50:29.731321Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-05-07T08:50:29.821363Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:50:29.878623Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:50:29.879450Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:50:29.892285Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:50:29.992748Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-05-07T08:50:30.422527Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:781:2652], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:50:30.422798Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:791:2657], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:50:30.423371Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:50:30.430463Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480 2025-05-07T08:50:30.646928Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:795:2660], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-05-07T08:50:30.738411Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:867:2701] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:50:44.943097Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715660. Ctx: { TraceId: 01jtmz0xceaj959fzsfjakan19, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MWRlZWEwN2EtNzlkNmZhM2UtODM4NWQzYzctMThhZjdhMmM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:50:44.999006Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715661. Ctx: { TraceId: 01jtmz0xceaj959fzsfjakan19, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MWRlZWEwN2EtNzlkNmZhM2UtODM4NWQzYzctMThhZjdhMmM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:50:45.121010Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jtmz0xceaj959fzsfjakan19", SessionId: ydb://session/3?node_id=1&id=MWRlZWEwN2EtNzlkNmZhM2UtODM4NWQzYzctMThhZjdhMmM=, Slow query, duration: 14.706426s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "UPSERT INTO `/Root/TestTable` (key, value) VALUES (0, 00), (1, 11), (2, 22), (3, 33), (4, 44), (5, 55), (6, 66), (7, 77), (8, 88), (9, 99), (10, 1010), (11, 1111), (12, 1212), (13, 1313), (14, 1414), (15, 1515), (16, 1616), (17, 1717), (18, 1818), (19, 1919), (20, 2020), (21, 2121), (22, 2222), (23, 2323), (24, 2424), (25, 2525), (26, 2626), (27, 2727), (28, 2828), (29, 2929), (30, 3030), (31, 3131), (32, 3232), (33, 3333), (34, 3434), (35, 3535), (36, 3636), (37, 3737), (38, 3838), (39, 3939), (40, 4040), (41, 4141), (42, 4242), (43, 4343), (44, 4444), (45, 4545), (46, 4646), (47, 4747), (48, 4848), (49, 4949), (50, 5050), (51, 5151), (52, 5252), (53, 5353), (54, 5454), (55, 5555), (56, 5656), (57, 5757), (58, 5858), (59, 5959), (60, 6060), (61, 6161), (62, 6262), (63, 6363), (64, 6464), (65, 6565), (66, 6666), (67, 6767), (68, 6868), (69, 6969), (70, 7070), (71, 7171), (72, 7272), (73, 7373), (74, 7474), (75, 7575), (76, 7676), (77, 7777), (78, 7878), (79, 7979), (80, 8080), (81, 8181), (82, 8282), (83, 8383), (84, 8484), (85, 8585), (86, 8686), (87, 8787), (88, 8888), (89, 8989), (90, 9090), (91, 9191), (92, 9292), (93, 9393), (94, 9494), (95, 9595), (96, 9696), (97, 9797), (98, 9898), (99, 9999), (100, 100100), (101, 101101), (102, 102102), (103, 103103), (104, 104104), (105, 105105), (106, 106106), (107, 107107), (108, 108108), (109, 109109), (110, 110110), (111, 111111), (112, 112112), (113, 113113), (114, 114114), (115, 115115), (116, 116116), (117, 117117), (118, 118118), (119, 119119), (120, 120120), (121, 121121), (122, 122122), (123, 123123), (124, 124124), (125, 125125), (126, 126126), (127, 127127), (128, 128128), (129, 129129), (130, 130130), (131, 131131), (132, 132132), (133, 133133), (134, 134134), (135, 135135), (136, 136136), (137, 137137), (138, 138138), (139, 139139), (140, 140140), (141, 141141), (142, 142142), (143, 143143), (144, 144144), (145, 145145), (146, 146146), (147, 147147), (148, 148148), (149, 149149), (150, 150150), (151, 151151), (152, 152152), (153, 153153), (154, 154154), (155, 155155), (156, 156156), (157, 157157), 
(158, 158158), (159, 159159), (160, 160160), (161, 161161), (162, 162162), (163, 163163), (164, 164164), (165, 165165), (166, 166166), (167, 167167), (168, 168168), (169, 169169), (170, 170170), (171, 171171), (172, 172172), (173, 173173), (174, 174174), (175, 175175), (176, 176176), (177, 177177), (178, 178178), (179, 179179), (180, 180180), (181, 181181), (182, 182182), (183, 183183), (184, 184184), (185, 185185), (186, 186186), (187, 187187), (188, 188188), (189, 189189), (190, 190190), (191, 191191), (192, 192192), (193, 193193), (194, 194194), (195, 195195), (196, 196196), (197, 197197), (198, 198198), (199, 199199), (200, 200200), (201, 201201), (202, 202202), (203, 203203), (204, 204204), (205, 205205), (206, 206206), (207, 207207), (208, 208208), (209, 209209), (210, 210210), (211, 211211), (212, 212212), (213, 213213), (214, 214214), (215, 215215), (216, 216216), (217, 217217), (218, 218218), (219, 219219), (220, 220220), (221, 221221), (222, 222222), (223, 223223), (224, 224224), (225, 225225), (226, 226226), (227, 227227), (228, 228228), (229, 229229), (230, 230230), (231, 231231), (232, 232232), (233, 233233), (234, 234234), (235, 235235), (236, 236236), (237, 237237), (238, 238238), (239, 239239), (240, 240240), (241, 241241), (242, 242242), (243, 243243), (244, 244244), (245, 245245), (246, 246246), (247, 247247), (248, 248248), (249, 249249), (250, 250250), (251, 251251), (252, 252252), (253, 253253), (254, 254254), (255, 255255), (256, 256256), (257, 257257), (258, 258258), (259, 259259), (260, 260260), (261, 261261), (262, 262262), (263, 263263), (264, 264264), (265, 265265), (266, 266266), (267, 267267), (268, 268268), (269, 269269), (270, 270270), (271, 271271), (272, 272272), (273, 273273), (274, 274274), (275, 275275), (276, 276276), (277, 277277), (278, 278278), (279, 279279), (280, 280280), (281, 281281), (282, 282282), (283, 283283), (284, 284284), (285, 285285), (286, 286286), (287, 287287), (288, 288288), (289, 289289), (290, 290290), (291, 291291), (292, 292292), (293, 293293), (294, 294294), (295, 295295), (296, 296296), (297, 297297), (298, 298298), (299, 299299), (300, 300300), (301, 301301), (302, 302302), (303, 303303), (304, 304304), (305, 305305), (306, 306306), (307, 307307), (308, 308308), (309, 309309), (310, 310310), (311, 311311), (312, 312312), (313, 313313), (314, 314314), (315, 315315), (316, 316316), (317, 317317), (318, 318318), (319, 319319), (320, 320320), (321, 321321), (322, 322322), (323, 323323), (324, 324324), (325, 325325), (326, 326326), (327, 327327), (328, 328328), (329, 329329), (330, 330330), (331, 331331), (332, 332332), (333, 333333), (334, 334334), (335, 335335), (336, 336336), (337, 337337), (338, 338338), (339, 339339), (340, 340340), (341, 341341), (342, 342342), (343, 343343), (344, 344344), (345, 345345), (346, 346346), (347, 347347), (348, 348348), (349, 349349), (350, 350350), (351, 351351), (352, 352352), (353, 353353), (354, 354354), (355, 355355), (356, 356356), (357, 357357), (358, 358358), (359, 359359), (360, 360360), (361, 361361), (362, 362362), (363, 363363), (364, 364364), (365, 365365), (366, 366366), (367, 367367), (368, 368368), (369, 369369), (370, 370370), (371, 371371), (372, 372372), (373, 373373), (374, 374374), (375, 375375), (376, 376376), (377, 377377), (378, 378378), (379, 379379), (380, 380380), (381, 381381), (382, 382382), (383, 383383), (384, 384384), (385, 385385), (386, 386386), (387, 387387), (388, 388388), (389, 389389), (390, 390390), (391, 391391), (392, 392392), (393, 393393), (394, 394394), 
(395, 395395), (396, 396396), (397, 397397), (398, 398398), (399, 399399), (400, 400400), (401, 401401), (402, 402402), (403, 403403), (404, 404404), (405, 405405), (406, 406406), (407, 407407), (408, 408408), (409, 409409), (410, 410410), (411, 411411), (412, 412412), (413, 413413), (414, 414414), (415, 415415), (416, 416416), (417, 417417), (418, 418418), (419, 419419), (420, 420420), (421, 421421), (422, 422422), (423, 423423), (424, 424424), (425, 425425), (426, 426426), (427, 427427), (428, 428428), (429, 429429), (430, 430430), (431, 431431), (432, 432432), (433, 433433), (434, 434434), (435, 435435), (436, 436436), (437, 437437), (438, 438438), (439, 439439), (440, 440440), (441, 441441), (442, 442442), (443, 443443), (444, 444444), (445, 445445), (446, 446446), (447, 447447), (448, 448448), (449, 449449), (450, 450450), (451, 451451), (452, 452452), (453, 453453), (454, 454454), (455, 455455), (456, 456456), (457, 457457), (458, 458458), (459, 459459), (460, 460460), (461, 461461), (462, 462462), (463, 463463), (464, 464464), (465, 465465), (466, 466466), (467, 467467), (468, 468468), (469, 469469), (470, 470470), (471, 471471), (472, 472472), (473, 473473), (474, 474474), (475, 475475), (476, 476476), (477, 477477), (478, 478478), (479, 479479), (480, 480480), (481, 481481), (482, 482482), (483, 483483), (484, 484484), (485, 485485), (486, 486486), (487, 487487), (488, 488488), (489, 489489), (490, 490490), (491, 491491), (492, 492492), (493, 493493), (494, 494494), (495, 495495), (496, 496496), (497, 497497), (498, 498498), (499, 499499), (500, 500500), (501, 501501), (502, 502502), (503, 503503), (504, 504504), (505, 505505), (506, 506506), (507, 507507), (508, 508508), (509, 509509), (510, 510510), (511, 511511), (512, 512512), (513, 513513), (514, 514514), (515, 515515), (516, 516516), (517, 517517), (518, 518518), (519, 519519), (520, 520520), (521, 521521), (522, 522522), (523, 523523), (524, 524524), (525, 525525), (526, 526526), (527, 527527), (528, 528528), (529, 529529), (530, 530530), (531, 531531), (532, 532532), (533, 533533), (534, 534534), (535, 535535), (536, 536536), (537, 537537), (538, 538538), (539, 539539), (540, 540540), (541, 541541), (542, 542542), (543, 543543), (544, 544544), (545, 545545), (546, 546546), (547, 547547), (548, 548548), (549, 549549), (550, 550550), (551, 551551), (552, 552552), (553, 553553), (554, 554554), (555, 555555), (556, 556556), (557, 557557), (558, 558558), (559, 559559), (560, 560560), (561, 561561), (562, 562562), (563, 563563), (564, 564564), (565, 565565), (566, 566566), (567, 567567), (568, 568568), (569, 569569), (570, 570570), (571, 571571), (572, 572572), (573, 573573), (574, 574574), (575, 575575), (576, 576576), (577, 577577), (578, 578578), (579, 579579), (580, 580580), (581, 581581), (582, 582582), (583, 583583), (584, 584584), (585, 585585), (586, 586586), (587, 587587), (588, 588588), (589, 589589), (590, 590590), (591, 591591), (592, 592592), (593, 593593), (594, 594594), (595, 595595), (596, 596596), (597, 597597), (598, 598598), (599, 599599), (600, 600600), (601, 601601), (602, 602602), (603, 603603), (604, 604604), (605, 605605), (606, 606606), (607, 607607), (608, 608608), (609, 609609), (610, 610610), (611, 611611), (612, 612612), (613, 613613), (614, 614614), (615, 615615), (616, 616616), (617, 617617), (618, 618618), (619, 619619), (620, 620620), (621, 621621), (622, 622622), (623, 623623), (624, 624624), (625, 625625), (626, 626626), (627, 627627), (628, 628628), (629, 629629), (630, 630630), (631, 631631), 
(632, 632632), (633, 633633), (634, 634634), (635, 635635), (636, 636636), (637, 637637), (638, 638638), (639, 639639), (640, 640640), (641, 641641), (642, 642642), (643, 643643), (644, 644644), (645, 645645), (646, 646646), (647, 647647), (648, 648648), (649, 649649), (650, 650650), (651, 651651), (652, 652652), (653, 653653), (654, 654654), (655, 655655), (656, 656656), (657, 657657), (658, 658658), (659, 659659), (660, 660660), (661, 661661), (662, 662662), (663, 663663), (664, 664664), (665, 665665), (666, 666666), (667, 667667), (668, 668668), (669, 669669), (670, 670670), (671, 671671), (672, 672672), (673, 673673), (674, 674674), (675, 675675), (676, 676676), (677, 677677), (678, 678678), (679, 679679), (680, 680680), (681, 681681), (682, 682682), (683, 683683), (684, 684684), (685, 685685), (686, 686686), (687, 687687), (688, 688688), (689, 689689), (690, 690690), (691, 691691), (692, 692692), (693, 693693), (694, 694694), (695, 695695), (696, 696696), (697, 697697), (698, 698698), (699, 699699), (700, 700700), (701, 701701), (702, 702702), (703, 703703), (704, 704704), (705, 705705), (706, 706706), (707, 707707), (708, 708708), (709, 709709), (710, 710710), (711, 711711), (712, 712712), (713, 713713), (714, 714714), (715, 715715), (716, 716716), (717, 717717), (718, 718718), (719, 719719), (720, 720720), (721, 721721), (722, 722722), (723, 723723), (724, 724724), (725, 725725), (726, 726726), (727, 727727), (728, 728728), (729, 729729), (730, 730730), (731, 731731), (732, 732732), (733, 733733), (734, 734734), (735, 735735), (736, 736736), (737, 737737), (738, 738738), (739, 739739), (740, 740740), (741, 741741), (742, 742742), (743, 743743), (744, 744744), (745, 745745), (746, 746746), (747, 747747), (748, 748748), (749, 749749), (750, 750750), (751, 751751), (752, 752752), (753, 753753), (754, 754754), (755, 755755), (756, 756756), (757, 757757), (758, 758758), (759, 759759), (760, 760760), (761, 761761), (762, 762762), (763, 763763), (764, 764764), (765, 765765), (766, 766766), (767, 767767), (768, 768768), (769, 769769), (770, 770770), (771, 771771), (772, 772772), (773, 773773), (774, 774774), (775, 775775), (776, 776776), (777, 777777), (778, 778778), (779, 779779), (780, 780780), (781, 781781), (782, 782782), (783, 783783), (784, 784784), (785, 785785), (786, 786786), (787, 787787), (788, 788788), (789, 789789), (790, 790790), (791, 791791), (792, 792792), (793, 793793), (794, 794794), (795, 795795), (796, 796796), (797, 797797), (798, 798798), (799, 799799), (800, 800800), (801, 801801), (802, 802802), (803, 803803), (804, 804804), (805, 805805), (806, 806806), (807, 807807), (808, 808808), (809, 809809), (810, 810810), (811, 811811), (812, 812812), (813, 813813), (814, 814814), (815, 815815), (816, 816816), (817, 817817), (818, 818818), (819, 819819), (820, 820820), (821, 821821), (822, 822822), (823, 823823), (824, 824824), (825, 825825), (826, 826826), (827, 827827), (828, 828828), (829, 829829), (830, 830830), (831, 831831), (832, 832832), (833, 833833), (834, 834834), (835, 835835), (836, 836836), (837, 837837), (838, 838838), (839, 839839), (840, 840840), (841, 841841), (842, 842842), (843, 843843), (844, 844844), (845, 845845), (846, 846846), (847, 847847), (848, 848848), (849, 849849), (850, 850850), (851, 851851), (852, 852852), (853, 853853), (854, 854854), (855, 855855), (856, 856856), (857, 857857), (858, 858858), (859, 859859), (860, 860860), (861, 861861), (862, 862862), (863, 863863), (864, 864864), (865, 865865), (866, 866866), (867, 867867), (868, 868868), 
(869, 869869), (870, 870870), (871, 871871), (872, 872872), (873, 873873), (874, 874874), (875, 875875), (876, 876876), (877, 877877), (878, 878878), (879, 879879), (880, 880880), (881, 881881), (882, 882882), (883, 883883), (884, 884884), (885, 885885), (886, 886886), (887, 887887), (888, 888888), (889, 889889), (890, 890890), (891, 891891), (892, 892892), (893, 893893), (894, 894894), (895, 895895), (896, 896896), (897, 897897), (898, 898898), (899, 899899), (900, 900900), (901, 901901), (902, 902902), (903, 903903), (904, 904904), (905, 905905), (906, 906906), (907, 907907), (908, 908908), (909, 909909), (910, 910910), (911, 911911), (912, 912912), (913, 913913), (914, 914914), (915, 915915), (916, 916916), (917, 917917), (918, 918918), (919, 919919), (920, 920920), (921, 921921), (922, 922922), (923, 923923), (924, 924924), (925, 925925), (926, 926926), (927, 927927), (928, 928928), (929, 929929), (930, 930930), (931, 931931), (932, 932932), (933, 933933), (934, 934934), (935, 935935), (936, 936936), (937, 937937), (938, 938938), (939, 939939), (940, 940940), (941, 941941), (942, 942942), (943, 943943), (944, 944944), (945, 945945), (946, 946946), (947, 947947), (948, 948948), (949, 949949), (950, 950950), (951, 951951), (952, 952952), (953, 953953), (954, 954954), (955, 955955), (956, 956956), (957, 957957), (958, 958958), (959, 959959), (960, 960960), (961, 961961), (962, 962962), (963, 963963), (964, 964964), (965, 965965), (966, 966966), (967, 967967), (968, 968968), (969, 969969), (970, 970970), (971, 971971), (972, 972972), (973, 973973), (974, 974974), (975, 975975), (976, 976976), (977, 977977), (978, 978978), (979, 979979), (980, 980980), (981, 981981), (982, 982982), (983, 983983), (984, 984984), (985, 985985), (986, 986986), (987, 987987), (988, 988988), (989, 989989), (990, 990990), (991, 991991), (992, 992992), (993, 993993), (994, 994994), (995, 995995), (996, 996996), (997, 997997), (998, 998998), (999, 999999), (10000, 10000);", parameters: 0b 2025-05-07T08:50:45.443516Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715662. Ctx: { TraceId: 01jtmz1brkacht4fa5mnnw7xcy, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTBhMzFlODYtZmUwMDkwMDAtNjgzZTMyZi1lMWM0NzIzMQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root Captured TEvDataShard::TEvRead from KQP_SOURCE_READ_ACTOR to TX_DATASHARD_ACTOR ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index_build/unittest >> VectorIndexBuildTest::BaseCase [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140] 2025-05-07T08:50:22.061372Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:50:22.061457Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:50:22.061494Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:50:22.061546Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:50:22.061604Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:50:22.061649Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:50:22.061698Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:50:22.061763Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:50:22.062533Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:50:22.062954Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:50:22.166782Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:50:22.166851Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:50:22.183890Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:50:22.184094Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:50:22.184263Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:50:22.197073Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:50:22.197405Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:50:22.198313Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:50:22.198521Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:50:22.201871Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:50:22.203433Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:50:22.203504Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:50:22.203578Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:50:22.203629Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:50:22.203758Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:50:22.204004Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:50:22.214350Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T08:50:22.366161Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:50:22.366400Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:50:22.366634Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:50:22.366859Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:50:22.366920Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:50:22.369268Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:50:22.369408Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:50:22.369592Z node 1 :FLAT_TX_SCHEMESHARD 
DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:50:22.369643Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:50:22.369699Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:50:22.369734Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:50:22.371826Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:50:22.371903Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:50:22.371942Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:50:22.373906Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:50:22.373959Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:50:22.374024Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:50:22.374075Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:50:22.377715Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:50:22.380836Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:50:22.381043Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:50:22.382034Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:50:22.382215Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:50:22.382278Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply 
TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:50:22.382595Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:50:22.382652Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:50:22.382829Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:50:22.382903Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:50:22.386005Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:50:22.386067Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:50:22.386251Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:50:22.386291Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... DataShard::TEvStateChangedResult 2025-05-07T08:50:45.970671Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:2962: Handle TEvStateChangedResult datashard 72075186233409568 state Offline 2025-05-07T08:50:45.971034Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269877763, Sender [1:4967:6619], Recipient [1:4617:6280]: NKikimr::TEvTabletPipe::TEvClientDestroyed { TabletId: 72075186233409561 ClientId: [1:4967:6619] ServerId: [1:4968:6620] } 2025-05-07T08:50:45.971077Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3164: StateWork, processing event TEvTabletPipe::TEvClientDestroyed 2025-05-07T08:50:45.971536Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72075186233409561 ShardLocalIdx: 8 TxId_Deprecated: 8 TabletID: 72075186233409568 2025-05-07T08:50:45.971905Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 268829696, Sender [1:4606:6271], Recipient [1:4617:6280]: NKikimr::TEvTablet::TEvTabletDead 2025-05-07T08:50:45.973111Z node 1 :TX_DATASHARD INFO: datashard.cpp:257: OnTabletDead: 72075186233409568 2025-05-07T08:50:45.973257Z node 1 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186233409568 2025-05-07T08:50:45.974793Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5898: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 8 ShardOwnerId: 72075186233409561 ShardLocalIdx: 8, at schemeshard: 72075186233409561 2025-05-07T08:50:45.975130Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72075186233409561, LocalPathId: 7] was 1 Forgetting tablet 72075186233409568 2025-05-07T08:50:45.976081Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72075186233409561 
2025-05-07T08:50:45.976126Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72075186233409561, LocalPathId: 7], at schemeshard: 72075186233409561 2025-05-07T08:50:45.976195Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72075186233409561, LocalPathId: 3] was 4 2025-05-07T08:50:45.981818Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72075186233409561:8 2025-05-07T08:50:45.981950Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72075186233409561:8 tabletId 72075186233409568 2025-05-07T08:50:45.984445Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72075186233409561 2025-05-07T08:50:46.076958Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 2050, transactions count in step: 1, at schemeshard: 72075186233409561 2025-05-07T08:50:46.077102Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 281474976735763 AckTo { RawX1: 0 RawX2: 0 } } Step: 2050 MediatorID: 72075186233409563 TabletID: 72075186233409561, at schemeshard: 72075186233409561 2025-05-07T08:50:46.077161Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_lock.cpp:44: [72075186233409561] TDropLock TPropose opId# 281474976735763:0 HandleReply TEvOperationPlan: step# 2050 2025-05-07T08:50:46.077211Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 281474976735763:0 128 -> 240 2025-05-07T08:50:46.081029Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 281474976735763:0, at schemeshard: 72075186233409561 2025-05-07T08:50:46.081100Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72075186233409561] TDone opId# 281474976735763:0 ProgressState 2025-05-07T08:50:46.081185Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976735763:0 progress is 1/1 2025-05-07T08:50:46.081234Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 281474976735763 ready parts: 1/1 2025-05-07T08:50:46.081275Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976735763:0 progress is 1/1 2025-05-07T08:50:46.081305Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 281474976735763 ready parts: 1/1 2025-05-07T08:50:46.081338Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 281474976735763, ready parts: 1/1, is published: true 2025-05-07T08:50:46.081411Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:3375:5120] message: TxId: 281474976735763 2025-05-07T08:50:46.081469Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 281474976735763 ready parts: 1/1 2025-05-07T08:50:46.081505Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation 
id: 281474976735763:0 2025-05-07T08:50:46.081533Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 281474976735763:0 2025-05-07T08:50:46.081617Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72075186233409561, LocalPathId: 2] was 4 2025-05-07T08:50:46.086873Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6706: Handle: TEvNotifyTxCompletionResult: txId# 281474976735763 2025-05-07T08:50:46.086966Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6708: Message: TxId: 281474976735763 2025-05-07T08:50:46.087041Z node 1 :BUILD_INDEX INFO: schemeshard_build_index__progress.cpp:2321: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxReply : TEvNotifyTxCompletionResult, txId# 281474976735763, buildInfoId: 115 2025-05-07T08:50:46.087143Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:2324: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxReply : TEvNotifyTxCompletionResult, txId# 281474976735763, buildInfo: TBuildInfo{ IndexBuildId: 115, Uid: , DomainPathId: [OwnerId: 72075186233409561, LocalPathId: 1], TablePathId: [OwnerId: 72075186233409561, LocalPathId: 2], IndexType: EIndexTypeGlobalVectorKmeansTree, IndexName: index1, IndexColumn: embedding, State: Unlocking, IsCancellationRequested: 0, Issue: , SubscribersCount: 1, CreateSender: [1:4237:5937], AlterMainTableTxId: 0, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 281474976735757, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976735758, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 0, ApplyTxId: 281474976735762, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976735763, UnlockTxStatus: StatusAccepted, UnlockTxDone: 0, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }, Billed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }} 2025-05-07T08:50:46.091286Z node 1 :BUILD_INDEX INFO: schemeshard_build_index__progress.cpp:1105: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 115 Unlocking TBuildInfo{ IndexBuildId: 115, Uid: , DomainPathId: [OwnerId: 72075186233409561, LocalPathId: 1], TablePathId: [OwnerId: 72075186233409561, LocalPathId: 2], IndexType: EIndexTypeGlobalVectorKmeansTree, IndexName: index1, IndexColumn: embedding, State: Unlocking, IsCancellationRequested: 0, Issue: , SubscribersCount: 1, CreateSender: [1:4237:5937], AlterMainTableTxId: 0, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 281474976735757, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976735758, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 0, ApplyTxId: 281474976735762, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976735763, UnlockTxStatus: StatusAccepted, UnlockTxDone: 1, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }, Billed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }} 2025-05-07T08:50:46.091391Z node 1 :BUILD_INDEX INFO: schemeshard_build_index_tx_base.cpp:25: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: Change state from Unlocking to Done 2025-05-07T08:50:46.094483Z node 1 :BUILD_INDEX INFO: schemeshard_build_index__progress.cpp:1105: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 115 Done TBuildInfo{ IndexBuildId: 115, Uid: , DomainPathId: 
[OwnerId: 72075186233409561, LocalPathId: 1], TablePathId: [OwnerId: 72075186233409561, LocalPathId: 2], IndexType: EIndexTypeGlobalVectorKmeansTree, IndexName: index1, IndexColumn: embedding, State: Done, IsCancellationRequested: 0, Issue: , SubscribersCount: 1, CreateSender: [1:4237:5937], AlterMainTableTxId: 0, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 281474976735757, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976735758, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 0, ApplyTxId: 281474976735762, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976735763, UnlockTxStatus: StatusAccepted, UnlockTxDone: 1, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }, Billed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }} 2025-05-07T08:50:46.094560Z node 1 :BUILD_INDEX TRACE: schemeshard_build_index_tx_base.cpp:325: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TIndexBuildInfo SendNotifications: : id# 115, subscribers count# 1 2025-05-07T08:50:46.094775Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 115: got EvNotifyTxCompletionResult 2025-05-07T08:50:46.094827Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 115: satisfy waiter [1:4371:6049] TestWaitNotification: OK eventTxId 115 2025-05-07T08:50:46.105085Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index__get.cpp:19: TIndexBuilder::TXTYPE_GET_INDEX_BUILD: DoExecute DatabaseName: "/MyRoot/CommonDB" IndexBuildId: 115 2025-05-07T08:50:46.105438Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index_tx_base.h:93: TIndexBuilder::TXTYPE_GET_INDEX_BUILD: Reply Status: SUCCESS IndexBuild { Id: 115 Issues { message: "TShardStatus { ShardIdx: 72075186233409561:7 Status: INVALID UploadStatus: STATUS_CODE_UNSPECIFIED DebugMessage:
: Error: Shard or requested range is empty\n SeqNoRound: 1 Processed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 } }" severity: 1 } State: STATE_DONE Settings { source_path: "/MyRoot/CommonDB/Table" index { name: "index1" index_columns: "embedding" global_vector_kmeans_tree_index { } } max_shards_in_flight: 2 ScanSettings { MaxBatchRows: 1 } } Progress: 100 } BUILDINDEX RESPONSE Get: NKikimrIndexBuilder.TEvGetResponse Status: SUCCESS IndexBuild { Id: 115 Issues { message: "TShardStatus { ShardIdx: 72075186233409561:7 Status: INVALID UploadStatus: STATUS_CODE_UNSPECIFIED DebugMessage:
: Error: Shard or requested range is empty\n SeqNoRound: 1 Processed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 } }" severity: 1 } State: STATE_DONE Settings { source_path: "/MyRoot/CommonDB/Table" index { name: "index1" index_columns: "embedding" global_vector_kmeans_tree_index { } } max_shards_in_flight: 2 ScanSettings { MaxBatchRows: 1 } } Progress: 100 } >> EvWrite::WriteWithSplit [GOOD] >> Normalizers::ChunksV0MetaNormalizer |89.2%| [TS] {asan, default-linux-x86_64, release} ydb/core/driver_lib/run/ut/unittest |89.2%| [TA] $(B)/ydb/core/tx/datashard/ut_kqp/test-results/unittest/{meta.json ... results_accumulator.log} |89.2%| [TS] {asan, default-linux-x86_64, release} ydb/core/driver_lib/run/ut/unittest >> AutoConfig::GetServicePoolsWith1CPU [GOOD] >> AutoConfig::GetASPoolsith1CPU [GOOD] >> TTicketParserTest::LoginRefreshGroupsWithError [GOOD] >> TTicketParserTest::NebiusAccessServiceAuthenticationOk >> AutoConfig::GetASPoolsWith3CPUs [GOOD] >> ObjectStorageListingTest::ListingNoFilter [GOOD] >> TTicketParserTest::TicketFromCertificateWithValidationDifferentIssuersGood [GOOD] >> TTicketParserTest::TicketFromCertificateWithValidationDifferentIssuersBad >> TSubscriberTest::NotifyUpdate |89.2%| [TS] {asan, default-linux-x86_64, release} ydb/core/driver_lib/run/ut/unittest >> AutoConfig::GetASPoolsith1CPU [GOOD] >> TSubscriberTest::SyncWithOutdatedReplica >> AutoConfig::GetASPoolsWith4AndMoreCPUs >> Viewer::TenantInfo5kkTablets [GOOD] >> Viewer::UseTransactionWhenExecuteDataActionQuery |89.2%| [TS] {asan, default-linux-x86_64, release} ydb/core/driver_lib/run/ut/unittest >> AutoConfig::GetASPoolsWith3CPUs [GOOD] >> AutoConfig::GetASPoolsWith4AndMoreCPUs [GOOD] >> TSubscriberTest::NotifyUpdate [GOOD] >> TSubscriberTest::SyncWithOutdatedReplica [GOOD] >> TSubscriberTest::StrongNotificationAfterCommit >> TSubscriberCombinationsTest::MigratedPathRecreation ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_object_storage_listing/unittest >> ObjectStorageListingTest::ListingNoFilter [GOOD] Test command err: 2025-05-07T08:50:46.543088Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:105:2151], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:50:46.543285Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:50:46.543587Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002e8b/r3tmp/tmpAixXOf/pdisk_1.dat 2025-05-07T08:50:46.966277Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-05-07T08:50:47.021045Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:50:47.079538Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:50:47.079702Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:50:47.091639Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:50:47.174549Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-05-07T08:50:47.212161Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:665:2569] 2025-05-07T08:50:47.212490Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-05-07T08:50:47.255528Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-05-07T08:50:47.255699Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-05-07T08:50:47.258475Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-05-07T08:50:47.258563Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-05-07T08:50:47.258611Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-05-07T08:50:47.259003Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-05-07T08:50:47.259214Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-05-07T08:50:47.259299Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:681:2569] in generation 1 2025-05-07T08:50:47.270161Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-05-07T08:50:47.303290Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-05-07T08:50:47.303552Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-05-07T08:50:47.303680Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 
72075186224037888, actorId: [1:683:2579] 2025-05-07T08:50:47.303719Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-05-07T08:50:47.303779Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-05-07T08:50:47.303825Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T08:50:47.304367Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-05-07T08:50:47.304499Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-05-07T08:50:47.304607Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T08:50:47.304655Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-05-07T08:50:47.304697Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-05-07T08:50:47.304743Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T08:50:47.304855Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:661:2566], serverId# [1:672:2573], sessionId# [0:0:0] 2025-05-07T08:50:47.305448Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-05-07T08:50:47.305755Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-05-07T08:50:47.305902Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-05-07T08:50:47.307974Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-05-07T08:50:47.323453Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-05-07T08:50:47.323594Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-05-07T08:50:47.495678Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:697:2587], serverId# [1:699:2589], sessionId# [0:0:0] 2025-05-07T08:50:47.500610Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715657 at step 1000 at tablet 72075186224037888 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1000 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2025-05-07T08:50:47.500739Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T08:50:47.501152Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T08:50:47.501211Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 
planned 1 2025-05-07T08:50:47.501269Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000:281474976715657] in PlanQueue unit at 72075186224037888 2025-05-07T08:50:47.501542Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1000:281474976715657 keys extracted: 0 2025-05-07T08:50:47.501729Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-05-07T08:50:47.502938Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T08:50:47.503039Z node 1 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-05-07T08:50:47.505299Z node 1 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-05-07T08:50:47.505869Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-05-07T08:50:47.507926Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-05-07T08:50:47.507974Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T08:50:47.508493Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-05-07T08:50:47.508547Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T08:50:47.509437Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T08:50:47.509487Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-05-07T08:50:47.509547Z node 1 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-05-07T08:50:47.509614Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [1:410:2405], exec latency: 0 ms, propose latency: 0 ms 2025-05-07T08:50:47.509670Z node 1 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-05-07T08:50:47.509755Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T08:50:47.514197Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-05-07T08:50:47.516350Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-05-07T08:50:47.516527Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-05-07T08:50:47.516591Z node 1 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got 
TEvSchemaChangedResult from SS at 72075186224037888 2025-05-07T08:50:47.527011Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:731:2613], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:50:47.527163Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:741:2618], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:50:47.527262Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:50:47.532443Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480 2025-05-07T08:50:47.539381Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-05-07T08:50:47.713828Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-05-07T08:50:47.719501Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:745:2621], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-05-07T08:50:47.793395Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:815:2660] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:50:48.240326Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715660. Ctx: { TraceId: 01jtmz1e340axcjxwzfcd6gjef, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=N2ZjOGUxMmMtNzAwMzNkMjctMzkzOWUyN2YtMTA1OGZjMjc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:50:48.247978Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:846:2677], serverId# [1:847:2678], sessionId# [0:0:0] 2025-05-07T08:50:48.248618Z node 1 :TX_DATASHARD DEBUG: execute_write_unit.cpp:245: Executing write operation for [0:2] at 72075186224037888 2025-05-07T08:50:48.248866Z node 1 :TX_DATASHARD DEBUG: execute_write_unit.cpp:410: Executed write operation for [0:2] at 72075186224037888, row count=5 2025-05-07T08:50:48.260687Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T08:50:48.266595Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:854:2684], serverId# [1:855:2685], sessionId# [0:0:0] 2025-05-07T08:50:48.267017Z node 1 :TX_DATASHARD DEBUG: datashard__object_storage_listing.cpp:152: 72075186224037888 S3 Listing: start at key ((type:2, value:"d\0\0\0") (type:4608, value:"/test/")), end at key ((type:2, value:"d\0\0\0") (type:4608, value:"/test0")) restarted: 0 last path: "" contents: 0 common prefixes: 0 2025-05-07T08:50:48.267293Z node 1 :TX_DATASHARD DEBUG: datashard__object_storage_listing.cpp:374: 72075186224037888 S3 Listing: finished status: 0 description: "" contents: 3 common prefixes: 2 2025-05-07T08:50:48.267535Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037888, clientId# [1:854:2684], serverId# [1:855:2685], sessionId# [0:0:0] |89.2%| [TS] {asan, default-linux-x86_64, release} ydb/core/driver_lib/run/ut/unittest >> AutoConfig::GetASPoolsWith4AndMoreCPUs [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_subscriber/unittest >> TSubscriberTest::NotifyUpdate [GOOD] Test command err: 2025-05-07T08:50:49.061906Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:960: [main][1:34:2065][path] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-05-07T08:50:49.065659Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:38:2065][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:3:2050] 2025-05-07T08:50:49.065797Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:39:2065][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:6:2053] 2025-05-07T08:50:49.065842Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:40:2065][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:9:2056] 2025-05-07T08:50:49.065906Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:807: [main][1:34:2065][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path 
Version: 0 }: sender# [1:35:2065] 2025-05-07T08:50:49.066028Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:807: [main][1:34:2065][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:36:2065] 2025-05-07T08:50:49.066093Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:836: [main][1:34:2065][path] Set up state: owner# [1:33:2064], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-05-07T08:50:49.066157Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:807: [main][1:34:2065][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:37:2065] 2025-05-07T08:50:49.066193Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:854: [main][1:34:2065][path] Ignore empty state: owner# [1:33:2064], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-05-07T08:50:49.066764Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:38:2065][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 1, LocalPathId: 1] Version: 1 }: sender# [1:3:2050] 2025-05-07T08:50:49.066845Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:807: [main][1:34:2065][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 1, LocalPathId: 1] Version: 1 }: sender# [1:35:2065] 2025-05-07T08:50:49.066902Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:842: [main][1:34:2065][path] Update to strong state: owner# [1:33:2064], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, new state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 1, LocalPathId: 1], Version: 1) DomainId: AbandonedSchemeShards: there are 0 elements } >> TSubscriberTest::SyncPartial >> TSubscriberTest::StrongNotificationAfterCommit [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_subscriber/unittest >> TSubscriberTest::SyncWithOutdatedReplica [GOOD] Test command err: 2025-05-07T08:50:49.087501Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:960: [main][1:35:2066][path] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-05-07T08:50:49.089713Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:39:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 1, LocalPathId: 2] Version: 2 }: sender# [1:3:2050] 2025-05-07T08:50:49.089830Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:40:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 2, LocalPathId: 2] Version: 1 }: sender# [1:6:2053] 2025-05-07T08:50:49.089872Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:41:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 2, LocalPathId: 2] Version: 1 }: sender# [1:9:2056] 2025-05-07T08:50:49.089944Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:807: [main][1:35:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 1, LocalPathId: 2] Version: 2 }: sender# [1:36:2066] 2025-05-07T08:50:49.090067Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:807: [main][1:35:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 2, LocalPathId: 2] Version: 1 }: sender# [1:37:2066] 2025-05-07T08:50:49.090136Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:836: 
[main][1:35:2066][path] Set up state: owner# [1:33:2064], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 2, LocalPathId: 2], Version: 1) DomainId: [OwnerId: 2, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-05-07T08:50:49.090253Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:807: [main][1:35:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 2, LocalPathId: 2] Version: 1 }: sender# [1:38:2066] 2025-05-07T08:50:49.090366Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:854: [main][1:35:2066][path] Path was already updated: owner# [1:33:2064], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 2, LocalPathId: 2], Version: 1) DomainId: [OwnerId: 2, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 2, LocalPathId: 2], Version: 1) DomainId: [OwnerId: 2, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-05-07T08:50:49.090531Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:871: [main][1:35:2066][path] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvSyncRequest: sender# [1:33:2064], cookie# 1 2025-05-07T08:50:49.090628Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:39:2066][path] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: path }: sender# [1:36:2066], cookie# 1 2025-05-07T08:50:49.090692Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:40:2066][path] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: path }: sender# [1:37:2066], cookie# 1 2025-05-07T08:50:49.090778Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:41:2066][path] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: path }: sender# [1:38:2066], cookie# 1 2025-05-07T08:50:49.090839Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:39:2066][path] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:3:2050], cookie# 1 2025-05-07T08:50:49.090887Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:40:2066][path] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 1 Partial: 0 }: sender# [1:6:2053], cookie# 1 2025-05-07T08:50:49.090920Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:41:2066][path] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 1 Partial: 0 }: sender# [1:9:2056], cookie# 1 2025-05-07T08:50:49.090972Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:892: [main][1:35:2066][path] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:36:2066], cookie# 1 2025-05-07T08:50:49.091013Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:932: [main][1:35:2066][path] Sync is in progress: cookie# 1, size# 3, half# 1, successes# 1, faulires# 0 2025-05-07T08:50:49.091051Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:892: [main][1:35:2066][path] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 1 Partial: 0 }: sender# [1:37:2066], cookie# 1 2025-05-07T08:50:49.091156Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:946: [main][1:35:2066][path] Sync is done: cookie# 1, size# 3, half# 1, successes# 2, faulires# 0, partial# 0 2025-05-07T08:50:49.091200Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:892: [main][1:35:2066][path] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 1 Partial: 0 }: sender# [1:38:2066], cookie# 1 2025-05-07T08:50:49.091241Z node 1 
:SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:906: [main][1:35:2066][path] Unexpected sync response: sender# [1:38:2066], cookie# 1 |89.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_rtmr/unittest >> TSubscriberCombinationsTest::MigratedPathRecreation [GOOD] >> TSubscriberTest::Boot >> TTicketParserTest::BulkAuthorizationWithRequiredPermissions [GOOD] >> TTicketParserTest::BulkAuthorizationWithUserAccount >> TSubscriberTest::SyncPartial [GOOD] |89.2%| [TA] $(B)/ydb/core/driver_lib/run/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> TTicketParserTest::LoginGoodWithGroups [GOOD] >> TTicketParserTest::LoginRefreshGroupsGood ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_subscriber/unittest >> TSubscriberTest::StrongNotificationAfterCommit [GOOD] Test command err: 2025-05-07T08:50:49.707316Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:960: [main][1:34:2065][path] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-05-07T08:50:49.709520Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:38:2065][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:3:2050] 2025-05-07T08:50:49.709633Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:39:2065][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:6:2053] 2025-05-07T08:50:49.709686Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:40:2065][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:9:2056] 2025-05-07T08:50:49.709758Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:807: [main][1:34:2065][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:35:2065] 2025-05-07T08:50:49.709837Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:807: [main][1:34:2065][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:36:2065] 2025-05-07T08:50:49.709893Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:836: [main][1:34:2065][path] Set up state: owner# [1:33:2064], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-05-07T08:50:49.709955Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:807: [main][1:34:2065][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:37:2065] 2025-05-07T08:50:49.710860Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:854: [main][1:34:2065][path] Ignore empty state: owner# [1:33:2064], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-05-07T08:50:49.712285Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:38:2065][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:3:2050] 2025-05-07T08:50:49.712385Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:807: [main][1:34:2065][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:35:2065] 2025-05-07T08:50:49.712445Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:842: [main][1:34:2065][path] Update to strong state: owner# [1:33:2064], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, new state# { Deleted: 1 Strong: 1 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-05-07T08:50:49.712641Z node 1 
:SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:39:2065][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:6:2053] 2025-05-07T08:50:49.712708Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:807: [main][1:34:2065][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:36:2065] 2025-05-07T08:50:49.712787Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:854: [main][1:34:2065][path] Ignore empty state: owner# [1:33:2064], state# { Deleted: 1 Strong: 1 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 1 Version: DomainId: AbandonedSchemeShards: there are 0 elements } >> TSubscriberTest::Boot [GOOD] >> TColumnShardTestReadWrite::WriteReadDuplicate [GOOD] |89.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_rtmr/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_subscriber/unittest >> TSubscriberTest::SyncPartial [GOOD] Test command err: 2025-05-07T08:50:50.131565Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:960: [main][1:34:2065][path] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-05-07T08:50:50.141508Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:38:2065][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:3:2050] 2025-05-07T08:50:50.141737Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:39:2065][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:6:2053] 2025-05-07T08:50:50.141804Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:40:2065][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:9:2056] 2025-05-07T08:50:50.141900Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:807: [main][1:34:2065][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:35:2065] 2025-05-07T08:50:50.142036Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:807: [main][1:34:2065][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:36:2065] 2025-05-07T08:50:50.142099Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:836: [main][1:34:2065][path] Set up state: owner# [1:33:2064], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-05-07T08:50:50.142184Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:807: [main][1:34:2065][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:37:2065] 2025-05-07T08:50:50.142230Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:854: [main][1:34:2065][path] Ignore empty state: owner# [1:33:2064], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-05-07T08:50:50.142536Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:871: [main][1:34:2065][path] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvSyncRequest: sender# [1:33:2064], cookie# 1 2025-05-07T08:50:50.142706Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:38:2065][path] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: path }: sender# [1:35:2065], cookie# 1 2025-05-07T08:50:50.142791Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:39:2065][path] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: path }: 
sender# [1:36:2065], cookie# 1 2025-05-07T08:50:50.142848Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:40:2065][path] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: path }: sender# [1:37:2065], cookie# 1 2025-05-07T08:50:50.142923Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:39:2065][path] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 0 }: sender# [1:6:2053], cookie# 1 2025-05-07T08:50:50.142996Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:40:2065][path] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 0 }: sender# [1:9:2056], cookie# 1 2025-05-07T08:50:50.143068Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:892: [main][1:34:2065][path] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 1 }: sender# [1:35:2065], cookie# 1 2025-05-07T08:50:50.143122Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:932: [main][1:34:2065][path] Sync is in progress: cookie# 1, size# 3, half# 1, successes# 0, faulires# 1 2025-05-07T08:50:50.143177Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:807: [main][1:34:2065][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:35:2065] 2025-05-07T08:50:50.143240Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:854: [main][1:34:2065][path] Ignore empty state: owner# [1:33:2064], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-05-07T08:50:50.143291Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:892: [main][1:34:2065][path] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 0 }: sender# [1:36:2065], cookie# 1 2025-05-07T08:50:50.143333Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:932: [main][1:34:2065][path] Sync is in progress: cookie# 1, size# 3, half# 1, successes# 1, faulires# 1 2025-05-07T08:50:50.143375Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:892: [main][1:34:2065][path] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 0 }: sender# [1:37:2065], cookie# 1 2025-05-07T08:50:50.143411Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:946: [main][1:34:2065][path] Sync is done: cookie# 1, size# 3, half# 1, successes# 2, faulires# 1, partial# 0 2025-05-07T08:50:50.143559Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:871: [main][1:34:2065][path] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvSyncRequest: sender# [1:33:2064], cookie# 2 2025-05-07T08:50:50.143683Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:892: [main][1:34:2065][path] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 1 }: sender# [1:35:2065], cookie# 2 2025-05-07T08:50:50.143713Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:932: [main][1:34:2065][path] Sync is in progress: cookie# 2, size# 3, half# 1, successes# 0, faulires# 1 2025-05-07T08:50:50.143769Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:39:2065][path] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: path }: sender# [1:36:2065], cookie# 2 2025-05-07T08:50:50.143815Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:40:2065][path] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: path }: sender# [1:37:2065], cookie# 2 2025-05-07T08:50:50.143877Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: 
subscriber.cpp:390: [replica][1:40:2065][path] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 0 }: sender# [1:9:2056], cookie# 2 2025-05-07T08:50:50.143932Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:892: [main][1:34:2065][path] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 1 }: sender# [1:36:2065], cookie# 2 2025-05-07T08:50:50.143986Z node 1 :SCHEME_BOARD_SUBSCRIBER WARN: subscriber.cpp:948: [main][1:34:2065][path] Sync is done: cookie# 2, size# 3, half# 1, successes# 0, faulires# 2, partial# 1 2025-05-07T08:50:50.144043Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:807: [main][1:34:2065][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:36:2065] 2025-05-07T08:50:50.144092Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:854: [main][1:34:2065][path] Ignore empty state: owner# [1:33:2064], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-05-07T08:50:50.144159Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:892: [main][1:34:2065][path] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 0 }: sender# [1:37:2065], cookie# 2 2025-05-07T08:50:50.144189Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:906: [main][1:34:2065][path] Unexpected sync response: sender# [1:37:2065], cookie# 2 2025-05-07T08:50:50.144300Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:871: [main][1:34:2065][path] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvSyncRequest: sender# [1:33:2064], cookie# 3 2025-05-07T08:50:50.144527Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:892: [main][1:34:2065][path] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 1 }: sender# [1:35:2065], cookie# 3 2025-05-07T08:50:50.144584Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:932: [main][1:34:2065][path] Sync is in progress: cookie# 3, size# 3, half# 1, successes# 0, faulires# 1 2025-05-07T08:50:50.144629Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:892: [main][1:34:2065][path] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 1 }: sender# [1:36:2065], cookie# 3 2025-05-07T08:50:50.144659Z node 1 :SCHEME_BOARD_SUBSCRIBER WARN: subscriber.cpp:948: [main][1:34:2065][path] Sync is done: cookie# 3, size# 3, half# 1, successes# 0, faulires# 2, partial# 1 2025-05-07T08:50:50.144735Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:40:2065][path] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: path }: sender# [1:37:2065], cookie# 3 2025-05-07T08:50:50.144836Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:892: [main][1:34:2065][path] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 1 }: sender# [1:37:2065], cookie# 3 2025-05-07T08:50:50.144870Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:906: [main][1:34:2065][path] Unexpected sync response: sender# [1:37:2065], cookie# 3 2025-05-07T08:50:50.144923Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:807: [main][1:34:2065][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:37:2065] 2025-05-07T08:50:50.144967Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:854: [main][1:34:2065][path] Ignore empty state: owner# [1:33:2064], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other 
state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } |89.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/replication/controller/ut_assign_tx_id/core-tx-replication-controller-ut_assign_tx_id |89.2%| [TA] {RESULT} $(B)/ydb/core/tx/datashard/ut_kqp/test-results/unittest/{meta.json ... results_accumulator.log} |89.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/replication/controller/ut_assign_tx_id/core-tx-replication-controller-ut_assign_tx_id |89.2%| [TA] {RESULT} $(B)/ydb/core/driver_lib/run/ut/test-results/unittest/{meta.json ... results_accumulator.log} |89.2%| [LD] {RESULT} $(B)/ydb/core/tx/replication/controller/ut_assign_tx_id/core-tx-replication-controller-ut_assign_tx_id >> TTicketParserTest::AccessServiceAuthenticationApiKeyOk [GOOD] >> TTicketParserTest::AuthenticationUnavailable |89.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_rtmr/unittest >> TTicketParserTest::TicketFromCertificateCheckIssuerBad [GOOD] >> TTicketParserTest::TicketFromCertificateWithValidationBad ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_subscriber/unittest >> TSubscriberTest::Boot [GOOD] Test command err: 2025-05-07T08:50:49.817652Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [1:3:2050] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 800 Generation: 1 }: sender# [1:34:2065] 2025-05-07T08:50:49.817748Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [1:3:2050] Successful handshake: owner# 800, generation# 1 2025-05-07T08:50:49.818069Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [1:6:2053] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 900 Generation: 1 }: sender# [1:35:2066] 2025-05-07T08:50:49.818121Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [1:6:2053] Successful handshake: owner# 900, generation# 1 2025-05-07T08:50:49.818184Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:958: [1:3:2050] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 800 Generation: 1 }: sender# [1:34:2065] 2025-05-07T08:50:49.818216Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:985: [1:3:2050] Commit generation: owner# 800, generation# 1 2025-05-07T08:50:49.818474Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:958: [1:6:2053] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 900 Generation: 1 }: sender# [1:35:2066] 2025-05-07T08:50:49.818511Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:985: [1:6:2053] Commit generation: owner# 900, generation# 1 2025-05-07T08:50:49.818615Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:960: [main][1:37:2068][/root/db/dir_inside] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-05-07T08:50:49.819068Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:3:2050] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /root/db/dir_inside DomainOwnerId: 1 }: sender# [1:41:2068] 2025-05-07T08:50:49.819106Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [1:3:2050] Upsert description: path# /root/db/dir_inside 2025-05-07T08:50:49.819237Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:3:2050] Subscribe: subscriber# [1:41:2068], path# /root/db/dir_inside, domainOwnerId# 1, capabilities# AckNotifications: true 2025-05-07T08:50:49.819387Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:6:2053] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /root/db/dir_inside DomainOwnerId: 1 }: sender# [1:42:2068] 2025-05-07T08:50:49.819408Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [1:6:2053] Upsert 
description: path# /root/db/dir_inside 2025-05-07T08:50:49.819462Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:6:2053] Subscribe: subscriber# [1:42:2068], path# /root/db/dir_inside, domainOwnerId# 1, capabilities# AckNotifications: true 2025-05-07T08:50:49.819580Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:9:2056] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /root/db/dir_inside DomainOwnerId: 1 }: sender# [1:43:2068] 2025-05-07T08:50:49.819602Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [1:9:2056] Upsert description: path# /root/db/dir_inside 2025-05-07T08:50:49.819635Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:9:2056] Subscribe: subscriber# [1:43:2068], path# /root/db/dir_inside, domainOwnerId# 1, capabilities# AckNotifications: true 2025-05-07T08:50:49.819710Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:41:2068][/root/db/dir_inside] Handle NKikimrSchemeBoard.TEvNotify { Path: /root/db/dir_inside Version: 0 }: sender# [1:3:2050] 2025-05-07T08:50:49.819769Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:3:2050] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [1:41:2068] 2025-05-07T08:50:49.819811Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:42:2068][/root/db/dir_inside] Handle NKikimrSchemeBoard.TEvNotify { Path: /root/db/dir_inside Version: 0 }: sender# [1:6:2053] 2025-05-07T08:50:49.819840Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:6:2053] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [1:42:2068] 2025-05-07T08:50:49.819872Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:43:2068][/root/db/dir_inside] Handle NKikimrSchemeBoard.TEvNotify { Path: /root/db/dir_inside Version: 0 }: sender# [1:9:2056] 2025-05-07T08:50:49.819934Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:9:2056] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [1:43:2068] 2025-05-07T08:50:49.820016Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:807: [main][1:37:2068][/root/db/dir_inside] Handle NKikimrSchemeBoard.TEvNotify { Path: /root/db/dir_inside Version: 0 }: sender# [1:38:2068] 2025-05-07T08:50:49.820102Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:807: [main][1:37:2068][/root/db/dir_inside] Handle NKikimrSchemeBoard.TEvNotify { Path: /root/db/dir_inside Version: 0 }: sender# [1:39:2068] 2025-05-07T08:50:49.820171Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:836: [main][1:37:2068][/root/db/dir_inside] Set up state: owner# [1:36:2067], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-05-07T08:50:49.820229Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:807: [main][1:37:2068][/root/db/dir_inside] Handle NKikimrSchemeBoard.TEvNotify { Path: /root/db/dir_inside Version: 0 }: sender# [1:40:2068] 2025-05-07T08:50:49.820264Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:854: [main][1:37:2068][/root/db/dir_inside] Ignore empty state: owner# [1:36:2067], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } =========== !argsLeft.IsDeletion 2025-05-07T08:50:49.820487Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [1:3:2050] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 800 Generation: 1 }: sender# [1:34:2065], cookie# 0, event size# 118 2025-05-07T08:50:49.820528Z node 1 
:SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [1:3:2050] Update description: path# /root/db/dir_inside, pathId# [OwnerId: 800, LocalPathId: 1111], deletion# false 2025-05-07T08:50:49.827324Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:550: [1:3:2050] Upsert description: path# /root/db/dir_inside, pathId# [OwnerId: 800, LocalPathId: 1111], pathDescription# {Status StatusSuccess, Path /root/db/dir_inside, PathId [OwnerId: 800, LocalPathId: 1111], PathVersion 1, SubdomainPathId [OwnerId: 800, LocalPathId: 1], PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 67} 2025-05-07T08:50:49.827587Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:41:2068][/root/db/dir_inside] Handle NKikimrSchemeBoard.TEvNotify { Path: /root/db/dir_inside PathId: [OwnerId: 800, LocalPathId: 1111] Version: 1 }: sender# [1:3:2050] 2025-05-07T08:50:49.827650Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:3:2050] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 1 }: sender# [1:41:2068] 2025-05-07T08:50:49.827713Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:807: [main][1:37:2068][/root/db/dir_inside] Handle NKikimrSchemeBoard.TEvNotify { Path: /root/db/dir_inside PathId: [OwnerId: 800, LocalPathId: 1111] Version: 1 }: sender# [1:38:2068] 2025-05-07T08:50:49.827832Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:842: [main][1:37:2068][/root/db/dir_inside] Update to strong state: owner# [1:36:2067], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, new state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 800, LocalPathId: 1111], Version: 1) DomainId: [OwnerId: 800, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } =========== argsLeft.GetSuperId() < argsRight.GetSuperId() =========== !argsRight.IsDeletion 2025-05-07T08:50:49.828119Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [1:6:2053] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 900 Generation: 1 }: sender# [1:35:2066], cookie# 0, event size# 117 2025-05-07T08:50:49.828167Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [1:6:2053] Update description: path# /root/db/dir_inside, pathId# [OwnerId: 900, LocalPathId: 11], deletion# false 2025-05-07T08:50:49.828233Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:550: [1:6:2053] Upsert description: path# /root/db/dir_inside, pathId# [OwnerId: 900, LocalPathId: 11], pathDescription# {Status StatusSuccess, Path /root/db/dir_inside, PathId [OwnerId: 900, LocalPathId: 11], PathVersion 1, SubdomainPathId [OwnerId: 800, LocalPathId: 1], PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 67} 2025-05-07T08:50:49.828350Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:42:2068][/root/db/dir_inside] Handle NKikimrSchemeBoard.TEvNotify { Path: /root/db/dir_inside PathId: [OwnerId: 900, LocalPathId: 11] Version: 1 }: sender# [1:6:2053] 2025-05-07T08:50:49.828409Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:6:2053] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 1 }: sender# [1:42:2068] 2025-05-07T08:50:49.828539Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:807: [main][1:37:2068][/root/db/dir_inside] Handle NKikimrSchemeBoard.TEvNotify { Path: /root/db/dir_inside PathId: [OwnerId: 900, LocalPathId: 11] Version: 1 }: sender# [1:39:2068] 2025-05-07T08:50:49.828609Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:842: [main][1:37:2068][/root/db/dir_inside] Path was updated to new version: owner# [1:36:2067], 
state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 800, LocalPathId: 1111], Version: 1) DomainId: [OwnerId: 800, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements }, new state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 900, LocalPathId: 11], Version: 1) DomainId: [OwnerId: 800, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-05-07T08:50:50.320540Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:960: [main][3:34:2065][path] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-05-07T08:50:50.321449Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][3:38:2065][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [3:3:2050] 2025-05-07T08:50:50.321529Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][3:39:2065][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [3:6:2053] 2025-05-07T08:50:50.321599Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][3:40:2065][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [3:9:2056] 2025-05-07T08:50:50.321680Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:807: [main][3:34:2065][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [3:35:2065] 2025-05-07T08:50:50.321752Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:807: [main][3:34:2065][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [3:36:2065] 2025-05-07T08:50:50.321804Z node 3 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:836: [main][3:34:2065][path] Set up state: owner# [3:33:2064], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-05-07T08:50:50.321878Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:807: [main][3:34:2065][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [3:37:2065] 2025-05-07T08:50:50.321926Z node 3 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:854: [main][3:34:2065][path] Ignore empty state: owner# [3:33:2064], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } |89.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/client/server/ut/ydb-core-client-server-ut |89.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/client/server/ut/ydb-core-client-server-ut |89.3%| [LD] {RESULT} $(B)/ydb/core/client/server/ut/ydb-core-client-server-ut ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> TColumnShardTestReadWrite::WriteReadDuplicate [GOOD] Test command err: 2025-05-07T08:50:24.822258Z node 1 :BLOB_CACHE NOTICE: ctor_logger.h:56: MaxCacheDataSize: 20971520 InFlightDataSize: 0 2025-05-07T08:50:24.929533Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-05-07T08:50:24.957786Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-05-07T08:50:24.958105Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-05-07T08:50:24.966077Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-05-07T08:50:24.966342Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-05-07T08:50:24.966678Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-05-07T08:50:24.966840Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-05-07T08:50:24.966967Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-05-07T08:50:24.967105Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanInsertionDedup; 2025-05-07T08:50:24.967244Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-05-07T08:50:24.967366Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-05-07T08:50:24.967514Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-05-07T08:50:24.967658Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-05-07T08:50:24.967804Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-05-07T08:50:24.967925Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-05-07T08:50:24.999462Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-05-07T08:50:24.999639Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:137;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=11;current_normalizer=CLASS_NAME=Granules; 2025-05-07T08:50:24.999693Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-05-07T08:50:24.999896Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-05-07T08:50:25.000116Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-05-07T08:50:25.000202Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-05-07T08:50:25.000254Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-05-07T08:50:25.000372Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-05-07T08:50:25.000437Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-05-07T08:50:25.000495Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-05-07T08:50:25.000535Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-05-07T08:50:25.000708Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-05-07T08:50:25.000774Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-05-07T08:50:25.000835Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-05-07T08:50:25.000872Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-05-07T08:50:25.000965Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-05-07T08:50:25.001025Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-05-07T08:50:25.001072Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanInsertionDedup;id=CleanInsertionDedup; 2025-05-07T08:50:25.001114Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=8;type=CleanInsertionDedup; 2025-05-07T08:50:25.001202Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanInsertionDedup;id=8; 2025-05-07T08:50:25.001251Z node 1 :TX_COLUMNSHARD 
WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-05-07T08:50:25.001280Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-05-07T08:50:25.001342Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-05-07T08:50:25.001388Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-05-07T08:50:25.001422Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-05-07T08:50:25.001638Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-05-07T08:50:25.001697Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-05-07T08:50:25.001734Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-05-07T08:50:25.001958Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-05-07T08:50:25.002049Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-05-07T08:50:25.002082Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-05-07T08:50:25.002237Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-05-07T08:50:25.002296Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-05-07T08:50:25.002335Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-05-07T08:50:25.002420Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-05-07T08:50:25.002494Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-05-07T08:50:25.002563Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-05-07T08:50:25.002597Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-05-07T08:50:25.003038Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=50; 2025-05-07T08:50:25.003127Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=36; ... DEBUG: log.cpp:784: SelfId=[1:3521:5533];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=scanner.cpp:65;event=intervals_finished; 2025-05-07T08:50:50.517033Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:3521:5533];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:187;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-05-07T08:50:50.517070Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:3521:5533];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:73;event=DoExtractReadyResults;result=1;count=10;finished=1; 2025-05-07T08:50:50.517108Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:3521:5533];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:198;stage=limit exhausted;limit=limits:(bytes=0;chunks=0);; 2025-05-07T08:50:50.518458Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:3521:5533];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:104;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-05-07T08:50:50.518626Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:3521:5533];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:187;stage=start;iterator=ready_results:(count:1;records_count:10;schema=timestamp: timestamp[us];);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-05-07T08:50:50.518686Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:3521:5533];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:73;event=DoExtractReadyResults;result=0;count=0;finished=1; 2025-05-07T08:50:50.518824Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:3521:5533];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:229;stage=ready 
result;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;);columns=1;rows=10; 2025-05-07T08:50:50.518890Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:3521:5533];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:249;stage=data_format;batch_size=80;num_rows=10;batch_columns=timestamp; 2025-05-07T08:50:50.519205Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:3521:5533];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:365;event=send_data;compute_actor_id=[1:3516:5528];bytes=80;rows=10;faults=0;finished=0;fault=0;schema=timestamp: timestamp[us]; 2025-05-07T08:50:50.519358Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:3521:5533];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:269;stage=finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-05-07T08:50:50.519496Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:3521:5533];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:187;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-05-07T08:50:50.519595Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:3521:5533];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-05-07T08:50:50.520148Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:3521:5533];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:104;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-05-07T08:50:50.520280Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:3521:5533];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:187;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-05-07T08:50:50.520381Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: 
SelfId=[1:3521:5533];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-05-07T08:50:50.520420Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: actor.cpp:409: Scan [1:3521:5533] finished for tablet 9437184 2025-05-07T08:50:50.520879Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[1:3521:5533];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:415;event=scan_finish;compute_actor_id=[1:3516:5528];stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.002},{"events":["l_bootstrap"],"t":0.003},{"events":["f_processing","f_task_result"],"t":0.005},{"events":["l_task_result"],"t":0.02},{"events":["f_ack"],"t":0.021},{"events":["l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.023}],"full":{"a":1746607850497162,"name":"_full_task","f":1746607850497162,"d_finished":0,"c":0,"l":1746607850520476,"d":23314},"events":[{"name":"bootstrap","f":1746607850497941,"d_finished":2665,"c":1,"l":1746607850500606,"d":2665},{"a":1746607850520122,"name":"ack","f":1746607850518427,"d_finished":1213,"c":1,"l":1746607850519640,"d":1567},{"a":1746607850520103,"name":"processing","f":1746607850502195,"d_finished":4026,"c":8,"l":1746607850519643,"d":4399},{"name":"ProduceResults","f":1746607850499452,"d_finished":2709,"c":11,"l":1746607850520405,"d":2709},{"a":1746607850520408,"name":"Finish","f":1746607850520408,"d_finished":0,"c":0,"l":1746607850520476,"d":68},{"name":"task_result","f":1746607850502215,"d_finished":2644,"c":7,"l":1746607850517199,"d":2644}],"id":"9437184::49"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-05-07T08:50:50.520963Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:3521:5533];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:365;event=send_data;compute_actor_id=[1:3516:5528];bytes=0;rows=0;faults=0;finished=1;fault=0;schema=; 2025-05-07T08:50:50.521405Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: 
SelfId=[1:3521:5533];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:370;event=scan_finished;compute_actor_id=[1:3516:5528];packs_sum=0;bytes=0;bytes_sum=0;rows=0;rows_sum=0;faults=0;finished=1;fault=0;stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.002},{"events":["l_bootstrap"],"t":0.003},{"events":["f_processing","f_task_result"],"t":0.005},{"events":["l_task_result"],"t":0.02},{"events":["f_ack"],"t":0.021},{"events":["l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.023}],"full":{"a":1746607850497162,"name":"_full_task","f":1746607850497162,"d_finished":0,"c":0,"l":1746607850521007,"d":23845},"events":[{"name":"bootstrap","f":1746607850497941,"d_finished":2665,"c":1,"l":1746607850500606,"d":2665},{"a":1746607850520122,"name":"ack","f":1746607850518427,"d_finished":1213,"c":1,"l":1746607850519640,"d":2098},{"a":1746607850520103,"name":"processing","f":1746607850502195,"d_finished":4026,"c":8,"l":1746607850519643,"d":4930},{"name":"ProduceResults","f":1746607850499452,"d_finished":2709,"c":11,"l":1746607850520405,"d":2709},{"a":1746607850520408,"name":"Finish","f":1746607850520408,"d_finished":0,"c":0,"l":1746607850521007,"d":599},{"name":"task_result","f":1746607850502215,"d_finished":2644,"c":7,"l":1746607850517199,"d":2644}],"id":"9437184::49"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-05-07T08:50:50.521488Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:3521:5533];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=stats.cpp:8;event=statistic;begin=2025-05-07T08:50:50.496546Z;index_granules=0;index_portions=1;index_batches=2;committed_batches=0;schema_columns=1;filter_columns=0;additional_columns=0;compacted_portions_bytes=2784;inserted_portions_bytes=0;committed_portions_bytes=0;data_filter_bytes=0;data_additional_bytes=0;delta_bytes=2784;selected_rows=0; 2025-05-07T08:50:50.521568Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:3521:5533];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=read_context.h:189;event=scan_aborted;reason=unexpected on destructor; 2025-05-07T08:50:50.522147Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[1:3521:5533];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=context.h:81;fetching=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=abstract.cpp:105;event=AbortEmergency;reason=TTxWriteIndex destructor withno CompleteReady flag;prev_reason=; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=manager.cpp:51;message=aborted data locks manager; >> KqpPg::ReadPgArray >> TTicketParserTest::AuthorizationRetryError [GOOD] >> TTicketParserTest::AuthorizationRetryErrorImmediately >> TTicketParserTest::AuthenticationUnknown [GOOD] >> TTicketParserTest::Authorization >> KqpPg::ReadPgArray [GOOD] >> KqpPg::TableArrayInsert+useSink >> TPQTabletTests::Multiple_PQTablets_1 >> KqpPg::NoTableQuery+useSink 
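The TSubscriberTest::SyncPartial trace earlier in this section shows the subscriber's majority-quorum rule: with size# 3 and half# 1, a sync completes as soon as either successes or failures exceed half, and any reply arriving after that point is logged as an unexpected sync response. Below is a minimal sketch of that rule, assuming only the semantics visible in the log; the real logic lives in subscriber.cpp and may differ in detail.

    // Hypothetical illustration only -- names and structure are not the
    // actual NKikimr scheme-board implementation.
    #include <cstddef>
    #include <optional>

    struct TSyncQuorum {
        size_t Size;          // replica count (size# in the log)
        size_t Successes = 0; // successes# in the log
        size_t Failures = 0;  // "faulires#" in the log

        size_t Half() const { return Size / 2; } // half# in the log

        // nullopt while "Sync is in progress"; otherwise whether the
        // finished sync is partial (partial# 1 = no success majority).
        std::optional<bool> OnReply(bool success) {
            (success ? Successes : Failures)++;
            if (Successes > Half()) return false; // "Sync is done ... partial# 0"
            if (Failures > Half()) return true;   // "Sync is done ... partial# 1"
            return std::nullopt;                  // "Sync is in progress"
        }
    };

Replaying cookie# 2 from that trace (two failed replies out of three) drives Failures past Half() first, matching the logged "Sync is done: cookie# 2 ... successes# 0, faulires# 2, partial# 1"; the third reply then lands after the quorum has resolved, which is exactly the "Unexpected sync response" branch in the log.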
>> KqpPg::TypeCoercionInsert-useSink >> KqpPg::InsertFromSelect_Simple+useSink >> KqpPg::CreateTableSerialColumns+useSink >> TPQTabletTests::Multiple_PQTablets_1 [GOOD] >> KqpPg::TypeCoercionBulkUpsert >> KqpPg::EmptyQuery+useSink >> Normalizers::ChunksV0MetaNormalizer [GOOD] >> TPQTabletTests::Multiple_PQTablets_2 >> ReadOnlyVDisk::TestStorageLoad [GOOD] >> TTicketParserTest::NebiusAccessServiceAuthenticationOk [GOOD] >> TTicketParserTest::NebiusAuthenticationRetryError >> KqpPg::InsertNoTargetColumns_Simple+useSink |89.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_incremental_backup/ydb-core-tx-datashard-ut_incremental_backup >> KqpPg::CreateTableBulkUpsertAndRead |89.3%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_incremental_backup/ydb-core-tx-datashard-ut_incremental_backup |89.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_incremental_backup/ydb-core-tx-datashard-ut_incremental_backup >> TTicketParserTest::TicketFromCertificateWithValidationDifferentIssuersBad [GOOD] >> TTicketParserTest::TicketFromCertificateWithValidationDefaultGroupGood >> TPQTabletTests::Multiple_PQTablets_2 [GOOD] >> TPQTabletTests::DropTablet_And_Tx ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> Normalizers::ChunksV0MetaNormalizer [GOOD] Test command err: 2025-05-07T08:50:10.463610Z node 1 :BLOB_CACHE NOTICE: ctor_logger.h:56: MaxCacheDataSize: 20971520 InFlightDataSize: 0 2025-05-07T08:50:10.620610Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:138:2170];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-05-07T08:50:10.646541Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:138:2170];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-05-07T08:50:10.646851Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-05-07T08:50:10.655088Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:138:2170];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-05-07T08:50:10.655328Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:138:2170];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-05-07T08:50:10.655640Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:138:2170];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-05-07T08:50:10.655760Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:138:2170];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-05-07T08:50:10.655884Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:138:2170];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-05-07T08:50:10.655997Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:138:2170];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanInsertionDedup; 2025-05-07T08:50:10.656112Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:138:2170];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-05-07T08:50:10.656240Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:138:2170];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-05-07T08:50:10.656367Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:138:2170];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-05-07T08:50:10.656491Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:138:2170];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-05-07T08:50:10.656602Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:138:2170];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-05-07T08:50:10.656728Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:138:2170];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-05-07T08:50:10.693829Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-05-07T08:50:10.694525Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:137;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=11;current_normalizer=CLASS_NAME=Granules; 2025-05-07T08:50:10.694593Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-05-07T08:50:10.694810Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-05-07T08:50:10.694999Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-05-07T08:50:10.695091Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-05-07T08:50:10.695136Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-05-07T08:50:10.695262Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-05-07T08:50:10.695336Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-05-07T08:50:10.695423Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-05-07T08:50:10.695459Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-05-07T08:50:10.695647Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-05-07T08:50:10.695744Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-05-07T08:50:10.695792Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-05-07T08:50:10.695822Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-05-07T08:50:10.695952Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-05-07T08:50:10.696017Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-05-07T08:50:10.696067Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanInsertionDedup;id=CleanInsertionDedup; 2025-05-07T08:50:10.696102Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=8;type=CleanInsertionDedup; 2025-05-07T08:50:10.696177Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanInsertionDedup;id=8; 2025-05-07T08:50:10.696215Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-05-07T08:50:10.696241Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-05-07T08:50:10.696303Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-05-07T08:50:10.696353Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-05-07T08:50:10.696399Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-05-07T08:50:10.696638Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-05-07T08:50:10.696709Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-05-07T08:50:10.696743Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-05-07T08:50:10.696966Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-05-07T08:50:10.697007Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-05-07T08:50:10.697034Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-05-07T08:50:10.697216Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-05-07T08:50:10.697287Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-05-07T08:50:10.697318Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-05-07T08:50:10.697398Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-05-07T08:50:10.697466Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-05-07T08:50:10.697504Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-05-07T08:50:10.697531Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-05-07T08:50:10.698016Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=65; 2025-05-07T08:50:10.698128Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=39; ... 
08:50:52.600098Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[2:373:2378];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=merge.cpp:74;event=DoApply;interval_idx=0; 2025-05-07T08:50:52.600158Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[2:373:2378];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=scanner.cpp:21;event=interval_result_received;interval_idx=0;intervalId=11; 2025-05-07T08:50:52.600253Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[2:373:2378];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=scanner.cpp:47;event=interval_result;interval_idx=0;count=20048;merger=0;interval_id=11; 2025-05-07T08:50:52.600334Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[2:373:2378];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=scanner.cpp:65;event=intervals_finished; 2025-05-07T08:50:52.600481Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[2:373:2378];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:187;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;;); 2025-05-07T08:50:52.600539Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[2:373:2378];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:73;event=DoExtractReadyResults;result=1;count=20048;finished=1; 2025-05-07T08:50:52.600592Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[2:373:2378];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:198;stage=limit exhausted;limit=limits:(bytes=0;chunks=0);; 2025-05-07T08:50:52.600872Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[2:373:2378];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=actor.cpp:104;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-05-07T08:50:52.601083Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[2:373:2378];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:187;stage=start;iterator=ready_results:(count:1;records_count:20048;schema=key1: uint64 key2: uint64 field: string;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;;); 2025-05-07T08:50:52.601143Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[2:373:2378];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:73;event=DoExtractReadyResults;result=0;count=0;finished=1; 2025-05-07T08:50:52.601292Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[2:373:2378];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:229;stage=ready result;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;;);columns=3;rows=20048; 
2025-05-07T08:50:52.601379Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[2:373:2378];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:249;stage=data_format;batch_size=2405760;num_rows=20048;batch_columns=key1,key2,field; 2025-05-07T08:50:52.601636Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[2:373:2378];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:365;event=send_data;compute_actor_id=[2:371:2377];bytes=2405760;rows=20048;faults=0;finished=0;fault=0;schema=key1: uint64 key2: uint64 field: string; 2025-05-07T08:50:52.601802Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[2:373:2378];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:269;stage=finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;;); 2025-05-07T08:50:52.601951Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[2:373:2378];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:187;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;;); 2025-05-07T08:50:52.602103Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[2:373:2378];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;;); 2025-05-07T08:50:52.603218Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[2:373:2378];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=actor.cpp:104;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-05-07T08:50:52.603415Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[2:373:2378];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:187;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;;); 2025-05-07T08:50:52.603567Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[2:373:2378];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;;); 2025-05-07T08:50:52.603637Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: 
actor.cpp:409: Scan [2:373:2378] finished for tablet 9437184 2025-05-07T08:50:52.604258Z node 2 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[2:373:2378];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=actor.cpp:415;event=scan_finish;compute_actor_id=[2:371:2377];stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.003},{"events":["l_bootstrap"],"t":0.005},{"events":["f_processing","f_task_result"],"t":0.009},{"events":["f_ack","l_task_result"],"t":0.307},{"events":["l_ProduceResults","f_Finish"],"t":0.309},{"events":["l_ack","l_processing","l_Finish"],"t":0.31}],"full":{"a":1746607852293623,"name":"_full_task","f":1746607852293623,"d_finished":0,"c":0,"l":1746607852603721,"d":310098},"events":[{"name":"bootstrap","f":1746607852293891,"d_finished":4879,"c":1,"l":1746607852298770,"d":4879},{"a":1746607852603181,"name":"ack","f":1746607852600834,"d_finished":1303,"c":1,"l":1746607852602137,"d":1843},{"a":1746607852603157,"name":"processing","f":1746607852303150,"d_finished":160254,"c":9,"l":1746607852602141,"d":160818},{"name":"ProduceResults","f":1746607852297501,"d_finished":5927,"c":12,"l":1746607852603609,"d":5927},{"a":1746607852603616,"name":"Finish","f":1746607852603616,"d_finished":0,"c":0,"l":1746607852603721,"d":105},{"name":"task_result","f":1746607852303178,"d_finished":158737,"c":8,"l":1746607852600679,"d":158737}],"id":"9437184::3"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;;); 2025-05-07T08:50:52.604352Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[2:373:2378];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=actor.cpp:365;event=send_data;compute_actor_id=[2:371:2377];bytes=0;rows=0;faults=0;finished=1;fault=0;schema=; 2025-05-07T08:50:52.604926Z node 2 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: 
SelfId=[2:373:2378];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=actor.cpp:370;event=scan_finished;compute_actor_id=[2:371:2377];packs_sum=0;bytes=0;bytes_sum=0;rows=0;rows_sum=0;faults=0;finished=1;fault=0;stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.003},{"events":["l_bootstrap"],"t":0.005},{"events":["f_processing","f_task_result"],"t":0.009},{"events":["f_ack","l_task_result"],"t":0.307},{"events":["l_ProduceResults","f_Finish"],"t":0.309},{"events":["l_ack","l_processing","l_Finish"],"t":0.31}],"full":{"a":1746607852293623,"name":"_full_task","f":1746607852293623,"d_finished":0,"c":0,"l":1746607852604409,"d":310786},"events":[{"name":"bootstrap","f":1746607852293891,"d_finished":4879,"c":1,"l":1746607852298770,"d":4879},{"a":1746607852603181,"name":"ack","f":1746607852600834,"d_finished":1303,"c":1,"l":1746607852602137,"d":2531},{"a":1746607852603157,"name":"processing","f":1746607852303150,"d_finished":160254,"c":9,"l":1746607852602141,"d":161506},{"name":"ProduceResults","f":1746607852297501,"d_finished":5927,"c":12,"l":1746607852603609,"d":5927},{"a":1746607852603616,"name":"Finish","f":1746607852603616,"d_finished":0,"c":0,"l":1746607852604409,"d":793},{"name":"task_result","f":1746607852303178,"d_finished":158737,"c":8,"l":1746607852600679,"d":158737}],"id":"9437184::3"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;;); 2025-05-07T08:50:52.605025Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[2:373:2378];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=stats.cpp:8;event=statistic;begin=2025-05-07T08:50:52.292913Z;index_granules=0;index_portions=1;index_batches=939;committed_batches=0;schema_columns=3;filter_columns=0;additional_columns=0;compacted_portions_bytes=0;inserted_portions_bytes=2589264;committed_portions_bytes=0;data_filter_bytes=0;data_additional_bytes=0;delta_bytes=2589264;selected_rows=0; 2025-05-07T08:50:52.605101Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[2:373:2378];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=read_context.h:189;event=scan_aborted;reason=unexpected on destructor; 2025-05-07T08:50:52.605459Z node 2 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[2:373:2378];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=context.h:81;fetching=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;; >> TTicketParserTest::BulkAuthorizationWithUserAccount [GOOD] >> TTicketParserTest::BulkAuthorizationWithUserAccount2 >> KqpPg::JoinWithQueryService+StreamLookup >> IndexBuildTest::MergeIndexTableShardsOnlyWhenReady [GOOD] >> IndexBuildTest::RejectsCancel ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_read_only_vdisk/unittest >> ReadOnlyVDisk::TestStorageLoad [GOOD] Test command err: RandomSeed# 8891420383255848234 Setting VDisk read-only to 1 for position 0 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:0:0] 2025-05-07T08:50:14.789881Z 1 00h02m38.100000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# 
[1:5309:700] 2025-05-07T08:50:14.793132Z 1 00h02m38.100000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5309:700] 2025-05-07T08:50:14.796957Z 1 00h02m38.100000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5309:700] 2025-05-07T08:50:14.802611Z 1 00h02m38.100000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5309:700] 2025-05-07T08:50:14.802859Z 1 00h02m38.100000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5309:700] 2025-05-07T08:50:14.977285Z 1 00h02m38.200000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5309:700] 2025-05-07T08:50:14.992294Z 1 00h02m38.300000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5309:700] 2025-05-07T08:50:15.020526Z 1 00h02m38.500000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5309:700] 2025-05-07T08:50:15.034717Z 1 00h02m38.600000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5309:700] 2025-05-07T08:50:15.375069Z 1 00h02m38.800000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5309:700] 2025-05-07T08:50:15.387641Z 1 00h02m38.900000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5309:700] 2025-05-07T08:50:15.677292Z 1 00h02m39.100000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5309:700] 2025-05-07T08:50:15.678127Z 1 00h02m39.100000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5309:700] 2025-05-07T08:50:15.796796Z 1 00h02m39.200000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5309:700] 2025-05-07T08:50:16.253695Z 1 00h02m39.400000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5309:700] 2025-05-07T08:50:16.271358Z 1 00h02m39.500000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5309:700] 2025-05-07T08:50:16.305743Z 1 00h02m39.700000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5309:700] 2025-05-07T08:50:16.324614Z 1 00h02m39.800000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5309:700] 2025-05-07T08:50:16.402365Z 1 00h02m40.000000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5309:700] 2025-05-07T08:50:16.440207Z 1 00h02m40.100000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5309:700] 2025-05-07T08:50:16.443917Z 1 00h02m40.100000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5309:700] 2025-05-07T08:50:16.479149Z 1 00h02m40.200000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5309:700] 2025-05-07T08:50:16.501010Z 1 00h02m40.300000s :BS_SKELETON ERROR: PDiskId# 
1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5309:700] 2025-05-07T08:50:16.519468Z 1 00h02m40.400000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5309:700] 2025-05-07T08:50:16.540549Z 1 00h02m40.500000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5309:700] 2025-05-07T08:50:16.564784Z 1 00h02m40.600000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5309:700] 2025-05-07T08:50:16.588678Z 1 00h02m40.700000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5309:700] 2025-05-07T08:50:16.610862Z 1 00h02m40.800000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5309:700] 2025-05-07T08:50:16.781751Z 1 00h02m40.900000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5309:700] 2025-05-07T08:50:16.995600Z 1 00h02m41.000000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5309:700] 2025-05-07T08:50:17.099430Z 1 00h02m41.100000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5309:700] 2025-05-07T08:50:17.101997Z 1 00h02m41.100000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5309:700] 2025-05-07T08:50:17.245163Z 1 00h02m41.300000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5309:700] 2025-05-07T08:50:17.260186Z 1 00h02m41.400000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5309:700] 2025-05-07T08:50:17.311310Z 1 00h02m41.600000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5309:700] 2025-05-07T08:50:17.493749Z 1 00h02m41.700000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5309:700] 2025-05-07T08:50:17.691224Z 1 00h02m41.900000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5309:700] 2025-05-07T08:50:17.704117Z 1 00h02m42.000000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5309:700] 2025-05-07T08:50:17.800514Z 1 00h02m42.100000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5309:700] 2025-05-07T08:50:17.804375Z 1 00h02m42.100000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5309:700] 2025-05-07T08:50:17.999447Z 1 00h02m42.300000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5309:700] 2025-05-07T08:50:18.015164Z 1 00h02m42.400000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5309:700] 2025-05-07T08:50:18.046885Z 1 00h02m42.600000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5309:700] 2025-05-07T08:50:18.063129Z 1 00h02m42.700000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5309:700] 
2025-05-07T08:50:18.127528Z 1 00h02m42.900000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5309:700] 2025-05-07T08:50:18.363861Z 1 00h02m43.000000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5309:700] 2025-05-07T08:50:18.385910Z 1 00h02m43.100000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5309:700] 2025-05-07T08:50:18.388717Z 1 00h02m43.100000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5309:700] 2025-05-07T08:50:18.408183Z 1 00h02m43.200000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5309:700] 2025-05-07T08:50:18.425257Z 1 00h02m43.300000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5309:700] 2025-05-07T08:50:18.560330Z 1 00h02m43.400000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5309:700] 2025-05-07T08:50:18.576675Z 1 00h02m43.500000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5309:700] 2025-05-07T08:50:18.590414Z 1 00h02m43.600000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5309:700] 2025-05-07T08:50:18.606014Z 1 00h02m43.700000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5309:700] 2025-05-07T08:50:18.646292Z 1 00h02m43.800000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5309:700] 2025-05-07T08:50:18.912323Z 1 00h02m43.900000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5309:700] 2025-05-07T08:50:18.995825Z 1 00h02m44.000000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5309:700] 2025-05-07T08:50:19.025635Z 1 00h02m44.100000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5309:700] 2025-05-07T08:50:19.048190Z 1 00h02m44.200000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5309:700] 2025-05-07T08:50:19.077272Z 1 00h02m44.300000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5309:700] 2025-05-07T08:50:19.435051Z 1 00h02m44.500000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5309:700] 2025-05-07T08:50:19.461910Z 1 00h02m44.600000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5309:700] 2025-05-07T08:50:19.494940Z 1 00h02m44.800000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5309:700] 2025-05-07T08:50:19.578854Z 1 00h02m44.900000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5309:700] 2025-05-07T08:50:19.781194Z 1 00h02m45.100000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5309:700] 2025-05-07T08:50:19.806274Z 1 00h02m45.200000s :BS_SKELETON ERROR: PDiskId# 1000 
VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5309:700] 2025-05-07T08:50:19.824869Z 1 00h02m45.300000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5309:700] 2025-05-07T08:50:19.966360Z 1 00h02m45.500000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5309:700] 2025-05-07T08:50:20.248692Z 1 00h02m45.600000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5309:700] 2025-05-07T08:50:20.286989Z 1 00h02m45.800000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5309:700] 2025-05-07T08:50:20.334578Z 1 00h02m45.900000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5309:700] 2025-05-07T08:50:20.526933Z 1 00h02m46.100000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1 ... k [82000000:1:0:5:0] Setting VDisk read-only to 0 for position 6 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:6:0] 2025-05-07T08:50:36.434139Z 8 00h20m54.412560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5358:749] 2025-05-07T08:50:36.438915Z 8 00h20m54.412560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5358:749] 2025-05-07T08:50:36.459763Z 8 00h20m54.412560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5358:749] 2025-05-07T08:50:36.473498Z 8 00h20m54.412560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5358:749] 2025-05-07T08:50:36.474448Z 8 00h20m54.412560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5358:749] 2025-05-07T08:50:36.822441Z 8 00h20m54.512560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5358:749] 2025-05-07T08:50:36.841023Z 8 00h20m54.612560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5358:749] 2025-05-07T08:50:36.873175Z 8 00h20m54.812560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5358:749] 2025-05-07T08:50:36.898693Z 8 00h20m54.912560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5358:749] 2025-05-07T08:50:36.954459Z 8 00h20m55.112560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5358:749] 2025-05-07T08:50:36.977632Z 8 00h20m55.212560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5358:749] 2025-05-07T08:50:37.038717Z 8 00h20m55.412560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5358:749] 2025-05-07T08:50:37.040881Z 8 00h20m55.412560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5358:749] 2025-05-07T08:50:37.066643Z 8 00h20m55.512560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5358:749] 2025-05-07T08:50:37.144334Z 8 00h20m55.712560s 
:BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5358:749] 2025-05-07T08:50:37.655710Z 8 00h20m55.812560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5358:749] 2025-05-07T08:50:37.895230Z 8 00h20m56.012560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5358:749] 2025-05-07T08:50:37.944039Z 8 00h20m56.112560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5358:749] 2025-05-07T08:50:37.986304Z 8 00h20m56.312560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5358:749] 2025-05-07T08:50:38.020269Z 8 00h20m56.412560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5358:749] 2025-05-07T08:50:38.022170Z 8 00h20m56.412560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5358:749] 2025-05-07T08:50:38.045895Z 8 00h20m56.512560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5358:749] 2025-05-07T08:50:38.089109Z 8 00h20m56.712560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5358:749] 2025-05-07T08:50:38.109237Z 8 00h20m56.812560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5358:749] 2025-05-07T08:50:38.137121Z 8 00h20m56.912560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5358:749] 2025-05-07T08:50:38.169859Z 8 00h20m57.012560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5358:749] 2025-05-07T08:50:38.233605Z 8 00h20m57.112560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5358:749] 2025-05-07T08:50:38.261702Z 8 00h20m57.212560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5358:749] 2025-05-07T08:50:38.291124Z 8 00h20m57.312560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5358:749] 2025-05-07T08:50:38.403798Z 8 00h20m57.412560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5358:749] 2025-05-07T08:50:38.405032Z 8 00h20m57.412560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5358:749] 2025-05-07T08:50:39.145809Z 8 00h20m57.612560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5358:749] 2025-05-07T08:50:39.196083Z 8 00h20m57.712560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5358:749] 2025-05-07T08:50:39.433470Z 8 00h20m57.912560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5358:749] 2025-05-07T08:50:40.001427Z 8 00h20m58.012560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5358:749] 2025-05-07T08:50:40.076031Z 8 00h20m58.212560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in 
read-only Sender# [1:5358:749] 2025-05-07T08:50:40.096791Z 8 00h20m58.312560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5358:749] 2025-05-07T08:50:40.403853Z 8 00h20m58.412560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5358:749] 2025-05-07T08:50:40.405638Z 8 00h20m58.412560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5358:749] 2025-05-07T08:50:40.451137Z 8 00h20m58.612560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5358:749] 2025-05-07T08:50:40.474860Z 8 00h20m58.712560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5358:749] 2025-05-07T08:50:40.948805Z 8 00h20m58.912560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5358:749] 2025-05-07T08:50:41.000220Z 8 00h20m59.012560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5358:749] 2025-05-07T08:50:41.743931Z 8 00h20m59.212560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5358:749] 2025-05-07T08:50:41.758772Z 8 00h20m59.312560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5358:749] 2025-05-07T08:50:41.786565Z 8 00h20m59.412560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5358:749] 2025-05-07T08:50:41.788282Z 8 00h20m59.412560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5358:749] 2025-05-07T08:50:41.869352Z 8 00h20m59.612560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5358:749] 2025-05-07T08:50:42.027405Z 8 00h20m59.712560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5358:749] 2025-05-07T08:50:42.100716Z 8 00h20m59.912560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5358:749] 2025-05-07T08:50:42.761429Z 8 00h21m00.012560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5358:749] 2025-05-07T08:50:42.924146Z 8 00h21m00.212560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5358:749] 2025-05-07T08:50:42.944917Z 8 00h21m00.312560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5358:749] 2025-05-07T08:50:43.028823Z 8 00h21m00.412560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5358:749] 2025-05-07T08:50:43.031122Z 8 00h21m00.412560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5358:749] 2025-05-07T08:50:43.058882Z 8 00h21m00.512560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5358:749] 2025-05-07T08:50:43.081162Z 8 00h21m00.612560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5358:749] 2025-05-07T08:50:43.108831Z 8 00h21m00.712560s 
:BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5358:749] 2025-05-07T08:50:43.304220Z 8 00h21m00.812560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5358:749] 2025-05-07T08:50:43.327042Z 8 00h21m00.912560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5358:749] 2025-05-07T08:50:43.528408Z 8 00h21m01.112560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5358:749] 2025-05-07T08:50:43.545938Z 8 00h21m01.212560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5358:749] 2025-05-07T08:50:43.800496Z 8 00h21m01.412560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5358:749] 2025-05-07T08:50:43.823683Z 8 00h21m01.512560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5358:749] 2025-05-07T08:50:43.845374Z 8 00h21m01.612560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5358:749] 2025-05-07T08:50:43.972001Z 8 00h21m01.812560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5358:749] 2025-05-07T08:50:44.132787Z 8 00h21m01.912560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5358:749] 2025-05-07T08:50:44.278063Z 8 00h21m02.112560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5358:749] 2025-05-07T08:50:44.298557Z 8 00h21m02.212560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5358:749] 2025-05-07T08:50:44.412394Z 8 00h21m02.412560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5358:749] 2025-05-07T08:50:44.412880Z 8 00h21m02.412560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5358:749] 2025-05-07T08:50:44.416058Z 8 00h21m02.412560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5358:749] >> TPQTabletTests::DropTablet_And_Tx [GOOD] >> TPQTabletTests::DropTablet_Before_Write >> TSubDomainTest::ConsistentCopyTable [GOOD] |89.3%| [TA] $(B)/ydb/core/blobstorage/ut_blobstorage/ut_read_only_vdisk/test-results/unittest/{meta.json ... 
results_accumulator.log} >> TTicketParserTest::BulkAuthorizationRetryError [GOOD] >> TTicketParserTest::BulkAuthorizationRetryErrorImmediately >> TPQTabletTests::DropTablet_Before_Write [GOOD] >> TPQTabletTests::DropTablet_And_UnplannedConfigTransaction >> TTicketParserTest::AuthenticationUnavailable [GOOD] >> TTicketParserTest::AuthenticationRetryError >> TPQTest::TestSeveralOwners >> TPQTabletTests::DropTablet_And_UnplannedConfigTransaction [GOOD] >> TTicketParserTest::TicketFromCertificateWithValidationBad [GOOD] >> TTicketParserTest::NebiusAuthorizationWithRequiredPermissions >> TTopicReaderTests::TestRun_Read_Less_Messages_Than_Sent [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_proxy/ut_base_tenant/unittest >> TSubDomainTest::ConsistentCopyTable [GOOD] Test command err: 2025-05-07T08:50:31.112468Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501623514376171591:2190];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:50:31.112544Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0048f3/r3tmp/tmpKYjBFU/pdisk_1.dat 2025-05-07T08:50:31.863651Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:50:31.867338Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:50:31.867511Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:50:31.871449Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:4822 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 2025-05-07T08:50:32.114481Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2663: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7501623514376171738:2117], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-05-07T08:50:32.114592Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2663: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7501623514376171738:2117], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 72057594046644480 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-05-07T08:50:32.114627Z node 1 :TX_PROXY_SCHEME_CACHE TRACE: cache.cpp:2283: Create subscriber: self# [1:7501623514376171738:2117], path# /dc-1, domainOwnerId# 72057594046644480 2025-05-07T08:50:32.114880Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:960: [main][1:7501623518671139291:2258][/dc-1] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-05-07T08:50:32.144477Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7501623514376171412:2049] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7501623518671139295:2258] 2025-05-07T08:50:32.144563Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7501623514376171412:2049] Subscribe: subscriber# [1:7501623518671139295:2258], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-05-07T08:50:32.144686Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7501623514376171415:2052] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7501623518671139296:2258] 2025-05-07T08:50:32.144712Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7501623514376171415:2052] Subscribe: subscriber# [1:7501623518671139296:2258], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-05-07T08:50:32.144739Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7501623514376171418:2055] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7501623518671139297:2258] 2025-05-07T08:50:32.144753Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7501623514376171418:2055] Subscribe: subscriber# [1:7501623518671139297:2258], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-05-07T08:50:32.144853Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7501623518671139295:2258][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7501623514376171412:2049] 2025-05-07T08:50:32.144878Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7501623518671139296:2258][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7501623514376171415:2052] 2025-05-07T08:50:32.144915Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7501623518671139297:2258][/dc-1] Handle 
NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7501623514376171418:2055] 2025-05-07T08:50:32.144989Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:807: [main][1:7501623518671139291:2258][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7501623518671139292:2258] 2025-05-07T08:50:32.145036Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:807: [main][1:7501623518671139291:2258][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7501623518671139293:2258] 2025-05-07T08:50:32.145075Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:836: [main][1:7501623518671139291:2258][/dc-1] Set up state: owner# [1:7501623514376171738:2117], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-05-07T08:50:32.145223Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:807: [main][1:7501623518671139291:2258][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7501623518671139294:2258] 2025-05-07T08:50:32.145271Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:854: [main][1:7501623518671139291:2258][/dc-1] Path was already updated: owner# [1:7501623514376171738:2117], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-05-07T08:50:32.145323Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7501623514376171412:2049] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7501623518671139295:2258] 2025-05-07T08:50:32.145341Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7501623514376171415:2052] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7501623518671139296:2258] 2025-05-07T08:50:32.145354Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7501623514376171418:2055] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7501623518671139297:2258] 2025-05-07T08:50:32.235359Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7501623514376171714:2104] Handle TEvNavigate describe path dc-1 2025-05-07T08:50:32.235448Z node 1 :TX_PROXY DEBUG: describe.cpp:227: Actor# [1:7501623518671139299:2260] HANDLE EvNavigateScheme dc-1 2025-05-07T08:50:32.299169Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2550: HandleNotify: self# [1:7501623514376171738:2117], notify# NKikimr::TSchemeBoardEvents::TEvNotifyUpdate { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DescribeSchemeResult: Status: StatusSuccess Path: "/dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 
UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/dc-1" } } } PathId: 1 PathOwnerId: 72057594046644480 } 2025-05-07T08:50:32.299625Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2425: ResolveCacheItem: self# [1:7501623514376171738:2117], notify# NKikimr::TSchemeBoardEvents::TEvNotifyUpdate { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DescribeSchemeResult: Status: StatusSuccess Path: "/dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/dc-1" } } } PathId: 1 PathOwnerId: 72057594046644480 }, by path# { Subscriber: { Subscriber: [1:7501623518671139291:2258] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 0 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-05-07T08:50:32.299891Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1818: FillEntry for TNavigate: self# [1:7501623514376171738:2117], cacheItem# { Subscriber: { Subscriber: [1:7501623518671139291:2258] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 0 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-05-07T08:50:32.300127Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:264: Send result: self# [1:7501623518671139300:2261], recipient# [1:7501623518671139290:2257], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 
72057594046644480 Instant: 0 ResultSet [{ Path: dc-1 TableId: [72057594046644480:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: fals ... vNavigateKeySet: self# [6:7501623587517471322:2105], request# { ErrorCount: 0 DatabaseName: /dc-1/USER_1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo },{ Path: dc-1/USER_1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-05-07T08:50:53.582283Z node 6 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1818: FillEntry for TNavigate: self# [6:7501623587517471322:2105], cacheItem# { Subscriber: { Subscriber: [6:7501623608992308045:2239] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-05-07T08:50:53.582326Z node 6 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1818: FillEntry for TNavigate: self# [6:7501623587517471322:2105], cacheItem# { Subscriber: { Subscriber: [6:7501623608992308046:2240] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-05-07T08:50:53.582429Z node 6 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:264: Send result: self# [6:7501623608992308068:2244], recipient# [6:7501623608992308043:2323], result# { ErrorCount: 2 DatabaseName: /dc-1/USER_1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo },{ Path: dc-1/USER_1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo }] } 2025-05-07T08:50:53.582814Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [6:7501623608992308043:2323], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:50:53.690144Z node 6 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2663: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [6:7501623587517471322:2105], request# { ErrorCount: 0 DatabaseName: /dc-1/USER_1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo },{ Path: dc-1/USER_1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-05-07T08:50:53.690281Z node 6 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1818: FillEntry for TNavigate: self# [6:7501623587517471322:2105], cacheItem# { Subscriber: { Subscriber: [6:7501623608992308045:2239] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-05-07T08:50:53.690325Z node 6 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1818: FillEntry for TNavigate: self# [6:7501623587517471322:2105], cacheItem# { Subscriber: { Subscriber: [6:7501623608992308046:2240] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-05-07T08:50:53.690437Z node 6 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:264: Send result: self# [6:7501623608992308069:2245], recipient# [6:7501623608992308043:2323], result# { ErrorCount: 2 DatabaseName: /dc-1/USER_1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo },{ Path: dc-1/USER_1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo }] } 2025-05-07T08:50:53.690880Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [6:7501623608992308043:2323], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:50:54.090321Z node 6 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2663: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [6:7501623587517471322:2105], request# { ErrorCount: 0 DatabaseName: /dc-1/USER_1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo },{ Path: dc-1/USER_1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-05-07T08:50:54.090463Z node 6 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1818: FillEntry for TNavigate: self# [6:7501623587517471322:2105], cacheItem# { Subscriber: { Subscriber: [6:7501623608992308045:2239] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-05-07T08:50:54.090519Z node 6 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1818: FillEntry for TNavigate: self# [6:7501623587517471322:2105], cacheItem# { Subscriber: { Subscriber: [6:7501623608992308046:2240] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-05-07T08:50:54.090652Z node 6 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:264: Send result: self# [6:7501623613287275366:2246], recipient# [6:7501623608992308043:2323], result# { ErrorCount: 2 DatabaseName: /dc-1/USER_1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo },{ Path: dc-1/USER_1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo }] } 2025-05-07T08:50:54.091035Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [6:7501623608992308043:2323], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:50:54.094094Z node 6 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2663: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [6:7501623587517471322:2105], request# { ErrorCount: 0 DatabaseName: /dc-1/USER_1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-05-07T08:50:54.094235Z node 6 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1818: FillEntry for TNavigate: self# [6:7501623587517471322:2105], cacheItem# { Subscriber: { Subscriber: [6:7501623591812438814:2227] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-05-07T08:50:54.094328Z node 6 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:264: Send result: self# [6:7501623613287275368:2247], recipient# [6:7501623613287275367:2325], result# { ErrorCount: 1 DatabaseName: /dc-1/USER_1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] }
>> TTicketParserTest::AuthorizationRetryErrorImmediately [GOOD]
>> TTicketParserTest::AuthorizationWithRequiredPermissions
>> TPQTabletTests::Huge_ProposeTransacton
>> TTicketParserTest::Authorization [GOOD]
>> TTicketParserTest::AuthorizationModify
>> TPartitionTests::TabletConfig_Is_Newer_That_PartitionConfig
>> TTopicReaderTests::TestRun_ReadMessages_Output_Base64 [GOOD]
>> TTicketParserTest::NebiusAuthorizationRetryError [GOOD]
>> TTicketParserTest::NebiusAuthorizationRetryErrorImmediately
>> TPartitionTests::TabletConfig_Is_Newer_That_PartitionConfig [GOOD]
>> KqpPg::EmptyQuery+useSink [GOOD]
>> KqpPg::EmptyQuery-useSink
>> TPQTest::TestSeveralOwners [GOOD]
>> TPQTest::TestReserveBytes
>> TPartitionTests::ShadowPartitionCountersRestore
>> KqpPg::NoTableQuery+useSink [GOOD]
>> KqpPg::NoTableQuery-useSink
>> TTicketParserTest::TicketFromCertificateWithValidationDefaultGroupGood [GOOD]
>> TTicketParserTest::TicketFromCertificateWithValidationCheckIssuerBad
>> TPartitionChooserSuite::TPartitionChooserActor_SplitMergeEnabled_SourceId_OldPartitionExists_NotBoundary_Test
>> TPartitionTests::ShadowPartitionCountersRestore [GOOD]
------- [TM] {asan, default-linux-x86_64, release} ydb/public/lib/ydb_cli/topic/ut/unittest >> TTopicReaderTests::TestRun_Read_Less_Messages_Than_Sent [GOOD]
Test command err:
=== Starting PQ server ===
Server->StartServer(false);
2025-05-07T08:50:02.430204Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501623389524595448:2071];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:50:02.430262Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784:
fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-05-07T08:50:02.499244Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7501623389562653910:2074];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:50:02.499928Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-05-07T08:50:02.690160Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-05-07T08:50:02.716917Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00281a/r3tmp/tmpzKn5Hu/pdisk_1.dat 2025-05-07T08:50:03.151643Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:50:03.155860Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:50:03.155988Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:50:03.156792Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:50:03.156839Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:50:03.164397Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-05-07T08:50:03.164555Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:50:03.165019Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 24171, node 1 2025-05-07T08:50:03.451870Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/zvgn/00281a/r3tmp/yandexOGUg1U.tmp 2025-05-07T08:50:03.451925Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/zvgn/00281a/r3tmp/yandexOGUg1U.tmp 2025-05-07T08:50:03.452100Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/zvgn/00281a/r3tmp/yandexOGUg1U.tmp 2025-05-07T08:50:03.452269Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-05-07T08:50:03.550462Z INFO: TTestServer started on Port 8765 GrpcPort 24171 TClient is connected to server localhost:8765 PQClient connected to localhost:24171 === TenantModeEnabled() = 0 === Init PQ - start server on port 24171 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:50:04.025538Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "Root" StoragePools { Name: "/Root:test" Kind: "test" } } } TxId: 281474976710657 TabletId: 72057594046644480 PeerName: "" , at schemeshard: 72057594046644480 2025-05-07T08:50:04.025819Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //Root, opId: 281474976710657:0, at schemeshard: 72057594046644480 2025-05-07T08:50:04.026060Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 0 2025-05-07T08:50:04.026290Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 281474976710657:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-05-07T08:50:04.026342Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 2025-05-07T08:50:04.029438Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 281474976710657, response: Status: StatusAccepted TxId: 281474976710657 SchemeshardId: 72057594046644480 PathId: 1, at schemeshard: 72057594046644480 2025-05-07T08:50:04.029577Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976710657, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //Root 2025-05-07T08:50:04.029740Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 281474976710657:0, at schemeshard: 72057594046644480 2025-05-07T08:50:04.029791Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 281474976710657:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046644480 2025-05-07T08:50:04.029819Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 281474976710657:0 ProgressState no shards to create, do next state 2025-05-07T08:50:04.029863Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 281474976710657:0 2 -> 3 2025-05-07T08:50:04.032750Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 281474976710657:0, 
at schemeshard: 72057594046644480 2025-05-07T08:50:04.032794Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 281474976710657:0 ProgressState, at schemeshard: 72057594046644480 2025-05-07T08:50:04.032817Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 281474976710657:0 3 -> 128 waiting... 2025-05-07T08:50:04.036339Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 281474976710657:0, at schemeshard: 72057594046644480 2025-05-07T08:50:04.036379Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 281474976710657:0, at schemeshard: 72057594046644480 2025-05-07T08:50:04.036427Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 281474976710657:0, at tablet# 72057594046644480 2025-05-07T08:50:04.036483Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 281474976710657 ready parts: 1/1 2025-05-07T08:50:04.041554Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046644480 Flags: 2 } ExecLevel: 0 TxId: 281474976710657 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:50:04.042045Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T08:50:04.042081Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 281474976710657, ready parts: 0/1, is published: true 2025-05-07T08:50:04.042111Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T08:50:04.044361Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 281474976710657:4294967295 from tablet: 72057594046644480 to tablet: 72057594046316545 cookie: 0:281474976710657 msg type: 269090816 2025-05-07T08:50:04.044586Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 281474976710657, partId: 4294967295, tablet: 72057594046316545 2025-05-07T08:50:04.051463Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 1746607804088, transactions count in step: 1, at schemeshard: 72057594046644480 2025-05-07T08:50:04.051613Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 281474976710657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1746607804088 MediatorID: 72057594046382081 TabletID: 72057594046644480, at schemeshard: 72057594046644480 2025-05-07T08:50:04.051644Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 281474976710657:0, at tablet# 72057594046644480 2025-05-07T08:50:04.051919Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 281474976710657:0 128 -> 240 2025-05-07T08:50:04.051951Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 281474976710657:0, at tablet# 72057594046644480 2025-05-07T08:50:04.052142Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 1 2025-05-07T08:50:04.052226Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046644480, LocalPathId: 1], at schemeshard: 72057594046644480 2025-05-07T08:50:04.069327Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046644480 2025-05-07T08:50:04.069393Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046644480, txId: 281474976710657, path id: [OwnerId: 72057594046644480, LocalPathId: 1] 2025-05-07T08:50:04.069622Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046644480 2025-05-07T08:50:04.069639Z node 1 :FLAT_TX_SC ... ions=[], ActiveFamilyCount=1) generation 1 step 1 2025-05-07T08:50:54.055137Z node 6 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1322: [72075186224037898][rt3.dc1--topic1] consumer cli start rebalancing. familyCount=1, sessionCount=1, desiredFamilyCount=1, allowPlusOne=0 2025-05-07T08:50:54.055185Z node 6 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1399: [72075186224037898][rt3.dc1--topic1] consumer cli balancing duration: 0.000272s 2025-05-07T08:50:54.059003Z node 5 :PQ_READ_PROXY INFO: partition_actor.cpp:962: session cookie 1 consumer shared/cli session shared/cli_5_1_8573980531054676633_v1 TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1) pipe restart attempt 0 pipe creation result: OK TabletId: 72075186224037897 Generation: 1, pipe: [5:7501623614354711182:2661] 2025-05-07T08:50:54.059149Z node 5 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 1 consumer shared/cli session shared/cli_5_1_8573980531054676633_v1 grpc read done: success# 1, data# { read_request { bytes_size: 52428800 } } 2025-05-07T08:50:54.059380Z node 5 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:1816: session cookie 1 consumer shared/cli session shared/cli_5_1_8573980531054676633_v1 got read request: guid# d805dd2-fac0528f-884f0f95-7e5e3fbe 2025-05-07T08:50:54.059773Z node 5 :PQ_READ_PROXY DEBUG: caching_service.cpp:282: Direct read cache: registered server session: shared/cli_5_1_8573980531054676633_v1:1 with generation 1 2025-05-07T08:50:54.073388Z node 5 :PQ_READ_PROXY DEBUG: partition_actor.cpp:652: session cookie 1 consumer shared/cli session shared/cli_5_1_8573980531054676633_v1 TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1) initDone 0 event { CmdGetClientOffsetResult { Offset: 0 EndOffset: 3 SizeLag: 409 WriteTimestampEstimateMS: 1746607854045 ClientHasAnyCommits: false } Cookie: 18446744073709551615 } 2025-05-07T08:50:54.073450Z node 5 :PQ_READ_PROXY INFO: partition_actor.cpp:683: session cookie 1 consumer shared/cli session shared/cli_5_1_8573980531054676633_v1 INIT DONE TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1) EndOffset 3 readOffset 0 committedOffset 0 2025-05-07T08:50:54.073522Z 
node 5 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:1413: session cookie 1 consumer shared/cli session shared/cli_5_1_8573980531054676633_v1 sending to client partition status 2025-05-07T08:50:54.074678Z :INFO: [] [] [5a3ccb30-59c8be02-3f680d79-fef2d1b1] [] Confirm partition stream create. Partition stream id: 1. Cluster: "-". Topic: "/topic1". Partition: 0. Read offset: (NULL) 2025-05-07T08:50:54.075214Z node 5 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 1 consumer shared/cli session shared/cli_5_1_8573980531054676633_v1 grpc read done: success# 1, data# { start_partition_session_response { partition_session_id: 1 } } 2025-05-07T08:50:54.075383Z node 5 :PQ_READ_PROXY INFO: read_session_actor.cpp:533: session cookie 1 consumer shared/cli session shared/cli_5_1_8573980531054676633_v1 got StartRead from client: partition# TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1), readOffset# 0, commitOffset# (empty maybe) 2025-05-07T08:50:54.075444Z node 5 :PQ_READ_PROXY INFO: partition_actor.cpp:1002: session cookie 1 consumer shared/cli session shared/cli_5_1_8573980531054676633_v1 Start reading TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1) EndOffset 3 readOffset 0 committedOffset 0 clientCommitOffset (empty maybe) clientReadOffset 0 2025-05-07T08:50:54.075475Z node 5 :PQ_READ_PROXY DEBUG: partition_actor.cpp:948: session cookie 1 consumer shared/cli session shared/cli_5_1_8573980531054676633_v1 TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1) ready for read with readOffset 0 endOffset 3 2025-05-07T08:50:54.075540Z node 5 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2309: session cookie 1 consumer shared/cli session shared/cli_5_1_8573980531054676633_v1 partition ready for read: partition# TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1), readOffset# 0, endOffset# 3, WTime# 0, sizeLag# 409 2025-05-07T08:50:54.075558Z node 5 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2320: session cookie 1 consumer shared/cli session shared/cli_5_1_8573980531054676633_v1TEvPartitionReady. Aval parts: 1 2025-05-07T08:50:54.075604Z node 5 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2243: session cookie 1 consumer shared/cli session shared/cli_5_1_8573980531054676633_v1 performing read request: guid# 9e6480a1-5d0e55b2-73e82fc0-6f7ec80b, from# TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1), count# 3, size# 490, partitionsAsked# 1, maxTimeLag# 0ms 2025-05-07T08:50:54.075741Z node 5 :PQ_READ_PROXY DEBUG: partition_actor.cpp:1369: session cookie 1 consumer shared/cli session shared/cli_5_1_8573980531054676633_v1 READ FROM TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1)maxCount 3 maxSize 490 maxTimeLagMs 0 readTimestampMs 0 readOffset 0 EndOffset 3 ClientCommitOffset 0 committedOffset 0 Guid 9e6480a1-5d0e55b2-73e82fc0-6f7ec80b 2025-05-07T08:50:54.077248Z node 5 :PQ_READ_PROXY DEBUG: partition_actor.cpp:652: session cookie 1 consumer shared/cli session shared/cli_5_1_8573980531054676633_v1 TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1) initDone 1 event { CmdReadResult { MaxOffset: 3 Result { Offset: 0 Data: "... 79 bytes ..." SourceId: "\000source1" SeqNo: 1 WriteTimestampMS: 1746607853845 CreateTimestampMS: 1746607853847 UncompressedSize: 8 PartitionKey: "" ExplicitHash: "" } Result { Offset: 1 Data: "... 79 bytes ..." 
SourceId: "\000source1" SeqNo: 2 WriteTimestampMS: 1746607853913 CreateTimestampMS: 1746607853847 UncompressedSize: 8 PartitionKey: "" ExplicitHash: "" } Result { Offset: 2 Data: "... 79 bytes ..." SourceId: "\000source1" SeqNo: 3 WriteTimestampMS: 1746607853913 CreateTimestampMS: 1746607853847 UncompressedSize: 8 PartitionKey: "" ExplicitHash: "" } BlobsFromDisk: 0 BlobsFromCache: 0 SizeLag: 43 RealReadOffset: 2 WaitQuotaTimeMs: 0 EndOffset: 3 StartOffset: 0 } Cookie: 0 } 2025-05-07T08:50:54.077430Z node 5 :PQ_READ_PROXY DEBUG: partition_actor.cpp:1252: session cookie 1 consumer shared/cli session shared/cli_5_1_8573980531054676633_v1 TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1) wait data in partition inited, cookie 1 from offset3 2025-05-07T08:50:54.077470Z node 5 :PQ_READ_PROXY DEBUG: partition_actor.cpp:880: session cookie 1 consumer shared/cli session shared/cli_5_1_8573980531054676633_v1 after read state TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1) EndOffset 3 ReadOffset 3 ReadGuid 9e6480a1-5d0e55b2-73e82fc0-6f7ec80b has messages 1 2025-05-07T08:50:54.077551Z node 5 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:1917: session cookie 1 consumer shared/cli session shared/cli_5_1_8573980531054676633_v1 read done: guid# 9e6480a1-5d0e55b2-73e82fc0-6f7ec80b, partition# TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1), size# 371 2025-05-07T08:50:54.077579Z node 5 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2079: session cookie 1 consumer shared/cli session shared/cli_5_1_8573980531054676633_v1 response to read: guid# 9e6480a1-5d0e55b2-73e82fc0-6f7ec80b 2025-05-07T08:50:54.077806Z node 5 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2122: session cookie 1 consumer shared/cli session shared/cli_5_1_8573980531054676633_v1 Process answer. Aval parts: 0 2025-05-07T08:50:54.081283Z :DEBUG: [] [] [5a3ccb30-59c8be02-3f680d79-fef2d1b1] [] Got ReadResponse, serverBytesSize = 371, now ReadSizeBudget = 0, ReadSizeServerDelta = 52428429 2025-05-07T08:50:54.081420Z :DEBUG: [] [] [5a3ccb30-59c8be02-3f680d79-fef2d1b1] [] In ContinueReadingDataImpl, ReadSizeBudget = 0, ReadSizeServerDelta = 52428429 2025-05-07T08:50:54.081743Z :DEBUG: [] Decompression task done. Partition/PartitionSessionId: 1 (0-2) 2025-05-07T08:50:54.081806Z :DEBUG: [] [] [5a3ccb30-59c8be02-3f680d79-fef2d1b1] [] Returning serverBytesSize = 371 to budget 2025-05-07T08:50:54.081839Z :DEBUG: [] [] [5a3ccb30-59c8be02-3f680d79-fef2d1b1] [] In ContinueReadingDataImpl, ReadSizeBudget = 371, ReadSizeServerDelta = 52428429 2025-05-07T08:50:54.082116Z :DEBUG: [] [] [5a3ccb30-59c8be02-3f680d79-fef2d1b1] [] After sending read request: ReadSizeBudget = 0, ReadSizeServerDelta = 52428800 2025-05-07T08:50:54.082459Z node 5 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 1 consumer shared/cli session shared/cli_5_1_8573980531054676633_v1 grpc read done: success# 1, data# { read_request { bytes_size: 371 } } 2025-05-07T08:50:54.082591Z node 5 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:1816: session cookie 1 consumer shared/cli session shared/cli_5_1_8573980531054676633_v1 got read request: guid# 361c9edb-a743658a-eaada0a3-442d5f92 2025-05-07T08:50:54.086093Z :DEBUG: [] Take Data. Partition 0. Read: {0, 0} (0-0) 2025-05-07T08:50:54.086164Z :DEBUG: [] Take Data. Partition 0. Read: {1, 0} (1-1) 2025-05-07T08:50:54.086196Z :DEBUG: [] Take Data. Partition 0. 
Read: {1, 1} (2-2) 2025-05-07T08:50:54.086247Z :DEBUG: [] [] [5a3ccb30-59c8be02-3f680d79-fef2d1b1] [] The application data is transferred to the client. Number of messages 3, size 24 bytes 2025-05-07T08:50:54.086302Z :DEBUG: [] [] [5a3ccb30-59c8be02-3f680d79-fef2d1b1] [] Returning serverBytesSize = 0 to budget 2025-05-07T08:50:54.086481Z :INFO: [] [] [5a3ccb30-59c8be02-3f680d79-fef2d1b1] Closing read session. Close timeout: 0.000000s 2025-05-07T08:50:54.086521Z :INFO: [] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): -:/topic1:0:1:2:0 2025-05-07T08:50:54.086571Z :INFO: [] [] [5a3ccb30-59c8be02-3f680d79-fef2d1b1] Counters: { Errors: 0 CurrentSessionLifetimeMs: 120 BytesRead: 24 MessagesRead: 3 BytesReadCompressed: 24 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-05-07T08:50:54.086668Z :NOTICE: [] [] [5a3ccb30-59c8be02-3f680d79-fef2d1b1] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Close with zero timeout " } 2025-05-07T08:50:54.086730Z :DEBUG: [] [] [5a3ccb30-59c8be02-3f680d79-fef2d1b1] [] Abort session to cluster 2025-05-07T08:50:54.087253Z :NOTICE: [] [] [5a3ccb30-59c8be02-3f680d79-fef2d1b1] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2025-05-07T08:50:54.088602Z node 5 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 1 consumer shared/cli session shared/cli_5_1_8573980531054676633_v1 grpc read done: success# 0, data# { } 2025-05-07T08:50:54.088633Z node 5 :PQ_READ_PROXY INFO: read_session_actor.cpp:125: session cookie 1 consumer shared/cli session shared/cli_5_1_8573980531054676633_v1 grpc read failed 2025-05-07T08:50:54.088664Z node 5 :PQ_READ_PROXY INFO: read_session_actor.cpp:92: session cookie 1 consumer shared/cli session shared/cli_5_1_8573980531054676633_v1 grpc closed 2025-05-07T08:50:54.088705Z node 5 :PQ_READ_PROXY INFO: read_session_actor.cpp:377: session cookie 1 consumer shared/cli session shared/cli_5_1_8573980531054676633_v1 is DEAD 2025-05-07T08:50:54.089730Z node 5 :PQ_READ_PROXY DEBUG: caching_service.cpp:138: Direct read cache: server session deregistered: shared/cli_5_1_8573980531054676633_v1 2025-05-07T08:50:54.090181Z node 6 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037898][rt3.dc1--topic1] pipe [5:7501623614354711180:2658] disconnected; active server actors: 1 2025-05-07T08:50:54.090221Z node 6 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1673: [72075186224037898][rt3.dc1--topic1] pipe [5:7501623614354711180:2658] client cli disconnected session shared/cli_5_1_8573980531054676633_v1
>> TTicketParserTest::BulkAuthorizationWithUserAccount2 [GOOD]
>> TTicketParserTest::BulkAuthorizationUnavailable
>> TTicketParserTest::BulkAuthorizationRetryErrorImmediately [GOOD]
>> TTicketParserTest::BulkAuthorization
>> TPartitionTests::TestNonConflictingActsBatchOk
>> KqpPg::InsertNoTargetColumns_Simple+useSink [GOOD]
>> KqpPg::InsertNoTargetColumns_Simple-useSink
------- [TM] {asan, default-linux-x86_64, release} ydb/public/lib/ydb_cli/topic/ut/unittest >> TTopicReaderTests::TestRun_ReadMessages_Output_Base64 [GOOD]
Test command err:
=== Starting PQ server ===
Server->StartServer(false);
2025-05-07T08:50:03.022179Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501623395211317634:2076];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:50:03.022248Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-05-07T08:50:03.048792Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7501623393583186822:2072];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:50:03.048837Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-05-07T08:50:03.341708Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-05-07T08:50:03.347304Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00280d/r3tmp/tmp8eXLnc/pdisk_1.dat 2025-05-07T08:50:03.716706Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:50:03.716827Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:50:03.718928Z node 1 :HIVE
WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:50:03.719000Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:50:03.720232Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:50:03.723077Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-05-07T08:50:03.724573Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 15712, node 1 2025-05-07T08:50:03.761065Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:50:03.777027Z node 1 :GRPC_SERVER WARN: grpc_request_proxy.cpp:509: SchemeBoardDelete /Root Strong=0 2025-05-07T08:50:03.779470Z node 1 :GRPC_SERVER WARN: grpc_request_proxy.cpp:509: SchemeBoardDelete /Root Strong=0 2025-05-07T08:50:03.832769Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/zvgn/00280d/r3tmp/yandexCQwzED.tmp 2025-05-07T08:50:03.832796Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/zvgn/00280d/r3tmp/yandexCQwzED.tmp 2025-05-07T08:50:03.832976Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/zvgn/00280d/r3tmp/yandexCQwzED.tmp 2025-05-07T08:50:03.833128Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-05-07T08:50:03.901406Z INFO: TTestServer started on Port 20233 GrpcPort 15712 TClient is connected to server localhost:20233 PQClient connected to localhost:15712 === TenantModeEnabled() = 0 === Init PQ - start server on port 15712 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-05-07T08:50:04.343977Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "Root" StoragePools { Name: "/Root:test" Kind: "test" } } } TxId: 281474976710657 TabletId: 72057594046644480 PeerName: "" , at schemeshard: 72057594046644480 2025-05-07T08:50:04.344117Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //Root, opId: 281474976710657:0, at schemeshard: 72057594046644480 2025-05-07T08:50:04.344254Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 0 2025-05-07T08:50:04.344448Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 281474976710657:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-05-07T08:50:04.344490Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 2025-05-07T08:50:04.347092Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 281474976710657, response: Status: StatusAccepted TxId: 281474976710657 SchemeshardId: 72057594046644480 PathId: 1, at schemeshard: 72057594046644480 2025-05-07T08:50:04.347259Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976710657, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //Root 2025-05-07T08:50:04.347470Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 281474976710657:0, at schemeshard: 72057594046644480 2025-05-07T08:50:04.347546Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 281474976710657:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046644480 2025-05-07T08:50:04.347573Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 281474976710657:0 ProgressState no shards to create, do next state 2025-05-07T08:50:04.347587Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 281474976710657:0 2 -> 3 waiting... 
2025-05-07T08:50:04.349909Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 281474976710657:0, at schemeshard: 72057594046644480 2025-05-07T08:50:04.349983Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 281474976710657:0 ProgressState, at schemeshard: 72057594046644480 2025-05-07T08:50:04.349999Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 281474976710657:0 3 -> 128 2025-05-07T08:50:04.350424Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T08:50:04.350459Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 281474976710657, ready parts: 0/1, is published: true 2025-05-07T08:50:04.350525Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T08:50:04.351847Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 281474976710657:0, at schemeshard: 72057594046644480 2025-05-07T08:50:04.351870Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 281474976710657:0, at schemeshard: 72057594046644480 2025-05-07T08:50:04.351921Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 281474976710657:0, at tablet# 72057594046644480 2025-05-07T08:50:04.351951Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 281474976710657 ready parts: 1/1 2025-05-07T08:50:04.376854Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046644480 Flags: 2 } ExecLevel: 0 TxId: 281474976710657 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:50:04.378750Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 281474976710657:4294967295 from tablet: 72057594046644480 to tablet: 72057594046316545 cookie: 0:281474976710657 msg type: 269090816 2025-05-07T08:50:04.378867Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 281474976710657, partId: 4294967295, tablet: 72057594046316545 2025-05-07T08:50:04.381086Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 1746607804424, transactions count in step: 1, at schemeshard: 72057594046644480 2025-05-07T08:50:04.381192Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 281474976710657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1746607804424 MediatorID: 72057594046382081 TabletID: 72057594046644480, at schemeshard: 72057594046644480 2025-05-07T08:50:04.381215Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 281474976710657:0, at tablet# 72057594046644480 2025-05-07T08:50:04.381400Z 
node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 281474976710657:0 128 -> 240 2025-05-07T08:50:04.381421Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 281474976710657:0, at tablet# 72057594046644480 2025-05-07T08:50:04.381543Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 1 2025-05-07T08:50:04.381590Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046644480, LocalPathId: 1], at schemeshard: 72057594046644480 2025-05-07T08:50:04.383798Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046644480 2025-05-07T08:50:04.383826Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046644480, txId: 281474976710657, path id: [OwnerId: 72057594046644480, LocalPa ... NCER DEBUG: read_balancer__balancing.cpp:1322: [72075186224037898][rt3.dc1--topic1] consumer cli start rebalancing. familyCount=1, sessionCount=1, desiredFamilyCount=1, allowPlusOne=0 2025-05-07T08:50:54.865666Z node 6 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1399: [72075186224037898][rt3.dc1--topic1] consumer cli balancing duration: 0.000211s 2025-05-07T08:50:54.866282Z node 5 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 1 consumer shared/cli session shared/cli_5_1_10373832595373520161_v1 grpc read done: success# 1, data# { read_request { bytes_size: 52428800 } } 2025-05-07T08:50:54.867426Z node 5 :PQ_READ_PROXY INFO: read_session_actor.cpp:1315: session cookie 1 consumer shared/cli session shared/cli_5_1_10373832595373520161_v1 assign: record# { Partition: 0 TabletId: 72075186224037897 Topic: "rt3.dc1--topic1" Generation: 1 Step: 1 Session: "shared/cli_5_1_10373832595373520161_v1" ClientId: "cli" PipeClient { RawX1: 7501623615661662318 RawX2: 4503621102209627 } Path: "/Root/PQ/rt3.dc1--topic1" } 2025-05-07T08:50:54.867598Z node 5 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:1816: session cookie 1 consumer shared/cli session shared/cli_5_1_10373832595373520161_v1 got read request: guid# b117591-55ae018e-d68c671d-e8965fd6 2025-05-07T08:50:54.867698Z node 5 :PQ_READ_PROXY INFO: partition_actor.cpp:1122: session cookie 1 consumer shared/cli session shared/cli_5_1_10373832595373520161_v1 INITING TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1) 2025-05-07T08:50:54.871422Z node 5 :PQ_READ_PROXY INFO: partition_actor.cpp:962: session cookie 1 consumer shared/cli session shared/cli_5_1_10373832595373520161_v1 TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1) pipe restart attempt 0 pipe creation result: OK TabletId: 72075186224037897 Generation: 1, pipe: [5:7501623615661662322:2655] 2025-05-07T08:50:54.871548Z node 5 :PQ_READ_PROXY DEBUG: caching_service.cpp:282: Direct read cache: registered server session: shared/cli_5_1_10373832595373520161_v1:1 with generation 1 2025-05-07T08:50:54.884431Z node 5 :PQ_READ_PROXY DEBUG: partition_actor.cpp:652: session cookie 1 consumer shared/cli session shared/cli_5_1_10373832595373520161_v1 TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 
0(assignId:1) initDone 0 event { CmdGetClientOffsetResult { Offset: 0 EndOffset: 3 SizeLag: 420 WriteTimestampEstimateMS: 1746607854840 ClientHasAnyCommits: false } Cookie: 18446744073709551615 } 2025-05-07T08:50:54.884503Z node 5 :PQ_READ_PROXY INFO: partition_actor.cpp:683: session cookie 1 consumer shared/cli session shared/cli_5_1_10373832595373520161_v1 INIT DONE TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1) EndOffset 3 readOffset 0 committedOffset 0 2025-05-07T08:50:54.884585Z node 5 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:1413: session cookie 1 consumer shared/cli session shared/cli_5_1_10373832595373520161_v1 sending to client partition status 2025-05-07T08:50:54.885699Z :INFO: [] [] [44afc0c5-83c2ea46-89032025-194abca0] [] Confirm partition stream create. Partition stream id: 1. Cluster: "-". Topic: "/topic1". Partition: 0. Read offset: (NULL) 2025-05-07T08:50:54.890217Z node 5 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 1 consumer shared/cli session shared/cli_5_1_10373832595373520161_v1 grpc read done: success# 1, data# { start_partition_session_response { partition_session_id: 1 } } 2025-05-07T08:50:54.890726Z node 5 :PQ_READ_PROXY INFO: read_session_actor.cpp:533: session cookie 1 consumer shared/cli session shared/cli_5_1_10373832595373520161_v1 got StartRead from client: partition# TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1), readOffset# 0, commitOffset# (empty maybe) 2025-05-07T08:50:54.890824Z node 5 :PQ_READ_PROXY INFO: partition_actor.cpp:1002: session cookie 1 consumer shared/cli session shared/cli_5_1_10373832595373520161_v1 Start reading TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1) EndOffset 3 readOffset 0 committedOffset 0 clientCommitOffset (empty maybe) clientReadOffset 0 2025-05-07T08:50:54.890861Z node 5 :PQ_READ_PROXY DEBUG: partition_actor.cpp:948: session cookie 1 consumer shared/cli session shared/cli_5_1_10373832595373520161_v1 TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1) ready for read with readOffset 0 endOffset 3 2025-05-07T08:50:54.890933Z node 5 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2309: session cookie 1 consumer shared/cli session shared/cli_5_1_10373832595373520161_v1 partition ready for read: partition# TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1), readOffset# 0, endOffset# 3, WTime# 0, sizeLag# 420 2025-05-07T08:50:54.890953Z node 5 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2320: session cookie 1 consumer shared/cli session shared/cli_5_1_10373832595373520161_v1TEvPartitionReady. 
Aval parts: 1 2025-05-07T08:50:54.891004Z node 5 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2243: session cookie 1 consumer shared/cli session shared/cli_5_1_10373832595373520161_v1 performing read request: guid# 2e3106a4-3153a02b-781caa25-f439da38, from# TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1), count# 3, size# 504, partitionsAsked# 1, maxTimeLag# 0ms 2025-05-07T08:50:54.891139Z node 5 :PQ_READ_PROXY DEBUG: partition_actor.cpp:1369: session cookie 1 consumer shared/cli session shared/cli_5_1_10373832595373520161_v1 READ FROM TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1)maxCount 3 maxSize 504 maxTimeLagMs 0 readTimestampMs 0 readOffset 0 EndOffset 3 ClientCommitOffset 0 committedOffset 0 Guid 2e3106a4-3153a02b-781caa25-f439da38 2025-05-07T08:50:54.892278Z node 5 :PQ_READ_PROXY DEBUG: partition_actor.cpp:652: session cookie 1 consumer shared/cli session shared/cli_5_1_10373832595373520161_v1 TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1) initDone 1 event { CmdReadResult { MaxOffset: 3 Result { Offset: 0 Data: "... 79 bytes ..." SourceId: "\000source1" SeqNo: 1 WriteTimestampMS: 1746607854613 CreateTimestampMS: 1746607854613 UncompressedSize: 8 PartitionKey: "" ExplicitHash: "" } Result { Offset: 1 Data: "... 79 bytes ..." SourceId: "\000source1" SeqNo: 2 WriteTimestampMS: 1746607854623 CreateTimestampMS: 1746607854613 UncompressedSize: 8 PartitionKey: "" ExplicitHash: "" } Result { Offset: 2 Data: "... 79 bytes ..." SourceId: "\000source1" SeqNo: 3 WriteTimestampMS: 1746607854634 CreateTimestampMS: 1746607854613 UncompressedSize: 8 PartitionKey: "" ExplicitHash: "" } BlobsFromDisk: 0 BlobsFromCache: 0 SizeLag: 54 RealReadOffset: 2 WaitQuotaTimeMs: 0 EndOffset: 3 StartOffset: 0 } Cookie: 0 } 2025-05-07T08:50:54.892466Z node 5 :PQ_READ_PROXY DEBUG: partition_actor.cpp:1252: session cookie 1 consumer shared/cli session shared/cli_5_1_10373832595373520161_v1 TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1) wait data in partition inited, cookie 1 from offset3 2025-05-07T08:50:54.892526Z node 5 :PQ_READ_PROXY DEBUG: partition_actor.cpp:880: session cookie 1 consumer shared/cli session shared/cli_5_1_10373832595373520161_v1 after read state TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1) EndOffset 3 ReadOffset 3 ReadGuid 2e3106a4-3153a02b-781caa25-f439da38 has messages 1 2025-05-07T08:50:54.892634Z node 5 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:1917: session cookie 1 consumer shared/cli session shared/cli_5_1_10373832595373520161_v1 read done: guid# 2e3106a4-3153a02b-781caa25-f439da38, partition# TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1), size# 490 2025-05-07T08:50:54.892664Z node 5 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2079: session cookie 1 consumer shared/cli session shared/cli_5_1_10373832595373520161_v1 response to read: guid# 2e3106a4-3153a02b-781caa25-f439da38 2025-05-07T08:50:54.892923Z node 5 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2122: session cookie 1 consumer shared/cli session shared/cli_5_1_10373832595373520161_v1 Process answer. 
Aval parts: 0 2025-05-07T08:50:54.893623Z :DEBUG: [] [] [44afc0c5-83c2ea46-89032025-194abca0] [] Got ReadResponse, serverBytesSize = 490, now ReadSizeBudget = 0, ReadSizeServerDelta = 52428310 2025-05-07T08:50:54.893770Z :DEBUG: [] [] [44afc0c5-83c2ea46-89032025-194abca0] [] In ContinueReadingDataImpl, ReadSizeBudget = 0, ReadSizeServerDelta = 52428310 2025-05-07T08:50:54.896891Z :DEBUG: [] Decompression task done. Partition/PartitionSessionId: 1 (0-2) 2025-05-07T08:50:54.896962Z :DEBUG: [] [] [44afc0c5-83c2ea46-89032025-194abca0] [] Returning serverBytesSize = 490 to budget 2025-05-07T08:50:54.897002Z :DEBUG: [] [] [44afc0c5-83c2ea46-89032025-194abca0] [] In ContinueReadingDataImpl, ReadSizeBudget = 490, ReadSizeServerDelta = 52428310 2025-05-07T08:50:54.897312Z :DEBUG: [] [] [44afc0c5-83c2ea46-89032025-194abca0] [] After sending read request: ReadSizeBudget = 0, ReadSizeServerDelta = 52428800 2025-05-07T08:50:54.898090Z :DEBUG: [] Take Data. Partition 0. Read: {0, 0} (0-0) 2025-05-07T08:50:54.898150Z :DEBUG: [] Take Data. Partition 0. Read: {1, 0} (1-1) 2025-05-07T08:50:54.898184Z :DEBUG: [] Take Data. Partition 0. Read: {2, 0} (2-2) 2025-05-07T08:50:54.898237Z :DEBUG: [] [] [44afc0c5-83c2ea46-89032025-194abca0] [] The application data is transferred to the client. Number of messages 3, size 24 bytes 2025-05-07T08:50:54.898289Z :DEBUG: [] [] [44afc0c5-83c2ea46-89032025-194abca0] [] Returning serverBytesSize = 0 to budget 2025-05-07T08:50:54.898519Z :INFO: [] [] [44afc0c5-83c2ea46-89032025-194abca0] Closing read session. Close timeout: 0.000000s 2025-05-07T08:50:54.898573Z :INFO: [] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): -:/topic1:0:1:2:0 2025-05-07T08:50:54.898619Z :INFO: [] [] [44afc0c5-83c2ea46-89032025-194abca0] Counters: { Errors: 0 CurrentSessionLifetimeMs: 60 BytesRead: 24 MessagesRead: 3 BytesReadCompressed: 24 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-05-07T08:50:54.898782Z :NOTICE: [] [] [44afc0c5-83c2ea46-89032025-194abca0] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Close with zero timeout " } 2025-05-07T08:50:54.898826Z :DEBUG: [] [] [44afc0c5-83c2ea46-89032025-194abca0] [] Abort session to cluster 2025-05-07T08:50:54.899350Z :NOTICE: [] [] [44afc0c5-83c2ea46-89032025-194abca0] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2025-05-07T08:50:54.906185Z node 5 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 1 consumer shared/cli session shared/cli_5_1_10373832595373520161_v1 grpc read done: success# 1, data# { read_request { bytes_size: 490 } } 2025-05-07T08:50:54.906249Z node 5 :PQ_READ_PROXY INFO: read_session_actor.cpp:92: session cookie 1 consumer shared/cli session shared/cli_5_1_10373832595373520161_v1 grpc closed 2025-05-07T08:50:54.906289Z node 5 :PQ_READ_PROXY INFO: read_session_actor.cpp:377: session cookie 1 consumer shared/cli session shared/cli_5_1_10373832595373520161_v1 is DEAD 2025-05-07T08:50:54.907512Z node 5 :PQ_READ_PROXY DEBUG: caching_service.cpp:138: Direct read cache: server session deregistered: shared/cli_5_1_10373832595373520161_v1 2025-05-07T08:50:54.907701Z node 6 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037898][rt3.dc1--topic1] pipe [5:7501623615661662318:2651] disconnected; active server actors: 1 2025-05-07T08:50:54.907743Z node 6 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1673: [72075186224037898][rt3.dc1--topic1] pipe [5:7501623615661662318:2651] client cli disconnected session shared/cli_5_1_10373832595373520161_v1
>> KqpPg::CreateTableSerialColumns+useSink [GOOD]
>> KqpPg::CreateTableSerialColumns-useSink
>> Viewer::UseTransactionWhenExecuteDataActionQuery [GOOD]
>> ViewerTopicDataTests::TopicDataTest
>> KqpPg::JoinWithQueryService+StreamLookup [GOOD]
>> KqpPg::Insert_Serial+useSink
>> Viewer::StorageGroupOutputWithoutFilterNoDepends [GOOD]
>> Viewer::StorageGroupOutputWithSpaceCheckDependsOnVDiskSpaceStatus
>> TTicketParserTest::AuthorizationWithRequiredPermissions [GOOD]
>> TTicketParserTest::AuthorizationWithUserAccount
>> TTicketParserTest::NebiusAuthorizationWithRequiredPermissions [GOOD]
>> TTicketParserTest::NebiusAuthorizationUnavailable
>> IndexBuildTest::RejectsCancel [GOOD]
>> TPartitionChooserSuite::TPartitionChooserActor_SplitMergeEnabled_SourceId_PartitionInactive_1_Test
>> TPartitionTests::WriteSubDomainOutOfSpace_DisableExpiration
>> TTicketParserTest::AuthorizationModify [GOOD]
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index_build/unittest >> IndexBuildTest::RejectsCancel [GOOD]
Test command err:
Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140]
IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140]
Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:128:2058] recipient: [1:108:2140]
2025-05-07T08:50:21.878455Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:50:21.878548Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:50:21.878594Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:50:21.878630Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:50:21.878669Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384:
OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:50:21.878699Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:50:21.878752Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:50:21.878843Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:50:21.879567Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:50:21.879875Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:50:21.964729Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:50:21.964788Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:50:21.980033Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:50:21.980145Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:50:21.980336Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:50:21.988613Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:50:21.989236Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:50:21.989953Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:50:21.990298Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:50:21.992413Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:50:21.993990Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:50:21.994054Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:50:21.994114Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:50:21.994173Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:50:21.994268Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:50:21.994455Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: 
Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:50:22.001433Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:239:2058] recipient: [1:15:2062] 2025-05-07T08:50:22.144629Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:50:22.144855Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:50:22.145077Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:50:22.145298Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:50:22.145352Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:50:22.149098Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:50:22.149244Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:50:22.149496Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:50:22.149548Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:50:22.149591Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:50:22.149625Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:50:22.155535Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:50:22.155660Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:50:22.155717Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:50:22.159161Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:50:22.159243Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 
2025-05-07T08:50:22.159317Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:50:22.159381Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:50:22.163899Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:50:22.167054Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:50:22.167337Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:50:22.168616Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:50:22.168800Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 130 RawX2: 4294969450 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:50:22.168881Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:50:22.169254Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:50:22.169335Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:50:22.169542Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:50:22.169632Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:50:22.174271Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:50:22.174342Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:50:22.174606Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:50:22.174658Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [ ... ss, AlterMainTableTxDone: 0, LockTxId: 281474976710757, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976710758, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 5000004, ApplyTxId: 281474976710759, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976710760, UnlockTxStatus: StatusAccepted, UnlockTxDone: 1, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 101, upload bytes: 1818, read rows: 101, read bytes: 1818 }, Billed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }} 2025-05-07T08:51:00.655167Z node 2 :BUILD_INDEX INFO: schemeshard_build_index_tx_base.cpp:25: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: Change state from Unlocking to Done 2025-05-07T08:51:00.659250Z node 2 :BUILD_INDEX INFO: schemeshard_build_index__progress.cpp:1105: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 102 Done TBuildInfo{ IndexBuildId: 102, Uid: , DomainPathId: [OwnerId: 72057594046678944, LocalPathId: 1], TablePathId: [OwnerId: 72057594046678944, LocalPathId: 2], IndexType: EIndexTypeGlobal, IndexName: index1, IndexColumn: index, State: Done, IsCancellationRequested: 0, Issue: , SubscribersCount: 1, CreateSender: [2:1162:3016], AlterMainTableTxId: 0, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 281474976710757, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976710758, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 5000004, ApplyTxId: 281474976710759, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976710760, UnlockTxStatus: StatusAccepted, UnlockTxDone: 1, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 101, upload bytes: 1818, read rows: 101, read bytes: 1818 }, Billed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }} 2025-05-07T08:51:00.659308Z node 2 :BUILD_INDEX TRACE: schemeshard_build_index_tx_base.cpp:325: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TIndexBuildInfo SendNotifications: : id# 102, subscribers count# 1 2025-05-07T08:51:00.659448Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-05-07T08:51:00.659486Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [2:1254:3097] TestWaitNotification: OK eventTxId 102 2025-05-07T08:51:00.661493Z node 2 :BUILD_INDEX NOTICE: schemeshard_build_index__cancel.cpp:18: TIndexBuilder::TXTYPE_CANCEL_INDEX_BUILD: DoExecute TxId: 105 DatabaseName: "/MyRoot" IndexBuildId: 102 2025-05-07T08:51:00.661618Z node 2 :BUILD_INDEX NOTICE: schemeshard_build_index_tx_base.h:91: TIndexBuilder::TXTYPE_CANCEL_INDEX_BUILD: Reply TxId: 105 Status: PRECONDITION_FAILED Issues { message: "Index build process with id <102> has been finished already" severity: 1 } BUILDINDEX RESPONSE CANCEL: NKikimrIndexBuilder.TEvCancelResponse TxId: 105 Status: PRECONDITION_FAILED Issues { message: "Index build process with id <102> has been finished already" severity: 1 } 2025-05-07T08:51:00.663614Z node 2 :BUILD_INDEX DEBUG: schemeshard_build_index__get.cpp:19: TIndexBuilder::TXTYPE_GET_INDEX_BUILD: DoExecute DatabaseName: "/MyRoot" IndexBuildId: 102 2025-05-07T08:51:00.663793Z node 2 :BUILD_INDEX DEBUG: schemeshard_build_index_tx_base.h:93: TIndexBuilder::TXTYPE_GET_INDEX_BUILD: Reply Status: SUCCESS 
IndexBuild { Id: 102 State: STATE_DONE Settings { source_path: "/MyRoot/Table" index { name: "index1" index_columns: "index" global_index { } } max_shards_in_flight: 2 ScanSettings { MaxBatchRows: 1 } } Progress: 100 } BUILDINDEX RESPONSE Get: NKikimrIndexBuilder.TEvGetResponse Status: SUCCESS IndexBuild { Id: 102 State: STATE_DONE Settings { source_path: "/MyRoot/Table" index { name: "index1" index_columns: "index" global_index { } } max_shards_in_flight: 2 ScanSettings { MaxBatchRows: 1 } } Progress: 100 } 2025-05-07T08:51:00.665483Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T08:51:00.665724Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table" took 261us result status StatusSuccess 2025-05-07T08:51:00.666179Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Table" PathDescription { Self { Name: "Table" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 6 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 6 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 TableSchemaVersion: 3 TablePartitionVersion: 1 } ChildrenExist: true } Table { Name: "Table" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "index" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 3 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableIndexes { Name: "index1" LocalPathId: 3 Type: EIndexTypeGlobal State: EIndexStateReady KeyColumnNames: "index" SchemaVersion: 2 PathOwnerId: 72057594046678944 DataSize: 0 IndexImplTableDescriptions { } } TableSchemaVersion: 3 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 10 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 11 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:51:00.668985Z node 2 
:SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/index1" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-05-07T08:51:00.669514Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/index1" took 323us result status StatusSuccess 2025-05-07T08:51:00.670394Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Table/index1" PathDescription { Self { Name: "index1" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeTableIndex CreateFinished: true CreateTxId: 281474976710758 CreateStep: 5000004 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableIndexVersion: 2 } ChildrenExist: true } Children { Name: "indexImplTable" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710758 CreateStep: 5000004 ParentPathId: 3 PathState: EPathStateAlter Owner: "root@builtin" ACL: "" PathSubType: EPathSubTypeSyncIndexImplTable Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 11 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } TableIndex { Name: "index1" LocalPathId: 3 Type: EIndexTypeGlobal State: EIndexStateReady KeyColumnNames: "index" SchemaVersion: 2 PathOwnerId: 72057594046678944 DataSize: 0 IndexImplTableDescriptions { Columns { Name: "index" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "index" KeyColumnNames: "key" KeyColumnIds: 1 KeyColumnIds: 2 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 
536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 KeepEraseMarkers: false MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 } } } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TTicketParserTest::NebiusAuthorizationRetryErrorImmediately [GOOD] >> TTicketParserTest::NebiusAuthorization >> TPartitionTests::WriteSubDomainOutOfSpace_DisableExpiration [GOOD] >> TPartitionTests::WriteSubDomainOutOfSpace_IgnoreQuotaDeadline |89.3%| [TA] $(B)/ydb/public/lib/ydb_cli/topic/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} ------- [TM] {asan, default-linux-x86_64, release} ydb/core/security/ut/unittest >> TTicketParserTest::AuthorizationModify [GOOD] Test command err: 2025-05-07T08:50:37.195201Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501623540949909993:2060];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:50:37.195237Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003efa/r3tmp/tmpu9KiGr/pdisk_1.dat 2025-05-07T08:50:38.102485Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:50:38.127446Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:50:38.127591Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:50:38.129790Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 31917, node 1 2025-05-07T08:50:38.363024Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:50:38.363056Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:50:38.363062Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:50:38.363213Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:17901 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:50:38.908734Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T08:50:38.949551Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:758: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-05-07T08:50:38.949602Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:763: CanInitLoginToken, target database candidates(1): /Root 2025-05-07T08:50:38.949616Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:816: CanInitLoginToken, database /Root, A6 error 2025-05-07T08:50:38.950045Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:563: Ticket **** (8E120919) asking for AccessServiceAuthentication 2025-05-07T08:50:38.950102Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [517000010088] Connect to grpc://localhost:9622 2025-05-07T08:50:38.952830Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000010088] Request AuthenticateRequest { iam_token: "**** (8E120919)" } 2025-05-07T08:50:38.992554Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [517000010088] Response AuthenticateResponse { subject { user_account { id: "user1" } } } 2025-05-07T08:50:38.992738Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:1003: Ticket **** (8E120919) asking for UserAccount(user1@as) 2025-05-07T08:50:38.994282Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [517000010788] Connect to grpc://localhost:28258 2025-05-07T08:50:39.002937Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000010788] Request GetUserAccountRequest { user_account_id: "user1" } 2025-05-07T08:50:39.026997Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [517000010788] Response UserAccount { yandex_passport_user_account { login: "login1" } } 2025-05-07T08:50:39.030245Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1784: Ticket **** (8E120919) () has now valid token of login1@passport 2025-05-07T08:50:43.737389Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7501623568468387705:2199];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:50:43.783457Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003efa/r3tmp/tmp2b5pSy/pdisk_1.dat 2025-05-07T08:50:43.953783Z node 2 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:50:43.984891Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:50:43.984972Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:50:43.988207Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 1142, node 2 2025-05-07T08:50:44.100112Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:50:44.100135Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:50:44.100141Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:50:44.100247Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:2881 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:50:44.395487Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:50:44.406866Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-05-07T08:50:44.416048Z node 2 :TICKET_PARSER ERROR: ticket_parser_impl.h:969: Ticket **** (8E120919): Token is not supported 2025-05-07T08:50:47.809946Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7501623584224480949:2066];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:50:47.809996Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003efa/r3tmp/tmpMiAYDz/pdisk_1.dat 2025-05-07T08:50:48.035239Z node 3 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:50:48.063512Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:50:48.063596Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:50:48.065263Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 12461, node 3 2025-05-07T08:50:48.148042Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:50:48.148067Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:50:48.148078Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:50:48.148230Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:13441 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:50:48.435636Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:50:48.450585Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-05-07T08:50:48.456161Z node 3 :TICKET_PARSER ERROR: ticket_parser_impl.h:969: Ticket **** (8E120919): Unknown token 2025-05-07T08:50:52.255253Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7501623606041494023:2194];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:50:52. ... 3Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:763: CanInitLoginToken, target database candidates(1): /Root 2025-05-07T08:50:52.937173Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:816: CanInitLoginToken, database /Root, A6 error 2025-05-07T08:50:52.937192Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:493: Ticket **** (8E120919) asking for AccessServiceAuthorization(something.read) 2025-05-07T08:50:52.937321Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [51700002e708] Request AuthorizeRequest { iam_token: "**** (8E120919)" permission: "something.read" resource_path { id: "XXXXXXXX" type: "ydb.database" } resource_path { id: "XXXXXXXX" type: "resource-manager.folder" } } 2025-05-07T08:50:52.946072Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [51700002e708] Status 16 Access Denied 2025-05-07T08:50:52.946994Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:1413: Ticket **** (8E120919) permission something.read now has a permanent error "Access Denied" retryable:0 2025-05-07T08:50:52.947038Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1815: Ticket **** (8E120919) () has now permanent error message 'Access Denied' 2025-05-07T08:50:52.947620Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:758: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-05-07T08:50:52.947637Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:763: CanInitLoginToken, target database candidates(1): /Root 2025-05-07T08:50:52.947648Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:816: CanInitLoginToken, database /Root, A6 error 2025-05-07T08:50:52.947672Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:493: Ticket **** (8E120919) asking for AccessServiceAuthorization(something.read) 2025-05-07T08:50:52.947822Z node 4 :GRPC_CLIENT DEBUG: 
grpc_service_client.h:120: [51700002e708] Request AuthorizeRequest { iam_token: "**** (8E120919)" permission: "something.read" resource_path { id: "XXXXXXXX" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } } 2025-05-07T08:50:52.949558Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [51700002e708] Response AuthorizeResponse { subject { user_account { id: "user1" } } } 2025-05-07T08:50:52.950277Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:1398: Ticket **** (8E120919) permission something.read now has a valid subject "user1@as" 2025-05-07T08:50:52.950350Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1784: Ticket **** (8E120919) () has now valid token of user1@as 2025-05-07T08:50:52.950923Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:758: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-05-07T08:50:52.950943Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:763: CanInitLoginToken, target database candidates(1): /Root 2025-05-07T08:50:52.950952Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:816: CanInitLoginToken, database /Root, A6 error 2025-05-07T08:50:52.950972Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:493: Ticket **** (8E120919) asking for AccessServiceAuthorization(something.read) 2025-05-07T08:50:52.951113Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [51700002e708] Request AuthorizeRequest { iam_token: "**** (8E120919)" permission: "something.read" resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "XXXXXXXX" type: "resource-manager.folder" } } 2025-05-07T08:50:52.952444Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [51700002e708] Response AuthorizeResponse { subject { user_account { id: "user1" } } } 2025-05-07T08:50:52.953780Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:1398: Ticket **** (8E120919) permission something.read now has a valid subject "user1@as" 2025-05-07T08:50:52.953862Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1784: Ticket **** (8E120919) () has now valid token of user1@as 2025-05-07T08:50:52.954434Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:758: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-05-07T08:50:52.954455Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:763: CanInitLoginToken, target database candidates(1): /Root 2025-05-07T08:50:52.954465Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:816: CanInitLoginToken, database /Root, A6 error 2025-05-07T08:50:52.954493Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:493: Ticket **** (8E120919) asking for AccessServiceAuthorization(monitoring.view) 2025-05-07T08:50:52.954614Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [51700002e708] Request AuthorizeRequest { iam_token: "**** (8E120919)" permission: "monitoring.view" resource_path { id: "gizmo" type: "iam.gizmo" } } 2025-05-07T08:50:52.956106Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [51700002e708] Response AuthorizeResponse { subject { user_account { id: "user1" } } } 2025-05-07T08:50:52.956274Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:1398: Ticket **** (8E120919) permission monitoring.view now has a valid subject "user1@as" 2025-05-07T08:50:52.956343Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1784: Ticket **** (8E120919) () has now valid token of user1@as 2025-05-07T08:50:56.843419Z node 5 :METADATA_PROVIDER WARN: log.cpp:784: 
fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[5:7501623621745580549:2059];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:50:56.843473Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003efa/r3tmp/tmpIDTc4m/pdisk_1.dat 2025-05-07T08:50:56.970656Z node 5 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:50:57.001371Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:50:57.001448Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 23594, node 5 2025-05-07T08:50:57.008365Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:50:57.054739Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:50:57.054765Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:50:57.054783Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:50:57.054944Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:16485 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 
2025-05-07T08:50:57.401674Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-05-07T08:50:57.416191Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:758: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-05-07T08:50:57.416233Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:763: CanInitLoginToken, target database candidates(1): /Root 2025-05-07T08:50:57.416247Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:816: CanInitLoginToken, database /Root, A6 error 2025-05-07T08:50:57.416288Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:493: Ticket **** (8E120919) asking for AccessServiceAuthorization(something.read) 2025-05-07T08:50:57.416370Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [517000110308] Connect to grpc://localhost:25333 2025-05-07T08:50:57.417345Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000110308] Request AuthorizeRequest { iam_token: "**** (8E120919)" permission: "something.read" resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } } 2025-05-07T08:50:57.430525Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [517000110308] Response AuthorizeResponse { subject { user_account { id: "user1" } } } 2025-05-07T08:50:57.430714Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:1398: Ticket **** (8E120919) permission something.read now has a valid subject "user1@as" 2025-05-07T08:50:57.430817Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1784: Ticket **** (8E120919) () has now valid token of user1@as 2025-05-07T08:50:57.432550Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:758: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-05-07T08:50:57.432580Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:763: CanInitLoginToken, target database candidates(1): /Root 2025-05-07T08:50:57.432594Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:816: CanInitLoginToken, database /Root, A6 error 2025-05-07T08:50:57.432643Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:493: Ticket **** (8E120919) asking for AccessServiceAuthorization(something.read) 2025-05-07T08:50:57.432693Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:493: Ticket **** (8E120919) asking for AccessServiceAuthorization(something.write) 2025-05-07T08:50:57.432897Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000110308] Request AuthorizeRequest { iam_token: "**** (8E120919)" permission: "something.read" resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } } 2025-05-07T08:50:57.433559Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000110308] Request AuthorizeRequest { iam_token: "**** (8E120919)" permission: "something.write" resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } } 2025-05-07T08:50:57.436819Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [517000110308] Response AuthorizeResponse { subject { user_account { id: "user1" } } } 2025-05-07T08:50:57.436962Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [517000110308] Response AuthorizeResponse { subject { user_account { id: "user1" } } } 2025-05-07T08:50:57.437222Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:1398: Ticket **** 
(8E120919) permission something.read now has a valid subject "user1@as" 2025-05-07T08:50:57.437284Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:1398: Ticket **** (8E120919) permission something.write now has a valid subject "user1@as" 2025-05-07T08:50:57.437389Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1784: Ticket **** (8E120919) () has now valid token of user1@as >> KqpPg::EmptyQuery-useSink [GOOD] >> KqpPg::DuplicatedColumns+useSink >> TTicketParserTest::BulkAuthorization [GOOD] >> TTicketParserTest::AuthorizationWithUserAccount2 >> KqpPg::NoTableQuery-useSink [GOOD] >> KqpPg::PgCreateTable >> TPartitionTests::WriteSubDomainOutOfSpace_IgnoreQuotaDeadline [GOOD] >> TTicketParserTest::TicketFromCertificateWithValidationCheckIssuerBad [GOOD] >> TQuotaTracker::TestSmallMessages [GOOD] >> TQuotaTracker::TestBigMessages [GOOD] >> TSourceIdTests::ExpensiveCleanup >> TTicketParserTest::BulkAuthorizationUnavailable [GOOD] >> TPartitionTests::CorrectRange_Multiple_Transactions >> TSourceIdTests::ExpensiveCleanup [GOOD] >> KqpPg::InsertNoTargetColumns_Simple-useSink [GOOD] >> KqpPg::InsertNoTargetColumns_Serial-useSink >> TSubDomainTest::CoordinatorRunAtSubdomainNodeWhenAvailable [GOOD] >> TSubDomainTest::CoordinatorRunAtSubdomainNodeWhenAvailable2 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/security/ut/unittest >> TTicketParserTest::TicketFromCertificateWithValidationCheckIssuerBad [GOOD] Test command err: 2025-05-07T08:50:36.742559Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501623536563997231:2066];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:50:36.751322Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003f05/r3tmp/tmpo7ANI0/pdisk_1.dat 2025-05-07T08:50:37.754120Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:50:37.754251Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:50:37.767030Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:50:37.770582Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; TServer::EnableGrpc on GrpcPort 16106, node 1 2025-05-07T08:50:37.832482Z node 1 :GRPC_SERVER WARN: grpc_request_proxy.cpp:509: SchemeBoardDelete /Root Strong=0 2025-05-07T08:50:37.859182Z node 1 :GRPC_SERVER WARN: grpc_request_proxy.cpp:509: SchemeBoardDelete /Root Strong=0 2025-05-07T08:50:37.874014Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:50:37.883822Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:50:37.883851Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:50:37.883857Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:50:37.883957Z node 1 :NET_CLASSIFIER ERROR: 
net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:19574 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:50:38.237882Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:50:38.261953Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T08:50:38.274474Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1784: Ticket 6F04F78715E9FA87CECB2065A405425BD617F524089E79BC9A8FC4297294F841 () has now valid token of C=RU,ST=MSK,L=MSK,O=YA,OU=UtTest,CN=localhost@cert 2025-05-07T08:50:44.413859Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7501623570105071063:2262];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:50:44.434111Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003f05/r3tmp/tmpWST8X8/pdisk_1.dat 2025-05-07T08:50:44.656096Z node 2 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:50:44.658009Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:50:44.658076Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:50:44.682384Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 19625, node 2 2025-05-07T08:50:44.754860Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:50:44.754898Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:50:44.754905Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:50:44.755027Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:27176 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:50:45.183260Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:50:45.190926Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T08:50:45.195207Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1784: Ticket 19CD33D7BA3E4EEF6E1D014CD3ED8A862AAC7F836BF18ABFCEBAD8691A4F43DD () has now valid token of C=RU,ST=MSK,L=MSK,O=YA,OU=UtTest,CN=localhost@cert 2025-05-07T08:50:49.376216Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7501623592187428234:2065];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:50:49.376328Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003f05/r3tmp/tmpTIJlzL/pdisk_1.dat 2025-05-07T08:50:49.600710Z node 3 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:50:49.627558Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:50:49.627677Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:50:49.637204Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 65150, node 3 2025-05-07T08:50:49.765734Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:50:49.765757Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:50:49.765768Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:50:49.765902Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:31172 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:50:50.062424Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:50:50.070024Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-05-07T08:50:50.077423Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1815: Ticket 1E39DCE328A65D03C910B91029AC98770440C117710AA03406E626AC89B17E06 () has now permanent error message 'Cannot create token from certificate. Client certificate failed verification' 2025-05-07T08:50:50.078206Z node 3 :TICKET_PARSER ERROR: ticket_parser_impl.h:969: Ticket 1E39DCE328A65D03C910B91029AC98770440C117710AA03406E626AC89B17E06: Cannot create token from certificate. 
Client certificate failed verification 2025-05-07T08:50:54.213993Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7501623614008117390:2064];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:50:54.220913Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003f05/r3tmp/tmpYa5t5q/pdisk_1.dat 2025-05-07T08:50:54.430364Z node 4 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:50:54.453496Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:50:54.453596Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:50:54.456386Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 1355, node 4 2025-05-07T08:50:54.602454Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:50:54.602485Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:50:54.602495Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:50:54.602651Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:13406 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:50:54.874809Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 
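Both outcomes of certificate authentication show up in this run: a certificate that passes verification yields a token equal to its subject DN with an '@cert' suffix (nodes 2 and 4), while one that fails produces a permanent 'Cannot create token from certificate' error (nodes 3 and 5). A minimal sketch of the DN-to-token formatting, with hypothetical names; the real logic lives in ticket_parser_impl.h:

    #include <iostream>
    #include <string>
    #include <utility>
    #include <vector>

    // Builds the certificate-derived token format seen above:
    // the subject RDNs joined with commas, plus an "@cert" suffix.
    std::string SubjectToToken(
            const std::vector<std::pair<std::string, std::string>>& rdns) {
        std::string token;
        for (const auto& rdn : rdns) {
            if (!token.empty()) token += ',';
            token += rdn.first + '=' + rdn.second;
        }
        return token + "@cert";
    }

    int main() {
        std::cout << SubjectToToken({{"C", "RU"}, {"ST", "MSK"}, {"L", "MSK"},
                                     {"O", "YA"}, {"OU", "UtTest"},
                                     {"CN", "localhost"}})
                  << "\n";  // C=RU,ST=MSK,L=MSK,O=YA,OU=UtTest,CN=localhost@cert
        return 0;
    }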
2025-05-07T08:50:54.884681Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1784: Ticket 2306FAFEDC7C96E1399B9A446F8D76F0EFA013F55EB22B607376733D64BC6EDB () has now valid token of C=RU,ST=MSK,L=MSK,O=YA,OU=UtTest,CN=localhost@cert 2025-05-07T08:50:58.891600Z node 5 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[5:7501623629461290689:2065];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:50:58.891681Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003f05/r3tmp/tmp4gdcoQ/pdisk_1.dat 2025-05-07T08:50:59.011461Z node 5 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:50:59.037367Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:50:59.037452Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:50:59.040038Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 4779, node 5 2025-05-07T08:50:59.134823Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:50:59.134850Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:50:59.134860Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:50:59.134994Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:15587 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:50:59.423664Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:50:59.444742Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1815: Ticket F3316950D389E59E506C6412C7D08ED2A368F740D02C317B412A28698527A074 () has now permanent error message 'Cannot create token from certificate. 
Client certificate failed verification' 2025-05-07T08:50:59.445336Z node 5 :TICKET_PARSER ERROR: ticket_parser_impl.h:969: Ticket F3316950D389E59E506C6412C7D08ED2A368F740D02C317B412A28698527A074: Cannot create token from certificate. Client certificate failed verification >> KqpPg::Insert_Serial+useSink [GOOD] >> KqpPg::Insert_Serial-useSink >> KqpPg::CreateTableSerialColumns-useSink [GOOD] >> KqpPg::DropIndex >> TPartitionTests::CorrectRange_Multiple_Transactions [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/unittest >> TSourceIdTests::ExpensiveCleanup [GOOD] Test command err: 2025-05-07T08:51:01.828256Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-05-07T08:51:01.828371Z node 1 :PERSQUEUE INFO: pq_impl.cpp:794: [PQ: 72057594037927937] doesn't have tx writes info 2025-05-07T08:51:01.848312Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:1:Initializer] Start initializing step TInitConfigStep 2025-05-07T08:51:01.848699Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:1:Initializer] Start initializing step TInitInternalFieldsStep 2025-05-07T08:51:01.849088Z node 1 :PERSQUEUE INFO: partition_init.cpp:879: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [1:179:2194] 2025-05-07T08:51:01.851589Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:55: [Root/PQ/rt3.dc1--account--topic:1:Initializer] Initializing completed. 2025-05-07T08:51:01.851681Z node 1 :PERSQUEUE INFO: partition.cpp:557: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'Root/PQ/rt3.dc1--account--topic' partition 1 generation 0 [1:179:2194] 2025-05-07T08:51:01.851737Z node 1 :PERSQUEUE DEBUG: partition.cpp:571: [PQ: 72057594037927937, Partition: 1, State: StateInit] SYNC INIT topic Root/PQ/rt3.dc1--account--topic partitition 1 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-05-07T08:51:01.852287Z node 1 :PERSQUEUE DEBUG: partition.cpp:3847: [PQ: 72057594037927937, Partition: 1, State: StateIdle] Process pending events. 
Count 0 2025-05-07T08:51:01.852538Z node 1 :PERSQUEUE DEBUG: partition.cpp:2182: [PQ: 72057594037927937, Partition: 1, State: StateIdle] === DumpKeyValueRequest === 2025-05-07T08:51:01.852580Z node 1 :PERSQUEUE DEBUG: partition.cpp:2183: [PQ: 72057594037927937, Partition: 1, State: StateIdle] --- delete ---------------- 2025-05-07T08:51:01.852620Z node 1 :PERSQUEUE DEBUG: partition.cpp:2191: [PQ: 72057594037927937, Partition: 1, State: StateIdle] --- write ----------------- 2025-05-07T08:51:01.852671Z node 1 :PERSQUEUE DEBUG: partition.cpp:2194: [PQ: 72057594037927937, Partition: 1, State: StateIdle] i0000000001 2025-05-07T08:51:01.852720Z node 1 :PERSQUEUE DEBUG: partition.cpp:2194: [PQ: 72057594037927937, Partition: 1, State: StateIdle] m0000000001cclient-1 2025-05-07T08:51:01.852746Z node 1 :PERSQUEUE DEBUG: partition.cpp:2194: [PQ: 72057594037927937, Partition: 1, State: StateIdle] m0000000001uclient-1 2025-05-07T08:51:01.852766Z node 1 :PERSQUEUE DEBUG: partition.cpp:2194: [PQ: 72057594037927937, Partition: 1, State: StateIdle] _config_1 2025-05-07T08:51:01.852799Z node 1 :PERSQUEUE DEBUG: partition.cpp:2196: [PQ: 72057594037927937, Partition: 1, State: StateIdle] --- rename ---------------- 2025-05-07T08:51:01.852836Z node 1 :PERSQUEUE DEBUG: partition.cpp:2201: [PQ: 72057594037927937, Partition: 1, State: StateIdle] =========================== 2025-05-07T08:51:01.852942Z node 1 :PERSQUEUE DEBUG: partition_read.cpp:774: [PQ: 72057594037927937, Partition: 1, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition 1 user client-1 readTimeStamp for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 0 rrg 0 2025-05-07T08:51:01.853132Z node 1 :PERSQUEUE INFO: partition.cpp:3707: [PQ: 72057594037927937, Partition: 1, State: StateIdle] SubDomainOutOfSpace was changed. Topic: "Root/PQ/rt3.dc1--account--topic". Partition: 1. SubDomainOutOfSpace: 1 2025-05-07T08:51:01.853219Z node 1 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie owner1|717c9d02-2fb40cb2-872019a4-c97ea348_0 generated for partition 1 topic 'Root/PQ/rt3.dc1--account--topic' owner owner1 Send disk status response with cookie: 0 2025-05-07T08:51:01.853599Z node 1 :PERSQUEUE DEBUG: partition_write.cpp:524: [PQ: 72057594037927937, Partition: 1, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0 2025-05-07T08:51:01.853710Z node 1 :PERSQUEUE DEBUG: partition_write.cpp:35: [PQ: 72057594037927937, Partition: 1, State: StateIdle] TPartition::ReplyOwnerOk. Partition: 1 2025-05-07T08:51:01.854032Z node 1 :PERSQUEUE DEBUG: partition_write.cpp:1704: [PQ: 72057594037927937, Partition: 1, State: StateIdle] Send write quota request. Topic: "Root/PQ/rt3.dc1--account--topic". Partition: 1. Amount: 22. Cookie: 1 2025-05-07T08:51:01.854131Z node 1 :PERSQUEUE DEBUG: partition.cpp:3627: [PQ: 72057594037927937, Partition: 1, State: StateIdle] Got quota. Topic: "Root/PQ/rt3.dc1--account--topic". 
Partition: 1: Cookie: 1 2025-05-07T08:51:01.854266Z node 1 :PERSQUEUE DEBUG: partition_write.cpp:1233: [PQ: 72057594037927937, Partition: 1, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition 1 part blob processing sourceId 'SourceId' seqNo 0 partNo 0 2025-05-07T08:51:01.855136Z node 1 :PERSQUEUE DEBUG: partition_write.cpp:1333: [PQ: 72057594037927937, Partition: 1, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition 1 part blob complete sourceId 'SourceId' seqNo 0 partNo 0 FormedBlobsCount 0 NewHead: Offset 100 PartNo 0 PackedSize 118 count 1 nextOffset 101 batches 1 2025-05-07T08:51:01.855755Z node 1 :PERSQUEUE DEBUG: partition_write.cpp:1623: [PQ: 72057594037927937, Partition: 1, State: StateIdle] Add new write blob: topic 'Root/PQ/rt3.dc1--account--topic' partition 1 compactOffset 100,1 HeadOffset 0 endOffset 0 curOffset 101 d0000000001_00000000000000000100_00000_0000000001_00000| size 104 WTime 128 2025-05-07T08:51:01.855907Z node 1 :PERSQUEUE DEBUG: partition.cpp:2182: [PQ: 72057594037927937, Partition: 1, State: StateIdle] === DumpKeyValueRequest === 2025-05-07T08:51:01.855945Z node 1 :PERSQUEUE DEBUG: partition.cpp:2183: [PQ: 72057594037927937, Partition: 1, State: StateIdle] --- delete ---------------- 2025-05-07T08:51:01.855989Z node 1 :PERSQUEUE DEBUG: partition.cpp:2189: [PQ: 72057594037927937, Partition: 1, State: StateIdle] [x0000000001, x0000000002) 2025-05-07T08:51:01.856041Z node 1 :PERSQUEUE DEBUG: partition.cpp:2191: [PQ: 72057594037927937, Partition: 1, State: StateIdle] --- write ----------------- 2025-05-07T08:51:01.856076Z node 1 :PERSQUEUE DEBUG: partition.cpp:2194: [PQ: 72057594037927937, Partition: 1, State: StateIdle] m0000000001pSourceId 2025-05-07T08:51:01.856105Z node 1 :PERSQUEUE DEBUG: partition.cpp:2194: [PQ: 72057594037927937, Partition: 1, State: StateIdle] d0000000001_00000000000000000100_00000_0000000001_00000| 2025-05-07T08:51:01.856126Z node 1 :PERSQUEUE DEBUG: partition.cpp:2194: [PQ: 72057594037927937, Partition: 1, State: StateIdle] i0000000001 2025-05-07T08:51:01.856156Z node 1 :PERSQUEUE DEBUG: partition.cpp:2196: [PQ: 72057594037927937, Partition: 1, State: StateIdle] --- rename ---------------- 2025-05-07T08:51:01.856191Z node 1 :PERSQUEUE DEBUG: partition.cpp:2201: [PQ: 72057594037927937, Partition: 1, State: StateIdle] =========================== 2025-05-07T08:51:01.904535Z node 1 :PERSQUEUE DEBUG: partition_write.cpp:524: [PQ: 72057594037927937, Partition: 1, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 22 WriteNewSizeFromSupportivePartitions# 0 2025-05-07T08:51:01.904646Z node 1 :PERSQUEUE DEBUG: partition_write.cpp:58: [PQ: 72057594037927937, Partition: 1, State: StateIdle] TPartition::ReplyWrite. Partition: 1 2025-05-07T08:51:01.904721Z node 1 :PERSQUEUE DEBUG: partition_write.cpp:324: [PQ: 72057594037927937, Partition: 1, State: StateIdle] Answering for message sourceid: 'SourceId', Topic: 'Root/PQ/rt3.dc1--account--topic', Partition: 1, SeqNo: 0, partNo: 0, Offset: 100 is stored on disk 2025-05-07T08:51:01.904867Z node 1 :PERSQUEUE DEBUG: partition_read.cpp:774: [PQ: 72057594037927937, Partition: 1, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition 1 user client-1 readTimeStamp for offset 0 initiated queuesize 0 startOffset 100 ReadingTimestamp 0 rrg 0 2025-05-07T08:51:02.205171Z node 1 :PERSQUEUE INFO: partition.cpp:3707: [PQ: 72057594037927937, Partition: 1, State: StateIdle] SubDomainOutOfSpace was changed. Topic: "Root/PQ/rt3.dc1--account--topic". Partition: 1. 
SubDomainOutOfSpace: 0 2025-05-07T08:51:02.238158Z node 1 :PERSQUEUE DEBUG: partition_write.cpp:1704: [PQ: 72057594037927937, Partition: 1, State: StateIdle] Send write quota request. Topic: "Root/PQ/rt3.dc1--account--topic". Partition: 1. Amount: 22. Cookie: 2 2025-05-07T08:51:02.238324Z node 1 :PERSQUEUE DEBUG: partition.cpp:3627: [PQ: 72057594037927937, Partition: 1, State: StateIdle] Got quota. Topic: "Root/PQ/rt3.dc1--account--topic". Partition: 1: Cookie: 2 2025-05-07T08:51:02.238535Z node 1 :PERSQUEUE DEBUG: partition_write.cpp:1233: [PQ: 72057594037927937, Partition: 1, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition 1 part blob processing sourceId 'SourceId' seqNo 1 partNo 0 2025-05-07T08:51:02.239139Z node 1 :PERSQUEUE DEBUG: partition_write.cpp:1295: [PQ: 72057594037927937, Partition: 1, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition 1 part blob sourceId 'SourceId' seqNo 1 partNo 0 result is x0000000001_00000000000000000100_00000_0000000001_00000 size 104 2025-05-07T08:51:02.239243Z node 1 :PERSQUEUE DEBUG: partition_write.cpp:1049: [PQ: 72057594037927937, Partition: 1, State: StateIdle] writing blob: topic 'Root/PQ/rt3.dc1--account--topic' partition 1 old key x0000000001_00000000000000000100_00000_0000000001_00000 new key d0000000001_00000000000000000100_00000_0000000001_00000 size 104 WTime 1329 2025-05-07T08:51:02.240664Z node 1 :PERSQUEUE DEBUG: partition_write.cpp:1333: [PQ: 72057594037927937, Partition: 1, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition 1 part blob complete sourceId 'SourceId' seqNo 1 partNo 0 FormedBlobsCount 1 NewHead: Offset 200 PartNo 0 PackedSize 118 count 1 nextOffset 201 batches 1 2025-05-07T08:51:02.241365Z node 1 :PERSQUEUE DEBUG: partition_write.cpp:1623: [PQ: 72057594037927937, Partition: 1, State: StateIdle] Add new write blob: topic 'Root/PQ/rt3.dc1--account--topic' partition 1 compactOffset 200,1 HeadOffset 100 endOffset 101 curOffset 201 d0000000001_00000000000000000200_00000_0000000001_00000| size 105 WTime 1329 2025-05-07T08:51:02.241580Z node 1 :PERSQUEUE DEBUG: partition.cpp:2182: [PQ: 72057594037927937, Partition: 1, State: StateIdle] === DumpKeyValueRequest === 2025-05-07T08:51:02.241646Z node 1 :PERSQUEUE DEBUG: partition.cpp:2183: [PQ: 72057594037927937, Partition: 1, State: StateIdle] --- delete ---------------- 2025-05-07T08:51:02.241710Z node 1 :PERSQUEUE DEBUG: partition.cpp:2189: [PQ: 72057594037927937, Partition: 1, State: StateIdle] [x0000000001, x0000000002) 2025-05-07T08:51:02.241766Z node 1 :PERSQUEUE DEBUG: partition.cpp:2191: [PQ: 72057594037927937, Partition: 1, State: StateIdle] --- write ----------------- 2025-05-07T08:51:02.241816Z node 1 :PERSQUEUE DEBUG: partition.cpp:2194: [PQ: 72057594037927937, Partition: 1, State: StateIdle] d0000000001_00000000000000000100_00000_0000000001_00000 2025-05-07T08:51:02.241857Z node 1 :PERSQUEUE DEBUG: partition.cpp:2194: [PQ: 72057594037927937, Partition: 1, State: StateIdle] m0000000001pSourceId 2025-05-07T08:51:02.241883Z node 1 :PERSQUEUE DEBUG: partition.cpp:2194: [PQ: 72057594037927937, Partition: 1, State: StateIdle] d0000000001_00000000000000000200_00000_0000000001_00000| 2025-05-07T08:51:02.241924Z node 1 :PERSQUEUE DEBUG: partition.cpp:2194: [PQ: 72057594037927937, Partition: 1, State: StateIdle] i0000000001 2025-05-07T08:51:02.242253Z node 1 :PERSQUEUE DEBUG: partition.cpp:2196: [PQ: 72057594037927937, Partition: 1, State: StateIdle] --- rename ---------------- 2025-05-07T08:51:02.242324Z node 1 :PERSQUEUE 
DEBUG: partition.cpp:2201: [PQ: 72057594037927937, Partition: 1, State: StateIdle] =========================== 2025-05-07T08:51:02.267730Z node 1 :PERSQUEUE DEBUG: partition_write.cpp:524: [PQ: 72057594037927937, Partition: 1, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 22 WriteNewSizeFromSupportivePartitions# 0 2025-05-07T08:51 ... 2 Iteration 143 Iteration 144 Iteration 145 Iteration 146 Iteration 147 Iteration 148 Iteration 149 Iteration 150 Iteration 151 Iteration 152 Iteration 153 Iteration 154 Iteration 155 Iteration 156 Iteration 157 Iteration 158 Iteration 159 Iteration 160 Iteration 161 Iteration 162 Iteration 163 Iteration 164 Iteration 165 Iteration 166 Iteration 167 Iteration 168 Iteration 169 Iteration 170 Iteration 171 Iteration 172 Iteration 173 Iteration 174 Iteration 175 Iteration 176 Iteration 177 Iteration 178 Iteration 179 Iteration 180 Iteration 181 Iteration 182 Iteration 183 Iteration 184 Iteration 185 Iteration 186 Iteration 187 Iteration 188 Iteration 189 Iteration 190 Iteration 191 Iteration 192 Iteration 193 Iteration 194 Iteration 195 Iteration 196 Iteration 197 Iteration 198 Iteration 199 Iteration 200 Iteration 201 Iteration 202 Iteration 203 Iteration 204 Iteration 205 Iteration 206 Iteration 207 Iteration 208 Iteration 209 Iteration 210 Iteration 211 Iteration 212 Iteration 213 Iteration 214 Iteration 215 Iteration 216 Iteration 217 Iteration 218 Iteration 219 Iteration 220 Iteration 221 Iteration 222 Iteration 223 Iteration 224 Iteration 225 Iteration 226 Iteration 227 Iteration 228 Iteration 229 Iteration 230 Iteration 231 Iteration 232 Iteration 233 Iteration 234 Iteration 235 Iteration 236 Iteration 237 Iteration 238 Iteration 239 Iteration 240 Iteration 241 Iteration 242 Iteration 243 Iteration 244 Iteration 245 Iteration 246 Iteration 247 Iteration 248 Iteration 249 Iteration 250 Iteration 251 Iteration 252 Iteration 253 Iteration 254 Iteration 255 Iteration 256 Iteration 257 Iteration 258 Iteration 259 Iteration 260 Iteration 261 Iteration 262 Iteration 263 Iteration 264 Iteration 265 Iteration 266 Iteration 267 Iteration 268 Iteration 269 Iteration 270 Iteration 271 Iteration 272 Iteration 273 Iteration 274 Iteration 275 Iteration 276 Iteration 277 Iteration 278 Iteration 279 Iteration 280 Iteration 281 Iteration 282 Iteration 283 Iteration 284 Iteration 285 Iteration 286 Iteration 287 Iteration 288 Iteration 289 Iteration 290 Iteration 291 Iteration 292 Iteration 293 Iteration 294 Iteration 295 Iteration 296 Iteration 297 Iteration 298 Iteration 299 Iteration 300 Iteration 301 Iteration 302 Iteration 303 Iteration 304 Iteration 305 Iteration 306 Iteration 307 Iteration 308 Iteration 309 Iteration 310 Iteration 311 Iteration 312 Iteration 313 Iteration 314 Iteration 315 Iteration 316 Iteration 317 Iteration 318 Iteration 319 Iteration 320 Iteration 321 Iteration 322 Iteration 323 Iteration 324 Iteration 325 Iteration 326 Iteration 327 Iteration 328 Iteration 329 Iteration 330 Iteration 331 Iteration 332 Iteration 333 Iteration 334 Iteration 335 Iteration 336 Iteration 337 Iteration 338 Iteration 339 Iteration 340 Iteration 341 Iteration 342 Iteration 343 Iteration 344 Iteration 345 Iteration 346 Iteration 347 Iteration 348 Iteration 349 Iteration 350 Iteration 351 Iteration 352 Iteration 353 Iteration 354 Iteration 355 Iteration 356 Iteration 357 Iteration 358 Iteration 359 Iteration 360 Iteration 361 Iteration 362 Iteration 363 Iteration 364 Iteration 365 Iteration 366 Iteration 367 Iteration 368 Iteration 369 Iteration 370 
Iteration 371 ... Iteration 877 (consecutive iteration counters 371 through 877 abridged)
Iteration 878 Iteration 879 Iteration 880 Iteration 881 Iteration 882 Iteration 883 Iteration 884 Iteration 885 Iteration 886 Iteration 887 Iteration 888 Iteration 889 Iteration 890 Iteration 891 Iteration 892 Iteration 893 Iteration 894 Iteration 895 Iteration 896 Iteration 897 Iteration 898 Iteration 899 Iteration 900 Iteration 901 Iteration 902 Iteration 903 Iteration 904 Iteration 905 Iteration 906 Iteration 907 Iteration 908 Iteration 909 Iteration 910 Iteration 911 Iteration 912 Iteration 913 Iteration 914 Iteration 915 Iteration 916 Iteration 917 Iteration 918 Iteration 919 Iteration 920 Iteration 921 Iteration 922 Iteration 923 Iteration 924 Iteration 925 Iteration 926 Iteration 927 Iteration 928 Iteration 929 Iteration 930 Iteration 931 Iteration 932 Iteration 933 Iteration 934 Iteration 935 Iteration 936 Iteration 937 Iteration 938 Iteration 939 Iteration 940 Iteration 941 Iteration 942 Iteration 943 Iteration 944 Iteration 945 Iteration 946 Iteration 947 Iteration 948 Iteration 949 Iteration 950 Iteration 951 Iteration 952 Iteration 953 Iteration 954 Iteration 955 Iteration 956 Iteration 957 Iteration 958 Iteration 959 Iteration 960 Iteration 961 Iteration 962 Iteration 963 Iteration 964 Iteration 965 Iteration 966 Iteration 967 Iteration 968 Iteration 969 Iteration 970 Iteration 971 Iteration 972 Iteration 973 Iteration 974 Iteration 975 Iteration 976 Iteration 977 Iteration 978 Iteration 979 Iteration 980 Iteration 981 Iteration 982 Iteration 983 Iteration 984 Iteration 985 Iteration 986 Iteration 987 Iteration 988 Iteration 989 Iteration 990 Iteration 991 Iteration 992 Iteration 993 Iteration 994 Iteration 995 Iteration 996 Iteration 997 Iteration 998 Iteration 999 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/security/ut/unittest >> TTicketParserTest::BulkAuthorizationUnavailable [GOOD] Test command err: 2025-05-07T08:50:40.136167Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501623553582797097:2128];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:50:40.136205Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003ed9/r3tmp/tmpSkaF91/pdisk_1.dat 2025-05-07T08:50:42.102931Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:50:42.390196Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:50:42.390314Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:50:42.394703Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:50:42.398106Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 17484, node 1 2025-05-07T08:50:42.634720Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:50:42.634771Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 
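The blob keys in the TSourceIdTests output above (d0000000001_00000000000000000100_00000_0000000001_00000 and the x-prefixed head keys) are fixed-width decimal fields. The layout below is inferred from the log alone (partition, offset, part number, message count, internal part count); it is a sketch rather than the PersQueue key writer itself:

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    // type: 'd' for compacted data keys, 'x' for the old/head keys that the
    // "old key x... new key d..." rename messages above refer to. The trailing
    // '|' that marks head blobs in the log is left out here.
    void FormatBlobKey(char* out, size_t n, char type, uint32_t partition,
                       uint64_t offset, uint32_t partNo, uint32_t count,
                       uint32_t internalParts) {
        std::snprintf(out, n, "%c%010u_%020llu_%05u_%010u_%05u", type, partition,
                      static_cast<unsigned long long>(offset), partNo, count,
                      internalParts);
    }

    int main() {
        char key[64];
        FormatBlobKey(key, sizeof(key), 'd', 1, 100, 0, 1, 0);
        std::printf("%s\n", key);
        // prints d0000000001_00000000000000000100_00000_0000000001_00000
        return 0;
    }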
2025-05-07T08:50:42.634778Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:50:42.634910Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:4973 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:50:43.121118Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:50:43.202160Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1486: Updated state for /Root keys 1 2025-05-07T08:50:43.202476Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:758: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-05-07T08:50:43.202510Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:763: CanInitLoginToken, target database candidates(1): /Root 2025-05-07T08:50:43.203078Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1815: Ticket **** (5DAB89DE) () has now permanent error message 'Token is not in correct format' 2025-05-07T08:50:43.203095Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:779: CanInitLoginToken, database /Root, A2 error Token is not in correct format 2025-05-07T08:50:43.203127Z node 1 :TICKET_PARSER ERROR: ticket_parser_impl.h:969: Ticket **** (5DAB89DE): Token is not in correct format 2025-05-07T08:50:46.186463Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7501623580007173196:2062];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:50:46.186523Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003ed9/r3tmp/tmpWENmM6/pdisk_1.dat 2025-05-07T08:50:46.365040Z node 2 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:50:46.389051Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:50:46.389175Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:50:46.391553Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 5996, node 2 
2025-05-07T08:50:46.466665Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:50:46.466698Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:50:46.466707Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:50:46.466849Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:29531 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:50:46.757871Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 
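The NET_CLASSIFIER WARN/ERROR quartet repeated before every 'TClient is connected' line is a two-step config fallback: try the distributable config, fall back to a local file, then proceed without a classifier. A sketch of that fallback chain, with hypothetical loader stubs:

    #include <iostream>
    #include <optional>
    #include <string>

    // Hypothetical stand-ins for the two sources the classifier messages name;
    // both come back empty in every run captured above.
    std::optional<std::string> LoadDistributableConfig() { return std::nullopt; }
    std::optional<std::string> LoadFromFile(const std::string& path) {
        (void)path;
        return std::nullopt;
    }

    std::optional<std::string> InitNetClassifier(const std::string& filePath) {
        auto cfg = LoadDistributableConfig();
        if (!cfg) {
            std::cerr << "distributable config is empty, broken or outdated, "
                         "will use file: " << filePath << "\n";
            cfg = LoadFromFile(filePath);
        }
        if (!cfg) {
            std::cerr << "got bad distributable configuration\n";
        }
        return cfg;  // the server keeps starting even when this stays empty
    }

    int main() {
        InitNetClassifier("(empty maybe)");
        return 0;
    }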
2025-05-07T08:50:46.764918Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T08:50:46.767198Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:758: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-05-07T08:50:46.767230Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:763: CanInitLoginToken, target database candidates(1): /Root 2025-05-07T08:50:46.767240Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:816: CanInitLoginToken, database /Root, A6 error 2025-05-07T08:50:46.767354Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:514: Ticket **** (8E120919) asking for AccessServiceBulkAuthorization( something.read something.write) 2025-05-07T08:50:46.767408Z node 2 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [517000031108] Connect to grpc://localhost:18142 2025-05-07T08:50:46.779493Z node 2 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000031108] Request BulkAuthorizeRequest { iam_token: "**** (8E120919)" actions { items { resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } permission: "something.read" } items { resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } permission: "something.write" } } result_filter: ALL_FAILED } 2025-05-07T08:50:46.806869Z node 2 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [517000031108] Response BulkAuthorizeResponse { subject { user_account { id: "user1" } } results { items { permission: "something.write" resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } permission_denied_error { message: "Access Denied" } } } } 2025-05-07T08:50:46.810120Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:1329: Ticket **** (8E120919) permission something.write access denied for subject "user1@as" 2025-05-07T08:50:46.810317Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1784: Ticket **** (8E120919) () has now valid token of user1@as 2025-05-07T08:50:46.811671Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:758: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-05-07T08:50:46.811699Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:763: CanInitLoginToken, target database candidates(1): /Root 2025-05-07T08:50:46.811707Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:816: CanInitLoginToken, database /Root, A6 error 2025-05-07T08:50:46.811776Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:514: Ticket **** (8E120919) asking for AccessServiceBulkAuthorization( something.read something.write) 2025-05-07T08:50:46.811996Z node 2 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000031108] Request BulkAuthorizeRequest { iam_token: "**** (8E120919)" actions { items { resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } permission: "something.read" } items { resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } permission: "something.write" } } result_filter: ALL_FAILED } 2025-05-07T08:50:46.814371Z node 2 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [517000031108] Response BulkAuthorizeResponse { subject { user_account { id: "user1" } } results { items { permission: "something.write" resource_path { id: "bbbb4554" type: "ydb.database" } 
resource_path { id: "aaaa1234" type: "resource-manager.folder" } permission_denied_error { message: "Access Denied" } } } } 2025-05-07T08:50:46.814580Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:1329: Ticket **** (8E120919) permission something.write access denied for subject "user1@as" 2025-05-07T08:50:46.814665Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1815: Ticket **** (8E120919) () has now permanent error message 'something.write for folder_id aaaa1234 - Access Denied' 2025-05-07T08:50:50.483761Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7501623598727396286:2069];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:50:50.484421Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003ed9/r3tmp/tmpoq8wqA/pdisk_1.dat 2025-05-07T08:50:50.703619Z node 3 :IMPORT WARN: ... fier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:50:54.774722Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:50:54.774733Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:50:54.774893Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:2610 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 
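In the two BulkAuthorize exchanges above, the same denied item ('something.write' on folder aaaa1234) first leaves the ticket valid (user1@as) and then turns it into a permanent error. A plausible reading, assumed here rather than taken from the sources, is that the outcome depends on which permissions the caller marked as required:

    #include <iostream>
    #include <set>
    #include <string>
    #include <vector>

    // One failed item from a BulkAuthorizeResponse. With result_filter:
    // ALL_FAILED the service returns only denied permissions, as in the
    // responses logged above.
    struct FailedItem {
        std::string permission;
        std::string folderId;
    };

    // Hypothetical decision logic: the ticket stays valid while every denied
    // permission was optional, and becomes a permanent error as soon as a
    // required one is denied. Which permissions are required is assumed to be
    // configured per request.
    std::string Evaluate(const std::string& subject,
                         const std::vector<FailedItem>& denied,
                         const std::set<std::string>& required) {
        for (const auto& item : denied) {
            if (required.count(item.permission)) {
                return "permanent error message '" + item.permission +
                       " for folder_id " + item.folderId + " - Access Denied'";
            }
        }
        return "valid token of " + subject;
    }

    int main() {
        std::vector<FailedItem> denied = {{"something.write", "aaaa1234"}};
        // something.write optional -> token stays valid, as in the first exchange
        std::cout << Evaluate("user1@as", denied, {}) << "\n";
        // something.write required -> permanent error, as in the second exchange
        std::cout << Evaluate("user1@as", denied, {"something.write"}) << "\n";
        return 0;
    }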
2025-05-07T08:50:55.108734Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-05-07T08:50:55.115138Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-05-07T08:50:55.118767Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:758: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-05-07T08:50:55.118807Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:763: CanInitLoginToken, target database candidates(1): /Root 2025-05-07T08:50:55.118816Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:816: CanInitLoginToken, database /Root, A6 error 2025-05-07T08:50:55.118943Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:514: Ticket **** (8E120919) asking for AccessServiceBulkAuthorization( something.read somewhere.sleep something.list something.write something.eat) 2025-05-07T08:50:55.119002Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [517000101188] Connect to grpc://localhost:23618 2025-05-07T08:50:55.120117Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000101188] Request BulkAuthorizeRequest { iam_token: "**** (8E120919)" actions { items { resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } permission: "something.read" } items { resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } permission: "somewhere.sleep" } items { resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } permission: "something.list" ...(truncated) } 2025-05-07T08:50:55.131020Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [517000101188] Response BulkAuthorizeResponse { subject { user_account { id: "user1" } } results { items { permission: "something.read" resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } permission_denied_error { message: "Access Denied" } } items { permission: "somewhere.sleep" resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } permission_denied_error { message: "Access Denied" } } items { permission: "something.list" r...(truncated) } 2025-05-07T08:50:55.131567Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:1329: Ticket **** (8E120919) permission something.read access denied for subject "user1@as" 2025-05-07T08:50:55.131590Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:1329: Ticket **** (8E120919) permission somewhere.sleep access denied for subject "user1@as" 2025-05-07T08:50:55.131601Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:1329: Ticket **** (8E120919) permission something.list access denied for subject "user1@as" 2025-05-07T08:50:55.131614Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:1329: Ticket **** (8E120919) permission something.eat access denied for subject "user1@as" 2025-05-07T08:50:55.131639Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:1003: Ticket **** (8E120919) asking for UserAccount(user1@as) 2025-05-07T08:50:55.131793Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [517000101508] Connect to grpc://localhost:16736 2025-05-07T08:50:55.132602Z node 4 :GRPC_CLIENT DEBUG: 
grpc_service_client.h:120: [517000101508] Request GetUserAccountRequest { user_account_id: "user1" } 2025-05-07T08:50:55.150264Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [517000101508] Response UserAccount { yandex_passport_user_account { login: "login1" } } 2025-05-07T08:50:55.152172Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1784: Ticket **** (8E120919) () has now valid token of login1@passport 2025-05-07T08:50:58.955255Z node 5 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[5:7501623632855404677:2065];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:50:58.955306Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003ed9/r3tmp/tmpop0Zbb/pdisk_1.dat 2025-05-07T08:50:59.108084Z node 5 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 4312, node 5 2025-05-07T08:50:59.138905Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:50:59.138999Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:50:59.140707Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:50:59.254757Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:50:59.254794Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:50:59.254805Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:50:59.254975Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:27767 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:50:59.604864Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T08:50:59.614929Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:758: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-05-07T08:50:59.614963Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:763: CanInitLoginToken, target database candidates(1): /Root 2025-05-07T08:50:59.614973Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:816: CanInitLoginToken, database /Root, A6 error 2025-05-07T08:50:59.615066Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:514: Ticket **** (8E120919) asking for AccessServiceBulkAuthorization( something.read something.write) 2025-05-07T08:50:59.615110Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [5170000a9d08] Connect to grpc://localhost:16236 2025-05-07T08:50:59.616191Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [5170000a9d08] Request BulkAuthorizeRequest { iam_token: "**** (8E120919)" actions { items { resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } permission: "something.read" } items { resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } permission: "something.write" } } result_filter: ALL_FAILED } 2025-05-07T08:50:59.638186Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [5170000a9d08] Status 14 Service Unavailable 2025-05-07T08:50:59.638478Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:1145: Ticket **** (8E120919) permission something.read now has a retryable error "Service Unavailable" retryable: 1 2025-05-07T08:50:59.638507Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:1145: Ticket **** (8E120919) permission something.write now has a retryable error "Service Unavailable" retryable: 1 2025-05-07T08:50:59.638541Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1802: Ticket **** (8E120919) () has now retryable error message 'Service Unavailable' 2025-05-07T08:50:59.638647Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:514: Ticket **** (8E120919) asking for AccessServiceBulkAuthorization( something.read something.write) 2025-05-07T08:50:59.639026Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [5170000a9d08] Request BulkAuthorizeRequest { iam_token: "**** (8E120919)" actions { items { resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } permission: "something.read" } items { resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } permission: "something.write" } } result_filter: ALL_FAILED } 2025-05-07T08:50:59.645232Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [5170000a9d08] Status 1 CANCELLED 2025-05-07T08:50:59.645510Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:1145: Ticket **** (8E120919) permission something.read now has a retryable error "CANCELLED" retryable: 1 2025-05-07T08:50:59.645532Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:1145: Ticket **** (8E120919) permission something.write now has a retryable error "CANCELLED" retryable: 1 2025-05-07T08:50:59.645668Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1802: Ticket **** (8E120919) () has now retryable error message 'CANCELLED' >> Viewer::JsonStorageListingV1 [GOOD] >> Viewer::JsonStorageListingV1GroupIdFilter >> TPartitionTests::TestNonConflictingActsBatchOk [GOOD] >> TPartitionTests::CorrectRange_Rollback >> TPartitionTests::TestBatchingWithChangeConfig >> TTicketParserTest::AuthorizationWithUserAccount 
[GOOD] >> TTicketParserTest::AuthorizationUnavailable >> TTicketParserTest::NebiusAuthorizationUnavailable [GOOD] >> TPartitionTests::CorrectRange_Rollback [GOOD] >> TPartitionTests::DataTxCalcPredicateOk >> TTicketParserTest::NebiusAuthorization [GOOD] >> TTicketParserTest::NebiusAuthorizationModify >> TTicketParserTest::NebiusAuthenticationRetryError [GOOD] >> TTicketParserTest::NebiusAuthenticationRetryErrorImmediately >> TMessageBusServerPersQueueGetPartitionLocationsMetaRequestTest::FailsOnBadRootStatusInGetNodeRequest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/security/ut/unittest >> TTicketParserTest::NebiusAuthorizationUnavailable [GOOD] Test command err: 2025-05-07T08:50:42.231624Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501623556591837723:2196];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:50:42.232184Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003ec0/r3tmp/tmpPPScF5/pdisk_1.dat 2025-05-07T08:50:43.286352Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:50:43.286534Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:50:43.289731Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:50:43.290155Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:50:43.292486Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 16907, node 1 2025-05-07T08:50:43.442761Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:50:43.442789Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:50:43.442812Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:50:43.442947Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23335 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... 
(TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:50:43.889166Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:50:43.913734Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1784: Ticket F02761B9ECACFA1CAE662983912B2FAD85D9F61DAE1F5C0346DF871F82FE0DCA () has now valid token of C=RU,ST=MSK,L=MSK,O=YA,OU=UtTest,CN=localhost@cert 2025-05-07T08:50:47.208602Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7501623585833650150:2195];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:50:47.208842Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003ec0/r3tmp/tmpfEmjr2/pdisk_1.dat 2025-05-07T08:50:47.406861Z node 2 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:50:47.433113Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:50:47.435207Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:50:47.436366Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 24631, node 2 2025-05-07T08:50:47.544350Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:50:47.544382Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:50:47.544390Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:50:47.544476Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:21825 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:50:47.773999Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 
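
[editor's note] The trace above shows the ticket parser turning a client certificate into a token whose identity is the subject DN "C=RU,ST=MSK,L=MSK,O=YA,OU=UtTest,CN=localhost@cert". A minimal standalone sketch of that DN-to-identity step follows; it assumes simple comma-separated key=value components (real DNs may contain escaped commas) and is illustrative, not YDB's actual ticket-parser code.

    // Sketch: split a certificate subject DN into key/value pairs and pull
    // out the CN, which the log above shows used as the token identity.
    #include <iostream>
    #include <map>
    #include <sstream>
    #include <string>

    std::map<std::string, std::string> ParseSubjectDn(const std::string& dn) {
        std::map<std::string, std::string> fields;
        std::stringstream ss(dn);
        std::string part;
        while (std::getline(ss, part, ',')) {      // split on commas (no escaping handled)
            auto eq = part.find('=');
            if (eq != std::string::npos) {
                fields[part.substr(0, eq)] = part.substr(eq + 1);
            }
        }
        return fields;
    }

    int main() {
        auto fields = ParseSubjectDn("C=RU,ST=MSK,L=MSK,O=YA,OU=UtTest,CN=localhost@cert");
        std::cout << "token identity: " << fields["CN"] << "\n";  // prints: localhost@cert
    }
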
2025-05-07T08:50:47.784592Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-05-07T08:50:47.787122Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1815: Ticket 2BB968CC31833D1E77E52212680225D7C5594CC907DB36BA0ABEFE03C57DC7FE () has now permanent error message 'Cannot create token from certificate. Client`s certificate and server`s certificate have different issuers' 2025-05-07T08:50:47.787742Z node 2 :TICKET_PARSER ERROR: ticket_parser_impl.h:969: Ticket 2BB968CC31833D1E77E52212680225D7C5594CC907DB36BA0ABEFE03C57DC7FE: Cannot create token from certificate. Client`s certificate and server`s certificate have different issuers 2025-05-07T08:50:51.873278Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7501623599617642671:2082];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:50:51.874152Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003ec0/r3tmp/tmp7JbuD9/pdisk_1.dat 2025-05-07T08:50:52.219974Z node 3 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:50:52.240226Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:50:52.240299Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:50:52.241861Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 25884, node 3 2025-05-07T08:50:52.327836Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:50:52.327865Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:50:52.327873Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:50:52.328022Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:24842 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
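
[editor's note] The lines above and below show the parser's two error classes: certificate failures ("different issuers", "failed verification") become *permanent* errors, while "Service Unavailable" from the access service becomes a *retryable* error ("retryable: 1") that is refreshed later. A hedged sketch of that split; TError/ClassifyGrpcStatus are illustrative names, not YDB's API.

    // Sketch: gRPC status 14 (UNAVAILABLE) is transient, so it is retried;
    // statuses like 16 (UNAUTHENTICATED) are permanent for this ticket,
    // matching the "retryable:1" / "retryable:0" traces in this log.
    #include <iostream>
    #include <string>

    struct TError {
        std::string Message;
        bool Retryable;  // retryable errors get refreshed; permanent ones are cached as failures
    };

    TError ClassifyGrpcStatus(int code, const std::string& msg) {
        bool retryable = (code == 14);  // UNAVAILABLE: transport/service hiccup
        return TError{msg, retryable};
    }

    int main() {
        TError e1 = ClassifyGrpcStatus(14, "Service Unavailable");
        TError e2 = ClassifyGrpcStatus(16, "Access Denied");
        std::cout << e1.Message << " retryable=" << e1.Retryable << "\n";  // retryable=1
        std::cout << e2.Message << " retryable=" << e2.Retryable << "\n";  // retryable=0
    }
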
2025-05-07T08:50:52.603173Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:50:52.618210Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-05-07T08:50:52.624188Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1815: Ticket 8493A69E54A27C4154F3ED34FFB761826498BA83D63E9F1888DF33C09AE3601E () has now permanent error message 'Cannot create token from certificate. Client certificate failed verification' 2025-05-07T08:50:52.624708Z node 3 :TICKET_PARSER ERROR: ticket_parser_impl.h:969: Ticket 8493A69E54A27C4154F3ED34FFB761826498BA83D63E9F1888DF33C09AE3601E: Cannot create token from certificate. Client certificate failed verification 2025-05-07T08:50:56.576233Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7501623621901538737:2130];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:50:56.597521Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003ec0/r3tmp/tmpxfqYVC/pdisk_1.dat 2025-05-07T08:50:56.762945Z node 4 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:50:56.767841Z node 4 :HIVE WARN: node_info.cpp:25: HIV ... ror 2025-05-07T08:50:57.172433Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:532: Ticket **** (8E120919) asking for AccessServiceAuthorization( something.read something.write) 2025-05-07T08:50:57.172498Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [51700000e808] Connect to grpc://localhost:12573 2025-05-07T08:50:57.175775Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [51700000e808] Request AuthorizeRequest { checks { key: 0 value { permission { name: "something.read" } container_id: "aaaa1234" resource_path { path { id: "bbbb4554" } } iam_token: "**** (8E120919)" } } checks { key: 1 value { permission { name: "something.write" } container_id: "aaaa1234" resource_path { path { id: "bbbb4554" } } iam_token: "**** (8E120919)" } } } NebiusAccessService::Authorize request checks { key: 0 value { permission { name: "something.read" } container_id: "aaaa1234" resource_path { path { id: "bbbb4554" } } iam_token: "user1" } } checks { key: 1 value { permission { name: "something.write" } container_id: "aaaa1234" resource_path { path { id: "bbbb4554" } } iam_token: "user1" } } NebiusAccessService::Authorize response results { key: 0 value { account { user_account { id: "user1" } } } } results { key: 1 value { resultCode: PERMISSION_DENIED } } 0: "OK" 2025-05-07T08:50:57.202406Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [51700000e808] Response AuthorizeResponse { results { key: 0 value { account { user_account { id: "user1" } } } } results { key: 1 value { resultCode: PERMISSION_DENIED } } } 2025-05-07T08:50:57.202605Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:1225: Ticket **** (8E120919) permission something.write access denied for subject "user1@as" 2025-05-07T08:50:57.202766Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1784: Ticket **** (8E120919) () has now valid token of user1@as 
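
[editor's note] The Authorize exchange above batches two checks (something.read, something.write); the read check succeeds while the write check returns PERMISSION_DENIED, yet the ticket first ends up as "has now valid token of user1@as" (08:50:57.202766Z) and only later, when the same denial hits a required permission, as a permanent error (08:50:57.215684Z). A small sketch of that combination rule, with illustrative types rather than the real ticket-parser interfaces:

    // Sketch: one granted optional permission keeps the token valid, but a
    // denied *required* permission fails the whole ticket.
    #include <iostream>
    #include <string>
    #include <vector>

    struct TCheckResult {
        std::string Permission;
        bool Allowed;
        bool Required;  // whether denial of this permission is fatal for the ticket
    };

    bool TicketIsValid(const std::vector<TCheckResult>& results) {
        bool anyAllowed = false;
        for (const auto& r : results) {
            if (!r.Allowed && r.Required) {
                return false;            // denied required permission: permanent error
            }
            anyAllowed = anyAllowed || r.Allowed;
        }
        return anyAllowed;               // otherwise one granted permission suffices
    }

    int main() {
        std::vector<TCheckResult> optional = {
            {"something.read", true, false}, {"something.write", false, false}};
        std::vector<TCheckResult> required = {
            {"something.read", true, true}, {"something.write", false, true}};
        std::cout << TicketIsValid(optional) << "\n";  // 1: token stays valid
        std::cout << TicketIsValid(required) << "\n";  // 0: permanent error
    }
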
2025-05-07T08:50:57.206077Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:758: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-05-07T08:50:57.206107Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:763: CanInitLoginToken, target database candidates(1): /Root 2025-05-07T08:50:57.206115Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:816: CanInitLoginToken, database /Root, A6 error 2025-05-07T08:50:57.206176Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:532: Ticket **** (8E120919) asking for AccessServiceAuthorization( something.read something.write) 2025-05-07T08:50:57.206471Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [51700000e808] Request AuthorizeRequest { checks { key: 0 value { permission { name: "something.read" } container_id: "aaaa1234" resource_path { path { id: "bbbb4554" } } iam_token: "**** (8E120919)" } } checks { key: 1 value { permission { name: "something.write" } container_id: "aaaa1234" resource_path { path { id: "bbbb4554" } } iam_token: "**** (8E120919)" } } } NebiusAccessService::Authorize request checks { key: 0 value { permission { name: "something.read" } container_id: "aaaa1234" resource_path { path { id: "bbbb4554" } } iam_token: "user1" } } checks { key: 1 value { permission { name: "something.write" } container_id: "aaaa1234" resource_path { path { id: "bbbb4554" } } iam_token: "user1" } } NebiusAccessService::Authorize response results { key: 0 value { account { user_account { id: "user1" } } } } results { key: 1 value { resultCode: PERMISSION_DENIED } } 0: "OK" 2025-05-07T08:50:57.215462Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [51700000e808] Response AuthorizeResponse { results { key: 0 value { account { user_account { id: "user1" } } } } results { key: 1 value { resultCode: PERMISSION_DENIED } } } 2025-05-07T08:50:57.215611Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:1225: Ticket **** (8E120919) permission something.write access denied for subject "user1@as" 2025-05-07T08:50:57.215684Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1815: Ticket **** (8E120919) () has now permanent error message 'something.write for aaaa1234 bbbb4554 - PERMISSION_DENIED' 2025-05-07T08:51:01.075269Z node 5 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[5:7501623643684040098:2106];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:51:01.075436Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003ec0/r3tmp/tmpQoO5v3/pdisk_1.dat 2025-05-07T08:51:01.263088Z node 5 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:51:01.275445Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:51:01.275547Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:51:01.277339Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 15948, node 5 2025-05-07T08:51:01.438713Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:51:01.438741Z node 5 
:NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:51:01.438750Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:51:01.438887Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:12162 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:51:01.791719Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:51:01.799257Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-05-07T08:51:01.801836Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:758: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-05-07T08:51:01.801868Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:763: CanInitLoginToken, target database candidates(1): /Root 2025-05-07T08:51:01.801876Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:816: CanInitLoginToken, database /Root, A6 error 2025-05-07T08:51:01.801961Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:532: Ticket **** (8E120919) asking for AccessServiceAuthorization( something.read something.write) 2025-05-07T08:51:01.802027Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [517000110308] Connect to grpc://localhost:16257 2025-05-07T08:51:01.803078Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000110308] Request AuthorizeRequest { checks { key: 0 value { permission { name: "something.read" } container_id: "aaaa1234" resource_path { path { id: "bbbb4554" } } iam_token: "**** (8E120919)" } } checks { key: 1 value { permission { name: "something.write" } container_id: "aaaa1234" resource_path { path { id: "bbbb4554" } } iam_token: "**** (8E120919)" } } } NebiusAccessService::Authorize request checks { key: 0 value { permission { name: "something.read" } container_id: "aaaa1234" resource_path { path { id: "bbbb4554" } } iam_token: "user1" } } checks { key: 1 value { permission { name: "something.write" } container_id: "aaaa1234" resource_path { path { id: "bbbb4554" } } iam_token: "user1" } } NebiusAccessService::Authorize response 14: "Service Unavailable" 2025-05-07T08:51:01.814533Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [517000110308] Status 14 Service Unavailable 2025-05-07T08:51:01.816403Z node 5 
:TICKET_PARSER TRACE: ticket_parser_impl.h:1145: Ticket **** (8E120919) permission something.read now has a retryable error "Service Unavailable" retryable: 1 2025-05-07T08:51:01.816435Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:1145: Ticket **** (8E120919) permission something.write now has a retryable error "Service Unavailable" retryable: 1 2025-05-07T08:51:01.816476Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1802: Ticket **** (8E120919) () has now retryable error message 'Service Unavailable' 2025-05-07T08:51:01.816569Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:532: Ticket **** (8E120919) asking for AccessServiceAuthorization( something.read something.write) 2025-05-07T08:51:01.816989Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000110308] Request AuthorizeRequest { checks { key: 0 value { permission { name: "something.read" } container_id: "aaaa1234" resource_path { path { id: "bbbb4554" } } iam_token: "**** (8E120919)" } } checks { key: 1 value { permission { name: "something.write" } container_id: "aaaa1234" resource_path { path { id: "bbbb4554" } } iam_token: "**** (8E120919)" } } } NebiusAccessService::Authorize request checks { key: 0 value { permission { name: "something.read" } container_id: "aaaa1234" resource_path { path { id: "bbbb4554" } } iam_token: "user1" } } checks { key: 1 value { permission { name: "something.write" } container_id: "aaaa1234" resource_path { path { id: "bbbb4554" } } iam_token: "user1" } } NebiusAccessService::Authorize response results { key: 0 value { account { user_account { id: "user1" } } } } 14: "Service Unavailable" 2025-05-07T08:51:01.820229Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [517000110308] Status 14 Service Unavailable 2025-05-07T08:51:01.820808Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:1145: Ticket **** (8E120919) permission something.read now has a retryable error "Service Unavailable" retryable: 1 2025-05-07T08:51:01.820841Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:1145: Ticket **** (8E120919) permission something.write now has a retryable error "Service Unavailable" retryable: 1 2025-05-07T08:51:01.820872Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1802: Ticket **** (8E120919) () has now retryable error message 'Service Unavailable' >> TPQTest::TestReserveBytes [GOOD] >> TPQTest::TestSourceIdDropBySourceIdCount >> TTicketParserTest::AuthorizationWithUserAccount2 [GOOD] >> TTicketParserTest::BulkAuthorizationModify >> TMessageBusServerPersQueueGetPartitionLocationsMetaRequestTest::FailsOnBadRootStatusInGetNodeRequest [GOOD] >> TMessageBusServerPersQueueGetPartitionLocationsMetaRequestTest::FailesOnNotATopic |89.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/controller/ut_assign_tx_id/unittest |89.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/controller/ut_assign_tx_id/unittest >> KqpPg::DuplicatedColumns+useSink [GOOD] >> KqpPg::DuplicatedColumns-useSink >> TMessageBusServerPersQueueGetPartitionLocationsMetaRequestTest::FailesOnNotATopic [GOOD] >> TMessageBusServerPersQueueGetPartitionLocationsMetaRequestTest::FailsOnBalancerDescribeResultFailureWhenTopicsAreGivenExplicitly >> TMessageBusServerPersQueueGetPartitionLocationsMetaRequestTest::FailsOnBalancerDescribeResultFailureWhenTopicsAreGivenExplicitly [GOOD] >> TMessageBusServerPersQueueGetPartitionLocationsMetaRequestTest::FailsOnDuplicatedPartition >> KqpPg::TypeCoercionBulkUpsert [GOOD] >> KqpPg::TypeCoercionInsert+useSink >> 
TMessageBusServerPersQueueGetPartitionLocationsMetaRequestTest::FailsOnDuplicatedPartition [GOOD] >> ViewerTopicDataTests::TopicDataTest [GOOD] >> TTicketParserTest::AuthenticationRetryError [GOOD] >> TTicketParserTest::AuthenticationRetryErrorImmediately >> Viewer::ServerlessWithExclusiveNodesCheckTable [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/server/ut/unittest >> TMessageBusServerPersQueueGetPartitionLocationsMetaRequestTest::FailsOnDuplicatedPartition [GOOD] Test command err: Assert failed: Check response: { Status: 128 ErrorReason: "path \'Root/PQ\' has unknown/invalid root prefix \'Root\', Marker# PQ14" ErrorCode: UNKNOWN_TOPIC } Assert failed: Check response: { Status: 128 ErrorReason: "the following topics are not created: rt3.dc1--topic2, Marker# PQ95" ErrorCode: UNKNOWN_TOPIC } Assert failed: Check response: { Status: 128 ErrorReason: "topic \'Root/PQ\' describe error, Status# LookupError, Marker# PQ1" ErrorCode: ERROR } Assert failed: Check response: { Status: 128 ErrorReason: "multiple partition 2 in TopicRequest for topic \'rt3.dc1--topic2\'" ErrorCode: BAD_REQUEST } >> TPartitionTests::TestBatchingWithChangeConfig [GOOD] >> KqpPg::InsertNoTargetColumns_Serial-useSink [GOOD] >> KqpPg::InsertValuesFromTableWithDefault+useSink >> KqpPg::DropIndex [GOOD] >> KqpPg::CreateUniqPgColumn+useSink >> TPartitionTests::TestBatchingWithProposeConfig >> KqpPg::Insert_Serial-useSink [GOOD] >> KqpPg::InsertValuesFromTableWithDefaultText+useSink >> TTicketParserTest::AuthorizationUnavailable [GOOD] >> IncrementalBackup::SimpleRestore >> IncrementalBackup::SimpleBackup >> IncrementalBackup::BackupRestore >> IncrementalBackup::SimpleRestoreBackupCollection+WithIncremental >> TTicketParserTest::NebiusAuthenticationRetryErrorImmediately [GOOD] >> TTicketParserTest::NebiusAccessKeySignatureUnsupported >> TTicketParserTest::NebiusAuthorizationModify [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/viewer/ut/unittest >> Viewer::ServerlessWithExclusiveNodesCheckTable [GOOD] Test command err: 2025-05-07T08:49:29.760169Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501623248684257220:2068];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:49:29.762350Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-05-07T08:49:30.117622Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 9693, node 1 2025-05-07T08:49:30.130616Z node 1 :GRPC_SERVER WARN: grpc_request_proxy.cpp:509: SchemeBoardDelete /Root Strong=0 2025-05-07T08:49:30.168550Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:49:30.168687Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:49:30.171610Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:49:30.184736Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:49:30.184764Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from 
file: (empty maybe) 2025-05-07T08:49:30.184772Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:49:30.184900Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:2648 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:49:30.566849Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:30.596573Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710659:0, at schemeshard: 72057594046644480 2025-05-07T08:49:30.600576Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:33.179978Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623265864127102:2347], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:33.180011Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623265864127096:2344], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:33.180113Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:33.183793Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710661:3, at schemeshard: 72057594046644480 2025-05-07T08:49:33.194208Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501623265864127110:2348], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710661 completed, doublechecking } 2025-05-07T08:49:33.260612Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501623265864127161:2354] txid# 281474976710662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-05-07T08:49:35.478419Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:49:35.543302Z node 2 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:49:35.580729Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:49:35.580840Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:49:35.582969Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 24192, node 2 2025-05-07T08:49:35.682591Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:49:35.682615Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:49:35.682623Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:49:35.682752Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:13898 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:49:35.978266Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T08:49:36.025134Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715659:0, at schemeshard: 72057594046644480 2025-05-07T08:49:36.027499Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:39.032194Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7501623293887901187:2344], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:39.032269Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7501623293887901195:2347], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:39.032333Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:39.037532Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715661:3, at schemeshard: 72057594046644480 2025-05-07T08:49:39.054239Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7501623293887901201:2348], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715661 completed, doublechecking } 2025-05-07T08:49:39.109827Z node 2 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [2:7501623293887901252:2355] txid# 281474976715662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:49:41.302230Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7501623299901002574:2060];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:49:41.302324Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-05-07T08:49:41.480138Z node 3 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:49:41.516131Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:49:41.516236Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:49:41.517356Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 29658, node 3 2025-05- ... n: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:49:48.632223Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:48.642360Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T08:49:48.656093Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710659:0, at schemeshard: 72057594046644480 2025-05-07T08:49:48.660248Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:52.545699Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7501623346541788117:2345], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:52.545790Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7501623346541788125:2348], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:52.545855Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:52.551888Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710661:3, at schemeshard: 72057594046644480 2025-05-07T08:49:52.565132Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7501623346541788131:2349], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710661 completed, doublechecking } 2025-05-07T08:49:52.660188Z node 4 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [4:7501623346541788182:2353] txid# 281474976710662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:49:52.827511Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7501623325066950952:2064];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:49:52.827681Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:50:02.623331Z node 5 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [5:468:2429], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:50:02.623557Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:50:02.623665Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-05-07T08:50:03.060611Z node 5 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:50:03.235952Z node 5 :BS_NODE WARN: {NW89@node_warden_pdisk.cpp:106} Can't write new MockDevicesConfig to file Path# /Berkanavt/kikimr/testing/mock_devices.txt 2025-05-07T08:50:03.265822Z node 5 :BS_PDISK WARN: {BSP01@blobstorage_pdisk_actor.cpp:422} Magic sector is present on disk, now going to format device PDiskId# 1000 2025-05-07T08:50:03.984284Z node 5 :BS_PDISK WARN: {BSP01@blobstorage_pdisk_actor.cpp:362} Device formatting done PDiskId# 1000 TServer::EnableGrpc on GrpcPort 13090, node 5 TClient is connected to server localhost:23107 2025-05-07T08:50:04.521297Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:50:04.521376Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:50:04.521424Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:50:04.521745Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-05-07T08:50:18.687078Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [7:544:2430], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:50:18.687330Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:50:18.687443Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-05-07T08:50:19.304371Z node 7 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:50:19.519404Z node 7 :BS_NODE WARN: {NW89@node_warden_pdisk.cpp:106} Can't write new MockDevicesConfig to file Path# /Berkanavt/kikimr/testing/mock_devices.txt 2025-05-07T08:50:19.563292Z node 7 :BS_PDISK WARN: {BSP01@blobstorage_pdisk_actor.cpp:422} Magic sector is present on disk, now going to format device PDiskId# 1000 2025-05-07T08:50:20.634966Z node 7 :BS_PDISK WARN: {BSP01@blobstorage_pdisk_actor.cpp:362} Device formatting done PDiskId# 1000 TServer::EnableGrpc on GrpcPort 28497, node 7 TClient is connected to server localhost:28407 2025-05-07T08:50:21.426776Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:50:21.426898Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:50:21.426985Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:50:21.427875Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-05-07T08:50:38.569020Z node 10 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [10:542:2430], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:50:38.569624Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:50:38.569876Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-05-07T08:50:39.242379Z node 10 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:50:39.499178Z node 10 :BS_NODE WARN: {NW89@node_warden_pdisk.cpp:106} Can't write new MockDevicesConfig to file Path# /Berkanavt/kikimr/testing/mock_devices.txt 2025-05-07T08:50:39.549797Z node 10 :BS_PDISK WARN: {BSP01@blobstorage_pdisk_actor.cpp:422} Magic sector is present on disk, now going to format device PDiskId# 1000 2025-05-07T08:50:41.654875Z node 10 :BS_PDISK WARN: {BSP01@blobstorage_pdisk_actor.cpp:362} Device formatting done PDiskId# 1000 TServer::EnableGrpc on GrpcPort 7526, node 10 TClient is connected to server localhost:16526 2025-05-07T08:50:43.258829Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:50:43.258961Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:50:43.259059Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:50:43.260471Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-05-07T08:51:01.582272Z node 13 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [13:622:2431], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:51:01.583012Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:51:01.583150Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-05-07T08:51:02.172439Z node 13 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:51:02.369159Z node 13 :BS_NODE WARN: {NW89@node_warden_pdisk.cpp:106} Can't write new MockDevicesConfig to file Path# /Berkanavt/kikimr/testing/mock_devices.txt 2025-05-07T08:51:02.410881Z node 13 :BS_PDISK WARN: {BSP01@blobstorage_pdisk_actor.cpp:422} Magic sector is present on disk, now going to format device PDiskId# 1000 2025-05-07T08:51:03.609188Z node 13 :BS_PDISK WARN: {BSP01@blobstorage_pdisk_actor.cpp:362} Device formatting done PDiskId# 1000 TServer::EnableGrpc on GrpcPort 63153, node 13 TClient is connected to server localhost:12042 2025-05-07T08:51:04.488754Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:51:04.488873Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:51:04.488965Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:51:04.490086Z node 13 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration >> TMessageBusServerPersQueueGetPartitionLocationsMetaRequestTest::FailsOnFailedGetAllTopicsRequest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/security/ut/unittest >> TTicketParserTest::AuthorizationUnavailable [GOOD] Test command err: 2025-05-07T08:50:37.955593Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501623540535076333:2127];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:50:37.982848Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003eea/r3tmp/tmpL0MkPq/pdisk_1.dat 2025-05-07T08:50:38.546465Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:50:38.552163Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:50:38.552295Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:50:38.554790Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 25272, node 1 2025-05-07T08:50:38.726625Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:50:38.726651Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 
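
[editor's note] The AuthorizationUnavailable trace that follows shows the parser refreshing the ticket after each "Service Unavailable" (gRPC status 14) with growing delays (refreshes at roughly +0.7s and then +2s) until an AuthorizeResponse finally succeeds. A minimal backoff loop in that spirit; the doubling schedule is an assumption for illustration, and Authorize() here is a stand-in stub, not the real access-service client.

    // Sketch: retry an UNAVAILABLE authorization call with exponential backoff.
    #include <chrono>
    #include <iostream>
    #include <thread>

    int attempts = 0;
    bool Authorize() { return ++attempts >= 3; }  // stub: fails twice, then succeeds

    int main() {
        auto delay = std::chrono::milliseconds(700);  // first refresh interval
        for (int i = 0; i < 5; ++i) {
            if (Authorize()) {
                std::cout << "authorized after " << attempts << " attempts\n";
                return 0;
            }
            std::this_thread::sleep_for(delay);  // wait before refreshing the ticket
            delay *= 2;                          // back off between retries
        }
        std::cout << "gave up\n";
        return 1;
    }
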
2025-05-07T08:50:38.726667Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:50:38.726810Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:22371 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:50:39.206409Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:50:39.233584Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T08:50:39.249839Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:493: Ticket AKIA****MPLE (B3EDC139) asking for AccessServiceAuthorization(something.read) 2025-05-07T08:50:39.250043Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [517000010088] Connect to grpc://localhost:26302 2025-05-07T08:50:39.253861Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000010088] Request AuthorizeRequest { signature { access_key_id: "AKIAIOSFODNN7EXAMPLE" v4_parameters { signed_at { } } } permission: "something.read" resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } } 2025-05-07T08:50:39.298267Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [517000010088] Status 14 Service Unavailable 2025-05-07T08:50:39.302210Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:1413: Ticket AKIA****MPLE (B3EDC139) permission something.read now has a permanent error "Service Unavailable" retryable:1 2025-05-07T08:50:39.302268Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1802: Ticket AKIA****MPLE (B3EDC139) () has now retryable error message 'Service Unavailable' 2025-05-07T08:50:39.302322Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:493: Ticket AKIA****MPLE (B3EDC139) asking for AccessServiceAuthorization(something.read) 2025-05-07T08:50:39.302627Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000010088] Request AuthorizeRequest { signature { access_key_id: "AKIAIOSFODNN7EXAMPLE" v4_parameters { signed_at { } } } permission: "something.read" resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } } 2025-05-07T08:50:39.305069Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [517000010088] Status 14 Service Unavailable 2025-05-07T08:50:39.305450Z node 1 :TICKET_PARSER 
TRACE: ticket_parser_impl.h:1413: Ticket AKIA****MPLE (B3EDC139) permission something.read now has a permanent error "Service Unavailable" retryable:1 2025-05-07T08:50:39.305485Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1802: Ticket AKIA****MPLE (B3EDC139) () has now retryable error message 'Service Unavailable' 2025-05-07T08:50:40.003592Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1506: Refreshing ticket AKIA****MPLE (B3EDC139) 2025-05-07T08:50:40.003663Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:493: Ticket AKIA****MPLE (B3EDC139) asking for AccessServiceAuthorization(something.read) 2025-05-07T08:50:40.004481Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000010088] Request AuthorizeRequest { signature { access_key_id: "AKIAIOSFODNN7EXAMPLE" v4_parameters { signed_at { } } } permission: "something.read" resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } } 2025-05-07T08:50:40.007593Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [517000010088] Status 14 Service Unavailable 2025-05-07T08:50:40.007954Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:1413: Ticket AKIA****MPLE (B3EDC139) permission something.read now has a permanent error "Service Unavailable" retryable:1 2025-05-07T08:50:40.007991Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1802: Ticket AKIA****MPLE (B3EDC139) () has now retryable error message 'Service Unavailable' 2025-05-07T08:50:42.014694Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1506: Refreshing ticket AKIA****MPLE (B3EDC139) 2025-05-07T08:50:42.014754Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:493: Ticket AKIA****MPLE (B3EDC139) asking for AccessServiceAuthorization(something.read) 2025-05-07T08:50:42.015008Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000010088] Request AuthorizeRequest { signature { access_key_id: "AKIAIOSFODNN7EXAMPLE" v4_parameters { signed_at { } } } permission: "something.read" resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } } 2025-05-07T08:50:42.031149Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [517000010088] Response AuthorizeResponse { subject { user_account { id: "user1" } } } 2025-05-07T08:50:42.031549Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:1398: Ticket AKIA****MPLE (B3EDC139) permission something.read now has a valid subject "user1@as" 2025-05-07T08:50:42.031682Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1784: Ticket AKIA****MPLE (B3EDC139) () has now valid token of user1@as 2025-05-07T08:50:42.956130Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501623540535076333:2127];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:50:42.956225Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:50:52.282368Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7501623603670585336:2215];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:50:52.339782Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # 
/home/runner/.ya/build/build_root/zvgn/003eea/r3tmp/tmp2kAaaS/pdisk_1.dat 2025-05-07T08:50:52.488769Z node 2 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:50:52.502444Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:50:52.502524Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:50:52.504148Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 15395, node 2 2025-05-07T08:50:52.678553Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:50:52.678575Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:50:52.678582Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:50:52.678688Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:27707 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:50:52.947978Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:50:52.955169Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T08:50:52.966205Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:493: Ticket AKIA****MPLE (B3EDC139) asking for AccessServiceAuthorization(something.read) 2025-05-07T08:50:52.966304Z node 2 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [5170000b5688] Connect to grpc://localhost:3359 2025-05-07T08:50:52.967239Z node 2 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [5170000b5688] Request AuthorizeRequest { signature { access_key_id: "AKIAIOSFODNN7EXAMPLE" v4_p ... 
DEBUG: grpc_service_client.h:107: [517000035a88] Response AuthorizeResponse { subject { user_account { id: "user1" } } } 2025-05-07T08:51:01.479257Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:1398: Ticket **** (8E120919) permission something.read now has a valid subject "user1@as" 2025-05-07T08:51:01.479317Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:1003: Ticket **** (8E120919) asking for UserAccount(user1@as) 2025-05-07T08:51:01.482878Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [517000036188] Connect to grpc://localhost:10428 2025-05-07T08:51:01.483878Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000036188] Request GetUserAccountRequest { user_account_id: "user1" } 2025-05-07T08:51:01.503187Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [517000036188] Response UserAccount { yandex_passport_user_account { login: "login1" } } 2025-05-07T08:51:01.506403Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1784: Ticket **** (8E120919) () has now valid token of login1@passport 2025-05-07T08:51:01.514259Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:758: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-05-07T08:51:01.514294Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:763: CanInitLoginToken, target database candidates(1): /Root 2025-05-07T08:51:01.514302Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:816: CanInitLoginToken, database /Root, A6 error 2025-05-07T08:51:01.514336Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:493: Ticket **** (8E120919) asking for AccessServiceAuthorization(something.write) 2025-05-07T08:51:01.514915Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000035a88] Request AuthorizeRequest { iam_token: "**** (8E120919)" permission: "something.write" resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } } 2025-05-07T08:51:01.517304Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [517000035a88] Status 16 Access Denied 2025-05-07T08:51:01.517568Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:1413: Ticket **** (8E120919) permission something.write now has a permanent error "Access Denied" retryable:0 2025-05-07T08:51:01.517609Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1815: Ticket **** (8E120919) () has now permanent error message 'Access Denied' 2025-05-07T08:51:01.530266Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:758: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-05-07T08:51:01.530308Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:763: CanInitLoginToken, target database candidates(1): /Root 2025-05-07T08:51:01.530320Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:816: CanInitLoginToken, database /Root, A6 error 2025-05-07T08:51:01.530359Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:493: Ticket **** (8E120919) asking for AccessServiceAuthorization(something.read) 2025-05-07T08:51:01.530414Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:493: Ticket **** (8E120919) asking for AccessServiceAuthorization(something.write) 2025-05-07T08:51:01.530600Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000035a88] Request AuthorizeRequest { iam_token: "**** (8E120919)" permission: "something.read" resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } } 2025-05-07T08:51:01.531242Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000035a88] Request 
AuthorizeRequest { iam_token: "**** (8E120919)" permission: "something.write" resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } } 2025-05-07T08:51:01.534886Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [517000035a88] Response AuthorizeResponse { subject { user_account { id: "user1" } } } 2025-05-07T08:51:01.535169Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:1398: Ticket **** (8E120919) permission something.read now has a valid subject "user1@as" 2025-05-07T08:51:01.536365Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [517000035a88] Response AuthorizeResponse { subject { user_account { id: "user1" } } } 2025-05-07T08:51:01.537720Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:1398: Ticket **** (8E120919) permission something.write now has a valid subject "user1@as" 2025-05-07T08:51:01.537744Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:1003: Ticket **** (8E120919) asking for UserAccount(user1@as) 2025-05-07T08:51:01.537919Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1784: Ticket **** (8E120919) () has now valid token of login1@passport 2025-05-07T08:51:05.688028Z node 5 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[5:7501623663264068783:2061];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:51:05.688113Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003eea/r3tmp/tmpj7yF4v/pdisk_1.dat 2025-05-07T08:51:05.879508Z node 5 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:51:05.893779Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:51:05.893879Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:51:05.897462Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 26993, node 5 2025-05-07T08:51:05.951247Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:51:05.951277Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:51:05.951291Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:51:05.951474Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:20238 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:51:06.257499Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:51:06.265159Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-05-07T08:51:06.267282Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:758: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-05-07T08:51:06.267315Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:763: CanInitLoginToken, target database candidates(1): /Root 2025-05-07T08:51:06.267324Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:816: CanInitLoginToken, database /Root, A6 error 2025-05-07T08:51:06.267360Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:493: Ticket **** (8E120919) asking for AccessServiceAuthorization(something.read) 2025-05-07T08:51:06.267432Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:493: Ticket **** (8E120919) asking for AccessServiceAuthorization(something.write) 2025-05-07T08:51:06.267486Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [5170000cc288] Connect to grpc://localhost:63091 2025-05-07T08:51:06.268455Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [5170000cc288] Request AuthorizeRequest { iam_token: "**** (8E120919)" permission: "something.read" resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } } 2025-05-07T08:51:06.268749Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [5170000cc288] Request AuthorizeRequest { iam_token: "**** (8E120919)" permission: "something.write" resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } } 2025-05-07T08:51:06.279118Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [5170000cc288] Response AuthorizeResponse { subject { user_account { id: "user1" } } } 2025-05-07T08:51:06.279360Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:1398: Ticket **** (8E120919) permission something.read now has a valid subject "user1@as" 2025-05-07T08:51:06.279704Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [5170000cc288] Status 14 Service Unavailable 2025-05-07T08:51:06.280014Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:1413: Ticket **** (8E120919) permission something.write now has a permanent error "Service Unavailable" retryable:1 2025-05-07T08:51:06.280049Z node 5 
:TICKET_PARSER DEBUG: ticket_parser_impl.h:1802: Ticket **** (8E120919) () has now retryable error message 'Service Unavailable' 2025-05-07T08:51:06.280076Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:493: Ticket **** (8E120919) asking for AccessServiceAuthorization(something.read) 2025-05-07T08:51:06.280127Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:493: Ticket **** (8E120919) asking for AccessServiceAuthorization(something.write) 2025-05-07T08:51:06.280319Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [5170000cc288] Request AuthorizeRequest { iam_token: "**** (8E120919)" permission: "something.read" resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } } 2025-05-07T08:51:06.280906Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [5170000cc288] Request AuthorizeRequest { iam_token: "**** (8E120919)" permission: "something.write" resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } } 2025-05-07T08:51:06.285224Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [5170000cc288] Status 1 CANCELLED 2025-05-07T08:51:06.285504Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [5170000cc288] Status 1 CANCELLED 2025-05-07T08:51:06.286303Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:1421: Ticket **** (8E120919) permission something.read now has a retryable error "CANCELLED" 2025-05-07T08:51:06.286724Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:1413: Ticket **** (8E120919) permission something.write now has a permanent error "CANCELLED" retryable:1 2025-05-07T08:51:06.286758Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1802: Ticket **** (8E120919) () has now retryable error message 'CANCELLED' >> TTicketParserTest::BulkAuthorizationModify [GOOD] >> TMessageBusServerPersQueueGetPartitionLocationsMetaRequestTest::FailsOnFailedGetAllTopicsRequest [GOOD] >> TMessageBusServerPersQueueGetPartitionLocationsMetaRequestTest::FailsOnNoBalancerInGetNodeRequest >> TPartitionTests::DataTxCalcPredicateOk [GOOD] >> TPartitionTests::DataTxCalcPredicateError >> TMessageBusServerPersQueueGetPartitionLocationsMetaRequestTest::FailsOnNoBalancerInGetNodeRequest [GOOD] >> TMessageBusServerPersQueueGetPartitionLocationsMetaRequestTest::FailsOnEmptyTopicName >> TTicketParserTest::LoginRefreshGroupsGood [GOOD] >> TTicketParserTest::LoginCheckRemovedUser ------- [TM] {asan, default-linux-x86_64, release} ydb/core/security/ut/unittest >> TTicketParserTest::NebiusAuthorizationModify [GOOD] Test command err: 2025-05-07T08:50:36.098583Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501623538363554951:2063];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:50:36.100405Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003f14/r3tmp/tmpzshKIP/pdisk_1.dat 2025-05-07T08:50:36.855655Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:50:36.903576Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:50:36.903714Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> 
Connecting 2025-05-07T08:50:36.909251Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 23911, node 1 2025-05-07T08:50:37.123030Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:50:37.123075Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:50:37.123083Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:50:37.123209Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:15297 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:50:37.821616Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T08:50:37.853104Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:758: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-05-07T08:50:37.853151Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:763: CanInitLoginToken, target database candidates(1): /Root 2025-05-07T08:50:37.853174Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:816: CanInitLoginToken, database /Root, A6 error 2025-05-07T08:50:37.853601Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:563: Ticket **** (8E120919) asking for AccessServiceAuthentication 2025-05-07T08:50:37.853688Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [517000010088] Connect to grpc://localhost:11191 2025-05-07T08:50:37.856263Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000010088] Request AuthenticateRequest { iam_token: "**** (8E120919)" } NebiusAccessService::Authenticate request iam_token: "user1" NebiusAccessService::Authenticate response 14: "Service Unavailable" 2025-05-07T08:50:37.931011Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [517000010088] Status 14 Service Unavailable 2025-05-07T08:50:37.932962Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1802: Ticket **** (8E120919) () has now retryable error message 'Service Unavailable' 2025-05-07T08:50:37.932994Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:563: Ticket **** (8E120919) asking for AccessServiceAuthentication 2025-05-07T08:50:37.933164Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000010088] Request AuthenticateRequest { iam_token: "**** (8E120919)" } NebiusAccessService::Authenticate request iam_token: "user1" NebiusAccessService::Authenticate response 14: "Service Unavailable" 2025-05-07T08:50:37.937036Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [517000010088] Status 14 Service Unavailable 2025-05-07T08:50:37.938293Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1802: Ticket **** (8E120919) () has now retryable error message 'Service Unavailable' test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003f14/r3tmp/tmpB79W0B/pdisk_1.dat 2025-05-07T08:50:44.205177Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:50:44.233440Z node 2 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:50:44.241244Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:50:44.241343Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:50:44.243110Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 13352, node 2 2025-05-07T08:50:44.395105Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:50:44.395133Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:50:44.395141Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:50:44.395249Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:22395 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:50:44.743822Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:50:44.764054Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T08:50:44.768019Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:758: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-05-07T08:50:44.768054Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:763: CanInitLoginToken, target database candidates(1): /Root 2025-05-07T08:50:44.768077Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:816: CanInitLoginToken, database /Root, A6 error 2025-05-07T08:50:44.768172Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:532: Ticket **** (8E120919) asking for AccessServiceAuthorization( something.read) 2025-05-07T08:50:44.768224Z node 2 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [517000051e08] Connect to grpc://localhost:15646 2025-05-07T08:50:44.769621Z node 2 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000051e08] Request AuthorizeRequest { checks { key: 0 value { permission { name: "something.read" } container_id: "aaaa1234" resource_path { path { id: "bbbb4554" } } iam_token: "**** (8E120919)" } } } NebiusAccessService::Authorize request checks { key: 0 value { permission { name: "something.read" } container_id: "aaaa1234" resource_path { path { id: "bbbb4554" } } iam_token: "user1" } } NebiusAccessService::Authorize response 14: "Service Unavailable" 2025-05-07T08:50:44.790805Z node 2 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [517000051e08] Status 14 Service Unavailable NebiusAccessService::Authorize request 2025-05-07T08:50:44.791440Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:1145: Ticket **** (8E120919) permission something.read now has a retryable error "Service Unavailable" retryable: 1 2025-05-07T08:50:44.791491Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1802: Ticket **** (8E120919) () has now retryable error message 'Service Unavailable' 2025-05-07T08:50:44.791566Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:532: Ticket **** (8E120919) asking for AccessServiceAuthorization( something.read) 2025-05-07T08:50:44.791846Z node 2 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000051e08] Request AuthorizeRequest { checks { key: 0 value { permission { name: "something.read" } container_id: "aaaa1234" resource_path { 
path { id: "bbbb4554" } } iam_token: "**** (8E120919)" } } } checks { key: 0 value { permission { name: "something.read" } container_id: "aaaa1234" resource_path { path { id: "bbbb4554" } } iam_token: "user1" } } NebiusAccessService::Authorize response 14: "Service Unavailable" 2025-05-07T08:50:44.794627Z node 2 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [517000051e08] Status 14 Service Unavailable 2025-05-07T08:50:44.795356Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:1145: Ticket **** (8E120919) permission something.read now has a retryable error "Service Unavailable" retryable: 1 2025-05-07T08:50:44.795400Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1802: Ticket **** (8E120919) () has now retryable error message 'Service Unavailable' 2025-05-07T08:50:45.934117Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1506: Refreshing ticket **** (8E120919) 2025-05-07T08:50:45.934209Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:532: Ticket **** (8E120919) asking for AccessServiceAuthorization( something.read) 2025-05-07T08:50:46.034267Z node 2 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000051e08] Request AuthorizeRequest { checks { key: 0 value { permission { name: "something.read" } container_id: "aaaa1234" resource_path { path { id: "bbbb4554" } } iam_token: "**** (8E120919)" } } } NebiusAccessService::Authorize request checks { key: 0 value { permission { name: "something.read" } container_id: "aaaa1234" resource_path { ... permanent error message 'Access Denied' 2025-05-07T08:51:02.800732Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:758: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-05-07T08:51:02.800762Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:763: CanInitLoginToken, target database candidates(1): /Root 2025-05-07T08:51:02.800774Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:816: CanInitLoginToken, database /Root, A6 error 2025-05-07T08:51:02.800828Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:532: Ticket **** (8E120919) asking for AccessServiceAuthorization( something.read) 2025-05-07T08:51:02.801002Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000068d88] Request AuthorizeRequest { checks { key: 0 value { permission { name: "something.read" } container_id: "aaaa1234" resource_path { path { id: "XXXXXXXX" } } iam_token: "**** (8E120919)" } } } NebiusAccessService::Authorize request checks { key: 0 value { permission { name: "something.read" } container_id: "aaaa1234" resource_path { path { id: "XXXXXXXX" } } iam_token: "user1" } } NebiusAccessService::Authorize response results { key: 0 value { account { user_account { id: "user1" } } } } 0: "OK" 2025-05-07T08:51:02.802910Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [517000068d88] Response AuthorizeResponse { results { key: 0 value { account { user_account { id: "user1" } } } } } 2025-05-07T08:51:02.803169Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1784: Ticket **** (8E120919) () has now valid token of user1@as 2025-05-07T08:51:02.803659Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:758: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-05-07T08:51:02.803681Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:763: CanInitLoginToken, target database candidates(1): /Root 2025-05-07T08:51:02.803689Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:816: CanInitLoginToken, database /Root, A6 error 2025-05-07T08:51:02.803733Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:532: 
Ticket **** (8E120919) asking for AccessServiceAuthorization( something.read) 2025-05-07T08:51:02.803922Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000068d88] Request AuthorizeRequest { checks { key: 0 value { permission { name: "something.read" } container_id: "XXXXXXXX" resource_path { path { id: "bbbb4554" } } iam_token: "**** (8E120919)" } } } NebiusAccessService::Authorize request checks { key: 0 value { permission { name: "something.read" } container_id: "XXXXXXXX" resource_path { path { id: "bbbb4554" } } iam_token: "user1" } } NebiusAccessService::Authorize response results { key: 0 value { account { user_account { id: "user1" } } } } 0: "OK" 2025-05-07T08:51:02.805783Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [517000068d88] Response AuthorizeResponse { results { key: 0 value { account { user_account { id: "user1" } } } } } 2025-05-07T08:51:02.806043Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1784: Ticket **** (8E120919) () has now valid token of user1@as 2025-05-07T08:51:02.806571Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:758: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-05-07T08:51:02.806588Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:763: CanInitLoginToken, target database candidates(1): /Root 2025-05-07T08:51:02.806597Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:816: CanInitLoginToken, database /Root, A6 error 2025-05-07T08:51:02.806650Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:532: Ticket **** (8E120919) asking for AccessServiceAuthorization( monitoring.view) 2025-05-07T08:51:02.806825Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000068d88] Request AuthorizeRequest { checks { key: 0 value { permission { name: "monitoring.view" } container_id: "gizmo" iam_token: "**** (8E120919)" } } } NebiusAccessService::Authorize request checks { key: 0 value { permission { name: "monitoring.view" } container_id: "gizmo" iam_token: "user1" } } NebiusAccessService::Authorize response results { key: 0 value { account { user_account { id: "user1" } } } } 0: "OK" 2025-05-07T08:51:02.808653Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [517000068d88] Response AuthorizeResponse { results { key: 0 value { account { user_account { id: "user1" } } } } } 2025-05-07T08:51:02.808921Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1784: Ticket **** (8E120919) () has now valid token of user1@as 2025-05-07T08:51:06.691690Z node 5 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[5:7501623664124238988:2073];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:51:06.691756Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003f14/r3tmp/tmpal4oeM/pdisk_1.dat 2025-05-07T08:51:06.829838Z node 5 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:51:06.860786Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:51:06.860889Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:51:06.862472Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> 
Connected TServer::EnableGrpc on GrpcPort 13881, node 5 2025-05-07T08:51:06.923469Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:51:06.923493Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:51:06.923501Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:51:06.923657Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:27098 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:51:07.247781Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T08:51:07.256371Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T08:51:07.259112Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:758: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-05-07T08:51:07.259137Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:763: CanInitLoginToken, target database candidates(1): /Root 2025-05-07T08:51:07.259146Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:816: CanInitLoginToken, database /Root, A6 error 2025-05-07T08:51:07.259203Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:532: Ticket **** (8E120919) asking for AccessServiceAuthorization( something.read) 2025-05-07T08:51:07.259240Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [51700006ad08] Connect to grpc://localhost:11025 2025-05-07T08:51:07.260329Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [51700006ad08] Request AuthorizeRequest { checks { key: 0 value { permission { name: "something.read" } container_id: "aaaa1234" resource_path { path { id: "bbbb4554" } } iam_token: "**** (8E120919)" } } } NebiusAccessService::Authorize request checks { key: 0 value { permission { name: "something.read" } container_id: "aaaa1234" resource_path { path { id: "bbbb4554" } } iam_token: "user1" } } NebiusAccessService::Authorize response results { key: 0 value { account { user_account { id: "user1" } } } } 0: "OK" 2025-05-07T08:51:07.271280Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [51700006ad08] Response AuthorizeResponse { results { key: 0 value { account { user_account { id: "user1" } } } } } 2025-05-07T08:51:07.271582Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1784: Ticket **** (8E120919) () has now valid token of user1@as 2025-05-07T08:51:07.272904Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:758: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-05-07T08:51:07.272942Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:763: CanInitLoginToken, target database candidates(1): /Root 2025-05-07T08:51:07.272956Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:816: CanInitLoginToken, database /Root, A6 error 2025-05-07T08:51:07.273030Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:532: Ticket **** (8E120919) asking for AccessServiceAuthorization( something.read something.write) 2025-05-07T08:51:07.273366Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [51700006ad08] Request AuthorizeRequest { checks { key: 0 value { permission { name: "something.read" } container_id: "aaaa1234" resource_path { path { id: "bbbb4554" } } iam_token: "**** (8E120919)" } } checks { key: 1 value { permission { name: "something.write" } container_id: "aaaa1234" resource_path { path { id: "bbbb4554" } } iam_token: "**** (8E120919)" } } } NebiusAccessService::Authorize request checks { key: 0 value { permission { name: "something.read" } container_id: "aaaa1234" resource_path { path { id: "bbbb4554" } } iam_token: "user1" } } checks { key: 1 value { permission { name: "something.write" } container_id: "aaaa1234" resource_path { path { id: "bbbb4554" } } iam_token: "user1" } } NebiusAccessService::Authorize response results { key: 0 value { account { user_account { id: "user1" } } } } results { key: 1 value { account { user_account { id: "user1" } } } } 0: "OK" 2025-05-07T08:51:07.276544Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [51700006ad08] Response AuthorizeResponse 
{ results { key: 0 value { account { user_account { id: "user1" } } } } results { key: 1 value { account { user_account { id: "user1" } } } } } 2025-05-07T08:51:07.276883Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1784: Ticket **** (8E120919) () has now valid token of user1@as >> TMessageBusServerPersQueueGetPartitionLocationsMetaRequestTest::FailsOnEmptyTopicName [GOOD] >> TMessageBusServerPersQueueGetPartitionLocationsMetaRequestTest::FailsOnDuplicatedTopicName >> KqpPg::DuplicatedColumns-useSink [GOOD] >> KqpPg::InsertFromSelect_NoReorder+useSink >> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::FailsOnBadRootStatusInGetNodeRequest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/security/ut/unittest >> TTicketParserTest::BulkAuthorizationModify [GOOD] Test command err: 2025-05-07T08:50:39.522208Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501623550613401320:2195];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:50:39.522263Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003ed8/r3tmp/tmp9FUJFm/pdisk_1.dat 2025-05-07T08:50:40.435986Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:50:40.436101Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:50:40.448726Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:50:40.462534Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 4670, node 1 2025-05-07T08:50:40.784642Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:50:40.784670Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:50:40.784677Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:50:40.784800Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:11130 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-05-07T08:50:42.409590Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:50:42.442519Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T08:50:42.461916Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:514: Ticket AKIA****MPLE (B3EDC139) asking for AccessServiceBulkAuthorization( something.read) 2025-05-07T08:50:42.462040Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [517000010408] Connect to grpc://localhost:14373 2025-05-07T08:50:42.485257Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000010408] Request BulkAuthorizeRequest { signature { access_key_id: "AKIAIOSFODNN7EXAMPLE" v4_parameters { signed_at { } } } actions { items { resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } permission: "something.read" } } result_filter: ALL_FAILED } 2025-05-07T08:50:42.515124Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [517000010408] Status 14 Service Unavailable 2025-05-07T08:50:42.515698Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:1145: Ticket AKIA****MPLE (B3EDC139) permission something.read now has a retryable error "Service Unavailable" retryable: 1 2025-05-07T08:50:42.515751Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1802: Ticket AKIA****MPLE (B3EDC139) () has now retryable error message 'Service Unavailable' 2025-05-07T08:50:42.515846Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:514: Ticket AKIA****MPLE (B3EDC139) asking for AccessServiceBulkAuthorization( something.read) 2025-05-07T08:50:42.516173Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000010408] Request BulkAuthorizeRequest { signature { access_key_id: "AKIAIOSFODNN7EXAMPLE" v4_parameters { signed_at { } } } actions { items { resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } permission: "something.read" } } result_filter: ALL_FAILED } 2025-05-07T08:50:42.531868Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [517000010408] Status 14 Service Unavailable 2025-05-07T08:50:42.532541Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:1145: Ticket AKIA****MPLE (B3EDC139) permission something.read now has a retryable error "Service Unavailable" retryable: 1 2025-05-07T08:50:42.532582Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1802: Ticket AKIA****MPLE (B3EDC139) () has now retryable error message 'Service Unavailable' 2025-05-07T08:50:43.811784Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1506: Refreshing ticket AKIA****MPLE (B3EDC139) 2025-05-07T08:50:43.811944Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:514: Ticket AKIA****MPLE (B3EDC139) asking for AccessServiceBulkAuthorization( something.read) 2025-05-07T08:50:43.813304Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000010408] Request BulkAuthorizeRequest { signature { access_key_id: "AKIAIOSFODNN7EXAMPLE" v4_parameters { signed_at { } } } actions { items { resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } permission: "something.read" } } result_filter: ALL_FAILED } 2025-05-07T08:50:43.818074Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: 
[517000010408] Status 14 Service Unavailable 2025-05-07T08:50:43.818491Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:1145: Ticket AKIA****MPLE (B3EDC139) permission something.read now has a retryable error "Service Unavailable" retryable: 1 2025-05-07T08:50:43.818524Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1802: Ticket AKIA****MPLE (B3EDC139) () has now retryable error message 'Service Unavailable' 2025-05-07T08:50:44.523996Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501623550613401320:2195];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:50:44.524078Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:50:44.818347Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1506: Refreshing ticket AKIA****MPLE (B3EDC139) 2025-05-07T08:50:44.818446Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:514: Ticket AKIA****MPLE (B3EDC139) asking for AccessServiceBulkAuthorization( something.read) 2025-05-07T08:50:44.818741Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000010408] Request BulkAuthorizeRequest { signature { access_key_id: "AKIAIOSFODNN7EXAMPLE" v4_parameters { signed_at { } } } actions { items { resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } permission: "something.read" } } result_filter: ALL_FAILED } 2025-05-07T08:50:44.822088Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [517000010408] Response BulkAuthorizeResponse { subject { user_account { id: "user1" } } } 2025-05-07T08:50:44.823135Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1784: Ticket AKIA****MPLE (B3EDC139) () has now valid token of user1@as 2025-05-07T08:50:55.276352Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7501623618182130573:2066];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:50:55.276499Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003ed8/r3tmp/tmpecIkpH/pdisk_1.dat 2025-05-07T08:50:55.473918Z node 2 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:50:55.476184Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:50:55.476298Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:50:55.487921Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 15233, node 2 2025-05-07T08:50:55.582662Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:50:55.582685Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:50:55.582713Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:50:55.582853Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable 
configuration TClient is connected to server localhost:9104 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:50:55.821731Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:50:55.836782Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-05-07T08:50:55.839030Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:514: Ticket AKIA****MPLE (B3EDC139) asking for AccessServiceBulkAuthorization( something.read) 2025-05-07T08:50:55.839065Z node 2 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [51700004b888] Connect to grpc://localhost:8085 2025-05-07T08:50:55.839816Z node 2 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [51700004b888] Request BulkAuthorizeR ... EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:51:03.814413Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T08:51:03.825129Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:758: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-05-07T08:51:03.825167Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:763: CanInitLoginToken, target database candidates(1): /Root 2025-05-07T08:51:03.825180Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:816: CanInitLoginToken, database /Root, A6 error 2025-05-07T08:51:03.825229Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:493: Ticket **** (8E120919) asking for AccessServiceAuthorization(something.read) 2025-05-07T08:51:03.825290Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:493: Ticket **** (8E120919) asking for AccessServiceAuthorization(somewhere.sleep) 2025-05-07T08:51:03.825317Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:493: Ticket **** (8E120919) asking for AccessServiceAuthorization(something.list) 2025-05-07T08:51:03.825342Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:493: Ticket **** (8E120919) asking for AccessServiceAuthorization(something.write) 2025-05-07T08:51:03.825386Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:493: Ticket **** (8E120919) asking for AccessServiceAuthorization(something.eat) 2025-05-07T08:51:03.825468Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [517000041088] Connect to grpc://localhost:16527 2025-05-07T08:51:03.829904Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000041088] Request AuthorizeRequest { iam_token: "**** (8E120919)" permission: "something.read" resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } } 2025-05-07T08:51:03.830448Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000041088] Request AuthorizeRequest { iam_token: "**** (8E120919)" permission: "somewhere.sleep" resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } } 2025-05-07T08:51:03.830592Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000041088] Request AuthorizeRequest { iam_token: "**** (8E120919)" permission: "something.list" resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } } 2025-05-07T08:51:03.830752Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000041088] Request AuthorizeRequest { iam_token: "**** (8E120919)" permission: "something.write" resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } } 2025-05-07T08:51:03.830853Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000041088] Request AuthorizeRequest { iam_token: "**** (8E120919)" permission: "something.eat" resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } } 2025-05-07T08:51:03.852255Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [517000041088] Response AuthorizeResponse { subject { user_account { id: "user1" } } } 2025-05-07T08:51:03.852500Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:1398: Ticket **** (8E120919) permission something.write now has a valid subject "user1@as" 2025-05-07T08:51:03.852673Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [517000041088] Status 16 Access Denied 2025-05-07T08:51:03.852998Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [517000041088] Status 16 Access Denied 2025-05-07T08:51:03.853189Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [517000041088] 
Status 16 Access Denied 2025-05-07T08:51:03.853379Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:1413: Ticket **** (8E120919) permission something.read now has a permanent error "Access Denied" retryable:0 2025-05-07T08:51:03.853414Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:1413: Ticket **** (8E120919) permission somewhere.sleep now has a permanent error "Access Denied" retryable:0 2025-05-07T08:51:03.853434Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:1413: Ticket **** (8E120919) permission something.list now has a permanent error "Access Denied" retryable:0 2025-05-07T08:51:03.863175Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [517000041088] Status 16 Access Denied 2025-05-07T08:51:03.863362Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:1413: Ticket **** (8E120919) permission something.eat now has a permanent error "Access Denied" retryable:0 2025-05-07T08:51:03.863663Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:1003: Ticket **** (8E120919) asking for UserAccount(user1@as) 2025-05-07T08:51:03.865488Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [51700008fc88] Connect to grpc://localhost:8076 2025-05-07T08:51:03.870565Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [51700008fc88] Request GetUserAccountRequest { user_account_id: "user1" } 2025-05-07T08:51:03.880980Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [51700008fc88] Response UserAccount { yandex_passport_user_account { login: "login1" } } 2025-05-07T08:51:03.881765Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1784: Ticket **** (8E120919) () has now valid token of login1@passport 2025-05-07T08:51:07.474742Z node 5 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[5:7501623670313916461:2144];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:51:07.485515Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003ed8/r3tmp/tmpzxdrvA/pdisk_1.dat 2025-05-07T08:51:07.582178Z node 5 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 5002, node 5 2025-05-07T08:51:07.614597Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:51:07.614720Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:51:07.617865Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:51:07.670737Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:51:07.670762Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:51:07.670771Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:51:07.670919Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:65134 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-05-07T08:51:07.967640Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-05-07T08:51:07.975900Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:758: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-05-07T08:51:07.975938Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:763: CanInitLoginToken, target database candidates(1): /Root 2025-05-07T08:51:07.975948Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:816: CanInitLoginToken, database /Root, A6 error 2025-05-07T08:51:07.976035Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:514: Ticket **** (8E120919) asking for AccessServiceBulkAuthorization( something.read) 2025-05-07T08:51:07.976093Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [517000075c08] Connect to grpc://localhost:18065 2025-05-07T08:51:07.977156Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000075c08] Request BulkAuthorizeRequest { iam_token: "**** (8E120919)" actions { items { resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } permission: "something.read" } } result_filter: ALL_FAILED } 2025-05-07T08:51:07.985903Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [517000075c08] Response BulkAuthorizeResponse { subject { user_account { id: "user1" } } } 2025-05-07T08:51:07.987576Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1784: Ticket **** (8E120919) () has now valid token of user1@as 2025-05-07T08:51:07.988176Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:758: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-05-07T08:51:07.988197Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:763: CanInitLoginToken, target database candidates(1): /Root 2025-05-07T08:51:07.988209Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:816: CanInitLoginToken, database /Root, A6 error 2025-05-07T08:51:07.988299Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:514: Ticket **** (8E120919) asking for AccessServiceBulkAuthorization( something.read something.write) 2025-05-07T08:51:07.988562Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000075c08] Request BulkAuthorizeRequest { iam_token: "**** (8E120919)" actions { items { resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } permission: "something.read" } items { resource_path { id: "bbbb4554" 
type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } permission: "something.write" } } result_filter: ALL_FAILED } 2025-05-07T08:51:07.990380Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [517000075c08] Response BulkAuthorizeResponse { subject { user_account { id: "user1" } } } 2025-05-07T08:51:07.990595Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1784: Ticket **** (8E120919) () has now valid token of user1@as >> TMessageBusServerPersQueueGetReadSessionsInfoMetaRequestTest::HandlesPipeDisconnection_DisconnectionComesFirst >> TMessageBusServerPersQueueGetPartitionLocationsMetaRequestTest::FailsOnDuplicatedTopicName [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/viewer/ut/unittest >> ViewerTopicDataTests::TopicDataTest [GOOD] Test command err: 2025-05-07T08:50:02.685860Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:1758:2432], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests }
2025-05-07T08:50:02.687192Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError;
2025-05-07T08:50:02.687861Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError;
2025-05-07T08:50:02.688588Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [4:1069:2375], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests }
2025-05-07T08:50:02.688792Z node 5 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [5:1072:2375], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests }
2025-05-07T08:50:02.689786Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [3:1761:2375], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests }
2025-05-07T08:50:02.689914Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError;
2025-05-07T08:50:02.690027Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError;
2025-05-07T08:50:02.690472Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError;
2025-05-07T08:50:02.690546Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError;
2025-05-07T08:50:02.690832Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:1755:2375], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:50:02.690936Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:50:02.691332Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:50:02.691785Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:50:02.692179Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-05-07T08:50:03.199120Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:50:03.391924Z node 1 :BS_NODE WARN: {NW89@node_warden_pdisk.cpp:106} Can't write new MockDevicesConfig to file Path# /Berkanavt/kikimr/testing/mock_devices.txt 2025-05-07T08:50:03.411887Z node 1 :BS_PDISK WARN: {BSP01@blobstorage_pdisk_actor.cpp:422} Magic sector is present on disk, now going to format device PDiskId# 1000 2025-05-07T08:50:04.031285Z node 1 :BS_PDISK WARN: {BSP01@blobstorage_pdisk_actor.cpp:362} Device formatting done PDiskId# 1000 TServer::EnableGrpc on GrpcPort 26074, node 1 TClient is connected to server localhost:20350 2025-05-07T08:50:04.351737Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:50:04.351831Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:50:04.351884Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:50:04.352728Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-05-07T08:50:49.438915Z node 6 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[6:7501623593039899020:2129];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:50:49.439157Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-05-07T08:50:49.754097Z node 6 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:50:49.783850Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:50:49.783999Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:50:49.786337Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 10346, node 6 2025-05-07T08:50:49.910860Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is 
empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:50:49.910893Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:50:49.910910Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:50:49.911093Z node 6 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:24530 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:50:50.516566Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:50:50.535395Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-05-07T08:50:50.550675Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715659:0, at schemeshard: 72057594046644480 2025-05-07T08:50:50.555274Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:50:50.561991Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715660, at schemeshard: 72057594046644480 2025-05-07T08:50:54.443405Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[6:7501623593039899020:2129];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:50:54.443506Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:50:54.642730Z node 6 :TICKET_PARSER INFO: viewer_ut.cpp:437: Ticket parser: got TEvAuthorizeTicket event: test_ydb_token /Root 1 2025-05-07T08:50:54.642807Z node 6 :TICKET_PARSER INFO: viewer_ut.cpp:496: Send TEvAuthorizeTicketResult success 2025-05-07T08:50:55.301511Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7501623618809703414:2346], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T08:50:55.301845Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T08:50:55.302289Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7501623618809703449:2349], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T08:50:55.310190Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715661:3, at schemeshard: 72057594046644480
2025-05-07T08:50:55.333224Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [6:7501623618809703451:2350], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715661 completed, doublechecking } 2025-05-07T08:50:55.420723Z node 6 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [6:7501623618809703502:2360] txid# 281474976715662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:50:55.787450Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-05-07T08:50:55.968223Z node 6 :TICKET_PARSER INFO: viewer_ut.cpp:437: Ticket parser: got TEvAuthorizeTicket event: test_ydb ... onId [producer3|703cbd08-233c5457-3650bb0c-ca0b9ea8_0] Write session got write response: sequence_numbers: 11 sequence_numbers: 12 offsets: 50 offsets: 51 already_written: false already_written: false write_statistics { persist_duration_ms: 4 } 2025-05-07T08:51:05.126847Z :DEBUG: [] MessageGroupId [producer3] SessionId [producer3|703cbd08-233c5457-3650bb0c-ca0b9ea8_0] Write session: acknoledged message 11 2025-05-07T08:51:05.126882Z :DEBUG: [] MessageGroupId [producer3] SessionId [producer3|703cbd08-233c5457-3650bb0c-ca0b9ea8_0] Write session: acknoledged message 12 2025-05-07T08:51:05.127129Z :DEBUG: [] MessageGroupId [producer3] SessionId [producer3|703cbd08-233c5457-3650bb0c-ca0b9ea8_0] Write session got write response: sequence_numbers: 13 offsets: 52 already_written: false write_statistics { persist_duration_ms: 4 } 2025-05-07T08:51:05.127159Z :DEBUG: [] MessageGroupId [producer3] SessionId [producer3|703cbd08-233c5457-3650bb0c-ca0b9ea8_0] Write session: acknoledged message 13 2025-05-07T08:51:05.127296Z :DEBUG: [] MessageGroupId [producer3] SessionId [producer3|703cbd08-233c5457-3650bb0c-ca0b9ea8_0] Write session got write response: sequence_numbers: 14 offsets: 53 already_written: false write_statistics { persist_duration_ms: 4 } 2025-05-07T08:51:05.127324Z :DEBUG: [] MessageGroupId [producer3] SessionId [producer3|703cbd08-233c5457-3650bb0c-ca0b9ea8_0] Write session: acknoledged message 14 2025-05-07T08:51:05.133294Z :DEBUG: [] MessageGroupId [producer3] SessionId [producer3|703cbd08-233c5457-3650bb0c-ca0b9ea8_0] Write session got write response: sequence_numbers: 15 offsets: 54 already_written: false write_statistics { persist_duration_ms: 8 queued_in_partition_duration_ms: 52 } 2025-05-07T08:51:05.133355Z :DEBUG: [] MessageGroupId [producer3] SessionId [producer3|703cbd08-233c5457-3650bb0c-ca0b9ea8_0] Write session: acknoledged message 15 2025-05-07T08:51:05.143411Z :DEBUG: [] MessageGroupId [producer3] SessionId [producer3|703cbd08-233c5457-3650bb0c-ca0b9ea8_0] Write session: try to update token 2025-05-07T08:51:05.143493Z :DEBUG: [] MessageGroupId [producer3] SessionId [producer3|703cbd08-233c5457-3650bb0c-ca0b9ea8_0] Send 1 message(s) (0 left), first sequence number is 20 2025-05-07T08:51:05.244433Z :DEBUG: [] MessageGroupId [producer3] SessionId [producer3|703cbd08-233c5457-3650bb0c-ca0b9ea8_0] Write session got write response: sequence_numbers: 16 offsets: 55 already_written: false write_statistics { persist_duration_ms: 4 } 2025-05-07T08:51:05.244504Z :DEBUG: [] MessageGroupId [producer3] SessionId [producer3|703cbd08-233c5457-3650bb0c-ca0b9ea8_0] Write session: acknoledged message 16 2025-05-07T08:51:05.244795Z :DEBUG: [] MessageGroupId [producer3] 
SessionId [producer3|703cbd08-233c5457-3650bb0c-ca0b9ea8_0] Write session got write response: sequence_numbers: 17 offsets: 56 already_written: false write_statistics { persist_duration_ms: 4 queued_in_partition_duration_ms: 99 } 2025-05-07T08:51:05.244824Z :DEBUG: [] MessageGroupId [producer3] SessionId [producer3|703cbd08-233c5457-3650bb0c-ca0b9ea8_0] Write session: acknoledged message 17 2025-05-07T08:51:05.244964Z :DEBUG: [] MessageGroupId [producer3] SessionId [producer3|703cbd08-233c5457-3650bb0c-ca0b9ea8_0] Write session got write response: sequence_numbers: 18 offsets: 57 already_written: false write_statistics { persist_duration_ms: 4 queued_in_partition_duration_ms: 95 } 2025-05-07T08:51:05.244989Z :DEBUG: [] MessageGroupId [producer3] SessionId [producer3|703cbd08-233c5457-3650bb0c-ca0b9ea8_0] Write session: acknoledged message 18 2025-05-07T08:51:05.245124Z :DEBUG: [] MessageGroupId [producer3] SessionId [producer3|703cbd08-233c5457-3650bb0c-ca0b9ea8_0] Write session got write response: sequence_numbers: 19 offsets: 58 already_written: false write_statistics { persist_duration_ms: 4 queued_in_partition_duration_ms: 95 } 2025-05-07T08:51:05.245155Z :DEBUG: [] MessageGroupId [producer3] SessionId [producer3|703cbd08-233c5457-3650bb0c-ca0b9ea8_0] Write session: acknoledged message 19 2025-05-07T08:51:05.245280Z :DEBUG: [] MessageGroupId [producer3] SessionId [producer3|703cbd08-233c5457-3650bb0c-ca0b9ea8_0] Write session got write response: sequence_numbers: 20 offsets: 59 already_written: false write_statistics { persist_duration_ms: 3 queued_in_partition_duration_ms: 1 } 2025-05-07T08:51:05.245302Z :DEBUG: [] MessageGroupId [producer3] SessionId [producer3|703cbd08-233c5457-3650bb0c-ca0b9ea8_0] Write session: acknoledged message 20 2025-05-07T08:51:05.256813Z :INFO: [] MessageGroupId [producer3] SessionId [producer3|703cbd08-233c5457-3650bb0c-ca0b9ea8_0] Write session will now close 2025-05-07T08:51:05.256924Z :DEBUG: [] MessageGroupId [producer3] SessionId [producer3|703cbd08-233c5457-3650bb0c-ca0b9ea8_0] Write session: aborting 2025-05-07T08:51:05.257694Z :INFO: [] MessageGroupId [producer3] SessionId [producer3|703cbd08-233c5457-3650bb0c-ca0b9ea8_0] Write session: gracefully shut down, all writes complete 2025-05-07T08:51:05.258277Z :DEBUG: [] MessageGroupId [producer3] SessionId [producer3|703cbd08-233c5457-3650bb0c-ca0b9ea8_0] Write session: destroy 2025-05-07T08:51:05.999702Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7501623659460522229:2393], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T08:51:06.000437Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T08:51:06.000947Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7501623663755489561:2397], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T08:51:06.006267Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710661:3, at schemeshard: 72057594046644480
2025-05-07T08:51:06.032723Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7501623663755489563:2398], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710661 completed, doublechecking }
2025-05-07T08:51:06.101580Z node 7 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [7:7501623663755489614:2519] txid# 281474976710662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 }
2025-05-07T08:51:06.191144Z node 7 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [7:7501623663755489623:2402], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-05-07T08:51:06.193453Z node 7 :KQP_SESSION WARN: kqp_session_actor.cpp:2147: SessionId: ydb://session/3?node_id=7&id=ZDc3YWY0MTgtNzM5YzdiOTctOGJjYzFlODgtNDljYjdjNTM=, ActorId: [7:7501623659460522226:2391], ActorState: ExecuteState, TraceId: 01jtmz20476faqhftd5dta13ab, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-05-07T08:51:06.194635Z node 7 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } Size: 1398104 Size: 1398104 Size: 1398104 Size: 1398104 Size: 1398104 Size: 1398104 Size: 1398104 Size: 1398104 Size: 1398104 Size: 1398104 2025-05-07T08:51:06.231492Z :DEBUG: [] MessageGroupId [producer4] SessionId [] Write session: try to update token 2025-05-07T08:51:06.232134Z :INFO: [] MessageGroupId [producer4] SessionId [] Write session: Do CDS request 2025-05-07T08:51:06.232239Z :INFO: [] MessageGroupId [producer4] SessionId [] Start write session. Will connect to endpoint: localhost:13413 2025-05-07T08:51:06.239903Z :DEBUG: [] MessageGroupId [producer4] SessionId [] Write session: send init request: init_request { topic: "/Root/topic1" message_group_id: "producer4" } 2025-05-07T08:51:06.246200Z :INFO: [] MessageGroupId [producer4] SessionId [] Counters: { Errors: 0 CurrentSessionLifetimeMs: 1746607866246 BytesWritten: 0 MessagesWritten: 0 BytesWrittenCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-05-07T08:51:06.246328Z :INFO: [] MessageGroupId [producer4] SessionId [] Write session established. Init response: session_id: "producer4|c3aa21c3-e89b6ee-dcf74fc7-3160f338_0" topic: "topic1" 2025-05-07T08:51:06.249710Z :DEBUG: [] MessageGroupId [producer4] SessionId [producer4|c3aa21c3-e89b6ee-dcf74fc7-3160f338_0] Write 1 messages with Id from 1 to 1 2025-05-07T08:51:06.250467Z :INFO: [] MessageGroupId [producer4] SessionId [producer4|c3aa21c3-e89b6ee-dcf74fc7-3160f338_0] Write session: close. 
Timeout = 18446744073709551 ms 2025-05-07T08:51:06.302356Z :DEBUG: [] MessageGroupId [producer4] SessionId [producer4|c3aa21c3-e89b6ee-dcf74fc7-3160f338_0] Write session: try to update token 2025-05-07T08:51:06.302423Z :DEBUG: [] MessageGroupId [producer4] SessionId [producer4|c3aa21c3-e89b6ee-dcf74fc7-3160f338_0] Send 1 message(s) (0 left), first sequence number is 1 2025-05-07T08:51:06.323742Z :DEBUG: [] MessageGroupId [producer4] SessionId [producer4|c3aa21c3-e89b6ee-dcf74fc7-3160f338_0] Write session got write response: sequence_numbers: 1 offsets: 60 already_written: false write_statistics { persist_duration_ms: 2 queued_in_partition_duration_ms: 14 } 2025-05-07T08:51:06.323791Z :DEBUG: [] MessageGroupId [producer4] SessionId [producer4|c3aa21c3-e89b6ee-dcf74fc7-3160f338_0] Write session: acknoledged message 1 2025-05-07T08:51:06.353652Z :INFO: [] MessageGroupId [producer4] SessionId [producer4|c3aa21c3-e89b6ee-dcf74fc7-3160f338_0] Write session will now close 2025-05-07T08:51:06.353787Z :DEBUG: [] MessageGroupId [producer4] SessionId [producer4|c3aa21c3-e89b6ee-dcf74fc7-3160f338_0] Write session: aborting 2025-05-07T08:51:06.356482Z :INFO: [] MessageGroupId [producer4] SessionId [producer4|c3aa21c3-e89b6ee-dcf74fc7-3160f338_0] Write session: gracefully shut down, all writes complete 2025-05-07T08:51:06.356730Z :DEBUG: [] MessageGroupId [producer4] SessionId [producer4|c3aa21c3-e89b6ee-dcf74fc7-3160f338_0] Write session is aborting and will not restart 2025-05-07T08:51:06.357305Z :DEBUG: [] MessageGroupId [producer4] SessionId [producer4|c3aa21c3-e89b6ee-dcf74fc7-3160f338_0] Write session: destroy Size: 4194320 Got response:400: PathErrorUnknown Got response:400: No such partition in topic Got response:400: Bad offset 2025-05-07T08:51:06.631772Z node 7 :PERSQUEUE ERROR: partition_read.cpp:672: [PQ: 72075186224037889, Partition: 0, State: StateIdle] reading from too big offset - topic topic1 partition 0 client $without_consumer EndOffset 61 offset 10000 >> TTicketParserTest::AuthenticationRetryErrorImmediately [GOOD] >> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::FailsOnBadRootStatusInGetNodeRequest [GOOD] >> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::FailsOnBalancerDescribeResultFailureWhenTopicsAreGivenExplicitly ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/server/ut/unittest >> TMessageBusServerPersQueueGetPartitionLocationsMetaRequestTest::FailsOnDuplicatedTopicName [GOOD] Test command err: Assert failed: Check response: { Status: 128 ErrorReason: "no path \'/Root/PQ/\', Marker# PQ17" ErrorCode: UNKNOWN_TOPIC } Assert failed: Check response: { Status: 128 ErrorReason: "topic \'rt3.dc1--topic1\' has no balancer, Marker# PQ193" ErrorCode: UNKNOWN_TOPIC } Assert failed: Check response: { Status: 128 ErrorReason: "TopicRequest must have Topic field." 
ErrorCode: BAD_REQUEST } Assert failed: Check response: { Status: 128 ErrorReason: "multiple TopicRequest for topic \'rt3.dc1--topic1\'" ErrorCode: BAD_REQUEST } >> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::FailsOnBalancerDescribeResultFailureWhenTopicsAreGivenExplicitly [GOOD] >> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::FailsOnDuplicatedTopicName >> TMessageBusServerPersQueueGetReadSessionsInfoMetaRequestTest::HandlesPipeDisconnection_DisconnectionComesFirst [GOOD] >> TMessageBusServerPersQueueGetReadSessionsInfoMetaRequestTest::HandlesPipeDisconnection_DisconnectionComesSecond >> TPartitionTests::TestBatchingWithProposeConfig [GOOD] >> TMessageBusServerPersQueueGetPartitionStatusMetaRequestTest::FailsOnBalancerDescribeResultFailureWhenTopicsAreGivenExplicitly >> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::FailsOnDuplicatedTopicName [GOOD] >> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::FailsOnDuplicatedPartition |89.3%| [TA] $(B)/ydb/tests/fq/streaming_optimize/test-results/py3test/{meta.json ... results_accumulator.log} >> KqpSystemView::QueryStatsSimple [GOOD] >> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::FailsOnFailedGetAllTopicsRequest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/security/ut/unittest >> TTicketParserTest::AuthenticationRetryErrorImmediately [GOOD] Test command err: 2025-05-07T08:50:42.224381Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501623564049820286:2198];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:50:42.225124Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003e8e/r3tmp/tmpkSeoSc/pdisk_1.dat 2025-05-07T08:50:42.873899Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:50:42.892733Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:50:42.892836Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:50:42.896981Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 12197, node 1 2025-05-07T08:50:43.200150Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:50:43.200181Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:50:43.200193Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:50:43.200309Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:14979 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:50:43.736201Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:50:43.759720Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T08:50:43.763533Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:563: Ticket **** (8E120919) asking for AccessServiceAuthentication 2025-05-07T08:50:43.763600Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [517000010088] Connect to grpc://localhost:63287 2025-05-07T08:50:43.771359Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000010088] Request AuthenticateRequest { iam_token: "**** (8E120919)" } 2025-05-07T08:50:43.803934Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [517000010088] Response AuthenticateResponse { subject { user_account { id: "user1" } } } 2025-05-07T08:50:43.806361Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1784: Ticket **** (8E120919) () has now valid token of user1@as 2025-05-07T08:50:46.898524Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7501623578611654765:2056];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:50:46.899830Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003e8e/r3tmp/tmpPzrl5V/pdisk_1.dat 2025-05-07T08:50:47.139572Z node 2 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:50:47.171059Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:50:47.171155Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:50:47.173372Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 3875, node 2 2025-05-07T08:50:47.270675Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:50:47.270710Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty 
maybe) 2025-05-07T08:50:47.270735Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:50:47.270859Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:2078 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:50:47.628063Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:50:47.636726Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T08:50:47.640070Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:563: Ticket ApiK****alid (AB5B5EA8) asking for AccessServiceAuthentication 2025-05-07T08:50:47.640160Z node 2 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [517000031108] Connect to grpc://localhost:22435 2025-05-07T08:50:47.641156Z node 2 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000031108] Request AuthenticateRequest { api_key: "ApiK****alid (AB5B5EA8)" } 2025-05-07T08:50:47.652385Z node 2 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [517000031108] Response AuthenticateResponse { subject { user_account { id: "ApiKey-value-valid" } } } 2025-05-07T08:50:47.652970Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1784: Ticket ApiK****alid (AB5B5EA8) () has now valid token of ApiKey-value-valid@as 2025-05-07T08:50:51.326917Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7501623603259326474:2066];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:50:51.327043Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003e8e/r3tmp/tmpl1Z9T4/pdisk_1.dat 2025-05-07T08:50:51.501823Z node 3 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:50:51.558448Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:50:51.558543Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:50:51.561255Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, 
(0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 10493, node 3 2025-05-07T08:50:51.674668Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:50:51.674702Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:50:51.674729Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:50:51.674894Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:14111 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:50:52.011390Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:50:52.022549Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-05-07T08:50:52.025332Z node 3 :TICKET_PARSER TRACE: ticket_parser_impl.h:758: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-05-07T08:50:52.025364Z node 3 :TICKET_PARSER TRACE: ticket_parser_impl.h:763: CanInitLoginToken, target database candidates(1): /Root 2025-05-07T08:50:52.025373Z node 3 :TICKET_PARSER TRACE: ticket_parser_impl.h:816: CanInitLoginToken, database /Root, A6 error 2025-05-07T08:50:52.025405Z node 3 :TICKET_PARSER TRACE: ticket_parser_impl.h:563: Ticket **** (8E120919) ask ... 
lable' 2025-05-07T08:50:52.045708Z node 3 :TICKET_PARSER TRACE: ticket_parser_impl.h:563: Ticket **** (8E120919) asking for AccessServiceAuthentication 2025-05-07T08:50:52.045878Z node 3 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000100388] Request AuthenticateRequest { iam_token: "**** (8E120919)" } 2025-05-07T08:50:52.048045Z node 3 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [517000100388] Status 14 Service Unavailable 2025-05-07T08:50:52.048357Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1802: Ticket **** (8E120919) () has now retryable error message 'Service Unavailable' 2025-05-07T08:50:56.026836Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7501623621716133044:2217];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003e8e/r3tmp/tmpsjNNb6/pdisk_1.dat 2025-05-07T08:50:56.073579Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-05-07T08:50:56.133393Z node 4 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:50:56.155067Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:50:56.155154Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:50:56.156574Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 26674, node 4 2025-05-07T08:50:56.225642Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:50:56.225664Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:50:56.225672Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:50:56.225806Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:9341 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 
2025-05-07T08:50:56.483423Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-05-07T08:50:56.494671Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-05-07T08:50:56.496585Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:563: Ticket AKIA****MPLE (B3EDC139) asking for AccessServiceAuthentication 2025-05-07T08:50:56.496664Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [5170000c4488] Connect to grpc://localhost:26060 2025-05-07T08:50:56.497880Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [5170000c4488] Request AuthenticateRequest { signature { access_key_id: "AKIAIOSFODNN7EXAMPLE" v4_parameters { signed_at { } } } } 2025-05-07T08:50:56.505651Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [5170000c4488] Status 14 Service Unavailable 2025-05-07T08:50:56.506086Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1802: Ticket AKIA****MPLE (B3EDC139) () has now retryable error message 'Service Unavailable' 2025-05-07T08:50:56.506125Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:563: Ticket AKIA****MPLE (B3EDC139) asking for AccessServiceAuthentication 2025-05-07T08:50:56.506334Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [5170000c4488] Request AuthenticateRequest { signature { access_key_id: "AKIAIOSFODNN7EXAMPLE" v4_parameters { signed_at { } } } } 2025-05-07T08:50:56.508242Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [5170000c4488] Status 14 Service Unavailable 2025-05-07T08:50:56.508513Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1802: Ticket AKIA****MPLE (B3EDC139) () has now retryable error message 'Service Unavailable' 2025-05-07T08:50:58.022105Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1506: Refreshing ticket AKIA****MPLE (B3EDC139) 2025-05-07T08:50:58.022166Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:563: Ticket AKIA****MPLE (B3EDC139) asking for AccessServiceAuthentication 2025-05-07T08:50:58.022361Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [5170000c4488] Request AuthenticateRequest { signature { access_key_id: "AKIAIOSFODNN7EXAMPLE" v4_parameters { signed_at { } } } } 2025-05-07T08:50:58.028423Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [5170000c4488] Status 14 Service Unavailable 2025-05-07T08:50:58.030088Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1802: Ticket AKIA****MPLE (B3EDC139) () has now retryable error message 'Service Unavailable' 2025-05-07T08:50:59.024823Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1506: Refreshing ticket AKIA****MPLE (B3EDC139) 2025-05-07T08:50:59.024861Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:563: Ticket AKIA****MPLE (B3EDC139) asking for AccessServiceAuthentication 2025-05-07T08:50:59.025049Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [5170000c4488] Request AuthenticateRequest { signature { access_key_id: "AKIAIOSFODNN7EXAMPLE" v4_parameters { signed_at { } } } } 2025-05-07T08:50:59.029757Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [5170000c4488] Response AuthenticateResponse { subject { user_account { id: "user1" } } } 2025-05-07T08:50:59.030036Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1784: Ticket AKIA****MPLE (B3EDC139) () has now valid token of user1@as 2025-05-07T08:51:01.011492Z node 4 
:METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7501623621716133044:2217];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:51:01.011601Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:51:09.171318Z node 5 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[5:7501623676897603751:2063];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:51:09.171368Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003e8e/r3tmp/tmpZkwmbQ/pdisk_1.dat 2025-05-07T08:51:09.311152Z node 5 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:51:09.350410Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:51:09.350543Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:51:09.355932Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 26787, node 5 2025-05-07T08:51:09.529502Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:51:09.529532Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:51:09.529542Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:51:09.529701Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:14339 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:51:09.839686Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T08:51:09.852538Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:563: Ticket AKIA****MPLE (B3EDC139) asking for AccessServiceAuthentication 2025-05-07T08:51:09.852626Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [517000081208] Connect to grpc://localhost:28970 2025-05-07T08:51:09.854298Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000081208] Request AuthenticateRequest { signature { access_key_id: "AKIAIOSFODNN7EXAMPLE" v4_parameters { signed_at { } } } } 2025-05-07T08:51:09.870855Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [517000081208] Status 14 Service Unavailable 2025-05-07T08:51:09.871416Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1802: Ticket AKIA****MPLE (B3EDC139) () has now retryable error message 'Service Unavailable' 2025-05-07T08:51:09.871469Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:563: Ticket AKIA****MPLE (B3EDC139) asking for AccessServiceAuthentication 2025-05-07T08:51:09.871725Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000081208] Request AuthenticateRequest { signature { access_key_id: "AKIAIOSFODNN7EXAMPLE" v4_parameters { signed_at { } } } } 2025-05-07T08:51:09.874659Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [517000081208] Response AuthenticateResponse { subject { user_account { id: "user1" } } } 2025-05-07T08:51:09.875165Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1784: Ticket AKIA****MPLE (B3EDC139) () has now valid token of user1@as >> TMessageBusServerPersQueueGetPartitionStatusMetaRequestTest::FailsOnBalancerDescribeResultFailureWhenTopicsAreGivenExplicitly [GOOD] >> TMessageBusServerPersQueueGetPartitionStatusMetaRequestTest::FailsOnEmptyTopicName >> KqpPg::InsertValuesFromTableWithDefault+useSink [GOOD] >> KqpPg::InsertValuesFromTableWithDefault-useSink >> TPartitionTests::DataTxCalcPredicateError [GOOD] >> TMessageBusServerPersQueueGetReadSessionsInfoMetaRequestTest::HandlesPipeDisconnection_DisconnectionComesSecond [GOOD] >> TMessageBusServerPersQueueGetReadSessionsInfoMetaRequestTest::HandlesPipeDisconnection_AnswerDoesNotArrive >> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::FailsOnDuplicatedPartition [GOOD] >> TPartitionTests::DataTxCalcPredicateOrder ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/unittest >> TPartitionTests::TestBatchingWithProposeConfig [GOOD] Test command err: 2025-05-07T08:50:57.366810Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-05-07T08:50:57.366926Z node 1 :PERSQUEUE INFO: pq_impl.cpp:794: [PQ: 72057594037927937] doesn't have tx writes info 2025-05-07T08:50:57.385595Z node 1 :PERSQUEUE INFO: partition_init.cpp:879: [PQ: 72057594037927937, Partition: 3, State: StateInit] bootstrapping 3 [1:179:2194] 2025-05-07T08:50:57.388073Z node 1 :PERSQUEUE INFO: partition_init.cpp:784: [Root/PQ/rt3.dc1--account--topic:3:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp from keys completed. 
Value 2025-05-07T08:50:57.000000Z 2025-05-07T08:50:57.388144Z node 1 :PERSQUEUE INFO: partition.cpp:557: [PQ: 72057594037927937, Partition: 3, State: StateInit] init complete for topic 'Root/PQ/rt3.dc1--account--topic' partition 3 generation 0 [1:179:2194] Got cmd write: CmdDeleteRange { Range { From: "m0000000003cclient-1" IncludeFrom: true To: "m0000000003cclient-1" IncludeTo: true } } CmdDeleteRange { Range { From: "m0000000003uclient-1" IncludeFrom: true To: "m0000000003uclient-1" IncludeTo: true } } CmdWrite { Key: "i0000000003" Value: "\010\000\020\n\030\000(\350\272\303\317\3522" StorageChannel: INLINE } CmdWrite { Key: "m0000000003cclient-2" Value: "\010\000\020\000\030\000\"\000(\0000\000@\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000003uclient-2" Value: "\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000" StorageChannel: INLINE } CmdWrite { Key: "_config_3" Value: "\022\t\030\200\243\0058\200\200\200\005\030\000\"\027rt3.dc1--account--topic(\0020\001\272\001 /Root/PQ/rt3.dc1--account--topic\352\001\000\372\001\002\010\000\212\002\007account\220\002\001\242\002\002\010\000\252\002\016\n\010client-2@\000H\000" StorageChannel: INLINE } 2025-05-07T08:50:58.323481Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-05-07T08:50:58.323551Z node 2 :PERSQUEUE INFO: pq_impl.cpp:794: [PQ: 72057594037927937] doesn't have tx writes info 2025-05-07T08:50:58.343926Z node 2 :PERSQUEUE INFO: partition_init.cpp:879: [PQ: 72057594037927937, Partition: {0, {0, 1111}, 123}, State: StateInit] bootstrapping {0, {0, 1111}, 123} [2:179:2194] 2025-05-07T08:50:58.347995Z node 2 :PERSQUEUE INFO: partition_init.cpp:773: [rt3.dc1--account--topic:{0, {0, 1111}, 123}:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 
2025-05-07T08:50:58.348072Z node 2 :PERSQUEUE INFO: partition.cpp:557: [PQ: 72057594037927937, Partition: {0, {0, 1111}, 123}, State: StateInit] init complete for topic 'rt3.dc1--account--topic' partition {0, {0, 1111}, 123} generation 0 [2:179:2194]
2025-05-07T08:50:59.409149Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig
2025-05-07T08:50:59.409224Z node 3 :PERSQUEUE INFO: pq_impl.cpp:794: [PQ: 72057594037927937] doesn't have tx writes info
2025-05-07T08:50:59.429727Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitConfigStep
Got KV request
2025-05-07T08:50:59.429981Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitInternalFieldsStep
2025-05-07T08:50:59.430226Z node 3 :PERSQUEUE INFO: partition_init.cpp:879: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [3:178:2193]
2025-05-07T08:50:59.431164Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitDiskStatusStep
Got KV request
2025-05-07T08:50:59.431371Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitMetaStep
Got KV request
2025-05-07T08:50:59.431538Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitInfoRangeStep
Got KV request
2025-05-07T08:50:59.431725Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitDataRangeStep
Got KV request
2025-05-07T08:50:59.432136Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:620: [Root/PQ/rt3.dc1--account--topic:0:TInitDataRangeStep] Got data offset 0 count 50 size 684 so 0 eo 50 d0000000000_00000000000000000000_00000_0000000050_00000
2025-05-07T08:50:59.432231Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitDataStep
2025-05-07T08:50:59.432269Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitEndWriteTimestampStep
2025-05-07T08:50:59.432317Z node 3 :PERSQUEUE INFO: partition_init.cpp:784: [Root/PQ/rt3.dc1--account--topic:0:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp from keys completed. Value 2025-05-07T08:50:59.000000Z
2025-05-07T08:50:59.432353Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:55: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Initializing completed.
2025-05-07T08:50:59.432404Z node 3 :PERSQUEUE INFO: partition.cpp:557: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'Root/PQ/rt3.dc1--account--topic' partition 0 generation 0 [3:178:2193]
2025-05-07T08:50:59.432466Z node 3 :PERSQUEUE DEBUG: partition.cpp:571: [PQ: 72057594037927937, Partition: 0, State: StateInit] SYNC INIT topic Root/PQ/rt3.dc1--account--topic partitition 0 so 0 endOffset 50 Head Offset 50 PartNo 0 PackedSize 0 count 0 nextOffset 50 batches 0
SYNC INIT DATA KEY: d0000000000_00000000000000000000_00000_0000000050_00000 size 684
2025-05-07T08:50:59.432516Z node 3 :PERSQUEUE DEBUG: partition.cpp:3847: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Process pending events. Count 0
2025-05-07T08:50:59.786508Z node 3 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie src3|66eac90e-d86583d6-9df91a27-22177e97_0 generated for partition 0 topic 'Root/PQ/rt3.dc1--account--topic' owner src3
2025-05-07T08:50:59.786659Z node 3 :PERSQUEUE DEBUG: partition_write.cpp:35: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::ReplyOwnerOk. Partition: 0
Got batch complete: 1
2025-05-07T08:50:59.786879Z node 3 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie src4|2963f1ba-cbbc7e40-ec0bfd26-802ac10c_0 generated for partition 0 topic 'Root/PQ/rt3.dc1--account--topic' owner src4
2025-05-07T08:50:59.786945Z node 3 :PERSQUEUE DEBUG: partition_write.cpp:35: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::ReplyOwnerOk. Partition: 0
Got batch complete: 1
Create distr tx with id = 0 and act no: 1
Create immediate tx with id = 3 and act no: 4
Create immediate tx with id = 6 and act no: 7
2025-05-07T08:51:02.205778Z node 3 :PERSQUEUE DEBUG: partition.cpp:1124: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCalcPredicate Step 1, TxId 0
Create distr tx with id = 8 and act no: 9
Create distr tx with id = 10 and act no: 11
2025-05-07T08:51:02.742996Z node 3 :PERSQUEUE DEBUG: partition.cpp:1124: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCalcPredicate Step 1, TxId 8
2025-05-07T08:51:02.743081Z node 3 :PERSQUEUE DEBUG: partition.cpp:1124: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCalcPredicate Step 1, TxId 10
2025-05-07T08:51:02.743192Z node 3 :PERSQUEUE DEBUG: partition.cpp:1360: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvGetWriteInfoResponse
2025-05-07T08:51:02.743248Z node 3 :PERSQUEUE DEBUG: partition.cpp:1360: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvGetWriteInfoResponse
2025-05-07T08:51:03.554741Z node 3 :PERSQUEUE DEBUG: partition.cpp:1360: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvGetWriteInfoResponse
2025-05-07T08:51:04.905340Z node 3 :PERSQUEUE DEBUG: partition.cpp:1360: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvGetWriteInfoResponse
2025-05-07T08:51:04.905613Z node 3 :PERSQUEUE DEBUG: partition.cpp:1360: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvGetWriteInfoResponse
Got batch complete: 17
Wait batch completion
2025-05-07T08:51:04.905923Z node 3 :PERSQUEUE DEBUG: partition.cpp:1170: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCommit Step 1, TxId 10
Wait kv request
2025-05-07T08:51:05.178793Z node 3 :PERSQUEUE DEBUG: partition.cpp:1170: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCommit Step 1, TxId 0
2025-05-07T08:51:05.178867Z node 3 :PERSQUEUE DEBUG: partition.cpp:2421: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::CommitWriteOperations TxId: 0
2025-05-07T08:51:05.178933Z node 3 :PERSQUEUE DEBUG: partition.cpp:2448: [PQ: 72057594037927937, Partition: 0, State: StateIdle] t.WriteInfo->BodyKeys.size=0, t.WriteInfo->BlobsFromHead.size=0
2025-05-07T08:51:05.178996Z node 3 :PERSQUEUE DEBUG: partition.cpp:2449: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Head=Offset 50 PartNo 0 PackedSize 0 count 0 nextOffset 50 batches 0, NewHead=Offset 50 PartNo 0 PackedSize 0 count 0 nextOffset 50 batches 0
2025-05-07T08:51:05.179102Z node 3 :PERSQUEUE DEBUG: partition_write.cpp:138: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::DropOwner.
2025-05-07T08:51:05.179142Z node 3 :PERSQUEUE DEBUG: partition_write.cpp:138: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::DropOwner.
2025-05-07T08:51:05.179259Z node 3 :PERSQUEUE DEBUG: partition.cpp:2421: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::CommitWriteOperations TxId: (empty maybe)
2025-05-07T08:51:05.179301Z node 3 :PERSQUEUE DEBUG: partition.cpp:2448: [PQ: 72057594037927937, Partition: 0, State: StateIdle] t.WriteInfo->BodyKeys.size=0, t.WriteInfo->BlobsFromHead.size=0
2025-05-07T08:51:05.179345Z node 3 :PERSQUEUE DEBUG: partition.cpp:2449: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Head=Offset 50 PartNo 0 PackedSize 0 count 0 nextOffset 50 batches 0, NewHead=Offset 50 PartNo 0 PackedSize 0 count 0 nextOffset 50 batches 0
2025-05-07T08:51:05.209690Z node 3 :PERSQUEUE DEBUG: partition.cpp:3319: [PQ: 72057594037927937, Partition: 0, State: StateIdle] schedule TEvPersQueue::TEvProposeTransactionResult(COMPLETE), reason=
2025-05-07T08:51:05.209868Z node 3 :PERSQUEUE DEBUG: partition_write.cpp:1126: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Already written message. Topic: 'Root/PQ/rt3.dc1--account--topic' Partition: 0 SourceId: 'src4'. Message seqNo: 7. Committed seqNo: (NULL). Writing seqNo: 7. EndOffset: 50. CurOffset: 50. Offset: 50
2025-05-07T08:51:05.210154Z node 3 :PERSQUEUE DEBUG: partition_write.cpp:1233: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition 0 part blob processing sourceId 'src4' seqNo 8 partNo 0
2025-05-07T08:51:05.211086Z node 3 :PERSQUEUE DEBUG: partition_write.cpp:1333: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition 0 part blob complete sourceId 'src4' seqNo 8 partNo 0 FormedBlobsCount 0 NewHead: Offset 51 PartNo 0 PackedSize 84 count 1 nextOffset 52 batches 1
2025-05-07T08:51:05.211194Z node 3 :PERSQUEUE DEBUG: partition_write.cpp:1233: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition 0 part blob processing sourceId 'src4' seqNo 9 partNo 0
2025-05-07T08:51:05.211244Z node 3 :PERSQUEUE DEBUG: partition_write.cpp:1333: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition 0 part blob complete sourc ... 000_00000000000000000000_00000_0000000050_00000
2025-05-07T08:51:10.225204Z node 5 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitDataStep
2025-05-07T08:51:10.225247Z node 5 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitEndWriteTimestampStep
2025-05-07T08:51:10.225297Z node 5 :PERSQUEUE INFO: partition_init.cpp:784: [Root/PQ/rt3.dc1--account--topic:0:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp from keys completed. Value 2025-05-07T08:51:10.000000Z
2025-05-07T08:51:10.225336Z node 5 :PERSQUEUE DEBUG: partition_init.cpp:55: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Initializing completed.
2025-05-07T08:51:10.225382Z node 5 :PERSQUEUE INFO: partition.cpp:557: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'Root/PQ/rt3.dc1--account--topic' partition 0 generation 0 [5:178:2193]
2025-05-07T08:51:10.225437Z node 5 :PERSQUEUE DEBUG: partition.cpp:571: [PQ: 72057594037927937, Partition: 0, State: StateInit] SYNC INIT topic Root/PQ/rt3.dc1--account--topic partitition 0 so 0 endOffset 50 Head Offset 50 PartNo 0 PackedSize 0 count 0 nextOffset 50 batches 0
SYNC INIT DATA KEY: d0000000000_00000000000000000000_00000_0000000050_00000 size 684
2025-05-07T08:51:10.225491Z node 5 :PERSQUEUE DEBUG: partition.cpp:3847: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Process pending events. Count 0
2025-05-07T08:51:10.225571Z node 5 :PERSQUEUE DEBUG: partition_read.cpp:774: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition 0 user client-1 readTimeStamp for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 0 rrg 0
2025-05-07T08:51:10.225619Z node 5 :PERSQUEUE DEBUG: partition_read.cpp:816: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition 0 user client-1 send read request for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 1 rrg 0
2025-05-07T08:51:10.225665Z node 5 :PERSQUEUE DEBUG: partition_read.cpp:774: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition 0 user client-0 readTimeStamp for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 1 rrg 0
2025-05-07T08:51:10.225986Z node 5 :PERSQUEUE DEBUG: partition_read.cpp:731: [PQ: 72057594037927937, Partition: 0, State: StateIdle] read cookie 0 Topic 'Root/PQ/rt3.dc1--account--topic' partition 0 user client-1 offset 0 count 1 size 1024000 endOffset 50 max time lag 0ms effective offset 0
2025-05-07T08:51:10.226171Z node 5 :PERSQUEUE DEBUG: partition_read.cpp:931: [PQ: 72057594037927937, Partition: 0, State: StateIdle] read cookie 0 added 1 blobs, size 684 count 50 last offset 1, current partition end offset: 50
2025-05-07T08:51:10.226219Z node 5 :PERSQUEUE DEBUG: partition_read.cpp:955: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Reading cookie 0. Send blob request.
Create distr tx with id = 0 and act no: 1
2025-05-07T08:51:11.616758Z node 5 :PERSQUEUE DEBUG: partition.cpp:1124: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCalcPredicate Step 1, TxId 0
2025-05-07T08:51:11.616896Z node 5 :PERSQUEUE DEBUG: partition.cpp:1033: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvProposePartitionConfig Step 1, TxId 3
Wait batch completion
2025-05-07T08:51:12.930729Z node 5 :PERSQUEUE DEBUG: partition.cpp:1360: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvGetWriteInfoResponse
2025-05-07T08:51:12.930931Z node 5 :PERSQUEUE DEBUG: partition.cpp:2421: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::CommitWriteOperations TxId: (empty maybe)
2025-05-07T08:51:12.930996Z node 5 :PERSQUEUE DEBUG: partition.cpp:3319: [PQ: 72057594037927937, Partition: 0, State: StateIdle] schedule TEvPersQueue::TEvProposeTransactionResult(COMPLETE), reason=
2025-05-07T08:51:12.931259Z node 5 :PERSQUEUE DEBUG: partition.cpp:2182: [PQ: 72057594037927937, Partition: 0, State: StateIdle] === DumpKeyValueRequest ===
2025-05-07T08:51:12.931311Z node 5 :PERSQUEUE DEBUG: partition.cpp:2183: [PQ: 72057594037927937, Partition: 0, State: StateIdle] --- delete ----------------
2025-05-07T08:51:12.931355Z node 5 :PERSQUEUE DEBUG: partition.cpp:2191: [PQ: 72057594037927937, Partition: 0, State: StateIdle] --- write -----------------
2025-05-07T08:51:12.931407Z node 5 :PERSQUEUE DEBUG: partition.cpp:2194: [PQ: 72057594037927937, Partition: 0, State: StateIdle] i0000000000
2025-05-07T08:51:12.931447Z node 5 :PERSQUEUE DEBUG: partition.cpp:2194: [PQ: 72057594037927937, Partition: 0, State: StateIdle] I0000000000
2025-05-07T08:51:12.931474Z node 5 :PERSQUEUE DEBUG: partition.cpp:2194: [PQ: 72057594037927937, Partition: 0, State: StateIdle] m0000000000cclient-0
2025-05-07T08:51:12.931505Z node 5 :PERSQUEUE DEBUG: partition.cpp:2194: [PQ: 72057594037927937, Partition: 0, State: StateIdle] m0000000000uclient-0
2025-05-07T08:51:12.931540Z node 5 :PERSQUEUE DEBUG: partition.cpp:2196: [PQ: 72057594037927937, Partition: 0, State: StateIdle] --- rename ----------------
2025-05-07T08:51:12.931592Z node 5 :PERSQUEUE DEBUG: partition.cpp:2201: [PQ: 72057594037927937, Partition: 0, State: StateIdle] ===========================
Got KV request
Got batch complete: 2
Got KV request
Got KV request
Send disk status response with cookie: 0
Wait immediate tx complete 2
2025-05-07T08:51:14.194864Z node 5 :PERSQUEUE DEBUG: partition_write.cpp:524: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0
Got batch complete: 1
Got propose resutl: Origin: 72057594037927937 Status: COMPLETE TxId: 2
Wait batch completion
2025-05-07T08:51:14.195230Z node 5 :PERSQUEUE DEBUG: partition.cpp:1170: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCommit Step 1, TxId 3
2025-05-07T08:51:14.195376Z node 5 :PERSQUEUE DEBUG: partition.cpp:3216: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition 0 user client-1 drop done
2025-05-07T08:51:14.195661Z node 5 :PERSQUEUE DEBUG: partition.cpp:2182: [PQ: 72057594037927937, Partition: 0, State: StateIdle] === DumpKeyValueRequest ===
2025-05-07T08:51:14.195719Z node 5 :PERSQUEUE DEBUG: partition.cpp:2183: [PQ: 72057594037927937, Partition: 0, State: StateIdle] --- delete ----------------
2025-05-07T08:51:14.195769Z node 5 :PERSQUEUE DEBUG: partition.cpp:2189: [PQ: 72057594037927937, Partition: 0, State: StateIdle] [m0000000000cclient-1, m0000000000cclient-1]
2025-05-07T08:51:14.195811Z node 5 :PERSQUEUE DEBUG: partition.cpp:2189: [PQ: 72057594037927937, Partition: 0, State: StateIdle] [m0000000000uclient-1, m0000000000uclient-1]
2025-05-07T08:51:14.195851Z node 5 :PERSQUEUE DEBUG: partition.cpp:2191: [PQ: 72057594037927937, Partition: 0, State: StateIdle] --- write -----------------
2025-05-07T08:51:14.195899Z node 5 :PERSQUEUE DEBUG: partition.cpp:2194: [PQ: 72057594037927937, Partition: 0, State: StateIdle] i0000000000
2025-05-07T08:51:14.195939Z node 5 :PERSQUEUE DEBUG: partition.cpp:2194: [PQ: 72057594037927937, Partition: 0, State: StateIdle] I0000000000
2025-05-07T08:51:14.195966Z node 5 :PERSQUEUE DEBUG: partition.cpp:2194: [PQ: 72057594037927937, Partition: 0, State: StateIdle] m0000000000cclient-0
2025-05-07T08:51:14.195993Z node 5 :PERSQUEUE DEBUG: partition.cpp:2194: [PQ: 72057594037927937, Partition: 0, State: StateIdle] m0000000000uclient-0
2025-05-07T08:51:14.196038Z node 5 :PERSQUEUE DEBUG: partition.cpp:2194: [PQ: 72057594037927937, Partition: 0, State: StateIdle] _config_0
2025-05-07T08:51:14.196079Z node 5 :PERSQUEUE DEBUG: partition.cpp:2196: [PQ: 72057594037927937, Partition: 0, State: StateIdle] --- rename ----------------
2025-05-07T08:51:14.196127Z node 5 :PERSQUEUE DEBUG: partition.cpp:2201: [PQ: 72057594037927937, Partition: 0, State: StateIdle] ===========================
Got KV request
Send disk status response with cookie: 0
2025-05-07T08:51:14.218285Z node 5 :PERSQUEUE DEBUG: event_helpers.cpp:40: tablet 72057594037927937 topic 'Root/PQ/rt3.dc1--account--topic' partition 0 error: cannot finish read request. Consumer client-1 is gone from partition
2025-05-07T08:51:14.218496Z node 5 :PERSQUEUE DEBUG: partition_write.cpp:524: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0
2025-05-07T08:51:14.218603Z node 5 :PERSQUEUE DEBUG: partition.cpp:2421: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::CommitWriteOperations TxId: (empty maybe)
2025-05-07T08:51:14.218657Z node 5 :PERSQUEUE DEBUG: partition.cpp:3319: [PQ: 72057594037927937, Partition: 0, State: StateIdle] schedule TEvPersQueue::TEvProposeTransactionResult(COMPLETE), reason=
2025-05-07T08:51:14.218879Z node 5 :PERSQUEUE DEBUG: partition.cpp:2182: [PQ: 72057594037927937, Partition: 0, State: StateIdle] === DumpKeyValueRequest ===
2025-05-07T08:51:14.218932Z node 5 :PERSQUEUE DEBUG: partition.cpp:2183: [PQ: 72057594037927937, Partition: 0, State: StateIdle] --- delete ----------------
2025-05-07T08:51:14.218979Z node 5 :PERSQUEUE DEBUG: partition.cpp:2191: [PQ: 72057594037927937, Partition: 0, State: StateIdle] --- write -----------------
2025-05-07T08:51:14.219031Z node 5 :PERSQUEUE DEBUG: partition.cpp:2194: [PQ: 72057594037927937, Partition: 0, State: StateIdle] i0000000000
2025-05-07T08:51:14.219069Z node 5 :PERSQUEUE DEBUG: partition.cpp:2194: [PQ: 72057594037927937, Partition: 0, State: StateIdle] m0000000000cclient-0
2025-05-07T08:51:14.219097Z node 5 :PERSQUEUE DEBUG: partition.cpp:2194: [PQ: 72057594037927937, Partition: 0, State: StateIdle] m0000000000uclient-0
2025-05-07T08:51:14.219135Z node 5 :PERSQUEUE DEBUG: partition.cpp:2196: [PQ: 72057594037927937, Partition: 0, State: StateIdle] --- rename ----------------
2025-05-07T08:51:14.219179Z node 5 :PERSQUEUE DEBUG: partition.cpp:2201: [PQ: 72057594037927937, Partition: 0, State: StateIdle] ===========================
Got KV request
2025-05-07T08:51:14.219399Z node 5 :PERSQUEUE DEBUG: partition_read.cpp:774: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition 0 user client-0 readTimeStamp for offset 5 initiated queuesize 0 startOffset 0 ReadingTimestamp 0 rrg 0
2025-05-07T08:51:14.219462Z node 5 :PERSQUEUE DEBUG: partition_read.cpp:816: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition 0 user client-0 send read request for offset 5 initiated queuesize 0 startOffset 0 ReadingTimestamp 1 rrg 0
Got KV request
Got batch complete: 1
Got KV request
Got KV request
Got KV request
2025-05-07T08:51:14.220140Z node 5 :PERSQUEUE DEBUG: partition_read.cpp:731: [PQ: 72057594037927937, Partition: 0, State: StateIdle] read cookie 1 Topic 'Root/PQ/rt3.dc1--account--topic' partition 0 user client-0 offset 5 count 1 size 1024000 endOffset 50 max time lag 0ms effective offset 5
2025-05-07T08:51:14.220358Z node 5 :PERSQUEUE DEBUG: partition_read.cpp:931: [PQ: 72057594037927937, Partition: 0, State: StateIdle] read cookie 1 added 1 blobs, size 0 count 45 last offset 6, current partition end offset: 50
2025-05-07T08:51:14.220419Z node 5 :PERSQUEUE DEBUG: partition_read.cpp:955: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Reading cookie 1. Send blob request.
Got KV request
Got KV request
Wait batch completion
Send disk status response with cookie: 0
Wait immediate tx complete 4
2025-05-07T08:51:14.253596Z node 5 :PERSQUEUE DEBUG: partition_write.cpp:524: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0
Got propose resutl: Origin: 72057594037927937 Status: COMPLETE TxId: 4
>> TMessageBusServerPersQueueGetPartitionLocationsMetaRequestTest::FailsOnNotOkStatusInGetNodeRequest
>> KqpPg::InsertValuesFromTableWithDefaultText+useSink [GOOD]
>> KqpPg::InsertValuesFromTableWithDefaultText-useSink
>> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::FailsOnFailedGetAllTopicsRequest [GOOD]
>> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::FailsOnNotOkStatusInGetNodeRequest
>> TTicketParserTest::NebiusAccessKeySignatureUnsupported [GOOD]
>> TMessageBusServerPersQueueGetPartitionStatusMetaRequestTest::FailsOnEmptyTopicName [GOOD]
>> TMessageBusServerPersQueueGetPartitionStatusMetaRequestTest::FailsOnDuplicatedTopicName
>> TMessageBusServerPersQueueGetTopicMetadataMetaRequestTest::HandlesTimeout
------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/server/ut/unittest >> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::FailsOnDuplicatedPartition [GOOD]
Test command err:
Assert failed: Check response: { Status: 128 ErrorReason: "path \'Root/PQ\' has unknown/invalid root prefix \'Root\', Marker# PQ14" ErrorCode: UNKNOWN_TOPIC }
Assert failed: Check response: { Status: 128 ErrorReason: "topic \'Root/PQ\' describe error, Status# LookupError, Marker# PQ1" ErrorCode: ERROR }
Assert failed: Check response: { Status: 128 ErrorReason: "multiple TopicRequest for topic \'rt3.dc1--topic1\'" ErrorCode: BAD_REQUEST }
Assert failed: Check response: { Status: 128 ErrorReason: "multiple partition 2 in TopicRequest for topic \'rt3.dc1--topic2\'" ErrorCode: BAD_REQUEST }
>> TMessageBusServerPersQueueGetPartitionLocationsMetaRequestTest::FailsOnNotOkStatusInGetNodeRequest [GOOD]
>> TMessageBusServerPersQueueGetPartitionLocationsMetaRequestTest::FailsOnZeroBalancerTabletIdInGetNodeRequest
>> TMessageBusServerPersQueueGetPartitionStatusMetaRequestTest::FailsOnDuplicatedTopicName [GOOD]
>> TMessageBusServerPersQueueGetPartitionStatusMetaRequestTest::FailsOnDuplicatedPartition
>> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::FailsOnNotOkStatusInGetNodeRequest [GOOD]
>> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::FailsOnNoBalancerInGetNodeRequest
>> TMessageBusServerPersQueueGetReadSessionsInfoMetaRequestTest::HandlesPipeDisconnection_AnswerDoesNotArrive [GOOD]
>> TMessageBusServerPersQueueGetTopicMetadataMetaRequestTest::HandlesTimeout [GOOD]
>> TMessageBusServerPersQueueGetTopicMetadataMetaRequestTest::FailsOnZeroBalancerTabletIdInGetNodeRequest
>> TMessageBusServerPersQueueGetPartitionLocationsMetaRequestTest::FailsOnZeroBalancerTabletIdInGetNodeRequest [GOOD]
>> TMessageBusServerPersQueueGetPartitionLocationsMetaRequestTest::HandlesPipeDisconnection_DisconnectionComesFirst
>> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::FailsOnNoBalancerInGetNodeRequest [GOOD]
>> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::FailsOnEmptyTopicName
>> TMessageBusServerPersQueueGetPartitionStatusMetaRequestTest::FailsOnDuplicatedPartition [GOOD]
>> TMessageBusServerPersQueueGetTopicMetadataMetaRequestTest::FailsOnZeroBalancerTabletIdInGetNodeRequest [GOOD]
>> TMessageBusServerPersQueueGetTopicMetadataMetaRequestTest::SuccessfullyReplies
------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/server/ut/unittest >> TMessageBusServerPersQueueGetReadSessionsInfoMetaRequestTest::HandlesPipeDisconnection_AnswerDoesNotArrive [GOOD]
Test command err:
2025-05-07T08:51:13.753019Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3088: [PQ: 72057594037928037] Handle TEvInterconnect::TEvNodeInfo
2025-05-07T08:51:13.756779Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3120: [PQ: 72057594037928037] Transactions request. From tx_00000000000000000000, To tx_18446744073709551615
2025-05-07T08:51:13.757095Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:745: [PQ: 72057594037928037] doesn't have tx info
2025-05-07T08:51:13.757162Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:757: [PQ: 72057594037928037] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0
2025-05-07T08:51:13.757202Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:969: [PQ: 72057594037928037] no config, start with empty partitions and default config
2025-05-07T08:51:13.757246Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4846: [PQ: 72057594037928037] Txs.size=0, PlannedTxs.size=0
2025-05-07T08:51:13.757307Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037928037] disable metering: reason# billing is not enabled in BillingMeteringConfig
2025-05-07T08:51:13.757378Z node 1 :PERSQUEUE INFO: pq_impl.cpp:794: [PQ: 72057594037928037] doesn't have tx writes info
2025-05-07T08:51:13.758060Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2874: [PQ: 72057594037928037] server connected, pipe [1:260:2252], now have 1 active actors on pipe
2025-05-07T08:51:13.758155Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:1454: [PQ: 72057594037928037] Handle TEvPersQueue::TEvUpdateConfig
2025-05-07T08:51:13.778139Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:1640: [PQ: 72057594037928037] Config update version 1(current 0) received from actor [1:99:2134] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic1" Version: 1 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 }
2025-05-07T08:51:13.781533Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:585: [PQ: 72057594037928037] Apply new config CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic1" Version: 1 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 }
2025-05-07T08:51:13.781720Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037928037] disable metering: reason# billing is not enabled in BillingMeteringConfig
2025-05-07T08:51:13.782647Z node 1 :PERSQUEUE INFO: pq_impl.cpp:1481: [PQ: 72057594037928037] Config applied version 1 actor [1:99:2134] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic1" Version: 1 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 }
2025-05-07T08:51:13.782804Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic1:0:Initializer] Start initializing step TInitConfigStep
2025-05-07T08:51:13.783222Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic1:0:Initializer] Start initializing step TInitInternalFieldsStep
2025-05-07T08:51:13.783593Z node 1 :PERSQUEUE INFO: partition_init.cpp:879: [PQ: 72057594037928037, Partition: 0, State: StateInit] bootstrapping 0 [1:268:2258]
2025-05-07T08:51:13.786065Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--topic1:0:Initializer] Initializing completed.
2025-05-07T08:51:13.786136Z node 1 :PERSQUEUE INFO: partition.cpp:557: [PQ: 72057594037928037, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--topic1' partition 0 generation 2 [1:268:2258]
2025-05-07T08:51:13.786205Z node 1 :PERSQUEUE DEBUG: partition.cpp:571: [PQ: 72057594037928037, Partition: 0, State: StateInit] SYNC INIT topic rt3.dc1--topic1 partitition 0 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0
2025-05-07T08:51:13.786281Z node 1 :PERSQUEUE DEBUG: partition.cpp:3847: [PQ: 72057594037928037, Partition: 0, State: StateIdle] Process pending events. Count 0
2025-05-07T08:51:13.787109Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2874: [PQ: 72057594037928037] server connected, pipe [1:271:2260], now have 1 active actors on pipe
2025-05-07T08:51:13.840801Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3088: [PQ: 72057594037928137] Handle TEvInterconnect::TEvNodeInfo
2025-05-07T08:51:13.846726Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3120: [PQ: 72057594037928137] Transactions request. From tx_00000000000000000000, To tx_18446744073709551615
2025-05-07T08:51:13.847067Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:745: [PQ: 72057594037928137] doesn't have tx info
2025-05-07T08:51:13.847123Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:757: [PQ: 72057594037928137] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0
2025-05-07T08:51:13.847189Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:969: [PQ: 72057594037928137] no config, start with empty partitions and default config
2025-05-07T08:51:13.847243Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4846: [PQ: 72057594037928137] Txs.size=0, PlannedTxs.size=0
2025-05-07T08:51:13.847303Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037928137] disable metering: reason# billing is not enabled in BillingMeteringConfig
2025-05-07T08:51:13.847388Z node 1 :PERSQUEUE INFO: pq_impl.cpp:794: [PQ: 72057594037928137] doesn't have tx writes info
2025-05-07T08:51:13.848064Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2874: [PQ: 72057594037928137] server connected, pipe [1:403:2358], now have 1 active actors on pipe
2025-05-07T08:51:13.848128Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:1454: [PQ: 72057594037928137] Handle TEvPersQueue::TEvUpdateConfig
2025-05-07T08:51:13.848307Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:1640: [PQ: 72057594037928137] Config update version 2(current 0) received from actor [1:99:2134] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic2" Version: 2 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 }
2025-05-07T08:51:13.850902Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:585: [PQ: 72057594037928137] Apply new config CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic2" Version: 2 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 }
2025-05-07T08:51:13.851034Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037928137] disable metering: reason# billing is not enabled in BillingMeteringConfig
2025-05-07T08:51:13.851993Z node 1 :PERSQUEUE INFO: pq_impl.cpp:1481: [PQ: 72057594037928137] Config applied version 2 actor [1:99:2134] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic2" Version: 2 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 }
2025-05-07T08:51:13.852170Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:0:Initializer] Start initializing step TInitConfigStep
2025-05-07T08:51:13.852543Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:0:Initializer] Start initializing step TInitInternalFieldsStep
2025-05-07T08:51:13.852767Z node 1 :PERSQUEUE INFO: partition_init.cpp:879: [PQ: 72057594037928137, Partition: 0, State: StateInit] bootstrapping 0 [1:411:2364]
2025-05-07T08:51:13.854889Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--topic2:0:Initializer] Initializing completed.
2025-05-07T08:51:13.854959Z node 1 :PERSQUEUE INFO: partition.cpp:557: [PQ: 72057594037928137, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--topic2' partition 0 generation 2 [1:411:2364]
2025-05-07T08:51:13.855022Z node 1 :PERSQUEUE DEBUG: partition.cpp:571: [PQ: 72057594037928137, Partition: 0, State: StateInit] SYNC INIT topic rt3.dc1--topic2 partitition 0 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0
2025-05-07T08:51:13.855071Z node 1 :PERSQUEUE DEBUG: partition.cpp:3847: [PQ: 72057594037928137, Partition: 0, State: StateIdle] Process pending events. Count 0
2025-05-07T08:51:13.855891Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2874: [PQ: 72057594037928137] server connected, pipe [1:414:2366], now have 1 active actors on pipe
2025-05-07T08:51:13.887339Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3088: [PQ: 72057594037928138] Handle TEvInterconnect::TEvNodeInfo
2025-05-07T08:51:13.891587Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3120: [PQ: 72057594037928138] Transactions request. From tx_00000000000000000000, To tx_18446744073709551615
2025-05-07T08:51:13.891921Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:745: [PQ: 72057594037928138] doesn't have tx info
2025-05-07T08:51:13.892008Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:757: [PQ: 72057594037928138] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0
2025-05-07T08:51:13.892057Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:969: [PQ: 72057594037928138] no config, start with empty partitions and default config
2025-05-07T08:51:13.892100Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4846: [PQ: 72057594037928138] Txs.size=0, PlannedTxs.size=0
2025-05-07T08:51:13.892165Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037928138] disable metering: reason# billing is not enabled in BillingMeteringConfig
2025-05-07T08:51:13.892224Z node 1 :PERSQUEUE INFO: pq_impl.cpp:794: [PQ: 72057594037928138] doesn't have tx writes info
2025-05-07T08:51:13.892875Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2874: [PQ: 72057594037928138] server connected, pipe [1:463:2403], now have 1 active actors on pipe
2025-05-07T08:51:13.893004Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:1454: [PQ: 72057594037928138] Handle TEvPersQueue::TEvUpdateConfig
2025-05-07T08:51:13.893178Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:1640: [PQ: 72057594037928138] Config update version 3(current 0) received from actor [1:99:2134] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 1 TopicName: "rt3.dc1--topic2" Version: 3 Partitions { PartitionId: 1 } AllPartitions { PartitionId: 1 }
2025-05-07T08:51:13.895610Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:585: [PQ: 72057594037928138] Apply new config CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 1 TopicName: "rt3.dc1--topic2" Version: 3 Partitions { PartitionId: 1 } AllPartitions { PartitionId: 1 }
2025-05-07T08:51:13.895747Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037928138] disable metering: reason# billing is not enabled in BillingMeteringConfig
2025-05-07T08:51:13.896589Z node 1 :PERSQUEUE INFO: pq_impl.cpp:1481: [PQ: 72057594037928138] Config applied version 3 actor [1:99:2134] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 1 TopicName: "rt3.dc1--topic2" Version: 3 Partitions { PartitionId: 1 } AllPartitions { PartitionId: 1 }
2025-05-07T08:51:13.896704Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:1:Initializer] ... RSQUEUE INFO: pq_impl.cpp:1481: [PQ: 72057594037928138] Config applied version 11 actor [3:99:2134] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 1 TopicName: "rt3.dc1--topic2" Version: 11 Partitions { PartitionId: 1 } AllPartitions { PartitionId: 1 }
2025-05-07T08:51:15.501052Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:1:Initializer] Start initializing step TInitConfigStep
2025-05-07T08:51:15.501605Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:1:Initializer] Start initializing step TInitInternalFieldsStep
2025-05-07T08:51:15.501860Z node 3 :PERSQUEUE INFO: partition_init.cpp:879: [PQ: 72057594037928138, Partition: 1, State: StateInit] bootstrapping 1 [3:473:2411]
2025-05-07T08:51:15.505993Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--topic2:1:Initializer] Initializing completed.
2025-05-07T08:51:15.506149Z node 3 :PERSQUEUE INFO: partition.cpp:557: [PQ: 72057594037928138, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--topic2' partition 1 generation 2 [3:473:2411]
2025-05-07T08:51:15.506229Z node 3 :PERSQUEUE DEBUG: partition.cpp:571: [PQ: 72057594037928138, Partition: 1, State: StateInit] SYNC INIT topic rt3.dc1--topic2 partitition 1 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0
2025-05-07T08:51:15.506304Z node 3 :PERSQUEUE DEBUG: partition.cpp:3847: [PQ: 72057594037928138, Partition: 1, State: StateIdle] Process pending events. Count 0
2025-05-07T08:51:15.507448Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2874: [PQ: 72057594037928138] server connected, pipe [3:476:2413], now have 1 active actors on pipe
2025-05-07T08:51:15.528873Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:3088: [PQ: 72057594037928139] Handle TEvInterconnect::TEvNodeInfo
2025-05-07T08:51:15.533809Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:3120: [PQ: 72057594037928139] Transactions request. From tx_00000000000000000000, To tx_18446744073709551615
2025-05-07T08:51:15.534319Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:745: [PQ: 72057594037928139] doesn't have tx info
2025-05-07T08:51:15.534393Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:757: [PQ: 72057594037928139] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0
2025-05-07T08:51:15.534454Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:969: [PQ: 72057594037928139] no config, start with empty partitions and default config
2025-05-07T08:51:15.534511Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4846: [PQ: 72057594037928139] Txs.size=0, PlannedTxs.size=0
2025-05-07T08:51:15.534584Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037928139] disable metering: reason# billing is not enabled in BillingMeteringConfig
2025-05-07T08:51:15.534685Z node 3 :PERSQUEUE INFO: pq_impl.cpp:794: [PQ: 72057594037928139] doesn't have tx writes info
2025-05-07T08:51:15.535563Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2874: [PQ: 72057594037928139] server connected, pipe [3:525:2450], now have 1 active actors on pipe
2025-05-07T08:51:15.535704Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:1454: [PQ: 72057594037928139] Handle TEvPersQueue::TEvUpdateConfig
2025-05-07T08:51:15.535941Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:1640: [PQ: 72057594037928139] Config update version 12(current 0) received from actor [3:99:2134] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 2 TopicName: "rt3.dc1--topic2" Version: 12 Partitions { PartitionId: 2 } AllPartitions { PartitionId: 2 }
2025-05-07T08:51:15.539239Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:585: [PQ: 72057594037928139] Apply new config CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 2 TopicName: "rt3.dc1--topic2" Version: 12 Partitions { PartitionId: 2 } AllPartitions { PartitionId: 2 }
2025-05-07T08:51:15.539415Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037928139] disable metering: reason# billing is not enabled in BillingMeteringConfig
2025-05-07T08:51:15.540100Z node 3 :PERSQUEUE INFO: pq_impl.cpp:1481: [PQ: 72057594037928139] Config applied version 12 actor [3:99:2134] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 2 TopicName: "rt3.dc1--topic2" Version: 12 Partitions { PartitionId: 2 } AllPartitions { PartitionId: 2 }
2025-05-07T08:51:15.540270Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitConfigStep
2025-05-07T08:51:15.540716Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitInternalFieldsStep
2025-05-07T08:51:15.540948Z node 3 :PERSQUEUE INFO: partition_init.cpp:879: [PQ: 72057594037928139, Partition: 2, State: StateInit] bootstrapping 2 [3:533:2456]
2025-05-07T08:51:15.543275Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--topic2:2:Initializer] Initializing completed.
2025-05-07T08:51:15.543367Z node 3 :PERSQUEUE INFO: partition.cpp:557: [PQ: 72057594037928139, Partition: 2, State: StateInit] init complete for topic 'rt3.dc1--topic2' partition 2 generation 2 [3:533:2456]
2025-05-07T08:51:15.543439Z node 3 :PERSQUEUE DEBUG: partition.cpp:571: [PQ: 72057594037928139, Partition: 2, State: StateInit] SYNC INIT topic rt3.dc1--topic2 partitition 2 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0
2025-05-07T08:51:15.543505Z node 3 :PERSQUEUE DEBUG: partition.cpp:3847: [PQ: 72057594037928139, Partition: 2, State: StateIdle] Process pending events. Count 0
2025-05-07T08:51:15.544456Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2874: [PQ: 72057594037928139] server connected, pipe [3:536:2458], now have 1 active actors on pipe
2025-05-07T08:51:15.546378Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2874: [PQ: 72057594037928037] server connected, pipe [3:545:2461], now have 1 active actors on pipe
2025-05-07T08:51:15.547141Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2874: [PQ: 72057594037928138] server connected, pipe [3:548:2462], now have 1 active actors on pipe
2025-05-07T08:51:15.547230Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2874: [PQ: 72057594037928137] server connected, pipe [3:547:2462], now have 1 active actors on pipe
2025-05-07T08:51:15.547430Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2874: [PQ: 72057594037928139] server connected, pipe [3:549:2462], now have 1 active actors on pipe
2025-05-07T08:51:15.548389Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2874: [PQ: 72057594037928139] server connected, pipe [3:562:2473], now have 1 active actors on pipe
2025-05-07T08:51:15.578958Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:3088: [PQ: 72057594037928139] Handle TEvInterconnect::TEvNodeInfo
2025-05-07T08:51:15.581653Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:3120: [PQ: 72057594037928139] Transactions request. From tx_00000000000000000000, To tx_18446744073709551615
2025-05-07T08:51:15.582146Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:745: [PQ: 72057594037928139] doesn't have tx info
2025-05-07T08:51:15.582215Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:757: [PQ: 72057594037928139] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0
2025-05-07T08:51:15.582392Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4846: [PQ: 72057594037928139] Txs.size=0, PlannedTxs.size=0
2025-05-07T08:51:15.583034Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037928139] disable metering: reason# billing is not enabled in BillingMeteringConfig
2025-05-07T08:51:15.583102Z node 3 :PERSQUEUE INFO: pq_impl.cpp:794: [PQ: 72057594037928139] doesn't have tx writes info
2025-05-07T08:51:15.583251Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitConfigStep
2025-05-07T08:51:15.583731Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitInternalFieldsStep
2025-05-07T08:51:15.583962Z node 3 :PERSQUEUE INFO: partition_init.cpp:879: [PQ: 72057594037928139, Partition: 2, State: StateInit] bootstrapping 2 [3:619:2518]
2025-05-07T08:51:15.586437Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitDiskStatusStep
2025-05-07T08:51:15.588126Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitMetaStep
2025-05-07T08:51:15.588520Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitInfoRangeStep
2025-05-07T08:51:15.588931Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitDataRangeStep
2025-05-07T08:51:15.589262Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitDataStep
2025-05-07T08:51:15.589322Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitEndWriteTimestampStep
2025-05-07T08:51:15.589387Z node 3 :PERSQUEUE INFO: partition_init.cpp:773: [rt3.dc1--topic2:2:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized.
2025-05-07T08:51:15.589436Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--topic2:2:Initializer] Initializing completed.
2025-05-07T08:51:15.589499Z node 3 :PERSQUEUE INFO: partition.cpp:557: [PQ: 72057594037928139, Partition: 2, State: StateInit] init complete for topic 'rt3.dc1--topic2' partition 2 generation 3 [3:619:2518]
2025-05-07T08:51:15.589567Z node 3 :PERSQUEUE DEBUG: partition.cpp:571: [PQ: 72057594037928139, Partition: 2, State: StateInit] SYNC INIT topic rt3.dc1--topic2 partitition 2 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0
2025-05-07T08:51:15.589626Z node 3 :PERSQUEUE DEBUG: partition.cpp:3847: [PQ: 72057594037928139, Partition: 2, State: StateIdle] Process pending events. Count 0
2025-05-07T08:51:15.590913Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2899: [PQ: 72057594037928037] server disconnected, pipe [3:545:2461] destroyed
2025-05-07T08:51:15.590989Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2899: [PQ: 72057594037928137] server disconnected, pipe [3:547:2462] destroyed
2025-05-07T08:51:15.591107Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2899: [PQ: 72057594037928138] server disconnected, pipe [3:548:2462] destroyed
RESPONSE Status: 1 ErrorCode: OK MetaResponse { CmdGetReadSessionsInfoResult { TopicResult { Topic: "rt3.dc1--topic2" PartitionResult { Partition: 0 ClientOffset: 0 StartOffset: 0 EndOffset: 0 TimeLag: 0 TabletNode: "::1" ClientReadOffset: 0 ReadTimeLag: 0 TabletNodeId: 3 ErrorCode: OK } PartitionResult { Partition: 1 ClientOffset: 0 StartOffset: 0 EndOffset: 0 TimeLag: 0 TabletNode: "::1" ClientReadOffset: 0 ReadTimeLag: 0 TabletNodeId: 3 ErrorCode: OK } PartitionResult { Partition: 2 ErrorCode: INITIALIZING ErrorReason: "tablet for partition is not running" } ErrorCode: OK } TopicResult { Topic: "rt3.dc1--topic1" PartitionResult { Partition: 0 ClientOffset: 0 StartOffset: 0 EndOffset: 0 TimeLag: 0 TabletNode: "::1" ClientReadOffset: 0 ReadTimeLag: 0 TabletNodeId: 3 ErrorCode: OK } ErrorCode: OK } } }
------- [TM] {asan, default-linux-x86_64, release} ydb/core/security/ut/unittest >> TTicketParserTest::NebiusAccessKeySignatureUnsupported [GOOD]
Test command err:
2025-05-07T08:50:36.175003Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501623536485365861:2077];send_to=[0:7307199536658146131:7762515];
2025-05-07T08:50:36.182643Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message;
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003f44/r3tmp/tmpLiVRU8/pdisk_1.dat
2025-05-07T08:50:36.801476Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2025-05-07T08:50:36.801623Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting
2025-05-07T08:50:36.810756Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected
2025-05-07T08:50:36.850356Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded
TServer::EnableGrpc on GrpcPort 12579, node 1
2025-05-07T08:50:37.046918Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe)
2025-05-07T08:50:37.046940Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe)
2025-05-07T08:50:37.046947Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe)
2025-05-07T08:50:37.047066Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration
TClient is connected to server localhost:6647
WaitRootIsUp 'Root'...
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:50:37.679302Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:50:37.699627Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T08:50:37.943384Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1486: Updated state for /Root keys 2025-05-07T08:50:37.952947Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:758: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-05-07T08:50:37.952991Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:763: CanInitLoginToken, target database candidates(1): /Root 2025-05-07T08:50:37.953636Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1802: Ticket eyJh****nGAg (CFCDF421) () has now retryable error message 'Security state is empty' 2025-05-07T08:50:37.953868Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:758: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-05-07T08:50:37.953883Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:763: CanInitLoginToken, target database candidates(1): /Root 2025-05-07T08:50:37.954196Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1802: Ticket eyJh****nGAg (CFCDF421) () has now retryable error message 'Security state is empty' 2025-05-07T08:50:37.954211Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:779: CanInitLoginToken, database /Root, A2 error Security state is empty 2025-05-07T08:50:37.954223Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:779: CanInitLoginToken, database /Root, A2 error Security state is empty 2025-05-07T08:50:37.954248Z node 1 :TICKET_PARSER ERROR: ticket_parser_impl.h:969: Ticket eyJh****nGAg (CFCDF421): Security state is empty 2025-05-07T08:50:40.213044Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1506: Refreshing ticket eyJh****nGAg (CFCDF421) 2025-05-07T08:50:40.213519Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:758: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-05-07T08:50:40.213579Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:763: CanInitLoginToken, target database candidates(1): /Root 2025-05-07T08:50:40.213883Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1802: Ticket eyJh****nGAg (CFCDF421) () has now retryable error message 'Security state is empty' 2025-05-07T08:50:40.213896Z node 1 :TICKET_PARSER 
TRACE: ticket_parser_impl.h:779: CanInitLoginToken, database /Root, A2 error Security state is empty 2025-05-07T08:50:40.962185Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1486: Updated state for /Root keys 1 2025-05-07T08:50:41.182122Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501623536485365861:2077];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:50:41.182205Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:50:43.226102Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1506: Refreshing ticket eyJh****nGAg (CFCDF421) 2025-05-07T08:50:43.226290Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:758: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-05-07T08:50:43.226302Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:763: CanInitLoginToken, target database candidates(1): /Root 2025-05-07T08:50:43.227117Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1784: Ticket eyJh****nGAg (CFCDF421) () has now valid token of user1 2025-05-07T08:50:43.227134Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:806: CanInitLoginToken, database /Root, A4 success test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003f44/r3tmp/tmpKzueo2/pdisk_1.dat 2025-05-07T08:50:48.816379Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:50:48.903674Z node 2 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:50:48.909169Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:50:48.909255Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:50:48.911126Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 30755, node 2 2025-05-07T08:50:49.062305Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:50:49.062327Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:50:49.062333Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:50:49.062442Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:29814 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:50:49.409713Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:50:49.422936Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-05-07T08:50:49.430151Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:563: Ticket **** (8E120919) asking for AccessServiceAuthentication 2025-05-07T08:50:49.430255Z node 2 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [51700007c508] Connect to grpc://localhost:14280 2025-05-07T08:50:49.432875Z node 2 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [51700007c508] Request AuthenticateRequest { iam_token: "**** (8E120919)" } NebiusAccessService::Authenticate request iam_token: "user1" NebiusAccessService::Authenticate response account { user_account { id: "user1" } } 0: "" 2025-05-07T08:50:49.450823Z node 2 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [51700007c508] Response AuthenticateResponse { account { user_account { id: "user1" } } } 2025-05-07T08:50:49.452258Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1784: Ticket **** (8E120919) () has now valid token of user1@as 2025-05-07T08:50:53.720475Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7501623611379341658:2212];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003f44/r3tmp/tmpip6lxO/pdisk_1.dat 2025-05-07T08:50:53.793436Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-05-07T08:50:53.962203Z node 3 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:50:53.968289Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:50:53.968425Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:50:53.970284Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 5716, node 3 2025-05-07T08:50:54.023285 ... 
5170000f0088] Request AuthenticateRequest { iam_token: "**** (8E120919)" } NebiusAccessService::Authenticate request iam_token: "user1" 2025-05-07T08:50:54.354736Z node 3 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [5170000f0088] Status 14 Service Unavailable 2025-05-07T08:50:54.355116Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1802: Ticket **** (8E120919) () has now retryable error message 'Service Unavailable' 2025-05-07T08:50:54.355135Z node 3 :TICKET_PARSER TRACE: ticket_parser_impl.h:563: Ticket **** (8E120919) asking for AccessServiceAuthentication 2025-05-07T08:50:54.355238Z node 3 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [5170000f0088] Request AuthenticateRequest { iam_token: "**** (8E120919)" } NebiusAccessService::Authenticate request iam_token: "user1" 2025-05-07T08:50:54.357123Z node 3 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [5170000f0088] Status 14 Service Unavailable 2025-05-07T08:50:54.357236Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1802: Ticket **** (8E120919) () has now retryable error message 'Service Unavailable' 2025-05-07T08:50:55.606652Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1506: Refreshing ticket **** (8E120919) 2025-05-07T08:50:55.606729Z node 3 :TICKET_PARSER TRACE: ticket_parser_impl.h:563: Ticket **** (8E120919) asking for AccessServiceAuthentication 2025-05-07T08:50:55.606895Z node 3 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [5170000f0088] Request AuthenticateRequest { iam_token: "**** (8E120919)" } NebiusAccessService::Authenticate request iam_token: "user1" 2025-05-07T08:50:55.609847Z node 3 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [5170000f0088] Status 14 Service Unavailable 2025-05-07T08:50:55.610248Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1802: Ticket **** (8E120919) () has now retryable error message 'Service Unavailable' 2025-05-07T08:50:57.610091Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1506: Refreshing ticket **** (8E120919) 2025-05-07T08:50:57.610133Z node 3 :TICKET_PARSER TRACE: ticket_parser_impl.h:563: Ticket **** (8E120919) asking for AccessServiceAuthentication 2025-05-07T08:50:57.610259Z node 3 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [5170000f0088] Request AuthenticateRequest { iam_token: "**** (8E120919)" } NebiusAccessService::Authenticate request iam_token: "user1" NebiusAccessService::Authenticate response account { user_account { id: "user1" } } 0: "" 2025-05-07T08:50:57.612526Z node 3 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [5170000f0088] Response AuthenticateResponse { account { user_account { id: "user1" } } } 2025-05-07T08:50:57.612766Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1784: Ticket **** (8E120919) () has now valid token of user1@as 2025-05-07T08:50:58.630089Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7501623611379341658:2212];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:50:58.630172Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:51:06.967024Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7501623666865211658:2063];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:51:06.967073Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path 
existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003f44/r3tmp/tmpaGaLoa/pdisk_1.dat 2025-05-07T08:51:07.142602Z node 4 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:51:07.159094Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:51:07.159216Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:51:07.163883Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 13044, node 4 2025-05-07T08:51:07.286631Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:51:07.286655Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:51:07.286663Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:51:07.286796Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:61198 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:51:07.572330Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T08:51:07.579272Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-05-07T08:51:07.581443Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:758: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-05-07T08:51:07.581490Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:763: CanInitLoginToken, target database candidates(1): /Root 2025-05-07T08:51:07.581499Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:816: CanInitLoginToken, database /Root, A6 error 2025-05-07T08:51:07.581527Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:563: Ticket **** (8E120919) asking for AccessServiceAuthentication 2025-05-07T08:51:07.581589Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [517000100388] Connect to grpc://localhost:24495 2025-05-07T08:51:07.582436Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000100388] Request AuthenticateRequest { iam_token: "**** (8E120919)" } NebiusAccessService::Authenticate request iam_token: "user1" NebiusAccessService::Authenticate response 14: "Service Unavailable" 2025-05-07T08:51:07.595928Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [517000100388] Status 14 Service Unavailable 2025-05-07T08:51:07.596098Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1802: Ticket **** (8E120919) () has now retryable error message 'Service Unavailable' 2025-05-07T08:51:07.596123Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:563: Ticket **** (8E120919) asking for AccessServiceAuthentication 2025-05-07T08:51:07.596271Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000100388] Request AuthenticateRequest { iam_token: "**** (8E120919)" } NebiusAccessService::Authenticate request iam_token: "user1" NebiusAccessService::Authenticate response account { user_account { id: "user1" } } 0: "" 2025-05-07T08:51:07.599652Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [517000100388] Response AuthenticateResponse { account { user_account { id: "user1" } } } 2025-05-07T08:51:07.601277Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1784: Ticket **** (8E120919) () has now valid token of user1@as 2025-05-07T08:51:11.137275Z node 5 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[5:7501623687227896400:2063];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:51:11.137411Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003f44/r3tmp/tmpUQrKY8/pdisk_1.dat 2025-05-07T08:51:11.263515Z node 5 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:51:11.290361Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:51:11.290488Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:51:11.292172Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 18604, node 5 2025-05-07T08:51:11.386872Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 
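[editor's note] The HIVE warnings above walk each node through VolatileState transitions (Unknown -> Disconnected -> Connecting -> Connected once TEvNodeConnected arrives). The toy state tracker below reproduces just that logging shape; the real node bookkeeping in ydb/core/mind is far richer, and these type names are hypothetical:

```cpp
#include <iostream>

// Hypothetical reduction of the node-state tracking seen in the HIVE log
// lines; only the transition logging is modeled.
enum class EVolatileState { Unknown, Disconnected, Connecting, Connected };

const char* ToString(EVolatileState s) {
    switch (s) {
        case EVolatileState::Unknown:      return "Unknown";
        case EVolatileState::Disconnected: return "Disconnected";
        case EVolatileState::Connecting:   return "Connecting";
        case EVolatileState::Connected:    return "Connected";
    }
    return "?";
}

struct TNodeState {
    EVolatileState State = EVolatileState::Unknown;

    void Become(EVolatileState next) {
        // Emit the transition in the same shape as the HIVE warnings above.
        std::cout << "VolatileState: " << ToString(State)
                  << " -> " << ToString(next) << "\n";
        State = next;
    }
};

int main() {
    TNodeState node;
    node.Become(EVolatileState::Disconnected); // node known but not reachable yet
    node.Become(EVolatileState::Connecting);   // interconnect session being set up
    node.Become(EVolatileState::Connected);    // TEvNodeConnected received
}
```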
2025-05-07T08:51:11.386898Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:51:11.386908Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:51:11.387034Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:62960 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:51:11.678490Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:51:11.687431Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-05-07T08:51:11.690067Z node 5 :TICKET_PARSER ERROR: ticket_parser_impl.h:914: Ticket AKIA****MPLE (B3EDC139): Access key signature is not supported >> TMessageBusServerPersQueueGetPartitionStatusMetaRequestTest::FailsOnZeroBalancerTabletIdInGetNodeRequest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> KqpSystemView::QueryStatsSimple [GOOD] Test command err: Trying to start YDB, gRPC: 63176, MsgBus: 16811 2025-05-07T08:49:54.933992Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501623356503368921:2074];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:49:54.934040Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-05-07T08:49:54.994461Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7501623355037403672:2073];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:49:54.994531Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-05-07T08:49:55.003792Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7501623360940220988:2074];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:49:55.003883Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path 
existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003035/r3tmp/tmpCQ7KYb/pdisk_1.dat 2025-05-07T08:49:55.523858Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:49:55.530604Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:49:55.530725Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:49:55.540417Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:49:55.540498Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:49:55.540691Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:49:55.540741Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:49:55.544283Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:49:55.551222Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 3 Cookie 3 2025-05-07T08:49:55.551279Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-05-07T08:49:55.552989Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:49:55.555052Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 63176, node 1 2025-05-07T08:49:55.689629Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:49:55.689669Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:49:55.689683Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:49:55.689862Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:16811 TClient is connected to server localhost:16811 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
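[editor's note] The TClient::Ls / WaitRootIsUp exchange above is a poll-until-ready loop: the test client describes 'Root' repeatedly until the schemeshard answers SUCCESS. A hedged sketch of that pattern with made-up names (the real test client lives elsewhere in the tree):

```cpp
#include <chrono>
#include <functional>
#include <iostream>
#include <thread>

// Sketch of the WaitRootIsUp pattern from the log: poll a describe call
// until it reports success or an overall deadline expires.
bool WaitUntilReady(const std::function<bool()>& describeRoot,
                    std::chrono::milliseconds pollInterval,
                    std::chrono::seconds deadline) {
    const auto start = std::chrono::steady_clock::now();
    while (std::chrono::steady_clock::now() - start < deadline) {
        if (describeRoot()) {
            return true;                    // "WaitRootIsUp 'Root' success."
        }
        std::this_thread::sleep_for(pollInterval);
    }
    return false;                           // schemeshard never came up
}

int main() {
    int attempts = 0;
    auto lsRoot = [&attempts] { return ++attempts >= 4; }; // stub describe call
    std::cout << (WaitUntilReady(lsRoot, std::chrono::milliseconds(50),
                                 std::chrono::seconds(5))
                      ? "root is up\n"
                      : "timed out\n");
}
```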
2025-05-07T08:49:56.445927Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:56.497301Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:56.798530Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:57.111864Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:57.229330Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:59.564647Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623377978207373:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:59.564751Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:59.905852Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T08:49:59.938502Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501623356503368921:2074];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:49:59.939033Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:49:59.979900Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T08:49:59.996202Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7501623355037403672:2073];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:49:59.996258Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:50:00.003944Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7501623360940220988:2074];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:50:00.004007Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:50:00.060381Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T08:50:00.133654Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T08:50:00.190777Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T08:50:00.265393Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T08:50:00.360465Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T08:50:00.512934Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623382273175531:2408], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:50:00.513009Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:50:00.513212Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623382273175536:2411], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:50:00.517548Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-05-07T08:50:00.567425Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501623382273175538:2412], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T08:50:00.629530Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501623382273175617:4250] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:50:02.406720Z node 1 ... workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:50:56.434836Z node 13 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1746607856391, txId: 281474976715674] shutting down Trying to start YDB, gRPC: 26967, MsgBus: 5085 2025-05-07T08:51:00.218539Z node 16 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[16:7501623639511621445:2076];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:51:00.292646Z node 18 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[18:7501623638657058992:2063];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:51:00.218665Z node 16 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-05-07T08:51:00.318045Z node 18 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003035/r3tmp/tmp1AhD7x/pdisk_1.dat 2025-05-07T08:51:00.721671Z node 17 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:51:00.929016Z node 16 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:51:00.983915Z node 16 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(16, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:51:00.984032Z node 16 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(16, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:51:00.987356Z node 16 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(18, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:51:00.987463Z node 16 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(18, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:51:00.987708Z node 16 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(17, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:51:00.987769Z node 16 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(17, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:51:00.991113Z node 16 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(16, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:51:00.994377Z node 16 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 18 Cookie 18 2025-05-07T08:51:00.994416Z node 16 :HIVE WARN: 
hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 17 Cookie 17 2025-05-07T08:51:00.996118Z node 16 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(18, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:51:00.996385Z node 16 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(17, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 26967, node 16 2025-05-07T08:51:01.426812Z node 16 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:51:01.426848Z node 16 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:51:01.426861Z node 16 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:51:01.427054Z node 16 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:5085 TClient is connected to server localhost:5085 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:51:02.563852Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:51:02.641269Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:51:02.871807Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:51:03.666773Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:51:04.029311Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 
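[editor's note] The TPoolCreatorActor retry seen earlier in this block ("Transaction ... completed, doublechecking" followed by the TX_PROXY line "path exist, request accepts it") illustrates idempotent creation: when a concurrent creator wins the race, "already exists" is accepted as success. An illustrative reduction, assuming hypothetical status codes rather than the YDB scheme API:

```cpp
#include <iostream>
#include <set>
#include <string>

// Invented status codes; the point is only the accept-existing decision.
enum class EStatus { Ok, AlreadyExists, Error };

EStatus CreatePath(std::set<std::string>& scheme, const std::string& path) {
    return scheme.insert(path).second ? EStatus::Ok : EStatus::AlreadyExists;
}

bool EnsurePath(std::set<std::string>& scheme, const std::string& path) {
    switch (CreatePath(scheme, path)) {
        case EStatus::Ok:            return true;  // we created it
        case EStatus::AlreadyExists: return true;  // someone else did; accept it
        default:                     return false; // a real failure
    }
}

int main() {
    std::set<std::string> scheme;
    const std::string pool = "/Root/.metadata/workload_manager/pools/default";
    std::cout << EnsurePath(scheme, pool) << "\n"; // 1: created
    std::cout << EnsurePath(scheme, pool) << "\n"; // 1: already there, accepted
}
```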
2025-05-07T08:51:05.226110Z node 16 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[16:7501623639511621445:2076];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:51:05.226213Z node 16 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:51:05.282601Z node 18 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[18:7501623638657058992:2063];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:51:05.299500Z node 18 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:51:08.373475Z node 16 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [16:7501623673871361877:2372], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:51:08.373594Z node 16 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:51:08.473191Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T08:51:08.567755Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T08:51:08.681450Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T08:51:08.788094Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T08:51:08.863593Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T08:51:08.954425Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T08:51:09.081214Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T08:51:09.306465Z node 16 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [16:7501623678166330045:2415], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:51:09.306643Z node 16 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:51:09.314214Z node 16 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [16:7501623678166330050:2418], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:51:09.326735Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-05-07T08:51:09.372088Z node 16 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [16:7501623678166330052:2419], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T08:51:09.463839Z node 16 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [16:7501623678166330133:4262] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:51:12.027879Z node 16 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1746607872011, txId: 281474976710674] shutting down >> TSubDomainTest::CoordinatorRunAtSubdomainNodeWhenAvailable2 [GOOD] >> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::FailsOnEmptyTopicName [GOOD] >> TMessageBusServerPersQueueGetTopicMetadataMetaRequestTest::SuccessfullyReplies [GOOD] >> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::HandlesTimeout ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/server/ut/unittest >> TMessageBusServerPersQueueGetPartitionStatusMetaRequestTest::FailsOnDuplicatedPartition [GOOD] Test command err: Assert failed: Check response: { Status: 128 ErrorReason: "topic \'Root/PQ\' describe error, Status# LookupError, Marker# PQ1" ErrorCode: ERROR } Assert failed: Check response: { Status: 128 ErrorReason: "TopicRequest must have Topic field." ErrorCode: BAD_REQUEST } Assert failed: Check response: { Status: 128 ErrorReason: "multiple TopicRequest for topic \'rt3.dc1--topic1\'" ErrorCode: BAD_REQUEST } Assert failed: Check response: { Status: 128 ErrorReason: "multiple partition 2 in TopicRequest for topic \'rt3.dc1--topic2\'" ErrorCode: BAD_REQUEST } >> TMessageBusServerPersQueueGetPartitionLocationsMetaRequestTest::HandlesPipeDisconnection_DisconnectionComesFirst [GOOD] >> TMessageBusServerPersQueueGetPartitionLocationsMetaRequestTest::HandlesPipeDisconnection_AnswerDoesNotArrive >> TMessageBusServerPersQueueGetPartitionStatusMetaRequestTest::FailsOnZeroBalancerTabletIdInGetNodeRequest [GOOD] >> TMessageBusServerPersQueueGetPartitionStatusMetaRequestTest::HandlesPipeDisconnection_DisconnectionComesFirst >> TPartitionChooserSuite::TPartitionChooserActor_SplitMergeEnabled_SourceId_OldPartitionExists_NotBoundary_Test [GOOD] >> TPartitionChooserSuite::TPartitionChooserActor_SplitMergeEnabled_PreferedPartition_Active_Test ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/server/ut/unittest >> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::FailsOnEmptyTopicName [GOOD] Test command err: Assert failed: Check response: { Status: 128 ErrorReason: "no path \'/Root/PQ/\', Marker# PQ17" ErrorCode: UNKNOWN_TOPIC } Assert failed: Check response: { Status: 128 ErrorReason: "no path \'Root/PQ\', Marker# PQ150" ErrorCode: UNKNOWN_TOPIC } Assert failed: Check response: { Status: 128 ErrorReason: "topic \'rt3.dc1--topic1\' has no balancer, Marker# PQ193" ErrorCode: UNKNOWN_TOPIC } Assert failed: Check response: { Status: 128 ErrorReason: "TopicRequest must have Topic field." 
ErrorCode: BAD_REQUEST } >> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::HandlesTimeout [GOOD] >> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::SuccessfullyPassesResponsesFromTablets >> TMessageBusServerPersQueueGetPartitionStatusMetaRequestTest::FailsOnFailedGetAllTopicsRequest >> IncrementalBackup::SimpleRestore [GOOD] >> IncrementalBackup::SimpleBackupBackupCollection+WithIncremental ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/server/ut/unittest >> TMessageBusServerPersQueueGetTopicMetadataMetaRequestTest::SuccessfullyReplies [GOOD] Test command err: Assert failed: Check response: { Status: 130 ErrorReason: "Timeout while waiting for response, may be just slow, Marker# PQ16" ErrorCode: ERROR } Assert failed: Check response: { Status: 128 ErrorReason: "topic \'rt3.dc1--topic1\' is not created, Marker# PQ94" ErrorCode: UNKNOWN_TOPIC } >> TMessageBusServerPersQueueGetPartitionLocationsMetaRequestTest::HandlesPipeDisconnection_AnswerDoesNotArrive [GOOD] >> TMessageBusServerPersQueueGetPartitionStatusMetaRequestTest::FailsOnFailedGetAllTopicsRequest [GOOD] >> TMessageBusServerPersQueueGetPartitionStatusMetaRequestTest::FailsOnNotOkStatusInGetNodeRequest >> TMessageBusServerPersQueueGetPartitionStatusMetaRequestTest::HandlesPipeDisconnection_DisconnectionComesFirst [GOOD] >> TMessageBusServerPersQueueGetPartitionStatusMetaRequestTest::HandlesPipeDisconnection_AnswerDoesNotArrive >> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::SuccessfullyPassesResponsesFromTablets [GOOD] >> TMessageBusServerPersQueueGetPartitionStatusMetaRequestTest::FailsOnBadRootStatusInGetNodeRequest >> IncrementalBackup::SimpleRestoreBackupCollection+WithIncremental [GOOD] >> IncrementalBackup::SimpleRestoreBackupCollection-WithIncremental ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/server/ut/unittest >> TMessageBusServerPersQueueGetPartitionLocationsMetaRequestTest::HandlesPipeDisconnection_AnswerDoesNotArrive [GOOD] Test command err: Assert failed: Check response: { Status: 128 ErrorReason: "no path \'Root/PQ\', Marker# PQ150" ErrorCode: UNKNOWN_TOPIC } Assert failed: Check response: { Status: 128 ErrorReason: "topic \'rt3.dc1--topic1\' is not created, Marker# PQ94" ErrorCode: UNKNOWN_TOPIC } 2025-05-07T08:51:16.508181Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:3088: [PQ: 72057594037928037] Handle TEvInterconnect::TEvNodeInfo 2025-05-07T08:51:16.513037Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:3120: [PQ: 72057594037928037] Transactions request. 
From tx_00000000000000000000, To tx_18446744073709551615 2025-05-07T08:51:16.513411Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:745: [PQ: 72057594037928037] doesn't have tx info 2025-05-07T08:51:16.513471Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:757: [PQ: 72057594037928037] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-05-07T08:51:16.513533Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:969: [PQ: 72057594037928037] no config, start with empty partitions and default config 2025-05-07T08:51:16.513605Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4846: [PQ: 72057594037928037] Txs.size=0, PlannedTxs.size=0 2025-05-07T08:51:16.513658Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037928037] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-05-07T08:51:16.513726Z node 3 :PERSQUEUE INFO: pq_impl.cpp:794: [PQ: 72057594037928037] doesn't have tx writes info 2025-05-07T08:51:16.514338Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2874: [PQ: 72057594037928037] server connected, pipe [3:259:2251], now have 1 active actors on pipe 2025-05-07T08:51:16.514441Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:1454: [PQ: 72057594037928037] Handle TEvPersQueue::TEvUpdateConfig 2025-05-07T08:51:16.531940Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:1640: [PQ: 72057594037928037] Config update version 1(current 0) received from actor [3:99:2134] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic1" Version: 1 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 } 2025-05-07T08:51:16.535184Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:585: [PQ: 72057594037928037] Apply new config CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic1" Version: 1 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 } 2025-05-07T08:51:16.535344Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037928037] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-05-07T08:51:16.536253Z node 3 :PERSQUEUE INFO: pq_impl.cpp:1481: [PQ: 72057594037928037] Config applied version 1 actor [3:99:2134] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic1" Version: 1 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 } 2025-05-07T08:51:16.536417Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic1:0:Initializer] Start initializing step TInitConfigStep 2025-05-07T08:51:16.536843Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic1:0:Initializer] Start initializing step TInitInternalFieldsStep 2025-05-07T08:51:16.537560Z node 3 :PERSQUEUE INFO: partition_init.cpp:879: [PQ: 72057594037928037, Partition: 0, State: StateInit] bootstrapping 0 [3:267:2257] 2025-05-07T08:51:16.540567Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--topic1:0:Initializer] Initializing completed. 
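[editor's note] The PERSQUEUE trace above runs each partition through a fixed chain of initialization steps (TInitConfigStep, TInitInternalFieldsStep, ... ending with "Initializing completed."). A minimal sketch of such a step-chain driver; the step names mirror the log, but the driver logic is invented:

```cpp
#include <functional>
#include <iostream>
#include <string>
#include <vector>

// One named initialization stage; Run returns false to abort the chain.
struct TInitStep {
    std::string Name;
    std::function<bool()> Run;
};

bool RunInitializer(const std::vector<TInitStep>& steps) {
    for (const auto& step : steps) {
        std::cout << "Start initializing step " << step.Name << "\n";
        if (!step.Run()) {
            return false;                    // stop on the first failing stage
        }
    }
    std::cout << "Initializing completed.\n";
    return true;
}

int main() {
    auto ok = [] { return true; };
    RunInitializer({{"TInitConfigStep", ok},
                    {"TInitInternalFieldsStep", ok},
                    {"TInitDiskStatusStep", ok},
                    {"TInitMetaStep", ok},
                    {"TInitDataStep", ok},
                    {"TInitEndWriteTimestampStep", ok}});
}
```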
2025-05-07T08:51:16.540653Z node 3 :PERSQUEUE INFO: partition.cpp:557: [PQ: 72057594037928037, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--topic1' partition 0 generation 2 [3:267:2257] 2025-05-07T08:51:16.540737Z node 3 :PERSQUEUE DEBUG: partition.cpp:571: [PQ: 72057594037928037, Partition: 0, State: StateInit] SYNC INIT topic rt3.dc1--topic1 partitition 0 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-05-07T08:51:16.540834Z node 3 :PERSQUEUE DEBUG: partition.cpp:3847: [PQ: 72057594037928037, Partition: 0, State: StateIdle] Process pending events. Count 0 2025-05-07T08:51:16.541829Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2874: [PQ: 72057594037928037] server connected, pipe [3:270:2259], now have 1 active actors on pipe 2025-05-07T08:51:16.604475Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:3088: [PQ: 72057594037928137] Handle TEvInterconnect::TEvNodeInfo 2025-05-07T08:51:16.609577Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:3120: [PQ: 72057594037928137] Transactions request. From tx_00000000000000000000, To tx_18446744073709551615 2025-05-07T08:51:16.610025Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:745: [PQ: 72057594037928137] doesn't have tx info 2025-05-07T08:51:16.610081Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:757: [PQ: 72057594037928137] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-05-07T08:51:16.610139Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:969: [PQ: 72057594037928137] no config, start with empty partitions and default config 2025-05-07T08:51:16.610184Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4846: [PQ: 72057594037928137] Txs.size=0, PlannedTxs.size=0 2025-05-07T08:51:16.610231Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037928137] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-05-07T08:51:16.610287Z node 3 :PERSQUEUE INFO: pq_impl.cpp:794: [PQ: 72057594037928137] doesn't have tx writes info 2025-05-07T08:51:16.611018Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2874: [PQ: 72057594037928137] server connected, pipe [3:405:2360], now have 1 active actors on pipe 2025-05-07T08:51:16.611142Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:1454: [PQ: 72057594037928137] Handle TEvPersQueue::TEvUpdateConfig 2025-05-07T08:51:16.611343Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:1640: [PQ: 72057594037928137] Config update version 2(current 0) received from actor [3:99:2134] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic2" Version: 2 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 } 2025-05-07T08:51:16.613791Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:585: [PQ: 72057594037928137] Apply new config CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic2" Version: 2 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 } 2025-05-07T08:51:16.613933Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037928137] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-05-07T08:51:16.614800Z node 3 :PERSQUEUE INFO: pq_impl.cpp:1481: [PQ: 72057594037928137] Config applied version 2 actor [3:99:2134] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 
MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic2" Version: 2 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 } 2025-05-07T08:51:16.614951Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:0:Initializer] Start initializing step TInitConfigStep 2025-05-07T08:51:16.615302Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:0:Initializer] Start initializing step TInitInternalFieldsStep 2025-05-07T08:51:16.615536Z node 3 :PERSQUEUE INFO: partition_init.cpp:879: [PQ: 72057594037928137, Partition: 0, State: StateInit] bootstrapping 0 [3:413:2366] 2025-05-07T08:51:16.617828Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--topic2:0:Initializer] Initializing completed. 2025-05-07T08:51:16.617919Z node 3 :PERSQUEUE INFO: partition.cpp:557: [PQ: 72057594037928137, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--topic2' partition 0 generation 2 [3:413:2366] 2025-05-07T08:51:16.618032Z node 3 :PERSQUEUE DEBUG: partition.cpp:571: [PQ: 72057594037928137, Partition: 0, State: StateInit] SYNC INIT topic rt3.dc1--topic2 partitition 0 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-05-07T08:51:16.618094Z node 3 :PERSQUEUE DEBUG: partition.cpp:3847: [PQ: 72057594037928137, Partition: 0, State: StateIdle] Process pending events. Count 0 2025-05-07T08:51:16.619011Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2874: [PQ: 72057594037928137] server connected, pipe [3:416:2368], now have 1 active actors on pipe 2025-05-07T08:51:16.648089Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:3088: [PQ: 72057594037928138] Handle TEvInterconnect::TEvNodeInfo 2025-05-07T08:51:16.663129Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:3120: [PQ: 72057594037928138] Transactions request. 
From tx_00000000000000000000, To tx_18446744073709551615 2025-05-07T08:51:16.663557Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:745: [PQ: 72057594037928138] doesn't have tx info 2025-05-07T08:51:16.663617Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:757: [PQ: 72057594037928138] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-05-07T08:51:16.663667Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:969: [PQ: 72057594037928138] no config, start with empty partitions and default config 2025-05-07T08:51:16.663735Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4846: [PQ: 72057594037928138] Txs.size=0, PlannedTxs.size=0 2025-05-07T08:51:16.663801Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037928138] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-05-07T08:51:16.663880Z node 3 :PERSQUEUE INFO: pq_impl.cpp:794: [PQ: 72057594037928138] doesn't have tx writes info 2025-05-07T08:51:16.664722Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2874: [PQ: 72057594037928138] server connected, pipe [3:465:2405], now have 1 active actors on pipe 2025-05-07T08:51:16.664795Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:1454: [PQ: 72057594037928138] Handle TEvPersQueue::TEvUpdateConfig 2025-05-07T08:51:16.664991Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:1640: [PQ: 72057594037928138] Config update version 3(current 0) received from actor [3:99:2134] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 1 TopicName: "rt3.dc1--topic2" Version: 3 Partitions { PartitionId: 1 } AllPartitions { PartitionId: 1 } 2025-05-07T08:51:16.671576Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:585: [PQ: 72057594037928138] Apply new config CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 1 TopicName: "rt3.dc1--topic2" Version: 3 Partitions { PartitionId: 1 } AllPartitions { PartitionId: 1 } 2025-05-07T08:51:16.671747Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037928138] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-05-07T08:51:16.672715Z node 3 :PERSQUEUE INFO: pq_impl.cpp:1481: [PQ: 72057594037928138] Config applied version 3 actor [3:99:2134] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 ... 
eSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 1 TopicName: "rt3.dc1--topic2" Version: 7 Partitions { PartitionId: 1 } AllPartitions { PartitionId: 1 } 2025-05-07T08:51:17.513330Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:585: [PQ: 72057594037928138] Apply new config CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 1 TopicName: "rt3.dc1--topic2" Version: 7 Partitions { PartitionId: 1 } AllPartitions { PartitionId: 1 } 2025-05-07T08:51:17.513481Z node 4 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037928138] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-05-07T08:51:17.514224Z node 4 :PERSQUEUE INFO: pq_impl.cpp:1481: [PQ: 72057594037928138] Config applied version 7 actor [4:99:2134] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 1 TopicName: "rt3.dc1--topic2" Version: 7 Partitions { PartitionId: 1 } AllPartitions { PartitionId: 1 } 2025-05-07T08:51:17.514375Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:1:Initializer] Start initializing step TInitConfigStep 2025-05-07T08:51:17.514847Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:1:Initializer] Start initializing step TInitInternalFieldsStep 2025-05-07T08:51:17.515055Z node 4 :PERSQUEUE INFO: partition_init.cpp:879: [PQ: 72057594037928138, Partition: 1, State: StateInit] bootstrapping 1 [4:473:2411] 2025-05-07T08:51:17.517122Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--topic2:1:Initializer] Initializing completed. 2025-05-07T08:51:17.517199Z node 4 :PERSQUEUE INFO: partition.cpp:557: [PQ: 72057594037928138, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--topic2' partition 1 generation 2 [4:473:2411] 2025-05-07T08:51:17.517258Z node 4 :PERSQUEUE DEBUG: partition.cpp:571: [PQ: 72057594037928138, Partition: 1, State: StateInit] SYNC INIT topic rt3.dc1--topic2 partitition 1 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-05-07T08:51:17.517317Z node 4 :PERSQUEUE DEBUG: partition.cpp:3847: [PQ: 72057594037928138, Partition: 1, State: StateIdle] Process pending events. Count 0 2025-05-07T08:51:17.518251Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2874: [PQ: 72057594037928138] server connected, pipe [4:476:2413], now have 1 active actors on pipe 2025-05-07T08:51:17.547983Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:3088: [PQ: 72057594037928139] Handle TEvInterconnect::TEvNodeInfo 2025-05-07T08:51:17.551902Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:3120: [PQ: 72057594037928139] Transactions request. 
From tx_00000000000000000000, To tx_18446744073709551615 2025-05-07T08:51:17.552294Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:745: [PQ: 72057594037928139] doesn't have tx info 2025-05-07T08:51:17.552351Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:757: [PQ: 72057594037928139] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-05-07T08:51:17.552395Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:969: [PQ: 72057594037928139] no config, start with empty partitions and default config 2025-05-07T08:51:17.552438Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:4846: [PQ: 72057594037928139] Txs.size=0, PlannedTxs.size=0 2025-05-07T08:51:17.552518Z node 4 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037928139] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-05-07T08:51:17.552583Z node 4 :PERSQUEUE INFO: pq_impl.cpp:794: [PQ: 72057594037928139] doesn't have tx writes info 2025-05-07T08:51:17.553311Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2874: [PQ: 72057594037928139] server connected, pipe [4:525:2450], now have 1 active actors on pipe 2025-05-07T08:51:17.553385Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:1454: [PQ: 72057594037928139] Handle TEvPersQueue::TEvUpdateConfig 2025-05-07T08:51:17.553594Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:1640: [PQ: 72057594037928139] Config update version 8(current 0) received from actor [4:99:2134] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 2 TopicName: "rt3.dc1--topic2" Version: 8 Partitions { PartitionId: 2 } AllPartitions { PartitionId: 2 } 2025-05-07T08:51:17.556386Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:585: [PQ: 72057594037928139] Apply new config CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 2 TopicName: "rt3.dc1--topic2" Version: 8 Partitions { PartitionId: 2 } AllPartitions { PartitionId: 2 } 2025-05-07T08:51:17.556534Z node 4 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037928139] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-05-07T08:51:17.557384Z node 4 :PERSQUEUE INFO: pq_impl.cpp:1481: [PQ: 72057594037928139] Config applied version 8 actor [4:99:2134] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 2 TopicName: "rt3.dc1--topic2" Version: 8 Partitions { PartitionId: 2 } AllPartitions { PartitionId: 2 } 2025-05-07T08:51:17.557530Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitConfigStep 2025-05-07T08:51:17.557929Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitInternalFieldsStep 2025-05-07T08:51:17.558199Z node 4 :PERSQUEUE INFO: partition_init.cpp:879: [PQ: 72057594037928139, Partition: 2, State: StateInit] bootstrapping 2 [4:533:2456] 2025-05-07T08:51:17.560288Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--topic2:2:Initializer] Initializing completed. 
2025-05-07T08:51:17.560371Z node 4 :PERSQUEUE INFO: partition.cpp:557: [PQ: 72057594037928139, Partition: 2, State: StateInit] init complete for topic 'rt3.dc1--topic2' partition 2 generation 2 [4:533:2456] 2025-05-07T08:51:17.560442Z node 4 :PERSQUEUE DEBUG: partition.cpp:571: [PQ: 72057594037928139, Partition: 2, State: StateInit] SYNC INIT topic rt3.dc1--topic2 partitition 2 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-05-07T08:51:17.560493Z node 4 :PERSQUEUE DEBUG: partition.cpp:3847: [PQ: 72057594037928139, Partition: 2, State: StateIdle] Process pending events. Count 0 2025-05-07T08:51:17.561263Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2874: [PQ: 72057594037928139] server connected, pipe [4:536:2458], now have 1 active actors on pipe 2025-05-07T08:51:17.562486Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2874: [PQ: 72057594037928138] server connected, pipe [4:545:2462], now have 1 active actors on pipe 2025-05-07T08:51:17.562545Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2874: [PQ: 72057594037928037] server connected, pipe [4:544:2461], now have 1 active actors on pipe 2025-05-07T08:51:17.562593Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2874: [PQ: 72057594037928139] server connected, pipe [4:546:2462], now have 1 active actors on pipe 2025-05-07T08:51:17.573627Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2874: [PQ: 72057594037928139] server connected, pipe [4:551:2466], now have 1 active actors on pipe 2025-05-07T08:51:17.599136Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:3088: [PQ: 72057594037928139] Handle TEvInterconnect::TEvNodeInfo 2025-05-07T08:51:17.601714Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:3120: [PQ: 72057594037928139] Transactions request. From tx_00000000000000000000, To tx_18446744073709551615 2025-05-07T08:51:17.602075Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:745: [PQ: 72057594037928139] doesn't have tx info 2025-05-07T08:51:17.602124Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:757: [PQ: 72057594037928139] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-05-07T08:51:17.602293Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:4846: [PQ: 72057594037928139] Txs.size=0, PlannedTxs.size=0 2025-05-07T08:51:17.603140Z node 4 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037928139] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-05-07T08:51:17.603193Z node 4 :PERSQUEUE INFO: pq_impl.cpp:794: [PQ: 72057594037928139] doesn't have tx writes info 2025-05-07T08:51:17.603316Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitConfigStep 2025-05-07T08:51:17.603645Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitInternalFieldsStep 2025-05-07T08:51:17.603874Z node 4 :PERSQUEUE INFO: partition_init.cpp:879: [PQ: 72057594037928139, Partition: 2, State: StateInit] bootstrapping 2 [4:608:2511] 2025-05-07T08:51:17.605909Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitDiskStatusStep 2025-05-07T08:51:17.607275Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitMetaStep 2025-05-07T08:51:17.607542Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitInfoRangeStep 2025-05-07T08:51:17.607860Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitDataRangeStep 2025-05-07T08:51:17.608251Z node 4 :PERSQUEUE DEBUG: 
partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitDataStep 2025-05-07T08:51:17.608299Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitEndWriteTimestampStep 2025-05-07T08:51:17.608358Z node 4 :PERSQUEUE INFO: partition_init.cpp:773: [rt3.dc1--topic2:2:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-05-07T08:51:17.608403Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--topic2:2:Initializer] Initializing completed. 2025-05-07T08:51:17.608454Z node 4 :PERSQUEUE INFO: partition.cpp:557: [PQ: 72057594037928139, Partition: 2, State: StateInit] init complete for topic 'rt3.dc1--topic2' partition 2 generation 3 [4:608:2511] 2025-05-07T08:51:17.608515Z node 4 :PERSQUEUE DEBUG: partition.cpp:571: [PQ: 72057594037928139, Partition: 2, State: StateInit] SYNC INIT topic rt3.dc1--topic2 partitition 2 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-05-07T08:51:17.608575Z node 4 :PERSQUEUE DEBUG: partition.cpp:3847: [PQ: 72057594037928139, Partition: 2, State: StateIdle] Process pending events. Count 0 2025-05-07T08:51:17.609452Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2899: [PQ: 72057594037928138] server disconnected, pipe [4:545:2462] destroyed 2025-05-07T08:51:17.609527Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2899: [PQ: 72057594037928037] server disconnected, pipe [4:544:2461] destroyed RESPONSE Status: 1 ErrorCode: OK MetaResponse { CmdGetPartitionLocationsResult { TopicResult { Topic: "rt3.dc1--topic2" PartitionLocation { Partition: 1 Host: "::1" HostId: 4 ErrorCode: OK } PartitionLocation { Partition: 2 ErrorCode: INITIALIZING ErrorReason: "Tablet for that partition is not running" } ErrorCode: OK } TopicResult { Topic: "rt3.dc1--topic1" PartitionLocation { Partition: 0 Host: "::1" HostId: 4 ErrorCode: OK } ErrorCode: OK } } } >> TMessageBusServerPersQueueGetPartitionStatusMetaRequestTest::HandlesTimeout >> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::FailsOnZeroBalancerTabletIdInGetNodeRequest >> TMessageBusServerPersQueueGetPartitionLocationsMetaRequestTest::HandlesTimeout ------- [TM] {asan, default-linux-x86_64, release} ydb/core/viewer/ut/unittest >> Viewer::Plan2SvgBad [GOOD] Test command err: 2025-05-07T08:49:29.501661Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501623249977022217:2059];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:49:29.501789Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-05-07T08:49:29.822196Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:49:29.883987Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:49:29.884148Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:49:29.885386Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 12368, node 1 2025-05-07T08:49:29.946498Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, 
broken or outdated, will use file: (empty maybe) 2025-05-07T08:49:29.946541Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:49:29.946555Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:49:29.946699Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:12252 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:49:30.243147Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:30.272973Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710659:0, at schemeshard: 72057594046644480 2025-05-07T08:49:30.276186Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:32.984166Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623262861924820:2347], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:32.984241Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623262861924798:2344], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:32.984383Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:32.988385Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710661:3, at schemeshard: 72057594046644480 2025-05-07T08:49:32.996703Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501623262861924825:2348], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710661 completed, doublechecking } 2025-05-07T08:49:33.073116Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501623267156892172:2354] txid# 281474976710662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:49:35.331147Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7501623274011772105:2135];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:49:35.331204Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-05-07T08:49:35.497709Z node 2 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:49:35.529529Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:49:35.529647Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:49:35.531585Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 26285, node 2 2025-05-07T08:49:35.602724Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:49:35.602754Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:49:35.602762Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:49:35.602916Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:5415 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:49:35.899372Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 
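[Editor's note] The warnings above repeat on every node: the workload service fails to fetch the `default` resource pool (NOT_FOUND), submits an ESchemeOpCreateResourcePool operation, then re-reads the path to double-check, tolerating the benign "path exist, request accepts it" race when a concurrent creator wins. Below is a minimal, self-contained C++ sketch of that create-then-verify retry pattern; the names (fetchPool, createPool, ensureDefaultPool) and the stubbed catalog state are hypothetical illustrations, not YDB's actual API.

#include <chrono>
#include <iostream>
#include <string>
#include <thread>

// Status codes mirroring the trace: NOT_FOUND on the first fetch,
// ALREADY_EXISTS when a concurrent creator wins the race.
enum class EStatus { Ok, NotFound, AlreadyExists };

// Stub catalog state standing in for the real scheme shard: the pool
// does not exist until someone creates it.
static bool g_poolExists = false;

EStatus fetchPool(const std::string&, const std::string&) {
    return g_poolExists ? EStatus::Ok : EStatus::NotFound;
}

EStatus createPool(const std::string&, const std::string&) {
    if (g_poolExists) return EStatus::AlreadyExists;  // benign race
    g_poolExists = true;
    return EStatus::Ok;
}

// Create-then-verify with bounded, growing backoff: the shape behind
// "Scheduled retry for error: ... completed, doublechecking".
bool ensureDefaultPool(const std::string& database, int maxRetries = 5) {
    auto backoff = std::chrono::milliseconds(100);
    for (int attempt = 0; attempt < maxRetries; ++attempt) {
        if (fetchPool(database, "default") == EStatus::Ok) return true;
        EStatus st = createPool(database, "default");
        if (st == EStatus::Ok || st == EStatus::AlreadyExists) {
            continue;  // re-fetch to double-check visibility
        }
        std::this_thread::sleep_for(backoff);  // scheduled retry
        backoff *= 2;
    }
    return fetchPool(database, "default") == EStatus::Ok;
}

int main() {
    std::cout << (ensureDefaultPool("/Root") ? "pool ready" : "gave up") << "\n";
}

The key design choice the log reflects: treating ALREADY_EXISTS as success and re-reading afterwards makes pool creation idempotent across concurrently starting nodes.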
2025-05-07T08:49:35.934625Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715659:0, at schemeshard: 72057594046644480 2025-05-07T08:49:35.936727Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:49:38.852176Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7501623286896674614:2347], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:38.852281Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7501623286896674599:2344], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:38.852691Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:49:38.856773Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715661:3, at schemeshard: 72057594046644480 2025-05-07T08:49:38.870090Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7501623286896674628:2348], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715661 completed, doublechecking } 2025-05-07T08:49:38.946343Z node 2 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [2:7501623286896674679:2351] txid# 281474976715662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:49:40.599728Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7501623298332293299:2058];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:49:40.599803Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-05-07T08:49:40.842386Z node 3 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:49:40.852815Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:49:40.852941Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:49:40.856032Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, ( ... emeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715661:3, at schemeshard: 72057594046644480 2025-05-07T08:49:59.546928Z node 5 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [5:7501623378645802265:2350], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715661 completed, doublechecking } 2025-05-07T08:49:59.636872Z node 5 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [5:7501623378645802317:2357] txid# 281474976715662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:49:59.736326Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-05-07T08:50:00.064813Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:437: Ticket parser: got TEvAuthorizeTicket event: test_ydb_token /Root 1 2025-05-07T08:50:00.064880Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:496: Send TEvAuthorizeTicketResult success 2025-05-07T08:50:00.622697Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:437: Ticket parser: got TEvAuthorizeTicket event: test_ydb_token /Root 1 2025-05-07T08:50:00.622760Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:496: Send TEvAuthorizeTicketResult success 2025-05-07T08:50:00.710849Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:437: Ticket parser: got TEvAuthorizeTicket event: test_ydb_token /Root 1 2025-05-07T08:50:00.710902Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:496: Send TEvAuthorizeTicketResult success 2025-05-07T08:50:00.795144Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:437: Ticket parser: got TEvAuthorizeTicket event: test_ydb_token /Root 1 2025-05-07T08:50:00.795201Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:496: Send TEvAuthorizeTicketResult success 2025-05-07T08:50:00.874159Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:437: Ticket parser: got TEvAuthorizeTicket event: test_ydb_token /Root 1 2025-05-07T08:50:00.874212Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:496: Send TEvAuthorizeTicketResult success 2025-05-07T08:50:00.952489Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:437: Ticket parser: got TEvAuthorizeTicket event: test_ydb_token /Root 1 2025-05-07T08:50:00.952542Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:496: Send TEvAuthorizeTicketResult success 2025-05-07T08:50:01.028483Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:437: Ticket parser: got TEvAuthorizeTicket event: test_ydb_token /Root 1 2025-05-07T08:50:01.028534Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:496: Send TEvAuthorizeTicketResult success 2025-05-07T08:50:01.115310Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:437: Ticket parser: got TEvAuthorizeTicket event: test_ydb_token /Root 1 2025-05-07T08:50:01.115366Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:496: Send TEvAuthorizeTicketResult success 2025-05-07T08:50:01.251306Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:437: Ticket parser: got TEvAuthorizeTicket event: test_ydb_token /Root 1 2025-05-07T08:50:01.251356Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:496: Send TEvAuthorizeTicketResult success 2025-05-07T08:50:01.333986Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:437: Ticket parser: got TEvAuthorizeTicket event: test_ydb_token /Root 1 2025-05-07T08:50:01.334035Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:496: Send TEvAuthorizeTicketResult success 2025-05-07T08:50:01.429735Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:437: Ticket parser: got TEvAuthorizeTicket event: test_ydb_token /Root 1 2025-05-07T08:50:01.429785Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:496: Send TEvAuthorizeTicketResult success 2025-05-07T08:50:01.517664Z node 
5 :TICKET_PARSER INFO: viewer_ut.cpp:437: Ticket parser: got TEvAuthorizeTicket event: test_ydb_token /Root 1
2025-05-07T08:50:01.517710Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:496: Send TEvAuthorizeTicketResult success
2025-05-07T08:50:01.625675Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:437: Ticket parser: got TEvAuthorizeTicket event: test_ydb_token /Root 1
2025-05-07T08:50:01.625739Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:496: Send TEvAuthorizeTicketResult success
2025-05-07T08:50:01.720862Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:437: Ticket parser: got TEvAuthorizeTicket event: test_ydb_token /Root 1
2025-05-07T08:50:01.720929Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:496: Send TEvAuthorizeTicketResult success
2025-05-07T08:50:01.808122Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:437: Ticket parser: got TEvAuthorizeTicket event: test_ydb_token /Root 1
2025-05-07T08:50:01.808252Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:496: Send TEvAuthorizeTicketResult success
2025-05-07T08:50:01.892258Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:437: Ticket parser: got TEvAuthorizeTicket event: test_ydb_token /Root 1
2025-05-07T08:50:01.892314Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:496: Send TEvAuthorizeTicketResult success
2025-05-07T08:50:01.929283Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715680:0, at schemeshard: 72057594046644480
2025-05-07T08:50:01.932342Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715679:0, at schemeshard: 72057594046644480
2025-05-07T08:50:01.934358Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715681:0, at schemeshard: 72057594046644480
2025-05-07T08:50:03.657459Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:437: Ticket parser: got TEvAuthorizeTicket event: test_ydb_token /Root 1
2025-05-07T08:50:03.657532Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:496: Send TEvAuthorizeTicketResult success
assertion failed at ydb/core/viewer/viewer_ut.cpp:1948, virtual void NTestSuiteViewer::TTestCaseQueryExecuteScript::Execute_(NUnitTest::TTestContext &): (json.GetMap().contains("metadata")) {}
TBackTrace::Capture()+28 (0x192CEE4C)
NUnitTest::NPrivate::RaiseError(char const*, TBasicString> const&, bool)+592 (0x1978A900)
NTestSuiteViewer::TTestCaseQueryExecuteScript::Execute_(NUnitTest::TTestContext&)+9171 (0x18E6C983)
std::__y1::__function::__func, void ()>::operator()()+280 (0x18E82FD8)
TColoredProcessor::Run(std::__y1::function, TBasicString> const&, char const*, bool)+534 (0x197C1AE6)
NUnitTest::TTestBase::Run(std::__y1::function, TBasicString> const&, char const*, bool)+505 (0x19791489)
NTestSuiteViewer::TCurrentTest::Execute()+1204 (0x18E81E84)
NUnitTest::TTestFactory::Execute()+2438 (0x19792D56)
NUnitTest::RunMain(int, char**)+5213 (0x197BC05D)
??+0 (0x7FFBEB1D6D90)
__libc_start_main+128 (0x7FFBEB1D6E40)
_start+41 (0x1677D029)
2025-05-07T08:50:09.999584Z node 6 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[6:7501623421242378063:2064];send_to=[0:7307199536658146131:7762515];
2025-05-07T08:50:09.999666Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784:
fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-05-07T08:50:10.279270Z node 6 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:50:10.323902Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:50:10.324043Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:50:10.325342Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 21422, node 6 2025-05-07T08:50:10.466885Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:50:10.466922Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:50:10.466933Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:50:10.467124Z node 6 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:7308 2025-05-07T08:50:15.003647Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[6:7501623421242378063:2064];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:50:15.003771Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:50:15.590460Z node 6 :TICKET_PARSER ERROR: ticket_parser_impl.h:969: Ticket **** (8C3E2D8D): Could not find correct token validator 2025-05-07T08:50:19.339179Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7501623465815563273:2060];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:50:19.339260Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-05-07T08:50:19.596675Z node 7 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:50:19.626481Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:50:19.626633Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:50:19.630684Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 10672, node 7 2025-05-07T08:50:19.832906Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:50:19.832941Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:50:19.832956Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:50:19.833178Z node 7 :NET_CLASSIFIER ERROR: 
net_classifier.cpp:228: got bad distributable configuration
TClient is connected to server localhost:19325
2025-05-07T08:50:24.342213Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[7:7501623465815563273:2060];send_to=[0:7307199536658146131:7762515];
2025-05-07T08:50:24.342337Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout;
2025-05-07T08:50:25.470087Z node 7 :TICKET_PARSER ERROR: ticket_parser_impl.h:969: Ticket **** (8C3E2D8D): Could not find correct token validator
>> TMessageBusServerPersQueueGetPartitionStatusMetaRequestTest::FailsOnNotOkStatusInGetNodeRequest [GOOD]
>> TMessageBusServerPersQueueGetPartitionStatusMetaRequestTest::FailsOnNoBalancerInGetNodeRequest
>> TMessageBusServerPersQueueGetReadSessionsInfoMetaRequestTest::FailsOnFailedGetAllTopicsRequest
>> TMessageBusServerPersQueueGetPartitionStatusMetaRequestTest::FailsOnBadRootStatusInGetNodeRequest [GOOD]
>> TMessageBusServerPersQueueGetPartitionStatusMetaRequestTest::FailesOnNotATopic
>> TMessageBusServerPersQueueGetPartitionStatusMetaRequestTest::HandlesPipeDisconnection_AnswerDoesNotArrive [GOOD]
>> KqpPg::CreateUniqPgColumn+useSink [GOOD]
>> KqpPg::CreateUniqPgColumn-useSink
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_proxy/ut_base_tenant/unittest >> TSubDomainTest::CoordinatorRunAtSubdomainNodeWhenAvailable2 [GOOD]
Test command err:
2025-05-07T08:50:32.552671Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501623519929831016:2216];send_to=[0:7307199536658146131:7762515];
2025-05-07T08:50:32.552733Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message;
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0048f0/r3tmp/tmp4fI5k8/pdisk_1.dat
2025-05-07T08:50:33.494311Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded
2025-05-07T08:50:33.532399Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2025-05-07T08:50:33.532538Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting
2025-05-07T08:50:33.560082Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected
TClient is connected to server localhost:19419
WaitRootIsUp 'dc-1'...
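[Editor's note] The describe that follows ("TClient::Ls request: dc-1") is served through the scheme board subscriber, whose trace shows a fan-out sync over three replicas with majority counting: "Sync is in progress: ... size# 3, half# 1, successes# 1", then "Sync is done" once successes exceed half, after which the third reply is logged as an "Unexpected sync response". Below is a minimal C++ sketch of that majority counting only; the class and method names are hypothetical, not the subscriber's real interface.

#include <cstddef>
#include <iostream>

// Majority-quorum counter for a fan-out sync, as the trace suggests:
// with size# 3 and half# 1, the sync completes once successes > half,
// i.e. after the second successful reply; later replies are "unexpected".
class TSyncQuorum {
public:
    explicit TSyncQuorum(std::size_t replicas)
        : Size(replicas), Half(replicas / 2) {}

    // Returns true exactly once, when the quorum is first reached.
    bool OnReply(bool success) {
        if (Done) {
            std::cout << "Unexpected sync response\n";  // quorum already met
            return false;
        }
        (success ? Successes : Failures) += 1;
        std::cout << "Sync is in progress: size# " << Size
                  << ", half# " << Half
                  << ", successes# " << Successes
                  << ", failures# " << Failures << "\n";
        if (Successes > Half) {            // majority of replicas agree
            Done = true;
            std::cout << "Sync is done\n";
            return true;
        }
        return false;
    }

private:
    std::size_t Size, Half;
    std::size_t Successes = 0, Failures = 0;
    bool Done = false;
};

int main() {
    TSyncQuorum q(3);
    q.OnReply(true);   // successes# 1 -> still in progress
    q.OnReply(true);   // successes# 2 > half# 1 -> done
    q.OnReply(true);   // logged as unexpected, matching the trace
}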
TClient::Ls request: dc-1 2025-05-07T08:50:33.769913Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7501623519929831101:2122] Handle TEvNavigate describe path dc-1 2025-05-07T08:50:33.770011Z node 1 :TX_PROXY DEBUG: describe.cpp:227: Actor# [1:7501623524224798886:2451] HANDLE EvNavigateScheme dc-1 2025-05-07T08:50:33.770166Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2663: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7501623524224798458:2148], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-05-07T08:50:33.770254Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:871: [main][1:7501623524224798853:2431][/dc-1] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvSyncRequest: sender# [1:7501623524224798458:2148], cookie# 1 2025-05-07T08:50:33.772107Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7501623524224798857:2431][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7501623524224798854:2431], cookie# 1 2025-05-07T08:50:33.772149Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7501623524224798858:2431][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7501623524224798855:2431], cookie# 1 2025-05-07T08:50:33.772166Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7501623524224798859:2431][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7501623524224798856:2431], cookie# 1 2025-05-07T08:50:33.772209Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7501623519929830765:2051] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7501623524224798857:2431], cookie# 1 2025-05-07T08:50:33.772239Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7501623519929830768:2054] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7501623524224798858:2431], cookie# 1 2025-05-07T08:50:33.772259Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7501623519929830771:2057] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7501623524224798859:2431], cookie# 1 2025-05-07T08:50:33.772298Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7501623524224798857:2431][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7501623519929830765:2051], cookie# 1 2025-05-07T08:50:33.772315Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7501623524224798858:2431][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7501623519929830768:2054], cookie# 1 2025-05-07T08:50:33.772337Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7501623524224798859:2431][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7501623519929830771:2057], cookie# 1 2025-05-07T08:50:33.772377Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:892: [main][1:7501623524224798853:2431][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7501623524224798854:2431], cookie# 1 2025-05-07T08:50:33.772404Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:932: 
[main][1:7501623524224798853:2431][/dc-1] Sync is in progress: cookie# 1, size# 3, half# 1, successes# 1, faulires# 0 2025-05-07T08:50:33.772420Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:892: [main][1:7501623524224798853:2431][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7501623524224798855:2431], cookie# 1 2025-05-07T08:50:33.772441Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:946: [main][1:7501623524224798853:2431][/dc-1] Sync is done: cookie# 1, size# 3, half# 1, successes# 2, faulires# 0, partial# 0 2025-05-07T08:50:33.772464Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:892: [main][1:7501623524224798853:2431][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7501623524224798856:2431], cookie# 1 2025-05-07T08:50:33.772478Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:906: [main][1:7501623524224798853:2431][/dc-1] Unexpected sync response: sender# [1:7501623524224798856:2431], cookie# 1 2025-05-07T08:50:33.772557Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2550: HandleNotify: self# [1:7501623524224798458:2148], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1 PathId: Partial: 0 } 2025-05-07T08:50:33.780488Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2425: ResolveCacheItem: self# [1:7501623524224798458:2148], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1 PathId: Partial: 0 }, by path# { Subscriber: { Subscriber: [1:7501623524224798853:2431] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 1 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 0 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-05-07T08:50:33.780637Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1818: FillEntry for TNavigate: self# [1:7501623524224798458:2148], cacheItem# { Subscriber: { Subscriber: [1:7501623524224798853:2431] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 1 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 0 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 1 IsSync: true Partial: 0 } 2025-05-07T08:50:33.783607Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:264: Send result: self# [1:7501623524224798887:2452], recipient# [1:7501623524224798886:2451], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [72057594046644480:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-05-07T08:50:33.783718Z node 1 :TX_PROXY DEBUG: describe.cpp:311: Actor# [1:7501623524224798886:2451] HANDLE EvNavigateKeySetResult 
TDescribeReq marker# P5 ErrorCount# 0 2025-05-07T08:50:33.840419Z node 1 :TX_PROXY DEBUG: describe.cpp:389: Actor# [1:7501623524224798886:2451] SEND to# 72057594046644480 shardToRequest NKikimrSchemeOp.TDescribePath Path: "dc-1" Options { ShowPrivateTable: true } 2025-05-07T08:50:33.844692Z node 1 :TX_PROXY DEBUG: describe.cpp:402: Actor# [1:7501623524224798886:2451] Handle TEvDescribeSchemeResult Forward to# [1:7501623524224798885:2450] Cookie: 0 TEvDescribeSchemeResult: NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 2 Record# Status: StatusSuccess Path: "dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/dc-1" } } } PathId: 1 PathOwnerId: 72057594046644480 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-05-07T08:50:33.890049Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:7501623519929831101:2122] Handle TEvProposeTransaction 2025-05-07T08:50:33.890083Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:7501623519929831101:2122] TxId# 281474976710657 ProcessProposeTransaction 2025-05-07T08:50:33.890214Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:7501623519929831101:2122] Cookie# 0 userReqId# "" txid# 281474976710657 SEND to# [1:7501623524224798892:2456] 2025-05-07T08:50:34.035384Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1520: Actor# [1:7501623524224798892:2456] txid# 281474976710657 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "dc-1" StoragePools { Name: "/dc-1:test" Kind: "test" } } } } UserToken: "" PeerName: "" 2025-05-07T08:50:34.035444Z node 1 :TX_PROXY DEBUG: schemereq.cpp:563: Actor ... 
on: 0 }, entry# { Path: dc-1/USER_0/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-05-07T08:51:15.622805Z node 11 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:264: Send result: self# [11:7501623703999293471:2148], recipient# [11:7501623703999293470:2317], result# { ErrorCount: 1 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo }] } 2025-05-07T08:51:15.622964Z node 11 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/USER_0/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:51:15.706485Z node 13 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2663: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [13:7501623668951840662:2111], request# { ErrorCount: 0 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-05-07T08:51:15.706681Z node 13 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1818: FillEntry for TNavigate: self# [13:7501623668951840662:2111], cacheItem# { Subscriber: { Subscriber: [13:7501623673246808071:2181] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-05-07T08:51:15.706801Z node 13 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:264: Send result: self# [13:7501623703311579211:2201], recipient# [13:7501623703311579210:2320], result# { ErrorCount: 1 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-05-07T08:51:15.721149Z node 11 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2663: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [11:7501623669639554997:2110], request# { ErrorCount: 0 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-05-07T08:51:15.721348Z node 11 
:TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1818: FillEntry for TNavigate: self# [11:7501623669639554997:2110], cacheItem# { Subscriber: { Subscriber: [11:7501623673934522330:2127] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-05-07T08:51:15.721481Z node 11 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:264: Send result: self# [11:7501623703999293473:2149], recipient# [11:7501623703999293472:2318], result# { ErrorCount: 1 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-05-07T08:51:15.725011Z node 13 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2663: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [13:7501623668951840662:2111], request# { ErrorCount: 0 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-05-07T08:51:15.725172Z node 13 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1818: FillEntry for TNavigate: self# [13:7501623668951840662:2111], cacheItem# { Subscriber: { Subscriber: [13:7501623673246808071:2181] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-05-07T08:51:15.725284Z node 13 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:264: Send result: self# [13:7501623703311579213:2202], recipient# [13:7501623703311579212:2321], result# { ErrorCount: 1 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-05-07T08:51:15.763466Z node 11 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2663: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [11:7501623669639554997:2110], request# { ErrorCount: 0 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown 
DomainInfo }] } 2025-05-07T08:51:15.763640Z node 11 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1818: FillEntry for TNavigate: self# [11:7501623669639554997:2110], cacheItem# { Subscriber: { Subscriber: [11:7501623673934522330:2127] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-05-07T08:51:15.763751Z node 11 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:264: Send result: self# [11:7501623703999293475:2150], recipient# [11:7501623703999293474:2319], result# { ErrorCount: 1 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-05-07T08:51:16.098315Z node 11 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2663: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [11:7501623669639554997:2110], request# { ErrorCount: 0 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo },{ Path: dc-1/USER_0/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-05-07T08:51:16.098518Z node 11 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1818: FillEntry for TNavigate: self# [11:7501623669639554997:2110], cacheItem# { Subscriber: { Subscriber: [11:7501623695409358839:2133] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_0/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-05-07T08:51:16.098588Z node 11 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1818: FillEntry for TNavigate: self# [11:7501623669639554997:2110], cacheItem# { Subscriber: { Subscriber: [11:7501623695409358840:2134] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_0/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false 
Partial: 0 } 2025-05-07T08:51:16.098757Z node 11 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:264: Send result: self# [11:7501623708294260772:2151], recipient# [11:7501623695409358837:2310], result# { ErrorCount: 2 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo },{ Path: dc-1/USER_0/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo }] } 2025-05-07T08:51:16.099258Z node 11 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [11:7501623695409358837:2310], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests }
>> TMessageBusServerPersQueueGetPartitionStatusMetaRequestTest::HandlesTimeout [GOOD]
>> TMessageBusServerPersQueueGetPartitionStatusMetaRequestTest::SuccessfullyPassesResponsesFromTablets
>> KqpPg::InsertFromSelect_NoReorder+useSink [GOOD]
>> KqpPg::DropTablePg
>> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::FailsOnZeroBalancerTabletIdInGetNodeRequest [GOOD]
>> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::HandlesPipeDisconnection_DisconnectionComesFirst
>> IncrementalBackup::SimpleBackup [GOOD]
>> IncrementalBackup::MultiRestore
>> TMessageBusServerPersQueueGetPartitionLocationsMetaRequestTest::HandlesTimeout [GOOD]
>> TMessageBusServerPersQueueGetPartitionLocationsMetaRequestTest::SuccessfullyPassesResponsesFromTablets
>> TMessageBusServerPersQueueGetPartitionStatusMetaRequestTest::FailsOnNoBalancerInGetNodeRequest [GOOD]
>> TMessageBusServerPersQueueGetPartitionStatusMetaRequestTest::FailesOnNotATopic [GOOD]
>> TPartitionChooserSuite::TPartitionChooserActor_SplitMergeEnabled_SourceId_PartitionInactive_1_Test [GOOD]
>> TPartitionChooserSuite::TPartitionChooserActor_SplitMergeEnabled_SourceId_PartitionNotExists_Test
>> TMessageBusServerPersQueueGetTopicMetadataMetaRequestTest::FailsOnBadRootStatusInGetNodeRequest
>> TMessageBusServerPersQueueGetReadSessionsInfoMetaRequestTest::FailsOnFailedGetAllTopicsRequest [GOOD]
>> TMessageBusServerPersQueueGetReadSessionsInfoMetaRequestTest::FailsOnNoBalancerInGetNodeRequest
>> TMessageBusServerPersQueueGetReadSessionsInfoMetaRequestTest::FailsOnBadRootStatusInGetNodeRequest
>> TPartitionTests::DataTxCalcPredicateOrder [GOOD]
------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/server/ut/unittest >> TMessageBusServerPersQueueGetPartitionStatusMetaRequestTest::HandlesPipeDisconnection_AnswerDoesNotArrive [GOOD]
Test command err:
Assert failed: Check response: { Status: 128 ErrorReason: "topic \'rt3.dc1--topic1\' is not created, Marker# PQ94" ErrorCode: UNKNOWN_TOPIC }
2025-05-07T08:51:17.491234Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:3088: [PQ: 72057594037928037] Handle TEvInterconnect::TEvNodeInfo
2025-05-07T08:51:17.495667Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:3120: [PQ: 72057594037928037] Transactions request.
From tx_00000000000000000000, To tx_18446744073709551615 2025-05-07T08:51:17.496028Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:745: [PQ: 72057594037928037] doesn't have tx info 2025-05-07T08:51:17.496080Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:757: [PQ: 72057594037928037] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-05-07T08:51:17.496148Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:969: [PQ: 72057594037928037] no config, start with empty partitions and default config 2025-05-07T08:51:17.496202Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:4846: [PQ: 72057594037928037] Txs.size=0, PlannedTxs.size=0 2025-05-07T08:51:17.496252Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037928037] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-05-07T08:51:17.496324Z node 2 :PERSQUEUE INFO: pq_impl.cpp:794: [PQ: 72057594037928037] doesn't have tx writes info 2025-05-07T08:51:17.497028Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2874: [PQ: 72057594037928037] server connected, pipe [2:262:2254], now have 1 active actors on pipe 2025-05-07T08:51:17.497141Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:1454: [PQ: 72057594037928037] Handle TEvPersQueue::TEvUpdateConfig 2025-05-07T08:51:17.519611Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:1640: [PQ: 72057594037928037] Config update version 1(current 0) received from actor [2:99:2134] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic1" Version: 1 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 } 2025-05-07T08:51:17.531310Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:585: [PQ: 72057594037928037] Apply new config CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic1" Version: 1 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 } 2025-05-07T08:51:17.531474Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037928037] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-05-07T08:51:17.532557Z node 2 :PERSQUEUE INFO: pq_impl.cpp:1481: [PQ: 72057594037928037] Config applied version 1 actor [2:99:2134] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic1" Version: 1 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 } 2025-05-07T08:51:17.532745Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic1:0:Initializer] Start initializing step TInitConfigStep 2025-05-07T08:51:17.533214Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic1:0:Initializer] Start initializing step TInitInternalFieldsStep 2025-05-07T08:51:17.533643Z node 2 :PERSQUEUE INFO: partition_init.cpp:879: [PQ: 72057594037928037, Partition: 0, State: StateInit] bootstrapping 0 [2:270:2260] 2025-05-07T08:51:17.540641Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--topic1:0:Initializer] Initializing completed. 
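[Editor's note] The partition traces here and above walk the same initializer chain: each "[topic:partition:Initializer] Start initializing step ..." line is one stage (TInitConfigStep, TInitInternalFieldsStep, TInitDataStep, TInitEndWriteTimestampStep, as seen in the log), and only after the last stage does the partition log "Initializing completed." and report "init complete", leaving StateInit for StateIdle. Below is a minimal C++ sketch of such a sequential step runner; the step names are copied from the log, but the structure is an illustrative assumption, not the PERSQUEUE implementation.

#include <functional>
#include <iostream>
#include <string>
#include <vector>

// One named initialization stage; Run returns false to abort the chain.
struct TInitStep {
    std::string Name;
    std::function<bool()> Run;
};

// Sequential runner mirroring the trace: log "Start initializing step X"
// per stage, then "Initializing completed." once every stage succeeds.
bool RunInitializer(const std::string& partition,
                    const std::vector<TInitStep>& steps) {
    for (const auto& step : steps) {
        std::cout << "[" << partition << ":Initializer] "
                  << "Start initializing step " << step.Name << "\n";
        if (!step.Run()) return false;  // stay in StateInit on failure
    }
    std::cout << "[" << partition << ":Initializer] Initializing completed.\n";
    return true;  // caller may now switch StateInit -> StateIdle
}

int main() {
    // Step names as they appear in the log; bodies are stubs here.
    std::vector<TInitStep> steps = {
        {"TInitConfigStep",            [] { return true; }},
        {"TInitInternalFieldsStep",    [] { return true; }},
        {"TInitDataStep",              [] { return true; }},
        {"TInitEndWriteTimestampStep", [] { return true; }},
    };
    if (RunInitializer("rt3.dc1--topic2:2", steps)) {
        std::cout << "init complete, switching to StateIdle\n";
    }
}

Modeling each stage as a named step is what makes the log legible: a stalled partition (like the INITIALIZING one in the earlier RESPONSE) can be diagnosed by the last "Start initializing step" line it printed.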
2025-05-07T08:51:17.540752Z node 2 :PERSQUEUE INFO: partition.cpp:557: [PQ: 72057594037928037, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--topic1' partition 0 generation 2 [2:270:2260] 2025-05-07T08:51:17.540824Z node 2 :PERSQUEUE DEBUG: partition.cpp:571: [PQ: 72057594037928037, Partition: 0, State: StateInit] SYNC INIT topic rt3.dc1--topic1 partitition 0 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-05-07T08:51:17.540903Z node 2 :PERSQUEUE DEBUG: partition.cpp:3847: [PQ: 72057594037928037, Partition: 0, State: StateIdle] Process pending events. Count 0 2025-05-07T08:51:17.546250Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2874: [PQ: 72057594037928037] server connected, pipe [2:273:2262], now have 1 active actors on pipe 2025-05-07T08:51:17.605862Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:3088: [PQ: 72057594037928137] Handle TEvInterconnect::TEvNodeInfo 2025-05-07T08:51:17.610913Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:3120: [PQ: 72057594037928137] Transactions request. From tx_00000000000000000000, To tx_18446744073709551615 2025-05-07T08:51:17.611260Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:745: [PQ: 72057594037928137] doesn't have tx info 2025-05-07T08:51:17.611325Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:757: [PQ: 72057594037928137] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-05-07T08:51:17.611386Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:969: [PQ: 72057594037928137] no config, start with empty partitions and default config 2025-05-07T08:51:17.611436Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:4846: [PQ: 72057594037928137] Txs.size=0, PlannedTxs.size=0 2025-05-07T08:51:17.611499Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037928137] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-05-07T08:51:17.611595Z node 2 :PERSQUEUE INFO: pq_impl.cpp:794: [PQ: 72057594037928137] doesn't have tx writes info 2025-05-07T08:51:17.612342Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2874: [PQ: 72057594037928137] server connected, pipe [2:406:2361], now have 1 active actors on pipe 2025-05-07T08:51:17.612424Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:1454: [PQ: 72057594037928137] Handle TEvPersQueue::TEvUpdateConfig 2025-05-07T08:51:17.612640Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:1640: [PQ: 72057594037928137] Config update version 2(current 0) received from actor [2:99:2134] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic2" Version: 2 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 } 2025-05-07T08:51:17.615396Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:585: [PQ: 72057594037928137] Apply new config CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic2" Version: 2 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 } 2025-05-07T08:51:17.615557Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037928137] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-05-07T08:51:17.616350Z node 2 :PERSQUEUE INFO: pq_impl.cpp:1481: [PQ: 72057594037928137] Config applied version 2 actor [2:99:2134] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 
MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic2" Version: 2 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 } 2025-05-07T08:51:17.616470Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:0:Initializer] Start initializing step TInitConfigStep 2025-05-07T08:51:17.616856Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:0:Initializer] Start initializing step TInitInternalFieldsStep 2025-05-07T08:51:17.617070Z node 2 :PERSQUEUE INFO: partition_init.cpp:879: [PQ: 72057594037928137, Partition: 0, State: StateInit] bootstrapping 0 [2:414:2367] 2025-05-07T08:51:17.619386Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--topic2:0:Initializer] Initializing completed. 2025-05-07T08:51:17.619463Z node 2 :PERSQUEUE INFO: partition.cpp:557: [PQ: 72057594037928137, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--topic2' partition 0 generation 2 [2:414:2367] 2025-05-07T08:51:17.619528Z node 2 :PERSQUEUE DEBUG: partition.cpp:571: [PQ: 72057594037928137, Partition: 0, State: StateInit] SYNC INIT topic rt3.dc1--topic2 partitition 0 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-05-07T08:51:17.619587Z node 2 :PERSQUEUE DEBUG: partition.cpp:3847: [PQ: 72057594037928137, Partition: 0, State: StateIdle] Process pending events. Count 0 2025-05-07T08:51:17.620382Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2874: [PQ: 72057594037928137] server connected, pipe [2:417:2369], now have 1 active actors on pipe 2025-05-07T08:51:17.638153Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:3088: [PQ: 72057594037928138] Handle TEvInterconnect::TEvNodeInfo 2025-05-07T08:51:17.642743Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:3120: [PQ: 72057594037928138] Transactions request. 
From tx_00000000000000000000, To tx_18446744073709551615 2025-05-07T08:51:17.643095Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:745: [PQ: 72057594037928138] doesn't have tx info 2025-05-07T08:51:17.643171Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:757: [PQ: 72057594037928138] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-05-07T08:51:17.643238Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:969: [PQ: 72057594037928138] no config, start with empty partitions and default config 2025-05-07T08:51:17.643287Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:4846: [PQ: 72057594037928138] Txs.size=0, PlannedTxs.size=0 2025-05-07T08:51:17.643352Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037928138] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-05-07T08:51:17.643421Z node 2 :PERSQUEUE INFO: pq_impl.cpp:794: [PQ: 72057594037928138] doesn't have tx writes info 2025-05-07T08:51:17.644175Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2874: [PQ: 72057594037928138] server connected, pipe [2:466:2406], now have 1 active actors on pipe 2025-05-07T08:51:17.644298Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:1454: [PQ: 72057594037928138] Handle TEvPersQueue::TEvUpdateConfig 2025-05-07T08:51:17.644499Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:1640: [PQ: 72057594037928138] Config update version 3(current 0) received from actor [2:99:2134] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 1 TopicName: "rt3.dc1--topic2" Version: 3 Partitions { PartitionId: 1 } AllPartitions { PartitionId: 1 } 2025-05-07T08:51:17.647216Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:585: [PQ: 72057594037928138] Apply new config CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 1 TopicName: "rt3.dc1--topic2" Version: 3 Partitions { PartitionId: 1 } AllPartitions { PartitionId: 1 } 2025-05-07T08:51:17.647375Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037928138] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-05-07T08:51:17.648179Z node 2 :PERSQUEUE INFO: pq_impl.cpp:1481: [PQ: 72057594037928138] Config applied version 3 actor [2:99:2134] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 1 TopicName: "rt3.dc1--topic2" Version: 3 Partitions { PartitionId: 1 } ... 
termark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 2 TopicName: "rt3.dc1--topic2" Version: 8 Partitions { PartitionId: 2 } AllPartitions { PartitionId: 2 } 2025-05-07T08:51:18.572473Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:585: [PQ: 72057594037928139] Apply new config CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 2 TopicName: "rt3.dc1--topic2" Version: 8 Partitions { PartitionId: 2 } AllPartitions { PartitionId: 2 } 2025-05-07T08:51:18.572623Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037928139] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-05-07T08:51:18.573520Z node 3 :PERSQUEUE INFO: pq_impl.cpp:1481: [PQ: 72057594037928139] Config applied version 8 actor [3:99:2134] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 2 TopicName: "rt3.dc1--topic2" Version: 8 Partitions { PartitionId: 2 } AllPartitions { PartitionId: 2 } 2025-05-07T08:51:18.573668Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitConfigStep 2025-05-07T08:51:18.574120Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitInternalFieldsStep 2025-05-07T08:51:18.574398Z node 3 :PERSQUEUE INFO: partition_init.cpp:879: [PQ: 72057594037928139, Partition: 2, State: StateInit] bootstrapping 2 [3:534:2457] 2025-05-07T08:51:18.576681Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--topic2:2:Initializer] Initializing completed. 2025-05-07T08:51:18.576769Z node 3 :PERSQUEUE INFO: partition.cpp:557: [PQ: 72057594037928139, Partition: 2, State: StateInit] init complete for topic 'rt3.dc1--topic2' partition 2 generation 2 [3:534:2457] 2025-05-07T08:51:18.576839Z node 3 :PERSQUEUE DEBUG: partition.cpp:571: [PQ: 72057594037928139, Partition: 2, State: StateInit] SYNC INIT topic rt3.dc1--topic2 partitition 2 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-05-07T08:51:18.576900Z node 3 :PERSQUEUE DEBUG: partition.cpp:3847: [PQ: 72057594037928139, Partition: 2, State: StateIdle] Process pending events. 
Count 0 2025-05-07T08:51:18.577885Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2874: [PQ: 72057594037928139] server connected, pipe [3:537:2459], now have 1 active actors on pipe 2025-05-07T08:51:18.579251Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2874: [PQ: 72057594037928037] server connected, pipe [3:543:2462], now have 1 active actors on pipe 2025-05-07T08:51:18.579353Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2874: [PQ: 72057594037928138] server connected, pipe [3:544:2463], now have 1 active actors on pipe 2025-05-07T08:51:18.579594Z node 3 :PERSQUEUE DEBUG: partition.cpp:855: [PQ: 72057594037928037, Partition: 0, State: StateIdle] Topic PartitionStatus PartitionSize: 0 UsedReserveSize: 0 ReserveSize: 0 PartitionConfig{ MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } 2025-05-07T08:51:18.579834Z node 3 :PERSQUEUE DEBUG: partition.cpp:855: [PQ: 72057594037928138, Partition: 1, State: StateIdle] Topic PartitionStatus PartitionSize: 0 UsedReserveSize: 0 ReserveSize: 0 PartitionConfig{ MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } 2025-05-07T08:51:18.579894Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2874: [PQ: 72057594037928139] server connected, pipe [3:545:2463], now have 1 active actors on pipe 2025-05-07T08:51:18.580123Z node 3 :PERSQUEUE DEBUG: partition.cpp:855: [PQ: 72057594037928139, Partition: 2, State: StateIdle] Topic PartitionStatus PartitionSize: 0 UsedReserveSize: 0 ReserveSize: 0 PartitionConfig{ MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } 2025-05-07T08:51:18.591445Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2874: [PQ: 72057594037928139] server connected, pipe [3:553:2470], now have 1 active actors on pipe 2025-05-07T08:51:18.619472Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:3088: [PQ: 72057594037928139] Handle TEvInterconnect::TEvNodeInfo 2025-05-07T08:51:18.622560Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:3120: [PQ: 72057594037928139] Transactions request. 
From tx_00000000000000000000, To tx_18446744073709551615 2025-05-07T08:51:18.623007Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:745: [PQ: 72057594037928139] doesn't have tx info 2025-05-07T08:51:18.623070Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:757: [PQ: 72057594037928139] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-05-07T08:51:18.623225Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4846: [PQ: 72057594037928139] Txs.size=0, PlannedTxs.size=0 2025-05-07T08:51:18.624173Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037928139] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-05-07T08:51:18.624230Z node 3 :PERSQUEUE INFO: pq_impl.cpp:794: [PQ: 72057594037928139] doesn't have tx writes info 2025-05-07T08:51:18.624366Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitConfigStep 2025-05-07T08:51:18.624772Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitInternalFieldsStep 2025-05-07T08:51:18.625019Z node 3 :PERSQUEUE INFO: partition_init.cpp:879: [PQ: 72057594037928139, Partition: 2, State: StateInit] bootstrapping 2 [3:610:2515] 2025-05-07T08:51:18.627480Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitDiskStatusStep 2025-05-07T08:51:18.629135Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitMetaStep 2025-05-07T08:51:18.629542Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitInfoRangeStep 2025-05-07T08:51:18.629908Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitDataRangeStep 2025-05-07T08:51:18.630238Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitDataStep 2025-05-07T08:51:18.630454Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitEndWriteTimestampStep 2025-05-07T08:51:18.630508Z node 3 :PERSQUEUE INFO: partition_init.cpp:773: [rt3.dc1--topic2:2:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-05-07T08:51:18.630555Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--topic2:2:Initializer] Initializing completed. 2025-05-07T08:51:18.630623Z node 3 :PERSQUEUE INFO: partition.cpp:557: [PQ: 72057594037928139, Partition: 2, State: StateInit] init complete for topic 'rt3.dc1--topic2' partition 2 generation 3 [3:610:2515] 2025-05-07T08:51:18.630739Z node 3 :PERSQUEUE DEBUG: partition.cpp:571: [PQ: 72057594037928139, Partition: 2, State: StateInit] SYNC INIT topic rt3.dc1--topic2 partitition 2 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-05-07T08:51:18.630808Z node 3 :PERSQUEUE DEBUG: partition.cpp:3847: [PQ: 72057594037928139, Partition: 2, State: StateIdle] Process pending events. 
Count 0 2025-05-07T08:51:18.631840Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2899: [PQ: 72057594037928138] server disconnected, pipe [3:544:2463] destroyed 2025-05-07T08:51:18.631930Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2899: [PQ: 72057594037928037] server disconnected, pipe [3:543:2462] destroyed RESPONSE Status: 1 ErrorCode: OK MetaResponse { CmdGetPartitionStatusResult { TopicResult { Topic: "rt3.dc1--topic2" PartitionResult { Partition: 1 Status: STATUS_OK LastInitDurationSeconds: 0 CreationTimestamp: 0 GapCount: 0 GapSize: 0 AvgWriteSpeedPerSec: 0 AvgWriteSpeedPerMin: 0 AvgWriteSpeedPerHour: 0 AvgWriteSpeedPerDay: 0 AvgReadSpeedPerSec: 0 AvgReadSpeedPerMin: 0 AvgReadSpeedPerHour: 0 AvgReadSpeedPerDay: 0 ReadBytesQuota: 0 WriteBytesQuota: 50000000 PartitionSize: 0 StartOffset: 0 EndOffset: 0 LastWriteTimestampMs: 78 WriteLagMs: 0 AvgQuotaSpeedPerSec: 0 AvgQuotaSpeedPerMin: 0 AvgQuotaSpeedPerHour: 0 AvgQuotaSpeedPerDay: 0 SourceIdCount: 0 SourceIdRetentionPeriodSec: 0 UsedReserveSize: 0 AggregatedCounters { Values: 78 Values: 0 Values: 1 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 50000000 Values: 0 Values: 9223372036854775807 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 1 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 } Generation: 2 Cookie: 1 ScaleStatus: NORMAL } PartitionResult { Partition: 2 Status: STATUS_UNKNOWN } ErrorCode: OK } TopicResult { Topic: "rt3.dc1--topic1" PartitionResult { Partition: 0 Status: STATUS_OK LastInitDurationSeconds: 0 CreationTimestamp: 0 GapCount: 0 GapSize: 0 AvgWriteSpeedPerSec: 0 AvgWriteSpeedPerMin: 0 AvgWriteSpeedPerHour: 0 AvgWriteSpeedPerDay: 0 AvgReadSpeedPerSec: 0 AvgReadSpeedPerMin: 0 AvgReadSpeedPerHour: 0 AvgReadSpeedPerDay: 0 ReadBytesQuota: 0 WriteBytesQuota: 50000000 PartitionSize: 0 StartOffset: 0 EndOffset: 0 LastWriteTimestampMs: 38 WriteLagMs: 0 AvgQuotaSpeedPerSec: 0 AvgQuotaSpeedPerMin: 0 AvgQuotaSpeedPerHour: 0 AvgQuotaSpeedPerDay: 0 SourceIdCount: 0 SourceIdRetentionPeriodSec: 0 UsedReserveSize: 0 AggregatedCounters { Values: 38 Values: 0 Values: 1 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 50000000 Values: 0 Values: 9223372036854775807 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 1 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 } Generation: 2 Cookie: 1 ScaleStatus: NORMAL } ErrorCode: OK } } } >> TMessageBusServerPersQueueGetPartitionStatusMetaRequestTest::SuccessfullyPassesResponsesFromTablets [GOOD] >> TMessageBusServerPersQueueGetPartitionStatusMetaRequestTest::HandlesPipeDisconnection_DisconnectionComesSecond |89.3%| [TA] $(B)/ydb/core/kqp/ut/sysview/test-results/unittest/{meta.json ... 
results_accumulator.log} >> TMessageBusServerPersQueueGetTopicMetadataMetaRequestTest::FailsOnBadRootStatusInGetNodeRequest [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/server/ut/unittest >> TMessageBusServerPersQueueGetPartitionStatusMetaRequestTest::FailsOnNoBalancerInGetNodeRequest [GOOD] Test command err: Assert failed: Check response: { Status: 128 ErrorReason: "no path \'/Root/PQ/\', Marker# PQ17" ErrorCode: UNKNOWN_TOPIC } Assert failed: Check response: { Status: 128 ErrorReason: "no path \'Root/PQ\', Marker# PQ150" ErrorCode: UNKNOWN_TOPIC } Assert failed: Check response: { Status: 128 ErrorReason: "topic \'rt3.dc1--topic1\' has no balancer, Marker# PQ193" ErrorCode: UNKNOWN_TOPIC } |89.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/statistics/service/ut/ut_aggregation/ydb-core-statistics-service-ut-ut_aggregation >> TMessageBusServerPersQueueGetTopicMetadataMetaRequestTest::FailsOnBalancerDescribeResultFailureWhenTopicsAreGivenExplicitly >> TMessageBusServerPersQueueGetReadSessionsInfoMetaRequestTest::FailsOnNoBalancerInGetNodeRequest [GOOD] >> TMessageBusServerPersQueueGetReadSessionsInfoMetaRequestTest::FailsOnEmptyTopicName >> KqpPg::InsertFromSelect_Simple+useSink [GOOD] >> KqpPg::InsertFromSelect_Simple-useSink |89.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/statistics/service/ut/ut_aggregation/ydb-core-statistics-service-ut-ut_aggregation >> TMessageBusServerPersQueueGetPartitionLocationsMetaRequestTest::SuccessfullyPassesResponsesFromTablets [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/server/ut/unittest >> TMessageBusServerPersQueueGetPartitionStatusMetaRequestTest::FailesOnNotATopic [GOOD] Test command err: Assert failed: Check response: { Status: 130 ErrorReason: "Timeout while waiting for response, may be just slow, Marker# PQ16" ErrorCode: ERROR } 2025-05-07T08:51:17.721335Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:3088: [PQ: 72057594037928037] Handle TEvInterconnect::TEvNodeInfo 2025-05-07T08:51:17.726085Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:3120: [PQ: 72057594037928037] Transactions request. 
From tx_00000000000000000000, To tx_18446744073709551615 2025-05-07T08:51:17.726417Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:745: [PQ: 72057594037928037] doesn't have tx info 2025-05-07T08:51:17.726477Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:757: [PQ: 72057594037928037] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-05-07T08:51:17.726546Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:969: [PQ: 72057594037928037] no config, start with empty partitions and default config 2025-05-07T08:51:17.726597Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:4846: [PQ: 72057594037928037] Txs.size=0, PlannedTxs.size=0 2025-05-07T08:51:17.726647Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037928037] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-05-07T08:51:17.726735Z node 2 :PERSQUEUE INFO: pq_impl.cpp:794: [PQ: 72057594037928037] doesn't have tx writes info 2025-05-07T08:51:17.727384Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2874: [PQ: 72057594037928037] server connected, pipe [2:261:2253], now have 1 active actors on pipe 2025-05-07T08:51:17.727479Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:1454: [PQ: 72057594037928037] Handle TEvPersQueue::TEvUpdateConfig 2025-05-07T08:51:17.741664Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:1640: [PQ: 72057594037928037] Config update version 1(current 0) received from actor [2:99:2134] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic1" Version: 1 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 } 2025-05-07T08:51:17.744265Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:585: [PQ: 72057594037928037] Apply new config CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic1" Version: 1 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 } 2025-05-07T08:51:17.744372Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037928037] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-05-07T08:51:17.745225Z node 2 :PERSQUEUE INFO: pq_impl.cpp:1481: [PQ: 72057594037928037] Config applied version 1 actor [2:99:2134] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic1" Version: 1 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 } 2025-05-07T08:51:17.745367Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic1:0:Initializer] Start initializing step TInitConfigStep 2025-05-07T08:51:17.745799Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic1:0:Initializer] Start initializing step TInitInternalFieldsStep 2025-05-07T08:51:17.746231Z node 2 :PERSQUEUE INFO: partition_init.cpp:879: [PQ: 72057594037928037, Partition: 0, State: StateInit] bootstrapping 0 [2:269:2259] 2025-05-07T08:51:17.748495Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--topic1:0:Initializer] Initializing completed. 
2025-05-07T08:51:17.748563Z node 2 :PERSQUEUE INFO: partition.cpp:557: [PQ: 72057594037928037, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--topic1' partition 0 generation 2 [2:269:2259] 2025-05-07T08:51:17.748626Z node 2 :PERSQUEUE DEBUG: partition.cpp:571: [PQ: 72057594037928037, Partition: 0, State: StateInit] SYNC INIT topic rt3.dc1--topic1 partitition 0 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-05-07T08:51:17.748694Z node 2 :PERSQUEUE DEBUG: partition.cpp:3847: [PQ: 72057594037928037, Partition: 0, State: StateIdle] Process pending events. Count 0 2025-05-07T08:51:17.749384Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2874: [PQ: 72057594037928037] server connected, pipe [2:272:2261], now have 1 active actors on pipe 2025-05-07T08:51:17.812291Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:3088: [PQ: 72057594037928139] Handle TEvInterconnect::TEvNodeInfo 2025-05-07T08:51:17.819287Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:3120: [PQ: 72057594037928139] Transactions request. From tx_00000000000000000000, To tx_18446744073709551615 2025-05-07T08:51:17.819730Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:745: [PQ: 72057594037928139] doesn't have tx info 2025-05-07T08:51:17.819788Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:757: [PQ: 72057594037928139] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-05-07T08:51:17.819835Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:969: [PQ: 72057594037928139] no config, start with empty partitions and default config 2025-05-07T08:51:17.819886Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:4846: [PQ: 72057594037928139] Txs.size=0, PlannedTxs.size=0 2025-05-07T08:51:17.819962Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037928139] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-05-07T08:51:17.820058Z node 2 :PERSQUEUE INFO: pq_impl.cpp:794: [PQ: 72057594037928139] doesn't have tx writes info 2025-05-07T08:51:17.820887Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2874: [PQ: 72057594037928139] server connected, pipe [2:404:2359], now have 1 active actors on pipe 2025-05-07T08:51:17.821021Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:1454: [PQ: 72057594037928139] Handle TEvPersQueue::TEvUpdateConfig 2025-05-07T08:51:17.821368Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:1640: [PQ: 72057594037928139] Config update version 2(current 0) received from actor [2:99:2134] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 2 TopicName: "rt3.dc1--topic2" Version: 2 Partitions { PartitionId: 2 } AllPartitions { PartitionId: 2 } 2025-05-07T08:51:17.827330Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:585: [PQ: 72057594037928139] Apply new config CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 2 TopicName: "rt3.dc1--topic2" Version: 2 Partitions { PartitionId: 2 } AllPartitions { PartitionId: 2 } 2025-05-07T08:51:17.827498Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037928139] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-05-07T08:51:17.828655Z node 2 :PERSQUEUE INFO: pq_impl.cpp:1481: [PQ: 72057594037928139] Config applied version 2 actor [2:99:2134] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 
MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 2 TopicName: "rt3.dc1--topic2" Version: 2 Partitions { PartitionId: 2 } AllPartitions { PartitionId: 2 } 2025-05-07T08:51:17.828784Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitConfigStep 2025-05-07T08:51:17.829367Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitInternalFieldsStep 2025-05-07T08:51:17.829593Z node 2 :PERSQUEUE INFO: partition_init.cpp:879: [PQ: 72057594037928139, Partition: 2, State: StateInit] bootstrapping 2 [2:412:2365] 2025-05-07T08:51:17.832014Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--topic2:2:Initializer] Initializing completed. 2025-05-07T08:51:17.832094Z node 2 :PERSQUEUE INFO: partition.cpp:557: [PQ: 72057594037928139, Partition: 2, State: StateInit] init complete for topic 'rt3.dc1--topic2' partition 2 generation 2 [2:412:2365] 2025-05-07T08:51:17.832159Z node 2 :PERSQUEUE DEBUG: partition.cpp:571: [PQ: 72057594037928139, Partition: 2, State: StateInit] SYNC INIT topic rt3.dc1--topic2 partitition 2 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-05-07T08:51:17.832223Z node 2 :PERSQUEUE DEBUG: partition.cpp:3847: [PQ: 72057594037928139, Partition: 2, State: StateIdle] Process pending events. Count 0 2025-05-07T08:51:17.833124Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2874: [PQ: 72057594037928139] server connected, pipe [2:415:2367], now have 1 active actors on pipe 2025-05-07T08:51:17.834606Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2874: [PQ: 72057594037928037] server connected, pipe [2:421:2370], now have 1 active actors on pipe 2025-05-07T08:51:17.835055Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2874: [PQ: 72057594037928139] server connected, pipe [2:423:2371], now have 1 active actors on pipe 2025-05-07T08:51:17.835587Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2899: [PQ: 72057594037928037] server disconnected, pipe [2:421:2370] destroyed 2025-05-07T08:51:17.835966Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2899: [PQ: 72057594037928139] server disconnected, pipe [2:423:2371] destroyed Assert failed: Check response: { Status: 128 ErrorReason: "path \'Root/PQ\' has unknown/invalid root prefix \'Root\', Marker# PQ14" ErrorCode: UNKNOWN_TOPIC } Assert failed: Check response: { Status: 128 ErrorReason: "the following topics are not created: rt3.dc1--topic2, Marker# PQ95" ErrorCode: UNKNOWN_TOPIC } >> TMessageBusServerPersQueueGetReadSessionsInfoMetaRequestTest::FailsOnBadRootStatusInGetNodeRequest [GOOD] |89.3%| [TA] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_read_only_vdisk/test-results/unittest/{meta.json ... 
results_accumulator.log} >> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::HandlesPipeDisconnection_DisconnectionComesFirst [GOOD] >> TMessageBusServerPersQueueGetReadSessionsInfoMetaRequestTest::FailesOnNotATopic >> TMessageBusServerPersQueueGetPartitionLocationsMetaRequestTest::HandlesPipeDisconnection_DisconnectionComesSecond >> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::HandlesPipeDisconnection_DisconnectionComesSecond |89.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_transfer/ydb-core-tx-schemeshard-ut_transfer |89.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_transfer/ydb-core-tx-schemeshard-ut_transfer >> TMessageBusServerPersQueueGetTopicMetadataMetaRequestTest::FailsOnFailedGetAllTopicsRequest >> IncrementalBackup::BackupRestore [GOOD] >> TMessageBusServerPersQueueGetTopicMetadataMetaRequestTest::FailsOnBalancerDescribeResultFailureWhenTopicsAreGivenExplicitly [GOOD] >> TMessageBusServerPersQueueGetReadSessionsInfoMetaRequestTest::FailsOnEmptyTopicName [GOOD] >> TMessageBusServerPersQueueGetTopicMetadataMetaRequestTest::FailsOnEmptyTopicName >> KqpPg::InsertValuesFromTableWithDefault-useSink [GOOD] >> TMessageBusServerPersQueueGetReadSessionsInfoMetaRequestTest::HandlesTimeout |89.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/statistics/database/ut/ydb-core-statistics-database-ut >> TMessageBusServerPersQueueGetReadSessionsInfoMetaRequestTest::FailesOnNotATopic [GOOD] >> KqpPg::InsertValuesFromTableWithDefaultAndCast+useSink |89.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/statistics/database/ut/ydb-core-statistics-database-ut |89.3%| [LD] {RESULT} $(B)/ydb/core/statistics/service/ut/ut_aggregation/ydb-core-statistics-service-ut-ut_aggregation >> IncrementalBackup::ComplexRestoreBackupCollection+WithIncremental >> TMessageBusServerPersQueueGetReadSessionsInfoMetaRequestTest::FailsOnBalancerDescribeResultFailureWhenTopicsAreGivenExplicitly >> TMessageBusServerPersQueueGetReadSessionsInfoMetaRequestTest::FailsOnNotOkStatusInGetNodeRequest >> TMessageBusServerPersQueueGetTopicMetadataMetaRequestTest::FailsOnFailedGetAllTopicsRequest [GOOD] >> TMessageBusServerPersQueueGetPartitionStatusMetaRequestTest::HandlesPipeDisconnection_DisconnectionComesSecond [GOOD] >> TMessageBusServerPersQueueGetTopicMetadataMetaRequestTest::FailsOnEmptyTopicName [GOOD] >> TMessageBusServerPersQueueGetPartitionLocationsMetaRequestTest::HandlesPipeDisconnection_DisconnectionComesSecond [GOOD] >> KqpPg::InsertValuesFromTableWithDefaultText-useSink [GOOD] >> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::HandlesPipeDisconnection_DisconnectionComesSecond [GOOD] >> TMessageBusServerPersQueueGetReadSessionsInfoMetaRequestTest::HandlesTimeout [GOOD] >> KqpPg::TableArrayInsert+useSink [GOOD] >> KqpPg::CreateTableBulkUpsertAndRead [GOOD] >> TMessageBusServerPersQueueGetReadSessionsInfoMetaRequestTest::FailsOnBalancerDescribeResultFailureWhenTopicsAreGivenExplicitly [GOOD] >> Viewer::StorageGroupOutputWithSpaceCheckDependsOnVDiskSpaceStatus [GOOD] >> TMessageBusServerPersQueueGetTopicMetadataMetaRequestTest::FailsOnNotOkStatusInGetNodeRequest >> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::FailesOnNotATopic >> KqpPg::InsertValuesFromTableWithDefaultTextNotNull+useSink >> IncrementalBackup::SimpleRestoreBackupCollection-WithIncremental [GOOD] >> TBackupTests::ShouldSucceedOnLargeData_MinWriteBatch [GOOD] >> KqpPg::InsertValuesFromTableWithDefaultAndCast+useSink [GOOD] >> 
IncrementalBackup::SimpleBackupBackupCollection+WithIncremental [GOOD] >> TTicketParserTest::LoginCheckRemovedUser [GOOD] >> VDiskBalancing::TestRandom_Block42 [GOOD] >> KqpPg::DropTablePg [GOOD] >> KqpPg::InsertValuesFromTableWithDefaultAndCast-useSink >> IncrementalBackup::MultiRestore [GOOD] >> TMessageBusServerPersQueueGetReadSessionsInfoMetaRequestTest::SuccessfullyPassesResponsesFromTablets >> KqpPg::TableArrayInsert-useSink >> TBackupTests::ShouldSucceedOnLargeData[Zstd] [GOOD] >> KqpPg::CreateUniqPgColumn-useSink [GOOD] >> KqpPg::CreateUniqComplexPgColumn+useSink |89.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/controller/ut_assign_tx_id/unittest |89.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/controller/ut_assign_tx_id/unittest |89.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/controller/ut_assign_tx_id/unittest >> TMessageBusServerPersQueueGetReadSessionsInfoMetaRequestTest::FailsOnNotOkStatusInGetNodeRequest [GOOD] >> Viewer::StorageGroupOutputWithSpaceCheckDependsOnUsage >> TMessageBusServerPersQueueGetTopicMetadataMetaRequestTest::FailsOnNotOkStatusInGetNodeRequest [GOOD] >> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::FailesOnNotATopic [GOOD] >> KqpPg::InsertValuesFromTableWithDefaultTextNotNull+useSink [GOOD] >> KqpPg::InsertValuesFromTableWithDefaultTextNotNull-useSink >> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::HandlesPipeDisconnection_AnswerDoesNotArrive >> KqpPg::CopyTableSerialColumns+useSink >> TTicketParserTest::LoginEmptyTicketBad >> KqpPg::DropTablePgMultiple >> IncrementalBackup::E2EBackupCollection >> TMessageBusServerPersQueueGetReadSessionsInfoMetaRequestTest::SuccessfullyPassesResponsesFromTablets [GOOD] >> IncrementalBackup::SimpleBackupBackupCollection-WithIncremental >> TMessageBusServerPersQueueGetTopicMetadataMetaRequestTest::FailsOnNoBalancerInGetNodeRequest >> TMessageBusServerPersQueueGetReadSessionsInfoMetaRequestTest::FailsOnZeroBalancerTabletIdInGetNodeRequest >> TMessageBusServerPersQueueGetTopicMetadataMetaRequestTest::FailesOnNotATopic >> TTicketParserTest::LoginEmptyTicketBad [GOOD] >> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::HandlesPipeDisconnection_AnswerDoesNotArrive [GOOD] >> KqpPg::CopyTableSerialColumns+useSink [GOOD] >> TMessageBusServerPersQueueGetTopicMetadataMetaRequestTest::FailsOnNoBalancerInGetNodeRequest [GOOD] >> TMessageBusServerPersQueueGetTopicMetadataMetaRequestTest::FailesOnNotATopic [GOOD] >> KqpPg::CopyTableSerialColumns-useSink >> TMessageBusServerPersQueueGetReadSessionsInfoMetaRequestTest::FailsOnZeroBalancerTabletIdInGetNodeRequest [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/server/ut/unittest >> TMessageBusServerPersQueueGetTopicMetadataMetaRequestTest::FailsOnEmptyTopicName [GOOD] Test command err: Assert failed: Check response: { Status: 128 ErrorReason: "path \'Root/PQ\' has unknown/invalid root prefix \'Root\', Marker# PQ14" ErrorCode: UNKNOWN_TOPIC } Assert failed: Check response: { Status: 128 ErrorReason: "topic \'Root/PQ\' describe error, Status# LookupError, Marker# PQ1" ErrorCode: ERROR } Assert failed: Check response: { Status: 128 ErrorReason: "empty topic in GetTopicMetadata request" ErrorCode: BAD_REQUEST } ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/server/ut/unittest >> TMessageBusServerPersQueueGetReadSessionsInfoMetaRequestTest::FailsOnEmptyTopicName [GOOD] Test command err: Assert failed: Check response: { Status: 
128 ErrorReason: "no path \'/Root/PQ/\', Marker# PQ17" ErrorCode: UNKNOWN_TOPIC } Assert failed: Check response: { Status: 128 ErrorReason: "topic \'rt3.dc1--topic1\' has no balancer, Marker# PQ193" ErrorCode: UNKNOWN_TOPIC } Assert failed: Check response: { Status: 128 ErrorReason: "empty topic in GetReadSessionsInfo request" ErrorCode: BAD_REQUEST } |89.4%| [TA] {RESULT} $(B)/ydb/public/lib/ydb_cli/topic/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> TMessageBusServerPersQueueGetReadSessionsInfoMetaRequestTest::FailsOnNoClientSpecified >> TMessageBusServerPersQueueGetReadSessionsInfoMetaRequestTest::FailsOnNoClientSpecified [GOOD] >> KqpPg::TypeCoercionInsert-useSink [GOOD] >> KqpPg::TypeCoercionInsert+useSink [GOOD] >> TPartitionChooserSuite::TPartitionChooserActor_SplitMergeEnabled_PreferedPartition_Active_Test [GOOD] >> TPQTabletTests::Huge_ProposeTransacton [GOOD] >> IncrementalBackup::SimpleBackupBackupCollection-WithIncremental [GOOD] >> KqpPg::DropTablePgMultiple [GOOD] >> KqpPg::InsertValuesFromTableWithDefaultAndCast-useSink [GOOD] >> KqpPg::InsertValuesFromTableWithDefaultBool+useSink >> TBackupTests::ShouldSucceedOnLargeData[Raw] [GOOD] |89.4%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_transfer/ydb-core-tx-schemeshard-ut_transfer ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/server/ut/unittest >> TMessageBusServerPersQueueGetTopicMetadataMetaRequestTest::FailsOnNoBalancerInGetNodeRequest [GOOD] Test command err: Assert failed: Check response: { Status: 128 ErrorReason: "no path \'/Root/PQ/\', Marker# PQ17" ErrorCode: UNKNOWN_TOPIC } Assert failed: Check response: { Status: 128 ErrorReason: "no path \'Root/PQ\', Marker# PQ150" ErrorCode: UNKNOWN_TOPIC } Assert failed: Check response: { Status: 128 ErrorReason: "topic \'rt3.dc1--topic1\' has no balancer, Marker# PQ193" ErrorCode: UNKNOWN_TOPIC } ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/server/ut/unittest >> TMessageBusServerPersQueueGetReadSessionsInfoMetaRequestTest::FailsOnBalancerDescribeResultFailureWhenTopicsAreGivenExplicitly [GOOD] Test command err: Assert failed: Check response: { Status: 128 ErrorReason: "path \'Root/PQ\' has unknown/invalid root prefix \'Root\', Marker# PQ14" ErrorCode: UNKNOWN_TOPIC } Assert failed: Check response: { Status: 128 ErrorReason: "the following topics are not created: rt3.dc1--topic2, Marker# PQ95" ErrorCode: UNKNOWN_TOPIC } Assert failed: Check response: { Status: 128 ErrorReason: "topic \'Root/PQ\' describe error, Status# LookupError, Marker# PQ1" ErrorCode: ERROR } |89.4%| [LD] {RESULT} $(B)/ydb/core/statistics/database/ut/ydb-core-statistics-database-ut ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/server/ut/unittest >> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::HandlesPipeDisconnection_AnswerDoesNotArrive [GOOD] Test command err: Assert failed: Check response: { Status: 128 ErrorReason: "topic \'rt3.dc1--topic1\' is not created, Marker# PQ94" ErrorCode: UNKNOWN_TOPIC } 2025-05-07T08:51:19.347885Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:3088: [PQ: 72057594037928037] Handle TEvInterconnect::TEvNodeInfo 2025-05-07T08:51:19.353076Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:3120: [PQ: 72057594037928037] Transactions request. 
From tx_00000000000000000000, To tx_18446744073709551615 2025-05-07T08:51:19.353513Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:745: [PQ: 72057594037928037] doesn't have tx info 2025-05-07T08:51:19.353582Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:757: [PQ: 72057594037928037] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-05-07T08:51:19.353634Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:969: [PQ: 72057594037928037] no config, start with empty partitions and default config 2025-05-07T08:51:19.353694Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:4846: [PQ: 72057594037928037] Txs.size=0, PlannedTxs.size=0 2025-05-07T08:51:19.353747Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037928037] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-05-07T08:51:19.353826Z node 2 :PERSQUEUE INFO: pq_impl.cpp:794: [PQ: 72057594037928037] doesn't have tx writes info 2025-05-07T08:51:19.354640Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2874: [PQ: 72057594037928037] server connected, pipe [2:259:2251], now have 1 active actors on pipe 2025-05-07T08:51:19.354849Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:1454: [PQ: 72057594037928037] Handle TEvPersQueue::TEvUpdateConfig 2025-05-07T08:51:19.381459Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:1640: [PQ: 72057594037928037] Config update version 1(current 0) received from actor [2:99:2134] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic1" Version: 1 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 } 2025-05-07T08:51:19.388728Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:585: [PQ: 72057594037928037] Apply new config CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic1" Version: 1 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 } 2025-05-07T08:51:19.388901Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037928037] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-05-07T08:51:19.389678Z node 2 :PERSQUEUE INFO: pq_impl.cpp:1481: [PQ: 72057594037928037] Config applied version 1 actor [2:99:2134] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic1" Version: 1 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 } 2025-05-07T08:51:19.389826Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic1:0:Initializer] Start initializing step TInitConfigStep 2025-05-07T08:51:19.390176Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic1:0:Initializer] Start initializing step TInitInternalFieldsStep 2025-05-07T08:51:19.390489Z node 2 :PERSQUEUE INFO: partition_init.cpp:879: [PQ: 72057594037928037, Partition: 0, State: StateInit] bootstrapping 0 [2:267:2257] 2025-05-07T08:51:19.392372Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--topic1:0:Initializer] Initializing completed. 
2025-05-07T08:51:19.392446Z node 2 :PERSQUEUE INFO: partition.cpp:557: [PQ: 72057594037928037, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--topic1' partition 0 generation 2 [2:267:2257] 2025-05-07T08:51:19.392508Z node 2 :PERSQUEUE DEBUG: partition.cpp:571: [PQ: 72057594037928037, Partition: 0, State: StateInit] SYNC INIT topic rt3.dc1--topic1 partitition 0 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-05-07T08:51:19.392566Z node 2 :PERSQUEUE DEBUG: partition.cpp:3847: [PQ: 72057594037928037, Partition: 0, State: StateIdle] Process pending events. Count 0 2025-05-07T08:51:19.393264Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2874: [PQ: 72057594037928037] server connected, pipe [2:270:2259], now have 1 active actors on pipe 2025-05-07T08:51:19.468375Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:3088: [PQ: 72057594037928137] Handle TEvInterconnect::TEvNodeInfo 2025-05-07T08:51:19.474033Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:3120: [PQ: 72057594037928137] Transactions request. From tx_00000000000000000000, To tx_18446744073709551615 2025-05-07T08:51:19.474468Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:745: [PQ: 72057594037928137] doesn't have tx info 2025-05-07T08:51:19.474527Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:757: [PQ: 72057594037928137] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-05-07T08:51:19.474575Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:969: [PQ: 72057594037928137] no config, start with empty partitions and default config 2025-05-07T08:51:19.474620Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:4846: [PQ: 72057594037928137] Txs.size=0, PlannedTxs.size=0 2025-05-07T08:51:19.474693Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037928137] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-05-07T08:51:19.474785Z node 2 :PERSQUEUE INFO: pq_impl.cpp:794: [PQ: 72057594037928137] doesn't have tx writes info 2025-05-07T08:51:19.475565Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2874: [PQ: 72057594037928137] server connected, pipe [2:405:2360], now have 1 active actors on pipe 2025-05-07T08:51:19.475648Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:1454: [PQ: 72057594037928137] Handle TEvPersQueue::TEvUpdateConfig 2025-05-07T08:51:19.475869Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:1640: [PQ: 72057594037928137] Config update version 2(current 0) received from actor [2:99:2134] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic2" Version: 2 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 } 2025-05-07T08:51:19.478834Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:585: [PQ: 72057594037928137] Apply new config CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic2" Version: 2 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 } 2025-05-07T08:51:19.478995Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037928137] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-05-07T08:51:19.479946Z node 2 :PERSQUEUE INFO: pq_impl.cpp:1481: [PQ: 72057594037928137] Config applied version 2 actor [2:99:2134] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 
MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic2" Version: 2 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 } 2025-05-07T08:51:19.480084Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:0:Initializer] Start initializing step TInitConfigStep 2025-05-07T08:51:19.480553Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:0:Initializer] Start initializing step TInitInternalFieldsStep 2025-05-07T08:51:19.480783Z node 2 :PERSQUEUE INFO: partition_init.cpp:879: [PQ: 72057594037928137, Partition: 0, State: StateInit] bootstrapping 0 [2:413:2366] 2025-05-07T08:51:19.483338Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--topic2:0:Initializer] Initializing completed. 2025-05-07T08:51:19.483428Z node 2 :PERSQUEUE INFO: partition.cpp:557: [PQ: 72057594037928137, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--topic2' partition 0 generation 2 [2:413:2366] 2025-05-07T08:51:19.483496Z node 2 :PERSQUEUE DEBUG: partition.cpp:571: [PQ: 72057594037928137, Partition: 0, State: StateInit] SYNC INIT topic rt3.dc1--topic2 partitition 0 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-05-07T08:51:19.483563Z node 2 :PERSQUEUE DEBUG: partition.cpp:3847: [PQ: 72057594037928137, Partition: 0, State: StateIdle] Process pending events. Count 0 2025-05-07T08:51:19.484464Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2874: [PQ: 72057594037928137] server connected, pipe [2:416:2368], now have 1 active actors on pipe 2025-05-07T08:51:19.503240Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:3088: [PQ: 72057594037928138] Handle TEvInterconnect::TEvNodeInfo 2025-05-07T08:51:19.509104Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:3120: [PQ: 72057594037928138] Transactions request. 
From tx_00000000000000000000, To tx_18446744073709551615 2025-05-07T08:51:19.509546Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:745: [PQ: 72057594037928138] doesn't have tx info 2025-05-07T08:51:19.509608Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:757: [PQ: 72057594037928138] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-05-07T08:51:19.509678Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:969: [PQ: 72057594037928138] no config, start with empty partitions and default config 2025-05-07T08:51:19.509752Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:4846: [PQ: 72057594037928138] Txs.size=0, PlannedTxs.size=0 2025-05-07T08:51:19.509830Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037928138] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-05-07T08:51:19.509905Z node 2 :PERSQUEUE INFO: pq_impl.cpp:794: [PQ: 72057594037928138] doesn't have tx writes info 2025-05-07T08:51:19.510819Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2874: [PQ: 72057594037928138] server connected, pipe [2:465:2405], now have 1 active actors on pipe 2025-05-07T08:51:19.510983Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:1454: [PQ: 72057594037928138] Handle TEvPersQueue::TEvUpdateConfig 2025-05-07T08:51:19.511246Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:1640: [PQ: 72057594037928138] Config update version 3(current 0) received from actor [2:99:2134] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 1 TopicName: "rt3.dc1--topic2" Version: 3 Partitions { PartitionId: 1 } AllPartitions { PartitionId: 1 } 2025-05-07T08:51:19.514361Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:585: [PQ: 72057594037928138] Apply new config CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 1 TopicName: "rt3.dc1--topic2" Version: 3 Partitions { PartitionId: 1 } AllPartitions { PartitionId: 1 } 2025-05-07T08:51:19.514535Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037928138] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-05-07T08:51:19.515401Z node 2 :PERSQUEUE INFO: pq_impl.cpp:1481: [PQ: 72057594037928138] Config applied version 3 actor [2:99:2134] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 1 TopicName: "rt3.dc1--topic2" Version: 3 Partitions { PartitionId: 1 } ... 
axWriteInflightSize: 90000000 } PartitionIds: 1 TopicName: "rt3.dc1--topic2" Version: 11 Partitions { PartitionId: 1 } AllPartitions { PartitionId: 1 } 2025-05-07T08:51:21.254460Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:585: [PQ: 72057594037928138] Apply new config CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 1 TopicName: "rt3.dc1--topic2" Version: 11 Partitions { PartitionId: 1 } AllPartitions { PartitionId: 1 } 2025-05-07T08:51:21.254591Z node 4 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037928138] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-05-07T08:51:21.255311Z node 4 :PERSQUEUE INFO: pq_impl.cpp:1481: [PQ: 72057594037928138] Config applied version 11 actor [4:99:2134] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 1 TopicName: "rt3.dc1--topic2" Version: 11 Partitions { PartitionId: 1 } AllPartitions { PartitionId: 1 } 2025-05-07T08:51:21.255445Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:1:Initializer] Start initializing step TInitConfigStep 2025-05-07T08:51:21.255817Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:1:Initializer] Start initializing step TInitInternalFieldsStep 2025-05-07T08:51:21.256020Z node 4 :PERSQUEUE INFO: partition_init.cpp:879: [PQ: 72057594037928138, Partition: 1, State: StateInit] bootstrapping 1 [4:474:2412] 2025-05-07T08:51:21.258146Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--topic2:1:Initializer] Initializing completed. 2025-05-07T08:51:21.258211Z node 4 :PERSQUEUE INFO: partition.cpp:557: [PQ: 72057594037928138, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--topic2' partition 1 generation 2 [4:474:2412] 2025-05-07T08:51:21.258268Z node 4 :PERSQUEUE DEBUG: partition.cpp:571: [PQ: 72057594037928138, Partition: 1, State: StateInit] SYNC INIT topic rt3.dc1--topic2 partitition 1 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-05-07T08:51:21.258323Z node 4 :PERSQUEUE DEBUG: partition.cpp:3847: [PQ: 72057594037928138, Partition: 1, State: StateIdle] Process pending events. Count 0 2025-05-07T08:51:21.259114Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2874: [PQ: 72057594037928138] server connected, pipe [4:477:2414], now have 1 active actors on pipe 2025-05-07T08:51:21.274890Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:3088: [PQ: 72057594037928139] Handle TEvInterconnect::TEvNodeInfo 2025-05-07T08:51:21.277821Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:3120: [PQ: 72057594037928139] Transactions request. 
From tx_00000000000000000000, To tx_18446744073709551615 2025-05-07T08:51:21.278151Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:745: [PQ: 72057594037928139] doesn't have tx info 2025-05-07T08:51:21.278204Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:757: [PQ: 72057594037928139] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-05-07T08:51:21.278245Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:969: [PQ: 72057594037928139] no config, start with empty partitions and default config 2025-05-07T08:51:21.278288Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:4846: [PQ: 72057594037928139] Txs.size=0, PlannedTxs.size=0 2025-05-07T08:51:21.278340Z node 4 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037928139] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-05-07T08:51:21.278403Z node 4 :PERSQUEUE INFO: pq_impl.cpp:794: [PQ: 72057594037928139] doesn't have tx writes info 2025-05-07T08:51:21.279097Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2874: [PQ: 72057594037928139] server connected, pipe [4:526:2451], now have 1 active actors on pipe 2025-05-07T08:51:21.279207Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:1454: [PQ: 72057594037928139] Handle TEvPersQueue::TEvUpdateConfig 2025-05-07T08:51:21.279461Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:1640: [PQ: 72057594037928139] Config update version 12(current 0) received from actor [4:99:2134] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 2 TopicName: "rt3.dc1--topic2" Version: 12 Partitions { PartitionId: 2 } AllPartitions { PartitionId: 2 } 2025-05-07T08:51:21.281191Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:585: [PQ: 72057594037928139] Apply new config CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 2 TopicName: "rt3.dc1--topic2" Version: 12 Partitions { PartitionId: 2 } AllPartitions { PartitionId: 2 } 2025-05-07T08:51:21.281328Z node 4 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037928139] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-05-07T08:51:21.281923Z node 4 :PERSQUEUE INFO: pq_impl.cpp:1481: [PQ: 72057594037928139] Config applied version 12 actor [4:99:2134] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 2 TopicName: "rt3.dc1--topic2" Version: 12 Partitions { PartitionId: 2 } AllPartitions { PartitionId: 2 } 2025-05-07T08:51:21.282094Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitConfigStep 2025-05-07T08:51:21.282434Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitInternalFieldsStep 2025-05-07T08:51:21.282625Z node 4 :PERSQUEUE INFO: partition_init.cpp:879: [PQ: 72057594037928139, Partition: 2, State: StateInit] bootstrapping 2 [4:534:2457] 2025-05-07T08:51:21.284179Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--topic2:2:Initializer] Initializing completed. 
2025-05-07T08:51:21.284240Z node 4 :PERSQUEUE INFO: partition.cpp:557: [PQ: 72057594037928139, Partition: 2, State: StateInit] init complete for topic 'rt3.dc1--topic2' partition 2 generation 2 [4:534:2457] 2025-05-07T08:51:21.284290Z node 4 :PERSQUEUE DEBUG: partition.cpp:571: [PQ: 72057594037928139, Partition: 2, State: StateInit] SYNC INIT topic rt3.dc1--topic2 partitition 2 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-05-07T08:51:21.284332Z node 4 :PERSQUEUE DEBUG: partition.cpp:3847: [PQ: 72057594037928139, Partition: 2, State: StateIdle] Process pending events. Count 0 2025-05-07T08:51:21.284966Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2874: [PQ: 72057594037928139] server connected, pipe [4:537:2459], now have 1 active actors on pipe 2025-05-07T08:51:21.285768Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2874: [PQ: 72057594037928037] server connected, pipe [4:543:2462], now have 1 active actors on pipe 2025-05-07T08:51:21.285850Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2874: [PQ: 72057594037928138] server connected, pipe [4:544:2463], now have 1 active actors on pipe 2025-05-07T08:51:21.286047Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2874: [PQ: 72057594037928139] server connected, pipe [4:545:2463], now have 1 active actors on pipe 2025-05-07T08:51:21.297997Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2874: [PQ: 72057594037928139] server connected, pipe [4:553:2470], now have 1 active actors on pipe 2025-05-07T08:51:21.322605Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:3088: [PQ: 72057594037928139] Handle TEvInterconnect::TEvNodeInfo 2025-05-07T08:51:21.324994Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:3120: [PQ: 72057594037928139] Transactions request. From tx_00000000000000000000, To tx_18446744073709551615 2025-05-07T08:51:21.325348Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:745: [PQ: 72057594037928139] doesn't have tx info 2025-05-07T08:51:21.325402Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:757: [PQ: 72057594037928139] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-05-07T08:51:21.325563Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:4846: [PQ: 72057594037928139] Txs.size=0, PlannedTxs.size=0 2025-05-07T08:51:21.326111Z node 4 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037928139] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-05-07T08:51:21.326165Z node 4 :PERSQUEUE INFO: pq_impl.cpp:794: [PQ: 72057594037928139] doesn't have tx writes info 2025-05-07T08:51:21.326279Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitConfigStep 2025-05-07T08:51:21.326614Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitInternalFieldsStep 2025-05-07T08:51:21.326868Z node 4 :PERSQUEUE INFO: partition_init.cpp:879: [PQ: 72057594037928139, Partition: 2, State: StateInit] bootstrapping 2 [4:610:2515] 2025-05-07T08:51:21.328999Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitDiskStatusStep 2025-05-07T08:51:21.330354Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitMetaStep 2025-05-07T08:51:21.330670Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitInfoRangeStep 2025-05-07T08:51:21.331006Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitDataRangeStep 2025-05-07T08:51:21.331336Z node 4 :PERSQUEUE DEBUG: 
partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitDataStep 2025-05-07T08:51:21.331416Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitEndWriteTimestampStep 2025-05-07T08:51:21.331460Z node 4 :PERSQUEUE INFO: partition_init.cpp:773: [rt3.dc1--topic2:2:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-05-07T08:51:21.331501Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--topic2:2:Initializer] Initializing completed. 2025-05-07T08:51:21.331559Z node 4 :PERSQUEUE INFO: partition.cpp:557: [PQ: 72057594037928139, Partition: 2, State: StateInit] init complete for topic 'rt3.dc1--topic2' partition 2 generation 3 [4:610:2515] 2025-05-07T08:51:21.331634Z node 4 :PERSQUEUE DEBUG: partition.cpp:571: [PQ: 72057594037928139, Partition: 2, State: StateInit] SYNC INIT topic rt3.dc1--topic2 partitition 2 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-05-07T08:51:21.331689Z node 4 :PERSQUEUE DEBUG: partition.cpp:3847: [PQ: 72057594037928139, Partition: 2, State: StateIdle] Process pending events. Count 0 2025-05-07T08:51:21.332618Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2899: [PQ: 72057594037928138] server disconnected, pipe [4:544:2463] destroyed 2025-05-07T08:51:21.332850Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2899: [PQ: 72057594037928037] server disconnected, pipe [4:543:2462] destroyed RESPONSE Status: 1 ErrorCode: OK MetaResponse { CmdGetPartitionOffsetsResult { TopicResult { Topic: "rt3.dc1--topic2" PartitionResult { Partition: 1 StartOffset: 0 EndOffset: 0 ErrorCode: OK WriteTimestampEstimateMS: 0 } PartitionResult { Partition: 2 ErrorCode: INITIALIZING ErrorReason: "partition is not ready yet" } ErrorCode: OK } TopicResult { Topic: "rt3.dc1--topic1" PartitionResult { Partition: 0 StartOffset: 0 EndOffset: 0 ErrorCode: OK WriteTimestampEstimateMS: 0 } ErrorCode: OK } } } ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_balancing/unittest >> VDiskBalancing::TestRandom_Block42 [GOOD] Test command err: RandomSeed# 8810701051644685388 Step = 0 SEND TEvPut with key [1:1:0:0:0:585447:0] TEvPutResult: TEvPutResult {Id# [1:1:0:0:0:585447:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Step = 1 SEND TEvPut with key [1:1:1:0:0:37868:0] TEvPutResult: TEvPutResult {Id# [1:1:1:0:0:37868:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Step = 2 SEND TEvPut with key [1:1:2:0:0:619381:0] TEvPutResult: TEvPutResult {Id# [1:1:2:0:0:619381:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Step = 3 SEND TEvPut with key [1:1:3:0:0:725585:0] TEvPutResult: TEvPutResult {Id# [1:1:3:0:0:725585:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Step = 4 SEND TEvPut with key [1:1:4:0:0:2934723:0] TEvPutResult: TEvPutResult {Id# [1:1:4:0:0:2934723:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Stop node 4 2025-05-07T08:46:33.710305Z 1 00h01m00.010512s :PIPE_SERVER ERROR: [72057594037932033] NodeDisconnected NodeId# 5 Step = 5 SEND TEvPut with key [1:1:5:0:0:502135:0] TEvPutResult: TEvPutResult {Id# [1:1:5:0:0:502135:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999976} Step = 6 SEND TEvPut with key [1:1:6:0:0:3044947:0] TEvPutResult: TEvPutResult {Id# [1:1:6:0:0:3044947:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999976} Stop node 7 2025-05-07T08:46:34.038275Z 1 
00h01m10.060512s :PIPE_SERVER ERROR: [72057594037932033] NodeDisconnected NodeId# 8 Step = 7 SEND TEvPut with key [1:1:7:0:0:582354:0] TEvPutResult: TEvPutResult {Id# [1:1:7:0:0:582354:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999963} Step = 8 SEND TEvPut with key [1:1:8:0:0:1478820:0] TEvPutResult: TEvPutResult {Id# [1:1:8:0:0:1478820:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999963} Step = 9 SEND TEvPut with key [1:1:9:0:0:1360774:0] TEvPutResult: TEvPutResult {Id# [1:1:9:0:0:1360774:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999963} Start node 4 Step = 10 SEND TEvPut with key [1:1:10:0:0:1727870:0] TEvPutResult: TEvPutResult {Id# [1:1:10:0:0:1727870:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999939} Step = 11 SEND TEvPut with key [1:1:11:0:0:1883457:0] TEvPutResult: TEvPutResult {Id# [1:1:11:0:0:1883457:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999939} Step = 12 SEND TEvPut with key [1:1:12:0:0:568368:0] TEvPutResult: TEvPutResult {Id# [1:1:12:0:0:568368:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999939} Step = 13 SEND TEvPut with key [1:1:13:0:0:896600:0] TEvPutResult: TEvPutResult {Id# [1:1:13:0:0:896600:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999939} Step = 14 SEND TEvPut with key [1:1:14:0:0:179270:0] TEvPutResult: TEvPutResult {Id# [1:1:14:0:0:179270:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999939} Step = 15 SEND TEvPut with key [1:1:15:0:0:3026131:0] TEvPutResult: TEvPutResult {Id# [1:1:15:0:0:3026131:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999939} Step = 16 SEND TEvPut with key [1:1:16:0:0:670396:0] TEvPutResult: TEvPutResult {Id# [1:1:16:0:0:670396:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999939} Step = 17 SEND TEvPut with key [1:1:17:0:0:1584741:0] TEvPutResult: TEvPutResult {Id# [1:1:17:0:0:1584741:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999939} Step = 18 SEND TEvPut with key [1:1:18:0:0:2384818:0] TEvPutResult: TEvPutResult {Id# [1:1:18:0:0:2384818:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999939} Step = 19 SEND TEvPut with key [1:1:19:0:0:2867010:0] TEvPutResult: TEvPutResult {Id# [1:1:19:0:0:2867010:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999939} Step = 20 SEND TEvPut with key [1:1:20:0:0:2911789:0] TEvPutResult: TEvPutResult {Id# [1:1:20:0:0:2911789:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999939} Step = 21 SEND TEvPut with key [1:1:21:0:0:2463622:0] TEvPutResult: TEvPutResult {Id# [1:1:21:0:0:2463622:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999939} Step = 22 SEND TEvPut with key [1:1:22:0:0:322338:0] TEvPutResult: TEvPutResult {Id# [1:1:22:0:0:322338:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999939} Step = 23 SEND TEvPut with key [1:1:23:0:0:2119770:0] TEvPutResult: TEvPutResult {Id# [1:1:23:0:0:2119770:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999939} Step = 24 SEND TEvPut with key [1:1:24:0:0:56036:0] TEvPutResult: TEvPutResult {Id# [1:1:24:0:0:56036:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999939} Step = 25 SEND TEvPut with key [1:1:25:0:0:2648607:0] TEvPutResult: TEvPutResult {Id# [1:1:25:0:0:2648607:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999939} Stop node 0 2025-05-07T08:46:36.035883Z 3 00h01m30.100512s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [3:188:17] 
ServerId# [1:296:58] TabletId# 72057594037932033 PipeClientId# [3:188:17] 2025-05-07T08:46:36.036081Z 6 00h01m30.100512s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [6:209:17] ServerId# [1:299:61] TabletId# 72057594037932033 PipeClientId# [6:209:17] 2025-05-07T08:46:36.036192Z 5 00h01m30.100512s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [5:7661:16] ServerId# [1:7670:1092] TabletId# 72057594037932033 PipeClientId# [5:7661:16] 2025-05-07T08:46:36.036286Z 4 00h01m30.100512s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [4:195:17] ServerId# [1:297:59] TabletId# 72057594037932033 PipeClientId# [4:195:17] 2025-05-07T08:46:36.036438Z 2 00h01m30.100512s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [2:181:17] ServerId# [1:295:57] TabletId# 72057594037932033 PipeClientId# [2:181:17] 2025-05-07T08:46:36.036530Z 7 00h01m30.100512s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [7:216:17] ServerId# [1:300:62] TabletId# 72057594037932033 PipeClientId# [7:216:17] Step = 26 SEND TEvPut with key [1:1:26:0:0:539431:0] TEvPutResult: TEvPutResult {Id# [1:1:26:0:0:539431:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 27 SEND TEvPut with key [1:1:27:0:0:148482:0] TEvPutResult: TEvPutResult {Id# [1:1:27:0:0:148482:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 28 SEND TEvPut with key [1:1:28:0:0:2673563:0] TEvPutResult: TEvPutResult {Id# [1:1:28:0:0:2673563:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 29 SEND TEvPut with key [1:1:29:0:0:265170:0] TEvPutResult: TEvPutResult {Id# [1:1:29:0:0:265170:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 30 SEND TEvPut with key [1:1:30:0:0:2398732:0] TEvPutResult: TEvPutResult {Id# [1:1:30:0:0:2398732:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Compact vdisk 2 Step = 31 SEND TEvPut with key [1:1:31:0:0:2302132:0] TEvPutResult: TEvPutResult {Id# [1:1:31:0:0:2302132:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 32 SEND TEvPut with key [1:1:32:0:0:3112269:0] TEvPutResult: TEvPutResult {Id# [1:1:32:0:0:3112269:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 33 SEND TEvPut with key [1:1:33:0:0:883758:0] TEvPutResult: TEvPutResult {Id# [1:1:33:0:0:883758:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 34 SEND TEvPut with key [1:1:34:0:0:1212958:0] TEvPutResult: TEvPutResult {Id# [1:1:34:0:0:1212958:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 35 SEND TEvPut with key [1:1:35:0:0:3026131:0] TEvPutResult: TEvPutResult {Id# [1:1:35:0:0:3026131:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 36 SEND TEvPut with key [1:1:36:0:0:139148:0] TEvPutResult: TEvPutResult {Id# [1:1:36:0:0:139148:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 37 SEND TEvPut with key [1:1:37:0:0:200198:0] TEvPutResult: TEvPutResult {Id# [1:1:37:0:0:200198:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 38 SEND TEvPut with key [1:1:38:0:0:1252178:0] TEvPutResult: TEvPutResult {Id# [1:1:38:0:0:1252178:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 39 SEND TEvPut with key [1:1:39:0:0:1897783:0] TEvPutResult: 
TEvPutResult {Id# [1:1:39:0:0:1897783:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 40 SEND TEvPut with key [1:1:40:0:0:1486678:0] TEvPutResult: TEvPutResult {Id# [1:1:40:0:0:1486678:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 41 SEND TEvPut with key [1:1:41:0:0:1285964:0] TEvPutResult: TEvPutResult {Id# [1:1:41:0:0:1285964:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 42 SEND TEvPut with key [1:1:42:0:0:1221731:0] TEvPutResult: TEvPutResult {Id# [1:1:42:0:0:1221731:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 43 SEND TEvPut with key [1:1:43:0:0:1613844:0] TEvPutResult: TEvPutResult {Id# [1:1:43:0:0:1613844:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 44 SEND TEvPut with key [1:1:44:0:0:2582908:0] TEvPutResult: TEvPutResult {Id# [1:1:44:0:0:2582908:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 45 SEND TEvPut with key [1:1:45:0:0:1703743:0] TEvPutResult: TEvPutResult {Id# [1:1:45:0:0:1703743:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 46 SEND TEvPut with key [1:1:46:0:0:1362981:0] TEvPutResult: TEvPutResult {Id# [1:1:46:0:0:1362981:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 47 SEND TEvPut with key [1:1:47:0:0:1469807:0] TEvPutResult: TEvPutResult {Id# [1:1:47:0:0:1469807:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 48 SEND TEvPut with key [1:1:48:0:0:2832565:0] TEvPutResult: TEvPutResult {Id# [1:1:48:0:0:2832565:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 49 SEND TEvPut with key [1:1:49:0:0:1960611:0] TEvPutResult: TEvPutResult {Id# [1:1:49:0:0:1960611:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 50 SEND TEvPut with key [1:1:50:0:0:1164230:0] TEvPutResult: TEvPutResult {Id# [1:1:50:0:0:1164230:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 51 SEND TEvPut with key [1:1:51:0:0:836900:0] TEvPutResult: TEvPutResult {Id# [1:1:51:0:0:836900:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 52 SEND TEvPut with key [1:1:52:0:0:838380:0] TEvPutResult: TEvPutResult {Id# [1:1:52:0:0:838380:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 53 SEND TEvPut with key [1:1:53:0:0:1975575:0] TEvPutResult: TEvPutResult {Id# [1:1:53:0:0:1975575:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Start node 0 Step = 54 SEND TEvPut with key [1:1:54:0:0:1888556:0] TEvPutResult: TEvPutResult {Id# [1:1:54:0:0:1888556:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999817} Step = 55 SEND TEvPut with key [1:1:55:0:0:715063:0] TEvPutResult: TEvPutResult {Id# [1:1:55:0:0:715063:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999817} Step = 56 SEND TEvPut with key [1:1:56:0:0:42993:0] TEvPutResult: TEvPutResult {Id# [1:1:56:0:0:42993:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999817} Step = 57 SEND TEvPut with key [1:1:57:0:0:1491407:0] TEvPutResult: TEvPutResult {Id# [1:1:57:0:0:1491407:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999817} Step = 58 SEND TEvPut with key [1:1:58:0:0:702845:0] TEvPutResult: TEvPutResult {Id# [1:1:58:0:0:702845:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999817} Step = 59 SEND TEvPut with key [1:1:59:0:0:2539948:0] TEvPutResult: TEvPutResult {Id# [1:1:59:0:0:2539948:0] Status ... 
ND TEvPut with key [1:1:936:0:0:2748248:0] TEvPutResult: TEvPutResult {Id# [1:1:936:0:0:2748248:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999731} Step = 937 SEND TEvPut with key [1:1:937:0:0:112302:0] TEvPutResult: TEvPutResult {Id# [1:1:937:0:0:112302:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999707} Step = 938 SEND TEvPut with key [1:1:938:0:0:800417:0] TEvPutResult: TEvPutResult {Id# [1:1:938:0:0:800417:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999707} Step = 939 SEND TEvPut with key [1:1:939:0:0:2336442:0] TEvPutResult: TEvPutResult {Id# [1:1:939:0:0:2336442:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999707} Step = 940 SEND TEvPut with key [1:1:940:0:0:982070:0] TEvPutResult: TEvPutResult {Id# [1:1:940:0:0:982070:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999707} Start node 4 Step = 941 SEND TEvPut with key [1:1:941:0:0:713632:0] TEvPutResult: TEvPutResult {Id# [1:1:941:0:0:713632:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999695} Step = 942 SEND TEvPut with key [1:1:942:0:0:1644191:0] TEvPutResult: TEvPutResult {Id# [1:1:942:0:0:1644191:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999695} Step = 943 SEND TEvPut with key [1:1:943:0:0:254634:0] TEvPutResult: TEvPutResult {Id# [1:1:943:0:0:254634:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999695} Step = 944 SEND TEvPut with key [1:1:944:0:0:1141270:0] TEvPutResult: TEvPutResult {Id# [1:1:944:0:0:1141270:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999695} Step = 945 SEND TEvPut with key [1:1:945:0:0:610103:0] TEvPutResult: TEvPutResult {Id# [1:1:945:0:0:610103:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999756} Step = 946 SEND TEvPut with key [1:1:946:0:0:24822:0] TEvPutResult: TEvPutResult {Id# [1:1:946:0:0:24822:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999744} Compact vdisk 6 Step = 947 SEND TEvPut with key [1:1:947:0:0:100167:0] TEvPutResult: TEvPutResult {Id# [1:1:947:0:0:100167:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999622} Step = 948 SEND TEvPut with key [1:1:948:0:0:645630:0] TEvPutResult: TEvPutResult {Id# [1:1:948:0:0:645630:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999622} Step = 949 SEND TEvPut with key [1:1:949:0:0:2125890:0] TEvPutResult: TEvPutResult {Id# [1:1:949:0:0:2125890:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999622} Step = 950 SEND TEvPut with key [1:1:950:0:0:2544891:0] TEvPutResult: TEvPutResult {Id# [1:1:950:0:0:2544891:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999622} Step = 951 SEND TEvPut with key [1:1:951:0:0:647007:0] TEvPutResult: TEvPutResult {Id# [1:1:951:0:0:647007:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999622} Step = 952 SEND TEvPut with key [1:1:952:0:0:2031652:0] TEvPutResult: TEvPutResult {Id# [1:1:952:0:0:2031652:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999622} Step = 953 SEND TEvPut with key [1:1:953:0:0:2109805:0] TEvPutResult: TEvPutResult {Id# [1:1:953:0:0:2109805:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999622} Stop node 3 2025-05-07T08:50:34.623645Z 1 00h28m30.765331s :PIPE_SERVER ERROR: [72057594037932033] NodeDisconnected NodeId# 4 Step = 954 SEND TEvPut with key [1:1:954:0:0:1353403:0] TEvPutResult: TEvPutResult {Id# [1:1:954:0:0:1353403:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999719} Stop node 4 2025-05-07T08:50:35.322789Z 1 00h28m40.783560s :PIPE_SERVER 
ERROR: [72057594037932033] NodeDisconnected NodeId# 5 Step = 955 SEND TEvPut with key [1:1:955:0:0:1286278:0] TEvPutResult: TEvPutResult {Id# [1:1:955:0:0:1286278:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683} Start node 3 Step = 956 SEND TEvPut with key [1:1:956:0:0:1875483:0] TEvPutResult: TEvPutResult {Id# [1:1:956:0:0:1875483:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683} Step = 957 SEND TEvPut with key [1:1:957:0:0:1021388:0] TEvPutResult: TEvPutResult {Id# [1:1:957:0:0:1021388:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683} Start node 4 Step = 958 SEND TEvPut with key [1:1:958:0:0:860806:0] TEvPutResult: TEvPutResult {Id# [1:1:958:0:0:860806:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999695} Step = 959 SEND TEvPut with key [1:1:959:0:0:385917:0] TEvPutResult: TEvPutResult {Id# [1:1:959:0:0:385917:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999695} Step = 960 SEND TEvPut with key [1:1:960:0:0:200998:0] TEvPutResult: TEvPutResult {Id# [1:1:960:0:0:200998:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999695} Step = 961 SEND TEvPut with key [1:1:961:0:0:1661659:0] TEvPutResult: TEvPutResult {Id# [1:1:961:0:0:1661659:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999695} Step = 962 SEND TEvPut with key [1:1:962:0:0:771410:0] TEvPutResult: TEvPutResult {Id# [1:1:962:0:0:771410:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999695} Step = 963 SEND TEvPut with key [1:1:963:0:0:1414281:0] TEvPutResult: TEvPutResult {Id# [1:1:963:0:0:1414281:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999695} Step = 964 SEND TEvPut with key [1:1:964:0:0:2848837:0] TEvPutResult: TEvPutResult {Id# [1:1:964:0:0:2848837:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999731} Step = 965 SEND TEvPut with key [1:1:965:0:0:989600:0] TEvPutResult: TEvPutResult {Id# [1:1:965:0:0:989600:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999731} Step = 966 SEND TEvPut with key [1:1:966:0:0:2761296:0] TEvPutResult: TEvPutResult {Id# [1:1:966:0:0:2761296:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999695} Step = 967 SEND TEvPut with key [1:1:967:0:0:981163:0] TEvPutResult: TEvPutResult {Id# [1:1:967:0:0:981163:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999695} Step = 968 SEND TEvPut with key [1:1:968:0:0:14298:0] TEvPutResult: TEvPutResult {Id# [1:1:968:0:0:14298:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999695} Step = 969 SEND TEvPut with key [1:1:969:0:0:626285:0] TEvPutResult: TEvPutResult {Id# [1:1:969:0:0:626285:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999695} Step = 970 SEND TEvPut with key [1:1:970:0:0:334566:0] TEvPutResult: TEvPutResult {Id# [1:1:970:0:0:334566:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999695} Stop node 7 2025-05-07T08:50:38.257923Z 1 00h29m10.785096s :PIPE_SERVER ERROR: [72057594037932033] NodeDisconnected NodeId# 8 Step = 971 SEND TEvPut with key [1:1:971:0:0:972888:0] TEvPutResult: TEvPutResult {Id# [1:1:971:0:0:972888:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999658} Step = 972 SEND TEvPut with key [1:1:972:0:0:786055:0] TEvPutResult: TEvPutResult {Id# [1:1:972:0:0:786055:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999658} Step = 973 SEND TEvPut with key [1:1:973:0:0:2707502:0] TEvPutResult: TEvPutResult {Id# [1:1:973:0:0:2707502:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999658} Stop node 
1 2025-05-07T08:50:39.232335Z 1 00h29m20.790904s :PIPE_SERVER ERROR: [72057594037932033] NodeDisconnected NodeId# 2 Step = 974 SEND TEvPut with key [1:1:974:0:0:2660812:0] TEvPutResult: TEvPutResult {Id# [1:1:974:0:0:2660812:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999658} Start node 1 Step = 975 SEND TEvPut with key [1:1:975:0:0:3005283:0] TEvPutResult: TEvPutResult {Id# [1:1:975:0:0:3005283:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683} Stop node 1 2025-05-07T08:50:39.940562Z 1 00h29m40.791928s :PIPE_SERVER ERROR: [72057594037932033] NodeDisconnected NodeId# 2 Step = 976 SEND TEvPut with key [1:1:976:0:0:1542748:0] TEvPutResult: TEvPutResult {Id# [1:1:976:0:0:1542748:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999658} Step = 977 SEND TEvPut with key [1:1:977:0:0:2837300:0] TEvPutResult: TEvPutResult {Id# [1:1:977:0:0:2837300:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999658} Step = 978 SEND TEvPut with key [1:1:978:0:0:481535:0] TEvPutResult: TEvPutResult {Id# [1:1:978:0:0:481535:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999658} Step = 979 SEND TEvPut with key [1:1:979:0:0:24668:0] TEvPutResult: TEvPutResult {Id# [1:1:979:0:0:24668:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999658} Step = 980 SEND TEvPut with key [1:1:980:0:0:1760402:0] TEvPutResult: TEvPutResult {Id# [1:1:980:0:0:1760402:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999658} Step = 981 SEND TEvPut with key [1:1:981:0:0:1711812:0] TEvPutResult: TEvPutResult {Id# [1:1:981:0:0:1711812:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999658} Step = 982 SEND TEvPut with key [1:1:982:0:0:1422922:0] TEvPutResult: TEvPutResult {Id# [1:1:982:0:0:1422922:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999658} Step = 983 SEND TEvPut with key [1:1:983:0:0:2533122:0] TEvPutResult: TEvPutResult {Id# [1:1:983:0:0:2533122:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999658} Step = 984 SEND TEvPut with key [1:1:984:0:0:347759:0] TEvPutResult: TEvPutResult {Id# [1:1:984:0:0:347759:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999658} Step = 985 SEND TEvPut with key [1:1:985:0:0:1862506:0] TEvPutResult: TEvPutResult {Id# [1:1:985:0:0:1862506:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999658} Step = 986 SEND TEvPut with key [1:1:986:0:0:101043:0] TEvPutResult: TEvPutResult {Id# [1:1:986:0:0:101043:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999658} Step = 987 SEND TEvPut with key [1:1:987:0:0:672278:0] TEvPutResult: TEvPutResult {Id# [1:1:987:0:0:672278:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999658} Step = 988 SEND TEvPut with key [1:1:988:0:0:2042425:0] TEvPutResult: TEvPutResult {Id# [1:1:988:0:0:2042425:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999658} Step = 989 SEND TEvPut with key [1:1:989:0:0:1201477:0] TEvPutResult: TEvPutResult {Id# [1:1:989:0:0:1201477:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999658} Step = 990 SEND TEvPut with key [1:1:990:0:0:1724337:0] TEvPutResult: TEvPutResult {Id# [1:1:990:0:0:1724337:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999658} Step = 991 SEND TEvPut with key [1:1:991:0:0:2174403:0] TEvPutResult: TEvPutResult {Id# [1:1:991:0:0:2174403:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999658} Step = 992 SEND TEvPut with key [1:1:992:0:0:193000:0] TEvPutResult: TEvPutResult {Id# [1:1:992:0:0:193000:0] Status# OK 
StatusFlags# { } ApproximateFreeSpaceShare# 0.999658} Step = 993 SEND TEvPut with key [1:1:993:0:0:618508:0] TEvPutResult: TEvPutResult {Id# [1:1:993:0:0:618508:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999658} Step = 994 SEND TEvPut with key [1:1:994:0:0:2278246:0] TEvPutResult: TEvPutResult {Id# [1:1:994:0:0:2278246:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999658} Step = 995 SEND TEvPut with key [1:1:995:0:0:2001881:0] TEvPutResult: TEvPutResult {Id# [1:1:995:0:0:2001881:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999658} Step = 996 SEND TEvPut with key [1:1:996:0:0:1759634:0] TEvPutResult: TEvPutResult {Id# [1:1:996:0:0:1759634:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999658} Step = 997 SEND TEvPut with key [1:1:997:0:0:2469234:0] TEvPutResult: TEvPutResult {Id# [1:1:997:0:0:2469234:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999658} Step = 998 SEND TEvPut with key [1:1:998:0:0:1329395:0] TEvPutResult: TEvPutResult {Id# [1:1:998:0:0:1329395:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999658} Step = 999 SEND TEvPut with key [1:1:999:0:0:1243807:0] TEvPutResult: TEvPutResult {Id# [1:1:999:0:0:1243807:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999658} Starting nodes Start compaction 1 Start checking >> TPartitionChooserSuite::TPartitionChooserActor_SplitMergeEnabled_PreferedPartition_InactiveConfig_Test >> KqpPg::TableSelect+useSink >> KqpPg::V1CreateTable >> KqpPg::DropTableIfExists ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/server/ut/unittest >> TMessageBusServerPersQueueGetReadSessionsInfoMetaRequestTest::FailsOnNoClientSpecified [GOOD] Test command err: Assert failed: Check response: { Status: 128 ErrorReason: "no path \'Root/PQ\', Marker# PQ150" ErrorCode: UNKNOWN_TOPIC } Assert failed: Check response: { Status: 128 ErrorReason: "topic \'rt3.dc1--topic1\' is not created, Marker# PQ94" ErrorCode: UNKNOWN_TOPIC } Assert failed: Check response: { Status: 128 ErrorReason: "No clientId specified in CmdGetReadSessionsInfo" ErrorCode: BAD_REQUEST } |89.4%| [TA] {RESULT} $(B)/ydb/core/kqp/ut/sysview/test-results/unittest/{meta.json ... 
results_accumulator.log} >> KqpPg::InsertValuesFromTableWithDefaultTextNotNull-useSink [GOOD] >> KqpPg::InsertValuesFromTableWithDefaultTextNotNullButNull+useSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_backup/unittest >> TBackupTests::ShouldSucceedOnLargeData_MinWriteBatch [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:128:2058] recipient: [1:108:2140] 2025-05-07T08:50:35.344164Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:50:35.344259Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:50:35.344320Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:50:35.344358Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:50:35.344421Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:50:35.344454Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:50:35.344537Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:50:35.344612Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:50:35.345400Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:50:35.345840Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:50:35.443996Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:50:35.444099Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:50:35.461344Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:50:35.461482Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:50:35.461676Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:50:35.471922Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:50:35.472535Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 
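Most values in these logs use a "Key# Value" token convention (e.g. "Timeout# 600.000000s" in the schemeshard config dump above, or "Status# OK" and "ApproximateFreeSpaceShare# 0.999976" in the earlier TEvPutResult records). A minimal grep-style extractor for that convention — assuming whitespace-separated tokens and deliberately ignoring quoting and the surrounding { } braces, so not a faithful grammar for the full output — might look like:

#include <iostream>
#include <map>
#include <sstream>
#include <string>

// Collect "Key# Value" pairs from one log line. A token ending in '#' is
// treated as a key; the next whitespace-separated token is its value.
// Brace characters are not stripped, so "{Id#" yields the key "{Id".
std::map<std::string, std::string> ParseHashPairs(const std::string& line) {
    std::map<std::string, std::string> out;
    std::istringstream in(line);
    std::string tok;
    while (in >> tok) {
        if (tok.size() > 1 && tok.back() == '#') {
            std::string key = tok.substr(0, tok.size() - 1);
            std::string value;
            if (in >> value)
                out[key] = value;
        }
    }
    return out;
}

int main() {
    const std::string line =
        "TEvPutResult {Id# [1:1:5:0:0:502135:0] Status# OK "
        "ApproximateFreeSpaceShare# 0.999976}";
    for (const auto& [key, value] : ParseHashPairs(line))
        std::cout << key << " = " << value << "\n";
}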
2025-05-07T08:50:35.473490Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:50:35.473792Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:50:35.476206Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:50:35.477639Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:50:35.477697Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:50:35.477750Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:50:35.477795Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:50:35.477838Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:50:35.478073Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:50:35.485078Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:239:2058] recipient: [1:15:2062] 2025-05-07T08:50:35.733061Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:50:35.733337Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:50:35.733580Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:50:35.733829Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:50:35.733908Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:50:35.745460Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:50:35.745671Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: 
//MyRoot 2025-05-07T08:50:35.745938Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:50:35.746022Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:50:35.746076Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:50:35.746127Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:50:35.753911Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:50:35.754026Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:50:35.754083Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:50:35.759377Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:50:35.759462Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:50:35.759515Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:50:35.759655Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:50:35.764109Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:50:35.771787Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:50:35.772011Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:50:35.773156Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:50:35.773319Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 130 RawX2: 4294969450 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:50:35.773373Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:50:35.773715Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:50:35.773784Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:50:35.773997Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:50:35.774086Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:50:35.782790Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:50:35.782852Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:50:35.783108Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:50:35.783162Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 8:51:23.599775Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:412: TBackup TPropose, opId: 102:0 HandleReply TEvOperationPlan, stepId: 5000003, at schemeshard: 72057594046678944 2025-05-07T08:51:23.599912Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 102:0 128 -> 129 2025-05-07T08:51:23.600053Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-05-07T08:51:23.638764Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:783: [Export] [s3] Bootstrap: self# [1:3456:5420], attempt# 0 2025-05-07T08:51:23.665004Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:427: [Export] [s3] Handle TEvExportScan::TEvReady: self# [1:3456:5420], sender# [1:3455:5419] REQUEST: PUT /metadata.json HTTP/1.1 HEADERS: Host: localhost:23513 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: ACDF7DEE-2056-46B2-9CC3-7A4294BF7711 amz-sdk-request: attempt=1 content-length: 61 content-md5: 5ZuHSMjV1bVKZhThhMGD5g== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-storage-class: STANDARD S3_MOCK::HttpServeWrite: /metadata.json / / 61 2025-05-07T08:51:23.675289Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:387: [Export] [s3] HandleMetadata TEvExternalStorage::TEvPutObjectResponse: self# [1:3456:5420], result# PutObjectResult { ETag: e59b8748c8d5d5b54a6614e184c183e6 } REQUEST: PUT /scheme.pb HTTP/1.1 HEADERS: Host: localhost:23513 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 91B31022-6097-4C3F-96A2-249965CA5D80 
amz-sdk-request: attempt=1 content-length: 357 content-md5: csvC5nqNTZsSLy4ymlp0/Q== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-storage-class: STANDARD S3_MOCK::HttpServeWrite: /scheme.pb / / 357 FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000003 2025-05-07T08:51:23.680700Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:294: [Export] [s3] HandleScheme TEvExternalStorage::TEvPutObjectResponse: self# [1:3456:5420], result# PutObjectResult { ETag: 72cbc2e67a8d4d9b122f2e329a5a74fd } 2025-05-07T08:51:23.681033Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:51:23.681088Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 102, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-05-07T08:51:23.681366Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:51:23.681424Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:206:2208], at schemeshard: 72057594046678944, txId: 102, path id: 2 2025-05-07T08:51:23.681818Z node 1 :DATASHARD_BACKUP DEBUG: export_scan.cpp:130: [Export] [scanner] Handle TEvExportScan::TEvFeed: self# [1:3455:5419] 2025-05-07T08:51:23.682329Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:445: [Export] [s3] Handle TEvExportScan::TEvBuffer: self# [1:3456:5420], sender# [1:3455:5419], msg# NKikimr::NDataShard::TEvExportScan::TEvBuffer { Last: 1 Checksum: } 2025-05-07T08:51:23.682715Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-05-07T08:51:23.682779Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:258: TBackup TProposedWaitParts, opId: 102:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:51:23.684921Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 102 2025-05-07T08:51:23.685075Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 102 2025-05-07T08:51:23.685133Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 102 2025-05-07T08:51:23.685201Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 3 2025-05-07T08:51:23.685289Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-05-07T08:51:23.685432Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 102, ready parts: 0/1, is published: true REQUEST: PUT /data_00.csv.zst 
HTTP/1.1 HEADERS: Host: localhost:23513 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 97EBE7D1-69F5-471A-9299-C8CA97BC783E amz-sdk-request: attempt=1 content-length: 740 content-md5: P/a/uWmNWYxyRT1pAtAE7A== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-storage-class: STANDARD S3_MOCK::HttpServeWrite: /data_00.csv.zst / / 740 FAKE_COORDINATOR: Erasing txId 102 2025-05-07T08:51:23.686365Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:487: [Export] [s3] HandleData TEvExternalStorage::TEvPutObjectResponse: self# [1:3456:5420], result# PutObjectResult { ETag: 3ff6bfb9698d598c72453d6902d004ec } 2025-05-07T08:51:23.686417Z node 1 :DATASHARD_BACKUP INFO: export_s3_uploader.cpp:702: [Export] [s3] Finish: self# [1:3456:5420], success# 1, error# , multipart# 0, uploadId# (empty maybe) 2025-05-07T08:51:23.686622Z node 1 :DATASHARD_BACKUP DEBUG: export_scan.cpp:144: [Export] [scanner] Handle TEvExportScan::TEvFinish: self# [1:3455:5419], msg# NKikimr::NDataShard::TEvExportScan::TEvFinish { Success: 1 Error: } 2025-05-07T08:51:23.702142Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-05-07T08:51:23.739535Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5472: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 309 RawX2: 4294969592 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10000 RowsProcessed: 1000 } 2025-05-07T08:51:23.739614Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation FindRelatedPartByTabletId, TxId: 102, tablet: 72075186233409546, partId: 0 2025-05-07T08:51:23.739778Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 102:0, at schemeshard: 72057594046678944, message: Source { RawX1: 309 RawX2: 4294969592 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10000 RowsProcessed: 1000 } 2025-05-07T08:51:23.739887Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:233: TBackup TProposedWaitParts, opId: 102:0 HandleReply TEvSchemaChanged at tablet# 72057594046678944 message# Source { RawX1: 309 RawX2: 4294969592 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10000 RowsProcessed: 1000 } 2025-05-07T08:51:23.739955Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:655: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 102:0, shardIdx: 72057594046678944:1, datashard: 72075186233409546, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-05-07T08:51:23.740000Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:674: all shard schema changes has been received, operationId: 102:0, at schemeshard: 72057594046678944 2025-05-07T08:51:23.740042Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:686: send schema changes ack message, operation: 102:0, datashard: 72075186233409546, at schemeshard: 72057594046678944 2025-05-07T08:51:23.740112Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard_impl.cpp:2492: Change state for txid 102:0 129 -> 240 2025-05-07T08:51:23.740289Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:116: Unable to make a bill: kind# TBackup, opId# 102:0, reason# domain is not a serverless db, domain# /MyRoot, domainPathId# [OwnerId: 72057594046678944, LocalPathId: 1], IsDomainSchemeShard: 1, ParentDomainId: [OwnerId: 72057594046678944, LocalPathId: 1], ResourcesDomainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:51:23.744218Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-05-07T08:51:23.744631Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-05-07T08:51:23.744693Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046678944] TDone opId# 102:0 ProgressState 2025-05-07T08:51:23.744818Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#102:0 progress is 1/1 2025-05-07T08:51:23.744856Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-05-07T08:51:23.744898Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#102:0 progress is 1/1 2025-05-07T08:51:23.744928Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-05-07T08:51:23.744967Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: true 2025-05-07T08:51:23.745046Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:337:2316] message: TxId: 102 2025-05-07T08:51:23.745103Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-05-07T08:51:23.745151Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 102:0 2025-05-07T08:51:23.745182Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 102:0 2025-05-07T08:51:23.745336Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-05-07T08:51:23.750010Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-05-07T08:51:23.750095Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:3441:5406] TestWaitNotification: OK eventTxId 102 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/unittest >> TPartitionTests::DataTxCalcPredicateOrder [GOOD] Test command err: 2025-05-07T08:51:04.628218Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-05-07T08:51:04.628311Z node 1 :PERSQUEUE INFO: pq_impl.cpp:794: [PQ: 72057594037927937] doesn't have tx writes info 2025-05-07T08:51:04.655720Z node 1 :PERSQUEUE INFO: partition_init.cpp:879: [PQ: 72057594037927937, Partition: 3, State: StateInit] 
bootstrapping 3 [1:179:2194] 2025-05-07T08:51:04.657784Z node 1 :PERSQUEUE INFO: partition_init.cpp:784: [Root/PQ/rt3.dc1--account--topic:3:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp from keys completed. Value 2025-05-07T08:51:04.000000Z 2025-05-07T08:51:04.657896Z node 1 :PERSQUEUE INFO: partition.cpp:557: [PQ: 72057594037927937, Partition: 3, State: StateInit] init complete for topic 'Root/PQ/rt3.dc1--account--topic' partition 3 generation 0 [1:179:2194] Got cmd write: CmdWrite { Key: "i0000000003" Value: "\010\000\020\n\030\000(\300\361\303\317\3522" StorageChannel: INLINE } CmdWrite { Key: "m0000000003cclient" Value: "\010\000\020\001\030\001\"\007session(\0000\001@\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000003uclient" Value: "\000\000\000\000\000\000\000\000\001\000\000\000\001\000\000\000session" StorageChannel: INLINE } Got cmd write: CmdWrite { Key: "i0000000003" Value: "\010\000\020\n\030\000(\300\361\303\317\3522" StorageChannel: INLINE } CmdWrite { Key: "I0000000003" Value: "\010\271`\020\264\222\004" StorageChannel: INLINE } CmdWrite { Key: "m0000000003cclient" Value: "\010\001\020\001\030\001\"\007session(\0000\001@\001" StorageChannel: INLINE } CmdWrite { Key: "m0000000003uclient" Value: "\001\000\000\000\000\000\000\000\001\000\000\000\001\000\000\000session" StorageChannel: INLINE } 2025-05-07T08:51:05.657033Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-05-07T08:51:05.657112Z node 2 :PERSQUEUE INFO: pq_impl.cpp:794: [PQ: 72057594037927937] doesn't have tx writes info 2025-05-07T08:51:05.674857Z node 2 :PERSQUEUE INFO: partition_init.cpp:879: [PQ: 72057594037927937, Partition: 3, State: StateInit] bootstrapping 3 [2:177:2192] 2025-05-07T08:51:05.676648Z node 2 :PERSQUEUE INFO: partition_init.cpp:784: [Root/PQ/rt3.dc1--account--topic:3:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp from keys completed. 
Value 2025-05-07T08:51:05.000000Z 2025-05-07T08:51:05.676723Z node 2 :PERSQUEUE INFO: partition.cpp:557: [PQ: 72057594037927937, Partition: 3, State: StateInit] init complete for topic 'Root/PQ/rt3.dc1--account--topic' partition 3 generation 0 [2:177:2192] Got cmd write: CmdWrite { Key: "i0000000003" Value: "\010\000\020\n\030\000(\250\371\303\317\3522" StorageChannel: INLINE } CmdWrite { Key: "m0000000003cclient" Value: "\010\000\020\001\030\001\"\007session(\0000\001@\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000003uclient" Value: "\000\000\000\000\000\000\000\000\001\000\000\000\001\000\000\000session" StorageChannel: INLINE } 2025-05-07T08:51:06.498352Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-05-07T08:51:06.498465Z node 3 :PERSQUEUE INFO: pq_impl.cpp:794: [PQ: 72057594037927937] doesn't have tx writes info 2025-05-07T08:51:06.514939Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitConfigStep Got KV request 2025-05-07T08:51:06.515115Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitInternalFieldsStep 2025-05-07T08:51:06.515320Z node 3 :PERSQUEUE INFO: partition_init.cpp:879: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [3:178:2193] 2025-05-07T08:51:06.515933Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitDiskStatusStep Got KV request 2025-05-07T08:51:06.516046Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitMetaStep Got KV request 2025-05-07T08:51:06.516168Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitInfoRangeStep Got KV request 2025-05-07T08:51:06.516301Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitDataRangeStep Got KV request 2025-05-07T08:51:06.516598Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:620: [Root/PQ/rt3.dc1--account--topic:0:TInitDataRangeStep] Got data offset 0 count 50 size 684 so 0 eo 50 d0000000000_00000000000000000000_00000_0000000050_00000 2025-05-07T08:51:06.516691Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitDataStep 2025-05-07T08:51:06.516721Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitEndWriteTimestampStep 2025-05-07T08:51:06.516754Z node 3 :PERSQUEUE INFO: partition_init.cpp:784: [Root/PQ/rt3.dc1--account--topic:0:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp from keys completed. Value 2025-05-07T08:51:06.000000Z 2025-05-07T08:51:06.516785Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:55: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Initializing completed. 
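The CmdWrite records above persist keys such as "i0000000003", "m0000000003cclient" and "m0000000003uclient": judging purely from the log, a one-character type prefix, a ten-digit zero-padded partition id, and an optional suffix naming the consumer record. A hypothetical helper reproducing that shape — an inference from these entries, not YDB's actual key-building code:

#include <cstdio>
#include <string>

// Builds keys of the shape seen in the CmdWrite entries:
// <type char><10-digit zero-padded partition id><optional suffix>.
// The meaning of the prefixes ('i', 'm', 'I', ...) is an assumption drawn
// from the log, not from the YDB sources.
std::string MakePartitionKey(char type, unsigned partition,
                             const std::string& suffix = "") {
    char padded[16];
    std::snprintf(padded, sizeof(padded), "%010u", partition);
    return std::string(1, type) + padded + suffix;
}

int main() {
    std::printf("%s\n", MakePartitionKey('i', 3).c_str());            // i0000000003
    std::printf("%s\n", MakePartitionKey('m', 3, "cclient").c_str()); // m0000000003cclient
    std::printf("%s\n", MakePartitionKey('m', 3, "uclient").c_str()); // m0000000003uclient
}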
2025-05-07T08:51:06.516817Z node 3 :PERSQUEUE INFO: partition.cpp:557: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'Root/PQ/rt3.dc1--account--topic' partition 0 generation 0 [3:178:2193] 2025-05-07T08:51:06.516860Z node 3 :PERSQUEUE DEBUG: partition.cpp:571: [PQ: 72057594037927937, Partition: 0, State: StateInit] SYNC INIT topic Root/PQ/rt3.dc1--account--topic partitition 0 so 0 endOffset 50 Head Offset 50 PartNo 0 PackedSize 0 count 0 nextOffset 50 batches 0 SYNC INIT DATA KEY: d0000000000_00000000000000000000_00000_0000000050_00000 size 684 2025-05-07T08:51:06.516898Z node 3 :PERSQUEUE DEBUG: partition.cpp:3847: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Process pending events. Count 0 2025-05-07T08:51:07.823599Z node 3 :PERSQUEUE DEBUG: partition.cpp:3264: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition 0 user client session is set to 0 (startOffset 0) session session 2025-05-07T08:51:07.823790Z node 3 :PERSQUEUE DEBUG: partition.cpp:2182: [PQ: 72057594037927937, Partition: 0, State: StateIdle] === DumpKeyValueRequest === 2025-05-07T08:51:07.823845Z node 3 :PERSQUEUE DEBUG: partition.cpp:2183: [PQ: 72057594037927937, Partition: 0, State: StateIdle] --- delete ---------------- 2025-05-07T08:51:07.823889Z node 3 :PERSQUEUE DEBUG: partition.cpp:2191: [PQ: 72057594037927937, Partition: 0, State: StateIdle] --- write ----------------- 2025-05-07T08:51:07.823927Z node 3 :PERSQUEUE DEBUG: partition.cpp:2194: [PQ: 72057594037927937, Partition: 0, State: StateIdle] i0000000000 2025-05-07T08:51:07.823966Z node 3 :PERSQUEUE DEBUG: partition.cpp:2194: [PQ: 72057594037927937, Partition: 0, State: StateIdle] m0000000000cclient 2025-05-07T08:51:07.823990Z node 3 :PERSQUEUE DEBUG: partition.cpp:2194: [PQ: 72057594037927937, Partition: 0, State: StateIdle] m0000000000uclient 2025-05-07T08:51:07.824025Z node 3 :PERSQUEUE DEBUG: partition.cpp:2196: [PQ: 72057594037927937, Partition: 0, State: StateIdle] --- rename ---------------- 2025-05-07T08:51:07.824061Z node 3 :PERSQUEUE DEBUG: partition.cpp:2201: [PQ: 72057594037927937, Partition: 0, State: StateIdle] =========================== Got KV request Got batch complete: 1 Got KV request Got KV request Got cmd write: CmdWrite { Key: "i0000000000" Value: "\010\000\0202\030\000(\220\201\304\317\3522" StorageChannel: INLINE } CmdWrite { Key: "m0000000000cclient" Value: "\010\000\020\001\030\001\"\007session(\0000\001@\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000000uclient" Value: "\000\000\000\000\000\000\000\000\001\000\000\000\001\000\000\000session" StorageChannel: INLINE } 2025-05-07T08:51:07.852773Z node 3 :PERSQUEUE DEBUG: partition_write.cpp:524: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0 Create distr tx with id = 0 and act no: 1 2025-05-07T08:51:07.853081Z node 3 :PERSQUEUE DEBUG: partition.cpp:1124: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCalcPredicate Step 1, TxId 0 Wait first predicate result 2025-05-07T08:51:09.138561Z node 3 :PERSQUEUE DEBUG: partition.cpp:1360: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvGetWriteInfoResponse Got batch complete: 1 Create distr tx with id = 2 and act no: 3 2025-05-07T08:51:09.138921Z node 3 :PERSQUEUE DEBUG: partition.cpp:1124: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCalcPredicate Step 1, TxId 2 Wait 
second predicate result 2025-05-07T08:51:10.471488Z node 3 :PERSQUEUE DEBUG: partition.cpp:1360: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvGetWriteInfoResponse 2025-05-07T08:51:10.471590Z node 3 :PERSQUEUE DEBUG: partition.cpp:1170: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCommit Step 1, TxId 0 2025-05-07T08:51:10.471657Z node 3 :PERSQUEUE DEBUG: partition.cpp:2421: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::CommitWriteOperations TxId: 0 2025-05-07T08:51:10.471736Z node 3 :PERSQUEUE DEBUG: partition.cpp:2448: [PQ: 72057594037927937, Partition: 0, State: StateIdle] t.WriteInfo->BodyKeys.size=0, t.WriteInfo->BlobsFromHead.size=0 2025-05-07T08:51:10.471797Z node 3 :PERSQUEUE DEBUG: partition.cpp:2449: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Head=Offset 50 PartNo 0 PackedSize 0 count 0 nextOffset 50 batches 0, NewHead=Offset 50 PartNo 0 PackedSize 0 count 0 nextOffset 50 batches 0 Got batch complete: 1 2025-05-07T08:51:10.472021Z node 3 :PERSQUEUE DEBUG: partition.cpp:1170: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCommit Step 1, TxId 2 2025-05-07T08:51:10.472075Z node 3 :PERSQUEUE DEBUG: partition.cpp:2421: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::CommitWriteOperations TxId: 2 2025-05-07T08:51:10.472129Z node 3 :PERSQUEUE DEBUG: partition.cpp:2448: [PQ: 72057594037927937, Partition: 0, State: StateIdle] t.WriteInfo->BodyKeys.size=0, t.WriteInfo->BlobsFromHead.size=0 2025-05-07T08:51:10.472188Z node 3 :PERSQUEUE DEBUG: partition.cpp:2449: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Head=Offset 50 PartNo 0 PackedSize 0 count 0 nextOffset 50 batches 0, NewHead=Offset 50 PartNo 0 PackedSize 0 count 0 nextOffset 50 batches 0 2025-05-07T08:51:10.472387Z node 3 :PERSQUEUE DEBUG: partition.cpp:2182: [PQ: 72057594037927937, Partition: 0, State: StateIdle] === DumpKeyValueRequest === 2025-05-07T08:51:10.472432Z node 3 :PERSQUEUE DEBUG: partition.cpp:2183: [PQ: 72057594037927937, Partition: 0, State: StateIdle] --- delete ---------------- 2025-05-07T08:51:10.472475Z node 3 :PERSQUEUE DEBUG: partition.cpp:2191: [PQ: 72057594037927937, Partition: 0, State: StateIdle] --- write ----------------- 2025-05-07T08:51:10.472556Z node 3 :PERSQUEUE DEBUG: partition.cpp:2194: [PQ: 72057594037927937, Partition: 0, State: StateIdle] m0000000000psrc1 2025-05-07T08:51:10.472594Z node 3 :PERSQUEUE DEBUG: partition.cpp:2194: [PQ: 72057594037927937, Partition: 0, State: StateIdle] i0000000000 2025-05-07T08:51:10.472634Z node 3 :PERSQUEUE DEBUG: partition.cpp:2194: [PQ: 72057594037927937, Partition: 0, State: StateIdle] i0000000000 2025-05-07T08:51:10.472664Z node 3 :PERSQUEUE DEBUG: partition.cpp:2194: [PQ: 72057594037927937, Partition: ... : partition.cpp:557: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'Root/PQ/rt3.dc1--account--topic' partition 0 generation 0 [4:178:2193] 2025-05-07T08:51:12.259420Z node 4 :PERSQUEUE DEBUG: partition.cpp:571: [PQ: 72057594037927937, Partition: 0, State: StateInit] SYNC INIT topic Root/PQ/rt3.dc1--account--topic partitition 0 so 0 endOffset 1 Head Offset 1 PartNo 0 PackedSize 0 count 0 nextOffset 1 batches 0 SYNC INIT DATA KEY: d0000000000_00000000000000000000_00000_0000000001_00000 size 684 2025-05-07T08:51:12.259454Z node 4 :PERSQUEUE DEBUG: partition.cpp:3847: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Process pending events. 
Count 0 2025-05-07T08:51:13.582480Z node 4 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie SourceId|b3a4bdde-e8dd5b01-259b09d7-8762c2d8_0 generated for partition 0 topic 'Root/PQ/rt3.dc1--account--topic' owner SourceId 2025-05-07T08:51:13.582647Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:35: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::ReplyOwnerOk. Partition: 0 Got batch complete: 1 Wait write response Wait kv request 2025-05-07T08:51:13.582986Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:1233: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition 0 part blob processing sourceId 'SourceId' seqNo 4 partNo 0 2025-05-07T08:51:13.583985Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:1333: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition 0 part blob complete sourceId 'SourceId' seqNo 4 partNo 0 FormedBlobsCount 0 NewHead: Offset 11 PartNo 0 PackedSize 118 count 1 nextOffset 12 batches 1 2025-05-07T08:51:13.584592Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:1623: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Add new write blob: topic 'Root/PQ/rt3.dc1--account--topic' partition 0 compactOffset 11,1 HeadOffset 1 endOffset 1 curOffset 12 d0000000000_00000000000000000011_00000_0000000001_00000| size 104 WTime 5132 2025-05-07T08:51:13.584745Z node 4 :PERSQUEUE DEBUG: partition.cpp:2182: [PQ: 72057594037927937, Partition: 0, State: StateIdle] === DumpKeyValueRequest === 2025-05-07T08:51:13.584790Z node 4 :PERSQUEUE DEBUG: partition.cpp:2183: [PQ: 72057594037927937, Partition: 0, State: StateIdle] --- delete ---------------- 2025-05-07T08:51:13.584829Z node 4 :PERSQUEUE DEBUG: partition.cpp:2189: [PQ: 72057594037927937, Partition: 0, State: StateIdle] [x0000000000, x0000000001) 2025-05-07T08:51:13.584869Z node 4 :PERSQUEUE DEBUG: partition.cpp:2191: [PQ: 72057594037927937, Partition: 0, State: StateIdle] --- write ----------------- 2025-05-07T08:51:13.584910Z node 4 :PERSQUEUE DEBUG: partition.cpp:2194: [PQ: 72057594037927937, Partition: 0, State: StateIdle] m0000000000pSourceId 2025-05-07T08:51:13.584943Z node 4 :PERSQUEUE DEBUG: partition.cpp:2194: [PQ: 72057594037927937, Partition: 0, State: StateIdle] d0000000000_00000000000000000011_00000_0000000001_00000| 2025-05-07T08:51:13.584968Z node 4 :PERSQUEUE DEBUG: partition.cpp:2194: [PQ: 72057594037927937, Partition: 0, State: StateIdle] i0000000000 2025-05-07T08:51:13.584998Z node 4 :PERSQUEUE DEBUG: partition.cpp:2196: [PQ: 72057594037927937, Partition: 0, State: StateIdle] --- rename ---------------- 2025-05-07T08:51:13.585036Z node 4 :PERSQUEUE DEBUG: partition.cpp:2201: [PQ: 72057594037927937, Partition: 0, State: StateIdle] =========================== Got KV request Got batch complete: 1 Got KV request Got KV request 2025-05-07T08:51:13.610295Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:524: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 22 WriteNewSizeFromSupportivePartitions# 0 2025-05-07T08:51:13.610407Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:58: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::ReplyWrite. 
Partition: 0 2025-05-07T08:51:13.610516Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:324: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Answering for message sourceid: 'SourceId', Topic: 'Root/PQ/rt3.dc1--account--topic', Partition: 0, SeqNo: 4, partNo: 0, Offset: 11 is stored on disk Wait second predicate result Create distr tx with id = 0 and act no: 1 2025-05-07T08:51:13.610961Z node 4 :PERSQUEUE DEBUG: partition.cpp:1124: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCalcPredicate Step 1, TxId 0 2025-05-07T08:51:14.947665Z node 4 :PERSQUEUE DEBUG: partition.cpp:1360: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvGetWriteInfoResponse Got batch complete: 1 2025-05-07T08:51:15.335720Z node 5 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-05-07T08:51:15.335832Z node 5 :PERSQUEUE INFO: pq_impl.cpp:794: [PQ: 72057594037927937] doesn't have tx writes info 2025-05-07T08:51:15.359249Z node 5 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitConfigStep Got KV request 2025-05-07T08:51:15.359717Z node 5 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitInternalFieldsStep 2025-05-07T08:51:15.360018Z node 5 :PERSQUEUE INFO: partition_init.cpp:879: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [5:178:2193] 2025-05-07T08:51:15.361070Z node 5 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitDiskStatusStep Got KV request 2025-05-07T08:51:15.361302Z node 5 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitMetaStep Got KV request 2025-05-07T08:51:15.361488Z node 5 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitInfoRangeStep Got KV request Got KV request 2025-05-07T08:51:15.361777Z node 5 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitDataRangeStep Got KV request 2025-05-07T08:51:15.362264Z node 5 :PERSQUEUE DEBUG: partition_init.cpp:620: [Root/PQ/rt3.dc1--account--topic:0:TInitDataRangeStep] Got data offset 0 count 50 size 684 so 0 eo 50 d0000000000_00000000000000000000_00000_0000000050_00000 2025-05-07T08:51:15.362377Z node 5 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitDataStep 2025-05-07T08:51:15.362424Z node 5 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitEndWriteTimestampStep 2025-05-07T08:51:15.362480Z node 5 :PERSQUEUE INFO: partition_init.cpp:784: [Root/PQ/rt3.dc1--account--topic:0:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp from keys completed. Value 2025-05-07T08:51:15.000000Z 2025-05-07T08:51:15.362522Z node 5 :PERSQUEUE DEBUG: partition_init.cpp:55: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Initializing completed. 
2025-05-07T08:51:15.362599Z node 5 :PERSQUEUE INFO: partition.cpp:557: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'Root/PQ/rt3.dc1--account--topic' partition 0 generation 0 [5:178:2193] 2025-05-07T08:51:15.362689Z node 5 :PERSQUEUE DEBUG: partition.cpp:571: [PQ: 72057594037927937, Partition: 0, State: StateInit] SYNC INIT topic Root/PQ/rt3.dc1--account--topic partitition 0 so 0 endOffset 50 Head Offset 50 PartNo 0 PackedSize 0 count 0 nextOffset 50 batches 0 SYNC INIT DATA KEY: d0000000000_00000000000000000000_00000_0000000050_00000 size 684 2025-05-07T08:51:15.362761Z node 5 :PERSQUEUE DEBUG: partition.cpp:3847: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Process pending events. Count 0 Create distr tx with id = 0 and act no: 1 2025-05-07T08:51:16.653004Z node 5 :PERSQUEUE DEBUG: partition.cpp:1124: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCalcPredicate Step 1, TxId 0 2025-05-07T08:51:18.054205Z node 5 :PERSQUEUE DEBUG: partition.cpp:1360: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvGetWriteInfoResponse Got batch complete: 1 Create distr tx with id = 2 and act no: 3 2025-05-07T08:51:18.054539Z node 5 :PERSQUEUE DEBUG: partition.cpp:1124: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCalcPredicate Step 1, TxId 2 2025-05-07T08:51:18.054671Z node 5 :PERSQUEUE DEBUG: partition.cpp:1170: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCommit Step 1, TxId 0 2025-05-07T08:51:18.054737Z node 5 :PERSQUEUE DEBUG: partition.cpp:2421: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::CommitWriteOperations TxId: 0 2025-05-07T08:51:18.054823Z node 5 :PERSQUEUE DEBUG: partition.cpp:2448: [PQ: 72057594037927937, Partition: 0, State: StateIdle] t.WriteInfo->BodyKeys.size=0, t.WriteInfo->BlobsFromHead.size=0 2025-05-07T08:51:18.054884Z node 5 :PERSQUEUE DEBUG: partition.cpp:2449: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Head=Offset 50 PartNo 0 PackedSize 0 count 0 nextOffset 50 batches 0, NewHead=Offset 50 PartNo 0 PackedSize 0 count 0 nextOffset 50 batches 0 2025-05-07T08:51:19.383745Z node 5 :PERSQUEUE DEBUG: partition.cpp:1360: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvGetWriteInfoResponse Got batch complete: 1 2025-05-07T08:51:19.384015Z node 5 :PERSQUEUE DEBUG: partition.cpp:1170: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCommit Step 1, TxId 2 2025-05-07T08:51:19.384078Z node 5 :PERSQUEUE DEBUG: partition.cpp:2421: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::CommitWriteOperations TxId: 2 2025-05-07T08:51:19.384139Z node 5 :PERSQUEUE DEBUG: partition.cpp:2448: [PQ: 72057594037927937, Partition: 0, State: StateIdle] t.WriteInfo->BodyKeys.size=0, t.WriteInfo->BlobsFromHead.size=0 2025-05-07T08:51:19.384204Z node 5 :PERSQUEUE DEBUG: partition.cpp:2449: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Head=Offset 50 PartNo 0 PackedSize 0 count 0 nextOffset 50 batches 0, NewHead=Offset 50 PartNo 0 PackedSize 0 count 0 nextOffset 50 batches 0 2025-05-07T08:51:19.384445Z node 5 :PERSQUEUE DEBUG: partition.cpp:2182: [PQ: 72057594037927937, Partition: 0, State: StateIdle] === DumpKeyValueRequest === 2025-05-07T08:51:19.384499Z node 5 :PERSQUEUE DEBUG: partition.cpp:2183: [PQ: 72057594037927937, Partition: 0, State: StateIdle] --- delete ---------------- 2025-05-07T08:51:19.384550Z node 5 :PERSQUEUE DEBUG: 
partition.cpp:2191: [PQ: 72057594037927937, Partition: 0, State: StateIdle] --- write ----------------- 2025-05-07T08:51:19.384610Z node 5 :PERSQUEUE DEBUG: partition.cpp:2194: [PQ: 72057594037927937, Partition: 0, State: StateIdle] m0000000000psrc1 2025-05-07T08:51:19.384647Z node 5 :PERSQUEUE DEBUG: partition.cpp:2194: [PQ: 72057594037927937, Partition: 0, State: StateIdle] i0000000000 2025-05-07T08:51:19.384681Z node 5 :PERSQUEUE DEBUG: partition.cpp:2194: [PQ: 72057594037927937, Partition: 0, State: StateIdle] i0000000000 2025-05-07T08:51:19.384715Z node 5 :PERSQUEUE DEBUG: partition.cpp:2194: [PQ: 72057594037927937, Partition: 0, State: StateIdle] I0000000000 2025-05-07T08:51:19.384759Z node 5 :PERSQUEUE DEBUG: partition.cpp:2196: [PQ: 72057594037927937, Partition: 0, State: StateIdle] --- rename ---------------- 2025-05-07T08:51:19.384816Z node 5 :PERSQUEUE DEBUG: partition.cpp:2201: [PQ: 72057594037927937, Partition: 0, State: StateIdle] =========================== Got KV request Send disk status response with cookie: 0 Wait tx committed for tx 0 2025-05-07T08:51:19.397396Z node 5 :PERSQUEUE DEBUG: partition_write.cpp:524: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 2 Wait tx committed for tx 2 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_incremental_backup/unittest >> IncrementalBackup::SimpleBackupBackupCollection-WithIncremental [GOOD] Test command err: 2025-05-07T08:51:13.776434Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:105:2151], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:51:13.776607Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:51:13.776923Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00319f/r3tmp/tmpH48J3V/pdisk_1.dat 2025-05-07T08:51:14.231951Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269877761, Sender [1:597:2521], Recipient [1:410:2405]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-05-07T08:51:14.232040Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4937: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-05-07T08:51:14.232112Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5713: Pipe server connected, at tablet: 72057594046644480 2025-05-07T08:51:14.232253Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271122432, Sender [1:594:2519], Recipient [1:410:2405]: {TEvModifySchemeTransaction txid# 1 TabletId# 72057594046644480} 2025-05-07T08:51:14.232284Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4851: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-05-07T08:51:14.370613Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "Root" StoragePools { Name: "/Root:test" Kind: "test" } } } TxId: 1 TabletId: 72057594046644480 , at schemeshard: 72057594046644480 2025-05-07T08:51:14.370917Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //Root, opId: 1:0, at schemeshard: 72057594046644480 2025-05-07T08:51:14.371213Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 0 2025-05-07T08:51:14.371464Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-05-07T08:51:14.371594Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-05-07T08:51:14.371706Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-05-07T08:51:14.372498Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046644480 PathId: 1, at schemeshard: 72057594046644480 2025-05-07T08:51:14.372691Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //Root 2025-05-07T08:51:14.372752Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 
72057594046644480 2025-05-07T08:51:14.372791Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:275: Activate send for 1:0 2025-05-07T08:51:14.373017Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 2146435072, Sender [1:410:2405], Recipient [1:410:2405]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-05-07T08:51:14.373073Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4857: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-05-07T08:51:14.384999Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046644480 2025-05-07T08:51:14.385112Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046644480 2025-05-07T08:51:14.385158Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:51:14.385198Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:51:14.385321Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-05-07T08:51:14.385847Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-05-07T08:51:14.385892Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:275: Activate send for 1:0 2025-05-07T08:51:14.386059Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 2146435072, Sender [1:410:2405], Recipient [1:410:2405]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-05-07T08:51:14.386104Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4857: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-05-07T08:51:14.386182Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046644480 2025-05-07T08:51:14.386241Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046644480 2025-05-07T08:51:14.386281Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:51:14.386360Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-05-07T08:51:14.386801Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-05-07T08:51:14.386834Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:275: Activate send for 1:0 2025-05-07T08:51:14.386947Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 2146435072, Sender [1:410:2405], Recipient [1:410:2405]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-05-07T08:51:14.386976Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4857: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-05-07T08:51:14.387012Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at 
schemeshard: 72057594046644480 2025-05-07T08:51:14.387041Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046644480 2025-05-07T08:51:14.402881Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046644480 2025-05-07T08:51:14.402998Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-05-07T08:51:14.419108Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:51:14.432253Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046644480 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:51:14.432791Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-05-07T08:51:14.432841Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046644480 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:51:14.433051Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 2025-05-07T08:51:14.434357Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269877760, Sender [1:602:2526], Recipient [1:410:2405]: NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594046316545 Status: OK ServerId: [1:604:2527] Leader: 1 Dead: 0 Generation: 2 VersionInfo: } 2025-05-07T08:51:14.434415Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4935: StateWork, processing event TEvTabletPipe::TEvClientConnected 2025-05-07T08:51:14.434456Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5663: Handle TEvClientConnected, tabletId: 72057594046316545, status: OK, at schemeshard: 72057594046644480 2025-05-07T08:51:14.434639Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269091328, Sender [1:406:2401], Recipient [1:410:2405]: NKikimrTx.TEvProposeTransactionStatus Status: 16 StepId: 500 TxId: 1 2025-05-07T08:51:14.435089Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269877761, Sender [1:606:2529], Recipient [1:410:2405]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-05-07T08:51:14.435141Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4937: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-05-07T08:51:14.435179Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5713: Pipe server connected, at tablet: 72057594046644480 2025-05-07T08:51:14.435314Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271124996, Sender [1:594:2519], Recipient [1:410:2405]: NKikimrScheme.TEvNotifyTxCompletion TxId: 1 2025-05-07T08:51:14.435347Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4853: StateWork, processing event TEvSchemeShard::TEvNotifyTxCompletion 2025-05-07T08:51:14.435426Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 1, at schemeshard: 72057594046644480 2025-05-07T08:51:14.435464Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 1, ready parts: 0/1, is published: true 2025-05-07T08:51:14.435500Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 1, at schemeshard: 72057594046644480 2025-05-07T08:51:14.471185Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 273285138, Sender [1:43:2090], Recipient [1:410:2405]: NKikimr::NConsole::TEvConsole::TEvConfigNotificationRequest { Config { QueryServiceConfig { AvailableExternalDataSources: "ObjectStorage" } } ItemKinds: 26 ItemKinds: 34 ItemKinds: 52 ItemKinds: 54 ItemKinds: 73 Local: true } 2025-05-07T08:51:14.471307Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7610: Got new config: QueryServiceConfig { AvailableExternalDataSources: "ObjectStorage" } 2025-05-07T08:51:14.471347Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-0 ... essage: Source { RawX1: 664 RawX2: 12884904456 } Origin: 72075186224037888 State: 2 TxId: 281474976715662 Step: 0 Generation: 1 2025-05-07T08:51:28.509260Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation FindRelatedPartByTabletId, TxId: 281474976715662, tablet: 72075186224037888, partId: 1 2025-05-07T08:51:28.509381Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 281474976715662:1, at schemeshard: 72057594046644480, message: Source { RawX1: 664 RawX2: 12884904456 } Origin: 72075186224037888 State: 2 TxId: 281474976715662 Step: 0 Generation: 1 2025-05-07T08:51:28.509418Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1005: NTableState::TProposedWaitParts operationId# 281474976715662:1 HandleReply TEvSchemaChanged at tablet: 72057594046644480 2025-05-07T08:51:28.509474Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1009: NTableState::TProposedWaitParts operationId# 281474976715662:1 HandleReply TEvSchemaChanged at tablet: 72057594046644480 message: Source { RawX1: 664 RawX2: 12884904456 } Origin: 72075186224037888 State: 2 TxId: 281474976715662 Step: 0 Generation: 1 2025-05-07T08:51:28.509518Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:655: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 281474976715662:1, shardIdx: 72057594046644480:1, datashard: 72075186224037888, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046644480 2025-05-07T08:51:28.509552Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:674: all shard schema changes has been received, operationId: 281474976715662:1, at schemeshard: 72057594046644480 2025-05-07T08:51:28.509595Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:686: send schema changes ack message, operation: 281474976715662:1, datashard: 72075186224037889, at schemeshard: 72057594046644480 2025-05-07T08:51:28.509633Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:686: send schema changes ack message, operation: 281474976715662:1, datashard: 72075186224037888, at schemeshard: 72057594046644480 2025-05-07T08:51:28.509659Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 
281474976715662:1 129 -> 240 2025-05-07T08:51:28.509766Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-05-07T08:51:28.510231Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 281474976715662:1, at schemeshard: 72057594046644480 2025-05-07T08:51:28.510258Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-05-07T08:51:28.510287Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:275: Activate send for 281474976715662:1 2025-05-07T08:51:28.510384Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:630: Send to actor: [3:929:2733] msg type: 269552132 msg: NKikimrTxDataShard.TEvSchemaChangedResult TxId: 281474976715662 at schemeshard: 72057594046644480 2025-05-07T08:51:28.510440Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:630: Send to actor: [3:664:2568] msg type: 269552132 msg: NKikimrTxDataShard.TEvSchemaChangedResult TxId: 281474976715662 at schemeshard: 72057594046644480 2025-05-07T08:51:28.510530Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715662 datashard 72075186224037888 state Ready 2025-05-07T08:51:28.510589Z node 3 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-05-07T08:51:28.510762Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715662 datashard 72075186224037889 state Ready 2025-05-07T08:51:28.510793Z node 3 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037889 Got TEvSchemaChangedResult from SS at 72075186224037889 2025-05-07T08:51:28.510905Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 2146435072, Sender [3:419:2412], Recipient [3:419:2412]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-05-07T08:51:28.510932Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4857: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-05-07T08:51:28.510972Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 281474976715662:1, at schemeshard: 72057594046644480 2025-05-07T08:51:28.511018Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_copy_table.cpp:306: TCopyTable TCopyTableBarrier operationId: 281474976715662:1ProgressState, operation type TxCopyTable 2025-05-07T08:51:28.511058Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-05-07T08:51:28.511105Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:1059: Set barrier, OperationId: 281474976715662:1, name: CopyTableBarrier, done: 1, blocked: 1, parts count: 2 2025-05-07T08:51:28.511148Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:1103: All parts have reached barrier, tx: 281474976715662, done: 1, blocked: 1 2025-05-07T08:51:28.511235Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_copy_table.cpp:289: TCopyTable TCopyTableBarrier operationId: 281474976715662:1 HandleReply TEvPrivate::TEvCompleteBarrier, msg: NKikimr::NSchemeShard::TEvPrivate::TEvCompleteBarrier { TxId: 281474976715662 Name: CopyTableBarrier }, at tablet# 72057594046644480 
2025-05-07T08:51:28.511268Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 281474976715662:1 240 -> 240 2025-05-07T08:51:28.511729Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-05-07T08:51:28.511763Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:275: Activate send for 281474976715662:1 2025-05-07T08:51:28.511849Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 2146435072, Sender [3:419:2412], Recipient [3:419:2412]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-05-07T08:51:28.511873Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4857: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-05-07T08:51:28.511914Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 281474976715662:1, at schemeshard: 72057594046644480 2025-05-07T08:51:28.511952Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046644480] TDone opId# 281474976715662:1 ProgressState 2025-05-07T08:51:28.512067Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-05-07T08:51:28.512093Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976715662:1 progress is 2/2 2025-05-07T08:51:28.512128Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 281474976715662 ready parts: 2/2 2025-05-07T08:51:28.512178Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976715662:1 progress is 2/2 2025-05-07T08:51:28.512218Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 281474976715662 ready parts: 2/2 2025-05-07T08:51:28.512269Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 281474976715662, ready parts: 2/2, is published: true 2025-05-07T08:51:28.512349Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [3:899:2713] message: TxId: 281474976715662 2025-05-07T08:51:28.512412Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 281474976715662 ready parts: 2/2 2025-05-07T08:51:28.512467Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 281474976715662:0 2025-05-07T08:51:28.512514Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 281474976715662:0 2025-05-07T08:51:28.512596Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 10] was 2 2025-05-07T08:51:28.512653Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 281474976715662:1 2025-05-07T08:51:28.512681Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 281474976715662:1 2025-05-07T08:51:28.512769Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 
72057594046644480, LocalPathId: 11] was 3 2025-05-07T08:51:28.512804Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate source path for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 3 2025-05-07T08:51:28.513197Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-05-07T08:51:28.513283Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:630: Send to actor: [3:899:2713] msg type: 271124998 msg: NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976715662 at schemeshard: 72057594046644480 2025-05-07T08:51:28.513567Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269877764, Sender [3:913:2720], Recipient [3:419:2412]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-05-07T08:51:28.513599Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4938: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-05-07T08:51:28.513621Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5761: Server pipe is reset, at schemeshard: 72057594046644480 2025-05-07T08:51:28.641914Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [3:1013:2798], serverId# [3:1014:2799], sessionId# [0:0:0] 2025-05-07T08:51:28.642108Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715663. Ctx: { TraceId: 01jtmz2p48ec3mgn8jf5xr7qm4, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YjYxNzBlZGUtZmY0NWIwN2EtMmEzNjEwMGUtYTBlOTk4ZTg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root { items { uint32_value: 1 } items { uint32_value: 10 } }, { items { uint32_value: 2 } items { uint32_value: 20 } }, { items { uint32_value: 3 } items { uint32_value: 30 } } 2025-05-07T08:51:28.757067Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715664. Ctx: { TraceId: 01jtmz2p89davkf2xxa3k250av, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YWRlODVlOTktNTRjNmY1YTEtZDJhNGU0YjYtZDNhMWNiZTE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root { items { uint32_value: 1 } items { uint32_value: 10 } }, { items { uint32_value: 2 } items { uint32_value: 20 } }, { items { uint32_value: 3 } items { uint32_value: 30 } } ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_backup/unittest >> TBackupTests::ShouldSucceedOnLargeData[Zstd] [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:128:2058] recipient: [1:108:2140] 2025-05-07T08:50:37.146476Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:50:37.146602Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:50:37.146648Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:50:37.146693Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:50:37.146807Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:50:37.146843Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:50:37.146920Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:50:37.147015Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:50:37.147956Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:50:37.148380Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:50:37.271593Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:50:37.271792Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:50:37.293467Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:50:37.293669Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:50:37.293936Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:50:37.304829Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:50:37.305489Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState 
with owners number: 0 2025-05-07T08:50:37.306565Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:50:37.306935Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:50:37.310247Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:50:37.312602Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:50:37.312709Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:50:37.313100Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:50:37.313177Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:50:37.313235Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:50:37.313565Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:50:37.322101Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:239:2058] recipient: [1:15:2062] 2025-05-07T08:50:37.497296Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:50:37.497591Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:50:37.497840Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:50:37.498260Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:50:37.498382Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:50:37.507368Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:50:37.507643Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: 
ALTER DATABASE, path: //MyRoot 2025-05-07T08:50:37.507883Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:50:37.507971Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:50:37.508017Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:50:37.508068Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:50:37.511281Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:50:37.511369Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:50:37.511410Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:50:37.519049Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:50:37.519143Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:50:37.519203Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:50:37.519270Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:50:37.527715Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:50:37.534933Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:50:37.535190Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:50:37.536356Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:50:37.536572Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 130 RawX2: 4294969450 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:50:37.536628Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:50:37.536996Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:50:37.537063Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:50:37.537261Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:50:37.537404Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:50:37.547346Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:50:37.547426Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:50:37.547692Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:50:37.547748Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... NDataShard::TEvExportScan::TEvBuffer { Last: 0 Checksum: } REQUEST: PUT /data_00.csv.zst?partNumber=100&uploadId=1 HTTP/1.1 HEADERS: Host: localhost:2580 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: CFAAEE2C-027D-4643-BAF2-6A1CEA0C7B8F amz-sdk-request: attempt=1 content-length: 55 content-md5: B5SOCmjwb1RI3tHamcoRHA== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 S3_MOCK::HttpServeWrite: /data_00.csv.zst / partNumber=100&uploadId=1 / 55 2025-05-07T08:51:25.274952Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:578: [Export] [s3] Handle TEvExternalStorage::TEvUploadPartResponse: self# [1:3456:5420], result# UploadPartResult { ETag: 07948e0a68f06f5448ded1da99ca111c } 2025-05-07T08:51:25.275184Z node 1 :DATASHARD_BACKUP DEBUG: export_scan.cpp:130: [Export] [scanner] Handle TEvExportScan::TEvFeed: self# [1:3455:5419] 2025-05-07T08:51:25.275277Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:445: [Export] [s3] Handle TEvExportScan::TEvBuffer: self# [1:3456:5420], sender# [1:3455:5419], msg# NKikimr::NDataShard::TEvExportScan::TEvBuffer { Last: 1 Checksum: } REQUEST: PUT /data_00.csv.zst?partNumber=101&uploadId=1 HTTP/1.1 HEADERS: Host: localhost:2580 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 04DE6CA9-9990-4C2F-A924-3C6C0134DAE8 amz-sdk-request: attempt=1 content-length: 0 content-md5: 1B2M2Y8AsgTpgAmY7PhCfg== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 S3_MOCK::HttpServeWrite: /data_00.csv.zst / partNumber=101&uploadId=1 / 0 2025-05-07T08:51:25.278274Z node 1 :DATASHARD_BACKUP DEBUG: 
export_s3_uploader.cpp:578: [Export] [s3] Handle TEvExternalStorage::TEvUploadPartResponse: self# [1:3456:5420], result# UploadPartResult { ETag: d41d8cd98f00b204e9800998ecf8427e }
2025-05-07T08:51:25.278350Z node 1 :DATASHARD_BACKUP INFO: export_s3_uploader.cpp:702: [Export] [s3] Finish: self# [1:3456:5420], success# 1, error# , multipart# 1, uploadId# 1
2025-05-07T08:51:25.283920Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:512: [Export] [s3] Handle TEvDataShard::TEvS3Upload: self# [1:3456:5420], upload# { Id: 1 Status: Complete Error: (empty maybe) Parts: [f8f51a1e4a70db44fa91cc2ab9680824,9eba675fd7f187274786dff2f47292df,921325fb6b8811df3d06a44dbe1f8523,4eeb6b90e8e61075275bd8a42f56bd69,2840a487abe8cb9502b3d9c8a8e1c942,607d8f6e3b235a360d63796efd3a51c2,ed22e08df7fb8840f7cabc779cc86885,efeff2c7731061edd9a39059cc078045,4af01cb3455932f28e3bba713dcd57c9,dc94d36ecf3b36d183d75c84b9b2fac6,e2ce425dd2bb582abcc13d0d714c3554,b71e46686939d2cdf046520dd2774281,ab731a82a161e5e044b24e895a1713d6,1df51aaec89711e13a6f95c13113e36c,b6066b2ed343831b1b0ee0076179981e,332d34d77adc2b024a33d87e07d4233f,cf0093cc99590a0e8f9c199ed6deca07,8cc923ec76224e69263ac93b7bfabd30,690d66897e0780f2dfe3614e5a659a22,7502aae0ec253663b1cbfdc8ede92ab9,7d2c6f728ee0c12097dfe5441970b946,5fc7b9b675e0a125eea67cf05f82627f,fc8c5faa99cc7f4ce7ca320f8e7adb58,8e305c5aca758683ff25407a7bbd9220,181bce9c6393e22a0ac359a7b45d8187,639677548f0a8b776a6db92f44d96505,390ff8f57cfa4c04bfbed0d7a63c90e8,3dd76756e6558fd6c8c918210f7dc136,a3f5254fdad3ded54edef910e704c151,e9186373f80dbaa55dd04d07621de277,8898b965060a431b499261ec0cd3cee3,3ed51c736e64defe04980ce328b17aa4,bb0e45971888796588c12ea1c1bec162,e2b3defa84005d3892986ca6894b811f,656c7c809c8c8485f6e91892591cd284,779c6827126f255bde25ae242bf4c8ff,8883fc9b073e683558f1231c5f2142d0,19390a0e3340bcb6ccfe866a790f05cb,305182d3e9745fba3aad1973bb1bfc93,002819d72a6dc7954ecc1bcd2bd20254,325c6bc3cdd6fd83083cf0126c606218,b86932903843b9626e80bd9ccb5d0571,b5054116537a7c467bdb488c9d67dee7,fc3a45bd17a00b147e4f9c55bc2493da,1118e2f41e8839211163250796a65dce,b403ff17c2c269a79201a03ce439dc2a,88f2692ee439cfadef1cd21d58aac8d3,e5bef12f89b101af84d52299a5867d99,ed613335180c53f69d450ef8b176a4d5,150fd7dcdc86eb38c7f821ff4698d8bc,a0c18bf08acc6ebecac04a2520efee9b,e8463d7ce8f502d1575a433c1b30a9af,f123e0fc879e2fdc2c3e2f698fc4176d,d7ab79d73e4648e0a2bf8dec3a19c019,4e74b82f6a8ea7fad8790ee7dfcdb76e,f72bb1d8aa0f5c9265bae10a3784d8e8,924b317371d16363a37962b17a2ae4bb,7214b458c7e25c791e54bd430b835a6e,e79dba1b56122372af3fe7b06ea91bda,6aae345b94d78fc7c1ed0b8697cf5e62,fd3636ed699facb5f0c12f81741cabc5,2c4a198408c3eb9577fcd339ca62c539,59fbf761f9b7574b65fa6877b167bb8c,14f9f5cfdf3a6c33c577a54429b19cb6,c6d078b3be9cd7943e8145fd982baeef,198f55ae25539fbd54a4a6075beac2d1,939123b44e362c76a151a85af0247fb7,0147f8bd741be7780cbc900b6f4b0899,43453200aeaf201420737354cd73cfe4,de26d1339779fe0c538d01d5963fd423,5c903650e719f959dc9f37ea360c6319,23607b3f36e0a2abae7f1ed8e38596f3,0db9af920c6d1cf868e470bf7a349747,aed6ac19c60d08500582eea9dadcdfee,3f4e37ddd3e2e56a725323fad4d85cf6,942b269af420b4277d025cea489dcb25,89eddc25ba615b6cf09b9cd9a11a16bb,1d8e7f0613dc1919ee90133c468380bd,8bf1e4c1266d8437c1bd85e0fca6640a,e9eabcf5b61cf257f530b156dbd77a88,411f1661ae7650d2144e8c6f8a33b28f,6706ec5b8771e555779d5cbeca41aa75,b3a33ef21a8224ddc78a52e8d7ca8357,58749d344f42c192e572eda4ee66fb01,381aeb5ee3014e2c0fd9b85bd59ce005,9aed2297cd10dce10d68de3ff1830b42,be88e095fc3a13708b714db03b1f2744,5628e81ee17fb22fc828ed1b2169578b,a1cfb563fa4af884fe02ced05c26c881,fc602b8ee2e9746fb52823f8fd1f0f28,a1de256e94c7baa9b8ab905c892d1a14,6bff895b0b5f3552ad4bdc61b0d24148,fcba1d258a8651d831767b42e010e439,bef6e3d7088e671809fe584531f96971,f0b489242271d11200dbdbc78e4ce715,372d2d6877fff7c04433e492ad4dbd45,32191cf1972dcccd59c0b5a8b53d4f23,25928b7997b97ac58f18fbbe589573e8,472e53a27497661c6400410909405c4e,07948e0a68f06f5448ded1da99ca111c,d41d8cd98f00b204e9800998ecf8427e] }
REQUEST: POST /data_00.csv.zst?uploadId=1 HTTP/1.1
HEADERS: Host: localhost:2580
Accept: */*
Connection: Upgrade, HTTP2-Settings
Upgrade: h2c
HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA
amz-sdk-invocation-id: FFA98F59-70DF-44AF-B826-C199988DD4E4
amz-sdk-request: attempt=1
content-length: 11529
content-type: application/xml
user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8
x-amz-api-version: 2006-03-01
S3_MOCK::HttpServeAction: 4 / /data_00.csv.zst / uploadId=1
2025-05-07T08:51:25.336960Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:609: [Export] [s3] Handle TEvExternalStorage::TEvCompleteMultipartUploadResponse: self# [1:3456:5420], result# CompleteMultipartUploadResult { Bucket: Key: data_00.csv.zst ETag: c902b621cdd1ee89b9f1c4e6c36e6e45 }
2025-05-07T08:51:25.337311Z node 1 :DATASHARD_BACKUP DEBUG: export_scan.cpp:144: [Export] [scanner] Handle TEvExportScan::TEvFinish: self# [1:3455:5419], msg# NKikimr::NDataShard::TEvExportScan::TEvFinish { Success: 1 Error: }
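[Editorial note] The records above show the uploader completing an S3 multipart upload: one UploadPartResult ETag per part (the trailing d41d8cd98f00b204e9800998ecf8427e is the MD5 of zero bytes, i.e. an empty final part), then a POST with an application/xml body enumerating every part, answered by a CompleteMultipartUploadResult whose ETag (c902b621...) identifies the assembled object rather than any single part. Below is a minimal aws-sdk-cpp sketch of the same completion call, for orientation only; the bucket, key, upload id, and part list here are illustrative placeholders, not values taken from the test:

    // Sketch only: issues the same CompleteMultipartUpload POST the uploader
    // sends above. Bucket/key/uploadId/part data are illustrative.
    #include <aws/core/Aws.h>
    #include <aws/s3/S3Client.h>
    #include <aws/s3/model/CompleteMultipartUploadRequest.h>
    #include <aws/s3/model/CompletedMultipartUpload.h>
    #include <aws/s3/model/CompletedPart.h>
    #include <iostream>

    int main() {
        Aws::SDKOptions options;
        Aws::InitAPI(options);
        {
            Aws::S3::S3Client s3; // default credential/endpoint chain

            // One entry per uploaded part, in ascending part-number order;
            // each ETag is the value returned by that part's UploadPart response.
            Aws::S3::Model::CompletedMultipartUpload parts;
            parts.AddParts(Aws::S3::Model::CompletedPart()
                               .WithPartNumber(1)
                               .WithETag("f8f51a1e4a70db44fa91cc2ab9680824"));
            // ... parts 2..N ...

            Aws::S3::Model::CompleteMultipartUploadRequest req;
            req.WithBucket("bucket")
               .WithKey("data_00.csv.zst")
               .WithUploadId("1");
            req.SetMultipartUpload(parts);

            // The SDK serializes this as the XML body seen in the REQUEST above.
            auto outcome = s3.CompleteMultipartUpload(req);
            if (outcome.IsSuccess()) {
                std::cout << "object ETag: " << outcome.GetResult().GetETag() << "\n";
            }
        }
        Aws::ShutdownAPI(options);
        return 0;
    }
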
2025-05-07T08:51:25.349303Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5472: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 309 RawX2: 4294969592 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10000 RowsProcessed: 1000 }
2025-05-07T08:51:25.349390Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation FindRelatedPartByTabletId, TxId: 102, tablet: 72075186233409546, partId: 0
2025-05-07T08:51:25.349536Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 102:0, at schemeshard: 72057594046678944, message: Source { RawX1: 309 RawX2: 4294969592 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10000 RowsProcessed: 1000 }
2025-05-07T08:51:25.349621Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:233: TBackup TProposedWaitParts, opId: 102:0 HandleReply TEvSchemaChanged at tablet# 72057594046678944 message# Source { RawX1: 309 RawX2: 4294969592 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10000 RowsProcessed: 1000 }
2025-05-07T08:51:25.349671Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:655: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 102:0, shardIdx: 72057594046678944:1, datashard: 72075186233409546, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944
2025-05-07T08:51:25.349705Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:674: all shard schema changes has been received, operationId: 102:0, at schemeshard: 72057594046678944
2025-05-07T08:51:25.349752Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:686: send schema changes ack message, operation: 102:0, datashard: 72075186233409546, at schemeshard: 72057594046678944
2025-05-07T08:51:25.349788Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 102:0 129 -> 240
2025-05-07T08:51:25.349920Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:116: Unable to make a bill: kind# TBackup, opId# 102:0, reason# domain is not a serverless db, domain# /MyRoot, domainPathId# [OwnerId: 72057594046678944, LocalPathId: 1], IsDomainSchemeShard: 1, ParentDomainId: [OwnerId: 72057594046678944, LocalPathId: 1], ResourcesDomainId: [OwnerId: 72057594046678944, LocalPathId: 1]
2025-05-07T08:51:25.354853Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944
2025-05-07T08:51:25.355615Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944
2025-05-07T08:51:25.355686Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046678944] TDone opId# 102:0 ProgressState
2025-05-07T08:51:25.355816Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#102:0 progress is 1/1
2025-05-07T08:51:25.355869Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 102 ready parts: 1/1
2025-05-07T08:51:25.355915Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#102:0 progress is 1/1
2025-05-07T08:51:25.355962Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 102 ready parts: 1/1
2025-05-07T08:51:25.356006Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: true
2025-05-07T08:51:25.356116Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:337:2316] message: TxId: 102
2025-05-07T08:51:25.356175Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 102 ready parts: 1/1
2025-05-07T08:51:25.356223Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 102:0
2025-05-07T08:51:25.356259Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 102:0
2025-05-07T08:51:25.356406Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3
2025-05-07T08:51:25.361409Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult
2025-05-07T08:51:25.361495Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:3441:5406]
TestWaitNotification: OK eventTxId 102
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_incremental_backup/unittest >> IncrementalBackup::SimpleRestoreBackupCollection-WithIncremental [GOOD]
Test command err:
2025-05-07T08:51:13.867812Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:105:2151], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests }
2025-05-07T08:51:13.868007Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError;
2025-05-07T08:51:13.868314Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError;
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00317e/r3tmp/tmpfLm1N4/pdisk_1.dat
2025-05-07T08:51:14.268051Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269877761, Sender [1:597:2521], Recipient [1:410:2405]: NKikimr::TEvTabletPipe::TEvServerConnected
2025-05-07T08:51:14.268138Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4937: StateWork, processing event TEvTabletPipe::TEvServerConnected
2025-05-07T08:51:14.268183Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5713: Pipe server connected, at tablet: 72057594046644480
2025-05-07T08:51:14.268343Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271122432, Sender [1:594:2519], Recipient [1:410:2405]: {TEvModifySchemeTransaction txid# 1 TabletId# 72057594046644480}
2025-05-07T08:51:14.268381Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4851: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction
2025-05-07T08:51:14.430112Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "Root" StoragePools { Name: "/Root:test" Kind: "test" } } } TxId: 1 TabletId: 72057594046644480 , at schemeshard: 72057594046644480
2025-05-07T08:51:14.430405Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //Root, opId: 1:0, at schemeshard: 72057594046644480
2025-05-07T08:51:14.430639Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 0
2025-05-07T08:51:14.430908Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480
2025-05-07T08:51:14.431029Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480
2025-05-07T08:51:14.431148Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480
2025-05-07T08:51:14.431874Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046644480 PathId: 1, at schemeshard: 72057594046644480
2025-05-07T08:51:14.432079Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //Root
2025-05-07T08:51:14.432135Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480
2025-05-07T08:51:14.432180Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:275: Activate send for 1:0
2025-05-07T08:51:14.432402Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 2146435072, Sender [1:410:2405], Recipient [1:410:2405]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation
2025-05-07T08:51:14.432448Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4857: StateWork, processing event TEvPrivate::TEvProgressOperation
2025-05-07T08:51:14.432527Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046644480
2025-05-07T08:51:14.432604Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046644480
2025-05-07T08:51:14.432648Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state
2025-05-07T08:51:14.432688Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3
2025-05-07T08:51:14.432808Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480
2025-05-07T08:51:14.433282Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480
2025-05-07T08:51:14.433325Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:275: Activate send for 1:0
2025-05-07T08:51:14.433442Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 2146435072, Sender [1:410:2405], Recipient [1:410:2405]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation
2025-05-07T08:51:14.433478Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4857: StateWork, processing event TEvPrivate::TEvProgressOperation
2025-05-07T08:51:14.433551Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046644480
2025-05-07T08:51:14.433614Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046644480
2025-05-07T08:51:14.433668Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128
2025-05-07T08:51:14.433750Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480
2025-05-07T08:51:14.434204Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480
2025-05-07T08:51:14.434248Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:275: Activate send for 1:0
2025-05-07T08:51:14.434373Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 2146435072, Sender [1:410:2405], Recipient [1:410:2405]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation
2025-05-07T08:51:14.434413Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4857: StateWork, processing event TEvPrivate::TEvProgressOperation
2025-05-07T08:51:14.434456Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046644480
2025-05-07T08:51:14.434490Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046644480
2025-05-07T08:51:14.434548Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046644480
2025-05-07T08:51:14.434591Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480
2025-05-07T08:51:14.434651Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1
2025-05-07T08:51:14.440065Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046644480 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545
2025-05-07T08:51:14.440676Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480
2025-05-07T08:51:14.440732Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046644480 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816
2025-05-07T08:51:14.440950Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545
2025-05-07T08:51:14.442465Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269877760, Sender [1:602:2526], Recipient [1:410:2405]: NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594046316545 Status: OK ServerId: [1:604:2527] Leader: 1 Dead: 0 Generation: 2 VersionInfo: }
2025-05-07T08:51:14.442532Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4935: StateWork, processing event TEvTabletPipe::TEvClientConnected
2025-05-07T08:51:14.442587Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5663: Handle TEvClientConnected, tabletId: 72057594046316545, status: OK, at schemeshard: 72057594046644480
2025-05-07T08:51:14.442843Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269091328, Sender [1:406:2401], Recipient [1:410:2405]: NKikimrTx.TEvProposeTransactionStatus Status: 16 StepId: 500 TxId: 1
2025-05-07T08:51:14.443316Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269877761, Sender [1:606:2529], Recipient [1:410:2405]: NKikimr::TEvTabletPipe::TEvServerConnected
2025-05-07T08:51:14.443376Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4937: StateWork, processing event TEvTabletPipe::TEvServerConnected
2025-05-07T08:51:14.443419Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5713: Pipe server connected, at tablet: 72057594046644480
2025-05-07T08:51:14.443561Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271124996, Sender [1:594:2519], Recipient [1:410:2405]: NKikimrScheme.TEvNotifyTxCompletion TxId: 1
2025-05-07T08:51:14.443592Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4853: StateWork, processing event TEvSchemeShard::TEvNotifyTxCompletion
2025-05-07T08:51:14.443679Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 1, at schemeshard: 72057594046644480
2025-05-07T08:51:14.443727Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 1, ready parts: 0/1, is published: true
2025-05-07T08:51:14.443774Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 1, at schemeshard: 72057594046644480
2025-05-07T08:51:14.516746Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 273285138, Sender [1:43:2090], Recipient [1:410:2405]: NKikimr::NConsole::TEvConsole::TEvConfigNotificationRequest { Config { QueryServiceConfig { AvailableExternalDataSources: "ObjectStorage" } } ItemKinds: 26 ItemKinds: 34 ItemKinds: 52 ItemKinds: 54 ItemKinds: 73 Local: true }
2025-05-07T08:51:14.516899Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7610: Got new config: QueryServiceConfig { AvailableExternalDataSources: "ObjectStorage" }
2025-05-07T08:51:14.516960Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded
2025-05-0 ... 0526Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4937: StateWork, processing event TEvTabletPipe::TEvServerConnected
2025-05-07T08:51:22.770554Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5713: Pipe server connected, at tablet: 72057594046644480
2025-05-07T08:51:22.770732Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269551620, Sender [2:734:2606], Recipient [2:409:2404]: NKikimrTxDataShard.TEvSchemaChanged Source { RawX1: 734 RawX2: 8589937198 } Origin: 72075186224037888 State: 2 TxId: 281474976715662 Step: 0 Generation: 1
2025-05-07T08:51:22.770769Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4872: StateWork, processing event TEvDataShard::TEvSchemaChanged
2025-05-07T08:51:22.770829Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5472: Handle TEvSchemaChanged, tabletId: 72057594046644480, at schemeshard: 72057594046644480, message: Source { RawX1: 734 RawX2: 8589937198 } Origin: 72075186224037888 State: 2 TxId: 281474976715662 Step: 0 Generation: 1
2025-05-07T08:51:22.770865Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation FindRelatedPartByTabletId, TxId: 281474976715662, tablet: 72075186224037888, partId: 0
2025-05-07T08:51:22.770978Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 281474976715662:0, at schemeshard: 72057594046644480, message: Source { RawX1: 734 RawX2: 8589937198 } Origin: 72075186224037888 State: 2 TxId: 281474976715662 Step: 0 Generation: 1
2025-05-07T08:51:22.771012Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1005: NTableState::TProposedWaitParts operationId# 281474976715662:0 HandleReply TEvSchemaChanged at tablet: 72057594046644480
2025-05-07T08:51:22.771062Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1009: NTableState::TProposedWaitParts operationId# 281474976715662:0 HandleReply TEvSchemaChanged at tablet: 72057594046644480 message: Source { RawX1: 734 RawX2: 8589937198 } Origin: 72075186224037888 State: 2 TxId: 281474976715662 Step: 0 Generation: 1
2025-05-07T08:51:22.771098Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:655: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 281474976715662:0, shardIdx: 72057594046644480:1, datashard: 72075186224037888, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046644480
2025-05-07T08:51:22.771129Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:674: all shard schema changes has been received, operationId: 281474976715662:0, at schemeshard: 72057594046644480
2025-05-07T08:51:22.771166Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:686: send schema changes ack message, operation: 281474976715662:0, datashard: 72075186224037889, at schemeshard: 72057594046644480
2025-05-07T08:51:22.771213Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:686: send schema changes ack message, operation: 281474976715662:0, datashard: 72075186224037888, at schemeshard: 72057594046644480
2025-05-07T08:51:22.771241Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 281474976715662:0 129 -> 240
2025-05-07T08:51:22.771330Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480
2025-05-07T08:51:22.771683Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 281474976715662:0, at schemeshard: 72057594046644480
2025-05-07T08:51:22.771707Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480
2025-05-07T08:51:22.771737Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:275: Activate send for 281474976715662:0
2025-05-07T08:51:22.771811Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:630: Send to actor: [2:943:2742] msg type: 269552132 msg: NKikimrTxDataShard.TEvSchemaChangedResult TxId: 281474976715662 at schemeshard: 72057594046644480
2025-05-07T08:51:22.771848Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:630: Send to actor: [2:734:2606] msg type: 269552132 msg: NKikimrTxDataShard.TEvSchemaChangedResult TxId: 281474976715662 at schemeshard: 72057594046644480
2025-05-07T08:51:22.771944Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715662 datashard 72075186224037888 state Ready
2025-05-07T08:51:22.771994Z node 2 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888
2025-05-07T08:51:22.772115Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715662 datashard 72075186224037889 state Ready
2025-05-07T08:51:22.772141Z node 2 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037889 Got TEvSchemaChangedResult from SS at 72075186224037889
2025-05-07T08:51:22.772258Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 2146435072, Sender [2:409:2404], Recipient [2:409:2404]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation
2025-05-07T08:51:22.772288Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4857: StateWork, processing event TEvPrivate::TEvProgressOperation
2025-05-07T08:51:22.772321Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 281474976715662:0, at schemeshard: 72057594046644480
2025-05-07T08:51:22.772361Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_copy_table.cpp:306: TCopyTable TCopyTableBarrier operationId: 281474976715662:0ProgressState, operation type TxCopyTable
2025-05-07T08:51:22.772403Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480
2025-05-07T08:51:22.772435Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:1059: Set barrier, OperationId: 281474976715662:0, name: CopyTableBarrier, done: 0, blocked: 1, parts count: 1
2025-05-07T08:51:22.772465Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:1103: All parts have reached barrier, tx: 281474976715662, done: 0, blocked: 1
2025-05-07T08:51:22.772526Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_copy_table.cpp:289: TCopyTable TCopyTableBarrier operationId: 281474976715662:0 HandleReply TEvPrivate::TEvCompleteBarrier, msg: NKikimr::NSchemeShard::TEvPrivate::TEvCompleteBarrier { TxId: 281474976715662 Name: CopyTableBarrier }, at tablet# 72057594046644480
2025-05-07T08:51:22.772581Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 281474976715662:0 240 -> 240
2025-05-07T08:51:22.772963Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480
2025-05-07T08:51:22.772986Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:275: Activate send for 281474976715662:0
2025-05-07T08:51:22.773050Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 2146435072, Sender [2:409:2404], Recipient [2:409:2404]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation
2025-05-07T08:51:22.773071Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4857: StateWork, processing event TEvPrivate::TEvProgressOperation
2025-05-07T08:51:22.773110Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 281474976715662:0, at schemeshard: 72057594046644480
2025-05-07T08:51:22.773148Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046644480] TDone opId# 281474976715662:0 ProgressState
2025-05-07T08:51:22.773232Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480
2025-05-07T08:51:22.773258Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976715662:0 progress is 1/1
2025-05-07T08:51:22.773287Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 281474976715662 ready parts: 1/1
2025-05-07T08:51:22.773328Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976715662:0 progress is 1/1
2025-05-07T08:51:22.773354Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 281474976715662 ready parts: 1/1
2025-05-07T08:51:22.773384Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 281474976715662, ready parts: 1/1, is published: true
2025-05-07T08:51:22.773441Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [2:923:2726] message: TxId: 281474976715662
2025-05-07T08:51:22.773482Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 281474976715662 ready parts: 1/1
2025-05-07T08:51:22.773521Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 281474976715662:0
2025-05-07T08:51:22.773549Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 281474976715662:0
2025-05-07T08:51:22.773661Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 11] was 3
2025-05-07T08:51:22.773692Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate source path for pathId [OwnerId: 72057594046644480, LocalPathId: 6] was 3
2025-05-07T08:51:22.774044Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480
2025-05-07T08:51:22.774105Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:630: Send to actor: [2:923:2726] msg type: 271124998 msg: NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976715662 at schemeshard: 72057594046644480
2025-05-07T08:51:22.774381Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269877764, Sender [2:931:2733], Recipient [2:409:2404]: NKikimr::TEvTabletPipe::TEvServerDisconnected
2025-05-07T08:51:22.774406Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4938: StateWork, processing event TEvTabletPipe::TEvServerDisconnected
2025-05-07T08:51:22.774433Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5761: Server pipe is reset, at schemeshard: 72057594046644480
2025-05-07T08:51:22.868606Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [2:1021:2801], serverId# [2:1022:2802], sessionId# [0:0:0]
2025-05-07T08:51:22.868754Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715663. Ctx: { TraceId: 01jtmz2ggvadafh732j28nycd6, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NGM2NDU3NTQtOWZiMjRjMzUtODk3MjFiZDctZTdjZTUzYjE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root
{ items { uint32_value: 1 } items { uint32_value: 10 } }, { items { uint32_value: 2 } items { uint32_value: 20 } }, { items { uint32_value: 3 } items { uint32_value: 30 } }, { items { uint32_value: 4 } items { uint32_value: 40 } }, { items { uint32_value: 5 } items { uint32_value: 50 } }
------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/server/ut/unittest >> TMessageBusServerPersQueueGetTopicMetadataMetaRequestTest::FailesOnNotATopic [GOOD]
Test command err:
Assert failed: Check response: { Status: 130 ErrorReason: "Timeout while waiting for response, may be just slow, Marker# PQ16" ErrorCode: ERROR }
2025-05-07T08:51:21.125671Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:3088: [PQ: 72057594037928037] Handle TEvInterconnect::TEvNodeInfo
2025-05-07T08:51:21.129834Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:3120: [PQ: 72057594037928037] Transactions request. From tx_00000000000000000000, To tx_18446744073709551615
2025-05-07T08:51:21.130182Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:745: [PQ: 72057594037928037] doesn't have tx info
2025-05-07T08:51:21.130238Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:757: [PQ: 72057594037928037] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0
2025-05-07T08:51:21.130280Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:969: [PQ: 72057594037928037] no config, start with empty partitions and default config
2025-05-07T08:51:21.130342Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:4846: [PQ: 72057594037928037] Txs.size=0, PlannedTxs.size=0
2025-05-07T08:51:21.130389Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037928037] disable metering: reason# billing is not enabled in BillingMeteringConfig
2025-05-07T08:51:21.130468Z node 2 :PERSQUEUE INFO: pq_impl.cpp:794: [PQ: 72057594037928037] doesn't have tx writes info
2025-05-07T08:51:21.131193Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2874: [PQ: 72057594037928037] server connected, pipe [2:262:2254], now have 1 active actors on pipe
2025-05-07T08:51:21.131304Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:1454: [PQ: 72057594037928037] Handle TEvPersQueue::TEvUpdateConfig
2025-05-07T08:51:21.153122Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:1640: [PQ: 72057594037928037] Config update version 1(current 0) received from actor [2:99:2134] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic1" Version: 1 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 }
2025-05-07T08:51:21.156254Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:585: [PQ: 72057594037928037] Apply new config CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic1" Version: 1 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 }
2025-05-07T08:51:21.156437Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037928037] disable metering: reason# billing is not enabled in BillingMeteringConfig
2025-05-07T08:51:21.157346Z node 2 :PERSQUEUE INFO: pq_impl.cpp:1481: [PQ: 72057594037928037] Config applied version 1 actor [2:99:2134] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic1" Version: 1 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 }
2025-05-07T08:51:21.157484Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic1:0:Initializer] Start initializing step TInitConfigStep
2025-05-07T08:51:21.157930Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic1:0:Initializer] Start initializing step TInitInternalFieldsStep
2025-05-07T08:51:21.158343Z node 2 :PERSQUEUE INFO: partition_init.cpp:879: [PQ: 72057594037928037, Partition: 0, State: StateInit] bootstrapping 0 [2:270:2260]
2025-05-07T08:51:21.160641Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--topic1:0:Initializer] Initializing completed.
2025-05-07T08:51:21.160710Z node 2 :PERSQUEUE INFO: partition.cpp:557: [PQ: 72057594037928037, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--topic1' partition 0 generation 2 [2:270:2260]
2025-05-07T08:51:21.160780Z node 2 :PERSQUEUE DEBUG: partition.cpp:571: [PQ: 72057594037928037, Partition: 0, State: StateInit] SYNC INIT topic rt3.dc1--topic1 partitition 0 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0
2025-05-07T08:51:21.160836Z node 2 :PERSQUEUE DEBUG: partition.cpp:3847: [PQ: 72057594037928037, Partition: 0, State: StateIdle] Process pending events. Count 0
2025-05-07T08:51:21.161524Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2874: [PQ: 72057594037928037] server connected, pipe [2:273:2262], now have 1 active actors on pipe
2025-05-07T08:51:21.214478Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:3088: [PQ: 72057594037928138] Handle TEvInterconnect::TEvNodeInfo
2025-05-07T08:51:21.219261Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:3120: [PQ: 72057594037928138] Transactions request. From tx_00000000000000000000, To tx_18446744073709551615
2025-05-07T08:51:21.219540Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:745: [PQ: 72057594037928138] doesn't have tx info
2025-05-07T08:51:21.219596Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:757: [PQ: 72057594037928138] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0
2025-05-07T08:51:21.219643Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:969: [PQ: 72057594037928138] no config, start with empty partitions and default config
2025-05-07T08:51:21.219686Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:4846: [PQ: 72057594037928138] Txs.size=0, PlannedTxs.size=0
2025-05-07T08:51:21.219795Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037928138] disable metering: reason# billing is not enabled in BillingMeteringConfig
2025-05-07T08:51:21.219867Z node 2 :PERSQUEUE INFO: pq_impl.cpp:794: [PQ: 72057594037928138] doesn't have tx writes info
2025-05-07T08:51:21.220627Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2874: [PQ: 72057594037928138] server connected, pipe [2:406:2361], now have 1 active actors on pipe
2025-05-07T08:51:21.220714Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:1454: [PQ: 72057594037928138] Handle TEvPersQueue::TEvUpdateConfig
2025-05-07T08:51:21.220897Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:1640: [PQ: 72057594037928138] Config update version 2(current 0) received from actor [2:99:2134] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 1 TopicName: "rt3.dc1--topic2" Version: 2 Partitions { PartitionId: 1 } AllPartitions { PartitionId: 1 }
2025-05-07T08:51:21.223447Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:585: [PQ: 72057594037928138] Apply new config CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 1 TopicName: "rt3.dc1--topic2" Version: 2 Partitions { PartitionId: 1 } AllPartitions { PartitionId: 1 }
2025-05-07T08:51:21.223577Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037928138] disable metering: reason# billing is not enabled in BillingMeteringConfig
2025-05-07T08:51:21.224426Z node 2 :PERSQUEUE INFO: pq_impl.cpp:1481: [PQ: 72057594037928138] Config applied version 2 actor [2:99:2134] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 1 TopicName: "rt3.dc1--topic2" Version: 2 Partitions { PartitionId: 1 } AllPartitions { PartitionId: 1 }
2025-05-07T08:51:21.224544Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:1:Initializer] Start initializing step TInitConfigStep
2025-05-07T08:51:21.224891Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:1:Initializer] Start initializing step TInitInternalFieldsStep
2025-05-07T08:51:21.225113Z node 2 :PERSQUEUE INFO: partition_init.cpp:879: [PQ: 72057594037928138, Partition: 1, State: StateInit] bootstrapping 1 [2:414:2367]
2025-05-07T08:51:21.227414Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--topic2:1:Initializer] Initializing completed.
2025-05-07T08:51:21.227501Z node 2 :PERSQUEUE INFO: partition.cpp:557: [PQ: 72057594037928138, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--topic2' partition 1 generation 2 [2:414:2367]
2025-05-07T08:51:21.227565Z node 2 :PERSQUEUE DEBUG: partition.cpp:571: [PQ: 72057594037928138, Partition: 1, State: StateInit] SYNC INIT topic rt3.dc1--topic2 partitition 1 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0
2025-05-07T08:51:21.227626Z node 2 :PERSQUEUE DEBUG: partition.cpp:3847: [PQ: 72057594037928138, Partition: 1, State: StateIdle] Process pending events. Count 0
2025-05-07T08:51:21.228463Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2874: [PQ: 72057594037928138] server connected, pipe [2:417:2369], now have 1 active actors on pipe
2025-05-07T08:51:21.246025Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:3088: [PQ: 72057594037928139] Handle TEvInterconnect::TEvNodeInfo
2025-05-07T08:51:21.250373Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:3120: [PQ: 72057594037928139] Transactions request. From tx_00000000000000000000, To tx_18446744073709551615
2025-05-07T08:51:21.250703Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:745: [PQ: 72057594037928139] doesn't have tx info
2025-05-07T08:51:21.250767Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:757: [PQ: 72057594037928139] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0
2025-05-07T08:51:21.250812Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:969: [PQ: 72057594037928139] no config, start with empty partitions and default config
2025-05-07T08:51:21.250851Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:4846: [PQ: 72057594037928139] Txs.size=0, PlannedTxs.size=0
2025-05-07T08:51:21.250906Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037928139] disable metering: reason# billing is not enabled in BillingMeteringConfig
2025-05-07T08:51:21.250973Z node 2 :PERSQUEUE INFO: pq_impl.cpp:794: [PQ: 72057594037928139] doesn't have tx writes info
2025-05-07T08:51:21.251678Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2874: [PQ: 72057594037928139] server connected, pipe [2:466:2406], now have 1 active actors on pipe
2025-05-07T08:51:21.251793Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:1454: [PQ: 72057594037928139] Handle TEvPersQueue::TEvUpdateConfig
2025-05-07T08:51:21.251991Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:1640: [PQ: 72057594037928139] Config update version 3(current 0) received from actor [2:99:2134] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 2 TopicName: "rt3.dc1--topic2" Version: 3 Partitions { PartitionId: 2 } AllPartitions { PartitionId: 2 }
2025-05-07T08:51:21.254452Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:585: [PQ: 72057594037928139] Apply new config CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 2 TopicName: "rt3.dc1--topic2" Version: 3 Partitions { PartitionId: 2 } AllPartitions { PartitionId: 2 }
2025-05-07T08:51:21.254592Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037928139] disable metering: reason# billing is not enabled in BillingMeteringConfig
2025-05-07T08:51:21.255409Z node 2 :PERSQUEUE INFO: pq_impl.cpp:1481: [PQ: 72057594037928139] Config applied version 3 actor [2:99:2134] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 2 TopicName: "rt3.dc1--topic2" Version: 3 Partitions { PartitionId: 2 } AllPartitions { PartitionId: 2 }
2025-05-07T08:51:21.255511Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitConfigStep
2025-05-07T08:51:21.255815Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitInternalFieldsStep
2025-05-07T08:51:21.256044Z node 2 :PERSQUEUE INFO: partition_init.cpp:879: [PQ: 72057594037928139, Partition: 2, State: StateInit] bootstrapping 2 [2:474:2412]
2025-05-07T08:51:21.258101Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--topic2:2:Initializer] Initializing completed.
2025-05-07T08:51:21.258172Z node 2 :PERSQUEUE INFO: partition.cpp:557: [PQ: 72057594037928139, Partition: 2, State: StateInit] init complete for topic 'rt3.dc1--topic2' partition 2 generation 2 [2:474:2412]
2025-05-07T08:51:21.258240Z node 2 :PERSQUEUE DEBUG: partition.cpp:571: [PQ: 72057594037928139, Partition: 2, State: StateInit] SYNC INIT topic rt3.dc1--topic2 partitition 2 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0
2025-05-07T08:51:21.258294Z node 2 :PERSQUEUE DEBUG: partition.cpp:3847: [PQ: 72057594037928139, Partition: 2, State: StateIdle] Process pending events. Count 0
2025-05-07T08:51:21.259167Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2874: [PQ: 72057594037928139] server connected, pipe [2:477:2414], now have 1 active actors on pipe
REQUEST MetaRequest { CmdGetReadSessionsInfo { ClientId: "client_id" Topic: "rt3.dc1--topic1" Topic: "rt3.dc1--topic2" } } Ticket: "client_id@builtin"
2025-05-07T08:51:21.268000Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2874: [PQ: 72057594037928037] server connected, pipe [2:486:2417], now have 1 active actors on pipe
2025-05-07T08:51:21.268640Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2874: [PQ: 72057594037928138] server connected, pipe [2:489:2418], now have 1 active actors on pipe
2025-05-07T08:51:21.269005Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2874: [PQ: 72057594037928139] server connected, pipe [2:490:2418], now have 1 active actors on pipe
2025-05-07T08:51:21.269666Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2899: [PQ: 72057594037928037] server disconnected, pipe [2:486:2417] destroyed
2025-05-07T08:51:21.270278Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2899: [PQ: 72057594037928138] server disconnected, pipe [2:489:2418] destroyed
2025-05-07T08:51:21.270382Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2899: [PQ: 72057594037928139] server disconnected, pipe [2:490:2418] destroyed
RESULT Status: 1 ErrorCode: OK MetaResponse { CmdGetReadSessionsInfoResult { TopicResult { Topic: "rt3.dc1--topic1" PartitionResult { Partition: 0 ClientOffset: 0 StartOffset: 0 EndOffset: 0 TimeLag: 0 TabletNode: "::1" ClientReadOffset: 0 ReadTimeLag: 0 TabletNodeId: 2 ErrorCode: OK } ErrorCode: OK } TopicResult { Topic: "rt3.dc1--topic2" PartitionResult { Partition: 0 ErrorCode: INITIALIZING ErrorReason: "tablet for partition is not running" } PartitionResult { Partition: 1 ClientOffset: 0 StartOffset: 0 EndOffset: 0 TimeLag: 0 TabletNode: "::1" ClientReadOffset: 0 ReadTimeLag: 0 TabletNodeId: 2 ErrorCode: OK } PartitionResult { Partition: 2 ClientOffset: 0 StartOffset: 0 EndOffset: 0 TimeLag: 0 TabletNode: "::1" ClientReadOffset: 0 ReadTimeLag: 0 TabletNodeId: 2 ErrorCode: OK } ErrorCode: OK } } }
Assert failed: Check response: { Status: 128 ErrorReason: "the following topics are not created: rt3.dc1--topic2, Marker# PQ95" ErrorCode: UNKNOWN_TOPIC }
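[Editorial note] The REQUEST/RESULT pair above is protobuf text format: the "Topic" field repeats once per requested topic (a repeated string), and the response carries per-partition results; partition 0 of rt3.dc1--topic2 answers INITIALIZING because no tablet for it was started in this test. A hedged sketch of round-tripping such text-format dumps with the protobuf C++ API follows; it is generic over the message type, since the excerpt does not show the concrete YDB proto:

    // Sketch only: parse/print protobuf text format of the kind dumped in
    // the REQUEST/RESULT lines above. Works for any generated message type.
    #include <google/protobuf/text_format.h>
    #include <string>

    template <typename TMsg>  // e.g. the (unshown) PersQueue meta-request type
    bool RoundTrip(const std::string& in, TMsg* msg, std::string* out) {
        // ParseFromString accepts exactly the "Field { ... }" syntax in the log.
        if (!google::protobuf::TextFormat::ParseFromString(in, msg)) {
            return false;
        }
        // PrintToString re-emits the same text representation.
        return google::protobuf::TextFormat::PrintToString(*msg, out);
    }
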
------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/server/ut/unittest >> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::FailesOnNotATopic [GOOD]
Test command err:
Assert failed: Check response: { Status: 130 ErrorReason: "Timeout while waiting for response, may be just slow, Marker# PQ16" ErrorCode: ERROR }
2025-05-07T08:51:19.350577Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:3088: [PQ: 72057594037928037] Handle TEvInterconnect::TEvNodeInfo
2025-05-07T08:51:19.354799Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:3120: [PQ: 72057594037928037] Transactions request. From tx_00000000000000000000, To tx_18446744073709551615
2025-05-07T08:51:19.355190Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:745: [PQ: 72057594037928037] doesn't have tx info
2025-05-07T08:51:19.355269Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:757: [PQ: 72057594037928037] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0
2025-05-07T08:51:19.355308Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:969: [PQ: 72057594037928037] no config, start with empty partitions and default config
2025-05-07T08:51:19.355363Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:4846: [PQ: 72057594037928037] Txs.size=0, PlannedTxs.size=0
2025-05-07T08:51:19.355412Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037928037] disable metering: reason# billing is not enabled in BillingMeteringConfig
2025-05-07T08:51:19.355487Z node 2 :PERSQUEUE INFO: pq_impl.cpp:794: [PQ: 72057594037928037] doesn't have tx writes info
2025-05-07T08:51:19.356201Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2874: [PQ: 72057594037928037] server connected, pipe [2:262:2254], now have 1 active actors on pipe
2025-05-07T08:51:19.356323Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:1454: [PQ: 72057594037928037] Handle TEvPersQueue::TEvUpdateConfig
2025-05-07T08:51:19.372981Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:1640: [PQ: 72057594037928037] Config update version 1(current 0) received from actor [2:99:2134] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic1" Version: 1 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 }
2025-05-07T08:51:19.376323Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:585: [PQ: 72057594037928037] Apply new config CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic1" Version: 1 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 }
2025-05-07T08:51:19.376504Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037928037] disable metering: reason# billing is not enabled in BillingMeteringConfig
2025-05-07T08:51:19.377432Z node 2 :PERSQUEUE INFO: pq_impl.cpp:1481: [PQ: 72057594037928037] Config applied version 1 actor [2:99:2134] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic1" Version: 1 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 }
2025-05-07T08:51:19.377571Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic1:0:Initializer] Start initializing step TInitConfigStep
2025-05-07T08:51:19.378050Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic1:0:Initializer] Start initializing step TInitInternalFieldsStep
2025-05-07T08:51:19.378470Z node 2 :PERSQUEUE INFO: partition_init.cpp:879: [PQ: 72057594037928037, Partition: 0, State: StateInit] bootstrapping 0 [2:270:2260]
2025-05-07T08:51:19.381109Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--topic1:0:Initializer] Initializing completed.
2025-05-07T08:51:19.381179Z node 2 :PERSQUEUE INFO: partition.cpp:557: [PQ: 72057594037928037, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--topic1' partition 0 generation 2 [2:270:2260]
2025-05-07T08:51:19.381236Z node 2 :PERSQUEUE DEBUG: partition.cpp:571: [PQ: 72057594037928037, Partition: 0, State: StateInit] SYNC INIT topic rt3.dc1--topic1 partitition 0 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0
2025-05-07T08:51:19.381306Z node 2 :PERSQUEUE DEBUG: partition.cpp:3847: [PQ: 72057594037928037, Partition: 0, State: StateIdle] Process pending events. Count 0
2025-05-07T08:51:19.382232Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2874: [PQ: 72057594037928037] server connected, pipe [2:273:2262], now have 1 active actors on pipe
2025-05-07T08:51:19.453202Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:3088: [PQ: 72057594037928139] Handle TEvInterconnect::TEvNodeInfo
2025-05-07T08:51:19.458145Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:3120: [PQ: 72057594037928139] Transactions request. From tx_00000000000000000000, To tx_18446744073709551615
2025-05-07T08:51:19.458495Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:745: [PQ: 72057594037928139] doesn't have tx info
2025-05-07T08:51:19.458568Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:757: [PQ: 72057594037928139] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0
2025-05-07T08:51:19.458615Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:969: [PQ: 72057594037928139] no config, start with empty partitions and default config
2025-05-07T08:51:19.458679Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:4846: [PQ: 72057594037928139] Txs.size=0, PlannedTxs.size=0
2025-05-07T08:51:19.458727Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037928139] disable metering: reason# billing is not enabled in BillingMeteringConfig
2025-05-07T08:51:19.458819Z node 2 :PERSQUEUE INFO: pq_impl.cpp:794: [PQ: 72057594037928139] doesn't have tx writes info
2025-05-07T08:51:19.459620Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2874: [PQ: 72057594037928139] server connected, pipe [2:406:2361], now have 1 active actors on pipe
2025-05-07T08:51:19.459698Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:1454: [PQ: 72057594037928139] Handle TEvPersQueue::TEvUpdateConfig
2025-05-07T08:51:19.459897Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:1640: [PQ: 72057594037928139] Config update version 2(current 0) received from actor [2:99:2134] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 2 TopicName: "rt3.dc1--topic2" Version: 2 Partitions { PartitionId: 2 } AllPartitions { PartitionId: 2 }
2025-05-07T08:51:19.462710Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:585: [PQ: 72057594037928139] Apply new config CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 2 TopicName: "rt3.dc1--topic2" Version: 2 Partitions { PartitionId: 2 } AllPartitions { PartitionId: 2 }
2025-05-07T08:51:19.462875Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037928139] disable metering: reason# billing is not enabled in BillingMeteringConfig
2025-05-07T08:51:19.463770Z node 2 :PERSQUEUE INFO: pq_impl.cpp:1481: [PQ: 72057594037928139] Config applied version 2 actor [2:99:2134] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 2 TopicName: "rt3.dc1--topic2" Version: 2 Partitions { PartitionId: 2 } AllPartitions { PartitionId: 2 }
2025-05-07T08:51:19.463908Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitConfigStep
2025-05-07T08:51:19.464378Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitInternalFieldsStep
2025-05-07T08:51:19.464619Z node 2 :PERSQUEUE INFO: partition_init.cpp:879: [PQ: 72057594037928139, Partition: 2, State: StateInit] bootstrapping 2 [2:414:2367]
2025-05-07T08:51:19.466892Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--topic2:2:Initializer] Initializing completed.
2025-05-07T08:51:19.466969Z node 2 :PERSQUEUE INFO: partition.cpp:557: [PQ: 72057594037928139, Partition: 2, State: StateInit] init complete for topic 'rt3.dc1--topic2' partition 2 generation 2 [2:414:2367]
2025-05-07T08:51:19.467024Z node 2 :PERSQUEUE DEBUG: partition.cpp:571: [PQ: 72057594037928139, Partition: 2, State: StateInit] SYNC INIT topic rt3.dc1--topic2 partitition 2 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0
2025-05-07T08:51:19.467099Z node 2 :PERSQUEUE DEBUG: partition.cpp:3847: [PQ: 72057594037928139, Partition: 2, State: StateIdle] Process pending events. Count 0
2025-05-07T08:51:19.467917Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2874: [PQ: 72057594037928139] server connected, pipe [2:417:2369], now have 1 active actors on pipe
2025-05-07T08:51:19.469675Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2874: [PQ: 72057594037928037] server connected, pipe [2:425:2372], now have 1 active actors on pipe
2025-05-07T08:51:19.469729Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2874: [PQ: 72057594037928139] server connected, pipe [2:427:2373], now have 1 active actors on pipe
2025-05-07T08:51:19.470144Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2899: [PQ: 72057594037928037] server disconnected, pipe [2:425:2372] destroyed
2025-05-07T08:51:19.470494Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2899: [PQ: 72057594037928139] server disconnected, pipe [2:427:2373] destroyed
2025-05-07T08:51:20.047093Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:3088: [PQ: 72057594037928037] Handle TEvInterconnect::TEvNodeInfo
2025-05-07T08:51:20.050971Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:3120: [PQ: 72057594037928037] Transactions request. From tx_00000000000000000000, To tx_18446744073709551615
2025-05-07T08:51:20.051321Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:745: [PQ: 72057594037928037] doesn't have tx info
2025-05-07T08:51:20.051371Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:757: [PQ: 72057594037928037] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0
2025-05-07T08:51:20.051409Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:969: [PQ: 72057594037928037] no config, start with empty partitions and default config
2025-05-07T08:51:20.051453Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4846: [PQ: 72057594037928037] Txs.size=0, PlannedTxs.size=0
2025-05-07T08:51:20.051510Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037928037] disable metering: reason# billing is not enabled in BillingMeteringConfig
2025-05-07T08:51:20.051574Z node 3 :PERSQUEUE INFO: pq_impl.cpp:794: [PQ: 72057594037928037] doesn't have tx writes info
2025-05-07T08:51:20.052230Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2874: [PQ: 72057594037928037] server connected, pipe [3:260:2252], now have 1 active actors on pipe
2025-05-07T08:51:20.052300Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:1454: [PQ: 72057594037928037] Handle TEvPersQueue::TEvUpdateConfig
2025-05-07T08:51:20.052489Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:1640: [PQ: 72057594037928037] Config update version 3(current 0) received from actor [3:99:2134] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic1" Version: 3 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 }
2025-05-07T08:51:20.055927Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:585: [PQ: 72057594037928037] Apply new config CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic1" Version: 3 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 }
2025-0 ... TopicName: "rt3.dc1--topic2" Version: 5 Partitions { PartitionId: 1 } AllPartitions { PartitionId: 1 }
2025-05-07T08:51:20.193828Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:585: [PQ: 72057594037928138] Apply new config CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 1 TopicName: "rt3.dc1--topic2" Version: 5 Partitions { PartitionId: 1 } AllPartitions { PartitionId: 1 }
2025-05-07T08:51:20.194007Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037928138] disable metering: reason# billing is not enabled in BillingMeteringConfig
2025-05-07T08:51:20.194901Z node 3 :PERSQUEUE INFO: pq_impl.cpp:1481: [PQ: 72057594037928138] Config applied version 5 actor [3:99:2134] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 1 TopicName: "rt3.dc1--topic2" Version: 5 Partitions { PartitionId: 1 } AllPartitions { PartitionId: 1 }
2025-05-07T08:51:20.195038Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:1:Initializer] Start initializing step TInitConfigStep
2025-05-07T08:51:20.195481Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:1:Initializer] Start initializing step TInitInternalFieldsStep
2025-05-07T08:51:20.195723Z node 3 :PERSQUEUE INFO: partition_init.cpp:879: [PQ: 72057594037928138, Partition: 1, State: StateInit] bootstrapping 1 [3:472:2410]
2025-05-07T08:51:20.197899Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--topic2:1:Initializer] Initializing completed.
2025-05-07T08:51:20.197992Z node 3 :PERSQUEUE INFO: partition.cpp:557: [PQ: 72057594037928138, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--topic2' partition 1 generation 2 [3:472:2410]
2025-05-07T08:51:20.198056Z node 3 :PERSQUEUE DEBUG: partition.cpp:571: [PQ: 72057594037928138, Partition: 1, State: StateInit] SYNC INIT topic rt3.dc1--topic2 partitition 1 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0
2025-05-07T08:51:20.198113Z node 3 :PERSQUEUE DEBUG: partition.cpp:3847: [PQ: 72057594037928138, Partition: 1, State: StateIdle] Process pending events. Count 0
2025-05-07T08:51:20.198971Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2874: [PQ: 72057594037928138] server connected, pipe [3:475:2412], now have 1 active actors on pipe
2025-05-07T08:51:20.218148Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:3088: [PQ: 72057594037928139] Handle TEvInterconnect::TEvNodeInfo
2025-05-07T08:51:20.223634Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:3120: [PQ: 72057594037928139] Transactions request. From tx_00000000000000000000, To tx_18446744073709551615
2025-05-07T08:51:20.224054Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:745: [PQ: 72057594037928139] doesn't have tx info
2025-05-07T08:51:20.224117Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:757: [PQ: 72057594037928139] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0
2025-05-07T08:51:20.224166Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:969: [PQ: 72057594037928139] no config, start with empty partitions and default config
2025-05-07T08:51:20.224211Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4846: [PQ: 72057594037928139] Txs.size=0, PlannedTxs.size=0
2025-05-07T08:51:20.224268Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037928139] disable metering: reason# billing is not enabled in BillingMeteringConfig
2025-05-07T08:51:20.224334Z node 3 :PERSQUEUE INFO: pq_impl.cpp:794: [PQ: 72057594037928139] doesn't have tx writes info
2025-05-07T08:51:20.225158Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2874: [PQ: 72057594037928139] server connected, pipe [3:524:2449], now have 1 active actors on pipe
2025-05-07T08:51:20.225277Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:1454: [PQ: 72057594037928139] Handle TEvPersQueue::TEvUpdateConfig
2025-05-07T08:51:20.225499Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:1640: [PQ: 72057594037928139] Config update version 6(current 0) received from actor [3:99:2134] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 2 TopicName: "rt3.dc1--topic2" Version: 6 Partitions { PartitionId: 2 } AllPartitions { PartitionId: 2 }
2025-05-07T08:51:20.229376Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:585: [PQ: 72057594037928139] Apply new config CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 2 TopicName: "rt3.dc1--topic2" Version: 6 Partitions { PartitionId: 2 } AllPartitions { PartitionId: 2 }
2025-05-07T08:51:20.229501Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037928139] disable metering: reason# billing is not enabled in BillingMeteringConfig
2025-05-07T08:51:20.230266Z node 3 :PERSQUEUE INFO: pq_impl.cpp:1481: [PQ: 72057594037928139] Config applied version 6 actor [3:99:2134] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 2 TopicName: "rt3.dc1--topic2" Version: 6 Partitions { PartitionId: 2 } AllPartitions { PartitionId: 2 }
2025-05-07T08:51:20.230407Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitConfigStep
2025-05-07T08:51:20.230757Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitInternalFieldsStep
2025-05-07T08:51:20.231007Z node 3 :PERSQUEUE INFO: partition_init.cpp:879: [PQ: 72057594037928139, Partition: 2, State: StateInit] bootstrapping 2 [3:532:2455]
2025-05-07T08:51:20.232612Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--topic2:2:Initializer] Initializing completed.
2025-05-07T08:51:20.232718Z node 3 :PERSQUEUE INFO: partition.cpp:557: [PQ: 72057594037928139, Partition: 2, State: StateInit] init complete for topic 'rt3.dc1--topic2' partition 2 generation 2 [3:532:2455] 2025-05-07T08:51:20.232777Z node 3 :PERSQUEUE DEBUG: partition.cpp:571: [PQ: 72057594037928139, Partition: 2, State: StateInit] SYNC INIT topic rt3.dc1--topic2 partitition 2 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-05-07T08:51:20.232820Z node 3 :PERSQUEUE DEBUG: partition.cpp:3847: [PQ: 72057594037928139, Partition: 2, State: StateIdle] Process pending events. Count 0 2025-05-07T08:51:20.233483Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2874: [PQ: 72057594037928139] server connected, pipe [3:535:2457], now have 1 active actors on pipe 2025-05-07T08:51:20.234980Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2874: [PQ: 72057594037928037] server connected, pipe [3:543:2460], now have 1 active actors on pipe 2025-05-07T08:51:20.235094Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2874: [PQ: 72057594037928138] server connected, pipe [3:544:2461], now have 1 active actors on pipe 2025-05-07T08:51:20.235257Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2874: [PQ: 72057594037928139] server connected, pipe [3:545:2461], now have 1 active actors on pipe 2025-05-07T08:51:20.246407Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2874: [PQ: 72057594037928139] server connected, pipe [3:550:2465], now have 1 active actors on pipe 2025-05-07T08:51:20.278488Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:3088: [PQ: 72057594037928139] Handle TEvInterconnect::TEvNodeInfo 2025-05-07T08:51:20.281496Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:3120: [PQ: 72057594037928139] Transactions request. From tx_00000000000000000000, To tx_18446744073709551615 2025-05-07T08:51:20.281776Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:745: [PQ: 72057594037928139] doesn't have tx info 2025-05-07T08:51:20.281826Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:757: [PQ: 72057594037928139] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-05-07T08:51:20.281934Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4846: [PQ: 72057594037928139] Txs.size=0, PlannedTxs.size=0 2025-05-07T08:51:20.282666Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037928139] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-05-07T08:51:20.282725Z node 3 :PERSQUEUE INFO: pq_impl.cpp:794: [PQ: 72057594037928139] doesn't have tx writes info 2025-05-07T08:51:20.282838Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitConfigStep 2025-05-07T08:51:20.283122Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitInternalFieldsStep 2025-05-07T08:51:20.283307Z node 3 :PERSQUEUE INFO: partition_init.cpp:879: [PQ: 72057594037928139, Partition: 2, State: StateInit] bootstrapping 2 [3:607:2510] 2025-05-07T08:51:20.284942Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitDiskStatusStep 2025-05-07T08:51:20.286477Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitMetaStep 2025-05-07T08:51:20.286911Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitInfoRangeStep 2025-05-07T08:51:20.287267Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitDataRangeStep 2025-05-07T08:51:20.287522Z node 3 :PERSQUEUE DEBUG: 
partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitDataStep 2025-05-07T08:51:20.287571Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitEndWriteTimestampStep 2025-05-07T08:51:20.287631Z node 3 :PERSQUEUE INFO: partition_init.cpp:773: [rt3.dc1--topic2:2:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-05-07T08:51:20.287691Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--topic2:2:Initializer] Initializing completed. 2025-05-07T08:51:20.287756Z node 3 :PERSQUEUE INFO: partition.cpp:557: [PQ: 72057594037928139, Partition: 2, State: StateInit] init complete for topic 'rt3.dc1--topic2' partition 2 generation 3 [3:607:2510] 2025-05-07T08:51:20.287823Z node 3 :PERSQUEUE DEBUG: partition.cpp:571: [PQ: 72057594037928139, Partition: 2, State: StateInit] SYNC INIT topic rt3.dc1--topic2 partitition 2 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-05-07T08:51:20.287928Z node 3 :PERSQUEUE DEBUG: partition.cpp:3847: [PQ: 72057594037928139, Partition: 2, State: StateIdle] Process pending events. Count 0 2025-05-07T08:51:20.288921Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2899: [PQ: 72057594037928138] server disconnected, pipe [3:544:2461] destroyed 2025-05-07T08:51:20.289007Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2899: [PQ: 72057594037928037] server disconnected, pipe [3:543:2460] destroyed RESPONSE Status: 1 ErrorCode: OK MetaResponse { CmdGetPartitionLocationsResult { TopicResult { Topic: "rt3.dc1--topic1" PartitionLocation { Partition: 0 Host: "::1" HostId: 3 ErrorCode: OK } ErrorCode: OK } TopicResult { Topic: "rt3.dc1--topic2" PartitionLocation { Partition: 1 Host: "::1" HostId: 3 ErrorCode: OK } PartitionLocation { Partition: 2 Host: "::1" HostId: 3 ErrorCode: OK } ErrorCode: OK } } } Assert failed: Check response: { Status: 128 ErrorReason: "the following topics are not created: rt3.dc1--topic2, Marker# PQ95" ErrorCode: UNKNOWN_TOPIC } ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/server/ut/unittest >> TMessageBusServerPersQueueGetPartitionStatusMetaRequestTest::HandlesPipeDisconnection_DisconnectionComesSecond [GOOD] Test command err: Assert failed: Check response: { Status: 130 ErrorReason: "Timeout while waiting for response, may be just slow, Marker# PQ16" ErrorCode: ERROR } 2025-05-07T08:51:19.292073Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:3088: [PQ: 72057594037928037] Handle TEvInterconnect::TEvNodeInfo 2025-05-07T08:51:19.296355Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:3120: [PQ: 72057594037928037] Transactions request. 
From tx_00000000000000000000, To tx_18446744073709551615 2025-05-07T08:51:19.296734Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:745: [PQ: 72057594037928037] doesn't have tx info 2025-05-07T08:51:19.296815Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:757: [PQ: 72057594037928037] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-05-07T08:51:19.296856Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:969: [PQ: 72057594037928037] no config, start with empty partitions and default config 2025-05-07T08:51:19.296913Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:4846: [PQ: 72057594037928037] Txs.size=0, PlannedTxs.size=0 2025-05-07T08:51:19.296966Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037928037] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-05-07T08:51:19.297036Z node 2 :PERSQUEUE INFO: pq_impl.cpp:794: [PQ: 72057594037928037] doesn't have tx writes info 2025-05-07T08:51:19.297804Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2874: [PQ: 72057594037928037] server connected, pipe [2:262:2254], now have 1 active actors on pipe 2025-05-07T08:51:19.297919Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:1454: [PQ: 72057594037928037] Handle TEvPersQueue::TEvUpdateConfig 2025-05-07T08:51:19.322737Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:1640: [PQ: 72057594037928037] Config update version 1(current 0) received from actor [2:99:2134] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic1" Version: 1 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 } 2025-05-07T08:51:19.331983Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:585: [PQ: 72057594037928037] Apply new config CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic1" Version: 1 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 } 2025-05-07T08:51:19.332165Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037928037] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-05-07T08:51:19.333065Z node 2 :PERSQUEUE INFO: pq_impl.cpp:1481: [PQ: 72057594037928037] Config applied version 1 actor [2:99:2134] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic1" Version: 1 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 } 2025-05-07T08:51:19.333227Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic1:0:Initializer] Start initializing step TInitConfigStep 2025-05-07T08:51:19.333674Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic1:0:Initializer] Start initializing step TInitInternalFieldsStep 2025-05-07T08:51:19.334420Z node 2 :PERSQUEUE INFO: partition_init.cpp:879: [PQ: 72057594037928037, Partition: 0, State: StateInit] bootstrapping 0 [2:270:2260] 2025-05-07T08:51:19.337286Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--topic1:0:Initializer] Initializing completed. 
2025-05-07T08:51:19.337399Z node 2 :PERSQUEUE INFO: partition.cpp:557: [PQ: 72057594037928037, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--topic1' partition 0 generation 2 [2:270:2260] 2025-05-07T08:51:19.337474Z node 2 :PERSQUEUE DEBUG: partition.cpp:571: [PQ: 72057594037928037, Partition: 0, State: StateInit] SYNC INIT topic rt3.dc1--topic1 partitition 0 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-05-07T08:51:19.337565Z node 2 :PERSQUEUE DEBUG: partition.cpp:3847: [PQ: 72057594037928037, Partition: 0, State: StateIdle] Process pending events. Count 0 2025-05-07T08:51:19.338455Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2874: [PQ: 72057594037928037] server connected, pipe [2:273:2262], now have 1 active actors on pipe 2025-05-07T08:51:19.437305Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:3088: [PQ: 72057594037928139] Handle TEvInterconnect::TEvNodeInfo 2025-05-07T08:51:19.441557Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:3120: [PQ: 72057594037928139] Transactions request. From tx_00000000000000000000, To tx_18446744073709551615 2025-05-07T08:51:19.441917Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:745: [PQ: 72057594037928139] doesn't have tx info 2025-05-07T08:51:19.441962Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:757: [PQ: 72057594037928139] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-05-07T08:51:19.442018Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:969: [PQ: 72057594037928139] no config, start with empty partitions and default config 2025-05-07T08:51:19.442062Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:4846: [PQ: 72057594037928139] Txs.size=0, PlannedTxs.size=0 2025-05-07T08:51:19.442133Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037928139] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-05-07T08:51:19.442210Z node 2 :PERSQUEUE INFO: pq_impl.cpp:794: [PQ: 72057594037928139] doesn't have tx writes info 2025-05-07T08:51:19.442916Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2874: [PQ: 72057594037928139] server connected, pipe [2:405:2360], now have 1 active actors on pipe 2025-05-07T08:51:19.443040Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:1454: [PQ: 72057594037928139] Handle TEvPersQueue::TEvUpdateConfig 2025-05-07T08:51:19.443219Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:1640: [PQ: 72057594037928139] Config update version 2(current 0) received from actor [2:99:2134] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 2 TopicName: "rt3.dc1--topic2" Version: 2 Partitions { PartitionId: 2 } AllPartitions { PartitionId: 2 } 2025-05-07T08:51:19.445670Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:585: [PQ: 72057594037928139] Apply new config CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 2 TopicName: "rt3.dc1--topic2" Version: 2 Partitions { PartitionId: 2 } AllPartitions { PartitionId: 2 } 2025-05-07T08:51:19.445789Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037928139] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-05-07T08:51:19.446637Z node 2 :PERSQUEUE INFO: pq_impl.cpp:1481: [PQ: 72057594037928139] Config applied version 2 actor [2:99:2134] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 
MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 2 TopicName: "rt3.dc1--topic2" Version: 2 Partitions { PartitionId: 2 } AllPartitions { PartitionId: 2 } 2025-05-07T08:51:19.446772Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitConfigStep 2025-05-07T08:51:19.447161Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitInternalFieldsStep 2025-05-07T08:51:19.447363Z node 2 :PERSQUEUE INFO: partition_init.cpp:879: [PQ: 72057594037928139, Partition: 2, State: StateInit] bootstrapping 2 [2:413:2366] 2025-05-07T08:51:19.449685Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--topic2:2:Initializer] Initializing completed. 2025-05-07T08:51:19.449757Z node 2 :PERSQUEUE INFO: partition.cpp:557: [PQ: 72057594037928139, Partition: 2, State: StateInit] init complete for topic 'rt3.dc1--topic2' partition 2 generation 2 [2:413:2366] 2025-05-07T08:51:19.449816Z node 2 :PERSQUEUE DEBUG: partition.cpp:571: [PQ: 72057594037928139, Partition: 2, State: StateInit] SYNC INIT topic rt3.dc1--topic2 partitition 2 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-05-07T08:51:19.449869Z node 2 :PERSQUEUE DEBUG: partition.cpp:3847: [PQ: 72057594037928139, Partition: 2, State: StateIdle] Process pending events. Count 0 2025-05-07T08:51:19.450672Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2874: [PQ: 72057594037928139] server connected, pipe [2:416:2368], now have 1 active actors on pipe 2025-05-07T08:51:19.452207Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2874: [PQ: 72057594037928037] server connected, pipe [2:422:2371], now have 1 active actors on pipe 2025-05-07T08:51:19.452540Z node 2 :PERSQUEUE DEBUG: partition.cpp:855: [PQ: 72057594037928037, Partition: 0, State: StateIdle] Topic PartitionStatus PartitionSize: 0 UsedReserveSize: 0 ReserveSize: 0 PartitionConfig{ MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } 2025-05-07T08:51:19.452672Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2874: [PQ: 72057594037928139] server connected, pipe [2:424:2372], now have 1 active actors on pipe 2025-05-07T08:51:19.452932Z node 2 :PERSQUEUE DEBUG: partition.cpp:855: [PQ: 72057594037928139, Partition: 2, State: StateIdle] Topic PartitionStatus PartitionSize: 0 UsedReserveSize: 0 ReserveSize: 0 PartitionConfig{ MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } 2025-05-07T08:51:19.453234Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2899: [PQ: 72057594037928037] server disconnected, pipe [2:422:2371] destroyed 2025-05-07T08:51:19.453607Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2899: [PQ: 72057594037928139] server disconnected, pipe [2:424:2372] destroyed 2025-05-07T08:51:20.050769Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:3088: [PQ: 72057594037928037] Handle TEvInterconnect::TEvNodeInfo 2025-05-07T08:51:20.054560Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:3120: [PQ: 72057594037928037] Transactions request. 
From tx_00000000000000000000, To tx_18446744073709551615 2025-05-07T08:51:20.054863Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:745: [PQ: 72057594037928037] doesn't have tx info 2025-05-07T08:51:20.054909Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:757: [PQ: 72057594037928037] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-05-07T08:51:20.054949Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:969: [PQ: 72057594037928037] no config, start with empty partitions and default config 2025-05-07T08:51:20.055014Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4846: [PQ: 72057594037928037] Txs.size=0, PlannedTxs.size=0 2025-05-07T08:51:20.055060Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037928037] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-05-07T08:51:20.055115Z node 3 :PERSQUEUE INFO: pq_impl.cpp:794: [PQ: 72057594037928037] doesn't have tx writes info 2025-05-07T08:51:20.055722Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2874: [PQ: 72057594037928037] server connected, pipe [3:260:2252], now have 1 active actors on pipe 2025-05-07T08:51:20.055811Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:1454: [PQ: 72057594037928037] Handle TEvPersQueue::TEvUpdateConfig 2025-05-07T08:51:20.055972Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:1640: [PQ: 72057594037928037] Config update version 3(current 0) received from actor [3:99:2134] txId 12345 config: CacheSize: ... 2, State: StateInit] bootstrapping 2 [3:534:2457] 2025-05-07T08:51:20.208970Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--topic2:2:Initializer] Initializing completed. 2025-05-07T08:51:20.209041Z node 3 :PERSQUEUE INFO: partition.cpp:557: [PQ: 72057594037928139, Partition: 2, State: StateInit] init complete for topic 'rt3.dc1--topic2' partition 2 generation 2 [3:534:2457] 2025-05-07T08:51:20.209118Z node 3 :PERSQUEUE DEBUG: partition.cpp:571: [PQ: 72057594037928139, Partition: 2, State: StateInit] SYNC INIT topic rt3.dc1--topic2 partitition 2 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-05-07T08:51:20.209172Z node 3 :PERSQUEUE DEBUG: partition.cpp:3847: [PQ: 72057594037928139, Partition: 2, State: StateIdle] Process pending events. 
Count 0 2025-05-07T08:51:20.209992Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2874: [PQ: 72057594037928139] server connected, pipe [3:537:2459], now have 1 active actors on pipe 2025-05-07T08:51:20.211186Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2874: [PQ: 72057594037928037] server connected, pipe [3:543:2462], now have 1 active actors on pipe 2025-05-07T08:51:20.211279Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2874: [PQ: 72057594037928138] server connected, pipe [3:544:2463], now have 1 active actors on pipe 2025-05-07T08:51:20.211494Z node 3 :PERSQUEUE DEBUG: partition.cpp:855: [PQ: 72057594037928037, Partition: 0, State: StateIdle] Topic PartitionStatus PartitionSize: 0 UsedReserveSize: 0 ReserveSize: 0 PartitionConfig{ MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } 2025-05-07T08:51:20.211713Z node 3 :PERSQUEUE DEBUG: partition.cpp:855: [PQ: 72057594037928138, Partition: 1, State: StateIdle] Topic PartitionStatus PartitionSize: 0 UsedReserveSize: 0 ReserveSize: 0 PartitionConfig{ MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } 2025-05-07T08:51:20.211812Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2874: [PQ: 72057594037928139] server connected, pipe [3:545:2463], now have 1 active actors on pipe 2025-05-07T08:51:20.212067Z node 3 :PERSQUEUE DEBUG: partition.cpp:855: [PQ: 72057594037928139, Partition: 2, State: StateIdle] Topic PartitionStatus PartitionSize: 0 UsedReserveSize: 0 ReserveSize: 0 PartitionConfig{ MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } 2025-05-07T08:51:20.223590Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2874: [PQ: 72057594037928139] server connected, pipe [3:553:2470], now have 1 active actors on pipe 2025-05-07T08:51:20.255359Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:3088: [PQ: 72057594037928139] Handle TEvInterconnect::TEvNodeInfo 2025-05-07T08:51:20.259000Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:3120: [PQ: 72057594037928139] Transactions request. 
From tx_00000000000000000000, To tx_18446744073709551615 2025-05-07T08:51:20.259365Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:745: [PQ: 72057594037928139] doesn't have tx info 2025-05-07T08:51:20.259423Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:757: [PQ: 72057594037928139] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-05-07T08:51:20.259576Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4846: [PQ: 72057594037928139] Txs.size=0, PlannedTxs.size=0 2025-05-07T08:51:20.260368Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037928139] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-05-07T08:51:20.260434Z node 3 :PERSQUEUE INFO: pq_impl.cpp:794: [PQ: 72057594037928139] doesn't have tx writes info 2025-05-07T08:51:20.260555Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitConfigStep 2025-05-07T08:51:20.260903Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitInternalFieldsStep 2025-05-07T08:51:20.261121Z node 3 :PERSQUEUE INFO: partition_init.cpp:879: [PQ: 72057594037928139, Partition: 2, State: StateInit] bootstrapping 2 [3:610:2515] 2025-05-07T08:51:20.263830Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitDiskStatusStep 2025-05-07T08:51:20.265314Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitMetaStep 2025-05-07T08:51:20.265651Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitInfoRangeStep 2025-05-07T08:51:20.266236Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitDataRangeStep 2025-05-07T08:51:20.266542Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitDataStep 2025-05-07T08:51:20.266594Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitEndWriteTimestampStep 2025-05-07T08:51:20.266677Z node 3 :PERSQUEUE INFO: partition_init.cpp:773: [rt3.dc1--topic2:2:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-05-07T08:51:20.266730Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--topic2:2:Initializer] Initializing completed. 2025-05-07T08:51:20.266789Z node 3 :PERSQUEUE INFO: partition.cpp:557: [PQ: 72057594037928139, Partition: 2, State: StateInit] init complete for topic 'rt3.dc1--topic2' partition 2 generation 3 [3:610:2515] 2025-05-07T08:51:20.266848Z node 3 :PERSQUEUE DEBUG: partition.cpp:571: [PQ: 72057594037928139, Partition: 2, State: StateInit] SYNC INIT topic rt3.dc1--topic2 partitition 2 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-05-07T08:51:20.266906Z node 3 :PERSQUEUE DEBUG: partition.cpp:3847: [PQ: 72057594037928139, Partition: 2, State: StateIdle] Process pending events. 
Count 0 2025-05-07T08:51:20.267917Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2899: [PQ: 72057594037928138] server disconnected, pipe [3:544:2463] destroyed 2025-05-07T08:51:20.268017Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2899: [PQ: 72057594037928037] server disconnected, pipe [3:543:2462] destroyed RESPONSE Status: 1 ErrorCode: OK MetaResponse { CmdGetPartitionStatusResult { TopicResult { Topic: "rt3.dc1--topic2" PartitionResult { Partition: 1 Status: STATUS_OK LastInitDurationSeconds: 0 CreationTimestamp: 0 GapCount: 0 GapSize: 0 AvgWriteSpeedPerSec: 0 AvgWriteSpeedPerMin: 0 AvgWriteSpeedPerHour: 0 AvgWriteSpeedPerDay: 0 AvgReadSpeedPerSec: 0 AvgReadSpeedPerMin: 0 AvgReadSpeedPerHour: 0 AvgReadSpeedPerDay: 0 ReadBytesQuota: 0 WriteBytesQuota: 50000000 PartitionSize: 0 StartOffset: 0 EndOffset: 0 LastWriteTimestampMs: 78 WriteLagMs: 0 AvgQuotaSpeedPerSec: 0 AvgQuotaSpeedPerMin: 0 AvgQuotaSpeedPerHour: 0 AvgQuotaSpeedPerDay: 0 SourceIdCount: 0 SourceIdRetentionPeriodSec: 0 UsedReserveSize: 0 AggregatedCounters { Values: 78 Values: 0 Values: 1 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 50000000 Values: 0 Values: 9223372036854775807 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 1 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 } Generation: 2 Cookie: 1 ScaleStatus: NORMAL } PartitionResult { Partition: 2 Status: STATUS_OK LastInitDurationSeconds: 0 CreationTimestamp: 0 GapCount: 0 GapSize: 0 AvgWriteSpeedPerSec: 0 AvgWriteSpeedPerMin: 0 AvgWriteSpeedPerHour: 0 AvgWriteSpeedPerDay: 0 AvgReadSpeedPerSec: 0 AvgReadSpeedPerMin: 0 AvgReadSpeedPerHour: 0 AvgReadSpeedPerDay: 0 ReadBytesQuota: 0 WriteBytesQuota: 50000000 PartitionSize: 0 StartOffset: 0 EndOffset: 0 LastWriteTimestampMs: 92 WriteLagMs: 0 AvgQuotaSpeedPerSec: 0 AvgQuotaSpeedPerMin: 0 AvgQuotaSpeedPerHour: 0 AvgQuotaSpeedPerDay: 0 SourceIdCount: 0 SourceIdRetentionPeriodSec: 0 UsedReserveSize: 0 AggregatedCounters { Values: 92 Values: 0 Values: 1 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 50000000 Values: 0 Values: 9223372036854775807 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 1 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 } Generation: 2 Cookie: 1 ScaleStatus: NORMAL } ErrorCode: OK } TopicResult { Topic: "rt3.dc1--topic1" PartitionResult { Partition: 0 Status: STATUS_OK LastInitDurationSeconds: 0 CreationTimestamp: 0 GapCount: 0 GapSize: 0 AvgWriteSpeedPerSec: 0 AvgWriteSpeedPerMin: 0 AvgWriteSpeedPerHour: 0 AvgWriteSpeedPerDay: 0 AvgReadSpeedPerSec: 0 AvgReadSpeedPerMin: 0 AvgReadSpeedPerHour: 0 AvgReadSpeedPerDay: 0 ReadBytesQuota: 0 WriteBytesQuota: 50000000 PartitionSize: 0 StartOffset: 0 EndOffset: 0 LastWriteTimestampMs: 38 WriteLagMs: 0 AvgQuotaSpeedPerSec: 0 AvgQuotaSpeedPerMin: 0 AvgQuotaSpeedPerHour: 0 AvgQuotaSpeedPerDay: 0 SourceIdCount: 0 SourceIdRetentionPeriodSec: 0 UsedReserveSize: 0 AggregatedCounters { Values: 38 Values: 0 Values: 1 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 50000000 Values: 0 Values: 9223372036854775807 
Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 1 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 } Generation: 2 Cookie: 1 ScaleStatus: NORMAL } ErrorCode: OK } } } ------- [TM] {asan, default-linux-x86_64, release} ydb/core/security/ut/unittest >> TTicketParserTest::LoginEmptyTicketBad [GOOD] Test command err: 2025-05-07T08:50:40.203232Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501623552011444094:2065];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:50:40.204229Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003ecb/r3tmp/tmp5m25T0/pdisk_1.dat 2025-05-07T08:50:41.669090Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:50:42.542773Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:50:42.618788Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:50:42.618917Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:50:42.631000Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 14191, node 1 2025-05-07T08:50:42.889621Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:50:42.889645Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:50:42.889668Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:50:42.889780Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:18429 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-05-07T08:50:43.367592Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:50:43.392259Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T08:50:43.751561Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1486: Updated state for /Root keys 1 2025-05-07T08:50:43.777199Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:758: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-05-07T08:50:43.777247Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:763: CanInitLoginToken, target database candidates(1): /Root 2025-05-07T08:50:43.778588Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1784: Ticket eyJh****q8qg (11236201) () has now valid token of user1 2025-05-07T08:50:43.778609Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:806: CanInitLoginToken, database /Root, A4 success test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003ecb/r3tmp/tmpmqLhHd/pdisk_1.dat 2025-05-07T08:50:46.649584Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:50:46.740543Z node 2 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:50:46.769100Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:50:46.769194Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:50:46.771506Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 31619, node 2 2025-05-07T08:50:46.892138Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:50:46.892165Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:50:46.892175Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:50:46.892332Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:31860 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:50:47.161113Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:50:47.168702Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-05-07T08:50:47.418134Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1486: Updated state for /Root keys 1 2025-05-07T08:50:47.426504Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:758: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-05-07T08:50:47.426539Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:763: CanInitLoginToken, target database candidates(1): /Root 2025-05-07T08:50:47.427262Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1784: Ticket eyJh****n0AQ (3A9EC83A) () has now valid token of user1 2025-05-07T08:50:47.427278Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:806: CanInitLoginToken, database /Root, A4 success 2025-05-07T08:50:50.778105Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7501623595004391597:2087];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:50:50.782316Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003ecb/r3tmp/tmpczMgrC/pdisk_1.dat 2025-05-07T08:50:51.049794Z node 3 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:50:51.053129Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:50:51.053218Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:50:51.055036Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 25524, node 3 2025-05-07T08:50:51.130663Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:50:51.130688Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:50:51.130699Z node 3 
:NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:50:51.130851Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:24791 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:50:51.571694Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:50:51.583528Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-05-07T08:50:51.861492Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1486: Updated state for /Root keys 1 2025-05-07T08:50:51.874342Z node 3 :TICKET_PARSER TRACE: ticket_parser_impl.h:758: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-05-07T08:50:51.874378Z node 3 :TICKET_PARSER TRACE: ticket_parser_impl.h:763: CanInitLoginToken, target database candidates(1): /Root 2025-05-07T08:50:51.875207Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1784: Ticket eyJh****zfiA (6383B8B7) () has now valid token of user1 2025-05-07T08:50:51.875234Z node 3 :TICKET_PARSER TRACE: ticket_parser_impl.h:806: CanInitLoginToken, database /Root, A4 success 2025-05-07T08:50:51.875646Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1486: Updated state for /Root keys 1 2025-05-07T08:50:55.782097Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7501623595004391597:2087];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:50:55.782190Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:50:55.806114Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1506: Refreshing ticket eyJh****zfiA (6383B8B7) 2025-05-07T08:50:55.806533Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1784: Ticket eyJh****zfiA (6383B8B7) () has now valid token of user1 2025-05-07T08:50:58.818008Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1506: Refreshing ticket eyJh****zfiA (6383B8B7) 2025-05-07T08:50:58.818376Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1784: Ticket eyJh****zfiA (6383B8B7) () has now valid token of user1 2025-05-07T08:51:01.878538Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1486: Updated state for /Root keys 1 
2025-05-07T08:51:02.820564Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1506: Refreshing ticket eyJh****zfiA (6383B8B7) 2025-05-07T08:51:02.820894Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1784: Ticket eyJh****zfiA (6383B8B7) () has now valid token of user1 2025-05-07T08:51:05.990319Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7261: Cannot get console configs 2025-05-07T08:51:05.990351Z node 3 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:51:07.826329Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1506: Refreshing ticket eyJh****zfiA (6383B8B7) 2025-05-07T08:51:07.826730Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1784: Ticket eyJh****zfiA (6383B8B7) () has now valid token of user1 2025-05-07T08:51:11.832391Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1506: Refreshing ticket eyJh****zfiA (6383B8B7) 2025-05-07T08:51:11.832782Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1784: Ticket eyJh****zfiA (6383B8B7) () has now valid token of user1 2025-05-07T08:51:12.676741Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7501623692658937874:2075];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:51:12.677397Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003ecb/r3tmp/tmpJ22za7/pdisk_1.dat 2025-05-07T08:51:12.917907Z node 4 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 62985, node 4 2025-05-07T08:51:12.970114Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:51:12.970247Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:51:12.975237Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:51:13.094782Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:51:13.094808Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:51:13.094816Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:51:13.094932Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:16626 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:51:13.474873Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:51:13.487910Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T08:51:13.518224Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1486: Updated state for /Root keys 1 2025-05-07T08:51:13.525827Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:758: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-05-07T08:51:13.525881Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:763: CanInitLoginToken, target database candidates(1): /Root 2025-05-07T08:51:13.526997Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1784: Ticket eyJh****RhYw (C75A8E8D) () has now valid token of user1 2025-05-07T08:51:13.527022Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:806: CanInitLoginToken, database /Root, A4 success 2025-05-07T08:51:13.527682Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1486: Updated state for /Root keys 1 2025-05-07T08:51:17.676840Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7501623692658937874:2075];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:51:17.676932Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:51:17.690592Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1506: Refreshing ticket eyJh****RhYw (C75A8E8D) 2025-05-07T08:51:17.690838Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1815: Ticket eyJh****RhYw (C75A8E8D) () has now permanent error message 'User not found' 2025-05-07T08:51:21.694433Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1506: Refreshing ticket eyJh****RhYw (C75A8E8D) 2025-05-07T08:51:24.126486Z node 5 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[5:7501623743587290436:2060];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:51:24.126554Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # 
/home/runner/.ya/build/build_root/zvgn/003ecb/r3tmp/tmp9jCgom/pdisk_1.dat 2025-05-07T08:51:24.227073Z node 5 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 16446, node 5 2025-05-07T08:51:24.269514Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:51:24.269590Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:51:24.270673Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:51:24.289782Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:51:24.289804Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:51:24.289810Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:51:24.289895Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:11639 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:51:24.552485Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T08:51:24.600565Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1486: Updated state for /Root keys 1
2025-05-07T08:51:24.610835Z node 5 :TICKET_PARSER ERROR: ticket_parser_impl.h:922: Ticket **** (00000000): Ticket is empty
|89.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/dsproxy/ut_fat/ydb-core-blobstorage-dsproxy-ut_fat
|89.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/balance_coverage/ut/ydb-core-tx-balance_coverage-ut
>> AssignTxId::Basic
>> TPQTest::TestPartitionTotalQuota
>> TPQTabletTests::UpdateConfig_1
>> AggregateStatistics::ChildNodesShouldBeInvalidateByTimeout
|89.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/provider/ut/ydb-core-kqp-provider-ut
|89.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/dsproxy/ut_fat/ydb-core-blobstorage-dsproxy-ut_fat
|89.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/balance_coverage/ut/ydb-core-tx-balance_coverage-ut
|89.4%| [LD] {RESULT} $(B)/ydb/core/blobstorage/dsproxy/ut_fat/ydb-core-blobstorage-dsproxy-ut_fat
|89.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/provider/ut/ydb-core-kqp-provider-ut
|89.4%| [LD] {RESULT} $(B)/ydb/core/kqp/provider/ut/ydb-core-kqp-provider-ut
|89.4%| [LD] {RESULT} $(B)/ydb/core/tx/balance_coverage/ut/ydb-core-tx-balance_coverage-ut
|89.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/controller/ut_assign_tx_id/unittest
|89.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/controller/ut_assign_tx_id/unittest
|89.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/controller/ut_assign_tx_id/unittest
|89.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/ydb-public-sdk-cpp-src-client-persqueue_public-ut
>> TSourceIdTests::SourceIdStorageAdd [GOOD]
>> TSourceIdTests::ProtoSourceIdStorageParseAndAdd [GOOD]
>> TSourceIdTests::SourceIdStorageComplexDelete
>> AggregateStatistics::ChildNodesShouldBeInvalidateByTimeout [GOOD]
|89.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/ydb-public-sdk-cpp-src-client-persqueue_public-ut
------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/unittest >> TPQTabletTests::Huge_ProposeTransacton [GOOD] Test command err: 2025-05-07T08:50:52.767506Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3088: [PQ: 72057594037927937] Handle TEvInterconnect::TEvNodeInfo 2025-05-07T08:50:52.772370Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3120: [PQ: 72057594037927937] Transactions request.
From tx_00000000000000000000, To tx_18446744073709551615 2025-05-07T08:50:52.772670Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:745: [PQ: 72057594037927937] doesn't have tx info 2025-05-07T08:50:52.772731Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:757: [PQ: 72057594037927937] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-05-07T08:50:52.772797Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:969: [PQ: 72057594037927937] no config, start with empty partitions and default config 2025-05-07T08:50:52.772849Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4846: [PQ: 72057594037927937] Txs.size=0, PlannedTxs.size=0 2025-05-07T08:50:52.772893Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-05-07T08:50:52.773021Z node 1 :PERSQUEUE INFO: pq_impl.cpp:794: [PQ: 72057594037927937] doesn't have tx writes info 2025-05-07T08:50:52.804897Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2874: [PQ: 72057594037927937] server connected, pipe [1:206:2212], now have 1 active actors on pipe 2025-05-07T08:50:52.804995Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:1454: [PQ: 72057594037927937] Handle TEvPersQueue::TEvUpdateConfig 2025-05-07T08:50:52.819536Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:1640: [PQ: 72057594037927937] Config update version 1(current 0) received from actor [1:177:2192] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 ImportantClientId: "consumer" LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "topic" Version: 1 LocalDC: true Topic: "topic" TopicPath: "/topic" YcCloudId: "somecloud" YcFolderId: "somefolder" YdbDatabaseId: "PQ" YdbDatabasePath: "/Root/PQ" Partitions { PartitionId: 0 } ReadRuleGenerations: 1 ReadRuleGenerations: 1 FederationAccount: "federationAccount" MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 1 Important: false } Consumers { Name: "consumer" Generation: 1 Important: true } 2025-05-07T08:50:52.822868Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:585: [PQ: 72057594037927937] Apply new config CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 ImportantClientId: "consumer" LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "topic" Version: 1 LocalDC: true Topic: "topic" TopicPath: "/topic" YcCloudId: "somecloud" YcFolderId: "somefolder" YdbDatabaseId: "PQ" YdbDatabasePath: "/Root/PQ" Partitions { PartitionId: 0 } ReadRuleGenerations: 1 ReadRuleGenerations: 1 FederationAccount: "federationAccount" MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 1 Important: false } Consumers { Name: "consumer" Generation: 1 Important: true } 2025-05-07T08:50:52.823071Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-05-07T08:50:52.824014Z node 1 :PERSQUEUE INFO: pq_impl.cpp:1481: [PQ: 72057594037927937] Config applied version 1 actor [1:177:2192] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 ImportantClientId: "consumer" LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } 
PartitionIds: 0 TopicName: "topic" Version: 1 LocalDC: true Topic: "topic" TopicPath: "/topic" YcCloudId: "somecloud" YcFolderId: "somefolder" YdbDatabaseId: "PQ" YdbDatabasePath: "/Root/PQ" Partitions { PartitionId: 0 } ReadRuleGenerations: 1 ReadRuleGenerations: 1 FederationAccount: "federationAccount" MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 1 Important: false } Consumers { Name: "consumer" Generation: 1 Important: true } 2025-05-07T08:50:52.824186Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:0:Initializer] Start initializing step TInitConfigStep 2025-05-07T08:50:52.824606Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:0:Initializer] Start initializing step TInitInternalFieldsStep 2025-05-07T08:50:52.824989Z node 1 :PERSQUEUE INFO: partition_init.cpp:879: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [1:214:2218] 2025-05-07T08:50:52.825928Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:55: [topic:0:Initializer] Initializing completed. 2025-05-07T08:50:52.826307Z node 1 :PERSQUEUE INFO: partition.cpp:557: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'topic' partition 0 generation 2 [1:214:2218] 2025-05-07T08:50:52.826376Z node 1 :PERSQUEUE DEBUG: partition.cpp:571: [PQ: 72057594037927937, Partition: 0, State: StateInit] SYNC INIT topic topic partitition 0 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-05-07T08:50:52.827279Z node 1 :PERSQUEUE DEBUG: partition.cpp:3847: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Process pending events. Count 0 2025-05-07T08:50:52.827407Z node 1 :PERSQUEUE DEBUG: partition.cpp:3152: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'topic' partition 0 user user reinit request with generation 1 2025-05-07T08:50:52.827453Z node 1 :PERSQUEUE DEBUG: partition.cpp:3221: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'topic' partition 0 user user reinit with generation 1 done 2025-05-07T08:50:52.827507Z node 1 :PERSQUEUE DEBUG: partition.cpp:3152: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'topic' partition 0 user consumer reinit request with generation 1 2025-05-07T08:50:52.827557Z node 1 :PERSQUEUE DEBUG: partition.cpp:3221: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'topic' partition 0 user consumer reinit with generation 1 done 2025-05-07T08:50:52.827709Z node 1 :PERSQUEUE DEBUG: partition.cpp:2182: [PQ: 72057594037927937, Partition: 0, State: StateIdle] === DumpKeyValueRequest === 2025-05-07T08:50:52.827750Z node 1 :PERSQUEUE DEBUG: partition.cpp:2183: [PQ: 72057594037927937, Partition: 0, State: StateIdle] --- delete ---------------- 2025-05-07T08:50:52.827790Z node 1 :PERSQUEUE DEBUG: partition.cpp:2191: [PQ: 72057594037927937, Partition: 0, State: StateIdle] --- write ----------------- 2025-05-07T08:50:52.827838Z node 1 :PERSQUEUE DEBUG: partition.cpp:2194: [PQ: 72057594037927937, Partition: 0, State: StateIdle] i0000000000 2025-05-07T08:50:52.827869Z node 1 :PERSQUEUE DEBUG: partition.cpp:2194: [PQ: 72057594037927937, Partition: 0, State: StateIdle] m0000000000cuser 2025-05-07T08:50:52.827890Z node 1 :PERSQUEUE DEBUG: partition.cpp:2194: [PQ: 72057594037927937, Partition: 0, State: StateIdle] m0000000000uuser 2025-05-07T08:50:52.827911Z node 1 :PERSQUEUE DEBUG: partition.cpp:2194: [PQ: 72057594037927937, Partition: 0, State: StateIdle] m0000000000cconsumer 
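The DumpKeyValueRequest lines above list the per-partition KV keys this write touches: 'i' plus a zero-padded partition id for partition metadata (i0000000000), and 'm' plus the padded partition id plus a 'c' or 'u' marker plus the consumer name for consumer state (m0000000000cuser, m0000000000cconsumer). A minimal sketch of composing keys in that shape, assuming the 10-digit padding read off the dump (illustrative only, not the actual YDB key builder):

    #include <cstdio>
    #include <string>

    // Shapes match the dump above: i0000000000, m0000000000cuser, ...
    // The 10-digit zero padding is an assumption taken from the log.
    std::string PartitionKey(char prefix, unsigned partition) {
        char buf[16];
        std::snprintf(buf, sizeof(buf), "%c%010u", prefix, partition);
        return buf;
    }

    std::string ConsumerKey(unsigned partition, char kind, const std::string& consumer) {
        // kind is 'c' or 'u', mirroring the two key families in the dump
        return PartitionKey('m', partition) + kind + consumer;
    }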
2025-05-07T08:50:52.827943Z node 1 :PERSQUEUE DEBUG: partition.cpp:2194: [PQ: 72057594037927937, Partition: 0, State: StateIdle] m0000000000uconsumer 2025-05-07T08:50:52.827975Z node 1 :PERSQUEUE DEBUG: partition.cpp:2196: [PQ: 72057594037927937, Partition: 0, State: StateIdle] --- rename ---------------- 2025-05-07T08:50:52.828027Z node 1 :PERSQUEUE DEBUG: partition.cpp:2201: [PQ: 72057594037927937, Partition: 0, State: StateIdle] =========================== 2025-05-07T08:50:52.828120Z node 1 :PERSQUEUE DEBUG: partition_read.cpp:774: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'topic' partition 0 user user readTimeStamp for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 0 rrg 0 2025-05-07T08:50:52.828154Z node 1 :PERSQUEUE DEBUG: partition_read.cpp:774: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'topic' partition 0 user consumer readTimeStamp for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 0 rrg 0 2025-05-07T08:50:52.828313Z node 1 :PERSQUEUE DEBUG: read.h:262: CacheProxy. Passthrough write request to KV 2025-05-07T08:50:52.831247Z node 1 :PERSQUEUE DEBUG: partition_write.cpp:524: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0 2025-05-07T08:50:52.831718Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2874: [PQ: 72057594037927937] server connected, pipe [1:221:2223], now have 1 active actors on pipe 2025-05-07T08:50:52.832333Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2874: [PQ: 72057594037927937] server connected, pipe [1:224:2225], now have 1 active actors on pipe 2025-05-07T08:50:52.833193Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3212: [PQ: 72057594037927937] Handle TEvPersQueue::TEvProposeTransaction SourceActor { RawX1: 177 RawX2: 4294969488 } TxId: 67890 Data { Operations { PartitionId: 0 CommitOffsetsBegin: 0 CommitOffsetsEnd: 0 Consumer: "consumer" Path: "/topic" } SendingShards: 22222 ReceivingShards: 22222 Immediate: false } 2025-05-07T08:50:52.833314Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3369: [PQ: 72057594037927937] distributed transaction 2025-05-07T08:50:52.833387Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3683: [PQ: 72057594037927937] Propose TxId 67890, WriteId (empty maybe) 2025-05-07T08:50:52.833425Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4279: [PQ: 72057594037927937] Try execute txs with state UNKNOWN 2025-05-07T08:50:52.833486Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4324: [PQ: 72057594037927937] TxId 67890, State UNKNOWN 2025-05-07T08:50:52.833526Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3908: [PQ: 72057594037927937] schedule TEvProposeTransactionResult(PREPARED) 2025-05-07T08:50:52.833574Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4214: [PQ: 72057594037927937] TxId 67890, NewState PREPARING 2025-05-07T08:50:52.833621Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3804: [PQ: 72057594037927937] write key for TxId 67890 2025-05-07T08:50:52.833749Z node 1 :PERSQUEUE DEBUG: transaction.cpp:374: [TxId: 67890] save tx TxId: 67890 State: PREPARED MinStep: 134 MaxStep: 30134 PredicatesReceived { TabletId: 22222 } PredicateRecipients: 22222 Operations { PartitionId: 0 CommitOffsetsBegin: 0 CommitOffsetsEnd: 0 Consumer: "consumer" Path: "/topic" } Kind: KIND_DATA SourceActor { RawX1: 177 RawX2: 4294969488 } Partitions { } 2025-05-07T08:50:52.833861Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3621: [PQ: 72057594037927937] Send TEvKeyValue::TEvRequest (WRITE_TX_COOKIE) 2025-05-07T08:50:52.837858Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:1225: [PQ: 72057594037927937] 
Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-05-07T08:50:52.837939Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4279: [PQ: 72057594037927937] Try execute txs with state PREPARING 2025-05-07T08:50:52.837990Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4324: [PQ: 72057594037927937] TxId 67890, State PREPARING 2025-05-07T08:50:52.838028Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4214: [PQ: 72057594037927937] TxId 67890, NewState PREPARED 2025-05-07T08:50:52.838347Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3212: [PQ: 72057594037927937] Handle TEvPersQueue::TEvProposeTransaction SourceActor { RawX1: 177 RawX2: 4294969488 } TxId: 67891 Data { Operations { PartitionId: 0 CommitOffsetsBegin: 0 CommitOffsetsEnd: 0 Consumer: "consumer" Path: "/topic" } SendingShards: 22222 ReceivingShards: 22222 Immediate: false } 2025-05-07T08:50:52.838406Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3369: [PQ: 72057594037927937] distributed transaction 2025-05-07T08:50:52.838474Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3683: [PQ: 72057594037927937] Propose TxId 67891, ...
[long runs of the filler character 'a' elided from the four huge consumer names generated by this test] ...a-2496" Generation: 2 Important: false } Consumers { Name: "fake-consumer-a...a-2497" Generation: 2 Important: false } Consumers { Name: "fake-consumer-a...a-2498" Generation: 2 Important: false } Consumers { Name: "fake-consumer-a...a-2499" Generation: 2 Important: false } } BootstrapConfig { } SourceActor { RawX1: 177 RawX2: 25769805968 } Partitions { Partition { PartitionId: 0 } Partition { PartitionId: 1 } } 2025-05-07T08:51:29.260323Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3621: [PQ: 72057594037927937] Send TEvKeyValue::TEvRequest (WRITE_TX_COOKIE) 2025-05-07T08:51:29.277721Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:1225: [PQ: 72057594037927937] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-05-07T08:51:29.277796Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4279:
[PQ: 72057594037927937] Try execute txs with state CALCULATED 2025-05-07T08:51:29.277830Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4324: [PQ: 72057594037927937] TxId 67890, State CALCULATED 2025-05-07T08:51:29.277873Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4271: [PQ: 72057594037927937] TxId 67890 State CALCULATED FrontTxId 67890 2025-05-07T08:51:29.277918Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4214: [PQ: 72057594037927937] TxId 67890, NewState WAIT_RS 2025-05-07T08:51:29.277963Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4249: [PQ: 72057594037927937] TxId 67890 moved from CALCULATED to WAIT_RS 2025-05-07T08:51:29.278064Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3956: [PQ: 72057594037927937] Send TEvTxProcessing::TEvReadSet to 0 receivers. Wait TEvTxProcessing::TEvReadSet from 1 senders. 2025-05-07T08:51:29.278125Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4447: [PQ: 72057594037927937] HaveParticipantsDecision 0 |89.4%| [LD] {RESULT} $(B)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/ydb-public-sdk-cpp-src-client-persqueue_public-ut |89.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/database/ut/unittest >> TSourceIdTests::SourceIdStorageComplexDelete [GOOD] >> TSourceIdTests::HeartbeatEmitter [GOOD] >> TSourceIdTests::SourceIdMinSeqNo [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_backup/unittest >> TBackupTests::ShouldSucceedOnLargeData[Raw] [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140] 2025-05-07T08:50:45.558085Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:50:45.558187Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:50:45.558231Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:50:45.558265Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:50:45.558340Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:50:45.558386Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:50:45.558475Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:50:45.558558Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:50:45.559409Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources 
configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:50:45.559837Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:50:45.671621Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:50:45.671697Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:50:45.693363Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:50:45.693647Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:50:45.693846Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:50:45.705510Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:50:45.705862Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:50:45.706640Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:50:45.706924Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:50:45.717548Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:50:45.719114Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:50:45.719202Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:50:45.719308Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:50:45.719381Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:50:45.719431Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:50:45.719684Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:50:45.727491Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T08:50:45.890782Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:50:45.891069Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 
72057594046678944 2025-05-07T08:50:45.891352Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:50:45.891623Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:50:45.891701Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:50:45.894519Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:50:45.894760Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:50:45.894982Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:50:45.895046Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:50:45.895088Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:50:45.895138Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:50:45.897554Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:50:45.897638Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:50:45.897721Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:50:45.899908Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:50:45.899974Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:50:45.900014Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:50:45.900069Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:50:45.904236Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:50:45.906801Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 
from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:50:45.907027Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:50:45.908148Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:50:45.908304Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:50:45.908369Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:50:45.908736Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:50:45.908792Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:50:45.908988Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:50:45.909079Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:50:45.911764Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:50:45.911815Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:50:45.912062Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:50:45.912116Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 
1:5415], msg# NKikimr::NDataShard::TEvExportScan::TEvBuffer { Last: 0 Checksum: } REQUEST: PUT /data_00.csv?partNumber=100&uploadId=1 HTTP/1.1 HEADERS: Host: localhost:18327 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 7FC2C1AA-675A-4942-A3D2-78C2F7C1E3C1 amz-sdk-request: attempt=1 content-length: 130 content-md5: Wyd1w7MZYbbZucaVvuRDAw== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 S3_MOCK::HttpServeWrite: /data_00.csv / partNumber=100&uploadId=1 / 130 2025-05-07T08:51:29.474140Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:578: [Export] [s3] Handle TEvExternalStorage::TEvUploadPartResponse: self# [1:3452:5416], result# UploadPartResult { ETag: 5b2775c3b31961b6d9b9c695bee44303 } 2025-05-07T08:51:29.474436Z node 1 :DATASHARD_BACKUP DEBUG: export_scan.cpp:130: [Export] [scanner] Handle TEvExportScan::TEvFeed: self# [1:3451:5415] 2025-05-07T08:51:29.474557Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:445: [Export] [s3] Handle TEvExportScan::TEvBuffer: self# [1:3452:5416], sender# [1:3451:5415], msg# NKikimr::NDataShard::TEvExportScan::TEvBuffer { Last: 1 Checksum: } REQUEST: PUT /data_00.csv?partNumber=101&uploadId=1 HTTP/1.1 HEADERS: Host: localhost:18327 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 633B1521-70C8-46F5-B7DE-C984300209B5 amz-sdk-request: attempt=1 content-length: 0 content-md5: 1B2M2Y8AsgTpgAmY7PhCfg== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 S3_MOCK::HttpServeWrite: /data_00.csv / partNumber=101&uploadId=1 / 0 2025-05-07T08:51:29.478317Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:578: [Export] [s3] Handle TEvExternalStorage::TEvUploadPartResponse: self# [1:3452:5416], result# UploadPartResult { ETag: d41d8cd98f00b204e9800998ecf8427e } 2025-05-07T08:51:29.478397Z node 1 :DATASHARD_BACKUP INFO: export_s3_uploader.cpp:702: [Export] [s3] Finish: self# [1:3452:5416], success# 1, error# , multipart# 1, uploadId# 1 2025-05-07T08:51:29.484431Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:512: [Export] [s3] Handle TEvDataShard::TEvS3Upload: self# [1:3452:5416], upload# { Id: 1 Status: Complete Error: (empty maybe) Parts: 
[a59dd9a97cf3685e69093fb2d96653c6,bdbb215613239cb3a835fee1fe7e7ca3,cb38dbc776d5763f1926dfb22d508c87,3c430d66d07a0a4b1fa889f321fce197,43baf91083f286b60bf15e7786459cd9,90b5581bef612fa3bf9b38b336af405f,fd4869c26a12d22ee79256d778954d04,a9459bc28198b0b6bd67732c492fd740,697a3f8386ea1ff4e327de943224cb1a,614da0b4ec9464e69cd0c59909e80fbb,9b94eb3f67aa4c8a0bcbf546833ed966,fd45c3afacec641ad19e59d2b31aeba4,fd69678aecbc149601f58cf13c64d33e,90c09ab4923bc9f97f825d36e32bf362,c1586416a281a4cca2b2b4e333d9b079,f31908576272623f9f0a19bf774cde8e,6fe3b42388304d2af07c629aeb683581,7bc90eec21ca5bb3648e6a48e83c5730,8e1dda26de1af89bdffe2eefdcebea1d,14dc42d90caa1575bbfffa9dc8f21d66,92efb2368eecb32d4075c09294fde0b7,98efff5f7c7ecb42e7af65142ce05af9,6206c81807b3b9283b0173ee2c682100,616b431b91aedc9de4593321eb42ba96,9ae4762563ffdec596cc9ca4cb8913e1,946ebf2d95b4796ea2faee21f017be79,45834a9948bb4ab8b62d1894156d13ed,6ad3fe7286856927c1e00422bc8da697,ef89464d20eae46829e1bf557e4d04ce,f128e5de32097d205453080b01c94ac3,c13e650ee2cfcecfdf4f578a2e5b1c2d,fc26314711b25d20fc654cf59301b806,56f6f2c574fba86496a87a7dd5fab46c,c7951eace72cfe0f14f808173e07bc64,3d9ad3340e58b973eaf8d4f14ba3b0f9,fc41d6fdfb52389dda8b26d7a0a3a889,9974b6ae96ffd0b756acb67088e890f9,cde8a5604010abe8fccfa9492144036f,0364e048eaac35c26d48b0c5072b5255,aac5a84927124d6ae4931e2650c80d9f,eab068fe4ca35c2f3e35890bd727eb4f,bc3646bdbcbc7f97dcddf2202ea9421f,6d3f63d672eda4a4617c9e7589a68bfc,0401bade6c3031b5be872238520b993a,1c6405688f86423480173e3e316a20bd,52395f68e877cbb8d7115a247331b0a7,4b0673ac18058554d2c53bf9f99b34b2,87bc1b9e650b31e81a9ad2531e3ef9da,b29053c8cd093c8b92ad3954c42cb7be,faf1084f6b33b00e2e822d1d3c3f0083,eedec03ee8d7eda4654db7206ad0889e,be4469dd028d5519a67098055f25513f,a7afa9827ec27c565cff1ed505a06f4b,91fe8109d2ad934c4364d90c29aaba71,73b81ea00e11db12d66497d30eb48446,cce69ef69777afeab34eefa515abc7f4,4e4ac1a421353964356400b8be8e21da,32cd6083b12660bcd4062af08d89eb05,71957b9db37811c7680638b82dc6384b,a8787e692c423a2dfa07dd261e72790a,283838ab16206b27738ea6653110f833,88bf084fb3029f0d5c0705eece930d70,1ed2f9f7221f1718b81fdf2d846347dd,406706cfbc454922dcad50b9c534b8d1,dbb606c993d798974ed4f5c9ebf195ca,1a4a3868dc6fa26c6b019d237f9ea6f4,82660a3c6b576a1b3fea925f3c179a2e,d393db2749ae42e854e85eeec2ea3592,b42c92ad14ee0e5351fec7e5a045a91b,2c7af27f9dc77efbcbe71c2d7997d6e9,278aba62ab1d9e3ff16df2d82ac5f5c7,6b8380404a7e7ec95ad5f3941d5d404c,c9813b9fc1d6b5087e64849076edd0f8,160785e4dac02a91c43a497ee59eea06,db529a9ba22f60f404031cfe85e966e9,9b70af168e2d3769bd8bc4dffa3202ea,9ac39c3843b6621ace44acf430a59e06,4603ff564a46e93951f246ed18926071,66b85f35ee76a7f71f50e9aad56758de,1665c284ad04d6b893b69372bf8fc6b9,8c1c27ec88fb52f06de6e7516a392672,0a5f992db51277a05ec12f0d6459ef21,8debe3a6023155561cb0890fc05bd7fb,938ece258b7596f8eea7e82bc2b8f88c,767ca0dcf0b154fa3c818044bbfc58fd,914cc7165d994bb05824332ac120446f,ab0ece250f5959a510170ee07aa21b5d,8bf4b44d67f062026b0010a8a0b39cc0,e0aa13fa8246e68c18905d3abadfc44d,27b021b75b6a95f63ea27f7ec238c05f,673e661e4cfea1e431678dd9881c2a8c,f101b34943f1831ae8c0b46ffcb1c2d6,562b32a8142b29c1a88e507ab1981a6b,fdea4c6fc2befb44614992ca8bf34b21,b7c8ec6acc45b037978482996e910b75,aec72fbd2e171b798900b22897d00941,710ef5b5e8eba750b6acc9b32dff42a3,821c7e22ef9c22098171e7f837dcfcc8,aecc9f6d0e6f54e938a10d40fda96d7b,5b2775c3b31961b6d9b9c695bee44303,d41d8cd98f00b204e9800998ecf8427e] } REQUEST: POST /data_00.csv?uploadId=1 HTTP/1.1 HEADERS: Host: localhost:18327 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 
449C3B8D-D620-42C7-B67C-B1BC3E0B3FE0 amz-sdk-request: attempt=1 content-length: 11529 content-type: application/xml user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-api-version: 2006-03-01 S3_MOCK::HttpServeAction: 4 / /data_00.csv / uploadId=1 2025-05-07T08:51:29.493691Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:609: [Export] [s3] Handle TEvExternalStorage::TEvCompleteMultipartUploadResponse: self# [1:3452:5416], result# CompleteMultipartUploadResult { Bucket: Key: data_00.csv ETag: 5d8c28efc812b445ddd02900ff3ee599 } 2025-05-07T08:51:29.494257Z node 1 :DATASHARD_BACKUP DEBUG: export_scan.cpp:144: [Export] [scanner] Handle TEvExportScan::TEvFinish: self# [1:3451:5415], msg# NKikimr::NDataShard::TEvExportScan::TEvFinish { Success: 1 Error: } 2025-05-07T08:51:29.511553Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5472: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 305 RawX2: 4294969588 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10000 RowsProcessed: 1000 } 2025-05-07T08:51:29.511627Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation FindRelatedPartByTabletId, TxId: 102, tablet: 72075186233409546, partId: 0 2025-05-07T08:51:29.511806Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 102:0, at schemeshard: 72057594046678944, message: Source { RawX1: 305 RawX2: 4294969588 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10000 RowsProcessed: 1000 } 2025-05-07T08:51:29.511909Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:233: TBackup TProposedWaitParts, opId: 102:0 HandleReply TEvSchemaChanged at tablet# 72057594046678944 message# Source { RawX1: 305 RawX2: 4294969588 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10000 RowsProcessed: 1000 } 2025-05-07T08:51:29.511998Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:655: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 102:0, shardIdx: 72057594046678944:1, datashard: 72075186233409546, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-05-07T08:51:29.512060Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:674: all shard schema changes has been received, operationId: 102:0, at schemeshard: 72057594046678944 2025-05-07T08:51:29.512112Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:686: send schema changes ack message, operation: 102:0, datashard: 72075186233409546, at schemeshard: 72057594046678944 2025-05-07T08:51:29.512160Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 102:0 129 -> 240 2025-05-07T08:51:29.512332Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:116: Unable to make a bill: kind# TBackup, opId# 102:0, reason# domain is not a serverless db, domain# /MyRoot, domainPathId# [OwnerId: 72057594046678944, LocalPathId: 1], IsDomainSchemeShard: 1, ParentDomainId: [OwnerId: 72057594046678944, LocalPathId: 1], ResourcesDomainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:51:29.518806Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-05-07T08:51:29.519454Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-05-07T08:51:29.519512Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046678944] TDone opId# 102:0 ProgressState 2025-05-07T08:51:29.519660Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#102:0 progress is 1/1 2025-05-07T08:51:29.519705Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-05-07T08:51:29.519753Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#102:0 progress is 1/1 2025-05-07T08:51:29.519806Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-05-07T08:51:29.519858Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: true 2025-05-07T08:51:29.519940Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:334:2313] message: TxId: 102 2025-05-07T08:51:29.520011Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-05-07T08:51:29.520059Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 102:0 2025-05-07T08:51:29.520095Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 102:0 2025-05-07T08:51:29.520230Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-05-07T08:51:29.525064Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-05-07T08:51:29.525152Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:3437:5402] TestWaitNotification: OK eventTxId 102 |89.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/database/ut/unittest |89.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/database/ut/unittest |89.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/database/ut/unittest |89.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/database/ut/unittest >> StatisticsSaveLoad::ForbidAccess >> StatisticsSaveLoad::Simple |89.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/database/ut/unittest |89.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/database/ut/unittest >> TPQTabletTests::UpdateConfig_1 [GOOD] >> StatisticsSaveLoad::Delete >> KqpPg::CopyTableSerialColumns-useSink [GOOD] >> KqpPg::CreateIndex ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/ut_aggregation/unittest >> AggregateStatistics::ChildNodesShouldBeInvalidateByTimeout [GOOD] Test command err: 2025-05-07T08:51:32.336733Z node 1 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 1, Round: 1, current Round: 0 2025-05-07T08:51:32.337727Z node 1 
:STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:38:2058], server id = [1:38:2058], tablet id = 1, status = OK 2025-05-07T08:51:32.338057Z node 1 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [1:38:2058], path = { OwnerId: 3 LocalId: 3 } 2025-05-07T08:51:32.338290Z node 3 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 1, Round: 1, current Round: 0 2025-05-07T08:51:32.338411Z node 2 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 1, Round: 1, current Round: 0 2025-05-07T08:51:32.338587Z node 1 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 1 2025-05-07T08:51:32.338867Z node 3 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 3, client id = [3:42:2057], server id = [3:42:2057], tablet id = 3, status = OK 2025-05-07T08:51:32.338926Z node 3 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [3:42:2057], path = { OwnerId: 3 LocalId: 3 } 2025-05-07T08:51:32.339045Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:38:2058], server id = [0:0:0], tablet id = 1, status = ERROR 2025-05-07T08:51:32.339088Z node 1 :STATISTICS DEBUG: service_impl.cpp:1110: Skip EvClientConnected 2025-05-07T08:51:32.339121Z node 3 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 3 2025-05-07T08:51:32.339159Z node 3 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 1 2025-05-07T08:51:32.339322Z node 4 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 2, Round: 1, current Round: 0 2025-05-07T08:51:32.339406Z node 3 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 3, client id = [3:42:2057], server id = [0:0:0], tablet id = 3, status = ERROR 2025-05-07T08:51:32.339434Z node 3 :STATISTICS DEBUG: service_impl.cpp:1110: Skip EvClientConnected 2025-05-07T08:51:32.339526Z node 4 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 4, client id = [4:47:2057], server id = [4:47:2057], tablet id = 4, status = OK 2025-05-07T08:51:32.339584Z node 4 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [4:47:2057], path = { OwnerId: 3 LocalId: 3 } 2025-05-07T08:51:32.339662Z node 1 :STATISTICS DEBUG: service_impl.cpp:448: Received TEvAggregateStatisticsResponse SenderNodeId: 3 2025-05-07T08:51:32.339715Z node 4 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 4 2025-05-07T08:51:32.339746Z node 4 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 2 2025-05-07T08:51:32.339844Z node 4 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 4, client id = [4:47:2057], server id = [0:0:0], tablet id = 4, status = ERROR 2025-05-07T08:51:32.339870Z node 4 :STATISTICS DEBUG: service_impl.cpp:1110: Skip EvClientConnected 2025-05-07T08:51:32.339943Z node 2 :STATISTICS DEBUG: service_impl.cpp:448: Received TEvAggregateStatisticsResponse SenderNodeId: 4 2025-05-07T08:51:32.350744Z node 4 :STATISTICS DEBUG: service_impl.cpp:252: Event round 1 is different from the current 0 2025-05-07T08:51:32.350801Z node 4 :STATISTICS DEBUG: service_impl.cpp:379: Skip TEvDispatchKeepAlive 2025-05-07T08:51:32.350830Z node 3 :STATISTICS DEBUG: service_impl.cpp:252: Event round 1 is different from the current 0 2025-05-07T08:51:32.350847Z node 
3 :STATISTICS DEBUG: service_impl.cpp:379: Skip TEvDispatchKeepAlive 2025-05-07T08:51:32.361916Z node 1 :STATISTICS INFO: service_impl.cpp:416: Node 2 is unavailable 2025-05-07T08:51:32.362003Z node 1 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 1 2025-05-07T08:51:32.362095Z node 2 :STATISTICS DEBUG: service_impl.cpp:401: Skip TEvKeepAliveTimeout 2025-05-07T08:51:32.362149Z node 1 :STATISTICS DEBUG: service_impl.cpp:252: Event round 1 is different from the current 0 2025-05-07T08:51:32.362176Z node 1 :STATISTICS DEBUG: service_impl.cpp:393: Skip TEvKeepAliveTimeout 2025-05-07T08:51:32.362206Z node 1 :STATISTICS DEBUG: service_impl.cpp:252: Event round 1 is different from the current 0 2025-05-07T08:51:32.362236Z node 1 :STATISTICS DEBUG: service_impl.cpp:379: Skip TEvDispatchKeepAlive 2025-05-07T08:51:32.362372Z node 1 :STATISTICS DEBUG: service_impl.cpp:252: Event round 1 is different from the current 0 2025-05-07T08:51:32.362414Z node 1 :STATISTICS DEBUG: service_impl.cpp:428: Skip TEvAggregateKeepAlive |89.4%| [TA] {RESULT} $(B)/ydb/tests/fq/streaming_optimize/test-results/py3test/{meta.json ... results_accumulator.log} >> TPQTabletTests::UpdateConfig_2 |89.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/unittest >> TSourceIdTests::SourceIdMinSeqNo [GOOD] >> IncrementalBackup::E2EBackupCollection [GOOD] |89.5%| [TA] $(B)/ydb/core/security/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> TPQTabletTests::UpdateConfig_2 [GOOD] >> TPQTabletTests::Test_Waiting_For_TEvReadSet_When_There_Are_More_Senders_Than_Recipients |89.5%| [TA] $(B)/ydb/core/client/server/ut/test-results/unittest/{meta.json ... results_accumulator.log} |89.5%| [TA] $(B)/ydb/core/tx/schemeshard/ut_backup/test-results/unittest/{meta.json ... results_accumulator.log} |89.5%| [TA] {RESULT} $(B)/ydb/core/security/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> TPQTabletTests::Test_Waiting_For_TEvReadSet_When_There_Are_More_Senders_Than_Recipients [GOOD] |89.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/ut/idx_test/ydb-core-kqp-ut-idx_test |89.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/idx_test/ydb-core-kqp-ut-idx_test >> TTransferTests::Create >> AggregateStatistics::ShouldBePings >> AggregateStatistics::RootNodeShouldBeInvalidateByTimeout |89.5%| [TA] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_backup/test-results/unittest/{meta.json ... results_accumulator.log} |89.5%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/idx_test/ydb-core-kqp-ut-idx_test >> TPQTabletTests::Test_Waiting_For_TEvReadSet_Without_Recipients >> AggregateStatistics::ShouldBeCorrectlyAggregateStatisticsFromAllNodes [GOOD] >> AggregateStatistics::ShouldBeCcorrectProcessingOfLocalTablets >> AggregateStatistics::RootNodeShouldBeInvalidateByTimeout [GOOD] >> AggregateStatistics::ShouldBePings [GOOD] |89.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/ut_aggregation/unittest |89.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/ut_aggregation/unittest >> AggregateStatistics::ShouldBeCcorrectProcessingTabletTimeout [GOOD] >> AggregateStatistics::ShouldBeCcorrectProcessingOfLocalTablets [GOOD] |89.5%| [TA] {RESULT} $(B)/ydb/core/client/server/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} >> KikimrIcGateway::TestListPath >> KikimrIcGateway::TestCreateExternalTable >> KikimrProvider::TestFillAuthPropertiesBasic [GOOD] >> KikimrProvider::TestFillAuthPropertiesAws [GOOD] >> KikimrProvider::AlterTableAddIndexWithTableSettings [GOOD] >> TPQTabletTests::Test_Waiting_For_TEvReadSet_Without_Recipients [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_incremental_backup/unittest >> IncrementalBackup::E2EBackupCollection [GOOD] Test command err: 2025-05-07T08:51:13.760182Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:105:2151], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:51:13.774296Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:51:13.774630Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00316c/r3tmp/tmpRK7FY1/pdisk_1.dat 2025-05-07T08:51:14.219522Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269877761, Sender [1:597:2521], Recipient [1:410:2405]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-05-07T08:51:14.219605Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4937: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-05-07T08:51:14.219659Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5713: Pipe server connected, at tablet: 72057594046644480 2025-05-07T08:51:14.219795Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271122432, Sender [1:594:2519], Recipient [1:410:2405]: {TEvModifySchemeTransaction txid# 1 TabletId# 72057594046644480} 2025-05-07T08:51:14.219825Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4851: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-05-07T08:51:14.352476Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "Root" StoragePools { Name: "/Root:test" Kind: "test" } } } TxId: 1 TabletId: 72057594046644480 , at schemeshard: 72057594046644480 2025-05-07T08:51:14.366220Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //Root, opId: 1:0, at schemeshard: 72057594046644480 2025-05-07T08:51:14.366554Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 0 2025-05-07T08:51:14.366838Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-05-07T08:51:14.366935Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-05-07T08:51:14.367045Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-05-07T08:51:14.367841Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046644480 PathId: 1, at schemeshard: 72057594046644480 2025-05-07T08:51:14.368009Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //Root 2025-05-07T08:51:14.368059Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 
72057594046644480 2025-05-07T08:51:14.368116Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:275: Activate send for 1:0 2025-05-07T08:51:14.368334Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 2146435072, Sender [1:410:2405], Recipient [1:410:2405]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-05-07T08:51:14.368381Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4857: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-05-07T08:51:14.382504Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046644480 2025-05-07T08:51:14.382637Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046644480 2025-05-07T08:51:14.382703Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:51:14.382742Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:51:14.382881Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-05-07T08:51:14.383547Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-05-07T08:51:14.383598Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:275: Activate send for 1:0 2025-05-07T08:51:14.383768Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 2146435072, Sender [1:410:2405], Recipient [1:410:2405]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-05-07T08:51:14.383805Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4857: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-05-07T08:51:14.383887Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046644480 2025-05-07T08:51:14.383939Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046644480 2025-05-07T08:51:14.383978Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:51:14.384073Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-05-07T08:51:14.384524Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-05-07T08:51:14.384557Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:275: Activate send for 1:0 2025-05-07T08:51:14.384666Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 2146435072, Sender [1:410:2405], Recipient [1:410:2405]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-05-07T08:51:14.384694Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4857: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-05-07T08:51:14.384740Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at 
schemeshard: 72057594046644480 2025-05-07T08:51:14.384779Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046644480 2025-05-07T08:51:14.402880Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046644480 2025-05-07T08:51:14.402987Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-05-07T08:51:14.421561Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:51:14.425525Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046644480 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:51:14.426226Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-05-07T08:51:14.426281Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046644480 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:51:14.426476Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 2025-05-07T08:51:14.427715Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269877760, Sender [1:602:2526], Recipient [1:410:2405]: NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594046316545 Status: OK ServerId: [1:604:2527] Leader: 1 Dead: 0 Generation: 2 VersionInfo: } 2025-05-07T08:51:14.427811Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4935: StateWork, processing event TEvTabletPipe::TEvClientConnected 2025-05-07T08:51:14.427853Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5663: Handle TEvClientConnected, tabletId: 72057594046316545, status: OK, at schemeshard: 72057594046644480 2025-05-07T08:51:14.428043Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269091328, Sender [1:406:2401], Recipient [1:410:2405]: NKikimrTx.TEvProposeTransactionStatus Status: 16 StepId: 500 TxId: 1 2025-05-07T08:51:14.428423Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269877761, Sender [1:606:2529], Recipient [1:410:2405]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-05-07T08:51:14.428470Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4937: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-05-07T08:51:14.428510Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5713: Pipe server connected, at tablet: 72057594046644480 2025-05-07T08:51:14.428646Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271124996, Sender [1:594:2519], Recipient [1:410:2405]: NKikimrScheme.TEvNotifyTxCompletion TxId: 1 2025-05-07T08:51:14.428683Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4853: StateWork, processing event TEvSchemeShard::TEvNotifyTxCompletion 2025-05-07T08:51:14.428762Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 1, at schemeshard: 72057594046644480 2025-05-07T08:51:14.428797Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 1, ready parts: 0/1, is published: true 2025-05-07T08:51:14.428837Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 1, at schemeshard: 72057594046644480 2025-05-07T08:51:14.465655Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 273285138, Sender [1:43:2090], Recipient [1:410:2405]: NKikimr::NConsole::TEvConsole::TEvConfigNotificationRequest { Config { QueryServiceConfig { AvailableExternalDataSources: "ObjectStorage" } } ItemKinds: 26 ItemKinds: 34 ItemKinds: 52 ItemKinds: 54 ItemKinds: 73 Local: true } 2025-05-07T08:51:14.465784Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7610: Got new config: QueryServiceConfig { AvailableExternalDataSources: "ObjectStorage" } 2025-05-07T08:51:14.465821Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-0 ... pp:1009: NTableState::TProposedWaitParts operationId# 281474976715668:1 HandleReply TEvSchemaChanged at tablet: 72057594046644480 message: Source { RawX1: 1210 RawX2: 12884904810 } Origin: 72075186224037892 State: 2 TxId: 281474976715668 Step: 0 Generation: 1 2025-05-07T08:51:32.459815Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:655: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 281474976715668:1, shardIdx: 72057594046644480:5, datashard: 72075186224037892, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046644480 2025-05-07T08:51:32.459850Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:674: all shard schema changes has been received, operationId: 281474976715668:1, at schemeshard: 72057594046644480 2025-05-07T08:51:32.459883Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:686: send schema changes ack message, operation: 281474976715668:1, datashard: 72075186224037892, at schemeshard: 72057594046644480 2025-05-07T08:51:32.459921Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 281474976715668:1 129 -> 240 2025-05-07T08:51:32.460123Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_create_restore_incremental_backup.cpp:253: TRestoreMultipleIncrementalBackups TDone, operationId: 281474976715668:1 Constructed op# SrcTablePaths: "/Root/.backups/collections/MyCollection/19700101000002Z_incremental/Table" DstTablePath: "/Root/Table" SrcPathIds { OwnerId: 72057594046644480 LocalId: 15 } 2025-05-07T08:51:32.460284Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-05-07T08:51:32.460733Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 281474976715668:1, at schemeshard: 72057594046644480 2025-05-07T08:51:32.460760Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-05-07T08:51:32.460782Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:275: Activate send for 281474976715668:1 2025-05-07T08:51:32.460833Z node 3 :FLAT_TX_SCHEMESHARD TRACE: 
schemeshard__operation_side_effects.cpp:630: Send to actor: [3:1210:2922] msg type: 269552132 msg: NKikimrTxDataShard.TEvSchemaChangedResult TxId: 281474976715668 at schemeshard: 72057594046644480 2025-05-07T08:51:32.460919Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715668 datashard 72075186224037892 state Ready 2025-05-07T08:51:32.460960Z node 3 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037892 Got TEvSchemaChangedResult from SS at 72075186224037892 2025-05-07T08:51:32.461138Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 2146435072, Sender [3:419:2412], Recipient [3:419:2412]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-05-07T08:51:32.461170Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4857: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-05-07T08:51:32.461233Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 281474976715668:1, at schemeshard: 72057594046644480 2025-05-07T08:51:32.461293Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_create_restore_incremental_backup.cpp:260: [72057594046644480] TRestoreMultipleIncrementalBackups TDone, operationId: 281474976715668:1 ProgressState 2025-05-07T08:51:32.461437Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-05-07T08:51:32.461481Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976715668:1 progress is 1/2 2025-05-07T08:51:32.461522Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 281474976715668 ready parts: 1/2 2025-05-07T08:51:32.461572Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:1103: All parts have reached barrier, tx: 281474976715668, done: 1, blocked: 1 2025-05-07T08:51:32.461669Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_copy_table.cpp:289: TCopyTable TCopyTableBarrier operationId: 281474976715668:0 HandleReply TEvPrivate::TEvCompleteBarrier, msg: NKikimr::NSchemeShard::TEvPrivate::TEvCompleteBarrier { TxId: 281474976715668 Name: CopyTableBarrier }, at tablet# 72057594046644480 2025-05-07T08:51:32.461711Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 281474976715668:0 240 -> 240 2025-05-07T08:51:32.461902Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976715668:1 progress is 1/2 2025-05-07T08:51:32.461950Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 281474976715668 ready parts: 1/2 2025-05-07T08:51:32.462022Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 281474976715668, ready parts: 1/2, is published: true 2025-05-07T08:51:32.462480Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-05-07T08:51:32.462515Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:275: Activate send for 281474976715668:0 2025-05-07T08:51:32.462660Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 2146435072, Sender [3:419:2412], Recipient [3:419:2412]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 
2025-05-07T08:51:32.462695Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4857: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-05-07T08:51:32.462790Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 281474976715668:0, at schemeshard: 72057594046644480 2025-05-07T08:51:32.462844Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046644480] TDone opId# 281474976715668:0 ProgressState 2025-05-07T08:51:32.462972Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-05-07T08:51:32.463002Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976715668:0 progress is 2/2 2025-05-07T08:51:32.463028Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 281474976715668 ready parts: 2/2 2025-05-07T08:51:32.463063Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976715668:0 progress is 2/2 2025-05-07T08:51:32.463088Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 281474976715668 ready parts: 2/2 2025-05-07T08:51:32.463115Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 281474976715668, ready parts: 2/2, is published: true 2025-05-07T08:51:32.463185Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [3:1417:3087] message: TxId: 281474976715668 2025-05-07T08:51:32.463250Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 281474976715668 ready parts: 2/2 2025-05-07T08:51:32.463300Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 281474976715668:0 2025-05-07T08:51:32.463341Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 281474976715668:0 2025-05-07T08:51:32.463492Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 16] was 4 2025-05-07T08:51:32.463531Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate source path for pathId [OwnerId: 72057594046644480, LocalPathId: 12] was 3 2025-05-07T08:51:32.463577Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 281474976715668:1 2025-05-07T08:51:32.463601Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 281474976715668:1 2025-05-07T08:51:32.463651Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 15] was 3 2025-05-07T08:51:32.463675Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate source path for pathId [OwnerId: 72057594046644480, LocalPathId: 16] was 3 2025-05-07T08:51:32.464192Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-05-07T08:51:32.464292Z node 3 :FLAT_TX_SCHEMESHARD TRACE: 
schemeshard__operation_side_effects.cpp:630: Send to actor: [3:1417:3087] msg type: 271124998 msg: NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976715668 at schemeshard: 72057594046644480 2025-05-07T08:51:32.464681Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269877764, Sender [3:1424:3093], Recipient [3:419:2412]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-05-07T08:51:32.464721Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4938: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-05-07T08:51:32.464745Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5761: Server pipe is reset, at schemeshard: 72057594046644480 2025-05-07T08:51:32.479394Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269877764, Sender [3:1531:3180], Recipient [3:419:2412]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-05-07T08:51:32.479487Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4938: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-05-07T08:51:32.479530Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5761: Server pipe is reset, at schemeshard: 72057594046644480 2025-05-07T08:51:32.576695Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271125000, Sender [0:0:0], Recipient [3:419:2412]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T08:51:32.576795Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4848: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T08:51:32.576906Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271124999, Sender [3:419:2412], Recipient [3:419:2412]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T08:51:32.576948Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4847: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T08:51:32.855596Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715669. Ctx: { TraceId: 01jtmz2t7zbbgthan6g7epw4zw, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=MmFlNjUxZjktOGVhNjJhODMtNDc3MzNiMWQtYzc0YjIyMTg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root { items { uint32_value: 2 } items { uint32_value: 200 } }, { items { uint32_value: 3 } items { uint32_value: 30 } } |89.5%| [TA] $(B)/ydb/core/blobstorage/ut_blobstorage/ut_balancing/test-results/unittest/{meta.json ... 
results_accumulator.log} ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/ut_aggregation/unittest >> AggregateStatistics::ShouldBeCorrectlyAggregateStatisticsFromAllNodes [GOOD] Test command err: 2025-05-07T08:51:34.318521Z node 1 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 1, Round: 1, current Round: 0 2025-05-07T08:51:34.319748Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:38:2058], server id = [1:38:2058], tablet id = 1, status = OK 2025-05-07T08:51:34.320139Z node 1 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [1:38:2058], path = { OwnerId: 3 LocalId: 3 } 2025-05-07T08:51:34.320495Z node 3 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 1, Round: 1, current Round: 0 2025-05-07T08:51:34.320658Z node 2 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 1, Round: 1, current Round: 0 2025-05-07T08:51:34.320855Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:39:2059], server id = [1:39:2059], tablet id = 2, status = OK 2025-05-07T08:51:34.320917Z node 1 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [1:39:2059], path = { OwnerId: 3 LocalId: 3 } 2025-05-07T08:51:34.321052Z node 1 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 1 2025-05-07T08:51:34.321414Z node 3 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 3, client id = [3:44:2057], server id = [3:44:2057], tablet id = 5, status = OK 2025-05-07T08:51:34.321483Z node 3 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [3:44:2057], path = { OwnerId: 3 LocalId: 3 } 2025-05-07T08:51:34.321548Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:46:2057], server id = [2:46:2057], tablet id = 4, status = OK 2025-05-07T08:51:34.321582Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:46:2057], path = { OwnerId: 3 LocalId: 3 } 2025-05-07T08:51:34.321710Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:40:2060], server id = [1:40:2060], tablet id = 3, status = OK 2025-05-07T08:51:34.321755Z node 1 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [1:40:2060], path = { OwnerId: 3 LocalId: 3 } 2025-05-07T08:51:34.321997Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 4 2025-05-07T08:51:34.322074Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:38:2058], server id = [0:0:0], tablet id = 1, status = ERROR 2025-05-07T08:51:34.322125Z node 1 :STATISTICS DEBUG: service_impl.cpp:1110: Skip EvClientConnected 2025-05-07T08:51:34.322187Z node 3 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 5 2025-05-07T08:51:34.322243Z node 3 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 1 2025-05-07T08:51:34.322381Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:46:2057], server id = [0:0:0], tablet id = 4, status = ERROR 2025-05-07T08:51:34.322402Z node 2 :STATISTICS DEBUG: service_impl.cpp:1110: Skip EvClientConnected 2025-05-07T08:51:34.322460Z node 1 :STATISTICS DEBUG: service_impl.cpp:317: Received 
TEvStatisticsResponse TabletId: 2 2025-05-07T08:51:34.322637Z node 4 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 2, Round: 1, current Round: 0 2025-05-07T08:51:34.322762Z node 3 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 3, client id = [3:44:2057], server id = [0:0:0], tablet id = 5, status = ERROR 2025-05-07T08:51:34.322789Z node 3 :STATISTICS DEBUG: service_impl.cpp:1110: Skip EvClientConnected 2025-05-07T08:51:34.322858Z node 1 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 3 2025-05-07T08:51:34.322970Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:39:2059], server id = [0:0:0], tablet id = 2, status = ERROR 2025-05-07T08:51:34.322995Z node 1 :STATISTICS DEBUG: service_impl.cpp:1110: Skip EvClientConnected 2025-05-07T08:51:34.323036Z node 4 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 4, client id = [4:49:2057], server id = [4:49:2057], tablet id = 6, status = OK 2025-05-07T08:51:34.323084Z node 4 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [4:49:2057], path = { OwnerId: 3 LocalId: 3 } 2025-05-07T08:51:34.323262Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:40:2060], server id = [0:0:0], tablet id = 3, status = ERROR 2025-05-07T08:51:34.323287Z node 1 :STATISTICS DEBUG: service_impl.cpp:1110: Skip EvClientConnected 2025-05-07T08:51:34.323337Z node 1 :STATISTICS DEBUG: service_impl.cpp:448: Received TEvAggregateStatisticsResponse SenderNodeId: 3 2025-05-07T08:51:34.323436Z node 4 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 6 2025-05-07T08:51:34.323510Z node 4 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 2 2025-05-07T08:51:34.323626Z node 4 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 4, client id = [4:49:2057], server id = [0:0:0], tablet id = 6, status = ERROR 2025-05-07T08:51:34.323659Z node 4 :STATISTICS DEBUG: service_impl.cpp:1110: Skip EvClientConnected 2025-05-07T08:51:34.323776Z node 2 :STATISTICS DEBUG: service_impl.cpp:448: Received TEvAggregateStatisticsResponse SenderNodeId: 4 2025-05-07T08:51:34.323841Z node 2 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 1 2025-05-07T08:51:34.324042Z node 1 :STATISTICS DEBUG: service_impl.cpp:448: Received TEvAggregateStatisticsResponse SenderNodeId: 2 2025-05-07T08:51:34.324146Z node 1 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 1 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/ut_aggregation/unittest >> AggregateStatistics::RootNodeShouldBeInvalidateByTimeout [GOOD] Test command err: 2025-05-07T08:51:34.300603Z node 1 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 1, Round: 1, current Round: 0 2025-05-07T08:51:34.301670Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:38:2058], server id = [1:38:2058], tablet id = 1, status = OK 2025-05-07T08:51:34.302053Z node 1 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [1:38:2058], path = { OwnerId: 3 LocalId: 3 } 2025-05-07T08:51:34.302328Z node 3 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 1, Round: 1, current Round: 0 2025-05-07T08:51:34.302474Z node 2 :STATISTICS 
DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 1, Round: 1, current Round: 0 2025-05-07T08:51:34.302590Z node 1 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 1 2025-05-07T08:51:34.302852Z node 3 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 3, client id = [3:42:2057], server id = [3:42:2057], tablet id = 3, status = OK 2025-05-07T08:51:34.302910Z node 3 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [3:42:2057], path = { OwnerId: 3 LocalId: 3 } 2025-05-07T08:51:34.303041Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:38:2058], server id = [0:0:0], tablet id = 1, status = ERROR 2025-05-07T08:51:34.303075Z node 1 :STATISTICS DEBUG: service_impl.cpp:1110: Skip EvClientConnected 2025-05-07T08:51:34.303138Z node 3 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 3 2025-05-07T08:51:34.303176Z node 3 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 1 2025-05-07T08:51:34.303335Z node 4 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 2, Round: 1, current Round: 0 2025-05-07T08:51:34.303417Z node 3 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 3, client id = [3:42:2057], server id = [0:0:0], tablet id = 3, status = ERROR 2025-05-07T08:51:34.303443Z node 3 :STATISTICS DEBUG: service_impl.cpp:1110: Skip EvClientConnected 2025-05-07T08:51:34.303556Z node 4 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 4, client id = [4:47:2057], server id = [4:47:2057], tablet id = 4, status = OK 2025-05-07T08:51:34.303618Z node 4 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [4:47:2057], path = { OwnerId: 3 LocalId: 3 } 2025-05-07T08:51:34.303692Z node 1 :STATISTICS DEBUG: service_impl.cpp:448: Received TEvAggregateStatisticsResponse SenderNodeId: 3 2025-05-07T08:51:34.303742Z node 4 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 4 2025-05-07T08:51:34.303782Z node 4 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 2 2025-05-07T08:51:34.303878Z node 4 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 4, client id = [4:47:2057], server id = [0:0:0], tablet id = 4, status = ERROR 2025-05-07T08:51:34.303903Z node 4 :STATISTICS DEBUG: service_impl.cpp:1110: Skip EvClientConnected 2025-05-07T08:51:34.303992Z node 2 :STATISTICS DEBUG: service_impl.cpp:448: Received TEvAggregateStatisticsResponse SenderNodeId: 4 2025-05-07T08:51:34.314382Z node 4 :STATISTICS DEBUG: service_impl.cpp:252: Event round 1 is different from the current 0 2025-05-07T08:51:34.314454Z node 4 :STATISTICS DEBUG: service_impl.cpp:379: Skip TEvDispatchKeepAlive 2025-05-07T08:51:34.314493Z node 3 :STATISTICS DEBUG: service_impl.cpp:252: Event round 1 is different from the current 0 2025-05-07T08:51:34.314519Z node 3 :STATISTICS DEBUG: service_impl.cpp:379: Skip TEvDispatchKeepAlive 2025-05-07T08:51:34.325480Z node 1 :STATISTICS INFO: service_impl.cpp:416: Node 2 is unavailable 2025-05-07T08:51:34.325556Z node 1 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 1 2025-05-07T08:51:34.325669Z node 2 :STATISTICS DEBUG: service_impl.cpp:401: Skip TEvKeepAliveTimeout 2025-05-07T08:51:34.325872Z node 1 :STATISTICS DEBUG: service_impl.cpp:252: Event round 1 is different from the current 
0 2025-05-07T08:51:34.325913Z node 1 :STATISTICS DEBUG: service_impl.cpp:393: Skip TEvKeepAliveTimeout 2025-05-07T08:51:34.325949Z node 1 :STATISTICS DEBUG: service_impl.cpp:252: Event round 1 is different from the current 0 2025-05-07T08:51:34.326040Z node 1 :STATISTICS DEBUG: service_impl.cpp:379: Skip TEvDispatchKeepAlive 2025-05-07T08:51:34.326260Z node 1 :STATISTICS DEBUG: service_impl.cpp:252: Event round 1 is different from the current 0 2025-05-07T08:51:34.326322Z node 1 :STATISTICS DEBUG: service_impl.cpp:428: Skip TEvAggregateKeepAlive ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/ut_aggregation/unittest >> AggregateStatistics::ShouldBePings [GOOD] Test command err: 2025-05-07T08:51:34.231134Z node 1 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 1, Round: 1, current Round: 0 2025-05-07T08:51:34.231566Z node 2 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 1, Round: 1, current Round: 0 2025-05-07T08:51:34.340368Z node 1 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 1 2025-05-07T08:51:34.340511Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 2 2025-05-07T08:51:34.340562Z node 2 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 1 2025-05-07T08:51:34.341421Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:16:2056], server id = [0:0:0], tablet id = 1, status = ERROR 2025-05-07T08:51:34.341478Z node 1 :STATISTICS DEBUG: service_impl.cpp:1110: Skip EvClientConnected 2025-05-07T08:51:34.341592Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:19:2055], server id = [0:0:0], tablet id = 2, status = ERROR 2025-05-07T08:51:34.341613Z node 2 :STATISTICS DEBUG: service_impl.cpp:1110: Skip EvClientConnected 2025-05-07T08:51:34.341715Z node 1 :STATISTICS DEBUG: service_impl.cpp:448: Received TEvAggregateStatisticsResponse SenderNodeId: 2 2025-05-07T08:51:34.341816Z node 1 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 1 |89.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_snapshot/ydb-core-tx-datashard-ut_snapshot |89.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_snapshot/ydb-core-tx-datashard-ut_snapshot ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/ut_aggregation/unittest >> AggregateStatistics::ShouldBeCcorrectProcessingOfLocalTablets [GOOD] Test command err: 2025-05-07T08:51:34.429725Z node 1 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 1, Round: 1, current Round: 0 2025-05-07T08:51:34.430056Z node 1 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 1, client id = [1:9:2056], server id = [1:9:2056], tablet id = 2 2025-05-07T08:51:34.430094Z node 1 :STATISTICS DEBUG: service_impl.cpp:1063: Tablet 2 is not local. 2025-05-07T08:51:34.430797Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:8:2055], server id = [1:8:2055], tablet id = 1, status = ERROR 2025-05-07T08:51:34.430869Z node 1 :STATISTICS DEBUG: service_impl.cpp:1063: Tablet 1 is not local. 
2025-05-07T08:51:34.431018Z node 1 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 3 2025-05-07T08:51:34.431102Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:11:2058], server id = [1:11:2058], tablet id = 4, status = ERROR 2025-05-07T08:51:34.431123Z node 1 :STATISTICS DEBUG: service_impl.cpp:1063: Tablet 4 is not local. 2025-05-07T08:51:34.431223Z node 1 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 1, client id = [1:12:2059], server id = [1:12:2059], tablet id = 5 2025-05-07T08:51:34.431248Z node 1 :STATISTICS DEBUG: service_impl.cpp:1063: Tablet 5 is not local. 2025-05-07T08:51:34.431342Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:10:2057], server id = [0:0:0], tablet id = 3, status = ERROR 2025-05-07T08:51:34.431368Z node 1 :STATISTICS DEBUG: service_impl.cpp:1110: Skip EvClientConnected 2025-05-07T08:51:34.431396Z node 1 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 6 2025-05-07T08:51:34.431442Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:14:2061], server id = [1:14:2061], tablet id = 7, status = ERROR 2025-05-07T08:51:34.431479Z node 1 :STATISTICS DEBUG: service_impl.cpp:1063: Tablet 7 is not local. 2025-05-07T08:51:34.431531Z node 1 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 1, client id = [1:15:2062], server id = [1:15:2062], tablet id = 8 2025-05-07T08:51:34.431551Z node 1 :STATISTICS DEBUG: service_impl.cpp:1063: Tablet 8 is not local. 2025-05-07T08:51:34.431620Z node 1 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 1 2025-05-07T08:51:34.431704Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:13:2060], server id = [0:0:0], tablet id = 6, status = ERROR 2025-05-07T08:51:34.431731Z node 1 :STATISTICS DEBUG: service_impl.cpp:1110: Skip EvClientConnected ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/ut_aggregation/unittest >> AggregateStatistics::ShouldBeCcorrectProcessingTabletTimeout [GOOD] Test command err: 2025-05-07T08:51:34.479366Z node 1 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 1, Round: 1, current Round: 0 2025-05-07T08:51:34.480459Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:8:2055], server id = [1:8:2055], tablet id = 1, status = OK 2025-05-07T08:51:34.480888Z node 1 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [1:8:2055], path = { OwnerId: 3 LocalId: 3 } 2025-05-07T08:51:34.481056Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:9:2056], server id = [1:9:2056], tablet id = 2, status = OK 2025-05-07T08:51:34.481177Z node 1 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [1:9:2056], path = { OwnerId: 3 LocalId: 3 } 2025-05-07T08:51:34.481239Z node 1 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 1 2025-05-07T08:51:34.481437Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:10:2057], server id = [1:10:2057], tablet id = 3, status = OK 2025-05-07T08:51:34.481483Z node 1 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [1:10:2057], path = { OwnerId: 3 LocalId: 3 } 
2025-05-07T08:51:34.481562Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:11:2058], server id = [1:11:2058], tablet id = 4, status = OK 2025-05-07T08:51:34.481601Z node 1 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [1:11:2058], path = { OwnerId: 3 LocalId: 3 } 2025-05-07T08:51:34.481646Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:8:2055], server id = [0:0:0], tablet id = 1, status = ERROR 2025-05-07T08:51:34.481681Z node 1 :STATISTICS DEBUG: service_impl.cpp:1110: Skip EvClientConnected 2025-05-07T08:51:34.481736Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:12:2059], server id = [1:12:2059], tablet id = 5, status = OK 2025-05-07T08:51:34.481789Z node 1 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [1:12:2059], path = { OwnerId: 3 LocalId: 3 } 2025-05-07T08:51:34.481830Z node 1 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 3 2025-05-07T08:51:34.481963Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:13:2060], server id = [1:13:2060], tablet id = 6, status = OK 2025-05-07T08:51:34.482224Z node 1 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [1:13:2060], path = { OwnerId: 3 LocalId: 3 } 2025-05-07T08:51:34.482287Z node 1 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 5 2025-05-07T08:51:34.482341Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:10:2057], server id = [0:0:0], tablet id = 3, status = ERROR 2025-05-07T08:51:34.482360Z node 1 :STATISTICS DEBUG: service_impl.cpp:1110: Skip EvClientConnected 2025-05-07T08:51:34.482390Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:14:2061], server id = [1:14:2061], tablet id = 7, status = OK 2025-05-07T08:51:34.482499Z node 1 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [1:14:2061], path = { OwnerId: 3 LocalId: 3 } 2025-05-07T08:51:34.482580Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:12:2059], server id = [0:0:0], tablet id = 5, status = ERROR 2025-05-07T08:51:34.482604Z node 1 :STATISTICS DEBUG: service_impl.cpp:1110: Skip EvClientConnected 2025-05-07T08:51:34.482654Z node 1 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 7 2025-05-07T08:51:34.483126Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:14:2061], server id = [0:0:0], tablet id = 7, status = ERROR 2025-05-07T08:51:34.483174Z node 1 :STATISTICS DEBUG: service_impl.cpp:1110: Skip EvClientConnected 2025-05-07T08:51:34.493520Z node 1 :STATISTICS DEBUG: service_impl.cpp:1028: Tablet 1 has already been processed 2025-05-07T08:51:34.493596Z node 1 :STATISTICS ERROR: service_impl.cpp:1032: No result was received from the tablet 2 2025-05-07T08:51:34.493626Z node 1 :STATISTICS DEBUG: service_impl.cpp:1063: Tablet 2 is not local. 2025-05-07T08:51:34.493706Z node 1 :STATISTICS DEBUG: service_impl.cpp:1028: Tablet 3 has already been processed 2025-05-07T08:51:34.493772Z node 1 :STATISTICS ERROR: service_impl.cpp:1032: No result was received from the tablet 4 2025-05-07T08:51:34.493789Z node 1 :STATISTICS DEBUG: service_impl.cpp:1063: Tablet 4 is not local. 
2025-05-07T08:51:34.493848Z node 1 :STATISTICS DEBUG: service_impl.cpp:1028: Tablet 5 has already been processed 2025-05-07T08:51:34.493887Z node 1 :STATISTICS ERROR: service_impl.cpp:1032: No result was received from the tablet 6 2025-05-07T08:51:34.493902Z node 1 :STATISTICS DEBUG: service_impl.cpp:1063: Tablet 6 is not local. 2025-05-07T08:51:34.494035Z node 1 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 1 2025-05-07T08:51:34.494145Z node 1 :STATISTICS DEBUG: service_impl.cpp:252: Event round 1 is different from the current 0 2025-05-07T08:51:34.494174Z node 1 :STATISTICS DEBUG: service_impl.cpp:1021: Skip TEvStatisticsRequestTimeout 2025-05-07T08:51:34.494218Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:9:2056], server id = [0:0:0], tablet id = 2, status = ERROR 2025-05-07T08:51:34.494253Z node 1 :STATISTICS DEBUG: service_impl.cpp:1110: Skip EvClientConnected 2025-05-07T08:51:34.494302Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:11:2058], server id = [0:0:0], tablet id = 4, status = ERROR 2025-05-07T08:51:34.494317Z node 1 :STATISTICS DEBUG: service_impl.cpp:1110: Skip EvClientConnected 2025-05-07T08:51:34.494358Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:13:2060], server id = [0:0:0], tablet id = 6, status = ERROR 2025-05-07T08:51:34.494377Z node 1 :STATISTICS DEBUG: service_impl.cpp:1110: Skip EvClientConnected >> TPQTabletTests::Test_Waiting_For_TEvReadSet_Without_Senders >> BasicUsage::WriteAndReadSomeMessagesWithAsyncCompression >> ReadSessionImplTest::ProperlyOrdersDecompressedData [GOOD] >> ReadSessionImplTest::PacksBatches_ExactlyTwoMessagesInBatch [GOOD] >> ReadSessionImplTest::PacksBatches_OneMessageInEveryBatch [GOOD] >> ReadSessionImplTest::PacksBatches_BigBatchDecompressWithTwoBatchTasks >> TPartitionChooserSuite::TPartitionChooserActor_SplitMergeEnabled_SourceId_PartitionNotExists_Test [GOOD] >> TPartitionGraphTest::BuildGraph [GOOD] >> TPartitionTests::AfterRestart_1 |89.5%| [TA] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_balancing/test-results/unittest/{meta.json ... 
results_accumulator.log} |89.5%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_snapshot/ydb-core-tx-datashard-ut_snapshot |89.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/tx_proxy/ut_schemereq/ydb-core-tx-tx_proxy-ut_schemereq |89.5%| [LD] {RESULT} $(B)/ydb/core/tx/tx_proxy/ut_schemereq/ydb-core-tx-tx_proxy-ut_schemereq |89.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/tx_proxy/ut_schemereq/ydb-core-tx-tx_proxy-ut_schemereq >> TPQTabletTests::Test_Waiting_For_TEvReadSet_Without_Senders [GOOD] |89.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/provider/ut/unittest >> KikimrProvider::AlterTableAddIndexWithTableSettings [GOOD] >> KqpPg::CreateUniqComplexPgColumn+useSink [GOOD] >> KqpPg::CreateUniqComplexPgColumn-useSink |89.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/replication/controller/ut_stream_creator/tx-replication-controller-ut_stream_creator |89.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/replication/controller/ut_stream_creator/tx-replication-controller-ut_stream_creator |89.5%| [LD] {RESULT} $(B)/ydb/core/tx/replication/controller/ut_stream_creator/tx-replication-controller-ut_stream_creator >> TPQTest::DirectReadBadSessionOrPipe >> KqpPg::DropTableIfExists [GOOD] >> KqpPg::DropTableIfExists_GenericQuery |89.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/ut/indexes/ydb-core-kqp-ut-indexes |89.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/indexes/ydb-core-kqp-ut-indexes |89.5%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/indexes/ydb-core-kqp-ut-indexes >> TTransferTests::Create [GOOD] >> TTransferTests::CreateSequential >> TPartitionTests::AfterRestart_1 [GOOD] >> TPartitionTests::AfterRestart_2 >> KqpPg::InsertValuesFromTableWithDefaultTextNotNullButNull+useSink [GOOD] >> KqpPg::InsertValuesFromTableWithDefaultTextNotNullButNull-useSink >> ReadSessionImplTest::DecompressRaw [GOOD] >> ReadSessionImplTest::DecompressGzip [GOOD] >> ReadSessionImplTest::DecompressZstd [GOOD] >> ReadSessionImplTest::DecompressRawEmptyMessage [GOOD] >> ReadSessionImplTest::DecompressGzipEmptyMessage [GOOD] >> ReadSessionImplTest::DecompressWithSynchronousExecutor [GOOD] >> ReadSessionImplTest::DataReceivedCallbackReal >> ReadSessionImplTest::ForcefulDestroyPartitionStream [GOOD] >> ReadSessionImplTest::DestroyPartitionStreamRequest [GOOD] >> ReadSessionImplTest::DecompressZstdEmptyMessage [GOOD] >> ReadSessionImplTest::PacksBatches_BatchABitBiggerThanLimit [GOOD] >> ReadSessionImplTest::PacksBatches_BatchesEqualToServerBatches [GOOD] >> ReadSessionImplTest::HoleBetweenOffsets [GOOD] >> ReadSessionImplTest::LOGBROKER_7702 [GOOD] >> KqpPg::InsertValuesFromTableWithDefaultBool+useSink [GOOD] >> KqpPg::InsertValuesFromTableWithDefaultBool-useSink >> PersQueueSdkReadSessionTest::ReadSessionWithExplicitlySpecifiedPartitions >> ReadSessionImplTest::ReconnectOnTmpError >> ReadSessionImplTest::SuccessfulInit [GOOD] >> ReadSessionImplTest::SuccessfulInitAndThenTimeoutCallback [GOOD] >> ReadSessionImplTest::StopsRetryAfterFailedAttempt [GOOD] >> ReadSessionImplTest::StopsRetryAfterTimeout [GOOD] >> ReadSessionImplTest::UnpackBigBatchWithTwoPartitions >> Compression::WriteRAW >> ReadSessionImplTest::ReconnectOnTmpError [GOOD] >> ReadSessionImplTest::ReconnectOnTmpErrorAndThenTimeout [GOOD] >> ReadSessionImplTest::ReconnectOnTimeout [GOOD] >> ReadSessionImplTest::ReconnectOnTimeoutAndThenCreate [GOOD] >> ReadSessionImplTest::ReconnectsAfterFailure [GOOD] >> ReadSessionImplTest::SimpleDataHandlers >> 
ReadSessionImplTest::UnpackBigBatchWithTwoPartitions [GOOD] >> ReadSessionImplTest::SimpleDataHandlersWithGracefulRelease >> TPartitionTests::AfterRestart_2 [GOOD] >> ApplyClusterEndpointTest::NoPorts [GOOD] >> ApplyClusterEndpointTest::PortFromCds [GOOD] >> ApplyClusterEndpointTest::PortFromDriver [GOOD] >> BasicUsage::MaxByteSizeEqualZero >> ReadSessionImplTest::SimpleDataHandlers [GOOD] >> ReadSessionImplTest::SimpleDataHandlersWithCommit ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/persqueue_public/ut/unittest >> ReadSessionImplTest::LOGBROKER_7702 [GOOD] Test command err: 2025-05-07T08:51:36.457649Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:51:36.457678Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:51:36.457710Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-05-07T08:51:36.458245Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-05-07T08:51:36.459004Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-05-07T08:51:36.471461Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:51:36.472301Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-05-07T08:51:36.473941Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:51:36.473989Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:51:36.474022Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-05-07T08:51:36.474451Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-05-07T08:51:36.475187Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-05-07T08:51:36.475359Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:51:36.475623Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-05-07T08:51:36.475964Z :INFO: [db] [sessionid] [cluster] Confirm partition stream destroy. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1 2025-05-07T08:51:36.477040Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:51:36.477100Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:51:36.477156Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-05-07T08:51:36.477448Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-05-07T08:51:36.477984Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-05-07T08:51:36.478117Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:51:36.478364Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. 
Read offset: (NULL) 2025-05-07T08:51:36.479217Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:51:36.479514Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-1) 2025-05-07T08:51:36.479641Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-05-07T08:51:36.479696Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 0 bytes 2025-05-07T08:51:36.480958Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:51:36.481008Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:51:36.481035Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-05-07T08:51:36.481412Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-05-07T08:51:36.482027Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-05-07T08:51:36.482217Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:51:36.482446Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) Message data size: 11 Compressed message data size: 31 2025-05-07T08:51:36.483465Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function 2025-05-07T08:51:36.483714Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function Getting new event 2025-05-07T08:51:36.484145Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (5-8) 2025-05-07T08:51:36.484356Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-4) 2025-05-07T08:51:36.484475Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-05-07T08:51:36.484511Z :DEBUG: Take Data. Partition 1. Read: {0, 1} (2-2) 2025-05-07T08:51:36.484544Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 22 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..11 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 1 SeqNo: 42 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..11 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 2 SeqNo: 43 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } } 2025-05-07T08:51:36.484738Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [1, 3). Partition stream id: 1 Getting new event 2025-05-07T08:51:36.484783Z :DEBUG: Take Data. Partition 1. Read: {0, 2} (3-3) 2025-05-07T08:51:36.484828Z :DEBUG: Take Data. Partition 1. Read: {1, 0} (4-4) 2025-05-07T08:51:36.484873Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 22 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..11 bytes.. 
Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 3 SeqNo: 44 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..11 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 4 SeqNo: 45 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } } 2025-05-07T08:51:36.485004Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [3, 5). Partition stream id: 1 Getting new event 2025-05-07T08:51:36.485100Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (5-5) 2025-05-07T08:51:36.485119Z :DEBUG: Take Data. Partition 1. Read: {0, 1} (6-6) 2025-05-07T08:51:36.485138Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 22 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..11 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 5 SeqNo: 46 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..11 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 6 SeqNo: 47 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } } 2025-05-07T08:51:36.485285Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [5, 7). Partition stream id: 1 Getting new event 2025-05-07T08:51:36.485324Z :DEBUG: Take Data. Partition 1. Read: {0, 2} (7-7) 2025-05-07T08:51:36.485346Z :DEBUG: Take Data. Partition 1. Read: {1, 0} (8-8) 2025-05-07T08:51:36.485369Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 22 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..11 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 7 SeqNo: 48 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..11 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 8 SeqNo: 49 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } } 2025-05-07T08:51:36.485484Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [7, 9). Partition stream id: 1 2025-05-07T08:51:36.487527Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:51:36.487569Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:51:36.487595Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-05-07T08:51:36.488002Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. 
Initializing session 2025-05-07T08:51:36.488583Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-05-07T08:51:36.488752Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:51:36.489031Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) Message data size: 10 Compressed message data size: 30 2025-05-07T08:51:36.490112Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function 2025-05-07T08:51:36.490407Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function Getting new event 2025-05-07T08:51:36.490758Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (5-8) 2025-05-07T08:51:36.491020Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-4) 2025-05-07T08:51:36.491174Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-05-07T08:51:36.491210Z :DEBUG: Take Data. Partition 1. Read: {0, 1} (2-2) 2025-05-07T08:51:36.491236Z :DEBUG: Take Data. Partition 1. Read: {0, 2} (3-3) 2025-05-07T08:51:36.491256Z :DEBUG: Take Data. Partition 1. Read: {1, 0} (4-4) 2025-05-07T08:51:36.491290Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 4, size 40 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 1 SeqNo: 42 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 2 SeqNo: 43 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 3 SeqNo: 44 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 4 SeqNo: 45 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } } 2025-05-07T08:51:36.491544Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [1, 5). Partition stream id: 1 Getting new event 2025-05-07T08:51:36.491634Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (5-5) 2025-05-07T08:51:36.491661Z :DEBUG: Take Data. Partition 1. Read: {0, 1} (6-6) 2025-05-07T08:51:36.491681Z :DEBUG: Take Data. Partition 1. Read: {0, 2} (7-7) 2025-05-07T08:51:36.491702Z :DEBUG: Take Data. Partition 1. Read: {1, 0} (8-8) 2025-05-07T08:51:36.491726Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. 
Number of messages 4, size 40 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 5 SeqNo: 46 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 6 SeqNo: 47 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 7 SeqNo: 48 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 8 SeqNo: 49 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } } 2025-05-07T08:51:36.491884Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [5, 9). Partition stream id: 1 2025-05-07T08:51:36.493270Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:51:36.493299Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:51:36.493334Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-05-07T08:51:36.493621Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-05-07T08:51:36.494021Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-05-07T08:51:36.494162Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:51:36.494342Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-05-07T08:51:36.495248Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function 2025-05-07T08:51:36.496008Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function 2025-05-07T08:51:36.496361Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (10-11) 2025-05-07T08:51:36.496499Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-2) 2025-05-07T08:51:36.496653Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-05-07T08:51:36.496692Z :DEBUG: Take Data. Partition 1. Read: {0, 1} (2-2) 2025-05-07T08:51:36.496717Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (10-10) 2025-05-07T08:51:36.496736Z :DEBUG: Take Data. Partition 1. Read: {0, 1} (11-11) 2025-05-07T08:51:36.496791Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 16 bytes 2025-05-07T08:51:36.496817Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. 
Number of messages 2, size 16 bytes
got data event: DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..8 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 1 SeqNo: 1 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:00:00.042000Z Ip: "::1" UncompressedSize: 0 Meta: { } } } Message { Data: ..8 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 2 SeqNo: 1 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:00:00.042000Z Ip: "::1" UncompressedSize: 0 Meta: { } } } Message { Data: ..8 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 10 SeqNo: 1 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:00:00.042000Z Ip: "::1" UncompressedSize: 0 Meta: { } } } Message { Data: ..8 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 11 SeqNo: 1 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:00:00.042000Z Ip: "::1" UncompressedSize: 0 Meta: { } } } }
2025-05-07T08:51:36.496985Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [1, 3). Partition stream id: 1
Got commit req { cookies { assign_id: 1 partition_cookie: 1 } }
2025-05-07T08:51:36.497153Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [10, 12). Partition stream id: 1
Got commit req { cookies { assign_id: 1 partition_cookie: 2 } }
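The records above show the full commit round trip the read-session tests exercise: a DataReceived event is handed to the application, the client answers with "Commit offsets [a, b)", and the server acknowledges each cookie ("Got commit req { cookies { ... } }"). A minimal consumer loop that drives the same handshake through the persqueue public SDK could look like the sketch below. This is an illustration only, not the test code: the endpoint, database, topic and consumer names are placeholders, and the include path depends on the SDK layout in your checkout.

    #include <ydb/public/sdk/cpp/client/ydb_persqueue_public/persqueue.h>

    int main() {
        // Placeholder connection parameters, not values from this log.
        NYdb::TDriver driver(NYdb::TDriverConfig()
                                 .SetEndpoint("grpc://localhost:2135")
                                 .SetDatabase("/Root"));
        NYdb::NPersQueue::TPersQueueClient client(driver);

        auto session = client.CreateReadSession(
            NYdb::NPersQueue::TReadSessionSettings()
                .ConsumerName("consumer")
                .AppendTopics(TString("TestTopic")));

        // GetEvent(block=true) waits for the next session event.
        while (auto event = session->GetEvent(true)) {
            using namespace NYdb::NPersQueue;
            if (auto* data = std::get_if<TReadSessionEvent::TDataReceivedEvent>(&*event)) {
                // "The application data is transferred to the client" in the log;
                // Commit() issues the "Commit offsets [a, b)" request.
                data->Commit();
            } else if (auto* create = std::get_if<TReadSessionEvent::TCreatePartitionStreamEvent>(&*event)) {
                create->Confirm();   // "Confirm partition stream create"
            } else if (auto* destroy = std::get_if<TReadSessionEvent::TDestroyPartitionStreamEvent>(&*event)) {
                destroy->Confirm();  // "Confirm partition stream destroy"
            } else if (std::get_if<TSessionClosedEvent>(&*event)) {
                break;               // session is closed, stop the loop
            }
        }
        driver.Stop(true);
    }

Note that the commit acknowledgement arrives asynchronously, which is why the logs interleave further reads between a "Commit offsets" request and the matching "Got commit req" record.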
>> TPQTest::DirectReadBadSessionOrPipe [GOOD]
>> ReadSessionImplTest::SimpleDataHandlersWithCommit [GOOD]
>> KikimrIcGateway::TestLoadExternalTable
>> AssignTxId::Basic [GOOD]
>> ReadSessionImplTest::SimpleDataHandlersWithGracefulRelease [GOOD]
>> ReadSessionImplTest::SimpleDataHandlersWithGracefulReleaseWithCommit
>> TTransferTests::CreateSequential [GOOD]
>> TTransferTests::CreateInParallel
>> ReadSessionImplTest::SimpleDataHandlersWithGracefulReleaseWithCommit [GOOD]
>> ReadSessionImplTest::PacksBatches_BigBatchDecompressWithTwoBatchTasks [GOOD]
>> ReadSessionImplTest::PacksBatches_DecompressesOneMessagePerTime
>> ReadSessionImplTest::PacksBatches_DecompressesOneMessagePerTime [GOOD]
>> ReadSessionImplTest::PartitionStreamStatus [GOOD]
>> ReadSessionImplTest::PartitionStreamCallbacks [GOOD]
------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/unittest >> TPQTest::DirectReadBadSessionOrPipe [GOOD]
Test command err:
2025-05-07T08:51:32.624557Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3088: [PQ: 72057594037927937] Handle TEvInterconnect::TEvNodeInfo
2025-05-07T08:51:32.629494Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3120: [PQ: 72057594037927937] Transactions request. From tx_00000000000000000000, To tx_18446744073709551615
2025-05-07T08:51:32.629823Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:745: [PQ: 72057594037927937] doesn't have tx info
2025-05-07T08:51:32.629883Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:757: [PQ: 72057594037927937] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0
2025-05-07T08:51:32.629927Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:969: [PQ: 72057594037927937] no config, start with empty partitions and default config
2025-05-07T08:51:32.629955Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4846: [PQ: 72057594037927937] Txs.size=0, PlannedTxs.size=0
2025-05-07T08:51:32.630043Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig
2025-05-07T08:51:32.630091Z node 1 :PERSQUEUE INFO: pq_impl.cpp:794: [PQ: 72057594037927937] doesn't have tx writes info
2025-05-07T08:51:32.647254Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2874: [PQ: 72057594037927937] server connected, pipe [1:178:2193], now have 1 active actors on pipe
2025-05-07T08:51:32.647344Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:1454: [PQ: 72057594037927937] Handle TEvPersQueue::TEvUpdateConfig
2025-05-07T08:51:32.660877Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:1640: [PQ: 72057594037927937] Config update version 1(current 0) received from actor [1:177:2192] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 TopicName: "topic" Version: 1 LocalDC: true Topic: "topic" TopicPath: "/topic" YcCloudId: "somecloud" YcFolderId: "somefolder" YdbDatabaseId: "PQ" YdbDatabasePath: "/Root/PQ" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 1 FederationAccount: "federationAccount" MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 1 Important: false }
2025-05-07T08:51:32.665623Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:585: [PQ: 72057594037927937] Apply new config CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 TopicName: "topic" Version: 1 LocalDC: true Topic: "topic" TopicPath: "/topic" YcCloudId: "somecloud" YcFolderId: "somefolder" YdbDatabaseId: "PQ" YdbDatabasePath: "/Root/PQ" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 1 FederationAccount: "federationAccount" MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 1 Important: false }
2025-05-07T08:51:32.665779Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig
2025-05-07T08:51:32.667517Z node 1 :PERSQUEUE INFO: pq_impl.cpp:1481: [PQ: 72057594037927937] Config applied version 1 actor [1:177:2192] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 TopicName: "topic" Version: 1 LocalDC: true Topic: "topic" TopicPath:
"/topic" YcCloudId: "somecloud" YcFolderId: "somefolder" YdbDatabaseId: "PQ" YdbDatabasePath: "/Root/PQ" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 1 FederationAccount: "federationAccount" MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 1 Important: false } 2025-05-07T08:51:32.667755Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:0:Initializer] Start initializing step TInitConfigStep 2025-05-07T08:51:32.667844Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:1:Initializer] Start initializing step TInitConfigStep 2025-05-07T08:51:32.668357Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:0:Initializer] Start initializing step TInitInternalFieldsStep 2025-05-07T08:51:32.668812Z node 1 :PERSQUEUE INFO: partition_init.cpp:879: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [1:186:2199] 2025-05-07T08:51:32.669812Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:55: [topic:0:Initializer] Initializing completed. 2025-05-07T08:51:32.669907Z node 1 :PERSQUEUE INFO: partition.cpp:557: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'topic' partition 0 generation 2 [1:186:2199] 2025-05-07T08:51:32.669992Z node 1 :PERSQUEUE DEBUG: partition.cpp:571: [PQ: 72057594037927937, Partition: 0, State: StateInit] SYNC INIT topic topic partitition 0 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-05-07T08:51:32.670676Z node 1 :PERSQUEUE DEBUG: partition.cpp:3847: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Process pending events. Count 0 2025-05-07T08:51:32.670781Z node 1 :PERSQUEUE DEBUG: partition.cpp:3152: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'topic' partition 0 user user reinit request with generation 1 2025-05-07T08:51:32.670840Z node 1 :PERSQUEUE DEBUG: partition.cpp:3221: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'topic' partition 0 user user reinit with generation 1 done 2025-05-07T08:51:32.670977Z node 1 :PERSQUEUE DEBUG: partition.cpp:2182: [PQ: 72057594037927937, Partition: 0, State: StateIdle] === DumpKeyValueRequest === 2025-05-07T08:51:32.671024Z node 1 :PERSQUEUE DEBUG: partition.cpp:2183: [PQ: 72057594037927937, Partition: 0, State: StateIdle] --- delete ---------------- 2025-05-07T08:51:32.671060Z node 1 :PERSQUEUE DEBUG: partition.cpp:2191: [PQ: 72057594037927937, Partition: 0, State: StateIdle] --- write ----------------- 2025-05-07T08:51:32.671113Z node 1 :PERSQUEUE DEBUG: partition.cpp:2194: [PQ: 72057594037927937, Partition: 0, State: StateIdle] i0000000000 2025-05-07T08:51:32.671160Z node 1 :PERSQUEUE DEBUG: partition.cpp:2194: [PQ: 72057594037927937, Partition: 0, State: StateIdle] m0000000000cuser 2025-05-07T08:51:32.671183Z node 1 :PERSQUEUE DEBUG: partition.cpp:2194: [PQ: 72057594037927937, Partition: 0, State: StateIdle] m0000000000uuser 2025-05-07T08:51:32.671223Z node 1 :PERSQUEUE DEBUG: partition.cpp:2196: [PQ: 72057594037927937, Partition: 0, State: StateIdle] --- rename ---------------- 2025-05-07T08:51:32.671267Z node 1 :PERSQUEUE DEBUG: partition.cpp:2201: [PQ: 72057594037927937, Partition: 0, State: StateIdle] =========================== 2025-05-07T08:51:32.671383Z node 1 :PERSQUEUE DEBUG: partition_read.cpp:774: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'topic' partition 0 user user readTimeStamp for offset 0 initiated 
queuesize 0 startOffset 0 ReadingTimestamp 0 rrg 0 2025-05-07T08:51:32.671606Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:1:Initializer] Start initializing step TInitInternalFieldsStep 2025-05-07T08:51:32.671901Z node 1 :PERSQUEUE INFO: partition_init.cpp:879: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [1:188:2201] 2025-05-07T08:51:32.672683Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:55: [topic:1:Initializer] Initializing completed. 2025-05-07T08:51:32.672735Z node 1 :PERSQUEUE INFO: partition.cpp:557: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'topic' partition 1 generation 2 [1:188:2201] 2025-05-07T08:51:32.672774Z node 1 :PERSQUEUE DEBUG: partition.cpp:571: [PQ: 72057594037927937, Partition: 1, State: StateInit] SYNC INIT topic topic partitition 1 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-05-07T08:51:32.673295Z node 1 :PERSQUEUE DEBUG: partition.cpp:3847: [PQ: 72057594037927937, Partition: 1, State: StateIdle] Process pending events. Count 0 2025-05-07T08:51:32.673374Z node 1 :PERSQUEUE DEBUG: partition.cpp:3152: [PQ: 72057594037927937, Partition: 1, State: StateIdle] Topic 'topic' partition 1 user user reinit request with generation 1 2025-05-07T08:51:32.673405Z node 1 :PERSQUEUE DEBUG: partition.cpp:3221: [PQ: 72057594037927937, Partition: 1, State: StateIdle] Topic 'topic' partition 1 user user reinit with generation 1 done 2025-05-07T08:51:32.673474Z node 1 :PERSQUEUE DEBUG: partition.cpp:2182: [PQ: 72057594037927937, Partition: 1, State: StateIdle] === DumpKeyValueRequest === 2025-05-07T08:51:32.673515Z node 1 :PERSQUEUE DEBUG: partition.cpp:2183: [PQ: 72057594037927937, Partition: 1, State: StateIdle] --- delete ---------------- 2025-05-07T08:51:32.673536Z node 1 :PERSQUEUE DEBUG: partition.cpp:2191: [PQ: 72057594037927937, Partition: 1, State: StateIdle] --- write ----------------- 2025-05-07T08:51:32.673561Z node 1 :PERSQUEUE DEBUG: partition.cpp:2194: [PQ: 72057594037927937, Partition: 1, State: StateIdle] i0000000001 2025-05-07T08:51:32.673578Z node 1 :PERSQUEUE DEBUG: partition.cpp:2194: [PQ: 72057594037927937, Partition: 1, State: StateIdle] m0000000001cuser 2025-05-07T08:51:32.673592Z node 1 :PERSQUEUE DEBUG: partition.cpp:2194: [PQ: 72057594037927937, Partition: 1, State: StateIdle] m0000000001uuser 2025-05-07T08:51:32.673609Z node 1 :PERSQUEUE DEBUG: partition.cpp:2196: [PQ: 72057594037927937, Partition: 1, State: StateIdle] --- rename ---------------- 2025-05-07T08:51:32.673627Z node 1 :PERSQUEUE DEBUG: partition.cpp:2201: [PQ: 72057594037927937, Partition: 1, State: StateIdle] =========================== 2025-05-07T08:51:32.673664Z node 1 :PERSQUEUE DEBUG: partition_read.cpp:774: [PQ: 72057594037927937, Partition: 1, State: StateIdle] Topic 'topic' partition 1 user user readTimeStamp for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 0 rrg 0 2025-05-07T08:51:32.673769Z node 1 :PERSQUEUE DEBUG: read.h:262: CacheProxy. Passthrough write request to KV 2025-05-07T08:51:32.673892Z node 1 :PERSQUEUE DEBUG: read.h:262: CacheProxy. 
Passthrough write request to KV 2025-05-07T08:51:32.679205Z node 1 :PERSQUEUE DEBUG: partition_write.cpp:524: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0 2025-05-07T08:51:32.679662Z node 1 :PERSQUEUE DEBUG: partition_write.cpp:524: [PQ: 72057594037927937, Partition: 1, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0 2025-05-07T08:51:32.680007Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2874: [PQ: 72057594037927937] server connected, pipe [1:201:2210], now have 1 active actors on pipe 2025-05-07T08:51:32.680762Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2874: [PQ: 72057594037927937] server connected, pipe [1:204:2212], now have 1 active actors on pipe 2025-05-07T08:51:32.681465Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3212: [PQ: 72057594037927937] Handle TEvPersQueue::TEvProposeTransaction SourceActor { RawX1: 177 RawX2: 4294969488 } TxId: 67890 Config { TabletConfig { PartitionConfig { LifetimeSeconds: 86400 WriteSpeedInBytesPerSecond: 10485760 } PartitionIds: 0 PartitionIds: 1 TopicName: "rt3.dc1--account--topic" Version: 2 LocalDC: true TopicPath: "/Root/PQ/rt3.dc1--account--topic" YdbDatabasePath: "" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } FederationAccount: "account" MeteringMode: METERING_MODE_REQUEST_UNITS AllPartitions { PartitionId: 0 } AllPartitions { Partitio ... 94037927937, Partition: 0, State: StateIdle] --- rename ---------------- 2025-05-07T08:51:37.093177Z node 8 :PERSQUEUE DEBUG: partition.cpp:2201: [PQ: 72057594037927937, Partition: 0, State: StateIdle] =========================== 2025-05-07T08:51:37.093208Z node 8 :PERSQUEUE DEBUG: read.h:262: CacheProxy. Passthrough write request to KV 2025-05-07T08:51:37.096493Z node 8 :PERSQUEUE DEBUG: partition_write.cpp:524: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0 2025-05-07T08:51:37.096590Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:377: Answer ok topic: 'rt3.dc1--asdfgs--topic' partition: 0 messageNo: 0 requestId: cookie: 0 Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user2" SessionId: "" Offset: 0 Count: 1 Bytes: 99999 DirectReadId: 1 PartitionSessionId: 1 } PipeClient { RawX1: 212 RawX2: 34359740588 } Cookie: 123 } via pipe: [8:177:2192] 2025-05-07T08:51:37.096911Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2874: [PQ: 72057594037927937] server connected, pipe [8:220:2226], now have 1 active actors on pipe 2025-05-07T08:51:37.097040Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:342: Handle TEvRequest topic: 'rt3.dc1--asdfgs--topic' requestId: 2025-05-07T08:51:37.097078Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2788: [PQ: 72057594037927937] got client message batch for topic 'rt3.dc1--asdfgs--topic' partition 0 2025-05-07T08:51:37.097120Z node 8 :PERSQUEUE WARN: event_helpers.cpp:42: tablet 72057594037927937 topic 'rt3.dc1--asdfgs--topic error: Read prepare request with empty session id 2025-05-07T08:51:37.097186Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:1418: [PQ: 72057594037927937] Handle TEvPQ::TEvError Cookie 6, Error Read prepare request with empty session id 2025-05-07T08:51:37.097214Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:396: Answer error topic: 'rt3.dc1--asdfgs--topic' partition: 0 messageNo: 0 requestId: error: Read prepare request with empty session id Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user2" SessionId: "bad-session" 
Offset: 0 Count: 1 Bytes: 99999 DirectReadId: 1 PartitionSessionId: 1 } PipeClient { RawX1: 212 RawX2: 34359740588 } Cookie: 123 } via pipe: [8:177:2192] 2025-05-07T08:51:37.097459Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2874: [PQ: 72057594037927937] server connected, pipe [8:223:2229], now have 1 active actors on pipe 2025-05-07T08:51:37.097534Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:342: Handle TEvRequest topic: 'rt3.dc1--asdfgs--topic' requestId: 2025-05-07T08:51:37.097559Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2788: [PQ: 72057594037927937] got client message batch for topic 'rt3.dc1--asdfgs--topic' partition 0 2025-05-07T08:51:37.097588Z node 8 :PERSQUEUE WARN: event_helpers.cpp:42: tablet 72057594037927937 topic 'rt3.dc1--asdfgs--topic error: Read prepare request with unknown(old?) session id bad-session 2025-05-07T08:51:37.097629Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:1418: [PQ: 72057594037927937] Handle TEvPQ::TEvError Cookie 7, Error Read prepare request with unknown(old?) session id bad-session 2025-05-07T08:51:37.097648Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:396: Answer error topic: 'rt3.dc1--asdfgs--topic' partition: 0 messageNo: 0 requestId: error: Read prepare request with unknown(old?) session id bad-session 2025-05-07T08:51:37.097811Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2874: [PQ: 72057594037927937] server connected, pipe [8:226:2232], now have 1 active actors on pipe 2025-05-07T08:51:37.097864Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:342: Handle TEvRequest topic: 'rt3.dc1--asdfgs--topic' requestId: 2025-05-07T08:51:37.097884Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2788: [PQ: 72057594037927937] got client message batch for topic 'rt3.dc1--asdfgs--topic' partition 0 2025-05-07T08:51:37.097924Z node 8 :PERSQUEUE INFO: pq_impl.cpp:1873: [PQ: 72057594037927937] Got cmd delete session: ClientId: "user2" SessionId: "session2" 2025-05-07T08:51:37.097952Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2433: [PQ: 72057594037927937] Destroy direct read session session2 2025-05-07T08:51:37.098056Z node 8 :PERSQUEUE DEBUG: partition.cpp:3264: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'rt3.dc1--asdfgs--topic' partition 0 user user2 session is set to 0 (startOffset 0) session session2 2025-05-07T08:51:37.098131Z node 8 :PERSQUEUE DEBUG: partition.cpp:2182: [PQ: 72057594037927937, Partition: 0, State: StateIdle] === DumpKeyValueRequest === 2025-05-07T08:51:37.098154Z node 8 :PERSQUEUE DEBUG: partition.cpp:2183: [PQ: 72057594037927937, Partition: 0, State: StateIdle] --- delete ---------------- 2025-05-07T08:51:37.098176Z node 8 :PERSQUEUE DEBUG: partition.cpp:2191: [PQ: 72057594037927937, Partition: 0, State: StateIdle] --- write ----------------- 2025-05-07T08:51:37.098202Z node 8 :PERSQUEUE DEBUG: partition.cpp:2194: [PQ: 72057594037927937, Partition: 0, State: StateIdle] i0000000000 2025-05-07T08:51:37.098235Z node 8 :PERSQUEUE DEBUG: partition.cpp:2194: [PQ: 72057594037927937, Partition: 0, State: StateIdle] m0000000000cuser2 2025-05-07T08:51:37.098259Z node 8 :PERSQUEUE DEBUG: partition.cpp:2194: [PQ: 72057594037927937, Partition: 0, State: StateIdle] m0000000000uuser2 2025-05-07T08:51:37.098285Z node 8 :PERSQUEUE DEBUG: partition.cpp:2196: [PQ: 72057594037927937, Partition: 0, State: StateIdle] --- rename ---------------- 2025-05-07T08:51:37.098309Z node 8 :PERSQUEUE DEBUG: partition.cpp:2201: [PQ: 72057594037927937, Partition: 0, State: StateIdle] =========================== 2025-05-07T08:51:37.098337Z node 8 :PERSQUEUE DEBUG: read.h:262: CacheProxy. 
Passthrough write request to KV 2025-05-07T08:51:37.100187Z node 8 :PERSQUEUE DEBUG: partition_write.cpp:524: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0 2025-05-07T08:51:37.100279Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:377: Answer ok topic: 'rt3.dc1--asdfgs--topic' partition: 0 messageNo: 0 requestId: cookie: 0 Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user2" SessionId: "session2" Offset: 0 Count: 1 Bytes: 99999 DirectReadId: 1 PartitionSessionId: 1 } PipeClient { RawX1: 212 RawX2: 34359740588 } Cookie: 123 } via pipe: [8:177:2192] 2025-05-07T08:51:37.100651Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2874: [PQ: 72057594037927937] server connected, pipe [8:232:2237], now have 1 active actors on pipe 2025-05-07T08:51:37.100774Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:342: Handle TEvRequest topic: 'rt3.dc1--asdfgs--topic' requestId: 2025-05-07T08:51:37.100803Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2788: [PQ: 72057594037927937] got client message batch for topic 'rt3.dc1--asdfgs--topic' partition 0 2025-05-07T08:51:37.100832Z node 8 :PERSQUEUE WARN: event_helpers.cpp:42: tablet 72057594037927937 topic 'rt3.dc1--asdfgs--topic error: Read prepare request with unknown(old?) session id session2 2025-05-07T08:51:37.100881Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:1418: [PQ: 72057594037927937] Handle TEvPQ::TEvError Cookie 9, Error Read prepare request with unknown(old?) session id session2 2025-05-07T08:51:37.100905Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:396: Answer error topic: 'rt3.dc1--asdfgs--topic' partition: 0 messageNo: 0 requestId: error: Read prepare request with unknown(old?) session id session2 Publish read Send publish read request: Partition: 0 PipeClient { RawX1: 212 RawX2: 34359740588 } Cookie: 123 CmdPublishRead { SessionKey { SessionId: "session2" PartitionSessionId: 1 } DirectReadId: 1 } 2025-05-07T08:51:37.101148Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2874: [PQ: 72057594037927937] server connected, pipe [8:235:2240], now have 1 active actors on pipe 2025-05-07T08:51:37.101208Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:342: Handle TEvRequest topic: 'rt3.dc1--asdfgs--topic' requestId: 2025-05-07T08:51:37.101233Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2788: [PQ: 72057594037927937] got client message batch for topic 'rt3.dc1--asdfgs--topic' partition 0 2025-05-07T08:51:37.101294Z node 8 :PERSQUEUE WARN: event_helpers.cpp:42: tablet 72057594037927937 topic 'rt3.dc1--asdfgs--topic error: Read prepare request with unknown(old?) session id session2Partition: 0 PipeClient { RawX1: 212 RawX2: 34359740588 } Cookie: 123 CmdPublishRead { SessionKey { SessionId: "session2" PartitionSessionId: 1 } DirectReadId: 1 } 2025-05-07T08:51:37.101332Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:1418: [PQ: 72057594037927937] Handle TEvPQ::TEvError Cookie 10, Error Read prepare request with unknown(old?) session id session2Partition: 0 PipeClient { RawX1: 212 RawX2: 34359740588 } Cookie: 123 CmdPublishRead { SessionKey { SessionId: "session2" PartitionSessionId: 1 } DirectReadId: 1 } 2025-05-07T08:51:37.101354Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:396: Answer error topic: 'rt3.dc1--asdfgs--topic' partition: 0 messageNo: 0 requestId: error: Read prepare request with unknown(old?) 
session id session2Partition: 0 PipeClient { RawX1: 212 RawX2: 34359740588 } Cookie: 123 CmdPublishRead { SessionKey { SessionId: "session2" PartitionSessionId: 1 } DirectReadId: 1 }
Got direct read response: Status: 128 ErrorReason: "Read prepare request with unknown(old?) session id session2Partition: 0\nPipeClient {\n RawX1: 212\n RawX2: 34359740588\n}\nCookie: 123\nCmdPublishRead {\n SessionKey {\n SessionId: \"session2\"\n PartitionSessionId: 1\n }\n DirectReadId: 1\n}\n" ErrorCode: BAD_REQUEST PartitionResponse { Cookie: 123 }
Forget read
Send forget read request: Partition: 0 PipeClient { RawX1: 212 RawX2: 34359740588 } Cookie: 123 CmdForgetRead { SessionKey { SessionId: "session2" PartitionSessionId: 1 } DirectReadId: 1 }
2025-05-07T08:51:37.101586Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2874: [PQ: 72057594037927937] server connected, pipe [8:237:2242], now have 1 active actors on pipe
2025-05-07T08:51:37.101634Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:342: Handle TEvRequest topic: 'rt3.dc1--asdfgs--topic' requestId:
2025-05-07T08:51:37.101655Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2788: [PQ: 72057594037927937] got client message batch for topic 'rt3.dc1--asdfgs--topic' partition 0
2025-05-07T08:51:37.101709Z node 8 :PERSQUEUE WARN: event_helpers.cpp:42: tablet 72057594037927937 topic 'rt3.dc1--asdfgs--topic error: Read prepare request with unknown(old?) session id session2Partition: 0 PipeClient { RawX1: 212 RawX2: 34359740588 } Cookie: 123 CmdForgetRead { SessionKey { SessionId: "session2" PartitionSessionId: 1 } DirectReadId: 1 }
2025-05-07T08:51:37.101745Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:1418: [PQ: 72057594037927937] Handle TEvPQ::TEvError Cookie 11, Error Read prepare request with unknown(old?) session id session2Partition: 0 PipeClient { RawX1: 212 RawX2: 34359740588 } Cookie: 123 CmdForgetRead { SessionKey { SessionId: "session2" PartitionSessionId: 1 } DirectReadId: 1 }
2025-05-07T08:51:37.101765Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:396: Answer error topic: 'rt3.dc1--asdfgs--topic' partition: 0 messageNo: 0 requestId: error: Read prepare request with unknown(old?) session id session2Partition: 0 PipeClient { RawX1: 212 RawX2: 34359740588 } Cookie: 123 CmdForgetRead { SessionKey { SessionId: "session2" PartitionSessionId: 1 } DirectReadId: 1 }
Got direct read response: Status: 128 ErrorReason: "Read prepare request with unknown(old?) session id session2Partition: 0\nPipeClient {\n RawX1: 212\n RawX2: 34359740588\n}\nCookie: 123\nCmdForgetRead {\n SessionKey {\n SessionId: \"session2\"\n PartitionSessionId: 1\n }\n DirectReadId: 1\n}\n" ErrorCode: BAD_REQUEST PartitionResponse { Cookie: 123 }
------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/persqueue_public/ut/unittest >> ReadSessionImplTest::SimpleDataHandlersWithCommit [GOOD]
Test command err:
2025-05-07T08:51:36.880190Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0
2025-05-07T08:51:36.880240Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0
2025-05-07T08:51:36.880280Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s
2025-05-07T08:51:36.880641Z :ERROR: [db] [sessionid] [cluster] Got error. Status: INTERNAL_ERROR.
Description: 2025-05-07T08:51:36.880684Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:51:36.880709Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:51:36.881637Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.008978s 2025-05-07T08:51:36.882255Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-05-07T08:51:36.882723Z :INFO: [db] [sessionid] [cluster] Server session id: session id 2025-05-07T08:51:36.882802Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:51:36.883815Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:51:36.883847Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:51:36.883867Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-05-07T08:51:36.884119Z :ERROR: [db] [sessionid] [cluster] Got error. Status: INTERNAL_ERROR. Description: 2025-05-07T08:51:36.884152Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:51:36.884197Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:51:36.884253Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.009333s 2025-05-07T08:51:36.884807Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-05-07T08:51:36.885261Z :INFO: [db] [sessionid] [cluster] Server session id: session id 2025-05-07T08:51:36.885338Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:51:36.886219Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:51:36.886239Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:51:36.886258Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-05-07T08:51:36.886621Z :ERROR: [db] [sessionid] [cluster] Got error. Status: TIMEOUT. Description:
: Error: Failed to establish connection to server. Attempts done: 1 2025-05-07T08:51:36.886664Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:51:36.886706Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:51:36.886785Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.202944s 2025-05-07T08:51:36.887583Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-05-07T08:51:36.888115Z :INFO: [db] [sessionid] [cluster] Server session id: session id 2025-05-07T08:51:36.888197Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:51:36.888933Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:51:36.888947Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:51:36.888958Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-05-07T08:51:36.889259Z :ERROR: [db] [sessionid] [cluster] Got error. Status: TIMEOUT. Description:
: Error: Failed to establish connection to server. Attempts done: 1 2025-05-07T08:51:36.889290Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:51:36.889303Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:51:36.889346Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.164828s 2025-05-07T08:51:36.889788Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-05-07T08:51:36.890266Z :INFO: [db] [sessionid] [cluster] Server session id: session id 2025-05-07T08:51:36.890398Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:51:36.891113Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:51:36.891133Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:51:36.891181Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-05-07T08:51:36.891614Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-05-07T08:51:36.892069Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-05-07T08:51:36.903473Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:51:36.904641Z :ERROR: [db] [sessionid] [cluster] Got error. Status: TRANSPORT_UNAVAILABLE. Description:
: Error: GRpc error: (14): 2025-05-07T08:51:36.904673Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:51:36.904693Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:51:36.904755Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.296794s 2025-05-07T08:51:36.904901Z :DEBUG: [db] [sessionid] [cluster] Abort session to cluster 2025-05-07T08:51:36.906406Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:51:36.906434Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:51:36.906459Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-05-07T08:51:36.907479Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-05-07T08:51:36.908305Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-05-07T08:51:36.908448Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:51:36.908831Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-05-07T08:51:37.009890Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:51:37.010194Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-2) 2025-05-07T08:51:37.010274Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-05-07T08:51:37.010329Z :DEBUG: Take Data. Partition 1. Read: {1, 0} (2-2) 2025-05-07T08:51:37.010409Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 6 bytes 2025-05-07T08:51:37.110880Z :INFO: [db] [sessionid] [cluster] Confirm partition stream destroy. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1 2025-05-07T08:51:37.111069Z :DEBUG: [db] [sessionid] [cluster] Abort session to cluster 2025-05-07T08:51:37.112389Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:51:37.112414Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:51:37.112439Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-05-07T08:51:37.112735Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-05-07T08:51:37.120805Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-05-07T08:51:37.121068Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:51:37.121624Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-05-07T08:51:37.223174Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:51:37.223447Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-2) 2025-05-07T08:51:37.223509Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-05-07T08:51:37.223565Z :DEBUG: Take Data. Partition 1. 
Read: {1, 0} (2-2)
2025-05-07T08:51:37.223665Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [1, 3). Partition stream id: 1
2025-05-07T08:51:37.223785Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 6 bytes
2025-05-07T08:51:37.224036Z :INFO: [db] [sessionid] [cluster] Confirm partition stream destroy. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1
2025-05-07T08:51:37.224177Z :DEBUG: [db] [sessionid] [cluster] Committed response: { cookies { assign_id: 1 partition_cookie: 1 } }
2025-05-07T08:51:37.225578Z :DEBUG: [db] [sessionid] [cluster] Abort session to cluster
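Two tunables are visible throughout these session logs: the constant ReadSizeBudget of 52428800 bytes is the session-wide memory budget (exactly 50 MiB), and the growing reconnect delays (0.008978s ... 0.296794s after TIMEOUT and TRANSPORT_UNAVAILABLE errors) come from the session retry policy. The sketch below shows how both might be configured, assuming the NYdb::NPersQueue TReadSessionSettings API; the topic and consumer names are placeholders, and the exact GetExponentialBackoffPolicy signature should be checked against the SDK in your checkout.

    #include <ydb/public/sdk/cpp/client/ydb_persqueue_public/persqueue.h>

    NYdb::NPersQueue::TReadSessionSettings MakeSettings() {
        using namespace NYdb::NPersQueue;
        return TReadSessionSettings()
            .ConsumerName("consumer")            // placeholder
            .AppendTopics(TString("TestTopic"))  // placeholder
            // 50 MiB: matches the ReadSizeBudget these sessions report.
            .MaxMemoryUsageBytes(50 * 1024 * 1024)
            // Exponential backoff produces the increasing reconnect delays
            // seen in the log after transient errors.
            .RetryPolicy(IRetryPolicy::GetExponentialBackoffPolicy(
                TDuration::MilliSeconds(10),     // min delay
                TDuration::MilliSeconds(200),    // min delay for long retries
                TDuration::Seconds(30)));        // max delay
    }

Retriable statuses keep the session reconnecting under this policy; when retries are exhausted or the error is fatal, the session surfaces the SessionClosed event with the accumulated issues, as in the next block.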
------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/persqueue_public/ut/unittest >> ReadSessionImplTest::SimpleDataHandlersWithGracefulReleaseWithCommit [GOOD]
Test command err:
2025-05-07T08:51:36.867580Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0
2025-05-07T08:51:36.867628Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0
2025-05-07T08:51:36.867658Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s
2025-05-07T08:51:36.868101Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session
2025-05-07T08:51:36.868574Z :INFO: [db] [sessionid] [cluster] Server session id: session id
2025-05-07T08:51:36.868682Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0
2025-05-07T08:51:36.869483Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0
2025-05-07T08:51:36.869507Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0
2025-05-07T08:51:36.869533Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s
2025-05-07T08:51:36.869813Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session
2025-05-07T08:51:36.870088Z :INFO: [db] [sessionid] [cluster] Server session id: session id
2025-05-07T08:51:36.870147Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0
2025-05-07T08:51:36.870828Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0
2025-05-07T08:51:36.870848Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0
2025-05-07T08:51:36.870866Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s
2025-05-07T08:51:36.871162Z :ERROR: [db] [sessionid] [cluster] Got error. Status: INTERNAL_ERROR. Description:
2025-05-07T08:51:36.871211Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0
2025-05-07T08:51:36.871237Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0
2025-05-07T08:51:36.871343Z :INFO: [db] [sessionid] [cluster] Closing session to cluster: SessionClosed { Status: INTERNAL_ERROR Issues: "
: Error: Failed to establish connection to server "" ( cluster cluster). Attempts done: 1 " }
2025-05-07T08:51:36.872202Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0
2025-05-07T08:51:36.872224Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0
2025-05-07T08:51:36.872244Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s
2025-05-07T08:51:36.872621Z :ERROR: [db] [sessionid] [cluster] Got error. Status: TIMEOUT. Description:
: Error: Failed to establish connection to server. Attempts done: 1 2025-05-07T08:51:36.872664Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:51:36.872682Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:51:36.872743Z :INFO: [db] [sessionid] [cluster] Closing session to cluster: SessionClosed { Status: TIMEOUT Issues: "
: Error: Failed to establish connection to server. Attempts done: 1 " } 2025-05-07T08:51:36.873883Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 2500, ReadSizeServerDelta = 0 2025-05-07T08:51:36.873926Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 2500, ReadSizeServerDelta = 0 2025-05-07T08:51:36.873952Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-05-07T08:51:36.874375Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-05-07T08:51:36.875109Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-05-07T08:51:36.884647Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 2500, ReadSizeServerDelta = 0 2025-05-07T08:51:36.885236Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-05-07T08:51:36.886167Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 2. Cluster: "TestCluster". Topic: "TestTopic". Partition: 2. Read offset: (NULL) 2025-05-07T08:51:36.889924Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-50) 2025-05-07T08:51:36.890220Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-05-07T08:51:36.890262Z :DEBUG: Take Data. Partition 1. Read: {0, 1} (2-2) 2025-05-07T08:51:36.890287Z :DEBUG: Take Data. Partition 1. Read: {0, 2} (3-3) 2025-05-07T08:51:36.890309Z :DEBUG: Take Data. Partition 1. Read: {0, 3} (4-4) 2025-05-07T08:51:36.890343Z :DEBUG: Take Data. Partition 1. Read: {0, 4} (5-5) 2025-05-07T08:51:36.890362Z :DEBUG: Take Data. Partition 1. Read: {0, 5} (6-6) 2025-05-07T08:51:36.890379Z :DEBUG: Take Data. Partition 1. Read: {0, 6} (7-7) 2025-05-07T08:51:36.890400Z :DEBUG: Take Data. Partition 1. Read: {0, 7} (8-8) 2025-05-07T08:51:36.890432Z :DEBUG: Take Data. Partition 1. Read: {0, 8} (9-9) 2025-05-07T08:51:36.890452Z :DEBUG: Take Data. Partition 1. Read: {0, 9} (10-10) 2025-05-07T08:51:36.890473Z :DEBUG: Take Data. Partition 1. Read: {0, 10} (11-11) 2025-05-07T08:51:36.890492Z :DEBUG: Take Data. Partition 1. Read: {0, 11} (12-12) 2025-05-07T08:51:36.890538Z :DEBUG: Take Data. Partition 1. Read: {0, 12} (13-13) 2025-05-07T08:51:36.890561Z :DEBUG: Take Data. Partition 1. Read: {0, 13} (14-14) 2025-05-07T08:51:36.890578Z :DEBUG: Take Data. Partition 1. Read: {0, 14} (15-15) 2025-05-07T08:51:36.890595Z :DEBUG: Take Data. Partition 1. Read: {0, 15} (16-16) 2025-05-07T08:51:36.890667Z :DEBUG: Take Data. Partition 1. Read: {0, 16} (17-17) 2025-05-07T08:51:36.890688Z :DEBUG: Take Data. Partition 1. Read: {0, 17} (18-18) 2025-05-07T08:51:36.890723Z :DEBUG: Take Data. Partition 1. Read: {0, 18} (19-19) 2025-05-07T08:51:36.890749Z :DEBUG: Take Data. Partition 1. Read: {0, 19} (20-20) 2025-05-07T08:51:36.890772Z :DEBUG: Take Data. Partition 1. Read: {0, 20} (21-21) 2025-05-07T08:51:36.890795Z :DEBUG: Take Data. Partition 1. Read: {0, 21} (22-22) 2025-05-07T08:51:36.890815Z :DEBUG: Take Data. Partition 1. Read: {1, 0} (23-23) 2025-05-07T08:51:36.890835Z :DEBUG: Take Data. Partition 1. Read: {1, 1} (24-24) 2025-05-07T08:51:36.890857Z :DEBUG: Take Data. Partition 1. Read: {1, 2} (25-25) 2025-05-07T08:51:36.890875Z :DEBUG: Take Data. Partition 1. Read: {1, 3} (26-26) 2025-05-07T08:51:36.890891Z :DEBUG: Take Data. Partition 1. Read: {1, 4} (27-27) 2025-05-07T08:51:36.890908Z :DEBUG: Take Data. Partition 1. 
Read: {1, 5} (28-28) 2025-05-07T08:51:36.890923Z :DEBUG: Take Data. Partition 1. Read: {1, 6} (29-29) 2025-05-07T08:51:36.890943Z :DEBUG: Take Data. Partition 1. Read: {1, 7} (30-30) 2025-05-07T08:51:36.890961Z :DEBUG: Take Data. Partition 1. Read: {1, 8} (31-31) 2025-05-07T08:51:36.890978Z :DEBUG: Take Data. Partition 1. Read: {1, 9} (32-32) 2025-05-07T08:51:36.891078Z :DEBUG: Take Data. Partition 1. Read: {1, 10} (33-33) 2025-05-07T08:51:36.891101Z :DEBUG: Take Data. Partition 1. Read: {1, 11} (34-34) 2025-05-07T08:51:36.891130Z :DEBUG: Take Data. Partition 1. Read: {1, 12} (35-35) 2025-05-07T08:51:36.891156Z :DEBUG: Take Data. Partition 1. Read: {1, 13} (36-36) 2025-05-07T08:51:36.891189Z :DEBUG: Take Data. Partition 1. Read: {1, 14} (37-37) 2025-05-07T08:51:36.891217Z :DEBUG: Take Data. Partition 1. Read: {1, 15} (38-38) 2025-05-07T08:51:36.891237Z :DEBUG: Take Data. Partition 1. Read: {1, 16} (39-39) 2025-05-07T08:51:36.891267Z :DEBUG: Take Data. Partition 1. Read: {1, 17} (40-40) 2025-05-07T08:51:36.891285Z :DEBUG: Take Data. Partition 1. Read: {1, 18} (41-41) 2025-05-07T08:51:36.891303Z :DEBUG: Take Data. Partition 1. Read: {1, 19} (42-42) 2025-05-07T08:51:36.891318Z :DEBUG: Take Data. Partition 1. Read: {1, 20} (43-43) 2025-05-07T08:51:36.891329Z :DEBUG: Take Data. Partition 1. Read: {1, 21} (44-44) 2025-05-07T08:51:36.891346Z :DEBUG: Take Data. Partition 1. Read: {1, 22} (45-45) 2025-05-07T08:51:36.891364Z :DEBUG: Take Data. Partition 1. Read: {1, 23} (46-46) 2025-05-07T08:51:36.891385Z :DEBUG: Take Data. Partition 1. Read: {1, 24} (47-47) 2025-05-07T08:51:36.891403Z :DEBUG: Take Data. Partition 1. Read: {1, 25} (48-48) 2025-05-07T08:51:36.891419Z :DEBUG: Take Data. Partition 1. Read: {1, 26} (49-49) 2025-05-07T08:51:36.891438Z :DEBUG: Take Data. Partition 1. Read: {1, 27} (50-50) 2025-05-07T08:51:36.891527Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 50, size 5000 bytes 2025-05-07T08:51:36.893704Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 2 (51-100) 2025-05-07T08:51:36.893923Z :DEBUG: Take Data. Partition 2. Read: {0, 0} (51-51) 2025-05-07T08:51:36.893997Z :DEBUG: Take Data. Partition 2. Read: {0, 1} (52-52) 2025-05-07T08:51:36.894031Z :DEBUG: Take Data. Partition 2. Read: {0, 2} (53-53) 2025-05-07T08:51:36.894053Z :DEBUG: Take Data. Partition 2. Read: {0, 3} (54-54) 2025-05-07T08:51:36.894095Z :DEBUG: Take Data. Partition 2. Read: {0, 4} (55-55) 2025-05-07T08:51:36.894121Z :DEBUG: Take Data. Partition 2. Read: {0, 5} (56-56) 2025-05-07T08:51:36.894139Z :DEBUG: Take Data. Partition 2. Read: {0, 6} (57-57) 2025-05-07T08:51:36.894158Z :DEBUG: Take Data. Partition 2. Read: {0, 7} (58-58) 2025-05-07T08:51:36.894194Z :DEBUG: Take Data. Partition 2. Read: {0, 8} (59-59) 2025-05-07T08:51:36.894225Z :DEBUG: Take Data. Partition 2. Read: {0, 9} (60-60) 2025-05-07T08:51:36.894243Z :DEBUG: Take Data. Partition 2. Read: {0, 10} (61-61) 2025-05-07T08:51:36.894265Z :DEBUG: Take Data. Partition 2. Read: {0, 11} (62-62) 2025-05-07T08:51:36.894283Z :DEBUG: Take Data. Partition 2. Read: {0, 12} (63-63) 2025-05-07T08:51:36.894298Z :DEBUG: Take Data. Partition 2. Read: {0, 13} (64-64) 2025-05-07T08:51:36.894315Z :DEBUG: Take Data. Partition 2. Read: {0, 14} (65-65) 2025-05-07T08:51:36.894342Z :DEBUG: Take Data. Partition 2. Read: {0, 15} (66-66) 2025-05-07T08:51:36.894419Z :DEBUG: Take Data. Partition 2. Read: {0, 16} (67-67) 2025-05-07T08:51:36.894455Z :DEBUG: Take Data. Partition 2. 
Read: {0, 17} (68-68) 2025-05-07T08:51:36.894476Z :DEBUG: Take Data. Partition 2. Read: {0, 18} (69-69) 2025-05-07T08:51:36.894527Z :DEBUG: Take Data. Partition 2. Read: {0, 19} (70-70) 2025-05-07T08:51:36.894545Z :DEBUG: Take Data. Partition 2. Read: {0, 20} (71-71) 2025-05-07T08:51:36.894564Z :DEBUG: Take Data. Partition 2. Read: {0, 21} (72-72) 2025-05-07T08:51:36.894584Z :DEBUG: Take Data. Partition 2. Read: {1, 0} (73-73) 2025-05-07T08:51:36.894620Z :DEBUG: Take Data. Partition 2. Read: {1, 1} (74-74) 2025-05-07T08:51:36.894639Z :DEBUG: Take Data. Partition 2. Read: {1, 2} (75-75) 2025-05-07T08:51:36.894660Z :DEBUG: Take Data. Partition 2. Read: {1, 3} (76-76) 2025-05-07T08:51:36.894678Z :DEBUG: Take Data. Partition 2. Read: {1, 4} (77-77) 2025-05-07T08:51:36.894699Z :DEBUG: Take Data. Partition 2. Read: {1, 5} (78-78) 2025-05-07T08:51:36.894716Z :DEBUG: Take Data. Partition 2. Read: {1, 6} (79-79) 2025-05-07T08:51:36.894732Z :DEBUG: Take Data. Partition 2. Read: {1, 7} (80-80) 2025-05-07T08:51:36.894752Z :DEBUG: Take Data. Partition 2. Read: {1, 8} (81-81) 2025-05-07T08:51:36.894788Z :DEBUG: Take Data. Partition 2. Read: {1, 9} (82-82) 2025-05-07T08:51:36.894857Z :DEBUG: Take Data. Partition 2. Read: {1, 10} (83-83) 2025-05-07T08:51:36.894885Z :DEBUG: Take Data. Partition 2. Read: {1, 11} (84-84) 2025-05-07T08:51:36.894905Z :DEBUG: Take Data. Partition 2. Read: {1, 12} (85-85) 2025-05-07T08:51:36.894923Z :DEBUG: Take Data. Partition 2. Read: {1, 13} (86-86) 2025-05-07T08:51:36.894940Z :DEBUG: Take Data. Partition 2. Read: {1, 14} (87-87) 2025-05-07T08:51:36.894963Z :DEBUG: Take Data. Partition 2. Read: {1, 15} (88-88) 2025-05-07T08:51:36.894982Z :DEBUG: Take Data. Partition 2. Read: {1, 16} (89-89) 2025-05-07T08:51:36.894999Z :DEBUG: Take Data. Partition 2. Read: {1, 17} (90-90) 2025-05-07T08:51:36.895017Z :DEBUG: Take Data. Partition 2. Read: {1, 18} (91-91) 2025-05-07T08:51:36.895036Z :DEBUG: Take Data. Partition 2. Read: {1, 19} (92-92) 2025-05-07T08:51:36.895052Z :DEBUG: Take Data. Partition 2. Read: {1, 20} (93-93) 2025-05-07T08:51:36.895069Z :DEBUG: Take Data. Partition 2. Read: {1, 21} (94-94) 2025-05-07T08:51:36.895085Z :DEBUG: Take Data. Partition 2. Read: {1, 22} (95-95) 2025-05-07T08:51:36.895104Z :DEBUG: Take Data. Partition 2. Read: {1, 23} (96-96) 2025-05-07T08:51:36.895118Z :DEBUG: Take Data. Partition 2. Read: {1, 24} (97-97) 2025-05-07T08:51:36.895144Z :DEBUG: Take Data. Partition 2. Read: {1, 25} (98-98) 2025-05-07T08:51:36.895165Z :DEBUG: Take Data. Partition 2. Read: {1, 26} (99-99) 2025-05-07T08:51:36.895190Z :DEBUG: Take Data. Partition 2. Read: {1, 27} (100-100) 2025-05-07T08:51:36.895252Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 50, size 5000 bytes 2025-05-07T08:51:36.895410Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 2500, ReadSizeServerDelta = 0 2025-05-07T08:51:36.896777Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:51:36.896822Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:51:36.896849Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-05-07T08:51:36.897208Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. 
Initializing session 2025-05-07T08:51:36.897764Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-05-07T08:51:36.897928Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:51:36.898336Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-05-07T08:51:36.999473Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:51:37.002205Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-2) 2025-05-07T08:51:37.002279Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-05-07T08:51:37.002322Z :DEBUG: Take Data. Partition 1. Read: {1, 0} (2-2) 2025-05-07T08:51:37.002416Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 6 bytes 2025-05-07T08:51:37.206077Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [1, 3). Partition stream id: 1 2025-05-07T08:51:37.310246Z :DEBUG: [db] [sessionid] [cluster] Committed response: { cookies { assign_id: 1 partition_cookie: 1 } } 2025-05-07T08:51:37.310397Z :INFO: [db] [sessionid] [cluster] Confirm partition stream destroy. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1 2025-05-07T08:51:37.310565Z :DEBUG: [db] [sessionid] [cluster] Abort session to cluster 2025-05-07T08:51:37.311800Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:51:37.311821Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:51:37.311840Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-05-07T08:51:37.312443Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-05-07T08:51:37.312823Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-05-07T08:51:37.312994Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:51:37.321071Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-05-07T08:51:37.423119Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:51:37.423365Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-2) 2025-05-07T08:51:37.423423Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-05-07T08:51:37.423468Z :DEBUG: Take Data. Partition 1. Read: {1, 0} (2-2) 2025-05-07T08:51:37.423549Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [1, 3). Partition stream id: 1 2025-05-07T08:51:37.423652Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 6 bytes 2025-05-07T08:51:37.423888Z :DEBUG: [db] [sessionid] [cluster] Committed response: { cookies { assign_id: 1 partition_cookie: 1 } } 2025-05-07T08:51:37.423989Z :INFO: [db] [sessionid] [cluster] Confirm partition stream destroy. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". 
Partition: 1 2025-05-07T08:51:37.424093Z :DEBUG: [db] [sessionid] [cluster] Abort session to cluster ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/persqueue_public/ut/unittest >> ReadSessionImplTest::PartitionStreamCallbacks [GOOD] Test command err: 2025-05-07T08:51:35.020212Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:51:35.020253Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:51:35.020297Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-05-07T08:51:35.020976Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-05-07T08:51:35.021624Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-05-07T08:51:35.032616Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:51:35.033112Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-05-07T08:51:35.034892Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function 2025-05-07T08:51:35.035382Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function 2025-05-07T08:51:35.035665Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (2-2) 2025-05-07T08:51:35.035828Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-1) 2025-05-07T08:51:35.035969Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-05-07T08:51:35.036012Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (2-2) 2025-05-07T08:51:35.036061Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 8 bytes 2025-05-07T08:51:35.036091Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 8 bytes 2025-05-07T08:51:35.037538Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:51:35.037567Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:51:35.037593Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-05-07T08:51:35.038081Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-05-07T08:51:35.038773Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-05-07T08:51:35.038980Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:51:35.039264Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) Message data size: 10 Compressed message data size: 30 2025-05-07T08:51:35.040265Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function 2025-05-07T08:51:35.040497Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function Getting new event 2025-05-07T08:51:35.040832Z :DEBUG: Decompression task done. 
Partition/PartitionSessionId: 1 (5-8) 2025-05-07T08:51:35.041054Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-4) 2025-05-07T08:51:35.041371Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-05-07T08:51:35.041406Z :DEBUG: Take Data. Partition 1. Read: {0, 1} (2-2) 2025-05-07T08:51:35.041447Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 20 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 1 SeqNo: 42 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 2 SeqNo: 43 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } } 2025-05-07T08:51:35.041604Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [1, 3). Partition stream id: 1 Getting new event 2025-05-07T08:51:35.041670Z :DEBUG: Take Data. Partition 1. Read: {0, 2} (3-3) 2025-05-07T08:51:35.041695Z :DEBUG: Take Data. Partition 1. Read: {1, 0} (4-4) 2025-05-07T08:51:35.041714Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 20 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 3 SeqNo: 44 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 4 SeqNo: 45 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } } 2025-05-07T08:51:35.041848Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [3, 5). Partition stream id: 1 Getting new event 2025-05-07T08:51:35.041922Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (5-5) 2025-05-07T08:51:35.041944Z :DEBUG: Take Data. Partition 1. Read: {0, 1} (6-6) 2025-05-07T08:51:35.041990Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 20 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 5 SeqNo: 46 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". 
Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 6 SeqNo: 47 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } } 2025-05-07T08:51:35.042072Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [5, 7). Partition stream id: 1 Getting new event 2025-05-07T08:51:35.042104Z :DEBUG: Take Data. Partition 1. Read: {0, 2} (7-7) 2025-05-07T08:51:35.042124Z :DEBUG: Take Data. Partition 1. Read: {1, 0} (8-8) 2025-05-07T08:51:35.042143Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 20 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 7 SeqNo: 48 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 8 SeqNo: 49 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } } 2025-05-07T08:51:35.042236Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [7, 9). Partition stream id: 1 2025-05-07T08:51:35.043722Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:51:35.043752Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:51:35.043816Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-05-07T08:51:35.044298Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-05-07T08:51:35.044954Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-05-07T08:51:35.045163Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:51:35.045387Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) Message data size: 100 Compressed message data size: 91 2025-05-07T08:51:35.046474Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function 2025-05-07T08:51:35.046763Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function Getting new event 2025-05-07T08:51:35.047258Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (5-8) 2025-05-07T08:51:35.047508Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-4) 2025-05-07T08:51:35.047665Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-05-07T08:51:35.047710Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 100 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..100 bytes.. Partition stream id: 1 Cluster: "TestCluster". 
Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 1 SeqNo: 42 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } } 2025-05-07T08:51:35.047829Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [1, 2). Partition stream id: 1 Getting new event 2025-05-07T08:51:35.047868Z :DEBUG: Take Data. Partition 1. Read: {0, 1} (2-2) 2025-05-07T08:51:35.047887Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 100 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..100 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 2 SeqNo: 43 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } } 2025-05-07T08:51:35.047956Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [2, 3). Partition stream id: 1 Getting new event 2025-05-07T08:51:35.047985Z :DEBUG: Take Data. Partition 1. Read: {0, 2} (3-3) 2025-05-07T08:51:35.048004Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 100 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..100 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 3 SeqNo: 44 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } } 2025-05-07T08:51:35.048059Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [3, 4). Partition stream id: 1 Getting new event 2025-05-07T08:51:35.048091Z :DEBUG: Take Data. Partition 1. Read: {1, 0} (4-4) 2025-05-07T08:51:35.048128Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 100 bytes DataReceived { PartitionStream ... tream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 190 SeqNo: 231 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 191 SeqNo: 232 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 192 SeqNo: 233 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 193 SeqNo: 234 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } Message { Data: ..1000000 bytes.. 
Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 194 SeqNo: 235 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 195 SeqNo: 236 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 196 SeqNo: 237 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 197 SeqNo: 238 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 198 SeqNo: 239 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 199 SeqNo: 240 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 200 SeqNo: 241 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } } 2025-05-07T08:51:37.357470Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [1, 201). Partition stream id: 1 2025-05-07T08:51:37.456231Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 5, ReadSizeServerDelta = 0 2025-05-07T08:51:37.456303Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 5, ReadSizeServerDelta = 0 2025-05-07T08:51:37.456491Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-05-07T08:51:37.457271Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-05-07T08:51:37.458446Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-05-07T08:51:37.458885Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 5, ReadSizeServerDelta = 0 2025-05-07T08:51:37.459347Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) Message data size: 1000000 Compressed message data size: 3028 Post function Getting new event 2025-05-07T08:51:37.575569Z :DEBUG: Decompression task done. 
Partition/PartitionSessionId: 1 (1-10) 2025-05-07T08:51:37.576623Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-05-07T08:51:37.578630Z :DEBUG: Take Data. Partition 1. Read: {0, 1} (2-2) 2025-05-07T08:51:37.581560Z :DEBUG: Take Data. Partition 1. Read: {0, 2} (3-3) 2025-05-07T08:51:37.582486Z :DEBUG: Take Data. Partition 1. Read: {0, 3} (4-4) 2025-05-07T08:51:37.587612Z :DEBUG: Take Data. Partition 1. Read: {0, 4} (5-5) 2025-05-07T08:51:37.588730Z :DEBUG: Take Data. Partition 1. Read: {0, 5} (6-6) 2025-05-07T08:51:37.589644Z :DEBUG: Take Data. Partition 1. Read: {1, 0} (7-7) 2025-05-07T08:51:37.590678Z :DEBUG: Take Data. Partition 1. Read: {1, 1} (8-8) 2025-05-07T08:51:37.599669Z :DEBUG: Take Data. Partition 1. Read: {1, 2} (9-9) 2025-05-07T08:51:37.600605Z :DEBUG: Take Data. Partition 1. Read: {1, 3} (10-10) 2025-05-07T08:51:37.600697Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 10, size 10000000 bytes 2025-05-07T08:51:37.630138Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 5, ReadSizeServerDelta = 0 DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 1 SeqNo: 42 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 2 SeqNo: 43 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 3 SeqNo: 44 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 4 SeqNo: 45 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 5 SeqNo: 46 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 6 SeqNo: 47 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". 
Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 7 SeqNo: 48 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 8 SeqNo: 49 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 9 SeqNo: 50 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 10 SeqNo: 51 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } } 2025-05-07T08:51:37.642953Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [1, 11). Partition stream id: 1 2025-05-07T08:51:37.649886Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:51:37.649947Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:51:37.649996Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-05-07T08:51:37.650276Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-05-07T08:51:37.650874Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-05-07T08:51:37.651249Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:51:37.651605Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-05-07T08:51:37.652056Z :DEBUG: [db] [sessionid] [cluster] Requesting status for partition stream id: 1 2025-05-07T08:51:37.653217Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:51:37.653237Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:51:37.653257Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-05-07T08:51:37.653652Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-05-07T08:51:37.654388Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-05-07T08:51:37.654760Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:51:37.655911Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:51:37.656510Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-1) 2025-05-07T08:51:37.657395Z :DEBUG: Take Data. Partition 1. 
Read: {0, 0} (1-1) 2025-05-07T08:51:37.657483Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 8 bytes 2025-05-07T08:51:37.657750Z :INFO: [db] [sessionid] [cluster] Confirm partition stream destroy. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1 >> KikimrIcGateway::TestCreateExternalTable [GOOD] >> KikimrIcGateway::TestCreateResourcePool ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/controller/ut_assign_tx_id/unittest >> AssignTxId::Basic [GOOD] Test command err: 2025-05-07T08:51:32.514466Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501623779192197838:2062];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:51:32.514597Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003791/r3tmp/tmprc7YT7/pdisk_1.dat 2025-05-07T08:51:33.126563Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:51:33.136515Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:51:33.143183Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:51:33.158317Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:9785 TServer::EnableGrpc on GrpcPort 19050, node 1 2025-05-07T08:51:33.614886Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:51:33.614921Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:51:33.614928Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:51:33.615062Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:9785 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
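The read-session trace above is easier to follow with the commit semantics spelled out: offsets are acknowledged as half-open ranges (so "Commit offsets [1, 3)" acknowledges offsets 1 and 2), and ReadSizeBudget is the number of bytes the client is still willing to receive before it sends the server a new read request. The following is a minimal illustrative sketch of that accounting only, assuming those semantics; the type and method names (ReadSessionModel, TakeData, Commit) are invented for illustration and are not the real SDK API:

```cpp
#include <cstdint>
#include <iostream>

// Illustrative model only -- not YDB SDK code. It mimics the accounting the
// trace prints: a byte budget (ReadSizeBudget, 50 MiB by default in the log),
// messages delivered as "Take Data" events, and half-open commit ranges.
struct ReadSessionModel {
    int64_t readSizeBudget = 52428800;  // 50 MiB, the default seen in the log
    uint64_t nextOffsetToCommit = 1;

    // Deliver `bytes` of application data covering offsets [first, last].
    void TakeData(uint64_t first, uint64_t last, int64_t bytes) {
        readSizeBudget -= bytes;  // shrinks until a new read request restores it
        std::cout << "Take Data (" << first << "-" << last << "), budget now "
                  << readSizeBudget << "\n";
    }

    // Commit the half-open range [from, to): offsets from..to-1 are acked.
    void Commit(uint64_t from, uint64_t to) {
        nextOffsetToCommit = to;
        std::cout << "Commit offsets [" << from << ", " << to << ")\n";
    }
};

int main() {
    ReadSessionModel s;
    s.TakeData(1, 2, 6);  // "Number of messages 2, size 6 bytes" in the trace
    s.Commit(1, 3);       // commits offsets 1 and 2; reading resumes at 3
}
```

This also explains the "ReadSizeBudget = 5" run later in the section: with a 5-byte budget the client can only request tiny reads, which is exactly what that test exercises.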
2025-05-07T08:51:34.050249Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:51:35.943993Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623792077100391:2331], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:51:35.944128Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:51:36.526764Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateReplication, opId: 281474976710658:0, at schemeshard: 72057594046644480 2025-05-07T08:51:36.556713Z node 1 :REPLICATION_CONTROLLER TRACE: controller.cpp:41: [controller 72075186224037888] OnActivateExecutor 2025-05-07T08:51:36.556820Z node 1 :REPLICATION_CONTROLLER DEBUG: tx_init_schema.cpp:17: [controller 72075186224037888][TxInitSchema] Execute 2025-05-07T08:51:36.559844Z node 1 :REPLICATION_CONTROLLER DEBUG: tx_init_schema.cpp:26: [controller 72075186224037888][TxInitSchema] Complete 2025-05-07T08:51:36.559899Z node 1 :REPLICATION_CONTROLLER DEBUG: tx_init.cpp:239: [controller 72075186224037888][TxInit] Execute 2025-05-07T08:51:36.560125Z node 1 :REPLICATION_CONTROLLER DEBUG: tx_init.cpp:244: [controller 72075186224037888][TxInit] Complete 2025-05-07T08:51:36.560134Z node 1 :REPLICATION_CONTROLLER TRACE: controller.cpp:113: [controller 72075186224037888] SwitchToWork 2025-05-07T08:51:36.603327Z node 1 :REPLICATION_CONTROLLER TRACE: controller.cpp:142: [controller 72075186224037888] Handle NKikimrReplication.TEvCreateReplication PathId { OwnerId: 72057594046644480 LocalId: 2 } OperationId { TxId: 281474976710658 PartId: 0 } Config { SrcConnectionParams { Endpoint: "localhost:19050" Database: "/Root" OAuthToken { Token: "***" } EnableSsl: false } Specific { Targets { SrcPath: "/Root/table" DstPath: "/Root/replica" } } ConsistencySettings { Global { CommitIntervalMilliSeconds: 10000 } } } 2025-05-07T08:51:36.603616Z node 1 :REPLICATION_CONTROLLER DEBUG: tx_create_replication.cpp:22: [controller 72075186224037888][TxCreateReplication] Execute: NKikimrReplication.TEvCreateReplication PathId { OwnerId: 72057594046644480 LocalId: 2 } OperationId { TxId: 281474976710658 PartId: 0 } Config { SrcConnectionParams { Endpoint: "localhost:19050" Database: "/Root" OAuthToken { Token: "***" } EnableSsl: false } Specific { Targets { SrcPath: "/Root/table" DstPath: "/Root/replica" } } ConsistencySettings { Global { CommitIntervalMilliSeconds: 10000 } } } 2025-05-07T08:51:36.603732Z node 1 :REPLICATION_CONTROLLER NOTICE: tx_create_replication.cpp:43: [controller 72075186224037888][TxCreateReplication] Add replication: rid# 1, pathId# [OwnerId: 72057594046644480, LocalPathId: 2] 2025-05-07T08:51:36.604462Z node 1 :REPLICATION_CONTROLLER DEBUG: tx_create_replication.cpp:57: [controller 72075186224037888][TxCreateReplication] Complete 2025-05-07T08:51:36.606847Z node 1 :REPLICATION_CONTROLLER TRACE: tenant_resolver.cpp:33: [TenantResolver][rid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root/replication TableId: [72057594046644480:2:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindReplication DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-05-07T08:51:36.607154Z node 1 :REPLICATION_CONTROLLER TRACE: tenant_resolver.cpp:33: [TenantResolver][rid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: 
[72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-05-07T08:51:36.607312Z node 1 :REPLICATION_CONTROLLER TRACE: controller.cpp:252: [controller 72075186224037888] Handle NKikimr::NReplication::NController::TEvPrivate::TEvResolveTenantResult { ReplicationId: 1 Tenant: /Root Sucess: 1 } 2025-05-07T08:51:36.607325Z node 1 :REPLICATION_CONTROLLER NOTICE: controller.cpp:267: [controller 72075186224037888] Tenant resolved: rid# 1, tenant# /Root 2025-05-07T08:51:36.607343Z node 1 :REPLICATION_CONTROLLER INFO: controller.cpp:271: [controller 72075186224037888] Discover tenant nodes: tenant# /Root 2025-05-07T08:51:36.607927Z node 1 :REPLICATION_CONTROLLER TRACE: controller.cpp:297: [controller 72075186224037888] Handle NKikimr::TEvDiscovery::TEvDiscoveryData 2025-05-07T08:51:36.607988Z node 1 :REPLICATION_CONTROLLER DEBUG: controller.cpp:321: [controller 72075186224037888] Create session: nodeId# 1 TClient::Ls request: /Root/replication TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "replication" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeReplication CreateFinished: true CreateTxId: 281474976710658 CreateStep: 1746607896656 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 ReplicationVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsIns... 
(TRUNCATED) 2025-05-07T08:51:36.627812Z node 1 :REPLICATION_CONTROLLER TRACE: controller.cpp:757: [controller 72075186224037888] Handle NKikimrReplication.TEvGetTxId Versions { Step: 1 TxId: 0 } 2025-05-07T08:51:36.627907Z node 1 :REPLICATION_CONTROLLER DEBUG: tx_assign_tx_id.cpp:76: [controller 72075186224037888][TxAssignTxId] Execute: pending# 1, assigned# 0, allocated# 0 2025-05-07T08:51:36.627970Z node 1 :REPLICATION_CONTROLLER DEBUG: tx_assign_tx_id.cpp:142: [controller 72075186224037888][TxAssignTxId] Complete: pending# 1, assigned# 0, allocated# 0, exhausted# 1 2025-05-07T08:51:36.628085Z node 1 :REPLICATION_CONTROLLER TRACE: tx_assign_tx_id.cpp:174: [controller 72075186224037888] Handle NKikimr::TEvTxAllocatorClient::TEvAllocateResult 2025-05-07T08:51:36.628132Z node 1 :REPLICATION_CONTROLLER DEBUG: tx_assign_tx_id.cpp:76: [controller 72075186224037888][TxAssignTxId] Execute: pending# 1, assigned# 0, allocated# 5 2025-05-07T08:51:36.628709Z node 1 :REPLICATION_CONTROLLER DEBUG: tx_assign_tx_id.cpp:142: [controller 72075186224037888][TxAssignTxId] Complete: pending# 0, assigned# 1, allocated# 4, exhausted# 0 2025-05-07T08:51:36.629193Z node 1 :REPLICATION_CONTROLLER TRACE: controller.cpp:757: [controller 72075186224037888] Handle NKikimrReplication.TEvGetTxId Versions { Step: 9999 TxId: 0 } 2025-05-07T08:51:36.629260Z node 1 :REPLICATION_CONTROLLER DEBUG: tx_assign_tx_id.cpp:76: [controller 72075186224037888][TxAssignTxId] Execute: pending# 1, assigned# 1, allocated# 4 2025-05-07T08:51:36.629315Z node 1 :REPLICATION_CONTROLLER DEBUG: tx_assign_tx_id.cpp:142: [controller 72075186224037888][TxAssignTxId] Complete: pending# 0, assigned# 1, allocated# 4, exhausted# 0 2025-05-07T08:51:36.629613Z node 1 :REPLICATION_CONTROLLER TRACE: controller.cpp:757: [controller 72075186224037888] Handle NKikimrReplication.TEvGetTxId Versions { Step: 9999 TxId: 18446744073709551615 } 2025-05-07T08:51:36.629638Z node 1 :REPLICATION_CONTROLLER DEBUG: tx_assign_tx_id.cpp:76: [controller 72075186224037888][TxAssignTxId] Execute: pending# 1, assigned# 1, allocated# 4 2025-05-07T08:51:36.629673Z node 1 :REPLICATION_CONTROLLER DEBUG: tx_assign_tx_id.cpp:142: [controller 72075186224037888][TxAssignTxId] Complete: pending# 0, assigned# 1, allocated# 4, exhausted# 0 2025-05-07T08:51:36.629877Z node 1 :REPLICATION_CONTROLLER TRACE: controller.cpp:757: [controller 72075186224037888] Handle NKikimrReplication.TEvGetTxId Versions { Step: 10000 TxId: 0 } 2025-05-07T08:51:36.629918Z node 1 :REPLICATION_CONTROLLER DEBUG: tx_assign_tx_id.cpp:76: [controller 72075186224037888][TxAssignTxId] Execute: pending# 1, assigned# 1, allocated# 4 2025-05-07T08:51:36.630343Z node 1 :REPLICATION_CONTROLLER DEBUG: tx_assign_tx_id.cpp:142: [controller 72075186224037888][TxAssignTxId] Complete: pending# 0, assigned# 2, allocated# 3, exhausted# 0 2025-05-07T08:51:36.630676Z node 1 :REPLICATION_CONTROLLER TRACE: controller.cpp:757: [controller 72075186224037888] Handle NKikimrReplication.TEvGetTxId Versions { Step: 5000 TxId: 0 } 2025-05-07T08:51:36.630718Z node 1 :REPLICATION_CONTROLLER DEBUG: tx_assign_tx_id.cpp:76: [controller 72075186224037888][TxAssignTxId] Execute: pending# 1, assigned# 2, allocated# 3 2025-05-07T08:51:36.630748Z node 1 :REPLICATION_CONTROLLER DEBUG: tx_assign_tx_id.cpp:142: [controller 72075186224037888][TxAssignTxId] Complete: pending# 0, assigned# 2, allocated# 3, exhausted# 0 2025-05-07T08:51:36.630989Z node 1 :REPLICATION_CONTROLLER TRACE: controller.cpp:757: [controller 72075186224037888] Handle 
NKikimrReplication.TEvGetTxId Versions { Step: 20000 TxId: 0 } Versions { Step: 30000 TxId: 0 } Versions { Step: 40000 TxId: 0 } 2025-05-07T08:51:36.631035Z node 1 :REPLICATION_CONTROLLER DEBUG: tx_assign_tx_id.cpp:76: [controller 72075186224037888][TxAssignTxId] Execute: pending# 3, assigned# 2, allocated# 3 2025-05-07T08:51:36.631354Z node 1 :REPLICATION_CONTROLLER DEBUG: tx_assign_tx_id.cpp:142: [controller 72075186224037888][TxAssignTxId] Complete: pending# 0, assigned# 5, allocated# 0, exhausted# 0 2025-05-07T08:51:36.631461Z node 1 :REPLICATION_CONTROLLER TRACE: tx_assign_tx_id.cpp:174: [controller 72075186224037888] Handle NKikimr::TEvTxAllocatorClient::TEvAllocateResult 2025-05-07T08:51:36.631492Z node 1 :REPLICATION_CONTROLLER DEBUG: tx_assign_tx_id.cpp:76: [controller 72075186224037888][TxAssignTxId] Execute: pending# 0, assigned# 5, allocated# 5 2025-05-07T08:51:36.631525Z node 1 :REPLICATION_CONTROLLER DEBUG: tx_assign_tx_id.cpp:142: [controller 72075186224037888][TxAssignTxId] Complete: pending# 0, assigned# 5, allocated# 5, exhausted# 0 2025-05-07T08:51:36.631710Z node 1 :REPLICATION_CONTROLLER TRACE: controller.cpp:757: [controller 72075186224037888] Handle NKikimrReplication.TEvGetTxId Versions { Step: 50000 TxId: 0 } 2025-05-07T08:51:36.631737Z node 1 :REPLICATION_CONTROLLER DEBUG: tx_assign_tx_id.cpp:76: [controller 72075186224037888][TxAssignTxId] Execute: pending# 1, assigned# 5, allocated# 5 2025-05-07T08:51:36.632132Z node 1 :REPLICATION_CONTROLLER DEBUG: tx_assign_tx_id.cpp:142: [controller 72075186224037888][TxAssignTxId] Complete: pending# 0, assigned# 5, allocated# 5, exhausted# 0 2025-05-07T08:51:36.678462Z node 1 :REPLICATION_CONTROLLER TRACE: target_discoverer.cpp:27: [TargetDiscoverer][rid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribePathResponse { Result: { status: SCHEME_ERROR, issues: {
: Error: Path not found } } } 2025-05-07T08:51:36.678540Z node 1 :REPLICATION_CONTROLLER ERROR: target_discoverer.cpp:78: [TargetDiscoverer][rid 1] Describe path failed: path# /Root/table, status# SCHEME_ERROR, issues# {
: Error: Path not found } 2025-05-07T08:51:36.678723Z node 1 :REPLICATION_CONTROLLER TRACE: controller.cpp:172: [controller 72075186224037888] Handle NKikimr::NReplication::NController::TEvPrivate::TEvDiscoveryTargetsResult { ReplicationId: 1 ToAdd [] ToDelete [] Failed [/Root/table: SCHEME_ERROR ({
: Error: Path not found })] } 2025-05-07T08:51:36.678850Z node 1 :REPLICATION_CONTROLLER DEBUG: tx_discovery_targets_result.cpp:24: [controller 72075186224037888][TxDiscoveryTargetsResult] Execute: NKikimr::NReplication::NController::TEvPrivate::TEvDiscoveryTargetsResult { ReplicationId: 1 ToAdd [] ToDelete [] Failed [/Root/table: SCHEME_ERROR ({
: Error: Path not found })] } 2025-05-07T08:51:36.678900Z node 1 :REPLICATION_CONTROLLER ERROR: tx_discovery_targets_result.cpp:76: [controller 72075186224037888][TxDiscoveryTargetsResult] Discovery error: rid# 1, error# /Root/table: SCHEME_ERROR ({
: Error: Path not found }) 2025-05-07T08:51:36.679567Z node 1 :REPLICATION_CONTROLLER DEBUG: tx_discovery_targets_result.cpp:89: [controller 72075186224037888][TxDiscoveryTargetsResult] Complete |89.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/controller/ut_assign_tx_id/unittest >> KqpPg::CreateIndex [GOOD] >> KqpPg::CreateNotNullPgColumn ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/unittest >> TPartitionTests::AfterRestart_2 [GOOD] Test command err: 2025-05-07T08:51:01.805031Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501623644911398454:2278];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:51:01.805142Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-05-07T08:51:01.866519Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7501623645499182825:2091];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:51:01.866568Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-05-07T08:51:02.160050Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-05-07T08:51:02.160999Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003e80/r3tmp/tmpGT9FkK/pdisk_1.dat 2025-05-07T08:51:02.630513Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:51:02.687932Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:51:02.688029Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:51:02.693532Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:51:02.697449Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:51:02.697546Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:51:02.707176Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-05-07T08:51:02.712442Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 12940, node 1 2025-05-07T08:51:02.960381Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/zvgn/003e80/r3tmp/yandexYgRDo4.tmp 2025-05-07T08:51:02.960414Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/zvgn/003e80/r3tmp/yandexYgRDo4.tmp 2025-05-07T08:51:02.960593Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/zvgn/003e80/r3tmp/yandexYgRDo4.tmp 2025-05-07T08:51:02.960749Z node 1 
:NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-05-07T08:51:03.053331Z INFO: TTestServer started on Port 19142 GrpcPort 12940 TClient is connected to server localhost:19142 PQClient connected to localhost:12940 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:51:03.711279Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-05-07T08:51:03.800237Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... waiting... waiting... 2025-05-07T08:51:06.526732Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7501623666974019689:2317], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:51:06.527217Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623666386235848:2343], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:51:06.529479Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7501623666974019684:2314], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:51:06.529599Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:51:06.532036Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710662:3, at schemeshard: 72057594046644480 2025-05-07T08:51:06.532554Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623666386235839:2340], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:51:06.532692Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:51:06.542221Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623666386235888:2346], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:51:06.542305Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:51:06.546366Z node 2 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [2:7501623666974019700:2171] txid# 281474976715657, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exists but creating right now (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateCreate)" severity: 1 } 2025-05-07T08:51:06.561906Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501623666386235853:2344], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710662 completed, doublechecking } 2025-05-07T08:51:06.562173Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7501623666974019699:2318], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710662 completed, doublechecking } 2025-05-07T08:51:06.630095Z node 2 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [2:7501623666974019726:2177] txid# 281474976715658, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:51:06.663323Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501623666386235949:2791] txid# 281474976710663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:51:06.855419Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501623644911398454:2278];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:51:06.855681Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:51:06.866615Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7501623645499182825:2091];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:51:06.866710Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:51:06.875791Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7501623666974019740:2322], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-05-07T08:51:06.876175Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2147: SessionId: ydb://session/3?node_id=2&id=NjcwMTg3NTktNGEwYTcwZDgtZDM4MzIwYWEtZGRmYzJlMjY=, ActorId: [2:7501623666974019668:2313], ActorState: ExecuteState, TraceId: 01jtmz20kv3zm8p6tth2z2zzeg, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-05-07T08:51:06.876253Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T08:51:06.878776Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7501623666386235968:2350], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-05-07T08:51:06.880078Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2147: SessionId: ... ery: --!syntax_v1 DECLARE $Hash AS Uint64; DECLARE $Topic AS Utf8; DECLARE $SourceId AS Utf8; SELECT Partition, CreateTime, AccessTime, SeqNo FROM `//Root/.metadata/TopicPartitionsMapping` WHERE Hash == $Hash AND Topic == $Topic AND ProducerId == $SourceId; 2025-05-07T08:51:33.118640Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:63: TTableHelper UpdateQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint64; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64; DECLARE $SeqNo AS Uint64; UPSERT INTO `//Root/.metadata/TopicPartitionsMapping` (Hash, Topic, ProducerId, CreateTime, AccessTime, Partition, SeqNo) VALUES ($Hash, $Topic, $SourceId, $CreateTime, $AccessTime, $Partition, $SeqNo); 2025-05-07T08:51:33.118648Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:64: TTableHelper UpdateAccessTimeQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint64; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64; UPDATE `//Root/.metadata/TopicPartitionsMapping` SET AccessTime = $AccessTime WHERE Hash = $Hash AND Topic = $Topic AND ProducerId = $SourceId AND Partition = $Partition; 2025-05-07T08:51:33.118667Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__sm_chooser_actor.h:116: TPartitionChooser [3:7501623779665954173:3796] (SourceId=A_Source_5, PreferedPartition=(NULL)) GetOwnershipFast Partition=1 TabletId=1001 2025-05-07T08:51:33.118774Z node 3 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_ut.cpp:382: StateMockWork, received event# 269877760, Sender [3:7501623779665954174:3796], Recipient [3:7501623762486084008:3234]: NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 1001 Status: OK ServerId: [3:7501623779665954173:3796] Leader: 1 Dead: 0 Generation: 1 VersionInfo: } 2025-05-07T08:51:33.118860Z node 3 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_ut.cpp:382: StateMockWork, received event# 271188557, Sender [3:7501623779665954173:3796], Recipient [3:7501623762486084008:3234]: NKikimrPQ.TEvCheckPartitionStatusRequest Partition: 1 SourceId: "A_Source_5" 2025-05-07T08:51:33.118940Z node 3 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_impl__sm_chooser_actor.h:139: StateOwnershipFast, received event# 271188558, Sender [3:7501623762486084008:3234], Recipient [3:7501623779665954173:3796]: NKikimrPQ.TEvCheckPartitionStatusResponse Status: Active 2025-05-07T08:51:33.118963Z node 3 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_impl__abstract_chooser_actor.h:88: TPartitionChooser [3:7501623779665954173:3796] (SourceId=A_Source_5, PreferedPartition=(NULL)) InitTable: SourceId=A_Source_5 TopicsAreFirstClassCitizen=1 UseSrcIdMetaMappingInFirstClass=1 2025-05-07T08:51:33.119027Z node 3 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_ut.cpp:382: StateMockWork, received event# 65543, Sender [3:7501623779665954173:3796], Recipient [3:7501623762486084008:3234]: NActors::TEvents::TEvPoison 2025-05-07T08:51:33.119189Z node 3 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_impl__abstract_chooser_actor.h:101: StateInitTable, received event# 277020685, Sender [3:7501623719536409050:2070], Recipient 
[3:7501623779665954173:3796]: NKikimr::NMetadata::NProvider::TEvManagerPrepared 2025-05-07T08:51:33.119213Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:111: TPartitionChooser [3:7501623779665954173:3796] (SourceId=A_Source_5, PreferedPartition=(NULL)) StartKqpSession 2025-05-07T08:51:33.121795Z node 3 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_impl__abstract_chooser_actor.h:132: StateCreateKqpSession, received event# 271646728, Sender [3:7501623719536409263:2268], Recipient [3:7501623779665954173:3796]: NKikimrKqp.TEvCreateSessionResponse Error: "" Response { SessionId: "ydb://session/3?node_id=3&id=NDc4OTk1NTYtNjg5MmI1ZGEtNjNjOGI4N2MtOTFiMDc2NWI=" NodeId: 3 } YdbStatus: SUCCESS ResourceExhausted: false 2025-05-07T08:51:33.121816Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:142: TPartitionChooser [3:7501623779665954173:3796] (SourceId=A_Source_5, PreferedPartition=(NULL)) Select from the table 2025-05-07T08:51:33.330547Z node 3 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_impl__abstract_chooser_actor.h:163: StateSelect, received event# 271646721, Sender [3:7501623719536409263:2268], Recipient [3:7501623779665954173:3796]: NKikimrKqp.TEvQueryResponse Response { SessionId: "ydb://session/3?node_id=3&id=NDc4OTk1NTYtNjg5MmI1ZGEtNjNjOGI4N2MtOTFiMDc2NWI=" PreparedQuery: "c5cfeb0b-93d8ac78-9acb8661-2974b07f" QueryParameters { Name: "$Hash" Type { Kind: Data Data { Scheme: 4 } } } QueryParameters { Name: "$Topic" Type { Kind: Data Data { Scheme: 4608 } } } QueryParameters { Name: "$SourceId" Type { Kind: Data Data { Scheme: 4608 } } } TxMeta { id: "01jtmz2tt27tfrwmgwepp3zhsv" } YdbResults { columns { name: "Partition" type { optional_type { item { type_id: UINT32 } } } } columns { name: "CreateTime" type { optional_type { item { type_id: UINT64 } } } } columns { name: "AccessTime" type { optional_type { item { type_id: UINT64 } } } } columns { name: "SeqNo" type { optional_type { item { type_id: UINT64 } } } } rows { items { uint32_value: 0 } items { uint64_value: 1746607892953 } items { uint64_value: 1746607892953 } items { uint64_value: 13 } } } QueryDiagnostics: "" } YdbStatus: SUCCESS ConsumedRu: 123 2025-05-07T08:51:33.330769Z node 3 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_impl__abstract_chooser_actor.h:151: TPartitionChooser [3:7501623779665954173:3796] (SourceId=A_Source_5, PreferedPartition=(NULL)) Selected from table PartitionId=0 SeqNo=13 2025-05-07T08:51:33.330792Z node 3 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_impl__sm_chooser_actor.h:209: TPartitionChooser [3:7501623779665954173:3796] (SourceId=A_Source_5, PreferedPartition=(NULL)) OnPartitionChosen 2025-05-07T08:51:33.330942Z node 3 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_ut.cpp:382: StateMockWork, received event# 269877760, Sender [3:7501623779665954218:3796], Recipient [3:7501623762486084008:3234]: NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 1001 Status: OK ServerId: [3:7501623779665954173:3796] Leader: 1 Dead: 0 Generation: 1 VersionInfo: } 2025-05-07T08:51:33.331011Z node 3 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_ut.cpp:382: StateMockWork, received event# 271188557, Sender [3:7501623779665954173:3796], Recipient [3:7501623762486084008:3234]: NKikimrPQ.TEvCheckPartitionStatusRequest Partition: 1 2025-05-07T08:51:33.331104Z node 3 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_impl__abstract_chooser_actor.h:240: StateCheckPartition, received event# 271188558, Sender [3:7501623762486084008:3234], Recipient 
[3:7501623779665954173:3796]: NKikimrPQ.TEvCheckPartitionStatusResponse Status: Active 2025-05-07T08:51:33.331135Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:174: TPartitionChooser [3:7501623779665954173:3796] (SourceId=A_Source_5, PreferedPartition=(NULL)) Update the table 2025-05-07T08:51:33.331414Z node 3 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_ut.cpp:382: StateMockWork, received event# 65543, Sender [3:7501623779665954173:3796], Recipient [3:7501623762486084008:3234]: NActors::TEvents::TEvPoison Received TEvChooseResult: 1 2025-05-07T08:51:33.470803Z node 3 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_impl__abstract_chooser_actor.h:212: StateUpdate, received event# 271646721, Sender [3:7501623719536409263:2268], Recipient [3:7501623779665954173:3796]: NKikimrKqp.TEvQueryResponse Response { SessionId: "ydb://session/3?node_id=3&id=NDc4OTk1NTYtNjg5MmI1ZGEtNjNjOGI4N2MtOTFiMDc2NWI=" PreparedQuery: "67d8134d-d10fd55b-11f9135-6d3b9aea" QueryParameters { Name: "$AccessTime" Type { Kind: Data Data { Scheme: 4 } } } QueryParameters { Name: "$CreateTime" Type { Kind: Data Data { Scheme: 4 } } } QueryParameters { Name: "$Hash" Type { Kind: Data Data { Scheme: 4 } } } QueryParameters { Name: "$Partition" Type { Kind: Data Data { Scheme: 2 } } } QueryParameters { Name: "$SourceId" Type { Kind: Data Data { Scheme: 4608 } } } QueryParameters { Name: "$SeqNo" Type { Kind: Data Data { Scheme: 4 } } } QueryParameters { Name: "$Topic" Type { Kind: Data Data { Scheme: 4608 } } } TxMeta { } QueryDiagnostics: "" } YdbStatus: SUCCESS ConsumedRu: 67 2025-05-07T08:51:33.470866Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:183: TPartitionChooser [3:7501623779665954173:3796] (SourceId=A_Source_5, PreferedPartition=(NULL)) HandleUpdate PartitionPersisted=0 Status=SUCCESS 2025-05-07T08:51:33.470925Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:305: TPartitionChooser [3:7501623779665954173:3796] (SourceId=A_Source_5, PreferedPartition=(NULL)) ReplyResult: Partition=1, SeqNo=13 2025-05-07T08:51:33.470959Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:268: TPartitionChooser [3:7501623779665954173:3796] (SourceId=A_Source_5, PreferedPartition=(NULL)) Start idle Run query: --!syntax_v1 SELECT Partition, SeqNo FROM `//Root/.metadata/TopicPartitionsMapping` WHERE Hash = 11131928866524144434 AND Topic = "Root" AND ProducerId = "00415F536F757263655F35" 2025-05-07T08:51:33.663205Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976720711. Ctx: { TraceId: 01jtmz2tzydcpvbmyd1rfww7kz, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=Njg2ZTU2ZTItYmIxNzQwMGItZjNjZWE0MmYtZDZmZjdjMGQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:51:35.703957Z node 5 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-05-07T08:51:35.704084Z node 5 :PERSQUEUE INFO: pq_impl.cpp:794: [PQ: 72057594037927937] doesn't have tx writes info 2025-05-07T08:51:35.734330Z node 5 :PERSQUEUE INFO: partition_init.cpp:879: [PQ: 72057594037927937, Partition: 3, State: StateInit] bootstrapping 3 [5:179:2194] 2025-05-07T08:51:35.737621Z node 5 :PERSQUEUE INFO: partition_init.cpp:784: [Root/PQ/rt3.dc1--account--topic:3:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp from keys completed. 
Value 2025-05-07T08:51:35.000000Z 2025-05-07T08:51:35.737720Z node 5 :PERSQUEUE INFO: partition.cpp:557: [PQ: 72057594037927937, Partition: 3, State: StateInit] init complete for topic 'Root/PQ/rt3.dc1--account--topic' partition 3 generation 0 [5:179:2194] Got cmd write: CmdWrite { Key: "i0000000003" Value: "\010\000\020\n\030\000(\330\343\305\317\3522" StorageChannel: INLINE } CmdWrite { Key: "I0000000003" Value: "\010\271`\020\316\255\001" StorageChannel: INLINE } CmdWrite { Key: "m0000000003cclient" Value: "\010\004\020\000\030\000\"\007session(\0000\000@\001" StorageChannel: INLINE } CmdWrite { Key: "m0000000003uclient" Value: "\004\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000session" StorageChannel: INLINE } 2025-05-07T08:51:36.561587Z node 6 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-05-07T08:51:36.561668Z node 6 :PERSQUEUE INFO: pq_impl.cpp:794: [PQ: 72057594037927937] doesn't have tx writes info 2025-05-07T08:51:36.587136Z node 6 :PERSQUEUE INFO: partition_init.cpp:879: [PQ: 72057594037927937, Partition: 3, State: StateInit] bootstrapping 3 [6:179:2194] 2025-05-07T08:51:36.589862Z node 6 :PERSQUEUE INFO: partition_init.cpp:784: [Root/PQ/rt3.dc1--account--topic:3:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp from keys completed. Value 2025-05-07T08:51:36.000000Z 2025-05-07T08:51:36.589950Z node 6 :PERSQUEUE INFO: partition.cpp:557: [PQ: 72057594037927937, Partition: 3, State: StateInit] init complete for topic 'Root/PQ/rt3.dc1--account--topic' partition 3 generation 0 [6:179:2194] >> TTransferTests::Create_Disabled >> IndexBuildTest::CancellationNotEnoughRetries [GOOD] >> IndexBuildTest::CancellationNoTable >> YdbIndexTable::MultiShardTableUniqAndNonUniqIndex |89.5%| [TA] $(B)/ydb/core/tx/replication/controller/ut_assign_tx_id/test-results/unittest/{meta.json ... 
results_accumulator.log} >> TTransferTests::CreateInParallel [GOOD] >> TTransferTests::CreateDropRecreate >> KikimrIcGateway::TestListPath [GOOD] >> KikimrIcGateway::TestDropTable >> IndexBuildTest::CancellationNoTable [GOOD] >> YdbIndexTable::MultiShardTableOneIndexIndexOverlapDataColumn >> YdbIndexTable::MultiShardTableOneUniqIndex >> YdbIndexTable::MultiShardTableOneIndex >> ReadSessionImplTest::UsesOnRetryStateDuringRetries [GOOD] >> RetryPolicy::TWriteSession_TestPolicy >> TTransferTests::Create_Disabled [GOOD] >> TTransferTests::CreateWithoutCredentials >> DataShardSnapshots::LockedWriteBulkUpsertConflict+UseSink >> DataShardSnapshots::VolatileSnapshotSplit >> KikimrIcGateway::TestLoadTableMetadata >> YdbIndexTable::OnlineBuild >> KqpPg::InsertValuesFromTableWithDefaultTextNotNullButNull-useSink [GOOD] >> KqpPg::InsertValuesFromTableWithDefaultNegativeCase+useSink >> TTransferTests::CreateDropRecreate [GOOD] >> TTransferTests::ConsistencyLevel ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index_build/unittest >> IndexBuildTest::CancellationNoTable [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140] 2025-05-07T08:50:23.264616Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:50:23.264720Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:50:23.264773Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:50:23.264819Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:50:23.264879Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:50:23.264910Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:50:23.264985Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:50:23.265069Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:50:23.265862Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:50:23.266486Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:50:23.363338Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 
2025-05-07T08:50:23.363409Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:50:23.384195Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:50:23.384458Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:50:23.384646Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:50:23.398555Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:50:23.398956Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:50:23.399753Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:50:23.399974Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:50:23.403632Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:50:23.405072Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:50:23.405144Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:50:23.405228Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:50:23.405284Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:50:23.405427Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:50:23.405683Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:50:23.414675Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T08:50:23.572148Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:50:23.572417Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:50:23.572672Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:50:23.572942Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:50:23.573010Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:50:23.577104Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:50:23.577286Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:50:23.577504Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:50:23.577564Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:50:23.577608Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:50:23.577648Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:50:23.581353Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:50:23.581468Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:50:23.581529Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:50:23.585415Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:50:23.585498Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:50:23.585556Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:50:23.585640Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:50:23.589930Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:50:23.592100Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:50:23.592315Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 
FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:50:23.593275Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:50:23.593418Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:50:23.593494Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:50:23.593823Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:50:23.593885Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:50:23.594100Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:50:23.594195Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:50:23.596142Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:50:23.596233Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:50:23.596429Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:50:23.596480Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 
rationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:51:39.680126Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:51:39.680201Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:51:39.680249Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:51:39.685736Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:51:39.685831Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:51:39.685881Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:51:39.694826Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:51:39.694910Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:51:39.694967Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:51:39.695041Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:51:39.695231Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:51:39.699252Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:51:39.699517Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:51:39.701210Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:51:39.701380Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 131 RawX2: 8589936747 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:51:39.701460Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 
72057594046678944 2025-05-07T08:51:39.701895Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:51:39.703299Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:51:39.704344Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:51:39.704502Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:51:39.711040Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:51:39.712350Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:51:39.712673Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:51:39.712743Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [2:206:2208], at schemeshard: 72057594046678944, txId: 1, path id: 1 2025-05-07T08:51:39.713320Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:51:39.713387Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046678944] TDone opId# 1:0 ProgressState 2025-05-07T08:51:39.713526Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#1:0 progress is 1/1 2025-05-07T08:51:39.713576Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-05-07T08:51:39.713625Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#1:0 progress is 1/1 2025-05-07T08:51:39.713663Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-05-07T08:51:39.713714Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 1, ready parts: 1/1, is published: false 2025-05-07T08:51:39.713768Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-05-07T08:51:39.713813Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 1:0 2025-05-07T08:51:39.713857Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 1:0 2025-05-07T08:51:39.714006Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-05-07T08:51:39.714070Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 1, publications: 1, subscribers: 0 
2025-05-07T08:51:39.714125Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 1, [OwnerId: 72057594046678944, LocalPathId: 1], 3 2025-05-07T08:51:39.715039Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-05-07T08:51:39.715179Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-05-07T08:51:39.715228Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1 2025-05-07T08:51:39.715278Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 3 2025-05-07T08:51:39.715332Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:51:39.715463Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1, subscribers: 0 2025-05-07T08:51:39.722337Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1 2025-05-07T08:51:39.723280Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1, at schemeshard: 72057594046678944 2025-05-07T08:51:39.724052Z node 2 :TX_PROXY DEBUG: proxy_impl.cpp:434: actor# [2:269:2260] Bootstrap 2025-05-07T08:51:39.745850Z node 2 :TX_PROXY DEBUG: proxy_impl.cpp:453: actor# [2:269:2260] Become StateWork (SchemeCache [2:274:2265]) 2025-05-07T08:51:39.754479Z node 2 :BUILD_INDEX NOTICE: schemeshard_build_index__create.cpp:23: TIndexBuilder::TXTYPE_CREATE_INDEX_BUILD: DoExecute TxId: 101 DatabaseName: "/MyRoot" Settings { source_path: "/MyRoot/Table" index { name: "index1" index_columns: "index" global_index { settings { } } } max_shards_in_flight: 2 ScanSettings { MaxBatchRows: 1 } } 2025-05-07T08:51:39.754817Z node 2 :BUILD_INDEX NOTICE: schemeshard_build_index_tx_base.h:91: TIndexBuilder::TXTYPE_CREATE_INDEX_BUILD: Reply TxId: 101 Status: BAD_REQUEST Issues { message: "Check failed: path: \'/MyRoot/Table\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" severity: 1 } SchemeStatus: 2 2025-05-07T08:51:39.755851Z node 2 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [2:269:2260] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-05-07T08:51:39.763353Z node 2 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 BUILDINDEX RESPONSE CREATE: NKikimrIndexBuilder.TEvCreateResponse TxId: 101 Status: BAD_REQUEST Issues { message: "Check failed: path: \'/MyRoot/Table\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" severity: 1 } SchemeStatus: 2 
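For reference, the rejected request above asks schemeshard to build index "index1" over column "index" of "/MyRoot/Table" before that table exists, so the BAD_REQUEST ("path hasn't been resolved") is the expected outcome. A roughly equivalent statement in YQL would look like the sketch below; this is a hedged illustration (assuming a global sync index), not the literal gRPC request the test sends:

--!syntax_v1
-- Hedged YQL equivalent of the TXTYPE_CREATE_INDEX_BUILD request logged above.
-- Issued against a missing /MyRoot/Table, it fails the same way the builder
-- does: the path cannot be resolved past /MyRoot.
ALTER TABLE `/MyRoot/Table` ADD INDEX index1 GLOBAL SYNC ON (`index`);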
TestWaitNotification wait txId: 101 2025-05-07T08:51:39.763756Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 101: send EvNotifyTxCompletion 2025-05-07T08:51:39.763796Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 101 2025-05-07T08:51:39.764404Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 2025-05-07T08:51:39.764516Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-05-07T08:51:39.764551Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [2:281:2272] TestWaitNotification: OK eventTxId 101 2025-05-07T08:51:39.765094Z node 2 :BUILD_INDEX DEBUG: schemeshard_build_index__list.cpp:23: TIndexBuilder::TXTYPE_LIST_INDEX_BUILD: DoExecute DatabaseName: "/MyRoot" PageSize: 100 PageToken: "" 2025-05-07T08:51:39.765186Z node 2 :BUILD_INDEX DEBUG: schemeshard_build_index_tx_base.h:93: TIndexBuilder::TXTYPE_LIST_INDEX_BUILD: Reply Status: SUCCESS NextPageToken: "0" BUILDINDEX RESPONSE LIST: NKikimrIndexBuilder.TEvListResponse Status: SUCCESS NextPageToken: "0" >> TTransferTests::CreateWithoutCredentials [GOOD] >> TTransferTests::CreateWrongConfig >> TPartitionChooserSuite::TPartitionChooserActor_SplitMergeEnabled_PreferedPartition_InactiveConfig_Test [GOOD] >> TPartitionChooserSuite::TPartitionChooserActor_SplitMergeEnabled_PreferedPartition_InactiveActor_Test >> KqpPg::DropTableIfExists_GenericQuery [GOOD] >> KqpPg::EquiJoin+useSink >> TTransferTests::CreateWrongConfig [GOOD] >> TTransferTests::CreateWrongBatchSize >> KikimrIcGateway::TestCreateResourcePool [GOOD] >> KikimrIcGateway::TestALterResourcePool >> TTransferTests::ConsistencyLevel [GOOD] >> TTransferTests::Alter >> KqpPg::InsertValuesFromTableWithDefaultBool-useSink [GOOD] >> KqpPg::InsertNoTargetColumns_SerialNotNull+useSink >> KikimrIcGateway::TestLoadExternalTable [GOOD] >> KikimrIcGateway::TestLoadServiceAccountSecretValueFromExternalDataSourceMetadata >> TTransferTests::CreateWrongBatchSize [GOOD] >> TTransferTests::CreateWrongFlushIntervalIsSmall >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-4 >> KqpPg::CreateNotNullPgColumn [GOOD] >> KqpPg::CreateSequence |89.5%| [TA] $(B)/ydb/core/tx/schemeshard/ut_index_build/test-results/unittest/{meta.json ... 
results_accumulator.log} >> KikimrIcGateway::TestDropTable [GOOD] >> KikimrIcGateway::TestDropResourcePool >> TTransferTests::Alter [GOOD] >> TTransferTests::CreateWrongFlushIntervalIsSmall [GOOD] >> TTransferTests::CreateWrongFlushIntervalIsBig >> KqpPg::CreateUniqComplexPgColumn-useSink [GOOD] >> KqpPg::CreateTempTable >> StatisticsSaveLoad::Simple [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_transfer/unittest >> TTransferTests::Alter [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140] 2025-05-07T08:51:34.746928Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:51:34.747025Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:51:34.747214Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:51:34.747262Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:51:34.747317Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:51:34.747361Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:51:34.747439Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:51:34.747518Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:51:34.748359Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:51:34.748787Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:51:34.834908Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:51:34.834980Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:51:34.852239Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:51:34.852501Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:51:34.852681Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:51:34.859673Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: 
TTxUpgradeSchema.Complete 2025-05-07T08:51:34.860079Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:51:34.860879Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:51:34.861111Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:51:34.864887Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:51:34.866600Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:51:34.866707Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:51:34.866796Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:51:34.866852Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:51:34.866901Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:51:34.867153Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:51:34.875254Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T08:51:34.996634Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:51:34.996893Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:51:34.997102Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:51:34.997302Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:51:34.997346Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:51:34.999621Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 
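The TTransferTests cases threaded through this log (CreateWrongBatchSize, CreateWrongFlushIntervalIsSmall/Big, ConsistencyLevel, Alter) validate scheme-level transfer objects. As a loose illustration of what such a test provokes, a transfer might be declared in YQL roughly as below; the statement shape and the option names are my assumption based on the YDB transfer feature, not taken from this build, and the "wrong" test variants pass out-of-range values for exactly this kind of option:

--!syntax_v1
-- Hedged sketch only: the option names (FLUSH_INTERVAL, BATCH_SIZE_BYTES) and
-- the overall statement shape are assumptions; the unit tests reject
-- too-small/too-big flush intervals and batch sizes at CREATE time.
CREATE TRANSFER my_transfer
  FROM my_topic TO my_table
  WITH (
      FLUSH_INTERVAL = Interval("PT1S"),
      BATCH_SIZE_BYTES = 1048576
  );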
2025-05-07T08:51:34.999762Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:51:34.999982Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:51:35.000066Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:51:35.000110Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:51:35.000144Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:51:35.002131Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:51:35.002203Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:51:35.002234Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:51:35.004225Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:51:35.004269Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:51:35.004305Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:51:35.004354Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:51:35.019239Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:51:35.023052Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:51:35.023316Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:51:35.024358Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:51:35.024528Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } 
Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:51:35.024578Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:51:35.024986Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:51:35.025056Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:51:35.025250Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:51:35.025335Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:51:35.033894Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:51:35.033957Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:51:35.034183Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:51:35.034213Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 
lterReplication TConfigureParts opId# 104:0 HandleReply NKikimrReplication.TEvAlterReplicationResult OperationId { TxId: 104 PartId: 0 } Origin: 72075186233409547 Status: SUCCESS 2025-05-07T08:51:45.440089Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 104:0 3 -> 128 2025-05-07T08:51:45.440207Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046678944 2025-05-07T08:51:45.440254Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:693: Ack tablet strongly msg opId: 104:0 from tablet: 72057594046678944 to tablet: 72075186233409547 cookie: 72057594046678944:3 2025-05-07T08:51:45.441850Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 104:0, at schemeshard: 72057594046678944 2025-05-07T08:51:45.441908Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-05-07T08:51:45.441955Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:275: Activate send for 104:0 2025-05-07T08:51:45.442109Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 2146435072, Sender [6:124:2150], Recipient [6:124:2150]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-05-07T08:51:45.442141Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4857: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-05-07T08:51:45.442187Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 104:0, at schemeshard: 72057594046678944 2025-05-07T08:51:45.442231Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_replication.cpp:189: [72057594046678944] TAlterReplication TPropose opId# 104:0 ProgressState 2025-05-07T08:51:45.442276Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046678944 2025-05-07T08:51:45.442329Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 104 ready parts: 1/1 2025-05-07T08:51:45.442524Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 104 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:51:45.444000Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-05-07T08:51:45.444048Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 104:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:104 msg type: 269090816 2025-05-07T08:51:45.444157Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 104, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 104 at step: 5000005 FAKE_COORDINATOR: advance: minStep5000005 State->FrontStep: 5000004 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 104 at step: 5000005 2025-05-07T08:51:45.444526Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269287424, 
Sender [6:134:2157], Recipient [6:258:2249] 2025-05-07T08:51:45.444599Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4860: StateWork, processing event TEvTxProcessing::TEvPlanStep 2025-05-07T08:51:45.444728Z node 6 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000005, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:51:45.444865Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 104 Coordinator: 72057594046316545 AckTo { RawX1: 134 RawX2: 25769805933 } } Step: 5000005 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:51:45.444943Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_replication.cpp:203: [72057594046678944] TAlterReplication TPropose opId# 104:0 HandleReply TEvOperationPlan: step# 5000005 2025-05-07T08:51:45.445106Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 104:0 128 -> 240 2025-05-07T08:51:45.445351Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046678944 2025-05-07T08:51:45.445439Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-05-07T08:51:45.445538Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:693: Ack tablet strongly msg opId: 104:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:104 2025-05-07T08:51:45.447245Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-05-07T08:51:45.447314Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:384: Ack coordinator stepId#5000005 first txId#104 countTxs#1 2025-05-07T08:51:45.447388Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:354: Ack mediator stepId#5000005 2025-05-07T08:51:45.447441Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:275: Activate send for 104:0 2025-05-07T08:51:45.447671Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 2146435072, Sender [6:124:2150], Recipient [6:124:2150]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-05-07T08:51:45.447717Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4857: StateWork, processing event TEvPrivate::TEvProgressOperation FAKE_COORDINATOR: Erasing txId 104 2025-05-07T08:51:45.447856Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:51:45.447918Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 104, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-05-07T08:51:45.448188Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:51:45.448252Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [6:207:2209], at schemeshard: 72057594046678944, txId: 104, path id: 3 2025-05-07T08:51:45.448787Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 104:0, at schemeshard: 72057594046678944 2025-05-07T08:51:45.448853Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046678944] TDone opId# 104:0 ProgressState 2025-05-07T08:51:45.449000Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046678944 2025-05-07T08:51:45.449058Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#104:0 progress is 1/1 2025-05-07T08:51:45.449108Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-05-07T08:51:45.449164Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#104:0 progress is 1/1 2025-05-07T08:51:45.449214Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-05-07T08:51:45.449275Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 104, ready parts: 1/1, is published: false 2025-05-07T08:51:45.449350Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-05-07T08:51:45.449411Z node 6 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 104:0 2025-05-07T08:51:45.449462Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 104:0 2025-05-07T08:51:45.449633Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4 2025-05-07T08:51:45.449697Z node 6 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 104, publications: 1, subscribers: 0 2025-05-07T08:51:45.449744Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 104, [OwnerId: 72057594046678944, LocalPathId: 3], 4 2025-05-07T08:51:45.454891Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 274137603, Sender [6:207:2209], Recipient [6:124:2150]: NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 3] Version: 4 } 2025-05-07T08:51:45.454981Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4924: StateWork, processing event NSchemeBoard::NSchemeshardEvents::TEvUpdateAck 2025-05-07T08:51:45.455129Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 4 PathOwnerId: 72057594046678944, cookie: 104 2025-05-07T08:51:45.455282Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 4 PathOwnerId: 72057594046678944, cookie: 104 2025-05-07T08:51:45.455347Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 104 2025-05-07T08:51:45.455403Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at 
schemeshard: 72057594046678944, txId: 104, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 4 2025-05-07T08:51:45.455470Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-05-07T08:51:45.455608Z node 6 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 104, subscribers: 0 2025-05-07T08:51:45.455666Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046678944 2025-05-07T08:51:45.466388Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-05-07T08:51:45.467348Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 2025-05-07T08:51:45.467434Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 TestModificationResult got TxId: 104, wait until txId: 104
>> KikimrIcGateway::TestLoadTableMetadata [GOOD]
>> KikimrIcGateway::TestLoadTokenSecretValueFromExternalDataSourceMetadata
>> TTransferTests::CreateWrongFlushIntervalIsBig [GOOD]
>> StatisticsSaveLoad::Delete [GOOD]
>> DataShardSnapshots::LockedWriteBulkUpsertConflict+UseSink [GOOD]
>> DataShardSnapshots::LockedWriteBulkUpsertConflict-UseSink
>> DataShardSnapshots::VolatileSnapshotSplit [GOOD]
>> DataShardSnapshots::VolatileSnapshotMerge
------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/database/ut/unittest >> StatisticsSaveLoad::Simple [GOOD]
Test command err:
2025-05-07T08:51:36.013852Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:451:2413], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:51:36.014182Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:51:36.014272Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0038de/r3tmp/tmpISTeaN/pdisk_1.dat 2025-05-07T08:51:36.419610Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 11704, node 1 2025-05-07T08:51:36.691872Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:51:36.691938Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:51:36.691973Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:51:36.692401Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-05-07T08:51:36.695424Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-05-07T08:51:36.796868Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:51:36.796986Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:51:36.811678Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:4247 2025-05-07T08:51:37.398411Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-05-07T08:51:40.919601Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-05-07T08:51:40.964542Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:51:40.964673Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:51:41.020299Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-05-07T08:51:41.023146Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:51:41.326084Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-05-07T08:51:41.326743Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-05-07T08:51:41.327327Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 
2025-05-07T08:51:41.327458Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-05-07T08:51:41.327562Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-05-07T08:51:41.327858Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-05-07T08:51:41.327960Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-05-07T08:51:41.328040Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-05-07T08:51:41.328125Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-05-07T08:51:41.543596Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:51:41.543719Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:51:41.562990Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:51:41.769109Z node 2 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:51:41.844663Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-05-07T08:51:41.844789Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-05-07T08:51:41.921237Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-05-07T08:51:41.929423Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-05-07T08:51:41.929688Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-05-07T08:51:41.929780Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-05-07T08:51:41.929842Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-05-07T08:51:41.929905Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-05-07T08:51:41.929991Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-05-07T08:51:41.930061Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-05-07T08:51:41.930861Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-05-07T08:51:41.975772Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7823: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-05-07T08:51:41.975905Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7853: ConnectToSA(), pipe client id: [2:1868:2600], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-05-07T08:51:41.984090Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1878:2607] 2025-05-07T08:51:42.005594Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1921:2627] 
2025-05-07T08:51:42.006139Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1921:2627], schemeshard id = 72075186224037897 2025-05-07T08:51:42.007000Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-05-07T08:51:42.108094Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-05-07T08:51:42.108169Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-05-07T08:51:42.108253Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-05-07T08:51:42.126355Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897 2025-05-07T08:51:42.135980Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-05-07T08:51:42.136144Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-05-07T08:51:42.346970Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-05-07T08:51:42.595663Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-05-07T08:51:42.695885Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-05-07T08:51:43.541951Z node 1 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-05-07T08:51:43.542502Z node 1 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. Database: /Root/Database 2025-05-07T08:51:43.591119Z node 1 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tags AS List; DECLARE $data AS List; UPSERT INTO `.metadata/_statistics` (owner_id, local_path_id, stat_type, column_tag, data) VALUES ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]), ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]); 2025-05-07T08:51:43.600422Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2230:3070], DatabaseId: /Root/Database, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:51:43.600541Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2246:3075], DatabaseId: /Root/Database, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:51:43.600629Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root/Database, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:51:43.622439Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:2, at schemeshard: 72075186224037897 2025-05-07T08:51:43.733609Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:2250:3078], DatabaseId: /Root/Database, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-05-07T08:51:44.103652Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:2342:3110] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/Database/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72075186224037897, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:51:44.424458Z node 1 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 1 ], ReplyToActorId[ [1:2364:3122]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-05-07T08:51:44.424688Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-05-07T08:51:44.424773Z node 1 :STATISTICS DEBUG: service_impl.cpp:1219: ConnectToSA(), pipe client id = [1:2366:3124] 2025-05-07T08:51:44.424854Z node 1 :STATISTICS DEBUG: service_impl.cpp:1248: SyncNode(), pipe client id = [1:2366:3124] 2025-05-07T08:51:44.425447Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:2367:2830] 2025-05-07T08:51:44.425747Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:2366:3124], server id = [2:2367:2830], tablet id = 72075186224037894, status = OK 2025-05-07T08:51:44.428155Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:133: [72075186224037894] EvConnectNode, pipe server id = [2:2367:2830], node id = 1, have schemeshards count = 0, need schemeshards count = 1 2025-05-07T08:51:44.428270Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:314: [72075186224037894] SendStatisticsToNode(), node id = 1, schemeshard count = 1 2025-05-07T08:51:44.428677Z node 1 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 1 2025-05-07T08:51:44.428775Z node 1 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 1, ReplyToActorId = [1:2364:3122], StatRequests.size() = 1 2025-05-07T08:51:44.591217Z node 1 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=1&id=NzllZWQ4OWUtNzliZDIzY2YtMjdjYzFkNGEtZmVkZWFkNWY=, TxId: 2025-05-07T08:51:44.591298Z node 1 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=1&id=NzllZWQ4OWUtNzliZDIzY2YtMjdjYzFkNGEtZmVkZWFkNWY=, TxId: 2025-05-07T08:51:44.592199Z node 1 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. 
Database: /Root/Database 2025-05-07T08:51:44.600740Z node 1 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tag AS Uint32; SELECT data FROM `.metadata/_statistics` WHERE owner_id = $owner_id AND local_path_id = $local_path_id AND stat_type = $stat_type AND column_tag = $column_tag; 2025-05-07T08:51:44.727048Z node 1 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 2 ], ReplyToActorId[ [1:2400:3145]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-05-07T08:51:44.727288Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-05-07T08:51:44.727337Z node 1 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 2, ReplyToActorId = [1:2400:3145], StatRequests.size() = 1 2025-05-07T08:51:44.981880Z node 1 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=1&id=ZGYyOGJlN2MtMjA5MmMzMTgtMzdiZjcwZDAtNzNhZjU5OGI=, TxId: 01jtmz365t9cktwrp254gkfc2x 2025-05-07T08:51:44.982096Z node 1 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=1&id=ZGYyOGJlN2MtMjA5MmMzMTgtMzdiZjcwZDAtNzNhZjU5OGI=, TxId: 01jtmz365t9cktwrp254gkfc2x 2025-05-07T08:51:44.984521Z node 1 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. Database: /Root/Database 2025-05-07T08:51:44.987777Z node 1 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tag AS Uint32; SELECT data FROM `.metadata/_statistics` WHERE owner_id = $owner_id AND local_path_id = $local_path_id AND stat_type = $stat_type AND column_tag = $column_tag; 2025-05-07T08:51:45.023502Z node 1 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=1&id=ZWI0MDhlNzctZjFiMjIzYzktZTFhZTYwODItN2ZmMTA2ZDE=, TxId: 01jtmz366w4r9ttfeg12xdyzrm 2025-05-07T08:51:45.023655Z node 1 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=1&id=ZWI0MDhlNzctZjFiMjIzYzktZTFhZTYwODItN2ZmMTA2ZDE=, TxId: 01jtmz366w4r9ttfeg12xdyzrm
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_transfer/unittest >> TTransferTests::CreateWrongFlushIntervalIsBig [GOOD]
Test command err:
Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140]
IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140]
Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140]
2025-05-07T08:51:39.577552Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:51:39.577669Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:51:39.577719Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime#
0.010000s 2025-05-07T08:51:39.577776Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:51:39.577831Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:51:39.577868Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:51:39.577927Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:51:39.578032Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:51:39.578846Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:51:39.579212Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:51:39.671353Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:51:39.671414Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:51:39.692479Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:51:39.692815Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:51:39.692979Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:51:39.699784Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:51:39.700075Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:51:39.700707Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:51:39.700884Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:51:39.703742Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:51:39.705074Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:51:39.705145Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:51:39.705233Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:51:39.705281Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 
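Annotation (not part of the captured output): this block is the stderr of TTransferTests::CreateWrongFlushIntervalIsBig. The CreateTransfer proposal logged further down carries Batching { FlushIntervalMilliSeconds: 86400001 }, exactly one millisecond over the 24-hour cap (24 * 60 * 60 * 1000 = 86400000 ms), so schemeshard rejects it with StatusInvalidParameter: "Flush interval must be less than or equal to 24 hours". A minimal sketch of the rejected transaction, reassembled from the TTxOperationPropose entry below:

    Transaction {
      WorkingDir: "/MyRoot"
      OperationType: ESchemeOpCreateTransfer
      Replication {
        Name: "Transfer"
        Config {
          TransferSpecific {
            Target { SrcPath: "/MyRoot1/Table" DstPath: "/MyRoot/Table" }
            # 24 h = 86400000 ms; 86400001 ms exceeds the cap by 1 ms
            Batching { FlushIntervalMilliSeconds: 86400001 }
          }
        }
      }
    }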
2025-05-07T08:51:39.705322Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:51:39.705540Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:51:39.712562Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T08:51:39.879598Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:51:39.879855Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:51:39.880115Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:51:39.880352Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:51:39.880416Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:51:39.883561Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:51:39.883701Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:51:39.883915Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:51:39.883982Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:51:39.884023Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:51:39.884065Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:51:39.887795Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:51:39.887871Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:51:39.887910Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:51:39.890276Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 
1:0, at schemeshard: 72057594046678944 2025-05-07T08:51:39.890334Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:51:39.890380Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:51:39.890452Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:51:39.894137Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:51:39.897684Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:51:39.897895Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:51:39.898911Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:51:39.899094Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:51:39.899143Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:51:39.899474Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:51:39.899534Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:51:39.899700Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:51:39.899792Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:51:39.902187Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:51:39.902262Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 
72057594046678944, LocalPathId: 1] 2025-05-07T08:51:39.902451Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:51:39.902506Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... :308:2295], Recipient [6:123:2149]: NKikimrTxColumnShard.TEvNotifyTxCompletionResult Origin: 72075186233409546 TxId: 101 2025-05-07T08:51:47.217185Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4883: StateWork, processing event TEvColumnShard::TEvNotifyTxCompletionResult 2025-05-07T08:51:47.217287Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6106: Handle TEvNotifyTxCompletionResult, at schemeshard: 72057594046678944, message: Origin: 72075186233409546 TxId: 101 2025-05-07T08:51:47.217350Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation FindRelatedPartByTabletId, TxId: 101, tablet: 72075186233409546, partId: 0 2025-05-07T08:51:47.217529Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 101:0, at schemeshard: 72057594046678944, message: Origin: 72075186233409546 TxId: 101 2025-05-07T08:51:47.217742Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046678944 FAKE_COORDINATOR: Erasing txId 101 2025-05-07T08:51:47.226670Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-05-07T08:51:47.226782Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-05-07T08:51:47.226842Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:275: Activate send for 101:0 2025-05-07T08:51:47.227058Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 2146435072, Sender [6:123:2149], Recipient [6:123:2149]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-05-07T08:51:47.227108Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4857: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-05-07T08:51:47.227182Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 101:0, at schemeshard: 72057594046678944 2025-05-07T08:51:47.227238Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046678944] TDone opId# 101:0 ProgressState 2025-05-07T08:51:47.227406Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046678944 2025-05-07T08:51:47.227451Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#101:0 progress is 1/1 2025-05-07T08:51:47.227508Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-05-07T08:51:47.227570Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#101:0 progress is 1/1 2025-05-07T08:51:47.227621Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-05-07T08:51:47.227676Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 101, 
ready parts: 1/1, is published: true 2025-05-07T08:51:47.227784Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [6:341:2320] message: TxId: 101 2025-05-07T08:51:47.227859Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-05-07T08:51:47.227927Z node 6 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 101:0 2025-05-07T08:51:47.227987Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 101:0 2025-05-07T08:51:47.228239Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-05-07T08:51:47.230973Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-05-07T08:51:47.231141Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:630: Send to actor: [6:341:2320] msg type: 271124998 msg: NKikimrScheme.TEvNotifyTxCompletionResult TxId: 101 at schemeshard: 72057594046678944 2025-05-07T08:51:47.231419Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-05-07T08:51:47.231483Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [6:342:2321] 2025-05-07T08:51:47.231795Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269877764, Sender [6:344:2323], Recipient [6:123:2149]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-05-07T08:51:47.231859Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4938: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-05-07T08:51:47.231914Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5761: Server pipe is reset, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 101 TestModificationResults wait txId: 102 2025-05-07T08:51:47.232810Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271122432, Sender [6:388:2360], Recipient [6:123:2149]: {TEvModifySchemeTransaction txid# 102 TabletId# 72057594046678944} 2025-05-07T08:51:47.232889Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4851: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-05-07T08:51:47.252997Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateTransfer Replication { Name: "Transfer" Config { TransferSpecific { Target { SrcPath: "/MyRoot1/Table" DstPath: "/MyRoot/Table" } Batching { FlushIntervalMilliSeconds: 86400001 } } } } } TxId: 102 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:51:47.253412Z node 6 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_replication.cpp:348: [72057594046678944] TCreateReplication Propose: opId# 102:0, path# /MyRoot/Transfer 2025-05-07T08:51:47.253555Z node 6 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 102:1, propose status:StatusInvalidParameter, reason: Flush interval must be less than or equal to 24 hours, at schemeshard: 72057594046678944 2025-05-07T08:51:47.253853Z node 6 
:FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046678944 2025-05-07T08:51:47.259277Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 102, response: Status: StatusInvalidParameter Reason: "Flush interval must be less than or equal to 24 hours" TxId: 102 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:51:47.259534Z node 6 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 102, database: /MyRoot, subject: , status: StatusInvalidParameter, reason: Flush interval must be less than or equal to 24 hours, operation: CREATE TRANSFER, path: /MyRoot/Transfer 2025-05-07T08:51:47.259613Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 TestModificationResult got TxId: 102, wait until txId: 102 TestWaitNotification wait txId: 102 2025-05-07T08:51:47.260007Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2025-05-07T08:51:47.260079Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 2025-05-07T08:51:47.260602Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269877761, Sender [6:394:2366], Recipient [6:123:2149]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-05-07T08:51:47.260680Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4937: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-05-07T08:51:47.260732Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5713: Pipe server connected, at tablet: 72057594046678944 2025-05-07T08:51:47.260927Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271124996, Sender [6:341:2320], Recipient [6:123:2149]: NKikimrScheme.TEvNotifyTxCompletion TxId: 102 2025-05-07T08:51:47.260977Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4853: StateWork, processing event TEvSchemeShard::TEvNotifyTxCompletion 2025-05-07T08:51:47.261094Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 2025-05-07T08:51:47.261237Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-05-07T08:51:47.261295Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [6:392:2364] 2025-05-07T08:51:47.261539Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269877764, Sender [6:394:2366], Recipient [6:123:2149]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-05-07T08:51:47.261588Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4938: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-05-07T08:51:47.261639Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5761: Server pipe is reset, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 102 2025-05-07T08:51:47.262460Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271122945, Sender [6:395:2367], Recipient [6:123:2149]: NKikimrSchemeOp.TDescribePath Path: "/MyRoot/Transfer" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false 
ReturnBoundaries: false ShowPrivateTable: false } 2025-05-07T08:51:47.262540Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4852: StateWork, processing event TEvSchemeShard::TEvDescribeScheme 2025-05-07T08:51:47.262713Z node 6 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Transfer" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T08:51:47.263001Z node 6 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Transfer" took 288us result status StatusPathDoesNotExist 2025-05-07T08:51:47.263247Z node 6 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/Transfer\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/Transfer" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: true } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944
>> KikimrIcGateway::TestALterResourcePool [GOOD]
|89.6%| [TA] $(B)/ydb/core/tx/schemeshard/ut_transfer/test-results/unittest/{meta.json ... results_accumulator.log}
>> StatisticsSaveLoad::ForbidAccess [GOOD]
>> KqpVectorIndexes::OrderByCosineSimilarityNullableLevel2
------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/database/ut/unittest >> StatisticsSaveLoad::Delete [GOOD]
Test command err:
2025-05-07T08:51:36.147168Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:451:2413], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:51:36.147425Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:51:36.147502Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00389a/r3tmp/tmprPitXJ/pdisk_1.dat 2025-05-07T08:51:36.560155Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 15406, node 1 2025-05-07T08:51:36.825886Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:51:36.825932Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:51:36.825954Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:51:36.826276Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-05-07T08:51:36.834192Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-05-07T08:51:36.920219Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:51:36.920341Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:51:36.936317Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:64559 2025-05-07T08:51:37.525913Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-05-07T08:51:41.473398Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-05-07T08:51:41.516894Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:51:41.517040Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:51:41.568388Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-05-07T08:51:41.573045Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:51:41.863647Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-05-07T08:51:41.864426Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-05-07T08:51:41.865291Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 
2025-05-07T08:51:41.865522Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-05-07T08:51:41.865733Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-05-07T08:51:41.866344Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-05-07T08:51:41.866487Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-05-07T08:51:41.866584Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-05-07T08:51:41.866698Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-05-07T08:51:42.084205Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:51:42.084337Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:51:42.099306Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:51:42.322502Z node 2 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:51:42.387645Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-05-07T08:51:42.387777Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-05-07T08:51:42.467189Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-05-07T08:51:42.468883Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-05-07T08:51:42.469146Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-05-07T08:51:42.469222Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-05-07T08:51:42.469285Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-05-07T08:51:42.469348Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-05-07T08:51:42.469438Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-05-07T08:51:42.469508Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-05-07T08:51:42.476353Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-05-07T08:51:42.623117Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7823: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-05-07T08:51:42.623261Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7853: ConnectToSA(), pipe client id: [2:1868:2600], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-05-07T08:51:42.643285Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1878:2607] 2025-05-07T08:51:42.662904Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1921:2627] 
2025-05-07T08:51:42.663424Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1921:2627], schemeshard id = 72075186224037897 2025-05-07T08:51:42.664213Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-05-07T08:51:42.714623Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-05-07T08:51:42.716854Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-05-07T08:51:42.717006Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-05-07T08:51:42.750275Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897 2025-05-07T08:51:42.760552Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-05-07T08:51:42.760759Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-05-07T08:51:43.067481Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-05-07T08:51:43.302136Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-05-07T08:51:43.370806Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-05-07T08:51:44.230935Z node 1 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-05-07T08:51:44.231528Z node 1 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. Database: /Root/Database 2025-05-07T08:51:44.255550Z node 1 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tags AS List; DECLARE $data AS List; UPSERT INTO `.metadata/_statistics` (owner_id, local_path_id, stat_type, column_tag, data) VALUES ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]), ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]); 2025-05-07T08:51:44.260872Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2230:3070], DatabaseId: /Root/Database, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:51:44.261010Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2246:3075], DatabaseId: /Root/Database, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:51:44.261095Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root/Database, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:51:44.272268Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:2, at schemeshard: 72075186224037897 2025-05-07T08:51:44.349986Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:2250:3078], DatabaseId: /Root/Database, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-05-07T08:51:44.657018Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:2337:3107] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/Database/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72075186224037897, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:51:45.152382Z node 1 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 1 ], ReplyToActorId[ [1:2359:3119]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-05-07T08:51:45.152671Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-05-07T08:51:45.152783Z node 1 :STATISTICS DEBUG: service_impl.cpp:1219: ConnectToSA(), pipe client id = [1:2361:3121] 2025-05-07T08:51:45.152866Z node 1 :STATISTICS DEBUG: service_impl.cpp:1248: SyncNode(), pipe client id = [1:2361:3121] 2025-05-07T08:51:45.153443Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:2362:2828] 2025-05-07T08:51:45.153716Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:2361:3121], server id = [2:2362:2828], tablet id = 72075186224037894, status = OK 2025-05-07T08:51:45.153922Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:133: [72075186224037894] EvConnectNode, pipe server id = [2:2362:2828], node id = 1, have schemeshards count = 0, need schemeshards count = 1 2025-05-07T08:51:45.154031Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:314: [72075186224037894] SendStatisticsToNode(), node id = 1, schemeshard count = 1 2025-05-07T08:51:45.154324Z node 1 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 1 2025-05-07T08:51:45.154406Z node 1 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 1, ReplyToActorId = [1:2359:3119], StatRequests.size() = 1 2025-05-07T08:51:45.351214Z node 1 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=1&id=NjE0Nzk3OTktNmY3N2EwZWUtODYxNmY2MjItY2MwOGFmZWU=, TxId: 2025-05-07T08:51:45.351316Z node 1 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=1&id=NjE0Nzk3OTktNmY3N2EwZWUtODYxNmY2MjItY2MwOGFmZWU=, TxId: 2025-05-07T08:51:45.352572Z node 1 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. 
Database: /Root/Database 2025-05-07T08:51:45.356250Z node 1 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DELETE FROM `.metadata/_statistics` WHERE owner_id = $owner_id AND local_path_id = $local_path_id; 2025-05-07T08:51:45.413528Z node 1 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 2 ], ReplyToActorId[ [1:2395:3142]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-05-07T08:51:45.413742Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-05-07T08:51:45.413798Z node 1 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 2, ReplyToActorId = [1:2395:3142], StatRequests.size() = 1 2025-05-07T08:51:45.620973Z node 1 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=1&id=N2U5OTA3ZGYtZjU1YjU2YjctZThiNWExYi0xNTYwNWJlYg==, TxId: 2025-05-07T08:51:45.621073Z node 1 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=1&id=N2U5OTA3ZGYtZjU1YjU2YjctZThiNWExYi0xNTYwNWJlYg==, TxId: 2025-05-07T08:51:45.623475Z node 1 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. Database: /Root/Database 2025-05-07T08:51:45.627434Z node 1 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tag AS Uint32; SELECT data FROM `.metadata/_statistics` WHERE owner_id = $owner_id AND local_path_id = $local_path_id AND stat_type = $stat_type AND column_tag = $column_tag; 2025-05-07T08:51:45.708537Z node 1 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 3 ], ReplyToActorId[ [1:2427:3157]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-05-07T08:51:45.708779Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 3 ] 2025-05-07T08:51:45.708841Z node 1 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 3, ReplyToActorId = [1:2427:3157], StatRequests.size() = 1 2025-05-07T08:51:46.023478Z node 1 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=1&id=YTIxMWMyMjEtZDAwYjI4OTQtZDI4NjhkMTYtZjM4MmI4YzE=, TxId: 01jtmz375z0tk7ezhaqvjgf2hz 2025-05-07T08:51:46.023726Z node 1 :STATISTICS WARN: query_actor.cpp:372: [TQueryBase] Finish with BAD_REQUEST, Issues: {
: Error: No data }, SessionId: ydb://session/3?node_id=1&id=YTIxMWMyMjEtZDAwYjI4OTQtZDI4NjhkMTYtZjM4MmI4YzE=, TxId: 01jtmz375z0tk7ezhaqvjgf2hz >> KqpPg::InsertValuesFromTableWithDefaultNegativeCase+useSink [GOOD] >> KqpPg::InsertValuesFromTableWithDefaultNegativeCase-useSink >> BasicUsage::WriteAndReadSomeMessagesWithAsyncCompression [GOOD] >> BasicUsage::WriteAndReadSomeMessagesWithSyncCompression ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/provider/ut/unittest >> KikimrIcGateway::TestALterResourcePool [GOOD] Test command err: Trying to start YDB, gRPC: 11343, MsgBus: 21854 2025-05-07T08:51:34.908695Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501623784198478516:2062];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:51:34.908774Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/004683/r3tmp/tmpKRNXFL/pdisk_1.dat 2025-05-07T08:51:35.254521Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 11343, node 1 2025-05-07T08:51:35.312316Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:51:35.312435Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:51:35.317365Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:51:35.355831Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:51:35.355861Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:51:35.355874Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:51:35.356029Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:21854 TClient is connected to server localhost:21854 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-05-07T08:51:35.886696Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:51:35.905106Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-05-07T08:51:35.923950Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976715658:2, at schemeshard: 72057594046644480 2025-05-07T08:51:35.942450Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 Trying to start YDB, gRPC: 12997, MsgBus: 14435 2025-05-07T08:51:38.719425Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7501623804524408794:2073];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:51:38.719975Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/004683/r3tmp/tmpFMydys/pdisk_1.dat 2025-05-07T08:51:39.009365Z node 2 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 12997, node 2 2025-05-07T08:51:39.082658Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:51:39.082740Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:51:39.094915Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:51:39.166184Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:51:39.166210Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:51:39.166217Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:51:39.166348Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:14435 TClient is connected to server localhost:14435 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:51:39.731604Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:51:39.755263Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480 Trying to start YDB, gRPC: 1213, MsgBus: 32502 2025-05-07T08:51:43.562271Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7501623825418998504:2060];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:51:43.565411Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/004683/r3tmp/tmpLh3TOg/pdisk_1.dat 2025-05-07T08:51:43.815140Z node 3 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:51:43.824366Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:51:43.834125Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:51:43.836339Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 1213, node 3 2025-05-07T08:51:43.986643Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:51:43.986670Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:51:43.986677Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:51:43.986801Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:32502 TClient is connected to server localhost:32502 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:51:44.768135Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:51:44.777897Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T08:51:44.795557Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480 2025-05-07T08:51:44.829785Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterResourcePool, opId: 281474976710659:0, at schemeshard: 72057594046644480 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/database/ut/unittest >> StatisticsSaveLoad::ForbidAccess [GOOD] Test command err: 2025-05-07T08:51:36.134292Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:451:2413], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:51:36.134548Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:51:36.134647Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003909/r3tmp/tmpx2WrxN/pdisk_1.dat 2025-05-07T08:51:36.551353Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 6577, node 1 2025-05-07T08:51:36.803036Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:51:36.803122Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:51:36.803159Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:51:36.803575Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-05-07T08:51:36.810587Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-05-07T08:51:36.901152Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:51:36.901307Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:51:36.915899Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:14527 2025-05-07T08:51:37.521819Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-05-07T08:51:41.243141Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-05-07T08:51:41.311007Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:51:41.311146Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:51:41.364763Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-05-07T08:51:41.367169Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:51:41.652136Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-05-07T08:51:41.652856Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-05-07T08:51:41.653444Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 
2025-05-07T08:51:41.653582Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-05-07T08:51:41.653816Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-05-07T08:51:41.653909Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-05-07T08:51:41.653985Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-05-07T08:51:41.654055Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-05-07T08:51:41.654109Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-05-07T08:51:41.859182Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:51:41.859328Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:51:41.875975Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:51:42.073483Z node 2 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:51:42.206801Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-05-07T08:51:42.206956Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-05-07T08:51:42.285027Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-05-07T08:51:42.287032Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-05-07T08:51:42.287294Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-05-07T08:51:42.287404Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-05-07T08:51:42.287462Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-05-07T08:51:42.287531Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-05-07T08:51:42.287590Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-05-07T08:51:42.287677Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-05-07T08:51:42.288365Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-05-07T08:51:42.377376Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7823: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-05-07T08:51:42.377523Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7853: ConnectToSA(), pipe client id: [2:1868:2600], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-05-07T08:51:42.393830Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1878:2607] 2025-05-07T08:51:42.410076Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1921:2627] 
2025-05-07T08:51:42.410552Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1921:2627], schemeshard id = 72075186224037897 2025-05-07T08:51:42.411359Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-05-07T08:51:42.450323Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-05-07T08:51:42.450405Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-05-07T08:51:42.450492Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-05-07T08:51:42.474648Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897 2025-05-07T08:51:42.489267Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-05-07T08:51:42.489471Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-05-07T08:51:42.725879Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-05-07T08:51:43.000729Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-05-07T08:51:43.097440Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-05-07T08:51:44.351606Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2210:3056], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:51:44.351783Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:51:44.378875Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72075186224037897 2025-05-07T08:51:45.242901Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2514:3106], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:51:45.243093Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:51:45.244513Z node 1 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 1 ], ReplyToActorId[ [1:2519:3110]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-05-07T08:51:45.244760Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-05-07T08:51:45.244843Z node 1 :STATISTICS DEBUG: service_impl.cpp:1219: ConnectToSA(), pipe client id = [1:2521:3112] 2025-05-07T08:51:45.244940Z node 1 :STATISTICS DEBUG: service_impl.cpp:1248: SyncNode(), pipe client id = [1:2521:3112] 2025-05-07T08:51:45.245651Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:2522:2980] 2025-05-07T08:51:45.246082Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:2521:3112], server id = [2:2522:2980], tablet id = 72075186224037894, status = OK 2025-05-07T08:51:45.246225Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:133: [72075186224037894] EvConnectNode, pipe server id = [2:2522:2980], node id = 1, have schemeshards count = 0, need schemeshards count = 1 2025-05-07T08:51:45.246320Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:314: [72075186224037894] SendStatisticsToNode(), node id = 1, schemeshard count = 1 2025-05-07T08:51:45.246620Z node 1 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 1 2025-05-07T08:51:45.246711Z node 1 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 1, ReplyToActorId = [1:2519:3110], StatRequests.size() = 1 2025-05-07T08:51:45.267934Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2526:3116], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:51:45.268075Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:51:45.268457Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2531:3121], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:51:45.275659Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715660:3, at schemeshard: 72057594046644480 2025-05-07T08:51:45.454560Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:217: [72075186224037894] EvFastPropagateCheck 2025-05-07T08:51:45.454680Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:357: [72075186224037894] PropagateFastStatistics(), node count = 0, schemeshard count = 0 2025-05-07T08:51:45.577481Z node 1 :STATISTICS DEBUG: service_impl.cpp:1189: EvRequestTimeout, pipe client id = [1:2521:3112], schemeshard count = 1 2025-05-07T08:51:46.055741Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:2533:3123], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715660 completed, doublechecking } 2025-05-07T08:51:46.231479Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:2645:3193] txid# 281474976715661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:51:46.261383Z node 1 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 2 ], ReplyToActorId[ [1:2668:3209]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-05-07T08:51:46.261631Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-05-07T08:51:46.261677Z node 1 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 2, ReplyToActorId = [1:2668:3209], StatRequests.size() = 1 2025-05-07T08:51:46.556583Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715662. Ctx: { TraceId: 01jtmz36cv6g3bcvyj943fw860, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTY5ZTI2NWMtODM3YjQzYjYtNjNhMGIzNTUtNWVkMzRlNmU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:51:46.851994Z node 1 :TX_PROXY_SCHEME_CACHE WARN: cache.cpp:303: Access denied: self# [1:2747:3239], for# user@builtin, access# DescribeSchema 2025-05-07T08:51:46.852082Z node 1 :TX_PROXY_SCHEME_CACHE WARN: cache.cpp:303: Access denied: self# [1:2747:3239], for# user@builtin, access# DescribeSchema 2025-05-07T08:51:46.867650Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:2737:3235], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:2:17: Error: At function: KiReadTable!
:2:17: Error: Cannot find table 'db.[/Root/Database/.metadata/_statistics]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-05-07T08:51:46.869847Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2147: SessionId: ydb://session/3?node_id=1&id=ZWZiZWMwNWUtMjUxYThkMzgtYjQyNmNmMjEtNTZiNGU3NGM=, ActorId: [1:2728:3227], ActorState: ExecuteState, TraceId: 01jtmz37zpc3e4rcbsncf14dws, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: >> KqpUniqueIndex::UpdateOnFkAlreadyExist >> KqpMultishardIndex::WriteIntoRenamingSyncIndex |89.6%| [TA] $(B)/ydb/core/statistics/database/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> KikimrIcGateway::TestDropResourcePool [GOOD] >> KqpPrefixedVectorIndexes::OrderByCosineSimilarityNullableLevel1 >> KqpPg::CreateSequence [GOOD] >> KqpPg::AlterSequence |89.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/security/certificate_check/ut/ydb-core-security-certificate_check-ut |89.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/security/certificate_check/ut/ydb-core-security-certificate_check-ut |89.6%| [TA] {RESULT} $(B)/ydb/core/tx/replication/controller/ut_assign_tx_id/test-results/unittest/{meta.json ... results_accumulator.log} >> BasicUsage::MaxByteSizeEqualZero [GOOD] >> BasicUsage::TSimpleWriteSession_AutoSeqNo_BasicUsage >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-4 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-5 >> KqpIndexes::SelectConcurentTX ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/provider/ut/unittest >> KikimrIcGateway::TestDropResourcePool [GOOD] Test command err: Trying to start YDB, gRPC: 14769, MsgBus: 6423 2025-05-07T08:51:34.844226Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501623783840142540:2191];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:51:34.846428Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/004687/r3tmp/tmpdLWS1J/pdisk_1.dat 2025-05-07T08:51:35.240663Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:51:35.270550Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:51:35.270708Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 14769, node 1 2025-05-07T08:51:35.273329Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:51:35.341390Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:51:35.341416Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:51:35.341423Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:51:35.341592Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6423 TClient is connected to server 
localhost:6423 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:51:35.881543Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:51:35.898885Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T08:51:37.995847Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623796725045002:2335], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:51:37.995971Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:51:38.296811Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 2025-05-07T08:51:38.460684Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 2025-05-07T08:51:38.504742Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T08:51:38.537686Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T08:51:38.589305Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623801020012611:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:51:38.589397Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:51:38.589815Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623801020012616:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:51:38.594201Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710664:3, at schemeshard: 72057594046644480 2025-05-07T08:51:38.606303Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710664, at schemeshard: 72057594046644480 2025-05-07T08:51:38.606745Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501623801020012618:2369], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710664 completed, doublechecking } 2025-05-07T08:51:38.702643Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501623801020012669:2559] txid# 281474976710665, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 11], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } Trying to start YDB, gRPC: 10788, MsgBus: 9969 2025-05-07T08:51:39.923782Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7501623807813881918:2129];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:51:39.923841Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/004687/r3tmp/tmpDToQLz/pdisk_1.dat 2025-05-07T08:51:40.123253Z node 2 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:51:40.139680Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:51:40.139766Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:51:40.141332Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 10788, node 2 2025-05-07T08:51:40.208335Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:51:40.208366Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:51:40.208373Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:51:40.208506Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:9969 TClient is connected to server localhost:9969 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:51:40.916282Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T08:51:41.048470Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-05-07T08:51:41.082639Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710659, at schemeshard: 72057594046644480 2025-05-07T08:51:44.032162Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7501623829288719029:2336], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:51:44.032287Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:51:44.077896Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 2025-05-07T08:51:44.138230Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 2025-05-07T08:51:44.183935Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T08:51:44.224781Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T08:51:44.314276Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7501623829288719340:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:51:44.314516Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:51:44.314976Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7501623829288719345:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:51:44.319718Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710664:3, at schemeshard: 72057594046644480 2025-05-07T08:51:44.332430Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7501623829288719347:2370], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710664 completed, doublechecking } 2025-05-07T08:51:44.385224Z node 2 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [2:7501623829288719398:2559] txid# 281474976710665, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 11], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:51:44.626429Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037888 not found
: Info: Success, code: 4 2025-05-07T08:51:44.927090Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7501623807813881918:2129];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:51:44.930330Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 3000, MsgBus: 12359 2025-05-07T08:51:45.759126Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7501623834322776937:2058];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:51:45.759186Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/004687/r3tmp/tmpuZnPOK/pdisk_1.dat 2025-05-07T08:51:46.127315Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:51:46.127407Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:51:46.128991Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:51:46.148607Z node 3 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 3000, node 3 2025-05-07T08:51:46.357337Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:51:46.357381Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:51:46.357398Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:51:46.357530Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:12359 TClient is connected to server localhost:12359 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:51:47.399626Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T08:51:47.427456Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480 >> KqpIndexes::UniqAndNoUniqSecondaryIndex >> PersQueueSdkReadSessionTest::ReadSessionWithExplicitlySpecifiedPartitions [GOOD] >> PersQueueSdkReadSessionTest::SettingsValidation >> KqpPg::InsertNoTargetColumns_SerialNotNull+useSink [GOOD] >> KqpPg::InsertNoTargetColumns_SerialNotNull-useSink |89.6%| [TA] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_index_build/test-results/unittest/{meta.json ... results_accumulator.log} |89.6%| [TA] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_transfer/test-results/unittest/{meta.json ... results_accumulator.log} |89.6%| [TA] {RESULT} $(B)/ydb/core/statistics/database/ut/test-results/unittest/{meta.json ... results_accumulator.log} |89.6%| [LD] {RESULT} $(B)/ydb/core/security/certificate_check/ut/ydb-core-security-certificate_check-ut >> Compression::WriteRAW [GOOD] >> Compression::WriteGZIP >> KqpPg::EquiJoin+useSink [GOOD] >> KqpPg::EquiJoin-useSink |89.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_external_data_source/ydb-core-tx-schemeshard-ut_external_data_source |89.6%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_external_data_source/ydb-core-tx-schemeshard-ut_external_data_source |89.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_external_data_source/ydb-core-tx-schemeshard-ut_external_data_source >> KqpPg::CreateTempTable [FAIL] >> KqpPg::CreateTempTableSerial >> KqpPg::PgCreateTable [GOOD] >> KqpPg::PgUpdate+useSink >> DataShardSnapshots::VolatileSnapshotMerge [GOOD] >> DataShardSnapshots::VolatileSnapshotAndLocalMKQLUpdate >> DataShardSnapshots::LockedWriteBulkUpsertConflict-UseSink [GOOD] >> DataShardSnapshots::LockedWriteDistributedCommitAborted+UseSink >> VDiskTest::HugeBlobWrite [GOOD] >> KqpIndexes::SecondaryIndexOrderBy >> TPQTest::TestPartitionTotalQuota [GOOD] >> TPQTest::TestPartitionPerConsumerQuota |89.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/security/ldap_auth_provider/ut/ydb-core-security-ldap_auth_provider-ut |89.6%| [LD] {RESULT} $(B)/ydb/core/security/ldap_auth_provider/ut/ydb-core-security-ldap_auth_provider-ut |89.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/security/ldap_auth_provider/ut/ydb-core-security-ldap_auth_provider-ut ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_vdisk2/unittest >> VDiskTest::HugeBlobWrite [GOOD] Test command err: Put id# [29:1:1:0:0:1048576:1] totalSize# 0 blobValueIndex# 45 Trim Put id# [25:1:1:0:0:1572864:1] totalSize# 1048576 blobValueIndex# 56 Put id# [8:1:1:0:0:40960:1] totalSize# 2621440 blobValueIndex# 20 Put id# [70:1:1:0:0:589824:1] totalSize# 2662400 blobValueIndex# 30 Change MinHugeBlobSize# 8192 Put id# [84:1:1:0:0:10:1] totalSize# 3252224 blobValueIndex# 7 Put id# [68:1:1:0:0:1048576:1] totalSize# 3252234 blobValueIndex# 47 Put id# [40:1:1:0:0:589824:1] totalSize# 4300810 blobValueIndex# 37 Put id# [31:1:1:0:0:10:1] totalSize# 4890634 blobValueIndex# 3 Put id# [38:1:1:0:0:10:1] totalSize# 4890644 blobValueIndex# 8 Put id# [5:1:1:0:0:1572864:1] totalSize# 4890654 blobValueIndex# 54 Put id# [30:1:1:0:0:1048576:1] totalSize# 6463518 blobValueIndex# 40 Put id# [29:1:2:0:0:1048576:1] totalSize# 7512094 blobValueIndex# 44 Put id# [100:1:1:0:0:40960:1] totalSize# 8560670 blobValueIndex# 26 Change MinHugeBlobSize# 524288 Restart Put id# 
[14:1:1:0:0:40960:1] totalSize# 8601630 blobValueIndex# 29 Change MinHugeBlobSize# 8192 Trim Put id# [23:1:1:0:0:1572864:1] totalSize# 8642590 blobValueIndex# 52 Put id# [36:1:1:0:0:1572864:1] totalSize# 10215454 blobValueIndex# 59 Trim Put id# [14:1:2:0:0:589824:1] totalSize# 11788318 blobValueIndex# 37 Change MinHugeBlobSize# 61440 Put id# [18:1:1:0:0:40960:1] totalSize# 12378142 blobValueIndex# 25 Trim Put id# [61:1:1:0:0:10:1] totalSize# 12419102 blobValueIndex# 0 Trim Put id# [89:1:1:0:0:1572864:1] totalSize# 12419112 blobValueIndex# 51 Put id# [5:1:2:0:0:40960:1] totalSize# 13991976 blobValueIndex# 20 Change MinHugeBlobSize# 65536 Put id# [81:1:1:0:0:1048576:1] totalSize# 14032936 blobValueIndex# 41 Change MinHugeBlobSize# 61440 Put id# [68:1:2:0:0:10:1] totalSize# 15081512 blobValueIndex# 2 Put id# [79:1:1:0:0:40960:1] totalSize# 15081522 blobValueIndex# 29 Trim Put id# [18:1:2:0:0:40960:1] totalSize# 15122482 blobValueIndex# 27 Trim Put id# [9:1:1:0:0:1572864:1] totalSize# 15163442 blobValueIndex# 51 Put id# [90:1:1:0:0:40960:1] totalSize# 16736306 blobValueIndex# 23 Put id# [18:1:3:0:0:1572864:1] totalSize# 16777266 blobValueIndex# 59 Put id# [31:1:2:0:0:1024:1] totalSize# 18350130 blobValueIndex# 15 Put id# [98:1:1:0:0:1024:1] totalSize# 18351154 blobValueIndex# 11 Change MinHugeBlobSize# 524288 Put id# [79:1:2:0:0:1048576:1] totalSize# 18352178 blobValueIndex# 46 Put id# [15:1:1:0:0:10:1] totalSize# 19400754 blobValueIndex# 5 Put id# [37:1:1:0:0:1048576:1] totalSize# 19400764 blobValueIndex# 40 Change MinHugeBlobSize# 65536 Put id# [27:1:1:0:0:1048576:1] totalSize# 20449340 blobValueIndex# 47 Put id# [84:1:2:0:0:1572864:1] totalSize# 21497916 blobValueIndex# 52 Put id# [56:1:1:0:0:1024:1] totalSize# 23070780 blobValueIndex# 15 Restart Put id# [25:1:2:0:0:1048576:1] totalSize# 23071804 blobValueIndex# 49 Put id# [65:1:1:0:0:40960:1] totalSize# 24120380 blobValueIndex# 25 Put id# [68:1:3:0:0:10:1] totalSize# 24161340 blobValueIndex# 6 Put id# [2:1:1:0:0:1048576:1] totalSize# 24161350 blobValueIndex# 45 Put id# [76:1:1:0:0:589824:1] totalSize# 25209926 blobValueIndex# 36 Put id# [23:1:2:0:0:1024:1] totalSize# 25799750 blobValueIndex# 14 Trim Put id# [20:1:1:0:0:1024:1] totalSize# 25800774 blobValueIndex# 18 Put id# [17:1:1:0:0:1024:1] totalSize# 25801798 blobValueIndex# 10 Trim Put id# [59:1:1:0:0:1048576:1] totalSize# 25802822 blobValueIndex# 41 Put id# [47:1:1:0:0:589824:1] totalSize# 26851398 blobValueIndex# 34 Change MinHugeBlobSize# 12288 Put id# [99:1:1:0:0:10:1] totalSize# 27441222 blobValueIndex# 7 Trim Put id# [61:1:2:0:0:1048576:1] totalSize# 27441232 blobValueIndex# 49 Change MinHugeBlobSize# 65536 Put id# [89:1:2:0:0:1048576:1] totalSize# 28489808 blobValueIndex# 44 Put id# [82:1:1:0:0:1024:1] totalSize# 29538384 blobValueIndex# 11 Put id# [2:1:2:0:0:589824:1] totalSize# 29539408 blobValueIndex# 30 Put id# [62:1:1:0:0:40960:1] totalSize# 30129232 blobValueIndex# 25 Restart Put id# [45:1:1:0:0:40960:1] totalSize# 30170192 blobValueIndex# 28 Trim Put id# [47:1:2:0:0:1572864:1] totalSize# 30211152 blobValueIndex# 53 Put id# [93:1:1:0:0:589824:1] totalSize# 31784016 blobValueIndex# 32 Put id# [4:1:1:0:0:1572864:1] totalSize# 32373840 blobValueIndex# 55 Change MinHugeBlobSize# 12288 Put id# [19:1:1:0:0:589824:1] totalSize# 33946704 blobValueIndex# 32 Change MinHugeBlobSize# 8192 Put id# [28:1:1:0:0:1572864:1] totalSize# 34536528 blobValueIndex# 58 Put id# [47:1:3:0:0:1048576:1] totalSize# 36109392 blobValueIndex# 42 Put id# [64:1:1:0:0:1024:1] totalSize# 37157968 
blobValueIndex# 16 Trim Put id# [15:1:2:0:0:1572864:1] totalSize# 37158992 blobValueIndex# 52 Put id# [60:1:1:0:0:1048576:1] totalSize# 38731856 blobValueIndex# 40 Put id# [89:1:3:0:0:1572864:1] totalSize# 39780432 blobValueIndex# 58 Put id# [24:1:1:0:0:10:1] totalSize# 41353296 blobValueIndex# 0 Put id# [28:1:2:0:0:10:1] totalSize# 41353306 blobValueIndex# 9 Put id# [96:1:1:0:0:40960:1] totalSize# 41353316 blobValueIndex# 24 Put id# [37:1:2:0:0:1572864:1] totalSize# 41394276 blobValueIndex# 51 Put id# [92:1:1:0:0:1024:1] totalSize# 42967140 blobValueIndex# 15 Put id# [92:1:2:0:0:1572864:1] totalSize# 42968164 blobValueIndex# 56 Put id# [32:1:1:0:0:1048576:1] totalSize# 44541028 blobValueIndex# 48 Put id# [75:1:1:0:0:1024:1] totalSize# 45589604 blobValueIndex# 15 Put id# [62:1:2:0:0:589824:1] totalSize# 45590628 blobValueIndex# 31 Put id# [82:1:2:0:0:1024:1] totalSize# 46180452 blobValueIndex# 15 Put id# [52:1:1:0:0:1024:1] totalSize# 46181476 blobValueIndex# 18 Put id# [83:1:1:0:0:589824:1] totalSize# 46182500 blobValueIndex# 34 Put id# [51:1:1:0:0:10:1] totalSize# 46772324 blobValueIndex# 2 Put id# [37:1:3:0:0:10:1] totalSize# 46772334 blobValueIndex# 7 Trim Put id# [16:1:1:0:0:10:1] totalSize# 46772344 blobValueIndex# 9 Put id# [34:1:1:0:0:1572864:1] totalSize# 46772354 blobValueIndex# 55 Change MinHugeBlobSize# 12288 Put id# [44:1:1:0:0:589824:1] totalSize# 48345218 blobValueIndex# 36 Restart Put id# [80:1:1:0:0:10:1] totalSize# 48935042 blobValueIndex# 7 Put id# [13:1:1:0:0:1572864:1] totalSize# 48935052 blobValueIndex# 52 Put id# [88:1:1:0:0:40960:1] totalSize# 50507916 blobValueIndex# 21 Trim Put id# [89:1:4:0:0:1572864:1] totalSize# 50548876 blobValueIndex# 50 Put id# [66:1:1:0:0:10:1] totalSize# 52121740 blobValueIndex# 3 Trim Put id# [100:1:2:0:0:40960:1] totalSize# 52121750 blobValueIndex# 23 Change MinHugeBlobSize# 524288 Put id# [75:1:2:0:0:1024:1] totalSize# 52162710 blobValueIndex# 11 Put id# [57:1:1:0:0:1024:1] totalSize# 52163734 blobValueIndex# 16 Change MinHugeBlobSize# 65536 Put id# [53:1:1:0:0:1572864:1] totalSize# 52164758 blobValueIndex# 58 Put id# [62:1:3:0:0:1048576:1] totalSize# 53737622 blobValueIndex# 42 Put id# [72:1:1:0:0:589824:1] totalSize# 54786198 blobValueIndex# 39 Put id# [41:1:1:0:0:1048576:1] totalSize# 55376022 blobValueIndex# 42 Put id# [89:1:5:0:0:1048576:1] totalSize# 56424598 blobValueIndex# 48 Put id# [72:1:2:0:0:589824:1] totalSize# 57473174 blobValueIndex# 39 Put id# [17:1:2:0:0:1572864:1] totalSize# 58062998 blobValueIndex# 51 Put id# [83:1:2:0:0:589824:1] totalSize# 59635862 blobValueIndex# 31 Put id# [55:1:1:0:0:589824:1] totalSize# 60225686 blobValueIndex# 32 Change MinHugeBlobSize# 61440 Put id# [91:1:1:0:0:1048576:1] totalSize# 60815510 blobValueIndex# 46 Put id# [34:1:2:0:0:1048576:1] totalSize# 61864086 blobValueIndex# 45 Put id# [64:1:2:0:0:1572864:1] totalSize# 62912662 blobValueIndex# 55 Put id# [31:1:3:0:0:1024:1] totalSize# 64485526 blobValueIndex# 15 Change MinHugeBlobSize# 12288 Put id# [59:1:2:0:0:1048576:1] totalSize# 64486550 blobValueIndex# 49 Trim Put id# [89:1:6:0:0:1024:1] totalSize# 65535126 blobValueIndex# 18 Put id# [49:1:1:0:0:40960:1] totalSize# 65536150 blobValueIndex# 21 Put id# [84:1:3:0:0:10:1] totalSize# 65577110 blobValueIndex# 4 Put id# [52:1:2:0:0:40960:1] totalSize# 65577120 blobValueIndex# 29 Trim Put id# [65:1:2:0:0:1024:1] totalSize# 65618080 blobValueIndex# 15 Trim Put id# [62:1:4:0:0:40960:1] totalSize# 65619104 blobValueIndex# 21 Trim Put id# [24:1:2:0:0:10:1] totalSize# 65660064 blobValueIndex# 4 Trim 
Put id# [99:1:2:0:0:40960:1] totalSize# 65660074 blobValueIndex# 24 Put id# [96:1:2:0:0:589824:1] totalSize# 65701034 blobValueIndex# 32 Put id# [45:1:2:0:0:589824:1] totalSize# 66290858 blobValueIndex# 36 Put id# [62:1:5:0:0:1048576:1] totalSize# 66880682 blobValueIndex# 45 Put id# [47:1:4:0:0:10:1] totalSize# 67929258 blobValueIndex# 7 Put id# [16:1:2:0:0:40960:1] totalSize# 67929268 blobValueIndex# 25 Trim Put id# [6:1:1:0:0:1048576:1] totalSize# 67970228 blobValueIndex# 49 Put id# [33:1:1:0:0:1024:1] totalSize# 69018804 blobValueIndex# 10 Put id# [11:1:1:0:0:1572864:1] totalSize# 69019828 blobValueIndex# 53 Put id# [43:1:1:0:0:589824:1] totalSize# 70592692 blobValueIndex# 30 Put id# [76:1:2:0:0:40960:1] totalSize# 71182516 blobValueIndex# 28 Put id# [56:1:2:0:0:589824:1] totalSize# 71223476 blobValueIndex# 33 Change MinHugeBlobSize# 65536 Put id# [7:1:1:0:0:10:1] totalSize# 71813300 blobValueIndex# 0 Trim Put id# [52:1:3:0:0:1048576:1] totalSize# 71813310 blobValueIndex# 41 Put id# [1:1:1:0:0:589824:1] totalSize# 72861886 blobValueIndex# 34 Put id# [3:1:1:0:0:1024:1] totalSize# 73451710 blobValueIndex# 16 Put id# [39:1:1:0:0:40960:1] totalSize# 73452734 blobValueIndex# 22 Put id# [100:1:3:0:0:1572864:1] totalSize# 73493694 blobValueIndex# 53 Put id# [17:1:3:0:0:10:1] totalSize# 75066558 blobValueIndex# 0 Put id# [2:1:3:0:0:1048576:1] totalSize# 75066568 blobValueIndex# 47 Put id# [34:1:3:0:0:1048576:1] totalSize# 76115144 blobValueIndex# 41 Change MinHugeBlobSize# 8192 Put id# [23:1:3:0:0:1572864:1] totalSize# 77163720 blobValueIndex# 58 Put id# [44:1:2:0:0:589824:1] totalSize# 78736584 blobValueIndex# 31 Change MinHugeBlobSize# 61440 Trim Put id# [31:1:4:0:0:40960:1] totalSize# 79326408 blobValueIndex# 23 Put id# [22:1:1:0:0:40960:1] totalSize# 79367368 blobValueIndex# 20 Put id# [83:1:3:0:0:10:1] totalSize# 79408328 blobValueIndex# 2 Trim Put id# [90:1:2:0:0:10:1] totalSize# 79408338 blobValueIndex# 7 Trim Restart Put id# [77:1:1:0:0:1572864:1] totalSize# 79408348 blobValueIndex# 58 Put id# [9:1:2:0:0:40960:1] totalSize# 80981212 blobValueIndex# 21 Put id# [79:1:3:0:0:1572864:1] totalSize# 81022172 blobValueIndex# 50 Change MinHugeBlobSize# 524288 Put id# [49:1:2:0:0:10:1] totalSize# 82595036 blobValueIndex# 8 Put id# [74:1:1:0:0:1048576:1] totalSize# 82595046 blobValueIndex# 42 Restart Put id# [90:1:3:0:0:1572864:1] totalSize# 83643622 blobValueIndex# 58 Put id# [56:1:3:0:0:1024:1] totalSize# 85216486 blobValueIndex# 18 Put id# [86:1:1:0:0:1048576:1] totalSize# 85217510 blobValueIndex# 40 Put id# [30:1:2:0:0:40960:1] totalSize# 86266086 blobValueIndex# 27 Put id# [35:1:1:0:0:10:1] totalSize# 86307046 blobValueIndex# 7 Put id# [46:1:1:0:0:40960:1] totalSize# 86307056 blobValueIndex# 25 Put id# [87:1:1:0:0:40960:1] totalSize# 86348016 blobValueIndex# 29 Trim Put id# [42:1:1:0:0:1572864:1] totalSize# 86388976 blobValueIndex# 56 Trim Put id# [3:1:2:0:0:1024:1] totalSize# 87961840 blobValueIndex# 18 Put id# [28:1:3:0:0:1572864:1] totalSize# 87962864 blobValueIndex# 59 Trim Put id# [73:1:1:0:0:1024:1] totalSize# 89535728 blobValueIndex# 19 Put id# [95:1:1:0:0:1572864:1] totalSize# 89536752 blobValueIndex# 55 Put id# [94:1:1:0:0:1572864:1] totalSize# 91109616 blobValueIndex# 57 Put id# [79:1:4:0:0:10:1] totalSize# 92682480 blobValueIndex# 1 Put id# [66:1:2:0:0:1048576:1] totalSize# 92682490 blobValueIndex# 47 Restart Put id# [59:1:3:0:0:40960:1] totalSize# 93731066 blobValueIndex# 25 Put id# [30:1:3:0:0:1024:1] totalSize# 93772026 blobValueIndex# 19 Put id# [72:1:3:0:0:1572864:1] 
totalSize# 93773050 blobValueIndex# 56 Put id# [24:1:3:0:0:1048576:1] totalSize# 95345914 blobValueIndex# 47 Restart Put id# [84:1:4:0:0:1024:1] totalSize# 96394490 blobValueIndex# 13 Put id# [6:1:2:0:0:1048576:1] totalSize# 96395514 blobValueIndex# 41 Put id# [58:1:1:0:0:10:1] totalSize# 97444090 blobValueIndex# 0 Put id# [30:1:4:0:0:1024:1] totalSize# 97444100 blobValueIndex# 10 Change MinHugeBlobSize# 819 ... 992590926 blobValueIndex# 22 Put id# [28:1:22:0:0:1572864:1] totalSize# 992631886 blobValueIndex# 56 Put id# [31:1:20:0:0:1572864:1] totalSize# 994204750 blobValueIndex# 51 Restart Put id# [47:1:15:0:0:589824:1] totalSize# 995777614 blobValueIndex# 35 Trim Put id# [46:1:24:0:0:1024:1] totalSize# 996367438 blobValueIndex# 17 Put id# [6:1:20:0:0:1572864:1] totalSize# 996368462 blobValueIndex# 54 Put id# [2:1:26:0:0:10:1] totalSize# 997941326 blobValueIndex# 3 Put id# [41:1:18:0:0:1048576:1] totalSize# 997941336 blobValueIndex# 40 Put id# [29:1:16:0:0:589824:1] totalSize# 998989912 blobValueIndex# 36 Change MinHugeBlobSize# 12288 Put id# [98:1:14:0:0:40960:1] totalSize# 999579736 blobValueIndex# 20 Put id# [91:1:17:0:0:589824:1] totalSize# 999620696 blobValueIndex# 39 Put id# [76:1:17:0:0:10:1] totalSize# 1000210520 blobValueIndex# 9 Put id# [39:1:18:0:0:1048576:1] totalSize# 1000210530 blobValueIndex# 42 Put id# [90:1:24:0:0:1048576:1] totalSize# 1001259106 blobValueIndex# 40 Put id# [51:1:24:0:0:10:1] totalSize# 1002307682 blobValueIndex# 7 Put id# [61:1:24:0:0:1572864:1] totalSize# 1002307692 blobValueIndex# 52 Put id# [62:1:23:0:0:1572864:1] totalSize# 1003880556 blobValueIndex# 57 Put id# [55:1:26:0:0:40960:1] totalSize# 1005453420 blobValueIndex# 24 Trim Put id# [32:1:19:0:0:10:1] totalSize# 1005494380 blobValueIndex# 2 Put id# [28:1:23:0:0:589824:1] totalSize# 1005494390 blobValueIndex# 36 Put id# [56:1:16:0:0:589824:1] totalSize# 1006084214 blobValueIndex# 34 Trim Put id# [30:1:20:0:0:589824:1] totalSize# 1006674038 blobValueIndex# 35 Trim Put id# [75:1:15:0:0:10:1] totalSize# 1007263862 blobValueIndex# 7 Put id# [30:1:21:0:0:589824:1] totalSize# 1007263872 blobValueIndex# 37 Put id# [28:1:24:0:0:10:1] totalSize# 1007853696 blobValueIndex# 1 Put id# [27:1:19:0:0:589824:1] totalSize# 1007853706 blobValueIndex# 37 Restart Put id# [12:1:15:0:0:40960:1] totalSize# 1008443530 blobValueIndex# 29 Put id# [94:1:14:0:0:40960:1] totalSize# 1008484490 blobValueIndex# 24 Change MinHugeBlobSize# 65536 Restart Put id# [34:1:19:0:0:1024:1] totalSize# 1008525450 blobValueIndex# 13 Put id# [10:1:17:0:0:1572864:1] totalSize# 1008526474 blobValueIndex# 55 Put id# [22:1:16:0:0:1048576:1] totalSize# 1010099338 blobValueIndex# 40 Change MinHugeBlobSize# 61440 Put id# [28:1:25:0:0:1048576:1] totalSize# 1011147914 blobValueIndex# 41 Put id# [92:1:15:0:0:40960:1] totalSize# 1012196490 blobValueIndex# 29 Put id# [52:1:19:0:0:1572864:1] totalSize# 1012237450 blobValueIndex# 57 Put id# [89:1:27:0:0:589824:1] totalSize# 1013810314 blobValueIndex# 31 Put id# [84:1:27:0:0:589824:1] totalSize# 1014400138 blobValueIndex# 39 Put id# [53:1:27:0:0:1048576:1] totalSize# 1014989962 blobValueIndex# 41 Put id# [18:1:24:0:0:10:1] totalSize# 1016038538 blobValueIndex# 0 Put id# [60:1:25:0:0:589824:1] totalSize# 1016038548 blobValueIndex# 38 Put id# [74:1:19:0:0:1048576:1] totalSize# 1016628372 blobValueIndex# 44 Put id# [23:1:31:0:0:1572864:1] totalSize# 1017676948 blobValueIndex# 50 Trim Put id# [5:1:18:0:0:1048576:1] totalSize# 1019249812 blobValueIndex# 46 Put id# [35:1:25:0:0:40960:1] totalSize# 1020298388 
blobValueIndex# 28 Put id# [11:1:22:0:0:10:1] totalSize# 1020339348 blobValueIndex# 3 Trim Put id# [33:1:17:0:0:10:1] totalSize# 1020339358 blobValueIndex# 1 Put id# [75:1:16:0:0:1048576:1] totalSize# 1020339368 blobValueIndex# 49 Put id# [16:1:16:0:0:40960:1] totalSize# 1021387944 blobValueIndex# 20 Put id# [85:1:22:0:0:1024:1] totalSize# 1021428904 blobValueIndex# 18 Change MinHugeBlobSize# 8192 Put id# [95:1:19:0:0:40960:1] totalSize# 1021429928 blobValueIndex# 22 Put id# [26:1:19:0:0:10:1] totalSize# 1021470888 blobValueIndex# 0 Put id# [36:1:19:0:0:40960:1] totalSize# 1021470898 blobValueIndex# 21 Put id# [44:1:16:0:0:1024:1] totalSize# 1021511858 blobValueIndex# 14 Put id# [80:1:19:0:0:1048576:1] totalSize# 1021512882 blobValueIndex# 48 Put id# [92:1:16:0:0:1572864:1] totalSize# 1022561458 blobValueIndex# 52 Trim Put id# [100:1:17:0:0:40960:1] totalSize# 1024134322 blobValueIndex# 27 Trim Put id# [27:1:20:0:0:1048576:1] totalSize# 1024175282 blobValueIndex# 49 Put id# [12:1:16:0:0:589824:1] totalSize# 1025223858 blobValueIndex# 30 Put id# [98:1:15:0:0:1572864:1] totalSize# 1025813682 blobValueIndex# 55 Put id# [11:1:23:0:0:1024:1] totalSize# 1027386546 blobValueIndex# 18 Put id# [89:1:28:0:0:10:1] totalSize# 1027387570 blobValueIndex# 5 Put id# [46:1:25:0:0:10:1] totalSize# 1027387580 blobValueIndex# 0 Put id# [95:1:20:0:0:1048576:1] totalSize# 1027387590 blobValueIndex# 46 Put id# [41:1:19:0:0:40960:1] totalSize# 1028436166 blobValueIndex# 28 Restart Put id# [77:1:21:0:0:1024:1] totalSize# 1028477126 blobValueIndex# 17 Put id# [31:1:21:0:0:10:1] totalSize# 1028478150 blobValueIndex# 5 Put id# [9:1:25:0:0:40960:1] totalSize# 1028478160 blobValueIndex# 21 Put id# [24:1:23:0:0:10:1] totalSize# 1028519120 blobValueIndex# 5 Put id# [70:1:13:0:0:1048576:1] totalSize# 1028519130 blobValueIndex# 49 Trim Put id# [27:1:21:0:0:1024:1] totalSize# 1029567706 blobValueIndex# 18 Change MinHugeBlobSize# 65536 Put id# [88:1:22:0:0:1024:1] totalSize# 1029568730 blobValueIndex# 13 Put id# [54:1:25:0:0:589824:1] totalSize# 1029569754 blobValueIndex# 38 Put id# [23:1:32:0:0:1572864:1] totalSize# 1030159578 blobValueIndex# 52 Put id# [7:1:15:0:0:589824:1] totalSize# 1031732442 blobValueIndex# 32 Put id# [52:1:20:0:0:40960:1] totalSize# 1032322266 blobValueIndex# 27 Put id# [73:1:13:0:0:1572864:1] totalSize# 1032363226 blobValueIndex# 58 Trim Put id# [78:1:24:0:0:40960:1] totalSize# 1033936090 blobValueIndex# 21 Put id# [16:1:17:0:0:1024:1] totalSize# 1033977050 blobValueIndex# 14 Put id# [19:1:21:0:0:1572864:1] totalSize# 1033978074 blobValueIndex# 54 Put id# [16:1:18:0:0:1024:1] totalSize# 1035550938 blobValueIndex# 14 Put id# [99:1:18:0:0:1048576:1] totalSize# 1035551962 blobValueIndex# 40 Restart Put id# [17:1:19:0:0:40960:1] totalSize# 1036600538 blobValueIndex# 22 Trim Put id# [5:1:19:0:0:40960:1] totalSize# 1036641498 blobValueIndex# 20 Put id# [48:1:22:0:0:40960:1] totalSize# 1036682458 blobValueIndex# 25 Put id# [34:1:20:0:0:10:1] totalSize# 1036723418 blobValueIndex# 2 Put id# [34:1:21:0:0:10:1] totalSize# 1036723428 blobValueIndex# 1 Put id# [98:1:16:0:0:40960:1] totalSize# 1036723438 blobValueIndex# 24 Put id# [53:1:28:0:0:589824:1] totalSize# 1036764398 blobValueIndex# 31 Put id# [7:1:16:0:0:589824:1] totalSize# 1037354222 blobValueIndex# 33 Put id# [40:1:19:0:0:1048576:1] totalSize# 1037944046 blobValueIndex# 44 Put id# [1:1:26:0:0:1572864:1] totalSize# 1038992622 blobValueIndex# 57 Trim Put id# [1:1:27:0:0:40960:1] totalSize# 1040565486 blobValueIndex# 22 Put id# [41:1:20:0:0:589824:1] 
totalSize# 1040606446 blobValueIndex# 32 Put id# [30:1:22:0:0:40960:1] totalSize# 1041196270 blobValueIndex# 21 Trim Put id# [2:1:27:0:0:10:1] totalSize# 1041237230 blobValueIndex# 7 Trim Put id# [15:1:13:0:0:1048576:1] totalSize# 1041237240 blobValueIndex# 44 Change MinHugeBlobSize# 61440 Put id# [35:1:26:0:0:1024:1] totalSize# 1042285816 blobValueIndex# 11 Put id# [88:1:23:0:0:10:1] totalSize# 1042286840 blobValueIndex# 0 Put id# [79:1:21:0:0:40960:1] totalSize# 1042286850 blobValueIndex# 29 Put id# [4:1:22:0:0:10:1] totalSize# 1042327810 blobValueIndex# 7 Put id# [64:1:28:0:0:1024:1] totalSize# 1042327820 blobValueIndex# 14 Put id# [86:1:12:0:0:589824:1] totalSize# 1042328844 blobValueIndex# 37 Put id# [74:1:20:0:0:1048576:1] totalSize# 1042918668 blobValueIndex# 43 Put id# [55:1:27:0:0:589824:1] totalSize# 1043967244 blobValueIndex# 37 Put id# [46:1:26:0:0:589824:1] totalSize# 1044557068 blobValueIndex# 37 Put id# [24:1:24:0:0:40960:1] totalSize# 1045146892 blobValueIndex# 23 Put id# [5:1:20:0:0:589824:1] totalSize# 1045187852 blobValueIndex# 37 Put id# [63:1:15:0:0:40960:1] totalSize# 1045777676 blobValueIndex# 29 Change MinHugeBlobSize# 65536 Put id# [5:1:21:0:0:1572864:1] totalSize# 1045818636 blobValueIndex# 58 Put id# [76:1:18:0:0:1572864:1] totalSize# 1047391500 blobValueIndex# 50 Put id# [65:1:17:0:0:1572864:1] totalSize# 1048964364 blobValueIndex# 55 Put id# [61:1:25:0:0:1024:1] totalSize# 1050537228 blobValueIndex# 15 Change MinHugeBlobSize# 12288 Trim Put id# [75:1:17:0:0:10:1] totalSize# 1050538252 blobValueIndex# 6 Put id# [41:1:21:0:0:40960:1] totalSize# 1050538262 blobValueIndex# 21 Put id# [88:1:24:0:0:1572864:1] totalSize# 1050579222 blobValueIndex# 52 Put id# [6:1:21:0:0:1048576:1] totalSize# 1052152086 blobValueIndex# 46 Restart Put id# [6:1:22:0:0:1572864:1] totalSize# 1053200662 blobValueIndex# 53 Trim Put id# [27:1:22:0:0:40960:1] totalSize# 1054773526 blobValueIndex# 24 Trim Put id# [3:1:18:0:0:40960:1] totalSize# 1054814486 blobValueIndex# 24 Put id# [99:1:19:0:0:10:1] totalSize# 1054855446 blobValueIndex# 2 Put id# [1:1:28:0:0:1572864:1] totalSize# 1054855456 blobValueIndex# 51 Put id# [71:1:16:0:0:1572864:1] totalSize# 1056428320 blobValueIndex# 53 Put id# [23:1:33:0:0:589824:1] totalSize# 1058001184 blobValueIndex# 36 Put id# [93:1:20:0:0:1024:1] totalSize# 1058591008 blobValueIndex# 15 Put id# [36:1:20:0:0:1572864:1] totalSize# 1058592032 blobValueIndex# 53 Put id# [61:1:26:0:0:589824:1] totalSize# 1060164896 blobValueIndex# 39 Change MinHugeBlobSize# 61440 Put id# [64:1:29:0:0:1048576:1] totalSize# 1060754720 blobValueIndex# 49 Restart Put id# [2:1:28:0:0:10:1] totalSize# 1061803296 blobValueIndex# 0 Put id# [88:1:25:0:0:40960:1] totalSize# 1061803306 blobValueIndex# 23 Put id# [94:1:15:0:0:1024:1] totalSize# 1061844266 blobValueIndex# 15 Put id# [78:1:25:0:0:589824:1] totalSize# 1061845290 blobValueIndex# 30 Trim Put id# [69:1:23:0:0:1048576:1] totalSize# 1062435114 blobValueIndex# 40 Put id# [9:1:26:0:0:1572864:1] totalSize# 1063483690 blobValueIndex# 58 Put id# [34:1:22:0:0:1048576:1] totalSize# 1065056554 blobValueIndex# 40 Restart Put id# [30:1:23:0:0:589824:1] totalSize# 1066105130 blobValueIndex# 37 Put id# [94:1:16:0:0:40960:1] totalSize# 1066694954 blobValueIndex# 24 Put id# [76:1:19:0:0:1572864:1] totalSize# 1066735914 blobValueIndex# 53 Trim Put id# [69:1:24:0:0:10:1] totalSize# 1068308778 blobValueIndex# 4 Put id# [41:1:22:0:0:10:1] totalSize# 1068308788 blobValueIndex# 6 Trim Put id# [17:1:20:0:0:10:1] totalSize# 1068308798 blobValueIndex# 9 Put 
id# [19:1:22:0:0:1572864:1] totalSize# 1068308808 blobValueIndex# 57 Put id# [13:1:14:0:0:1024:1] totalSize# 1069881672 blobValueIndex# 15 Put id# [74:1:21:0:0:10:1] totalSize# 1069882696 blobValueIndex# 2 Trim Put id# [46:1:27:0:0:1024:1] totalSize# 1069882706 blobValueIndex# 19 Put id# [93:1:21:0:0:40960:1] totalSize# 1069883730 blobValueIndex# 25 Put id# [93:1:22:0:0:40960:1] totalSize# 1069924690 blobValueIndex# 23 Restart Put id# [62:1:24:0:0:589824:1] totalSize# 1069965650 blobValueIndex# 35 Restart Put id# [65:1:18:0:0:1024:1] totalSize# 1070555474 blobValueIndex# 11 Change MinHugeBlobSize# 12288 Put id# [86:1:13:0:0:1572864:1] totalSize# 1070556498 blobValueIndex# 56 Put id# [65:1:19:0:0:10:1] totalSize# 1072129362 blobValueIndex# 2 Restart Put id# [60:1:26:0:0:40960:1] totalSize# 1072129372 blobValueIndex# 25 Put id# [49:1:21:0:0:10:1] totalSize# 1072170332 blobValueIndex# 6 Put id# [71:1:17:0:0:1048576:1] totalSize# 1072170342 blobValueIndex# 42 Put id# [12:1:17:0:0:1024:1] totalSize# 1073218918 blobValueIndex# 14 Put id# [42:1:27:0:0:589824:1] totalSize# 1073219942 blobValueIndex# 36 Put id# [13:1:15:0:0:1048576:1] totalSize# 1073809766 blobValueIndex# 49 Put id# [58:1:18:0:0:40960:1] totalSize# 1074858342 blobValueIndex# 22 Trim Put id# [98:1:17:0:0:40960:1] totalSize# 1074899302 blobValueIndex# 25 Put id# [73:1:14:0:0:10:1] totalSize# 1074940262 blobValueIndex# 1 Put id# [36:1:21:0:0:1024:1] totalSize# 1074940272 blobValueIndex# 11 Put id# [78:1:26:0:0:1572864:1] totalSize# 1074941296 blobValueIndex# 50 Put id# [58:1:19:0:0:1024:1] totalSize# 1076514160 blobValueIndex# 16 Put id# [62:1:25:0:0:40960:1] totalSize# 1076515184 blobValueIndex# 29 Put id# [83:1:24:0:0:10:1] totalSize# 1076556144 blobValueIndex# 3 Trim Restart >> KqpPg::AlterSequence [FAIL] >> KqpPg::AlterColumnSetDefaultFromSequence |89.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_kqp_scan/ydb-core-tx-datashard-ut_kqp_scan |89.6%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_kqp_scan/ydb-core-tx-datashard-ut_kqp_scan |89.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_kqp_scan/ydb-core-tx-datashard-ut_kqp_scan >> KqpPg::InsertFromSelect_Simple-useSink [GOOD] >> KqpPg::InsertFromSelect_NoReorder-useSink |89.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/ut/arrow/ydb-core-kqp-ut-arrow |89.6%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/arrow/ydb-core-kqp-ut-arrow |89.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/arrow/ydb-core-kqp-ut-arrow |89.6%| [TA] $(B)/ydb/core/blobstorage/ut_vdisk2/test-results/unittest/{meta.json ... results_accumulator.log} >> KqpUniqueIndex::UpsertExplicitNullInComplexFk >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-5 [FAIL] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-6 >> KqpPg::InsertValuesFromTableWithDefaultNegativeCase-useSink [GOOD] |89.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/services/ydb/table_split_ut/ydb-services-ydb-table_split_ut |89.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/services/ydb/table_split_ut/ydb-services-ydb-table_split_ut |89.6%| [TA] {RESULT} $(B)/ydb/core/blobstorage/ut_vdisk2/test-results/unittest/{meta.json ... 
results_accumulator.log} |89.6%| [LD] {RESULT} $(B)/ydb/services/ydb/table_split_ut/ydb-services-ydb-table_split_ut >> TPartitionChooserSuite::TPartitionChooserActor_SplitMergeEnabled_PreferedPartition_InactiveActor_Test [GOOD] >> TPartitionChooserSuite::TPartitionChooserActor_SplitMergeEnabled_PreferedPartition_OtherPartition_Test |89.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/ut/cost/ydb-core-kqp-ut-cost |89.6%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/cost/ydb-core-kqp-ut-cost >> KqpPg::TableSelect+useSink [GOOD] >> KqpPg::TableSelect-useSink |89.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/cost/ydb-core-kqp-ut-cost ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/pg/unittest >> KqpPg::InsertValuesFromTableWithDefaultNegativeCase-useSink [GOOD] Test command err: Trying to start YDB, gRPC: 24902, MsgBus: 63161 2025-05-07T08:50:54.785391Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501623615369744506:2274];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:50:54.785445Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/001cbf/r3tmp/tmp5p7Hi4/pdisk_1.dat 2025-05-07T08:50:55.281343Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:50:55.286516Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:50:55.286631Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:50:55.288447Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 24902, node 1 2025-05-07T08:50:55.466626Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:50:55.466657Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:50:55.466665Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:50:55.466841Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:63161 TClient is connected to server localhost:63161 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... 
(TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:50:56.224639Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:50:56.245848Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T08:50:58.364221Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623632549614122:2331], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:50:58.364350Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:50:58.401362Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 2025-05-07T08:50:58.574238Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623632549614229:2343], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:50:58.574311Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:50:58.582959Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 2025-05-07T08:50:58.635463Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623632549614307:2354], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:50:58.635550Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:50:58.635904Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623632549614312:2357], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:50:58.639401Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710660:3, at schemeshard: 72057594046644480 2025-05-07T08:50:58.649300Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501623632549614314:2358], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710660 completed, doublechecking } 2025-05-07T08:50:58.737650Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501623632549614365:2441] txid# 281474976710661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } Trying to start YDB, gRPC: 22510, MsgBus: 9301 2025-05-07T08:51:00.176919Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7501623638431015496:2065];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:51:00.177456Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/001cbf/r3tmp/tmpy4BFxv/pdisk_1.dat 2025-05-07T08:51:00.388340Z node 2 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:51:00.390460Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:51:00.390555Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:51:00.391752Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 22510, node 2 2025-05-07T08:51:00.450565Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:51:00.450589Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:51:00.450598Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:51:00.450726Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:9301 TClient is connected to server localhost:9301 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:51:01.100882Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T08:51:03.502471Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7501623651315918021:2330], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:51:03.502567Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:51:03.504811Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7501623651315918033:2333], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:51:03.509181Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480 2025-05-07T08:51:03.524608Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7501623651315918035:2334], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-05-07T08:51:03.612670Z node 2 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [2:7501623651315918086:2330] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist ... onnected -> Connecting 2025-05-07T08:51:42.313554Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 9121, node 10 2025-05-07T08:51:42.406779Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:51:42.406811Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:51:42.406823Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:51:42.407020Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6540 TClient is connected to server localhost:6540 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:51:43.514672Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:51:47.034285Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[10:7501623817514355011:2132];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:51:47.034388Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:51:49.184505Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7501623851874093965:2335], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:51:49.184743Z node 10 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:51:49.190473Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7501623851874093977:2338], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:51:49.197684Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480 2025-05-07T08:51:49.279162Z node 10 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [10:7501623851874093979:2339], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-05-07T08:51:49.364338Z node 10 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [10:7501623851874094031:2344] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:51:49.428076Z node 10 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [10:7501623851874094040:2343], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:1:1: Error: At function: KiCreateTable!
:1:1: Error: Failed to parse default expr for typename int4, error reason: Error while converting text to binary: yql/essentials/minikql/mkql_terminator.cpp:47: ERROR: invalid input syntax for type integer: "text" 2025-05-07T08:51:49.436826Z node 10 :KQP_SESSION WARN: kqp_session_actor.cpp:2147: SessionId: ydb://session/3?node_id=10&id=N2MxYWRjNjQtOWNhNTJiZTAtYjk0OWEzMWMtYjYxYzE2YzI=, ActorId: [10:7501623851874093963:2334], ActorState: ExecuteState, TraceId: 01jtmz34shevwb1wstzh5dr0g9, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id:
: Error: Type annotation, code: 1030
:1:1: Error: At function: KiCreateTable!
:1:1: Error: Failed to parse default expr for typename int4, error reason: Error while converting text to binary: yql/essentials/minikql/mkql_terminator.cpp:47: ERROR: invalid input syntax for type integer: "text" Trying to start YDB, gRPC: 20034, MsgBus: 65227 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/001cbf/r3tmp/tmpgcj8MJ/pdisk_1.dat 2025-05-07T08:51:51.506296Z node 11 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:51:51.756219Z node 11 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:51:51.852882Z node 11 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(11, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:51:51.853029Z node 11 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(11, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:51:51.855193Z node 11 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(11, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 20034, node 11 2025-05-07T08:51:51.968240Z node 11 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:51:51.968271Z node 11 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:51:51.968282Z node 11 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:51:51.968452Z node 11 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:65227 TClient is connected to server localhost:65227 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:51:52.933679Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:51:52.958783Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T08:52:02.355684Z node 11 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [11:7501623907806575134:2337], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:02.355848Z node 11 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:02.358611Z node 11 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [11:7501623907806575154:2340], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:02.369658Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480 2025-05-07T08:52:02.413238Z node 11 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [11:7501623907806575156:2341], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-05-07T08:52:02.510837Z node 11 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [11:7501623907806575207:2351] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:52:02.539106Z node 11 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [11:7501623907806575216:2345], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:1:1: Error: At function: KiCreateTable!
:1:1: Error: Failed to parse default expr for typename int4, error reason: Error while converting text to binary: yql/essentials/minikql/mkql_terminator.cpp:47: ERROR: invalid input syntax for type integer: "text" 2025-05-07T08:52:02.541826Z node 11 :KQP_SESSION WARN: kqp_session_actor.cpp:2147: SessionId: ydb://session/3?node_id=11&id=MTJlZjZiZTgtNjcyOWU4NWMtYTdjODFiMTYtNzE4MTg4Mw==, ActorId: [11:7501623907806575131:2335], ActorState: ExecuteState, TraceId: 01jtmz3e0hahhvwjsw98fesm3j, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id:
: Error: Type annotation, code: 1030
:1:1: Error: At function: KiCreateTable!
:1:1: Error: Failed to parse default expr for typename int4, error reason: Error while converting text to binary: yql/essentials/minikql/mkql_terminator.cpp:47: ERROR: invalid input syntax for type integer: "text" >> KikimrIcGateway::TestLoadServiceAccountSecretValueFromExternalDataSourceMetadata [GOOD] >> KikimrIcGateway::TestLoadMdbBasicSecretValueFromExternalDataSourceMetadata >> DataShardSnapshots::VolatileSnapshotAndLocalMKQLUpdate [GOOD] >> DataShardSnapshots::VolatileSnapshotReadTable >> KqpPg::CreateTempTableSerial [FAIL] >> KqpPg::DropSequence |89.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/statistics/aggregator/ut/ydb-core-statistics-aggregator-ut |89.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/statistics/aggregator/ut/ydb-core-statistics-aggregator-ut |89.6%| [LD] {RESULT} $(B)/ydb/core/statistics/aggregator/ut/ydb-core-statistics-aggregator-ut >> KqpUniqueIndex::UpdateOnFkAlreadyExist [GOOD] >> KqpUniqueIndex::UpdateImplicitNullInComplexFk2 >> KqpPg::PgUpdate+useSink [GOOD] >> KqpPg::PgUpdate-useSink >> KqpIndexes::ForbidViewModification >> KikimrIcGateway::TestLoadTokenSecretValueFromExternalDataSourceMetadata [GOOD] >> KikimrIcGateway::TestSecretsExistingValidation >> KqpIndexes::SelectConcurentTX [GOOD] >> KqpIndexes::SelectConcurentTX2 >> KqpPg::InsertNoTargetColumns_SerialNotNull-useSink [GOOD] >> BasicUsage::WriteAndReadSomeMessagesWithSyncCompression [GOOD] >> BasicUsage::WriteAndReadSomeMessagesWithNoCompression >> TPDiskTest::DeviceHaltTooLong [GOOD] >> TPDiskTest::ChangePDiskKey >> ReadSessionImplTest::DataReceivedCallbackReal [GOOD] >> ReadSessionImplTest::DataReceivedCallback >> DataShardSnapshots::LockedWriteDistributedCommitAborted+UseSink [GOOD] >> DataShardSnapshots::LockedWriteDistributedCommitAborted-UseSink >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-6 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-7 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/pg/unittest >> KqpPg::InsertNoTargetColumns_SerialNotNull-useSink [GOOD] Test command err: Trying to start YDB, gRPC: 21446, MsgBus: 2923 2025-05-07T08:50:53.697926Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501623611635635167:2190];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:50:53.698007Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/001cf4/r3tmp/tmpAqzKwB/pdisk_1.dat 2025-05-07T08:50:54.298539Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:50:54.298649Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:50:54.300987Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:50:54.330819Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 21446, node 1 2025-05-07T08:50:54.451534Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:50:54.451569Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to 
initialize from file: (empty maybe) 2025-05-07T08:50:54.451579Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:50:54.451723Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:2923 TClient is connected to server localhost:2923 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:50:55.115166Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:50:55.145115Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T08:50:57.493449Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623628815504870:2330], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:50:57.493677Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:50:57.494181Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623628815504897:2333], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:50:57.498320Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480 2025-05-07T08:50:57.511388Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-05-07T08:50:57.511625Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501623628815504899:2334], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-05-07T08:50:57.582375Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501623628815504950:2334] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:50:57.639810Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 Trying to start YDB, gRPC: 24555, MsgBus: 27925 2025-05-07T08:50:59.114498Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7501623633765541694:2162];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/001cf4/r3tmp/tmpi8ckfc/pdisk_1.dat 2025-05-07T08:50:59.227529Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-05-07T08:50:59.416418Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:50:59.416513Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:50:59.417884Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:50:59.422391Z node 2 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 24555, node 2 2025-05-07T08:50:59.514550Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:50:59.514575Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:50:59.514581Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:50:59.514713Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:27925 TClient is connected to server localhost:27925 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-05-07T08:51:00.301529Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:51:00.310471Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T08:51:02.758592Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7501623646650444116:2333], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:51:02.758658Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7501623646650444103:2329], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:51:02.758797Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:51:02.763654Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480 2025-05-07T08:51:02.798616Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7501623646650444119:2334], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-05-07T08:51:02.872988Z node 2 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [2:7501623646650444170:2332] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:51:02.913031Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 Trying to start YDB, gRPC: 22278, MsgBus: 26455 2025-05-07T08:51:04.595866Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7501623655143436902:2190];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:51:04.595910Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/001cf4/r3tmp/tmpduZf7K/pdisk_1.dat 2025-05-07T08:51:04.758833Z node 3 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025 ... TClient is connected to server localhost:24372 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:51:46.216793Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:51:46.227193Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-05-07T08:51:49.585424Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[10:7501623830127832640:2134];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:51:49.585521Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:51:52.984901Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7501623864487571602:2337], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:51:52.985123Z node 10 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:51:52.999005Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7501623864487571624:2341], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:51:53.004775Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480 2025-05-07T08:51:53.048346Z node 10 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [10:7501623864487571626:2342], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-05-07T08:51:53.102212Z node 10 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [10:7501623868782538973:2348] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:51:53.219606Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 2025-05-07T08:51:53.470609Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 2025-05-07T08:51:53.722456Z node 10 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [10:7501623868782539214:2370], status: BAD_REQUEST, issues:
: Error: Type annotation, code: 1030
:1:1: Error: At function: KiWriteTable!
:1:1: Error: Missing not null column in input: c. All not null columns should be initialized, code: 2032 2025-05-07T08:51:53.725323Z node 10 :KQP_SESSION WARN: kqp_session_actor.cpp:2147: SessionId: ydb://session/3?node_id=10&id=NGViNGU5ZDEtY2EwMzVhZmEtMjk5YmUyYTEtZDViY2M2MzM=, ActorId: [10:7501623868782539211:2368], ActorState: ExecuteState, TraceId: 01jtmz3em2edn4ghywa6hsmvwb, ReplyQueryCompileError, status BAD_REQUEST remove tx with tx_id: Trying to start YDB, gRPC: 4579, MsgBus: 22631 2025-05-07T08:51:56.207887Z node 11 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[11:7501623881477248321:2067];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:51:56.207961Z node 11 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/001cf4/r3tmp/tmp653Kx7/pdisk_1.dat 2025-05-07T08:51:56.858068Z node 11 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 4579, node 11 2025-05-07T08:51:56.923138Z node 11 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(11, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:51:56.923310Z node 11 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(11, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:51:56.931681Z node 11 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(11, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:51:57.122706Z node 11 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:51:57.122737Z node 11 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:51:57.122749Z node 11 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:51:57.122919Z node 11 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:22631 TClient is connected to server localhost:22631 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:52:00.009558Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 
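The KQP_COMPILE_ACTOR failure above (status BAD_REQUEST, code 2032) is a type-annotation rejection: KiWriteTable refuses the statement because column `c` is declared NOT NULL but is absent from the write input. A minimal YQL sketch of this class of error and its fix — the table name and column types here are hypothetical, not taken from the test:

-- Hypothetical table: column c is declared NOT NULL.
CREATE TABLE `/Root/Example` (
    a Uint64 NOT NULL,
    b Uint64,
    c Uint64 NOT NULL,
    PRIMARY KEY (a)
);

-- Fails compilation: "Missing not null column in input: c", code 2032.
UPSERT INTO `/Root/Example` (a, b) VALUES (1u, 2u);

-- Compiles: every NOT NULL column is explicitly initialized.
UPSERT INTO `/Root/Example` (a, b, c) VALUES (1u, 2u, 3u);

The same error recurs below for node 11 at 08:52:07; both occurrences are the expected negative-path assertions of the test, not infrastructure failures.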
2025-05-07T08:52:01.253386Z node 11 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[11:7501623881477248321:2067];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:52:01.253943Z node 11 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:52:07.112827Z node 11 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [11:7501623928721889251:2338], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:07.112969Z node 11 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:07.113470Z node 11 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [11:7501623928721889266:2341], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:07.119884Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480 2025-05-07T08:52:07.157168Z node 11 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [11:7501623928721889268:2342], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-05-07T08:52:07.259997Z node 11 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [11:7501623928721889319:2352] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:52:07.308536Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 2025-05-07T08:52:07.490237Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 2025-05-07T08:52:07.659573Z node 11 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [11:7501623928721889562:2369], status: BAD_REQUEST, issues:
: Error: Type annotation, code: 1030
:1:1: Error: At function: KiWriteTable!
:1:1: Error: Missing not null column in input: c. All not null columns should be initialized, code: 2032 2025-05-07T08:52:07.662517Z node 11 :KQP_SESSION WARN: kqp_session_actor.cpp:2147: SessionId: ydb://session/3?node_id=11&id=OGM0YzgxZGUtZDcwNjFjNy1kZDg5YTQyLTI1MDkwZGM0, ActorId: [11:7501623928721889560:2368], ActorState: ExecuteState, TraceId: 01jtmz3wa6ee4f8ecz2ytm9qvd, ReplyQueryCompileError, status BAD_REQUEST remove tx with tx_id: >> KqpPg::EquiJoin-useSink [GOOD] >> KqpPg::ExplainColumnsReorder >> KqpPg::AlterColumnSetDefaultFromSequence [FAIL] >> KqpPg::CreateTableIfNotExists_GenericQuery >> KqpPg::InsertFromSelect_NoReorder-useSink [GOOD] >> KqpPg::InsertFromSelect_Serial+useSink >> TPQTest::TestPartitionPerConsumerQuota [GOOD] >> TPQTest::TestPQPartialRead >> ReadSessionImplTest::DataReceivedCallback [GOOD] >> KqpIndexes::UniqAndNoUniqSecondaryIndex [GOOD] >> KqpIndexes::SelectFromIndexesAndFreeSpaceLogicDoesntTimeout >> TPDiskTest::ChangePDiskKey [GOOD] >> TPDiskTest::RecreateWithInvalidPDiskKey |89.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/replication/ydb_proxy/ut/ydb-core-tx-replication-ydb_proxy-ut |89.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/replication/ydb_proxy/ut/ydb-core-tx-replication-ydb_proxy-ut |89.6%| [LD] {RESULT} $(B)/ydb/core/tx/replication/ydb_proxy/ut/ydb-core-tx-replication-ydb_proxy-ut >> Viewer::StorageGroupOutputWithSpaceCheckDependsOnUsage [GOOD] >> Viewer::SimpleFeatureFlags >> TPDiskTest::RecreateWithInvalidPDiskKey [GOOD] >> TPDiskTest::SmallDisk10Gb >> KqpMultishardIndex::SortedRangeReadDesc ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/persqueue_public/ut/unittest >> ReadSessionImplTest::DataReceivedCallback [GOOD] Test command err: 2025-05-07T08:51:36.476227Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:51:36.476247Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:51:36.476263Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-05-07T08:51:36.476772Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-05-07T08:51:36.490707Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-05-07T08:51:36.490842Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:51:36.491173Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-05-07T08:51:36.491690Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:51:36.491831Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-1) 2025-05-07T08:51:36.491952Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-05-07T08:51:36.492003Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. 
Number of messages 1, size 3 bytes 2025-05-07T08:51:36.492885Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:51:36.492913Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:51:36.492933Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-05-07T08:51:36.493299Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-05-07T08:51:36.493955Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-05-07T08:51:36.494144Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:51:36.494358Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-05-07T08:51:36.494796Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:51:36.494929Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-1) 2025-05-07T08:51:36.495046Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-05-07T08:51:36.495093Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 3 bytes 2025-05-07T08:51:36.496109Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:51:36.496132Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:51:36.496156Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-05-07T08:51:36.496588Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-05-07T08:51:36.497351Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-05-07T08:51:36.497491Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:51:36.497707Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-05-07T08:51:36.498496Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:51:36.498781Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-1) 2025-05-07T08:51:36.498904Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-05-07T08:51:36.498963Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 3 bytes 2025-05-07T08:51:36.499881Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:51:36.499933Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:51:36.499953Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-05-07T08:51:36.500304Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. 
Initializing session 2025-05-07T08:51:36.501102Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-05-07T08:51:36.501227Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:51:36.501561Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-05-07T08:51:36.503275Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:51:36.503900Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-1) 2025-05-07T08:51:36.504022Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-05-07T08:51:36.504089Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 3 bytes 2025-05-07T08:51:36.505111Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:51:36.505132Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:51:36.505167Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-05-07T08:51:36.505585Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-05-07T08:51:36.506412Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-05-07T08:51:36.506541Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:51:36.506780Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-05-07T08:51:36.507226Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:51:36.507366Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-1) 2025-05-07T08:51:36.507478Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-05-07T08:51:36.507522Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 0 bytes 2025-05-07T08:51:36.508333Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:51:36.508377Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:51:36.508397Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-05-07T08:51:36.508739Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-05-07T08:51:36.509457Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-05-07T08:51:36.509591Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:51:36.509828Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-05-07T08:51:36.510238Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:51:36.510361Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-1) 2025-05-07T08:51:36.510436Z :DEBUG: Take Data. Partition 1. 
Read: {0, 0} (1-1) 2025-05-07T08:51:36.510470Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 0 bytes 2025-05-07T08:51:36.511457Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:51:36.511479Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:51:36.511502Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-05-07T08:51:36.511840Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-05-07T08:51:36.512391Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-05-07T08:51:36.512514Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:51:36.512713Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-05-07T08:51:36.513437Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:51:36.513602Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-1) 2025-05-07T08:51:36.513691Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-05-07T08:51:36.513729Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 0 bytes 2025-05-07T08:51:36.514721Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:51:36.514748Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:51:36.514772Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-05-07T08:51:36.515114Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-05-07T08:51:36.515813Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-05-07T08:51:36.515948Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:51:36.516222Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-05-07T08:51:36.517809Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:51:36.518250Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-1) 2025-05-07T08:51:36.518334Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-05-07T08:51:36.518377Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. 
Number of messages 1, size 3 bytes 2025-05-07T08:51:36.551213Z :ReadSession INFO: Random seed for debugging is 1746607896551180 2025-05-07T08:51:36.922387Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501623795274476049:2279];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:51:36.923860Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00363c/r3tmp/tmpqE0nm7/pdisk_1.dat 2025-05-07T08:51:37.131580Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-05-07T08:51:37.136330Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-05-07T08:51:37.183732Z node 2 :METADATA_ ... ignId:1) committing to position 3 prev 2 end 3 by cookie 3 2025-05-07T08:51:59.023687Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:342: Handle TEvRequest topic: 'rt3.dc1--test-topic' requestId: 2025-05-07T08:51:59.023726Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2788: [PQ: 72075186224037892] got client message batch for topic 'rt3.dc1--test-topic' partition 0 2025-05-07T08:51:59.023831Z node 2 :PERSQUEUE DEBUG: partition.cpp:3264: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Topic 'rt3.dc1--test-topic' partition 0 user user offset is set to 3 (startOffset 0) session shared/user_1_1_3284479289729163483_v1 2025-05-07T08:51:59.023945Z node 2 :PERSQUEUE DEBUG: partition.cpp:2182: [PQ: 72075186224037892, Partition: 0, State: StateIdle] === DumpKeyValueRequest === 2025-05-07T08:51:59.023963Z node 2 :PERSQUEUE DEBUG: partition.cpp:2183: [PQ: 72075186224037892, Partition: 0, State: StateIdle] --- delete ---------------- 2025-05-07T08:51:59.023980Z node 2 :PERSQUEUE DEBUG: partition.cpp:2191: [PQ: 72075186224037892, Partition: 0, State: StateIdle] --- write ----------------- 2025-05-07T08:51:59.023996Z node 2 :PERSQUEUE DEBUG: partition.cpp:2194: [PQ: 72075186224037892, Partition: 0, State: StateIdle] i0000000000 2025-05-07T08:51:59.024009Z node 2 :PERSQUEUE DEBUG: partition.cpp:2194: [PQ: 72075186224037892, Partition: 0, State: StateIdle] m0000000000cuser 2025-05-07T08:51:59.024022Z node 2 :PERSQUEUE DEBUG: partition.cpp:2194: [PQ: 72075186224037892, Partition: 0, State: StateIdle] m0000000000uuser 2025-05-07T08:51:59.024037Z node 2 :PERSQUEUE DEBUG: partition.cpp:2196: [PQ: 72075186224037892, Partition: 0, State: StateIdle] --- rename ---------------- 2025-05-07T08:51:59.024052Z node 2 :PERSQUEUE DEBUG: partition.cpp:2201: [PQ: 72075186224037892, Partition: 0, State: StateIdle] =========================== 2025-05-07T08:51:59.024078Z node 2 :PERSQUEUE DEBUG: read.h:262: CacheProxy. 
Passthrough write request to KV 2025-05-07T08:51:59.027460Z node 1 :PQ_READ_PROXY DEBUG: partition_actor.cpp:652: session cookie 1 consumer shared/user session shared/user_1_1_3284479289729163483_v1 TopicId: Topic rt3.dc1--test-topic in dc dc1 in database: Root, partition 0(assignId:1) initDone 1 event { Cookie: 3 } 2025-05-07T08:51:59.027510Z node 1 :PQ_READ_PROXY DEBUG: partition_actor.cpp:940: session cookie 1 consumer shared/user session shared/user_1_1_3284479289729163483_v1 TopicId: Topic rt3.dc1--test-topic in dc dc1 in database: Root, partition 0(assignId:1) commit done to position 3 endOffset 3 with cookie 3 2025-05-07T08:51:59.027551Z node 1 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:696: session cookie 1 consumer shared/user session shared/user_1_1_3284479289729163483_v1 replying for commits: assignId# 1, from# 3, to# 3, offset# 3 2025-05-07T08:51:59.026976Z node 2 :PERSQUEUE DEBUG: partition_read.cpp:774: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Topic 'rt3.dc1--test-topic' partition 0 user user readTimeStamp for offset 3 initiated queuesize 0 startOffset 0 ReadingTimestamp 0 rrg 0 2025-05-07T08:51:59.027036Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:524: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0 2025-05-07T08:51:59.027107Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:377: Answer ok topic: 'rt3.dc1--test-topic' partition: 0 messageNo: 0 requestId: cookie: 3 2025-05-07T08:51:59.030383Z :DEBUG: [/Root] [/Root] [3b07e5e8-117530a4-d8a89e33-218e5265] [dc1] Committed response: { cookies { assign_id: 1 partition_cookie: 3 } } 2025-05-07T08:51:59.062050Z :INFO: [] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|35197d9a-38a679f7-f8a65d27-d0d60e79_0] Write session will now close 2025-05-07T08:51:59.062138Z :DEBUG: [] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|35197d9a-38a679f7-f8a65d27-d0d60e79_0] Write session: aborting 2025-05-07T08:51:59.062721Z :INFO: [] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|35197d9a-38a679f7-f8a65d27-d0d60e79_0] Write session: gracefully shut down, all writes complete 2025-05-07T08:51:59.062769Z :DEBUG: [] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|35197d9a-38a679f7-f8a65d27-d0d60e79_0] Write session: destroy 2025-05-07T08:51:59.072998Z node 1 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 4 sessionId: test-message-group-id|35197d9a-38a679f7-f8a65d27-d0d60e79_0 grpc read done: success: 0 data: 2025-05-07T08:51:59.073026Z node 1 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 4 sessionId: test-message-group-id|35197d9a-38a679f7-f8a65d27-d0d60e79_0 grpc read failed 2025-05-07T08:51:59.073068Z node 1 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 4 sessionId: test-message-group-id|35197d9a-38a679f7-f8a65d27-d0d60e79_0 grpc closed 2025-05-07T08:51:59.073085Z node 1 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 4 sessionId: test-message-group-id|35197d9a-38a679f7-f8a65d27-d0d60e79_0 is DEAD 2025-05-07T08:51:59.073737Z node 1 :PQ_WRITE_PROXY DEBUG: writer.cpp:538: TPartitionWriter 72075186224037892 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-05-07T08:51:59.074497Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2899: [PQ: 72075186224037892] server disconnected, pipe [1:7501623889763759277:2618] destroyed 2025-05-07T08:51:59.074574Z node 2 
:PERSQUEUE DEBUG: partition_write.cpp:138: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::DropOwner. 2025-05-07T08:52:01.245888Z node 1 :PQ_READ_PROXY DEBUG: partition_actor.cpp:1252: session cookie 1 consumer shared/user session shared/user_1_1_3284479289729163483_v1 TopicId: Topic rt3.dc1--test-topic in dc dc1 in database: Root, partition 0(assignId:1) wait data in partition inited, cookie 5 from offset3 2025-05-07T08:52:09.017558Z node 1 :PQ_READ_PROXY DEBUG: partition_actor.cpp:1252: session cookie 1 consumer shared/user session shared/user_1_1_3284479289729163483_v1 TopicId: Topic rt3.dc1--test-topic in dc dc1 in database: Root, partition 0(assignId:1) wait data in partition inited, cookie 6 from offset3 2025-05-07T08:52:09.068878Z :INFO: [/Root] [/Root] [3b07e5e8-117530a4-d8a89e33-218e5265] Closing read session. Close timeout: 0.000000s 2025-05-07T08:52:09.068984Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): dc1:test-topic:0:1:2:3 2025-05-07T08:52:09.069066Z :INFO: [/Root] [/Root] [3b07e5e8-117530a4-d8a89e33-218e5265] Counters: { Errors: 0 CurrentSessionLifetimeMs: 17022 BytesRead: 24 MessagesRead: 3 BytesReadCompressed: 24 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-05-07T08:52:09.069181Z :NOTICE: [/Root] [/Root] [3b07e5e8-117530a4-d8a89e33-218e5265] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Close with zero timeout " } 2025-05-07T08:52:09.069250Z :DEBUG: [/Root] [/Root] [3b07e5e8-117530a4-d8a89e33-218e5265] [dc1] Abort session to cluster 2025-05-07T08:52:09.069865Z :NOTICE: [/Root] [/Root] [3b07e5e8-117530a4-d8a89e33-218e5265] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2025-05-07T08:52:09.078943Z node 1 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 1 consumer shared/user session shared/user_1_1_3284479289729163483_v1 grpc read done: success# 0, data# { } 2025-05-07T08:52:09.078993Z node 1 :PQ_READ_PROXY INFO: read_session_actor.cpp:125: session cookie 1 consumer shared/user session shared/user_1_1_3284479289729163483_v1 grpc read failed 2025-05-07T08:52:09.079039Z node 1 :PQ_READ_PROXY INFO: read_session_actor.cpp:92: session cookie 1 consumer shared/user session shared/user_1_1_3284479289729163483_v1 grpc closed 2025-05-07T08:52:09.082678Z node 1 :PQ_READ_PROXY INFO: read_session_actor.cpp:377: session cookie 1 consumer shared/user session shared/user_1_1_3284479289729163483_v1 is DEAD 2025-05-07T08:52:09.094628Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2433: [PQ: 72075186224037892] Destroy direct read session shared/user_1_1_3284479289729163483_v1 2025-05-07T08:52:09.094676Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2899: [PQ: 72075186224037892] server disconnected, pipe [1:7501623863993955078:2550] destroyed 2025-05-07T08:52:09.098640Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:138: Direct read cache: server session deregistered: shared/user_1_1_3284479289729163483_v1 2025-05-07T08:52:09.139109Z node 1 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037893][rt3.dc1--test-topic] pipe [1:7501623863993955075:2547] disconnected; active server actors: 1 2025-05-07T08:52:09.139831Z node 1 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1673: [72075186224037893][rt3.dc1--test-topic] pipe [1:7501623863993955075:2547] client user disconnected session shared/user_1_1_3284479289729163483_v1 2025-05-07T08:52:09.776132Z node 1 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1073: TxId: 281474976710726, task: 1, CA Id [1:7501623937008399956:2691]. Got EvDeliveryProblem, TabletId: 72075186224037890, NotDelivered: 0 2025-05-07T08:52:09.807367Z node 1 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1073: TxId: 281474976710726, task: 1, CA Id [1:7501623937008399956:2691]. Got EvDeliveryProblem, TabletId: 72075186224037890, NotDelivered: 1 2025-05-07T08:52:09.867297Z node 1 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1073: TxId: 281474976710726, task: 1, CA Id [1:7501623937008399956:2691]. Got EvDeliveryProblem, TabletId: 72075186224037890, NotDelivered: 1 2025-05-07T08:52:09.951791Z node 1 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1073: TxId: 281474976710726, task: 1, CA Id [1:7501623937008399956:2691]. Got EvDeliveryProblem, TabletId: 72075186224037890, NotDelivered: 1 2025-05-07T08:52:10.036780Z node 1 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1073: TxId: 281474976710726, task: 1, CA Id [1:7501623937008399956:2691]. Got EvDeliveryProblem, TabletId: 72075186224037890, NotDelivered: 1 2025-05-07T08:52:11.217046Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:52:11.217133Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:52:11.217190Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-05-07T08:52:11.217549Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. 
Initializing session 2025-05-07T08:52:11.218300Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-05-07T08:52:11.218511Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:52:11.219554Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-05-07T08:52:11.220387Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function 2025-05-07T08:52:11.220900Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function 2025-05-07T08:52:11.221129Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (2-2) 2025-05-07T08:52:11.221228Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-1) 2025-05-07T08:52:11.221290Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-05-07T08:52:11.221324Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (2-2) 2025-05-07T08:52:11.221498Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 8 bytes 2025-05-07T08:52:11.221544Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 8 bytes >> TPDiskTest::SmallDisk10Gb [GOOD] >> TPDiskTest::PDiskIncreaseLogChunksLimitAfterRestart >> DataShardSnapshots::VolatileSnapshotReadTable [GOOD] >> DataShardSnapshots::VolatileSnapshotRefreshDiscard >> PersQueueSdkReadSessionTest::SettingsValidation [GOOD] >> PersQueueSdkReadSessionTest::SpecifyClustersExplicitly >> KqpUniqueIndex::UpsertExplicitNullInComplexFk [GOOD] >> KqpUniqueIndex::UpsertImplicitNullInComplexFk >> Compression::WriteGZIP [GOOD] >> Compression::WriteZSTD |89.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/replication/service/ut_worker/ydb-core-tx-replication-service-ut_worker |89.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/replication/service/ut_worker/ydb-core-tx-replication-service-ut_worker |89.6%| [LD] {RESULT} $(B)/ydb/core/tx/replication/service/ut_worker/ydb-core-tx-replication-service-ut_worker >> KqpIndexes::UniqIndexComplexPkComplexFkOverlap >> KqpIndexes::ForbidViewModification [GOOD] >> KqpIndexes::IndexOr >> KqpPg::DropSequence [FAIL] >> KqpPg::DeleteWithQueryService+useSink >> BasicUsage::TSimpleWriteSession_AutoSeqNo_BasicUsage [GOOD] >> BasicUsage::TWriteSession_AutoBatching [GOOD] >> BasicUsage::TWriteSession_BatchingProducesContinueTokens [GOOD] >> BasicUsage::BrokenCredentialsProvider >> KqpIndexes::SecondaryIndexOrderBy [GOOD] >> KqpIndexes::SecondaryIndexOrderBy2 >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-7 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-8 >> KqpPg::PgUpdate-useSink [GOOD] >> KqpPg::JoinWithQueryService-StreamLookup >> KikimrIcGateway::TestSecretsExistingValidation [GOOD] >> KqpUniqueIndex::UpdateImplicitNullInComplexFk2 [GOOD] |89.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/ut/yql/ydb-core-kqp-ut-yql |89.6%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/yql/ydb-core-kqp-ut-yql |89.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/yql/ydb-core-kqp-ut-yql >> KqpIndexes::SelectConcurentTX2 [GOOD] >> KqpIndexes::SelectFromAsyncIndexedTable >> TPDiskTest::PDiskIncreaseLogChunksLimitAfterRestart [GOOD] >> 
TPDiskTest::AllRequestsAreAnsweredOnPDiskRestart >> DataShardSnapshots::LockedWriteDistributedCommitAborted-UseSink [GOOD] >> DataShardSnapshots::LockedWriteDistributedCommitCrossConflict+UseSink >> KqpPg::CreateTableIfNotExists_GenericQuery [GOOD] >> KqpPg::AlterColumnSetDefaultFromSequenceWithSchemaname ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/provider/ut/unittest >> KikimrIcGateway::TestSecretsExistingValidation [GOOD] Test command err: Trying to start YDB, gRPC: 26445, MsgBus: 6194 2025-05-07T08:51:41.214705Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501623813891132389:2196];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:51:41.220761Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/004677/r3tmp/tmp8Yqhds/pdisk_1.dat 2025-05-07T08:51:41.739766Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:51:41.739886Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:51:41.743327Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:51:41.767183Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 26445, node 1 2025-05-07T08:51:41.902327Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:51:41.902349Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:51:41.902374Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:51:41.902493Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6194 TClient is connected to server localhost:6194 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:51:42.781510Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T08:51:42.810999Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T08:51:45.664506Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623831071002143:2336], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:51:45.664654Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:51:46.121726Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 2025-05-07T08:51:46.174895Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501623813891132389:2196];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:51:46.174972Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:51:46.249271Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 2025-05-07T08:51:46.293040Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T08:51:46.340496Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T08:51:46.434875Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623835365969760:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:51:46.434987Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:51:46.435473Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623835365969765:2370], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:51:46.440295Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710664:3, at schemeshard: 72057594046644480 2025-05-07T08:51:46.450840Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501623835365969767:2371], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710664 completed, doublechecking } 2025-05-07T08:51:46.560440Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501623835365969818:2569] txid# 281474976710665, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 11], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } Trying to start YDB, gRPC: 31468, MsgBus: 8359 2025-05-07T08:51:47.714878Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7501623841426018346:2065];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:51:47.714914Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/004677/r3tmp/tmpqyDYdU/pdisk_1.dat 2025-05-07T08:51:48.125170Z node 2 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:51:48.203064Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:51:48.203150Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:51:48.211417Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 31468, node 2 2025-05-07T08:51:48.433329Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:51:48.433350Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:51:48.433357Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:51:48.433471Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8359 TClient is connected to server localhost:8359 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:51:49.367762Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 
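The recurring "Resource pool default not found" warnings throughout this run are a bootstrap race, not test failures: on first query the workload service tries to fetch `/Root/.metadata/workload_manager/pools/default`, gets NOT_FOUND, proposes ESchemeOpCreateResourcePool, and schedules a retry ("Transaction ... completed, doublechecking"); a concurrent creator then hits "path exist, request accepts it", which is treated as success because the create is idempotent. For reference, a pool can also be created explicitly — a hedged sketch, assuming the workload-manager YQL syntax of current YDB releases; the option values are illustrative:

-- Explicit creation of the default resource pool (illustrative limits).
CREATE RESOURCE POOL default WITH (
    CONCURRENT_QUERY_LIMIT = 10,
    QUEUE_SIZE = 100
);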
2025-05-07T08:51:49.396584Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:51:49.597122Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:51:49.986672Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:51:50.084134Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:51:52.722450Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7501623841426018346:2065];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:51:52.722526Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect p ... 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7501623875785759082:2480], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T08:51:55.426603Z node 2 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [2:7501623875785759133:3431] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:51:56.811389Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:1, at schemeshard: 72057594046644480 2025-05-07T08:51:58.353150Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710679:0, at schemeshard: 72057594046644480 2025-05-07T08:51:59.597773Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710686:1, at schemeshard: 72057594046644480 2025-05-07T08:52:01.004550Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710693:0, at schemeshard: 72057594046644480 2025-05-07T08:52:01.712981Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710698:0, at schemeshard: 72057594046644480 2025-05-07T08:52:02.499414Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710704:0, at schemeshard: 72057594046644480 2025-05-07T08:52:03.090881Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7261: Cannot get console configs 2025-05-07T08:52:03.090909Z node 2 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:52:03.136148Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976715758:2, at schemeshard: 72057594046644480 2025-05-07T08:52:03.237887Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976715759:0, at schemeshard: 72057594046644480 2025-05-07T08:52:06.901770Z node 2 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jtmz3hqb8htfge7kq8ccgt9x", SessionId: ydb://session/3?node_id=2&id=ZWVmNjk2ZWMtOGM3MzNlNDktZjM1MzU2ODEtNTVmYjVhMDE=, Slow query, duration: 10.120200s, status: SUCCESS, user: UNAUTHENTICATED, results: 0b, text: "CREATE OBJECT myTokenSecretId (TYPE SECRET) WITH value = `token`;", parameters: 0b 2025-05-07T08:52:07.174733Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976710734:0, at schemeshard: 72057594046644480 Trying to start YDB, gRPC: 2707, MsgBus: 15228 test_client.cpp: SetPath # 
/home/runner/.ya/build/build_root/zvgn/004677/r3tmp/tmpiQf3er/pdisk_1.dat 2025-05-07T08:52:10.048009Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:52:10.155396Z node 3 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:52:10.192087Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:52:10.192200Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:52:10.199360Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 2707, node 3 2025-05-07T08:52:10.417595Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:52:10.417620Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:52:10.417628Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:52:10.417764Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:15228 TClient is connected to server localhost:15228 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-05-07T08:52:11.846342Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 2025-05-07T08:52:11.863760Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:52:12.012386Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:52:12.353331Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 
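The TestSecretsExistingValidation run above exercises the metadata secrets path: the KQP_SLOW_LOG entry records the literal statement it executed, and an ESchemeOpCreateExternalDataSource follows immediately at 08:52:07.174733Z. A hedged sketch of how such a secret is typically consumed — the data source name, SOURCE_TYPE, LOCATION, and auth option names below are assumptions for illustration, not taken from this test:

-- Statement taken verbatim from the slow-query log above.
CREATE OBJECT myTokenSecretId (TYPE SECRET) WITH value = `token`;

-- Hypothetical consumer: an external data source authenticating via the secret.
CREATE EXTERNAL DATA SOURCE `/Root/ExampleSource` WITH (
    SOURCE_TYPE = "ObjectStorage",
    LOCATION = "https://storage.example.net/bucket/",
    AUTH_METHOD = "TOKEN",
    TOKEN_SECRET_NAME = "myTokenSecretId"
);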
2025-05-07T08:52:12.468231Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:52:16.440692Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7501623966064127081:2410], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:16.440793Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:16.537204Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T08:52:16.628674Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T08:52:16.715042Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T08:52:16.790426Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T08:52:16.895105Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T08:52:16.970216Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T08:52:17.029632Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T08:52:17.209510Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7501623970359095043:2473], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:17.209614Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:17.210033Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7501623970359095048:2476], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:17.214787Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-05-07T08:52:17.229007Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7501623970359095050:2477], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T08:52:17.336104Z node 3 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [3:7501623970359095111:3429] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpUniqueIndex::UpdateImplicitNullInComplexFk2 [GOOD] Test command err: Trying to start YDB, gRPC: 28896, MsgBus: 24439 2025-05-07T08:51:52.387341Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501623864219163051:2203];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:51:52.387529Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003c89/r3tmp/tmponc9QV/pdisk_1.dat 2025-05-07T08:51:53.203898Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:51:53.203981Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:51:53.206756Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:51:53.214376Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 28896, node 1 2025-05-07T08:51:53.442599Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:51:53.442623Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:51:53.442630Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:51:53.442752Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:24439 TClient is connected to server localhost:24439 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-05-07T08:51:54.793066Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:51:54.860299Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:51:55.084568Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:51:55.459507Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:51:55.564046Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:51:57.370268Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501623864219163051:2203];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:51:57.370329Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:51:59.594036Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623894283935669:2412], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:51:59.594167Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:00.720194Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T08:52:00.767770Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T08:52:00.838880Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T08:52:00.887243Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T08:52:00.940902Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T08:52:00.987547Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T08:52:01.074435Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T08:52:01.169942Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623902873870956:2479], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:01.170082Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:01.170525Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623902873870961:2482], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:01.175470Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-05-07T08:52:01.188416Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501623902873870963:2483], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T08:52:01.243897Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501623902873871014:3429] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:52:02.685220Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:52:05.894938Z node 1 :KQP_EXECUTER ERROR: kqp_literal_executer.cpp:107: ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: 01jtmz3sp28638ntthaznqq5x2, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NTgyNGNlY2UtNWIzODUzOTctZDY5ZWU4OWYtMWEyNDJhNDI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. TKqpLiteralExecuter, TKqpEnsure failed. 2025-05-07T08:52:05.907609Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2578: SessionId: ydb://session/3?node_id=1&id=NTgyNGNlY2UtNWIzODUzOTctZDY5ZWU4OWYtMWEyNDJhNDI=, ActorId: [1:7501623911463806643:2584], ActorState: ExecuteState, TraceId: 01jtmz3sp28638ntthaznqq5x2, Create QueryResponse for error on request, msg: 2025-05-07T08:52:06.927153Z node 1 :KQP_EXECUTER ERROR: kqp_literal_executer.cpp:107: ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: 01jtmz3tn183cxrq0490n8hyeq, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NTgyNGNlY2UtNWIzODUzOTctZDY5ZWU4OWYtMWEyNDJhNDI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. TKqpLiteralExecuter, TKqpEnsure failed. 
2025-05-07T08:52:06.927503Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2578: SessionId: ydb://session/3?node_id=1&id=NTgyNGNlY2UtNWIzODUzOTctZDY5ZWU4OWYtMWEyNDJhNDI=, ActorId: [1:7501623911463806643:2584], ActorState: ExecuteState, TraceId: 01jtmz3tn183cxrq0490n8hyeq, Create QueryResponse for error on request, msg: Trying to start YDB, gRPC: 18094, MsgBus: 29080 2025-05-07T08:52:08.039092Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7501623933464102159:2201];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:52:08.039165Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003c89/r3tmp/tmp4MSPEH/pdisk_1.dat 2025-05-07T08:52:08.307388Z node 2 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:52:08.326502Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:52:08.326634Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:52:08.328748Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 18094, node 2 2025-05-07T08:52:08.422673Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:52:08.422701Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:52:08.422712Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:52:08.422864Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:29080 TClient is connected to server localhost:29080 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:52:09.004029Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T08:52:09.030867Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T08:52:09.045690Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:52:09.202262Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:52:09.492226Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:52:09.627087Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:52:12.355417Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7501623950643972830:2407], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:12.355535Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:12.436819Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T08:52:12.484989Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T08:52:12.534337Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T08:52:12.599013Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T08:52:12.655761Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T08:52:12.749841Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T08:52:12.865085Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T08:52:13.008319Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7501623954938940783:2470], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:13.008384Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:13.008721Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7501623954938940788:2473], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:13.012950Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-05-07T08:52:13.039383Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710669, at schemeshard: 72057594046644480 2025-05-07T08:52:13.039712Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7501623954938940790:2474], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T08:52:13.042405Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7501623933464102159:2201];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:52:13.042469Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:52:13.105567Z node 2 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [2:7501623954938940844:3411] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:52:14.772237Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:52:18.826608Z node 2 :KQP_EXECUTER ERROR: kqp_literal_executer.cpp:107: ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: 01jtmz45pe99baktertan7yym8, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=Mjc5NmNjZWQtNTJhZGI0ZjctMTQ5OTc5MDUtNzJkZWNjNWU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. TKqpLiteralExecuter, TKqpEnsure failed. 2025-05-07T08:52:18.826901Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2578: SessionId: ydb://session/3?node_id=2&id=Mjc5NmNjZWQtNTJhZGI0ZjctMTQ5OTc5MDUtNzJkZWNjNWU=, ActorId: [2:7501623963528876520:2576], ActorState: ExecuteState, TraceId: 01jtmz45pe99baktertan7yym8, Create QueryResponse for error on request, msg: 2025-05-07T08:52:20.519396Z node 2 :KQP_EXECUTER ERROR: kqp_literal_executer.cpp:107: ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: 01jtmz47gv71hdfavk7ge3rws4, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=Mjc5NmNjZWQtNTJhZGI0ZjctMTQ5OTc5MDUtNzJkZWNjNWU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. TKqpLiteralExecuter, TKqpEnsure failed. 
2025-05-07T08:52:20.519778Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2578: SessionId: ydb://session/3?node_id=2&id=Mjc5NmNjZWQtNTJhZGI0ZjctMTQ5OTc5MDUtNzJkZWNjNWU=, ActorId: [2:7501623963528876520:2576], ActorState: ExecuteState, TraceId: 01jtmz47gv71hdfavk7ge3rws4, Create QueryResponse for error on request, msg: >> KqpPg::ExplainColumnsReorder [GOOD] >> KqpPg::InsertFromSelect_Serial+useSink [GOOD] >> KqpPg::InsertFromSelect_Serial-useSink >> TPDiskTest::AllRequestsAreAnsweredOnPDiskRestart [GOOD] >> TPDiskTest::ChunkWriteDifferentOffsetAndSize >> DataShardSnapshots::VolatileSnapshotRefreshDiscard [GOOD] >> DataShardSnapshots::VolatileSnapshotTimeout >> KqpIndexes::SecondaryIndexUpsert1DeleteUpdate >> KqpVectorIndexes::OrderByCosineSimilarityNotNullableLevel1 >> TPDiskTest::ChunkWriteDifferentOffsetAndSize [GOOD] >> TPDiskTest::PlainChunksWriteReadALot |89.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/ut/runtime/ydb-core-kqp-ut-runtime |89.6%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/runtime/ydb-core-kqp-ut-runtime |89.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/runtime/ydb-core-kqp-ut-runtime ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/pg/unittest >> KqpPg::ExplainColumnsReorder [GOOD] Test command err: Trying to start YDB, gRPC: 26869, MsgBus: 18057 2025-05-07T08:50:53.403724Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501623610716551836:2066];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:50:53.410769Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00207f/r3tmp/tmpGAG3Xc/pdisk_1.dat 2025-05-07T08:50:54.031667Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 26869, node 1 2025-05-07T08:50:54.110153Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:50:54.110187Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:50:54.110197Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:50:54.110304Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-05-07T08:50:54.127247Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:50:54.127358Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:50:54.129447Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:18057 TClient is connected to server localhost:18057 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:50:54.676824Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:50:57.029718Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623627896421684:2331], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:50:57.029843Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:50:57.030297Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623627896421696:2334], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:50:57.035373Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480 2025-05-07T08:50:57.066070Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501623627896421698:2335], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-05-07T08:50:57.143239Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501623627896421749:2336] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } Trying to start YDB, gRPC: 8126, MsgBus: 1925 2025-05-07T08:50:57.960574Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7501623626204729526:2063];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:50:57.960633Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00207f/r3tmp/tmpUq1Wtd/pdisk_1.dat 2025-05-07T08:50:58.111362Z node 2 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:50:58.124830Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:50:58.124910Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:50:58.127281Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 8126, node 2 2025-05-07T08:50:58.270550Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:50:58.270581Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:50:58.270588Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:50:58.270724Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:1925 TClient is connected to server localhost:1925 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:50:58.811461Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T08:50:58.818795Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-05-07T08:51:02.059876Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7501623647679566668:2335], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:51:02.059904Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7501623647679566660:2332], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:51:02.059957Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:51:02.063301Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480 2025-05-07T08:51:02.072974Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7501623647679566674:2336], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-05-07T08:51:02.129162Z node 2 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [2:7501623647679566725:2332] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } Trying to start YDB, gRPC: 21325, MsgBus: 22067 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00207f/r3tmp/tmpP59GDL/pdisk_1.dat 2025-05-07T08:51:03.182554Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:51:03.316329Z node 3 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:51:03.347018Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:51:03.347116Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:51:03.352564Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 21325, node 3 2025-05-07T08:51:03.510528Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:51:03.510548Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:51:03.510555Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:51:03.510708Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:22067 TClient is connected to server localhost:22067 W ... 
hanges)" severity: 1 } Trying to start YDB, gRPC: 18915, MsgBus: 10423 2025-05-07T08:51:56.767387Z node 11 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[11:7501623880433799198:2211];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00207f/r3tmp/tmpoJPCHb/pdisk_1.dat 2025-05-07T08:51:56.831363Z node 11 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-05-07T08:51:57.049240Z node 11 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(11, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:51:57.058233Z node 11 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(11, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:51:57.108272Z node 11 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(11, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 18915, node 11 2025-05-07T08:51:57.238262Z node 11 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:51:57.250688Z node 11 :GRPC_SERVER WARN: grpc_request_proxy.cpp:509: SchemeBoardDelete /Root Strong=0 2025-05-07T08:51:57.266310Z node 11 :GRPC_SERVER WARN: grpc_request_proxy.cpp:509: SchemeBoardDelete /Root Strong=0 2025-05-07T08:51:57.390763Z node 11 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:51:57.390795Z node 11 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:51:57.390809Z node 11 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:51:57.391008Z node 11 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:10423 TClient is connected to server localhost:10423 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:51:59.376811Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T08:52:01.652015Z node 11 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[11:7501623880433799198:2211];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:52:01.652126Z node 11 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:52:08.663575Z node 11 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [11:7501623931973407281:2345], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:08.663726Z node 11 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:08.689532Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 2025-05-07T08:52:08.806804Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 2025-05-07T08:52:08.916336Z node 11 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [11:7501623931973407460:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:08.916538Z node 11 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:08.920056Z node 11 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [11:7501623931973407465:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:08.929697Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710660:3, at schemeshard: 72057594046644480 2025-05-07T08:52:08.972639Z node 11 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [11:7501623931973407467:2369], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710660 completed, doublechecking } 2025-05-07T08:52:09.058759Z node 11 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [11:7501623936268374814:2465] txid# 281474976710661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:52:21.849082Z node 12 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [12:324:2367], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:52:21.849473Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:52:21.849740Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00207f/r3tmp/tmpojLkn2/pdisk_1.dat 2025-05-07T08:52:22.461206Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-05-07T08:52:22.573544Z node 12 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:52:22.652396Z node 12 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:52:22.652634Z node 12 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:52:22.667645Z node 12 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:52:22.803933Z node 12 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [12:642:2550], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:22.804120Z node 12 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [12:652:2555], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:22.804290Z node 12 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:22.821314Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715657:3, at schemeshard: 72057594046644480 2025-05-07T08:52:22.988089Z node 12 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [12:656:2558], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715657 completed, doublechecking } 2025-05-07T08:52:23.058558Z node 12 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [12:726:2597] txid# 281474976715658, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } PreparedQuery: "d014a195-a1433fc-e3629b42-3d688156" QueryAst: "(\n(let $1 (PgType \'int4))\n(let $2 \'(\'(\'\"_logical_id\" \'218) \'(\'\"_id\" \'\"d7e642de-5c460328-e9432836-f716f6b1\") \'(\'\"_partition_mode\" \'\"single\")))\n(let $3 (DqPhyStage \'() (lambda \'() (Iterator (AsList (AsStruct \'(\'\"x\" (PgConst \'1 $1)) \'(\'\"y\" (PgConst \'2 $1)))))) $2))\n(let $4 (DqCnResult (TDqOutput $3 \'\"0\") \'(\'\"y\" \'\"x\")))\n(return (KqpPhysicalQuery \'((KqpPhysicalTx \'($3) \'($4) \'() \'(\'(\'\"type\" \'\"generic\")))) \'((KqpTxResultBinding (ListType (StructType \'(\'\"x\" $1) \'(\'\"y\" $1))) \'\"0\" \'\"0\")) \'(\'(\'\"type\" \'\"query\"))))\n)\n" QueryPlan: "{\"Plan\":{\"Plans\":[{\"PlanNodeId\":2,\"Plans\":[{\"PlanNodeId\":1,\"Operators\":[{\"Inputs\":[],\"Iterator\":\"[{x: \\\"1\\\",y: \\\"2\\\"}]\",\"Name\":\"Iterator\"}],\"Node Type\":\"ConstantExpr\"}],\"Node Type\":\"ResultSet\",\"PlanNodeType\":\"ResultSet\"}],\"Node Type\":\"Query\",\"Stats\":{\"ResourcePoolId\":\"default\"},\"PlanNodeType\":\"Query\"},\"meta\":{\"version\":\"0.2\",\"type\":\"query\"},\"tables\":[],\"SimplifiedPlan\":{\"PlanNodeId\":0,\"Plans\":[{\"PlanNodeId\":1,\"Node Type\":\"ResultSet\",\"PlanNodeType\":\"ResultSet\"}],\"Node Type\":\"Query\",\"OptimizerStats\":{\"EquiJoinsCount\":0,\"JoinsCount\":0},\"PlanNodeType\":\"Query\"}}" YdbResults { columns { name: "y" type { pg_type { oid: 23 } } } columns { name: "x" type { pg_type { oid: 23 } } } } QueryDiagnostics: "" >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-8 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-9 >> KqpMultishardIndex::SortedRangeReadDesc [GOOD] >> KqpMultishardIndex::SortByPk >> KqpIndexes::SelectFromIndexesAndFreeSpaceLogicDoesntTimeout [GOOD] >> KqpIndexes::Uint8Index >> KikimrIcGateway::TestLoadMdbBasicSecretValueFromExternalDataSourceMetadata [GOOD] >> KqpVectorIndexes::OrderByCosineDistanceNullableLevel1 >> Viewer::SimpleFeatureFlags [GOOD] >> BasicUsage::WriteAndReadSomeMessagesWithNoCompression [GOOD] >> BasicUsage::TWriteSession_WriteAndReadAndCommitRandomMessages >> KqpPg::JoinWithQueryService-StreamLookup [GOOD] >> KqpPg::PgAggregate+useSink >> KqpIndexes::IndexOr [GOOD] >> KqpIndexes::IndexFilterPushDown ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/provider/ut/unittest >> KikimrIcGateway::TestLoadMdbBasicSecretValueFromExternalDataSourceMetadata [GOOD] Test command err: Trying to start YDB, gRPC: 32016, MsgBus: 18824 2025-05-07T08:51:37.415989Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501623800295954937:2126];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:51:37.416035Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/004680/r3tmp/tmpuvB80y/pdisk_1.dat 2025-05-07T08:51:37.927019Z node 1 :IMPORT WARN: 
schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:51:37.931100Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:51:37.931186Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:51:37.943635Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 32016, node 1 2025-05-07T08:51:38.021642Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:51:38.021673Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:51:38.021685Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:51:38.021801Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:18824 TClient is connected to server localhost:18824 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:51:38.694040Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:51:38.716905Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T08:51:38.748721Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:51:38.916402Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:51:39.111672Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T08:51:39.181168Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:51:41.300900Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623817475825704:2406], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:51:41.301052Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:51:41.761306Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T08:51:41.846273Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T08:51:41.883647Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T08:51:41.923648Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T08:51:41.977464Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T08:51:42.026559Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T08:51:42.106261Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T08:51:42.214144Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623821770793667:2470], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:51:42.214275Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:51:42.218167Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623821770793672:2473], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:51:42.230011Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-05-07T08:51:42.250241Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501623821770793674:2474], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T08:51:42.352669Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501623821770793727:3418] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:51:42.418443Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501623800295954937:2126];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:51:42.418515Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:51:43.639598Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976710672:0, at schemeshard: 72057594046644480 2025-05-07T08:51:43.658409Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalTable, opId: 281474976710673:0, at schemeshard: 72057594046644480 Trying to start YDB, gRPC: 12449, MsgBus: 30203 2025-05-07T08:51:44.599069Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7501623830388196781:2193];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:51:44.599111Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/004680/r3tmp/tmpozc4CS/pdisk_1.dat 2025-05-07T08:51:44.917390Z node 2 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:51:44.939426Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:51:44.939530Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:51:44.945584Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 12449, node 2 2025-05-07T08:51:45.126697Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:51:45.126719Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:51:45.126728Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:51:45.126850Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:30203 TClient is connected to server localhost:30203 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" P ... 8:51:59.902341Z node 2 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:52:02.531221Z node 2 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jtmz3d3mfe0zegbqakpy4gez", SessionId: ydb://session/3?node_id=2&id=ZjM4NzI1MWMtZDQzMjhlYjYtNzEzZTIzZDctZTViZmI2YTU=, Slow query, duration: 10.476565s, status: SUCCESS, user: UNAUTHENTICATED, results: 0b, text: "CREATE OBJECT mySaSecretId (TYPE SECRET) WITH value = `mySaSecretValue`;", parameters: 0b 2025-05-07T08:52:02.722230Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976710718:0, at schemeshard: 72057594046644480 2025-05-07T08:52:02.737703Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalTable, opId: 281474976710719:0, at schemeshard: 72057594046644480 Trying to start YDB, gRPC: 20947, MsgBus: 21002 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/004680/r3tmp/tmp2LNsye/pdisk_1.dat 2025-05-07T08:52:06.933558Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:52:07.008832Z node 3 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:52:07.048014Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:52:07.048121Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:52:07.053266Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 20947, node 3 2025-05-07T08:52:07.206687Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:52:07.206714Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:52:07.206723Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:52:07.206875Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:21002 TClient is connected to server localhost:21002 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:52:08.054451Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:52:08.087667Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:52:08.237589Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:52:08.676923Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:52:08.825385Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:52:13.311706Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7501623952311340123:2410], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:13.311818Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:13.428813Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-05-07T08:52:13.531470Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-05-07T08:52:13.639376Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-05-07T08:52:13.701382Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-05-07T08:52:13.784865Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-05-07T08:52:13.944938Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-05-07T08:52:14.045821Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-05-07T08:52:14.198766Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7501623956606308105:2474], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:14.198852Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:14.199224Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7501623956606308110:2477], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:14.204336Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-05-07T08:52:14.226359Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7501623956606308112:2478], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking }
2025-05-07T08:52:14.326198Z node 3 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [3:7501623956606308165:3428] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 }
2025-05-07T08:52:16.361206Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:1, at schemeshard: 72057594046644480
2025-05-07T08:52:17.165323Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715675:0, at schemeshard: 72057594046644480
2025-05-07T08:52:18.219295Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715680:1, at schemeshard: 72057594046644480
2025-05-07T08:52:19.112960Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715685:0, at schemeshard: 72057594046644480
2025-05-07T08:52:20.045920Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715690:0, at schemeshard: 72057594046644480
2025-05-07T08:52:20.952168Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715693:0, at schemeshard: 72057594046644480
2025-05-07T08:52:21.879096Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710758:2, at schemeshard: 72057594046644480
2025-05-07T08:52:21.996418Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710759:0, at schemeshard: 72057594046644480
2025-05-07T08:52:22.002517Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7261: Cannot get console configs
2025-05-07T08:52:22.002542Z node 3 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded
2025-05-07T08:52:27.157936Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976715727:0, at schemeshard: 72057594046644480
>> KqpPg::DeleteWithQueryService+useSink [GOOD]
>> KqpPg::DeleteWithQueryService-useSink
|89.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/ydb_convert/ut/ydb-core-ydb_convert-ut
|89.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/ydb_convert/ut/ydb-core-ydb_convert-ut
|89.7%| [LD] {RESULT} $(B)/ydb/core/ydb_convert/ut/ydb-core-ydb_convert-ut
>> KqpIndexes::SelectFromAsyncIndexedTable [GOOD]
>> KqpIndexes::SecondaryIndexOrderBy2 [GOOD]
>> KqpIndexes::SecondaryIndexInsert1
>> KqpUniqueIndex::UpsertImplicitNullInComplexFk [GOOD]
>> KqpPg::TableArrayInsert-useSink [GOOD]
>> KqpPg::Returning+useSink
>> TYardTest::TestLogWriteCutEqualRandomWait [GOOD]
>> TYardTest::TestLogWriteCutUnequal
>> KqpPg::AlterColumnSetDefaultFromSequenceWithSchemaname [FAIL]
>> KqpPg::CheckPgAutoParams+useSink
>> KqpUniqueIndex::InsertFkAlreadyExist
>> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-9 [GOOD]
>> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-49
>> PersQueueSdkReadSessionTest::SpecifyClustersExplicitly [GOOD]
>> PersQueueSdkReadSessionTest::StopResumeReadingData
------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpUniqueIndex::UpsertImplicitNullInComplexFk [GOOD]
Test command err: Trying to start YDB, gRPC: 29074, MsgBus: 29159
2025-05-07T08:52:04.646805Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501623913993348350:2199];send_to=[0:7307199536658146131:7762515];
2025-05-07T08:52:04.647360Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message;
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003c45/r3tmp/tmpCo5Yl2/pdisk_1.dat
2025-05-07T08:52:05.252942Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded
2025-05-07T08:52:05.266668Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2025-05-07T08:52:05.266977Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting
2025-05-07T08:52:05.269892Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected
TServer::EnableGrpc on GrpcPort 29074, node 1
2025-05-07T08:52:05.375051Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe)
2025-05-07T08:52:05.375092Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe)
2025-05-07T08:52:05.375104Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe)
2025-05-07T08:52:05.375249Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration
TClient is connected to server localhost:29159
TClient is connected to server localhost:29159
WaitRootIsUp 'Root'...
TClient::Ls request: Root
TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED)
WaitRootIsUp 'Root' success.
2025-05-07T08:52:06.266455Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:52:06.283807Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T08:52:06.293546Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:52:06.520930Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:52:06.782815Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:52:06.895991Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:52:09.646363Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501623913993348350:2199];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:52:09.646438Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:52:09.786566Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623935468186343:2409], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:09.786707Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:10.408046Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T08:52:10.455457Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T08:52:10.498696Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T08:52:10.545142Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T08:52:10.626817Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T08:52:10.705868Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T08:52:10.777226Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T08:52:10.851305Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623939763154304:2475], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:10.851365Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:10.851537Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623939763154309:2478], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:10.856298Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-05-07T08:52:10.874016Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501623939763154311:2479], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T08:52:10.956684Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501623939763154362:3424] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:52:12.081564Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480 waiting... Trying to start YDB, gRPC: 5084, MsgBus: 23749 2025-05-07T08:52:16.654849Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7501623964980045661:2192];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:52:16.654912Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003c45/r3tmp/tmptPrHFL/pdisk_1.dat 2025-05-07T08:52:16.877007Z node 2 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:52:16.896908Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:52:16.897006Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:52:16.900228Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 5084, node 2 2025-05-07T08:52:17.022887Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:52:17.022922Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:52:17.022939Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:52:17.023108Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23749 TClient is connected to server localhost:23749 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-05-07T08:52:17.534113Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:52:17.551651Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:52:17.691373Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:52:17.942247Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:52:18.032074Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:52:21.227412Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7501623986454883649:2407], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:21.227493Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:21.353066Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-05-07T08:52:21.426575Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-05-07T08:52:21.494876Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-05-07T08:52:21.592118Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-05-07T08:52:21.645539Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7501623964980045661:2192];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:52:21.645760Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:52:21.668450Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-05-07T08:52:21.787327Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-05-07T08:52:21.873237Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-05-07T08:52:22.040394Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7501623990749851612:2471], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:22.040493Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:22.040947Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7501623990749851617:2474], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:22.045397Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-05-07T08:52:22.077483Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7501623990749851619:2475], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking }
2025-05-07T08:52:22.143195Z node 2 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [2:7501623990749851670:3418] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 }
2025-05-07T08:52:23.737268Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480
waiting...
2025-05-07T08:52:31.784301Z node 2 :KQP_EXECUTER ERROR: kqp_literal_executer.cpp:107: ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: 01jtmz4jq168rnsw3t6qgan82z, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MzI0MTUwN2YtZTkzNzRlMGUtM2MxNmY3NGEtODA5ODFhZmM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. TKqpLiteralExecuter, TKqpEnsure failed.
2025-05-07T08:52:31.797905Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2578: SessionId: ydb://session/3?node_id=2&id=MzI0MTUwN2YtZTkzNzRlMGUtM2MxNmY3NGEtODA5ODFhZmM=, ActorId: [2:7501623999339787317:2576], ActorState: ExecuteState, TraceId: 01jtmz4jq168rnsw3t6qgan82z, Create QueryResponse for error on request, msg:
2025-05-07T08:52:31.854695Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7261: Cannot get console configs
2025-05-07T08:52:31.854725Z node 2 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded
------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpIndexes::SelectFromAsyncIndexedTable [GOOD]
Test command err: Trying to start YDB, gRPC: 2888, MsgBus: 26995
2025-05-07T08:51:55.459844Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501623876198107303:2196];send_to=[0:7307199536658146131:7762515];
2025-05-07T08:51:55.460443Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message;
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003c74/r3tmp/tmpWfeIhV/pdisk_1.dat
2025-05-07T08:51:56.220575Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded
2025-05-07T08:51:56.255372Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2025-05-07T08:51:56.255462Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting
2025-05-07T08:51:56.263321Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected
TServer::EnableGrpc on GrpcPort 2888, node 1
2025-05-07T08:51:56.631163Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe)
2025-05-07T08:51:56.631184Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe)
2025-05-07T08:51:56.631191Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe)
2025-05-07T08:51:56.631293Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration
TClient is connected to
server localhost:26995 TClient is connected to server localhost:26995 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:51:58.260564Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:51:58.313458Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:51:58.600151Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-05-07T08:51:58.912916Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 2025-05-07T08:51:59.067558Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:52:00.418224Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501623876198107303:2196];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:52:00.418285Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:52:02.238703Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623906262879904:2411], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:02.238793Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:03.424730Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T08:52:03.478824Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T08:52:03.586865Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T08:52:03.649081Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T08:52:03.754615Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T08:52:03.964466Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T08:52:04.065255Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T08:52:04.194993Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623914852815178:2480], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:04.195108Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:04.196927Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623914852815184:2483], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:04.204425Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-05-07T08:52:04.218987Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501623914852815186:2484], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T08:52:04.281814Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501623914852815237:3433] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:52:06.490402Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269877761, Sender [1:7501623923442750128:3617], Recipient [1:7501623880493074896:2196]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-05-07T08:52:06.490466Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4937: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-05-07T08:52:06.490480Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5713: Pipe server connected, at tablet: 72057594046644480 2025-05-07T08:52:06.490549Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271122432, Sender [1:7501623923442750124:3614], Recipient [1:7501623880493074896:2196]: {TEvModifySchemeTransaction txid# 281474976710672 TabletId# 72057594046644480} 2025-05-07T08:52:06.490571Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4851: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-05-07T08:52:06.599226Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/Root" OperationType: ESchemeOpCreateIndexedTable CreateIndexedTable { TableDescription { Name: "TestTable" Columns { Name: "Key" Type: "String" NotNull: false } Columns { Name: "Index2" Type: "String" NotNull: false } Columns { Name: "Value" Type: "String" NotNull: false } KeyColumnNames: "Key" PartitionConfig { ColumnFamilies { Id: 0 StorageConfig { SysLog { PreferredPoolKind: "test" } Log { PreferredPoolKind: "test" } Data { PreferredPoolKind: "test" } } } } Temporary: false } IndexDescription { Name: "Index" KeyColumnNames: "Index2" Type: EIndexTypeGlobal IndexImplTableDescriptions { PartitionConfig { } } } } } TxId: 281474976710672 TabletId: 72057594046644480 PeerName: "ipv6:[::1]:47712" , at schemeshard: 72057594046644480 2025-05-07T08:52:06.599724Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_create_indexed_table.cpp:101: TCreateTableIndex construct operation table path: /Root/TestTable domain path id: [OwnerId: 72057594046644480, LocalPathId: 1] domain path: /Root shardsToCreate: 2 GetShardsInside: 34 MaxShards: 200000 2025-05-07T08:52:06.600148Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_table.cpp:426: TCreateTable Propose, path: /Root/TestTable, opId: 281474976710672:0, at schemeshard: 72057594046644480 2025-05-07T08:52:06.600279Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_create_table.cpp:433: TCreateTable Propose, path: /Root/TestTable, opId: 281474976710672:0, schema: Name: "TestTable" Columns { Name: "Key" Type: "String" NotNull: false } Columns { Name: "Index2" Type: "String" NotNull: false } Columns { Name: "Value" Type: "String" NotNull: false } KeyColumnNames: "Key" PartitionConfig { ColumnFamilies { Id: 0 StorageConfig { SysLog { PreferredPoolKind: "test" } Log { PreferredPoolKind: "test" } Data { PreferredPoolKind: "test" } ... 
:22.571102Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:52:22.571205Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:52:22.579479Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 10934, node 3 2025-05-07T08:52:22.743698Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:52:22.743727Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:52:22.743739Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:52:22.743927Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3095 TClient is connected to server localhost:3095 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:52:23.594184Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:52:23.600880Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T08:52:23.612216Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:52:23.763706Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:52:24.070313Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T08:52:24.212949Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:52:27.150321Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7501623993366334766:2204];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:52:27.162338Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:52:28.057800Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7501624019136140072:2409], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:28.057910Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:28.238602Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T08:52:28.327306Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T08:52:28.419835Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T08:52:28.473434Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T08:52:28.540764Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T08:52:28.620586Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T08:52:28.709456Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T08:52:28.827841Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7501624019136140740:2474], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:28.827925Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:28.828144Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7501624019136140745:2477], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:28.832392Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-05-07T08:52:28.852410Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7501624019136140747:2478], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T08:52:28.908192Z node 3 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [3:7501624019136140798:3429] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:52:30.892067Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480 2025-05-07T08:52:31.329931Z node 3 :KQP_EXECUTER ERROR: kqp_data_executer.cpp:2019: ActorId: [3:7501624032021043178:2521] TxId: 281474976710673. Ctx: { TraceId: 01jtmz4ket70n7a793zwy04g1h, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=NWUxMWM0ZWMtNGZjMGVlZC0yMzc2N2ZiOC05NjI3NDc0NA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Read operation can be performed on async index table: [72057594046644480:19:1] only with StaleRO isolation level 2025-05-07T08:52:31.330207Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2578: SessionId: ydb://session/3?node_id=3&id=NWUxMWM0ZWMtNGZjMGVlZC0yMzc2N2ZiOC05NjI3NDc0NA==, ActorId: [3:7501624027726075687:2521], ActorState: ExecuteState, TraceId: 01jtmz4ket70n7a793zwy04g1h, Create QueryResponse for error on request, msg: 2025-05-07T08:52:31.349137Z node 3 :KQP_EXECUTER ERROR: kqp_data_executer.cpp:2019: ActorId: [3:7501624032021043191:2521] TxId: 281474976710675. Ctx: { TraceId: 01jtmz4kfe8jmrv88jmj83zh4v, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=NWUxMWM0ZWMtNGZjMGVlZC0yMzc2N2ZiOC05NjI3NDc0NA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Read operation can be performed on async index table: [72057594046644480:19:1] only with StaleRO isolation level 2025-05-07T08:52:31.349362Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2578: SessionId: ydb://session/3?node_id=3&id=NWUxMWM0ZWMtNGZjMGVlZC0yMzc2N2ZiOC05NjI3NDc0NA==, ActorId: [3:7501624027726075687:2521], ActorState: ExecuteState, TraceId: 01jtmz4kfe8jmrv88jmj83zh4v, Create QueryResponse for error on request, msg: 2025-05-07T08:52:31.365893Z node 3 :KQP_EXECUTER ERROR: kqp_data_executer.cpp:2019: ActorId: [3:7501624032021043200:2521] TxId: 281474976710677. Ctx: { TraceId: 01jtmz4kg1dn5rj7g3s7dtdv8w, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=NWUxMWM0ZWMtNGZjMGVlZC0yMzc2N2ZiOC05NjI3NDc0NA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Read operation can be performed on async index table: [72057594046644480:19:1] only with StaleRO isolation level 2025-05-07T08:52:31.366176Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2578: SessionId: ydb://session/3?node_id=3&id=NWUxMWM0ZWMtNGZjMGVlZC0yMzc2N2ZiOC05NjI3NDc0NA==, ActorId: [3:7501624027726075687:2521], ActorState: ExecuteState, TraceId: 01jtmz4kg1dn5rj7g3s7dtdv8w, Create QueryResponse for error on request, msg: 2025-05-07T08:52:31.384504Z node 3 :KQP_EXECUTER ERROR: kqp_data_executer.cpp:2019: ActorId: [3:7501624032021043209:2521] TxId: 281474976710679. Ctx: { TraceId: 01jtmz4kgp2beybf2psrc63p9j, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=NWUxMWM0ZWMtNGZjMGVlZC0yMzc2N2ZiOC05NjI3NDc0NA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Read operation can be performed on async index table: [72057594046644480:19:1] only with StaleRO isolation level 2025-05-07T08:52:31.384720Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2578: SessionId: ydb://session/3?node_id=3&id=NWUxMWM0ZWMtNGZjMGVlZC0yMzc2N2ZiOC05NjI3NDc0NA==, ActorId: [3:7501624027726075687:2521], ActorState: ExecuteState, TraceId: 01jtmz4kgp2beybf2psrc63p9j, Create QueryResponse for error on request, msg: >> KqpPg::InsertFromSelect_Serial-useSink [GOOD] >> KqpPg::InsertNoTargetColumns_ColumnOrder+useSink >> TPartitionChooserSuite::TPartitionChooserActor_SplitMergeEnabled_PreferedPartition_OtherPartition_Test [GOOD] >> DataShardSnapshots::LockedWriteDistributedCommitCrossConflict+UseSink [GOOD] >> DataShardSnapshots::LockedWriteCleanupOnSplit+UseSink |89.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/actorlib_impl/ut/ydb-core-actorlib_impl-ut |89.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/actorlib_impl/ut/ydb-core-actorlib_impl-ut |89.7%| [LD] {RESULT} $(B)/ydb/core/actorlib_impl/ut/ydb-core-actorlib_impl-ut >> KqpIndexes::SecondaryIndexUpsert1DeleteUpdate [GOOD] >> KqpIndexes::SecondaryIndexUpsert2Update ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/unittest >> TPartitionChooserSuite::TPartitionChooserActor_SplitMergeEnabled_PreferedPartition_OtherPartition_Test [GOOD] Test command err: 2025-05-07T08:50:59.209390Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501623634433355955:2278];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:50:59.209589Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-05-07T08:50:59.318053Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7501623635317130532:2073];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:50:59.318170Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-05-07T08:50:59.580319Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003e92/r3tmp/tmp6IZKPf/pdisk_1.dat 2025-05-07T08:50:59.627969Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-05-07T08:51:00.249405Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:51:00.372046Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:51:00.376103Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:51:00.402293Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:51:00.402417Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:51:00.403465Z node 1 :HIVE WARN: node_info.cpp:25: 
HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:51:00.403513Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:51:00.423648Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-05-07T08:51:00.423772Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:51:00.434421Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 27894, node 1 2025-05-07T08:51:00.699404Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/zvgn/003e92/r3tmp/yandexoTcsIq.tmp 2025-05-07T08:51:00.699443Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/zvgn/003e92/r3tmp/yandexoTcsIq.tmp 2025-05-07T08:51:00.699603Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/zvgn/003e92/r3tmp/yandexoTcsIq.tmp 2025-05-07T08:51:00.699750Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-05-07T08:51:00.766183Z INFO: TTestServer started on Port 13249 GrpcPort 27894 TClient is connected to server localhost:13249 PQClient connected to localhost:27894 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:51:01.238376Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-05-07T08:51:01.366423Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... waiting... waiting... 2025-05-07T08:51:04.182401Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623655908193334:2339], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:51:04.182990Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:51:04.183789Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623655908193347:2343], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:51:04.195298Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710662:3, at schemeshard: 72057594046644480 2025-05-07T08:51:04.203628Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623655908193386:2347], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:51:04.203696Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:51:04.213132Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501623634433355955:2278];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:51:04.213193Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:51:04.294069Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7501623635317130532:2073];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:51:04.294128Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:51:04.311826Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501623655908193349:2344], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710662 completed, doublechecking } 2025-05-07T08:51:04.634153Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501623655908193432:2770] txid# 281474976710663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:51:04.687933Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7501623656791967416:2319], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-05-07T08:51:04.689733Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2147: SessionId: ydb://session/3?node_id=2&id=YTNjOGEwNzktYzcyZmZmZTQtMzExZWZhNDYtMTMwYjg2MjI=, ActorId: [2:7501623656791967374:2311], ActorState: ExecuteState, TraceId: 01jtmz1yccf863g1as0e8k7sce, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-05-07T08:51:04.695948Z node 2 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-05-07T08:51:04.721446Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7501623655908193472:2353], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-05-07T08:51:04.722609Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2147: SessionId: ydb://session/3?node_id=1&id=M2I0MzhjZWItZGM4YTcwM2UtYzhmYWFjOGMtMmIxMmZkNDE=, ActorId: [1:7501623655908193332:2338], ActorState: ExecuteState, TraceId: 01jtmz1yax0kf1qwjd5mnbv7br, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-05-07T08:51:04.723003Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-05-07T08:51:04.726946Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOp ... rectness of table path and user permissions., code: 2003 2025-05-07T08:52:16.846212Z node 10 :KQP_SESSION WARN: kqp_session_actor.cpp:2147: SessionId: ydb://session/3?node_id=10&id=ZGZiMjQzNzAtZTc0ODg2NWQtOWY3ZDBjZjYtNmQ5ODI0ZmM=, ActorId: [10:7501623964209753125:2317], ActorState: ExecuteState, TraceId: 01jtmz44zwb6gxh4awstw8rs93, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-05-07T08:52:16.846760Z node 10 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-05-07T08:52:16.865115Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-05-07T08:52:17.213048Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); 2025-05-07T08:52:17.855921Z node 9 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715667. Ctx: { TraceId: 01jtmz45ya09v7y0jpg8cchmw9, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=9&id=YzZhMjQyNDMtM2RhYzEwMGEtZThmYzAwM2MtNWRlZWFhZDI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root === CheckClustersList. Subcribe to ClusterTracker from [9:7501623969431059846:3114] 2025-05-07T08:52:22.570470Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7261: Cannot get console configs 2025-05-07T08:52:22.570516Z node 9 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded === CheckClustersList. Ok 2025-05-07T08:52:23.512281Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715678:1, at schemeshard: 72057594046644480 2025-05-07T08:52:24.691837Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715685:0, at schemeshard: 72057594046644480 2025-05-07T08:52:25.791306Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715691:0, at schemeshard: 72057594046644480 2025-05-07T08:52:27.309712Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715697:0, at schemeshard: 72057594046644480 2025-05-07T08:52:29.309786Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715702:0, at schemeshard: 72057594046644480 2025-05-07T08:52:30.236420Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715706:0, at schemeshard: 72057594046644480 Run query: --!syntax_v1 UPSERT INTO `//Root/.metadata/TopicPartitionsMapping` (Hash, Topic, ProducerId, CreateTime, AccessTime, Partition, SeqNo) VALUES (16261273835729377752, "Root", "00415F536F757263655F3130", 1746607951522, 1746607951522, 0, 13); 2025-05-07T08:52:31.687009Z node 9 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715712. Ctx: { TraceId: 01jtmz4kpf7yxrvpwstjcshfqx, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=9&id=MmIyOWY0MWYtMmQ2MWZkMmMtZjhhMWNlYjUtZDhiYTliYzc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-05-07T08:52:31.731566Z node 9 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:62: TTableHelper SelectQuery: --!syntax_v1 DECLARE $Hash AS Uint64; DECLARE $Topic AS Utf8; DECLARE $SourceId AS Utf8; SELECT Partition, CreateTime, AccessTime, SeqNo FROM `//Root/.metadata/TopicPartitionsMapping` WHERE Hash == $Hash AND Topic == $Topic AND ProducerId == $SourceId; 2025-05-07T08:52:31.731599Z node 9 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:63: TTableHelper UpdateQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint64; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64; DECLARE $SeqNo AS Uint64; UPSERT INTO `//Root/.metadata/TopicPartitionsMapping` (Hash, Topic, ProducerId, CreateTime, AccessTime, Partition, SeqNo) VALUES ($Hash, $Topic, $SourceId, $CreateTime, $AccessTime, $Partition, $SeqNo); 2025-05-07T08:52:31.731609Z node 9 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:64: TTableHelper UpdateAccessTimeQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint64; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64; UPDATE `//Root/.metadata/TopicPartitionsMapping` SET AccessTime = $AccessTime WHERE Hash = $Hash AND Topic = $Topic AND ProducerId = $SourceId AND Partition = $Partition; 2025-05-07T08:52:31.731631Z node 9 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__sm_chooser_actor.h:116: TPartitionChooser [9:7501624029560603482:3896] (SourceId=A_Source_10, PreferedPartition=1) GetOwnershipFast Partition=1 TabletId=1001 2025-05-07T08:52:31.731754Z node 9 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_ut.cpp:382: StateMockWork, received event# 269877760, Sender [9:7501624029560603483:3896], Recipient [9:7501623995200863906:3231]: NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 1001 Status: OK ServerId: [9:7501624029560603482:3896] Leader: 1 Dead: 0 Generation: 1 VersionInfo: } 2025-05-07T08:52:31.731855Z node 9 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_ut.cpp:382: StateMockWork, received event# 271188557, Sender [9:7501624029560603482:3896], Recipient [9:7501623995200863906:3231]: NKikimrPQ.TEvCheckPartitionStatusRequest Partition: 1 SourceId: "A_Source_10" 2025-05-07T08:52:31.731923Z node 9 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_impl__sm_chooser_actor.h:139: StateOwnershipFast, received event# 271188558, Sender [9:7501623995200863906:3231], Recipient [9:7501624029560603482:3896]: NKikimrPQ.TEvCheckPartitionStatusResponse Status: Active 2025-05-07T08:52:31.731960Z node 9 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_impl__abstract_chooser_actor.h:88: TPartitionChooser [9:7501624029560603482:3896] (SourceId=A_Source_10, PreferedPartition=1) InitTable: SourceId=A_Source_10 TopicsAreFirstClassCitizen=1 UseSrcIdMetaMappingInFirstClass=1 2025-05-07T08:52:31.732039Z node 9 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_ut.cpp:382: StateMockWork, received event# 65543, Sender [9:7501624029560603482:3896], Recipient [9:7501623995200863906:3231]: NActors::TEvents::TEvPoison 2025-05-07T08:52:31.732237Z node 9 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_impl__abstract_chooser_actor.h:101: StateInitTable, received event# 277020685, Sender [9:7501623922186417843:2069], Recipient [9:7501624029560603482:3896]: NKikimr::NMetadata::NProvider::TEvManagerPrepared 2025-05-07T08:52:31.732262Z node 9 :PQ_PARTITION_CHOOSER DEBUG: 
partition_chooser_impl__abstract_chooser_actor.h:111: TPartitionChooser [9:7501624029560603482:3896] (SourceId=A_Source_10, PreferedPartition=1) StartKqpSession 2025-05-07T08:52:31.736219Z node 9 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_impl__abstract_chooser_actor.h:132: StateCreateKqpSession, received event# 271646728, Sender [9:7501623922186417884:2102], Recipient [9:7501624029560603482:3896]: NKikimrKqp.TEvCreateSessionResponse Error: "" Response { SessionId: "ydb://session/3?node_id=9&id=YmEzMzBlMTQtNmIwMDMyN2QtYmQ3ZTY0YS04N2UyZjQ5Nw==" NodeId: 9 } YdbStatus: SUCCESS ResourceExhausted: false 2025-05-07T08:52:31.736265Z node 9 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:142: TPartitionChooser [9:7501624029560603482:3896] (SourceId=A_Source_10, PreferedPartition=1) Select from the table Received TEvChooseError: MessageGroupId A_Source_10 is already bound to PartitionGroupId 1, but client provided 2. MessageGroupId->PartitionGroupId binding cannot be changed, either use another MessageGroupId, specify PartitionGroupId 1, or do not specify PartitionGroupId at all. Run query: --!syntax_v1 SELECT Partition, SeqNo FROM `//Root/.metadata/TopicPartitionsMapping` WHERE Hash = 16261273835729377752 AND Topic = "Root" AND ProducerId = "00415F536F757263655F3130" 2025-05-07T08:52:31.966700Z node 9 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_impl__abstract_chooser_actor.h:163: StateSelect, received event# 271646721, Sender [9:7501623922186417884:2102], Recipient [9:7501624029560603482:3896]: NKikimrKqp.TEvQueryResponse Response { SessionId: "ydb://session/3?node_id=9&id=YmEzMzBlMTQtNmIwMDMyN2QtYmQ3ZTY0YS04N2UyZjQ5Nw==" PreparedQuery: "7340487f-beebcbe6-84d3d7cd-1a996c9d" QueryParameters { Name: "$Hash" Type { Kind: Data Data { Scheme: 4 } } } QueryParameters { Name: "$Topic" Type { Kind: Data Data { Scheme: 4608 } } } QueryParameters { Name: "$SourceId" Type { Kind: Data Data { Scheme: 4608 } } } TxMeta { id: "01jtmz4m2m0yjjdr6w8fxc1gen" } YdbResults { columns { name: "Partition" type { optional_type { item { type_id: UINT32 } } } } columns { name: "CreateTime" type { optional_type { item { type_id: UINT64 } } } } columns { name: "AccessTime" type { optional_type { item { type_id: UINT64 } } } } columns { name: "SeqNo" type { optional_type { item { type_id: UINT64 } } } } rows { items { uint32_value: 0 } items { uint64_value: 1746607951522 } items { uint64_value: 1746607951522 } items { uint64_value: 13 } } } QueryDiagnostics: "" } YdbStatus: SUCCESS ConsumedRu: 142 2025-05-07T08:52:31.966907Z node 9 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_impl__abstract_chooser_actor.h:151: TPartitionChooser [9:7501624029560603482:3896] (SourceId=A_Source_10, PreferedPartition=1) Selected from table PartitionId=0 SeqNo=13 2025-05-07T08:52:31.966931Z node 9 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_impl__sm_chooser_actor.h:209: TPartitionChooser [9:7501624029560603482:3896] (SourceId=A_Source_10, PreferedPartition=1) OnPartitionChosen 2025-05-07T08:52:31.966987Z node 9 :PQ_PARTITION_CHOOSER INFO: partition_chooser_impl__abstract_chooser_actor.h:312: TPartitionChooser [9:7501624029560603482:3896] (SourceId=A_Source_10, PreferedPartition=1) ReplyError: MessageGroupId A_Source_10 is already bound to PartitionGroupId 1, but client provided 2. MessageGroupId->PartitionGroupId binding cannot be changed, either use another MessageGroupId, specify PartitionGroupId 1, or do not specify PartitionGroupId at all. 
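The TEvChooseError above spells out the partition chooser's invariant: once a MessageGroupId has been recorded in `//Root/.metadata/TopicPartitionsMapping` with a partition, later writers must either repeat that PartitionGroupId or omit it entirely. A minimal writer-side sketch against the C++ PersQueue SDK; the PartitionGroupId setter is an assumption inferred from the error text, not taken from the test:

    // Sketch only: keeping a writer consistent with the stored
    // MessageGroupId -> PartitionGroupId binding that TPartitionChooser checks.
    #include <ydb/public/sdk/cpp/client/ydb_persqueue_public/persqueue.h>

    void WriteWithBoundPartition(NYdb::TDriver& driver) {
        NYdb::NPersQueue::TPersQueueClient client(driver);
        auto settings = NYdb::NPersQueue::TWriteSessionSettings()
            .Path("Root")                   // topic path used in the test
            .MessageGroupId("A_Source_10")  // source id already bound to partition group 1
            .PartitionGroupId(1);           // must match the stored binding, or be left unset
        auto session = client.CreateWriteSession(settings);
        // Passing PartitionGroupId(2) instead reproduces the
        // "binding cannot be changed" TEvChooseError logged above.
    }

The SelectQuery/UpdateQuery pair logged by TTableHelper just above is the server-side half of the same mechanism: the chooser looks the source id up in the mapping table before honoring any client-supplied partition group.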
2025-05-07T08:52:32.359520Z node 9 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715715. Ctx: { TraceId: 01jtmz4m499dtgd9hy2f18ynpm, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=9&id=YWE4MmE1ZjgtZmNiOGU2OTMtOTA4Y2IyZTctZThmMmFlMmQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root |89.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kesus/proxy/ut/ydb-core-kesus-proxy-ut >> Compression::WriteZSTD [GOOD] >> Compression::WriteWithMixedCodecs |89.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kesus/proxy/ut/ydb-core-kesus-proxy-ut |89.7%| [LD] {RESULT} $(B)/ydb/core/kesus/proxy/ut/ydb-core-kesus-proxy-ut >> KqpIndexes::Uint8Index [GOOD] >> BasicUsage::BrokenCredentialsProvider [GOOD] >> KqpIndexes::UniqIndexComplexPkComplexFkOverlap [GOOD] >> KqpIndexes::UniqAndNoUniqSecondaryIndexWithCover >> KqpMultishardIndex::SortByPk [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/viewer/ut/unittest >> Viewer::SimpleFeatureFlags [GOOD] Test command err: BASE_PERF = 4.341263923 2025-05-07T08:50:46.580190Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:337:2380], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:50:46.580357Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:50:46.580433Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] TServer::EnableGrpc on GrpcPort 7154, node 1 TClient is connected to server localhost:5869 2025-05-07T08:50:57.389301Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:322:2365], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:50:57.389494Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:50:57.389726Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] TServer::EnableGrpc on GrpcPort 26760, node 2 TClient is connected to server localhost:9940 2025-05-07T08:51:08.247328Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [3:340:2383], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:51:08.247545Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:51:08.247655Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] TServer::EnableGrpc on GrpcPort 16740, node 3 TClient is connected to server localhost:17875 2025-05-07T08:51:19.428944Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [4:339:2381], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:51:19.429440Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:51:19.429795Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] TServer::EnableGrpc on GrpcPort 13101, node 4 TClient is connected to server localhost:9721 2025-05-07T08:51:29.852545Z node 5 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [5:320:2363], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:51:29.853328Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:51:29.853504Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] TServer::EnableGrpc on GrpcPort 26954, node 5 TClient is connected to server localhost:23706 2025-05-07T08:51:43.680126Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [6:340:2383], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:51:43.680470Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:51:43.680640Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] TServer::EnableGrpc on GrpcPort 19065, node 6 TClient is connected to server localhost:3428 2025-05-07T08:52:07.307969Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [7:342:2384], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:52:07.308723Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:52:07.308938Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] TServer::EnableGrpc on GrpcPort 16086, node 7 TClient is connected to server localhost:28770 2025-05-07T08:52:14.350673Z node 8 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[8:7501623958099472356:2190];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:52:14.435327Z node 8 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-05-07T08:52:15.036334Z node 8 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:52:15.230041Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:52:15.230201Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:52:15.235923Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 15640, node 8 2025-05-07T08:52:15.578304Z node 8 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:52:15.578344Z node 8 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:52:15.578359Z node 8 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:52:15.578653Z node 8 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:5125 2025-05-07T08:52:19.330144Z node 8 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[8:7501623958099472356:2190];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:52:19.330238Z node 8 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> Viewer::JsonStorageListingV2GroupIdFilter [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpIndexes::Uint8Index [GOOD] Test command err: Trying to start YDB, gRPC: 18045, MsgBus: 10328 2025-05-07T08:51:55.700459Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501623877557551039:2193];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:51:55.700492Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: 
SetPath # /home/runner/.ya/build/build_root/zvgn/003c6b/r3tmp/tmpoHfVvQ/pdisk_1.dat 2025-05-07T08:51:56.595464Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:51:56.613446Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:51:56.613533Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:51:56.631328Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 18045, node 1 2025-05-07T08:51:56.968610Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:51:56.968634Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:51:56.968654Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:51:56.968771Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:10328 TClient is connected to server localhost:10328 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:51:58.440268Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:51:58.530826Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:51:58.984022Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:51:59.423211Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 
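The "Read operation can be performed on async index table: [...] only with StaleRO isolation level" errors earlier in this report come from reading through an asynchronously updated secondary index inside an ordinary serializable transaction: async indexes may lag the main table, so KQP refuses such reads unless the transaction is stale read-only. A compliant read sketched with the C++ table client; the table and index names are placeholders, only the StaleRO requirement itself is taken from the log:

    // Sketch: a read through an async secondary index must run under StaleRO.
    #include <ydb/public/sdk/cpp/client/ydb_table/table.h>

    void ReadViaAsyncIndex(NYdb::NTable::TSession& session) {
        auto txControl = NYdb::NTable::TTxControl::BeginTx(
            NYdb::NTable::TTxSettings::StaleRO()).CommitTx();
        auto result = session.ExecuteDataQuery(
            "SELECT * FROM `/Root/TestTable` VIEW SomeAsyncIndex WHERE Value1 = 'x';",
            txControl).GetValueSync();
        // The same query under TTxSettings::SerializableRW() fails with the
        // kqp_data_executer.cpp:2019 error quoted above.
    }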
2025-05-07T08:51:59.509557Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:52:00.706142Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501623877557551039:2193];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:52:00.706222Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:52:02.726307Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623907622323675:2412], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:02.726412Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:03.570802Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T08:52:03.620800Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T08:52:03.661219Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T08:52:03.768742Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T08:52:03.851696Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T08:52:03.933464Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T08:52:03.995327Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T08:52:04.115802Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623916212258936:2478], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:04.115878Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:04.116211Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623916212258941:2481], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:04.120500Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-05-07T08:52:04.137744Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501623916212258943:2482], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T08:52:04.202343Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501623916212258994:3442] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:52:05.537129Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269877761, Sender [1:7501623920507226589:3619], Recipient [1:7501623881852518631:2193]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-05-07T08:52:05.537174Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4937: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-05-07T08:52:05.537185Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5713: Pipe server connected, at tablet: 72057594046644480 2025-05-07T08:52:05.537225Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271122432, Sender [1:7501623920507226585:3616], Recipient [1:7501623881852518631:2193]: {TEvModifySchemeTransaction txid# 281474976710672 TabletId# 72057594046644480} 2025-05-07T08:52:05.537238Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4851: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-05-07T08:52:05.694910Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/Root" OperationType: ESchemeOpCreateIndexedTable CreateIndexedTable { TableDescription { Name: "TestTable" Columns { Name: "Key" Type: "String" NotNull: false } Columns { Name: "Value1" Type: "String" NotNull: false } Columns { Name: "Value2" Type: "String" NotNull: false } KeyColumnNames: "Key" PartitionConfig { ColumnFamilies { Id: 0 StorageConfig { SysLog { PreferredPoolKind: "test" } Log { PreferredPoolKind: "test" } Data { PreferredPoolKind: "test" } } } } Temporary: false } IndexDescription { Name: "Index1Uniq" KeyColumnNames: "Value1" Type: EIndexTypeGlobalUnique IndexImplTableDescriptions { PartitionConfig { } } } IndexDescription { Name: "Index2NotUniq" KeyColumnNames: "Value2" Type: EIndexTypeGlobal IndexImplTableDescriptions { PartitionConfig { } } } } } TxId: 281474976710672 TabletId: 72057594046644480 PeerName: "ipv6:[::1]:42340" , at schemeshard: 72057594046644480 2025-05-07T08:52:05.695455Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_create_indexed_table.cpp:101: TCreateTableIndex construct operation table path: /Root/TestTable domain path id: [OwnerId: 72057594046644480, LocalPathId: 1] domain path: /Root shardsToCreate: 3 GetShardsInside: 34 MaxShards: 200000 2025-05-07T08:52:05.696018Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_table.cpp:426: TCreateTable Propose, path: /Root/TestTable, opId: 281474976710672:0, at schemeshard: 72057594046644480 2025-05-07T08:52:05.696166Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_create_table.cpp:433: TCreateTable Propose, path: /Root/TestTable, opId: 281474976710672:0, schema: Name: "TestTable" Columns { Name: "Key" Type: "String" NotNull: false } Columns { Name: "Value1" Type: "String" NotNull: false } Columns { Name: "Value2" Type: "String" NotNull: false } KeyColumnNames: "Key" Partit ... 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 5 shard idx 72057594046644480:19 data size 0 row count 0 2025-05-07T08:52:26.136847Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037906 maps to shardIdx: 72057594046644480:19 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 5], pathId map=BatchUpload, is column=0, is olap=0, RowCount 0, DataSize 0 2025-05-07T08:52:26.136853Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037906, followerId 0 2025-05-07T08:52:26.136873Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:19 with partCount# 0, rowCount# 0, searchHeight# 0, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-05-07T08:52:26.136882Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:483: Do not want to split tablet 72075186224037906 2025-05-07T08:52:26.136926Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-05-07T08:52:26.138382Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 2146435092, Sender [0:0:0], Recipient [2:7501623951764654726:2149]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats 2025-05-07T08:52:26.138414Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5015: StateWork, processing event TEvPrivate::TEvPersistTableStats 2025-05-07T08:52:26.138428Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:588: Started TEvPersistStats at tablet 72057594046644480, queue size# 0 Trying to start YDB, gRPC: 22996, MsgBus: 10033 2025-05-07T08:52:27.765035Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7501624011940666637:2196];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:52:27.802456Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003c6b/r3tmp/tmplrDud1/pdisk_1.dat 2025-05-07T08:52:28.173369Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:52:28.173459Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:52:28.194525Z node 3 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:52:28.218948Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 22996, node 3 2025-05-07T08:52:28.413253Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:52:28.413288Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:52:28.413299Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:52:28.413452Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got 
bad distributable configuration TClient is connected to server localhost:10033 TClient is connected to server localhost:10033 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:52:29.443000Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:52:29.461319Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T08:52:29.473452Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:52:29.630976Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:52:29.885812Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:52:30.012022Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:52:32.750278Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7501624011940666637:2196];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:52:32.750365Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:52:33.333168Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7501624037710471921:2409], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:33.333281Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:33.390756Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T08:52:33.431526Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T08:52:33.477236Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T08:52:33.539718Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T08:52:33.587834Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T08:52:33.655053Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T08:52:33.732282Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T08:52:33.889840Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7501624037710472588:2474], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:33.890091Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:33.890347Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7501624037710472593:2477], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:33.894395Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-05-07T08:52:33.907079Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7501624037710472595:2478], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T08:52:34.006451Z node 3 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [3:7501624042005439942:3424] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:52:35.849474Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480 2025-05-07T08:52:36.124932Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976715758:2, at schemeshard: 72057594046644480 2025-05-07T08:52:36.212022Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976715759:0, at schemeshard: 72057594046644480 2025-05-07T08:52:36.346781Z node 3 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill >> Viewer::JsonStorageListingV2NodeIdFilter >> TPQTest::TestPQPartialRead [GOOD] >> TPQTest::TestPQRead >> KqpPrefixedVectorIndexes::OrderByCosineSimilarityNullableLevel1 [GOOD] >> KqpPrefixedVectorIndexes::OrderByCosineSimilarityNotNullableLevel2 >> TLdapUtilsSearchFilterCreatorTest::GetFilterWithoutLoginPlaceholders [GOOD] >> TLdapUtilsUrisCreatorTest::CreateUrisFromHostnames [GOOD] >> TLdapUtilsUrisCreatorTest::CreateUrisFromIpV4List [GOOD] >> TLdapUtilsUrisCreatorTest::CreateUrisFromIpV6List [GOOD] >> TLdapUtilsUrisCreatorTest::CreateUrisFromHostnamesLdapsScheme [GOOD] >> TLdapUtilsUrisCreatorTest::CreateUrisFromHostnamesUnknownScheme [GOOD] |89.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_extsubdomain/ydb-core-tx-schemeshard-ut_extsubdomain |89.7%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_extsubdomain/ydb-core-tx-schemeshard-ut_extsubdomain |89.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_extsubdomain/ydb-core-tx-schemeshard-ut_extsubdomain ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpMultishardIndex::SortByPk [GOOD] Test command err: Trying to start YDB, gRPC: 21369, MsgBus: 8820 2025-05-07T08:52:15.479371Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501623960892760012:2060];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:52:15.479427Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003c42/r3tmp/tmp2H60Ku/pdisk_1.dat 2025-05-07T08:52:16.323731Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:52:16.333139Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:52:16.338167Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 
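The ESchemeOpCreateIndexedTable proposal in the Uint8Index block above carries TestTable keyed on Key, with a global unique index Index1Uniq over Value1 (EIndexTypeGlobalUnique) and a plain global index Index2NotUniq over Value2 (EIndexTypeGlobal). For orientation, roughly the same schema expressed through the table client; a sketch assuming the SDK's TTableBuilder exposes AddUniqueSecondaryIndex alongside AddSecondaryIndex, not the test's own code:

    // Sketch of the schema carried by the ESchemeOpCreateIndexedTable above.
    #include <ydb/public/sdk/cpp/client/ydb_table/table.h>

    void CreateTestTable(NYdb::NTable::TSession& session) {
        auto desc = NYdb::NTable::TTableBuilder()
            .AddNullableColumn("Key", NYdb::EPrimitiveType::String)
            .AddNullableColumn("Value1", NYdb::EPrimitiveType::String)
            .AddNullableColumn("Value2", NYdb::EPrimitiveType::String)
            .SetPrimaryKeyColumn("Key")
            .AddUniqueSecondaryIndex("Index1Uniq", {"Value1"})   // EIndexTypeGlobalUnique
            .AddSecondaryIndex("Index2NotUniq", {"Value2"})      // EIndexTypeGlobal
            .Build();
        auto status = session.CreateTable("/Root/TestTable",
                                          std::move(desc)).GetValueSync();
    }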
2025-05-07T08:52:16.339947Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 21369, node 1 2025-05-07T08:52:16.690661Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:52:16.690685Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:52:16.690699Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:52:16.690832Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8820 TClient is connected to server localhost:8820 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:52:17.893019Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:52:17.935485Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:52:18.146730Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:52:18.517023Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:52:18.616713Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T08:52:20.482120Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501623960892760012:2060];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:52:20.482180Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:52:21.900829Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623986662565447:2411], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:21.900947Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:22.540741Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T08:52:22.608889Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T08:52:22.655977Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T08:52:22.704447Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T08:52:22.753264Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T08:52:22.831967Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T08:52:22.914436Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T08:52:22.999514Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623990957533405:2477], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:22.999691Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:22.999993Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623990957533410:2480], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:23.004027Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-05-07T08:52:23.018014Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501623990957533412:2481], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T08:52:23.109805Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501623995252500761:3429] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:52:24.436833Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480 waiting... Trying to start YDB, gRPC: 29903, MsgBus: 30246 2025-05-07T08:52:27.504131Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7501624013346636706:2202];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:52:27.570801Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003c42/r3tmp/tmp1vMynE/pdisk_1.dat 2025-05-07T08:52:27.812703Z node 2 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:52:27.843406Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:52:27.843510Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:52:27.846849Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 29903, node 2 2025-05-07T08:52:28.067518Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:52:28.067547Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:52:28.067558Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:52:28.067704Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:30246 TClient is connected to server localhost:30246 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 
2025-05-07T08:52:28.887330Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 2025-05-07T08:52:28.897284Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T08:52:28.906277Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:52:29.008820Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:52:29.180482Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:52:29.282341Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:52:32.480405Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7501624013346636706:2202];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:52:32.480476Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:52:32.618970Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7501624034821474685:2407], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:32.619412Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:32.685585Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T08:52:32.769224Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T08:52:32.815019Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T08:52:32.858583Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T08:52:32.904180Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T08:52:32.997611Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T08:52:33.115077Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T08:52:33.207015Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7501624039116442644:2471], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:33.207100Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:33.207316Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7501624039116442649:2474], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:33.210679Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-05-07T08:52:33.238194Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7501624039116442651:2475], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T08:52:33.296663Z node 2 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [2:7501624039116442702:3418] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:52:34.629617Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480 waiting... ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/persqueue_public/ut/unittest >> BasicUsage::BrokenCredentialsProvider [GOOD] Test command err: 2025-05-07T08:51:37.070751Z :MaxByteSizeEqualZero INFO: Random seed for debugging is 1746607897070720 2025-05-07T08:51:37.512067Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501623800545845897:2075];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:51:37.512149Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-05-07T08:51:37.614487Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7501623800171991398:2074];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:51:37.621549Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-05-07T08:51:37.798555Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-05-07T08:51:37.807639Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0035d7/r3tmp/tmpccqiJw/pdisk_1.dat 2025-05-07T08:51:38.107600Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:51:38.140192Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:51:38.140315Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:51:38.141457Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:51:38.141502Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:51:38.145440Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-05-07T08:51:38.145575Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:51:38.146814Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 6097, node 1 2025-05-07T08:51:38.381080Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: 
/home/runner/.ya/build/build_root/zvgn/0035d7/r3tmp/yandexvtk2HS.tmp 2025-05-07T08:51:38.381116Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/zvgn/0035d7/r3tmp/yandexvtk2HS.tmp 2025-05-07T08:51:38.381298Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/zvgn/0035d7/r3tmp/yandexvtk2HS.tmp 2025-05-07T08:51:38.381438Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-05-07T08:51:38.491749Z INFO: TTestServer started on Port 65477 GrpcPort 6097 TClient is connected to server localhost:65477 PQClient connected to localhost:6097 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:51:39.024231Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... waiting... waiting... waiting... 2025-05-07T08:51:41.865517Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623817725716086:2339], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:51:41.873912Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7501623817351860877:2310], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:51:41.874030Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:51:41.874439Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7501623817351860904:2313], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:51:41.884416Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:51:41.886762Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623817725716100:2342], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:51:41.897380Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710661:3, at schemeshard: 72057594046644480 2025-05-07T08:51:41.926791Z node 2 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [2:7501623817351860907:2125] txid# 281474976715657, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exists but creating right now (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateCreate)" severity: 1 } 2025-05-07T08:51:41.970640Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501623817725716103:2343], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710661 completed, doublechecking } 2025-05-07T08:51:41.970846Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7501623817351860906:2314], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710661 completed, doublechecking } 2025-05-07T08:51:42.051175Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501623822020683486:2680] txid# 281474976710662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:51:42.072643Z node 2 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [2:7501623821646828230:2131] txid# 281474976715658, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:51:42.314817Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T08:51:42.325024Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7501623822020683496:2349], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-05-07T08:51:42.326247Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2147: SessionId: ydb://session/3?node_id=1&id=NWQwMzJiNWMtNWYyYTI5NC0xZDIwZmU0YS04ODRmMTVlOA==, ActorId: [1:7501623817725716083:2337], ActorState: ExecuteState, TraceId: 01jtmz33521wxesws21m95fecf, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-05-07T08:51:42.328729Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-05-07T08:51:42.332574Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7501623821646828237:2318], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-05-07T08:51:42.342183Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2147: SessionId: ydb://session/3?node_id=2&id=MWRhZGE5NTktNzFiNTQ0NDYtZGY4M2Q2YTMtYTQzNDQ3YTk=, ActorId: [2:7501623817351860875:2309], ActorState: ExecuteState, TraceId: 01jtmz334z01rr2tszabskxetm, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-05-07T08:51:42.354792Z node 2 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-05-07T08:51:42.514096Z n ... CreateTime AS Uint64; DECLARE $AccessTime AS Uint64; UPDATE `/Root/PQ/SourceIdMeta2` SET AccessTime = $AccessTime WHERE Hash = $Hash AND Topic = $Topic AND SourceId = $SourceId AND Partition = $Partition; 2025-05-07T08:52:34.509773Z node 5 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:111: TPartitionChooser [5:7501624043357912943:2497] (SourceId=src, PreferedPartition=(NULL)) StartKqpSession 2025-05-07T08:52:34.515868Z node 5 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:142: TPartitionChooser [5:7501624043357912943:2497] (SourceId=src, PreferedPartition=(NULL)) Select from the table 2025-05-07T08:52:34.902042Z node 5 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__old_chooser_actor.h:67: TPartitionChooser [5:7501624043357912943:2497] (SourceId=src, PreferedPartition=(NULL)) RequestPQRB 2025-05-07T08:52:34.907096Z node 5 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72075186224037893][rt3.dc1--test-topic] pipe [5:7501624043357913005:2497] connected; active server actors: 1 2025-05-07T08:52:34.907289Z node 5 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__old_chooser_actor.h:80: TPartitionChooser [5:7501624043357912943:2497] (SourceId=src, PreferedPartition=(NULL)) Received partition 0 from PQRB for SourceId=src 2025-05-07T08:52:34.907313Z node 5 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:174: TPartitionChooser [5:7501624043357912943:2497] (SourceId=src, PreferedPartition=(NULL)) Update the table 2025-05-07T08:52:34.907786Z node 5 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037893][rt3.dc1--test-topic] pipe [5:7501624043357913005:2497] disconnected; active server actors: 1 2025-05-07T08:52:34.907805Z node 5 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1688: [72075186224037893][rt3.dc1--test-topic] pipe [5:7501624043357913005:2497] disconnected no session 2025-05-07T08:52:35.043718Z node 5 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:183: TPartitionChooser [5:7501624043357912943:2497] (SourceId=src, PreferedPartition=(NULL)) HandleUpdate PartitionPersisted=0 Status=SUCCESS 2025-05-07T08:52:35.043760Z node 5 :PQ_PARTITION_CHOOSER DEBUG: 
partition_chooser_impl__abstract_chooser_actor.h:305: TPartitionChooser [5:7501624043357912943:2497] (SourceId=src, PreferedPartition=(NULL)) ReplyResult: Partition=0, SeqNo=(NULL) 2025-05-07T08:52:35.043779Z node 5 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:268: TPartitionChooser [5:7501624043357912943:2497] (SourceId=src, PreferedPartition=(NULL)) Start idle 2025-05-07T08:52:35.043811Z node 5 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:689: ProceedPartition. session cookie: 1 sessionId: partition: 0 expectedGeneration: (NULL) 2025-05-07T08:52:35.050377Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:2874: [PQ: 72075186224037892] server connected, pipe [5:7501624047652880323:2497], now have 1 active actors on pipe 2025-05-07T08:52:35.051043Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:342: Handle TEvRequest topic: 'rt3.dc1--test-topic' requestId: 2025-05-07T08:52:35.051075Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:2788: [PQ: 72075186224037892] got client message batch for topic 'rt3.dc1--test-topic' partition 0 2025-05-07T08:52:35.051154Z node 6 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie src|107a4297-9976f80f-3b1b9273-bf2b4c06_0 generated for partition 0 topic 'rt3.dc1--test-topic' owner src 2025-05-07T08:52:35.051273Z node 6 :PERSQUEUE DEBUG: partition_write.cpp:35: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ReplyOwnerOk. Partition: 0 2025-05-07T08:52:35.051332Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:377: Answer ok topic: 'rt3.dc1--test-topic' partition: 0 messageNo: 0 requestId: cookie: 0 2025-05-07T08:52:35.050764Z node 5 :PQ_WRITE_PROXY DEBUG: writer.cpp:798: TPartitionWriter 72075186224037892 (partition=0) TEvClientConnected Status OK, TabletId: 72075186224037892, NodeId 6, Generation: 1 2025-05-07T08:52:35.052160Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:342: Handle TEvRequest topic: 'rt3.dc1--test-topic' requestId: 2025-05-07T08:52:35.052185Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:2788: [PQ: 72075186224037892] got client message batch for topic 'rt3.dc1--test-topic' partition 0 2025-05-07T08:52:35.052256Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:377: Answer ok topic: 'rt3.dc1--test-topic' partition: 0 messageNo: 0 requestId: cookie: 0 2025-05-07T08:52:35.054599Z :INFO: [] MessageGroupId [src] SessionId [] Counters: { Errors: 0 CurrentSessionLifetimeMs: 1746607955054 BytesWritten: 0 MessagesWritten: 0 BytesWrittenCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-05-07T08:52:35.054726Z :INFO: [] MessageGroupId [src] SessionId [] Write session established. Init response: session_id: "src|107a4297-9976f80f-3b1b9273-bf2b4c06_0" topic: "test-topic" cluster: "dc1" supported_codecs: CODEC_RAW supported_codecs: CODEC_GZIP supported_codecs: CODEC_LZOP 2025-05-07T08:52:35.052573Z node 5 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:865: session inited cookie: 1 partition: 0 MaxSeqNo: 0 sessionId: src|107a4297-9976f80f-3b1b9273-bf2b4c06_0 2025-05-07T08:52:35.058120Z :INFO: [] MessageGroupId [src] SessionId [src|107a4297-9976f80f-3b1b9273-bf2b4c06_0] Write session: close. 
Timeout = 0 ms 2025-05-07T08:52:35.058178Z :INFO: [] MessageGroupId [src] SessionId [src|107a4297-9976f80f-3b1b9273-bf2b4c06_0] Write session will now close 2025-05-07T08:52:35.058226Z :DEBUG: [] MessageGroupId [src] SessionId [src|107a4297-9976f80f-3b1b9273-bf2b4c06_0] Write session: aborting 2025-05-07T08:52:35.058758Z :INFO: [] MessageGroupId [src] SessionId [src|107a4297-9976f80f-3b1b9273-bf2b4c06_0] Write session: gracefully shut down, all writes complete 2025-05-07T08:52:35.058806Z :DEBUG: [] MessageGroupId [src] SessionId [src|107a4297-9976f80f-3b1b9273-bf2b4c06_0] Write session: destroy 2025-05-07T08:52:35.112196Z node 5 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 1 sessionId: src|107a4297-9976f80f-3b1b9273-bf2b4c06_0 grpc read done: success: 0 data: 2025-05-07T08:52:35.112225Z node 5 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 1 sessionId: src|107a4297-9976f80f-3b1b9273-bf2b4c06_0 grpc read failed 2025-05-07T08:52:35.112255Z node 5 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 1 sessionId: src|107a4297-9976f80f-3b1b9273-bf2b4c06_0 grpc closed 2025-05-07T08:52:35.112273Z node 5 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 1 sessionId: src|107a4297-9976f80f-3b1b9273-bf2b4c06_0 is DEAD 2025-05-07T08:52:35.113081Z node 5 :PQ_WRITE_PROXY DEBUG: writer.cpp:538: TPartitionWriter 72075186224037892 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-05-07T08:52:35.115039Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:2899: [PQ: 72075186224037892] server disconnected, pipe [5:7501624047652880323:2497] destroyed 2025-05-07T08:52:35.115076Z node 6 :PERSQUEUE DEBUG: partition_write.cpp:138: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::DropOwner. 2025-05-07T08:52:35.132341Z :INFO: [/Root] [/Root] [f4924f1b-dc338ad9-a389344c-719c43e] Starting read session 2025-05-07T08:52:35.132399Z :DEBUG: [/Root] [/Root] [f4924f1b-dc338ad9-a389344c-719c43e] Starting session to cluster null (localhost:23233) 2025-05-07T08:52:35.134199Z :DEBUG: [/Root] [/Root] [f4924f1b-dc338ad9-a389344c-719c43e] [null] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:52:35.134251Z :DEBUG: [/Root] [/Root] [f4924f1b-dc338ad9-a389344c-719c43e] [null] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:52:35.134293Z :DEBUG: [/Root] [/Root] [f4924f1b-dc338ad9-a389344c-719c43e] [null] Reconnecting session to cluster null in 0.000000s 2025-05-07T08:52:35.135627Z :ERROR: [/Root] [/Root] [f4924f1b-dc338ad9-a389344c-719c43e] [null] Got error. Status: CLIENT_UNAUTHENTICATED. Description:
: Error: Can't get Authentication info from CredentialsProvider. ydb/public/sdk/cpp/src/client/persqueue_public/ut/basic_usage_ut.cpp:451: exception during creation 2025-05-07T08:52:35.135697Z :DEBUG: [/Root] [/Root] [f4924f1b-dc338ad9-a389344c-719c43e] [null] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:52:35.135742Z :DEBUG: [/Root] [/Root] [f4924f1b-dc338ad9-a389344c-719c43e] [null] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:52:35.135881Z :INFO: [/Root] [/Root] [f4924f1b-dc338ad9-a389344c-719c43e] [null] Closing session to cluster: SessionClosed { Status: CLIENT_UNAUTHENTICATED Issues: "
: Error: Failed to establish connection to server "" ( cluster null). Attempts done: 1
: Error: Can't get Authentication info from CredentialsProvider. ydb/public/sdk/cpp/src/client/persqueue_public/ut/basic_usage_ut.cpp:451: exception during creation " } Get event on client 2025-05-07T08:52:35.136067Z :NOTICE: [/Root] [/Root] [f4924f1b-dc338ad9-a389344c-719c43e] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2025-05-07T08:52:35.136105Z :DEBUG: [/Root] [/Root] [f4924f1b-dc338ad9-a389344c-719c43e] [null] Abort session to cluster Got close event: SessionClosed { Status: CLIENT_UNAUTHENTICATED Issues: "
: Error: Failed to establish connection to server "" ( cluster null). Attempts done: 1
: Error: Can't get Authentication info from CredentialsProvider. ydb/public/sdk/cpp/src/client/persqueue_public/ut/basic_usage_ut.cpp:451: exception during creation " }2025-05-07T08:52:35.136187Z :INFO: [/Root] [/Root] [f4924f1b-dc338ad9-a389344c-719c43e] Closing read session. Close timeout: 0.000000s 2025-05-07T08:52:35.136229Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): 2025-05-07T08:52:35.136276Z :INFO: [/Root] [/Root] [f4924f1b-dc338ad9-a389344c-719c43e] Counters: { Errors: 1 CurrentSessionLifetimeMs: 3 BytesRead: 0 MessagesRead: 0 BytesReadCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-05-07T08:52:35.136377Z :NOTICE: [/Root] [/Root] [f4924f1b-dc338ad9-a389344c-719c43e] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2025-05-07T08:52:35.316882Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7261: Cannot get console configs 2025-05-07T08:52:35.316940Z node 5 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:52:36.574466Z node 5 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1073: TxId: 281474976715687, task: 1, CA Id [5:7501624051947847695:2530]. Got EvDeliveryProblem, TabletId: 72075186224037890, NotDelivered: 0 2025-05-07T08:52:36.610602Z node 5 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1073: TxId: 281474976715687, task: 1, CA Id [5:7501624051947847695:2530]. Got EvDeliveryProblem, TabletId: 72075186224037890, NotDelivered: 1 2025-05-07T08:52:36.671459Z node 5 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1073: TxId: 281474976715687, task: 1, CA Id [5:7501624051947847695:2530]. Got EvDeliveryProblem, TabletId: 72075186224037890, NotDelivered: 1 2025-05-07T08:52:36.750860Z node 5 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1073: TxId: 281474976715687, task: 1, CA Id [5:7501624051947847695:2530]. Got EvDeliveryProblem, TabletId: 72075186224037890, NotDelivered: 1 2025-05-07T08:52:36.834842Z node 5 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1073: TxId: 281474976715687, task: 1, CA Id [5:7501624051947847695:2530]. Got EvDeliveryProblem, TabletId: 72075186224037890, NotDelivered: 1 2025-05-07T08:52:36.962785Z node 5 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1073: TxId: 281474976715687, task: 1, CA Id [5:7501624051947847695:2530]. Got EvDeliveryProblem, TabletId: 72075186224037890, NotDelivered: 1 >> KqpPg::PgAggregate+useSink [GOOD] >> KqpPg::PgAggregate-useSink |89.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/security/ldap_auth_provider/ut/unittest >> TLdapUtilsUrisCreatorTest::CreateUrisFromHostnamesUnknownScheme [GOOD] |89.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/cost/unittest >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-49 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-50 >> KqpIndexes::SecondaryIndexInsert1 [GOOD] |89.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/aggregator/ut/unittest >> KqpPg::DeleteWithQueryService-useSink [GOOD] >> KqpIndexes::IndexFilterPushDown [GOOD] >> KqpScripting::StreamExecuteYqlScriptSeveralQueries >> TPDiskTest::PlainChunksWriteReadALot [GOOD] >> TPDiskTest::ChunkWriteBadOffset >> YdbProxy::ListDirectory >> KqpScanSpilling::SelfJoin ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpIndexes::SecondaryIndexInsert1 [GOOD] Test command err: Trying to start YDB, gRPC: 26197, MsgBus: 10482 2025-05-07T08:52:00.216308Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501623898538461695:2141];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:52:00.230854Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003c4e/r3tmp/tmpsUQLje/pdisk_1.dat 2025-05-07T08:52:01.057743Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:52:01.057836Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:52:01.064426Z node 1 :HIVE WARN: node_info.cpp:25: 
HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:52:01.126514Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 26197, node 1 2025-05-07T08:52:01.470474Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:52:01.470495Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:52:01.470502Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:52:01.470617Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:10482 TClient is connected to server localhost:10482 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:52:02.592721Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-05-07T08:52:02.649400Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 2025-05-07T08:52:02.971388Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:52:03.417643Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... waiting... 
2025-05-07T08:52:03.620431Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 2025-05-07T08:52:05.221454Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501623898538461695:2141];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:52:05.221584Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:52:06.655942Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623924308267047:2411], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:06.656054Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:07.080517Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T08:52:07.112865Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T08:52:07.150148Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T08:52:07.205575Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T08:52:07.258329Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T08:52:07.353742Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T08:52:07.427921Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T08:52:07.537702Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623928603235008:2476], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:07.537805Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:07.538336Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623928603235013:2479], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:07.544275Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-05-07T08:52:07.567250Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501623928603235015:2480], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T08:52:07.655782Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501623928603235068:3436] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:52:09.027061Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269877761, Sender [1:7501623937193169950:3613], Recipient [1:7501623898538462020:2185]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-05-07T08:52:09.027107Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4937: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-05-07T08:52:09.027124Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5713: Pipe server connected, at tablet: 72057594046644480 2025-05-07T08:52:09.027178Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271122432, Sender [1:7501623937193169946:3610], Recipient [1:7501623898538462020:2185]: {TEvModifySchemeTransaction txid# 281474976710672 TabletId# 72057594046644480} 2025-05-07T08:52:09.027195Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4851: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-05-07T08:52:09.186329Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/Root" OperationType: ESchemeOpCreateIndexedTable CreateIndexedTable { TableDescription { Name: "TestTable" Columns { Name: "Key" Type: "Int64" NotNull: false } Columns { Name: "Index2" Type: "Int64" NotNull: false } Columns { Name: "Value" Type: "String" NotNull: false } KeyColumnNames: "Key" PartitionConfig { ColumnFamilies { Id: 0 StorageConfig { SysLog { PreferredPoolKind: "test" } Log { PreferredPoolKind: "test" } Data { PreferredPoolKind: "test" } } } } Temporary: false } IndexDescription { Name: "Index" KeyColumnNames: "Index2" Type: EIndexTypeGlobal IndexImplTableDescriptions { PartitionConfig { } } } } } TxId: 281474976710672 TabletId: 72057594046644480 PeerName: "ipv6:[::1]:48104" , at schemeshard: 72057594046644480 2025-05-07T08:52:09.187007Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_create_indexed_table.cpp:101: TCreateTableIndex construct operation table path: /Root/TestTable domain path id: [OwnerId: 72057594046644480, LocalPathId: 1] domain path: /Root shardsToCreate: 2 GetShardsInside: 34 MaxShards: 200000 2025-05-07T08:52:09.187573Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_table.cpp:426: TCreateTable Propose, path: /Root/TestTable, opId: 281474976710672:0, at schemeshard: 72057594046644480 2025-05-07T08:52:09.187761Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_create_table.cpp:433: TCreateTable Propose, path: /Root/TestTable, opId: 281474976710672:0, schema: Name: "TestTable" Columns { Name: "Key" Type: "Int64" NotNull: false } Columns { Name: "Index2" Type: "Int64" NotNull: false } Columns { Name: "Value" Type: "String" NotNull: false } KeyColumnNames: "Key" PartitionConfig { ColumnFamilies { Id: 0 StorageConfig { SysLog { PreferredPoolKind: "test" } Log { PreferredPoolKind: "test" } Data { PreferredPoolKind: "test" } } ... 
D_SCHEME Origin: 72075186224037923 Status: COMPLETE TxId: 281474976715672 Step: 1746607959747 OrderId: 281474976715672 ExecLatency: 0 ProposeLatency: 17 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186224037923 CpuTimeUsec: 1519 } } 2025-05-07T08:52:39.722739Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-05-07T08:52:39.723070Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 281474976715672:2, at schemeshard: 72057594046644480 2025-05-07T08:52:39.723083Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-05-07T08:52:39.723353Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269877761, Sender [3:7501624065360692809:3679], Recipient [3:7501624035295919344:2185]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-05-07T08:52:39.723377Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4937: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-05-07T08:52:39.723389Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5713: Pipe server connected, at tablet: 72057594046644480 2025-05-07T08:52:39.723588Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269551620, Sender [3:7501624065360692734:2520], Recipient [3:7501624035295919344:2185]: NKikimrTxDataShard.TEvSchemaChanged Source { RawX1: 7501624065360692734 RawX2: 4503612512274904 } Origin: 72075186224037923 State: 2 TxId: 281474976715672 Step: 0 Generation: 1 2025-05-07T08:52:39.723605Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4872: StateWork, processing event TEvDataShard::TEvSchemaChanged 2025-05-07T08:52:39.723656Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5472: Handle TEvSchemaChanged, tabletId: 72057594046644480, at schemeshard: 72057594046644480, message: Source { RawX1: 7501624065360692734 RawX2: 4503612512274904 } Origin: 72075186224037923 State: 2 TxId: 281474976715672 Step: 0 Generation: 1 2025-05-07T08:52:39.723669Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation FindRelatedPartByTabletId, TxId: 281474976715672, tablet: 72075186224037923, partId: 2 2025-05-07T08:52:39.723741Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 281474976715672:2, at schemeshard: 72057594046644480, message: Source { RawX1: 7501624065360692734 RawX2: 4503612512274904 } Origin: 72075186224037923 State: 2 TxId: 281474976715672 Step: 0 Generation: 1 2025-05-07T08:52:39.723756Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1005: NTableState::TProposedWaitParts operationId# 281474976715672:2 HandleReply TEvSchemaChanged at tablet: 72057594046644480 2025-05-07T08:52:39.723803Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1009: NTableState::TProposedWaitParts operationId# 281474976715672:2 HandleReply TEvSchemaChanged at tablet: 72057594046644480 message: Source { RawX1: 7501624065360692734 RawX2: 4503612512274904 } Origin: 72075186224037923 State: 2 TxId: 281474976715672 Step: 0 Generation: 1 2025-05-07T08:52:39.723828Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:655: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 281474976715672:2, shardIdx: 72057594046644480:36, datashard: 
72075186224037923, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046644480 2025-05-07T08:52:39.723836Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:674: all shard schema changes has been received, operationId: 281474976715672:2, at schemeshard: 72057594046644480 2025-05-07T08:52:39.723846Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:686: send schema changes ack message, operation: 281474976715672:2, datashard: 72075186224037923, at schemeshard: 72057594046644480 2025-05-07T08:52:39.723859Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 281474976715672:2 129 -> 240 2025-05-07T08:52:39.723943Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-05-07T08:52:39.724098Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269877764, Sender [3:7501624065360692797:3668], Recipient [3:7501624035295919344:2185]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-05-07T08:52:39.724115Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4938: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-05-07T08:52:39.724123Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5761: Server pipe is reset, at schemeshard: 72057594046644480 2025-05-07T08:52:39.724283Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 281474976715672:2, at schemeshard: 72057594046644480 2025-05-07T08:52:39.724293Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-05-07T08:52:39.724305Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:275: Activate send for 281474976715672:2 2025-05-07T08:52:39.724353Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:630: Send to actor: [3:7501624065360692734:2520] msg type: 269552132 msg: NKikimrTxDataShard.TEvSchemaChangedResult TxId: 281474976715672 at schemeshard: 72057594046644480 2025-05-07T08:52:39.724431Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 2146435072, Sender [3:7501624035295919344:2185], Recipient [3:7501624035295919344:2185]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-05-07T08:52:39.724448Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4857: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-05-07T08:52:39.724482Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 281474976715672:2, at schemeshard: 72057594046644480 2025-05-07T08:52:39.724501Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046644480] TDone opId# 281474976715672:2 ProgressState 2025-05-07T08:52:39.724570Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-05-07T08:52:39.724599Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976715672:2 progress is 3/3 2025-05-07T08:52:39.724618Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 281474976715672 ready parts: 3/3 2025-05-07T08:52:39.724659Z node 3 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976715672:2 progress is 3/3 2025-05-07T08:52:39.724669Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 281474976715672 ready parts: 3/3 2025-05-07T08:52:39.724684Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 281474976715672, ready parts: 3/3, is published: true 2025-05-07T08:52:39.724720Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [3:7501624065360692699:2517] message: TxId: 281474976715672 2025-05-07T08:52:39.724741Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 281474976715672 ready parts: 3/3 2025-05-07T08:52:39.724768Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 281474976715672:0 2025-05-07T08:52:39.724792Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 281474976715672:0 2025-05-07T08:52:39.724906Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 17] was 4 2025-05-07T08:52:39.724920Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 281474976715672:1 2025-05-07T08:52:39.724926Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 281474976715672:1 2025-05-07T08:52:39.724940Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 18] was 3 2025-05-07T08:52:39.724948Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 281474976715672:2 2025-05-07T08:52:39.724954Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 281474976715672:2 2025-05-07T08:52:39.724984Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 19] was 3 2025-05-07T08:52:39.725389Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-05-07T08:52:39.725441Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:630: Send to actor: [3:7501624065360692699:2517] msg type: 271124998 msg: NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976715672 at schemeshard: 72057594046644480 2025-05-07T08:52:39.727989Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269877764, Sender [3:7501624065360692714:3612], Recipient [3:7501624035295919344:2185]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-05-07T08:52:39.728021Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4938: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-05-07T08:52:39.728033Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5761: Server pipe is reset, at schemeshard: 72057594046644480 2025-05-07T08:52:39.730234Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269877764, Sender [3:7501624065360692809:3679], Recipient 
[3:7501624035295919344:2185]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-05-07T08:52:39.730261Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4938: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-05-07T08:52:39.730271Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5761: Server pipe is reset, at schemeshard: 72057594046644480 2025-05-07T08:52:39.822253Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271125000, Sender [0:0:0], Recipient [3:7501624035295919344:2185]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T08:52:39.822291Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4848: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T08:52:39.822336Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271124999, Sender [3:7501624035295919344:2185], Recipient [3:7501624035295919344:2185]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T08:52:39.822353Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4847: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime >> KqpYql::EvaluateIf >> TPDiskTest::ChunkWriteBadOffset [GOOD] >> KqpPg::Returning+useSink [GOOD] >> KqpPg::Returning-useSink >> KqpPg::InsertNoTargetColumns_ColumnOrder+useSink [GOOD] >> KqpPg::InsertNoTargetColumns_ColumnOrder-useSink >> KqpScanSpilling::SpillingPragmaParseError ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpIndexes::IndexFilterPushDown [GOOD] Test command err: Trying to start YDB, gRPC: 64141, MsgBus: 12968 2025-05-07T08:52:09.598868Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501623938224306014:2070];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:52:09.599892Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003c44/r3tmp/tmpiHosHg/pdisk_1.dat 2025-05-07T08:52:10.103832Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:52:10.103920Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:52:10.107399Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:52:10.114850Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 64141, node 1 2025-05-07T08:52:10.294618Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:52:10.294668Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:52:10.294675Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:52:10.294798Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:12968 TClient is connected to server localhost:12968 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:52:11.696867Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:52:11.738459Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T08:52:11.768623Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:52:11.975217Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:52:12.207547Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:52:12.296881Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:52:14.515207Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623959699144156:2408], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:14.515316Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:14.602200Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501623938224306014:2070];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:52:14.602273Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:52:14.831011Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T08:52:14.895903Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T08:52:14.948464Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T08:52:14.994397Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T08:52:15.031173Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T08:52:15.081756Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T08:52:15.114229Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T08:52:15.189530Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623963994112113:2472], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:15.189641Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:15.190230Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623963994112118:2475], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:15.198231Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-05-07T08:52:15.214971Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501623963994112120:2476], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T08:52:15.322726Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501623963994112171:3428] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:52:16.549595Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269877761, Sender [1:7501623968289079745:3605], Recipient [1:7501623942519273721:2192]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-05-07T08:52:16.549636Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4937: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-05-07T08:52:16.549646Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5713: Pipe server connected, at tablet: 72057594046644480 2025-05-07T08:52:16.549686Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271122432, Sender [1:7501623968289079741:3602], Recipient [1:7501623942519273721:2192]: {TEvModifySchemeTransaction txid# 281474976710672 TabletId# 72057594046644480} 2025-05-07T08:52:16.549701Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4851: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-05-07T08:52:16.594982Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/Root" OperationType: ESchemeOpCreateIndexedTable CreateIndexedTable { TableDescription { Name: "TestTable" Columns { Name: "Key" Type: "String" NotNull: false } Columns { Name: "Index2" Type: "String" NotNull: false } Columns { Name: "Value" Type: "String" NotNull: false } KeyColumnNames: "Key" PartitionConfig { ColumnFamilies { Id: 0 StorageConfig { SysLog { PreferredPoolKind: "test" } Log { PreferredPoolKind: "test" } Data { PreferredPoolKind: "test" } } } } Temporary: false } IndexDescription { Name: "Index" KeyColumnNames: "Index2" Type: EIndexTypeGlobal IndexImplTableDescriptions { PartitionConfig { } } } } } TxId: 281474976710672 TabletId: 72057594046644480 PeerName: "ipv6:[::1]:42440" , at schemeshard: 72057594046644480 2025-05-07T08:52:16.595515Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_create_indexed_table.cpp:101: TCreateTableIndex construct operation table path: /Root/TestTable domain path id: [OwnerId: 72057594046644480, LocalPathId: 1] domain path: /Root shardsToCreate: 2 GetShardsInside: 34 MaxShards: 200000 2025-05-07T08:52:16.595958Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_table.cpp:426: TCreateTable Propose, path: /Root/TestTable, opId: 281474976710672:0, at schemeshard: 72057594046644480 2025-05-07T08:52:16.596098Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_create_table.cpp:433: TCreateTable Propose, path: /Root/TestTable, opId: 281474976710672:0, schema: Name: "TestTable" Columns { Name: "Key" Type: "String" NotNull: false } Columns { Name: "Index2" Type: "String" NotNull: false } Columns { Name: "Value" Type: "String" NotNull: fa ... 
opose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-05-07T08:52:23.296603Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7501623997787607871:2474], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-05-07T08:52:23.376594Z node 2 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [2:7501623997787607922:3413] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:52:23.504471Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7501623976312769269:2226];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:52:23.504589Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:52:25.491301Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480 2025-05-07T08:52:25.631392Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480 2025-05-07T08:52:25.726168Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480 Trying to start YDB, gRPC: 4724, MsgBus: 65135 2025-05-07T08:52:31.157721Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7501624028625200766:2198];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:52:31.194562Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003c44/r3tmp/tmp3Do5Vg/pdisk_1.dat 2025-05-07T08:52:31.460945Z node 3 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:52:31.523042Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:52:31.523133Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:52:31.528550Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 4724, node 3 2025-05-07T08:52:31.760961Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:52:31.760987Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:52:31.760997Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:52:31.761131Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:65135 TClient is connected to server localhost:65135 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:52:32.459462Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:52:32.478866Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T08:52:32.498602Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:52:32.610117Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:52:32.804908Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:52:32.897441Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:52:36.143308Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7501624050100038758:2407], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:36.143415Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:36.143472Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7501624028625200766:2198];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:52:36.143541Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:52:36.228789Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T08:52:36.317728Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T08:52:36.414310Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T08:52:36.497026Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T08:52:36.582961Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T08:52:36.667608Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T08:52:36.767947Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T08:52:36.885050Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7501624050100039432:2471], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:36.885158Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:36.885441Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7501624050100039437:2474], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:36.892277Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-05-07T08:52:36.903149Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7501624050100039439:2475], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T08:52:36.986566Z node 3 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [3:7501624050100039490:3426] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:52:38.407857Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480 2025-05-07T08:52:38.509996Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480 2025-05-07T08:52:38.621510Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710674:0, at schemeshard: 72057594046644480 |89.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/persqueue/dread_cache_service/ut/ydb-core-persqueue-dread_cache_service-ut |89.7%| [LD] {RESULT} $(B)/ydb/core/persqueue/dread_cache_service/ut/ydb-core-persqueue-dread_cache_service-ut |89.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/persqueue/dread_cache_service/ut/ydb-core-persqueue-dread_cache_service-ut >> TCertificateAuthUtilsTest::GenerateAndVerifyCertificates >> DataShardSnapshots::LockedWriteCleanupOnSplit+UseSink [GOOD] >> DataShardSnapshots::LockedWriteCleanupOnSplit-UseSink >> KqpUniqueIndex::InsertFkAlreadyExist [GOOD] >> KqpUniqueIndex::InsertFkDuplicate ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/pdisk/ut/unittest >> TPDiskTest::ChunkWriteBadOffset [GOOD] Test command err: restart# 0 start with noop scheduler# 0 end with noop scheduler# 0 all chunk reads are received all chunk writes are received all log writes are received restart# 0 start with noop scheduler# 1 end with noop scheduler# 0 all chunk reads are received all chunk writes are received all log writes are received restart# 0 start with noop scheduler# 0 end with noop scheduler# 1 all chunk reads are received all chunk writes are received all log writes are received restart# 0 start with noop scheduler# 1 end with noop scheduler# 1 all chunk reads are received all chunk writes are received all log writes are received restart# 1 start with noop scheduler# 0 end with noop scheduler# 0 restart all chunk reads are received all chunk writes are received all log writes are received restart# 1 start with noop scheduler# 1 end with noop scheduler# 0 restart all chunk reads are received all chunk writes are received all log writes are received restart# 1 start with noop scheduler# 0 end with noop scheduler# 1 restart all chunk reads are received all chunk writes are received all log writes are received restart# 1 start with noop scheduler# 1 end with noop scheduler# 1 restart all chunk reads are received all chunk writes are received all log writes are received plainDataChunks# 0 seed# 1746607944818340 offset# 0 size# 9765 9765 ?= 9765 offset# 146304 size# 20441 20441 ?= 20441 offset# 1170432 size# 22060 22060 ?= 22060 offset# 1881632 size# 18568 18568 ?= 18568 offset# 1938528 size# 28768 28768 ?= 28768 offset# 2141728 size# 15572 
15572 ?= 15572 offset# 2645664 size# 27085 27085 ?= 27085 offset# 3377184 size# 24809 24809 ?= 24809 offset# 3954272 size# 32233 32233 ?= 32233 offset# 4515104 size# 18012 18012 ?= 18012 offset# 5644896 size# 29102 29102 ?= 29102 offset# 6530848 size# 25945 25945 ?= 25945 offset# 6551168 size# 28017 28017 ?= 28017 offset# 7120128 size# 18152 18152 ?= 18152 offset# 7876032 size# 26504 26504 ?= 26504 offset# 9152128 size# 18974 18974 ?= 18974 offset# 10318496 size# 14362 14362 ?= 14362 offset# 10728960 size# 10741 10741 ?= 10741 offset# 11558016 size# 31688 31688 ?= 31688 offset# 11777472 size# 21899 21899 ?= 21899 offset# 12700000 size# 1650 1650 ?= 1650 offset# 13854176 size# 29496 29496 ?= 29496 offset# 14569440 size# 23764 23764 ?= 23764 offset# 15353792 size# 19620 19620 ?= 19620 offset# 16572992 size# 9114 9114 ?= 9114 offset# 16759936 size# 28589 28589 ?= 28589 offset# 17259808 size# 19377 19377 ?= 19377 offset# 17743424 size# 1052 1052 ?= 1052 offset# 17800320 size# 505 505 ?= 505 offset# 18775680 size# 19306 19306 ?= 19306 offset# 19633184 size# 13091 13091 ?= 13091 offset# 20315936 size# 27030 27030 ?= 27030 offset# 21075904 size# 5130 5130 ?= 5130 offset# 21124672 size# 5795 5795 ?= 5795 offset# 21730208 size# 18924 18924 ?= 18924 offset# 22933152 size# 24776 24776 ?= 24776 offset# 23400512 size# 6253 6253 ?= 6253 offset# 23993856 size# 19214 19214 ?= 19214 offset# 24424640 size# 31429 31429 ?= 31429 offset# 25103328 size# 32277 32277 ?= 32277 offset# 26212800 size# 18171 18171 ?= 18171 offset# 26968704 size# 16895 16895 ?= 16895 offset# 28078176 size# 17004 17004 ?= 17004 offset# 28143200 size# 3185 3185 ?= 3185 offset# 29289248 size# 10960 10960 ?= 10960 offset# 30545024 size# 29 29 ?= 29 offset# 31682944 size# 30095 30095 ?= 30095 offset# 31727648 size# 24517 24517 ?= 24517 offset# 32215328 size# 13432 13432 ?= 13432 offset# 32637984 size# 5992 5992 ?= 5992 offset# 32963104 size# 26397 26397 ?= 26397 offset# 33251648 size# 30489 30489 ?= 30489 offset# 34340800 size# 13159 13159 ?= 13159 offset# 34958528 size# 21356 21356 ?= 21356 offset# 35251136 size# 7219 7219 ?= 7219 offset# 35413696 size# 14770 14770 ?= 14770 offset# 36376864 size# 14431 14431 ?= 14431 offset# 37262816 size# 27619 27619 ?= 27619 offset# 38242240 size# 14455 14455 ?= 14455 offset# 38400736 size# 6434 6434 ?= 6434 offset# 39506144 size# 6674 6674 ?= 6674 offset# 40477440 size# 9173 9173 ?= 9173 offset# 40725344 size# 249 249 ?= 249 offset# 41765728 size# 26696 26696 ?= 26696 offset# 42728896 size# 29547 29547 ?= 29547 offset# 43570144 size# 17138 17138 ?= 17138 offset# 43899328 size# 4724 4724 ?= 4724 offset# 44671488 size# 3319 3319 ?= 3319 offset# 44679616 size# 11667 11667 ?= 11667 offset# 44817792 size# 8106 8106 ?= 8106 offset# 45736256 size# 32202 32202 ?= 32202 offset# 46321472 size# 10544 10544 ?= 10544 offset# 47565056 size# 18108 18108 ?= 18108 offset# 48881792 size# 27201 27201 ?= 27201 offset# 49556416 size# 4109 4109 ?= 4109 offset# 50210720 size# 13410 13410 ?= 13410 offset# 50515520 size# 31556 31556 ?= 31556 offset# 51267360 size# 22526 22526 ?= 22526 offset# 52600352 size# 21058 21058 ?= 21058 offset# 53738272 size# 1344 1344 ?= 1344 offset# 54392576 size# 25400 25400 ?= 25400 offset# 54985920 size# 15891 15891 ?= 15891 offset# 55473600 size# 17483 17483 ?= 17483 offset# 56266080 size# 631 631 ?= 631 offset# 57294272 size# 907 907 ?= 907 offset# 58208672 size# 7784 7784 ?= 7784 offset# 58753248 size# 5489 5489 ?= 5489 offset# 59505088 size# 9241 9241 ?= 9241 offset# 60419488 size# 13161 13161 
?= 13161 offset# 61756544 size# 18121 18121 ?= 18121 offset# 62264544 size# 501 501 ?= 501 offset# 63471552 size# 18216 18216 ?= 18216 offset# 64016128 size# 28457 28457 ?= 28457 offset# 64442848 size# 7874 7874 ?= 7874 offset# 65641728 size# 22259 22259 ?= 22259 offset# 66320416 size# 3492 3492 ?= 3492 offset# 67661536 size# 329 329 ?= 329 offset# 68132960 size# 27068 27068 ?= 27068 offset# 68685664 size# 16460 16460 ?= 16460 offset# 68758816 size# 589 589 ?= 589 offset# 69250560 size# 20074 20074 ?= 20074 offset# 70311264 size# 26700 26700 ?= 26700 offset# 70794880 size# 26281 26281 ?= 26281 offset# 71802752 size# 27098 27098 ?= 27098 offset# 71863712 size# 20572 20572 ?= 20572 offset# 72371712 size# 17527 17527 ?= 17527 offset# 73277984 size# 17893 17893 ?= 17893 offset# 74094848 size# 17618 17618 ?= 17618 offset# 74411840 size# 13646 13646 ?= 13646 offset# 75651360 size# 18811 18811 ?= 18811 offset# 76374752 size# 21446 21446 ?= 21446 offset# 76456032 size# 26221 26221 ?= 26221 offset# 77203808 size# 12705 12705 ?= 12705 offset# 78146656 size# 5040 5040 ?= 5040 offset# 79248000 size# 25823 25823 ?= 25823 offset# 80491584 size# 22101 22101 ?= 22101 offset# 80987392 size# 8018 8018 ?= 8018 offset# 81300320 size# 19304 19304 ?= 19304 offset# 81844896 size# 25117 25117 ?= 25117 offset# 82235040 size# 4142 4142 ?= 4142 offset# 83011264 size# 11377 11377 ?= 11377 offset# 83332320 size# 3097 3097 ?= 3097 offset# 84571840 size# 24110 24110 ?= 24110 offset# 85445600 size# 506 506 ?= 506 offset# 85600032 size# 29057 29057 ?= 29057 offset# 85888576 size# 30093 30093 ?= 30093 offset# 86205568 size# 23985 23985 ?= 23985 offset# 86315296 size# 15754 15754 ?= 15754 offset# 86368128 size# 26624 26624 ?= 26624 offset# 87193120 size# 4367 4367 ?= 4367 offset# 88424512 size# 26256 26256 ?= 26256 offset# 88497664 size# 19149 19149 ?= 19149 offset# 89513664 size# 13963 13963 ?= 13963 offset# 90793824 size# 321 321 ?= 321 offset# 91708224 size# 21040 21040 ?= 21040 offset# 92143072 size# 17311 17311 ?= 17311 offset# 92927424 size# 30880 30880 ?= 30880 offset# 92972128 size# 24783 24783 ?= 24783 offset# 93256608 size# 933 933 ?= 933 offset# 93484192 size# 23920 23920 ?= 23920 offset# 93866208 size# 15713 15713 ?= 15713 offset# 94479872 size# 28602 28602 ?= 28602 offset# 95747840 size# 17957 17957 ?= 17957 offset# 96166432 size# 29852 29852 ?= 29852 offset# 97483168 size# 10187 10187 ?= 10187 offset# 98820224 size# 15507 15507 ?= 15507 offset# 99612704 size# 20722 20722 ?= 20722 offset# 100291392 size# 24348 24348 ?= 24348 offset# 101575616 size# 13962 13962 ?= 13962 offset# 102567232 size# 1739 1739 ?= 1739 offset# 103697024 size# 10729 10729 ?= 10729 offset# 104274112 size# 6357 6357 ?= 6357 offset# 104960928 size# 13633 13633 ?= 13633 offset# 106033824 size# 1477 1477 ?= 1477 offset# 106314240 size# 14760 14760 ?= 14760 offset# 106846624 size# 29232 29232 ?= 29232 offset# 106964480 size# 4843 4843 ?= 4843 offset# 107610656 size# 8910 8910 ?= 8910 offset# 108179616 size# 3013 3013 ?= 3013 offset# 109045248 size# 11935 11935 ?= 11935 offset# 109622336 size# 22469 22469 ?= 22469 offset# 110516416 size# 19989 19989 ?= 19989 offset# 111186976 size# 11278 11278 ?= 11278 offset# 111556800 size# 18700 18700 ?= 18700 offset# 112503712 size# 7299 7299 ?= 7299 offset# 113820448 size# 7978 7978 ?= 7978 offset# 115043712 size# 28233 28233 ?= 28233 offset# 116352320 size# 5137 5137 ?= 5137 offset# 117664992 size# 12269 12269 ?= 12269 offset# 118022624 size# 15902 15902 ?= 15902 offset# 118904512 size# 660 660 ?= 660 
offset# 119969280 size# 2793 2793 ?= 2793 offset# 121241312 size# 27963 27963 ?= 27963 offset# 122037856 size# 30240 30240 ?= 30240 offset# 122285760 size# 31966 31966 ?= 31966 offset# 122623072 size# 16576 16576 ?= 16576 offset# 123086368 size# 27836 27836 ?= 27836 offset# 123313952 size# 6540 6540 ?= 6540 offset# 124386848 size# 2054 2054 ?= 2054 offset# 124821696 size# 26762 26762 ?= 26762 offset# 125752352 size# 625 625 ?= 625 offset# 126475744 size# 23547 23547 ?= 23547 offset# 127442976 size# 1231 1231 ?= 1231 offset# 128454912 size# 14407 14407 ?= 14407 offset# 128491488 size# 3338 3338 ?= 3338 offset# 129166112 size# 24768 24768 ?= 24768 offset# 129905760 size# 12852 12852 ?= 12852 offset# 131234688 size# 19794 19794 ?= 19794 offset# 131836160 size# 7550 7550 ?= 7550 offset# 132657088 size# 6037 6037 ?= 6037 offset# 132957824 size# 18821 18821 ?= 18821 offset# 133957568 size# 3215 3215 ?= 3215 offset# 134896352 size# 15443 15443 ?= 15443 plainDataChunks# 1 seed# 1746607945694229 offset# 0 size# 24915 24915 ?= 24915 offset# 1146880 size# 18266 18266 ?= 18266 offset# 1814528 size# 14670 14670 ?= 14670 offset# 3059712 size# 9940 9940 ?= 9940 offset# 4374528 size# 7166 7166 ?= 7166 offset# 4980736 size# 1069 1069 ?= 1069 offset# 6299648 size# 15353 15353 ?= 15353 offset# 6447104 size# 11347 11347 ?= 11347 offset# 7188480 size# 20904 20904 ?= 20904 offset# 7393280 size# 18100 18100 ?= 18100 offset# 8069120 size# 12224 12224 ?= 12224 offset# 8790016 size# 2748 2748 ?= 2748 offset# 9519104 size# 27800 27800 ?= 27800 offset# 10186752 size# 23909 23909 ?= 23909 offset# 10686464 size# 4774 4774 ?= 4774 offset# 11624448 size# 9880 9880 ?= 9880 offset# 12546048 size# 5849 5849 ?= 5849 offset# 12890112 size# 20496 20496 ?= 20496 offset# 13725696 size# 29740 29740 ?= 29740 offset# 14626816 size# 22138 22138 ?= 22138 offset# 15331328 size# 24793 24793 ?= 24793 offset# 15556608 size# 28794 28794 ?= 28794 offset# 16728064 size# 31583 31583 ?= 31583 offset# 17092608 size# 31497 31497 ?= 31497 offset# 17100800 size# 24694 24694 ?= 24694 offset# 18423808 size# 30950 30950 ?= 30950 offset# 18866176 size# 13252 13252 ?= 13252 offset# 19529728 size# 11096 11096 ?= 11096 offset# 19795968 size# 18818 18818 ?= 18818 offset# 20369408 size# 28721 28721 ?= 28721 offset# 21323776 size# 11771 11771 ?= 11771 offset# 21815296 size# 30219 30219 ?= 30219 offset# 22319104 size# 20030 20030 ?= 20030 offset# 22765568 size# 29286 29286 ?= 29286 offset# 23552000 size# 3607 3607 ?= 3607 offset# 23773184 size# 23084 23084 ?= 23084 offset# 25055232 size# 30990 30990 ?= 30990 offset# 25788416 size# 10307 10307 ?= 10307 offset# 26439680 size# 19723 19723 ?= 19723 offset# 27402240 size# 8099 8099 ?= 8099 offset# 27762688 size# 1704 1704 ?= 1704 offset# 28491776 size# 21495 21495 ?= 21495 offset# 29638656 size# 30041 30041 ?= 30041 offset# 30072832 size# 8592 8592 ?= 8592 offset# 31416320 size# 27088 27088 ?= 27088 offset# 31911936 size# 30558 30558 ?= 30558 offset# 32415744 size# 6115 6115 ?= 6115 offset# 32763904 size# 29361 29361 ?= 29361 offset# 32952320 size# 18148 18148 ?= 18148 offset# 33042432 size# 8643 8643 ?= 8643 offset# 34275328 size# 8859 8859 ?= 8859 offset# 34766848 size# 16058 16058 ?= 16058 offset# 35377152 size# 16484 16484 ?= 16484 offset# 36540416 size# 11211 11211 ?= 11211 offset# 37097472 size# 25663 25663 ?= 25663 offset# 37711872 size# 21958 21958 ?= 21958 offset# 37756928 size# 24954 24954 ?= 24954 offset# 38412288 size# 24079 24079 ?= 24079 offset# 39559168 size# 29493 29493 ?= 29493 offset# 40689664 
size# 32035 32035 ?= 32035 offset# 41246720 size# 14572 14572 ?= 14572 offset# 41496576 size# 29119 29119 ?= 29119 offset# 42520576 size# 7659 7659 ?= 7659 offset# 43499520 size# 31192 31192 ?= 31192 offset# 44785664 size# 12616 12616 ?= 12616 offset# 44974080 size# 1507 1507 ?= 1507 offset# 45326336 size# 1552 1552 ?= 1552 offset# 45531136 size# 2924 2924 ?= 2924 offset# 46276608 size# 31385 31385 ?= 31385 offset# 47091712 size# 22218 22218 ?= 22218 offset# 47923200 size# 5326 5326 ?= 5326 offset# 48840704 size# 11458 11458 ?= 11458 offset# 49246208 size# 31489 31489 ?= 31489 offset# 49680384 size# 1082 1082 ?= 1082 offset# 50372608 size# 2304 2304 ?= 2304 offset# 50376704 size# 17992 17992 ?= 17992 offset# 50499584 size# 32307 32307 ?= 32307 offset# 51474432 size# 20647 20647 ?= 20647 offset# 52318208 size# 4224 4224 ?= 4224 offset# 53018624 size# 29610 29610 ?= 29610 offset# 53608448 size# 26437 26437 ?= 26437 offset# 54202368 size# 10794 10794 ?= 10794 offset# 54415360 size# 1414 1414 ?= 1414 offset# 55402496 size# 21822 21822 ?= 21822 offset# 56295424 size# 27628 27628 ?= 27628 offset# 56369152 size# 7872 7872 ?= 7872 offset# 56778752 size# 32258 32258 ?= 32258 offset# 58040320 size# 3860 3860 ?= 3860 offset# 58884096 size# 11357 11357 ?= 11357 offset# 59715584 size# 11062 11062 ?= 11062 offset# 60452864 size# 25786 25786 ?= 25786 offset# 61046784 size# 17844 17844 ?= 17844 offset# 61784064 size# 9614 9614 ?= 9614 offset# 62644224 size# 21088 21088 ?= 21088 offset# 63762432 size# 30048 30048 ?= 30048 offset# 64212992 size# 24157 24157 ?= 24157 offset# 64237568 size# 20430 20430 ?= 20430 offset# 65064960 size# 30980 30980 ?= 30980 offset# 65265664 size# 17368 17368 ?= 17368 offset# 66048000 size# 6640 6640 ?= 6640 offset# 66060288 size# 18995 18995 ?= 18995 offset# 66727936 size# 24195 24195 ?= 24195 offset# 67305472 size# 14805 14805 ?= 14805 offset# 67727360 size# 17141 17141 ?= 17141 offset# 68534272 size# 14443 14443 ?= 14443 offset# 69021696 size# 10496 10496 ?= 10496 offset# 69099520 size# 15960 15960 ?= 15960 offset# 70373376 size# 14018 14018 ?= 14018 offset# 71184384 size# 10908 10908 ?= 10908 offset# 71847936 size# 21975 21975 ?= 21975 offset# 72290304 size# 11976 11976 ?= 11976 offset# 73469952 size# 8654 8654 ?= 8654 offset# 74395648 size# 5231 5231 ?= 5231 offset# 75173888 size# 31176 31176 ?= 31176 offset# 75534336 size# 13 13 ?= 13 offset# 76562432 size# 3015 3015 ?= 3015 offset# 77602816 size# 15640 15640 ?= 15640 offset# 77647872 size# 14516 14516 ?= 14516 offset# 77828096 size# 10793 10793 ?= 10793 offset# 78467072 size# 25095 25095 ?= 25095 offset# 78839808 size# 23949 23949 ?= 23949 offset# 79020032 size# 9557 9557 ?= 9557 offset# 79818752 size# 16914 16914 ?= 16914 offset# 79867904 size# 18014 18014 ?= 18014 offset# 80764928 size# 12104 12104 ?= 12104 offset# 81698816 size# 8234 8234 ?= 8234 offset# 82653184 size# 12966 12966 ?= 12966 offset# 83779584 size# 17486 17486 ?= 17486 offset# 84164608 size# 5600 5600 ?= 5600 offset# 85012480 size# 2003 2003 ?= 2003 offset# 86138880 size# 18422 18422 ?= 18422 offset# 86499328 size# 25510 25510 ?= 25510 offset# 87674880 size# 26618 26618 ?= 26618 offset# 88535040 size# 31980 31980 ?= 31980 offset# 88915968 size# 15988 15988 ?= 15988 offset# 90148864 size# 26909 26909 ?= 26909 offset# 90898432 size# 941 941 ?= 941 offset# 91426816 size# 1379 1379 ?= 1379 offset# 91508736 size# 17479 17479 ?= 17479 offset# 91779072 size# 11059 11059 ?= 11059 offset# 92520448 size# 286 286 ?= 286 offset# 92680192 size# 23708 23708 ?= 23708 
offset# 93016064 size# 26734 26734 ?= 26734 offset# 93511680 size# 7591 7591 ?= 7591 offset# 94191616 size# 12 12 ?= 12 offset# 95125504 size# 18548 18548 ?= 18548 offset# 95879168 size# 17138 17138 ?= 17138 offset# 96489472 size# 11187 11187 ?= 11187 offset# 97722368 size# 31810 31810 ?= 31810 offset# 98848768 size# 21976 21976 ?= 21976 offset# 100032512 size# 6365 6365 ?= 6365 offset# 100958208 size# 22375 22375 ?= 22375 offset# 101818368 size# 3652 3652 ?= 3652 offset# 103100416 size# 18335 18335 ?= 18335 offset# 103211008 size# 9835 9835 ?= 9835 offset# 103747584 size# 355 355 ?= 355 offset# 104513536 size# 10031 10031 ?= 10031 offset# 104841216 size# 13099 13099 ?= 13099 offset# 105598976 size# 17529 17529 ?= 17529 offset# 105951232 size# 12645 12645 ?= 12645 offset# 106692608 size# 21044 21044 ?= 21044 offset# 107745280 size# 2140 2140 ?= 2140 offset# 108060672 size# 5260 5260 ?= 5260 offset# 108978176 size# 14224 14224 ?= 14224 offset# 110030848 size# 14754 14754 ?= 14754 offset# 110284800 size# 6358 6358 ?= 6358 offset# 111247360 size# 19297 19297 ?= 19297 offset# 111722496 size# 25307 25307 ?= 25307 offset# 111853568 size# 22279 22279 ?= 22279 offset# 111898624 size# 25243 25243 ?= 25243 offset# 111984640 size# 3706 3706 ?= 3706 offset# 112979968 size# 19358 19358 ?= 19358 offset# 114081792 size# 9765 9765 ?= 9765 offset# 115064832 size# 32069 32069 ?= 32069 offset# 115101696 size# 15503 15503 ?= 15503 offset# 115560448 size# 2068 2068 ?= 2068 offset# 116658176 size# 31011 31011 ?= 31011 offset# 117010432 size# 10076 10076 ?= 10076 offset# 117088256 size# 21634 21634 ?= 21634 offset# 117227520 size# 13950 13950 ?= 13950 offset# 117932032 size# 10486 10486 ?= 10486 offset# 118726656 size# 5891 5891 ?= 5891 offset# 119758848 size# 11398 11398 ?= 11398 offset# 120725504 size# 18899 18899 ?= 18899 offset# 121229312 size# 29010 29010 ?= 29010 offset# 121995264 size# 19039 19039 ?= 19039 offset# 122847232 size# 21511 21511 ?= 21511 offset# 123412480 size# 717 717 ?= 717 offset# 123834368 size# 15009 15009 ?= 15009 offset# 124997632 size# 18353 18353 ?= 18353 offset# 125046784 size# 2755 2755 ?= 2755 offset# 125722624 size# 27785 27785 ?= 27785 offset# 126521344 size# 21619 21619 ?= 21619 offset# 127619072 size# 28811 28811 ?= 28811 offset# 127913984 size# 20749 20749 ?= 20749 offset# 128196608 size# 15655 15655 ?= 15655 offset# 128331776 size# 13021 13021 ?= 13021 offset# 128589824 size# 3087 3087 ?= 3087 offset# 129773568 size# 5971 5971 ?= 5971 offset# 130904064 size# 23752 23752 ?= 23752 offset# 132083712 size# 511 511 ?= 511 offset# 132145152 size# 16467 16467 ?= 16467 offset# 132657152 size# 12425 12425 ?= 12425 offset# 133709824 size# 8711 8711 ?= 8711 offset# 133967872 size# 13612 13612 ?= 13612 offset# 134000640 size# 1953 1953 ?= 1953 offset# 134901760 size# 9286 9286 ?= 9286 seed# 1746607946470210 total_speed# 0.2147483648 GB/s seed# 1746607963173013 >> TCertificateAuthUtilsTest::GenerateAndVerifyCertificates [GOOD] >> ConvertMiniKQLTypeToYdbTypeTest::TTzDateTime [GOOD] >> ConvertMiniKQLTypeToYdbTypeTest::TTzTimeStamp [GOOD] >> ConvertMiniKQLTypeToYdbTypeTest::UuidType [GOOD] >> ConvertMiniKQLTypeToYdbTypeTest::VariantTuple [GOOD] >> ConvertMiniKQLTypeToYdbTypeTest::VariantStruct [GOOD] >> ConvertMiniKQLTypeToYdbTypeTest::Void [GOOD] >> ConvertMiniKQLTypeToYdbTypeTest::Tuple [GOOD] >> KqpPg::V1CreateTable [GOOD] >> KqpPg::ValuesInsert+useSink |89.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_bsvolume/ydb-core-tx-schemeshard-ut_bsvolume |89.7%| 
[LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_bsvolume/ydb-core-tx-schemeshard-ut_bsvolume |89.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_bsvolume/ydb-core-tx-schemeshard-ut_bsvolume |89.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/security/certificate_check/ut/unittest >> TCertificateAuthUtilsTest::GenerateAndVerifyCertificates [GOOD] >> DataShardSnapshots::VolatileSnapshotTimeout [GOOD] >> DataShardSnapshots::VolatileSnapshotTimeoutRefresh >> CellsFromTupleTest::CellsFromTupleSuccess [GOOD] >> CellsFromTupleTest::CellsFromTupleSuccessPg |89.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/ydb_convert/ut/unittest >> ConvertMiniKQLTypeToYdbTypeTest::Tuple [GOOD] >> CellsFromTupleTest::CellsFromTupleSuccessPg [GOOD] >> CellsFromTupleTest::CellsFromTupleFails [GOOD] >> CellsFromTupleTest::CellsFromTupleFailsPg [GOOD] >> CompressionTests::Zstd [GOOD] >> CompressionTests::Unsupported [GOOD] >> ConvertMiniKQLTypeToYdbTypeTest::DecimalType [GOOD] >> KqpIndexes::SecondaryIndexUpsert2Update [GOOD] >> KqpIndexes::SecondaryIndexUpdateOnUsingIndex |89.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/ydb_convert/ut/unittest >> ConvertMiniKQLTypeToYdbTypeTest::DecimalType [GOOD] >> TInterconnectTest::OldFormat >> YdbIndexTable::MultiShardTableOneIndex [GOOD] >> YdbIndexTable::MultiShardTableOneIndexDataColumn >> TExternalDataSourceTest::ReplaceExternalDataStoreShouldFailIfEntityOfAnotherTypeWithSameNameExists >> KqpPg::TableSelect-useSink [GOOD] >> KqpPg::TableInsert+useSink >> TInterconnectTest::OldFormat [GOOD] >> TInterconnectTest::OldFormatSuppressVersionCheckOnNew >> KqpVectorIndexes::OrderByCosineSimilarityNullableLevel2 [GOOD] >> KqpVectorIndexes::VectorIndexIsNotUpdatable |89.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/kesus/proxy/ut/unittest >> YdbProxy::ListDirectory [GOOD] >> YdbProxy::DropTopic >> TInterconnectTest::OldFormatSuppressVersionCheckOnNew [GOOD] >> TInterconnectTest::OldFormatSuppressVersionCheckOnOld >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-50 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-51 |89.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/kesus/proxy/ut/unittest >> TSchemeShardExtSubDomainTest::AlterCantChangeExternalSchemeShard-AlterDatabaseCreateHiveFirst-false >> TInterconnectTest::OldFormatSuppressVersionCheckOnOld [GOOD] >> TInterconnectTest::OldFormatSuppressVersionCheck >> BasicUsage::TWriteSession_WriteAndReadAndCommitRandomMessages [GOOD] >> BasicUsage::TWriteSession_WriteAndReadAndCommitRandomMessagesNoClusterDiscovery |89.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/storagepoolmon/ut/ydb-core-blobstorage-storagepoolmon-ut |89.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/storagepoolmon/ut/ydb-core-blobstorage-storagepoolmon-ut |89.7%| [LD] {RESULT} $(B)/ydb/core/blobstorage/storagepoolmon/ut/ydb-core-blobstorage-storagepoolmon-ut >> TInterconnectTest::OldFormatSuppressVersionCheck [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/actorlib_impl/ut/unittest >> TInterconnectTest::OldFormatSuppressVersionCheck [GOOD] Test command err: 2025-05-07T08:52:48.244190Z node 4 :INTERCONNECT WARN: interconnect_handshake.cpp:501: Handshake [4:20:2056] [node 3] ICH09 Neither CompatibilityInfo nor VersionTag of the peer can be validated, accepting by default 2025-05-07T08:52:48.966601Z node 5 :INTERCONNECT WARN: interconnect_handshake.cpp:501: Handshake [5:18:2057] [node 6] ICH09 Neither 
CompatibilityInfo nor VersionTag of the peer can be validated, accepting by default 2025-05-07T08:52:49.498261Z node 8 :INTERCONNECT WARN: interconnect_handshake.cpp:501: Handshake [8:20:2056] [node 7] ICH09 Neither CompatibilityInfo nor VersionTag of the peer can be validated, accepting by default 2025-05-07T08:52:49.500947Z node 7 :INTERCONNECT WARN: interconnect_handshake.cpp:501: Handshake [7:18:2057] [node 8] ICH09 Neither CompatibilityInfo nor VersionTag of the peer can be validated, accepting by default >> KqpPg::PgAggregate-useSink [GOOD] >> KqpPg::MkqlTerminate >> TSchemeShardExtSubDomainTest::Create >> KqpScripting::StreamExecuteYqlScriptSeveralQueries [GOOD] >> KqpScripting::StreamExecuteYqlScriptSeveralQueriesComplex >> TSchemeShardExtSubDomainTest::AlterCantChangeExternalSchemeShard-AlterDatabaseCreateHiveFirst-false [GOOD] >> TSchemeShardExtSubDomainTest::AlterCantChangeExternalSchemeShard-AlterDatabaseCreateHiveFirst-true >> TSchemeShardExtSubDomainTest::Fake [GOOD] >> TSchemeShardExtSubDomainTest::CreateWithOnlyDotsNotAllowed >> KqpScanSpilling::SelfJoin [GOOD] >> KqpIndexes::UniqAndNoUniqSecondaryIndexWithCover [GOOD] >> KqpIndexes::UpdateDeletePlan+UseSink >> TSchemeShardExtSubDomainTest::Create [GOOD] >> TSchemeShardExtSubDomainTest::CreateAndAlter >> KqpYql::EvaluateIf [GOOD] >> KqpYql::EvaluateFor >> TSchemeShardExtSubDomainTest::AlterWithPlainAlterSubdomain >> TSchemeShardExtSubDomainTest::AlterCantChangeExternalSchemeShard-AlterDatabaseCreateHiveFirst-true [GOOD] >> TSchemeShardExtSubDomainTest::AlterCantChangeExternalHive-AlterDatabaseCreateHiveFirst-false >> KqpScanSpilling::SpillingPragmaParseError [GOOD] >> TSchemeShardExtSubDomainTest::CreateWithOnlyDotsNotAllowed [GOOD] >> TSchemeShardExtSubDomainTest::NothingInsideGSS-AlterDatabaseCreateHiveFirst-false >> YdbProxy::DropTopic [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/runtime/unittest >> KqpScanSpilling::SelfJoin [GOOD] Test command err: cwd: /home/runner/.ya/build/build_root/zvgn/0028d9/ydb/core/kqp/ut/runtime/test-results/unittest/testing_out_stuff/chunk5 Trying to start YDB, gRPC: 8132, MsgBus: 6828 2025-05-07T08:52:43.273775Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501624082366297408:2063];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:52:43.273899Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0028d9/r3tmp/tmpOGWa7G/pdisk_1.dat 2025-05-07T08:52:43.723947Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:52:43.781234Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:52:43.781340Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 8132, node 1 2025-05-07T08:52:43.785251Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:52:43.950576Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:52:43.950598Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to 
initialize from file: (empty maybe) 2025-05-07T08:52:43.950605Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:52:43.950706Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6828 TClient is connected to server localhost:6828 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:52:44.897151Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:52:44.964246Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:52:45.121747Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:52:45.425317Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:52:45.525831Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:52:47.642857Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501624099546168263:2408], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:47.642966Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:47.956378Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T08:52:48.009004Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T08:52:48.058553Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T08:52:48.116269Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T08:52:48.179822Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T08:52:48.229569Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T08:52:48.269019Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501624082366297408:2063];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:52:48.269349Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:52:48.275296Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T08:52:48.367518Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501624103841136215:2471], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:48.367724Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:48.369342Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501624103841136220:2474], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:48.374488Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-05-07T08:52:48.391297Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501624103841136222:2475], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T08:52:48.493278Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501624103841136275:3424] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:52:50.150107Z node 1 :KQP_COMPUTE DEBUG: log.cpp:784: fline=kqp_compute_actor_factory.cpp:148;event=channel_info;ch_size=50;ch_count=1;ch_limit=50;inputs=0;input_channels_count=0; 2025-05-07T08:52:50.150332Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:134: SelfId: [1:7501624112431071163:2517], TxId: 281474976710672, task: 1. Ctx: { SessionId : ydb://session/3?node_id=1&id=NjZhYjZlNTEtNGI0YWU2OTAtNjY2NjMwODAtNDNiYzA4OWU=. TraceId : 01jtmz55qsfr0zt7p8jxw8tsee. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Start compute actor [1:7501624112431071163:2517], task: 1 2025-05-07T08:52:50.150356Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:141: SelfId: [1:7501624112431071163:2517], TxId: 281474976710672, task: 1. Ctx: { SessionId : ydb://session/3?node_id=1&id=NjZhYjZlNTEtNGI0YWU2OTAtNjY2NjMwODAtNDNiYzA4OWU=. TraceId : 01jtmz55qsfr0zt7p8jxw8tsee. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Set execution timeout 299.893335s 2025-05-07T08:52:50.154065Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:1458: SelfId: [1:7501624112431071163:2517], TxId: 281474976710672, task: 1. Ctx: { SessionId : ydb://session/3?node_id=1&id=NjZhYjZlNTEtNGI0YWU2OTAtNjY2NjMwODAtNDNiYzA4OWU=. TraceId : 01jtmz55qsfr0zt7p8jxw8tsee. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Create sink for output 0 { Sink { Type: "KqpTableSink" Settings { type_url: "type.googleapis.com/NKikimrKqp.TKqpTableSinkSettings" value: "\032\036\n\016/Root/KeyValue\020\200\202\224\204\200\200\200\200\001\030\006(\001\"\t\n\003Key\020\001 \004*\t\n\003Key\020\001 \004*\014\n\005Value\020\002 \201 0\220\200\200\200\200\200@8\001@\000H\000R\022\t\267\177\365\213b\037\033h\021\325\t\000\000\001\000\020\000X\000`\000h\000h\001x\000" } } } 2025-05-07T08:52:50.154223Z node 1 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:143: SelfId: [1:7501624112431071163:2517], TxId: 281474976710672, task: 1. Ctx: { SessionId : ydb://session/3?node_id=1&id=NjZhYjZlNTEtNGI0YWU2OTAtNjY2NjMwODAtNDNiYzA4OWU=. TraceId : 01jtmz55qsfr0zt7p8jxw8tsee. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. CA StateFunc 271646926 2025-05-07T08:52:50.154271Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:1074: SelfId: [1:7501624112431071163:2517], TxId: 281474976710672, task: 1. Ctx: { SessionId : ydb://session/3?node_id=1&id=NjZhYjZlNTEtNGI0YWU2OTAtNjY2NjMwODAtNDNiYzA4OWU=. TraceId : 01jtmz55qsfr0zt7p8jxw8tsee. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Received channels info: 2025-05-07T08:52:50.154367Z node 1 :KQP_COMPUTE DEBUG: dq_sync_compu ... task: 3. Ctx: { SessionId : ydb://session/3?node_id=1&id=NTI2NDc3NWEtZDA3MTE1MTItMjc4ZGM2LTE0OGZkMDZj. TraceId : 01jtmz56ncbw4ananwktbykwsc. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. 
CA StateFunc 271646922 2025-05-07T08:52:51.477369Z node 1 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:143: SelfId: [1:7501624116726038686:2573], TxId: 281474976710683, task: 3. Ctx: { SessionId : ydb://session/3?node_id=1&id=NTI2NDc3NWEtZDA3MTE1MTItMjc4ZGM2LTE0OGZkMDZj. TraceId : 01jtmz56ncbw4ananwktbykwsc. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. CA StateFunc 271646922 2025-05-07T08:52:51.477428Z node 1 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:143: SelfId: [1:7501624116726038686:2573], TxId: 281474976710683, task: 3. Ctx: { SessionId : ydb://session/3?node_id=1&id=NTI2NDc3NWEtZDA3MTE1MTItMjc4ZGM2LTE0OGZkMDZj. TraceId : 01jtmz56ncbw4ananwktbykwsc. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. CA StateFunc 271646922 2025-05-07T08:52:51.477733Z node 1 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:143: SelfId: [1:7501624116726038685:2572], TxId: 281474976710683, task: 2. Ctx: { TraceId : 01jtmz56ncbw4ananwktbykwsc. SessionId : ydb://session/3?node_id=1&id=NTI2NDc3NWEtZDA3MTE1MTItMjc4ZGM2LTE0OGZkMDZj. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. CA StateFunc 271646927 2025-05-07T08:52:51.477759Z node 1 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:143: SelfId: [1:7501624116726038685:2572], TxId: 281474976710683, task: 2. Ctx: { TraceId : 01jtmz56ncbw4ananwktbykwsc. SessionId : ydb://session/3?node_id=1&id=NTI2NDc3NWEtZDA3MTE1MTItMjc4ZGM2LTE0OGZkMDZj. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. CA StateFunc 271646922 2025-05-07T08:52:51.477848Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:502: SelfId: [1:7501624116726038685:2572], TxId: 281474976710683, task: 2. Ctx: { TraceId : 01jtmz56ncbw4ananwktbykwsc. SessionId : ydb://session/3?node_id=1&id=NTI2NDc3NWEtZDA3MTE1MTItMjc4ZGM2LTE0OGZkMDZj. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Continue execution, either output buffers are not empty or not all channels are ready, hasDataToSend: 1, channelsReady: 1 2025-05-07T08:52:51.478108Z node 1 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:143: SelfId: [1:7501624116726038686:2573], TxId: 281474976710683, task: 3. Ctx: { SessionId : ydb://session/3?node_id=1&id=NTI2NDc3NWEtZDA3MTE1MTItMjc4ZGM2LTE0OGZkMDZj. TraceId : 01jtmz56ncbw4ananwktbykwsc. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. CA StateFunc 271646923 2025-05-07T08:52:51.478133Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:163: TxId: 281474976710683, task: 3. Finish input channelId: 3, from: [1:7501624116726038685:2572] 2025-05-07T08:52:51.478152Z node 1 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:143: SelfId: [1:7501624116726038686:2573], TxId: 281474976710683, task: 3. Ctx: { SessionId : ydb://session/3?node_id=1&id=NTI2NDc3NWEtZDA3MTE1MTItMjc4ZGM2LTE0OGZkMDZj. TraceId : 01jtmz56ncbw4ananwktbykwsc. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. CA StateFunc 271646922 2025-05-07T08:52:51.478182Z node 1 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:143: SelfId: [1:7501624116726038685:2572], TxId: 281474976710683, task: 2. Ctx: { TraceId : 01jtmz56ncbw4ananwktbykwsc. SessionId : ydb://session/3?node_id=1&id=NTI2NDc3NWEtZDA3MTE1MTItMjc4ZGM2LTE0OGZkMDZj. CustomerSuppliedId : . 
CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. CA StateFunc 271646927 2025-05-07T08:52:51.478194Z node 1 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:143: SelfId: [1:7501624116726038685:2572], TxId: 281474976710683, task: 2. Ctx: { TraceId : 01jtmz56ncbw4ananwktbykwsc. SessionId : ydb://session/3?node_id=1&id=NTI2NDc3NWEtZDA3MTE1MTItMjc4ZGM2LTE0OGZkMDZj. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. CA StateFunc 271646922 2025-05-07T08:52:51.478219Z node 1 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:143: SelfId: [1:7501624116726038686:2573], TxId: 281474976710683, task: 3. Ctx: { SessionId : ydb://session/3?node_id=1&id=NTI2NDc3NWEtZDA3MTE1MTItMjc4ZGM2LTE0OGZkMDZj. TraceId : 01jtmz56ncbw4ananwktbykwsc. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. CA StateFunc 271646922 2025-05-07T08:52:51.478246Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976710683, task: 2. Tasks execution finished, don't wait for ack delivery in input channelId: 1, seqNo: [10] 2025-05-07T08:52:51.478268Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976710683, task: 2. Tasks execution finished, don't wait for ack delivery in input channelId: 2, seqNo: [10] 2025-05-07T08:52:51.478282Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:675: TxId: 281474976710683, task: 2. Tasks execution finished 2025-05-07T08:52:51.478296Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:510: SelfId: [1:7501624116726038685:2572], TxId: 281474976710683, task: 2. Ctx: { TraceId : 01jtmz56ncbw4ananwktbykwsc. SessionId : ydb://session/3?node_id=1&id=NTI2NDc3NWEtZDA3MTE1MTItMjc4ZGM2LTE0OGZkMDZj. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Compute state finished. All channels and sinks finished 2025-05-07T08:52:51.478317Z node 1 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:143: SelfId: [1:7501624116726038686:2573], TxId: 281474976710683, task: 3. Ctx: { SessionId : ydb://session/3?node_id=1&id=NTI2NDc3NWEtZDA3MTE1MTItMjc4ZGM2LTE0OGZkMDZj. TraceId : 01jtmz56ncbw4ananwktbykwsc. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. CA StateFunc 271646922 2025-05-07T08:52:51.478380Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:494: TxId: 281474976710683, task: 2. pass away 2025-05-07T08:52:51.478501Z node 1 :KQP_COMPUTE DEBUG: log.cpp:784: fline=kqp_compute_actor_factory.cpp:66;problem=finish_compute_actor;tx_id=281474976710683;task_id=2;success=1;message={
: Error: COMPUTE_STATE_FINISHED }; 2025-05-07T08:52:51.478882Z node 1 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:143: SelfId: [1:7501624116726038686:2573], TxId: 281474976710683, task: 3. Ctx: { SessionId : ydb://session/3?node_id=1&id=NTI2NDc3NWEtZDA3MTE1MTItMjc4ZGM2LTE0OGZkMDZj. TraceId : 01jtmz56ncbw4ananwktbykwsc. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. CA StateFunc 271646922 2025-05-07T08:52:51.479120Z node 1 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:143: SelfId: [1:7501624116726038686:2573], TxId: 281474976710683, task: 3. Ctx: { SessionId : ydb://session/3?node_id=1&id=NTI2NDc3NWEtZDA3MTE1MTItMjc4ZGM2LTE0OGZkMDZj. TraceId : 01jtmz56ncbw4ananwktbykwsc. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. CA StateFunc 271646922 2025-05-07T08:52:51.479196Z node 1 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:143: SelfId: [1:7501624116726038686:2573], TxId: 281474976710683, task: 3. Ctx: { SessionId : ydb://session/3?node_id=1&id=NTI2NDc3NWEtZDA3MTE1MTItMjc4ZGM2LTE0OGZkMDZj. TraceId : 01jtmz56ncbw4ananwktbykwsc. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. CA StateFunc 271646922 2025-05-07T08:52:51.479246Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:502: SelfId: [1:7501624116726038686:2573], TxId: 281474976710683, task: 3. Ctx: { SessionId : ydb://session/3?node_id=1&id=NTI2NDc3NWEtZDA3MTE1MTItMjc4ZGM2LTE0OGZkMDZj. TraceId : 01jtmz56ncbw4ananwktbykwsc. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Continue execution, either output buffers are not empty or not all channels are ready, hasDataToSend: 1, channelsReady: 1 2025-05-07T08:52:51.517079Z node 1 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:143: SelfId: [1:7501624116726038686:2573], TxId: 281474976710683, task: 3. Ctx: { SessionId : ydb://session/3?node_id=1&id=NTI2NDc3NWEtZDA3MTE1MTItMjc4ZGM2LTE0OGZkMDZj. TraceId : 01jtmz56ncbw4ananwktbykwsc. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. CA StateFunc 271646922 2025-05-07T08:52:51.517177Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:502: SelfId: [1:7501624116726038686:2573], TxId: 281474976710683, task: 3. Ctx: { SessionId : ydb://session/3?node_id=1&id=NTI2NDc3NWEtZDA3MTE1MTItMjc4ZGM2LTE0OGZkMDZj. TraceId : 01jtmz56ncbw4ananwktbykwsc. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Continue execution, either output buffers are not empty or not all channels are ready, hasDataToSend: 1, channelsReady: 1 2025-05-07T08:52:51.517374Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:2027: SelfId: [1:7501624116726038686:2573], TxId: 281474976710683, task: 3. Ctx: { SessionId : ydb://session/3?node_id=1&id=NTI2NDc3NWEtZDA3MTE1MTItMjc4ZGM2LTE0OGZkMDZj. TraceId : 01jtmz56ncbw4ananwktbykwsc. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. 
Send stats to executor actor [1:7501624116726038679:2565] TaskId: 3 Stats: CpuTimeUs: 14610 Tasks { TaskId: 3 StageId: 2 CpuTimeUs: 1833 FinishTimeMs: 1746607971517 InputRows: 10 InputBytes: 500 OutputRows: 10 OutputBytes: 500 ResultRows: 10 ResultBytes: 500 ComputeCpuTimeUs: 1268 BuildCpuTimeUs: 565 WaitOutputTimeUs: 21465 HostName: "ghrun-sykirh5vua" NodeId: 1 CreateTimeMs: 1746607971418 CurrentWaitOutputTimeUs: 43 UpdateTimeMs: 1746607971517 } MaxMemoryUsage: 104857600 2025-05-07T08:52:51.520632Z node 1 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:143: SelfId: [1:7501624116726038686:2573], TxId: 281474976710683, task: 3. Ctx: { SessionId : ydb://session/3?node_id=1&id=NTI2NDc3NWEtZDA3MTE1MTItMjc4ZGM2LTE0OGZkMDZj. TraceId : 01jtmz56ncbw4ananwktbykwsc. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. CA StateFunc 271646922 2025-05-07T08:52:51.520695Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976710683, task: 3. Tasks execution finished, don't wait for ack delivery in input channelId: 3, seqNo: [11] 2025-05-07T08:52:51.520705Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:675: TxId: 281474976710683, task: 3. Tasks execution finished 2025-05-07T08:52:51.520724Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:510: SelfId: [1:7501624116726038686:2573], TxId: 281474976710683, task: 3. Ctx: { SessionId : ydb://session/3?node_id=1&id=NTI2NDc3NWEtZDA3MTE1MTItMjc4ZGM2LTE0OGZkMDZj. TraceId : 01jtmz56ncbw4ananwktbykwsc. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Compute state finished. All channels and sinks finished 2025-05-07T08:52:51.520803Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:494: TxId: 281474976710683, task: 3. pass away 2025-05-07T08:52:51.520881Z node 1 :KQP_COMPUTE DEBUG: log.cpp:784: fline=kqp_compute_actor_factory.cpp:66;problem=finish_compute_actor;tx_id=281474976710683;task_id=3;success=1;message={
: Error: COMPUTE_STATE_FINISHED }; 2025-05-07T08:52:51.522549Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1746607971451, txId: 281474976710682] shutting down >> KqpPg::InsertNoTargetColumns_ColumnOrder-useSink [GOOD] >> KqpPg::InsertNoTargetColumns_NotOneSize+useSink >> TSchemeShardExtSubDomainTest::CreateAndAlter [GOOD] >> TSchemeShardExtSubDomainTest::CreateAndAlter-ExternalHive ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/runtime/unittest >> KqpScanSpilling::SpillingPragmaParseError [GOOD] Test command err: cwd: /home/runner/.ya/build/build_root/zvgn/0028d3/ydb/core/kqp/ut/runtime/test-results/unittest/testing_out_stuff/chunk9 Trying to start YDB, gRPC: 18771, MsgBus: 29571 2025-05-07T08:52:44.359660Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501624085539759622:2196];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:52:44.360291Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0028d3/r3tmp/tmpm9rqaL/pdisk_1.dat 2025-05-07T08:52:44.948499Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:52:44.979373Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:52:44.979501Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:52:44.987774Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 18771, node 1 2025-05-07T08:52:45.270582Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:52:45.270621Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:52:45.270638Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:52:45.270782Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:29571 TClient is connected to server localhost:29571 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-05-07T08:52:46.504055Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:52:46.551699Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T08:52:46.563969Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:52:46.821928Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:52:47.179081Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:52:47.317298Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:52:49.344266Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501624085539759622:2196];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:52:49.344356Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:52:49.626481Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501624107014597622:2409], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:49.626609Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:49.997021Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T08:52:50.059223Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T08:52:50.114139Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T08:52:50.187703Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T08:52:50.245192Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T08:52:50.332768Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T08:52:50.413216Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T08:52:50.495047Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501624111309565589:2473], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:50.495128Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:50.495359Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501624111309565594:2476], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:50.499528Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-05-07T08:52:50.521821Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501624111309565596:2477], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
<main>: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T08:52:50.614777Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501624111309565651:3433] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:52:52.004687Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7501624115604533226:2523], status: GENERIC_ERROR, issues:
<main>: Error: Pre type annotation, code: 1020
:3:40: Error: Bad "EnableSpillingNodes" setting for "$all" cluster: (yexception) tools/enum_parser/enum_serialization_runtime/enum_runtime.cpp:70: Key 'GraceJoin1' not found in enum NYql::NDq::EEnabledSpillingNodes. Valid options are: 'None', 'GraceJoin', 'Aggregation', 'All'. 2025-05-07T08:52:52.004940Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2147: SessionId: ydb://session/3?node_id=1&id=NDJlMGU5NGEtNzA0M2YxOGQtYTg2Zjk5MjUtYzU4YWU2NWY=, ActorId: [1:7501624115604533219:2519], ActorState: ExecuteState, TraceId: 01jtmz57jn4c7kw32nd3hdcezc, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: >> TSchemeShardExtSubDomainTest::AlterCantChangeExternalHive-AlterDatabaseCreateHiveFirst-false [GOOD] >> TSchemeShardExtSubDomainTest::AlterCantChangeExternalHive-AlterDatabaseCreateHiveFirst-true >> PersQueueSdkReadSessionTest::StopResumeReadingData [GOOD] >> ReadSessionImplTest::CreatePartitionStream [GOOD] >> ReadSessionImplTest::BrokenCompressedData >> TSchemeShardExtSubDomainTest::NothingInsideGSS-AlterDatabaseCreateHiveFirst-false [GOOD] >> TSchemeShardExtSubDomainTest::Drop >> TExternalDataSourceTest::ReplaceExternalDataStoreShouldFailIfEntityOfAnotherTypeWithSameNameExists [GOOD] >> ReadSessionImplTest::BrokenCompressedData [GOOD] >> ReadSessionImplTest::CommitOffsetTwiceIsError [GOOD] >> ReadSessionImplTest::CommonHandler [GOOD] >> TSchemeShardExtSubDomainTest::AlterWithPlainAlterSubdomain [GOOD] >> TSchemeShardExtSubDomainTest::AlterWithPlainAlterSubdomain-ExternalHive ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/ydb_proxy/ut/unittest >> YdbProxy::DropTopic [GOOD] Test command err: 2025-05-07T08:52:43.295301Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501624080657082212:2203];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:52:43.299556Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0048eb/r3tmp/tmp2B8k2P/pdisk_1.dat 2025-05-07T08:52:43.925139Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:52:43.952158Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:52:43.952289Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:52:43.957703Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:1697 TServer::EnableGrpc on GrpcPort 11236, node 1 2025-05-07T08:52:44.470450Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:52:44.470473Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:52:44.470481Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:52:44.470617Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:1697 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:52:45.131403Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:52:45.160603Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T08:52:45.234707Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-05-07T08:52:48.735138Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7501624103789214334:2196];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:52:48.735542Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0048eb/r3tmp/tmpB20i8F/pdisk_1.dat 2025-05-07T08:52:49.042974Z node 2 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:52:49.072470Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:52:49.072544Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:52:49.074142Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:2888 TServer::EnableGrpc on GrpcPort 10974, node 2 2025-05-07T08:52:49.571200Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:52:49.571223Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:52:49.571229Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:52:49.571348Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:2888 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:52:50.207284Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:52:50.432302Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpDropPersQueueGroup, opId: 281474976715659:0, at schemeshard: 72057594046644480 2025-05-07T08:52:50.452312Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037888 not found 2025-05-07T08:52:50.471146Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037889 not found 2025-05-07T08:52:50.490966Z node 2 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [2:7501624112379149577:2398] txid# 281474976715660, issues: { message: "Path does not exist" issue_code: 200200 severity: 1 } ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_external_data_source/unittest >> TExternalDataSourceTest::ReplaceExternalDataStoreShouldFailIfEntityOfAnotherTypeWithSameNameExists [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140] 2025-05-07T08:52:52.447685Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:52:52.458184Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:52:52.458270Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:52:52.458310Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:52:52.458354Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 
10000 2025-05-07T08:52:52.458384Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:52:52.458481Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:52:52.458557Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:52:52.459294Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:52:52.470156Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:52:52.824217Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:52:52.824286Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:52:52.869797Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:52:52.870204Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:52:52.891314Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:52:52.931734Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:52:52.939883Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:52:52.958317Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:52:53.002605Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:52:53.074872Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:52:53.180887Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:52:53.181024Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:52:53.181160Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:52:53.181224Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:52:53.181282Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:52:53.218301Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 
2025-05-07T08:52:53.247408Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T08:52:53.515743Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:52:53.530719Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:52:53.589547Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:52:53.613576Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:52:53.613717Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:52:53.631323Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:52:53.641190Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:52:53.641531Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:52:53.662176Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:52:53.662279Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:52:53.662317Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:52:53.676420Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:52:53.676514Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:52:53.676564Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:52:53.686956Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:52:53.687030Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:52:53.687086Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:52:53.687156Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:52:53.705688Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:52:53.730898Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:52:53.731165Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:52:53.741452Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:52:53.741658Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:52:53.751563Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:52:53.751949Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:52:53.752008Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:52:53.762102Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:52:53.762276Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:52:53.771215Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:52:53.771280Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:52:53.771504Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:52:53.771560Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: 
TTxPublishToSchemeBoard Send, to populator: ... TTxPublishToSchemeBoard Send, to populator: [1:205:2207], at schemeshard: 72057594046678944, txId: 101, path id: 1 2025-05-07T08:52:53.907624Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:205:2207], at schemeshard: 72057594046678944, txId: 101, path id: 2 2025-05-07T08:52:53.908080Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 101:0, at schemeshard: 72057594046678944 2025-05-07T08:52:53.908126Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046678944] TDone opId# 101:0 ProgressState 2025-05-07T08:52:53.908212Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#101:0 progress is 1/1 2025-05-07T08:52:53.908241Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-05-07T08:52:53.908287Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#101:0 progress is 1/1 2025-05-07T08:52:53.908329Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-05-07T08:52:53.908364Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 101, ready parts: 1/1, is published: false 2025-05-07T08:52:53.908402Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-05-07T08:52:53.908437Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 101:0 2025-05-07T08:52:53.908466Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 101:0 2025-05-07T08:52:53.908552Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-05-07T08:52:53.908594Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 101, publications: 2, subscribers: 0 2025-05-07T08:52:53.908653Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 1], 4 2025-05-07T08:52:53.908736Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 2], 2 2025-05-07T08:52:53.909357Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 4 PathOwnerId: 72057594046678944, cookie: 101 2025-05-07T08:52:53.909470Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 4 PathOwnerId: 72057594046678944, cookie: 101 2025-05-07T08:52:53.909512Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 101 2025-05-07T08:52:53.909549Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at 
schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 4 2025-05-07T08:52:53.909596Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-05-07T08:52:53.910564Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 2 PathOwnerId: 72057594046678944, cookie: 101 2025-05-07T08:52:53.910645Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 2 PathOwnerId: 72057594046678944, cookie: 101 2025-05-07T08:52:53.910670Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 101 2025-05-07T08:52:53.910698Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 2 2025-05-07T08:52:53.910726Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-05-07T08:52:53.910786Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 101, subscribers: 0 2025-05-07T08:52:53.915438Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-05-07T08:52:53.915982Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 TestModificationResult got TxId: 101, wait until txId: 101 TestWaitNotification wait txId: 101 2025-05-07T08:52:53.916230Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 101: send EvNotifyTxCompletion 2025-05-07T08:52:53.934085Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 101 2025-05-07T08:52:53.946699Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 2025-05-07T08:52:53.946910Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-05-07T08:52:53.946968Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:298:2289] TestWaitNotification: OK eventTxId 101 2025-05-07T08:52:53.947676Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/UniqueName" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T08:52:53.947984Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/UniqueName" took 290us result status StatusSuccess 
2025-05-07T08:52:53.958731Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/UniqueName" PathDescription { Self { Name: "UniqueName" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeView CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 ViewVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } ViewDescription { Name: "UniqueName" PathId { OwnerId: 72057594046678944 LocalId: 2 } Version: 1 QueryText: "Some query" CapturedContext { } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 TestModificationResults wait txId: 102 2025-05-07T08:52:53.973673Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateExternalDataSource CreateExternalDataSource { Name: "UniqueName" SourceType: "ObjectStorage" Location: "https://s3.cloud.net/my_bucket" Auth { None { } } ReplaceIfExists: true } } TxId: 102 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:52:53.978415Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_create_external_data_source.cpp:336: [72057594046678944] CreateNewExternalDataSource, opId 102:0, feature flag EnableReplaceIfExistsForExternalEntities 1, tx WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateExternalDataSource FailOnExist: false CreateExternalDataSource { Name: "UniqueName" SourceType: "ObjectStorage" Location: "https://s3.cloud.net/my_bucket" Auth { None { } } ReplaceIfExists: true } 2025-05-07T08:52:53.978618Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_external_data_source.cpp:212: [72057594046678944] TAlterExternalDataSource Propose: opId# 102:0, path# /MyRoot/UniqueName 2025-05-07T08:52:53.978788Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 102:1, propose status:StatusNameConflict, reason: Check failed: path: '/MyRoot/UniqueName', error: unexpected path type (id: [OwnerId: 72057594046678944, LocalPathId: 2], type: EPathTypeView, state: EPathStateNoChanges), expected types: EPathTypeExternalDataSource, at schemeshard: 72057594046678944 2025-05-07T08:52:54.025379Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 102, response: Status: StatusNameConflict Reason: "Check failed: path: \'/MyRoot/UniqueName\', error: unexpected path type (id: [OwnerId: 72057594046678944, LocalPathId: 2], type: EPathTypeView, state: EPathStateNoChanges), expected types: EPathTypeExternalDataSource" TxId: 102 SchemeshardId: 72057594046678944 PathId: 2 PathCreateTxId: 101, at schemeshard: 
72057594046678944 2025-05-07T08:52:54.025625Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 102, database: /MyRoot, subject: , status: StatusNameConflict, reason: Check failed: path: '/MyRoot/UniqueName', error: unexpected path type (id: [OwnerId: 72057594046678944, LocalPathId: 2], type: EPathTypeView, state: EPathStateNoChanges), expected types: EPathTypeExternalDataSource, operation: CREATE EXTERNAL DATA SOURCE, path: /MyRoot/UniqueName TestModificationResult got TxId: 102, wait until txId: 102 TestWaitNotification wait txId: 102 2025-05-07T08:52:54.026032Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2025-05-07T08:52:54.026104Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 2025-05-07T08:52:54.026599Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 2025-05-07T08:52:54.026707Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-05-07T08:52:54.026740Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:306:2297] TestWaitNotification: OK eventTxId 102 >> TSchemeShardExtSubDomainTest::AlterWithPlainAlterSubdomain-ExternalHive [GOOD] >> TSchemeShardExtSubDomainTest::AlterWithPlainAlterSubdomain-AlterDatabaseCreateHiveFirst >> TSchemeShardExtSubDomainTest::CreateAndAlter-ExternalHive [GOOD] >> TSchemeShardExtSubDomainTest::CreateAndAlter-AlterDatabaseCreateHiveFirst >> DataShardSnapshots::LockedWriteCleanupOnSplit-UseSink [GOOD] >> DataShardSnapshots::LockedWriteCleanupOnCopyTable+UseSink >> TSchemeShardExtSubDomainTest::AlterCantChangeExternalHive-AlterDatabaseCreateHiveFirst-true [GOOD] >> TSchemeShardExtSubDomainTest::AlterCantChangeExternalSysViewProcessor-AlterDatabaseCreateHiveFirst-false >> KqpUniqueIndex::InsertFkDuplicate [GOOD] >> TSchemeShardExtSubDomainTest::AlterTwiceAndWithPlainAlterSubdomain >> TSchemeShardExtSubDomainTest::Drop [GOOD] >> TSchemeShardExtSubDomainTest::Drop-ExternalHive >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-51 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-52 >> TSchemeShardExtSubDomainTest::AlterWithPlainAlterSubdomain-AlterDatabaseCreateHiveFirst [GOOD] >> TSchemeShardExtSubDomainTest::AlterWithPlainAlterSubdomain-AlterDatabaseCreateHiveFirst-ExternalHive >> TSchemeShardExtSubDomainTest::CreateAndAlterWithoutEnablingTx ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/persqueue_public/ut/unittest >> ReadSessionImplTest::CommonHandler [GOOD] Test command err: 2025-05-07T08:51:36.762397Z :ReadSession INFO: Random seed for debugging is 1746607896762366 2025-05-07T08:51:37.140627Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501623797933598307:2072];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:51:37.140995Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-05-07T08:51:37.549674Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: 
fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7501623797694888250:2221];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:51:37.550497Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003625/r3tmp/tmpQ3olTB/pdisk_1.dat 2025-05-07T08:51:37.574818Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-05-07T08:51:37.626734Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-05-07T08:51:37.804718Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:51:37.804808Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:51:37.831550Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:51:37.831641Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:51:37.840022Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:51:37.844257Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-05-07T08:51:37.845095Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:51:37.877227Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 26916, node 1 2025-05-07T08:51:37.950691Z node 1 :GRPC_SERVER WARN: grpc_request_proxy.cpp:509: SchemeBoardDelete /Root Strong=0 2025-05-07T08:51:37.950732Z node 1 :GRPC_SERVER WARN: grpc_request_proxy.cpp:509: SchemeBoardDelete /Root Strong=0 2025-05-07T08:51:38.050910Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/zvgn/003625/r3tmp/yandexm5mIbV.tmp 2025-05-07T08:51:38.050943Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/zvgn/003625/r3tmp/yandexm5mIbV.tmp 2025-05-07T08:51:38.066762Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/zvgn/003625/r3tmp/yandexm5mIbV.tmp 2025-05-07T08:51:38.143316Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-05-07T08:51:38.230250Z INFO: TTestServer started on Port 22892 GrpcPort 26916 TClient is connected to server localhost:22892 PQClient connected to localhost:26916 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:51:38.704043Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... waiting... waiting... waiting... 2025-05-07T08:51:41.511177Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7501623814874757600:2312], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:51:41.511261Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7501623814874757575:2309], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:51:41.511731Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:51:41.541045Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623815113468519:2339], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:51:41.541170Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:51:41.541383Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623815113468531:2342], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:51:41.545705Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715657:3, at schemeshard: 72057594046644480 2025-05-07T08:51:41.573437Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501623815113468535:2615] txid# 281474976710661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exists but creating right now (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateCreate)" severity: 1 } 2025-05-07T08:51:41.631306Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501623815113468533:2343], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715657 completed, doublechecking } 2025-05-07T08:51:41.631725Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7501623814874757604:2313], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715657 completed, doublechecking } 2025-05-07T08:51:41.699976Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501623815113468629:2676] txid# 281474976710662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:51:41.725705Z node 2 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [2:7501623814874757634:2131] txid# 281474976715658, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:51:42.087379Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T08:51:42.093246Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7501623814874757641:2318], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-05-07T08:51:42.093493Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7501623815113468639:2349], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-05-07T08:51:42.094209Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2147: SessionId: ydb://session/3?node_id=2&id=MWJmMTkxOGEtYmM2MDZjMGUtZTZhZmFhZjYtYjRhM2E0MjQ=, ActorId: [2:7501623814874757572:2308], ActorState: ExecuteState, TraceId: 01jtmz32t1adek8pssk9fhhzhr, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-05-07T08:51:42.096459Z node 2 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-05-07T08:51:42.097505Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2147: SessionId: ydb://session/3?node_id=1&id=NTg4NDkyOWUtNjk0MDNlNDMtYTg3ZWRjOC02OGVkNDQxMQ==, ActorId: [1:7501623815113468493:2337], ActorState: ExecuteState, TraceId: 01jtmz32t477hyaxdppsdpga4p, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-05-07T08:51:42.097899Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access per ... ition_actor.cpp:880: session cookie 1 consumer shared/user session shared/user_7_1_6137283140583379116_v1 after read state TopicId: Topic rt3.dc1--test-topic in dc dc1 in database: Root, partition 0(assignId:1) EndOffset 3 ReadOffset 3 ReadGuid fe4cb922-97fff43b-8bc4307c-aa0b6844 has messages 1 2025-05-07T08:52:51.815370Z node 7 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:1917: session cookie 1 consumer shared/user session shared/user_7_1_6137283140583379116_v1 read done: guid# fe4cb922-97fff43b-8bc4307c-aa0b6844, partition# TopicId: Topic rt3.dc1--test-topic in dc dc1 in database: Root, partition 0(assignId:1), size# 220 2025-05-07T08:52:51.815403Z node 7 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2079: session cookie 1 consumer shared/user session shared/user_7_1_6137283140583379116_v1 response to read: guid# fe4cb922-97fff43b-8bc4307c-aa0b6844 2025-05-07T08:52:51.815741Z node 7 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2122: session cookie 1 consumer shared/user session shared/user_7_1_6137283140583379116_v1 Process answer. 
Aval parts: 0 2025-05-07T08:52:51.817296Z :DEBUG: [/Root] [/Root] [79b78d48-f194192c-358f1e4a-43a2bfe0] [dc1] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:52:51.817763Z node 7 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 1 consumer shared/user session shared/user_7_1_6137283140583379116_v1 grpc read done: success# 1, data# { read { } } 2025-05-07T08:52:51.817885Z node 7 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:1816: session cookie 1 consumer shared/user session shared/user_7_1_6137283140583379116_v1 got read request: guid# 81fa1708-ec43812-afc9f7d-71d5a864 2025-05-07T08:52:51.818137Z :DEBUG: [/Root] Decompression task done. Partition/PartitionSessionId: 0 (2-2) 2025-05-07T08:52:51.822574Z :DEBUG: [/Root] Take Data. Partition 0. Read: {0, 0} (2-2) 2025-05-07T08:52:51.822676Z :DEBUG: [/Root] [/Root] [79b78d48-f194192c-358f1e4a-43a2bfe0] [dc1] The application data is transferred to the client. Number of messages 1, size 8 bytes DataReceived { PartitionStreamId: 1 PartitionId: 0 Message { Data: ..8 bytes.. Partition stream id: 1 Cluster: "dc1". Topic: "test-topic" Partition: 0 PartitionKey: "" Information: { Offset: 2 SeqNo: 3 MessageGroupId: "test-message-group-id" CreateTime: 2025-05-07T08:52:50.659000Z WriteTime: 2025-05-07T08:52:50.665000Z Ip: "ipv6:[::1]:46734" UncompressedSize: 8 Meta: { "logtype": "unknown", "ident": "unknown", "server": "ipv6:[::1]:46734" } } } } 2025-05-07T08:52:51.822920Z :INFO: [/Root] [/Root] [79b78d48-f194192c-358f1e4a-43a2bfe0] Closing read session. Close timeout: 3.000000s 2025-05-07T08:52:51.822977Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): dc1:test-topic:0:1:2:2 2025-05-07T08:52:51.823031Z :INFO: [/Root] [/Root] [79b78d48-f194192c-358f1e4a-43a2bfe0] Counters: { Errors: 0 CurrentSessionLifetimeMs: 1702 BytesRead: 24 MessagesRead: 3 BytesReadCompressed: 84 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-05-07T08:52:51.823742Z :INFO: [/Root] [/Root] [79b78d48-f194192c-358f1e4a-43a2bfe0] Closing read session. Close timeout: 0.000000s 2025-05-07T08:52:51.823798Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): dc1:test-topic:0:1:2:2 2025-05-07T08:52:51.823852Z :INFO: [/Root] [/Root] [79b78d48-f194192c-358f1e4a-43a2bfe0] Counters: { Errors: 0 CurrentSessionLifetimeMs: 1703 BytesRead: 24 MessagesRead: 3 BytesReadCompressed: 84 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-05-07T08:52:51.823974Z :NOTICE: [/Root] [/Root] [79b78d48-f194192c-358f1e4a-43a2bfe0] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2025-05-07T08:52:51.825563Z node 7 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 1 consumer shared/user session shared/user_7_1_6137283140583379116_v1 grpc read done: success# 0, data# { } 2025-05-07T08:52:51.825593Z node 7 :PQ_READ_PROXY INFO: read_session_actor.cpp:125: session cookie 1 consumer shared/user session shared/user_7_1_6137283140583379116_v1 grpc read failed 2025-05-07T08:52:51.825621Z node 7 :PQ_READ_PROXY INFO: read_session_actor.cpp:92: session cookie 1 consumer shared/user session shared/user_7_1_6137283140583379116_v1 grpc closed 2025-05-07T08:52:51.825670Z node 7 :PQ_READ_PROXY INFO: read_session_actor.cpp:377: session cookie 1 consumer shared/user session shared/user_7_1_6137283140583379116_v1 is DEAD 2025-05-07T08:52:51.827054Z node 8 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037893][rt3.dc1--test-topic] pipe [7:7501624112879844745:2545] disconnected; active server actors: 1 2025-05-07T08:52:51.827112Z node 8 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1673: [72075186224037893][rt3.dc1--test-topic] pipe [7:7501624112879844745:2545] client user disconnected session shared/user_7_1_6137283140583379116_v1 2025-05-07T08:52:51.827545Z node 7 :PERSQUEUE DEBUG: pq_impl.cpp:2433: [PQ: 72075186224037892] Destroy direct read session shared/user_7_1_6137283140583379116_v1 2025-05-07T08:52:51.827599Z node 7 :PERSQUEUE DEBUG: pq_impl.cpp:2899: [PQ: 72075186224037892] server disconnected, pipe [7:7501624112879844747:2548] destroyed 2025-05-07T08:52:51.827666Z node 7 :PQ_READ_PROXY DEBUG: caching_service.cpp:138: Direct read cache: server session deregistered: shared/user_7_1_6137283140583379116_v1 2025-05-07T08:52:54.292391Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:52:54.292446Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:52:54.292483Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-05-07T08:52:54.294980Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-05-07T08:52:54.296399Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-05-07T08:52:54.296699Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:52:54.305206Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: 13. Commit offset: 31 2025-05-07T08:52:54.308028Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:52:54.308085Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:52:54.308137Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-05-07T08:52:54.330554Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-05-07T08:52:54.346437Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-05-07T08:52:54.346650Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:52:54.347910Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". 
Partition: 1. Read offset: (NULL) 2025-05-07T08:52:54.349623Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function 2025-05-07T08:52:54.359728Z :INFO: Error decompressing data: (TZLibDecompressorError) util/stream/zlib.cpp:143: inflate error(incorrect header check) 2025-05-07T08:52:54.359887Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-3) 2025-05-07T08:52:54.362274Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-05-07T08:52:54.362357Z :DEBUG: Take Data. Partition 1. Read: {0, 1} (2-2) 2025-05-07T08:52:54.362411Z :DEBUG: Take Data. Partition 1. Read: {0, 2} (3-3) 2025-05-07T08:52:54.362482Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 3, size 16 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { DataDecompressionError: "(TZLibDecompressorError) util/stream/zlib.cpp:143: inflate error(incorrect header check)" Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 1 SeqNo: 1 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:00:00.042000Z Ip: "::1" UncompressedSize: 0 Meta: { } } } Message { Data: ..8 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 2 SeqNo: 1 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:00:00.042000Z Ip: "::1" UncompressedSize: 0 Meta: { } } } Message { Data: ..8 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 3 SeqNo: 1 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:00:00.042000Z Ip: "::1" UncompressedSize: 0 Meta: { } } } } 2025-05-07T08:52:54.367966Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:52:54.368012Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:52:54.368058Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-05-07T08:52:54.375886Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-05-07T08:52:54.376460Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-05-07T08:52:54.376651Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:52:54.378304Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-05-07T08:52:54.379378Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:52:54.380787Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-1) 2025-05-07T08:52:54.386128Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-05-07T08:52:54.386238Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 8 bytes 2025-05-07T08:52:54.386389Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [1, 2). 
Partition stream id: 1 2025-05-07T08:52:54.391247Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:52:54.391304Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:52:54.391361Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-05-07T08:52:54.391910Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-05-07T08:52:54.394723Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-05-07T08:52:54.394937Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:52:54.399035Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:52:54.399254Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-1) 2025-05-07T08:52:54.399363Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-05-07T08:52:54.399481Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 8 bytes |89.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/sys_view/query_stats/ut/ydb-core-sys_view-query_stats-ut |89.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/sys_view/query_stats/ut/ydb-core-sys_view-query_stats-ut |89.7%| [LD] {RESULT} $(B)/ydb/core/sys_view/query_stats/ut/ydb-core-sys_view-query_stats-ut >> TSchemeShardExtSubDomainTest::AlterCantChangeExternalSysViewProcessor-AlterDatabaseCreateHiveFirst-false [GOOD] >> TSchemeShardExtSubDomainTest::AlterCantChangeExternalSysViewProcessor-AlterDatabaseCreateHiveFirst-true |89.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/dread_cache_service/ut/unittest >> KqpPg::CheckPgAutoParams+useSink [GOOD] >> KqpPg::CheckPgAutoParams-useSink >> KqpIndexes::SecondaryIndexUpdateOnUsingIndex [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpUniqueIndex::InsertFkDuplicate [GOOD] Test command err: Trying to start YDB, gRPC: 18553, MsgBus: 20957 2025-05-07T08:52:33.780500Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501624038722236337:2128];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:52:33.780546Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003c2b/r3tmp/tmpxwUjHW/pdisk_1.dat 2025-05-07T08:52:34.591810Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:52:34.591916Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:52:34.600574Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 18553, node 1 2025-05-07T08:52:34.850720Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:52:34.850743Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:52:34.850750Z node 1 
:NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:52:34.850861Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-05-07T08:52:34.989403Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TClient is connected to server localhost:20957 TClient is connected to server localhost:20957 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:52:36.293384Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:52:36.326553Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T08:52:36.356826Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:52:36.616763Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:52:36.926057Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:52:37.012549Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:52:38.781830Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501624038722236337:2128];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:52:38.781921Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:52:39.256094Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501624064492041702:2409], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:39.256231Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:39.766004Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T08:52:39.802306Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T08:52:39.853730Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T08:52:39.891894Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T08:52:39.928507Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T08:52:39.977766Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T08:52:40.057063Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T08:52:40.115952Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501624068787009666:2474], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:40.116065Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:40.116354Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501624068787009671:2477], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:40.120263Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-05-07T08:52:40.132676Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501624068787009673:2478], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T08:52:40.222494Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501624068787009726:3426] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:52:41.612563Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:52:43.583478Z node 1 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:678: SelfId: [1:7501624081671912856:2623], TxId: 281474976710677, task: 1. Ctx: { SessionId : ydb://session/3?node_id=1&id=NmUxYzM4NzUtNzUxOTMwZTItNzAyMDk1ODgtNDYzMGYxNmI=. CustomerSuppliedId : . TraceId : 01jtmz4yzc79yr08gft9gp3mdc. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. InternalError: PRECONDITION_FAILED KIKIMR_CONSTRAINT_VIOLATION: {
: Error: Conflict with existing key., code: 2012 }. 2025-05-07T08:52:43.583869Z node 1 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1216: SelfId: [1:7501624081671912857:2624], TxId: 281474976710677, task: 2. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=1&id=NmUxYzM4NzUtNzUxOTMwZTItNzAyMDk1ODgtNDYzMGYxNmI=. TraceId : 01jtmz4yzc79yr08gft9gp3mdc. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Handle abort execution event from: [1:7501624081671912853:2578], status: PRECONDITION_FAILED, reason: {
: Error: Terminate execution } 2025-05-07T08:52:43.584256Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2578: SessionId: ydb://session/3?node_id=1&id=NmUxYzM4NzUtNzUxOTMwZTItNzAyMDk1ODgtNDYzMGYxNmI=, ActorId: [1:7501624077376945348:2578], ActorState: ExecuteState, TraceId: 01jtmz4yzc79yr08gft9gp3mdc, Create QueryResponse for error on request, msg: Trying to start YDB, gRPC: 13334, MsgBus: 16009 2025-05-07T08:52:45.037302Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7501624091660659281:2193];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:52:45.037339Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003c2b/r3tmp/tmpmSrV5C/pdisk_1.dat 2025-05-07T08:52:45.396705Z node 2 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:52:45.408002Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:52:45.408086Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:52:45.416468Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 13334, node 2 2025-05-07T08:52:45.630135Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:52:45.630156Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:52:45.630164Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:52:45.630279Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:16009 TClient is connected to server localhost:16009 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:52:46.437219Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T08:52:46.454724Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T08:52:46.464981Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:52:46.648033Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:52:46.852718Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-05-07T08:52:46.940994Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 2025-05-07T08:52:50.040048Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7501624091660659281:2193];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:52:50.040127Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:52:50.041322Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7501624113135497296:2407], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:50.041406Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:50.180112Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T08:52:50.228281Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T08:52:50.289401Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T08:52:50.369707Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T08:52:50.430852Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T08:52:50.522054Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T08:52:50.607165Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T08:52:50.713141Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7501624113135497958:2471], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:50.713239Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:50.713689Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7501624113135497963:2474], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:50.719839Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-05-07T08:52:50.736174Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7501624113135497965:2475], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T08:52:50.800758Z node 2 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [2:7501624113135498016:3423] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:52:52.151025Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:52:54.064173Z node 2 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:678: SelfId: [2:7501624130315368448:2620], TxId: 281474976710677, task: 1. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=2&id=YTU2MGFmZTMtNjJmNmM3YzctZDJiM2NiYjctMjAxOTBkMTc=. TraceId : 01jtmz594qavzcw9varrcnn0r2. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. InternalError: PRECONDITION_FAILED KIKIMR_CONSTRAINT_VIOLATION: {
: Error: Duplicated keys found., code: 2012 }. 2025-05-07T08:52:54.064433Z node 2 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1216: SelfId: [2:7501624130315368450:2621], TxId: 281474976710677, task: 2. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=2&id=YTU2MGFmZTMtNjJmNmM3YzctZDJiM2NiYjctMjAxOTBkMTc=. TraceId : 01jtmz594qavzcw9varrcnn0r2. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Handle abort execution event from: [2:7501624130315368445:2576], status: PRECONDITION_FAILED, reason: {
: Error: Terminate execution } 2025-05-07T08:52:54.064743Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2578: SessionId: ydb://session/3?node_id=2&id=YTU2MGFmZTMtNjJmNmM3YzctZDJiM2NiYjctMjAxOTBkMTc=, ActorId: [2:7501624121725433654:2576], ActorState: ExecuteState, TraceId: 01jtmz594qavzcw9varrcnn0r2, Create QueryResponse for error on request, msg: >> Compression::WriteWithMixedCodecs [GOOD] >> PersQueueSdkReadSessionTest::ReadSessionWithAbort >> TSchemeShardExtSubDomainTest::AlterTwiceAndWithPlainAlterSubdomain [GOOD] >> TSchemeShardExtSubDomainTest::AlterCantChangeSetParams-AlterDatabaseCreateHiveFirst-false >> TSchemeShardExtSubDomainTest::CreateAndAlterAlterAddStoragePool-ExternalHive >> TSchemeShardExtSubDomainTest::CreateAndAlter-AlterDatabaseCreateHiveFirst [GOOD] >> TSchemeShardExtSubDomainTest::CreateAndAlter-AlterDatabaseCreateHiveFirst-ExternalHive >> TSchemeShardExtSubDomainTest::CreateAndAlterWithoutEnablingTx [GOOD] >> TSchemeShardExtSubDomainTest::CreateAndAlterWithoutEnablingTx-AlterDatabaseCreateHiveFirst >> YdbIndexTable::MultiShardTableOneIndexIndexOverlapDataColumn [GOOD] >> YdbIndexTable::MultiShardTableOneIndexPkOverlap >> TSchemeShardExtSubDomainTest::AlterWithPlainAlterSubdomain-AlterDatabaseCreateHiveFirst-ExternalHive [GOOD] >> TSchemeShardExtSubDomainTest::AlterTwiceAndWithPlainAlterSubdomain-ExternalHive >> TSchemeShardExtSubDomainTest::Drop-ExternalHive [GOOD] >> TSchemeShardExtSubDomainTest::Drop-AlterDatabaseCreateHiveFirst >> KqpScripting::StreamExecuteYqlScriptSeveralQueriesComplex [GOOD] >> TSchemeShardExtSubDomainTest::AlterCantChangeExternalSysViewProcessor-AlterDatabaseCreateHiveFirst-true [GOOD] >> TSchemeShardExtSubDomainTest::AlterCantChangeExternalStatisticsAggregator-AlterDatabaseCreateHiveFirst-false >> KqpPg::Returning-useSink [GOOD] >> KqpPg::SelectIndex+useSink >> TSchemeShardExtSubDomainTest::AlterCantChangeSetParams-AlterDatabaseCreateHiveFirst-false [GOOD] >> TSchemeShardExtSubDomainTest::AlterCantChangeSetParams-AlterDatabaseCreateHiveFirst-true >> KqpVectorIndexes::VectorIndexIsNotUpdatable [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpIndexes::SecondaryIndexUpdateOnUsingIndex [GOOD] Test command err: Trying to start YDB, gRPC: 28855, MsgBus: 13418 2025-05-07T08:52:25.501543Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501624006392837049:2206];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:52:25.503501Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003c39/r3tmp/tmpj6egnD/pdisk_1.dat 2025-05-07T08:52:26.210928Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:52:26.227710Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:52:26.227831Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:52:26.231642Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 28855, node 1 2025-05-07T08:52:26.343182Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, 
broken or outdated, will use file: (empty maybe) 2025-05-07T08:52:26.343211Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:52:26.343224Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:52:26.343363Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:13418 TClient is connected to server localhost:13418 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:52:27.145676Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:52:27.179309Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:52:27.335957Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:52:27.702520Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:52:27.842171Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:52:29.847208Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501624023572707745:2408], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:29.847312Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:30.149500Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T08:52:30.190695Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T08:52:30.231088Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T08:52:30.280588Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T08:52:30.330721Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T08:52:30.386653Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T08:52:30.449253Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T08:52:30.490091Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501624006392837049:2206];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:52:30.490157Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:52:30.550844Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501624027867675703:2471], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:30.550975Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:30.551559Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501624027867675708:2474], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:30.556537Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-05-07T08:52:30.580374Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501624027867675710:2475], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T08:52:30.635460Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501624027867675763:3422] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:52:32.233614Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:7501624010687804603:2187]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T08:52:32.233670Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4848: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T08:52:32.233729Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271124999, Sender [1:7501624010687804603:2187], Recipient [1:7501624010687804603:2187]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T08:52:32.233748Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4847: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T08:52:32.253197Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269877761, Sender [1:7501624036457610637:3603], Recipient [1:7501624010687804603:2187]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-05-07T08:52:32.253231Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4937: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-05-07T08:52:32.253242Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5713: Pipe server connected, at tablet: 72057594046644480 2025-05-07T08:52:32.253280Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271122432, Sender [1:7501624036457610633:3600], Recipient [1:7501624010687804603:2187]: {TEvModifySchemeTransaction txid# 281474976710672 TabletId# 72057594046644480} 2025-05-07T08:52:32.253294Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4851: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-05-07T08:52:32.369485Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/Root" OperationType: ESchemeOpCreateIndexedTable CreateIndexedTable { TableDescription { Name: "TestTable" Columns { Name: "Key" Type: "String" NotNull: false } Columns { Name: "Index2" Type: "String" NotNull: false } Columns { Name: "Value" Type: "String" NotNull: false } KeyColumnNames: "Key" PartitionConfig { ColumnFamilies { Id: 0 StorageConfig { SysLog { PreferredPoolKind: "test" } Log { PreferredPoolKind: "test" } Data { PreferredPoolKind: "test" } } } } Temporary: false } IndexDescription { Name: "Index" KeyColumnNames: "Index2" Type: EIndexTypeGlobal IndexImplTableDescriptions { PartitionConfig { } } } } } TxId: 281474976710672 TabletId: 72057594046644480 PeerName: "ipv6:[::1]:58800" , at schemeshard: 72057594046644480 2025-05-07T08:52:32.370341Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_create_indexed_table.cpp:101: TCreateTableIndex construct operation table path: /Root/TestTable domain path id: [OwnerId: 72057594046644480, ... 
-05-07T08:52:54.069755Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-05-07T08:52:54.070268Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269877761, Sender [3:7501624129088173766:3675], Recipient [3:7501624099023400257:2140]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-05-07T08:52:54.070293Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4937: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-05-07T08:52:54.070310Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5713: Pipe server connected, at tablet: 72057594046644480 2025-05-07T08:52:54.070578Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269551620, Sender [3:7501624124793206395:2519], Recipient [3:7501624099023400257:2140]: NKikimrTxDataShard.TEvSchemaChanged Source { RawX1: 7501624124793206395 RawX2: 4503612512274903 } Origin: 72075186224037923 State: 2 TxId: 281474976710672 Step: 0 Generation: 1 2025-05-07T08:52:54.070596Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4872: StateWork, processing event TEvDataShard::TEvSchemaChanged 2025-05-07T08:52:54.070652Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5472: Handle TEvSchemaChanged, tabletId: 72057594046644480, at schemeshard: 72057594046644480, message: Source { RawX1: 7501624124793206395 RawX2: 4503612512274903 } Origin: 72075186224037923 State: 2 TxId: 281474976710672 Step: 0 Generation: 1 2025-05-07T08:52:54.070670Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation FindRelatedPartByTabletId, TxId: 281474976710672, tablet: 72075186224037923, partId: 0 2025-05-07T08:52:54.070770Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 281474976710672:0, at schemeshard: 72057594046644480, message: Source { RawX1: 7501624124793206395 RawX2: 4503612512274903 } Origin: 72075186224037923 State: 2 TxId: 281474976710672 Step: 0 Generation: 1 2025-05-07T08:52:54.070794Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1005: NTableState::TProposedWaitParts operationId# 281474976710672:0 HandleReply TEvSchemaChanged at tablet: 72057594046644480 2025-05-07T08:52:54.070863Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1009: NTableState::TProposedWaitParts operationId# 281474976710672:0 HandleReply TEvSchemaChanged at tablet: 72057594046644480 message: Source { RawX1: 7501624124793206395 RawX2: 4503612512274903 } Origin: 72075186224037923 State: 2 TxId: 281474976710672 Step: 0 Generation: 1 2025-05-07T08:52:54.070901Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:655: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 281474976710672:0, shardIdx: 72057594046644480:35, datashard: 72075186224037923, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046644480 2025-05-07T08:52:54.070914Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:674: all shard schema changes has been received, operationId: 281474976710672:0, at schemeshard: 72057594046644480 2025-05-07T08:52:54.070930Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:686: send schema changes ack message, operation: 281474976710672:0, datashard: 72075186224037923, at schemeshard: 72057594046644480 2025-05-07T08:52:54.070949Z node 3 
:FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 281474976710672:0 129 -> 240 2025-05-07T08:52:54.071088Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-05-07T08:52:54.071378Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 281474976710672:0, at schemeshard: 72057594046644480 2025-05-07T08:52:54.071389Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-05-07T08:52:54.071401Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:275: Activate send for 281474976710672:0 2025-05-07T08:52:54.071455Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:630: Send to actor: [3:7501624124793206395:2519] msg type: 269552132 msg: NKikimrTxDataShard.TEvSchemaChangedResult TxId: 281474976710672 at schemeshard: 72057594046644480 2025-05-07T08:52:54.071542Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 2146435072, Sender [3:7501624099023400257:2140], Recipient [3:7501624099023400257:2140]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-05-07T08:52:54.071559Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4857: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-05-07T08:52:54.071591Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 281474976710672:0, at schemeshard: 72057594046644480 2025-05-07T08:52:54.071613Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046644480] TDone opId# 281474976710672:0 ProgressState 2025-05-07T08:52:54.071689Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-05-07T08:52:54.071705Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976710672:0 progress is 3/3 2025-05-07T08:52:54.071718Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 281474976710672 ready parts: 3/3 2025-05-07T08:52:54.071738Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976710672:0 progress is 3/3 2025-05-07T08:52:54.071746Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 281474976710672 ready parts: 3/3 2025-05-07T08:52:54.071761Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 281474976710672, ready parts: 3/3, is published: true 2025-05-07T08:52:54.071801Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [3:7501624124793206362:2516] message: TxId: 281474976710672 2025-05-07T08:52:54.071821Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 281474976710672 ready parts: 3/3 2025-05-07T08:52:54.071846Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 281474976710672:0 2025-05-07T08:52:54.071859Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 281474976710672:0 2025-05-07T08:52:54.071973Z node 3 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 17] was 4 2025-05-07T08:52:54.071987Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 281474976710672:1 2025-05-07T08:52:54.071994Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 281474976710672:1 2025-05-07T08:52:54.072009Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 18] was 3 2025-05-07T08:52:54.072016Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 281474976710672:2 2025-05-07T08:52:54.072022Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 281474976710672:2 2025-05-07T08:52:54.072049Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 19] was 3 2025-05-07T08:52:54.072442Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-05-07T08:52:54.072491Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:630: Send to actor: [3:7501624124793206362:2516] msg type: 271124998 msg: NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976710672 at schemeshard: 72057594046644480 2025-05-07T08:52:54.076524Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269877764, Sender [3:7501624124793206372:3604], Recipient [3:7501624099023400257:2140]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-05-07T08:52:54.076555Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4938: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-05-07T08:52:54.076566Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5761: Server pipe is reset, at schemeshard: 72057594046644480 2025-05-07T08:52:54.076995Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269877764, Sender [3:7501624129088173766:3675], Recipient [3:7501624099023400257:2140]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-05-07T08:52:54.077014Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4938: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-05-07T08:52:54.077022Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5761: Server pipe is reset, at schemeshard: 72057594046644480 2025-05-07T08:52:54.433041Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271125000, Sender [0:0:0], Recipient [3:7501624099023400257:2140]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T08:52:54.433091Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4848: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T08:52:54.433139Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271124999, Sender [3:7501624099023400257:2140], Recipient [3:7501624099023400257:2140]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T08:52:54.433157Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4847: StateWork, 
processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T08:52:54.984442Z node 3 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-05-07T08:52:55.434155Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271125000, Sender [0:0:0], Recipient [3:7501624099023400257:2140]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T08:52:55.434203Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4848: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T08:52:55.434254Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271124999, Sender [3:7501624099023400257:2140], Recipient [3:7501624099023400257:2140]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T08:52:55.434271Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4847: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T08:52:55.960266Z node 3 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-05-07T08:52:55.996163Z node 3 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill >> TSchemeShardExtSubDomainTest::CreateAndAlterWithoutEnablingTx-AlterDatabaseCreateHiveFirst [GOOD] >> TSchemeShardExtSubDomainTest::CreateAndAlterWithExternalHive-AlterDatabaseCreateHiveFirst-false >> TBSV::ShardsNotLeftInShardsToDelete >> TSchemeShardExtSubDomainTest::CreateAndAlter-AlterDatabaseCreateHiveFirst-ExternalHive [GOOD] >> TSchemeShardExtSubDomainTest::CreateAndAlterAlterAddStoragePool >> TSchemeShardExtSubDomainTest::AlterTwiceAndWithPlainAlterSubdomain-ExternalHive [GOOD] >> TSchemeShardExtSubDomainTest::AlterTwiceAndWithPlainAlterSubdomain-AlterDatabaseCreateHiveFirst >> TSchemeShardExtSubDomainTest::CreateAndAlterAlterAddStoragePool-ExternalHive [GOOD] >> TSchemeShardExtSubDomainTest::CreateAndAlterAlterAddStoragePool-AlterDatabaseCreateHiveFirst-ExternalHive |89.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_bsvolume/unittest |89.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/persqueue/ut/slow/ydb-core-persqueue-ut-slow |89.7%| [LD] {RESULT} $(B)/ydb/core/persqueue/ut/slow/ydb-core-persqueue-ut-slow |89.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/persqueue/ut/slow/ydb-core-persqueue-ut-slow >> TBSV::CleanupDroppedVolumesOnRestart ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/yql/unittest >> KqpScripting::StreamExecuteYqlScriptSeveralQueriesComplex [GOOD] Test command err: Trying to start YDB, gRPC: 26965, MsgBus: 4780 2025-05-07T08:52:43.061183Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501624082932457899:2060];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:52:43.061276Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002bad/r3tmp/tmpU7ZD2X/pdisk_1.dat 2025-05-07T08:52:43.567233Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:52:43.589551Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:52:43.589651Z node 1 
:HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:52:43.594120Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 26965, node 1 2025-05-07T08:52:43.829911Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:52:43.829950Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:52:43.829962Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:52:43.830232Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:4780 TClient is connected to server localhost:4780 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:52:44.821178Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:52:44.862454Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:52:45.076540Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:52:45.354561Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:52:45.464166Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:52:47.704022Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501624100112328743:2408], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:47.704143Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:48.061818Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501624082932457899:2060];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:52:48.061927Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:52:48.172344Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T08:52:48.212895Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T08:52:48.288265Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T08:52:48.343595Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T08:52:48.387170Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T08:52:48.477607Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T08:52:48.574551Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T08:52:48.720501Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501624104407296709:2473], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:48.720568Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:48.720866Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501624104407296714:2476], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:48.725161Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-05-07T08:52:48.742235Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501624104407296716:2477], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T08:52:48.815343Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501624104407296767:3429] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } Trying to start YDB, gRPC: 8197, MsgBus: 12534 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002bad/r3tmp/tmp7kOEF8/pdisk_1.dat 2025-05-07T08:52:51.487734Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:52:51.577193Z node 2 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:52:51.596016Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:52:51.596122Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:52:51.597897Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 8197, node 2 2025-05-07T08:52:51.665235Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:52:51.665259Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:52:51.665279Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:52:51.665414Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:12534 TClient is connected to server localhost:12534 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:52:52.203682Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:52:52.229409Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T08:52:52.320260Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:52:52.479805Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-05-07T08:52:52.568689Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 2025-05-07T08:52:54.992208Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7501624129778610558:2406], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:54.992290Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:55.047602Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-05-07T08:52:55.092367Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-05-07T08:52:55.135708Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-05-07T08:52:55.177524Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-05-07T08:52:55.227563Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-05-07T08:52:55.296904Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-05-07T08:52:55.388998Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-05-07T08:52:55.522543Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7501624134073578509:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:55.522658Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:55.523031Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7501624134073578514:2472], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:55.527340Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-05-07T08:52:55.560790Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7501624134073578516:2473], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-05-07T08:52:55.617326Z node 2 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [2:7501624134073578569:3423] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } Result: [[[[101u]]];[[[102u]]];[[[103u]]];[[[104u]]];[[[105u]]]] >> TSchemeShardExtSubDomainTest::Drop-AlterDatabaseCreateHiveFirst [GOOD] >> TSchemeShardExtSubDomainTest::Drop-AlterDatabaseCreateHiveFirst-ExternalHive >> KqpPg::MkqlTerminate [GOOD] >> KqpPg::NoSelectFullScan >> TSchemeShardExtSubDomainTest::CreateAndAlterWithExternalHive-AlterDatabaseCreateHiveFirst-false [GOOD] >> TSchemeShardExtSubDomainTest::CreateAndAlterWithExternalHive-AlterDatabaseCreateHiveFirst-true >> TSchemeShardExtSubDomainTest::AlterTwiceAndWithPlainAlterSubdomain-AlterDatabaseCreateHiveFirst [GOOD] >> TSchemeShardExtSubDomainTest::AlterTwiceAndWithPlainAlterSubdomain-AlterDatabaseCreateHiveFirst-ExternalHive >> TSchemeShardExtSubDomainTest::AlterCantChangeExternalStatisticsAggregator-AlterDatabaseCreateHiveFirst-false [GOOD] >> TSchemeShardExtSubDomainTest::AlterCantChangeExternalStatisticsAggregator-AlterDatabaseCreateHiveFirst-true >> KqpYql::EvaluateFor [GOOD] >> TSchemeShardExtSubDomainTest::AlterCantChangeSetParams-AlterDatabaseCreateHiveFirst-true [GOOD] >> TSchemeShardExtSubDomainTest::AlterRequiresParamCombinations-AlterDatabaseCreateHiveFirst-false >> TSchemeShardExtSubDomainTest::CreateAndAlterAlterAddStoragePool-AlterDatabaseCreateHiveFirst-ExternalHive [GOOD] >> TSchemeShardExtSubDomainTest::CreateAndAlterAlterSameStoragePools >> TSchemeShardExtSubDomainTest::CreateAndAlterAlterAddStoragePool [GOOD] >> TSchemeShardExtSubDomainTest::CreateAndAlterAlterAddStoragePool-AlterDatabaseCreateHiveFirst >> TBSV::ShardsNotLeftInShardsToDelete [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpVectorIndexes::VectorIndexIsNotUpdatable [GOOD] Test command err: Trying to start YDB, gRPC: 11496, MsgBus: 6455 2025-05-07T08:51:49.884503Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501623851683653680:2196];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:51:49.884942Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003c82/r3tmp/tmp5P3nRr/pdisk_1.dat 2025-05-07T08:51:50.480976Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:51:50.490988Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:51:50.491133Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:51:50.494395Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 11496, node 1 2025-05-07T08:51:50.714658Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:51:50.714682Z node 1 
:NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:51:50.714690Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:51:50.714817Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6455 TClient is connected to server localhost:6455 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:51:52.036269Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:51:52.105122Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:51:52.532697Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:51:53.007207Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:51:53.192950Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:51:54.886275Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501623851683653680:2196];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:51:54.899720Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:51:56.192170Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623881748426278:2411], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:51:56.192330Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:51:57.208069Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T08:51:57.294909Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T08:51:57.342902Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T08:51:57.420896Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T08:51:57.543366Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T08:51:57.630367Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T08:51:57.749845Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T08:51:57.906061Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623886043394248:2476], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:51:57.906190Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:51:57.906666Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623886043394253:2479], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:51:57.918947Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-05-07T08:51:57.961726Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501623886043394255:2480], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T08:51:58.039441Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501623890338361604:3437] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:51:59.689266Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269877761, Sender [1:7501623894633329189:3613], Recipient [1:7501623855978621228:2176]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-05-07T08:51:59.689312Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4937: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-05-07T08:51:59.689324Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5713: Pipe server connected, at tablet: 72057594046644480 2025-05-07T08:51:59.689363Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271122432, Sender [1:7501623894633329185:3610], Recipient [1:7501623855978621228:2176]: {TEvModifySchemeTransaction txid# 281474976710672 TabletId# 72057594046644480} 2025-05-07T08:51:59.689376Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4851: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-05-07T08:51:59.776140Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/Root" OperationType: ESchemeOpCreateTable CreateTable { Name: "TestTable" Columns { Name: "pk" Type: "Int64" NotNull: false } Columns { Name: "emb" Type: "String" NotNull: false } Columns { Name: "data" Type: "String" NotNull: false } KeyColumnNames: "pk" PartitionConfig { PartitioningPolicy { MinPartitionsCount: 3 } ColumnFamilies { Id: 0 StorageConfig { SysLog { PreferredPoolKind: "test" } Log { PreferredPoolKind: "test" } Data { PreferredPoolKind: "test" } } } } SplitBoundary { KeyPrefix { Tuple { Optional { Int64: 4 } } } } SplitBoundary { KeyPrefix { Tuple { Optional { Int64: 6 } } } } Temporary: false } CreateIndexedTable { } } TxId: 281474976710672 TabletId: 72057594046644480 PeerName: "ipv6:[::1]:51646" , at schemeshard: 72057594046644480 2025-05-07T08:51:59.776561Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_table.cpp:426: TCreateTable Propose, path: /Root/TestTable, opId: 281474976710672:0, at schemeshard: 72057594046644480 2025-05-07T08:51:59.776757Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_create_table.cpp:433: TCreateTable Propose, path: /Root/TestTable, opId: 281474976710672:0, schema: Name: "TestTable" Columns { Name: "pk" Type: "Int64" NotNull: false } Columns { Name: "emb" Type: "String" NotNull: false } Columns { Name: "data" Type: "String" NotNull: false } KeyColumnNames: "pk" PartitionConfig { PartitioningPolicy { MinPartitionsCount: 3 } ColumnFamilies { Id: 0 StorageConfig { SysLog { PreferredPoolKind: "test" } Log { PreferredPoolKind: "test" } Data { PreferredPoolKind: "test" } } } } SplitBoundary { KeyPrefix { Tuple { Optional { Int64: 4 } } } } SplitBoundary { KeyPrefix { Tuple { Optional { Int64: 6 } } } } Temporary: false, at schemeshard: 72057594046644480 2025-05-07T08:51:59.777175Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_i ... 
athId: 17], IndexType: EIndexTypeGlobalVectorKmeansTree, IndexName: index2, IndexColumn: emb, State: Unlocking, IsCancellationRequested: 0, Issue: , SubscribersCount: 1, CreateSender: [2:7501624141633093199:2603], AlterMainTableTxId: 0, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 281474976710766, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976710767, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 0, ApplyTxId: 281474976710772, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 0, UnlockTxStatus: StatusSuccess, UnlockTxDone: 0, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 29, upload bytes: 655, read rows: 32, read bytes: 677 }, Billed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }} 2025-05-07T08:52:57.821880Z node 2 :BUILD_INDEX DEBUG: schemeshard_build_index_tx_base.cpp:181: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: AllocateTxId 281474976715680 2025-05-07T08:52:57.822020Z node 2 :BUILD_INDEX INFO: schemeshard_build_index__progress.cpp:2606: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxReply : TEvAllocateResult, BuildIndexId: 281474976715680, txId# 281474976710774 2025-05-07T08:52:57.822078Z node 2 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:2613: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxReply : TEvAllocateResult, buildInfo: TBuildInfo{ IndexBuildId: 281474976715680, Uid: , DomainPathId: [OwnerId: 72057594046644480, LocalPathId: 1], TablePathId: [OwnerId: 72057594046644480, LocalPathId: 17], IndexType: EIndexTypeGlobalVectorKmeansTree, IndexName: index2, IndexColumn: emb, State: Unlocking, IsCancellationRequested: 0, Issue: , SubscribersCount: 1, CreateSender: [2:7501624141633093199:2603], AlterMainTableTxId: 0, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 281474976710766, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976710767, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 0, ApplyTxId: 281474976710772, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 0, UnlockTxStatus: StatusSuccess, UnlockTxDone: 0, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 29, upload bytes: 655, read rows: 32, read bytes: 677 }, Billed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }} 2025-05-07T08:52:57.822428Z node 2 :BUILD_INDEX INFO: schemeshard_build_index__progress.cpp:1105: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 281474976715680 Unlocking TBuildInfo{ IndexBuildId: 281474976715680, Uid: , DomainPathId: [OwnerId: 72057594046644480, LocalPathId: 1], TablePathId: [OwnerId: 72057594046644480, LocalPathId: 17], IndexType: EIndexTypeGlobalVectorKmeansTree, IndexName: index2, IndexColumn: emb, State: Unlocking, IsCancellationRequested: 0, Issue: , SubscribersCount: 1, CreateSender: [2:7501624141633093199:2603], AlterMainTableTxId: 0, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 281474976710766, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976710767, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 0, ApplyTxId: 281474976710772, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976710774, UnlockTxStatus: StatusSuccess, UnlockTxDone: 0, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 29, upload bytes: 655, read rows: 32, read bytes: 677 }, Billed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }} 2025-05-07T08:52:57.822594Z node 2 
:BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:465: UnlockPropose 281474976715680 Unlocking Transaction { WorkingDir: "/Root" OperationType: ESchemeOpDropLock LockConfig { Name: "TestTable" } LockGuard { OwnerTxId: 281474976710766 } Internal: true } TxId: 281474976710774 TabletId: 72057594046644480 FailOnExist: true 2025-05-07T08:52:57.823607Z node 2 :BUILD_INDEX INFO: schemeshard_build_index__progress.cpp:2450: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxReply : TEvModifySchemeTransactionResult, BuildIndexId: 281474976715680, cookie: 281474976715680, txId: 281474976710774, status: StatusAccepted 2025-05-07T08:52:57.823707Z node 2 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:2454: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxReply : TEvModifySchemeTransactionResult, buildInfo: TBuildInfo{ IndexBuildId: 281474976715680, Uid: , DomainPathId: [OwnerId: 72057594046644480, LocalPathId: 1], TablePathId: [OwnerId: 72057594046644480, LocalPathId: 17], IndexType: EIndexTypeGlobalVectorKmeansTree, IndexName: index2, IndexColumn: emb, State: Unlocking, IsCancellationRequested: 0, Issue: , SubscribersCount: 1, CreateSender: [2:7501624141633093199:2603], AlterMainTableTxId: 0, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 281474976710766, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976710767, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 0, ApplyTxId: 281474976710772, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976710774, UnlockTxStatus: StatusSuccess, UnlockTxDone: 0, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 29, upload bytes: 655, read rows: 32, read bytes: 677 }, Billed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }}, record: Status: StatusAccepted TxId: 281474976710774 SchemeshardId: 72057594046644480 PathId: 17 2025-05-07T08:52:57.824670Z node 2 :BUILD_INDEX INFO: schemeshard_build_index__progress.cpp:1105: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 281474976715680 Unlocking TBuildInfo{ IndexBuildId: 281474976715680, Uid: , DomainPathId: [OwnerId: 72057594046644480, LocalPathId: 1], TablePathId: [OwnerId: 72057594046644480, LocalPathId: 17], IndexType: EIndexTypeGlobalVectorKmeansTree, IndexName: index2, IndexColumn: emb, State: Unlocking, IsCancellationRequested: 0, Issue: , SubscribersCount: 1, CreateSender: [2:7501624141633093199:2603], AlterMainTableTxId: 0, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 281474976710766, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976710767, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 0, ApplyTxId: 281474976710772, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976710774, UnlockTxStatus: StatusAccepted, UnlockTxDone: 0, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 29, upload bytes: 655, read rows: 32, read bytes: 677 }, Billed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }} 2025-05-07T08:52:57.827307Z node 2 :BUILD_INDEX INFO: schemeshard_build_index__progress.cpp:2321: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxReply : TEvNotifyTxCompletionResult, txId# 281474976710774, buildInfoId: 281474976715680 2025-05-07T08:52:57.827376Z node 2 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:2324: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxReply : TEvNotifyTxCompletionResult, txId# 281474976710774, buildInfo: TBuildInfo{ IndexBuildId: 
281474976715680, Uid: , DomainPathId: [OwnerId: 72057594046644480, LocalPathId: 1], TablePathId: [OwnerId: 72057594046644480, LocalPathId: 17], IndexType: EIndexTypeGlobalVectorKmeansTree, IndexName: index2, IndexColumn: emb, State: Unlocking, IsCancellationRequested: 0, Issue: , SubscribersCount: 1, CreateSender: [2:7501624141633093199:2603], AlterMainTableTxId: 0, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 281474976710766, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976710767, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 0, ApplyTxId: 281474976710772, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976710774, UnlockTxStatus: StatusAccepted, UnlockTxDone: 0, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 29, upload bytes: 655, read rows: 32, read bytes: 677 }, Billed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }} 2025-05-07T08:52:57.827674Z node 2 :BUILD_INDEX INFO: schemeshard_build_index__progress.cpp:1105: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 281474976715680 Unlocking TBuildInfo{ IndexBuildId: 281474976715680, Uid: , DomainPathId: [OwnerId: 72057594046644480, LocalPathId: 1], TablePathId: [OwnerId: 72057594046644480, LocalPathId: 17], IndexType: EIndexTypeGlobalVectorKmeansTree, IndexName: index2, IndexColumn: emb, State: Unlocking, IsCancellationRequested: 0, Issue: , SubscribersCount: 1, CreateSender: [2:7501624141633093199:2603], AlterMainTableTxId: 0, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 281474976710766, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976710767, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 0, ApplyTxId: 281474976710772, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976710774, UnlockTxStatus: StatusAccepted, UnlockTxDone: 1, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 29, upload bytes: 655, read rows: 32, read bytes: 677 }, Billed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }} 2025-05-07T08:52:57.827694Z node 2 :BUILD_INDEX INFO: schemeshard_build_index_tx_base.cpp:25: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: Change state from Unlocking to Done 2025-05-07T08:52:57.827933Z node 2 :BUILD_INDEX INFO: schemeshard_build_index__progress.cpp:1105: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 281474976715680 Done TBuildInfo{ IndexBuildId: 281474976715680, Uid: , DomainPathId: [OwnerId: 72057594046644480, LocalPathId: 1], TablePathId: [OwnerId: 72057594046644480, LocalPathId: 17], IndexType: EIndexTypeGlobalVectorKmeansTree, IndexName: index2, IndexColumn: emb, State: Done, IsCancellationRequested: 0, Issue: , SubscribersCount: 1, CreateSender: [2:7501624141633093199:2603], AlterMainTableTxId: 0, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 281474976710766, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976710767, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 0, ApplyTxId: 281474976710772, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976710774, UnlockTxStatus: StatusAccepted, UnlockTxDone: 1, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 29, upload bytes: 655, read rows: 32, read bytes: 677 }, Billed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }} 2025-05-07T08:52:57.827948Z node 2 :BUILD_INDEX TRACE: 
schemeshard_build_index_tx_base.cpp:325: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TIndexBuildInfo SendNotifications: : id# 281474976715680, subscribers count# 1 2025-05-07T08:52:57.828249Z node 2 :BUILD_INDEX DEBUG: schemeshard_build_index__get.cpp:19: TIndexBuilder::TXTYPE_GET_INDEX_BUILD: DoExecute DatabaseName: "/Root" IndexBuildId: 281474976715680 2025-05-07T08:52:57.828458Z node 2 :BUILD_INDEX DEBUG: schemeshard_build_index_tx_base.h:93: TIndexBuilder::TXTYPE_GET_INDEX_BUILD: Reply Status: SUCCESS IndexBuild { Id: 281474976715680 State: STATE_DONE Settings { source_path: "/Root/TestTable" index { name: "index2" index_columns: "emb" global_vector_kmeans_tree_index { } } max_shards_in_flight: 32 ScanSettings { } } Progress: 100 } 2025-05-07T08:52:57.832689Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037932 not found 2025-05-07T08:52:57.832727Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037934 not found 2025-05-07T08:52:57.832850Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037933 not found 2025-05-07T08:52:57.901904Z node 2 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill |89.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_bsvolume/unittest >> TSchemeShardExtSubDomainTest::AlterRequiresParamCombinations-AlterDatabaseCreateHiveFirst-false [GOOD] >> TSchemeShardExtSubDomainTest::AlterRequiresParamCombinations-AlterDatabaseCreateHiveFirst-true >> TSchemeShardExtSubDomainTest::CreateAndAlterAlterSameStoragePools [GOOD] >> TSchemeShardExtSubDomainTest::CreateAndAlterAlterSameStoragePools-ExternalHive ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_bsvolume/unittest >> TBSV::ShardsNotLeftInShardsToDelete [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140] 2025-05-07T08:52:59.607315Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:52:59.607431Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:52:59.607481Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:52:59.607522Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:52:59.607572Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:52:59.607638Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:52:59.607722Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:52:59.607803Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:52:59.608687Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:52:59.609222Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:52:59.699914Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:52:59.699998Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:52:59.720456Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:52:59.720715Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:52:59.720952Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:52:59.728361Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:52:59.728716Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:52:59.729524Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:52:59.729767Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:52:59.734798Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:52:59.736501Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:52:59.736584Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:52:59.736669Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:52:59.736724Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:52:59.736783Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:52:59.737070Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:52:59.753722Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T08:52:59.913269Z 
node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:52:59.913549Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:52:59.913838Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:52:59.915099Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:52:59.915196Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:52:59.918303Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:52:59.918500Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:52:59.918774Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:52:59.918848Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:52:59.918898Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:52:59.918935Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:52:59.922881Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:52:59.922961Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:52:59.922996Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:52:59.927993Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:52:59.928076Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:52:59.928168Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:52:59.928229Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation 
IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:52:59.932203Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:52:59.935020Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:52:59.935334Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:52:59.936643Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:52:59.936845Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:52:59.936926Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:52:59.937253Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:52:59.937320Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:52:59.937490Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:52:59.937560Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:52:59.942850Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:52:59.942910Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:52:59.943148Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:52:59.943191Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 
7594046678944, at schemeshard: 72057594046678944 2025-05-07T08:53:00.127968Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_bsv.cpp:40: TDropBlockStoreVolume TPropose, operationId: 102:0 HandleReply TEvOperationPlan, step: 5000003, at schemeshard: 72057594046678944 2025-05-07T08:53:00.128120Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-05-07T08:53:00.128317Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#102:0 progress is 1/1 2025-05-07T08:53:00.128363Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-05-07T08:53:00.128408Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#102:0 progress is 1/1 2025-05-07T08:53:00.128442Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-05-07T08:53:00.128505Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:53:00.128590Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-05-07T08:53:00.128644Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: false 2025-05-07T08:53:00.128698Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-05-07T08:53:00.128745Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 102:0 2025-05-07T08:53:00.128780Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 102:0 2025-05-07T08:53:00.128928Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-05-07T08:53:00.128985Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 102, publications: 2, subscribers: 0 2025-05-07T08:53:00.129033Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 102, [OwnerId: 72057594046678944, LocalPathId: 1], 7 2025-05-07T08:53:00.129073Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 102, [OwnerId: 72057594046678944, LocalPathId: 2], 18446744073709551615 2025-05-07T08:53:00.139798Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:1 2025-05-07T08:53:00.139976Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:1 tabletId 72075186233409546 2025-05-07T08:53:00.141000Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2 2025-05-07T08:53:00.141100Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:2 tabletId 72075186233409547 2025-05-07T08:53:00.142225Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:1 hive 72057594037968897 at ss 72057594046678944 2025-05-07T08:53:00.142281Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:2 hive 72057594037968897 at ss 72057594046678944 2025-05-07T08:53:00.142532Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:53:00.142577Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 102, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:53:00.142762Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 102, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-05-07T08:53:00.142923Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:53:00.144745Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:205:2207], at schemeshard: 72057594046678944, txId: 102, path id: 1 2025-05-07T08:53:00.144825Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:205:2207], at schemeshard: 72057594046678944, txId: 102, path id: 2 FAKE_COORDINATOR: Erasing txId 102 2025-05-07T08:53:00.145559Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 102 2025-05-07T08:53:00.145726Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 102 2025-05-07T08:53:00.145780Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 102 2025-05-07T08:53:00.145833Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 18446744073709551615 2025-05-07T08:53:00.145885Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-05-07T08:53:00.146508Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-05-07T08:53:00.146569Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-05-07T08:53:00.146666Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-05-07T08:53:00.147192Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle 
TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 102 2025-05-07T08:53:00.147281Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 102 2025-05-07T08:53:00.147319Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 102 2025-05-07T08:53:00.147355Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 7 2025-05-07T08:53:00.147392Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:53:00.147488Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 102, subscribers: 0 2025-05-07T08:53:00.148376Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 1 TxId_Deprecated: 1 2025-05-07T08:53:00.148553Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5898: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 1 ShardOwnerId: 72057594046678944 ShardLocalIdx: 1, at schemeshard: 72057594046678944 2025-05-07T08:53:00.148897Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 2 TxId_Deprecated: 2 2025-05-07T08:53:00.149178Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5898: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 2 ShardOwnerId: 72057594046678944 ShardLocalIdx: 2, at schemeshard: 72057594046678944 2025-05-07T08:53:00.154505Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-05-07T08:53:00.155574Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-05-07T08:53:00.155910Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-05-07T08:53:00.158169Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:1 2025-05-07T08:53:00.158271Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2 TestModificationResult got TxId: 102, wait until txId: 102 TestWaitNotification wait txId: 102 2025-05-07T08:53:00.158679Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2025-05-07T08:53:00.158734Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 2025-05-07T08:53:00.159313Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: 
NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 2025-05-07T08:53:00.159469Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-05-07T08:53:00.159512Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:393:2373] TestWaitNotification: OK eventTxId 102 wait until 72075186233409546 is deleted wait until 72075186233409547 is deleted 2025-05-07T08:53:00.159999Z node 1 :HIVE INFO: tablet_helpers.cpp:1476: [72057594037968897] TEvSubscribeToTabletDeletion, 72075186233409546 2025-05-07T08:53:00.160141Z node 1 :HIVE INFO: tablet_helpers.cpp:1476: [72057594037968897] TEvSubscribeToTabletDeletion, 72075186233409547 Deleted tabletId 72075186233409546 Deleted tabletId 72075186233409547 { Type { Kind: Struct Struct { Member { Name: "ShardsToDelete" Type { Kind: Optional Optional { Item { Kind: Struct Struct { Member { Name: "List" Type { Kind: List List { Item { Kind: Struct Struct { Member { Name: "ShardIdx" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } } } } } } Member { Name: "Truncated" Type { Kind: Data Data { Scheme: 6 } } } } } } } } } } Value { Struct { Optional { Struct { } Struct { Bool: false } } } } } >> TSchemeShardExtSubDomainTest::CreateAndAlterWithExternalHive-AlterDatabaseCreateHiveFirst-true [GOOD] >> TSchemeShardExtSubDomainTest::CreateAndAlterThenDropChangesParent-ExternalHive >> TSchemeShardExtSubDomainTest::AlterCantChangeExternalStatisticsAggregator-AlterDatabaseCreateHiveFirst-true [GOOD] >> TSchemeShardExtSubDomainTest::AlterTwiceAndWithPlainAlterSubdomain-AlterDatabaseCreateHiveFirst-ExternalHive [GOOD] >> TBSV::CleanupDroppedVolumesOnRestart [GOOD] >> TSchemeShardExtSubDomainTest::CreateAndAlterAlterAddStoragePool-AlterDatabaseCreateHiveFirst [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/yql/unittest >> KqpYql::EvaluateFor [GOOD] Test command err: Trying to start YDB, gRPC: 30307, MsgBus: 31567 2025-05-07T08:52:43.419215Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501624081367844383:2060];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:52:43.419274Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002b91/r3tmp/tmpMX6WpU/pdisk_1.dat 2025-05-07T08:52:44.134872Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:52:44.134970Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:52:44.159172Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:52:44.164659Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 30307, node 1 2025-05-07T08:52:44.432637Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:52:44.432677Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 
2025-05-07T08:52:44.432686Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:52:44.432802Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:31567 TClient is connected to server localhost:31567 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:52:45.472093Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:52:45.506738Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T08:52:45.531126Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:52:45.887984Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:52:46.168641Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:52:46.307831Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:52:48.422157Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501624081367844383:2060];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:52:48.422218Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:52:49.016156Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501624107137649816:2409], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:49.016253Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:49.339917Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T08:52:49.429719Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T08:52:49.528811Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T08:52:49.618266Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T08:52:49.683057Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T08:52:49.814203Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T08:52:49.918911Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T08:52:50.085152Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501624111432617780:2475], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:50.085271Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:50.085721Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501624111432617785:2478], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:50.091433Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-05-07T08:52:50.105619Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501624111432617787:2479], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T08:52:50.200245Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501624111432617838:3433] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } Trying to start YDB, gRPC: 21020, MsgBus: 61599 2025-05-07T08:52:52.805049Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7501624119628909290:2060];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:52:52.805109Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002b91/r3tmp/tmpNuncH3/pdisk_1.dat 2025-05-07T08:52:53.039240Z node 2 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:52:53.058632Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:52:53.058715Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:52:53.071341Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 21020, node 2 2025-05-07T08:52:53.310564Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:52:53.310586Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:52:53.310593Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:52:53.310708Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:61599 TClient is connected to server localhost:61599 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:52:54.018662Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T08:52:54.030700Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-05-07T08:52:54.052210Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:52:54.169136Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-05-07T08:52:54.402066Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 2025-05-07T08:52:54.536675Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:52:57.284127Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7501624141103747408:2407], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:57.284221Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:57.359651Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-05-07T08:52:57.423769Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-05-07T08:52:57.512933Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-05-07T08:52:57.548496Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-05-07T08:52:57.584925Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-05-07T08:52:57.621533Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-05-07T08:52:57.695325Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-05-07T08:52:57.791388Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7501624141103748073:2470], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:57.791493Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:57.791917Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7501624141103748078:2473], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:57.796282Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-05-07T08:52:57.806175Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7501624119628909290:2060];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:52:57.806233Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:52:57.813069Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7501624141103748080:2474], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-05-07T08:52:57.888772Z node 2 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [2:7501624141103748134:3417] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } |89.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_bsvolume/unittest |89.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/storagepoolmon/ut/unittest >> TSchemeShardExtSubDomainTest::Drop-AlterDatabaseCreateHiveFirst-ExternalHive [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_extsubdomain/unittest >> TSchemeShardExtSubDomainTest::AlterTwiceAndWithPlainAlterSubdomain-AlterDatabaseCreateHiveFirst-ExternalHive [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140] 2025-05-07T08:52:53.435474Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:52:53.435553Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:52:53.435587Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:52:53.435620Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:52:53.435659Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:52:53.435688Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:52:53.435741Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:52:53.435829Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:52:53.436481Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:52:53.436824Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:52:53.532950Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:52:53.533005Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:52:53.549646Z node 1 :FLAT_TX_SCHEMESHARD 
DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:52:53.549861Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:52:53.550050Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:52:53.561556Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:52:53.561869Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:52:53.562625Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:52:53.562843Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:52:53.566647Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:52:53.568385Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:52:53.568488Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:52:53.568598Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:52:53.568655Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:52:53.568721Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:52:53.569012Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:52:53.581005Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T08:52:53.737319Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:52:53.737534Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:52:53.737747Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:52:53.737938Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:52:53.738019Z node 1 
:FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:52:53.740483Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:52:53.740639Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:52:53.740849Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:52:53.740984Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:52:53.741037Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:52:53.741094Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:52:53.742944Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:52:53.742989Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:52:53.743028Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:52:53.745177Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:52:53.745229Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:52:53.745299Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:52:53.745346Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:52:53.749007Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:52:53.751047Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:52:53.751276Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for 
txId: 1 at step: 5000001 2025-05-07T08:52:53.752012Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:52:53.752175Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:52:53.752243Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:52:53.752546Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:52:53.752605Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:52:53.752808Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:52:53.752881Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:52:53.759441Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:52:53.759501Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:52:53.759702Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:52:53.759776Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 
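The trace above shows a single suboperation (txId 1:0, an ESchemeOpAlterSubDomain) walking the schemeshard's numeric state machine: 2 -> 3 -> 128 -> 240, with a coordinator round trip between 128 and 240. A minimal sketch of that progression, assuming nothing beyond what the log itself prints (the state numbers and names come from the "Change state" and ProgressState lines; the enum and driver loop are illustrative, not YDB's real types):

    #include <cstdio>

    // State values match the "Change state for txid N:0 A -> B" lines above;
    // the names come from the corresponding ProgressState log entries.
    enum class ETxState : int {
        CreateParts    = 2,   // TCreateParts ("no shards to create, do next state")
        ConfigureParts = 3,   // NSubDomainState::TConfigureParts
        Propose        = 128, // NSubDomainState::TPropose, waits for TEvOperationPlan
        Done           = 240, // TDone: publish paths, notify waiters, remove tx state
    };

    // One step of the progression logged by schemeshard_impl.cpp:2492.
    static ETxState NextState(ETxState s) {
        switch (s) {
            case ETxState::CreateParts:    return ETxState::ConfigureParts;
            case ETxState::ConfigureParts: return ETxState::Propose;
            case ETxState::Propose:        return ETxState::Done;
            case ETxState::Done:           return ETxState::Done; // terminal
        }
        return ETxState::Done; // unreachable; keeps -Wreturn-type quiet
    }

    int main() {
        for (ETxState s = ETxState::CreateParts; s != ETxState::Done;) {
            ETxState next = NextState(s);
            std::printf("Change state for txid 1:0 %d -> %d\n",
                        static_cast<int>(s), static_cast<int>(next));
            s = next;
        }
        return 0;
    }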
pose ProgressState leave, operationId 103:0, at tablet# 72057594046678944 2025-05-07T08:53:01.113622Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 103 ready parts: 1/1 2025-05-07T08:53:01.113774Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 103 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:53:01.117324Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 103:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:103 msg type: 269090816 2025-05-07T08:53:01.117500Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 103, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 103 at step: 5000004 FAKE_COORDINATOR: advance: minStep5000004 State->FrontStep: 5000003 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 103 at step: 5000004 2025-05-07T08:53:01.117884Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000004, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:53:01.118053Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 103 Coordinator: 72057594046316545 AckTo { RawX1: 128 RawX2: 30064773225 } } Step: 5000004 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:53:01.118109Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 103:0, at tablet# 72057594046678944 2025-05-07T08:53:01.118448Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 103:0 128 -> 240 2025-05-07T08:53:01.118517Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 103:0, at tablet# 72057594046678944 2025-05-07T08:53:01.118655Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 6 2025-05-07T08:53:01.118826Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:575: Send TEvUpdateTenantSchemeShard, to actor: [7:394:2365], msg: TabletId: 72057594046678944 Generation: 2 StoragePools { Name: "pool-1" Kind: "hdd" } SubdomainVersion: 4, at schemeshard: 72057594046678944 2025-05-07T08:53:01.121080Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5782: Handle TEvUpdateTenantSchemeShard, at schemeshard: 72075186234409546, msg: TabletId: 72057594046678944 Generation: 2 StoragePools { Name: "pool-1" Kind: "hdd" } SubdomainVersion: 4 2025-05-07T08:53:01.121207Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__sync_update_tenants.cpp:79: TTxUpdateTenant DoExecute, msg: TabletId: 72057594046678944 Generation: 2 StoragePools { Name: "pool-1" Kind: "hdd" } SubdomainVersion: 4, at schemeshard: 72075186234409546 2025-05-07T08:53:01.121395Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:588: Cannot publish paths for unknown operation id#0 
FAKE_COORDINATOR: Erasing txId 103 2025-05-07T08:53:01.121726Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:53:01.121766Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 103, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-05-07T08:53:01.121921Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:53:01.121960Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [7:206:2208], at schemeshard: 72057594046678944, txId: 103, path id: 2 2025-05-07T08:53:01.122092Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 103:0, at schemeshard: 72057594046678944 2025-05-07T08:53:01.122160Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_extsubdomain.cpp:787: [72057594046678944] TSyncHive, operationId 103:0, ProgressState, NeedSyncHive: 0 2025-05-07T08:53:01.122200Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 103:0 240 -> 240 2025-05-07T08:53:01.123177Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 6 PathOwnerId: 72057594046678944, cookie: 103 2025-05-07T08:53:01.123318Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 6 PathOwnerId: 72057594046678944, cookie: 103 2025-05-07T08:53:01.123370Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 103 2025-05-07T08:53:01.123419Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 103, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 6 2025-05-07T08:53:01.123471Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 7 2025-05-07T08:53:01.123571Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 103, ready parts: 0/1, is published: true 2025-05-07T08:53:01.127176Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5769: Handle TEvSyncTenantSchemeShard, at schemeshard: 72057594046678944, msg: DomainSchemeShard: 72057594046678944 DomainPathId: 2 TabletID: 72075186234409546 Generation: 2 EffectiveACLVersion: 0 SubdomainVersion: 4 UserAttributesVersion: 1 TenantHive: 72075186233409546 TenantSysViewProcessor: 18446744073709551615 TenantRootACL: "" TenantStatisticsAggregator: 18446744073709551615 TenantGraphShard: 18446744073709551615 2025-05-07T08:53:01.127272Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__sync_update_tenants.cpp:26: TTxSyncTenant DoExecute, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-05-07T08:53:01.127381Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:567: DoUpdateTenant no hasChanges, 
pathId: [OwnerId: 72057594046678944, LocalPathId: 2], tenantLink: TSubDomainsLinks::TLink { DomainKey: [OwnerId: 72057594046678944, LocalPathId: 2], Generation: 2, ActorId:[7:394:2365], EffectiveACLVersion: 0, SubdomainVersion: 4, UserAttributesVersion: 1, TenantHive: 72075186233409546, TenantSysViewProcessor: 18446744073709551615, TenantStatisticsAggregator: 18446744073709551615, TenantGraphShard: 18446744073709551615, TenantRootACL: }, subDomain->GetVersion(): 4, actualEffectiveACLVersion: 0, actualUserAttrsVersion: 1, tenantHive: 72075186233409546, tenantSysViewProcessor: 18446744073709551615, at schemeshard: 72057594046678944 2025-05-07T08:53:01.127486Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72075186234409546 2025-05-07T08:53:01.127521Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72075186234409546, txId: 0, path id: [OwnerId: 72075186234409546, LocalPathId: 1] 2025-05-07T08:53:01.127648Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72075186234409546 2025-05-07T08:53:01.127680Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [7:482:2425], at schemeshard: 72075186234409546, txId: 0, path id: 1 2025-05-07T08:53:01.128817Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 103:0, at schemeshard: 72057594046678944 2025-05-07T08:53:01.128876Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046678944] TDone opId# 103:0 ProgressState 2025-05-07T08:53:01.129037Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#103:0 progress is 1/1 2025-05-07T08:53:01.129095Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-05-07T08:53:01.129194Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#103:0 progress is 1/1 2025-05-07T08:53:01.129239Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-05-07T08:53:01.129288Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 103, ready parts: 1/1, is published: true 2025-05-07T08:53:01.129340Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-05-07T08:53:01.129391Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 103:0 2025-05-07T08:53:01.129428Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 103:0 2025-05-07T08:53:01.129519Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 6 2025-05-07T08:53:01.130168Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72075186234409546, msg: Owner: 72075186234409546 Generation: 2 LocalPathId: 1 Version: 6 PathOwnerId: 72075186234409546, cookie: 0 2025-05-07T08:53:01.130291Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-05-07T08:53:01.130387Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__sync_update_tenants.cpp:36: TTxSyncTenant DoComplete, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 TestModificationResult got TxId: 103, wait until txId: 104 TestModificationResults wait txId: 104 TestModificationResult got TxId: 104, wait until txId: 104 TestWaitNotification wait txId: 103 2025-05-07T08:53:01.135552Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 103: send EvNotifyTxCompletion 2025-05-07T08:53:01.135617Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 103 2025-05-07T08:53:01.136095Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 103, at schemeshard: 72057594046678944 2025-05-07T08:53:01.136216Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-05-07T08:53:01.136255Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [7:568:2509] TestWaitNotification: OK eventTxId 103
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_extsubdomain/unittest >> TSchemeShardExtSubDomainTest::AlterCantChangeExternalStatisticsAggregator-AlterDatabaseCreateHiveFirst-true [GOOD]
Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:128:2058] recipient: [1:108:2140] 2025-05-07T08:52:50.355824Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:52:50.355977Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:52:50.356038Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:52:50.356082Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:52:50.356137Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:52:50.356175Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:52:50.356267Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:52:50.356354Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval#
600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:52:50.357235Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:52:50.357735Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:52:50.525423Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:52:50.525516Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:52:50.547100Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:52:50.547274Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:52:50.547492Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:52:50.569672Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:52:50.581014Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:52:50.582216Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:52:50.582819Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:52:50.588530Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:52:50.590784Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:52:50.590898Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:52:50.591678Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:52:50.591798Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:52:50.591909Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:52:50.592196Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:52:50.610817Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:239:2058] recipient: [1:15:2062] 2025-05-07T08:52:50.827993Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 
2025-05-07T08:52:50.828341Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:52:50.828652Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:52:50.829007Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:52:50.829123Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:52:50.832598Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:52:50.832826Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:52:50.833124Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:52:50.833201Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:52:50.833257Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:52:50.833303Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:52:50.836351Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:52:50.836439Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:52:50.836514Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:52:50.839463Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:52:50.839552Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:52:50.839623Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:52:50.839701Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:52:50.844780Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 
72057594046316545 2025-05-07T08:52:50.847974Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:52:50.848274Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:52:50.849648Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:52:50.849869Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 130 RawX2: 4294969450 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:52:50.849956Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:52:50.850473Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:52:50.850574Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:52:50.850826Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:52:50.850952Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:52:50.854458Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:52:50.854527Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:52:50.854771Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:52:50.854841Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 
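Between states 128 and 240, the schemeshard hands the transaction to the coordinator tablet (72057594046316545) with a [MinStep, MaxStep] planning window; in these unit tests FAKE_COORDINATOR stands in for it, assigning the next free plan step ("advance: minStep5000001 State->FrontStep: 0") and sending the plan back. A toy model of that exchange, built only from fields visible in the log (the class and its API are illustrative, not YDB's real coordinator interface):

    #include <cstdint>
    #include <cstdio>

    // A proposed transaction, reduced to the fields visible in the DoPropose
    // messages above; MaxStep of 18446744073709551615 means "no deadline".
    struct TProposal {
        uint64_t TxId;
        uint64_t MinStep;
        uint64_t MaxStep;
    };

    class TFakeCoordinator {
        uint64_t FrontStep = 5000000; // log shows steps 5000001, 5000003, 5000004, ...
    public:
        // Assigns the next plan step inside the tx's window; 0 means the
        // window has already closed and the tx cannot be planned.
        uint64_t Plan(const TProposal& p) {
            uint64_t step = FrontStep + 1;
            if (step < p.MinStep) step = p.MinStep;
            if (step > p.MaxStep) return 0;
            FrontStep = step;
            std::printf("FAKE_COORDINATOR: Add transaction: %llu at step: %llu\n",
                        (unsigned long long)p.TxId, (unsigned long long)step);
            return step;
        }
    };

    int main() {
        TFakeCoordinator coordinator;
        coordinator.Plan({1, 0, UINT64_MAX});   // the initial ALTER DATABASE tx
        coordinator.Plan({103, 0, UINT64_MAX}); // a later operation in the same test
        return 0;
    }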
D DEBUG: schemeshard__operation_common_subdomain.cpp:120: NSubDomainState::TConfigureParts operationId# 102:0 Got OK TEvConfigureStatus from tablet# 72075186233409549 shardIdx# 72057594046678944:4 at schemeshard# 72057594046678944 2025-05-07T08:53:01.129644Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 102:0 3 -> 128 2025-05-07T08:53:01.142015Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-05-07T08:53:01.142242Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-05-07T08:53:01.142309Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 102:0, at schemeshard: 72057594046678944 2025-05-07T08:53:01.142395Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 102:0, at tablet# 72057594046678944 2025-05-07T08:53:01.142470Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 102 ready parts: 1/1 2025-05-07T08:53:01.142652Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 102 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:53:01.160797Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 102:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:102 msg type: 269090816 2025-05-07T08:53:01.160991Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 102, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 102 at step: 5000003 FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000002 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 102 at step: 5000003 2025-05-07T08:53:01.161411Z node 8 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000003, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:53:01.161560Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 102 Coordinator: 72057594046316545 AckTo { RawX1: 134 RawX2: 34359740525 } } Step: 5000003 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:53:01.161623Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 102:0, at tablet# 72057594046678944 2025-05-07T08:53:01.161992Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 102:0 128 -> 240 2025-05-07T08:53:01.162070Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 102:0, at tablet# 72057594046678944 2025-05-07T08:53:01.162228Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 
72057594046678944, LocalPathId: 2] was 6 2025-05-07T08:53:01.162352Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:567: DoUpdateTenant no hasChanges, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], tenantLink: TSubDomainsLinks::TLink { DomainKey: [OwnerId: 72057594046678944, LocalPathId: 2], Generation: 2, ActorId:[8:364:2339], EffectiveACLVersion: 0, SubdomainVersion: 2, UserAttributesVersion: 1, TenantHive: 18446744073709551615, TenantSysViewProcessor: 18446744073709551615, TenantStatisticsAggregator: 72075186233409549, TenantGraphShard: 18446744073709551615, TenantRootACL: }, subDomain->GetVersion(): 2, actualEffectiveACLVersion: 0, actualUserAttrsVersion: 1, tenantHive: 18446744073709551615, tenantSysViewProcessor: 18446744073709551615, at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 102 2025-05-07T08:53:01.167526Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:53:01.167599Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 102, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-05-07T08:53:01.167837Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:53:01.167900Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [8:206:2208], at schemeshard: 72057594046678944, txId: 102, path id: 2 2025-05-07T08:53:01.168310Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-05-07T08:53:01.168384Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_extsubdomain.cpp:787: [72057594046678944] TSyncHive, operationId 102:0, ProgressState, NeedSyncHive: 0 2025-05-07T08:53:01.168434Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 102:0 240 -> 240 2025-05-07T08:53:01.169200Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 4 PathOwnerId: 72057594046678944, cookie: 102 2025-05-07T08:53:01.169338Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 4 PathOwnerId: 72057594046678944, cookie: 102 2025-05-07T08:53:01.169391Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 102 2025-05-07T08:53:01.169442Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 4 2025-05-07T08:53:01.169508Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 7 2025-05-07T08:53:01.169625Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 102, ready parts: 0/1, is published: true 2025-05-07T08:53:01.185520Z node 8 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-05-07T08:53:01.185620Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046678944] TDone opId# 102:0 ProgressState 2025-05-07T08:53:01.185776Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#102:0 progress is 1/1 2025-05-07T08:53:01.185829Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-05-07T08:53:01.185888Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#102:0 progress is 1/1 2025-05-07T08:53:01.185949Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-05-07T08:53:01.186040Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: true 2025-05-07T08:53:01.186146Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [8:304:2295] message: TxId: 102 2025-05-07T08:53:01.186220Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-05-07T08:53:01.186284Z node 8 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 102:0 2025-05-07T08:53:01.186334Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 102:0 2025-05-07T08:53:01.186647Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 6 2025-05-07T08:53:01.187457Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-05-07T08:53:01.189555Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-05-07T08:53:01.189650Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [8:510:2449] TestWaitNotification: OK eventTxId 102 TestModificationResults wait txId: 103 2025-05-07T08:53:01.192913Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpAlterExtSubDomain SubDomain { Name: "USER_0" ExternalStatisticsAggregator: false } } TxId: 103 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:53:01.193122Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_extsubdomain.cpp:1102: [72057594046678944] CreateCompatibleAlterExtSubDomain, opId 103:0, feature flag EnableAlterDatabaseCreateHiveFirst 1, tx WorkingDir: "/MyRoot" OperationType: ESchemeOpAlterExtSubDomain SubDomain { Name: "USER_0" ExternalStatisticsAggregator: false } 2025-05-07T08:53:01.193188Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_extsubdomain.cpp:1108: [72057594046678944] CreateCompatibleAlterExtSubDomain, opId 103:0, path /MyRoot/USER_0 2025-05-07T08:53:01.193353Z node 8 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_just_reject.cpp:47: TReject Propose, opId: 103:0, 
explain: Invalid AlterExtSubDomain request: Invalid ExtSubDomain request: ExternalStatisticsAggregator could only be added, not removed, at schemeshard: 72057594046678944 2025-05-07T08:53:01.193415Z node 8 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 103:1, propose status:StatusInvalidParameter, reason: Invalid AlterExtSubDomain request: Invalid ExtSubDomain request: ExternalStatisticsAggregator could only be added, not removed, at schemeshard: 72057594046678944 2025-05-07T08:53:01.196473Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 103, response: Status: StatusInvalidParameter Reason: "Invalid AlterExtSubDomain request: Invalid ExtSubDomain request: ExternalStatisticsAggregator could only be added, not removed" TxId: 103 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:53:01.196703Z node 8 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 103, database: /MyRoot, subject: , status: StatusInvalidParameter, reason: Invalid AlterExtSubDomain request: Invalid ExtSubDomain request: ExternalStatisticsAggregator could only be added, not removed, operation: ALTER DATABASE, path: /MyRoot/USER_0 TestModificationResult got TxId: 103, wait until txId: 103
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_bsvolume/unittest >> TBSV::CleanupDroppedVolumesOnRestart [GOOD]
Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:128:2058] recipient: [1:108:2140] 2025-05-07T08:53:00.178568Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:53:00.178685Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:53:00.178739Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:53:00.178775Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:53:00.178819Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:53:00.178847Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:53:00.178907Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:53:00.178978Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:53:00.181809Z node 1 :FLAT_TX_SCHEMESHARD NOTICE:
schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:53:00.182272Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:53:00.270824Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:53:00.270903Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:53:00.303172Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:53:00.303301Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:53:00.303470Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:53:00.319759Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:53:00.320420Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:53:00.321214Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:53:00.321580Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:53:00.324471Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:53:00.326230Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:53:00.326305Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:53:00.326404Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:53:00.326457Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:53:00.326518Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:53:00.326737Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:53:00.334719Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:239:2058] recipient: [1:15:2062] 2025-05-07T08:53:00.535258Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:53:00.535498Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: 
//MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:53:00.535726Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:53:00.535957Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:53:00.536011Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:53:00.539013Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:53:00.539146Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:53:00.539359Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:53:00.539444Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:53:00.539486Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:53:00.539521Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:53:00.547269Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:53:00.547350Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:53:00.547428Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:53:00.559516Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:53:00.559610Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:53:00.559688Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:53:00.559740Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:53:00.563927Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:53:00.566557Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet 
strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:53:00.566778Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:53:00.567954Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:53:00.568139Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 130 RawX2: 4294969450 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:53:00.568213Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:53:00.568556Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:53:00.568642Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:53:00.568839Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:53:00.568958Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:53:00.571611Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:53:00.571672Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:53:00.571890Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:53:00.571957Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 
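The TBSV::CleanupDroppedVolumesOnRestart trace continues below with a schemeshard restart: each "TTxInit for <Table>, read records: N" line is one persisted table being replayed, after which the volume dropped before the restart is fully cleaned up and describing its path returns StatusPathDoesNotExist. A minimal sketch of that replay loop, with an in-memory map standing in for the tablet's local database (the table names are taken from the log; everything else is illustrative):

    #include <cstdio>
    #include <map>
    #include <string>
    #include <vector>

    // In-memory stand-in for the tablet's persisted local database:
    // table name -> rows.
    using TDb = std::map<std::string, std::vector<std::string>>;

    // On restart, init replays every persisted table and reports how many
    // records it read, mirroring the "TTxInit for <Table>, read records: N"
    // lines; a real implementation would rebuild in-memory state from the
    // rows and then finish any cleanup interrupted by the restart.
    static void TxInit(const TDb& db, const std::vector<std::string>& tables) {
        for (const auto& name : tables) {
            auto it = db.find(name);
            std::size_t n = (it == db.end()) ? 0 : it->second.size();
            std::printf("TTxInit for %s, read records: %zu\n", name.c_str(), n);
        }
    }

    int main() {
        TDb db;
        db["Paths"] = {"MyRoot"}; // only the root path survives the dropped volume
        TxInit(db, {"Paths", "UserAttributes", "Tables", "Columns", "Shards",
                    "TablePartitions", "ChannelsBinding", "TableIndexes",
                    "TxShards", "ShardToDelete", "Publications"});
        return 0;
    }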
678944 2025-05-07T08:53:00.844469Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:53:00.844734Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 Leader for TabletID 72057594046678944 is [1:407:2383] sender: [1:473:2058] recipient: [1:15:2062] 2025-05-07T08:53:00.906971Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/BSVolume" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T08:53:00.907274Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/BSVolume" took 286us result status StatusPathDoesNotExist 2025-05-07T08:53:00.907442Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/BSVolume\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/BSVolume" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-05-07T08:53:00.908424Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 Leader for TabletID 72057594046678944 is [1:407:2383] sender: [1:474:2058] recipient: [1:102:2137] Leader for TabletID 72057594046678944 is [1:407:2383] sender: [1:477:2058] recipient: [1:476:2436] Leader for TabletID 72057594046678944 is [1:407:2383] sender: [1:478:2058] recipient: [1:15:2062] Leader for TabletID 72057594046678944 is [1:479:2437] sender: [1:480:2058] recipient: [1:476:2436] 2025-05-07T08:53:00.982437Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:53:00.982577Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:53:00.982619Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:53:00.982673Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:53:00.982711Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:53:00.982743Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: 
OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:53:00.982806Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:53:00.982882Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:53:00.983558Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:53:00.983887Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:53:01.007399Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:53:01.008772Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:53:01.008946Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:53:01.009069Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:53:01.009104Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:53:01.009426Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:53:01.010265Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1383: TTxInit for Paths, read records: 1, at schemeshard: 72057594046678944 2025-05-07T08:53:01.010405Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1457: TTxInit for UserAttributes, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:53:01.010488Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1483: TTxInit for UserAttributesAlterData, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:53:01.010967Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1785: TTxInit for Tables, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:53:01.011062Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__root_data_erasure_manager.cpp:450: [RootDataErasureManager] Restore: Generation# 0, Status# 0, WakeupInterval# 604800 s, NumberDataErasureTenantsInRunning# 0 2025-05-07T08:53:01.011240Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2011: TTxInit for Columns, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:53:01.011329Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2071: TTxInit for ColumnsAlters, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:53:01.011450Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2129: TTxInit for Shards, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:53:01.011542Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2215: TTxInit for TablePartitions, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:53:01.011658Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2281: TTxInit for TableShardPartitionConfigs, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:53:01.011862Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2431: TTxInit for ChannelsBinding, read records: 0, at schemeshard: 
72057594046678944 2025-05-07T08:53:01.012138Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2810: TTxInit for TableIndexes, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:53:01.012252Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2889: TTxInit for TableIndexKeys, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:53:01.012619Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3387: TTxInit for KesusInfos, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:53:01.012705Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3423: TTxInit for KesusAlters, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:53:01.012911Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3637: TTxInit for TxShards, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:53:01.013046Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3782: TTxInit for ShardToDelete, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:53:01.013136Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3799: TTxInit for BackupSettings, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:53:01.013349Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3959: TTxInit for ShardBackupStatus, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:53:01.013440Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3975: TTxInit for CompletedBackup, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:53:01.013569Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4260: TTxInit for Publications, read records: 0, at schemeshard: 72057594046678944 2025-05-07T08:53:01.013794Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4558: IndexBuild , records: 0, at schemeshard: 72057594046678944 2025-05-07T08:53:01.013927Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4705: SnapshotTables: snapshots: 0 tables: 0, at schemeshard: 72057594046678944 2025-05-07T08:53:01.015781Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4732: SnapshotSteps: snapshots: 0, at schemeshard: 72057594046678944 2025-05-07T08:53:01.015858Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4759: LongLocks: records: 0, at schemeshard: 72057594046678944 2025-05-07T08:53:01.026260Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:53:01.026405Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:53:01.026871Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:53:01.026945Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:53:01.027011Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:53:01.030013Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594046678944 is [1:479:2437] sender: [1:540:2058] recipient: [1:15:2062] 2025-05-07T08:53:01.063066Z 
node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/BSVolume" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T08:53:01.063363Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/BSVolume" took 328us result status StatusPathDoesNotExist 2025-05-07T08:53:01.063535Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/BSVolume\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/BSVolume" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 >> TSchemeShardExtSubDomainTest::AlterRequiresParamCombinations-AlterDatabaseCreateHiveFirst-true [GOOD] >> TSchemeShardExtSubDomainTest::AlterNameConflicts-AlterDatabaseCreateHiveFirst-false >> TSchemeShardExtSubDomainTest::CreateAndAlterThenDropChangesParent-ExternalHive [GOOD] >> TSchemeShardExtSubDomainTest::CreateAndAlterThenDropChangesParent-AlterDatabaseCreateHiveFirst >> TYardTest::TestLogWriteCutUnequal [GOOD] >> TYardTest::TestLogMultipleWriteRead ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_extsubdomain/unittest >> TSchemeShardExtSubDomainTest::CreateAndAlterAlterAddStoragePool-AlterDatabaseCreateHiveFirst [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:128:2058] recipient: [1:108:2140] 2025-05-07T08:52:51.778975Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:52:51.779094Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:52:51.779137Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:52:51.779176Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:52:51.779241Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:52:51.779291Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 
2025-05-07T08:52:51.779351Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:52:51.779420Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:52:51.780232Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:52:51.780664Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:52:51.889189Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:52:51.889259Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:52:51.919288Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:52:51.919460Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:52:51.919654Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:52:51.947839Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:52:51.948448Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:52:51.949146Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:52:51.949512Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:52:51.952397Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:52:51.954146Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:52:51.954223Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:52:51.954297Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:52:51.954353Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:52:51.954416Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:52:51.954623Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:52:51.966012Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:125:2151] sender: 
[1:239:2058] recipient: [1:15:2062] 2025-05-07T08:52:52.108818Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:52:52.109071Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:52:52.109309Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:52:52.109574Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:52:52.109643Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:52:52.119055Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:52:52.119237Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:52:52.119498Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:52:52.119551Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:52:52.119611Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:52:52.119663Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:52:52.127405Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:52:52.127512Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:52:52.127555Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:52:52.130084Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:52:52.130144Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:52:52.130219Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:52:52.130277Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:52:52.134476Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:52:52.136946Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:52:52.137173Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:52:52.138226Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:52:52.138422Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 130 RawX2: 4294969450 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:52:52.138505Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:52:52.138838Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:52:52.138920Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:52:52.139123Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:52:52.139213Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:52:52.141654Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:52:52.141714Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:52:52.141919Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:52:52.141962Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 
de 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-05-07T08:53:01.334418Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 104:0 2025-05-07T08:53:01.334459Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 104:0 2025-05-07T08:53:01.334527Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 6 2025-05-07T08:53:01.334577Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 104, publications: 1, subscribers: 0 2025-05-07T08:53:01.334620Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 104, [OwnerId: 72057594046678944, LocalPathId: 2], 6 2025-05-07T08:53:01.337384Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5782: Handle TEvUpdateTenantSchemeShard, at schemeshard: 72075186233409546, msg: TabletId: 72057594046678944 Generation: 2 UserAttributes { Key: "user__attr_1" Value: "value" } UserAttributesVersion: 2 2025-05-07T08:53:01.337542Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__sync_update_tenants.cpp:79: TTxUpdateTenant DoExecute, msg: TabletId: 72057594046678944 Generation: 2 UserAttributes { Key: "user__attr_1" Value: "value" } UserAttributesVersion: 2, at schemeshard: 72075186233409546 2025-05-07T08:53:01.337788Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:588: Cannot publish paths for unknown operation id#0 2025-05-07T08:53:01.338237Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:53:01.338300Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 104, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-05-07T08:53:01.338576Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:53:01.338632Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [7:206:2208], at schemeshard: 72057594046678944, txId: 104, path id: 2 FAKE_COORDINATOR: Erasing txId 104 2025-05-07T08:53:01.339412Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 6 PathOwnerId: 72057594046678944, cookie: 104 2025-05-07T08:53:01.339549Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 6 PathOwnerId: 72057594046678944, cookie: 104 2025-05-07T08:53:01.339610Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 104 2025-05-07T08:53:01.339673Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 104, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 6 
2025-05-07T08:53:01.339734Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5 2025-05-07T08:53:01.339850Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 104, subscribers: 0 2025-05-07T08:53:01.343472Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5769: Handle TEvSyncTenantSchemeShard, at schemeshard: 72057594046678944, msg: DomainSchemeShard: 72057594046678944 DomainPathId: 2 TabletID: 72075186233409546 Generation: 2 EffectiveACLVersion: 0 SubdomainVersion: 3 UserAttributesVersion: 2 TenantHive: 18446744073709551615 TenantSysViewProcessor: 18446744073709551615 TenantRootACL: "" TenantStatisticsAggregator: 18446744073709551615 TenantGraphShard: 18446744073709551615 2025-05-07T08:53:01.343584Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__sync_update_tenants.cpp:26: TTxSyncTenant DoExecute, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-05-07T08:53:01.343722Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:567: DoUpdateTenant no hasChanges, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], tenantLink: TSubDomainsLinks::TLink { DomainKey: [OwnerId: 72057594046678944, LocalPathId: 2], Generation: 2, ActorId:[7:355:2334], EffectiveACLVersion: 0, SubdomainVersion: 3, UserAttributesVersion: 2, TenantHive: 18446744073709551615, TenantSysViewProcessor: 18446744073709551615, TenantStatisticsAggregator: 18446744073709551615, TenantGraphShard: 18446744073709551615, TenantRootACL: }, subDomain->GetVersion(): 3, actualEffectiveACLVersion: 0, actualUserAttrsVersion: 2, tenantHive: 18446744073709551615, tenantSysViewProcessor: 18446744073709551615, at schemeshard: 72057594046678944 2025-05-07T08:53:01.343865Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72075186233409546 2025-05-07T08:53:01.343904Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72075186233409546, txId: 0, path id: [OwnerId: 72075186233409546, LocalPathId: 1] 2025-05-07T08:53:01.344079Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72075186233409546 2025-05-07T08:53:01.344118Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [7:445:2397], at schemeshard: 72075186233409546, txId: 0, path id: 1 2025-05-07T08:53:01.344953Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 2025-05-07T08:53:01.345062Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__sync_update_tenants.cpp:36: TTxSyncTenant DoComplete, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-05-07T08:53:01.345237Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72075186233409546, msg: Owner: 72075186233409546 Generation: 2 LocalPathId: 1 Version: 6 PathOwnerId: 72075186233409546, cookie: 0 TestModificationResult got TxId: 104, wait until txId: 104 TestWaitNotification wait txId: 104 2025-05-07T08:53:01.345531Z node 7 
:FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 104: send EvNotifyTxCompletion 2025-05-07T08:53:01.345595Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 104 2025-05-07T08:53:01.346149Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 104, at schemeshard: 72057594046678944 2025-05-07T08:53:01.346274Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 104: got EvNotifyTxCompletionResult 2025-05-07T08:53:01.346326Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 104: satisfy waiter [7:541:2491] TestWaitNotification: OK eventTxId 104 2025-05-07T08:53:01.346956Z node 7 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T08:53:01.347201Z node 7 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 280us result status StatusSuccess 2025-05-07T08:53:01.347678Z node 7 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0" PathDescription { Self { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeExtSubDomain CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 6 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 6 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 2 ChildrenVersion: 1 SubDomainVersion: 3 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 3 PlanResolution: 50 Coordinators: 72075186233409547 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409548 SchemeShard: 72075186233409546 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "pool-1" Kind: "hdd" } StoragePools { Name: "pool-2" Kind: "hdd-1" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } } UserAttributes { Key: "user__attr_1" Value: "value" } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:53:01.348423Z node 7 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72075186233409546 2025-05-07T08:53:01.348641Z node 7 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72075186233409546 describe path "/MyRoot/USER_0" took 245us result status StatusSuccess 2025-05-07T08:53:01.349080Z node 7 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: 
"/MyRoot/USER_0" PathDescription { Self { Name: "MyRoot/USER_0" PathId: 1 SchemeshardId: 72075186233409546 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 6 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 6 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 2 ChildrenVersion: 1 SubDomainVersion: 3 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 2 ProcessingParams { Version: 3 PlanResolution: 50 Coordinators: 72075186233409547 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409548 SchemeShard: 72075186233409546 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "pool-1" Kind: "hdd" } StoragePools { Name: "pool-2" Kind: "hdd-1" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot/USER_0" } } UserAttributes { Key: "user__attr_1" Value: "value" } } PathId: 1 PathOwnerId: 72075186233409546, at schemeshard: 72075186233409546 |89.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/vdisk/skeleton/ut/ydb-core-blobstorage-vdisk-skeleton-ut >> KqpIndexes::UpdateDeletePlan+UseSink [GOOD] >> TSchemeShardExtSubDomainTest::CreateAndAlterAlterSameStoragePools-ExternalHive [GOOD] >> TSchemeShardExtSubDomainTest::CreateAndAlterAlterSameStoragePools-AlterDatabaseCreateHiveFirst |89.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/vdisk/skeleton/ut/ydb-core-blobstorage-vdisk-skeleton-ut |89.8%| [LD] {RESULT} $(B)/ydb/core/blobstorage/vdisk/skeleton/ut/ydb-core-blobstorage-vdisk-skeleton-ut ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_extsubdomain/unittest >> TSchemeShardExtSubDomainTest::Drop-AlterDatabaseCreateHiveFirst-ExternalHive [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140] 2025-05-07T08:52:52.365793Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:52:52.365944Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:52:52.366007Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:52:52.366072Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:52:52.366125Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:52:52.366164Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:52:52.366289Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:52:52.366368Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:52:52.367274Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:52:52.367690Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:52:52.450763Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:52:52.450834Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:52:52.468688Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:52:52.468867Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:52:52.469015Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:52:52.479404Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:52:52.479732Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:52:52.480720Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:52:52.480969Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:52:52.484657Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:52:52.486211Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:52:52.486292Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:52:52.486475Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:52:52.486545Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:52:52.486675Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:52:52.486934Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:52:52.495461Z node 1 :HIVE INFO: 
tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T08:52:52.658757Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:52:52.659021Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:52:52.659291Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:52:52.659572Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:52:52.659642Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:52:52.676226Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:52:52.676364Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:52:52.676566Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:52:52.676619Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:52:52.676684Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:52:52.676721Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:52:52.684500Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:52:52.684592Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:52:52.684638Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:52:52.691165Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:52:52.691238Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:52:52.691326Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: 
NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:52:52.691388Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:52:52.699661Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:52:52.703146Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:52:52.703398Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:52:52.704452Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:52:52.704622Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:52:52.704692Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:52:52.704994Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:52:52.705064Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:52:52.705266Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:52:52.705364Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:52:52.711604Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:52:52.711674Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:52:52.711875Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:52:52.711926Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 
7594046678944 ShardLocalIdx: 1 TxId_Deprecated: 1 TabletID: 72075186233409546 2025-05-07T08:53:01.566354Z node 6 :HIVE INFO: tablet_helpers.cpp:1356: [72075186233409546] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 3 TxId_Deprecated: 3 TabletID: 72075186234409547 2025-05-07T08:53:01.566619Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5898: Free tablet reply, message: Status: OK Origin: 72075186233409546 TxId_Deprecated: 1 ShardOwnerId: 72057594046678944 ShardLocalIdx: 1, at schemeshard: 72057594046678944 2025-05-07T08:53:01.567007Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5 2025-05-07T08:53:01.568463Z node 6 :HIVE INFO: tablet_helpers.cpp:1356: [72075186233409546] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 2 TxId_Deprecated: 2 TabletID: 72075186234409546 2025-05-07T08:53:01.568876Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5898: Free tablet reply, message: Status: OK Origin: 72075186233409546 TxId_Deprecated: 3 ShardOwnerId: 72057594046678944 ShardLocalIdx: 3, at schemeshard: 72057594046678944 2025-05-07T08:53:01.569163Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-05-07T08:53:01.571205Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 Forgetting tablet 72075186234409547 2025-05-07T08:53:01.577223Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 105 2025-05-07T08:53:01.577719Z node 6 :HIVE INFO: tablet_helpers.cpp:1356: [72075186233409546] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 4 TxId_Deprecated: 4 TabletID: 72075186234409548 2025-05-07T08:53:01.577920Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5898: Free tablet reply, message: Status: OK Origin: 72075186233409546 TxId_Deprecated: 2 ShardOwnerId: 72057594046678944 ShardLocalIdx: 2, at schemeshard: 72057594046678944 2025-05-07T08:53:01.578306Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 Forgetting tablet 72075186234409546 2025-05-07T08:53:01.579432Z node 6 :TX_DATASHARD ERROR: datashard.cpp:3573: Datashard's schemeshard pipe destroyed while no messages to sent at 72075186234409549 2025-05-07T08:53:01.579507Z node 6 :TX_DATASHARD ERROR: datashard.cpp:3573: Datashard's schemeshard pipe destroyed while no messages to sent at 72075186234409550 Forgetting tablet 72075186234409548 2025-05-07T08:53:01.581221Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5898: Free tablet reply, message: Status: OK Origin: 72075186233409546 TxId_Deprecated: 4 ShardOwnerId: 72057594046678944 ShardLocalIdx: 4, at schemeshard: 72057594046678944 2025-05-07T08:53:01.581472Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-05-07T08:53:01.592006Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:205: TTxCleanDroppedSubDomains Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-05-07T08:53:01.592106Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: PersistRemoveSubDomain for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-05-07T08:53:01.592299Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-05-07T08:53:01.592934Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-05-07T08:53:01.593005Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-05-07T08:53:01.593100Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:53:01.604768Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:1 2025-05-07T08:53:01.604864Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:1 tabletId 72075186233409546 2025-05-07T08:53:01.605016Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:3 2025-05-07T08:53:01.605046Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:3 tabletId 72075186234409547 2025-05-07T08:53:01.605131Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2 2025-05-07T08:53:01.605166Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:2 tabletId 72075186234409546 2025-05-07T08:53:01.608226Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:4 2025-05-07T08:53:01.608328Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:4 tabletId 72075186234409548 2025-05-07T08:53:01.608611Z node 6 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-05-07T08:53:01.608711Z node 6 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 105, wait until txId: 105 TestWaitNotification wait txId: 105 2025-05-07T08:53:01.609090Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 105: send EvNotifyTxCompletion 2025-05-07T08:53:01.609153Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 105 2025-05-07T08:53:01.609715Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 105, at schemeshard: 72057594046678944 2025-05-07T08:53:01.609863Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 105: got EvNotifyTxCompletionResult 2025-05-07T08:53:01.609913Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: 
tests -- TTxNotificationSubscriber for txId 105: satisfy waiter [6:785:2694] TestWaitNotification: OK eventTxId 105 2025-05-07T08:53:01.610706Z node 6 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0/dir/table_1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T08:53:01.610978Z node 6 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0/dir/table_1" took 334us result status StatusPathDoesNotExist 2025-05-07T08:53:01.611176Z node 6 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0/dir/table_1\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/USER_0/dir/table_1" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-05-07T08:53:01.611822Z node 6 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T08:53:01.612021Z node 6 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 225us result status StatusPathDoesNotExist 2025-05-07T08:53:01.612187Z node 6 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/USER_0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-05-07T08:53:01.612796Z node 6 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T08:53:01.618780Z node 6 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 5.93ms result status StatusSuccess 2025-05-07T08:53:01.619385Z node 6 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: 
TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> KqpPg::InsertNoTargetColumns_NotOneSize+useSink [GOOD] >> KqpPg::InsertNoTargetColumns_NotOneSize-useSink |89.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/storagepoolmon/ut/unittest |89.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/storagepoolmon/ut/unittest >> TSchemeShardExtSubDomainTest::AlterNameConflicts-AlterDatabaseCreateHiveFirst-false [GOOD] >> TSchemeShardExtSubDomainTest::AlterNameConflicts-AlterDatabaseCreateHiveFirst-true |89.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/storagepoolmon/ut/unittest >> TYardTest::TestLogMultipleWriteRead [GOOD] >> TYardTest::TestLogContinuityPersistence >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-52 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-53 >> TSchemeShardExtSubDomainTest::CreateAndAlterThenDropChangesParent-AlterDatabaseCreateHiveFirst [GOOD] >> TSchemeShardExtSubDomainTest::CreateAndAlterThenDropChangesParent-AlterDatabaseCreateHiveFirst-ExternalHive |89.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/storagepoolmon/ut/unittest |89.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/storagepoolmon/ut/unittest |89.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_locks/ydb-core-tx-datashard-ut_locks |89.8%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_locks/ydb-core-tx-datashard-ut_locks >> TSchemeShardExtSubDomainTest::CreateAndAlterAlterSameStoragePools-AlterDatabaseCreateHiveFirst [GOOD] >> TSchemeShardExtSubDomainTest::CreateAndAlterAlterSameStoragePools-AlterDatabaseCreateHiveFirst-ExternalHive |89.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_locks/ydb-core-tx-datashard-ut_locks |89.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/with_offset_ranges_mode_ut/with_offset_ranges_mode_ut |89.8%| [LD] {RESULT} $(B)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/with_offset_ranges_mode_ut/with_offset_ranges_mode_ut |89.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/with_offset_ranges_mode_ut/with_offset_ranges_mode_ut 
|89.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/sys_view/query_stats/ut/unittest |89.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/fq/libs/ydb/ut/ydb-core-fq-libs-ydb-ut |89.8%| [LD] {RESULT} $(B)/ydb/core/fq/libs/ydb/ut/ydb-core-fq-libs-ydb-ut |89.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/fq/libs/ydb/ut/ydb-core-fq-libs-ydb-ut |89.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_column_build/ydb-core-tx-schemeshard-ut_column_build |89.8%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_column_build/ydb-core-tx-schemeshard-ut_column_build |89.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_column_build/ydb-core-tx-schemeshard-ut_column_build ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpIndexes::UpdateDeletePlan+UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 17611, MsgBus: 28583 2025-05-07T08:52:18.557178Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501623974970724995:2068];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:52:18.558039Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003c3f/r3tmp/tmp6Bzkov/pdisk_1.dat 2025-05-07T08:52:19.469665Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:52:19.505296Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:52:19.517663Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:52:19.520326Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 17611, node 1 2025-05-07T08:52:19.832677Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:52:19.832696Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:52:19.832714Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:52:19.832832Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:28583 TClient is connected to server localhost:28583 WaitRootIsUp 'Root'... 
TClient::Ls request: Root
TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED)
WaitRootIsUp 'Root' success.
2025-05-07T08:52:21.057287Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480
waiting...
2025-05-07T08:52:21.101409Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480
waiting...
2025-05-07T08:52:21.352520Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480
waiting...
2025-05-07T08:52:21.641900Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480
waiting...
2025-05-07T08:52:21.762017Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480
waiting...
2025-05-07T08:52:23.566981Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501623974970724995:2068];send_to=[0:7307199536658146131:7762515];
2025-05-07T08:52:23.567040Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout;
2025-05-07T08:52:24.647748Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501624000740530414:2410], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T08:52:24.647841Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T08:52:25.244560Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480
2025-05-07T08:52:25.307273Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480
2025-05-07T08:52:25.367105Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480
2025-05-07T08:52:25.487599Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480
2025-05-07T08:52:25.576390Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480
2025-05-07T08:52:25.659422Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480
2025-05-07T08:52:25.751301Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480
2025-05-07T08:52:25.891044Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501624005035498381:2476], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T08:52:25.891114Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T08:52:25.891452Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501624005035498386:2479], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T08:52:25.895458Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480
2025-05-07T08:52:25.911664Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710669, at schemeshard: 72057594046644480
2025-05-07T08:52:25.911860Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501624005035498388:2480], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T08:52:25.977699Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501624005035498439:3429] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:52:27.246126Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269877761, Sender [1:7501624013625433332:3606], Recipient [1:7501623979265692724:2200]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-05-07T08:52:27.246170Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4937: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-05-07T08:52:27.246182Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5713: Pipe server connected, at tablet: 72057594046644480 2025-05-07T08:52:27.246227Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271122432, Sender [1:7501624013625433328:3603], Recipient [1:7501623979265692724:2200]: {TEvModifySchemeTransaction txid# 281474976710672 TabletId# 72057594046644480} 2025-05-07T08:52:27.246244Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4851: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-05-07T08:52:27.352221Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/Root" OperationType: ESchemeOpCreateIndexedTable CreateIndexedTable { TableDescription { Name: "TestTable" Columns { Name: "k1" Type: "String" NotNull: false } Columns { Name: "k2" Type: "String" NotNull: false } Columns { Name: "fk1" Type: "String" NotNull: false } Columns { Name: "fk2" Type: "Int32" NotNull: false } Columns { Name: "fk3" Type: "Uint64" NotNull: false } Columns { Name: "Value" Type: "String" NotNull: false } KeyColumnNames: "k1" KeyColumnNames: "k2" PartitionConfig { ColumnFamilies { Id: 0 StorageConfig { SysLog { PreferredPoolKind: "test" } Log { PreferredPoolKind: "test" } Data { PreferredPoolKind: "test" } } } } Temporary: false } IndexDescription { Name: "Index" KeyColumnNames: "fk1" KeyColumnNames: "fk2" KeyColumnNames: "fk3" KeyColumnNames: "k2" Type: EIndexTypeGlobalUnique IndexImplTableDescriptions { PartitionConfig { } } } } } TxId: 281474976710672 TabletId: 72057594046644480 PeerName: "ipv6:[::1]:45444" , at schemeshard: 72057594046644480 2025-05-07T08:52:27.352694Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_create_indexed_table.cpp:101: TCreateTableIndex construct operation table path: /Root/TestTable domain path id: [OwnerId: 72057594046644480, LocalPathId: 1] domain path: /Root shardsToCreate: 2 GetShardsInside: 34 MaxShards: 200000 2025-05-07T08:52:27.353205Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_table.cpp:426: TCreateTable Propose, path: /Root/TestTable, opId: 281474976710672:0, at schemeshard: 72057594046644480 2025-05-07T08:52:27.353348Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_create_table.cpp:433: TCreateTable ... 
d, operationId: 281474976715672:2, shardIdx: 72057594046644480:36, datashard: 72075186224037923, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046644480 2025-05-07T08:53:00.544332Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:674: all shard schema changes has been received, operationId: 281474976715672:2, at schemeshard: 72057594046644480 2025-05-07T08:53:00.544342Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:686: send schema changes ack message, operation: 281474976715672:2, datashard: 72075186224037923, at schemeshard: 72057594046644480 2025-05-07T08:53:00.544355Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 281474976715672:2 129 -> 240 2025-05-07T08:53:00.544430Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-05-07T08:53:00.544545Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 281474976715672:2, at schemeshard: 72057594046644480 2025-05-07T08:53:00.544553Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-05-07T08:53:00.544604Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 281474976715672:0, at schemeshard: 72057594046644480 2025-05-07T08:53:00.544609Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-05-07T08:53:00.544644Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 281474976715672:0, at schemeshard: 72057594046644480 2025-05-07T08:53:00.544652Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-05-07T08:53:00.544662Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:275: Activate send for 281474976715672:0 2025-05-07T08:53:00.544702Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:630: Send to actor: [3:7501624154880106231:2523] msg type: 269552132 msg: NKikimrTxDataShard.TEvSchemaChangedResult TxId: 281474976715672 at schemeshard: 72057594046644480 2025-05-07T08:53:00.544751Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 2146435072, Sender [3:7501624120520365494:2146], Recipient [3:7501624120520365494:2146]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-05-07T08:53:00.544764Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4857: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-05-07T08:53:00.544788Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 281474976715672:0, at schemeshard: 72057594046644480 2025-05-07T08:53:00.544805Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046644480] TDone opId# 281474976715672:0 ProgressState 2025-05-07T08:53:00.544883Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-05-07T08:53:00.544902Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976715672:0 progress is 2/3 
2025-05-07T08:53:00.544915Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 281474976715672 ready parts: 2/3 2025-05-07T08:53:00.544931Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976715672:0 progress is 2/3 2025-05-07T08:53:00.544940Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 281474976715672 ready parts: 2/3 2025-05-07T08:53:00.544951Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 281474976715672, ready parts: 2/3, is published: true 2025-05-07T08:53:00.545198Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 281474976715672:2, at schemeshard: 72057594046644480 2025-05-07T08:53:00.545205Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-05-07T08:53:00.545211Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:275: Activate send for 281474976715672:2 2025-05-07T08:53:00.545840Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:630: Send to actor: [3:7501624154880106232:2524] msg type: 269552132 msg: NKikimrTxDataShard.TEvSchemaChangedResult TxId: 281474976715672 at schemeshard: 72057594046644480 2025-05-07T08:53:00.545983Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 2146435072, Sender [3:7501624120520365494:2146], Recipient [3:7501624120520365494:2146]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-05-07T08:53:00.546002Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4857: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-05-07T08:53:00.546043Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 281474976715672:2, at schemeshard: 72057594046644480 2025-05-07T08:53:00.546066Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046644480] TDone opId# 281474976715672:2 ProgressState 2025-05-07T08:53:00.546145Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-05-07T08:53:00.546161Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976715672:2 progress is 3/3 2025-05-07T08:53:00.546173Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 281474976715672 ready parts: 3/3 2025-05-07T08:53:00.546196Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976715672:2 progress is 3/3 2025-05-07T08:53:00.546208Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 281474976715672 ready parts: 3/3 2025-05-07T08:53:00.546221Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 281474976715672, ready parts: 3/3, is published: true 2025-05-07T08:53:00.546266Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [3:7501624154880106205:2521] message: TxId: 281474976715672 2025-05-07T08:53:00.546292Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation 
IsReadyToDone TxId: 281474976715672 ready parts: 3/3 2025-05-07T08:53:00.546320Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 281474976715672:0 2025-05-07T08:53:00.546333Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 281474976715672:0 2025-05-07T08:53:00.546491Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 17] was 4 2025-05-07T08:53:00.546513Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 281474976715672:1 2025-05-07T08:53:00.546521Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 281474976715672:1 2025-05-07T08:53:00.546541Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 18] was 3 2025-05-07T08:53:00.546549Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 281474976715672:2 2025-05-07T08:53:00.546555Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 281474976715672:2 2025-05-07T08:53:00.546591Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 19] was 3 2025-05-07T08:53:00.546956Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-05-07T08:53:00.547245Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-05-07T08:53:00.547306Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:630: Send to actor: [3:7501624154880106205:2521] msg type: 271124998 msg: NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976715672 at schemeshard: 72057594046644480 2025-05-07T08:53:00.548448Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269877764, Sender [3:7501624154880106306:3670], Recipient [3:7501624120520365494:2146]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-05-07T08:53:00.548485Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4938: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-05-07T08:53:00.548499Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5761: Server pipe is reset, at schemeshard: 72057594046644480 2025-05-07T08:53:00.548539Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269877764, Sender [3:7501624154880106214:3604], Recipient [3:7501624120520365494:2146]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-05-07T08:53:00.548556Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4938: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-05-07T08:53:00.548566Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5761: Server pipe is reset, at schemeshard: 72057594046644480 2025-05-07T08:53:00.551300Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269877764, Sender [3:7501624154880106305:3669], Recipient [3:7501624120520365494:2146]: 
NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-05-07T08:53:00.551327Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4938: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-05-07T08:53:00.551339Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5761: Server pipe is reset, at schemeshard: 72057594046644480 2025-05-07T08:53:00.773789Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271125000, Sender [0:0:0], Recipient [3:7501624120520365494:2146]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T08:53:00.773834Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4848: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T08:53:00.773879Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271124999, Sender [3:7501624120520365494:2146], Recipient [3:7501624120520365494:2146]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T08:53:00.773901Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4847: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime >> TSchemeShardExtSubDomainTest::AlterNameConflicts-AlterDatabaseCreateHiveFirst-true [GOOD] >> QueryStats::Ranges [GOOD] |89.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/sys_view/query_stats/ut/unittest |89.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/sys_view/query_stats/ut/unittest >> TPQTestSlow::TestOnDiskStoredSourceIds >> TYardTest::TestLogContinuityPersistence [GOOD] >> TYardTest::TestLogContinuityPersistenceLarge |89.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/slow/unittest |89.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/sys_view/query_stats/ut/unittest >> QueryStats::Ranges [GOOD] >> TSchemeShardExtSubDomainTest::CreateAndAlterThenDropChangesParent-AlterDatabaseCreateHiveFirst-ExternalHive [GOOD] |89.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/slow/unittest >> KqpMultishardIndex::WriteIntoRenamingSyncIndex [GOOD] >> KqpMultishardIndex::WriteIntoRenamingAsyncIndex >> TSchemeShardExtSubDomainTest::CreateAndAlterAlterSameStoragePools-AlterDatabaseCreateHiveFirst-ExternalHive [GOOD] >> TSchemeShardExtSubDomainTest::CreateAndAlterThenDropChangesParent ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_extsubdomain/unittest >> TSchemeShardExtSubDomainTest::AlterNameConflicts-AlterDatabaseCreateHiveFirst-true [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:128:2058] recipient: [1:108:2140] 2025-05-07T08:52:56.564221Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:52:56.564315Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:52:56.564371Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 
100, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:52:56.564413Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:52:56.564462Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:52:56.564493Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:52:56.564549Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:52:56.564633Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:52:56.565442Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:52:56.565817Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:52:56.654774Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:52:56.654843Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:52:56.671138Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:52:56.671264Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:52:56.671424Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:52:56.680712Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:52:56.681282Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:52:56.682115Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:52:56.682453Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:52:56.685280Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:52:56.687115Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:52:56.687196Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:52:56.687261Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:52:56.687316Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, 
LocalPathId: 1] 2025-05-07T08:52:56.687379Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:52:56.688186Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:52:56.696925Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:239:2058] recipient: [1:15:2062] 2025-05-07T08:52:56.854796Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:52:56.855135Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:52:56.855403Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:52:56.855687Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:52:56.855786Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:52:56.863055Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:52:56.863262Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:52:56.863494Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:52:56.863565Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:52:56.863635Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:52:56.863680Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:52:56.866156Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:52:56.866248Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:52:56.866295Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:52:56.868673Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress 
Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:52:56.868767Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:52:56.868835Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:52:56.868893Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:52:56.872834Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:52:56.875268Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:52:56.875503Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:52:56.876579Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:52:56.876759Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 130 RawX2: 4294969450 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:52:56.876833Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:52:56.877139Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:52:56.877197Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:52:56.877402Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:52:56.877498Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:52:56.880165Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:52:56.880266Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: 
[OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:52:56.880487Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:52:56.880532Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... n_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:53:03.963842Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:53:03.966460Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:53:03.966556Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:53:03.966618Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:53:03.968957Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:53:03.969023Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:53:03.969091Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:53:03.969167Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:53:03.969393Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:53:03.971473Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:53:03.971693Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:53:03.972715Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:53:03.972891Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 128 RawX2: 30064773225 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:53:03.972954Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, 
at tablet# 72057594046678944 2025-05-07T08:53:03.973283Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:53:03.973351Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:53:03.973586Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:53:03.973697Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:53:03.976327Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:53:03.976396Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:53:03.976671Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:53:03.976725Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [7:206:2208], at schemeshard: 72057594046678944, txId: 1, path id: 1 2025-05-07T08:53:03.977043Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:53:03.977107Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046678944] TDone opId# 1:0 ProgressState 2025-05-07T08:53:03.977246Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#1:0 progress is 1/1 2025-05-07T08:53:03.977295Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-05-07T08:53:03.977348Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#1:0 progress is 1/1 2025-05-07T08:53:03.977399Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-05-07T08:53:03.977453Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 1, ready parts: 1/1, is published: false 2025-05-07T08:53:03.977510Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-05-07T08:53:03.977568Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 1:0 2025-05-07T08:53:03.977610Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 1:0 2025-05-07T08:53:03.977710Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-05-07T08:53:03.977763Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 1, publications: 1, 
subscribers: 0 2025-05-07T08:53:03.977813Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 1, [OwnerId: 72057594046678944, LocalPathId: 1], 3 2025-05-07T08:53:03.978704Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-05-07T08:53:03.978862Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-05-07T08:53:03.978923Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1 2025-05-07T08:53:03.978978Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 3 2025-05-07T08:53:03.979039Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:53:03.979207Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1, subscribers: 0 2025-05-07T08:53:03.983188Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1 2025-05-07T08:53:03.983831Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1, at schemeshard: 72057594046678944 TestModificationResults wait txId: 101 2025-05-07T08:53:03.984646Z node 7 :TX_PROXY DEBUG: proxy_impl.cpp:434: actor# [7:269:2260] Bootstrap 2025-05-07T08:53:04.014247Z node 7 :TX_PROXY DEBUG: proxy_impl.cpp:453: actor# [7:269:2260] Become StateWork (SchemeCache [7:274:2265]) 2025-05-07T08:53:04.017374Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpAlterExtSubDomain SubDomain { PlanResolution: 50 Coordinators: 1 Mediators: 1 Name: "USER_1" ExternalSchemeShard: true } } TxId: 101 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:53:04.017615Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_extsubdomain.cpp:1102: [72057594046678944] CreateCompatibleAlterExtSubDomain, opId 101:0, feature flag EnableAlterDatabaseCreateHiveFirst 1, tx WorkingDir: "/MyRoot" OperationType: ESchemeOpAlterExtSubDomain SubDomain { PlanResolution: 50 Coordinators: 1 Mediators: 1 Name: "USER_1" ExternalSchemeShard: true } 2025-05-07T08:53:04.017692Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_extsubdomain.cpp:1108: [72057594046678944] CreateCompatibleAlterExtSubDomain, opId 101:0, path /MyRoot/USER_1 2025-05-07T08:53:04.017885Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_just_reject.cpp:47: TReject Propose, opId: 101:0, explain: Invalid AlterExtSubDomain request: Check failed: path: '/MyRoot/USER_1', error: path hasn't been resolved, nearest resolved path: '/MyRoot' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), at 
schemeshard: 72057594046678944 2025-05-07T08:53:04.017953Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 101:1, propose status:StatusPathDoesNotExist, reason: Invalid AlterExtSubDomain request: Check failed: path: '/MyRoot/USER_1', error: path hasn't been resolved, nearest resolved path: '/MyRoot' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), at schemeshard: 72057594046678944 2025-05-07T08:53:04.019879Z node 7 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [7:269:2260] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-05-07T08:53:04.027978Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 101, response: Status: StatusPathDoesNotExist Reason: "Invalid AlterExtSubDomain request: Check failed: path: \'/MyRoot/USER_1\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" TxId: 101 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:53:04.028221Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 101, database: /MyRoot, subject: , status: StatusPathDoesNotExist, reason: Invalid AlterExtSubDomain request: Check failed: path: '/MyRoot/USER_1', error: path hasn't been resolved, nearest resolved path: '/MyRoot' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), operation: ALTER DATABASE, path: /MyRoot/USER_1 2025-05-07T08:53:04.031903Z node 7 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 TestModificationResult got TxId: 101, wait until txId: 101 >> SlowTopicAutopartitioning::CDC_Write >> TPQTestSlow::TestWriteVeryBigMessage >> KqpVectorIndexes::OrderByCosineSimilarityNotNullableLevel1 [GOOD] >> KqpVectorIndexes::OrderByCosineDistanceNullableLevel2 |89.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/sys_view/query_stats/ut/unittest |89.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/base/ut_board_subscriber/ydb-core-base-ut_board_subscriber |89.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/base/ut_board_subscriber/ydb-core-base-ut_board_subscriber |89.8%| [LD] {RESULT} $(B)/ydb/core/base/ut_board_subscriber/ydb-core-base-ut_board_subscriber |89.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/sys_view/query_stats/ut/unittest |89.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/sys_view/query_stats/ut/unittest >> DataShardSnapshots::LockedWriteCleanupOnCopyTable+UseSink [GOOD] >> DataShardSnapshots::LockedWriteCleanupOnCopyTable-UseSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_extsubdomain/unittest >> TSchemeShardExtSubDomainTest::CreateAndAlterThenDropChangesParent-AlterDatabaseCreateHiveFirst-ExternalHive [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:128:2058] recipient: [1:108:2140] 2025-05-07T08:52:57.193573Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:52:57.193662Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:52:57.193703Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:52:57.193742Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:52:57.193800Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:52:57.193836Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:52:57.193892Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:52:57.194191Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:52:57.195006Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:52:57.195485Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:52:57.293711Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:52:57.293776Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:52:57.309016Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:52:57.309149Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:52:57.309312Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:52:57.318083Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:52:57.320681Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:52:57.321328Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:52:57.321661Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:52:57.323977Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:52:57.325511Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:52:57.325575Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:52:57.325626Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:52:57.325668Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:52:57.325704Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:52:57.325898Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:52:57.332566Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:239:2058] recipient: [1:15:2062] 2025-05-07T08:52:57.464603Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:52:57.464854Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:52:57.465074Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:52:57.465296Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:52:57.465355Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:52:57.467681Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:52:57.467854Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:52:57.468150Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:52:57.468203Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:52:57.468242Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:52:57.468273Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:52:57.470343Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:52:57.470425Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:52:57.470466Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:52:57.472443Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:52:57.472490Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:52:57.472554Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:52:57.472603Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:52:57.476433Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:52:57.479921Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:52:57.480136Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:52:57.481140Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:52:57.481285Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 130 RawX2: 4294969450 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:52:57.481343Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:52:57.481646Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:52:57.481702Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:52:57.481882Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:52:57.481960Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing 
txId 1 2025-05-07T08:52:57.484342Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:52:57.484400Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:52:57.484633Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:52:57.484688Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 103 2025-05-07T08:53:04.666484Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 103 2025-05-07T08:53:04.666522Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 103, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 18446744073709551615 2025-05-07T08:53:04.666573Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 7 2025-05-07T08:53:04.666661Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 103, ready parts: 0/1, is published: true 2025-05-07T08:53:04.685852Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:1 hive 72075186233409546 at ss 72057594046678944 2025-05-07T08:53:04.685942Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:3 hive 72075186233409546 at ss 72057594046678944 2025-05-07T08:53:04.686012Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:2 hive 72075186233409546 at ss 72057594046678944 2025-05-07T08:53:04.686043Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:4 hive 72075186233409546 at ss 72057594046678944 2025-05-07T08:53:04.686248Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 103:0, at schemeshard: 72057594046678944 2025-05-07T08:53:04.686306Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046678944] TDone opId# 103:0 ProgressState 2025-05-07T08:53:04.686461Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#103:0 progress is 1/1 2025-05-07T08:53:04.686514Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-05-07T08:53:04.686567Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#103:0 progress is 1/1 2025-05-07T08:53:04.686610Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-05-07T08:53:04.686664Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 103, ready parts: 1/1, is published: true 2025-05-07T08:53:04.686717Z node 7 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-05-07T08:53:04.686763Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 103:0 2025-05-07T08:53:04.686823Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 103:0 2025-05-07T08:53:04.687057Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 6 2025-05-07T08:53:04.689376Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-05-07T08:53:04.689731Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-05-07T08:53:04.690044Z node 7 :HIVE INFO: tablet_helpers.cpp:1356: [72075186233409546] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 1 TxId_Deprecated: 1 TabletID: 72075186233409546 2025-05-07T08:53:04.699235Z node 7 :HIVE INFO: tablet_helpers.cpp:1356: [72075186233409546] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 3 TxId_Deprecated: 3 TabletID: 72075186234409547 2025-05-07T08:53:04.699625Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5898: Free tablet reply, message: Status: OK Origin: 72075186233409546 TxId_Deprecated: 1 ShardOwnerId: 72057594046678944 ShardLocalIdx: 1, at schemeshard: 72057594046678944 2025-05-07T08:53:04.700030Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5 2025-05-07T08:53:04.701248Z node 7 :HIVE INFO: tablet_helpers.cpp:1356: [72075186233409546] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 2 TxId_Deprecated: 2 TabletID: 72075186234409546 2025-05-07T08:53:04.701620Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5898: Free tablet reply, message: Status: OK Origin: 72075186233409546 TxId_Deprecated: 3 ShardOwnerId: 72057594046678944 ShardLocalIdx: 3, at schemeshard: 72057594046678944 2025-05-07T08:53:04.703573Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-05-07T08:53:04.704818Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 Forgetting tablet 72075186234409547 2025-05-07T08:53:04.709516Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5898: Free tablet reply, message: Status: OK Origin: 72075186233409546 TxId_Deprecated: 2 ShardOwnerId: 72057594046678944 ShardLocalIdx: 2, at schemeshard: 72057594046678944 2025-05-07T08:53:04.709792Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-05-07T08:53:04.710187Z node 7 :HIVE INFO: tablet_helpers.cpp:1356: [72075186233409546] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 4 TxId_Deprecated: 4 TabletID: 72075186234409548 Forgetting tablet 72075186234409546 Forgetting tablet 72075186234409548 2025-05-07T08:53:04.712411Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5898: Free tablet reply, 
message: Status: OK Origin: 72075186233409546 TxId_Deprecated: 4 ShardOwnerId: 72057594046678944 ShardLocalIdx: 4, at schemeshard: 72057594046678944 2025-05-07T08:53:04.712672Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-05-07T08:53:04.722226Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:205: TTxCleanDroppedSubDomains Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-05-07T08:53:04.722345Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: PersistRemoveSubDomain for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-05-07T08:53:04.722535Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-05-07T08:53:04.723065Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-05-07T08:53:04.723141Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-05-07T08:53:04.723229Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:53:04.729051Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:1 2025-05-07T08:53:04.729143Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:1 tabletId 72075186233409546 2025-05-07T08:53:04.729388Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:3 2025-05-07T08:53:04.729427Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:3 tabletId 72075186234409547 2025-05-07T08:53:04.729486Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2 2025-05-07T08:53:04.729508Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:2 tabletId 72075186234409546 2025-05-07T08:53:04.732186Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:4 2025-05-07T08:53:04.732294Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:4 tabletId 72075186234409548 2025-05-07T08:53:04.732523Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-05-07T08:53:04.732614Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 103, wait until txId: 103 TestWaitNotification wait txId: 103 2025-05-07T08:53:04.732918Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber 
for txId 103: send EvNotifyTxCompletion 2025-05-07T08:53:04.732972Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 103 2025-05-07T08:53:04.733468Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 103, at schemeshard: 72057594046678944 2025-05-07T08:53:04.733611Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-05-07T08:53:04.733664Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [7:581:2522] TestWaitNotification: OK eventTxId 103 2025-05-07T08:53:04.734344Z node 7 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T08:53:04.734608Z node 7 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 316us result status StatusPathDoesNotExist 2025-05-07T08:53:04.734789Z node 7 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/USER_0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 |89.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/sys_view/query_stats/ut/unittest |89.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/sys_view/query_stats/ut/unittest >> TSchemeShardExtSubDomainTest::CreateAndAlterThenDropChangesParent [GOOD] >> DataShardSnapshots::VolatileSnapshotTimeoutRefresh [GOOD] >> DataShardSnapshots::VolatileSnapshotCleanupOnReboot >> TVPatchTests::FindingPartsWhenSeveralPartsExist >> TVPatchTests::PatchPartGetError |89.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/sys_view/query_stats/ut/unittest >> TVPatchTests::FullPatchTest [GOOD] >> TVPatchTests::FullPatchTestSpecialCase1 [GOOD] >> TVPatchTests::FindingPartsWhenSeveralPartsExist [GOOD] >> TVPatchTests::FindingPartsWithTimeout >> TOosLogicTests::RenderHtml [GOOD] >> TVPatchTests::FindingPartsWhenError >> TVPatchTests::PatchPartGetError [GOOD] |89.9%| [TA] $(B)/ydb/core/sys_view/query_stats/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} >> TVPatchTests::FindingPartsWithTimeout [GOOD] >> TVPatchTests::FindingPartsWhenError [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_extsubdomain/unittest >> TSchemeShardExtSubDomainTest::CreateAndAlterThenDropChangesParent [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:128:2058] recipient: [1:108:2140] 2025-05-07T08:52:58.071741Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:52:58.071831Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:52:58.071881Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:52:58.071926Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:52:58.071972Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:52:58.072008Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:52:58.072063Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:52:58.072133Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:52:58.072905Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:52:58.073296Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:52:58.157473Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:52:58.157533Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:52:58.174259Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:52:58.174418Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:52:58.174607Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:52:58.183592Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:52:58.184186Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:52:58.184905Z node 
1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:52:58.185219Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:52:58.187460Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:52:58.189138Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:52:58.189224Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:52:58.189291Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:52:58.189338Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:52:58.189385Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:52:58.189573Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:52:58.196222Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:239:2058] recipient: [1:15:2062] 2025-05-07T08:52:58.343155Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:52:58.343459Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:52:58.343733Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:52:58.343984Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:52:58.344042Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:52:58.351672Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:52:58.351862Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 
2025-05-07T08:52:58.352103Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:52:58.352169Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:52:58.352215Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:52:58.352252Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:52:58.355429Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:52:58.355500Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:52:58.355554Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:52:58.357578Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:52:58.357623Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:52:58.357664Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:52:58.357712Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:52:58.360819Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:52:58.362565Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:52:58.362772Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:52:58.363701Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:52:58.363841Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 130 RawX2: 4294969450 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:52:58.363908Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:52:58.364209Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:52:58.364274Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:52:58.364506Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:52:58.364596Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:52:58.366997Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:52:58.367050Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:52:58.367278Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:52:58.367325Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... , msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 103 2025-05-07T08:53:06.196984Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 103 2025-05-07T08:53:06.197022Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 103, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 7 2025-05-07T08:53:06.197063Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-05-07T08:53:06.197851Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 103 2025-05-07T08:53:06.197952Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 103 2025-05-07T08:53:06.198086Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 103 2025-05-07T08:53:06.198122Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 103, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 18446744073709551615 2025-05-07T08:53:06.198158Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: 
DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 6 2025-05-07T08:53:06.198234Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 103, ready parts: 0/1, is published: true 2025-05-07T08:53:06.200826Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:1 hive 72057594037968897 at ss 72057594046678944 2025-05-07T08:53:06.200896Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:3 hive 72057594037968897 at ss 72057594046678944 2025-05-07T08:53:06.200927Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:2 hive 72057594037968897 at ss 72057594046678944 2025-05-07T08:53:06.201757Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 103:0, at schemeshard: 72057594046678944 2025-05-07T08:53:06.201817Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046678944] TDone opId# 103:0 ProgressState 2025-05-07T08:53:06.202003Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#103:0 progress is 1/1 2025-05-07T08:53:06.202061Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-05-07T08:53:06.202123Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#103:0 progress is 1/1 2025-05-07T08:53:06.202173Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-05-07T08:53:06.202226Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 103, ready parts: 1/1, is published: true 2025-05-07T08:53:06.202287Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-05-07T08:53:06.202340Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 103:0 2025-05-07T08:53:06.202409Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 103:0 2025-05-07T08:53:06.202649Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5 2025-05-07T08:53:06.204461Z node 7 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 1 TxId_Deprecated: 1 TabletID: 72075186233409546 2025-05-07T08:53:06.204754Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5898: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 1 ShardOwnerId: 72057594046678944 ShardLocalIdx: 1, at schemeshard: 72057594046678944 2025-05-07T08:53:06.205113Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-05-07T08:53:06.205773Z node 7 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 3 TxId_Deprecated: 3 TabletID: 72075186233409548 Forgetting tablet 72075186233409546 2025-05-07T08:53:06.211291Z node 7 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:53:06.215688Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-05-07T08:53:06.220222Z node 7 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 2 TxId_Deprecated: 2 TabletID: 72075186233409547 2025-05-07T08:53:06.220639Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 Forgetting tablet 72075186233409548 2025-05-07T08:53:06.222192Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5898: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 3 ShardOwnerId: 72057594046678944 ShardLocalIdx: 3, at schemeshard: 72057594046678944 2025-05-07T08:53:06.222564Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 Forgetting tablet 72075186233409547 2025-05-07T08:53:06.223711Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5898: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 2 ShardOwnerId: 72057594046678944 ShardLocalIdx: 2, at schemeshard: 72057594046678944 2025-05-07T08:53:06.223953Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-05-07T08:53:06.224694Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:205: TTxCleanDroppedSubDomains Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-05-07T08:53:06.224788Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: PersistRemoveSubDomain for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-05-07T08:53:06.224953Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-05-07T08:53:06.225507Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-05-07T08:53:06.225578Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-05-07T08:53:06.225667Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:53:06.229450Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:1 2025-05-07T08:53:06.229572Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:1 tabletId 72075186233409546 2025-05-07T08:53:06.230632Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:3 2025-05-07T08:53:06.230680Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:3 tabletId 72075186233409548 
2025-05-07T08:53:06.233548Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2 2025-05-07T08:53:06.233635Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:2 tabletId 72075186233409547 2025-05-07T08:53:06.233850Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-05-07T08:53:06.234018Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 103, wait until txId: 103 TestWaitNotification wait txId: 103 2025-05-07T08:53:06.234327Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 103: send EvNotifyTxCompletion 2025-05-07T08:53:06.234414Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 103 2025-05-07T08:53:06.234989Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 103, at schemeshard: 72057594046678944 2025-05-07T08:53:06.235126Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-05-07T08:53:06.235182Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [7:536:2485] TestWaitNotification: OK eventTxId 103 2025-05-07T08:53:06.235779Z node 7 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T08:53:06.236031Z node 7 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 300us result status StatusPathDoesNotExist 2025-05-07T08:53:06.236254Z node 7 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/USER_0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 >> TVPatchTests::PatchPartFastXorDiffDisorder >> TVPatchTests::PatchPartOk >> KqpPg::SelectIndex+useSink [GOOD] >> KqpPg::SelectIndex-useSink |89.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/skeleton/ut/unittest >> TVPatchTests::FullPatchTestSpecialCase1 [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/skeleton/ut/unittest >> TVPatchTests::PatchPartGetError [GOOD] Test command 
err: Recv 65537 2025-05-07T08:53:07.064961Z node 1 :BS_VDISK_PATCH INFO: {BSVSP03@skeleton_vpatch_actor.cpp:190} [0:1:0:0:0] TEvVPatch: bootstrapped; OriginalBlobId# [1:2:3:4:6:10:0] Deadline# 1970-01-01T00:00:01.000000Z Send NKikimr::TEvBlobStorage::TEvVGet Recv NKikimr::TEvBlobStorage::TEvVGetResult 2025-05-07T08:53:07.067448Z node 1 :BS_VDISK_PATCH INFO: {BSVSP06@skeleton_vpatch_actor.cpp:266} [0:1:0:0:0] TEvVPatch: received parts index; OriginalBlobId# [1:2:3:4:6:10:0] Status# OK ResultSize# 1 2025-05-07T08:53:07.067542Z node 1 :BS_VDISK_PATCH INFO: {BSVSP04@skeleton_vpatch_actor.cpp:226} [0:1:0:0:0] TEvVPatch: sended found parts; OriginalBlobId# [1:2:3:4:6:10:0] FoundParts# [1] Status# OK Send NKikimr::TEvBlobStorage::TEvVPatchFoundParts Recv NKikimr::TEvBlobStorage::TEvVPatchDiff 2025-05-07T08:53:07.067818Z node 1 :BS_VDISK_PATCH INFO: {BSVSP09@skeleton_vpatch_actor.cpp:577} [0:1:0:0:0] TEvVPatch: received diff; OriginalBlobId# [1:2:3:4:6:10:0] PatchedBlobId# [1:3:3:4:6:10:0] OriginalPartId# 1 PatchedPartId# 1 XorReceiver# no ParityPart# no ForceEnd# no 2025-05-07T08:53:07.067908Z node 1 :BS_VDISK_PATCH INFO: {BSVSP05@skeleton_vpatch_actor.cpp:246} [0:1:0:0:0] TEvVPatch: send vGet for pulling part data; OriginalBlobId# [1:2:3:4:6:10:0] PullingPart# 1 Send NKikimr::TEvBlobStorage::TEvVGet Recv NKikimr::TEvBlobStorage::TEvVGetResult 2025-05-07T08:53:07.068126Z node 1 :BS_VDISK_PATCH INFO: {BSVSP07@skeleton_vpatch_actor.cpp:315} [0:1:0:0:0] TEvVPatch: send patch result; OriginalBlobId# [1:2:3:4:6:10:0] PatchedBlobId# [1:3:3:4:6:10:0] OriginalPartId# 1 PatchedPartId# 1 Status# ERROR ErrorReason# Recieve not OK status from VGetResult, received status# ERROR Send NKikimr::TEvBlobStorage::TEvVPatchResult 2025-05-07T08:53:07.068229Z node 1 :BS_VDISK_PATCH DEBUG: {BSVSP17@skeleton_vpatch_actor.cpp:727} [0:1:0:0:0] NotifySkeletonAboutDying; Send NKikimr::TEvVPatchDyingRequest Recv NKikimr::TEvVPatchDyingConfirm >> TVPatchTests::PatchPartFastXorDiffDisorder [GOOD] >> TVPatchTests::PatchPartOk [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/skeleton/ut/unittest >> TVPatchTests::FindingPartsWhenError [GOOD] Test command err: Recv 65537 2025-05-07T08:53:07.336641Z node 1 :BS_VDISK_PATCH INFO: {BSVSP03@skeleton_vpatch_actor.cpp:190} [0:1:0:0:0] TEvVPatch: bootstrapped; OriginalBlobId# [1:2:3:4:6:10:0] Deadline# 1970-01-01T00:00:01.000000Z Send NKikimr::TEvBlobStorage::TEvVGet Recv NKikimr::TEvBlobStorage::TEvVGetResult 2025-05-07T08:53:07.337840Z node 1 :BS_VDISK_PATCH INFO: {BSVSP06@skeleton_vpatch_actor.cpp:266} [0:1:0:0:0] TEvVPatch: received parts index; OriginalBlobId# [1:2:3:4:6:10:0] Status# ERROR ResultSize# 1 2025-05-07T08:53:07.337926Z node 1 :BS_VDISK_PATCH INFO: {BSVSP04@skeleton_vpatch_actor.cpp:226} [0:1:0:0:0] TEvVPatch: sended found parts; OriginalBlobId# [1:2:3:4:6:10:0] FoundParts# [] Status# ERROR Send NKikimr::TEvBlobStorage::TEvVPatchFoundParts 2025-05-07T08:53:07.338153Z node 1 :BS_VDISK_PATCH DEBUG: {BSVSP17@skeleton_vpatch_actor.cpp:727} [0:1:0:0:0] NotifySkeletonAboutDying; Send NKikimr::TEvVPatchDyingRequest Recv NKikimr::TEvVPatchDyingConfirm ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/skeleton/ut/unittest >> TVPatchTests::FindingPartsWithTimeout [GOOD] Test command err: Recv 65537 2025-05-07T08:53:07.031619Z node 1 :BS_VDISK_PATCH INFO: {BSVSP03@skeleton_vpatch_actor.cpp:190} [0:1:0:0:0] TEvVPatch: bootstrapped; OriginalBlobId# [1:2:3:4:6:10:0] Deadline# 1970-01-01T00:00:01.000000Z Send 
NKikimr::TEvBlobStorage::TEvVGet Recv NKikimr::TEvBlobStorage::TEvVGetResult 2025-05-07T08:53:07.032857Z node 1 :BS_VDISK_PATCH INFO: {BSVSP06@skeleton_vpatch_actor.cpp:266} [0:1:0:0:0] TEvVPatch: received parts index; OriginalBlobId# [1:2:3:4:6:10:0] Status# OK ResultSize# 1 2025-05-07T08:53:07.032952Z node 1 :BS_VDISK_PATCH INFO: {BSVSP04@skeleton_vpatch_actor.cpp:226} [0:1:0:0:0] TEvVPatch: sended found parts; OriginalBlobId# [1:2:3:4:6:10:0] FoundParts# [1 2] Status# OK Send NKikimr::TEvBlobStorage::TEvVPatchFoundParts Recv NKikimr::TEvBlobStorage::TEvVPatchDiff 2025-05-07T08:53:07.033190Z node 1 :BS_VDISK_PATCH INFO: {BSVSP09@skeleton_vpatch_actor.cpp:577} [0:1:0:0:0] TEvVPatch: received diff; OriginalBlobId# [1:2:3:4:6:10:0] PatchedBlobId# [1:3:3:4:6:10:0] OriginalPartId# 1 PatchedPartId# 1 XorReceiver# no ParityPart# no ForceEnd# yes 2025-05-07T08:53:07.033268Z node 1 :BS_VDISK_PATCH INFO: {BSVSP07@skeleton_vpatch_actor.cpp:315} [0:1:0:0:0] TEvVPatch: received force end; OriginalBlobId# [1:2:3:4:6:10:0] PatchedBlobId# [1:3:3:4:6:10:0] OriginalPartId# 1 PatchedPartId# 1 Status# OK ErrorReason# Send NKikimr::TEvBlobStorage::TEvVPatchResult 2025-05-07T08:53:07.033354Z node 1 :BS_VDISK_PATCH DEBUG: {BSVSP17@skeleton_vpatch_actor.cpp:727} [0:1:0:0:0] NotifySkeletonAboutDying; Send NKikimr::TEvVPatchDyingRequest Recv NKikimr::TEvVPatchDyingConfirm Recv 65537 2025-05-07T08:53:07.380398Z node 2 :BS_VDISK_PATCH INFO: {BSVSP03@skeleton_vpatch_actor.cpp:190} [0:1:0:0:0] TEvVPatch: bootstrapped; OriginalBlobId# [1:2:3:4:6:10:0] Deadline# 1970-01-01T00:00:01.000000Z Send NKikimr::TEvBlobStorage::TEvVGet Recv NActors::TEvents::TEvWakeup 2025-05-07T08:53:07.390956Z node 2 :BS_VDISK_PATCH ERROR: {BSVSP11@skeleton_vpatch_actor.cpp:734} [0:1:0:0:0] TEvVPatch: the vpatch actor died due to a deadline, before receiving diff; 2025-05-07T08:53:07.391060Z node 2 :BS_VDISK_PATCH INFO: {BSVSP04@skeleton_vpatch_actor.cpp:226} [0:1:0:0:0] TEvVPatch: sended found parts; OriginalBlobId# [1:2:3:4:6:10:0] FoundParts# [] Status# ERROR Send NKikimr::TEvBlobStorage::TEvVPatchFoundParts 2025-05-07T08:53:07.391152Z node 2 :BS_VDISK_PATCH DEBUG: {BSVSP17@skeleton_vpatch_actor.cpp:727} [0:1:0:0:0] NotifySkeletonAboutDying; Send NKikimr::TEvVPatchDyingRequest Recv NKikimr::TEvVPatchDyingConfirm >> ApplyClusterEndpointTest::NoPorts [GOOD] >> ApplyClusterEndpointTest::PortFromCds [GOOD] >> ApplyClusterEndpointTest::PortFromDriver [GOOD] >> BasicUsage::MaxByteSizeEqualZero >> BasicUsage::TWriteSession_WriteAndReadAndCommitRandomMessagesNoClusterDiscovery [GOOD] >> BasicUsage::TWriteSession_WriteEncoded >> TYardTest::TestLogContinuityPersistenceLarge [GOOD] >> TYardTest::TestLogWriteLsnConsistency >> ColumnBuildTest::BaseCase |89.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_column_build/unittest |89.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_column_build/unittest >> TYardTest::TestLogWriteLsnConsistency [GOOD] >> TYardTest::TestLotsOfTinyAsyncLogLatency ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/skeleton/ut/unittest >> TVPatchTests::PatchPartFastXorDiffDisorder [GOOD] Test command err: Recv 65537 2025-05-07T08:53:07.979987Z node 1 :BS_VDISK_PATCH INFO: {BSVSP03@skeleton_vpatch_actor.cpp:190} [0:1:0:0:0] TEvVPatch: bootstrapped; OriginalBlobId# [1:2:3:4:6:100:0] Deadline# 1970-01-01T00:00:01.000000Z Send NKikimr::TEvBlobStorage::TEvVGet Recv NKikimr::TEvBlobStorage::TEvVGetResult 2025-05-07T08:53:07.981092Z node 1 :BS_VDISK_PATCH 
INFO: {BSVSP06@skeleton_vpatch_actor.cpp:266} [0:1:0:0:0] TEvVPatch: received parts index; OriginalBlobId# [1:2:3:4:6:100:0] Status# OK ResultSize# 1 2025-05-07T08:53:07.981156Z node 1 :BS_VDISK_PATCH INFO: {BSVSP04@skeleton_vpatch_actor.cpp:226} [0:1:0:0:0] TEvVPatch: sended found parts; OriginalBlobId# [1:2:3:4:6:100:0] FoundParts# [5] Status# OK Send NKikimr::TEvBlobStorage::TEvVPatchFoundParts Recv NKikimr::TEvBlobStorage::TEvVPatchXorDiff 2025-05-07T08:53:07.981388Z node 1 :BS_VDISK_PATCH INFO: {BSVSP13@skeleton_vpatch_actor.cpp:674} [0:1:0:0:0] TEvVPatch: received xor diff; OriginalBlobId# [1:2:3:4:6:100:0] PatchedBlobId# [1:3:3:4:6:100:0] FromPart# 4 ToPart# 0 HasBuffer# no ReceivedXorDiffCount# 1/0 Send NKikimr::TEvBlobStorage::TEvVPatchXorDiffResult 2025-05-07T08:53:07.981458Z node 1 :BS_VDISK_PATCH DEBUG: {BSVSP17@skeleton_vpatch_actor.cpp:727} [0:1:0:0:0] NotifySkeletonAboutDying; Send NKikimr::TEvVPatchDyingRequest Recv NKikimr::TEvBlobStorage::TEvVPatchDiff 2025-05-07T08:53:07.981579Z node 1 :BS_VDISK_PATCH INFO: {BSVSP07@skeleton_vpatch_actor.cpp:315} [0:1:0:0:0] TEvVPatch: send patch result; OriginalBlobId# [1:2:3:4:6:100:0] PatchedBlobId# [1:3:3:4:6:100:0] OriginalPartId# 0 PatchedPartId# 0 Status# ERROR ErrorReason# [XorDiff from datapart] the start of the diff at index 0 righter than the start of the diff at index 1; PrevDiffStart# 2 DiffStart# 0 Send NKikimr::TEvBlobStorage::TEvVPatchResult Recv NKikimr::TEvVPatchDyingConfirm ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/skeleton/ut/unittest >> TVPatchTests::PatchPartOk [GOOD] Test command err: Recv 65537 2025-05-07T08:53:07.968931Z node 1 :BS_VDISK_PATCH INFO: {BSVSP03@skeleton_vpatch_actor.cpp:190} [0:1:0:0:0] TEvVPatch: bootstrapped; OriginalBlobId# [1:2:3:4:6:10:0] Deadline# 1970-01-01T00:00:01.000000Z Send NKikimr::TEvBlobStorage::TEvVGet Recv NKikimr::TEvBlobStorage::TEvVGetResult 2025-05-07T08:53:07.970049Z node 1 :BS_VDISK_PATCH INFO: {BSVSP06@skeleton_vpatch_actor.cpp:266} [0:1:0:0:0] TEvVPatch: received parts index; OriginalBlobId# [1:2:3:4:6:10:0] Status# OK ResultSize# 1 2025-05-07T08:53:07.970136Z node 1 :BS_VDISK_PATCH INFO: {BSVSP04@skeleton_vpatch_actor.cpp:226} [0:1:0:0:0] TEvVPatch: sended found parts; OriginalBlobId# [1:2:3:4:6:10:0] FoundParts# [1] Status# OK Send NKikimr::TEvBlobStorage::TEvVPatchFoundParts Recv NKikimr::TEvBlobStorage::TEvVPatchDiff 2025-05-07T08:53:07.970371Z node 1 :BS_VDISK_PATCH INFO: {BSVSP09@skeleton_vpatch_actor.cpp:577} [0:1:0:0:0] TEvVPatch: received diff; OriginalBlobId# [1:2:3:4:6:10:0] PatchedBlobId# [1:3:3:4:6:10:0] OriginalPartId# 1 PatchedPartId# 1 XorReceiver# no ParityPart# no ForceEnd# no 2025-05-07T08:53:07.970440Z node 1 :BS_VDISK_PATCH INFO: {BSVSP05@skeleton_vpatch_actor.cpp:246} [0:1:0:0:0] TEvVPatch: send vGet for pulling part data; OriginalBlobId# [1:2:3:4:6:10:0] PullingPart# 1 Send NKikimr::TEvBlobStorage::TEvVGet Recv NKikimr::TEvBlobStorage::TEvVGetResult 2025-05-07T08:53:07.970671Z node 1 :BS_VDISK_PATCH INFO: {BSVSP08@skeleton_vpatch_actor.cpp:383} [0:1:0:0:0] TEvVPatch: received part data; OriginalBlobId# [1:2:3:4:6:10:0] PatchedBlobId# [1:3:3:4:6:10:0] OriginalPartId# 1 PatchedPartId# 1 DataParts# 4 ReceivedBlobId# [1:2:3:4:6:10:1] Status# OK ResultSize# 1 ParityPart# no 2025-05-07T08:53:07.970755Z node 1 :BS_VDISK_PATCH INFO: {BSVSP14@skeleton_vpatch_actor.cpp:462} [0:1:0:0:0] TEvVPatch: send xor diffs; OriginalBlobId# [1:2:3:4:6:10:0] PatchedBlobId# [1:3:3:4:6:10:0] OriginalPartId# 1 PatchedPartId# 1 XorDiffCount# 0 
2025-05-07T08:53:07.970844Z node 1 :BS_VDISK_PATCH INFO: {BSVSP15@skeleton_vpatch_actor.cpp:502} [0:1:0:0:0] TEvVPatch: send vPut; OriginalBlobId# [1:2:3:4:6:10:0] PatchedBlobId# [1:3:3:4:6:10:0] OriginalPartId# 1 PatchedPartId# 1 ReceivedXorDiffs# 0 ExpectedXorDiffs# 0 Send NKikimr::TEvBlobStorage::TEvVPut Recv NKikimr::TEvBlobStorage::TEvVPutResult 2025-05-07T08:53:07.971030Z node 1 :BS_VDISK_PATCH INFO: {BSVSP10@skeleton_vpatch_actor.cpp:627} [0:1:0:0:0] TEvVPatch: received put result; OriginalBlobId# [1:2:3:4:6:10:0] PatchedBlobId# [1:3:3:4:6:10:0] OriginalPartId# 1 PatchedPartId# 1 Status# OK 2025-05-07T08:53:07.971079Z node 1 :BS_VDISK_PATCH INFO: {BSVSP07@skeleton_vpatch_actor.cpp:315} [0:1:0:0:0] TEvVPatch: send patch result; OriginalBlobId# [1:2:3:4:6:10:0] PatchedBlobId# [1:3:3:4:6:10:0] OriginalPartId# 1 PatchedPartId# 1 Status# OK ErrorReason# Send NKikimr::TEvBlobStorage::TEvVPatchResult 2025-05-07T08:53:07.971148Z node 1 :BS_VDISK_PATCH DEBUG: {BSVSP17@skeleton_vpatch_actor.cpp:727} [0:1:0:0:0] NotifySkeletonAboutDying; Send NKikimr::TEvVPatchDyingRequest Recv NKikimr::TEvVPatchDyingConfirm |89.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/sys_view/partition_stats/ut/ydb-core-sys_view-partition_stats-ut |89.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/sys_view/partition_stats/ut/ydb-core-sys_view-partition_stats-ut |89.9%| [TA] {RESULT} $(B)/ydb/core/sys_view/query_stats/ut/test-results/unittest/{meta.json ... results_accumulator.log} |89.9%| [LD] {RESULT} $(B)/ydb/core/sys_view/partition_stats/ut/ydb-core-sys_view-partition_stats-ut >> ColumnBuildTest::ValidDefaultValue |89.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_column_build/unittest >> ColumnBuildTest::AlreadyExists |89.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_column_build/unittest |89.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/base/ut_board_subscriber/unittest >> KqpVectorIndexes::OrderByCosineDistanceNullableLevel1 [GOOD] >> KqpVectorIndexes::OrderByCosineDistanceNotNullableLevel3 >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-53 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-54 |89.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/fq/libs/actors/ut/ydb-core-fq-libs-actors-ut |89.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/fq/libs/actors/ut/ydb-core-fq-libs-actors-ut >> ColumnBuildTest::BuildColumnDoesnotRestoreDeletedRows |89.9%| [LD] {RESULT} $(B)/ydb/core/fq/libs/actors/ut/ydb-core-fq-libs-actors-ut >> TBoardSubscriberTest::SimpleSubscriber >> TBoardSubscriberTest::ManySubscribersManyPublisher >> TBoardSubscriberTest::NotAvailableByShutdown |89.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/base/ut_board_subscriber/unittest >> TBoardSubscriberTest::SimpleSubscriber [GOOD] >> TBoardSubscriberTest::ManySubscribersManyPublisher [GOOD] >> KqpPg::NoSelectFullScan [GOOD] >> KqpPg::LongDomainName >> TBoardSubscriberTest::NotAvailableByShutdown [GOOD] |89.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/keyvalue/ut/ydb-core-keyvalue-ut |89.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/keyvalue/ut/ydb-core-keyvalue-ut |89.9%| [LD] {RESULT} $(B)/ydb/core/keyvalue/ut/ydb-core-keyvalue-ut |89.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/base/ut_board_subscriber/unittest >> TBoardSubscriberTest::ManySubscribersManyPublisher [GOOD] |89.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/base/ut_board_subscriber/unittest >> 
TBoardSubscriberTest::SimpleSubscriber [GOOD] |89.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/base/ut_board_subscriber/unittest >> TBoardSubscriberTest::ReconnectReplica |89.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/base/ut_board_subscriber/unittest >> TBoardSubscriberTest::NotAvailableByShutdown [GOOD] |89.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/base/ut_board_subscriber/unittest >> TBoardSubscriberTest::ReconnectReplica [GOOD] >> TBoardSubscriberTest::DropByDisconnect >> KqpPg::InsertNoTargetColumns_NotOneSize-useSink [GOOD] >> KqpPg::InsertNoTargetColumns_Alter+useSink |89.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/base/ut_board_subscriber/unittest >> TBoardSubscriberTest::ReconnectReplica [GOOD] >> TBoardSubscriberTest::DropByDisconnect [GOOD] |89.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/grpc_streaming/ut/ydb-core-grpc_streaming-ut |89.9%| [LD] {RESULT} $(B)/ydb/core/grpc_streaming/ut/ydb-core-grpc_streaming-ut |89.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/grpc_streaming/ut/ydb-core-grpc_streaming-ut |89.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/base/ut_board_subscriber/unittest |89.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/base/ut_board_subscriber/unittest >> TBoardSubscriberTest::DropByDisconnect [GOOD] >> PersQueueSdkReadSessionTest::SpecifyClustersExplicitly >> ColumnBuildTest::CancelBuild >> ReadSessionImplTest::ReconnectOnTmpError [GOOD] >> ReadSessionImplTest::ReconnectOnTmpErrorAndThenTimeout [GOOD] >> ReadSessionImplTest::ReconnectOnTimeout [GOOD] >> ReadSessionImplTest::ReconnectOnTimeoutAndThenCreate [GOOD] >> ReadSessionImplTest::ReconnectsAfterFailure [GOOD] >> ReadSessionImplTest::SimpleDataHandlers >> ColumnBuildTest::AlreadyExists [GOOD] |89.9%| [TA] $(B)/ydb/core/base/ut_board_subscriber/test-results/unittest/{meta.json ... 
results_accumulator.log} >> ReadSessionImplTest::SimpleDataHandlers [GOOD] >> ReadSessionImplTest::SimpleDataHandlersWithCommit |89.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_column_build/unittest >> ReadSessionImplTest::SimpleDataHandlersWithCommit [GOOD] >> PersQueueSdkReadSessionTest::ReadSessionWithAbort [GOOD] >> PersQueueSdkReadSessionTest::ReadSessionWithClose >> ReadSessionImplTest::UsesOnRetryStateDuringRetries [GOOD] >> RetryPolicy::TWriteSession_TestPolicy ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_column_build/unittest >> ColumnBuildTest::AlreadyExists [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:128:2058] recipient: [1:108:2140] 2025-05-07T08:53:11.111275Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:53:11.111373Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:53:11.111424Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:53:11.111464Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:53:11.111510Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:53:11.111533Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:53:11.111584Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:53:11.111649Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:53:11.112397Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:53:11.112793Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:53:11.199325Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:53:11.199383Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:53:11.217144Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:53:11.217281Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:53:11.217477Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as 
Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:53:11.227800Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:53:11.228446Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:53:11.229155Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:53:11.229547Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:53:11.232407Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:53:11.234121Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:53:11.234195Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:53:11.234253Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:53:11.234330Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:53:11.234403Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:53:11.234628Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:53:11.247591Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:239:2058] recipient: [1:15:2062] 2025-05-07T08:53:11.378303Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:53:11.378578Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:53:11.378863Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:53:11.379125Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:53:11.379186Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:53:11.381865Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: 
TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:53:11.382047Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:53:11.382310Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:53:11.382419Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:53:11.382466Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:53:11.382500Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:53:11.384727Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:53:11.384794Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:53:11.384835Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:53:11.387930Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:53:11.388001Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:53:11.388050Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:53:11.388116Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:53:11.392036Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:53:11.394618Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:53:11.394825Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:53:11.395932Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:53:11.396111Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 130 RawX2: 4294969450 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:53:11.396163Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:53:11.396478Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:53:11.396536Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:53:11.396739Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:53:11.396872Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:53:11.399637Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:53:11.399689Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:53:11.399874Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:53:11.399935Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 
hSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "index" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 3 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 3 ProcessingParams { Version: 2 PlanResolution: 50 Coordinators: 72075186233409550 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409551 SchemeShard: 72075186233409549 } DomainKey { SchemeShard: 72057594046678944 PathId: 3 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SharedHive: 72057594037968897 ServerlessComputeResourcesMode: EServerlessComputeResourcesModeShared } } PathId: 2 PathOwnerId: 72075186233409549, at schemeshard: 72075186233409549 2025-05-07T08:53:14.552660Z node 1 :BUILD_INDEX NOTICE: schemeshard_build_index__create.cpp:23: TIndexBuilder::TXTYPE_CREATE_INDEX_BUILD: DoExecute TxId: 106 DatabaseName: "/MyRoot/ServerLessDB" Settings { source_path: "/MyRoot/ServerLessDB/Table" max_shards_in_flight: 2 column_build_operation { column { ColumnName: "value" default_from_literal { type { type_id: UINT64 } value { uint64_value: 10 } } } } ScanSettings { MaxBatchRows: 1 } } 2025-05-07T08:53:14.560334Z node 1 :BUILD_INDEX INFO: schemeshard_build_index__progress.cpp:1105: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 106 AlterMainTable TBuildInfo{ IndexBuildId: 106, Uid: , DomainPathId: [OwnerId: 72075186233409549, LocalPathId: 1], TablePathId: [OwnerId: 72075186233409549, LocalPathId: 2], IndexType: EIndexTypeInvalid, IndexName: , State: AlterMainTable, IsCancellationRequested: 0, Issue: , SubscribersCount: 0, CreateSender: [1:1152:3023], AlterMainTableTxId: 0, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 0, LockTxStatus: StatusSuccess, LockTxDone: 0, InitiateTxId: 0, InitiateTxStatus: StatusSuccess, InitiateTxDone: 0, SnapshotStepId: 0, ApplyTxId: 0, ApplyTxStatus: StatusSuccess, ApplyTxDone: 0, UnlockTxId: 0, UnlockTxStatus: StatusSuccess, UnlockTxDone: 0, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }, Billed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }} 2025-05-07T08:53:14.560409Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index_tx_base.cpp:181: 
TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: AllocateTxId 106 2025-05-07T08:53:14.560643Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 106, at schemeshard: 72075186233409549 2025-05-07T08:53:14.560746Z node 1 :BUILD_INDEX INFO: schemeshard_build_index__progress.cpp:2606: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxReply : TEvAllocateResult, BuildIndexId: 106, txId# 281474976725757 2025-05-07T08:53:14.560854Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:2613: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxReply : TEvAllocateResult, buildInfo: TBuildInfo{ IndexBuildId: 106, Uid: , DomainPathId: [OwnerId: 72075186233409549, LocalPathId: 1], TablePathId: [OwnerId: 72075186233409549, LocalPathId: 2], IndexType: EIndexTypeInvalid, IndexName: , State: AlterMainTable, IsCancellationRequested: 0, Issue: , SubscribersCount: 0, CreateSender: [1:1152:3023], AlterMainTableTxId: 0, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 0, LockTxStatus: StatusSuccess, LockTxDone: 0, InitiateTxId: 0, InitiateTxStatus: StatusSuccess, InitiateTxDone: 0, SnapshotStepId: 0, ApplyTxId: 0, ApplyTxStatus: StatusSuccess, ApplyTxDone: 0, UnlockTxId: 0, UnlockTxStatus: StatusSuccess, UnlockTxDone: 0, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }, Billed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }} 2025-05-07T08:53:14.566810Z node 1 :BUILD_INDEX INFO: schemeshard_build_index__progress.cpp:1105: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 106 AlterMainTable TBuildInfo{ IndexBuildId: 106, Uid: , DomainPathId: [OwnerId: 72075186233409549, LocalPathId: 1], TablePathId: [OwnerId: 72075186233409549, LocalPathId: 2], IndexType: EIndexTypeInvalid, IndexName: , State: AlterMainTable, IsCancellationRequested: 0, Issue: , SubscribersCount: 0, CreateSender: [1:1152:3023], AlterMainTableTxId: 281474976725757, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 0, LockTxStatus: StatusSuccess, LockTxDone: 0, InitiateTxId: 0, InitiateTxStatus: StatusSuccess, InitiateTxDone: 0, SnapshotStepId: 0, ApplyTxId: 0, ApplyTxStatus: StatusSuccess, ApplyTxDone: 0, UnlockTxId: 0, UnlockTxStatus: StatusSuccess, UnlockTxDone: 0, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }, Billed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }} 2025-05-07T08:53:14.567156Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:414: AlterMainTablePropose 106 AlterMainTable Transaction { WorkingDir: "/MyRoot/ServerLessDB" OperationType: ESchemeOpAlterTable AlterTable { Name: "Table" Columns { Name: "value" Type: "Uint64" DefaultFromLiteral { type { type_id: UINT64 } value { uint64_value: 10 } } IsBuildInProgress: true } } Internal: true } TxId: 281474976725757 TabletId: 72075186233409549 FailOnExist: true 2025-05-07T08:53:14.569756Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/ServerLessDB" OperationType: ESchemeOpAlterTable AlterTable { Name: "Table" Columns { Name: "value" Type: "Uint64" DefaultFromLiteral { type { type_id: UINT64 } value { uint64_value: 10 } } IsBuildInProgress: true } } Internal: true } TxId: 281474976725757 TabletId: 72075186233409549 FailOnExist: true , at schemeshard: 72075186233409549 2025-05-07T08:53:14.574210Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_table.cpp:508: TAlterTable Propose, path: /MyRoot/ServerLessDB/Table, pathId: , opId: 281474976725757:0, at schemeshard: 72075186233409549 2025-05-07T08:53:14.574685Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 281474976725757:1, propose status:StatusInvalidParameter, reason: Cannot alter type for column 'value', at schemeshard: 72075186233409549 2025-05-07T08:53:14.587383Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 281474976725757, response: Status: StatusInvalidParameter Reason: "Cannot alter type for column \'value\'" TxId: 281474976725757 SchemeshardId: 72075186233409549, at schemeshard: 72075186233409549 2025-05-07T08:53:14.587615Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976725757, database: /MyRoot/ServerLessDB, subject: , status: StatusInvalidParameter, reason: Cannot alter type for column 'value', operation: ALTER TABLE, path: /MyRoot/ServerLessDB/Table 2025-05-07T08:53:14.587831Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6657: Handle: TEvModifySchemeTransactionResult: txId# 281474976725757, status# StatusInvalidParameter 2025-05-07T08:53:14.587924Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6659: Message: Status: StatusInvalidParameter Reason: "Cannot alter type for column \'value\'" TxId: 281474976725757 SchemeshardId: 72075186233409549 2025-05-07T08:53:14.588010Z node 1 :BUILD_INDEX INFO: schemeshard_build_index__progress.cpp:2450: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxReply : TEvModifySchemeTransactionResult, BuildIndexId: 106, cookie: 106, txId: 281474976725757, status: StatusInvalidParameter 2025-05-07T08:53:14.588147Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:2454: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxReply : TEvModifySchemeTransactionResult, buildInfo: TBuildInfo{ IndexBuildId: 106, Uid: , DomainPathId: [OwnerId: 72075186233409549, LocalPathId: 1], TablePathId: [OwnerId: 72075186233409549, LocalPathId: 2], IndexType: EIndexTypeInvalid, IndexName: , State: AlterMainTable, IsCancellationRequested: 0, Issue: , SubscribersCount: 0, CreateSender: [1:1152:3023], AlterMainTableTxId: 281474976725757, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 0, LockTxStatus: StatusSuccess, LockTxDone: 0, InitiateTxId: 0, InitiateTxStatus: StatusSuccess, InitiateTxDone: 0, SnapshotStepId: 0, ApplyTxId: 0, ApplyTxStatus: StatusSuccess, ApplyTxDone: 0, UnlockTxId: 0, UnlockTxStatus: StatusSuccess, UnlockTxDone: 0, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }, Billed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }}, record: Status: StatusInvalidParameter Reason: "Cannot alter type for column \'value\'" TxId: 281474976725757 SchemeshardId: 72075186233409549 2025-05-07T08:53:14.588947Z node 1 :BUILD_INDEX NOTICE: schemeshard_build_index__progress.cpp:2419: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TIndexBuilder::TTxReply: ReplyOnCreation, BuildIndexId: 106, status: BAD_REQUEST, error: At AlterMainTable state got unsuccess propose result, status: StatusInvalidParameter, reason: Cannot alter type for column 'value', replyTo: [1:1152:3023] 2025-05-07T08:53:14.589306Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:2420: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: Message: TxId: 106 Status: BAD_REQUEST 
Issues { message: "At AlterMainTable state got unsuccess propose result, status: StatusInvalidParameter, reason: Cannot alter type for column \'value\'" severity: 1 } IndexBuild { Id: 106 Issues { message: "At AlterMainTable state got unsuccess propose result, status: StatusInvalidParameter, reason: Cannot alter type for column \'value\'" severity: 1 } State: STATE_PREPARING Settings { source_path: "/MyRoot/ServerLessDB/Table" max_shards_in_flight: 2 column_build_operation { column { ColumnName: "value" default_from_literal { type { type_id: UINT64 } value { uint64_value: 10 } } } } ScanSettings { MaxBatchRows: 1 } } Progress: 0 } BUILDINDEX RESPONSE CREATE: NKikimrIndexBuilder.TEvCreateResponse TxId: 106 Status: BAD_REQUEST Issues { message: "At AlterMainTable state got unsuccess propose result, status: StatusInvalidParameter, reason: Cannot alter type for column \'value\'" severity: 1 } IndexBuild { Id: 106 Issues { message: "At AlterMainTable state got unsuccess propose result, status: StatusInvalidParameter, reason: Cannot alter type for column \'value\'" severity: 1 } State: STATE_PREPARING Settings { source_path: "/MyRoot/ServerLessDB/Table" max_shards_in_flight: 2 column_build_operation { column { ColumnName: "value" default_from_literal { type { type_id: UINT64 } value { uint64_value: 10 } } } } ScanSettings { MaxBatchRows: 1 } } Progress: 0 } ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/persqueue_public/ut/with_offset_ranges_mode_ut/unittest >> ReadSessionImplTest::SimpleDataHandlersWithCommit [GOOD] Test command err: 2025-05-07T08:53:14.825533Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:53:14.825587Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:53:14.825613Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-05-07T08:53:14.826137Z :ERROR: [db] [sessionid] [cluster] Got error. Status: INTERNAL_ERROR. Description: 2025-05-07T08:53:14.826206Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:53:14.826238Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:53:14.827596Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.007214s 2025-05-07T08:53:14.828191Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-05-07T08:53:14.828622Z :INFO: [db] [sessionid] [cluster] Server session id: session id 2025-05-07T08:53:14.828740Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:53:14.829768Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:53:14.829808Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:53:14.829841Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-05-07T08:53:14.830192Z :ERROR: [db] [sessionid] [cluster] Got error. Status: INTERNAL_ERROR. 
Description: 2025-05-07T08:53:14.830226Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:53:14.830267Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:53:14.830306Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.009701s 2025-05-07T08:53:14.830744Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-05-07T08:53:14.831088Z :INFO: [db] [sessionid] [cluster] Server session id: session id 2025-05-07T08:53:14.831156Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:53:14.831886Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:53:14.831899Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:53:14.831912Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-05-07T08:53:14.832167Z :ERROR: [db] [sessionid] [cluster] Got error. Status: TIMEOUT. Description:
: Error: Failed to establish connection to server. Attempts done: 1 2025-05-07T08:53:14.832198Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:53:14.832220Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:53:14.832272Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.180376s 2025-05-07T08:53:14.832551Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-05-07T08:53:14.832909Z :INFO: [db] [sessionid] [cluster] Server session id: session id 2025-05-07T08:53:14.832958Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:53:14.833580Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:53:14.833594Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:53:14.833606Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-05-07T08:53:14.834032Z :ERROR: [db] [sessionid] [cluster] Got error. Status: TIMEOUT. Description:
: Error: Failed to establish connection to server. Attempts done: 1 2025-05-07T08:53:14.834081Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:53:14.834098Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:53:14.834156Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.237133s 2025-05-07T08:53:14.834687Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-05-07T08:53:14.835083Z :INFO: [db] [sessionid] [cluster] Server session id: session id 2025-05-07T08:53:14.835163Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:53:14.835894Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:53:14.835909Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:53:14.835929Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-05-07T08:53:14.836130Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-05-07T08:53:14.836523Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-05-07T08:53:14.847041Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:53:14.848082Z :ERROR: [db] [sessionid] [cluster] Got error. Status: TRANSPORT_UNAVAILABLE. Description:
: Error: GRpc error: (14): 2025-05-07T08:53:14.848135Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:53:14.848162Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:53:14.848249Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.162213s 2025-05-07T08:53:14.848434Z :DEBUG: [db] [sessionid] [cluster] Abort session to cluster 2025-05-07T08:53:14.849853Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:53:14.849870Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:53:14.849904Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-05-07T08:53:14.850180Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-05-07T08:53:14.850601Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-05-07T08:53:14.850771Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:53:14.851589Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-05-07T08:53:14.952984Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:53:14.953215Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-2) 2025-05-07T08:53:14.953273Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-05-07T08:53:14.953320Z :DEBUG: Take Data. Partition 1. Read: {1, 0} (2-2) 2025-05-07T08:53:14.953406Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 6 bytes 2025-05-07T08:53:15.056399Z :INFO: [db] [sessionid] [cluster] Confirm partition stream destroy. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1 2025-05-07T08:53:15.056557Z :DEBUG: [db] [sessionid] [cluster] Abort session to cluster 2025-05-07T08:53:15.057779Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:53:15.057814Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:53:15.057836Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-05-07T08:53:15.058344Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-05-07T08:53:15.059016Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-05-07T08:53:15.059196Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:53:15.059622Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-05-07T08:53:15.160708Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:53:15.160923Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-2) 2025-05-07T08:53:15.160978Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-05-07T08:53:15.161020Z :DEBUG: Take Data. Partition 1. 
Read: {1, 0} (2-2) 2025-05-07T08:53:15.161103Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [0, 3). Partition stream id: 1 2025-05-07T08:53:15.161209Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 6 bytes 2025-05-07T08:53:15.161407Z :INFO: [db] [sessionid] [cluster] Confirm partition stream destroy. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1 2025-05-07T08:53:15.161522Z :DEBUG: [db] [sessionid] [cluster] Committed response: { cookies { assign_id: 1 partition_cookie: 1 } } 2025-05-07T08:53:15.162204Z :DEBUG: [db] [sessionid] [cluster] Abort session to cluster >> ColumnBuildTest::BaseCase [GOOD] >> DataShardSnapshots::VolatileSnapshotCleanupOnReboot [GOOD] >> DataShardSnapshots::VolatileSnapshotCleanupOnFinish |89.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/sys_view/partition_stats/ut/unittest |89.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/sys_view/partition_stats/ut/unittest >> ColumnBuildTest::ValidDefaultValue [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_column_build/unittest >> ColumnBuildTest::BaseCase [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:128:2058] recipient: [1:108:2140] 2025-05-07T08:53:09.281445Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:53:09.281547Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:53:09.281597Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:53:09.281635Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:53:09.281686Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:53:09.281717Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:53:09.281774Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:53:09.281847Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:53:09.294888Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:53:09.295368Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:53:09.426945Z node 1 
:FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:53:09.427015Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:53:09.444677Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:53:09.444807Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:53:09.444988Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:53:09.468366Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:53:09.469001Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:53:09.469697Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:53:09.470097Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:53:09.479526Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:53:09.481367Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:53:09.481449Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:53:09.481504Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:53:09.481574Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:53:09.481637Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:53:09.481827Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:53:09.491631Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:239:2058] recipient: [1:15:2062] 2025-05-07T08:53:09.653558Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:53:09.653834Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:53:09.654164Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 
0 2025-05-07T08:53:09.654466Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:53:09.654540Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:53:09.659166Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:53:09.659325Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:53:09.659576Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:53:09.659677Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:53:09.659728Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:53:09.659765Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:53:09.667055Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:53:09.667139Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:53:09.667183Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:53:09.675137Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:53:09.675253Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:53:09.675321Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:53:09.675392Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:53:09.683504Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:53:09.694923Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:53:09.695143Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation 
RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:53:09.696199Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:53:09.696367Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 130 RawX2: 4294969450 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:53:09.696421Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:53:09.696728Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:53:09.696783Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:53:09.696964Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:53:09.697066Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:53:09.703122Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:53:09.703196Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:53:09.703418Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:53:09.703483Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 
Execute, operationId: 281474976725761:0, at schemeshard: 72075186233409549 2025-05-07T08:53:15.472397Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_lock.cpp:30: [72075186233409549] TDropLock TPropose opId# 281474976725761:0 ProgressState 2025-05-07T08:53:15.472447Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 281474976725761 ready parts: 1/1 2025-05-07T08:53:15.472557Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72075186233409550 message:Transaction { AffectedSet { TabletId: 72075186233409549 Flags: 2 } ExecLevel: 0 TxId: 281474976725761 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72075186233409550 2025-05-07T08:53:15.476817Z node 1 :BUILD_INDEX INFO: schemeshard_build_index__progress.cpp:1105: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 106 Unlocking TBuildInfo{ IndexBuildId: 106, Uid: , DomainPathId: [OwnerId: 72075186233409549, LocalPathId: 1], TablePathId: [OwnerId: 72075186233409549, LocalPathId: 2], IndexType: EIndexTypeInvalid, IndexName: , State: Unlocking, IsCancellationRequested: 0, Issue: , SubscribersCount: 1, CreateSender: [1:1152:3023], AlterMainTableTxId: 281474976725757, AlterMainTableTxStatus: StatusAccepted, AlterMainTableTxDone: 1, LockTxId: 281474976725758, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976725759, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 450, ApplyTxId: 281474976725760, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976725761, UnlockTxStatus: StatusAccepted, UnlockTxDone: 0, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 101, upload bytes: 2424, read rows: 101, read bytes: 2424 }, Billed: { upload rows: 101, upload bytes: 2424, read rows: 101, read bytes: 2424 }} 2025-05-07T08:53:15.477323Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 281474976725761:4294967295 from tablet: 72075186233409549 to tablet: 72075186233409550 cookie: 0:281474976725761 msg type: 269090816 2025-05-07T08:53:15.477442Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 281474976725761, partId: 4294967295, tablet: 72075186233409550 2025-05-07T08:53:15.477633Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 281474976725761, at schemeshard: 72075186233409549 2025-05-07T08:53:15.477670Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 281474976725761, ready parts: 0/1, is published: true 2025-05-07T08:53:15.477713Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 281474976725761, at schemeshard: 72075186233409549 2025-05-07T08:53:15.492670Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269877763, Sender [1:1826:3689], Recipient [1:760:2649]: NKikimr::TEvTabletPipe::TEvClientDestroyed { TabletId: 72075186233409549 ClientId: [1:1826:3689] ServerId: [1:1828:3691] } 2025-05-07T08:53:15.492759Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3164: StateWork, processing event TEvTabletPipe::TEvClientDestroyed 2025-05-07T08:53:15.563684Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 650, transactions count 
in step: 1, at schemeshard: 72075186233409549 2025-05-07T08:53:15.563827Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 281474976725761 AckTo { RawX1: 0 RawX2: 0 } } Step: 650 MediatorID: 72075186233409551 TabletID: 72075186233409549, at schemeshard: 72075186233409549 2025-05-07T08:53:15.563891Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_lock.cpp:44: [72075186233409549] TDropLock TPropose opId# 281474976725761:0 HandleReply TEvOperationPlan: step# 650 2025-05-07T08:53:15.563942Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 281474976725761:0 128 -> 240 2025-05-07T08:53:15.572896Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 281474976725761:0, at schemeshard: 72075186233409549 2025-05-07T08:53:15.572981Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72075186233409549] TDone opId# 281474976725761:0 ProgressState 2025-05-07T08:53:15.573068Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976725761:0 progress is 1/1 2025-05-07T08:53:15.573101Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 281474976725761 ready parts: 1/1 2025-05-07T08:53:15.573137Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976725761:0 progress is 1/1 2025-05-07T08:53:15.573165Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 281474976725761 ready parts: 1/1 2025-05-07T08:53:15.573207Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 281474976725761, ready parts: 1/1, is published: true 2025-05-07T08:53:15.573281Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:572:2510] message: TxId: 281474976725761 2025-05-07T08:53:15.573325Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 281474976725761 ready parts: 1/1 2025-05-07T08:53:15.573357Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 281474976725761:0 2025-05-07T08:53:15.573383Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 281474976725761:0 2025-05-07T08:53:15.573459Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72075186233409549, LocalPathId: 2] was 3 2025-05-07T08:53:15.586921Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6706: Handle: TEvNotifyTxCompletionResult: txId# 281474976725761 2025-05-07T08:53:15.587031Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6708: Message: TxId: 281474976725761 2025-05-07T08:53:15.587131Z node 1 :BUILD_INDEX INFO: schemeshard_build_index__progress.cpp:2321: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxReply : TEvNotifyTxCompletionResult, txId# 281474976725761, buildInfoId: 106 2025-05-07T08:53:15.587233Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:2324: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxReply : TEvNotifyTxCompletionResult, txId# 281474976725761, buildInfo: TBuildInfo{ IndexBuildId: 106, Uid: , DomainPathId: [OwnerId: 
72075186233409549, LocalPathId: 1], TablePathId: [OwnerId: 72075186233409549, LocalPathId: 2], IndexType: EIndexTypeInvalid, IndexName: , State: Unlocking, IsCancellationRequested: 0, Issue: , SubscribersCount: 1, CreateSender: [1:1152:3023], AlterMainTableTxId: 281474976725757, AlterMainTableTxStatus: StatusAccepted, AlterMainTableTxDone: 1, LockTxId: 281474976725758, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976725759, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 450, ApplyTxId: 281474976725760, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976725761, UnlockTxStatus: StatusAccepted, UnlockTxDone: 0, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 101, upload bytes: 2424, read rows: 101, read bytes: 2424 }, Billed: { upload rows: 101, upload bytes: 2424, read rows: 101, read bytes: 2424 }} 2025-05-07T08:53:15.595643Z node 1 :BUILD_INDEX INFO: schemeshard_build_index__progress.cpp:1105: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 106 Unlocking TBuildInfo{ IndexBuildId: 106, Uid: , DomainPathId: [OwnerId: 72075186233409549, LocalPathId: 1], TablePathId: [OwnerId: 72075186233409549, LocalPathId: 2], IndexType: EIndexTypeInvalid, IndexName: , State: Unlocking, IsCancellationRequested: 0, Issue: , SubscribersCount: 1, CreateSender: [1:1152:3023], AlterMainTableTxId: 281474976725757, AlterMainTableTxStatus: StatusAccepted, AlterMainTableTxDone: 1, LockTxId: 281474976725758, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976725759, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 450, ApplyTxId: 281474976725760, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976725761, UnlockTxStatus: StatusAccepted, UnlockTxDone: 1, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 101, upload bytes: 2424, read rows: 101, read bytes: 2424 }, Billed: { upload rows: 101, upload bytes: 2424, read rows: 101, read bytes: 2424 }} 2025-05-07T08:53:15.595747Z node 1 :BUILD_INDEX INFO: schemeshard_build_index_tx_base.cpp:25: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: Change state from Unlocking to Done 2025-05-07T08:53:15.598450Z node 1 :BUILD_INDEX INFO: schemeshard_build_index__progress.cpp:1105: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 106 Done TBuildInfo{ IndexBuildId: 106, Uid: , DomainPathId: [OwnerId: 72075186233409549, LocalPathId: 1], TablePathId: [OwnerId: 72075186233409549, LocalPathId: 2], IndexType: EIndexTypeInvalid, IndexName: , State: Done, IsCancellationRequested: 0, Issue: , SubscribersCount: 1, CreateSender: [1:1152:3023], AlterMainTableTxId: 281474976725757, AlterMainTableTxStatus: StatusAccepted, AlterMainTableTxDone: 1, LockTxId: 281474976725758, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976725759, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 450, ApplyTxId: 281474976725760, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976725761, UnlockTxStatus: StatusAccepted, UnlockTxDone: 1, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 101, upload bytes: 2424, read rows: 101, read bytes: 2424 }, Billed: { upload rows: 101, upload bytes: 2424, read rows: 101, read bytes: 2424 }} 2025-05-07T08:53:15.598518Z node 1 :BUILD_INDEX TRACE: schemeshard_build_index_tx_base.cpp:325: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TIndexBuildInfo SendNotifications: : id# 106, subscribers count# 1 2025-05-07T08:53:15.598705Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 106: got EvNotifyTxCompletionResult 2025-05-07T08:53:15.598748Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 106: satisfy waiter [1:1172:3043] TestWaitNotification: OK eventTxId 106 2025-05-07T08:53:15.601447Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index__get.cpp:19: TIndexBuilder::TXTYPE_GET_INDEX_BUILD: DoExecute DatabaseName: "/MyRoot/ServerLessDB" IndexBuildId: 106 2025-05-07T08:53:15.601778Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index_tx_base.h:93: TIndexBuilder::TXTYPE_GET_INDEX_BUILD: Reply Status: SUCCESS IndexBuild { Id: 106 State: STATE_DONE Settings { source_path: "/MyRoot/ServerLessDB/Table" max_shards_in_flight: 2 column_build_operation { column { ColumnName: "DefaultValue" default_from_literal { type { type_id: UINT64 } value { uint64_value: 10 } } } } ScanSettings { MaxBatchRows: 1 } } Progress: 100 } BUILDINDEX RESPONSE Get: NKikimrIndexBuilder.TEvGetResponse Status: SUCCESS IndexBuild { Id: 106 State: STATE_DONE Settings { source_path: "/MyRoot/ServerLessDB/Table" max_shards_in_flight: 2 column_build_operation { column { ColumnName: "DefaultValue" default_from_literal { type { type_id: UINT64 } value { uint64_value: 10 } } } } ScanSettings { MaxBatchRows: 1 } } Progress: 100 } >> Viewer::JsonStorageListingV1GroupIdFilter [GOOD] >> Viewer::JsonStorageListingV1NodeIdFilter |89.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/sys_view/partition_stats/ut/unittest >> DataShardSnapshots::LockedWriteCleanupOnCopyTable-UseSink [GOOD] >> DataShardSnapshots::DelayedWriteReadableAfterSplit ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_column_build/unittest >> ColumnBuildTest::ValidDefaultValue [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:128:2058] recipient: [1:108:2140] 2025-05-07T08:53:10.101095Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:53:10.101196Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:53:10.101241Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:53:10.101282Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:53:10.101336Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:53:10.101375Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:53:10.101438Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 
2025-05-07T08:53:10.101512Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:53:10.102385Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:53:10.102774Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:53:10.192918Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:53:10.193002Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:53:10.219016Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:53:10.219148Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:53:10.219334Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:53:10.230390Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:53:10.230954Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:53:10.231496Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:53:10.231833Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:53:10.234573Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:53:10.236306Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:53:10.236387Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:53:10.236444Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:53:10.236491Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:53:10.236543Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:53:10.236738Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:53:10.244964Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:239:2058] recipient: [1:15:2062] 2025-05-07T08:53:10.375574Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" 
OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:53:10.375861Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:53:10.376191Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:53:10.376498Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:53:10.376580Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:53:10.385102Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:53:10.385261Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:53:10.385499Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:53:10.385614Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:53:10.385672Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:53:10.385709Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:53:10.388515Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:53:10.388604Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:53:10.388647Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:53:10.391212Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:53:10.391296Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:53:10.391358Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:53:10.391447Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:53:10.412418Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: 
TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:53:10.415405Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:53:10.415685Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:53:10.416908Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:53:10.417101Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 130 RawX2: 4294969450 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:53:10.417159Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:53:10.417500Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:53:10.417562Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:53:10.417757Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:53:10.417922Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:53:10.421996Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:53:10.422075Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:53:10.422307Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:53:10.422397Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 
ecute, operationId: 281474976725761:0, at schemeshard: 72075186233409549 2025-05-07T08:53:16.084622Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_lock.cpp:30: [72075186233409549] TDropLock TPropose opId# 281474976725761:0 ProgressState 2025-05-07T08:53:16.084678Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 281474976725761 ready parts: 1/1 2025-05-07T08:53:16.084787Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72075186233409550 message:Transaction { AffectedSet { TabletId: 72075186233409549 Flags: 2 } ExecLevel: 0 TxId: 281474976725761 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72075186233409550 2025-05-07T08:53:16.090303Z node 1 :BUILD_INDEX INFO: schemeshard_build_index__progress.cpp:1105: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 106 Unlocking TBuildInfo{ IndexBuildId: 106, Uid: , DomainPathId: [OwnerId: 72075186233409549, LocalPathId: 1], TablePathId: [OwnerId: 72075186233409549, LocalPathId: 2], IndexType: EIndexTypeInvalid, IndexName: , State: Unlocking, IsCancellationRequested: 0, Issue: , SubscribersCount: 1, CreateSender: [1:1152:3023], AlterMainTableTxId: 281474976725757, AlterMainTableTxStatus: StatusAccepted, AlterMainTableTxDone: 1, LockTxId: 281474976725758, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976725759, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 450, ApplyTxId: 281474976725760, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976725761, UnlockTxStatus: StatusAccepted, UnlockTxDone: 0, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 101, upload bytes: 2424, read rows: 101, read bytes: 2424 }, Billed: { upload rows: 101, upload bytes: 2424, read rows: 101, read bytes: 2424 }} 2025-05-07T08:53:16.090864Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 281474976725761:4294967295 from tablet: 72075186233409549 to tablet: 72075186233409550 cookie: 0:281474976725761 msg type: 269090816 2025-05-07T08:53:16.090989Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 281474976725761, partId: 4294967295, tablet: 72075186233409550 2025-05-07T08:53:16.091216Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 281474976725761, at schemeshard: 72075186233409549 2025-05-07T08:53:16.091252Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 281474976725761, ready parts: 0/1, is published: true 2025-05-07T08:53:16.091301Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 281474976725761, at schemeshard: 72075186233409549 2025-05-07T08:53:16.106062Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269877763, Sender [1:1826:3689], Recipient [1:760:2649]: NKikimr::TEvTabletPipe::TEvClientDestroyed { TabletId: 72075186233409549 ClientId: [1:1826:3689] ServerId: [1:1828:3691] } 2025-05-07T08:53:16.106152Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3164: StateWork, processing event TEvTabletPipe::TEvClientDestroyed 2025-05-07T08:53:16.164912Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 650, transactions count in 
step: 1, at schemeshard: 72075186233409549 2025-05-07T08:53:16.165062Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 281474976725761 AckTo { RawX1: 0 RawX2: 0 } } Step: 650 MediatorID: 72075186233409551 TabletID: 72075186233409549, at schemeshard: 72075186233409549 2025-05-07T08:53:16.165127Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_lock.cpp:44: [72075186233409549] TDropLock TPropose opId# 281474976725761:0 HandleReply TEvOperationPlan: step# 650 2025-05-07T08:53:16.165176Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 281474976725761:0 128 -> 240 2025-05-07T08:53:16.168942Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 281474976725761:0, at schemeshard: 72075186233409549 2025-05-07T08:53:16.169043Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72075186233409549] TDone opId# 281474976725761:0 ProgressState 2025-05-07T08:53:16.169136Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976725761:0 progress is 1/1 2025-05-07T08:53:16.169168Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 281474976725761 ready parts: 1/1 2025-05-07T08:53:16.169209Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976725761:0 progress is 1/1 2025-05-07T08:53:16.169242Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 281474976725761 ready parts: 1/1 2025-05-07T08:53:16.169274Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 281474976725761, ready parts: 1/1, is published: true 2025-05-07T08:53:16.169353Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:572:2510] message: TxId: 281474976725761 2025-05-07T08:53:16.169401Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 281474976725761 ready parts: 1/1 2025-05-07T08:53:16.169431Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 281474976725761:0 2025-05-07T08:53:16.169458Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 281474976725761:0 2025-05-07T08:53:16.169537Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72075186233409549, LocalPathId: 2] was 3 2025-05-07T08:53:16.179278Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6706: Handle: TEvNotifyTxCompletionResult: txId# 281474976725761 2025-05-07T08:53:16.179426Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6708: Message: TxId: 281474976725761 2025-05-07T08:53:16.179521Z node 1 :BUILD_INDEX INFO: schemeshard_build_index__progress.cpp:2321: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxReply : TEvNotifyTxCompletionResult, txId# 281474976725761, buildInfoId: 106 2025-05-07T08:53:16.179639Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:2324: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxReply : TEvNotifyTxCompletionResult, txId# 281474976725761, buildInfo: TBuildInfo{ IndexBuildId: 106, Uid: , DomainPathId: [OwnerId: 72075186233409549, 
LocalPathId: 1], TablePathId: [OwnerId: 72075186233409549, LocalPathId: 2], IndexType: EIndexTypeInvalid, IndexName: , State: Unlocking, IsCancellationRequested: 0, Issue: , SubscribersCount: 1, CreateSender: [1:1152:3023], AlterMainTableTxId: 281474976725757, AlterMainTableTxStatus: StatusAccepted, AlterMainTableTxDone: 1, LockTxId: 281474976725758, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976725759, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 450, ApplyTxId: 281474976725760, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976725761, UnlockTxStatus: StatusAccepted, UnlockTxDone: 0, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 101, upload bytes: 2424, read rows: 101, read bytes: 2424 }, Billed: { upload rows: 101, upload bytes: 2424, read rows: 101, read bytes: 2424 }} 2025-05-07T08:53:16.187750Z node 1 :BUILD_INDEX INFO: schemeshard_build_index__progress.cpp:1105: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 106 Unlocking TBuildInfo{ IndexBuildId: 106, Uid: , DomainPathId: [OwnerId: 72075186233409549, LocalPathId: 1], TablePathId: [OwnerId: 72075186233409549, LocalPathId: 2], IndexType: EIndexTypeInvalid, IndexName: , State: Unlocking, IsCancellationRequested: 0, Issue: , SubscribersCount: 1, CreateSender: [1:1152:3023], AlterMainTableTxId: 281474976725757, AlterMainTableTxStatus: StatusAccepted, AlterMainTableTxDone: 1, LockTxId: 281474976725758, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976725759, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 450, ApplyTxId: 281474976725760, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976725761, UnlockTxStatus: StatusAccepted, UnlockTxDone: 1, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 101, upload bytes: 2424, read rows: 101, read bytes: 2424 }, Billed: { upload rows: 101, upload bytes: 2424, read rows: 101, read bytes: 2424 }} 2025-05-07T08:53:16.187848Z node 1 :BUILD_INDEX INFO: schemeshard_build_index_tx_base.cpp:25: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: Change state from Unlocking to Done 2025-05-07T08:53:16.191422Z node 1 :BUILD_INDEX INFO: schemeshard_build_index__progress.cpp:1105: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 106 Done TBuildInfo{ IndexBuildId: 106, Uid: , DomainPathId: [OwnerId: 72075186233409549, LocalPathId: 1], TablePathId: [OwnerId: 72075186233409549, LocalPathId: 2], IndexType: EIndexTypeInvalid, IndexName: , State: Done, IsCancellationRequested: 0, Issue: , SubscribersCount: 1, CreateSender: [1:1152:3023], AlterMainTableTxId: 281474976725757, AlterMainTableTxStatus: StatusAccepted, AlterMainTableTxDone: 1, LockTxId: 281474976725758, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976725759, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 450, ApplyTxId: 281474976725760, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976725761, UnlockTxStatus: StatusAccepted, UnlockTxDone: 1, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 101, upload bytes: 2424, read rows: 101, read bytes: 2424 }, Billed: { upload rows: 101, upload bytes: 2424, read rows: 101, read bytes: 2424 }} 2025-05-07T08:53:16.191547Z node 1 :BUILD_INDEX TRACE: schemeshard_build_index_tx_base.cpp:325: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TIndexBuildInfo SendNotifications: : id# 106, subscribers count# 1 2025-05-07T08:53:16.191748Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 106: got EvNotifyTxCompletionResult 2025-05-07T08:53:16.191797Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 106: satisfy waiter [1:1172:3043] TestWaitNotification: OK eventTxId 106 2025-05-07T08:53:16.194762Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index__get.cpp:19: TIndexBuilder::TXTYPE_GET_INDEX_BUILD: DoExecute DatabaseName: "/MyRoot/ServerLessDB" IndexBuildId: 106 2025-05-07T08:53:16.195137Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index_tx_base.h:93: TIndexBuilder::TXTYPE_GET_INDEX_BUILD: Reply Status: SUCCESS IndexBuild { Id: 106 State: STATE_DONE Settings { source_path: "/MyRoot/ServerLessDB/Table" max_shards_in_flight: 2 column_build_operation { column { ColumnName: "ColumnValue" default_from_literal { type { type_id: UINT64 } value { uint64_value: 1111 } } } } ScanSettings { MaxBatchRows: 1 } } Progress: 100 } BUILDINDEX RESPONSE Get: NKikimrIndexBuilder.TEvGetResponse Status: SUCCESS IndexBuild { Id: 106 State: STATE_DONE Settings { source_path: "/MyRoot/ServerLessDB/Table" max_shards_in_flight: 2 column_build_operation { column { ColumnName: "ColumnValue" default_from_literal { type { type_id: UINT64 } value { uint64_value: 1111 } } } } ScanSettings { MaxBatchRows: 1 } } Progress: 100 } >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-54 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-55 |90.0%| [TA] {RESULT} $(B)/ydb/core/base/ut_board_subscriber/test-results/unittest/{meta.json ... results_accumulator.log} >> TKeyValueTest::TestWriteReadRangeDataLimitThenLimitWorks >> TDatabaseResolverTests::ResolveTwoDataStreamsFirstError >> TKeyValueTest::TestWriteReadDeleteWithRestartsThenResponseOk >> TDatabaseResolverTests::ResolveTwoDataStreamsFirstError [GOOD] >> TKeyValueTest::TestWrite200KDeleteThenResponseErrorNewApi >> KqpPg::SelectIndex-useSink [GOOD] >> KqpPg::TableDeleteAllData+useSink >> KeyValueReadStorage::ReadOk [GOOD] >> KeyValueReadStorage::ReadNotWholeBlobOk [GOOD] >> KeyValueReadStorage::ReadOneItemError [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/actors/ut/unittest >> TDatabaseResolverTests::ResolveTwoDataStreamsFirstError [GOOD] Test command err: 2025-05-07T08:53:18.903806Z node 1 :FQ_DATABASE_RESOLVER ERROR: database_resolver.cpp:175: TraceId: traceId ResponseProcessor::Handle(HttpIncomingResponse): error=Error while trying to resolve managed DataStreams database with id etn021us5r9rhld1vgb1 via HTTP request to: endpoint 'ydbc.ydb.cloud.yandex.net:8789', url '/ydbc/cloud-prod/database?databaseId=etn021us5r9rhld1vgb1': Status: 404 Response body: {"message":"Database not found"} >> TKeyValueTest::TestInlineWriteReadRangeLimitThenLimitWorks ------- [TM] {asan, default-linux-x86_64, release} ydb/core/keyvalue/ut/unittest >> KeyValueReadStorage::ReadOneItemError [GOOD] Test command err: 2025-05-07T08:53:19.437563Z 1 00h00m00.000000s :KEYVALUE INFO: {KV20@keyvalue_storage_read_request.cpp:209} Received GetResult KeyValue# 1 GroupId# 3 Status# OK ResponseSz# 1 ErrorReason# ReadRequestCookie# 0 2025-05-07T08:53:19.442099Z 1 00h00m00.000000s :KEYVALUE INFO: {KV34@keyvalue_storage_read_request.cpp:492} Send respose KeyValue# 1 Status# RSTATUS_OK ReadRequestCookie# 0 2025-05-07T08:53:19.449208Z 1 00h00m00.000000s :KEYVALUE INFO: {KV20@keyvalue_storage_read_request.cpp:209} Received GetResult KeyValue# 1 GroupId# 3 Status# OK ResponseSz# 1 ErrorReason# ReadRequestCookie# 0 
2025-05-07T08:53:19.449296Z 1 00h00m00.000000s :KEYVALUE INFO: {KV34@keyvalue_storage_read_request.cpp:492} Send respose KeyValue# 1 Status# RSTATUS_OK ReadRequestCookie# 0 2025-05-07T08:53:19.458291Z 1 00h00m00.000000s :KEYVALUE INFO: {KV20@keyvalue_storage_read_request.cpp:209} Received GetResult KeyValue# 1 GroupId# 3 Status# OK ResponseSz# 1 ErrorReason# ReadRequestCookie# 0 2025-05-07T08:53:19.458449Z 1 00h00m00.000000s :KEYVALUE ERROR: {KV317@keyvalue_storage_read_request.cpp:310} Unexpected EvGetResult. KeyValue# 1 Status# OK Id# [1:2:3:2:0:1:0] ResponseStatus# ERROR Deadline# 586524-01-19T08:01:49.551615Z Now# 1970-01-01T00:00:00.000000Z SentAt# 1970-01-01T00:00:00.000000Z GotAt# 2025-05-07T08:53:19.457824Z ErrorReason# >> TKeyValueTest::TestWriteReadPatchRead >> TKeyValueTest::TestWriteReadPatchRead [GOOD] >> TKeyValueTest::TestWriteReadDeleteWithRestartsThenResponseOkWithNewApi >> KqpVectorIndexes::OrderByCosineDistanceNotNullableLevel3 [GOOD] >> TYardTest::TestLotsOfTinyAsyncLogLatency [GOOD] >> TYardTest::TestLogLatency >> ColumnBuildTest::BuildColumnDoesnotRestoreDeletedRows [GOOD] >> ColumnBuildTest::CancelBuild [GOOD] |90.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/grpc_streaming/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_column_build/unittest >> ColumnBuildTest::BuildColumnDoesnotRestoreDeletedRows [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:128:2058] recipient: [1:108:2140] 2025-05-07T08:53:11.699842Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:53:11.699933Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:53:11.699969Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:53:11.700004Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:53:11.700059Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:53:11.700091Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:53:11.700142Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:53:11.700213Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:53:11.700912Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: 
HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:53:11.701233Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:53:11.832118Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:53:11.832183Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:53:11.855314Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:53:11.855447Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:53:11.855627Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:53:11.875181Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:53:11.875755Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:53:11.876407Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:53:11.876699Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:53:11.882291Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:53:11.883946Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:53:11.884016Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:53:11.884068Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:53:11.884126Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:53:11.884182Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:53:11.884350Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:53:11.901987Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:239:2058] recipient: [1:15:2062] 2025-05-07T08:53:12.059220Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:53:12.059464Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 
2025-05-07T08:53:12.059721Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:53:12.059984Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:53:12.060046Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:53:12.066935Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:53:12.067088Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:53:12.067331Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:53:12.067417Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:53:12.067465Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:53:12.067502Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:53:12.074915Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:53:12.074993Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:53:12.075067Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:53:12.082927Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:53:12.083012Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:53:12.083062Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:53:12.083135Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:53:12.097812Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:53:12.110856Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 
72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:53:12.111091Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:53:12.112147Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:53:12.112329Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 130 RawX2: 4294969450 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:53:12.112395Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:53:12.112709Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:53:12.112778Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:53:12.112982Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:53:12.113075Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:53:12.119073Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:53:12.119135Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:53:12.119345Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:53:12.119410Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 
lMKQL Program { Program { Text: "\n (\n (let key \'(\'(\'key (Uint64 \'28))))\n (let select \'(\'key))\n (return (AsList\n (SetResult \'Result (SelectRow \'__user__Table key select))\n ))\n )\n " } } 2025-05-07T08:53:20.370530Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 268830210, Sender [1:2057:3920], Recipient [1:760:2649]: NKikimrTabletTxBase.TEvLocalMKQL Program { Program { Text: "\n (\n (let key \'(\'(\'key (Uint64 \'29))))\n (let select \'(\'key))\n (return (AsList\n (SetResult \'Result (SelectRow \'__user__Table key select))\n ))\n )\n " } } 2025-05-07T08:53:20.380230Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 268830210, Sender [1:2058:3921], Recipient [1:760:2649]: NKikimrTabletTxBase.TEvLocalMKQL Program { Program { Text: "\n (\n (let key \'(\'(\'key (Uint64 \'30))))\n (let select \'(\'key))\n (return (AsList\n (SetResult \'Result (SelectRow \'__user__Table key select))\n ))\n )\n " } } 2025-05-07T08:53:20.389630Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 268830210, Sender [1:2059:3922], Recipient [1:760:2649]: NKikimrTabletTxBase.TEvLocalMKQL Program { Program { Text: "\n (\n (let key \'(\'(\'key (Uint64 \'31))))\n (let select \'(\'key))\n (return (AsList\n (SetResult \'Result (SelectRow \'__user__Table key select))\n ))\n )\n " } } 2025-05-07T08:53:20.402718Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 268830210, Sender [1:2060:3923], Recipient [1:760:2649]: NKikimrTabletTxBase.TEvLocalMKQL Program { Program { Text: "\n (\n (let key \'(\'(\'key (Uint64 \'32))))\n (let select \'(\'key))\n (return (AsList\n (SetResult \'Result (SelectRow \'__user__Table key select))\n ))\n )\n " } } 2025-05-07T08:53:20.414548Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 268830210, Sender [1:2061:3924], Recipient [1:760:2649]: NKikimrTabletTxBase.TEvLocalMKQL Program { Program { Text: "\n (\n (let key \'(\'(\'key (Uint64 \'33))))\n (let select \'(\'key))\n (return (AsList\n (SetResult \'Result (SelectRow \'__user__Table key select))\n ))\n )\n " } } 2025-05-07T08:53:20.423434Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 268830210, Sender [1:2062:3925], Recipient [1:760:2649]: NKikimrTabletTxBase.TEvLocalMKQL Program { Program { Text: "\n (\n (let key \'(\'(\'key (Uint64 \'34))))\n (let select \'(\'key))\n (return (AsList\n (SetResult \'Result (SelectRow \'__user__Table key select))\n ))\n )\n " } } 2025-05-07T08:53:20.436361Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 268830210, Sender [1:2063:3926], Recipient [1:760:2649]: NKikimrTabletTxBase.TEvLocalMKQL Program { Program { Text: "\n (\n (let key \'(\'(\'key (Uint64 \'35))))\n (let select \'(\'key))\n (return (AsList\n (SetResult \'Result (SelectRow \'__user__Table key select))\n ))\n )\n " } } 2025-05-07T08:53:20.448480Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 268830210, Sender [1:2064:3927], Recipient [1:760:2649]: NKikimrTabletTxBase.TEvLocalMKQL Program { Program { Text: "\n (\n (let key \'(\'(\'key (Uint64 \'36))))\n (let select \'(\'key))\n (return (AsList\n (SetResult \'Result (SelectRow \'__user__Table key select))\n ))\n )\n " } } 2025-05-07T08:53:20.457112Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 268830210, Sender [1:2065:3928], Recipient [1:760:2649]: NKikimrTabletTxBase.TEvLocalMKQL Program { Program { Text: "\n (\n (let 
key \'(\'(\'key (Uint64 \'37))))\n (let select \'(\'key))\n (return (AsList\n (SetResult \'Result (SelectRow \'__user__Table key select))\n ))\n )\n " } } 2025-05-07T08:53:20.465541Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 268830210, Sender [1:2066:3929], Recipient [1:760:2649]: NKikimrTabletTxBase.TEvLocalMKQL Program { Program { Text: "\n (\n (let key \'(\'(\'key (Uint64 \'38))))\n (let select \'(\'key))\n (return (AsList\n (SetResult \'Result (SelectRow \'__user__Table key select))\n ))\n )\n " } } 2025-05-07T08:53:20.478298Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 268830210, Sender [1:2067:3930], Recipient [1:760:2649]: NKikimrTabletTxBase.TEvLocalMKQL Program { Program { Text: "\n (\n (let key \'(\'(\'key (Uint64 \'39))))\n (let select \'(\'key))\n (return (AsList\n (SetResult \'Result (SelectRow \'__user__Table key select))\n ))\n )\n " } } 2025-05-07T08:53:20.487758Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 268830210, Sender [1:2068:3931], Recipient [1:760:2649]: NKikimrTabletTxBase.TEvLocalMKQL Program { Program { Text: "\n (\n (let key \'(\'(\'key (Uint64 \'40))))\n (let select \'(\'key))\n (return (AsList\n (SetResult \'Result (SelectRow \'__user__Table key select))\n ))\n )\n " } } 2025-05-07T08:53:20.496566Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 268830210, Sender [1:2069:3932], Recipient [1:760:2649]: NKikimrTabletTxBase.TEvLocalMKQL Program { Program { Text: "\n (\n (let key \'(\'(\'key (Uint64 \'41))))\n (let select \'(\'key))\n (return (AsList\n (SetResult \'Result (SelectRow \'__user__Table key select))\n ))\n )\n " } } 2025-05-07T08:53:20.509671Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 268830210, Sender [1:2070:3933], Recipient [1:760:2649]: NKikimrTabletTxBase.TEvLocalMKQL Program { Program { Text: "\n (\n (let key \'(\'(\'key (Uint64 \'42))))\n (let select \'(\'key))\n (return (AsList\n (SetResult \'Result (SelectRow \'__user__Table key select))\n ))\n )\n " } } 2025-05-07T08:53:20.519519Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 268830210, Sender [1:2071:3934], Recipient [1:760:2649]: NKikimrTabletTxBase.TEvLocalMKQL Program { Program { Text: "\n (\n (let key \'(\'(\'key (Uint64 \'43))))\n (let select \'(\'key))\n (return (AsList\n (SetResult \'Result (SelectRow \'__user__Table key select))\n ))\n )\n " } } 2025-05-07T08:53:20.528659Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 268830210, Sender [1:2072:3935], Recipient [1:760:2649]: NKikimrTabletTxBase.TEvLocalMKQL Program { Program { Text: "\n (\n (let key \'(\'(\'key (Uint64 \'44))))\n (let select \'(\'key))\n (return (AsList\n (SetResult \'Result (SelectRow \'__user__Table key select))\n ))\n )\n " } } 2025-05-07T08:53:20.538680Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 268830210, Sender [1:2073:3936], Recipient [1:760:2649]: NKikimrTabletTxBase.TEvLocalMKQL Program { Program { Text: "\n (\n (let key \'(\'(\'key (Uint64 \'45))))\n (let select \'(\'key))\n (return (AsList\n (SetResult \'Result (SelectRow \'__user__Table key select))\n ))\n )\n " } } 2025-05-07T08:53:20.554588Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 268830210, Sender [1:2074:3937], Recipient [1:760:2649]: NKikimrTabletTxBase.TEvLocalMKQL Program { Program { Text: "\n (\n (let key \'(\'(\'key (Uint64 \'46))))\n (let 
select \'(\'key))\n (return (AsList\n (SetResult \'Result (SelectRow \'__user__Table key select))\n ))\n )\n " } } 2025-05-07T08:53:20.563600Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 268830210, Sender [1:2075:3938], Recipient [1:760:2649]: NKikimrTabletTxBase.TEvLocalMKQL Program { Program { Text: "\n (\n (let key \'(\'(\'key (Uint64 \'47))))\n (let select \'(\'key))\n (return (AsList\n (SetResult \'Result (SelectRow \'__user__Table key select))\n ))\n )\n " } } 2025-05-07T08:53:20.572857Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 268830210, Sender [1:2076:3939], Recipient [1:760:2649]: NKikimrTabletTxBase.TEvLocalMKQL Program { Program { Text: "\n (\n (let key \'(\'(\'key (Uint64 \'48))))\n (let select \'(\'key))\n (return (AsList\n (SetResult \'Result (SelectRow \'__user__Table key select))\n ))\n )\n " } } 2025-05-07T08:53:20.583594Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 268830210, Sender [1:2077:3940], Recipient [1:760:2649]: NKikimrTabletTxBase.TEvLocalMKQL Program { Program { Text: "\n (\n (let key \'(\'(\'key (Uint64 \'49))))\n (let select \'(\'key))\n (return (AsList\n (SetResult \'Result (SelectRow \'__user__Table key select))\n ))\n )\n " } } 2025-05-07T08:53:20.592750Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 268830210, Sender [1:2078:3941], Recipient [1:760:2649]: NKikimrTabletTxBase.TEvLocalMKQL Program { Program { Text: "\n (\n (let key \'(\'(\'key (Uint64 \'50))))\n (let select \'(\'key))\n (return (AsList\n (SetResult \'Result (SelectRow \'__user__Table key select))\n ))\n )\n " } } >> TYardTest::TestLogLatency [GOOD] >> TYardTest::TestMultiYardFirstRecordToKeep >> TColumnShardTestReadWrite::CompactionInGranule_PKInt64 [GOOD] >> KqpPg::LongDomainName [GOOD] >> TGRpcStreamingTest::ReadFinish ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_column_build/unittest >> ColumnBuildTest::CancelBuild [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140] 2025-05-07T08:53:15.613545Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:53:15.613651Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:53:15.613703Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:53:15.613738Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:53:15.613780Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:53:15.613832Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:53:15.613883Z 
node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:53:15.613946Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:53:15.614699Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:53:15.615074Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:53:15.711738Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:53:15.711801Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:53:15.731754Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:53:15.731989Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:53:15.732205Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:53:15.738514Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:53:15.738875Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:53:15.739551Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:53:15.739739Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:53:15.743021Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:53:15.744403Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:53:15.744531Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:53:15.744609Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:53:15.744655Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:53:15.744695Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:53:15.744909Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:53:15.752607Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: 
[1:15:2062] 2025-05-07T08:53:15.878180Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:53:15.878472Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:53:15.878709Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:53:15.878945Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:53:15.879001Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:53:15.886957Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:53:15.887098Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:53:15.887328Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:53:15.887392Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:53:15.887434Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:53:15.887467Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:53:15.894897Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:53:15.894998Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:53:15.895042Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:53:15.902858Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:53:15.902924Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:53:15.902974Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:53:15.903040Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:53:15.906817Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:53:15.914813Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:53:15.915042Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:53:15.916081Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:53:15.916224Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:53:15.916285Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:53:15.916581Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:53:15.916639Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:53:15.916811Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:53:15.916890Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:53:15.927084Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:53:15.927144Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:53:15.927370Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:53:15.927412Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 
ARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 281474976710761, at schemeshard: 72057594046678944 FAKE_COORDINATOR: Add transaction: 281474976710761 at step: 5000007 FAKE_COORDINATOR: advance: minStep5000007 State->FrontStep: 5000006 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710761 at step: 5000007 2025-05-07T08:53:20.932325Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000007, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:53:20.932476Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 281474976710761 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000007 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:53:20.932536Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_lock.cpp:44: [72057594046678944] TDropLock TPropose opId# 281474976710761:0 HandleReply TEvOperationPlan: step# 5000007 2025-05-07T08:53:20.932582Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 281474976710761:0 128 -> 240 2025-05-07T08:53:20.936435Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 281474976710761:0, at schemeshard: 72057594046678944 2025-05-07T08:53:20.936516Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046678944] TDone opId# 281474976710761:0 ProgressState 2025-05-07T08:53:20.936627Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976710761:0 progress is 1/1 2025-05-07T08:53:20.936661Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 281474976710761 ready parts: 1/1 2025-05-07T08:53:20.936701Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976710761:0 progress is 1/1 2025-05-07T08:53:20.936729Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 281474976710761 ready parts: 1/1 2025-05-07T08:53:20.936765Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 281474976710761, ready parts: 1/1, is published: true 2025-05-07T08:53:20.936884Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:124:2150] message: TxId: 281474976710761 2025-05-07T08:53:20.936942Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 281474976710761 ready parts: 1/1 2025-05-07T08:53:20.936987Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 281474976710761:0 2025-05-07T08:53:20.937028Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 281474976710761:0 2025-05-07T08:53:20.937123Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 12 FAKE_COORDINATOR: Erasing txId 281474976710761 2025-05-07T08:53:20.942182Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6706: Handle: TEvNotifyTxCompletionResult: txId# 281474976710761 
2025-05-07T08:53:20.942335Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6708: Message: TxId: 281474976710761 2025-05-07T08:53:20.942439Z node 1 :BUILD_INDEX INFO: schemeshard_build_index__progress.cpp:2321: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxReply : TEvNotifyTxCompletionResult, txId# 281474976710761, buildInfoId: 102 2025-05-07T08:53:20.942556Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:2324: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxReply : TEvNotifyTxCompletionResult, txId# 281474976710761, buildInfo: TBuildInfo{ IndexBuildId: 102, Uid: , DomainPathId: [OwnerId: 72057594046678944, LocalPathId: 1], TablePathId: [OwnerId: 72057594046678944, LocalPathId: 2], IndexType: EIndexTypeInvalid, IndexName: , State: Cancellation_Unlocking, IsCancellationRequested: 1, Issue: , SubscribersCount: 1, CreateSender: [1:1169:3023], AlterMainTableTxId: 281474976710757, AlterMainTableTxStatus: StatusAccepted, AlterMainTableTxDone: 1, LockTxId: 281474976710758, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976710759, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 0, ApplyTxId: 281474976710760, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976710761, UnlockTxStatus: StatusAccepted, UnlockTxDone: 0, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }, Billed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }} 2025-05-07T08:53:20.945695Z node 1 :BUILD_INDEX INFO: schemeshard_build_index__progress.cpp:1105: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 102 Cancellation_Unlocking TBuildInfo{ IndexBuildId: 102, Uid: , DomainPathId: [OwnerId: 72057594046678944, LocalPathId: 1], TablePathId: [OwnerId: 72057594046678944, LocalPathId: 2], IndexType: EIndexTypeInvalid, IndexName: , State: Cancellation_Unlocking, IsCancellationRequested: 1, Issue: , SubscribersCount: 1, CreateSender: [1:1169:3023], AlterMainTableTxId: 281474976710757, AlterMainTableTxStatus: StatusAccepted, AlterMainTableTxDone: 1, LockTxId: 281474976710758, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976710759, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 0, ApplyTxId: 281474976710760, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976710761, UnlockTxStatus: StatusAccepted, UnlockTxDone: 1, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }, Billed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }} 2025-05-07T08:53:20.945804Z node 1 :BUILD_INDEX INFO: schemeshard_build_index_tx_base.cpp:25: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: Change state from Cancellation_Unlocking to Cancelled 2025-05-07T08:53:20.948837Z node 1 :BUILD_INDEX INFO: schemeshard_build_index__progress.cpp:1105: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 102 Cancelled TBuildInfo{ IndexBuildId: 102, Uid: , DomainPathId: [OwnerId: 72057594046678944, LocalPathId: 1], TablePathId: [OwnerId: 72057594046678944, LocalPathId: 2], IndexType: EIndexTypeInvalid, IndexName: , State: Cancelled, IsCancellationRequested: 1, Issue: , SubscribersCount: 1, CreateSender: [1:1169:3023], AlterMainTableTxId: 281474976710757, AlterMainTableTxStatus: StatusAccepted, AlterMainTableTxDone: 1, LockTxId: 281474976710758, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976710759, InitiateTxStatus: StatusAccepted, 
InitiateTxDone: 1, SnapshotStepId: 0, ApplyTxId: 281474976710760, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976710761, UnlockTxStatus: StatusAccepted, UnlockTxDone: 1, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }, Billed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }} 2025-05-07T08:53:20.948937Z node 1 :BUILD_INDEX TRACE: schemeshard_build_index_tx_base.cpp:325: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TIndexBuildInfo SendNotifications: : id# 102, subscribers count# 1 2025-05-07T08:53:20.949142Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-05-07T08:53:20.949196Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:1193:3047] TestWaitNotification: OK eventTxId 102 2025-05-07T08:53:20.951879Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index__get.cpp:19: TIndexBuilder::TXTYPE_GET_INDEX_BUILD: DoExecute DatabaseName: "/MyRoot" IndexBuildId: 102 2025-05-07T08:53:20.952232Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index_tx_base.h:93: TIndexBuilder::TXTYPE_GET_INDEX_BUILD: Reply Status: SUCCESS IndexBuild { Id: 102 State: STATE_CANCELLED Settings { source_path: "/MyRoot/Table" max_shards_in_flight: 2 column_build_operation { column { ColumnName: "DefaultValue" default_from_literal { type { type_id: UINT64 } value { uint64_value: 10 } } } } ScanSettings { MaxBatchRows: 1 } } Progress: 0 } BUILDINDEX RESPONSE Get: NKikimrIndexBuilder.TEvGetResponse Status: SUCCESS IndexBuild { Id: 102 State: STATE_CANCELLED Settings { source_path: "/MyRoot/Table" max_shards_in_flight: 2 column_build_operation { column { ColumnName: "DefaultValue" default_from_literal { type { type_id: UINT64 } value { uint64_value: 10 } } } } ScanSettings { MaxBatchRows: 1 } } Progress: 0 } 2025-05-07T08:53:20.954931Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T08:53:20.955252Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table" took 355us result status StatusSuccess 2025-05-07T08:53:20.955795Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Table" PathDescription { Self { Name: "Table" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 6 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 6 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 4 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "index" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 3 NotNull: false IsBuildInProgress: false } Columns { Name: "DefaultValue" Type: "Uint64" TypeId: 4 Id: 4 NotNull: false DefaultFromLiteral { 
type { type_id: UINT64 } value { uint64_value: 10 } } IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 4 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 10 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 10 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpVectorIndexes::OrderByCosineDistanceNotNullableLevel3 [GOOD] Test command err: Trying to start YDB, gRPC: 17263, MsgBus: 5593 2025-05-07T08:52:28.834788Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501624017051823311:2266];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:52:28.836822Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003c2e/r3tmp/tmpFZEwIt/pdisk_1.dat 2025-05-07T08:52:29.463983Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:52:29.464123Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:52:29.465896Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:52:29.520332Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 17263, node 1 2025-05-07T08:52:29.605650Z node 1 :GRPC_SERVER WARN: grpc_request_proxy.cpp:509: SchemeBoardDelete /Root Strong=0 2025-05-07T08:52:29.606194Z node 1 :GRPC_SERVER WARN: grpc_request_proxy.cpp:509: SchemeBoardDelete /Root Strong=0 2025-05-07T08:52:29.714489Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:52:29.714510Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:52:29.714517Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:52:29.714640Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:5593 TClient is 
connected to server localhost:5593 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:52:30.519814Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:52:30.539256Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T08:52:30.546497Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:52:30.743854Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:52:30.927030Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:52:31.029137Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:52:32.983222Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501624034231693956:2407], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:32.983339Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:33.336839Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T08:52:33.374140Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T08:52:33.412668Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T08:52:33.470846Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T08:52:33.505380Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T08:52:33.553248Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T08:52:33.598557Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T08:52:33.690699Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501624038526661915:2470], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:33.690778Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:33.691014Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501624038526661920:2473], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:33.695264Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-05-07T08:52:33.707875Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501624038526661922:2474], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T08:52:33.817756Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501624038526661973:3426] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:52:33.834057Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501624017051823311:2266];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:52:33.834160Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:52:35.061095Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269877761, Sender [1:7501624047116596847:3606], Recipient [1:7501624021346790848:2201]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-05-07T08:52:35.061137Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4937: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-05-07T08:52:35.061155Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5713: Pipe server connected, at tablet: 72057594046644480 2025-05-07T08:52:35.061199Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271122432, Sender [1:7501624047116596843:3603], Recipient [1:7501624021346790848:2201]: {TEvModifySchemeTransaction txid# 281474976710672 TabletId# 72057594046644480} 2025-05-07T08:52:35.061213Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4851: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-05-07T08:52:35.109718Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/Root" OperationType: ESchemeOpCreateTable CreateTable { Name: "TestTable" Columns { Name: "pk" Type: "Int64" NotNull: false } Columns { Name: "emb" Type: "String" NotNull: false } Columns { Name: "data" Type: "String" NotNull: false } KeyColumnNames: "pk" PartitionConfig { PartitioningPolicy { MinPartitionsCount: 3 } ColumnFamilies { Id: 0 StorageConfig { SysLog { PreferredPoolKind: "test" } Log { PreferredPoolKind: "test" } Data { PreferredPoolKind: "test" } } } } SplitBoundary { KeyPrefix { Tuple { Optional { Int64: 4 } } } } SplitBoundary { KeyPrefix { Tuple { Optional { Int64: 6 } } } } Temporary: false } CreateIndexedTable { } } TxId: 281474976710672 TabletId: 72057594046644480 PeerName: "ipv6:[::1]:40726" , at schemeshard: 72057594046644480 2025-05-07T08:52:35.110143Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_table.cpp:426: TCreateTable Propose, path: /Root/TestTable, opId: 281474976710672:0, at schemeshard: 72057594046644480 2025-05-07T08:52:35.110312Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_create_table.cpp:433: TCreateTable Propose, path: /Root/TestTable, opId: 281474976710672:0, schema: Name: "TestTable" Columns { Name: "pk" Type: "Int64" NotNull: false } Columns { Name: "emb" Type: "String" NotNull: false } Columns { Name: "data" Type: "String" NotNull: false } KeyColumnNames: "pk" PartitionConfig { PartitioningPolicy { MinPartition ... 
0: StateWork, received event# 2146435072, Sender [2:7501624197439269577:2145], Recipient [2:7501624197439269577:2145]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-05-07T08:53:19.488174Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4857: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-05-07T08:53:19.488213Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 281474976715772:0, at schemeshard: 72057594046644480 2025-05-07T08:53:19.488230Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046644480] TDone opId# 281474976715772:0 ProgressState 2025-05-07T08:53:19.488299Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-05-07T08:53:19.488313Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976715772:0 progress is 1/1 2025-05-07T08:53:19.488331Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 281474976715772 ready parts: 1/1 2025-05-07T08:53:19.488350Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976715772:0 progress is 1/1 2025-05-07T08:53:19.488358Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 281474976715772 ready parts: 1/1 2025-05-07T08:53:19.488373Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 281474976715772, ready parts: 1/1, is published: true 2025-05-07T08:53:19.488406Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [2:7501624197439269577:2145] message: TxId: 281474976715772 2025-05-07T08:53:19.488426Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 281474976715772 ready parts: 1/1 2025-05-07T08:53:19.488438Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 281474976715772:0 2025-05-07T08:53:19.488445Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 281474976715772:0 2025-05-07T08:53:19.488481Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 17] was 6 2025-05-07T08:53:19.489001Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-05-07T08:53:19.489049Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:630: Send to actor: [2:7501624197439269577:2145] msg type: 271124998 msg: NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976715772 at schemeshard: 72057594046644480 2025-05-07T08:53:19.489147Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271124998, Sender [2:7501624197439269577:2145], Recipient [2:7501624197439269577:2145]: NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976715772 2025-05-07T08:53:19.489164Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4997: StateWork, processing event TEvSchemeShard::TEvNotifyTxCompletionResult 2025-05-07T08:53:19.489181Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6706: Handle: 
TEvNotifyTxCompletionResult: txId# 281474976715772 2025-05-07T08:53:19.489198Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6708: Message: TxId: 281474976715772 2025-05-07T08:53:19.489241Z node 2 :BUILD_INDEX INFO: schemeshard_build_index__progress.cpp:2321: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxReply : TEvNotifyTxCompletionResult, txId# 281474976715772, buildInfoId: 281474976710674 2025-05-07T08:53:19.489302Z node 2 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:2324: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxReply : TEvNotifyTxCompletionResult, txId# 281474976715772, buildInfo: TBuildInfo{ IndexBuildId: 281474976710674, Uid: , DomainPathId: [OwnerId: 72057594046644480, LocalPathId: 1], TablePathId: [OwnerId: 72057594046644480, LocalPathId: 17], IndexType: EIndexTypeGlobalVectorKmeansTree, IndexName: index, IndexColumn: emb, State: Unlocking, IsCancellationRequested: 0, Issue: , SubscribersCount: 1, CreateSender: [2:7501624231799010487:2542], AlterMainTableTxId: 0, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 281474976715757, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976715758, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 0, ApplyTxId: 281474976715768, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976715772, UnlockTxStatus: StatusAccepted, UnlockTxDone: 0, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 40, upload bytes: 1049, read rows: 43, read bytes: 966 }, Billed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }} 2025-05-07T08:53:19.489357Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-05-07T08:53:19.489666Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-05-07T08:53:19.489776Z node 2 :BUILD_INDEX INFO: schemeshard_build_index__progress.cpp:1105: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 281474976710674 Unlocking TBuildInfo{ IndexBuildId: 281474976710674, Uid: , DomainPathId: [OwnerId: 72057594046644480, LocalPathId: 1], TablePathId: [OwnerId: 72057594046644480, LocalPathId: 17], IndexType: EIndexTypeGlobalVectorKmeansTree, IndexName: index, IndexColumn: emb, State: Unlocking, IsCancellationRequested: 0, Issue: , SubscribersCount: 1, CreateSender: [2:7501624231799010487:2542], AlterMainTableTxId: 0, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 281474976715757, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976715758, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 0, ApplyTxId: 281474976715768, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976715772, UnlockTxStatus: StatusAccepted, UnlockTxDone: 1, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 40, upload bytes: 1049, read rows: 43, read bytes: 966 }, Billed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }} 2025-05-07T08:53:19.489808Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-05-07T08:53:19.489823Z node 2 :BUILD_INDEX INFO: schemeshard_build_index_tx_base.cpp:25: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: Change state from Unlocking to Done 2025-05-07T08:53:19.490018Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: 
TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-05-07T08:53:19.490098Z node 2 :BUILD_INDEX INFO: schemeshard_build_index__progress.cpp:1105: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 281474976710674 Done TBuildInfo{ IndexBuildId: 281474976710674, Uid: , DomainPathId: [OwnerId: 72057594046644480, LocalPathId: 1], TablePathId: [OwnerId: 72057594046644480, LocalPathId: 17], IndexType: EIndexTypeGlobalVectorKmeansTree, IndexName: index, IndexColumn: emb, State: Done, IsCancellationRequested: 0, Issue: , SubscribersCount: 1, CreateSender: [2:7501624231799010487:2542], AlterMainTableTxId: 0, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 281474976715757, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976715758, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 0, ApplyTxId: 281474976715768, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976715772, UnlockTxStatus: StatusAccepted, UnlockTxDone: 1, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 40, upload bytes: 1049, read rows: 43, read bytes: 966 }, Billed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }} 2025-05-07T08:53:19.490113Z node 2 :BUILD_INDEX TRACE: schemeshard_build_index_tx_base.cpp:325: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TIndexBuildInfo SendNotifications: : id# 281474976710674, subscribers count# 1 2025-05-07T08:53:19.490131Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-05-07T08:53:19.490156Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-05-07T08:53:19.490196Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:630: Send to actor: [2:7501624231799010487:2542] msg type: 271124998 msg: NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976710674 at schemeshard: 72057594046644480 2025-05-07T08:53:19.490651Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 274792450, Sender [2:7501624231799010487:2542], Recipient [2:7501624197439269577:2145]: NKikimrIndexBuilder.TEvGetRequest DatabaseName: "/Root" IndexBuildId: 281474976710674 2025-05-07T08:53:19.490671Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4974: StateWork, processing event TEvIndexBuilder::TEvGetRequest 2025-05-07T08:53:19.490743Z node 2 :BUILD_INDEX DEBUG: schemeshard_build_index__get.cpp:19: TIndexBuilder::TXTYPE_GET_INDEX_BUILD: DoExecute DatabaseName: "/Root" IndexBuildId: 281474976710674 2025-05-07T08:53:19.490934Z node 2 :BUILD_INDEX DEBUG: schemeshard_build_index_tx_base.h:93: TIndexBuilder::TXTYPE_GET_INDEX_BUILD: Reply Status: SUCCESS IndexBuild { Id: 281474976710674 State: STATE_DONE Settings { source_path: "/Root/TestTable" index { name: "index" index_columns: "emb" global_vector_kmeans_tree_index { } } max_shards_in_flight: 32 ScanSettings { } } Progress: 100 } 2025-05-07T08:53:19.490950Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-05-07T08:53:19.491000Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-05-07T08:53:19.491090Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:630: Send to actor: [2:7501624231799010487:2542] msg 
type: 274792451 msg: NKikimrIndexBuilder.TEvGetResponse Status: SUCCESS IndexBuild { Id: 281474976710674 State: STATE_DONE Settings { source_path: "/Root/TestTable" index { name: "index" index_columns: "emb" global_vector_kmeans_tree_index { } } max_shards_in_flight: 32 ScanSettings { } } Progress: 100 } at schemeshard: 72057594046644480 2025-05-07T08:53:19.496798Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269877764, Sender [2:7501624231799010490:3728], Recipient [2:7501624197439269577:2145]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-05-07T08:53:19.496835Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4938: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-05-07T08:53:19.496847Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5761: Server pipe is reset, at schemeshard: 72057594046644480 2025-05-07T08:53:19.512056Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271122945, Sender [2:7501624236093978775:4491], Recipient [2:7501624197439269577:2145]: NKikimrSchemeOp.TDescribePath Path: "/Root/TestTable" Options { ShowPrivateTable: false } 2025-05-07T08:53:19.512101Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4852: StateWork, processing event TEvSchemeShard::TEvDescribeScheme >> KqpPg::CheckPgAutoParams-useSink [GOOD] >> TYardTest::TestMultiYardFirstRecordToKeep [GOOD] >> TYardTest::TestLogOverwriteRestarts |90.0%| [TA] $(B)/ydb/core/tx/schemeshard/ut_column_build/test-results/unittest/{meta.json ... results_accumulator.log} |90.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/grpc_streaming/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/pg/unittest >> KqpPg::DeleteWithQueryService-useSink [GOOD] Test command err: Trying to start YDB, gRPC: 3796, MsgBus: 14601 2025-05-07T08:50:53.224469Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501623610689786264:2203];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:50:53.226479Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002091/r3tmp/tmpDU8yEz/pdisk_1.dat 2025-05-07T08:50:53.843182Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:50:53.857959Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:50:53.861186Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:50:53.864324Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 3796, node 1 2025-05-07T08:50:54.154039Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:50:54.154070Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:50:54.154113Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:50:54.170174Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 
TClient is connected to server localhost:14601 TClient is connected to server localhost:14601 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:50:54.969721Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:50:55.001311Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T08:50:57.356268Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623627869655964:2332], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:50:57.356392Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:50:57.411394Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 2025-05-07T08:50:57.547393Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623627869656100:2342], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:50:57.547483Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:50:57.547742Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623627869656105:2345], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:50:57.550748Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480 2025-05-07T08:50:57.559101Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710659, at schemeshard: 72057594046644480 2025-05-07T08:50:57.559404Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501623627869656107:2346], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-05-07T08:50:57.617079Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501623627869656158:2416] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 1 1 2025-05-07T08:50:58.212114Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501623610689786264:2203];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:50:58.212193Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 1 Trying to start YDB, gRPC: 22515, MsgBus: 20091 2025-05-07T08:50:59.335065Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7501623634235563929:2205];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002091/r3tmp/tmpA0x1Cs/pdisk_1.dat 2025-05-07T08:50:59.432317Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-05-07T08:50:59.464748Z node 2 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:50:59.485895Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:50:59.486002Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:50:59.488891Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 22515, node 2 2025-05-07T08:50:59.538529Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:50:59.538562Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:50:59.538570Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:50:59.538712Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:20091 TClient is connected to server localhost:20091 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:51:00.053632Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:51:00.073021Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-05-07T08:51:02.758120Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7501623647120466321:2331], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:51:02.758216Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:51:02.770525Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-05-07T08:51:02.869813Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7501623647120466457:2341], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:51:02.869888Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:51:02.870249Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7501623647120466462:2344], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have acces ... T08:52:19.913149Z node 11 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(11, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:52:19.923568Z node 11 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(11, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 29108, node 11 2025-05-07T08:52:20.230817Z node 11 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:52:20.230843Z node 11 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:52:20.230858Z node 11 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:52:20.231051Z node 11 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:21590 TClient is connected to server localhost:21590 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:52:22.008216Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:52:24.306164Z node 11 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[11:7501623979597827308:2078];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:52:24.306272Z node 11 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:52:28.722187Z node 11 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [11:7501624018252533621:2341], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:28.722342Z node 11 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:28.761528Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 2025-05-07T08:52:28.903649Z node 11 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [11:7501624018252533725:2352], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:28.903783Z node 11 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:28.904341Z node 11 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [11:7501624018252533730:2355], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:28.909793Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480 2025-05-07T08:52:28.958708Z node 11 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [11:7501624018252533732:2356], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-05-07T08:52:29.047367Z node 11 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [11:7501624022547501079:2408] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } Trying to start YDB, gRPC: 5502, MsgBus: 16085 2025-05-07T08:52:31.623996Z node 12 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[12:7501624029652870908:2125];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:52:31.624069Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002091/r3tmp/tmpj4RYdU/pdisk_1.dat 2025-05-07T08:52:32.153699Z node 12 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:52:32.181881Z node 12 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:52:32.183657Z node 12 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:52:32.187463Z node 12 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 5502, node 12 2025-05-07T08:52:32.406846Z node 12 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:52:32.406877Z node 12 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:52:32.406891Z node 12 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:52:32.407085Z node 12 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:16085 TClient is connected to server localhost:16085 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:52:34.004062Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T08:52:36.626358Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[12:7501624029652870908:2125];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:52:36.626493Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:52:40.041763Z node 12 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [12:7501624068307577188:2339], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:40.042070Z node 12 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:40.056213Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 2025-05-07T08:52:40.192979Z node 12 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [12:7501624068307577294:2350], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:40.193142Z node 12 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:40.193459Z node 12 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [12:7501624068307577299:2353], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:40.200165Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480 2025-05-07T08:52:40.223986Z node 12 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [12:7501624068307577301:2354], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-05-07T08:52:40.289655Z node 12 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [12:7501624068307577352:2406] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } |90.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/public/sdk/cpp/src/client/federated_topic/ut/ydb-public-sdk-cpp-src-client-federated_topic-ut >> TAsyncIndexTests::MergeIndexWithReboots[TabletReboots] [GOOD] |90.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/public/sdk/cpp/src/client/federated_topic/ut/ydb-public-sdk-cpp-src-client-federated_topic-ut |90.0%| [TA] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_column_build/test-results/unittest/{meta.json ... results_accumulator.log} |90.0%| [LD] {RESULT} $(B)/ydb/public/sdk/cpp/src/client/federated_topic/ut/ydb-public-sdk-cpp-src-client-federated_topic-ut |90.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/grpc_streaming/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/pg/unittest >> KqpPg::LongDomainName [GOOD] Test command err: Trying to start YDB, gRPC: 19519, MsgBus: 1370 2025-05-07T08:50:52.681530Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501623605655579300:2270];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:50:52.681590Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0020dd/r3tmp/tmps9Hgu5/pdisk_1.dat 2025-05-07T08:50:53.378728Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:50:53.380049Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:50:53.380153Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:50:53.384671Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 19519, node 1 2025-05-07T08:50:53.589553Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:50:53.589583Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:50:53.589599Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:50:53.589671Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:1370 TClient is connected to server localhost:1370 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:50:54.723998Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:50:54.755206Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T08:50:57.170892Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623627130416236:2335], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:50:57.171000Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623627130416228:2332], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:50:57.171071Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:50:57.175686Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480 2025-05-07T08:50:57.190182Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-05-07T08:50:57.190448Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501623627130416242:2336], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-05-07T08:50:57.268021Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501623627130416293:2336] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:50:57.681958Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501623605655579300:2270];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:50:57.682076Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 29429, MsgBus: 26098 2025-05-07T08:50:58.289709Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7501623632381894655:2064];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:50:58.289742Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0020dd/r3tmp/tmpYSi4or/pdisk_1.dat 2025-05-07T08:50:58.428922Z node 2 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:50:58.445371Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:50:58.445442Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:50:58.446757Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 29429, node 2 2025-05-07T08:50:58.574736Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:50:58.574758Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:50:58.574764Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:50:58.574857Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26098 TClient is connected to server localhost:26098 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:50:59.183136Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:50:59.190906Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-05-07T08:51:02.178634Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7501623649561764481:2331], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:51:02.178700Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7501623649561764493:2334], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:51:02.178753Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:51:02.184663Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480 2025-05-07T08:51:02.196817Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7501623649561764495:2335], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-05-07T08:51:02.249474Z node 2 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [2:7501623649561764546:2331] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } Trying to start YDB, gRPC: 5174, MsgBus: 12827 2025-05-07T08:51:03.113110Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7501623653122028045:2064];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:51:03.113164Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0020dd/r3tmp/tmpeJ33DB/pdisk_1.dat 2025-05-07T08:51:03.312117Z node 3 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:51:03.327480Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, ... at schemeshard: 72057594046644480 2025-05-07T08:53:05.125655Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[10:7501624156491437333:2069];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:53:05.125748Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:53:07.225708Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7501624186556209068:2338], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:53:07.225810Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7501624186556209042:2335], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:53:07.225911Z node 10 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:53:07.231092Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480 2025-05-07T08:53:07.265449Z node 10 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [10:7501624186556209078:2339], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-05-07T08:53:07.335644Z node 10 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [10:7501624186556209132:2344] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:53:07.378912Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 {"Plan":{"Plans":[{"PlanNodeId":4,"Plans":[{"PlanNodeId":3,"Plans":[{"PlanNodeId":2,"Plans":[{"Tables":["pgbench_accounts"],"PlanNodeId":1,"Operators":[{"Scan":"Parallel","E-Size":"No estimate","ReadRanges":["aid (null, 3)","aid [7, 7]"],"Name":"TableRangeScan","Inputs":[],"Path":"\/Root\/pgbench_accounts","E-Rows":"No estimate","Table":"pgbench_accounts","ReadRangesKeys":["aid"],"ReadColumns":["abalance"],"E-Cost":"No estimate","ReadRangesExpectedSize":2}],"Node Type":"TableRangeScan"}],"Node Type":"UnionAll","PlanNodeType":"Connection"}],"Operators":[{"Inputs":[{"ExternalPlanNodeId":2}],"SortBy":"input.abalance","Name":"Sort"}],"Node Type":"Sort"}],"Node Type":"ResultSet_1","PlanNodeType":"ResultSet"}],"Node Type":"Query","Stats":{"ResourcePoolId":"default"},"PlanNodeType":"Query"},"meta":{"version":"0.2","type":"query"},"tables":[{"name":"\/Root\/pgbench_accounts","reads":[{"columns":["abalance"],"scan_by":["aid (null, 3)","aid [7, 7]"],"type":"Scan"}]}],"SimplifiedPlan":{"PlanNodeId":0,"Plans":[{"PlanNodeId":1,"Plans":[{"PlanNodeId":2,"Plans":[{"PlanNodeId":4,"Operators":[{"Scan":"Parallel","E-Size":"No estimate","ReadRanges":["aid (null, 3)","aid [7, 7]"],"Name":"TableRangeScan","Path":"\/Root\/pgbench_accounts","E-Rows":"No estimate","Table":"pgbench_accounts","ReadRangesKeys":["aid"],"ReadColumns":["abalance"],"E-Cost":"No estimate","ReadRangesExpectedSize":2}],"Node Type":"TableRangeScan"}],"Operators":[{"SortBy":"input.abalance","Name":"Sort"}],"Node Type":"Sort"}],"Node Type":"ResultSet_1","PlanNodeType":"ResultSet"}],"Node Type":"Query","OptimizerStats":{"EquiJoinsCount":0,"JoinsCount":0},"PlanNodeType":"Query"}} {"Plan":{"Plans":[{"PlanNodeId":4,"Plans":[{"PlanNodeId":3,"Plans":[{"PlanNodeId":2,"Plans":[{"Tables":["pgbench_accounts"],"PlanNodeId":1,"Operators":[{"Scan":"Parallel","ReadRange":["aid (4, 3)"],"E-Size":"No estimate","Name":"TableRangeScan","Inputs":[],"Path":"\/Root\/pgbench_accounts","E-Rows":"No estimate","Table":"pgbench_accounts","ReadColumns":["abalance"],"E-Cost":"No estimate"}],"Node Type":"TableRangeScan"}],"Node Type":"UnionAll","PlanNodeType":"Connection"}],"Node Type":"Collect"}],"Node Type":"ResultSet","PlanNodeType":"ResultSet"}],"Node Type":"Query","Stats":{"ResourcePoolId":"default"},"PlanNodeType":"Query"},"meta":{"version":"0.2","type":"query"},"tables":[{"name":"\/Root\/pgbench_accounts","reads":[{"columns":["abalance"],"scan_by":["aid (4, 3)"],"type":"Scan"}]}],"SimplifiedPlan":{"PlanNodeId":0,"Plans":[{"PlanNodeId":1,"Plans":[{"PlanNodeId":4,"Operators":[{"Scan":"Parallel","ReadRange":["aid (4, 3)"],"E-Size":"No estimate","Name":"TableRangeScan","Path":"\/Root\/pgbench_accounts","E-Rows":"No estimate","Table":"pgbench_accounts","ReadColumns":["abalance"],"E-Cost":"No estimate"}],"Node Type":"TableRangeScan"}],"Node 
Type":"ResultSet","PlanNodeType":"ResultSet"}],"Node Type":"Query","OptimizerStats":{"EquiJoinsCount":0,"JoinsCount":0},"PlanNodeType":"Query"}} Trying to start YDB, gRPC: 6222, MsgBus: 21726 2025-05-07T08:53:12.124749Z node 11 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[11:7501624207069678129:2063];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:53:12.125546Z node 11 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0020dd/r3tmp/tmpp1xpkd/pdisk_1.dat 2025-05-07T08:53:12.440739Z node 11 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:53:12.488557Z node 11 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(11, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:53:12.488691Z node 11 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(11, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:53:12.492487Z node 11 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(11, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 6222, node 11 2025-05-07T08:53:12.686833Z node 11 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:53:12.686865Z node 11 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:53:12.686878Z node 11 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:53:12.687058Z node 11 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:21726 TClient is connected to server localhost:21726 WaitRootIsUp 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'... TClient::Ls request: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_D... (TRUNCATED) WaitRootIsUp 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa' success. 2025-05-07T08:53:13.958366Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T08:53:17.126218Z node 11 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[11:7501624207069678129:2063];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:53:17.137595Z node 11 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:53:20.783729Z node 11 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [11:7501624241429417148:2338], DatabaseId: /aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:53:20.783728Z node 11 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [11:7501624241429417173:2341], DatabaseId: /aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:53:20.783875Z node 11 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:53:20.790488Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480 2025-05-07T08:53:20.830243Z node 11 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [11:7501624241429417177:2342], DatabaseId: /aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-05-07T08:53:20.924766Z node 11 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [11:7501624241429417228:2347] txid# 281474976715659, issues: { message: "Check failed: path: \'/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:53:20.973118Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 >> TGRpcStreamingTest::SimpleEcho >> TGRpcStreamingTest::ClientNeverWrites ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> TColumnShardTestReadWrite::CompactionInGranule_PKInt64 [GOOD] Test command err: 2025-05-07T08:50:16.625551Z node 1 :BLOB_CACHE NOTICE: ctor_logger.h:56: MaxCacheDataSize: 20971520 InFlightDataSize: 0 2025-05-07T08:50:16.744530Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-05-07T08:50:16.788970Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-05-07T08:50:16.789280Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-05-07T08:50:16.797651Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-05-07T08:50:16.797958Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-05-07T08:50:16.798321Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-05-07T08:50:16.798526Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-05-07T08:50:16.798655Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-05-07T08:50:16.798785Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanInsertionDedup; 2025-05-07T08:50:16.798929Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-05-07T08:50:16.799066Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-05-07T08:50:16.799195Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-05-07T08:50:16.799345Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-05-07T08:50:16.799473Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-05-07T08:50:16.799604Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-05-07T08:50:16.830838Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-05-07T08:50:16.831052Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:137;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=11;current_normalizer=CLASS_NAME=Granules; 2025-05-07T08:50:16.831112Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-05-07T08:50:16.831336Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-05-07T08:50:16.831581Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-05-07T08:50:16.831671Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-05-07T08:50:16.831723Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-05-07T08:50:16.831817Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-05-07T08:50:16.831904Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-05-07T08:50:16.831965Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-05-07T08:50:16.832002Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-05-07T08:50:16.832193Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-05-07T08:50:16.832259Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-05-07T08:50:16.832305Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-05-07T08:50:16.832336Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-05-07T08:50:16.832426Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-05-07T08:50:16.832479Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-05-07T08:50:16.832523Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanInsertionDedup;id=CleanInsertionDedup; 2025-05-07T08:50:16.832570Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=8;type=CleanInsertionDedup; 2025-05-07T08:50:16.832667Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanInsertionDedup;id=8; 2025-05-07T08:50:16.832722Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-05-07T08:50:16.832775Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-05-07T08:50:16.832849Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-05-07T08:50:16.832892Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-05-07T08:50:16.832946Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-05-07T08:50:16.833204Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-05-07T08:50:16.833258Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-05-07T08:50:16.833303Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-05-07T08:50:16.833518Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-05-07T08:50:16.833566Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-05-07T08:50:16.833598Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-05-07T08:50:16.833792Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-05-07T08:50:16.833835Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-05-07T08:50:16.833867Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-05-07T08:50:16.834930Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-05-07T08:50:16.835076Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-05-07T08:50:16.835140Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-05-07T08:50:16.835180Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-05-07T08:50:16.835652Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=61; 2025-05-07T08:50:16.835779Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=44; ... 
;column_id:8;chunk_idx:42;blob_range:[NO_BLOB:0:2696];;column_id:8;chunk_idx:43;blob_range:[NO_BLOB:0:2696];;column_id:8;chunk_idx:44;blob_range:[NO_BLOB:0:2696];;column_id:8;chunk_idx:45;blob_range:[NO_BLOB:0:2696];;column_id:8;chunk_idx:46;blob_range:[NO_BLOB:0:2696];;column_id:8;chunk_idx:47;blob_range:[NO_BLOB:0:2696];;column_id:8;chunk_idx:48;blob_range:[NO_BLOB:0:2696];;column_id:8;chunk_idx:49;blob_range:[NO_BLOB:0:2696];;column_id:8;chunk_idx:50;blob_range:[NO_BLOB:0:2696];;column_id:8;chunk_idx:51;blob_range:[NO_BLOB:0:2696];;column_id:8;chunk_idx:52;blob_range:[NO_BLOB:0:2696];;column_id:8;chunk_idx:53;blob_range:[NO_BLOB:0:2696];;column_id:8;chunk_idx:54;blob_range:[NO_BLOB:0:2696];;column_id:8;chunk_idx:55;blob_range:[NO_BLOB:0:2696];;column_id:8;chunk_idx:56;blob_range:[NO_BLOB:0:2696];;column_id:8;chunk_idx:57;blob_range:[NO_BLOB:0:2696];;column_id:8;chunk_idx:58;blob_range:[NO_BLOB:0:2696];;column_id:8;chunk_idx:59;blob_range:[NO_BLOB:0:2688];;column_id:8;chunk_idx:60;blob_range:[NO_BLOB:0:2688];;column_id:8;chunk_idx:61;blob_range:[NO_BLOB:0:2688];;column_id:8;chunk_idx:62;blob_range:[NO_BLOB:0:2688];;column_id:8;chunk_idx:63;blob_range:[NO_BLOB:0:2688];;column_id:8;chunk_idx:64;blob_range:[NO_BLOB:0:2688];;column_id:8;chunk_idx:65;blob_range:[NO_BLOB:0:2688];;column_id:8;chunk_idx:66;blob_range:[NO_BLOB:0:2688];;column_id:8;chunk_idx:67;blob_range:[NO_BLOB:0:2688];;column_id:8;chunk_idx:68;blob_range:[NO_BLOB:0:2680];;column_id:8;chunk_idx:69;blob_range:[NO_BLOB:0:2680];;column_id:8;chunk_idx:70;blob_range:[NO_BLOB:0:2680];;column_id:8;chunk_idx:71;blob_range:[NO_BLOB:0:2672];;column_id:8;chunk_idx:72;blob_range:[NO_BLOB:0:2664];;column_id:8;chunk_idx:73;blob_range:[NO_BLOB:0:8448];;column_id:9;chunk_idx:0;blob_range:[NO_BLOB:0:2696];;column_id:9;chunk_idx:1;blob_range:[NO_BLOB:0:2696];;column_id:9;chunk_idx:2;blob_range:[NO_BLOB:0:2696];;column_id:9;chunk_idx:3;blob_range:[NO_BLOB:0:2696];;column_id:9;chunk_idx:4;blob_range:[NO_BLOB:0:2696];;column_id:9;chunk_idx:5;blob_range:[NO_BLOB:0:2696];;column_id:9;chunk_idx:6;blob_range:[NO_BLOB:0:2696];;column_id:9;chunk_idx:7;blob_range:[NO_BLOB:0:2696];;column_id:9;chunk_idx:8;blob_range:[NO_BLOB:0:2696];;column_id:9;chunk_idx:9;blob_range:[NO_BLOB:0:2696];;column_id:9;chunk_idx:10;blob_range:[NO_BLOB:0:2696];;column_id:9;chunk_idx:11;blob_range:[NO_BLOB:0:2696];;column_id:9;chunk_idx:12;blob_range:[NO_BLOB:0:2696];;column_id:9;chunk_idx:13;blob_range:[NO_BLOB:0:2696];;column_id:9;chunk_idx:14;blob_range:[NO_BLOB:0:2696];;column_id:9;chunk_idx:15;blob_range:[NO_BLOB:0:2696];;column_id:9;chunk_idx:16;blob_range:[NO_BLOB:0:2696];;column_id:9;chunk_idx:17;blob_range:[NO_BLOB:0:2696];;column_id:9;chunk_idx:18;blob_range:[NO_BLOB:0:2696];;column_id:9;chunk_idx:19;blob_range:[NO_BLOB:0:2696];;column_id:9;chunk_idx:20;blob_range:[NO_BLOB:0:2696];;column_id:9;chunk_idx:21;blob_range:[NO_BLOB:0:2696];;column_id:9;chunk_idx:22;blob_range:[NO_BLOB:0:2688];;column_id:9;chunk_idx:23;blob_range:[NO_BLOB:0:2688];;column_id:9;chunk_idx:24;blob_range:[NO_BLOB:0:2688];;column_id:9;chunk_idx:25;blob_range:[NO_BLOB:0:2688];;column_id:9;chunk_idx:26;blob_range:[NO_BLOB:0:2688];;column_id:9;chunk_idx:27;blob_range:[NO_BLOB:0:2688];;column_id:9;chunk_idx:28;blob_range:[NO_BLOB:0:2688];;column_id:9;chunk_idx:29;blob_range:[NO_BLOB:0:2688];;column_id:9;chunk_idx:30;blob_range:[NO_BLOB:0:2688];;column_id:9;chunk_idx:31;blob_range:[NO_BLOB:0:2680];;column_id:9;chunk_idx:32;blob_range:[NO_BLOB:0:2680];;column_id:9;chunk_idx:33;blob_range:[NO_BLOB:0:2680];;
column_id:9;chunk_idx:34;blob_range:[NO_BLOB:0:2672];;column_id:9;chunk_idx:35;blob_range:[NO_BLOB:0:2664];;column_id:9;chunk_idx:36;blob_range:[NO_BLOB:0:8464];;column_id:9;chunk_idx:37;blob_range:[NO_BLOB:0:2696];;column_id:9;chunk_idx:38;blob_range:[NO_BLOB:0:2696];;column_id:9;chunk_idx:39;blob_range:[NO_BLOB:0:2696];;column_id:9;chunk_idx:40;blob_range:[NO_BLOB:0:2696];;column_id:9;chunk_idx:41;blob_range:[NO_BLOB:0:2696];;column_id:9;chunk_idx:42;blob_range:[NO_BLOB:0:2696];;column_id:9;chunk_idx:43;blob_range:[NO_BLOB:0:2696];;column_id:9;chunk_idx:44;blob_range:[NO_BLOB:0:2696];;column_id:9;chunk_idx:45;blob_range:[NO_BLOB:0:2696];;column_id:9;chunk_idx:46;blob_range:[NO_BLOB:0:2696];;column_id:9;chunk_idx:47;blob_range:[NO_BLOB:0:2696];;column_id:9;chunk_idx:48;blob_range:[NO_BLOB:0:2696];;column_id:9;chunk_idx:49;blob_range:[NO_BLOB:0:2696];;column_id:9;chunk_idx:50;blob_range:[NO_BLOB:0:2696];;column_id:9;chunk_idx:51;blob_range:[NO_BLOB:0:2696];;column_id:9;chunk_idx:52;blob_range:[NO_BLOB:0:2696];;column_id:9;chunk_idx:53;blob_range:[NO_BLOB:0:2696];;column_id:9;chunk_idx:54;blob_range:[NO_BLOB:0:2696];;column_id:9;chunk_idx:55;blob_range:[NO_BLOB:0:2696];;column_id:9;chunk_idx:56;blob_range:[NO_BLOB:0:2696];;column_id:9;chunk_idx:57;blob_range:[NO_BLOB:0:2696];;column_id:9;chunk_idx:58;blob_range:[NO_BLOB:0:2696];;column_id:9;chunk_idx:59;blob_range:[NO_BLOB:0:2688];;column_id:9;chunk_idx:60;blob_range:[NO_BLOB:0:2688];;column_id:9;chunk_idx:61;blob_range:[NO_BLOB:0:2688];;column_id:9;chunk_idx:62;blob_range:[NO_BLOB:0:2688];;column_id:9;chunk_idx:63;blob_range:[NO_BLOB:0:2688];;column_id:9;chunk_idx:64;blob_range:[NO_BLOB:0:2688];;column_id:9;chunk_idx:65;blob_range:[NO_BLOB:0:2688];;column_id:9;chunk_idx:66;blob_range:[NO_BLOB:0:2688];;column_id:9;chunk_idx:67;blob_range:[NO_BLOB:0:2688];;column_id:9;chunk_idx:68;blob_range:[NO_BLOB:0:2680];;column_id:9;chunk_idx:69;blob_range:[NO_BLOB:0:2680];;column_id:9;chunk_idx:70;blob_range:[NO_BLOB:0:2680];;column_id:9;chunk_idx:71;blob_range:[NO_BLOB:0:2672];;column_id:9;chunk_idx:72;blob_range:[NO_BLOB:0:2664];;column_id:9;chunk_idx:73;blob_range:[NO_BLOB:0:8448];;column_id:7;chunk_idx:0;blob_range:[NO_BLOB:0:2760];;column_id:7;chunk_idx:1;blob_range:[NO_BLOB:0:2760];;column_id:7;chunk_idx:2;blob_range:[NO_BLOB:0:2760];;column_id:7;chunk_idx:3;blob_range:[NO_BLOB:0:2760];;column_id:7;chunk_idx:4;blob_range:[NO_BLOB:0:2760];;column_id:7;chunk_idx:5;blob_range:[NO_BLOB:0:2760];;column_id:7;chunk_idx:6;blob_range:[NO_BLOB:0:2760];;column_id:7;chunk_idx:7;blob_range:[NO_BLOB:0:2760];;column_id:7;chunk_idx:8;blob_range:[NO_BLOB:0:2760];;column_id:7;chunk_idx:9;blob_range:[NO_BLOB:0:2760];;column_id:7;chunk_idx:10;blob_range:[NO_BLOB:0:2760];;column_id:7;chunk_idx:11;blob_range:[NO_BLOB:0:2760];;column_id:7;chunk_idx:12;blob_range:[NO_BLOB:0:2760];;column_id:7;chunk_idx:13;blob_range:[NO_BLOB:0:2760];;column_id:7;chunk_idx:14;blob_range:[NO_BLOB:0:2760];;column_id:7;chunk_idx:15;blob_range:[NO_BLOB:0:2760];;column_id:7;chunk_idx:16;blob_range:[NO_BLOB:0:2760];;column_id:7;chunk_idx:17;blob_range:[NO_BLOB:0:2760];;column_id:7;chunk_idx:18;blob_range:[NO_BLOB:0:2760];;column_id:7;chunk_idx:19;blob_range:[NO_BLOB:0:2752];;column_id:7;chunk_idx:20;blob_range:[NO_BLOB:0:2752];;column_id:7;chunk_idx:21;blob_range:[NO_BLOB:0:2752];;column_id:7;chunk_idx:22;blob_range:[NO_BLOB:0:2752];;column_id:7;chunk_idx:23;blob_range:[NO_BLOB:0:2752];;column_id:7;chunk_idx:24;blob_range:[NO_BLOB:0:2744];;column_id:7;chunk_idx:25;blob_range:[NO_BLOB:0:2744];;c
olumn_id:7;chunk_idx:26;blob_range:[NO_BLOB:0:9040];;column_id:7;chunk_idx:27;blob_range:[NO_BLOB:0:2760];;column_id:7;chunk_idx:28;blob_range:[NO_BLOB:0:2760];;column_id:7;chunk_idx:29;blob_range:[NO_BLOB:0:2760];;column_id:7;chunk_idx:30;blob_range:[NO_BLOB:0:2760];;column_id:7;chunk_idx:31;blob_range:[NO_BLOB:0:2760];;column_id:7;chunk_idx:32;blob_range:[NO_BLOB:0:2760];;column_id:7;chunk_idx:33;blob_range:[NO_BLOB:0:2760];;column_id:7;chunk_idx:34;blob_range:[NO_BLOB:0:2760];;column_id:7;chunk_idx:35;blob_range:[NO_BLOB:0:2760];;column_id:7;chunk_idx:36;blob_range:[NO_BLOB:0:2760];;column_id:7;chunk_idx:37;blob_range:[NO_BLOB:0:2760];;column_id:7;chunk_idx:38;blob_range:[NO_BLOB:0:2760];;column_id:7;chunk_idx:39;blob_range:[NO_BLOB:0:2760];;column_id:7;chunk_idx:40;blob_range:[NO_BLOB:0:2760];;column_id:7;chunk_idx:41;blob_range:[NO_BLOB:0:2760];;column_id:7;chunk_idx:42;blob_range:[NO_BLOB:0:2760];;column_id:7;chunk_idx:43;blob_range:[NO_BLOB:0:2760];;column_id:7;chunk_idx:44;blob_range:[NO_BLOB:0:2760];;column_id:7;chunk_idx:45;blob_range:[NO_BLOB:0:2760];;column_id:7;chunk_idx:46;blob_range:[NO_BLOB:0:2760];;column_id:7;chunk_idx:47;blob_range:[NO_BLOB:0:2752];;column_id:7;chunk_idx:48;blob_range:[NO_BLOB:0:2752];;column_id:7;chunk_idx:49;blob_range:[NO_BLOB:0:2752];;column_id:7;chunk_idx:50;blob_range:[NO_BLOB:0:2752];;column_id:7;chunk_idx:51;blob_range:[NO_BLOB:0:2744];;column_id:7;chunk_idx:52;blob_range:[NO_BLOB:0:2744];;column_id:7;chunk_idx:53;blob_range:[NO_BLOB:0:9024];;column_id:5;chunk_idx:0;blob_range:[NO_BLOB:0:2696];;column_id:5;chunk_idx:1;blob_range:[NO_BLOB:0:2696];;column_id:5;chunk_idx:2;blob_range:[NO_BLOB:0:2696];;column_id:5;chunk_idx:3;blob_range:[NO_BLOB:0:2696];;column_id:5;chunk_idx:4;blob_range:[NO_BLOB:0:2696];;column_id:5;chunk_idx:5;blob_range:[NO_BLOB:0:2696];;column_id:5;chunk_idx:6;blob_range:[NO_BLOB:0:2696];;column_id:5;chunk_idx:7;blob_range:[NO_BLOB:0:2696];;column_id:5;chunk_idx:8;blob_range:[NO_BLOB:0:2688];;column_id:5;chunk_idx:9;blob_range:[NO_BLOB:0:2688];;column_id:5;chunk_idx:10;blob_range:[NO_BLOB:0:2688];;column_id:5;chunk_idx:11;blob_range:[NO_BLOB:0:2688];;column_id:5;chunk_idx:12;blob_range:[NO_BLOB:0:2688];;column_id:5;chunk_idx:13;blob_range:[NO_BLOB:0:2680];;column_id:5;chunk_idx:14;blob_range:[NO_BLOB:0:2680];;column_id:5;chunk_idx:15;blob_range:[NO_BLOB:0:2672];;column_id:5;chunk_idx:16;blob_range:[NO_BLOB:0:9456];;column_id:5;chunk_idx:17;blob_range:[NO_BLOB:0:2696];;column_id:5;chunk_idx:18;blob_range:[NO_BLOB:0:2696];;column_id:5;chunk_idx:19;blob_range:[NO_BLOB:0:2696];;column_id:5;chunk_idx:20;blob_range:[NO_BLOB:0:2696];;column_id:5;chunk_idx:21;blob_range:[NO_BLOB:0:2696];;column_id:5;chunk_idx:22;blob_range:[NO_BLOB:0:2696];;column_id:5;chunk_idx:23;blob_range:[NO_BLOB:0:2696];;column_id:5;chunk_idx:24;blob_range:[NO_BLOB:0:2696];;column_id:5;chunk_idx:25;blob_range:[NO_BLOB:0:2688];;column_id:5;chunk_idx:26;blob_range:[NO_BLOB:0:2688];;column_id:5;chunk_idx:27;blob_range:[NO_BLOB:0:2688];;column_id:5;chunk_idx:28;blob_range:[NO_BLOB:0:2688];;column_id:5;chunk_idx:29;blob_range:[NO_BLOB:0:2688];;column_id:5;chunk_idx:30;blob_range:[NO_BLOB:0:2680];;column_id:5;chunk_idx:31;blob_range:[NO_BLOB:0:2680];;column_id:5;chunk_idx:32;blob_range:[NO_BLOB:0:2672];;column_id:5;chunk_idx:33;blob_range:[NO_BLOB:0:9448];;;;switched=(portion_id:44;path_id:1;records_count:25002;schema_version:1;level:0;;column_size:2167032;index_size:0;meta:((produced=INSERTED;)););(portion_id:48;path_id:1;records_count:25002;schema_version:1;level:0;;co
lumn_size:2586528;index_size:0;meta:((produced=SPLIT_COMPACTED;)););(portion_id:49;path_id:1;records_count:25002;schema_version:1;level:0;;column_size:2167032;index_size:0;meta:((produced=INSERTED;)););; 2025-05-07T08:53:21.681729Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: event_type=NKikimr::NBlobCache::TEvBlobCache::TEvReadBlobRangeResult;tablet_id=9437184;parent_id=[1:5507:7499];fline=general_compaction.cpp:135;event=blobs_created;appended=1;switched=3; 2025-05-07T08:53:21.687843Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:5507:7499];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=columnshard__write_index.cpp:50;event=TEvWriteIndex;count=1; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=write_controller.h:65;event=IWriteController aborted;reason=TTxWriteDraft aborted before complete; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=compacted_blob_constructor.cpp:47;event=TCompactedWriteController::DoAbort;reason=TTxWriteDraft aborted before complete; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=abstract.cpp:105;event=AbortEmergency;reason=TCompactedWriteController destructed with WriteIndexEv and WriteIndexEv->IndexChanges;prev_reason=; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=manager.cpp:51;message=aborted data locks manager; >> TGRpcStreamingTest::ClientDisconnects >> KqpPg::InsertNoTargetColumns_Alter+useSink [GOOD] >> KqpPg::InsertNoTargetColumns_Alter-useSink >> KqpPrefixedVectorIndexes::OrderByCosineSimilarityNotNullableLevel2 [GOOD] |90.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/grpc_streaming/ut/unittest >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-55 [FAIL] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-56 >> DataShardSnapshots::VolatileSnapshotCleanupOnFinish [GOOD] >> DataShardSnapshots::VolatileSnapshotRenameTimeout >> TGRpcStreamingTest::WritesDoneFromClient >> BasicUsage::MaxByteSizeEqualZero [GOOD] >> BasicUsage::TSimpleWriteSession_AutoSeqNo_BasicUsage >> TGRpcStreamingTest::ReadFinish [GOOD] >> TGRpcStreamingTest::WriteAndFinishWorks ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpPrefixedVectorIndexes::OrderByCosineSimilarityNotNullableLevel2 [GOOD] Test command err: Trying to start YDB, gRPC: 21065, MsgBus: 22114 2025-05-07T08:51:53.391021Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501623867887381679:2070];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:51:53.391910Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003c7e/r3tmp/tmp5yTmK6/pdisk_1.dat 2025-05-07T08:51:54.446064Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:51:54.452087Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:51:54.452235Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:51:54.458388Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 
2025-05-07T08:51:54.471407Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 21065, node 1 2025-05-07T08:51:54.698495Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:51:54.698520Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:51:54.698528Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:51:54.698660Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:22114 TClient is connected to server localhost:22114 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:51:55.873358Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:51:55.935331Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:51:56.202451Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:51:56.588444Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:51:56.712306Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T08:51:58.395050Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501623867887381679:2070];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:51:58.395131Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:52:00.448069Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623897952154411:2412], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:00.448161Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:00.993626Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T08:52:01.089677Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T08:52:01.169576Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T08:52:01.266155Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T08:52:01.308773Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T08:52:01.364839Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T08:52:01.464589Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T08:52:01.610441Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623902247122387:2479], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:01.610540Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:01.610912Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623902247122392:2482], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:01.615238Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-05-07T08:52:01.643750Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501623902247122394:2483], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T08:52:01.754589Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501623902247122448:3440] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:52:04.678988Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269877761, Sender [1:7501623915132024639:3626], Recipient [1:7501623872182349389:2195]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-05-07T08:52:04.679034Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4937: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-05-07T08:52:04.679046Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5713: Pipe server connected, at tablet: 72057594046644480 2025-05-07T08:52:04.679091Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271122432, Sender [1:7501623915132024635:3623], Recipient [1:7501623872182349389:2195]: {TEvModifySchemeTransaction txid# 281474976710672 TabletId# 72057594046644480} 2025-05-07T08:52:04.679108Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4851: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-05-07T08:52:04.772321Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/Root" OperationType: ESchemeOpCreateTable CreateTable { Name: "TestTable" Columns { Name: "pk" Type: "Int64" NotNull: false } Columns { Name: "user" Type: "String" NotNull: false } Columns { Name: "emb" Type: "String" NotNull: false } Columns { Name: "data" Type: "String" NotNull: false } KeyColumnNames: "pk" PartitionConfig { PartitioningPolicy { MinPartitionsCount: 3 } ColumnFamilies { Id: 0 StorageConfig { SysLog { PreferredPoolKind: "test" } Log { PreferredPoolKind: "test" } Data { PreferredPoolKind: "test" } } } } SplitBoundary { KeyPrefix { Tuple { Optional { Int64: 40 } } } } SplitBoundary { KeyPrefix { Tuple { Optional { Int64: 60 } } } } Temporary: false } CreateIndexedTable { } } TxId: 281474976710672 TabletId: 72057594046644480 PeerName: "ipv6:[::1]:37166" , at schemeshard: 72057594046644480 2025-05-07T08:52:04.772792Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_table.cpp:426: TCreateTable Propose, path: /Root/TestTable, opId: 281474976710672:0, at schemeshard: 72057594046644480 2025-05-07T08:52:04.772976Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_create_table.cpp:433: TCreateTable Propose, path: /Root/TestTable, opId: 281474976710672:0, schema: Name: "TestTable" Columns { Name: "pk" Type: "Int64" NotNull: false } Columns { Name: "user" Type: "String" NotNull: false } Columns { Name: "emb" Type: "String" NotNull: false } Columns { Name: "data" Type: "String" NotNull: false } KeyColumnNames: "pk" PartitionConfig { PartitioningPolicy { MinPartitionsCount: 3 } ColumnFamilies { Id: 0 StorageConfig { SysLog { Preferre ... 
HEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 12 shard idx 72057594046644480:32 data size 1032 row count 5 2025-05-07T08:53:24.815604Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037919 maps to shardIdx: 72057594046644480:32 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 12], pathId map=TuplePrimaryDescending, is column=0, is olap=0, RowCount 5, DataSize 1032 2025-05-07T08:53:24.815614Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037919, followerId 0 2025-05-07T08:53:24.815643Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:32 with partCount# 0, rowCount# 5, searchHeight# 1, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-05-07T08:53:24.815656Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:483: Do not want to split tablet 72075186224037919 2025-05-07T08:53:24.815708Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-05-07T08:53:24.816441Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 2146435092, Sender [0:0:0], Recipient [2:7501624065979887455:2146]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats 2025-05-07T08:53:24.816467Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5015: StateWork, processing event TEvPrivate::TEvPersistTableStats 2025-05-07T08:53:24.816483Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:588: Started TEvPersistStats at tablet 72057594046644480, queue size# 0 2025-05-07T08:53:24.918287Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271125000, Sender [0:0:0], Recipient [2:7501624065979887455:2146]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T08:53:24.918356Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4848: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T08:53:24.918411Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271124999, Sender [2:7501624065979887455:2146], Recipient [2:7501624065979887455:2146]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T08:53:24.918428Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4847: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T08:53:25.835375Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269553162, Sender [2:7501624070274855046:2316], Recipient [2:7501624065979887455:2146]: NKikimrTxDataShard.TEvPeriodicTableStats DatashardId: 72075186224037889 TableLocalId: 2 Generation: 1 Round: 3 TableStats { DataSize: 800 RowCount: 3 IndexSize: 0 InMemSize: 800 LastAccessTime: 1746607966388 LastUpdateTime: 1746607966388 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 3 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 1 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { 
CPU: 151 Memory: 137784 } ShardState: 2 UserTablePartOwners: 72075186224037889 NodeId: 2 StartTime: 1746607960564 TableOwnerId: 72057594046644480 FollowerId: 0 2025-05-07T08:53:25.835426Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4877: StateWork, processing event TEvDataShard::TEvPeriodicTableStats 2025-05-07T08:53:25.835468Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:561: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037889 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 2] state 'Ready' dataSize 800 rowCount 3 cpuUsage 0.0151 2025-05-07T08:53:25.835576Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:568: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037889 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 2] raw table stats: DataSize: 800 RowCount: 3 IndexSize: 0 InMemSize: 800 LastAccessTime: 1746607966388 LastUpdateTime: 1746607966388 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 3 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 1 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 2025-05-07T08:53:25.835604Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:608: Will delay TTxStoreTableStats on# 0.099996s, queue# 1 2025-05-07T08:53:25.835785Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269553162, Sender [2:7501624070274855040:2315], Recipient [2:7501624065979887455:2146]: NKikimrTxDataShard.TEvPeriodicTableStats DatashardId: 72075186224037888 TableLocalId: 2 Generation: 1 Round: 3 TableStats { DataSize: 800 RowCount: 3 IndexSize: 0 InMemSize: 800 LastAccessTime: 1746607966388 LastUpdateTime: 1746607966388 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 3 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 1 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 65 Memory: 137784 } ShardState: 2 UserTablePartOwners: 72075186224037888 NodeId: 2 StartTime: 1746607960564 TableOwnerId: 72057594046644480 FollowerId: 0 2025-05-07T08:53:25.835801Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4877: StateWork, processing event TEvDataShard::TEvPeriodicTableStats 2025-05-07T08:53:25.835820Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:561: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037888 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 2] state 'Ready' dataSize 800 rowCount 3 cpuUsage 0.0065 2025-05-07T08:53:25.835909Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:568: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037888 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 2] raw table stats: DataSize: 800 RowCount: 3 IndexSize: 0 InMemSize: 800 LastAccessTime: 1746607966388 LastUpdateTime: 1746607966388 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 3 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 
SearchHeight: 1 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 2025-05-07T08:53:25.922346Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271125000, Sender [0:0:0], Recipient [2:7501624065979887455:2146]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T08:53:25.922396Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4848: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T08:53:25.922446Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271124999, Sender [2:7501624065979887455:2146], Recipient [2:7501624065979887455:2146]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T08:53:25.922465Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4847: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T08:53:25.934703Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 2146435092, Sender [0:0:0], Recipient [2:7501624065979887455:2146]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats 2025-05-07T08:53:25.934753Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5015: StateWork, processing event TEvPrivate::TEvPersistTableStats 2025-05-07T08:53:25.934779Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:588: Started TEvPersistStats at tablet 72057594046644480, queue size# 2 2025-05-07T08:53:25.934834Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:599: Will execute TTxStoreStats, queue# 2 2025-05-07T08:53:25.934851Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:608: Will delay TTxStoreTableStats on# 0.000748s, queue# 2 2025-05-07T08:53:25.934911Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 2 shard idx 72057594046644480:2 data size 800 row count 3 2025-05-07T08:53:25.934978Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037889 maps to shardIdx: 72057594046644480:2 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 2], pathId map=TwoShard, is column=0, is olap=0, RowCount 3, DataSize 800 2025-05-07T08:53:25.934995Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037889, followerId 0 2025-05-07T08:53:25.935054Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:2 with partCount# 0, rowCount# 3, searchHeight# 1, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-05-07T08:53:25.935113Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:483: Do not want to split tablet 72075186224037889 2025-05-07T08:53:25.935148Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 2 shard idx 72057594046644480:1 data size 800 row count 3 2025-05-07T08:53:25.935183Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037888 maps to shardIdx: 72057594046644480:1 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 2], pathId map=TwoShard, is column=0, is olap=0, 
RowCount 3, DataSize 800 2025-05-07T08:53:25.935194Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037888, followerId 0 2025-05-07T08:53:25.935228Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:1 with partCount# 0, rowCount# 3, searchHeight# 1, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-05-07T08:53:25.935240Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:483: Do not want to split tablet 72075186224037888 2025-05-07T08:53:25.935290Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-05-07T08:53:25.940965Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 2146435092, Sender [0:0:0], Recipient [2:7501624065979887455:2146]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats 2025-05-07T08:53:25.941014Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5015: StateWork, processing event TEvPrivate::TEvPersistTableStats 2025-05-07T08:53:25.941031Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:588: Started TEvPersistStats at tablet 72057594046644480, queue size# 0 >> TKeyValueTest::TestInlineWriteReadWithRestartsWithNotCorrectUTF8NewApi >> TGRpcStreamingTest::SimpleEcho [GOOD] >> TGRpcStreamingTest::ClientNeverWrites [GOOD] |90.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/scheme_board/ut_monitoring/ydb-core-tx-scheme_board-ut_monitoring |90.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/scheme_board/ut_monitoring/ydb-core-tx-scheme_board-ut_monitoring ------- [TM] {asan, default-linux-x86_64, release} ydb/core/grpc_streaming/ut/unittest >> TGRpcStreamingTest::ReadFinish [GOOD] Test command err: 2025-05-07T08:53:22.931955Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501624250240350369:2137];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:53:22.932024Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0041a9/r3tmp/tmpBkb0jY/pdisk_1.dat 2025-05-07T08:53:23.522530Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:53:23.543494Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:53:23.543601Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:53:23.548132Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:53:23.752815Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:227: [0x51f00002a080] stream accepted Name# Session ok# true peer# ipv6:[::1]:34538 2025-05-07T08:53:23.758143Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:301: [0x51f00002a080] facade attach Name# Session actor# [1:7501624254535318105:2257] peer# ipv6:[::1]:34538 2025-05-07T08:53:23.758211Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:325: [0x51f00002a080] facade read Name# Session peer# ipv6:[::1]:34538 2025-05-07T08:53:23.758335Z node 1 :GRPC_SERVER 
DEBUG: grpc_streaming.h:511: [0x51f00002a080] facade finish Name# Session peer# ipv6:[::1]:34538 grpc status# (0) message# 2025-05-07T08:53:23.758835Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:353: [0x51f00002a080] read finished Name# Session ok# false data# peer# ipv6:[::1]:34538 2025-05-07T08:53:23.758880Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:268: [0x51f00002a080] stream done notification Name# Session ok# true peer# ipv6:[::1]:34538 2025-05-07T08:53:23.758917Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:547: [0x51f00002a080] stream finished Name# Session ok# true peer# ipv6:[::1]:34538 grpc status# (0) message# 2025-05-07T08:53:23.758980Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:580: [0x51f00002a080] deregistering request Name# Session peer# ipv6:[::1]:34538 (finish done) 2025-05-07T08:53:23.759303Z node 1 :GRPC_SERVER DEBUG: grpc_streaming_ut.cpp:265: Received TEvReadFinished, success = 0 |90.0%| [LD] {RESULT} $(B)/ydb/core/tx/scheme_board/ut_monitoring/ydb-core-tx-scheme_board-ut_monitoring >> TGRpcStreamingTest::ClientDisconnects [GOOD] >> TKeyValueTest::TestWriteTrimWithRestartsThenResponseOk ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/pg/unittest >> KqpPg::CheckPgAutoParams-useSink [GOOD] Test command err: Trying to start YDB, gRPC: 8825, MsgBus: 28807 2025-05-07T08:50:53.969369Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501623610468398058:2060];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:50:53.969417Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/001cd4/r3tmp/tmp1PaDBE/pdisk_1.dat 2025-05-07T08:50:54.557416Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:50:54.587664Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:50:54.587783Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:50:54.593034Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 8825, node 1 2025-05-07T08:50:54.754093Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:50:54.754114Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:50:54.754130Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:50:54.754239Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:28807 TClient is connected to server localhost:28807 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:50:55.330803Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:50:55.347853Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T08:50:57.502785Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 2025-05-07T08:50:57.708280Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-05-07T08:50:57.712745Z node 1 :TX_DATASHARD ERROR: finish_propose_unit.cpp:245: Prepare transaction failed. 
txid 281474976710660 at tablet 72075186224037888 errors: WRONG_SHARD_STATE (Interrupted operation [0:281474976710660] at 72075186224037888 while waiting for scan finish) | 2025-05-07T08:50:57.712846Z node 1 :TX_DATASHARD ERROR: finish_propose_unit.cpp:174: Errors while proposing transaction txid 281474976710660 at tablet 72075186224037888 status: ERROR errors: WRONG_SHARD_STATE (Interrupted operation [0:281474976710660] at 72075186224037888 while waiting for scan finish) | \x62797465612030 \x62797465612030 \x62797465612031 \x62797465612031 \x62797465612032 \x62797465612032 \x62797465612033 \x62797465612033 \x62797465612034 \x62797465612034 \x62797465612035 \x62797465612035 \x62797465612036 \x62797465612036 \x62797465612037 \x62797465612037 \x62797465612038 \x62797465612038 \x62797465612039 \x62797465612039 2025-05-07T08:50:57.771084Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 \x62797465612030 \x62797465612030 \x62797465612031 \x62797465612031 \x62797465612032 \x62797465612032 \x62797465612033 \x62797465612033 \x62797465612034 \x62797465612034 \x62797465612035 \x62797465612035 \x62797465612036 \x62797465612036 \x62797465612037 \x62797465612037 \x62797465612038 \x62797465612038 \x62797465612039 \x62797465612039 2025-05-07T08:50:57.900623Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T08:50:57.975380Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill {"\\x6130","\\x623130"} {"\\x6130","\\x623130"} {"\\x6131","\\x623131"} {"\\x6131","\\x623131"} {"\\x6132","\\x623132"} {"\\x6132","\\x623132"} {"\\x6133","\\x623133"} {"\\x6133","\\x623133"} {"\\x6134","\\x623134"} {"\\x6134","\\x623134"} {"\\x6135","\\x623135"} {"\\x6135","\\x623135"} {"\\x6136","\\x623136"} {"\\x6136","\\x623136"} {"\\x6137","\\x623137"} {"\\x6137","\\x623137"} {"\\x6138","\\x623138"} {"\\x6138","\\x623138"} {"\\x6139","\\x623139"} {"\\x6139","\\x623139"} 2025-05-07T08:50:58.054132Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480 2025-05-07T08:50:58.144496Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill {"\\x6130","\\x623130"} {"\\x6130","\\x623130"} {"\\x6131","\\x623131"} {"\\x6131","\\x623131"} {"\\x6132","\\x623132"} {"\\x6132","\\x623132"} {"\\x6133","\\x623133"} {"\\x6133","\\x623133"} {"\\x6134","\\x623134"} {"\\x6134","\\x623134"} {"\\x6135","\\x623135"} {"\\x6135","\\x623135"} {"\\x6136","\\x623136"} {"\\x6136","\\x623136"} {"\\x6137","\\x623137"} {"\\x6137","\\x623137"} {"\\x6138","\\x623138"} {"\\x6138","\\x623138"} {"\\x6139","\\x623139"} {"\\x6139","\\x623139"} 2025-05-07T08:50:58.255380Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710675:0, at schemeshard: 72057594046644480 2025-05-07T08:50:58.396738Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill f f t t 2025-05-07T08:50:58.445609Z 
node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710679:0, at schemeshard: 72057594046644480 2025-05-07T08:50:58.498957Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill f f t t 2025-05-07T08:50:58.549874Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710683:0, at schemeshard: 72057594046644480 2025-05-07T08:50:58.638793Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill {f,f} {f,f} {t,t} {t,t} 2025-05-07T08:50:58.685612Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710687:0, at schemeshard: 72057594046644480 {f,f} {f,f} {t,t} {t,t} 2025-05-07T08:50:58.832895Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710690:0, at schemeshard: 72057594046644480 2025-05-07T08:50:58.899018Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 0 0 1 1 2 2 3 3 4 4 5 5 6 6 7 7 8 8 9 9 2025-05-07T08:50:58.959276Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710694:0, at schemeshard: 72057594046644480 2025-05-07T08:50:58.970083Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501623610468398058:2060];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:50:58.970422Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 0 0 1 1 2 2 3 3 4 4 5 5 6 6 7 7 8 8 9 9 2025-05-07T08:50:59.093288Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710697:0, at schemeshard: 72057594046644480 {0,0} {0,0} {1,1} {1,1} {2,2} {2,2} {3,3} {3,3} {4,4} {4,4} {5,5} {5,5} {6,6} {6,6} {7,7} {7,7} {8,8} {8,8} {9,9} {9,9} 2025-05-07T08:50:59.212895Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710700:0, at schemeshard: 72057594046644480 {0,0} {0,0} {1,1} {1,1} {2,2} {2,2} {3,3} {3,3} {4,4} {4,4} {5,5} {5,5} {6,6} {6,6} {7,7} {7,7} {8,8} {8,8} {9,9} {9,9} 2025-05-07T08:50:59.413334Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710703:0, at schemeshard: 72057594046644480 2025-05-07T08:50:59.479209Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 0 0 1 1 2 2 3 3 4 4 5 5 6 6 7 7 8 8 9 9 2025-05-07T08:50:59.552360Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpCreateTable, opId: 281474976710707:0, at schemeshard: 72057594046644480 2025-05-07T08:50:59.638636Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 0 0 1 1 2 2 3 3 4 4 5 5 6 6 7 7 8 8 9 9 2025-05-07T08:50:59.721932Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710711:0, at schemeshard: 72057594046644480 {0,0} {0,0} {1,1} {1,1} {2,2} {2,2} {3,3} {3,3} {4,4} {4,4} {5,5} {5,5} {6,6} {6,6} {7,7} {7,7} {8,8} {8,8} {9,9} {9,9} 2025-05-07T08:50:59.940762Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710714:0, at schemeshard: 72057594046644480 2025-05-07T08:51:00.064896Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill { ... nected 2025-05-07T08:53:07.786866Z node 14 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:53:07.786902Z node 14 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:53:07.786922Z node 14 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:53:07.787141Z node 14 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:24208 TClient is connected to server localhost:24208 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:53:09.141425Z node 14 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 
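Interleaved with each test's command output, the runner prints progress markers such as |90.0%| [LD] {RESULT} $(B)/... and suite verdicts such as >> TGRpcStreamingTest::SimpleEcho [GOOD]. A short sketch for tallying verdicts from a capture like this one follows; the non-GOOD statuses are assumptions about what the runner may emit, since only [GOOD] appears in this excerpt:

import re

# Matches e.g. ">> TAsyncIndexTests::MergeIndexWithReboots[TabletReboots] [GOOD]"
VERDICT = re.compile(
    r'>>\s+(?P<test>[\w:\-\[\]]+)\s+\[(?P<status>GOOD|FAIL|TIMEOUT|CRASHED)\]'
)

def tally_verdicts(log_text):
    # The runner can reprint a verdict inside a "------- [TM] ..." result
    # header, so collect names per status in sets to avoid double counting.
    by_status = {}
    for m in VERDICT.finditer(log_text):
        by_status.setdefault(m.group('status'), set()).add(m.group('test'))
    return {status: len(names) for status, names in by_status.items()}

Run over this capture, tally_verdicts returns a {'GOOD': N} summary; suites that have only started (e.g. TKeyValueTest::TestWriteTrimWithRestartsThenResponseOk) carry no verdict yet and are not counted.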
2025-05-07T08:53:09.166202Z node 14 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T08:53:12.329321Z node 14 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[14:7501624184724463334:2075];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:53:12.341649Z node 14 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:53:15.581320Z node 14 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [14:7501624219084202357:2341], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:53:15.581474Z node 14 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [14:7501624219084202347:2338], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:53:15.581757Z node 14 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:53:15.593255Z node 14 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480 2025-05-07T08:53:15.620195Z node 14 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [14:7501624219084202362:2342], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-05-07T08:53:15.678827Z node 14 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [14:7501624219084202413:2347] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:53:15.723833Z node 14 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 2025-05-07T08:53:16.215255Z node 14 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T08:53:17.139258Z node 14 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:468: Get parsing result with error, self: [14:7501624227674137342:2404], owner: [14:7501624219084202312:2328], statement id: 0 2025-05-07T08:53:17.139736Z node 14 :KQP_SESSION WARN: kqp_session_actor.cpp:2147: SessionId: ydb://session/3?node_id=14&id=MzZjOGY2ZjItZGUxODcyNjctMTNlYzRkYjctODVlMGNjZTE=, ActorId: [14:7501624227674137340:2403], ActorState: ExecuteState, TraceId: 01jtmz606d2gx4w61q8bp06v60, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: 2025-05-07T08:53:17.698747Z node 14 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [14:7501624227674137374:2416], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:1:1: Error: At function: RemovePrefixMembers, At function: PgSelect
: Error: At function: PgSetItem
:1:1: Error: At function: PgWhere
:2:55: Error: At function: PgOp
:2:55: Error: Unable to find an overload for operator = with given argument type(s): (text,int4) 2025-05-07T08:53:17.700850Z node 14 :KQP_SESSION WARN: kqp_session_actor.cpp:2147: SessionId: ydb://session/3?node_id=14&id=YjNkZmJmYTktOTEzMWQwMzUtNmJmZTU5ODQtZjJlOGM3Njc=, ActorId: [14:7501624227674137371:2414], ActorState: ExecuteState, TraceId: 01jtmz60pp7fesgyfgt8pby3wa, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: 2025-05-07T08:53:17.793025Z node 14 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [14:7501624227674137386:2422], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:1:1: Error: At function: RemovePrefixMembers, At function: PgSelect
: Error: At function: PgSetItem
:1:1: Error: At function: PgWhere
:2:57: Error: At function: PgAnd
:2:67: Error: At function: PgOp
:2:67: Error: Unable to find an overload for operator = with given argument type(s): (text,int4) 2025-05-07T08:53:17.794419Z node 14 :KQP_SESSION WARN: kqp_session_actor.cpp:2147: SessionId: ydb://session/3?node_id=14&id=YWRhMjg2ZDUtNmY1NWU1ZGEtNGZlMmEyMWItMWM0MjBiNA==, ActorId: [14:7501624227674137383:2420], ActorState: ExecuteState, TraceId: 01jtmz60rp6pfmkrhdkz17e3xf, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: 2025-05-07T08:53:17.835738Z node 14 :KQP_EXECUTER CRIT: kqp_literal_executer.cpp:112: ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: 01jtmz60vk5drcttvx843a8cqe, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=14&id=N2Y4YzYxODItMzQyNTQyYmMtMzJkOWQ5YS05NGJjZDVmNA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. TKqpLiteralExecuter, unexpected exception caught: (NKikimr::NMiniKQL::TTerminateException) Terminate was called, reason(51): ERROR: invalid input syntax for type integer: "a" 2025-05-07T08:53:17.836102Z node 14 :KQP_SESSION WARN: kqp_session_actor.cpp:2578: SessionId: ydb://session/3?node_id=14&id=N2Y4YzYxODItMzQyNTQyYmMtMzJkOWQ5YS05NGJjZDVmNA==, ActorId: [14:7501624227674137395:2426], ActorState: ExecuteState, TraceId: 01jtmz60vk5drcttvx843a8cqe, Create QueryResponse for error on request, msg: 2025-05-07T08:53:17.974537Z node 14 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480 2025-05-07T08:53:18.218596Z node 14 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710674:0, at schemeshard: 72057594046644480 2025-05-07T08:53:18.526273Z node 14 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [14:7501624231969104865:2452], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:1:1: Error: At function: KiWriteTable!
:1:1: Error: values have 3 columns, INSERT INTO expects: 2 2025-05-07T08:53:18.529471Z node 14 :KQP_SESSION WARN: kqp_session_actor.cpp:2147: SessionId: ydb://session/3?node_id=14&id=ODUwMDE3ODItYjg3Njg2YzUtNDExYWRiNDEtNWQ2YmUzNmM=, ActorId: [14:7501624231969104862:2450], ActorState: ExecuteState, TraceId: 01jtmz61e2cmr0h2451393662q, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: 2025-05-07T08:53:18.824002Z node 14 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [14:7501624231969104879:2459], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:1:1: Error: At function: KiWriteTable!
:1:1: Error: Failed to convert type: List> to List>
:1:1: Error: Failed to convert 'id': pgunknown to Optional
:1:1: Error: Row type mismatch for table: db.[/Root/PgTable2] 2025-05-07T08:53:18.826281Z node 14 :KQP_SESSION WARN: kqp_session_actor.cpp:2147: SessionId: ydb://session/3?node_id=14&id=YjEwYjAxODctOTUwNDFhMTMtNmFmMzgxMjgtMWFkYTM1Nzc=, ActorId: [14:7501624231969104874:2456], ActorState: ExecuteState, TraceId: 01jtmz61kafrj4cepc12yvv5js, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: 2025-05-07T08:53:19.555622Z node 14 :KQP_EXECUTER CRIT: kqp_literal_executer.cpp:112: ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: 01jtmz61vw5zt2y8jj54744rzd, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=14&id=YjJjYTg3YjQtYTA1MmEyODUtYjVjYzVjNjEtMTdhODNjYzA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. TKqpLiteralExecuter, unexpected exception caught: (NKikimr::NMiniKQL::TTerminateException) Terminate was called, reason(51): ERROR: invalid input syntax for type integer: "a" 2025-05-07T08:53:19.556404Z node 14 :KQP_SESSION WARN: kqp_session_actor.cpp:2578: SessionId: ydb://session/3?node_id=14&id=YjJjYTg3YjQtYTA1MmEyODUtYjVjYzVjNjEtMTdhODNjYzA=, ActorId: [14:7501624231969104888:2463], ActorState: ExecuteState, TraceId: 01jtmz61vw5zt2y8jj54744rzd, Create QueryResponse for error on request, msg: 2025-05-07T08:53:19.630925Z node 14 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710676:0, at schemeshard: 72057594046644480 2025-05-07T08:53:20.534045Z node 14 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 14, TabletId: 72075186224037892 not found 2025-05-07T08:53:20.567380Z node 14 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710681:0, at schemeshard: 72057594046644480 |90.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/ut/batch_operations/ydb-core-kqp-ut-batch_operations |90.0%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/batch_operations/ydb-core-kqp-ut-batch_operations |90.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/batch_operations/ydb-core-kqp-ut-batch_operations |90.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/services/persqueue_v1/ut/new_schemecache_ut/ydb-services-persqueue_v1-ut-new_schemecache_ut |90.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/services/persqueue_v1/ut/new_schemecache_ut/ydb-services-persqueue_v1-ut-new_schemecache_ut |90.0%| [LD] {RESULT} $(B)/ydb/services/persqueue_v1/ut/new_schemecache_ut/ydb-services-persqueue_v1-ut-new_schemecache_ut ------- [TM] {asan, default-linux-x86_64, release} ydb/core/grpc_streaming/ut/unittest >> TGRpcStreamingTest::SimpleEcho [GOOD] Test command err: 2025-05-07T08:53:25.414901Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501624262139057588:2069];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:53:25.415957Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/004175/r3tmp/tmpCcaZtU/pdisk_1.dat 2025-05-07T08:53:26.491928Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path 
existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:53:26.754612Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:53:26.754718Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:53:26.759596Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:53:26.787511Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:53:26.893621Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:227: [0x51f000029280] stream accepted Name# Session ok# true peer# ipv6:[::1]:55862 2025-05-07T08:53:26.894002Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:301: [0x51f000029280] facade attach Name# Session actor# [1:7501624266434025418:2261] peer# ipv6:[::1]:55862 2025-05-07T08:53:26.894047Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:325: [0x51f000029280] facade read Name# Session peer# ipv6:[::1]:55862 2025-05-07T08:53:26.894424Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:353: [0x51f000029280] read finished Name# Session ok# true data# peer# ipv6:[::1]:55862 2025-05-07T08:53:26.894497Z node 1 :GRPC_SERVER DEBUG: grpc_streaming_ut.cpp:142: Received TEvReadFinished, success = 1 2025-05-07T08:53:26.894518Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:401: [0x51f000029280] facade write Name# Session data# peer# ipv6:[::1]:55862 2025-05-07T08:53:26.894760Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:511: [0x51f000029280] facade finish Name# Session peer# ipv6:[::1]:55862 grpc status# (0) message# 2025-05-07T08:53:26.895607Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:456: [0x51f000029280] write finished Name# Session ok# true peer# ipv6:[::1]:55862 2025-05-07T08:53:26.895858Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:547: [0x51f000029280] stream finished Name# Session ok# true peer# ipv6:[::1]:55862 grpc status# (0) message# 2025-05-07T08:53:26.895962Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:268: [0x51f000029280] stream done notification Name# Session ok# true peer# ipv6:[::1]:55862 2025-05-07T08:53:26.896549Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:580: [0x51f000029280] deregistering request Name# Session peer# ipv6:[::1]:55862 (finish done) ------- [TM] {asan, default-linux-x86_64, release} ydb/core/grpc_streaming/ut/unittest >> TGRpcStreamingTest::ClientNeverWrites [GOOD] Test command err: 2025-05-07T08:53:25.710652Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501624263696143428:2192];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:53:25.711079Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00416d/r3tmp/tmpY6g6o4/pdisk_1.dat 2025-05-07T08:53:26.724648Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:53:26.787699Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:53:26.787797Z node 1 :HIVE WARN: node_info.cpp:25: 
HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:53:26.813621Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:53:26.815428Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:53:26.907382Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:227: [0x51f00002a080] stream accepted Name# Session ok# true peer# ipv6:[::1]:58782 2025-05-07T08:53:26.907781Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:301: [0x51f00002a080] facade attach Name# Session actor# [1:7501624267991111137:2261] peer# ipv6:[::1]:58782 2025-05-07T08:53:26.907813Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:325: [0x51f00002a080] facade read Name# Session peer# ipv6:[::1]:58782 2025-05-07T08:53:26.907862Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:401: [0x51f00002a080] facade write Name# Session data# peer# ipv6:[::1]:58782 2025-05-07T08:53:26.908097Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:511: [0x51f00002a080] facade finish Name# Session peer# ipv6:[::1]:58782 grpc status# (0) message# 2025-05-07T08:53:26.908134Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:456: [0x51f00002a080] write finished Name# Session ok# true peer# ipv6:[::1]:58782 2025-05-07T08:53:26.908405Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:353: [0x51f00002a080] read finished Name# Session ok# false data# peer# ipv6:[::1]:58782 2025-05-07T08:53:26.908436Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:268: [0x51f00002a080] stream done notification Name# Session ok# true peer# ipv6:[::1]:58782 2025-05-07T08:53:26.908475Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:547: [0x51f00002a080] stream finished Name# Session ok# true peer# ipv6:[::1]:58782 grpc status# (0) message# 2025-05-07T08:53:26.908524Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:580: [0x51f00002a080] deregistering request Name# Session peer# ipv6:[::1]:58782 (finish done) 2025-05-07T08:53:26.908818Z node 1 :GRPC_SERVER DEBUG: grpc_streaming_ut.cpp:187: Received TEvWriteFinished, success = 1 2025-05-07T08:53:26.908841Z node 1 :GRPC_SERVER DEBUG: grpc_streaming_ut.cpp:181: Received TEvReadFinished, success = 0 2025-05-07T08:53:26.908849Z node 1 :GRPC_SERVER DEBUG: grpc_streaming_ut.cpp:194: Received TEvNotifiedWhenDone ------- [TM] {asan, default-linux-x86_64, release} ydb/core/grpc_streaming/ut/unittest >> TGRpcStreamingTest::ClientDisconnects [GOOD] Test command err: 2025-05-07T08:53:26.698382Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501624267836027081:2194];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:53:26.699280Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/004050/r3tmp/tmpZ6H2wt/pdisk_1.dat 2025-05-07T08:53:27.151698Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:53:27.151821Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:53:27.157083Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:53:27.211109Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table 
profiles were not loaded 2025-05-07T08:53:27.279725Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:268: [0x51f000029280] stream done notification Name# Session ok# true peer# ipv6:[::1]:47306 2025-05-07T08:53:27.286227Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:227: [0x51f000029280] stream accepted Name# Session ok# true peer# ipv6:[::1]:47306 2025-05-07T08:53:27.287540Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:301: [0x51f000029280] facade attach Name# Session actor# [1:7501624272130994776:2257] peer# ipv6:[::1]:47306 2025-05-07T08:53:27.287565Z node 1 :GRPC_SERVER DEBUG: grpc_streaming_ut.cpp:230: Received TEvNotifiedWhenDone 2025-05-07T08:53:27.290539Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:547: [0x51f000029280] stream finished Name# Session ok# false peer# unknown grpc status# (1) message# Request abandoned 2025-05-07T08:53:27.290573Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:580: [0x51f000029280] deregistering request Name# Session peer# unknown (finish done) >> TKeyValueTest::TestEmptyWriteReadDeleteWithRestartsThenResponseOk |90.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/rm_service/ut/ydb-core-kqp-rm_service-ut |90.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/rm_service/ut/ydb-core-kqp-rm_service-ut |90.0%| [LD] {RESULT} $(B)/ydb/core/kqp/rm_service/ut/ydb-core-kqp-rm_service-ut ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest >> TAsyncIndexTests::MergeIndexWithReboots[TabletReboots] [GOOD] Test command err: =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2141] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2141] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:116:2058] recipient: [1:111:2142] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:116:2058] recipient: [1:111:2142] Leader for TabletID 72057594046678944 is [1:123:2149] sender: [1:126:2058] recipient: [1:108:2140] Leader for TabletID 72057594046447617 is [1:129:2154] sender: [1:131:2058] recipient: [1:109:2141] Leader for TabletID 72057594046316545 is [1:134:2157] sender: [1:136:2058] recipient: [1:111:2142] 2025-05-07T08:48:40.227450Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:48:40.227548Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:48:40.227603Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:48:40.227647Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:48:40.227702Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:48:40.227730Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: 
OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:48:40.227782Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:48:40.227846Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:48:40.228538Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:48:40.228959Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:48:40.374930Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7610: Got new config: QueryServiceConfig { AvailableExternalDataSources: "ObjectStorage" AvailableExternalDataSources: "ClickHouse" AvailableExternalDataSources: "PostgreSQL" AvailableExternalDataSources: "MySQL" AvailableExternalDataSources: "Ydb" AvailableExternalDataSources: "YT" AvailableExternalDataSources: "Greenplum" AvailableExternalDataSources: "MsSQLServer" AvailableExternalDataSources: "Oracle" AvailableExternalDataSources: "Logging" AvailableExternalDataSources: "Solomon" } 2025-05-07T08:48:40.375047Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:48:40.376155Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# ObjectStorage, ClickHouse, PostgreSQL, MySQL, Ydb, YT, Greenplum, MsSQLServer, Oracle, Logging, Solomon Leader for TabletID 72057594046447617 is [1:129:2154] sender: [1:175:2058] recipient: [1:15:2062] 2025-05-07T08:48:40.423178Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:48:40.423328Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:48:40.423488Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:48:40.429771Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:48:40.429960Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:48:40.430500Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:48:40.430736Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:48:40.433113Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:48:40.434741Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:48:40.434813Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:48:40.435021Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:48:40.435072Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:48:40.435190Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:48:40.435362Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:213:2058] recipient: [1:211:2212] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:213:2058] recipient: [1:211:2212] Leader for TabletID 72057594037968897 is [1:217:2216] sender: [1:218:2058] recipient: [1:211:2212] 2025-05-07T08:48:40.443601Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:123:2149] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T08:48:40.598468Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:48:40.598732Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:48:40.599069Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:48:40.599399Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:48:40.599512Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:48:40.603619Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:48:40.603812Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:48:40.604049Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:48:40.604120Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:48:40.604165Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:48:40.604203Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:48:40.611026Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:48:40.611108Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:48:40.611156Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:48:40.615567Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:48:40.615637Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:48:40.615722Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:48:40.615786Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:48:40.626221Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:48:40.631689Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:48:40.631980Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:134:2157] sender: [1:253:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:48:40.633215Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:48:40.633398Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 134 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 ... 
Status: StatusSuccess Path: "/MyRoot/Table" PathDescription { Self { Name: "Table" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 1002 CreateStep: 5000003 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: true } Table { Name: "Table" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "indexed" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 1 } } TableIndexes { Name: "UserDefinedIndex" LocalPathId: 4 Type: EIndexTypeGlobalAsync State: EIndexStateReady KeyColumnNames: "indexed" SchemaVersion: 1 PathOwnerId: 72057594046678944 DataSize: 0 IndexImplTableDescriptions { PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 
SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 1 } } } } TableSchemaVersion: 1 IsBackup: false IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186233409548 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:53:23.273300Z node 104 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/UserDefinedIndex/indexImplTable" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false 
ReturnBoundaries: true ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-05-07T08:53:23.273629Z node 104 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/UserDefinedIndex/indexImplTable" took 372us result status StatusSuccess 2025-05-07T08:53:23.274607Z node 104 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Table/UserDefinedIndex/indexImplTable" PathDescription { Self { Name: "indexImplTable" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 1002 CreateStep: 5000003 ParentPathId: 4 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeAsyncIndexImplTable Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 2 } ChildrenExist: false } Table { Name: "indexImplTable" Columns { Name: "indexed" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "indexed" KeyColumnNames: "key" KeyColumnIds: 1 KeyColumnIds: 2 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 
1 } } TableSchemaVersion: 1 IsBackup: false IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186233409549 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 5 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TKeyValueTest::TestWriteReadWithRestartsThenResponseOk >> TKeyValueTest::TestWriteReadDeleteWithRestartsAndCatchCollectGarbageEvents >> TKeyValueTest::TestInlineEmptyWriteReadDeleteWithRestartsThenResponseOk >> TKeyValueTest::TestWriteReadWithRestartsThenResponseOkNewApi >> TKeyValueTest::TestRenameWorks >> TKeyValueTest::TestWriteReadDeleteWithRestartsAndCatchCollectGarbageEvents [GOOD] >> TKeyValueTest::TestWriteLongKey >> TGRpcStreamingTest::WritesDoneFromClient [GOOD] >> BasicUsage::GetAllStartPartitionSessions >> PersQueueSdkReadSessionTest::SpecifyClustersExplicitly [GOOD] >> PersQueueSdkReadSessionTest::StopResumeReadingData ------- [TM] {asan, default-linux-x86_64, release} ydb/core/grpc_streaming/ut/unittest >> TGRpcStreamingTest::WritesDoneFromClient [GOOD] Test command err: 2025-05-07T08:53:28.303802Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501624275596769827:2196];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:53:28.304353Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00402d/r3tmp/tmpPXPPmU/pdisk_1.dat 2025-05-07T08:53:29.442181Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:53:29.442308Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:53:29.445736Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:53:29.459032Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:53:29.478621Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:53:29.785685Z node 1 :GRPC_SERVER DEBUG: 
grpc_streaming.h:227: [0x51f00002bc80] stream accepted Name# Session ok# true peer# ipv6:[::1]:33060 2025-05-07T08:53:29.826644Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:301: [0x51f00002bc80] facade attach Name# Session actor# [1:7501624279891737524:2261] peer# ipv6:[::1]:33060 2025-05-07T08:53:29.826696Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:325: [0x51f00002bc80] facade read Name# Session peer# ipv6:[::1]:33060 2025-05-07T08:53:29.834045Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:353: [0x51f00002bc80] read finished Name# Session ok# false data# peer# ipv6:[::1]:33060 2025-05-07T08:53:29.838096Z node 1 :GRPC_SERVER DEBUG: grpc_streaming_ut.cpp:302: Received TEvReadFinished, success = 0 2025-05-07T08:53:29.838180Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:511: [0x51f00002bc80] facade finish Name# Session peer# ipv6:[::1]:33060 grpc status# (9) message# Everything is A-OK 2025-05-07T08:53:29.842101Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:268: [0x51f00002bc80] stream done notification Name# Session ok# true peer# ipv6:[::1]:33060 2025-05-07T08:53:29.842198Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:547: [0x51f00002bc80] stream finished Name# Session ok# true peer# ipv6:[::1]:33060 grpc status# (9) message# Everything is A-OK 2025-05-07T08:53:29.842289Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:580: [0x51f00002bc80] deregistering request Name# Session peer# ipv6:[::1]:33060 (finish done) 2025-05-07T08:53:29.846267Z node 1 :GRPC_SERVER DEBUG: grpc_streaming_ut.cpp:312: Received TEvNotifiedWhenDone >> TGRpcStreamingTest::WriteAndFinishWorks [GOOD] |90.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/services/cms/ut/ydb-services-cms-ut |90.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/services/cms/ut/ydb-services-cms-ut |90.0%| [LD] {RESULT} $(B)/ydb/services/cms/ut/ydb-services-cms-ut ------- [TM] {asan, default-linux-x86_64, release} ydb/core/grpc_streaming/ut/unittest >> TGRpcStreamingTest::WriteAndFinishWorks [GOOD] Test command err: 2025-05-07T08:53:29.598810Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501624279785254624:2070];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:53:29.599720Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003ff8/r3tmp/tmpWtq6J4/pdisk_1.dat 2025-05-07T08:53:30.496108Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:53:30.496215Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:53:30.503404Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:53:30.541651Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:53:30.770667Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:227: [0x51f000029280] stream accepted Name# Session ok# true peer# ipv6:[::1]:48248 2025-05-07T08:53:30.771232Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:301: [0x51f000029280] facade attach Name# Session actor# [1:7501624284080222458:2262] peer# ipv6:[::1]:48248 2025-05-07T08:53:30.771259Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:401: [0x51f000029280] facade write Name# Session 
data# peer# ipv6:[::1]:48248 2025-05-07T08:53:30.771597Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:396: [0x51f000029280] facade write Name# Session data# peer# ipv6:[::1]:48248 grpc status# (0) message# 2025-05-07T08:53:30.772343Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:456: [0x51f000029280] write finished Name# Session ok# true peer# ipv6:[::1]:48248 2025-05-07T08:53:30.772690Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:268: [0x51f000029280] stream done notification Name# Session ok# true peer# ipv6:[::1]:48248 2025-05-07T08:53:30.772874Z node 1 :GRPC_SERVER DEBUG: grpc_streaming_ut.cpp:347: Received TEvWriteFinished, success = 1 2025-05-07T08:53:30.772990Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:456: [0x51f000029280] write finished Name# Session ok# true peer# ipv6:[::1]:48248 2025-05-07T08:53:30.773028Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:547: [0x51f000029280] stream finished Name# Session ok# true peer# ipv6:[::1]:48248 grpc status# (0) message# 2025-05-07T08:53:30.773079Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:580: [0x51f000029280] deregistering request Name# Session peer# ipv6:[::1]:48248 (finish done) 2025-05-07T08:53:30.773230Z node 1 :GRPC_SERVER DEBUG: grpc_streaming_ut.cpp:347: Received TEvWriteFinished, success = 1 >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-56 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-57 >> BasicUsage::TWriteSession_WriteEncoded [GOOD] >> CompressExecutor::TestReorderedExecutor >> BasicUsage::WaitEventBlocksBeforeDiscovery |90.0%| [TA] $(B)/ydb/core/grpc_streaming/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> PersQueueSdkReadSessionTest::ReadSessionWithClose [GOOD] >> PersQueueSdkReadSessionTest::ReadSessionWithCloseNotCommitted >> KqpPg::TableInsert+useSink [GOOD] >> KqpPg::TableInsert-useSink >> KqpPg::InsertNoTargetColumns_Alter-useSink [GOOD] >> KqpPg::InsertNoTargetColumns_Serial+useSink >> BasicUsage::WriteSessionNoAvailableDatabase >> KqpPg::ValuesInsert+useSink [GOOD] >> KqpPg::ValuesInsert-useSink >> TKeyValueTest::TestInlineWriteReadRangeLimitThenLimitWorks [GOOD] >> TKeyValueTest::TestInlineWriteReadRangeLimitThenLimitWorksNewApi >> TKeyValueTest::TestInlineWriteReadWithRestartsWithNotCorrectUTF8NewApi [GOOD] >> TKeyValueTest::TestLargeWriteAndDelete |90.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/cms/console/ut/ydb-core-cms-console-ut |90.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/cms/console/ut/ydb-core-cms-console-ut |90.0%| [TA] {RESULT} $(B)/ydb/core/grpc_streaming/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} |90.0%| [LD] {RESULT} $(B)/ydb/core/cms/console/ut/ydb-core-cms-console-ut |90.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/ut/service/ydb-core-kqp-ut-service |90.0%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/service/ydb-core-kqp-ut-service |90.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/service/ydb-core-kqp-ut-service >> TKeyValueTest::TestWriteTrimWithRestartsThenResponseOk [GOOD] >> TKeyValueTest::TestWriteToExtraChannelThenReadMixedChannelsReturnsOkNewApi |90.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/statistics/service/ut/ydb-core-statistics-service-ut |90.0%| [LD] {RESULT} $(B)/ydb/core/statistics/service/ut/ydb-core-statistics-service-ut |90.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/statistics/service/ut/ydb-core-statistics-service-ut >> TKeyValueTest::TestWriteReadDeleteWithRestartsThenResponseOkWithNewApi [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-57 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-58 >> KqpMultishardIndex::WriteIntoRenamingAsyncIndex [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/keyvalue/ut/unittest >> TKeyValueTest::TestWriteReadDeleteWithRestartsThenResponseOkWithNewApi [GOOD] Test command err: Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:54:2057] recipient: [2:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:54:2057] recipient: [2:51:2095] Leader for TabletID 72057594037927937 is [2:56:2097] sender: [2:57:2057] recipient: [2:51:2095] Leader for TabletID 72057594037927937 is [2:56:2097] sender: [2:74:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:54:2057] recipient: [3:52:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:54:2057] recipient: [3:52:2095] Leader for TabletID 72057594037927937 is [3:56:2097] sender: [3:57:2057] recipient: [3:52:2095] Leader for TabletID 72057594037927937 is [3:56:2097] sender: [3:74:2057] recipient: [3:14:2061] !Reboot 72057594037927937 (actor [3:56:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [3:56:2097] sender: [3:76:2057] recipient: [3:36:2083] Leader for TabletID 72057594037927937 is [3:56:2097] sender: [3:79:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [3:56:2097] sender: [3:80:2057] recipient: [3:78:2110] Leader for TabletID 72057594037927937 is [3:81:2111] sender: [3:82:2057] recipient: [3:78:2110] !Reboot 72057594037927937 (actor [3:56:2097]) rebooted! !Reboot 72057594037927937 (actor [3:56:2097]) tablet resolver refreshed! new actor is[3:81:2111] Leader for TabletID 72057594037927937 is [3:81:2111] sender: [3:135:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:54:2057] recipient: [4:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:54:2057] recipient: [4:51:2095] Leader for TabletID 72057594037927937 is [4:56:2097] sender: [4:57:2057] recipient: [4:51:2095] Leader for TabletID 72057594037927937 is [4:56:2097] sender: [4:74:2057] recipient: [4:14:2061] !Reboot 72057594037927937 (actor [4:56:2097]) on event NKikimr::TEvKeyValue::TEvExecuteTransaction ! 
Leader for TabletID 72057594037927937 is [4:56:2097] sender: [4:76:2057] recipient: [4:36:2083] Leader for TabletID 72057594037927937 is [4:56:2097] sender: [4:78:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [4:56:2097] sender: [4:80:2057] recipient: [4:79:2110] Leader for TabletID 72057594037927937 is [4:81:2111] sender: [4:82:2057] recipient: [4:79:2110] !Reboot 72057594037927937 (actor [4:56:2097]) rebooted! !Reboot 72057594037927937 (actor [4:56:2097]) tablet resolver refreshed! new actor is[4:81:2111] Leader for TabletID 72057594037927937 is [4:81:2111] sender: [4:135:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:54:2057] recipient: [5:50:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:54:2057] recipient: [5:50:2095] Leader for TabletID 72057594037927937 is [5:56:2097] sender: [5:57:2057] recipient: [5:50:2095] Leader for TabletID 72057594037927937 is [5:56:2097] sender: [5:74:2057] recipient: [5:14:2061] !Reboot 72057594037927937 (actor [5:56:2097]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [5:56:2097] sender: [5:77:2057] recipient: [5:36:2083] Leader for TabletID 72057594037927937 is [5:56:2097] sender: [5:80:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [5:56:2097] sender: [5:81:2057] recipient: [5:79:2110] Leader for TabletID 72057594037927937 is [5:82:2111] sender: [5:83:2057] recipient: [5:79:2110] !Reboot 72057594037927937 (actor [5:56:2097]) rebooted! !Reboot 72057594037927937 (actor [5:56:2097]) tablet resolver refreshed! new actor is[5:82:2111] Leader for TabletID 72057594037927937 is [5:82:2111] sender: [5:136:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:54:2057] recipient: [6:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:54:2057] recipient: [6:51:2095] Leader for TabletID 72057594037927937 is [6:56:2097] sender: [6:57:2057] recipient: [6:51:2095] Leader for TabletID 72057594037927937 is [6:56:2097] sender: [6:74:2057] recipient: [6:14:2061] !Reboot 72057594037927937 (actor [6:56:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [6:56:2097] sender: [6:80:2057] recipient: [6:36:2083] Leader for TabletID 72057594037927937 is [6:56:2097] sender: [6:83:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [6:56:2097] sender: [6:84:2057] recipient: [6:82:2113] Leader for TabletID 72057594037927937 is [6:85:2114] sender: [6:86:2057] recipient: [6:82:2113] !Reboot 72057594037927937 (actor [6:56:2097]) rebooted! !Reboot 72057594037927937 (actor [6:56:2097]) tablet resolver refreshed! new actor is[6:85:2114] Leader for TabletID 72057594037927937 is [6:85:2114] sender: [6:139:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:54:2057] recipient: [7:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:54:2057] recipient: [7:51:2095] Leader for TabletID 72057594037927937 is [7:56:2097] sender: [7:57:2057] recipient: [7:51:2095] Leader for TabletID 72057594037927937 is [7:56:2097] sender: [7:74:2057] recipient: [7:14:2061] !Reboot 72057594037927937 (actor [7:56:2097]) on event NKikimr::TEvKeyValue::TEvRead ! 
Leader for TabletID 72057594037927937 is [7:56:2097] sender: [7:80:2057] recipient: [7:36:2083] Leader for TabletID 72057594037927937 is [7:56:2097] sender: [7:83:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [7:56:2097] sender: [7:84:2057] recipient: [7:82:2113] Leader for TabletID 72057594037927937 is [7:85:2114] sender: [7:86:2057] recipient: [7:82:2113] !Reboot 72057594037927937 (actor [7:56:2097]) rebooted! !Reboot 72057594037927937 (actor [7:56:2097]) tablet resolver refreshed! new actor is[7:85:2114] Leader for TabletID 72057594037927937 is [7:85:2114] sender: [7:139:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:54:2057] recipient: [8:50:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:54:2057] recipient: [8:50:2095] Leader for TabletID 72057594037927937 is [8:56:2097] sender: [8:57:2057] recipient: [8:50:2095] Leader for TabletID 72057594037927937 is [8:56:2097] sender: [8:74:2057] recipient: [8:14:2061] !Reboot 72057594037927937 (actor [8:56:2097]) on event NKikimr::TEvKeyValue::TEvNotify ! Leader for TabletID 72057594037927937 is [8:56:2097] sender: [8:81:2057] recipient: [8:36:2083] Leader for TabletID 72057594037927937 is [8:56:2097] sender: [8:84:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [8:56:2097] sender: [8:85:2057] recipient: [8:83:2113] Leader for TabletID 72057594037927937 is [8:86:2114] sender: [8:87:2057] recipient: [8:83:2113] !Reboot 72057594037927937 (actor [8:56:2097]) rebooted! !Reboot 72057594037927937 (actor [8:56:2097]) tablet resolver refreshed! new actor is[8:86:2114] Leader for TabletID 72057594037927937 is [8:86:2114] sender: [8:104:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:54:2057] recipient: [9:52:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:54:2057] recipient: [9:52:2095] Leader for TabletID 72057594037927937 is [9:56:2097] sender: [9:57:2057] recipient: [9:52:2095] Leader for TabletID 72057594037927937 is [9:56:2097] sender: [9:74:2057] recipient: [9:14:2061] !Reboot 72057594037927937 (actor [9:56:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [9:56:2097] sender: [9:83:2057] recipient: [9:36:2083] Leader for TabletID 72057594037927937 is [9:56:2097] sender: [9:86:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [9:56:2097] sender: [9:87:2057] recipient: [9:85:2115] Leader for TabletID 72057594037927937 is [9:88:2116] sender: [9:89:2057] recipient: [9:85:2115] !Reboot 72057594037927937 (actor [9:56:2097]) rebooted! !Reboot 72057594037927937 (actor [9:56:2097]) tablet resolver refreshed! new actor is[9:88:2116] Leader for TabletID 72057594037927937 is [9:88:2116] sender: [9:142:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:54:2057] recipient: [10:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:54:2057] recipient: [10:51:2095] Leader for TabletID 72057594037927937 is [10:56:2097] sender: [10:57:2057] recipient: [10:51:2095] Leader for TabletID 72057594037927937 is [10:56:2097] sender: [10:74:2057] recipient: [10:14:2061] !Reboot 72057594037927937 (actor [10:56:2097]) on event NKikimr::TEvKeyValue::TEvExecuteTransaction ! 
Leader for TabletID 72057594037927937 is [10:56:2097] sender: [10:83:2057] recipient: [10:36:2083] Leader for TabletID 72057594037927937 is [10:56:2097] sender: [10:86:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [10:56:2097] sender: [10:87:2057] recipient: [10:85:2115] Leader for TabletID 72057594037927937 is [10:88:2116] sender: [10:89:2057] recipient: [10:85:2115] !Reboot 72057594037927937 (actor [10:56:2097]) rebooted! !Reboot 72057594037927937 (actor [10:56:2097]) tablet resolver refreshed! new actor is[10:88:2116] Leader for TabletID 72057594037927937 is [10:88:2116] sender: [10:142:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:54:2057] recipient: [11:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:54:2057] recipient: [11:51:2095] Leader for TabletID 72057594037927937 is [11:56:2097] sender: [11:57:2057] recipient: [11:51:2095] Leader for TabletID 72057594037927937 is [11:56:2097] sender: [11:74:2057] recipient: [11:14:2061] !Reboot 72057594037927937 (actor [11:56:2097]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [11:56:2097] sender: [11:84:2057] recipient: [11:36:2083] Leader for TabletID 72057594037927937 is [11:56:2097] sender: [11:87:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [11:56:2097] sender: [11:88:2057] recipient: [11:86:2115] Leader for TabletID 72057594037927937 is [11:89:2116] sender: [11:90:2057] recipient: [11:86:2115] !Reboot 72057594037927937 (actor [11:56:2097]) rebooted! !Reboot 72057594037927937 (actor [11:56:2097]) tablet resolver refreshed! new actor is[11:89:2116] Leader for TabletID 72057594037927937 is [11:89:2116] sender: [11:143:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:54:2057] recipient: [12:52:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:54:2057] recipient: [12:52:2095] Leader for TabletID 72057594037927937 is [12:56:2097] sender: [12:57:2057] recipient: [12:52:2095] Leader for TabletID 72057594037927937 is [12:56:2097] sender: [12:74:2057] recipient: [12:14:2061] !Reboot 72057594037927937 (actor [12:56:2097]) on event NKikimr::TEvKeyValue::TEvCollect ! Leader for TabletID 72057594037927937 is [12:56:2097] sender: [12:85:2057] recipient: [12:36:2083] Leader for TabletID 72057594037927937 is [12:56:2097] sender: [12:88:2057] recipient: [12:14:2061] Leader for TabletID 72057594037927937 is [12:56:2097] sender: [12:89:2057] recipient: [12:87:2116] Leader for TabletID 72057594037927937 is [12:90:2117] sender: [12:91:2057] recipient: [12:87:2116] !Reboot 72057594037927937 (actor [12:56:2097]) rebooted! !Reboot 72057594037927937 (actor [12:56:2097]) tablet resolver refreshed! new actor is[12:90:2117] Leader for TabletID 72057594037927937 is [12:90:2117] sender: [12:110:2057] recipient: [12:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [13:54:2057] recipient: [13:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [13:54:2057] recipient: [13:51:2095] Leader for TabletID 72057594037927937 is [13:56:2097] sender: [13:57:2057] recipient: [13:51:2095] Leader for TabletID 72057594037927937 is [13:56:2097] sender: [13:74:2057] recipient: [13:14:2061] !Reboot 72057594037927937 (actor [13:56:2097]) on event NKikimr::TEvKeyValue::TEvCompleteGC ! 
Leader for TabletID 72057594037927937 is [13:56:2097] sender: [13:86:2057] recipient: [13:36:2083] Leader for TabletID 72057594037927937 is [13:56:2097] sender: [13:89:2057] recipient: [13:14:2061] Leader for TabletID 72057594037927937 is [13:56:2097] sender: [13:90:2057] recipient: [13:88:2117] Leader for TabletID 72057594037927937 is [13:91:2118] sender: [13:92:2057] recipient: [13:88:2117] !Reboot 72057594037927937 (actor [13:56:2097]) rebooted! !Reboot 72057594037927937 (actor [13:56:2097]) tablet resolver refreshed! new actor is[13:91:2118] Leader for TabletID 72057594037927937 is [13:91:2118] sender: [13:111:2057] recipient: [13:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [14:54:2057] recipient: [14:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [14:54:2057] recipient: [14:51:2095] Leader for TabletID 72057594037927937 is [14:56:2097] sender: [14:57:2057] recipient: [14:51:2095] Leader for TabletID 72057594037927937 is [14:56:2097] sender: [14:74:2057] recipient: [14:14:2061] !Reboot 72057594037927937 (actor [14:56:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [14:56:2097] sender: [14:89:2057] recipient: [14:36:2083] Leader for TabletID 72057594037927937 is [14:56:2097] sender: [14:92:2057] recipient: [14:14:2061] Leader for TabletID 72057594037927937 is [14:56:2097] sender: [14:93:2057] recipient: [14:91:2120] Leader for TabletID 72057594037927937 is [14:94:2121] sender: [14:95:2057] recipient: [14:91:2120] !Reboot 72057594037927937 (actor [14:56:2097]) rebooted! !Reboot 72057594037927937 (actor [14:56:2097]) tablet resolver refreshed! new actor is[14:94:2121] Leader for TabletID 72057594037927937 is [14:94:2121] sender: [14:148:2057] recipient: [14:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [15:54:2057] recipient: [15:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [15:54:2057] recipient: [15:51:2095] Leader for TabletID 72057594037927937 is [15:56:2097] sender: [15:57:2057] recipient: [15:51:2095] Leader for TabletID 72057594037927937 is [15:56:2097] sender: [15:74:2057] recipient: [15:14:2061] !Reboot 72057594037927937 (actor [15:56:2097]) on event NKikimr::TEvKeyValue::TEvRead ! Leader for TabletID 72057594037927937 is [15:56:2097] sender: [15:89:2057] recipient: [15:36:2083] Leader for TabletID 72057594037927937 is [15:56:2097] sender: [15:92:2057] recipient: [15:14:2061] Leader for TabletID 72057594037927937 is [15:56:2097] sender: [15:93:2057] recipient: [15:91:2120] Leader for TabletID 72057594037927937 is [15:94:2121] sender: [15:95:2057] recipient: [15:91:2120] !Reboot 72057594037927937 (actor [15:56:2097]) rebooted! !Reboot 72057594037927937 (actor [15:56:2097]) tablet resolver refreshed! 
new actor is[15:94:2121] Leader for TabletID 72057594037927937 is [15:94:2121] sender: [15:148:2057] recipient: [15:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [16:54:2057] recipient: [16:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [16:54:2057] recipient: [16:51:2095] Leader for TabletID 72057594037927937 is [16:56:2097] sender: [16:57:2057] recipient: [16:51:2095] Leader for TabletID 72057594037927937 is [16:56:2097] sender: [16:74:2057] recipient: [16:14:2061] >> TKeyValueTest::TestWriteReadDeleteWithRestartsThenResponseOk [GOOD] >> TKeyValueTest::TestWriteReadDeleteWithRestartsAndCatchCollectGarbageEventsWithSlowInitialGC >> BasicUsage::TSimpleWriteSession_AutoSeqNo_BasicUsage [GOOD] >> BasicUsage::TWriteSession_AutoBatching [GOOD] >> BasicUsage::TWriteSession_BatchingProducesContinueTokens [GOOD] >> BasicUsage::BrokenCredentialsProvider >> TKeyValueTest::TestWriteLongKey [GOOD] >> TPQTest::TestSourceIdDropBySourceIdCount [GOOD] >> TPQTest::TestSetClientOffset >> TYardTest::TestLogOverwriteRestarts [GOOD] >> TYardTest::TestLogOwerwrite |90.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/services/datastreams/ut/ydb-services-datastreams-ut |90.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/services/datastreams/ut/ydb-services-datastreams-ut |90.1%| [LD] {RESULT} $(B)/ydb/services/datastreams/ut/ydb-services-datastreams-ut ------- [TM] {asan, default-linux-x86_64, release} ydb/core/keyvalue/ut/unittest >> TKeyValueTest::TestWriteLongKey [GOOD] Test command err: Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:54:2057] recipient: [2:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:54:2057] recipient: [2:51:2095] Leader for TabletID 72057594037927937 is [2:56:2097] sender: [2:57:2057] recipient: [2:51:2095] Leader for TabletID 72057594037927937 is [2:56:2097] sender: [2:74:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:54:2057] recipient: [3:52:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:54:2057] recipient: [3:52:2095] Leader for TabletID 72057594037927937 is [3:56:2097] sender: [3:57:2057] recipient: [3:52:2095] Leader for TabletID 72057594037927937 is [3:56:2097] sender: [3:74:2057] recipient: [3:14:2061] !Reboot 72057594037927937 (actor [3:56:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [3:56:2097] sender: [3:76:2057] recipient: [3:36:2083] Leader for TabletID 72057594037927937 is [3:56:2097] sender: [3:79:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [3:56:2097] sender: [3:80:2057] recipient: [3:78:2110] Leader for TabletID 72057594037927937 is [3:81:2111] sender: [3:82:2057] recipient: [3:78:2110] !Reboot 72057594037927937 (actor [3:56:2097]) rebooted! !Reboot 72057594037927937 (actor [3:56:2097]) tablet resolver refreshed! new actor is[3:81:2111] Leader for TabletID 72057594037927937 is [3:81:2111] sender: [3:135:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:54:2057] recipient: [4:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:54:2057] recipient: [4:51:2095] Leader for TabletID 72057594037927937 is [4:56:2097] sender: [4:57:2057] recipient: [4:51:2095] Leader for TabletID 72057594037927937 is [4:56:2097] sender: [4:74:2057] recipient: [4:14:2061] !Reboot 72057594037927937 (actor [4:56:2097]) on event NKikimr::TEvKeyValue::TEvAcquireLock ! 
Leader for TabletID 72057594037927937 is [4:56:2097] sender: [4:76:2057] recipient: [4:36:2083] Leader for TabletID 72057594037927937 is [4:56:2097] sender: [4:78:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [4:56:2097] sender: [4:80:2057] recipient: [4:79:2110] Leader for TabletID 72057594037927937 is [4:81:2111] sender: [4:82:2057] recipient: [4:79:2110] !Reboot 72057594037927937 (actor [4:56:2097]) rebooted! !Reboot 72057594037927937 (actor [4:56:2097]) tablet resolver refreshed! new actor is[4:81:2111] Leader for TabletID 72057594037927937 is [4:81:2111] sender: [4:135:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:54:2057] recipient: [5:50:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:54:2057] recipient: [5:50:2095] Leader for TabletID 72057594037927937 is [5:56:2097] sender: [5:57:2057] recipient: [5:50:2095] Leader for TabletID 72057594037927937 is [5:56:2097] sender: [5:74:2057] recipient: [5:14:2061] !Reboot 72057594037927937 (actor [5:56:2097]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [5:56:2097] sender: [5:77:2057] recipient: [5:36:2083] Leader for TabletID 72057594037927937 is [5:56:2097] sender: [5:80:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [5:56:2097] sender: [5:81:2057] recipient: [5:79:2110] Leader for TabletID 72057594037927937 is [5:82:2111] sender: [5:83:2057] recipient: [5:79:2110] !Reboot 72057594037927937 (actor [5:56:2097]) rebooted! !Reboot 72057594037927937 (actor [5:56:2097]) tablet resolver refreshed! new actor is[5:82:2111] Leader for TabletID 72057594037927937 is [5:82:2111] sender: [5:136:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:54:2057] recipient: [6:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:54:2057] recipient: [6:51:2095] Leader for TabletID 72057594037927937 is [6:56:2097] sender: [6:57:2057] recipient: [6:51:2095] Leader for TabletID 72057594037927937 is [6:56:2097] sender: [6:74:2057] recipient: [6:14:2061] !Reboot 72057594037927937 (actor [6:56:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [6:56:2097] sender: [6:80:2057] recipient: [6:36:2083] Leader for TabletID 72057594037927937 is [6:56:2097] sender: [6:83:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [6:56:2097] sender: [6:84:2057] recipient: [6:82:2113] Leader for TabletID 72057594037927937 is [6:85:2114] sender: [6:86:2057] recipient: [6:82:2113] !Reboot 72057594037927937 (actor [6:56:2097]) rebooted! !Reboot 72057594037927937 (actor [6:56:2097]) tablet resolver refreshed! new actor is[6:85:2114] Leader for TabletID 72057594037927937 is [6:85:2114] sender: [6:139:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:54:2057] recipient: [7:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:54:2057] recipient: [7:51:2095] Leader for TabletID 72057594037927937 is [7:56:2097] sender: [7:57:2057] recipient: [7:51:2095] Leader for TabletID 72057594037927937 is [7:56:2097] sender: [7:74:2057] recipient: [7:14:2061] !Reboot 72057594037927937 (actor [7:56:2097]) on event NKikimr::TEvKeyValue::TEvExecuteTransaction ! 
Leader for TabletID 72057594037927937 is [7:56:2097] sender: [7:80:2057] recipient: [7:36:2083] Leader for TabletID 72057594037927937 is [7:56:2097] sender: [7:83:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [7:56:2097] sender: [7:84:2057] recipient: [7:82:2113] Leader for TabletID 72057594037927937 is [7:85:2114] sender: [7:86:2057] recipient: [7:82:2113] !Reboot 72057594037927937 (actor [7:56:2097]) rebooted! !Reboot 72057594037927937 (actor [7:56:2097]) tablet resolver refreshed! new actor is[7:85:2114] Leader for TabletID 72057594037927937 is [7:85:2114] sender: [7:139:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:54:2057] recipient: [8:50:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:54:2057] recipient: [8:50:2095] Leader for TabletID 72057594037927937 is [8:56:2097] sender: [8:57:2057] recipient: [8:50:2095] Leader for TabletID 72057594037927937 is [8:56:2097] sender: [8:74:2057] recipient: [8:14:2061] !Reboot 72057594037927937 (actor [8:56:2097]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [8:56:2097] sender: [8:81:2057] recipient: [8:36:2083] Leader for TabletID 72057594037927937 is [8:56:2097] sender: [8:84:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [8:56:2097] sender: [8:85:2057] recipient: [8:83:2113] Leader for TabletID 72057594037927937 is [8:86:2114] sender: [8:87:2057] recipient: [8:83:2113] !Reboot 72057594037927937 (actor [8:56:2097]) rebooted! !Reboot 72057594037927937 (actor [8:56:2097]) tablet resolver refreshed! new actor is[8:86:2114] Leader for TabletID 72057594037927937 is [8:86:2114] sender: [8:140:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:54:2057] recipient: [9:52:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:54:2057] recipient: [9:52:2095] Leader for TabletID 72057594037927937 is [9:56:2097] sender: [9:57:2057] recipient: [9:52:2095] Leader for TabletID 72057594037927937 is [9:56:2097] sender: [9:74:2057] recipient: [9:14:2061] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpMultishardIndex::WriteIntoRenamingAsyncIndex [GOOD] Test command err: Trying to start YDB, gRPC: 19241, MsgBus: 25432 2025-05-07T08:51:52.262472Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501623864194309911:2125];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:51:52.262534Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003c81/r3tmp/tmpTKj4Hd/pdisk_1.dat 2025-05-07T08:51:53.327357Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:51:53.337126Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:51:53.337231Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:51:53.350044Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 
2025-05-07T08:51:53.363598Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 19241, node 1 2025-05-07T08:51:53.532948Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:51:53.532980Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:51:53.532989Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:51:53.533128Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:25432 TClient is connected to server localhost:25432 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:51:54.329695Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:51:54.396389Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:51:54.801125Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:51:55.146260Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:51:55.271316Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T08:51:57.266113Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501623864194309911:2125];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:51:57.281860Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:51:58.456186Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623889964115277:2410], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:51:58.456311Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:51:59.212489Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T08:51:59.313837Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T08:51:59.411792Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T08:51:59.498020Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T08:51:59.555750Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T08:51:59.718902Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T08:51:59.799677Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T08:51:59.909947Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623894259083248:2477], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:51:59.910054Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:51:59.910454Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623894259083253:2480], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:51:59.918296Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-05-07T08:51:59.937834Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501623894259083255:2481], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T08:52:00.043406Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501623898554050602:3427] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:52:01.431351Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:52:04.075433Z node 1 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1562: SelfId: [1:7501623915733921652:2765], TxId: 281474976710696, task: 2. Ctx: { SessionId : ydb://session/3?node_id=1&id=NjZjZjc5NGItNjRjZDRmYmQtYWIwZmI1MjQtMTU5YTQ1MDY=. CustomerSuppliedId : . TraceId : 01jtmz3rr38jgy96qcec4e8q6b. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. InputTransform[0] fatal error: {
: Error: Read request aborted subissue: {
: Error: Wrong schemaversion 1 requested, table schemaversion 2 (shard# 72075186224037940 node# 1 state# Ready) } } 2025-05-07T08:52:04.172008Z node 1 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:678: SelfId: [1:7501623915733921652:2765], TxId: 281474976710696, task: 2. Ctx: { SessionId : ydb://session/3?node_id=1&id=NjZjZjc5NGItNjRjZDRmYmQtYWIwZmI1MjQtMTU5YTQ1MDY=. CustomerSuppliedId : . TraceId : 01jtmz3rr38jgy96qcec4e8q6b. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. InternalError: ABORTED DEFAULT_ERROR: {
: Error: Read request aborted subissue: {
: Error: Wrong schemaversion 1 requested, table schemaversion 2 (shard# 72075186224037940 node# 1 state# Ready) } }. 2025-05-07T08:52:04.172859Z node 1 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1216: SelfId: [1:7501623915733921653:2766], TxId: 281474976710696, task: 3. Ctx: { TraceId : 01jtmz3rr38jgy96qcec4e8q6b. SessionId : ydb://session/3?node_id=1&id=NjZjZjc5NGItNjRjZDRmYmQtYWIwZmI1MjQtMTU5YTQ1MDY=. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Handle abort execution event from: [1:7501623915733921634:2584], status: ABORTED, reason: {
: Error: Terminate execution } 2025-05-07T08:52:04.173323Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2578: SessionId: ydb://session/3?node_id=1&id=NjZjZjc5NGItNjRjZDRmYmQtYWIwZmI1MjQtMTU5YTQ1MDY=, ActorId: [1:7501623902849018980:2584], ActorState: ExecuteState, TraceId: 01jtmz3rr38jgy96qcec4e8q6b, Create QueryResponse for error on request, msg: 2025-05-07T08:52:08.296822Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7261: Cannot get console configs 2025-05-07T08:52:08.296867Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:52:11.981660Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037925 not foun ... st: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:53:06.251735Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:53:06.264074Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-05-07T08:53:06.281062Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:53:06.375450Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:53:06.634459Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:53:06.759666Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:53:09.641906Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7501624195478471488:2407], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:53:09.642032Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:53:09.723558Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-05-07T08:53:09.785745Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-05-07T08:53:09.834202Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-05-07T08:53:09.877112Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-05-07T08:53:09.933011Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-05-07T08:53:10.014695Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-05-07T08:53:10.142666Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-05-07T08:53:10.206284Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7501624178298600665:2065];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:53:10.206372Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:53:10.275643Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7501624199773439439:2471], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:53:10.275747Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:53:10.276126Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7501624199773439444:2474], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:53:10.280796Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-05-07T08:53:10.300112Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7501624199773439446:2475], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-05-07T08:53:10.401955Z node 2 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [2:7501624199773439498:3418] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:53:11.945522Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:53:20.466591Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7261: Cannot get console configs 2025-05-07T08:53:20.466627Z node 2 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:53:22.474493Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037929 not found 2025-05-07T08:53:22.475179Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037928 not found 2025-05-07T08:53:22.488359Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037940 not found 2025-05-07T08:53:22.491442Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037939 not found 2025-05-07T08:53:25.375438Z node 2 :CHANGE_EXCHANGE ERROR: change_sender_table_base.cpp:211: [TableChangeSenderShard][72075186224037924:1][72075186224037925][2:7501624212658342983:2626] Apply status: status# 2, reason# 7 2025-05-07T08:53:25.399773Z node 2 :CHANGE_EXCHANGE ERROR: change_sender_table_base.cpp:211: [TableChangeSenderShard][72075186224037924:1][72075186224037933][2:7501624212658342984:2626] Apply status: status# 2, reason# 7 2025-05-07T08:53:25.464192Z node 2 :CHANGE_EXCHANGE ERROR: change_sender_table_base.cpp:88: [TableChangeSenderShard][72075186224037924:1][72075186224037925][2:7501624264197957555:2626] Handshake status: status# 2, reason# 7 2025-05-07T08:53:25.464277Z node 2 :CHANGE_EXCHANGE ERROR: change_sender_table_base.cpp:88: [TableChangeSenderShard][72075186224037924:1][72075186224037933][2:7501624264197957557:2626] Handshake status: status# 2, reason# 7 2025-05-07T08:53:25.497443Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037935 not found 2025-05-07T08:53:25.515649Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037936 not found 2025-05-07T08:53:25.556551Z node 2 :CHANGE_EXCHANGE ERROR: change_sender_table_base.cpp:88: [TableChangeSenderShard][72075186224037924:1][72075186224037925][2:7501624264197957764:2626] Handshake status: status# 2, reason# 7 2025-05-07T08:53:25.557692Z node 2 :CHANGE_EXCHANGE ERROR: change_sender_table_base.cpp:88: [TableChangeSenderShard][72075186224037924:1][72075186224037933][2:7501624264197957765:2626] Handshake status: status# 2, reason# 7 2025-05-07T08:53:25.575137Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037926 not found 2025-05-07T08:53:25.602837Z node 2 :HIVE WARN: 
hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037937 not found 2025-05-07T08:53:25.602881Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037938 not found 2025-05-07T08:53:25.602901Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037932 not found 2025-05-07T08:53:25.611677Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037931 not found 2025-05-07T08:53:32.596445Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037927 not found 2025-05-07T08:53:32.596485Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037930 not found 2025-05-07T08:53:32.596505Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037942 not found 2025-05-07T08:53:35.512074Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037925 not found 2025-05-07T08:53:35.533673Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037934 not found 2025-05-07T08:53:35.533715Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037943 not found 2025-05-07T08:53:35.533734Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037941 not found 2025-05-07T08:53:35.533751Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037946 not found 2025-05-07T08:53:40.240441Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037933 not found >> TYardTest::TestLogOwerwrite [GOOD] >> TKeyValueTest::TestWriteReadDeleteWithRestartsAndCatchCollectGarbageEventsWithSlowInitialGC [GOOD] >> BasicUsage::SelectDatabaseByHash [GOOD] >> BasicUsage::SelectDatabase [GOOD] |90.1%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/federated_topic/ut/unittest >> BasicUsage::SelectDatabase [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/keyvalue/ut/unittest >> TKeyValueTest::TestWriteReadDeleteWithRestartsAndCatchCollectGarbageEventsWithSlowInitialGC [GOOD] Test command err: Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:54:2057] recipient: [1:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:54:2057] recipient: [1:51:2095] Leader for TabletID 72057594037927937 is [1:56:2097] sender: [1:57:2057] recipient: [1:51:2095] Leader for TabletID 72057594037927937 is [1:56:2097] sender: [1:74:2057] recipient: [1:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:54:2057] recipient: [2:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:54:2057] recipient: [2:51:2095] Leader for TabletID 72057594037927937 is [2:56:2097] sender: [2:57:2057] recipient: [2:51:2095] Leader for TabletID 72057594037927937 is [2:56:2097] sender: [2:74:2057] recipient: [2:14:2061] !Reboot 
72057594037927937 (actor [2:56:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [2:56:2097] sender: [2:76:2057] recipient: [2:36:2083] Leader for TabletID 72057594037927937 is [2:56:2097] sender: [2:79:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [2:56:2097] sender: [2:80:2057] recipient: [2:78:2110] Leader for TabletID 72057594037927937 is [2:81:2111] sender: [2:82:2057] recipient: [2:78:2110] !Reboot 72057594037927937 (actor [2:56:2097]) rebooted! !Reboot 72057594037927937 (actor [2:56:2097]) tablet resolver refreshed! new actor is[2:81:2111] Leader for TabletID 72057594037927937 is [2:81:2111] sender: [2:135:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:54:2057] recipient: [3:52:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:54:2057] recipient: [3:52:2095] Leader for TabletID 72057594037927937 is [3:56:2097] sender: [3:57:2057] recipient: [3:52:2095] Leader for TabletID 72057594037927937 is [3:56:2097] sender: [3:74:2057] recipient: [3:14:2061] !Reboot 72057594037927937 (actor [3:56:2097]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [3:56:2097] sender: [3:76:2057] recipient: [3:36:2083] Leader for TabletID 72057594037927937 is [3:56:2097] sender: [3:78:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [3:56:2097] sender: [3:80:2057] recipient: [3:79:2110] Leader for TabletID 72057594037927937 is [3:81:2111] sender: [3:82:2057] recipient: [3:79:2110] !Reboot 72057594037927937 (actor [3:56:2097]) rebooted! !Reboot 72057594037927937 (actor [3:56:2097]) tablet resolver refreshed! new actor is[3:81:2111] Leader for TabletID 72057594037927937 is [3:81:2111] sender: [3:135:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:54:2057] recipient: [4:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:54:2057] recipient: [4:51:2095] Leader for TabletID 72057594037927937 is [4:56:2097] sender: [4:57:2057] recipient: [4:51:2095] Leader for TabletID 72057594037927937 is [4:56:2097] sender: [4:74:2057] recipient: [4:14:2061] !Reboot 72057594037927937 (actor [4:56:2097]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [4:56:2097] sender: [4:77:2057] recipient: [4:36:2083] Leader for TabletID 72057594037927937 is [4:56:2097] sender: [4:80:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [4:56:2097] sender: [4:81:2057] recipient: [4:79:2110] Leader for TabletID 72057594037927937 is [4:82:2111] sender: [4:83:2057] recipient: [4:79:2110] !Reboot 72057594037927937 (actor [4:56:2097]) rebooted! !Reboot 72057594037927937 (actor [4:56:2097]) tablet resolver refreshed! new actor is[4:82:2111] Leader for TabletID 72057594037927937 is [4:82:2111] sender: [4:136:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:54:2057] recipient: [5:50:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:54:2057] recipient: [5:50:2095] Leader for TabletID 72057594037927937 is [5:56:2097] sender: [5:57:2057] recipient: [5:50:2095] Leader for TabletID 72057594037927937 is [5:56:2097] sender: [5:74:2057] recipient: [5:14:2061] !Reboot 72057594037927937 (actor [5:56:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! 
Leader for TabletID 72057594037927937 is [5:56:2097] sender: [5:80:2057] recipient: [5:36:2083] Leader for TabletID 72057594037927937 is [5:56:2097] sender: [5:83:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [5:56:2097] sender: [5:84:2057] recipient: [5:82:2113] Leader for TabletID 72057594037927937 is [5:85:2114] sender: [5:86:2057] recipient: [5:82:2113] !Reboot 72057594037927937 (actor [5:56:2097]) rebooted! !Reboot 72057594037927937 (actor [5:56:2097]) tablet resolver refreshed! new actor is[5:85:2114] Leader for TabletID 72057594037927937 is [5:85:2114] sender: [5:139:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:54:2057] recipient: [6:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:54:2057] recipient: [6:51:2095] Leader for TabletID 72057594037927937 is [6:56:2097] sender: [6:57:2057] recipient: [6:51:2095] Leader for TabletID 72057594037927937 is [6:56:2097] sender: [6:74:2057] recipient: [6:14:2061] !Reboot 72057594037927937 (actor [6:56:2097]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [6:56:2097] sender: [6:80:2057] recipient: [6:36:2083] Leader for TabletID 72057594037927937 is [6:56:2097] sender: [6:83:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [6:56:2097] sender: [6:84:2057] recipient: [6:82:2113] Leader for TabletID 72057594037927937 is [6:85:2114] sender: [6:86:2057] recipient: [6:82:2113] !Reboot 72057594037927937 (actor [6:56:2097]) rebooted! !Reboot 72057594037927937 (actor [6:56:2097]) tablet resolver refreshed! new actor is[6:85:2114] Leader for TabletID 72057594037927937 is [6:85:2114] sender: [6:139:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:54:2057] recipient: [7:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:54:2057] recipient: [7:51:2095] Leader for TabletID 72057594037927937 is [7:56:2097] sender: [7:57:2057] recipient: [7:51:2095] Leader for TabletID 72057594037927937 is [7:56:2097] sender: [7:74:2057] recipient: [7:14:2061] !Reboot 72057594037927937 (actor [7:56:2097]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [7:56:2097] sender: [7:81:2057] recipient: [7:36:2083] Leader for TabletID 72057594037927937 is [7:56:2097] sender: [7:84:2057] recipient: [7:83:2113] Leader for TabletID 72057594037927937 is [7:56:2097] sender: [7:85:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [7:86:2114] sender: [7:87:2057] recipient: [7:83:2113] !Reboot 72057594037927937 (actor [7:56:2097]) rebooted! !Reboot 72057594037927937 (actor [7:56:2097]) tablet resolver refreshed! new actor is[7:86:2114] Leader for TabletID 72057594037927937 is [7:86:2114] sender: [7:140:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:54:2057] recipient: [8:50:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:54:2057] recipient: [8:50:2095] Leader for TabletID 72057594037927937 is [8:56:2097] sender: [8:57:2057] recipient: [8:50:2095] Leader for TabletID 72057594037927937 is [8:56:2097] sender: [8:74:2057] recipient: [8:14:2061] !Reboot 72057594037927937 (actor [8:56:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! 
Leader for TabletID 72057594037927937 is [8:56:2097] sender: [8:83:2057] recipient: [8:36:2083] Leader for TabletID 72057594037927937 is [8:56:2097] sender: [8:86:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [8:56:2097] sender: [8:87:2057] recipient: [8:85:2115] Leader for TabletID 72057594037927937 is [8:88:2116] sender: [8:89:2057] recipient: [8:85:2115] !Reboot 72057594037927937 (actor [8:56:2097]) rebooted! !Reboot 72057594037927937 (actor [8:56:2097]) tablet resolver refreshed! new actor is[8:88:2116] Leader for TabletID 72057594037927937 is [8:88:2116] sender: [8:142:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:54:2057] recipient: [9:52:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:54:2057] recipient: [9:52:2095] Leader for TabletID 72057594037927937 is [9:56:2097] sender: [9:57:2057] recipient: [9:52:2095] Leader for TabletID 72057594037927937 is [9:56:2097] sender: [9:74:2057] recipient: [9:14:2061] !Reboot 72057594037927937 (actor [9:56:2097]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [9:56:2097] sender: [9:83:2057] recipient: [9:36:2083] Leader for TabletID 72057594037927937 is [9:56:2097] sender: [9:86:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [9:56:2097] sender: [9:87:2057] recipient: [9:85:2115] Leader for TabletID 72057594037927937 is [9:88:2116] sender: [9:89:2057] recipient: [9:85:2115] !Reboot 72057594037927937 (actor [9:56:2097]) rebooted! !Reboot 72057594037927937 (actor [9:56:2097]) tablet resolver refreshed! new actor is[9:88:2116] Leader for TabletID 72057594037927937 is [9:88:2116] sender: [9:142:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:54:2057] recipient: [10:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:54:2057] recipient: [10:51:2095] Leader for TabletID 72057594037927937 is [10:56:2097] sender: [10:57:2057] recipient: [10:51:2095] Leader for TabletID 72057594037927937 is [10:56:2097] sender: [10:74:2057] recipient: [10:14:2061] !Reboot 72057594037927937 (actor [10:56:2097]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [10:56:2097] sender: [10:84:2057] recipient: [10:36:2083] Leader for TabletID 72057594037927937 is [10:56:2097] sender: [10:87:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [10:56:2097] sender: [10:88:2057] recipient: [10:86:2115] Leader for TabletID 72057594037927937 is [10:89:2116] sender: [10:90:2057] recipient: [10:86:2115] !Reboot 72057594037927937 (actor [10:56:2097]) rebooted! !Reboot 72057594037927937 (actor [10:56:2097]) tablet resolver refreshed! new actor is[10:89:2116] Leader for TabletID 72057594037927937 is [10:89:2116] sender: [10:143:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:54:2057] recipient: [11:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:54:2057] recipient: [11:51:2095] Leader for TabletID 72057594037927937 is [11:56:2097] sender: [11:57:2057] recipient: [11:51:2095] Leader for TabletID 72057594037927937 is [11:56:2097] sender: [11:74:2057] recipient: [11:14:2061] !Reboot 72057594037927937 (actor [11:56:2097]) on event NKikimr::TEvKeyValue::TEvCollect ! 
Leader for TabletID 72057594037927937 is [11:56:2097] sender: [11:85:2057] recipient: [11:36:2083] Leader for TabletID 72057594037927937 is [11:56:2097] sender: [11:87:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [11:56:2097] sender: [11:89:2057] recipient: [11:88:2116] Leader for TabletID 72057594037927937 is [11:90:2117] sender: [11:91:2057] recipient: [11:88:2116] !Reboot 72057594037927937 (actor [11:56:2097]) rebooted! !Reboot 72057594037927937 (actor [11:56:2097]) tablet resolver refreshed! new actor is[11:90:2117] Leader for TabletID 72057594037927937 is [11:90:2117] sender: [11:110:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:54:2057] recipient: [12:52:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:54:2057] recipient: [12:52:2095] Leader for TabletID 72057594037927937 is [12:56:2097] sender: [12:57:2057] recipient: [12:52:2095] Leader for TabletID 72057594037927937 is [12:56:2097] sender: [12:74:2057] recipient: [12:14:2061] !Reboot 72057594037927937 (actor [12:56:2097]) on event NKikimr::TEvKeyValue::TEvCompleteGC ! Leader for TabletID 72057594037927937 is [12:56:2097] sender: [12:86:2057] recipient: [12:36:2083] Leader for TabletID 72057594037927937 is [12:56:2097] sender: [12:89:2057] recipient: [12:14:2061] Leader for TabletID 72057594037927937 is [12:56:2097] sender: [12:90:2057] recipient: [12:88:2117] Leader for TabletID 72057594037927937 is [12:91:2118] sender: [12:92:2057] recipient: [12:88:2117] !Reboot 72057594037927937 (actor [12:56:2097]) rebooted! !Reboot 72057594037927937 (actor [12:56:2097]) tablet resolver refreshed! new actor is[12:91:2118] Leader for TabletID 72057594037927937 is [12:91:2118] sender: [12:111:2057] recipient: [12:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [13:54:2057] recipient: [13:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [13:54:2057] recipient: [13:51:2095] Leader for TabletID 72057594037927937 is [13:56:2097] sender: [13:57:2057] recipient: [13:51:2095] Leader for TabletID 72057594037927937 is [13:56:2097] sender: [13:74:2057] recipient: [13:14:2061] !Reboot 72057594037927937 (actor [13:56:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [13:56:2097] sender: [13:89:2057] recipient: [13:36:2083] Leader for TabletID 72057594037927937 is [13:56:2097] sender: [13:91:2057] recipient: [13:14:2061] Leader for TabletID 72057594037927937 is [13:56:2097] sender: [13:93:2057] recipient: [13:92:2120] Leader for TabletID 72057594037927937 is [13:94:2121] sender: [13:95:2057] recipient: [13:92:2120] !Reboot 72057594037927937 (actor [13:56:2097]) rebooted! !Reboot 72057594037927937 (actor [13:56:2097]) tablet resolver refreshed! new actor is[13:94:2121] Leader for TabletID 72057594037927937 is [13:94:2121] sender: [13:148:2057] recipient: [13:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [14:54:2057] recipient: [14:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [14:54:2057] recipient: [14:51:2095] Leader for TabletID 72057594037927937 is [14:56:2097] sender: [14:57:2057] recipient: [14:51:2095] Leader for TabletID 72057594037927937 is [14:56:2097] sender: [14:74:2057] recipient: [14:14:2061] !Reboot 72057594037927937 (actor [14:56:2097]) on event NKikimr::TEvKeyValue::TEvRequest ! 
Leader for TabletID 72057594037927937 is [14:56:2097] sender: [14:89:2057] recipient: [14:36:2083] Leader for TabletID 72057594037927937 is [14:56:2097] sender: [14:92:2057] recipient: [14:14:2061] Leader for TabletID 72057594037927937 is [14:56:2097] sender: [14:93:2057] recipient: [14:91:2120] Leader for TabletID 72057594037927937 is [14:94:2121] sender: [14:95:2057] recipient: [14:91:2120] !Reboot 72057594037927937 (actor [14:56:2097]) rebooted! !Reboot 72057594037927937 (actor [14:56:2097]) tablet resolver refreshed! new actor is[14:94:2121] Leader for TabletID 72057594037927937 is [14:94:2121] sender: [14:148:2057] recipient: [14:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [15:54:2057] recipient: [15:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [15:54:2057] recipient: [15:51:2095] Leader for TabletID 72057594037927937 is [15:56:2097] sender: [15:57:2057] recipient: [15:51:2095] Leader for TabletID 72057594037927937 is [15:56:2097] sender: [15:74:2057] recipient: [15:14:2061] !Reboot 72057594037927937 (actor [15:56:2097]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [15:56:2097] sender: [15:90:2057] recipient: [15:36:2083] Leader for TabletID 72057594037927937 is [15:56:2097] sender: [15:93:2057] recipient: [15:14:2061] Leader for TabletID 72057594037927937 is [15:56:2097] sender: [15:94:2057] recipient: [15:92:2120] Leader for TabletID 72057594037927937 is [15:95:2121] sender: [15:96:2057] recipient: [15:92:2120] !Reboot 72057594037927937 (actor [15:56:2097]) rebooted! !Reboot 72057594037927937 (actor [15:56:2097]) tablet resolver refreshed! new actor is[15:95:2121] Leader for TabletID 72057594037927937 is [15:95:2121] sender: [15:149:2057] recipient: [15:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [16:54:2057] recipient: [16:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [16:54:2057] recipient: [16:51:2095] Leader for TabletID 72057594037927937 is [16:56:2097] sender: [16:57:2057] recipient: [16:51:2095] Leader for TabletID 72057594037927937 is [16:56:2097] sender: [16:74:2057] recipient: [16:14:2061] |90.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_monitoring/unittest |90.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/pdisk/ut/unittest >> TYardTest::TestLogOwerwrite [GOOD] >> TKeyValueTest::TestWrite200KDeleteThenResponseErrorNewApi [GOOD] >> TKeyValueTest::TestWriteDeleteThenReadRemaining >> KqpPg::InsertNoTargetColumns_Serial+useSink [GOOD] >> BasicUsage::WriteSessionCloseWaitsForWrites >> DataShardSnapshots::DelayedWriteReadableAfterSplit [GOOD] >> DataShardSnapshots::DelayedWriteReplyAfterSplit >> DataShardSnapshots::VolatileSnapshotRenameTimeout [GOOD] >> DataShardSnapshots::UncommittedWriteRestartDuringCommit >> PersQueueSdkReadSessionTest::StopResumeReadingData [GOOD] >> ReadSessionImplTest::CreatePartitionStream [GOOD] >> ReadSessionImplTest::BrokenCompressedData [GOOD] >> ReadSessionImplTest::CommitOffsetTwiceIsError [GOOD] >> ReadSessionImplTest::DataReceivedCallback >> TPersQueueCommonTest::TestLimiterLimitsWithBlobsRateLimit |90.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_monitoring/unittest >> TKeyValueTest::TestRenameWorks [GOOD] >> TKeyValueTest::TestRenameWorksNewApi ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/pg/unittest >> KqpPg::InsertNoTargetColumns_Serial+useSink [GOOD] Test command err: Trying to start YDB, gRPC: 
1481, MsgBus: 21232 2025-05-07T08:50:52.906800Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501623604785832551:2141];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:50:52.907559Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00215b/r3tmp/tmpilxCkp/pdisk_1.dat 2025-05-07T08:50:53.471693Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:50:53.488736Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:50:53.488848Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:50:53.494556Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 1481, node 1 2025-05-07T08:50:53.694483Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:50:53.694511Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:50:53.694534Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:50:53.694645Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:21232 TClient is connected to server localhost:21232 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:50:54.633197Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T08:50:54.668581Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 16 2025-05-07T08:50:57.244833Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 2025-05-07T08:50:57.445243Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 2025-05-07T08:50:57.528150Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-05-07T08:50:57.554003Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623626260669781:2349], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:50:57.554072Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:50:57.554318Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623626260669793:2352], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:50:57.559534Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710661:3, at schemeshard: 72057594046644480 2025-05-07T08:50:57.574981Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501623626260669795:2353], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710661 completed, doublechecking } 2025-05-07T08:50:57.678062Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501623626260669846:2446] txid# 281474976710662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:50:57.905957Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501623604785832551:2141];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:50:57.906037Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; f f t t 18 2025-05-07T08:50:58.140017Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T08:50:58.206455Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-05-07T08:50:58.217018Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T08:50:58.312101Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 0 0 1 1 2 2 3 3 4 4 5 5 6 6 7 7 8 8 9 9 21 2025-05-07T08:50:58.672726Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480 2025-05-07T08:50:58.747079Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-05-07T08:50:58.753777Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480 2025-05-07T08:50:58.897112Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 0 0 1 1 2 2 3 3 4 4 5 5 6 6 7 7 8 8 9 9 23 2025-05-07T08:50:59.522811Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710677:0, at schemeshard: 72057594046644480 2025-05-07T08:50:59.616573Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-05-07T08:50:59.618180Z node 1 :TX_DATASHARD ERROR: finish_propose_unit.cpp:245: Prepare transaction failed. 
txid 281474976710679 at tablet 72075186224037894 errors: WRONG_SHARD_STATE (Interrupted operation [0:281474976710679] at 72075186224037894 while waiting for stream clearance) | 2025-05-07T08:50:59.619015Z node 1 :TX_DATASHARD ERROR: finish_propose_unit.cpp:174: Errors while proposing transaction txid 281474976710679 at tablet 72075186224037894 status: ERROR errors: WRONG_SHARD_STATE (Interrupted operation [0:281474976710679] at 72075186224037894 while waiting for stream clearance) | 2025-05-07T08:50:59.624586Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710680:0, at schemeshard: 72057594046644480 0 0 1 1 2 2 3 3 4 4 5 5 6 6 7 7 8 8 9 9 20 2025-05-07T08:51:00.114627Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710683:0, at schemeshard: 72057594046644480 2025-05-07T08:51:00.235502Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710684:0, at schemeshard: 72057594046644480 2025-05-07T08:51:00.330086Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 0 0 1 1 2 2 3 3 4 4 5 5 6 6 7 7 8 8 9 9 700 2025-05-07T08:51:00.756073Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710688:0, at schemeshard: 72057594046644480 2025-05-07T08:51:00.838660Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-05-07T08:51:00.839665Z node 1 :TX_DATASHARD ERROR: finish_propose_unit.cpp:245: Prepare transaction failed. txid 281474976710690 at tablet 72075186224037898 errors: WRONG_SHARD_STATE (Interrupted operation [0:281474976710690] at 72075186224037898 while waiting for scan finish) | 2025-05-07T08:51:00.840770Z node 1 :TX_DATASHARD ERROR: finish_propose_unit.cpp:174: Errors while proposing transaction txid 281474976710690 at tablet 72075186224037898 status: ERROR errors: WRONG_SHARD_STATE (Interrupted operation [0:281474976710690] at 72075186224037898 while waiting for scan finish) | 2025-05-07T08:51:00.843863Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710691:0, at schemeshard: 72057594046644480 2025-05-07T08:51:00.921839Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 0.5 0.5 1.5 1.5 2.5 2.5 3.5 3.5 4.5 4.5 5.5 5.5 6.5 6.5 7.5 7.5 8.5 8.5 9.5 9.5 701 2025-05-07T08:51:01.324530Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710695:0, at schemeshard: 72057594046644480 2025-05-07T08:51:01.389608Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-05-07T08:51:01.393199Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710697:0, at schemeshard: ... 
/home/runner/.ya/build/build_root/zvgn/00215b/r3tmp/tmpoLya4o/pdisk_1.dat 2025-05-07T08:53:27.696597Z node 11 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[11:7501624272425329733:2213];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:53:27.697001Z node 11 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-05-07T08:53:27.821878Z node 11 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:53:27.847346Z node 11 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(11, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:53:27.847502Z node 11 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(11, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:53:27.856321Z node 11 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(11, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 7200, node 11 2025-05-07T08:53:28.142841Z node 11 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:53:28.142874Z node 11 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:53:28.142887Z node 11 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:53:28.143079Z node 11 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:32445 TClient is connected to server localhost:32445 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:53:29.356580Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T08:53:32.382847Z node 11 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[11:7501624272425329733:2213];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:53:32.382961Z node 11 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:53:34.781149Z node 11 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [11:7501624302490101312:2336], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:53:34.781285Z node 11 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:53:34.781403Z node 11 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [11:7501624302490101325:2339], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:53:34.788232Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480 2025-05-07T08:53:34.866032Z node 11 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [11:7501624302490101327:2340], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-05-07T08:53:34.986666Z node 11 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [11:7501624302490101379:2346] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:53:35.031313Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 2025-05-07T08:53:35.882577Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 Trying to start YDB, gRPC: 3816, MsgBus: 8442 2025-05-07T08:53:39.372149Z node 12 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[12:7501624322885780826:2128];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:53:39.372249Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00215b/r3tmp/tmpC1i88f/pdisk_1.dat 2025-05-07T08:53:39.794752Z node 12 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:53:39.858797Z node 12 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:53:39.858960Z node 12 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:53:39.867307Z node 12 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 3816, node 12 2025-05-07T08:53:40.085148Z node 12 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:53:40.085182Z node 12 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:53:40.085196Z node 12 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:53:40.085404Z node 12 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8442 TClient is connected to server localhost:8442 WaitRootIsUp 'Root'... 
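The node 11 bootstrap above is a create-if-absent race: the pool fetch returns NOT_FOUND, a creator actor schedules a retry with "Transaction ... completed, doublechecking", and the follow-up create fails benignly with "path exist, request accepts it". A minimal sketch of that pattern, assuming hypothetical FetchPool/CreatePool stubs rather than the real WorkloadService scheme calls:

    #include <cstdio>
    #include <string>

    enum class EStatus { Ok, NotFound, AlreadyExists, Error };

    // Hypothetical stand-ins for the fetch and create scheme operations.
    EStatus FetchPool(const std::string&) { return EStatus::NotFound; }
    EStatus CreatePool(const std::string&) { return EStatus::AlreadyExists; } // lost the race

    // Fetch, then create, treating a concurrent creator winning the race as
    // success; loop back to doublecheck, as the scheduled retry above does.
    bool EnsurePool(const std::string& id) {
        for (int attempt = 0; attempt < 3; ++attempt) {
            if (FetchPool(id) == EStatus::Ok)
                return true;                       // someone (maybe us) created it
            EStatus st = CreatePool(id);
            if (st != EStatus::Ok && st != EStatus::AlreadyExists)
                return false;                      // real failure, not a benign race
            // loop back and doublecheck that the path is now visible
        }
        return true; // create reported Ok/AlreadyExists on every attempt
    }

    int main() { std::printf("pool ready: %d\n", (int)EnsurePool("default")); }

The second fetch plays the same role as the log's "doublechecking": after a lost race, success is confirmed by observing the path, not by the create status alone.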
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:53:41.969330Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:53:41.980743Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-05-07T08:53:44.374520Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[12:7501624322885780826:2128];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:53:44.374638Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:53:48.378962Z node 12 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [12:7501624361540487078:2338], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T08:53:48.379089Z node 12 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [12:7501624361540487086:2341], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T08:53:48.379203Z node 12 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T08:53:48.387913Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480
2025-05-07T08:53:48.404384Z node 12 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [12:7501624361540487115:2342], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-05-07T08:53:48.471494Z node 12 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [12:7501624361540487170:2350] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:53:48.656030Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 >> TPersQueueNewSchemeCacheTest::TestWriteStat1stClass >> TKeyValueTest::TestWriteReadRangeDataLimitThenLimitWorks [GOOD] >> TKeyValueTest::TestWriteReadRangeDataLimitThenLimitWorksNewApi >> ReadSessionImplTest::DataReceivedCallback [GOOD] >> ReadSessionImplTest::CommonHandler >> TPersQueueCommonTest::Auth_MultipleUpdateTokenRequestIterationsWithValidToken_GotUpdateTokenResponseForEachRequest >> ReadSessionImplTest::CommonHandler [GOOD] |90.1%| [TA] $(B)/ydb/core/blobstorage/pdisk/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> BasicUsage::GetAllStartPartitionSessions [GOOD] >> BasicUsage::PreferredDatabaseNoFallback |90.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/ut/join/ydb-core-kqp-ut-join |90.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/join/ydb-core-kqp-ut-join |90.1%| [TA] {RESULT} $(B)/ydb/core/blobstorage/pdisk/ut/test-results/unittest/{meta.json ... results_accumulator.log} |90.1%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/join/ydb-core-kqp-ut-join >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-58 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-59 >> KqpRm::NotEnoughExecutionUnits >> KqpRm::DisonnectNodes ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/persqueue_public/ut/with_offset_ranges_mode_ut/unittest >> ReadSessionImplTest::CommonHandler [GOOD] Test command err: 2025-05-07T08:53:14.681245Z :SpecifyClustersExplicitly INFO: Random seed for debugging is 1746607994681208 2025-05-07T08:53:15.129822Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501624217665600415:2275];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:53:15.130884Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-05-07T08:53:15.217027Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7501624218784851982:2209];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:53:15.222122Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-05-07T08:53:15.396221Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-05-07T08:53:15.400721Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003482/r3tmp/tmpzuw6m5/pdisk_1.dat 2025-05-07T08:53:15.948030Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table 
profiles were not loaded 2025-05-07T08:53:15.962705Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:53:15.962832Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:53:15.970502Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:53:15.970592Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:53:15.977163Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:53:15.980283Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-05-07T08:53:15.982502Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 2018, node 1 2025-05-07T08:53:16.330820Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/zvgn/003482/r3tmp/yandexEEOowa.tmp 2025-05-07T08:53:16.330865Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/zvgn/003482/r3tmp/yandexEEOowa.tmp 2025-05-07T08:53:16.331084Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/zvgn/003482/r3tmp/yandexEEOowa.tmp 2025-05-07T08:53:16.331226Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-05-07T08:53:16.423068Z INFO: TTestServer started on Port 19136 GrpcPort 2018 TClient is connected to server localhost:19136 PQClient connected to localhost:2018 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:53:17.184663Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... waiting... waiting... waiting... 
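The net classifier tries the same source ladder in every run above: distributable config first, then a local file, and only then reports "got bad distributable configuration" (nodes 11 and 12 fail both rungs; node 1 initializes from the yandex*.tmp file). A sketch of such a fallback chain, with hypothetical LoadFromDistConf/LoadFromFile loaders:

    #include <cstdio>
    #include <functional>
    #include <optional>
    #include <string>
    #include <vector>

    using Config = std::string;

    // Hypothetical loaders; each returns nullopt on failure, like the WARN lines.
    std::optional<Config> LoadFromDistConf() { return std::nullopt; } // "empty, broken or outdated"
    std::optional<Config> LoadFromFile(const std::string& path) {
        return path.empty() ? std::nullopt : std::optional<Config>("classifier data");
    }

    std::optional<Config> LoadClassifierConfig(const std::string& filePath) {
        std::vector<std::function<std::optional<Config>()>> sources = {
            LoadFromDistConf,
            [&] { return LoadFromFile(filePath); },
        };
        for (auto& src : sources)
            if (auto cfg = src()) return cfg;  // first rung that works wins
        return std::nullopt;                   // "got bad distributable configuration"
    }

    int main() {
        auto cfg = LoadClassifierConfig("");   // empty path: both rungs fail
        std::printf(cfg ? "initialized\n" : "got bad configuration\n");
    }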
2025-05-07T08:53:17.386349Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710660, at schemeshard: 72057594046644480 2025-05-07T08:53:20.118474Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501624217665600415:2275];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:53:20.118566Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:53:20.214176Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7501624218784851982:2209];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:53:20.214261Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:53:21.534851Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7501624244554655936:2313], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T08:53:21.535017Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T08:53:21.540603Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7501624244554655948:2316], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T08:53:21.548556Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715657:3, at schemeshard: 72057594046644480
2025-05-07T08:53:21.602453Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7501624244554655950:2317], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715657 completed, doublechecking } 2025-05-07T08:53:21.751250Z node 2 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [2:7501624244554655978:2134] txid# 281474976715658, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:53:22.379040Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7501624244554655991:2321], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-05-07T08:53:22.380827Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2147: SessionId: ydb://session/3?node_id=2&id=ZGUzODk4YjctODdjYjAzNy03OTMxMGEyNC01NWMwMTRkYQ==, ActorId: [2:7501624244554655934:2312], ActorState: ExecuteState, TraceId: 01jtmz64ez6egwh1my0g87jzq6, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-05-07T08:53:22.383184Z node 2 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-05-07T08:53:22.383770Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7501624243435405116:2347], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-05-07T08:53:22.385275Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2147: SessionId: ydb://session/3?node_id=1&id=NTY1YzEzN2QtNjUwMjJjMDAtZmM0ZTU3NzMtODZjMDg2ODQ=, ActorId: [1:7501624243435405081:2339], ActorState: ExecuteState, TraceId: 01jtmz64p17zv99f32436gxrqn, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-05-07T08:53:22.386620Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-05-07T08:53:22.389287Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 2025-05-07T08:53:22.641396Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T08:53:22.939449Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost:2018", true, true, 1000); 2025-05-07T08:53:23.553960Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710664. Ctx: { TraceId: 01jtmz662w0h1k1vb36ea0r0zc, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZTM0NmRjMzktYjkxNTllOS0yZjY5NDE4Yy05Y2UxODczZg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Dat ... : 1 PartitionId: 0 Message { Data: ..8 bytes.. Partition stream id: 1 Cluster: "dc1". Topic: "test-topic" Partition: 0 PartitionKey: "" Information: { Offset: 2 SeqNo: 3 MessageGroupId: "test-message-group-id" CreateTime: 2025-05-07T08:53:48.162000Z WriteTime: 2025-05-07T08:53:48.166000Z Ip: "ipv6:[::1]:38832" UncompressedSize: 8 Meta: { "logtype": "unknown", "ident": "unknown", "server": "ipv6:[::1]:38832" } } } } 2025-05-07T08:53:49.326578Z :INFO: [/Root] [/Root] [150aaad-afee2fad-6e7093e-d2081c9f] Closing read session. 
Close timeout: 3.000000s 2025-05-07T08:53:49.326626Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): dc1:test-topic:0:1:2:2 2025-05-07T08:53:49.326668Z :INFO: [/Root] [/Root] [150aaad-afee2fad-6e7093e-d2081c9f] Counters: { Errors: 0 CurrentSessionLifetimeMs: 1466 BytesRead: 24 MessagesRead: 3 BytesReadCompressed: 84 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-05-07T08:53:49.327221Z :INFO: [/Root] [/Root] [150aaad-afee2fad-6e7093e-d2081c9f] Closing read session. Close timeout: 0.000000s 2025-05-07T08:53:49.327271Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): dc1:test-topic:0:1:2:2 2025-05-07T08:53:49.327316Z :INFO: [/Root] [/Root] [150aaad-afee2fad-6e7093e-d2081c9f] Counters: { Errors: 0 CurrentSessionLifetimeMs: 1467 BytesRead: 24 MessagesRead: 3 BytesReadCompressed: 84 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-05-07T08:53:49.327429Z :NOTICE: [/Root] [/Root] [150aaad-afee2fad-6e7093e-d2081c9f] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2025-05-07T08:53:49.328679Z node 3 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 1 consumer shared/user session shared/user_3_1_17280672327025595140_v1 grpc read done: success# 0, data# { } 2025-05-07T08:53:49.328711Z node 3 :PQ_READ_PROXY INFO: read_session_actor.cpp:125: session cookie 1 consumer shared/user session shared/user_3_1_17280672327025595140_v1 grpc read failed 2025-05-07T08:53:49.328745Z node 3 :PQ_READ_PROXY INFO: read_session_actor.cpp:1645: session cookie 1 consumer shared/user session shared/user_3_1_17280672327025595140_v1 closed 2025-05-07T08:53:49.329128Z node 3 :PQ_READ_PROXY INFO: read_session_actor.cpp:377: session cookie 1 consumer shared/user session shared/user_3_1_17280672327025595140_v1 is DEAD 2025-05-07T08:53:49.330875Z node 3 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037893][rt3.dc1--test-topic] pipe [3:7501624355113326044:2512] disconnected; active server actors: 1 2025-05-07T08:53:49.330901Z node 3 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1673: [72075186224037893][rt3.dc1--test-topic] pipe [3:7501624355113326044:2512] client user disconnected session shared/user_3_1_17280672327025595140_v1 2025-05-07T08:53:49.330282Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2433: [PQ: 72075186224037892] Destroy direct read session shared/user_3_1_17280672327025595140_v1 2025-05-07T08:53:49.330334Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2899: [PQ: 72075186224037892] server disconnected, pipe [3:7501624355113326047:2515] destroyed 2025-05-07T08:53:49.330384Z node 4 :PQ_READ_PROXY DEBUG: caching_service.cpp:138: Direct read cache: server session deregistered: shared/user_3_1_17280672327025595140_v1 2025-05-07T08:53:50.111482Z node 3 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1073: TxId: 281474976720693, task: 1, CA Id [3:7501624367998228119:2545]. Got EvDeliveryProblem, TabletId: 72075186224037890, NotDelivered: 0 2025-05-07T08:53:51.018025Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:53:51.018080Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:53:51.018115Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-05-07T08:53:51.018509Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-05-07T08:53:51.019081Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-05-07T08:53:51.019295Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:53:51.019823Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: 13. Commit offset: 31 2025-05-07T08:53:51.021434Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:53:51.021477Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:53:51.021509Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-05-07T08:53:51.024321Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. 
Initializing session 2025-05-07T08:53:51.025616Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-05-07T08:53:51.025781Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:53:51.027221Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-05-07T08:53:51.028437Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function 2025-05-07T08:53:51.029779Z :INFO: Error decompressing data: (TZLibDecompressorError) util/stream/zlib.cpp:143: inflate error(incorrect header check) 2025-05-07T08:53:51.029884Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-3) 2025-05-07T08:53:51.030117Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-05-07T08:53:51.030175Z :DEBUG: Take Data. Partition 1. Read: {0, 1} (2-2) 2025-05-07T08:53:51.030222Z :DEBUG: Take Data. Partition 1. Read: {0, 2} (3-3) 2025-05-07T08:53:51.030276Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 3, size 16 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { DataDecompressionError: "(TZLibDecompressorError) util/stream/zlib.cpp:143: inflate error(incorrect header check)" Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 1 SeqNo: 1 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:00:00.042000Z Ip: "::1" UncompressedSize: 0 Meta: { } } } Message { Data: ..8 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 2 SeqNo: 1 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:00:00.042000Z Ip: "::1" UncompressedSize: 0 Meta: { } } } Message { Data: ..8 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 3 SeqNo: 1 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:00:00.042000Z Ip: "::1" UncompressedSize: 0 Meta: { } } } } 2025-05-07T08:53:51.032374Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:53:51.032415Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:53:51.032447Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-05-07T08:53:51.033084Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-05-07T08:53:51.037335Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-05-07T08:53:51.037519Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:53:51.038244Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-05-07T08:53:51.039112Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:53:51.039338Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-1) 2025-05-07T08:53:51.039462Z :DEBUG: Take Data. Partition 1. 
Read: {0, 0} (1-1) 2025-05-07T08:53:51.039526Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 8 bytes 2025-05-07T08:53:51.039610Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [0, 2). Partition stream id: 1 2025-05-07T08:53:51.042215Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:53:51.042258Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:53:51.042300Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-05-07T08:53:51.051503Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-05-07T08:53:51.056170Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-05-07T08:53:51.056377Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:53:51.056702Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-05-07T08:53:51.057596Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function 2025-05-07T08:53:51.058077Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function 2025-05-07T08:53:51.058288Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (2-2) 2025-05-07T08:53:51.058381Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-1) 2025-05-07T08:53:51.058433Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-05-07T08:53:51.058477Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (2-2) 2025-05-07T08:53:51.058622Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 8 bytes 2025-05-07T08:53:51.058660Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 8 bytes 2025-05-07T08:53:53.069715Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:53:53.069762Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:53:53.069799Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-05-07T08:53:53.122297Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-05-07T08:53:53.131652Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-05-07T08:53:53.131884Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:53:53.134788Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:53:53.135029Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-1) 2025-05-07T08:53:53.135117Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-05-07T08:53:53.135201Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. 
Number of messages 1, size 8 bytes |90.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_upload_rows/ydb-core-tx-datashard-ut_upload_rows |90.1%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_upload_rows/ydb-core-tx-datashard-ut_upload_rows |90.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_upload_rows/ydb-core-tx-datashard-ut_upload_rows >> YdbIndexTable::MultiShardTableOneIndexDataColumn [GOOD] >> YdbIndexTable::MultiShardTableOneIndexIndexOverlap >> KqpRm::NotEnoughExecutionUnits [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/rm_service/ut/unittest >> KqpRm::NotEnoughExecutionUnits [GOOD] Test command err: 2025-05-07T08:53:55.379737Z node 2 :BS_PDISK WARN: {BPD01@blobstorage_pdisk_blockdevice_async.cpp:922} Warning# got EIoResult::FileOpenError from IoContext->Setup PDiskId# 1000 2025-05-07T08:53:55.380302Z node 2 :BS_PDISK CRIT: {BPD39@blobstorage_pdisk_impl.cpp:2843} BlockDevice initialization error Details# Can't open file "/home/runner/.ya/build/build_root/zvgn/003a2e/r3tmp/tmprSq7gx/pdisk_1.dat": unknown reason, errno# 0. PDiskId# 1000 2025-05-07T08:53:55.385668Z node 2 :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:290} PDiskId# 1000 bootstrapped to the StateError, reason# Can't open file "/home/runner/.ya/build/build_root/zvgn/003a2e/r3tmp/tmprSq7gx/pdisk_1.dat": unknown reason, errno# 0. Can not be initialized Config: {TPDiskConfg Path# "/home/runner/.ya/build/build_root/zvgn/003a2e/r3tmp/tmprSq7gx/pdisk_1.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 9193246483030718523 PDiskId# 1000 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 2 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0} PDiskId# 1000 2025-05-07T08:53:55.445222Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:1115: TResourceBrokerActor bootstrap 2025-05-07T08:53:55.445640Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:1115: TResourceBrokerActor bootstrap 2025-05-07T08:53:55.461672Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:599: Start KqpResourceManagerActor at [2:463:2100] with ResourceBroker at [2:434:2099] 2025-05-07T08:53:55.461839Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:121: Start KqpResourceInfoExchangerActor at [2:464:2101] 2025-05-07T08:53:55.465555Z node 
1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:599: Start KqpResourceManagerActor at [1:462:2340] with ResourceBroker at [1:433:2321] 2025-05-07T08:53:55.465682Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:121: Start KqpResourceInfoExchangerActor at [1:465:2341] 2025-05-07T08:53:55.465946Z node 2 :KQP_RESOURCE_MANAGER CRIT: kqp_rm_service.cpp:796: Failed to deliver subscription request to config dispatcher 2025-05-07T08:53:55.466026Z node 2 :KQP_RESOURCE_MANAGER CRIT: kqp_resource_info_exchanger.cpp:411: Failed to deliver subscription request to config dispatcher. 2025-05-07T08:53:55.466069Z node 1 :KQP_RESOURCE_MANAGER CRIT: kqp_rm_service.cpp:796: Failed to deliver subscription request to config dispatcher 2025-05-07T08:53:55.466094Z node 1 :KQP_RESOURCE_MANAGER CRIT: kqp_resource_info_exchanger.cpp:411: Failed to deliver subscription request to config dispatcher. 2025-05-07T08:53:55.466309Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:913: Don't set KqpProxySharedResources 2025-05-07T08:53:55.481151Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:929: Send to publish resource usage for reason: data_center update, payload: NodeId: 2 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372045444738657 } Timestamp: 1746608035 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100 2025-05-07T08:53:55.481382Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:913: Don't set KqpProxySharedResources 2025-05-07T08:53:55.481476Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:929: Send to publish resource usage for reason: data_center update, payload: NodeId: 1 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372041149771361 } Timestamp: 1746608035 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100 2025-05-07T08:53:55.481811Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_resource_info_exchanger.cpp:465: Received tenant pool status for exchanger, serving tenant: /dc-1, board: kqpexch+/dc-1 2025-05-07T08:53:55.481903Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:753: Received tenant pool status, serving tenant: /dc-1, board: kqprm+/dc-1 2025-05-07T08:53:55.481936Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:913: Don't set KqpProxySharedResources 2025-05-07T08:53:55.482111Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:929: Send to publish resource usage for reason: tenant updated, payload: NodeId: 2 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372045444738657 } Timestamp: 1746608035 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100 2025-05-07T08:53:55.482325Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_resource_info_exchanger.cpp:465: Received tenant pool status for exchanger, serving tenant: /dc-1, board: kqpexch+/dc-1 2025-05-07T08:53:55.482629Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:753: Received tenant pool status, serving tenant: /dc-1, board: kqprm+/dc-1 2025-05-07T08:53:55.482657Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:913: Don't set KqpProxySharedResources 2025-05-07T08:53:55.482749Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:929: Send to publish resource usage for reason: tenant updated, payload: NodeId: 1 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372041149771361 } Timestamp: 1746608035 AvailableComputeActors: 100 UsedMemory: 0 
TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100 2025-05-07T08:53:55.483293Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:479: Get board info from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 0 2025-05-07T08:53:55.483385Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:501: Get board info update from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1 2025-05-07T08:53:55.483795Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:501: Get board info update from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1 2025-05-07T08:53:55.484248Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:479: Get board info from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1 2025-05-07T08:53:55.484559Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:501: Get board info update from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 2 2025-05-07T08:53:55.484648Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:526: Get resources info from node: 1 2025-05-07T08:53:55.484836Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:526: Get resources info from node: 2 2025-05-07T08:53:55.485032Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:526: Get resources info from node: 2 2025-05-07T08:53:55.485162Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:526: Get resources info from node: 1 >> TKeyValueTest::TestWriteReadWithRestartsThenResponseOkNewApi [GOOD] >> TKeyValueTest::TestWriteToExtraChannelThenReadMixedChannelsReturnsOk >> TGRpcCmsTest::SimpleTenantsTest >> KqpRm::DisonnectNodes [GOOD] >> CompressExecutor::TestReorderedExecutor [GOOD] >> CompressExecutor::TestExecutorMemUsage >> TKeyValueTest::TestInlineWriteReadRangeLimitThenLimitWorksNewApi [GOOD] >> TKeyValueTest::TestWriteDeleteThenReadRemaining [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/rm_service/ut/unittest >> KqpRm::DisonnectNodes [GOOD] Test command err: 2025-05-07T08:53:55.423976Z node 2 :BS_PDISK WARN: {BPD01@blobstorage_pdisk_blockdevice_async.cpp:922} Warning# got EIoResult::FileOpenError from IoContext->Setup PDiskId# 1000 2025-05-07T08:53:55.424495Z node 2 :BS_PDISK CRIT: {BPD39@blobstorage_pdisk_impl.cpp:2843} BlockDevice initialization error Details# Can't open file "/home/runner/.ya/build/build_root/zvgn/003a2b/r3tmp/tmpBXmyyY/pdisk_1.dat": unknown reason, errno# 0. PDiskId# 1000 2025-05-07T08:53:55.425046Z node 2 :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:290} PDiskId# 1000 bootstrapped to the StateError, reason# Can't open file "/home/runner/.ya/build/build_root/zvgn/003a2b/r3tmp/tmpBXmyyY/pdisk_1.dat": unknown reason, errno# 0. 
Can not be initialized Config: {TPDiskConfg Path# "/home/runner/.ya/build/build_root/zvgn/003a2b/r3tmp/tmpBXmyyY/pdisk_1.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 8052037838888386694 PDiskId# 1000 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 2 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0} PDiskId# 1000 2025-05-07T08:53:55.468791Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:1115: TResourceBrokerActor bootstrap 2025-05-07T08:53:55.469082Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:1115: TResourceBrokerActor bootstrap 2025-05-07T08:53:55.485416Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:599: Start KqpResourceManagerActor at [2:463:2100] with ResourceBroker at [2:434:2099] 2025-05-07T08:53:55.485585Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:121: Start KqpResourceInfoExchangerActor at [2:464:2101] 2025-05-07T08:53:55.485746Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:599: Start KqpResourceManagerActor at [1:462:2340] with ResourceBroker at [1:433:2321] 2025-05-07T08:53:55.485819Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:121: Start KqpResourceInfoExchangerActor at [1:465:2341] 2025-05-07T08:53:55.486085Z node 2 :KQP_RESOURCE_MANAGER CRIT: kqp_rm_service.cpp:796: Failed to deliver subscription request to config dispatcher 2025-05-07T08:53:55.486142Z node 2 :KQP_RESOURCE_MANAGER CRIT: kqp_resource_info_exchanger.cpp:411: Failed to deliver subscription request to config dispatcher. 2025-05-07T08:53:55.486200Z node 1 :KQP_RESOURCE_MANAGER CRIT: kqp_rm_service.cpp:796: Failed to deliver subscription request to config dispatcher 2025-05-07T08:53:55.486223Z node 1 :KQP_RESOURCE_MANAGER CRIT: kqp_resource_info_exchanger.cpp:411: Failed to deliver subscription request to config dispatcher. 
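Both KqpRm test blocks publish a fixed per-node budget (TotalMemory: 1000, ExecutionUnits: 100), and the NotEnoughExecutionUnits test name points at an admission check against that budget. A minimal sketch of acquire/release accounting under that assumption; TResourceBudget is hypothetical, not the real KqpResourceManager interface:

    #include <cstdio>

    // Hypothetical budget mirroring the published snapshot fields above.
    struct TResourceBudget {
        int TotalMemory = 1000;
        int UsedMemory = 0;
        int ExecutionUnits = 100;

        // Admit a task only if both memory and execution units fit.
        bool TryAcquire(int memory, int units) {
            if (UsedMemory + memory > TotalMemory || units > ExecutionUnits)
                return false;            // the "not enough execution units" case
            UsedMemory += memory;
            ExecutionUnits -= units;
            return true;
        }
        void Release(int memory, int units) {
            UsedMemory -= memory;
            ExecutionUnits += units;
        }
    };

    int main() {
        TResourceBudget b;
        std::printf("acquire 100 units: %d\n", b.TryAcquire(/*memory=*/0, /*units=*/100)); // succeeds
        std::printf("acquire 1 more:    %d\n", b.TryAcquire(0, 1));                        // fails
    }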
2025-05-07T08:53:55.486403Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:913: Don't set KqpProxySharedResources 2025-05-07T08:53:55.503297Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:929: Send to publish resource usage for reason: data_center update, payload: NodeId: 2 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372045444738657 } Timestamp: 1746608035 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100 2025-05-07T08:53:55.503558Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:913: Don't set KqpProxySharedResources 2025-05-07T08:53:55.503645Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:929: Send to publish resource usage for reason: data_center update, payload: NodeId: 1 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372041149771361 } Timestamp: 1746608035 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100 2025-05-07T08:53:55.503974Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_resource_info_exchanger.cpp:465: Received tenant pool status for exchanger, serving tenant: /dc-1, board: kqpexch+/dc-1 2025-05-07T08:53:55.504098Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:753: Received tenant pool status, serving tenant: /dc-1, board: kqprm+/dc-1 2025-05-07T08:53:55.504133Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:913: Don't set KqpProxySharedResources 2025-05-07T08:53:55.504233Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:929: Send to publish resource usage for reason: tenant updated, payload: NodeId: 2 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372045444738657 } Timestamp: 1746608035 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100 2025-05-07T08:53:55.504408Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_resource_info_exchanger.cpp:465: Received tenant pool status for exchanger, serving tenant: /dc-1, board: kqpexch+/dc-1 2025-05-07T08:53:55.504713Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:753: Received tenant pool status, serving tenant: /dc-1, board: kqprm+/dc-1 2025-05-07T08:53:55.504742Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:913: Don't set KqpProxySharedResources 2025-05-07T08:53:55.504836Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:929: Send to publish resource usage for reason: tenant updated, payload: NodeId: 1 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372041149771361 } Timestamp: 1746608035 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100 2025-05-07T08:53:55.505413Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:479: Get board info from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 0 2025-05-07T08:53:55.505521Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:501: Get board info update from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1 2025-05-07T08:53:55.505933Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:501: Get board info update from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1 2025-05-07T08:53:55.508433Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:479: Get board info from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1 2025-05-07T08:53:55.508729Z node 2 :KQP_RESOURCE_MANAGER 
DEBUG: kqp_resource_info_exchanger.cpp:501: Get board info update from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 2 2025-05-07T08:53:55.508820Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:526: Get resources info from node: 1 2025-05-07T08:53:55.509042Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:526: Get resources info from node: 2 2025-05-07T08:53:55.509242Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:526: Get resources info from node: 2 2025-05-07T08:53:55.509369Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:526: Get resources info from node: 1 2025-05-07T08:53:56.599336Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:423: Schedule Snapshot request 2025-05-07T08:53:56.599467Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:423: Schedule Snapshot request 2025-05-07T08:53:56.599843Z node 1 :PIPE_SERVER ERROR: tablet_pipe_server.cpp:228: [72057594046447617] NodeDisconnected NodeId# 2 2025-05-07T08:53:56.599923Z node 1 :PIPE_SERVER ERROR: tablet_pipe_server.cpp:228: [72057594037932033] NodeDisconnected NodeId# 2 2025-05-07T08:53:56.600761Z node 1 :PIPE_SERVER ERROR: tablet_pipe_server.cpp:228: [72057594046578946] NodeDisconnected NodeId# 2 2025-05-07T08:53:56.601051Z node 2 :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [2:56:2073] ServerId# [1:355:2273] TabletId# 72057594037932033 PipeClientId# [2:56:2073] 2025-05-07T08:53:56.601366Z node 2 :TX_PROXY WARN: proxy_impl.cpp:227: actor# [2:145:2087] HANDLE TEvClientDestroyed from tablet# 72057594046447617 2025-05-07T08:53:56.601545Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:501: Get board info update from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 2 2025-05-07T08:53:56.601739Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_resource_info_exchanger.cpp:495: Subcriber is not available for info exchanger, serving tenant: /dc-1, board: kqpexch+/dc-1 2025-05-07T08:53:56.601798Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_resource_info_exchanger.cpp:167: Kill previous info exchanger subscriber for 'kqpexch+/dc-1' at [2:467:2103], reason: tenant updated 2025-05-07T08:53:56.602161Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:501: Get board info update from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1 2025-05-07T08:53:56.604670Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:479: Get board info from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1 2025-05-07T08:53:56.604862Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:501: Get board info update from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1 2025-05-07T08:53:56.950350Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:423: Schedule Snapshot request >> TKeyValueTest::TestWriteReadWithRestartsThenResponseOk [GOOD] >> TKeyValueTest::TestWriteReadWhileWriteWorks >> ColumnStatistics::CountMinSketchStatistics ------- [TM] {asan, default-linux-x86_64, release} ydb/core/keyvalue/ut/unittest >> TKeyValueTest::TestInlineWriteReadRangeLimitThenLimitWorksNewApi [GOOD] Test command err: Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:54:2057] recipient: [1:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:54:2057] recipient: [1:51:2095] Leader for TabletID 72057594037927937 is [1:56:2097] sender: [1:57:2057] 
recipient: [1:51:2095] Leader for TabletID 72057594037927937 is [1:56:2097] sender: [1:74:2057] recipient: [1:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:54:2057] recipient: [2:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:54:2057] recipient: [2:51:2095] Leader for TabletID 72057594037927937 is [2:56:2097] sender: [2:57:2057] recipient: [2:51:2095] Leader for TabletID 72057594037927937 is [2:56:2097] sender: [2:74:2057] recipient: [2:14:2061] !Reboot 72057594037927937 (actor [2:56:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [2:56:2097] sender: [2:76:2057] recipient: [2:36:2083] Leader for TabletID 72057594037927937 is [2:56:2097] sender: [2:79:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [2:56:2097] sender: [2:80:2057] recipient: [2:78:2110] Leader for TabletID 72057594037927937 is [2:81:2111] sender: [2:82:2057] recipient: [2:78:2110] !Reboot 72057594037927937 (actor [2:56:2097]) rebooted! !Reboot 72057594037927937 (actor [2:56:2097]) tablet resolver refreshed! new actor is[2:81:2111] Leader for TabletID 72057594037927937 is [2:81:2111] sender: [2:135:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:54:2057] recipient: [3:52:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:54:2057] recipient: [3:52:2095] Leader for TabletID 72057594037927937 is [3:56:2097] sender: [3:57:2057] recipient: [3:52:2095] Leader for TabletID 72057594037927937 is [3:56:2097] sender: [3:74:2057] recipient: [3:14:2061] !Reboot 72057594037927937 (actor [3:56:2097]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [3:56:2097] sender: [3:76:2057] recipient: [3:36:2083] Leader for TabletID 72057594037927937 is [3:56:2097] sender: [3:78:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [3:56:2097] sender: [3:80:2057] recipient: [3:79:2110] Leader for TabletID 72057594037927937 is [3:81:2111] sender: [3:82:2057] recipient: [3:79:2110] !Reboot 72057594037927937 (actor [3:56:2097]) rebooted! !Reboot 72057594037927937 (actor [3:56:2097]) tablet resolver refreshed! new actor is[3:81:2111] Leader for TabletID 72057594037927937 is [3:81:2111] sender: [3:135:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:54:2057] recipient: [4:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:54:2057] recipient: [4:51:2095] Leader for TabletID 72057594037927937 is [4:56:2097] sender: [4:57:2057] recipient: [4:51:2095] Leader for TabletID 72057594037927937 is [4:56:2097] sender: [4:74:2057] recipient: [4:14:2061] !Reboot 72057594037927937 (actor [4:56:2097]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [4:56:2097] sender: [4:77:2057] recipient: [4:36:2083] Leader for TabletID 72057594037927937 is [4:56:2097] sender: [4:79:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [4:56:2097] sender: [4:81:2057] recipient: [4:80:2110] Leader for TabletID 72057594037927937 is [4:82:2111] sender: [4:83:2057] recipient: [4:80:2110] !Reboot 72057594037927937 (actor [4:56:2097]) rebooted! !Reboot 72057594037927937 (actor [4:56:2097]) tablet resolver refreshed! 
new actor is[4:82:2111] Leader for TabletID 72057594037927937 is [4:82:2111] sender: [4:136:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:54:2057] recipient: [5:50:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:54:2057] recipient: [5:50:2095] Leader for TabletID 72057594037927937 is [5:56:2097] sender: [5:57:2057] recipient: [5:50:2095] Leader for TabletID 72057594037927937 is [5:56:2097] sender: [5:74:2057] recipient: [5:14:2061] !Reboot 72057594037927937 (actor [5:56:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [5:56:2097] sender: [5:80:2057] recipient: [5:36:2083] Leader for TabletID 72057594037927937 is [5:56:2097] sender: [5:83:2057] recipient: [5:82:2113] Leader for TabletID 72057594037927937 is [5:56:2097] sender: [5:84:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [5:85:2114] sender: [5:86:2057] recipient: [5:82:2113] !Reboot 72057594037927937 (actor [5:56:2097]) rebooted! !Reboot 72057594037927937 (actor [5:56:2097]) tablet resolver refreshed! new actor is[5:85:2114] Leader for TabletID 72057594037927937 is [5:85:2114] sender: [5:139:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:54:2057] recipient: [6:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:54:2057] recipient: [6:51:2095] Leader for TabletID 72057594037927937 is [6:56:2097] sender: [6:57:2057] recipient: [6:51:2095] Leader for TabletID 72057594037927937 is [6:56:2097] sender: [6:74:2057] recipient: [6:14:2061] !Reboot 72057594037927937 (actor [6:56:2097]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [6:56:2097] sender: [6:80:2057] recipient: [6:36:2083] Leader for TabletID 72057594037927937 is [6:56:2097] sender: [6:83:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [6:56:2097] sender: [6:84:2057] recipient: [6:82:2113] Leader for TabletID 72057594037927937 is [6:85:2114] sender: [6:86:2057] recipient: [6:82:2113] !Reboot 72057594037927937 (actor [6:56:2097]) rebooted! !Reboot 72057594037927937 (actor [6:56:2097]) tablet resolver refreshed! new actor is[6:85:2114] Leader for TabletID 72057594037927937 is [6:85:2114] sender: [6:139:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:54:2057] recipient: [7:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:54:2057] recipient: [7:51:2095] Leader for TabletID 72057594037927937 is [7:56:2097] sender: [7:57:2057] recipient: [7:51:2095] Leader for TabletID 72057594037927937 is [7:56:2097] sender: [7:74:2057] recipient: [7:14:2061] !Reboot 72057594037927937 (actor [7:56:2097]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [7:56:2097] sender: [7:81:2057] recipient: [7:36:2083] Leader for TabletID 72057594037927937 is [7:56:2097] sender: [7:84:2057] recipient: [7:83:2113] Leader for TabletID 72057594037927937 is [7:56:2097] sender: [7:85:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [7:86:2114] sender: [7:87:2057] recipient: [7:83:2113] !Reboot 72057594037927937 (actor [7:56:2097]) rebooted! !Reboot 72057594037927937 (actor [7:56:2097]) tablet resolver refreshed! 
new actor is[7:86:2114] Leader for TabletID 72057594037927937 is [7:86:2114] sender: [7:140:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:54:2057] recipient: [8:50:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:54:2057] recipient: [8:50:2095] Leader for TabletID 72057594037927937 is [8:56:2097] sender: [8:57:2057] recipient: [8:50:2095] Leader for TabletID 72057594037927937 is [8:56:2097] sender: [8:74:2057] recipient: [8:14:2061] !Reboot 72057594037927937 (actor [8:56:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [8:56:2097] sender: [8:83:2057] recipient: [8:36:2083] Leader for TabletID 72057594037927937 is [8:56:2097] sender: [8:86:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [8:56:2097] sender: [8:87:2057] recipient: [8:85:2115] Leader for TabletID 72057594037927937 is [8:88:2116] sender: [8:89:2057] recipient: [8:85:2115] !Reboot 72057594037927937 (actor [8:56:2097]) rebooted! !Reboot 72057594037927937 (actor [8:56:2097]) tablet resolver refreshed! new actor is[8:88:2116] Leader for TabletID 72057594037927937 is [8:88:2116] sender: [8:142:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:54:2057] recipient: [9:52:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:54:2057] recipient: [9:52:2095] Leader for TabletID 72057594037927937 is [9:56:2097] sender: [9:57:2057] recipient: [9:52:2095] Leader for TabletID 72057594037927937 is [9:56:2097] sender: [9:74:2057] recipient: [9:14:2061] !Reboot 72057594037927937 (actor [9:56:2097]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [9:56:2097] sender: [9:83:2057] recipient: [9:36:2083] Leader for TabletID 72057594037927937 is [9:56:2097] sender: [9:86:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [9:56:2097] sender: [9:87:2057] recipient: [9:85:2115] Leader for TabletID 72057594037927937 is [9:88:2116] sender: [9:89:2057] recipient: [9:85:2115] !Reboot 72057594037927937 (actor [9:56:2097]) rebooted! !Reboot 72057594037927937 (actor [9:56:2097]) tablet resolver refreshed! new actor is[9:88:2116] Leader for TabletID 72057594037927937 is [9:88:2116] sender: [9:142:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:54:2057] recipient: [10:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:54:2057] recipient: [10:51:2095] Leader for TabletID 72057594037927937 is [10:56:2097] sender: [10:57:2057] recipient: [10:51:2095] Leader for TabletID 72057594037927937 is [10:56:2097] sender: [10:74:2057] recipient: [10:14:2061] !Reboot 72057594037927937 (actor [10:56:2097]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [10:56:2097] sender: [10:84:2057] recipient: [10:36:2083] Leader for TabletID 72057594037927937 is [10:56:2097] sender: [10:87:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [10:56:2097] sender: [10:88:2057] recipient: [10:86:2115] Leader for TabletID 72057594037927937 is [10:89:2116] sender: [10:90:2057] recipient: [10:86:2115] !Reboot 72057594037927937 (actor [10:56:2097]) rebooted! !Reboot 72057594037927937 (actor [10:56:2097]) tablet resolver refreshed! 
new actor is[10:89:2116] Leader for TabletID 72057594037927937 is [10:89:2116] sender: [10:143:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:54:2057] recipient: [11:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:54:2057] recipient: [11:51:2095] Leader for TabletID 72057594037927937 is [11:56:2097] sender: [11:57:2057] recipient: [11:51:2095] Leader for TabletID 72057594037927937 is [11:56:2097] sender: [11:74:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:54:2057] recipient: [12:52:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:54:2057] recipient: [12:52:2095] Leader for TabletID 72057594037927937 is [12:56:2097] sender: [12:57:2057] recipient: [12:52:2095] Leader for TabletID 72057594037927937 is [12:56:2097] sender: [12:74:2057] recipient: [12:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [13:54:2057] recipient: [13:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [13:54:2057] recipient: [13:51:2095] Leader for TabletID 72057594037927937 is [13:56:2097] sender: [13:57:2057] recipient: [13:51:2095] Leader for TabletID 72057594037927937 is [13:56:2097] sender: [13:74:2057] recipient: [13:14:2061] !Reboot 72057594037927937 (actor [13:56:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [13:56:2097] sender: [13:76:2057] recipient: [13:36:2083] Leader for TabletID 72057594037927937 is [13:56:2097] sender: [13:79:2057] recipient: [13:14:2061] Leader for TabletID 72057594037927937 is [13:56:2097] sender: [13:80:2057] recipient: [13:78:2110] Leader for TabletID 72057594037927937 is [13:81:2111] sender: [13:82:2057] recipient: [13:78:2110] !Reboot 72057594037927937 (actor [13:56:2097]) rebooted! !Reboot 72057594037927937 (actor [13:56:2097]) tablet resolver refreshed! new actor is[13:81:2111] Leader for TabletID 72057594037927937 is [13:81:2111] sender: [13:135:2057] recipient: [13:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [14:54:2057] recipient: [14:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [14:54:2057] recipient: [14:51:2095] Leader for TabletID 72057594037927937 is [14:56:2097] sender: [14:57:2057] recipient: [14:51:2095] Leader for TabletID 72057594037927937 is [14:56:2097] sender: [14:74:2057] recipient: [14:14:2061] !Reboot 72057594037927937 (actor [14:56:2097]) on event NKikimr::TEvKeyValue::TEvExecuteTransaction ! Leader for TabletID 72057594037927937 is [14:56:2097] sender: [14:76:2057] recipient: [14:36:2083] Leader for TabletID 72057594037927937 is [14:56:2097] sender: [14:79:2057] recipient: [14:14:2061] Leader for TabletID 72057594037927937 is [14:56:2097] sender: [14:80:2057] recipient: [14:78:2110] Leader for TabletID 72057594037927937 is [14:81:2111] sender: [14:82:2057] recipient: [14:78:2110] !Reboot 72057594037927937 (actor [14:56:2097]) rebooted! !Reboot 72057594037927937 (actor [14:56:2097]) tablet resolver refreshed! 
new actor is[14:81:2111] Leader for TabletID 72057594037927937 is [14:81:2111] sender: [14:135:2057] recipient: [14:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [15:54:2057] recipient: [15:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [15:54:2057] recipient: [15:51:2095] Leader for TabletID 72057594037927937 is [15:56:2097] sender: [15:57:2057] recipient: [15:51:2095] Leader for TabletID 72057594037927937 is [15:56:2097] sender: [15:74:2057] recipient: [15:14:2061] !Reboot 72057594037927937 (actor [15:56:2097]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [15:56:2097] sender: [15:77:2057] recipient: [15:36:2083] Leader for TabletID 72057594037927937 is [15:56:2097] sender: [15:79:2057] recipient: [15:14:2061] Leader for TabletID 72057594037927937 is [15:56:2097] sender: [15:81:2057] recipient: [15:80:2110] Leader for TabletID 72057594037927937 is [15:82:2111] sender: [15:83:2057] recipient: [15:80:2110] !Reboot 72057594037927937 (actor [15:56:2097]) rebooted! !Reboot 72057594037927937 (actor [15:56:2097]) tablet resolver refreshed! new actor is[15:82:2111] Leader for TabletID 72057594037927937 is [15:82:2111] sender: [15:136:2057] recipient: [15:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [16:54:2057] recipient: [16:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [16:54:2057] recipient: [16:51:2095] Leader for TabletID 72057594037927937 is [16:56:2097] sender: [16:57:2057] recipient: [16:51:2095] Leader for TabletID 72057594037927937 is [16:56:2097] sender: [16:74:2057] recipient: [16:14:2061] !Reboot 72057594037927937 (actor [16:56:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [16:56:2097] sender: [16:80:2057] recipient: [16:36:2083] Leader for TabletID 72057594037927937 is [16:56:2097] sender: [16:83:2057] recipient: [16:14:2061] Leader for TabletID 72057594037927937 is [16:56:2097] sender: [16:84:2057] recipient: [16:82:2113] Leader for TabletID 72057594037927937 is [16:85:2114] sender: [16:86:2057] recipient: [16:82:2113] !Reboot 72057594037927937 (actor [16:56:2097]) rebooted! !Reboot 72057594037927937 (actor [16:56:2097]) tablet resolver refreshed! new actor is[16:85:2114] Leader for TabletID 72057594037927937 is [16:85:2114] sender: [16:139:2057] recipient: [16:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [17:54:2057] recipient: [17:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [17:54:2057] recipient: [17:51:2095] Leader for TabletID 72057594037927937 is [17:56:2097] sender: [17:57:2057] recipient: [17:51:2095] Leader for TabletID 72057594037927937 is [17:56:2097] sender: [17:74:2057] recipient: [17:14:2061] !Reboot 72057594037927937 (actor [17:56:2097]) on event NKikimr::TEvKeyValue::TEvReadRange ! Leader for TabletID 72057594037927937 is [17:56:2097] sender: [17:80:2057] recipient: [17:36:2083] Leader for TabletID 72057594037927937 is [17:56:2097] sender: [17:82:2057] recipient: [17:14:2061] Leader for TabletID 72057594037927937 is [17:56:2097] sender: [17:84:2057] recipient: [17:83:2113] Leader for TabletID 72057594037927937 is [17:85:2114] sender: [17:86:2057] recipient: [17:83:2113] !Reboot 72057594037927937 (actor [17:56:2097]) rebooted! !Reboot 72057594037927937 (actor [17:56:2097]) tablet resolver refreshed! 
new actor is[17:85:2114] Leader for TabletID 72057594037927937 is [17:85:2114] sender: [17:139:2057] recipient: [17:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [18:54:2057] recipient: [18:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [18:54:2057] recipient: [18:51:2095] Leader for TabletID 72057594037927937 is [18:56:2097] sender: [18:57:2057] recipient: [18:51:2095] Leader for TabletID 72057594037927937 is [18:56:2097] sender: [18:74:2057] recipient: [18:14:2061] !Reboot 72057594037927937 (actor [18:56:2097]) on event NKikimr::TEvKeyValue::TEvNotify ! Leader for TabletID 72057594037927937 is [18:56:2097] sender: [18:81:2057] recipient: [18:36:2083] Leader for TabletID 72057594037927937 is [18:56:2097] sender: [18:84:2057] recipient: [18:14:2061] Leader for TabletID 72057594037927937 is [18:56:2097] sender: [18:85:2057] recipient: [18:83:2113] Leader for TabletID 72057594037927937 is [18:86:2114] sender: [18:87:2057] recipient: [18:83:2113] !Reboot 72057594037927937 (actor [18:56:2097]) rebooted! !Reboot 72057594037927937 (actor [18:56:2097]) tablet resolver refreshed! new actor is[18:86:2114] Leader for TabletID 72057594037927937 is [18:86:2114] sender: [18:104:2057] recipient: [18:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [19:54:2057] recipient: [19:50:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [19:54:2057] recipient: [19:50:2095] Leader for TabletID 72057594037927937 is [19:56:2097] sender: [19:57:2057] recipient: [19:50:2095] Leader for TabletID 72057594037927937 is [19:56:2097] sender: [19:74:2057] recipient: [19:14:2061] !Reboot 72057594037927937 (actor [19:56:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [19:56:2097] sender: [19:83:2057] recipient: [19:36:2083] Leader for TabletID 72057594037927937 is [19:56:2097] sender: [19:86:2057] recipient: [19:14:2061] Leader for TabletID 72057594037927937 is [19:56:2097] sender: [19:87:2057] recipient: [19:85:2115] Leader for TabletID 72057594037927937 is [19:88:2116] sender: [19:89:2057] recipient: [19:85:2115] !Reboot 72057594037927937 (actor [19:56:2097]) rebooted! !Reboot 72057594037927937 (actor [19:56:2097]) tablet resolver refreshed! new actor is[19:88:2116] Leader for TabletID 72057594037927937 is [19:88:2116] sender: [19:142:2057] recipient: [19:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [20:54:2057] recipient: [20:52:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [20:54:2057] recipient: [20:52:2095] Leader for TabletID 72057594037927937 is [20:56:2097] sender: [20:57:2057] recipient: [20:52:2095] Leader for TabletID 72057594037927937 is [20:56:2097] sender: [20:74:2057] recipient: [20:14:2061] !Reboot 72057594037927937 (actor [20:56:2097]) on event NKikimr::TEvKeyValue::TEvReadRange ! Leader for TabletID 72057594037927937 is [20:56:2097] sender: [20:83:2057] recipient: [20:36:2083] Leader for TabletID 72057594037927937 is [20:56:2097] sender: [20:86:2057] recipient: [20:14:2061] Leader for TabletID 72057594037927937 is [20:56:2097] sender: [20:87:2057] recipient: [20:85:2115] Leader for TabletID 72057594037927937 is [20:88:2116] sender: [20:89:2057] recipient: [20:85:2115] !Reboot 72057594037927937 (actor [20:56:2097]) rebooted! !Reboot 72057594037927937 (actor [20:56:2097]) tablet resolver refreshed! 
new actor is[20:88:2116] Leader for TabletID 72057594037927937 is [20:88:2116] sender: [20:142:2057] recipient: [20:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [21:54:2057] recipient: [21:50:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [21:54:2057] recipient: [21:50:2095] Leader for TabletID 72057594037927937 is [21:56:2097] sender: [21:57:2057] recipient: [21:50:2095] Leader for TabletID 72057594037927937 is [21:56:2097] sender: [21:74:2057] recipient: [21:14:2061] !Reboot 72057594037927937 (actor [21:56:2097]) on event NKikimr::TEvKeyValue::TEvNotify ! Leader for TabletID 72057594037927937 is [21:56:2097] sender: [21:84:2057] recipient: [21:36:2083] Leader for TabletID 72057594037927937 is [21:56:2097] sender: [21:87:2057] recipient: [21:14:2061] Leader for TabletID 72057594037927937 is [21:56:2097] sender: [21:88:2057] recipient: [21:86:2115] Leader for TabletID 72057594037927937 is [21:89:2116] sender: [21:90:2057] recipient: [21:86:2115] !Reboot 72057594037927937 (actor [21:56:2097]) rebooted! !Reboot 72057594037927937 (actor [21:56:2097]) tablet resolver refreshed! new actor is[21:89:2116] Leader for TabletID 72057594037927937 is [0:0:0] sender: [22:54:2057] recipient: [22:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [22:54:2057] recipient: [22:51:2095] Leader for TabletID 72057594037927937 is [22:56:2097] sender: [22:57:2057] recipient: [22:51:2095] Leader for TabletID 72057594037927937 is [22:56:2097] sender: [22:74:2057] recipient: [22:14:2061]
>> BasicUsage::WriteSessionNoAvailableDatabase [GOOD]
>> BasicUsage::WriteSessionSwitchDatabases
|90.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/nodewarden/ut/ydb-core-blobstorage-nodewarden-ut
|90.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/nodewarden/ut/ydb-core-blobstorage-nodewarden-ut
|90.1%| [LD] {RESULT} $(B)/ydb/core/blobstorage/nodewarden/ut/ydb-core-blobstorage-nodewarden-ut
------- [TM] {asan, default-linux-x86_64, release} ydb/core/keyvalue/ut/unittest
>> TKeyValueTest::TestWriteDeleteThenReadRemaining [GOOD]
Test command err:
Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:54:2057] recipient: [1:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:54:2057] recipient: [1:51:2095] Leader for TabletID 72057594037927937 is [1:56:2097] sender: [1:57:2057] recipient: [1:51:2095] Leader for TabletID 72057594037927937 is [1:56:2097] sender: [1:74:2057] recipient: [1:14:2061]
2025-05-07T08:53:32.144405Z node 1 :KEYVALUE ERROR: keyvalue_state.cpp:3012: KeyValue# 72057594037927937 PrepareExecuteTransactionRequest return false, Marker# KV73 Submsg# KeyValue# 72057594037927937 Can't delete Range, in DeleteRange, total limit of deletions per request (100000) reached, Marker# KV90
Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:54:2057] recipient: [2:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:54:2057] recipient: [2:51:2095] Leader for TabletID 72057594037927937 is [2:56:2097] sender: [2:57:2057] recipient: [2:51:2095] Leader for TabletID 72057594037927937 is [2:56:2097] sender: [2:74:2057] recipient: [2:14:2061]
2025-05-07T08:53:49.291715Z node 2 :KEYVALUE ERROR: keyvalue_state.cpp:3012: KeyValue# 72057594037927937 PrepareExecuteTransactionRequest return false, Marker# KV73 Submsg# KeyValue# 72057594037927937 Can't delete Range, in DeleteRange, total limit of deletions per request (100000) reached, Marker# KV90
Leader for TabletID
72057594037927937 is [0:0:0] sender: [3:54:2057] recipient: [3:52:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:54:2057] recipient: [3:52:2095] Leader for TabletID 72057594037927937 is [3:56:2097] sender: [3:57:2057] recipient: [3:52:2095] Leader for TabletID 72057594037927937 is [3:56:2097] sender: [3:74:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:54:2057] recipient: [4:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:54:2057] recipient: [4:51:2095] Leader for TabletID 72057594037927937 is [4:56:2097] sender: [4:57:2057] recipient: [4:51:2095] Leader for TabletID 72057594037927937 is [4:56:2097] sender: [4:74:2057] recipient: [4:14:2061] !Reboot 72057594037927937 (actor [4:56:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [4:56:2097] sender: [4:449:2057] recipient: [4:36:2083] Leader for TabletID 72057594037927937 is [4:56:2097] sender: [4:452:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [4:56:2097] sender: [4:453:2057] recipient: [4:451:2377] Leader for TabletID 72057594037927937 is [4:454:2378] sender: [4:455:2057] recipient: [4:451:2377] !Reboot 72057594037927937 (actor [4:56:2097]) rebooted! !Reboot 72057594037927937 (actor [4:56:2097]) tablet resolver refreshed! new actor is[4:454:2378] Leader for TabletID 72057594037927937 is [4:454:2378] sender: [4:508:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:54:2057] recipient: [5:50:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:54:2057] recipient: [5:50:2095] Leader for TabletID 72057594037927937 is [5:56:2097] sender: [5:57:2057] recipient: [5:50:2095] Leader for TabletID 72057594037927937 is [5:56:2097] sender: [5:74:2057] recipient: [5:14:2061] !Reboot 72057594037927937 (actor [5:56:2097]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [5:56:2097] sender: [5:449:2057] recipient: [5:36:2083] Leader for TabletID 72057594037927937 is [5:56:2097] sender: [5:452:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [5:56:2097] sender: [5:453:2057] recipient: [5:451:2377] Leader for TabletID 72057594037927937 is [5:454:2378] sender: [5:455:2057] recipient: [5:451:2377] !Reboot 72057594037927937 (actor [5:56:2097]) rebooted! !Reboot 72057594037927937 (actor [5:56:2097]) tablet resolver refreshed! new actor is[5:454:2378] Leader for TabletID 72057594037927937 is [5:454:2378] sender: [5:508:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:54:2057] recipient: [6:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:54:2057] recipient: [6:51:2095] Leader for TabletID 72057594037927937 is [6:56:2097] sender: [6:57:2057] recipient: [6:51:2095] Leader for TabletID 72057594037927937 is [6:56:2097] sender: [6:74:2057] recipient: [6:14:2061] !Reboot 72057594037927937 (actor [6:56:2097]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [6:56:2097] sender: [6:450:2057] recipient: [6:36:2083] Leader for TabletID 72057594037927937 is [6:56:2097] sender: [6:453:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [6:56:2097] sender: [6:454:2057] recipient: [6:452:2377] Leader for TabletID 72057594037927937 is [6:455:2378] sender: [6:456:2057] recipient: [6:452:2377] !Reboot 72057594037927937 (actor [6:56:2097]) rebooted! 
!Reboot 72057594037927937 (actor [6:56:2097]) tablet resolver refreshed! new actor is[6:455:2378] Leader for TabletID 72057594037927937 is [6:455:2378] sender: [6:509:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:54:2057] recipient: [7:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:54:2057] recipient: [7:51:2095] Leader for TabletID 72057594037927937 is [7:56:2097] sender: [7:57:2057] recipient: [7:51:2095] Leader for TabletID 72057594037927937 is [7:56:2097] sender: [7:74:2057] recipient: [7:14:2061]
>> PersQueueSdkReadSessionTest::ReadSessionWithCloseNotCommitted [GOOD]
>> PersQueueSdkReadSessionTest::ClosesAfterFailedConnectionToCds
>> DataStreams::TestStreamStorageRetention
>> TPersQueueCommonTest::TestLimiterLimitsWithBlobsRateLimit [GOOD]
>> TPersQueueCommonTest::TestLimiterLimitsWithUserPayloadRateLimit
>> DataStreams::TestGetRecordsStreamWithSingleShard
>> BasicUsage::BrokenCredentialsProvider [GOOD]
>> IncrementalBackup::ComplexRestoreBackupCollection+WithIncremental [FAIL]
>> IncrementalBackup::ComplexRestoreBackupCollection-WithIncremental
>> DataStreams::TestNonChargeableUser
>> DataShardSnapshots::UncommittedWriteRestartDuringCommit [GOOD]
>> DataShardSnapshots::UncommittedWriteRestartDuringCommitThenBulkErase
>> TKeyValueTest::TestInlineEmptyWriteReadDeleteWithRestartsThenResponseOk [GOOD]
>> TKeyValueTest::TestInlineEmptyWriteReadDeleteWithRestartsThenResponseOkNewApi
>> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-59 [GOOD]
>> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-60
>> TPersQueueCommonTest::Auth_MultipleUpdateTokenRequestIterationsWithValidToken_GotUpdateTokenResponseForEachRequest [GOOD]
>> TPersQueueCommonTest::Auth_WriteSessionWithValidTokenAndACEAndThenRemoveACEAndSendWriteRequest_SessionClosedWithUnauthorizedErrorAfterSuccessfullWriteResponse
>> TPQTest::TestPQRead [GOOD]
>> TPQTest::TestPQSmallRead
------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/persqueue_public/ut/with_offset_ranges_mode_ut/unittest
>> BasicUsage::BrokenCredentialsProvider [GOOD]
Test command err:
2025-05-07T08:53:08.258148Z :MaxByteSizeEqualZero INFO: Random seed for debugging is 1746607988258101
2025-05-07T08:53:09.023266Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501624192512489563:2212];send_to=[0:7307199536658146131:7762515];
2025-05-07T08:53:09.023319Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message;
2025-05-07T08:53:09.152016Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7501624193210028933:2220];send_to=[0:7307199536658146131:7762515];
2025-05-07T08:53:09.518086Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003498/r3tmp/tmpCNamkb/pdisk_1.dat
2025-05-07T08:53:09.543925Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created
2025-05-07T08:53:09.571604Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message;
2025-05-07T08:53:10.107347Z node 1 :IMPORT WARN:
schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:53:10.134149Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:53:10.163249Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:53:10.163356Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:53:10.175273Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:53:10.175349Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:53:10.203659Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-05-07T08:53:10.203861Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:53:10.205305Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 22301, node 1 2025-05-07T08:53:10.629366Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/zvgn/003498/r3tmp/yandexopk167.tmp 2025-05-07T08:53:10.629395Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/zvgn/003498/r3tmp/yandexopk167.tmp 2025-05-07T08:53:10.629541Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/zvgn/003498/r3tmp/yandexopk167.tmp 2025-05-07T08:53:10.629666Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-05-07T08:53:10.762179Z INFO: TTestServer started on Port 32477 GrpcPort 22301 TClient is connected to server localhost:32477 PQClient connected to localhost:22301 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:53:11.538391Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... waiting... waiting... waiting... 
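
Editor's note on the KV90 messages in the TKeyValueTest output above: they show the KeyValue tablet rejecting a single request that asked for more than its cap of 100000 deletions per request. A client clearing a large key range stays under such a cap by splitting the work across several requests. A minimal sketch of that pattern, assuming a hypothetical SendDeleteRange() helper rather than the real YDB client API:

#include <algorithm>
#include <cstdint>
#include <iostream>

// Cap mirroring the "total limit of deletions per request (100000)"
// message above; the authoritative constant lives in the tablet code.
constexpr uint64_t kMaxDeletionsPerRequest = 100000;

// Stub standing in for an actual delete-range RPC; illustration only.
void SendDeleteRange(uint64_t begin, uint64_t end) {
    std::cout << "delete keys [" << begin << ", " << end << ")\n";
}

// Split one oversized logical deletion into requests the tablet accepts.
void DeleteKeysChunked(uint64_t begin, uint64_t end) {
    while (begin < end) {
        uint64_t chunkEnd = std::min(end, begin + kMaxDeletionsPerRequest);
        SendDeleteRange(begin, chunkEnd);
        begin = chunkEnd;
    }
}
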
2025-05-07T08:53:14.022083Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501624192512489563:2212];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:53:14.022179Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:53:14.121383Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7501624193210028933:2220];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:53:14.121510Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:53:15.518151Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7501624218979832889:2315], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:53:15.518786Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7501624218979832881:2312], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:53:15.519065Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:53:15.529985Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976720657:3, at schemeshard: 72057594046644480 2025-05-07T08:53:15.554382Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501624218282294232:2342], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:53:15.554532Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:53:15.578802Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501624218282294244:2345], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:53:15.622773Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501624218282294269:2658] txid# 281474976710661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exists but creating right now (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateCreate)" severity: 1 } 2025-05-07T08:53:15.664697Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501624218282294268:2346], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976720657 completed, doublechecking } 2025-05-07T08:53:15.665279Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7501624218979832895:2316], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976720657 completed, doublechecking } 2025-05-07T08:53:15.768777Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501624218282294342:2698] txid# 281474976710662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:53:15.787901Z node 2 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [2:7501624218979832925:2134] txid# 281474976720658, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:53:16.096330Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7501624218979832932:2321], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-05-07T08:53:16.100952Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T08:53:16.101370Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7501624218282294352:2352], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-05-07T08:53:16.102023Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2147: SessionId: ydb://session/3?node_id=1&id=YjI3ZmIwMmEtYjZhNTM0MTAtMWEzOGRjNDItZDNjNzY5YjQ=, ActorId: [1:7501624218282294208:2338], ActorState: ExecuteState, TraceId: 01jtmz5ykp4pz366y2vc3f4fxf, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-05-07T08:53:16.102382Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } Y ... kie=1 sessionId= userAgent="pqv1 server" ip=ipv6:[::1]:60552 proto=v1 topic=test-topic durationSec=0 2025-05-07T08:53:59.400876Z node 5 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:566: init check schema 2025-05-07T08:53:59.405010Z node 5 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:627: session v1 cookie: 1 sessionId: describe result for acl check 2025-05-07T08:53:59.405169Z node 5 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:62: TTableHelper SelectQuery: --!syntax_v1 DECLARE $Hash AS Uint32; DECLARE $Topic AS Utf8; DECLARE $SourceId AS Utf8; SELECT Partition, CreateTime, AccessTime, SeqNo FROM `/Root/PQ/SourceIdMeta2` WHERE Hash == $Hash AND Topic == $Topic AND SourceId == $SourceId; 2025-05-07T08:53:59.405180Z node 5 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:63: TTableHelper UpdateQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint32; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64;DECLARE $SeqNo AS Uint64; UPSERT INTO `/Root/PQ/SourceIdMeta2` (Hash, Topic, SourceId, CreateTime, AccessTime, Partition, SeqNo) VALUES ($Hash, $Topic, $SourceId, $CreateTime, $AccessTime, $Partition, $SeqNo); 2025-05-07T08:53:59.405189Z node 5 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:64: TTableHelper UpdateAccessTimeQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint32; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64; UPDATE `/Root/PQ/SourceIdMeta2` SET AccessTime = $AccessTime WHERE Hash = $Hash AND Topic = $Topic AND SourceId = $SourceId AND Partition = $Partition; 2025-05-07T08:53:59.405209Z node 5 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:111: TPartitionChooser [5:7501624407931454051:2493] (SourceId=src, PreferedPartition=(NULL)) StartKqpSession 2025-05-07T08:53:59.408433Z node 5 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:142: TPartitionChooser [5:7501624407931454051:2493] (SourceId=src, PreferedPartition=(NULL)) Select from the table 2025-05-07T08:53:59.601903Z node 5 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__old_chooser_actor.h:67: TPartitionChooser [5:7501624407931454051:2493] (SourceId=src, 
PreferedPartition=(NULL)) RequestPQRB 2025-05-07T08:53:59.602200Z node 5 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72075186224037893][rt3.dc1--test-topic] pipe [5:7501624407931454098:2493] connected; active server actors: 1 2025-05-07T08:53:59.602251Z node 5 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__old_chooser_actor.h:80: TPartitionChooser [5:7501624407931454051:2493] (SourceId=src, PreferedPartition=(NULL)) Received partition 0 from PQRB for SourceId=src 2025-05-07T08:53:59.602269Z node 5 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:174: TPartitionChooser [5:7501624407931454051:2493] (SourceId=src, PreferedPartition=(NULL)) Update the table 2025-05-07T08:53:59.603876Z node 5 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037893][rt3.dc1--test-topic] pipe [5:7501624407931454098:2493] disconnected; active server actors: 1 2025-05-07T08:53:59.603900Z node 5 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1688: [72075186224037893][rt3.dc1--test-topic] pipe [5:7501624407931454098:2493] disconnected no session 2025-05-07T08:53:59.739758Z node 5 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:183: TPartitionChooser [5:7501624407931454051:2493] (SourceId=src, PreferedPartition=(NULL)) HandleUpdate PartitionPersisted=0 Status=SUCCESS 2025-05-07T08:53:59.739821Z node 5 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:305: TPartitionChooser [5:7501624407931454051:2493] (SourceId=src, PreferedPartition=(NULL)) ReplyResult: Partition=0, SeqNo=(NULL) 2025-05-07T08:53:59.739852Z node 5 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:268: TPartitionChooser [5:7501624407931454051:2493] (SourceId=src, PreferedPartition=(NULL)) Start idle 2025-05-07T08:53:59.739897Z node 5 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:689: ProceedPartition. session cookie: 1 sessionId: partition: 0 expectedGeneration: (NULL) 2025-05-07T08:53:59.746753Z node 5 :PQ_WRITE_PROXY DEBUG: writer.cpp:798: TPartitionWriter 72075186224037892 (partition=0) TEvClientConnected Status OK, TabletId: 72075186224037892, NodeId 6, Generation: 1 2025-05-07T08:53:59.746493Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:2874: [PQ: 72075186224037892] server connected, pipe [5:7501624407931454126:2493], now have 1 active actors on pipe 2025-05-07T08:53:59.750124Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:342: Handle TEvRequest topic: 'rt3.dc1--test-topic' requestId: 2025-05-07T08:53:59.750202Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:2788: [PQ: 72075186224037892] got client message batch for topic 'rt3.dc1--test-topic' partition 0 2025-05-07T08:53:59.750334Z node 6 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie src|96c657eb-afaefda-e1ffe1b3-e3a57b9b_0 generated for partition 0 topic 'rt3.dc1--test-topic' owner src 2025-05-07T08:53:59.750494Z node 6 :PERSQUEUE DEBUG: partition_write.cpp:35: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ReplyOwnerOk. 
Partition: 0 2025-05-07T08:53:59.750572Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:377: Answer ok topic: 'rt3.dc1--test-topic' partition: 0 messageNo: 0 requestId: cookie: 0 2025-05-07T08:53:59.752204Z node 5 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:865: session inited cookie: 1 partition: 0 MaxSeqNo: 0 sessionId: src|96c657eb-afaefda-e1ffe1b3-e3a57b9b_0 2025-05-07T08:53:59.751719Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:342: Handle TEvRequest topic: 'rt3.dc1--test-topic' requestId: 2025-05-07T08:53:59.751755Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:2788: [PQ: 72075186224037892] got client message batch for topic 'rt3.dc1--test-topic' partition 0 2025-05-07T08:53:59.751851Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:377: Answer ok topic: 'rt3.dc1--test-topic' partition: 0 messageNo: 0 requestId: cookie: 0 2025-05-07T08:53:59.753551Z :INFO: [] MessageGroupId [src] SessionId [] Counters: { Errors: 0 CurrentSessionLifetimeMs: 1746608039753 BytesWritten: 0 MessagesWritten: 0 BytesWrittenCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-05-07T08:53:59.753710Z :INFO: [] MessageGroupId [src] SessionId [] Write session established. Init response: session_id: "src|96c657eb-afaefda-e1ffe1b3-e3a57b9b_0" topic: "test-topic" cluster: "dc1" supported_codecs: CODEC_RAW supported_codecs: CODEC_GZIP supported_codecs: CODEC_LZOP 2025-05-07T08:53:59.753957Z :INFO: [] MessageGroupId [src] SessionId [src|96c657eb-afaefda-e1ffe1b3-e3a57b9b_0] Write session: close. Timeout = 0 ms 2025-05-07T08:53:59.754029Z :INFO: [] MessageGroupId [src] SessionId [src|96c657eb-afaefda-e1ffe1b3-e3a57b9b_0] Write session will now close 2025-05-07T08:53:59.754072Z :DEBUG: [] MessageGroupId [src] SessionId [src|96c657eb-afaefda-e1ffe1b3-e3a57b9b_0] Write session: aborting 2025-05-07T08:53:59.754650Z :INFO: [] MessageGroupId [src] SessionId [src|96c657eb-afaefda-e1ffe1b3-e3a57b9b_0] Write session: gracefully shut down, all writes complete 2025-05-07T08:53:59.754702Z :DEBUG: [] MessageGroupId [src] SessionId [src|96c657eb-afaefda-e1ffe1b3-e3a57b9b_0] Write session: destroy 2025-05-07T08:53:59.755457Z node 5 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 1 sessionId: src|96c657eb-afaefda-e1ffe1b3-e3a57b9b_0 grpc read done: success: 0 data: 2025-05-07T08:53:59.755500Z node 5 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 1 sessionId: src|96c657eb-afaefda-e1ffe1b3-e3a57b9b_0 grpc read failed 2025-05-07T08:53:59.755553Z node 5 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:818: session v1 closed cookie: 1 sessionId: src|96c657eb-afaefda-e1ffe1b3-e3a57b9b_0 2025-05-07T08:53:59.755572Z node 5 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 1 sessionId: src|96c657eb-afaefda-e1ffe1b3-e3a57b9b_0 is DEAD 2025-05-07T08:53:59.756431Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:2899: [PQ: 72075186224037892] server disconnected, pipe [5:7501624407931454126:2493] destroyed 2025-05-07T08:53:59.756482Z node 6 :PERSQUEUE DEBUG: partition_write.cpp:138: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::DropOwner. 
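
The TPartitionChooser trace above resolves a partition for SourceId=src in two steps: first a lookup in the /Root/PQ/SourceIdMeta2 table (the TTableHelper SelectQuery), and on a miss a request to the read balancer (RequestPQRB) followed by persisting the choice (the UpdateQuery). A schematic version of that decision with the table and balancer replaced by in-memory stand-ins; the names below are illustrative, not the real actor interfaces:

#include <cstdint>
#include <functional>
#include <string>
#include <unordered_map>

// Stand-in for the SourceIdMeta2 table: source id -> partition.
std::unordered_map<std::string, uint32_t> g_sourceIdMeta;

// Stand-in for the read balancer's partition assignment (RequestPQRB).
uint32_t AskBalancerForPartition(const std::string& sourceId, uint32_t partitionCount) {
    return std::hash<std::string>{}(sourceId) % partitionCount;
}

// Prefer the persisted mapping so a source id keeps writing to one
// partition; otherwise take the balancer's answer and remember it.
uint32_t ChoosePartition(const std::string& sourceId, uint32_t partitionCount) {
    if (auto it = g_sourceIdMeta.find(sourceId); it != g_sourceIdMeta.end()) {
        return it->second;              // "Select from the table" hit
    }
    uint32_t partition = AskBalancerForPartition(sourceId, partitionCount);
    g_sourceIdMeta[sourceId] = partition;  // "Update the table"
    return partition;
}
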
2025-05-07T08:53:59.755943Z node 5 :PQ_WRITE_PROXY DEBUG: writer.cpp:538: TPartitionWriter 72075186224037892 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-05-07T08:53:59.771293Z :INFO: [/Root] [/Root] [ac7c4e12-f36c8440-602ab0f3-4b471a7b] Starting read session 2025-05-07T08:53:59.771379Z :DEBUG: [/Root] [/Root] [ac7c4e12-f36c8440-602ab0f3-4b471a7b] Starting session to cluster null (localhost:12144) 2025-05-07T08:53:59.775263Z :DEBUG: [/Root] [/Root] [ac7c4e12-f36c8440-602ab0f3-4b471a7b] [null] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:53:59.775429Z :DEBUG: [/Root] [/Root] [ac7c4e12-f36c8440-602ab0f3-4b471a7b] [null] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:53:59.775481Z :DEBUG: [/Root] [/Root] [ac7c4e12-f36c8440-602ab0f3-4b471a7b] [null] Reconnecting session to cluster null in 0.000000s 2025-05-07T08:53:59.786145Z :ERROR: [/Root] [/Root] [ac7c4e12-f36c8440-602ab0f3-4b471a7b] [null] Got error. Status: CLIENT_UNAUTHENTICATED. Description:
: Error: Can't get Authentication info from CredentialsProvider. ydb/public/sdk/cpp/src/client/persqueue_public/ut/basic_usage_ut.cpp:451: exception during creation 2025-05-07T08:53:59.786234Z :DEBUG: [/Root] [/Root] [ac7c4e12-f36c8440-602ab0f3-4b471a7b] [null] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:53:59.786275Z :DEBUG: [/Root] [/Root] [ac7c4e12-f36c8440-602ab0f3-4b471a7b] [null] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:53:59.786425Z :INFO: [/Root] [/Root] [ac7c4e12-f36c8440-602ab0f3-4b471a7b] [null] Closing session to cluster: SessionClosed { Status: CLIENT_UNAUTHENTICATED Issues: "
: Error: Failed to establish connection to server "" ( cluster null). Attempts done: 1
: Error: Can't get Authentication info from CredentialsProvider. ydb/public/sdk/cpp/src/client/persqueue_public/ut/basic_usage_ut.cpp:451: exception during creation " } Get event on client 2025-05-07T08:53:59.786617Z :NOTICE: [/Root] [/Root] [ac7c4e12-f36c8440-602ab0f3-4b471a7b] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2025-05-07T08:53:59.786662Z :DEBUG: [/Root] [/Root] [ac7c4e12-f36c8440-602ab0f3-4b471a7b] [null] Abort session to cluster Got close event: SessionClosed { Status: CLIENT_UNAUTHENTICATED Issues: "
: Error: Failed to establish connection to server "" ( cluster null). Attempts done: 1
: Error: Can't get Authentication info from CredentialsProvider. ydb/public/sdk/cpp/src/client/persqueue_public/ut/basic_usage_ut.cpp:451: exception during creation " }2025-05-07T08:53:59.786743Z :INFO: [/Root] [/Root] [ac7c4e12-f36c8440-602ab0f3-4b471a7b] Closing read session. Close timeout: 0.000000s 2025-05-07T08:53:59.786778Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): 2025-05-07T08:53:59.786811Z :INFO: [/Root] [/Root] [ac7c4e12-f36c8440-602ab0f3-4b471a7b] Counters: { Errors: 1 CurrentSessionLifetimeMs: 15 BytesRead: 0 MessagesRead: 0 BytesReadCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-05-07T08:53:59.786893Z :NOTICE: [/Root] [/Root] [ac7c4e12-f36c8440-602ab0f3-4b471a7b] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " }
>> TKeyValueTest::TestEmptyWriteReadDeleteWithRestartsThenResponseOk [GOOD]
>> TKeyValueTest::TestEmptyWriteReadDeleteWithRestartsThenResponseOkNewApi
>> YdbIndexTable::MultiShardTableOneUniqIndex [GOOD]
>> YdbIndexTable::MultiShardTableOneUniqIndexDataColumn
>> DataStreams::TestDeleteStream
>> YdbIndexTable::MultiShardTableOneIndexPkOverlap [GOOD]
>> TGRpcCmsTest::SimpleTenantsTest [GOOD]
>> DataStreams::TestStreamStorageRetention [GOOD]
>> DataStreams::TestStreamPagination
>> TPQTest::TestSetClientOffset [GOOD]
>> TPQTest::TestReadSubscription
>> TKeyValueTest::TestWriteToExtraChannelThenReadMixedChannelsReturnsOkNewApi [GOOD]
------- [TM] {asan, default-linux-x86_64, release} ydb/services/cms/ut/unittest
>> TGRpcCmsTest::SimpleTenantsTest [GOOD]
Test command err:
2025-05-07T08:53:58.430649Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501624406012807163:2279];send_to=[0:7307199536658146131:7762515];
2025-05-07T08:53:58.430722Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message;
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002b10/r3tmp/tmpbPOVMu/pdisk_1.dat
2025-05-07T08:53:59.397265Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded
2025-05-07T08:53:59.438637Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2025-05-07T08:53:59.438729Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting
2025-05-07T08:53:59.446383Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError;
2025-05-07T08:53:59.462935Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected
TServer::EnableGrpc on GrpcPort 18930, node 1
2025-05-07T08:53:59.749587Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe)
2025-05-07T08:53:59.749620Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe)
2025-05-07T08:53:59.749631Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe)
2025-05-07T08:53:59.749767Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration
TClient is connected to server localhost:15414
WaitRootIsUp 'Root'...
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:54:00.157016Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:54:00.247074Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:953: StateWork, received event# 273285120, Sender [1:7501624414602742339:2314], Recipient [1:7501624410307774753:2200]: NKikimr::NConsole::TEvConsole::TEvCreateTenantRequest { Request { path: "/Root/users/user-1" resources { storage_units { unit_kind: "hdd" count: 1 } } } UserToken: "" } 2025-05-07T08:54:00.247126Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:956: StateWork, processing event TEvConsole::TEvCreateTenantRequest 2025-05-07T08:54:00.247144Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2025-05-07T08:54:00.247165Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2025-05-07T08:54:00.247268Z node 1 :CMS_TENANTS DEBUG: console__create_tenant.cpp:69: TTxCreateTenant: Request { path: "/Root/users/user-1" resources { storage_units { unit_kind: "hdd" count: 1 } } } UserToken: "" 2025-05-07T08:54:00.247382Z node 1 :CMS_TENANTS DEBUG: console__create_tenant.cpp:363: Add tenant /Root/users/user-1 (txid = 1746608040246924) 2025-05-07T08:54:00.252817Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2532: Add tenant /Root/users/user-1 to database state=CREATING_POOLS coordinators=3 mediators=3 planresolution=10 timecastbucketspermediator=2 issue= txid=1746608040246924 subdomainversion=1 confirmedsubdomain=0 attrs= generation=1 errorcode=STATUS_CODE_UNSPECIFIED isExternalSubDomain=1 isExternalHive=1 isExternalSysViewProcessor=1 isExternalStatisticsAggregator=1 areResourcesShared=0 sharedDomainId= 2025-05-07T08:54:00.253077Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2591: Add tenant pool /Root/users/user-1:hdd to database kind=hdd config=BoxId: 999 StoragePoolId: 4 Name: "/Root/users/user-1:hdd" ErasureSpecies: "none" VDiskKind: "Default" Kind: "hdd" NumGroups: 1 PDiskFilter { Property { Type: ROT } } allocatednumgroups=0 state=NOT_ALLOCATED 2025-05-07T08:54:00.258582Z node 1 :CMS_TENANTS DEBUG: console__create_tenant.cpp:373: TTxCreateTenant Complete 2025-05-07T08:54:00.259841Z node 1 :CMS_TENANTS TRACE: console__create_tenant.cpp:381: Send: NKikimr::NConsole::TEvConsole::TEvCreateTenantResponse { Response { operation { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1746608040246924&action=1" } } } 
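
The CMS_TENANTS trace above registers /Root/users/user-1 in state CREATING_POOLS via TTxCreateTenant; the log continues below with the storage pool moving to ALLOCATED and the tenant advancing to CREATING_SUBDOMAIN before the subdomain is proposed to the scheme shard. A compressed sketch of that progression, with state names taken from the log and the transitions heavily simplified relative to the real tenants manager:

#include <stdexcept>

// Tenant lifecycle states as they appear in the CMS_TENANTS lines; the
// real manager also tracks pool states, subdomain versions, and errors.
enum class ETenantState {
    CreatingPools,
    CreatingSubdomain,
    Running,
};

// Advance only when the prerequisite step has completed successfully.
ETenantState Advance(ETenantState state, bool stepSucceeded) {
    if (!stepSucceeded) {
        throw std::runtime_error("step failed; tenant stays in its current state");
    }
    switch (state) {
        case ETenantState::CreatingPools:      // storage pool reached ALLOCATED
            return ETenantState::CreatingSubdomain;
        case ETenantState::CreatingSubdomain:  // scheme shard confirmed the subdomain
            return ETenantState::Running;
        case ETenantState::Running:
            return ETenantState::Running;
    }
    return state;  // unreachable; keeps compilers without exhaustiveness checks quiet
}
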
2025-05-07T08:54:00.260078Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-05-07T08:54:00.260196Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:157: TPoolManip(/Root/users/user-1:hdd) Bootstrap 2025-05-07T08:54:00.260427Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:116: TPoolManip(/Root/users/user-1:hdd) read pool state: Request { Command { ReadStoragePool { BoxId: 999 Name: "/Root/users/user-1:hdd" } } } 2025-05-07T08:54:00.261067Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:197: TPoolManip(/Root/users/user-1:hdd) got read response: Status { Success: true } Success: true ConfigTxSeqNo: 5 2025-05-07T08:54:00.261302Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:130: TPoolManip(/Root/users/user-1:hdd) send pool request: Request { Command { DefineStoragePool { BoxId: 999 StoragePoolId: 4 Name: "/Root/users/user-1:hdd" ErasureSpecies: "none" VDiskKind: "Default" Kind: "hdd" NumGroups: 1 PDiskFilter { Property { Type: ROT } } } } } 2025-05-07T08:54:00.266338Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:243: TPoolManip(/Root/users/user-1:hdd) got config response: Status { Success: true } Success: true ConfigTxSeqNo: 6 2025-05-07T08:54:00.266457Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:167: TPoolManip(/Root/users/user-1:hdd) reply with NKikimr::NConsole::TTenantsManager::TEvPrivate::TEvPoolAllocated 2025-05-07T08:54:00.266550Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:953: StateWork, received event# 2146435079, Sender [1:7501624414602742344:2200], Recipient [1:7501624410307774753:2200]: NKikimr::NConsole::TTenantsManager::TEvPrivate::TEvPoolAllocated 2025-05-07T08:54:00.266598Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:965: StateWork, processing event TEvPrivate::TEvPoolAllocated 2025-05-07T08:54:00.266633Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2025-05-07T08:54:00.266644Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2025-05-07T08:54:00.266707Z node 1 :CMS_TENANTS DEBUG: console__update_pool_state.cpp:28: TTxUpdatePoolState for pool /Root/users/user-1:hdd of /Root/users/user-1 state=ALLOCATED 2025-05-07T08:54:00.266727Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3000: Update pool state in database for /Root/users/user-1:hdd state=ALLOCATED allocatednumgroups=1 2025-05-07T08:54:00.266824Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3146: Update subdomain version in database for /Root/users/user-1 subdomainversion=2 2025-05-07T08:54:00.288223Z node 1 :CMS_TENANTS DEBUG: console__update_pool_state.cpp:73: TTxUpdatePoolState complete for /Root/users/user-1:hdd 2025-05-07T08:54:00.288272Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2025-05-07T08:54:00.288283Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-05-07T08:54:00.288294Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2025-05-07T08:54:00.288396Z node 1 :CMS_TENANTS DEBUG: console__update_tenant_state.cpp:23: TTxUpdateTenantState for tenant /Root/users/user-1 to CREATING_SUBDOMAIN 2025-05-07T08:54:00.288423Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3099: Update tenant state in database for /Root/users/user-1 state=CREATING_SUBDOMAIN txid=1746608040246924 errorcode=STATUS_CODE_UNSPECIFIED issue= 2025-05-07T08:54:00.295230Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:953: 
StateWork, received event# 273285131, Sender [1:7501624414602742358:2316], Recipient [1:7501624410307774753:2200]: NKikimr::NConsole::TEvConsole::TEvGetOperationRequest { Request { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1746608040246924&action=1" } UserToken: "" } 2025-05-07T08:54:00.295279Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:958: StateWork, processing event TEvConsole::TEvGetOperationRequest 2025-05-07T08:54:00.295553Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3293: Send TEvConsole::TEvGetOperationResponse: Response { operation { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1746608040246924&action=1" } } 2025-05-07T08:54:00.297811Z node 1 :CMS_TENANTS DEBUG: console__update_tenant_state.cpp:45: TTxUpdateTenantState complete for /Root/users/user-1 2025-05-07T08:54:00.297985Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-05-07T08:54:00.298033Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:743: TSubdomainManip(/Root/users/user-1)::Bootstrap 2025-05-07T08:54:00.298045Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:595: TSubDomainManip(/Root/users/user-1) create subdomain 2025-05-07T08:54:00.311559Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:619: TSubdomainManip(/Root/users/user-1) send subdomain creation cmd: NKikimrTxUserProxy.TEvProposeTransaction Transaction { ModifyScheme { WorkingDir: "/Root" OperationType: ESchemeOpCreateExtSubDomain SubDomain { Name: "users/user-1" ExternalSchemeShard: true ExternalHive: true ExternalSysViewProcessor: true ExternalStatisticsAggregator: true GraphShard: true } } } ExecTimeoutPeriod: 18446744073709551615 DatabaseName: "Root" 2025-05-07T08:54:00.313361Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976710658:1, at schemeshard: 72057594046644480 2025-05-07T08:54:00.318711Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:791: TSubdomainManip(/Root/users/user-1) got propose result: Status: 53 TxId: 281474976710658 SchemeShardStatus: 1 SchemeShardTabletId: 72057594046644480 PathId: 3 2025-05-07T08:54:00.318791Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:727: TSubdomainManip(/Root/users/user-1) send notification request: NKikimrScheme.TEvNotifyTxCompletion TxId: 281474976710658 2025-05-07T08:54:00.325130Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:763: TSubdomainManip(/Root/users/user-1) got TEvNotifyTxCompletionRegistered: TxId: 281474976710658 2025-05-07T08:54:00.340767Z node 1 :CMS_TENANTS DEBUG: console_tenants_man ... 
86224037891 not found 2025-05-07T08:54:01.420057Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:197: TPoolManip(/Root/users/user-1:hdd) got read response: Status { Success: true StoragePool { BoxId: 999 StoragePoolId: 4 Name: "/Root/users/user-1:hdd" ErasureSpecies: "none" Geometry { } VDiskKind: "Default" Kind: "hdd" NumGroups: 2 PDiskFilter { Property { Type: ROT } } ScopeId { X1: 72057594046644480 X2: 3 } ItemConfigGeneration: 3 } } Success: true ConfigTxSeqNo: 13 2025-05-07T08:54:01.420177Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:150: TPoolManip(/Root/users/user-1:hdd) send pool request: Request { Command { DeleteStoragePool { BoxId: 999 StoragePoolId: 4 ItemConfigGeneration: 3 } } } 2025-05-07T08:54:01.477585Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037888 not found 2025-05-07T08:54:01.480029Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:305: TPoolManip(/Root/users/user-1:hdd) got config response: Status { Success: true } Success: true ConfigTxSeqNo: 14 2025-05-07T08:54:01.480155Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:953: StateWork, received event# 2146435081, Sender [1:7501624418897710290:2200], Recipient [1:7501624410307774753:2200]: NKikimr::NConsole::TTenantsManager::TEvPrivate::TEvPoolDeleted 2025-05-07T08:54:01.480214Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:966: StateWork, processing event TEvPrivate::TEvPoolDeleted 2025-05-07T08:54:01.480240Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2025-05-07T08:54:01.480251Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2025-05-07T08:54:01.480289Z node 1 :CMS_TENANTS DEBUG: console__update_pool_state.cpp:28: TTxUpdatePoolState for pool /Root/users/user-1:hdd of /Root/users/user-1 state=DELETED 2025-05-07T08:54:01.480326Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3000: Update pool state in database for /Root/users/user-1:hdd state=DELETED allocatednumgroups=0 2025-05-07T08:54:01.480904Z node 1 :HIVE WARN: tx__block_storage_result.cpp:56: HIVE#72057594037968897 THive::TTxBlockStorageResult retrying for 72075186224037888 because of ERROR 2025-05-07T08:54:01.533371Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:953: StateWork, received event# 273285131, Sender [1:7501624418897710306:2373], Recipient [1:7501624410307774753:2200]: NKikimr::NConsole::TEvConsole::TEvGetOperationRequest { Request { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1746608041299624&action=2" } UserToken: "" } 2025-05-07T08:54:01.533407Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:958: StateWork, processing event TEvConsole::TEvGetOperationRequest 2025-05-07T08:54:01.533673Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3293: Send TEvConsole::TEvGetOperationResponse: Response { operation { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1746608041299624&action=2" } } 2025-05-07T08:54:01.573333Z node 1 :BS_PDISK ERROR: {BPD01@blobstorage_pdisk_impl.cpp:2935} PDiskId# 1 request from unregistered ownerId# 5 error in TLogWrite for ownerId# 5 ownerRound# 5 lsn# 169 PDiskId# 1 2025-05-07T08:54:01.573465Z node 1 :BS_PDISK ERROR: {BPD01@blobstorage_pdisk_impl.cpp:2935} PDiskId# 1 request from unregistered ownerId# 5 error in TLogWrite for ownerId# 5 ownerRound# 5 lsn# 170 PDiskId# 1 2025-05-07T08:54:01.573486Z node 1 :BS_PDISK ERROR: 
{BPD01@blobstorage_pdisk_impl.cpp:2935} PDiskId# 1 request from unregistered ownerId# 5 error in TLogWrite for ownerId# 5 ownerRound# 5 lsn# 171 PDiskId# 1 2025-05-07T08:54:01.573500Z node 1 :BS_PDISK ERROR: {BPD01@blobstorage_pdisk_impl.cpp:2935} PDiskId# 1 request from unregistered ownerId# 5 error in TLogWrite for ownerId# 5 ownerRound# 5 lsn# 172 PDiskId# 1 2025-05-07T08:54:01.578314Z node 1 :BS_PDISK ERROR: {BPD01@blobstorage_pdisk_impl.cpp:2935} PDiskId# 1 request from unregistered ownerId# 5 error in TLogWrite for ownerId# 5 ownerRound# 5 lsn# 173 PDiskId# 1 2025-05-07T08:54:01.578380Z node 1 :BS_PDISK ERROR: {BPD01@blobstorage_pdisk_impl.cpp:2935} PDiskId# 1 request from unregistered ownerId# 5 error in TLogWrite for ownerId# 5 ownerRound# 5 lsn# 174 PDiskId# 1 2025-05-07T08:54:01.578394Z node 1 :BS_PDISK ERROR: {BPD01@blobstorage_pdisk_impl.cpp:2935} PDiskId# 1 request from unregistered ownerId# 5 error in TLogWrite for ownerId# 5 ownerRound# 5 lsn# 175 PDiskId# 1 2025-05-07T08:54:01.578413Z node 1 :BS_PDISK ERROR: {BPD01@blobstorage_pdisk_impl.cpp:2935} PDiskId# 1 request from unregistered ownerId# 5 error in TLogWrite for ownerId# 5 ownerRound# 5 lsn# 176 PDiskId# 1 2025-05-07T08:54:01.578427Z node 1 :BS_PDISK ERROR: {BPD01@blobstorage_pdisk_impl.cpp:2935} PDiskId# 1 request from unregistered ownerId# 5 error in TLogWrite for ownerId# 5 ownerRound# 5 lsn# 177 PDiskId# 1 2025-05-07T08:54:01.578440Z node 1 :BS_PDISK ERROR: {BPD01@blobstorage_pdisk_impl.cpp:2935} PDiskId# 1 request from unregistered ownerId# 5 error in TLogWrite for ownerId# 5 ownerRound# 5 lsn# 178 PDiskId# 1 2025-05-07T08:54:01.578456Z node 1 :BS_PDISK ERROR: {BPD01@blobstorage_pdisk_impl.cpp:2935} PDiskId# 1 request from unregistered ownerId# 5 error in TLogWrite for ownerId# 5 ownerRound# 5 lsn# 179 PDiskId# 1 2025-05-07T08:54:01.578480Z node 1 :BS_PDISK ERROR: {BPD01@blobstorage_pdisk_impl.cpp:2935} PDiskId# 1 request from unregistered ownerId# 5 error in TLogWrite for ownerId# 5 ownerRound# 5 lsn# 180 PDiskId# 1 2025-05-07T08:54:01.578492Z node 1 :BS_PDISK ERROR: {BPD01@blobstorage_pdisk_impl.cpp:2935} PDiskId# 1 request from unregistered ownerId# 5 error in TLogWrite for ownerId# 5 ownerRound# 5 lsn# 181 PDiskId# 1 2025-05-07T08:54:01.578511Z node 1 :BS_PDISK ERROR: {BPD01@blobstorage_pdisk_impl.cpp:2935} PDiskId# 1 request from unregistered ownerId# 5 error in TLogWrite for ownerId# 5 ownerRound# 5 lsn# 182 PDiskId# 1 2025-05-07T08:54:01.578524Z node 1 :BS_PDISK ERROR: {BPD01@blobstorage_pdisk_impl.cpp:2935} PDiskId# 1 request from unregistered ownerId# 5 error in TLogWrite for ownerId# 5 ownerRound# 5 lsn# 183 PDiskId# 1 2025-05-07T08:54:01.578537Z node 1 :BS_PDISK ERROR: {BPD01@blobstorage_pdisk_impl.cpp:2935} PDiskId# 1 request from unregistered ownerId# 5 error in TLogWrite for ownerId# 5 ownerRound# 5 lsn# 184 PDiskId# 1 2025-05-07T08:54:01.578549Z node 1 :BS_PDISK ERROR: {BPD01@blobstorage_pdisk_impl.cpp:2935} PDiskId# 1 request from unregistered ownerId# 5 error in TLogWrite for ownerId# 5 ownerRound# 5 lsn# 185 PDiskId# 1 2025-05-07T08:54:01.578586Z node 1 :BS_PDISK ERROR: {BPD01@blobstorage_pdisk_impl.cpp:2935} PDiskId# 1 request from unregistered ownerId# 5 error in TLogWrite for ownerId# 5 ownerRound# 5 lsn# 186 PDiskId# 1 2025-05-07T08:54:01.578602Z node 1 :BS_PDISK ERROR: {BPD01@blobstorage_pdisk_impl.cpp:2935} PDiskId# 1 request from unregistered ownerId# 5 error in TLogWrite for ownerId# 5 ownerRound# 5 lsn# 187 PDiskId# 1 2025-05-07T08:54:01.578621Z node 1 :BS_PDISK ERROR: 
{BPD01@blobstorage_pdisk_impl.cpp:2935} PDiskId# 1 request from unregistered ownerId# 5 error in TLogWrite for ownerId# 5 ownerRound# 5 lsn# 188 PDiskId# 1 2025-05-07T08:54:01.578639Z node 1 :BS_PDISK ERROR: {BPD01@blobstorage_pdisk_impl.cpp:2935} PDiskId# 1 request from unregistered ownerId# 5 error in TLogWrite for ownerId# 5 ownerRound# 5 lsn# 189 PDiskId# 1 2025-05-07T08:54:01.582446Z node 1 :CMS_TENANTS DEBUG: console__update_pool_state.cpp:73: TTxUpdatePoolState complete for /Root/users/user-1:hdd 2025-05-07T08:54:01.586581Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2025-05-07T08:54:01.586604Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-05-07T08:54:01.586629Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2025-05-07T08:54:01.586708Z node 1 :CMS_TENANTS DEBUG: console__remove_tenant_done.cpp:22: TTxRemoveTenantDone for tenant /Root/users/user-1 txid=1746608041299624 2025-05-07T08:54:01.586718Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2880: Remove computational units of /Root/users/user-1 from database txid=1746608041299624 issue= 2025-05-07T08:54:01.586729Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2911: Remove tenant /Root/users/user-1 from database txid=1746608041299624 issue= 2025-05-07T08:54:01.586742Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2916: Remove pool /Root/users/user-1:hdd from database 2025-05-07T08:54:01.586825Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3036: Add tenant removal info for /Root/users/user-1 txid=1746608041299624 code=SUCCESS errorcode=STATUS_CODE_UNSPECIFIED issue= 2025-05-07T08:54:01.630530Z node 1 :CMS_TENANTS DEBUG: console__remove_tenant_done.cpp:34: TTxRemoveTenantDone Complete 2025-05-07T08:54:01.642324Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-05-07T08:54:01.655610Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:953: StateWork, received event# 273285131, Sender [1:7501624418897710330:2376], Recipient [1:7501624410307774753:2200]: NKikimr::NConsole::TEvConsole::TEvGetOperationRequest { Request { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1746608041299624&action=2" } UserToken: "" } 2025-05-07T08:54:01.655645Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:958: StateWork, processing event TEvConsole::TEvGetOperationRequest 2025-05-07T08:54:01.655874Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3293: Send TEvConsole::TEvGetOperationResponse: Response { operation { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1746608041299624&action=2" ready: true status: SUCCESS } } 2025-05-07T08:54:01.676163Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:953: StateWork, received event# 273285122, Sender [1:7501624418897710334:2378], Recipient [1:7501624410307774753:2200]: NKikimr::NConsole::TEvConsole::TEvGetTenantStatusRequest { Request { path: "/Root/users/user-1" } UserToken: "" } 2025-05-07T08:54:01.676192Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:959: StateWork, processing event TEvConsole::TEvGetTenantStatusRequest 2025-05-07T08:54:01.676348Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3317: Send TEvConsole::TEvGetTenantStatusResponse: Response { operation { ready: true status: NOT_FOUND issues { message: "Unknown tenant /Root/users/user-1" severity: 1 } } } 2025-05-07T08:54:01.731331Z node 1 :CMS_TENANTS TRACE: 
console_tenants_manager.h:953: StateWork, received event# 273285123, Sender [1:7501624418897710340:2379], Recipient [1:7501624410307774753:2200]: NKikimr::NConsole::TEvConsole::TEvListTenantsRequest { Request { } UserToken: "" } 2025-05-07T08:54:01.731376Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, processing event TEvConsole::TEvListTenantsRequest 2025-05-07T08:54:01.731577Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3361: Send TEvConsole::TEvListTenantsResponse: Response { operation { ready: true status: SUCCESS result { [type.googleapis.com/Ydb.Cms.ListDatabasesResult] { } } } } 2025-05-07T08:54:01.747248Z node 1 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 3 2025-05-07T08:54:01.747429Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-05-07T08:54:05.386280Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7501624414899015307:2094];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:54:05.386369Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/users/user-1/.metadata/initialization/migrations;error=timeout; >> TPQTestSlow::TestWriteVeryBigMessage [GOOD] |90.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/mind/hive/ut/ydb-core-mind-hive-ut |90.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/mind/hive/ut/ydb-core-mind-hive-ut |90.1%| [LD] {RESULT} $(B)/ydb/core/mind/hive/ut/ydb-core-mind-hive-ut >> BasicUsage::WriteSessionCloseWaitsForWrites [GOOD] >> BasicUsage::WriteSessionCloseIgnoresWrites >> DataStreams::TestGetRecordsStreamWithSingleShard [GOOD] >> DataStreams::TestGetRecords1MBMessagesOneByOneByTS ------- [TM] {asan, default-linux-x86_64, release} ydb/core/keyvalue/ut/unittest >> TKeyValueTest::TestWriteToExtraChannelThenReadMixedChannelsReturnsOkNewApi [GOOD] Test command err: Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:54:2057] recipient: [1:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:54:2057] recipient: [1:51:2095] Leader for TabletID 72057594037927937 is [1:56:2097] sender: [1:57:2057] recipient: [1:51:2095] Leader for TabletID 72057594037927937 is [1:56:2097] sender: [1:74:2057] recipient: [1:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:54:2057] recipient: [2:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:54:2057] recipient: [2:51:2095] Leader for TabletID 72057594037927937 is [2:56:2097] sender: [2:57:2057] recipient: [2:51:2095] Leader for TabletID 72057594037927937 is [2:56:2097] sender: [2:74:2057] recipient: [2:14:2061] !Reboot 72057594037927937 (actor [2:56:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [2:56:2097] sender: [2:76:2057] recipient: [2:36:2083] Leader for TabletID 72057594037927937 is [2:56:2097] sender: [2:79:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [2:56:2097] sender: [2:80:2057] recipient: [2:78:2110] Leader for TabletID 72057594037927937 is [2:81:2111] sender: [2:82:2057] recipient: [2:78:2110] !Reboot 72057594037927937 (actor [2:56:2097]) rebooted! !Reboot 72057594037927937 (actor [2:56:2097]) tablet resolver refreshed! 
new actor is[2:81:2111] Leader for TabletID 72057594037927937 is [2:81:2111] sender: [2:135:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:54:2057] recipient: [3:52:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:54:2057] recipient: [3:52:2095] Leader for TabletID 72057594037927937 is [3:56:2097] sender: [3:57:2057] recipient: [3:52:2095] Leader for TabletID 72057594037927937 is [3:56:2097] sender: [3:74:2057] recipient: [3:14:2061] !Reboot 72057594037927937 (actor [3:56:2097]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [3:56:2097] sender: [3:76:2057] recipient: [3:36:2083] Leader for TabletID 72057594037927937 is [3:56:2097] sender: [3:78:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [3:56:2097] sender: [3:80:2057] recipient: [3:79:2110] Leader for TabletID 72057594037927937 is [3:81:2111] sender: [3:82:2057] recipient: [3:79:2110] !Reboot 72057594037927937 (actor [3:56:2097]) rebooted! !Reboot 72057594037927937 (actor [3:56:2097]) tablet resolver refreshed! new actor is[3:81:2111] Leader for TabletID 72057594037927937 is [3:81:2111] sender: [3:135:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:54:2057] recipient: [4:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:54:2057] recipient: [4:51:2095] Leader for TabletID 72057594037927937 is [4:56:2097] sender: [4:57:2057] recipient: [4:51:2095] Leader for TabletID 72057594037927937 is [4:56:2097] sender: [4:74:2057] recipient: [4:14:2061] !Reboot 72057594037927937 (actor [4:56:2097]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [4:56:2097] sender: [4:77:2057] recipient: [4:36:2083] Leader for TabletID 72057594037927937 is [4:56:2097] sender: [4:80:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [4:56:2097] sender: [4:81:2057] recipient: [4:79:2110] Leader for TabletID 72057594037927937 is [4:82:2111] sender: [4:83:2057] recipient: [4:79:2110] !Reboot 72057594037927937 (actor [4:56:2097]) rebooted! !Reboot 72057594037927937 (actor [4:56:2097]) tablet resolver refreshed! new actor is[4:82:2111] Leader for TabletID 72057594037927937 is [4:82:2111] sender: [4:136:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:54:2057] recipient: [5:50:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:54:2057] recipient: [5:50:2095] Leader for TabletID 72057594037927937 is [5:56:2097] sender: [5:57:2057] recipient: [5:50:2095] Leader for TabletID 72057594037927937 is [5:56:2097] sender: [5:74:2057] recipient: [5:14:2061] !Reboot 72057594037927937 (actor [5:56:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [5:56:2097] sender: [5:80:2057] recipient: [5:36:2083] Leader for TabletID 72057594037927937 is [5:56:2097] sender: [5:83:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [5:56:2097] sender: [5:84:2057] recipient: [5:82:2113] Leader for TabletID 72057594037927937 is [5:85:2114] sender: [5:86:2057] recipient: [5:82:2113] !Reboot 72057594037927937 (actor [5:56:2097]) rebooted! !Reboot 72057594037927937 (actor [5:56:2097]) tablet resolver refreshed! 
new actor is[5:85:2114] Leader for TabletID 72057594037927937 is [5:85:2114] sender: [5:139:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:54:2057] recipient: [6:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:54:2057] recipient: [6:51:2095] Leader for TabletID 72057594037927937 is [6:56:2097] sender: [6:57:2057] recipient: [6:51:2095] Leader for TabletID 72057594037927937 is [6:56:2097] sender: [6:74:2057] recipient: [6:14:2061] !Reboot 72057594037927937 (actor [6:56:2097]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [6:56:2097] sender: [6:80:2057] recipient: [6:36:2083] Leader for TabletID 72057594037927937 is [6:56:2097] sender: [6:83:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [6:56:2097] sender: [6:84:2057] recipient: [6:82:2113] Leader for TabletID 72057594037927937 is [6:85:2114] sender: [6:86:2057] recipient: [6:82:2113] !Reboot 72057594037927937 (actor [6:56:2097]) rebooted! !Reboot 72057594037927937 (actor [6:56:2097]) tablet resolver refreshed! new actor is[6:85:2114] Leader for TabletID 72057594037927937 is [6:85:2114] sender: [6:139:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:54:2057] recipient: [7:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:54:2057] recipient: [7:51:2095] Leader for TabletID 72057594037927937 is [7:56:2097] sender: [7:57:2057] recipient: [7:51:2095] Leader for TabletID 72057594037927937 is [7:56:2097] sender: [7:74:2057] recipient: [7:14:2061] !Reboot 72057594037927937 (actor [7:56:2097]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [7:56:2097] sender: [7:81:2057] recipient: [7:36:2083] Leader for TabletID 72057594037927937 is [7:56:2097] sender: [7:84:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [7:56:2097] sender: [7:85:2057] recipient: [7:83:2113] Leader for TabletID 72057594037927937 is [7:86:2114] sender: [7:87:2057] recipient: [7:83:2113] !Reboot 72057594037927937 (actor [7:56:2097]) rebooted! !Reboot 72057594037927937 (actor [7:56:2097]) tablet resolver refreshed! 
new actor is[7:86:2114] Leader for TabletID 72057594037927937 is [7:86:2114] sender: [7:140:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:54:2057] recipient: [8:50:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:54:2057] recipient: [8:50:2095] Leader for TabletID 72057594037927937 is [8:56:2097] sender: [8:57:2057] recipient: [8:50:2095] Leader for TabletID 72057594037927937 is [8:56:2097] sender: [8:74:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:54:2057] recipient: [9:52:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:54:2057] recipient: [9:52:2095] Leader for TabletID 72057594037927937 is [9:56:2097] sender: [9:57:2057] recipient: [9:52:2095] Leader for TabletID 72057594037927937 is [9:56:2097] sender: [9:74:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:54:2057] recipient: [10:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:54:2057] recipient: [10:51:2095] Leader for TabletID 72057594037927937 is [10:56:2097] sender: [10:57:2057] recipient: [10:51:2095] Leader for TabletID 72057594037927937 is [10:56:2097] sender: [10:74:2057] recipient: [10:14:2061] !Reboot 72057594037927937 (actor [10:56:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [10:56:2097] sender: [10:76:2057] recipient: [10:36:2083] Leader for TabletID 72057594037927937 is [10:56:2097] sender: [10:79:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [10:56:2097] sender: [10:80:2057] recipient: [10:78:2110] Leader for TabletID 72057594037927937 is [10:81:2111] sender: [10:82:2057] recipient: [10:78:2110] !Reboot 72057594037927937 (actor [10:56:2097]) rebooted! !Reboot 72057594037927937 (actor [10:56:2097]) tablet resolver refreshed! new actor is[10:81:2111] Leader for TabletID 72057594037927937 is [10:81:2111] sender: [10:135:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:54:2057] recipient: [11:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:54:2057] recipient: [11:51:2095] Leader for TabletID 72057594037927937 is [11:56:2097] sender: [11:57:2057] recipient: [11:51:2095] Leader for TabletID 72057594037927937 is [11:56:2097] sender: [11:74:2057] recipient: [11:14:2061] !Reboot 72057594037927937 (actor [11:56:2097]) on event NKikimr::TEvKeyValue::TEvExecuteTransaction ! Leader for TabletID 72057594037927937 is [11:56:2097] sender: [11:76:2057] recipient: [11:36:2083] Leader for TabletID 72057594037927937 is [11:56:2097] sender: [11:79:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [11:56:2097] sender: [11:80:2057] recipient: [11:78:2110] Leader for TabletID 72057594037927937 is [11:81:2111] sender: [11:82:2057] recipient: [11:78:2110] !Reboot 72057594037927937 (actor [11:56:2097]) rebooted! !Reboot 72057594037927937 (actor [11:56:2097]) tablet resolver refreshed! 
new actor is[11:81:2111] Leader for TabletID 72057594037927937 is [11:81:2111] sender: [11:135:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:54:2057] recipient: [12:52:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:54:2057] recipient: [12:52:2095] Leader for TabletID 72057594037927937 is [12:56:2097] sender: [12:57:2057] recipient: [12:52:2095] Leader for TabletID 72057594037927937 is [12:56:2097] sender: [12:74:2057] recipient: [12:14:2061] !Reboot 72057594037927937 (actor [12:56:2097]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [12:56:2097] sender: [12:77:2057] recipient: [12:36:2083] Leader for TabletID 72057594037927937 is [12:56:2097] sender: [12:80:2057] recipient: [12:79:2110] Leader for TabletID 72057594037927937 is [12:56:2097] sender: [12:81:2057] recipient: [12:14:2061] Leader for TabletID 72057594037927937 is [12:82:2111] sender: [12:83:2057] recipient: [12:79:2110] !Reboot 72057594037927937 (actor [12:56:2097]) rebooted! !Reboot 72057594037927937 (actor [12:56:2097]) tablet resolver refreshed! new actor is[12:82:2111] Leader for TabletID 72057594037927937 is [12:82:2111] sender: [12:136:2057] recipient: [12:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [13:54:2057] recipient: [13:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [13:54:2057] recipient: [13:51:2095] Leader for TabletID 72057594037927937 is [13:56:2097] sender: [13:57:2057] recipient: [13:51:2095] Leader for TabletID 72057594037927937 is [13:56:2097] sender: [13:74:2057] recipient: [13:14:2061] !Reboot 72057594037927937 (actor [13:56:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [13:56:2097] sender: [13:80:2057] recipient: [13:36:2083] Leader for TabletID 72057594037927937 is [13:56:2097] sender: [13:83:2057] recipient: [13:14:2061] Leader for TabletID 72057594037927937 is [13:56:2097] sender: [13:84:2057] recipient: [13:82:2113] Leader for TabletID 72057594037927937 is [13:85:2114] sender: [13:86:2057] recipient: [13:82:2113] !Reboot 72057594037927937 (actor [13:56:2097]) rebooted! !Reboot 72057594037927937 (actor [13:56:2097]) tablet resolver refreshed! new actor is[13:85:2114] Leader for TabletID 72057594037927937 is [13:85:2114] sender: [13:139:2057] recipient: [13:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [14:54:2057] recipient: [14:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [14:54:2057] recipient: [14:51:2095] Leader for TabletID 72057594037927937 is [14:56:2097] sender: [14:57:2057] recipient: [14:51:2095] Leader for TabletID 72057594037927937 is [14:56:2097] sender: [14:74:2057] recipient: [14:14:2061] !Reboot 72057594037927937 (actor [14:56:2097]) on event NKikimr::TEvKeyValue::TEvExecuteTransaction ! Leader for TabletID 72057594037927937 is [14:56:2097] sender: [14:80:2057] recipient: [14:36:2083] Leader for TabletID 72057594037927937 is [14:56:2097] sender: [14:83:2057] recipient: [14:14:2061] Leader for TabletID 72057594037927937 is [14:56:2097] sender: [14:84:2057] recipient: [14:82:2113] Leader for TabletID 72057594037927937 is [14:85:2114] sender: [14:86:2057] recipient: [14:82:2113] !Reboot 72057594037927937 (actor [14:56:2097]) rebooted! !Reboot 72057594037927937 (actor [14:56:2097]) tablet resolver refreshed! 
new actor is[14:85:2114] Leader for TabletID 72057594037927937 is [14:85:2114] sender: [14:139:2057] recipient: [14:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [15:54:2057] recipient: [15:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [15:54:2057] recipient: [15:51:2095] Leader for TabletID 72057594037927937 is [15:56:2097] sender: [15:57:2057] recipient: [15:51:2095] Leader for TabletID 72057594037927937 is [15:56:2097] sender: [15:74:2057] recipient: [15:14:2061] !Reboot 72057594037927937 (actor [15:56:2097]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [15:56:2097] sender: [15:81:2057] recipient: [15:36:2083] Leader for TabletID 72057594037927937 is [15:56:2097] sender: [15:84:2057] recipient: [15:14:2061] Leader for TabletID 72057594037927937 is [15:56:2097] sender: [15:85:2057] recipient: [15:83:2113] Leader for TabletID 72057594037927937 is [15:86:2114] sender: [15:87:2057] recipient: [15:83:2113] !Reboot 72057594037927937 (actor [15:56:2097]) rebooted! !Reboot 72057594037927937 (actor [15:56:2097]) tablet resolver refreshed! new actor is[15:86:2114] Leader for TabletID 72057594037927937 is [15:86:2114] sender: [15:140:2057] recipient: [15:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [16:54:2057] recipient: [16:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [16:54:2057] recipient: [16:51:2095] Leader for TabletID 72057594037927937 is [16:56:2097] sender: [16:57:2057] recipient: [16:51:2095] Leader for TabletID 72057594037927937 is [16:56:2097] sender: [16:74:2057] recipient: [16:14:2061] !Reboot 72057594037927937 (actor [16:56:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [16:56:2097] sender: [16:84:2057] recipient: [16:36:2083] Leader for TabletID 72057594037927937 is [16:56:2097] sender: [16:87:2057] recipient: [16:14:2061] Leader for TabletID 72057594037927937 is [16:56:2097] sender: [16:88:2057] recipient: [16:86:2116] Leader for TabletID 72057594037927937 is [16:89:2117] sender: [16:90:2057] recipient: [16:86:2116] !Reboot 72057594037927937 (actor [16:56:2097]) rebooted! !Reboot 72057594037927937 (actor [16:56:2097]) tablet resolver refreshed! new actor is[16:89:2117] Leader for TabletID 72057594037927937 is [16:89:2117] sender: [16:143:2057] recipient: [16:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [17:54:2057] recipient: [17:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [17:54:2057] recipient: [17:51:2095] Leader for TabletID 72057594037927937 is [17:56:2097] sender: [17:57:2057] recipient: [17:51:2095] Leader for TabletID 72057594037927937 is [17:56:2097] sender: [17:74:2057] recipient: [17:14:2061] !Reboot 72057594037927937 (actor [17:56:2097]) on event NKikimr::TEvKeyValue::TEvExecuteTransaction ! Leader for TabletID 72057594037927937 is [17:56:2097] sender: [17:84:2057] recipient: [17:36:2083] Leader for TabletID 72057594037927937 is [17:56:2097] sender: [17:87:2057] recipient: [17:14:2061] Leader for TabletID 72057594037927937 is [17:56:2097] sender: [17:88:2057] recipient: [17:86:2116] Leader for TabletID 72057594037927937 is [17:89:2117] sender: [17:90:2057] recipient: [17:86:2116] !Reboot 72057594037927937 (actor [17:56:2097]) rebooted! !Reboot 72057594037927937 (actor [17:56:2097]) tablet resolver refreshed! 
new actor is[17:89:2117] Leader for TabletID 72057594037927937 is [17:89:2117] sender: [17:143:2057] recipient: [17:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [18:54:2057] recipient: [18:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [18:54:2057] recipient: [18:51:2095] Leader for TabletID 72057594037927937 is [18:56:2097] sender: [18:57:2057] recipient: [18:51:2095] Leader for TabletID 72057594037927937 is [18:56:2097] sender: [18:74:2057] recipient: [18:14:2061] !Reboot 72057594037927937 (actor [18:56:2097]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [18:56:2097] sender: [18:85:2057] recipient: [18:36:2083] Leader for TabletID 72057594037927937 is [18:56:2097] sender: [18:87:2057] recipient: [18:14:2061] Leader for TabletID 72057594037927937 is [18:56:2097] sender: [18:89:2057] recipient: [18:88:2116] Leader for TabletID 72057594037927937 is [18:90:2117] sender: [18:91:2057] recipient: [18:88:2116] !Reboot 72057594037927937 (actor [18:56:2097]) rebooted! !Reboot 72057594037927937 (actor [18:56:2097]) tablet resolver refreshed! new actor is[18:90:2117] Leader for TabletID 72057594037927937 is [18:90:2117] sender: [18:144:2057] recipient: [18:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [19:54:2057] recipient: [19:50:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [19:54:2057] recipient: [19:50:2095] Leader for TabletID 72057594037927937 is [19:56:2097] sender: [19:57:2057] recipient: [19:50:2095] Leader for TabletID 72057594037927937 is [19:56:2097] sender: [19:74:2057] recipient: [19:14:2061] !Reboot 72057594037927937 (actor [19:56:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [19:56:2097] sender: [19:88:2057] recipient: [19:36:2083] Leader for TabletID 72057594037927937 is [19:56:2097] sender: [19:91:2057] recipient: [19:14:2061] Leader for TabletID 72057594037927937 is [19:56:2097] sender: [19:92:2057] recipient: [19:90:2119] Leader for TabletID 72057594037927937 is [19:93:2120] sender: [19:94:2057] recipient: [19:90:2119] !Reboot 72057594037927937 (actor [19:56:2097]) rebooted! !Reboot 72057594037927937 (actor [19:56:2097]) tablet resolver refreshed! new actor is[19:93:2120] Leader for TabletID 72057594037927937 is [19:93:2120] sender: [19:147:2057] recipient: [19:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [20:54:2057] recipient: [20:52:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [20:54:2057] recipient: [20:52:2095] Leader for TabletID 72057594037927937 is [20:56:2097] sender: [20:57:2057] recipient: [20:52:2095] Leader for TabletID 72057594037927937 is [20:56:2097] sender: [20:74:2057] recipient: [20:14:2061] !Reboot 72057594037927937 (actor [20:56:2097]) on event NKikimr::TEvKeyValue::TEvReadRange ! Leader for TabletID 72057594037927937 is [20:56:2097] sender: [20:88:2057] recipient: [20:36:2083] Leader for TabletID 72057594037927937 is [20:56:2097] sender: [20:91:2057] recipient: [20:14:2061] Leader for TabletID 72057594037927937 is [20:56:2097] sender: [20:92:2057] recipient: [20:90:2119] Leader for TabletID 72057594037927937 is [20:93:2120] sender: [20:94:2057] recipient: [20:90:2119] !Reboot 72057594037927937 (actor [20:56:2097]) rebooted! !Reboot 72057594037927937 (actor [20:56:2097]) tablet resolver refreshed! 
new actor is[20:93:2120] Leader for TabletID 72057594037927937 is [20:93:2120] sender: [20:147:2057] recipient: [20:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [21:54:2057] recipient: [21:50:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [21:54:2057] recipient: [21:50:2095] Leader for TabletID 72057594037927937 is [21:56:2097] sender: [21:57:2057] recipient: [21:50:2095] Leader for TabletID 72057594037927937 is [21:56:2097] sender: [21:74:2057] recipient: [21:14:2061] !Reboot 72057594037927937 (actor [21:56:2097]) on event NKikimr::TEvKeyValue::TEvNotify ! Leader for TabletID 72057594037927937 is [21:56:2097] sender: [21:89:2057] recipient: [21:36:2083] Leader for TabletID 72057594037927937 is [21:56:2097] sender: [21:92:2057] recipient: [21:14:2061] Leader for TabletID 72057594037927937 is [21:56:2097] sender: [21:93:2057] recipient: [21:91:2119] Leader for TabletID 72057594037927937 is [21:94:2120] sender: [21:95:2057] recipient: [21:91:2119] !Reboot 72057594037927937 (actor [21:56:2097]) rebooted! !Reboot 72057594037927937 (actor [21:56:2097]) tablet resolver refreshed! new actor is[21:94:2120] Leader for TabletID 72057594037927937 is [0:0:0] sender: [22:54:2057] recipient: [22:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [22:54:2057] recipient: [22:51:2095] Leader for TabletID 72057594037927937 is [22:56:2097] sender: [22:57:2057] recipient: [22:51:2095] Leader for TabletID 72057594037927937 is [22:56:2097] sender: [22:74:2057] recipient: [22:14:2061] >> TKeyValueTest::TestWriteReadRangeDataLimitThenLimitWorksNewApi [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/slow/unittest >> TPQTestSlow::TestWriteVeryBigMessage [GOOD] Test command err: Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:103:2057] recipient: [1:101:2135] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:103:2057] recipient: [1:101:2135] Leader for TabletID 72057594037927937 is [1:107:2139] sender: [1:108:2057] recipient: [1:101:2135] 2025-05-07T08:53:06.095755Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-05-07T08:53:06.095868Z node 1 :PERSQUEUE INFO: pq_impl.cpp:794: [PQ: 72057594037927937] doesn't have tx writes info Leader for TabletID 72057594037927938 is [0:0:0] sender: [1:149:2057] recipient: [1:147:2170] IGNORE Leader for TabletID 72057594037927938 is [0:0:0] sender: [1:149:2057] recipient: [1:147:2170] Leader for TabletID 72057594037927938 is [1:153:2174] sender: [1:154:2057] recipient: [1:147:2170] Leader for TabletID 72057594037927937 is [1:107:2139] sender: [1:179:2057] recipient: [1:14:2061] 2025-05-07T08:53:06.121083Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-05-07T08:53:06.140867Z node 1 :PERSQUEUE INFO: pq_impl.cpp:1481: [PQ: 72057594037927937] Config applied version 1 actor [1:177:2192] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 TopicName: "rt3.dc1--asdfgs--topic" Version: 1 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 1 MeteringMode: 
METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 1 Important: false } 2025-05-07T08:53:06.142181Z node 1 :PERSQUEUE INFO: partition_init.cpp:879: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [1:185:2198] 2025-05-07T08:53:06.145178Z node 1 :PERSQUEUE INFO: partition.cpp:557: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 2 [1:185:2198] 2025-05-07T08:53:06.147630Z node 1 :PERSQUEUE INFO: partition_init.cpp:879: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [1:186:2199] 2025-05-07T08:53:06.149618Z node 1 :PERSQUEUE INFO: partition.cpp:557: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 2 [1:186:2199] 2025-05-07T08:53:06.158484Z node 1 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|b93a5cca-d0001a58-a467add8-de3ce9b6_0 generated for partition 1 topic 'rt3.dc1--asdfgs--topic' owner default 2025-05-07T08:53:06.167530Z node 1 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|1a36563a-8c09022c-cbce6539-eb178a62_1 generated for partition 1 topic 'rt3.dc1--asdfgs--topic' owner default 2025-05-07T08:53:06.194175Z node 1 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|e21d4c27-d6568cf6-4b8b0a9e-e5cd6fd7_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default Leader for TabletID 72057594037927937 is [1:107:2139] sender: [1:245:2057] recipient: [1:99:2134] Leader for TabletID 72057594037927937 is [1:107:2139] sender: [1:248:2057] recipient: [1:14:2061] Leader for TabletID 72057594037927937 is [1:107:2139] sender: [1:249:2057] recipient: [1:247:2248] Leader for TabletID 72057594037927937 is [1:250:2249] sender: [1:251:2057] recipient: [1:247:2248] 2025-05-07T08:53:06.280730Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-05-07T08:53:06.280807Z node 1 :PERSQUEUE INFO: pq_impl.cpp:794: [PQ: 72057594037927937] doesn't have tx writes info 2025-05-07T08:53:06.281523Z node 1 :PERSQUEUE INFO: partition_init.cpp:879: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [1:299:2290] 2025-05-07T08:53:06.292567Z node 1 :PERSQUEUE INFO: partition_init.cpp:879: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [1:300:2291] 2025-05-07T08:53:06.334401Z node 1 :PERSQUEUE INFO: partition_init.cpp:773: [rt3.dc1--asdfgs--topic:0:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-05-07T08:53:06.334513Z node 1 :PERSQUEUE INFO: partition.cpp:557: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 3 [1:299:2290] 2025-05-07T08:53:06.340815Z node 1 :PERSQUEUE INFO: partition_init.cpp:773: [rt3.dc1--asdfgs--topic:1:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 
2025-05-07T08:53:06.340917Z node 1 :PERSQUEUE INFO: partition.cpp:557: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 3 [1:300:2291] Leader for TabletID 72057594037927937 is [1:250:2249] sender: [1:330:2057] recipient: [1:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:103:2057] recipient: [2:101:2135] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:103:2057] recipient: [2:101:2135] Leader for TabletID 72057594037927937 is [2:107:2139] sender: [2:108:2057] recipient: [2:101:2135] 2025-05-07T08:53:07.187223Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-05-07T08:53:07.187320Z node 2 :PERSQUEUE INFO: pq_impl.cpp:794: [PQ: 72057594037927937] doesn't have tx writes info Leader for TabletID 72057594037927938 is [0:0:0] sender: [2:149:2057] recipient: [2:147:2170] IGNORE Leader for TabletID 72057594037927938 is [0:0:0] sender: [2:149:2057] recipient: [2:147:2170] Leader for TabletID 72057594037927938 is [2:153:2174] sender: [2:154:2057] recipient: [2:147:2170] Leader for TabletID 72057594037927937 is [2:107:2139] sender: [2:179:2057] recipient: [2:14:2061] 2025-05-07T08:53:07.210334Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-05-07T08:53:07.211297Z node 2 :PERSQUEUE INFO: pq_impl.cpp:1481: [PQ: 72057594037927937] Config applied version 2 actor [2:177:2192] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 TopicName: "rt3.dc1--asdfgs--topic" Version: 2 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 2 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 2 Important: false } 2025-05-07T08:53:07.212071Z node 2 :PERSQUEUE INFO: partition_init.cpp:879: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [2:185:2198] 2025-05-07T08:53:07.215033Z node 2 :PERSQUEUE INFO: partition.cpp:557: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 2 [2:185:2198] 2025-05-07T08:53:07.217069Z node 2 :PERSQUEUE INFO: partition_init.cpp:879: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [2:186:2199] 2025-05-07T08:53:07.219719Z node 2 :PERSQUEUE INFO: partition.cpp:557: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 2 [2:186:2199] 2025-05-07T08:53:07.228293Z node 2 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|b89b6863-4c9594b6-ba3845e-5023ba88_0 generated for partition 1 topic 'rt3.dc1--asdfgs--topic' owner default 2025-05-07T08:53:07.237671Z node 2 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|7d166a94-49edca07-67aaf281-f2d9e93b_1 generated for partition 1 topic 'rt3.dc1--asdfgs--topic' owner default 2025-05-07T08:53:07.271918Z node 2 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|3e90e143-2dd3cfd8-f172486f-24451d6f_0 generated for partition 0 topic 
'rt3.dc1--asdfgs--topic' owner default !Reboot 72057594037927937 (actor [2:107:2139]) on event NKikimr::TEvPersQueue::TEvOffsets ! Leader for TabletID 72057594037927937 is [2:107:2139] sender: [2:244:2057] recipient: [2:99:2134] Leader for TabletID 72057594037927937 is [2:107:2139] sender: [2:246:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [2:107:2139] sender: [2:248:2057] recipient: [2:247:2247] Leader for TabletID 72057594037927937 is [2:249:2248] sender: [2:250:2057] recipient: [2:247:2247] 2025-05-07T08:53:07.337425Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-05-07T08:53:07.337519Z node 2 :PERSQUEUE INFO: pq_impl.cpp:794: [PQ: 72057594037927937] doesn't have tx writes info 2025-05-07T08:53:07.338498Z node 2 :PERSQUEUE INFO: partition_init.cpp:879: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [2:298:2289] 2025-05-07T08:53:07.341737Z node 2 :PERSQUEUE INFO: partition_init.cpp:879: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [2:299:2290] 2025-05-07T08:53:07.372409Z node 2 :PERSQUEUE INFO: partition_init.cpp:773: [rt3.dc1--asdfgs--topic:0:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-05-07T08:53:07.372544Z node 2 :PERSQUEUE INFO: partition.cpp:557: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 3 [2:298:2289] 2025-05-07T08:53:07.380600Z node 2 :PERSQUEUE INFO: partition_init.cpp:773: [rt3.dc1--asdfgs--topic:1:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-05-07T08:53:07.380702Z node 2 :PERSQUEUE INFO: partition.cpp:557: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 3 [2:299:2290] !Reboot 72057594037927937 (actor [2:107:2139]) rebooted! !Reboot 72057594037927937 (actor [2:107:2139]) tablet resolver refreshed! new actor is[2:249:2248] Leader for TabletID 72057594037927937 is [2:249:2248] sender: [2:351:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [2:249:2248] sender: [2:354:2057] recipient: [2:99:2134] Leader for TabletID 72057594037927937 is [2:249:2248] sender: [2:357:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [2:249:2248] sender: [2:358:2057] recipient: [2:356:2324] Leader for TabletID 72057594037927937 is [2:359:2325] sender: [2:360:2057] recipient: [2:356:2324] 2025-05-07T08:53:08.838390Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-05-07T08:53:08.838485Z node 2 :PERSQUEUE INFO: pq_impl.cpp:794: [PQ: 72057594037927937] doesn't have tx writes info 2025-05-07T08:53:08.839440Z node 2 :PERSQUEUE INFO: partition_init.cpp:879: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [2:410:2368] 2025-05-07T08:53:08.842592Z node 2 :PERSQUEUE INFO: partition_init.cpp:879: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [2:411:2369] 2025-05-07T08:53:08.875279Z node 2 :PERSQUEUE INFO: partition_init.cpp:773: [rt3.dc1--asdfgs--topic:0:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-05-07T08:53:08.875409Z node 2 :PERSQUEUE INFO: partition.cpp:557: [P ... 
:05.186624Z node 53 :PERSQUEUE INFO: partition_init.cpp:879: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [53:390:2356] 2025-05-07T08:54:05.190046Z node 53 :PERSQUEUE INFO: partition_init.cpp:879: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [53:391:2357] 2025-05-07T08:54:05.224331Z node 53 :PERSQUEUE INFO: partition_init.cpp:773: [rt3.dc1--asdfgs--topic:0:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-05-07T08:54:05.224444Z node 53 :PERSQUEUE INFO: partition.cpp:557: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 4 [53:390:2356] 2025-05-07T08:54:05.229173Z node 53 :PERSQUEUE INFO: partition_init.cpp:773: [rt3.dc1--asdfgs--topic:1:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-05-07T08:54:05.229256Z node 53 :PERSQUEUE INFO: partition.cpp:557: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 4 [53:391:2357] !Reboot 72057594037927937 (actor [53:250:2249]) rebooted! !Reboot 72057594037927937 (actor [53:250:2249]) tablet resolver refreshed! new actor is[53:339:2313] Leader for TabletID 72057594037927937 is [53:339:2313] sender: [53:442:2057] recipient: [53:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [54:103:2057] recipient: [54:101:2135] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [54:103:2057] recipient: [54:101:2135] Leader for TabletID 72057594037927937 is [54:107:2139] sender: [54:108:2057] recipient: [54:101:2135] 2025-05-07T08:54:07.502049Z node 54 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-05-07T08:54:07.502175Z node 54 :PERSQUEUE INFO: pq_impl.cpp:794: [PQ: 72057594037927937] doesn't have tx writes info Leader for TabletID 72057594037927938 is [0:0:0] sender: [54:149:2057] recipient: [54:147:2170] IGNORE Leader for TabletID 72057594037927938 is [0:0:0] sender: [54:149:2057] recipient: [54:147:2170] Leader for TabletID 72057594037927938 is [54:153:2174] sender: [54:154:2057] recipient: [54:147:2170] Leader for TabletID 72057594037927937 is [54:107:2139] sender: [54:179:2057] recipient: [54:14:2061] 2025-05-07T08:54:07.527461Z node 54 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-05-07T08:54:07.528587Z node 54 :PERSQUEUE INFO: pq_impl.cpp:1481: [PQ: 72057594037927937] Config applied version 54 actor [54:177:2192] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 TopicName: "rt3.dc1--asdfgs--topic" Version: 54 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 54 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 54 Important: false } 2025-05-07T08:54:07.529326Z node 54 :PERSQUEUE INFO: partition_init.cpp:879: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [54:185:2198] 2025-05-07T08:54:07.532812Z 
node 54 :PERSQUEUE INFO: partition.cpp:557: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 2 [54:185:2198] 2025-05-07T08:54:07.535248Z node 54 :PERSQUEUE INFO: partition_init.cpp:879: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [54:186:2199] 2025-05-07T08:54:07.538531Z node 54 :PERSQUEUE INFO: partition.cpp:557: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 2 [54:186:2199] 2025-05-07T08:54:07.562516Z node 54 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|9cbdd127-7843039b-290183d9-4355b6ad_0 generated for partition 1 topic 'rt3.dc1--asdfgs--topic' owner default 2025-05-07T08:54:07.583570Z node 54 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|c8583933-e597d021-eccb03b6-bb6909e4_1 generated for partition 1 topic 'rt3.dc1--asdfgs--topic' owner default 2025-05-07T08:54:07.617577Z node 54 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|8db47863-e8f9a923-dd311965-540e2986_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default Leader for TabletID 72057594037927937 is [54:107:2139] sender: [54:245:2057] recipient: [54:99:2134] Leader for TabletID 72057594037927937 is [54:107:2139] sender: [54:248:2057] recipient: [54:14:2061] Leader for TabletID 72057594037927937 is [54:107:2139] sender: [54:249:2057] recipient: [54:247:2248] Leader for TabletID 72057594037927937 is [54:250:2249] sender: [54:251:2057] recipient: [54:247:2248] 2025-05-07T08:54:07.698643Z node 54 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-05-07T08:54:07.698720Z node 54 :PERSQUEUE INFO: pq_impl.cpp:794: [PQ: 72057594037927937] doesn't have tx writes info 2025-05-07T08:54:07.699924Z node 54 :PERSQUEUE INFO: partition_init.cpp:879: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [54:299:2290] 2025-05-07T08:54:07.703646Z node 54 :PERSQUEUE INFO: partition_init.cpp:879: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [54:300:2291] 2025-05-07T08:54:07.742107Z node 54 :PERSQUEUE INFO: partition_init.cpp:773: [rt3.dc1--asdfgs--topic:0:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-05-07T08:54:07.742226Z node 54 :PERSQUEUE INFO: partition.cpp:557: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 3 [54:299:2290] 2025-05-07T08:54:07.750821Z node 54 :PERSQUEUE INFO: partition_init.cpp:773: [rt3.dc1--asdfgs--topic:1:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 
2025-05-07T08:54:07.750930Z node 54 :PERSQUEUE INFO: partition.cpp:557: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 3 [54:300:2291] Leader for TabletID 72057594037927937 is [54:250:2249] sender: [54:332:2057] recipient: [54:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [55:103:2057] recipient: [55:101:2135] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [55:103:2057] recipient: [55:101:2135] Leader for TabletID 72057594037927937 is [55:107:2139] sender: [55:108:2057] recipient: [55:101:2135] 2025-05-07T08:54:08.429018Z node 55 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-05-07T08:54:08.429099Z node 55 :PERSQUEUE INFO: pq_impl.cpp:794: [PQ: 72057594037927937] doesn't have tx writes info Leader for TabletID 72057594037927938 is [0:0:0] sender: [55:149:2057] recipient: [55:147:2170] IGNORE Leader for TabletID 72057594037927938 is [0:0:0] sender: [55:149:2057] recipient: [55:147:2170] Leader for TabletID 72057594037927938 is [55:153:2174] sender: [55:154:2057] recipient: [55:147:2170] Leader for TabletID 72057594037927937 is [55:107:2139] sender: [55:179:2057] recipient: [55:14:2061] 2025-05-07T08:54:08.457379Z node 55 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-05-07T08:54:08.458505Z node 55 :PERSQUEUE INFO: pq_impl.cpp:1481: [PQ: 72057594037927937] Config applied version 55 actor [55:177:2192] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 TopicName: "rt3.dc1--asdfgs--topic" Version: 55 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 55 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 55 Important: false } 2025-05-07T08:54:08.459301Z node 55 :PERSQUEUE INFO: partition_init.cpp:879: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [55:185:2198] 2025-05-07T08:54:08.462603Z node 55 :PERSQUEUE INFO: partition.cpp:557: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 2 [55:185:2198] 2025-05-07T08:54:08.464970Z node 55 :PERSQUEUE INFO: partition_init.cpp:879: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [55:186:2199] 2025-05-07T08:54:08.467194Z node 55 :PERSQUEUE INFO: partition.cpp:557: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 2 [55:186:2199] 2025-05-07T08:54:08.482245Z node 55 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|d40c7cf1-fc5617c-9c4ca59b-814dd02_0 generated for partition 1 topic 'rt3.dc1--asdfgs--topic' owner default 2025-05-07T08:54:08.492618Z node 55 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|d94dbc59-aaec16a-aa5b6427-fc537469_1 generated for partition 1 topic 'rt3.dc1--asdfgs--topic' owner default 2025-05-07T08:54:08.530885Z node 55 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie 
default|931d1af3-63ff2bbe-49e24898-f24adf12_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default Leader for TabletID 72057594037927937 is [55:107:2139] sender: [55:245:2057] recipient: [55:99:2134] Leader for TabletID 72057594037927937 is [55:107:2139] sender: [55:248:2057] recipient: [55:14:2061] Leader for TabletID 72057594037927937 is [55:107:2139] sender: [55:249:2057] recipient: [55:247:2248] Leader for TabletID 72057594037927937 is [55:250:2249] sender: [55:251:2057] recipient: [55:247:2248] 2025-05-07T08:54:08.602544Z node 55 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-05-07T08:54:08.602617Z node 55 :PERSQUEUE INFO: pq_impl.cpp:794: [PQ: 72057594037927937] doesn't have tx writes info 2025-05-07T08:54:08.603610Z node 55 :PERSQUEUE INFO: partition_init.cpp:879: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [55:299:2290] 2025-05-07T08:54:08.612066Z node 55 :PERSQUEUE INFO: partition_init.cpp:879: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [55:300:2291] 2025-05-07T08:54:08.636024Z node 55 :PERSQUEUE INFO: partition_init.cpp:773: [rt3.dc1--asdfgs--topic:0:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-05-07T08:54:08.636120Z node 55 :PERSQUEUE INFO: partition.cpp:557: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 3 [55:299:2290] 2025-05-07T08:54:08.652801Z node 55 :PERSQUEUE INFO: partition_init.cpp:773: [rt3.dc1--asdfgs--topic:1:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-05-07T08:54:08.652900Z node 55 :PERSQUEUE INFO: partition.cpp:557: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 3 [55:300:2291] Leader for TabletID 72057594037927937 is [55:250:2249] sender: [55:332:2057] recipient: [55:14:2061] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/idx_test/unittest >> YdbIndexTable::MultiShardTableOneIndexPkOverlap [GOOD] Test command err: Trying to start YDB, gRPC: 8033, MsgBus: 24530 2025-05-07T08:51:40.275705Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501623813528203812:2068];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:51:40.275806Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/001f06/r3tmp/tmpJREAFX/pdisk_1.dat 2025-05-07T08:51:40.830194Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:51:40.858220Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:51:40.858322Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:51:40.860177Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 8033, node 1 2025-05-07T08:51:40.974669Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or 
outdated, will use file: (empty maybe) 2025-05-07T08:51:40.974706Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:51:40.974723Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:51:40.974865Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:24530 TClient is connected to server localhost:24530 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:51:41.747966Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:51:41.783860Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T08:51:41.797941Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:51:41.993390Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:51:42.237134Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:51:42.346156Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:51:44.461053Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623830708074633:2408], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:51:44.461223Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:51:44.944492Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T08:51:45.017708Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T08:51:45.085015Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T08:51:45.128832Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T08:51:45.204863Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T08:51:45.265838Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T08:51:45.293550Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501623813528203812:2068];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:51:45.293594Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:51:45.354494Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T08:51:45.475859Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623835003042602:2473], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:51:45.475930Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:51:45.476159Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623835003042607:2476], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:51:45.480702Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-05-07T08:51:45.500525Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710669, at schemeshard: 72057594046644480 2025-05-07T08:51:45.501256Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501623835003042609:2477], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T08:51:45.588753Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501623835003042660:3426] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:51:47.133337Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480 2025-05-07T08:51:48.452315Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710673. Ctx: { TraceId: 01jtmz39jqff7v0wwws9pegyh1, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTU2ZGUyZDAtNDdiZjFiMzMtNzBkZmM3ZmYtMTg0ZDY5MTI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:51:48.492621Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710674. Ctx: { TraceId: 01jtmz39jx9bw6ces2wpsvhhaf, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NjM4NWRjZmYtMTZjYTg2OTMtZmM4Nzc1N2UtNGFjNTk4MGM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:51:48.511670Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710675. Ctx: { TraceId: 01jtmz39jx4xd4y91ewkef45vy, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NjIyMWE4ZWEtNWYxMGU0NjUtYjU0NjU3MWQtNDY0NDMyMmI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:51:48.603745Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710676. Ctx: { TraceId: 01jtmz39jx4xd4y91ewkef45vy, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NjIyMWE4ZWEtNWYxMGU0NjUtYjU0NjU3MWQtNDY0NDMyMmI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:51:48.610966Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710679. Ctx: { TraceId: 01jtmz39jx9bw6ces2wpsvhhaf, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NjM4NWRjZmYtMTZjYTg2OTMtZmM4Nzc1N2UtNGFjNTk4MGM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:51:48.611669Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710677. Ctx: { TraceId: 01jtmz39jqff7v0wwws9pegyh1, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTU2ZGUyZDAtNDdiZjFiMzMtNzBkZmM3ZmYtMTg0ZDY5MTI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:51:48.612290Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710678. Ctx: { TraceId: 01jtmz39mvdcz89f2pysknyvw4, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_ ... zViNzQtYzdjNjg5Y2ItN2UwNGUyNjMtYTJiNDQ0Yg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:54:00.356285Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976719455. Ctx: { TraceId: 01jtmz7act07hb7j7f6fxbf7vk, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=N2FjZDEwZjYtNGM4NzVjYmItODQ4ODhmM2ItMjEzNGFhMGI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-05-07T08:54:00.365055Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976719456. Ctx: { TraceId: 01jtmz7act07hb7j7f6fxbf7vk, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=N2FjZDEwZjYtNGM4NzVjYmItODQ4ODhmM2ItMjEzNGFhMGI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:54:00.368554Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976719457. Ctx: { TraceId: 01jtmz7act07hb7j7f6fxbf7vk, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=N2FjZDEwZjYtNGM4NzVjYmItODQ4ODhmM2ItMjEzNGFhMGI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:54:00.389005Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976719458. Ctx: { TraceId: 01jtmz7adgcyznk1a460mtry6g, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZDM2NjAyMzQtYzQwNTlmNDgtMjBmZjYwOTgtMjg5MDYyMjY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:54:00.394221Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976719459. Ctx: { TraceId: 01jtmz7adgcyznk1a460mtry6g, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZDM2NjAyMzQtYzQwNTlmNDgtMjBmZjYwOTgtMjg5MDYyMjY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:54:00.398795Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976719460. Ctx: { TraceId: 01jtmz7adydm2bb95g1tnzb5pc, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZWY5MDM5ZmMtMzNkZjk0MGUtYmE0NzBhNTUtM2JmNTI4ODk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:54:00.399214Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976719461. Ctx: { TraceId: 01jtmz7adyb000axta1t7aakxs, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=OTZmZTA1OGEtYWIxNzJjNTEtYTVmODc5MDItNTk4YzU2MWI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:54:00.400957Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976719462. Ctx: { TraceId: 01jtmz7adgcyznk1a460mtry6g, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZDM2NjAyMzQtYzQwNTlmNDgtMjBmZjYwOTgtMjg5MDYyMjY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:54:00.406535Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976719463. Ctx: { TraceId: 01jtmz7ae79g5txr66jz7x20v3, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YzY1NTk2NDEtNGQxMDI5MjQtYzY2MDZkYmEtYzQyOTc5YTY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:54:00.408825Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976719464. Ctx: { TraceId: 01jtmz7adyb000axta1t7aakxs, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=OTZmZTA1OGEtYWIxNzJjNTEtYTVmODc5MDItNTk4YzU2MWI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:54:00.412753Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976719465. Ctx: { TraceId: 01jtmz7adydm2bb95g1tnzb5pc, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZWY5MDM5ZmMtMzNkZjk0MGUtYmE0NzBhNTUtM2JmNTI4ODk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-05-07T08:54:00.423089Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976719466. Ctx: { TraceId: 01jtmz7adyb000axta1t7aakxs, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=OTZmZTA1OGEtYWIxNzJjNTEtYTVmODc5MDItNTk4YzU2MWI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:54:00.443154Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976719467. Ctx: { TraceId: 01jtmz7aem1bj540pm6c95v4z0, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NWYxYzViNzQtYzdjNjg5Y2ItN2UwNGUyNjMtYTJiNDQ0Yg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:54:00.446874Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976719468. Ctx: { TraceId: 01jtmz7ae79g5txr66jz7x20v3, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YzY1NTk2NDEtNGQxMDI5MjQtYzY2MDZkYmEtYzQyOTc5YTY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:54:00.459324Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976719469. Ctx: { TraceId: 01jtmz7aem1bj540pm6c95v4z0, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NWYxYzViNzQtYzdjNjg5Y2ItN2UwNGUyNjMtYTJiNDQ0Yg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:54:00.473174Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976719470. Ctx: { TraceId: 01jtmz7agh40t6t6ntpb2bz5kc, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZjBjMjMzOTEtN2MwZmU0OTYtYzlmZGNmOWYtZGMzYzhlM2M=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:54:00.480805Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976719471. Ctx: { TraceId: 01jtmz7agh40t6t6ntpb2bz5kc, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZjBjMjMzOTEtN2MwZmU0OTYtYzlmZGNmOWYtZGMzYzhlM2M=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:54:00.483644Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976719472. Ctx: { TraceId: 01jtmz7agh40t6t6ntpb2bz5kc, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZjBjMjMzOTEtN2MwZmU0OTYtYzlmZGNmOWYtZGMzYzhlM2M=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:54:00.494629Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976719473. Ctx: { TraceId: 01jtmz7ah74pp8p44mjpx7n33s, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=N2FjZDEwZjYtNGM4NzVjYmItODQ4ODhmM2ItMjEzNGFhMGI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:54:00.501193Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976719475. Ctx: { TraceId: 01jtmz7ah7cxy4va7wnqwr3d59, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZWY5MDM5ZmMtMzNkZjk0MGUtYmE0NzBhNTUtM2JmNTI4ODk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:54:00.503228Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976719474. Ctx: { TraceId: 01jtmz7ah70tqf7h23tyn8w5xb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=OTZmZTA1OGEtYWIxNzJjNTEtYTVmODc5MDItNTk4YzU2MWI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-05-07T08:54:00.512553Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976719476. Ctx: { TraceId: 01jtmz7ahgbr0dw1pgrqwx8cza, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YzY1NTk2NDEtNGQxMDI5MjQtYzY2MDZkYmEtYzQyOTc5YTY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:54:00.514970Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976719477. Ctx: { TraceId: 01jtmz7ah7cxy4va7wnqwr3d59, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZWY5MDM5ZmMtMzNkZjk0MGUtYmE0NzBhNTUtM2JmNTI4ODk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:54:00.516711Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976719478. Ctx: { TraceId: 01jtmz7ah70tqf7h23tyn8w5xb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=OTZmZTA1OGEtYWIxNzJjNTEtYTVmODc5MDItNTk4YzU2MWI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:54:00.530822Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976719479. Ctx: { TraceId: 01jtmz7aj55xvhvg4rsm2z28va, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NWYxYzViNzQtYzdjNjg5Y2ItN2UwNGUyNjMtYTJiNDQ0Yg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:54:00.535687Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976719480. Ctx: { TraceId: 01jtmz7aj55xvhvg4rsm2z28va, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NWYxYzViNzQtYzdjNjg5Y2ItN2UwNGUyNjMtYTJiNDQ0Yg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:54:00.542139Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976719481. Ctx: { TraceId: 01jtmz7ajs577qnm116ehdcj9g, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=N2FjZDEwZjYtNGM4NzVjYmItODQ4ODhmM2ItMjEzNGFhMGI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root finished with status: SUCCESS 2025-05-07T08:54:00.554548Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976719482. Ctx: { TraceId: 01jtmz7ak19tp82gq1b1nr71vz, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZjBjMjMzOTEtN2MwZmU0OTYtYzlmZGNmOWYtZGMzYzhlM2M=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:54:00.559666Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976719483. Ctx: { TraceId: 01jtmz7ak2ajemc4x4g4nz3xsa, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZDM2NjAyMzQtYzQwNTlmNDgtMjBmZjYwOTgtMjg5MDYyMjY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:54:00.565407Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976719485. Ctx: { TraceId: 01jtmz7ak19tp82gq1b1nr71vz, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZjBjMjMzOTEtN2MwZmU0OTYtYzlmZGNmOWYtZGMzYzhlM2M=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:54:00.567850Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976719484. 
Ctx: { TraceId: 01jtmz7aka8ddv1ceywb72s4av, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZWY5MDM5ZmMtMzNkZjk0MGUtYmE0NzBhNTUtM2JmNTI4ODk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:54:00.569516Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976719486. Ctx: { TraceId: 01jtmz7ak2ajemc4x4g4nz3xsa, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZDM2NjAyMzQtYzQwNTlmNDgtMjBmZjYwOTgtMjg5MDYyMjY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root finished with status: SUCCESS 2025-05-07T08:54:00.583516Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976719487. Ctx: { TraceId: 01jtmz7akzdcwr6fbqqpyn02a6, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YzY1NTk2NDEtNGQxMDI5MjQtYzY2MDZkYmEtYzQyOTc5YTY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root finished with status: SUCCESS finished with status: SUCCESS finished with status: SUCCESS >> DataStreams::TestNonChargeableUser [GOOD] >> DataStreams::TestPutEmptyMessage >> TPersQueueCommonTest::TestLimiterLimitsWithUserPayloadRateLimit [GOOD] |90.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/mind/ut/ydb-core-mind-ut |90.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/mind/ut/ydb-core-mind-ut |90.1%| [LD] {RESULT} $(B)/ydb/core/mind/ut/ydb-core-mind-ut ------- [TM] {asan, default-linux-x86_64, release} ydb/core/keyvalue/ut/unittest >> TKeyValueTest::TestWriteReadRangeDataLimitThenLimitWorksNewApi [GOOD] Test command err: Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:54:2057] recipient: [1:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:54:2057] recipient: [1:51:2095] Leader for TabletID 72057594037927937 is [1:56:2097] sender: [1:57:2057] recipient: [1:51:2095] Leader for TabletID 72057594037927937 is [1:56:2097] sender: [1:74:2057] recipient: [1:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:54:2057] recipient: [2:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:54:2057] recipient: [2:51:2095] Leader for TabletID 72057594037927937 is [2:56:2097] sender: [2:57:2057] recipient: [2:51:2095] Leader for TabletID 72057594037927937 is [2:56:2097] sender: [2:74:2057] recipient: [2:14:2061] !Reboot 72057594037927937 (actor [2:56:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [2:56:2097] sender: [2:76:2057] recipient: [2:36:2083] Leader for TabletID 72057594037927937 is [2:56:2097] sender: [2:79:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [2:56:2097] sender: [2:80:2057] recipient: [2:78:2110] Leader for TabletID 72057594037927937 is [2:81:2111] sender: [2:82:2057] recipient: [2:78:2110] !Reboot 72057594037927937 (actor [2:56:2097]) rebooted! !Reboot 72057594037927937 (actor [2:56:2097]) tablet resolver refreshed! 
new actor is[2:81:2111] Leader for TabletID 72057594037927937 is [2:81:2111] sender: [2:135:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:54:2057] recipient: [3:52:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:54:2057] recipient: [3:52:2095] Leader for TabletID 72057594037927937 is [3:56:2097] sender: [3:57:2057] recipient: [3:52:2095] Leader for TabletID 72057594037927937 is [3:56:2097] sender: [3:74:2057] recipient: [3:14:2061] !Reboot 72057594037927937 (actor [3:56:2097]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [3:56:2097] sender: [3:76:2057] recipient: [3:36:2083] Leader for TabletID 72057594037927937 is [3:56:2097] sender: [3:78:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [3:56:2097] sender: [3:80:2057] recipient: [3:79:2110] Leader for TabletID 72057594037927937 is [3:81:2111] sender: [3:82:2057] recipient: [3:79:2110] !Reboot 72057594037927937 (actor [3:56:2097]) rebooted! !Reboot 72057594037927937 (actor [3:56:2097]) tablet resolver refreshed! new actor is[3:81:2111] Leader for TabletID 72057594037927937 is [3:81:2111] sender: [3:135:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:54:2057] recipient: [4:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:54:2057] recipient: [4:51:2095] Leader for TabletID 72057594037927937 is [4:56:2097] sender: [4:57:2057] recipient: [4:51:2095] Leader for TabletID 72057594037927937 is [4:56:2097] sender: [4:74:2057] recipient: [4:14:2061] !Reboot 72057594037927937 (actor [4:56:2097]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [4:56:2097] sender: [4:77:2057] recipient: [4:36:2083] Leader for TabletID 72057594037927937 is [4:56:2097] sender: [4:80:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [4:56:2097] sender: [4:81:2057] recipient: [4:79:2110] Leader for TabletID 72057594037927937 is [4:82:2111] sender: [4:83:2057] recipient: [4:79:2110] !Reboot 72057594037927937 (actor [4:56:2097]) rebooted! !Reboot 72057594037927937 (actor [4:56:2097]) tablet resolver refreshed! new actor is[4:82:2111] Leader for TabletID 72057594037927937 is [4:82:2111] sender: [4:136:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:54:2057] recipient: [5:50:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:54:2057] recipient: [5:50:2095] Leader for TabletID 72057594037927937 is [5:56:2097] sender: [5:57:2057] recipient: [5:50:2095] Leader for TabletID 72057594037927937 is [5:56:2097] sender: [5:74:2057] recipient: [5:14:2061] !Reboot 72057594037927937 (actor [5:56:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [5:56:2097] sender: [5:80:2057] recipient: [5:36:2083] Leader for TabletID 72057594037927937 is [5:56:2097] sender: [5:83:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [5:56:2097] sender: [5:84:2057] recipient: [5:82:2113] Leader for TabletID 72057594037927937 is [5:85:2114] sender: [5:86:2057] recipient: [5:82:2113] !Reboot 72057594037927937 (actor [5:56:2097]) rebooted! !Reboot 72057594037927937 (actor [5:56:2097]) tablet resolver refreshed! 
new actor is[5:85:2114] Leader for TabletID 72057594037927937 is [5:85:2114] sender: [5:139:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:54:2057] recipient: [6:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:54:2057] recipient: [6:51:2095] Leader for TabletID 72057594037927937 is [6:56:2097] sender: [6:57:2057] recipient: [6:51:2095] Leader for TabletID 72057594037927937 is [6:56:2097] sender: [6:74:2057] recipient: [6:14:2061] !Reboot 72057594037927937 (actor [6:56:2097]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [6:56:2097] sender: [6:80:2057] recipient: [6:36:2083] Leader for TabletID 72057594037927937 is [6:56:2097] sender: [6:83:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [6:56:2097] sender: [6:84:2057] recipient: [6:82:2113] Leader for TabletID 72057594037927937 is [6:85:2114] sender: [6:86:2057] recipient: [6:82:2113] !Reboot 72057594037927937 (actor [6:56:2097]) rebooted! !Reboot 72057594037927937 (actor [6:56:2097]) tablet resolver refreshed! new actor is[6:85:2114] Leader for TabletID 72057594037927937 is [6:85:2114] sender: [6:139:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:54:2057] recipient: [7:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:54:2057] recipient: [7:51:2095] Leader for TabletID 72057594037927937 is [7:56:2097] sender: [7:57:2057] recipient: [7:51:2095] Leader for TabletID 72057594037927937 is [7:56:2097] sender: [7:74:2057] recipient: [7:14:2061] !Reboot 72057594037927937 (actor [7:56:2097]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [7:56:2097] sender: [7:81:2057] recipient: [7:36:2083] Leader for TabletID 72057594037927937 is [7:56:2097] sender: [7:84:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [7:56:2097] sender: [7:85:2057] recipient: [7:83:2113] Leader for TabletID 72057594037927937 is [7:86:2114] sender: [7:87:2057] recipient: [7:83:2113] !Reboot 72057594037927937 (actor [7:56:2097]) rebooted! !Reboot 72057594037927937 (actor [7:56:2097]) tablet resolver refreshed! new actor is[7:86:2114] Leader for TabletID 72057594037927937 is [7:86:2114] sender: [7:140:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:54:2057] recipient: [8:50:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:54:2057] recipient: [8:50:2095] Leader for TabletID 72057594037927937 is [8:56:2097] sender: [8:57:2057] recipient: [8:50:2095] Leader for TabletID 72057594037927937 is [8:56:2097] sender: [8:74:2057] recipient: [8:14:2061] !Reboot 72057594037927937 (actor [8:56:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [8:56:2097] sender: [8:83:2057] recipient: [8:36:2083] Leader for TabletID 72057594037927937 is [8:56:2097] sender: [8:85:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [8:56:2097] sender: [8:87:2057] recipient: [8:86:2115] Leader for TabletID 72057594037927937 is [8:88:2116] sender: [8:89:2057] recipient: [8:86:2115] !Reboot 72057594037927937 (actor [8:56:2097]) rebooted! !Reboot 72057594037927937 (actor [8:56:2097]) tablet resolver refreshed! 
new actor is[8:88:2116] Leader for TabletID 72057594037927937 is [8:88:2116] sender: [8:142:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:54:2057] recipient: [9:52:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:54:2057] recipient: [9:52:2095] Leader for TabletID 72057594037927937 is [9:56:2097] sender: [9:57:2057] recipient: [9:52:2095] Leader for TabletID 72057594037927937 is [9:56:2097] sender: [9:74:2057] recipient: [9:14:2061] !Reboot 72057594037927937 (actor [9:56:2097]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [9:56:2097] sender: [9:83:2057] recipient: [9:36:2083] Leader for TabletID 72057594037927937 is [9:56:2097] sender: [9:86:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [9:56:2097] sender: [9:87:2057] recipient: [9:85:2115] Leader for TabletID 72057594037927937 is [9:88:2116] sender: [9:89:2057] recipient: [9:85:2115] !Reboot 72057594037927937 (actor [9:56:2097]) rebooted! !Reboot 72057594037927937 (actor [9:56:2097]) tablet resolver refreshed! new actor is[9:88:2116] Leader for TabletID 72057594037927937 is [9:88:2116] sender: [9:142:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:54:2057] recipient: [10:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:54:2057] recipient: [10:51:2095] Leader for TabletID 72057594037927937 is [10:56:2097] sender: [10:57:2057] recipient: [10:51:2095] Leader for TabletID 72057594037927937 is [10:56:2097] sender: [10:74:2057] recipient: [10:14:2061] !Reboot 72057594037927937 (actor [10:56:2097]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [10:56:2097] sender: [10:84:2057] recipient: [10:36:2083] Leader for TabletID 72057594037927937 is [10:56:2097] sender: [10:87:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [10:56:2097] sender: [10:88:2057] recipient: [10:86:2115] Leader for TabletID 72057594037927937 is [10:89:2116] sender: [10:90:2057] recipient: [10:86:2115] !Reboot 72057594037927937 (actor [10:56:2097]) rebooted! !Reboot 72057594037927937 (actor [10:56:2097]) tablet resolver refreshed! 
new actor is[10:89:2116] Leader for TabletID 72057594037927937 is [10:89:2116] sender: [10:143:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:54:2057] recipient: [11:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:54:2057] recipient: [11:51:2095] Leader for TabletID 72057594037927937 is [11:56:2097] sender: [11:57:2057] recipient: [11:51:2095] Leader for TabletID 72057594037927937 is [11:56:2097] sender: [11:74:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:54:2057] recipient: [12:52:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:54:2057] recipient: [12:52:2095] Leader for TabletID 72057594037927937 is [12:56:2097] sender: [12:57:2057] recipient: [12:52:2095] Leader for TabletID 72057594037927937 is [12:56:2097] sender: [12:74:2057] recipient: [12:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [13:54:2057] recipient: [13:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [13:54:2057] recipient: [13:51:2095] Leader for TabletID 72057594037927937 is [13:56:2097] sender: [13:57:2057] recipient: [13:51:2095] Leader for TabletID 72057594037927937 is [13:56:2097] sender: [13:74:2057] recipient: [13:14:2061] !Reboot 72057594037927937 (actor [13:56:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [13:56:2097] sender: [13:76:2057] recipient: [13:36:2083] Leader for TabletID 72057594037927937 is [13:56:2097] sender: [13:79:2057] recipient: [13:14:2061] Leader for TabletID 72057594037927937 is [13:56:2097] sender: [13:80:2057] recipient: [13:78:2110] Leader for TabletID 72057594037927937 is [13:81:2111] sender: [13:82:2057] recipient: [13:78:2110] !Reboot 72057594037927937 (actor [13:56:2097]) rebooted! !Reboot 72057594037927937 (actor [13:56:2097]) tablet resolver refreshed! new actor is[13:81:2111] Leader for TabletID 72057594037927937 is [13:81:2111] sender: [13:135:2057] recipient: [13:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [14:54:2057] recipient: [14:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [14:54:2057] recipient: [14:51:2095] Leader for TabletID 72057594037927937 is [14:56:2097] sender: [14:57:2057] recipient: [14:51:2095] Leader for TabletID 72057594037927937 is [14:56:2097] sender: [14:74:2057] recipient: [14:14:2061] !Reboot 72057594037927937 (actor [14:56:2097]) on event NKikimr::TEvKeyValue::TEvExecuteTransaction ! Leader for TabletID 72057594037927937 is [14:56:2097] sender: [14:76:2057] recipient: [14:36:2083] Leader for TabletID 72057594037927937 is [14:56:2097] sender: [14:79:2057] recipient: [14:14:2061] Leader for TabletID 72057594037927937 is [14:56:2097] sender: [14:80:2057] recipient: [14:78:2110] Leader for TabletID 72057594037927937 is [14:81:2111] sender: [14:82:2057] recipient: [14:78:2110] !Reboot 72057594037927937 (actor [14:56:2097]) rebooted! !Reboot 72057594037927937 (actor [14:56:2097]) tablet resolver refreshed! 
new actor is[14:81:2111] Leader for TabletID 72057594037927937 is [14:81:2111] sender: [14:135:2057] recipient: [14:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [15:54:2057] recipient: [15:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [15:54:2057] recipient: [15:51:2095] Leader for TabletID 72057594037927937 is [15:56:2097] sender: [15:57:2057] recipient: [15:51:2095] Leader for TabletID 72057594037927937 is [15:56:2097] sender: [15:74:2057] recipient: [15:14:2061] !Reboot 72057594037927937 (actor [15:56:2097]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [15:56:2097] sender: [15:77:2057] recipient: [15:36:2083] Leader for TabletID 72057594037927937 is [15:56:2097] sender: [15:80:2057] recipient: [15:14:2061] Leader for TabletID 72057594037927937 is [15:56:2097] sender: [15:81:2057] recipient: [15:79:2110] Leader for TabletID 72057594037927937 is [15:82:2111] sender: [15:83:2057] recipient: [15:79:2110] !Reboot 72057594037927937 (actor [15:56:2097]) rebooted! !Reboot 72057594037927937 (actor [15:56:2097]) tablet resolver refreshed! new actor is[15:82:2111] Leader for TabletID 72057594037927937 is [15:82:2111] sender: [15:136:2057] recipient: [15:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [16:54:2057] recipient: [16:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [16:54:2057] recipient: [16:51:2095] Leader for TabletID 72057594037927937 is [16:56:2097] sender: [16:57:2057] recipient: [16:51:2095] Leader for TabletID 72057594037927937 is [16:56:2097] sender: [16:74:2057] recipient: [16:14:2061] !Reboot 72057594037927937 (actor [16:56:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [16:56:2097] sender: [16:80:2057] recipient: [16:36:2083] Leader for TabletID 72057594037927937 is [16:56:2097] sender: [16:83:2057] recipient: [16:14:2061] Leader for TabletID 72057594037927937 is [16:56:2097] sender: [16:84:2057] recipient: [16:82:2113] Leader for TabletID 72057594037927937 is [16:85:2114] sender: [16:86:2057] recipient: [16:82:2113] !Reboot 72057594037927937 (actor [16:56:2097]) rebooted! !Reboot 72057594037927937 (actor [16:56:2097]) tablet resolver refreshed! new actor is[16:85:2114] Leader for TabletID 72057594037927937 is [16:85:2114] sender: [16:139:2057] recipient: [16:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [17:54:2057] recipient: [17:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [17:54:2057] recipient: [17:51:2095] Leader for TabletID 72057594037927937 is [17:56:2097] sender: [17:57:2057] recipient: [17:51:2095] Leader for TabletID 72057594037927937 is [17:56:2097] sender: [17:74:2057] recipient: [17:14:2061] !Reboot 72057594037927937 (actor [17:56:2097]) on event NKikimr::TEvKeyValue::TEvReadRange ! Leader for TabletID 72057594037927937 is [17:56:2097] sender: [17:80:2057] recipient: [17:36:2083] Leader for TabletID 72057594037927937 is [17:56:2097] sender: [17:83:2057] recipient: [17:14:2061] Leader for TabletID 72057594037927937 is [17:56:2097] sender: [17:84:2057] recipient: [17:82:2113] Leader for TabletID 72057594037927937 is [17:85:2114] sender: [17:86:2057] recipient: [17:82:2113] !Reboot 72057594037927937 (actor [17:56:2097]) rebooted! !Reboot 72057594037927937 (actor [17:56:2097]) tablet resolver refreshed! 
new actor is[17:85:2114] Leader for TabletID 72057594037927937 is [17:85:2114] sender: [17:139:2057] recipient: [17:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [18:54:2057] recipient: [18:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [18:54:2057] recipient: [18:51:2095] Leader for TabletID 72057594037927937 is [18:56:2097] sender: [18:57:2057] recipient: [18:51:2095] Leader for TabletID 72057594037927937 is [18:56:2097] sender: [18:74:2057] recipient: [18:14:2061] !Reboot 72057594037927937 (actor [18:56:2097]) on event NKikimr::TEvKeyValue::TEvNotify ! Leader for TabletID 72057594037927937 is [18:56:2097] sender: [18:81:2057] recipient: [18:36:2083] Leader for TabletID 72057594037927937 is [18:56:2097] sender: [18:84:2057] recipient: [18:83:2113] Leader for TabletID 72057594037927937 is [18:56:2097] sender: [18:85:2057] recipient: [18:14:2061] Leader for TabletID 72057594037927937 is [18:86:2114] sender: [18:87:2057] recipient: [18:83:2113] !Reboot 72057594037927937 (actor [18:56:2097]) rebooted! !Reboot 72057594037927937 (actor [18:56:2097]) tablet resolver refreshed! new actor is[18:86:2114] Leader for TabletID 72057594037927937 is [18:86:2114] sender: [18:104:2057] recipient: [18:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [19:54:2057] recipient: [19:50:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [19:54:2057] recipient: [19:50:2095] Leader for TabletID 72057594037927937 is [19:56:2097] sender: [19:57:2057] recipient: [19:50:2095] Leader for TabletID 72057594037927937 is [19:56:2097] sender: [19:74:2057] recipient: [19:14:2061] !Reboot 72057594037927937 (actor [19:56:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [19:56:2097] sender: [19:83:2057] recipient: [19:36:2083] Leader for TabletID 72057594037927937 is [19:56:2097] sender: [19:85:2057] recipient: [19:14:2061] Leader for TabletID 72057594037927937 is [19:56:2097] sender: [19:87:2057] recipient: [19:86:2115] Leader for TabletID 72057594037927937 is [19:88:2116] sender: [19:89:2057] recipient: [19:86:2115] !Reboot 72057594037927937 (actor [19:56:2097]) rebooted! !Reboot 72057594037927937 (actor [19:56:2097]) tablet resolver refreshed! new actor is[19:88:2116] Leader for TabletID 72057594037927937 is [19:88:2116] sender: [19:142:2057] recipient: [19:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [20:54:2057] recipient: [20:52:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [20:54:2057] recipient: [20:52:2095] Leader for TabletID 72057594037927937 is [20:56:2097] sender: [20:57:2057] recipient: [20:52:2095] Leader for TabletID 72057594037927937 is [20:56:2097] sender: [20:74:2057] recipient: [20:14:2061] !Reboot 72057594037927937 (actor [20:56:2097]) on event NKikimr::TEvKeyValue::TEvReadRange ! Leader for TabletID 72057594037927937 is [20:56:2097] sender: [20:83:2057] recipient: [20:36:2083] Leader for TabletID 72057594037927937 is [20:56:2097] sender: [20:86:2057] recipient: [20:14:2061] Leader for TabletID 72057594037927937 is [20:56:2097] sender: [20:87:2057] recipient: [20:85:2115] Leader for TabletID 72057594037927937 is [20:88:2116] sender: [20:89:2057] recipient: [20:85:2115] !Reboot 72057594037927937 (actor [20:56:2097]) rebooted! !Reboot 72057594037927937 (actor [20:56:2097]) tablet resolver refreshed! 
new actor is[20:88:2116] Leader for TabletID 72057594037927937 is [20:88:2116] sender: [20:142:2057] recipient: [20:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [21:54:2057] recipient: [21:50:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [21:54:2057] recipient: [21:50:2095] Leader for TabletID 72057594037927937 is [21:56:2097] sender: [21:57:2057] recipient: [21:50:2095] Leader for TabletID 72057594037927937 is [21:56:2097] sender: [21:74:2057] recipient: [21:14:2061] !Reboot 72057594037927937 (actor [21:56:2097]) on event NKikimr::TEvKeyValue::TEvNotify ! Leader for TabletID 72057594037927937 is [21:56:2097] sender: [21:84:2057] recipient: [21:36:2083] Leader for TabletID 72057594037927937 is [21:56:2097] sender: [21:87:2057] recipient: [21:14:2061] Leader for TabletID 72057594037927937 is [21:56:2097] sender: [21:88:2057] recipient: [21:86:2115] Leader for TabletID 72057594037927937 is [21:89:2116] sender: [21:90:2057] recipient: [21:86:2115] !Reboot 72057594037927937 (actor [21:56:2097]) rebooted! !Reboot 72057594037927937 (actor [21:56:2097]) tablet resolver refreshed! new actor is[21:89:2116] Leader for TabletID 72057594037927937 is [0:0:0] sender: [22:54:2057] recipient: [22:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [22:54:2057] recipient: [22:51:2095] Leader for TabletID 72057594037927937 is [22:56:2097] sender: [22:57:2057] recipient: [22:51:2095] Leader for TabletID 72057594037927937 is [22:56:2097] sender: [22:74:2057] recipient: [22:14:2061] >> TSchemeShardTest::ManyDirs [GOOD] >> TSchemeShardTest::ListNotCreatedDirCase >> DataStreams::TestControlPlaneAndMeteringData >> TKeyValueTest::TestRenameWorksNewApi [GOOD] |90.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/replication/controller/ut_target_discoverer/replication-controller-ut_target_discoverer |90.1%| [LD] {RESULT} $(B)/ydb/core/tx/replication/controller/ut_target_discoverer/replication-controller-ut_target_discoverer |90.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/replication/controller/ut_target_discoverer/replication-controller-ut_target_discoverer >> DataStreams::TestUpdateStream >> DataStreams::TestReservedResourcesMetering >> DataStreams::TestUpdateStorage >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-60 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-61 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/keyvalue/ut/unittest >> TKeyValueTest::TestRenameWorksNewApi [GOOD] Test command err: Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:54:2057] recipient: [1:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:54:2057] recipient: [1:51:2095] Leader for TabletID 72057594037927937 is [1:56:2097] sender: [1:57:2057] recipient: [1:51:2095] Leader for TabletID 72057594037927937 is [1:56:2097] sender: [1:74:2057] recipient: [1:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:54:2057] recipient: [2:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:54:2057] recipient: [2:51:2095] Leader for TabletID 72057594037927937 is [2:56:2097] sender: [2:57:2057] recipient: [2:51:2095] Leader for TabletID 72057594037927937 is [2:56:2097] sender: [2:74:2057] recipient: [2:14:2061] !Reboot 72057594037927937 (actor [2:56:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! 
Leader for TabletID 72057594037927937 is [2:56:2097] sender: [2:76:2057] recipient: [2:36:2083] Leader for TabletID 72057594037927937 is [2:56:2097] sender: [2:79:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [2:56:2097] sender: [2:80:2057] recipient: [2:78:2110] Leader for TabletID 72057594037927937 is [2:81:2111] sender: [2:82:2057] recipient: [2:78:2110] !Reboot 72057594037927937 (actor [2:56:2097]) rebooted! !Reboot 72057594037927937 (actor [2:56:2097]) tablet resolver refreshed! new actor is[2:81:2111] Leader for TabletID 72057594037927937 is [2:81:2111] sender: [2:135:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:54:2057] recipient: [3:52:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:54:2057] recipient: [3:52:2095] Leader for TabletID 72057594037927937 is [3:56:2097] sender: [3:57:2057] recipient: [3:52:2095] Leader for TabletID 72057594037927937 is [3:56:2097] sender: [3:74:2057] recipient: [3:14:2061] !Reboot 72057594037927937 (actor [3:56:2097]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [3:56:2097] sender: [3:76:2057] recipient: [3:36:2083] Leader for TabletID 72057594037927937 is [3:56:2097] sender: [3:78:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [3:56:2097] sender: [3:80:2057] recipient: [3:79:2110] Leader for TabletID 72057594037927937 is [3:81:2111] sender: [3:82:2057] recipient: [3:79:2110] !Reboot 72057594037927937 (actor [3:56:2097]) rebooted! !Reboot 72057594037927937 (actor [3:56:2097]) tablet resolver refreshed! new actor is[3:81:2111] Leader for TabletID 72057594037927937 is [3:81:2111] sender: [3:135:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:54:2057] recipient: [4:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:54:2057] recipient: [4:51:2095] Leader for TabletID 72057594037927937 is [4:56:2097] sender: [4:57:2057] recipient: [4:51:2095] Leader for TabletID 72057594037927937 is [4:56:2097] sender: [4:74:2057] recipient: [4:14:2061] !Reboot 72057594037927937 (actor [4:56:2097]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [4:56:2097] sender: [4:77:2057] recipient: [4:36:2083] Leader for TabletID 72057594037927937 is [4:56:2097] sender: [4:80:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [4:56:2097] sender: [4:81:2057] recipient: [4:79:2110] Leader for TabletID 72057594037927937 is [4:82:2111] sender: [4:83:2057] recipient: [4:79:2110] !Reboot 72057594037927937 (actor [4:56:2097]) rebooted! !Reboot 72057594037927937 (actor [4:56:2097]) tablet resolver refreshed! new actor is[4:82:2111] Leader for TabletID 72057594037927937 is [4:82:2111] sender: [4:136:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:54:2057] recipient: [5:50:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:54:2057] recipient: [5:50:2095] Leader for TabletID 72057594037927937 is [5:56:2097] sender: [5:57:2057] recipient: [5:50:2095] Leader for TabletID 72057594037927937 is [5:56:2097] sender: [5:74:2057] recipient: [5:14:2061] !Reboot 72057594037927937 (actor [5:56:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! 
Leader for TabletID 72057594037927937 is [5:56:2097] sender: [5:80:2057] recipient: [5:36:2083] Leader for TabletID 72057594037927937 is [5:56:2097] sender: [5:83:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [5:56:2097] sender: [5:84:2057] recipient: [5:82:2113] Leader for TabletID 72057594037927937 is [5:85:2114] sender: [5:86:2057] recipient: [5:82:2113] !Reboot 72057594037927937 (actor [5:56:2097]) rebooted! !Reboot 72057594037927937 (actor [5:56:2097]) tablet resolver refreshed! new actor is[5:85:2114] Leader for TabletID 72057594037927937 is [5:85:2114] sender: [5:139:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:54:2057] recipient: [6:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:54:2057] recipient: [6:51:2095] Leader for TabletID 72057594037927937 is [6:56:2097] sender: [6:57:2057] recipient: [6:51:2095] Leader for TabletID 72057594037927937 is [6:56:2097] sender: [6:74:2057] recipient: [6:14:2061] !Reboot 72057594037927937 (actor [6:56:2097]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [6:56:2097] sender: [6:80:2057] recipient: [6:36:2083] Leader for TabletID 72057594037927937 is [6:56:2097] sender: [6:83:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [6:56:2097] sender: [6:84:2057] recipient: [6:82:2113] Leader for TabletID 72057594037927937 is [6:85:2114] sender: [6:86:2057] recipient: [6:82:2113] !Reboot 72057594037927937 (actor [6:56:2097]) rebooted! !Reboot 72057594037927937 (actor [6:56:2097]) tablet resolver refreshed! new actor is[6:85:2114] Leader for TabletID 72057594037927937 is [6:85:2114] sender: [6:139:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:54:2057] recipient: [7:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:54:2057] recipient: [7:51:2095] Leader for TabletID 72057594037927937 is [7:56:2097] sender: [7:57:2057] recipient: [7:51:2095] Leader for TabletID 72057594037927937 is [7:56:2097] sender: [7:74:2057] recipient: [7:14:2061] !Reboot 72057594037927937 (actor [7:56:2097]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [7:56:2097] sender: [7:81:2057] recipient: [7:36:2083] Leader for TabletID 72057594037927937 is [7:56:2097] sender: [7:84:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [7:56:2097] sender: [7:85:2057] recipient: [7:83:2113] Leader for TabletID 72057594037927937 is [7:86:2114] sender: [7:87:2057] recipient: [7:83:2113] !Reboot 72057594037927937 (actor [7:56:2097]) rebooted! !Reboot 72057594037927937 (actor [7:56:2097]) tablet resolver refreshed! new actor is[7:86:2114] Leader for TabletID 72057594037927937 is [7:86:2114] sender: [7:140:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:54:2057] recipient: [8:50:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:54:2057] recipient: [8:50:2095] Leader for TabletID 72057594037927937 is [8:56:2097] sender: [8:57:2057] recipient: [8:50:2095] Leader for TabletID 72057594037927937 is [8:56:2097] sender: [8:74:2057] recipient: [8:14:2061] !Reboot 72057594037927937 (actor [8:56:2097]) on event NKikimr::TEvKeyValue::TEvCollect ! 
Leader for TabletID 72057594037927937 is [8:56:2097] sender: [8:82:2057] recipient: [8:36:2083] Leader for TabletID 72057594037927937 is [8:56:2097] sender: [8:85:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [8:56:2097] sender: [8:86:2057] recipient: [8:84:2114] Leader for TabletID 72057594037927937 is [8:87:2115] sender: [8:88:2057] recipient: [8:84:2114] !Reboot 72057594037927937 (actor [8:56:2097]) rebooted! !Reboot 72057594037927937 (actor [8:56:2097]) tablet resolver refreshed! new actor is[8:87:2115] Leader for TabletID 72057594037927937 is [8:87:2115] sender: [8:107:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:54:2057] recipient: [9:52:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:54:2057] recipient: [9:52:2095] Leader for TabletID 72057594037927937 is [9:56:2097] sender: [9:57:2057] recipient: [9:52:2095] Leader for TabletID 72057594037927937 is [9:56:2097] sender: [9:74:2057] recipient: [9:14:2061] !Reboot 72057594037927937 (actor [9:56:2097]) on event NKikimr::TEvKeyValue::TEvCompleteGC ! Leader for TabletID 72057594037927937 is [9:56:2097] sender: [9:83:2057] recipient: [9:36:2083] Leader for TabletID 72057594037927937 is [9:56:2097] sender: [9:86:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [9:56:2097] sender: [9:87:2057] recipient: [9:85:2115] Leader for TabletID 72057594037927937 is [9:88:2116] sender: [9:89:2057] recipient: [9:85:2115] !Reboot 72057594037927937 (actor [9:56:2097]) rebooted! !Reboot 72057594037927937 (actor [9:56:2097]) tablet resolver refreshed! new actor is[9:88:2116] Leader for TabletID 72057594037927937 is [9:88:2116] sender: [9:108:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:54:2057] recipient: [10:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:54:2057] recipient: [10:51:2095] Leader for TabletID 72057594037927937 is [10:56:2097] sender: [10:57:2057] recipient: [10:51:2095] Leader for TabletID 72057594037927937 is [10:56:2097] sender: [10:74:2057] recipient: [10:14:2061] !Reboot 72057594037927937 (actor [10:56:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [10:56:2097] sender: [10:86:2057] recipient: [10:36:2083] Leader for TabletID 72057594037927937 is [10:56:2097] sender: [10:89:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [10:56:2097] sender: [10:90:2057] recipient: [10:88:2118] Leader for TabletID 72057594037927937 is [10:91:2119] sender: [10:92:2057] recipient: [10:88:2118] !Reboot 72057594037927937 (actor [10:56:2097]) rebooted! !Reboot 72057594037927937 (actor [10:56:2097]) tablet resolver refreshed! new actor is[10:91:2119] Leader for TabletID 72057594037927937 is [10:91:2119] sender: [10:145:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:54:2057] recipient: [11:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:54:2057] recipient: [11:51:2095] Leader for TabletID 72057594037927937 is [11:56:2097] sender: [11:57:2057] recipient: [11:51:2095] Leader for TabletID 72057594037927937 is [11:56:2097] sender: [11:74:2057] recipient: [11:14:2061] !Reboot 72057594037927937 (actor [11:56:2097]) on event NKikimr::TEvKeyValue::TEvRequest ! 
Leader for TabletID 72057594037927937 is [11:56:2097] sender: [11:86:2057] recipient: [11:36:2083] Leader for TabletID 72057594037927937 is [11:56:2097] sender: [11:89:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [11:56:2097] sender: [11:90:2057] recipient: [11:88:2118] Leader for TabletID 72057594037927937 is [11:91:2119] sender: [11:92:2057] recipient: [11:88:2118] !Reboot 72057594037927937 (actor [11:56:2097]) rebooted! !Reboot 72057594037927937 (actor [11:56:2097]) tablet resolver refreshed! new actor is[11:91:2119] Leader for TabletID 72057594037927937 is [11:91:2119] sender: [11:145:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:54:2057] recipient: [12:52:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:54:2057] recipient: [12:52:2095] Leader for TabletID 72057594037927937 is [12:56:2097] sender: [12:57:2057] recipient: [12:52:2095] Leader for TabletID 72057594037927937 is [12:56:2097] sender: [12:74:2057] recipient: [12:14:2061] !Reboot 72057594037927937 (actor [12:56:209 ... recipient: [15:78:2110] !Reboot 72057594037927937 (actor [15:56:2097]) rebooted! !Reboot 72057594037927937 (actor [15:56:2097]) tablet resolver refreshed! new actor is[15:81:2111] Leader for TabletID 72057594037927937 is [15:81:2111] sender: [15:135:2057] recipient: [15:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [16:54:2057] recipient: [16:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [16:54:2057] recipient: [16:51:2095] Leader for TabletID 72057594037927937 is [16:56:2097] sender: [16:57:2057] recipient: [16:51:2095] Leader for TabletID 72057594037927937 is [16:56:2097] sender: [16:74:2057] recipient: [16:14:2061] !Reboot 72057594037927937 (actor [16:56:2097]) on event NKikimr::TEvKeyValue::TEvExecuteTransaction ! Leader for TabletID 72057594037927937 is [16:56:2097] sender: [16:76:2057] recipient: [16:36:2083] Leader for TabletID 72057594037927937 is [16:56:2097] sender: [16:79:2057] recipient: [16:78:2110] Leader for TabletID 72057594037927937 is [16:56:2097] sender: [16:80:2057] recipient: [16:14:2061] Leader for TabletID 72057594037927937 is [16:81:2111] sender: [16:82:2057] recipient: [16:78:2110] !Reboot 72057594037927937 (actor [16:56:2097]) rebooted! !Reboot 72057594037927937 (actor [16:56:2097]) tablet resolver refreshed! new actor is[16:81:2111] Leader for TabletID 72057594037927937 is [16:81:2111] sender: [16:135:2057] recipient: [16:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [17:54:2057] recipient: [17:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [17:54:2057] recipient: [17:51:2095] Leader for TabletID 72057594037927937 is [17:56:2097] sender: [17:57:2057] recipient: [17:51:2095] Leader for TabletID 72057594037927937 is [17:56:2097] sender: [17:74:2057] recipient: [17:14:2061] !Reboot 72057594037927937 (actor [17:56:2097]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [17:56:2097] sender: [17:77:2057] recipient: [17:36:2083] Leader for TabletID 72057594037927937 is [17:56:2097] sender: [17:80:2057] recipient: [17:79:2110] Leader for TabletID 72057594037927937 is [17:56:2097] sender: [17:81:2057] recipient: [17:14:2061] Leader for TabletID 72057594037927937 is [17:82:2111] sender: [17:83:2057] recipient: [17:79:2110] !Reboot 72057594037927937 (actor [17:56:2097]) rebooted! !Reboot 72057594037927937 (actor [17:56:2097]) tablet resolver refreshed! 
new actor is[17:82:2111] Leader for TabletID 72057594037927937 is [17:82:2111] sender: [17:136:2057] recipient: [17:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [18:54:2057] recipient: [18:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [18:54:2057] recipient: [18:51:2095] Leader for TabletID 72057594037927937 is [18:56:2097] sender: [18:57:2057] recipient: [18:51:2095] Leader for TabletID 72057594037927937 is [18:56:2097] sender: [18:74:2057] recipient: [18:14:2061] !Reboot 72057594037927937 (actor [18:56:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [18:56:2097] sender: [18:80:2057] recipient: [18:36:2083] Leader for TabletID 72057594037927937 is [18:56:2097] sender: [18:83:2057] recipient: [18:14:2061] Leader for TabletID 72057594037927937 is [18:56:2097] sender: [18:84:2057] recipient: [18:82:2113] Leader for TabletID 72057594037927937 is [18:85:2114] sender: [18:86:2057] recipient: [18:82:2113] !Reboot 72057594037927937 (actor [18:56:2097]) rebooted! !Reboot 72057594037927937 (actor [18:56:2097]) tablet resolver refreshed! new actor is[18:85:2114] Leader for TabletID 72057594037927937 is [18:85:2114] sender: [18:139:2057] recipient: [18:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [19:54:2057] recipient: [19:50:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [19:54:2057] recipient: [19:50:2095] Leader for TabletID 72057594037927937 is [19:56:2097] sender: [19:57:2057] recipient: [19:50:2095] Leader for TabletID 72057594037927937 is [19:56:2097] sender: [19:74:2057] recipient: [19:14:2061] !Reboot 72057594037927937 (actor [19:56:2097]) on event NKikimr::TEvKeyValue::TEvExecuteTransaction ! Leader for TabletID 72057594037927937 is [19:56:2097] sender: [19:80:2057] recipient: [19:36:2083] Leader for TabletID 72057594037927937 is [19:56:2097] sender: [19:83:2057] recipient: [19:14:2061] Leader for TabletID 72057594037927937 is [19:56:2097] sender: [19:84:2057] recipient: [19:82:2113] Leader for TabletID 72057594037927937 is [19:85:2114] sender: [19:86:2057] recipient: [19:82:2113] !Reboot 72057594037927937 (actor [19:56:2097]) rebooted! !Reboot 72057594037927937 (actor [19:56:2097]) tablet resolver refreshed! new actor is[19:85:2114] Leader for TabletID 72057594037927937 is [19:85:2114] sender: [19:139:2057] recipient: [19:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [20:54:2057] recipient: [20:52:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [20:54:2057] recipient: [20:52:2095] Leader for TabletID 72057594037927937 is [20:56:2097] sender: [20:57:2057] recipient: [20:52:2095] Leader for TabletID 72057594037927937 is [20:56:2097] sender: [20:74:2057] recipient: [20:14:2061] !Reboot 72057594037927937 (actor [20:56:2097]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [20:56:2097] sender: [20:81:2057] recipient: [20:36:2083] Leader for TabletID 72057594037927937 is [20:56:2097] sender: [20:84:2057] recipient: [20:83:2113] Leader for TabletID 72057594037927937 is [20:56:2097] sender: [20:85:2057] recipient: [20:14:2061] Leader for TabletID 72057594037927937 is [20:86:2114] sender: [20:87:2057] recipient: [20:83:2113] !Reboot 72057594037927937 (actor [20:56:2097]) rebooted! !Reboot 72057594037927937 (actor [20:56:2097]) tablet resolver refreshed! 
new actor is[20:86:2114] Leader for TabletID 72057594037927937 is [20:86:2114] sender: [20:140:2057] recipient: [20:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [21:54:2057] recipient: [21:50:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [21:54:2057] recipient: [21:50:2095] Leader for TabletID 72057594037927937 is [21:56:2097] sender: [21:57:2057] recipient: [21:50:2095] Leader for TabletID 72057594037927937 is [21:56:2097] sender: [21:74:2057] recipient: [21:14:2061] !Reboot 72057594037927937 (actor [21:56:2097]) on event NKikimr::TEvKeyValue::TEvCollect ! Leader for TabletID 72057594037927937 is [21:56:2097] sender: [21:82:2057] recipient: [21:36:2083] Leader for TabletID 72057594037927937 is [21:56:2097] sender: [21:85:2057] recipient: [21:84:2114] Leader for TabletID 72057594037927937 is [21:56:2097] sender: [21:86:2057] recipient: [21:14:2061] Leader for TabletID 72057594037927937 is [21:87:2115] sender: [21:88:2057] recipient: [21:84:2114] !Reboot 72057594037927937 (actor [21:56:2097]) rebooted! !Reboot 72057594037927937 (actor [21:56:2097]) tablet resolver refreshed! new actor is[21:87:2115] Leader for TabletID 72057594037927937 is [21:87:2115] sender: [21:107:2057] recipient: [21:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [22:54:2057] recipient: [22:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [22:54:2057] recipient: [22:51:2095] Leader for TabletID 72057594037927937 is [22:56:2097] sender: [22:57:2057] recipient: [22:51:2095] Leader for TabletID 72057594037927937 is [22:56:2097] sender: [22:74:2057] recipient: [22:14:2061] !Reboot 72057594037927937 (actor [22:56:2097]) on event NKikimr::TEvKeyValue::TEvCompleteGC ! Leader for TabletID 72057594037927937 is [22:56:2097] sender: [22:83:2057] recipient: [22:36:2083] Leader for TabletID 72057594037927937 is [22:56:2097] sender: [22:86:2057] recipient: [22:14:2061] Leader for TabletID 72057594037927937 is [22:56:2097] sender: [22:87:2057] recipient: [22:85:2115] Leader for TabletID 72057594037927937 is [22:88:2116] sender: [22:89:2057] recipient: [22:85:2115] !Reboot 72057594037927937 (actor [22:56:2097]) rebooted! !Reboot 72057594037927937 (actor [22:56:2097]) tablet resolver refreshed! new actor is[22:88:2116] Leader for TabletID 72057594037927937 is [22:88:2116] sender: [22:108:2057] recipient: [22:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [23:54:2057] recipient: [23:52:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [23:54:2057] recipient: [23:52:2095] Leader for TabletID 72057594037927937 is [23:56:2097] sender: [23:57:2057] recipient: [23:52:2095] Leader for TabletID 72057594037927937 is [23:56:2097] sender: [23:74:2057] recipient: [23:14:2061] !Reboot 72057594037927937 (actor [23:56:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [23:56:2097] sender: [23:86:2057] recipient: [23:36:2083] Leader for TabletID 72057594037927937 is [23:56:2097] sender: [23:89:2057] recipient: [23:14:2061] Leader for TabletID 72057594037927937 is [23:56:2097] sender: [23:90:2057] recipient: [23:88:2118] Leader for TabletID 72057594037927937 is [23:91:2119] sender: [23:92:2057] recipient: [23:88:2118] !Reboot 72057594037927937 (actor [23:56:2097]) rebooted! !Reboot 72057594037927937 (actor [23:56:2097]) tablet resolver refreshed! 
new actor is[23:91:2119] Leader for TabletID 72057594037927937 is [23:91:2119] sender: [23:145:2057] recipient: [23:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [24:54:2057] recipient: [24:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [24:54:2057] recipient: [24:51:2095] Leader for TabletID 72057594037927937 is [24:56:2097] sender: [24:57:2057] recipient: [24:51:2095] Leader for TabletID 72057594037927937 is [24:56:2097] sender: [24:74:2057] recipient: [24:14:2061] !Reboot 72057594037927937 (actor [24:56:2097]) on event NKikimr::TEvKeyValue::TEvReadRange ! Leader for TabletID 72057594037927937 is [24:56:2097] sender: [24:86:2057] recipient: [24:36:2083] Leader for TabletID 72057594037927937 is [24:56:2097] sender: [24:89:2057] recipient: [24:14:2061] Leader for TabletID 72057594037927937 is [24:56:2097] sender: [24:90:2057] recipient: [24:88:2118] Leader for TabletID 72057594037927937 is [24:91:2119] sender: [24:92:2057] recipient: [24:88:2118] !Reboot 72057594037927937 (actor [24:56:2097]) rebooted! !Reboot 72057594037927937 (actor [24:56:2097]) tablet resolver refreshed! new actor is[24:91:2119] Leader for TabletID 72057594037927937 is [24:91:2119] sender: [24:145:2057] recipient: [24:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [25:54:2057] recipient: [25:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [25:54:2057] recipient: [25:51:2095] Leader for TabletID 72057594037927937 is [25:56:2097] sender: [25:57:2057] recipient: [25:51:2095] Leader for TabletID 72057594037927937 is [25:56:2097] sender: [25:74:2057] recipient: [25:14:2061] !Reboot 72057594037927937 (actor [25:56:2097]) on event NKikimr::TEvKeyValue::TEvNotify ! Leader for TabletID 72057594037927937 is [25:56:2097] sender: [25:87:2057] recipient: [25:36:2083] Leader for TabletID 72057594037927937 is [25:56:2097] sender: [25:90:2057] recipient: [25:14:2061] Leader for TabletID 72057594037927937 is [25:56:2097] sender: [25:91:2057] recipient: [25:89:2118] Leader for TabletID 72057594037927937 is [25:92:2119] sender: [25:93:2057] recipient: [25:89:2118] !Reboot 72057594037927937 (actor [25:56:2097]) rebooted! !Reboot 72057594037927937 (actor [25:56:2097]) tablet resolver refreshed! 
new actor is[25:92:2119] Leader for TabletID 72057594037927937 is [0:0:0] sender: [26:54:2057] recipient: [26:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [26:54:2057] recipient: [26:51:2095] Leader for TabletID 72057594037927937 is [26:56:2097] sender: [26:57:2057] recipient: [26:51:2095] Leader for TabletID 72057594037927937 is [26:56:2097] sender: [26:74:2057] recipient: [26:14:2061] ------- [TM] {asan, default-linux-x86_64, release} ydb/services/persqueue_v1/ut/new_schemecache_ut/unittest >> TPersQueueCommonTest::TestLimiterLimitsWithUserPayloadRateLimit [GOOD] Test command err: === Server->StartServer(false); 2025-05-07T08:53:52.705293Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501624379669379825:2204];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:53:52.712315Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-05-07T08:53:52.764797Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7501624378883769194:2071];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:53:52.764832Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0034aa/r3tmp/tmpPslhtP/pdisk_1.dat 2025-05-07T08:53:53.071935Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-05-07T08:53:53.128865Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-05-07T08:53:53.504174Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:53:53.507657Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:53:53.510519Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:53:53.510602Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:53:53.512612Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:53:53.513758Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:53:53.515740Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-05-07T08:53:53.522939Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 62156, node 1 2025-05-07T08:53:53.744855Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/zvgn/0034aa/r3tmp/yandexo2zNRr.tmp 2025-05-07T08:53:53.744880Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/zvgn/0034aa/r3tmp/yandexo2zNRr.tmp 2025-05-07T08:53:53.745025Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: 
successfully initialized from file: /home/runner/.ya/build/build_root/zvgn/0034aa/r3tmp/yandexo2zNRr.tmp 2025-05-07T08:53:53.745190Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-05-07T08:53:53.815689Z INFO: TTestServer started on Port 12474 GrpcPort 62156 TClient is connected to server localhost:12474 PQClient connected to localhost:62156 === TenantModeEnabled() = 1 === Init PQ - start server on port 62156 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:53:54.558923Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "Root" StoragePools { Name: "/Root:test" Kind: "test" } } } TxId: 281474976710657 TabletId: 72057594046644480 PeerName: "" , at schemeshard: 72057594046644480 2025-05-07T08:53:54.559076Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //Root, opId: 281474976710657:0, at schemeshard: 72057594046644480 2025-05-07T08:53:54.559255Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 0 2025-05-07T08:53:54.559429Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 281474976710657:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-05-07T08:53:54.559458Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 2025-05-07T08:53:54.562206Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 281474976710657, response: Status: StatusAccepted TxId: 281474976710657 SchemeshardId: 72057594046644480 PathId: 1, at schemeshard: 72057594046644480 2025-05-07T08:53:54.562596Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976710657, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //Root 2025-05-07T08:53:54.562857Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 281474976710657:0, at schemeshard: 72057594046644480 2025-05-07T08:53:54.563027Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 281474976710657:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046644480 
2025-05-07T08:53:54.563044Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 281474976710657:0 ProgressState no shards to create, do next state 2025-05-07T08:53:54.563060Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 281474976710657:0 2 -> 3 waiting... 2025-05-07T08:53:54.566467Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 281474976710657:0, at schemeshard: 72057594046644480 2025-05-07T08:53:54.566535Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 281474976710657:0 ProgressState, at schemeshard: 72057594046644480 2025-05-07T08:53:54.566549Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 281474976710657:0 3 -> 128 2025-05-07T08:53:54.569062Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 281474976710657:0, at schemeshard: 72057594046644480 2025-05-07T08:53:54.569095Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 281474976710657:0, at schemeshard: 72057594046644480 2025-05-07T08:53:54.569148Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 281474976710657:0, at tablet# 72057594046644480 2025-05-07T08:53:54.569182Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 281474976710657 ready parts: 1/1 2025-05-07T08:53:54.581138Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046644480 Flags: 2 } ExecLevel: 0 TxId: 281474976710657 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:53:54.581564Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T08:53:54.581586Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 281474976710657, ready parts: 0/1, is published: true 2025-05-07T08:53:54.581631Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T08:53:54.593894Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 281474976710657:4294967295 from tablet: 72057594046644480 to tablet: 72057594046316545 cookie: 0:281474976710657 msg type: 269090816 2025-05-07T08:53:54.594063Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 281474976710657, partId: 4294967295, tablet: 72057594046316545 2025-05-07T08:53:54.602562Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 1746608034640, transactions count in step: 1, at schemeshard: 72057594046644480 2025-05-07T08:53:54.602725Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 281474976710657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1746608034640 MediatorID: 
72057594046382081 TabletID: 72057594046644480, at schemeshard: 72057594046644480 2025-05-07T08:53:54.602754Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 281474976710657:0, at tablet# 72057594046644480 2025-05-07T08:53:54.603009Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 281474976710657:0 128 -> 240 2025-05-07T08:53:54.603033Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 281474976710657:0, at tablet# 72057594046644480 2025-05-07T08:53:54.603181Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 1 2025-05-07T08:53:54.603236Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046644480, LocalPathId: 1], at schemeshard: 72057594046644480 2025-05-07T08:53:54.607395Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046644480 2025-05-07T08:53:54.607430Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046644480, txId: 281474976710657, path id: [OwnerId: 72057594046644480, LocalPathId: 1] 2025-05-07T08:53:54.607795Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046644480 2025-05-07T08:53:54.607822Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: sche ... ta/TopicPartitionsMapping` (Hash, Topic, ProducerId, CreateTime, AccessTime, Partition, SeqNo) VALUES ($Hash, $Topic, $SourceId, $CreateTime, $AccessTime, $Partition, $SeqNo); 2025-05-07T08:54:09.007853Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:64: TTableHelper UpdateAccessTimeQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint64; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64; UPDATE `//Root/.metadata/TopicPartitionsMapping` SET AccessTime = $AccessTime WHERE Hash = $Hash AND Topic = $Topic AND ProducerId = $SourceId AND Partition = $Partition; 2025-05-07T08:54:09.007886Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:305: TPartitionChooser [3:7501624452463403278:2414] (SourceId=123, PreferedPartition=(NULL)) ReplyResult: Partition=0, SeqNo=0 2025-05-07T08:54:09.007908Z node 3 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:689: ProceedPartition. 
session cookie: 4 sessionId: partition: 0 expectedGeneration: (NULL) 2025-05-07T08:54:09.010714Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2874: [PQ: 72075186224037893] server connected, pipe [3:7501624452463403281:2414], now have 1 active actors on pipe 2025-05-07T08:54:09.013377Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:798: TPartitionWriter 72075186224037893 (partition=0) TEvClientConnected Status OK, TabletId: 72075186224037893, NodeId 4, Generation: 1 2025-05-07T08:54:09.014346Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:342: Handle TEvRequest topic: 'topic' requestId: 2025-05-07T08:54:09.014392Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2788: [PQ: 72075186224037893] got client message batch for topic 'PQ/account/topic' partition 0 2025-05-07T08:54:09.014493Z node 4 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie 123|9b3f6ff7-8efeea37-c59b799f-68198995_0 generated for partition 0 topic 'PQ/account/topic' owner 123 2025-05-07T08:54:09.014606Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:35: [PQ: 72075186224037893, Partition: 0, State: StateIdle] TPartition::ReplyOwnerOk. Partition: 0 2025-05-07T08:54:09.014662Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:377: Answer ok topic: 'topic' partition: 0 messageNo: 0 requestId: cookie: 0 2025-05-07T08:54:09.015442Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:342: Handle TEvRequest topic: 'topic' requestId: 2025-05-07T08:54:09.015466Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2788: [PQ: 72075186224037893] got client message batch for topic 'PQ/account/topic' partition 0 2025-05-07T08:54:09.015538Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:377: Answer ok topic: 'topic' partition: 0 messageNo: 0 requestId: cookie: 0 2025-05-07T08:54:09.015858Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:865: session inited cookie: 4 partition: 0 MaxSeqNo: 2 sessionId: 123|9b3f6ff7-8efeea37-c59b799f-68198995_0 2025-05-07T08:54:09.018508Z :INFO: [] MessageGroupId [123] SessionId [] Counters: { Errors: 0 CurrentSessionLifetimeMs: 1746608049018 BytesWritten: 0 MessagesWritten: 0 BytesWrittenCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-05-07T08:54:09.018625Z :INFO: [] MessageGroupId [123] SessionId [] Write session established. Init response: last_sequence_number: 2 session_id: "123|9b3f6ff7-8efeea37-c59b799f-68198995_0" topic: "PQ/account/topic" 2025-05-07T08:54:09.018911Z :DEBUG: [] MessageGroupId [123] SessionId [123|9b3f6ff7-8efeea37-c59b799f-68198995_0] Write 1 messages with Id from 1 to 1 2025-05-07T08:54:09.019049Z :DEBUG: [] MessageGroupId [123] SessionId [123|9b3f6ff7-8efeea37-c59b799f-68198995_0] Write session: try to update token 2025-05-07T08:54:09.019094Z :DEBUG: [] MessageGroupId [123] SessionId [123|9b3f6ff7-8efeea37-c59b799f-68198995_0] Send 1 message(s) (0 left), first sequence number is 3 2025-05-07T08:54:09.019294Z :INFO: [] MessageGroupId [123] SessionId [123|9b3f6ff7-8efeea37-c59b799f-68198995_0] Write session: close. 
Timeout = 10000 ms 2025-05-07T08:54:09.021134Z node 3 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 4 sessionId: 123|9b3f6ff7-8efeea37-c59b799f-68198995_0 grpc read done: success: 1 data: write_request[data omitted] 2025-05-07T08:54:09.021505Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:538: TPartitionWriter 72075186224037893 (partition=0) Received event: NKikimr::NPQ::TEvPartitionWriter::TEvWriteRequest 2025-05-07T08:54:09.026192Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:342: Handle TEvRequest topic: 'topic' requestId: 2025-05-07T08:54:09.026244Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2788: [PQ: 72075186224037893] got client message batch for topic 'PQ/account/topic' partition 0 2025-05-07T08:54:09.026355Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:377: Answer ok topic: 'topic' partition: 0 messageNo: 0 requestId: cookie: 1 2025-05-07T08:54:09.026886Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:538: TPartitionWriter 72075186224037893 (partition=0) Received event: NActors::IEventHandle 2025-05-07T08:54:09.027478Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:342: Handle TEvRequest topic: 'topic' requestId: 2025-05-07T08:54:09.027511Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2788: [PQ: 72075186224037893] got client message batch for topic 'PQ/account/topic' partition 0 2025-05-07T08:54:09.027558Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2192: [PQ: 72075186224037893] got client message topic: PQ/account/topic partition: 0 SourceId: '\000123' SeqNo: 3 partNo : 0 messageNo: 1 size 372 offset: -1 2025-05-07T08:54:09.027679Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:1704: [PQ: 72075186224037893, Partition: 0, State: StateIdle] Send write quota request. Topic: "PQ/account/topic". Partition: 0. Amount: 376. Cookie: 3 2025-05-07T08:54:09.027736Z node 4 :PERSQUEUE DEBUG: partition.cpp:3627: [PQ: 72075186224037893, Partition: 0, State: StateIdle] Got quota. Topic: "PQ/account/topic". 
Partition: 0: Cookie: 3 2025-05-07T08:54:09.027893Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:1233: [PQ: 72075186224037893, Partition: 0, State: StateIdle] Topic 'PQ/account/topic' partition 0 part blob processing sourceId '\000123' seqNo 3 partNo 0 2025-05-07T08:54:09.028948Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:1333: [PQ: 72075186224037893, Partition: 0, State: StateIdle] Topic 'PQ/account/topic' partition 0 part blob complete sourceId '\000123' seqNo 3 partNo 0 FormedBlobsCount 0 NewHead: Offset 2 PartNo 0 PackedSize 443 count 1 nextOffset 3 batches 1 2025-05-07T08:54:09.029464Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:1623: [PQ: 72075186224037893, Partition: 0, State: StateIdle] Add new write blob: topic 'PQ/account/topic' partition 0 compactOffset 2,1 HeadOffset 0 endOffset 2 curOffset 3 d0000000000_00000000000000000002_00000_0000000001_00000| size 431 WTime 1746608049027 2025-05-07T08:54:09.029583Z node 4 :PERSQUEUE DEBUG: partition.cpp:2182: [PQ: 72075186224037893, Partition: 0, State: StateIdle] === DumpKeyValueRequest === 2025-05-07T08:54:09.029599Z node 4 :PERSQUEUE DEBUG: partition.cpp:2183: [PQ: 72075186224037893, Partition: 0, State: StateIdle] --- delete ---------------- 2025-05-07T08:54:09.029617Z node 4 :PERSQUEUE DEBUG: partition.cpp:2189: [PQ: 72075186224037893, Partition: 0, State: StateIdle] [x0000000000, x0000000001) 2025-05-07T08:54:09.029633Z node 4 :PERSQUEUE DEBUG: partition.cpp:2191: [PQ: 72075186224037893, Partition: 0, State: StateIdle] --- write ----------------- 2025-05-07T08:54:09.029649Z node 4 :PERSQUEUE DEBUG: partition.cpp:2194: [PQ: 72075186224037893, Partition: 0, State: StateIdle] m0000000000p123 2025-05-07T08:54:09.029659Z node 4 :PERSQUEUE DEBUG: partition.cpp:2194: [PQ: 72075186224037893, Partition: 0, State: StateIdle] d0000000000_00000000000000000002_00000_0000000001_00000| 2025-05-07T08:54:09.029670Z node 4 :PERSQUEUE DEBUG: partition.cpp:2194: [PQ: 72075186224037893, Partition: 0, State: StateIdle] i0000000000 2025-05-07T08:54:09.029688Z node 4 :PERSQUEUE DEBUG: partition.cpp:2196: [PQ: 72075186224037893, Partition: 0, State: StateIdle] --- rename ---------------- 2025-05-07T08:54:09.029704Z node 4 :PERSQUEUE DEBUG: partition.cpp:2201: [PQ: 72075186224037893, Partition: 0, State: StateIdle] =========================== 2025-05-07T08:54:09.029757Z node 4 :PERSQUEUE DEBUG: read.h:262: CacheProxy. Passthrough write request to KV 2025-05-07T08:54:09.029818Z node 4 :PERSQUEUE DEBUG: read.h:300: CacheProxy. Passthrough blob. Partition 0 offset 2 partNo 0 count 1 size 431 2025-05-07T08:54:09.039897Z node 4 :PERSQUEUE DEBUG: cache_eviction.h:315: Caching head blob in L1. Partition 0 offset 2 count 1 size 431 actorID [4:7501624445667279556:2335] 2025-05-07T08:54:09.040007Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:524: [PQ: 72075186224037893, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 376 WriteNewSizeFromSupportivePartitions# 0 2025-05-07T08:54:09.040060Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:58: [PQ: 72075186224037893, Partition: 0, State: StateIdle] TPartition::ReplyWrite. 
Partition: 0 2025-05-07T08:54:09.040103Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:324: [PQ: 72075186224037893, Partition: 0, State: StateIdle] Answering for message sourceid: '\000123', Topic: 'PQ/account/topic', Partition: 0, SeqNo: 3, partNo: 0, Offset: 2 is stored on disk 2025-05-07T08:54:09.040323Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:377: Answer ok topic: 'topic' partition: 0 messageNo: 1 requestId: cookie: 1 2025-05-07T08:54:09.040799Z node 4 :PERSQUEUE DEBUG: pq_l2_cache.cpp:120: PQ Cache (L2). Adding blob. Tablet '72075186224037893' partition 0 offset 2 partno 0 count 1 parts 0 size 431 2025-05-07T08:54:09.041144Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:538: TPartitionWriter 72075186224037893 (partition=0) Received event: NActors::IEventHandle 2025-05-07T08:54:09.046570Z :DEBUG: [] MessageGroupId [123] SessionId [123|9b3f6ff7-8efeea37-c59b799f-68198995_0] Write session got write response: sequence_numbers: 3 offsets: 2 already_written: false write_statistics { persist_duration_ms: 11 } 2025-05-07T08:54:09.046643Z :DEBUG: [] MessageGroupId [123] SessionId [123|9b3f6ff7-8efeea37-c59b799f-68198995_0] Write session: acknoledged message 1 2025-05-07T08:54:09.119870Z :INFO: [] MessageGroupId [123] SessionId [123|9b3f6ff7-8efeea37-c59b799f-68198995_0] Write session will now close 2025-05-07T08:54:09.119945Z :DEBUG: [] MessageGroupId [123] SessionId [123|9b3f6ff7-8efeea37-c59b799f-68198995_0] Write session: aborting 2025-05-07T08:54:09.120465Z :INFO: [] MessageGroupId [123] SessionId [123|9b3f6ff7-8efeea37-c59b799f-68198995_0] Write session: gracefully shut down, all writes complete 2025-05-07T08:54:09.120519Z :DEBUG: [] MessageGroupId [123] SessionId [123|9b3f6ff7-8efeea37-c59b799f-68198995_0] Write session: destroy 2025-05-07T08:54:09.122861Z node 3 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 4 sessionId: 123|9b3f6ff7-8efeea37-c59b799f-68198995_0 grpc read done: success: 0 data: 2025-05-07T08:54:09.122894Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 4 sessionId: 123|9b3f6ff7-8efeea37-c59b799f-68198995_0 grpc read failed 2025-05-07T08:54:09.123684Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:818: session v1 closed cookie: 4 sessionId: 123|9b3f6ff7-8efeea37-c59b799f-68198995_0 2025-05-07T08:54:09.123706Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 4 sessionId: 123|9b3f6ff7-8efeea37-c59b799f-68198995_0 is DEAD 2025-05-07T08:54:09.124516Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:538: TPartitionWriter 72075186224037893 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-05-07T08:54:09.129456Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2899: [PQ: 72075186224037893] server disconnected, pipe [3:7501624452463403281:2414] destroyed 2025-05-07T08:54:09.129518Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:138: [PQ: 72075186224037893, Partition: 0, State: StateIdle] TPartition::DropOwner. 
>> TKeyValueTest::TestLargeWriteAndDelete [GOOD] >> KqpJoinOrder::OltpJoinTypeHintCBOTurnOFF >> TPersQueueCommonTest::Auth_WriteSessionWithValidTokenAndACEAndThenRemoveACEAndSendWriteRequest_SessionClosedWithUnauthorizedErrorAfterSuccessfullWriteResponse [GOOD] >> TPQTest::TestReadSubscription [GOOD] >> KqpVectorIndexes::OrderByCosineDistanceNullableLevel2 [GOOD] >> DataStreams::TestDeleteStream [GOOD] >> DataStreams::TestDeleteStreamWithEnforceFlag >> RetryPolicy::TWriteSession_TestPolicy [GOOD] >> DataShardSnapshots::UncommittedWriteRestartDuringCommitThenBulkErase [GOOD] >> DataShardSnapshots::UncommittedChangesRenameTable-UseSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/keyvalue/ut/unittest >> TKeyValueTest::TestLargeWriteAndDelete [GOOD] Test command err: Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:54:2057] recipient: [1:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:54:2057] recipient: [1:51:2095] Leader for TabletID 72057594037927937 is [1:56:2097] sender: [1:57:2057] recipient: [1:51:2095] Leader for TabletID 72057594037927937 is [1:56:2097] sender: [1:74:2057] recipient: [1:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:54:2057] recipient: [2:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:54:2057] recipient: [2:51:2095] Leader for TabletID 72057594037927937 is [2:56:2097] sender: [2:57:2057] recipient: [2:51:2095] Leader for TabletID 72057594037927937 is [2:56:2097] sender: [2:74:2057] recipient: [2:14:2061] !Reboot 72057594037927937 (actor [2:56:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [2:56:2097] sender: [2:76:2057] recipient: [2:36:2083] Leader for TabletID 72057594037927937 is [2:56:2097] sender: [2:79:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [2:56:2097] sender: [2:80:2057] recipient: [2:78:2110] Leader for TabletID 72057594037927937 is [2:81:2111] sender: [2:82:2057] recipient: [2:78:2110] !Reboot 72057594037927937 (actor [2:56:2097]) rebooted! !Reboot 72057594037927937 (actor [2:56:2097]) tablet resolver refreshed! new actor is[2:81:2111] Leader for TabletID 72057594037927937 is [2:81:2111] sender: [2:135:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:54:2057] recipient: [3:52:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:54:2057] recipient: [3:52:2095] Leader for TabletID 72057594037927937 is [3:56:2097] sender: [3:57:2057] recipient: [3:52:2095] Leader for TabletID 72057594037927937 is [3:56:2097] sender: [3:74:2057] recipient: [3:14:2061] !Reboot 72057594037927937 (actor [3:56:2097]) on event NKikimr::TEvKeyValue::TEvExecuteTransaction ! Leader for TabletID 72057594037927937 is [3:56:2097] sender: [3:76:2057] recipient: [3:36:2083] Leader for TabletID 72057594037927937 is [3:56:2097] sender: [3:78:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [3:56:2097] sender: [3:80:2057] recipient: [3:79:2110] Leader for TabletID 72057594037927937 is [3:81:2111] sender: [3:82:2057] recipient: [3:79:2110] !Reboot 72057594037927937 (actor [3:56:2097]) rebooted! !Reboot 72057594037927937 (actor [3:56:2097]) tablet resolver refreshed! 
new actor is[3:81:2111] Leader for TabletID 72057594037927937 is [3:81:2111] sender: [3:135:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:54:2057] recipient: [4:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:54:2057] recipient: [4:51:2095] Leader for TabletID 72057594037927937 is [4:56:2097] sender: [4:57:2057] recipient: [4:51:2095] Leader for TabletID 72057594037927937 is [4:56:2097] sender: [4:74:2057] recipient: [4:14:2061] !Reboot 72057594037927937 (actor [4:56:2097]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [4:56:2097] sender: [4:77:2057] recipient: [4:36:2083] Leader for TabletID 72057594037927937 is [4:56:2097] sender: [4:79:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [4:56:2097] sender: [4:81:2057] recipient: [4:80:2110] Leader for TabletID 72057594037927937 is [4:82:2111] sender: [4:83:2057] recipient: [4:80:2110] !Reboot 72057594037927937 (actor [4:56:2097]) rebooted! !Reboot 72057594037927937 (actor [4:56:2097]) tablet resolver refreshed! new actor is[4:82:2111] Leader for TabletID 72057594037927937 is [4:82:2111] sender: [4:136:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:54:2057] recipient: [5:50:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:54:2057] recipient: [5:50:2095] Leader for TabletID 72057594037927937 is [5:56:2097] sender: [5:57:2057] recipient: [5:50:2095] Leader for TabletID 72057594037927937 is [5:56:2097] sender: [5:74:2057] recipient: [5:14:2061] !Reboot 72057594037927937 (actor [5:56:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [5:56:2097] sender: [5:80:2057] recipient: [5:36:2083] Leader for TabletID 72057594037927937 is [5:56:2097] sender: [5:83:2057] recipient: [5:82:2113] Leader for TabletID 72057594037927937 is [5:56:2097] sender: [5:84:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [5:85:2114] sender: [5:86:2057] recipient: [5:82:2113] !Reboot 72057594037927937 (actor [5:56:2097]) rebooted! !Reboot 72057594037927937 (actor [5:56:2097]) tablet resolver refreshed! new actor is[5:85:2114] Leader for TabletID 72057594037927937 is [5:85:2114] sender: [5:139:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:54:2057] recipient: [6:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:54:2057] recipient: [6:51:2095] Leader for TabletID 72057594037927937 is [6:56:2097] sender: [6:57:2057] recipient: [6:51:2095] Leader for TabletID 72057594037927937 is [6:56:2097] sender: [6:74:2057] recipient: [6:14:2061] !Reboot 72057594037927937 (actor [6:56:2097]) on event NKikimr::TEvKeyValue::TEvReadRange ! Leader for TabletID 72057594037927937 is [6:56:2097] sender: [6:80:2057] recipient: [6:36:2083] Leader for TabletID 72057594037927937 is [6:56:2097] sender: [6:83:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [6:56:2097] sender: [6:84:2057] recipient: [6:82:2113] Leader for TabletID 72057594037927937 is [6:85:2114] sender: [6:86:2057] recipient: [6:82:2113] !Reboot 72057594037927937 (actor [6:56:2097]) rebooted! !Reboot 72057594037927937 (actor [6:56:2097]) tablet resolver refreshed! 
new actor is[6:85:2114] Leader for TabletID 72057594037927937 is [6:85:2114] sender: [6:139:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:54:2057] recipient: [7:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:54:2057] recipient: [7:51:2095] Leader for TabletID 72057594037927937 is [7:56:2097] sender: [7:57:2057] recipient: [7:51:2095] Leader for TabletID 72057594037927937 is [7:56:2097] sender: [7:74:2057] recipient: [7:14:2061] !Reboot 72057594037927937 (actor [7:56:2097]) on event NKikimr::TEvKeyValue::TEvNotify ! Leader for TabletID 72057594037927937 is [7:56:2097] sender: [7:81:2057] recipient: [7:36:2083] Leader for TabletID 72057594037927937 is [7:56:2097] sender: [7:84:2057] recipient: [7:83:2113] Leader for TabletID 72057594037927937 is [7:56:2097] sender: [7:85:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [7:86:2114] sender: [7:87:2057] recipient: [7:83:2113] !Reboot 72057594037927937 (actor [7:56:2097]) rebooted! !Reboot 72057594037927937 (actor [7:56:2097]) tablet resolver refreshed! new actor is[7:86:2114] Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:54:2057] recipient: [8:50:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:54:2057] recipient: [8:50:2095] Leader for TabletID 72057594037927937 is [8:56:2097] sender: [8:57:2057] recipient: [8:50:2095] Leader for TabletID 72057594037927937 is [8:56:2097] sender: [8:74:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:54:2057] recipient: [9:52:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:54:2057] recipient: [9:52:2095] Leader for TabletID 72057594037927937 is [9:56:2097] sender: [9:57:2057] recipient: [9:52:2095] Leader for TabletID 72057594037927937 is [9:56:2097] sender: [9:74:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:54:2057] recipient: [10:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:54:2057] recipient: [10:51:2095] Leader for TabletID 72057594037927937 is [10:56:2097] sender: [10:57:2057] recipient: [10:51:2095] Leader for TabletID 72057594037927937 is [10:56:2097] sender: [10:74:2057] recipient: [10:14:2061] !Reboot 72057594037927937 (actor [10:56:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [10:56:2097] sender: [10:76:2057] recipient: [10:36:2083] Leader for TabletID 72057594037927937 is [10:56:2097] sender: [10:79:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [10:56:2097] sender: [10:80:2057] recipient: [10:78:2110] Leader for TabletID 72057594037927937 is [10:81:2111] sender: [10:82:2057] recipient: [10:78:2110] !Reboot 72057594037927937 (actor [10:56:2097]) rebooted! !Reboot 72057594037927937 (actor [10:56:2097]) tablet resolver refreshed! new actor is[10:81:2111] Leader for TabletID 72057594037927937 is [10:81:2111] sender: [10:135:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:54:2057] recipient: [11:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:54:2057] recipient: [11:51:2095] Leader for TabletID 72057594037927937 is [11:56:2097] sender: [11:57:2057] recipient: [11:51:2095] Leader for TabletID 72057594037927937 is [11:56:2097] sender: [11:74:2057] recipient: [11:14:2061] !Reboot 72057594037927937 (actor [11:56:2097]) on event NKikimr::TEvKeyValue::TEvAcquireLock ! 
Leader for TabletID 72057594037927937 is [11:56:2097] sender: [11:76:2057] recipient: [11:36:2083] Leader for TabletID 72057594037927937 is [11:56:2097] sender: [11:79:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [11:56:2097] sender: [11:80:2057] recipient: [11:78:2110] Leader for TabletID 72057594037927937 is [11:81:2111] sender: [11:82:2057] recipient: [11:78:2110] !Reboot 72057594037927937 (actor [11:56:2097]) rebooted! !Reboot 72057594037927937 (actor [11:56:2097]) tablet resolver refreshed! new actor is[11:81:2111] Leader for TabletID 72057594037927937 is [11:81:2111] sender: [11:135:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:54:2057] recipient: [12:52:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:54:2057] recipient: [12:52:2095] Leader for TabletID 72057594037927937 is [12:56:2097] sender: [12:57:2057] recipient: [12:52:2095] Leader for TabletID 72057594037927937 is [12:56:2097] sender: [12:74:2057] recipient: [12:14:2061] !Reboot 72057594037927937 (actor [12:56:2097]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [12:56:2097] sender: [12:77:2057] recipient: [12:36:2083] Leader for TabletID 72057594037927937 is [12:56:2097] sender: [12:80:2057] recipient: [12:79:2110] Leader for TabletID 72057594037927937 is [12:56:2097] sender: [12:81:2057] recipient: [12:14:2061] Leader for TabletID 72057594037927937 is [12:82:2111] sender: [12:83:2057] recipient: [12:79:2110] !Reboot 72057594037927937 (actor [12:56:2097]) rebooted! !Reboot 72057594037927937 (actor [12:56:2097]) tablet resolver refreshed! new actor is[12:82:2111] Leader for TabletID 72057594037927937 is [12:82:2111] sender: [12:136:2057] recipient: [12:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [13:54:2057] recipient: [13:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [13:54:2057] recipient: [13:51:2095] Leader for TabletID 72057594037927937 is [13:56:2097] sender: [13:57:2057] recipient: [13:51:2095] Leader for TabletID 72057594037927937 is [13:56:2097] sender: [13:74:2057] recipient: [13:14:2061] !Reboot 72057594037927937 (actor [13:56:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [13:56:2097] sender: [13:80:2057] recipient: [13:36:2083] Leader for TabletID 72057594037927937 is [13:56:2097] sender: [13:83:2057] recipient: [13:14:2061] Leader for TabletID 72057594037927937 is [13:56:2097] sender: [13:84:2057] recipient: [13:82:2113] Leader for TabletID 72057594037927937 is [13:85:2114] sender: [13:86:2057] recipient: [13:82:2113] !Reboot 72057594037927937 (actor [13:56:2097]) rebooted! !Reboot 72057594037927937 (actor [13:56:2097]) tablet resolver refreshed! new actor is[13:85:2114] Leader for TabletID 72057594037927937 is [13:85:2114] sender: [13:139:2057] recipient: [13:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [14:54:2057] recipient: [14:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [14:54:2057] recipient: [14:51:2095] Leader for TabletID 72057594037927937 is [14:56:2097] sender: [14:57:2057] recipient: [14:51:2095] Leader for TabletID 72057594037927937 is [14:56:2097] sender: [14:74:2057] recipient: [14:14:2061] !Reboot 72057594037927937 (actor [14:56:2097]) on event NKikimr::TEvKeyValue::TEvExecuteTransaction ! 
Leader for TabletID 72057594037927937 is [14:56:2097] sender: [14:80:2057] recipient: [14:36:2083] Leader for TabletID 72057594037927937 is [14:56:2097] sender: [14:83:2057] recipient: [14:14:2061] Leader for TabletID 72057594037927937 is [14:56:2097] sender: [14:84:2057] recipient: [14:82:2113] Leader for TabletID 72057594037927937 is [14:85:2114] sender: [14:86:2057] recipient: [14:82:2113] !Reboot 72057594037927937 (actor [14:56:2097]) rebooted! !Reboot 72057594037927937 (actor [14:56:2097]) tablet resolver refreshed! new actor is[14:85:2114] Leader for TabletID 72057594037927937 is [14:85:2114] sender: [14:139:2057] recipient: [14:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [15:54:2057] recipient: [15:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [15:54:2057] recipient: [15:51:2095] Leader for TabletID 72057594037927937 is [15:56:2097] sender: [15:57:2057] recipient: [15:51:2095] Leader for TabletID 72057594037927937 is [15:56:2097] sender: [15:74:2057] recipient: [15:14:2061] !Reboot 72057594037927937 (actor [15:56:2097]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [15:56:2097] sender: [15:81:2057] recipient: [15:36:2083] Leader for TabletID 72057594037927937 is [15:56:2097] sender: [15:84:2057] recipient: [15:14:2061] Leader for TabletID 72057594037927937 is [15:56:2097] sender: [15:85:2057] recipient: [15:83:2113] Leader for TabletID 72057594037927937 is [15:86:2114] sender: [15:87:2057] recipient: [15:83:2113] !Reboot 72057594037927937 (actor [15:56:2097]) rebooted! !Reboot 72057594037927937 (actor [15:56:2097]) tablet resolver refreshed! new actor is[15:86:2114] Leader for TabletID 72057594037927937 is [15:86:2114] sender: [15:140:2057] recipient: [15:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [16:54:2057] recipient: [16:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [16:54:2057] recipient: [16:51:2095] Leader for TabletID 72057594037927937 is [16:56:2097] sender: [16:57:2057] recipient: [16:51:2095] Leader for TabletID 72057594037927937 is [16:56:2097] sender: [16:74:2057] recipient: [16:14:2061] !Reboot 72057594037927937 (actor [16:56:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [16:56:2097] sender: [16:84:2057] recipient: [16:36:2083] Leader for TabletID 72057594037927937 is [16:56:2097] sender: [16:87:2057] recipient: [16:14:2061] Leader for TabletID 72057594037927937 is [16:56:2097] sender: [16:88:2057] recipient: [16:86:2116] Leader for TabletID 72057594037927937 is [16:89:2117] sender: [16:90:2057] recipient: [16:86:2116] !Reboot 72057594037927937 (actor [16:56:2097]) rebooted! !Reboot 72057594037927937 (actor [16:56:2097]) tablet resolver refreshed! new actor is[16:89:2117] Leader for TabletID 72057594037927937 is [16:89:2117] sender: [16:143:2057] recipient: [16:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [17:54:2057] recipient: [17:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [17:54:2057] recipient: [17:51:2095] Leader for TabletID 72057594037927937 is [17:56:2097] sender: [17:57:2057] recipient: [17:51:2095] Leader for TabletID 72057594037927937 is [17:56:2097] sender: [17:74:2057] recipient: [17:14:2061] !Reboot 72057594037927937 (actor [17:56:2097]) on event NKikimr::TEvKeyValue::TEvExecuteTransaction ! 
Leader for TabletID 72057594037927937 is [17:56:2097] sender: [17:84:2057] recipient: [17:36:2083] Leader for TabletID 72057594037927937 is [17:56:2097] sender: [17:87:2057] recipient: [17:14:2061] Leader for TabletID 72057594037927937 is [17:56:2097] sender: [17:88:2057] recipient: [17:86:2116] Leader for TabletID 72057594037927937 is [17:89:2117] sender: [17:90:2057] recipient: [17:86:2116] !Reboot 72057594037927937 (actor [17:56:2097]) rebooted! !Reboot 72057594037927937 (actor [17:56:2097]) tablet resolver refreshed! new actor is[17:89:2117] Leader for TabletID 72057594037927937 is [17:89:2117] sender: [17:143:2057] recipient: [17:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [18:54:2057] recipient: [18:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [18:54:2057] recipient: [18:51:2095] Leader for TabletID 72057594037927937 is [18:56:2097] sender: [18:57:2057] recipient: [18:51:2095] Leader for TabletID 72057594037927937 is [18:56:2097] sender: [18:74:2057] recipient: [18:14:2061] !Reboot 72057594037927937 (actor [18:56:2097]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [18:56:2097] sender: [18:85:2057] recipient: [18:36:2083] Leader for TabletID 72057594037927937 is [18:56:2097] sender: [18:87:2057] recipient: [18:14:2061] Leader for TabletID 72057594037927937 is [18:56:2097] sender: [18:89:2057] recipient: [18:88:2116] Leader for TabletID 72057594037927937 is [18:90:2117] sender: [18:91:2057] recipient: [18:88:2116] !Reboot 72057594037927937 (actor [18:56:2097]) rebooted! !Reboot 72057594037927937 (actor [18:56:2097]) tablet resolver refreshed! new actor is[18:90:2117] Leader for TabletID 72057594037927937 is [18:90:2117] sender: [18:144:2057] recipient: [18:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [19:54:2057] recipient: [19:50:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [19:54:2057] recipient: [19:50:2095] Leader for TabletID 72057594037927937 is [19:56:2097] sender: [19:57:2057] recipient: [19:50:2095] Leader for TabletID 72057594037927937 is [19:56:2097] sender: [19:74:2057] recipient: [19:14:2061] >> RetryPolicy::TWriteSession_TestBrokenPolicy >> TTxDataShardUploadRows::TestUploadShadowRowsShadowDataSplitThenPublish >> IncrementalBackup::ComplexRestoreBackupCollection-WithIncremental [GOOD] >> TTxDataShardUploadRows::TestUploadRows >> DataShardSnapshots::DelayedWriteReplyAfterSplit [GOOD] >> DataShardSnapshots::DelayedWriteReadableAfterSplitAndReboot ------- [TM] {asan, default-linux-x86_64, release} ydb/services/persqueue_v1/ut/new_schemecache_ut/unittest >> TPersQueueCommonTest::Auth_WriteSessionWithValidTokenAndACEAndThenRemoveACEAndSendWriteRequest_SessionClosedWithUnauthorizedErrorAfterSuccessfullWriteResponse [GOOD] Test command err: === Server->StartServer(false); 2025-05-07T08:53:53.810718Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501624380871933192:2082];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:53:53.810769Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-05-07T08:53:54.094537Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7501624388450881953:2252];send_to=[0:7307199536658146131:7762515]; 
2025-05-07T08:53:54.094879Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003455/r3tmp/tmpQ13TjV/pdisk_1.dat 2025-05-07T08:53:54.388117Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-05-07T08:53:54.384310Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-05-07T08:53:54.883114Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:53:55.218238Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:53:55.219923Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:53:55.244947Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:53:55.245035Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:53:55.271715Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:53:55.271792Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:53:55.296544Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-05-07T08:53:55.296662Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:53:55.302899Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 26236, node 1 2025-05-07T08:53:55.661124Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/zvgn/003455/r3tmp/yandexOBFehi.tmp 2025-05-07T08:53:55.661172Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/zvgn/003455/r3tmp/yandexOBFehi.tmp 2025-05-07T08:53:55.661314Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/zvgn/003455/r3tmp/yandexOBFehi.tmp 2025-05-07T08:53:55.661465Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-05-07T08:53:55.808093Z INFO: TTestServer started on Port 61352 GrpcPort 26236 TClient is connected to server localhost:61352 PQClient connected to localhost:26236 === TenantModeEnabled() = 1 === Init PQ - start server on port 26236 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:53:56.611823Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "Root" StoragePools { Name: "/Root:test" Kind: "test" } } } TxId: 281474976715657 TabletId: 72057594046644480 PeerName: "" , at schemeshard: 72057594046644480 2025-05-07T08:53:56.612013Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //Root, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-05-07T08:53:56.612215Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 0 2025-05-07T08:53:56.612475Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 281474976715657:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-05-07T08:53:56.612512Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-05-07T08:53:56.626509Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 281474976715657, response: Status: StatusAccepted TxId: 281474976715657 SchemeshardId: 72057594046644480 PathId: 1, at schemeshard: 72057594046644480 2025-05-07T08:53:56.626657Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976715657, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //Root 2025-05-07T08:53:56.626848Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 281474976715657:0, at schemeshard: 72057594046644480 2025-05-07T08:53:56.626901Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 281474976715657:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046644480 2025-05-07T08:53:56.626915Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 281474976715657:0 ProgressState no shards to create, do next state 2025-05-07T08:53:56.626925Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 281474976715657:0 2 -> 3 2025-05-07T08:53:56.635102Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 281474976715657:0, 
at schemeshard: 72057594046644480 2025-05-07T08:53:56.635156Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 281474976715657:0 ProgressState, at schemeshard: 72057594046644480 2025-05-07T08:53:56.635180Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 281474976715657:0 3 -> 128 waiting... 2025-05-07T08:53:56.641116Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 281474976715657, at schemeshard: 72057594046644480 2025-05-07T08:53:56.641151Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 281474976715657, ready parts: 0/1, is published: true 2025-05-07T08:53:56.641188Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 281474976715657, at schemeshard: 72057594046644480 2025-05-07T08:53:56.643711Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 281474976715657:0, at schemeshard: 72057594046644480 2025-05-07T08:53:56.643764Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 281474976715657:0, at schemeshard: 72057594046644480 2025-05-07T08:53:56.643824Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 281474976715657:0, at tablet# 72057594046644480 2025-05-07T08:53:56.644200Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 281474976715657 ready parts: 1/1 2025-05-07T08:53:56.650881Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046644480 Flags: 2 } ExecLevel: 0 TxId: 281474976715657 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:53:56.654950Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 281474976715657:4294967295 from tablet: 72057594046644480 to tablet: 72057594046316545 cookie: 0:281474976715657 msg type: 269090816 2025-05-07T08:53:56.655122Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 281474976715657, partId: 4294967295, tablet: 72057594046316545 2025-05-07T08:53:56.659136Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 1746608036705, transactions count in step: 1, at schemeshard: 72057594046644480 2025-05-07T08:53:56.659296Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1746608036705 MediatorID: 72057594046382081 TabletID: 72057594046644480, at schemeshard: 72057594046644480 2025-05-07T08:53:56.659328Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 281474976715657:0, at tablet# 72057594046644480 2025-05-07T08:53:56.659596Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 281474976715657:0 128 -> 240 2025-05-07T08:53:56.659654Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 281474976715657:0, at tablet# 72057594046644480 2025-05-07T08:53:56.660131Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 1 2025-05-07T08:53:56.660206Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046644480, LocalPathId: 1], at schemeshard: 72057594046644480 2025-05-07T08:53:56.662944Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046644480 2025-05-07T08:53:56.662968Z no ... 32-e87507f5-53e733ac-37a38659_0 2025-05-07T08:54:10.710967Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046644480, cookie: 281474976715665 ===Assert streaming op1 ===Assert streaming op2 2025-05-07T08:54:10.712910Z node 3 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 2 sessionId: test-group-id|cfd70432-e87507f5-53e733ac-37a38659_0 grpc read done: success: 1 data: write_request[data omitted] 2025-05-07T08:54:10.713197Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:538: TPartitionWriter 72075186224037889 (partition=0) Received event: NKikimr::NPQ::TEvPartitionWriter::TEvWriteRequest 2025-05-07T08:54:10.713646Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:538: TPartitionWriter 72075186224037889 (partition=0) Received event: NKikimr::TEvPersQueue::TEvResponse ===ModifyAcl BEFORE MODIFY PERMISSIONS 2025-05-07T08:54:10.772290Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:538: TPartitionWriter 72075186224037889 (partition=0) Received event: NKikimr::TEvPersQueue::TEvResponse 2025-05-07T08:54:10.782428Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/Root/acc" OperationType: ESchemeOpModifyACL ModifyACL { Name: "topic1" DiffACL: "\n\031\010\001\022\025\032\023test_user_0@builtin" } } TxId: 281474976715666 TabletId: 72057594046644480 PeerName: "ipv6:[::1]:36198" , at schemeshard: 72057594046644480 2025-05-07T08:54:10.782627Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_modify_acl.cpp:33: TModifyACL Propose, path: /Root/acc/topic1, operationId: 281474976715666:0, at schemeshard: 72057594046644480 2025-05-07T08:54:10.782746Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5180: ExamineTreeVFS visit path id [OwnerId: 72057594046644480, LocalPathId: 10] name: topic1 type: EPathTypePersQueueGroup state: EPathStateNoChanges stepDropped: 0 droppedTxId: 0 parent: [OwnerId: 72057594046644480, LocalPathId: 9] 2025-05-07T08:54:10.782757Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5196: ExamineTreeVFS run path id: [OwnerId: 72057594046644480, LocalPathId: 10] 2025-05-07T08:54:10.782899Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 281474976715666:1, propose status:StatusSuccess, reason: , at schemeshard: 72057594046644480 2025-05-07T08:54:10.782923Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-05-07T08:54:10.783001Z node 3 
:FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976715666:0 progress is 1/1 2025-05-07T08:54:10.783012Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 281474976715666 ready parts: 1/1 2025-05-07T08:54:10.783031Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976715666:0 progress is 1/1 2025-05-07T08:54:10.783039Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 281474976715666 ready parts: 1/1 2025-05-07T08:54:10.783085Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046644480, LocalPathId: 10] was 3 2025-05-07T08:54:10.783129Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 281474976715666, ready parts: 1/1, is published: false 2025-05-07T08:54:10.783149Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046644480, LocalPathId: 10], at schemeshard: 72057594046644480 2025-05-07T08:54:10.783161Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 281474976715666 ready parts: 1/1 2025-05-07T08:54:10.783174Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 281474976715666:0 2025-05-07T08:54:10.783185Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 281474976715666, publications: 1, subscribers: 0 2025-05-07T08:54:10.783198Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 281474976715666, [OwnerId: 72057594046644480, LocalPathId: 10], 4 2025-05-07T08:54:10.790636Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 281474976715666, response: Status: StatusSuccess TxId: 281474976715666 SchemeshardId: 72057594046644480, at schemeshard: 72057594046644480 2025-05-07T08:54:10.790875Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976715666, database: /Root, subject: , status: StatusSuccess, operation: MODIFY ACL, path: /Root/acc/topic1, remove access: -():test_user_0@builtin:- 2025-05-07T08:54:10.791047Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046644480 2025-05-07T08:54:10.791062Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046644480, txId: 281474976715666, path id: [OwnerId: 72057594046644480, LocalPathId: 10] 2025-05-07T08:54:10.791257Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046644480 2025-05-07T08:54:10.791276Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [3:7501624431546835983:2378], at schemeshard: 72057594046644480, txId: 281474976715666, path id: 10 2025-05-07T08:54:10.792498Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 
Generation: 2 LocalPathId: 10 Version: 4 PathOwnerId: 72057594046644480, cookie: 281474976715666 2025-05-07T08:54:10.792615Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 Generation: 2 LocalPathId: 10 Version: 4 PathOwnerId: 72057594046644480, cookie: 281474976715666 2025-05-07T08:54:10.792629Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046644480, txId: 281474976715666 2025-05-07T08:54:10.792647Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046644480, txId: 281474976715666, pathId: [OwnerId: 72057594046644480, LocalPathId: 10], version: 4 2025-05-07T08:54:10.792669Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046644480, LocalPathId: 10] was 4 2025-05-07T08:54:10.792765Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046644480, txId: 281474976715666, subscribers: 0 ===Wait for session created with token with removed ACE to die 2025-05-07T08:54:10.799037Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046644480, cookie: 281474976715666 2025-05-07T08:54:11.098269Z node 3 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [3:7501624461611608203:2406], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-05-07T08:54:11.098617Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2147: SessionId: ydb://session/3?node_id=3&id=ZjE5OTkwZjYtODM1NTJkYzAtNTJlYjkwYjEtNjU3NGIwNWM=, ActorId: [3:7501624461611608196:2402], ActorState: ExecuteState, TraceId: 01jtmz7mvye6xabf2mf246ya1x, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-05-07T08:54:11.099111Z node 3 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-05-07T08:54:11.707161Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:566: init check schema 2025-05-07T08:54:11.708330Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:627: session v1 cookie: 2 sessionId: test-group-id|cfd70432-e87507f5-53e733ac-37a38659_0 describe result for acl check 2025-05-07T08:54:11.708464Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:809: session v1 error cookie: 2 reason: access to topic 'Topic /Root/acc/topic1 in database: /Root' denied for 'test_user_0@builtin' due to 'no WriteTopic rights', Marker# PQ1125 sessionId: test-group-id|cfd70432-e87507f5-53e733ac-37a38659_0 2025-05-07T08:54:11.708884Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 2 sessionId: test-group-id|cfd70432-e87507f5-53e733ac-37a38659_0 is DEAD 2025-05-07T08:54:11.709180Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:538: TPartitionWriter 72075186224037889 (partition=0) Received event: NActors::TEvents::TEvPoison status: UNAUTHORIZED issues { message: "access to topic \'Topic /Root/acc/topic1 in database: /Root\' denied for \'test_user_0@builtin\' due to \'no WriteTopic rights\', Marker# PQ1125" issue_code: 500018 severity: 1 } 2025-05-07T08:54:12.128012Z node 3 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [3:7501624465906575524:2414], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-05-07T08:54:12.130245Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2147: SessionId: ydb://session/3?node_id=3&id=NzdkMDE5YmUtNjlmYWQ1Ny1jYzg2ZTY5Zi1lMjYyMjBlZA==, ActorId: [3:7501624465906575522:2413], ActorState: ExecuteState, TraceId: 01jtmz7nw6b14ems5ykbv5y8wn, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-05-07T08:54:12.130700Z node 3 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpVectorIndexes::OrderByCosineDistanceNullableLevel2 [GOOD] Test command err: Trying to start YDB, gRPC: 3010, MsgBus: 12376 2025-05-07T08:52:25.546462Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501624006099714510:2064];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:52:25.546533Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003c3d/r3tmp/tmpAvlV8G/pdisk_1.dat 2025-05-07T08:52:26.182712Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:52:26.201289Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:52:26.201450Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:52:26.203856Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 3010, node 1 2025-05-07T08:52:26.438077Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:52:26.438109Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:52:26.438117Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:52:26.438234Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:12376 TClient is connected to server localhost:12376 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:52:27.353466Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:52:27.420374Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:52:27.690428Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:52:28.073854Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:52:28.248896Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:52:30.550227Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501624006099714510:2064];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:52:30.550307Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:52:31.447068Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501624031869519935:2409], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:31.447179Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:31.890796Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T08:52:31.955990Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T08:52:32.030226Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T08:52:32.072980Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T08:52:32.150602Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T08:52:32.217509Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T08:52:32.268419Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T08:52:32.377666Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501624036164487900:2474], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:32.377743Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:32.377934Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501624036164487905:2477], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:52:32.383810Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-05-07T08:52:32.400382Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501624036164487907:2478], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T08:52:32.466478Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501624036164487960:3428] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:52:33.692967Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269877761, Sender [1:7501624040459455557:3605], Recipient [1:7501624010394682207:2186]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-05-07T08:52:33.693015Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4937: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-05-07T08:52:33.693031Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5713: Pipe server connected, at tablet: 72057594046644480 2025-05-07T08:52:33.693081Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271122432, Sender [1:7501624040459455553:3602], Recipient [1:7501624010394682207:2186]: {TEvModifySchemeTransaction txid# 281474976710672 TabletId# 72057594046644480} 2025-05-07T08:52:33.693099Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4851: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-05-07T08:52:33.749229Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/Root" OperationType: ESchemeOpCreateTable CreateTable { Name: "TestTable" Columns { Name: "pk" Type: "Int64" NotNull: true } Columns { Name: "emb" Type: "String" NotNull: true } Columns { Name: "data" Type: "String" NotNull: true } KeyColumnNames: "pk" PartitionConfig { PartitioningPolicy { MinPartitionsCount: 3 } ColumnFamilies { Id: 0 StorageConfig { SysLog { PreferredPoolKind: "test" } Log { PreferredPoolKind: "test" } Data { PreferredPoolKind: "test" } } } } SplitBoundary { KeyPrefix { Tuple { Optional { Int64: 4 } } } } SplitBoundary { KeyPrefix { Tuple { Optional { Int64: 6 } } } } Temporary: false } CreateIndexedTable { } } TxId: 281474976710672 TabletId: 72057594046644480 PeerName: "ipv6:[::1]:59534" , at schemeshard: 72057594046644480 2025-05-07T08:52:33.749689Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_table.cpp:426: TCreateTable Propose, path: /Root/TestTable, opId: 281474976710672:0, at schemeshard: 72057594046644480 2025-05-07T08:52:33.749871Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_create_table.cpp:433: TCreateTable Propose, path: /Root/TestTable, opId: 281474976710672:0, schema: Name: "TestTable" Columns { Name: "pk" Type: "Int64" NotNull: true } Columns { Name: "emb" Type: "String" NotNull: true } Columns { Name: "data" Type: "String" NotNull: true } KeyColumnNames: "pk" PartitionConfig { PartitioningPolicy { MinPartitionsCount: 3 } ColumnFamilies { Id: 0 StorageConfig { SysLog { PreferredPoolKind: "test" } Log { PreferredPoolKind: "test" } Data { PreferredPoolKind: "test" } } } } SplitBoundary { KeyPrefix { Tuple { Optional { Int64: 4 } } } } SplitBoundary { KeyPrefix { Tuple { Optional { Int64: 6 } } } } Temporary: false, at schemeshard: 72057594046644480 2025-05-07T08:52:33.750891Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.c ... 
cpp:568: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037906 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 5] raw table stats: DataSize: 0 RowCount: 0 IndexSize: 0 InMemSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 0 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 2025-05-07T08:54:12.661315Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269553162, Sender [2:7501624185069696740:2363], Recipient [2:7501624180774728504:2139]: NKikimrTxDataShard.TEvPeriodicTableStats DatashardId: 72075186224037909 TableLocalId: 5 Generation: 1 Round: 5 TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 InMemSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 0 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 23 Memory: 119352 GroupWriteThroughput { GroupID: 2181038080 Channel: 1 Throughput: 120 } GroupWriteThroughput { GroupID: 2181038080 Channel: 0 Throughput: 32 } } ShardState: 2 UserTablePartOwners: 72075186224037909 NodeId: 2 StartTime: 1746607987457 TableOwnerId: 72057594046644480 FollowerId: 0 2025-05-07T08:54:12.661330Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4877: StateWork, processing event TEvDataShard::TEvPeriodicTableStats 2025-05-07T08:54:12.661346Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:561: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037909 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 5] state 'Ready' dataSize 0 rowCount 0 cpuUsage 0.0023 2025-05-07T08:54:12.661427Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:568: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037909 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 5] raw table stats: DataSize: 0 RowCount: 0 IndexSize: 0 InMemSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 0 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 2025-05-07T08:54:12.661593Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269553162, Sender [2:7501624185069696750:2365], Recipient [2:7501624180774728504:2139]: NKikimrTxDataShard.TEvPeriodicTableStats DatashardId: 72075186224037910 TableLocalId: 5 Generation: 1 Round: 5 TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 InMemSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 0 
LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 24 Memory: 119352 GroupWriteThroughput { GroupID: 2181038080 Channel: 1 Throughput: 120 } GroupWriteThroughput { GroupID: 2181038080 Channel: 0 Throughput: 32 } } ShardState: 2 UserTablePartOwners: 72075186224037910 NodeId: 2 StartTime: 1746607987459 TableOwnerId: 72057594046644480 FollowerId: 0 2025-05-07T08:54:12.661608Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4877: StateWork, processing event TEvDataShard::TEvPeriodicTableStats 2025-05-07T08:54:12.661626Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:561: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037910 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 5] state 'Ready' dataSize 0 rowCount 0 cpuUsage 0.0024 2025-05-07T08:54:12.661698Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:568: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037910 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 5] raw table stats: DataSize: 0 RowCount: 0 IndexSize: 0 InMemSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 0 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 2025-05-07T08:54:12.661867Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269553162, Sender [2:7501624185069696736:2359], Recipient [2:7501624180774728504:2139]: NKikimrTxDataShard.TEvPeriodicTableStats DatashardId: 72075186224037901 TableLocalId: 5 Generation: 1 Round: 5 TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 InMemSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 0 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 45 Memory: 119352 GroupWriteThroughput { GroupID: 2181038080 Channel: 1 Throughput: 120 } GroupWriteThroughput { GroupID: 2181038080 Channel: 0 Throughput: 32 } } ShardState: 2 UserTablePartOwners: 72075186224037901 NodeId: 2 StartTime: 1746607987457 TableOwnerId: 72057594046644480 FollowerId: 0 2025-05-07T08:54:12.661883Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4877: StateWork, processing event TEvDataShard::TEvPeriodicTableStats 2025-05-07T08:54:12.661901Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:561: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037901 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 5] state 'Ready' dataSize 0 rowCount 0 cpuUsage 0.0045 2025-05-07T08:54:12.662004Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:568: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037901 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 5] raw table stats: DataSize: 0 RowCount: 0 IndexSize: 0 InMemSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 
PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 0 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 2025-05-07T08:54:12.662202Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269553162, Sender [2:7501624185069696749:2364], Recipient [2:7501624180774728504:2139]: NKikimrTxDataShard.TEvPeriodicTableStats DatashardId: 72075186224037907 TableLocalId: 5 Generation: 1 Round: 5 TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 InMemSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 0 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 32 Memory: 119352 GroupWriteThroughput { GroupID: 2181038080 Channel: 1 Throughput: 120 } GroupWriteThroughput { GroupID: 2181038080 Channel: 0 Throughput: 32 } } ShardState: 2 UserTablePartOwners: 72075186224037907 NodeId: 2 StartTime: 1746607987457 TableOwnerId: 72057594046644480 FollowerId: 0 2025-05-07T08:54:12.662219Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4877: StateWork, processing event TEvDataShard::TEvPeriodicTableStats 2025-05-07T08:54:12.662239Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:561: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037907 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 5] state 'Ready' dataSize 0 rowCount 0 cpuUsage 0.0032 2025-05-07T08:54:12.662320Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:568: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037907 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 5] raw table stats: DataSize: 0 RowCount: 0 IndexSize: 0 InMemSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 0 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 2025-05-07T08:54:12.669558Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269553162, Sender [2:7501624185069696737:2360], Recipient [2:7501624180774728504:2139]: NKikimrTxDataShard.TEvPeriodicTableStats DatashardId: 72075186224037902 TableLocalId: 5 Generation: 1 Round: 5 TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 InMemSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 0 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 27 Memory: 119352 GroupWriteThroughput { GroupID: 2181038080 Channel: 1 Throughput: 120 } GroupWriteThroughput { GroupID: 
2181038080 Channel: 0 Throughput: 32 } } ShardState: 2 UserTablePartOwners: 72075186224037902 NodeId: 2 StartTime: 1746607987457 TableOwnerId: 72057594046644480 FollowerId: 0 2025-05-07T08:54:12.669611Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4877: StateWork, processing event TEvDataShard::TEvPeriodicTableStats 2025-05-07T08:54:12.669654Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:561: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037902 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 5] state 'Ready' dataSize 0 rowCount 0 cpuUsage 0.0027 2025-05-07T08:54:12.669757Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:568: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037902 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 5] raw table stats: DataSize: 0 RowCount: 0 IndexSize: 0 InMemSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 0 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/unittest >> TPQTest::TestReadSubscription [GOOD] Test command err: Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:103:2057] recipient: [1:101:2135] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:103:2057] recipient: [1:101:2135] Leader for TabletID 72057594037927937 is [1:107:2139] sender: [1:108:2057] recipient: [1:101:2135] 2025-05-07T08:50:56.644582Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-05-07T08:50:56.644729Z node 1 :PERSQUEUE INFO: pq_impl.cpp:794: [PQ: 72057594037927937] doesn't have tx writes info Leader for TabletID 72057594037927938 is [0:0:0] sender: [1:149:2057] recipient: [1:147:2170] IGNORE Leader for TabletID 72057594037927938 is [0:0:0] sender: [1:149:2057] recipient: [1:147:2170] Leader for TabletID 72057594037927938 is [1:153:2174] sender: [1:154:2057] recipient: [1:147:2170] Leader for TabletID 72057594037927937 is [1:107:2139] sender: [1:179:2057] recipient: [1:14:2061] 2025-05-07T08:50:56.672560Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-05-07T08:50:56.690476Z node 1 :PERSQUEUE INFO: pq_impl.cpp:1481: [PQ: 72057594037927937] Config applied version 1 actor [1:177:2192] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 TopicName: "rt3.dc1--asdfgs--topic" Version: 1 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 1 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 1 Important: false } 2025-05-07T08:50:56.691731Z node 1 :PERSQUEUE INFO: partition_init.cpp:879: [PQ: 72057594037927937, Partition: 0, State: StateInit] 
bootstrapping 0 [1:185:2198] 2025-05-07T08:50:56.694577Z node 1 :PERSQUEUE INFO: partition.cpp:557: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 2 [1:185:2198] 2025-05-07T08:50:56.697035Z node 1 :PERSQUEUE INFO: partition_init.cpp:879: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [1:186:2199] 2025-05-07T08:50:56.698991Z node 1 :PERSQUEUE INFO: partition.cpp:557: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 2 [1:186:2199] 2025-05-07T08:50:56.709963Z node 1 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie owner1|169467da-2c36c653-321277fb-6659d397_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner owner1 2025-05-07T08:50:56.710738Z node 1 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie owner2|dd351430-bfa5c457-c18261a5-c2baeef0_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner owner2 2025-05-07T08:50:56.740491Z node 1 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie owner1|202a2560-d7471df-75e733c5-3ca4eb93_1 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner owner1 Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:103:2057] recipient: [2:101:2135] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:103:2057] recipient: [2:101:2135] Leader for TabletID 72057594037927937 is [2:107:2139] sender: [2:108:2057] recipient: [2:101:2135] 2025-05-07T08:50:57.322790Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-05-07T08:50:57.322904Z node 2 :PERSQUEUE INFO: pq_impl.cpp:794: [PQ: 72057594037927937] doesn't have tx writes info Leader for TabletID 72057594037927938 is [0:0:0] sender: [2:149:2057] recipient: [2:147:2170] IGNORE Leader for TabletID 72057594037927938 is [0:0:0] sender: [2:149:2057] recipient: [2:147:2170] Leader for TabletID 72057594037927938 is [2:153:2174] sender: [2:154:2057] recipient: [2:147:2170] Leader for TabletID 72057594037927937 is [2:107:2139] sender: [2:179:2057] recipient: [2:14:2061] 2025-05-07T08:50:57.350908Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-05-07T08:50:57.352017Z node 2 :PERSQUEUE INFO: pq_impl.cpp:1481: [PQ: 72057594037927937] Config applied version 2 actor [2:177:2192] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 TopicName: "rt3.dc1--asdfgs--topic" Version: 2 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 2 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 2 Important: false } 2025-05-07T08:50:57.352801Z node 2 :PERSQUEUE INFO: partition_init.cpp:879: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [2:185:2198] 2025-05-07T08:50:57.355626Z node 2 :PERSQUEUE INFO: partition.cpp:557: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 2 [2:185:2198] 2025-05-07T08:50:57.357658Z node 2 
:PERSQUEUE INFO: partition_init.cpp:879: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [2:186:2199] 2025-05-07T08:50:57.359758Z node 2 :PERSQUEUE INFO: partition.cpp:557: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 2 [2:186:2199] 2025-05-07T08:50:57.367926Z node 2 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie owner1|826ca8ac-cc0d9f10-4c447164-b38f90ea_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner owner1 2025-05-07T08:50:57.368484Z node 2 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie owner2|69c2211-ea033e1e-fc8674e5-8caa0ee9_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner owner2 2025-05-07T08:50:57.388667Z node 2 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie owner1|b02c1d3a-11b02b4c-98736e9c-2d85e201_1 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner owner1 Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:103:2057] recipient: [3:101:2135] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:103:2057] recipient: [3:101:2135] Leader for TabletID 72057594037927937 is [3:107:2139] sender: [3:108:2057] recipient: [3:101:2135] 2025-05-07T08:50:57.724434Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-05-07T08:50:57.724518Z node 3 :PERSQUEUE INFO: pq_impl.cpp:794: [PQ: 72057594037927937] doesn't have tx writes info Leader for TabletID 72057594037927938 is [0:0:0] sender: [3:149:2057] recipient: [3:147:2170] IGNORE Leader for TabletID 72057594037927938 is [0:0:0] sender: [3:149:2057] recipient: [3:147:2170] Leader for TabletID 72057594037927938 is [3:153:2174] sender: [3:154:2057] recipient: [3:147:2170] Leader for TabletID 72057594037927937 is [3:107:2139] sender: [3:179:2057] recipient: [3:14:2061] 2025-05-07T08:50:57.745801Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-05-07T08:50:57.746646Z node 3 :PERSQUEUE INFO: pq_impl.cpp:1481: [PQ: 72057594037927937] Config applied version 3 actor [3:177:2192] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 TopicName: "rt3.dc1--asdfgs--topic" Version: 3 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 3 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 3 Important: false } 2025-05-07T08:50:57.747267Z node 3 :PERSQUEUE INFO: partition_init.cpp:879: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [3:185:2198] 2025-05-07T08:50:57.749648Z node 3 :PERSQUEUE INFO: partition.cpp:557: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 2 [3:185:2198] 2025-05-07T08:50:57.751546Z node 3 :PERSQUEUE INFO: partition_init.cpp:879: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [3:186:2199] 2025-05-07T08:50:57.753259Z node 3 :PERSQUEUE INFO: partition.cpp:557: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 
'rt3.dc1--asdfgs--topic' partition 1 generation 2 [3:186:2199] 2025-05-07T08:50:57.763343Z node 3 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie owner1|91ffacfe-13cae0da-a9a19e92-9b88eaa9_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner owner1 2025-05-07T08:50:57.763834Z node 3 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie owner2|a4c8b48d-e58d7bbb-488056b1-ae30bc65_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner owner2 2025-05-07T08:50:57.785120Z node 3 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie owner1|f90bbebb-6cd99600-505dca4f-3926afff_1 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner owner1 Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:103:2057] recipient: [4:101:2135] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:103:2057] recipient: [4:101:2135] Leader for TabletID 72057594037927937 is [4:107:2139] sender: [4:108:2057] recipient: [4:101:2135] 2025-05-07T08:50:58.267807Z node 4 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-05-07T08:50:58.267903Z node 4 :PERSQUEUE INFO: pq_impl.cpp:794: [PQ: 72057594037927937] doesn't have tx writes info Leader for TabletID 72057594037927938 is [0:0:0] sender: [4:149:2057] recipient: [4:147:2170] IGNORE Leader for TabletID 72057594037927938 is [0:0:0] sender: [4:149:2057] recipient: [4:147:2170] Leader for TabletID 72057594037927938 is [4:153:2174] sender: [4:154:2057] recipient: [4:147:2170] Leader for TabletID 72057594037927937 is [4:107:2139] sender: [4:179:2057] recipient: [4:14:2061] 2025-05-07T08:50:58.318067Z node 4 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-05-07T08:50:58.319031Z node 4 :PERSQUEUE INFO: pq_impl.cpp:1481: [PQ: 72057594037927937] Config applied version 4 actor [4:177:2192] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 TopicName: "rt3.dc1--asdfgs--topic" Version: 4 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 4 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 4 Important: false } 2025-05-07T08:50:58.319700Z node 4 :PERSQUEUE INFO: partition_init.cpp:879: [PQ: 72057594037927937, Partition: 0, State: StateInit] ... 
topic 'rt3.dc1--asdfgs--topic' partition 0 generation 2 [101:185:2198] 2025-05-07T08:54:08.490973Z node 101 :PERSQUEUE INFO: partition_init.cpp:879: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [101:186:2199] 2025-05-07T08:54:08.492833Z node 101 :PERSQUEUE INFO: partition.cpp:557: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 2 [101:186:2199] 2025-05-07T08:54:08.495922Z node 101 :PERSQUEUE INFO: partition_init.cpp:879: [PQ: 72057594037927937, Partition: 2, State: StateInit] bootstrapping 2 [101:187:2200] 2025-05-07T08:54:08.497706Z node 101 :PERSQUEUE INFO: partition.cpp:557: [PQ: 72057594037927937, Partition: 2, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 2 generation 2 [101:187:2200] 2025-05-07T08:54:08.500306Z node 101 :PERSQUEUE INFO: partition_init.cpp:879: [PQ: 72057594037927937, Partition: 3, State: StateInit] bootstrapping 3 [101:188:2201] 2025-05-07T08:54:08.501951Z node 101 :PERSQUEUE INFO: partition.cpp:557: [PQ: 72057594037927937, Partition: 3, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 3 generation 2 [101:188:2201] 2025-05-07T08:54:08.504365Z node 101 :PERSQUEUE INFO: partition_init.cpp:879: [PQ: 72057594037927937, Partition: 4, State: StateInit] bootstrapping 4 [101:189:2202] 2025-05-07T08:54:08.505950Z node 101 :PERSQUEUE INFO: partition.cpp:557: [PQ: 72057594037927937, Partition: 4, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 4 generation 2 [101:189:2202] 2025-05-07T08:54:08.519834Z node 101 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|97f8e43-d4c00b88-92427db7-6c0ae892_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-05-07T08:54:09.648680Z node 101 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|949b4c71-d139bffd-be612b3a-d46a0cf6_1 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-05-07T08:54:09.667111Z node 101 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|be3a7a72-5d849090-4bb4e534-d0adf7d_2 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default Leader for TabletID 72057594037927937 is [0:0:0] sender: [102:103:2057] recipient: [102:101:2135] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [102:103:2057] recipient: [102:101:2135] Leader for TabletID 72057594037927937 is [102:107:2139] sender: [102:108:2057] recipient: [102:101:2135] 2025-05-07T08:54:10.543491Z node 102 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-05-07T08:54:10.543618Z node 102 :PERSQUEUE INFO: pq_impl.cpp:794: [PQ: 72057594037927937] doesn't have tx writes info Leader for TabletID 72057594037927938 is [0:0:0] sender: [102:149:2057] recipient: [102:147:2170] IGNORE Leader for TabletID 72057594037927938 is [0:0:0] sender: [102:149:2057] recipient: [102:147:2170] Leader for TabletID 72057594037927938 is [102:153:2174] sender: [102:154:2057] recipient: [102:147:2170] Leader for TabletID 72057594037927937 is [102:107:2139] sender: [102:177:2057] recipient: [102:14:2061] 2025-05-07T08:54:10.579673Z node 102 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-05-07T08:54:10.582227Z node 102 :PERSQUEUE INFO: pq_impl.cpp:1481: [PQ: 72057594037927937] Config applied version 102 actor [102:175:2190] txId 12345 config: 
CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 100 MaxSizeInPartition: 104857600 LifetimeSeconds: 172800 ImportantClientId: "user1" LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 PartitionIds: 2 PartitionIds: 3 PartitionIds: 4 TopicName: "rt3.dc1--asdfgs--topic" Version: 102 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } Partitions { PartitionId: 2 } Partitions { PartitionId: 3 } Partitions { PartitionId: 4 } ReadRuleGenerations: 102 ReadRuleGenerations: 102 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } AllPartitions { PartitionId: 2 } AllPartitions { PartitionId: 3 } AllPartitions { PartitionId: 4 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 102 Important: false } Consumers { Name: "user1" Generation: 102 Important: true } 2025-05-07T08:54:10.586017Z node 102 :PERSQUEUE INFO: partition_init.cpp:879: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [102:183:2196] 2025-05-07T08:54:10.589899Z node 102 :PERSQUEUE INFO: partition.cpp:557: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 2 [102:183:2196] 2025-05-07T08:54:10.594237Z node 102 :PERSQUEUE INFO: partition_init.cpp:879: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [102:184:2197] 2025-05-07T08:54:10.596903Z node 102 :PERSQUEUE INFO: partition.cpp:557: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 2 [102:184:2197] 2025-05-07T08:54:10.600729Z node 102 :PERSQUEUE INFO: partition_init.cpp:879: [PQ: 72057594037927937, Partition: 2, State: StateInit] bootstrapping 2 [102:185:2198] 2025-05-07T08:54:10.603315Z node 102 :PERSQUEUE INFO: partition.cpp:557: [PQ: 72057594037927937, Partition: 2, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 2 generation 2 [102:185:2198] 2025-05-07T08:54:10.606400Z node 102 :PERSQUEUE INFO: partition_init.cpp:879: [PQ: 72057594037927937, Partition: 3, State: StateInit] bootstrapping 3 [102:186:2199] 2025-05-07T08:54:10.608876Z node 102 :PERSQUEUE INFO: partition.cpp:557: [PQ: 72057594037927937, Partition: 3, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 3 generation 2 [102:186:2199] 2025-05-07T08:54:10.612438Z node 102 :PERSQUEUE INFO: partition_init.cpp:879: [PQ: 72057594037927937, Partition: 4, State: StateInit] bootstrapping 4 [102:187:2200] 2025-05-07T08:54:10.615062Z node 102 :PERSQUEUE INFO: partition.cpp:557: [PQ: 72057594037927937, Partition: 4, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 4 generation 2 [102:187:2200] 2025-05-07T08:54:10.631554Z node 102 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|39789e79-ce710dd4-c36d5262-9efce049_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-05-07T08:54:11.685653Z node 102 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|31fa7c37-14ffdd1e-122b3940-761a9ac9_1 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-05-07T08:54:11.706887Z node 102 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|ccf31982-818e4eaa-b8134e19-565e30eb_2 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default Leader for TabletID 72057594037927937 is [0:0:0] 
sender: [103:103:2057] recipient: [103:101:2135] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [103:103:2057] recipient: [103:101:2135] Leader for TabletID 72057594037927937 is [103:107:2139] sender: [103:108:2057] recipient: [103:101:2135] 2025-05-07T08:54:12.315767Z node 103 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-05-07T08:54:12.315867Z node 103 :PERSQUEUE INFO: pq_impl.cpp:794: [PQ: 72057594037927937] doesn't have tx writes info Leader for TabletID 72057594037927938 is [0:0:0] sender: [103:149:2057] recipient: [103:147:2170] IGNORE Leader for TabletID 72057594037927938 is [0:0:0] sender: [103:149:2057] recipient: [103:147:2170] Leader for TabletID 72057594037927938 is [103:153:2174] sender: [103:154:2057] recipient: [103:147:2170] Leader for TabletID 72057594037927937 is [103:107:2139] sender: [103:179:2057] recipient: [103:14:2061] 2025-05-07T08:54:12.340940Z node 103 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-05-07T08:54:12.342891Z node 103 :PERSQUEUE INFO: pq_impl.cpp:1481: [PQ: 72057594037927937] Config applied version 103 actor [103:177:2192] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 100 MaxSizeInPartition: 104857600 LifetimeSeconds: 172800 ImportantClientId: "user1" LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 PartitionIds: 2 PartitionIds: 3 PartitionIds: 4 TopicName: "rt3.dc1--asdfgs--topic" Version: 103 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } Partitions { PartitionId: 2 } Partitions { PartitionId: 3 } Partitions { PartitionId: 4 } ReadRuleGenerations: 103 ReadRuleGenerations: 103 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } AllPartitions { PartitionId: 2 } AllPartitions { PartitionId: 3 } AllPartitions { PartitionId: 4 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 103 Important: false } Consumers { Name: "user1" Generation: 103 Important: true } 2025-05-07T08:54:12.344140Z node 103 :PERSQUEUE INFO: partition_init.cpp:879: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [103:185:2198] 2025-05-07T08:54:12.347157Z node 103 :PERSQUEUE INFO: partition.cpp:557: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 2 [103:185:2198] 2025-05-07T08:54:12.350526Z node 103 :PERSQUEUE INFO: partition_init.cpp:879: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [103:186:2199] 2025-05-07T08:54:12.352867Z node 103 :PERSQUEUE INFO: partition.cpp:557: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 2 [103:186:2199] 2025-05-07T08:54:12.355916Z node 103 :PERSQUEUE INFO: partition_init.cpp:879: [PQ: 72057594037927937, Partition: 2, State: StateInit] bootstrapping 2 [103:187:2200] 2025-05-07T08:54:12.358279Z node 103 :PERSQUEUE INFO: partition.cpp:557: [PQ: 72057594037927937, Partition: 2, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 2 generation 2 [103:187:2200] 2025-05-07T08:54:12.360999Z node 103 :PERSQUEUE INFO: partition_init.cpp:879: [PQ: 72057594037927937, Partition: 3, 
State: StateInit] bootstrapping 3 [103:188:2201] 2025-05-07T08:54:12.363734Z node 103 :PERSQUEUE INFO: partition.cpp:557: [PQ: 72057594037927937, Partition: 3, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 3 generation 2 [103:188:2201] 2025-05-07T08:54:12.367550Z node 103 :PERSQUEUE INFO: partition_init.cpp:879: [PQ: 72057594037927937, Partition: 4, State: StateInit] bootstrapping 4 [103:189:2202] 2025-05-07T08:54:12.370762Z node 103 :PERSQUEUE INFO: partition.cpp:557: [PQ: 72057594037927937, Partition: 4, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 4 generation 2 [103:189:2202] 2025-05-07T08:54:12.396409Z node 103 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|6994fbed-6d13eff2-c0cc6d9-acbd73a6_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-05-07T08:54:13.452618Z node 103 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|425b5d3d-41e93bc7-4fbf1c9d-5205c370_1 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-05-07T08:54:13.482418Z node 103 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|c7571b72-a84b7163-1b94e69-2e4e8786_2 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default >> TSchemeShardTest::ListNotCreatedDirCase [GOOD] >> TSchemeShardTest::ListNotCreatedIndexCase >> DataStreams::TestPutEmptyMessage [GOOD] >> DataStreams::TestListStreamConsumers >> TTxDataShardUploadRows::TestUploadShadowRowsShadowDataPublishThenSplit >> TBlobStorageWardenTest::TestFilterBadSerials [GOOD] >> TBlobStorageWardenTest::TestGivenPDiskFormatedWithGuid1AndCreatedWithGuid2WhenYardInitThenError >> DataStreams::TestStreamPagination [GOOD] >> DataStreams::TestShardPagination >> TBlobStorageWardenTest::TestSendUsefulMonitoring >> TBlobStorageWardenTest::TestDeleteStoragePool >> DataStreams::TestControlPlaneAndMeteringData [GOOD] >> DataStreams::ChangeBetweenRetentionModes >> DataStreams::TestUpdateStorage [GOOD] >> DataStreams::TestStreamTimeRetention >> DataStreams::TestUpdateStream [GOOD] >> DataStreams::Test_AutoPartitioning_Describe >> TSchemeShardTest::ListNotCreatedIndexCase [GOOD] >> TSchemeShardTest::FindSubDomainPathId |90.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/ut/perf/ydb-core-kqp-ut-perf |90.1%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/perf/ydb-core-kqp-ut-perf |90.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/perf/ydb-core-kqp-ut-perf >> TTxDataShardUploadRows::TestUploadRows [GOOD] >> TTxDataShardUploadRows::TestUploadRowsDropColumnRace >> DataStreams::TestDeleteStreamWithEnforceFlag [GOOD] >> DataStreams::TestDeleteStreamWithEnforceFlagFalse >> TBlobStorageWardenTest::TestSendUsefulMonitoring [GOOD] >> TBlobStorageWardenTest::TestGivenPDiskFormatedWithGuid1AndCreatedWithGuid2WhenYardInitThenError [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-61 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-62 >> TBlobStorageWardenTest::TestDeleteStoragePool [GOOD] >> TBlobStorageWardenTest::TestBlockEncriptedGroup ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/nodewarden/ut/unittest >> TBlobStorageWardenTest::TestSendUsefulMonitoring [GOOD] Test command err: 2025-05-07T08:54:18.420750Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:1:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:0:0] targetVDisk# [2000000:1:0:1:0] oldSyncState# [0 0] DbBirthLsn# 0 
2025-05-07T08:54:18.421767Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:3:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:0:0] targetVDisk# [2000000:1:0:3:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-05-07T08:54:18.425033Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:3:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:1:0] targetVDisk# [2000000:1:0:3:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-05-07T08:54:18.426741Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:0:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:1:0] targetVDisk# [2000000:1:0:0:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-05-07T08:54:18.427836Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:3:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:2:0] targetVDisk# [2000000:1:0:3:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-05-07T08:54:18.429853Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:0:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:2:0] targetVDisk# [2000000:1:0:0:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-05-07T08:54:18.431185Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:1:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:2:0] targetVDisk# [2000000:1:0:1:0] oldSyncState# [0 0] DbBirthLsn# 0 tablet_helpers.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003ee7/r3tmp/tmpwDsa3w/pdisk_1.dat 2025-05-07T08:54:19.174477Z node 1 :BS_PROXY_PUT INFO: dsproxy_put.cpp:645: [5a9a1d6240d04444] bootstrap ActorId# [1:546:2464] Group# 33554432 BlobCount# 1 BlobIDs# [[72057594037932033:2:8:0:0:1303:0]] HandleClass# TabletLog Tactic# MinLatency RestartCounter# 0 Marker# BPP13 2025-05-07T08:54:19.174628Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [5a9a1d6240d04444] Id# [72057594037932033:2:8:0:0:1303:0] restore disk# 0 part# 0 situation# ESituation::Unknown Marker# BPG51 2025-05-07T08:54:19.174667Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [5a9a1d6240d04444] Id# [72057594037932033:2:8:0:0:1303:0] restore disk# 1 part# 1 situation# ESituation::Unknown Marker# BPG51 2025-05-07T08:54:19.174715Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [5a9a1d6240d04444] Id# [72057594037932033:2:8:0:0:1303:0] restore disk# 2 part# 2 situation# ESituation::Unknown Marker# BPG51 2025-05-07T08:54:19.174739Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [5a9a1d6240d04444] Id# [72057594037932033:2:8:0:0:1303:0] restore disk# 3 part# 0 situation# ESituation::Unknown Marker# BPG51 2025-05-07T08:54:19.174765Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [5a9a1d6240d04444] Id# [72057594037932033:2:8:0:0:1303:0] restore disk# 3 part# 1 situation# ESituation::Unknown Marker# BPG51 2025-05-07T08:54:19.174789Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [5a9a1d6240d04444] Id# [72057594037932033:2:8:0:0:1303:0] restore disk# 3 part# 2 situation# ESituation::Unknown Marker# BPG51 2025-05-07T08:54:19.174827Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:65: [5a9a1d6240d04444] restore Id# [72057594037932033:2:8:0:0:1303:0] optimisticReplicas# 3 optimisticState# EBS_FULL Marker# BPG55 
2025-05-07T08:54:19.174896Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [5a9a1d6240d04444] partPlacement record partSituation# ESituation::Unknown to# 0 blob Id# [72057594037932033:2:8:0:0:1303:1] Marker# BPG33 2025-05-07T08:54:19.174939Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [5a9a1d6240d04444] Sending missing VPut part# 0 to# 0 blob Id# [72057594037932033:2:8:0:0:1303:1] Marker# BPG32 2025-05-07T08:54:19.174982Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [5a9a1d6240d04444] partPlacement record partSituation# ESituation::Unknown to# 1 blob Id# [72057594037932033:2:8:0:0:1303:2] Marker# BPG33 2025-05-07T08:54:19.175009Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [5a9a1d6240d04444] Sending missing VPut part# 1 to# 1 blob Id# [72057594037932033:2:8:0:0:1303:2] Marker# BPG32 2025-05-07T08:54:19.175037Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [5a9a1d6240d04444] partPlacement record partSituation# ESituation::Unknown to# 2 blob Id# [72057594037932033:2:8:0:0:1303:3] Marker# BPG33 2025-05-07T08:54:19.175062Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [5a9a1d6240d04444] Sending missing VPut part# 2 to# 2 blob Id# [72057594037932033:2:8:0:0:1303:3] Marker# BPG32 2025-05-07T08:54:19.175239Z node 1 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [1:65:2091] NKikimr::TEvBlobStorage::TEvVPut# {ID# [72057594037932033:2:8:0:0:1303:3] FDS# 1303 HandleClass# TabletLog {MsgQoS ExtQueueId# PutTabletLog} DataSize# 0 Data# } cookie# 0 2025-05-07T08:54:19.175303Z node 1 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [1:58:2084] NKikimr::TEvBlobStorage::TEvVPut# {ID# [72057594037932033:2:8:0:0:1303:2] FDS# 1303 HandleClass# TabletLog {MsgQoS ExtQueueId# PutTabletLog} DataSize# 0 Data# } cookie# 0 2025-05-07T08:54:19.175352Z node 1 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [1:79:2105] NKikimr::TEvBlobStorage::TEvVPut# {ID# [72057594037932033:2:8:0:0:1303:1] FDS# 1303 HandleClass# TabletLog {MsgQoS ExtQueueId# PutTabletLog} DataSize# 0 Data# } cookie# 0 2025-05-07T08:54:19.190367Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [5a9a1d6240d04444] received {EvVPutResult Status# OK ID# [72057594037932033:2:8:0:0:1303:1] {MsgQoS MsgId# { SequenceId: 1 MsgId: 8 } Cost# 90259 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 9 }}}} from# [2000000:1:0:3:0] Marker# BPP01 2025-05-07T08:54:19.190689Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [5a9a1d6240d04444] received {EvVPutResult Status# OK ID# [72057594037932033:2:8:0:0:1303:2] {MsgQoS MsgId# { SequenceId: 1 MsgId: 9 } Cost# 90259 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 10 }}}} from# [2000000:1:0:0:0] Marker# BPP01 2025-05-07T08:54:19.190795Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [5a9a1d6240d04444] received {EvVPutResult Status# OK ID# [72057594037932033:2:8:0:0:1303:3] {MsgQoS MsgId# { SequenceId: 1 MsgId: 10 } Cost# 90259 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 11 }}}} from# [2000000:1:0:1:0] Marker# BPP01 2025-05-07T08:54:19.190897Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_put_impl.cpp:72: [5a9a1d6240d04444] Result# TEvPutResult {Id# [72057594037932033:2:8:0:0:1303:0] 
Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.999479} GroupId# 33554432 Marker# BPP12 2025-05-07T08:54:19.190956Z node 1 :BS_PROXY_PUT INFO: dsproxy_put.cpp:486: [5a9a1d6240d04444] SendReply putResult# TEvPutResult {Id# [72057594037932033:2:8:0:0:1303:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.999479} ResponsesSent# 0 PutImpl.Blobs.size# 1 Last# true Marker# BPP21 2025-05-07T08:54:19.191160Z node 1 :BS_PROXY_PUT DEBUG: {BPP72@dsproxy_put.cpp:470} Query history GroupId# 33554432 HandleClass# TabletLog Tactic# MinLatency History# THistory { Entries# [ TEvVPut{ TimestampMs# 1.039 sample PartId# [72057594037932033:2:8:0:0:1303:3] QueryCount# 1 VDiskId# [2000000:1:0:1:0] NodeId# 1 } TEvVPut{ TimestampMs# 1.04 sample PartId# [72057594037932033:2:8:0:0:1303:2] QueryCount# 1 VDiskId# [2000000:1:0:0:0] NodeId# 1 } TEvVPut{ TimestampMs# 1.04 sample PartId# [72057594037932033:2:8:0:0:1303:1] QueryCount# 1 VDiskId# [2000000:1:0:3:0] NodeId# 1 } TEvVPutResult{ TimestampMs# 16.102 VDiskId# [2000000:1:0:3:0] NodeId# 1 Status# OK } TEvVPutResult{ TimestampMs# 16.391 VDiskId# [2000000:1:0:0:0] NodeId# 1 Status# OK } TEvVPutResult{ TimestampMs# 16.508 VDiskId# [2000000:1:0:1:0] NodeId# 1 Status# OK } ] } 2025-05-07T08:54:19.303488Z node 1 :BS_PROXY_PUT INFO: dsproxy_put.cpp:645: [8d27cf9df52bfb78] bootstrap ActorId# [1:591:2501] Group# 33554432 BlobCount# 1 BlobIDs# [[72057594037932033:2:9:0:0:229:0]] HandleClass# TabletLog Tactic# MinLatency RestartCounter# 0 Marker# BPP13 2025-05-07T08:54:19.303635Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [8d27cf9df52bfb78] Id# [72057594037932033:2:9:0:0:229:0] restore disk# 0 part# 0 situation# ESituation::Unknown Marker# BPG51 2025-05-07T08:54:19.303680Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [8d27cf9df52bfb78] Id# [72057594037932033:2:9:0:0:229:0] restore disk# 1 part# 1 situation# ESituation::Unknown Marker# BPG51 2025-05-07T08:54:19.303709Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [8d27cf9df52bfb78] Id# [72057594037932033:2:9:0:0:229:0] restore disk# 2 part# 2 situation# ESituation::Unknown Marker# BPG51 2025-05-07T08:54:19.303732Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [8d27cf9df52bfb78] Id# [72057594037932033:2:9:0:0:229:0] restore disk# 3 part# 0 situation# ESituation::Unknown Marker# BPG51 2025-05-07T08:54:19.303762Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [8d27cf9df52bfb78] Id# [72057594037932033:2:9:0:0:229:0] restore disk# 3 part# 1 situation# ESituation::Unknown Marker# BPG51 2025-05-07T08:54:19.303784Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [8d27cf9df52bfb78] Id# [72057594037932033:2:9:0:0:229:0] restore disk# 3 part# 2 situation# ESituation::Unknown Marker# BPG51 2025-05-07T08:54:19.305518Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:65: [8d27cf9df52bfb78] restore Id# [72057594037932033:2:9:0:0:229:0] optimisticReplicas# 3 optimisticState# EBS_FULL Marker# BPG55 2025-05-07T08:54:19.305611Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [8d27cf9df52bfb78] partPlacement record partSituation# ESituation::Unknown to# 0 blob Id# [72057594037932033:2:9:0:0:229:1] Marker# BPG33 2025-05-07T08:54:19.305647Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [8d27cf9df52bfb78] Sending missing VPut part# 0 to# 0 blob Id# [72057594037932033:2:9:0:0:229:1] Marker# BPG32 2025-05-07T08:54:19.305687Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [8d27cf9df52bfb78] 
partPlacement record partSituation# ESituation::Unknown to# 1 blob Id# [72057594037932033:2:9:0:0:229:2] Marker# BPG33 2025-05-07T08:54:19.305706Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [8d27cf9df52bfb78] Sending missing VPut part# 1 to# 1 blob Id# [72057594037932033:2:9:0:0:229:2] Marker# BPG32 2025-05-07T08:54:19.305724Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [8d27cf9df52bfb78] partPlacement record partSituation# ESituation::Unknown to# 2 blob Id# [72057594037932033:2:9:0:0:229:3] Marker# BPG33 2025-05-07T08:54:19.305743Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [8d27cf9df52bfb78] Sending missing VPut part# 2 to# 2 blob Id# [72057594037932033:2:9:0:0:229:3] Marker# BPG32 2025-05-07T08:54:19.305874Z node 1 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [1:58:2084] NKikimr::TEvBlobStorage::TEvVPut# {ID# [72057594037932033:2:9:0:0:229:3] FDS# 229 HandleClass# TabletLog {MsgQoS ExtQueueId# PutTabletLog} DataSize# 0 Data# } cookie# 0 2025-05-07T08:54:19.305928Z node 1 :BS_PROXY DEBUG: group_sessions.h:1 ... p# 2181038082 Handle TEvProxyQueueState# {VDiskId# [82000002:1:0:0:0] QueueId# 1 IsConnected# true ExtraBlockChecksSupport# true CostModel# {SeekTimeUs# 8000 ReadSpeedBps# 127000000 WriteSpeedBps# 127000000 ReadBlockSize# 524288 WriteBlockSize# 524288 MinHugeBlobInBytes# 524257 GType# none}} Duration# 0.000000s Marker# DSP04 2025-05-07T08:54:19.359601Z node 1 :BS_PROXY DEBUG: dsproxy_state.cpp:209: Group# 2181038082 Handle TEvProxyQueueState# {VDiskId# [82000002:1:0:0:0] QueueId# 2 IsConnected# true ExtraBlockChecksSupport# true CostModel# {SeekTimeUs# 8000 ReadSpeedBps# 127000000 WriteSpeedBps# 127000000 ReadBlockSize# 524288 WriteBlockSize# 524288 MinHugeBlobInBytes# 524257 GType# none}} Duration# 0.000000s Marker# DSP04 2025-05-07T08:54:19.359723Z node 1 :BS_PROXY DEBUG: dsproxy_state.cpp:209: Group# 2181038082 Handle TEvProxyQueueState# {VDiskId# [82000002:1:0:0:0] QueueId# 3 IsConnected# true ExtraBlockChecksSupport# true CostModel# {SeekTimeUs# 8000 ReadSpeedBps# 127000000 WriteSpeedBps# 127000000 ReadBlockSize# 524288 WriteBlockSize# 524288 MinHugeBlobInBytes# 524257 GType# none}} Duration# 0.000000s Marker# DSP04 2025-05-07T08:54:19.359828Z node 1 :BS_PROXY DEBUG: dsproxy_state.cpp:209: Group# 2181038082 Handle TEvProxyQueueState# {VDiskId# [82000002:1:0:0:0] QueueId# 4 IsConnected# true ExtraBlockChecksSupport# true CostModel# {SeekTimeUs# 8000 ReadSpeedBps# 127000000 WriteSpeedBps# 127000000 ReadBlockSize# 524288 WriteBlockSize# 524288 MinHugeBlobInBytes# 524257 GType# none}} Duration# 0.000000s Marker# DSP04 2025-05-07T08:54:19.359920Z node 1 :BS_PROXY DEBUG: dsproxy_state.cpp:209: Group# 2181038082 Handle TEvProxyQueueState# {VDiskId# [82000002:1:0:0:0] QueueId# 5 IsConnected# true ExtraBlockChecksSupport# true CostModel# {SeekTimeUs# 8000 ReadSpeedBps# 127000000 WriteSpeedBps# 127000000 ReadBlockSize# 524288 WriteBlockSize# 524288 MinHugeBlobInBytes# 524257 GType# none}} Duration# 0.000000s Marker# DSP04 2025-05-07T08:54:19.359972Z node 1 :BS_PROXY DEBUG: dsproxy_state.cpp:209: Group# 2181038082 Handle TEvProxyQueueState# {VDiskId# [82000002:1:0:0:0] QueueId# 6 IsConnected# true ExtraBlockChecksSupport# true CostModel# {SeekTimeUs# 8000 ReadSpeedBps# 127000000 WriteSpeedBps# 127000000 ReadBlockSize# 524288 WriteBlockSize# 524288 MinHugeBlobInBytes# 524257 GType# none}} Duration# 0.000000s Marker# DSP04 2025-05-07T08:54:19.360022Z node 1 :BS_PROXY DEBUG: dsproxy_state.cpp:209: Group# 2181038082 Handle 
TEvProxyQueueState# {VDiskId# [82000002:1:0:0:0] QueueId# 7 IsConnected# true ExtraBlockChecksSupport# true CostModel# {SeekTimeUs# 8000 ReadSpeedBps# 127000000 WriteSpeedBps# 127000000 ReadBlockSize# 524288 WriteBlockSize# 524288 MinHugeBlobInBytes# 524257 GType# none}} Duration# 0.000000s Marker# DSP04 2025-05-07T08:54:19.360048Z node 1 :BS_PROXY INFO: dsproxy_state.cpp:183: Group# 2181038082 -> StateWork Marker# DSP11 2025-05-07T08:54:19.360083Z node 1 :BS_PROXY INFO: dsproxy_state.cpp:78: Group# 2181038082 SetStateWork Marker# DSP15 2025-05-07T08:54:19.360123Z node 1 :BS_PROXY NOTICE: dsproxy_state.cpp:290: EnsureMonitoring Group# 2181038082 IsLimitedKeyless# 0 Marker# DSP57 initialize full monitoring 2025-05-07T08:54:19.360936Z node 1 :BS_PROXY_PUT INFO: dsproxy_put.cpp:645: [1a43693427d0a82b] bootstrap ActorId# [1:603:2512] Group# 2181038082 BlobCount# 1 BlobIDs# [[1234:2:0:0:0:5:0]] HandleClass# TabletLog Tactic# Default RestartCounter# 0 Marker# BPP13 2025-05-07T08:54:19.361055Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [1a43693427d0a82b] Id# [1234:2:0:0:0:5:0] restore disk# 0 part# 0 situation# ESituation::Unknown Marker# BPG51 2025-05-07T08:54:19.361096Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:65: [1a43693427d0a82b] restore Id# [1234:2:0:0:0:5:0] optimisticReplicas# 1 optimisticState# EBS_FULL Marker# BPG55 2025-05-07T08:54:19.361147Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [1a43693427d0a82b] partPlacement record partSituation# ESituation::Unknown to# 0 blob Id# [1234:2:0:0:0:5:1] Marker# BPG33 2025-05-07T08:54:19.361177Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [1a43693427d0a82b] Sending missing VPut part# 0 to# 0 blob Id# [1234:2:0:0:0:5:1] Marker# BPG32 2025-05-07T08:54:19.361283Z node 1 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [1:596:2505] NKikimr::TEvBlobStorage::TEvVPut# {ID# [1234:2:0:0:0:5:1] FDS# 5 HandleClass# TabletLog {MsgQoS ExtQueueId# PutTabletLog} DataSize# 0 Data# } cookie# 0 2025-05-07T08:54:19.372904Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [1a43693427d0a82b] received {EvVPutResult Status# OK ID# [1234:2:0:0:0:5:1] {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 } Cost# 80039 ExtQueueId# PutTabletLog IntQueueId# IntPutLog CostSettings# { SeekTimeUs# 8000 ReadSpeedBps# 127000000 WriteSpeedBps# 127000000 ReadBlockSize# 524288 WriteBlockSize# 524288 MinHugeBlobInBytes# 524257} Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 1 }}}} from# [82000002:1:0:0:0] Marker# BPP01 2025-05-07T08:54:19.373070Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_put_impl.cpp:72: [1a43693427d0a82b] Result# TEvPutResult {Id# [1234:2:0:0:0:5:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.999479} GroupId# 2181038082 Marker# BPP12 2025-05-07T08:54:19.373140Z node 1 :BS_PROXY_PUT INFO: dsproxy_put.cpp:486: [1a43693427d0a82b] SendReply putResult# TEvPutResult {Id# [1234:2:0:0:0:5:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.999479} ResponsesSent# 0 PutImpl.Blobs.size# 1 Last# true Marker# BPP21 2025-05-07T08:54:19.373269Z node 1 :BS_PROXY_PUT DEBUG: {BPP72@dsproxy_put.cpp:470} Query history GroupId# 2181038082 HandleClass# TabletLog Tactic# Default History# THistory { Entries# [ TEvVPut{ TimestampMs# 0.512 sample PartId# [1234:2:0:0:0:5:1] QueryCount# 1 VDiskId# [82000002:1:0:0:0] NodeId# 1 } TEvVPutResult{ TimestampMs# 12.19 VDiskId# [82000002:1:0:0:0] NodeId# 1 Status# OK } ] } 2025-05-07T08:54:19.373943Z node 
2 :BS_PROXY INFO: dsproxy_state.cpp:146: Group# 2181038082 TEvConfigureProxy received GroupGeneration# IsLimitedKeyless# false Marker# DSP02 2025-05-07T08:54:19.380689Z node 2 :BS_PROXY INFO: dsproxy_state.cpp:55: Group# 2181038082 SetStateUnconfigured Marker# DSP07 2025-05-07T08:54:19.380973Z node 2 :BS_PROXY DEBUG: dsproxy_impl.h:205: Group# 2181038082 HandleEnqueue# TEvCollectGarbage {TabletId# 1234 RecordGeneration# 4294967295 PerGenerationCounter# 4294967295 Channel# 0 Deadline# 18446744073709551 Collect# true CollectGeneration# 4294967295 CollectStep# 4294967295 Hard# true IsMultiCollectAllowed# 1 IsMonitored# 1} Marker# DSP17 2025-05-07T08:54:19.381744Z node 2 :BS_NODE ERROR: {NW19@node_warden_group.cpp:211} error while parsing group GroupId# 2181038082 Err# LifeCyclePhase# KEY_NOT_LOADED Key.Id# "" Key.Version# 0 MainKey.Id# "/home/runner/.ya/build/build_root/zvgn/003ee7/r3tmp/tmpwDsa3w//key.txt" MainKey.Version# 1 GroupKeyNonce# 2181038082 2025-05-07T08:54:19.382807Z node 2 :BS_PROXY INFO: dsproxy_state.cpp:146: Group# 2181038082 TEvConfigureProxy received GroupGeneration# 1 IsLimitedKeyless# true Marker# DSP02 2025-05-07T08:54:19.382863Z node 2 :BS_PROXY NOTICE: dsproxy_state.cpp:294: EnsureMonitoring Group# 2181038082 IsLimitedKeyless# 1 fullIfPossible# 0 Marker# DSP58 2025-05-07T08:54:19.395641Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 2181038082 Actor# [2:605:2105] Create Queue# [2:607:2106] targetNodeId# 1 Marker# DSP01 2025-05-07T08:54:19.395825Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 2181038082 Actor# [2:605:2105] Create Queue# [2:608:2107] targetNodeId# 1 Marker# DSP01 2025-05-07T08:54:19.395942Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 2181038082 Actor# [2:605:2105] Create Queue# [2:609:2108] targetNodeId# 1 Marker# DSP01 2025-05-07T08:54:19.396068Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 2181038082 Actor# [2:605:2105] Create Queue# [2:610:2109] targetNodeId# 1 Marker# DSP01 2025-05-07T08:54:19.396188Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 2181038082 Actor# [2:605:2105] Create Queue# [2:611:2110] targetNodeId# 1 Marker# DSP01 2025-05-07T08:54:19.396310Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 2181038082 Actor# [2:605:2105] Create Queue# [2:612:2111] targetNodeId# 1 Marker# DSP01 2025-05-07T08:54:19.396432Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 2181038082 Actor# [2:605:2105] Create Queue# [2:613:2112] targetNodeId# 1 Marker# DSP01 2025-05-07T08:54:19.396462Z node 2 :BS_PROXY INFO: dsproxy_state.cpp:29: Group# 2181038082 SetStateEstablishingSessions Marker# DSP03 2025-05-07T08:54:19.397795Z node 2 :BS_PROXY DEBUG: dsproxy_state.cpp:209: Group# 2181038082 Handle TEvProxyQueueState# {VDiskId# [82000002:1:0:0:0] QueueId# 1 IsConnected# true ExtraBlockChecksSupport# true CostModel# {SeekTimeUs# 8000 ReadSpeedBps# 127000000 WriteSpeedBps# 127000000 ReadBlockSize# 524288 WriteBlockSize# 524288 MinHugeBlobInBytes# 524257 GType# none}} Duration# 0.000000s Marker# DSP04 2025-05-07T08:54:19.397963Z node 2 :BS_PROXY DEBUG: dsproxy_state.cpp:209: Group# 2181038082 Handle TEvProxyQueueState# {VDiskId# [82000002:1:0:0:0] QueueId# 2 IsConnected# true ExtraBlockChecksSupport# true CostModel# {SeekTimeUs# 8000 ReadSpeedBps# 127000000 WriteSpeedBps# 127000000 ReadBlockSize# 524288 WriteBlockSize# 524288 MinHugeBlobInBytes# 524257 GType# none}} Duration# 0.000000s Marker# DSP04 2025-05-07T08:54:19.403580Z node 2 :BS_PROXY DEBUG: dsproxy_state.cpp:209: Group# 2181038082 Handle 
TEvProxyQueueState# {VDiskId# [82000002:1:0:0:0] QueueId# 3 IsConnected# true ExtraBlockChecksSupport# true CostModel# {SeekTimeUs# 8000 ReadSpeedBps# 127000000 WriteSpeedBps# 127000000 ReadBlockSize# 524288 WriteBlockSize# 524288 MinHugeBlobInBytes# 524257 GType# none}} Duration# 0.000000s Marker# DSP04 2025-05-07T08:54:19.403668Z node 2 :BS_PROXY DEBUG: dsproxy_state.cpp:209: Group# 2181038082 Handle TEvProxyQueueState# {VDiskId# [82000002:1:0:0:0] QueueId# 4 IsConnected# true ExtraBlockChecksSupport# true CostModel# {SeekTimeUs# 8000 ReadSpeedBps# 127000000 WriteSpeedBps# 127000000 ReadBlockSize# 524288 WriteBlockSize# 524288 MinHugeBlobInBytes# 524257 GType# none}} Duration# 0.000000s Marker# DSP04 2025-05-07T08:54:19.403875Z node 2 :BS_PROXY DEBUG: dsproxy_state.cpp:209: Group# 2181038082 Handle TEvProxyQueueState# {VDiskId# [82000002:1:0:0:0] QueueId# 5 IsConnected# true ExtraBlockChecksSupport# true CostModel# {SeekTimeUs# 8000 ReadSpeedBps# 127000000 WriteSpeedBps# 127000000 ReadBlockSize# 524288 WriteBlockSize# 524288 MinHugeBlobInBytes# 524257 GType# none}} Duration# 0.000000s Marker# DSP04 2025-05-07T08:54:19.403936Z node 2 :BS_PROXY DEBUG: dsproxy_state.cpp:209: Group# 2181038082 Handle TEvProxyQueueState# {VDiskId# [82000002:1:0:0:0] QueueId# 6 IsConnected# true ExtraBlockChecksSupport# true CostModel# {SeekTimeUs# 8000 ReadSpeedBps# 127000000 WriteSpeedBps# 127000000 ReadBlockSize# 524288 WriteBlockSize# 524288 MinHugeBlobInBytes# 524257 GType# none}} Duration# 0.000000s Marker# DSP04 2025-05-07T08:54:19.403998Z node 2 :BS_PROXY DEBUG: dsproxy_state.cpp:209: Group# 2181038082 Handle TEvProxyQueueState# {VDiskId# [82000002:1:0:0:0] QueueId# 7 IsConnected# true ExtraBlockChecksSupport# true CostModel# {SeekTimeUs# 8000 ReadSpeedBps# 127000000 WriteSpeedBps# 127000000 ReadBlockSize# 524288 WriteBlockSize# 524288 MinHugeBlobInBytes# 524257 GType# none}} Duration# 0.000000s Marker# DSP04 2025-05-07T08:54:19.404031Z node 2 :BS_PROXY INFO: dsproxy_state.cpp:183: Group# 2181038082 -> StateWork Marker# DSP11 2025-05-07T08:54:19.404074Z node 2 :BS_PROXY INFO: dsproxy_state.cpp:78: Group# 2181038082 SetStateWork Marker# DSP15 2025-05-07T08:54:19.404337Z node 2 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [2:607:2106] NKikimr::TEvBlobStorage::TEvVCollectGarbage# {TEvVCollectGarbage for [tablet:gen:cnt:channel]=[1234:4294967295:4294967295:0] collect=[4294967295:4294967295] cookie# 0 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/nodewarden/ut/unittest >> TBlobStorageWardenTest::TestGivenPDiskFormatedWithGuid1AndCreatedWithGuid2WhenYardInitThenError [GOOD] Test command err: 2025-05-07T08:54:17.698137Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:0:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:1:0] targetVDisk# [2000000:1:0:0:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-05-07T08:54:17.701101Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:0:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:2:0] targetVDisk# [2000000:1:0:0:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-05-07T08:54:17.701192Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:1:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:2:0] targetVDisk# [2000000:1:0:1:0] oldSyncState# [0 0] DbBirthLsn# 0 tablet_helpers.cpp: SetPath # 
/home/runner/.ya/build/build_root/zvgn/003eec/r3tmp/tmpgGnYlw/pdisk_1.dat 2025-05-07T08:54:17.833281Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:0:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:3:0] targetVDisk# [2000000:1:0:0:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-05-07T08:54:17.833403Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:1:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:3:0] targetVDisk# [2000000:1:0:1:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-05-07T08:54:17.833460Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:2:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:3:0] targetVDisk# [2000000:1:0:2:0] oldSyncState# [0 0] DbBirthLsn# 0 Formatting pdisk Creating PDisk Creating pdisk Verify that PDisk returns ERROR 2025-05-07T08:54:18.516032Z node 1 :BS_PDISK CRIT: {BPD39@blobstorage_pdisk_impl.cpp:2843} BlockDevice initialization error Details# Can't open file "/home/runner/.ya/build/build_root/zvgn/003eec/r3tmp/tmp1xFKEU//new_pdisk.dat": no such file. PDiskId# 1001 2025-05-07T08:54:18.516616Z node 1 :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:290} PDiskId# 1001 bootstrapped to the StateError, reason# Can't open file "/home/runner/.ya/build/build_root/zvgn/003eec/r3tmp/tmp1xFKEU//new_pdisk.dat": no such file. Can not be initialized Config: {TPDiskConfg Path# "/home/runner/.ya/build/build_root/zvgn/003eec/r3tmp/tmp1xFKEU//new_pdisk.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 8787907549938605323 PDiskId# 1001 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 HashedMainKey[0]# 0x221976E60BD392C7 StartOwnerRound# 10 SectorMap# false EnableSectorEncryption # 1 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# Enable WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0} PDiskId# 1001 2025-05-07T08:54:18.548062Z node 1 :BS_PROXY_PUT INFO: dsproxy_put.cpp:645: [e2e5f1b9c917f854] bootstrap ActorId# [1:541:2459] Group# 33554432 BlobCount# 1 BlobIDs# [[72057594037932033:2:8:0:0:352:0]] HandleClass# TabletLog Tactic# MinLatency RestartCounter# 0 Marker# BPP13 2025-05-07T08:54:18.548241Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [e2e5f1b9c917f854] Id# [72057594037932033:2:8:0:0:352:0] restore disk# 0 part# 0 
situation# ESituation::Unknown Marker# BPG51 2025-05-07T08:54:18.548285Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [e2e5f1b9c917f854] Id# [72057594037932033:2:8:0:0:352:0] restore disk# 1 part# 1 situation# ESituation::Unknown Marker# BPG51 2025-05-07T08:54:18.548313Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [e2e5f1b9c917f854] Id# [72057594037932033:2:8:0:0:352:0] restore disk# 2 part# 2 situation# ESituation::Unknown Marker# BPG51 2025-05-07T08:54:18.548337Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [e2e5f1b9c917f854] Id# [72057594037932033:2:8:0:0:352:0] restore disk# 3 part# 0 situation# ESituation::Unknown Marker# BPG51 2025-05-07T08:54:18.548362Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [e2e5f1b9c917f854] Id# [72057594037932033:2:8:0:0:352:0] restore disk# 3 part# 1 situation# ESituation::Unknown Marker# BPG51 2025-05-07T08:54:18.548389Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [e2e5f1b9c917f854] Id# [72057594037932033:2:8:0:0:352:0] restore disk# 3 part# 2 situation# ESituation::Unknown Marker# BPG51 2025-05-07T08:54:18.548429Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:65: [e2e5f1b9c917f854] restore Id# [72057594037932033:2:8:0:0:352:0] optimisticReplicas# 3 optimisticState# EBS_FULL Marker# BPG55 2025-05-07T08:54:18.548501Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [e2e5f1b9c917f854] partPlacement record partSituation# ESituation::Unknown to# 0 blob Id# [72057594037932033:2:8:0:0:352:1] Marker# BPG33 2025-05-07T08:54:18.548547Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [e2e5f1b9c917f854] Sending missing VPut part# 0 to# 0 blob Id# [72057594037932033:2:8:0:0:352:1] Marker# BPG32 2025-05-07T08:54:18.548593Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [e2e5f1b9c917f854] partPlacement record partSituation# ESituation::Unknown to# 1 blob Id# [72057594037932033:2:8:0:0:352:2] Marker# BPG33 2025-05-07T08:54:18.548621Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [e2e5f1b9c917f854] Sending missing VPut part# 1 to# 1 blob Id# [72057594037932033:2:8:0:0:352:2] Marker# BPG32 2025-05-07T08:54:18.548649Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [e2e5f1b9c917f854] partPlacement record partSituation# ESituation::Unknown to# 2 blob Id# [72057594037932033:2:8:0:0:352:3] Marker# BPG33 2025-05-07T08:54:18.548672Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [e2e5f1b9c917f854] Sending missing VPut part# 2 to# 2 blob Id# [72057594037932033:2:8:0:0:352:3] Marker# BPG32 2025-05-07T08:54:18.548822Z node 1 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [1:65:2091] NKikimr::TEvBlobStorage::TEvVPut# {ID# [72057594037932033:2:8:0:0:352:3] FDS# 352 HandleClass# TabletLog {MsgQoS ExtQueueId# PutTabletLog} DataSize# 0 Data# } cookie# 0 2025-05-07T08:54:18.548888Z node 1 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [1:58:2084] NKikimr::TEvBlobStorage::TEvVPut# {ID# [72057594037932033:2:8:0:0:352:2] FDS# 352 HandleClass# TabletLog {MsgQoS ExtQueueId# PutTabletLog} DataSize# 0 Data# } cookie# 0 2025-05-07T08:54:18.548932Z node 1 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [1:79:2105] NKikimr::TEvBlobStorage::TEvVPut# {ID# [72057594037932033:2:8:0:0:352:1] FDS# 352 HandleClass# TabletLog {MsgQoS ExtQueueId# PutTabletLog} DataSize# 0 Data# } cookie# 0 2025-05-07T08:54:18.563350Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [e2e5f1b9c917f854] received {EvVPutResult Status# OK 
ID# [72057594037932033:2:8:0:0:352:2] {MsgQoS MsgId# { SequenceId: 1 MsgId: 10 } Cost# 82771 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 11 }}}} from# [2000000:1:0:0:0] Marker# BPP01 2025-05-07T08:54:18.563669Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [e2e5f1b9c917f854] received {EvVPutResult Status# OK ID# [72057594037932033:2:8:0:0:352:3] {MsgQoS MsgId# { SequenceId: 1 MsgId: 10 } Cost# 82771 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 11 }}}} from# [2000000:1:0:1:0] Marker# BPP01 2025-05-07T08:54:18.563768Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [e2e5f1b9c917f854] received {EvVPutResult Status# OK ID# [72057594037932033:2:8:0:0:352:1] {MsgQoS MsgId# { SequenceId: 1 MsgId: 4 } Cost# 82771 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 5 }}}} from# [2000000:1:0:3:0] Marker# BPP01 2025-05-07T08:54:18.563850Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_put_impl.cpp:72: [e2e5f1b9c917f854] Result# TEvPutResult {Id# [72057594037932033:2:8:0:0:352:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.999479} GroupId# 33554432 Marker# BPP12 2025-05-07T08:54:18.563906Z node 1 :BS_PROXY_PUT INFO: dsproxy_put.cpp:486: [e2e5f1b9c917f854] SendReply putResult# TEvPutResult {Id# [72057594037932033:2:8:0:0:352:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.999479} ResponsesSent# 0 PutImpl.Blobs.size# 1 Last# true Marker# BPP21 2025-05-07T08:54:18.564102Z node 1 :BS_PROXY_PUT DEBUG: {BPP72@dsproxy_put.cpp:470} Query history GroupId# 33554432 HandleClass# TabletLog Tactic# MinLatency History# THistory { Entries# [ TEvVPut{ TimestampMs# 1.067 sample PartId# [72057594037932033:2:8:0:0:352:3] QueryCount# 1 VDiskId# [2000000:1:0:1:0] NodeId# 1 } TEvVPut{ TimestampMs# 1.068 sample PartId# [72057594037932033:2:8:0:0:352:2] QueryCount# 1 VDiskId# [2000000:1:0:0:0] NodeId# 1 } TEvVPut{ TimestampMs# 1.068 sample PartId# [72057594037932033:2:8:0:0:352:1] QueryCount# 1 VDiskId# [2000000:1:0:3:0] NodeId# 1 } TEvVPutResult{ TimestampMs# 15.546 VDiskId# [2000000:1:0:0:0] NodeId# 1 Status# OK } TEvVPutResult{ TimestampMs# 15.811 VDiskId# [2000000:1:0:1:0] NodeId# 1 Status# OK } TEvVPutResult{ TimestampMs# 15.903 VDiskId# [2000000:1:0:3:0] NodeId# 1 Status# OK } ] } |90.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/tx_allocator/ut/ydb-core-tx-tx_allocator-ut |90.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/tx_allocator/ut/ydb-core-tx-tx_allocator-ut |90.2%| [LD] {RESULT} $(B)/ydb/core/tx/tx_allocator/ut/ydb-core-tx-tx_allocator-ut >> TTxDataShardUploadRows::TestUploadShadowRowsShadowDataSplitThenPublish [GOOD] >> TTxDataShardUploadRows::UploadRowsToReplicatedTable >> TSchemeShardTest::FindSubDomainPathId [GOOD] >> TSchemeShardTest::FindSubDomainPathIdActor >> TBlobStorageWardenTest::TestBlockEncriptedGroup [GOOD] >> TTxDataShardUploadRows::TestUploadShadowRowsShadowDataPublishThenSplit [GOOD] >> TTxDataShardUploadRows::TestUploadShadowRowsShadowDataAlterSplitThenPublish ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/nodewarden/ut/unittest >> TBlobStorageWardenTest::TestBlockEncriptedGroup [GOOD] Test command err: 2025-05-07T08:54:18.520332Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: 
PDiskId# 0 VDISK[2000000:_:0:0:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:1:0] targetVDisk# [2000000:1:0:0:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-05-07T08:54:18.522030Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:0:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:2:0] targetVDisk# [2000000:1:0:0:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-05-07T08:54:18.522501Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:1:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:2:0] targetVDisk# [2000000:1:0:1:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-05-07T08:54:18.523079Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:2:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:3:0] targetVDisk# [2000000:1:0:2:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-05-07T08:54:18.523658Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:0:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:3:0] targetVDisk# [2000000:1:0:0:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-05-07T08:54:18.524372Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:1:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:3:0] targetVDisk# [2000000:1:0:1:0] oldSyncState# [0 0] DbBirthLsn# 0 tablet_helpers.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003eda/r3tmp/tmpEWXUcE/pdisk_1.dat 2025-05-07T08:54:19.422786Z node 1 :BS_PROXY_PUT INFO: dsproxy_put.cpp:645: [084d0c3a19bee089] bootstrap ActorId# [1:478:2460] Group# 33554432 BlobCount# 1 BlobIDs# [[72057594037932033:2:8:0:0:1298:0]] HandleClass# TabletLog Tactic# MinLatency RestartCounter# 0 Marker# BPP13 2025-05-07T08:54:19.422963Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [084d0c3a19bee089] Id# [72057594037932033:2:8:0:0:1298:0] restore disk# 0 part# 0 situation# ESituation::Unknown Marker# BPG51 2025-05-07T08:54:19.423004Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [084d0c3a19bee089] Id# [72057594037932033:2:8:0:0:1298:0] restore disk# 1 part# 1 situation# ESituation::Unknown Marker# BPG51 2025-05-07T08:54:19.423029Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [084d0c3a19bee089] Id# [72057594037932033:2:8:0:0:1298:0] restore disk# 2 part# 2 situation# ESituation::Unknown Marker# BPG51 2025-05-07T08:54:19.423055Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [084d0c3a19bee089] Id# [72057594037932033:2:8:0:0:1298:0] restore disk# 3 part# 0 situation# ESituation::Unknown Marker# BPG51 2025-05-07T08:54:19.423120Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [084d0c3a19bee089] Id# [72057594037932033:2:8:0:0:1298:0] restore disk# 3 part# 1 situation# ESituation::Unknown Marker# BPG51 2025-05-07T08:54:19.423149Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [084d0c3a19bee089] Id# [72057594037932033:2:8:0:0:1298:0] restore disk# 3 part# 2 situation# ESituation::Unknown Marker# BPG51 2025-05-07T08:54:19.423190Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:65: [084d0c3a19bee089] restore Id# [72057594037932033:2:8:0:0:1298:0] optimisticReplicas# 3 optimisticState# EBS_FULL Marker# BPG55 2025-05-07T08:54:19.423267Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: 
[084d0c3a19bee089] partPlacement record partSituation# ESituation::Unknown to# 0 blob Id# [72057594037932033:2:8:0:0:1298:1] Marker# BPG33 2025-05-07T08:54:19.423309Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [084d0c3a19bee089] Sending missing VPut part# 0 to# 0 blob Id# [72057594037932033:2:8:0:0:1298:1] Marker# BPG32 2025-05-07T08:54:19.423345Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [084d0c3a19bee089] partPlacement record partSituation# ESituation::Unknown to# 1 blob Id# [72057594037932033:2:8:0:0:1298:2] Marker# BPG33 2025-05-07T08:54:19.423368Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [084d0c3a19bee089] Sending missing VPut part# 1 to# 1 blob Id# [72057594037932033:2:8:0:0:1298:2] Marker# BPG32 2025-05-07T08:54:19.423399Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [084d0c3a19bee089] partPlacement record partSituation# ESituation::Unknown to# 2 blob Id# [72057594037932033:2:8:0:0:1298:3] Marker# BPG33 2025-05-07T08:54:19.423421Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [084d0c3a19bee089] Sending missing VPut part# 2 to# 2 blob Id# [72057594037932033:2:8:0:0:1298:3] Marker# BPG32 2025-05-07T08:54:19.423609Z node 1 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [1:46:2090] NKikimr::TEvBlobStorage::TEvVPut# {ID# [72057594037932033:2:8:0:0:1298:3] FDS# 1298 HandleClass# TabletLog {MsgQoS ExtQueueId# PutTabletLog} DataSize# 0 Data# } cookie# 0 2025-05-07T08:54:19.423680Z node 1 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [1:39:2083] NKikimr::TEvBlobStorage::TEvVPut# {ID# [72057594037932033:2:8:0:0:1298:2] FDS# 1298 HandleClass# TabletLog {MsgQoS ExtQueueId# PutTabletLog} DataSize# 0 Data# } cookie# 0 2025-05-07T08:54:19.423722Z node 1 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [1:60:2104] NKikimr::TEvBlobStorage::TEvVPut# {ID# [72057594037932033:2:8:0:0:1298:1] FDS# 1298 HandleClass# TabletLog {MsgQoS ExtQueueId# PutTabletLog} DataSize# 0 Data# } cookie# 0 2025-05-07T08:54:19.442776Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [084d0c3a19bee089] received {EvVPutResult Status# OK ID# [72057594037932033:2:8:0:0:1298:2] {MsgQoS MsgId# { SequenceId: 1 MsgId: 9 } Cost# 90220 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 10 }}}} from# [2000000:1:0:0:0] Marker# BPP01 2025-05-07T08:54:19.443000Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [084d0c3a19bee089] received {EvVPutResult Status# OK ID# [72057594037932033:2:8:0:0:1298:3] {MsgQoS MsgId# { SequenceId: 1 MsgId: 10 } Cost# 90220 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 11 }}}} from# [2000000:1:0:1:0] Marker# BPP01 2025-05-07T08:54:19.443087Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [084d0c3a19bee089] received {EvVPutResult Status# OK ID# [72057594037932033:2:8:0:0:1298:1] {MsgQoS MsgId# { SequenceId: 1 MsgId: 8 } Cost# 90220 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 9 }}}} from# [2000000:1:0:3:0] Marker# BPP01 2025-05-07T08:54:19.443167Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_put_impl.cpp:72: [084d0c3a19bee089] Result# TEvPutResult {Id# [72057594037932033:2:8:0:0:1298:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.999479} GroupId# 
33554432 Marker# BPP12 2025-05-07T08:54:19.443229Z node 1 :BS_PROXY_PUT INFO: dsproxy_put.cpp:486: [084d0c3a19bee089] SendReply putResult# TEvPutResult {Id# [72057594037932033:2:8:0:0:1298:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.999479} ResponsesSent# 0 PutImpl.Blobs.size# 1 Last# true Marker# BPP21 2025-05-07T08:54:19.443421Z node 1 :BS_PROXY_PUT DEBUG: {BPP72@dsproxy_put.cpp:470} Query history GroupId# 33554432 HandleClass# TabletLog Tactic# MinLatency History# THistory { Entries# [ TEvVPut{ TimestampMs# 1.135 sample PartId# [72057594037932033:2:8:0:0:1298:3] QueryCount# 1 VDiskId# [2000000:1:0:1:0] NodeId# 1 } TEvVPut{ TimestampMs# 1.136 sample PartId# [72057594037932033:2:8:0:0:1298:2] QueryCount# 1 VDiskId# [2000000:1:0:0:0] NodeId# 1 } TEvVPut{ TimestampMs# 1.136 sample PartId# [72057594037932033:2:8:0:0:1298:1] QueryCount# 1 VDiskId# [2000000:1:0:3:0] NodeId# 1 } TEvVPutResult{ TimestampMs# 20.255 VDiskId# [2000000:1:0:0:0] NodeId# 1 Status# OK } TEvVPutResult{ TimestampMs# 20.421 VDiskId# [2000000:1:0:1:0] NodeId# 1 Status# OK } TEvVPutResult{ TimestampMs# 20.503 VDiskId# [2000000:1:0:3:0] NodeId# 1 Status# OK } ] } 2025-05-07T08:54:19.510833Z node 1 :BS_PROXY_PUT INFO: dsproxy_put.cpp:645: [b6b2c6548553d7a5] bootstrap ActorId# [1:523:2497] Group# 33554432 BlobCount# 1 BlobIDs# [[72057594037932033:2:9:0:0:224:0]] HandleClass# TabletLog Tactic# MinLatency RestartCounter# 0 Marker# BPP13 2025-05-07T08:54:19.511021Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [b6b2c6548553d7a5] Id# [72057594037932033:2:9:0:0:224:0] restore disk# 0 part# 0 situation# ESituation::Unknown Marker# BPG51 2025-05-07T08:54:19.511089Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [b6b2c6548553d7a5] Id# [72057594037932033:2:9:0:0:224:0] restore disk# 1 part# 1 situation# ESituation::Unknown Marker# BPG51 2025-05-07T08:54:19.511118Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [b6b2c6548553d7a5] Id# [72057594037932033:2:9:0:0:224:0] restore disk# 2 part# 2 situation# ESituation::Unknown Marker# BPG51 2025-05-07T08:54:19.511142Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [b6b2c6548553d7a5] Id# [72057594037932033:2:9:0:0:224:0] restore disk# 3 part# 0 situation# ESituation::Unknown Marker# BPG51 2025-05-07T08:54:19.511166Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [b6b2c6548553d7a5] Id# [72057594037932033:2:9:0:0:224:0] restore disk# 3 part# 1 situation# ESituation::Unknown Marker# BPG51 2025-05-07T08:54:19.511194Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [b6b2c6548553d7a5] Id# [72057594037932033:2:9:0:0:224:0] restore disk# 3 part# 2 situation# ESituation::Unknown Marker# BPG51 2025-05-07T08:54:19.511233Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:65: [b6b2c6548553d7a5] restore Id# [72057594037932033:2:9:0:0:224:0] optimisticReplicas# 3 optimisticState# EBS_FULL Marker# BPG55 2025-05-07T08:54:19.511303Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [b6b2c6548553d7a5] partPlacement record partSituation# ESituation::Unknown to# 0 blob Id# [72057594037932033:2:9:0:0:224:1] Marker# BPG33 2025-05-07T08:54:19.511347Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [b6b2c6548553d7a5] Sending missing VPut part# 0 to# 0 blob Id# [72057594037932033:2:9:0:0:224:1] Marker# BPG32 2025-05-07T08:54:19.511389Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [b6b2c6548553d7a5] partPlacement record partSituation# ESituation::Unknown to# 1 blob Id# 
[72057594037932033:2:9:0:0:224:2] Marker# BPG33 2025-05-07T08:54:19.511413Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [b6b2c6548553d7a5] Sending missing VPut part# 1 to# 1 blob Id# [72057594037932033:2:9:0:0:224:2] Marker# BPG32 2025-05-07T08:54:19.511440Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [b6b2c6548553d7a5] partPlacement record partSituation# ESituation::Unknown to# 2 blob Id# [72057594037932033:2:9:0:0:224:3] Marker# BPG33 2025-05-07T08:54:19.511465Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [b6b2c6548553d7a5] Sending missing VPut part# 2 to# 2 blob Id# [72057594037932033:2:9:0:0:224:3] Marker# BPG32 2025-05-07T08:54:19.511637Z node 1 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [1:39:2083] NKikimr::TEvBlobStorage::TEvVPut# {ID# [72057594037932033:2:9:0:0:224:3] FDS# 224 HandleClass# TabletLog {MsgQoS ExtQueueId# PutTabletLog} DataSize# 0 Data# } cookie# 0 2025-05-07T08:54:19.511701Z node 1 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [1:60:2104] NKikimr::TEvBlobStorage::TEvVPut# {ID# [72057594037932033:2:9:0:0:224:2] FDS# 224 HandleClass# TabletLog {MsgQoS ExtQueueId# PutTabletLog} DataSize# 0 Data# } cookie# 0 2025-05-07T08:54:19.511748Z node 1 :BS_PROXY DEBUG: group_sessions. ... utLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 3 }}}} from# [82000002:1:0:0:0] Marker# BPP01 2025-05-07T08:54:22.044663Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_put_impl.cpp:72: [f913878b3da83702] Result# TEvPutResult {Id# [1234:2:0:0:0:5:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.999479} GroupId# 2181038082 Marker# BPP12 2025-05-07T08:54:22.044746Z node 2 :BS_PROXY_PUT INFO: dsproxy_put.cpp:486: [f913878b3da83702] SendReply putResult# TEvPutResult {Id# [1234:2:0:0:0:5:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.999479} ResponsesSent# 0 PutImpl.Blobs.size# 1 Last# true Marker# BPP21 2025-05-07T08:54:22.044891Z node 2 :BS_PROXY_PUT DEBUG: {BPP72@dsproxy_put.cpp:470} Query history GroupId# 2181038082 HandleClass# TabletLog Tactic# Default History# THistory { Entries# [ TEvVPut{ TimestampMs# 0.55 sample PartId# [1234:2:0:0:0:5:1] QueryCount# 1 VDiskId# [82000002:1:0:0:0] NodeId# 2 } TEvVPutResult{ TimestampMs# 3.938 VDiskId# [82000002:1:0:0:0] NodeId# 2 Status# OK } ] } 2025-05-07T08:54:22.045444Z node 3 :BS_PROXY INFO: dsproxy_state.cpp:146: Group# 2181038082 TEvConfigureProxy received GroupGeneration# IsLimitedKeyless# false Marker# DSP02 2025-05-07T08:54:22.045488Z node 3 :BS_PROXY INFO: dsproxy_state.cpp:55: Group# 2181038082 SetStateUnconfigured Marker# DSP07 2025-05-07T08:54:22.045570Z node 3 :BS_PROXY DEBUG: dsproxy_impl.h:205: Group# 2181038082 HandleEnqueue# TEvBlock {TabletId# 1234 Generation# 3 Deadline# 18446744073709551 IsMonitored# 1} Marker# DSP17 2025-05-07T08:54:22.046523Z node 3 :BS_NODE ERROR: {NW19@node_warden_group.cpp:211} error while parsing group GroupId# 2181038082 Err# LifeCyclePhase# KEY_NOT_LOADED Key.Id# "" Key.Version# 0 MainKey.Id# "/home/runner/.ya/build/build_root/zvgn/003eda/r3tmp/tmpQP0KqL//key.txt" MainKey.Version# 1 GroupKeyNonce# 2181038082 2025-05-07T08:54:22.047448Z node 3 :BS_PROXY INFO: dsproxy_state.cpp:146: Group# 2181038082 TEvConfigureProxy received GroupGeneration# 1 IsLimitedKeyless# true Marker# DSP02 2025-05-07T08:54:22.047495Z node 3 :BS_PROXY NOTICE: dsproxy_state.cpp:294: EnsureMonitoring Group# 2181038082 IsLimitedKeyless# 1 fullIfPossible# 0 Marker# DSP58 
2025-05-07T08:54:22.049463Z node 3 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 2181038082 Actor# [3:603:2105] Create Queue# [3:605:2106] targetNodeId# 2 Marker# DSP01 2025-05-07T08:54:22.049610Z node 3 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 2181038082 Actor# [3:603:2105] Create Queue# [3:606:2107] targetNodeId# 2 Marker# DSP01 2025-05-07T08:54:22.049726Z node 3 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 2181038082 Actor# [3:603:2105] Create Queue# [3:607:2108] targetNodeId# 2 Marker# DSP01 2025-05-07T08:54:22.049839Z node 3 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 2181038082 Actor# [3:603:2105] Create Queue# [3:608:2109] targetNodeId# 2 Marker# DSP01 2025-05-07T08:54:22.049946Z node 3 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 2181038082 Actor# [3:603:2105] Create Queue# [3:609:2110] targetNodeId# 2 Marker# DSP01 2025-05-07T08:54:22.054389Z node 3 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 2181038082 Actor# [3:603:2105] Create Queue# [3:610:2111] targetNodeId# 2 Marker# DSP01 2025-05-07T08:54:22.054542Z node 3 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 2181038082 Actor# [3:603:2105] Create Queue# [3:611:2112] targetNodeId# 2 Marker# DSP01 2025-05-07T08:54:22.054570Z node 3 :BS_PROXY INFO: dsproxy_state.cpp:29: Group# 2181038082 SetStateEstablishingSessions Marker# DSP03 2025-05-07T08:54:22.061480Z node 3 :BS_PROXY DEBUG: dsproxy_state.cpp:209: Group# 2181038082 Handle TEvProxyQueueState# {VDiskId# [82000002:1:0:0:0] QueueId# 1 IsConnected# true ExtraBlockChecksSupport# true CostModel# {SeekTimeUs# 8000 ReadSpeedBps# 127000000 WriteSpeedBps# 127000000 ReadBlockSize# 524288 WriteBlockSize# 524288 MinHugeBlobInBytes# 524257 GType# none}} Duration# 0.000000s Marker# DSP04 2025-05-07T08:54:22.061722Z node 3 :BS_PROXY DEBUG: dsproxy_state.cpp:209: Group# 2181038082 Handle TEvProxyQueueState# {VDiskId# [82000002:1:0:0:0] QueueId# 2 IsConnected# true ExtraBlockChecksSupport# true CostModel# {SeekTimeUs# 8000 ReadSpeedBps# 127000000 WriteSpeedBps# 127000000 ReadBlockSize# 524288 WriteBlockSize# 524288 MinHugeBlobInBytes# 524257 GType# none}} Duration# 0.000000s Marker# DSP04 2025-05-07T08:54:22.061816Z node 3 :BS_PROXY DEBUG: dsproxy_state.cpp:209: Group# 2181038082 Handle TEvProxyQueueState# {VDiskId# [82000002:1:0:0:0] QueueId# 3 IsConnected# true ExtraBlockChecksSupport# true CostModel# {SeekTimeUs# 8000 ReadSpeedBps# 127000000 WriteSpeedBps# 127000000 ReadBlockSize# 524288 WriteBlockSize# 524288 MinHugeBlobInBytes# 524257 GType# none}} Duration# 0.000000s Marker# DSP04 2025-05-07T08:54:22.061992Z node 3 :BS_PROXY DEBUG: dsproxy_state.cpp:209: Group# 2181038082 Handle TEvProxyQueueState# {VDiskId# [82000002:1:0:0:0] QueueId# 4 IsConnected# true ExtraBlockChecksSupport# true CostModel# {SeekTimeUs# 8000 ReadSpeedBps# 127000000 WriteSpeedBps# 127000000 ReadBlockSize# 524288 WriteBlockSize# 524288 MinHugeBlobInBytes# 524257 GType# none}} Duration# 0.000000s Marker# DSP04 2025-05-07T08:54:22.062117Z node 3 :BS_PROXY DEBUG: dsproxy_state.cpp:209: Group# 2181038082 Handle TEvProxyQueueState# {VDiskId# [82000002:1:0:0:0] QueueId# 5 IsConnected# true ExtraBlockChecksSupport# true CostModel# {SeekTimeUs# 8000 ReadSpeedBps# 127000000 WriteSpeedBps# 127000000 ReadBlockSize# 524288 WriteBlockSize# 524288 MinHugeBlobInBytes# 524257 GType# none}} Duration# 0.000000s Marker# DSP04 2025-05-07T08:54:22.062184Z node 3 :BS_PROXY DEBUG: dsproxy_state.cpp:209: Group# 2181038082 Handle TEvProxyQueueState# {VDiskId# [82000002:1:0:0:0] QueueId# 6 IsConnected# true 
ExtraBlockChecksSupport# true CostModel# {SeekTimeUs# 8000 ReadSpeedBps# 127000000 WriteSpeedBps# 127000000 ReadBlockSize# 524288 WriteBlockSize# 524288 MinHugeBlobInBytes# 524257 GType# none}} Duration# 0.000000s Marker# DSP04 2025-05-07T08:54:22.062242Z node 3 :BS_PROXY DEBUG: dsproxy_state.cpp:209: Group# 2181038082 Handle TEvProxyQueueState# {VDiskId# [82000002:1:0:0:0] QueueId# 7 IsConnected# true ExtraBlockChecksSupport# true CostModel# {SeekTimeUs# 8000 ReadSpeedBps# 127000000 WriteSpeedBps# 127000000 ReadBlockSize# 524288 WriteBlockSize# 524288 MinHugeBlobInBytes# 524257 GType# none}} Duration# 0.000000s Marker# DSP04 2025-05-07T08:54:22.062271Z node 3 :BS_PROXY INFO: dsproxy_state.cpp:183: Group# 2181038082 -> StateWork Marker# DSP11 2025-05-07T08:54:22.062308Z node 3 :BS_PROXY INFO: dsproxy_state.cpp:78: Group# 2181038082 SetStateWork Marker# DSP15 2025-05-07T08:54:22.062474Z node 3 :BS_PROXY_BLOCK DEBUG: dsproxy_block.cpp:150: [d70ef3c23a1a2346] bootstrap ActorId# [3:612:2113] Group# 2181038082 TabletId# 1234 Generation# 3 Deadline# 586524-01-19T08:01:49.551615Z RestartCounter# 0 Marker# DSPB05 2025-05-07T08:54:22.062526Z node 3 :BS_PROXY_BLOCK DEBUG: dsproxy_block.cpp:111: [d70ef3c23a1a2346] Sending TEvVBlock Tablet# 1234 Generation# 3 vdiskId# [82000002:1:0:0:0] node# 2 Marker# DSPB03 2025-05-07T08:54:22.062706Z node 3 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [3:605:2106] NKikimr::TEvBlobStorage::TEvVBlock# NKikimrBlobStorage.TEvVBlock TabletId: 1234 Generation: 3 VDiskID { GroupID: 2181038082 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } IssuerGuid: 6227659368209463211 MsgQoS { ExtQueueId: PutTabletLog } cookie# 0 2025-05-07T08:54:22.068426Z node 3 :BS_PROXY_BLOCK DEBUG: dsproxy_block.cpp:43: [d70ef3c23a1a2346] Handle TEvVBlockResult status# OK From# [82000002:1:0:0:0] NodeId# 2 Marker# DSPB01 2025-05-07T08:54:22.068508Z node 3 :BS_PROXY_BLOCK DEBUG: dsproxy_block.cpp:100: [d70ef3c23a1a2346] Result# TEvBlockResult {Status# OK} Marker# DSPB04 Sending TEvPut 2025-05-07T08:54:22.068844Z node 3 :BS_PROXY INFO: dsproxy_impl.h:309: Group# 2181038082 HandleError ev# TEvPut {Id# [1234:3:0:0:0:10:0] Size# 10 Deadline# 18446744073709551 HandleClass# TabletLog Tactic# Default} Response# TEvPutResult {Id# [1234:3:0:0:0:10:0] Status# ERROR StatusFlags# { } ErrorReason# "Created as LIMITED without keys. It happens when tenant keys are missing on the node." ApproximateFreeSpaceShare# 0} Marker# DSP31 Sending TEvPut 2025-05-07T08:54:22.069042Z node 3 :BS_PROXY DEBUG: dsproxy_impl.h:309: Group# 2181038082 HandleError ev# TEvPut {Id# [1234:4:0:0:0:10:0] Size# 10 Deadline# 18446744073709551 HandleClass# TabletLog Tactic# Default} Response# TEvPutResult {Id# [1234:4:0:0:0:10:0] Status# ERROR StatusFlags# { } ErrorReason# "Created as LIMITED without keys. It happens when tenant keys are missing on the node." 
ApproximateFreeSpaceShare# 0} Marker# DSP31 Sending TEvPut 2025-05-07T08:54:22.069361Z node 2 :BS_PROXY_PUT INFO: dsproxy_put.cpp:645: [91379e686f748e92] bootstrap ActorId# [2:613:2511] Group# 2181038082 BlobCount# 1 BlobIDs# [[1234:2:0:0:0:11:0]] HandleClass# TabletLog Tactic# Default RestartCounter# 0 Marker# BPP13 2025-05-07T08:54:22.069510Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [91379e686f748e92] Id# [1234:2:0:0:0:11:0] restore disk# 0 part# 0 situation# ESituation::Unknown Marker# BPG51 2025-05-07T08:54:22.069557Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:65: [91379e686f748e92] restore Id# [1234:2:0:0:0:11:0] optimisticReplicas# 1 optimisticState# EBS_FULL Marker# BPG55 2025-05-07T08:54:22.069619Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [91379e686f748e92] partPlacement record partSituation# ESituation::Unknown to# 0 blob Id# [1234:2:0:0:0:11:1] Marker# BPG33 2025-05-07T08:54:22.069661Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [91379e686f748e92] Sending missing VPut part# 0 to# 0 blob Id# [1234:2:0:0:0:11:1] Marker# BPG32 2025-05-07T08:54:22.069798Z node 2 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [2:592:2501] NKikimr::TEvBlobStorage::TEvVPut# {ID# [1234:2:0:0:0:11:1] FDS# 11 HandleClass# TabletLog {MsgQoS ExtQueueId# PutTabletLog} DataSize# 0 Data# } cookie# 0 2025-05-07T08:54:22.070045Z node 2 :BS_VDISK_PUT ERROR: blobstorage_skeleton.cpp:568: PDiskId# 1000 VDISK[82000002:_:0:0:0]: (2181038082) TEvVPut: failed to pass the Hull check; id# [1234:2:0:0:0:11:1] status# {Status# BLOCKED} Marker# BSVS03 2025-05-07T08:54:22.070346Z node 2 :BS_PROXY_PUT INFO: dsproxy_put.cpp:260: [91379e686f748e92] received {EvVPutResult Status# BLOCKED ErrorReason# "blocked" ID# [1234:2:0:0:0:11:1] {MsgQoS MsgId# { SequenceId: 1 MsgId: 3 } Cost# 80086 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 4 }}}} from# [82000002:1:0:0:0] Marker# BPP01 2025-05-07T08:54:22.070438Z node 2 :BS_PROXY_PUT ERROR: dsproxy_put_impl.cpp:72: [91379e686f748e92] Result# TEvPutResult {Id# [1234:2:0:0:0:11:0] Status# BLOCKED StatusFlags# { } ErrorReason# "Got VPutResult status# BLOCKED from VDiskId# [82000002:1:0:0:0]" ApproximateFreeSpaceShare# 0} GroupId# 2181038082 Marker# BPP12 2025-05-07T08:54:22.070496Z node 2 :BS_PROXY_PUT NOTICE: dsproxy_put.cpp:486: [91379e686f748e92] SendReply putResult# TEvPutResult {Id# [1234:2:0:0:0:11:0] Status# BLOCKED StatusFlags# { } ErrorReason# "Got VPutResult status# BLOCKED from VDiskId# [82000002:1:0:0:0]" ApproximateFreeSpaceShare# 0} ResponsesSent# 0 PutImpl.Blobs.size# 1 Last# true Marker# BPP21 2025-05-07T08:54:22.070606Z node 2 :BS_PROXY_PUT DEBUG: {BPP72@dsproxy_put.cpp:470} Query history GroupId# 2181038082 HandleClass# TabletLog Tactic# Default History# THistory { Entries# [ TEvVPut{ TimestampMs# 0.631 sample PartId# [1234:2:0:0:0:11:1] QueryCount# 1 VDiskId# [82000002:1:0:0:0] NodeId# 2 } ] } 2025-05-07T08:54:22.070971Z node 3 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [3:605:2106] NKikimr::TEvBlobStorage::TEvVCollectGarbage# {TEvVCollectGarbage for [tablet:gen:cnt:channel]=[1234:4294967295:4294967295:0] collect=[4294967295:4294967295] cookie# 0 >> TSchemeShardTest::FindSubDomainPathIdActor [GOOD] >> TSchemeShardTest::FindSubDomainPathIdActorAsync >> DataStreams::TestListStreamConsumers [GOOD] >> DataStreams::TestListShards1Shard >> 
PersQueueSdkReadSessionTest::ClosesAfterFailedConnectionToCds [GOOD] >> DataShardSnapshots::UncommittedChangesRenameTable-UseSink [GOOD] >> DataStreams::TestReservedResourcesMetering [GOOD] >> DataStreams::TestReservedStorageMetering >> DataStreams::ChangeBetweenRetentionModes [GOOD] >> DataStreams::TestCreateExistingStream >> TTxDataShardUploadRows::TestUploadRowsDropColumnRace [GOOD] >> TTxDataShardUploadRows::TestUploadRowsLocks >> DataStreams::TestShardPagination [GOOD] >> CompressExecutor::TestExecutorMemUsage [GOOD] >> DataStreams::Test_AutoPartitioning_Describe [GOOD] >> DataStreams::Test_Crreate_AutoPartitioning_Disabled >> DataStreams::TestDeleteStreamWithEnforceFlagFalse [GOOD] >> DataStreams::TestGetRecords1MBMessagesOneByOneBySeqNo >> TSchemeShardTest::FindSubDomainPathIdActorAsync [GOOD] >> TTxDataShardUploadRows::UploadRowsToReplicatedTable [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/persqueue_public/ut/unittest >> PersQueueSdkReadSessionTest::ClosesAfterFailedConnectionToCds [GOOD] Test command err: 2025-05-07T08:51:36.926078Z :WriteRAW INFO: Random seed for debugging is 1746607896926048 2025-05-07T08:51:37.286146Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501623796989133040:2074];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:51:37.291567Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-05-07T08:51:37.334092Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7501623796614687579:2072];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:51:37.338404Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-05-07T08:51:37.588292Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0035f1/r3tmp/tmpAL46SC/pdisk_1.dat 2025-05-07T08:51:37.600170Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-05-07T08:51:37.910126Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:51:37.933768Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:51:37.933898Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:51:37.940300Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:51:37.940388Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:51:37.940757Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:51:37.955364Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-05-07T08:51:37.962618Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on 
GrpcPort 16239, node 1 2025-05-07T08:51:38.179837Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/zvgn/0035f1/r3tmp/yandex8tCZGT.tmp 2025-05-07T08:51:38.179873Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/zvgn/0035f1/r3tmp/yandex8tCZGT.tmp 2025-05-07T08:51:38.180055Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/zvgn/0035f1/r3tmp/yandex8tCZGT.tmp 2025-05-07T08:51:38.180210Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-05-07T08:51:38.331173Z INFO: TTestServer started on Port 4625 GrpcPort 16239 TClient is connected to server localhost:4625 PQClient connected to localhost:16239 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:51:38.786191Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... waiting... waiting... waiting... 2025-05-07T08:51:41.693283Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7501623813794557082:2310], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T08:51:41.693423Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7501623813794557094:2313], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T08:51:41.693499Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T08:51:41.733453Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623814169003242:2342], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T08:51:41.733525Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623814169003233:2339], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T08:51:41.733647Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T08:51:41.738840Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710661:3, at schemeshard: 72057594046644480
2025-05-07T08:51:41.817405Z node 2 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [2:7501623813794557098:2125] txid# 281474976715657, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exists but creating right now (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateCreate)" severity: 1 }
2025-05-07T08:51:41.855266Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501623814169003247:2343], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710661 completed, doublechecking }
2025-05-07T08:51:41.855797Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7501623813794557097:2314], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710661 completed, doublechecking }
2025-05-07T08:51:41.940382Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501623814169003342:2686] txid# 281474976710662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 }
2025-05-07T08:51:41.941564Z node 2 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [2:7501623813794557125:2131] txid# 281474976715658, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 }
2025-05-07T08:51:42.218213Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480
2025-05-07T08:51:42.220965Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7501623814169003355:2349], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003
2025-05-07T08:51:42.222309Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2147: SessionId: ydb://session/3?node_id=1&id=OTYzODU3OWUtYWE4N2QyOWItZGFhOTc4YTYtNWY1Yzc0Y2Y=, ActorId: [1:7501623814169003230:2337], ActorState: ExecuteState, TraceId: 01jtmz3303cz9y91beff08nbd8, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id:
2025-05-07T08:51:42.223923Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7501623813794557132:2318], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-05-07T08:51:42.225804Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2147: SessionId: ydb://session/3?node_id=2&id=NjI1MGRkZDgtZTljNWVjZTYtMmZlYTBiYjYtMmVjYzk4YjE=, ActorId: [2:7501623813794557080:2309], ActorState: ExecuteState, TraceId: 01jtmz32zgfqhavsc1crfn0n7x, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-05-07T08:51:42.228708Z node 2 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-05-07T08:51:42.228151Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-05-07T08:51:42.282512Z node 1 :META ... itionChooser [15:7501624493956705324:2515] (SourceId=src, PreferedPartition=(NULL)) Start idle 2025-05-07T08:54:19.694117Z node 15 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:689: ProceedPartition. session cookie: 1 sessionId: partition: 0 expectedGeneration: (NULL) 2025-05-07T08:54:19.697283Z node 16 :PERSQUEUE DEBUG: pq_impl.cpp:2874: [PQ: 72075186224037892] server connected, pipe [15:7501624493956705398:2515], now have 1 active actors on pipe 2025-05-07T08:54:19.700889Z node 15 :PQ_WRITE_PROXY DEBUG: writer.cpp:798: TPartitionWriter 72075186224037892 (partition=0) TEvClientConnected Status OK, TabletId: 72075186224037892, NodeId 16, Generation: 1 2025-05-07T08:54:19.701488Z node 16 :PERSQUEUE DEBUG: pq_impl.cpp:342: Handle TEvRequest topic: 'rt3.dc1--test-topic' requestId: 2025-05-07T08:54:19.701556Z node 16 :PERSQUEUE DEBUG: pq_impl.cpp:2788: [PQ: 72075186224037892] got client message batch for topic 'rt3.dc1--test-topic' partition 0 2025-05-07T08:54:19.701690Z node 16 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie src|be2ffdc6-ef59d333-a4d1f1f7-a319359_0 generated for partition 0 topic 'rt3.dc1--test-topic' owner src 2025-05-07T08:54:19.701835Z node 16 :PERSQUEUE DEBUG: partition_write.cpp:35: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ReplyOwnerOk. 
Partition: 0 2025-05-07T08:54:19.701919Z node 16 :PERSQUEUE DEBUG: pq_impl.cpp:377: Answer ok topic: 'rt3.dc1--test-topic' partition: 0 messageNo: 0 requestId: cookie: 0 2025-05-07T08:54:19.702783Z node 16 :PERSQUEUE DEBUG: pq_impl.cpp:342: Handle TEvRequest topic: 'rt3.dc1--test-topic' requestId: 2025-05-07T08:54:19.702829Z node 16 :PERSQUEUE DEBUG: pq_impl.cpp:2788: [PQ: 72075186224037892] got client message batch for topic 'rt3.dc1--test-topic' partition 0 2025-05-07T08:54:19.702938Z node 16 :PERSQUEUE DEBUG: pq_impl.cpp:377: Answer ok topic: 'rt3.dc1--test-topic' partition: 0 messageNo: 0 requestId: cookie: 0 2025-05-07T08:54:19.703448Z node 15 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:865: session inited cookie: 1 partition: 0 MaxSeqNo: 0 sessionId: src|be2ffdc6-ef59d333-a4d1f1f7-a319359_0 2025-05-07T08:54:19.704594Z :INFO: [] MessageGroupId [src] SessionId [] Counters: { Errors: 0 CurrentSessionLifetimeMs: 1746608059704 BytesWritten: 0 MessagesWritten: 0 BytesWrittenCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-05-07T08:54:19.704776Z :INFO: [] MessageGroupId [src] SessionId [] Write session established. Init response: session_id: "src|be2ffdc6-ef59d333-a4d1f1f7-a319359_0" topic: "test-topic" cluster: "dc1" supported_codecs: CODEC_RAW supported_codecs: CODEC_GZIP supported_codecs: CODEC_LZOP 2025-05-07T08:54:19.705033Z :INFO: [] MessageGroupId [src] SessionId [src|be2ffdc6-ef59d333-a4d1f1f7-a319359_0] Write session: close. Timeout = 0 ms 2025-05-07T08:54:19.705117Z :INFO: [] MessageGroupId [src] SessionId [src|be2ffdc6-ef59d333-a4d1f1f7-a319359_0] Write session will now close 2025-05-07T08:54:19.705203Z :DEBUG: [] MessageGroupId [src] SessionId [src|be2ffdc6-ef59d333-a4d1f1f7-a319359_0] Write session: aborting 2025-05-07T08:54:19.705841Z :INFO: [] MessageGroupId [src] SessionId [src|be2ffdc6-ef59d333-a4d1f1f7-a319359_0] Write session: gracefully shut down, all writes complete 2025-05-07T08:54:19.705912Z :DEBUG: [] MessageGroupId [src] SessionId [src|be2ffdc6-ef59d333-a4d1f1f7-a319359_0] Write session: destroy 2025-05-07T08:54:19.717429Z node 15 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 1 sessionId: src|be2ffdc6-ef59d333-a4d1f1f7-a319359_0 grpc read done: success: 0 data: 2025-05-07T08:54:19.717476Z node 15 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 1 sessionId: src|be2ffdc6-ef59d333-a4d1f1f7-a319359_0 grpc read failed 2025-05-07T08:54:19.717514Z node 15 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 1 sessionId: src|be2ffdc6-ef59d333-a4d1f1f7-a319359_0 grpc closed 2025-05-07T08:54:19.717542Z node 15 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 1 sessionId: src|be2ffdc6-ef59d333-a4d1f1f7-a319359_0 is DEAD 2025-05-07T08:54:19.718697Z node 15 :PQ_WRITE_PROXY DEBUG: writer.cpp:538: TPartitionWriter 72075186224037892 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-05-07T08:54:19.719331Z node 16 :PERSQUEUE DEBUG: pq_impl.cpp:2899: [PQ: 72075186224037892] server disconnected, pipe [15:7501624493956705398:2515] destroyed 2025-05-07T08:54:19.719421Z node 16 :PERSQUEUE DEBUG: partition_write.cpp:138: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::DropOwner. 
2025-05-07T08:54:19.810666Z :INFO: [/Root] [/Root] [f531419a-633c2cd9-31a19c72-96dccf42] Starting read session 2025-05-07T08:54:19.810737Z :DEBUG: [/Root] [/Root] [f531419a-633c2cd9-31a19c72-96dccf42] Starting cluster discovery 2025-05-07T08:54:19.811060Z :INFO: [/Root] [/Root] [f531419a-633c2cd9-31a19c72-96dccf42] Cluster discovery request failed. Status: TRANSPORT_UNAVAILABLE. Issues: "
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:10819: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint localhost:10819
: Error: Endpoint list is empty for database /Root, cluster endpoint localhost:10819. "
2025-05-07T08:54:19.811120Z :DEBUG: [/Root] [/Root] [f531419a-633c2cd9-31a19c72-96dccf42] Restart cluster discovery in 0.006222s
2025-05-07T08:54:19.819065Z :DEBUG: [/Root] [/Root] [f531419a-633c2cd9-31a19c72-96dccf42] Starting cluster discovery
2025-05-07T08:54:19.819484Z :INFO: [/Root] [/Root] [f531419a-633c2cd9-31a19c72-96dccf42] Cluster discovery request failed. Status: TRANSPORT_UNAVAILABLE. Issues: "
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:10819: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint localhost:10819
: Error: Endpoint list is empty for database /Root, cluster endpoint localhost:10819. "
2025-05-07T08:54:19.819544Z :DEBUG: [/Root] [/Root] [f531419a-633c2cd9-31a19c72-96dccf42] Restart cluster discovery in 0.017089s
2025-05-07T08:54:19.838078Z :DEBUG: [/Root] [/Root] [f531419a-633c2cd9-31a19c72-96dccf42] Starting cluster discovery
2025-05-07T08:54:19.838427Z :INFO: [/Root] [/Root] [f531419a-633c2cd9-31a19c72-96dccf42] Cluster discovery request failed. Status: TRANSPORT_UNAVAILABLE. Issues: "
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:10819: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint localhost:10819
: Error: Endpoint list is empty for database /Root, cluster endpoint localhost:10819. "
2025-05-07T08:54:19.838480Z :DEBUG: [/Root] [/Root] [f531419a-633c2cd9-31a19c72-96dccf42] Restart cluster discovery in 0.020869s
2025-05-07T08:54:19.863907Z :DEBUG: [/Root] [/Root] [f531419a-633c2cd9-31a19c72-96dccf42] Starting cluster discovery
2025-05-07T08:54:19.864264Z :NOTICE: [/Root] [/Root] [f531419a-633c2cd9-31a19c72-96dccf42] Aborting read session. Description: SessionClosed { Status: TRANSPORT_UNAVAILABLE Issues: "
: Error: Failed to discover clusters
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:10819: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint localhost:10819
: Error: Endpoint list is empty for database /Root, cluster endpoint localhost:10819. " }
2025-05-07T08:54:19.864513Z :NOTICE: [/Root] [/Root] [f531419a-633c2cd9-31a19c72-96dccf42] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " }
SessionClosed { Status: TRANSPORT_UNAVAILABLE Issues: "
: Error: Failed to discover clusters
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:10819: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint localhost:10819
: Error: Endpoint list is empty for database /Root, cluster endpoint localhost:10819. " }
2025-05-07T08:54:19.864675Z :INFO: [/Root] [/Root] [f531419a-633c2cd9-31a19c72-96dccf42] Closing read session. Close timeout: 0.000000s
2025-05-07T08:54:19.864801Z :NOTICE: [/Root] [/Root] [f531419a-633c2cd9-31a19c72-96dccf42] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " }
2025-05-07T08:54:21.015570Z node 15 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1937: ActorId: [15:7501624502546640053:2531] TxId: 281474976715689. Ctx: { TraceId: 01jtmz7y175p0w5efdxw7dfyr4, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=15&id=ZDQ2YTc4MTMtMWJkMTgzYTQtOWM4NDMzOTEtZmMwOGZmZTU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. UNAVAILABLE: Failed to send EvStartKqpTasksRequest because node is unavailable: 16
2025-05-07T08:54:21.015761Z node 15 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1216: SelfId: [15:7501624502546640057:2531], TxId: 281474976715689, task: 3. Ctx: { CustomerSuppliedId : . TraceId : 01jtmz7y175p0w5efdxw7dfyr4. SessionId : ydb://session/3?node_id=15&id=ZDQ2YTc4MTMtMWJkMTgzYTQtOWM4NDMzOTEtZmMwOGZmZTU=. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Handle abort execution event from: [15:7501624502546640053:2531], status: UNAVAILABLE, reason: {
: Error: Terminate execution }
2025-05-07T08:54:21.230103Z node 15 :KQP_EXECUTER WARN: kqp_shards_resolver.cpp:86: [ShardsResolver] TxId: 281474976715690. Failed to resolve tablet: 72075186224037890 after several retries.
2025-05-07T08:54:21.230281Z node 15 :KQP_EXECUTER WARN: kqp_executer_impl.h:257: ActorId: [15:7501624502546640062:2539] TxId: 281474976715690. Ctx: { TraceId: 01jtmz7ykr4n0qta3zqmf7rtdh, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=15&id=NzgzNTEzNTgtNDdjMzA2ZDYtMjlkZGJkZGEtZGEzODY4NWQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Shards nodes resolve failed, status: UNAVAILABLE, issues:
: Error: Failed to resolve tablet: 72075186224037890 after several retries. 2025-05-07T08:54:21.230551Z node 15 :KQP_SESSION WARN: kqp_session_actor.cpp:2578: SessionId: ydb://session/3?node_id=15&id=NzgzNTEzNTgtNDdjMzA2ZDYtMjlkZGJkZGEtZGEzODY4NWQ=, ActorId: [15:7501624502546640059:2539], ActorState: ExecuteState, TraceId: 01jtmz7ykr4n0qta3zqmf7rtdh, Create QueryResponse for error on request, msg: 2025-05-07T08:54:21.232302Z node 15 :PQ_METACACHE ERROR: msgbus_server_pq_metacache.cpp:260: Got error trying to perform request: { Response { QueryIssues { message: "Failed to resolve tablet: 72075186224037890 after several retries." severity: 1 } TxMeta { id: "01jtmz7yks6v3ws7kscp7jwsbs" } } YdbStatus: UNAVAILABLE ConsumedRu: 1 } 2025-05-07T08:54:22.018408Z node 15 :KQP_SESSION WARN: kqp_session_actor.cpp:2578: SessionId: ydb://session/3?node_id=15&id=ZDQ2YTc4MTMtMWJkMTgzYTQtOWM4NDMzOTEtZmMwOGZmZTU=, ActorId: [15:7501624498251672731:2531], ActorState: ExecuteState, TraceId: 01jtmz7y175p0w5efdxw7dfyr4, Create QueryResponse for error on request, msg: 2025-05-07T08:54:22.020343Z node 15 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Execution" issue_code: 1060 severity: 2 issues { position { row: 3 column: 120 } message: "Cost Based Optimizer could not be applied to this query: couldn\'t load statistics" end_position { row: 3 column: 120 } issue_code: 8001 severity: 2 } } QueryIssues { message: "Kikimr cluster or one of its subsystems was unavailable." issue_code: 2005 severity: 1 issues { message: "Failed to send EvStartKqpTasksRequest because node is unavailable: 16" severity: 1 } } TxMeta { id: "01jtmz7yjjd6yxr8nwg4vf8ys8" } } YdbStatus: UNAVAILABLE ConsumedRu: 362 } ------- [TM] {asan, default-linux-x86_64, release} ydb/services/datastreams/ut/unittest >> DataStreams::TestShardPagination [GOOD] Test command err: 2025-05-07T08:54:01.116198Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501624415312665726:2073];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:54:01.116242Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002833/r3tmp/tmpcelLFb/pdisk_1.dat 2025-05-07T08:54:02.005385Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:54:02.040262Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:54:02.040376Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:54:02.052086Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 8805, node 1 2025-05-07T08:54:02.346959Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:54:02.346985Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:54:02.346991Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:54:02.347096Z node 1 :NET_CLASSIFIER ERROR: 
net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:2505 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:54:02.968415Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:54:03.240514Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710658:0, at schemeshard: 72057594046644480 TClient is connected to server localhost:2505 2025-05-07T08:54:03.511457Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:54:03.530604Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710659, at schemeshard: 72057594046644480
: Error: retention hours and storage megabytes must fit one of: { hours : [0, 24], storage : [0, 0]}, { hours : [0, 168], storage : [51200, 1048576]}, provided values: hours 168, storage 40960, code: 500080 2025-05-07T08:54:04.169653Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501624428197569748:3421] txid# 281474976710661, issues: { message: "Check failed: path: \'/Root/stream_TestStreamStorageRetention\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 2], type: EPathTypePersQueueGroup, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:54:08.106229Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7501624445174209139:2073];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:54:08.106352Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002833/r3tmp/tmpVw86Sc/pdisk_1.dat 2025-05-07T08:54:08.549299Z node 4 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:54:08.598654Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:54:08.598739Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:54:08.604478Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 11466, node 4 2025-05-07T08:54:08.843043Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:54:08.843067Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:54:08.843077Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:54:08.843249Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:12092 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:54:09.191318Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T08:54:09.298674Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480 TClient is connected to server localhost:12092 2025-05-07T08:54:09.511136Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:54:13.090043Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7501624445174209139:2073];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:54:13.097330Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:54:18.314908Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7501624490929820099:2075];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002833/r3tmp/tmpp3Qrd4/pdisk_1.dat 2025-05-07T08:54:18.379934Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-05-07T08:54:18.540279Z node 7 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:54:18.604327Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:54:18.604446Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:54:18.607845Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 25611, node 7 2025-05-07T08:54:18.838516Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:54:18.838545Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:54:18.838556Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:54:18.838736Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26460 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:54:19.281841Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:54:19.404085Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710658:0, at schemeshard: 72057594046644480 TClient is connected to server localhost:26460 2025-05-07T08:54:19.759497Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... >> TKeyValueTest::TestWriteReadWhileWriteWorks [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_snapshot/unittest >> DataShardSnapshots::UncommittedChangesRenameTable-UseSink [GOOD] Test command err: 2025-05-07T08:51:44.495302Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:105:2151], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:51:44.495488Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:51:44.495790Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002932/r3tmp/tmp2W853B/pdisk_1.dat 2025-05-07T08:51:45.028954Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-05-07T08:51:45.090209Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:51:45.154531Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:59:2106] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-05-07T08:51:45.155529Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 2025-05-07T08:51:45.155828Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:51:45.155938Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:51:45.171325Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:51:45.261850Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:59:2106] Handle TEvProposeTransaction 2025-05-07T08:51:45.261924Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:59:2106] TxId# 281474976715657 ProcessProposeTransaction 2025-05-07T08:51:45.262120Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:59:2106] Cookie# 0 userReqId# "" txid# 281474976715657 SEND to# [1:640:2548] 2025-05-07T08:51:45.500619Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1520: Actor# [1:640:2548] txid# 281474976715657 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/Root" OperationType: ESchemeOpCreateTable CreateTable { Name: "table-1" Columns { Name: "key" Type: "Uint32" FamilyName: "" NotNull: false } Columns { Name: "value" Type: "Uint32" FamilyName: "" NotNull: false } KeyColumnNames: "key" UniformPartitionsCount: 1 } } } ExecTimeoutPeriod: 18446744073709551615 2025-05-07T08:51:45.500739Z node 1 :TX_PROXY DEBUG: schemereq.cpp:563: Actor# [1:640:2548] txid# 281474976715657 Bootstrap, UserSID: CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-05-07T08:51:45.501428Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1585: Actor# [1:640:2548] txid# 281474976715657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P6 2025-05-07T08:51:45.501530Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1575: Actor# [1:640:2548] txid# 281474976715657 TEvNavigateKeySet requested from SchemeCache 2025-05-07T08:51:45.501896Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1408: Actor# [1:640:2548] txid# 281474976715657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-05-07T08:51:45.502169Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1455: Actor# [1:640:2548] HANDLE 
EvNavigateKeySetResult, txid# 281474976715657 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-05-07T08:51:45.502271Z node 1 :TX_PROXY DEBUG: schemereq.cpp:102: Actor# [1:640:2548] txid# 281474976715657 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715657 TabletId# 72057594046644480} 2025-05-07T08:51:45.504240Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-05-07T08:51:45.504764Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1310: Actor# [1:640:2548] txid# 281474976715657 HANDLE EvClientConnected 2025-05-07T08:51:45.505597Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1332: Actor# [1:640:2548] txid# 281474976715657 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715657} 2025-05-07T08:51:45.505689Z node 1 :TX_PROXY DEBUG: schemereq.cpp:543: Actor# [1:640:2548] txid# 281474976715657 SEND to# [1:594:2519] Source {TEvProposeTransactionStatus txid# 281474976715657 Status# 53} 2025-05-07T08:51:45.567379Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3098: StateInit, received event# 268828672, Sender [1:656:2563], Recipient [1:665:2569]: NKikimr::TEvTablet::TEvBoot 2025-05-07T08:51:45.568655Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3098: StateInit, received event# 268828673, Sender [1:656:2563], Recipient [1:665:2569]: NKikimr::TEvTablet::TEvRestored 2025-05-07T08:51:45.569201Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:665:2569] 2025-05-07T08:51:45.569487Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-05-07T08:51:45.593866Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3111: StateInactive, received event# 268828684, Sender [1:656:2563], Recipient [1:665:2569]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-05-07T08:51:45.681156Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-05-07T08:51:45.681307Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-05-07T08:51:45.687397Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-05-07T08:51:45.687514Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-05-07T08:51:45.687595Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-05-07T08:51:45.688062Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-05-07T08:51:45.688263Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-05-07T08:51:45.688373Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:681:2569] in generation 1 2025-05-07T08:51:45.702663Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-05-07T08:51:45.786985Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-05-07T08:51:45.787268Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 
72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-05-07T08:51:45.787395Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:683:2579] 2025-05-07T08:51:45.787445Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-05-07T08:51:45.787475Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-05-07T08:51:45.787510Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T08:51:45.787751Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 2146435072, Sender [1:665:2569], Recipient [1:665:2569]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-05-07T08:51:45.787859Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3155: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-05-07T08:51:45.788266Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-05-07T08:51:45.788380Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-05-07T08:51:45.788489Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T08:51:45.788527Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-05-07T08:51:45.788567Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-05-07T08:51:45.788604Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-05-07T08:51:45.788651Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-05-07T08:51:45.788690Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-05-07T08:51:45.788733Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T08:51:45.788879Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269877761, Sender [1:672:2573], Recipient [1:665:2569]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-05-07T08:51:45.788938Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3166: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-05-07T08:51:45.788989Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:661:2566], serverId# [1:672:2573], sessionId# [0:0:0] 2025-05-07T08:51:45.789444Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269549568, Sender [1:410:2405], Recipient [1:672:2573] 2025-05-07T08:51:45.789508Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3136: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-05-07T08:51:45.789615Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-05-07T08:51:45.789862Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 
2025-05-07T08:51:45.789920Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-05-07T08:51:45.790189Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-05-07T08:51:45.790256Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-05-07T08:51:45.790303Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx 2025-05-07T08:51:45.790344Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx 2025-05-07T08:51:45.790382Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-05-07T08:51:45.790717Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 ... eId: 01jtmz804t0pkn2v25xr8bdz0k, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=NzYxMTEwZDEtN2UzNjI0NTEtM2JiYzc0NDEtZWJhMWUzMzQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. State: WaitResolveState, Executing KQP transaction on shard: 72075186224037888, tasks: [], lockTxId: (empty maybe), locks: Locks { LockId: 281474976715661 DataShard: 72075186224037888 Generation: 1 Counter: 0 SchemeShard: 72057594046644480 PathId: 2 HasWrites: true } Op: Rollback, immediate: 1 2025-05-07T08:54:22.751753Z node 13 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:1831: ActorId: [13:982:2683] TxId: 281474976715665. Ctx: { TraceId: 01jtmz804t0pkn2v25xr8bdz0k, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=NzYxMTEwZDEtN2UzNjI0NTEtM2JiYzc0NDEtZWJhMWUzMzQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ExecuteDatashardTransaction traceId.verbosity: 0 2025-05-07T08:54:22.751820Z node 13 :KQP_EXECUTER INFO: kqp_data_executer.cpp:2800: ActorId: [13:982:2683] TxId: 281474976715665. Ctx: { TraceId: 01jtmz804t0pkn2v25xr8bdz0k, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=NzYxMTEwZDEtN2UzNjI0NTEtM2JiYzc0NDEtZWJhMWUzMzQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Total tasks: 0, readonly: 1, datashardTxs: 1, evWriteTxs: 0, topicTxs: 0, volatile: 0, immediate: 1, pending compute tasks0, useFollowers: 0 2025-05-07T08:54:22.751862Z node 13 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:136: ActorId: [13:982:2683] TxId: 281474976715665. Ctx: { TraceId: 01jtmz804t0pkn2v25xr8bdz0k, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=NzYxMTEwZDEtN2UzNjI0NTEtM2JiYzc0NDEtZWJhMWUzMzQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: WaitResolveState, datashard 72075186224037888 not finished yet: Executing 2025-05-07T08:54:22.751910Z node 13 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:157: ActorId: [13:982:2683] TxId: 281474976715665. Ctx: { TraceId: 01jtmz804t0pkn2v25xr8bdz0k, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=NzYxMTEwZDEtN2UzNjI0NTEtM2JiYzc0NDEtZWJhMWUzMzQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
ActorState: WaitResolveState, waiting for 0 compute actor(s) and 1 datashard(s): DS 72075186224037888 (Executing), 2025-05-07T08:54:22.751951Z node 13 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:2362: ActorId: [13:982:2683] TxId: 281474976715665. Ctx: { TraceId: 01jtmz804t0pkn2v25xr8bdz0k, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=NzYxMTEwZDEtN2UzNjI0NTEtM2JiYzc0NDEtZWJhMWUzMzQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: WaitResolveState, immediate tx, become ExecuteState 2025-05-07T08:54:22.752215Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269549568, Sender [13:982:2683], Recipient [13:951:2767]: NKikimrTxDataShard.TEvProposeTransaction TxKind: TX_KIND_DATA SourceDeprecated { RawX1: 982 RawX2: 55834577531 } TxBody: " \0018\001j3\010\001\032\'\n#\t\215\023\000\000\000\000\001\000\021\000\000\001\000\000\020\000\001\030\001 \000)\000\001\205\000\000\000\000\0010\0028\001 \003\"\006\020\0020\000@\n\220\001\000" TxId: 281474976715665 ExecLevel: 0 Flags: 8 2025-05-07T08:54:22.752262Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3136: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-05-07T08:54:22.752382Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 2146435074, Sender [13:951:2767], Recipient [13:951:2767]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvDelayedProposeTransaction 2025-05-07T08:54:22.752422Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvDelayedProposeTransaction 2025-05-07T08:54:22.752499Z node 13 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-05-07T08:54:22.752677Z node 13 :TX_DATASHARD TRACE: key_validator.cpp:54: -- AddWriteRange: (Uint64 : 281474976715661, Uint64 : 72075186224037888, Uint64 : 72057594046644480, Uint64 : 2) table: [1:997:0] 2025-05-07T08:54:22.752797Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715665] at 72075186224037888 on unit CheckDataTx 2025-05-07T08:54:22.752854Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715665] at 72075186224037888 is Executed 2025-05-07T08:54:22.752899Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715665] at 72075186224037888 executing on unit CheckDataTx 2025-05-07T08:54:22.752932Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715665] at 72075186224037888 to execution unit BuildAndWaitDependencies 2025-05-07T08:54:22.752964Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715665] at 72075186224037888 on unit BuildAndWaitDependencies 2025-05-07T08:54:22.753004Z node 13 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 72075186224037888 CompleteEdge# v500/281474976715663 IncompleteEdge# v{min} UnprotectedReadEdge# v400/18446744073709551615 ImmediateWriteEdge# v400/18446744073709551615 ImmediateWriteEdgeReplied# v1000/18446744073709551615 2025-05-07T08:54:22.753051Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:281474976715665] at 72075186224037888 2025-05-07T08:54:22.753086Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715665] at 72075186224037888 is Executed 2025-05-07T08:54:22.753111Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for 
[0:281474976715665] at 72075186224037888 executing on unit BuildAndWaitDependencies 2025-05-07T08:54:22.753138Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715665] at 72075186224037888 to execution unit ExecuteKqpDataTx 2025-05-07T08:54:22.753178Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715665] at 72075186224037888 on unit ExecuteKqpDataTx 2025-05-07T08:54:22.753249Z node 13 :TX_DATASHARD TRACE: execute_kqp_data_tx_unit.cpp:236: Operation [0:281474976715665] (execute_kqp_data_tx) at 72075186224037888 set memory limit 4193448 2025-05-07T08:54:22.753396Z node 13 :TX_DATASHARD TRACE: datashard_kqp.cpp:824: KqpEraseLock LockId: 281474976715661 DataShard: 72075186224037888 Generation: 1 Counter: 0 SchemeShard: 72057594046644480 PathId: 2 HasWrites: true 2025-05-07T08:54:22.753529Z node 13 :TX_DATASHARD TRACE: execute_kqp_data_tx_unit.cpp:476: add locks to result: 0 2025-05-07T08:54:22.753609Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715665] at 72075186224037888 is Executed 2025-05-07T08:54:22.753643Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715665] at 72075186224037888 executing on unit ExecuteKqpDataTx 2025-05-07T08:54:22.753686Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715665] at 72075186224037888 to execution unit FinishPropose 2025-05-07T08:54:22.753717Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715665] at 72075186224037888 on unit FinishPropose 2025-05-07T08:54:22.753785Z node 13 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 281474976715665 at tablet 72075186224037888 send to client, exec latency: 0 ms, propose latency: 0 ms, status: COMPLETE 2025-05-07T08:54:22.753916Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715665] at 72075186224037888 is DelayComplete 2025-05-07T08:54:22.753950Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715665] at 72075186224037888 executing on unit FinishPropose 2025-05-07T08:54:22.754221Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715665] at 72075186224037888 to execution unit CompletedOperations 2025-05-07T08:54:22.754255Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715665] at 72075186224037888 on unit CompletedOperations 2025-05-07T08:54:22.754320Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715665] at 72075186224037888 is Executed 2025-05-07T08:54:22.754354Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715665] at 72075186224037888 executing on unit CompletedOperations 2025-05-07T08:54:22.754381Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:281474976715665] at 72075186224037888 has finished 2025-05-07T08:54:22.754453Z node 13 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-05-07T08:54:22.754488Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715665] at 72075186224037888 on unit FinishPropose 2025-05-07T08:54:22.754529Z node 13 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 
2025-05-07T08:54:22.754734Z node 13 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:1364: ActorId: [13:982:2683] TxId: 281474976715665. Ctx: { TraceId: 01jtmz804t0pkn2v25xr8bdz0k, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=NzYxMTEwZDEtN2UzNjI0NTEtM2JiYzc0NDEtZWJhMWUzMzQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Got propose result, shard: 72075186224037888, status: COMPLETE, error: 2025-05-07T08:54:22.754901Z node 13 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:2140: ActorId: [13:982:2683] TxId: 281474976715665. Ctx: { TraceId: 01jtmz804t0pkn2v25xr8bdz0k, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=NzYxMTEwZDEtN2UzNjI0NTEtM2JiYzc0NDEtZWJhMWUzMzQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. terminate execution. 2025-05-07T08:54:22.755015Z node 13 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:838: ActorId: [13:982:2683] TxId: 281474976715665. Ctx: { TraceId: 01jtmz804t0pkn2v25xr8bdz0k, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=NzYxMTEwZDEtN2UzNjI0NTEtM2JiYzc0NDEtZWJhMWUzMzQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Resource usage for last stat interval: ComputeTime: 0.000000s ReadRows: 0 ReadBytes: 0 ru: 1 rate limiter was not found force flag: 1 2025-05-07T08:54:22.755174Z node 13 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2534: SessionId: ydb://session/3?node_id=13&id=NzYxMTEwZDEtN2UzNjI0NTEtM2JiYzc0NDEtZWJhMWUzMzQ=, ActorId: [13:838:2683], ActorState: CleanupState, TraceId: 01jtmz804t0pkn2v25xr8bdz0k, EndCleanup, isFinal: 0 2025-05-07T08:54:22.755384Z node 13 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2270: SessionId: ydb://session/3?node_id=13&id=NzYxMTEwZDEtN2UzNjI0NTEtM2JiYzc0NDEtZWJhMWUzMzQ=, ActorId: [13:838:2683], ActorState: CleanupState, TraceId: 01jtmz804t0pkn2v25xr8bdz0k, Sent query response back to proxy, proxyRequestId: 8, proxyId: [13:57:2104] 2025-05-07T08:54:23.065431Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269877761, Sender [13:991:2793], Recipient [13:951:2767]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-05-07T08:54:23.065568Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3166: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-05-07T08:54:23.065677Z node 13 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [13:990:2792], serverId# [13:991:2793], sessionId# [0:0:0] 2025-05-07T08:54:23.065894Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269553224, Sender [13:594:2519], Recipient [13:951:2767]: NKikimr::TEvDataShard::TEvGetOpenTxs ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_upload_rows/unittest >> TTxDataShardUploadRows::UploadRowsToReplicatedTable [GOOD] Test command err: 2025-05-07T08:54:18.130848Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:105:2151], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:54:18.131021Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:54:18.131309Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0035b0/r3tmp/tmpZvqoxu/pdisk_1.dat 2025-05-07T08:54:18.682218Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-05-07T08:54:18.754327Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:54:18.811900Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:54:18.812055Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:54:18.827258Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:54:18.914504Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-05-07T08:54:18.963373Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3098: StateInit, received event# 268828672, Sender [1:656:2563], Recipient [1:665:2569]: NKikimr::TEvTablet::TEvBoot 2025-05-07T08:54:18.964684Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3098: StateInit, received event# 268828673, Sender [1:656:2563], Recipient [1:665:2569]: NKikimr::TEvTablet::TEvRestored 2025-05-07T08:54:18.965220Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:665:2569] 2025-05-07T08:54:18.965507Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-05-07T08:54:18.974786Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3111: StateInactive, received event# 268828684, Sender [1:656:2563], Recipient [1:665:2569]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-05-07T08:54:19.007848Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-05-07T08:54:19.008009Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-05-07T08:54:19.009257Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-05-07T08:54:19.009325Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-05-07T08:54:19.009371Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-05-07T08:54:19.009645Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-05-07T08:54:19.009774Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-05-07T08:54:19.009830Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state 
actor id [1:681:2569] in generation 1 2025-05-07T08:54:19.022125Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-05-07T08:54:19.072082Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-05-07T08:54:19.072292Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-05-07T08:54:19.072416Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:683:2579] 2025-05-07T08:54:19.072452Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-05-07T08:54:19.072483Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-05-07T08:54:19.072517Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T08:54:19.072720Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 2146435072, Sender [1:665:2569], Recipient [1:665:2569]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-05-07T08:54:19.072778Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3155: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-05-07T08:54:19.073076Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-05-07T08:54:19.073201Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-05-07T08:54:19.073287Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T08:54:19.073330Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-05-07T08:54:19.073382Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-05-07T08:54:19.073430Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-05-07T08:54:19.073475Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-05-07T08:54:19.073506Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-05-07T08:54:19.073545Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T08:54:19.073651Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269877761, Sender [1:672:2573], Recipient [1:665:2569]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-05-07T08:54:19.073687Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3166: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-05-07T08:54:19.073741Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:661:2566], serverId# [1:672:2573], sessionId# [0:0:0] 2025-05-07T08:54:19.076652Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269549568, Sender [1:410:2405], Recipient [1:672:2573] 2025-05-07T08:54:19.076723Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3136: StateWork, processing 
event TEvDataShard::TEvProposeTransaction 2025-05-07T08:54:19.076836Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-05-07T08:54:19.077048Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-05-07T08:54:19.077101Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-05-07T08:54:19.077178Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-05-07T08:54:19.077230Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-05-07T08:54:19.077263Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx 2025-05-07T08:54:19.077300Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx 2025-05-07T08:54:19.077335Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-05-07T08:54:19.077632Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayCompleteNoMoreRestarts 2025-05-07T08:54:19.077673Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit StoreSchemeTx 2025-05-07T08:54:19.077709Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit FinishPropose 2025-05-07T08:54:19.077737Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-05-07T08:54:19.077792Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayComplete 2025-05-07T08:54:19.077817Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit FinishPropose 2025-05-07T08:54:19.077851Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit WaitForPlan 2025-05-07T08:54:19.077882Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit WaitForPlan 2025-05-07T08:54:19.077909Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:281474976715657] at 72075186224037888 is not ready to execute on unit WaitForPlan 2025-05-07T08:54:19.079410Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269746185, Sender [1:684:2580], Recipient [1:665:2569]: NKikimr::TEvTxProxySchemeCache::TEvWatchNotifyUpdated 2025-05-07T08:54:19.079469Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-05-07T08:54:19.094582Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-05-07T08:54:19.094668Z node 1 
:TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-05-07T08:54:19.094722Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-05-07T08:54:19.094779Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 281474976715657 at tablet 72075186224037888 send to client, exec latency: 0 ms, propose latency: 0 ms, status: PREPARED 2025-05-07T08:54:19.094879Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-05-07T08:54:19.267708Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269877761, Sender [1:699:2589], Recipient [1:665:2569]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-05-07T08:54:19.267762Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3166: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-05-07T08:54:19.267797Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075 ... 72075186224037890 2025-05-07T08:54:21.490873Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037890 2025-05-07T08:54:21.490916Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [3000:281474976715667] at 72075186224037890 on unit CompleteOperation 2025-05-07T08:54:21.490986Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [3000 : 281474976715667] from 72075186224037890 at tablet 72075186224037890 send result to client [1:1105:2884], exec latency: 0 ms, propose latency: 1 ms 2025-05-07T08:54:21.491041Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037890 2025-05-07T08:54:24.977285Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:311:2354], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:54:24.977527Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:54:24.977632Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0035b0/r3tmp/tmprqztA2/pdisk_1.dat 2025-05-07T08:54:25.280677Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-05-07T08:54:25.313830Z node 2 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:54:25.368860Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:54:25.368996Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:54:25.383206Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:54:25.465110Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-05-07T08:54:25.484581Z node 2 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [2:664:2568] 2025-05-07T08:54:25.484860Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-05-07T08:54:25.551375Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-05-07T08:54:25.551519Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-05-07T08:54:25.553024Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-05-07T08:54:25.553107Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-05-07T08:54:25.553164Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-05-07T08:54:25.553477Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-05-07T08:54:25.553614Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-05-07T08:54:25.553696Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [2:680:2568] in generation 1 2025-05-07T08:54:25.564388Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-05-07T08:54:25.564469Z node 2 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-05-07T08:54:25.564571Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-05-07T08:54:25.564649Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 
72075186224037888, actorId: [2:682:2578] 2025-05-07T08:54:25.564689Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-05-07T08:54:25.564732Z node 2 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-05-07T08:54:25.564775Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T08:54:25.565214Z node 2 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-05-07T08:54:25.565308Z node 2 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-05-07T08:54:25.565402Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T08:54:25.565442Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-05-07T08:54:25.565485Z node 2 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-05-07T08:54:25.565529Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T08:54:25.565902Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [2:661:2566], serverId# [2:671:2572], sessionId# [0:0:0] 2025-05-07T08:54:25.566035Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-05-07T08:54:25.566256Z node 2 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-05-07T08:54:25.566328Z node 2 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-05-07T08:54:25.567904Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-05-07T08:54:25.581431Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-05-07T08:54:25.581554Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-05-07T08:54:25.742041Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [2:697:2587], serverId# [2:699:2589], sessionId# [0:0:0] 2025-05-07T08:54:25.742654Z node 2 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715657 at step 1000 at tablet 72075186224037888 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1000 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2025-05-07T08:54:25.742711Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T08:54:25.742884Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T08:54:25.742927Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 
planned 1 2025-05-07T08:54:25.742973Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000:281474976715657] in PlanQueue unit at 72075186224037888 2025-05-07T08:54:25.743207Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1000:281474976715657 keys extracted: 0 2025-05-07T08:54:25.743357Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-05-07T08:54:25.743512Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T08:54:25.743573Z node 2 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-05-07T08:54:25.743961Z node 2 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-05-07T08:54:25.748406Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-05-07T08:54:25.752572Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-05-07T08:54:25.752636Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T08:54:25.753494Z node 2 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-05-07T08:54:25.753566Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T08:54:25.755688Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T08:54:25.755744Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-05-07T08:54:25.755816Z node 2 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-05-07T08:54:25.755888Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [2:409:2404], exec latency: 0 ms, propose latency: 0 ms 2025-05-07T08:54:25.755947Z node 2 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-05-07T08:54:25.756030Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T08:54:25.757145Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-05-07T08:54:25.759266Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-05-07T08:54:25.759343Z node 2 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-05-07T08:54:25.759968Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 
72057594046316545 last step 0 next step 1000 2025-05-07T08:54:25.765167Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [2:733:2615], serverId# [2:734:2616], sessionId# [0:0:0] 2025-05-07T08:54:25.765293Z node 2 :TX_DATASHARD NOTICE: datashard__op_rows.cpp:168: Rejecting bulk upsert request on datashard: tablet# 72075186224037888, error# Can't execute bulk upsert at replicated table >> BasicUsage::WriteSessionCloseIgnoresWrites [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/keyvalue/ut/unittest >> TKeyValueTest::TestWriteReadWhileWriteWorks [GOOD] Test command err: Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:54:2057] recipient: [1:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:54:2057] recipient: [1:51:2095] Leader for TabletID 72057594037927937 is [1:56:2097] sender: [1:57:2057] recipient: [1:51:2095] Leader for TabletID 72057594037927937 is [1:56:2097] sender: [1:74:2057] recipient: [1:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:54:2057] recipient: [2:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:54:2057] recipient: [2:51:2095] Leader for TabletID 72057594037927937 is [2:56:2097] sender: [2:57:2057] recipient: [2:51:2095] Leader for TabletID 72057594037927937 is [2:56:2097] sender: [2:74:2057] recipient: [2:14:2061] !Reboot 72057594037927937 (actor [2:56:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [2:56:2097] sender: [2:76:2057] recipient: [2:36:2083] Leader for TabletID 72057594037927937 is [2:56:2097] sender: [2:79:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [2:56:2097] sender: [2:80:2057] recipient: [2:78:2110] Leader for TabletID 72057594037927937 is [2:81:2111] sender: [2:82:2057] recipient: [2:78:2110] !Reboot 72057594037927937 (actor [2:56:2097]) rebooted! !Reboot 72057594037927937 (actor [2:56:2097]) tablet resolver refreshed! new actor is[2:81:2111] Leader for TabletID 72057594037927937 is [2:81:2111] sender: [2:135:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:54:2057] recipient: [3:52:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:54:2057] recipient: [3:52:2095] Leader for TabletID 72057594037927937 is [3:56:2097] sender: [3:57:2057] recipient: [3:52:2095] Leader for TabletID 72057594037927937 is [3:56:2097] sender: [3:74:2057] recipient: [3:14:2061] !Reboot 72057594037927937 (actor [3:56:2097]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [3:56:2097] sender: [3:76:2057] recipient: [3:36:2083] Leader for TabletID 72057594037927937 is [3:56:2097] sender: [3:78:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [3:56:2097] sender: [3:80:2057] recipient: [3:79:2110] Leader for TabletID 72057594037927937 is [3:81:2111] sender: [3:82:2057] recipient: [3:79:2110] !Reboot 72057594037927937 (actor [3:56:2097]) rebooted! !Reboot 72057594037927937 (actor [3:56:2097]) tablet resolver refreshed! 
new actor is[3:81:2111] Leader for TabletID 72057594037927937 is [3:81:2111] sender: [3:135:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:54:2057] recipient: [4:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:54:2057] recipient: [4:51:2095] Leader for TabletID 72057594037927937 is [4:56:2097] sender: [4:57:2057] recipient: [4:51:2095] Leader for TabletID 72057594037927937 is [4:56:2097] sender: [4:74:2057] recipient: [4:14:2061] !Reboot 72057594037927937 (actor [4:56:2097]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [4:56:2097] sender: [4:77:2057] recipient: [4:36:2083] Leader for TabletID 72057594037927937 is [4:56:2097] sender: [4:80:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [4:56:2097] sender: [4:81:2057] recipient: [4:79:2110] Leader for TabletID 72057594037927937 is [4:82:2111] sender: [4:83:2057] recipient: [4:79:2110] !Reboot 72057594037927937 (actor [4:56:2097]) rebooted! !Reboot 72057594037927937 (actor [4:56:2097]) tablet resolver refreshed! new actor is[4:82:2111] Leader for TabletID 72057594037927937 is [4:82:2111] sender: [4:136:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:54:2057] recipient: [5:50:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:54:2057] recipient: [5:50:2095] Leader for TabletID 72057594037927937 is [5:56:2097] sender: [5:57:2057] recipient: [5:50:2095] Leader for TabletID 72057594037927937 is [5:56:2097] sender: [5:74:2057] recipient: [5:14:2061] !Reboot 72057594037927937 (actor [5:56:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [5:56:2097] sender: [5:80:2057] recipient: [5:36:2083] Leader for TabletID 72057594037927937 is [5:56:2097] sender: [5:83:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [5:56:2097] sender: [5:84:2057] recipient: [5:82:2113] Leader for TabletID 72057594037927937 is [5:85:2114] sender: [5:86:2057] recipient: [5:82:2113] !Reboot 72057594037927937 (actor [5:56:2097]) rebooted! !Reboot 72057594037927937 (actor [5:56:2097]) tablet resolver refreshed! new actor is[5:85:2114] Leader for TabletID 72057594037927937 is [5:85:2114] sender: [5:139:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:54:2057] recipient: [6:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:54:2057] recipient: [6:51:2095] Leader for TabletID 72057594037927937 is [6:56:2097] sender: [6:57:2057] recipient: [6:51:2095] Leader for TabletID 72057594037927937 is [6:56:2097] sender: [6:74:2057] recipient: [6:14:2061] !Reboot 72057594037927937 (actor [6:56:2097]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [6:56:2097] sender: [6:80:2057] recipient: [6:36:2083] Leader for TabletID 72057594037927937 is [6:56:2097] sender: [6:83:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [6:56:2097] sender: [6:84:2057] recipient: [6:82:2113] Leader for TabletID 72057594037927937 is [6:85:2114] sender: [6:86:2057] recipient: [6:82:2113] !Reboot 72057594037927937 (actor [6:56:2097]) rebooted! !Reboot 72057594037927937 (actor [6:56:2097]) tablet resolver refreshed! 
new actor is[6:85:2114] Leader for TabletID 72057594037927937 is [6:85:2114] sender: [6:139:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:54:2057] recipient: [7:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:54:2057] recipient: [7:51:2095] Leader for TabletID 72057594037927937 is [7:56:2097] sender: [7:57:2057] recipient: [7:51:2095] Leader for TabletID 72057594037927937 is [7:56:2097] sender: [7:74:2057] recipient: [7:14:2061] !Reboot 72057594037927937 (actor [7:56:2097]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [7:56:2097] sender: [7:81:2057] recipient: [7:36:2083] Leader for TabletID 72057594037927937 is [7:56:2097] sender: [7:84:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [7:56:2097] sender: [7:85:2057] recipient: [7:83:2113] Leader for TabletID 72057594037927937 is [7:86:2114] sender: [7:87:2057] recipient: [7:83:2113] !Reboot 72057594037927937 (actor [7:56:2097]) rebooted! !Reboot 72057594037927937 (actor [7:56:2097]) tablet resolver refreshed! new actor is[7:86:2114] Leader for TabletID 72057594037927937 is [7:86:2114] sender: [7:140:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:54:2057] recipient: [8:50:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:54:2057] recipient: [8:50:2095] Leader for TabletID 72057594037927937 is [8:56:2097] sender: [8:57:2057] recipient: [8:50:2095] Leader for TabletID 72057594037927937 is [8:56:2097] sender: [8:74:2057] recipient: [8:14:2061] !Reboot 72057594037927937 (actor [8:56:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [8:56:2097] sender: [8:83:2057] recipient: [8:36:2083] Leader for TabletID 72057594037927937 is [8:56:2097] sender: [8:86:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [8:56:2097] sender: [8:87:2057] recipient: [8:85:2115] Leader for TabletID 72057594037927937 is [8:88:2116] sender: [8:89:2057] recipient: [8:85:2115] !Reboot 72057594037927937 (actor [8:56:2097]) rebooted! !Reboot 72057594037927937 (actor [8:56:2097]) tablet resolver refreshed! new actor is[8:88:2116] Leader for TabletID 72057594037927937 is [8:88:2116] sender: [8:142:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:54:2057] recipient: [9:52:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:54:2057] recipient: [9:52:2095] Leader for TabletID 72057594037927937 is [9:56:2097] sender: [9:57:2057] recipient: [9:52:2095] Leader for TabletID 72057594037927937 is [9:56:2097] sender: [9:74:2057] recipient: [9:14:2061] !Reboot 72057594037927937 (actor [9:56:2097]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [9:56:2097] sender: [9:83:2057] recipient: [9:36:2083] Leader for TabletID 72057594037927937 is [9:56:2097] sender: [9:86:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [9:56:2097] sender: [9:87:2057] recipient: [9:85:2115] Leader for TabletID 72057594037927937 is [9:88:2116] sender: [9:89:2057] recipient: [9:85:2115] !Reboot 72057594037927937 (actor [9:56:2097]) rebooted! !Reboot 72057594037927937 (actor [9:56:2097]) tablet resolver refreshed! 
new actor is[9:88:2116] Leader for TabletID 72057594037927937 is [9:88:2116] sender: [9:142:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:54:2057] recipient: [10:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:54:2057] recipient: [10:51:2095] Leader for TabletID 72057594037927937 is [10:56:2097] sender: [10:57:2057] recipient: [10:51:2095] Leader for TabletID 72057594037927937 is [10:56:2097] sender: [10:74:2057] recipient: [10:14:2061] !Reboot 72057594037927937 (actor [10:56:2097]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [10:56:2097] sender: [10:84:2057] recipient: [10:36:2083] Leader for TabletID 72057594037927937 is [10:56:2097] sender: [10:87:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [10:56:2097] sender: [10:88:2057] recipient: [10:86:2115] Leader for TabletID 72057594037927937 is [10:89:2116] sender: [10:90:2057] recipient: [10:86:2115] !Reboot 72057594037927937 (actor [10:56:2097]) rebooted! !Reboot 72057594037927937 (actor [10:56:2097]) tablet resolver refreshed! new actor is[10:89:2116] Leader for TabletID 72057594037927937 is [10:89:2116] sender: [10:143:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:54:2057] recipient: [11:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:54:2057] recipient: [11:51:2095] Leader for TabletID 72057594037927937 is [11:56:2097] sender: [11:57:2057] recipient: [11:51:2095] Leader for TabletID 72057594037927937 is [11:56:2097] sender: [11:74:2057] recipient: [11:14:2061] !Reboot 72057594037927937 (actor [11:56:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [11:56:2097] sender: [11:86:2057] recipient: [11:36:2083] Leader for TabletID 72057594037927937 is [11:56:2097] sender: [11:89:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [11:56:2097] sender: [11:90:2057] recipient: [11:88:2117] Leader for TabletID 72057594037927937 is [11:91:2118] sender: [11:92:2057] recipient: [11:88:2117] !Reboot 72057594037927937 (actor [11:56:2097]) rebooted! !Reboot 72057594037927937 (actor [11:56:2097]) tablet resolver refreshed! new actor is[11:91:2118] Leader for TabletID 72057594037927937 is [11:91:2118] sender: [11:145:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:54:2057] recipient: [12:52:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:54:2057] recipient: [12:52:2095] Leader for TabletID 72057594037927937 is [12:56:2097] sender: [12:57:2057] recipient: [12:52:2095] Leader for TabletID 72057594037927937 is [12:56:2097] sender: [12:74:2057] recipient: [12:14:2061] !Reboot 72057594037927937 (acto ... TabletID 72057594037927937 is [13:56:2097] sender: [13:89:2057] recipient: [13:14:2061] Leader for TabletID 72057594037927937 is [13:56:2097] sender: [13:91:2057] recipient: [13:90:2117] Leader for TabletID 72057594037927937 is [13:92:2118] sender: [13:93:2057] recipient: [13:90:2117] !Reboot 72057594037927937 (actor [13:56:2097]) rebooted! !Reboot 72057594037927937 (actor [13:56:2097]) tablet resolver refreshed! 
new actor is[13:92:2118] Leader for TabletID 72057594037927937 is [13:92:2118] sender: [13:146:2057] recipient: [13:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [14:54:2057] recipient: [14:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [14:54:2057] recipient: [14:51:2095] Leader for TabletID 72057594037927937 is [14:56:2097] sender: [14:57:2057] recipient: [14:51:2095] Leader for TabletID 72057594037927937 is [14:56:2097] sender: [14:74:2057] recipient: [14:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [15:54:2057] recipient: [15:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [15:54:2057] recipient: [15:51:2095] Leader for TabletID 72057594037927937 is [15:56:2097] sender: [15:57:2057] recipient: [15:51:2095] Leader for TabletID 72057594037927937 is [15:56:2097] sender: [15:74:2057] recipient: [15:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [16:54:2057] recipient: [16:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [16:54:2057] recipient: [16:51:2095] Leader for TabletID 72057594037927937 is [16:56:2097] sender: [16:57:2057] recipient: [16:51:2095] Leader for TabletID 72057594037927937 is [16:56:2097] sender: [16:74:2057] recipient: [16:14:2061] !Reboot 72057594037927937 (actor [16:56:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [16:56:2097] sender: [16:76:2057] recipient: [16:36:2083] Leader for TabletID 72057594037927937 is [16:56:2097] sender: [16:78:2057] recipient: [16:14:2061] Leader for TabletID 72057594037927937 is [16:56:2097] sender: [16:80:2057] recipient: [16:79:2110] Leader for TabletID 72057594037927937 is [16:81:2111] sender: [16:82:2057] recipient: [16:79:2110] !Reboot 72057594037927937 (actor [16:56:2097]) rebooted! !Reboot 72057594037927937 (actor [16:56:2097]) tablet resolver refreshed! new actor is[16:81:2111] Leader for TabletID 72057594037927937 is [16:81:2111] sender: [16:135:2057] recipient: [16:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [17:54:2057] recipient: [17:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [17:54:2057] recipient: [17:51:2095] Leader for TabletID 72057594037927937 is [17:56:2097] sender: [17:57:2057] recipient: [17:51:2095] Leader for TabletID 72057594037927937 is [17:56:2097] sender: [17:74:2057] recipient: [17:14:2061] !Reboot 72057594037927937 (actor [17:56:2097]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [17:56:2097] sender: [17:76:2057] recipient: [17:36:2083] Leader for TabletID 72057594037927937 is [17:56:2097] sender: [17:79:2057] recipient: [17:14:2061] Leader for TabletID 72057594037927937 is [17:56:2097] sender: [17:80:2057] recipient: [17:78:2110] Leader for TabletID 72057594037927937 is [17:81:2111] sender: [17:82:2057] recipient: [17:78:2110] !Reboot 72057594037927937 (actor [17:56:2097]) rebooted! !Reboot 72057594037927937 (actor [17:56:2097]) tablet resolver refreshed! 
new actor is[17:81:2111] Leader for TabletID 72057594037927937 is [17:81:2111] sender: [17:135:2057] recipient: [17:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [18:54:2057] recipient: [18:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [18:54:2057] recipient: [18:51:2095] Leader for TabletID 72057594037927937 is [18:56:2097] sender: [18:57:2057] recipient: [18:51:2095] Leader for TabletID 72057594037927937 is [18:56:2097] sender: [18:74:2057] recipient: [18:14:2061] !Reboot 72057594037927937 (actor [18:56:2097]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [18:56:2097] sender: [18:77:2057] recipient: [18:36:2083] Leader for TabletID 72057594037927937 is [18:56:2097] sender: [18:79:2057] recipient: [18:14:2061] Leader for TabletID 72057594037927937 is [18:56:2097] sender: [18:81:2057] recipient: [18:80:2110] Leader for TabletID 72057594037927937 is [18:82:2111] sender: [18:83:2057] recipient: [18:80:2110] !Reboot 72057594037927937 (actor [18:56:2097]) rebooted! !Reboot 72057594037927937 (actor [18:56:2097]) tablet resolver refreshed! new actor is[18:82:2111] Leader for TabletID 72057594037927937 is [18:82:2111] sender: [18:136:2057] recipient: [18:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [19:54:2057] recipient: [19:50:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [19:54:2057] recipient: [19:50:2095] Leader for TabletID 72057594037927937 is [19:56:2097] sender: [19:57:2057] recipient: [19:50:2095] Leader for TabletID 72057594037927937 is [19:56:2097] sender: [19:74:2057] recipient: [19:14:2061] !Reboot 72057594037927937 (actor [19:56:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [19:56:2097] sender: [19:80:2057] recipient: [19:36:2083] Leader for TabletID 72057594037927937 is [19:56:2097] sender: [19:83:2057] recipient: [19:14:2061] Leader for TabletID 72057594037927937 is [19:56:2097] sender: [19:84:2057] recipient: [19:82:2113] Leader for TabletID 72057594037927937 is [19:85:2114] sender: [19:86:2057] recipient: [19:82:2113] !Reboot 72057594037927937 (actor [19:56:2097]) rebooted! !Reboot 72057594037927937 (actor [19:56:2097]) tablet resolver refreshed! new actor is[19:85:2114] Leader for TabletID 72057594037927937 is [19:85:2114] sender: [19:139:2057] recipient: [19:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [20:54:2057] recipient: [20:52:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [20:54:2057] recipient: [20:52:2095] Leader for TabletID 72057594037927937 is [20:56:2097] sender: [20:57:2057] recipient: [20:52:2095] Leader for TabletID 72057594037927937 is [20:56:2097] sender: [20:74:2057] recipient: [20:14:2061] !Reboot 72057594037927937 (actor [20:56:2097]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [20:56:2097] sender: [20:80:2057] recipient: [20:36:2083] Leader for TabletID 72057594037927937 is [20:56:2097] sender: [20:82:2057] recipient: [20:14:2061] Leader for TabletID 72057594037927937 is [20:56:2097] sender: [20:84:2057] recipient: [20:83:2113] Leader for TabletID 72057594037927937 is [20:85:2114] sender: [20:86:2057] recipient: [20:83:2113] !Reboot 72057594037927937 (actor [20:56:2097]) rebooted! !Reboot 72057594037927937 (actor [20:56:2097]) tablet resolver refreshed! 
new actor is[20:85:2114] Leader for TabletID 72057594037927937 is [20:85:2114] sender: [20:139:2057] recipient: [20:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [21:54:2057] recipient: [21:50:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [21:54:2057] recipient: [21:50:2095] Leader for TabletID 72057594037927937 is [21:56:2097] sender: [21:57:2057] recipient: [21:50:2095] Leader for TabletID 72057594037927937 is [21:56:2097] sender: [21:74:2057] recipient: [21:14:2061] !Reboot 72057594037927937 (actor [21:56:2097]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [21:56:2097] sender: [21:81:2057] recipient: [21:36:2083] Leader for TabletID 72057594037927937 is [21:56:2097] sender: [21:83:2057] recipient: [21:14:2061] Leader for TabletID 72057594037927937 is [21:56:2097] sender: [21:85:2057] recipient: [21:84:2113] Leader for TabletID 72057594037927937 is [21:86:2114] sender: [21:87:2057] recipient: [21:84:2113] !Reboot 72057594037927937 (actor [21:56:2097]) rebooted! !Reboot 72057594037927937 (actor [21:56:2097]) tablet resolver refreshed! new actor is[21:86:2114] Leader for TabletID 72057594037927937 is [21:86:2114] sender: [21:140:2057] recipient: [21:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [22:54:2057] recipient: [22:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [22:54:2057] recipient: [22:51:2095] Leader for TabletID 72057594037927937 is [22:56:2097] sender: [22:57:2057] recipient: [22:51:2095] Leader for TabletID 72057594037927937 is [22:56:2097] sender: [22:74:2057] recipient: [22:14:2061] !Reboot 72057594037927937 (actor [22:56:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [22:56:2097] sender: [22:84:2057] recipient: [22:36:2083] Leader for TabletID 72057594037927937 is [22:56:2097] sender: [22:87:2057] recipient: [22:14:2061] Leader for TabletID 72057594037927937 is [22:56:2097] sender: [22:88:2057] recipient: [22:86:2116] Leader for TabletID 72057594037927937 is [22:89:2117] sender: [22:90:2057] recipient: [22:86:2116] !Reboot 72057594037927937 (actor [22:56:2097]) rebooted! !Reboot 72057594037927937 (actor [22:56:2097]) tablet resolver refreshed! new actor is[22:89:2117] Leader for TabletID 72057594037927937 is [22:89:2117] sender: [22:143:2057] recipient: [22:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [23:54:2057] recipient: [23:52:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [23:54:2057] recipient: [23:52:2095] Leader for TabletID 72057594037927937 is [23:56:2097] sender: [23:57:2057] recipient: [23:52:2095] Leader for TabletID 72057594037927937 is [23:56:2097] sender: [23:74:2057] recipient: [23:14:2061] !Reboot 72057594037927937 (actor [23:56:2097]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [23:56:2097] sender: [23:84:2057] recipient: [23:36:2083] Leader for TabletID 72057594037927937 is [23:56:2097] sender: [23:87:2057] recipient: [23:14:2061] Leader for TabletID 72057594037927937 is [23:56:2097] sender: [23:88:2057] recipient: [23:86:2116] Leader for TabletID 72057594037927937 is [23:89:2117] sender: [23:90:2057] recipient: [23:86:2116] !Reboot 72057594037927937 (actor [23:56:2097]) rebooted! !Reboot 72057594037927937 (actor [23:56:2097]) tablet resolver refreshed! 
new actor is[23:89:2117] Leader for TabletID 72057594037927937 is [23:89:2117] sender: [23:143:2057] recipient: [23:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [24:54:2057] recipient: [24:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [24:54:2057] recipient: [24:51:2095] Leader for TabletID 72057594037927937 is [24:56:2097] sender: [24:57:2057] recipient: [24:51:2095] Leader for TabletID 72057594037927937 is [24:56:2097] sender: [24:74:2057] recipient: [24:14:2061] !Reboot 72057594037927937 (actor [24:56:2097]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [24:56:2097] sender: [24:85:2057] recipient: [24:36:2083] Leader for TabletID 72057594037927937 is [24:56:2097] sender: [24:88:2057] recipient: [24:14:2061] Leader for TabletID 72057594037927937 is [24:56:2097] sender: [24:89:2057] recipient: [24:87:2116] Leader for TabletID 72057594037927937 is [24:90:2117] sender: [24:91:2057] recipient: [24:87:2116] !Reboot 72057594037927937 (actor [24:56:2097]) rebooted! !Reboot 72057594037927937 (actor [24:56:2097]) tablet resolver refreshed! new actor is[24:90:2117] Leader for TabletID 72057594037927937 is [24:90:2117] sender: [24:144:2057] recipient: [24:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [25:54:2057] recipient: [25:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [25:54:2057] recipient: [25:51:2095] Leader for TabletID 72057594037927937 is [25:56:2097] sender: [25:57:2057] recipient: [25:51:2095] Leader for TabletID 72057594037927937 is [25:56:2097] sender: [25:74:2057] recipient: [25:14:2061] ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/persqueue_public/ut/unittest >> CompressExecutor::TestExecutorMemUsage [GOOD] Test command err: 2025-05-07T08:51:35.106064Z :WriteAndReadSomeMessagesWithAsyncCompression INFO: Random seed for debugging is 1746607895106029 2025-05-07T08:51:35.483088Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501623788630749380:2074];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:51:35.483224Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-05-07T08:51:35.524100Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7501623787991125981:2073];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:51:35.524164Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-05-07T08:51:35.712433Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-05-07T08:51:35.727363Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00365c/r3tmp/tmpK6FJ4Q/pdisk_1.dat 2025-05-07T08:51:35.952456Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:51:35.967495Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:51:35.967628Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) 
VolatileState: Disconnected -> Connecting 2025-05-07T08:51:35.970459Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:51:35.970560Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:51:35.982867Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-05-07T08:51:35.983056Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:51:35.983968Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 16943, node 1 2025-05-07T08:51:36.069204Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/zvgn/00365c/r3tmp/yandexB6adp4.tmp 2025-05-07T08:51:36.069250Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/zvgn/00365c/r3tmp/yandexB6adp4.tmp 2025-05-07T08:51:36.069430Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/zvgn/00365c/r3tmp/yandexB6adp4.tmp 2025-05-07T08:51:36.069606Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-05-07T08:51:36.112326Z INFO: TTestServer started on Port 20047 GrpcPort 16943 TClient is connected to server localhost:20047 PQClient connected to localhost:16943 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:51:36.390858Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... waiting... waiting... waiting... 2025-05-07T08:51:39.041171Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7501623805170995480:2309], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:51:39.041329Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:51:39.041954Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7501623805170995493:2312], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:51:39.051562Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715657:3, at schemeshard: 72057594046644480 2025-05-07T08:51:39.088745Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7501623805170995495:2313], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
<main>: Error: Transaction 281474976715657 completed, doublechecking } 2025-05-07T08:51:39.189390Z node 2 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [2:7501623805170995523:2130] txid# 281474976715658, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:51:39.445640Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 2025-05-07T08:51:39.458185Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7501623805170995530:2317], status: SCHEME_ERROR, issues:
<main>: Error: Type annotation, code: 1030
<main>:3:16: Error: At function: KiReadTable!
<main>:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-05-07T08:51:39.460060Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2147: SessionId: ydb://session/3?node_id=2&id=NjkxZjBiOTQtYmQxMjc1NjYtZWRmMmU5MGMtZDBkNmRhYjg=, ActorId: [2:7501623805170995464:2308], ActorState: ExecuteState, TraceId: 01jtmz30cw9svbs5ye4tsjpxc9, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-05-07T08:51:39.468504Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7501623805810619617:2343], status: SCHEME_ERROR, issues:
<main>: Error: Type annotation, code: 1030
<main>:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-05-07T08:51:39.473818Z node 2 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-05-07T08:51:39.470221Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2147: SessionId: ydb://session/3?node_id=1&id=YzQ2OGQxMzQtZjlhM2VhNTktZjU5Yjc2NC00YzkyMTQ4Yw==, ActorId: [1:7501623805810619599:2336], ActorState: ExecuteState, TraceId: 01jtmz30fdb4y58nms0tjmnkmy, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-05-07T08:51:39.478238Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-05-07T08:51:39.678255Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T08:51:39.843059Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost:16943", true, true, 1000); 2025-05-07T08:51:40.156246Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710664. Ctx: { TraceId: 01jtmz31bd0ct8mh6vm75wd65w, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Y2VkNWM0ODAtNmE0ZGI3NzktMjljNDczNC1lNjhjYzhlMQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root === CheckClustersList. 
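For reference, the === Init DC: bootstrap statement quoted in the log above, reflowed as a standalone YQL statement (verbatim from the log; the table and its columns are the PQ test harness's own):

    UPSERT INTO `/Root/PQ/Config/V2/Cluster`
        (name, balancer, local, enabled, weight)
    VALUES ("dc1", "localhost:16943", true, true, 1000);

This row registers the single test cluster dc1; once it exists, the cluster tracker's reads of /Root/PQ/Config/V2/Cluster (whose earlier absence produced the SCHEME_ERROR compile failures above) can succeed, which is consistent with the later === CheckClustersList. Ok record.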
Subcribe to ClusterTracker from [1:7501623810105587330:2945] 2025-05-07T08:51:40.484322Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501623788630749380:2074];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:51:40.484398Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:51:40.528417Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7501623787991125981:2073];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:51:40.528500Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; === CheckClustersList. Ok 2025-05-07T08:51:46.589031Z no ... UG: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|cc44d9e0-aeaf287a-8e43f488-79cb6eea_0] Write session: send init request: init_request { topic: "test-topic" message_group_id: "test-message-group-id" preferred_cluster: "dc1" } 2025-05-07T08:54:21.928735Z node 15 :PQ_WRITE_PROXY DEBUG: grpc_pq_write.h:107: new grpc connection 2025-05-07T08:54:21.928781Z node 15 :PQ_WRITE_PROXY DEBUG: grpc_pq_write.h:141: new session created cookie 3 2025-05-07T08:54:21.929499Z node 15 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 3 sessionId: grpc read done: success: 1 data: init_request { topic: "test-topic" message_group_id: "test-message-group-id" preferred_cluster: "dc1" } 2025-05-07T08:54:21.929705Z node 15 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:442: session request cookie: 3 topic: "test-topic" message_group_id: "test-message-group-id" preferred_cluster: "dc1" from ipv6:[::1]:40236 2025-05-07T08:54:21.929730Z node 15 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:1535: write session: cookie=3 sessionId= userAgent="pqv1 server" ip=ipv6:[::1]:40236 proto=v1 topic=test-topic durationSec=0 2025-05-07T08:54:21.929743Z node 15 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:566: init check schema 2025-05-07T08:54:21.932470Z node 15 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:627: session v1 cookie: 3 sessionId: describe result for acl check 2025-05-07T08:54:21.932658Z node 15 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:62: TTableHelper SelectQuery: --!syntax_v1 DECLARE $Hash AS Uint32; DECLARE $Topic AS Utf8; DECLARE $SourceId AS Utf8; SELECT Partition, CreateTime, AccessTime, SeqNo FROM `/Root/PQ/SourceIdMeta2` WHERE Hash == $Hash AND Topic == $Topic AND SourceId == $SourceId; 2025-05-07T08:54:21.932675Z node 15 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:63: TTableHelper UpdateQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint32; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64;DECLARE $SeqNo AS Uint64; UPSERT INTO `/Root/PQ/SourceIdMeta2` (Hash, Topic, SourceId, CreateTime, AccessTime, Partition, SeqNo) VALUES ($Hash, $Topic, $SourceId, $CreateTime, $AccessTime, $Partition, $SeqNo); 2025-05-07T08:54:21.932692Z node 15 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:64: TTableHelper UpdateAccessTimeQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint32; DECLARE $Partition AS Uint32; DECLARE 
$CreateTime AS Uint64; DECLARE $AccessTime AS Uint64; UPDATE `/Root/PQ/SourceIdMeta2` SET AccessTime = $AccessTime WHERE Hash = $Hash AND Topic = $Topic AND SourceId = $SourceId AND Partition = $Partition; 2025-05-07T08:54:21.932714Z node 15 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:111: TPartitionChooser [15:7501624502539874458:2567] (SourceId=test-message-group-id, PreferedPartition=(NULL)) StartKqpSession 2025-05-07T08:54:21.939391Z node 15 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:142: TPartitionChooser [15:7501624502539874458:2567] (SourceId=test-message-group-id, PreferedPartition=(NULL)) Select from the table 2025-05-07T08:54:22.129105Z node 15 :KQP_EXECUTER WARN: kqp_shards_resolver.cpp:86: [ShardsResolver] TxId: 281474976720697. Failed to resolve tablet: 72075186224037891 after several retries. 2025-05-07T08:54:22.129420Z node 15 :KQP_EXECUTER WARN: kqp_executer_impl.h:257: ActorId: [15:7501624502539874469:2569] TxId: 281474976720697. Ctx: { TraceId: 01jtmz7zfkcycvsj62c95va2yq, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=15&id=OWU3YWUzYjUtMTMxNWZkZi1jMzhmZmIwYi1kY2IzM2ZhYg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Shards nodes resolve failed, status: UNAVAILABLE, issues:
: Error: Failed to resolve tablet: 72075186224037891 after several retries. 2025-05-07T08:54:22.129676Z node 15 :KQP_SESSION WARN: kqp_session_actor.cpp:2578: SessionId: ydb://session/3?node_id=15&id=OWU3YWUzYjUtMTMxNWZkZi1jMzhmZmIwYi1kY2IzM2ZhYg==, ActorId: [15:7501624502539874459:2569], ActorState: ExecuteState, TraceId: 01jtmz7zfkcycvsj62c95va2yq, Create QueryResponse for error on request, msg: 2025-05-07T08:54:22.132207Z node 15 :PQ_PARTITION_CHOOSER INFO: partition_chooser_impl__abstract_chooser_actor.h:312: TPartitionChooser [15:7501624502539874458:2567] (SourceId=test-message-group-id, PreferedPartition=(NULL)) ReplyError: kqp error Marker# PQ50 : Response { SessionId: "ydb://session/3?node_id=15&id=OWU3YWUzYjUtMTMxNWZkZi1jMzhmZmIwYi1kY2IzM2ZhYg==" QueryIssues { message: "Failed to resolve tablet: 72075186224037891 after several retries." severity: 1 } TxMeta { id: "01jtmz7zfm6yb5j05vvap8qfjg" } } YdbStatus: UNAVAILABLE ConsumedRu: 1 2025-05-07T08:54:22.132373Z node 15 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:809: session v1 error cookie: 3 reason: kqp error Marker# PQ50 : Response { SessionId: "ydb://session/3?node_id=15&id=OWU3YWUzYjUtMTMxNWZkZi1jMzhmZmIwYi1kY2IzM2ZhYg==" QueryIssues { message: "Failed to resolve tablet: 72075186224037891 after several retries." severity: 1 } TxMeta { id: "01jtmz7zfm6yb5j05vvap8qfjg" } } YdbStatus: UNAVAILABLE ConsumedRu: 1 sessionId: 2025-05-07T08:54:22.132922Z node 15 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 3 sessionId: is DEAD Test retry state: get retry delay 2025-05-07T08:54:22.134887Z :INFO: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|cc44d9e0-aeaf287a-8e43f488-79cb6eea_0] Got error. Status: UNAVAILABLE, Description:
: Error: kqp error Marker# PQ50 : Response { SessionId: "ydb://session/3?node_id=15&id=OWU3YWUzYjUtMTMxNWZkZi1jMzhmZmIwYi1kY2IzM2ZhYg==" QueryIssues { message: "Failed to resolve tablet: 72075186224037891 after several retries." severity: 1 } TxMeta { id: "01jtmz7zfm6yb5j05vvap8qfjg" } } YdbStatus: UNAVAILABLE ConsumedRu: 1 , code: 500001 2025-05-07T08:54:22.134947Z :INFO: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|cc44d9e0-aeaf287a-8e43f488-79cb6eea_0] Write session will restart in 2.000000s 2025-05-07T08:54:22.135082Z :INFO: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|cc44d9e0-aeaf287a-8e43f488-79cb6eea_0] Write session: Do CDS request 2025-05-07T08:54:22.135123Z :INFO: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|cc44d9e0-aeaf287a-8e43f488-79cb6eea_0] Do schedule cds request after 2000 ms 2025-05-07T08:54:22.690809Z node 15 :KQP_EXECUTER WARN: kqp_shards_resolver.cpp:86: [ShardsResolver] TxId: 281474976720699. Failed to resolve tablet: 72075186224037890 after several retries. 2025-05-07T08:54:22.690982Z node 15 :KQP_EXECUTER WARN: kqp_executer_impl.h:257: ActorId: [15:7501624506834841818:2573] TxId: 281474976720699. Ctx: { TraceId: 01jtmz801adbffqz1cefk0z1nc, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=15&id=ZjdkNTYwNjktYmIwMTRiNTgtMmZiZGUyZmEtNGMxOWNkNjM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Shards nodes resolve failed, status: UNAVAILABLE, issues:
: Error: Failed to resolve tablet: 72075186224037890 after several retries. 2025-05-07T08:54:22.691262Z node 15 :KQP_SESSION WARN: kqp_session_actor.cpp:2578: SessionId: ydb://session/3?node_id=15&id=ZjdkNTYwNjktYmIwMTRiNTgtMmZiZGUyZmEtNGMxOWNkNjM=, ActorId: [15:7501624506834841815:2573], ActorState: ExecuteState, TraceId: 01jtmz801adbffqz1cefk0z1nc, Create QueryResponse for error on request, msg: 2025-05-07T08:54:22.692293Z node 15 :PQ_METACACHE ERROR: msgbus_server_pq_metacache.cpp:260: Got error trying to perform request: { Response { QueryIssues { message: "Failed to resolve tablet: 72075186224037890 after several retries." severity: 1 } TxMeta { id: "01jtmz801bdwtb8fab340vmd06" } } YdbStatus: UNAVAILABLE ConsumedRu: 1 } 2025-05-07T08:54:22.915279Z :INFO: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|cc44d9e0-aeaf287a-8e43f488-79cb6eea_0] Write session: close. Timeout = 0 ms 2025-05-07T08:54:22.915364Z :INFO: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|cc44d9e0-aeaf287a-8e43f488-79cb6eea_0] Write session will now close 2025-05-07T08:54:22.915427Z :DEBUG: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|cc44d9e0-aeaf287a-8e43f488-79cb6eea_0] Write session: aborting 2025-05-07T08:54:22.916130Z :WARNING: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|cc44d9e0-aeaf287a-8e43f488-79cb6eea_0] Write session: could not confirm all writes in time or session aborted, perform hard shutdown 2025-05-07T08:54:22.916187Z :DEBUG: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|cc44d9e0-aeaf287a-8e43f488-79cb6eea_0] Write session: destroy 2025-05-07T08:54:23.047104Z node 16 :KQP_EXECUTER WARN: kqp_shards_resolver.cpp:86: [ShardsResolver] TxId: 281474976710683. Failed to resolve tablet: 72075186224037890 after several retries. 2025-05-07T08:54:23.047256Z node 16 :KQP_EXECUTER WARN: kqp_executer_impl.h:257: ActorId: [16:7501624507129440814:2482] TxId: 281474976710683. Ctx: { TraceId: 01jtmz80c6f2gds7xb05ehdmr1, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=16&id=OTUzMjI4NzktY2JmZGM3ZDYtYTM5YmEyY2MtMTk4ZGIyNjg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Shards nodes resolve failed, status: UNAVAILABLE, issues:
: Error: Failed to resolve tablet: 72075186224037890 after several retries. 2025-05-07T08:54:23.047485Z node 16 :KQP_SESSION WARN: kqp_session_actor.cpp:2578: SessionId: ydb://session/3?node_id=16&id=OTUzMjI4NzktY2JmZGM3ZDYtYTM5YmEyY2MtMTk4ZGIyNjg=, ActorId: [16:7501624507129440811:2482], ActorState: ExecuteState, TraceId: 01jtmz80c6f2gds7xb05ehdmr1, Create QueryResponse for error on request, msg: 2025-05-07T08:54:23.052760Z node 16 :PQ_METACACHE ERROR: msgbus_server_pq_metacache.cpp:260: Got error trying to perform request: { Response { QueryIssues { message: "Failed to resolve tablet: 72075186224037890 after several retries." severity: 1 } TxMeta { id: "01jtmz80c73p3awzan47qbwr0w" } } YdbStatus: UNAVAILABLE ConsumedRu: 1 } 2025-05-07T08:54:23.650101Z node 15 :KQP_EXECUTER WARN: kqp_shards_resolver.cpp:86: [ShardsResolver] TxId: 281474976720701. Failed to resolve tablet: 72075186224037888 after several retries. 2025-05-07T08:54:23.650306Z node 15 :KQP_EXECUTER WARN: kqp_executer_impl.h:257: ActorId: [15:7501624511129809185:2574] TxId: 281474976720701. Ctx: { TraceId: 01jtmz80e525yym35erch7qx5g, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=15&id=ZDZmYTg1ZGQtZjFlNjljMmYtOWMxZTkxZjktOTQ3Y2ZkNzY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Shards nodes resolve failed, status: UNAVAILABLE, issues:
: Error: Failed to resolve tablet: 72075186224037888 after several retries. 2025-05-07T08:54:23.650608Z node 15 :KQP_SESSION WARN: kqp_session_actor.cpp:2578: SessionId: ydb://session/3?node_id=15&id=ZDZmYTg1ZGQtZjFlNjljMmYtOWMxZTkxZjktOTQ3Y2ZkNzY=, ActorId: [15:7501624506834841865:2574], ActorState: ExecuteState, TraceId: 01jtmz80e525yym35erch7qx5g, Create QueryResponse for error on request, msg: 2025-05-07T08:54:23.660284Z node 15 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Execution" issue_code: 1060 severity: 2 issues { position { row: 3 column: 120 } message: "Cost Based Optimizer could not be applied to this query: couldn\'t load statistics" end_position { row: 3 column: 120 } issue_code: 8001 severity: 2 } } QueryIssues { message: "Failed to resolve tablet: 72075186224037888 after several retries." severity: 1 } TxMeta { id: "01jtmz80yq2xzvnjkym5aat2hh" } } YdbStatus: UNAVAILABLE ConsumedRu: 342 } >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-62 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-63 >> DataStreams::TestStreamTimeRetention [GOOD] >> DataStreams::TestUnsupported >> TPersQueueNewSchemeCacheTest::TestWriteStat1stClass [GOOD] >> TPersQueueNewSchemeCacheTest::TestWriteStat1stClassTopicAPI >> TSlotIndexesPoolTest::Expansion [GOOD] >> TKeyValueTest::TestWriteToExtraChannelThenReadMixedChannelsReturnsOk [GOOD] >> TargetDiscoverer::IndexedTable >> BasicUsage::WaitEventBlocksBeforeDiscovery [GOOD] >> BasicUsage::SimpleHandlers |90.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/controller/ut_target_discoverer/unittest |90.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TSlotIndexesPoolTest::Expansion [GOOD] >> TPQTest::TestPQSmallRead [GOOD] >> TPQTest::TestPQReadAhead ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/federated_topic/ut/unittest >> BasicUsage::WriteSessionCloseIgnoresWrites [GOOD] Test command err: 2025-05-07T08:53:50.757089Z :WriteSessionCloseWaitsForWrites INFO: Random seed for debugging is 1746608030757056 2025-05-07T08:53:51.338605Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501624374760330315:2278];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:53:51.338815Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-05-07T08:53:51.903180Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-05-07T08:53:51.858930Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7501624373131193143:2232];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:53:51.859676Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003edf/r3tmp/tmp7YPuwE/pdisk_1.dat 2025-05-07T08:53:51.994727Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-05-07T08:53:52.494964Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path 
existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:53:52.540771Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:53:52.569854Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:53:52.569961Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:53:52.579005Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:53:52.579089Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:53:52.592879Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:53:52.600937Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-05-07T08:53:52.610569Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 10108, node 1 2025-05-07T08:53:53.034211Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/zvgn/003edf/r3tmp/yandexsSdLUJ.tmp 2025-05-07T08:53:53.034236Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/zvgn/003edf/r3tmp/yandexsSdLUJ.tmp 2025-05-07T08:53:53.034401Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/zvgn/003edf/r3tmp/yandexsSdLUJ.tmp 2025-05-07T08:53:53.034554Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-05-07T08:53:53.210829Z INFO: TTestServer started on Port 32458 GrpcPort 10108 TClient is connected to server localhost:32458 PQClient connected to localhost:10108 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:53:53.876408Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... waiting... waiting... waiting... 
2025-05-07T08:53:56.322129Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501624374760330315:2278];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:53:56.322235Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:53:56.456679Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7501624373131193143:2232];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:53:56.456766Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:53:57.134073Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7501624398900997080:2314], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:53:57.134201Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7501624398900997047:2310], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:53:57.139323Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:53:57.145842Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976720657:3, at schemeshard: 72057594046644480 2025-05-07T08:53:57.183130Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7501624398900997085:2315], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
<main>: Error: Transaction 281474976720657 completed, doublechecking } 2025-05-07T08:53:57.296556Z node 2 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [2:7501624398900997113:2132] txid# 281474976720658, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:53:57.816568Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7501624398900997128:2319], status: SCHEME_ERROR, issues:
<main>: Error: Type annotation, code: 1030
<main>:3:16: Error: At function: KiReadTable!
<main>:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-05-07T08:53:57.818228Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2147: SessionId: ydb://session/3?node_id=2&id=NGNiZTEyOTUtNDc4ZTM2MmItOGI0NGFlZmMtMjcxMDY3Nw==, ActorId: [2:7501624398900997044:2308], ActorState: ExecuteState, TraceId: 01jtmz777yf82m5bkcqctas049, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-05-07T08:53:57.841416Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7501624400530134988:2345], status: SCHEME_ERROR, issues:
<main>: Error: Type annotation, code: 1030
<main>:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-05-07T08:53:57.847082Z node 2 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-05-07T08:53:57.847159Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2147: SessionId: ydb://session/3?node_id=1&id=YzdlYjQ5ZTUtYTNmMTRkODUtZjljYWEwNDUtYTg4NDA0ZjY=, ActorId: [1:7501624400530134937:2336], ActorState: ExecuteState, TraceId: 01jtmz77bh7974064cnzgwveqz, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-05-07T08:53:57.847553Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-05-07T08:53:57.853712Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 2025-05-07T08:53:58.063311Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-05-07T08:53:58.263766Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost:10108", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, false, 1000); 2025-05-07T08:53:58.759835Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715664. Ctx: { TraceId: 01jtmz78mhaxptr84gttsvj74a, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=N ... 
zop" } TopicPath: "/Root/PQ/rt3.dc1--test-topic" YdbDatabasePath: "/Root" Consumers { Name: "user" ReadFromTimestampsMs: 0 FormatVersion: 0 Codec { } Version: 0 Important: false } } ErrorCode: OK } } } === Topic created, have version: 1 2025-05-07T08:54:22.282325Z :DEBUG: [] MessageGroupId [src] SessionId [] Write session: try to update token 2025-05-07T08:54:22.282708Z :INFO: [] MessageGroupId [src] SessionId [] Write session: Do CDS request 2025-05-07T08:54:22.282746Z :INFO: [] MessageGroupId [src] SessionId [] Start write session. Will connect to endpoint: localhost:65243 2025-05-07T08:54:22.321111Z :DEBUG: [] MessageGroupId [src] SessionId [] Write session: send init request: init_request { topic: "test-topic" message_group_id: "src" } 2025-05-07T08:54:22.344259Z node 3 :PQ_WRITE_PROXY DEBUG: grpc_pq_write.h:107: new grpc connection 2025-05-07T08:54:22.344288Z node 3 :PQ_WRITE_PROXY DEBUG: grpc_pq_write.h:141: new session created cookie 1 2025-05-07T08:54:22.345852Z node 3 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 1 sessionId: grpc read done: success: 1 data: init_request { topic: "test-topic" message_group_id: "src" } 2025-05-07T08:54:22.345964Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:442: session request cookie: 1 topic: "test-topic" message_group_id: "src" from ipv6:[::1]:53982 2025-05-07T08:54:22.345990Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:1535: write session: cookie=1 sessionId= userAgent="pqv1 server" ip=ipv6:[::1]:53982 proto=v1 topic=test-topic durationSec=0 2025-05-07T08:54:22.345999Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:566: init check schema 2025-05-07T08:54:22.347545Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:627: session v1 cookie: 1 sessionId: describe result for acl check 2025-05-07T08:54:22.347655Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:62: TTableHelper SelectQuery: --!syntax_v1 DECLARE $Hash AS Uint32; DECLARE $Topic AS Utf8; DECLARE $SourceId AS Utf8; SELECT Partition, CreateTime, AccessTime, SeqNo FROM `/Root/PQ/SourceIdMeta2` WHERE Hash == $Hash AND Topic == $Topic AND SourceId == $SourceId; 2025-05-07T08:54:22.347666Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:63: TTableHelper UpdateQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint32; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64;DECLARE $SeqNo AS Uint64; UPSERT INTO `/Root/PQ/SourceIdMeta2` (Hash, Topic, SourceId, CreateTime, AccessTime, Partition, SeqNo) VALUES ($Hash, $Topic, $SourceId, $CreateTime, $AccessTime, $Partition, $SeqNo); 2025-05-07T08:54:22.347673Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:64: TTableHelper UpdateAccessTimeQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint32; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64; UPDATE `/Root/PQ/SourceIdMeta2` SET AccessTime = $AccessTime WHERE Hash = $Hash AND Topic = $Topic AND SourceId = $SourceId AND Partition = $Partition; 2025-05-07T08:54:22.347689Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:111: TPartitionChooser [3:7501624509455201327:2491] (SourceId=src, PreferedPartition=(NULL)) StartKqpSession 2025-05-07T08:54:22.349840Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:142: TPartitionChooser 
[3:7501624509455201327:2491] (SourceId=src, PreferedPartition=(NULL)) Select from the table 2025-05-07T08:54:22.515860Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__old_chooser_actor.h:67: TPartitionChooser [3:7501624509455201327:2491] (SourceId=src, PreferedPartition=(NULL)) RequestPQRB 2025-05-07T08:54:22.516704Z node 3 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72075186224037893][rt3.dc1--test-topic] pipe [3:7501624509455201363:2491] connected; active server actors: 1 2025-05-07T08:54:22.516869Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__old_chooser_actor.h:80: TPartitionChooser [3:7501624509455201327:2491] (SourceId=src, PreferedPartition=(NULL)) Received partition 0 from PQRB for SourceId=src 2025-05-07T08:54:22.516889Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:174: TPartitionChooser [3:7501624509455201327:2491] (SourceId=src, PreferedPartition=(NULL)) Update the table 2025-05-07T08:54:22.518501Z node 3 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037893][rt3.dc1--test-topic] pipe [3:7501624509455201363:2491] disconnected; active server actors: 1 2025-05-07T08:54:22.518525Z node 3 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1688: [72075186224037893][rt3.dc1--test-topic] pipe [3:7501624509455201363:2491] disconnected no session 2025-05-07T08:54:22.627938Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:183: TPartitionChooser [3:7501624509455201327:2491] (SourceId=src, PreferedPartition=(NULL)) HandleUpdate PartitionPersisted=0 Status=SUCCESS 2025-05-07T08:54:22.627977Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:305: TPartitionChooser [3:7501624509455201327:2491] (SourceId=src, PreferedPartition=(NULL)) ReplyResult: Partition=0, SeqNo=(NULL) 2025-05-07T08:54:22.627993Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:268: TPartitionChooser [3:7501624509455201327:2491] (SourceId=src, PreferedPartition=(NULL)) Start idle 2025-05-07T08:54:22.628020Z node 3 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:689: ProceedPartition. session cookie: 1 sessionId: partition: 0 expectedGeneration: (NULL) 2025-05-07T08:54:22.629844Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:798: TPartitionWriter 72075186224037892 (partition=0) TEvClientConnected Status OK, TabletId: 72075186224037892, NodeId 4, Generation: 1 2025-05-07T08:54:22.629739Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2874: [PQ: 72075186224037892] server connected, pipe [3:7501624509455201392:2491], now have 1 active actors on pipe 2025-05-07T08:54:22.630066Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:342: Handle TEvRequest topic: 'rt3.dc1--test-topic' requestId: 2025-05-07T08:54:22.630103Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2788: [PQ: 72075186224037892] got client message batch for topic 'rt3.dc1--test-topic' partition 0 2025-05-07T08:54:22.630187Z node 4 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie src|cc139bd8-992f8638-cac79b70-c2c91cbd_0 generated for partition 0 topic 'rt3.dc1--test-topic' owner src 2025-05-07T08:54:22.630296Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:35: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ReplyOwnerOk. 
Partition: 0 2025-05-07T08:54:22.630382Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:377: Answer ok topic: 'rt3.dc1--test-topic' partition: 0 messageNo: 0 requestId: cookie: 0 2025-05-07T08:54:22.631802Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:342: Handle TEvRequest topic: 'rt3.dc1--test-topic' requestId: 2025-05-07T08:54:22.631832Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2788: [PQ: 72075186224037892] got client message batch for topic 'rt3.dc1--test-topic' partition 0 2025-05-07T08:54:22.631903Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:377: Answer ok topic: 'rt3.dc1--test-topic' partition: 0 messageNo: 0 requestId: cookie: 0 2025-05-07T08:54:22.632772Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:865: session inited cookie: 1 partition: 0 MaxSeqNo: 0 sessionId: src|cc139bd8-992f8638-cac79b70-c2c91cbd_0 2025-05-07T08:54:22.633510Z :INFO: [] MessageGroupId [src] SessionId [] Counters: { Errors: 0 CurrentSessionLifetimeMs: 1746608062633 BytesWritten: 0 MessagesWritten: 0 BytesWrittenCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-05-07T08:54:22.633619Z :INFO: [] MessageGroupId [src] SessionId [] Write session established. Init response: session_id: "src|cc139bd8-992f8638-cac79b70-c2c91cbd_0" topic: "test-topic" cluster: "dc1" supported_codecs: CODEC_RAW supported_codecs: CODEC_GZIP supported_codecs: CODEC_LZOP 2025-05-07T08:54:22.633820Z :INFO: [] MessageGroupId [src] SessionId [src|cc139bd8-992f8638-cac79b70-c2c91cbd_0] Write session: close. Timeout = 0 ms 2025-05-07T08:54:22.634481Z :INFO: [] MessageGroupId [src] SessionId [src|cc139bd8-992f8638-cac79b70-c2c91cbd_0] Write session will now close 2025-05-07T08:54:22.634526Z :DEBUG: [] MessageGroupId [src] SessionId [src|cc139bd8-992f8638-cac79b70-c2c91cbd_0] Write session: aborting 2025-05-07T08:54:22.634904Z :INFO: [] MessageGroupId [src] SessionId [src|cc139bd8-992f8638-cac79b70-c2c91cbd_0] Write session: gracefully shut down, all writes complete 2025-05-07T08:54:22.634947Z :DEBUG: [] MessageGroupId [src] SessionId [src|cc139bd8-992f8638-cac79b70-c2c91cbd_0] Write session: destroy 2025-05-07T08:54:22.636715Z node 3 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 1 sessionId: src|cc139bd8-992f8638-cac79b70-c2c91cbd_0 grpc read done: success: 0 data: 2025-05-07T08:54:22.636751Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 1 sessionId: src|cc139bd8-992f8638-cac79b70-c2c91cbd_0 grpc read failed 2025-05-07T08:54:22.646565Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:818: session v1 closed cookie: 1 sessionId: src|cc139bd8-992f8638-cac79b70-c2c91cbd_0 2025-05-07T08:54:22.646595Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 1 sessionId: src|cc139bd8-992f8638-cac79b70-c2c91cbd_0 is DEAD 2025-05-07T08:54:22.647023Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:538: TPartitionWriter 72075186224037892 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-05-07T08:54:22.662107Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2899: [PQ: 72075186224037892] server disconnected, pipe [3:7501624509455201392:2491] destroyed 2025-05-07T08:54:22.662177Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:138: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::DropOwner. Session was created >>> Ready to answer: ok 2025-05-07T08:54:22.786101Z :ERROR: [/Root] OnFederationDiscovery: Got error. Status: UNAVAILABLE. 
Description: 2025-05-07T08:54:25.074375Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7261: Cannot get console configs 2025-05-07T08:54:25.074410Z node 3 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:54:26.271823Z node 3 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1073: TxId: 281474976710691, task: 1, CA Id [3:7501624526635070734:2525]. Got EvDeliveryProblem, TabletId: 72075186224037890, NotDelivered: 0 2025-05-07T08:54:26.306086Z node 3 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1073: TxId: 281474976710691, task: 1, CA Id [3:7501624526635070734:2525]. Got EvDeliveryProblem, TabletId: 72075186224037890, NotDelivered: 1 2025-05-07T08:54:26.366699Z node 3 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1073: TxId: 281474976710691, task: 1, CA Id [3:7501624526635070734:2525]. Got EvDeliveryProblem, TabletId: 72075186224037890, NotDelivered: 1 2025-05-07T08:54:26.442184Z node 3 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1073: TxId: 281474976710691, task: 1, CA Id [3:7501624526635070734:2525]. Got EvDeliveryProblem, TabletId: 72075186224037890, NotDelivered: 1 2025-05-07T08:54:26.516874Z node 3 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1073: TxId: 281474976710691, task: 1, CA Id [3:7501624526635070734:2525]. Got EvDeliveryProblem, TabletId: 72075186224037890, NotDelivered: 1 2025-05-07T08:54:26.671308Z node 3 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1073: TxId: 281474976710691, task: 1, CA Id [3:7501624526635070734:2525]. Got EvDeliveryProblem, TabletId: 72075186224037890, NotDelivered: 1 >> TargetDiscoverer::Negative ------- [TM] {asan, default-linux-x86_64, release} ydb/core/keyvalue/ut/unittest >> TKeyValueTest::TestWriteToExtraChannelThenReadMixedChannelsReturnsOk [GOOD] Test command err: Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:54:2057] recipient: [1:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:54:2057] recipient: [1:51:2095] Leader for TabletID 72057594037927937 is [1:56:2097] sender: [1:57:2057] recipient: [1:51:2095] Leader for TabletID 72057594037927937 is [1:56:2097] sender: [1:74:2057] recipient: [1:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:54:2057] recipient: [2:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:54:2057] recipient: [2:51:2095] Leader for TabletID 72057594037927937 is [2:56:2097] sender: [2:57:2057] recipient: [2:51:2095] Leader for TabletID 72057594037927937 is [2:56:2097] sender: [2:74:2057] recipient: [2:14:2061] !Reboot 72057594037927937 (actor [2:56:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [2:56:2097] sender: [2:76:2057] recipient: [2:36:2083] Leader for TabletID 72057594037927937 is [2:56:2097] sender: [2:79:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [2:56:2097] sender: [2:80:2057] recipient: [2:78:2110] Leader for TabletID 72057594037927937 is [2:81:2111] sender: [2:82:2057] recipient: [2:78:2110] !Reboot 72057594037927937 (actor [2:56:2097]) rebooted! !Reboot 72057594037927937 (actor [2:56:2097]) tablet resolver refreshed! 
new actor is[2:81:2111] Leader for TabletID 72057594037927937 is [2:81:2111] sender: [2:135:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:54:2057] recipient: [3:52:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:54:2057] recipient: [3:52:2095] Leader for TabletID 72057594037927937 is [3:56:2097] sender: [3:57:2057] recipient: [3:52:2095] Leader for TabletID 72057594037927937 is [3:56:2097] sender: [3:74:2057] recipient: [3:14:2061] !Reboot 72057594037927937 (actor [3:56:2097]) on event NKikimr::TEvKeyValue::TEvExecuteTransaction ! Leader for TabletID 72057594037927937 is [3:56:2097] sender: [3:76:2057] recipient: [3:36:2083] Leader for TabletID 72057594037927937 is [3:56:2097] sender: [3:78:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [3:56:2097] sender: [3:80:2057] recipient: [3:79:2110] Leader for TabletID 72057594037927937 is [3:81:2111] sender: [3:82:2057] recipient: [3:79:2110] !Reboot 72057594037927937 (actor [3:56:2097]) rebooted! !Reboot 72057594037927937 (actor [3:56:2097]) tablet resolver refreshed! new actor is[3:81:2111] Leader for TabletID 72057594037927937 is [3:81:2111] sender: [3:135:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:54:2057] recipient: [4:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:54:2057] recipient: [4:51:2095] Leader for TabletID 72057594037927937 is [4:56:2097] sender: [4:57:2057] recipient: [4:51:2095] Leader for TabletID 72057594037927937 is [4:56:2097] sender: [4:74:2057] recipient: [4:14:2061] !Reboot 72057594037927937 (actor [4:56:2097]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [4:56:2097] sender: [4:77:2057] recipient: [4:36:2083] Leader for TabletID 72057594037927937 is [4:56:2097] sender: [4:80:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [4:56:2097] sender: [4:81:2057] recipient: [4:79:2110] Leader for TabletID 72057594037927937 is [4:82:2111] sender: [4:83:2057] recipient: [4:79:2110] !Reboot 72057594037927937 (actor [4:56:2097]) rebooted! !Reboot 72057594037927937 (actor [4:56:2097]) tablet resolver refreshed! new actor is[4:82:2111] Leader for TabletID 72057594037927937 is [4:82:2111] sender: [4:136:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:54:2057] recipient: [5:50:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:54:2057] recipient: [5:50:2095] Leader for TabletID 72057594037927937 is [5:56:2097] sender: [5:57:2057] recipient: [5:50:2095] Leader for TabletID 72057594037927937 is [5:56:2097] sender: [5:74:2057] recipient: [5:14:2061] !Reboot 72057594037927937 (actor [5:56:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [5:56:2097] sender: [5:80:2057] recipient: [5:36:2083] Leader for TabletID 72057594037927937 is [5:56:2097] sender: [5:83:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [5:56:2097] sender: [5:84:2057] recipient: [5:82:2113] Leader for TabletID 72057594037927937 is [5:85:2114] sender: [5:86:2057] recipient: [5:82:2113] !Reboot 72057594037927937 (actor [5:56:2097]) rebooted! !Reboot 72057594037927937 (actor [5:56:2097]) tablet resolver refreshed! 
new actor is[5:85:2114] Leader for TabletID 72057594037927937 is [5:85:2114] sender: [5:139:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:54:2057] recipient: [6:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:54:2057] recipient: [6:51:2095] Leader for TabletID 72057594037927937 is [6:56:2097] sender: [6:57:2057] recipient: [6:51:2095] Leader for TabletID 72057594037927937 is [6:56:2097] sender: [6:74:2057] recipient: [6:14:2061] !Reboot 72057594037927937 (actor [6:56:2097]) on event NKikimr::TEvKeyValue::TEvReadRange ! Leader for TabletID 72057594037927937 is [6:56:2097] sender: [6:80:2057] recipient: [6:36:2083] Leader for TabletID 72057594037927937 is [6:56:2097] sender: [6:83:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [6:56:2097] sender: [6:84:2057] recipient: [6:82:2113] Leader for TabletID 72057594037927937 is [6:85:2114] sender: [6:86:2057] recipient: [6:82:2113] !Reboot 72057594037927937 (actor [6:56:2097]) rebooted! !Reboot 72057594037927937 (actor [6:56:2097]) tablet resolver refreshed! new actor is[6:85:2114] Leader for TabletID 72057594037927937 is [6:85:2114] sender: [6:139:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:54:2057] recipient: [7:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:54:2057] recipient: [7:51:2095] Leader for TabletID 72057594037927937 is [7:56:2097] sender: [7:57:2057] recipient: [7:51:2095] Leader for TabletID 72057594037927937 is [7:56:2097] sender: [7:74:2057] recipient: [7:14:2061] !Reboot 72057594037927937 (actor [7:56:2097]) on event NKikimr::TEvKeyValue::TEvNotify ! Leader for TabletID 72057594037927937 is [7:56:2097] sender: [7:81:2057] recipient: [7:36:2083] Leader for TabletID 72057594037927937 is [7:56:2097] sender: [7:84:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [7:56:2097] sender: [7:85:2057] recipient: [7:83:2113] Leader for TabletID 72057594037927937 is [7:86:2114] sender: [7:87:2057] recipient: [7:83:2113] !Reboot 72057594037927937 (actor [7:56:2097]) rebooted! !Reboot 72057594037927937 (actor [7:56:2097]) tablet resolver refreshed! new actor is[7:86:2114] Leader for TabletID 72057594037927937 is [7:86:2114] sender: [7:104:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:54:2057] recipient: [8:50:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:54:2057] recipient: [8:50:2095] Leader for TabletID 72057594037927937 is [8:56:2097] sender: [8:57:2057] recipient: [8:50:2095] Leader for TabletID 72057594037927937 is [8:56:2097] sender: [8:74:2057] recipient: [8:14:2061] !Reboot 72057594037927937 (actor [8:56:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [8:56:2097] sender: [8:83:2057] recipient: [8:36:2083] Leader for TabletID 72057594037927937 is [8:56:2097] sender: [8:86:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [8:56:2097] sender: [8:87:2057] recipient: [8:85:2115] Leader for TabletID 72057594037927937 is [8:88:2116] sender: [8:89:2057] recipient: [8:85:2115] !Reboot 72057594037927937 (actor [8:56:2097]) rebooted! !Reboot 72057594037927937 (actor [8:56:2097]) tablet resolver refreshed! 
new actor is[8:88:2116] Leader for TabletID 72057594037927937 is [8:88:2116] sender: [8:142:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:54:2057] recipient: [9:52:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:54:2057] recipient: [9:52:2095] Leader for TabletID 72057594037927937 is [9:56:2097] sender: [9:57:2057] recipient: [9:52:2095] Leader for TabletID 72057594037927937 is [9:56:2097] sender: [9:74:2057] recipient: [9:14:2061] !Reboot 72057594037927937 (actor [9:56:2097]) on event NKikimr::TEvKeyValue::TEvReadRange ! Leader for TabletID 72057594037927937 is [9:56:2097] sender: [9:83:2057] recipient: [9:36:2083] Leader for TabletID 72057594037927937 is [9:56:2097] sender: [9:86:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [9:56:2097] sender: [9:87:2057] recipient: [9:85:2115] Leader for TabletID 72057594037927937 is [9:88:2116] sender: [9:89:2057] recipient: [9:85:2115] !Reboot 72057594037927937 (actor [9:56:2097]) rebooted! !Reboot 72057594037927937 (actor [9:56:2097]) tablet resolver refreshed! new actor is[9:88:2116] Leader for TabletID 72057594037927937 is [9:88:2116] sender: [9:142:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:54:2057] recipient: [10:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:54:2057] recipient: [10:51:2095] Leader for TabletID 72057594037927937 is [10:56:2097] sender: [10:57:2057] recipient: [10:51:2095] Leader for TabletID 72057594037927937 is [10:56:2097] sender: [10:74:2057] recipient: [10:14:2061] !Reboot 72057594037927937 (actor [10:56:2097]) on event NKikimr::TEvKeyValue::TEvNotify ! Leader for TabletID 72057594037927937 is [10:56:2097] sender: [10:84:2057] recipient: [10:36:2083] Leader for TabletID 72057594037927937 is [10:56:2097] sender: [10:87:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [10:56:2097] sender: [10:88:2057] recipient: [10:86:2115] Leader for TabletID 72057594037927937 is [10:89:2116] sender: [10:90:2057] recipient: [10:86:2115] !Reboot 72057594037927937 (actor [10:56:2097]) rebooted! !Reboot 72057594037927937 (actor [10:56:2097]) tablet resolver refreshed! new actor is[10:89:2116] Leader for TabletID 72057594037927937 is [10:89:2116] sender: [10:107:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:54:2057] recipient: [11:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:54:2057] recipient: [11:51:2095] Leader for TabletID 72057594037927937 is [11:56:2097] sender: [11:57:2057] recipient: [11:51:2095] Leader for TabletID 72057594037927937 is [11:56:2097] sender: [11:74:2057] recipient: [11:14:2061] !Reboot 72057594037927937 (actor [11:56:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [11:56:2097] sender: [11:86:2057] recipient: [11:36:2083] Leader for TabletID 72057594037927937 is [11:56:2097] sender: [11:89:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [11:56:2097] sender: [11:90:2057] recipient: [11:88:2117] Leader for TabletID 72057594037927937 is [11:91:2118] sender: [11:92:2057] recipient: [11:88:2117] !Reboot 72057594037927937 (actor [11:56:2097]) rebooted! !Reboot 72057594037927937 (actor [11:56:2097]) tablet resolver refreshed! 
new actor is[11:91:2118] Leader for TabletID 72057594037927937 is [11:91:2118] sender: [11:145:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:54:2057] recipient: [12:52:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:54:2057] recipient: [12:52:2095] Leader for TabletID 72057594037927937 is [12:56:2097] sender: [12:57:2057] recipient: [12:52:2095] Leader for TabletID 72057594037927937 is [12:56:2097] sender: [12:74:2057] recipient: [12:14:2061] !Reboot 72057594037927937 (a ... 594037927937 (actor [23:56:2097]) tablet resolver refreshed! new actor is[23:85:2114] Leader for TabletID 72057594037927937 is [23:85:2114] sender: [23:139:2057] recipient: [23:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [24:54:2057] recipient: [24:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [24:54:2057] recipient: [24:51:2095] Leader for TabletID 72057594037927937 is [24:56:2097] sender: [24:57:2057] recipient: [24:51:2095] Leader for TabletID 72057594037927937 is [24:56:2097] sender: [24:74:2057] recipient: [24:14:2061] !Reboot 72057594037927937 (actor [24:56:2097]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [24:56:2097] sender: [24:81:2057] recipient: [24:36:2083] Leader for TabletID 72057594037927937 is [24:56:2097] sender: [24:84:2057] recipient: [24:83:2113] Leader for TabletID 72057594037927937 is [24:56:2097] sender: [24:85:2057] recipient: [24:14:2061] Leader for TabletID 72057594037927937 is [24:86:2114] sender: [24:87:2057] recipient: [24:83:2113] !Reboot 72057594037927937 (actor [24:56:2097]) rebooted! !Reboot 72057594037927937 (actor [24:56:2097]) tablet resolver refreshed! new actor is[24:86:2114] Leader for TabletID 72057594037927937 is [24:86:2114] sender: [24:140:2057] recipient: [24:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [25:54:2057] recipient: [25:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [25:54:2057] recipient: [25:51:2095] Leader for TabletID 72057594037927937 is [25:56:2097] sender: [25:57:2057] recipient: [25:51:2095] Leader for TabletID 72057594037927937 is [25:56:2097] sender: [25:74:2057] recipient: [25:14:2061] !Reboot 72057594037927937 (actor [25:56:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [25:56:2097] sender: [25:84:2057] recipient: [25:36:2083] Leader for TabletID 72057594037927937 is [25:56:2097] sender: [25:87:2057] recipient: [25:14:2061] Leader for TabletID 72057594037927937 is [25:56:2097] sender: [25:88:2057] recipient: [25:86:2116] Leader for TabletID 72057594037927937 is [25:89:2117] sender: [25:90:2057] recipient: [25:86:2116] !Reboot 72057594037927937 (actor [25:56:2097]) rebooted! !Reboot 72057594037927937 (actor [25:56:2097]) tablet resolver refreshed! new actor is[25:89:2117] Leader for TabletID 72057594037927937 is [25:89:2117] sender: [25:143:2057] recipient: [25:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [26:54:2057] recipient: [26:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [26:54:2057] recipient: [26:51:2095] Leader for TabletID 72057594037927937 is [26:56:2097] sender: [26:57:2057] recipient: [26:51:2095] Leader for TabletID 72057594037927937 is [26:56:2097] sender: [26:74:2057] recipient: [26:14:2061] !Reboot 72057594037927937 (actor [26:56:2097]) on event NKikimr::TEvKeyValue::TEvRequest ! 
Leader for TabletID 72057594037927937 is [26:56:2097] sender: [26:84:2057] recipient: [26:36:2083] Leader for TabletID 72057594037927937 is [26:56:2097] sender: [26:87:2057] recipient: [26:14:2061] Leader for TabletID 72057594037927937 is [26:56:2097] sender: [26:88:2057] recipient: [26:86:2116] Leader for TabletID 72057594037927937 is [26:89:2117] sender: [26:90:2057] recipient: [26:86:2116] !Reboot 72057594037927937 (actor [26:56:2097]) rebooted! !Reboot 72057594037927937 (actor [26:56:2097]) tablet resolver refreshed! new actor is[26:89:2117] Leader for TabletID 72057594037927937 is [26:89:2117] sender: [26:143:2057] recipient: [26:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [27:54:2057] recipient: [27:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [27:54:2057] recipient: [27:51:2095] Leader for TabletID 72057594037927937 is [27:56:2097] sender: [27:57:2057] recipient: [27:51:2095] Leader for TabletID 72057594037927937 is [27:56:2097] sender: [27:74:2057] recipient: [27:14:2061] !Reboot 72057594037927937 (actor [27:56:2097]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [27:56:2097] sender: [27:85:2057] recipient: [27:36:2083] Leader for TabletID 72057594037927937 is [27:56:2097] sender: [27:88:2057] recipient: [27:14:2061] Leader for TabletID 72057594037927937 is [27:56:2097] sender: [27:89:2057] recipient: [27:87:2116] Leader for TabletID 72057594037927937 is [27:90:2117] sender: [27:91:2057] recipient: [27:87:2116] !Reboot 72057594037927937 (actor [27:56:2097]) rebooted! !Reboot 72057594037927937 (actor [27:56:2097]) tablet resolver refreshed! new actor is[27:90:2117] Leader for TabletID 72057594037927937 is [27:90:2117] sender: [27:144:2057] recipient: [27:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [28:54:2057] recipient: [28:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [28:54:2057] recipient: [28:51:2095] Leader for TabletID 72057594037927937 is [28:56:2097] sender: [28:57:2057] recipient: [28:51:2095] Leader for TabletID 72057594037927937 is [28:56:2097] sender: [28:74:2057] recipient: [28:14:2061] !Reboot 72057594037927937 (actor [28:56:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [28:56:2097] sender: [28:88:2057] recipient: [28:36:2083] Leader for TabletID 72057594037927937 is [28:56:2097] sender: [28:91:2057] recipient: [28:14:2061] Leader for TabletID 72057594037927937 is [28:56:2097] sender: [28:92:2057] recipient: [28:90:2119] Leader for TabletID 72057594037927937 is [28:93:2120] sender: [28:94:2057] recipient: [28:90:2119] !Reboot 72057594037927937 (actor [28:56:2097]) rebooted! !Reboot 72057594037927937 (actor [28:56:2097]) tablet resolver refreshed! new actor is[28:93:2120] Leader for TabletID 72057594037927937 is [28:93:2120] sender: [28:147:2057] recipient: [28:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [29:54:2057] recipient: [29:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [29:54:2057] recipient: [29:51:2095] Leader for TabletID 72057594037927937 is [29:56:2097] sender: [29:57:2057] recipient: [29:51:2095] Leader for TabletID 72057594037927937 is [29:56:2097] sender: [29:74:2057] recipient: [29:14:2061] !Reboot 72057594037927937 (actor [29:56:2097]) on event NKikimr::TEvKeyValue::TEvRequest ! 
Leader for TabletID 72057594037927937 is [29:56:2097] sender: [29:88:2057] recipient: [29:36:2083] Leader for TabletID 72057594037927937 is [29:56:2097] sender: [29:90:2057] recipient: [29:14:2061] Leader for TabletID 72057594037927937 is [29:56:2097] sender: [29:92:2057] recipient: [29:91:2119] Leader for TabletID 72057594037927937 is [29:93:2120] sender: [29:94:2057] recipient: [29:91:2119] !Reboot 72057594037927937 (actor [29:56:2097]) rebooted! !Reboot 72057594037927937 (actor [29:56:2097]) tablet resolver refreshed! new actor is[29:93:2120] Leader for TabletID 72057594037927937 is [29:93:2120] sender: [29:147:2057] recipient: [29:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [30:54:2057] recipient: [30:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [30:54:2057] recipient: [30:51:2095] Leader for TabletID 72057594037927937 is [30:56:2097] sender: [30:57:2057] recipient: [30:51:2095] Leader for TabletID 72057594037927937 is [30:56:2097] sender: [30:74:2057] recipient: [30:14:2061] !Reboot 72057594037927937 (actor [30:56:2097]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [30:56:2097] sender: [30:89:2057] recipient: [30:36:2083] Leader for TabletID 72057594037927937 is [30:56:2097] sender: [30:92:2057] recipient: [30:14:2061] Leader for TabletID 72057594037927937 is [30:56:2097] sender: [30:93:2057] recipient: [30:91:2119] Leader for TabletID 72057594037927937 is [30:94:2120] sender: [30:95:2057] recipient: [30:91:2119] !Reboot 72057594037927937 (actor [30:56:2097]) rebooted! !Reboot 72057594037927937 (actor [30:56:2097]) tablet resolver refreshed! new actor is[30:94:2120] Leader for TabletID 72057594037927937 is [30:94:2120] sender: [30:148:2057] recipient: [30:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [31:54:2057] recipient: [31:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [31:54:2057] recipient: [31:51:2095] Leader for TabletID 72057594037927937 is [31:56:2097] sender: [31:57:2057] recipient: [31:51:2095] Leader for TabletID 72057594037927937 is [31:56:2097] sender: [31:74:2057] recipient: [31:14:2061] !Reboot 72057594037927937 (actor [31:56:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [31:56:2097] sender: [31:91:2057] recipient: [31:36:2083] Leader for TabletID 72057594037927937 is [31:56:2097] sender: [31:94:2057] recipient: [31:14:2061] Leader for TabletID 72057594037927937 is [31:56:2097] sender: [31:95:2057] recipient: [31:93:2121] Leader for TabletID 72057594037927937 is [31:96:2122] sender: [31:97:2057] recipient: [31:93:2121] !Reboot 72057594037927937 (actor [31:56:2097]) rebooted! !Reboot 72057594037927937 (actor [31:56:2097]) tablet resolver refreshed! new actor is[31:96:2122] Leader for TabletID 72057594037927937 is [31:96:2122] sender: [31:150:2057] recipient: [31:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [32:54:2057] recipient: [32:52:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [32:54:2057] recipient: [32:52:2095] Leader for TabletID 72057594037927937 is [32:56:2097] sender: [32:57:2057] recipient: [32:52:2095] Leader for TabletID 72057594037927937 is [32:56:2097] sender: [32:74:2057] recipient: [32:14:2061] !Reboot 72057594037927937 (actor [32:56:2097]) on event NKikimr::TEvKeyValue::TEvRequest ! 
Leader for TabletID 72057594037927937 is [32:56:2097] sender: [32:91:2057] recipient: [32:36:2083] Leader for TabletID 72057594037927937 is [32:56:2097] sender: [32:94:2057] recipient: [32:14:2061] Leader for TabletID 72057594037927937 is [32:56:2097] sender: [32:95:2057] recipient: [32:93:2121] Leader for TabletID 72057594037927937 is [32:96:2122] sender: [32:97:2057] recipient: [32:93:2121] !Reboot 72057594037927937 (actor [32:56:2097]) rebooted! !Reboot 72057594037927937 (actor [32:56:2097]) tablet resolver refreshed! new actor is[32:96:2122] Leader for TabletID 72057594037927937 is [32:96:2122] sender: [32:150:2057] recipient: [32:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [33:54:2057] recipient: [33:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [33:54:2057] recipient: [33:51:2095] Leader for TabletID 72057594037927937 is [33:56:2097] sender: [33:57:2057] recipient: [33:51:2095] Leader for TabletID 72057594037927937 is [33:56:2097] sender: [33:74:2057] recipient: [33:14:2061] !Reboot 72057594037927937 (actor [33:56:2097]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [33:56:2097] sender: [33:92:2057] recipient: [33:36:2083] Leader for TabletID 72057594037927937 is [33:56:2097] sender: [33:94:2057] recipient: [33:14:2061] Leader for TabletID 72057594037927937 is [33:56:2097] sender: [33:96:2057] recipient: [33:95:2121] Leader for TabletID 72057594037927937 is [33:97:2122] sender: [33:98:2057] recipient: [33:95:2121] !Reboot 72057594037927937 (actor [33:56:2097]) rebooted! !Reboot 72057594037927937 (actor [33:56:2097]) tablet resolver refreshed! new actor is[33:97:2122] Leader for TabletID 72057594037927937 is [33:97:2122] sender: [33:151:2057] recipient: [33:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [34:54:2057] recipient: [34:50:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [34:54:2057] recipient: [34:50:2095] Leader for TabletID 72057594037927937 is [34:56:2097] sender: [34:57:2057] recipient: [34:50:2095] Leader for TabletID 72057594037927937 is [34:56:2097] sender: [34:74:2057] recipient: [34:14:2061] >> TTxDataShardUploadRows::TestUploadShadowRowsShadowDataAlterSplitThenPublish [GOOD] >> TKeyValueTest::TestEmptyWriteReadDeleteWithRestartsThenResponseOkNewApi [GOOD] >> TKeyValueTest::TestGetStatusWorks |90.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/controller/ut_target_discoverer/unittest >> DataStreams::TestCreateExistingStream [GOOD] >> DataStreams::ListStreamsValidation |90.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/controller/ut_target_discoverer/unittest |90.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tablet_flat/ut/ydb-core-tablet_flat-ut |90.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tablet_flat/ut/ydb-core-tablet_flat-ut |90.2%| [LD] {RESULT} $(B)/ydb/core/tablet_flat/ut/ydb-core-tablet_flat-ut >> TKeyValueTest::TestInlineEmptyWriteReadDeleteWithRestartsThenResponseOkNewApi [GOOD] >> TTxDataShardUploadRows::TestUploadRowsLocks [GOOD] >> KqpQueryPerf::RangeLimitRead-QueryService >> DataStreams::TestListShards1Shard [GOOD] >> KqpQueryPerf::IdxLookupJoinThreeWay+QueryService |90.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_allocator/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_upload_rows/unittest >> TTxDataShardUploadRows::TestUploadShadowRowsShadowDataAlterSplitThenPublish [GOOD] Test command err: 
2025-05-07T08:54:19.837459Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:105:2151], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:54:19.837612Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:54:19.837857Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00359d/r3tmp/tmpMGDCdW/pdisk_1.dat 2025-05-07T08:54:20.356821Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-05-07T08:54:20.406776Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:54:20.460947Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:54:20.461101Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:54:20.474466Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:54:20.558552Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-05-07T08:54:20.604459Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3098: StateInit, received event# 268828672, Sender [1:656:2563], Recipient [1:665:2569]: NKikimr::TEvTablet::TEvBoot 2025-05-07T08:54:20.605681Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3098: StateInit, received event# 268828673, Sender [1:656:2563], Recipient [1:665:2569]: NKikimr::TEvTablet::TEvRestored 2025-05-07T08:54:20.606434Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:665:2569] 2025-05-07T08:54:20.606717Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-05-07T08:54:20.617768Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3111: StateInactive, received event# 268828684, Sender [1:656:2563], Recipient [1:665:2569]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-05-07T08:54:20.671438Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-05-07T08:54:20.671634Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-05-07T08:54:20.673390Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-05-07T08:54:20.673477Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-05-07T08:54:20.673548Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-05-07T08:54:20.673963Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-05-07T08:54:20.674213Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-05-07T08:54:20.674319Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state 
actor id [1:681:2569] in generation 1 2025-05-07T08:54:20.686411Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-05-07T08:54:20.726053Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-05-07T08:54:20.726295Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-05-07T08:54:20.726416Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:683:2579] 2025-05-07T08:54:20.726457Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-05-07T08:54:20.726490Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-05-07T08:54:20.726526Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T08:54:20.726746Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 2146435072, Sender [1:665:2569], Recipient [1:665:2569]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-05-07T08:54:20.726805Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3155: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-05-07T08:54:20.727132Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-05-07T08:54:20.727232Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-05-07T08:54:20.727350Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T08:54:20.727401Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-05-07T08:54:20.727459Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-05-07T08:54:20.727508Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-05-07T08:54:20.727552Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-05-07T08:54:20.727584Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-05-07T08:54:20.727626Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T08:54:20.727748Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269877761, Sender [1:672:2573], Recipient [1:665:2569]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-05-07T08:54:20.727786Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3166: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-05-07T08:54:20.727851Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:661:2566], serverId# [1:672:2573], sessionId# [0:0:0] 2025-05-07T08:54:20.728306Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269549568, Sender [1:410:2405], Recipient [1:672:2573] 2025-05-07T08:54:20.728370Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3136: StateWork, processing 
event TEvDataShard::TEvProposeTransaction 2025-05-07T08:54:20.728469Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-05-07T08:54:20.728656Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-05-07T08:54:20.728700Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-05-07T08:54:20.728783Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-05-07T08:54:20.728852Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-05-07T08:54:20.728897Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx 2025-05-07T08:54:20.728935Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx 2025-05-07T08:54:20.728969Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-05-07T08:54:20.729252Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayCompleteNoMoreRestarts 2025-05-07T08:54:20.729300Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit StoreSchemeTx 2025-05-07T08:54:20.729335Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit FinishPropose 2025-05-07T08:54:20.729367Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-05-07T08:54:20.729413Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayComplete 2025-05-07T08:54:20.729441Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit FinishPropose 2025-05-07T08:54:20.729470Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit WaitForPlan 2025-05-07T08:54:20.729503Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit WaitForPlan 2025-05-07T08:54:20.729536Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:281474976715657] at 72075186224037888 is not ready to execute on unit WaitForPlan 2025-05-07T08:54:20.731848Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269746185, Sender [1:684:2580], Recipient [1:665:2569]: NKikimr::TEvTxProxySchemeCache::TEvWatchNotifyUpdated 2025-05-07T08:54:20.731910Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-05-07T08:54:20.742681Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-05-07T08:54:20.742764Z node 1 
:TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-05-07T08:54:20.742800Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-05-07T08:54:20.742848Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 281474976715657 at tablet 72075186224037888 send to client, exec latency: 0 ms, propose latency: 0 ms, status: PREPARED 2025-05-07T08:54:20.742955Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-05-07T08:54:20.911822Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269877761, Sender [1:699:2589], Recipient [1:665:2569]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-05-07T08:54:20.911889Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3166: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-05-07T08:54:20.911927Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075 ... line.cpp:1862: Execution status for [3500:281474976715668] at 72075186224037889 is DelayComplete 2025-05-07T08:54:28.985554Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3500:281474976715668] at 72075186224037889 executing on unit CompleteOperation 2025-05-07T08:54:28.985604Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [3500:281474976715668] at 72075186224037889 to execution unit CompletedOperations 2025-05-07T08:54:28.985649Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3500:281474976715668] at 72075186224037889 on unit CompletedOperations 2025-05-07T08:54:28.985690Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3500:281474976715668] at 72075186224037889 is Executed 2025-05-07T08:54:28.985718Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3500:281474976715668] at 72075186224037889 executing on unit CompletedOperations 2025-05-07T08:54:28.985754Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [3500:281474976715668] at 72075186224037889 has finished 2025-05-07T08:54:28.985801Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-05-07T08:54:28.985839Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 72075186224037889 2025-05-07T08:54:28.985889Z node 2 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037889 has no attached operations 2025-05-07T08:54:28.985938Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 72075186224037889 2025-05-07T08:54:29.003368Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-05-07T08:54:29.003463Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-05-07T08:54:29.003509Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [3500:281474976715668] at 72075186224037889 on unit CompleteOperation 2025-05-07T08:54:29.003593Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [3500 : 281474976715668] from 72075186224037889 at tablet 72075186224037889 send 
result to client [2:1138:2916], exec latency: 0 ms, propose latency: 0 ms 2025-05-07T08:54:29.003669Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-05-07T08:54:29.004074Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269287940, Sender [2:1138:2916], Recipient [2:963:2767]: NKikimrTx.TEvStreamClearanceResponse TxId: 281474976715668 Cleared: true 2025-05-07T08:54:29.004129Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3151: StateWork, processing event TEvTxProcessing::TEvStreamClearanceResponse 2025-05-07T08:54:29.004231Z node 2 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037890 step# 3500} 2025-05-07T08:54:29.004296Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037890 2025-05-07T08:54:29.004331Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037890 2025-05-07T08:54:29.004571Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 2146435072, Sender [2:963:2767], Recipient [2:963:2767]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-05-07T08:54:29.004615Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3155: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-05-07T08:54:29.004685Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037890 2025-05-07T08:54:29.004740Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037890 active 1 active planned 1 immediate 0 planned 1 2025-05-07T08:54:29.004802Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [3500:281474976715668] at 72075186224037890 for WaitForStreamClearance 2025-05-07T08:54:29.004836Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3500:281474976715668] at 72075186224037890 on unit WaitForStreamClearance 2025-05-07T08:54:29.004877Z node 2 :TX_DATASHARD TRACE: wait_for_stream_clearance_unit.cpp:156: Got stream clearance for [3500:281474976715668] at 72075186224037890 2025-05-07T08:54:29.004927Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3500:281474976715668] at 72075186224037890 is Executed 2025-05-07T08:54:29.004961Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3500:281474976715668] at 72075186224037890 executing on unit WaitForStreamClearance 2025-05-07T08:54:29.004995Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [3500:281474976715668] at 72075186224037890 to execution unit ReadTableScan 2025-05-07T08:54:29.005027Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3500:281474976715668] at 72075186224037890 on unit ReadTableScan 2025-05-07T08:54:29.005308Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3500:281474976715668] at 72075186224037890 is Continue 2025-05-07T08:54:29.005341Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037890 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-05-07T08:54:29.005372Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037890 2025-05-07T08:54:29.005405Z node 2 :TX_DATASHARD TRACE: plan_queue_unit.cpp:52: TPlanQueueUnit at 72075186224037890 out-of-order 
limits exceeded 2025-05-07T08:54:29.005443Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037890 2025-05-07T08:54:29.006415Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 2146435082, Sender [2:1158:2933], Recipient [2:963:2767]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvRegisterScanActor 2025-05-07T08:54:29.006463Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3160: StateWork, processing event TEvPrivate::TEvRegisterScanActor 2025-05-07T08:54:29.006768Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:514: Got quota for read table scan ShardId: 72075186224037890, TxId: 281474976715668, MessageQuota: 1 2025-05-07T08:54:29.007587Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:662: Send response data ShardId: 72075186224037890, TxId: 281474976715668, Size: 54, Rows: 0, PendingAcks: 1, MessageQuota: 0 2025-05-07T08:54:29.009234Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:483: Got stream data ack ShardId: 72075186224037890, TxId: 281474976715668, PendingAcks: 0 2025-05-07T08:54:29.009294Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:717: Finish scan ShardId: 72075186224037890, TxId: 281474976715668, MessageQuota: 0 2025-05-07T08:54:29.093084Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:4471: FullScan complete at 72075186224037890 2025-05-07T08:54:29.093177Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:4477: Found op: cookie: 281474976715668, at: 72075186224037890 2025-05-07T08:54:29.093765Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 2146435072, Sender [2:963:2767], Recipient [2:963:2767]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-05-07T08:54:29.093818Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3155: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-05-07T08:54:29.093905Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037890 2025-05-07T08:54:29.093950Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037890 active 1 active planned 1 immediate 0 planned 1 2025-05-07T08:54:29.095670Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [3500:281474976715668] at 72075186224037890 for ReadTableScan 2025-05-07T08:54:29.095731Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3500:281474976715668] at 72075186224037890 on unit ReadTableScan 2025-05-07T08:54:29.095783Z node 2 :TX_DATASHARD TRACE: read_table_scan_unit.cpp:158: ReadTable scan complete for [3500:281474976715668] at 72075186224037890 error: , IsFatalError: 0 2025-05-07T08:54:29.095838Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3500:281474976715668] at 72075186224037890 is Executed 2025-05-07T08:54:29.095877Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3500:281474976715668] at 72075186224037890 executing on unit ReadTableScan 2025-05-07T08:54:29.095912Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [3500:281474976715668] at 72075186224037890 to execution unit CompleteOperation 2025-05-07T08:54:29.095946Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3500:281474976715668] at 72075186224037890 on unit CompleteOperation 2025-05-07T08:54:29.096192Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3500:281474976715668] at 72075186224037890 is 
DelayComplete 2025-05-07T08:54:29.096234Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3500:281474976715668] at 72075186224037890 executing on unit CompleteOperation 2025-05-07T08:54:29.096273Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [3500:281474976715668] at 72075186224037890 to execution unit CompletedOperations 2025-05-07T08:54:29.096319Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3500:281474976715668] at 72075186224037890 on unit CompletedOperations 2025-05-07T08:54:29.096358Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3500:281474976715668] at 72075186224037890 is Executed 2025-05-07T08:54:29.096388Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3500:281474976715668] at 72075186224037890 executing on unit CompletedOperations 2025-05-07T08:54:29.096420Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [3500:281474976715668] at 72075186224037890 has finished 2025-05-07T08:54:29.096457Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037890 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-05-07T08:54:29.096487Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 72075186224037890 2025-05-07T08:54:29.096522Z node 2 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037890 has no attached operations 2025-05-07T08:54:29.096558Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 72075186224037890 2025-05-07T08:54:29.108836Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037890 2025-05-07T08:54:29.108903Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037890 2025-05-07T08:54:29.108946Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [3500:281474976715668] at 72075186224037890 on unit CompleteOperation 2025-05-07T08:54:29.109019Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [3500 : 281474976715668] from 72075186224037890 at tablet 72075186224037890 send result to client [2:1138:2916], exec latency: 0 ms, propose latency: 1 ms 2025-05-07T08:54:29.109076Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037890 >> TargetDiscoverer::InvalidCredentials |90.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/workload_service/ut/ydb-core-kqp-workload_service-ut |90.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/workload_service/ut/ydb-core-kqp-workload_service-ut |90.2%| [LD] {RESULT} $(B)/ydb/core/kqp/workload_service/ut/ydb-core-kqp-workload_service-ut >> TTxLocatorTest::TestSignificantRequestWhenRunReserveTx >> TTxLocatorTest::TestAllocateAll ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_upload_rows/unittest >> TTxDataShardUploadRows::TestUploadRowsLocks [GOOD] Test command err: 2025-05-07T08:54:18.030041Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:105:2151], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:54:18.030276Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:54:18.030643Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0035a4/r3tmp/tmpzuHo1r/pdisk_1.dat 2025-05-07T08:54:18.456512Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-05-07T08:54:18.541564Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:54:18.615963Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:54:18.616089Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:54:18.631227Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:54:18.745717Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-05-07T08:54:18.867832Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:683:2578] 2025-05-07T08:54:18.868144Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-05-07T08:54:18.921319Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-05-07T08:54:18.921603Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-05-07T08:54:18.923481Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-05-07T08:54:18.923571Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-05-07T08:54:18.923626Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-05-07T08:54:18.924049Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-05-07T08:54:18.924394Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-05-07T08:54:18.924464Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:716:2578] in generation 1 2025-05-07T08:54:18.926149Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037889 actor [1:685:2580] 2025-05-07T08:54:18.926361Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-05-07T08:54:18.938999Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-05-07T08:54:18.939268Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-05-07T08:54:18.940690Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: 
QueueSize: 0, at tablet: 72075186224037889 2025-05-07T08:54:18.940758Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037889 2025-05-07T08:54:18.940797Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037889 2025-05-07T08:54:18.941115Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-05-07T08:54:18.941573Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037891 actor [1:688:2582] 2025-05-07T08:54:18.941797Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-05-07T08:54:18.949884Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-05-07T08:54:18.949959Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037889 persisting started state actor id [1:736:2580] in generation 1 2025-05-07T08:54:18.951980Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-05-07T08:54:18.952288Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-05-07T08:54:18.953804Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037891 2025-05-07T08:54:18.953885Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037891 2025-05-07T08:54:18.953937Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037891 2025-05-07T08:54:18.955025Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-05-07T08:54:18.956135Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-05-07T08:54:18.956191Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037891 persisting started state actor id [1:746:2582] in generation 1 2025-05-07T08:54:18.956672Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037890 actor [1:691:2584] 2025-05-07T08:54:18.956883Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-05-07T08:54:18.967465Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-05-07T08:54:18.967622Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-05-07T08:54:18.968922Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037890 2025-05-07T08:54:18.969011Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037890 2025-05-07T08:54:18.969066Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037890 2025-05-07T08:54:18.969326Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-05-07T08:54:18.969445Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-05-07T08:54:18.969521Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037890 persisting started state actor id [1:752:2584] in generation 1 2025-05-07T08:54:18.985091Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-05-07T08:54:19.024992Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-05-07T08:54:19.025194Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 
72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-05-07T08:54:19.025320Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:757:2620] 2025-05-07T08:54:19.025360Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-05-07T08:54:19.025418Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-05-07T08:54:19.025455Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T08:54:19.025813Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-05-07T08:54:19.025854Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037889 2025-05-07T08:54:19.025928Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037889 not sending time cast registration request in state WaitScheme: missing processing params 2025-05-07T08:54:19.026120Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037889, actorId: [1:758:2621] 2025-05-07T08:54:19.026151Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037889 2025-05-07T08:54:19.026175Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037889, state: WaitScheme 2025-05-07T08:54:19.026198Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-05-07T08:54:19.026688Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-05-07T08:54:19.026807Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-05-07T08:54:19.026912Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-05-07T08:54:19.026961Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037891 2025-05-07T08:54:19.027048Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037891 not sending time cast registration request in state WaitScheme: missing processing params 2025-05-07T08:54:19.027124Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037891, actorId: [1:759:2622] 2025-05-07T08:54:19.027149Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037891 2025-05-07T08:54:19.027170Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037891, state: WaitScheme 2025-05-07T08:54:19.027191Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037891 2025-05-07T08:54:19.027245Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-05-07T08:54:19.027273Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037890 2025-05-07T08:54:19.027346Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037890 not sending time cast registration request in state WaitScheme: missing processing params 2025-05-07T08:54:19.027431Z node 1 
:TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037890, actorId: [1:760:2623] 2025-05-07T08:54:19.027456Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037890 2025-05-07T08:54:19.027488Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037890, state: WaitScheme 2025-05-07T08:54:19.027516Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037890 2025-05-07T08:54:19.027815Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T08:54:19.027871Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-05-07T08:54:19.027924Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-05-07T08:54:19.027972Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T08:54:19.028023Z nod ... ails at 72075186224037888 loaded tx from db 1000:281474976715657 keys extracted: 0 2025-05-07T08:54:28.818494Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-05-07T08:54:28.818698Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T08:54:28.818763Z node 3 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-05-07T08:54:28.819300Z node 3 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-05-07T08:54:28.819816Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-05-07T08:54:28.828648Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-05-07T08:54:28.828734Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T08:54:28.829812Z node 3 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-05-07T08:54:28.829903Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T08:54:28.836230Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T08:54:28.836314Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-05-07T08:54:28.836377Z node 3 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-05-07T08:54:28.836472Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [3:419:2412], exec latency: 0 ms, propose latency: 0 ms 2025-05-07T08:54:28.836548Z node 3 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 
72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-05-07T08:54:28.836650Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T08:54:28.837403Z node 3 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-05-07T08:54:28.840338Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-05-07T08:54:28.840584Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-05-07T08:54:28.840658Z node 3 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-05-07T08:54:28.864187Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:731:2613], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:54:28.864351Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:742:2618], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:54:28.864455Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:54:28.871633Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480 2025-05-07T08:54:28.879215Z node 3 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-05-07T08:54:29.097529Z node 3 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-05-07T08:54:29.108035Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:745:2621], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-05-07T08:54:29.205939Z node 3 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [3:815:2660] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:54:29.953046Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715660. Ctx: { TraceId: 01jtmz867x20rm0de6zrrs6prg, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=OTU3MjkxZjYtOTQyMTRiYzAtNTJkYTI1MTctNDc1NDc4YWM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:54:29.962267Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [3:846:2677], serverId# [3:847:2678], sessionId# [0:0:0] 2025-05-07T08:54:29.962824Z node 3 :TX_DATASHARD DEBUG: execute_write_unit.cpp:245: Executing write operation for [0:2] at 72075186224037888 2025-05-07T08:54:29.963047Z node 3 :TX_DATASHARD DEBUG: execute_write_unit.cpp:410: Executed write operation for [0:2] at 72075186224037888, row count=3 2025-05-07T08:54:29.976145Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T08:54:30.262405Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715661. Ctx: { TraceId: 01jtmz87c68x1zppc9g70qctmv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YzdkYjc0NDMtOGQ1ZDcyYTYtODkzMTExNTEtYjc4NDFlMDA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:54:30.264951Z node 3 :TX_DATASHARD DEBUG: datashard__read_iterator.cpp:2410: 72075186224037888 Acquired lock# 281474976715661, counter# 0 for [OwnerId: 72057594046644480, LocalPathId: 2] { items { uint32_value: 300 } } 2025-05-07T08:54:30.321695Z node 3 :TX_DATASHARD INFO: datashard__op_rows.cpp:26: TTxDirectBase(36) Execute: at tablet# 72075186224037888 2025-05-07T08:54:30.339226Z node 3 :TX_DATASHARD INFO: datashard__op_rows.cpp:80: TTxDirectBase(36) Complete: at tablet# 72075186224037888 2025-05-07T08:54:30.339342Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T08:54:30.339453Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:2560: Waiting for PlanStep# 1501 from mediator time cast 2025-05-07T08:54:30.340418Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3780: Notified by mediator time cast with PlanStep# 1501 at tablet 72075186224037888 2025-05-07T08:54:30.340520Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T08:54:30.424966Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715662. Ctx: { TraceId: 01jtmz87p5e036qndv23enxwy5, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YzdkYjc0NDMtOGQ1ZDcyYTYtODkzMTExNTEtYjc4NDFlMDA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-05-07T08:54:30.438442Z node 3 :TX_DATASHARD DEBUG: execute_write_unit.cpp:245: Executing write operation for [0:5] at 72075186224037888 2025-05-07T08:54:30.438784Z node 3 :GLOBAL WARN: log.cpp:784: fline=events.h:103;event=ev_write_error;status=STATUS_LOCKS_BROKEN;details=Operation is aborting because locks are not valid;tx_id=5; 2025-05-07T08:54:30.468936Z node 3 :TX_DATASHARD INFO: datashard_write_operation.cpp:684: Write transaction 5 at 72075186224037888 has an error: Operation is aborting because locks are not valid 2025-05-07T08:54:30.469329Z node 3 :TX_DATASHARD ERROR: finish_propose_write_unit.cpp:226: Prepare transaction failed. txid 5 at tablet 72075186224037888 errors: Status: STATUS_LOCKS_BROKEN Issues: { message: "Operation is aborting because locks are not valid" issue_code: 2001 severity: 1 } 2025-05-07T08:54:30.469604Z node 3 :TX_DATASHARD ERROR: finish_propose_write_unit.cpp:168: Errors while proposing transaction txid 5 at tablet 72075186224037888 Status: STATUS_LOCKS_BROKEN Issues: { message: "Operation is aborting because locks are not valid" issue_code: 2001 severity: 1 } 2025-05-07T08:54:30.469767Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T08:54:30.470126Z node 3 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:748: SelfId: [3:903:2683], Table: `/Root/table-1` ([72057594046644480:2:1]), SessionActorId: [3:853:2683]Got LOCKS BROKEN for table `/Root/table-1`. ShardID=72075186224037888, Sink=[3:903:2683].{
: Error: Operation is aborting because locks are not valid, code: 2001 } 2025-05-07T08:54:30.470823Z node 3 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:2833: SelfId: [3:896:2683], SessionActorId: [3:853:2683], statusCode=ABORTED. Issue=
: Error: Transaction locks invalidated. Table: `/Root/table-1`., code: 2001
: Error: Operation is aborting because locks are not valid, code: 2001 . sessionActorId=[3:853:2683]. isRollback=0 2025-05-07T08:54:30.471334Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:1840: SessionId: ydb://session/3?node_id=3&id=YzdkYjc0NDMtOGQ1ZDcyYTYtODkzMTExNTEtYjc4NDFlMDA=, ActorId: [3:853:2683], ActorState: ExecuteState, TraceId: 01jtmz87p5e036qndv23enxwy5, got TEvKqpBuffer::TEvError in ExecuteState, status: ABORTED send to: [3:897:2683] from: [3:896:2683] 2025-05-07T08:54:30.471577Z node 3 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1944: ActorId: [3:897:2683] TxId: 281474976715662. Ctx: { TraceId: 01jtmz87p5e036qndv23enxwy5, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YzdkYjc0NDMtOGQ1ZDcyYTYtODkzMTExNTEtYjc4NDFlMDA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ABORTED: {
: Error: Transaction locks invalidated. Table: `/Root/table-1`., code: 2001 subissue: {
: Error: Operation is aborting because locks are not valid, code: 2001 } } 2025-05-07T08:54:30.472058Z node 3 :TX_DATASHARD DEBUG: execute_write_unit.cpp:245: Executing write operation for [0:6] at 72075186224037888 2025-05-07T08:54:30.472146Z node 3 :TX_DATASHARD DEBUG: execute_write_unit.cpp:414: Skip empty write operation for [0:6] at 72075186224037888 2025-05-07T08:54:30.472453Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T08:54:30.472648Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2578: SessionId: ydb://session/3?node_id=3&id=YzdkYjc0NDMtOGQ1ZDcyYTYtODkzMTExNTEtYjc4NDFlMDA=, ActorId: [3:853:2683], ActorState: ExecuteState, TraceId: 01jtmz87p5e036qndv23enxwy5, Create QueryResponse for error on request, msg: ------- [TM] {asan, default-linux-x86_64, release} ydb/core/keyvalue/ut/unittest >> TKeyValueTest::TestInlineEmptyWriteReadDeleteWithRestartsThenResponseOkNewApi [GOOD] Test command err: Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:54:2057] recipient: [1:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:54:2057] recipient: [1:51:2095] Leader for TabletID 72057594037927937 is [1:56:2097] sender: [1:57:2057] recipient: [1:51:2095] Leader for TabletID 72057594037927937 is [1:56:2097] sender: [1:74:2057] recipient: [1:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:54:2057] recipient: [2:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:54:2057] recipient: [2:51:2095] Leader for TabletID 72057594037927937 is [2:56:2097] sender: [2:57:2057] recipient: [2:51:2095] Leader for TabletID 72057594037927937 is [2:56:2097] sender: [2:74:2057] recipient: [2:14:2061] !Reboot 72057594037927937 (actor [2:56:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [2:56:2097] sender: [2:76:2057] recipient: [2:36:2083] Leader for TabletID 72057594037927937 is [2:56:2097] sender: [2:79:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [2:56:2097] sender: [2:80:2057] recipient: [2:78:2110] Leader for TabletID 72057594037927937 is [2:81:2111] sender: [2:82:2057] recipient: [2:78:2110] !Reboot 72057594037927937 (actor [2:56:2097]) rebooted! !Reboot 72057594037927937 (actor [2:56:2097]) tablet resolver refreshed! new actor is[2:81:2111] Leader for TabletID 72057594037927937 is [2:81:2111] sender: [2:135:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:54:2057] recipient: [3:52:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:54:2057] recipient: [3:52:2095] Leader for TabletID 72057594037927937 is [3:56:2097] sender: [3:57:2057] recipient: [3:52:2095] Leader for TabletID 72057594037927937 is [3:56:2097] sender: [3:74:2057] recipient: [3:14:2061] !Reboot 72057594037927937 (actor [3:56:2097]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [3:56:2097] sender: [3:76:2057] recipient: [3:36:2083] Leader for TabletID 72057594037927937 is [3:56:2097] sender: [3:78:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [3:56:2097] sender: [3:80:2057] recipient: [3:79:2110] Leader for TabletID 72057594037927937 is [3:81:2111] sender: [3:82:2057] recipient: [3:79:2110] !Reboot 72057594037927937 (actor [3:56:2097]) rebooted! !Reboot 72057594037927937 (actor [3:56:2097]) tablet resolver refreshed! 
new actor is[3:81:2111] Leader for TabletID 72057594037927937 is [3:81:2111] sender: [3:135:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:54:2057] recipient: [4:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:54:2057] recipient: [4:51:2095] Leader for TabletID 72057594037927937 is [4:56:2097] sender: [4:57:2057] recipient: [4:51:2095] Leader for TabletID 72057594037927937 is [4:56:2097] sender: [4:74:2057] recipient: [4:14:2061] !Reboot 72057594037927937 (actor [4:56:2097]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [4:56:2097] sender: [4:77:2057] recipient: [4:36:2083] Leader for TabletID 72057594037927937 is [4:56:2097] sender: [4:79:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [4:56:2097] sender: [4:81:2057] recipient: [4:80:2110] Leader for TabletID 72057594037927937 is [4:82:2111] sender: [4:83:2057] recipient: [4:80:2110] !Reboot 72057594037927937 (actor [4:56:2097]) rebooted! !Reboot 72057594037927937 (actor [4:56:2097]) tablet resolver refreshed! new actor is[4:82:2111] Leader for TabletID 72057594037927937 is [4:82:2111] sender: [4:136:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:54:2057] recipient: [5:50:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:54:2057] recipient: [5:50:2095] Leader for TabletID 72057594037927937 is [5:56:2097] sender: [5:57:2057] recipient: [5:50:2095] Leader for TabletID 72057594037927937 is [5:56:2097] sender: [5:74:2057] recipient: [5:14:2061] !Reboot 72057594037927937 (actor [5:56:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [5:56:2097] sender: [5:80:2057] recipient: [5:36:2083] Leader for TabletID 72057594037927937 is [5:56:2097] sender: [5:83:2057] recipient: [5:82:2113] Leader for TabletID 72057594037927937 is [5:56:2097] sender: [5:84:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [5:85:2114] sender: [5:86:2057] recipient: [5:82:2113] !Reboot 72057594037927937 (actor [5:56:2097]) rebooted! !Reboot 72057594037927937 (actor [5:56:2097]) tablet resolver refreshed! new actor is[5:85:2114] Leader for TabletID 72057594037927937 is [5:85:2114] sender: [5:139:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:54:2057] recipient: [6:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:54:2057] recipient: [6:51:2095] Leader for TabletID 72057594037927937 is [6:56:2097] sender: [6:57:2057] recipient: [6:51:2095] Leader for TabletID 72057594037927937 is [6:56:2097] sender: [6:74:2057] recipient: [6:14:2061] !Reboot 72057594037927937 (actor [6:56:2097]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [6:56:2097] sender: [6:80:2057] recipient: [6:36:2083] Leader for TabletID 72057594037927937 is [6:56:2097] sender: [6:83:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [6:56:2097] sender: [6:84:2057] recipient: [6:82:2113] Leader for TabletID 72057594037927937 is [6:85:2114] sender: [6:86:2057] recipient: [6:82:2113] !Reboot 72057594037927937 (actor [6:56:2097]) rebooted! !Reboot 72057594037927937 (actor [6:56:2097]) tablet resolver refreshed! 
new actor is[6:85:2114] Leader for TabletID 72057594037927937 is [6:85:2114] sender: [6:139:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:54:2057] recipient: [7:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:54:2057] recipient: [7:51:2095] Leader for TabletID 72057594037927937 is [7:56:2097] sender: [7:57:2057] recipient: [7:51:2095] Leader for TabletID 72057594037927937 is [7:56:2097] sender: [7:74:2057] recipient: [7:14:2061] !Reboot 72057594037927937 (actor [7:56:2097]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [7:56:2097] sender: [7:81:2057] recipient: [7:36:2083] Leader for TabletID 72057594037927937 is [7:56:2097] sender: [7:84:2057] recipient: [7:83:2113] Leader for TabletID 72057594037927937 is [7:56:2097] sender: [7:85:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [7:86:2114] sender: [7:87:2057] recipient: [7:83:2113] !Reboot 72057594037927937 (actor [7:56:2097]) rebooted! !Reboot 72057594037927937 (actor [7:56:2097]) tablet resolver refreshed! new actor is[7:86:2114] Leader for TabletID 72057594037927937 is [7:86:2114] sender: [7:140:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:54:2057] recipient: [8:50:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:54:2057] recipient: [8:50:2095] Leader for TabletID 72057594037927937 is [8:56:2097] sender: [8:57:2057] recipient: [8:50:2095] Leader for TabletID 72057594037927937 is [8:56:2097] sender: [8:74:2057] recipient: [8:14:2061] !Reboot 72057594037927937 (actor [8:56:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [8:56:2097] sender: [8:83:2057] recipient: [8:36:2083] Leader for TabletID 72057594037927937 is [8:56:2097] sender: [8:86:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [8:56:2097] sender: [8:87:2057] recipient: [8:85:2115] Leader for TabletID 72057594037927937 is [8:88:2116] sender: [8:89:2057] recipient: [8:85:2115] !Reboot 72057594037927937 (actor [8:56:2097]) rebooted! !Reboot 72057594037927937 (actor [8:56:2097]) tablet resolver refreshed! new actor is[8:88:2116] Leader for TabletID 72057594037927937 is [8:88:2116] sender: [8:142:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:54:2057] recipient: [9:52:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:54:2057] recipient: [9:52:2095] Leader for TabletID 72057594037927937 is [9:56:2097] sender: [9:57:2057] recipient: [9:52:2095] Leader for TabletID 72057594037927937 is [9:56:2097] sender: [9:74:2057] recipient: [9:14:2061] !Reboot 72057594037927937 (actor [9:56:2097]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [9:56:2097] sender: [9:83:2057] recipient: [9:36:2083] Leader for TabletID 72057594037927937 is [9:56:2097] sender: [9:86:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [9:56:2097] sender: [9:87:2057] recipient: [9:85:2115] Leader for TabletID 72057594037927937 is [9:88:2116] sender: [9:89:2057] recipient: [9:85:2115] !Reboot 72057594037927937 (actor [9:56:2097]) rebooted! !Reboot 72057594037927937 (actor [9:56:2097]) tablet resolver refreshed! 
new actor is[9:88:2116] Leader for TabletID 72057594037927937 is [9:88:2116] sender: [9:142:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:54:2057] recipient: [10:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:54:2057] recipient: [10:51:2095] Leader for TabletID 72057594037927937 is [10:56:2097] sender: [10:57:2057] recipient: [10:51:2095] Leader for TabletID 72057594037927937 is [10:56:2097] sender: [10:74:2057] recipient: [10:14:2061] !Reboot 72057594037927937 (actor [10:56:2097]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [10:56:2097] sender: [10:84:2057] recipient: [10:36:2083] Leader for TabletID 72057594037927937 is [10:56:2097] sender: [10:87:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [10:56:2097] sender: [10:88:2057] recipient: [10:86:2115] Leader for TabletID 72057594037927937 is [10:89:2116] sender: [10:90:2057] recipient: [10:86:2115] !Reboot 72057594037927937 (actor [10:56:2097]) rebooted! !Reboot 72057594037927937 (actor [10:56:2097]) tablet resolver refreshed! new actor is[10:89:2116] Leader for TabletID 72057594037927937 is [10:89:2116] sender: [10:143:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:54:2057] recipient: [11:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:54:2057] recipient: [11:51:2095] Leader for TabletID 72057594037927937 is [11:56:2097] sender: [11:57:2057] recipient: [11:51:2095] Leader for TabletID 72057594037927937 is [11:56:2097] sender: [11:74:2057] recipient: [11:14:2061] !Reboot 72057594037927937 (actor [11:56:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [11:56:2097] sender: [11:86:2057] recipient: [11:36:2083] Leader for TabletID 72057594037927937 is [11:56:2097] sender: [11:88:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [11:56:2097] sender: [11:90:2057] recipient: [11:89:2117] Leader for TabletID 72057594037927937 is [11:91:2118] sender: [11:92:2057] recipient: [11:89:2117] !Reboot 72057594037927937 (actor [11:56:2097]) rebooted! !Reboot 72057594037927937 (actor [11:56:2097]) tablet resolver refreshed! new actor is[11:91:2118] Leader for TabletID 72057594037927937 is [11:91:2118] sender: [11:145:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:54:2057] recipient: [12:52:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:54:2057] recipient: [12:52:2095] Leader for TabletID 72057594037927937 is [12:56:2097] sender: [12:57:2057] recipient: [12:52:2095] Leader for TabletID 72057594037927937 is [12:56:2097] sender: [12:74:2057] recipient: [12:14:2061] !Reboot 72057594037927937 (acto ... boot 72057594037927937 (actor [22:56:2097]) tablet resolver refreshed! new actor is[22:85:2114] Leader for TabletID 72057594037927937 is [22:85:2114] sender: [22:139:2057] recipient: [22:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [23:54:2057] recipient: [23:52:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [23:54:2057] recipient: [23:52:2095] Leader for TabletID 72057594037927937 is [23:56:2097] sender: [23:57:2057] recipient: [23:52:2095] Leader for TabletID 72057594037927937 is [23:56:2097] sender: [23:74:2057] recipient: [23:14:2061] !Reboot 72057594037927937 (actor [23:56:2097]) on event NKikimr::TEvKeyValue::TEvRead ! 
Leader for TabletID 72057594037927937 is [23:56:2097] sender: [23:80:2057] recipient: [23:36:2083] Leader for TabletID 72057594037927937 is [23:56:2097] sender: [23:83:2057] recipient: [23:14:2061] Leader for TabletID 72057594037927937 is [23:56:2097] sender: [23:84:2057] recipient: [23:82:2113] Leader for TabletID 72057594037927937 is [23:85:2114] sender: [23:86:2057] recipient: [23:82:2113] !Reboot 72057594037927937 (actor [23:56:2097]) rebooted! !Reboot 72057594037927937 (actor [23:56:2097]) tablet resolver refreshed! new actor is[23:85:2114] Leader for TabletID 72057594037927937 is [23:85:2114] sender: [23:139:2057] recipient: [23:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [24:54:2057] recipient: [24:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [24:54:2057] recipient: [24:51:2095] Leader for TabletID 72057594037927937 is [24:56:2097] sender: [24:57:2057] recipient: [24:51:2095] Leader for TabletID 72057594037927937 is [24:56:2097] sender: [24:74:2057] recipient: [24:14:2061] !Reboot 72057594037927937 (actor [24:56:2097]) on event NKikimr::TEvKeyValue::TEvNotify ! Leader for TabletID 72057594037927937 is [24:56:2097] sender: [24:81:2057] recipient: [24:36:2083] Leader for TabletID 72057594037927937 is [24:56:2097] sender: [24:84:2057] recipient: [24:83:2113] Leader for TabletID 72057594037927937 is [24:56:2097] sender: [24:85:2057] recipient: [24:14:2061] Leader for TabletID 72057594037927937 is [24:86:2114] sender: [24:87:2057] recipient: [24:83:2113] !Reboot 72057594037927937 (actor [24:56:2097]) rebooted! !Reboot 72057594037927937 (actor [24:56:2097]) tablet resolver refreshed! new actor is[24:86:2114] Leader for TabletID 72057594037927937 is [24:86:2114] sender: [24:104:2057] recipient: [24:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [25:54:2057] recipient: [25:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [25:54:2057] recipient: [25:51:2095] Leader for TabletID 72057594037927937 is [25:56:2097] sender: [25:57:2057] recipient: [25:51:2095] Leader for TabletID 72057594037927937 is [25:56:2097] sender: [25:74:2057] recipient: [25:14:2061] !Reboot 72057594037927937 (actor [25:56:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [25:56:2097] sender: [25:83:2057] recipient: [25:36:2083] Leader for TabletID 72057594037927937 is [25:56:2097] sender: [25:86:2057] recipient: [25:14:2061] Leader for TabletID 72057594037927937 is [25:56:2097] sender: [25:87:2057] recipient: [25:85:2115] Leader for TabletID 72057594037927937 is [25:88:2116] sender: [25:89:2057] recipient: [25:85:2115] !Reboot 72057594037927937 (actor [25:56:2097]) rebooted! !Reboot 72057594037927937 (actor [25:56:2097]) tablet resolver refreshed! new actor is[25:88:2116] Leader for TabletID 72057594037927937 is [25:88:2116] sender: [25:142:2057] recipient: [25:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [26:54:2057] recipient: [26:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [26:54:2057] recipient: [26:51:2095] Leader for TabletID 72057594037927937 is [26:56:2097] sender: [26:57:2057] recipient: [26:51:2095] Leader for TabletID 72057594037927937 is [26:56:2097] sender: [26:74:2057] recipient: [26:14:2061] !Reboot 72057594037927937 (actor [26:56:2097]) on event NKikimr::TEvKeyValue::TEvReadRange ! 
Leader for TabletID 72057594037927937 is [26:56:2097] sender: [26:83:2057] recipient: [26:36:2083] Leader for TabletID 72057594037927937 is [26:56:2097] sender: [26:86:2057] recipient: [26:14:2061] Leader for TabletID 72057594037927937 is [26:56:2097] sender: [26:87:2057] recipient: [26:85:2115] Leader for TabletID 72057594037927937 is [26:88:2116] sender: [26:89:2057] recipient: [26:85:2115] !Reboot 72057594037927937 (actor [26:56:2097]) rebooted! !Reboot 72057594037927937 (actor [26:56:2097]) tablet resolver refreshed! new actor is[26:88:2116] Leader for TabletID 72057594037927937 is [26:88:2116] sender: [26:142:2057] recipient: [26:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [27:54:2057] recipient: [27:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [27:54:2057] recipient: [27:51:2095] Leader for TabletID 72057594037927937 is [27:56:2097] sender: [27:57:2057] recipient: [27:51:2095] Leader for TabletID 72057594037927937 is [27:56:2097] sender: [27:74:2057] recipient: [27:14:2061] !Reboot 72057594037927937 (actor [27:56:2097]) on event NKikimr::TEvKeyValue::TEvNotify ! Leader for TabletID 72057594037927937 is [27:56:2097] sender: [27:84:2057] recipient: [27:36:2083] Leader for TabletID 72057594037927937 is [27:56:2097] sender: [27:87:2057] recipient: [27:14:2061] Leader for TabletID 72057594037927937 is [27:56:2097] sender: [27:88:2057] recipient: [27:86:2115] Leader for TabletID 72057594037927937 is [27:89:2116] sender: [27:90:2057] recipient: [27:86:2115] !Reboot 72057594037927937 (actor [27:56:2097]) rebooted! !Reboot 72057594037927937 (actor [27:56:2097]) tablet resolver refreshed! new actor is[27:89:2116] Leader for TabletID 72057594037927937 is [27:89:2116] sender: [27:107:2057] recipient: [27:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [28:54:2057] recipient: [28:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [28:54:2057] recipient: [28:51:2095] Leader for TabletID 72057594037927937 is [28:56:2097] sender: [28:57:2057] recipient: [28:51:2095] Leader for TabletID 72057594037927937 is [28:56:2097] sender: [28:74:2057] recipient: [28:14:2061] !Reboot 72057594037927937 (actor [28:56:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [28:56:2097] sender: [28:86:2057] recipient: [28:36:2083] Leader for TabletID 72057594037927937 is [28:56:2097] sender: [28:88:2057] recipient: [28:14:2061] Leader for TabletID 72057594037927937 is [28:56:2097] sender: [28:90:2057] recipient: [28:89:2117] Leader for TabletID 72057594037927937 is [28:91:2118] sender: [28:92:2057] recipient: [28:89:2117] !Reboot 72057594037927937 (actor [28:56:2097]) rebooted! !Reboot 72057594037927937 (actor [28:56:2097]) tablet resolver refreshed! new actor is[28:91:2118] Leader for TabletID 72057594037927937 is [28:91:2118] sender: [28:145:2057] recipient: [28:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [29:54:2057] recipient: [29:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [29:54:2057] recipient: [29:51:2095] Leader for TabletID 72057594037927937 is [29:56:2097] sender: [29:57:2057] recipient: [29:51:2095] Leader for TabletID 72057594037927937 is [29:56:2097] sender: [29:74:2057] recipient: [29:14:2061] !Reboot 72057594037927937 (actor [29:56:2097]) on event NKikimr::TEvKeyValue::TEvExecuteTransaction ! 
Leader for TabletID 72057594037927937 is [29:56:2097] sender: [29:86:2057] recipient: [29:36:2083] Leader for TabletID 72057594037927937 is [29:56:2097] sender: [29:88:2057] recipient: [29:14:2061] Leader for TabletID 72057594037927937 is [29:56:2097] sender: [29:90:2057] recipient: [29:89:2117] Leader for TabletID 72057594037927937 is [29:91:2118] sender: [29:92:2057] recipient: [29:89:2117] !Reboot 72057594037927937 (actor [29:56:2097]) rebooted! !Reboot 72057594037927937 (actor [29:56:2097]) tablet resolver refreshed! new actor is[29:91:2118] Leader for TabletID 72057594037927937 is [29:91:2118] sender: [29:145:2057] recipient: [29:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [30:54:2057] recipient: [30:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [30:54:2057] recipient: [30:51:2095] Leader for TabletID 72057594037927937 is [30:56:2097] sender: [30:57:2057] recipient: [30:51:2095] Leader for TabletID 72057594037927937 is [30:56:2097] sender: [30:74:2057] recipient: [30:14:2061] !Reboot 72057594037927937 (actor [30:56:2097]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [30:56:2097] sender: [30:87:2057] recipient: [30:36:2083] Leader for TabletID 72057594037927937 is [30:56:2097] sender: [30:90:2057] recipient: [30:14:2061] Leader for TabletID 72057594037927937 is [30:56:2097] sender: [30:91:2057] recipient: [30:89:2117] Leader for TabletID 72057594037927937 is [30:92:2118] sender: [30:93:2057] recipient: [30:89:2117] !Reboot 72057594037927937 (actor [30:56:2097]) rebooted! !Reboot 72057594037927937 (actor [30:56:2097]) tablet resolver refreshed! new actor is[30:92:2118] Leader for TabletID 72057594037927937 is [30:92:2118] sender: [30:146:2057] recipient: [30:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [31:54:2057] recipient: [31:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [31:54:2057] recipient: [31:51:2095] Leader for TabletID 72057594037927937 is [31:56:2097] sender: [31:57:2057] recipient: [31:51:2095] Leader for TabletID 72057594037927937 is [31:56:2097] sender: [31:74:2057] recipient: [31:14:2061] !Reboot 72057594037927937 (actor [31:56:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [31:56:2097] sender: [31:90:2057] recipient: [31:36:2083] Leader for TabletID 72057594037927937 is [31:56:2097] sender: [31:92:2057] recipient: [31:14:2061] Leader for TabletID 72057594037927937 is [31:56:2097] sender: [31:94:2057] recipient: [31:93:2120] Leader for TabletID 72057594037927937 is [31:95:2121] sender: [31:96:2057] recipient: [31:93:2120] !Reboot 72057594037927937 (actor [31:56:2097]) rebooted! !Reboot 72057594037927937 (actor [31:56:2097]) tablet resolver refreshed! new actor is[31:95:2121] Leader for TabletID 72057594037927937 is [31:95:2121] sender: [31:149:2057] recipient: [31:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [32:54:2057] recipient: [32:52:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [32:54:2057] recipient: [32:52:2095] Leader for TabletID 72057594037927937 is [32:56:2097] sender: [32:57:2057] recipient: [32:52:2095] Leader for TabletID 72057594037927937 is [32:56:2097] sender: [32:74:2057] recipient: [32:14:2061] !Reboot 72057594037927937 (actor [32:56:2097]) on event NKikimr::TEvKeyValue::TEvRead ! 
Leader for TabletID 72057594037927937 is [32:56:2097] sender: [32:90:2057] recipient: [32:36:2083] Leader for TabletID 72057594037927937 is [32:56:2097] sender: [32:93:2057] recipient: [32:14:2061] Leader for TabletID 72057594037927937 is [32:56:2097] sender: [32:94:2057] recipient: [32:92:2120] Leader for TabletID 72057594037927937 is [32:95:2121] sender: [32:96:2057] recipient: [32:92:2120] !Reboot 72057594037927937 (actor [32:56:2097]) rebooted! !Reboot 72057594037927937 (actor [32:56:2097]) tablet resolver refreshed! new actor is[32:95:2121] Leader for TabletID 72057594037927937 is [32:95:2121] sender: [32:149:2057] recipient: [32:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [33:54:2057] recipient: [33:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [33:54:2057] recipient: [33:51:2095] Leader for TabletID 72057594037927937 is [33:56:2097] sender: [33:57:2057] recipient: [33:51:2095] Leader for TabletID 72057594037927937 is [33:56:2097] sender: [33:74:2057] recipient: [33:14:2061] >> DataStreams::Test_Crreate_AutoPartitioning_Disabled [GOOD] >> TTxLocatorTest::TestSignificantRequestWhenRunReserveTx [GOOD] >> TTxLocatorTest::TestAllocateAll [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/services/datastreams/ut/unittest >> DataStreams::TestListShards1Shard [GOOD] Test command err: 2025-05-07T08:54:03.584369Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501624427547608497:2143];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:54:03.584943Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002827/r3tmp/tmp7Bp2JZ/pdisk_1.dat 2025-05-07T08:54:04.443093Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:54:04.482295Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:54:04.482396Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:54:04.487876Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 28456, node 1 2025-05-07T08:54:04.794740Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:54:04.794775Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:54:04.794785Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:54:04.794890Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:11965 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:54:05.267759Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:54:05.603381Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710658:0, at schemeshard: 72057594046644480 TClient is connected to server localhost:11965 2025-05-07T08:54:05.831429Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:54:05.858281Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710659, at schemeshard: 72057594046644480 2025-05-07T08:54:06.502257Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710661:0, at schemeshard: 72057594046644480 encryption_type: NONE records { sequence_number: "0" shard_id: "shard-000000" } records { sequence_number: "1" shard_id: "shard-000000" } records { sequence_number: "2" shard_id: "shard-000000" } records { sequence_number: "3" shard_id: "shard-000000" } records { sequence_number: "4" shard_id: "shard-000000" } records { sequence_number: "5" shard_id: "shard-000000" } records { sequence_number: "6" shard_id: "shard-000000" } records { sequence_number: "7" shard_id: "shard-000000" } records { sequence_number: "8" shard_id: "shard-000000" } records { sequence_number: "9" shard_id: "shard-000000" } records { sequence_number: "10" shard_id: "shard-000000" } records { sequence_number: "11" shard_id: "shard-000000" } records { sequence_number: "12" shard_id: "shard-000000" } records { sequence_number: "13" shard_id: "shard-000000" } records { sequence_number: "14" shard_id: "shard-000000" } records { sequence_number: "15" shard_id: "shard-000000" } records { sequence_number: "16" shard_id: "shard-000000" } records { sequence_number: "17" shard_id: "shard-000000" } records { sequence_number: "18" shard_id: "shard-000000" } records { sequence_number: "19" shard_id: "shard-000000" } records { sequence_number: "20" shard_id: "shard-000000" } records { sequence_number: "21" shard_id: "shard-000000" } records { sequence_number: "22" 
shard_id: "shard-000000" } records { sequence_number: "23" shard_id: "shard-000000" } records { sequence_number: "24" shard_id: "shard-000000" } records { sequence_number: "25" shard_id: "shard-000000" } records { sequence_number: "26" shard_id: "shard-000000" } records { sequence_number: "27" shard_id: "shard-000000" } records { sequence_number: "28" shard_id: "shard-000000" } records { sequence_number: "29" shard_id: "shard-000000" } 2025-05-07T08:54:06.679706Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T08:54:06.901745Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpDropPersQueueGroup, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T08:54:06.960751Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037889 not found 2025-05-07T08:54:06.960792Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037890 not found 2025-05-07T08:54:06.960814Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037888 not found Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestNonChargeableUser","id":"reserved_resources-root-72075186224037888-1746608046254-1","schema":"yds.resources.reserved.v1","tags":{"reserved_throughput_bps":1048576,"reserved_consumers_count":0,"reserved_storage_bytes":90596966400},"usage":{"quantity":0,"unit":"second","start":1746608046,"finish":1746608046},"labels":{"datastreams_stream_name":"stream_TestNonChargeableUser","ydb_database":"root"},"version":"v1","source_id":"72075186224037888","source_wt":1746608046}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestNonChargeableUser","id":"used_storage-root-72075186224037888-1746608046254-2","schema":"ydb.serverless.v1","tags":{"ydb_size":0},"usage":{"quantity":0,"unit":"byte*second","start":1746608046,"finish":1746608046},"labels":{"datastreams_stream_name":"stream_TestNonChargeableUser","ydb_database":"root"},"version":"1.0.0","source_id":"72075186224037888","source_wt":1746608046}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestNonChargeableUser","id":"reserved_resources-root-72075186224037890-1746608046782-3","schema":"yds.resources.reserved.v1","tags":{"reserved_throughput_bps":1048576,"reserved_consumers_count":0,"reserved_storage_bytes":90596966400},"usage":{"quantity":0,"unit":"second","start":1746608046,"finish":1746608046},"labels":{"datastreams_stream_name":"stream_TestNonChargeableUser","ydb_database":"root"},"version":"v1","source_id":"72075186224037890","source_wt":1746608046}' Got line from metering file data: 
'{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestNonChargeableUser","id":"used_storage-root-72075186224037890-1746608046782-4","schema":"ydb.serverless.v1","tags":{"ydb_size":0},"usage":{"quantity":0,"unit":"byte*second","start":1746608046,"finish":1746608046},"labels":{"datastreams_stream_name":"stream_TestNonChargeableUser","ydb_database":"root"},"version":"1.0.0","source_id":"72075186224037890","source_wt":1746608046}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestNonChargeableUser","id":"reserved_resources-root-72075186224037888-1746608046775-5","schema":"yds.resources.reserved.v1","tags":{"reserved_throughput_bps":1048576,"reserved_consumers_count":0,"reserved_storage_bytes":90596966400},"usage":{"quantity":0,"unit":"second","start":1746608046,"finish":1746608046},"labels":{"datastreams_stream_name":"stream_TestNonChargeableUser","ydb_database":"root"},"version":"v1","source_id":"72075186224037888","source_wt":1746608046}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestNonChargeableUser","id":"used_storage-root-72075186224037888-1746608046775-6","schema":"ydb.serverless.v1","tags":{"ydb_size":0},"usage":{"quantity":0,"unit":"byte*second","start":1746608046,"finish":1746608046},"labels":{"datastreams_stream_name":"stream_TestNonChargeableUser","ydb_database":"root"},"version":"1.0.0","source_id":"72075186224037888","source_wt":1746608046}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestNonChargeableUser","id":"reserved_resources-root-72075186224037888-1746608046254-1","schema":"yds.resources.reserved.v1","tags":{"reserved_throughput_bps":1048576,"reserved_consumers_count":0,"reserved_storage_bytes":90596966400},"usage":{"quantity":0,"unit":"second","start":1746608046,"finish":1746608046},"labels":{"datastreams_stream_name":"stream_TestNonChargeableUser","ydb_database":"root"},"version":"v1","source_id":"72075186224037888","source_wt":1746608046}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestNonChargeableUser","id":"used_storage-root-72075186224037888-1746608046254-2","schema":"ydb.serverless.v1","tags":{"ydb_size":0},"usage":{"quantity":0,"unit":"byte*second","start":1746608046,"finish":1746608046},"labels":{"datastreams_stream_name":"stream_TestNonChargeableUser","ydb_database":"root"},"version":"1.0.0","source_id":"72075186224037888","source_wt":1746608046}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestNonChargeableUser","id":"reserved_resources-root-72075186224037890-1746608046782-3","schema":"yds.resources.reserved.v1","tags":{"reserved_throughput_bps":1048576,"reserved_consumers_count":0,"reserved_storage_bytes":90596966400},"usage":{"quantity":0,"unit":"second","start":1746608046,"finish":1746608046},"labels":{"datastreams_stream_name":"stream_TestNonChargeableUser","ydb_database":"root"},"version":"v1","source_id":"72075186224037890","source_wt":1746608046}' Go ... tep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-05-07T08:54:18.123320Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:54:18.250334Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710658:0, at schemeshard: 72057594046644480 TClient is connected to server localhost:28054 2025-05-07T08:54:18.559274Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... WARNING: All log messages before y_absl::InitializeLog() is called are written to STDERR E0000 00:00:1746608058.939858 201136 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn E0000 00:00:1746608058.940055 201136 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn E0000 00:00:1746608058.960529 201136 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn E0000 00:00:1746608058.960676 201136 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn E0000 00:00:1746608058.976485 201136 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn E0000 00:00:1746608058.976603 201136 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn E0000 00:00:1746608058.985651 201136 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn E0000 00:00:1746608058.985761 201136 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn 2025-05-07T08:54:19.010581Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976710661:0, at schemeshard: 72057594046644480 2025-05-07T08:54:19.138084Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976710662:0, at schemeshard: 72057594046644480 E0000 00:00:1746608059.281735 201136 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn E0000 00:00:1746608059.281848 201136 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn 2025-05-07T08:54:19.302647Z node 7 
:FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976710663:0, at schemeshard: 72057594046644480 E0000 00:00:1746608059.479677 201136 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn E0000 00:00:1746608059.479838 201136 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn 2025-05-07T08:54:19.502291Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976710664:0, at schemeshard: 72057594046644480 E0000 00:00:1746608059.626823 201136 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn E0000 00:00:1746608059.626944 201136 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn E0000 00:00:1746608059.674056 201136 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn E0000 00:00:1746608059.674201 201136 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn 2025-05-07T08:54:19.792640Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpDropPersQueueGroup, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T08:54:19.856648Z node 7 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 7, TabletId: 72075186224037888 not found 2025-05-07T08:54:19.856679Z node 7 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 7, TabletId: 72075186224037893 not found 2025-05-07T08:54:19.856692Z node 7 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 7, TabletId: 72075186224037890 not found 2025-05-07T08:54:19.856705Z node 7 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 7, TabletId: 72075186224037892 not found 2025-05-07T08:54:19.856725Z node 7 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 7, TabletId: 72075186224037889 not found 2025-05-07T08:54:19.856750Z node 7 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 7, TabletId: 72075186224037891 not found E0000 00:00:1746608059.906358 201136 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn E0000 00:00:1746608059.906466 201136 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn 2025-05-07T08:54:24.293209Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: 
fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7501624516701266751:2074];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:54:24.293302Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002827/r3tmp/tmpMJyGVB/pdisk_1.dat 2025-05-07T08:54:24.673471Z node 10 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:54:24.720417Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:54:24.720539Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:54:24.724124Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 5656, node 10 2025-05-07T08:54:25.114915Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:54:25.114943Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:54:25.114951Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:54:25.115100Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:25418 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:54:25.652433Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:54:25.805354Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480 TClient is connected to server localhost:25418 2025-05-07T08:54:26.056849Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 
E0000 00:00:1746608066.313368 202904 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn E0000 00:00:1746608066.329949 202904 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn E0000 00:00:1746608066.340094 202904 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn E0000 00:00:1746608066.358434 202904 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn E0000 00:00:1746608066.374056 202904 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_allocator/ut/unittest >> TTxLocatorTest::TestAllocateAll [GOOD] Test command err: 2025-05-07T08:54:32.673516Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:1904: Tablet: 72057594046447617 LockedInitializationPath Marker# TSYS32 2025-05-07T08:54:32.674031Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:917: Tablet: 72057594046447617 HandleFindLatestLogEntry, NODATA Promote Marker# TSYS19 2025-05-07T08:54:32.674813Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:221: Tablet: 72057594046447617 TTablet::WriteZeroEntry. logid# [72057594046447617:2:0:0:0:0:0] Marker# TSYS01 2025-05-07T08:54:32.676785Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:0:0:0:20:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-05-07T08:54:32.677297Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:17: tablet# 72057594046447617 OnActivateExecutor 2025-05-07T08:54:32.689172Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:1:1:28672:35:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-05-07T08:54:32.689282Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:1:0:0:42:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-05-07T08:54:32.689358Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:2:1:8192:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-05-07T08:54:32.689452Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:1381: Tablet: 72057594046447617 GcCollect 0 channel, tablet:gen:step => 2:0 Marker# TSYS28 2025-05-07T08:54:32.689580Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:2:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-05-07T08:54:32.689684Z node 1 :TX_ALLOCATOR DEBUG: txallocator__scheme.cpp:22: tablet# 72057594046447617 TTxSchema Complete 2025-05-07T08:54:32.689831Z node 1 :TABLET_MAIN INFO: tablet_sys.cpp:1015: Tablet: 72057594046447617 Active! 
Generation: 2, Type: TxAllocator started in 0msec Marker# TSYS24 2025-05-07T08:54:32.690649Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:70:2105] requested range size#281474976710655 2025-05-07T08:54:32.691195Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:3:1:24576:70:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-05-07T08:54:32.691294Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:3:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-05-07T08:54:32.691408Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 0 Reserved to# 281474976710655 2025-05-07T08:54:32.691467Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:70:2105] TEvAllocateResult from# 0 to# 281474976710655 expected SUCCESS 2025-05-07T08:54:32.698838Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:75:2109] requested range size#1 2025-05-07T08:54:32.699043Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 0 Reserved from# 281474976710655 Reserved to# 0 2025-05-07T08:54:32.699101Z node 1 :TX_ALLOCATOR ERROR: txallocator_impl.cpp:84: tablet# 72057594046447617 Send to Sender# [1:75:2109] TEvAllocateResult status# IMPOSIBLE expected IMPOSIBLE ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_allocator/ut/unittest >> TTxLocatorTest::TestSignificantRequestWhenRunReserveTx [GOOD] Test command err: 2025-05-07T08:54:32.477953Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:1904: Tablet: 72057594046447617 LockedInitializationPath Marker# TSYS32 2025-05-07T08:54:32.478813Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:917: Tablet: 72057594046447617 HandleFindLatestLogEntry, NODATA Promote Marker# TSYS19 2025-05-07T08:54:32.479673Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:221: Tablet: 72057594046447617 TTablet::WriteZeroEntry. 
logid# [72057594046447617:2:0:0:0:0:0] Marker# TSYS01 2025-05-07T08:54:32.481530Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:0:0:0:20:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-05-07T08:54:32.483076Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:17: tablet# 72057594046447617 OnActivateExecutor 2025-05-07T08:54:32.500318Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:1:1:28672:35:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-05-07T08:54:32.500498Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:1:0:0:42:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-05-07T08:54:32.500571Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:2:1:8192:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-05-07T08:54:32.500661Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:1381: Tablet: 72057594046447617 GcCollect 0 channel, tablet:gen:step => 2:0 Marker# TSYS28 2025-05-07T08:54:32.500780Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:2:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-05-07T08:54:32.500909Z node 1 :TX_ALLOCATOR DEBUG: txallocator__scheme.cpp:22: tablet# 72057594046447617 TTxSchema Complete 2025-05-07T08:54:32.501070Z node 1 :TABLET_MAIN INFO: tablet_sys.cpp:1015: Tablet: 72057594046447617 Active! Generation: 2, Type: TxAllocator started in 0msec Marker# TSYS24 2025-05-07T08:54:32.503065Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:80:2115] requested range size#100000 2025-05-07T08:54:32.503489Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:82:2117] requested range size#100000 2025-05-07T08:54:32.504013Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:86:2121] requested range size#100000 2025-05-07T08:54:32.504341Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:84:2119] requested range size#100000 2025-05-07T08:54:32.504639Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:88:2123] requested range size#100000 2025-05-07T08:54:32.505004Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:70:2105] requested range size#100000 2025-05-07T08:54:32.505223Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:3:1:24576:70:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-05-07T08:54:32.505320Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:3:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-05-07T08:54:32.505492Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:72:2107] requested range size#100000 2025-05-07T08:54:32.505661Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:4:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 
0} 2025-05-07T08:54:32.505769Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:4:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-05-07T08:54:32.505946Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:5:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-05-07T08:54:32.506472Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:74:2109] requested range size#100000 2025-05-07T08:54:32.506699Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:5:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-05-07T08:54:32.506763Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:6:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-05-07T08:54:32.506941Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:76:2111] requested range size#100000 2025-05-07T08:54:32.507125Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:6:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-05-07T08:54:32.507320Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:7:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-05-07T08:54:32.507397Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:78:2113] requested range size#100000 2025-05-07T08:54:32.507616Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 0 Reserved to# 100000 2025-05-07T08:54:32.507677Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:80:2115] TEvAllocateResult from# 0 to# 100000 2025-05-07T08:54:32.507817Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:7:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-05-07T08:54:32.507868Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:8:1:24576:74:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-05-07T08:54:32.507947Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 100000 Reserved to# 200000 2025-05-07T08:54:32.507973Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:82:2117] TEvAllocateResult from# 100000 to# 200000 2025-05-07T08:54:32.508182Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:8:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-05-07T08:54:32.508243Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:9:1:24576:74:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-05-07T08:54:32.508320Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 200000 Reserved to# 300000 
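
The TTxReserve / TEvAllocateResult pairs in this trace, together with the IMPOSIBLE outcomes in the TestAllocateAll trace above and the TestImposibleSize trace further down, are all consistent with a simple bump-pointer reservation over a fixed tx-id space. A minimal model inferred from the log output itself, not taken from txallocator__reserve.cpp; the cap 281474976710655 (2^48 - 1) is read off the TestAllocateAll result, and the size == 0 guard is an assumption since the TestZeroRange trace is not shown here:

    #include <cstdint>

    // Model of the tx-id allocator as observed in these tests: one cursor
    // over [0, MaxTxId]; each TEvAllocate reserves the next contiguous
    // half-open range [from, to). Names are illustrative.
    class TTxAllocatorModel {
    public:
        static constexpr uint64_t MaxTxId = 281474976710655ULL; // 2^48 - 1

        // true  -> "TTxReserve Complete Successed# 1" plus TEvAllocateResult from/to
        // false -> "TEvAllocateResult status# IMPOSIBLE"
        bool Reserve(uint64_t size, uint64_t& from, uint64_t& to) {
            if (size == 0 || MaxTxId - Reserved < size) {
                return false;
            }
            from = Reserved;
            Reserved += size;
            to = Reserved;
            return true;
        }

    private:
        uint64_t Reserved = 0; // ids below this are already handed out
    };

Checked against the TestImposibleSize trace below: with 123456 ids taken, a request for 281474976587200 fails (123456 + 281474976587200 exceeds 2^48 - 1) while 246912 succeeds with range 123456..370368, exactly as logged.
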
2025-05-07T08:54:32.508345Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:86:2121] TEvAllocateResult from# 200000 to# 300000 2025-05-07T08:54:32.508425Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:9:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-05-07T08:54:32.508508Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 300000 Reserved to# 400000 2025-05-07T08:54:32.508535Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:84:2119] TEvAllocateResult from# 300000 to# 400000 2025-05-07T08:54:32.508690Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:10:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-05-07T08:54:32.508736Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 400000 Reserved to# 500000 2025-05-07T08:54:32.508759Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:88:2123] TEvAllocateResult from# 400000 to# 500000 2025-05-07T08:54:32.508817Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:10:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-05-07T08:54:32.508858Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 500000 Reserved to# 600000 2025-05-07T08:54:32.508896Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:70:2105] TEvAllocateResult from# 500000 to# 600000 2025-05-07T08:54:32.509036Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:11:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-05-07T08:54:32.509103Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 600000 Reserved to# 700000 2025-05-07T08:54:32.509126Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:72:2107] TEvAllocateResult from# 600000 to# 700000 2025-05-07T08:54:32.509202Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 700000 Reserved to# 800000 2025-05-07T08:54:32.509227Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:74:2109] TEvAllocateResult from# 700000 to# 800000 2025-05-07T08:54:32.509335Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:11:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-05-07T08:54:32.509417Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:12:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-05-07T08:54:32.509495Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 800000 Reserved to# 900000 2025-05-07T08:54:32.509523Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 
72057594046447617 Send to Sender# [1:76:2111] TEvAllocateResult from# 800000 to# 900000 2025-05-07T08:54:32.509694Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:12:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-05-07T08:54:32.509757Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 900000 Reserved to# 1000000 2025-05-07T08:54:32.509782Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:78:2113] TEvAllocateResult from# 900000 to# 1000000 expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS 2025-05-07T08:54:32.520964Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 720575 ... ender# [1:401:2435] TEvAllocateResult from# 8500000 to# 8600000 2025-05-07T08:54:32.656164Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:92:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-05-07T08:54:32.656227Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 8600000 Reserved to# 8700000 2025-05-07T08:54:32.656267Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:403:2437] TEvAllocateResult from# 8600000 to# 8700000 2025-05-07T08:54:32.656373Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:92:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-05-07T08:54:32.656457Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 8700000 Reserved to# 8800000 2025-05-07T08:54:32.656482Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:405:2439] TEvAllocateResult from# 8700000 to# 8800000 2025-05-07T08:54:32.656592Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 8800000 Reserved to# 8900000 2025-05-07T08:54:32.656621Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:407:2441] TEvAllocateResult from# 8800000 to# 8900000 2025-05-07T08:54:32.656730Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 8900000 Reserved to# 9000000 2025-05-07T08:54:32.656757Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:409:2443] TEvAllocateResult from# 8900000 to# 9000000 expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS 2025-05-07T08:54:32.669580Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:431:2465] requested range size#100000 2025-05-07T08:54:32.678527Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:433:2467] requested range size#100000 2025-05-07T08:54:32.679148Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# 
[1:435:2469] requested range size#100000 2025-05-07T08:54:32.679590Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:437:2471] requested range size#100000 2025-05-07T08:54:32.679760Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:93:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-05-07T08:54:32.679975Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:93:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-05-07T08:54:32.680221Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:439:2473] requested range size#100000 2025-05-07T08:54:32.680496Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:441:2475] requested range size#100000 2025-05-07T08:54:32.680612Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:94:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-05-07T08:54:32.680738Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:94:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-05-07T08:54:32.680985Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:443:2477] requested range size#100000 2025-05-07T08:54:32.681168Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:95:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-05-07T08:54:32.681359Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:95:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-05-07T08:54:32.681402Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:96:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-05-07T08:54:32.681532Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:445:2479] requested range size#100000 2025-05-07T08:54:32.681672Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:96:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-05-07T08:54:32.681821Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:97:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-05-07T08:54:32.681964Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:447:2481] requested range size#100000 2025-05-07T08:54:32.694603Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:97:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-05-07T08:54:32.695048Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:449:2483] requested range size#100000 2025-05-07T08:54:32.695285Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:98:1:24576:76:0] Status# 
OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-05-07T08:54:32.695464Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 9000000 Reserved to# 9100000 2025-05-07T08:54:32.695499Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:431:2465] TEvAllocateResult from# 9000000 to# 9100000 2025-05-07T08:54:32.695572Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:98:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-05-07T08:54:32.695643Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:99:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-05-07T08:54:32.695812Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 9100000 Reserved to# 9200000 2025-05-07T08:54:32.695841Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:433:2467] TEvAllocateResult from# 9100000 to# 9200000 2025-05-07T08:54:32.695896Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:99:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-05-07T08:54:32.696050Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 9200000 Reserved to# 9300000 2025-05-07T08:54:32.696095Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:435:2469] TEvAllocateResult from# 9200000 to# 9300000 2025-05-07T08:54:32.696156Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:100:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-05-07T08:54:32.696231Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 9300000 Reserved to# 9400000 2025-05-07T08:54:32.696273Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:437:2471] TEvAllocateResult from# 9300000 to# 9400000 2025-05-07T08:54:32.696455Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 9400000 Reserved to# 9500000 2025-05-07T08:54:32.696487Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:439:2473] TEvAllocateResult from# 9400000 to# 9500000 2025-05-07T08:54:32.696562Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:100:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-05-07T08:54:32.696610Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:101:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-05-07T08:54:32.696756Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 9500000 Reserved to# 9600000 2025-05-07T08:54:32.696785Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:441:2475] TEvAllocateResult from# 9500000 to# 9600000 
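
Driving that model (hypothetical code continuing the sketch above) reproduces the shape of TestSignificantRequestWhenRunReserveTx: many senders, each granted a disjoint 100000-wide range. In the real tablet each reservation is additionally made durable first, which is why TEvPutResult write confirmations interleave with the TEvAllocateResult sends in this trace:

    #include <cstdio>
    #include <cstdint>

    int main() {
        TTxAllocatorModel allocator; // from the sketch above
        for (int sender = 0; sender < 100; ++sender) {
            uint64_t from = 0, to = 0;
            if (allocator.Reserve(100000, from, to)) {
                // Prints 0..100000, 100000..200000, ..., 9900000..10000000,
                // matching the "TEvAllocateResult from# X to# Y" lines.
                std::printf("sender %d: [%llu, %llu)\n", sender,
                            static_cast<unsigned long long>(from),
                            static_cast<unsigned long long>(to));
            }
        }
        return 0;
    }
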
2025-05-07T08:54:32.696922Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 9600000 Reserved to# 9700000
2025-05-07T08:54:32.696948Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:443:2477] TEvAllocateResult from# 9600000 to# 9700000
2025-05-07T08:54:32.696998Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:101:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0}
2025-05-07T08:54:32.697052Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:102:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0}
2025-05-07T08:54:32.697157Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 9700000 Reserved to# 9800000
2025-05-07T08:54:32.697182Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:445:2479] TEvAllocateResult from# 9700000 to# 9800000
2025-05-07T08:54:32.697227Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:102:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0}
2025-05-07T08:54:32.697342Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 9800000 Reserved to# 9900000
2025-05-07T08:54:32.697369Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:447:2481] TEvAllocateResult from# 9800000 to# 9900000
2025-05-07T08:54:32.697444Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 9900000 Reserved to# 10000000
2025-05-07T08:54:32.697469Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:449:2483] TEvAllocateResult from# 9900000 to# 10000000
expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS
>> TTxLocatorTest::TestWithReboot
>> TTxLocatorTest::TestImposibleSize
>> THiveTest::TestHiveBalancerWithPrefferedDC1
>> TTxLocatorTest::TestImposibleSize [GOOD]
>> TCutHistoryRestrictions::BasicTest [GOOD]
>> TCutHistoryRestrictions::EmptyAllowList [GOOD]
>> TCutHistoryRestrictions::BothListsEmpty [GOOD]
>> ObjectDistribution::TestImbalanceCalcualtion
------- [TM] {asan, default-linux-x86_64, release} ydb/services/datastreams/ut/unittest >> DataStreams::Test_Crreate_AutoPartitioning_Disabled [GOOD]
Test command err:
2025-05-07T08:54:12.368429Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501624466232763810:2076];send_to=[0:7307199536658146131:7762515];
2025-05-07T08:54:12.369000Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message;
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002812/r3tmp/tmpHI2CV1/pdisk_1.dat
2025-05-07T08:54:12.977664Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded
2025-05-07T08:54:12.990606Z node 1 :HIVE WARN: node_info.cpp:25:
HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:54:12.990728Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:54:13.000800Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 6638, node 1 2025-05-07T08:54:13.204065Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:54:13.204092Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:54:13.204100Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:54:13.204229Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:10924 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:54:13.649792Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:54:13.771177Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710658:0, at schemeshard: 72057594046644480 TClient is connected to server localhost:10924 2025-05-07T08:54:14.003304Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T08:54:14.592479Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976710661:0, at schemeshard: 72057594046644480 2025-05-07T08:54:15.099905Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T08:54:19.138814Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7501624494275662818:2142];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:54:19.138891Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002812/r3tmp/tmpND3Xrv/pdisk_1.dat 2025-05-07T08:54:19.483706Z node 4 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:54:19.527391Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:54:19.527487Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:54:19.549867Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 19780, node 4 2025-05-07T08:54:19.770842Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:54:19.770871Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:54:19.770882Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:54:19.771039Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23505 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:54:20.086357Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T08:54:20.200495Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710658:0, at schemeshard: 72057594046644480 TClient is connected to server localhost:23505 2025-05-07T08:54:20.394970Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... encryption_type: NONE records { sequence_number: "0" shard_id: "shard-000000" } records { sequence_number: "0" shard_id: "shard-000002" } records { sequence_number: "0" shard_id: "shard-000001" } records { sequence_number: "1" shard_id: "shard-000001" } records { sequence_number: "1" shard_id: "shard-000002" } records { sequence_number: "2" shard_id: "shard-000001" } records { sequence_number: "3" shard_id: "shard-000001" } records { sequence_number: "4" shard_id: "shard-000001" } records { sequence_number: "1" shard_id: "shard-000000" } records { sequence_number: "2" shard_id: "shard-000002" } records { sequence_number: "3" shard_id: "shard-000002" } records { sequence_number: "2" shard_id: "shard-000000" } records { sequence_number: "4" shard_id: "shard-000002" } records { sequence_number: "5" shard_id: "shard-000002" } records { sequence_number: "3" shard_id: "shard-000000" } records { sequence_number: "6" shard_id: "shard-000002" } records { sequence_number: "5" shard_id: "shard-000001" } records { sequence_number: "6" shard_id: "shard-000001" } records { sequence_number: "4" shard_id: "shard-000000" } records { sequence_number: "7" shard_id: "shard-000002" } records { sequence_number: "8" shard_id: "shard-000002" } records { sequence_number: "5" shard_id: "shard-000000" } records { sequence_number: "9" shard_id: "shard-000002" } records { sequence_number: "6" shard_id: "shard-000000" } records { sequence_number: "10" shard_id: "shard-000002" } records { sequence_number: "7" shard_id: "shard-000000" } records { sequence_number: "11" shard_id: "shard-000002" } records { sequence_number: "7" shard_id: "shard-000001" } records { sequence_number: "8" shard_id: "shard-000000" } records { sequence_number: "9" shard_id: "shard-000000" } ALTER_SCHEME: { Name: "test-topic" Split { Partition: 1 SplitBoundary: "a" } } 2025-05-07T08:54:21.733261Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 107:0, at schemeshard: 72057594046644480 2025-05-07T08:54:23.163308Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T08:54:23.348054Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T08:54:23.600006Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T08:54:24.039949Z node 4 :FLAT_TX_SCHEMESHARD WARN: 
schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T08:54:24.139020Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7501624494275662818:2142];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:54:24.139115Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:54:25.962429Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7501624522263833071:2076];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:54:25.962473Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002812/r3tmp/tmpmXtZ8B/pdisk_1.dat 2025-05-07T08:54:26.242121Z node 7 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 18700, node 7 2025-05-07T08:54:26.369069Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:54:26.369178Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:54:26.370279Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:54:26.370306Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:54:26.370314Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:54:26.370473Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-05-07T08:54:26.384912Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:18940 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-05-07T08:54:26.679714Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:54:26.786014Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480 TClient is connected to server localhost:18940 2025-05-07T08:54:27.135701Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... >> ObjectDistribution::TestImbalanceCalcualtion [GOOD] >> ObjectDistribution::TestAllowedDomainsAndDown [GOOD] >> ObjectDistribution::TestAddSameNode [GOOD] >> ObjectDistribution::TestManyIrrelevantNodes >> DataStreams::TestReservedStorageMetering [GOOD] >> DataStreams::TestReservedConsumersMetering >> TTxLocatorTest::TestWithReboot [GOOD] |90.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_allocator/ut/unittest >> TargetDiscoverer::IndexedTable [GOOD] >> TargetDiscoverer::Negative [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_allocator/ut/unittest >> TTxLocatorTest::TestImposibleSize [GOOD] Test command err: 2025-05-07T08:54:33.712979Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:1904: Tablet: 72057594046447617 LockedInitializationPath Marker# TSYS32 2025-05-07T08:54:33.713437Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:917: Tablet: 72057594046447617 HandleFindLatestLogEntry, NODATA Promote Marker# TSYS19 2025-05-07T08:54:33.716601Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:221: Tablet: 72057594046447617 TTablet::WriteZeroEntry. 
logid# [72057594046447617:2:0:0:0:0:0] Marker# TSYS01 2025-05-07T08:54:33.718510Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:0:0:0:20:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-05-07T08:54:33.719081Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:17: tablet# 72057594046447617 OnActivateExecutor 2025-05-07T08:54:33.729505Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:1:1:28672:35:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-05-07T08:54:33.729581Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:1:0:0:42:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-05-07T08:54:33.729634Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:2:1:8192:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-05-07T08:54:33.729686Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:1381: Tablet: 72057594046447617 GcCollect 0 channel, tablet:gen:step => 2:0 Marker# TSYS28 2025-05-07T08:54:33.729778Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:2:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-05-07T08:54:33.729847Z node 1 :TX_ALLOCATOR DEBUG: txallocator__scheme.cpp:22: tablet# 72057594046447617 TTxSchema Complete 2025-05-07T08:54:33.729950Z node 1 :TABLET_MAIN INFO: tablet_sys.cpp:1015: Tablet: 72057594046447617 Active! Generation: 2, Type: TxAllocator started in 0msec Marker# TSYS24 2025-05-07T08:54:33.730748Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:70:2105] requested range size#281474976710656 2025-05-07T08:54:33.730920Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 0 Reserved from# 0 Reserved to# 0 2025-05-07T08:54:33.739323Z node 1 :TX_ALLOCATOR ERROR: txallocator_impl.cpp:84: tablet# 72057594046447617 Send to Sender# [1:70:2105] TEvAllocateResult status# IMPOSIBLE expected IMPOSIBLE 2025-05-07T08:54:33.739978Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:74:2108] requested range size#123456 2025-05-07T08:54:33.740533Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:3:1:24576:70:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-05-07T08:54:33.740603Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:3:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-05-07T08:54:33.740712Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 0 Reserved to# 123456 2025-05-07T08:54:33.740773Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:74:2108] TEvAllocateResult from# 0 to# 123456 expected SUCCESS 2025-05-07T08:54:33.741162Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:78:2112] requested range size#281474976587200 2025-05-07T08:54:33.741331Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 0 Reserved 
from# 123456 Reserved to# 0 2025-05-07T08:54:33.741372Z node 1 :TX_ALLOCATOR ERROR: txallocator_impl.cpp:84: tablet# 72057594046447617 Send to Sender# [1:78:2112] TEvAllocateResult status# IMPOSIBLE expected IMPOSIBLE 2025-05-07T08:54:33.741806Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:81:2115] requested range size#246912 2025-05-07T08:54:33.742285Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:4:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-05-07T08:54:33.742355Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:4:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-05-07T08:54:33.742452Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 123456 Reserved to# 370368 2025-05-07T08:54:33.742495Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:81:2115] TEvAllocateResult from# 123456 to# 370368 expected SUCCESS 2025-05-07T08:54:33.742925Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:85:2119] requested range size#281474976340288 2025-05-07T08:54:33.743060Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 0 Reserved from# 370368 Reserved to# 0 2025-05-07T08:54:33.743110Z node 1 :TX_ALLOCATOR ERROR: txallocator_impl.cpp:84: tablet# 72057594046447617 Send to Sender# [1:85:2119] TEvAllocateResult status# IMPOSIBLE expected IMPOSIBLE >> TTxLocatorTest::Boot >> TTxLocatorTest::TestZeroRange >> DataStreams::TestUnsupported [GOOD] >> TTxLocatorTest::TestZeroRange [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_allocator/ut/unittest >> TTxLocatorTest::TestWithReboot [GOOD] Test command err: 2025-05-07T08:54:33.708490Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:1904: Tablet: 72057594046447617 LockedInitializationPath Marker# TSYS32 2025-05-07T08:54:33.708971Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:917: Tablet: 72057594046447617 HandleFindLatestLogEntry, NODATA Promote Marker# TSYS19 2025-05-07T08:54:33.709791Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:221: Tablet: 72057594046447617 TTablet::WriteZeroEntry. 
logid# [72057594046447617:2:0:0:0:0:0] Marker# TSYS01 2025-05-07T08:54:33.711611Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:0:0:0:20:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-05-07T08:54:33.712082Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:17: tablet# 72057594046447617 OnActivateExecutor 2025-05-07T08:54:33.723975Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:1:1:28672:35:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-05-07T08:54:33.724107Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:1:0:0:42:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-05-07T08:54:33.724199Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:2:1:8192:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-05-07T08:54:33.724290Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:1381: Tablet: 72057594046447617 GcCollect 0 channel, tablet:gen:step => 2:0 Marker# TSYS28 2025-05-07T08:54:33.724425Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:2:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-05-07T08:54:33.724532Z node 1 :TX_ALLOCATOR DEBUG: txallocator__scheme.cpp:22: tablet# 72057594046447617 TTxSchema Complete 2025-05-07T08:54:33.724688Z node 1 :TABLET_MAIN INFO: tablet_sys.cpp:1015: Tablet: 72057594046447617 Active! Generation: 2, Type: TxAllocator started in 0msec Marker# TSYS24 2025-05-07T08:54:33.726303Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:80:2115] requested range size#100000 2025-05-07T08:54:33.726701Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:82:2117] requested range size#100000 2025-05-07T08:54:33.727241Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:86:2121] requested range size#100000 2025-05-07T08:54:33.727610Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:84:2119] requested range size#100000 2025-05-07T08:54:33.727906Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:88:2123] requested range size#100000 2025-05-07T08:54:33.728297Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:70:2105] requested range size#100000 2025-05-07T08:54:33.728541Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:3:1:24576:70:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-05-07T08:54:33.728642Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:3:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-05-07T08:54:33.728850Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:72:2107] requested range size#100000 2025-05-07T08:54:33.729021Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:4:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 
0} 2025-05-07T08:54:33.729137Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:4:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-05-07T08:54:33.729325Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:5:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-05-07T08:54:33.729477Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:74:2109] requested range size#100000 2025-05-07T08:54:33.729735Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:5:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-05-07T08:54:33.729799Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:6:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-05-07T08:54:33.729951Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:76:2111] requested range size#100000 2025-05-07T08:54:33.730176Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:6:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-05-07T08:54:33.730385Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:7:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-05-07T08:54:33.730458Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:78:2113] requested range size#100000 2025-05-07T08:54:33.730700Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 0 Reserved to# 100000 2025-05-07T08:54:33.730764Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:80:2115] TEvAllocateResult from# 0 to# 100000 2025-05-07T08:54:33.730897Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:7:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-05-07T08:54:33.730955Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:8:1:24576:74:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-05-07T08:54:33.731033Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 100000 Reserved to# 200000 2025-05-07T08:54:33.731060Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:82:2117] TEvAllocateResult from# 100000 to# 200000 2025-05-07T08:54:33.731260Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:8:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-05-07T08:54:33.731336Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:9:1:24576:74:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-05-07T08:54:33.731402Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 200000 Reserved to# 300000 
2025-05-07T08:54:33.731427Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:86:2121] TEvAllocateResult from# 200000 to# 300000 2025-05-07T08:54:33.731508Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:9:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-05-07T08:54:33.731582Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 300000 Reserved to# 400000 2025-05-07T08:54:33.731622Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:84:2119] TEvAllocateResult from# 300000 to# 400000 2025-05-07T08:54:33.731778Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:10:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-05-07T08:54:33.731826Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 400000 Reserved to# 500000 2025-05-07T08:54:33.731856Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:88:2123] TEvAllocateResult from# 400000 to# 500000 2025-05-07T08:54:33.731915Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:10:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-05-07T08:54:33.731961Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 500000 Reserved to# 600000 2025-05-07T08:54:33.731998Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:70:2105] TEvAllocateResult from# 500000 to# 600000 2025-05-07T08:54:33.732135Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:11:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-05-07T08:54:33.732196Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 600000 Reserved to# 700000 2025-05-07T08:54:33.732231Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:72:2107] TEvAllocateResult from# 600000 to# 700000 2025-05-07T08:54:33.732320Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 700000 Reserved to# 800000 2025-05-07T08:54:33.732359Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:74:2109] TEvAllocateResult from# 700000 to# 800000 2025-05-07T08:54:33.732463Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:11:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-05-07T08:54:33.732586Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:12:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-05-07T08:54:33.732643Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 800000 Reserved to# 900000 2025-05-07T08:54:33.732667Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 
72057594046447617 Send to Sender# [1:76:2111] TEvAllocateResult from# 800000 to# 900000 2025-05-07T08:54:33.732833Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:12:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-05-07T08:54:33.732918Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 900000 Reserved to# 1000000 2025-05-07T08:54:33.732948Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:78:2113] TEvAllocateResult from# 900000 to# 1000000 expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS 2025-05-07T08:54:33.740464Z node 1 :TABLET_MAIN NOTICE: tablet_sys.cpp:1828: Tablet: 7205759404 ... de 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:11:9:1:24576:78:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-05-07T08:54:34.153643Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 9300000 Reserved to# 9400000 2025-05-07T08:54:34.153677Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:621:2552] TEvAllocateResult from# 9300000 to# 9400000 2025-05-07T08:54:34.153822Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 9400000 Reserved to# 9500000 2025-05-07T08:54:34.153852Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:623:2554] TEvAllocateResult from# 9400000 to# 9500000 2025-05-07T08:54:34.154052Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:11:9:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-05-07T08:54:34.154122Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 9500000 Reserved to# 9600000 2025-05-07T08:54:34.154159Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:625:2556] TEvAllocateResult from# 9500000 to# 9600000 2025-05-07T08:54:34.154258Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:11:10:1:24576:78:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-05-07T08:54:34.154322Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 9600000 Reserved to# 9700000 2025-05-07T08:54:34.154346Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:627:2558] TEvAllocateResult from# 9600000 to# 9700000 2025-05-07T08:54:34.154453Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:11:10:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-05-07T08:54:34.154530Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:11:11:1:24576:72:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-05-07T08:54:34.154586Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 
72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 9700000 Reserved to# 9800000 2025-05-07T08:54:34.154611Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:629:2560] TEvAllocateResult from# 9700000 to# 9800000 2025-05-07T08:54:34.154698Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 9800000 Reserved to# 9900000 2025-05-07T08:54:34.154726Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:631:2562] TEvAllocateResult from# 9800000 to# 9900000 2025-05-07T08:54:34.154820Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:11:11:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-05-07T08:54:34.154899Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 9900000 Reserved to# 10000000 2025-05-07T08:54:34.154948Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:633:2564] TEvAllocateResult from# 9900000 to# 10000000 expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS 2025-05-07T08:54:34.159788Z node 1 :TABLET_MAIN NOTICE: tablet_sys.cpp:1828: Tablet: 72057594046447617 Type: TxAllocator, EReason: ReasonPill, SuggestedGeneration: 0, KnownGeneration: 11 Marker# TSYS31 2025-05-07T08:54:34.161256Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:815: Tablet: 72057594046447617 HandleStateStorageInfoResolve, KnownGeneration: 11 Promote Marker# TSYS16 2025-05-07T08:54:34.162022Z node 1 :TABLET_MAIN DEBUG: tablet_req_rebuildhistory.cpp:421: TabletId# 72057594046447617 TTabletReqRebuildHistoryGraph::ProcessKeyEntry, LastBlobID: [72057594046447617:11:11:0:0:71:0] Snap: 11:1 for 72057594046447617 Marker# TRRH04 2025-05-07T08:54:34.162105Z node 1 :TABLET_MAIN DEBUG: tablet_req_rebuildhistory.cpp:356: TTabletReqRebuildHistoryGraph::ProcessLogEntry - TabletID: 72057594046447617, id [72057594046447617:11:11:0:0:71:0], refs: [[72057594046447617:11:11:1:24576:72:0],] for 72057594046447617 2025-05-07T08:54:34.162304Z node 1 :TABLET_MAIN DEBUG: tablet_req_rebuildhistory.cpp:356: TTabletReqRebuildHistoryGraph::ProcessLogEntry - TabletID: 72057594046447617, id [72057594046447617:11:1:0:0:42:0], refs: [[72057594046447617:11:1:1:28672:1483:0],] for 72057594046447617 2025-05-07T08:54:34.162361Z node 1 :TABLET_MAIN DEBUG: tablet_req_rebuildhistory.cpp:356: TTabletReqRebuildHistoryGraph::ProcessLogEntry - TabletID: 72057594046447617, id [72057594046447617:11:2:0:0:69:0], refs: [[72057594046447617:11:2:1:24576:76:0],] for 72057594046447617 2025-05-07T08:54:34.162405Z node 1 :TABLET_MAIN DEBUG: tablet_req_rebuildhistory.cpp:356: TTabletReqRebuildHistoryGraph::ProcessLogEntry - TabletID: 72057594046447617, id [72057594046447617:11:3:0:0:71:0], refs: [[72057594046447617:11:3:1:24576:78:0],] for 72057594046447617 2025-05-07T08:54:34.162457Z node 1 :TABLET_MAIN DEBUG: tablet_req_rebuildhistory.cpp:356: TTabletReqRebuildHistoryGraph::ProcessLogEntry - TabletID: 72057594046447617, id [72057594046447617:11:4:0:0:71:0], refs: [[72057594046447617:11:4:1:24576:75:0],] for 72057594046447617 2025-05-07T08:54:34.162518Z node 1 :TABLET_MAIN DEBUG: tablet_req_rebuildhistory.cpp:356: TTabletReqRebuildHistoryGraph::ProcessLogEntry - 
TabletID: 72057594046447617, id [72057594046447617:11:5:0:0:71:0], refs: [[72057594046447617:11:5:1:24576:78:0],] for 72057594046447617 2025-05-07T08:54:34.162565Z node 1 :TABLET_MAIN DEBUG: tablet_req_rebuildhistory.cpp:356: TTabletReqRebuildHistoryGraph::ProcessLogEntry - TabletID: 72057594046447617, id [72057594046447617:11:6:0:0:71:0], refs: [[72057594046447617:11:6:1:24576:78:0],] for 72057594046447617 2025-05-07T08:54:34.162666Z node 1 :TABLET_MAIN DEBUG: tablet_req_rebuildhistory.cpp:356: TTabletReqRebuildHistoryGraph::ProcessLogEntry - TabletID: 72057594046447617, id [72057594046447617:11:7:0:0:71:0], refs: [[72057594046447617:11:7:1:24576:78:0],] for 72057594046447617 2025-05-07T08:54:34.162728Z node 1 :TABLET_MAIN DEBUG: tablet_req_rebuildhistory.cpp:356: TTabletReqRebuildHistoryGraph::ProcessLogEntry - TabletID: 72057594046447617, id [72057594046447617:11:8:0:0:71:0], refs: [[72057594046447617:11:8:1:24576:75:0],] for 72057594046447617 2025-05-07T08:54:34.162778Z node 1 :TABLET_MAIN DEBUG: tablet_req_rebuildhistory.cpp:356: TTabletReqRebuildHistoryGraph::ProcessLogEntry - TabletID: 72057594046447617, id [72057594046447617:11:9:0:0:71:0], refs: [[72057594046447617:11:9:1:24576:78:0],] for 72057594046447617 2025-05-07T08:54:34.162839Z node 1 :TABLET_MAIN DEBUG: tablet_req_rebuildhistory.cpp:356: TTabletReqRebuildHistoryGraph::ProcessLogEntry - TabletID: 72057594046447617, id [72057594046447617:11:10:0:0:71:0], refs: [[72057594046447617:11:10:1:24576:78:0],] for 72057594046447617 2025-05-07T08:54:34.163021Z node 1 :TABLET_MAIN DEBUG: tablet_req_rebuildhistory.cpp:625: TabletId# 72057594046447617 TTabletReqRebuildHistoryGraph::BuildHistory - Process generation 11 from 1 with 11 steps Marker# TRRH09 2025-05-07T08:54:34.163071Z node 1 :TABLET_MAIN DEBUG: tablet_req_rebuildhistory.cpp:729: TTabletReqRebuildHistoryGraph::BuildHistory - NOT A TAIL - References: [[72057594046447617:11:1:1:28672:1483:0],] for 72057594046447617 2025-05-07T08:54:34.163119Z node 1 :TABLET_MAIN DEBUG: tablet_req_rebuildhistory.cpp:729: TTabletReqRebuildHistoryGraph::BuildHistory - NOT A TAIL - References: [[72057594046447617:11:2:1:24576:76:0],] for 72057594046447617 2025-05-07T08:54:34.163156Z node 1 :TABLET_MAIN DEBUG: tablet_req_rebuildhistory.cpp:729: TTabletReqRebuildHistoryGraph::BuildHistory - NOT A TAIL - References: [[72057594046447617:11:3:1:24576:78:0],] for 72057594046447617 2025-05-07T08:54:34.163189Z node 1 :TABLET_MAIN DEBUG: tablet_req_rebuildhistory.cpp:729: TTabletReqRebuildHistoryGraph::BuildHistory - NOT A TAIL - References: [[72057594046447617:11:4:1:24576:75:0],] for 72057594046447617 2025-05-07T08:54:34.163220Z node 1 :TABLET_MAIN DEBUG: tablet_req_rebuildhistory.cpp:729: TTabletReqRebuildHistoryGraph::BuildHistory - NOT A TAIL - References: [[72057594046447617:11:5:1:24576:78:0],] for 72057594046447617 2025-05-07T08:54:34.163247Z node 1 :TABLET_MAIN DEBUG: tablet_req_rebuildhistory.cpp:729: TTabletReqRebuildHistoryGraph::BuildHistory - NOT A TAIL - References: [[72057594046447617:11:6:1:24576:78:0],] for 72057594046447617 2025-05-07T08:54:34.163288Z node 1 :TABLET_MAIN DEBUG: tablet_req_rebuildhistory.cpp:691: TTabletReqRebuildHistoryGraph::BuildHistory - THE TAIL - References: [[72057594046447617:11:7:1:24576:78:0],] for 72057594046447617, Gc+: [[72057594046447617:11:7:1:24576:78:0],] 2025-05-07T08:54:34.163343Z node 1 :TABLET_MAIN DEBUG: tablet_req_rebuildhistory.cpp:691: TTabletReqRebuildHistoryGraph::BuildHistory - THE TAIL - References: [[72057594046447617:11:8:1:24576:75:0],] for 
72057594046447617, Gc+: [[72057594046447617:11:8:1:24576:75:0],] 2025-05-07T08:54:34.163384Z node 1 :TABLET_MAIN DEBUG: tablet_req_rebuildhistory.cpp:691: TTabletReqRebuildHistoryGraph::BuildHistory - THE TAIL - References: [[72057594046447617:11:9:1:24576:78:0],] for 72057594046447617, Gc+: [[72057594046447617:11:9:1:24576:78:0],] 2025-05-07T08:54:34.163414Z node 1 :TABLET_MAIN DEBUG: tablet_req_rebuildhistory.cpp:691: TTabletReqRebuildHistoryGraph::BuildHistory - THE TAIL - References: [[72057594046447617:11:10:1:24576:78:0],] for 72057594046447617, Gc+: [[72057594046447617:11:10:1:24576:78:0],] 2025-05-07T08:54:34.163444Z node 1 :TABLET_MAIN DEBUG: tablet_req_rebuildhistory.cpp:691: TTabletReqRebuildHistoryGraph::BuildHistory - THE TAIL - References: [[72057594046447617:11:11:1:24576:72:0],] for 72057594046447617, Gc+: [[72057594046447617:11:11:1:24576:72:0],] 2025-05-07T08:54:34.163716Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:221: Tablet: 72057594046447617 TTablet::WriteZeroEntry. logid# [72057594046447617:12:0:0:0:0:0] Marker# TSYS01 2025-05-07T08:54:34.165047Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:12:0:0:0:20:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-05-07T08:54:34.169822Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:17: tablet# 72057594046447617 OnActivateExecutor 2025-05-07T08:54:34.170126Z node 1 :TX_ALLOCATOR DEBUG: txallocator__scheme.cpp:22: tablet# 72057594046447617 TTxSchema Complete 2025-05-07T08:54:34.170978Z node 1 :TABLET_MAIN INFO: tablet_sys.cpp:1015: Tablet: 72057594046447617 Active! Generation: 12, Type: TxAllocator started in 0msec Marker# TSYS24 2025-05-07T08:54:34.171057Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:12:1:1:28672:1639:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-05-07T08:54:34.171200Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:12:1:0:0:42:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-05-07T08:54:34.171298Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:1381: Tablet: 72057594046447617 GcCollect 0 channel, tablet:gen:step => 12:0 Marker# TSYS28 >> TTxLocatorTest::Boot [GOOD] >> TTxLocatorTest::TestAllocateAllByPieces |90.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_allocator/ut/unittest >> RetryPolicy::TWriteSession_TestBrokenPolicy [GOOD] >> RetryPolicy::TWriteSession_RetryOnTargetCluster ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/controller/ut_target_discoverer/unittest >> TargetDiscoverer::IndexedTable [GOOD] Test command err: 2025-05-07T08:54:28.704895Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501624533050092484:2270];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:54:28.704949Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0045c6/r3tmp/tmpH7VcLb/pdisk_1.dat 2025-05-07T08:54:29.389066Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:54:29.427350Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 
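[editor's note] The generation-11 restart above shows TTabletReqRebuildHistoryGraph replaying the tablet's log: entries already superseded are marked "NOT A TAIL", the live suffix is marked "THE TAIL", and the tail's blob references are accumulated as GC-keep candidates ("Gc+"). A simplified sketch of that split, under the assumption that the boundary is a single step (the real pass derives it from snapshot and confirmation state and also resolves each referenced blob):

#include <cstdint>
#include <string>
#include <vector>

// Hypothetical log entry: a step within the generation plus the blob
// ids it references (the "refs: [[...]]" part of the trace above).
struct TLogEntry {
    uint32_t Step;
    std::vector<std::string> Refs;
};

// Split one generation's entries into history and tail around a boundary
// step, keeping the tail's references alive for garbage collection.
std::vector<std::string> CollectTailRefs(const std::vector<TLogEntry>& entries,
                                         uint32_t firstTailStep) {
    std::vector<std::string> gcKeep;
    for (const TLogEntry& e : entries) {
        if (e.Step < firstTailStep) {
            continue;  // "NOT A TAIL": fully superseded, droppable later
        }
        // "THE TAIL": must be replayed; its refs become the "Gc+" keep set.
        gcKeep.insert(gcKeep.end(), e.Refs.begin(), e.Refs.end());
    }
    return gcKeep;
}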
2025-05-07T08:54:29.427439Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:54:29.428992Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:8208 TServer::EnableGrpc on GrpcPort 14043, node 1 2025-05-07T08:54:30.018128Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:54:30.018149Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:54:30.018156Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:54:30.018253Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8208 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:54:30.764733Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:54:30.794698Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T08:54:30.799596Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 
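[editor's note] The REPLICATION_CONTROLLER trace that follows walks the discovery pipeline: describe the root path, list its children, describe each table, then register one replication target for the table and one per index. A compact sketch of the target construction, with a hypothetical descriptor type standing in for the event-driven YdbProxy flow:

#include <string>
#include <vector>

// Hypothetical target descriptor; the real flow is actor-based and
// driven by TEvDescribePath/TEvListDirectory/TEvDescribeTable responses.
struct TTarget {
    std::string SrcPath;
    std::string DstPath;
    std::string Kind;  // "Table" or "IndexTable"
};

// Build replication targets for one discovered table and its indexes,
// mirroring the "Add target: srcPath# ... kind# ..." lines below.
std::vector<TTarget> MakeTargets(const std::string& srcTable,
                                 const std::string& dstTable,
                                 const std::vector<std::string>& indexes) {
    std::vector<TTarget> targets;
    targets.push_back({srcTable, dstTable, "Table"});
    for (const std::string& index : indexes) {
        // An index is replicated through its implementation table.
        targets.push_back({srcTable + "/" + index,
                           dstTable + "/" + index + "/indexImplTable",
                           "IndexTable"});
    }
    return targets;
}

MakeTargets("/Root/Table", "/Root/Replicated/Table", {"Index"}) yields exactly the two targets registered in the trace below.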
2025-05-07T08:54:31.446519Z node 1 :REPLICATION_CONTROLLER TRACE: target_discoverer.cpp:27: [TargetDiscoverer][rid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribePathResponse { Result: { name: Root, owner: root@builtin, type: Directory, size_bytes: 0, created_at: { plan_step: 1746608070837, tx_id: 1 } } } 2025-05-07T08:54:31.446560Z node 1 :REPLICATION_CONTROLLER DEBUG: target_discoverer.cpp:42: [TargetDiscoverer][rid 1] Describe path succeeded: path# /Root 2025-05-07T08:54:31.474338Z node 1 :REPLICATION_CONTROLLER TRACE: target_discoverer.cpp:247: [TargetDiscoverer][rid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvListDirectoryResponse { Result: { children [{ name: Table, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1746608071243, tx_id: 281474976710658 } }, { name: .sys, owner: , type: Directory, size_bytes: 0, created_at: { plan_step: 0, tx_id: 0 } }] } } 2025-05-07T08:54:31.474361Z node 1 :REPLICATION_CONTROLLER DEBUG: target_discoverer.cpp:260: [TargetDiscoverer][rid 1] Listing succeeded: path# /Root 2025-05-07T08:54:33.585923Z node 1 :REPLICATION_CONTROLLER TRACE: target_discoverer.cpp:98: [TargetDiscoverer][rid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribeTableResponse { Result: { name: Table, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1746608071243, tx_id: 281474976710658 } } } 2025-05-07T08:54:33.585945Z node 1 :REPLICATION_CONTROLLER DEBUG: target_discoverer.cpp:113: [TargetDiscoverer][rid 1] Describe table succeeded: path# /Root/Table 2025-05-07T08:54:33.585989Z node 1 :REPLICATION_CONTROLLER INFO: target_discoverer.cpp:120: [TargetDiscoverer][rid 1] Add target: srcPath# /Root/Table, dstPath# /Root/Replicated/Table, kind# Table 2025-05-07T08:54:33.586077Z node 1 :REPLICATION_CONTROLLER INFO: target_discoverer.cpp:140: [TargetDiscoverer][rid 1] Add target: srcPath# /Root/Table/Index, dstPath# /Root/Replicated/Table/Index/indexImplTable, kind# IndexTable 2025-05-07T08:54:33.714738Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501624533050092484:2270];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:54:33.714816Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_allocator/ut/unittest >> TTxLocatorTest::Boot [GOOD] Test command err: 2025-05-07T08:54:35.016770Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:1904: Tablet: 72057594046447617 LockedInitializationPath Marker# TSYS32 2025-05-07T08:54:35.017250Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:917: Tablet: 72057594046447617 HandleFindLatestLogEntry, NODATA Promote Marker# TSYS19 2025-05-07T08:54:35.018140Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:221: Tablet: 72057594046447617 TTablet::WriteZeroEntry. 
logid# [72057594046447617:2:0:0:0:0:0] Marker# TSYS01 2025-05-07T08:54:35.019783Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:0:0:0:20:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-05-07T08:54:35.020234Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:17: tablet# 72057594046447617 OnActivateExecutor 2025-05-07T08:54:35.029579Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:1:1:28672:35:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-05-07T08:54:35.029668Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:1:0:0:42:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-05-07T08:54:35.029728Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:2:1:8192:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-05-07T08:54:35.029786Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:1381: Tablet: 72057594046447617 GcCollect 0 channel, tablet:gen:step => 2:0 Marker# TSYS28 2025-05-07T08:54:35.029886Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:2:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-05-07T08:54:35.031899Z node 1 :TX_ALLOCATOR DEBUG: txallocator__scheme.cpp:22: tablet# 72057594046447617 TTxSchema Complete 2025-05-07T08:54:35.032107Z node 1 :TABLET_MAIN INFO: tablet_sys.cpp:1015: Tablet: 72057594046447617 Active! Generation: 2, Type: TxAllocator started in 0msec Marker# TSYS24 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/controller/ut_target_discoverer/unittest >> TargetDiscoverer::Negative [GOOD] Test command err: 2025-05-07T08:54:29.722443Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501624537964875066:2198];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:54:29.722920Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00459d/r3tmp/tmpmsSQ0Y/pdisk_1.dat 2025-05-07T08:54:30.668887Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:54:30.704092Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:54:30.704210Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:54:30.708138Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:17585 TServer::EnableGrpc on GrpcPort 28312, node 1 2025-05-07T08:54:31.053755Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:54:31.053780Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:54:31.053787Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:54:31.053926Z node 1 :NET_CLASSIFIER ERROR: 
net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:17585 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:54:31.671609Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:54:31.784763Z node 1 :REPLICATION_CONTROLLER TRACE: target_discoverer.cpp:27: [TargetDiscoverer][rid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribePathResponse { Result: { status: SCHEME_ERROR, issues: {
: Error: Path not found } } } 2025-05-07T08:54:31.784837Z node 1 :REPLICATION_CONTROLLER ERROR: target_discoverer.cpp:78: [TargetDiscoverer][rid 1] Describe path failed: path# /Root/Table, status# SCHEME_ERROR, issues# {
: Error: Path not found } ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_allocator/ut/unittest >> TTxLocatorTest::TestZeroRange [GOOD] Test command err: 2025-05-07T08:54:35.048935Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:1904: Tablet: 72057594046447617 LockedInitializationPath Marker# TSYS32 2025-05-07T08:54:35.049395Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:917: Tablet: 72057594046447617 HandleFindLatestLogEntry, NODATA Promote Marker# TSYS19 2025-05-07T08:54:35.050215Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:221: Tablet: 72057594046447617 TTablet::WriteZeroEntry. logid# [72057594046447617:2:0:0:0:0:0] Marker# TSYS01 2025-05-07T08:54:35.051955Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:0:0:0:20:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-05-07T08:54:35.052416Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:17: tablet# 72057594046447617 OnActivateExecutor 2025-05-07T08:54:35.065681Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:1:1:28672:35:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-05-07T08:54:35.065764Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:1:0:0:42:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-05-07T08:54:35.065818Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:2:1:8192:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-05-07T08:54:35.065872Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:1381: Tablet: 72057594046447617 GcCollect 0 channel, tablet:gen:step => 2:0 Marker# TSYS28 2025-05-07T08:54:35.065959Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:2:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-05-07T08:54:35.066108Z node 1 :TX_ALLOCATOR DEBUG: txallocator__scheme.cpp:22: tablet# 72057594046447617 TTxSchema Complete 2025-05-07T08:54:35.066275Z node 1 :TABLET_MAIN INFO: tablet_sys.cpp:1015: Tablet: 72057594046447617 Active! 
Generation: 2, Type: TxAllocator started in 0msec Marker# TSYS24 2025-05-07T08:54:35.066978Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:70:2105] requested range size#0 2025-05-07T08:54:35.067743Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:3:1:24576:70:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-05-07T08:54:35.067839Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:3:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-05-07T08:54:35.067949Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 0 Reserved to# 0 2025-05-07T08:54:35.068005Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:70:2105] TEvAllocateResult from# 0 to# 0 expected SUCCESS >> TTxLocatorTest::TestAllocateAllByPieces [GOOD] >> KqpQueryPerf::MultiRead+QueryService >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-63 [FAIL] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-64 ------- [TM] {asan, default-linux-x86_64, release} ydb/services/datastreams/ut/unittest >> DataStreams::TestUnsupported [GOOD] Test command err: 2025-05-07T08:54:12.659916Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501624464250877071:2076];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:54:12.660002Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002809/r3tmp/tmpC3PGpf/pdisk_1.dat 2025-05-07T08:54:13.215381Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:54:13.227680Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:54:13.227823Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:54:13.250802Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 20644, node 1 2025-05-07T08:54:13.561164Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:54:13.561191Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:54:13.561210Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:54:13.561363Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:16111 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:54:14.056634Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:54:14.184544Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710658:0, at schemeshard: 72057594046644480 TClient is connected to server localhost:16111 2025-05-07T08:54:14.388661Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:54:14.759244Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976710661:0, at schemeshard: 72057594046644480 2025-05-07T08:54:18.965723Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7501624491613282171:2076];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:54:18.965773Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002809/r3tmp/tmp0XZGBd/pdisk_1.dat 2025-05-07T08:54:19.181085Z node 4 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 28401, node 4 2025-05-07T08:54:19.294664Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:54:19.294758Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:54:19.340913Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:54:19.340935Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:54:19.340942Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:54:19.341063Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable 
configuration 2025-05-07T08:54:19.342592Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:23100 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:54:19.643517Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:54:19.764605Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480 TClient is connected to server localhost:23100 2025-05-07T08:54:19.995831Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 
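[editor's note] The long runs of 'records { sequence_number: ... shard_id: ... }' below are PutRecords acknowledgements: every record lands on shard-000000 and receives that shard's next sequence number, monotonically increasing across batches (0-29, then 30-59, and so on). A toy sequencer showing the per-shard numbering (hypothetical types, not the DataStreams service code):

#include <cstddef>
#include <cstdint>
#include <map>
#include <string>
#include <utility>
#include <vector>

// Toy per-shard sequencer: every accepted record gets the shard's next
// consecutive number, and the counter survives across batches, as the
// responses below show.
class TShardSequencer {
public:
    std::vector<std::pair<std::string, uint64_t>>
    PutRecords(const std::string& shardId, size_t count) {
        std::vector<std::pair<std::string, uint64_t>> acks;
        uint64_t& next = Next[shardId];
        for (size_t i = 0; i < count; ++i) {
            acks.emplace_back(shardId, next++);
        }
        return acks;
    }

private:
    std::map<std::string, uint64_t> Next;  // next sequence number per shard
};

Calling PutRecords("shard-000000", 30) five times acknowledges sequence numbers 0-29 through 120-149, matching the batches in this test.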
2025-05-07T08:54:20.248091Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976715661:0, at schemeshard: 72057594046644480 2025-05-07T08:54:20.372813Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976715662:0, at schemeshard: 72057594046644480 encryption_type: NONE records { sequence_number: "0" shard_id: "shard-000000" } records { sequence_number: "1" shard_id: "shard-000000" } records { sequence_number: "2" shard_id: "shard-000000" } records { sequence_number: "3" shard_id: "shard-000000" } records { sequence_number: "4" shard_id: "shard-000000" } records { sequence_number: "5" shard_id: "shard-000000" } records { sequence_number: "6" shard_id: "shard-000000" } records { sequence_number: "7" shard_id: "shard-000000" } records { sequence_number: "8" shard_id: "shard-000000" } records { sequence_number: "9" shard_id: "shard-000000" } records { sequence_number: "10" shard_id: "shard-000000" } records { sequence_number: "11" shard_id: "shard-000000" } records { sequence_number: "12" shard_id: "shard-000000" } records { sequence_number: "13" shard_id: "shard-000000" } records { sequence_number: "14" shard_id: "shard-000000" } records { sequence_number: "15" shard_id: "shard-000000" } records { sequence_number: "16" shard_id: "shard-000000" } records { sequence_number: "17" shard_id: "shard-000000" } records { sequence_number: "18" shard_id: "shard-000000" } records { sequence_number: "19" shard_id: "shard-000000" } records { sequence_number: "20" shard_id: "shard-000000" } records { sequence_number: "21" shard_id: "shard-000000" } records { sequence_number: "22" shard_id: "shard-000000" } records { sequence_number: "23" shard_id: "shard-000000" } records { sequence_number: "24" shard_id: "shard-000000" } records { sequence_number: "25" shard_id: "shard-000000" } records { sequence_number: "26" shard_id: "shard-000000" } records { sequence_number: "27" shard_id: "shard-000000" } records { sequence_number: "28" shard_id: "shard-000000" } records { sequence_number: "29" shard_id: "shard-000000" } encryption_type: NONE records { sequence_number: "30" shard_id: "shard-000000" } records { sequence_number: "31" shard_id: "shard-000000" } records { sequence_number: "32" shard_id: "shard-000000" } records { sequence_number: "33" shard_id: "shard-000000" } records { sequence_number: "34" shard_id: "shard-000000" } records { sequence_number: "35" shard_id: "shard-000000" } records { sequence_number: "36" shard_id: "shard-000000" } records { sequence_number: "37" shard_id: "shard-000000" } records { sequence_number: "38" shard_id: "shard-000000" } records { sequence_number: "39" shard_id: "shard-000000" } records { sequence_number: "40" shard_id: "shard-000000" } records { sequence_number: "41" shard_id: "shard-000000" } records { sequence_number: "42" shard_id: "shard-000000" } records { sequence_number: "43" shard_id: "shard-000000" } records { sequence_number: "44" shard_id: "shard-000000" } records { sequence_number: "45" shard_id: "shard-000000" } records { sequence_number: "46" shard_id: "shard-000000" } records { sequence_number: "47" shard_id: "shard-000000" } records { sequence_number: "48" shard_id: "shard-000000" } records { sequence_number: "49" shard_id: "shard-000000" } records { sequence_number: "50" 
shard_id: "shard-000000" } records { sequence_number: "51" shard_id: "shard-000000" } records { sequence_number: "52" shard_id: "shard-000000" } records { sequence_number: "53" shard_id: "shard-000000" } records { sequence_number: "54" shard_id: "shard-000000" } records { sequence_number: "55" shard_id: "shard-000000" } records { sequence_number: "56" shard_id: "shard-000000" } records { sequence_number: "57" shard_id: "shard-000000" } records { sequence_number: "58" shard_id: "shard-000000" } records { sequence_number: "59" shard_id: "shard- ... d-000000" } records { sequence_number: "74" shard_id: "shard-000000" } records { sequence_number: "75" shard_id: "shard-000000" } records { sequence_number: "76" shard_id: "shard-000000" } records { sequence_number: "77" shard_id: "shard-000000" } records { sequence_number: "78" shard_id: "shard-000000" } records { sequence_number: "79" shard_id: "shard-000000" } records { sequence_number: "80" shard_id: "shard-000000" } records { sequence_number: "81" shard_id: "shard-000000" } records { sequence_number: "82" shard_id: "shard-000000" } records { sequence_number: "83" shard_id: "shard-000000" } records { sequence_number: "84" shard_id: "shard-000000" } records { sequence_number: "85" shard_id: "shard-000000" } records { sequence_number: "86" shard_id: "shard-000000" } records { sequence_number: "87" shard_id: "shard-000000" } records { sequence_number: "88" shard_id: "shard-000000" } records { sequence_number: "89" shard_id: "shard-000000" } encryption_type: NONE records { sequence_number: "90" shard_id: "shard-000000" } records { sequence_number: "91" shard_id: "shard-000000" } records { sequence_number: "92" shard_id: "shard-000000" } records { sequence_number: "93" shard_id: "shard-000000" } records { sequence_number: "94" shard_id: "shard-000000" } records { sequence_number: "95" shard_id: "shard-000000" } records { sequence_number: "96" shard_id: "shard-000000" } records { sequence_number: "97" shard_id: "shard-000000" } records { sequence_number: "98" shard_id: "shard-000000" } records { sequence_number: "99" shard_id: "shard-000000" } records { sequence_number: "100" shard_id: "shard-000000" } records { sequence_number: "101" shard_id: "shard-000000" } records { sequence_number: "102" shard_id: "shard-000000" } records { sequence_number: "103" shard_id: "shard-000000" } records { sequence_number: "104" shard_id: "shard-000000" } records { sequence_number: "105" shard_id: "shard-000000" } records { sequence_number: "106" shard_id: "shard-000000" } records { sequence_number: "107" shard_id: "shard-000000" } records { sequence_number: "108" shard_id: "shard-000000" } records { sequence_number: "109" shard_id: "shard-000000" } records { sequence_number: "110" shard_id: "shard-000000" } records { sequence_number: "111" shard_id: "shard-000000" } records { sequence_number: "112" shard_id: "shard-000000" } records { sequence_number: "113" shard_id: "shard-000000" } records { sequence_number: "114" shard_id: "shard-000000" } records { sequence_number: "115" shard_id: "shard-000000" } records { sequence_number: "116" shard_id: "shard-000000" } records { sequence_number: "117" shard_id: "shard-000000" } records { sequence_number: "118" shard_id: "shard-000000" } records { sequence_number: "119" shard_id: "shard-000000" } 2025-05-07T08:54:23.966286Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7501624491613282171:2076];send_to=[0:7307199536658146131:7762515]; 
2025-05-07T08:54:23.966375Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; encryption_type: NONE records { sequence_number: "120" shard_id: "shard-000000" } records { sequence_number: "121" shard_id: "shard-000000" } records { sequence_number: "122" shard_id: "shard-000000" } records { sequence_number: "123" shard_id: "shard-000000" } records { sequence_number: "124" shard_id: "shard-000000" } records { sequence_number: "125" shard_id: "shard-000000" } records { sequence_number: "126" shard_id: "shard-000000" } records { sequence_number: "127" shard_id: "shard-000000" } records { sequence_number: "128" shard_id: "shard-000000" } records { sequence_number: "129" shard_id: "shard-000000" } records { sequence_number: "130" shard_id: "shard-000000" } records { sequence_number: "131" shard_id: "shard-000000" } records { sequence_number: "132" shard_id: "shard-000000" } records { sequence_number: "133" shard_id: "shard-000000" } records { sequence_number: "134" shard_id: "shard-000000" } records { sequence_number: "135" shard_id: "shard-000000" } records { sequence_number: "136" shard_id: "shard-000000" } records { sequence_number: "137" shard_id: "shard-000000" } records { sequence_number: "138" shard_id: "shard-000000" } records { sequence_number: "139" shard_id: "shard-000000" } records { sequence_number: "140" shard_id: "shard-000000" } records { sequence_number: "141" shard_id: "shard-000000" } records { sequence_number: "142" shard_id: "shard-000000" } records { sequence_number: "143" shard_id: "shard-000000" } records { sequence_number: "144" shard_id: "shard-000000" } records { sequence_number: "145" shard_id: "shard-000000" } records { sequence_number: "146" shard_id: "shard-000000" } records { sequence_number: "147" shard_id: "shard-000000" } records { sequence_number: "148" shard_id: "shard-000000" } records { sequence_number: "149" shard_id: "shard-000000" } Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestStreamTimeRetention","id":"used_storage-root-72075186224037888-1746608060186-2","schema":"ydb.serverless.v1","tags":{"ydb_size":0},"usage":{"quantity":0,"unit":"byte*second","start":1746608060,"finish":1746608060},"labels":{"datastreams_stream_name":"stream_TestStreamTimeRetention","ydb_database":"root"},"version":"1.0.0","source_id":"72075186224037888","source_wt":1746608060}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestStreamTimeRetention","id":"used_storage-root-72075186224037888-1746608060330-3","schema":"ydb.serverless.v1","tags":{"ydb_size":0},"usage":{"quantity":0,"unit":"byte*second","start":1746608060,"finish":1746608060},"labels":{"datastreams_stream_name":"stream_TestStreamTimeRetention","ydb_database":"root"},"version":"1.0.0","source_id":"72075186224037888","source_wt":1746608060}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestStreamTimeRetention","id":"used_storage-root-72075186224037888-1746608060398-4","schema":"ydb.serverless.v1","tags":{"ydb_size":0},"usage":{"quantity":1,"unit":"byte*second","start":1746608060,"finish":1746608061},"labels":{"datastreams_stream_name":"stream_TestStreamTimeRetention","ydb_database":"root"},"version":"1.0.0","source_id":"72075186224037888","source_wt":1746608061}' Got line from metering file 
data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestStreamTimeRetention","id":"used_storage-root-72075186224037888-1746608061453-5","schema":"ydb.serverless.v1","tags":{"ydb_size":0},"usage":{"quantity":1,"unit":"byte*second","start":1746608061,"finish":1746608062},"labels":{"datastreams_stream_name":"stream_TestStreamTimeRetention","ydb_database":"root"},"version":"1.0.0","source_id":"72075186224037888","source_wt":1746608062}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestStreamTimeRetention","id":"used_storage-root-72075186224037888-1746608062474-6","schema":"ydb.serverless.v1","tags":{"ydb_size":0},"usage":{"quantity":1,"unit":"byte*second","start":1746608062,"finish":1746608063},"labels":{"datastreams_stream_name":"stream_TestStreamTimeRetention","ydb_database":"root"},"version":"1.0.0","source_id":"72075186224037888","source_wt":1746608063}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestStreamTimeRetention","id":"used_storage-root-72075186224037888-1746608063507-7","schema":"ydb.serverless.v1","tags":{"ydb_size":0},"usage":{"quantity":1,"unit":"byte*second","start":1746608063,"finish":1746608064},"labels":{"datastreams_stream_name":"stream_TestStreamTimeRetention","ydb_database":"root"},"version":"1.0.0","source_id":"72075186224037888","source_wt":1746608064}' 2025-05-07T08:54:28.640301Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7501624533058703153:2272];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:54:28.640586Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002809/r3tmp/tmpWlFkjq/pdisk_1.dat 2025-05-07T08:54:29.176951Z node 7 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:54:29.202964Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:54:29.203057Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:54:29.208144Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 26669, node 7 2025-05-07T08:54:29.367043Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:54:29.367063Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:54:29.367069Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:54:29.367180Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:64086 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:54:29.702016Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:54:29.786208Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710658:0, at schemeshard: 72057594046644480 TClient is connected to server localhost:64086 2025-05-07T08:54:30.031759Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... >> KqpQueryPerf::Delete+QueryService-UseSink >> KqpQueryPerf::Upsert-QueryService+UseSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_allocator/ut/unittest >> TTxLocatorTest::TestAllocateAllByPieces [GOOD] Test command err: 2025-05-07T08:54:35.816605Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:1904: Tablet: 72057594046447617 LockedInitializationPath Marker# TSYS32 2025-05-07T08:54:35.817084Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:917: Tablet: 72057594046447617 HandleFindLatestLogEntry, NODATA Promote Marker# TSYS19 2025-05-07T08:54:35.817913Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:221: Tablet: 72057594046447617 TTablet::WriteZeroEntry. 
logid# [72057594046447617:2:0:0:0:0:0] Marker# TSYS01
2025-05-07T08:54:35.819757Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:0:0:0:20:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0}
2025-05-07T08:54:35.820234Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:17: tablet# 72057594046447617 OnActivateExecutor
2025-05-07T08:54:35.831240Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:1:1:28672:35:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0}
2025-05-07T08:54:35.831351Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:1:0:0:42:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0}
2025-05-07T08:54:35.831422Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:2:1:8192:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0}
2025-05-07T08:54:35.831552Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:1381: Tablet: 72057594046447617 GcCollect 0 channel, tablet:gen:step => 2:0 Marker# TSYS28
2025-05-07T08:54:35.831675Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:2:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0}
2025-05-07T08:54:35.831784Z node 1 :TX_ALLOCATOR DEBUG: txallocator__scheme.cpp:22: tablet# 72057594046447617 TTxSchema Complete
2025-05-07T08:54:35.831936Z node 1 :TABLET_MAIN INFO: tablet_sys.cpp:1015: Tablet: 72057594046447617 Active! Generation: 2, Type: TxAllocator started in 0msec Marker# TSYS24
2025-05-07T08:54:35.832709Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:70:2105] requested range size#8796093022207
2025-05-07T08:54:35.833226Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:3:1:24576:70:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0}
2025-05-07T08:54:35.833313Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:3:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0}
2025-05-07T08:54:35.833417Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 0 Reserved to# 8796093022207
2025-05-07T08:54:35.833462Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:70:2105] TEvAllocateResult from# 0 to# 8796093022207 expected SUCCESS
2025-05-07T08:54:35.837405Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:75:2109] requested range size#8796093022207
2025-05-07T08:54:35.837894Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:4:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0}
2025-05-07T08:54:35.837992Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:4:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0}
2025-05-07T08:54:35.838129Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 8796093022207 Reserved to# 17592186044414
2025-05-07T08:54:35.838183Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:75:2109] TEvAllocateResult from# 8796093022207 to# 17592186044414 expected SUCCESS
2025-05-07T08:54:35.838565Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:79:2113] requested range size#8796093022207
2025-05-07T08:54:35.838961Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:5:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0}
2025-05-07T08:54:35.839044Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:5:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0}
2025-05-07T08:54:35.839125Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 17592186044414 Reserved to# 26388279066621
2025-05-07T08:54:35.839172Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:79:2113] TEvAllocateResult from# 17592186044414 to# 26388279066621 expected SUCCESS
2025-05-07T08:54:35.839578Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:83:2117] requested range size#8796093022207
2025-05-07T08:54:35.839915Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:6:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0}
2025-05-07T08:54:35.839977Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:6:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0}
2025-05-07T08:54:35.840072Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 26388279066621 Reserved to# 35184372088828
2025-05-07T08:54:35.840121Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:83:2117] TEvAllocateResult from# 26388279066621 to# 35184372088828 expected SUCCESS
2025-05-07T08:54:35.840518Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:87:2121] requested range size#8796093022207
2025-05-07T08:54:35.840842Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:7:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0}
2025-05-07T08:54:35.840905Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:7:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0}
2025-05-07T08:54:35.840977Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 35184372088828 Reserved to# 43980465111035
2025-05-07T08:54:35.841021Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:87:2121] TEvAllocateResult from# 35184372088828 to# 43980465111035 expected SUCCESS
2025-05-07T08:54:35.841401Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:91:2125] requested range size#8796093022207
2025-05-07T08:54:35.841736Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:8:1:24576:74:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0}
2025-05-07T08:54:35.841797Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:8:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0}
2025-05-07T08:54:35.841871Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 43980465111035 Reserved to# 52776558133242
2025-05-07T08:54:35.841908Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:91:2125] TEvAllocateResult from# 43980465111035 to# 52776558133242 expected SUCCESS
2025-05-07T08:54:35.842355Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:95:2129] requested range size#8796093022207
2025-05-07T08:54:35.842677Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:9:1:24576:74:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0}
2025-05-07T08:54:35.842739Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:9:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0}
2025-05-07T08:54:35.842834Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 52776558133242 Reserved to# 61572651155449
2025-05-07T08:54:35.842882Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:95:2129] TEvAllocateResult from# 52776558133242 to# 61572651155449 expected SUCCESS
2025-05-07T08:54:35.843398Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:99:2133] requested range size#8796093022207
2025-05-07T08:54:35.843698Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:10:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0}
2025-05-07T08:54:35.843786Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:10:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0}
2025-05-07T08:54:35.843904Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 61572651155449 Reserved to# 70368744177656
2025-05-07T08:54:35.843940Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:99:2133] TEvAllocateResult from# 61572651155449 to# 70368744177656 expected SUCCESS
2025-05-07T08:54:35.844471Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:103:2137] requested range size#8796093022207
2025-05-07T08:54:35.844776Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:11:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0}
2025-05-07T08:54:35.844835Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:11:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0}
2025-05-07T08:54:35.844920Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 70368744177656 Reserved to# 79164837199863
2025-05-07T08:54:35.844967Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:103:2137] TEvAllocateResult from# 70368744177656 to# 79164837199863 expected SUCCESS
2025-05-07T08:54:35.845410Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:107:2141] requested range size#8796093022207
2025-05-07T08:54:35.845682Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:12:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0}
2025-05-07T08:54:35.845775Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:12:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0}
2025-05-07T08:54:35.845863Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Succe ... node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:155:2189] TEvAllocateResult from# 184717953466347 to# 193514046488554 expected SUCCESS
2025-05-07T08:54:35.860782Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:159:2193] requested range size#8796093022207
2025-05-07T08:54:35.861115Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:25:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0}
2025-05-07T08:54:35.861174Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:25:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0}
2025-05-07T08:54:35.861252Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 193514046488554 Reserved to# 202310139510761
2025-05-07T08:54:35.861295Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:159:2193] TEvAllocateResult from# 193514046488554 to# 202310139510761 expected SUCCESS
2025-05-07T08:54:35.862050Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:163:2197] requested range size#8796093022207
2025-05-07T08:54:35.862437Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:26:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0}
2025-05-07T08:54:35.862517Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:26:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0}
2025-05-07T08:54:35.862613Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 202310139510761 Reserved to# 211106232532968
2025-05-07T08:54:35.862651Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:163:2197] TEvAllocateResult from# 202310139510761 to# 211106232532968 expected SUCCESS
2025-05-07T08:54:35.863356Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:167:2201] requested range size#8796093022207
2025-05-07T08:54:35.863670Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:27:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0}
2025-05-07T08:54:35.863739Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:27:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0}
2025-05-07T08:54:35.863840Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 211106232532968 Reserved to# 219902325555175
2025-05-07T08:54:35.863879Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:167:2201] TEvAllocateResult from# 211106232532968 to# 219902325555175 expected SUCCESS
2025-05-07T08:54:35.864601Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:171:2205] requested range size#8796093022207
2025-05-07T08:54:35.864912Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:28:1:24576:75:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0}
2025-05-07T08:54:35.864974Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:28:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0}
2025-05-07T08:54:35.865057Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 219902325555175 Reserved to# 228698418577382
2025-05-07T08:54:35.865100Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:171:2205] TEvAllocateResult from# 219902325555175 to# 228698418577382 expected SUCCESS
2025-05-07T08:54:35.865840Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:175:2209] requested range size#8796093022207
2025-05-07T08:54:35.866302Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:29:1:24576:73:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0}
2025-05-07T08:54:35.866380Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:29:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0}
2025-05-07T08:54:35.866469Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 228698418577382 Reserved to# 237494511599589
2025-05-07T08:54:35.866513Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:175:2209] TEvAllocateResult from# 228698418577382 to# 237494511599589 expected SUCCESS
2025-05-07T08:54:35.867347Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:179:2213] requested range size#8796093022207
2025-05-07T08:54:35.867770Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:30:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0}
2025-05-07T08:54:35.867838Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:30:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0}
2025-05-07T08:54:35.867923Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 237494511599589 Reserved to# 246290604621796
2025-05-07T08:54:35.867963Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:179:2213] TEvAllocateResult from# 237494511599589 to# 246290604621796 expected SUCCESS
2025-05-07T08:54:35.868760Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:183:2217] requested range size#8796093022207
2025-05-07T08:54:35.869120Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:31:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0}
2025-05-07T08:54:35.869191Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:31:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0}
2025-05-07T08:54:35.869276Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 246290604621796 Reserved to# 255086697644003
2025-05-07T08:54:35.869320Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:183:2217] TEvAllocateResult from# 246290604621796 to# 255086697644003 expected SUCCESS
2025-05-07T08:54:35.870201Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:187:2221] requested range size#8796093022207
2025-05-07T08:54:35.870620Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:32:1:24576:75:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0}
2025-05-07T08:54:35.870701Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:32:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0}
2025-05-07T08:54:35.870788Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 255086697644003 Reserved to# 263882790666210
2025-05-07T08:54:35.870834Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:187:2221] TEvAllocateResult from# 255086697644003 to# 263882790666210 expected SUCCESS
2025-05-07T08:54:35.871650Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:191:2225] requested range size#8796093022207
2025-05-07T08:54:35.871995Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:33:1:24576:77:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0}
2025-05-07T08:54:35.872045Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:33:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0}
2025-05-07T08:54:35.872148Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 263882790666210 Reserved to# 272678883688417
2025-05-07T08:54:35.872189Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:191:2225] TEvAllocateResult from# 263882790666210 to# 272678883688417 expected SUCCESS
2025-05-07T08:54:35.872963Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:195:2229] requested range size#8796093022207
2025-05-07T08:54:35.873289Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:34:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0}
2025-05-07T08:54:35.873399Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:34:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0}
2025-05-07T08:54:35.877756Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 272678883688417 Reserved to# 281474976710624
2025-05-07T08:54:35.877836Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:195:2229] TEvAllocateResult from# 272678883688417 to# 281474976710624 expected SUCCESS
2025-05-07T08:54:35.878988Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:199:2233] requested range size#31
2025-05-07T08:54:35.879449Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:35:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0}
2025-05-07T08:54:35.879560Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:35:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0}
2025-05-07T08:54:35.879679Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 281474976710624 Reserved to# 281474976710655
2025-05-07T08:54:35.879724Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:199:2233] TEvAllocateResult from# 281474976710624 to# 281474976710655 expected SUCCESS
2025-05-07T08:54:35.880497Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:203:2237] requested range size#1
2025-05-07T08:54:35.880639Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 0 Reserved from# 281474976710655 Reserved to# 0
2025-05-07T08:54:35.880681Z node 1 :TX_ALLOCATOR ERROR: txallocator_impl.cpp:84: tablet# 72057594046447617 Send to Sender# [1:203:2237] TEvAllocateResult status# IMPOSIBLE expected IMPOSIBLE
>> KqpPg::TableDeleteAllData+useSink [GOOD]
>> KqpPg::TableDeleteAllData-useSink
>> TargetDiscoverer::InvalidCredentials [GOOD]
>> THiveTest::TestFollowers
|90.2%| [TA] $(B)/ydb/core/tx/tx_allocator/ut/test-results/unittest/{meta.json ... results_accumulator.log}
>> DataStreams::ListStreamsValidation [GOOD]
>> ResourcePoolsDdl::TestCreateResourcePool
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_base/unittest >> TSchemeShardTest::FindSubDomainPathIdActorAsync [GOOD]
Test command err:
Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140]
IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140]
Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:128:2058] recipient: [1:108:2140]
2025-05-07T08:47:21.089319Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1
2025-05-07T08:47:21.089429Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10
2025-05-07T08:47:21.089489Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s
2025-05-07T08:47:21.089534Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration
2025-05-07T08:47:21.089580Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000
2025-05-07T08:47:21.089610Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000
2025-05-07T08:47:21.089690Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10
2025-05-07T08:47:21.089791Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false
2025-05-07T08:47:21.090574Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources#
2025-05-07T08:47:21.090959Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute
2025-05-07T08:47:21.185296Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs
2025-05-07T08:47:21.185367Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded
2025-05-07T08:47:21.205550Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete
2025-05-07T08:47:21.205711Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute
2025-05-07T08:47:21.205884Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944
2025-05-07T08:47:21.228563Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete
2025-05-07T08:47:21.229306Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0
2025-05-07T08:47:21.230028Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944
2025-05-07T08:47:21.230365Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944
2025-05-07T08:47:21.238985Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944
2025-05-07T08:47:21.240581Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944
2025-05-07T08:47:21.240651Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944
2025-05-07T08:47:21.240718Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute
2025-05-07T08:47:21.240768Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1]
2025-05-07T08:47:21.240893Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete
2025-05-07T08:47:21.241079Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944
2025-05-07T08:47:21.253657Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0
Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:239:2058] recipient: [1:15:2062]
2025-05-07T08:47:21.430124Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944
2025-05-07T08:47:21.430377Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944
2025-05-07T08:47:21.430600Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0
2025-05-07T08:47:21.430821Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944
2025-05-07T08:47:21.430915Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944
2025-05-07T08:47:21.438949Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944
2025-05-07T08:47:21.439108Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot
2025-05-07T08:47:21.439332Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944
2025-05-07T08:47:21.439402Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944
2025-05-07T08:47:21.439463Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state
2025-05-07T08:47:21.439500Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3
2025-05-07T08:47:21.445084Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944
2025-05-07T08:47:21.445169Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944
2025-05-07T08:47:21.445223Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128
2025-05-07T08:47:21.450324Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944
2025-05-07T08:47:21.450414Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944
2025-05-07T08:47:21.450497Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944
2025-05-07T08:47:21.450550Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1
2025-05-07T08:47:21.454511Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545
2025-05-07T08:47:21.456911Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816
2025-05-07T08:47:21.457085Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545
FAKE_COORDINATOR: Add transaction: 1 at step: 5000001
FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0
FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001
2025-05-07T08:47:21.466965Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944
2025-05-07T08:47:21.468171Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 130 RawX2: 4294969450 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944
2025-05-07T08:47:21.468463Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944
2025-05-07T08:47:21.471280Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240
2025-05-07T08:47:21.471899Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944
2025-05-07T08:47:21.474495Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1
2025-05-07T08:47:21.474815Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944
FAKE_COORDINATOR: Erasing txId 1
2025-05-07T08:47:21.477330Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944
2025-05-07T08:47:21.477383Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1]
2025-05-07T08:47:21.477590Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944
2025-05-07T08:47:21.479089Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 025-05-07T08:54:24.687089Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: false
2025-05-07T08:54:24.687161Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 102 ready parts: 1/1
2025-05-07T08:54:24.687236Z node 15 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 102:0
2025-05-07T08:54:24.687296Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 102:0
2025-05-07T08:54:24.687550Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 5
2025-05-07T08:54:24.687630Z node 15 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 102, publications: 2, subscribers: 0
2025-05-07T08:54:24.687705Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 102, [OwnerId: 72057594046678944, LocalPathId: 2], 5
2025-05-07T08:54:24.687762Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 102, [OwnerId: 72057594046678944, LocalPathId: 3], 2
2025-05-07T08:54:24.690129Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 5 PathOwnerId: 72057594046678944, cookie: 102
2025-05-07T08:54:24.690247Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 5 PathOwnerId: 72057594046678944, cookie: 102
2025-05-07T08:54:24.690308Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 102
2025-05-07T08:54:24.690384Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 5
2025-05-07T08:54:24.690473Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3
2025-05-07T08:54:24.692208Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 102
2025-05-07T08:54:24.692317Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 102
2025-05-07T08:54:24.692358Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 102
2025-05-07T08:54:24.692400Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 2
2025-05-07T08:54:24.692446Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4
2025-05-07T08:54:24.692558Z node 15 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 102, subscribers: 0
2025-05-07T08:54:24.700635Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102
2025-05-07T08:54:24.702860Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102
TestModificationResult got TxId: 102, wait until txId: 102
TestWaitNotification wait txId: 102
2025-05-07T08:54:24.708781Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion
2025-05-07T08:54:24.708894Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102
2025-05-07T08:54:24.709582Z node 15 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944
2025-05-07T08:54:24.709751Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult
2025-05-07T08:54:24.709819Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [15:517:2470]
TestWaitNotification: OK eventTxId 102
2025-05-07T08:54:24.710689Z node 15 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/SubDomenA" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944
2025-05-07T08:54:24.711039Z node 15 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/SubDomenA" took 398us result status StatusSuccess
2025-05-07T08:54:24.711639Z node 15 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/SubDomenA" PathDescription { Self { Name: "SubDomenA" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 3 SubDomainVersion: 1 SecurityStateVersion: 0 } } Children { Name: "Topic1" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 102 CreateStep: 5000003 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ChildrenExist: false BalancerTabletID: 72075186233409547 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 0 TimeCastBucketsPerMediator: 0 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 1 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 247 AccountSize: 247 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 1 PQPartitionsLimit: 1000000 SecurityState { } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944
2025-05-07T08:54:24.712437Z node 15 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/SubDomenA/Topic1" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true }, at schemeshard: 72057594046678944
2025-05-07T08:54:24.712729Z node 15 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/SubDomenA/Topic1" took 320us result status StatusSuccess
2025-05-07T08:54:24.713510Z node 15 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/SubDomenA/Topic1" PathDescription { Self { Name: "Topic1" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 102 CreateStep: 5000003 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 1 } ChildrenExist: false BalancerTabletID: 72075186233409547 } PersQueueGroup { Name: "Topic1" PathId: 3 TotalGroupCount: 1 PartitionPerTablet: 1 PQTabletConfig { PartitionConfig { LifetimeSeconds: 13 WriteSpeedInBytesPerSecond: 19 } YdbDatabasePath: "/MyRoot" MeteringMode: METERING_MODE_RESERVED_CAPACITY } Partitions { PartitionId: 0 TabletId: 72075186233409546 Status: Active } AlterVersion: 1 BalancerTabletID: 72075186233409547 NextPartitionId: 1 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 0 TimeCastBucketsPerMediator: 0 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 247 AccountSize: 247 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 1 PQPartitionsLimit: 1000000 } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944
2025-05-07T08:54:25.284634Z node 15 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: PathId: 3 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944
2025-05-07T08:54:25.285046Z node 15 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:44: Tablet 72057594046678944 describe pathId 3 took 426us result status StatusSuccess
2025-05-07T08:54:25.288452Z node 15 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/SubDomenA/Topic1" PathDescription { Self { Name: "Topic1" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 102 CreateStep: 5000003 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 1 } ChildrenExist: false BalancerTabletID: 72075186233409547 } PersQueueGroup { Name: "Topic1" PathId: 3 TotalGroupCount: 1 PartitionPerTablet: 1 PQTabletConfig { PartitionConfig { LifetimeSeconds: 13 WriteSpeedInBytesPerSecond: 19 } YdbDatabasePath: "/MyRoot" MeteringMode: METERING_MODE_RESERVED_CAPACITY } Partitions { PartitionId: 0 TabletId: 72075186233409546 Status: Active } AlterVersion: 1 BalancerTabletID: 72075186233409547 NextPartitionId: 1 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 0 TimeCastBucketsPerMediator: 0 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 247 AccountSize: 247 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 1 PQPartitionsLimit: 1000000 } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944
2025-05-07T08:54:25.555169Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__find_subdomain_path_id.cpp:20: FindTabletSubDomainPathId for tablet 72075186233409546
>> KqpWorkloadService::TestQueueSizeSimple
>> KqpWorkloadServiceTables::TestTablesIsNotCreatingForUnlimitedPool
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/controller/ut_target_discoverer/unittest >> TargetDiscoverer::InvalidCredentials [GOOD]
Test command err:
2025-05-07T08:54:31.853957Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501624548123238542:2198];send_to=[0:7307199536658146131:7762515];
2025-05-07T08:54:31.854725Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message;
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00456c/r3tmp/tmpkTuL62/pdisk_1.dat
2025-05-07T08:54:32.643291Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded
2025-05-07T08:54:32.660182Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2025-05-07T08:54:32.660284Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting
2025-05-07T08:54:32.672359Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected
TClient is connected to server localhost:6657
TServer::EnableGrpc on GrpcPort 21490, node 1
2025-05-07T08:54:33.365103Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe)
2025-05-07T08:54:33.365151Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe)
2025-05-07T08:54:33.365168Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe)
2025-05-07T08:54:33.365302Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration
TClient is connected to server localhost:6657
WaitRootIsUp 'Root'...
TClient::Ls request: Root
TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED)
WaitRootIsUp 'Root' success.
2025-05-07T08:54:33.855904Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480
waiting...
2025-05-07T08:54:33.880022Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480
2025-05-07T08:54:33.885105Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480
waiting...
2025-05-07T08:54:34.716528Z node 1 :REPLICATION_CONTROLLER TRACE: target_discoverer.cpp:27: [TargetDiscoverer][rid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribePathResponse { Result: { status: CLIENT_UNAUTHENTICATED, issues: {
: Error: Can't get Authentication info from CredentialsProvider. ydb/public/sdk/cpp/src/client/types/credentials/login/login.cpp:217: Cannot find user: user } } }
2025-05-07T08:54:34.716576Z node 1 :REPLICATION_CONTROLLER ERROR: target_discoverer.cpp:78: [TargetDiscoverer][rid 1] Describe path failed: path# /Root, status# CLIENT_UNAUTHENTICATED, issues# {
: Error: Can't get Authentication info from CredentialsProvider. ydb/public/sdk/cpp/src/client/types/credentials/login/login.cpp:217: Cannot find user: user }
>> ResourcePoolClassifiersDdl::TestResourcePoolClassifiersPermissions
>> DataShardSnapshots::DelayedWriteReadableAfterSplitAndReboot [GOOD]
>> DataShardSnapshots::BrokenLockChangesDontLeak
>> KqpWorkloadService::TestQueryCancelAfterPoolWithLimits
>> KqpWorkloadServiceDistributed::TestDistributedQueue
>> THiveTest::TestFollowers [GOOD]
>> THiveTest::TestFollowersReconfiguration
>> THiveTest::TestHiveBalancerWithPrefferedDC1 [GOOD]
>> THiveTest::TestHiveBalancerWithPrefferedDC2
------- [TM] {asan, default-linux-x86_64, release} ydb/services/datastreams/ut/unittest >> DataStreams::ListStreamsValidation [GOOD]
Test command err:
2025-05-07T08:54:11.609722Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501624461529369950:2074];send_to=[0:7307199536658146131:7762515];
2025-05-07T08:54:11.609795Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message;
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002818/r3tmp/tmplN3u7D/pdisk_1.dat
2025-05-07T08:54:12.202576Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2025-05-07T08:54:12.202754Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting
2025-05-07T08:54:12.207707Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected
2025-05-07T08:54:12.239975Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded
TServer::EnableGrpc on GrpcPort 10529, node 1
2025-05-07T08:54:12.362621Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe)
2025-05-07T08:54:12.362639Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe)
2025-05-07T08:54:12.362662Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe)
2025-05-07T08:54:12.362744Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration
TClient is connected to server localhost:18360
WaitRootIsUp 'Root'...
TClient::Ls request: Root
TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED)
WaitRootIsUp 'Root' success.
2025-05-07T08:54:12.917994Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480
waiting...
2025-05-07T08:54:13.025375Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710658:0, at schemeshard: 72057594046644480
TClient is connected to server localhost:18360
2025-05-07T08:54:13.228601Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976710659:0, at schemeshard: 72057594046644480
waiting...
2025-05-07T08:54:13.243331Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710659, at schemeshard: 72057594046644480
2025-05-07T08:54:15.791002Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480
2025-05-07T08:54:16.024421Z node 1 :CHANGE_EXCHANGE WARN: change_sender_cdc_stream.cpp:398: [CdcChangeSenderMain][72075186224037890:1][1:7501624483004208096:2370] Failed entry at 'ResolveTopic': entry# { Path: TableId: [72057594046644480:6:0] RequestType: ByTableId Operation: OpTopic RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }
2025-05-07T08:54:16.344664Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976710664:0, at schemeshard: 72057594046644480
2025-05-07T08:54:16.610948Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501624461529369950:2074];send_to=[0:7307199536658146131:7762515];
2025-05-07T08:54:16.611019Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout;
2025-05-07T08:54:16.649359Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpDropPersQueueGroup, opId: 281474976710665:0, at schemeshard: 72057594046644480
2025-05-07T08:54:16.712261Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037908 not found
2025-05-07T08:54:16.712284Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037905 not found
2025-05-07T08:54:16.712292Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037893 not found
2025-05-07T08:54:16.712302Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037902 not found
2025-05-07T08:54:16.712308Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037907 not found
2025-05-07T08:54:16.712316Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037896 not found
2025-05-07T08:54:16.712323Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037904 not found
2025-05-07T08:54:16.712329Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037903 not found
2025-05-07T08:54:16.712337Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037899 not found
2025-05-07T08:54:16.712343Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037901 not found
2025-05-07T08:54:16.712351Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037898 not found
2025-05-07T08:54:16.712361Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037906 not found
2025-05-07T08:54:16.712376Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037894 not found
2025-05-07T08:54:16.712387Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037900 not found
2025-05-07T08:54:16.712398Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037897 not found
2025-05-07T08:54:16.712413Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037895 not found
2025-05-07T08:54:16.733059Z node 1 :HIVE WARN: tx__delete_tablet.cpp:91: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,19) wasn't found
2025-05-07T08:54:16.733133Z node 1 :HIVE WARN: tx__delete_tablet.cpp:91: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,7) wasn't found
2025-05-07T08:54:16.733164Z node 1 :HIVE WARN: tx__delete_tablet.cpp:91: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,13) wasn't found
2025-05-07T08:54:16.733215Z node 1 :HIVE WARN: tx__delete_tablet.cpp:91: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,10) wasn't found
2025-05-07T08:54:16.733249Z node 1 :HIVE WARN: tx__delete_tablet.cpp:91: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,16) wasn't found
2025-05-07T08:54:16.733275Z node 1 :HIVE WARN: tx__delete_tablet.cpp:91: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,21) wasn't found
2025-05-07T08:54:16.733314Z node 1 :HIVE WARN: tx__delete_tablet.cpp:91: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,18) wasn't found
2025-05-07T08:54:16.733346Z node 1 :HIVE WARN: tx__delete_tablet.cpp:91: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,15) wasn't found
2025-05-07T08:54:16.733376Z node 1 :HIVE WARN: tx__delete_tablet.cpp:91: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,12) wasn't found
2025-05-07T08:54:16.733422Z node 1 :HIVE WARN: tx__delete_tablet.cpp:91: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,6) wasn't found
2025-05-07T08:54:16.733473Z node 1 :HIVE WARN: tx__delete_tablet.cpp:91: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,9) wasn't found
2025-05-07T08:54:16.733507Z node 1 :HIVE WARN: tx__delete_tablet.cpp:91: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,20) wasn't found
2025-05-07T08:54:16.733543Z node 1 :HIVE WARN: tx__delete_tablet.cpp:91: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,17) wasn't found
2025-05-07T08:54:16.733575Z node 1 :HIVE WARN: tx__delete_tablet.cpp:91: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,14) wasn't found
2025-05-07T08:54:16.733602Z node 1 :HIVE WARN: tx__delete_tablet.cpp:91: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,11) wasn't found
2025-05-07T08:54:16.733630Z node 1 :HIVE WARN: tx__delete_tablet.cpp:91: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,8) wasn't found
2025-05-07T08:54:18.650300Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7501624490078901294:2074];send_to=[0:7307199536658146131:7762515];
2025-05-07T08:54:18.650355Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message;
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002818/r3tmp/tmpM9QDiI/pdisk_1.dat
2025-05-07T08:54:19.127066Z node 4 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded
2025-05-07T08:54:19.174214Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2025-05-07T08:54:19.174301Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting
2025-05-07T08:54:19.183783Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected
TServer::EnableGrpc on GrpcPort 21478, node 4
2025-05-07T08:54:19.358860Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe)
2025-05-07T08:54:19.358885Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe)
2025-05-07T08:54:19.358894Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe)
2025-05-07T08:54:19.359045Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration
TClient is connected to server localhost:8040
WaitRootIsUp 'Root'...
TClient::Ls request: Root
TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED)
WaitRootIsUp 'Root' success.
2025-05-07T08:54:19.743813Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480
waiting...
2025-05-07T08:54:19.875594Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480
TClient is connected to server localhost:8040
2025-05-07T08:54:20.171175Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715659:0, at schemeshard: 72057594046644480
waiting...
2025-05-07T08:54:20.198175Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715659, at schemeshard: 72057594046644480
2025-05-07T08:54:20.596611Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976715661:0, at schemeshard: 72057594046644480
2025-05-07T08:54:20.779828Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976715662:0, at schemeshard: 72057594046644480
2025-05-07T08:54:20.924217Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976715663:0, at schemeshard: 72057594046644480
2025-05-07T08:54:24.643579Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7501624514821438419:2147];send_to=[0:7307199536658146131:7762515];
2025-05-07T08:54:24.651030Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message;
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002818/r3tmp/tmpt03zLl/pdisk_1.dat
2025-05-07T08:54:24.962990Z node 7 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded
2025-05-07T08:54:24.977943Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2025-05-07T08:54:24.978531Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting
2025-05-07T08:54:24.989182Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected
TServer::EnableGrpc on GrpcPort 13722, node 7
2025-05-07T08:54:25.098791Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe)
2025-05-07T08:54:25.098817Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe)
2025-05-07T08:54:25.098827Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe)
2025-05-07T08:54:25.098965Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration
TClient is connected to server localhost:18603
WaitRootIsUp 'Root'...
TClient::Ls request: Root
TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED)
WaitRootIsUp 'Root' success.
2025-05-07T08:54:25.489166Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480
waiting...
2025-05-07T08:54:25.571356Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480
TClient is connected to server localhost:18603
2025-05-07T08:54:25.763743Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715659:0, at schemeshard: 72057594046644480
waiting...
2025-05-07T08:54:26.063334Z node 7 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [7:7501624523411375109:3462] txid# 281474976715661, issues: { message: "Check failed: path: \'/Root/stream_TestCreateExistingStream\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 2], type: EPathTypePersQueueGroup, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:54:31.038829Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7501624545214076741:2075];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:54:31.038921Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002818/r3tmp/tmp3erA18/pdisk_1.dat 2025-05-07T08:54:31.421707Z node 10 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:54:31.473954Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:54:31.474106Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:54:31.478595Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 14671, node 10 2025-05-07T08:54:31.755762Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:54:31.755795Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:54:31.755805Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:54:31.755962Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6048 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:54:32.413940Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T08:54:32.602012Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480 TClient is connected to server localhost:6048 2025-05-07T08:54:33.010130Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... >> KqpQueryPerf::RangeLimitRead-QueryService [GOOD] >> DataStreams::TestGetRecords1MBMessagesOneByOneBySeqNo [GOOD] >> KqpWorkloadService::WorkloadServiceDisabledByFeatureFlag >> KqpQueryPerf::IdxLookupJoinThreeWay+QueryService [GOOD] >> KqpQueryPerf::IdxLookupJoinThreeWay-QueryService >> BasicUsage::PreferredDatabaseNoFallback [GOOD] >> KqpWorkloadServiceSubscriptions::TestResourcePoolSubscriptionAfterDrop >> THiveTest::TestFollowersReconfiguration [GOOD] >> THiveTest::TestFollowerPromotion ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/perf/unittest >> KqpQueryPerf::RangeLimitRead-QueryService [GOOD] Test command err: Trying to start YDB, gRPC: 9264, MsgBus: 29848 2025-05-07T08:54:31.514692Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501624546649058113:2069];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:54:31.515639Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0027d3/r3tmp/tmpnIJGU0/pdisk_1.dat 2025-05-07T08:54:32.084360Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:54:32.119448Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:54:32.119548Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 9264, node 1 2025-05-07T08:54:32.125046Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:54:32.212971Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:54:32.212996Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:54:32.213004Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:54:32.213119Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:29848 TClient is connected to server localhost:29848 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:54:33.097457Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:54:33.129043Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T08:54:33.156699Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:54:33.340728Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:54:33.581674Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:54:33.666372Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:54:35.926916Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501624563828928946:2407], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:54:35.927060Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:54:36.295362Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T08:54:36.339300Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T08:54:36.386252Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T08:54:36.430085Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T08:54:36.502466Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T08:54:36.515139Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501624546649058113:2069];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:54:36.515211Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:54:36.559224Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T08:54:36.640543Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T08:54:36.726701Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501624568123896912:2471], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:54:36.726780Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:54:36.727062Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501624568123896917:2474], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:54:36.731083Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-05-07T08:54:36.743381Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501624568123896919:2475], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T08:54:36.847982Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501624568123896970:3423] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } |90.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/backup/impl/ut_local_partition_reader/ydb-core-backup-impl-ut_local_partition_reader |90.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/backup/impl/ut_local_partition_reader/ydb-core-backup-impl-ut_local_partition_reader |90.3%| [TA] {RESULT} $(B)/ydb/core/tx/tx_allocator/ut/test-results/unittest/{meta.json ... results_accumulator.log} |90.3%| [LD] {RESULT} $(B)/ydb/core/backup/impl/ut_local_partition_reader/ydb-core-backup-impl-ut_local_partition_reader ------- [TM] {asan, default-linux-x86_64, release} ydb/services/datastreams/ut/unittest >> DataStreams::TestGetRecords1MBMessagesOneByOneBySeqNo [GOOD] Test command err: 2025-05-07T08:54:07.038800Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501624444168640796:2074];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:54:07.038858Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00281d/r3tmp/tmpss6dtL/pdisk_1.dat 2025-05-07T08:54:08.174212Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:54:08.386178Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:54:08.395818Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:54:08.395921Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:54:08.417840Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 30057, node 1 2025-05-07T08:54:08.801476Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:54:08.801506Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:54:08.801515Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:54:08.801641Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6350 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:54:09.449438Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:54:09.625774Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710658:0, at schemeshard: 72057594046644480 TClient is connected to server localhost:6350 2025-05-07T08:54:09.899255Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:54:10.381605Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpDropPersQueueGroup, opId: 281474976710661:0, at schemeshard: 72057594046644480 2025-05-07T08:54:10.441043Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037889 not found 2025-05-07T08:54:10.441109Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037891 not found 2025-05-07T08:54:10.450368Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037890 not found 2025-05-07T08:54:10.450436Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037888 not found 2025-05-07T08:54:14.339451Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7501624474217842734:2208];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:54:14.339785Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00281d/r3tmp/tmpL19ZHh/pdisk_1.dat 2025-05-07T08:54:14.674482Z node 4 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:54:14.702674Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 
2025-05-07T08:54:14.702759Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:54:14.711910Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 10962, node 4 2025-05-07T08:54:14.914831Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:54:14.914858Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:54:14.914865Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:54:14.914994Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8333 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:54:15.199464Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:54:15.268291Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710658:0, at schemeshard: 72057594046644480 TClient is connected to server localhost:8333 2025-05-07T08:54:15.538856Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T08:54:15.568612Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710659, at schemeshard: 72057594046644480 2025-05-07T08:54:15.825022Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976710661:0, at schemeshard: 72057594046644480 2025-05-07T08:54:15.943857Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpDropPersQueueGroup, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T08:54:15.982535Z node 4 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 4, TabletId: 72075186224037889 not found 2025-05-07T08:54:15.984102Z node 4 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 4, TabletId: 72075186224037891 not found 2025-05-07T08:54:15.984123Z node 4 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 4, TabletId: 72075186224037890 not found 2025-05-07T08:54:15.984138Z node 4 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 4, TabletId: 72075186224037888 not found 2025-05-07T08:54:20.452584Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7501624499674571436:2141];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:54:20.452638Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00281d/r3tmp/tmpNxtpcL/pdisk_1.dat 2025-05-07T08:54:20.806968Z node 7 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:54:20.849388Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:54:20.849479Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:54:20.859683Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 6023, node 7 2025-05-07T08:54:21.018792Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:54:21.018817Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:54:21.018825Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:54:21.018969Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23010 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:54:21.519197Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:54:21.621516Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480 TClient is connected to server localhost:23010 2025-05-07T08:54:21.927168Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:54:22.276025Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976715661:0, at schemeshard: 72057594046644480 2025-05-07T08:54:22.378475Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-05-07T08:54:22.460385Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpDropPersQueueGroup, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-05-07T08:54:22.492800Z node 7 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 7, TabletId: 72075186224037889 not found 2025-05-07T08:54:22.495215Z node 7 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 7, TabletId: 72075186224037891 not found 2025-05-07T08:54:22.495267Z node 7 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 7, TabletId: 72075186224037888 not found 2025-05-07T08:54:22.495285Z node 7 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 7, TabletId: 72075186224037890 not found 2025-05-07T08:54:22.505513Z node 7 :HIVE WARN: tx__delete_tablet.cpp:91: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,2) wasn't found 2025-05-07T08:54:22.505716Z node 7 :HIVE WARN: tx__delete_tablet.cpp:91: HIVE#72057594037968897 THive::TTxDeleteTablet tablet 
(72057594046644480,4) wasn't found 2025-05-07T08:54:22.505780Z node 7 :HIVE WARN: tx__delete_tablet.cpp:91: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,3) wasn't found 2025-05-07T08:54:22.505831Z node 7 :HIVE WARN: tx__delete_tablet.cpp:91: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,1) wasn't found 2025-05-07T08:54:26.178331Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7501624523468678198:2073];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:54:26.178419Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00281d/r3tmp/tmpU8TsAb/pdisk_1.dat 2025-05-07T08:54:26.508106Z node 10 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:54:26.549327Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:54:26.549596Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:54:26.554237Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 27925, node 10 2025-05-07T08:54:26.677155Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:54:26.677180Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:54:26.677190Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:54:26.677349Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:18347 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:54:27.049088Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T08:54:27.188152Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710658:0, at schemeshard: 72057594046644480 TClient is connected to server localhost:18347 2025-05-07T08:54:27.462465Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:54:27.479099Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710659, at schemeshard: 72057594046644480 2025-05-07T08:54:31.178436Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[10:7501624523468678198:2073];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:54:31.178524Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> KqpWorkloadServiceActors::TestPoolFetcher ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/federated_topic/ut/unittest >> BasicUsage::PreferredDatabaseNoFallback [GOOD] Test command err: 2025-05-07T08:53:34.169367Z :GetAllStartPartitionSessions INFO: Random seed for debugging is 1746608014169329 2025-05-07T08:53:34.561072Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501624299910264879:2067];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:53:34.561239Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-05-07T08:53:34.661728Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7501624302350074179:2092];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:53:34.661779Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-05-07T08:53:34.900054Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-05-07T08:53:34.902627Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003f18/r3tmp/tmp7n3vjQ/pdisk_1.dat 2025-05-07T08:53:35.227022Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:53:35.227131Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:53:35.228375Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:53:35.228426Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:53:35.232910Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-05-07T08:53:35.233081Z node 1 :HIVE WARN: 
node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:53:35.244839Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:53:35.251130Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 5167, node 1 2025-05-07T08:53:35.550685Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/zvgn/003f18/r3tmp/yandexuSiGfx.tmp 2025-05-07T08:53:35.550721Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/zvgn/003f18/r3tmp/yandexuSiGfx.tmp 2025-05-07T08:53:35.550872Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/zvgn/003f18/r3tmp/yandexuSiGfx.tmp 2025-05-07T08:53:35.550996Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-05-07T08:53:35.669548Z INFO: TTestServer started on Port 6592 GrpcPort 5167 TClient is connected to server localhost:6592 PQClient connected to localhost:5167 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:53:36.157563Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... waiting... waiting... waiting... 
2025-05-07T08:53:39.562348Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501624299910264879:2067];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:53:39.562425Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:53:39.670214Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7501624302350074179:2092];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:53:39.670308Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:53:40.256602Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501624325680069719:2339], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:53:40.256781Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:53:40.257848Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501624325680069731:2342], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:53:40.270201Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710661:3, at schemeshard: 72057594046644480 2025-05-07T08:53:40.372233Z node 1 :BS_CONTROLLER ERROR: {BSC07@impl.h:2175} ProcessControllerEvent event processing took too much time Type# 2146435072 Duration# 0.100031s 2025-05-07T08:53:40.372283Z node 1 :BS_CONTROLLER ERROR: {BSC00@bsc.cpp:666} StateWork event processing took too much time Type# 2146435078 Duration# 0.100134s 2025-05-07T08:53:40.503372Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501624325680069733:2343], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710661 completed, doublechecking } 2025-05-07T08:53:40.914068Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501624325680069828:2696] txid# 281474976710662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:53:40.969805Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T08:53:41.037672Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7501624328119878271:2316], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-05-07T08:53:41.039113Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2147: SessionId: ydb://session/3?node_id=2&id=ZGE4NmFlMmUtMzgyODM1ODgtZmJjZWEzYWUtYmFkNjZkOTg=, ActorId: [2:7501624328119878230:2310], ActorState: ExecuteState, TraceId: 01jtmz6pw11b2v1zw1z3pgwks6, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-05-07T08:53:41.041500Z node 2 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-05-07T08:53:41.043335Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7501624325680069841:2352], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-05-07T08:53:41.045414Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2147: SessionId: ydb://session/3?node_id=1&id=NDI5NjA2MTQtMzkzOTkyMjgtZGE2NmE5NDItMmYyNTViZQ==, ActorId: [1:7501624325680069717:2338], ActorState: ExecuteState, TraceId: 01jtmz6prwck6m5khxgpqkfmmj, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-05-07T08:53:41.045828Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-05-07T08:53:41.255980Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T08:53:41.500551Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost:5167", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, false, 1000); 2025-05-07T08:53:42.087824Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710666. Ctx: { TraceId: 01jtmz6r5wcy ... 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:183: TPartitionChooser [3:7501624445670608602:2494] (SourceId=src, PreferedPartition=(NULL)) HandleUpdate PartitionPersisted=0 Status=SUCCESS 2025-05-07T08:54:08.389170Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:305: TPartitionChooser [3:7501624445670608602:2494] (SourceId=src, PreferedPartition=(NULL)) ReplyResult: Partition=0, SeqNo=(NULL) 2025-05-07T08:54:08.389186Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:268: TPartitionChooser [3:7501624445670608602:2494] (SourceId=src, PreferedPartition=(NULL)) Start idle 2025-05-07T08:54:08.389213Z node 3 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:689: ProceedPartition. 
session cookie: 1 sessionId: partition: 0 expectedGeneration: (NULL) 2025-05-07T08:54:08.394410Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2874: [PQ: 72075186224037892] server connected, pipe [3:7501624445670608666:2494], now have 1 active actors on pipe 2025-05-07T08:54:08.396095Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:342: Handle TEvRequest topic: 'rt3.dc1--test-topic' requestId: 2025-05-07T08:54:08.396138Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2788: [PQ: 72075186224037892] got client message batch for topic 'rt3.dc1--test-topic' partition 0 2025-05-07T08:54:08.396248Z node 4 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie src|103ea4e2-c55a668c-9afd2d1b-21cdebd0_0 generated for partition 0 topic 'rt3.dc1--test-topic' owner src 2025-05-07T08:54:08.396359Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:35: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ReplyOwnerOk. Partition: 0 2025-05-07T08:54:08.396417Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:377: Answer ok topic: 'rt3.dc1--test-topic' partition: 0 messageNo: 0 requestId: cookie: 0 2025-05-07T08:54:08.397265Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:342: Handle TEvRequest topic: 'rt3.dc1--test-topic' requestId: 2025-05-07T08:54:08.397288Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2788: [PQ: 72075186224037892] got client message batch for topic 'rt3.dc1--test-topic' partition 0 2025-05-07T08:54:08.397351Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:377: Answer ok topic: 'rt3.dc1--test-topic' partition: 0 messageNo: 0 requestId: cookie: 0 2025-05-07T08:54:08.394713Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:798: TPartitionWriter 72075186224037892 (partition=0) TEvClientConnected Status OK, TabletId: 72075186224037892, NodeId 4, Generation: 1 2025-05-07T08:54:08.397952Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:865: session inited cookie: 1 partition: 0 MaxSeqNo: 0 sessionId: src|103ea4e2-c55a668c-9afd2d1b-21cdebd0_0 2025-05-07T08:54:08.400234Z :INFO: [] MessageGroupId [src] SessionId [] Counters: { Errors: 0 CurrentSessionLifetimeMs: 1746608048400 BytesWritten: 0 MessagesWritten: 0 BytesWrittenCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-05-07T08:54:08.400352Z :INFO: [] MessageGroupId [src] SessionId [] Write session established. Init response: session_id: "src|103ea4e2-c55a668c-9afd2d1b-21cdebd0_0" topic: "test-topic" cluster: "dc1" supported_codecs: CODEC_RAW supported_codecs: CODEC_GZIP supported_codecs: CODEC_LZOP 2025-05-07T08:54:08.400508Z :INFO: [] MessageGroupId [src] SessionId [src|103ea4e2-c55a668c-9afd2d1b-21cdebd0_0] Write session: close. 
Timeout = 0 ms 2025-05-07T08:54:08.400536Z :INFO: [] MessageGroupId [src] SessionId [src|103ea4e2-c55a668c-9afd2d1b-21cdebd0_0] Write session will now close 2025-05-07T08:54:08.400569Z :DEBUG: [] MessageGroupId [src] SessionId [src|103ea4e2-c55a668c-9afd2d1b-21cdebd0_0] Write session: aborting 2025-05-07T08:54:08.400885Z :INFO: [] MessageGroupId [src] SessionId [src|103ea4e2-c55a668c-9afd2d1b-21cdebd0_0] Write session: gracefully shut down, all writes complete 2025-05-07T08:54:08.400916Z :DEBUG: [] MessageGroupId [src] SessionId [src|103ea4e2-c55a668c-9afd2d1b-21cdebd0_0] Write session: destroy 2025-05-07T08:54:08.409601Z node 3 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 1 sessionId: src|103ea4e2-c55a668c-9afd2d1b-21cdebd0_0 grpc read done: success: 0 data: 2025-05-07T08:54:08.409633Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 1 sessionId: src|103ea4e2-c55a668c-9afd2d1b-21cdebd0_0 grpc read failed 2025-05-07T08:54:08.409667Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 1 sessionId: src|103ea4e2-c55a668c-9afd2d1b-21cdebd0_0 grpc closed 2025-05-07T08:54:08.409682Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 1 sessionId: src|103ea4e2-c55a668c-9afd2d1b-21cdebd0_0 is DEAD 2025-05-07T08:54:08.410819Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2899: [PQ: 72075186224037892] server disconnected, pipe [3:7501624445670608666:2494] destroyed 2025-05-07T08:54:08.410861Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:138: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::DropOwner. 2025-05-07T08:54:08.410381Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:538: TPartitionWriter 72075186224037892 (partition=0) Received event: NActors::TEvents::TEvPoison ====TYdbPqTestRetryPolicy() ====ExpectBreakDown === Session was created, waiting for retries >>> Ready to answer: ok ====CreateRetryState ====CreateRetryState Initialized Test retry state: get retry delay 2025-05-07T08:54:08.468429Z :NOTICE: [/Root] [] [] Retry to update federation state in 2.000000s 2025-05-07T08:54:10.217071Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7261: Cannot get console configs 2025-05-07T08:54:10.217111Z node 3 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded Test retry state: get retry delay 2025-05-07T08:54:10.471264Z :NOTICE: [/Root] [] [] Retry to update federation state in 2.000000s Test retry state: get retry delay 2025-05-07T08:54:12.472287Z :NOTICE: [/Root] [] [] Retry to update federation state in 2.000000s === In the next federation discovery response dc2 will be available Test retry state: get retry delay 2025-05-07T08:54:14.473329Z :NOTICE: [/Root] [] [] Retry to update federation state in 2.000000s Test retry state: get retry delay 2025-05-07T08:54:16.477756Z :NOTICE: [/Root] [] [] Retry to update federation state in 2.000000s Test retry state: get retry delay 2025-05-07T08:54:18.479260Z :NOTICE: [/Root] [] [] Retry to update federation state in 2.000000s Test retry state: get retry delay 2025-05-07T08:54:20.481445Z :NOTICE: [/Root] [] [] Retry to update federation state in 2.000000s Test retry state: get retry delay 2025-05-07T08:54:22.486112Z :NOTICE: [/Root] [] [] Retry to update federation state in 2.000000s Test retry state: get retry delay 2025-05-07T08:54:24.490155Z :NOTICE: [/Root] [] [] Retry to update federation state in 2.000000s Test retry state: get retry delay 2025-05-07T08:54:26.491208Z :NOTICE: [/Root] [] [] Retry to update federation 
state in 2.000000s Test retry state: get retry delay 2025-05-07T08:54:28.493679Z :NOTICE: [/Root] [] [] Retry to update federation state in 2.000000s Test retry state: get retry delay 2025-05-07T08:54:30.498458Z :NOTICE: [/Root] [] [] Retry to update federation state in 2.000000s Test retry state: get retry delay 2025-05-07T08:54:32.502336Z :NOTICE: [/Root] [] [] Retry to update federation state in 2.000000s Test retry state: get retry delay 2025-05-07T08:54:34.509068Z :NOTICE: [/Root] [] [] Retry to update federation state in 2.000000s 2025-05-07T08:54:36.395436Z node 4 :PERSQUEUE DEBUG: partition.cpp:855: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Topic PartitionStatus PartitionSize: 0 UsedReserveSize: 0 ReserveSize: 0 PartitionConfig{ LifetimeSeconds: 86400 LowWatermark: 8388608 SourceIdLifetimeSeconds: 86400 WriteSpeedInBytesPerSecond: 20000000 BurstSize: 20000000 TotalPartitions: 1 SourceIdMaxCounts: 6000000 } 2025-05-07T08:54:36.394271Z node 3 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:157: [72075186224037893][rt3.dc1--test-topic] TPersQueueReadBalancer::HandleWakeup 2025-05-07T08:54:36.394352Z node 3 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:548: [72075186224037893][rt3.dc1--test-topic] Send TEvPersQueue::TEvStatus TabletId: 72075186224037892 Cookie: 1 2025-05-07T08:54:36.396818Z node 3 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:688: [72075186224037893][rt3.dc1--test-topic] Send TEvPeriodicTopicStats PathId: 13 Generation: 1 StatsReportRound: 1 DataSize: 0 UsedReserveSize: 0 2025-05-07T08:54:36.397389Z node 3 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1823: [72075186224037893][rt3.dc1--test-topic] ProcessPendingStats. PendingUpdates size 1 Test retry state: get retry delay 2025-05-07T08:54:36.514151Z :NOTICE: [/Root] [] [] Retry to update federation state in 2.000000s === Waiting for repair >>> Ready to answer: ok 2025-05-07T08:54:38.518452Z :INFO: [/Root] [] [] Start federated write session to database 'dc2' (previous was ) FederationState: { Status: SUCCESS SelfLocation: "fancy_datacenter" DbInfos: [ { name: "dc1" path: "/Root" id: "account-dc1" endpoint: "localhost:64543" location: "dc1" status: AVAILABLE weight: 1000 } { name: "dc2" path: "/Root" id: "account-dc2" endpoint: "localhost:64543" location: "dc2" status: AVAILABLE weight: 500 } { name: "dc3" path: "/Root" id: "account-dc3" endpoint: "localhost:64543" location: "dc3" status: AVAILABLE weight: 500 } ] ControlPlaneEndpoint: cp.logbroker-federation:2135 } === Closing the session 2025-05-07T08:54:38.535959Z :DEBUG: [/Root] TraceId [] SessionId [] MessageGroupId [src_id] Write session: try to update token 2025-05-07T08:54:38.540071Z :INFO: [/Root] TraceId [] SessionId [] MessageGroupId [src_id] Start write session. Will connect to nodeId: 0 2025-05-07T08:54:38.545624Z :INFO: [/Root] TraceId [] SessionId [] MessageGroupId [src_id] Write session: close. 
Timeout 0.000000s 2025-05-07T08:54:38.547417Z :INFO: [/Root] TraceId [] SessionId [] MessageGroupId [src_id] Write session will now close 2025-05-07T08:54:38.547535Z :DEBUG: [/Root] TraceId [] SessionId [] MessageGroupId [src_id] Write session: aborting 2025-05-07T08:54:38.547944Z :INFO: [/Root] TraceId [] SessionId [] MessageGroupId [src_id] Write session: gracefully shut down, all writes complete 2025-05-07T08:54:38.548003Z :DEBUG: [/Root] TraceId [] SessionId [] MessageGroupId [src_id] Write session: destroy 2025-05-07T08:54:38.558195Z node 3 :PQ_WRITE_PROXY DEBUG: grpc_pq_write.h:107: new grpc connection 2025-05-07T08:54:38.558234Z node 3 :PQ_WRITE_PROXY DEBUG: grpc_pq_write.h:141: new session created cookie 2 2025-05-07T08:54:38.564109Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 2 sessionId: grpc closed 2025-05-07T08:54:38.564143Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 2 sessionId: is DEAD 2025-05-07T08:54:39.069684Z node 3 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1073: TxId: 281474976715734, task: 1, CA Id [3:7501624578814596144:2753]. Got EvDeliveryProblem, TabletId: 72075186224037890, NotDelivered: 0 2025-05-07T08:54:39.103735Z node 3 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1073: TxId: 281474976715734, task: 1, CA Id [3:7501624578814596144:2753]. Got EvDeliveryProblem, TabletId: 72075186224037890, NotDelivered: 1 2025-05-07T08:54:39.159326Z node 3 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1073: TxId: 281474976715734, task: 1, CA Id [3:7501624578814596144:2753]. Got EvDeliveryProblem, TabletId: 72075186224037890, NotDelivered: 1 2025-05-07T08:54:39.238969Z node 3 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1073: TxId: 281474976715734, task: 1, CA Id [3:7501624578814596144:2753]. Got EvDeliveryProblem, TabletId: 72075186224037890, NotDelivered: 1 2025-05-07T08:54:39.320955Z node 3 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1073: TxId: 281474976715734, task: 1, CA Id [3:7501624578814596144:2753]. 
Got EvDeliveryProblem, TabletId: 72075186224037890, NotDelivered: 1 >> DataStreams::TestGetRecords1MBMessagesOneByOneByTS [GOOD] >> DataStreams::TestGetRecordsStreamWithMultipleShards |90.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/fq/libs/common/ut/ydb-core-fq-libs-common-ut |90.3%| [LD] {RESULT} $(B)/ydb/core/fq/libs/common/ut/ydb-core-fq-libs-common-ut |90.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/fq/libs/common/ut/ydb-core-fq-libs-common-ut >> ResourcePoolsDdl::TestPoolSwitchToUnlimitedState >> THiveTest::TestFollowerPromotion [GOOD] >> THiveTest::TestFollowerPromotionFollowerDies >> KqpQueryPerf::MultiRead+QueryService [GOOD] >> TKeyValueTest::TestGetStatusWorks [GOOD] |90.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/ut/query/ydb-core-kqp-ut-query >> KqpWorkloadServiceTables::TestTablesIsNotCreatingForUnlimitedPool [GOOD] >> ResourcePoolClassifiersDdl::TestCreateResourcePoolClassifier |90.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/query/ydb-core-kqp-ut-query |90.3%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/query/ydb-core-kqp-ut-query >> BuildStatsHistogram::Three_Mixed_Small_2_Levels_3_Buckets [GOOD] >> BuildStatsHistogram::Three_Serial_Small_2_Levels >> KqpQueryPerf::Upsert-QueryService+UseSink [GOOD] >> BuildStatsHistogram::Three_Serial_Small_2_Levels [GOOD] >> BuildStatsHistogram::Three_Serial_Small_2_Levels_3_Buckets |90.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_data_erasure/ydb-core-tx-schemeshard-ut_data_erasure ------- [TM] {asan, default-linux-x86_64, release} ydb/core/keyvalue/ut/unittest >> TKeyValueTest::TestGetStatusWorks [GOOD] Test command err: Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:54:2057] recipient: [1:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:54:2057] recipient: [1:51:2095] Leader for TabletID 72057594037927937 is [1:56:2097] sender: [1:57:2057] recipient: [1:51:2095] Leader for TabletID 72057594037927937 is [1:56:2097] sender: [1:74:2057] recipient: [1:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:54:2057] recipient: [2:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:54:2057] recipient: [2:51:2095] Leader for TabletID 72057594037927937 is [2:56:2097] sender: [2:57:2057] recipient: [2:51:2095] Leader for TabletID 72057594037927937 is [2:56:2097] sender: [2:74:2057] recipient: [2:14:2061] !Reboot 72057594037927937 (actor [2:56:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [2:56:2097] sender: [2:76:2057] recipient: [2:36:2083] Leader for TabletID 72057594037927937 is [2:56:2097] sender: [2:79:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [2:56:2097] sender: [2:80:2057] recipient: [2:78:2110] Leader for TabletID 72057594037927937 is [2:81:2111] sender: [2:82:2057] recipient: [2:78:2110] !Reboot 72057594037927937 (actor [2:56:2097]) rebooted! !Reboot 72057594037927937 (actor [2:56:2097]) tablet resolver refreshed! 
new actor is[2:81:2111] Leader for TabletID 72057594037927937 is [2:81:2111] sender: [2:135:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:54:2057] recipient: [3:52:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:54:2057] recipient: [3:52:2095] Leader for TabletID 72057594037927937 is [3:56:2097] sender: [3:57:2057] recipient: [3:52:2095] Leader for TabletID 72057594037927937 is [3:56:2097] sender: [3:74:2057] recipient: [3:14:2061] !Reboot 72057594037927937 (actor [3:56:2097]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [3:56:2097] sender: [3:76:2057] recipient: [3:36:2083] Leader for TabletID 72057594037927937 is [3:56:2097] sender: [3:78:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [3:56:2097] sender: [3:80:2057] recipient: [3:79:2110] Leader for TabletID 72057594037927937 is [3:81:2111] sender: [3:82:2057] recipient: [3:79:2110] !Reboot 72057594037927937 (actor [3:56:2097]) rebooted! !Reboot 72057594037927937 (actor [3:56:2097]) tablet resolver refreshed! new actor is[3:81:2111] Leader for TabletID 72057594037927937 is [3:81:2111] sender: [3:135:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:54:2057] recipient: [4:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:54:2057] recipient: [4:51:2095] Leader for TabletID 72057594037927937 is [4:56:2097] sender: [4:57:2057] recipient: [4:51:2095] Leader for TabletID 72057594037927937 is [4:56:2097] sender: [4:74:2057] recipient: [4:14:2061] !Reboot 72057594037927937 (actor [4:56:2097]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [4:56:2097] sender: [4:77:2057] recipient: [4:36:2083] Leader for TabletID 72057594037927937 is [4:56:2097] sender: [4:79:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [4:56:2097] sender: [4:81:2057] recipient: [4:80:2110] Leader for TabletID 72057594037927937 is [4:82:2111] sender: [4:83:2057] recipient: [4:80:2110] !Reboot 72057594037927937 (actor [4:56:2097]) rebooted! !Reboot 72057594037927937 (actor [4:56:2097]) tablet resolver refreshed! new actor is[4:82:2111] Leader for TabletID 72057594037927937 is [4:82:2111] sender: [4:136:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:54:2057] recipient: [5:50:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:54:2057] recipient: [5:50:2095] Leader for TabletID 72057594037927937 is [5:56:2097] sender: [5:57:2057] recipient: [5:50:2095] Leader for TabletID 72057594037927937 is [5:56:2097] sender: [5:74:2057] recipient: [5:14:2061] !Reboot 72057594037927937 (actor [5:56:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [5:56:2097] sender: [5:80:2057] recipient: [5:36:2083] Leader for TabletID 72057594037927937 is [5:56:2097] sender: [5:83:2057] recipient: [5:82:2113] Leader for TabletID 72057594037927937 is [5:56:2097] sender: [5:84:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [5:85:2114] sender: [5:86:2057] recipient: [5:82:2113] !Reboot 72057594037927937 (actor [5:56:2097]) rebooted! !Reboot 72057594037927937 (actor [5:56:2097]) tablet resolver refreshed! 
new actor is[5:85:2114] Leader for TabletID 72057594037927937 is [5:85:2114] sender: [5:139:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:54:2057] recipient: [6:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:54:2057] recipient: [6:51:2095] Leader for TabletID 72057594037927937 is [6:56:2097] sender: [6:57:2057] recipient: [6:51:2095] Leader for TabletID 72057594037927937 is [6:56:2097] sender: [6:74:2057] recipient: [6:14:2061] !Reboot 72057594037927937 (actor [6:56:2097]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [6:56:2097] sender: [6:80:2057] recipient: [6:36:2083] Leader for TabletID 72057594037927937 is [6:56:2097] sender: [6:83:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [6:56:2097] sender: [6:84:2057] recipient: [6:82:2113] Leader for TabletID 72057594037927937 is [6:85:2114] sender: [6:86:2057] recipient: [6:82:2113] !Reboot 72057594037927937 (actor [6:56:2097]) rebooted! !Reboot 72057594037927937 (actor [6:56:2097]) tablet resolver refreshed! new actor is[6:85:2114] Leader for TabletID 72057594037927937 is [6:85:2114] sender: [6:139:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:54:2057] recipient: [7:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:54:2057] recipient: [7:51:2095] Leader for TabletID 72057594037927937 is [7:56:2097] sender: [7:57:2057] recipient: [7:51:2095] Leader for TabletID 72057594037927937 is [7:56:2097] sender: [7:74:2057] recipient: [7:14:2061] !Reboot 72057594037927937 (actor [7:56:2097]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [7:56:2097] sender: [7:81:2057] recipient: [7:36:2083] Leader for TabletID 72057594037927937 is [7:56:2097] sender: [7:84:2057] recipient: [7:83:2113] Leader for TabletID 72057594037927937 is [7:56:2097] sender: [7:85:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [7:86:2114] sender: [7:87:2057] recipient: [7:83:2113] !Reboot 72057594037927937 (actor [7:56:2097]) rebooted! !Reboot 72057594037927937 (actor [7:56:2097]) tablet resolver refreshed! new actor is[7:86:2114] Leader for TabletID 72057594037927937 is [7:86:2114] sender: [7:140:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:54:2057] recipient: [8:50:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:54:2057] recipient: [8:50:2095] Leader for TabletID 72057594037927937 is [8:56:2097] sender: [8:57:2057] recipient: [8:50:2095] Leader for TabletID 72057594037927937 is [8:56:2097] sender: [8:74:2057] recipient: [8:14:2061] !Reboot 72057594037927937 (actor [8:56:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [8:56:2097] sender: [8:83:2057] recipient: [8:36:2083] Leader for TabletID 72057594037927937 is [8:56:2097] sender: [8:86:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [8:56:2097] sender: [8:87:2057] recipient: [8:85:2115] Leader for TabletID 72057594037927937 is [8:88:2116] sender: [8:89:2057] recipient: [8:85:2115] !Reboot 72057594037927937 (actor [8:56:2097]) rebooted! !Reboot 72057594037927937 (actor [8:56:2097]) tablet resolver refreshed! 
new actor is[8:88:2116] Leader for TabletID 72057594037927937 is [8:88:2116] sender: [8:142:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:54:2057] recipient: [9:52:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:54:2057] recipient: [9:52:2095] Leader for TabletID 72057594037927937 is [9:56:2097] sender: [9:57:2057] recipient: [9:52:2095] Leader for TabletID 72057594037927937 is [9:56:2097] sender: [9:74:2057] recipient: [9:14:2061] !Reboot 72057594037927937 (actor [9:56:2097]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [9:56:2097] sender: [9:83:2057] recipient: [9:36:2083] Leader for TabletID 72057594037927937 is [9:56:2097] sender: [9:86:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [9:56:2097] sender: [9:87:2057] recipient: [9:85:2115] Leader for TabletID 72057594037927937 is [9:88:2116] sender: [9:89:2057] recipient: [9:85:2115] !Reboot 72057594037927937 (actor [9:56:2097]) rebooted! !Reboot 72057594037927937 (actor [9:56:2097]) tablet resolver refreshed! new actor is[9:88:2116] Leader for TabletID 72057594037927937 is [9:88:2116] sender: [9:142:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:54:2057] recipient: [10:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:54:2057] recipient: [10:51:2095] Leader for TabletID 72057594037927937 is [10:56:2097] sender: [10:57:2057] recipient: [10:51:2095] Leader for TabletID 72057594037927937 is [10:56:2097] sender: [10:74:2057] recipient: [10:14:2061] !Reboot 72057594037927937 (actor [10:56:2097]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [10:56:2097] sender: [10:84:2057] recipient: [10:36:2083] Leader for TabletID 72057594037927937 is [10:56:2097] sender: [10:87:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [10:56:2097] sender: [10:88:2057] recipient: [10:86:2115] Leader for TabletID 72057594037927937 is [10:89:2116] sender: [10:90:2057] recipient: [10:86:2115] !Reboot 72057594037927937 (actor [10:56:2097]) rebooted! !Reboot 72057594037927937 (actor [10:56:2097]) tablet resolver refreshed! new actor is[10:89:2116] Leader for TabletID 72057594037927937 is [10:89:2116] sender: [10:143:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:54:2057] recipient: [11:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:54:2057] recipient: [11:51:2095] Leader for TabletID 72057594037927937 is [11:56:2097] sender: [11:57:2057] recipient: [11:51:2095] Leader for TabletID 72057594037927937 is [11:56:2097] sender: [11:74:2057] recipient: [11:14:2061] !Reboot 72057594037927937 (actor [11:56:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [11:56:2097] sender: [11:86:2057] recipient: [11:36:2083] Leader for TabletID 72057594037927937 is [11:56:2097] sender: [11:88:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [11:56:2097] sender: [11:90:2057] recipient: [11:89:2117] Leader for TabletID 72057594037927937 is [11:91:2118] sender: [11:92:2057] recipient: [11:89:2117] !Reboot 72057594037927937 (actor [11:56:2097]) rebooted! !Reboot 72057594037927937 (actor [11:56:2097]) tablet resolver refreshed! 
new actor is[11:91:2118] Leader for TabletID 72057594037927937 is [11:91:2118] sender: [11:145:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:54:2057] recipient: [12:52:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:54:2057] recipient: [12:52:2095] Leader for TabletID 72057594037927937 is [12:56:2097] sender: [12:57:2057] recipient: [12:52:2095] Leader for TabletID 72057594037927937 is [12:56:2097] sender: [12:74:2057] recipient: [12:14:2061] !Reboot 72057594037927937 (acto ... or TabletID 72057594037927937 is [29:56:2097] sender: [29:88:2057] recipient: [29:14:2061] Leader for TabletID 72057594037927937 is [29:56:2097] sender: [29:90:2057] recipient: [29:89:2117] Leader for TabletID 72057594037927937 is [29:91:2118] sender: [29:92:2057] recipient: [29:89:2117] !Reboot 72057594037927937 (actor [29:56:2097]) rebooted! !Reboot 72057594037927937 (actor [29:56:2097]) tablet resolver refreshed! new actor is[29:91:2118] Leader for TabletID 72057594037927937 is [29:91:2118] sender: [29:145:2057] recipient: [29:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [30:54:2057] recipient: [30:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [30:54:2057] recipient: [30:51:2095] Leader for TabletID 72057594037927937 is [30:56:2097] sender: [30:57:2057] recipient: [30:51:2095] Leader for TabletID 72057594037927937 is [30:56:2097] sender: [30:74:2057] recipient: [30:14:2061] !Reboot 72057594037927937 (actor [30:56:2097]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [30:56:2097] sender: [30:87:2057] recipient: [30:36:2083] Leader for TabletID 72057594037927937 is [30:56:2097] sender: [30:90:2057] recipient: [30:14:2061] Leader for TabletID 72057594037927937 is [30:56:2097] sender: [30:91:2057] recipient: [30:89:2117] Leader for TabletID 72057594037927937 is [30:92:2118] sender: [30:93:2057] recipient: [30:89:2117] !Reboot 72057594037927937 (actor [30:56:2097]) rebooted! !Reboot 72057594037927937 (actor [30:56:2097]) tablet resolver refreshed! new actor is[30:92:2118] Leader for TabletID 72057594037927937 is [30:92:2118] sender: [30:146:2057] recipient: [30:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [31:54:2057] recipient: [31:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [31:54:2057] recipient: [31:51:2095] Leader for TabletID 72057594037927937 is [31:56:2097] sender: [31:57:2057] recipient: [31:51:2095] Leader for TabletID 72057594037927937 is [31:56:2097] sender: [31:74:2057] recipient: [31:14:2061] !Reboot 72057594037927937 (actor [31:56:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [31:56:2097] sender: [31:90:2057] recipient: [31:36:2083] Leader for TabletID 72057594037927937 is [31:56:2097] sender: [31:92:2057] recipient: [31:14:2061] Leader for TabletID 72057594037927937 is [31:56:2097] sender: [31:94:2057] recipient: [31:93:2120] Leader for TabletID 72057594037927937 is [31:95:2121] sender: [31:96:2057] recipient: [31:93:2120] !Reboot 72057594037927937 (actor [31:56:2097]) rebooted! !Reboot 72057594037927937 (actor [31:56:2097]) tablet resolver refreshed! 
new actor is[31:95:2121] Leader for TabletID 72057594037927937 is [31:95:2121] sender: [31:149:2057] recipient: [31:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [32:54:2057] recipient: [32:52:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [32:54:2057] recipient: [32:52:2095] Leader for TabletID 72057594037927937 is [32:56:2097] sender: [32:57:2057] recipient: [32:52:2095] Leader for TabletID 72057594037927937 is [32:56:2097] sender: [32:74:2057] recipient: [32:14:2061] !Reboot 72057594037927937 (actor [32:56:2097]) on event NKikimr::TEvKeyValue::TEvRead ! Leader for TabletID 72057594037927937 is [32:56:2097] sender: [32:90:2057] recipient: [32:36:2083] Leader for TabletID 72057594037927937 is [32:56:2097] sender: [32:93:2057] recipient: [32:14:2061] Leader for TabletID 72057594037927937 is [32:56:2097] sender: [32:94:2057] recipient: [32:92:2120] Leader for TabletID 72057594037927937 is [32:95:2121] sender: [32:96:2057] recipient: [32:92:2120] !Reboot 72057594037927937 (actor [32:56:2097]) rebooted! !Reboot 72057594037927937 (actor [32:56:2097]) tablet resolver refreshed! new actor is[32:95:2121] Leader for TabletID 72057594037927937 is [32:95:2121] sender: [32:149:2057] recipient: [32:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [33:54:2057] recipient: [33:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [33:54:2057] recipient: [33:51:2095] Leader for TabletID 72057594037927937 is [33:56:2097] sender: [33:57:2057] recipient: [33:51:2095] Leader for TabletID 72057594037927937 is [33:56:2097] sender: [33:74:2057] recipient: [33:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [34:54:2057] recipient: [34:50:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [34:54:2057] recipient: [34:50:2095] Leader for TabletID 72057594037927937 is [34:56:2097] sender: [34:57:2057] recipient: [34:50:2095] Leader for TabletID 72057594037927937 is [34:56:2097] sender: [34:74:2057] recipient: [34:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [35:54:2057] recipient: [35:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [35:54:2057] recipient: [35:51:2095] Leader for TabletID 72057594037927937 is [35:56:2097] sender: [35:57:2057] recipient: [35:51:2095] Leader for TabletID 72057594037927937 is [35:56:2097] sender: [35:74:2057] recipient: [35:14:2061] !Reboot 72057594037927937 (actor [35:56:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [35:56:2097] sender: [35:76:2057] recipient: [35:36:2083] Leader for TabletID 72057594037927937 is [35:56:2097] sender: [35:79:2057] recipient: [35:78:2110] Leader for TabletID 72057594037927937 is [35:56:2097] sender: [35:80:2057] recipient: [35:14:2061] Leader for TabletID 72057594037927937 is [35:81:2111] sender: [35:82:2057] recipient: [35:78:2110] !Reboot 72057594037927937 (actor [35:56:2097]) rebooted! !Reboot 72057594037927937 (actor [35:56:2097]) tablet resolver refreshed! 
new actor is[35:81:2111] Leader for TabletID 72057594037927937 is [35:81:2111] sender: [35:135:2057] recipient: [35:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [36:54:2057] recipient: [36:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [36:54:2057] recipient: [36:51:2095] Leader for TabletID 72057594037927937 is [36:56:2097] sender: [36:57:2057] recipient: [36:51:2095] Leader for TabletID 72057594037927937 is [36:56:2097] sender: [36:74:2057] recipient: [36:14:2061] !Reboot 72057594037927937 (actor [36:56:2097]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [36:56:2097] sender: [36:76:2057] recipient: [36:36:2083] Leader for TabletID 72057594037927937 is [36:56:2097] sender: [36:78:2057] recipient: [36:14:2061] Leader for TabletID 72057594037927937 is [36:56:2097] sender: [36:80:2057] recipient: [36:79:2110] Leader for TabletID 72057594037927937 is [36:81:2111] sender: [36:82:2057] recipient: [36:79:2110] !Reboot 72057594037927937 (actor [36:56:2097]) rebooted! !Reboot 72057594037927937 (actor [36:56:2097]) tablet resolver refreshed! new actor is[36:81:2111] Leader for TabletID 72057594037927937 is [36:81:2111] sender: [36:135:2057] recipient: [36:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [37:54:2057] recipient: [37:50:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [37:54:2057] recipient: [37:50:2095] Leader for TabletID 72057594037927937 is [37:56:2097] sender: [37:57:2057] recipient: [37:50:2095] Leader for TabletID 72057594037927937 is [37:56:2097] sender: [37:74:2057] recipient: [37:14:2061] !Reboot 72057594037927937 (actor [37:56:2097]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [37:56:2097] sender: [37:77:2057] recipient: [37:36:2083] Leader for TabletID 72057594037927937 is [37:56:2097] sender: [37:80:2057] recipient: [37:14:2061] Leader for TabletID 72057594037927937 is [37:56:2097] sender: [37:81:2057] recipient: [37:79:2110] Leader for TabletID 72057594037927937 is [37:82:2111] sender: [37:83:2057] recipient: [37:79:2110] !Reboot 72057594037927937 (actor [37:56:2097]) rebooted! !Reboot 72057594037927937 (actor [37:56:2097]) tablet resolver refreshed! new actor is[37:82:2111] Leader for TabletID 72057594037927937 is [37:82:2111] sender: [37:136:2057] recipient: [37:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [38:54:2057] recipient: [38:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [38:54:2057] recipient: [38:51:2095] Leader for TabletID 72057594037927937 is [38:56:2097] sender: [38:57:2057] recipient: [38:51:2095] Leader for TabletID 72057594037927937 is [38:56:2097] sender: [38:74:2057] recipient: [38:14:2061] !Reboot 72057594037927937 (actor [38:56:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [38:56:2097] sender: [38:79:2057] recipient: [38:36:2083] Leader for TabletID 72057594037927937 is [38:56:2097] sender: [38:82:2057] recipient: [38:81:2112] Leader for TabletID 72057594037927937 is [38:56:2097] sender: [38:83:2057] recipient: [38:14:2061] Leader for TabletID 72057594037927937 is [38:84:2113] sender: [38:85:2057] recipient: [38:81:2112] !Reboot 72057594037927937 (actor [38:56:2097]) rebooted! !Reboot 72057594037927937 (actor [38:56:2097]) tablet resolver refreshed! 
new actor is[38:84:2113] Leader for TabletID 72057594037927937 is [38:84:2113] sender: [38:138:2057] recipient: [38:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [39:54:2057] recipient: [39:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [39:54:2057] recipient: [39:51:2095] Leader for TabletID 72057594037927937 is [39:56:2097] sender: [39:57:2057] recipient: [39:51:2095] Leader for TabletID 72057594037927937 is [39:56:2097] sender: [39:74:2057] recipient: [39:14:2061] !Reboot 72057594037927937 (actor [39:56:2097]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [39:56:2097] sender: [39:79:2057] recipient: [39:36:2083] Leader for TabletID 72057594037927937 is [39:56:2097] sender: [39:81:2057] recipient: [39:14:2061] Leader for TabletID 72057594037927937 is [39:56:2097] sender: [39:83:2057] recipient: [39:82:2112] Leader for TabletID 72057594037927937 is [39:84:2113] sender: [39:85:2057] recipient: [39:82:2112] !Reboot 72057594037927937 (actor [39:56:2097]) rebooted! !Reboot 72057594037927937 (actor [39:56:2097]) tablet resolver refreshed! new actor is[39:84:2113] Leader for TabletID 72057594037927937 is [39:84:2113] sender: [39:138:2057] recipient: [39:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [40:54:2057] recipient: [40:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [40:54:2057] recipient: [40:51:2095] Leader for TabletID 72057594037927937 is [40:56:2097] sender: [40:57:2057] recipient: [40:51:2095] Leader for TabletID 72057594037927937 is [40:56:2097] sender: [40:74:2057] recipient: [40:14:2061] !Reboot 72057594037927937 (actor [40:56:2097]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [40:56:2097] sender: [40:80:2057] recipient: [40:36:2083] Leader for TabletID 72057594037927937 is [40:56:2097] sender: [40:83:2057] recipient: [40:14:2061] Leader for TabletID 72057594037927937 is [40:56:2097] sender: [40:84:2057] recipient: [40:82:2112] Leader for TabletID 72057594037927937 is [40:85:2113] sender: [40:86:2057] recipient: [40:82:2112] !Reboot 72057594037927937 (actor [40:56:2097]) rebooted! !Reboot 72057594037927937 (actor [40:56:2097]) tablet resolver refreshed! 
new actor is[40:85:2113] Leader for TabletID 72057594037927937 is [40:85:2113] sender: [40:139:2057] recipient: [40:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [41:54:2057] recipient: [41:52:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [41:54:2057] recipient: [41:52:2095] Leader for TabletID 72057594037927937 is [41:56:2097] sender: [41:57:2057] recipient: [41:52:2095] Leader for TabletID 72057594037927937 is [41:56:2097] sender: [41:74:2057] recipient: [41:14:2061] |90.3%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_data_erasure/ydb-core-tx-schemeshard-ut_data_erasure |90.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_data_erasure/ydb-core-tx-schemeshard-ut_data_erasure >> BuildStatsHistogram::Three_Serial_Small_2_Levels_3_Buckets [GOOD] >> BuildStatsHistogram::Three_Serial_Small_1_Level >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-64 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-65 >> TPart::State [GOOD] >> TPart::Trivials [GOOD] >> TPart::Basics [GOOD] >> TPart::CellDefaults [GOOD] >> TPart::Matter [GOOD] >> TPart::External [GOOD] >> TPart::Outer [GOOD] >> TPart::MassCheck |90.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/sys_view/ut/ydb-core-sys_view-ut |90.3%| [LD] {RESULT} $(B)/ydb/core/sys_view/ut/ydb-core-sys_view-ut |90.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/sys_view/ut/ydb-core-sys_view-ut ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/perf/unittest >> KqpQueryPerf::MultiRead+QueryService [GOOD] Test command err: Trying to start YDB, gRPC: 18457, MsgBus: 1147 2025-05-07T08:54:36.434068Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501624565967061707:2060];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:54:36.434195Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0027c4/r3tmp/tmpNtlR7h/pdisk_1.dat 2025-05-07T08:54:36.955552Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:54:36.968103Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:54:36.968213Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:54:36.972879Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 18457, node 1 2025-05-07T08:54:37.114651Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:54:37.114679Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:54:37.114686Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:54:37.114797Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:1147 TClient is connected to server localhost:1147 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:54:37.767004Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:54:37.796960Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T08:54:37.817248Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-05-07T08:54:37.997116Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 2025-05-07T08:54:38.173624Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:54:38.260918Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:54:40.052656Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501624583146932558:2406], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:54:40.052802Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:54:40.512316Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T08:54:40.566148Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T08:54:40.640530Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T08:54:40.672214Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T08:54:40.702046Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T08:54:40.785089Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T08:54:40.877146Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T08:54:40.984057Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501624583146933227:2470], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:54:40.984127Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:54:40.984315Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501624583146933232:2473], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:54:40.988139Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-05-07T08:54:40.999649Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501624583146933234:2474], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T08:54:41.091958Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501624587441900583:3422] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:54:41.436094Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501624565967061707:2060];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:54:41.436189Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> BuildStatsHistogram::Three_Serial_Small_1_Level [GOOD] >> BuildStatsHistogram::Three_Serial_Small_0_Levels >> BuildStatsHistogram::Three_Serial_Small_0_Levels [GOOD] >> BuildStatsMixedIndex::Single >> DataStreams::TestReservedConsumersMetering [GOOD] >> TPart::MassCheck [GOOD] >> TPart::WreckPart >> KqpQueryPerf::Delete+QueryService-UseSink [GOOD] >> KqpQueryPerf::Delete+QueryService+UseSink >> BuildStatsMixedIndex::Single [GOOD] >> BuildStatsMixedIndex::Single_Slices >> BuildStatsMixedIndex::Single_Slices [GOOD] >> BuildStatsMixedIndex::Single_History >> THiveTest::TestFollowerPromotionFollowerDies [GOOD] >> THiveTest::TestHiveBalancer >> THiveTest::TestHiveBalancerWithPrefferedDC2 [GOOD] >> THiveTest::TestHiveBalancerWithPreferredDC3 >> ResourcePoolsDdl::TestCreateResourcePool [GOOD] >> ResourcePoolsDdl::TestCreateResourcePoolOnServerless >> KqpWorkloadServiceSubscriptions::TestResourcePoolSubscriptionAfterDrop [GOOD] >> KqpWorkloadServiceTables::TestCreateWorkloadSerivceTables ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/perf/unittest >> KqpQueryPerf::Upsert-QueryService+UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 9237, MsgBus: 9854 2025-05-07T08:54:36.794642Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501624565536737585:2069];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:54:36.811115Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0027bd/r3tmp/tmprSC0XS/pdisk_1.dat 2025-05-07T08:54:37.552427Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:54:37.562017Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:54:37.562152Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:54:37.564552Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 9237, node 1 2025-05-07T08:54:37.654182Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:54:37.654214Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:54:37.654228Z node 1 
:NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:54:37.654340Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:9854 TClient is connected to server localhost:9854 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:54:38.509651Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:54:38.571345Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:54:38.803857Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:54:39.011224Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:54:39.176809Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:54:41.300080Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501624587011575717:2408], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:54:41.300196Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:54:41.704987Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T08:54:41.767038Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T08:54:41.804408Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501624565536737585:2069];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:54:41.804522Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:54:41.831202Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T08:54:41.901121Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T08:54:41.975806Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T08:54:42.052194Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T08:54:42.099256Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T08:54:42.173732Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501624591306543671:2472], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:54:42.173834Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:54:42.174253Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501624591306543676:2475], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:54:42.181665Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-05-07T08:54:42.193890Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501624591306543678:2476], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T08:54:42.298631Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501624591306543732:3420] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } >> BuildStatsMixedIndex::Single_History [GOOD] >> BuildStatsMixedIndex::Single_History_Slices >> KqpWorkloadService::WorkloadServiceDisabledByFeatureFlag [GOOD] >> KqpWorkloadService::WorkloadServiceDisabledByFeatureFlagOnServerless >> YdbIndexTable::MultiShardTableUniqAndNonUniqIndex [GOOD] >> YdbIndexTable::MultiShardTableTwoIndexes ------- [TM] {asan, default-linux-x86_64, release} ydb/services/datastreams/ut/unittest >> DataStreams::TestReservedConsumersMetering [GOOD] Test command err: 2025-05-07T08:54:13.015325Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501624469522175569:2073];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:54:13.015401Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002810/r3tmp/tmpqrHJ50/pdisk_1.dat 2025-05-07T08:54:14.002235Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:54:14.028925Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:54:14.029011Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:54:14.030154Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:54:14.035749Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 25850, node 1 2025-05-07T08:54:14.374799Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:54:14.374823Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:54:14.374829Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:54:14.374946Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8695 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:54:14.900963Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:54:15.055879Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710658:0, at schemeshard: 72057594046644480 TClient is connected to server localhost:8695 2025-05-07T08:54:15.424774Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:54:15.454529Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710659, at schemeshard: 72057594046644480 2025-05-07T08:54:16.106481Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976710661:0, at schemeshard: 72057594046644480 encryption_type: NONE records { sequence_number: "0" shard_id: "shard-000001" } records { sequence_number: "0" shard_id: "shard-000009" } records { sequence_number: "0" shard_id: "shard-000004" } records { sequence_number: "0" shard_id: "shard-000005" } records { sequence_number: "0" shard_id: "shard-000008" } records { sequence_number: "1" shard_id: "shard-000004" } records { sequence_number: "2" shard_id: "shard-000004" } records { sequence_number: "1" shard_id: "shard-000005" } records { sequence_number: "1" shard_id: "shard-000001" } records { sequence_number: "1" shard_id: "shard-000009" } records { sequence_number: "0" shard_id: "shard-000006" } records { sequence_number: "2" shard_id: "shard-000001" } records { sequence_number: "0" shard_id: "shard-000007" } records { sequence_number: "1" shard_id: "shard-000007" } records { sequence_number: "0" shard_id: "shard-000000" } records { sequence_number: "2" shard_id: "shard-000007" } records { sequence_number: "3" shard_id: "shard-000004" } records { sequence_number: "2" shard_id: "shard-000005" } records { sequence_number: "0" shard_id: "shard-000003" } records { sequence_number: "2" shard_id: "shard-000009" } records { sequence_number: "1" shard_id: "shard-000008" } records { sequence_number: "1" shard_id: "shard-000000" } records { sequence_number: "1" 
shard_id: "shard-000006" } records { sequence_number: "2" shard_id: "shard-000000" } records { sequence_number: "3" shard_id: "shard-000009" } records { sequence_number: "3" shard_id: "shard-000001" } records { sequence_number: "4" shard_id: "shard-000009" } records { sequence_number: "4" shard_id: "shard-000004" } records { sequence_number: "3" shard_id: "shard-000000" } records { sequence_number: "4" shard_id: "shard-000001" } encryption_type: NONE records { sequence_number: "5" shard_id: "shard-000001" } records { sequence_number: "5" shard_id: "shard-000009" } records { sequence_number: "5" shard_id: "shard-000004" } records { sequence_number: "3" shard_id: "shard-000005" } records { sequence_number: "2" shard_id: "shard-000008" } records { sequence_number: "6" shard_id: "shard-000004" } records { sequence_number: "7" shard_id: "shard-000004" } records { sequence_number: "4" shard_id: "shard-000005" } records { sequence_number: "6" shard_id: "shard-000001" } records { sequence_number: "6" shard_id: "shard-000009" } records { sequence_number: "2" shard_id: "shard-000006" } records { sequence_number: "7" shard_id: "shard-000001" } records { sequence_number: "3" shard_id: "shard-000007" } records { sequence_number: "4" shard_id: "shard-000007" } records { sequence_number: "4" shard_id: "shard-000000" } records { sequence_number: "5" shard_id: "shard-000007" } records { sequence_number: "8" shard_id: "shard-000004" } records { sequence_number: "5" shard_id: "shard-000005" } records { sequence_number: "1" shard_id: "shard-000003" } records { sequence_number: "7" shard_id: "shard-000009" } records { sequence_number: "3" shard_id: "shard-000008" } records { sequence_number: "5" shard_id: "shard-000000" } records { sequence_number: "3" shard_id: "shard-000006" } records { sequence_number: "6" shard_id: "shard-000000" } records { sequence_number: "8" shard_id: "shard-000009" } records { sequence_number: "8" shard_id: "shard-000001" } records { sequence_number: "9" shard_id: "shard-000009" } records { sequence_number: "9" shard_id: "shard-000004" } records { sequence_number: "7" shard_id: "shard-000000" } records { sequence_number: "9" shard_id: "shard-000001" } 2025-05-07T08:54:18.017543Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501624469522175569:2073];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:54:18.017641Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; encryption_type: NONE records { sequence_number: "10" shard_id: "shard-000001" } records { sequence_number: "10" shard_id: "shard-000009" } records { sequence_number: "10" shard_id: "shard-000004" } records { sequence_number: "6" shard_id: "shard-000005" } records { sequence_number: "4" shard_id: "shard-000008" } records { sequence_number: "11" shard_id: "shard-000004" } records { sequence_number: "12" shard_id: "shard-000004" } records { sequence_number: "7" shard_id: "shard-000005" } records { sequence_number: "11" shard_id: "shard-000001" } records { sequence_number: "11" shard_id: "shard-000009" } records { sequence_number: "4" shard_id: "shard-000006" } records { sequence_number: "12" shard_id: "shard-000001" } records { sequence_number: "6" shard_id: "shard-000007" } records { sequence_number: "7" shard_id: "shard-000007" } records { sequence_number: "8" shard_id: "shard-000000" } records { sequence_number: "8" 
shard_id: "shard-000007" } records { sequence_number: "13" shard_id: "shard-000004" } records { sequence_number: "8" shard_id: "shard-000005" } records { sequence_number: "2" shard_id: "shard-000003" } records { sequence_number: "12" shard_id: "shard-000009" } records { sequence_number: "5" shard_id: "shard-000008" } records { sequence_number: "9" shard_id: "shard-000000" } records { sequence_number: "5" shard_id: "shard-000006" } records { sequence_number: "10" shard_id: "shard-000000" } records { sequence_number: "13" shard_id: "shard-000009" } records { sequence_number: "13" shard_id: "shard-000001" } records { sequence_number: "14" shard_id: "shard-000009" } records { sequence_number: "14" shard_id: "shard-000004" } records { sequence_number: "11" shard_id: "shard-000000" } records { sequence_number: "14" shard_id: "shard-000001" } encryption_type: NONE records { sequence_number: "15" shard_id: "shard-000001" } records { sequence_number: "15" shard_id: "shard-000009" } records { sequence_number: "15" shard_id: "shard-000004" } records { sequence_number: "9" shard_id: "shard-000005" } records { sequence_number: "6" shard_id: "shard-000008" } records { sequence_number: "16" shard_id: "shard-000004" } records { sequence_number: "17" shard_id: "shard-000004" } records { sequence_number: "10" shard_id: "shard-000005" } records { sequence_number: "16" shard_id: "shard-000001" } records { sequence_number: "16" shard_id: "shard-000009" } records { sequence_number: "6" shard_id: "shard-000006" } records { sequence_number: "17" shard_id: "shard-000001" } records { sequence_number: "9" shard_id: "shard-000007" } records { sequence_number: "10" shard_id: "shard-000007" } records { sequence_number: "12" shard_id: "shard-000000" } records { sequence_number: "11" shard_id: "shard-000007" } records { sequence_number: "18" shard_id: "shard-000004" } records { sequence_number: "11" shard_id: "shard-000005" } records { sequence_number: "3" shard_id: "shard-000003" } records { sequence_number: "17" shard_id: "shard-000009" } records { sequence_number: "7" shard_id: "shard-000008" } records { sequence_number: "13" shard_id: "shard-000000" } records { sequence_number: "7" shard_id: "shard-000006" } records { sequence_number: "14" shard_id: "shard-000000" ... 
lder_id":"somefolder","resource_id":"/Root/stream_TestReservedConsumersMetering","id":"yds.reserved_resources-root-72075186224037888-1746608076774-170","schema":"yds.throughput.reserved.v1","tags":{"reserved_throughput_bps":1048576,"reserved_consumers_count":2},"usage":{"quantity":0,"unit":"second","start":1746608076,"finish":1746608076},"labels":{"datastreams_stream_name":"stream_TestReservedConsumersMetering","ydb_database":"root"},"version":"v1","source_id":"72075186224037888","source_wt":1746608076}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestReservedConsumersMetering","id":"yds.reserved_resources-root-72075186224037888-1746608076774-171","schema":"yds.storage.reserved.v1","tags":{},"usage":{"quantity":0,"unit":"mbyte*second","start":1746608076,"finish":1746608076},"labels":{"datastreams_stream_name":"stream_TestReservedConsumersMetering","ydb_database":"root"},"version":"v1","source_id":"72075186224037888","source_wt":1746608076}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestReservedConsumersMetering","id":"used_storage-root-72075186224037888-1746608076774-172","schema":"ydb.serverless.v1","tags":{"ydb_size":0},"usage":{"quantity":0,"unit":"byte*second","start":1746608076,"finish":1746608076},"labels":{"datastreams_stream_name":"stream_TestReservedConsumersMetering","ydb_database":"root"},"version":"1.0.0","source_id":"72075186224037888","source_wt":1746608076}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestReservedConsumersMetering","id":"put_units-root-72075186224037888-1746608076834-173","schema":"yds.events.puts.v1","tags":{},"usage":{"quantity":1,"unit":"put_events","start":1746608076,"finish":1746608077},"labels":{"datastreams_stream_name":"stream_TestReservedConsumersMetering","ydb_database":"root"},"version":"v1","source_id":"72075186224037888","source_wt":1746608077}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestReservedConsumersMetering","id":"yds.reserved_resources-root-72075186224037888-1746608076834-174","schema":"yds.throughput.reserved.v1","tags":{"reserved_throughput_bps":1048576,"reserved_consumers_count":2},"usage":{"quantity":1,"unit":"second","start":1746608076,"finish":1746608077},"labels":{"datastreams_stream_name":"stream_TestReservedConsumersMetering","ydb_database":"root"},"version":"v1","source_id":"72075186224037888","source_wt":1746608077}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestReservedConsumersMetering","id":"yds.reserved_resources-root-72075186224037888-1746608076834-175","schema":"yds.storage.reserved.v1","tags":{},"usage":{"quantity":56320,"unit":"mbyte*second","start":1746608076,"finish":1746608077},"labels":{"datastreams_stream_name":"stream_TestReservedConsumersMetering","ydb_database":"root"},"version":"v1","source_id":"72075186224037888","source_wt":1746608077}' Got line from metering file data: 
'{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestReservedConsumersMetering","id":"used_storage-root-72075186224037888-1746608076834-176","schema":"ydb.serverless.v1","tags":{"ydb_size":0},"usage":{"quantity":1,"unit":"byte*second","start":1746608076,"finish":1746608077},"labels":{"datastreams_stream_name":"stream_TestReservedConsumersMetering","ydb_database":"root"},"version":"1.0.0","source_id":"72075186224037888","source_wt":1746608077}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestReservedConsumersMetering","id":"put_units-root-72075186224037888-1746608077977-177","schema":"yds.events.puts.v1","tags":{},"usage":{"quantity":1,"unit":"put_events","start":1746608077,"finish":1746608079},"labels":{"datastreams_stream_name":"stream_TestReservedConsumersMetering","ydb_database":"root"},"version":"v1","source_id":"72075186224037888","source_wt":1746608079}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestReservedConsumersMetering","id":"yds.reserved_resources-root-72075186224037888-1746608077977-178","schema":"yds.throughput.reserved.v1","tags":{"reserved_throughput_bps":1048576,"reserved_consumers_count":2},"usage":{"quantity":2,"unit":"second","start":1746608077,"finish":1746608079},"labels":{"datastreams_stream_name":"stream_TestReservedConsumersMetering","ydb_database":"root"},"version":"v1","source_id":"72075186224037888","source_wt":1746608079}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestReservedConsumersMetering","id":"yds.reserved_resources-root-72075186224037888-1746608077977-179","schema":"yds.storage.reserved.v1","tags":{},"usage":{"quantity":112640,"unit":"mbyte*second","start":1746608077,"finish":1746608079},"labels":{"datastreams_stream_name":"stream_TestReservedConsumersMetering","ydb_database":"root"},"version":"v1","source_id":"72075186224037888","source_wt":1746608079}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestReservedConsumersMetering","id":"used_storage-root-72075186224037888-1746608077977-180","schema":"ydb.serverless.v1","tags":{"ydb_size":0},"usage":{"quantity":2,"unit":"byte*second","start":1746608077,"finish":1746608079},"labels":{"datastreams_stream_name":"stream_TestReservedConsumersMetering","ydb_database":"root"},"version":"1.0.0","source_id":"72075186224037888","source_wt":1746608079}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestReservedConsumersMetering","id":"put_units-root-72075186224037888-1746608079025-181","schema":"yds.events.puts.v1","tags":{},"usage":{"quantity":1,"unit":"put_events","start":1746608079,"finish":1746608080},"labels":{"datastreams_stream_name":"stream_TestReservedConsumersMetering","ydb_database":"root"},"version":"v1","source_id":"72075186224037888","source_wt":1746608080}' Got line from metering file data: 
'{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestReservedConsumersMetering","id":"yds.reserved_resources-root-72075186224037888-1746608079025-182","schema":"yds.throughput.reserved.v1","tags":{"reserved_throughput_bps":1048576,"reserved_consumers_count":2},"usage":{"quantity":1,"unit":"second","start":1746608079,"finish":1746608080},"labels":{"datastreams_stream_name":"stream_TestReservedConsumersMetering","ydb_database":"root"},"version":"v1","source_id":"72075186224037888","source_wt":1746608080}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestReservedConsumersMetering","id":"yds.reserved_resources-root-72075186224037888-1746608079025-183","schema":"yds.storage.reserved.v1","tags":{},"usage":{"quantity":56320,"unit":"mbyte*second","start":1746608079,"finish":1746608080},"labels":{"datastreams_stream_name":"stream_TestReservedConsumersMetering","ydb_database":"root"},"version":"v1","source_id":"72075186224037888","source_wt":1746608080}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestReservedConsumersMetering","id":"used_storage-root-72075186224037888-1746608079025-184","schema":"ydb.serverless.v1","tags":{"ydb_size":0},"usage":{"quantity":1,"unit":"byte*second","start":1746608079,"finish":1746608080},"labels":{"datastreams_stream_name":"stream_TestReservedConsumersMetering","ydb_database":"root"},"version":"1.0.0","source_id":"72075186224037888","source_wt":1746608080}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestReservedConsumersMetering","id":"put_units-root-72075186224037888-1746608080142-185","schema":"yds.events.puts.v1","tags":{},"usage":{"quantity":1,"unit":"put_events","start":1746608080,"finish":1746608081},"labels":{"datastreams_stream_name":"stream_TestReservedConsumersMetering","ydb_database":"root"},"version":"v1","source_id":"72075186224037888","source_wt":1746608081}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestReservedConsumersMetering","id":"yds.reserved_resources-root-72075186224037888-1746608080142-186","schema":"yds.throughput.reserved.v1","tags":{"reserved_throughput_bps":1048576,"reserved_consumers_count":2},"usage":{"quantity":1,"unit":"second","start":1746608080,"finish":1746608081},"labels":{"datastreams_stream_name":"stream_TestReservedConsumersMetering","ydb_database":"root"},"version":"v1","source_id":"72075186224037888","source_wt":1746608081}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestReservedConsumersMetering","id":"yds.reserved_resources-root-72075186224037888-1746608080142-187","schema":"yds.storage.reserved.v1","tags":{},"usage":{"quantity":56320,"unit":"mbyte*second","start":1746608080,"finish":1746608081},"labels":{"datastreams_stream_name":"stream_TestReservedConsumersMetering","ydb_database":"root"},"version":"v1","source_id":"72075186224037888","source_wt":1746608081}' Got line from metering file data: 
'{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestReservedConsumersMetering","id":"used_storage-root-72075186224037888-1746608080142-188","schema":"ydb.serverless.v1","tags":{"ydb_size":0},"usage":{"quantity":1,"unit":"byte*second","start":1746608080,"finish":1746608081},"labels":{"datastreams_stream_name":"stream_TestReservedConsumersMetering","ydb_database":"root"},"version":"1.0.0","source_id":"72075186224037888","source_wt":1746608081}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestReservedConsumersMetering","id":"put_units-root-72075186224037888-1746608081183-189","schema":"yds.events.puts.v1","tags":{},"usage":{"quantity":1,"unit":"put_events","start":1746608081,"finish":1746608082},"labels":{"datastreams_stream_name":"stream_TestReservedConsumersMetering","ydb_database":"root"},"version":"v1","source_id":"72075186224037888","source_wt":1746608082}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestReservedConsumersMetering","id":"yds.reserved_resources-root-72075186224037888-1746608081183-190","schema":"yds.throughput.reserved.v1","tags":{"reserved_throughput_bps":1048576,"reserved_consumers_count":2},"usage":{"quantity":1,"unit":"second","start":1746608081,"finish":1746608082},"labels":{"datastreams_stream_name":"stream_TestReservedConsumersMetering","ydb_database":"root"},"version":"v1","source_id":"72075186224037888","source_wt":1746608082}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestReservedConsumersMetering","id":"yds.reserved_resources-root-72075186224037888-1746608081183-191","schema":"yds.storage.reserved.v1","tags":{},"usage":{"quantity":56320,"unit":"mbyte*second","start":1746608081,"finish":1746608082},"labels":{"datastreams_stream_name":"stream_TestReservedConsumersMetering","ydb_database":"root"},"version":"v1","source_id":"72075186224037888","source_wt":1746608082}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestReservedConsumersMetering","id":"used_storage-root-72075186224037888-1746608081183-192","schema":"ydb.serverless.v1","tags":{"ydb_size":0},"usage":{"quantity":1,"unit":"byte*second","start":1746608081,"finish":1746608082},"labels":{"datastreams_stream_name":"stream_TestReservedConsumersMetering","ydb_database":"root"},"version":"1.0.0","source_id":"72075186224037888","source_wt":1746608082}' >> BuildStatsMixedIndex::Single_History_Slices [GOOD] >> BuildStatsMixedIndex::Single_Groups >> BuildStatsMixedIndex::Single_Groups [GOOD] >> BuildStatsMixedIndex::Single_Groups_Slices |90.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_change_collector/ydb-core-tx-datashard-ut_change_collector |90.3%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_change_collector/ydb-core-tx-datashard-ut_change_collector |90.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_change_collector/ydb-core-tx-datashard-ut_change_collector >> KqpWorkloadServiceActors::TestPoolFetcher [GOOD] >> KqpWorkloadServiceActors::TestPoolFetcherAclValidation >> BuildStatsMixedIndex::Single_Groups_Slices [GOOD] >> BuildStatsMixedIndex::Single_Groups_History >> TPart::WreckPart [GOOD] >> TPart::PageFailEnv >> BuildStatsMixedIndex::Single_Groups_History [GOOD] >> BuildStatsMixedIndex::Single_Groups_History_Slices >> BuildStatsMixedIndex::Single_Groups_History_Slices 
[GOOD] >> BuildStatsMixedIndex::Mixed >> BuildStatsMixedIndex::Mixed [GOOD] >> BuildStatsMixedIndex::Mixed_Groups >> KqpWorkloadService::TestQueueSizeSimple [GOOD] >> KqpWorkloadService::TestQueueSizeManyQueries >> THiveTest::TestHiveBalancerWithPreferredDC3 [GOOD] >> THiveTest::TestHiveBalancerWithFollowers >> BuildStatsMixedIndex::Mixed_Groups [GOOD] >> BuildStatsMixedIndex::Mixed_Groups_History >> TPQTestSlow::TestOnDiskStoredSourceIds [GOOD] >> BuildStatsMixedIndex::Mixed_Groups_History [GOOD] >> BuildStatsMixedIndex::Serial >> TPart::PageFailEnv [GOOD] >> TPart::ForwardEnv >> BuildStatsMixedIndex::Serial [GOOD] >> BuildStatsMixedIndex::Serial_Groups >> BasicUsage::SimpleHandlers [GOOD] >> TPart::ForwardEnv [GOOD] >> TPart::WreckPartColumnGroups >> BuildStatsMixedIndex::Serial_Groups [GOOD] >> BuildStatsMixedIndex::Serial_Groups_History >> DataShardSnapshots::BrokenLockChangesDontLeak [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/slow/unittest >> TPQTestSlow::TestOnDiskStoredSourceIds [GOOD] Test command err: Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:103:2057] recipient: [1:101:2135] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:103:2057] recipient: [1:101:2135] Leader for TabletID 72057594037927937 is [1:107:2139] sender: [1:108:2057] recipient: [1:101:2135] 2025-05-07T08:53:05.287278Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-05-07T08:53:05.287360Z node 1 :PERSQUEUE INFO: pq_impl.cpp:794: [PQ: 72057594037927937] doesn't have tx writes info Leader for TabletID 72057594037927938 is [0:0:0] sender: [1:149:2057] recipient: [1:147:2170] IGNORE Leader for TabletID 72057594037927938 is [0:0:0] sender: [1:149:2057] recipient: [1:147:2170] Leader for TabletID 72057594037927938 is [1:153:2174] sender: [1:154:2057] recipient: [1:147:2170] Leader for TabletID 72057594037927937 is [1:107:2139] sender: [1:179:2057] recipient: [1:14:2061] 2025-05-07T08:53:05.315197Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-05-07T08:53:05.335052Z node 1 :PERSQUEUE INFO: pq_impl.cpp:1481: [PQ: 72057594037927937] Config applied version 1 actor [1:177:2192] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 SourceIdMaxCounts: 3 } PartitionIds: 0 PartitionIds: 1 TopicName: "rt3.dc1--asdfgs--topic" Version: 1 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 1 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 1 Important: false } 2025-05-07T08:53:05.336083Z node 1 :PERSQUEUE INFO: partition_init.cpp:879: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [1:185:2198] 2025-05-07T08:53:05.338938Z node 1 :PERSQUEUE INFO: partition.cpp:557: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 2 [1:185:2198] 2025-05-07T08:53:05.341170Z node 1 :PERSQUEUE INFO: partition_init.cpp:879: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 
[1:186:2199] 2025-05-07T08:53:05.343183Z node 1 :PERSQUEUE INFO: partition.cpp:557: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 2 [1:186:2199] 2025-05-07T08:53:05.352318Z node 1 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|891fb5db-d86bd5ce-6cc83afd-1482f8db_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-05-07T08:53:05.358790Z node 1 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|1489f5d3-c8e63ca8-495d4a6-f09d4792_1 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-05-07T08:53:05.386477Z node 1 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|f04585cf-54a4509c-fdf32925-878812ad_2 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-05-07T08:53:05.396321Z node 1 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|d4b337f6-63540595-5563217d-d9f21afa_3 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-05-07T08:53:05.405028Z node 1 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|b4dbbde6-ce9bdb1-1e268a83-24dc5aba_4 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-05-07T08:53:05.414672Z node 1 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|ae2fd7cd-290b8902-42be1495-edf89628_5 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:103:2057] recipient: [2:101:2135] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:103:2057] recipient: [2:101:2135] Leader for TabletID 72057594037927937 is [2:107:2139] sender: [2:108:2057] recipient: [2:101:2135] 2025-05-07T08:53:05.947068Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-05-07T08:53:05.947141Z node 2 :PERSQUEUE INFO: pq_impl.cpp:794: [PQ: 72057594037927937] doesn't have tx writes info Leader for TabletID 72057594037927938 is [0:0:0] sender: [2:149:2057] recipient: [2:147:2170] IGNORE Leader for TabletID 72057594037927938 is [0:0:0] sender: [2:149:2057] recipient: [2:147:2170] Leader for TabletID 72057594037927938 is [2:153:2174] sender: [2:154:2057] recipient: [2:147:2170] Leader for TabletID 72057594037927937 is [2:107:2139] sender: [2:179:2057] recipient: [2:14:2061] !Reboot 72057594037927937 (actor [2:107:2139]) on event NKikimr::TEvPersQueue::TEvUpdateConfigBuilder ! Leader for TabletID 72057594037927937 is [2:107:2139] sender: [2:181:2057] recipient: [2:99:2134] Leader for TabletID 72057594037927937 is [2:107:2139] sender: [2:184:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [2:107:2139] sender: [2:185:2057] recipient: [2:183:2195] Leader for TabletID 72057594037927937 is [2:186:2196] sender: [2:187:2057] recipient: [2:183:2195] 2025-05-07T08:53:06.015053Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-05-07T08:53:06.015143Z node 2 :PERSQUEUE INFO: pq_impl.cpp:794: [PQ: 72057594037927937] doesn't have tx writes info !Reboot 72057594037927937 (actor [2:107:2139]) rebooted! !Reboot 72057594037927937 (actor [2:107:2139]) tablet resolver refreshed! 
new actor is[2:186:2196] Leader for TabletID 72057594037927937 is [2:186:2196] sender: [2:260:2057] recipient: [2:14:2061] 2025-05-07T08:53:07.804241Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-05-07T08:53:07.805359Z node 2 :PERSQUEUE INFO: pq_impl.cpp:1481: [PQ: 72057594037927937] Config applied version 2 actor [2:177:2192] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 SourceIdMaxCounts: 3 } PartitionIds: 0 PartitionIds: 1 TopicName: "rt3.dc1--asdfgs--topic" Version: 2 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 2 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 2 Important: false } 2025-05-07T08:53:07.806691Z node 2 :PERSQUEUE INFO: partition_init.cpp:879: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [2:266:2258] 2025-05-07T08:53:07.809669Z node 2 :PERSQUEUE INFO: partition.cpp:557: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 3 [2:266:2258] 2025-05-07T08:53:07.812042Z node 2 :PERSQUEUE INFO: partition_init.cpp:879: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [2:267:2259] 2025-05-07T08:53:07.814238Z node 2 :PERSQUEUE INFO: partition.cpp:557: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 3 [2:267:2259] 2025-05-07T08:53:07.831942Z node 2 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|df866811-e9ffb2c1-f551da9d-8f0fd9e8_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-05-07T08:53:07.845635Z node 2 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|22c6e070-da6843b-6a029305-8a0a1a13_1 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-05-07T08:53:07.891315Z node 2 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|a72a4430-6b17ed85-a5b03321-17f18957_2 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-05-07T08:53:07.903361Z node 2 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|59a151a1-87aebfe-ee40ebb-107d5e72_3 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-05-07T08:53:07.914670Z node 2 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|730560b-a23e4d1f-6fc721b0-4a958045_4 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-05-07T08:53:07.928994Z node 2 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|8ad233a1-9dd27616-dc2e0922-e79e77a6_5 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:103:2057] recipient: [3:101:2135] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:103:2057] recipient: [3:101:2135] Leader for TabletID 72057594037927937 is [3:107:2139] sender: [3:108:2057] recipient: [3:101:2135] 2025-05-07T08:53:08.487959Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-05-07T08:53:08.488075Z node 3 :PERSQUEUE INFO: 
pq_impl.cpp:794: [PQ: 72057594037927937] doesn't have tx writes info Leader for TabletID 72057594037927938 is [0:0:0] sender: [3:149:2057] recipient: [3:147:2170] IGNORE Leader for TabletID 72057594037927938 is [0:0:0] sender: [3:149:2057] recipient: [3:147:2170] Leader for TabletID 72057594037927938 is [3:153:2174] sender: [3:154:2057] recipient: [3:147:2170] Leader for TabletID 72057594037927937 is [3:107:2139] sender: [3:179:2057] recipient: [3:14:2061] !Reboot 72057594037927937 (actor [3:107:2139]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [3:107:2139] sender: [3:181:2057] recipient: [3:99:2134] Leader for TabletID 72057594037927937 is [3:107:2139] sender: [3:184:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [3:107:2139] sender: [3:185:2057] recipient: [3:183:2195] Leader for TabletID 72057594037927937 is [3:186:2196] sender: [3:187:2057] recipient: [3:183:2195] 2025-05-07T08:53:08.541180Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-05-07T08:53:08.541272Z node 3 :PERSQUEUE INFO: pq_impl.cpp:794: [PQ: 72057594037927937] doesn't have tx writes info !Reboot 72057594037927937 (actor [3:107:2139]) rebooted! !Reboot 72057594037927937 (actor [3:107:2139]) tablet resolver refreshed! new actor is[3:186:2196] Leader for TabletID 72057594037927937 is [3:186:2196] sender: [3:260:2057] recipient: [3:14:2061] 2025-05-07T08:53:10.367857Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-05-07T08:53:10.368940Z node 3 :PERSQUEUE INFO: pq_impl.cpp:1481: [PQ: 72057594037927937] Config applied version 3 actor [3:177:2192] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 SourceIdMaxCounts: 3 } PartitionIds: 0 PartitionIds: 1 TopicName: "rt3.dc1--asdfgs--topic" Version: 3 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 3 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 3 Important: false } 2025-05-07T08:53:10.370273Z node 3 :PERSQUEUE INFO: partition_init.cpp:879: [PQ: 7205759403792 ... 
057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [47:183:2196] 2025-05-07T08:54:43.970491Z node 47 :PERSQUEUE INFO: partition.cpp:557: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 2 [47:183:2196] 2025-05-07T08:54:43.972939Z node 47 :PERSQUEUE INFO: partition_init.cpp:879: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [47:184:2197] 2025-05-07T08:54:43.975708Z node 47 :PERSQUEUE INFO: partition.cpp:557: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 2 [47:184:2197] 2025-05-07T08:54:43.989218Z node 47 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|f494e81c-2ee7bcc1-f183643b-71142e45_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-05-07T08:54:43.996439Z node 47 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|b606503b-ca8bc60-95ba7cdc-5c486626_1 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-05-07T08:54:44.036961Z node 47 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|eea2512c-b51dada4-4a82e4db-6f8c168b_2 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-05-07T08:54:44.048681Z node 47 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|fd8e16d8-2c769f98-73235a48-ca7536a8_3 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-05-07T08:54:44.060279Z node 47 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|c9f19650-bb050eb4-49c5d175-db621bd8_4 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-05-07T08:54:44.071882Z node 47 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|cb57433e-10f82a0e-7536aa03-1ce19377_5 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default !Reboot 72057594037927937 (actor [47:107:2139]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [47:107:2139] sender: [47:282:2057] recipient: [47:99:2134] Leader for TabletID 72057594037927937 is [47:107:2139] sender: [47:285:2057] recipient: [47:14:2061] Leader for TabletID 72057594037927937 is [47:107:2139] sender: [47:286:2057] recipient: [47:284:2279] Leader for TabletID 72057594037927937 is [47:287:2280] sender: [47:288:2057] recipient: [47:284:2279] 2025-05-07T08:54:44.158502Z node 47 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-05-07T08:54:44.158573Z node 47 :PERSQUEUE INFO: pq_impl.cpp:794: [PQ: 72057594037927937] doesn't have tx writes info 2025-05-07T08:54:44.159599Z node 47 :PERSQUEUE INFO: partition_init.cpp:879: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [47:336:2321] 2025-05-07T08:54:44.162335Z node 47 :PERSQUEUE INFO: partition_init.cpp:879: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [47:337:2322] 2025-05-07T08:54:44.172443Z node 47 :PERSQUEUE INFO: partition_init.cpp:773: [rt3.dc1--asdfgs--topic:1:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 
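The reboot pattern above is the invariant this slow test exercises: every "!Reboot ... rebooted!" is followed by the partitions re-bootstrapping under a new actor, and each subsequent "init complete ... partition P generation G" line should report a strictly higher generation for that partition than before the restart. A sketch of that check over the captured text, keyed to the exact log wording (the regex and the choice of key are illustrative assumptions):

    import re

    # One full PERSQUEUE init-complete message, so node/partition pairing is exact.
    INIT = re.compile(r"node (\d+) :PERSQUEUE INFO: partition\.cpp:\d+: "
                      r"\[PQ: \d+, Partition: \d+, State: StateInit\] "
                      r"init complete for topic '([^']+)' "
                      r"partition (\d+) generation (\d+)")

    def check_generations(text: str) -> dict:
        seen = {}  # (node, topic, partition) -> last generation observed
        for node, topic, part, gen in INIT.findall(text):
            key, gen = (int(node), topic, int(part)), int(gen)
            if key in seen and gen <= seen[key]:
                raise AssertionError(f"{key}: generation {seen[key]} -> {gen}")
            seen[key] = gen
        return seen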
2025-05-07T08:54:44.172522Z node 47 :PERSQUEUE INFO: partition.cpp:557: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 3 [47:337:2322] 2025-05-07T08:54:44.174904Z node 47 :PERSQUEUE INFO: partition_init.cpp:773: [rt3.dc1--asdfgs--topic:0:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-05-07T08:54:44.174975Z node 47 :PERSQUEUE INFO: partition.cpp:557: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 3 [47:336:2321] !Reboot 72057594037927937 (actor [47:107:2139]) rebooted! !Reboot 72057594037927937 (actor [47:107:2139]) tablet resolver refreshed! new actor is[47:287:2280] Leader for TabletID 72057594037927937 is [47:287:2280] sender: [47:382:2057] recipient: [47:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [48:103:2057] recipient: [48:101:2135] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [48:103:2057] recipient: [48:101:2135] Leader for TabletID 72057594037927937 is [48:107:2139] sender: [48:108:2057] recipient: [48:101:2135] 2025-05-07T08:54:46.480638Z node 48 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-05-07T08:54:46.480752Z node 48 :PERSQUEUE INFO: pq_impl.cpp:794: [PQ: 72057594037927937] doesn't have tx writes info Leader for TabletID 72057594037927938 is [0:0:0] sender: [48:149:2057] recipient: [48:147:2170] IGNORE Leader for TabletID 72057594037927938 is [0:0:0] sender: [48:149:2057] recipient: [48:147:2170] Leader for TabletID 72057594037927938 is [48:153:2174] sender: [48:154:2057] recipient: [48:147:2170] Leader for TabletID 72057594037927937 is [48:107:2139] sender: [48:179:2057] recipient: [48:14:2061] 2025-05-07T08:54:46.543328Z node 48 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-05-07T08:54:46.550849Z node 48 :PERSQUEUE INFO: pq_impl.cpp:1481: [PQ: 72057594037927937] Config applied version 48 actor [48:177:2192] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 SourceIdMaxCounts: 3 } PartitionIds: 0 PartitionIds: 1 TopicName: "rt3.dc1--asdfgs--topic" Version: 48 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 48 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 48 Important: false } 2025-05-07T08:54:46.552085Z node 48 :PERSQUEUE INFO: partition_init.cpp:879: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [48:185:2198] 2025-05-07T08:54:46.555524Z node 48 :PERSQUEUE INFO: partition.cpp:557: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 2 [48:185:2198] 2025-05-07T08:54:46.557908Z node 48 :PERSQUEUE INFO: partition_init.cpp:879: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [48:186:2199] 2025-05-07T08:54:46.560510Z node 48 :PERSQUEUE INFO: partition.cpp:557: [PQ: 72057594037927937, Partition: 1, State: StateInit] init 
complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 2 [48:186:2199] 2025-05-07T08:54:46.575987Z node 48 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|1583251b-47cf53f4-b9743257-fd036967_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-05-07T08:54:46.589335Z node 48 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|16cd6b19-a932fc-9b080f9e-9022b8be_1 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-05-07T08:54:46.645092Z node 48 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|aa2643d8-5bba46c9-7af40637-9e7f9e3c_2 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-05-07T08:54:46.668088Z node 48 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|5b3899c0-2a231690-9739c765-563b2ce2_3 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-05-07T08:54:46.697270Z node 48 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|cc6a63cc-b1370576-1c0dbbf5-15bcb6e8_4 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-05-07T08:54:46.722163Z node 48 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|15351932-9d44fbb6-6200e78f-644dd32_5 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default Leader for TabletID 72057594037927937 is [0:0:0] sender: [49:103:2057] recipient: [49:101:2135] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [49:103:2057] recipient: [49:101:2135] Leader for TabletID 72057594037927937 is [49:107:2139] sender: [49:108:2057] recipient: [49:101:2135] 2025-05-07T08:54:48.015475Z node 49 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-05-07T08:54:48.015573Z node 49 :PERSQUEUE INFO: pq_impl.cpp:794: [PQ: 72057594037927937] doesn't have tx writes info Leader for TabletID 72057594037927938 is [0:0:0] sender: [49:149:2057] recipient: [49:147:2170] IGNORE Leader for TabletID 72057594037927938 is [0:0:0] sender: [49:149:2057] recipient: [49:147:2170] Leader for TabletID 72057594037927938 is [49:153:2174] sender: [49:154:2057] recipient: [49:147:2170] Leader for TabletID 72057594037927937 is [49:107:2139] sender: [49:177:2057] recipient: [49:14:2061] 2025-05-07T08:54:48.091894Z node 49 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-05-07T08:54:48.093016Z node 49 :PERSQUEUE INFO: pq_impl.cpp:1481: [PQ: 72057594037927937] Config applied version 49 actor [49:175:2190] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 SourceIdMaxCounts: 3 } PartitionIds: 0 PartitionIds: 1 TopicName: "rt3.dc1--asdfgs--topic" Version: 49 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 49 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 49 Important: false } 2025-05-07T08:54:48.102496Z node 49 :PERSQUEUE INFO: partition_init.cpp:879: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [49:183:2196] 2025-05-07T08:54:48.106283Z node 49 :PERSQUEUE INFO: partition.cpp:557: [PQ: 72057594037927937, Partition: 0, State: StateInit] init 
complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 2 [49:183:2196] 2025-05-07T08:54:48.108934Z node 49 :PERSQUEUE INFO: partition_init.cpp:879: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [49:184:2197] 2025-05-07T08:54:48.115610Z node 49 :PERSQUEUE INFO: partition.cpp:557: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 2 [49:184:2197] 2025-05-07T08:54:48.129087Z node 49 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|c18c5a5d-70475eec-87b3a8e2-60c74a19_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-05-07T08:54:48.139761Z node 49 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|407fa8d-765f0320-d02ce42c-5722aca2_1 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-05-07T08:54:48.184495Z node 49 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|e1003554-3325de13-1c521477-57e38fae_2 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-05-07T08:54:48.200926Z node 49 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|71006a1e-9422b56-4d66362a-1784ad9e_3 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-05-07T08:54:48.213263Z node 49 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|41767efd-827411dd-593610ee-48367778_4 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-05-07T08:54:48.230305Z node 49 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|5cf8d577-36655ad3-8157692a-d85ff11e_5 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default >> BuildStatsMixedIndex::Serial_Groups_History [GOOD] >> BuildStatsMixedIndex::Single_LowResolution >> BuildStatsMixedIndex::Single_LowResolution [GOOD] >> BuildStatsMixedIndex::Single_Slices_LowResolution >> BuildStatsMixedIndex::Single_Slices_LowResolution [GOOD] >> BuildStatsMixedIndex::Single_Groups_LowResolution >> TClockProCache::Touch [GOOD] >> TClockProCache::UpdateLimit [GOOD] >> TCompaction::OneMemtable [GOOD] >> TCompaction::ManyParts >> YdbIndexTable::OnlineBuild [GOOD] >> YdbIndexTable::OnlineBuildWithDataColumn >> DataStreams::TestGetRecordsStreamWithMultipleShards [GOOD] >> SlowTopicAutopartitioning::CDC_Write [GOOD] >> DataStreams::TestGetRecordsWithBigSeqno >> KqpQueryPerf::IdxLookupJoinThreeWay-QueryService [GOOD] >> BuildStatsMixedIndex::Single_Groups_LowResolution [GOOD] >> BuildStatsMixedIndex::Single_Groups_Slices_LowResolution >> BuildStatsMixedIndex::Single_Groups_Slices_LowResolution [GOOD] >> BuildStatsMixedIndex::Single_Groups_History_LowResolution >> TCompaction::ManyParts [GOOD] >> TCompaction::BootAbort ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/federated_topic/ut/unittest >> BasicUsage::SimpleHandlers [GOOD] Test command err: 2025-05-07T08:53:36.894041Z :WaitEventBlocksBeforeDiscovery INFO: Random seed for debugging is 1746608016894009 2025-05-07T08:53:37.308054Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501624315623369376:2075];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:53:37.308698Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-05-07T08:53:37.455566Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: 
fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7501624312796203215:2217];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:53:37.701932Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003f08/r3tmp/tmp4eGQey/pdisk_1.dat 2025-05-07T08:53:37.721958Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-05-07T08:53:37.771795Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-05-07T08:53:38.083622Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:53:38.113346Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:53:38.113466Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:53:38.115935Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:53:38.115986Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:53:38.137545Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-05-07T08:53:38.137681Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:53:38.138554Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 24012, node 1 2025-05-07T08:53:38.271443Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/zvgn/003f08/r3tmp/yandexQ3zBn4.tmp 2025-05-07T08:53:38.271471Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/zvgn/003f08/r3tmp/yandexQ3zBn4.tmp 2025-05-07T08:53:38.271606Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/zvgn/003f08/r3tmp/yandexQ3zBn4.tmp 2025-05-07T08:53:38.271718Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-05-07T08:53:38.368552Z INFO: TTestServer started on Port 10868 GrpcPort 24012 TClient is connected to server localhost:10868 PQClient connected to localhost:24012 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:53:38.896814Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... waiting... waiting... waiting... 2025-05-07T08:53:42.298515Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501624315623369376:2075];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:53:42.298637Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:53:42.434103Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7501624312796203215:2217];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:53:42.434174Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:53:42.514575Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501624337098206858:2340], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:53:42.514698Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:53:42.516943Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501624337098206885:2343], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:53:42.521897Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710661:3, at schemeshard: 72057594046644480 2025-05-07T08:53:42.530059Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501624337098206916:2346], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:53:42.530131Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:53:42.591818Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501624337098206887:2344], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710661 completed, doublechecking } 2025-05-07T08:53:42.875235Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501624337098206964:2673] txid# 281474976710662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:53:42.909520Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T08:53:42.915161Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7501624334271039878:2315], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-05-07T08:53:42.917016Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2147: SessionId: ydb://session/3?node_id=2&id=MWIxZjI3YmEtNmFmZWZjYzAtNjMxYTY3ZDQtYWQyNjlkZmQ=, ActorId: [2:7501624334271039839:2309], ActorState: ExecuteState, TraceId: 01jtmz6s57azhmy78pp422ez8y, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-05-07T08:53:42.916465Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7501624337098206978:2350], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-05-07T08:53:42.917022Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2147: SessionId: ydb://session/3?node_id=1&id=ZDBjMmQyYmQtZDRmZjkzZTYtNTUwZDNkNzctYWY5MmNmYTA=, ActorId: [1:7501624337098206855:2338], ActorState: ExecuteState, TraceId: 01jtmz6ry89jp0tndmd7qjk183, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-05-07T08:53:42.919515Z node 2 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-05-07T08:53:42.919909Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-05-07T08:53:43.088925Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T08:53:43.246793Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 === Init DC: UPSERT INTO `/Root/PQ/Confi ... : 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-05-07T08:54:47.380844Z :INFO: [/Root] MessageGroupId [src_id] SessionId [src_id|3efb980e-d00b22a9-a0b7477f-b471ed4a_0] Write session: close. 
Timeout = 0 ms 2025-05-07T08:54:47.380888Z :INFO: [/Root] MessageGroupId [src_id] SessionId [src_id|3efb980e-d00b22a9-a0b7477f-b471ed4a_0] Write session will now close 2025-05-07T08:54:47.380929Z :DEBUG: [/Root] MessageGroupId [src_id] SessionId [src_id|3efb980e-d00b22a9-a0b7477f-b471ed4a_0] Write session: aborting 2025-05-07T08:54:47.378293Z node 3 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 3 consumer shared/user session shared/user_3_3_15778029212911121424_v1 grpc read done: success# 1, data# { read_request { bytes_size: 33072 } } 2025-05-07T08:54:47.378481Z node 3 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:1816: session cookie 3 consumer shared/user session shared/user_3_3_15778029212911121424_v1 got read request: guid# bed62024-4545f081-7f5d3422-638aeca1 2025-05-07T08:54:47.381332Z node 3 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 1 consumer shared/user session shared/user_3_1_18066120563181412964_v1 grpc read done: success# 0, data# { } 2025-05-07T08:54:47.381395Z node 3 :PQ_READ_PROXY INFO: read_session_actor.cpp:125: session cookie 1 consumer shared/user session shared/user_3_1_18066120563181412964_v1 grpc read failed 2025-05-07T08:54:47.381432Z node 3 :PQ_READ_PROXY INFO: read_session_actor.cpp:92: session cookie 1 consumer shared/user session shared/user_3_1_18066120563181412964_v1 grpc closed 2025-05-07T08:54:47.381473Z node 3 :PQ_READ_PROXY INFO: read_session_actor.cpp:377: session cookie 1 consumer shared/user session shared/user_3_1_18066120563181412964_v1 is DEAD 2025-05-07T08:54:47.382139Z :INFO: [/Root] MessageGroupId [src_id] SessionId [src_id|3efb980e-d00b22a9-a0b7477f-b471ed4a_0] Write session: gracefully shut down, all writes complete 2025-05-07T08:54:47.383046Z :DEBUG: [/Root] MessageGroupId [src_id] SessionId [src_id|3efb980e-d00b22a9-a0b7477f-b471ed4a_0] Write session is aborting and will not restart 2025-05-07T08:54:47.383172Z :DEBUG: [/Root] MessageGroupId [src_id] SessionId [src_id|3efb980e-d00b22a9-a0b7477f-b471ed4a_0] Write session: destroy 2025-05-07T08:54:47.386677Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2899: [PQ: 72075186224037892] server disconnected, pipe [3:7501624603031786620:2523] destroyed 2025-05-07T08:54:47.386758Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:138: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::DropOwner. 
2025-05-07T08:54:47.385140Z node 3 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 2 sessionId: src_id|3efb980e-d00b22a9-a0b7477f-b471ed4a_0 grpc read done: success: 0 data: 2025-05-07T08:54:47.385180Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 2 sessionId: src_id|3efb980e-d00b22a9-a0b7477f-b471ed4a_0 grpc read failed 2025-05-07T08:54:47.385212Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 2 sessionId: src_id|3efb980e-d00b22a9-a0b7477f-b471ed4a_0 grpc closed 2025-05-07T08:54:47.385232Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 2 sessionId: src_id|3efb980e-d00b22a9-a0b7477f-b471ed4a_0 is DEAD 2025-05-07T08:54:47.386088Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:538: TPartitionWriter 72075186224037892 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-05-07T08:54:47.386211Z node 3 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037893][rt3.dc1--test-topic] pipe [3:7501624603031786569:2519] disconnected; active server actors: 1 2025-05-07T08:54:47.386239Z node 3 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1673: [72075186224037893][rt3.dc1--test-topic] pipe [3:7501624603031786569:2519] client user disconnected session shared/user_3_1_18066120563181412964_v1 2025-05-07T08:54:47.386307Z node 3 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1183: [72075186224037893][rt3.dc1--test-topic] consumer user rebalancing was scheduled 2025-05-07T08:54:47.386372Z node 3 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1255: [72075186224037893][rt3.dc1--test-topic] consumer user balancing. Sessions=2, Families=1, UnradableFamilies=0 [], RequireBalancing=0 [] 2025-05-07T08:54:47.386430Z node 3 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1322: [72075186224037893][rt3.dc1--test-topic] consumer user start rebalancing. 
familyCount=1, sessionCount=2, desiredFamilyCount=0, allowPlusOne=1 2025-05-07T08:54:47.386466Z node 3 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1399: [72075186224037893][rt3.dc1--test-topic] consumer user balancing duration: 0.000067s 2025-05-07T08:54:47.386547Z node 3 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 2 consumer shared/user session shared/user_3_2_14529984168978829887_v1 grpc read done: success# 0, data# { } 2025-05-07T08:54:47.386562Z node 3 :PQ_READ_PROXY INFO: read_session_actor.cpp:125: session cookie 2 consumer shared/user session shared/user_3_2_14529984168978829887_v1 grpc read failed 2025-05-07T08:54:47.386585Z node 3 :PQ_READ_PROXY INFO: read_session_actor.cpp:92: session cookie 2 consumer shared/user session shared/user_3_2_14529984168978829887_v1 grpc closed 2025-05-07T08:54:47.386605Z node 3 :PQ_READ_PROXY INFO: read_session_actor.cpp:377: session cookie 2 consumer shared/user session shared/user_3_2_14529984168978829887_v1 is DEAD 2025-05-07T08:54:47.387317Z node 3 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 3 consumer shared/user session shared/user_3_3_15778029212911121424_v1 grpc read done: success# 0, data# { } 2025-05-07T08:54:47.387331Z node 3 :PQ_READ_PROXY INFO: read_session_actor.cpp:125: session cookie 3 consumer shared/user session shared/user_3_3_15778029212911121424_v1 grpc read failed 2025-05-07T08:54:47.387351Z node 3 :PQ_READ_PROXY INFO: read_session_actor.cpp:92: session cookie 3 consumer shared/user session shared/user_3_3_15778029212911121424_v1 grpc closed 2025-05-07T08:54:47.387383Z node 3 :PQ_READ_PROXY INFO: read_session_actor.cpp:377: session cookie 3 consumer shared/user session shared/user_3_3_15778029212911121424_v1 is DEAD 2025-05-07T08:54:47.392433Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2433: [PQ: 72075186224037892] Destroy direct read session shared/user_3_3_15778029212911121424_v1 2025-05-07T08:54:47.392482Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2899: [PQ: 72075186224037892] server disconnected, pipe [3:7501624603031786591:2533] destroyed 2025-05-07T08:54:47.392538Z node 4 :PQ_READ_PROXY DEBUG: caching_service.cpp:138: Direct read cache: server session deregistered: shared/user_3_3_15778029212911121424_v1 2025-05-07T08:54:47.392856Z node 3 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037893][rt3.dc1--test-topic] pipe [3:7501624603031786579:2521] disconnected; active server actors: 1 2025-05-07T08:54:47.392896Z node 3 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1673: [72075186224037893][rt3.dc1--test-topic] pipe [3:7501624603031786579:2521] client user disconnected session shared/user_3_2_14529984168978829887_v1 2025-05-07T08:54:47.392939Z node 3 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1183: [72075186224037893][rt3.dc1--test-topic] consumer user rebalancing was scheduled 2025-05-07T08:54:47.392983Z node 3 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037893][rt3.dc1--test-topic] pipe [3:7501624603031786580:2522] disconnected; active server actors: 1 2025-05-07T08:54:47.392999Z node 3 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1673: [72075186224037893][rt3.dc1--test-topic] pipe [3:7501624603031786580:2522] client user disconnected session shared/user_3_3_15778029212911121424_v1 2025-05-07T08:54:47.414339Z :INFO: [/Root] [/Root] [4495ee36-b45fbb5e-f36eaf21-31262e3e] Closing read session. 
Close timeout: 0.000000s 2025-05-07T08:54:47.414465Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): 2025-05-07T08:54:47.414535Z :INFO: [/Root] [/Root] [4495ee36-b45fbb5e-f36eaf21-31262e3e] Counters: { Errors: 0 CurrentSessionLifetimeMs: 3151 BytesRead: 0 MessagesRead: 0 BytesReadCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-05-07T08:54:47.414603Z :INFO: [/Root] [/Root] [3f84a36a-e13ccc4-ed2e0a99-bc79579b] Closing read session. Close timeout: 0.000000s 2025-05-07T08:54:47.414650Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): 2025-05-07T08:54:47.414692Z :INFO: [/Root] [/Root] [3f84a36a-e13ccc4-ed2e0a99-bc79579b] Counters: { Errors: 0 CurrentSessionLifetimeMs: 3149 BytesRead: 0 MessagesRead: 0 BytesReadCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-05-07T08:54:47.414742Z :INFO: [/Root] [/Root] [a3e8ab25-402be523-f25c415a-5ce25a25] Closing read session. Close timeout: 0.000000s 2025-05-07T08:54:47.414790Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): -:test-topic:0:1:299:0 2025-05-07T08:54:47.414830Z :INFO: [/Root] [/Root] [a3e8ab25-402be523-f25c415a-5ce25a25] Counters: { Errors: 0 CurrentSessionLifetimeMs: 3131 BytesRead: 4936800 MessagesRead: 300 BytesReadCompressed: 4936800 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-05-07T08:54:47.414873Z :INFO: [/Root] [/Root] [a3e8ab25-402be523-f25c415a-5ce25a25] Closing read session. Close timeout: 0.000000s 2025-05-07T08:54:47.414946Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): -:test-topic:0:1:299:0 2025-05-07T08:54:47.415013Z :INFO: [/Root] [/Root] [a3e8ab25-402be523-f25c415a-5ce25a25] Counters: { Errors: 0 CurrentSessionLifetimeMs: 3132 BytesRead: 4936800 MessagesRead: 300 BytesReadCompressed: 4936800 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-05-07T08:54:47.415154Z :NOTICE: [/Root] [/Root] [a3e8ab25-402be523-f25c415a-5ce25a25] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2025-05-07T08:54:47.416399Z :INFO: [/Root] [/Root] [3f84a36a-e13ccc4-ed2e0a99-bc79579b] Closing read session. Close timeout: 0.000000s 2025-05-07T08:54:47.416452Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): 2025-05-07T08:54:47.416498Z :INFO: [/Root] [/Root] [3f84a36a-e13ccc4-ed2e0a99-bc79579b] Counters: { Errors: 0 CurrentSessionLifetimeMs: 3151 BytesRead: 0 MessagesRead: 0 BytesReadCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-05-07T08:54:47.416581Z :NOTICE: [/Root] [/Root] [3f84a36a-e13ccc4-ed2e0a99-bc79579b] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2025-05-07T08:54:47.417262Z :INFO: [/Root] [/Root] [4495ee36-b45fbb5e-f36eaf21-31262e3e] Closing read session. Close timeout: 0.000000s 2025-05-07T08:54:47.417310Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): 2025-05-07T08:54:47.417349Z :INFO: [/Root] [/Root] [4495ee36-b45fbb5e-f36eaf21-31262e3e] Counters: { Errors: 0 CurrentSessionLifetimeMs: 3154 BytesRead: 0 MessagesRead: 0 BytesReadCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-05-07T08:54:47.417421Z :NOTICE: [/Root] [/Root] [4495ee36-b45fbb5e-f36eaf21-31262e3e] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } >> BuildStatsMixedIndex::Single_Groups_History_LowResolution [GOOD] >> BuildStatsMixedIndex::Single_Groups_History_Slices_LowResolution >> THiveTest::TestHiveBalancer [GOOD] >> THiveTest::TestFollowersCrossDC_Easy >> ResourcePoolClassifiersDdl::TestResourcePoolClassifiersPermissions [GOOD] >> ResourcePoolClassifiersDdl::TestResourcePoolClassifierRanks >> BuildStatsMixedIndex::Single_Groups_History_Slices_LowResolution [GOOD] >> Charge::Lookups [GOOD] >> Charge::ByKeysBasics >> Charge::ByKeysBasics [GOOD] >> Charge::ByKeysGroups [GOOD] >> Charge::ByKeysGroupsLimits [GOOD] >> Charge::ByKeysLimits [GOOD] >> Charge::ByKeysReverse [GOOD] >> Charge::ByKeysHistory [GOOD] >> Charge::ByKeysIndex >> TCompaction::BootAbort [GOOD] >> TCompaction::Defaults [GOOD] >> TCompaction::Merges [GOOD] >> TCompactionMulti::ManyParts >> Charge::ByKeysIndex [GOOD] >> Charge::ByRows [GOOD] >> Charge::ByRowsReverse [GOOD] >> Charge::ByRowsLimits [GOOD] >> Charge::ByRowsLimitsReverse [GOOD] >> DBase::Basics [GOOD] >> DBase::Select [GOOD] >> DBase::Defaults [GOOD] >> DBase::Subsets [GOOD] >> DBase::Garbage [GOOD] >> DBase::Affects [GOOD] >> DBase::Annex [GOOD] >> DBase::AnnexRollbackChanges [GOOD] >> DBase::Outer [GOOD] >> DBase::KIKIMR_15506_MissingSnapshotKeys [GOOD] >> DBase::EraseCacheWithUncommittedChanges [GOOD] >> DBase::EraseCacheWithUncommittedChangesCompacted [GOOD] >> DBase::AlterAndUpsertChangesVisibility [GOOD] >> DBase::UncommittedChangesVisibility [GOOD] >> DBase::UncommittedChangesCommitWithUpdates [GOOD] >> DBase::ReplayNewTable [GOOD] >> DBase::SnapshotNewTable [GOOD] >> DBase::DropModifiedTable [GOOD] >> DBase::KIKIMR_15598_Many_MemTables >> TPart::WreckPartColumnGroups [GOOD] >> TPart::PageFailEnvColumnGroups ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_snapshot/unittest >> DataShardSnapshots::BrokenLockChangesDontLeak [GOOD] Test command err: 2025-05-07T08:51:44.608205Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:105:2151], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:51:44.608377Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:51:44.608675Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002941/r3tmp/tmpv6p2LR/pdisk_1.dat 2025-05-07T08:51:45.047949Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-05-07T08:51:45.103352Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:51:45.155814Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:59:2106] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-05-07T08:51:45.156744Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 2025-05-07T08:51:45.157044Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:51:45.157191Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:51:45.171326Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:51:45.261871Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:59:2106] Handle TEvProposeTransaction 2025-05-07T08:51:45.261950Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:59:2106] TxId# 281474976715657 ProcessProposeTransaction 2025-05-07T08:51:45.262138Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:59:2106] Cookie# 0 userReqId# "" txid# 281474976715657 SEND to# [1:640:2548] 2025-05-07T08:51:45.387895Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1520: Actor# [1:640:2548] txid# 281474976715657 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/Root" OperationType: ESchemeOpCreateTable CreateTable { Name: "table-1" Columns { Name: "key" Type: "Uint32" FamilyName: "" NotNull: false } Columns { Name: "value" Type: "Uint32" FamilyName: "" NotNull: false } KeyColumnNames: "key" UniformPartitionsCount: 1 } } } ExecTimeoutPeriod: 18446744073709551615 2025-05-07T08:51:45.388039Z node 1 :TX_PROXY DEBUG: schemereq.cpp:563: Actor# [1:640:2548] txid# 281474976715657 Bootstrap, UserSID: CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-05-07T08:51:45.388875Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1585: Actor# [1:640:2548] txid# 281474976715657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P6 2025-05-07T08:51:45.389013Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1575: Actor# [1:640:2548] txid# 281474976715657 TEvNavigateKeySet requested from SchemeCache 2025-05-07T08:51:45.389434Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1408: Actor# [1:640:2548] txid# 281474976715657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-05-07T08:51:45.389707Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1455: Actor# [1:640:2548] HANDLE 
EvNavigateKeySetResult, txid# 281474976715657 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-05-07T08:51:45.389824Z node 1 :TX_PROXY DEBUG: schemereq.cpp:102: Actor# [1:640:2548] txid# 281474976715657 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715657 TabletId# 72057594046644480} 2025-05-07T08:51:45.393229Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-05-07T08:51:45.393896Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1310: Actor# [1:640:2548] txid# 281474976715657 HANDLE EvClientConnected 2025-05-07T08:51:45.394860Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1332: Actor# [1:640:2548] txid# 281474976715657 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715657} 2025-05-07T08:51:45.394970Z node 1 :TX_PROXY DEBUG: schemereq.cpp:543: Actor# [1:640:2548] txid# 281474976715657 SEND to# [1:594:2519] Source {TEvProposeTransactionStatus txid# 281474976715657 Status# 53} 2025-05-07T08:51:45.456326Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3098: StateInit, received event# 268828672, Sender [1:656:2563], Recipient [1:665:2569]: NKikimr::TEvTablet::TEvBoot 2025-05-07T08:51:45.457808Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3098: StateInit, received event# 268828673, Sender [1:656:2563], Recipient [1:665:2569]: NKikimr::TEvTablet::TEvRestored 2025-05-07T08:51:45.461852Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:665:2569] 2025-05-07T08:51:45.462238Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-05-07T08:51:45.480266Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3111: StateInactive, received event# 268828684, Sender [1:656:2563], Recipient [1:665:2569]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-05-07T08:51:45.523539Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-05-07T08:51:45.523736Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-05-07T08:51:45.525907Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-05-07T08:51:45.526126Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-05-07T08:51:45.526199Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-05-07T08:51:45.526755Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-05-07T08:51:45.526948Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-05-07T08:51:45.527049Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:681:2569] in generation 1 2025-05-07T08:51:45.527628Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-05-07T08:51:45.563313Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-05-07T08:51:45.563647Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 
72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-05-07T08:51:45.563822Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:683:2579] 2025-05-07T08:51:45.563898Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-05-07T08:51:45.563958Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-05-07T08:51:45.564010Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T08:51:45.564290Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 2146435072, Sender [1:665:2569], Recipient [1:665:2569]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-05-07T08:51:45.564375Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3155: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-05-07T08:51:45.564805Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-05-07T08:51:45.564992Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-05-07T08:51:45.565161Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T08:51:45.565218Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-05-07T08:51:45.565270Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-05-07T08:51:45.565319Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-05-07T08:51:45.565363Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-05-07T08:51:45.565419Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-05-07T08:51:45.565475Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T08:51:45.566038Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269877761, Sender [1:672:2573], Recipient [1:665:2569]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-05-07T08:51:45.566091Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3166: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-05-07T08:51:45.566145Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:661:2566], serverId# [1:672:2573], sessionId# [0:0:0] 2025-05-07T08:51:45.566274Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269549568, Sender [1:410:2405], Recipient [1:672:2573] 2025-05-07T08:51:45.566333Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3136: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-05-07T08:51:45.566451Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-05-07T08:51:45.566771Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 
2025-05-07T08:51:45.566858Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-05-07T08:51:45.566964Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-05-07T08:51:45.567046Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-05-07T08:51:45.567096Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx 2025-05-07T08:51:45.567136Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx 2025-05-07T08:51:45.567180Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-05-07T08:51:45.567538Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 ... X_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:5] at 72075186224037888 to execution unit FinishProposeWrite 2025-05-07T08:54:48.865375Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:5] at 72075186224037888 on unit FinishProposeWrite 2025-05-07T08:54:48.865607Z node 16 :TX_DATASHARD ERROR: finish_propose_write_unit.cpp:226: Prepare transaction failed. txid 5 at tablet 72075186224037888 errors: Status: STATUS_LOCKS_BROKEN Issues: { message: "Operation is aborting because it cannot acquire locks" issue_code: 2001 severity: 1 } 2025-05-07T08:54:48.865705Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:5] at 72075186224037888 is DelayComplete 2025-05-07T08:54:48.865763Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:5] at 72075186224037888 executing on unit FinishProposeWrite 2025-05-07T08:54:48.865845Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:5] at 72075186224037888 to execution unit CompletedOperations 2025-05-07T08:54:48.865920Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:5] at 72075186224037888 on unit CompletedOperations 2025-05-07T08:54:48.866037Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:5] at 72075186224037888 is Executed 2025-05-07T08:54:48.866071Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:5] at 72075186224037888 executing on unit CompletedOperations 2025-05-07T08:54:48.866115Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:5] at 72075186224037888 has finished 2025-05-07T08:54:48.866287Z node 16 :TX_DATASHARD TRACE: datashard__write.cpp:150: TTxWrite complete: at tablet# 72075186224037888 2025-05-07T08:54:48.866382Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:5] at 72075186224037888 on unit FinishProposeWrite 2025-05-07T08:54:48.866492Z node 16 :TX_DATASHARD TRACE: finish_propose_write_unit.cpp:163: Propose transaction complete txid 5 at tablet 72075186224037888 send to client, propose latency: 0 ms, status: STATUS_LOCKS_BROKEN 2025-05-07T08:54:48.866785Z node 16 :TX_DATASHARD ERROR: finish_propose_write_unit.cpp:168: Errors while proposing transaction txid 5 at tablet 
72075186224037888 Status: STATUS_LOCKS_BROKEN Issues: { message: "Operation is aborting because it cannot acquire locks" issue_code: 2001 severity: 1 } 2025-05-07T08:54:48.866948Z node 16 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T08:54:48.867366Z node 16 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:748: SelfId: [16:904:2679], Table: `/Root/table` ([72057594046644480:2:1]), SessionActorId: [16:839:2679]Got LOCKS BROKEN for table `/Root/table`. ShardID=72075186224037888, Sink=[16:904:2679].{
: Error: Operation is aborting because it cannot acquire locks, code: 2001 } 2025-05-07T08:54:48.867656Z node 16 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:2833: SelfId: [16:897:2679], SessionActorId: [16:839:2679], statusCode=ABORTED. Issue=
: Error: Transaction locks invalidated. Table: `/Root/table`., code: 2001
: Error: Operation is aborting because it cannot acquire locks, code: 2001 . sessionActorId=[16:839:2679]. isRollback=0 2025-05-07T08:54:48.882457Z node 16 :KQP_SESSION WARN: kqp_session_actor.cpp:1840: SessionId: ydb://session/3?node_id=16&id=OTQ3OTQzMmUtZThhODBjNTgtMTFiNDZhZWQtOThiM2U5Y2I=, ActorId: [16:839:2679], ActorState: ExecuteState, TraceId: 01jtmz8saq81092djvn8hzvjp5, got TEvKqpBuffer::TEvError in ExecuteState, status: ABORTED send to: [16:898:2679] from: [16:897:2679] 2025-05-07T08:54:48.882884Z node 16 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1944: ActorId: [16:898:2679] TxId: 281474976715663. Ctx: { TraceId: 01jtmz8saq81092djvn8hzvjp5, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=16&id=OTQ3OTQzMmUtZThhODBjNTgtMTFiNDZhZWQtOThiM2U5Y2I=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ABORTED: {
: Error: Transaction locks invalidated. Table: `/Root/table`., code: 2001 subissue: {
: Error: Operation is aborting because it cannot acquire locks, code: 2001 } } 2025-05-07T08:54:48.883455Z node 16 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 278003712, Sender [16:897:2679], Recipient [16:689:2579]: NKikimrDataEvents.TEvWrite TxMode: MODE_IMMEDIATE Locks { Locks { LockId: 281474976715661 DataShard: 72075186224037888 Generation: 1 Counter: 0 SchemeShard: 72057594046644480 PathId: 2 } Op: Rollback } 2025-05-07T08:54:48.883523Z node 16 :TX_DATASHARD TRACE: datashard__write.cpp:182: Handle TTxWrite: at tablet# 72075186224037888 2025-05-07T08:54:48.883829Z node 16 :KQP_SESSION WARN: kqp_session_actor.cpp:2578: SessionId: ydb://session/3?node_id=16&id=OTQ3OTQzMmUtZThhODBjNTgtMTFiNDZhZWQtOThiM2U5Y2I=, ActorId: [16:839:2679], ActorState: ExecuteState, TraceId: 01jtmz8saq81092djvn8hzvjp5, Create QueryResponse for error on request, msg: 2025-05-07T08:54:48.885545Z node 16 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 2146435074, Sender [16:689:2579], Recipient [16:689:2579]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvDelayedProposeTransaction 2025-05-07T08:54:48.885606Z node 16 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvDelayedProposeTransaction 2025-05-07T08:54:48.885711Z node 16 :TX_DATASHARD TRACE: datashard__write.cpp:28: TTxWrite:: execute at tablet# 72075186224037888 2025-05-07T08:54:48.885937Z node 16 :TX_DATASHARD TRACE: datashard_write_operation.cpp:64: Parsing write transaction for 0 at 72075186224037888, record: TxMode: MODE_IMMEDIATE Locks { Locks { LockId: 281474976715661 DataShard: 72075186224037888 Generation: 1 Counter: 0 SchemeShard: 72057594046644480 PathId: 2 } Op: Rollback } 2025-05-07T08:54:48.886134Z node 16 :TX_DATASHARD TRACE: key_validator.cpp:54: -- AddWriteRange: (Uint64 : 281474976715661, Uint64 : 72075186224037888, Uint64 : 72057594046644480, Uint64 : 2) table: [1:997:0] 2025-05-07T08:54:48.886269Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:6] at 72075186224037888 on unit CheckWrite 2025-05-07T08:54:48.886329Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:6] at 72075186224037888 is Executed 2025-05-07T08:54:48.886367Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:6] at 72075186224037888 executing on unit CheckWrite 2025-05-07T08:54:48.886405Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:6] at 72075186224037888 to execution unit BuildAndWaitDependencies 2025-05-07T08:54:48.886440Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:6] at 72075186224037888 on unit BuildAndWaitDependencies 2025-05-07T08:54:48.886484Z node 16 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 72075186224037888 CompleteEdge# v400/0 IncompleteEdge# v{min} UnprotectedReadEdge# v400/18446744073709551615 ImmediateWriteEdge# v401/0 ImmediateWriteEdgeReplied# v401/0 2025-05-07T08:54:48.886544Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:6] at 72075186224037888 2025-05-07T08:54:48.886582Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:6] at 72075186224037888 is Executed 2025-05-07T08:54:48.886627Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:6] at 72075186224037888 executing on unit BuildAndWaitDependencies 2025-05-07T08:54:48.886663Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:6] 
at 72075186224037888 to execution unit ExecuteWrite 2025-05-07T08:54:48.886693Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:6] at 72075186224037888 on unit ExecuteWrite 2025-05-07T08:54:48.886730Z node 16 :TX_DATASHARD DEBUG: execute_write_unit.cpp:245: Executing write operation for [0:6] at 72075186224037888 2025-05-07T08:54:48.886876Z node 16 :TX_DATASHARD TRACE: datashard_kqp.cpp:824: KqpEraseLock LockId: 281474976715661 DataShard: 72075186224037888 Generation: 1 Counter: 0 SchemeShard: 72057594046644480 PathId: 2 2025-05-07T08:54:48.887347Z node 16 :TX_DATASHARD DEBUG: execute_write_unit.cpp:414: Skip empty write operation for [0:6] at 72075186224037888 2025-05-07T08:54:48.887478Z node 16 :TX_DATASHARD TRACE: execute_write_unit.cpp:47: add locks to result: 0 2025-05-07T08:54:48.887613Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:6] at 72075186224037888 is ExecutedNoMoreRestarts 2025-05-07T08:54:48.887664Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:6] at 72075186224037888 executing on unit ExecuteWrite 2025-05-07T08:54:48.887742Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:6] at 72075186224037888 to execution unit FinishProposeWrite 2025-05-07T08:54:48.887813Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:6] at 72075186224037888 on unit FinishProposeWrite 2025-05-07T08:54:48.887855Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:6] at 72075186224037888 is DelayComplete 2025-05-07T08:54:48.887886Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:6] at 72075186224037888 executing on unit FinishProposeWrite 2025-05-07T08:54:48.887937Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:6] at 72075186224037888 to execution unit CompletedOperations 2025-05-07T08:54:48.887973Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:6] at 72075186224037888 on unit CompletedOperations 2025-05-07T08:54:48.888032Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:6] at 72075186224037888 is Executed 2025-05-07T08:54:48.888061Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:6] at 72075186224037888 executing on unit CompletedOperations 2025-05-07T08:54:48.888090Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:6] at 72075186224037888 has finished 2025-05-07T08:54:48.888190Z node 16 :TX_DATASHARD TRACE: datashard__write.cpp:150: TTxWrite complete: at tablet# 72075186224037888 2025-05-07T08:54:48.888244Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:6] at 72075186224037888 on unit FinishProposeWrite 2025-05-07T08:54:48.888288Z node 16 :TX_DATASHARD TRACE: finish_propose_write_unit.cpp:163: Propose transaction complete txid 6 at tablet 72075186224037888 send to client, propose latency: 0 ms, status: STATUS_COMPLETED 2025-05-07T08:54:48.888378Z node 16 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T08:54:48.890112Z node 16 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 275709965, Sender [16:61:2108], Recipient [16:689:2579]: NKikimrLongTxService.TEvLockStatus LockId: 281474976715661 LockNode: 16 Status: STATUS_NOT_FOUND 2025-05-07T08:54:48.894915Z node 16 :TX_DATASHARD TRACE: 
datashard_impl.h:3130: StateWork, received event# 269877761, Sender [16:911:2729], Recipient [16:689:2579]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-05-07T08:54:48.986293Z node 16 :TX_DATASHARD TRACE: datashard_impl.h:3166: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-05-07T08:54:48.986502Z node 16 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [16:910:2728], serverId# [16:911:2729], sessionId# [0:0:0] 2025-05-07T08:54:48.986876Z node 16 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269553224, Sender [16:593:2518], Recipient [16:689:2579]: NKikimr::TEvDataShard::TEvGetOpenTxs
------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/perf/unittest >> KqpQueryPerf::IdxLookupJoinThreeWay-QueryService [GOOD]
Test command err:
Trying to start YDB, gRPC: 2558, MsgBus: 14320 2025-05-07T08:54:31.548991Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501624546320057657:2069];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:54:31.549035Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0027d0/r3tmp/tmpkjBJQn/pdisk_1.dat 2025-05-07T08:54:32.338304Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:54:32.341932Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:54:32.342519Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:54:32.352874Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 2558, node 1 2025-05-07T08:54:32.638318Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:54:32.638341Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:54:32.638361Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:54:32.638484Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:14320 TClient is connected to server localhost:14320 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:54:33.459316Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:54:33.497934Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T08:54:33.512400Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:54:33.703833Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:54:33.927933Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:54:34.075676Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:54:36.052886Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501624567794895787:2407], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:54:36.053016Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:54:36.386687Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T08:54:36.433679Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T08:54:36.506281Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T08:54:36.555429Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501624546320057657:2069];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:54:36.555504Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:54:36.596399Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T08:54:36.631250Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T08:54:36.688853Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T08:54:36.739198Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T08:54:36.823037Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501624567794896451:2471], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:54:36.823109Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:54:36.823905Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501624567794896456:2474], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:54:36.827913Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-05-07T08:54:36.846604Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501624567794896458:2475], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T08:54:36.929497Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501624567794896509:3424] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } Trying to start YDB, gRPC: 6914, MsgBus: 27870 2025-05-07T08:54:40.616985Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7501624586777309203:2288];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:54:40.617584Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0027d0/r3tmp/tmph92UZV/pdisk_1.dat 2025-05-07T08:54:40.836617Z node 2 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:54:40.885231Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:54:40.885327Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:54:40.891608Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 6914, node 2 2025-05-07T08:54:41.154563Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:54:41.154586Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:54:41.154593Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:54:41.154723Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:27870 TClient is connected to server localhost:27870 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:54:41.815980Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T08:54:41.842466Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:54:41.967055Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:54:42.214154Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:54:42.315823Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:54:45.443251Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7501624608252147078:2407], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:54:45.443330Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:54:45.515221Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T08:54:45.616615Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T08:54:45.623367Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7501624586777309203:2288];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:54:45.623576Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:54:45.662706Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T08:54:45.706938Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T08:54:45.764831Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T08:54:45.860595Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T08:54:45.941246Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T08:54:46.055044Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7501624612547115037:2471], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:54:46.055156Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:54:46.055518Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7501624612547115042:2474], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:54:46.059886Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-05-07T08:54:46.078966Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7501624612547115044:2475], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T08:54:46.153108Z node 2 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [2:7501624612547115095:3419] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 }
>> ResourcePoolsDdl::TestPoolSwitchToUnlimitedState [GOOD]
>> ResourcePoolsDdl::TestResourcePoolAcl
>> TBtreeIndexBuilder::NoNodes [GOOD]
>> TBtreeIndexBuilder::OneNode [GOOD]
>> TBtreeIndexBuilder::FewNodes [GOOD]
>> TBtreeIndexBuilder::SplitBySize [GOOD]
>> TBtreeIndexNode::TIsNullBitmap [GOOD]
>> TBtreeIndexNode::CompareTo [GOOD]
>> TBtreeIndexNode::Basics [GOOD]
>> TBtreeIndexNode::Group [GOOD]
>> TBtreeIndexNode::History [GOOD]
>> TBtreeIndexNode::OneKey [GOOD]
>> TBtreeIndexNode::Reusable [GOOD]
>> TBtreeIndexNode::CutKeys [GOOD]
>> TBtreeIndexTPart::Conf [GOOD]
>> TBtreeIndexTPart::NoNodes [GOOD]
>> TBtreeIndexTPart::OneNode [GOOD]
>> TBtreeIndexTPart::FewNodes [GOOD]
>> TBtreeIndexTPart::Erases [GOOD]
>> TBtreeIndexTPart::Groups
>> KqpWorkloadServiceActors::TestPoolFetcherAclValidation [GOOD]
>> KqpWorkloadServiceActors::TestPoolFetcherNotExistingPool
>> TBtreeIndexTPart::Groups [GOOD]
>> TBtreeIndexTPart::History
>> TBtreeIndexTPart::History [GOOD]
>> TBtreeIndexTPart::External
>> DataCleanup::CleanupDataNoTables
>> DataCleanup::CleanupDataNoTables [GOOD]
>> DataCleanup::CleanupDataNoTablesWithRestart [GOOD]
>> DataCleanup::CleanupDataLog
>> DataCleanup::CleanupDataLog [GOOD]
>> DataCleanup::CleanupData [GOOD]
>> DataCleanup::CleanupDataMultipleFamilies
>> TCompactionMulti::ManyParts [GOOD]
>> TCompactionMulti::MainPageCollectionEdge
>> TBtreeIndexTPart::External [GOOD]
>> TChargeBTreeIndex::NoNodes
>> DataCleanup::CleanupDataMultipleFamilies [GOOD]
>> DataCleanup::CleanupDataMultipleTables
>> DataCleanup::CleanupDataMultipleTables [GOOD]
>> DataCleanup::CleanupDataWithFollowers [GOOD]
>> DataCleanup::CleanupDataMultipleTimes
>> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-65 [FAIL]
>> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-66
>> TChargeBTreeIndex::NoNodes [GOOD]
>> TChargeBTreeIndex::NoNodes_Groups
>> DataCleanup::CleanupDataMultipleTimes [GOOD]
>> DataCleanup::CleanupDataEmptyTable [GOOD]
>> DataCleanup::CleanupDataWithRestarts
>> DataCleanup::CleanupDataWithRestarts [GOOD]
>> DataCleanup::CleanupDataRetryWithNotGreaterGenerations
>> KqpQueryPerf::Delete+QueryService+UseSink [GOOD]
>> DataCleanup::CleanupDataRetryWithNotGreaterGenerations [GOOD]
>> DataCleanup::CleanupDataWithTabletGCErrors
>> DataCleanup::CleanupDataWithTabletGCErrors [GOOD]
>> DBase::WideKey
>> DBase::WideKey [GOOD]
>> DBase::VersionBasics [GOOD]
>> DBase::VersionPureMem
>> KqpQueryPerf::Replace-QueryService+UseSink
>> TPart::PageFailEnvColumnGroups [GOOD]
>> TPart::ForwardEnvColumnGroups
>> KqpQueryPerf::KvRead+QueryService
>> TPartSlice::TrivialMerge [GOOD]
>> TPartSlice::SupersetByRowId [GOOD]
>> TPartSlice::Subtract [GOOD]
>> TPartSlice::UnsplitBorrow [GOOD]
>> TPartSliceLoader::RestoreMissingSlice
>> TChargeBTreeIndex::NoNodes_Groups [GOOD]
>> TChargeBTreeIndex::NoNodes_History
>> TPart::ForwardEnvColumnGroups [GOOD]
>> TPart::Versions [GOOD]
>> TPart::ManyVersions [GOOD]
>> TPart::ManyDeltas [GOOD]
>> TPart::CutKeys_Lz4 [GOOD]
>> TPart::CutKeys_Seek
>> KqpWorkloadServiceDistributed::TestDistributedQueue [GOOD]
>> KqpWorkloadServiceDistributed::TestNodeDisconnect
>> TPart::CutKeys_Seek [GOOD]
>> TPart::CutKeys_SeekPages [GOOD]
>> TPart::CutKeys_SeekSlices [GOOD]
>> TPart::CutKeys_CutString [GOOD]
>> TPart::CutKeys_CutUtf8String [GOOD]
>> TPartBtreeIndexIteration::NoNodes
------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/perf/unittest >> KqpQueryPerf::Delete+QueryService+UseSink [GOOD]
Test command err:
Trying to start YDB, gRPC: 31358, MsgBus: 13417 2025-05-07T08:54:36.862389Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501624569144533876:2274];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:54:36.862439Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0027b7/r3tmp/tmpZniIal/pdisk_1.dat 2025-05-07T08:54:37.424301Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:54:37.428718Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:54:37.428827Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:54:37.432802Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 31358, node 1 2025-05-07T08:54:37.522566Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:54:37.522594Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:54:37.522605Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:54:37.522723Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:13417 TClient is connected to server localhost:13417 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:54:38.118735Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T08:54:38.134684Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:54:38.154680Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 2025-05-07T08:54:38.335057Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:54:38.544693Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:54:38.641799Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:54:41.711419Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501624590619371783:2407], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:54:41.711564Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:54:41.863085Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501624569144533876:2274];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:54:41.863166Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:54:42.022267Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T08:54:42.058872Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T08:54:42.131188Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T08:54:42.180452Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T08:54:42.222782Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T08:54:42.302400Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T08:54:42.358557Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T08:54:42.515600Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501624594914339748:2471], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:54:42.515673Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:54:42.515878Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501624594914339753:2474], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:54:42.520106Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-05-07T08:54:42.545289Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501624594914339755:2475], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T08:54:42.624360Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501624594914339806:3421] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } Trying to start YDB, gRPC: 21350, MsgBus: 18605 2025-05-07T08:54:45.682075Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7501624607325158927:2064];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:54:45.682165Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0027b7/r3tmp/tmpH7LdXV/pdisk_1.dat 2025-05-07T08:54:45.826724Z node 2 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:54:45.855579Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:54:45.855675Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:54:45.859242Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 21350, node 2 2025-05-07T08:54:45.961485Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:54:45.961505Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:54:45.961510Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:54:45.961601Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:18605 TClient is connected to server localhost:18605 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:54:46.783997Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T08:54:46.811449Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:54:46.968810Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:54:47.178333Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:54:47.283707Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:54:50.565445Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7501624628799997044:2407], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:54:50.565550Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:54:50.626839Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-05-07T08:54:50.674205Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-05-07T08:54:50.682225Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7501624607325158927:2064];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:54:50.682294Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:54:50.761249Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-05-07T08:54:50.806285Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-05-07T08:54:50.861158Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-05-07T08:54:50.924566Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-05-07T08:54:50.975552Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-05-07T08:54:51.072590Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7501624633094964998:2471], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:54:51.072690Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:54:51.073056Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7501624633094965003:2474], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:54:51.076903Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-05-07T08:54:51.092446Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7501624633094965005:2475], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-05-07T08:54:51.178127Z node 2 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [2:7501624633094965056:3419] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } >> TChargeBTreeIndex::NoNodes_History [GOOD] >> TChargeBTreeIndex::NoNodes_Groups_History >> TPartSliceLoader::RestoreMissingSlice [GOOD] >> TPartSliceLoader::RestoreOneSlice [GOOD] >> TPartSliceLoader::RestoreMissingSliceFullScreen [GOOD] >> TPartSliceLoader::RestoreFromScreenIndexKeys >> TPartSliceLoader::RestoreFromScreenIndexKeys [GOOD] >> TPartSliceLoader::RestoreFromScreenDataKeys [GOOD] >> TRowVersionRangesTest::SimpleInserts [GOOD] >> TRowVersionRangesTest::MergeFailLeft [GOOD] >> TRowVersionRangesTest::MergeFailRight [GOOD] >> TRowVersionRangesTest::MergeFailOuter [GOOD] >> TRowVersionRangesTest::MergeFailInner [GOOD] >> TRowVersionRangesTest::MergeExtendLeft [GOOD] >> TRowVersionRangesTest::MergeExtendLeftInner [GOOD] >> TRowVersionRangesTest::MergeExtendLeftComplete [GOOD] >> TRowVersionRangesTest::MergeExtendRight [GOOD] >> TRowVersionRangesTest::MergeExtendRightInner [GOOD] >> TRowVersionRangesTest::MergeExtendRightComplete [GOOD] >> TRowVersionRangesTest::MergeExtendBoth [GOOD] >> TRowVersionRangesTest::MergeHoleExact [GOOD] >> TRowVersionRangesTest::MergeHoleInner [GOOD] >> TRowVersionRangesTest::MergeHoleOuter [GOOD] >> TRowVersionRangesTest::MergeAllOuter [GOOD] >> TRowVersionRangesTest::MergeAllInner [GOOD] >> TRowVersionRangesTest::MergeAllEdges [GOOD] >> TRowVersionRangesTest::ContainsEmpty [GOOD] >> TRowVersionRangesTest::ContainsNonEmpty [GOOD] >> TRowVersionRangesTest::ContainsInvalid [GOOD] >> TRowVersionRangesTest::AdjustDown [GOOD] >> TRowVersionRangesTest::AdjustDownSnapshot [GOOD] >> TRowVersionRangesTest::SteppedCookieAllocatorOrder [GOOD] >> TRowVersionRangesTest::SteppedCookieAllocatorLowerBound [GOOD] >> TS3FIFOCache::Touch [GOOD] >> TS3FIFOCache::Touch_MainQueue [GOOD] >> TS3FIFOCache::EvictNext [GOOD] >> TS3FIFOCache::UpdateLimit [GOOD] >> TS3FIFOCache::Erase [GOOD] >> TS3FIFOCache::Random >> THiveTest::TestFollowersCrossDC_Easy [GOOD] >> THiveTest::TestFollowers_LocalNodeOnly >> TS3FIFOCache::Random [GOOD] >> TS3FIFOGhostQueue::Basics [GOOD] >> TScheme::Shapshot [GOOD] >> TScheme::Delta [GOOD] >> TScheme::Policy [GOOD] >> TScreen::Cuts [GOOD] >> TScreen::Join [GOOD] >> TScreen::Sequential >> DBase::VersionPureMem [GOOD] >> DBase::VersionPureParts >> KqpWorkloadService::TestQueryCancelAfterPoolWithLimits [GOOD] >> KqpWorkloadService::TestLargeConcurrentQueryLimit >> TScreen::Sequential [GOOD] >> TScreen::Random >> TSharedPageCache::ThreeLeveledLRU >> TFlatTableExecutor_ResourceProfile::TestExecutorStaticMemoryLimits >> TFlatTableExecutor_ResourceProfile::TestExecutorStaticMemoryLimits [GOOD] >> TFlatTableExecutor_ResourceProfile::TestExecutorTxDataLimitExceeded [GOOD] >> TFlatTableExecutor_ResourceProfile::TestExecutorTxDataGC [GOOD] >> TFlatTableExecutor_ResourceProfile::TestExecutorTxPartialDataHold >> TFlatTableExecutor_ResourceProfile::TestExecutorTxPartialDataHold [GOOD] >> TFlatTableExecutor_ResourceProfile::TestExecutorTxHoldAndUse [GOOD] >> TFlatTableExecutor_ResourceProfile::TestExecutorTxHoldOnRelease [GOOD] >> TFlatTableExecutor_ResourceProfile::TestUpdateConfig |90.3%| [LD] {default-linux-x86_64, 
release, asan} $(B)/ydb/core/tx/schemeshard/ut_compaction/ydb-core-tx-schemeshard-ut_compaction |90.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_compaction/ydb-core-tx-schemeshard-ut_compaction |90.3%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_compaction/ydb-core-tx-schemeshard-ut_compaction >> TFlatTableExecutor_ResourceProfile::TestUpdateConfig [GOOD] >> TFlatTableExecutor_SliceOverlapScan::TestSliceOverlapScan >> TCompactionMulti::MainPageCollectionEdge [GOOD] >> TCompactionMulti::MainPageCollectionEdgeMany |90.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/backup/impl/ut_local_partition_reader/unittest >> DBase::KIKIMR_15598_Many_MemTables [GOOD] >> TPartBtreeIndexIteration::NoNodes [GOOD] >> TPartBtreeIndexIteration::NoNodes_Groups >> TCompactionMulti::MainPageCollectionEdgeMany [GOOD] >> TCompactionMulti::MainPageCollectionOverflow >> TCompactionMulti::MainPageCollectionOverflow [GOOD] >> TCompactionMulti::MainPageCollectionOverflowSmallRefs >> DBase::VersionPureParts [GOOD] |90.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/load_test/ut_ycsb/ydb-core-load_test-ut_ycsb |90.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/load_test/ut_ycsb/ydb-core-load_test-ut_ycsb >> TCompactionMulti::MainPageCollectionOverflowSmallRefs [GOOD] >> TCompactionMulti::MainPageCollectionOverflowLargeRefs >> DBase::VersionCompactedMem |90.3%| [LD] {RESULT} $(B)/ydb/core/load_test/ut_ycsb/ydb-core-load_test-ut_ycsb >> TChargeBTreeIndex::NoNodes_Groups_History [GOOD] >> TChargeBTreeIndex::OneNode >> TCompactionMulti::MainPageCollectionOverflowLargeRefs [GOOD] >> TExecutorDb::RandomOps >> Bloom::Conf [GOOD] >> Bloom::Hashes >> TScreen::Random [GOOD] >> TScreen::Shrink [GOOD] >> TScreen::Cook [GOOD] >> TSharedPageCache::Limits >> KqpWorkloadServiceActors::TestPoolFetcherNotExistingPool [GOOD] >> KqpWorkloadServiceActors::TestDefaultPoolUsePermissions >> Bloom::Hashes [GOOD] >> Bloom::Rater >> TChargeBTreeIndex::OneNode [GOOD] >> TChargeBTreeIndex::OneNode_Groups >> DataStreams::TestGetRecordsWithBigSeqno [GOOD] >> Bloom::Rater [GOOD] >> Bloom::Dipping >> KqpWorkloadService::TestQueueSizeManyQueries [GOOD] >> KqpWorkloadService::TestZeroQueueSize >> TSharedPageCache::ThreeLeveledLRU [GOOD] >> TSharedPageCache::S3FIFO |90.3%| [TA] $(B)/ydb/core/tx/schemeshard/ut_base/test-results/unittest/{meta.json ... 
results_accumulator.log} >> KqpWorkloadService::WorkloadServiceDisabledByFeatureFlagOnServerless [GOOD] >> KqpWorkloadService::WorkloadServiceDisabledByInvalidDatabasePath >> DBase::VersionCompactedMem [GOOD] >> DBase::VersionCompactedParts |90.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/backup/impl/ut_local_partition_reader/unittest |90.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/tx_proxy/ut_encrypted_storage/ydb-core-tx-tx_proxy-ut_encrypted_storage |90.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/tx_proxy/ut_encrypted_storage/ydb-core-tx-tx_proxy-ut_encrypted_storage >> TSharedPageCache::Limits [GOOD] >> TSharedPageCache::Limits_Config >> THiveTest::TestFollowers_LocalNodeOnly [GOOD] >> ResourcePoolClassifiersDdl::TestCreateResourcePoolClassifier [GOOD] >> ResourcePoolClassifiersDdl::TestCreateResourcePoolClassifierOnServerless >> THiveTest::TestFollowersCrossDC_Tight >> TFlatTableExecutor_Exceptions::TestTabletExecuteExceptionEnqueue >> TFlatTableExecutor_Exceptions::TestTabletExecuteExceptionEnqueue [GOOD] >> TFlatTableExecutor_ExecutorTxLimit::TestExecutorTxLimit >> TFlatTableExecutor_ExecutorTxLimit::TestExecutorTxLimit [GOOD] >> DBase::VersionCompactedParts [GOOD] >> TFlatTableExecutor_Follower::BasicFollowerRead [GOOD] >> TFlatTableExecutor_Follower::FollowerEarlyRebootHoles >> Memtable::Basics [GOOD] >> Memtable::BasicsReverse >> TFlatTableExecutor_Follower::FollowerEarlyRebootHoles [GOOD] >> TChargeBTreeIndex::OneNode_Groups [GOOD] >> YdbIndexTable::MultiShardTableOneIndexIndexOverlap [GOOD] >> Memtable::BasicsReverse [GOOD] >> TSharedPageCache::Limits_Config [GOOD] >> TSharedPageCache::S3FIFO [GOOD] >> TFlatTableExecutor_SliceOverlapScan::TestSliceOverlapScan [GOOD] >> Bloom::Dipping [GOOD] >> TFlatTableExecutor_Follower::FollowerAttachOnTxQueueScanSnapshot >> TSharedPageCache::ClockPro >> TChargeBTreeIndex::OneNode_History >> TSharedPageCache::ReplacementPolicySwitch >> TSharedPageCache::ReplacementPolicySwitch [GOOD] >> Bloom::Basics [GOOD] >> Bloom::Stairs >> TFlatTableExecutor_SnapshotWithCommits::SnapshotWithCommits >> Memtable::Markers [GOOD] >> TSharedPageCache::MiddleCache_FlatIndex >> TFlatTableExecutor_SnapshotWithCommits::SnapshotWithCommits [GOOD] >> Bloom::Stairs [GOOD] >> Memtable::Overlap [GOOD] >> TFlatTableExecutor_StickyPages::TestNonSticky_FlatIndex >> BuildStatsBTreeIndex::Single >> Memtable::Wreck >> TFlatTableExecutor_StickyPages::TestNonSticky_FlatIndex [GOOD] >> BuildStatsBTreeIndex::Single [GOOD] >> TFlatTableExecutor_StickyPages::TestNonSticky_BTreeIndex |90.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/apps/ydbd/ydbd |90.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/apps/ydbd/ydbd >> TPartBtreeIndexIteration::NoNodes_Groups [GOOD] >> TChargeBTreeIndex::OneNode_History [GOOD] >> THiveTest::TestHiveBalancerWithFollowers [GOOD] >> TSharedPageCache::MiddleCache_FlatIndex [GOOD] >> TestDataErasure::DataErasureWithCopyTable >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-66 [FAIL] >> ResourcePoolsDdl::TestResourcePoolAcl [GOOD] >> TSharedPageCache::ClockPro [GOOD] >> Memtable::Wreck [GOOD] >> KqpQueryPerf::Replace-QueryService+UseSink [GOOD] >> TPersQueueNewSchemeCacheTest::TestWriteStat1stClassTopicAPI [GOOD] >> ResourcePoolsDdl::TestCreateResourcePoolOnServerless [GOOD] >> THiveTest::TestFollowersCrossDC_Tight [GOOD] >> TFlatTableExecutor_Follower::FollowerAttachOnTxQueueScanSnapshot [GOOD] >> KqpPg::ValuesInsert-useSink [GOOD] >> KqpQueryPerf::KvRead+QueryService [GOOD] >> 
TPQTest::TestPQReadAhead [GOOD] >> KqpWorkloadService::WorkloadServiceDisabledByInvalidDatabasePath [GOOD] >> KqpWorkloadServiceActors::TestDefaultPoolUsePermissions [GOOD] >> ResourcePoolsDdl::TestDefaultPoolRestrictions >> TPartBtreeIndexIteration::NoNodes_History >> TChargeBTreeIndex::OneNode_Groups_History >> TSharedPageCache::ZeroCache_BTreeIndex >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-67 >> ResourcePoolsDdl::TestWorkloadConfigOnServerless >> TPartBtreeIndexIteration::NoNodes_History [GOOD] >> BuildStatsBTreeIndex::Single_Slices >> TSharedPageCache::BigCache_BTreeIndex >> Memtable::Erased >> TFlatTableExecutor_StickyPages::TestNonSticky_BTreeIndex [GOOD] >> THiveTest::TestHiveBalancerWithLimit >> TSharedPageCache::ZeroCache_BTreeIndex [GOOD] |90.3%| [LD] {RESULT} $(B)/ydb/core/tx/tx_proxy/ut_encrypted_storage/ydb-core-tx-tx_proxy-ut_encrypted_storage >> TSharedPageCache::BigCache_BTreeIndex [GOOD] >> Memtable::Erased [GOOD] >> BuildStatsBTreeIndex::Single_Slices [GOOD] |90.3%| [LD] {RESULT} $(B)/ydb/apps/ydbd/ydbd >> KqpWorkloadServiceActors::TestDefaultPoolAdminPermissions >> PgCatalog::PgType >> THiveTest::TestFollowersCrossDC_MovingLeader >> TPartBtreeIndexIteration::OneNode >> TFlatTableExecutor_Follower::FollowerAttachAfterLoan >> TFlatTableExecutor_Follower::FollowerAttachAfterLoan [GOOD] >> TFlatTableExecutor_Gc::TestFailedGcAfterReboot >> TFlatTableExecutor_Gc::TestFailedGcAfterReboot [GOOD] >> TFlatTableExecutor_IndexLoading::CalculateReadSize_FlatIndex >> TFlatTableExecutor_IndexLoading::CalculateReadSize_FlatIndex [GOOD] >> TFlatTableExecutor_IndexLoading::CalculateReadSize_BTreeIndex >> TFlatTableExecutor_IndexLoading::CalculateReadSize_BTreeIndex [GOOD] >> TFlatTableExecutor_IndexLoading::PrechargeAndSeek_FlatIndex >> KqpWorkloadService::TestZeroQueueSizeManyQueries >> KqpQueryPerf::KvRead-QueryService >> KqpJoinOrder::OltpJoinTypeHintCBOTurnOFF [GOOD] >> TSharedPageCache::BigCache_FlatIndex >> TSharedPageCache::BigCache_FlatIndex [GOOD] >> TSharedPageCache::MiddleCache_BTreeIndex >> TSharedPageCache::MiddleCache_BTreeIndex [GOOD] >> TFlatTableExecutor_StickyPages::TestSticky >> NFwd_TBlobs::MemTableTest [GOOD] >> NFwd_TBlobs::Lower [GOOD] >> NFwd_TBlobs::Sieve [GOOD] >> NFwd_TBlobs::SieveFiltered [GOOD] >> NFwd_TBlobs::Basics [GOOD] >> NFwd_TBlobs::Simple [GOOD] >> NFwd_TBlobs::Shuffle [GOOD] >> THiveTest::TestHiveBalancerWithLimit [GOOD] >> NFwd_TBlobs::Grow [GOOD] >> NFwd_TBlobs::Trace [GOOD] >> NFwd_TBlobs::Filtered [GOOD] >> NFwd_TBTreeIndexCache::Basics [GOOD] >> TChargeBTreeIndex::OneNode_Groups_History [GOOD] >> TSharedPageCache::ZeroCache_FlatIndex >> BuildStatsBTreeIndex::Single_History >> TPartBtreeIndexIteration::OneNode [GOOD] >> NFwd_TBTreeIndexCache::IndexPagesLocator [GOOD] >> NFwd_TBTreeIndexCache::GetTwice [GOOD] >> THiveTest::TestHiveBalancerIgnoreTablet >> TChargeBTreeIndex::FewNodes >> TFlatTableExecutor_StickyPages::TestSticky [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/services/persqueue_v1/ut/new_schemecache_ut/unittest >> TPersQueueNewSchemeCacheTest::TestWriteStat1stClassTopicAPI [GOOD] Test command err: 2025-05-07T08:53:53.441157Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501624383035521541:2075];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:53:53.441222Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path 
existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-05-07T08:53:53.522820Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7501624382548594116:2155];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:53:53.900788Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003475/r3tmp/tmpwOOZlR/pdisk_1.dat 2025-05-07T08:53:53.979670Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-05-07T08:53:54.050992Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-05-07T08:53:54.559375Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:53:54.612244Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:53:54.623034Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:53:54.623156Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:53:54.628515Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:53:54.628591Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:53:54.636150Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:53:54.644511Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-05-07T08:53:54.649368Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 22294, node 1 2025-05-07T08:53:54.946850Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/zvgn/003475/r3tmp/yandexTD8CDj.tmp 2025-05-07T08:53:54.946879Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/zvgn/003475/r3tmp/yandexTD8CDj.tmp 2025-05-07T08:53:54.947058Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/zvgn/003475/r3tmp/yandexTD8CDj.tmp 2025-05-07T08:53:54.947196Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-05-07T08:53:55.066127Z INFO: TTestServer started on Port 5606 GrpcPort 22294 TClient is connected to server localhost:5606 PQClient connected to localhost:22294 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:53:55.867132Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-05-07T08:53:56.056848Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... waiting... waiting... 2025-05-07T08:53:58.446100Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501624383035521541:2075];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:53:58.446194Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:53:58.522397Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7501624382548594116:2155];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:53:58.522482Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:53:59.595158Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501624408805326442:2342], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:53:59.595332Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:53:59.596706Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501624408805326455:2346], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:53:59.600309Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710662:3, at schemeshard: 72057594046644480 2025-05-07T08:53:59.629227Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501624408805326458:2347], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710662 completed, doublechecking } 2025-05-07T08:53:59.970218Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501624408805326549:2778] txid# 281474976710663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:54:00.005500Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T08:54:00.132581Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7501624408805326565:2353], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-05-07T08:54:00.135112Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2147: SessionId: ydb://session/3?node_id=1&id=NmNiOGFiYjEtZGRkZDAxOS0zYzJhODQzMS1jN2FiODE3OA==, ActorId: [1:7501624408805326439:2340], ActorState: ExecuteState, TraceId: 01jtmz79my0ve2gch8rt3jzkms, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-05-07T08:54:00.137339Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-05-07T08:54:00.156932Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T08:54:00.373326Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); 2025-05-07T08:54:00.721630Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710667. Ctx: { TraceId: 01jtmz7akc7qfbew1hhv4wt7y8, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Y2Q0YjJjZDktYTQwNWVlMmEtNDNmZjFkYzAtZGE0MDJiODQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root === CheckClustersList. Subcribe to ClusterTracker from [1:7501624413100294327:3123] === CheckClustersList. Ok 2025-05-07T08:54:07.571215Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2663: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7501624383035521787:2127], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-05-07T08:54:07.571490Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2550: HandleNotify: self# [1:7501624383035521787:2127], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /Root PathId: Partial: 0 } 2025-05-07T08:54:07.571794Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2425: ResolveCacheItem: self# [1:7501624383035521787:2127], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /Root PathId: Partial: 0 }, by path# { Subscriber: { Subscriber: [1:7501624387330489414:2334] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 14 } Filled: 1 Status: StatusSucc ... 
5:02.415017Z node 8 :PQ_READ_PROXY DEBUG: caching_service.cpp:138: Direct read cache: server session deregistered: some@random@consumer_7_1_10129411888228270825_v1 2025-05-07T08:55:02.418085Z node 7 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 1 sessionId: 123|57eadced-2aed4cfc-709968ac-e45a7960_0 is DEAD 2025-05-07T08:55:02.424324Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2433: [PQ: 72075186224037894] Destroy direct read session some@random@consumer_7_1_10129411888228270825_v1 2025-05-07T08:55:02.424402Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2899: [PQ: 72075186224037894] server disconnected, pipe [7:7501624678332568505:2545] destroyed 2025-05-07T08:55:02.424502Z node 8 :PQ_READ_PROXY DEBUG: caching_service.cpp:138: Direct read cache: server session deregistered: some@random@consumer_7_1_10129411888228270825_v1 2025-05-07T08:55:02.523037Z node 7 :PQ_WRITE_PROXY DEBUG: writer.cpp:538: TPartitionWriter 72075186224037896 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-05-07T08:55:02.523756Z node 7 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2663: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [7:7501624609613088927:2126], request# { ErrorCount: 0 DatabaseName: /Root DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-05-07T08:55:02.523906Z node 7 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1818: FillEntry for TNavigate: self# [7:7501624609613088927:2126], cacheItem# { Subscriber: { Subscriber: [7:7501624613908056727:2442] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: Root/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-05-07T08:55:02.524010Z node 7 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:264: Send result: self# [7:7501624678332568536:4134], recipient# [7:7501624678332568535:2561], result# { ErrorCount: 1 DatabaseName: /Root DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-05-07T08:55:02.525274Z node 7 :PERSQUEUE DEBUG: pq_impl.cpp:2899: [PQ: 72075186224037896] server disconnected, pipe [7:7501624678332568481:2533] destroyed 2025-05-07T08:55:02.525372Z node 7 :PERSQUEUE DEBUG: partition_write.cpp:138: [PQ: 72075186224037896, Partition: 0, State: StateIdle] TPartition::DropOwner. 
2025-05-07T08:55:02.560171Z node 7 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2721: Handle TEvTxProxySchemeCache::TEvResolveKeySet: self# [7:7501624609613088927:2126], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 ResultSet [{ TableId: [OwnerId: 72057594046644480, LocalPathId: 12] Access: 1 SyncVersion: false Status: Unknown Kind: KindUnknown PartitionsCount: 0 DomainInfo From: (Utf8 : NULL) IncFrom: 1 To: () IncTo: 0 },{ TableId: [OwnerId: 72057594046644480, LocalPathId: 10] Access: 1 SyncVersion: false Status: Unknown Kind: KindUnknown PartitionsCount: 0 DomainInfo From: (Utf8 : NULL) IncFrom: 1 To: () IncTo: 0 }] } 2025-05-07T08:55:02.560299Z node 7 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2029: FillEntry for TResolve: self# [7:7501624609613088927:2126], cacheItem# { Subscriber: { Subscriber: [7:7501624639677861111:2837] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 16 } Filled: 1 Status: StatusSuccess Kind: 3 TableKind: 1 Created: 1 CreateStep: 1746608094553 PathId: [OwnerId: 72057594046644480, LocalPathId: 12] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 1 }, entry# { TableId: [OwnerId: 72057594046644480, LocalPathId: 12] Access: 1 SyncVersion: false Status: Unknown Kind: KindUnknown PartitionsCount: 0 DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-05-07T08:55:02.560556Z node 7 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2029: FillEntry for TResolve: self# [7:7501624609613088927:2126], cacheItem# { Subscriber: { Subscriber: [7:7501624639677860996:2767] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 16 } Filled: 1 Status: StatusSuccess Kind: 3 TableKind: 1 Created: 1 CreateStep: 1746608094077 PathId: [OwnerId: 72057594046644480, LocalPathId: 10] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 1 }, entry# { TableId: [OwnerId: 72057594046644480, LocalPathId: 10] Access: 1 SyncVersion: false Status: Unknown Kind: KindUnknown PartitionsCount: 0 DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-05-07T08:55:02.560863Z node 7 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:264: Send result: self# [7:7501624678332568539:4135], recipient# [7:7501624678332568538:2508], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 ResultSet [{ TableId: [OwnerId: 72057594046644480, LocalPathId: 12] Access: 1 SyncVersion: false Status: OkData Kind: KindRegularTable PartitionsCount: 1 DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 2 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } From: (Utf8 : NULL) IncFrom: 1 To: () IncTo: 0 },{ TableId: [OwnerId: 72057594046644480, LocalPathId: 10] Access: 1 SyncVersion: false Status: OkData Kind: KindRegularTable PartitionsCount: 1 DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 2 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } From: (Utf8 : NULL) IncFrom: 1 To: () IncTo: 0 }] } 2025-05-07T08:55:02.634503Z node 8 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2663: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [8:7501624611378725418:2104], request# { 
ErrorCount: 0 DatabaseName: /Root DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-05-07T08:55:02.634691Z node 8 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1818: FillEntry for TNavigate: self# [8:7501624611378725418:2104], cacheItem# { Subscriber: { Subscriber: [8:7501624615673692750:2111] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: Root/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-05-07T08:55:02.634805Z node 8 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:264: Send result: self# [8:7501624680098202683:2422], recipient# [8:7501624680098202682:2362], result# { ErrorCount: 1 DatabaseName: /Root DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-05-07T08:55:02.795209Z node 7 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2663: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [7:7501624609613088927:2126], request# { ErrorCount: 0 DatabaseName: /Root DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-05-07T08:55:02.795425Z node 7 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1818: FillEntry for TNavigate: self# [7:7501624609613088927:2126], cacheItem# { Subscriber: { Subscriber: [7:7501624613908056727:2442] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: Root/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-05-07T08:55:02.795563Z node 7 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:264: Send result: self# [7:7501624678332568555:4140], recipient# [7:7501624678332568554:2565], result# { ErrorCount: 1 DatabaseName: /Root DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-05-07T08:55:02.847366Z node 7 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2663: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [7:7501624609613088927:2126], request# { ErrorCount: 0 DatabaseName: /Root 
DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-05-07T08:55:02.847544Z node 7 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1818: FillEntry for TNavigate: self# [7:7501624609613088927:2126], cacheItem# { Subscriber: { Subscriber: [7:7501624639677860867:2700] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: Root/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-05-07T08:55:02.847648Z node 7 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:264: Send result: self# [7:7501624678332568560:4141], recipient# [7:7501624678332568559:2566], result# { ErrorCount: 1 DatabaseName: /Root DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/idx_test/unittest >> YdbIndexTable::MultiShardTableOneIndexIndexOverlap [GOOD] Test command err: Trying to start YDB, gRPC: 10603, MsgBus: 24240 2025-05-07T08:51:40.494383Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501623811328367729:2199];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:51:40.506419Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/001ea9/r3tmp/tmp8eprlG/pdisk_1.dat 2025-05-07T08:51:41.155561Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:51:41.195121Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:51:41.195225Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:51:41.203963Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 10603, node 1 2025-05-07T08:51:41.438681Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:51:41.438717Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:51:41.438729Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:51:41.438847Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to 
server localhost:24240 TClient is connected to server localhost:24240 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:51:42.216977Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:51:42.241469Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T08:51:42.256393Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:51:42.444374Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:51:42.724320Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:51:42.817787Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:51:44.786330Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623828508238414:2407], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:51:44.786466Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:51:45.149110Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T08:51:45.191684Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T08:51:45.237562Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T08:51:45.315425Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T08:51:45.372020Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T08:51:45.418572Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T08:51:45.463778Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T08:51:45.503872Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501623811328367729:2199];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:51:45.503937Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:51:45.606360Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623832803206374:2470], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T08:51:45.606438Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T08:51:45.606853Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623832803206379:2473], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T08:51:45.610719Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480
2025-05-07T08:51:45.632527Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501623832803206381:2474], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T08:51:45.733504Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501623832803206434:3419] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:51:47.389110Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480 2025-05-07T08:51:48.861962Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710674. Ctx: { TraceId: 01jtmz39ykfy7xzdv0yz92sd79, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MWU5MDhhYWQtZDkyMDBkZjItZDI5MzkwYzAtZWQ4MDYwOTA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:51:48.870331Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710673. Ctx: { TraceId: 01jtmz39ykeqammtfrgxxbc3w0, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Y2QxNjhhMTAtOWFjMjRhNDYtNzQ0NGE3NjQtZjAzNjg4Y2Y=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:51:48.945372Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710675. Ctx: { TraceId: 01jtmz39ykd8s8qfaexa7mnh79, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NzVhYmU4ZmQtN2JhNWFkNDctYzhlOWE5NDQtOTA5ZTgxMWM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:51:48.957274Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710677. Ctx: { TraceId: 01jtmz39z92583md770gamppxs, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=N2U4NTVlMi1hYjY1YWNmOS1hMGFkZWFkZC1lMzk2YjliNg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:51:48.996902Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710676. Ctx: { TraceId: 01jtmz39z7ccgspvm0kzk33rb3, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTlmNTFjZmYtYmRhODYxYmYtZGU0MGJmZDItY2RlNGEzYTU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:51:49.002176Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710678. Ctx: { TraceId: 01jtmz39ykeqammtfrgxxbc3w0, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Y2QxNjhhMTAtOWFjMjRhNDYtNzQ0NGE3NjQtZjAzNjg4Y2Y=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:51:49.030567Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710682. Ctx: { TraceId: 01jtmz3a14avgwayfwb43psw32, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MWU1YjM2ZjctM2FhZDlhNTgtNmUyZDExOS0zOWFlNTE1NQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:51:49.032572Z node 1 :KQP ... zRhYWItNDc3ZmFjNzYtYTZlZGZiNzMtOWQ4ZTAzOTY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:54:57.423498Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976714337. 
Ctx: { TraceId: 01jtmz922d6v9btcvxjjt14t02, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YzE3NDlkYzgtNjk3MTRkMzAtOTkzMDQzMjctYmNmODEwNDE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:54:57.427750Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976714338. Ctx: { TraceId: 01jtmz9223b7drbpkyr7qhvf45, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=Nzk4MGVhYTYtNmQ2NGY3MjYtNDg2YTU1NDYtNjU2OTBlOTM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:54:57.435318Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976714339. Ctx: { TraceId: 01jtmz922d6v9btcvxjjt14t02, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YzE3NDlkYzgtNjk3MTRkMzAtOTkzMDQzMjctYmNmODEwNDE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:54:57.462389Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976714341. Ctx: { TraceId: 01jtmz924w8yc7f2agj1asermt, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YmRlMWMzNjAtOTMyZGQ3NmMtNTBjNDU4ZGQtZjI1ODZkYWQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:54:57.466867Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976714340. Ctx: { TraceId: 01jtmz92586qdq650zwv5bpqr1, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=ZGVjYTkzZWQtZjY0MTFiMTItZjRmZTk4YmYtYzM3YTNjYmQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:54:57.473515Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976714342. Ctx: { TraceId: 01jtmz925f05agzzyewrwpj2sb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YTcyYmNjMzgtMjhjZDBmMjgtMmE0N2Y0ZjUtODIxMDE3MWQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:54:57.486717Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976714343. Ctx: { TraceId: 01jtmz924w8yc7f2agj1asermt, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YmRlMWMzNjAtOTMyZGQ3NmMtNTBjNDU4ZGQtZjI1ODZkYWQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:54:57.496371Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976714344. Ctx: { TraceId: 01jtmz924w8yc7f2agj1asermt, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YmRlMWMzNjAtOTMyZGQ3NmMtNTBjNDU4ZGQtZjI1ODZkYWQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:54:57.498605Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976714345. Ctx: { TraceId: 01jtmz926h46kf3c07cvqfgnyd, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=NGI0MzRhYWItNDc3ZmFjNzYtYTZlZGZiNzMtOWQ4ZTAzOTY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:54:57.510214Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976714346. Ctx: { TraceId: 01jtmz926qd9vbdcdac1vmag3a, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=Nzk4MGVhYTYtNmQ2NGY3MjYtNDg2YTU1NDYtNjU2OTBlOTM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:54:57.523749Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976714347. 
Ctx: { TraceId: 01jtmz9277enr71qewje16kdny, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YzE3NDlkYzgtNjk3MTRkMzAtOTkzMDQzMjctYmNmODEwNDE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:54:57.538886Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976714348. Ctx: { TraceId: 01jtmz927b7z8gm0ma3skhssse, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=ZGVjYTkzZWQtZjY0MTFiMTItZjRmZTk4YmYtYzM3YTNjYmQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:54:57.573794Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976714350. Ctx: { TraceId: 01jtmz928fash0sge9sbsfevvg, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=Nzk4MGVhYTYtNmQ2NGY3MjYtNDg2YTU1NDYtNjU2OTBlOTM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:54:57.574900Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976714351. Ctx: { TraceId: 01jtmz928n4wfyrhakswjpg9jq, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=NGI0MzRhYWItNDc3ZmFjNzYtYTZlZGZiNzMtOWQ4ZTAzOTY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:54:57.579496Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976714349. Ctx: { TraceId: 01jtmz928f0t7pph75wbg9affr, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YTcyYmNjMzgtMjhjZDBmMjgtMmE0N2Y0ZjUtODIxMDE3MWQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:54:57.585189Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976714352. Ctx: { TraceId: 01jtmz928s8v2a6ns33dewdf2w, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YmRlMWMzNjAtOTMyZGQ3NmMtNTBjNDU4ZGQtZjI1ODZkYWQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:54:57.585343Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976714353. Ctx: { TraceId: 01jtmz928fash0sge9sbsfevvg, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=Nzk4MGVhYTYtNmQ2NGY3MjYtNDg2YTU1NDYtNjU2OTBlOTM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:54:57.603022Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976714354. Ctx: { TraceId: 01jtmz928n4wfyrhakswjpg9jq, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=NGI0MzRhYWItNDc3ZmFjNzYtYTZlZGZiNzMtOWQ4ZTAzOTY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:54:57.620253Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976714355. Ctx: { TraceId: 01jtmz929qah8m6h61aj3fafaz, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YzE3NDlkYzgtNjk3MTRkMzAtOTkzMDQzMjctYmNmODEwNDE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:54:57.630626Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976714356. Ctx: { TraceId: 01jtmz929qah8m6h61aj3fafaz, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YzE3NDlkYzgtNjk3MTRkMzAtOTkzMDQzMjctYmNmODEwNDE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:54:57.654157Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976714357. 
Ctx: { TraceId: 01jtmz92bjfgfc61hwce46be78, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YTcyYmNjMzgtMjhjZDBmMjgtMmE0N2Y0ZjUtODIxMDE3MWQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:54:57.664605Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976714358. Ctx: { TraceId: 01jtmz92be7qmxkkb4724t5sce, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=ZGVjYTkzZWQtZjY0MTFiMTItZjRmZTk4YmYtYzM3YTNjYmQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:54:57.676991Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976714359. Ctx: { TraceId: 01jtmz92bpdyg5rbd9gjsyvxsf, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=Nzk4MGVhYTYtNmQ2NGY3MjYtNDg2YTU1NDYtNjU2OTBlOTM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:54:57.687357Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976714360. Ctx: { TraceId: 01jtmz92be7qmxkkb4724t5sce, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=ZGVjYTkzZWQtZjY0MTFiMTItZjRmZTk4YmYtYzM3YTNjYmQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:54:57.698766Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976714361. Ctx: { TraceId: 01jtmz92ce8sg1jt6jf16z226q, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=NGI0MzRhYWItNDc3ZmFjNzYtYTZlZGZiNzMtOWQ4ZTAzOTY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:54:57.698982Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976714362. Ctx: { TraceId: 01jtmz92bpdyg5rbd9gjsyvxsf, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=Nzk4MGVhYTYtNmQ2NGY3MjYtNDg2YTU1NDYtNjU2OTBlOTM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:54:57.705575Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976714363. Ctx: { TraceId: 01jtmz92cpa4dmfmrb3ej7vf36, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YmRlMWMzNjAtOTMyZGQ3NmMtNTBjNDU4ZGQtZjI1ODZkYWQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:54:57.707014Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976714365. Ctx: { TraceId: 01jtmz92ce8sg1jt6jf16z226q, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=NGI0MzRhYWItNDc3ZmFjNzYtYTZlZGZiNzMtOWQ4ZTAzOTY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:54:57.709163Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976714364. Ctx: { TraceId: 01jtmz92cz7e16n83ehzd5n07z, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YzE3NDlkYzgtNjk3MTRkMzAtOTkzMDQzMjctYmNmODEwNDE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root finished with status: SUCCESS 2025-05-07T08:54:57.724408Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976714366. Ctx: { TraceId: 01jtmz92cz7e16n83ehzd5n07z, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YzE3NDlkYzgtNjk3MTRkMzAtOTkzMDQzMjctYmNmODEwNDE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root finished with status: SUCCESS finished with status: SUCCESS 2025-05-07T08:54:57.755029Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976714367. Ctx: { TraceId: 01jtmz92egaw90j061trzprf2j, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YTcyYmNjMzgtMjhjZDBmMjgtMmE0N2Y0ZjUtODIxMDE3MWQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:54:57.764100Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976714368. Ctx: { TraceId: 01jtmz92egaw90j061trzprf2j, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YTcyYmNjMzgtMjhjZDBmMjgtMmE0N2Y0ZjUtODIxMDE3MWQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:54:57.779611Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976714369. Ctx: { TraceId: 01jtmz92cpa4dmfmrb3ej7vf36, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YmRlMWMzNjAtOTMyZGQ3NmMtNTBjNDU4ZGQtZjI1ODZkYWQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root finished with status: SUCCESS finished with status: SUCCESS ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tablet_flat/ut/unittest >> TSharedPageCache::MiddleCache_BTreeIndex [GOOD] Test command err: SmallQueue: MainQueue: {11 0f 1b}, {14 1f 1b}, {15 2f 1b}, {18 0f 1b}, {19 0f 1b}, {23 0f 1b}, {27 0f 1b} GhostQueue: 9, 12, 13, 16, 17, 20, 21, 24, 25, 28 0.2954 00000.000 II| FAKE_ENV: Born at 2025-05-07T08:54:59.680698Z 00000.010 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144 00000.010 II| FAKE_ENV: Starting storage for BS group 0 00000.011 II| FAKE_ENV: Starting storage for BS group 1 00000.011 II| FAKE_ENV: Starting storage for BS group 2 00000.011 II| FAKE_ENV: Starting storage for BS group 3 00000.035 II| TABLET_EXECUTOR: Leader{1:2:0} activating executor 00000.036 II| TABLET_EXECUTOR: LSnap{1:2, on 2:1, 35b, wait} done, Waste{2:0, 0b +(0, 0b), 0 trc} 00000.036 DD| TABLET_EXECUTOR: Leader{1:2:2} commited cookie 2 for step 1 00000.037 DD| TABLET_EXECUTOR: Leader{1:2:2} Tx{1, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxInitSchema} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxInitSchema 00000.037 DD| TABLET_EXECUTOR: Leader{1:2:2} Tx{1, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxInitSchema} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.037 DD| TABLET_EXECUTOR: Leader{1:2:2} Tx{1, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxInitSchema} hope 1 -> done Change{2, redo 0b alter 209b annex 0, ~{ } -{ }, 0 gb} 00000.037 DD| TABLET_EXECUTOR: Leader{1:2:2} Tx{1, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxInitSchema} release 4194304b of static, Memory{0 dyn 0} 00000.038 DD| TABLET_EXECUTOR: Leader{1:2:3} commited cookie 1 for step 2 00000.038 NN| TABLET_SAUSAGECACHE: Update config MemoryLimit: 8388608 ReplacementPolicy: ThreeLeveledLRU 00000.038 NN| TABLET_SAUSAGECACHE: Switch replacement policy from S3FIFO to ThreeLeveledLRU 00000.038 NN| TABLET_SAUSAGECACHE: Switch replacement policy done from S3FIFO to ThreeLeveledLRU 00000.039 DD| TABLET_EXECUTOR: Leader{1:2:3} Tx{2, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow 00000.039 DD| TABLET_EXECUTOR: Leader{1:2:3} Tx{2, 
NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.040 DD| TABLET_EXECUTOR: Leader{1:2:3} Tx{2, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{2, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb} 00000.040 DD| TABLET_EXECUTOR: Leader{1:2:3} Tx{2, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} release 4194304b of static, Memory{0 dyn 0} 00000.040 DD| TABLET_EXECUTOR: Leader{1:2:4} commited cookie 1 for step 3 00000.041 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{3, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow 00000.041 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{3, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.041 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{3, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{3, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb} 00000.042 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{3, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} release 4194304b of static, Memory{0 dyn 0} 00000.042 DD| TABLET_EXECUTOR: Leader{1:2:5} commited cookie 1 for step 4 00000.042 DD| TABLET_EXECUTOR: Leader{1:2:5} Tx{4, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow 00000.042 DD| TABLET_EXECUTOR: Leader{1:2:5} Tx{4, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.043 DD| TABLET_EXECUTOR: Leader{1:2:5} Tx{4, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{4, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb} 00000.043 DD| TABLET_EXECUTOR: Leader{1:2:5} Tx{4, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} release 4194304b of static, Memory{0 dyn 0} 00000.043 DD| TABLET_EXECUTOR: Leader{1:2:6} commited cookie 1 for step 5 00000.044 DD| TABLET_EXECUTOR: Leader{1:2:6} Tx{5, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow 00000.044 DD| TABLET_EXECUTOR: Leader{1:2:6} Tx{5, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.045 DD| TABLET_EXECUTOR: Leader{1:2:6} Tx{5, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{5, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb} 00000.045 DD| TABLET_EXECUTOR: Leader{1:2:6} Tx{5, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} release 4194304b of static, Memory{0 dyn 0} 00000.045 DD| TABLET_EXECUTOR: Leader{1:2:7} commited cookie 1 for step 6 00000.046 DD| TABLET_EXECUTOR: Leader{1:2:7} Tx{6, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow 00000.046 DD| TABLET_EXECUTOR: Leader{1:2:7} Tx{6, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.046 DD| TABLET_EXECUTOR: Leader{1:2:7} Tx{6, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{6, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb} 00000.046 DD| TABLET_EXECUTOR: Leader{1:2:7} Tx{6, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} 
release 4194304b of static, Memory{0 dyn 0} 00000.047 DD| TABLET_EXECUTOR: Leader{1:2:8} commited cookie 1 for step 7 00000.047 DD| TABLET_EXECUTOR: Leader{1:2:8} Tx{7, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow 00000.047 DD| TABLET_EXECUTOR: Leader{1:2:8} Tx{7, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.048 DD| TABLET_EXECUTOR: Leader{1:2:8} Tx{7, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{7, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb} 00000.048 DD| TABLET_EXECUTOR: Leader{1:2:8} Tx{7, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} release 4194304b of static, Memory{0 dyn 0} 00000.048 DD| TABLET_EXECUTOR: Leader{1:2:9} commited cookie 1 for step 8 00000.049 DD| TABLET_EXECUTOR: Leader{1:2:9} Tx{8, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow 00000.049 DD| TABLET_EXECUTOR: Leader{1:2:9} Tx{8, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.049 DD| TABLET_EXECUTOR: Leader{1:2:9} Tx{8, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{8, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb} 00000.049 DD| TABLET_EXECUTOR: Leader{1:2:9} Tx{8, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} release 4194304b of static, Memory{0 dyn 0} 00000.050 DD| TABLET_EXECUTOR: Leader{1:2:10} commited cookie 1 for step 9 00000.050 DD| TABLET_EXECUTOR: Leader{1:2:10} Tx{9, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow 00000.050 DD| TABLET_EXECUTOR: Leader{1:2:10} Tx{9, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.051 DD| TABLET_EXECUTOR: Leader{1:2:10} Tx{9, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{9, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb} 00000.051 DD| TABLET_EXECUTOR: Leader{1:2:10} Tx{9, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} release 4194304b of static, Memory{0 dyn 0} 00000.051 DD| TABLET_EXECUTOR: Leader{1:2:11} commited cookie 1 for step 10 00000.051 DD| TABLET_EXECUTOR: Leader{1:2:11} Tx{10, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow 00000.052 DD| TABLET_EXECUTOR: Leader{1:2:11} Tx{10, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.052 DD| TABLET_EXECUTOR: Leader{1:2:11} Tx{10, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{10, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb} 00000.052 DD| TABLET_EXECUTOR: Leader{1:2:11} Tx{10, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} release 4194304b of static, Memory{0 dyn 0} 00000.052 DD| TABLET_EXECUTOR: Leader{1:2:12} commited cookie 1 for step 11 00000.053 DD| TABLET_EXECUTOR: Leader{1:2:12} Tx{11, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow 00000.053 DD| TABLET_EXECUTOR: Leader{1:2:12} Tx{11, 
NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.053 DD| TABLET_EXECUTOR: Leader{1:2:12} Tx{11, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{11, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb} 00000.053 DD| TABLET_EXECUTOR: Leader{1:2:12} Tx{11, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} release 4194304b of static, Memory{0 dyn 0} 00000.054 DD| TABLET_EXECUTOR: Leader{1:2:13} commited cookie 1 for step 12 00000.054 DD| TABLET_EXECUTOR: Leader{1:2:13} Tx{12, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow 00000.054 DD| TABLET_EXECUTOR: Leader{1:2:13} Tx{12, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.055 DD| TABLET_EXECUTOR: Leader{1:2:13} Tx{12, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{12, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb} 00000.055 DD| TABLET_EXECUTOR: Leader{1:2:13} Tx{12, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} release 4194304b of static, Memory{0 dyn 0} 00000.055 DD| TABLET_EXECUTOR: Leader{1:2:14} commited cookie 1 for step 13 00000.056 DD| TABLET_EXECUTOR: Leader{1:2:14} Tx{13, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow 00000.056 DD| TABLET_EXECUTOR: Leader{1:2:14} Tx{13, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.056 DD| TABLET_EXECUTOR: Leader{1:2:14} Tx{13, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{13, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb} 00000.056 DD| TABLET_EXECUTOR: Leader{1:2:14} Tx{13, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} release 4194304b of static, Memory{0 dyn 0} 00000.057 DD| TABLET_EXECUTOR: Leader{1:2:15} commited cookie 1 for step 14 00000.057 DD| TABLET_EXECUTOR: Leader{1:2:15} Tx{14, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow 00000.057 DD| TABLET_EXECUTOR: Leader{1:2:15} Tx{14, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.057 DD| TABLET_EXECUTOR: Leader{1:2:15} Tx{14, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{14, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb} 00000.058 DD| TABLET_EXECUTOR: Leader{1:2:15} Tx{14, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWr ... 
TTxReadRow} took 8388608b of static mem, Memory{8388608 dyn 0} 00000.466 D3| TABLET_EXECUTOR: Leader{1:3:2} requests PageCollection [1:2:103:1:12288:2976:0] 102443 bytes, 1 pages: [4 4] 00000.466 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{96, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} postponed, 102443b, pages {1 wait, 1 load}, freshly touched 4 pages 00000.466 TT| TABLET_SAUSAGECACHE: Request page collection [1:2:103:1:12288:2976:0] owner [6:580:2605] cookie 1 class Online from cache [ ] already requested [ ] to request [ 4 ] 00000.466 DD| TABLET_SAUSAGECACHE: Drop page collection [1:2:103:1:12288:2976:0] pages [ 96 ] owner [6:580:2605] 00000.466 TT| TABLET_SAUSAGECACHE: Receive page collection [1:2:103:1:12288:2976:0] status OK pages [ 4 ] 00000.466 TT| TABLET_SAUSAGECACHE: Send page collection result [1:2:103:1:12288:2976:0] owner [6:580:2605] class Online pages [ 4 ] cookie 1 00000.467 DD| TABLET_EXECUTOR: Leader{1:3:2} got result TEvResult{1 pages [1:2:103:1:12288:2976:0] ok OK}, category 1 00000.467 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{96, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} hope 2 -> done Change{103, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 00000.467 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{96, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} release 8388608b of static, Memory{0 dyn 0} 00000.467 TT| TABLET_SAUSAGECACHE: Touch page collection [1:2:103:1:12288:2976:0] owner [6:580:2605] pages [ 14 4 117 111 ] 00000.468 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{97, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow 00000.468 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{97, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.468 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{97, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} hope 1 -> retry Change{103, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 00000.468 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{97, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} touch new 796b, 102443b lo load (103239b in total), 0b requested for data (4194304b in total) 00000.468 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{97, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} took 8388608b of static mem, Memory{8388608 dyn 0} 00000.468 D3| TABLET_EXECUTOR: Leader{1:3:2} requests PageCollection [1:2:103:1:12288:2976:0] 102443 bytes, 1 pages: [3 4] 00000.468 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{97, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} postponed, 102443b, pages {1 wait, 1 load}, freshly touched 4 pages 00000.468 TT| TABLET_SAUSAGECACHE: Request page collection [1:2:103:1:12288:2976:0] owner [6:580:2605] cookie 1 class Online from cache [ ] already requested [ ] to request [ 3 ] 00000.468 DD| TABLET_SAUSAGECACHE: Drop page collection [1:2:103:1:12288:2976:0] pages [ 95 ] owner [6:580:2605] 00000.468 TT| TABLET_SAUSAGECACHE: Receive page collection [1:2:103:1:12288:2976:0] status OK pages [ 3 ] 00000.468 TT| TABLET_SAUSAGECACHE: Send page collection result [1:2:103:1:12288:2976:0] owner [6:580:2605] class Online pages [ 3 ] cookie 1 00000.468 DD| TABLET_EXECUTOR: Leader{1:3:2} got result TEvResult{1 pages [1:2:103:1:12288:2976:0] ok OK}, category 1 00000.469 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{97, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} hope 2 -> done Change{103, redo 0b alter 0b annex 0, ~{ } -{ }, 0 
gb} 00000.469 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{97, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} release 8388608b of static, Memory{0 dyn 0} 00000.469 TT| TABLET_SAUSAGECACHE: Touch page collection [1:2:103:1:12288:2976:0] owner [6:580:2605] pages [ 14 3 117 111 ] 00000.469 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{98, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow 00000.469 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{98, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.470 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{98, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} hope 1 -> retry Change{103, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 00000.470 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{98, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} touch new 796b, 102443b lo load (103239b in total), 0b requested for data (4194304b in total) 00000.470 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{98, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} took 8388608b of static mem, Memory{8388608 dyn 0} 00000.470 D3| TABLET_EXECUTOR: Leader{1:3:2} requests PageCollection [1:2:103:1:12288:2976:0] 102443 bytes, 1 pages: [2 4] 00000.470 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{98, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} postponed, 102443b, pages {1 wait, 1 load}, freshly touched 4 pages 00000.470 TT| TABLET_SAUSAGECACHE: Request page collection [1:2:103:1:12288:2976:0] owner [6:580:2605] cookie 1 class Online from cache [ ] already requested [ ] to request [ 2 ] 00000.470 DD| TABLET_SAUSAGECACHE: Drop page collection [1:2:103:1:12288:2976:0] pages [ 93 ] owner [6:580:2605] 00000.470 TT| TABLET_SAUSAGECACHE: Receive page collection [1:2:103:1:12288:2976:0] status OK pages [ 2 ] 00000.470 TT| TABLET_SAUSAGECACHE: Send page collection result [1:2:103:1:12288:2976:0] owner [6:580:2605] class Online pages [ 2 ] cookie 1 00000.470 DD| TABLET_EXECUTOR: Leader{1:3:2} got result TEvResult{1 pages [1:2:103:1:12288:2976:0] ok OK}, category 1 00000.471 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{98, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} hope 2 -> done Change{103, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 00000.471 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{98, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} release 8388608b of static, Memory{0 dyn 0} 00000.471 TT| TABLET_SAUSAGECACHE: Touch page collection [1:2:103:1:12288:2976:0] owner [6:580:2605] pages [ 14 2 117 111 ] 00000.471 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{99, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow 00000.471 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{99, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.471 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{99, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} hope 1 -> retry Change{103, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 00000.471 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{99, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} touch new 796b, 102443b lo load (103239b in total), 0b requested for data (4194304b in total) 00000.471 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{99, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} took 8388608b of static mem, Memory{8388608 dyn 0} 
00000.472 D3| TABLET_EXECUTOR: Leader{1:3:2} requests PageCollection [1:2:103:1:12288:2976:0] 102443 bytes, 1 pages: [1 4] 00000.472 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{99, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} postponed, 102443b, pages {1 wait, 1 load}, freshly touched 4 pages 00000.472 TT| TABLET_SAUSAGECACHE: Request page collection [1:2:103:1:12288:2976:0] owner [6:580:2605] cookie 1 class Online from cache [ ] already requested [ ] to request [ 1 ] 00000.472 DD| TABLET_SAUSAGECACHE: Drop page collection [1:2:103:1:12288:2976:0] pages [ 92 ] owner [6:580:2605] 00000.472 TT| TABLET_SAUSAGECACHE: Receive page collection [1:2:103:1:12288:2976:0] status OK pages [ 1 ] 00000.472 TT| TABLET_SAUSAGECACHE: Send page collection result [1:2:103:1:12288:2976:0] owner [6:580:2605] class Online pages [ 1 ] cookie 1 00000.472 DD| TABLET_EXECUTOR: Leader{1:3:2} got result TEvResult{1 pages [1:2:103:1:12288:2976:0] ok OK}, category 1 00000.472 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{99, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} hope 2 -> done Change{103, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 00000.472 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{99, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} release 8388608b of static, Memory{0 dyn 0} 00000.472 TT| TABLET_SAUSAGECACHE: Touch page collection [1:2:103:1:12288:2976:0] owner [6:580:2605] pages [ 14 1 117 111 ] 00000.473 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{100, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow 00000.473 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{100, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.473 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{100, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} hope 1 -> retry Change{103, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 00000.473 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{100, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} touch new 796b, 102443b lo load (103239b in total), 0b requested for data (4194304b in total) 00000.473 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{100, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} took 8388608b of static mem, Memory{8388608 dyn 0} 00000.473 D3| TABLET_EXECUTOR: Leader{1:3:2} requests PageCollection [1:2:103:1:12288:2976:0] 102443 bytes, 1 pages: [0 4] 00000.473 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{100, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} postponed, 102443b, pages {1 wait, 1 load}, freshly touched 4 pages 00000.474 TT| TABLET_SAUSAGECACHE: Request page collection [1:2:103:1:12288:2976:0] owner [6:580:2605] cookie 1 class Online from cache [ ] already requested [ ] to request [ 0 ] 00000.474 DD| TABLET_SAUSAGECACHE: Drop page collection [1:2:103:1:12288:2976:0] pages [ 91 ] owner [6:580:2605] 00000.474 TT| TABLET_SAUSAGECACHE: Receive page collection [1:2:103:1:12288:2976:0] status OK pages [ 0 ] 00000.474 TT| TABLET_SAUSAGECACHE: Send page collection result [1:2:103:1:12288:2976:0] owner [6:580:2605] class Online pages [ 0 ] cookie 1 00000.474 DD| TABLET_EXECUTOR: Leader{1:3:2} got result TEvResult{1 pages [1:2:103:1:12288:2976:0] ok OK}, category 1 00000.474 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{100, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} hope 2 -> done Change{103, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 00000.474 DD| TABLET_EXECUTOR: Leader{1:3:2} 
Tx{100, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} release 8388608b of static, Memory{0 dyn 0} 00000.474 TT| TABLET_SAUSAGECACHE: Touch page collection [1:2:103:1:12288:2976:0] owner [6:580:2605] pages [ 14 0 117 111 ] Counters: Active:8313958/8388608, Passive:0, MemLimit:-1 00000.475 II| FAKE_ENV: Model starts hard shutdown on level 7 of 8, left 3 actors 00000.477 II| TABLET_EXECUTOR: Leader{1:3:2} suiciding, Waste{2:0, 10255801b +(0, 0b), 1 trc, -48685b acc} 00000.479 DD| TABLET_SAUSAGECACHE: Unregister owner [6:580:2605] 00000.479 DD| TABLET_SAUSAGECACHE: Remove page collection [1:2:103:1:12288:2976:0] owner [6:580:2605] 00000.479 DD| TABLET_SAUSAGECACHE: Remove owner [6:580:2605] 00000.479 NN| TABLET_SAUSAGECACHE: Poison cache serviced 138 reqs hit {0 0b} miss {139 12197190b} 00000.480 II| FAKE_ENV: Shut order, stopping 4 BS groups 00000.480 II| FAKE_ENV: DS.0 gone, left {42b, 1}, put {10191b, 107} 00000.480 II| FAKE_ENV: DS.1 gone, left {10257096b, 5}, put {10305919b, 107} 00000.482 II| FAKE_ENV: DS.2 gone, left {0b, 0}, put {0b, 0} 00000.482 II| FAKE_ENV: DS.3 gone, left {0b, 0}, put {0b, 0} 00000.482 II| FAKE_ENV: All BS storage groups are stopped 00000.482 II| FAKE_ENV: Model stopped, hosted 4 actors, spent 0.000s 00000.483 II| FAKE_ENV: Logged {Emerg 0 Alert 0 Crit 0 Error 0 Left 2741}, stopped >> TSharedPageCache::ZeroCache_FlatIndex [GOOD] >> NFwd_TBTreeIndexCache::ForwardTwice [GOOD] >> NFwd_TBTreeIndexCache::Forward_OnlyUsed [GOOD] >> TPartBtreeIndexIteration::OneNode_Groups >> BuildStatsBTreeIndex::Single_History [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/unittest >> TPQTest::TestPQReadAhead [GOOD] Test command err: Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:103:2057] recipient: [1:101:2135] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:103:2057] recipient: [1:101:2135] Leader for TabletID 72057594037927937 is [1:107:2139] sender: [1:108:2057] recipient: [1:101:2135] 2025-05-07T08:51:32.678799Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-05-07T08:51:32.678932Z node 1 :PERSQUEUE INFO: pq_impl.cpp:794: [PQ: 72057594037927937] doesn't have tx writes info Leader for TabletID 72057594037927938 is [0:0:0] sender: [1:149:2057] recipient: [1:147:2170] IGNORE Leader for TabletID 72057594037927938 is [0:0:0] sender: [1:149:2057] recipient: [1:147:2170] Leader for TabletID 72057594037927938 is [1:153:2174] sender: [1:154:2057] recipient: [1:147:2170] Leader for TabletID 72057594037927937 is [1:107:2139] sender: [1:179:2057] recipient: [1:14:2061] 2025-05-07T08:51:32.702181Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-05-07T08:51:32.728996Z node 1 :PERSQUEUE INFO: pq_impl.cpp:1481: [PQ: 72057594037927937] Config applied version 1 actor [1:177:2192] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 ImportantClientId: "important_user" LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 WriteSpeedInBytesPerSecond: 102400 BurstSize: 102400 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--asdfgs--topic" Version: 1 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } ReadRuleGenerations: 1 ReadRuleGenerations: 1 MeteringMode: 
METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 1 Important: false } Consumers { Name: "important_user" Generation: 1 Important: true } 2025-05-07T08:51:32.730092Z node 1 :PERSQUEUE INFO: partition_init.cpp:879: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [1:185:2198] 2025-05-07T08:51:32.732787Z node 1 :PERSQUEUE INFO: partition.cpp:557: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 2 [1:185:2198] 2025-05-07T08:51:32.746325Z node 1 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|967f87b8-6bd9927d-8e17984-c6a72916_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user1" SessionId: "" Offset: 0 Count: 2147483647 Bytes: 2147483647 } Cookie: 123 } via pipe: [1:177:2192] Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user2" SessionId: "" Offset: 0 Count: 2147483647 Bytes: 2147483647 } Cookie: 123 } via pipe: [1:177:2192] Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:103:2057] recipient: [2:101:2135] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:103:2057] recipient: [2:101:2135] Leader for TabletID 72057594037927937 is [2:107:2139] sender: [2:108:2057] recipient: [2:101:2135] 2025-05-07T08:51:41.693539Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-05-07T08:51:41.693612Z node 2 :PERSQUEUE INFO: pq_impl.cpp:794: [PQ: 72057594037927937] doesn't have tx writes info Leader for TabletID 72057594037927938 is [0:0:0] sender: [2:149:2057] recipient: [2:147:2170] IGNORE Leader for TabletID 72057594037927938 is [0:0:0] sender: [2:149:2057] recipient: [2:147:2170] Leader for TabletID 72057594037927938 is [2:153:2174] sender: [2:154:2057] recipient: [2:147:2170] Leader for TabletID 72057594037927937 is [2:107:2139] sender: [2:179:2057] recipient: [2:14:2061] 2025-05-07T08:51:41.711196Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-05-07T08:51:41.711880Z node 2 :PERSQUEUE INFO: pq_impl.cpp:1481: [PQ: 72057594037927937] Config applied version 2 actor [2:177:2192] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 ImportantClientId: "important_user" LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 WriteSpeedInBytesPerSecond: 102400 BurstSize: 102400 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--asdfgs--topic" Version: 2 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } ReadRuleGenerations: 2 ReadRuleGenerations: 2 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 2 Important: false } Consumers { Name: "important_user" Generation: 2 Important: true } 2025-05-07T08:51:41.712512Z node 2 :PERSQUEUE INFO: partition_init.cpp:879: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [2:185:2198] 2025-05-07T08:51:41.714834Z node 2 :PERSQUEUE INFO: partition.cpp:557: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 2 [2:185:2198] 
2025-05-07T08:51:41.724830Z node 2 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|f4117450-6015a155-f638233c-93c0f859_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user1" SessionId: "" Offset: 0 Count: 2147483647 Bytes: 2147483647 } Cookie: 123 } via pipe: [2:177:2192] Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user2" SessionId: "" Offset: 0 Count: 2147483647 Bytes: 2147483647 } Cookie: 123 } via pipe: [2:177:2192] Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:103:2057] recipient: [3:101:2135] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:103:2057] recipient: [3:101:2135] Leader for TabletID 72057594037927937 is [3:107:2139] sender: [3:108:2057] recipient: [3:101:2135] 2025-05-07T08:51:51.027621Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-05-07T08:51:51.027710Z node 3 :PERSQUEUE INFO: pq_impl.cpp:794: [PQ: 72057594037927937] doesn't have tx writes info Leader for TabletID 72057594037927938 is [0:0:0] sender: [3:149:2057] recipient: [3:147:2170] IGNORE Leader for TabletID 72057594037927938 is [0:0:0] sender: [3:149:2057] recipient: [3:147:2170] Leader for TabletID 72057594037927938 is [3:153:2174] sender: [3:154:2057] recipient: [3:147:2170] Leader for TabletID 72057594037927937 is [3:107:2139] sender: [3:179:2057] recipient: [3:14:2061] 2025-05-07T08:51:51.067093Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-05-07T08:51:51.067699Z node 3 :PERSQUEUE INFO: pq_impl.cpp:1481: [PQ: 72057594037927937] Config applied version 3 actor [3:177:2192] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 ImportantClientId: "important_user" LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 WriteSpeedInBytesPerSecond: 102400 BurstSize: 102400 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--asdfgs--topic" Version: 3 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } ReadRuleGenerations: 3 ReadRuleGenerations: 3 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 3 Important: false } Consumers { Name: "important_user" Generation: 3 Important: true } 2025-05-07T08:51:51.068301Z node 3 :PERSQUEUE INFO: partition_init.cpp:879: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [3:185:2198] 2025-05-07T08:51:51.075144Z node 3 :PERSQUEUE INFO: partition.cpp:557: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 2 [3:185:2198] 2025-05-07T08:51:51.098752Z node 3 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|9631fbbc-d59cd9b1-28288618-df4a6074_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user1" SessionId: "" Offset: 0 Count: 2147483647 Bytes: 2147483647 } Cookie: 123 } via pipe: [3:177:2192] Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user2" SessionId: "" Offset: 0 Count: 2147483647 Bytes: 2147483647 } Cookie: 123 } via pipe: [3:177:2192] Leader for TabletID 
72057594037927937 is [0:0:0] sender: [4:103:2057] recipient: [4:101:2135] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:103:2057] recipient: [4:101:2135] Leader for TabletID 72057594037927937 is [4:107:2139] sender: [4:108:2057] recipient: [4:101:2135] 2025-05-07T08:52:01.364882Z node 4 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-05-07T08:52:01.364998Z node 4 :PERSQUEUE INFO: pq_impl.cpp:794: [PQ: 72057594037927937] doesn't have tx writes info Leader for TabletID 72057594037927938 is [0:0:0] sender: [4:149:2057] recipient: [4:147:2170] IGNORE Leader for TabletID 72057594037927938 is [0:0:0] sender: [4:149:2057] recipient: [4:147:2170] Leader for TabletID 72057594037927938 is [4:153:2174] sender: [4:154:2057] recipient: [4:147:2170] Leader for TabletID 72057594037927937 is [4:107:2139] sender: [4:179:2057] recipient: [4:14:2061] 2025-05-07T08:52:01.408845Z node 4 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-05-07T08:52:01.409521Z node 4 :PERSQUEUE INFO: pq_impl.cpp:1481: [PQ: 72057594037927937] Config applied version 4 actor [4:177:2192] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 ImportantClientId: "important_user" LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 WriteSpeedInBytesPerSecond: 102400 BurstSize: 102400 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--asdfgs--topic" Version: 4 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } ReadRuleGenerations: 4 ReadRuleGenerations: 4 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 4 Important: false } Consumers { Name: "important_user" Generation: 4 Important: true } 2025-05-07T08:52:01.410211Z node 4 :PERSQUEUE INFO: partition_init.cpp:879: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [4:185:2198] 2025-05-07T08:52:01.413084Z node 4 :PERSQUEUE INFO: partition.cpp:557: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 2 [4:185:2198] 2025-05-07T08:52:01.432880Z node 4 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|6c179dd3-7af085ee-684cdc07-24dd5a8d_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user1" SessionId: "" Offset: 0 Count: 2147483647 Bytes: 2147483647 } Cookie: 12 ... 7Z node 80 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-05-07T08:55:01.401289Z node 80 :PERSQUEUE INFO: pq_impl.cpp:794: [PQ: 72057594037927937] doesn't have tx writes info 2025-05-07T08:55:01.402390Z node 80 :PERSQUEUE INFO: partition_init.cpp:879: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [80:303:2296] 2025-05-07T08:55:01.406018Z node 80 :PERSQUEUE INFO: partition_init.cpp:879: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [80:304:2297] 2025-05-07T08:55:01.420671Z node 80 :PERSQUEUE INFO: partition_init.cpp:773: [rt3.dc1--asdfgs--topic:1:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 
2025-05-07T08:55:01.420791Z node 80 :PERSQUEUE INFO: partition.cpp:557: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 3 [80:304:2297] 2025-05-07T08:55:01.439506Z node 80 :PERSQUEUE INFO: partition_init.cpp:773: [rt3.dc1--asdfgs--topic:0:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-05-07T08:55:01.439646Z node 80 :PERSQUEUE INFO: partition.cpp:557: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 3 [80:303:2296] 2025-05-07T08:55:01.527175Z node 80 :PERSQUEUE WARN: pq_l2_cache.cpp:94: PQ Cache (L2). Same blob insertion. Tablet '72057594037927937' partition 0 offset 0 partno 0 count 12 parts 16 size 8365317 !Reboot 72057594037927937 (actor [80:107:2139]) rebooted! !Reboot 72057594037927937 (actor [80:107:2139]) tablet resolver refreshed! new actor is[80:254:2255] Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user" SessionId: "" Offset: 3 Count: 2147483647 Bytes: 102400 } Cookie: 123 } via pipe: [80:177:2192] Leader for TabletID 72057594037927937 is [80:254:2255] sender: [80:354:2057] recipient: [80:14:2061] 2025-05-07T08:55:03.042573Z node 80 :PERSQUEUE WARN: pq_l2_cache.cpp:94: PQ Cache (L2). Same blob insertion. Tablet '72057594037927937' partition 0 offset 12 partno 2 count 8 parts 15 size 7877895 Leader for TabletID 72057594037927937 is [0:0:0] sender: [81:103:2057] recipient: [81:101:2135] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [81:103:2057] recipient: [81:101:2135] Leader for TabletID 72057594037927937 is [81:107:2139] sender: [81:108:2057] recipient: [81:101:2135] 2025-05-07T08:55:04.054448Z node 81 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-05-07T08:55:04.054531Z node 81 :PERSQUEUE INFO: pq_impl.cpp:794: [PQ: 72057594037927937] doesn't have tx writes info Leader for TabletID 72057594037927938 is [0:0:0] sender: [81:149:2057] recipient: [81:147:2170] IGNORE Leader for TabletID 72057594037927938 is [0:0:0] sender: [81:149:2057] recipient: [81:147:2170] Leader for TabletID 72057594037927938 is [81:153:2174] sender: [81:154:2057] recipient: [81:147:2170] Leader for TabletID 72057594037927937 is [81:107:2139] sender: [81:179:2057] recipient: [81:14:2061] 2025-05-07T08:55:04.083969Z node 81 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-05-07T08:55:04.084982Z node 81 :PERSQUEUE INFO: pq_impl.cpp:1481: [PQ: 72057594037927937] Config applied version 81 actor [81:177:2192] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 ImportantClientId: "aaa" LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 TopicName: "rt3.dc1--asdfgs--topic" Version: 81 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 81 ReadRuleGenerations: 81 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 81 Important: false } Consumers { Name: "aaa" Generation: 81 Important: true } 2025-05-07T08:55:04.085772Z 
node 81 :PERSQUEUE INFO: partition_init.cpp:879: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [81:185:2198] 2025-05-07T08:55:04.088864Z node 81 :PERSQUEUE INFO: partition.cpp:557: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 2 [81:185:2198] 2025-05-07T08:55:04.092168Z node 81 :PERSQUEUE INFO: partition_init.cpp:879: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [81:186:2199] 2025-05-07T08:55:04.095205Z node 81 :PERSQUEUE INFO: partition.cpp:557: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 2 [81:186:2199] 2025-05-07T08:55:04.146296Z node 81 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|1d51309a-cef2f387-c825b276-4d441b26_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user" SessionId: "" Offset: 0 Count: 1 Bytes: 104857600 } Cookie: 123 } via pipe: [81:177:2192] Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user" SessionId: "" Offset: 1 Count: 1 Bytes: 104857600 } Cookie: 123 } via pipe: [81:177:2192] Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user" SessionId: "" Offset: 2 Count: 1 Bytes: 104857600 } Cookie: 123 } via pipe: [81:177:2192] Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user" SessionId: "" Offset: 3 Count: 1 Bytes: 104857600 } Cookie: 123 } via pipe: [81:177:2192] Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user" SessionId: "" Offset: 4 Count: 10 Bytes: 104857600 } Cookie: 123 } via pipe: [81:177:2192] Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user" SessionId: "" Offset: 0 Count: 2147483647 Bytes: 102400 } Cookie: 123 } via pipe: [81:177:2192] Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user" SessionId: "" Offset: 1 Count: 2147483647 Bytes: 102400 } Cookie: 123 } via pipe: [81:177:2192] Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user" SessionId: "" Offset: 2 Count: 2147483647 Bytes: 102400 } Cookie: 123 } via pipe: [81:177:2192] Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user" SessionId: "" Offset: 3 Count: 2147483647 Bytes: 102400 } Cookie: 123 } via pipe: [81:177:2192] Leader for TabletID 72057594037927937 is [0:0:0] sender: [82:103:2057] recipient: [82:101:2135] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [82:103:2057] recipient: [82:101:2135] Leader for TabletID 72057594037927937 is [82:107:2139] sender: [82:108:2057] recipient: [82:101:2135] 2025-05-07T08:55:05.616618Z node 82 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-05-07T08:55:05.616715Z node 82 :PERSQUEUE INFO: pq_impl.cpp:794: [PQ: 72057594037927937] doesn't have tx writes info Leader for TabletID 72057594037927938 is [0:0:0] sender: [82:149:2057] recipient: [82:147:2170] IGNORE Leader for TabletID 72057594037927938 is [0:0:0] sender: [82:149:2057] recipient: [82:147:2170] Leader for TabletID 72057594037927938 is [82:153:2174] sender: [82:154:2057] recipient: [82:147:2170] Leader for TabletID 72057594037927937 is [82:107:2139] sender: [82:179:2057] recipient: [82:14:2061] 2025-05-07T08:55:05.646942Z node 82 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 
72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-05-07T08:55:05.648123Z node 82 :PERSQUEUE INFO: pq_impl.cpp:1481: [PQ: 72057594037927937] Config applied version 82 actor [82:177:2192] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 ImportantClientId: "aaa" LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 TopicName: "rt3.dc1--asdfgs--topic" Version: 82 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 82 ReadRuleGenerations: 82 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 82 Important: false } Consumers { Name: "aaa" Generation: 82 Important: true } 2025-05-07T08:55:05.648997Z node 82 :PERSQUEUE INFO: partition_init.cpp:879: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [82:185:2198] 2025-05-07T08:55:05.652165Z node 82 :PERSQUEUE INFO: partition.cpp:557: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 2 [82:185:2198] 2025-05-07T08:55:05.655644Z node 82 :PERSQUEUE INFO: partition_init.cpp:879: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [82:186:2199] 2025-05-07T08:55:05.658185Z node 82 :PERSQUEUE INFO: partition.cpp:557: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 2 [82:186:2199] 2025-05-07T08:55:05.713793Z node 82 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|bd01d529-9c4270e3-c3cee05f-aec23f19_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user" SessionId: "" Offset: 0 Count: 1 Bytes: 104857600 } Cookie: 123 } via pipe: [82:177:2192] Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user" SessionId: "" Offset: 1 Count: 1 Bytes: 104857600 } Cookie: 123 } via pipe: [82:177:2192] Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user" SessionId: "" Offset: 2 Count: 1 Bytes: 104857600 } Cookie: 123 } via pipe: [82:177:2192] Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user" SessionId: "" Offset: 3 Count: 1 Bytes: 104857600 } Cookie: 123 } via pipe: [82:177:2192] Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user" SessionId: "" Offset: 4 Count: 10 Bytes: 104857600 } Cookie: 123 } via pipe: [82:177:2192] Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user" SessionId: "" Offset: 0 Count: 2147483647 Bytes: 102400 } Cookie: 123 } via pipe: [82:177:2192] Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user" SessionId: "" Offset: 1 Count: 2147483647 Bytes: 102400 } Cookie: 123 } via pipe: [82:177:2192] Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user" SessionId: "" Offset: 2 Count: 2147483647 Bytes: 102400 } Cookie: 123 } via pipe: [82:177:2192] Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user" SessionId: "" Offset: 3 Count: 2147483647 Bytes: 102400 } Cookie: 123 } via pipe: [82:177:2192] ------- [TM] {asan, default-linux-x86_64, release} 
ydb/core/kqp/ut/perf/unittest >> KqpQueryPerf::Replace-QueryService+UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 15625, MsgBus: 13192 2025-05-07T08:54:56.081540Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501624655441747103:2125];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:54:56.088981Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00279f/r3tmp/tmpbU50RK/pdisk_1.dat 2025-05-07T08:54:56.729589Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:54:56.729822Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:54:56.729943Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:54:56.741174Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 15625, node 1 2025-05-07T08:54:56.955180Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:54:56.955204Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:54:56.955212Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:54:56.955365Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:13192 TClient is connected to server localhost:13192 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:54:57.848527Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T08:54:57.886848Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T08:54:57.909437Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-05-07T08:54:58.117166Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 2025-05-07T08:54:58.309350Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:54:58.430309Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:55:00.894152Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501624672621617875:2408], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:55:00.894327Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:55:01.086259Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501624655441747103:2125];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:55:01.086352Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:55:01.281781Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T08:55:01.321682Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T08:55:01.413746Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T08:55:01.508601Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T08:55:01.558088Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T08:55:01.619464Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T08:55:01.729797Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T08:55:01.814438Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501624676916585835:2472], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:55:01.814560Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:55:01.818589Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501624676916585840:2475], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:55:01.823905Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-05-07T08:55:01.844050Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501624676916585842:2476], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T08:55:01.941462Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501624676916585895:3421] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } >> TSharedPageCache_Actor::Request_Basics >> TSharedPageCache_Actor::Request_Basics [GOOD] >> TSharedPageCache_Actor::Request_Failed >> TSharedPageCache_Actor::Request_Failed [GOOD] >> TSharedPageCache_Actor::Request_Queue >> TSharedPageCache_Actor::Request_Queue [GOOD] >> TSharedPageCache_Actor::Request_Queue_Failed >> TSharedPageCache_Actor::Request_Queue_Failed [GOOD] >> TSharedPageCache_Actor::Request_Queue_Fast >> TSharedPageCache_Actor::Request_Queue_Fast [GOOD] >> TSharedPageCache_Actor::Request_Sequential >> TSharedPageCache_Actor::Request_Sequential [GOOD] >> TSharedPageCache_Actor::Request_Cached >> TSharedPageCache_Actor::Request_Cached [GOOD] >> TSharedPageCache_Actor::Request_Different_Collections >> TFlatTableExecutor_StickyPages::TestNonStickyGroup_FlatIndex >> TSharedPageCache_Actor::Request_Different_Collections [GOOD] >> NFwd_TBTreeIndexCache::Skip_Done [GOOD] >> ResourcePoolsDdl::TestDefaultPoolRestrictions [GOOD] >> TSharedPageCache_Actor::Request_Different_Pages >> TSharedPageCache_Actor::Request_Different_Pages [GOOD] >> TSharedPageCache_Actor::Request_Different_Pages_Reversed >> TSharedPageCache_Actor::Request_Different_Pages_Reversed [GOOD] >> TSharedPageCache_Actor::Request_Subset >> TSharedPageCache_Actor::Request_Subset [GOOD] >> TSharedPageCache_Actor::Request_Subset_Shuffled >> TSharedPageCache_Actor::Request_Subset_Shuffled [GOOD] >> TSharedPageCache_Actor::Request_Superset >> TSharedPageCache_Actor::Request_Superset [GOOD] >> TSharedPageCache_Actor::Request_Superset_Reversed >> TSharedPageCache_Actor::Request_Superset_Reversed [GOOD] >> TSharedPageCache_Actor::Request_Crossing >> TSharedPageCache_Actor::Request_Crossing [GOOD] >> TSharedPageCache_Actor::Request_Crossing_Reversed >> TSharedPageCache_Actor::Request_Crossing_Reversed [GOOD] >> TSharedPageCache_Actor::Request_Crossing_Shuffled >> TSharedPageCache_Actor::Request_Crossing_Shuffled [GOOD] >> TSharedPageCache_Actor::Attach_Basics >> TSharedPageCache_Actor::Attach_Basics [GOOD] >> TSharedPageCache_Actor::Attach_Request >> TSharedPageCache_Actor::Attach_Request [GOOD] >> TSharedPageCache_Actor::Detach_Basics >> TSharedPageCache_Actor::Detach_Basics [GOOD] >> TSharedPageCache_Actor::Detach_Cached >> TSharedPageCache_Actor::Detach_Cached [GOOD] >> TSharedPageCache_Actor::Detach_Expired >> TSharedPageCache_Actor::Detach_Expired [GOOD] >> TSharedPageCache_Actor::Detach_InFly >> TSharedPageCache_Actor::Detach_InFly [GOOD] >> TSharedPageCache_Actor::Detach_Queued >> ResourcePoolsDdl::TestAlterResourcePool >> NFwd_TBTreeIndexCache::Skip_Done_None [GOOD] >> BuildStatsBTreeIndex::Single_History_Slices >> TChargeBTreeIndex::FewNodes [GOOD] >> TSharedPageCache_Actor::Detach_Queued [GOOD] >> KqpWorkloadService::TestZeroQueueSize [GOOD] >> TFlatTableExecutor_StickyPages::TestNonStickyGroup_FlatIndex [GOOD] >> NFwd_TBTreeIndexCache::Skip_Keep [GOOD] >> KqpWorkloadService::TestQueryCancelAfterUnlimitedPool >> TSharedPageCache_Actor::Unregister_Basics >> NFwd_TBTreeIndexCache::Skip_Wait [GOOD] >> BuildStatsBTreeIndex::Single_History_Slices [GOOD] >> 
TChargeBTreeIndex::FewNodes_Groups >> TSharedPageCache_Actor::Unregister_Basics [GOOD] >> BuildStatsBTreeIndex::Single_Groups >> NFwd_TBTreeIndexCache::Trace_BTree [GOOD] >> TFlatTableExecutor_StickyPages::TestNonStickyGroup_BTreeIndex >> BuildStatsBTreeIndex::Single_Groups [GOOD] >> TFlatTableExecutor_StickyPages::TestNonStickyGroup_BTreeIndex [GOOD] >> TSharedPageCache_Actor::Unregister_Cached >> NFwd_TBTreeIndexCache::Trace_Data [GOOD] >> NFwd_TBTreeIndexCache::End [GOOD] >> NFwd_TBTreeIndexCache::Slices [GOOD] >> BuildStatsBTreeIndex::Single_Groups_Slices ------- [TM] {asan, default-linux-x86_64, release} ydb/services/datastreams/ut/unittest >> DataStreams::TestGetRecordsWithBigSeqno [GOOD] >> TFlatTableExecutor_StickyPages::TestStickyMain Test command err: 2025-05-07T08:54:02.816261Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501624420578672821:2211];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:54:02.816554Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00282f/r3tmp/tmpfyTmDF/pdisk_1.dat 2025-05-07T08:54:03.840312Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:54:03.844309Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:54:03.844393Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:54:03.863597Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:54:03.883708Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 4985, node 1 2025-05-07T08:54:04.130720Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:54:04.130742Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:54:04.130748Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:54:04.130864Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:29849 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:54:04.696101Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:54:04.888157Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710658:0, at schemeshard: 72057594046644480 TClient is connected to server localhost:29849 2025-05-07T08:54:05.163501Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:54:05.764383Z node 1 :PERSQUEUE ERROR: partition_read.cpp:672: [PQ: 72075186224037888, Partition: 0, State: StateIdle] reading from too big offset - topic stream_TestGetRecordsStreamWithSingleShard partition 0 client $without_consumer EndOffset 30 offset 100000 2025-05-07T08:54:09.990086Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7501624452084697366:2076];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:54:09.992780Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00282f/r3tmp/tmpPIXcYS/pdisk_1.dat 2025-05-07T08:54:10.344818Z node 4 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:54:10.402947Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:54:10.403024Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:54:10.412224Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 17290, node 4 2025-05-07T08:54:10.586726Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:54:10.586754Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:54:10.586762Z node 4 :NET_CLASSIFIER WARN: 
net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:54:10.586881Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:31215 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:54:10.973647Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:54:11.152372Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710658:0, at schemeshard: 72057594046644480 TClient is connected to server localhost:31215 2025-05-07T08:54:11.477537Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 
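A few entries back, node 1's PERSQUEUE reports "reading from too big offset - topic stream_TestGetRecordsStreamWithSingleShard partition 0 client $without_consumer EndOffset 30 offset 100000": the client asked for offset 100000 while the partition's EndOffset was only 30. Below is a minimal C++ sketch of that kind of bounds check; the names (ReadRequest, ValidateReadOffset) are illustrative stand-ins and this is not the actual partition_read.cpp:672 logic.

    #include <cstdint>
    #include <optional>
    #include <string>

    // Illustrative stand-in for a partition read request; only the field
    // relevant to the offset check is modeled here.
    struct ReadRequest {
        uint64_t Offset;  // first record offset the client asks for
    };

    // EndOffset is one past the last written record, so an Offset beyond it
    // cannot be served and yields an error like the one in the log above.
    // (Hypothetical sketch, not the real PERSQUEUE implementation.)
    std::optional<std::string> ValidateReadOffset(const ReadRequest& req,
                                                  uint64_t endOffset) {
        if (req.Offset > endOffset) {
            return "reading from too big offset: EndOffset " +
                   std::to_string(endOffset) +
                   " offset " + std::to_string(req.Offset);
        }
        return std::nullopt;  // within bounds, the read may proceed
    }

With the values from the trace, ValidateReadOffset({100000}, 30) reproduces the error, while any offset up to EndOffset passes the check.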
2025-05-07T08:54:11.497497Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710659, at schemeshard: 72057594046644480 2025-05-07T08:54:14.968433Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7501624452084697366:2076];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:54:14.968515Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:54:25.300621Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7261: Cannot get console configs 2025-05-07T08:54:25.300652Z node 4 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00282f/r3tmp/tmp1IHqRT/pdisk_1.dat 2025-05-07T08:54:43.650170Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:54:43.854573Z node 7 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:54:43.928937Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:54:43.929041Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:54:43.978107Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 16680, node 7 2025-05-07T08:54:44.303110Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:54:44.303146Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:54:44.303187Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:54:44.303381Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:25724 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
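The recurring "WaitRootIsUp 'Root'... / TClient::Ls request / WaitRootIsUp 'Root' success." triples in this section are the test client polling the scheme root until an Ls response carries StatusCode: SUCCESS. A hedged sketch of such a polling loop follows, with an injected ls callback standing in for the real TClient::Ls RPC (the actual test_client.cpp API is not shown in this log):

    #include <chrono>
    #include <functional>
    #include <stdexcept>
    #include <string>
    #include <thread>

    enum class LsStatus { Success, NotReady };

    // Poll the scheme root until it reports SUCCESS, mirroring the
    // WaitRootIsUp handshake in the trace. The ls callback is an assumed
    // stand-in for the real TClient::Ls request/response round trip.
    void WaitRootIsUp(const std::string& root,
                      const std::function<LsStatus(const std::string&)>& ls,
                      int maxAttempts = 100) {
        for (int attempt = 0; attempt < maxAttempts; ++attempt) {
            if (ls(root) == LsStatus::Success) {
                return;  // corresponds to "WaitRootIsUp 'Root' success."
            }
            std::this_thread::sleep_for(std::chrono::milliseconds(100));
        }
        throw std::runtime_error("WaitRootIsUp '" + root + "' timed out");
    }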
2025-05-07T08:54:44.750465Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:54:44.925515Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710658:0, at schemeshard: 72057594046644480 TClient is connected to server localhost:25724 2025-05-07T08:54:45.351405Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:54:45.382195Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710659, at schemeshard: 72057594046644480 2025-05-07T08:54:51.890208Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7501624632272064271:2073];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:54:51.991012Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00282f/r3tmp/tmpfKhYbK/pdisk_1.dat 2025-05-07T08:54:52.504739Z node 10 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:54:52.551208Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:54:52.551301Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:54:52.631706Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 17682, node 10 2025-05-07T08:54:52.967985Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:54:52.968012Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:54:52.968021Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:54:52.968203Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:28485 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:54:53.471904Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:54:53.608873Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710658:0, at schemeshard: 72057594046644480 TClient is connected to server localhost:28485 2025-05-07T08:54:54.017143Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:54:54.041472Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710659, at schemeshard: 72057594046644480 >> TSharedPageCache_Actor::Unregister_Cached [GOOD] >> NFwd_TBTreeIndexCache::ManyApplies [GOOD] >> NFwd_TFlatIndexCache::Basics [GOOD] >> TFlatTableExecutor_StickyPages::TestStickyMain [GOOD] >> TSharedPageCache_Actor::Unregister_Expired >> NFwd_TFlatIndexCache::IndexPagesLocator [GOOD] >> TSharedPageCache_Actor::Unregister_Expired [GOOD] >> BuildStatsBTreeIndex::Single_Groups_Slices [GOOD] >> TFlatTableExecutor_StickyPages::TestStickyAlt_FlatIndex >> NFwd_TFlatIndexCache::GetTwice [GOOD] >> NFwd_TFlatIndexCache::ForwardTwice [GOOD] >> NFwd_TFlatIndexCache::Skip_Done [GOOD] >> BuildStatsBTreeIndex::Single_Groups_History >> TFlatTableExecutor_StickyPages::TestStickyAlt_FlatIndex [GOOD] >> TSharedPageCache_Actor::Unregister_InFly >> NFwd_TFlatIndexCache::Skip_Done_None [GOOD] >> BuildStatsBTreeIndex::Single_Groups_History [GOOD] >> TFlatTableExecutor_StickyPages::TestStickyAlt_BTreeIndex >> TSharedPageCache_Actor::Unregister_InFly [GOOD] >> NFwd_TFlatIndexCache::Skip_Keep [GOOD] >> BuildStatsBTreeIndex::Single_Groups_History_Slices >> BuildStatsBTreeIndex::Single_Groups_History_Slices [GOOD] >> BuildStatsBTreeIndex::Mixed >> BuildStatsBTreeIndex::Mixed [GOOD] >> BuildStatsBTreeIndex::Mixed_Groups >> BuildStatsBTreeIndex::Mixed_Groups [GOOD] >> BuildStatsBTreeIndex::Mixed_Groups_History >> BuildStatsBTreeIndex::Mixed_Groups_History [GOOD] >> BuildStatsFlatIndex::Single >> BuildStatsFlatIndex::Single [GOOD] >> BuildStatsFlatIndex::Single_Slices >> BuildStatsFlatIndex::Single_Slices [GOOD] >> NFwd_TFlatIndexCache::End [GOOD] >> TSharedPageCache_Actor::Unregister_Queued >> 
BuildStatsFlatIndex::Single_History >> TFlatTableExecutor_StickyPages::TestStickyAlt_BTreeIndex [GOOD] >> BuildStatsFlatIndex::Single_History [GOOD] >> BuildStatsFlatIndex::Single_History_Slices >> BuildStatsFlatIndex::Single_History_Slices [GOOD] >> BuildStatsFlatIndex::Single_Groups >> BuildStatsFlatIndex::Single_Groups [GOOD] >> BuildStatsFlatIndex::Single_Groups_Slices >> BuildStatsFlatIndex::Single_Groups_Slices [GOOD] >> BuildStatsFlatIndex::Single_Groups_History >> BuildStatsFlatIndex::Single_Groups_History [GOOD] >> BuildStatsFlatIndex::Single_Groups_History_Slices >> BuildStatsFlatIndex::Single_Groups_History_Slices [GOOD] >> BuildStatsFlatIndex::Mixed >> TFlatTableExecutor_StickyPages::TestStickyAll >> TFlatTableExecutor_StickyPages::TestStickyAll [GOOD] >> TFlatTableExecutor_StickyPages::TestAlterAddFamilySticky >> TFlatTableExecutor_StickyPages::TestAlterAddFamilySticky [GOOD] >> TFlatTableExecutor_StickyPages::TestAlterAddFamilyPartiallySticky >> TFlatTableExecutor_StickyPages::TestAlterAddFamilyPartiallySticky [GOOD] >> TFlatTableExecutor_VersionedLargeBlobs::TestMultiVersionCompactionLargeBlobs >> TFlatTableExecutor_VersionedLargeBlobs::TestMultiVersionCompactionLargeBlobs [GOOD] >> TFlatTableExecutor_VersionedRows::TestVersionedRows >> TFlatTableExecutor_VersionedRows::TestVersionedRows [GOOD] >> TFlatTableExecutor_VersionedRows::TestVersionedRowsSmallBlobs >> TSharedPageCache_Actor::Unregister_Queued [GOOD] >> BuildStatsFlatIndex::Mixed [GOOD] >> BuildStatsFlatIndex::Mixed_Groups >> TSwitchableCache::Touch [GOOD] >> BuildStatsFlatIndex::Mixed_Groups [GOOD] >> TSwitchableCache::Erase [GOOD] >> BuildStatsFlatIndex::Mixed_Groups_History >> TSwitchableCache::EvictNext [GOOD] >> TSwitchableCache::UpdateLimit [GOOD] >> TSwitchableCache::Switch_Touch_RotatePages_All [GOOD] >> TSwitchableCache::Switch_Touch_RotatePages_Parts [GOOD] >> TSwitchableCache::Switch_RotatePages_Force [GOOD] >> TSwitchableCache::Switch_RotatePages_Evicts [GOOD] >> TSwitchableCache::Switch_Touch [GOOD] >> BuildStatsFlatIndex::Mixed_Groups_History [GOOD] >> TSwitchableCache::Switch_Erase [GOOD] >> TSwitchableCache::Switch_EvictNext [GOOD] >> TSwitchableCache::Switch_UpdateLimit [GOOD] >> TVersions::WreckHead >> BuildStatsFlatIndex::Serial >> BuildStatsFlatIndex::Serial [GOOD] >> BuildStatsFlatIndex::Serial_Groups >> BuildStatsFlatIndex::Serial_Groups [GOOD] >> BuildStatsFlatIndex::Serial_Groups_History >> BuildStatsFlatIndex::Serial_Groups_History [GOOD] |90.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/replication/service/ut_topic_reader/ydb-core-tx-replication-service-ut_topic_reader |90.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/replication/service/ut_topic_reader/ydb-core-tx-replication-service-ut_topic_reader >> THiveTest::TestHiveBalancerIgnoreTablet [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-67 [FAIL] >> KqpWorkloadServiceActors::TestDefaultPoolAdminPermissions [GOOD] >> TChargeBTreeIndex::FewNodes_Groups [GOOD] >> ResourcePoolClassifiersDdl::TestResourcePoolClassifierRanks [GOOD] >> SystemView::PartitionStatsOneSchemeShard >> KqpPg::TableInsert-useSink [GOOD] >> BuildStatsHistogram::Single |90.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/ut/federated_query/generic_ut/ydb-core-kqp-ut-federated_query-generic_ut |90.3%| [LD] {RESULT} $(B)/ydb/core/tx/replication/service/ut_topic_reader/ydb-core-tx-replication-service-ut_topic_reader |90.3%| [LD] {BAZEL_UPLOAD, SKIPPED} 
$(B)/ydb/core/kqp/ut/federated_query/generic_ut/ydb-core-kqp-ut-federated_query-generic_ut >> THiveTest::TestHiveBalancerNodeRestarts >> ResourcePoolClassifiersDdl::TestExplicitPoolId >> SystemView::CollectPreparedQueries >> SystemView::TopPartitionsByCpuFields >> KqpWorkloadServiceDistributed::TestDistributedLargeConcurrentQueryLimit >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-68 >> KqpQueryPerf::KvRead-QueryService [GOOD] >> TChargeBTreeIndex::FewNodes_History >> KqpPg::TempTablesSessionsIsolation |90.4%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/federated_query/generic_ut/ydb-core-kqp-ut-federated_query-generic_ut >> TestDataErasure::DataErasureWithCopyTable [GOOD] >> AsyncIndexChangeCollector::UpsertSingleRow >> CdcStreamChangeCollector::UpsertManyRows >> TPartBtreeIndexIteration::OneNode_Groups [GOOD] >> TChargeBTreeIndex::FewNodes_History [GOOD] >> THiveTest::TestFollowersCrossDC_MovingLeader [GOOD] >> AsyncIndexChangeCollector::CoveredIndexUpdateCoveredColumn >> BuildStatsHistogram::Single [GOOD] >> KqpWorkloadService::TestZeroQueueSizeManyQueries [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-68 [GOOD] >> KqpWorkloadServiceTables::TestCreateWorkloadSerivceTables [GOOD] >> PgCatalog::PgType [GOOD] >> THiveTest::TestHiveBalancerNodeRestarts [GOOD] >> ResourcePoolsDdl::TestWorkloadConfigOnServerless [GOOD] >> ResourcePoolsDdl::TestAlterResourcePool [GOOD] >> KqpPg::TempTablesSessionsIsolation [GOOD] >> KqpWorkloadService::TestLargeConcurrentQueryLimit [GOOD] >> ObjectDistribution::TestManyIrrelevantNodes [GOOD] >> ResourcePoolClassifiersDdl::TestCreateResourcePoolClassifierOnServerless [GOOD] >> SystemView::CollectPreparedQueries [GOOD] >> RetryPolicy::TWriteSession_RetryOnTargetCluster [GOOD] >> THiveTest::TestFollowersCrossDC_KillingHiveAndFollower >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-69 >> KqpWorkloadServiceTables::TestPoolStateFetcherActor >> THiveTest::TestHiveBalancerDifferentResources >> BuildStatsHistogram::Single_Slices >> KqpWorkloadServiceActors::TestCreateDefaultPool >> ResourcePoolsSysView::TestResourcePoolsSysViewOnServerless >> KqpPg::TempTablesDrop >> RetryPolicy::TWriteSession_SwitchBackToLocalCluster >> AsyncIndexChangeCollector::UpsertSingleRow [GOOD] >> AsyncIndexChangeCollector::UpsertManyRows >> BuildStatsHistogram::Single_Slices [GOOD] >> TPartBtreeIndexIteration::OneNode_History >> AsyncIndexChangeCollector::CoveredIndexUpdateCoveredColumn [GOOD] >> TChargeBTreeIndex::FewNodes_Sticky >> CdcStreamChangeCollector::UpsertManyRows [GOOD] >> KqpWorkloadServiceActors::TestCreateDefaultPool [GOOD] >> THiveTest::TestFollowersCrossDC_KillingHiveAndFollower [GOOD] >> PgCatalog::InformationSchema >> Sequencer::Basic1 [GOOD] >> ResourcePoolsDdl::TestPoolSwitchToLimitedState >> ResourcePoolClassifiersDdl::TestAlterResourcePoolClassifier >> SystemView::CollectScanQueries >> TExecutorDb::RandomOps [GOOD] >> KqpWorkloadService::TestLessConcurrentQueryLimit >> TFlatTableExecutor_VersionedRows::TestVersionedRowsSmallBlobs [GOOD] >> BuildStatsHistogram::Single_History >> TPartBtreeIndexIteration::OneNode_History [GOOD] >> AsyncIndexChangeCollector::CoveredIndexUpsert >> PgCatalog::InformationSchema [GOOD] >> TFlatTableExecutor_VersionedRows::TestVersionedRowsLargeBlobs >> KqpWorkloadServiceActors::TestCpuLoadActor >> TChargeBTreeIndex::FewNodes_Sticky [GOOD] >> TPartBtreeIndexIteration::OneNode_Slices >> THiveTest::TestGetStorageInfo >> StoragePool::TestDistributionRandomProbability >> 
CdcStreamChangeCollector::UpsertIntoTwoStreams >> TChargeBTreeIndex::FewNodes_Groups_History >> TPartBtreeIndexIteration::OneNode_Slices [GOOD] >> TExecutorDb::FullScan >> THiveTest::TestGetStorageInfo [GOOD] >> TChargeBTreeIndex::FewNodes_Groups_History [GOOD] >> TPartBtreeIndexIteration::OneNode_Groups_Slices >> PgCatalog::CheckSetConfig >> THiveTest::TestGetStorageInfoDeleteTabletBeforeAssigned >> TPartBtreeIndexIteration::OneNode_Groups_Slices [GOOD] >> THiveTest::TestGetStorageInfoDeleteTabletBeforeAssigned [GOOD] >> TChargeBTreeIndex::FewNodes_Groups_History_Sticky >> TPartBtreeIndexIteration::OneNode_History_Slices >> THiveTest::TestExternalBoot >> THiveTest::TestExternalBoot [GOOD] >> THiveTest::TestExternalBootWhenLocked >> THiveTest::TestExternalBootWhenLocked [GOOD] |90.4%| [TA] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_base/test-results/unittest/{meta.json ... results_accumulator.log} ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::OltpJoinTypeHintCBOTurnOFF [GOOD] Test command err: Trying to start YDB, gRPC: 21796, MsgBus: 18385 2025-05-07T08:54:13.622387Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501624469590591062:2263];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:54:13.622650Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00343f/r3tmp/tmpr1QOT2/pdisk_1.dat 2025-05-07T08:54:14.177029Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:54:14.178615Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:54:14.186367Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:54:14.258392Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 21796, node 1 2025-05-07T08:54:14.447927Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:54:14.447950Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:54:14.447956Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:54:14.448080Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:18385 TClient is connected to server localhost:18385 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:54:15.363094Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:54:15.381644Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T08:54:17.661916Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501624486770460697:2333], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:54:17.662054Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:54:17.662409Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501624486770460709:2336], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:54:17.666665Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480 2025-05-07T08:54:17.687363Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501624486770460711:2337], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-05-07T08:54:17.779096Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501624486770460762:2335] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:54:18.184029Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 2025-05-07T08:54:18.313492Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 2025-05-07T08:54:18.347894Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T08:54:18.381950Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T08:54:18.436692Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T08:54:18.610420Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T08:54:18.632721Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501624469590591062:2263];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:54:18.649110Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:54:18.694850Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T08:54:18.741331Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T08:54:18.774113Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T08:54:18.854765Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480 2025-05-07T08:54:18.915295Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part 
proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480 2025-05-07T08:54:18.965653Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480 2025-05-07T08:54:19.026220Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480 2025-05-07T08:54:19.723297Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:2, at schemeshard: 72057594046644480 2025-05-07T08:54:19.781776Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710674:0, at schemeshard: 72057594046644480 2025-05-07T08:54:19.819185Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710675:0, at schemeshard: 72057594046644480 2025-05-07T08:54:19.855926Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710676:0, at schemeshard: 72057594046644480 2025-05-07T08:54:19.929532Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710677:0, at schemeshard: 72057594046644480 2025-05-07T08:54:19.976916Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710678:0, at schemeshard: 72057594046644480 2025-05-07T08:54:20.011247Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710679:0, at schemeshard: 72057594046644480 2025-05-07T08:54:20.042128Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710680:0, at schemeshard: 72057594046644480 2025-05-07T08:54:20.119194Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710681:0, at schemeshard: 72057594046644480 2025-05-07T08:54:20.165668Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710682:0, at schemeshard: 72057594046644480 2025-05-07T08:54:20.247561Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710683:0, at schemeshard: 72057594046644480 2025-05-07T08:54:20.316743Z node 1 :FLAT_TX_SCHEMESHARD WARN: 
schemeshard__operation.cpp:17 ... _current=0;tx_id=281474976710714;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976710714; 2025-05-07T08:54:58.472290Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038616;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976710714; 2025-05-07T08:54:58.476149Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038548;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976710714; 2025-05-07T08:54:58.481896Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038562;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976710714; 2025-05-07T08:54:58.490254Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038524;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976710714; 2025-05-07T08:54:58.495522Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038656;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976710714; 2025-05-07T08:54:58.499847Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038522;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976710714; 2025-05-07T08:54:58.505095Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038490;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976710714; 2025-05-07T08:54:58.509855Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038528;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976710714; 2025-05-07T08:54:58.518939Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038554;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976710714; 2025-05-07T08:54:58.523785Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038526;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976710714; 2025-05-07T08:54:58.528897Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038646;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976710714; 2025-05-07T08:54:58.533607Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038504;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976710714; 2025-05-07T08:54:58.542658Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038642;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976710714; 2025-05-07T08:54:58.547433Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038618;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976710714; 2025-05-07T08:54:58.552315Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224038640;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976710714; 2025-05-07T08:54:58.556962Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038480;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976710714; 2025-05-07T08:54:58.566351Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038638;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976710714; 2025-05-07T08:54:58.570837Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038644;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976710714; 2025-05-07T08:54:58.575946Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038636;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976710714; 2025-05-07T08:54:58.580669Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038648;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976710714; 2025-05-07T08:54:58.585782Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038632;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976710714; 2025-05-07T08:54:58.590736Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038608;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976710714; 2025-05-07T08:54:58.598754Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038626;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976710714; 2025-05-07T08:54:58.604830Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038612;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976710714; 2025-05-07T08:54:58.608560Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038628;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976710714; 2025-05-07T08:54:58.618741Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038654;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976710714; 2025-05-07T08:54:58.622230Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038624;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976710714; 2025-05-07T08:54:58.632904Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038598;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976710714; 2025-05-07T08:54:58.640030Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038496;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976710714; 2025-05-07T08:54:58.645921Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224038604;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976710714; 2025-05-07T08:54:58.648542Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038620;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976710714; 2025-05-07T08:54:58.655369Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038594;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976710714; 2025-05-07T08:54:58.657252Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038600;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976710714; 2025-05-07T08:54:58.668028Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038630;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976710714; 2025-05-07T08:54:58.668028Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038426;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976710714; 2025-05-07T08:54:58.678525Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038450;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976710714; 2025-05-07T08:54:58.682360Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038606;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976710714; 2025-05-07T08:54:58.689397Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038488;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976710714; 2025-05-07T08:54:58.692898Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038652;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976710714; 2025-05-07T08:54:58.699815Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038592;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976710714; 2025-05-07T08:54:58.699830Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038614;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976710714; 2025-05-07T08:54:58.707027Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038634;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976710714; 2025-05-07T08:54:58.916522Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jtmz804b2rwfhxhrfd7rsmpa", SessionId: ydb://session/3?node_id=1&id=YWZjNWFiOTMtZDRjODZkYmItZTUyOTQyZWYtOWMwMDU3ZDg=, Slow query, duration: 36.312621s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 
Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b
2025-05-07T08:55:00.019198Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976710716;
2025-05-07T08:55:00.019445Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976710716;
2025-05-07T08:55:00.019882Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;self_id=[1:7501624521130206268:3047];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038170;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038331;receive=72075186224038629;
2025-05-07T08:55:00.020368Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976710716;
------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/slow/unittest >> SlowTopicAutopartitioning::CDC_Write [GOOD]
Test command err:
2025-05-07T08:53:05.682934Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501624177398438250:2059];send_to=[0:7307199536658146131:7762515];
2025-05-07T08:53:05.682973Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message;
2025-05-07T08:53:05.966612Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0048ef/r3tmp/tmprbvdrA/pdisk_1.dat
2025-05-07T08:53:06.253959Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2025-05-07T08:53:06.254119Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting
2025-05-07T08:53:06.256092Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected
2025-05-07T08:53:06.279003Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded
TServer::EnableGrpc on GrpcPort 26231, node 1
2025-05-07T08:53:06.405002Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/zvgn/0048ef/r3tmp/yandexwt16Yc.tmp
2025-05-07T08:53:06.405035Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/zvgn/0048ef/r3tmp/yandexwt16Yc.tmp
2025-05-07T08:53:06.405231Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/zvgn/0048ef/r3tmp/yandexwt16Yc.tmp
2025-05-07T08:53:06.405391Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration
2025-05-07T08:53:06.476320Z INFO: TTestServer started on Port
28747 GrpcPort 26231 TClient is connected to server localhost:28747 PQClient connected to localhost:26231 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:53:06.955598Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-05-07T08:53:07.007108Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... waiting... waiting... 2025-05-07T08:53:09.718277Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501624194578308224:2337], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:53:09.718522Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:53:09.727549Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501624194578308261:2342], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:53:09.732167Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710662:3, at schemeshard: 72057594046644480 2025-05-07T08:53:09.777093Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501624194578308263:2343], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710662 completed, doublechecking } 2025-05-07T08:53:10.058022Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501624194578308329:2443] txid# 281474976710663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:53:10.097510Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T08:53:10.146010Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T08:53:10.253198Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7501624198873275634:2350], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:5:17: Error: At function: KiReadTable!
:5:17: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Versions]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-05-07T08:53:10.255520Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2147: SessionId: ydb://session/3?node_id=1&id=YWU1NjQ2ODgtNDI5NjViY2MtMWUyYTA2ZjYtODQzNWU3YzM=, ActorId: [1:7501624194578308222:2336], ActorState: ExecuteState, TraceId: 01jtmz5rxs11c4p8722qhbcx8h, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-05-07T08:53:10.260146Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 5 column: 17 } message: "At function: KiReadTable!" end_position { row: 5 column: 17 } severity: 1 issues { position { row: 5 column: 17 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Versions]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 5 column: 17 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-05-07T08:53:10.340624Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); === CheckClustersList. Subcribe to ClusterTracker from [1:7501624198873275919:2620] 2025-05-07T08:53:10.686656Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501624177398438250:2059];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:53:10.686772Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; === CheckClustersList. 
Ok 2025-05-07T08:53:16.098112Z :TopicSplitMerge INFO: TTopicSdkTestSetup started 2025-05-07T08:53:16.211877Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269877761, Sender [1:7501624224643079880:2688], Recipient [1:7501624181693405993:2197]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-05-07T08:53:16.211916Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4937: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-05-07T08:53:16.211930Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5713: Pipe server connected, at tablet: 72057594046644480 2025-05-07T08:53:16.211972Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271122432, Sender [1:7501624224643079876:2685], Recipient [1:7501624181693405993:2197]: {TEvModifySchemeTransaction txid# 281474976710672 TabletId# 72057594046644480} 2025-05-07T08:53:16.211986Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4851: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-05-07T08:53:16.402795Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/Root" OperationType: ESchemeOpCreateTable CreateTable { Name: "origin" Columns { Name: "id" Type: "Uint64" NotNull: false } Columns { Name: "order" Type: "Uint64" NotNull: false } Columns { Name: "value" Type: "Utf8" NotNull: false } KeyColumnNames: "id" KeyColumnNames: "order" UniformPartitionsCount: 64 PartitionConfig { PartitioningPolicy { MinPartitionsCount: 64 MaxPartitionsCount: 64 } } Temporary: false } } TxId: 281474976710672 TabletId: 72057594046644480 Owner: "root@builtin" UserToken: "***" PeerName: "" , at schemeshard: 72057594046644480 2025-05-07T08:53:16.403181Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_table.cpp:426: TCreateTable Propose, path: /Root/origin, opId: 281474976710672:0, at schemeshard: 72057594046644480 2025-05-07T08:53:16.403304Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_create_table.cpp:433: TCreateTable Propose, path: /Root/origin, opId: 281474976710672:0, schema: Name: "origin" Columns { Name: "id" Type: "Uint64" NotNull: false } Columns { Name: "order" Type: "Uint64" NotNull: false } Columns { Name: "value" Type: "Utf8" NotNull: false } KeyColumnNames: "id" KeyColumnNames: "order" UniformPartitionsCount: 64 PartitionConfig { PartitioningPolicy { MinPartitionsCount: 64 MaxPartitionsCount: 64 } } Temporary: false, at schemeshard: 72057594046644480 2025-05-07T08:53:16.420683Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:319: AttachChild: child attached as only one child to the parent, parent id: [OwnerId: 72057594046644480, LocalPathId: 1], parent name: Root, child name: origin, child id: [OwnerId: 72057594046644480, LocalPathId: 13], at schemeshard: 72057594046644480 2025-05-07T08:53:16.420770Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 13] was 0 2025-05-07T08:53:16.420802Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason new shard created for pathId [OwnerId: 7205759 ... 
nt message topic: origin/feed/streamImpl partition: 0 SourceId: '\00072075186224037943' SeqNo: 1304 partNo : 0 messageNo: 3 size 192 offset: -1 2025-05-07T08:54:09.860895Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2192: [PQ: 72075186224037956] got client message topic: origin/feed/streamImpl partition: 0 SourceId: '\00072075186224037943' SeqNo: 1305 partNo : 0 messageNo: 3 size 192 offset: -1 2025-05-07T08:54:09.860915Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2192: [PQ: 72075186224037956] got client message topic: origin/feed/streamImpl partition: 0 SourceId: '\00072075186224037943' SeqNo: 1308 partNo : 0 messageNo: 3 size 192 offset: -1 2025-05-07T08:54:09.860934Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2192: [PQ: 72075186224037956] got client message topic: origin/feed/streamImpl partition: 0 SourceId: '\00072075186224037943' SeqNo: 1310 partNo : 0 messageNo: 3 size 192 offset: -1 2025-05-07T08:54:09.860955Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2192: [PQ: 72075186224037956] got client message topic: origin/feed/streamImpl partition: 0 SourceId: '\00072075186224037943' SeqNo: 1312 partNo : 0 messageNo: 3 size 192 offset: -1 2025-05-07T08:54:09.860974Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2192: [PQ: 72075186224037956] got client message topic: origin/feed/streamImpl partition: 0 SourceId: '\00072075186224037943' SeqNo: 1315 partNo : 0 messageNo: 3 size 192 offset: -1 2025-05-07T08:54:09.860991Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2192: [PQ: 72075186224037956] got client message topic: origin/feed/streamImpl partition: 0 SourceId: '\00072075186224037943' SeqNo: 1317 partNo : 0 messageNo: 3 size 192 offset: -1 2025-05-07T08:54:09.861010Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2192: [PQ: 72075186224037956] got client message topic: origin/feed/streamImpl partition: 0 SourceId: '\00072075186224037943' SeqNo: 1318 partNo : 0 messageNo: 3 size 192 offset: -1 2025-05-07T08:54:09.861027Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2192: [PQ: 72075186224037956] got client message topic: origin/feed/streamImpl partition: 0 SourceId: '\00072075186224037943' SeqNo: 1319 partNo : 0 messageNo: 3 size 192 offset: -1 2025-05-07T08:54:09.861043Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2192: [PQ: 72075186224037956] got client message topic: origin/feed/streamImpl partition: 0 SourceId: '\00072075186224037943' SeqNo: 1320 partNo : 0 messageNo: 3 size 192 offset: -1 2025-05-07T08:54:09.861060Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2192: [PQ: 72075186224037956] got client message topic: origin/feed/streamImpl partition: 0 SourceId: '\00072075186224037943' SeqNo: 1321 partNo : 0 messageNo: 3 size 192 offset: -1 2025-05-07T08:54:09.861078Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2192: [PQ: 72075186224037956] got client message topic: origin/feed/streamImpl partition: 0 SourceId: '\00072075186224037943' SeqNo: 1323 partNo : 0 messageNo: 3 size 192 offset: -1 2025-05-07T08:54:09.861096Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2192: [PQ: 72075186224037956] got client message topic: origin/feed/streamImpl partition: 0 SourceId: '\00072075186224037943' SeqNo: 1326 partNo : 0 messageNo: 3 size 192 offset: -1 2025-05-07T08:54:09.861111Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2192: [PQ: 72075186224037956] got client message topic: origin/feed/streamImpl partition: 0 SourceId: '\00072075186224037943' SeqNo: 1327 partNo : 0 messageNo: 3 size 192 offset: -1 2025-05-07T08:54:09.861129Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2192: [PQ: 72075186224037956] got client message topic: origin/feed/streamImpl partition: 0 SourceId: '\00072075186224037943' SeqNo: 
1328 partNo : 0 messageNo: 3 size 192 offset: -1 2025-05-07T08:54:09.861147Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2192: [PQ: 72075186224037956] got client message topic: origin/feed/streamImpl partition: 0 SourceId: '\00072075186224037943' SeqNo: 1330 partNo : 0 messageNo: 3 size 192 offset: -1 2025-05-07T08:54:09.861164Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2192: [PQ: 72075186224037956] got client message topic: origin/feed/streamImpl partition: 0 SourceId: '\00072075186224037943' SeqNo: 1331 partNo : 0 messageNo: 3 size 192 offset: -1 2025-05-07T08:54:09.861188Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2192: [PQ: 72075186224037956] got client message topic: origin/feed/streamImpl partition: 0 SourceId: '\00072075186224037943' SeqNo: 1332 partNo : 0 messageNo: 3 size 192 offset: -1 2025-05-07T08:54:09.861211Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2192: [PQ: 72075186224037956] got client message topic: origin/feed/streamImpl partition: 0 SourceId: '\00072075186224037943' SeqNo: 1333 partNo : 0 messageNo: 3 size 192 offset: -1 2025-05-07T08:54:09.861236Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2192: [PQ: 72075186224037956] got client message topic: origin/feed/streamImpl partition: 0 SourceId: '\00072075186224037943' SeqNo: 1338 partNo : 0 messageNo: 3 size 192 offset: -1 2025-05-07T08:54:09.861262Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2192: [PQ: 72075186224037956] got client message topic: origin/feed/streamImpl partition: 0 SourceId: '\00072075186224037943' SeqNo: 1341 partNo : 0 messageNo: 3 size 192 offset: -1 2025-05-07T08:54:09.861281Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2192: [PQ: 72075186224037956] got client message topic: origin/feed/streamImpl partition: 0 SourceId: '\00072075186224037943' SeqNo: 1343 partNo : 0 messageNo: 3 size 192 offset: -1 2025-05-07T08:54:09.861301Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2192: [PQ: 72075186224037956] got client message topic: origin/feed/streamImpl partition: 0 SourceId: '\00072075186224037943' SeqNo: 1344 partNo : 0 messageNo: 3 size 192 offset: -1 2025-05-07T08:54:09.861321Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2192: [PQ: 72075186224037956] got client message topic: origin/feed/streamImpl partition: 0 SourceId: '\00072075186224037943' SeqNo: 1346 partNo : 0 messageNo: 3 size 192 offset: -1 2025-05-07T08:54:09.861341Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2192: [PQ: 72075186224037956] got client message topic: origin/feed/streamImpl partition: 0 SourceId: '\00072075186224037943' SeqNo: 1347 partNo : 0 messageNo: 3 size 192 offset: -1 2025-05-07T08:54:09.861363Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2192: [PQ: 72075186224037956] got client message topic: origin/feed/streamImpl partition: 0 SourceId: '\00072075186224037943' SeqNo: 1350 partNo : 0 messageNo: 3 size 192 offset: -1 2025-05-07T08:54:09.861381Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2192: [PQ: 72075186224037956] got client message topic: origin/feed/streamImpl partition: 0 SourceId: '\00072075186224037943' SeqNo: 1352 partNo : 0 messageNo: 3 size 192 offset: -1 2025-05-07T08:54:09.861399Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2192: [PQ: 72075186224037956] got client message topic: origin/feed/streamImpl partition: 0 SourceId: '\00072075186224037943' SeqNo: 1353 partNo : 0 messageNo: 3 size 192 offset: -1 2025-05-07T08:54:09.861417Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2192: [PQ: 72075186224037956] got client message topic: origin/feed/streamImpl partition: 0 SourceId: '\00072075186224037943' SeqNo: 1358 partNo : 0 messageNo: 3 size 192 offset: -1 2025-05-07T08:54:09.861434Z node 1 :PERSQUEUE 
DEBUG: pq_impl.cpp:2192: [PQ: 72075186224037956] got client message topic: origin/feed/streamImpl partition: 0 SourceId: '\00072075186224037943' SeqNo: 1359 partNo : 0 messageNo: 3 size 192 offset: -1 2025-05-07T08:54:09.861454Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2192: [PQ: 72075186224037956] got client message topic: origin/feed/streamImpl partition: 0 SourceId: '\00072075186224037943' SeqNo: 1360 partNo : 0 messageNo: 3 size 192 offset: -1 2025-05-07T08:54:09.861472Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2192: [PQ: 72075186224037956] got client message topic: origin/feed/streamImpl partition: 0 SourceId: '\00072075186224037943' SeqNo: 1361 partNo : 0 messageNo: 3 size 192 offset: -1 2025-05-07T08:54:09.861492Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2192: [PQ: 72075186224037956] got client message topic: origin/feed/streamImpl partition: 0 SourceId: '\00072075186224037943' SeqNo: 1365 partNo : 0 messageNo: 3 size 192 offset: -1 2025-05-07T08:54:09.861512Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2192: [PQ: 72075186224037956] got client message topic: origin/feed/streamImpl partition: 0 SourceId: '\00072075186224037943' SeqNo: 1369 partNo : 0 messageNo: 3 size 192 offset: -1 2025-05-07T08:54:09.861533Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2192: [PQ: 72075186224037956] got client message topic: origin/feed/streamImpl partition: 0 SourceId: '\00072075186224037943' SeqNo: 1376 partNo : 0 messageNo: 3 size 192 offset: -1 2025-05-07T08:54:09.861567Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2192: [PQ: 72075186224037956] got client message topic: origin/feed/streamImpl partition: 0 SourceId: '\00072075186224037943' SeqNo: 1378 partNo : 0 messageNo: 3 size 192 offset: -1 2025-05-07T08:54:09.861583Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2192: [PQ: 72075186224037956] got client message topic: origin/feed/streamImpl partition: 0 SourceId: '\00072075186224037943' SeqNo: 1382 partNo : 0 messageNo: 3 size 192 offset: -1 2025-05-07T08:54:09.861604Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2192: [PQ: 72075186224037956] got client message topic: origin/feed/streamImpl partition: 0 SourceId: '\00072075186224037943' SeqNo: 1383 partNo : 0 messageNo: 3 size 192 offset: -1 2025-05-07T08:54:09.861625Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2192: [PQ: 72075186224037956] got client message topic: origin/feed/streamImpl partition: 0 SourceId: '\00072075186224037943' SeqNo: 1384 partNo : 0 messageNo: 3 size 192 offset: -1 2025-05-07T08:54:09.861644Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2192: [PQ: 72075186224037956] got client message topic: origin/feed/streamImpl partition: 0 SourceId: '\00072075186224037943' SeqNo: 1386 partNo : 0 messageNo: 3 size 192 offset: -1 2025-05-07T08:54:09.861664Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2192: [PQ: 72075186224037956] got client message topic: origin/feed/streamImpl partition: 0 SourceId: '\00072075186224037943' SeqNo: 1387 partNo : 0 messageNo: 3 size 192 offset: -1 2025-05-07T08:54:09.861682Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2192: [PQ: 72075186224037956] got client message topic: origin/feed/streamImpl partition: 0 SourceId: '\00072075186224037943' SeqNo: 1390 partNo : 0 messageNo: 3 size 192 offset: -1 2025-05-07T08:54:09.861702Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2192: [PQ: 72075186224037956] got client message topic: origin/feed/streamImpl partition: 0 SourceId: '\00072075186224037943' SeqNo: 1392 partNo : 0 messageNo: 3 size 192 offset: -1 2025-05-07T08:54:09.861723Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2192: [PQ: 72075186224037956] got client message topic: 
origin/feed/streamImpl partition: 0 SourceId: '\00072075186224037943' SeqNo: 1394 partNo : 0 messageNo: 3 size 192 offset: -1
2025-05-07T08:54:09.861744Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2192: [PQ: 72075186224037956] got client message topic: origin/feed/streamImpl partition: 0 SourceId: '\00072075186224037943' SeqNo: 1396 partNo : 0 messageNo: 3 size 192 offset: -1
2025-05-07T08:54:09.861762Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2192: [PQ: 72075186224037956] got client message topic: origin/feed/streamImpl partition: 0 SourceId: '\00072075186224037943' SeqNo: 1398 partNo : 0 messageNo: 3 size 192 offset: -1
2025-05-07T08:54:09.861782Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2192: [PQ: 72075186224037956] got client message topic: origin/feed/streamImpl partition: 0 SourceId: '\00072075186224037943' SeqNo: 1399 partNo : 0 messageNo: 3 size 192 offset: -1
2025-05-07T08:54:09.861804Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2192: [PQ: 72075186224037956] got client message topic: origin/feed/streamImpl partition: 0 SourceId: '\00072075186224037943' SeqNo: 1400 partNo : 0 messageNo: 3 size 192 offset: -1
2025-05-07T08:54:09.861825Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2192: [PQ: 72075186224037956] got client message topic: origin/feed/streamImpl partition: 0 SourceId: '\00072075186224037943' SeqNo: 1401 partNo : 0 messageNo: 3 size 192 offset: -1
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tablet_flat/ut/unittest >> DBase::KIKIMR_15598_Many_MemTables [GOOD]
Test command err:
3 parts:
[0:0:1:0:0:0:0] 150 rows, 7 pages, 1 levels: (286, 103) (607, 210) (811, 278) (1315, 446) (1540, 521)
[0:0:2:0:0:0:0] 197 rows, 9 pages, 2 levels: (253, 92) (577, 200) (742, 255) (1156, 393) (1594, 539)
[0:0:3:0:0:0:0] 153 rows, 7 pages, 1 levels: (199, 74) (514, 179) (769, 264) (1291, 438) (1555, 526)
Checking BTree: Touched 100% bytes, 5 pages
RowCountHistogram: 19% (actual 16%) key = (286, 103) value = 97 (actual 84 - 2% error) 19% (actual 19%) key = (607, 210) value = 192 (actual 183 - 1% error) 18% (actual 22%) key = (958, 327) value = 286 (actual 293 - -1% error) 19% (actual 20%) key = (1291, 438) value = 381 (actual 394 - -2% error) 23% (actual 21%)
DataSizeHistogram: 18% (actual 28%) key = (286, 103) value = 7810 (actual 11827 - -9% error) 19% (actual 19%) key = (607, 210) value = 15865 (actual 19876 - -9% error) 19% (actual 19%) key = (958, 327) value = 23894 (actual 27913 - -9% error) 18% (actual 13%) key = (1291, 438) value = 31821 (actual 33747 - -4% error) 24% (actual 19%)
Checking Flat: Touched 100% bytes, 3 pages
RowCountHistogram: 24% (actual 16%) key = (286, 103) value = 120 (actual 84 - 7% error) 23% (actual 28%) key = (742, 255) value = 237 (actual 226 - 2% error) 24% (actual 21%) key = (1087, 370) value = 358 (actual 332 - 5% error) 22% (actual 27%) key = (1540, 521) value = 472 (actual 469 - 0% error)
5% (actual 6%) DataSizeHistogram: 23% (actual 28%) key = (286, 103) value = 9821 (actual 11827 - -4% error) 23% (actual 23%) key = (742, 255) value = 19876 (actual 21881 - -4% error) 23% (actual 19%) key = (1087, 370) value = 29895 (actual 29895 - 0% error) 23% (actual 27%) key = (1540, 521) value = 39763 (actual 41447 - -4% error) 5% (actual 1%) 3 parts: [0:0:1:0:0:0:0] 167 rows, 7 pages, 1 levels: (91, 38) (166, 63) (325, 116) (394, 139) (481, 168) [0:0:2:0:0:0:0] 166 rows, 8 pages, 2 levels: (631, 218) (709, 244) (853, 292) (934, 319) (1087, 370) [0:0:3:0:0:0:0] 167 rows, 8 pages, 2 levels: (1156, 393) (1246, 423) (1396, 473) (1471, 498) (1633, 552) Checking BTree: Touched 100% bytes, 7 pages RowCountHistogram: 5% (actual 5%) key = (91, 38) value = 25 (actual 25 - 0% error) 5% (actual 5%) key = (166, 63) value = 50 (actual 50 - 0% error) 4% (actual 4%) key = (253, 92) value = 74 (actual 74 - 0% error) 4% (actual 4%) key = (325, 116) value = 96 (actual 96 - 0% error) 4% (actual 4%) key = (394, 139) value = 119 (actual 119 - 0% error) 5% (actual 5%) key = (481, 168) value = 144 (actual 144 - 0% error) 4% (actual 4%) key = (553, 192) value = 167 (actual 166 - 0% error) 4% (actual 5%) key = (631, 218) value = 191 (actual 191 - 0% error) 4% (actual 4%) key = (709, 244) value = 215 (actual 215 - 0% error) 3% (actual 3%) key = (766, 263) value = 234 (actual 234 - 0% error) 5% (actual 5%) key = (853, 292) value = 261 (actual 261 - 0% error) 4% (actual 4%) key = (934, 319) value = 285 (actual 285 - 0% error) 4% (actual 4%) key = (1006, 343) value = 309 (actual 309 - 0% error) 4% (actual 4%) key = (1087, 370) value = 333 (actual 332 - 0% error) 4% (actual 4%) key = (1156, 393) value = 354 (actual 354 - 0% error) 5% (actual 5%) key = (1246, 423) value = 380 (actual 380 - 0% error) 4% (actual 4%) key = (1324, 449) value = 404 (actual 404 - 0% error) 4% (actual 4%) key = (1396, 473) value = 426 (actual 426 - 0% error) 4% (actual 4%) key = (1471, 498) value = 448 (actual 448 - 0% error) 4% (actual 4%) key = (1543, 522) value = 470 (actual 470 - 0% error) 5% (actual 5%) key = (1633, 552) value = 496 (actual 496 - 0% error) 0% (actual 0%) DataSizeHistogram: 4% (actual 4%) key = (91, 38) value = 1974 (actual 1974 - 0% error) 4% (actual 4%) key = (166, 63) value = 3992 (actual 3992 - 0% error) 4% (actual 4%) key = (253, 92) value = 5889 (actual 5889 - 0% error) 4% (actual 4%) key = (325, 116) value = 7868 (actual 7868 - 0% error) 4% (actual 4%) key = (394, 139) value = 9910 (actual 9910 - 0% error) 4% (actual 4%) key = (481, 168) value = 11938 (actual 11938 - 0% error) 4% (actual 4%) key = (553, 192) value = 13685 (actual 13685 - 0% error) 4% (actual 4%) key = (631, 218) value = 15674 (actual 15674 - 0% error) 4% (actual 4%) key = (709, 244) value = 17709 (actual 17709 - 0% error) 4% (actual 4%) key = (766, 263) value = 19664 (actual 19664 - 0% error) 4% (actual 4%) key = (853, 292) value = 21673 (actual 21673 - 0% error) 4% (actual 4%) key = (934, 319) value = 23712 (actual 23712 - 0% error) 4% (actual 4%) key = (1006, 343) value = 25687 (actual 25687 - 0% error) 4% (actual 4%) key = (1087, 370) value = 27765 (actual 27678 - 0% error) 4% (actual 4%) key = (1156, 393) value = 29741 (actual 29741 - 0% error) 4% (actual 4%) key = (1246, 423) value = 31726 (actual 31726 - 0% error) 4% (actual 4%) key = (1324, 449) value = 33698 (actual 33698 - 0% error) 4% (actual 4%) key = (1396, 473) value = 35700 (actual 35700 - 0% error) 4% (actual 4%) key = (1471, 498) value = 37620 (actual 37620 - 0% error) 4% (actual 
4%) key = (1543, 522) value = 39641 (actual 39641 - 0% error) 4% (actual 4%) key = (1633, 552) value = 41669 (actual 41669 - 0% error) 0% (actual 0%) Checking Flat: Touched 100% bytes, 3 pages RowCountHistogram: 5% (actual 5%) key = (91, 38) value = 25 (actual 25 - 0% error) 5% (actual 5%) key = (166, 63) value = 50 (actual 50 - 0% error) 4% (actual 4%) key = (253, 92) value = 74 (actual 74 - 0% error) 4% (actual 4%) key = (325, 116) value = 96 (actual 96 - 0% error) 4% (actual 4%) key = (394, 139) value = 119 (actual 119 - 0% error) 5% (actual 5%) key = (481, 168) value = 144 (actual 144 - 0% error) 4% (actual 4%) key = (556, 193) value = 167 (actual 167 - 0% error) 4% (actual 4%) key = (631, 218) value = 191 (actual 191 - 0% error) 4% (actual 4%) key = (709, 244) value = 215 (actual 215 - 0% error) 3% (actual 3%) key = (766, 263) value = 234 (actual 234 - 0% error) 5% (actual 5%) key = (853, 292) value = 261 (actual 261 - 0% error) 4% (actual 4%) key = (934, 319) value = 285 (actual 285 - 0% error) 4% (actual 4%) key = (1006, 343) value = 309 (actual 309 - 0% error) 4% (actual 4%) key = (1087, 370) value = 332 (actual 332 - 0% error) 0% (actual 0%) key = (1090, 371) value = 333 (actual 333 - 0% error) 4% (actual 4%) key = (1156, 393) value = 354 (actual 354 - 0% error) 5% (actual 5%) key = (1246, 423) value = 380 (actual 380 - 0% error) 4% (actual 4%) key = (1324, 449) value = 404 (actual 404 - 0% error) 4% (actual 4%) key = (1396, 473) value = 426 (actual 426 - 0% error) 4% (actual 4%) key = (1471, 498) value = 448 (actual 448 - 0% error) 4% (actual 4%) key = (1543, 522) value = 470 (actual 470 - 0% error) 5% (actual 5%) key = (1633, 552) value = 496 (actual 496 - 0% error) 0% (actual 0%) DataSizeHistogram: 4% (actual 4%) key = (91, 38) value = 1974 (actual 1974 - 0% error) 4% (actual 4%) key = (166, 63) value = 3992 (actual 3992 - 0% error) 4% (actual 4%) key = (253, 92) value = 5889 (actual 5889 - 0% error) 4% (actual 4%) key = (325, 116) value = 7868 (actual 7868 - 0% error) 4% (actual 4%) key = (394, 139) value = 9910 (actual 9910 - 0% error) 4% (actual 4%) key = (481, 168) value = 11938 (actual 11938 - 0% error) 4% (actual 4%) key = (556, 193) value = 13685 (actual 13685 - 0% error) 4% (actual 4%) key = (631, 218) value = 15674 (actual 15674 - 0% error) 4% (actual 4%) key = (709, 244) value = 17709 (actual 17709 - 0% error) 4% (actual 4%) key = (766, 263) value = 19664 (actual 19664 - 0% error) 4% (actual 4%) key = (853, 292) value = 21673 (actual 21673 - 0% error) 4% (actual 4%) key = (934, 319) value = 23712 (actual 23712 - 0% error) 4% (actual 4%) key = (1006, 343) value = 25687 (actual 25687 - 0% error) 4% (actual 4%) key = (1087, 370) value = 27678 (actual 27678 - 0% error) 0% (actual 0%) key = (1090, 371) value = 27765 (actual 27765 - 0% error) 4% (actual 4%) key = (1156, 393) value = 29741 (actual 29741 - 0% error) 4% (actual 4%) key = (1246, 423) value = 31726 (actual 31726 - 0% error) 4% (actual 4%) key = (1324, 449) value = 33698 (actual 33698 - 0% error) 4% (actual 4%) key = (1396, 473) value = 35700 (actual 35700 - 0% error) 4% (actual 4%) key = (1471, 498) value = 37620 (actual 37620 - 0% error) 4% (actual 4%) key = (1543, 522) value = 39641 (actual 39641 - 0% error) 4% (actual 4%) key = (1633, 552) value = 41669 (actual 41669 - 0% error) 0% (actual 0%) Checking Mixed: Touched 100% bytes, 7 pages RowCountHistogram: 14% (actual 5%) key = (91, 38) value = 70 (actual 25 - 9% error) 5% (actual 5%) key = (166, 63) value = 95 (actual 50 - 9% error) 4% (actual 4%) key = (253, 
92) value = 119 (actual 74 - 9% error) 4% (actual 4%) key = (325, 116) value = 141 (actual 96 - 9% error) 4% (actual 4%) key = (394, 139) value = 164 (actual 119 - 9% error) 5% (actual 5%) key = (481, 168) value = 189 (actual 144 - 9% error) 4% (actual 9%) key = (631, 218) value = 212 (actual 191 - 4% error) 4% (actual 4%) key = (709, 244) value = 236 (actual 215 - 4% error) 3% (actual 3%) key = (766, 263) value = 255 (actual 234 - 4% error) 5% (actual 5%) key = (853, 292) value = 282 (actual 261 - 4% error) 4% (actual 4%) key = (934, 319) value = 306 (actual 285 - 4% error) 4% (actual 4%) key = (1006, 343) value = 330 (actual 309 - 4% error) 4% (actual 4%) key = (1087, 370) value = 353 (actual 332 - 4% error) 0% (actual 4%) key = (1156, 393) value = 354 (actual 354 - 0% error) 5% (actual 5%) key = (1246, 423) value = 380 (actual 380 - 0% error) 4% (actual 4%) key = (1324, 449) value = 404 (actual 404 - 0% error) 4% (actual 4%) key = (1396, 473) value = 426 (actual 426 - 0% error) 4% (actual 4%) key = (1471, 498) value = 448 (actual 448 - 0% error) 4% (actual 4%) key = (1543, 522) value = 470 (actual 470 - 0% error) 5% (actual 5%) key = (1633, 552) value = 496 (actual 496 - 0% error) 0% (actual 0%) DataSizeHistogram: 14% (actual 4%) key = (91, 38) value = 5939 (actual 1974 - 9% error) 4% (actual 4%) key = (166, 63) value = 7957 (actual 3992 - 9% error) 4% (actual 4%) key = (253, 92) value = 9854 (actual 5889 - 9% error) 4% (actual 4%) key = (325, 116) value = 11833 (actual 7868 - 9% error) 4% (actual 4%) key = (394, 139) value = 13875 (actual 9910 - 9% error) 4% (actual 4%) key = (481, 168) value = 15903 (actual 11938 - 9% error) 4% ... 85 - 0% error) 4% (actual 4%) key = (631, 218) value = 15674 (actual 15674 - 0% error) 4% (actual 4%) key = (709, 244) value = 17709 (actual 17709 - 0% error) 4% (actual 4%) key = (766, 263) value = 19664 (actual 19664 - 0% error) 4% (actual 4%) key = (853, 292) value = 21673 (actual 21673 - 0% error) 4% (actual 4%) key = (934, 319) value = 23712 (actual 23712 - 0% error) 4% (actual 4%) key = (1006, 343) value = 25687 (actual 25687 - 0% error) 4% (actual 4%) key = (1087, 370) value = 27765 (actual 27678 - 0% error) 4% (actual 4%) key = (1156, 393) value = 29741 (actual 29741 - 0% error) 4% (actual 4%) key = (1246, 423) value = 31726 (actual 31726 - 0% error) 4% (actual 4%) key = (1324, 449) value = 33698 (actual 33698 - 0% error) 4% (actual 4%) key = (1396, 473) value = 35700 (actual 35700 - 0% error) 4% (actual 4%) key = (1471, 498) value = 37620 (actual 37620 - 0% error) 4% (actual 4%) key = (1543, 522) value = 39641 (actual 39641 - 0% error) 4% (actual 4%) key = (1633, 552) value = 41669 (actual 41669 - 0% error) 0% (actual 0%) Checking Flat: Touched 100% bytes, 3 pages RowCountHistogram: 5% (actual 5%) key = (91, 38) value = 25 (actual 25 - 0% error) 5% (actual 5%) key = (166, 63) value = 50 (actual 50 - 0% error) 4% (actual 4%) key = (253, 92) value = 74 (actual 74 - 0% error) 4% (actual 4%) key = (325, 116) value = 96 (actual 96 - 0% error) 4% (actual 4%) key = (394, 139) value = 119 (actual 119 - 0% error) 5% (actual 5%) key = (481, 168) value = 144 (actual 144 - 0% error) 4% (actual 4%) key = (556, 193) value = 167 (actual 167 - 0% error) 4% (actual 4%) key = (631, 218) value = 191 (actual 191 - 0% error) 4% (actual 4%) key = (709, 244) value = 215 (actual 215 - 0% error) 3% (actual 3%) key = (766, 263) value = 234 (actual 234 - 0% error) 5% (actual 5%) key = (853, 292) value = 261 (actual 261 - 0% error) 4% (actual 4%) key = (934, 319) value = 285 
(actual 285 - 0% error) 4% (actual 4%) key = (1006, 343) value = 309 (actual 309 - 0% error) 4% (actual 4%) key = (1087, 370) value = 332 (actual 332 - 0% error) 0% (actual 0%) key = (1090, 371) value = 333 (actual 333 - 0% error) 4% (actual 4%) key = (1156, 393) value = 354 (actual 354 - 0% error) 5% (actual 5%) key = (1246, 423) value = 380 (actual 380 - 0% error) 4% (actual 4%) key = (1324, 449) value = 404 (actual 404 - 0% error) 4% (actual 4%) key = (1396, 473) value = 426 (actual 426 - 0% error) 4% (actual 4%) key = (1471, 498) value = 448 (actual 448 - 0% error) 4% (actual 4%) key = (1543, 522) value = 470 (actual 470 - 0% error) 5% (actual 5%) key = (1633, 552) value = 496 (actual 496 - 0% error) 0% (actual 0%) DataSizeHistogram: 4% (actual 4%) key = (91, 38) value = 1974 (actual 1974 - 0% error) 4% (actual 4%) key = (166, 63) value = 3992 (actual 3992 - 0% error) 4% (actual 4%) key = (253, 92) value = 5889 (actual 5889 - 0% error) 4% (actual 4%) key = (325, 116) value = 7868 (actual 7868 - 0% error) 4% (actual 4%) key = (394, 139) value = 9910 (actual 9910 - 0% error) 4% (actual 4%) key = (481, 168) value = 11938 (actual 11938 - 0% error) 4% (actual 4%) key = (556, 193) value = 13685 (actual 13685 - 0% error) 4% (actual 4%) key = (631, 218) value = 15674 (actual 15674 - 0% error) 4% (actual 4%) key = (709, 244) value = 17709 (actual 17709 - 0% error) 4% (actual 4%) key = (766, 263) value = 19664 (actual 19664 - 0% error) 4% (actual 4%) key = (853, 292) value = 21673 (actual 21673 - 0% error) 4% (actual 4%) key = (934, 319) value = 23712 (actual 23712 - 0% error) 4% (actual 4%) key = (1006, 343) value = 25687 (actual 25687 - 0% error) 4% (actual 4%) key = (1087, 370) value = 27678 (actual 27678 - 0% error) 0% (actual 0%) key = (1090, 371) value = 27765 (actual 27765 - 0% error) 4% (actual 4%) key = (1156, 393) value = 29741 (actual 29741 - 0% error) 4% (actual 4%) key = (1246, 423) value = 31726 (actual 31726 - 0% error) 4% (actual 4%) key = (1324, 449) value = 33698 (actual 33698 - 0% error) 4% (actual 4%) key = (1396, 473) value = 35700 (actual 35700 - 0% error) 4% (actual 4%) key = (1471, 498) value = 37620 (actual 37620 - 0% error) 4% (actual 4%) key = (1543, 522) value = 39641 (actual 39641 - 0% error) 4% (actual 4%) key = (1633, 552) value = 41669 (actual 41669 - 0% error) 0% (actual 0%) Checking Mixed: Touched 100% bytes, 3 pages RowCountHistogram: 14% (actual 5%) key = (91, 38) value = 70 (actual 25 - 9% error) 5% (actual 5%) key = (166, 63) value = 95 (actual 50 - 9% error) 4% (actual 4%) key = (253, 92) value = 119 (actual 74 - 9% error) 4% (actual 4%) key = (325, 116) value = 141 (actual 96 - 9% error) 4% (actual 4%) key = (394, 139) value = 164 (actual 119 - 9% error) 5% (actual 5%) key = (481, 168) value = 189 (actual 144 - 9% error) 4% (actual 9%) key = (631, 218) value = 212 (actual 191 - 4% error) 4% (actual 4%) key = (709, 244) value = 236 (actual 215 - 4% error) 3% (actual 3%) key = (766, 263) value = 255 (actual 234 - 4% error) 5% (actual 5%) key = (853, 292) value = 282 (actual 261 - 4% error) 4% (actual 4%) key = (934, 319) value = 306 (actual 285 - 4% error) 4% (actual 4%) key = (1006, 343) value = 330 (actual 309 - 4% error) 4% (actual 4%) key = (1087, 370) value = 353 (actual 332 - 4% error) 0% (actual 4%) key = (1156, 393) value = 354 (actual 354 - 0% error) 5% (actual 5%) key = (1246, 423) value = 380 (actual 380 - 0% error) 4% (actual 4%) key = (1324, 449) value = 404 (actual 404 - 0% error) 4% (actual 4%) key = (1396, 473) value = 426 (actual 426 - 0% 
error) 4% (actual 4%) key = (1471, 498) value = 448 (actual 448 - 0% error) 4% (actual 4%) key = (1543, 522) value = 470 (actual 470 - 0% error) 5% (actual 5%) key = (1633, 552) value = 496 (actual 496 - 0% error) 0% (actual 0%) DataSizeHistogram: 14% (actual 4%) key = (91, 38) value = 5939 (actual 1974 - 9% error) 4% (actual 4%) key = (166, 63) value = 7957 (actual 3992 - 9% error) 4% (actual 4%) key = (253, 92) value = 9854 (actual 5889 - 9% error) 4% (actual 4%) key = (325, 116) value = 11833 (actual 7868 - 9% error) 4% (actual 4%) key = (394, 139) value = 13875 (actual 9910 - 9% error) 4% (actual 4%) key = (481, 168) value = 15903 (actual 11938 - 9% error) 4% (actual 8%) key = (631, 218) value = 17650 (actual 15674 - 4% error) 4% (actual 4%) key = (709, 244) value = 19685 (actual 17709 - 4% error) 4% (actual 4%) key = (766, 263) value = 21640 (actual 19664 - 4% error) 4% (actual 4%) key = (853, 292) value = 23649 (actual 21673 - 4% error) 4% (actual 4%) key = (934, 319) value = 25688 (actual 23712 - 4% error) 4% (actual 4%) key = (1006, 343) value = 27663 (actual 25687 - 4% error) 4% (actual 4%) key = (1087, 370) value = 29654 (actual 27678 - 4% error) 0% (actual 4%) key = (1156, 393) value = 29741 (actual 29741 - 0% error) 4% (actual 4%) key = (1246, 423) value = 31726 (actual 31726 - 0% error) 4% (actual 4%) key = (1324, 449) value = 33698 (actual 33698 - 0% error) 4% (actual 4%) key = (1396, 473) value = 35700 (actual 35700 - 0% error) 4% (actual 4%) key = (1471, 498) value = 37620 (actual 37620 - 0% error) 4% (actual 4%) key = (1543, 522) value = 39641 (actual 39641 - 0% error) 4% (actual 4%) key = (1633, 552) value = 41669 (actual 41669 - 0% error) 0% (actual 0%) 3 parts: [0:0:1:0:0:0:0] 167 rows, 1 pages, 0 levels: () () () () () [0:0:2:0:0:0:0] 166 rows, 1 pages, 0 levels: () () () () () [0:0:3:0:0:0:0] 167 rows, 1 pages, 0 levels: () () () () () Checking BTree: Touched 0% bytes, 0 pages RowCountHistogram: 33% (actual 33%) key = (553, 192) value = 167 (actual 166 - 0% error) 33% (actual 33%) key = (1087, 370) value = 333 (actual 332 - 0% error) 33% (actual 33%) DataSizeHistogram: 32% (actual 32%) key = (553, 192) value = 13565 (actual 13565 - 0% error) 33% (actual 33%) key = (1087, 370) value = 27505 (actual 27505 - 0% error) 33% (actual 33%) Checking Flat: Touched 100% bytes, 3 pages RowCountHistogram: 33% (actual 33%) key = (556, 193) value = 167 (actual 167 - 0% error) 33% (actual 33%) key = (1090, 371) value = 333 (actual 333 - 0% error) 33% (actual 33%) DataSizeHistogram: 32% (actual 32%) key = (556, 193) value = 13565 (actual 13565 - 0% error) 33% (actual 33%) key = (1090, 371) value = 27505 (actual 27505 - 0% error) 33% (actual 33%) Checking Mixed: Touched 0% bytes, 0 pages RowCountHistogram: 100% (actual 100%) DataSizeHistogram: 100% (actual 100%) Got : 24000 2106439 49449 38 44 Expected: 24000 2106439 49449 38 44 { [2455, 2599), [2798, 3624), [4540, 4713), [5654, 7161), [8509, 8794), [8936, 9973), [11888, 14280), [14337, 14882), [15507, 16365), [17368, 19451), [19536, 20135), [20790, 21503), [21589, 23243) } Got : 12816 1121048 49449 20 23 Expected: 12816 1121048 49449 20 23 Got : 24000 3547100 81694 64 44 Expected: 24000 3547100 81694 64 44 { [1012, 1475), [1682, 1985), [2727, 3553), [3599, 3992), [5397, 7244), [9181, 9807), [9993, 10178), [12209, 14029), [15089, 15342), [16198, 16984), [17238, 18436), [21087, 21876), [23701, 23794) } Got : 9582 1425198 81694 26 17 Expected: 9582 1425198 81694 26 17 Got : 24000 2460139 23760 42 41 Expected: 24000 2460139 23760 42 41 { 
[1296, 2520), [3888, 4320), [5040, 6840), [6912, 7272), [10872, 11160), [11520, 12096), [12096, 13824), [15192, 15624), [17064, 17856), [18216, 19296), [19800, 20160), [20736, 21096), [21096, 22104) }
Got : 10440 1060798 23760 18 18 Expected: 10440 1060798 23760 18 18
Got : 24000 4054050 46562 68 43 Expected: 24000 4054050 46562 68 43
{ [460, 1518), [2300, 2484), [2760, 4002), [4600, 5842), [6302, 9752), [11178, 12328), [14582, 14858), [16790, 18032), [18216, 18446), [18722, 19504), [19504, 19964), [20378, 20470), [21344, 23506) }
Got : 13570 2277890 46562 38 24 Expected: 13570 2277890 46562 38 24
Got : 24000 2106459 49449 38 44 Expected: 24000 2106459 49449 38 44
Got : 24000 2460219 23555 41 41 Expected: 24000 2460219 23555 41 41
Got : 24000 4054270 46543 66 43 Expected: 24000 4054270 46543 66 43
Got : 24000 2106479 49555 38 44 Expected: 24000 2106479 49555 38 44
Got : 24000 2460259 23628 41 41 Expected: 24000 2460259 23628 41 41
Got : 24000 4054290 46640 65 43 Expected: 24000 4054290 46640 65 43
Got : 24000 2106439 66674 3 4 Expected: 24000 2106439 66674 3 4
{ [2455, 2599), [2798, 3624), [4540, 4713), [5654, 7161), [8509, 8794), [8936, 9973), [11888, 14280), [14337, 14882), [15507, 16365), [17368, 19451), [19536, 20135), [20790, 21503), [21589, 23243) }
Got : 12816 1121048 66674 2 2 Expected: 12816 1121048 66674 2 2
Got : 24000 2460139 33541 4 4 Expected: 24000 2460139 33541 4 4
{ [1296, 2520), [3888, 4320), [5040, 6840), [6912, 7272), [10872, 11160), [11520, 12096), [12096, 13824), [15192, 15624), [17064, 17856), [18216, 19296), [19800, 20160), [20736, 21096), [21096, 22104) }
Got : 10440 1060798 33541 1 1 Expected: 10440 1060798 33541 1 1
Got : 24000 4054050 64742 7 4 Expected: 24000 4054050 64742 7 4
{ [460, 1518), [2300, 2484), [2760, 4002), [4600, 5842), [6302, 9752), [11178, 12328), [14582, 14858), [16790, 18032), [18216, 18446), [18722, 19504), [19504, 19964), [20378, 20470), [21344, 23506) }
Got : 13570 2234982 64742 4 2 Expected: 13570 2234982 64742 4 2
------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/perf/unittest >> KqpQueryPerf::KvRead-QueryService [GOOD]
Test command err:
Trying to start YDB, gRPC: 24438, MsgBus: 1243
2025-05-07T08:54:56.371430Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501624654702397688:2078];send_to=[0:7307199536658146131:7762515];
2025-05-07T08:54:56.378575Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message;
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0027a8/r3tmp/tmpeFHbAq/pdisk_1.dat
2025-05-07T08:54:57.222079Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded
2025-05-07T08:54:57.232933Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2025-05-07T08:54:57.233042Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting
2025-05-07T08:54:57.243249Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected
TServer::EnableGrpc on GrpcPort 24438, node 1
2025-05-07T08:54:57.666584Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe)
2025-05-07T08:54:57.666606Z node 1 :NET_CLASSIFIER
WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:54:57.666613Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:54:57.666738Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:1243 TClient is connected to server localhost:1243 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:54:58.925478Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:54:58.958928Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T08:54:58.968454Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:54:59.281101Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:54:59.604045Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:54:59.768902Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T08:55:01.378131Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501624654702397688:2078];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:55:01.378222Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:55:02.203116Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501624680472203092:2409], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:55:02.203365Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:55:02.709443Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T08:55:02.775164Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T08:55:02.862752Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T08:55:02.930696Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T08:55:02.998397Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T08:55:03.090637Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T08:55:03.196009Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T08:55:03.299293Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501624684767171054:2475], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:55:03.299387Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:55:03.299600Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501624684767171059:2478], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:55:03.303852Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-05-07T08:55:03.322093Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501624684767171061:2479], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T08:55:03.418486Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501624684767171114:3424] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } Trying to start YDB, gRPC: 18478, MsgBus: 65197 2025-05-07T08:55:06.049948Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7501624692059258547:2208];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0027a8/r3tmp/tmpdqBKQv/pdisk_1.dat 2025-05-07T08:55:06.093232Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-05-07T08:55:06.198933Z node 2 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:55:06.231824Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:55:06.231964Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:55:06.234792Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 18478, node 2 2025-05-07T08:55:06.366757Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:55:06.366787Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:55:06.366795Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:55:06.366930Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:65197 TClient is connected to server localhost:65197 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:55:07.017655Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T08:55:07.058555Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-05-07T08:55:07.096648Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:55:07.201343Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:55:07.461536Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:55:07.555305Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:55:10.501847Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7501624713534096518:2407], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:55:10.501956Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:55:10.563092Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-05-07T08:55:10.613343Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-05-07T08:55:10.658316Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-05-07T08:55:10.740384Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-05-07T08:55:10.798256Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-05-07T08:55:10.850610Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-05-07T08:55:10.916066Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-05-07T08:55:10.987565Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7501624692059258547:2208];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:55:10.987655Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:55:11.056585Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7501624717829064471:2470], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:55:11.056765Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:55:11.057120Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7501624717829064476:2473], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:55:11.063294Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-05-07T08:55:11.084581Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7501624717829064478:2474], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-05-07T08:55:11.155273Z node 2 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [2:7501624717829064532:3416] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 }
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_data_erasure/unittest >> TestDataErasure::DataErasureWithCopyTable [GOOD]
Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:65:2058] recipient: [1:59:2100] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:65:2058] recipient: [1:59:2100] Leader for TabletID 72057594046678944 is [1:69:2104] sender: [1:73:2058] recipient: [1:59:2100] 2025-05-07T08:55:04.103220Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:55:04.103320Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:55:04.103364Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:55:04.103400Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:55:04.103443Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:55:04.103478Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:55:04.103529Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:55:04.103613Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:55:04.104366Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:55:04.104707Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:55:04.203820Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:55:04.203892Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:55:04.205366Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:55:04.205587Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:55:04.205775Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944
2025-05-07T08:55:04.213451Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:55:04.213716Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:55:04.214450Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:55:04.214859Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:55:04.220090Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:55:04.222603Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:55:04.222679Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:55:04.222852Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:55:04.222907Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:55:04.222957Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:55:04.223176Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:55:04.225888Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:69:2104] sender: [1:148:2058] recipient: [1:16:2063] 2025-05-07T08:55:04.377491Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:55:04.377752Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:55:04.381182Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:55:04.381511Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:55:04.381583Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:55:04.382415Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: 
StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:55:04.382570Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:55:04.382753Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:55:04.382825Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:55:04.382862Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:55:04.382894Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:55:04.383450Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:55:04.383502Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:55:04.383542Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:55:04.383981Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:55:04.384019Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:55:04.384055Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:55:04.384110Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:55:04.399039Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:55:04.399667Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:55:04.399830Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:55:04.400952Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:55:04.401082Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, 
message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 74 RawX2: 4294969404 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:55:04.401149Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:55:04.401435Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:55:04.401483Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:55:04.401662Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:55:04.401762Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:55:04.402544Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:55:04.402592Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:55:04.402766Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:55:04.402801Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:123:2 ... 
count 50 2025-05-07T08:55:13.032325Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186233409552 maps to shardIdx: 72075186233409546:7 followerId=0, pathId: [OwnerId: 72075186233409546, LocalPathId: 3], pathId map=SimpleCopy, is column=0, is olap=0, RowCount 50, DataSize 5121950 2025-05-07T08:55:13.032379Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72075186233409546:7 with partCount# 1, rowCount# 50, searchHeight# 1, lastFullCompaction# 1970-01-01T00:00:50.000000Z at schemeshard 72075186233409546 2025-05-07T08:55:13.032411Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:483: Do not want to split tablet 72075186233409552 2025-05-07T08:55:13.032493Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72075186233409546 2025-05-07T08:55:13.043516Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 2146435092, Sender [0:0:0], Recipient [1:277:2238]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats 2025-05-07T08:55:13.043589Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5015: StateWork, processing event TEvPrivate::TEvPersistTableStats 2025-05-07T08:55:13.043615Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:588: Started TEvPersistStats at tablet 72075186233409546, queue size# 0 2025-05-07T08:55:13.065483Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:185:2178]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T08:55:13.065577Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4848: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T08:55:13.065699Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271124999, Sender [1:185:2178], Recipient [1:185:2178]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T08:55:13.065732Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4847: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T08:55:13.076435Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:277:2238]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T08:55:13.076540Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4848: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T08:55:13.076663Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271124999, Sender [1:277:2238], Recipient [1:277:2238]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T08:55:13.076700Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4847: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T08:55:13.112962Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:185:2178]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T08:55:13.113058Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4848: StateWork, processing event 
TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T08:55:13.113190Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271124999, Sender [1:185:2178], Recipient [1:185:2178]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T08:55:13.113223Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4847: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T08:55:13.123852Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:277:2238]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T08:55:13.123951Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4848: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T08:55:13.124059Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271124999, Sender [1:277:2238], Recipient [1:277:2238]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T08:55:13.124132Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4847: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T08:55:13.160066Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:185:2178]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T08:55:13.160157Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4848: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T08:55:13.160275Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271124999, Sender [1:185:2178], Recipient [1:185:2178]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T08:55:13.160312Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4847: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T08:55:13.171099Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:277:2238]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T08:55:13.171197Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4848: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T08:55:13.171311Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271124999, Sender [1:277:2238], Recipient [1:277:2238]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T08:55:13.171348Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4847: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T08:55:13.210219Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:185:2178]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T08:55:13.210311Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4848: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T08:55:13.210454Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271124999, Sender [1:185:2178], Recipient [1:185:2178]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 
2025-05-07T08:55:13.210491Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4847: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T08:55:13.222859Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:277:2238]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T08:55:13.222960Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4848: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T08:55:13.223083Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271124999, Sender [1:277:2238], Recipient [1:277:2238]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T08:55:13.223121Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4847: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T08:55:13.260103Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:185:2178]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T08:55:13.260196Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4848: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T08:55:13.260310Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271124999, Sender [1:185:2178], Recipient [1:185:2178]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T08:55:13.260346Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4847: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T08:55:13.270949Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271125517, Sender [0:0:0], Recipient [1:185:2178]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToRunDataErasureBSC 2025-05-07T08:55:13.271040Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5039: StateWork, processing event TEvSchemeShard::TEvWakeupToRunDataErasureBSC 2025-05-07T08:55:13.271079Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__root_data_erasure_manager.cpp:352: [RootDataErasureManager] SendRequestToBSC: Generation# 1 2025-05-07T08:55:13.271318Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 268637738, Sender [1:184:2177], Recipient [1:185:2178]: NKikimrBlobStorage.TEvControllerShredResponse CurrentGeneration: 1 Completed: true Progress10k: 10000 2025-05-07T08:55:13.271359Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5038: StateWork, processing event TEvBlobStorage::TEvControllerShredResponse 2025-05-07T08:55:13.271394Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:7757: Handle TEvControllerShredResponse, at schemeshard: 72057594046678944 2025-05-07T08:55:13.271480Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:628: TTxCompleteDataErasureBSC Execute at schemeshard: 72057594046678944 2025-05-07T08:55:13.271517Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__root_data_erasure_manager.cpp:640: TTxCompleteDataErasureBSC: Data shred in BSC is completed 2025-05-07T08:55:13.271595Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:168: [RootDataErasureManager] ScheduleDataErasureWakeup: Interval# 14.999500s, Timestamp# 1970-01-01T00:01:25.000500Z 2025-05-07T08:55:13.271650Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard__root_data_erasure_manager.cpp:376: [RootDataErasureManager] Complete: Generation# 1, duration# 35 s 2025-05-07T08:55:13.272296Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:652: TTxCompleteDataErasureBSC Complete at schemeshard: 72057594046678944, NeedScheduleRequestToBSC# false 2025-05-07T08:55:13.275420Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269877761, Sender [1:1735:3439], Recipient [1:185:2178]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-05-07T08:55:13.275500Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4937: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-05-07T08:55:13.275545Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5713: Pipe server connected, at tablet: 72057594046678944 2025-05-07T08:55:13.275734Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271125519, Sender [1:169:2169], Recipient [1:185:2178]: NKikimrScheme.TEvDataErasureInfoRequest 2025-05-07T08:55:13.275768Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5036: StateWork, processing event TEvSchemeShard::TEvDataErasureInfoRequest 2025-05-07T08:55:13.275807Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:7712: Handle TEvDataErasureInfoRequest, at schemeshard: 72057594046678944
>> ResourcePoolClassifiersDdl::TestExplicitPoolId [GOOD]
>> ResourcePoolClassifiersDdl::TestMultiGroupClassification
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tablet_flat/ut/unittest >> NFwd_TFlatIndexCache::End [GOOD]
Test command err: 00000.000 II| FAKE_ENV: Born at 2025-05-07T08:54:53.772064Z 00000.011 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144 00000.012 II| FAKE_ENV: Starting storage for BS group 0 00000.012 II| FAKE_ENV: Starting storage for BS group 1 00000.012 II| FAKE_ENV: Starting storage for BS group 2 00000.012 II| FAKE_ENV: Starting storage for BS group 3 00000.021 II| FAKE_ENV: Model starts hard shutdown on level 7 of 8, left 3 actors 00000.021 NN| TABLET_SAUSAGECACHE: Poison cache serviced 0 reqs hit {0 0b} miss {0 0b} 00000.021 II| FAKE_ENV: Shut order, stopping 4 BS groups 00000.021 II| FAKE_ENV: DS.0 gone, left {42b, 1}, put {146b, 4} 00000.021 II| FAKE_ENV: DS.1 gone, left {105b, 3}, put {105b, 3} 00000.021 II| FAKE_ENV: DS.2 gone, left {0b, 0}, put {0b, 0} 00000.021 II| FAKE_ENV: DS.3 gone, left {0b, 0}, put {0b, 0} 00000.021 II| FAKE_ENV: All BS storage groups are stopped 00000.021 II| FAKE_ENV: Model stopped, hosted 3 actors, spent 0.000s 00000.022 II| FAKE_ENV: Logged {Emerg 0 Alert 0 Crit 0 Error 0 Left 15}, stopped 00000.000 II| FAKE_ENV: Born at 2025-05-07T08:54:53.799426Z 00000.008 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144 00000.008 II| FAKE_ENV: Starting storage for BS group 0 00000.009 II| FAKE_ENV: Starting storage for BS group 1 00000.009 II| FAKE_ENV: Starting storage for BS group 2 00000.009 II| FAKE_ENV: Starting storage for BS group 3 00000.017 II| FAKE_ENV: Model starts hard shutdown on level 7 of 8, left 3 actors 00000.018 NN| TABLET_SAUSAGECACHE: Poison cache serviced 0 reqs hit {0 0b} miss {0 0b} 00000.018 II| FAKE_ENV: Shut order, stopping 4 BS groups 00000.018 II| FAKE_ENV: DS.0 gone, left {42b, 1}, put {292b, 8} 00000.018 II| FAKE_ENV: DS.1 gone, left {210b, 6}, put {210b, 6} 00000.018 II| FAKE_ENV:
DS.2 gone, left {0b, 0}, put {0b, 0} 00000.018 II| FAKE_ENV: DS.3 gone, left {0b, 0}, put {0b, 0} 00000.018 II| FAKE_ENV: All BS storage groups are stopped 00000.018 II| FAKE_ENV: Model stopped, hosted 4 actors, spent 0.000s 00000.018 II| FAKE_ENV: Logged {Emerg 0 Alert 0 Crit 0 Error 0 Left 15}, stopped 00000.000 II| FAKE_ENV: Born at 2025-05-07T08:54:53.823635Z 00000.013 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144 00000.013 II| FAKE_ENV: Starting storage for BS group 0 00000.014 II| FAKE_ENV: Starting storage for BS group 1 00000.014 II| FAKE_ENV: Starting storage for BS group 2 00000.014 II| FAKE_ENV: Starting storage for BS group 3 00000.055 II| FAKE_ENV: Model starts hard shutdown on level 7 of 8, left 3 actors 00000.056 NN| TABLET_SAUSAGECACHE: Poison cache serviced 1 reqs hit {1 76b} miss {0 0b} 00000.056 II| FAKE_ENV: Shut order, stopping 4 BS groups 00000.056 II| FAKE_ENV: DS.2 gone, left {0b, 0}, put {132b, 2} 00000.056 II| FAKE_ENV: DS.3 gone, left {0b, 0}, put {116b, 2} 00000.056 II| FAKE_ENV: DS.0 gone, left {42b, 1}, put {1181b, 13} 00000.056 II| FAKE_ENV: DS.1 gone, left {909b, 3}, put {1913b, 12} 00000.057 II| FAKE_ENV: All BS storage groups are stopped 00000.057 II| FAKE_ENV: Model stopped, hosted 3 actors, spent 0.000s 00000.057 II| FAKE_ENV: Logged {Emerg 0 Alert 0 Crit 0 Error 0 Left 15}, stopped 00000.000 II| FAKE_ENV: Born at 2025-05-07T08:54:53.886405Z 00000.008 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144 00000.008 II| FAKE_ENV: Starting storage for BS group 0 00000.009 II| FAKE_ENV: Starting storage for BS group 1 00000.009 II| FAKE_ENV: Starting storage for BS group 2 00000.009 II| FAKE_ENV: Starting storage for BS group 3 00000.037 II| FAKE_ENV: Model starts hard shutdown on level 7 of 8, left 3 actors 00000.038 NN| TABLET_SAUSAGECACHE: Poison cache serviced 1 reqs hit {1 102443b} miss {0 0b} 00000.038 II| FAKE_ENV: Shut order, stopping 4 BS groups 00000.038 II| FAKE_ENV: DS.0 gone, left {42b, 1}, put {751b, 11} 00000.038 II| FAKE_ENV: DS.1 gone, left {541b, 3}, put {103970b, 10} 00000.038 II| FAKE_ENV: DS.2 gone, left {0b, 0}, put {0b, 0} 00000.038 II| FAKE_ENV: DS.3 gone, left {0b, 0}, put {0b, 0} 00000.039 II| FAKE_ENV: All BS storage groups are stopped 00000.039 II| FAKE_ENV: Model stopped, hosted 3 actors, spent 0.000s 00000.039 II| FAKE_ENV: Logged {Emerg 0 Alert 0 Crit 0 Error 0 Left 15}, stopped 00000.000 II| FAKE_ENV: Born at 2025-05-07T08:54:53.932056Z 00000.007 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144 00000.008 II| FAKE_ENV: Starting storage for BS group 0 00000.008 II| FAKE_ENV: Starting storage for BS group 1 00000.009 II| FAKE_ENV: Starting storage for BS group 2 00000.009 II| FAKE_ENV: Starting storage for BS group 3 ... blocking NKikimr::TEvBlobStorage::TEvCollectGarbage from FLAT_EXECUTOR to FAKE_ENV_A cookie 0 00000.092 II| TABLET_SAUSAGECACHE: Wakeup 1 ... 
unblocking NKikimr::TEvBlobStorage::TEvCollectGarbage from FLAT_EXECUTOR to FAKE_ENV_A 00000.093 II| FAKE_ENV: Model starts hard shutdown on level 7 of 8, left 3 actors 00000.094 NN| TABLET_SAUSAGECACHE: Poison cache serviced 11 reqs hit {18 513007b} miss {0 0b} 00000.094 II| FAKE_ENV: Shut order, stopping 4 BS groups 00000.094 II| FAKE_ENV: DS.0 gone, left {42b, 1}, put {2095b, 23} 00000.094 II| FAKE_ENV: DS.1 gone, left {774b, 4}, put {210604b, 21} 00000.094 II| FAKE_ENV: DS.2 gone, left {0b, 0}, put {205178b, 4} 00000.094 II| FAKE_ENV: DS.3 gone, left {0b, 0}, put {102690b, 4} 00000.094 II| FAKE_ENV: All BS storage groups are stopped 00000.095 II| FAKE_ENV: Model stopped, hosted 4 actors, spent 15.00s 00000.095 II| FAKE_ENV: Logged {Emerg 0 Alert 0 Crit 0 Error 0 Left 16}, stopped 00000.000 II| FAKE_ENV: Born at 2025-05-07T08:54:54.032800Z 00000.008 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144 00000.008 II| FAKE_ENV: Starting storage for BS group 0 00000.009 II| FAKE_ENV: Starting storage for BS group 1 00000.009 II| FAKE_ENV: Starting storage for BS group 2 00000.009 II| FAKE_ENV: Starting storage for BS group 3 00000.054 II| FAKE_ENV: Model starts hard shutdown on level 7 of 8, left 3 actors 00000.055 NN| TABLET_SAUSAGECACHE: Poison cache serviced 3 reqs hit {3 307329b} miss {0 0b} 00000.055 II| FAKE_ENV: Shut order, stopping 4 BS groups 00000.055 II| FAKE_ENV: DS.0 gone, left {42b, 1}, put {1830b, 23} 00000.055 II| FAKE_ENV: DS.1 gone, left {1247b, 3}, put {311467b, 22} 00000.055 II| FAKE_ENV: DS.2 gone, left {0b, 0}, put {0b, 0} 00000.055 II| FAKE_ENV: DS.3 gone, left {0b, 0}, put {0b, 0} 00000.055 II| FAKE_ENV: All BS storage groups are stopped 00000.055 II| FAKE_ENV: Model stopped, hosted 3 actors, spent 0.000s 00000.056 II| FAKE_ENV: Logged {Emerg 0 Alert 0 Crit 0 Error 0 Left 15}, stopped 00000.000 II| FAKE_ENV: Born at 2025-05-07T08:54:54.095634Z 00000.009 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144 00000.009 II| FAKE_ENV: Starting storage for BS group 0 00000.010 II| FAKE_ENV: Starting storage for BS group 1 00000.010 II| FAKE_ENV: Starting storage for BS group 2 00000.010 II| FAKE_ENV: Starting storage for BS group 3 00000.047 II| FAKE_ENV: Model starts hard shutdown on level 7 of 8, left 5 actors 00000.048 NN| TABLET_SAUSAGECACHE: Poison cache serviced 4 reqs hit {8 307836b} miss {0 0b} 00000.048 II| FAKE_ENV: Shut order, stopping 4 BS groups 00000.048 II| FAKE_ENV: DS.0 gone, left {57b, 2}, put {1436b, 31} 00000.048 II| FAKE_ENV: DS.1 gone, left {629b, 3}, put {310476b, 16} 00000.048 II| FAKE_ENV: DS.2 gone, left {0b, 0}, put {0b, 0} 00000.048 II| FAKE_ENV: DS.3 gone, left {0b, 0}, put {0b, 0} 00000.049 II| FAKE_ENV: All BS storage groups are stopped 00000.049 II| FAKE_ENV: Model stopped, hosted 5 actors, spent 0.000s 00000.049 II| FAKE_ENV: Logged {Emerg 0 Alert 0 Crit 0 Error 0 Left 15}, stopped 00000.000 II| FAKE_ENV: Born at 2025-05-07T08:54:54.150618Z 00000.008 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144 00000.009 II| FAKE_ENV: Starting storage for BS group 0 00000.009 II| FAKE_ENV: Starting storage for BS group 1 00000.009 II| FAKE_ENV: Starting storage for BS group 2 00000.009 II| FAKE_ENV: Starting storage for BS group 3 00000.062 II| FAKE_ENV: Model starts hard shutdown on level 7 of 8, left 3 actors 00000.063 NN| 
TABLET_SAUSAGECACHE: Poison cache serviced 2 reqs hit {2 194646b} miss {0 0b} 00000.063 II| FAKE_ENV: Shut order, stopping 4 BS groups 00000.063 II| FAKE_ENV: DS.0 gone, left {42b, 1}, put {1768b, 27} 00000.063 II| FAKE_ENV: DS.1 gone, left {732b, 6}, put {197813b, 24} 00000.063 II| FAKE_ENV: DS.2 gone, left {0b, 0}, put {0b, 0} 00000.063 II| FAKE_ENV: DS.3 gone, left {0b, 0}, put {0b, 0} 00000.063 II| FAKE_ENV: All BS storage groups are stopped 00000.063 II| FAKE_ENV: Model stopped, hosted 3 actors, spent 0.000s 00000.064 II| FAKE_ENV: Logged {Emerg 0 Alert 0 Crit 0 Error 0 Left 15}, stopped 00000.000 II| FAKE_ENV: Born at 2025-05-07T08:54:54.219935Z 00000.012 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144 00000.012 II| FAKE_ENV: Starting storage for BS group 0 00000.013 II| FAKE_ENV: Starting storage for BS group 1 00000.013 II| FAKE_ENV: Starting storage for BS group 2 00000.013 II| FAKE_ENV: Starting storage for BS group 3 00000.019 II| FAKE_ENV: Model starts hard shutdown on level 7 of 8, left 3 actors 00000.020 NN| TABLET_SAUSAGECACHE: Poison cache serviced 0 reqs hit {0 0b} miss {0 0b} 00000.020 II| FAKE_ENV: Shut order, stopping 4 BS groups 00000.020 II| FAKE_ENV: DS.0 gone, left {42b, 1}, put {326b, 7} 00000.020 II| FAKE_ENV: DS.1 gone, left {418b, 4}, put {453b, 5} 00000.020 II| FAKE_ENV: DS.2 gone, left {0b, 0}, put {0b, 0} 00000.020 II| FAKE_ENV: DS.3 gone, left {0b, 0}, put {0b, 0} 00000.020 II| FAKE_ENV: All BS storage groups are stopped 00000.020 II| FAKE_ENV: Model stopped, hosted 3 actors, spent 0.000s 00000.020 II| FAKE_ENV: Logged {Emerg 0 Alert 0 Crit 0 Error 0 Left 15}, stopped 00000.000 II| FAKE_ENV: Born at 2025-05-07T08:54:54.253331Z 00000.016 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144 00000.016 II| FAKE_ENV: Starting storage for BS group 0 00000.021 II| FAKE_ENV: Starting storage for BS group 1 00000.021 II| FAKE_ENV: Starting storage for BS group 2 00000.021 II| FAKE_ENV: Starting storage for BS group 3 ... blocking NKikimr::TEvBlobStorage::TEvCollectGarbage from FLAT_EXECUTOR to FAKE_ENV_A cookie 0 00000.108 II| TABLET_SAUSAGECACHE: Wakeup 1 ... unblocking NKikimr::TEvBlobStorage::TEvCollectGarbage from FLAT_EXECUTOR to FAKE_ENV_A 00000.109 II| FAKE_ENV: Model starts hard shutdown on level 7 of 8, left 3 actors 00000.110 NN| TABLET_SAUSAGECACHE: Poison cache serviced 6 reqs hit {8 410030b} miss {0 0b} 00000.110 II| FAKE_ENV: Shut order, stopping 4 BS groups 00000.110 II| FAKE_ENV: DS.0 gone, left {42b, 1}, put {1492b, 23} 00000.110 II| FAKE_ENV: DS.1 gone, left {504b, 4}, put {310786b, 20} 00000.110 II| FAKE_ENV: DS.2 gone, left {0b, 0}, put {0b, 0} 00000.110 II| FAKE_ENV: DS.3 gone, left {0b, 0}, put {0b, 0} 00000.110 II| FAKE_ENV: All BS storage groups are stopped 00000.110 II| FAKE_ENV: Model stopped, hosted 5 actors, spent 15.00s 00000.110 II| FAKE_ENV: Logged {Emerg 0 Alert 0 Crit 0 Error 0 Left 16}, stopped 00000.000 II| FAKE_ENV: Born at 2025-05-07T08:54:54.375216Z 00000.008 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144 00000.008 II| FAKE_ENV: Starting storage for BS group 0 00000.008 II| FAKE_ENV: Starting storage for BS group 1 00000.008 II| FAKE_ENV: Starting storage for BS group 2 00000.009 II| FAKE_ENV: Starting storage for ... 
3} Label{34 rev 1, 50b}, [6, +2)row | ERowOp 1: {6} {Set 1 Uint32 : 600} | ERowOp 1: {7} {Set 1 Uint32 : 700} + Rows{4} Label{44 rev 1, 50b}, [8, +2)row | ERowOp 1: {8} {Set 1 Uint32 : 800} | ERowOp 1: {9} {Set 1 Uint32 : 900} + Rows{5} Label{54 rev 1, 50b}, [10, +2)row | ERowOp 1: {10} {Set 1 Uint32 : 1000} | ERowOp 1: {11} {Set 1 Uint32 : 1100} + Rows{6} Label{64 rev 1, 50b}, [12, +2)row | ERowOp 1: {12} {Set 1 Uint32 : 1200} | ERowOp 1: {13} {Set 1 Uint32 : 1300} + Rows{7} Label{74 rev 1, 50b}, [14, +2)row | ERowOp 1: {14} {Set 1 Uint32 : 1400} | ERowOp 1: {15} {Set 1 Uint32 : 1500} + Rows{8} Label{84 rev 1, 50b}, [16, +2)row | ERowOp 1: {16} {Set 1 Uint32 : 1600} | ERowOp 1: {17} {Set 1 Uint32 : 1700} + Rows{9} Label{94 rev 1, 50b}, [18, +2)row | ERowOp 1: {18} {Set 1 Uint32 : 1800} | ERowOp 1: {19} {Set 1 Uint32 : 1900} + Rows{10} Label{104 rev 1, 50b}, [20, +2)row | ERowOp 1: {20} {Set 1 Uint32 : 2000} | ERowOp 1: {21} {Set 1 Uint32 : 2100} + Rows{11} Label{114 rev 1, 50b}, [22, +2)row | ERowOp 1: {22} {Set 1 Uint32 : 2200} | ERowOp 1: {23} {Set 1 Uint32 : 2300} + Rows{12} Label{124 rev 1, 50b}, [24, +2)row | ERowOp 1: {24} {Set 1 Uint32 : 2400} | ERowOp 1: {25} {Set 1 Uint32 : 2500} + Rows{13} Label{134 rev 1, 50b}, [26, +2)row | ERowOp 1: {26} {Set 1 Uint32 : 2600} | ERowOp 1: {27} {Set 1 Uint32 : 2700} + Rows{14} Label{144 rev 1, 50b}, [28, +2)row | ERowOp 1: {28} {Set 1 Uint32 : 2800} | ERowOp 1: {29} {Set 1 Uint32 : 2900} + Rows{15} Label{154 rev 1, 50b}, [30, +2)row | ERowOp 1: {30} {Set 1 Uint32 : 3000} | ERowOp 1: {31} {Set 1 Uint32 : 3100} + Rows{16} Label{164 rev 1, 50b}, [32, +2)row | ERowOp 1: {32} {Set 1 Uint32 : 3200} | ERowOp 1: {33} {Set 1 Uint32 : 3300} + Rows{17} Label{174 rev 1, 50b}, [34, +2)row | ERowOp 1: {34} {Set 1 Uint32 : 3400} | ERowOp 1: {35} {Set 1 Uint32 : 3500} + Rows{18} Label{184 rev 1, 50b}, [36, +2)row | ERowOp 1: {36} {Set 1 Uint32 : 3600} | ERowOp 1: {37} {Set 1 Uint32 : 3700} + Rows{19} Label{194 rev 1, 50b}, [38, +2)row | ERowOp 1: {38} {Set 1 Uint32 : 3800} | ERowOp 1: {39} {Set 1 Uint32 : 3900} Part{[1:2:3:0:0:0:0] eph 0, 1000b 40r} data 1479b + FlatIndex{20} Label{3 rev 3, 453b} 21 rec | Page Row Bytes (Uint32) | 0 0 50b {0} | 1 2 50b {2} | 2 4 50b {4} | 3 6 50b {6} | 4 8 50b {8} | 5 10 50b {10} | 6 12 50b {12} | 7 14 50b {14} | 8 16 50b {16} | 9 18 50b {18} | 10 20 50b {20} | 11 22 50b {22} | 12 24 50b {24} | 13 26 50b {26} | 14 28 50b {28} | 15 30 50b {30} | 16 32 50b {32} | 17 34 50b {34} | 18 36 50b {36} | 19 38 50b {38} | 19 39 50b {39} + Rows{0} Label{04 rev 1, 50b}, [0, +2)row | ERowOp 1: {0} {Set 1 Uint32 : 0} | ERowOp 1: {1} {Set 1 Uint32 : 100} + Rows{1} Label{14 rev 1, 50b}, [2, +2)row | ERowOp 1: {2} {Set 1 Uint32 : 200} | ERowOp 1: {3} {Set 1 Uint32 : 300} + Rows{2} Label{24 rev 1, 50b}, [4, +2)row | ERowOp 1: {4} {Set 1 Uint32 : 400} | ERowOp 1: {5} {Set 1 Uint32 : 500} + Rows{3} Label{34 rev 1, 50b}, [6, +2)row | ERowOp 1: {6} {Set 1 Uint32 : 600} | ERowOp 1: {7} {Set 1 Uint32 : 700} + Rows{4} Label{44 rev 1, 50b}, [8, +2)row | ERowOp 1: {8} {Set 1 Uint32 : 800} | ERowOp 1: {9} {Set 1 Uint32 : 900} + Rows{5} Label{54 rev 1, 50b}, [10, +2)row | ERowOp 1: {10} {Set 1 Uint32 : 1000} | ERowOp 1: {11} {Set 1 Uint32 : 1100} + Rows{6} Label{64 rev 1, 50b}, [12, +2)row | ERowOp 1: {12} {Set 1 Uint32 : 1200} | ERowOp 1: {13} {Set 1 Uint32 : 1300} + Rows{7} Label{74 rev 1, 50b}, [14, +2)row | ERowOp 1: {14} {Set 1 Uint32 : 1400} | ERowOp 1: {15} {Set 1 Uint32 : 1500} + Rows{8} Label{84 rev 1, 50b}, [16, +2)row | ERowOp 1: {16} {Set 1 
Uint32 : 1600} | ERowOp 1: {17} {Set 1 Uint32 : 1700} + Rows{9} Label{94 rev 1, 50b}, [18, +2)row | ERowOp 1: {18} {Set 1 Uint32 : 1800} | ERowOp 1: {19} {Set 1 Uint32 : 1900} + Rows{10} Label{104 rev 1, 50b}, [20, +2)row | ERowOp 1: {20} {Set 1 Uint32 : 2000} | ERowOp 1: {21} {Set 1 Uint32 : 2100} + Rows{11} Label{114 rev 1, 50b}, [22, +2)row | ERowOp 1: {22} {Set 1 Uint32 : 2200} | ERowOp 1: {23} {Set 1 Uint32 : 2300} + Rows{12} Label{124 rev 1, 50b}, [24, +2)row | ERowOp 1: {24} {Set 1 Uint32 : 2400} | ERowOp 1: {25} {Set 1 Uint32 : 2500} + Rows{13} Label{134 rev 1, 50b}, [26, +2)row | ERowOp 1: {26} {Set 1 Uint32 : 2600} | ERowOp 1: {27} {Set 1 Uint32 : 2700} + Rows{14} Label{144 rev 1, 50b}, [28, +2)row | ERowOp 1: {28} {Set 1 Uint32 : 2800} | ERowOp 1: {29} {Set 1 Uint32 : 2900} + Rows{15} Label{154 rev 1, 50b}, [30, +2)row | ERowOp 1: {30} {Set 1 Uint32 : 3000} | ERowOp 1: {31} {Set 1 Uint32 : 3100} + Rows{16} Label{164 rev 1, 50b}, [32, +2)row | ERowOp 1: {32} {Set 1 Uint32 : 3200} | ERowOp 1: {33} {Set 1 Uint32 : 3300} + Rows{17} Label{174 rev 1, 50b}, [34, +2)row | ERowOp 1: {34} {Set 1 Uint32 : 3400} | ERowOp 1: {35} {Set 1 Uint32 : 3500} + Rows{18} Label{184 rev 1, 50b}, [36, +2)row | ERowOp 1: {36} {Set 1 Uint32 : 3600} | ERowOp 1: {37} {Set 1 Uint32 : 3700} + Rows{19} Label{194 rev 1, 50b}, [38, +2)row | ERowOp 1: {38} {Set 1 Uint32 : 3800} | ERowOp 1: {39} {Set 1 Uint32 : 3900} Part{[1:2:3:0:0:0:0] eph 0, 1000b 40r} data 1479b + FlatIndex{20} Label{3 rev 3, 453b} 21 rec | Page Row Bytes (Uint32) | 0 0 50b {0} | 1 2 50b {2} | 2 4 50b {4} | 3 6 50b {6} | 4 8 50b {8} | 5 10 50b {10} | 6 12 50b {12} | 7 14 50b {14} | 8 16 50b {16} | 9 18 50b {18} | 10 20 50b {20} | 11 22 50b {22} | 12 24 50b {24} | 13 26 50b {26} | 14 28 50b {28} | 15 30 50b {30} | 16 32 50b {32} | 17 34 50b {34} | 18 36 50b {36} | 19 38 50b {38} | 19 39 50b {39} + Rows{0} Label{04 rev 1, 50b}, [0, +2)row | ERowOp 1: {0} {Set 1 Uint32 : 0} | ERowOp 1: {1} {Set 1 Uint32 : 100} + Rows{1} Label{14 rev 1, 50b}, [2, +2)row | ERowOp 1: {2} {Set 1 Uint32 : 200} | ERowOp 1: {3} {Set 1 Uint32 : 300} + Rows{2} Label{24 rev 1, 50b}, [4, +2)row | ERowOp 1: {4} {Set 1 Uint32 : 400} | ERowOp 1: {5} {Set 1 Uint32 : 500} + Rows{3} Label{34 rev 1, 50b}, [6, +2)row | ERowOp 1: {6} {Set 1 Uint32 : 600} | ERowOp 1: {7} {Set 1 Uint32 : 700} + Rows{4} Label{44 rev 1, 50b}, [8, +2)row | ERowOp 1: {8} {Set 1 Uint32 : 800} | ERowOp 1: {9} {Set 1 Uint32 : 900} + Rows{5} Label{54 rev 1, 50b}, [10, +2)row | ERowOp 1: {10} {Set 1 Uint32 : 1000} | ERowOp 1: {11} {Set 1 Uint32 : 1100} + Rows{6} Label{64 rev 1, 50b}, [12, +2)row | ERowOp 1: {12} {Set 1 Uint32 : 1200} | ERowOp 1: {13} {Set 1 Uint32 : 1300} + Rows{7} Label{74 rev 1, 50b}, [14, +2)row | ERowOp 1: {14} {Set 1 Uint32 : 1400} | ERowOp 1: {15} {Set 1 Uint32 : 1500} + Rows{8} Label{84 rev 1, 50b}, [16, +2)row | ERowOp 1: {16} {Set 1 Uint32 : 1600} | ERowOp 1: {17} {Set 1 Uint32 : 1700} + Rows{9} Label{94 rev 1, 50b}, [18, +2)row | ERowOp 1: {18} {Set 1 Uint32 : 1800} | ERowOp 1: {19} {Set 1 Uint32 : 1900} + Rows{10} Label{104 rev 1, 50b}, [20, +2)row | ERowOp 1: {20} {Set 1 Uint32 : 2000} | ERowOp 1: {21} {Set 1 Uint32 : 2100} + Rows{11} Label{114 rev 1, 50b}, [22, +2)row | ERowOp 1: {22} {Set 1 Uint32 : 2200} | ERowOp 1: {23} {Set 1 Uint32 : 2300} + Rows{12} Label{124 rev 1, 50b}, [24, +2)row | ERowOp 1: {24} {Set 1 Uint32 : 2400} | ERowOp 1: {25} {Set 1 Uint32 : 2500} + Rows{13} Label{134 rev 1, 50b}, [26, +2)row | ERowOp 1: {26} {Set 1 Uint32 : 2600} | ERowOp 1: {27} {Set 1 Uint32 
: 2700} + Rows{14} Label{144 rev 1, 50b}, [28, +2)row | ERowOp 1: {28} {Set 1 Uint32 : 2800} | ERowOp 1: {29} {Set 1 Uint32 : 2900} + Rows{15} Label{154 rev 1, 50b}, [30, +2)row | ERowOp 1: {30} {Set 1 Uint32 : 3000} | ERowOp 1: {31} {Set 1 Uint32 : 3100} + Rows{16} Label{164 rev 1, 50b}, [32, +2)row | ERowOp 1: {32} {Set 1 Uint32 : 3200} | ERowOp 1: {33} {Set 1 Uint32 : 3300} + Rows{17} Label{174 rev 1, 50b}, [34, +2)row | ERowOp 1: {34} {Set 1 Uint32 : 3400} | ERowOp 1: {35} {Set 1 Uint32 : 3500} + Rows{18} Label{184 rev 1, 50b}, [36, +2)row | ERowOp 1: {36} {Set 1 Uint32 : 3600} | ERowOp 1: {37} {Set 1 Uint32 : 3700} + Rows{19} Label{194 rev 1, 50b}, [38, +2)row | ERowOp 1: {38} {Set 1 Uint32 : 3800} | ERowOp 1: {39} {Set 1 Uint32 : 3900} Part{[1:2:3:0:0:0:0] eph 0, 1000b 40r} data 1479b + FlatIndex{20} Label{3 rev 3, 453b} 21 rec | Page Row Bytes (Uint32) | 0 0 50b {0} | 1 2 50b {2} | 2 4 50b {4} | 3 6 50b {6} | 4 8 50b {8} | 5 10 50b {10} | 6 12 50b {12} | 7 14 50b {14} | 8 16 50b {16} | 9 18 50b {18} | 10 20 50b {20} | 11 22 50b {22} | 12 24 50b {24} | 13 26 50b {26} | 14 28 50b {28} | 15 30 50b {30} | 16 32 50b {32} | 17 34 50b {34} | 18 36 50b {36} | 19 38 50b {38} | 19 39 50b {39} + Rows{0} Label{04 rev 1, 50b}, [0, +2)row | ERowOp 1: {0} {Set 1 Uint32 : 0} | ERowOp 1: {1} {Set 1 Uint32 : 100} + Rows{1} Label{14 rev 1, 50b}, [2, +2)row | ERowOp 1: {2} {Set 1 Uint32 : 200} | ERowOp 1: {3} {Set 1 Uint32 : 300} + Rows{2} Label{24 rev 1, 50b}, [4, +2)row | ERowOp 1: {4} {Set 1 Uint32 : 400} | ERowOp 1: {5} {Set 1 Uint32 : 500} + Rows{3} Label{34 rev 1, 50b}, [6, +2)row | ERowOp 1: {6} {Set 1 Uint32 : 600} | ERowOp 1: {7} {Set 1 Uint32 : 700} + Rows{4} Label{44 rev 1, 50b}, [8, +2)row | ERowOp 1: {8} {Set 1 Uint32 : 800} | ERowOp 1: {9} {Set 1 Uint32 : 900} + Rows{5} Label{54 rev 1, 50b}, [10, +2)row | ERowOp 1: {10} {Set 1 Uint32 : 1000} | ERowOp 1: {11} {Set 1 Uint32 : 1100} + Rows{6} Label{64 rev 1, 50b}, [12, +2)row | ERowOp 1: {12} {Set 1 Uint32 : 1200} | ERowOp 1: {13} {Set 1 Uint32 : 1300} + Rows{7} Label{74 rev 1, 50b}, [14, +2)row | ERowOp 1: {14} {Set 1 Uint32 : 1400} | ERowOp 1: {15} {Set 1 Uint32 : 1500} + Rows{8} Label{84 rev 1, 50b}, [16, +2)row | ERowOp 1: {16} {Set 1 Uint32 : 1600} | ERowOp 1: {17} {Set 1 Uint32 : 1700} + Rows{9} Label{94 rev 1, 50b}, [18, +2)row | ERowOp 1: {18} {Set 1 Uint32 : 1800} | ERowOp 1: {19} {Set 1 Uint32 : 1900} + Rows{10} Label{104 rev 1, 50b}, [20, +2)row | ERowOp 1: {20} {Set 1 Uint32 : 2000} | ERowOp 1: {21} {Set 1 Uint32 : 2100} + Rows{11} Label{114 rev 1, 50b}, [22, +2)row | ERowOp 1: {22} {Set 1 Uint32 : 2200} | ERowOp 1: {23} {Set 1 Uint32 : 2300} + Rows{12} Label{124 rev 1, 50b}, [24, +2)row | ERowOp 1: {24} {Set 1 Uint32 : 2400} | ERowOp 1: {25} {Set 1 Uint32 : 2500} + Rows{13} Label{134 rev 1, 50b}, [26, +2)row | ERowOp 1: {26} {Set 1 Uint32 : 2600} | ERowOp 1: {27} {Set 1 Uint32 : 2700} + Rows{14} Label{144 rev 1, 50b}, [28, +2)row | ERowOp 1: {28} {Set 1 Uint32 : 2800} | ERowOp 1: {29} {Set 1 Uint32 : 2900} + Rows{15} Label{154 rev 1, 50b}, [30, +2)row | ERowOp 1: {30} {Set 1 Uint32 : 3000} | ERowOp 1: {31} {Set 1 Uint32 : 3100} + Rows{16} Label{164 rev 1, 50b}, [32, +2)row | ERowOp 1: {32} {Set 1 Uint32 : 3200} | ERowOp 1: {33} {Set 1 Uint32 : 3300} + Rows{17} Label{174 rev 1, 50b}, [34, +2)row | ERowOp 1: {34} {Set 1 Uint32 : 3400} | ERowOp 1: {35} {Set 1 Uint32 : 3500} + Rows{18} Label{184 rev 1, 50b}, [36, +2)row | ERowOp 1: {36} {Set 1 Uint32 : 3600} | ERowOp 1: {37} {Set 1 Uint32 : 3700} + Rows{19} Label{194 rev 1, 50b}, 
[38, +2)row | ERowOp 1: {38} {Set 1 Uint32 : 3800} | ERowOp 1: {39} {Set 1 Uint32 : 3900} >> THiveTest::TestHiveBalancerDifferentResources [GOOD] >> THiveTest::TestHiveBalancerDifferentResources2 >> Viewer::JsonStorageListingV2NodeIdFilter [GOOD] >> Viewer::JsonStorageListingV2PDiskIdFilter ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/hive/ut/unittest >> THiveTest::TestExternalBootWhenLocked [GOOD] Test command err: 2025-05-07T08:54:37.272803Z node 1 :BS_NODE DEBUG: {NW26@node_warden_impl.cpp:319} Bootstrap 2025-05-07T08:54:37.277213Z node 1 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# true Comprehensive# false Origin# initial ServiceSet# {PDisks { NodeID: 1 PDiskID: 1 Path: "/tmp/pdisk.dat" PDiskGuid: 1 } VDisks { VDiskID { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } Groups { GroupID: 0 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } } } AvailabilityDomains: 0 } 2025-05-07T08:54:37.277501Z node 1 :BS_NODE DEBUG: {NW04@node_warden_pdisk.cpp:196} StartLocalPDisk NodeId# 1 PDiskId# 1 Path# "/tmp/pdisk.dat" PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} Temporary# false 2025-05-07T08:54:37.278255Z node 1 :BS_NODE DEBUG: {NW23@node_warden_vdisk.cpp:67} StartLocalVDiskActor SlayInFlight# false VDiskId# [0:1:0:0:0] VSlotId# 1:1:0 PDiskGuid# 1 DonorMode# false PDiskRestartInFlight# false PDisksWaitingToStart# false 2025-05-07T08:54:37.279551Z node 1 :BS_NODE DEBUG: {NW24@node_warden_vdisk.cpp:265} StartLocalVDiskActor done VDiskId# [0:1:0:0:0] VSlotId# 1:1:0 PDiskGuid# 1 2025-05-07T08:54:37.279610Z node 1 :BS_NODE DEBUG: {NW12@node_warden_proxy.cpp:22} StartLocalProxy GroupId# 0 2025-05-07T08:54:37.280703Z node 1 :BS_NODE DEBUG: {NW21@node_warden_pipe.cpp:23} EstablishPipe AvailDomainId# 0 PipeClientId# [1:73:2076] ControllerId# 72057594037932033 2025-05-07T08:54:37.280750Z node 1 :BS_NODE DEBUG: {NW20@node_warden_pipe.cpp:72} SendRegisterNode 2025-05-07T08:54:37.280874Z node 1 :BS_NODE DEBUG: {NW11@node_warden_impl.cpp:294} StartInvalidGroupProxy GroupId# 4294967295 2025-05-07T08:54:37.281231Z node 1 :BS_NODE DEBUG: {NW62@node_warden_impl.cpp:306} StartRequestReportingThrottler 2025-05-07T08:54:37.297691Z node 1 :BS_PROXY INFO: dsproxy_state.cpp:146: Group# 0 TEvConfigureProxy received GroupGeneration# 1 IsLimitedKeyless# false Marker# DSP02 2025-05-07T08:54:37.297774Z node 1 :BS_PROXY NOTICE: dsproxy_state.cpp:294: EnsureMonitoring Group# 0 IsLimitedKeyless# 0 fullIfPossible# 0 Marker# DSP58 2025-05-07T08:54:37.301008Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:72:2075] Create Queue# [1:81:2081] targetNodeId# 1 Marker# DSP01 2025-05-07T08:54:37.301200Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:72:2075] Create Queue# [1:82:2082] targetNodeId# 1 Marker# DSP01 2025-05-07T08:54:37.301369Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:72:2075] Create Queue# [1:83:2083] targetNodeId# 1 Marker# DSP01 2025-05-07T08:54:37.301549Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:72:2075] Create Queue# [1:84:2084] targetNodeId# 1 Marker# DSP01 2025-05-07T08:54:37.301716Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:72:2075] Create Queue# [1:85:2085] targetNodeId# 1 Marker# DSP01 2025-05-07T08:54:37.301844Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:72:2075] Create 
Queue# [1:86:2086] targetNodeId# 1 Marker# DSP01 2025-05-07T08:54:37.302048Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:72:2075] Create Queue# [1:87:2087] targetNodeId# 1 Marker# DSP01 2025-05-07T08:54:37.302099Z node 1 :BS_PROXY INFO: dsproxy_state.cpp:29: Group# 0 SetStateEstablishingSessions Marker# DSP03 2025-05-07T08:54:37.302220Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594037932033] ::Bootstrap [1:73:2076] 2025-05-07T08:54:37.302273Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594037932033] lookup [1:73:2076] 2025-05-07T08:54:37.302348Z node 1 :BS_PROXY NOTICE: dsproxy_state.cpp:234: Group# 4294967295 HasInvalidGroupId# 1 Bootstrap -> StateEjected Marker# DSP42 2025-05-07T08:54:37.302408Z node 1 :BS_NODE DEBUG: {NWDC00@distconf.cpp:22} Bootstrap 2025-05-07T08:54:37.303349Z node 1 :BS_NODE DEBUG: {NWDC40@distconf_persistent_storage.cpp:25} TReaderActor bootstrap Paths# [] 2025-05-07T08:54:37.303465Z node 2 :BS_NODE DEBUG: {NW26@node_warden_impl.cpp:319} Bootstrap 2025-05-07T08:54:37.306598Z node 2 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# true Comprehensive# false Origin# initial ServiceSet# {PDisks { NodeID: 1 PDiskID: 1 Path: "/tmp/pdisk.dat" PDiskGuid: 1 } VDisks { VDiskID { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } Groups { GroupID: 0 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } } } AvailabilityDomains: 0 } 2025-05-07T08:54:37.306754Z node 2 :BS_NODE DEBUG: {NW12@node_warden_proxy.cpp:22} StartLocalProxy GroupId# 0 2025-05-07T08:54:37.307704Z node 2 :BS_NODE DEBUG: {NW21@node_warden_pipe.cpp:23} EstablishPipe AvailDomainId# 0 PipeClientId# [2:96:2074] ControllerId# 72057594037932033 2025-05-07T08:54:37.307747Z node 2 :BS_NODE DEBUG: {NW20@node_warden_pipe.cpp:72} SendRegisterNode 2025-05-07T08:54:37.307821Z node 2 :BS_NODE DEBUG: {NW11@node_warden_impl.cpp:294} StartInvalidGroupProxy GroupId# 4294967295 2025-05-07T08:54:37.308062Z node 2 :BS_NODE DEBUG: {NW62@node_warden_impl.cpp:306} StartRequestReportingThrottler 2025-05-07T08:54:37.310185Z node 2 :BS_PROXY INFO: dsproxy_state.cpp:146: Group# 0 TEvConfigureProxy received GroupGeneration# 1 IsLimitedKeyless# false Marker# DSP02 2025-05-07T08:54:37.310237Z node 2 :BS_PROXY NOTICE: dsproxy_state.cpp:294: EnsureMonitoring Group# 0 IsLimitedKeyless# 0 fullIfPossible# 0 Marker# DSP58 2025-05-07T08:54:37.312189Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [2:95:2073] Create Queue# [2:102:2078] targetNodeId# 1 Marker# DSP01 2025-05-07T08:54:37.312349Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [2:95:2073] Create Queue# [2:103:2079] targetNodeId# 1 Marker# DSP01 2025-05-07T08:54:37.312495Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [2:95:2073] Create Queue# [2:104:2080] targetNodeId# 1 Marker# DSP01 2025-05-07T08:54:37.312649Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [2:95:2073] Create Queue# [2:105:2081] targetNodeId# 1 Marker# DSP01 2025-05-07T08:54:37.312784Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [2:95:2073] Create Queue# [2:106:2082] targetNodeId# 1 Marker# DSP01 2025-05-07T08:54:37.312963Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [2:95:2073] Create Queue# [2:107:2083] targetNodeId# 1 Marker# DSP01 2025-05-07T08:54:37.313123Z node 2 
:BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [2:95:2073] Create Queue# [2:108:2084] targetNodeId# 1 Marker# DSP01 2025-05-07T08:54:37.313155Z node 2 :BS_PROXY INFO: dsproxy_state.cpp:29: Group# 0 SetStateEstablishingSessions Marker# DSP03 2025-05-07T08:54:37.313219Z node 2 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594037932033] ::Bootstrap [2:96:2074] 2025-05-07T08:54:37.313249Z node 2 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594037932033] lookup [2:96:2074] 2025-05-07T08:54:37.313323Z node 2 :BS_PROXY NOTICE: dsproxy_state.cpp:234: Group# 4294967295 HasInvalidGroupId# 1 Bootstrap -> StateEjected Marker# DSP42 2025-05-07T08:54:37.313362Z node 2 :BS_NODE DEBUG: {NWDC00@distconf.cpp:22} Bootstrap 2025-05-07T08:54:37.313876Z node 2 :BS_NODE DEBUG: {NWDC40@distconf_persistent_storage.cpp:25} TReaderActor bootstrap Paths# [] 2025-05-07T08:54:37.314060Z node 3 :BS_NODE DEBUG: {NW26@node_warden_impl.cpp:319} Bootstrap 2025-05-07T08:54:37.316899Z node 3 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# true Comprehensive# false Origin# initial ServiceSet# {PDisks { NodeID: 1 PDiskID: 1 Path: "/tmp/pdisk.dat" PDiskGuid: 1 } VDisks { VDiskID { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } Groups { GroupID: 0 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } } } AvailabilityDomains: 0 } 2025-05-07T08:54:37.317053Z node 3 :BS_NODE DEBUG: {NW12@node_warden_proxy.cpp:22} StartLocalProxy GroupId# 0 2025-05-07T08:54:37.317950Z node 3 :BS_NODE DEBUG: {NW21@node_warden_pipe.cpp:23} EstablishPipe AvailDomainId# 0 PipeClientId# [3:114:2074] ControllerId# 72057594037932033 2025-05-07T08:54:37.318018Z node 3 :BS_NODE DEBUG: {NW20@node_warden_pipe.cpp:72} SendRegisterNode 2025-05-07T08:54:37.318108Z node 3 :BS_NODE DEBUG: {NW11@node_warden_impl.cpp:294} StartInvalidGroupProxy GroupId# 4294967295 2025-05-07T08:54:37.318327Z node 3 :BS_NODE DEBUG: {NW62@node_warden_impl.cpp:306} StartRequestReportingThrottler 2025-05-07T08:54:37.320508Z node 3 :BS_PROXY INFO: dsproxy_state.cpp:146: Group# 0 TEvConfigureProxy received GroupGeneration# 1 IsLimitedKeyless# false Marker# DSP02 2025-05-07T08:54:37.320559Z node 3 :BS_PROXY NOTICE: dsproxy_state.cpp:294: EnsureMonitoring Group# 0 IsLimitedKeyless# 0 fullIfPossible# 0 Marker# DSP58 2025-05-07T08:54:37.322531Z node 3 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [3:113:2073] Create Queue# [3:120:2078] targetNodeId# 1 Marker# DSP01 2025-05-07T08:54:37.322729Z node 3 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [3:113:2073] Create Queue# [3:121:2079] targetNodeId# 1 Marker# DSP01 2025-05-07T08:54:37.322905Z node 3 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [3:113:2073] Create Queue# [3:122:2080] targetNodeId# 1 Marker# DSP01 2025-05-07T08:54:37.323045Z node 3 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [3:113:2073] Create Queue# [3:123:2081] targetNodeId# 1 Marker# DSP01 2025-05-07T08:54:37.323204Z node 3 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [3:113:2073] Create Queue# [3:124:2082] targetNodeId# 1 Marker# DSP01 2025-05-07T08:54:37.323360Z node 3 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [3:113:2073] Create Queue# [3:125:2083] targetNodeId# 1 Marker# DSP01 2025-05-07T08:54:37.323505Z node 3 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [3:113:2073] Create 
Queue# [3:126:2084] targetNodeId# 1 Marker# DSP01 2025-05-07T08:54:37.323552Z node 3 :BS_PROXY INFO: dsproxy_state.cpp:29: Group# 0 SetStateEstablishingSessions Marker# DSP03 2025-05-07T08:54:37.323639Z node 3 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594037932033] ::Bootstrap [3:114:2074] 2025-05-07T08:54:37.323689Z node 3 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594037932033] lookup [3:114:2074] 2025-05-07T08:54:37.323745Z node 3 :BS_PROXY NOTICE: dsproxy_state.cpp:234: Group# 4294967295 HasInvalidGroupId# 1 Bootstrap -> StateEjected Marker# DSP42 2025-05-07T08:54:37.323788Z node 3 :BS_NODE DEBUG: {NWDC00@distconf.cpp:22} Bootstrap 2025-05-07T08:54:37.324162Z node 3 :BS_NODE DEBUG: {NWDC40@distconf_persistent_storage.cpp:25} TReaderActor bootstrap Paths# [] 2025-05-07T08:54:37.324625Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72057594037932033 entry.State: StInit ev: {EvForward TabletID: 72057594037932033 Ev: nullptr Flags: 1:2:0} 2025-05-07T08:54:37.344067Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:149: TClient[72057594037932033] queue send [1:73:2076] 2025-05-07T08:54:37.344148Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:255} StateWaitForInit event Type# 131082 StorageConfigLoaded# false NodeListObtained# false PendingEvents.size# 0 2025-05-07T08:54:37.344201Z node 1 :BS_NODE DEBUG: {NWDC11@distconf_binding.cpp:6} TEvNodesInfo 2025-05-07T08:54:37.346370Z node 2 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72057594037932033 entry.State: StInit ev: {EvForward TabletID: 72057594037932033 Ev: nullptr Flags: 1:2:0} 2025-05-07T08:54:37.346601Z node 2 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:149: TClient[72057594037932033] queue send ... 
9b +(0, 0b), 2 trc, -0b acc} 2025-05-07T08:55:23.037816Z node 58 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:155: TClient[72057594037927937] send [58:99:2093] 2025-05-07T08:55:23.040095Z node 58 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:629: TClient[72057594037927937] push event to server [58:99:2093] 2025-05-07T08:55:23.040230Z node 58 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:141: [72057594037927937] HandleSend Sender# [58:98:2093] EventType# 268960257 2025-05-07T08:55:23.040377Z node 58 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:373: TClient[72075186224037888] peer closed [58:440:2351] 2025-05-07T08:55:23.040450Z node 58 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:505: TClient[72075186224037888] notify reset [58:440:2351] 2025-05-07T08:55:23.040713Z node 58 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:9} Tx{23, NKikimr::NHive::TTxUpdateTabletStatus} queued, type NKikimr::NHive::TTxUpdateTabletStatus 2025-05-07T08:55:23.040799Z node 58 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:9} Tx{23, NKikimr::NHive::TTxUpdateTabletStatus} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-05-07T08:55:23.040936Z node 58 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:9} Tx{23, NKikimr::NHive::TTxUpdateTabletStatus} hope 1 -> done Change{13, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 2025-05-07T08:55:23.041041Z node 58 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:9} Tx{23, NKikimr::NHive::TTxUpdateTabletStatus} release 4194304b of static, Memory{0 dyn 0} 2025-05-07T08:55:23.041379Z node 58 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:9} Tx{24, NKikimr::NHive::TTxProcessBootQueue} queued, type NKikimr::NHive::TTxProcessBootQueue 2025-05-07T08:55:23.041460Z node 58 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:9} Tx{24, NKikimr::NHive::TTxProcessBootQueue} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-05-07T08:55:23.041566Z node 58 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:9} Tx{24, NKikimr::NHive::TTxProcessBootQueue} hope 1 -> done Change{13, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 2025-05-07T08:55:23.041639Z node 58 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:9} Tx{24, NKikimr::NHive::TTxProcessBootQueue} release 4194304b of static, Memory{0 dyn 0} 2025-05-07T08:55:23.042342Z node 58 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72075186224037888] ::Bootstrap [58:454:2358] 2025-05-07T08:55:23.042393Z node 58 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72075186224037888] lookup [58:454:2358] 2025-05-07T08:55:23.042509Z node 58 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72075186224037888 entry.State: StNormal ev: {EvForward TabletID: 72075186224037888 Ev: nullptr Flags: 1:2:0} 2025-05-07T08:55:23.042604Z node 58 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:279: SelectForward node 58 selfDC 1 leaderDC 1 1:2:0 local 1 localDc 1 other 0 disallowed 0 tabletId: 72075186224037888 followers: 0 countLeader 1 allowFollowers 0 winner: [58:374:2299] 2025-05-07T08:55:23.042697Z node 58 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:190: TClient[72075186224037888] forward result local node, try to connect [58:454:2358] 2025-05-07T08:55:23.042760Z node 58 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:671: TClient[72075186224037888]::SendEvent [58:454:2358] 2025-05-07T08:55:23.042924Z node 58 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:349: TClient[72075186224037888] connect request undelivered [58:454:2358] 2025-05-07T08:55:23.043008Z node 58 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:498: 
TClient[72075186224037888] connect failed [58:454:2358] 2025-05-07T08:55:23.043106Z node 58 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:536: Handle TEvTabletProblem tabletId: 72075186224037888 entry.State: StNormal 2025-05-07T08:55:23.043322Z node 58 :STATESTORAGE DEBUG: statestorage_proxy.cpp:253: ProxyRequest::HandleInit ev: {EvLookup TabletID: 72075186224037888 Cookie: 0 ProxyOptions: SigNone} 2025-05-07T08:55:23.043480Z node 58 :STATESTORAGE DEBUG: statestorage_replica.cpp:183: Replica::Handle ev: {EvReplicaLookup TabletID: 72075186224037888 Cookie: 0} 2025-05-07T08:55:23.044662Z node 58 :STATESTORAGE DEBUG: statestorage_replica.cpp:183: Replica::Handle ev: {EvReplicaLookup TabletID: 72075186224037888 Cookie: 1} 2025-05-07T08:55:23.044740Z node 58 :STATESTORAGE DEBUG: statestorage_replica.cpp:183: Replica::Handle ev: {EvReplicaLookup TabletID: 72075186224037888 Cookie: 2} 2025-05-07T08:55:23.044855Z node 58 :STATESTORAGE DEBUG: statestorage_proxy.cpp:372: ProxyRequest::HandleLookup ev: {EvReplicaInfo Status: 0 TabletID: 72075186224037888 CurrentLeader: [58:374:2299] CurrentLeaderTablet: [58:389:2311] CurrentGeneration: 1 CurrentStep: 0} 2025-05-07T08:55:23.044966Z node 58 :STATESTORAGE DEBUG: statestorage_proxy.cpp:372: ProxyRequest::HandleLookup ev: {EvReplicaInfo Status: 0 TabletID: 72075186224037888 CurrentLeader: [58:374:2299] CurrentLeaderTablet: [58:389:2311] CurrentGeneration: 1 CurrentStep: 0} 2025-05-07T08:55:23.045098Z node 58 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:610: Handle TEvInfo tabletId: 72075186224037888 entry.State: StProblemResolve success: true ev: {EvInfo Status: 0 TabletID: 72075186224037888 Cookie: 0 CurrentLeader: [58:374:2299] CurrentLeaderTablet: [58:389:2311] CurrentGeneration: 1 CurrentStep: 0 Locked: false LockedFor: 0 SignatureSz: 3 Signature: {3, 6, 0}} 2025-05-07T08:55:23.045289Z node 58 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:361: DropEntry tabletId: 72075186224037888 followers: 0 2025-05-07T08:55:23.045700Z node 59 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594037927937] ::Bootstrap [59:456:2093] 2025-05-07T08:55:23.045758Z node 59 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594037927937] lookup [59:456:2093] 2025-05-07T08:55:23.045876Z node 59 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72057594037927937 entry.State: StNormal ev: {EvForward TabletID: 72057594037927937 Ev: nullptr Flags: 1:2:0} 2025-05-07T08:55:23.045962Z node 59 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:279: SelectForward node 59 selfDC 2 leaderDC 1 1:2:0 local 0 localDc 0 other 1 disallowed 0 tabletId: 72057594037927937 followers: 0 countLeader 1 allowFollowers 0 winner: [58:323:2263] 2025-05-07T08:55:23.046053Z node 59 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:149: TClient[72057594037927937] queue send [59:456:2093] 2025-05-07T08:55:23.046122Z node 59 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:411: TClient[72057594037927937] received pending shutdown [59:456:2093] 2025-05-07T08:55:23.046181Z node 59 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:195: TClient[72057594037927937] forward result remote node 58 [59:456:2093] 2025-05-07T08:55:23.046330Z node 59 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:229: TClient[72057594037927937] remote node connected [59:456:2093] 2025-05-07T08:55:23.046414Z node 59 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:671: TClient[72057594037927937]::SendEvent [59:456:2093] 2025-05-07T08:55:23.046714Z node 58 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:291: [72057594037927937] Accept 
Connect Originator# [59:456:2093] 2025-05-07T08:55:23.047049Z node 59 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:310: TClient[72057594037927937] connected with status OK role: Leader [59:456:2093] 2025-05-07T08:55:23.047116Z node 59 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:325: TClient[72057594037927937] send queued [59:456:2093] 2025-05-07T08:55:23.047168Z node 59 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:629: TClient[72057594037927937] push event to server [59:456:2093] 2025-05-07T08:55:23.047279Z node 59 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:671: TClient[72057594037927937]::SendEvent [59:456:2093] 2025-05-07T08:55:23.047354Z node 59 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:332: TClient[72057594037927937] shutdown pipe due to pending shutdown request [59:456:2093] 2025-05-07T08:55:23.047408Z node 59 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:505: TClient[72057594037927937] notify reset [59:456:2093] 2025-05-07T08:55:23.047678Z node 58 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:72: [72057594037927937] Push Sender# [59:443:2088] EventType# 268697624 2025-05-07T08:55:23.047870Z node 58 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:9} Tx{25, NKikimr::NHive::TTxStartTablet} queued, type NKikimr::NHive::TTxStartTablet 2025-05-07T08:55:23.047932Z node 58 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:9} Tx{25, NKikimr::NHive::TTxStartTablet} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-05-07T08:55:23.048137Z node 58 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:9} Tx{25, NKikimr::NHive::TTxStartTablet} hope 1 -> done Change{13, redo 83b alter 0b annex 0, ~{ 1 } -{ }, 0 gb} 2025-05-07T08:55:23.048201Z node 58 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:9} Tx{25, NKikimr::NHive::TTxStartTablet} release 4194304b of static, Memory{0 dyn 0} 2025-05-07T08:55:23.059829Z node 58 :BS_PROXY_PUT INFO: dsproxy_put.cpp:645: [9eafe310d4d96c0d] bootstrap ActorId# [58:459:2361] Group# 0 BlobCount# 1 BlobIDs# [[72057594037927937:2:9:0:0:92:0]] HandleClass# TabletLog Tactic# MinLatency RestartCounter# 0 Marker# BPP13 2025-05-07T08:55:23.060033Z node 58 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [9eafe310d4d96c0d] Id# [72057594037927937:2:9:0:0:92:0] restore disk# 0 part# 0 situation# ESituation::Unknown Marker# BPG51 2025-05-07T08:55:23.060132Z node 58 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:65: [9eafe310d4d96c0d] restore Id# [72057594037927937:2:9:0:0:92:0] optimisticReplicas# 1 optimisticState# EBS_FULL Marker# BPG55 2025-05-07T08:55:23.060234Z node 58 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [9eafe310d4d96c0d] partPlacement record partSituation# ESituation::Unknown to# 0 blob Id# [72057594037927937:2:9:0:0:92:1] Marker# BPG33 2025-05-07T08:55:23.060308Z node 58 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [9eafe310d4d96c0d] Sending missing VPut part# 0 to# 0 blob Id# [72057594037927937:2:9:0:0:92:1] Marker# BPG32 2025-05-07T08:55:23.060486Z node 58 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [58:58:2081] NKikimr::TEvBlobStorage::TEvVPut# {ID# [72057594037927937:2:9:0:0:92:1] FDS# 92 HandleClass# TabletLog {MsgQoS ExtQueueId# PutTabletLog} DataSize# 0 Data# } cookie# 0 2025-05-07T08:55:23.062815Z node 58 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [9eafe310d4d96c0d] received {EvVPutResult Status# OK ID# [72057594037927937:2:9:0:0:92:1] {MsgQoS MsgId# { SequenceId: 1 MsgId: 24 } Cost# 80724 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 
ExpectedMsgId# { SequenceId: 1 MsgId: 25 }}}} from# [0:1:0:0:0] Marker# BPP01 2025-05-07T08:55:23.062981Z node 58 :BS_PROXY_PUT DEBUG: dsproxy_put_impl.cpp:72: [9eafe310d4d96c0d] Result# TEvPutResult {Id# [72057594037927937:2:9:0:0:92:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.998955} GroupId# 0 Marker# BPP12 2025-05-07T08:55:23.063079Z node 58 :BS_PROXY_PUT INFO: dsproxy_put.cpp:486: [9eafe310d4d96c0d] SendReply putResult# TEvPutResult {Id# [72057594037927937:2:9:0:0:92:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.998955} ResponsesSent# 0 PutImpl.Blobs.size# 1 Last# true Marker# BPP21 2025-05-07T08:55:23.063296Z node 58 :BS_PROXY_PUT DEBUG: {BPP72@dsproxy_put.cpp:470} Query history GroupId# 0 HandleClass# TabletLog Tactic# MinLatency History# THistory { Entries# [ TEvVPut{ TimestampMs# 0.918 sample PartId# [72057594037927937:2:9:0:0:92:1] QueryCount# 1 VDiskId# [0:1:0:0:0] NodeId# 58 } TEvVPutResult{ TimestampMs# 3.269 VDiskId# [0:1:0:0:0] NodeId# 58 Status# OK } ] } 2025-05-07T08:55:23.063505Z node 58 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594037927937:2:9:0:0:92:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.998955} 2025-05-07T08:55:23.063681Z node 58 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:10} commited cookie 1 for step 9 |90.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/mind/bscontroller/ut/ydb-core-mind-bscontroller-ut |90.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/mind/bscontroller/ut/ydb-core-mind-bscontroller-ut |90.4%| [LD] {RESULT} $(B)/ydb/core/mind/bscontroller/ut/ydb-core-mind-bscontroller-ut >> AsyncIndexChangeCollector::UpsertManyRows [GOOD] >> AsyncIndexChangeCollector::MultiIndexedTableUpdateOneIndexedColumn >> KqpWorkloadService::TestQueryCancelAfterUnlimitedPool [GOOD] >> KqpWorkloadService::TestStartQueryAfterCancel >> AsyncIndexChangeCollector::CoveredIndexUpsert [GOOD] >> AsyncIndexChangeCollector::AllColumnsInPk |90.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_subdomain/ydb-core-tx-schemeshard-ut_subdomain |90.4%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_subdomain/ydb-core-tx-schemeshard-ut_subdomain |90.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_subdomain/ydb-core-tx-schemeshard-ut_subdomain |90.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_external_blobs/ydb-core-tx-datashard-ut_external_blobs |90.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_external_blobs/ydb-core-tx-datashard-ut_external_blobs |90.4%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_external_blobs/ydb-core-tx-datashard-ut_external_blobs >> TPartBtreeIndexIteration::OneNode_History_Slices [GOOD] >> TPartBtreeIndexIteration::OneNode_Groups_History_Slices >> SystemView::PartitionStatsOneSchemeShard [GOOD] >> SystemView::PartitionStatsOneSchemeShardDataQuery >> AsyncIndexChangeCollector::InsertSingleRow >> AsyncIndexChangeCollector::DeleteNothing >> CdcStreamChangeCollector::UpsertIntoTwoStreams [GOOD] >> CdcStreamChangeCollector::PageFaults >> ReadLoad::ShouldReadKqp |90.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/replication/controller/ut_dst_creator/ydb-core-tx-replication-controller-ut_dst_creator |90.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/replication/controller/ut_dst_creator/ydb-core-tx-replication-controller-ut_dst_creator |90.4%| [LD] {RESULT} 
$(B)/ydb/core/tx/replication/controller/ut_dst_creator/ydb-core-tx-replication-controller-ut_dst_creator >> TSchemeshardCompactionQueueTest::ShouldNotEnqueueSinglePartedShardWithMemData [GOOD] >> TSchemeshardCompactionQueueTest::ShouldPopWhenOnlyLastCompactionQueue [GOOD] |90.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_compaction/unittest >> TSchemeshardCompactionQueueTest::ShouldPopWhenOnlyLastCompactionQueue [GOOD] >> SystemView::CollectScanQueries [GOOD] >> SystemView::AuthUsers >> KqpWorkloadServiceTables::TestPoolStateFetcherActor [GOOD] >> KqpWorkloadServiceTables::TestCleanupOnServiceRestart >> THiveTest::TestHiveBalancerDifferentResources2 [GOOD] >> THiveTest::TestHiveBalancerUselessNeighbourMoves >> TVersions::WreckHead [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-69 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-70 >> TVersions::WreckHeadReverse >> StoragePool::TestDistributionRandomProbability [GOOD] >> StoragePool::TestDistributionRandomProbabilityWithOverflow [GOOD] >> StoragePool::TestDistributionExactMin >> KqpPg::TempTablesDrop [FAIL] >> KqpPg::TempTablesWithCache |90.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_cdc_stream/ydb-core-tx-schemeshard-ut_cdc_stream |90.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_cdc_stream/ydb-core-tx-schemeshard-ut_cdc_stream |90.4%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_cdc_stream/ydb-core-tx-schemeshard-ut_cdc_stream >> BuildStatsHistogram::Single_History [GOOD] >> BuildStatsHistogram::Single_History_Slices >> AsyncIndexChangeCollector::MultiIndexedTableUpdateOneIndexedColumn [GOOD] >> AsyncIndexChangeCollector::MultiIndexedTableReplaceSingleRow >> TExecutorDb::FullScan [GOOD] >> TExecutorDb::CoordinatorSimulation >> AsyncIndexChangeCollector::DeleteNothing [GOOD] >> AsyncIndexChangeCollector::DeleteSingleRow >> TChargeBTreeIndex::FewNodes_Groups_History_Sticky [GOOD] >> TClockProCache::Lifecycle [GOOD] >> TClockProCache::EvictNext [GOOD] >> TClockProCache::Erase [GOOD] >> TClockProCache::Random [GOOD] >> NFwd_TFlatIndexCache::Skip_Wait [GOOD] >> NFwd_TFlatIndexCache::Trace [GOOD] >> NFwd_TFlatIndexCache::Slices [GOOD] >> NFwd_TLoadedPagesCircularBuffer::Basics [GOOD] >> NOther::Blocks [GOOD] >> NPage::Encoded [GOOD] >> NPage::ABI_002 >> AsyncIndexChangeCollector::AllColumnsInPk [GOOD] >> AsyncIndexChangeCollector::CoverIndexedColumn >> AsyncIndexChangeCollector::InsertSingleRow [GOOD] >> AsyncIndexChangeCollector::InsertManyRows >> NPage::ABI_002 [GOOD] >> NPage::GroupIdEncoding [GOOD] >> NPageCollection::Align [GOOD] >> NPageCollection::Meta >> ResourcePoolsDdl::TestPoolSwitchToLimitedState [GOOD] >> ResourcePoolsDdl::TestDropResourcePool >> NPageCollection::Meta [GOOD] >> NPageCollection::PagesToBlobsConverter [GOOD] >> NPageCollection::Grow [GOOD] >> NPageCollection::Groups [GOOD] >> NPageCollection::Chop [GOOD] >> NPageCollection::CookieAllocator [GOOD] >> NProto::LargeGlobId [GOOD] >> Redo::ABI_008 >> Redo::ABI_008 [GOOD] >> Self::Literals [GOOD] >> THiveTest::TestHiveBalancerUselessNeighbourMoves [GOOD] >> THiveTest::TestHiveBalancerWithImmovableTablets >> TPartBtreeIndexIteration::OneNode_Groups_History_Slices [GOOD] >> TPartBtreeIndexIteration::FewNodes >> ResourcePoolsSysView::TestResourcePoolsSysViewOnServerless [GOOD] >> ResourcePoolsSysView::TestResourcePoolsSysViewFilters >> StoragePool::TestDistributionExactMin [GOOD] >> StoragePool::TestDistributionExactMinWithOverflow [GOOD] 
>> StoragePool::TestDistributionRandomMin7p ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tablet_flat/ut/unittest >> Self::Literals [GOOD] Test command err: + BTreeIndex{PageId: 0 RowCount: 1155 DataSize: 11055 GroupDataSize: 22055 ErasedRowCount: 385, 13 rev 1, 683b} | PageId: 10000 RowCount: 100 DataSize: 1000 GroupDataSize: 2000 ErasedRowCount: 30 | > {0, a, false, 0} | PageId: 10001 RowCount: 201 DataSize: 2001 GroupDataSize: 4001 ErasedRowCount: 61 | > {1, b, true, 10} | PageId: 10002 RowCount: 303 DataSize: 3003 GroupDataSize: 6003 ErasedRowCount: 93 | > {2, c, false, 20} | PageId: 10003 RowCount: 406 DataSize: 4006 GroupDataSize: 8006 ErasedRowCount: 126 | > {3, d, true, 30} | PageId: 10004 RowCount: 510 DataSize: 5010 GroupDataSize: 10010 ErasedRowCount: 160 | > {4, e, false, 40} | PageId: 10005 RowCount: 615 DataSize: 6015 GroupDataSize: 12015 ErasedRowCount: 195 | > {5, f, true, 50} | PageId: 10006 RowCount: 721 DataSize: 7021 GroupDataSize: 14021 ErasedRowCount: 231 | > {6, g, false, 60} | PageId: 10007 RowCount: 828 DataSize: 8028 GroupDataSize: 16028 ErasedRowCount: 268 | > {7, h, true, 70} | PageId: 10008 RowCount: 936 DataSize: 9036 GroupDataSize: 18036 ErasedRowCount: 306 | > {8, i, false, 80} | PageId: 10009 RowCount: 1045 DataSize: 10045 GroupDataSize: 20045 ErasedRowCount: 345 | > {9, j, true, 90} | PageId: 10010 RowCount: 1155 DataSize: 11055 GroupDataSize: 22055 ErasedRowCount: 385 + BTreeIndex{PageId: 9 RowCount: 2310 DataSize: 21210 GroupDataSize: 42210 ErasedRowCount: 840, 13 rev 1, 116b} | + BTreeIndex{PageId: 5 RowCount: 936 DataSize: 9036 GroupDataSize: 18036 ErasedRowCount: 306, 13 rev 1, 179b} | | + BTreeIndex{PageId: 0 RowCount: 303 DataSize: 3003 GroupDataSize: 6003 ErasedRowCount: 93, 13 rev 1, 179b} | | | PageId: 10000 RowCount: 100 DataSize: 1000 GroupDataSize: 2000 ErasedRowCount: 30 | | | > {0, a, false, 0} | | | PageId: 10001 RowCount: 201 DataSize: 2001 GroupDataSize: 4001 ErasedRowCount: 61 | | | > {1, b, true, 10} | | | PageId: 10002 RowCount: 303 DataSize: 3003 GroupDataSize: 6003 ErasedRowCount: 93 | | > {2, c, false, 20} | | + BTreeIndex{PageId: 1 RowCount: 615 DataSize: 6015 GroupDataSize: 12015 ErasedRowCount: 195, 13 rev 1, 179b} | | | PageId: 10003 RowCount: 406 DataSize: 4006 GroupDataSize: 8006 ErasedRowCount: 126 | | | > {3, d, true, 30} | | | PageId: 10004 RowCount: 510 DataSize: 5010 GroupDataSize: 10010 ErasedRowCount: 160 | | | > {4, e, false, 40} | | | PageId: 10005 RowCount: 615 DataSize: 6015 GroupDataSize: 12015 ErasedRowCount: 195 | | > {5, f, true, 50} | | + BTreeIndex{PageId: 2 RowCount: 936 DataSize: 9036 GroupDataSize: 18036 ErasedRowCount: 306, 13 rev 1, 179b} | | | PageId: 10006 RowCount: 721 DataSize: 7021 GroupDataSize: 14021 ErasedRowCount: 231 | | | > {6, g, false, 60} | | | PageId: 10007 RowCount: 828 DataSize: 8028 GroupDataSize: 16028 ErasedRowCount: 268 | | | > {7, h, true, 70} | | | PageId: 10008 RowCount: 936 DataSize: 9036 GroupDataSize: 18036 ErasedRowCount: 306 | > {8, i, false, 80} | + BTreeIndex{PageId: 8 RowCount: 2310 DataSize: 21210 GroupDataSize: 42210 ErasedRowCount: 840, 13 rev 1, 242b} | | + BTreeIndex{PageId: 3 RowCount: 1266 DataSize: 12066 GroupDataSize: 24066 ErasedRowCount: 426, 13 rev 1, 179b} | | | PageId: 10009 RowCount: 1045 DataSize: 10045 GroupDataSize: 20045 ErasedRowCount: 345 | | | > {9, j, true, 90} | | | PageId: 10010 RowCount: 1155 DataSize: 11055 GroupDataSize: 22055 ErasedRowCount: 385 | | | > {10, k, false, 100} | | | PageId: 10011 RowCount: 1266 DataSize: 12066 
GroupDataSize: 24066 ErasedRowCount: 426 | | > {11, l, true, 110} | | + BTreeIndex{PageId: 4 RowCount: 1605 DataSize: 15105 GroupDataSize: 30105 ErasedRowCount: 555, 13 rev 1, 179b} | | | PageId: 10012 RowCount: 1378 DataSize: 13078 GroupDataSize: 26078 ErasedRowCount: 468 | | | > {12, m, false, 120} | | | PageId: 10013 RowCount: 1491 DataSize: 14091 GroupDataSize: 28091 ErasedRowCount: 511 | | | > {13, n, true, 130} | | | PageId: 10014 RowCount: 1605 DataSize: 15105 GroupDataSize: 30105 ErasedRowCount: 555 | | > {14, o, false, 140} | | + BTreeIndex{PageId: 6 RowCount: 1953 DataSize: 18153 GroupDataSize: 36153 ErasedRowCount: 693, 13 rev 1, 179b} | | | PageId: 10015 RowCount: 1720 DataSize: 16120 GroupDataSize: 32120 ErasedRowCount: 600 | | | > {15, p, true, 150} | | | PageId: 10016 RowCount: 1836 DataSize: 17136 GroupDataSize: 34136 ErasedRowCount: 646 | | | > {16, q, false, 160} | | | PageId: 10017 RowCount: 1953 DataSize: 18153 GroupDataSize: 36153 ErasedRowCount: 693 | | > {17, r, true, 170} | | + BTreeIndex{PageId: 7 RowCount: 2310 DataSize: 21210 GroupDataSize: 42210 ErasedRowCount: 840, 13 rev 1, 179b} | | | PageId: 10018 RowCount: 2071 DataSize: 19171 GroupDataSize: 38171 ErasedRowCount: 741 | | | > {18, s, false, 180} | | | PageId: 10019 RowCount: 2190 DataSize: 20190 GroupDataSize: 40190 ErasedRowCount: 790 | | | > {19, t, true, 190} | | | PageId: 10020 RowCount: 2310 DataSize: 21210 GroupDataSize: 42210 ErasedRowCount: 840 + BTreeIndex{PageId: 15 RowCount: 15150 DataSize: 106050 GroupDataSize: 207050 ErasedRowCount: 8080, 13 rev 1, 174b} | + BTreeIndex{PageId: 12 RowCount: 9078 DataSize: 70278 GroupDataSize: 138278 ErasedRowCount: 4318, 13 rev 1, 690b} | | + BTreeIndex{PageId: 0 RowCount: 1266 DataSize: 12066 GroupDataSize: 24066 ErasedRowCount: 426, 13 rev 1, 702b} | | | PageId: 10000 RowCount: 100 DataSize: 1000 GroupDataSize: 2000 ErasedRowCount: 30 | | | > {0, x, NULL, NULL} | | | PageId: 10001 RowCount: 201 DataSize: 2001 GroupDataSize: 4001 ErasedRowCount: 61 | | | > {1, xx, NULL, NULL} | | | PageId: 10002 RowCount: 303 DataSize: 3003 GroupDataSize: 6003 ErasedRowCount: 93 | | | > {2, xxx, NULL, NULL} | | | PageId: 10003 RowCount: 406 DataSize: 4006 GroupDataSize: 8006 ErasedRowCount: 126 | | | > {3, xxxx, NULL, NULL} | | | PageId: 10004 RowCount: 510 DataSize: 5010 GroupDataSize: 10010 ErasedRowCount: 160 | | | > {4, xxxxx, NULL, NULL} | | | PageId: 10005 RowCount: 615 DataSize: 6015 GroupDataSize: 12015 ErasedRowCount: 195 | | | > {5, xxxxxx, NULL, NULL} | | | PageId: 10006 RowCount: 721 DataSize: 7021 GroupDataSize: 14021 ErasedRowCount: 231 | | | > {6, xxxxxxx, NULL, NULL} | | | PageId: 10007 RowCount: 828 DataSize: 8028 GroupDataSize: 16028 ErasedRowCount: 268 | | | > {7, xxxxxxxx, NULL, NULL} | | | PageId: 10008 RowCount: 936 DataSize: 9036 GroupDataSize: 18036 ErasedRowCount: 306 | | | > {8, xxxxxxxxx, NULL, NULL} | | | PageId: 10009 RowCount: 1045 DataSize: 10045 GroupDataSize: 20045 ErasedRowCount: 345 | | | > {9, xxxxxxxxxx, NULL, NULL} | | | PageId: 10010 RowCount: 1155 DataSize: 11055 GroupDataSize: 22055 ErasedRowCount: 385 | | | > {10, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10011 RowCount: 1266 DataSize: 12066 GroupDataSize: 24066 ErasedRowCount: 426 | | > {11, xxxxxxxxxx.., NULL, NULL} | | + BTreeIndex{PageId: 1 RowCount: 2431 DataSize: 22231 GroupDataSize: 44231 ErasedRowCount: 891, 13 rev 1, 683b} | | | PageId: 10012 RowCount: 1378 DataSize: 13078 GroupDataSize: 26078 ErasedRowCount: 468 | | | > {12, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10013 RowCount: 
1491 DataSize: 14091 GroupDataSize: 28091 ErasedRowCount: 511 | | | > {13, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10014 RowCount: 1605 DataSize: 15105 GroupDataSize: 30105 ErasedRowCount: 555 | | | > {14, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10015 RowCount: 1720 DataSize: 16120 GroupDataSize: 32120 ErasedRowCount: 600 | | | > {15, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10016 RowCount: 1836 DataSize: 17136 GroupDataSize: 34136 ErasedRowCount: 646 | | | > {16, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10017 RowCount: 1953 DataSize: 18153 GroupDataSize: 36153 ErasedRowCount: 693 | | | > {17, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10018 RowCount: 2071 DataSize: 19171 GroupDataSize: 38171 ErasedRowCount: 741 | | | > {18, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10019 RowCount: 2190 DataSize: 20190 GroupDataSize: 40190 ErasedRowCount: 790 | | | > {19, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10020 RowCount: 2310 DataSize: 21210 GroupDataSize: 42210 ErasedRowCount: 840 | | | > {20, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10021 RowCount: 2431 DataSize: 22231 GroupDataSize: 44231 ErasedRowCount: 891 | | > {21, xxxxxxxxxx.., NULL, NULL} | | + BTreeIndex{PageId: 2 RowCount: 3565 DataSize: 31465 GroupDataSize: 62465 ErasedRowCount: 1395, 13 rev 1, 689b} | | | PageId: 10022 RowCount: 2553 DataSize: 23253 GroupDataSize: 46253 ErasedRowCount: 943 | | | > {22, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10023 RowCount: 2676 DataSize: 24276 GroupDataSize: 48276 ErasedRowCount: 996 | | | > {23, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10024 RowCount: 2800 DataSize: 25300 GroupDataSize: 50300 ErasedRowCount: 1050 | | | > {24, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10025 RowCount: 2925 DataSize: 26325 GroupDataSize: 52325 ErasedRowCount: 1105 | | | > {25, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10026 RowCount: 3051 DataSize: 27351 GroupDataSize: 54351 ErasedRowCount: 1161 | | | > {26, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10027 RowCount: 3178 DataSize: 28378 GroupDataSize: 56378 ErasedRowCount: 1218 | | | > {27, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10028 RowCount: 3306 DataSize: 29406 GroupDataSize: 58406 ErasedRowCount: 1276 | | | > {28, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10029 RowCount: 3435 DataSize: 30435 GroupDataSize: 60435 ErasedRowCount: 1335 | | | > {29, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10030 RowCount: 3565 DataSize: 31465 GroupDataSize: 62465 ErasedRowCount: 1395 | | > {30, xxxxxxxxxx.., NULL, NULL} | | + BTreeIndex{PageId: 3 RowCount: 4641 DataSize: 39741 GroupDataSize: 78741 ErasedRowCount: 1911, 13 rev 1, 669b} | | | PageId: 10031 RowCount: 3696 DataSize: 32496 GroupDataSize: 64496 ErasedRowCount: 1456 | | | > {31, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10032 RowCount: 3828 DataSize: 33528 GroupDataSize: 66528 ErasedRowCount: 1518 | | | > {32, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10033 RowCount: 3961 DataSize: 34561 GroupDataSize: 68561 ErasedRowCount: 1581 | | | > {33, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10034 RowCount: 4095 DataSize: 35595 GroupDataSize: 70595 ErasedRowCount: 1645 | | | > {34, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10035 RowCount: 4230 DataSize: 36630 GroupDataSize: 72630 ErasedRowCount: 1710 | | | > {35, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10036 RowCount: 4366 DataSize: 37666 GroupDataSize: 74666 ErasedRowCount: 1776 | | | > {36, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10037 RowCount: 4503 DataSize: 38703 GroupDataSize: 76703 ErasedRowCount: 1843 | | | > {37, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10038 RowCount: 4641 DataSize: 39741 GroupDataSize: 
78741 ErasedRowCount: 1911 | | > {38, xxxxxxxxxx.., NULL, NULL} | | + BTreeIndex{PageId: 4 RowCount: 5781 DataSize: 48081 GroupDataSize: 95081 ErasedRowCount: 2491, 13 rev 1, 725b} | | | PageId: 10039 RowCount: 4780 DataSize: 40780 GroupDataSize: 80780 ErasedRowCount: 1980 | | | > {39, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10040 RowCount: 4920 DataSize: 41820 GroupDataSize: 82820 ErasedRowCount: 2050 | | | > {40, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10041 RowCount: 5061 DataSize: 42861 GroupDataSize: 84861 ErasedRowCount: 2121 | | | > {41, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10042 RowCount: 5203 DataSize: 43903 GroupDataSize: 86903 ErasedRowCount: 2193 | | | > {42, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10043 RowCount: 5346 DataSize: 44946 GroupDataSize: 88946 ErasedRowCount: 2266 | | | > {43, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10044 RowCount: 5490 DataSize: 45990 GroupDataSize: 90990 ErasedRowCount: 2340 | | | > {44, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10045 RowCount: 5635 DataSize: 47035 GroupDataSize: 93035 ErasedRowCount: 2415 | | | > {45, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10046 RowCount: 5781 DataSize: 48081 GroupDataSize: 95081 ErasedRowCount: 2491 | | > {46, xxxxxxxxxx.., NULL, NULL} | | + BTreeIndex{PageId: 5 RowCount: 6831 DataSize: 55431 GroupDataSize: 109431 ErasedRowCount: 3051, 13 ... } | | | PageId: 67 RowCount: 34 DataSize: 1202 GroupDataSize: 7632 ErasedRowCount: 0 | | | > {4, 10} | | | PageId: 70 RowCount: 36 DataSize: 1284 GroupDataSize: 8163 ErasedRowCount: 0 | | | > {5, 3} | | | PageId: 82 RowCount: 38 DataSize: 1350 GroupDataSize: 8602 ErasedRowCount: 0 | | | > {5, 6} | | | PageId: 87 RowCount: 40 DataSize: 1416 GroupDataSize: 9358 ErasedRowCount: 0 + Rows{0} Label{04 rev 1, 66b}, [0, +2)row | ERowOp 1: {0, 1} | ERowOp 1: {0, 3} + Rows{2} Label{24 rev 1, 66b}, [2, +2)row | ERowOp 1: {0, 4} | ERowOp 1: {0, 6} + Rows{4} Label{44 rev 1, 82b}, [4, +2)row | ERowOp 1: {0, 7} | ERowOp 1: {0, 8} + Rows{8} Label{84 rev 1, 66b}, [6, +2)row | ERowOp 1: {0, 10} | ERowOp 1: {1, 1} + Rows{11} Label{114 rev 1, 66b}, [8, +2)row | ERowOp 1: {1, 3} | ERowOp 1: {1, 4} + Rows{14} Label{144 rev 1, 82b}, [10, +2)row | ERowOp 1: {1, 6} | ERowOp 1: {1, 7} + Rows{20} Label{204 rev 1, 66b}, [12, +2)row | ERowOp 1: {1, 8} | ERowOp 1: {1, 10} + Rows{23} Label{234 rev 1, 66b}, [14, +2)row | ERowOp 1: {2, 1} | ERowOp 1: {2, 3} + Rows{26} Label{264 rev 1, 82b}, [16, +2)row | ERowOp 1: {2, 4} | ERowOp 1: {2, 6} + Rows{36} Label{364 rev 1, 66b}, [18, +2)row | ERowOp 1: {2, 7} | ERowOp 1: {2, 8} + Rows{39} Label{394 rev 1, 66b}, [20, +2)row | ERowOp 1: {2, 10} | ERowOp 1: {3, 1} + Rows{42} Label{424 rev 1, 82b}, [22, +2)row | ERowOp 1: {3, 3} | ERowOp 1: {3, 4} + Rows{48} Label{484 rev 1, 66b}, [24, +2)row | ERowOp 1: {3, 6} | ERowOp 1: {3, 7} + Rows{53} Label{534 rev 1, 66b}, [26, +2)row | ERowOp 1: {3, 8} | ERowOp 1: {3, 10} + Rows{58} Label{584 rev 1, 82b}, [28, +2)row | ERowOp 1: {4, 1} | ERowOp 1: {4, 3} + Rows{64} Label{644 rev 1, 66b}, [30, +2)row | ERowOp 1: {4, 4} | ERowOp 1: {4, 6} + Rows{67} Label{674 rev 1, 66b}, [32, +2)row | ERowOp 1: {4, 7} | ERowOp 1: {4, 8} + Rows{70} Label{704 rev 1, 82b}, [34, +2)row | ERowOp 1: {4, 10} | ERowOp 1: {5, 1} + Rows{82} Label{824 rev 1, 66b}, [36, +2)row | ERowOp 1: {5, 3} | ERowOp 1: {5, 4} + Rows{87} Label{874 rev 1, 66b}, [38, +2)row | ERowOp 1: {5, 6} | ERowOp 1: {5, 7} 0.29205 Part{[1:2:3:0:0:0:0] eph 0, 1000b 40r} data 1479b + FlatIndex{20} Label{3 rev 3, 453b} 21 rec | Page Row Bytes (Uint32) | 0 0 50b {0} | 1 2 50b {2} | 
2 4 50b {4} | 3 6 50b {6} | 4 8 50b {8} | 5 10 50b {10} | 6 12 50b {12} | 7 14 50b {14} | 8 16 50b {16} | 9 18 50b {18} | 10 20 50b {20} | 11 22 50b {22} | 12 24 50b {24} | 13 26 50b {26} | 14 28 50b {28} | 15 30 50b {30} | 16 32 50b {32} | 17 34 50b {34} | 18 36 50b {36} | 19 38 50b {38} | 19 39 50b {39} + Rows{0} Label{04 rev 1, 50b}, [0, +2)row | ERowOp 1: {0} {Set 1 Uint32 : 0} | ERowOp 1: {1} {Set 1 Uint32 : 100} + Rows{1} Label{14 rev 1, 50b}, [2, +2)row | ERowOp 1: {2} {Set 1 Uint32 : 200} | ERowOp 1: {3} {Set 1 Uint32 : 300} + Rows{2} Label{24 rev 1, 50b}, [4, +2)row | ERowOp 1: {4} {Set 1 Uint32 : 400} | ERowOp 1: {5} {Set 1 Uint32 : 500} + Rows{3} Label{34 rev 1, 50b}, [6, +2)row | ERowOp 1: {6} {Set 1 Uint32 : 600} | ERowOp 1: {7} {Set 1 Uint32 : 700} + Rows{4} Label{44 rev 1, 50b}, [8, +2)row | ERowOp 1: {8} {Set 1 Uint32 : 800} | ERowOp 1: {9} {Set 1 Uint32 : 900} + Rows{5} Label{54 rev 1, 50b}, [10, +2)row | ERowOp 1: {10} {Set 1 Uint32 : 1000} | ERowOp 1: {11} {Set 1 Uint32 : 1100} + Rows{6} Label{64 rev 1, 50b}, [12, +2)row | ERowOp 1: {12} {Set 1 Uint32 : 1200} | ERowOp 1: {13} {Set 1 Uint32 : 1300} + Rows{7} Label{74 rev 1, 50b}, [14, +2)row | ERowOp 1: {14} {Set 1 Uint32 : 1400} | ERowOp 1: {15} {Set 1 Uint32 : 1500} + Rows{8} Label{84 rev 1, 50b}, [16, +2)row | ERowOp 1: {16} {Set 1 Uint32 : 1600} | ERowOp 1: {17} {Set 1 Uint32 : 1700} + Rows{9} Label{94 rev 1, 50b}, [18, +2)row | ERowOp 1: {18} {Set 1 Uint32 : 1800} | ERowOp 1: {19} {Set 1 Uint32 : 1900} + Rows{10} Label{104 rev 1, 50b}, [20, +2)row | ERowOp 1: {20} {Set 1 Uint32 : 2000} | ERowOp 1: {21} {Set 1 Uint32 : 2100} + Rows{11} Label{114 rev 1, 50b}, [22, +2)row | ERowOp 1: {22} {Set 1 Uint32 : 2200} | ERowOp 1: {23} {Set 1 Uint32 : 2300} + Rows{12} Label{124 rev 1, 50b}, [24, +2)row | ERowOp 1: {24} {Set 1 Uint32 : 2400} | ERowOp 1: {25} {Set 1 Uint32 : 2500} + Rows{13} Label{134 rev 1, 50b}, [26, +2)row | ERowOp 1: {26} {Set 1 Uint32 : 2600} | ERowOp 1: {27} {Set 1 Uint32 : 2700} + Rows{14} Label{144 rev 1, 50b}, [28, +2)row | ERowOp 1: {28} {Set 1 Uint32 : 2800} | ERowOp 1: {29} {Set 1 Uint32 : 2900} + Rows{15} Label{154 rev 1, 50b}, [30, +2)row | ERowOp 1: {30} {Set 1 Uint32 : 3000} | ERowOp 1: {31} {Set 1 Uint32 : 3100} + Rows{16} Label{164 rev 1, 50b}, [32, +2)row | ERowOp 1: {32} {Set 1 Uint32 : 3200} | ERowOp 1: {33} {Set 1 Uint32 : 3300} + Rows{17} Label{174 rev 1, 50b}, [34, +2)row | ERowOp 1: {34} {Set 1 Uint32 : 3400} | ERowOp 1: {35} {Set 1 Uint32 : 3500} + Rows{18} Label{184 rev 1, 50b}, [36, +2)row | ERowOp 1: {36} {Set 1 Uint32 : 3600} | ERowOp 1: {37} {Set 1 Uint32 : 3700} + Rows{19} Label{194 rev 1, 50b}, [38, +2)row | ERowOp 1: {38} {Set 1 Uint32 : 3800} | ERowOp 1: {39} {Set 1 Uint32 : 3900} Part{[1:2:3:0:0:0:0] eph 0, 1000b 40r} data 1479b + FlatIndex{20} Label{3 rev 3, 453b} 21 rec | Page Row Bytes (Uint32) | 0 0 50b {0} | 1 2 50b {2} | 2 4 50b {4} | 3 6 50b {6} | 4 8 50b {8} | 5 10 50b {10} | 6 12 50b {12} | 7 14 50b {14} | 8 16 50b {16} | 9 18 50b {18} | 10 20 50b {20} | 11 22 50b {22} | 12 24 50b {24} | 13 26 50b {26} | 14 28 50b {28} | 15 30 50b {30} | 16 32 50b {32} | 17 34 50b {34} | 18 36 50b {36} | 19 38 50b {38} | 19 39 50b {39} + Rows{0} Label{04 rev 1, 50b}, [0, +2)row | ERowOp 1: {0} {Set 1 Uint32 : 0} | ERowOp 1: {1} {Set 1 Uint32 : 100} + Rows{1} Label{14 rev 1, 50b}, [2, +2)row | ERowOp 1: {2} {Set 1 Uint32 : 200} | ERowOp 1: {3} {Set 1 Uint32 : 300} + Rows{2} Label{24 rev 1, 50b}, [4, +2)row | ERowOp 1: {4} {Set 1 Uint32 : 400} | ERowOp 1: {5} {Set 1 Uint32 : 500} + 
Rows{3} Label{34 rev 1, 50b}, [6, +2)row | ERowOp 1: {6} {Set 1 Uint32 : 600} | ERowOp 1: {7} {Set 1 Uint32 : 700} + Rows{4} Label{44 rev 1, 50b}, [8, +2)row | ERowOp 1: {8} {Set 1 Uint32 : 800} | ERowOp 1: {9} {Set 1 Uint32 : 900} + Rows{5} Label{54 rev 1, 50b}, [10, +2)row | ERowOp 1: {10} {Set 1 Uint32 : 1000} | ERowOp 1: {11} {Set 1 Uint32 : 1100} + Rows{6} Label{64 rev 1, 50b}, [12, +2)row | ERowOp 1: {12} {Set 1 Uint32 : 1200} | ERowOp 1: {13} {Set 1 Uint32 : 1300} + Rows{7} Label{74 rev 1, 50b}, [14, +2)row | ERowOp 1: {14} {Set 1 Uint32 : 1400} | ERowOp 1: {15} {Set 1 Uint32 : 1500} + Rows{8} Label{84 rev 1, 50b}, [16, +2)row | ERowOp 1: {16} {Set 1 Uint32 : 1600} | ERowOp 1: {17} {Set 1 Uint32 : 1700} + Rows{9} Label{94 rev 1, 50b}, [18, +2)row | ERowOp 1: {18} {Set 1 Uint32 : 1800} | ERowOp 1: {19} {Set 1 Uint32 : 1900} + Rows{10} Label{104 rev 1, 50b}, [20, +2)row | ERowOp 1: {20} {Set 1 Uint32 : 2000} | ERowOp 1: {21} {Set 1 Uint32 : 2100} + Rows{11} Label{114 rev 1, 50b}, [22, +2)row | ERowOp 1: {22} {Set 1 Uint32 : 2200} | ERowOp 1: {23} {Set 1 Uint32 : 2300} + Rows{12} Label{124 rev 1, 50b}, [24, +2)row | ERowOp 1: {24} {Set 1 Uint32 : 2400} | ERowOp 1: {25} {Set 1 Uint32 : 2500} + Rows{13} Label{134 rev 1, 50b}, [26, +2)row | ERowOp 1: {26} {Set 1 Uint32 : 2600} | ERowOp 1: {27} {Set 1 Uint32 : 2700} + Rows{14} Label{144 rev 1, 50b}, [28, +2)row | ERowOp 1: {28} {Set 1 Uint32 : 2800} | ERowOp 1: {29} {Set 1 Uint32 : 2900} + Rows{15} Label{154 rev 1, 50b}, [30, +2)row | ERowOp 1: {30} {Set 1 Uint32 : 3000} | ERowOp 1: {31} {Set 1 Uint32 : 3100} + Rows{16} Label{164 rev 1, 50b}, [32, +2)row | ERowOp 1: {32} {Set 1 Uint32 : 3200} | ERowOp 1: {33} {Set 1 Uint32 : 3300} + Rows{17} Label{174 rev 1, 50b}, [34, +2)row | ERowOp 1: {34} {Set 1 Uint32 : 3400} | ERowOp 1: {35} {Set 1 Uint32 : 3500} + Rows{18} Label{184 rev 1, 50b}, [36, +2)row | ERowOp 1: {36} {Set 1 Uint32 : 3600} | ERowOp 1: {37} {Set 1 Uint32 : 3700} + Rows{19} Label{194 rev 1, 50b}, [38, +2)row | ERowOp 1: {38} {Set 1 Uint32 : 3800} | ERowOp 1: {39} {Set 1 Uint32 : 3900} Part{[1:2:3:0:0:0:0] eph 0, 1000b 40r} data 1479b + FlatIndex{20} Label{3 rev 3, 453b} 21 rec | Page Row Bytes (Uint32) | 0 0 50b {0} | 1 2 50b {2} | 2 4 50b {4} | 3 6 50b {6} | 4 8 50b {8} | 5 10 50b {10} | 6 12 50b {12} | 7 14 50b {14} | 8 16 50b {16} | 9 18 50b {18} | 10 20 50b {20} | 11 22 50b {22} | 12 24 50b {24} | 13 26 50b {26} | 14 28 50b {28} | 15 30 50b {30} | 16 32 50b {32} | 17 34 50b {34} | 18 36 50b {36} | 19 38 50b {38} | 19 39 50b {39} + Rows{0} Label{04 rev 1, 50b}, [0, +2)row | ERowOp 1: {0} {Set 1 Uint32 : 0} | ERowOp 1: {1} {Set 1 Uint32 : 100} + Rows{1} Label{14 rev 1, 50b}, [2, +2)row | ERowOp 1: {2} {Set 1 Uint32 : 200} | ERowOp 1: {3} {Set 1 Uint32 : 300} + Rows{2} Label{24 rev 1, 50b}, [4, +2)row | ERowOp 1: {4} {Set 1 Uint32 : 400} | ERowOp 1: {5} {Set 1 Uint32 : 500} + Rows{3} Label{34 rev 1, 50b}, [6, +2)row | ERowOp 1: {6} {Set 1 Uint32 : 600} | ERowOp 1: {7} {Set 1 Uint32 : 700} + Rows{4} Label{44 rev 1, 50b}, [8, +2)row | ERowOp 1: {8} {Set 1 Uint32 : 800} | ERowOp 1: {9} {Set 1 Uint32 : 900} + Rows{5} Label{54 rev 1, 50b}, [10, +2)row | ERowOp 1: {10} {Set 1 Uint32 : 1000} | ERowOp 1: {11} {Set 1 Uint32 : 1100} + Rows{6} Label{64 rev 1, 50b}, [12, +2)row | ERowOp 1: {12} {Set 1 Uint32 : 1200} | ERowOp 1: {13} {Set 1 Uint32 : 1300} + Rows{7} Label{74 rev 1, 50b}, [14, +2)row | ERowOp 1: {14} {Set 1 Uint32 : 1400} | ERowOp 1: {15} {Set 1 Uint32 : 1500} + Rows{8} Label{84 rev 1, 50b}, [16, +2)row | ERowOp 1: {16} {Set 
1 Uint32 : 1600} | ERowOp 1: {17} {Set 1 Uint32 : 1700} + Rows{9} Label{94 rev 1, 50b}, [18, +2)row | ERowOp 1: {18} {Set 1 Uint32 : 1800} | ERowOp 1: {19} {Set 1 Uint32 : 1900} + Rows{10} Label{104 rev 1, 50b}, [20, +2)row | ERowOp 1: {20} {Set 1 Uint32 : 2000} | ERowOp 1: {21} {Set 1 Uint32 : 2100} + Rows{11} Label{114 rev 1, 50b}, [22, +2)row | ERowOp 1: {22} {Set 1 Uint32 : 2200} | ERowOp 1: {23} {Set 1 Uint32 : 2300} + Rows{12} Label{124 rev 1, 50b}, [24, +2)row | ERowOp 1: {24} {Set 1 Uint32 : 2400} | ERowOp 1: {25} {Set 1 Uint32 : 2500} + Rows{13} Label{134 rev 1, 50b}, [26, +2)row | ERowOp 1: {26} {Set 1 Uint32 : 2600} | ERowOp 1: {27} {Set 1 Uint32 : 2700} + Rows{14} Label{144 rev 1, 50b}, [28, +2)row | ERowOp 1: {28} {Set 1 Uint32 : 2800} | ERowOp 1: {29} {Set 1 Uint32 : 2900} + Rows{15} Label{154 rev 1, 50b}, [30, +2)row | ERowOp 1: {30} {Set 1 Uint32 : 3000} | ERowOp 1: {31} {Set 1 Uint32 : 3100} + Rows{16} Label{164 rev 1, 50b}, [32, +2)row | ERowOp 1: {32} {Set 1 Uint32 : 3200} | ERowOp 1: {33} {Set 1 Uint32 : 3300} + Rows{17} Label{174 rev 1, 50b}, [34, +2)row | ERowOp 1: {34} {Set 1 Uint32 : 3400} | ERowOp 1: {35} {Set 1 Uint32 : 3500} + Rows{18} Label{184 rev 1, 50b}, [36, +2)row | ERowOp 1: {36} {Set 1 Uint32 : 3600} | ERowOp 1: {37} {Set 1 Uint32 : 3700} + Rows{19} Label{194 rev 1, 50b}, [38, +2)row | ERowOp 1: {38} {Set 1 Uint32 : 3800} | ERowOp 1: {39} {Set 1 Uint32 : 3900} |90.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_continuous_backup/ydb-core-tx-schemeshard-ut_continuous_backup |90.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_continuous_backup/ydb-core-tx-schemeshard-ut_continuous_backup |90.4%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_continuous_backup/ydb-core-tx-schemeshard-ut_continuous_backup >> SystemView::TopPartitionsByCpuFields [GOOD] >> SystemView::TopPartitionsByCpuRanges |90.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_volatile/ydb-core-tx-datashard-ut_volatile |90.4%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_volatile/ydb-core-tx-datashard-ut_volatile |90.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_volatile/ydb-core-tx-datashard-ut_volatile >> TPartBtreeIndexIteration::FewNodes [GOOD] >> TPartBtreeIndexIteration::FewNodes_Groups >> KqpWorkloadServiceActors::TestCpuLoadActor [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-70 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-71 >> KqpPg::TempTablesWithCache [FAIL] >> KqpPg::TableDeleteWhere+useSink |90.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/tiering/ut/ydb-core-tx-tiering-ut >> AsyncIndexChangeCollector::DeleteSingleRow [GOOD] >> AsyncIndexChangeCollector::IndexedPrimaryKeyDeleteSingleRow |90.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/tiering/ut/ydb-core-tx-tiering-ut |90.4%| [LD] {RESULT} $(B)/ydb/core/tx/tiering/ut/ydb-core-tx-tiering-ut >> AsyncIndexChangeCollector::InsertManyRows [GOOD] >> AsyncIndexChangeCollector::MultiIndexedTableInsertSingleRow >> BuildStatsHistogram::Single_History_Slices [GOOD] >> BuildStatsHistogram::Ten_Mixed |90.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/replication/service/ut_json_change_record/tx-replication-service-ut_json_change_record |90.4%| [LD] {RESULT} $(B)/ydb/core/tx/replication/service/ut_json_change_record/tx-replication-service-ut_json_change_record |90.4%| [LD] {BAZEL_UPLOAD, SKIPPED} 
$(B)/ydb/core/tx/replication/service/ut_json_change_record/tx-replication-service-ut_json_change_record >> AsyncIndexChangeCollector::MultiIndexedTableReplaceSingleRow [GOOD] >> AsyncIndexChangeCollector::CoverIndexedColumn [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/workload_service/ut/unittest >> KqpWorkloadServiceActors::TestCpuLoadActor [GOOD] Test command err: 2025-05-07T08:54:40.501942Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501624582941729238:2060];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:54:40.502061Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00482b/r3tmp/tmpcIWTRS/pdisk_1.dat 2025-05-07T08:54:41.279862Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:54:41.292811Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:54:41.292940Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:54:41.298040Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 15265, node 1 2025-05-07T08:54:41.542615Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:54:41.542639Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:54:41.542655Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:54:41.542777Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:18453 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:54:42.095169Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 
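Note: the CPU-load probe this test exercises appears verbatim further down in this trace (it is the query sent on behalf of the "[TQueryBase] [TCpuLoadFetcherActor]" entries below). Reformatted here for readability; the comments on column meaning are inferred from the names and aggregates, not stated anywhere in the trace:

    SELECT
        SUM(CpuThreads) AS ThreadsCount,                 -- total executor threads reported in .sys/nodes (inferred)
        SUM(CpuThreads * (1.0 - CpuIdle)) AS TotalLoad   -- busy-thread equivalent derived from the idle fraction (inferred)
    FROM `.sys/nodes`;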
2025-05-07T08:54:44.981432Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:223: SessionId: ydb://session/3?node_id=1&id=MTJhMTdlZDktM2U1OTkzMzAtNWViZjhhNTYtOTNmYmYyZWQ=, ActorId: [0:0:0], ActorState: unknown state, Create session actor with id MTJhMTdlZDktM2U1OTkzMzAtNWViZjhhNTYtOTNmYmYyZWQ= 2025-05-07T08:54:44.990229Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:100: [WorkloadService] [Service] Subscribed for config changes 2025-05-07T08:54:44.990258Z node 1 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:114: [WorkloadService] [Service] Resource pools was disabled 2025-05-07T08:54:45.006743Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:227: SessionId: ydb://session/3?node_id=1&id=MTJhMTdlZDktM2U1OTkzMzAtNWViZjhhNTYtOTNmYmYyZWQ=, ActorId: [1:7501624600121599058:2328], ActorState: unknown state, session actor bootstrapped 2025-05-07T08:54:45.032147Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:223: SessionId: ydb://session/3?node_id=1&id=NjI5MWQ3NGUtZDRiOWM0MTEtMzMxMDBlZTctOTJiNmU3ZDk=, ActorId: [0:0:0], ActorState: unknown state, Create session actor with id NjI5MWQ3NGUtZDRiOWM0MTEtMzMxMDBlZTctOTJiNmU3ZDk= 2025-05-07T08:54:45.032477Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:227: SessionId: ydb://session/3?node_id=1&id=NjI5MWQ3NGUtZDRiOWM0MTEtMzMxMDBlZTctOTJiNmU3ZDk=, ActorId: [1:7501624604416566356:2329], ActorState: unknown state, session actor bootstrapped 2025-05-07T08:54:45.032787Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:443: SessionId: ydb://session/3?node_id=1&id=NjI5MWQ3NGUtZDRiOWM0MTEtMzMxMDBlZTctOTJiNmU3ZDk=, ActorId: [1:7501624604416566356:2329], ActorState: ReadyState, TraceId: 01jtmz8p186b6n7tz0a78cf0bh, received request, proxyRequestId: 3 prepared: 0 tx_control: 0 action: QUERY_ACTION_EXECUTE type: QUERY_TYPE_SQL_GENERIC_QUERY text: SELECT 42; rpcActor: [1:7501624604416566355:2295] database: Root databaseId: /Root pool id: 2025-05-07T08:54:45.032878Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:575: SessionId: ydb://session/3?node_id=1&id=NjI5MWQ3NGUtZDRiOWM0MTEtMzMxMDBlZTctOTJiNmU3ZDk=, ActorId: [1:7501624604416566356:2329], ActorState: ExecuteState, TraceId: 01jtmz8p186b6n7tz0a78cf0bh, Sending CompileQuery request 2025-05-07T08:54:45.450903Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1298: SessionId: ydb://session/3?node_id=1&id=NjI5MWQ3NGUtZDRiOWM0MTEtMzMxMDBlZTctOTJiNmU3ZDk=, ActorId: [1:7501624604416566356:2329], ActorState: ExecuteState, TraceId: 01jtmz8p186b6n7tz0a78cf0bh, ExecutePhyTx, tx: 0x000050C00023EF18 literal: 0 commit: 1 txCtx.DeferredEffects.size(): 0 2025-05-07T08:54:45.451148Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1449: SessionId: ydb://session/3?node_id=1&id=NjI5MWQ3NGUtZDRiOWM0MTEtMzMxMDBlZTctOTJiNmU3ZDk=, ActorId: [1:7501624604416566356:2329], ActorState: ExecuteState, TraceId: 01jtmz8p186b6n7tz0a78cf0bh, Sending to Executer TraceId: 0 8 2025-05-07T08:54:45.451331Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1507: SessionId: ydb://session/3?node_id=1&id=NjI5MWQ3NGUtZDRiOWM0MTEtMzMxMDBlZTctOTJiNmU3ZDk=, ActorId: [1:7501624604416566356:2329], ActorState: ExecuteState, TraceId: 01jtmz8p186b6n7tz0a78cf0bh, Created new KQP executer: [1:7501624604416566360:2329] isRollback: 0 2025-05-07T08:54:45.504295Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1797: SessionId: ydb://session/3?node_id=1&id=NjI5MWQ3NGUtZDRiOWM0MTEtMzMxMDBlZTctOTJiNmU3ZDk=, ActorId: [1:7501624604416566356:2329], ActorState: ExecuteState, TraceId: 01jtmz8p186b6n7tz0a78cf0bh, Forwarded TEvStreamData to 
[1:7501624604416566355:2295] 2025-05-07T08:54:45.504381Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501624582941729238:2060];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:54:45.504434Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:54:45.510403Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1699: SessionId: ydb://session/3?node_id=1&id=NjI5MWQ3NGUtZDRiOWM0MTEtMzMxMDBlZTctOTJiNmU3ZDk=, ActorId: [1:7501624604416566356:2329], ActorState: ExecuteState, TraceId: 01jtmz8p186b6n7tz0a78cf0bh, TEvTxResponse, CurrentTx: 1/1 response.status: SUCCESS 2025-05-07T08:54:45.510618Z node 1 :KQP_SESSION INFO: kqp_session_actor.cpp:1958: SessionId: ydb://session/3?node_id=1&id=NjI5MWQ3NGUtZDRiOWM0MTEtMzMxMDBlZTctOTJiNmU3ZDk=, ActorId: [1:7501624604416566356:2329], ActorState: ExecuteState, TraceId: 01jtmz8p186b6n7tz0a78cf0bh, txInfo Status: Committed Kind: Pure TotalDuration: 59.839 ServerDuration: 59.762 QueriesCount: 2 2025-05-07T08:54:45.510676Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2113: SessionId: ydb://session/3?node_id=1&id=NjI5MWQ3NGUtZDRiOWM0MTEtMzMxMDBlZTctOTJiNmU3ZDk=, ActorId: [1:7501624604416566356:2329], ActorState: ExecuteState, TraceId: 01jtmz8p186b6n7tz0a78cf0bh, Create QueryResponse for action: QUERY_ACTION_EXECUTE with SUCCESS status 2025-05-07T08:54:45.510862Z node 1 :KQP_SESSION INFO: kqp_session_actor.cpp:2473: SessionId: ydb://session/3?node_id=1&id=NjI5MWQ3NGUtZDRiOWM0MTEtMzMxMDBlZTctOTJiNmU3ZDk=, ActorId: [1:7501624604416566356:2329], ActorState: ExecuteState, TraceId: 01jtmz8p186b6n7tz0a78cf0bh, Cleanup start, isFinal: 1 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-05-07T08:54:45.510893Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2534: SessionId: ydb://session/3?node_id=1&id=NjI5MWQ3NGUtZDRiOWM0MTEtMzMxMDBlZTctOTJiNmU3ZDk=, ActorId: [1:7501624604416566356:2329], ActorState: ExecuteState, TraceId: 01jtmz8p186b6n7tz0a78cf0bh, EndCleanup, isFinal: 1 2025-05-07T08:54:45.510938Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2270: SessionId: ydb://session/3?node_id=1&id=NjI5MWQ3NGUtZDRiOWM0MTEtMzMxMDBlZTctOTJiNmU3ZDk=, ActorId: [1:7501624604416566356:2329], ActorState: ExecuteState, TraceId: 01jtmz8p186b6n7tz0a78cf0bh, Sent query response back to proxy, proxyRequestId: 3, proxyId: [1:7501624582941729483:2277] 2025-05-07T08:54:45.510962Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2546: SessionId: ydb://session/3?node_id=1&id=NjI5MWQ3NGUtZDRiOWM0MTEtMzMxMDBlZTctOTJiNmU3ZDk=, ActorId: [1:7501624604416566356:2329], ActorState: unknown state, TraceId: 01jtmz8p186b6n7tz0a78cf0bh, Cleanup temp tables: 0 2025-05-07T08:54:45.517737Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2637: SessionId: ydb://session/3?node_id=1&id=NjI5MWQ3NGUtZDRiOWM0MTEtMzMxMDBlZTctOTJiNmU3ZDk=, ActorId: [1:7501624604416566356:2329], ActorState: unknown state, TraceId: 01jtmz8p186b6n7tz0a78cf0bh, Session actor destroyed 2025-05-07T08:54:45.543132Z node 1 :KQP_SESSION INFO: kqp_session_actor.cpp:2315: SessionId: ydb://session/3?node_id=1&id=MTJhMTdlZDktM2U1OTkzMzAtNWViZjhhNTYtOTNmYmYyZWQ=, ActorId: [1:7501624600121599058:2328], ActorState: ReadyState, Session closed due to explicit close event 2025-05-07T08:54:45.543180Z node 1 :KQP_SESSION INFO: kqp_session_actor.cpp:2473: SessionId: 
ydb://session/3?node_id=1&id=MTJhMTdlZDktM2U1OTkzMzAtNWViZjhhNTYtOTNmYmYyZWQ=, ActorId: [1:7501624600121599058:2328], ActorState: ReadyState, Cleanup start, isFinal: 1 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-05-07T08:54:45.543200Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2534: SessionId: ydb://session/3?node_id=1&id=MTJhMTdlZDktM2U1OTkzMzAtNWViZjhhNTYtOTNmYmYyZWQ=, ActorId: [1:7501624600121599058:2328], ActorState: ReadyState, EndCleanup, isFinal: 1 2025-05-07T08:54:45.543226Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2546: SessionId: ydb://session/3?node_id=1&id=MTJhMTdlZDktM2U1OTkzMzAtNWViZjhhNTYtOTNmYmYyZWQ=, ActorId: [1:7501624600121599058:2328], ActorState: unknown state, Cleanup temp tables: 0 2025-05-07T08:54:45.543304Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2637: SessionId: ydb://session/3?node_id=1&id=MTJhMTdlZDktM2U1OTkzMzAtNWViZjhhNTYtOTNmYmYyZWQ=, ActorId: [1:7501624600121599058:2328], ActorState: unknown state, Session actor destroyed 2025-05-07T08:54:46.951026Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7501624611498579730:2225];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00482b/r3tmp/tmpmFjfC8/pdisk_1.dat 2025-05-07T08:54:46.971637Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-05-07T08:54:47.064892Z node 2 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:54:47.118291Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0, ... 
uery SELECT SUM(CpuThreads) AS ThreadsCount, SUM(CpuThreads * (1.0 - CpuIdle)) AS TotalLoad FROM `.sys/nodes`; rpcActor: [8:7501624803457310930:2365] database: /Root databaseId: /Root pool id: default 2025-05-07T08:55:31.235698Z node 8 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:169: [WorkloadService] [Service] Recieved new request from [8:7501624803457310929:2364], DatabaseId: /Root, PoolId: default, SessionId: ydb://session/3?node_id=8&id=YzZjYTU5Y2EtZGRmZTI5MTMtZjIwY2I3MDItNmNkYjEwMjM= 2025-05-07T08:55:31.235750Z node 8 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:44: [WorkloadService] [TPoolResolverActor] ActorId: [8:7501624803457310933:2367], DatabaseId: /Root, PoolId: default, SessionId: ydb://session/3?node_id=8&id=YzZjYTU5Y2EtZGRmZTI5MTMtZjIwY2I3MDItNmNkYjEwMjM=, Start pool fetching 2025-05-07T08:55:31.236071Z node 8 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:185: [WorkloadService] [TPoolFetcherActor] ActorId: [8:7501624803457310934:2368], DatabaseId: /Root, PoolId: default, Start pool fetching 2025-05-07T08:55:31.236211Z node 8 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:223: [WorkloadService] [TPoolFetcherActor] ActorId: [8:7501624803457310932:2366], DatabaseId: /Root, PoolId: default, Pool info successfully fetched 2025-05-07T08:55:31.236278Z node 8 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:223: [WorkloadService] [TPoolFetcherActor] ActorId: [8:7501624803457310934:2368], DatabaseId: /Root, PoolId: default, Pool info successfully fetched 2025-05-07T08:55:31.236304Z node 8 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:253: [WorkloadService] [Service] Successfully fetched pool default, DatabaseId: /Root 2025-05-07T08:55:31.236317Z node 8 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:107: [WorkloadService] [TPoolResolverActor] ActorId: [8:7501624803457310933:2367], DatabaseId: /Root, PoolId: default, SessionId: ydb://session/3?node_id=8&id=YzZjYTU5Y2EtZGRmZTI5MTMtZjIwY2I3MDItNmNkYjEwMjM=, Pool info successfully resolved 2025-05-07T08:55:31.236362Z node 8 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:279: [WorkloadService] [Service] Successfully fetched pool default, DatabaseId: /Root, SessionId: ydb://session/3?node_id=8&id=YzZjYTU5Y2EtZGRmZTI5MTMtZjIwY2I3MDItNmNkYjEwMjM= 2025-05-07T08:55:31.236412Z node 8 :KQP_WORKLOAD_SERVICE DEBUG: pool_handlers_actors.cpp:203: [WorkloadService] [TPoolHandlerActorBase] ActorId: [8:7501624799162343586:2351], DatabaseId: /Root, PoolId: default, Received new request, worker id: [8:7501624803457310929:2364], session id: ydb://session/3?node_id=8&id=YzZjYTU5Y2EtZGRmZTI5MTMtZjIwY2I3MDItNmNkYjEwMjM= 2025-05-07T08:55:31.236417Z node 8 :KQP_WORKLOAD_SERVICE TRACE: kqp_workload_service.cpp:290: [WorkloadService] [Service] Request placed into pool, DatabaseId: /Root, PoolId: default, SessionId: ydb://session/3?node_id=8&id=YzZjYTU5Y2EtZGRmZTI5MTMtZjIwY2I3MDItNmNkYjEwMjM= 2025-05-07T08:55:31.236475Z node 8 :KQP_WORKLOAD_SERVICE DEBUG: pool_handlers_actors.cpp:313: [WorkloadService] [TPoolHandlerActorBase] ActorId: [8:7501624799162343586:2351], DatabaseId: /Root, PoolId: default, Reply continue success to [8:7501624803457310929:2364], session id: ydb://session/3?node_id=8&id=YzZjYTU5Y2EtZGRmZTI5MTMtZjIwY2I3MDItNmNkYjEwMjM=, local in flight: 1 2025-05-07T08:55:31.236476Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:527: SessionId: ydb://session/3?node_id=8&id=YzZjYTU5Y2EtZGRmZTI5MTMtZjIwY2I3MDItNmNkYjEwMjM=, ActorId: [8:7501624803457310929:2364], ActorState: ExecuteState, TraceId: 01jtmza3536cc7v4xw7h5es8c2, 
continue request, pool id: default 2025-05-07T08:55:31.242509Z node 8 :KQP_SESSION INFO: kqp_query_state.cpp:78: Scheme error, pathId: [OwnerId: 72057594046644480, LocalPathId: 1], status: PathNotTable 2025-05-07T08:55:32.218843Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1298: SessionId: ydb://session/3?node_id=8&id=YzZjYTU5Y2EtZGRmZTI5MTMtZjIwY2I3MDItNmNkYjEwMjM=, ActorId: [8:7501624803457310929:2364], ActorState: ExecuteState, TraceId: 01jtmza3536cc7v4xw7h5es8c2, ExecutePhyTx, tx: 0x000050C000519058 literal: 0 commit: 0 txCtx.DeferredEffects.size(): 0 2025-05-07T08:55:32.218909Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1449: SessionId: ydb://session/3?node_id=8&id=YzZjYTU5Y2EtZGRmZTI5MTMtZjIwY2I3MDItNmNkYjEwMjM=, ActorId: [8:7501624803457310929:2364], ActorState: ExecuteState, TraceId: 01jtmza3536cc7v4xw7h5es8c2, Sending to Executer TraceId: 0 8 2025-05-07T08:55:32.219021Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1507: SessionId: ydb://session/3?node_id=8&id=YzZjYTU5Y2EtZGRmZTI5MTMtZjIwY2I3MDItNmNkYjEwMjM=, ActorId: [8:7501624803457310929:2364], ActorState: ExecuteState, TraceId: 01jtmza3536cc7v4xw7h5es8c2, Created new KQP executer: [8:7501624807752278253:2364] isRollback: 0 2025-05-07T08:55:32.237234Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1699: SessionId: ydb://session/3?node_id=8&id=YzZjYTU5Y2EtZGRmZTI5MTMtZjIwY2I3MDItNmNkYjEwMjM=, ActorId: [8:7501624803457310929:2364], ActorState: ExecuteState, TraceId: 01jtmza3536cc7v4xw7h5es8c2, TEvTxResponse, CurrentTx: 1/2 response.status: SUCCESS 2025-05-07T08:55:32.237337Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1298: SessionId: ydb://session/3?node_id=8&id=YzZjYTU5Y2EtZGRmZTI5MTMtZjIwY2I3MDItNmNkYjEwMjM=, ActorId: [8:7501624803457310929:2364], ActorState: ExecuteState, TraceId: 01jtmza3536cc7v4xw7h5es8c2, ExecutePhyTx, tx: 0x000050C000519658 literal: 1 commit: 1 txCtx.DeferredEffects.size(): 0 2025-05-07T08:55:32.238053Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1699: SessionId: ydb://session/3?node_id=8&id=YzZjYTU5Y2EtZGRmZTI5MTMtZjIwY2I3MDItNmNkYjEwMjM=, ActorId: [8:7501624803457310929:2364], ActorState: ExecuteState, TraceId: 01jtmza3536cc7v4xw7h5es8c2, TEvTxResponse, CurrentTx: 2/2 response.status: SUCCESS 2025-05-07T08:55:32.238210Z node 8 :KQP_SESSION INFO: kqp_session_actor.cpp:1958: SessionId: ydb://session/3?node_id=8&id=YzZjYTU5Y2EtZGRmZTI5MTMtZjIwY2I3MDItNmNkYjEwMjM=, ActorId: [8:7501624803457310929:2364], ActorState: ExecuteState, TraceId: 01jtmza3536cc7v4xw7h5es8c2, txInfo Status: Committed Kind: ReadOnly TotalDuration: 19.442 ServerDuration: 19.375 QueriesCount: 2 2025-05-07T08:55:32.238306Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2113: SessionId: ydb://session/3?node_id=8&id=YzZjYTU5Y2EtZGRmZTI5MTMtZjIwY2I3MDItNmNkYjEwMjM=, ActorId: [8:7501624803457310929:2364], ActorState: ExecuteState, TraceId: 01jtmza3536cc7v4xw7h5es8c2, Create QueryResponse for action: QUERY_ACTION_EXECUTE with SUCCESS status 2025-05-07T08:55:32.238372Z node 8 :KQP_SESSION INFO: kqp_session_actor.cpp:2473: SessionId: ydb://session/3?node_id=8&id=YzZjYTU5Y2EtZGRmZTI5MTMtZjIwY2I3MDItNmNkYjEwMjM=, ActorId: [8:7501624803457310929:2364], ActorState: ExecuteState, TraceId: 01jtmza3536cc7v4xw7h5es8c2, Cleanup start, isFinal: 0 CleanupCtx: 1 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 1 2025-05-07T08:55:32.238773Z node 8 :KQP_WORKLOAD_SERVICE DEBUG: pool_handlers_actors.cpp:233: [WorkloadService] [TPoolHandlerActorBase] ActorId: [8:7501624799162343586:2351], DatabaseId: /Root, 
PoolId: default, Received cleanup request, worker id: [8:7501624803457310929:2364], session id: ydb://session/3?node_id=8&id=YzZjYTU5Y2EtZGRmZTI5MTMtZjIwY2I3MDItNmNkYjEwMjM=, duration: 1.002526s, cpu consumed: 0.001887s 2025-05-07T08:55:32.238807Z node 8 :KQP_WORKLOAD_SERVICE DEBUG: pool_handlers_actors.cpp:437: [WorkloadService] [TPoolHandlerActorBase] ActorId: [8:7501624799162343586:2351], DatabaseId: /Root, PoolId: default, Reply cleanup success to [8:7501624803457310929:2364], session id: ydb://session/3?node_id=8&id=YzZjYTU5Y2EtZGRmZTI5MTMtZjIwY2I3MDItNmNkYjEwMjM=, local in flight: 0 2025-05-07T08:55:32.238870Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2534: SessionId: ydb://session/3?node_id=8&id=YzZjYTU5Y2EtZGRmZTI5MTMtZjIwY2I3MDItNmNkYjEwMjM=, ActorId: [8:7501624803457310929:2364], ActorState: CleanupState, TraceId: 01jtmza3536cc7v4xw7h5es8c2, EndCleanup, isFinal: 0 2025-05-07T08:55:32.238927Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2270: SessionId: ydb://session/3?node_id=8&id=YzZjYTU5Y2EtZGRmZTI5MTMtZjIwY2I3MDItNmNkYjEwMjM=, ActorId: [8:7501624803457310929:2364], ActorState: CleanupState, TraceId: 01jtmza3536cc7v4xw7h5es8c2, Sent query response back to proxy, proxyRequestId: 6, proxyId: [8:7501624764802604324:2259] 2025-05-07T08:55:32.239292Z node 8 :KQP_WORKLOAD_SERVICE TRACE: kqp_workload_service.cpp:335: [WorkloadService] [Service] Request finished in pool, DatabaseId: /Root, PoolId: default, Duration: 1.002526s, CpuConsumed: 0.001887s, AdjustCpuQuota: 0 2025-05-07T08:55:32.239509Z node 8 :KQP_WORKLOAD_SERVICE DEBUG: query_actor.cpp:240: [TQueryBase] [TCpuLoadFetcherActor] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=8&id=YzZjYTU5Y2EtZGRmZTI5MTMtZjIwY2I3MDItNmNkYjEwMjM=, TxId: 2025-05-07T08:55:32.239609Z node 8 :KQP_WORKLOAD_SERVICE DEBUG: query_actor.cpp:367: [TQueryBase] [TCpuLoadFetcherActor] Finish with SUCCESS, SessionId: ydb://session/3?node_id=8&id=YzZjYTU5Y2EtZGRmZTI5MTMtZjIwY2I3MDItNmNkYjEwMjM=, TxId: 2025-05-07T08:55:32.240376Z node 8 :KQP_SESSION INFO: kqp_session_actor.cpp:2315: SessionId: ydb://session/3?node_id=8&id=YzZjYTU5Y2EtZGRmZTI5MTMtZjIwY2I3MDItNmNkYjEwMjM=, ActorId: [8:7501624803457310929:2364], ActorState: ReadyState, Session closed due to explicit close event 2025-05-07T08:55:32.240434Z node 8 :KQP_SESSION INFO: kqp_session_actor.cpp:2473: SessionId: ydb://session/3?node_id=8&id=YzZjYTU5Y2EtZGRmZTI5MTMtZjIwY2I3MDItNmNkYjEwMjM=, ActorId: [8:7501624803457310929:2364], ActorState: ReadyState, Cleanup start, isFinal: 1 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-05-07T08:55:32.240463Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2534: SessionId: ydb://session/3?node_id=8&id=YzZjYTU5Y2EtZGRmZTI5MTMtZjIwY2I3MDItNmNkYjEwMjM=, ActorId: [8:7501624803457310929:2364], ActorState: ReadyState, EndCleanup, isFinal: 1 2025-05-07T08:55:32.240495Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2546: SessionId: ydb://session/3?node_id=8&id=YzZjYTU5Y2EtZGRmZTI5MTMtZjIwY2I3MDItNmNkYjEwMjM=, ActorId: [8:7501624803457310929:2364], ActorState: unknown state, Cleanup temp tables: 0 2025-05-07T08:55:32.240588Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2637: SessionId: ydb://session/3?node_id=8&id=YzZjYTU5Y2EtZGRmZTI5MTMtZjIwY2I3MDItNmNkYjEwMjM=, ActorId: [8:7501624803457310929:2364], ActorState: unknown state, Session actor destroyed 2025-05-07T08:55:32.264532Z node 8 :KQP_SESSION INFO: kqp_session_actor.cpp:2315: SessionId: 
ydb://session/3?node_id=8&id=ZDBkNDdhMzYtZDZlNDk0MjYtMjY5YmM0YjYtZDI5Y2VhYjM=, ActorId: [8:7501624794867376120:2338], ActorState: ReadyState, Session closed due to explicit close event 2025-05-07T08:55:32.264575Z node 8 :KQP_SESSION INFO: kqp_session_actor.cpp:2473: SessionId: ydb://session/3?node_id=8&id=ZDBkNDdhMzYtZDZlNDk0MjYtMjY5YmM0YjYtZDI5Y2VhYjM=, ActorId: [8:7501624794867376120:2338], ActorState: ReadyState, Cleanup start, isFinal: 1 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-05-07T08:55:32.264602Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2534: SessionId: ydb://session/3?node_id=8&id=ZDBkNDdhMzYtZDZlNDk0MjYtMjY5YmM0YjYtZDI5Y2VhYjM=, ActorId: [8:7501624794867376120:2338], ActorState: ReadyState, EndCleanup, isFinal: 1 2025-05-07T08:55:32.264633Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2546: SessionId: ydb://session/3?node_id=8&id=ZDBkNDdhMzYtZDZlNDk0MjYtMjY5YmM0YjYtZDI5Y2VhYjM=, ActorId: [8:7501624794867376120:2338], ActorState: unknown state, Cleanup temp tables: 0 2025-05-07T08:55:32.264692Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2637: SessionId: ydb://session/3?node_id=8&id=ZDBkNDdhMzYtZDZlNDk0MjYtMjY5YmM0YjYtZDI5Y2VhYjM=, ActorId: [8:7501624794867376120:2338], ActorState: unknown state, Session actor destroyed
>> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_index_3__SYNC-pk_types1-all_types1-index1---SYNC]
|90.4%| [TA] $(B)/ydb/core/persqueue/ut/slow/test-results/unittest/{meta.json ... results_accumulator.log}
>> THiveTest::TestHiveBalancerWithImmovableTablets [GOOD] >> THiveTest::TestHiveBalancerHighUsage
>> PgCatalog::CheckSetConfig [FAIL] >> PgCatalog::PgDatabase+useSink
>> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_index_0__SYNC-pk_types4-all_types4-index4---SYNC]
>> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_index_4__SYNC-pk_types0-all_types0-index0---SYNC]
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_change_collector/unittest >> AsyncIndexChangeCollector::MultiIndexedTableReplaceSingleRow [GOOD]
Test command err: 2025-05-07T08:55:17.763589Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:105:2151], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:55:17.763725Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:55:17.763939Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0032ec/r3tmp/tmpXyTRFS/pdisk_1.dat 2025-05-07T08:55:18.147668Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-05-07T08:55:18.192974Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:55:18.251068Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:55:18.251220Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:55:18.262888Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:55:18.349028Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-05-07T08:55:18.400070Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:675:2577] 2025-05-07T08:55:18.400414Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-05-07T08:55:18.455914Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-05-07T08:55:18.456133Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-05-07T08:55:18.458097Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-05-07T08:55:18.458210Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-05-07T08:55:18.458264Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-05-07T08:55:18.458656Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-05-07T08:55:18.459004Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-05-07T08:55:18.459078Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:702:2577] in generation 1 2025-05-07T08:55:18.460611Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037889 actor [1:679:2579] 2025-05-07T08:55:18.460837Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-05-07T08:55:18.475335Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-05-07T08:55:18.475473Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-05-07T08:55:18.476969Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: 
QueueSize: 0, at tablet: 72075186224037889 2025-05-07T08:55:18.477047Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037889 2025-05-07T08:55:18.477097Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037889 2025-05-07T08:55:18.477424Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-05-07T08:55:18.477554Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-05-07T08:55:18.477617Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037889 persisting started state actor id [1:710:2579] in generation 1 2025-05-07T08:55:18.488578Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-05-07T08:55:18.526662Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-05-07T08:55:18.526951Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-05-07T08:55:18.527087Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:713:2598] 2025-05-07T08:55:18.527141Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-05-07T08:55:18.527207Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-05-07T08:55:18.527260Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T08:55:18.527666Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-05-07T08:55:18.527713Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037889 2025-05-07T08:55:18.527783Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037889 not sending time cast registration request in state WaitScheme: missing processing params 2025-05-07T08:55:18.527863Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037889, actorId: [1:714:2599] 2025-05-07T08:55:18.527926Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037889 2025-05-07T08:55:18.527959Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037889, state: WaitScheme 2025-05-07T08:55:18.527987Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-05-07T08:55:18.528472Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-05-07T08:55:18.528594Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-05-07T08:55:18.528772Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T08:55:18.528831Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-05-07T08:55:18.528899Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-05-07T08:55:18.528977Z node 1 
:TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T08:55:18.529037Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037889 2025-05-07T08:55:18.529116Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037889 2025-05-07T08:55:18.529259Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:669:2573], serverId# [1:689:2584], sessionId# [0:0:0] 2025-05-07T08:55:18.529334Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037889 2025-05-07T08:55:18.529361Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 active 0 active planned 0 immediate 0 planned 0 2025-05-07T08:55:18.529390Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037889 TxInFly 0 2025-05-07T08:55:18.529423Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-05-07T08:55:18.530131Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-05-07T08:55:18.530444Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-05-07T08:55:18.530545Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-05-07T08:55:18.531088Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [1:670:2574], serverId# [1:697:2591], sessionId# [0:0:0] 2025-05-07T08:55:18.531306Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037889 2025-05-07T08:55:18.531476Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037889 txId 281474976715657 ssId 72057594046644480 seqNo 2:2 2025-05-07T08:55:18.531536Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037889 2025-05-07T08:55:18.533441Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-05-07T08:55:18.533554Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037889 2025-05-07T08:55:18.550971Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-05-07T08:55:18.551124Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-05-07T08:55:18.551786Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037889 2025-05-07T08:55:18.551864Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037889 not sending time cast registration request in state WaitScheme 2025-05-07T08:55:18.717027Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 
72075186224037889, clientId# [1:731:2610], serverId# [1:734:2613], sessionId# [0:0:0] 2025-05-07T08:55:18.717775Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:733:2612], serverId# [1:735:2614], sessionId# [0:0:0] 2025-05-07T08:55:18.730266Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715657 at step 1000 at tablet 72075186224037889 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1000 MediatorID: 72057594046382081 TabletID: 72075186224037889 } 2025-05-07T08:55:18.730396Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-05-07T08:55:18.730918Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037889 2025-05-07T08:55:18.730982Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 active 0 active planned 0 immediate 0 planned 1 2025-05-07T08:55:18.731040Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000:2814749767 ... erId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-05-07T08:55:36.740105Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037889 2025-05-07T08:55:36.740167Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037890 2025-05-07T08:55:36.740743Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037890 2025-05-07T08:55:36.740822Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037890 2025-05-07T08:55:36.740868Z node 4 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037890 2025-05-07T08:55:36.740936Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037890 at tablet 72075186224037890 send result to client [4:410:2405], exec latency: 0 ms, propose latency: 0 ms 2025-05-07T08:55:36.740990Z node 4 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037890 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-05-07T08:55:36.741075Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037890 2025-05-07T08:55:36.764514Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037889 coordinator 72057594046316545 last step 0 next step 1000 2025-05-07T08:55:36.765213Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-05-07T08:55:36.765322Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037889 state Ready 2025-05-07T08:55:36.765414Z node 4 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037889 Got TEvSchemaChangedResult from SS at 72075186224037889 2025-05-07T08:55:36.775161Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 
72075186224037890 coordinator 72057594046316545 last step 0 next step 1000 2025-05-07T08:55:36.775507Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-05-07T08:55:36.775619Z node 4 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-05-07T08:55:36.776769Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037890 state Ready 2025-05-07T08:55:36.776835Z node 4 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037890 Got TEvSchemaChangedResult from SS at 72075186224037890 2025-05-07T08:55:36.807095Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:829:2687], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:55:36.807207Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:839:2692], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:55:36.807300Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:55:36.826700Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480 2025-05-07T08:55:36.839533Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-05-07T08:55:36.839690Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037889 2025-05-07T08:55:36.839755Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037890 2025-05-07T08:55:37.144047Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-05-07T08:55:37.144222Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037889 2025-05-07T08:55:37.144293Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037890 2025-05-07T08:55:37.152756Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:843:2695], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-05-07T08:55:37.193518Z node 4 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [4:915:2736] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 10], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:55:37.346646Z node 4 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715660. Ctx: { TraceId: 01jtmza8k5dx99ys2czxh4r58h, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=4&id=NjdmYjJiYjAtODVkZjJmMmMtNjA2ODdlYjUtZTJmMDljMzM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:55:37.349676Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [4:1020:2779], serverId# [4:1021:2780], sessionId# [0:0:0] 2025-05-07T08:55:37.350406Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:245: Executing write operation for [0:2] at 72075186224037889 2025-05-07T08:55:37.350755Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:874: PersistChangeRecord: record: { Order: 1 Group: 1746608137350631 Step: 1500 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 1 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037889 2025-05-07T08:55:37.350975Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:874: PersistChangeRecord: record: { Order: 2 Group: 1746608137350631 Step: 1500 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046644480, LocalPathId: 5] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 1 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037889 2025-05-07T08:55:37.351097Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:410: Executed write operation for [0:2] at 72075186224037889, row count=1 2025-05-07T08:55:37.362688Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1170: EnqueueChangeRecords: at tablet: 72075186224037889, records: { Order: 1 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] BodySize: 28 TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 1 }, { Order: 2 PathId: [OwnerId: 72057594046644480, LocalPathId: 5] BodySize: 28 TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 1 } 2025-05-07T08:55:37.362804Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-05-07T08:55:37.448527Z node 4 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715661. Ctx: { TraceId: 01jtmza94q46x1w1k8q0g6j2sh, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=4&id=N2QxNDI2OTUtZDJiZGUwMDItMWZhYTE0MmUtZjg0YTkzMzA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-05-07T08:55:37.455772Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:245: Executing write operation for [0:3] at 72075186224037889 2025-05-07T08:55:37.456194Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:874: PersistChangeRecord: record: { Order: 3 Group: 1746608137456058 Step: 1500 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 1 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037889 2025-05-07T08:55:37.456416Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:874: PersistChangeRecord: record: { Order: 4 Group: 1746608137456058 Step: 1500 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 1 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037889 2025-05-07T08:55:37.456540Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:874: PersistChangeRecord: record: { Order: 5 Group: 1746608137456058 Step: 1500 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046644480, LocalPathId: 5] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 1 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037889 2025-05-07T08:55:37.456628Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:874: PersistChangeRecord: record: { Order: 6 Group: 1746608137456058 Step: 1500 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046644480, LocalPathId: 5] Kind: AsyncIndex Source: Unspecified Body: 24b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 1 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037889 2025-05-07T08:55:37.456744Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:410: Executed write operation for [0:3] at 72075186224037889, row count=1 2025-05-07T08:55:37.470902Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1170: EnqueueChangeRecords: at tablet: 72075186224037889, records: { Order: 3 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] BodySize: 28 TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 1 }, { Order: 4 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] BodySize: 28 TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 1 }, { Order: 5 PathId: [OwnerId: 72057594046644480, LocalPathId: 5] BodySize: 28 TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 1 }, { Order: 6 PathId: [OwnerId: 72057594046644480, LocalPathId: 5] BodySize: 24 TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 1 } 2025-05-07T08:55:37.470990Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-05-07T08:55:37.475753Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [4:1069:2819], serverId# [4:1070:2820], sessionId# [0:0:0] 2025-05-07T08:55:37.491960Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [4:1071:2821], serverId# [4:1072:2822], sessionId# [0:0:0] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_change_collector/unittest >> AsyncIndexChangeCollector::CoverIndexedColumn [GOOD] Test command err: 2025-05-07T08:55:17.686574Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:105:2151], Scheduled 
retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:55:17.686740Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:55:17.687007Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0032a2/r3tmp/tmpbMcyLO/pdisk_1.dat 2025-05-07T08:55:18.089903Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-05-07T08:55:18.145120Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:55:18.196856Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:55:18.197035Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:55:18.213069Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:55:18.297592Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-05-07T08:55:18.347867Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:675:2577] 2025-05-07T08:55:18.348177Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-05-07T08:55:18.397228Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-05-07T08:55:18.397444Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-05-07T08:55:18.399367Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-05-07T08:55:18.399455Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-05-07T08:55:18.399524Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-05-07T08:55:18.399972Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-05-07T08:55:18.400896Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-05-07T08:55:18.401015Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:702:2577] in generation 1 2025-05-07T08:55:18.402693Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037889 actor [1:679:2579] 2025-05-07T08:55:18.402942Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-05-07T08:55:18.412278Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-05-07T08:55:18.412446Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-05-07T08:55:18.413845Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: 
QueueSize: 0, at tablet: 72075186224037889 2025-05-07T08:55:18.413938Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037889 2025-05-07T08:55:18.414012Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037889 2025-05-07T08:55:18.414359Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-05-07T08:55:18.414503Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-05-07T08:55:18.414568Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037889 persisting started state actor id [1:710:2579] in generation 1 2025-05-07T08:55:18.426440Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-05-07T08:55:18.466689Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-05-07T08:55:18.466931Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-05-07T08:55:18.467046Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:713:2598] 2025-05-07T08:55:18.467086Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-05-07T08:55:18.467121Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-05-07T08:55:18.467153Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T08:55:18.467412Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-05-07T08:55:18.467438Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037889 2025-05-07T08:55:18.467470Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037889 not sending time cast registration request in state WaitScheme: missing processing params 2025-05-07T08:55:18.467504Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037889, actorId: [1:714:2599] 2025-05-07T08:55:18.467532Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037889 2025-05-07T08:55:18.467558Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037889, state: WaitScheme 2025-05-07T08:55:18.467578Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-05-07T08:55:18.468070Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-05-07T08:55:18.468195Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-05-07T08:55:18.468404Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T08:55:18.468457Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-05-07T08:55:18.468521Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-05-07T08:55:18.468557Z node 1 
:TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T08:55:18.468595Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037889 2025-05-07T08:55:18.468665Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037889 2025-05-07T08:55:18.468761Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:669:2573], serverId# [1:689:2584], sessionId# [0:0:0] 2025-05-07T08:55:18.468808Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037889 2025-05-07T08:55:18.468823Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 active 0 active planned 0 immediate 0 planned 0 2025-05-07T08:55:18.468841Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037889 TxInFly 0 2025-05-07T08:55:18.468859Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-05-07T08:55:18.469272Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-05-07T08:55:18.469492Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-05-07T08:55:18.469597Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-05-07T08:55:18.470022Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [1:670:2574], serverId# [1:697:2591], sessionId# [0:0:0] 2025-05-07T08:55:18.470168Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037889 2025-05-07T08:55:18.470294Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037889 txId 281474976715657 ssId 72057594046644480 seqNo 2:2 2025-05-07T08:55:18.470334Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037889 2025-05-07T08:55:18.471568Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-05-07T08:55:18.471644Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037889 2025-05-07T08:55:18.482796Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-05-07T08:55:18.482920Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-05-07T08:55:18.483417Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037889 2025-05-07T08:55:18.483537Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037889 not sending time cast registration request in state WaitScheme 2025-05-07T08:55:18.639446Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 
72075186224037889, clientId# [1:731:2610], serverId# [1:734:2613], sessionId# [0:0:0] 2025-05-07T08:55:18.639978Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:733:2612], serverId# [1:735:2614], sessionId# [0:0:0] 2025-05-07T08:55:18.644631Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715657 at step 1000 at tablet 72075186224037889 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1000 MediatorID: 72057594046382081 TabletID: 72075186224037889 } 2025-05-07T08:55:18.644723Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-05-07T08:55:18.645154Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037889 2025-05-07T08:55:18.645213Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 active 0 active planned 0 immediate 0 planned 1 2025-05-07T08:55:18.645268Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000:2814749767 ... 24037889 2025-05-07T08:55:37.090292Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037889 2025-05-07T08:55:37.090338Z node 4 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037889 2025-05-07T08:55:37.090416Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037889 at tablet 72075186224037889 send result to client [4:410:2405], exec latency: 0 ms, propose latency: 0 ms 2025-05-07T08:55:37.090481Z node 4 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037889 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-05-07T08:55:37.090583Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-05-07T08:55:37.090966Z node 4 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-05-07T08:55:37.091030Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T08:55:37.092829Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-05-07T08:55:37.092926Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T08:55:37.093362Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T08:55:37.093407Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-05-07T08:55:37.093465Z node 4 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-05-07T08:55:37.093539Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [4:410:2405], exec latency: 0 ms, propose latency: 0 ms 2025-05-07T08:55:37.093592Z node 4 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-05-07T08:55:37.093662Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: 
[CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T08:55:37.094713Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037890 time 0 2025-05-07T08:55:37.094756Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037890 2025-05-07T08:55:37.095583Z node 4 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037890 step# 1000} 2025-05-07T08:55:37.095666Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037890 2025-05-07T08:55:37.096401Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-05-07T08:55:37.096526Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037889 2025-05-07T08:55:37.096586Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037890 2025-05-07T08:55:37.097073Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037890 2025-05-07T08:55:37.097120Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037890 2025-05-07T08:55:37.097158Z node 4 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037890 2025-05-07T08:55:37.097214Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037890 at tablet 72075186224037890 send result to client [4:410:2405], exec latency: 0 ms, propose latency: 0 ms 2025-05-07T08:55:37.097291Z node 4 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037890 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-05-07T08:55:37.097363Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037890 2025-05-07T08:55:37.108662Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037889 coordinator 72057594046316545 last step 0 next step 1000 2025-05-07T08:55:37.109229Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-05-07T08:55:37.109296Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037889 state Ready 2025-05-07T08:55:37.109364Z node 4 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037889 Got TEvSchemaChangedResult from SS at 72075186224037889 2025-05-07T08:55:37.110311Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037890 coordinator 72057594046316545 last step 0 next step 1000 2025-05-07T08:55:37.110586Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-05-07T08:55:37.110638Z node 4 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 
72075186224037888 2025-05-07T08:55:37.111432Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037890 state Ready 2025-05-07T08:55:37.111487Z node 4 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037890 Got TEvSchemaChangedResult from SS at 72075186224037890 2025-05-07T08:55:37.123934Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:829:2687], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:55:37.124057Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:839:2692], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:55:37.124151Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:55:37.130856Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480 2025-05-07T08:55:37.139667Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-05-07T08:55:37.139827Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037889 2025-05-07T08:55:37.139911Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037890 2025-05-07T08:55:37.343273Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-05-07T08:55:37.343417Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037889 2025-05-07T08:55:37.343815Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037890 2025-05-07T08:55:37.347186Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:843:2695], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-05-07T08:55:37.417742Z node 4 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [4:915:2736] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 10], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:55:37.558576Z node 4 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715660. Ctx: { TraceId: 01jtmza8x16n1v32vjqhrqrk2t, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=4&id=YzVhMTNhZGEtNTgwMjliZGUtOTU4ZWRmZmQtNzgxMDUwZTE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:55:37.561491Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [4:1020:2779], serverId# [4:1021:2780], sessionId# [0:0:0] 2025-05-07T08:55:37.582052Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:245: Executing write operation for [0:2] at 72075186224037889 2025-05-07T08:55:37.582572Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:874: PersistChangeRecord: record: { Order: 1 Group: 1746608137582423 Step: 1500 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] Kind: AsyncIndex Source: Unspecified Body: 38b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 1 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037889 2025-05-07T08:55:37.582776Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:874: PersistChangeRecord: record: { Order: 2 Group: 1746608137582423 Step: 1500 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046644480, LocalPathId: 5] Kind: AsyncIndex Source: Unspecified Body: 42b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 1 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037889 2025-05-07T08:55:37.582911Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:410: Executed write operation for [0:2] at 72075186224037889, row count=1 2025-05-07T08:55:37.594212Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1170: EnqueueChangeRecords: at tablet: 72075186224037889, records: { Order: 1 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] BodySize: 38 TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 1 }, { Order: 2 PathId: [OwnerId: 72057594046644480, LocalPathId: 5] BodySize: 42 TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 1 } 2025-05-07T08:55:37.594310Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-05-07T08:55:37.599501Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [4:1027:2785], serverId# [4:1028:2786], sessionId# [0:0:0] 2025-05-07T08:55:37.606311Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [4:1029:2787], serverId# [4:1030:2788], sessionId# [0:0:0] >> TFlatTableExecutor_VersionedRows::TestVersionedRowsLargeBlobs [GOOD] >> TFlatTableRenameTableAndColumn::TestSchema1ToSchema2NoRestart [GOOD] >> TFlatTableRenameTableAndColumn::TestSchema1ToSchema2 [GOOD] >> TFlatTableRenameTableAndColumn::TestSchema1ToSchema2ToSchema1 >> TFlatTableRenameTableAndColumn::TestSchema1ToSchema2ToSchema1 [GOOD] >> TFlatTableRenameTableAndColumn::TestSchema1ToSchema2ToSchema1ToSchema2 >> 
TFlatTableRenameTableAndColumn::TestSchema1ToSchema2ToSchema1ToSchema2 [GOOD] >> TGenCompaction::OverloadFactorDuringForceCompaction |90.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/async_replication/py3test >> ReadLoad::ShouldReadKqp [GOOD] >> ReadLoad::ShouldReadKqpMoreThanRows >> CdcStreamChangeCollector::PageFaults [GOOD] >> THiveTest::TestHiveBalancerHighUsage [GOOD] >> TGenCompaction::OverloadFactorDuringForceCompaction [GOOD] >> CdcStreamChangeCollector::OldImage >> THiveTest::TestHiveBalancerHighUsageAndColumnShards >> TGenCompaction::ForcedCompactionNoGenerations [GOOD] >> TGenCompaction::ForcedCompactionWithGenerations [GOOD] >> TGenCompaction::ForcedCompactionWithFinalParts [GOOD] >> ResourcePoolClassifiersDdl::TestMultiGroupClassification [GOOD] >> TGenCompaction::ForcedCompactionByDeletedRows [GOOD] >> StoragePool::TestDistributionRandomMin7p [GOOD] >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_ttl_DyNumber-pk_types8-all_types8-index8-DyNumber--] >> ResourcePoolClassifiersDdl::TestAlterResourcePoolClassifier [GOOD] >> ResourcePoolClassifiersSysView::TestResourcePoolClassifiersSysViewOnServerless >> ResourcePoolClassifiersDdl::TestDropResourcePoolClassifier >> TGenCompaction::ForcedCompactionByUnreachableMvccData [GOOD] >> StoragePool::TestDistributionRandomMin7pWithOverflow >> TGenCompaction::ForcedCompactionByUnreachableMvccDataRestart [GOOD] >> StoragePool::TestDistributionRandomMin7pWithOverflow [GOOD] >> TGenCompaction::ForcedCompactionByUnreachableMvccDataBorrowed [GOOD] >> TPartBtreeIndexIteration::FewNodes_Groups [GOOD] >> KqpWorkloadServiceTables::TestCleanupOnServiceRestart [GOOD] >> TIterator::Basics >> TPartBtreeIndexIteration::FewNodes_History >> KqpWorkloadServiceTables::TestLeaseExpiration >> TIterator::Basics [GOOD] >> TIterator::External [GOOD] >> TIterator::Single |90.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/tx_allocator_client/ut/ydb-core-tx-tx_allocator_client-ut |90.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/tx_allocator_client/ut/ydb-core-tx-tx_allocator_client-ut |90.4%| [LD] {RESULT} $(B)/ydb/core/tx/tx_allocator_client/ut/ydb-core-tx-tx_allocator_client-ut |90.4%| [TA] {RESULT} $(B)/ydb/core/persqueue/ut/slow/test-results/unittest/{meta.json ... 
results_accumulator.log} >> KqpWorkloadService::TestStartQueryAfterCancel [GOOD] >> KqpWorkloadService::TestZeroConcurrentQueryLimit >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_ttl_Uint32-pk_types9-all_types9-index9-Uint32--] >> GenericFederatedQuery::YdbManagedSelectAll >> GenericFederatedQuery::IcebergHadoopBasicSelectAll ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/hive/ut/unittest >> StoragePool::TestDistributionRandomMin7pWithOverflow [GOOD] Test command err: Took 13.5092 seconds >> KqpWorkloadServiceDistributed::TestNodeDisconnect [GOOD] >> KqpWorkloadServiceDistributed::TestDistributedLessConcurrentQueryLimit >> ResourcePoolsDdl::TestDropResourcePool [GOOD] >> TIterator::Single [GOOD] >> TIterator::SingleReverse >> BuildStatsHistogram::Ten_Mixed [GOOD] >> BuildStatsHistogram::Ten_Serial >> GenericFederatedQuery::IcebergHiveBasicSelectAll >> AsyncIndexChangeCollector::MultiIndexedTableInsertSingleRow [GOOD] >> AsyncIndexChangeCollector::IndexedPrimaryKeyInsertSingleRow ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/workload_service/ut/unittest >> ResourcePoolsDdl::TestDropResourcePool [GOOD] Test command err: 2025-05-07T08:54:38.215954Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501624576116927429:2195];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:54:38.216399Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00483c/r3tmp/tmpxcDBDs/pdisk_1.dat 2025-05-07T08:54:38.755062Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:54:38.759137Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:54:38.759226Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:54:38.766972Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 64688, node 1 2025-05-07T08:54:39.006545Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:54:39.006580Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:54:39.006592Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:54:39.006694Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26635 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:54:39.405147Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:54:39.419604Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T08:54:41.883782Z node 1 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:440: [WorkloadService] [Service] Started workload service initialization 2025-05-07T08:54:41.885111Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:241: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7501624589001829808:2326], Start check tables existence, number paths: 2 2025-05-07T08:54:41.904977Z node 1 :KQP_WORKLOAD_SERVICE TRACE: kqp_workload_service.cpp:125: [WorkloadService] [Service] Updated node info, noode count: 1 2025-05-07T08:54:41.905033Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:182: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7501624589001829808:2326], Describe table /Root/.metadata/workload_manager/delayed_requests status PathErrorUnknown 2025-05-07T08:54:41.905079Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:182: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7501624589001829808:2326], Describe table /Root/.metadata/workload_manager/running_requests status PathErrorUnknown 2025-05-07T08:54:41.905107Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:289: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7501624589001829808:2326], Successfully finished 2025-05-07T08:54:41.905153Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:367: [WorkloadService] [Service] Cleanup completed, tables exists: 0 2025-05-07T08:54:41.905427Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:100: [WorkloadService] [Service] Subscribed for config changes 2025-05-07T08:54:41.905442Z node 1 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:111: [WorkloadService] [Service] Resource pools was enanbled 2025-05-07T08:54:41.908795Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:223: SessionId: ydb://session/3?node_id=1&id=YmRhNzA0MDMtNzhjNjllODEtYTE2ZmY3OWQtYzMxNWNhMWE=, ActorId: [0:0:0], ActorState: unknown state, Create session actor with id YmRhNzA0MDMtNzhjNjllODEtYTE2ZmY3OWQtYzMxNWNhMWE= 2025-05-07T08:54:41.909457Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:227: SessionId: ydb://session/3?node_id=1&id=YmRhNzA0MDMtNzhjNjllODEtYTE2ZmY3OWQtYzMxNWNhMWE=, ActorId: 
[1:7501624589001829828:2330], ActorState: unknown state, session actor bootstrapped 2025-05-07T08:54:41.959162Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:387: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501624589001829839:2300], DatabaseId: Root, PoolId: sample_pool_id, Start pool creating 2025-05-07T08:54:41.963314Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480 2025-05-07T08:54:41.970547Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:429: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501624589001829839:2300], DatabaseId: Root, PoolId: sample_pool_id, Subscribe on create pool tx: 281474976710658 2025-05-07T08:54:41.973010Z node 1 :KQP_WORKLOAD_SERVICE TRACE: scheme_actors.cpp:352: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501624589001829839:2300], DatabaseId: Root, PoolId: sample_pool_id, Tablet to pipe successfully connected 2025-05-07T08:54:41.984198Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501624589001829839:2300], DatabaseId: Root, PoolId: sample_pool_id, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-05-07T08:54:42.038265Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:387: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501624589001829839:2300], DatabaseId: Root, PoolId: sample_pool_id, Start pool creating 2025-05-07T08:54:42.045566Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501624593296797186:2332] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/sample_pool_id\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:54:42.045749Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:480: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501624589001829839:2300], DatabaseId: Root, PoolId: sample_pool_id, Pool successfully created 2025-05-07T08:54:42.056955Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:157: [WorkloadService] [Service] Recieved subscription request, DatabaseId: /Root, PoolId: default 2025-05-07T08:54:42.056996Z node 1 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:561: [WorkloadService] [Service] Creating new database state for id /Root 2025-05-07T08:54:42.057045Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:185: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501624593296797195:2332], DatabaseId: /Root, PoolId: default, Start pool fetching 2025-05-07T08:54:42.057205Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:443: SessionId: ydb://session/3?node_id=1&id=YmRhNzA0MDMtNzhjNjllODEtYTE2ZmY3OWQtYzMxNWNhMWE=, ActorId: [1:7501624589001829828:2330], ActorState: ReadyState, TraceId: 01jtmz8k48dtkmkaprna4nnseq, received request, proxyRequestId: 3 prepared: 0 tx_control: 0 action: QUERY_ACTION_EXECUTE type: QUERY_TYPE_SQL_DDL text: CREATE RESOURCE POOL my_pool WITH ( CONCURRENT_QUERY_LIMIT=1, QUEUE_SIZE=0 ); rpcActor: [0:0:0] database: /Root databaseId: /Root pool id: default 2025-05-07T08:54:42.064318Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501624593296797195:2332], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:54:42.064461Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:54:42.384518Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710660:0, at schemeshard: 72057594046644480 2025-05-07T08:54:42.404554Z node 1 :KQP_SESSION INFO: kqp_session_actor.cpp:2473: SessionId: ydb://session/3?node_id=1&id=YmRhNzA0MDMtNzhjNjllODEtYTE2ZmY3OWQtYzMxNWNhMWE=, ActorId: [1:7501624589001829828:2330], ActorState: ExecuteState, TraceId: 01jtmz8k48dtkmkaprna4nnseq, Cleanup start, isFinal: 0 CleanupCtx: 1 TransactionsToBeAborted.size(): 0 WorkerId: [1:7501624593296797196:2330] WorkloadServiceCleanup: 0 2025-05-07T08:54:42.406483Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2534: SessionId: ydb://session/3?node_id=1&id=YmRhNzA0MDMtNzhjNjllODEtYTE2ZmY3OWQtYzMxNWNhMWE=, ActorId: [1:7501624589001829828:2330], ActorState: CleanupState, TraceId: 01jtmz8k48dtkmkaprna4nnseq, EndCleanup, isFinal: 0 2025-05-07T08:54:42.406557Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2270: SessionId: ydb://session/3?node_id=1&id=YmRhNzA0MDMtNzhjNjllODEtYTE2ZmY3OWQtYzMxNWNhMWE=, ActorId: [1:7501624589001829828:2330], ActorState: CleanupState, TraceId: 01jtmz8k48dtkmkaprna4nnseq, Sent query response back to proxy, proxyRequestId: 3, proxyId: [1:7501624576116927509:2271] 2025-05-07T08:54:42.415534Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:223: SessionId: ydb://session/3?node_id=1&id=OWY2ZWI4ZTMtYmVhNGJjZmUtYzEwYjNkMGUtNDJjYjliZTA=, ActorId: [0:0:0], ActorState: unknown state, Create session actor with id OWY2ZWI4ZTMtYmVhNGJjZmUtYzEwYjNkMGUtNDJjYjliZTA= 2025-05-07T08:54:42.415963Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:227: SessionId: ydb://session/3?node_id=1&id=OWY2ZWI4ZTMtYmVhNGJjZmUtYzEwYjNkMGUtNDJjYjliZTA=, ActorId: [1:7501624593296797228:2334], ActorState: unknown state, session actor bootstrapped 2025-05-07T08:54:42.416147Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:443: SessionId: ydb://session/3?node_id=1&id=OWY2ZWI4ZTMtYmVhNGJjZmUtYzEwYjNkMGUtNDJjYjliZTA=, ActorId: [1:7501624593296797228:2334], ActorState: ReadyState, TraceId: 01jtmz8kfg2bt8hv6b609xtd0e, received request, proxyRequestId: 4 prepared: 0 tx_control: 0 action: QUERY_ACTION_EXECUTE type: QUERY_TYPE_SQL_GENERIC_QUERY text: SELECT 42; rpcActor: [1:7501624593296797227:2360] database: Root databaseId: /Root pool id: my_pool 2025-05-07T08:54:42.416180Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:157: [WorkloadService] [S ... 
11888:2462], DatabaseId: /Root, PoolId: my_pool, SessionId: ydb://session/3?node_id=8&id=NzgwYzM5NDAtMTNiODlkOTQtOWE5OGQ4YjItNTk2OTFmYjA= 2025-05-07T08:55:43.804162Z node 8 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:185: [WorkloadService] [TPoolFetcherActor] ActorId: [8:7501624854289311891:2463], DatabaseId: /Root, PoolId: my_pool, Start pool fetching 2025-05-07T08:55:43.804278Z node 8 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:44: [WorkloadService] [TPoolResolverActor] ActorId: [8:7501624854289311892:2464], DatabaseId: /Root, PoolId: my_pool, SessionId: ydb://session/3?node_id=8&id=NzgwYzM5NDAtMTNiODlkOTQtOWE5OGQ4YjItNTk2OTFmYjA=, Start pool fetching 2025-05-07T08:55:43.804304Z node 8 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:185: [WorkloadService] [TPoolFetcherActor] ActorId: [8:7501624854289311893:2465], DatabaseId: /Root, PoolId: my_pool, Start pool fetching 2025-05-07T08:55:43.806478Z node 8 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [8:7501624854289311891:2463], DatabaseId: /Root, PoolId: my_pool, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool my_pool not found or you don't have access permissions } 2025-05-07T08:55:43.806490Z node 8 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [8:7501624854289311893:2465], DatabaseId: /Root, PoolId: my_pool, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool my_pool not found or you don't have access permissions } 2025-05-07T08:55:43.806649Z node 8 :KQP_WORKLOAD_SERVICE ERROR: scheme_actors.cpp:56: [WorkloadService] [TPoolResolverActor] ActorId: [8:7501624854289311892:2464], DatabaseId: /Root, PoolId: my_pool, SessionId: ydb://session/3?node_id=8&id=NzgwYzM5NDAtMTNiODlkOTQtOWE5OGQ4YjItNTk2OTFmYjA=, Failed to fetch pool info NOT_FOUND, issues: {
: Error: Resource pool my_pool not found or you don't have access permissions } 2025-05-07T08:55:43.806653Z node 8 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool my_pool, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool my_pool not found or you don't have access permissions } 2025-05-07T08:55:43.806790Z node 8 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:114: [WorkloadService] [TPoolResolverActor] ActorId: [8:7501624854289311892:2464], DatabaseId: /Root, PoolId: my_pool, SessionId: ydb://session/3?node_id=8&id=NzgwYzM5NDAtMTNiODlkOTQtOWE5OGQ4YjItNTk2OTFmYjA=, Failed to resolve pool, NOT_FOUND, issues: {
: Error: Failed to resolve pool id my_pool subissue: {
: Error: Resource pool my_pool not found or you don't have access permissions } } 2025-05-07T08:55:43.806962Z node 8 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:546: [WorkloadService] [Service] Reply continue error NOT_FOUND to [8:7501624854289311888:2462]: {
: Error: Failed to resolve pool id my_pool subissue: {
: Error: Resource pool my_pool not found or you don't have access permissions } } 2025-05-07T08:55:43.807110Z node 8 :KQP_SESSION WARN: kqp_session_actor.cpp:2578: SessionId: ydb://session/3?node_id=8&id=NzgwYzM5NDAtMTNiODlkOTQtOWE5OGQ4YjItNTk2OTFmYjA=, ActorId: [8:7501624854289311888:2462], ActorState: ExecuteState, TraceId: 01jtmzafdv5vwgta451p7wsd1k, Create QueryResponse for error on request, msg: Query failed during adding/waiting in workload pool 2025-05-07T08:55:43.807311Z node 8 :KQP_SESSION INFO: kqp_session_actor.cpp:2473: SessionId: ydb://session/3?node_id=8&id=NzgwYzM5NDAtMTNiODlkOTQtOWE5OGQ4YjItNTk2OTFmYjA=, ActorId: [8:7501624854289311888:2462], ActorState: ExecuteState, TraceId: 01jtmzafdv5vwgta451p7wsd1k, Cleanup start, isFinal: 1 CleanupCtx: 1 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 1 2025-05-07T08:55:43.807499Z node 8 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:189: [WorkloadService] [Service] Finished request with worker actor [8:7501624854289311888:2462], DatabaseId: /Root, PoolId: my_pool, SessionId: ydb://session/3?node_id=8&id=NzgwYzM5NDAtMTNiODlkOTQtOWE5OGQ4YjItNTk2OTFmYjA= 2025-05-07T08:55:43.807566Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2534: SessionId: ydb://session/3?node_id=8&id=NzgwYzM5NDAtMTNiODlkOTQtOWE5OGQ4YjItNTk2OTFmYjA=, ActorId: [8:7501624854289311888:2462], ActorState: CleanupState, TraceId: 01jtmzafdv5vwgta451p7wsd1k, EndCleanup, isFinal: 1 2025-05-07T08:55:43.807697Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2270: SessionId: ydb://session/3?node_id=8&id=NzgwYzM5NDAtMTNiODlkOTQtOWE5OGQ4YjItNTk2OTFmYjA=, ActorId: [8:7501624854289311888:2462], ActorState: CleanupState, TraceId: 01jtmzafdv5vwgta451p7wsd1k, Sent query response back to proxy, proxyRequestId: 19, proxyId: [8:7501624807044670529:2227] 2025-05-07T08:55:43.807738Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2546: SessionId: ydb://session/3?node_id=8&id=NzgwYzM5NDAtMTNiODlkOTQtOWE5OGQ4YjItNTk2OTFmYjA=, ActorId: [8:7501624854289311888:2462], ActorState: unknown state, TraceId: 01jtmzafdv5vwgta451p7wsd1k, Cleanup temp tables: 0 2025-05-07T08:55:43.807896Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2637: SessionId: ydb://session/3?node_id=8&id=NzgwYzM5NDAtMTNiODlkOTQtOWE5OGQ4YjItNTk2OTFmYjA=, ActorId: [8:7501624854289311888:2462], ActorState: unknown state, TraceId: 01jtmzafdv5vwgta451p7wsd1k, Session actor destroyed 2025-05-07T08:55:43.816715Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1699: SessionId: ydb://session/3?node_id=8&id=ZDA5MmFhMGMtMWFlNzQ0ZDUtYjI5OTFjN2UtOWZiYzYyMmQ=, ActorId: [8:7501624854289311828:2452], ActorState: ExecuteState, TraceId: 01jtmzafdm77f5z42rdwc32f15, TEvTxResponse, CurrentTx: 1/2 response.status: SUCCESS 2025-05-07T08:55:43.816823Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1298: SessionId: ydb://session/3?node_id=8&id=ZDA5MmFhMGMtMWFlNzQ0ZDUtYjI5OTFjN2UtOWZiYzYyMmQ=, ActorId: [8:7501624854289311828:2452], ActorState: ExecuteState, TraceId: 01jtmzafdm77f5z42rdwc32f15, ExecutePhyTx, tx: 0x000050C0003AAB18 literal: 1 commit: 1 txCtx.DeferredEffects.size(): 0 2025-05-07T08:55:43.826011Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1699: SessionId: ydb://session/3?node_id=8&id=ZDA5MmFhMGMtMWFlNzQ0ZDUtYjI5OTFjN2UtOWZiYzYyMmQ=, ActorId: [8:7501624854289311828:2452], ActorState: ExecuteState, TraceId: 01jtmzafdm77f5z42rdwc32f15, TEvTxResponse, CurrentTx: 2/2 response.status: SUCCESS 2025-05-07T08:55:43.826315Z node 8 :KQP_SESSION INFO: kqp_session_actor.cpp:1958: SessionId: 
ydb://session/3?node_id=8&id=ZDA5MmFhMGMtMWFlNzQ0ZDUtYjI5OTFjN2UtOWZiYzYyMmQ=, ActorId: [8:7501624854289311828:2452], ActorState: ExecuteState, TraceId: 01jtmzafdm77f5z42rdwc32f15, txInfo Status: Committed Kind: ReadOnly TotalDuration: 28.538 ServerDuration: 28.377 QueriesCount: 2 2025-05-07T08:55:43.826465Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2113: SessionId: ydb://session/3?node_id=8&id=ZDA5MmFhMGMtMWFlNzQ0ZDUtYjI5OTFjN2UtOWZiYzYyMmQ=, ActorId: [8:7501624854289311828:2452], ActorState: ExecuteState, TraceId: 01jtmzafdm77f5z42rdwc32f15, Create QueryResponse for action: QUERY_ACTION_EXECUTE with SUCCESS status 2025-05-07T08:55:43.826561Z node 8 :KQP_SESSION INFO: kqp_session_actor.cpp:2473: SessionId: ydb://session/3?node_id=8&id=ZDA5MmFhMGMtMWFlNzQ0ZDUtYjI5OTFjN2UtOWZiYzYyMmQ=, ActorId: [8:7501624854289311828:2452], ActorState: ExecuteState, TraceId: 01jtmzafdm77f5z42rdwc32f15, Cleanup start, isFinal: 0 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-05-07T08:55:43.826596Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2534: SessionId: ydb://session/3?node_id=8&id=ZDA5MmFhMGMtMWFlNzQ0ZDUtYjI5OTFjN2UtOWZiYzYyMmQ=, ActorId: [8:7501624854289311828:2452], ActorState: ExecuteState, TraceId: 01jtmzafdm77f5z42rdwc32f15, EndCleanup, isFinal: 0 2025-05-07T08:55:43.826665Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2270: SessionId: ydb://session/3?node_id=8&id=ZDA5MmFhMGMtMWFlNzQ0ZDUtYjI5OTFjN2UtOWZiYzYyMmQ=, ActorId: [8:7501624854289311828:2452], ActorState: ExecuteState, TraceId: 01jtmzafdm77f5z42rdwc32f15, Sent query response back to proxy, proxyRequestId: 18, proxyId: [8:7501624807044670529:2227] 2025-05-07T08:55:43.827373Z node 8 :KQP_WORKLOAD_SERVICE DEBUG: query_actor.cpp:240: [TQueryBase] [TRefreshPoolStateQuery] TraceId: my_pool, RequestDatabase: /Root, RequestSessionId: , State: Describe pool, TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=8&id=ZDA5MmFhMGMtMWFlNzQ0ZDUtYjI5OTFjN2UtOWZiYzYyMmQ=, TxId: 2025-05-07T08:55:43.827474Z node 8 :KQP_WORKLOAD_SERVICE DEBUG: query_actor.cpp:367: [TQueryBase] [TRefreshPoolStateQuery] TraceId: my_pool, RequestDatabase: /Root, RequestSessionId: , State: Describe pool, Finish with SUCCESS, SessionId: ydb://session/3?node_id=8&id=ZDA5MmFhMGMtMWFlNzQ0ZDUtYjI5OTFjN2UtOWZiYzYyMmQ=, TxId: 2025-05-07T08:55:43.828595Z node 8 :KQP_WORKLOAD_SERVICE TRACE: pool_handlers_actors.cpp:746: [WorkloadService] [TPoolHandlerActorBase] ActorId: [8:7501624832814474896:2339], DatabaseId: /Root, PoolId: my_pool, succefully refreshed pool state, in flight: 0, delayed: 0 2025-05-07T08:55:43.828653Z node 8 :KQP_SESSION INFO: kqp_session_actor.cpp:2315: SessionId: ydb://session/3?node_id=8&id=ZDA5MmFhMGMtMWFlNzQ0ZDUtYjI5OTFjN2UtOWZiYzYyMmQ=, ActorId: [8:7501624854289311828:2452], ActorState: ReadyState, Session closed due to explicit close event 2025-05-07T08:55:43.828693Z node 8 :KQP_SESSION INFO: kqp_session_actor.cpp:2473: SessionId: ydb://session/3?node_id=8&id=ZDA5MmFhMGMtMWFlNzQ0ZDUtYjI5OTFjN2UtOWZiYzYyMmQ=, ActorId: [8:7501624854289311828:2452], ActorState: ReadyState, Cleanup start, isFinal: 1 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-05-07T08:55:43.828725Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2534: SessionId: ydb://session/3?node_id=8&id=ZDA5MmFhMGMtMWFlNzQ0ZDUtYjI5OTFjN2UtOWZiYzYyMmQ=, ActorId: [8:7501624854289311828:2452], ActorState: ReadyState, EndCleanup, isFinal: 1 2025-05-07T08:55:43.828753Z node 8 
:KQP_SESSION DEBUG: kqp_session_actor.cpp:2546: SessionId: ydb://session/3?node_id=8&id=ZDA5MmFhMGMtMWFlNzQ0ZDUtYjI5OTFjN2UtOWZiYzYyMmQ=, ActorId: [8:7501624854289311828:2452], ActorState: unknown state, Cleanup temp tables: 0 2025-05-07T08:55:43.828836Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2637: SessionId: ydb://session/3?node_id=8&id=ZDA5MmFhMGMtMWFlNzQ0ZDUtYjI5OTFjN2UtOWZiYzYyMmQ=, ActorId: [8:7501624854289311828:2452], ActorState: unknown state, Session actor destroyed 2025-05-07T08:55:43.829738Z node 8 :KQP_SESSION INFO: kqp_session_actor.cpp:2315: SessionId: ydb://session/3?node_id=8&id=NjU2OWQ5NjQtNGI3OGJjMjAtNzYwZGFhMmYtNDlhZGQxMjA=, ActorId: [8:7501624828519507466:2330], ActorState: ReadyState, Session closed due to explicit close event 2025-05-07T08:55:43.829798Z node 8 :KQP_SESSION INFO: kqp_session_actor.cpp:2473: SessionId: ydb://session/3?node_id=8&id=NjU2OWQ5NjQtNGI3OGJjMjAtNzYwZGFhMmYtNDlhZGQxMjA=, ActorId: [8:7501624828519507466:2330], ActorState: ReadyState, Cleanup start, isFinal: 1 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-05-07T08:55:43.829821Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2534: SessionId: ydb://session/3?node_id=8&id=NjU2OWQ5NjQtNGI3OGJjMjAtNzYwZGFhMmYtNDlhZGQxMjA=, ActorId: [8:7501624828519507466:2330], ActorState: ReadyState, EndCleanup, isFinal: 1 2025-05-07T08:55:43.829842Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2546: SessionId: ydb://session/3?node_id=8&id=NjU2OWQ5NjQtNGI3OGJjMjAtNzYwZGFhMmYtNDlhZGQxMjA=, ActorId: [8:7501624828519507466:2330], ActorState: unknown state, Cleanup temp tables: 0 2025-05-07T08:55:43.829895Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2637: SessionId: ydb://session/3?node_id=8&id=NjU2OWQ5NjQtNGI3OGJjMjAtNzYwZGFhMmYtNDlhZGQxMjA=, ActorId: [8:7501624828519507466:2330], ActorState: unknown state, Session actor destroyed >> TIterator::SingleReverse [GOOD] >> TIterator::Mixed >> SystemView::PartitionStatsOneSchemeShardDataQuery [GOOD] >> SystemView::PgTablesOneSchemeShardDataQuery >> AsyncIndexChangeCollector::IndexedPrimaryKeyDeleteSingleRow [GOOD] >> AsyncIndexChangeCollector::ImplicitlyUpdateCoveredColumn >> GenericFederatedQuery::YdbFilterPushdown ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_incremental_backup/unittest >> IncrementalBackup::ComplexRestoreBackupCollection-WithIncremental [GOOD] Test command err: 2025-05-07T08:51:14.003470Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:105:2151], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:51:14.003631Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:51:14.003914Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003163/r3tmp/tmp4A9pbQ/pdisk_1.dat 2025-05-07T08:51:14.346365Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269877761, Sender [1:597:2521], Recipient [1:410:2405]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-05-07T08:51:14.346451Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4937: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-05-07T08:51:14.346490Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5713: Pipe server connected, at tablet: 72057594046644480 2025-05-07T08:51:14.346618Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271122432, Sender [1:594:2519], Recipient [1:410:2405]: {TEvModifySchemeTransaction txid# 1 TabletId# 72057594046644480} 2025-05-07T08:51:14.346653Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4851: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-05-07T08:51:14.479333Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "Root" StoragePools { Name: "/Root:test" Kind: "test" } } } TxId: 1 TabletId: 72057594046644480 , at schemeshard: 72057594046644480 2025-05-07T08:51:14.479609Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //Root, opId: 1:0, at schemeshard: 72057594046644480 2025-05-07T08:51:14.479816Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 0 2025-05-07T08:51:14.480059Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-05-07T08:51:14.480153Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-05-07T08:51:14.480304Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-05-07T08:51:14.481000Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046644480 PathId: 1, at schemeshard: 72057594046644480 2025-05-07T08:51:14.481160Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //Root 2025-05-07T08:51:14.481219Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 
72057594046644480 2025-05-07T08:51:14.481258Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:275: Activate send for 1:0 2025-05-07T08:51:14.481478Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 2146435072, Sender [1:410:2405], Recipient [1:410:2405]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-05-07T08:51:14.481527Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4857: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-05-07T08:51:14.481596Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046644480 2025-05-07T08:51:14.481652Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046644480 2025-05-07T08:51:14.481696Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:51:14.481733Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:51:14.481843Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-05-07T08:51:14.482379Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-05-07T08:51:14.482442Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:275: Activate send for 1:0 2025-05-07T08:51:14.482580Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 2146435072, Sender [1:410:2405], Recipient [1:410:2405]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-05-07T08:51:14.482612Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4857: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-05-07T08:51:14.482706Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046644480 2025-05-07T08:51:14.482753Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046644480 2025-05-07T08:51:14.482804Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:51:14.482872Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-05-07T08:51:14.483274Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-05-07T08:51:14.483305Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:275: Activate send for 1:0 2025-05-07T08:51:14.483417Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 2146435072, Sender [1:410:2405], Recipient [1:410:2405]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-05-07T08:51:14.483452Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4857: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-05-07T08:51:14.483491Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at 
schemeshard: 72057594046644480 2025-05-07T08:51:14.483525Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046644480 2025-05-07T08:51:14.483574Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046644480 2025-05-07T08:51:14.483615Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-05-07T08:51:14.483667Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:51:14.487390Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046644480 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:51:14.487958Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-05-07T08:51:14.488010Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046644480 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:51:14.488201Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 2025-05-07T08:51:14.489489Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269877760, Sender [1:602:2526], Recipient [1:410:2405]: NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594046316545 Status: OK ServerId: [1:604:2527] Leader: 1 Dead: 0 Generation: 2 VersionInfo: } 2025-05-07T08:51:14.489545Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4935: StateWork, processing event TEvTabletPipe::TEvClientConnected 2025-05-07T08:51:14.489586Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5663: Handle TEvClientConnected, tabletId: 72057594046316545, status: OK, at schemeshard: 72057594046644480 2025-05-07T08:51:14.489771Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269091328, Sender [1:406:2401], Recipient [1:410:2405]: NKikimrTx.TEvProposeTransactionStatus Status: 16 StepId: 500 TxId: 1 2025-05-07T08:51:14.490203Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269877761, Sender [1:606:2529], Recipient [1:410:2405]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-05-07T08:51:14.490253Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4937: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-05-07T08:51:14.490290Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5713: Pipe server connected, at tablet: 72057594046644480 2025-05-07T08:51:14.490420Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271124996, Sender [1:594:2519], Recipient [1:410:2405]: NKikimrScheme.TEvNotifyTxCompletion TxId: 1 2025-05-07T08:51:14.490447Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4853: StateWork, processing event TEvSchemeShard::TEvNotifyTxCompletion 2025-05-07T08:51:14.490533Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 1, at schemeshard: 72057594046644480 2025-05-07T08:51:14.490576Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 1, ready parts: 0/1, is published: true 2025-05-07T08:51:14.490612Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 1, at schemeshard: 72057594046644480 2025-05-07T08:51:14.532213Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 273285138, Sender [1:43:2090], Recipient [1:410:2405]: NKikimr::NConsole::TEvConsole::TEvConfigNotificationRequest { Config { QueryServiceConfig { AvailableExternalDataSources: "ObjectStorage" } } ItemKinds: 26 ItemKinds: 34 ItemKinds: 52 ItemKinds: 54 ItemKinds: 73 Local: true } 2025-05-07T08:51:14.532360Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7610: Got new config: QueryServiceConfig { AvailableExternalDataSources: "ObjectStorage" } 2025-05-07T08:51:14.532405Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-0 ... meshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 281474976715668 ready parts: 6/7 2025-05-07T08:54:12.616493Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976715668:6 progress is 6/7 2025-05-07T08:54:12.616517Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 281474976715668 ready parts: 6/7 2025-05-07T08:54:12.616550Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 281474976715668, ready parts: 6/7, is published: true 2025-05-07T08:54:12.616852Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 2146435072, Sender [3:419:2412], Recipient [3:419:2412]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-05-07T08:54:12.616884Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4857: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-05-07T08:54:12.616930Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 281474976715668:4, at schemeshard: 72057594046644480 2025-05-07T08:54:12.616959Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046644480] TDone opId# 281474976715668:4 ProgressState 2025-05-07T08:54:12.617019Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-05-07T08:54:12.617045Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976715668:4 progress is 7/7 2025-05-07T08:54:12.617068Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 281474976715668 ready parts: 7/7 2025-05-07T08:54:12.617110Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976715668:4 progress is 7/7 2025-05-07T08:54:12.617133Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 281474976715668 ready parts: 7/7 2025-05-07T08:54:12.617158Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 281474976715668, ready parts: 7/7, is published: true 2025-05-07T08:54:12.617243Z 
node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [3:1232:2940] message: TxId: 281474976715668 2025-05-07T08:54:12.617315Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 281474976715668 ready parts: 7/7 2025-05-07T08:54:12.617378Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 281474976715668:0 2025-05-07T08:54:12.617429Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 281474976715668:0 2025-05-07T08:54:12.617511Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 17] was 2 2025-05-07T08:54:12.617559Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 281474976715668:1 2025-05-07T08:54:12.617586Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 281474976715668:1 2025-05-07T08:54:12.617625Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 18] was 2 2025-05-07T08:54:12.617649Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 281474976715668:2 2025-05-07T08:54:12.617674Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 281474976715668:2 2025-05-07T08:54:12.617706Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 19] was 3 2025-05-07T08:54:12.617731Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 281474976715668:3 2025-05-07T08:54:12.617752Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 281474976715668:3 2025-05-07T08:54:12.617846Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 20] was 3 2025-05-07T08:54:12.617881Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate source path for pathId [OwnerId: 72057594046644480, LocalPathId: 6] was 3 2025-05-07T08:54:12.617935Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 281474976715668:4 2025-05-07T08:54:12.617958Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 281474976715668:4 2025-05-07T08:54:12.622259Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 21] was 3 2025-05-07T08:54:12.622321Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate source path for pathId [OwnerId: 72057594046644480, LocalPathId: 12] was 3 2025-05-07T08:54:12.622367Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 281474976715668:5 2025-05-07T08:54:12.622402Z node 3 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 281474976715668:5 2025-05-07T08:54:12.622457Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 22] was 3 2025-05-07T08:54:12.622485Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate source path for pathId [OwnerId: 72057594046644480, LocalPathId: 13] was 3 2025-05-07T08:54:12.622517Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 281474976715668:6 2025-05-07T08:54:12.622540Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 281474976715668:6 2025-05-07T08:54:12.622599Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 23] was 3 2025-05-07T08:54:12.622627Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate source path for pathId [OwnerId: 72057594046644480, LocalPathId: 16] was 3 2025-05-07T08:54:12.623556Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-05-07T08:54:12.623711Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-05-07T08:54:12.623800Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-05-07T08:54:12.623908Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-05-07T08:54:12.624030Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:630: Send to actor: [3:1232:2940] msg type: 271124998 msg: NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976715668 at schemeshard: 72057594046644480 2025-05-07T08:54:12.624680Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269877764, Sender [3:1239:2946], Recipient [3:419:2412]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-05-07T08:54:12.624727Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4938: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-05-07T08:54:12.624762Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5761: Server pipe is reset, at schemeshard: 72057594046644480 2025-05-07T08:54:13.093901Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037893, clientId# [3:1524:3171], serverId# [3:1525:3172], sessionId# [0:0:0] 2025-05-07T08:54:13.102477Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715669. Ctx: { TraceId: 01jtmz7pde6f21y3hfaenrq8ch, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=NTkzN2I5MmEtODEzNTQwNDktOWNlM2JhMmMtZTE3ZjFiYmQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root { items { uint32_value: 1 } items { uint32_value: 10 } }, { items { uint32_value: 2 } items { uint32_value: 20 } }, { items { uint32_value: 3 } items { uint32_value: 30 } }, { items { uint32_value: 4 } items { uint32_value: 40 } }, { items { uint32_value: 5 } items { uint32_value: 50 } } 2025-05-07T08:54:13.432473Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037895, clientId# [3:1553:3188], serverId# [3:1554:3189], sessionId# [0:0:0] 2025-05-07T08:54:13.433357Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715670. Ctx: { TraceId: 01jtmz7pxw4hfats7qgthbpecg, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=Y2I1YjEyM2EtNzAzYjYzNmItNzc4NWMyNjktNzQwYjMwMTE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root { items { uint32_value: 11 } items { uint32_value: 101 } }, { items { uint32_value: 21 } items { uint32_value: 201 } }, { items { uint32_value: 31 } items { uint32_value: 301 } }, { items { uint32_value: 41 } items { uint32_value: 401 } }, { items { uint32_value: 51 } items { uint32_value: 501 } } 2025-05-07T08:54:13.686771Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037892, clientId# [3:1582:3205], serverId# [3:1583:3206], sessionId# [0:0:0] 2025-05-07T08:54:13.687054Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715671. Ctx: { TraceId: 01jtmz7q64fpzj3remhswmp1x1, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=OGE3YjkwNTUtMjMyZTFkZTMtNTRjMDFlMGItN2E2YzcxN2Q=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root { items { uint32_value: 12 } items { uint32_value: 102 } }, { items { uint32_value: 22 } items { uint32_value: 202 } }, { items { uint32_value: 32 } items { uint32_value: 302 } }, { items { uint32_value: 42 } items { uint32_value: 402 } }, { items { uint32_value: 52 } items { uint32_value: 502 } } 2025-05-07T08:54:13.971872Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037894, clientId# [3:1611:3222], serverId# [3:1612:3223], sessionId# [0:0:0] 2025-05-07T08:54:13.972133Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715672. Ctx: { TraceId: 01jtmz7qe36s68k7fqf2254hpg, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=ZTA3MDYyYTUtNTY0YTE2NzctNDRkMjYxNWYtYTc5MmJmMTY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root { items { uint32_value: 13 } items { uint32_value: 103 } }, { items { uint32_value: 23 } items { uint32_value: 203 } }, { items { uint32_value: 33 } items { uint32_value: 303 } }, { items { uint32_value: 43 } items { uint32_value: 403 } }, { items { uint32_value: 53 } items { uint32_value: 503 } } >> TVersions::WreckHeadReverse [GOOD] >> TVersions::Wreck2 >> TExecutorDb::CoordinatorSimulation [GOOD] >> SystemView::AuthUsers [GOOD] >> SystemView::AuthUsers_LockUnlock >> TExecutorDb::RandomCoordinatorSimulation >> GenericFederatedQuery::IcebergHadoopSaSelectAll >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-71 [FAIL] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-72 >> THiveTest::TestHiveBalancerHighUsageAndColumnShards [GOOD] >> THiveTest::TestHiveBalancerWithSpareNodes >> TPartBtreeIndexIteration::FewNodes_History [GOOD] >> TPartBtreeIndexIteration::FewNodes_Sticky |90.4%| [TA] $(B)/ydb/core/tx/datashard/ut_incremental_backup/test-results/unittest/{meta.json ... results_accumulator.log} >> CdcStreamChangeCollector::OldImage [GOOD] >> KqpWorkloadService::TestZeroConcurrentQueryLimit [GOOD] >> GenericFederatedQuery::IcebergHiveSaSelectAll >> RetryPolicy::TWriteSession_TestPolicy [GOOD] >> RetryPolicy::TWriteSession_TestBrokenPolicy >> AsyncIndexChangeCollector::IndexedPrimaryKeyInsertSingleRow [GOOD] |90.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/executer_actor/ut/ydb-core-kqp-executer_actor-ut |90.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/executer_actor/ut/ydb-core-kqp-executer_actor-ut ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_change_collector/unittest >> CdcStreamChangeCollector::OldImage [GOOD] Test command err: 2025-05-07T08:55:17.788159Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:105:2151], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:55:17.788348Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:55:17.791011Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0032c1/r3tmp/tmpOc4eCv/pdisk_1.dat 2025-05-07T08:55:18.271698Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-05-07T08:55:18.312135Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:55:18.322627Z node 1 :TABLET_SAUSAGECACHE NOTICE: shared_sausagecache.cpp:1187: Update config MemoryLimit: 33554432 2025-05-07T08:55:18.370307Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:55:18.370458Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:55:18.383970Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:55:18.476954Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-05-07T08:55:18.515807Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:665:2569] 2025-05-07T08:55:18.516048Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-05-07T08:55:18.568273Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-05-07T08:55:18.568414Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-05-07T08:55:18.570338Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-05-07T08:55:18.570431Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-05-07T08:55:18.570491Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-05-07T08:55:18.570919Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-05-07T08:55:18.571102Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-05-07T08:55:18.571177Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:681:2569] in generation 1 2025-05-07T08:55:18.582679Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-05-07T08:55:18.627143Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-05-07T08:55:18.627382Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing 
params 2025-05-07T08:55:18.627508Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:683:2579] 2025-05-07T08:55:18.627554Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-05-07T08:55:18.627594Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-05-07T08:55:18.627629Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T08:55:18.628093Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-05-07T08:55:18.628193Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-05-07T08:55:18.628275Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T08:55:18.628325Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-05-07T08:55:18.628384Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-05-07T08:55:18.628463Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T08:55:18.628589Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:661:2566], serverId# [1:672:2573], sessionId# [0:0:0] 2025-05-07T08:55:18.629145Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-05-07T08:55:18.629392Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-05-07T08:55:18.629496Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-05-07T08:55:18.631226Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-05-07T08:55:18.642024Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-05-07T08:55:18.642164Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-05-07T08:55:18.793180Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:697:2587], serverId# [1:699:2589], sessionId# [0:0:0] 2025-05-07T08:55:18.799129Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715657 at step 1000 at tablet 72075186224037888 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1000 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2025-05-07T08:55:18.799231Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T08:55:18.799584Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T08:55:18.799642Z node 1 
:TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-05-07T08:55:18.799720Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000:281474976715657] in PlanQueue unit at 72075186224037888 2025-05-07T08:55:18.800031Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1000:281474976715657 keys extracted: 0 2025-05-07T08:55:18.800233Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-05-07T08:55:18.801051Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T08:55:18.801147Z node 1 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-05-07T08:55:18.803471Z node 1 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-05-07T08:55:18.804060Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-05-07T08:55:18.806367Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-05-07T08:55:18.806425Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T08:55:18.807093Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-05-07T08:55:18.807203Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T08:55:18.808445Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T08:55:18.808495Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-05-07T08:55:18.808558Z node 1 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-05-07T08:55:18.808658Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [1:410:2405], exec latency: 0 ms, propose latency: 0 ms 2025-05-07T08:55:18.808730Z node 1 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-05-07T08:55:18.808829Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T08:55:18.813392Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-05-07T08:55:18.815405Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-05-07T08:55:18.815601Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 
2025-05-07T08:55:18.815675Z node 1 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-05-07T08:55:18.827713Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-05-07T08:55:18.828139Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-05-07T08:55:18.828292Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715658 ssId 72057594046644480 seqNo 2:2 2025-05-07T08:55:18.828349Z node 1 :TX_DATASHARD INFO: check_scheme_tx_unit.cpp:234: Check scheme tx, proposed scheme version# 2 current version# 1 expected version# 2 at tablet# 72075186224037888 txId# 281474976715658 2025-05-07T08:55:18.828389Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715658 at tablet 72075186224037888 2025-05-07T08:55:18.855032Z node 1 :TX_DATASHARD DEBUG: datashard__pro ... D DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-05-07T08:55:47.685140Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T08:55:47.695116Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T08:55:47.695193Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-05-07T08:55:47.695258Z node 4 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-05-07T08:55:47.695354Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [4:410:2405], exec latency: 0 ms, propose latency: 0 ms 2025-05-07T08:55:47.695422Z node 4 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-05-07T08:55:47.695534Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T08:55:47.697391Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-05-07T08:55:47.708334Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-05-07T08:55:47.708625Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-05-07T08:55:47.708710Z node 4 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-05-07T08:55:47.764368Z node 4 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-05-07T08:55:47.764586Z node 4 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715658 ssId 72057594046644480 seqNo 2:2 2025-05-07T08:55:47.764681Z node 4 :TX_DATASHARD INFO: 
check_scheme_tx_unit.cpp:234: Check scheme tx, proposed scheme version# 2 current version# 1 expected version# 2 at tablet# 72075186224037888 txId# 281474976715658 2025-05-07T08:55:47.764730Z node 4 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715658 at tablet 72075186224037888 2025-05-07T08:55:47.765757Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-05-07T08:55:47.814866Z node 4 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-05-07T08:55:48.197481Z node 4 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715658 at step 1500 at tablet 72075186224037888 { Transactions { TxId: 281474976715658 AckTo { RawX1: 0 RawX2: 0 } } Step: 1500 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2025-05-07T08:55:48.197586Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T08:55:48.206474Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T08:55:48.206592Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-05-07T08:55:48.206666Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1500:281474976715658] in PlanQueue unit at 72075186224037888 2025-05-07T08:55:48.206956Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1500:281474976715658 keys extracted: 0 2025-05-07T08:55:48.207155Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-05-07T08:55:48.207597Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T08:55:48.208514Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-05-07T08:55:48.329226Z node 4 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1500} 2025-05-07T08:55:48.329360Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T08:55:48.329424Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T08:55:48.329504Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T08:55:48.329604Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1500 : 281474976715658] from 72075186224037888 at tablet 72075186224037888 send result to client [4:410:2405], exec latency: 0 ms, propose latency: 0 ms 2025-05-07T08:55:48.329678Z node 4 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715658 state Ready TxInFly 0 2025-05-07T08:55:48.329848Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T08:55:48.332469Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle 
TEvSchemaChangedResult 281474976715658 datashard 72075186224037888 state Ready 2025-05-07T08:55:48.332575Z node 4 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-05-07T08:55:48.393177Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:876:2714], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:55:48.393339Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:887:2719], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:55:48.393476Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:55:48.417277Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480 2025-05-07T08:55:48.449140Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-05-07T08:55:48.692445Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-05-07T08:55:48.730469Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:890:2722], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-05-07T08:55:48.782660Z node 4 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [4:946:2759] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:55:49.131149Z node 4 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715661. Ctx: { TraceId: 01jtmzakww8qy3garz0ydbv1t9, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=4&id=ZGY0NTAxYjEtOWYzMWJlNDEtNTM4M2Y3NTQtNDU5NGM0ZDM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:55:49.170863Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [4:977:2776], serverId# [4:978:2777], sessionId# [0:0:0] 2025-05-07T08:55:49.171498Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:245: Executing write operation for [0:3] at 72075186224037888 2025-05-07T08:55:49.171836Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:874: PersistChangeRecord: record: { Order: 1 Group: 1746608149171698 Step: 2000 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] Kind: CdcDataChange Source: Unspecified Body: 18b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 2 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037888 2025-05-07T08:55:49.172072Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:410: Executed write operation for [0:3] at 72075186224037888, row count=1 2025-05-07T08:55:49.185815Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1170: EnqueueChangeRecords: at tablet: 72075186224037888, records: { Order: 1 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] BodySize: 18 TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 2 } 2025-05-07T08:55:49.185925Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T08:55:49.616058Z node 4 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715662. Ctx: { TraceId: 01jtmzampmcf09z5zedqk19fg9, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=4&id=OTkyNmQ0MmQtNWI1N2U2YjYtZjc4YTJlNDgtMmI4MWYyMTQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-05-07T08:55:49.627380Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:245: Executing write operation for [0:4] at 72075186224037888 2025-05-07T08:55:49.627787Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:874: PersistChangeRecord: record: { Order: 2 Group: 1746608149627634 Step: 2000 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] Kind: CdcDataChange Source: Unspecified Body: 40b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 2 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037888 2025-05-07T08:55:49.628031Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:410: Executed write operation for [0:4] at 72075186224037888, row count=1 2025-05-07T08:55:49.642965Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1170: EnqueueChangeRecords: at tablet: 72075186224037888, records: { Order: 2 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] BodySize: 40 TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 2 } 2025-05-07T08:55:49.643067Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T08:55:49.645482Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [4:1005:2795], serverId# [4:1006:2796], sessionId# [0:0:0] 2025-05-07T08:55:49.674790Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [4:1007:2797], serverId# [4:1008:2798], sessionId# [0:0:0] |90.5%| [TA] {RESULT} $(B)/ydb/core/tx/datashard/ut_incremental_backup/test-results/unittest/{meta.json ... results_accumulator.log} ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/workload_service/ut/unittest >> KqpWorkloadService::TestZeroConcurrentQueryLimit [GOOD] Test command err: 2025-05-07T08:54:38.305014Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501624577812504485:2064];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:54:38.318450Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/004840/r3tmp/tmpxHVIBT/pdisk_1.dat 2025-05-07T08:54:38.865264Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:54:38.872819Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:54:38.872910Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:54:38.876835Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 4526, node 1 2025-05-07T08:54:39.014588Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:54:39.014615Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:54:39.014626Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:54:39.014744Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 
TClient is connected to server localhost:26254 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:54:39.400463Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:54:39.415077Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T08:54:42.178210Z node 1 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:440: [WorkloadService] [Service] Started workload service initialization 2025-05-07T08:54:42.182169Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:223: SessionId: ydb://session/3?node_id=1&id=MzJlNmQ3NTAtYTljNGFhODQtZjkwMDczODEtNzg0YWM4ODY=, ActorId: [0:0:0], ActorState: unknown state, Create session actor with id MzJlNmQ3NTAtYTljNGFhODQtZjkwMDczODEtNzg0YWM4ODY= 2025-05-07T08:54:42.183117Z node 1 :KQP_WORKLOAD_SERVICE TRACE: kqp_workload_service.cpp:125: [WorkloadService] [Service] Updated node info, noode count: 1 2025-05-07T08:54:42.183134Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:100: [WorkloadService] [Service] Subscribed for config changes 2025-05-07T08:54:42.183160Z node 1 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:111: [WorkloadService] [Service] Resource pools was enanbled 2025-05-07T08:54:42.204591Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:241: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7501624594992374287:2327], Start check tables existence, number paths: 2 2025-05-07T08:54:42.204710Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:227: SessionId: ydb://session/3?node_id=1&id=MzJlNmQ3NTAtYTljNGFhODQtZjkwMDczODEtNzg0YWM4ODY=, ActorId: [1:7501624594992374288:2328], ActorState: unknown state, session actor bootstrapped 2025-05-07T08:54:42.210229Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:182: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7501624594992374287:2327], Describe table /Root/.metadata/workload_manager/delayed_requests status PathErrorUnknown 2025-05-07T08:54:42.210296Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:182: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7501624594992374287:2327], Describe table /Root/.metadata/workload_manager/running_requests status PathErrorUnknown 2025-05-07T08:54:42.210331Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:289: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7501624594992374287:2327], Successfully finished 
2025-05-07T08:54:42.210424Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:367: [WorkloadService] [Service] Cleanup completed, tables exists: 0 2025-05-07T08:54:42.226050Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:387: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501624594992374315:2300], DatabaseId: Root, PoolId: sample_pool_id, Start pool creating 2025-05-07T08:54:42.230610Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480 2025-05-07T08:54:42.232932Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:429: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501624594992374315:2300], DatabaseId: Root, PoolId: sample_pool_id, Subscribe on create pool tx: 281474976710658 2025-05-07T08:54:42.233123Z node 1 :KQP_WORKLOAD_SERVICE TRACE: scheme_actors.cpp:352: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501624594992374315:2300], DatabaseId: Root, PoolId: sample_pool_id, Tablet to pipe successfully connected 2025-05-07T08:54:42.244560Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501624594992374315:2300], DatabaseId: Root, PoolId: sample_pool_id, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-05-07T08:54:42.341840Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:387: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501624594992374315:2300], DatabaseId: Root, PoolId: sample_pool_id, Start pool creating 2025-05-07T08:54:42.350590Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501624594992374368:2333] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/sample_pool_id\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:54:42.350754Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:480: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501624594992374315:2300], DatabaseId: Root, PoolId: sample_pool_id, Pool successfully created 2025-05-07T08:54:42.354815Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:223: SessionId: ydb://session/3?node_id=1&id=MThiOWU2YjAtNzljYWJhYWItNjY4OTdlZTUtMWUzNGQ3NDg=, ActorId: [0:0:0], ActorState: unknown state, Create session actor with id MThiOWU2YjAtNzljYWJhYWItNjY4OTdlZTUtMWUzNGQ3NDg= 2025-05-07T08:54:42.355193Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:157: [WorkloadService] [Service] Recieved subscription request, DatabaseId: /Root, PoolId: sample_pool_id 2025-05-07T08:54:42.355216Z node 1 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:561: [WorkloadService] [Service] Creating new database state for id /Root 2025-05-07T08:54:42.355298Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:227: SessionId: ydb://session/3?node_id=1&id=MThiOWU2YjAtNzljYWJhYWItNjY4OTdlZTUtMWUzNGQ3NDg=, ActorId: [1:7501624594992374376:2332], ActorState: unknown state, session actor bootstrapped 2025-05-07T08:54:42.355455Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:185: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501624594992374378:2333], DatabaseId: /Root, PoolId: sample_pool_id, Start pool fetching 2025-05-07T08:54:42.357149Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:223: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501624594992374378:2333], DatabaseId: /Root, PoolId: sample_pool_id, Pool info successfully fetched 2025-05-07T08:54:42.357211Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:253: [WorkloadService] [Service] Successfully fetched pool sample_pool_id, DatabaseId: /Root 2025-05-07T08:54:42.357231Z node 1 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:571: [WorkloadService] [Service] Creating new handler for pool /Root/sample_pool_id 2025-05-07T08:54:42.357537Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: pool_handlers_actors.cpp:466: [WorkloadService] [TPoolHandlerActorBase] ActorId: [1:7501624594992374387:2334], DatabaseId: /Root, PoolId: sample_pool_id, Subscribed on schemeboard notifications for path: [OwnerId: 72057594046644480, LocalPathId: 5] 2025-05-07T08:54:42.358837Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: pool_handlers_actors.cpp:274: [WorkloadService] [TPoolHandlerActorBase] ActorId: [1:7501624594992374387:2334], DatabaseId: /Root, PoolId: sample_pool_id, Got watch notification 2025-05-07T08:54:42.359361Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:443: SessionId: ydb://session/3?node_id=1&id=MThiOWU2YjAtNzljYWJhYWItNjY4OTdlZTUtMWUzNGQ3NDg=, ActorId: [1:7501624594992374376:2332], ActorState: ReadyState, TraceId: 01jtmz8kdq24hxw0apgpvkj1eh, received request, proxyRequestId: 3 prepared: 0 tx_control: 0 action: 
QUERY_ACTION_EXECUTE type: QUERY_TYPE_SQL_GENERIC_QUERY text: SELECT 42; rpcActor: [1:7501624594992374375:2339] database: Root databaseId: /Root pool id: sample_pool_id 2025-05-07T08:54:42.359439Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:169: [WorkloadService] [Service] Recieved new request from [1:7501624594992374376:2332], DatabaseId: /Root, PoolId: sample_pool_id, SessionId: ydb://session/3?node_id=1&id=MThiOWU2YjAtNzljYWJhYWItNjY4OTdlZTUtMWUzNGQ3NDg= 2025-05-07T08:54:42.359526Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:574: [WorkloadService] [TDatabaseFetcherActor] ActorId: [1:7501624594992374396:2335], Database: /Root, Start database fetching 2025-05-07T08:54:42.359809Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:600: [WorkloadService] [TDatabaseFetcherActor] ActorId: [1:7501624594992374396:2335], Database: /Root, Database info successfully fetched, serverless: 0 2025-05-07T08:54:42.359866Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:240: [WorkloadService] [Service] Successfully fetched database info, DatabaseId: /Root, Serverless: 0 2025-05-07T08:54:42.359915Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:44: [WorkloadService] [TPoolResolverActor] ActorId: [1:7501624594992374398:2336], DatabaseId: /Root, PoolId: sample_pool_id, SessionId: ydb://session/3?node_id=1&id=MThiOWU2YjAtNzljYWJhYWItNjY4OTdlZTUtMWUzNGQ3NDg=, Start pool fetching 2025-05-07T08:54:42.359951Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:185: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501624594992374399:2337], DatabaseId: /Root, PoolId: sample_pool_id, Start pool fetching 2025-05-07T08:54:42.360269Z ... sActor] ActorId: [6:7501624878310699187:2330], Describe table /Root/.metadata/workload_manager/running_requests status PathErrorUnknown 2025-05-07T08:55:48.678414Z node 6 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:289: [WorkloadService] [TCleanupTablesActor] ActorId: [6:7501624878310699187:2330], Successfully finished 2025-05-07T08:55:48.678469Z node 6 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:367: [WorkloadService] [Service] Cleanup completed, tables exists: 0 2025-05-07T08:55:48.681283Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480 2025-05-07T08:55:48.682128Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[6:7501624856835862204:2196];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:55:48.682199Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:55:48.685362Z node 6 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:429: [WorkloadService] [TPoolCreatorActor] ActorId: [6:7501624878310699204:2298], DatabaseId: Root, PoolId: sample_pool_id, Subscribe on create pool tx: 281474976710658 2025-05-07T08:55:48.687843Z node 6 :KQP_WORKLOAD_SERVICE TRACE: scheme_actors.cpp:352: [WorkloadService] [TPoolCreatorActor] ActorId: [6:7501624878310699204:2298], DatabaseId: Root, PoolId: sample_pool_id, Tablet to pipe successfully connected 2025-05-07T08:55:48.721755Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [6:7501624878310699204:2298], DatabaseId: Root, PoolId: sample_pool_id, 
Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-05-07T08:55:48.770567Z node 6 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:387: [WorkloadService] [TPoolCreatorActor] ActorId: [6:7501624878310699204:2298], DatabaseId: Root, PoolId: sample_pool_id, Start pool creating 2025-05-07T08:55:48.774196Z node 6 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [6:7501624878310699258:2332] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/sample_pool_id\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:55:48.774391Z node 6 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:480: [WorkloadService] [TPoolCreatorActor] ActorId: [6:7501624878310699204:2298], DatabaseId: Root, PoolId: sample_pool_id, Pool successfully created 2025-05-07T08:55:48.780978Z node 6 :KQP_SESSION DEBUG: kqp_session_actor.cpp:223: SessionId: ydb://session/3?node_id=6&id=NjcwNDlhMDYtNTJhZDI0Yi0yZDViNWFkNy1iMDhlM2U5Nw==, ActorId: [0:0:0], ActorState: unknown state, Create session actor with id NjcwNDlhMDYtNTJhZDI0Yi0yZDViNWFkNy1iMDhlM2U5Nw== 2025-05-07T08:55:48.781392Z node 6 :KQP_SESSION DEBUG: kqp_session_actor.cpp:227: SessionId: ydb://session/3?node_id=6&id=NjcwNDlhMDYtNTJhZDI0Yi0yZDViNWFkNy1iMDhlM2U5Nw==, ActorId: [6:7501624878310699266:2333], ActorState: unknown state, session actor bootstrapped 2025-05-07T08:55:48.781567Z node 6 :KQP_SESSION DEBUG: kqp_session_actor.cpp:443: SessionId: ydb://session/3?node_id=6&id=NjcwNDlhMDYtNTJhZDI0Yi0yZDViNWFkNy1iMDhlM2U5Nw==, ActorId: [6:7501624878310699266:2333], ActorState: ReadyState, TraceId: 01jtmzam9d3my3dp80w77ncgbn, received request, proxyRequestId: 3 prepared: 0 tx_control: 0 action: QUERY_ACTION_EXECUTE type: QUERY_TYPE_SQL_GENERIC_QUERY text: SELECT 42; rpcActor: [6:7501624878310699265:2338] database: Root databaseId: /Root pool id: sample_pool_id 2025-05-07T08:55:48.781624Z node 6 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:157: [WorkloadService] [Service] Recieved subscription request, DatabaseId: /Root, PoolId: sample_pool_id 2025-05-07T08:55:48.781639Z node 6 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:561: [WorkloadService] [Service] Creating new database state for id /Root 2025-05-07T08:55:48.781702Z node 6 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:169: [WorkloadService] [Service] Recieved new request from [6:7501624878310699266:2333], DatabaseId: /Root, PoolId: sample_pool_id, SessionId: ydb://session/3?node_id=6&id=NjcwNDlhMDYtNTJhZDI0Yi0yZDViNWFkNy1iMDhlM2U5Nw== 2025-05-07T08:55:48.781764Z node 6 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:185: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7501624878310699268:2334], DatabaseId: /Root, PoolId: sample_pool_id, Start pool fetching 2025-05-07T08:55:48.781855Z node 6 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:574: [WorkloadService] [TDatabaseFetcherActor] ActorId: [6:7501624878310699269:2335], Database: /Root, Start database fetching 2025-05-07T08:55:48.785139Z node 6 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:600: [WorkloadService] [TDatabaseFetcherActor] ActorId: [6:7501624878310699269:2335], Database: /Root, Database info successfully fetched, serverless: 0 2025-05-07T08:55:48.785273Z node 6 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:223: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7501624878310699268:2334], DatabaseId: /Root, PoolId: sample_pool_id, Pool info successfully 
fetched 2025-05-07T08:55:48.785320Z node 6 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:240: [WorkloadService] [Service] Successfully fetched database info, DatabaseId: /Root, Serverless: 0 2025-05-07T08:55:48.785363Z node 6 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:253: [WorkloadService] [Service] Successfully fetched pool sample_pool_id, DatabaseId: /Root 2025-05-07T08:55:48.785382Z node 6 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:571: [WorkloadService] [Service] Creating new handler for pool /Root/sample_pool_id 2025-05-07T08:55:48.785569Z node 6 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:44: [WorkloadService] [TPoolResolverActor] ActorId: [6:7501624878310699279:2336], DatabaseId: /Root, PoolId: sample_pool_id, SessionId: ydb://session/3?node_id=6&id=NjcwNDlhMDYtNTJhZDI0Yi0yZDViNWFkNy1iMDhlM2U5Nw==, Start pool fetching 2025-05-07T08:55:48.785602Z node 6 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:185: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7501624878310699281:2338], DatabaseId: /Root, PoolId: sample_pool_id, Start pool fetching 2025-05-07T08:55:48.785685Z node 6 :KQP_WORKLOAD_SERVICE DEBUG: pool_handlers_actors.cpp:466: [WorkloadService] [TPoolHandlerActorBase] ActorId: [6:7501624878310699280:2337], DatabaseId: /Root, PoolId: sample_pool_id, Subscribed on schemeboard notifications for path: [OwnerId: 72057594046644480, LocalPathId: 5] 2025-05-07T08:55:48.787644Z node 6 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:223: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7501624878310699281:2338], DatabaseId: /Root, PoolId: sample_pool_id, Pool info successfully fetched 2025-05-07T08:55:48.787718Z node 6 :KQP_WORKLOAD_SERVICE DEBUG: pool_handlers_actors.cpp:274: [WorkloadService] [TPoolHandlerActorBase] ActorId: [6:7501624878310699280:2337], DatabaseId: /Root, PoolId: sample_pool_id, Got watch notification 2025-05-07T08:55:48.787856Z node 6 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:107: [WorkloadService] [TPoolResolverActor] ActorId: [6:7501624878310699279:2336], DatabaseId: /Root, PoolId: sample_pool_id, SessionId: ydb://session/3?node_id=6&id=NjcwNDlhMDYtNTJhZDI0Yi0yZDViNWFkNy1iMDhlM2U5Nw==, Pool info successfully resolved 2025-05-07T08:55:48.787965Z node 6 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:279: [WorkloadService] [Service] Successfully fetched pool sample_pool_id, DatabaseId: /Root, SessionId: ydb://session/3?node_id=6&id=NjcwNDlhMDYtNTJhZDI0Yi0yZDViNWFkNy1iMDhlM2U5Nw== 2025-05-07T08:55:48.788063Z node 6 :KQP_WORKLOAD_SERVICE TRACE: kqp_workload_service.cpp:290: [WorkloadService] [Service] Request placed into pool, DatabaseId: /Root, PoolId: sample_pool_id, SessionId: ydb://session/3?node_id=6&id=NjcwNDlhMDYtNTJhZDI0Yi0yZDViNWFkNy1iMDhlM2U5Nw== 2025-05-07T08:55:48.788216Z node 6 :KQP_SESSION WARN: kqp_session_actor.cpp:2578: SessionId: ydb://session/3?node_id=6&id=NjcwNDlhMDYtNTJhZDI0Yi0yZDViNWFkNy1iMDhlM2U5Nw==, ActorId: [6:7501624878310699266:2333], ActorState: ExecuteState, TraceId: 01jtmzam9d3my3dp80w77ncgbn, Create QueryResponse for error on request, msg: Query failed during adding/waiting in workload pool sample_pool_id 2025-05-07T08:55:48.788360Z node 6 :KQP_SESSION INFO: kqp_session_actor.cpp:2473: SessionId: ydb://session/3?node_id=6&id=NjcwNDlhMDYtNTJhZDI0Yi0yZDViNWFkNy1iMDhlM2U5Nw==, ActorId: [6:7501624878310699266:2333], ActorState: ExecuteState, TraceId: 01jtmzam9d3my3dp80w77ncgbn, Cleanup start, isFinal: 1 CleanupCtx: 1 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 1 
2025-05-07T08:55:48.788592Z node 6 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:189: [WorkloadService] [Service] Finished request with worker actor [6:7501624878310699266:2333], DatabaseId: /Root, PoolId: sample_pool_id, SessionId: ydb://session/3?node_id=6&id=NjcwNDlhMDYtNTJhZDI0Yi0yZDViNWFkNy1iMDhlM2U5Nw== 2025-05-07T08:55:48.788646Z node 6 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2534: SessionId: ydb://session/3?node_id=6&id=NjcwNDlhMDYtNTJhZDI0Yi0yZDViNWFkNy1iMDhlM2U5Nw==, ActorId: [6:7501624878310699266:2333], ActorState: CleanupState, TraceId: 01jtmzam9d3my3dp80w77ncgbn, EndCleanup, isFinal: 1 2025-05-07T08:55:48.788761Z node 6 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2270: SessionId: ydb://session/3?node_id=6&id=NjcwNDlhMDYtNTJhZDI0Yi0yZDViNWFkNy1iMDhlM2U5Nw==, ActorId: [6:7501624878310699266:2333], ActorState: CleanupState, TraceId: 01jtmzam9d3my3dp80w77ncgbn, Sent query response back to proxy, proxyRequestId: 3, proxyId: [6:7501624856835862271:2263] 2025-05-07T08:55:48.788793Z node 6 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2546: SessionId: ydb://session/3?node_id=6&id=NjcwNDlhMDYtNTJhZDI0Yi0yZDViNWFkNy1iMDhlM2U5Nw==, ActorId: [6:7501624878310699266:2333], ActorState: unknown state, TraceId: 01jtmzam9d3my3dp80w77ncgbn, Cleanup temp tables: 0 2025-05-07T08:55:48.788923Z node 6 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2637: SessionId: ydb://session/3?node_id=6&id=NjcwNDlhMDYtNTJhZDI0Yi0yZDViNWFkNy1iMDhlM2U5Nw==, ActorId: [6:7501624878310699266:2333], ActorState: unknown state, TraceId: 01jtmzam9d3my3dp80w77ncgbn, Session actor destroyed 2025-05-07T08:55:48.806873Z node 6 :KQP_SESSION INFO: kqp_session_actor.cpp:2315: SessionId: ydb://session/3?node_id=6&id=ZDJjMDAzNTctODc4M2ViOWQtOGFkNTMwZWMtZGZmNDIzZTk=, ActorId: [6:7501624878310699188:2331], ActorState: ReadyState, Session closed due to explicit close event 2025-05-07T08:55:48.806924Z node 6 :KQP_SESSION INFO: kqp_session_actor.cpp:2473: SessionId: ydb://session/3?node_id=6&id=ZDJjMDAzNTctODc4M2ViOWQtOGFkNTMwZWMtZGZmNDIzZTk=, ActorId: [6:7501624878310699188:2331], ActorState: ReadyState, Cleanup start, isFinal: 1 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-05-07T08:55:48.806954Z node 6 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2534: SessionId: ydb://session/3?node_id=6&id=ZDJjMDAzNTctODc4M2ViOWQtOGFkNTMwZWMtZGZmNDIzZTk=, ActorId: [6:7501624878310699188:2331], ActorState: ReadyState, EndCleanup, isFinal: 1 2025-05-07T08:55:48.806985Z node 6 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2546: SessionId: ydb://session/3?node_id=6&id=ZDJjMDAzNTctODc4M2ViOWQtOGFkNTMwZWMtZGZmNDIzZTk=, ActorId: [6:7501624878310699188:2331], ActorState: unknown state, Cleanup temp tables: 0 2025-05-07T08:55:48.807067Z node 6 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2637: SessionId: ydb://session/3?node_id=6&id=ZDJjMDAzNTctODc4M2ViOWQtOGFkNTMwZWMtZGZmNDIzZTk=, ActorId: [6:7501624878310699188:2331], ActorState: unknown state, Session actor destroyed |90.5%| [LD] {RESULT} $(B)/ydb/core/kqp/executer_actor/ut/ydb-core-kqp-executer_actor-ut |90.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/services/persqueue_cluster_discovery/ut/ydb-services-persqueue_cluster_discovery-ut |90.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/services/persqueue_cluster_discovery/ut/ydb-services-persqueue_cluster_discovery-ut |90.5%| [LD] {RESULT} $(B)/ydb/services/persqueue_cluster_discovery/ut/ydb-services-persqueue_cluster_discovery-ut >> TPartBtreeIndexIteration::FewNodes_Sticky [GOOD] >> 
TPartBtreeIndexIteration::FewNodes_Slices ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_change_collector/unittest >> AsyncIndexChangeCollector::IndexedPrimaryKeyInsertSingleRow [GOOD] Test command err: 2025-05-07T08:55:29.505903Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:105:2151], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:55:29.506076Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:55:29.506359Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003297/r3tmp/tmp5aNLUr/pdisk_1.dat 2025-05-07T08:55:29.959140Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-05-07T08:55:30.006676Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:55:30.063786Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:55:30.063930Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:55:30.078797Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:55:30.176490Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-05-07T08:55:30.234973Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:675:2577] 2025-05-07T08:55:30.235334Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-05-07T08:55:30.284822Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-05-07T08:55:30.285035Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-05-07T08:55:30.286934Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-05-07T08:55:30.287041Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-05-07T08:55:30.287121Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-05-07T08:55:30.287535Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-05-07T08:55:30.287898Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-05-07T08:55:30.287964Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:702:2577] in generation 1 2025-05-07T08:55:30.289509Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037889 actor [1:679:2579] 2025-05-07T08:55:30.289722Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-05-07T08:55:30.300231Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-05-07T08:55:30.300365Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-05-07T08:55:30.302013Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: 
QueueSize: 0, at tablet: 72075186224037889 2025-05-07T08:55:30.302111Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037889 2025-05-07T08:55:30.302170Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037889 2025-05-07T08:55:30.302515Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-05-07T08:55:30.302649Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-05-07T08:55:30.302717Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037889 persisting started state actor id [1:710:2579] in generation 1 2025-05-07T08:55:30.317493Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-05-07T08:55:30.350919Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-05-07T08:55:30.351163Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-05-07T08:55:30.351281Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:713:2598] 2025-05-07T08:55:30.351322Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-05-07T08:55:30.351364Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-05-07T08:55:30.351410Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T08:55:30.351770Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-05-07T08:55:30.351813Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037889 2025-05-07T08:55:30.351873Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037889 not sending time cast registration request in state WaitScheme: missing processing params 2025-05-07T08:55:30.351942Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037889, actorId: [1:714:2599] 2025-05-07T08:55:30.351987Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037889 2025-05-07T08:55:30.352031Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037889, state: WaitScheme 2025-05-07T08:55:30.352056Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-05-07T08:55:30.352536Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-05-07T08:55:30.352653Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-05-07T08:55:30.352818Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T08:55:30.352884Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-05-07T08:55:30.352943Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-05-07T08:55:30.353024Z node 1 
:TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T08:55:30.353097Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037889 2025-05-07T08:55:30.353187Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037889 2025-05-07T08:55:30.353350Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:669:2573], serverId# [1:689:2584], sessionId# [0:0:0] 2025-05-07T08:55:30.353419Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037889 2025-05-07T08:55:30.353447Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 active 0 active planned 0 immediate 0 planned 0 2025-05-07T08:55:30.353479Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037889 TxInFly 0 2025-05-07T08:55:30.353515Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-05-07T08:55:30.354052Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-05-07T08:55:30.354335Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-05-07T08:55:30.354430Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-05-07T08:55:30.354987Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [1:670:2574], serverId# [1:697:2591], sessionId# [0:0:0] 2025-05-07T08:55:30.355197Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037889 2025-05-07T08:55:30.355390Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037889 txId 281474976715657 ssId 72057594046644480 seqNo 2:2 2025-05-07T08:55:30.355451Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037889 2025-05-07T08:55:30.357278Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-05-07T08:55:30.357396Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037889 2025-05-07T08:55:30.370922Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-05-07T08:55:30.371059Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-05-07T08:55:30.371623Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037889 2025-05-07T08:55:30.371683Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037889 not sending time cast registration request in state WaitScheme 2025-05-07T08:55:30.544862Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 
72075186224037889, clientId# [1:731:2610], serverId# [1:734:2613], sessionId# [0:0:0] 2025-05-07T08:55:30.545424Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:733:2612], serverId# [1:735:2614], sessionId# [0:0:0] 2025-05-07T08:55:30.554454Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715657 at step 1000 at tablet 72075186224037889 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1000 MediatorID: 72057594046382081 TabletID: 72075186224037889 } 2025-05-07T08:55:30.554562Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-05-07T08:55:30.554943Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037889 2025-05-07T08:55:30.555000Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 active 0 active planned 0 immediate 0 planned 1 2025-05-07T08:55:30.555057Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000:2814749767 ... Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1000 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2025-05-07T08:55:50.490457Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T08:55:50.490601Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037889 2025-05-07T08:55:50.490684Z node 4 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037889 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-05-07T08:55:50.491180Z node 4 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037889 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-05-07T08:55:50.491635Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-05-07T08:55:50.493079Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T08:55:50.493157Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-05-07T08:55:50.493203Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000:281474976715657] in PlanQueue unit at 72075186224037888 2025-05-07T08:55:50.493405Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1000:281474976715657 keys extracted: 0 2025-05-07T08:55:50.493534Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-05-07T08:55:50.511963Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T08:55:50.512085Z node 4 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 4] schema version# 1 2025-05-07T08:55:50.512458Z node 4 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count 
is 1 buckets per mediator 2 2025-05-07T08:55:50.512840Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-05-07T08:55:50.519837Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037889 time 0 2025-05-07T08:55:50.519931Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-05-07T08:55:50.521017Z node 4 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037889 step# 1000} 2025-05-07T08:55:50.521127Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-05-07T08:55:50.536548Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-05-07T08:55:50.536644Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037889 2025-05-07T08:55:50.536709Z node 4 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037889 2025-05-07T08:55:50.536790Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037889 at tablet 72075186224037889 send result to client [4:410:2405], exec latency: 0 ms, propose latency: 0 ms 2025-05-07T08:55:50.536855Z node 4 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037889 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-05-07T08:55:50.537012Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-05-07T08:55:50.537803Z node 4 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-05-07T08:55:50.537872Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T08:55:50.539938Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-05-07T08:55:50.539992Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T08:55:50.540380Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-05-07T08:55:50.540458Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037889 2025-05-07T08:55:50.540859Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T08:55:50.540916Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-05-07T08:55:50.540961Z node 4 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-05-07T08:55:50.541022Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [4:410:2405], exec latency: 0 ms, propose latency: 0 ms 2025-05-07T08:55:50.541072Z node 4 :TX_DATASHARD INFO: datashard.cpp:1590: 
72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-05-07T08:55:50.541141Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T08:55:50.557288Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037889 state Ready 2025-05-07T08:55:50.557393Z node 4 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037889 Got TEvSchemaChangedResult from SS at 72075186224037889 2025-05-07T08:55:50.571138Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037889 coordinator 72057594046316545 last step 0 next step 1000 2025-05-07T08:55:50.571485Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-05-07T08:55:50.572548Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-05-07T08:55:50.572617Z node 4 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-05-07T08:55:50.596087Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:779:2650], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:55:50.596241Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:789:2655], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:55:50.596345Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:55:50.603216Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480 2025-05-07T08:55:50.611018Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-05-07T08:55:50.611190Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037889 2025-05-07T08:55:50.835702Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-05-07T08:55:50.835897Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037889 2025-05-07T08:55:50.845023Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:793:2658], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-05-07T08:55:50.886675Z node 4 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [4:865:2699] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:55:51.044185Z node 4 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715660. Ctx: { TraceId: 01jtmzap1v5r2j9n0p26htv77b, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=4&id=Y2Q2ZWM3ZTQtMTVhOGE4MTktYWNlYTU0ZjgtZGI4YmQ3MTA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:55:51.047793Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [4:934:2730], serverId# [4:935:2731], sessionId# [0:0:0] 2025-05-07T08:55:51.048304Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:245: Executing write operation for [0:2] at 72075186224037889 2025-05-07T08:55:51.048654Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:874: PersistChangeRecord: record: { Order: 1 Group: 1746608151048528 Step: 1500 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 1 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037889 2025-05-07T08:55:51.048867Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:410: Executed write operation for [0:2] at 72075186224037889, row count=1 2025-05-07T08:55:51.062941Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1170: EnqueueChangeRecords: at tablet: 72075186224037889, records: { Order: 1 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] BodySize: 28 TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 1 } 2025-05-07T08:55:51.063061Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-05-07T08:55:51.068509Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [4:941:2736], serverId# [4:942:2737], sessionId# [0:0:0] 2025-05-07T08:55:51.075822Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [4:943:2738], serverId# [4:944:2739], sessionId# [0:0:0]
>> PgCatalog::PgDatabase+useSink [GOOD]
>> PgCatalog::PgDatabase-useSink
>> GenericFederatedQuery::IcebergHiveTokenSelectAll
|90.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_export/ydb-core-tx-schemeshard-ut_export
|90.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_export/ydb-core-tx-schemeshard-ut_export
|90.5%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_export/ydb-core-tx-schemeshard-ut_export
|90.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_backup_collection/ydb-core-tx-schemeshard-ut_backup_collection
|90.5%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_backup_collection/ydb-core-tx-schemeshard-ut_backup_collection
|90.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_backup_collection/ydb-core-tx-schemeshard-ut_backup_collection
>> THiveTest::TestHiveBalancerWithSpareNodes [GOOD]
|90.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/grpc_services/ut/ydb-core-grpc_services-ut
|90.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/grpc_services/ut/ydb-core-grpc_services-ut
|90.5%| [LD] {RESULT} $(B)/ydb/core/grpc_services/ut/ydb-core-grpc_services-ut
>> ResourcePoolsSysView::TestResourcePoolsSysViewFilters [GOOD]
>> TCdcStreamTests::Basic
>> DstCreator::WithSyncIndexAndIntermediateDir
>> TPartBtreeIndexIteration::FewNodes_Slices [GOOD]
>> TPartBtreeIndexIteration::FewNodes_Groups_Slices
>> BuildStatsHistogram::Ten_Serial [GOOD]
>> BuildStatsHistogram::Ten_Crossed
>> AsyncIndexChangeCollector::ImplicitlyUpdateCoveredColumn [GOOD]
>> KqpPg::TableDeleteAllData-useSink [GOOD]
>> KqpPg::PgUpdateCompoundKey+useSink
>> TCdcStreamTests::Basic [GOOD]
>> TCdcStreamTests::Attributes
------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/workload_service/ut/unittest >> ResourcePoolsSysView::TestResourcePoolsSysViewFilters [GOOD]
Test command err: 2025-05-07T08:54:43.068052Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501624598569522593:2209];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:54:43.082868Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/004822/r3tmp/tmpqC7xK8/pdisk_1.dat 2025-05-07T08:54:43.931761Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:54:43.931864Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:54:43.933640Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:54:44.038334Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 10553, node 1 2025-05-07T08:54:44.174279Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:54:44.174304Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:54:44.174311Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:54:44.174437Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:10033 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success.
2025-05-07T08:54:44.685485Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:54:44.710122Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T08:54:47.902152Z node 1 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:440: [WorkloadService] [Service] Started workload service initialization 2025-05-07T08:54:47.902321Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:241: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7501624615749392238:2329], Start check tables existence, number paths: 2 2025-05-07T08:54:47.903300Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:100: [WorkloadService] [Service] Subscribed for config changes 2025-05-07T08:54:47.903317Z node 1 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:111: [WorkloadService] [Service] Resource pools was enanbled 2025-05-07T08:54:47.922593Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:223: SessionId: ydb://session/3?node_id=1&id=ODY5ZTU3NjktYTEzZDliOTUtNGY3MGUzMzItMWU0MDJkYw==, ActorId: [0:0:0], ActorState: unknown state, Create session actor with id ODY5ZTU3NjktYTEzZDliOTUtNGY3MGUzMzItMWU0MDJkYw== 2025-05-07T08:54:47.923000Z node 1 :KQP_WORKLOAD_SERVICE TRACE: kqp_workload_service.cpp:125: [WorkloadService] [Service] Updated node info, noode count: 1 2025-05-07T08:54:47.923061Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:182: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7501624615749392238:2329], Describe table /Root/.metadata/workload_manager/delayed_requests status PathErrorUnknown 2025-05-07T08:54:47.923102Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:182: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7501624615749392238:2329], Describe table /Root/.metadata/workload_manager/running_requests status PathErrorUnknown 2025-05-07T08:54:47.923148Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:289: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7501624615749392238:2329], Successfully finished 2025-05-07T08:54:47.933408Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:367: [WorkloadService] [Service] Cleanup completed, tables exists: 0 2025-05-07T08:54:47.933506Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:227: SessionId: ydb://session/3?node_id=1&id=ODY5ZTU3NjktYTEzZDliOTUtNGY3MGUzMzItMWU0MDJkYw==, ActorId: [1:7501624615749392263:2331], ActorState: unknown state, session actor bootstrapped 2025-05-07T08:54:47.945058Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:387: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501624615749392265:2301], DatabaseId: Root, PoolId: sample_pool_id, Start pool creating 2025-05-07T08:54:47.949572Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480 2025-05-07T08:54:47.957476Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:429: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501624615749392265:2301], DatabaseId: Root, PoolId: sample_pool_id, Subscribe on create pool tx: 281474976710658 2025-05-07T08:54:47.960735Z node 1 :KQP_WORKLOAD_SERVICE TRACE: scheme_actors.cpp:352: [WorkloadService] 
[TPoolCreatorActor] ActorId: [1:7501624615749392265:2301], DatabaseId: Root, PoolId: sample_pool_id, Tablet to pipe successfully connected 2025-05-07T08:54:47.968352Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501624615749392265:2301], DatabaseId: Root, PoolId: sample_pool_id, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-05-07T08:54:48.026102Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:387: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501624615749392265:2301], DatabaseId: Root, PoolId: sample_pool_id, Start pool creating 2025-05-07T08:54:48.032037Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501624620044359613:2334] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/sample_pool_id\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:54:48.032195Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:480: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501624615749392265:2301], DatabaseId: Root, PoolId: sample_pool_id, Pool successfully created 2025-05-07T08:54:48.055468Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501624598569522593:2209];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:54:48.055548Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:54:48.056396Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:223: SessionId: ydb://session/3?node_id=1&id=ZjIwODVkZjMtMzIwNDZjYTktOGY0YTY0ZjktNTE4YmNkYjk=, ActorId: [0:0:0], ActorState: unknown state, Create session actor with id ZjIwODVkZjMtMzIwNDZjYTktOGY0YTY0ZjktNTE4YmNkYjk= 2025-05-07T08:54:48.056776Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:157: [WorkloadService] [Service] Recieved subscription request, DatabaseId: /Root, PoolId: sample_pool_id 2025-05-07T08:54:48.056788Z node 1 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:561: [WorkloadService] [Service] Creating new database state for id /Root 2025-05-07T08:54:48.056840Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:227: SessionId: ydb://session/3?node_id=1&id=ZjIwODVkZjMtMzIwNDZjYTktOGY0YTY0ZjktNTE4YmNkYjk=, ActorId: [1:7501624620044359620:2332], ActorState: unknown state, session actor bootstrapped 2025-05-07T08:54:48.057051Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:443: SessionId: ydb://session/3?node_id=1&id=ZjIwODVkZjMtMzIwNDZjYTktOGY0YTY0ZjktNTE4YmNkYjk=, ActorId: [1:7501624620044359620:2332], ActorState: ReadyState, TraceId: 01jtmz8rzrdrtf773b4fz0a9r5, received request, proxyRequestId: 3 prepared: 0 tx_control: 0 action: QUERY_ACTION_EXECUTE type: QUERY_TYPE_SQL_GENERIC_QUERY text: SELECT 42; rpcActor: [1:7501624620044359619:2339] database: Root databaseId: /Root pool id: sample_pool_id 2025-05-07T08:54:48.057083Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:185: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501624620044359622:2333], DatabaseId: /Root, PoolId: sample_pool_id, Start pool fetching 2025-05-07T08:54:48.057147Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:169: [WorkloadService] [Service] Recieved new request from [1:7501624620044359620:2332], DatabaseId: /Root, PoolId: sample_pool_id, SessionId: ydb://session/3?node_id=1&id=ZjIwODVkZjMtMzIwNDZjYTktOGY0YTY0ZjktNTE4YmNkYjk= 2025-05-07T08:54:48.057188Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:574: [WorkloadService] [TDatabaseFetcherActor] ActorId: [1:7501624620044359623:2334], Database: /Root, Start database fetching 2025-05-07T08:54:48.058401Z node 1 
:KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:600: [WorkloadService] [TDatabaseFetcherActor] ActorId: [1:7501624620044359623:2334], Database: /Root, Database info successfully fetched, serverless: 0 2025-05-07T08:54:48.058505Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:223: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501624620044359622:2333], DatabaseId: /Root, PoolId: sample_pool_id, Pool info successfully fetched 2025-05-07T08:54:48.058555Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:240: [WorkloadService] [Service] Successfully fetched database info, DatabaseId: /Root, Serverless: 0 2025-05-07T08:54:48.058605Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:253: [WorkloadService] [Service] Successfully fetched pool sample_pool_id, DatabaseId: /Root 2025-05-07T08:54:48.058626Z node 1 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:571: [WorkloadService] [Service] Creating new handler for pool /Root/sample_pool_id 2025-05-07T08:54:48.058881Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:44: [WorkloadService] [TPoolResolverActor] ActorId: [1:7501624620044359633:2335], DatabaseId: /Root, PoolId: sample_pool_id, SessionId: ydb://session/3?node_id=1&id=ZjIwODVkZjMtMzIwNDZjYTktOGY0YTY0ZjktNTE4YmNkYjk=, Start pool fetching 2025-05-07T08:54:48.058919Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:185: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501624620044359635:2337], DatabaseId: /Root, PoolId: sample_pool_id, Start pool fetching 2025-05-07T08:54:48.059022Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: pool_handlers_actors.cpp:466: [WorkloadService] [TPoolHandlerActorBase] ActorId: [1:7501624 ... RkNzctNWNlZTliMTI=, ActorId: [10:7501624896714746315:2416], ActorState: ExecuteState, TraceId: 01jtmzas5g5vbavc0jjy2kcec2, ExecutePhyTx, tx: 0x000050C00016E058 literal: 0 commit: 1 txCtx.DeferredEffects.size(): 0 2025-05-07T08:55:53.946112Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1449: SessionId: ydb://session/3?node_id=10&id=NzhlODk5MmYtM2YyMTlmNTgtOTQ2NGRkNzctNWNlZTliMTI=, ActorId: [10:7501624896714746315:2416], ActorState: ExecuteState, TraceId: 01jtmzas5g5vbavc0jjy2kcec2, Sending to Executer TraceId: 0 8 2025-05-07T08:55:53.946206Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1507: SessionId: ydb://session/3?node_id=10&id=NzhlODk5MmYtM2YyMTlmNTgtOTQ2NGRkNzctNWNlZTliMTI=, ActorId: [10:7501624896714746315:2416], ActorState: ExecuteState, TraceId: 01jtmzas5g5vbavc0jjy2kcec2, Created new KQP executer: [10:7501624896714746321:2416] isRollback: 0 2025-05-07T08:55:53.975136Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1797: SessionId: ydb://session/3?node_id=10&id=NzhlODk5MmYtM2YyMTlmNTgtOTQ2NGRkNzctNWNlZTliMTI=, ActorId: [10:7501624896714746315:2416], ActorState: ExecuteState, TraceId: 01jtmzas5g5vbavc0jjy2kcec2, Forwarded TEvStreamData to [9:7501624896572049478:3422] 2025-05-07T08:55:53.976560Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1699: SessionId: ydb://session/3?node_id=10&id=NzhlODk5MmYtM2YyMTlmNTgtOTQ2NGRkNzctNWNlZTliMTI=, ActorId: [10:7501624896714746315:2416], ActorState: ExecuteState, TraceId: 01jtmzas5g5vbavc0jjy2kcec2, TEvTxResponse, CurrentTx: 1/1 response.status: SUCCESS 2025-05-07T08:55:53.977035Z node 10 :KQP_SESSION INFO: kqp_session_actor.cpp:1958: SessionId: ydb://session/3?node_id=10&id=NzhlODk5MmYtM2YyMTlmNTgtOTQ2NGRkNzctNWNlZTliMTI=, ActorId: [10:7501624896714746315:2416], ActorState: ExecuteState, TraceId: 01jtmzas5g5vbavc0jjy2kcec2, txInfo Status: Committed Kind: ReadOnly 
TotalDuration: 31.119 ServerDuration: 30.734 QueriesCount: 2 2025-05-07T08:55:53.977117Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2113: SessionId: ydb://session/3?node_id=10&id=NzhlODk5MmYtM2YyMTlmNTgtOTQ2NGRkNzctNWNlZTliMTI=, ActorId: [10:7501624896714746315:2416], ActorState: ExecuteState, TraceId: 01jtmzas5g5vbavc0jjy2kcec2, Create QueryResponse for action: QUERY_ACTION_EXECUTE with SUCCESS status 2025-05-07T08:55:53.977652Z node 10 :KQP_SESSION INFO: kqp_session_actor.cpp:2473: SessionId: ydb://session/3?node_id=10&id=NzhlODk5MmYtM2YyMTlmNTgtOTQ2NGRkNzctNWNlZTliMTI=, ActorId: [10:7501624896714746315:2416], ActorState: ExecuteState, TraceId: 01jtmzas5g5vbavc0jjy2kcec2, Cleanup start, isFinal: 1 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-05-07T08:55:53.977692Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2534: SessionId: ydb://session/3?node_id=10&id=NzhlODk5MmYtM2YyMTlmNTgtOTQ2NGRkNzctNWNlZTliMTI=, ActorId: [10:7501624896714746315:2416], ActorState: ExecuteState, TraceId: 01jtmzas5g5vbavc0jjy2kcec2, EndCleanup, isFinal: 1 2025-05-07T08:55:53.977770Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2270: SessionId: ydb://session/3?node_id=10&id=NzhlODk5MmYtM2YyMTlmNTgtOTQ2NGRkNzctNWNlZTliMTI=, ActorId: [10:7501624896714746315:2416], ActorState: ExecuteState, TraceId: 01jtmzas5g5vbavc0jjy2kcec2, Sent query response back to proxy, proxyRequestId: 5, proxyId: [10:7501624853765072225:2277] 2025-05-07T08:55:53.977795Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2546: SessionId: ydb://session/3?node_id=10&id=NzhlODk5MmYtM2YyMTlmNTgtOTQ2NGRkNzctNWNlZTliMTI=, ActorId: [10:7501624896714746315:2416], ActorState: unknown state, TraceId: 01jtmzas5g5vbavc0jjy2kcec2, Cleanup temp tables: 0 2025-05-07T08:55:53.978347Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2637: SessionId: ydb://session/3?node_id=10&id=NzhlODk5MmYtM2YyMTlmNTgtOTQ2NGRkNzctNWNlZTliMTI=, ActorId: [10:7501624896714746315:2416], ActorState: unknown state, TraceId: 01jtmzas5g5vbavc0jjy2kcec2, Session actor destroyed 2025-05-07T08:55:54.002313Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:223: SessionId: ydb://session/3?node_id=10&id=ZTE2NWNiMzYtYmRiODJhNjctN2RjNzhjNDktYzZiYWY2, ActorId: [0:0:0], ActorState: unknown state, Create session actor with id ZTE2NWNiMzYtYmRiODJhNjctN2RjNzhjNDktYzZiYWY2 2025-05-07T08:55:54.002418Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:227: SessionId: ydb://session/3?node_id=10&id=ZTE2NWNiMzYtYmRiODJhNjctN2RjNzhjNDktYzZiYWY2, ActorId: [10:7501624901009713636:2424], ActorState: unknown state, session actor bootstrapped 2025-05-07T08:55:54.003187Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:443: SessionId: ydb://session/3?node_id=10&id=ZTE2NWNiMzYtYmRiODJhNjctN2RjNzhjNDktYzZiYWY2, ActorId: [10:7501624901009713636:2424], ActorState: ReadyState, TraceId: 01jtmzasck1xkwj62bzy51db9f, received request, proxyRequestId: 6 prepared: 0 tx_control: 0 action: QUERY_ACTION_EXECUTE type: QUERY_TYPE_SQL_GENERIC_QUERY text: SELECT * FROM `.sys/resource_pools` WHERE Name >= "default" rpcActor: [9:7501624896572049488:3429] database: /Root/test-dedicated databaseId: /Root/test-dedicated pool id: default 2025-05-07T08:55:54.003236Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:264: SessionId: ydb://session/3?node_id=10&id=ZTE2NWNiMzYtYmRiODJhNjctN2RjNzhjNDktYzZiYWY2, ActorId: [10:7501624901009713636:2424], ActorState: ReadyState, TraceId: 01jtmzasck1xkwj62bzy51db9f, request placed into pool from cache: default 
2025-05-07T08:55:54.003347Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:575: SessionId: ydb://session/3?node_id=10&id=ZTE2NWNiMzYtYmRiODJhNjctN2RjNzhjNDktYzZiYWY2, ActorId: [10:7501624901009713636:2424], ActorState: ExecuteState, TraceId: 01jtmzasck1xkwj62bzy51db9f, Sending CompileQuery request 2025-05-07T08:55:54.180890Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1298: SessionId: ydb://session/3?node_id=10&id=ZTE2NWNiMzYtYmRiODJhNjctN2RjNzhjNDktYzZiYWY2, ActorId: [10:7501624901009713636:2424], ActorState: ExecuteState, TraceId: 01jtmzasck1xkwj62bzy51db9f, ExecutePhyTx, tx: 0x000050C000392698 literal: 0 commit: 1 txCtx.DeferredEffects.size(): 0 2025-05-07T08:55:54.180964Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1449: SessionId: ydb://session/3?node_id=10&id=ZTE2NWNiMzYtYmRiODJhNjctN2RjNzhjNDktYzZiYWY2, ActorId: [10:7501624901009713636:2424], ActorState: ExecuteState, TraceId: 01jtmzasck1xkwj62bzy51db9f, Sending to Executer TraceId: 0 8 2025-05-07T08:55:54.181101Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1507: SessionId: ydb://session/3?node_id=10&id=ZTE2NWNiMzYtYmRiODJhNjctN2RjNzhjNDktYzZiYWY2, ActorId: [10:7501624901009713636:2424], ActorState: ExecuteState, TraceId: 01jtmzasck1xkwj62bzy51db9f, Created new KQP executer: [10:7501624901009713645:2424] isRollback: 0 2025-05-07T08:55:54.203305Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1797: SessionId: ydb://session/3?node_id=10&id=ZTE2NWNiMzYtYmRiODJhNjctN2RjNzhjNDktYzZiYWY2, ActorId: [10:7501624901009713636:2424], ActorState: ExecuteState, TraceId: 01jtmzasck1xkwj62bzy51db9f, Forwarded TEvStreamData to [9:7501624896572049488:3429] 2025-05-07T08:55:54.210909Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1699: SessionId: ydb://session/3?node_id=10&id=ZTE2NWNiMzYtYmRiODJhNjctN2RjNzhjNDktYzZiYWY2, ActorId: [10:7501624901009713636:2424], ActorState: ExecuteState, TraceId: 01jtmzasck1xkwj62bzy51db9f, TEvTxResponse, CurrentTx: 1/1 response.status: SUCCESS 2025-05-07T08:55:54.211082Z node 10 :KQP_SESSION INFO: kqp_session_actor.cpp:1958: SessionId: ydb://session/3?node_id=10&id=ZTE2NWNiMzYtYmRiODJhNjctN2RjNzhjNDktYzZiYWY2, ActorId: [10:7501624901009713636:2424], ActorState: ExecuteState, TraceId: 01jtmzasck1xkwj62bzy51db9f, txInfo Status: Committed Kind: ReadOnly TotalDuration: 30.313 ServerDuration: 30.218 QueriesCount: 2 2025-05-07T08:55:54.211168Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2113: SessionId: ydb://session/3?node_id=10&id=ZTE2NWNiMzYtYmRiODJhNjctN2RjNzhjNDktYzZiYWY2, ActorId: [10:7501624901009713636:2424], ActorState: ExecuteState, TraceId: 01jtmzasck1xkwj62bzy51db9f, Create QueryResponse for action: QUERY_ACTION_EXECUTE with SUCCESS status 2025-05-07T08:55:54.211610Z node 10 :KQP_SESSION INFO: kqp_session_actor.cpp:2473: SessionId: ydb://session/3?node_id=10&id=ZTE2NWNiMzYtYmRiODJhNjctN2RjNzhjNDktYzZiYWY2, ActorId: [10:7501624901009713636:2424], ActorState: ExecuteState, TraceId: 01jtmzasck1xkwj62bzy51db9f, Cleanup start, isFinal: 1 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-05-07T08:55:54.211661Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2534: SessionId: ydb://session/3?node_id=10&id=ZTE2NWNiMzYtYmRiODJhNjctN2RjNzhjNDktYzZiYWY2, ActorId: [10:7501624901009713636:2424], ActorState: ExecuteState, TraceId: 01jtmzasck1xkwj62bzy51db9f, EndCleanup, isFinal: 1 2025-05-07T08:55:54.211717Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2270: SessionId: 
ydb://session/3?node_id=10&id=ZTE2NWNiMzYtYmRiODJhNjctN2RjNzhjNDktYzZiYWY2, ActorId: [10:7501624901009713636:2424], ActorState: ExecuteState, TraceId: 01jtmzasck1xkwj62bzy51db9f, Sent query response back to proxy, proxyRequestId: 6, proxyId: [10:7501624853765072225:2277] 2025-05-07T08:55:54.211743Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2546: SessionId: ydb://session/3?node_id=10&id=ZTE2NWNiMzYtYmRiODJhNjctN2RjNzhjNDktYzZiYWY2, ActorId: [10:7501624901009713636:2424], ActorState: unknown state, TraceId: 01jtmzasck1xkwj62bzy51db9f, Cleanup temp tables: 0 2025-05-07T08:55:54.212228Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2637: SessionId: ydb://session/3?node_id=10&id=ZTE2NWNiMzYtYmRiODJhNjctN2RjNzhjNDktYzZiYWY2, ActorId: [10:7501624901009713636:2424], ActorState: unknown state, TraceId: 01jtmzasck1xkwj62bzy51db9f, Session actor destroyed 2025-05-07T08:55:54.246972Z node 9 :KQP_SESSION INFO: kqp_session_actor.cpp:2315: SessionId: ydb://session/3?node_id=9&id=YTRiMjU5ZWQtYWYwYWJiZGYtMzliNTIyZWEtYTJiNzk2ZmM=, ActorId: [9:7501624845032440668:2336], ActorState: ReadyState, Session closed due to explicit close event 2025-05-07T08:55:54.247026Z node 9 :KQP_SESSION INFO: kqp_session_actor.cpp:2473: SessionId: ydb://session/3?node_id=9&id=YTRiMjU5ZWQtYWYwYWJiZGYtMzliNTIyZWEtYTJiNzk2ZmM=, ActorId: [9:7501624845032440668:2336], ActorState: ReadyState, Cleanup start, isFinal: 1 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-05-07T08:55:54.247055Z node 9 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2534: SessionId: ydb://session/3?node_id=9&id=YTRiMjU5ZWQtYWYwYWJiZGYtMzliNTIyZWEtYTJiNzk2ZmM=, ActorId: [9:7501624845032440668:2336], ActorState: ReadyState, EndCleanup, isFinal: 1 2025-05-07T08:55:54.247084Z node 9 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2546: SessionId: ydb://session/3?node_id=9&id=YTRiMjU5ZWQtYWYwYWJiZGYtMzliNTIyZWEtYTJiNzk2ZmM=, ActorId: [9:7501624845032440668:2336], ActorState: unknown state, Cleanup temp tables: 0 2025-05-07T08:55:54.247151Z node 9 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2637: SessionId: ydb://session/3?node_id=9&id=YTRiMjU5ZWQtYWYwYWJiZGYtMzliNTIyZWEtYTJiNzk2ZmM=, ActorId: [9:7501624845032440668:2336], ActorState: unknown state, Session actor destroyed 2025-05-07T08:55:54.253761Z node 9 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 11 2025-05-07T08:55:54.254155Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(11, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-05-07T08:55:54.254292Z node 9 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 10 2025-05-07T08:55:54.254478Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-05-07T08:55:55.682724Z node 10 :SYSTEM_VIEWS WARN: sysview_service.cpp:811: Summary delivery problem: service id# [10:7501624853765072066:2123], processor id# 72075186224037891, database# /Root/test-dedicated
|90.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_minstep/ydb-core-tx-datashard-ut_minstep
|90.5%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_minstep/ydb-core-tx-datashard-ut_minstep
|90.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_minstep/ydb-core-tx-datashard-ut_minstep
------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/hive/ut/unittest >> THiveTest::TestHiveBalancerWithSpareNodes [GOOD]
Test command err: 2025-05-07T08:54:34.006253Z node 6 :BS_NODE DEBUG: {NW26@node_warden_impl.cpp:319} Bootstrap 2025-05-07T08:54:34.010223Z node 6 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# true Comprehensive# false Origin# initial ServiceSet# {PDisks { NodeID: 1 PDiskID: 1 Path: "/tmp/pdisk.dat" PDiskGuid: 1 } VDisks { VDiskID { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } Groups { GroupID: 0 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } } } AvailabilityDomains: 0 } 2025-05-07T08:54:34.010466Z node 6 :BS_NODE DEBUG: {NW12@node_warden_proxy.cpp:22} StartLocalProxy GroupId# 0 2025-05-07T08:54:34.011447Z node 6 :BS_NODE DEBUG: {NW21@node_warden_pipe.cpp:23} EstablishPipe AvailDomainId# 0 PipeClientId# [6:149:2076] ControllerId# 72057594037932033 2025-05-07T08:54:34.011496Z node 6 :BS_NODE DEBUG: {NW20@node_warden_pipe.cpp:72} SendRegisterNode 2025-05-07T08:54:34.011610Z node 6 :BS_NODE DEBUG: {NW11@node_warden_impl.cpp:294} StartInvalidGroupProxy GroupId# 4294967295 2025-05-07T08:54:34.011955Z node 6 :BS_NODE DEBUG: {NW62@node_warden_impl.cpp:306} StartRequestReportingThrottler 2025-05-07T08:54:34.014291Z node 3 :BS_NODE DEBUG: {NW26@node_warden_impl.cpp:319} Bootstrap 2025-05-07T08:54:34.017066Z node 3 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# true Comprehensive# false Origin# initial ServiceSet# {PDisks { NodeID: 1 PDiskID: 1 Path: "/tmp/pdisk.dat" PDiskGuid: 1 } VDisks { VDiskID { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } Groups { GroupID: 0 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } } } AvailabilityDomains: 0 } 2025-05-07T08:54:34.017190Z node 3 :BS_NODE DEBUG: {NW12@node_warden_proxy.cpp:22} StartLocalProxy GroupId# 0 2025-05-07T08:54:34.018126Z node 3 :BS_NODE DEBUG: {NW21@node_warden_pipe.cpp:23} EstablishPipe AvailDomainId# 0 PipeClientId# [3:158:2076] ControllerId# 72057594037932033 2025-05-07T08:54:34.018166Z node 3 :BS_NODE DEBUG: {NW20@node_warden_pipe.cpp:72} SendRegisterNode 2025-05-07T08:54:34.018225Z node 3 :BS_NODE DEBUG: {NW11@node_warden_impl.cpp:294} StartInvalidGroupProxy GroupId# 4294967295 2025-05-07T08:54:34.018437Z node 3 :BS_NODE DEBUG: {NW62@node_warden_impl.cpp:306} StartRequestReportingThrottler 2025-05-07T08:54:34.020578Z node 3 :BS_PROXY INFO: dsproxy_state.cpp:146: Group# 0 TEvConfigureProxy received GroupGeneration# 1 IsLimitedKeyless# false Marker# DSP02 2025-05-07T08:54:34.020629Z node 3 :BS_PROXY NOTICE: dsproxy_state.cpp:294: EnsureMonitoring Group# 0 IsLimitedKeyless# 0 fullIfPossible# 0 Marker# DSP58 2025-05-07T08:54:34.022921Z node 3 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [3:157:2075] Create Queue# [3:164:2080] targetNodeId# 1 Marker# DSP01 2025-05-07T08:54:34.023091Z node 3 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [3:157:2075] Create Queue# [3:165:2081] targetNodeId# 1 Marker# DSP01 2025-05-07T08:54:34.023260Z node 3 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [3:157:2075] Create Queue# [3:166:2082] targetNodeId# 1 Marker# DSP01 2025-05-07T08:54:34.023444Z node 3 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [3:157:2075] Create Queue# [3:167:2083] targetNodeId# 1 Marker# DSP01 
2025-05-07T08:54:34.023575Z node 3 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [3:157:2075] Create Queue# [3:168:2084] targetNodeId# 1 Marker# DSP01 2025-05-07T08:54:34.023712Z node 3 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [3:157:2075] Create Queue# [3:169:2085] targetNodeId# 1 Marker# DSP01 2025-05-07T08:54:34.023861Z node 3 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [3:157:2075] Create Queue# [3:170:2086] targetNodeId# 1 Marker# DSP01 2025-05-07T08:54:34.023890Z node 3 :BS_PROXY INFO: dsproxy_state.cpp:29: Group# 0 SetStateEstablishingSessions Marker# DSP03 2025-05-07T08:54:34.023973Z node 3 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594037932033] ::Bootstrap [3:158:2076] 2025-05-07T08:54:34.024021Z node 3 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594037932033] lookup [3:158:2076] 2025-05-07T08:54:34.024063Z node 3 :BS_PROXY NOTICE: dsproxy_state.cpp:234: Group# 4294967295 HasInvalidGroupId# 1 Bootstrap -> StateEjected Marker# DSP42 2025-05-07T08:54:34.024105Z node 3 :BS_NODE DEBUG: {NWDC00@distconf.cpp:22} Bootstrap 2025-05-07T08:54:34.024665Z node 3 :BS_NODE DEBUG: {NWDC40@distconf_persistent_storage.cpp:25} TReaderActor bootstrap Paths# [] 2025-05-07T08:54:34.024767Z node 4 :BS_NODE DEBUG: {NW26@node_warden_impl.cpp:319} Bootstrap 2025-05-07T08:54:34.028912Z node 4 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# true Comprehensive# false Origin# initial ServiceSet# {PDisks { NodeID: 1 PDiskID: 1 Path: "/tmp/pdisk.dat" PDiskGuid: 1 } VDisks { VDiskID { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } Groups { GroupID: 0 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } } } AvailabilityDomains: 0 } 2025-05-07T08:54:34.029043Z node 4 :BS_NODE DEBUG: {NW12@node_warden_proxy.cpp:22} StartLocalProxy GroupId# 0 2025-05-07T08:54:34.029879Z node 4 :BS_NODE DEBUG: {NW21@node_warden_pipe.cpp:23} EstablishPipe AvailDomainId# 0 PipeClientId# [4:178:2077] ControllerId# 72057594037932033 2025-05-07T08:54:34.029911Z node 4 :BS_NODE DEBUG: {NW20@node_warden_pipe.cpp:72} SendRegisterNode 2025-05-07T08:54:34.029999Z node 4 :BS_NODE DEBUG: {NW11@node_warden_impl.cpp:294} StartInvalidGroupProxy GroupId# 4294967295 2025-05-07T08:54:34.030209Z node 4 :BS_NODE DEBUG: {NW62@node_warden_impl.cpp:306} StartRequestReportingThrottler 2025-05-07T08:54:34.034647Z node 4 :BS_PROXY INFO: dsproxy_state.cpp:146: Group# 0 TEvConfigureProxy received GroupGeneration# 1 IsLimitedKeyless# false Marker# DSP02 2025-05-07T08:54:34.034720Z node 4 :BS_PROXY NOTICE: dsproxy_state.cpp:294: EnsureMonitoring Group# 0 IsLimitedKeyless# 0 fullIfPossible# 0 Marker# DSP58 2025-05-07T08:54:34.036655Z node 4 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [4:177:2076] Create Queue# [4:184:2081] targetNodeId# 1 Marker# DSP01 2025-05-07T08:54:34.036826Z node 4 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [4:177:2076] Create Queue# [4:185:2082] targetNodeId# 1 Marker# DSP01 2025-05-07T08:54:34.036998Z node 4 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [4:177:2076] Create Queue# [4:186:2083] targetNodeId# 1 Marker# DSP01 2025-05-07T08:54:34.037151Z node 4 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [4:177:2076] Create Queue# [4:187:2084] targetNodeId# 1 Marker# DSP01 2025-05-07T08:54:34.037313Z node 4 :BS_PROXY DEBUG: group_sessions.cpp:83: 
Group# 0 Actor# [4:177:2076] Create Queue# [4:188:2085] targetNodeId# 1 Marker# DSP01 2025-05-07T08:54:34.037456Z node 4 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [4:177:2076] Create Queue# [4:189:2086] targetNodeId# 1 Marker# DSP01 2025-05-07T08:54:34.037587Z node 4 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [4:177:2076] Create Queue# [4:190:2087] targetNodeId# 1 Marker# DSP01 2025-05-07T08:54:34.037628Z node 4 :BS_PROXY INFO: dsproxy_state.cpp:29: Group# 0 SetStateEstablishingSessions Marker# DSP03 2025-05-07T08:54:34.037707Z node 4 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594037932033] ::Bootstrap [4:178:2077] 2025-05-07T08:54:34.037735Z node 4 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594037932033] lookup [4:178:2077] 2025-05-07T08:54:34.037790Z node 4 :BS_PROXY NOTICE: dsproxy_state.cpp:234: Group# 4294967295 HasInvalidGroupId# 1 Bootstrap -> StateEjected Marker# DSP42 2025-05-07T08:54:34.037827Z node 4 :BS_NODE DEBUG: {NWDC00@distconf.cpp:22} Bootstrap 2025-05-07T08:54:34.038584Z node 4 :BS_NODE DEBUG: {NWDC40@distconf_persistent_storage.cpp:25} TReaderActor bootstrap Paths# [] 2025-05-07T08:54:34.038698Z node 5 :BS_NODE DEBUG: {NW26@node_warden_impl.cpp:319} Bootstrap 2025-05-07T08:54:34.042319Z node 5 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# true Comprehensive# false Origin# initial ServiceSet# {PDisks { NodeID: 1 PDiskID: 1 Path: "/tmp/pdisk.dat" PDiskGuid: 1 } VDisks { VDiskID { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } Groups { GroupID: 0 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } } } AvailabilityDomains: 0 } 2025-05-07T08:54:34.042443Z node 5 :BS_NODE DEBUG: {NW12@node_warden_proxy.cpp:22} StartLocalProxy GroupId# 0 2025-05-07T08:54:34.043314Z node 5 :BS_NODE DEBUG: {NW21@node_warden_pipe.cpp:23} EstablishPipe AvailDomainId# 0 PipeClientId# [5:198:2077] ControllerId# 72057594037932033 2025-05-07T08:54:34.043351Z node 5 :BS_NODE DEBUG: {NW20@node_warden_pipe.cpp:72} SendRegisterNode 2025-05-07T08:54:34.043411Z node 5 :BS_NODE DEBUG: {NW11@node_warden_impl.cpp:294} StartInvalidGroupProxy GroupId# 4294967295 2025-05-07T08:54:34.043612Z node 5 :BS_NODE DEBUG: {NW62@node_warden_impl.cpp:306} StartRequestReportingThrottler 2025-05-07T08:54:34.045698Z node 5 :BS_PROXY INFO: dsproxy_state.cpp:146: Group# 0 TEvConfigureProxy received GroupGeneration# 1 IsLimitedKeyless# false Marker# DSP02 2025-05-07T08:54:34.045742Z node 5 :BS_PROXY NOTICE: dsproxy_state.cpp:294: EnsureMonitoring Group# 0 IsLimitedKeyless# 0 fullIfPossible# 0 Marker# DSP58 2025-05-07T08:54:34.047617Z node 5 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [5:197:2076] Create Queue# [5:204:2081] targetNodeId# 1 Marker# DSP01 2025-05-07T08:54:34.047804Z node 5 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [5:197:2076] Create Queue# [5:205:2082] targetNodeId# 1 Marker# DSP01 2025-05-07T08:54:34.047999Z node 5 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [5:197:2076] Create Queue# [5:206:2083] targetNodeId# 1 Marker# DSP01 2025-05-07T08:54:34.048174Z node 5 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [5:197:2076] Create Queue# [5:207:2084] targetNodeId# 1 Marker# DSP01 2025-05-07T08:54:34.048341Z node 5 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [5:197:2076] Create Queue# [5:208:2085] targetNodeId# 1 
Marker# DSP01 2025-05-07T08:54:34.048476Z node 5 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [5:197:2076] Create Queue# [5:209:2086] targetNodeId# 1 Marker# DSP01 2025-05-07T08:54:34.048625Z node 5 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [5:197:2076] Create Queue# [5:210:2087] targetNodeId# 1 Marker# DSP01 2025-05-07T08:54:34.048659Z node 5 :BS_PROXY INFO: dsproxy_state.cpp:29: Group# 0 SetStateEstablishingSessions Marker# DSP03 2025-05-07T08:54:34.048720Z node 5 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594037932033] ::Bootstrap [5:198:2077] 2025-05-07T08:54:34.048751Z node 5 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594037932033] lookup [5:198:2077] 2025-05-07T08:54:34.048812Z node 5 :BS_PROXY NOTICE: dsproxy_state.cpp:234: Group# 4294967295 HasInvalidGroupId# 1 Bootstrap -> StateEjected Marker# DSP42 2025-05-07T08:54:34.048852Z node 5 :BS_NODE DEBUG: {NWDC00@distconf.cpp:22} Bootstrap 2025-05-07T08:54:34.049417Z node 5 :BS_NODE DEBUG: {NWDC40@distconf_persistent_storage.cpp:25} TReaderActor bootstrap Paths# [] 2025-05-07T08:54:34.056922Z node 6 :BS_PROXY INFO: dsproxy_state.cpp:146: Group# 0 TEvConfigureProxy received GroupGeneration# 1 IsLimitedKeyless# false Marker# DSP02 2025-05-07T08:54:34.056979Z node 6 :BS_PROXY NOTICE: dsproxy_state.cpp:294: EnsureMonitoring Group# 0 IsLimitedKeyless# ... 6Z node 52 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72075186224037893 entry.State: StNormal ev: {EvForward TabletID: 72075186224037893 Ev: nullptr Flags: 1:2:0} 2025-05-07T08:55:55.032814Z node 52 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:279: SelectForward node 52 selfDC 1 leaderDC 3 1:2:0 local 0 localDc 0 other 1 disallowed 0 tabletId: 72075186224037893 followers: 0 countLeader 1 allowFollowers 0 winner: [57:1293:2098] 2025-05-07T08:55:55.033023Z node 52 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:195: TClient[72075186224037893] forward result remote node 57 [52:2074:2740] 2025-05-07T08:55:55.033172Z node 52 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:229: TClient[72075186224037893] remote node connected [52:2074:2740] 2025-05-07T08:55:55.033228Z node 52 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:671: TClient[72075186224037893]::SendEvent [52:2074:2740] 2025-05-07T08:55:55.033592Z node 57 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:291: [72075186224037893] Accept Connect Originator# [52:2074:2740] 2025-05-07T08:55:55.037491Z node 52 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:310: TClient[72075186224037893] connected with status OK role: Leader [52:2074:2740] 2025-05-07T08:55:55.037578Z node 52 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:325: TClient[72075186224037893] send queued [52:2074:2740] 2025-05-07T08:55:55.039320Z node 52 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72075186224037894] ::Bootstrap [52:2077:2742] 2025-05-07T08:55:55.039389Z node 52 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72075186224037894] lookup [52:2077:2742] 2025-05-07T08:55:55.039487Z node 52 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72075186224037894 entry.State: StNormal ev: {EvForward TabletID: 72075186224037894 Ev: nullptr Flags: 1:2:0} 2025-05-07T08:55:55.039574Z node 52 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:279: SelectForward node 52 selfDC 1 leaderDC 3 1:2:0 local 0 localDc 0 other 1 disallowed 0 tabletId: 72075186224037894 followers: 0 countLeader 1 allowFollowers 0 winner: [56:1292:2099] 2025-05-07T08:55:55.039754Z node 52 :PIPE_CLIENT DEBUG: 
tablet_pipe_client.cpp:195: TClient[72075186224037894] forward result remote node 56 [52:2077:2742] 2025-05-07T08:55:55.039910Z node 52 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:229: TClient[72075186224037894] remote node connected [52:2077:2742] 2025-05-07T08:55:55.039967Z node 52 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:671: TClient[72075186224037894]::SendEvent [52:2077:2742] 2025-05-07T08:55:55.040481Z node 52 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:349: TClient[72075186224037894] connect request undelivered [52:2077:2742] 2025-05-07T08:55:55.040540Z node 52 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:559: TClient[72075186224037894] immediate retry [52:2077:2742] 2025-05-07T08:55:55.040577Z node 52 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72075186224037894] lookup [52:2077:2742] 2025-05-07T08:55:55.040637Z node 52 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:536: Handle TEvTabletProblem tabletId: 72075186224037894 entry.State: StNormal 2025-05-07T08:55:55.040925Z node 52 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72075186224037894 entry.State: StProblemResolve ev: {EvForward TabletID: 72075186224037894 Ev: nullptr Flags: 1:2:0} 2025-05-07T08:55:55.041070Z node 52 :STATESTORAGE DEBUG: statestorage_proxy.cpp:253: ProxyRequest::HandleInit ev: {EvLookup TabletID: 72075186224037894 Cookie: 0 ProxyOptions: SigNone} 2025-05-07T08:55:55.041221Z node 52 :STATESTORAGE DEBUG: statestorage_replica.cpp:183: Replica::Handle ev: {EvReplicaLookup TabletID: 72075186224037894 Cookie: 0} 2025-05-07T08:55:55.041283Z node 52 :STATESTORAGE DEBUG: statestorage_replica.cpp:183: Replica::Handle ev: {EvReplicaLookup TabletID: 72075186224037894 Cookie: 1} 2025-05-07T08:55:55.041324Z node 52 :STATESTORAGE DEBUG: statestorage_replica.cpp:183: Replica::Handle ev: {EvReplicaLookup TabletID: 72075186224037894 Cookie: 2} 2025-05-07T08:55:55.041382Z node 52 :STATESTORAGE DEBUG: statestorage_proxy.cpp:372: ProxyRequest::HandleLookup ev: {EvReplicaInfo Status: 0 TabletID: 72075186224037894 CurrentLeader: [57:1943:2266] CurrentLeaderTablet: [57:1948:2269] CurrentGeneration: 3 CurrentStep: 0} 2025-05-07T08:55:55.041473Z node 52 :STATESTORAGE DEBUG: statestorage_proxy.cpp:372: ProxyRequest::HandleLookup ev: {EvReplicaInfo Status: 0 TabletID: 72075186224037894 CurrentLeader: [57:1943:2266] CurrentLeaderTablet: [57:1948:2269] CurrentGeneration: 3 CurrentStep: 0} 2025-05-07T08:55:55.041576Z node 52 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:610: Handle TEvInfo tabletId: 72075186224037894 entry.State: StProblemResolve success: true ev: {EvInfo Status: 0 TabletID: 72075186224037894 Cookie: 0 CurrentLeader: [57:1943:2266] CurrentLeaderTablet: [57:1948:2269] CurrentGeneration: 3 CurrentStep: 0 Locked: false LockedFor: 0 SignatureSz: 3 Signature: {7, 10, 0}} 2025-05-07T08:55:55.041624Z node 52 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:354: ApplyEntry leader tabletId: 72075186224037894 followers: 0 2025-05-07T08:55:55.041680Z node 52 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:279: SelectForward node 52 selfDC 1 leaderDC 3 1:2:0 local 0 localDc 0 other 1 disallowed 0 tabletId: 72075186224037894 followers: 0 countLeader 1 allowFollowers 0 winner: [57:1943:2266] 2025-05-07T08:55:55.041828Z node 52 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:195: TClient[72075186224037894] forward result remote node 57 [52:2077:2742] 2025-05-07T08:55:55.041957Z node 52 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:229: TClient[72075186224037894] remote node connected [52:2077:2742] 
2025-05-07T08:55:55.050202Z node 52 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:671: TClient[72075186224037894]::SendEvent [52:2077:2742] 2025-05-07T08:55:55.050890Z node 57 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:291: [72075186224037894] Accept Connect Originator# [52:2077:2742] 2025-05-07T08:55:55.051343Z node 52 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:310: TClient[72075186224037894] connected with status OK role: Leader [52:2077:2742] 2025-05-07T08:55:55.051421Z node 52 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:325: TClient[72075186224037894] send queued [52:2077:2742] 2025-05-07T08:55:55.052603Z node 52 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72075186224037895] ::Bootstrap [52:2081:2744] 2025-05-07T08:55:55.052648Z node 52 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72075186224037895] lookup [52:2081:2744] 2025-05-07T08:55:55.052737Z node 52 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72075186224037895 entry.State: StNormal ev: {EvForward TabletID: 72075186224037895 Ev: nullptr Flags: 1:2:0} 2025-05-07T08:55:55.052800Z node 52 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:279: SelectForward node 52 selfDC 1 leaderDC 3 1:2:0 local 0 localDc 0 other 1 disallowed 0 tabletId: 72075186224037895 followers: 0 countLeader 1 allowFollowers 0 winner: [57:1787:2193] 2025-05-07T08:55:55.052915Z node 52 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:195: TClient[72075186224037895] forward result remote node 57 [52:2081:2744] 2025-05-07T08:55:55.053081Z node 52 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:229: TClient[72075186224037895] remote node connected [52:2081:2744] 2025-05-07T08:55:55.053140Z node 52 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:671: TClient[72075186224037895]::SendEvent [52:2081:2744] 2025-05-07T08:55:55.053403Z node 57 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:291: [72075186224037895] Accept Connect Originator# [52:2081:2744] 2025-05-07T08:55:55.053867Z node 52 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:310: TClient[72075186224037895] connected with status OK role: Leader [52:2081:2744] 2025-05-07T08:55:55.053922Z node 52 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:325: TClient[72075186224037895] send queued [52:2081:2744] 2025-05-07T08:55:55.064247Z node 52 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72075186224037896] ::Bootstrap [52:2084:2746] 2025-05-07T08:55:55.064340Z node 52 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72075186224037896] lookup [52:2084:2746] 2025-05-07T08:55:55.064456Z node 52 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72075186224037896 entry.State: StNormal ev: {EvForward TabletID: 72075186224037896 Ev: nullptr Flags: 1:2:0} 2025-05-07T08:55:55.064539Z node 52 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:279: SelectForward node 52 selfDC 1 leaderDC 3 1:2:0 local 0 localDc 0 other 1 disallowed 0 tabletId: 72075186224037896 followers: 0 countLeader 1 allowFollowers 0 winner: [57:1791:2195] 2025-05-07T08:55:55.064725Z node 52 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:195: TClient[72075186224037896] forward result remote node 57 [52:2084:2746] 2025-05-07T08:55:55.064925Z node 52 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:229: TClient[72075186224037896] remote node connected [52:2084:2746] 2025-05-07T08:55:55.064992Z node 52 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:671: TClient[72075186224037896]::SendEvent [52:2084:2746] 2025-05-07T08:55:55.065390Z node 57 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:291: [72075186224037896] Accept Connect 
Originator# [52:2084:2746] 2025-05-07T08:55:55.078992Z node 52 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:310: TClient[72075186224037896] connected with status OK role: Leader [52:2084:2746] 2025-05-07T08:55:55.079089Z node 52 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:325: TClient[72075186224037896] send queued [52:2084:2746] 2025-05-07T08:55:55.081170Z node 52 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594037927937] ::Bootstrap [52:2086:2747] 2025-05-07T08:55:55.081291Z node 52 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594037927937] lookup [52:2086:2747] 2025-05-07T08:55:55.081461Z node 52 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72057594037927937 entry.State: StNormal ev: {EvForward TabletID: 72057594037927937 Ev: nullptr Flags: 1:2:0} 2025-05-07T08:55:55.081566Z node 52 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:279: SelectForward node 52 selfDC 1 leaderDC 1 1:2:0 local 1 localDc 1 other 0 disallowed 0 tabletId: 72057594037927937 followers: 0 countLeader 1 allowFollowers 0 winner: [52:592:2274] 2025-05-07T08:55:55.081841Z node 52 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:149: TClient[72057594037927937] queue send [52:2086:2747] 2025-05-07T08:55:55.088322Z node 52 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:411: TClient[72057594037927937] received pending shutdown [52:2086:2747] 2025-05-07T08:55:55.088796Z node 52 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:190: TClient[72057594037927937] forward result local node, try to connect [52:2086:2747] 2025-05-07T08:55:55.088926Z node 52 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:671: TClient[72057594037927937]::SendEvent [52:2086:2747] 2025-05-07T08:55:55.089204Z node 52 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:291: [72057594037927937] Accept Connect Originator# [52:2086:2747] 2025-05-07T08:55:55.089627Z node 52 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:310: TClient[72057594037927937] connected with status OK role: Leader [52:2086:2747] 2025-05-07T08:55:55.089740Z node 52 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:325: TClient[72057594037927937] send queued [52:2086:2747] 2025-05-07T08:55:55.089829Z node 52 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:629: TClient[72057594037927937] push event to server [52:2086:2747] 2025-05-07T08:55:55.089932Z node 52 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:332: TClient[72057594037927937] shutdown pipe due to pending shutdown request [52:2086:2747] 2025-05-07T08:55:55.090029Z node 52 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:505: TClient[72057594037927937] notify reset [52:2086:2747] 2025-05-07T08:55:55.090147Z node 52 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:141: [72057594037927937] HandleSend Sender# [52:563:2269] EventType# 268697616 >> TIterator::Mixed [GOOD] >> TIterator::MixedReverse >> TCdcStreamTests::Attributes [GOOD] >> TCdcStreamTests::DocApi ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_change_collector/unittest >> AsyncIndexChangeCollector::ImplicitlyUpdateCoveredColumn [GOOD] Test command err: 2025-05-07T08:55:29.023303Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:105:2151], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:55:29.023447Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:55:29.023676Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00327a/r3tmp/tmpQ96NDo/pdisk_1.dat 2025-05-07T08:55:29.433880Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-05-07T08:55:29.511269Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:55:29.575559Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:55:29.575719Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:55:29.587386Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:55:29.697624Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-05-07T08:55:29.775210Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:675:2577] 2025-05-07T08:55:29.775556Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-05-07T08:55:29.845684Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-05-07T08:55:29.845931Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-05-07T08:55:29.848205Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-05-07T08:55:29.848302Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-05-07T08:55:29.848366Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-05-07T08:55:29.848804Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-05-07T08:55:29.849141Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-05-07T08:55:29.849209Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:702:2577] in generation 1 2025-05-07T08:55:29.851047Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037889 actor [1:679:2579] 2025-05-07T08:55:29.851262Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-05-07T08:55:29.861482Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-05-07T08:55:29.861616Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-05-07T08:55:29.863313Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: 
QueueSize: 0, at tablet: 72075186224037889 2025-05-07T08:55:29.863394Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037889 2025-05-07T08:55:29.863445Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037889 2025-05-07T08:55:29.863800Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-05-07T08:55:29.863926Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-05-07T08:55:29.863985Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037889 persisting started state actor id [1:710:2579] in generation 1 2025-05-07T08:55:29.875217Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-05-07T08:55:29.944486Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-05-07T08:55:29.944750Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-05-07T08:55:29.944936Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:713:2598] 2025-05-07T08:55:29.944986Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-05-07T08:55:29.945024Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-05-07T08:55:29.945070Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T08:55:29.945417Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-05-07T08:55:29.945459Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037889 2025-05-07T08:55:29.945515Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037889 not sending time cast registration request in state WaitScheme: missing processing params 2025-05-07T08:55:29.945608Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037889, actorId: [1:714:2599] 2025-05-07T08:55:29.945682Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037889 2025-05-07T08:55:29.945711Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037889, state: WaitScheme 2025-05-07T08:55:29.945737Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-05-07T08:55:29.952944Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-05-07T08:55:29.953115Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-05-07T08:55:29.953355Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T08:55:29.953430Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-05-07T08:55:29.953485Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-05-07T08:55:29.953560Z node 1 
:TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T08:55:29.953624Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037889 2025-05-07T08:55:29.953697Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037889 2025-05-07T08:55:29.953858Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:669:2573], serverId# [1:689:2584], sessionId# [0:0:0] 2025-05-07T08:55:29.953920Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037889 2025-05-07T08:55:29.953948Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 active 0 active planned 0 immediate 0 planned 0 2025-05-07T08:55:29.954000Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037889 TxInFly 0 2025-05-07T08:55:29.954041Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-05-07T08:55:29.954603Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-05-07T08:55:29.954942Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-05-07T08:55:29.955062Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-05-07T08:55:29.961723Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [1:670:2574], serverId# [1:697:2591], sessionId# [0:0:0] 2025-05-07T08:55:29.962096Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037889 2025-05-07T08:55:29.962323Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037889 txId 281474976715657 ssId 72057594046644480 seqNo 2:2 2025-05-07T08:55:29.962401Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037889 2025-05-07T08:55:29.964327Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-05-07T08:55:29.964426Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037889 2025-05-07T08:55:29.978801Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-05-07T08:55:29.978949Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-05-07T08:55:29.979478Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037889 2025-05-07T08:55:29.979537Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037889 not sending time cast registration request in state WaitScheme 2025-05-07T08:55:30.152318Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 
72075186224037889, clientId# [1:731:2610], serverId# [1:734:2613], sessionId# [0:0:0] 2025-05-07T08:55:30.152889Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:733:2612], serverId# [1:735:2614], sessionId# [0:0:0] 2025-05-07T08:55:30.157799Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715657 at step 1000 at tablet 72075186224037889 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1000 MediatorID: 72057594046382081 TabletID: 72075186224037889 } 2025-05-07T08:55:30.157914Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-05-07T08:55:30.161768Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037889 2025-05-07T08:55:30.161845Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 active 0 active planned 0 immediate 0 planned 1 2025-05-07T08:55:30.161917Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000:2814749767 ... create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 4] schema version# 1 2025-05-07T08:55:57.401770Z node 4 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-05-07T08:55:57.406693Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-05-07T08:55:57.408149Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037889 time 0 2025-05-07T08:55:57.408230Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-05-07T08:55:57.409509Z node 4 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037889 step# 1000} 2025-05-07T08:55:57.409617Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-05-07T08:55:57.415833Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-05-07T08:55:57.415937Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037889 2025-05-07T08:55:57.415995Z node 4 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037889 2025-05-07T08:55:57.416089Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037889 at tablet 72075186224037889 send result to client [4:410:2405], exec latency: 0 ms, propose latency: 0 ms 2025-05-07T08:55:57.416164Z node 4 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037889 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-05-07T08:55:57.416300Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-05-07T08:55:57.417130Z node 4 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-05-07T08:55:57.417224Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: 
TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T08:55:57.432321Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-05-07T08:55:57.432403Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T08:55:57.432900Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-05-07T08:55:57.432989Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037889 2025-05-07T08:55:57.433509Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T08:55:57.433563Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-05-07T08:55:57.433626Z node 4 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-05-07T08:55:57.433719Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [4:410:2405], exec latency: 0 ms, propose latency: 0 ms 2025-05-07T08:55:57.433776Z node 4 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-05-07T08:55:57.433881Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T08:55:57.459411Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037889 state Ready 2025-05-07T08:55:57.459543Z node 4 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037889 Got TEvSchemaChangedResult from SS at 72075186224037889 2025-05-07T08:55:57.460734Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037889 coordinator 72057594046316545 last step 0 next step 1000 2025-05-07T08:55:57.461048Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-05-07T08:55:57.467108Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-05-07T08:55:57.467235Z node 4 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-05-07T08:55:57.497804Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:779:2650], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:55:57.497941Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:789:2655], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:55:57.502625Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:55:57.519888Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480 2025-05-07T08:55:57.557127Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-05-07T08:55:57.557300Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037889 2025-05-07T08:55:57.739783Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-05-07T08:55:57.739922Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037889 2025-05-07T08:55:57.743979Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:793:2658], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-05-07T08:55:57.782901Z node 4 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [4:865:2699] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:55:57.859796Z node 4 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715660. Ctx: { TraceId: 01jtmzawsq1wngk49m5hg7ra9r, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=4&id=YjEzMDFkMjctMTQwMmUwMjgtMTE4ZjY4MjgtOWViNTkzN2Y=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:55:57.862803Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [4:934:2730], serverId# [4:935:2731], sessionId# [0:0:0] 2025-05-07T08:55:57.863236Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:245: Executing write operation for [0:2] at 72075186224037889 2025-05-07T08:55:57.863526Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:874: PersistChangeRecord: record: { Order: 1 Group: 1746608157863423 Step: 1500 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] Kind: AsyncIndex Source: Unspecified Body: 42b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 1 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037889 2025-05-07T08:55:57.863752Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:410: Executed write operation for [0:2] at 72075186224037889, row count=1 2025-05-07T08:55:57.874840Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1170: EnqueueChangeRecords: at tablet: 72075186224037889, records: { Order: 1 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] BodySize: 42 TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 1 } 2025-05-07T08:55:57.874940Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-05-07T08:55:57.938791Z node 4 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715661. Ctx: { TraceId: 01jtmzax5qe9m17s1dfbrer7d7, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=4&id=YWE4OGI1YzgtOTc3MzNiNDMtM2ZhN2QxMDEtYjM3NmJlNzE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-05-07T08:55:57.940907Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:245: Executing write operation for [0:3] at 72075186224037889 2025-05-07T08:55:57.941203Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:874: PersistChangeRecord: record: { Order: 2 Group: 1746608157941099 Step: 1500 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 1 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037889 2025-05-07T08:55:57.941377Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:874: PersistChangeRecord: record: { Order: 3 Group: 1746608157941099 Step: 1500 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] Kind: AsyncIndex Source: Unspecified Body: 42b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 1 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037889 2025-05-07T08:55:57.941476Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:410: Executed write operation for [0:3] at 72075186224037889, row count=1 2025-05-07T08:55:57.953745Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1170: EnqueueChangeRecords: at tablet: 72075186224037889, records: { Order: 2 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] BodySize: 28 TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 1 }, { Order: 3 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] BodySize: 42 TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 1 } 2025-05-07T08:55:57.953821Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-05-07T08:55:57.957889Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [4:975:2762], serverId# [4:976:2763], sessionId# [0:0:0] 2025-05-07T08:55:57.967077Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [4:977:2764], serverId# [4:978:2765], sessionId# [0:0:0] >> JsonChangeRecord::Heartbeat [GOOD] >> KqpWorkloadServiceDistributed::TestDistributedLargeConcurrentQueryLimit [GOOD] |90.5%| [TS] {asan, default-linux-x86_64, release} ydb/core/tx/replication/service/ut_json_change_record/unittest >> JsonChangeRecord::Heartbeat [GOOD] >> ReadLoad::ShouldReadKqpMoreThanRows [GOOD] |90.5%| [TS] {asan, default-linux-x86_64, release} ydb/core/tx/replication/service/ut_json_change_record/unittest >> SystemView::PgTablesOneSchemeShardDataQuery [GOOD] >> SystemView::QueryStats >> TCdcStreamTests::DocApi [GOOD] >> TCdcStreamTests::DocApiNegative >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-72 [FAIL] >> KqpWorkloadService::TestLessConcurrentQueryLimit [GOOD] >> KqpWorkloadService::TestCpuLoadThreshold |90.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_external_table/ydb-core-tx-schemeshard-ut_external_table |90.5%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_external_table/ydb-core-tx-schemeshard-ut_external_table |90.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_external_table/ydb-core-tx-schemeshard-ut_external_table >> TPartBtreeIndexIteration::FewNodes_Groups_Slices [GOOD] >> TPartBtreeIndexIteration::FewNodes_History_Slices >> TCdcStreamTests::DocApiNegative [GOOD] >> TCdcStreamTests::Negative >> JsonChangeRecord::DataChange [GOOD] >> GenericFederatedQuery::YdbManagedSelectAll [GOOD] >> 
GenericFederatedQuery::YdbManagedSelectConstant >> JsonChangeRecord::DataChangeVersion [GOOD] |90.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_allocator_client/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/load_test/ut_ycsb/unittest >> ReadLoad::ShouldReadKqpMoreThanRows [GOOD] Test command err: 2025-05-07T08:55:30.207918Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:105:2151], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:55:30.208076Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:55:30.208303Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0038c9/r3tmp/tmpqsWvOD/pdisk_1.dat 2025-05-07T08:55:30.648826Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-05-07T08:55:30.711764Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:55:30.772963Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:55:30.773111Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:55:30.787140Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:55:30.884722Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-05-07T08:55:31.318109Z node 1 :DS_LOAD_TEST INFO: test_load_actor.cpp:346: TLoad# 0 warmups table# usertable in dir# /Root with rows# 100 2025-05-07T08:55:31.319860Z node 1 :DS_LOAD_TEST NOTICE: bulk_mkql_upsert.cpp:157: Id# {Tag: 0, parent: [1:732:2614], subTag: 1} TUpsertActor Bootstrap called: RowCount: 100 Inflight: 100 BatchSize: 100 with type# 0, target# TabletId: 72075186224037888 TableId: 2 WorkingDir: "/Root" TableName: "usertable" 2025-05-07T08:55:31.346789Z node 1 :DS_LOAD_TEST NOTICE: bulk_mkql_upsert.cpp:255: Id# {Tag: 0, parent: [1:732:2614], subTag: 1} TUpsertActor finished in 0.026650s, errors=0 2025-05-07T08:55:31.347010Z node 1 :DS_LOAD_TEST DEBUG: test_load_actor.cpp:425: TLoad# 0 created load actor of type# kReadKqpStart with tag# 2, proto# NotifyWhenFinished: true TableSetup { WorkingDir: "/Root" TableName: "usertable" } TargetShard { TabletId: 72075186224037888 TableId: 2 WorkingDir: "/Root" TableName: "usertable" } ReadKqpStart { RowCount: 100 Inflights: 10 } 2025-05-07T08:55:31.347078Z node 1 :DS_LOAD_TEST NOTICE: kqp_select.cpp:322: TKqpSelectActorMultiSession# {Tag: 0, parent: [1:732:2614], subTag: 3} Bootstrap called: RowCount: 100 Inflights: 10 2025-05-07T08:55:31.347811Z node 1 :DS_LOAD_TEST INFO: kqp_select.cpp:366: TKqpSelectActorMultiSession# {Tag: 0, parent: [1:732:2614], subTag: 3} will work with tablet# 72075186224037888 with ownerId# 72057594046644480 with tableId# 2 resolved for path# /Root/usertable with columnsCount# 11, keyColumnCount# 1 2025-05-07T08:55:31.347906Z node 1 :DS_LOAD_TEST DEBUG: kqp_select.cpp:400: TKqpSelectActorMultiSession# {Tag: 0, parent: [1:732:2614], subTag: 3} started fullscan actor# [1:744:2626] 2025-05-07T08:55:31.347996Z node 1 :DS_LOAD_TEST INFO: common.cpp:52: ReadIteratorScan# {Tag: 0, parent: [1:741:2623], subTag: 
1} Bootstrap called, sample# 100 2025-05-07T08:55:31.348033Z node 1 :DS_LOAD_TEST DEBUG: common.cpp:61: ReadIteratorScan# {Tag: 0, parent: [1:741:2623], subTag: 1} Connect to# 72075186224037888 called 2025-05-07T08:55:31.348605Z node 1 :DS_LOAD_TEST DEBUG: common.cpp:75: ReadIteratorScan# {Tag: 0, parent: [1:741:2623], subTag: 1} Handle TEvClientConnected called, Status# OK 2025-05-07T08:55:31.349309Z node 1 :DS_LOAD_TEST NOTICE: common.cpp:137: ReadIteratorScan# {Tag: 0, parent: [1:741:2623], subTag: 1} finished in 0.000605s, sampled# 100, iter finished# 1, oks# 100 2025-05-07T08:55:31.349422Z node 1 :DS_LOAD_TEST INFO: kqp_select.cpp:416: TKqpSelectActorMultiSession# {Tag: 0, parent: [1:732:2614], subTag: 3} received keyCount# 100 2025-05-07T08:55:31.349602Z node 1 :DS_LOAD_TEST NOTICE: kqp_select.cpp:445: TKqpSelectActorMultiSession# {Tag: 0, parent: [1:732:2614], subTag: 3} started# 10 actors each with inflight# 1 2025-05-07T08:55:31.349645Z node 1 :DS_LOAD_TEST INFO: kqp_select.cpp:130: TKqpSelectActor# {Tag: 0, parent: [1:741:2623], subTag: 2} Bootstrap called 2025-05-07T08:55:31.349675Z node 1 :DS_LOAD_TEST DEBUG: kqp_select.cpp:142: TKqpSelectActor# {Tag: 0, parent: [1:741:2623], subTag: 2} sends event for session creation to proxy: [1:8678280833929343339:121] 2025-05-07T08:55:31.349718Z node 1 :DS_LOAD_TEST INFO: kqp_select.cpp:130: TKqpSelectActor# {Tag: 0, parent: [1:741:2623], subTag: 3} Bootstrap called 2025-05-07T08:55:31.349734Z node 1 :DS_LOAD_TEST DEBUG: kqp_select.cpp:142: TKqpSelectActor# {Tag: 0, parent: [1:741:2623], subTag: 3} sends event for session creation to proxy: [1:8678280833929343339:121] 2025-05-07T08:55:31.349750Z node 1 :DS_LOAD_TEST INFO: kqp_select.cpp:130: TKqpSelectActor# {Tag: 0, parent: [1:741:2623], subTag: 4} Bootstrap called 2025-05-07T08:55:31.349770Z node 1 :DS_LOAD_TEST DEBUG: kqp_select.cpp:142: TKqpSelectActor# {Tag: 0, parent: [1:741:2623], subTag: 4} sends event for session creation to proxy: [1:8678280833929343339:121] 2025-05-07T08:55:31.349790Z node 1 :DS_LOAD_TEST INFO: kqp_select.cpp:130: TKqpSelectActor# {Tag: 0, parent: [1:741:2623], subTag: 5} Bootstrap called 2025-05-07T08:55:31.349829Z node 1 :DS_LOAD_TEST DEBUG: kqp_select.cpp:142: TKqpSelectActor# {Tag: 0, parent: [1:741:2623], subTag: 5} sends event for session creation to proxy: [1:8678280833929343339:121] 2025-05-07T08:55:31.349846Z node 1 :DS_LOAD_TEST INFO: kqp_select.cpp:130: TKqpSelectActor# {Tag: 0, parent: [1:741:2623], subTag: 6} Bootstrap called 2025-05-07T08:55:31.349859Z node 1 :DS_LOAD_TEST DEBUG: kqp_select.cpp:142: TKqpSelectActor# {Tag: 0, parent: [1:741:2623], subTag: 6} sends event for session creation to proxy: [1:8678280833929343339:121] 2025-05-07T08:55:31.349873Z node 1 :DS_LOAD_TEST INFO: kqp_select.cpp:130: TKqpSelectActor# {Tag: 0, parent: [1:741:2623], subTag: 7} Bootstrap called 2025-05-07T08:55:31.349885Z node 1 :DS_LOAD_TEST DEBUG: kqp_select.cpp:142: TKqpSelectActor# {Tag: 0, parent: [1:741:2623], subTag: 7} sends event for session creation to proxy: [1:8678280833929343339:121] 2025-05-07T08:55:31.349905Z node 1 :DS_LOAD_TEST INFO: kqp_select.cpp:130: TKqpSelectActor# {Tag: 0, parent: [1:741:2623], subTag: 8} Bootstrap called 2025-05-07T08:55:31.349924Z node 1 :DS_LOAD_TEST DEBUG: kqp_select.cpp:142: TKqpSelectActor# {Tag: 0, parent: [1:741:2623], subTag: 8} sends event for session creation to proxy: [1:8678280833929343339:121] 2025-05-07T08:55:31.349946Z node 1 :DS_LOAD_TEST INFO: kqp_select.cpp:130: TKqpSelectActor# {Tag: 0, parent: [1:741:2623], 
subTag: 9} Bootstrap called 2025-05-07T08:55:31.349957Z node 1 :DS_LOAD_TEST DEBUG: kqp_select.cpp:142: TKqpSelectActor# {Tag: 0, parent: [1:741:2623], subTag: 9} sends event for session creation to proxy: [1:8678280833929343339:121] 2025-05-07T08:55:31.350063Z node 1 :DS_LOAD_TEST INFO: kqp_select.cpp:130: TKqpSelectActor# {Tag: 0, parent: [1:741:2623], subTag: 10} Bootstrap called 2025-05-07T08:55:31.350085Z node 1 :DS_LOAD_TEST DEBUG: kqp_select.cpp:142: TKqpSelectActor# {Tag: 0, parent: [1:741:2623], subTag: 10} sends event for session creation to proxy: [1:8678280833929343339:121] 2025-05-07T08:55:31.350107Z node 1 :DS_LOAD_TEST INFO: kqp_select.cpp:130: TKqpSelectActor# {Tag: 0, parent: [1:741:2623], subTag: 11} Bootstrap called 2025-05-07T08:55:31.350130Z node 1 :DS_LOAD_TEST DEBUG: kqp_select.cpp:142: TKqpSelectActor# {Tag: 0, parent: [1:741:2623], subTag: 11} sends event for session creation to proxy: [1:8678280833929343339:121] 2025-05-07T08:55:31.351848Z node 1 :DS_LOAD_TEST DEBUG: kqp_select.cpp:214: TKqpSelectActor# {Tag: 0, parent: [1:741:2623], subTag: 2} session: ydb://session/3?node_id=1&id=YjMwNWM2YTMtYmVjZjdmODQtZmNhZjNhNWYtM2JmNTM3NWE= 2025-05-07T08:55:31.355465Z node 1 :DS_LOAD_TEST DEBUG: kqp_select.cpp:214: TKqpSelectActor# {Tag: 0, parent: [1:741:2623], subTag: 3} session: ydb://session/3?node_id=1&id=ODk0N2RkZDMtOGVhNGY1NGEtMmQ2ODdkZTItYWQ1OTVjYTc= 2025-05-07T08:55:31.355680Z node 1 :DS_LOAD_TEST DEBUG: kqp_select.cpp:214: TKqpSelectActor# {Tag: 0, parent: [1:741:2623], subTag: 4} session: ydb://session/3?node_id=1&id=N2EyNjhmZDktZjgxZGQ1M2EtN2RlZTc2ZDEtNGRiODNiNGU= 2025-05-07T08:55:31.357293Z node 1 :DS_LOAD_TEST DEBUG: kqp_select.cpp:214: TKqpSelectActor# {Tag: 0, parent: [1:741:2623], subTag: 5} session: ydb://session/3?node_id=1&id=NDUwMDA0NzAtNDAxYTMyZjctZjQwNTdjNWQtYTllODBhNg== 2025-05-07T08:55:31.360334Z node 1 :DS_LOAD_TEST DEBUG: kqp_select.cpp:214: TKqpSelectActor# {Tag: 0, parent: [1:741:2623], subTag: 6} session: ydb://session/3?node_id=1&id=OWI0YTFjZmUtOWQ1ZWE3N2QtMWYwMWU1My0yNDJmNjY0Ng== 2025-05-07T08:55:31.360451Z node 1 :DS_LOAD_TEST DEBUG: kqp_select.cpp:214: TKqpSelectActor# {Tag: 0, parent: [1:741:2623], subTag: 7} session: ydb://session/3?node_id=1&id=NDc0ZWFjMGEtMTNhM2VmZGMtZmE1NzliZjItOWQ3NmZjMDQ= 2025-05-07T08:55:31.361936Z node 1 :DS_LOAD_TEST DEBUG: kqp_select.cpp:214: TKqpSelectActor# {Tag: 0, parent: [1:741:2623], subTag: 8} session: ydb://session/3?node_id=1&id=NmZmNDE2YjAtOTliMWNkMTQtNmM0YzlmODEtZWNkNjhhZjU= 2025-05-07T08:55:31.363567Z node 1 :DS_LOAD_TEST DEBUG: kqp_select.cpp:214: TKqpSelectActor# {Tag: 0, parent: [1:741:2623], subTag: 9} session: ydb://session/3?node_id=1&id=M2YyZWQ1MDItNTZmMzU1M2YtMWRjYzAzNmItZTM4ZGQ1MTM= 2025-05-07T08:55:31.366479Z node 1 :DS_LOAD_TEST DEBUG: kqp_select.cpp:214: TKqpSelectActor# {Tag: 0, parent: [1:741:2623], subTag: 10} session: ydb://session/3?node_id=1&id=ZWMwNDE4NDQtYjcyZWMxNzgtN2E1MzM3ZDctMTc0ZWY3NzM= 2025-05-07T08:55:31.366612Z node 1 :DS_LOAD_TEST DEBUG: kqp_select.cpp:214: TKqpSelectActor# {Tag: 0, parent: [1:741:2623], subTag: 11} session: ydb://session/3?node_id=1&id=YjU4ZjQ0ZjctNWY2ZTFkMWQtODcwMjU4MWQtNWRjZDcxYzg= 2025-05-07T08:55:31.371848Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:768:2650], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:55:31.371985Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:800:2676], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:55:31.372089Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:801:2677], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:55:31.372147Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:802:2678], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:55:31.372197Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:803:2679], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource ... PathStateCreate)" severity: 1 } 2025-05-07T08:55:48.795763Z node 2 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [2:860:2723] txid# 281474976715666, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exists but creating right now (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateCreate)" severity: 1 } 2025-05-07T08:55:48.999693Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:822:2698], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-05-07T08:55:48.999823Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:825:2701], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-05-07T08:55:48.999884Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:826:2702], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-05-07T08:55:48.999933Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:827:2703], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-05-07T08:55:49.000010Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:828:2704], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-05-07T08:55:49.000098Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:829:2705], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-05-07T08:55:49.000187Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:830:2706], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-05-07T08:55:49.000264Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:836:2712], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-05-07T08:55:49.000314Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:839:2715], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-05-07T08:55:49.000363Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:849:2719], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-05-07T08:55:49.041435Z node 2 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [2:976:2812] txid# 281474976715668, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:55:51.255725Z node 2 :DS_LOAD_TEST NOTICE: kqp_select.cpp:197: TKqpSelectActor# {Tag: 0, parent: [2:741:2623], subTag: 10} finished in 2.683227s, errors=0 2025-05-07T08:55:51.255908Z node 2 :DS_LOAD_TEST DEBUG: kqp_select.cpp:461: TKqpSelectActorMultiSession# {Tag: 0, parent: [2:732:2614], subTag: 3} finished: 10 { Tag: 10 DurationMs: 2683 OperationsOK: 100 OperationsError: 0 } 2025-05-07T08:55:51.269576Z node 2 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [2:1905:3134] txid# 281474976715769, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:55:52.249485Z node 2 :DS_LOAD_TEST NOTICE: kqp_select.cpp:197: TKqpSelectActor# {Tag: 0, parent: [2:741:2623], subTag: 9} finished in 3.679191s, errors=0 2025-05-07T08:55:52.254621Z node 2 :DS_LOAD_TEST DEBUG: kqp_select.cpp:461: TKqpSelectActorMultiSession# {Tag: 0, parent: [2:732:2614], subTag: 3} finished: 9 { Tag: 9 DurationMs: 3679 OperationsOK: 100 OperationsError: 0 } 2025-05-07T08:55:52.270550Z node 2 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [2:2812:3440] txid# 281474976715870, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:55:53.112856Z node 2 :DS_LOAD_TEST NOTICE: kqp_select.cpp:197: TKqpSelectActor# {Tag: 0, parent: [2:741:2623], subTag: 2} finished in 4.588619s, errors=0 2025-05-07T08:55:53.113174Z node 2 :DS_LOAD_TEST DEBUG: kqp_select.cpp:461: TKqpSelectActorMultiSession# {Tag: 0, parent: [2:732:2614], subTag: 3} finished: 2 { Tag: 2 DurationMs: 4588 OperationsOK: 100 OperationsError: 0 } 2025-05-07T08:55:53.135836Z node 2 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [2:3719:3746] txid# 281474976715971, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:55:54.432399Z node 2 :DS_LOAD_TEST NOTICE: kqp_select.cpp:197: TKqpSelectActor# {Tag: 0, parent: [2:741:2623], subTag: 6} finished in 5.884701s, errors=0 2025-05-07T08:55:54.432588Z node 2 :DS_LOAD_TEST DEBUG: kqp_select.cpp:461: TKqpSelectActorMultiSession# {Tag: 0, parent: [2:732:2614], subTag: 3} finished: 6 { Tag: 6 DurationMs: 5884 OperationsOK: 100 OperationsError: 0 } 2025-05-07T08:55:54.459711Z node 2 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [2:4626:4052] txid# 281474976716072, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:55:55.370769Z node 2 :DS_LOAD_TEST NOTICE: kqp_select.cpp:197: 
TKqpSelectActor# {Tag: 0, parent: [2:741:2623], subTag: 11} finished in 6.783821s, errors=0 2025-05-07T08:55:55.371138Z node 2 :DS_LOAD_TEST DEBUG: kqp_select.cpp:461: TKqpSelectActorMultiSession# {Tag: 0, parent: [2:732:2614], subTag: 3} finished: 11 { Tag: 11 DurationMs: 6783 OperationsOK: 100 OperationsError: 0 } 2025-05-07T08:55:55.412550Z node 2 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [2:5533:4358] txid# 281474976716173, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:55:57.162618Z node 2 :DS_LOAD_TEST NOTICE: kqp_select.cpp:197: TKqpSelectActor# {Tag: 0, parent: [2:741:2623], subTag: 4} finished in 8.625598s, errors=0 2025-05-07T08:55:57.162868Z node 2 :DS_LOAD_TEST DEBUG: kqp_select.cpp:461: TKqpSelectActorMultiSession# {Tag: 0, parent: [2:732:2614], subTag: 3} finished: 4 { Tag: 4 DurationMs: 8625 OperationsOK: 100 OperationsError: 0 } 2025-05-07T08:55:57.183808Z node 2 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [2:6440:4664] txid# 281474976716274, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:55:58.081946Z node 2 :DS_LOAD_TEST NOTICE: kqp_select.cpp:197: TKqpSelectActor# {Tag: 0, parent: [2:741:2623], subTag: 7} finished in 9.532214s, errors=0 2025-05-07T08:55:58.082559Z node 2 :DS_LOAD_TEST DEBUG: kqp_select.cpp:461: TKqpSelectActorMultiSession# {Tag: 0, parent: [2:732:2614], subTag: 3} finished: 7 { Tag: 7 DurationMs: 9532 OperationsOK: 100 OperationsError: 0 } 2025-05-07T08:55:58.105805Z node 2 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [2:7347:4970] txid# 281474976716375, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:55:59.167857Z node 2 :DS_LOAD_TEST NOTICE: kqp_select.cpp:197: TKqpSelectActor# {Tag: 0, parent: [2:741:2623], subTag: 3} finished in 10.643453s, errors=0 2025-05-07T08:55:59.168300Z node 2 :DS_LOAD_TEST DEBUG: kqp_select.cpp:461: TKqpSelectActorMultiSession# {Tag: 0, parent: [2:732:2614], subTag: 3} finished: 3 { Tag: 3 DurationMs: 10643 OperationsOK: 100 OperationsError: 0 } 2025-05-07T08:55:59.195215Z node 2 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [2:8254:5276] txid# 281474976716476, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:55:59.212095Z node 2 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jtmzam3ca8389af77esq98xv", SessionId: ydb://session/3?node_id=2&id=NDY2MWU2MGUtNzBiMmY1MmYtNDE2NjVjOTAtOTFmYzVkNmI=, Slow query, duration: 10.622993s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "\n --!syntax_v1\n\n DECLARE $key AS Text;\n\n SELECT * FROM `usertable` WHERE id == $key;\n ", parameters: 36b 2025-05-07T08:56:00.850073Z node 2 :DS_LOAD_TEST NOTICE: kqp_select.cpp:197: TKqpSelectActor# {Tag: 0, parent: [2:741:2623], subTag: 8} finished in 
12.279931s, errors=0 2025-05-07T08:56:00.850589Z node 2 :DS_LOAD_TEST DEBUG: kqp_select.cpp:461: TKqpSelectActorMultiSession# {Tag: 0, parent: [2:732:2614], subTag: 3} finished: 8 { Tag: 8 DurationMs: 12279 OperationsOK: 100 OperationsError: 0 } 2025-05-07T08:56:00.876212Z node 2 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [2:9161:5582] txid# 281474976716577, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:56:00.899276Z node 2 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jtmzam3cbvjya2b6k1jn3dtq", SessionId: ydb://session/3?node_id=2&id=ZDI3OGQ5YzYtMTg1YzVhNjItNTQxMDI0ZWMtNGViNzY5NDM=, Slow query, duration: 12.310969s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "\n --!syntax_v1\n\n DECLARE $key AS Text;\n\n SELECT * FROM `usertable` WHERE id == $key;\n ", parameters: 36b 2025-05-07T08:56:02.203679Z node 2 :DS_LOAD_TEST NOTICE: kqp_select.cpp:197: TKqpSelectActor# {Tag: 0, parent: [2:741:2623], subTag: 5} finished in 13.666490s, errors=0 2025-05-07T08:56:02.203998Z node 2 :DS_LOAD_TEST DEBUG: kqp_select.cpp:461: TKqpSelectActorMultiSession# {Tag: 0, parent: [2:732:2614], subTag: 3} finished: 5 { Tag: 5 DurationMs: 13666 OperationsOK: 100 OperationsError: 0 } 2025-05-07T08:56:02.204079Z node 2 :DS_LOAD_TEST NOTICE: kqp_select.cpp:480: TKqpSelectActorMultiSession# {Tag: 0, parent: [2:732:2614], subTag: 3} finished in 13.693471s, oks# 1000, errors# 0 2025-05-07T08:56:02.204426Z node 2 :DS_LOAD_TEST INFO: test_load_actor.cpp:447: TLoad# 0 received finished from actor# [2:741:2623] with tag# 3 >> GenericFederatedQuery::YdbFilterPushdown [GOOD] >> GenericFederatedQuery::TestFailsOnIncorrectScriptExecutionOperationId |90.5%| [TS] {asan, default-linux-x86_64, release} ydb/core/tx/replication/service/ut_json_change_record/unittest >> JsonChangeRecord::DataChange [GOOD] >> GenericFederatedQuery::IcebergHadoopBasicSelectAll [GOOD] >> GenericFederatedQuery::IcebergHadoopBasicSelectConstant >> BuildStatsHistogram::Ten_Crossed [GOOD] >> BuildStatsHistogram::Ten_Mixed_Log >> TCdcStreamTests::Negative [GOOD] >> TCdcStreamTests::DisableProtoSourceIdInfo |90.5%| [TS] {asan, default-linux-x86_64, release} ydb/core/tx/replication/service/ut_json_change_record/unittest >> JsonChangeRecord::DataChangeVersion [GOOD] >> DstCreator::WithSyncIndexAndIntermediateDir [GOOD] |90.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/services/persqueue_v1/ut/ydb-services-persqueue_v1-ut |90.5%| [LD] {RESULT} $(B)/ydb/services/persqueue_v1/ut/ydb-services-persqueue_v1-ut |90.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/services/persqueue_v1/ut/ydb-services-persqueue_v1-ut >> TCdcStreamTests::DisableProtoSourceIdInfo [GOOD] >> TCdcStreamTests::CreateStream >> TTxAllocatorClientTest::ZeroRange ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/workload_service/ut/unittest >> KqpWorkloadServiceDistributed::TestDistributedLargeConcurrentQueryLimit [GOOD] Test command err: 2025-05-07T08:54:41.965601Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501624588170828175:2190];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:54:41.965656Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path 
existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/004828/r3tmp/tmpdCqh5d/pdisk_1.dat 2025-05-07T08:54:42.756945Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:54:42.766301Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:54:42.766420Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:54:42.779260Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 12981, node 1 2025-05-07T08:54:43.064486Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:54:43.064505Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:54:43.064516Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:54:43.064623Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:10831 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:54:43.683728Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T08:54:43.711199Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T08:54:46.390459Z node 1 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:440: [WorkloadService] [Service] Started workload service initialization 2025-05-07T08:54:46.390553Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:241: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7501624609645665167:2330], Start check tables existence, number paths: 2 2025-05-07T08:54:46.393759Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:223: SessionId: ydb://session/3?node_id=1&id=YTIzODYzMGUtZmYyMjhhYjItNjgxOWIzYjgtYmIwMzJiM2E=, ActorId: [0:0:0], ActorState: unknown state, Create session actor with id YTIzODYzMGUtZmYyMjhhYjItNjgxOWIzYjgtYmIwMzJiM2E= 2025-05-07T08:54:46.395556Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:227: SessionId: ydb://session/3?node_id=1&id=YTIzODYzMGUtZmYyMjhhYjItNjgxOWIzYjgtYmIwMzJiM2E=, ActorId: [1:7501624609645665191:2331], ActorState: unknown state, session actor bootstrapped 2025-05-07T08:54:46.402564Z node 1 :KQP_WORKLOAD_SERVICE TRACE: kqp_workload_service.cpp:125: [WorkloadService] [Service] Updated node info, noode count: 1 2025-05-07T08:54:46.402605Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:100: [WorkloadService] [Service] Subscribed for config changes 2025-05-07T08:54:46.402631Z node 1 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:111: [WorkloadService] [Service] Resource pools was enanbled 2025-05-07T08:54:46.402737Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:182: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7501624609645665167:2330], Describe table /Root/.metadata/workload_manager/delayed_requests status PathErrorUnknown 2025-05-07T08:54:46.402833Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:182: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7501624609645665167:2330], Describe table /Root/.metadata/workload_manager/running_requests status PathErrorUnknown 2025-05-07T08:54:46.402861Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:289: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7501624609645665167:2330], Successfully finished 2025-05-07T08:54:46.409841Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:367: [WorkloadService] [Service] Cleanup completed, tables exists: 0 2025-05-07T08:54:46.427669Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:387: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501624609645665193:2303], DatabaseId: Root, PoolId: sample_pool_id, Start pool creating 2025-05-07T08:54:46.432041Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480 2025-05-07T08:54:46.437285Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:429: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501624609645665193:2303], DatabaseId: Root, PoolId: sample_pool_id, Subscribe on create pool tx: 281474976710658 2025-05-07T08:54:46.437471Z node 1 :KQP_WORKLOAD_SERVICE TRACE: scheme_actors.cpp:352: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501624609645665193:2303], DatabaseId: Root, PoolId: sample_pool_id, Tablet to pipe successfully connected 2025-05-07T08:54:46.458523Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: 
[1:7501624609645665193:2303], DatabaseId: Root, PoolId: sample_pool_id, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-05-07T08:54:46.524831Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:387: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501624609645665193:2303], DatabaseId: Root, PoolId: sample_pool_id, Start pool creating 2025-05-07T08:54:46.529929Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501624609645665244:2335] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/sample_pool_id\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:54:46.533378Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:480: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501624609645665193:2303], DatabaseId: Root, PoolId: sample_pool_id, Pool successfully created 2025-05-07T08:54:46.533897Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:185: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501624609645665251:2341], DatabaseId: Root, PoolId: sample_pool_id, Start pool fetching 2025-05-07T08:54:46.535047Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:223: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501624609645665251:2341], DatabaseId: Root, PoolId: sample_pool_id, Pool info successfully fetched 2025-05-07T08:54:46.551532Z node 1 :KQP_SESSION INFO: kqp_session_actor.cpp:2315: SessionId: ydb://session/3?node_id=1&id=YTIzODYzMGUtZmYyMjhhYjItNjgxOWIzYjgtYmIwMzJiM2E=, ActorId: [1:7501624609645665191:2331], ActorState: ReadyState, Session closed due to explicit close event 2025-05-07T08:54:46.551597Z node 1 :KQP_SESSION INFO: kqp_session_actor.cpp:2473: SessionId: ydb://session/3?node_id=1&id=YTIzODYzMGUtZmYyMjhhYjItNjgxOWIzYjgtYmIwMzJiM2E=, ActorId: [1:7501624609645665191:2331], ActorState: ReadyState, Cleanup start, isFinal: 1 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-05-07T08:54:46.551615Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2534: SessionId: ydb://session/3?node_id=1&id=YTIzODYzMGUtZmYyMjhhYjItNjgxOWIzYjgtYmIwMzJiM2E=, ActorId: [1:7501624609645665191:2331], ActorState: ReadyState, EndCleanup, isFinal: 1 2025-05-07T08:54:46.551650Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2546: SessionId: ydb://session/3?node_id=1&id=YTIzODYzMGUtZmYyMjhhYjItNjgxOWIzYjgtYmIwMzJiM2E=, ActorId: [1:7501624609645665191:2331], ActorState: unknown state, Cleanup temp tables: 0 2025-05-07T08:54:46.551736Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2637: SessionId: ydb://session/3?node_id=1&id=YTIzODYzMGUtZmYyMjhhYjItNjgxOWIzYjgtYmIwMzJiM2E=, ActorId: [1:7501624609645665191:2331], ActorState: unknown state, Session actor destroyed 2025-05-07T08:54:47.611594Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7501624613564934074:2192];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:54:47.611734Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/004828/r3tmp/tmpqNz8ne/pdisk_1.dat 2025-05-07T08:54:47.828586Z node 2 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:54:47.868788Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, 
(0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:54:47.874183Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:54:47.884773Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 28346, node 2 2025-05-07T08:54:48.070773Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:54:48.070797Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:54:48.070806Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:54:48.070909Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:28378 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children ... sactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-05-07T08:55:58.926111Z node 6 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2534: SessionId: ydb://session/3?node_id=6&id=ZThjZmFiNDAtNTBmNzc0NjYtZmE5YmMxY2YtNzg0YTAzMzk=, ActorId: [6:7501624748880837425:2336], ActorState: ReadyState, EndCleanup, isFinal: 1 2025-05-07T08:55:58.926142Z node 6 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2546: SessionId: ydb://session/3?node_id=6&id=ZThjZmFiNDAtNTBmNzc0NjYtZmE5YmMxY2YtNzg0YTAzMzk=, ActorId: [6:7501624748880837425:2336], ActorState: unknown state, Cleanup temp tables: 0 2025-05-07T08:55:58.926220Z node 6 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2637: SessionId: ydb://session/3?node_id=6&id=ZThjZmFiNDAtNTBmNzc0NjYtZmE5YmMxY2YtNzg0YTAzMzk=, ActorId: [6:7501624748880837425:2336], ActorState: unknown state, Session actor destroyed 2025-05-07T08:55:58.963264Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1699: SessionId: ydb://session/3?node_id=8&id=MjRlMGNmZC0yMmY2NzA1YS0zOWM1OWY4My05OWM1NmVkMw==, ActorId: [8:7501624920197801346:4601], ActorState: ExecuteState, TraceId: 01jtmzay5gc52edchhajvmgva0, TEvTxResponse, CurrentTx: 1/1 response.status: SUCCESS 2025-05-07T08:55:58.963458Z node 8 :KQP_SESSION INFO: kqp_session_actor.cpp:1958: SessionId: ydb://session/3?node_id=8&id=MjRlMGNmZC0yMmY2NzA1YS0zOWM1OWY4My05OWM1NmVkMw==, ActorId: [8:7501624920197801346:4601], ActorState: ExecuteState, TraceId: 01jtmzay5gc52edchhajvmgva0, txInfo Status: Committed Kind: ReadWrite TotalDuration: 63.44 ServerDuration: 62.848 QueriesCount: 2 2025-05-07T08:55:58.963581Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2113: SessionId: ydb://session/3?node_id=8&id=MjRlMGNmZC0yMmY2NzA1YS0zOWM1OWY4My05OWM1NmVkMw==, ActorId: [8:7501624920197801346:4601], ActorState: ExecuteState, TraceId: 01jtmzay5gc52edchhajvmgva0, Create QueryResponse for action: QUERY_ACTION_EXECUTE with SUCCESS status 2025-05-07T08:55:58.963640Z node 8 :KQP_SESSION INFO: 
kqp_session_actor.cpp:2473: SessionId: ydb://session/3?node_id=8&id=MjRlMGNmZC0yMmY2NzA1YS0zOWM1OWY4My05OWM1NmVkMw==, ActorId: [8:7501624920197801346:4601], ActorState: ExecuteState, TraceId: 01jtmzay5gc52edchhajvmgva0, Cleanup start, isFinal: 0 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-05-07T08:55:58.963678Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2534: SessionId: ydb://session/3?node_id=8&id=MjRlMGNmZC0yMmY2NzA1YS0zOWM1OWY4My05OWM1NmVkMw==, ActorId: [8:7501624920197801346:4601], ActorState: ExecuteState, TraceId: 01jtmzay5gc52edchhajvmgva0, EndCleanup, isFinal: 0 2025-05-07T08:55:58.963740Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2270: SessionId: ydb://session/3?node_id=8&id=MjRlMGNmZC0yMmY2NzA1YS0zOWM1OWY4My05OWM1NmVkMw==, ActorId: [8:7501624920197801346:4601], ActorState: ExecuteState, TraceId: 01jtmzay5gc52edchhajvmgva0, Sent query response back to proxy, proxyRequestId: 480, proxyId: [8:7501624726924264907:2201] 2025-05-07T08:55:58.964884Z node 8 :KQP_WORKLOAD_SERVICE DEBUG: query_actor.cpp:240: [TQueryBase] [TRefreshPoolStateQuery] TraceId: sample_pool_id, RequestDatabase: /Root, RequestSessionId: , State: Update lease, TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=8&id=MjRlMGNmZC0yMmY2NzA1YS0zOWM1OWY4My05OWM1NmVkMw==, TxId: 2025-05-07T08:55:58.964999Z node 8 :KQP_WORKLOAD_SERVICE DEBUG: query_actor.cpp:197: [TQueryBase] [TRefreshPoolStateQuery] TraceId: sample_pool_id, RequestDatabase: /Root, RequestSessionId: , State: Update lease, RunDataQuery: -- TRefreshPoolStateQuery::OnLeaseUpdated DECLARE $database_id AS Text; DECLARE $pool_id AS Text; SELECT COUNT(*) AS delayed_requests FROM `.metadata/workload_manager/delayed_requests` WHERE database = $database_id AND pool_id = $pool_id AND (wait_deadline IS NULL OR wait_deadline >= CurrentUtcTimestamp()) AND lease_deadline >= CurrentUtcTimestamp(); SELECT COUNT(*) AS running_requests FROM `.metadata/workload_manager/running_requests` WHERE database = $database_id AND pool_id = $pool_id AND lease_deadline >= CurrentUtcTimestamp(); 2025-05-07T08:55:58.965502Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:443: SessionId: ydb://session/3?node_id=8&id=MjRlMGNmZC0yMmY2NzA1YS0zOWM1OWY4My05OWM1NmVkMw==, ActorId: [8:7501624920197801346:4601], ActorState: ReadyState, TraceId: 01jtmzay7ncjx0p50q7cxr8tf5, received request, proxyRequestId: 481 prepared: 0 tx_control: 1 action: QUERY_ACTION_EXECUTE type: QUERY_TYPE_SQL_DML text: -- TRefreshPoolStateQuery::OnLeaseUpdated DECLARE $database_id AS Text; DECLARE $pool_id AS Text; SELECT COUNT(*) AS delayed_requests FROM `.metadata/workload_manager/delayed_requests` WHERE database = $database_id AND pool_id = $pool_id AND (wait_deadline IS NULL OR wait_deadline >= CurrentUtcTimestamp()) AND lease_deadline >= CurrentUtcTimestamp(); SELECT COUNT(*) AS running_requests FROM `.metadata/workload_manager/running_requests` WHERE database = $database_id AND pool_id = $pool_id AND lease_deadline >= CurrentUtcTimestamp(); rpcActor: [8:7501624920197801366:4606] database: /Root databaseId: /Root pool id: default 2025-05-07T08:55:58.965535Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:264: SessionId: ydb://session/3?node_id=8&id=MjRlMGNmZC0yMmY2NzA1YS0zOWM1OWY4My05OWM1NmVkMw==, ActorId: [8:7501624920197801346:4601], ActorState: ReadyState, TraceId: 01jtmzay7ncjx0p50q7cxr8tf5, request placed into pool from cache: default 2025-05-07T08:55:58.967506Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1298: SessionId: 
ydb://session/3?node_id=8&id=MjRlMGNmZC0yMmY2NzA1YS0zOWM1OWY4My05OWM1NmVkMw==, ActorId: [8:7501624920197801346:4601], ActorState: ExecuteState, TraceId: 01jtmzay7ncjx0p50q7cxr8tf5, ExecutePhyTx, tx: 0x000050C00002DC58 literal: 0 commit: 0 txCtx.DeferredEffects.size(): 0 2025-05-07T08:55:58.967584Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1449: SessionId: ydb://session/3?node_id=8&id=MjRlMGNmZC0yMmY2NzA1YS0zOWM1OWY4My05OWM1NmVkMw==, ActorId: [8:7501624920197801346:4601], ActorState: ExecuteState, TraceId: 01jtmzay7ncjx0p50q7cxr8tf5, Sending to Executer TraceId: 0 8 2025-05-07T08:55:58.967698Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1507: SessionId: ydb://session/3?node_id=8&id=MjRlMGNmZC0yMmY2NzA1YS0zOWM1OWY4My05OWM1NmVkMw==, ActorId: [8:7501624920197801346:4601], ActorState: ExecuteState, TraceId: 01jtmzay7ncjx0p50q7cxr8tf5, Created new KQP executer: [8:7501624920197801369:4601] isRollback: 0 2025-05-07T08:55:58.993154Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1699: SessionId: ydb://session/3?node_id=8&id=MjRlMGNmZC0yMmY2NzA1YS0zOWM1OWY4My05OWM1NmVkMw==, ActorId: [8:7501624920197801346:4601], ActorState: ExecuteState, TraceId: 01jtmzay7ncjx0p50q7cxr8tf5, TEvTxResponse, CurrentTx: 1/2 response.status: SUCCESS 2025-05-07T08:55:58.993248Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1298: SessionId: ydb://session/3?node_id=8&id=MjRlMGNmZC0yMmY2NzA1YS0zOWM1OWY4My05OWM1NmVkMw==, ActorId: [8:7501624920197801346:4601], ActorState: ExecuteState, TraceId: 01jtmzay7ncjx0p50q7cxr8tf5, ExecutePhyTx, tx: 0x000050C0000715D8 literal: 1 commit: 1 txCtx.DeferredEffects.size(): 0 2025-05-07T08:55:58.994253Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1699: SessionId: ydb://session/3?node_id=8&id=MjRlMGNmZC0yMmY2NzA1YS0zOWM1OWY4My05OWM1NmVkMw==, ActorId: [8:7501624920197801346:4601], ActorState: ExecuteState, TraceId: 01jtmzay7ncjx0p50q7cxr8tf5, TEvTxResponse, CurrentTx: 2/2 response.status: SUCCESS 2025-05-07T08:55:58.996131Z node 8 :KQP_SESSION INFO: kqp_session_actor.cpp:1958: SessionId: ydb://session/3?node_id=8&id=MjRlMGNmZC0yMmY2NzA1YS0zOWM1OWY4My05OWM1NmVkMw==, ActorId: [8:7501624920197801346:4601], ActorState: ExecuteState, TraceId: 01jtmzay7ncjx0p50q7cxr8tf5, txInfo Status: Committed Kind: ReadOnly TotalDuration: 28.765 ServerDuration: 28.637 QueriesCount: 2 2025-05-07T08:55:58.996270Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2113: SessionId: ydb://session/3?node_id=8&id=MjRlMGNmZC0yMmY2NzA1YS0zOWM1OWY4My05OWM1NmVkMw==, ActorId: [8:7501624920197801346:4601], ActorState: ExecuteState, TraceId: 01jtmzay7ncjx0p50q7cxr8tf5, Create QueryResponse for action: QUERY_ACTION_EXECUTE with SUCCESS status 2025-05-07T08:55:58.996339Z node 8 :KQP_SESSION INFO: kqp_session_actor.cpp:2473: SessionId: ydb://session/3?node_id=8&id=MjRlMGNmZC0yMmY2NzA1YS0zOWM1OWY4My05OWM1NmVkMw==, ActorId: [8:7501624920197801346:4601], ActorState: ExecuteState, TraceId: 01jtmzay7ncjx0p50q7cxr8tf5, Cleanup start, isFinal: 0 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-05-07T08:55:58.996371Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2534: SessionId: ydb://session/3?node_id=8&id=MjRlMGNmZC0yMmY2NzA1YS0zOWM1OWY4My05OWM1NmVkMw==, ActorId: [8:7501624920197801346:4601], ActorState: ExecuteState, TraceId: 01jtmzay7ncjx0p50q7cxr8tf5, EndCleanup, isFinal: 0 2025-05-07T08:55:58.996428Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2270: SessionId: ydb://session/3?node_id=8&id=MjRlMGNmZC0yMmY2NzA1YS0zOWM1OWY4My05OWM1NmVkMw==, ActorId: 
[8:7501624920197801346:4601], ActorState: ExecuteState, TraceId: 01jtmzay7ncjx0p50q7cxr8tf5, Sent query response back to proxy, proxyRequestId: 481, proxyId: [8:7501624726924264907:2201] 2025-05-07T08:55:58.997385Z node 8 :KQP_WORKLOAD_SERVICE DEBUG: query_actor.cpp:240: [TQueryBase] [TRefreshPoolStateQuery] TraceId: sample_pool_id, RequestDatabase: /Root, RequestSessionId: , State: Describe pool, TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=8&id=MjRlMGNmZC0yMmY2NzA1YS0zOWM1OWY4My05OWM1NmVkMw==, TxId: 2025-05-07T08:55:58.997483Z node 8 :KQP_WORKLOAD_SERVICE DEBUG: query_actor.cpp:367: [TQueryBase] [TRefreshPoolStateQuery] TraceId: sample_pool_id, RequestDatabase: /Root, RequestSessionId: , State: Describe pool, Finish with SUCCESS, SessionId: ydb://session/3?node_id=8&id=MjRlMGNmZC0yMmY2NzA1YS0zOWM1OWY4My05OWM1NmVkMw==, TxId: 2025-05-07T08:55:58.997694Z node 8 :KQP_WORKLOAD_SERVICE TRACE: pool_handlers_actors.cpp:746: [WorkloadService] [TPoolHandlerActorBase] ActorId: [8:7501624748399101703:2359], DatabaseId: /Root, PoolId: sample_pool_id, succefully refreshed pool state, in flight: 0, delayed: 0 2025-05-07T08:55:58.997731Z node 8 :KQP_SESSION INFO: kqp_session_actor.cpp:2315: SessionId: ydb://session/3?node_id=8&id=MjRlMGNmZC0yMmY2NzA1YS0zOWM1OWY4My05OWM1NmVkMw==, ActorId: [8:7501624920197801346:4601], ActorState: ReadyState, Session closed due to explicit close event 2025-05-07T08:55:58.997765Z node 8 :KQP_SESSION INFO: kqp_session_actor.cpp:2473: SessionId: ydb://session/3?node_id=8&id=MjRlMGNmZC0yMmY2NzA1YS0zOWM1OWY4My05OWM1NmVkMw==, ActorId: [8:7501624920197801346:4601], ActorState: ReadyState, Cleanup start, isFinal: 1 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-05-07T08:55:58.997795Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2534: SessionId: ydb://session/3?node_id=8&id=MjRlMGNmZC0yMmY2NzA1YS0zOWM1OWY4My05OWM1NmVkMw==, ActorId: [8:7501624920197801346:4601], ActorState: ReadyState, EndCleanup, isFinal: 1 2025-05-07T08:55:58.997822Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2546: SessionId: ydb://session/3?node_id=8&id=MjRlMGNmZC0yMmY2NzA1YS0zOWM1OWY4My05OWM1NmVkMw==, ActorId: [8:7501624920197801346:4601], ActorState: unknown state, Cleanup temp tables: 0 2025-05-07T08:55:58.997894Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2637: SessionId: ydb://session/3?node_id=8&id=MjRlMGNmZC0yMmY2NzA1YS0zOWM1OWY4My05OWM1NmVkMw==, ActorId: [8:7501624920197801346:4601], ActorState: unknown state, Session actor destroyed |90.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/scheme_board/ut_populator/ydb-core-tx-scheme_board-ut_populator |90.5%| [LD] {RESULT} $(B)/ydb/core/tx/scheme_board/ut_populator/ydb-core-tx-scheme_board-ut_populator >> Viewer::JsonStorageListingV1NodeIdFilter [GOOD] >> Viewer::JsonStorageListingV1PDiskIdFilter |90.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/scheme_board/ut_populator/ydb-core-tx-scheme_board-ut_populator |90.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/load_test/ut/ydb-core-load_test-ut |90.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/load_test/ut/ydb-core-load_test-ut |90.5%| [LD] {RESULT} $(B)/ydb/core/load_test/ut/ydb-core-load_test-ut >> GenericFederatedQuery::IcebergHiveBasicSelectAll [GOOD] >> GenericFederatedQuery::IcebergHiveBasicSelectConstant ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/controller/ut_dst_creator/unittest >> DstCreator::WithSyncIndexAndIntermediateDir [GOOD] Test 
command err: 2025-05-07T08:55:58.220225Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501624919666403799:2200];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:55:58.220785Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002984/r3tmp/tmpMD9lnc/pdisk_1.dat 2025-05-07T08:55:59.067066Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:55:59.100467Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:55:59.100588Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:55:59.130730Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:25129 TServer::EnableGrpc on GrpcPort 24275, node 1 2025-05-07T08:55:59.710122Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:55:59.710142Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:55:59.710150Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:55:59.710277Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:25129 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:56:00.637112Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:56:00.653200Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T08:56:00.657640Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 
TClient::Ls request: /Root/Table TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710658 CreateStep: 1746608161270 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: true } Table { Name: "Table" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyCo... (TRUNCATED) TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1746608160696 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 6 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 6 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 4 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710658 CreateStep: 1746608161270 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: true } Children { Name: ".sys" PathId: 18446... (TRUNCATED) 2025-05-07T08:56:01.451311Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-05-07T08:56:01.451409Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-05-07T08:56:01.451422Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:87: [DstCreator][rid 1][tid 1] Get table profiles 2025-05-07T08:56:01.452170Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:106: [DstCreator][rid 1][tid 1] Handle NKikimr::NConsole::TEvConfigsDispatcher::TEvGetConfigResponse 2025-05-07T08:56:03.158152Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: 
fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501624919666403799:2200];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:56:03.158221Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:56:05.094476Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:158: [DstCreator][rid 1][tid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribeTableResponse { Result: { name: Table, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1746608161270, tx_id: 281474976710658 } } } 2025-05-07T08:56:05.094943Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:249: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxUserProxy::TEvAllocateTxIdResult 2025-05-07T08:56:05.097335Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:1, at schemeshard: 72057594046644480 2025-05-07T08:56:05.112838Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:279: [DstCreator][rid 1][tid 1] Handle {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976710659} 2025-05-07T08:56:05.112868Z node 1 :REPLICATION_CONTROLLER DEBUG: dst_creator.cpp:306: [DstCreator][rid 1][tid 1] Subscribe tx: txId# 281474976710659 2025-05-07T08:56:05.228861Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:311: [DstCreator][rid 1][tid 1] Handle NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976710659 2025-05-07T08:56:05.230267Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:335: [DstCreator][rid 1][tid 1] Handle NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 20 Record# Status: StatusSuccess Path: "/Root/Dir/Replicated" PathDescription { Self { Name: "Replicated" PathId: 6 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710659 CreateStep: 1746608165260 ParentPathId: 5 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: true } Table { Name: "Replicated" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 
4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Thre ... titionVersion: 1 } ChildrenExist: false } Table { Name: "indexImplTable" Columns { Name: "value" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "value" KeyColumnNames: "key" KeyColumnIds: 1 KeyColumnIds: 2 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 SplitByLoadSettings { Enabled: false } } ColumnFamilies 
{ Id: 0 StorageConfig { SysLog { PreferredPoolKind: "test" } Log { PreferredPoolKind: "test" } Data { PreferredPoolKind: "test" } } } } TableSchemaVersion: 1 IsBackup: false ReplicationConfig { Mode: REPLICATION_MODE_READ_ONLY ConsistencyLevel: CONSISTENCY_LEVEL_ROW } IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186224037905 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 7 PathsLimit: 10000 ShardsInside: 19 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } UserAttributes { Key: "__async_replica" Value: "true" } } PathId: 8 PathOwnerId: 72057594046644480 } 2025-05-07T08:56:05.250307Z node 1 :REPLICATION_CONTROLLER INFO: dst_creator.cpp:585: [DstCreator][rid 1][tid 2] Success: dstPathId# [OwnerId: 72057594046644480, LocalPathId: 8] TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "indexImplTable" PathId: 8 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710659 CreateStep: 1746608165260 ParentPathId: 7 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeSyncIndexImplTable Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "indexImplTable" Columns { Name: "value" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } ... 
(TRUNCATED) Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "indexImplTable" PathId: 8 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710659 CreateStep: 1746608165260 ParentPathId: 7 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeSyncIndexImplTable Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "indexImplTable" Columns { Name: "value" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "value" KeyColumnNames: "key" KeyColumnIds: 1 KeyColumnIds: 2 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 SplitByLoadSettings { Enabled: false } } ColumnFamilies { Id: 0 StorageConfig { SysLog { PreferredPoolKind: "test" } Log { PreferredPoolKind: "test" } Data { PreferredPoolKind: "test" } } } } TableSchemaVersion: 1 IsBackup: false ReplicationConfig { Mode: REPLICATION_MODE_READ_ONLY ConsistencyLevel: CONSISTENCY_LEVEL_ROW } IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false 
DatashardId: 72075186224037905 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 7 PathsLimit: 10000 ShardsInside: 19 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } UserAttributes { Key: "__async_replica" Value: "true" } } Path: "/Root/Dir/Replicated/index_by_value/indexImplTable" >> TExecutorDb::RandomCoordinatorSimulation [GOOD] >> TExecutorDb::MultiPage >> PgCatalog::PgDatabase-useSink [GOOD] >> PgCatalog::PgRoles >> TPartBtreeIndexIteration::FewNodes_History_Slices [GOOD] >> TPartBtreeIndexIteration::FewNodes_Groups_History_Slices |90.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/executer_actor/ut/unittest >> GenericFederatedQuery::IcebergHadoopSaSelectAll [GOOD] >> GenericFederatedQuery::IcebergHadoopSaSelectConstant >> TTxAllocatorClientTest::AllocateOverTheEdge >> TCdcStreamTests::CreateStream [GOOD] >> TCdcStreamTests::AlterStream >> TTxAllocatorClientTest::AllocateOverTheEdge [GOOD] |90.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/executer_actor/ut/unittest >> TPQCDTest::TestDiscoverClusters >> ResourcePoolClassifiersDdl::TestDropResourcePoolClassifier [GOOD] >> ResourcePoolClassifiersDdl::TestDropResourcePool >> TExecutorDb::MultiPage [GOOD] >> TExecutorDb::EncodedPage ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_allocator_client/ut/unittest >> TTxAllocatorClientTest::AllocateOverTheEdge [GOOD] Test command err: 2025-05-07T08:56:09.744317Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:1904: Tablet: 72057594046447617 LockedInitializationPath Marker# TSYS32 2025-05-07T08:56:09.744812Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:917: Tablet: 72057594046447617 HandleFindLatestLogEntry, NODATA Promote Marker# TSYS19 2025-05-07T08:56:09.745659Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:221: Tablet: 72057594046447617 TTablet::WriteZeroEntry. 
logid# [72057594046447617:2:0:0:0:0:0] Marker# TSYS01 2025-05-07T08:56:09.747633Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:0:0:0:20:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-05-07T08:56:09.748164Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:17: tablet# 72057594046447617 OnActivateExecutor 2025-05-07T08:56:09.759767Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:1:1:28672:35:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-05-07T08:56:09.759896Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:1:0:0:42:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-05-07T08:56:09.759987Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:2:1:8192:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-05-07T08:56:09.760076Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:1381: Tablet: 72057594046447617 GcCollect 0 channel, tablet:gen:step => 2:0 Marker# TSYS28 2025-05-07T08:56:09.760213Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:2:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-05-07T08:56:09.760318Z node 1 :TX_ALLOCATOR DEBUG: txallocator__scheme.cpp:22: tablet# 72057594046447617 TTxSchema Complete 2025-05-07T08:56:09.760471Z node 1 :TABLET_MAIN INFO: tablet_sys.cpp:1015: Tablet: 72057594046447617 Active! Generation: 2, Type: TxAllocator started in 0msec Marker# TSYS24 2025-05-07T08:56:09.761248Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:70:2105] requested range size#5000 2025-05-07T08:56:09.761840Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:3:1:24576:70:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-05-07T08:56:09.761916Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:3:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-05-07T08:56:09.762068Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 0 Reserved to# 5000 2025-05-07T08:56:09.762124Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:70:2105] TEvAllocateResult from# 0 to# 5000 2025-05-07T08:56:09.762329Z node 1 :TX_ALLOCATOR_CLIENT WARN: client.cpp:38: AllocateTxIds: requested many txIds. Just a warning, request is processed. Requested: 1000 TxAllocators count: 1 RequestPerAllocator: 5000 MaxCapacity: 5000 BatchAllocationWarning: 500 2025-05-07T08:56:09.762550Z node 1 :TX_ALLOCATOR_CLIENT WARN: client.cpp:38: AllocateTxIds: requested many txIds. Just a warning, request is processed. Requested: 1000 TxAllocators count: 1 RequestPerAllocator: 5000 MaxCapacity: 5000 BatchAllocationWarning: 500 2025-05-07T08:56:09.762746Z node 1 :TX_ALLOCATOR_CLIENT WARN: client.cpp:38: AllocateTxIds: requested many txIds. Just a warning, request is processed. Requested: 1000 TxAllocators count: 1 RequestPerAllocator: 5000 MaxCapacity: 5000 BatchAllocationWarning: 500 2025-05-07T08:56:09.762990Z node 1 :TX_ALLOCATOR_CLIENT WARN: client.cpp:38: AllocateTxIds: requested many txIds. 
Just a warning, request is processed. Requested: 1000 TxAllocators count: 1 RequestPerAllocator: 5000 MaxCapacity: 5000 BatchAllocationWarning: 500 2025-05-07T08:56:09.763145Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:70:2105] requested range size#5000 2025-05-07T08:56:09.763674Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:4:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-05-07T08:56:09.763783Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:4:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-05-07T08:56:09.763882Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 5000 Reserved to# 10000 2025-05-07T08:56:09.763920Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:70:2105] TEvAllocateResult from# 5000 to# 10000 2025-05-07T08:56:09.764119Z node 1 :TX_ALLOCATOR_CLIENT WARN: client.cpp:38: AllocateTxIds: requested many txIds. Just a warning, request is processed. Requested: 500 TxAllocators count: 1 RequestPerAllocator: 5000 MaxCapacity: 5000 BatchAllocationWarning: 500 2025-05-07T08:56:09.764297Z node 1 :TX_ALLOCATOR_CLIENT WARN: client.cpp:38: AllocateTxIds: requested many txIds. Just a warning, request is processed. Requested: 1000 TxAllocators count: 1 RequestPerAllocator: 5000 MaxCapacity: 5000 BatchAllocationWarning: 500 2025-05-07T08:56:09.764520Z node 1 :TX_ALLOCATOR_CLIENT WARN: client.cpp:38: AllocateTxIds: requested many txIds. Just a warning, request is processed. Requested: 2500 TxAllocators count: 1 RequestPerAllocator: 5000 MaxCapacity: 5000 BatchAllocationWarning: 500 2025-05-07T08:56:09.764842Z node 1 :TX_ALLOCATOR_CLIENT WARN: client.cpp:38: AllocateTxIds: requested many txIds. Just a warning, request is processed. Requested: 1000 TxAllocators count: 1 RequestPerAllocator: 5000 MaxCapacity: 5000 BatchAllocationWarning: 500 2025-05-07T08:56:09.765014Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:70:2105] requested range size#5000 2025-05-07T08:56:09.765474Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:5:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-05-07T08:56:09.765544Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:5:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-05-07T08:56:09.765685Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 10000 Reserved to# 15000 2025-05-07T08:56:09.765729Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:70:2105] TEvAllocateResult from# 10000 to# 15000 2025-05-07T08:56:09.765923Z node 1 :TX_ALLOCATOR_CLIENT WARN: client.cpp:38: AllocateTxIds: requested many txIds. Just a warning, request is processed. 
Requested: 3000 TxAllocators count: 1 RequestPerAllocator: 5000 MaxCapacity: 5000 BatchAllocationWarning: 500 >> SystemView::TopPartitionsByCpuRanges [GOOD] >> SystemView::TopPartitionsByCpuFollowers >> SplitPathTests::WithDatabaseShouldSuccess [GOOD] >> TDataShardMinStepTest::TestDropTablePlanComesNotTooEarlyRW+VolatileTxs >> GenericFederatedQuery::IcebergHiveSaSelectAll [GOOD] >> GenericFederatedQuery::IcebergHiveSaSelectConstant >> TExecutorDb::EncodedPage [GOOD] >> TFlatCxxDatabaseTest::BasicSchemaTest >> TCdcStreamTests::AlterStream [GOOD] >> TCdcStreamTests::DropStream >> TFlatCxxDatabaseTest::BasicSchemaTest [GOOD] >> TFlatCxxDatabaseTest::RenameColumnSchemaTest [GOOD] >> TFlatCxxDatabaseTest::SchemaFillerTest [GOOD] >> TFlatDatabaseDecimal::UpdateRead [GOOD] >> TFlatEraseCacheTest::BasicUsage [GOOD] >> TFlatEraseCacheTest::BasicUsageReverse [GOOD] >> TFlatEraseCacheTest::CacheEviction |90.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/grpc_services/ut/unittest >> SplitPathTests::WithDatabaseShouldSuccess [GOOD] >> TFlatEraseCacheTest::CacheEviction [GOOD] >> TFlatEraseCacheTest::StressGarbageCollection >> TFlatEraseCacheTest::StressGarbageCollection [GOOD] >> TFlatEraseCacheTest::StressGarbageCollectionWithStrings >> TFlatEraseCacheTest::StressGarbageCollectionWithStrings [GOOD] >> TFlatExecutorLeases::Basics >> GenericFederatedQuery::TestFailsOnIncorrectScriptExecutionOperationId [GOOD] >> GenericFederatedQuery::TestFailsOnIncorrectScriptExecutionFetchToken >> BuildStatsHistogram::Ten_Mixed_Log [GOOD] >> BuildStatsHistogram::Ten_Serial_Log >> TIterator::MixedReverse [GOOD] >> TIterator::Serial |90.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/grpc_services/ut/unittest >> TVersions::Wreck2 [GOOD] >> TVersions::Wreck2Reverse >> TDataShardMinStepTest::TestDropTableCompletesQuicklyRW+VolatileTxs >> GenericFederatedQuery::IcebergHiveTokenSelectAll [GOOD] >> GenericFederatedQuery::IcebergHiveTokenSelectConstant >> OperationMapping::IndexBuildCanceled [GOOD] |90.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/grpc_services/ut/unittest >> OperationMapping::IndexBuildCanceled [GOOD] >> KqpPg::PgUpdateCompoundKey+useSink [GOOD] >> KqpPg::PgUpdateCompoundKey-useSink >> TExternalTableTest::DropExternalTable >> TCdcStreamTests::DropStream [GOOD] >> TCdcStreamTests::AlterStreamImplShouldFail >> TExternalTableTest::ReplaceExternalTableIfNotExists >> TExternalTableTest::DropExternalTable [GOOD] >> TExternalTableTest::Decimal >> TIterator::Serial [GOOD] >> TIterator::SerialReverse >> SystemView::AuthUsers_LockUnlock [GOOD] >> SystemView::AuthUsers_Access >> RetryPolicy::TWriteSession_TestBrokenPolicy [GOOD] >> RetryPolicy::TWriteSession_RetryOnTargetCluster >> TFlatExecutorLeases::Basics [GOOD] >> TFlatExecutorLeases::BasicsLeaseTimeout >> BasicUsage::WriteSessionSwitchDatabases [GOOD] >> TExternalTableTest::Decimal [GOOD] >> BuildStatsHistogram::Ten_Serial_Log [GOOD] >> BuildStatsHistogram::Ten_Crossed_Log |90.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/keyvalue/ut_trace/ydb-core-keyvalue-ut_trace |90.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/keyvalue/ut_trace/ydb-core-keyvalue-ut_trace |90.6%| [LD] {RESULT} $(B)/ydb/core/keyvalue/ut_trace/ydb-core-keyvalue-ut_trace |90.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/services/ydb/backup_ut/ydb-services-ydb-backup_ut |90.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/services/ydb/backup_ut/ydb-services-ydb-backup_ut |90.6%| [LD] {RESULT} $(B)/ydb/services/ydb/backup_ut/ydb-services-ydb-backup_ut 
>> TExternalTableTest::ReplaceExternalTableIfNotExists [GOOD] >> TCdcStreamTests::AlterStreamImplShouldFail [GOOD] >> TCdcStreamTests::DropStreamImplShouldFail >> TDataShardMinStepTest::TestDropTablePlanComesNotTooEarlyRW+VolatileTxs [GOOD] >> TDataShardMinStepTest::TestDropTablePlanComesNotTooEarlyRW-VolatileTxs ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_external_table/unittest >> TExternalTableTest::Decimal [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2141] Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:127:2058] recipient: [1:109:2141] 2025-05-07T08:56:16.057303Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:56:16.057409Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:56:16.057502Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:56:16.057553Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:56:16.057626Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:56:16.057659Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:56:16.057708Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:56:16.057792Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:56:16.058549Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:56:16.058924Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:56:16.146179Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7610: Got new config: QueryServiceConfig { AvailableExternalDataSources: "ObjectStorage" AvailableExternalDataSources: "ClickHouse" AvailableExternalDataSources: "PostgreSQL" AvailableExternalDataSources: "MySQL" AvailableExternalDataSources: "Ydb" AvailableExternalDataSources: "YT" AvailableExternalDataSources: "Greenplum" AvailableExternalDataSources: "MsSQLServer" AvailableExternalDataSources: "Oracle" AvailableExternalDataSources: "Logging" AvailableExternalDataSources: "Solomon" } 2025-05-07T08:56:16.146270Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:56:16.147014Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: 
ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# ObjectStorage, ClickHouse, PostgreSQL, MySQL, Ydb, YT, Greenplum, MsSQLServer, Oracle, Logging, Solomon 2025-05-07T08:56:16.168934Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:56:16.169527Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:56:16.169707Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:56:16.185773Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:56:16.186059Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:56:16.186762Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:56:16.187061Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:56:16.190600Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:56:16.191983Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:56:16.192059Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:56:16.192194Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:56:16.192248Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:56:16.192352Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:56:16.192539Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:56:16.201347Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:239:2058] recipient: [1:15:2062] 2025-05-07T08:56:16.381057Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:56:16.381317Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:56:16.381569Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 
2025-05-07T08:56:16.381822Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:56:16.381879Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:56:16.387112Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:56:16.387285Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:56:16.387519Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:56:16.387583Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:56:16.387630Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:56:16.387666Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:56:16.390586Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:56:16.390672Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:56:16.390719Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:56:16.393097Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:56:16.393160Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:56:16.393201Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:56:16.393265Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:56:16.397250Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:56:16.399635Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:56:16.399910Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation 
RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:56:16.400962Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:56:16.401147Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 134 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:56:16.401198Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:56:16.401494Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:56:16.401562Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:56:16.401762Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:56:16.401843Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:56:16.404328Z node 1 :FLAT_TX_SCHEMESHARD INF ... 
Id: 101, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:56:17.477118Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 101, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-05-07T08:56:17.477232Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 101, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-05-07T08:56:17.477319Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 101, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-05-07T08:56:17.477439Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:56:17.477473Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [2:205:2207], at schemeshard: 72057594046678944, txId: 101, path id: 1 2025-05-07T08:56:17.477515Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [2:205:2207], at schemeshard: 72057594046678944, txId: 101, path id: 3 2025-05-07T08:56:17.477547Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [2:205:2207], at schemeshard: 72057594046678944, txId: 101, path id: 3 2025-05-07T08:56:17.477573Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [2:205:2207], at schemeshard: 72057594046678944, txId: 101, path id: 2 2025-05-07T08:56:17.478021Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 101:0, at schemeshard: 72057594046678944 2025-05-07T08:56:17.478080Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046678944] TDone opId# 101:0 ProgressState 2025-05-07T08:56:17.478185Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#101:0 progress is 1/1 2025-05-07T08:56:17.478227Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-05-07T08:56:17.478273Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#101:0 progress is 1/1 2025-05-07T08:56:17.478338Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-05-07T08:56:17.478387Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 101, ready parts: 1/1, is published: false 2025-05-07T08:56:17.478446Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-05-07T08:56:17.478491Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 101:0 2025-05-07T08:56:17.478528Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 101:0 2025-05-07T08:56:17.478621Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target 
path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-05-07T08:56:17.478673Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate source path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-05-07T08:56:17.478717Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 101, publications: 3, subscribers: 0 2025-05-07T08:56:17.478757Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 1], 7 2025-05-07T08:56:17.478791Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 2], 2 2025-05-07T08:56:17.478814Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 3], 2 2025-05-07T08:56:17.480088Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 101 2025-05-07T08:56:17.480186Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 101 2025-05-07T08:56:17.480221Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 3, at schemeshard: 72057594046678944, txId: 101 2025-05-07T08:56:17.480265Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 7 2025-05-07T08:56:17.480319Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-05-07T08:56:17.485504Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 101 2025-05-07T08:56:17.485658Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 101 2025-05-07T08:56:17.485696Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 101 2025-05-07T08:56:17.485730Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 2 2025-05-07T08:56:17.485767Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-05-07T08:56:17.487097Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 
2 Version: 2 PathOwnerId: 72057594046678944, cookie: 101 2025-05-07T08:56:17.487193Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 2 PathOwnerId: 72057594046678944, cookie: 101 2025-05-07T08:56:17.487247Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 101 2025-05-07T08:56:17.487284Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 2 2025-05-07T08:56:17.487316Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-05-07T08:56:17.487408Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 101, subscribers: 0 2025-05-07T08:56:17.499197Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-05-07T08:56:17.499563Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-05-07T08:56:17.501115Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 TestModificationResult got TxId: 101, wait until txId: 101 TestWaitNotification wait txId: 101 2025-05-07T08:56:17.501365Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 101: send EvNotifyTxCompletion 2025-05-07T08:56:17.501408Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 101 2025-05-07T08:56:17.501841Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 2025-05-07T08:56:17.501945Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-05-07T08:56:17.502005Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [2:332:2323] TestWaitNotification: OK eventTxId 101 2025-05-07T08:56:17.502484Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/ExternalTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T08:56:17.502737Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/ExternalTable" took 295us result status StatusSuccess 2025-05-07T08:56:17.503073Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/ExternalTable" PathDescription { Self { Name: "ExternalTable" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeExternalTable 
CreateFinished: true CreateTxId: 101 CreateStep: 5000003 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 ExternalTableVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } ExternalTableDescription { Name: "ExternalTable" PathId { OwnerId: 72057594046678944 LocalId: 3 } Version: 1 SourceType: "ObjectStorage" DataSourcePath: "/MyRoot/ExternalDataSource" Location: "/" Columns { Name: "key" Type: "Decimal(35,9)" TypeId: 4865 Id: 1 NotNull: false TypeInfo { DecimalPrecision: 35 DecimalScale: 9 } } Content: "" } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> GenericFederatedQuery::TestFailsOnIncorrectScriptExecutionFetchToken [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_external_table/unittest >> TExternalTableTest::ReplaceExternalTableIfNotExists [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2141] Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:127:2058] recipient: [1:109:2141] 2025-05-07T08:56:17.239803Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:56:17.239891Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:56:17.239947Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:56:17.239984Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:56:17.240028Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:56:17.240070Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:56:17.240128Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:56:17.240222Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, 
CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:56:17.240960Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:56:17.241344Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:56:17.323843Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7610: Got new config: QueryServiceConfig { AvailableExternalDataSources: "ObjectStorage" AvailableExternalDataSources: "ClickHouse" AvailableExternalDataSources: "PostgreSQL" AvailableExternalDataSources: "MySQL" AvailableExternalDataSources: "Ydb" AvailableExternalDataSources: "YT" AvailableExternalDataSources: "Greenplum" AvailableExternalDataSources: "MsSQLServer" AvailableExternalDataSources: "Oracle" AvailableExternalDataSources: "Logging" AvailableExternalDataSources: "Solomon" } 2025-05-07T08:56:17.323923Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:56:17.324756Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# ObjectStorage, ClickHouse, PostgreSQL, MySQL, Ydb, YT, Greenplum, MsSQLServer, Oracle, Logging, Solomon 2025-05-07T08:56:17.341558Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:56:17.342919Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:56:17.343122Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:56:17.374430Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:56:17.374693Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:56:17.375420Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:56:17.375733Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:56:17.383787Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:56:17.385373Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:56:17.385472Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:56:17.385620Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:56:17.385675Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:56:17.385798Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:56:17.386045Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:56:17.398905Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:239:2058] recipient: [1:15:2062] 2025-05-07T08:56:17.592532Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:56:17.606891Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:56:17.607382Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:56:17.607782Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:56:17.607879Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:56:17.615228Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:56:17.615404Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:56:17.615630Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:56:17.615693Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:56:17.615754Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:56:17.615792Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:56:17.618970Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:56:17.619049Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:56:17.619089Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:56:17.626929Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:56:17.627000Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at 
schemeshard: 72057594046678944 2025-05-07T08:56:17.627049Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:56:17.627114Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:56:17.634270Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:56:17.638993Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:56:17.639323Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:56:17.640511Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:56:17.640673Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 134 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:56:17.640729Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:56:17.641125Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:56:17.641196Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:56:17.641377Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:56:17.641463Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:56:17.655048Z node 1 :FLAT_TX_SCHEMESHARD INF ... 
05 2025-05-07T08:56:17.886385Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000005, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:56:17.886535Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 104 Coordinator: 72057594046316545 AckTo { RawX1: 134 RawX2: 4294969453 } } Step: 5000005 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:56:17.886595Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_external_table.cpp:58: [72057594046678944] TAlterExternalTable TPropose, operationId: 104:0 HandleReply TEvOperationPlan: step# 5000005 2025-05-07T08:56:17.886720Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 104:0 128 -> 240 2025-05-07T08:56:17.886942Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-05-07T08:56:17.887026Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-05-07T08:56:17.887803Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 2025-05-07T08:56:17.887961Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 FAKE_COORDINATOR: Erasing txId 104 2025-05-07T08:56:17.889859Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:56:17.889916Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 104, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:56:17.890112Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 104, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-05-07T08:56:17.890204Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 104, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-05-07T08:56:17.890332Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:56:17.890377Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:206:2208], at schemeshard: 72057594046678944, txId: 104, path id: 1 2025-05-07T08:56:17.890422Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:206:2208], at schemeshard: 72057594046678944, txId: 104, path id: 3 2025-05-07T08:56:17.890446Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:206:2208], at schemeshard: 72057594046678944, txId: 104, path id: 3 2025-05-07T08:56:17.890519Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: 
TTxOperationProgress Execute, operationId: 104:0, at schemeshard: 72057594046678944 2025-05-07T08:56:17.890562Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046678944] TDone opId# 104:0 ProgressState 2025-05-07T08:56:17.890659Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#104:0 progress is 1/1 2025-05-07T08:56:17.890707Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-05-07T08:56:17.890781Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#104:0 progress is 1/1 2025-05-07T08:56:17.890816Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-05-07T08:56:17.890855Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 104, ready parts: 1/1, is published: false 2025-05-07T08:56:17.890908Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-05-07T08:56:17.890956Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 104:0 2025-05-07T08:56:17.890984Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 104:0 2025-05-07T08:56:17.891059Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-05-07T08:56:17.891105Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate source path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-05-07T08:56:17.891139Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 104, publications: 2, subscribers: 0 2025-05-07T08:56:17.891169Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 104, [OwnerId: 72057594046678944, LocalPathId: 1], 11 2025-05-07T08:56:17.891200Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 104, [OwnerId: 72057594046678944, LocalPathId: 3], 4 2025-05-07T08:56:17.892300Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 11 PathOwnerId: 72057594046678944, cookie: 104 2025-05-07T08:56:17.892405Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 11 PathOwnerId: 72057594046678944, cookie: 104 2025-05-07T08:56:17.892445Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 104 2025-05-07T08:56:17.892485Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 104, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 11 2025-05-07T08:56:17.892526Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 
72057594046678944, LocalPathId: 1] was 3 2025-05-07T08:56:17.893490Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 4 PathOwnerId: 72057594046678944, cookie: 104 2025-05-07T08:56:17.893578Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 4 PathOwnerId: 72057594046678944, cookie: 104 2025-05-07T08:56:17.893626Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 104 2025-05-07T08:56:17.893654Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 104, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 4 2025-05-07T08:56:17.893687Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-05-07T08:56:17.893751Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 104, subscribers: 0 2025-05-07T08:56:17.902419Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 2025-05-07T08:56:17.902943Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 TestModificationResult got TxId: 104, wait until txId: 104 TestWaitNotification wait txId: 104 2025-05-07T08:56:17.903270Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 104: send EvNotifyTxCompletion 2025-05-07T08:56:17.903317Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 104 2025-05-07T08:56:17.903825Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 104, at schemeshard: 72057594046678944 2025-05-07T08:56:17.903927Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 104: got EvNotifyTxCompletionResult 2025-05-07T08:56:17.903970Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 104: satisfy waiter [1:395:2386] TestWaitNotification: OK eventTxId 104 2025-05-07T08:56:17.904536Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/ExternalTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T08:56:17.904802Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/ExternalTable" took 247us result status StatusSuccess 2025-05-07T08:56:17.905125Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/ExternalTable" PathDescription { Self { Name: "ExternalTable" PathId: 3 SchemeshardId: 
72057594046678944 PathType: EPathTypeExternalTable CreateFinished: true CreateTxId: 102 CreateStep: 5000003 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 ExternalTableVersion: 3 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } ExternalTableDescription { Name: "ExternalTable" PathId { OwnerId: 72057594046678944 LocalId: 3 } Version: 3 SourceType: "ObjectStorage" DataSourcePath: "/MyRoot/ExternalDataSource" Location: "/other_location" Columns { Name: "value" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false } Content: "" } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TFlatExecutorLeases::BasicsLeaseTimeout [GOOD] >> TFlatExecutorLeases::BasicsInitialLease >> TPartBtreeIndexIteration::FewNodes_Groups_History_Slices [GOOD] >> TPartBtreeIndexIteration::FewNodes_Groups_History_Slices_Sticky >> RetryPolicy::TWriteSession_SwitchBackToLocalCluster [GOOD] >> RetryPolicy::TWriteSession_SeqNoShift ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/federated_topic/ut/unittest >> BasicUsage::WriteSessionSwitchDatabases [GOOD] Test command err: 2025-05-07T08:53:40.858094Z :WriteSessionNoAvailableDatabase INFO: Random seed for debugging is 1746608020858033 2025-05-07T08:53:41.570647Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501624330972724720:2074];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:53:41.570704Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-05-07T08:53:41.871699Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7501624333310206982:2278];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:53:41.871758Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-05-07T08:53:42.205782Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003efe/r3tmp/tmpqJl24h/pdisk_1.dat 2025-05-07T08:53:42.263212Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-05-07T08:53:42.594163Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:53:42.940309Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: 
fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:53:42.999309Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:53:42.999412Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:53:43.003455Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:53:43.003516Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:53:43.010381Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:53:43.021241Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-05-07T08:53:43.021409Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:53:43.025283Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 16281, node 1 2025-05-07T08:53:43.526914Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/zvgn/003efe/r3tmp/yandex21tWAj.tmp 2025-05-07T08:53:43.526945Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/zvgn/003efe/r3tmp/yandex21tWAj.tmp 2025-05-07T08:53:43.527158Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/zvgn/003efe/r3tmp/yandex21tWAj.tmp 2025-05-07T08:53:43.527308Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-05-07T08:53:43.637937Z INFO: TTestServer started on Port 27682 GrpcPort 16281 TClient is connected to server localhost:27682 PQClient connected to localhost:16281 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:53:44.393906Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... waiting... waiting... waiting... 
2025-05-07T08:53:46.578095Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501624330972724720:2074];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:53:46.578192Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:53:46.874137Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7501624333310206982:2278];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:53:46.874211Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:53:48.685877Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501624361037496839:2344], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:53:48.686022Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:53:48.694098Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501624361037496859:2347], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:53:48.703472Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715661:3, at schemeshard: 72057594046644480 2025-05-07T08:53:48.775901Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501624361037496861:2348], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715661 completed, doublechecking } 2025-05-07T08:53:49.154174Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501624361037496955:2709] txid# 281474976715662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:53:49.181758Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-05-07T08:53:49.211154Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7501624365332464264:2354], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-05-07T08:53:49.212528Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7501624363374978183:2319], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-05-07T08:53:49.213102Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2147: SessionId: ydb://session/3?node_id=1&id=YmI3NDQyNzAtYzA4ZDBiNzAtZWIzMzA2NzItYjNkZmNmNDI=, ActorId: [1:7501624361037496820:2341], ActorState: ExecuteState, TraceId: 01jtmz6yzecvvny22yct31vxwn, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-05-07T08:53:49.213534Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2147: SessionId: ydb://session/3?node_id=2&id=NDc2NjhhNDgtMjkyNjVlNTctYmU5OTU3ZDYtY2M1ODhjNmI=, ActorId: [2:7501624363374978148:2312], ActorState: ExecuteState, TraceId: 01jtmz6z726b8jwr149g7k8nxy, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-05-07T08:53:49.223800Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-05-07T08:53:49.224321Z node 2 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-05-07T08:53:49.360916Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-05-07T08:53:49.584897Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost:16281", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", f ... 
1Z node 3 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:548: [72075186224037893][rt3.dc1--test-topic] Send TEvPersQueue::TEvStatus TabletId: 72075186224037892 Cookie: 4 2025-05-07T08:56:12.783641Z node 4 :PERSQUEUE DEBUG: partition.cpp:855: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Topic PartitionStatus PartitionSize: 0 UsedReserveSize: 0 ReserveSize: 0 PartitionConfig{ LifetimeSeconds: 86400 LowWatermark: 8388608 SourceIdLifetimeSeconds: 86400 WriteSpeedInBytesPerSecond: 20000000 BurstSize: 20000000 TotalPartitions: 1 SourceIdMaxCounts: 6000000 } 2025-05-07T08:56:12.784577Z node 3 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:688: [72075186224037893][rt3.dc1--test-topic] Send TEvPeriodicTopicStats PathId: 13 Generation: 1 StatsReportRound: 4 DataSize: 0 UsedReserveSize: 0 2025-05-07T08:56:12.784719Z node 3 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1823: [72075186224037893][rt3.dc1--test-topic] ProcessPendingStats. PendingUpdates size 1 2025-05-07T08:56:14.142389Z :DEBUG: [/Root] TraceId [] SessionId [src_id|145010a8-512d918c-9322ca53-8d26ee5_0] MessageGroupId [src_id] Write 1 messages with Id from 1 to 1 >>> Got event: ReadyToAcceptEvent >>> Ready to answer: ok 2025-05-07T08:56:14.143587Z :DEBUG: [/Root] TraceId [] SessionId [src_id|145010a8-512d918c-9322ca53-8d26ee5_0] MessageGroupId [src_id] Write session: try to update token 2025-05-07T08:56:14.143636Z :DEBUG: [/Root] TraceId [] SessionId [src_id|145010a8-512d918c-9322ca53-8d26ee5_0] MessageGroupId [src_id] Send 1 message(s) (0 left), first sequence number is 3 2025-05-07T08:56:14.145913Z node 3 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 4 sessionId: src_id|145010a8-512d918c-9322ca53-8d26ee5_0 grpc read done: success: 1 data: write_request[data omitted] 2025-05-07T08:56:14.146215Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:538: TPartitionWriter 72075186224037892 (partition=0) Received event: NKikimr::NPQ::TEvPartitionWriter::TEvWriteRequest 2025-05-07T08:56:14.150926Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:342: Handle TEvRequest topic: 'rt3.dc1--test-topic' requestId: 2025-05-07T08:56:14.150976Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2788: [PQ: 72075186224037892] got client message batch for topic 'rt3.dc1--test-topic' partition 0 2025-05-07T08:56:14.151088Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:377: Answer ok topic: 'rt3.dc1--test-topic' partition: 0 messageNo: 0 requestId: cookie: 1 2025-05-07T08:56:14.151630Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:538: TPartitionWriter 72075186224037892 (partition=0) Received event: NActors::IEventHandle 2025-05-07T08:56:14.152338Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:342: Handle TEvRequest topic: 'rt3.dc1--test-topic' requestId: 2025-05-07T08:56:14.152372Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2788: [PQ: 72075186224037892] got client message batch for topic 'rt3.dc1--test-topic' partition 0 2025-05-07T08:56:14.152445Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2192: [PQ: 72075186224037892] got client message topic: rt3.dc1--test-topic partition: 0 SourceId: '\0src_id' SeqNo: 3 partNo : 0 messageNo: 1 size 98 offset: -1 2025-05-07T08:56:14.152666Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:1233: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Topic 'rt3.dc1--test-topic' partition 0 part blob processing sourceId '\0src_id' seqNo 3 partNo 0 2025-05-07T08:56:14.153689Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:1333: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Topic 'rt3.dc1--test-topic' partition 0 part blob complete 
sourceId '\0src_id' seqNo 3 partNo 0 FormedBlobsCount 0 NewHead: Offset 2 PartNo 0 PackedSize 172 count 1 nextOffset 3 batches 1 2025-05-07T08:56:14.154276Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:1623: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Add new write blob: topic 'rt3.dc1--test-topic' partition 0 compactOffset 2,1 HeadOffset 0 endOffset 2 curOffset 3 d0000000000_00000000000000000002_00000_0000000001_00000| size 160 WTime 1746608174152 2025-05-07T08:56:14.154412Z node 4 :PERSQUEUE DEBUG: partition.cpp:2182: [PQ: 72075186224037892, Partition: 0, State: StateIdle] === DumpKeyValueRequest === 2025-05-07T08:56:14.154432Z node 4 :PERSQUEUE DEBUG: partition.cpp:2183: [PQ: 72075186224037892, Partition: 0, State: StateIdle] --- delete ---------------- 2025-05-07T08:56:14.154451Z node 4 :PERSQUEUE DEBUG: partition.cpp:2189: [PQ: 72075186224037892, Partition: 0, State: StateIdle] [x0000000000, x0000000001) 2025-05-07T08:56:14.154472Z node 4 :PERSQUEUE DEBUG: partition.cpp:2191: [PQ: 72075186224037892, Partition: 0, State: StateIdle] --- write ----------------- 2025-05-07T08:56:14.154490Z node 4 :PERSQUEUE DEBUG: partition.cpp:2194: [PQ: 72075186224037892, Partition: 0, State: StateIdle] m0000000000psrc_id 2025-05-07T08:56:14.154508Z node 4 :PERSQUEUE DEBUG: partition.cpp:2194: [PQ: 72075186224037892, Partition: 0, State: StateIdle] d0000000000_00000000000000000002_00000_0000000001_00000| 2025-05-07T08:56:14.154524Z node 4 :PERSQUEUE DEBUG: partition.cpp:2194: [PQ: 72075186224037892, Partition: 0, State: StateIdle] i0000000000 2025-05-07T08:56:14.154541Z node 4 :PERSQUEUE DEBUG: partition.cpp:2196: [PQ: 72075186224037892, Partition: 0, State: StateIdle] --- rename ---------------- 2025-05-07T08:56:14.154558Z node 4 :PERSQUEUE DEBUG: partition.cpp:2201: [PQ: 72075186224037892, Partition: 0, State: StateIdle] =========================== 2025-05-07T08:56:14.154612Z node 4 :PERSQUEUE DEBUG: read.h:262: CacheProxy. Passthrough write request to KV 2025-05-07T08:56:14.154684Z node 4 :PERSQUEUE DEBUG: read.h:300: CacheProxy. Passthrough blob. Partition 0 offset 2 partNo 0 count 1 size 160 2025-05-07T08:56:14.163346Z node 4 :PERSQUEUE DEBUG: cache_eviction.h:315: Caching head blob in L1. Partition 0 offset 2 count 1 size 160 actorID [4:7501624462476059091:2424] 2025-05-07T08:56:14.163487Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:524: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 105 WriteNewSizeFromSupportivePartitions# 0 2025-05-07T08:56:14.163541Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:58: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-05-07T08:56:14.163586Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:324: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Answering for message sourceid: '\0src_id', Topic: 'rt3.dc1--test-topic', Partition: 0, SeqNo: 3, partNo: 0, Offset: 2 is stored on disk 2025-05-07T08:56:14.163822Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:1366: [PQ: 72075186224037892] Topic 'rt3.dc1--test-topic' counters. CacheSize 480 CachedBlobs 3 2025-05-07T08:56:14.163854Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:377: Answer ok topic: 'rt3.dc1--test-topic' partition: 0 messageNo: 1 requestId: cookie: 1 2025-05-07T08:56:14.164206Z node 4 :PERSQUEUE DEBUG: pq_l2_cache.cpp:120: PQ Cache (L2). Adding blob. 
Tablet '72075186224037892' partition 0 offset 2 partno 0 count 1 parts 0 size 160 2025-05-07T08:56:14.166139Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:538: TPartitionWriter 72075186224037892 (partition=0) Received event: NActors::IEventHandle 2025-05-07T08:56:14.167147Z :DEBUG: [/Root] TraceId [] SessionId [src_id|145010a8-512d918c-9322ca53-8d26ee5_0] MessageGroupId [src_id] Write session: OnReadDone gRpcStatusCode: 0 2025-05-07T08:56:14.167310Z :DEBUG: [/Root] TraceId [] SessionId [src_id|145010a8-512d918c-9322ca53-8d26ee5_0] MessageGroupId [src_id] Write session got write response: acks { seq_no: 3 written { offset: 2 } } write_statistics { persisting_time { nanos: 11000000 } min_queue_wait_time { } max_queue_wait_time { } partition_quota_wait_time { } topic_quota_wait_time { } } 2025-05-07T08:56:14.167339Z :DEBUG: [/Root] TraceId [] SessionId [src_id|145010a8-512d918c-9322ca53-8d26ee5_0] MessageGroupId [src_id] OnAck: seqNo=1, txId=? 2025-05-07T08:56:14.167366Z :DEBUG: [/Root] TraceId [] SessionId [src_id|145010a8-512d918c-9322ca53-8d26ee5_0] MessageGroupId [src_id] Write session: acknoledged message 1 2025-05-07T08:56:14.193301Z :DEBUG: [/Root] TraceId [] SessionId [src_id|145010a8-512d918c-9322ca53-8d26ee5_0] MessageGroupId [src_id] Write session: OnReadDone gRpcStatusCode: 1, Msg: Cancelled on the server side, Details: , InternalError: 0 2025-05-07T08:56:14.193425Z :ERROR: [/Root] TraceId [] SessionId [src_id|145010a8-512d918c-9322ca53-8d26ee5_0] MessageGroupId [src_id] Got error. Status: CLIENT_CANCELLED, Description:
: Error: GRpc error: (1): Cancelled on the server side 2025-05-07T08:56:14.193460Z :ERROR: [/Root] TraceId [] SessionId [src_id|145010a8-512d918c-9322ca53-8d26ee5_0] MessageGroupId [src_id] Write session will not restart after a fatal error 2025-05-07T08:56:14.193495Z :INFO: [/Root] TraceId [] SessionId [src_id|145010a8-512d918c-9322ca53-8d26ee5_0] MessageGroupId [src_id] Write session will now close 2025-05-07T08:56:14.193559Z :DEBUG: [/Root] TraceId [] SessionId [src_id|145010a8-512d918c-9322ca53-8d26ee5_0] MessageGroupId [src_id] Write session: aborting 2025-05-07T08:56:14.192152Z node 3 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 4 sessionId: src_id|145010a8-512d918c-9322ca53-8d26ee5_0 grpc read done: success: 0 data: 2025-05-07T08:56:14.192181Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 4 sessionId: src_id|145010a8-512d918c-9322ca53-8d26ee5_0 grpc read failed 2025-05-07T08:56:14.192216Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 4 sessionId: src_id|145010a8-512d918c-9322ca53-8d26ee5_0 grpc closed 2025-05-07T08:56:14.192231Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 4 sessionId: src_id|145010a8-512d918c-9322ca53-8d26ee5_0 is DEAD 2025-05-07T08:56:14.192685Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:538: TPartitionWriter 72075186224037892 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-05-07T08:56:14.194496Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2899: [PQ: 72075186224037892] server disconnected, pipe [3:7501624903771395815:3321] destroyed 2025-05-07T08:56:14.194560Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:138: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::DropOwner. 2025-05-07T08:56:14.306770Z :DEBUG: [/Root] TraceId [] SessionId [src_id|145010a8-512d918c-9322ca53-8d26ee5_0] MessageGroupId [src_id] Write session: destroy 2025-05-07T08:56:14.890932Z node 3 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1073: TxId: 281474976720888, task: 1, CA Id [3:7501624989670742590:3477]. Got EvDeliveryProblem, TabletId: 72075186224037890, NotDelivered: 0 2025-05-07T08:56:14.931965Z node 3 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1073: TxId: 281474976720888, task: 1, CA Id [3:7501624989670742590:3477]. Got EvDeliveryProblem, TabletId: 72075186224037890, NotDelivered: 1 2025-05-07T08:56:14.986979Z node 3 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1073: TxId: 281474976720888, task: 1, CA Id [3:7501624989670742590:3477]. Got EvDeliveryProblem, TabletId: 72075186224037890, NotDelivered: 1 2025-05-07T08:56:15.074774Z node 3 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1073: TxId: 281474976720888, task: 1, CA Id [3:7501624989670742590:3477]. Got EvDeliveryProblem, TabletId: 72075186224037890, NotDelivered: 1 2025-05-07T08:56:15.178568Z node 3 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1073: TxId: 281474976720888, task: 1, CA Id [3:7501624989670742590:3477]. Got EvDeliveryProblem, TabletId: 72075186224037890, NotDelivered: 1 2025-05-07T08:56:15.340015Z node 3 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1073: TxId: 281474976720888, task: 1, CA Id [3:7501624989670742590:3477]. Got EvDeliveryProblem, TabletId: 72075186224037890, NotDelivered: 1 2025-05-07T08:56:15.614089Z node 3 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1073: TxId: 281474976720888, task: 1, CA Id [3:7501624989670742590:3477]. 
Got EvDeliveryProblem, TabletId: 72075186224037890, NotDelivered: 1 >> PgCatalog::PgRoles [GOOD] >> PgCatalog::PgTables ------- [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/federated_query/generic_ut/unittest >> GenericFederatedQuery::TestFailsOnIncorrectScriptExecutionFetchToken [GOOD] Test command err: Trying to start YDB, gRPC: 11726, MsgBus: 19079 2025-05-07T08:55:47.846827Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501624871600520248:2065];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:55:47.866624Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00487f/r3tmp/tmpXFv1Nf/pdisk_1.dat 2025-05-07T08:55:49.052809Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:55:49.052909Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:55:49.055412Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:55:49.087740Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:55:49.251264Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 11726, node 1 2025-05-07T08:55:49.505848Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:55:49.505876Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:55:49.505883Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:55:49.506032Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:19079 TClient is connected to server localhost:19079 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-05-07T08:55:50.928456Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:55:50.995008Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T08:55:52.850160Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501624871600520248:2065];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:55:52.850230Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:55:55.172845Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501624905960259291:2337], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:55:55.172948Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:55:56.067126Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:2, at schemeshard: 72057594046644480 2025-05-07T08:55:56.290910Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501624910255226715:2353], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:55:56.291036Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:55:56.291278Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501624910255226721:2356], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:55:56.294331Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:2, at schemeshard: 72057594046644480 2025-05-07T08:55:56.314755Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710659, at schemeshard: 72057594046644480 2025-05-07T08:55:56.315419Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501624910255226723:2357], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-05-07T08:55:56.415856Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501624910255226763:2412] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:55:57.451460Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T08:55:57.971314Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:1, at schemeshard: 72057594046644480 2025-05-07T08:55:58.640185Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480 2025-05-07T08:55:59.318410Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710676:0, at schemeshard: 72057594046644480 2025-05-07T08:56:00.083385Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710681:0, at schemeshard: 72057594046644480 2025-05-07T08:56:00.946043Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976715758:2, at schemeshard: 72057594046644480 2025-05-07T08:56:01.017280Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976715759:0, at schemeshard: 72057594046644480 2025-05-07T08:56:04.190182Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7261: Cannot get console configs 2025-05-07T08:56:04.190212Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:56:04.363981Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976710703:0, at schemeshard: 72057594046644480 Call DescribeTable. 
data_source_instance { kind: YDB endpoint { host: "localhost" port: 2136 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Expected: data_source_instance { kind: YDB endpoint { host: "localhost" port: 2136 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Actual: data_source_instance { kind: YDB endpoint { host: "localhost" port: 2136 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } DescribeTable result. GRpcStatusCode: 0 schema { columns { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } columns { name: "data_column" type { optional_type { item { type_id: STRING } } } } } error { status: SUCCESS } Call ListSplits. selects { data_source_instance { kind: YDB endpoint { host: "localhost" port: 2136 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE } from { table: "example_1" } } CRAB Expected: selects { data_source_instance { kind: YDB endpoint { host: "localhost" port: 2136 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE } from { table: "example_1" } } CRAB Actual: selects { data_source_instance { kind: YDB endpoint { host: "localhost" port: 2136 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE } from { table: "example_1" } } ListSplits result. GRpcStatusCode: 0 Call ReadSplits. 
splits { select { data_source_instance { kind: YDB endpoint { host: "localhost" port: 2136 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE } what { items { column { name: "data_column" type { optional_type { item { type_id: STRING } } } } } items { column { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } } } from { table: "example_1" } where { filter_typed { comparison { operation: EQ left_value { column: "filtered_column" } right_value { typed_value { type { type_id: INT32 } value { int32_value: 42 } } } } } } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL CRAB Expected: splits { select { data_source_instance { kind: YDB endpoint { host: "localhost" port: 2136 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE } what { items { column { name: "data_column" type { optional_type { item { type_id: STRING } } } } } items { column { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } } } from { table: "example_1" } where { filter_typed { comparison { operation: EQ left_value { column: "filtered_column" } right_value { typed_value { type { type_id: INT32 } value { int32_value: 42 } } } } } } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL CRAB Actual: splits { select { data_source_instance { kind: YDB endpoint { host: "localhost" port: 2136 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE } what { items { column { name: "data_column" type { optional_type { item { type_id: STRING } } } } } items { column { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } } } from { table: "example_1" } where { filter_typed { comparison { operation: EQ left_value { column: "filtered_column" } right_value { typed_value { type { type_id: INT32 } value { int32_value: 42 } } } } } } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL ReadSplits result. 
GRpcStatusCode: 0 Trying to start YDB, gRPC: 17697, MsgBus: 14952 2025-05-07T08:56:05.983809Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7501624949766242664:2125];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:56:05.983877Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00487f/r3tmp/tmpmeXsfy/pdisk_1.dat 2025-05-07T08:56:06.479617Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:56:06.479707Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:56:06.490505Z node 2 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:56:06.546874Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 17697, node 2 2025-05-07T08:56:06.770901Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:56:06.770936Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:56:06.770945Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:56:06.771106Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:14952 TClient is connected to server localhost:14952 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:56:07.483278Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T08:56:07.491322Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 Trying to start YDB, gRPC: 9051, MsgBus: 14601 2025-05-07T08:56:12.883742Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7501624979086214282:2213];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00487f/r3tmp/tmp0RByDi/pdisk_1.dat 2025-05-07T08:56:12.972440Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-05-07T08:56:13.113490Z node 3 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:56:13.168418Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:56:13.168504Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:56:13.179316Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 9051, node 3 2025-05-07T08:56:13.394684Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:56:13.394705Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:56:13.394713Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:56:13.394830Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:14601 TClient is connected to server localhost:14601 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 
2025-05-07T08:56:14.578388Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 2025-05-07T08:56:14.583719Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 >> TExternalTableTest::ReplaceExternalTableIfNotExistsShouldFailIfFeatureFlagIsNotSet >> TCdcStreamTests::DropStreamImplShouldFail [GOOD] >> TCdcStreamTests::CopyTableShouldNotCopyStream >> TDataShardMinStepTest::TestDropTableCompletesQuicklyRW+VolatileTxs [GOOD] >> TDataShardMinStepTest::TestDropTableCompletesQuicklyRW-VolatileTxs >> TExternalTableTest::ReadOnlyMode >> TFlatExecutorLeases::BasicsInitialLease [GOOD] >> TFlatExecutorLeases::BasicsInitialLeaseTimeout >> YdbIndexTable::MultiShardTableTwoIndexes [GOOD] >> TIterator::SerialReverse [GOOD] >> TIterator::GetKey >> GenericFederatedQuery::IcebergHiveBasicSelectConstant [GOOD] >> GenericFederatedQuery::IcebergHiveBasicSelectCount >> TIterator::GetKey [GOOD] >> TIterator::GetKeyWithEraseCache [GOOD] >> TIterator::GetKeyWithVersionSkips [GOOD] >> TLegacy::IndexIter >> GenericFederatedQuery::YdbManagedSelectConstant [GOOD] >> GenericFederatedQuery::YdbSelectCount >> TPQCDTest::TestDiscoverClusters [GOOD] >> TExternalTableTest::ReplaceExternalTableIfNotExistsShouldFailIfFeatureFlagIsNotSet [GOOD] >> TLegacy::IndexIter [GOOD] >> TLegacy::ScreenedIndexIter [GOOD] >> TLegacy::StatsIter >> GroupWriteTest::WriteHardRateDispatcher >> TExternalTableTest::ReadOnlyMode [GOOD] >> TLegacy::StatsIter [GOOD] >> TPageHandleTest::Uninitialized [GOOD] >> TPageHandleTest::NormalUse [GOOD] >> TPageHandleTest::HandleRef [GOOD] >> TPageHandleTest::PinnedRef [GOOD] >> TPageHandleTest::PinnedRefPure [GOOD] >> TPart::BasicColumnGroups [GOOD] >> TPersQueueTest::ReadFromSeveralPartitions ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_external_table/unittest >> TExternalTableTest::ReplaceExternalTableIfNotExistsShouldFailIfFeatureFlagIsNotSet [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2141] Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:127:2058] recipient: [1:109:2141] 2025-05-07T08:56:21.985324Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:56:21.985415Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:56:21.985455Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:56:21.985494Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:56:21.985535Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:56:21.985614Z 
node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:56:21.985678Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:56:21.985775Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:56:21.986565Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:56:21.986956Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:56:22.188407Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7610: Got new config: QueryServiceConfig { AvailableExternalDataSources: "ObjectStorage" AvailableExternalDataSources: "ClickHouse" AvailableExternalDataSources: "PostgreSQL" AvailableExternalDataSources: "MySQL" AvailableExternalDataSources: "Ydb" AvailableExternalDataSources: "YT" AvailableExternalDataSources: "Greenplum" AvailableExternalDataSources: "MsSQLServer" AvailableExternalDataSources: "Oracle" AvailableExternalDataSources: "Logging" AvailableExternalDataSources: "Solomon" } 2025-05-07T08:56:22.188486Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:56:22.189249Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# ObjectStorage, ClickHouse, PostgreSQL, MySQL, Ydb, YT, Greenplum, MsSQLServer, Oracle, Logging, Solomon 2025-05-07T08:56:22.204951Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:56:22.205525Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:56:22.205728Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:56:22.214056Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:56:22.214318Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:56:22.214999Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:56:22.215285Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:56:22.218525Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:56:22.220000Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:56:22.220067Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:56:22.220191Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:56:22.220242Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:56:22.220339Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:56:22.220556Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:56:22.227515Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:239:2058] recipient: [1:15:2062] 2025-05-07T08:56:22.386617Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:56:22.386922Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:56:22.387212Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:56:22.387500Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:56:22.387585Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:56:22.391134Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:56:22.391321Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:56:22.391611Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:56:22.391768Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:56:22.391820Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:56:22.391874Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:56:22.394552Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:56:22.394619Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:56:22.394672Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:56:22.397310Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:56:22.397374Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:56:22.397438Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:56:22.397525Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:56:22.403692Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:56:22.406833Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:56:22.407117Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:56:22.408414Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:56:22.408598Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 134 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:56:22.408674Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:56:22.409028Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:56:22.409116Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:56:22.409341Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:56:22.409454Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 
2025-05-07T08:56:22.412452Z node 1 :FLAT_TX_SCHEMESHARD INF ... TOperation IsReadyToNotify, TxId: 101, ready parts: 1/1, is published: false 2025-05-07T08:56:22.482396Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-05-07T08:56:22.482442Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 101:0 2025-05-07T08:56:22.482482Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 101:0 2025-05-07T08:56:22.482560Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-05-07T08:56:22.482596Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 101, publications: 2, subscribers: 0 2025-05-07T08:56:22.482652Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 1], 5 2025-05-07T08:56:22.482702Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 2], 2 2025-05-07T08:56:22.483535Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 101 2025-05-07T08:56:22.483647Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 101 2025-05-07T08:56:22.483694Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 101 2025-05-07T08:56:22.483762Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 5 2025-05-07T08:56:22.483809Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-05-07T08:56:22.484784Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 2 PathOwnerId: 72057594046678944, cookie: 101 2025-05-07T08:56:22.484877Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 2 PathOwnerId: 72057594046678944, cookie: 101 2025-05-07T08:56:22.484915Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 101 2025-05-07T08:56:22.484970Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 2 2025-05-07T08:56:22.485006Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-05-07T08:56:22.485092Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 101, subscribers: 0 2025-05-07T08:56:22.489139Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-05-07T08:56:22.489617Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 TestModificationResult got TxId: 101, wait until txId: 101 TestWaitNotification wait txId: 101 2025-05-07T08:56:22.489850Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 101: send EvNotifyTxCompletion 2025-05-07T08:56:22.489905Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 101 2025-05-07T08:56:22.490416Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 2025-05-07T08:56:22.490538Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-05-07T08:56:22.490577Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:305:2296] TestWaitNotification: OK eventTxId 101 2025-05-07T08:56:22.491031Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/ExternalDataSource" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T08:56:22.491228Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/ExternalDataSource" took 230us result status StatusSuccess 2025-05-07T08:56:22.491613Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/ExternalDataSource" PathDescription { Self { Name: "ExternalDataSource" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeExternalDataSource CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 ExternalDataSourceVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } ExternalDataSourceDescription { Name: "ExternalDataSource" PathId { OwnerId: 
72057594046678944 LocalId: 2 } Version: 1 SourceType: "ObjectStorage" Location: "https://s3.cloud.net/my_bucket" Installation: "" Auth { None { } } Properties { } References { } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 TestModificationResults wait txId: 102 2025-05-07T08:56:22.496084Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateExternalTable CreateExternalTable { Name: "ExternalTable" SourceType: "General" DataSourcePath: "/MyRoot/ExternalDataSource" Location: "/" Columns { Name: "key" Type: "Uint64" } ReplaceIfExists: true } } TxId: 102 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:56:22.496529Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_create_external_table.cpp:427: [72057594046678944] CreateNewExternalTable, opId 102:0, feature flag EnableReplaceIfExistsForExternalEntities 0, tx WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateExternalTable FailOnExist: false CreateExternalTable { Name: "ExternalTable" SourceType: "General" DataSourcePath: "/MyRoot/ExternalDataSource" Location: "/" Columns { Name: "key" Type: "Uint64" } ReplaceIfExists: true } 2025-05-07T08:56:22.496620Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_just_reject.cpp:47: TReject Propose, opId: 102:0, explain: Invalid TCreateExternalTable request: Unsupported: feature flag EnableReplaceIfExistsForExternalEntities is off, at schemeshard: 72057594046678944 2025-05-07T08:56:22.496674Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 102:1, propose status:StatusPreconditionFailed, reason: Invalid TCreateExternalTable request: Unsupported: feature flag EnableReplaceIfExistsForExternalEntities is off, at schemeshard: 72057594046678944 2025-05-07T08:56:22.499485Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 102, response: Status: StatusPreconditionFailed Reason: "Invalid TCreateExternalTable request: Unsupported: feature flag EnableReplaceIfExistsForExternalEntities is off" TxId: 102 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:56:22.499677Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 102, database: /MyRoot, subject: , status: StatusPreconditionFailed, reason: Invalid TCreateExternalTable request: Unsupported: feature flag EnableReplaceIfExistsForExternalEntities is off, operation: CREATE EXTERNAL TABLE, path: /MyRoot/ExternalTable TestModificationResult got TxId: 102, wait until txId: 102 TestWaitNotification wait txId: 102 2025-05-07T08:56:22.499988Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2025-05-07T08:56:22.500034Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 2025-05-07T08:56:22.500492Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 2025-05-07T08:56:22.500616Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-05-07T08:56:22.500684Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:313:2304] TestWaitNotification: OK 
eventTxId 102 2025-05-07T08:56:22.501199Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/ExternalTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T08:56:22.501404Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/ExternalTable" took 215us result status StatusPathDoesNotExist 2025-05-07T08:56:22.501596Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/ExternalTable\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/ExternalTable" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: true } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 >> TFlatExecutorLeases::BasicsInitialLeaseTimeout [GOOD] >> TFlatExecutorLeases::BasicsInitialLeaseSleep >> KqpWorkloadService::TestCpuLoadThreshold [GOOD] >> KqpWorkloadService::TestCpuLoadThresholdRefresh >> GenericFederatedQuery::IcebergHadoopBasicSelectConstant [GOOD] >> GenericFederatedQuery::IcebergHadoopBasicSelectCount ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_external_table/unittest >> TExternalTableTest::ReadOnlyMode [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2141] Leader for TabletID 72057594046678944 is [1:126:2152] sender: [1:129:2058] recipient: [1:109:2141] 2025-05-07T08:56:22.266357Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:56:22.266449Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:56:22.266490Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:56:22.266527Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:56:22.266571Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:56:22.266605Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:56:22.266678Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 
0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:56:22.266834Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:56:22.267701Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:56:22.268131Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:56:22.348562Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7610: Got new config: QueryServiceConfig { AvailableExternalDataSources: "ObjectStorage" AvailableExternalDataSources: "ClickHouse" AvailableExternalDataSources: "PostgreSQL" AvailableExternalDataSources: "MySQL" AvailableExternalDataSources: "Ydb" AvailableExternalDataSources: "YT" AvailableExternalDataSources: "Greenplum" AvailableExternalDataSources: "MsSQLServer" AvailableExternalDataSources: "Oracle" AvailableExternalDataSources: "Logging" AvailableExternalDataSources: "Solomon" } 2025-05-07T08:56:22.348627Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:56:22.349385Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# ObjectStorage, ClickHouse, PostgreSQL, MySQL, Ydb, YT, Greenplum, MsSQLServer, Oracle, Logging, Solomon 2025-05-07T08:56:22.370191Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:56:22.370315Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:56:22.370447Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:56:22.382893Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:56:22.383064Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:56:22.383546Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:56:22.383728Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:56:22.386833Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:56:22.388066Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:56:22.388134Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:56:22.388322Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:56:22.388378Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 
72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:56:22.388424Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:56:22.388586Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:56:22.395733Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2152] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T08:56:22.532764Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:56:22.533012Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:56:22.533269Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:56:22.533487Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:56:22.533542Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:56:22.542914Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:56:22.543083Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:56:22.543364Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:56:22.543428Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:56:22.543467Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:56:22.543503Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:56:22.547758Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:56:22.547835Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:56:22.547878Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:56:22.559566Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:56:22.559639Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:56:22.559689Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:56:22.559755Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:56:22.563741Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:56:22.568627Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:56:22.568897Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:56:22.570038Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:56:22.570233Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 131 RawX2: 4294969451 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:56:22.570283Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:56:22.570654Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:56:22.570728Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:56:22.570915Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:56:22.570992Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:56:22.575489 ... 
ogressState, at schemeshard: 72057594046678944 2025-05-07T08:56:23.038048Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 129 ready parts: 1/1 2025-05-07T08:56:23.038197Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 129 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:56:23.038996Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 4 LocalPathId: 1 Version: 10 PathOwnerId: 72057594046678944, cookie: 129 2025-05-07T08:56:23.039107Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 4 LocalPathId: 1 Version: 10 PathOwnerId: 72057594046678944, cookie: 129 2025-05-07T08:56:23.039156Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 129 2025-05-07T08:56:23.039199Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 129, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 10 2025-05-07T08:56:23.039248Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 5 2025-05-07T08:56:23.040278Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 4 LocalPathId: 5 Version: 2 PathOwnerId: 72057594046678944, cookie: 129 2025-05-07T08:56:23.040365Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 4 LocalPathId: 5 Version: 2 PathOwnerId: 72057594046678944, cookie: 129 2025-05-07T08:56:23.040403Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 129 2025-05-07T08:56:23.040433Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 129, pathId: [OwnerId: 72057594046678944, LocalPathId: 5], version: 2 2025-05-07T08:56:23.040463Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 2 2025-05-07T08:56:23.040573Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 129, ready parts: 0/1, is published: true 2025-05-07T08:56:23.043056Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 129:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:129 msg type: 269090816 2025-05-07T08:56:23.043241Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 129, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 129 at step: 5000005 
FAKE_COORDINATOR: advance: minStep5000005 State->FrontStep: 5000004 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 129 at step: 5000005 2025-05-07T08:56:23.045383Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 129 2025-05-07T08:56:23.045507Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 129 2025-05-07T08:56:23.045827Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000005, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:56:23.045957Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 129 Coordinator: 72057594046316545 AckTo { RawX1: 131 RawX2: 4294969451 } } Step: 5000005 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:56:23.046024Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_mkdir.cpp:33: MkDir::TPropose operationId# 129:0 HandleReply TEvPrivate::TEvOperationPlan, step: 5000005, at schemeshard: 72057594046678944 2025-05-07T08:56:23.046171Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 129:0 128 -> 240 2025-05-07T08:56:23.046360Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 4 2025-05-07T08:56:23.046440Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 1 2025-05-07T08:56:23.049563Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:56:23.049639Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 129, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:56:23.049822Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 129, path id: [OwnerId: 72057594046678944, LocalPathId: 5] 2025-05-07T08:56:23.049936Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:56:23.049998Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:481:2439], at schemeshard: 72057594046678944, txId: 129, path id: 1 2025-05-07T08:56:23.050051Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:481:2439], at schemeshard: 72057594046678944, txId: 129, path id: 5 FAKE_COORDINATOR: Erasing txId 129 2025-05-07T08:56:23.050541Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 129:0, at schemeshard: 72057594046678944 2025-05-07T08:56:23.050585Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046678944] TDone opId# 129:0 ProgressState 2025-05-07T08:56:23.050704Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__operation_side_effects.cpp:906: Part operation is done id#129:0 progress is 1/1 2025-05-07T08:56:23.050739Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 129 ready parts: 1/1 2025-05-07T08:56:23.050780Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#129:0 progress is 1/1 2025-05-07T08:56:23.050815Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 129 ready parts: 1/1 2025-05-07T08:56:23.050883Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 129, ready parts: 1/1, is published: false 2025-05-07T08:56:23.050934Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 129 ready parts: 1/1 2025-05-07T08:56:23.050971Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 129:0 2025-05-07T08:56:23.051013Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 129:0 2025-05-07T08:56:23.051100Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 2 2025-05-07T08:56:23.051142Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 129, publications: 2, subscribers: 0 2025-05-07T08:56:23.051180Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 129, [OwnerId: 72057594046678944, LocalPathId: 1], 11 2025-05-07T08:56:23.051214Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 129, [OwnerId: 72057594046678944, LocalPathId: 5], 3 2025-05-07T08:56:23.051814Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 4 LocalPathId: 1 Version: 11 PathOwnerId: 72057594046678944, cookie: 129 2025-05-07T08:56:23.051906Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 4 LocalPathId: 1 Version: 11 PathOwnerId: 72057594046678944, cookie: 129 2025-05-07T08:56:23.051956Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 129 2025-05-07T08:56:23.052006Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 129, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 11 2025-05-07T08:56:23.052053Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 5 2025-05-07T08:56:23.057529Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 4 LocalPathId: 5 Version: 3 PathOwnerId: 72057594046678944, cookie: 129 2025-05-07T08:56:23.057690Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 
Generation: 4 LocalPathId: 5 Version: 3 PathOwnerId: 72057594046678944, cookie: 129 2025-05-07T08:56:23.057727Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 129 2025-05-07T08:56:23.057762Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 129, pathId: [OwnerId: 72057594046678944, LocalPathId: 5], version: 3 2025-05-07T08:56:23.057812Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 1 2025-05-07T08:56:23.057925Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 129, subscribers: 0 2025-05-07T08:56:23.075007Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 129 2025-05-07T08:56:23.075208Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 129 TestModificationResult got TxId: 129, wait until txId: 129 ------- [TM] {asan, default-linux-x86_64, release} ydb/services/persqueue_cluster_discovery/ut/unittest >> TPQCDTest::TestDiscoverClusters [GOOD] Test command err: 2025-05-07T08:56:10.670313Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501624973240396165:2071];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:56:10.670353Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003ddd/r3tmp/tmpAxWHRS/pdisk_1.dat 2025-05-07T08:56:11.164002Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:56:11.179937Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:56:11.180056Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:56:11.183083Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 6013, node 1 2025-05-07T08:56:11.359440Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/zvgn/003ddd/r3tmp/yandex9pese5.tmp 2025-05-07T08:56:11.359465Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/zvgn/003ddd/r3tmp/yandex9pese5.tmp 2025-05-07T08:56:11.359666Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/zvgn/003ddd/r3tmp/yandex9pese5.tmp 2025-05-07T08:56:11.359809Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:13919 PQClient connected to localhost:6013 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:56:12.062341Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... waiting... waiting... waiting... 2025-05-07T08:56:15.360793Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501624994715233372:2338], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:56:15.360793Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501624994715233360:2335], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:56:15.360968Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:56:15.365546Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710661:3, at schemeshard: 72057594046644480 2025-05-07T08:56:15.384954Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501624994715233374:2339], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710661 completed, doublechecking } 2025-05-07T08:56:15.658156Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501624994715233439:2390] txid# 281474976710662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:56:15.687029Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501624973240396165:2071];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:56:15.687154Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:56:15.731744Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T08:56:15.873452Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7501624994715233447:2345], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:5:17: Error: At function: KiReadTable!
:5:17: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Versions]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-05-07T08:56:15.873740Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2147: SessionId: ydb://session/3?node_id=1&id=ZjA3YTRlN2MtMWEzYjNkMGItNzY4NDRiMzItYzEwNjU3Ng==, ActorId: [1:7501624994715233357:2333], ActorState: ExecuteState, TraceId: 01jtmzbe7e8k71qpmzsf0ex6jr, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-05-07T08:56:15.925435Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 5 column: 17 } message: "At function: KiReadTable!" end_position { row: 5 column: 17 } severity: 1 issues { position { row: 5 column: 17 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Versions]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 5 column: 17 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-05-07T08:56:15.930015Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T08:56:16.097345Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); 2025-05-07T08:56:16.384180Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710666. Ctx: { TraceId: 01jtmzbf1yeadg1d0awr161tk2, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZDUwOGY3N2MtYzE0NGEzMTMtZTRjNjk4OTktNTIxYmY5MzY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:56:17.599518Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710668. Ctx: { TraceId: 01jtmzbg6sfbkfm4dr7t4xfcfr, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NTJiZjRkYy1mM2FiOTY2NC1mNmZkYWU0Ny1lZjA1ZWMxMA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:56:18.959827Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710670. Ctx: { TraceId: 01jtmzbhjb8r3w70kcv1br6wff, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=M2JkY2M4NTQtY2VlNTY5YmYtYTQ4ZDllMTUtNzdmNjdkZTg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:56:20.860323Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710676. Ctx: { TraceId: 01jtmzbk7jbv5g00mvn7hfwwfn, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MjczOThmMjItNTkxOWI5OC1iZjBmZjRkYy1iNzVmM2ZmOQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:56:21.923696Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710680. 
Ctx: { TraceId: 01jtmzbme8eynn0pnqx4xheb94, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NGZhMjBiN2ItMjRlYzM2MTYtZjVhMDU5Yi1hZWZiZDgyOQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root >> TCdcStreamTests::CopyTableShouldNotCopyStream [GOOD] >> TCdcStreamTests::MoveTableShouldFail >> GroupWriteTest::Simple >> BuildStatsHistogram::Ten_Crossed_Log [GOOD] >> BuildStatsHistogram::Five_Five_Mixed >> GroupWriteTest::TwoTables >> GenericFederatedQuery::IcebergHiveSaSelectConstant [GOOD] >> GenericFederatedQuery::IcebergHiveSaSelectCount >> GroupWriteTest::ByTableName ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tablet_flat/ut/unittest >> TPart::BasicColumnGroups [GOOD] Test command err: 00000.001 II| FAKE_ENV: Born at 2025-05-07T08:54:57.827111Z 00000.011 DD| RESOURCE_BROKER: TResourceBrokerActor bootstrap 00000.013 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144 00000.014 II| FAKE_ENV: Starting storage for BS group 0 00000.014 II| FAKE_ENV: Starting storage for BS group 1 00000.014 II| FAKE_ENV: Starting storage for BS group 2 00000.014 II| FAKE_ENV: Starting storage for BS group 3 00000.027 II| TABLET_EXECUTOR: Leader{1:2:0} activating executor 00000.027 II| TABLET_EXECUTOR: LSnap{1:2, on 2:1, 35b, wait} done, Waste{2:0, 0b +(0, 0b), 0 trc} 00000.028 DD| TABLET_EXECUTOR: Leader{1:2:2} commited cookie 2 for step 1 00000.028 DD| TABLET_EXECUTOR: Leader{1:2:2} Tx{1, NKikimr::NTabletFlatExecutor::TRowsModel::TTxSchema} queued, type NKikimr::NTabletFlatExecutor::TRowsModel::TTxSchema 00000.029 DD| TABLET_EXECUTOR: Leader{1:2:2} Tx{1, NKikimr::NTabletFlatExecutor::TRowsModel::TTxSchema} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.029 DD| TABLET_EXECUTOR: Leader{1:2:2} Tx{1, NKikimr::NTabletFlatExecutor::TRowsModel::TTxSchema} hope 1 -> done Change{2, redo 0b alter 302b annex 0, ~{ } -{ }, 0 gb} 00000.029 DD| TABLET_EXECUTOR: Leader{1:2:2} Tx{1, NKikimr::NTabletFlatExecutor::TRowsModel::TTxSchema} release 4194304b of static, Memory{0 dyn 0} 00000.029 DD| TABLET_EXECUTOR: TGenCompactionStrategy CheckGeneration for 1 generation 1, state Free, final id 0, final level 0 00000.030 DD| TABLET_EXECUTOR: Leader{1:2:3} commited cookie 1 for step 2 00000.030 DD| TABLET_EXECUTOR: Leader{1:2:3} Tx{2, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxSetResourceProfile} queued, type NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxSetResourceProfile 00000.030 DD| TABLET_EXECUTOR: Leader{1:2:3} Tx{2, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxSetResourceProfile} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.030 DD| TABLET_EXECUTOR: Leader{1:2:3} Tx{2, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxSetResourceProfile} hope 1 -> done Change{2, redo 0b alter 15b annex 0, ~{ } -{ }, 0 gb} 00000.030 DD| TABLET_EXECUTOR: Leader{1:2:3} Tx{2, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxSetResourceProfile} release 4194304b of static, Memory{0 dyn 0} 00000.030 DD| TABLET_EXECUTOR: TGenCompactionStrategy CheckGeneration for 1 generation 1, state Free, final id 0, final level 0 00000.030 DD| TABLET_EXECUTOR: Leader{1:2:4} commited cookie 1 for step 3 00000.031 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{3, 
NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} queued, type NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory 00000.031 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{3, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} took 1024b of static mem, Memory{1024 dyn 0} 00000.031 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{3, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} hope 1 -> retry Change{2, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 00000.031 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{3, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} touch new 0b, 0b lo load (0b in total), 104856577b requested for data (104857601b in total) 00000.031 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{3, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} release 1024b of static, Memory{0 dyn 0} 00000.031 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{3, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} release tx data 00000.031 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{3, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} request Res{1 104857601b} type large_transaction 00000.031 DD| RESOURCE_BROKER: Submitted new unknown task Tx{3, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (1 by [1:30:2062]) priority=5 resources={0, 104857601} 00000.031 EE| RESOURCE_BROKER: Assigning waiting task 'Tx{3, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (1 by [1:30:2062])' of unknown type 'large_transaction' to default queue 00000.031 DD| RESOURCE_BROKER: Allocate resources {0, 104857601} for task Tx{3, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (1 by [1:30:2062]) from queue queue_default 00000.031 EE| RESOURCE_BROKER: Assigning in-fly task 'Tx{3, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (1 by [1:30:2062])' of unknown type 'large_transaction' to default queue 00000.031 DD| RESOURCE_BROKER: Updated planned resource usage for queue queue_default from 0.000000 to 12.207031 (insert task Tx{3, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (1 by [1:30:2062])) 00000.031 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{3, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} acquired dyn mem Res{1 104857601b}, Memory{0 dyn 104857601} 00000.031 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{3, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} hope 2 -> done Change{2, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 00000.032 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{3, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} release Res{1 104857601b}, Memory{0 dyn 0} 00000.032 DD| RESOURCE_BROKER: Finish task Tx{3, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (1 by [1:30:2062]) (release resources {0, 104857601}) 00000.032 DD| RESOURCE_BROKER: Updated planned resource usage for queue queue_default from 12.207031 to 0.000000 (remove task Tx{3, 
NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (1 by [1:30:2062])) 00000.032 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{4, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} queued, type NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory 00000.032 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{4, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} took 1024b of static mem, Memory{1024 dyn 0} 00000.032 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{4, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} hope 1 -> retry Change{2, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 00000.032 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{4, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} touch new 0b, 0b lo load (0b in total), 104856577b requested for data (104857601b in total) 00000.032 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{4, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} took 104857601b of static mem, Memory{104857601 dyn 0} 00000.032 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{4, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} hope 2 -> done Change{2, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 00000.032 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{4, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} release 104857601b of static, Memory{0 dyn 0} 00000.033 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{5, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} queued, type NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory 00000.033 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{5, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} took 1024b of static mem, Memory{1024 dyn 0} 00000.033 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{5, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} hope 1 -> retry Change{2, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 00000.033 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{5, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} touch new 0b, 0b lo load (0b in total), 209714177b requested for data (209715201b in total) 00000.033 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{5, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} release 1024b of static, Memory{0 dyn 0} 00000.033 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{5, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} release tx data 00000.033 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{5, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} request Res{2 209715201b} type large_transaction 00000.033 DD| RESOURCE_BROKER: Submitted new unknown task Tx{5, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (2 by [1:30:2062]) priority=5 resources={0, 209715201} 00000.033 EE| RESOURCE_BROKER: Assigning waiting task 'Tx{5, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (2 by [1:30:2062])' of unknown type 'large_transaction' to default queue 00000.034 DD| RESOURCE_BROKER: 
Allocate resources {0, 209715201} for task Tx{5, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (2 by [1:30:2062]) from queue queue_default 00000.034 EE| RESOURCE_BROKER: Assigning in-fly task 'Tx{5, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (2 by [1:30:2062])' of unknown type 'large_transaction' to default queue 00000.034 DD| RESOURCE_BROKER: Updated planned resource usage for queue queue_default from 0.000000 to 23.193359 (insert task Tx{5, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (2 by [1:30:2062])) 00000.034 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{5, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} acquired dyn mem Res{2 209715201b}, Memory{0 dyn 209715201} 00000.034 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{5, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} hope 2 -> done Change{2, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 00000.034 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{5, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} release Res{2 209715201b}, Memory{0 dyn 0} 00000.034 DD| RESOURCE_BROKER: Finish task Tx{5, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (2 by [1:30:2062]) (release resources {0, 209715201}) 00000.034 DD| RESOURCE_BROKER: Updated planned resource usage for queue queue_default from 23.193359 to 0.000000 (remove task Tx{5, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (2 by [1:30:2062])) 00000.034 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{6, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} queued, type NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory 00000.034 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{6, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} took 1024b of static mem, Memory{1024 dyn 0} 00000.034 DD| TABLET_EXE ... 
76:97:0], [1:2:54:1:24576:97:0], [1:2:55:1:24576:97:0], [1:2:56:1:24576:97:0], [1:2:57:1:24576:97:0], [1:2:58:1:24576:97:0], [1:2:59:1:24576:97:0], [1:2:60:1:24576:97:0], [1:2:61:1:24576:97:0], [1:2:62:1:24576:97:0], [1:2:63:1:24576:97:0], [1:2:64:1:24576:97:0], [1:2:65:1:24576:97:0], [1:2:66:1:24576:97:0], [1:2:67:1:24576:97:0], [1:2:68:1:24576:97:0], [1:2:69:1:24576:97:0], [1:2:70:1:24576:97:0], [1:2:71:1:24576:97:0], [1:2:72:1:24576:97:0], [1:2:73:1:24576:101:0], [1:2:74:1:24576:102:0], [1:2:75:1:24576:101:0], [1:2:76:1:24576:102:0], [1:2:77:1:24576:104:0], [1:2:78:1:24576:104:0], [1:2:79:1:24576:104:0], [1:2:80:1:24576:104:0], [1:2:81:1:24576:103:0], [1:2:82:1:24576:101:0], [1:2:83:1:24576:104:0], [1:2:84:1:24576:104:0], [1:2:85:1:24576:104:0], [1:2:86:1:24576:104:0], [1:2:87:1:24576:104:0], [1:2:88:1:24576:104:0], [1:2:89:1:24576:104:0], [1:2:90:1:24576:101:0], [1:2:91:1:24576:104:0], [1:2:92:1:24576:104:0], [1:2:93:1:24576:98:0], [1:2:94:1:24576:104:0], [1:2:95:1:24576:104:0], [1:2:96:1:24576:104:0], [1:2:97:1:24576:104:0], [1:2:98:1:24576:104:0], [1:2:99:1:24576:104:0], [1:2:100:1:24576:104:0], [1:2:101:1:24576:97:0], [1:2:102:1:24576:100:0], [1:2:103:1:24576:104:0], [1:2:104:1:24576:104:0], [1:2:105:1:24576:104:0], [1:2:106:1:24576:104:0], [1:2:107:1:24576:104:0], [1:2:108:1:24576:104:0], [1:2:109:1:24576:104:0], [1:2:110:1:24576:104:0], [1:2:111:1:24576:104:0], [1:2:112:1:24576:104:0], [1:2:113:1:24576:104:0], [1:2:114:1:24576:104:0], [1:2:115:1:24576:104:0], [1:2:116:1:24576:104:0], [1:2:117:1:24576:104:0], [1:2:118:1:24576:104:0], [1:2:119:1:24576:104:0], [1:2:120:1:24576:104:0], [1:2:121:1:24576:104:0], [1:2:122:1:24576:104:0], [1:2:123:1:24576:104:0], [1:2:124:1:24576:104:0], [1:2:125:1:24576:104:0], [1:2:126:1:24576:104:0], [1:2:127:1:24576:104:0], [1:2:128:1:24576:104:0], [1:2:129:1:24576:104:0], [1:2:130:1:24576:104:0], [1:2:131:1:24576:104:0], [1:2:132:1:24576:104:0], [1:2:133:1:24576:104:0], [1:2:134:1:24576:104:0], [1:2:135:1:24576:104:0], [1:2:136:1:24576:104:0], [1:2:137:1:24576:104:0], [1:2:138:1:24576:104:0], [1:2:139:1:24576:104:0], [1:2:140:1:24576:104:0], [1:2:141:1:24576:104:0], [1:2:142:1:24576:104:0], [1:2:145:1:24576:60:0], [1:2:146:1:24576:60:0] } 00000.152 DD| TABLET_SAUSAGECACHE: Add page collection [1:2:143:1:12288:758:0] 00000.152 DD| TABLET_SAUSAGECACHE: Add page collection [1:2:143:1:12288:758:0] owner [20:212:2237] 00000.152 TT| TABLET_SAUSAGECACHE: Request page collection [1:2:143:1:12288:758:0] owner [20:212:2237] cookie 4 class Online from cache [ ] already requested [ ] to request [ 22 23 24 25 ] 00000.152 TT| TABLET_SAUSAGECACHE: Receive page collection [1:2:143:1:12288:758:0] status OK pages [ 22 23 24 25 ] 00000.152 TT| TABLET_SAUSAGECACHE: Send page collection result [1:2:143:1:12288:758:0] owner [20:212:2237] class Online pages [ 22 23 24 25 ] cookie 4 00000.153 II| TABLET_EXECUTOR: Leader{1:3:0} activating executor 00000.153 II| TABLET_EXECUTOR: LSnap{1:3, on 3:1, 1880b, wait} done, Waste{2:0, 141856b +(140, 14018b), 146 trc} 00000.154 DD| TABLET_SAUSAGECACHE: Attach page collection [1:2:143:1:12288:758:0] owner [20:212:2237] 00000.154 TT| TABLET_SAUSAGECACHE: Request page collection [1:2:143:1:12288:758:0] owner [20:212:2237] cookie 2 class AsyncLoad from cache [ 22 23 24 25 ] already requested [ ] to request [ 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 ] 00000.154 TT| TABLET_SAUSAGECACHE: Request page collection [1:2:143:1:12288:758:0] async queue pages [ 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 ] 00000.155 TT| 
TABLET_SAUSAGECACHE: Receive page collection [1:2:143:1:12288:758:0] status OK pages [ 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 ] 00000.155 TT| TABLET_SAUSAGECACHE: Send page collection result [1:2:143:1:12288:758:0] owner [20:212:2237] class AsyncLoad pages [ 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 ] cookie 2 00000.155 TT| TABLET_SAUSAGECACHE: Touch page collection [1:2:143:1:12288:758:0] owner [20:212:2237] pages [ 22 23 24 25 ] 00000.156 DD| TABLET_EXECUTOR: Leader{1:3:2} commited cookie 2 for step 1 00000.156 DD| TABLET_EXECUTOR: Leader{1:3:2} got result TEvResult{26 pages [1:2:143:1:12288:758:0] ok OK}, category 2 00000.156 TT| TABLET_SAUSAGECACHE: Touch page collection [1:2:143:1:12288:758:0] owner [20:212:2237] pages [ 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 ] 00000.157 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{1, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_StickyPages::TTxFullScan} queued, type NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_StickyPages::TTxFullScan 00000.157 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{1, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_StickyPages::TTxFullScan} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.158 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{1, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_StickyPages::TTxFullScan} hope 1 -> done Change{145, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 00000.158 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{1, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_StickyPages::TTxFullScan} release 4194304b of static, Memory{0 dyn 0} 00000.158 II| FAKE_ENV: Model starts hard shutdown on level 7 of 8, left 3 actors 00000.158 II| TABLET_EXECUTOR: Leader{1:3:2} suiciding, Waste{2:0, 141856b +(0, 0b), 1 trc, -14018b acc} 00000.159 DD| TABLET_SAUSAGECACHE: Unregister owner [20:212:2237] 00000.159 DD| TABLET_SAUSAGECACHE: Remove page collection [1:2:143:1:12288:758:0] owner [20:212:2237] 00000.159 DD| TABLET_SAUSAGECACHE: Remove owner [20:212:2237] 00000.159 DD| TABLET_SAUSAGECACHE: Drop expired page collection [1:2:143:1:12288:758:0] 00000.159 NN| TABLET_SAUSAGECACHE: Poison cache serviced 3 reqs hit {6 1077b} miss {50 281387b} 00000.159 II| FAKE_ENV: Shut order, stopping 4 BS groups 00000.159 II| FAKE_ENV: DS.0 gone, left {42b, 1}, put {14354b, 149} 00000.159 II| FAKE_ENV: DS.1 gone, left {143736b, 8}, put {157893b, 150} 00000.159 II| FAKE_ENV: DS.2 gone, left {0b, 0}, put {0b, 0} 00000.159 II| FAKE_ENV: DS.3 gone, left {0b, 0}, put {0b, 0} 00000.159 II| FAKE_ENV: All BS storage groups are stopped 00000.159 II| FAKE_ENV: Model stopped, hosted 4 actors, spent 0.000s 00000.159 II| FAKE_ENV: Logged {Emerg 0 Alert 0 Crit 0 Error 0 Left 795}, stopped 00000.000 II| FAKE_ENV: Born at 2025-05-07T08:55:04.759419Z 00000.007 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144 00000.008 II| FAKE_ENV: Starting storage for BS group 0 00000.008 II| FAKE_ENV: Starting storage for BS group 1 00000.008 II| FAKE_ENV: Starting storage for BS group 2 00000.008 II| FAKE_ENV: Starting storage for BS group 3 00000.037 II| FAKE_ENV: Model starts hard shutdown on level 7 of 8, left 3 actors 00000.038 NN| TABLET_SAUSAGECACHE: Poison cache serviced 3 reqs hit {3 512b} miss {0 0b} 00000.038 II| FAKE_ENV: Shut order, stopping 4 BS groups 00000.038 II| FAKE_ENV: DS.0 gone, left {1356b, 12}, put {1376b, 13} 00000.038 II| FAKE_ENV: DS.1 gone, left {6814b, 23}, 
put {6814b, 23} 00000.038 II| FAKE_ENV: DS.2 gone, left {0b, 0}, put {0b, 0} 00000.038 II| FAKE_ENV: DS.3 gone, left {0b, 0}, put {0b, 0} 00000.039 II| FAKE_ENV: All BS storage groups are stopped 00000.039 II| FAKE_ENV: Model stopped, hosted 3 actors, spent 0.000s 00000.039 II| FAKE_ENV: Logged {Emerg 0 Alert 0 Crit 0 Error 0 Left 15}, stopped 00000.000 II| FAKE_ENV: Born at 2025-05-07T08:55:04.804099Z 00000.007 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144 00000.007 II| FAKE_ENV: Starting storage for BS group 0 00000.008 II| FAKE_ENV: Starting storage for BS group 1 00000.008 II| FAKE_ENV: Starting storage for BS group 2 00000.008 II| FAKE_ENV: Starting storage for BS group 3 00000.341 II| FAKE_ENV: Model starts hard shutdown on level 7 of 8, left 2 actors 00000.341 NN| TABLET_SAUSAGECACHE: Poison cache serviced 10 reqs hit {860 5551893b} miss {0 0b} 00000.352 II| FAKE_ENV: Shut order, stopping 4 BS groups 00000.352 II| FAKE_ENV: DS.0 gone, left {1201b, 13}, put {1221b, 14} 00000.352 II| FAKE_ENV: DS.1 gone, left {6751256b, 17}, put {6751256b, 17} 00000.355 II| FAKE_ENV: DS.2 gone, left {0b, 0}, put {0b, 0} 00000.355 II| FAKE_ENV: DS.3 gone, left {0b, 0}, put {0b, 0} 00000.355 II| FAKE_ENV: All BS storage groups are stopped 00000.355 II| FAKE_ENV: Model stopped, hosted 3 actors, spent 0.000s 00000.355 II| FAKE_ENV: Logged {Emerg 0 Alert 0 Crit 0 Error 0 Left 15}, stopped 00000.000 II| FAKE_ENV: Born at 2025-05-07T08:55:05.175170Z 00000.007 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144 00000.007 II| FAKE_ENV: Starting storage for BS group 0 00000.007 II| FAKE_ENV: Starting storage for BS group 1 00000.007 II| FAKE_ENV: Starting storage for BS group 2 00000.008 II| FAKE_ENV: Starting storage for BS group 3 00017.694 II| FAKE_ENV: Model starts hard shutdown on level 7 of 8, left 2 actors 00017.694 NN| TABLET_SAUSAGECACHE: Poison cache serviced 4109 reqs hit {2091 2366986b} miss {6144 6340608b} 00017.703 II| FAKE_ENV: Shut order, stopping 4 BS groups 00017.703 II| FAKE_ENV: DS.3 gone, left {0b, 0}, put {0b, 0} 00017.703 II| FAKE_ENV: DS.0 gone, left {1761b, 14}, put {1781b, 15} 00017.703 II| FAKE_ENV: DS.1 gone, left {6927727b, 27}, put {6927727b, 27} 00017.717 II| FAKE_ENV: DS.2 gone, left {0b, 0}, put {0b, 0} 00017.717 II| FAKE_ENV: All BS storage groups are stopped 00017.717 II| FAKE_ENV: Model stopped, hosted 3 actors, spent 0.000s 00017.717 II| FAKE_ENV: Logged {Emerg 0 Alert 0 Crit 0 Error 0 Left 15}, stopped 00000.000 II| FAKE_ENV: Born at 2025-05-07T08:55:22.907718Z 00000.007 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144 00000.008 II| FAKE_ENV: Starting storage for BS group 0 00000.008 II| FAKE_ENV: Starting storage for BS group 1 00000.008 II| FAKE_ENV: Starting storage for BS group 2 00000.008 II| FAKE_ENV: Starting storage for BS group 3 00017.740 II| FAKE_ENV: Model starts hard shutdown on level 7 of 8, left 2 actors 00017.740 NN| TABLET_SAUSAGECACHE: Poison cache serviced 4106 reqs hit {43 253450b} miss {4096 4227072b} 00017.746 II| FAKE_ENV: Shut order, stopping 4 BS groups 00017.747 II| FAKE_ENV: DS.0 gone, left {44744b, 2}, put {164749b, 16} 00017.747 II| FAKE_ENV: DS.1 gone, left {2764621b, 2068}, put {2764621b, 2068} 00017.755 II| FAKE_ENV: DS.2 gone, left {0b, 0}, put {0b, 0} 00017.755 II| FAKE_ENV: DS.3 gone, left {0b, 0}, put {0b, 0} 
00017.755 II| FAKE_ENV: All BS storage groups are stopped 00017.755 II| FAKE_ENV: Model stopped, hosted 3 actors, spent 0.000s 00017.755 II| FAKE_ENV: Logged {Emerg 0 Alert 0 Crit 0 Error 0 Left 15}, stopped 00000.000 II| FAKE_ENV: Born at 2025-05-07T08:55:40.686140Z 00000.008 II| FAKE_ENV: Starting storage for BS group 0 00000.009 II| FAKE_ENV: Starting storage for BS group 1 00000.009 II| FAKE_ENV: Starting storage for BS group 2 00000.009 II| FAKE_ENV: Starting storage for BS group 3 00000.000 II| FAKE_ENV: Born at 2025-05-07T08:55:40.722879Z 00000.009 II| FAKE_ENV: Starting storage for BS group 0 00000.010 II| FAKE_ENV: Starting storage for BS group 1 00000.010 II| FAKE_ENV: Starting storage for BS group 2 00000.010 II| FAKE_ENV: Starting storage for BS group 3 00000.000 II| FAKE_ENV: Born at 2025-05-07T08:55:40.768915Z 00000.009 II| FAKE_ENV: Starting storage for BS group 0 00000.010 II| FAKE_ENV: Starting storage for BS group 1 00000.010 II| FAKE_ENV: Starting storage for BS group 2 00000.010 II| FAKE_ENV: Starting storage for BS group 3 00000.000 II| FAKE_ENV: Born at 2025-05-07T08:55:40.822000Z 00000.008 II| FAKE_ENV: Starting storage for BS group 0 00000.009 II| FAKE_ENV: Starting storage for BS group 1 00000.009 II| FAKE_ENV: Starting storage for BS group 2 00000.009 II| FAKE_ENV: Starting storage for BS group 3 >> TCdcStreamTests::MoveTableShouldFail [GOOD] >> TCdcStreamTests::CheckSchemeLimits >> GenericFederatedQuery::IcebergHadoopSaSelectConstant [GOOD] >> GenericFederatedQuery::IcebergHadoopSaSelectCount >> TDataShardMinStepTest::TestDropTableCompletesQuicklyRW-VolatileTxs [GOOD] >> GenericFederatedQuery::IcebergHiveTokenSelectConstant [GOOD] >> GenericFederatedQuery::IcebergHiveTokenSelectCount >> TFlatExecutorLeases::BasicsInitialLeaseSleep [GOOD] >> TFlatExecutorLeases::BasicsInitialLeaseSleepTimeout |90.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/scheme_board/ut_replica/ydb-core-tx-scheme_board-ut_replica |90.6%| [LD] {RESULT} $(B)/ydb/core/tx/scheme_board/ut_replica/ydb-core-tx-scheme_board-ut_replica |90.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/scheme_board/ut_replica/ydb-core-tx-scheme_board-ut_replica ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/idx_test/unittest >> YdbIndexTable::MultiShardTableTwoIndexes [GOOD] Test command err: Trying to start YDB, gRPC: 16913, MsgBus: 17022 2025-05-07T08:51:39.442698Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501623807729602679:2066];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:51:39.447429Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00208c/r3tmp/tmp4cOGPl/pdisk_1.dat 2025-05-07T08:51:39.893529Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:51:39.897307Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:51:39.897405Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:51:39.901809Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 16913, node 1 
2025-05-07T08:51:40.020087Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:51:40.020108Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:51:40.020113Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:51:40.020225Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:17022 TClient is connected to server localhost:17022 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:51:40.806076Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:51:40.838273Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T08:51:40.846382Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:51:41.127065Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:51:41.325156Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:51:41.410529Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:51:43.375059Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623824909473516:2406], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T08:51:43.375188Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T08:51:43.848223Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480
2025-05-07T08:51:43.894168Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480
2025-05-07T08:51:43.980109Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480
2025-05-07T08:51:44.063865Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480
2025-05-07T08:51:44.107276Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480
2025-05-07T08:51:44.162261Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480
2025-05-07T08:51:44.244246Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480
2025-05-07T08:51:44.344695Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623829204441479:2470], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T08:51:44.344804Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T08:51:44.345270Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623829204441484:2473], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T08:51:44.350334Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480
2025-05-07T08:51:44.367883Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501623829204441486:2474], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking }
2025-05-07T08:51:44.436611Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501623807729602679:2066];send_to=[0:7307199536658146131:7762515];
2025-05-07T08:51:44.436698Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout;
2025-05-07T08:51:44.453155Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501623829204441539:3418] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 }
2025-05-07T08:51:45.923829Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480
2025-05-07T08:51:48.015608Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710673. Ctx: { TraceId: 01jtmz393z75ybd7m5wsdzbsat, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZjgyNTBhZjUtNWY2ZTI3ZS1jZjI5ZjE0MS01ZjkwZDFiNw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root
2025-05-07T08:51:48.029958Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710674. Ctx: { TraceId: 01jtmz3950b33abrfre84sv6xr, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OTZkNmQzYzUtOTc5MDVhZjktZTMzZjdjMjctMWZkZGJjN2Q=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root
2025-05-07T08:51:48.031951Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710675. Ctx: { TraceId: 01jtmz39518q0ew350x05e6jfv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjI3NTFmZTctYWRjNDQ5Y2MtYTU2ODQ4MjQtMzY2MmEwNTA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root
2025-05-07T08:51:48.055806Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710676. Ctx: { TraceId: 01jtmz3957fkwxx6jqp2c7qn2c, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZGNmODYxZGEtMWY4ODU2MmYtODJiNDdlMGMtODA1YjQwOTA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root
2025-05-07T08:51:48.057487Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710678. Ctx: { TraceId: 01jtmz39572jgt21kapvebsxvd, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MmE2MWMxOGQtNjZiOTk2YmEtZjUzZGRlMWQtOTQyYWVkYmI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root
2025-05-07T08:51:48.068283Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710677. Ctx: { TraceId: 01jtmz3957d0axqg766s1zy090, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NThhOWM0M2YtNzU3YmNiZC0yNzZiNDgyMy1iYzQxNzg1ZQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root
2025-05-07T08:51:48.069719Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710679. 
Ctx: { TraceId: 01jtmz39579sy4szexk9p4bg38, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjIwNjI3NzMtNjM1MTc0MDYtYjAyNGIxZDItNjBkMjFmZDI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:51:48.070942Z node 1 :KQP ... jRlNTItZWRkNmUwYWEtOTZhZDgwMzYtNDM4OWUyYmQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:56:14.468686Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976714493. Ctx: { TraceId: 01jtmzbdb005szfv9m1vxe36dr, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NGNmYWU2ZDktNTY0ZWZiY2EtYmY2NDM3NzItOTk0M2Y1NWQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:56:14.486219Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976714494. Ctx: { TraceId: 01jtmzbdb4db3bf6pjbq8zqqsv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NzY0YmUwM2UtOTA5MDIwZDgtOWE5M2JmYi1hOWY5ODIzYg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:56:14.487621Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976714495. Ctx: { TraceId: 01jtmzbdb99fwpbvzyj646fd08, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=OTFjYjRlNTItZWRkNmUwYWEtOTZhZDgwMzYtNDM4OWUyYmQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:56:14.505295Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976714496. Ctx: { TraceId: 01jtmzbdctb49cf3rkry0efw14, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NjE2ZDY2N2EtZWM0MWY2NTYtZDk5N2U3ZmYtMTk2ZWViNzE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:56:14.515214Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976714497. Ctx: { TraceId: 01jtmzbdd998yaa94c8svnrjfa, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MTEyYjUyZWYtMzhhMTMyOWEtOWRkMjFjMTYtODQ4YmY3ODA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:56:14.523076Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976714498. Ctx: { TraceId: 01jtmzbddc43r9yzvm103hbwq4, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NGNmYWU2ZDktNTY0ZWZiY2EtYmY2NDM3NzItOTk0M2Y1NWQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:56:14.530710Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976714499. Ctx: { TraceId: 01jtmzbddc43r9yzvm103hbwq4, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NGNmYWU2ZDktNTY0ZWZiY2EtYmY2NDM3NzItOTk0M2Y1NWQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:56:14.547236Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976714500. Ctx: { TraceId: 01jtmzbdea1wmvyhbhme91ge3a, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=OGFiNGMwMGItNGM0MDhlNC01ZjIwZjI1My0xNWM1YTczMQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:56:14.556445Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976714501. 
Ctx: { TraceId: 01jtmzbdeb781h0cpq59xtc0q4, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZmI5MzYzZjgtMmNjOTgzMjgtMTNkMGJmNGUtYjgwNjI2M2Y=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:56:14.565867Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976714502. Ctx: { TraceId: 01jtmzbdee9h2shc5m14dbm16x, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NzY0YmUwM2UtOTA5MDIwZDgtOWE5M2JmYi1hOWY5ODIzYg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:56:14.575412Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976714503. Ctx: { TraceId: 01jtmzbdeb781h0cpq59xtc0q4, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZmI5MzYzZjgtMmNjOTgzMjgtMTNkMGJmNGUtYjgwNjI2M2Y=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:56:14.581006Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976714505. Ctx: { TraceId: 01jtmzbdf65th6rz1wyf6vj6hq, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MTEyYjUyZWYtMzhhMTMyOWEtOWRkMjFjMTYtODQ4YmY3ODA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:56:14.585317Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976714504. Ctx: { TraceId: 01jtmzbdebd66p42mjwgvt3zhm, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=OTFjYjRlNTItZWRkNmUwYWEtOTZhZDgwMzYtNDM4OWUyYmQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:56:14.587800Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976714506. Ctx: { TraceId: 01jtmzbdee9h2shc5m14dbm16x, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NzY0YmUwM2UtOTA5MDIwZDgtOWE5M2JmYi1hOWY5ODIzYg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:56:14.612479Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976714507. Ctx: { TraceId: 01jtmzbdg73b41p3qe0bg1jhr1, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NjE2ZDY2N2EtZWM0MWY2NTYtZDk5N2U3ZmYtMTk2ZWViNzE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:56:14.626764Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976714508. Ctx: { TraceId: 01jtmzbdgv1n5dqfpv8k0rtkjm, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=OGFiNGMwMGItNGM0MDhlNC01ZjIwZjI1My0xNWM1YTczMQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:56:14.635157Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976714509. Ctx: { TraceId: 01jtmzbdh264kvqdmvfcdxcg2r, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=OTFjYjRlNTItZWRkNmUwYWEtOTZhZDgwMzYtNDM4OWUyYmQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:56:14.644132Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976714510. Ctx: { TraceId: 01jtmzbdh264kvqdmvfcdxcg2r, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=OTFjYjRlNTItZWRkNmUwYWEtOTZhZDgwMzYtNDM4OWUyYmQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:56:14.649115Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976714511. 
Ctx: { TraceId: 01jtmzbdhe9c758vp6s3q5fz86, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZmI5MzYzZjgtMmNjOTgzMjgtMTNkMGJmNGUtYjgwNjI2M2Y=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:56:14.649871Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976714512. Ctx: { TraceId: 01jtmzbdhd2drk8ggzk5dbdjnp, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MTEyYjUyZWYtMzhhMTMyOWEtOWRkMjFjMTYtODQ4YmY3ODA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:56:14.650616Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976714513. Ctx: { TraceId: 01jtmzbdhdf8q3ct0c3zvq45ae, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NzY0YmUwM2UtOTA5MDIwZDgtOWE5M2JmYi1hOWY5ODIzYg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:56:14.665546Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976714514. Ctx: { TraceId: 01jtmzbdhd2drk8ggzk5dbdjnp, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MTEyYjUyZWYtMzhhMTMyOWEtOWRkMjFjMTYtODQ4YmY3ODA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:56:14.666097Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976714515. Ctx: { TraceId: 01jtmzbdhdf8q3ct0c3zvq45ae, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NzY0YmUwM2UtOTA5MDIwZDgtOWE5M2JmYi1hOWY5ODIzYg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:56:14.672692Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976714516. Ctx: { TraceId: 01jtmzbdhd2drk8ggzk5dbdjnp, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MTEyYjUyZWYtMzhhMTMyOWEtOWRkMjFjMTYtODQ4YmY3ODA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:56:14.680485Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976714517. Ctx: { TraceId: 01jtmzbdhs2mta2vbptz0yv6yz, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NjE2ZDY2N2EtZWM0MWY2NTYtZDk5N2U3ZmYtMTk2ZWViNzE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:56:14.693135Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976714518. Ctx: { TraceId: 01jtmzbdhs2mta2vbptz0yv6yz, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NjE2ZDY2N2EtZWM0MWY2NTYtZDk5N2U3ZmYtMTk2ZWViNzE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:56:14.696506Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976714519. Ctx: { TraceId: 01jtmzbdjr4vgd1pmnb43jqk85, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NGNmYWU2ZDktNTY0ZWZiY2EtYmY2NDM3NzItOTk0M2Y1NWQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:56:14.700820Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976714520. Ctx: { TraceId: 01jtmzbdk3fe2nwfr34mv6n3k4, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZmI5MzYzZjgtMmNjOTgzMjgtMTNkMGJmNGUtYjgwNjI2M2Y=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:56:14.736075Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976714521. 
Ctx: { TraceId: 01jtmzbdjr4vgd1pmnb43jqk85, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NGNmYWU2ZDktNTY0ZWZiY2EtYmY2NDM3NzItOTk0M2Y1NWQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root finished with status: SUCCESS 2025-05-07T08:56:14.747744Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976714522. Ctx: { TraceId: 01jtmzbdk459v6sp32azykmg8t, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=OGFiNGMwMGItNGM0MDhlNC01ZjIwZjI1My0xNWM1YTczMQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:56:14.758319Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976714523. Ctx: { TraceId: 01jtmzbdmh8hgz44tkdbe3s5dt, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NzY0YmUwM2UtOTA5MDIwZDgtOWE5M2JmYi1hOWY5ODIzYg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:56:14.760759Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976714524. Ctx: { TraceId: 01jtmzbdk459v6sp32azykmg8t, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=OGFiNGMwMGItNGM0MDhlNC01ZjIwZjI1My0xNWM1YTczMQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root finished with status: SUCCESS 2025-05-07T08:56:14.769865Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976714525. Ctx: { TraceId: 01jtmzbdn38p4zte2gc61k73je, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MTEyYjUyZWYtMzhhMTMyOWEtOWRkMjFjMTYtODQ4YmY3ODA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root finished with status: SUCCESS finished with status: SUCCESS finished with status: SUCCESS |90.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/load_test/ut/unittest >> KqpWorkloadServiceTables::TestLeaseExpiration [GOOD] >> KqpWorkloadServiceTables::TestLeaseUpdates |90.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/table_creator/ut/ydb-library-table_creator-ut |90.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/library/table_creator/ut/ydb-library-table_creator-ut |90.6%| [LD] {RESULT} $(B)/ydb/library/table_creator/ut/ydb-library-table_creator-ut ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_minstep/unittest >> TDataShardMinStepTest::TestDropTableCompletesQuicklyRW-VolatileTxs [GOOD] Test command err: 2025-05-07T08:56:17.209923Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:105:2151], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:56:17.210133Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:56:17.210454Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00375e/r3tmp/tmpRSmt6t/pdisk_1.dat 2025-05-07T08:56:17.620244Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //Root, opId: 1:0, at schemeshard: 72057594046644480 2025-05-07T08:56:17.633824Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-05-07T08:56:17.635563Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-05-07T08:56:17.636457Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //Root 2025-05-07T08:56:17.638583Z node 1 :TX_COORDINATOR DEBUG: coordinator_impl.cpp:183: tablet# 72057594046316545 txid# 1 HANDLE EvProposeTransaction marker# C0 2025-05-07T08:56:17.638646Z node 1 :TX_COORDINATOR DEBUG: coordinator_impl.cpp:29: tablet# 72057594046316545 txid# 1 step# 500 Status# 16 SEND to# [1:410:2405] Proxy marker# C1 2025-05-07T08:56:17.664864Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:56:17.666632Z node 1 :HIVE DEBUG: hive_impl.cpp:2249: HIVE#72057594037968897 Merged config: { } 2025-05-07T08:56:17.752964Z node 1 :HIVE DEBUG: hive_impl.cpp:141: HIVE#72057594037968897 Handle TEvLocal::TEvRegisterNode from [1:336:2375] HiveId: 72057594037968897 ServicedDomains { SchemeShard: 72057594046644480 PathId: 1 } TabletAvailability { Type: Mediator Priority: 0 } TabletAvailability { Type: Dummy Priority: 0 } TabletAvailability { Type: KeyValue Priority: 0 } TabletAvailability { Type: Coordinator Priority: 0 } TabletAvailability { Type: Hive Priority: 0 } TabletAvailability { Type: SchemeShard Priority: 0 } TabletAvailability { Type: DataShard Priority: 0 } TabletAvailability { Type: PersQueue Priority: 0 } TabletAvailability { Type: PersQueueReadBalancer Priority: 0 } TabletAvailability { Type: Kesus Priority: 0 } TabletAvailability { Type: SysViewProcessor Priority: 0 } TabletAvailability { Type: ColumnShard Priority: 0 } TabletAvailability { Type: SequenceShard Priority: 0 } TabletAvailability { Type: ReplicationController Priority: 0 } TabletAvailability { Type: StatisticsAggregator Priority: 0 } 2025-05-07T08:56:17.753150Z node 1 :HIVE DEBUG: tx__register_node.cpp:21: HIVE#72057594037968897 THive::TTxRegisterNode(1)::Execute 2025-05-07T08:56:17.753335Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:56:17.753374Z node 1 :HIVE DEBUG: hive_impl.cpp:361: HIVE#72057594037968897 ProcessWaitQueue (0) 2025-05-07T08:56:17.753407Z node 1 :HIVE DEBUG: hive_impl.cpp:342: 
HIVE#72057594037968897 ProcessBootQueue (0) 2025-05-07T08:56:17.753474Z node 1 :HIVE DEBUG: hive_impl.cpp:361: HIVE#72057594037968897 ProcessWaitQueue (0) 2025-05-07T08:56:17.753506Z node 1 :HIVE DEBUG: hive_impl.cpp:342: HIVE#72057594037968897 ProcessBootQueue (0) 2025-05-07T08:56:17.753627Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:56:17.753898Z node 1 :HIVE DEBUG: tx__process_boot_queue.cpp:18: HIVE#72057594037968897 THive::TTxProcessBootQueue()::Execute 2025-05-07T08:56:17.753944Z node 1 :HIVE DEBUG: hive_impl.cpp:222: HIVE#72057594037968897 Handle ProcessBootQueue (size: 0) 2025-05-07T08:56:17.754001Z node 1 :HIVE DEBUG: hive_impl.cpp:225: HIVE#72057594037968897 Handle ProcessWaitQueue (size: 0) 2025-05-07T08:56:17.754044Z node 1 :HIVE DEBUG: hive_impl.cpp:302: HIVE#72057594037968897 ProcessBootQueue - BootQueue empty (WaitQueue: 0) 2025-05-07T08:56:17.754214Z node 1 :HIVE DEBUG: hive_impl.cpp:798: HIVE#72057594037968897 TEvInterconnect::TEvNodeInfo NodeId 1 Location DataCenter: "1" Module: "1" Rack: "1" Unit: "1" 2025-05-07T08:56:17.765273Z node 1 :HIVE DEBUG: tx__register_node.cpp:88: HIVE#72057594037968897 THive::TTxRegisterNode(1)::Complete 2025-05-07T08:56:17.765370Z node 1 :HIVE DEBUG: node_info.cpp:353: HIVE#72057594037968897 Node(1) Ping([1:336:2375]) 2025-05-07T08:56:17.765471Z node 1 :HIVE DEBUG: tx__process_boot_queue.cpp:26: HIVE#72057594037968897 THive::TTxProcessBootQueue()::Complete 2025-05-07T08:56:17.766047Z node 1 :HIVE DEBUG: hive_impl.cpp:727: HIVE#72057594037968897 THive::Handle::TEvSyncTablets 2025-05-07T08:56:17.766390Z node 1 :HIVE DEBUG: tx__sync_tablets.cpp:41: HIVE#72057594037968897 THive::TTxSyncTablets([1:336:2375])::Execute 2025-05-07T08:56:17.766455Z node 1 :HIVE DEBUG: hive_impl.cpp:342: HIVE#72057594037968897 ProcessBootQueue (0) 2025-05-07T08:56:17.766556Z node 1 :HIVE DEBUG: tx__sync_tablets.cpp:130: HIVE#72057594037968897 THive::TTxSyncTablets([1:336:2375])::Complete 2025-05-07T08:56:17.766769Z node 1 :HIVE DEBUG: hive_impl.cpp:721: HIVE#72057594037968897 Handle TEvLocal::TEvStatus for Node 1: Status: 0 StartTime: 0 ResourceMaximum { Memory: 270443335680 } 2025-05-07T08:56:17.766848Z node 1 :HIVE DEBUG: tx__status.cpp:22: HIVE#72057594037968897 THive::TTxStatus(1)::Execute 2025-05-07T08:56:17.766914Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:56:17.767096Z node 1 :HIVE DEBUG: hive_impl.cpp:2765: HIVE#72057594037968897 AddRegisteredDataCentersNode(1, 1) 2025-05-07T08:56:17.767175Z node 1 :HIVE DEBUG: hive_impl.cpp:361: HIVE#72057594037968897 ProcessWaitQueue (0) 2025-05-07T08:56:17.767217Z node 1 :HIVE DEBUG: hive_impl.cpp:342: HIVE#72057594037968897 ProcessBootQueue (0) 2025-05-07T08:56:17.767376Z node 1 :HIVE DEBUG: tx__process_boot_queue.cpp:18: HIVE#72057594037968897 THive::TTxProcessBootQueue()::Execute 2025-05-07T08:56:17.767434Z node 1 :HIVE DEBUG: hive_impl.cpp:222: HIVE#72057594037968897 Handle ProcessBootQueue (size: 0) 2025-05-07T08:56:17.767470Z node 1 :HIVE DEBUG: hive_impl.cpp:225: HIVE#72057594037968897 Handle ProcessWaitQueue (size: 0) 2025-05-07T08:56:17.767504Z node 1 :HIVE DEBUG: hive_impl.cpp:302: HIVE#72057594037968897 ProcessBootQueue - BootQueue empty (WaitQueue: 0) 2025-05-07T08:56:17.778309Z node 1 :HIVE DEBUG: tx__status.cpp:65: HIVE#72057594037968897 THive::TTxStatus(1)::Complete 2025-05-07T08:56:17.778425Z node 1 :HIVE DEBUG: 
tx__process_boot_queue.cpp:26: HIVE#72057594037968897 THive::TTxProcessBootQueue()::Complete 2025-05-07T08:56:17.850488Z node 1 :TX_COORDINATOR DEBUG: coordinator__plan_step.cpp:184: Transaction 1 has been planned 2025-05-07T08:56:17.850626Z node 1 :TX_COORDINATOR DEBUG: coordinator__plan_step.cpp:197: Planned transaction 1 for mediator 72057594046382081 tablet 72057594046644480 2025-05-07T08:56:17.851058Z node 1 :TX_COORDINATOR TRACE: coordinator_impl.cpp:268: Coordinator# 72057594046316545 scheduling step 1000 in 0.500000s at 0.950000s 2025-05-07T08:56:17.851474Z node 1 :TX_COORDINATOR DEBUG: coordinator_impl.cpp:579: Send from# 72057594046316545 to mediator# 72057594046382081, step# 500, txid# 1 marker# C2 2025-05-07T08:56:17.851588Z node 1 :TX_COORDINATOR DEBUG: coordinator_impl.cpp:424: tablet# 72057594046316545 txid# 1 stepId# 500 Status# 17 SEND EvProposeTransactionStatus to# [1:410:2405] Proxy 2025-05-07T08:56:17.852619Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 500, transactions count in step: 1, at schemeshard: 72057594046644480 2025-05-07T08:56:17.858270Z node 1 :TX_COORDINATOR DEBUG: coordinator_impl.cpp:397: tablet# 72057594046316545 HANDLE EvMediatorQueueConfirmations MediatorId# 72057594046382081 2025-05-07T08:56:17.858459Z node 1 :TX_COORDINATOR DEBUG: coordinator__mediators_confirmations.cpp:84: at tablet# 72057594046316545 [2:6] persistent tx 1 for mediator 72057594046382081 tablet 72057594046644480 removed=1 2025-05-07T08:56:17.858545Z node 1 :TX_COORDINATOR DEBUG: coordinator__mediators_confirmations.cpp:91: at tablet# 72057594046316545 [2:6] persistent tx 1 for mediator 72057594046382081 acknowledged 2025-05-07T08:56:17.858587Z node 1 :TX_COORDINATOR DEBUG: coordinator__mediators_confirmations.cpp:99: at tablet# 72057594046316545 [2:6] persistent tx 1 acknowledged 2025-05-07T08:56:17.859654Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 1:0 2025-05-07T08:56:17.859741Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 1, publications: 1, subscribers: 1 2025-05-07T08:56:17.871545Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046644480, txId: 1, subscribers: 1 2025-05-07T08:56:17.887713Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_table.cpp:426: TCreateTable Propose, path: /Root/table-1, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-05-07T08:56:17.889193Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 281474976715657:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-05-07T08:56:17.889304Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-05-07T08:56:17.903427Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976715657, database: /Root, subject: , status: StatusAccepted, operation: CREATE TABLE, path: /Root/table-1 2025-05-07T08:56:17.908142Z node 1 :HIVE DEBUG: hive_impl.cpp:34: HIVE#72057594037968897 Handle TEvHive::TEvCreateTablet(DataShard(72057594046644480,1)) 2025-05-07T08:56:17.955546Z node 1 :HIVE DEBUG: 
tx__create_tablet.cpp:200: HIVE#72057594037968897 THive::TTxCreateTablet::Execute Owner: 72057594046644480 OwnerIdx: 1 TabletType: DataShard ObjectDomain { SchemeShard: 72057594046644480 PathId: 1 } ObjectId: 2 BindedChannels { StoragePoolName: "/Root:test" } BindedChannels { StoragePoolName: "/Root:test" } BindedChannels { StoragePoolName: "/Root:test" } AllowedDomains { SchemeShard: 72057594046644480 PathId: 1 } 2025-05-07T08:56:17.955774Z node 1 :HIVE DEBUG: tx__create_tablet.cpp:348: HIVE#72057594037968897 Hive 72057594037968897 allocated TabletId 72075186224037888 from TabletIdIndex 65536 2025-05-07T08:56:17.956105Z node 1 :HIVE DEBUG: tx__create_tablet.cpp:440: HIVE#72057594037968897 THive::TTxCreateTablet::Execute; Default resources after merge for type DataShard: {} 2025-05-07T08:56:17.956564Z node 1 :HIVE DEBUG: tx__create_tablet.cpp:443: HIVE#72057594037968897 THive::TTxCreateTablet::Execute; Default resources after merge for object (72057594046644480,2): {} 2025-05-07T08:56:17.956666Z node 1 :HIVE DEBUG: tx__create_tablet.cpp:447: HIVE#72057594037968897 THive::TTxCreateTablet::Execute; Default resources after merge for profile 'default': {Memory: 1048576} 2025-05-07T08:56: ... s {} 2025-05-07T08:56:27.045317Z node 2 :TX_COORDINATOR DEBUG: coordinator__plan_step.cpp:184: Transaction 281474976715665 has been planned 2025-05-07T08:56:27.045459Z node 2 :TX_COORDINATOR DEBUG: coordinator__plan_step.cpp:197: Planned transaction 281474976715665 for mediator 72057594046382081 tablet 72057594046644480 2025-05-07T08:56:27.045524Z node 2 :TX_COORDINATOR DEBUG: coordinator__plan_step.cpp:197: Planned transaction 281474976715665 for mediator 72057594046382081 tablet 72075186224037889 2025-05-07T08:56:27.045856Z node 2 :TX_COORDINATOR TRACE: coordinator_impl.cpp:268: Coordinator# 72057594046316545 scheduling step 4000 in 0.500000s at 3.950000s 2025-05-07T08:56:27.046414Z node 2 :TX_COORDINATOR DEBUG: coordinator_impl.cpp:579: Send from# 72057594046316545 to mediator# 72057594046382081, step# 3500, txid# 281474976715665 marker# C2 2025-05-07T08:56:27.046504Z node 2 :TX_COORDINATOR DEBUG: coordinator_impl.cpp:424: tablet# 72057594046316545 txid# 281474976715665 stepId# 3500 Status# 17 SEND EvProposeTransactionStatus to# [2:409:2404] Proxy 2025-05-07T08:56:27.046969Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 3500, transactions count in step: 1, at schemeshard: 72057594046644480 2025-05-07T08:56:27.047660Z node 2 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715665 at step 3500 at tablet 72075186224037889 { Transactions { TxId: 281474976715665 AckTo { RawX1: 0 RawX2: 0 } } Step: 3500 MediatorID: 72057594046382081 TabletID: 72075186224037889 } 2025-05-07T08:56:27.047716Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-05-07T08:56:27.047955Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037889 2025-05-07T08:56:27.048008Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 active 0 active planned 0 immediate 0 planned 1 2025-05-07T08:56:27.048082Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [3500:281474976715665] in PlanQueue unit at 72075186224037889 2025-05-07T08:56:27.048289Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037889 loaded tx from db 
3500:281474976715665 keys extracted: 0 2025-05-07T08:56:27.048589Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-05-07T08:56:27.048843Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037889 2025-05-07T08:56:27.048951Z node 2 :TX_DATASHARD INFO: drop_table_unit.cpp:72: Trying to DROP TABLE at 72075186224037889 2025-05-07T08:56:27.049410Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-05-07T08:56:27.054073Z node 2 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037889 step# 3500} 2025-05-07T08:56:27.054182Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-05-07T08:56:27.054946Z node 2 :TX_COORDINATOR DEBUG: coordinator_impl.cpp:397: tablet# 72057594046316545 HANDLE EvMediatorQueueConfirmations MediatorId# 72057594046382081 2025-05-07T08:56:27.055085Z node 2 :TX_COORDINATOR DEBUG: coordinator__mediators_confirmations.cpp:84: at tablet# 72057594046316545 [2:20] persistent tx 281474976715665 for mediator 72057594046382081 tablet 72057594046644480 removed=1 2025-05-07T08:56:27.055155Z node 2 :TX_COORDINATOR DEBUG: coordinator__mediators_confirmations.cpp:84: at tablet# 72057594046316545 [2:20] persistent tx 281474976715665 for mediator 72057594046382081 tablet 72075186224037889 removed=1 2025-05-07T08:56:27.055194Z node 2 :TX_COORDINATOR DEBUG: coordinator__mediators_confirmations.cpp:91: at tablet# 72057594046316545 [2:20] persistent tx 281474976715665 for mediator 72057594046382081 acknowledged 2025-05-07T08:56:27.055247Z node 2 :TX_COORDINATOR DEBUG: coordinator__mediators_confirmations.cpp:99: at tablet# 72057594046316545 [2:20] persistent tx 281474976715665 acknowledged 2025-05-07T08:56:27.055636Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-05-07T08:56:27.055719Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [3500 : 281474976715665] from 72075186224037889 at tablet 72075186224037889 send result to client [2:409:2404], exec latency: 0 ms, propose latency: 0 ms 2025-05-07T08:56:27.055786Z node 2 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037889 Sending notify to schemeshard 72057594046644480 txId 281474976715665 state PreOffline TxInFly 0 2025-05-07T08:56:27.055890Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-05-07T08:56:27.056799Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:1103: All parts have reached barrier, tx: 281474976715665, done: 0, blocked: 1 2025-05-07T08:56:27.082271Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715665 datashard 72075186224037889 state PreOffline 2025-05-07T08:56:27.082395Z node 2 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037889 Got TEvSchemaChangedResult from SS at 72075186224037889 2025-05-07T08:56:27.083095Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 281474976715665:0 2025-05-07T08:56:27.083243Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 
281474976715665, publications: 2, subscribers: 1 2025-05-07T08:56:27.084241Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046644480, txId: 281474976715665, subscribers: 1 2025-05-07T08:56:27.085190Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037889 2025-05-07T08:56:27.085915Z node 2 :KQP_YQL INFO: log.cpp:64: SessionId: ydb://session/3?node_id=2&id=YTFmNDgwNjctNzQ3MmRhMDUtMjc0NmIzNzAtZmE3OTIzY2Y= 2025-05-07 08:56:27.085 INFO ydb-core-tx-datashard-ut_minstep(pid=221765, tid=0x00007FC31F1ADD00) [core exec] yql_execution.cpp:133: Completed async execution for node #42 2025-05-07T08:56:27.094656Z node 2 :KQP_YQL INFO: log.cpp:64: SessionId: ydb://session/3?node_id=2&id=YTFmNDgwNjctNzQ3MmRhMDUtMjc0NmIzNzAtZmE3OTIzY2Y= 2025-05-07 08:56:27.094 INFO ydb-core-tx-datashard-ut_minstep(pid=221765, tid=0x00007FC31F1ADD00) [core exec] yql_execution.cpp:153: State is ExecutionComplete after apply async changes for node #42 2025-05-07T08:56:27.094823Z node 2 :KQP_YQL INFO: log.cpp:64: SessionId: ydb://session/3?node_id=2&id=YTFmNDgwNjctNzQ3MmRhMDUtMjc0NmIzNzAtZmE3OTIzY2Y= 2025-05-07 08:56:27.094 INFO ydb-core-tx-datashard-ut_minstep(pid=221765, tid=0x00007FC31F1ADD00) [core exec] yql_execution.cpp:59: Begin, root #43 2025-05-07T08:56:27.094895Z node 2 :KQP_YQL INFO: log.cpp:64: SessionId: ydb://session/3?node_id=2&id=YTFmNDgwNjctNzQ3MmRhMDUtMjc0NmIzNzAtZmE3OTIzY2Y= 2025-05-07 08:56:27.094 INFO ydb-core-tx-datashard-ut_minstep(pid=221765, tid=0x00007FC31F1ADD00) [core exec] yql_execution.cpp:72: Collect unused nodes for root #43, status: Ok 2025-05-07T08:56:27.094967Z node 2 :KQP_YQL TRACE: log.cpp:64: SessionId: ydb://session/3?node_id=2&id=YTFmNDgwNjctNzQ3MmRhMDUtMjc0NmIzNzAtZmE3OTIzY2Y= 2025-05-07 08:56:27.094 TRACE ydb-core-tx-datashard-ut_minstep(pid=221765, tid=0x00007FC31F1ADD00) [core exec] yql_execution.cpp:387: {0}, callable #43 2025-05-07T08:56:27.095063Z node 2 :KQP_YQL INFO: log.cpp:64: SessionId: ydb://session/3?node_id=2&id=YTFmNDgwNjctNzQ3MmRhMDUtMjc0NmIzNzAtZmE3OTIzY2Y= 2025-05-07 08:56:27.095 INFO ydb-core-tx-datashard-ut_minstep(pid=221765, tid=0x00007FC31F1ADD00) [core exec] yql_execution.cpp:577: Node #43 finished execution 2025-05-07T08:56:27.095164Z node 2 :KQP_YQL INFO: log.cpp:64: SessionId: ydb://session/3?node_id=2&id=YTFmNDgwNjctNzQ3MmRhMDUtMjc0NmIzNzAtZmE3OTIzY2Y= 2025-05-07 08:56:27.095 INFO ydb-core-tx-datashard-ut_minstep(pid=221765, tid=0x00007FC31F1ADD00) [core exec] yql_execution.cpp:594: Node #43 created 0 trackable nodes: 2025-05-07T08:56:27.095233Z node 2 :KQP_YQL INFO: log.cpp:64: SessionId: ydb://session/3?node_id=2&id=YTFmNDgwNjctNzQ3MmRhMDUtMjc0NmIzNzAtZmE3OTIzY2Y= 2025-05-07 08:56:27.095 INFO ydb-core-tx-datashard-ut_minstep(pid=221765, tid=0x00007FC31F1ADD00) [core exec] yql_execution.cpp:87: Finish, output #43, status: Ok 2025-05-07T08:56:27.095310Z node 2 :KQP_YQL INFO: log.cpp:64: SessionId: ydb://session/3?node_id=2&id=YTFmNDgwNjctNzQ3MmRhMDUtMjc0NmIzNzAtZmE3OTIzY2Y= 2025-05-07 08:56:27.095 INFO ydb-core-tx-datashard-ut_minstep(pid=221765, tid=0x00007FC31F1ADD00) [core exec] yql_execution.cpp:93: Creating finalizing transformer, output #43 2025-05-07T08:56:27.095645Z node 2 :KQP_YQL NOTICE: log.cpp:64: SessionId: ydb://session/3?node_id=2&id=YTFmNDgwNjctNzQ3MmRhMDUtMjc0NmIzNzAtZmE3OTIzY2Y= 2025-05-07 08:56:27.095 NOTE 
ydb-core-tx-datashard-ut_minstep(pid=221765, tid=0x00007FC31F1ADD00) [common provider] yql_provider_gateway.cpp:21:
: Info: Execution, code: 1060
2025-05-07T08:56:27.095726Z node 2 :KQP_YQL NOTICE: log.cpp:64: SessionId: ydb://session/3?node_id=2&id=YTFmNDgwNjctNzQ3MmRhMDUtMjc0NmIzNzAtZmE3OTIzY2Y= 2025-05-07 08:56:27.095 NOTE ydb-core-tx-datashard-ut_minstep(pid=221765, tid=0x00007FC31F1ADD00) [common provider] yql_provider_gateway.cpp:21:
:1:12: Info: Executing DROP TABLE
2025-05-07T08:56:27.095785Z node 2 :KQP_YQL NOTICE: log.cpp:64: SessionId: ydb://session/3?node_id=2&id=YTFmNDgwNjctNzQ3MmRhMDUtMjc0NmIzNzAtZmE3OTIzY2Y= 2025-05-07 08:56:27.095 NOTE ydb-core-tx-datashard-ut_minstep(pid=221765, tid=0x00007FC31F1ADD00) [common provider] yql_provider_gateway.cpp:21:
: Info: Success, code: 4 2025-05-07T08:56:27.114660Z node 2 :TX_DATASHARD DEBUG: datashard_loans.cpp:220: 72075186224037889 in PreOffline state HasSharedBobs: 0 SchemaOperations: [ ] OutReadSets count: 0 ChangesQueue size: 0 ChangeExchangeSplit: 1 siblings to be activated: wait to activation from: 2025-05-07T08:56:27.114891Z node 2 :TX_DATASHARD INFO: datashard_loans.cpp:177: 72075186224037889 Initiating switch from PreOffline to Offline state 2025-05-07T08:56:27.117693Z node 2 :TX_DATASHARD INFO: datashard_impl.h:3307: 72075186224037889 Reporting state Offline to schemeshard 72057594046644480 2025-05-07T08:56:27.119099Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2962: Handle TEvStateChangedResult datashard 72075186224037889 state Offline 2025-05-07T08:56:27.119632Z node 2 :HIVE DEBUG: tx__delete_tablet.cpp:74: HIVE#72057594037968897 THive::TTxDeleteTablet::Execute() ShardOwnerId: 72057594046644480 ShardLocalIdx: 2 TxId_Deprecated: 2 TabletID: 72075186224037889 2025-05-07T08:56:27.119714Z node 2 :HIVE DEBUG: tx__delete_tablet.cpp:19: HIVE#72057594037968897 THive::TTxDeleteTablet::Execute Tablet 72075186224037889 2025-05-07T08:56:27.119843Z node 2 :HIVE DEBUG: tablet_info.cpp:123: HIVE#72057594037968897 Tablet(DataShard.72075186224037889.Leader.1) VolatileState: Running -> Stopped (Node 2) 2025-05-07T08:56:27.120006Z node 2 :HIVE DEBUG: tablet_info.cpp:523: HIVE#72057594037968897 Sending TEvStopTablet(DataShard.72075186224037889.Leader.1 gen 1) to node 2 2025-05-07T08:56:27.120149Z node 2 :HIVE DEBUG: tx__delete_tablet.cpp:67: HIVE#72057594037968897 THive::TTxDeleteTablet::Execute() result Status: OK Origin: 72057594037968897 TxId_Deprecated: 2 ShardOwnerId: 72057594046644480 ShardLocalIdx: 2 >> ResourcePoolClassifiersSysView::TestResourcePoolClassifiersSysViewOnServerless [GOOD] >> ResourcePoolClassifiersSysView::TestResourcePoolClassifiersSysViewFilters >> TPartBtreeIndexIteration::FewNodes_Groups_History_Slices_Sticky [GOOD] >> TPartGroupBtreeIndexIter::NoNodes [GOOD] >> TPartGroupBtreeIndexIter::OneNode >> KqpPg::PgUpdateCompoundKey-useSink [GOOD] >> TPartGroupBtreeIndexIter::OneNode [GOOD] >> TPartGroupBtreeIndexIter::FewNodes >> TPartGroupBtreeIndexIter::FewNodes [GOOD] >> TPartMulti::Basics [GOOD] >> TPartMulti::BasicsReverse >> TPartMulti::BasicsReverse [GOOD] >> TPartSlice::SimpleMerge [GOOD] >> TPartSlice::ComplexMerge [GOOD] >> TPartSlice::LongTailMerge [GOOD] >> TPartSlice::CutSingle [GOOD] >> TPartSlice::CutMulti [GOOD] >> TPartSlice::LookupBasics [GOOD] >> TPartSlice::LookupFull [GOOD] >> TPartSlice::EqualByRowId [GOOD] >> TPartSlice::ParallelCompactions [GOOD] |90.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/load_test/ut/unittest >> SystemView::QueryStats [GOOD] >> SystemView::QueryStatsFields >> GroupWriteTest::WithRead >> TDataShardMinStepTest::TestDropTablePlanComesNotTooEarlyRW-VolatileTxs [GOOD] >> TCdcStreamTests::CheckSchemeLimits [GOOD] >> TCdcStreamTests::MeteringServerless >> TFlatExecutorLeases::BasicsInitialLeaseSleepTimeout [GOOD] >> TFlatTableDatetime::TestDate >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeInvalid [GOOD] >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeTable >> TFlatTableDatetime::TestDate [GOOD] >> TFlatTableExecutor_BackgroundCompactions::TestRunBackgroundSnapshot [GOOD] >> TFlatTableExecutor_BackgroundCompactions::TestChangeBackgroundSnapshotToRegular >> TFlatTableExecutor_BackgroundCompactions::TestChangeBackgroundSnapshotToRegular [GOOD] >> TFlatTableExecutor_BackgroundCompactions::TestRunBackgroundCompactionGen1 >> 
TFlatTableExecutor_BackgroundCompactions::TestRunBackgroundCompactionGen1 [GOOD] >> TFlatTableExecutor_BackgroundCompactions::TestChangeBackgroundCompactionToRegular >> TFlatTableExecutor_BackgroundCompactions::TestChangeBackgroundCompactionToRegular [GOOD] >> TFlatTableExecutor_BackgroundCompactions::TestRunBackgroundCompactionGen2 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_minstep/unittest >> TDataShardMinStepTest::TestDropTablePlanComesNotTooEarlyRW-VolatileTxs [GOOD] Test command err: 2025-05-07T08:56:14.722788Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:105:2151], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:56:14.722953Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:56:14.723273Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003765/r3tmp/tmpdc83uZ/pdisk_1.dat 2025-05-07T08:56:15.211188Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //Root, opId: 1:0, at schemeshard: 72057594046644480 2025-05-07T08:56:15.241340Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-05-07T08:56:15.243018Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-05-07T08:56:15.244038Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //Root 2025-05-07T08:56:15.246609Z node 1 :TX_COORDINATOR DEBUG: coordinator_impl.cpp:183: tablet# 72057594046316545 txid# 1 HANDLE EvProposeTransaction marker# C0 2025-05-07T08:56:15.246674Z node 1 :TX_COORDINATOR DEBUG: coordinator_impl.cpp:29: tablet# 72057594046316545 txid# 1 step# 500 Status# 16 SEND to# [1:410:2405] Proxy marker# C1 2025-05-07T08:56:15.273425Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:56:15.275188Z node 1 :HIVE DEBUG: hive_impl.cpp:2249: HIVE#72057594037968897 Merged config: { } 2025-05-07T08:56:15.334974Z node 1 :HIVE DEBUG: hive_impl.cpp:141: HIVE#72057594037968897 Handle TEvLocal::TEvRegisterNode from [1:336:2375] HiveId: 72057594037968897 ServicedDomains { SchemeShard: 72057594046644480 PathId: 1 } TabletAvailability { Type: Mediator Priority: 0 } TabletAvailability { Type: Dummy Priority: 0 } TabletAvailability { Type: KeyValue Priority: 0 } TabletAvailability { Type: Coordinator Priority: 0 } TabletAvailability { Type: Hive Priority: 0 } TabletAvailability { Type: SchemeShard Priority: 0 } TabletAvailability { Type: DataShard Priority: 0 } TabletAvailability { Type: PersQueue Priority: 0 } TabletAvailability { Type: PersQueueReadBalancer Priority: 0 } TabletAvailability { Type: Kesus Priority: 0 } TabletAvailability { Type: SysViewProcessor Priority: 0 } TabletAvailability { Type: ColumnShard Priority: 0 } TabletAvailability { Type: SequenceShard Priority: 0 } TabletAvailability { Type: ReplicationController Priority: 0 } TabletAvailability { Type: StatisticsAggregator Priority: 0 } 2025-05-07T08:56:15.335140Z node 1 :HIVE DEBUG: tx__register_node.cpp:21: HIVE#72057594037968897 THive::TTxRegisterNode(1)::Execute 2025-05-07T08:56:15.335249Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:56:15.335282Z node 1 :HIVE DEBUG: hive_impl.cpp:361: HIVE#72057594037968897 ProcessWaitQueue (0) 2025-05-07T08:56:15.335314Z node 1 :HIVE DEBUG: hive_impl.cpp:342: 
HIVE#72057594037968897 ProcessBootQueue (0) 2025-05-07T08:56:15.335351Z node 1 :HIVE DEBUG: hive_impl.cpp:361: HIVE#72057594037968897 ProcessWaitQueue (0) 2025-05-07T08:56:15.335399Z node 1 :HIVE DEBUG: hive_impl.cpp:342: HIVE#72057594037968897 ProcessBootQueue (0) 2025-05-07T08:56:15.335495Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:56:15.335895Z node 1 :HIVE DEBUG: tx__process_boot_queue.cpp:18: HIVE#72057594037968897 THive::TTxProcessBootQueue()::Execute 2025-05-07T08:56:15.335969Z node 1 :HIVE DEBUG: hive_impl.cpp:222: HIVE#72057594037968897 Handle ProcessBootQueue (size: 0) 2025-05-07T08:56:15.336003Z node 1 :HIVE DEBUG: hive_impl.cpp:225: HIVE#72057594037968897 Handle ProcessWaitQueue (size: 0) 2025-05-07T08:56:15.336037Z node 1 :HIVE DEBUG: hive_impl.cpp:302: HIVE#72057594037968897 ProcessBootQueue - BootQueue empty (WaitQueue: 0) 2025-05-07T08:56:15.336171Z node 1 :HIVE DEBUG: hive_impl.cpp:798: HIVE#72057594037968897 TEvInterconnect::TEvNodeInfo NodeId 1 Location DataCenter: "1" Module: "1" Rack: "1" Unit: "1" 2025-05-07T08:56:15.347271Z node 1 :HIVE DEBUG: tx__register_node.cpp:88: HIVE#72057594037968897 THive::TTxRegisterNode(1)::Complete 2025-05-07T08:56:15.347364Z node 1 :HIVE DEBUG: node_info.cpp:353: HIVE#72057594037968897 Node(1) Ping([1:336:2375]) 2025-05-07T08:56:15.347447Z node 1 :HIVE DEBUG: tx__process_boot_queue.cpp:26: HIVE#72057594037968897 THive::TTxProcessBootQueue()::Complete 2025-05-07T08:56:15.347856Z node 1 :HIVE DEBUG: hive_impl.cpp:727: HIVE#72057594037968897 THive::Handle::TEvSyncTablets 2025-05-07T08:56:15.347920Z node 1 :HIVE DEBUG: tx__sync_tablets.cpp:41: HIVE#72057594037968897 THive::TTxSyncTablets([1:336:2375])::Execute 2025-05-07T08:56:15.347953Z node 1 :HIVE DEBUG: hive_impl.cpp:342: HIVE#72057594037968897 ProcessBootQueue (0) 2025-05-07T08:56:15.348029Z node 1 :HIVE DEBUG: tx__sync_tablets.cpp:130: HIVE#72057594037968897 THive::TTxSyncTablets([1:336:2375])::Complete 2025-05-07T08:56:15.348216Z node 1 :HIVE DEBUG: hive_impl.cpp:721: HIVE#72057594037968897 Handle TEvLocal::TEvStatus for Node 1: Status: 0 StartTime: 0 ResourceMaximum { Memory: 270443335680 } 2025-05-07T08:56:15.348283Z node 1 :HIVE DEBUG: tx__status.cpp:22: HIVE#72057594037968897 THive::TTxStatus(1)::Execute 2025-05-07T08:56:15.348348Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:56:15.348566Z node 1 :HIVE DEBUG: hive_impl.cpp:2765: HIVE#72057594037968897 AddRegisteredDataCentersNode(1, 1) 2025-05-07T08:56:15.348634Z node 1 :HIVE DEBUG: hive_impl.cpp:361: HIVE#72057594037968897 ProcessWaitQueue (0) 2025-05-07T08:56:15.348676Z node 1 :HIVE DEBUG: hive_impl.cpp:342: HIVE#72057594037968897 ProcessBootQueue (0) 2025-05-07T08:56:15.348898Z node 1 :HIVE DEBUG: tx__process_boot_queue.cpp:18: HIVE#72057594037968897 THive::TTxProcessBootQueue()::Execute 2025-05-07T08:56:15.348968Z node 1 :HIVE DEBUG: hive_impl.cpp:222: HIVE#72057594037968897 Handle ProcessBootQueue (size: 0) 2025-05-07T08:56:15.349006Z node 1 :HIVE DEBUG: hive_impl.cpp:225: HIVE#72057594037968897 Handle ProcessWaitQueue (size: 0) 2025-05-07T08:56:15.349041Z node 1 :HIVE DEBUG: hive_impl.cpp:302: HIVE#72057594037968897 ProcessBootQueue - BootQueue empty (WaitQueue: 0) 2025-05-07T08:56:15.361125Z node 1 :HIVE DEBUG: tx__status.cpp:65: HIVE#72057594037968897 THive::TTxStatus(1)::Complete 2025-05-07T08:56:15.361230Z node 1 :HIVE DEBUG: 
tx__process_boot_queue.cpp:26: HIVE#72057594037968897 THive::TTxProcessBootQueue()::Complete 2025-05-07T08:56:15.426253Z node 1 :TX_COORDINATOR DEBUG: coordinator__plan_step.cpp:184: Transaction 1 has been planned 2025-05-07T08:56:15.426383Z node 1 :TX_COORDINATOR DEBUG: coordinator__plan_step.cpp:197: Planned transaction 1 for mediator 72057594046382081 tablet 72057594046644480 2025-05-07T08:56:15.426756Z node 1 :TX_COORDINATOR TRACE: coordinator_impl.cpp:268: Coordinator# 72057594046316545 scheduling step 1000 in 0.500000s at 0.950000s 2025-05-07T08:56:15.427138Z node 1 :TX_COORDINATOR DEBUG: coordinator_impl.cpp:579: Send from# 72057594046316545 to mediator# 72057594046382081, step# 500, txid# 1 marker# C2 2025-05-07T08:56:15.427246Z node 1 :TX_COORDINATOR DEBUG: coordinator_impl.cpp:424: tablet# 72057594046316545 txid# 1 stepId# 500 Status# 17 SEND EvProposeTransactionStatus to# [1:410:2405] Proxy 2025-05-07T08:56:15.428243Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 500, transactions count in step: 1, at schemeshard: 72057594046644480 2025-05-07T08:56:15.429704Z node 1 :TX_COORDINATOR DEBUG: coordinator_impl.cpp:397: tablet# 72057594046316545 HANDLE EvMediatorQueueConfirmations MediatorId# 72057594046382081 2025-05-07T08:56:15.429822Z node 1 :TX_COORDINATOR DEBUG: coordinator__mediators_confirmations.cpp:84: at tablet# 72057594046316545 [2:6] persistent tx 1 for mediator 72057594046382081 tablet 72057594046644480 removed=1 2025-05-07T08:56:15.429867Z node 1 :TX_COORDINATOR DEBUG: coordinator__mediators_confirmations.cpp:91: at tablet# 72057594046316545 [2:6] persistent tx 1 for mediator 72057594046382081 acknowledged 2025-05-07T08:56:15.429914Z node 1 :TX_COORDINATOR DEBUG: coordinator__mediators_confirmations.cpp:99: at tablet# 72057594046316545 [2:6] persistent tx 1 acknowledged 2025-05-07T08:56:15.430910Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 1:0 2025-05-07T08:56:15.430998Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 1, publications: 1, subscribers: 1 2025-05-07T08:56:15.432118Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046644480, txId: 1, subscribers: 1 2025-05-07T08:56:15.435534Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_table.cpp:426: TCreateTable Propose, path: /Root/table-1, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-05-07T08:56:15.436827Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 281474976715657:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-05-07T08:56:15.436923Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-05-07T08:56:15.438008Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976715657, database: /Root, subject: , status: StatusAccepted, operation: CREATE TABLE, path: /Root/table-1 2025-05-07T08:56:15.442504Z node 1 :HIVE DEBUG: hive_impl.cpp:34: HIVE#72057594037968897 Handle TEvHive::TEvCreateTablet(DataShard(72057594046644480,1)) 2025-05-07T08:56:15.456708Z node 1 :HIVE DEBUG: 
tx__create_tablet.cpp:200: HIVE#72057594037968897 THive::TTxCreateTablet::Execute Owner: 72057594046644480 OwnerIdx: 1 TabletType: DataShard ObjectDomain { SchemeShard: 72057594046644480 PathId: 1 } ObjectId: 2 BindedChannels { StoragePoolName: "/Root:test" } BindedChannels { StoragePoolName: "/Root:test" } BindedChannels { StoragePoolName: "/Root:test" } AllowedDomains { SchemeShard: 72057594046644480 PathId: 1 } 2025-05-07T08:56:15.456841Z node 1 :HIVE DEBUG: tx__create_tablet.cpp:348: HIVE#72057594037968897 Hive 72057594037968897 allocated TabletId 72075186224037888 from TabletIdIndex 65536 2025-05-07T08:56:15.457114Z node 1 :HIVE DEBUG: tx__create_tablet.cpp:440: HIVE#72057594037968897 THive::TTxCreateTablet::Execute; Default resources after merge for type DataShard: {} 2025-05-07T08:56:15.457198Z node 1 :HIVE DEBUG: tx__create_tablet.cpp:443: HIVE#72057594037968897 THive::TTxCreateTablet::Execute; Default resources after merge for object (72057594046644480,2): {} 2025-05-07T08:56:15.457267Z node 1 :HIVE DEBUG: tx__create_tablet.cpp:447: HIVE#72057594037968897 THive::TTxCreateTablet::Execute; Default resources after merge for profile 'default': {Memory: 1048576} 2025-05-07T08:56: ... 5-07T08:56:30.679214Z node 2 :TX_COORDINATOR DEBUG: coordinator__plan_step.cpp:184: Transaction 281474976715665 has been planned 2025-05-07T08:56:30.679341Z node 2 :TX_COORDINATOR DEBUG: coordinator__plan_step.cpp:197: Planned transaction 281474976715665 for mediator 72057594046382081 tablet 72057594046644480 2025-05-07T08:56:30.679384Z node 2 :TX_COORDINATOR DEBUG: coordinator__plan_step.cpp:197: Planned transaction 281474976715665 for mediator 72057594046382081 tablet 72075186224037889 2025-05-07T08:56:30.679676Z node 2 :TX_COORDINATOR TRACE: coordinator_impl.cpp:268: Coordinator# 72057594046316545 scheduling step 33500 in 0.500000s at 33.450000s 2025-05-07T08:56:30.680111Z node 2 :TX_COORDINATOR DEBUG: coordinator_impl.cpp:579: Send from# 72057594046316545 to mediator# 72057594046382081, step# 33000, txid# 281474976715665 marker# C2 2025-05-07T08:56:30.680191Z node 2 :TX_COORDINATOR DEBUG: coordinator_impl.cpp:424: tablet# 72057594046316545 txid# 281474976715665 stepId# 33000 Status# 17 SEND EvProposeTransactionStatus to# [2:409:2404] Proxy 2025-05-07T08:56:30.680983Z node 2 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715665 at step 33000 at tablet 72075186224037889 { Transactions { TxId: 281474976715665 AckTo { RawX1: 0 RawX2: 0 } } Step: 33000 MediatorID: 72057594046382081 TabletID: 72075186224037889 } 2025-05-07T08:56:30.681047Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-05-07T08:56:30.681250Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 33000, transactions count in step: 1, at schemeshard: 72057594046644480 2025-05-07T08:56:30.681836Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037889 2025-05-07T08:56:30.681899Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 active 0 active planned 0 immediate 0 planned 1 2025-05-07T08:56:30.681954Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [33000:281474976715665] in PlanQueue unit at 72075186224037889 2025-05-07T08:56:30.682566Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037889 loaded tx from db 
33000:281474976715665 keys extracted: 0 2025-05-07T08:56:30.682729Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-05-07T08:56:30.682915Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037889 2025-05-07T08:56:30.682997Z node 2 :TX_DATASHARD INFO: drop_table_unit.cpp:72: Trying to DROP TABLE at 72075186224037889 2025-05-07T08:56:30.683508Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-05-07T08:56:30.685406Z node 2 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037889 step# 33000} 2025-05-07T08:56:30.685480Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-05-07T08:56:30.686287Z node 2 :TX_COORDINATOR DEBUG: coordinator_impl.cpp:397: tablet# 72057594046316545 HANDLE EvMediatorQueueConfirmations MediatorId# 72057594046382081 2025-05-07T08:56:30.686396Z node 2 :TX_COORDINATOR DEBUG: coordinator__mediators_confirmations.cpp:84: at tablet# 72057594046316545 [2:49] persistent tx 281474976715665 for mediator 72057594046382081 tablet 72057594046644480 removed=1 2025-05-07T08:56:30.686447Z node 2 :TX_COORDINATOR DEBUG: coordinator__mediators_confirmations.cpp:84: at tablet# 72057594046316545 [2:49] persistent tx 281474976715665 for mediator 72057594046382081 tablet 72075186224037889 removed=1 2025-05-07T08:56:30.686487Z node 2 :TX_COORDINATOR DEBUG: coordinator__mediators_confirmations.cpp:91: at tablet# 72057594046316545 [2:49] persistent tx 281474976715665 for mediator 72057594046382081 acknowledged 2025-05-07T08:56:30.686538Z node 2 :TX_COORDINATOR DEBUG: coordinator__mediators_confirmations.cpp:99: at tablet# 72057594046316545 [2:49] persistent tx 281474976715665 acknowledged 2025-05-07T08:56:30.686879Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-05-07T08:56:30.686952Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [33000 : 281474976715665] from 72075186224037889 at tablet 72075186224037889 send result to client [2:409:2404], exec latency: 0 ms, propose latency: 0 ms 2025-05-07T08:56:30.687017Z node 2 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037889 Sending notify to schemeshard 72057594046644480 txId 281474976715665 state PreOffline TxInFly 0 2025-05-07T08:56:30.687148Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-05-07T08:56:30.688011Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:1103: All parts have reached barrier, tx: 281474976715665, done: 0, blocked: 1 2025-05-07T08:56:30.691234Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715665 datashard 72075186224037889 state PreOffline 2025-05-07T08:56:30.691325Z node 2 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037889 Got TEvSchemaChangedResult from SS at 72075186224037889 2025-05-07T08:56:30.691938Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 281474976715665:0 2025-05-07T08:56:30.692059Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, 
tx: 281474976715665, publications: 2, subscribers: 1 2025-05-07T08:56:30.693384Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046644480, txId: 281474976715665, subscribers: 1 2025-05-07T08:56:30.693888Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037889 2025-05-07T08:56:30.695053Z node 2 :KQP_YQL INFO: log.cpp:64: SessionId: ydb://session/3?node_id=2&id=NGNkNzQ1OTctODZjYWYxZGUtZmRjYWJlZmEtNzM1MzEzZTU= 2025-05-07 08:56:30.695 INFO ydb-core-tx-datashard-ut_minstep(pid=221288, tid=0x00007F7D271E4D00) [core exec] yql_execution.cpp:133: Completed async execution for node #42 2025-05-07T08:56:30.695210Z node 2 :KQP_YQL INFO: log.cpp:64: SessionId: ydb://session/3?node_id=2&id=NGNkNzQ1OTctODZjYWYxZGUtZmRjYWJlZmEtNzM1MzEzZTU= 2025-05-07 08:56:30.695 INFO ydb-core-tx-datashard-ut_minstep(pid=221288, tid=0x00007F7D271E4D00) [core exec] yql_execution.cpp:153: State is ExecutionComplete after apply async changes for node #42 2025-05-07T08:56:30.695293Z node 2 :KQP_YQL INFO: log.cpp:64: SessionId: ydb://session/3?node_id=2&id=NGNkNzQ1OTctODZjYWYxZGUtZmRjYWJlZmEtNzM1MzEzZTU= 2025-05-07 08:56:30.695 INFO ydb-core-tx-datashard-ut_minstep(pid=221288, tid=0x00007F7D271E4D00) [core exec] yql_execution.cpp:59: Begin, root #43 2025-05-07T08:56:30.695345Z node 2 :KQP_YQL INFO: log.cpp:64: SessionId: ydb://session/3?node_id=2&id=NGNkNzQ1OTctODZjYWYxZGUtZmRjYWJlZmEtNzM1MzEzZTU= 2025-05-07 08:56:30.695 INFO ydb-core-tx-datashard-ut_minstep(pid=221288, tid=0x00007F7D271E4D00) [core exec] yql_execution.cpp:72: Collect unused nodes for root #43, status: Ok 2025-05-07T08:56:30.695399Z node 2 :KQP_YQL TRACE: log.cpp:64: SessionId: ydb://session/3?node_id=2&id=NGNkNzQ1OTctODZjYWYxZGUtZmRjYWJlZmEtNzM1MzEzZTU= 2025-05-07 08:56:30.695 TRACE ydb-core-tx-datashard-ut_minstep(pid=221288, tid=0x00007F7D271E4D00) [core exec] yql_execution.cpp:387: {0}, callable #43 2025-05-07T08:56:30.695479Z node 2 :KQP_YQL INFO: log.cpp:64: SessionId: ydb://session/3?node_id=2&id=NGNkNzQ1OTctODZjYWYxZGUtZmRjYWJlZmEtNzM1MzEzZTU= 2025-05-07 08:56:30.695 INFO ydb-core-tx-datashard-ut_minstep(pid=221288, tid=0x00007F7D271E4D00) [core exec] yql_execution.cpp:577: Node #43 finished execution 2025-05-07T08:56:30.695569Z node 2 :KQP_YQL INFO: log.cpp:64: SessionId: ydb://session/3?node_id=2&id=NGNkNzQ1OTctODZjYWYxZGUtZmRjYWJlZmEtNzM1MzEzZTU= 2025-05-07 08:56:30.695 INFO ydb-core-tx-datashard-ut_minstep(pid=221288, tid=0x00007F7D271E4D00) [core exec] yql_execution.cpp:594: Node #43 created 0 trackable nodes: 2025-05-07T08:56:30.695623Z node 2 :KQP_YQL INFO: log.cpp:64: SessionId: ydb://session/3?node_id=2&id=NGNkNzQ1OTctODZjYWYxZGUtZmRjYWJlZmEtNzM1MzEzZTU= 2025-05-07 08:56:30.695 INFO ydb-core-tx-datashard-ut_minstep(pid=221288, tid=0x00007F7D271E4D00) [core exec] yql_execution.cpp:87: Finish, output #43, status: Ok 2025-05-07T08:56:30.695673Z node 2 :KQP_YQL INFO: log.cpp:64: SessionId: ydb://session/3?node_id=2&id=NGNkNzQ1OTctODZjYWYxZGUtZmRjYWJlZmEtNzM1MzEzZTU= 2025-05-07 08:56:30.695 INFO ydb-core-tx-datashard-ut_minstep(pid=221288, tid=0x00007F7D271E4D00) [core exec] yql_execution.cpp:93: Creating finalizing transformer, output #43 2025-05-07T08:56:30.695849Z node 2 :KQP_YQL NOTICE: log.cpp:64: SessionId: ydb://session/3?node_id=2&id=NGNkNzQ1OTctODZjYWYxZGUtZmRjYWJlZmEtNzM1MzEzZTU= 2025-05-07 08:56:30.695 NOTE 
ydb-core-tx-datashard-ut_minstep(pid=221288, tid=0x00007F7D271E4D00) [common provider] yql_provider_gateway.cpp:21:
: Info: Execution, code: 1060 2025-05-07T08:56:30.695907Z node 2 :KQP_YQL NOTICE: log.cpp:64: SessionId: ydb://session/3?node_id=2&id=NGNkNzQ1OTctODZjYWYxZGUtZmRjYWJlZmEtNzM1MzEzZTU= 2025-05-07 08:56:30.695 NOTE ydb-core-tx-datashard-ut_minstep(pid=221288, tid=0x00007F7D271E4D00) [common provider] yql_provider_gateway.cpp:21:
:1:12: Info: Executing DROP TABLE 2025-05-07T08:56:30.695953Z node 2 :KQP_YQL NOTICE: log.cpp:64: SessionId: ydb://session/3?node_id=2&id=NGNkNzQ1OTctODZjYWYxZGUtZmRjYWJlZmEtNzM1MzEzZTU= 2025-05-07 08:56:30.695 NOTE ydb-core-tx-datashard-ut_minstep(pid=221288, tid=0x00007F7D271E4D00) [common provider] yql_provider_gateway.cpp:21:
: Info: Success, code: 4 2025-05-07T08:56:30.710020Z node 2 :TX_DATASHARD DEBUG: datashard_loans.cpp:220: 72075186224037889 in PreOffline state HasSharedBobs: 0 SchemaOperations: [ ] OutReadSets count: 0 ChangesQueue size: 0 ChangeExchangeSplit: 1 siblings to be activated: wait to activation from: 2025-05-07T08:56:30.710311Z node 2 :TX_DATASHARD INFO: datashard_loans.cpp:177: 72075186224037889 Initiating switch from PreOffline to Offline state 2025-05-07T08:56:30.712461Z node 2 :TX_DATASHARD INFO: datashard_impl.h:3307: 72075186224037889 Reporting state Offline to schemeshard 72057594046644480 2025-05-07T08:56:30.713523Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2962: Handle TEvStateChangedResult datashard 72075186224037889 state Offline 2025-05-07T08:56:30.714101Z node 2 :HIVE DEBUG: tx__delete_tablet.cpp:74: HIVE#72057594037968897 THive::TTxDeleteTablet::Execute() ShardOwnerId: 72057594046644480 ShardLocalIdx: 2 TxId_Deprecated: 2 TabletID: 72075186224037889 2025-05-07T08:56:30.714166Z node 2 :HIVE DEBUG: tx__delete_tablet.cpp:19: HIVE#72057594037968897 THive::TTxDeleteTablet::Execute Tablet 72075186224037889 2025-05-07T08:56:30.714292Z node 2 :HIVE DEBUG: tablet_info.cpp:123: HIVE#72057594037968897 Tablet(DataShard.72075186224037889.Leader.1) VolatileState: Running -> Stopped (Node 2) 2025-05-07T08:56:30.714422Z node 2 :HIVE DEBUG: tablet_info.cpp:523: HIVE#72057594037968897 Sending TEvStopTablet(DataShard.72075186224037889.Leader.1 gen 1) to node 2 2025-05-07T08:56:30.714550Z node 2 :HIVE DEBUG: tx__delete_tablet.cpp:67: HIVE#72057594037968897 THive::TTxDeleteTablet::Execute() result Status: OK Origin: 72057594037968897 TxId_Deprecated: 2 ShardOwnerId: 72057594046644480 ShardLocalIdx: 2 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tablet_flat/ut/unittest >> TPartSlice::ParallelCompactions [GOOD] Test command err: ======= CUT ======= Part{[1:2:3:0:0:0:0] eph 0, 346b 12r} data 755b + FlatIndex{4} Label{3 rev 3, 172b} 5 rec | Page Row Bytes (Uint32, String) | 0 0 86b {1, aaa} | 1 3 88b {1, b} | 2 6 86b {2, NULL} | 3 9 86b {2, ccx} | 3 11 86b {2, cxz} + BTreeIndex{PageId: 5 RowCount: 12 DataSize: 346 ErasedRowCount: 0} Label{13 rev 1, 208b} | PageId: 0 RowCount: 3 DataSize: 86 ErasedRowCount: 0 | > {1, b} | PageId: 1 RowCount: 6 DataSize: 174 ErasedRowCount: 0 | > {2, NULL} | PageId: 2 RowCount: 9 DataSize: 260 ErasedRowCount: 0 | > {2, ccx} | PageId: 3 RowCount: 12 DataSize: 346 ErasedRowCount: 0 ======= FULL ======= Part{[1:2:3:0:0:0:0] eph 0, 346b 12r} data 777b + FlatIndex{4} Label{3 rev 3, 179b} 5 rec | Page Row Bytes (Uint32, String) | 0 0 86b {1, aaa} | 1 3 88b {1, baaaa} | 2 6 86b {2, aaa} | 3 9 86b {2, ccx} | 3 11 86b {2, cxz} + BTreeIndex{PageId: 5 RowCount: 12 DataSize: 346 ErasedRowCount: 0} Label{13 rev 1, 223b} | PageId: 0 RowCount: 3 DataSize: 86 ErasedRowCount: 0 | > {1, baaaa} | PageId: 1 RowCount: 6 DataSize: 174 ErasedRowCount: 0 | > {2, aaa} | PageId: 2 RowCount: 9 DataSize: 260 ErasedRowCount: 0 | > {2, ccx} | PageId: 3 RowCount: 12 DataSize: 346 ErasedRowCount: 0 ======= CUT ======= Part{[1:2:3:0:0:0:0] eph 0, 420b 10r} data 1347b + FlatIndex{10} Label{3 rev 3, 362b} 11 rec | Page Row Bytes (Uint32, String) | 0 0 42b {1, aaa} | 1 1 42b {1, ab} | 2 2 42b {1, ac} | 3 3 42b {1, b} | 4 4 42b {1, bb} | 5 5 42b {2, NULL} | 6 6 42b {2, ab} | 7 7 42b {2, ac} | 8 8 42b {2, b} | 9 9 42b {2, bb} | 9 9 42b {2, bba} + BTreeIndex{PageId: 11 RowCount: 10 DataSize: 420 ErasedRowCount: 0} Label{13 rev 1, 536b} | PageId: 0 RowCount: 1 DataSize: 42 ErasedRowCount: 0 | > 
{1, ab} | PageId: 1 RowCount: 2 DataSize: 84 ErasedRowCount: 0 | > {1, ac} | PageId: 2 RowCount: 3 DataSize: 126 ErasedRowCount: 0 | > {1, b} | PageId: 3 RowCount: 4 DataSize: 168 ErasedRowCount: 0 | > {1, bb} | PageId: 4 RowCount: 5 DataSize: 210 ErasedRowCount: 0 | > {2, NULL} | PageId: 5 RowCount: 6 DataSize: 252 ErasedRowCount: 0 | > {2, ab} | PageId: 6 RowCount: 7 DataSize: 294 ErasedRowCount: 0 | > {2, ac} | PageId: 7 RowCount: 8 DataSize: 336 ErasedRowCount: 0 | > {2, b} | PageId: 8 RowCount: 9 DataSize: 378 ErasedRowCount: 0 | > {2, bb} | PageId: 9 RowCount: 10 DataSize: 420 ErasedRowCount: 0 ======= FULL ======= Part{[1:2:3:0:0:0:0] eph 0, 420b 10r} data 1381b + FlatIndex{10} Label{3 rev 3, 375b} 11 rec | Page Row Bytes (Uint32, String) | 0 0 42b {1, aaa} | 1 1 42b {1, aba} | 2 2 42b {1, aca} | 3 3 42b {1, baa} | 4 4 42b {1, bba} | 5 5 42b {2, aaa} | 6 6 42b {2, aba} | 7 7 42b {2, aca} | 8 8 42b {2, baa} | 9 9 42b {2, bba} | 9 9 42b {2, bba} + BTreeIndex{PageId: 11 RowCount: 10 DataSize: 420 ErasedRowCount: 0} Label{13 rev 1, 557b} | PageId: 0 RowCount: 1 DataSize: 42 ErasedRowCount: 0 | > {1, aba} | PageId: 1 RowCount: 2 DataSize: 84 ErasedRowCount: 0 | > {1, aca} | PageId: 2 RowCount: 3 DataSize: 126 ErasedRowCount: 0 | > {1, baa} | PageId: 3 RowCount: 4 DataSize: 168 ErasedRowCount: 0 | > {1, bba} | PageId: 4 RowCount: 5 DataSize: 210 ErasedRowCount: 0 | > {2, aaa} | PageId: 5 RowCount: 6 DataSize: 252 ErasedRowCount: 0 | > {2, aba} | PageId: 6 RowCount: 7 DataSize: 294 ErasedRowCount: 0 | > {2, aca} | PageId: 7 RowCount: 8 DataSize: 336 ErasedRowCount: 0 | > {2, baa} | PageId: 8 RowCount: 9 DataSize: 378 ErasedRowCount: 0 | > {2, bba} | PageId: 9 RowCount: 10 DataSize: 420 ErasedRowCount: 0 ======= SLICES ======= { [0, 2), [2, 3), [3, 5), [5, 7), [7, 9), [9, 9] } ======= CUT ======= Part{[1:2:3:0:0:0:0] eph 0, 420b 10r} data 1347b + FlatIndex{10} Label{3 rev 3, 362b} 11 rec | Page Row Bytes (Uint32, String) | 0 0 42b {1, aaa} | 1 1 42b {1, ab} | 2 2 42b {1, ac} | 3 3 42b {1, b} | 4 4 42b {1, bb} | 5 5 42b {2, NULL} | 6 6 42b {2, ab} | 7 7 42b {2, ac} | 8 8 42b {2, b} | 9 9 42b {2, bb} | 9 9 42b {2, bba} + BTreeIndex{PageId: 11 RowCount: 10 DataSize: 420 ErasedRowCount: 0} Label{13 rev 1, 536b} | PageId: 0 RowCount: 1 DataSize: 42 ErasedRowCount: 0 | > {1, ab} | PageId: 1 RowCount: 2 DataSize: 84 ErasedRowCount: 0 | > {1, ac} | PageId: 2 RowCount: 3 DataSize: 126 ErasedRowCount: 0 | > {1, b} | PageId: 3 RowCount: 4 DataSize: 168 ErasedRowCount: 0 | > {1, bb} | PageId: 4 RowCount: 5 DataSize: 210 ErasedRowCount: 0 | > {2, NULL} | PageId: 5 RowCount: 6 DataSize: 252 ErasedRowCount: 0 | > {2, ab} | PageId: 6 RowCount: 7 DataSize: 294 ErasedRowCount: 0 | > {2, ac} | PageId: 7 RowCount: 8 DataSize: 336 ErasedRowCount: 0 | > {2, b} | PageId: 8 RowCount: 9 DataSize: 378 ErasedRowCount: 0 | > {2, bb} | PageId: 9 RowCount: 10 DataSize: 420 ErasedRowCount: 0 ======= FULL ======= Part{[1:2:3:0:0:0:0] eph 0, 420b 10r} data 1381b + FlatIndex{10} Label{3 rev 3, 375b} 11 rec | Page Row Bytes (Uint32, String) | 0 0 42b {1, aaa} | 1 1 42b {1, aba} | 2 2 42b {1, aca} | 3 3 42b {1, baa} | 4 4 42b {1, bba} | 5 5 42b {2, aaa} | 6 6 42b {2, aba} | 7 7 42b {2, aca} | 8 8 42b {2, baa} | 9 9 42b {2, bba} | 9 9 42b {2, bba} + BTreeIndex{PageId: 11 RowCount: 10 DataSize: 420 ErasedRowCount: 0} Label{13 rev 1, 557b} | PageId: 0 RowCount: 1 DataSize: 42 ErasedRowCount: 0 | > {1, aba} | PageId: 1 RowCount: 2 DataSize: 84 ErasedRowCount: 0 | > {1, aca} | PageId: 2 RowCount: 3 DataSize: 126 ErasedRowCount: 0 
| > {1, baa} | PageId: 3 RowCount: 4 DataSize: 168 ErasedRowCount: 0 | > {1, bba} | PageId: 4 RowCount: 5 DataSize: 210 ErasedRowCount: 0 | > {2, aaa} | PageId: 5 RowCount: 6 DataSize: 252 ErasedRowCount: 0 | > {2, aba} | PageId: 6 RowCount: 7 DataSize: 294 ErasedRowCount: 0 | > {2, aca} | PageId: 7 RowCount: 8 DataSize: 336 ErasedRowCount: 0 | > {2, baa} | PageId: 8 RowCount: 9 DataSize: 378 ErasedRowCount: 0 | > {2, bba} | PageId: 9 RowCount: 10 DataSize: 420 ErasedRowCount: 0 Part{[1:2:3:0:0:0:0] eph 0, 81b 2r} data 316b + FlatIndex{2} Label{3 rev 3, 107b} 3 rec | Page Row Bytes (String) | 0 0 40b {cccccc} | 1 1 41b {ccccccd} | 1 1 41b {ccccccd} + BTreeIndex{PageId: 3 RowCount: 2 DataSize: 81 ErasedRowCount: 0} Label{13 rev 1, 109b} | PageId: 0 RowCount: 1 DataSize: 40 ErasedRowCount: 0 | > {ccccccd} | PageId: 1 RowCount: 2 DataSize: 81 ErasedRowCount: 0 Part{[1:2:3:0:0:0:0] eph 0, 83b 2r} data 320b + FlatIndex{2} Label{3 rev 3, 109b} 3 rec | Page Row Bytes (String) | 0 0 40b {cccccc} | 1 1 43b {ccccccd} | 1 1 43b {ccccccddd} + BTreeIndex{PageId: 3 RowCount: 2 DataSize: 83 ErasedRowCount: 0} Label{13 rev 1, 109b} | PageId: 0 RowCount: 1 DataSize: 40 ErasedRowCount: 0 | > {ccccccd} | PageId: 1 RowCount: 2 DataSize: 83 ErasedRowCount: 0 Part{[1:2:3:0:0:0:0] eph 0, 80b 2r} data 312b + FlatIndex{2} Label{3 rev 3, 105b} 3 rec | Page Row Bytes (String) | 0 0 40b {cccccc} | 1 1 40b {cccccd} | 1 1 40b {cccccd} + BTreeIndex{PageId: 3 RowCount: 2 DataSize: 80 ErasedRowCount: 0} Label{13 rev 1, 108b} | PageId: 0 RowCount: 1 DataSize: 40 ErasedRowCount: 0 | > {cccccd} | PageId: 1 RowCount: 2 DataSize: 80 ErasedRowCount: 0 Part{[1:2:3:0:0:0:0] eph 0, 82b 2r} data 316b + FlatIndex{2} Label{3 rev 3, 107b} 3 rec | Page Row Bytes (String) | 0 0 40b {cccccc} | 1 1 42b {cccccd} | 1 1 42b {cccccddd} + BTreeIndex{PageId: 3 RowCount: 2 DataSize: 82 ErasedRowCount: 0} Label{13 rev 1, 108b} | PageId: 0 RowCount: 1 DataSize: 40 ErasedRowCount: 0 | > {cccccd} | PageId: 1 RowCount: 2 DataSize: 82 ErasedRowCount: 0 Part{[1:2:3:0:0:0:0] eph 0, 79b 2r} data 308b + FlatIndex{2} Label{3 rev 3, 103b} 3 rec | Page Row Bytes (String) | 0 0 40b {cccccc} | 1 1 39b {ccccd} | 1 1 39b {ccccd} + BTreeIndex{PageId: 3 RowCount: 2 DataSize: 79 ErasedRowCount: 0} Label{13 rev 1, 107b} | PageId: 0 RowCount: 1 DataSize: 40 ErasedRowCount: 0 | > {ccccd} | PageId: 1 RowCount: 2 DataSize: 79 ErasedRowCount: 0 Part{[1:2:3:0:0:0:0] eph 0, 81b 2r} data 312b + FlatIndex{2} Label{3 rev 3, 105b} 3 rec | Page Row Bytes (String) | 0 0 40b {cccccc} | 1 1 41b {ccccd} | 1 1 41b {ccccddd} + BTreeIndex{PageId: 3 RowCount: 2 DataSize: 81 ErasedRowCount: 0} Label{13 rev 1, 107b} | PageId: 0 RowCount: 1 DataSize: 40 ErasedRowCount: 0 | > {ccccd} | PageId: 1 RowCount: 2 DataSize: 81 ErasedRowCount: 0 Part{[1:2:3:0:0:0:0] eph 0, 78b 2r} data 304b + FlatIndex{2} Label{3 rev 3, 101b} 3 rec | Page Row Bytes (String) | 0 0 40b {cccccc} | 1 1 38b {cccd} | 1 1 38b {cccd} + BTreeIndex{PageId: 3 RowCount: 2 DataSize: 78 ErasedRowCount: 0} Label{13 rev 1, 106b} | PageId: 0 RowCount: 1 DataSize: 40 ErasedRowCount: 0 | > {cccd} | PageId: 1 RowCount: 2 DataSize: 78 ErasedRowCount: 0 Part{[1:2:3:0:0:0:0] eph 0, 80b 2r} data 308b + FlatIndex{2} Label{3 rev 3, 103b} 3 rec | Page Row Bytes (String) | 0 0 40b {cccccc} | 1 1 40b {cccd} | 1 1 40b {cccddd} + BTreeIndex{PageId: 3 RowCount: 2 DataSize: 80 ErasedRowCount: 0} Label{13 rev 1, 106b} | PageId: 0 RowCount: 1 DataSize: 40 ErasedRowCount: 0 | > {cccd} | PageId: 1 RowCount: 2 DataSize: 80 ErasedRowCount: 0 
Part{[1:2:3:0:0:0:0] eph 0, 75b 2r} data 292b + FlatIndex{2} Label{3 rev 3, 95b} 3 rec | Page Row Bytes (String) | 0 0 40b {cccccc} | 1 1 35b {d} | 1 1 35b {d} + BTreeIndex{PageId: 3 RowCount: 2 DataSize: 75 ErasedRowCount: 0} Label{13 rev 1, 103b} | PageId: 0 RowCount: 1 DataSize: 40 ErasedRowCount: 0 | > {d} | PageId: 1 RowCount: 2 DataSize: 75 ErasedRowCount: 0 Part{[1:2:3:0:0:0:0] eph 0, 77b 2r} data 296b + FlatIndex{2} Label{3 rev 3, 97b} 3 rec | Page Row Bytes (String) | 0 0 40b {cccccc} | 1 1 37b {d} | 1 1 37b {ddd} + BTreeIndex{PageId: 3 RowCount: 2 DataSize: 77 ErasedRowCount: 0} Label{13 rev 1, 103b} | PageId: 0 RowCount: 1 DataSize: 40 ErasedRowCount: 0 | > {d} | PageId: 1 RowCount: 2 DataSize: 77 ErasedRowCount: 0 Part{[1:2:3:0:0:0:0] eph 0, 69b 2r} data 280b + FlatIndex{2} Label{3 rev 3, 89b} 3 rec | Page Row Bytes (String) | 0 0 34b {} | 1 1 35b {d} | 1 1 35b {d} + BTreeIndex{PageId: 3 RowCount: 2 DataSize: 69 ErasedRowCount: 0} Label{13 rev 1, 103b} | PageId: 0 RowCount: 1 DataSize: 34 ErasedRowCount: 0 | > {d} | PageId: 1 RowCount: 2 DataSize: 69 ErasedRowCount: 0 Part{[1:2:3:0:0:0:0] eph 0, 71b 2r} data 284b + FlatIndex{2} Label{3 rev 3, 91b} 3 rec | Page Row Bytes (String) | 0 0 34b {} | 1 1 37b {d} | 1 1 37b {ddd} + BTreeIndex{PageId: 3 RowCount: 2 DataSize: 71 ErasedRowCount: 0} Label{13 rev 1, 103b} | P ... xxxxxxxxx_4} | ERowOp 1: {0, 8} {Set 2 Uint32 : 5}, {Set 3 Uint64 : 5}, {Set 4 String : xxxxxxxxxx_5} + Rows{3} Label{34 rev 1, 120b}, [6, +2)row | ERowOp 1: {0, 10} {Set 2 Uint32 : 6}, {Set 3 Uint64 : 6}, {Set 4 String : xxxxxxxxxx_6} | ERowOp 1: {1, 1} {Set 2 Uint32 : 7}, {Set 3 Uint64 : 7}, {Set 4 String : xxxxxxxxxx_7} + Rows{4} Label{44 rev 1, 120b}, [8, +2)row | ERowOp 1: {1, 3} {Set 2 Uint32 : 8}, {Set 3 Uint64 : 8}, {Set 4 String : xxxxxxxxxx_8} | ERowOp 1: {1, 4} {Set 2 Uint32 : 9}, {Set 3 Uint64 : 9}, {Set 4 String : xxxxxxxxxx_9} + Rows{5} Label{54 rev 1, 122b}, [10, +2)row | ERowOp 1: {1, 6} {Set 2 Uint32 : 10}, {Set 3 Uint64 : 10}, {Set 4 String : xxxxxxxxxx_10} | ERowOp 1: {1, 7} {Set 2 Uint32 : 11}, {Set 3 Uint64 : 11}, {Set 4 String : xxxxxxxxxx_11} + Rows{6} Label{64 rev 1, 122b}, [12, +2)row | ERowOp 1: {1, 8} {Set 2 Uint32 : 12}, {Set 3 Uint64 : 12}, {Set 4 String : xxxxxxxxxx_12} | ERowOp 1: {1, 10} {Set 2 Uint32 : 13}, {Set 3 Uint64 : 13}, {Set 4 String : xxxxxxxxxx_13} + Rows{7} Label{74 rev 1, 122b}, [14, +2)row | ERowOp 1: {2, 1} {Set 2 Uint32 : 14}, {Set 3 Uint64 : 14}, {Set 4 String : xxxxxxxxxx_14} | ERowOp 1: {2, 3} {Set 2 Uint32 : 15}, {Set 3 Uint64 : 15}, {Set 4 String : xxxxxxxxxx_15} + Rows{8} Label{84 rev 1, 122b}, [16, +2)row | ERowOp 1: {2, 4} {Set 2 Uint32 : 16}, {Set 3 Uint64 : 16}, {Set 4 String : xxxxxxxxxx_16} | ERowOp 1: {2, 6} {Set 2 Uint32 : 17}, {Set 3 Uint64 : 17}, {Set 4 String : xxxxxxxxxx_17} + Rows{9} Label{94 rev 1, 122b}, [18, +2)row | ERowOp 1: {2, 7} {Set 2 Uint32 : 18}, {Set 3 Uint64 : 18}, {Set 4 String : xxxxxxxxxx_18} | ERowOp 1: {2, 8} {Set 2 Uint32 : 19}, {Set 3 Uint64 : 19}, {Set 4 String : xxxxxxxxxx_19} + Rows{10} Label{104 rev 1, 122b}, [20, +2)row | ERowOp 1: {2, 10} {Set 2 Uint32 : 20}, {Set 3 Uint64 : 20}, {Set 4 String : xxxxxxxxxx_20} | ERowOp 1: {3, 1} {Set 2 Uint32 : 21}, {Set 3 Uint64 : 21}, {Set 4 String : xxxxxxxxxx_21} + Rows{11} Label{114 rev 1, 122b}, [22, +2)row | ERowOp 1: {3, 3} {Set 2 Uint32 : 22}, {Set 3 Uint64 : 22}, {Set 4 String : xxxxxxxxxx_22} | ERowOp 1: {3, 4} {Set 2 Uint32 : 23}, {Set 3 Uint64 : 23}, {Set 4 String : xxxxxxxxxx_23} + Rows{12} Label{124 rev 1, 122b}, [24, +2)row | 
ERowOp 1: {3, 6} {Set 2 Uint32 : 24}, {Set 3 Uint64 : 24}, {Set 4 String : xxxxxxxxxx_24} | ERowOp 1: {3, 7} {Set 2 Uint32 : 25}, {Set 3 Uint64 : 25}, {Set 4 String : xxxxxxxxxx_25} + Rows{13} Label{134 rev 1, 122b}, [26, +2)row | ERowOp 1: {3, 8} {Set 2 Uint32 : 26}, {Set 3 Uint64 : 26}, {Set 4 String : xxxxxxxxxx_26} | ERowOp 1: {3, 10} {Set 2 Uint32 : 27}, {Set 3 Uint64 : 27}, {Set 4 String : xxxxxxxxxx_27} + Rows{14} Label{144 rev 1, 122b}, [28, +2)row | ERowOp 1: {4, 1} {Set 2 Uint32 : 28}, {Set 3 Uint64 : 28}, {Set 4 String : xxxxxxxxxx_28} | ERowOp 1: {4, 3} {Set 2 Uint32 : 29}, {Set 3 Uint64 : 29}, {Set 4 String : xxxxxxxxxx_29} + Rows{15} Label{154 rev 1, 122b}, [30, +2)row | ERowOp 1: {4, 4} {Set 2 Uint32 : 30}, {Set 3 Uint64 : 30}, {Set 4 String : xxxxxxxxxx_30} | ERowOp 1: {4, 6} {Set 2 Uint32 : 31}, {Set 3 Uint64 : 31}, {Set 4 String : xxxxxxxxxx_31} + Rows{16} Label{164 rev 1, 122b}, [32, +2)row | ERowOp 1: {4, 7} {Set 2 Uint32 : 32}, {Set 3 Uint64 : 32}, {Set 4 String : xxxxxxxxxx_32} | ERowOp 1: {4, 8} {Set 2 Uint32 : 33}, {Set 3 Uint64 : 33}, {Set 4 String : xxxxxxxxxx_33} + Rows{17} Label{174 rev 1, 122b}, [34, +2)row | ERowOp 1: {4, 10} {Set 2 Uint32 : 34}, {Set 3 Uint64 : 34}, {Set 4 String : xxxxxxxxxx_34} | ERowOp 1: {5, 1} {Set 2 Uint32 : 35}, {Set 3 Uint64 : 35}, {Set 4 String : xxxxxxxxxx_35} + Rows{18} Label{184 rev 1, 122b}, [36, +2)row | ERowOp 1: {5, 3} {Set 2 Uint32 : 36}, {Set 3 Uint64 : 36}, {Set 4 String : xxxxxxxxxx_36} | ERowOp 1: {5, 4} {Set 2 Uint32 : 37}, {Set 3 Uint64 : 37}, {Set 4 String : xxxxxxxxxx_37} + Rows{19} Label{194 rev 1, 122b}, [38, +2)row | ERowOp 1: {5, 6} {Set 2 Uint32 : 38}, {Set 3 Uint64 : 38}, {Set 4 String : xxxxxxxxxx_38} | ERowOp 1: {5, 7} {Set 2 Uint32 : 39}, {Set 3 Uint64 : 39}, {Set 4 String : xxxxxxxxxx_39} Slices{ [0, 39] } Part{[1:2:3:0:0:0:0] eph 0, 2430b 40r} data 4441b + FlatIndex{26} Label{3 rev 3, 558b} 21 rec | Page Row Bytes (Uint32, Uint32) | 0 0 120b {0, 1} | 1 2 120b {0, 4} | 2 4 120b {0, 7} | 3 6 120b {0, 10} | 4 8 120b {1, 3} | 5 10 122b {1, 6} | 7 12 122b {1, 8} | 8 14 122b {2, NULL} | 9 16 122b {2, 4} | 11 18 122b {2, 7} | 12 20 122b {2, 10} | 13 22 122b {3, 3} | 15 24 122b {3, 6} | 16 26 122b {3, 8} | 17 28 122b {4, NULL} | 19 30 122b {4, 4} | 20 32 122b {4, 7} | 21 34 122b {4, 10} | 24 36 122b {5, 3} | 25 38 122b {5, 6} | 25 39 122b {5, 7} + BTreeIndex{PageId: 29 RowCount: 40 DataSize: 2430 ErasedRowCount: 0} Label{13 rev 1, 102b} | + BTreeIndex{PageId: 23 RowCount: 18 DataSize: 1088 ErasedRowCount: 0} Label{13 rev 1, 151b} | | + BTreeIndex{PageId: 6 RowCount: 6 DataSize: 360 ErasedRowCount: 0} Label{13 rev 1, 151b} | | | PageId: 0 RowCount: 2 DataSize: 120 ErasedRowCount: 0 | | | > {0, 4} | | | PageId: 1 RowCount: 4 DataSize: 240 ErasedRowCount: 0 | | | > {0, 7} | | | PageId: 2 RowCount: 6 DataSize: 360 ErasedRowCount: 0 | | > {0, 10} | | + BTreeIndex{PageId: 10 RowCount: 12 DataSize: 722 ErasedRowCount: 0} Label{13 rev 1, 151b} | | | PageId: 3 RowCount: 8 DataSize: 480 ErasedRowCount: 0 | | | > {1, 3} | | | PageId: 4 RowCount: 10 DataSize: 600 ErasedRowCount: 0 | | | > {1, 6} | | | PageId: 5 RowCount: 12 DataSize: 722 ErasedRowCount: 0 | | > {1, 8} | | + BTreeIndex{PageId: 14 RowCount: 18 DataSize: 1088 ErasedRowCount: 0} Label{13 rev 1, 147b} | | | PageId: 7 RowCount: 14 DataSize: 844 ErasedRowCount: 0 | | | > {2, NULL} | | | PageId: 8 RowCount: 16 DataSize: 966 ErasedRowCount: 0 | | | > {2, 4} | | | PageId: 9 RowCount: 18 DataSize: 1088 ErasedRowCount: 0 | > {2, 7} | + BTreeIndex{PageId: 28 RowCount: 40 
DataSize: 2430 ErasedRowCount: 0} Label{13 rev 1, 151b} | | + BTreeIndex{PageId: 18 RowCount: 24 DataSize: 1454 ErasedRowCount: 0} Label{13 rev 1, 151b} | | | PageId: 11 RowCount: 20 DataSize: 1210 ErasedRowCount: 0 | | | > {2, 10} | | | PageId: 12 RowCount: 22 DataSize: 1332 ErasedRowCount: 0 | | | > {3, 3} | | | PageId: 13 RowCount: 24 DataSize: 1454 ErasedRowCount: 0 | | > {3, 6} | | + BTreeIndex{PageId: 22 RowCount: 30 DataSize: 1820 ErasedRowCount: 0} Label{13 rev 1, 147b} | | | PageId: 15 RowCount: 26 DataSize: 1576 ErasedRowCount: 0 | | | > {3, 8} | | | PageId: 16 RowCount: 28 DataSize: 1698 ErasedRowCount: 0 | | | > {4, NULL} | | | PageId: 17 RowCount: 30 DataSize: 1820 ErasedRowCount: 0 | | > {4, 4} | | + BTreeIndex{PageId: 27 RowCount: 40 DataSize: 2430 ErasedRowCount: 0} Label{13 rev 1, 249b} | | | PageId: 19 RowCount: 32 DataSize: 1942 ErasedRowCount: 0 | | | > {4, 7} | | | PageId: 20 RowCount: 34 DataSize: 2064 ErasedRowCount: 0 | | | > {4, 10} | | | PageId: 21 RowCount: 36 DataSize: 2186 ErasedRowCount: 0 | | | > {5, 3} | | | PageId: 24 RowCount: 38 DataSize: 2308 ErasedRowCount: 0 | | | > {5, 6} | | | PageId: 25 RowCount: 40 DataSize: 2430 ErasedRowCount: 0 + Rows{0} Label{04 rev 1, 120b}, [0, +2)row | ERowOp 1: {0, 1} {Set 2 Uint32 : 0}, {Set 3 Uint64 : 0}, {Set 4 String : xxxxxxxxxx_0} | ERowOp 1: {0, 3} {Set 2 Uint32 : 1}, {Set 3 Uint64 : 1}, {Set 4 String : xxxxxxxxxx_1} + Rows{1} Label{14 rev 1, 120b}, [2, +2)row | ERowOp 1: {0, 4} {Set 2 Uint32 : 2}, {Set 3 Uint64 : 2}, {Set 4 String : xxxxxxxxxx_2} | ERowOp 1: {0, 6} {Set 2 Uint32 : 3}, {Set 3 Uint64 : 3}, {Set 4 String : xxxxxxxxxx_3} + Rows{2} Label{24 rev 1, 120b}, [4, +2)row | ERowOp 1: {0, 7} {Set 2 Uint32 : 4}, {Set 3 Uint64 : 4}, {Set 4 String : xxxxxxxxxx_4} | ERowOp 1: {0, 8} {Set 2 Uint32 : 5}, {Set 3 Uint64 : 5}, {Set 4 String : xxxxxxxxxx_5} + Rows{3} Label{34 rev 1, 120b}, [6, +2)row | ERowOp 1: {0, 10} {Set 2 Uint32 : 6}, {Set 3 Uint64 : 6}, {Set 4 String : xxxxxxxxxx_6} | ERowOp 1: {1, 1} {Set 2 Uint32 : 7}, {Set 3 Uint64 : 7}, {Set 4 String : xxxxxxxxxx_7} + Rows{4} Label{44 rev 1, 120b}, [8, +2)row | ERowOp 1: {1, 3} {Set 2 Uint32 : 8}, {Set 3 Uint64 : 8}, {Set 4 String : xxxxxxxxxx_8} | ERowOp 1: {1, 4} {Set 2 Uint32 : 9}, {Set 3 Uint64 : 9}, {Set 4 String : xxxxxxxxxx_9} + Rows{5} Label{54 rev 1, 122b}, [10, +2)row | ERowOp 1: {1, 6} {Set 2 Uint32 : 10}, {Set 3 Uint64 : 10}, {Set 4 String : xxxxxxxxxx_10} | ERowOp 1: {1, 7} {Set 2 Uint32 : 11}, {Set 3 Uint64 : 11}, {Set 4 String : xxxxxxxxxx_11} + Rows{7} Label{74 rev 1, 122b}, [12, +2)row | ERowOp 1: {1, 8} {Set 2 Uint32 : 12}, {Set 3 Uint64 : 12}, {Set 4 String : xxxxxxxxxx_12} | ERowOp 1: {1, 10} {Set 2 Uint32 : 13}, {Set 3 Uint64 : 13}, {Set 4 String : xxxxxxxxxx_13} + Rows{8} Label{84 rev 1, 122b}, [14, +2)row | ERowOp 1: {2, 1} {Set 2 Uint32 : 14}, {Set 3 Uint64 : 14}, {Set 4 String : xxxxxxxxxx_14} | ERowOp 1: {2, 3} {Set 2 Uint32 : 15}, {Set 3 Uint64 : 15}, {Set 4 String : xxxxxxxxxx_15} + Rows{9} Label{94 rev 1, 122b}, [16, +2)row | ERowOp 1: {2, 4} {Set 2 Uint32 : 16}, {Set 3 Uint64 : 16}, {Set 4 String : xxxxxxxxxx_16} | ERowOp 1: {2, 6} {Set 2 Uint32 : 17}, {Set 3 Uint64 : 17}, {Set 4 String : xxxxxxxxxx_17} + Rows{11} Label{114 rev 1, 122b}, [18, +2)row | ERowOp 1: {2, 7} {Set 2 Uint32 : 18}, {Set 3 Uint64 : 18}, {Set 4 String : xxxxxxxxxx_18} | ERowOp 1: {2, 8} {Set 2 Uint32 : 19}, {Set 3 Uint64 : 19}, {Set 4 String : xxxxxxxxxx_19} + Rows{12} Label{124 rev 1, 122b}, [20, +2)row | ERowOp 1: {2, 10} {Set 2 Uint32 : 20}, {Set 3 Uint64 : 
20}, {Set 4 String : xxxxxxxxxx_20} | ERowOp 1: {3, 1} {Set 2 Uint32 : 21}, {Set 3 Uint64 : 21}, {Set 4 String : xxxxxxxxxx_21} + Rows{13} Label{134 rev 1, 122b}, [22, +2)row | ERowOp 1: {3, 3} {Set 2 Uint32 : 22}, {Set 3 Uint64 : 22}, {Set 4 String : xxxxxxxxxx_22} | ERowOp 1: {3, 4} {Set 2 Uint32 : 23}, {Set 3 Uint64 : 23}, {Set 4 String : xxxxxxxxxx_23} + Rows{15} Label{154 rev 1, 122b}, [24, +2)row | ERowOp 1: {3, 6} {Set 2 Uint32 : 24}, {Set 3 Uint64 : 24}, {Set 4 String : xxxxxxxxxx_24} | ERowOp 1: {3, 7} {Set 2 Uint32 : 25}, {Set 3 Uint64 : 25}, {Set 4 String : xxxxxxxxxx_25} + Rows{16} Label{164 rev 1, 122b}, [26, +2)row | ERowOp 1: {3, 8} {Set 2 Uint32 : 26}, {Set 3 Uint64 : 26}, {Set 4 String : xxxxxxxxxx_26} | ERowOp 1: {3, 10} {Set 2 Uint32 : 27}, {Set 3 Uint64 : 27}, {Set 4 String : xxxxxxxxxx_27} + Rows{17} Label{174 rev 1, 122b}, [28, +2)row | ERowOp 1: {4, 1} {Set 2 Uint32 : 28}, {Set 3 Uint64 : 28}, {Set 4 String : xxxxxxxxxx_28} | ERowOp 1: {4, 3} {Set 2 Uint32 : 29}, {Set 3 Uint64 : 29}, {Set 4 String : xxxxxxxxxx_29} + Rows{19} Label{194 rev 1, 122b}, [30, +2)row | ERowOp 1: {4, 4} {Set 2 Uint32 : 30}, {Set 3 Uint64 : 30}, {Set 4 String : xxxxxxxxxx_30} | ERowOp 1: {4, 6} {Set 2 Uint32 : 31}, {Set 3 Uint64 : 31}, {Set 4 String : xxxxxxxxxx_31} + Rows{20} Label{204 rev 1, 122b}, [32, +2)row | ERowOp 1: {4, 7} {Set 2 Uint32 : 32}, {Set 3 Uint64 : 32}, {Set 4 String : xxxxxxxxxx_32} | ERowOp 1: {4, 8} {Set 2 Uint32 : 33}, {Set 3 Uint64 : 33}, {Set 4 String : xxxxxxxxxx_33} + Rows{21} Label{214 rev 1, 122b}, [34, +2)row | ERowOp 1: {4, 10} {Set 2 Uint32 : 34}, {Set 3 Uint64 : 34}, {Set 4 String : xxxxxxxxxx_34} | ERowOp 1: {5, 1} {Set 2 Uint32 : 35}, {Set 3 Uint64 : 35}, {Set 4 String : xxxxxxxxxx_35} + Rows{24} Label{244 rev 1, 122b}, [36, +2)row | ERowOp 1: {5, 3} {Set 2 Uint32 : 36}, {Set 3 Uint64 : 36}, {Set 4 String : xxxxxxxxxx_36} | ERowOp 1: {5, 4} {Set 2 Uint32 : 37}, {Set 3 Uint64 : 37}, {Set 4 String : xxxxxxxxxx_37} + Rows{25} Label{254 rev 1, 122b}, [38, +2)row | ERowOp 1: {5, 6} {Set 2 Uint32 : 38}, {Set 3 Uint64 : 38}, {Set 4 String : xxxxxxxxxx_38} | ERowOp 1: {5, 7} {Set 2 Uint32 : 39}, {Set 3 Uint64 : 39}, {Set 4 String : xxxxxxxxxx_39} >> ResourcePoolClassifiersDdl::TestDropResourcePool [GOOD] >> BackupRestoreS3::TestAllIndexTypes-EIndexTypeInvalid [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-BOOL >> TFlatTableExecutor_BackgroundCompactions::TestRunBackgroundCompactionGen2 [GOOD] >> TFlatTableExecutor_BackgroundCompactions::TestChangeBackgroundSnapshotPriorityByTime >> TFlatTableExecutor_BackgroundCompactions::TestChangeBackgroundSnapshotPriorityByTime [GOOD] >> TFlatTableExecutor_BackgroundCompactions::TestChangeBackgroundCompactionPriorityByTime >> YdbIndexTable::MultiShardTableOneUniqIndexDataColumn [GOOD] >> TVersions::Wreck2Reverse [GOOD] >> TVersions::Wreck1 >> TFlatTableExecutor_BackgroundCompactions::TestChangeBackgroundCompactionPriorityByTime [GOOD] >> TFlatTableExecutor_BTreeIndex::EnableLocalDBBtreeIndex_Default >> SystemView::TopPartitionsByCpuFollowers [GOOD] >> SystemView::TabletsFields >> BuildStatsHistogram::Five_Five_Mixed [GOOD] >> BuildStatsHistogram::Five_Five_Serial |90.6%| [TA] $(B)/ydb/core/tx/datashard/ut_minstep/test-results/unittest/{meta.json ... 
results_accumulator.log} ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/workload_service/ut/unittest >> ResourcePoolClassifiersDdl::TestDropResourcePool [GOOD] Test command err: 2025-05-07T08:54:38.394047Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501624574894120119:2132];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:54:38.431440Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/004837/r3tmp/tmp66H85K/pdisk_1.dat 2025-05-07T08:54:39.122398Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:54:39.154440Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:54:39.154529Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:54:39.157809Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 30577, node 1 2025-05-07T08:54:39.338584Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:54:39.338611Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:54:39.338617Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:54:39.338707Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:25440 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:54:39.728476Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T08:54:39.758768Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T08:54:42.608642Z node 1 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:440: [WorkloadService] [Service] Started workload service initialization 2025-05-07T08:54:42.620715Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:223: SessionId: ydb://session/3?node_id=1&id=NWUyZGQ1MWYtNjc1YThkOTAtMzE4ZThkNjMtNGY5MzczZDg=, ActorId: [0:0:0], ActorState: unknown state, Create session actor with id NWUyZGQ1MWYtNjc1YThkOTAtMzE4ZThkNjMtNGY5MzczZDg= 2025-05-07T08:54:42.621063Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:241: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7501624592073989877:2330], Start check tables existence, number paths: 2 2025-05-07T08:54:42.621168Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:227: SessionId: ydb://session/3?node_id=1&id=NWUyZGQ1MWYtNjc1YThkOTAtMzE4ZThkNjMtNGY5MzczZDg=, ActorId: [1:7501624592073989878:2331], ActorState: unknown state, session actor bootstrapped 2025-05-07T08:54:42.621667Z node 1 :KQP_WORKLOAD_SERVICE TRACE: kqp_workload_service.cpp:125: [WorkloadService] [Service] Updated node info, noode count: 1 2025-05-07T08:54:42.621699Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:100: [WorkloadService] [Service] Subscribed for config changes 2025-05-07T08:54:42.621727Z node 1 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:111: [WorkloadService] [Service] Resource pools was enanbled 2025-05-07T08:54:42.623500Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:182: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7501624592073989877:2330], Describe table /Root/.metadata/workload_manager/delayed_requests status PathErrorUnknown 2025-05-07T08:54:42.623558Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:182: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7501624592073989877:2330], Describe table /Root/.metadata/workload_manager/running_requests status PathErrorUnknown 2025-05-07T08:54:42.623610Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:289: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7501624592073989877:2330], Successfully finished 2025-05-07T08:54:42.623671Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:367: [WorkloadService] [Service] Cleanup completed, tables exists: 0 2025-05-07T08:54:42.730093Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:387: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501624592073989895:2300], DatabaseId: Root, PoolId: sample_pool_id, Start pool creating 2025-05-07T08:54:42.734412Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480 2025-05-07T08:54:42.738176Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:429: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501624592073989895:2300], DatabaseId: Root, PoolId: sample_pool_id, Subscribe on create pool tx: 281474976710658 2025-05-07T08:54:42.741208Z node 1 :KQP_WORKLOAD_SERVICE TRACE: scheme_actors.cpp:352: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501624592073989895:2300], DatabaseId: Root, PoolId: sample_pool_id, Tablet to pipe successfully connected 2025-05-07T08:54:42.749049Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: 
[1:7501624592073989895:2300], DatabaseId: Root, PoolId: sample_pool_id, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-05-07T08:54:42.822091Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:387: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501624592073989895:2300], DatabaseId: Root, PoolId: sample_pool_id, Start pool creating 2025-05-07T08:54:42.826830Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501624592073989946:2332] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/sample_pool_id\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:54:42.826986Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:480: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501624592073989895:2300], DatabaseId: Root, PoolId: sample_pool_id, Pool successfully created 2025-05-07T08:54:42.829645Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:223: SessionId: ydb://session/3?node_id=1&id=NzdlYWQ3Mi02YzJiYWE4Zi1jYTZhMzQ0Ny01NzA0Y2Y5Nw==, ActorId: [0:0:0], ActorState: unknown state, Create session actor with id NzdlYWQ3Mi02YzJiYWE4Zi1jYTZhMzQ0Ny01NzA0Y2Y5Nw== 2025-05-07T08:54:42.829964Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:227: SessionId: ydb://session/3?node_id=1&id=NzdlYWQ3Mi02YzJiYWE4Zi1jYTZhMzQ0Ny01NzA0Y2Y5Nw==, ActorId: [1:7501624592073989953:2332], ActorState: unknown state, session actor bootstrapped 2025-05-07T08:54:42.830170Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:443: SessionId: ydb://session/3?node_id=1&id=NzdlYWQ3Mi02YzJiYWE4Zi1jYTZhMzQ0Ny01NzA0Y2Y5Nw==, ActorId: [1:7501624592073989953:2332], ActorState: ReadyState, TraceId: 01jtmz8kwe13ehqvrm9y3828k2, received request, proxyRequestId: 3 prepared: 0 tx_control: 0 action: QUERY_ACTION_EXECUTE type: QUERY_TYPE_SQL_GENERIC_QUERY text: SELECT 42; rpcActor: [1:7501624592073989952:2337] database: Root databaseId: /Root pool id: sample_pool_id 2025-05-07T08:54:42.830221Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:157: [WorkloadService] [Service] Recieved subscription request, DatabaseId: /Root, PoolId: sample_pool_id 2025-05-07T08:54:42.830234Z node 1 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:561: [WorkloadService] [Service] Creating new database state for id /Root 2025-05-07T08:54:42.830287Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:169: [WorkloadService] [Service] Recieved new request from [1:7501624592073989953:2332], DatabaseId: /Root, PoolId: sample_pool_id, SessionId: ydb://session/3?node_id=1&id=NzdlYWQ3Mi02YzJiYWE4Zi1jYTZhMzQ0Ny01NzA0Y2Y5Nw== 2025-05-07T08:54:42.830342Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:185: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501624592073989955:2333], DatabaseId: /Root, PoolId: sample_pool_id, Start pool fetching 2025-05-07T08:54:42.830416Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:574: [WorkloadService] [TDatabaseFetcherActor] ActorId: [1:7501624592073989956:2334], Database: /Root, Start database fetching 2025-05-07T08:54:42.831853Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:600: [WorkloadService] [TDatabaseFetcherActor] ActorId: [1:7501624592073989956:2334], Database: /Root, Database info successfully fetched, serverless: 0 2025-05-07T08:54:42.831975Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:223: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501624592073989955:2333], DatabaseId: /Root, PoolId: sample_pool_id, Pool info successfully 
fetched 2025-05-07T08:54:42.832020Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:240: [WorkloadService] [Service] Successfully fetched database info, DatabaseId: /Root, Serverless: 0 2025-05-07T08:54:42.832060Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:253: [WorkloadService] [Service] Successfully fetched pool sample_pool_id, DatabaseId: /Root 2025-05-07T08:54:42.832074Z node 1 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:571: [WorkloadService] [Service] Creating new handler for pool /Root/sample_pool_id 2025-05-07T08:54:42.832365Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:44: [WorkloadService] [TPoolResolverActor] ActorId: [1:7501624592073989966:2335], DatabaseId: /Root, PoolId: sample_pool_id, SessionId: ydb://session/3?node_id=1&id=NzdlYWQ3Mi02YzJiYWE4Zi1jYTZhMzQ0Ny01NzA0Y2Y5Nw==, Start pool fetching 2025-05-07T08:54:42.832392Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:185: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501624592073989968:2337], DatabaseId: /Root, PoolId: sample_pool_id, Start pool fetching 2025-05-07T08:54:42.832471Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: pool_handlers_actors.cpp:466: [WorkloadService] [TPoolHandlerActorBase] ActorId: [1:7501624592073989967:2336], DatabaseId: /Root, PoolId: sample_pool_id, Subscribed on schemeboard notifications for path: [OwnerId: 72057594046644480, LocalPathId: 5] 2025-05-07T08:54:42.833828Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:223: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501624592073989968:2337], DatabaseId: /Root, PoolId: sample_pool_id, Pool info successfully fetched 2025-05-07T08:54:42.833891Z n ... 025-05-07T08:56:31.785598Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2270: SessionId: ydb://session/3?node_id=8&id=NDgxNzE1NjYtMjlmNTY0ZjUtNmEzODBmZjYtNTI1ZjIyMWI=, ActorId: [8:7501625062951704442:2679], ActorState: CleanupState, TraceId: 01jtmzby9845dc5h15nhbd1r6r, Sent query response back to proxy, proxyRequestId: 56, proxyId: [8:7501624972757389246:2065] 2025-05-07T08:56:31.785644Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2546: SessionId: ydb://session/3?node_id=8&id=NDgxNzE1NjYtMjlmNTY0ZjUtNmEzODBmZjYtNTI1ZjIyMWI=, ActorId: [8:7501625062951704442:2679], ActorState: unknown state, TraceId: 01jtmzby9845dc5h15nhbd1r6r, Cleanup temp tables: 0 2025-05-07T08:56:31.785775Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2637: SessionId: ydb://session/3?node_id=8&id=NDgxNzE1NjYtMjlmNTY0ZjUtNmEzODBmZjYtNTI1ZjIyMWI=, ActorId: [8:7501625062951704442:2679], ActorState: unknown state, TraceId: 01jtmzby9845dc5h15nhbd1r6r, Session actor destroyed 2025-05-07T08:56:31.800887Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:443: SessionId: ydb://session/3?node_id=8&id=NzFlNTEzYmMtZTE1ZGUwZmYtZDQzYzYzNjktYTg1ZDFjYjc=, ActorId: [8:7501625015707062856:2335], ActorState: ReadyState, TraceId: 01jtmzby9p3fyv0prsca1885xz, received request, proxyRequestId: 57 prepared: 0 tx_control: 0 action: QUERY_ACTION_EXECUTE type: QUERY_TYPE_SQL_DDL text: DROP RESOURCE POOL my_pool; rpcActor: [0:0:0] database: /Root databaseId: /Root pool id: default 2025-05-07T08:56:31.818607Z node 8 :KQP_WORKLOAD_SERVICE DEBUG: pool_handlers_actors.cpp:294: [WorkloadService] [TPoolHandlerActorBase] ActorId: [8:7501625062951704415:2674], DatabaseId: /Root, PoolId: my_pool, Got delete notification 2025-05-07T08:56:31.818711Z node 8 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:157: [WorkloadService] [Service] Recieved subscription request, DatabaseId: /Root, PoolId: 
my_pool 2025-05-07T08:56:31.818772Z node 8 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:185: [WorkloadService] [TPoolFetcherActor] ActorId: [8:7501625062951704464:2683], DatabaseId: /Root, PoolId: my_pool, Start pool fetching 2025-05-07T08:56:31.821404Z node 8 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [8:7501625062951704464:2683], DatabaseId: /Root, PoolId: my_pool, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool my_pool not found or you don't have access permissions } 2025-05-07T08:56:31.821546Z node 8 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool my_pool, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool my_pool not found or you don't have access permissions } 2025-05-07T08:56:31.822734Z node 8 :KQP_SESSION INFO: kqp_session_actor.cpp:2473: SessionId: ydb://session/3?node_id=8&id=NzFlNTEzYmMtZTE1ZGUwZmYtZDQzYzYzNjktYTg1ZDFjYjc=, ActorId: [8:7501625015707062856:2335], ActorState: ExecuteState, TraceId: 01jtmzby9p3fyv0prsca1885xz, Cleanup start, isFinal: 0 CleanupCtx: 1 TransactionsToBeAborted.size(): 0 WorkerId: [8:7501625062951704451:2335] WorkloadServiceCleanup: 0 2025-05-07T08:56:31.824716Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2534: SessionId: ydb://session/3?node_id=8&id=NzFlNTEzYmMtZTE1ZGUwZmYtZDQzYzYzNjktYTg1ZDFjYjc=, ActorId: [8:7501625015707062856:2335], ActorState: CleanupState, TraceId: 01jtmzby9p3fyv0prsca1885xz, EndCleanup, isFinal: 0 2025-05-07T08:56:31.824781Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2270: SessionId: ydb://session/3?node_id=8&id=NzFlNTEzYmMtZTE1ZGUwZmYtZDQzYzYzNjktYTg1ZDFjYjc=, ActorId: [8:7501625015707062856:2335], ActorState: CleanupState, TraceId: 01jtmzby9p3fyv0prsca1885xz, Sent query response back to proxy, proxyRequestId: 57, proxyId: [8:7501624972757389246:2065] 2025-05-07T08:56:31.834961Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:223: SessionId: ydb://session/3?node_id=8&id=NDczZGY4NGEtZjVjYWI1ZC01YjczOWU5Ny00OTlhMjg1Zg==, ActorId: [0:0:0], ActorState: unknown state, Create session actor with id NDczZGY4NGEtZjVjYWI1ZC01YjczOWU5Ny00OTlhMjg1Zg== 2025-05-07T08:56:31.835127Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:227: SessionId: ydb://session/3?node_id=8&id=NDczZGY4NGEtZjVjYWI1ZC01YjczOWU5Ny00OTlhMjg1Zg==, ActorId: [8:7501625062951704473:2684], ActorState: unknown state, session actor bootstrapped 2025-05-07T08:56:31.835395Z node 8 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:157: [WorkloadService] [Service] Recieved subscription request, DatabaseId: /Root, PoolId: my_pool 2025-05-07T08:56:31.835471Z node 8 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:185: [WorkloadService] [TPoolFetcherActor] ActorId: [8:7501625062951704474:2685], DatabaseId: /Root, PoolId: my_pool, Start pool fetching 2025-05-07T08:56:31.835544Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:443: SessionId: ydb://session/3?node_id=8&id=NDczZGY4NGEtZjVjYWI1ZC01YjczOWU5Ny00OTlhMjg1Zg==, ActorId: [8:7501625062951704473:2684], ActorState: ReadyState, TraceId: 01jtmzbyav9k8d61d5rwedt437, received request, proxyRequestId: 58 prepared: 0 tx_control: 0 action: QUERY_ACTION_EXECUTE type: QUERY_TYPE_SQL_GENERIC_QUERY text: SELECT 42; rpcActor: [8:7501625062951704472:2917] database: Root databaseId: /Root pool id: default 2025-05-07T08:56:31.835574Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:264: SessionId: ydb://session/3?node_id=8&id=NDczZGY4NGEtZjVjYWI1ZC01YjczOWU5Ny00OTlhMjg1Zg==, ActorId: [8:7501625062951704473:2684], ActorState: ReadyState, TraceId: 01jtmzbyav9k8d61d5rwedt437, request placed into pool from cache: default 2025-05-07T08:56:31.835678Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:575: SessionId: ydb://session/3?node_id=8&id=NDczZGY4NGEtZjVjYWI1ZC01YjczOWU5Ny00OTlhMjg1Zg==, ActorId: [8:7501625062951704473:2684], ActorState: ExecuteState, TraceId: 01jtmzbyav9k8d61d5rwedt437, Sending CompileQuery request 2025-05-07T08:56:31.837273Z node 8 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [8:7501625062951704474:2685], DatabaseId: /Root, PoolId: my_pool, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool my_pool not found or you don't have access permissions } 2025-05-07T08:56:31.837362Z node 8 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool my_pool, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool my_pool not found or you don't have access permissions } 2025-05-07T08:56:31.922348Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1298: SessionId: ydb://session/3?node_id=8&id=NDczZGY4NGEtZjVjYWI1ZC01YjczOWU5Ny00OTlhMjg1Zg==, ActorId: [8:7501625062951704473:2684], ActorState: ExecuteState, TraceId: 01jtmzbyav9k8d61d5rwedt437, ExecutePhyTx, tx: 0x000050C0001B3118 literal: 0 commit: 1 txCtx.DeferredEffects.size(): 0 2025-05-07T08:56:31.922416Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1449: SessionId: ydb://session/3?node_id=8&id=NDczZGY4NGEtZjVjYWI1ZC01YjczOWU5Ny00OTlhMjg1Zg==, ActorId: [8:7501625062951704473:2684], ActorState: ExecuteState, TraceId: 01jtmzbyav9k8d61d5rwedt437, Sending to Executer TraceId: 0 8 2025-05-07T08:56:31.922548Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1507: SessionId: ydb://session/3?node_id=8&id=NDczZGY4NGEtZjVjYWI1ZC01YjczOWU5Ny00OTlhMjg1Zg==, ActorId: [8:7501625062951704473:2684], ActorState: ExecuteState, TraceId: 01jtmzbyav9k8d61d5rwedt437, Created new KQP executer: [8:7501625062951704480:2684] isRollback: 0 2025-05-07T08:56:31.924379Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1797: SessionId: ydb://session/3?node_id=8&id=NDczZGY4NGEtZjVjYWI1ZC01YjczOWU5Ny00OTlhMjg1Zg==, ActorId: [8:7501625062951704473:2684], ActorState: ExecuteState, TraceId: 01jtmzbyav9k8d61d5rwedt437, Forwarded TEvStreamData to [8:7501625062951704472:2917] 2025-05-07T08:56:31.925207Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1699: SessionId: ydb://session/3?node_id=8&id=NDczZGY4NGEtZjVjYWI1ZC01YjczOWU5Ny00OTlhMjg1Zg==, ActorId: [8:7501625062951704473:2684], ActorState: ExecuteState, TraceId: 01jtmzbyav9k8d61d5rwedt437, TEvTxResponse, CurrentTx: 1/1 response.status: SUCCESS 2025-05-07T08:56:31.925356Z node 8 :KQP_SESSION INFO: kqp_session_actor.cpp:1958: SessionId: ydb://session/3?node_id=8&id=NDczZGY4NGEtZjVjYWI1ZC01YjczOWU5Ny00OTlhMjg1Zg==, ActorId: [8:7501625062951704473:2684], ActorState: ExecuteState, TraceId: 01jtmzbyav9k8d61d5rwedt437, txInfo Status: Committed Kind: Pure TotalDuration: 3.118 ServerDuration: 3.046 QueriesCount: 2 2025-05-07T08:56:31.925428Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2113: SessionId: ydb://session/3?node_id=8&id=NDczZGY4NGEtZjVjYWI1ZC01YjczOWU5Ny00OTlhMjg1Zg==, ActorId: [8:7501625062951704473:2684], ActorState: ExecuteState, TraceId: 01jtmzbyav9k8d61d5rwedt437, Create QueryResponse for action: QUERY_ACTION_EXECUTE with SUCCESS status 2025-05-07T08:56:31.925661Z node 8 :KQP_SESSION INFO: kqp_session_actor.cpp:2473: SessionId: ydb://session/3?node_id=8&id=NDczZGY4NGEtZjVjYWI1ZC01YjczOWU5Ny00OTlhMjg1Zg==, ActorId: [8:7501625062951704473:2684], ActorState: ExecuteState, TraceId: 01jtmzbyav9k8d61d5rwedt437, Cleanup start, isFinal: 1 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-05-07T08:56:31.925699Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2534: SessionId: ydb://session/3?node_id=8&id=NDczZGY4NGEtZjVjYWI1ZC01YjczOWU5Ny00OTlhMjg1Zg==, ActorId: [8:7501625062951704473:2684], ActorState: ExecuteState, TraceId: 01jtmzbyav9k8d61d5rwedt437, EndCleanup, isFinal: 1 2025-05-07T08:56:31.925764Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2270: SessionId: ydb://session/3?node_id=8&id=NDczZGY4NGEtZjVjYWI1ZC01YjczOWU5Ny00OTlhMjg1Zg==, ActorId: [8:7501625062951704473:2684], ActorState: ExecuteState, TraceId: 01jtmzbyav9k8d61d5rwedt437, Sent query response back to proxy, proxyRequestId: 58, proxyId: [8:7501624972757389246:2065] 
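The entries above trace the pool lifecycle this test exercises: DROP RESOURCE POOL my_pool triggers the 'Got delete notification', the next pool fetch returns NOT_FOUND, and the following SELECT 42 is placed into the 'default' pool from cache. A minimal YQL sketch of that sequence (the pool name and the queries come from the log; the CREATE statement and its CONCURRENT_QUERY_LIMIT value are assumptions, since the pool's creation options are not shown in this output):

-- Assumed setup: the log shows the pool only being dropped, never created.
CREATE RESOURCE POOL my_pool WITH (CONCURRENT_QUERY_LIMIT = 10);
SELECT 42;                   -- resolves and runs in my_pool while it exists
DROP RESOURCE POOL my_pool;  -- TPoolHandlerActorBase logs 'Got delete notification'
SELECT 42;                   -- pool fetch now fails with NOT_FOUND and the
                             -- request is placed into pool 'default' from cache
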
2025-05-07T08:56:31.925795Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2546: SessionId: ydb://session/3?node_id=8&id=NDczZGY4NGEtZjVjYWI1ZC01YjczOWU5Ny00OTlhMjg1Zg==, ActorId: [8:7501625062951704473:2684], ActorState: unknown state, TraceId: 01jtmzbyav9k8d61d5rwedt437, Cleanup temp tables: 0 2025-05-07T08:56:31.926121Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2637: SessionId: ydb://session/3?node_id=8&id=NDczZGY4NGEtZjVjYWI1ZC01YjczOWU5Ny00OTlhMjg1Zg==, ActorId: [8:7501625062951704473:2684], ActorState: unknown state, TraceId: 01jtmzbyav9k8d61d5rwedt437, Session actor destroyed 2025-05-07T08:56:31.935598Z node 8 :KQP_SESSION INFO: kqp_session_actor.cpp:2315: SessionId: ydb://session/3?node_id=8&id=NzFlNTEzYmMtZTE1ZGUwZmYtZDQzYzYzNjktYTg1ZDFjYjc=, ActorId: [8:7501625015707062856:2335], ActorState: ReadyState, Session closed due to explicit close event 2025-05-07T08:56:31.935660Z node 8 :KQP_SESSION INFO: kqp_session_actor.cpp:2473: SessionId: ydb://session/3?node_id=8&id=NzFlNTEzYmMtZTE1ZGUwZmYtZDQzYzYzNjktYTg1ZDFjYjc=, ActorId: [8:7501625015707062856:2335], ActorState: ReadyState, Cleanup start, isFinal: 1 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-05-07T08:56:31.935731Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2534: SessionId: ydb://session/3?node_id=8&id=NzFlNTEzYmMtZTE1ZGUwZmYtZDQzYzYzNjktYTg1ZDFjYjc=, ActorId: [8:7501625015707062856:2335], ActorState: ReadyState, EndCleanup, isFinal: 1 2025-05-07T08:56:31.935772Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2546: SessionId: ydb://session/3?node_id=8&id=NzFlNTEzYmMtZTE1ZGUwZmYtZDQzYzYzNjktYTg1ZDFjYjc=, ActorId: [8:7501625015707062856:2335], ActorState: unknown state, Cleanup temp tables: 0 2025-05-07T08:56:31.935866Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2637: SessionId: ydb://session/3?node_id=8&id=NzFlNTEzYmMtZTE1ZGUwZmYtZDQzYzYzNjktYTg1ZDFjYjc=, ActorId: [8:7501625015707062856:2335], ActorState: unknown state, Session actor destroyed >> BackupRestoreS3::TestAllPrimitiveTypes-PRIMITIVE_TYPE_ID_UNSPECIFIED [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-UINT8 >> BackupRestoreS3::RestoreTablePartitioningSettings >> TFlatTableExecutor_BTreeIndex::EnableLocalDBBtreeIndex_Default [GOOD] >> TFlatTableExecutor_BTreeIndex::EnableLocalDBBtreeIndex_True ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/idx_test/unittest >> YdbIndexTable::MultiShardTableOneUniqIndexDataColumn [GOOD] Test command err: Trying to start YDB, gRPC: 9993, MsgBus: 17769 2025-05-07T08:51:40.363991Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501623812914284582:2063];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:51:40.364066Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/001ec0/r3tmp/tmpKKfE2u/pdisk_1.dat 2025-05-07T08:51:40.898432Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:51:40.900059Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:51:40.900134Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:51:40.905235Z node 1 :HIVE WARN: 
node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 9993, node 1 2025-05-07T08:51:41.013892Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:51:41.013914Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:51:41.013935Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:51:41.014085Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:17769 TClient is connected to server localhost:17769 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:51:41.765313Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:51:41.791153Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T08:51:41.807902Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:51:41.987601Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:51:42.173863Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:51:42.312955Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:51:44.620150Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623830094155415:2407], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:51:44.624743Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:51:45.052381Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T08:51:45.116327Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T08:51:45.201018Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T08:51:45.269151Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T08:51:45.344143Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T08:51:45.373308Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501623812914284582:2063];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:51:45.373364Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:51:45.440242Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T08:51:45.545999Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T08:51:45.656593Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623834389123383:2473], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:51:45.656683Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:51:45.658762Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623834389123388:2476], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:51:45.663779Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-05-07T08:51:45.681352Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710669, at schemeshard: 72057594046644480 2025-05-07T08:51:45.681633Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501623834389123390:2477], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T08:51:45.767663Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501623834389123441:3423] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:51:47.312965Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480 2025-05-07T08:51:48.520308Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710673. Ctx: { TraceId: 01jtmz39ka0qd2bnsrma35akqv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NWVhOGUyMGUtYmVmNDVjMGQtZDYwZGQyZmUtMTc4MWE4MzQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:51:48.533561Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710674. Ctx: { TraceId: 01jtmz39kjccp9d9hyjb9ytabs, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NWU1N2E4N2MtYjIzODUwMTktZjk0ODZlMzktYTZjMzNlZjU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:51:48.541912Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710676. Ctx: { TraceId: 01jtmz39kz1zvpzswjmcr1j6jp, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OTI3ZTc1ZTAtM2ZlZjk4MTgtZTA4ZTAwOWMtZDhmNTgxMGM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:51:48.543527Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710675. Ctx: { TraceId: 01jtmz39m03nbdhda5vn08nwe0, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OTZkNTE0MzEtYTY5NzRkYzgtNDNlNzFiYzMtYjFiNTc2Yg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:51:48.545171Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710679. Ctx: { TraceId: 01jtmz39kjccp9d9hyjb9ytabs, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NWU1N2E4N2MtYjIzODUwMTktZjk0ODZlMzktYTZjMzNlZjU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:51:48.545675Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710677. Ctx: { TraceId: 01jtmz39kz9bymgkgdjhexmcet, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OTQ4NDk3YzEtNDc3ZWI5NWMtNTY2NGY0YzUtZTVhYTBmYTM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:51:48.553833Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710678. Ctx: { TraceId: 01jtmz39kz5p7x1msd1ay4te53, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_ ... WQyMjctODMyZmQ1MWUtNzhjNTRhNzUtMTE4ZmMxZTE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:56:25.897958Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976727014. Ctx: { TraceId: 01jtmzbrgc7wyg1st1x40g1gcc, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MWVhNzAyNjgtZjRjMWRkYTMtODhkMmIwNzMtZjRlOWY2NTI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-05-07T08:56:25.909058Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976727015. Ctx: { TraceId: 01jtmzbrgx1p2nsaggxr8096n4, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=OTJhZjBmNDYtZTk1YzMxYjEtOTg5ZDU0YWItZDNmNjBiZWY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:56:25.913299Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976727016. Ctx: { TraceId: 01jtmzbrgc7wyg1st1x40g1gcc, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MWVhNzAyNjgtZjRjMWRkYTMtODhkMmIwNzMtZjRlOWY2NTI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:56:25.917819Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976727017. Ctx: { TraceId: 01jtmzbrgx1p2nsaggxr8096n4, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=OTJhZjBmNDYtZTk1YzMxYjEtOTg5ZDU0YWItZDNmNjBiZWY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:56:25.923302Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976727018. Ctx: { TraceId: 01jtmzbrhee0w706jrh8ahkqp8, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZjBlYTM3OTQtMTY2OTgxNzktMThiZjY2NzItN2Q2MDgwZDQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:56:25.927565Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976727020. Ctx: { TraceId: 01jtmzbrgx1p2nsaggxr8096n4, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=OTJhZjBmNDYtZTk1YzMxYjEtOTg5ZDU0YWItZDNmNjBiZWY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:56:25.929690Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976727019. Ctx: { TraceId: 01jtmzbrhx2wjcjg2xkwwr2g2k, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NDgyMzQxZGQtZjQ2ODdlMDktN2EyYmE1ZjItZmI0OTlmY2E=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:56:25.941018Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976727021. Ctx: { TraceId: 01jtmzbrhsbakd990cm19pg306, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NDE4YTM3MGYtY2IzYjhhZmQtOWQwOTc2MTctN2FhZjI5MzY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:56:25.965753Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976727023. Ctx: { TraceId: 01jtmzbrhsbakd990cm19pg306, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NDE4YTM3MGYtY2IzYjhhZmQtOWQwOTc2MTctN2FhZjI5MzY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:56:25.966667Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976727022. Ctx: { TraceId: 01jtmzbrhee0w706jrh8ahkqp8, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZjBlYTM3OTQtMTY2OTgxNzktMThiZjY2NzItN2Q2MDgwZDQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:56:25.972607Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976727024. Ctx: { TraceId: 01jtmzbrhee0w706jrh8ahkqp8, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZjBlYTM3OTQtMTY2OTgxNzktMThiZjY2NzItN2Q2MDgwZDQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-05-07T08:56:25.972937Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976727025. Ctx: { TraceId: 01jtmzbrhsbakd990cm19pg306, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NDE4YTM3MGYtY2IzYjhhZmQtOWQwOTc2MTctN2FhZjI5MzY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:56:25.979077Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976727026. Ctx: { TraceId: 01jtmzbrkma19qemkeensnck3v, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YjMwOWQyMjctODMyZmQ1MWUtNzhjNTRhNzUtMTE4ZmMxZTE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:56:25.979186Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976727027. Ctx: { TraceId: 01jtmzbrhee0w706jrh8ahkqp8, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZjBlYTM3OTQtMTY2OTgxNzktMThiZjY2NzItN2Q2MDgwZDQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:56:25.988671Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976727028. Ctx: { TraceId: 01jtmzbrkm99a3mxgtrwax38df, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MWVhNzAyNjgtZjRjMWRkYTMtODhkMmIwNzMtZjRlOWY2NTI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:56:25.997988Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976727029. Ctx: { TraceId: 01jtmzbrkm99a3mxgtrwax38df, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MWVhNzAyNjgtZjRjMWRkYTMtODhkMmIwNzMtZjRlOWY2NTI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:56:26.037770Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976727030. Ctx: { TraceId: 01jtmzbrmhc03hevcwmhtkzr84, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NDgyMzQxZGQtZjQ2ODdlMDktN2EyYmE1ZjItZmI0OTlmY2E=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:56:26.038910Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976727031. Ctx: { TraceId: 01jtmzbrmneae1xp038waaww63, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YjMwOWQyMjctODMyZmQ1MWUtNzhjNTRhNzUtMTE4ZmMxZTE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:56:26.040720Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976727032. Ctx: { TraceId: 01jtmzbrmr21g6m6xg6rs5jxbq, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZjBlYTM3OTQtMTY2OTgxNzktMThiZjY2NzItN2Q2MDgwZDQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:56:26.046817Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976727033. Ctx: { TraceId: 01jtmzbrmr3we6sht8b8njk6pc, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=OTJhZjBmNDYtZTk1YzMxYjEtOTg5ZDU0YWItZDNmNjBiZWY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:56:26.076227Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976727034. Ctx: { TraceId: 01jtmzbrmhc03hevcwmhtkzr84, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NDgyMzQxZGQtZjQ2ODdlMDktN2EyYmE1ZjItZmI0OTlmY2E=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-05-07T08:56:26.076971Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976727035. Ctx: { TraceId: 01jtmzbrmr3we6sht8b8njk6pc, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=OTJhZjBmNDYtZTk1YzMxYjEtOTg5ZDU0YWItZDNmNjBiZWY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:56:26.079432Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976727036. Ctx: { TraceId: 01jtmzbrmneae1xp038waaww63, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YjMwOWQyMjctODMyZmQ1MWUtNzhjNTRhNzUtMTE4ZmMxZTE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:56:26.085153Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976727037. Ctx: { TraceId: 01jtmzbrmr21g6m6xg6rs5jxbq, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZjBlYTM3OTQtMTY2OTgxNzktMThiZjY2NzItN2Q2MDgwZDQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:56:26.092850Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976727038. Ctx: { TraceId: 01jtmzbrmr3we6sht8b8njk6pc, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=OTJhZjBmNDYtZTk1YzMxYjEtOTg5ZDU0YWItZDNmNjBiZWY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:56:26.108488Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976727039. Ctx: { TraceId: 01jtmzbrpk6pyjavk42vqhr3bt, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NDE4YTM3MGYtY2IzYjhhZmQtOWQwOTc2MTctN2FhZjI5MzY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:56:26.130473Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976727040. Ctx: { TraceId: 01jtmzbrmr21g6m6xg6rs5jxbq, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZjBlYTM3OTQtMTY2OTgxNzktMThiZjY2NzItN2Q2MDgwZDQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root finished with status: SUCCESS 2025-05-07T08:56:26.150971Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976727041. Ctx: { TraceId: 01jtmzbrpk6pyjavk42vqhr3bt, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NDE4YTM3MGYtY2IzYjhhZmQtOWQwOTc2MTctN2FhZjI5MzY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root finished with status: SUCCESS 2025-05-07T08:56:26.179765Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976727042. Ctx: { TraceId: 01jtmzbrrr45gexyxp3zy1mhaf, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MWVhNzAyNjgtZjRjMWRkYTMtODhkMmIwNzMtZjRlOWY2NTI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:56:26.193725Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976727043. Ctx: { TraceId: 01jtmzbrspcetsaj650r4f3j3f, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YjMwOWQyMjctODMyZmQ1MWUtNzhjNTRhNzUtMTE4ZmMxZTE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:56:26.196671Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976727045. 
Ctx: { TraceId: 01jtmzbrpk6pyjavk42vqhr3bt, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NDE4YTM3MGYtY2IzYjhhZmQtOWQwOTc2MTctN2FhZjI5MzY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:56:26.215636Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976727044. Ctx: { TraceId: 01jtmzbrrr45gexyxp3zy1mhaf, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MWVhNzAyNjgtZjRjMWRkYTMtODhkMmIwNzMtZjRlOWY2NTI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root finished with status: SUCCESS finished with status: SUCCESS 2025-05-07T08:56:26.256160Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976727046. Ctx: { TraceId: 01jtmzbrrr45gexyxp3zy1mhaf, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MWVhNzAyNjgtZjRjMWRkYTMtODhkMmIwNzMtZjRlOWY2NTI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root finished with status: SUCCESS ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/pg/unittest >> KqpPg::PgUpdateCompoundKey-useSink [GOOD] Test command err: Trying to start YDB, gRPC: 6153, MsgBus: 28188 2025-05-07T08:50:52.382902Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501623603752032035:2194];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:50:52.383262Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00219d/r3tmp/tmpnpo0Vw/pdisk_1.dat 2025-05-07T08:50:52.940123Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:50:52.940213Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:50:52.943118Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:50:52.981103Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 6153, node 1 2025-05-07T08:50:53.255907Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:50:53.255956Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:50:53.255965Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:50:53.256101Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:28188 TClient is connected to server localhost:28188 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:50:54.200629Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 16 2025-05-07T08:50:56.849397Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 --!syntax_pg INSERT INTO Pg1000_b (key, value) VALUES ( '0'::int2, ARRAY ['false'::bool, 'false'::bool] ); 2025-05-07T08:50:57.099554Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623625226869136:2341], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:50:57.099676Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:50:57.100143Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623625226869148:2344], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:50:57.104340Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480 2025-05-07T08:50:57.119605Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710659, at schemeshard: 72057594046644480 2025-05-07T08:50:57.122230Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501623625226869150:2345], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-05-07T08:50:57.182302Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501623625226869201:2392] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } --!syntax_pg INSERT INTO Pg1000_b (key, value) VALUES ( '1'::int2, ARRAY ['true'::bool, 'true'::bool] ); 2025-05-07T08:50:57.379762Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501623603752032035:2194];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:50:57.379847Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 18 2025-05-07T08:50:57.705830Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 --!syntax_pg INSERT INTO Pg1002_b (key, value) VALUES ( '0'::int2, ARRAY ['0'::"char", '0'::"char"] ); --!syntax_pg INSERT INTO Pg1002_b (key, value) VALUES ( '1'::int2, ARRAY ['1'::"char", '1'::"char"] ); --!syntax_pg INSERT INTO Pg1002_b (key, value) VALUES ( '2'::int2, ARRAY ['2'::"char", '2'::"char"] ); 21 2025-05-07T08:50:58.329434Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480 2025-05-07T08:50:58.385300Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill --!syntax_pg INSERT INTO Pg1005_b (key, value) VALUES ( '0'::int2, ARRAY ['0'::int2, '0'::int2] ); --!syntax_pg INSERT INTO Pg1005_b (key, value) VALUES ( '1'::int2, ARRAY ['1'::int2, '1'::int2] ); --!syntax_pg INSERT INTO Pg1005_b (key, value) VALUES ( '2'::int2, ARRAY ['2'::int2, '2'::int2] ); 23 2025-05-07T08:50:58.924705Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710675:0, at schemeshard: 72057594046644480 2025-05-07T08:50:59.022561Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill --!syntax_pg INSERT INTO Pg1007_b (key, value) VALUES ( '0'::int2, ARRAY ['0'::int4, '0'::int4] ); --!syntax_pg INSERT INTO Pg1007_b (key, value) VALUES ( '1'::int2, ARRAY ['1'::int4, '1'::int4] ); --!syntax_pg INSERT INTO Pg1007_b (key, value) VALUES ( '2'::int2, ARRAY ['2'::int4, '2'::int4] ); 20 2025-05-07T08:50:59.669453Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710681:0, at schemeshard: 72057594046644480 --!syntax_pg INSERT INTO Pg1016_b (key, value) VALUES ( '0'::int2, ARRAY ['0'::int8, '0'::int8] ); --!syntax_pg INSERT INTO Pg1016_b (key, value) VALUES ( '1'::int2, ARRAY ['1'::int8, '1'::int8] ); --!syntax_pg INSERT INTO Pg1016_b (key, value) VALUES ( '2'::int2, ARRAY ['2'::int8, '2'::int8] ); 700 2025-05-07T08:51:00.389243Z node 1 :FLAT_TX_SCHEMESHARD WARN: 
schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710686:0, at schemeshard: 72057594046644480 2025-05-07T08:51:00.490279Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill --!syntax_pg INSERT INTO Pg1021_b (key, value) VALUES ( '0'::int2, ARRAY ['0.5'::float4, '0.5'::float4] ); --!syntax_pg INSERT INTO Pg1021_b (key, value) VALUES ( '1'::int2, ARRAY ['1.5'::float4, '1.5'::float4] ); --!syntax_pg INSERT INTO Pg1021_b (key, value) VALUES ( '2'::int2, ARRAY ['2.5'::float4, '2.5'::float4] ); 701 2025-05-07T08:51:01.039190Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710692:0, at schemeshard: 72057594046644480 --!syntax_pg INSERT INTO Pg1022_b (key, value) VALUES ( '0'::int2, ARRAY ['0.5'::float8, '0.5'::float8] ); --!syntax_pg INSERT INTO Pg1022_b (key, value) VALUES ( '1'::int2, ARRAY ['1.5'::float8, '1.5'::float8] ); --!syntax_pg INSERT INTO Pg1022_b (key, value) VALUES ( '2'::int2, ARRAY ['2.5'::float8, '2.5'::float8] ); 25 2025-05-07T08:51:01.633319Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710697:0, at schemeshard: 72057594046644480 2025-05-07T08:51:01.734650Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill --!syntax_pg INSERT INTO Pg1009_b (key, value) VALUES ( '0'::int2, ARRAY ['text 0'::text, 'text 0'::text] ); --!syntax_pg INSERT INTO Pg1009_b (key, value) VALUES ( '1'::int2, ARRAY ['text 1'::text, 'text 1'::text] ); --!syntax_pg INSERT INTO Pg1009_b (key, value) VALUES ( '2'::int2, ARRAY ['text 2'::text, 'text 2'::text] ); 1042 2025-05-07T08:51:02.263101Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710703:0, at schemeshard: 72057594046644480 2025-05-07T08:51:02.348869Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill --!syntax_pg INSERT INTO Pg1014_b (key, value) VALUES ( '0'::int2, ARRAY ['bpcha ... sed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:56:02.483444Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-05-07T08:56:04.950664Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[9:7501624922704037135:2059];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:56:04.950748Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:56:09.554142Z node 9 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [9:7501624965653710777:2339], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:56:09.570081Z node 9 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:56:09.575400Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-05-07T08:56:09.727155Z node 9 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [9:7501624965653710885:2352], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:56:09.733802Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480 2025-05-07T08:56:09.738211Z node 9 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [9:7501624965653710879:2349], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:56:09.738358Z node 9 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:56:09.766811Z node 9 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [9:7501624965653710887:2353], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-05-07T08:56:09.854889Z node 9 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [9:7501624965653710938:2405] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:56:10.334747Z node 9 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [9:7501624969948678283:2366], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:1:1: Error: At function: KiWriteTable!
:1:1: Error: Cannot update primary key column: key1
:1:1: Error: Cannot update primary key column: key2 2025-05-07T08:56:10.339659Z node 9 :KQP_SESSION WARN: kqp_session_actor.cpp:2147: SessionId: ydb://session/3?node_id=9&id=OTQxMjc2NWMtZjczNTFhMTMtYjgwNDc5Ni1mYjUwNzUyNw==, ActorId: [9:7501624969948678276:2362], ActorState: ExecuteState, TraceId: 01jtmzb99a50nqk1xde5xhw57e, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: 2025-05-07T08:56:10.431154Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 waiting... Trying to start YDB, gRPC: 8916, MsgBus: 18424 2025-05-07T08:56:15.205298Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7501624992647522563:2143];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00219d/r3tmp/tmp8UFcM1/pdisk_1.dat 2025-05-07T08:56:15.381550Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-05-07T08:56:15.614674Z node 10 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:56:15.637551Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:56:15.638070Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:56:15.642395Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 8916, node 10 2025-05-07T08:56:15.929138Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:56:15.929172Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:56:15.929184Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:56:15.929368Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:18424 TClient is connected to server localhost:18424 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
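The GENERIC_ERROR issues above (':1:1: Error: Cannot update primary key column: key1 / key2') come from KQP's type annotation pass, which rejects any UPDATE that assigns to a primary-key column. A sketch of the kind of statement PgUpdateCompoundKey provokes, assuming a table with the compound primary key (key1, key2) implied by the error text; the table name and literal values are illustrative, not taken from the log:

--!syntax_pg
-- Hypothetical table: only the key column names key1/key2 appear in the log.
UPDATE PgCompoundKey SET key1 = '10'::int4, key2 = '10'::int4 WHERE value = '1'::int4;
-- Rejected at compile time with 'Cannot update primary key column'; changing a
-- key requires deleting the old row and inserting one with the new key values.
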
2025-05-07T08:56:17.488709Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:56:20.204704Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[10:7501624992647522563:2143];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:56:20.204825Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:56:25.193503Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7501625035597196120:2341], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:56:25.193657Z node 10 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:56:25.223877Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 2025-05-07T08:56:25.549483Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7501625035597196230:2354], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:56:25.549666Z node 10 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:56:25.550093Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7501625035597196236:2357], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:56:25.557589Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480 2025-05-07T08:56:25.596707Z node 10 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [10:7501625035597196238:2358], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-05-07T08:56:25.661521Z node 10 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [10:7501625035597196289:2413] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:56:26.625461Z node 10 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [10:7501625039892163658:2380], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:1:1: Error: At function: KiWriteTable!
:1:1: Error: Cannot update primary key column: key1
:1:1: Error: Cannot update primary key column: key2 2025-05-07T08:56:26.626272Z node 10 :KQP_SESSION WARN: kqp_session_actor.cpp:2147: SessionId: ydb://session/3?node_id=10&id=ZTdmM2MzMjYtMzgyN2Y5MjQtN2Y0Mjk1YmMtMWNkZjkyNDE=, ActorId: [10:7501625039892163651:2376], ActorState: ExecuteState, TraceId: 01jtmzbs5y9k29dx5ptdmxe305, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: 2025-05-07T08:56:26.745526Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 waiting... >> TFlatTableExecutor_BTreeIndex::EnableLocalDBBtreeIndex_True [GOOD] >> TFlatTableExecutor_BTreeIndex::EnableLocalDBBtreeIndex_False >> BackupRestore::TestAllPrimitiveTypes-PRIMITIVE_TYPE_ID_UNSPECIFIED [GOOD] >> BackupRestore::TestAllPrimitiveTypes-INT8 >> KqpPg::TableDeleteWhere+useSink [GOOD] >> KqpPg::TableDeleteWhere-useSink >> GenericFederatedQuery::IcebergHiveBasicSelectCount [GOOD] >> GenericFederatedQuery::IcebergHiveBasicFilterPushdown >> KqpWorkloadService::TestCpuLoadThresholdRefresh [GOOD] >> KqpWorkloadService::TestHandlerActorCleanup >> GenericFederatedQuery::YdbSelectCount [GOOD] >> SystemView::AuthUsers_Access [GOOD] >> SystemView::AuthUsers_ResultOrder |90.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/ut/data/ydb-core-kqp-ut-data |90.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/data/ydb-core-kqp-ut-data |90.6%| [TA] {RESULT} $(B)/ydb/core/tx/datashard/ut_minstep/test-results/unittest/{meta.json ... results_accumulator.log} |90.6%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/data/ydb-core-kqp-ut-data >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeDir [GOOD] >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeBlockStoreVolume [GOOD] >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeExtSubDomain [GOOD] >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeFileStore [GOOD] >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeColumnStore [GOOD] >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeColumnTable [GOOD] >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeCdcStream >> TFlatTableExecutor_BTreeIndex::EnableLocalDBBtreeIndex_False [GOOD] >> TFlatTableExecutor_BTreeIndex::EnableLocalDBBtreeIndex_True_EnableLocalDBFlatIndex_False >> GroupWriteTest::TwoTables [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/load_test/ut/unittest >> GroupWriteTest::TwoTables [GOOD] Test command err: RandomSeed# 6359024187862071822 2025-05-07T08:56:28.266156Z 1 00h01m00.010512s :BS_LOAD_TEST DEBUG: TabletId# 72058679074007041 Generation# 1 is bootstrapped, going to send TEvDiscover {TabletId# 72058679074007041 MinGeneration# 1 ReadBody# false DiscoverBlockedGeneration# true ForceBlockedGeneration# 0 FromLeader# true Deadline# 18446744073709551} 2025-05-07T08:56:28.266266Z 1 00h01m00.010512s :BS_LOAD_TEST DEBUG: TabletId# 72058502699329537 Generation# 1 is bootstrapped, going to send TEvDiscover {TabletId# 72058502699329537 MinGeneration# 1 ReadBody# false DiscoverBlockedGeneration# true ForceBlockedGeneration# 0 FromLeader# true Deadline# 18446744073709551} 2025-05-07T08:56:28.304322Z 1 00h01m00.010512s :BS_LOAD_TEST INFO: TabletId# 72058679074007041 Generation# 1 recieved TEvDiscoverResult {Status# NODATA BlockedGeneration# 0 Id# [0:0:0:0:0:0:0] Size# 0 MinGeneration# 1} 2025-05-07T08:56:28.304450Z 1 00h01m00.010512s :BS_LOAD_TEST DEBUG: TabletId# 72058679074007041 Generation# 1 
going to send TEvBlock {TabletId# 72058679074007041 Generation# 1 Deadline# 18446744073709551 IsMonitored# 1} 2025-05-07T08:56:28.304559Z 1 00h01m00.010512s :BS_LOAD_TEST INFO: TabletId# 72058502699329537 Generation# 1 recieved TEvDiscoverResult {Status# NODATA BlockedGeneration# 0 Id# [0:0:0:0:0:0:0] Size# 0 MinGeneration# 1} 2025-05-07T08:56:28.304589Z 1 00h01m00.010512s :BS_LOAD_TEST DEBUG: TabletId# 72058502699329537 Generation# 1 going to send TEvBlock {TabletId# 72058502699329537 Generation# 1 Deadline# 18446744073709551 IsMonitored# 1} 2025-05-07T08:56:28.309812Z 1 00h01m00.010512s :BS_LOAD_TEST INFO: TabletId# 72058679074007041 Generation# 1 recieved TEvBlockResult {Status# OK} 2025-05-07T08:56:28.309921Z 1 00h01m00.010512s :BS_LOAD_TEST INFO: TabletId# 72058502699329537 Generation# 1 recieved TEvBlockResult {Status# OK} 2025-05-07T08:56:28.332365Z 1 00h01m00.010512s :BS_LOAD_TEST DEBUG: TabletId# 72058502699329537 Generation# 2 going to send TEvCollectGarbage {TabletId# 72058502699329537 RecordGeneration# 2 PerGenerationCounter# 1 Channel# 0 Deadline# 18446744073709551 Collect# true CollectGeneration# 2 CollectStep# 0 Hard# true IsMultiCollectAllowed# 0 IsMonitored# 1} 2025-05-07T08:56:28.332483Z 1 00h01m00.010512s :BS_LOAD_TEST DEBUG: TabletId# 72058679074007041 Generation# 2 going to send TEvCollectGarbage {TabletId# 72058679074007041 RecordGeneration# 2 PerGenerationCounter# 1 Channel# 0 Deadline# 18446744073709551 Collect# true CollectGeneration# 2 CollectStep# 0 Hard# true IsMultiCollectAllowed# 0 IsMonitored# 1} 2025-05-07T08:56:28.337628Z 1 00h01m00.010512s :BS_LOAD_TEST INFO: TabletId# 72058502699329537 Generation# 2 recieved TEvCollectGarbageResult {TabletId# 72058502699329537 RecordGeneration# 2 PerGenerationCounter# 1 Channel# 0 Status# OK} 2025-05-07T08:56:28.337744Z 1 00h01m00.010512s :BS_LOAD_TEST INFO: TabletId# 72058679074007041 Generation# 2 recieved TEvCollectGarbageResult {TabletId# 72058679074007041 RecordGeneration# 2 PerGenerationCounter# 1 Channel# 0 Status# OK} 2025-05-07T08:56:39.230899Z 1 00h01m20.010512s :BS_LOAD_TEST DEBUG: Load tablet recieved PoisonPill, going to die 2025-05-07T08:56:39.231011Z 1 00h01m20.010512s :BS_LOAD_TEST DEBUG: TabletId# 72058679074007041 Generation# 2 end working, going to send TEvCollectGarbage {TabletId# 72058679074007041 RecordGeneration# 2 PerGenerationCounter# 22 Channel# 0 Deadline# 18446744073709551 Collect# true CollectGeneration# 2 CollectStep# 4294967295 Hard# true IsMultiCollectAllowed# 0 IsMonitored# 1} 2025-05-07T08:56:39.231082Z 1 00h01m20.010512s :BS_LOAD_TEST DEBUG: TabletId# 72058502699329537 Generation# 2 end working, going to send TEvCollectGarbage {TabletId# 72058502699329537 RecordGeneration# 2 PerGenerationCounter# 22 Channel# 0 Deadline# 18446744073709551 Collect# true CollectGeneration# 2 CollectStep# 4294967295 Hard# true IsMultiCollectAllowed# 0 IsMonitored# 1} 2025-05-07T08:56:39.231124Z 1 00h01m20.010512s :BS_LOAD_TEST DEBUG: Load tablet recieved PoisonPill, going to die 2025-05-07T08:56:39.231164Z 1 00h01m20.010512s :BS_LOAD_TEST DEBUG: TabletId# 72058679074007041 Generation# 2 end working, going to send TEvCollectGarbage {TabletId# 72058679074007041 RecordGeneration# 2 PerGenerationCounter# 23 Channel# 0 Deadline# 18446744073709551 Collect# true CollectGeneration# 2 CollectStep# 4294967295 Hard# true IsMultiCollectAllowed# 0 IsMonitored# 1} 2025-05-07T08:56:39.231214Z 1 00h01m20.010512s :BS_LOAD_TEST DEBUG: TabletId# 72058502699329537 Generation# 2 end working, going to send TEvCollectGarbage 
{TabletId# 72058502699329537 RecordGeneration# 2 PerGenerationCounter# 23 Channel# 0 Deadline# 18446744073709551 Collect# true CollectGeneration# 2 CollectStep# 4294967295 Hard# true IsMultiCollectAllowed# 0 IsMonitored# 1} 2025-05-07T08:56:39.231248Z 1 00h01m20.010512s :BS_LOAD_TEST DEBUG: Load tablet recieved PoisonPill, going to die 2025-05-07T08:56:39.231284Z 1 00h01m20.010512s :BS_LOAD_TEST DEBUG: TabletId# 72058679074007041 Generation# 2 end working, going to send TEvCollectGarbage {TabletId# 72058679074007041 RecordGeneration# 2 PerGenerationCounter# 24 Channel# 0 Deadline# 18446744073709551 Collect# true CollectGeneration# 2 CollectStep# 4294967295 Hard# true IsMultiCollectAllowed# 0 IsMonitored# 1} 2025-05-07T08:56:39.231332Z 1 00h01m20.010512s :BS_LOAD_TEST DEBUG: TabletId# 72058502699329537 Generation# 2 end working, going to send TEvCollectGarbage {TabletId# 72058502699329537 RecordGeneration# 2 PerGenerationCounter# 24 Channel# 0 Deadline# 18446744073709551 Collect# true CollectGeneration# 2 CollectStep# 4294967295 Hard# true IsMultiCollectAllowed# 0 IsMonitored# 1} 2025-05-07T08:56:39.287975Z 1 00h01m20.010512s :BS_LOAD_TEST INFO: TabletId# 72058679074007041 Generation# 2 recieved TEvCollectGarbageResult {TabletId# 72058679074007041 RecordGeneration# 2 PerGenerationCounter# 22 Channel# 0 Status# OK} 2025-05-07T08:56:39.288093Z 1 00h01m20.010512s :BS_LOAD_TEST INFO: TabletId# 72058502699329537 Generation# 2 recieved TEvCollectGarbageResult {TabletId# 72058502699329537 RecordGeneration# 2 PerGenerationCounter# 22 Channel# 0 Status# OK} 2025-05-07T08:56:39.288138Z 1 00h01m20.010512s :BS_LOAD_TEST INFO: TabletId# 72058679074007041 Generation# 2 recieved TEvCollectGarbageResult {TabletId# 72058679074007041 RecordGeneration# 2 PerGenerationCounter# 23 Channel# 0 Status# OK} 2025-05-07T08:56:39.288183Z 1 00h01m20.010512s :BS_LOAD_TEST INFO: TabletId# 72058502699329537 Generation# 2 recieved TEvCollectGarbageResult {TabletId# 72058502699329537 RecordGeneration# 2 PerGenerationCounter# 23 Channel# 0 Status# OK} 2025-05-07T08:56:39.288227Z 1 00h01m20.010512s :BS_LOAD_TEST INFO: TabletId# 72058679074007041 Generation# 2 recieved TEvCollectGarbageResult {TabletId# 72058679074007041 RecordGeneration# 2 PerGenerationCounter# 24 Channel# 0 Status# OK} 2025-05-07T08:56:39.288271Z 1 00h01m20.010512s :BS_LOAD_TEST INFO: TabletId# 72058502699329537 Generation# 2 recieved TEvCollectGarbageResult {TabletId# 72058502699329537 RecordGeneration# 2 PerGenerationCounter# 24 Channel# 0 Status# OK} >> BuildStatsHistogram::Five_Five_Serial [GOOD] >> BuildStatsHistogram::Five_Five_Crossed >> BackupRestore::TestAllIndexTypes-EIndexTypeInvalid [GOOD] >> BackupRestore::TestAllIndexTypes-EIndexTypeGlobalAsync ------- [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/federated_query/generic_ut/unittest >> GenericFederatedQuery::YdbSelectCount [GOOD] Test command err: Trying to start YDB, gRPC: 16285, MsgBus: 63617 2025-05-07T08:55:43.783399Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501624853718294179:2059];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:55:43.783451Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0048a4/r3tmp/tmpeq6td8/pdisk_1.dat 2025-05-07T08:55:44.600415Z node 1 :IMPORT 
WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:55:44.616713Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:55:44.616824Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:55:44.620956Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 16285, node 1 2025-05-07T08:55:44.914800Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:55:44.914830Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:55:44.914847Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:55:44.914988Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:63617 TClient is connected to server localhost:63617 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:55:45.750932Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:55:48.389964Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501624875193131327:2332], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:55:48.390118Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:55:48.790132Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501624853718294179:2059];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:55:48.790217Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:55:49.404104Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:2, at schemeshard: 72057594046644480 2025-05-07T08:55:49.719222Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501624879488098752:2347], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:55:49.719278Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:55:49.719297Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501624879488098757:2350], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:55:49.722595Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:2, at schemeshard: 72057594046644480 2025-05-07T08:55:49.739306Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710659, at schemeshard: 72057594046644480 2025-05-07T08:55:49.739627Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501624879488098759:2351], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-05-07T08:55:49.821165Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501624879488098820:2409] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:55:51.109160Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T08:55:51.756553Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:1, at schemeshard: 72057594046644480 2025-05-07T08:55:52.885049Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710675:0, at schemeshard: 72057594046644480 2025-05-07T08:55:54.162601Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710680:0, at schemeshard: 72057594046644480 2025-05-07T08:55:55.283281Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710685:0, at schemeshard: 72057594046644480 2025-05-07T08:55:56.684388Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976715758:2, at schemeshard: 72057594046644480 2025-05-07T08:55:56.895573Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976715759:0, at schemeshard: 72057594046644480 2025-05-07T08:55:59.255856Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976710706:0, at schemeshard: 72057594046644480 2025-05-07T08:55:59.279014Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jtmzakx29bkdrqxtj29npska", SessionId: ydb://session/3?node_id=1&id=Mzk4MjUxZGQtNWY1ZjE1Y2UtMjI4NDIxZS00YjUzMWMwNw==, Slow query, duration: 10.889811s, status: SUCCESS, user: UNAUTHENTICATED, results: 0b, text: "\n CREATE OBJECT external_data_source_password (TYPE SECRET) WITH (value=qwerty12345);\n\n CREATE EXTERNAL DATA SOURCE external_data_source WITH (\n SOURCE_TYPE=\"Ydb\",\n LOCATION=\"localhost:2136\",\n AUTH_METHOD=\"BASIC\",\n LOGIN=\"crab\",\n DATABASE_NAME=\"pgdb\",\n PASSWORD_SECRET_NAME=\"external_data_source_password\",\n USE_TLS=\"TRUE\"\n );\n ", parameters: 0b 2025-05-07T08:55:59.356846Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710708:0, at schemeshard: 72057594046644480 2025-05-07T08:55:59.359192Z node 1 :FLAT_TX_SCHEMESHARD WARN: 
schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710707:0, at schemeshard: 72057594046644480 2025-05-07T08:55:59.364243Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710709:0, at schemeshard: 72057594046644480 2025-05-07T08:55:59.571879Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7261: Cannot get console configs 2025-05-07T08:55:59.571905Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded Call DescribeTable. data_source_instance { kind: YDB endpoint { host: "localhost" port: 2136 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Expected: data_source_instance { kind: YDB endpoint { host: "localhost" port: 2136 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Actual: data_source_instance { kind: YDB endpoint { host: "localhost" port: 2136 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } DescribeTable result. GRpcStatusCode: 0 schema { columns { name: "col1" type { type_id: UINT16 } } } error { status: SUCCESS } Call ListSplits. selects { data_source_instance { kind: YDB endpoint { host: "localhost" port: 2136 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE } from { table: "example_1" } } CRAB Ex ... che_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0048a4/r3tmp/tmpzJAvcQ/pdisk_1.dat 2025-05-07T08:56:22.682905Z node 3 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:56:22.722962Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:56:22.723069Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:56:22.724409Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 30844, node 3 2025-05-07T08:56:22.950781Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:56:22.950809Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:56:22.950819Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:56:22.950952Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:24725 TClient is connected to server localhost:24725 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:56:23.697002Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:56:23.713602Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-05-07T08:56:27.054126Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7501625042485632554:2332], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:56:27.054247Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:56:27.072057Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:2, at schemeshard: 72057594046644480 2025-05-07T08:56:27.178839Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7501625042485632673:2345], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:56:27.178951Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:56:27.179335Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7501625042485632678:2348], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:56:27.185009Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:2, at schemeshard: 72057594046644480 2025-05-07T08:56:27.222995Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7501625042485632680:2349], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-05-07T08:56:27.295102Z node 3 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [3:7501625042485632720:2396] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:56:27.426235Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7501625021010795413:2060];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:56:27.426319Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:56:28.083097Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-05-07T08:56:29.134548Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:1, at schemeshard: 72057594046644480 2025-05-07T08:56:30.080799Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715677:0, at schemeshard: 72057594046644480 2025-05-07T08:56:30.839700Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715682:0, at schemeshard: 72057594046644480 2025-05-07T08:56:31.741326Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715687:0, at schemeshard: 72057594046644480 2025-05-07T08:56:32.490204Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710758:2, at schemeshard: 72057594046644480 2025-05-07T08:56:32.587149Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710759:0, at schemeshard: 72057594046644480 2025-05-07T08:56:36.277163Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976715712:0, at schemeshard: 72057594046644480 Call DescribeTable. 
data_source_instance { kind: YDB endpoint { host: "localhost" port: 2136 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Expected: data_source_instance { kind: YDB endpoint { host: "localhost" port: 2136 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Actual: data_source_instance { kind: YDB endpoint { host: "localhost" port: 2136 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } DescribeTable result. GRpcStatusCode: 0 schema { columns { name: "col1" type { type_id: UINT16 } } columns { name: "col2" type { type_id: DOUBLE } } } error { status: SUCCESS } Call ListSplits. selects { data_source_instance { kind: YDB endpoint { host: "localhost" port: 2136 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE } from { table: "example_1" } } CRAB Expected: selects { data_source_instance { kind: YDB endpoint { host: "localhost" port: 2136 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE } from { table: "example_1" } } CRAB Actual: selects { data_source_instance { kind: YDB endpoint { host: "localhost" port: 2136 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE } from { table: "example_1" } } ListSplits result. GRpcStatusCode: 0 Call ReadSplits. splits { select { data_source_instance { kind: YDB endpoint { host: "localhost" port: 2136 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE } what { } from { table: "example_1" } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL CRAB Expected: splits { select { data_source_instance { kind: YDB endpoint { host: "localhost" port: 2136 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE } what { } from { table: "example_1" } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL CRAB Actual: splits { select { data_source_instance { kind: YDB endpoint { host: "localhost" port: 2136 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE } what { } from { table: "example_1" } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL ReadSplits result. 
GRpcStatusCode: 0 >> TFlatTableExecutor_BTreeIndex::EnableLocalDBBtreeIndex_True_EnableLocalDBFlatIndex_False [GOOD] >> TFlatTableExecutor_BTreeIndex::EnableLocalDBBtreeIndex_False_EnableLocalDBFlatIndex_False >> TReplicaTest::Commit >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeTable [GOOD] >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypePersQueueGroup [GOOD] >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeSubDomain [GOOD] >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeRtmrVolume [GOOD] >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeKesus [GOOD] >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeSolomonVolume [GOOD] >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeTableIndex >> TReplicaTest::Commit [GOOD] >> TReplicaTest::AckNotifications >> SystemView::TabletsFields [GOOD] >> SystemView::TabletsShards >> PgCatalog::PgTables [GOOD] >> TFlatTableExecutor_BTreeIndex::EnableLocalDBBtreeIndex_False_EnableLocalDBFlatIndex_False [GOOD] >> TFlatTableExecutor_BTreeIndex::EnableLocalDBBtreeIndex_True_TurnOff >> TReplicaTest::AckNotifications [GOOD] >> TReplicaTest::AckNotificationsUponPathRecreation >> BackupRestore::TestAllSchemeObjectTypes-EPathTypeDir >> GenericFederatedQuery::IcebergHiveSaSelectCount [GOOD] >> GenericFederatedQuery::IcebergHiveSaFilterPushdown >> TReplicaTest::AckNotificationsUponPathRecreation [GOOD] >> GroupWriteTest::WithRead [GOOD] >> GenericFederatedQuery::IcebergHadoopSaSelectCount [GOOD] >> GenericFederatedQuery::IcebergHadoopSaFilterPushdown ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_replica/unittest >> TReplicaTest::AckNotificationsUponPathRecreation [GOOD] Test command err: 2025-05-07T08:56:42.406366Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [1:6:2053] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 1 Generation: 1 }: sender# [1:7:2054] 2025-05-07T08:56:42.406450Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [1:6:2053] Successful handshake: owner# 1, generation# 1 2025-05-07T08:56:42.406558Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:958: [1:6:2053] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 1 Generation: 1 }: sender# [1:7:2054] 2025-05-07T08:56:42.406596Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:985: [1:6:2053] Commit generation: owner# 1, generation# 1 2025-05-07T08:56:42.406644Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [1:6:2053] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 1 Generation: 2 }: sender# [1:7:2054] 2025-05-07T08:56:42.406678Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [1:6:2053] Successful handshake: owner# 1, generation# 2 2025-05-07T08:56:42.741735Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [2:6:2053] Handle NKikimrSchemeBoard.TEvSubscribe { Path: path DomainOwnerId: 0 }: sender# [2:8:2055] 2025-05-07T08:56:42.741820Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [2:6:2053] Upsert description: path# path 2025-05-07T08:56:42.742032Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [2:6:2053] Subscribe: subscriber# [2:8:2055], path# path, domainOwnerId# 0, capabilities# AckNotifications: true 2025-05-07T08:56:42.742180Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [2:6:2053] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 1 Generation: 1 }: sender# [2:7:2054] 2025-05-07T08:56:42.742222Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [2:6:2053] Successful handshake: owner# 1, generation# 1 2025-05-07T08:56:42.742384Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: 
[2:6:2053] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [2:7:2054], cookie# 0, event size# 72 2025-05-07T08:56:42.742437Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [2:6:2053] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], deletion# false 2025-05-07T08:56:42.750655Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:550: [2:6:2053] Upsert description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], pathDescription# {Status StatusSuccess, Path path, PathId [OwnerId: 1, LocalPathId: 1], PathVersion 1, SubdomainPathId , PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 30} 2025-05-07T08:56:42.750912Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [2:6:2053] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [2:8:2055] 2025-05-07T08:56:42.751100Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [2:6:2053] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [2:7:2054], cookie# 0, event size# 40 2025-05-07T08:56:42.751158Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [2:6:2053] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], deletion# true 2025-05-07T08:56:42.751202Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:575: [2:6:2053] Delete description: path# path, pathId# [OwnerId: 1, LocalPathId: 1] 2025-05-07T08:56:42.751291Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [2:6:2053] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 1 }: sender# [2:8:2055] 2025-05-07T08:56:43.125844Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [3:6:2053] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 1 Generation: 1 }: sender# [3:7:2054] 2025-05-07T08:56:43.125945Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [3:6:2053] Successful handshake: owner# 1, generation# 1 2025-05-07T08:56:43.126560Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [3:6:2053] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [3:7:2054], cookie# 0, event size# 72 2025-05-07T08:56:43.126643Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [3:6:2053] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], deletion# false 2025-05-07T08:56:43.126739Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:550: [3:6:2053] Upsert description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], pathDescription# {Status StatusSuccess, Path path, PathId [OwnerId: 1, LocalPathId: 1], PathVersion 2, SubdomainPathId , PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 30} 2025-05-07T08:56:43.126896Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [3:6:2053] Handle NKikimrSchemeBoard.TEvSubscribe { Path: path DomainOwnerId: 0 }: sender# [3:8:2055] 2025-05-07T08:56:43.127080Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [3:6:2053] Subscribe: subscriber# [3:8:2055], path# path, domainOwnerId# 0, capabilities# AckNotifications: true 2025-05-07T08:56:43.127259Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [3:6:2053] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [3:7:2054], cookie# 0, event size# 72 2025-05-07T08:56:43.127310Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [3:6:2053] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], deletion# false 2025-05-07T08:56:43.127366Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:550: [3:6:2053] Upsert description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], pathDescription# {Status 
StatusSuccess, Path path, PathId [OwnerId: 1, LocalPathId: 1], PathVersion 3, SubdomainPathId , PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 30} 2025-05-07T08:56:43.127589Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [3:6:2053] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [3:7:2054], cookie# 0, event size# 72 2025-05-07T08:56:43.127640Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [3:6:2053] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 2], deletion# false 2025-05-07T08:56:43.127676Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:575: [3:6:2053] Delete description: path# path, pathId# [OwnerId: 1, LocalPathId: 1] 2025-05-07T08:56:43.127750Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [3:6:2053] Upsert description: path# path 2025-05-07T08:56:43.127817Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [3:6:2053] Subscribe: subscriber# [3:8:2055], path# path, domainOwnerId# 0, capabilities# AckNotifications: true 2025-05-07T08:56:43.127877Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:550: [3:6:2053] Upsert description: path# path, pathId# [OwnerId: 1, LocalPathId: 2], pathDescription# {Status StatusSuccess, Path path, PathId [OwnerId: 1, LocalPathId: 2], PathVersion 1, SubdomainPathId , PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 30} 2025-05-07T08:56:43.128022Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [3:6:2053] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 3 }: sender# [3:8:2055] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/load_test/ut/unittest >> GroupWriteTest::WithRead [GOOD] Test command err: RandomSeed# 2835594017757869961 2025-05-07T08:56:32.062288Z 1 00h01m00.010512s :BS_LOAD_TEST DEBUG: TabletId# 3 Generation# 1 is bootstrapped, going to send TEvDiscover {TabletId# 3 MinGeneration# 1 ReadBody# false DiscoverBlockedGeneration# true ForceBlockedGeneration# 0 FromLeader# true Deadline# 18446744073709551} 2025-05-07T08:56:32.088361Z 1 00h01m00.010512s :BS_LOAD_TEST INFO: TabletId# 3 Generation# 1 recieved TEvDiscoverResult {Status# NODATA BlockedGeneration# 0 Id# [0:0:0:0:0:0:0] Size# 0 MinGeneration# 1} 2025-05-07T08:56:32.088454Z 1 00h01m00.010512s :BS_LOAD_TEST DEBUG: TabletId# 3 Generation# 1 going to send TEvBlock {TabletId# 3 Generation# 1 Deadline# 18446744073709551 IsMonitored# 1} 2025-05-07T08:56:32.091675Z 1 00h01m00.010512s :BS_LOAD_TEST INFO: TabletId# 3 Generation# 1 recieved TEvBlockResult {Status# OK} 2025-05-07T08:56:32.106836Z 1 00h01m00.010512s :BS_LOAD_TEST DEBUG: TabletId# 3 Generation# 2 going to send TEvCollectGarbage {TabletId# 3 RecordGeneration# 2 PerGenerationCounter# 1 Channel# 0 Deadline# 18446744073709551 Collect# true CollectGeneration# 2 CollectStep# 0 Hard# true IsMultiCollectAllowed# 0 IsMonitored# 1} 2025-05-07T08:56:32.110060Z 1 00h01m00.010512s :BS_LOAD_TEST INFO: TabletId# 3 Generation# 2 recieved TEvCollectGarbageResult {TabletId# 3 RecordGeneration# 2 PerGenerationCounter# 1 Channel# 0 Status# OK} 2025-05-07T08:56:43.107229Z 1 00h01m10.010512s :BS_LOAD_TEST DEBUG: Load tablet recieved PoisonPill, going to die 2025-05-07T08:56:43.107373Z 1 00h01m10.010512s :BS_LOAD_TEST DEBUG: TabletId# 3 Generation# 2 end working, going to send TEvCollectGarbage {TabletId# 3 RecordGeneration# 2 PerGenerationCounter# 12 Channel# 0 Deadline# 18446744073709551 Collect# true CollectGeneration# 2 CollectStep# 4294967295 Hard# true IsMultiCollectAllowed# 0 IsMonitored# 1} 
2025-05-07T08:56:43.107437Z 1 00h01m10.010512s :BS_LOAD_TEST DEBUG: Load tablet recieved PoisonPill, going to die 2025-05-07T08:56:43.107481Z 1 00h01m10.010512s :BS_LOAD_TEST DEBUG: TabletId# 3 Generation# 2 end working, going to send TEvCollectGarbage {TabletId# 3 RecordGeneration# 2 PerGenerationCounter# 13 Channel# 0 Deadline# 18446744073709551 Collect# true CollectGeneration# 2 CollectStep# 4294967295 Hard# true IsMultiCollectAllowed# 0 IsMonitored# 1} 2025-05-07T08:56:43.170423Z 1 00h01m10.010512s :BS_LOAD_TEST INFO: TabletId# 3 Generation# 2 recieved TEvCollectGarbageResult {TabletId# 3 RecordGeneration# 2 PerGenerationCounter# 12 Channel# 0 Status# OK} 2025-05-07T08:56:43.170676Z 1 00h01m10.010512s :BS_LOAD_TEST INFO: TabletId# 3 Generation# 2 recieved TEvCollectGarbageResult {TabletId# 3 RecordGeneration# 2 PerGenerationCounter# 13 Channel# 0 Status# OK} >> GenericFederatedQuery::IcebergHiveTokenSelectCount [GOOD] >> GenericFederatedQuery::IcebergHiveTokenFilterPushdown >> BackupRestoreS3::TestAllPrimitiveTypes-UINT8 [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-UINT16 >> BackupRestoreS3::TestAllPrimitiveTypes-BOOL [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-INT8 >> TFlatTableExecutor_BTreeIndex::EnableLocalDBBtreeIndex_True_TurnOff [GOOD] >> TFlatTableExecutor_BTreeIndex::EnableLocalDBBtreeIndex_True_Generations >> SystemView::QueryStatsFields [GOOD] >> SystemView::PartitionStatsTtlFields >> BackupRestoreS3::RestoreTablePartitioningSettings [GOOD] >> BackupRestoreS3::RestoreIndexTablePartitioningSettings >> TReplicaTest::Subscribe |90.6%| [TM] {asan, default-linux-x86_64, release} ydb/library/table_creator/ut/unittest >> BackupRestore::TestAllPrimitiveTypes-INT8 [GOOD] >> BackupRestore::TestAllPrimitiveTypes-UINT16 >> BuildStatsHistogram::Five_Five_Crossed [GOOD] >> BuildStatsHistogram::Single_Small_2_Levels >> TReplicaTest::Subscribe [GOOD] >> TReplicaTest::SubscribeUnknownPath >> TReplicaTest::SubscribeUnknownPath [GOOD] >> TReplicaTest::SyncVersion >> BuildStatsHistogram::Single_Small_2_Levels [GOOD] >> BuildStatsHistogram::Single_Small_2_Levels_3_Buckets >> TReplicaTest::SyncVersion [GOOD] >> BuildStatsHistogram::Single_Small_2_Levels_3_Buckets [GOOD] >> BuildStatsHistogram::Single_Small_1_Level >> BuildStatsHistogram::Single_Small_1_Level [GOOD] >> BuildStatsHistogram::Single_Small_0_Levels [GOOD] >> BuildStatsHistogram::Three_Mixed_Small_2_Levels >> BuildStatsHistogram::Three_Mixed_Small_2_Levels [GOOD] >> BuildStatsHistogram::Three_Mixed_Small_1_Level >> TPersQueueTest::ReadFromSeveralPartitions [GOOD] >> TPersQueueTest::ReadFromSeveralPartitionsMigrated >> BuildStatsHistogram::Three_Mixed_Small_1_Level [GOOD] >> BuildStatsHistogram::Three_Mixed_Small_0_Levels [GOOD] >> BuildStatsHistogram::Mixed_Groups_History ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_replica/unittest >> TReplicaTest::SyncVersion [GOOD] Test command err: 2025-05-07T08:56:46.248258Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [1:6:2053] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 1 Generation: 1 }: sender# [1:7:2054] 2025-05-07T08:56:46.248350Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [1:6:2053] Successful handshake: owner# 1, generation# 1 2025-05-07T08:56:46.248527Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [1:6:2053] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [1:7:2054], cookie# 0, event size# 72 2025-05-07T08:56:46.248564Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [1:6:2053] Update 
description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], deletion# false 2025-05-07T08:56:46.270622Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:550: [1:6:2053] Upsert description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], pathDescription# {Status StatusSuccess, Path path, PathId [OwnerId: 1, LocalPathId: 1], PathVersion 1, SubdomainPathId , PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 30} 2025-05-07T08:56:46.270820Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:6:2053] Handle NKikimrSchemeBoard.TEvSubscribe { Path: path DomainOwnerId: 0 }: sender# [1:7:2054] 2025-05-07T08:56:46.270941Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:6:2053] Subscribe: subscriber# [1:7:2054], path# path, domainOwnerId# 0, capabilities# 2025-05-07T08:56:46.271096Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [1:6:2053] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [1:7:2054], cookie# 0, event size# 40 2025-05-07T08:56:46.271167Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [1:6:2053] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], deletion# true 2025-05-07T08:56:46.271205Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:575: [1:6:2053] Delete description: path# path, pathId# [OwnerId: 1, LocalPathId: 1] 2025-05-07T08:56:46.594400Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [2:6:2053] Handle NKikimrSchemeBoard.TEvSubscribe { Path: path DomainOwnerId: 0 }: sender# [2:7:2054] 2025-05-07T08:56:46.594487Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [2:6:2053] Upsert description: path# path 2025-05-07T08:56:46.594580Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [2:6:2053] Subscribe: subscriber# [2:7:2054], path# path, domainOwnerId# 0, capabilities# 2025-05-07T08:56:46.767917Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [3:6:2053] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 1 Generation: 1 }: sender# [3:7:2054] 2025-05-07T08:56:46.768000Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [3:6:2053] Successful handshake: owner# 1, generation# 1 2025-05-07T08:56:46.768171Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [3:6:2053] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [3:7:2054], cookie# 0, event size# 76 2025-05-07T08:56:46.768226Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [3:6:2053] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], deletion# false 2025-05-07T08:56:46.768299Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:550: [3:6:2053] Upsert description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], pathDescription# {Status StatusSuccess, Path path, PathId [OwnerId: 1, LocalPathId: 1], PathVersion 100500, SubdomainPathId , PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 32} 2025-05-07T08:56:46.768482Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [3:6:2053] Handle NKikimrSchemeBoard.TEvSubscribe { Path: path DomainOwnerId: 0 }: sender# [3:7:2054] 2025-05-07T08:56:46.768557Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [3:6:2053] Subscribe: subscriber# [3:7:2054], path# path, domainOwnerId# 0, capabilities# 2025-05-07T08:56:46.768664Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [3:6:2053] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: path }: sender# [3:7:2054], cookie# 1 |90.6%| [TM] {asan, default-linux-x86_64, release} ydb/library/table_creator/ut/unittest >> BuildStatsHistogram::Mixed_Groups_History 
[GOOD] >> BuildStatsHistogram::Serial_Groups_History >> TFlatTableExecutor_BTreeIndex::EnableLocalDBBtreeIndex_True_Generations [GOOD] >> TFlatTableExecutor_CachePressure::TestNotEnoughLocalCache >> TFlatTableExecutor_CachePressure::TestNotEnoughLocalCache [GOOD] >> TFlatTableExecutor_Cold::ColdBorrowScan |90.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/proxy_service/ut/ydb-core-kqp-proxy_service-ut |90.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/proxy_service/ut/ydb-core-kqp-proxy_service-ut |90.6%| [LD] {RESULT} $(B)/ydb/core/kqp/proxy_service/ut/ydb-core-kqp-proxy_service-ut >> BackupRestore::TestAllSchemeObjectTypes-EPathTypeDir [GOOD] >> BackupRestore::TestAllSchemeObjectTypes-EPathTypeBlockStoreVolume [GOOD] >> BackupRestore::TestAllSchemeObjectTypes-EPathTypeExtSubDomain [GOOD] >> BackupRestore::TestAllSchemeObjectTypes-EPathTypeFileStore [GOOD] >> BackupRestore::TestAllSchemeObjectTypes-EPathTypeColumnStore [GOOD] >> BackupRestore::TestAllSchemeObjectTypes-EPathTypeColumnTable [GOOD] >> BackupRestore::TestAllSchemeObjectTypes-EPathTypeCdcStream >> TFlatTableExecutor_Cold::ColdBorrowScan [GOOD] >> TFlatTableExecutor_ColumnGroups::TestManyRows >> BuildStatsHistogram::Serial_Groups_History [GOOD] >> BuildStatsHistogram::Benchmark >> BackupRestore::TestAllIndexTypes-EIndexTypeGlobalAsync [GOOD] >> BackupRestore::TestAllIndexTypes-EIndexTypeGlobalUnique [GOOD] >> BackupRestore::TestAllIndexTypes-EIndexTypeGlobalVectorKmeansTree |90.7%| [TM] {asan, default-linux-x86_64, release} ydb/library/table_creator/ut/unittest >> GroupWriteTest::Simple [GOOD] |90.7%| [TM] {asan, default-linux-x86_64, release} ydb/library/table_creator/ut/unittest >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeCdcStream [GOOD] >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeBlobDepot [GOOD] >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeExternalTable [GOOD] >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeExternalDataSource [GOOD] >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeBackupCollection [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-UTF8 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/load_test/ut/unittest >> GroupWriteTest::Simple [GOOD] Test command err: RandomSeed# 7008514733607323318 2025-05-07T08:56:28.054899Z 1 00h01m00.010512s :BS_LOAD_TEST DEBUG: TabletId# 1 Generation# 1 is bootstrapped, going to send TEvDiscover {TabletId# 1 MinGeneration# 1 ReadBody# false DiscoverBlockedGeneration# true ForceBlockedGeneration# 0 FromLeader# true Deadline# 18446744073709551} 2025-05-07T08:56:28.115826Z 1 00h01m00.010512s :BS_LOAD_TEST INFO: TabletId# 1 Generation# 1 recieved TEvDiscoverResult {Status# NODATA BlockedGeneration# 0 Id# [0:0:0:0:0:0:0] Size# 0 MinGeneration# 1} 2025-05-07T08:56:28.115908Z 1 00h01m00.010512s :BS_LOAD_TEST DEBUG: TabletId# 1 Generation# 1 going to send TEvBlock {TabletId# 1 Generation# 1 Deadline# 18446744073709551 IsMonitored# 1} 2025-05-07T08:56:28.127373Z 1 00h01m00.010512s :BS_LOAD_TEST INFO: TabletId# 1 Generation# 1 recieved TEvBlockResult {Status# OK} 2025-05-07T08:56:28.146122Z 1 00h01m00.010512s :BS_LOAD_TEST DEBUG: TabletId# 1 Generation# 2 going to send TEvCollectGarbage {TabletId# 1 RecordGeneration# 2 PerGenerationCounter# 1 Channel# 0 Deadline# 18446744073709551 Collect# true CollectGeneration# 2 CollectStep# 0 Hard# true IsMultiCollectAllowed# 0 IsMonitored# 1} 2025-05-07T08:56:28.149747Z 1 00h01m00.010512s :BS_LOAD_TEST INFO: TabletId# 1 Generation# 2 recieved TEvCollectGarbageResult {TabletId# 1 
RecordGeneration# 2 PerGenerationCounter# 1 Channel# 0 Status# OK} 2025-05-07T08:56:50.171950Z 1 00h01m30.010512s :BS_LOAD_TEST DEBUG: Load tablet recieved PoisonPill, going to die 2025-05-07T08:56:50.172093Z 1 00h01m30.010512s :BS_LOAD_TEST DEBUG: TabletId# 1 Generation# 2 end working, going to send TEvCollectGarbage {TabletId# 1 RecordGeneration# 2 PerGenerationCounter# 32 Channel# 0 Deadline# 18446744073709551 Collect# true CollectGeneration# 2 CollectStep# 4294967295 Hard# true IsMultiCollectAllowed# 0 IsMonitored# 1} 2025-05-07T08:56:50.172152Z 1 00h01m30.010512s :BS_LOAD_TEST DEBUG: Load tablet recieved PoisonPill, going to die 2025-05-07T08:56:50.172198Z 1 00h01m30.010512s :BS_LOAD_TEST DEBUG: TabletId# 1 Generation# 2 end working, going to send TEvCollectGarbage {TabletId# 1 RecordGeneration# 2 PerGenerationCounter# 33 Channel# 0 Deadline# 18446744073709551 Collect# true CollectGeneration# 2 CollectStep# 4294967295 Hard# true IsMultiCollectAllowed# 0 IsMonitored# 1} 2025-05-07T08:56:50.252472Z 1 00h01m30.010512s :BS_LOAD_TEST INFO: TabletId# 1 Generation# 2 recieved TEvCollectGarbageResult {TabletId# 1 RecordGeneration# 2 PerGenerationCounter# 32 Channel# 0 Status# OK} 2025-05-07T08:56:50.252613Z 1 00h01m30.010512s :BS_LOAD_TEST INFO: TabletId# 1 Generation# 2 recieved TEvCollectGarbageResult {TabletId# 1 RecordGeneration# 2 PerGenerationCounter# 33 Channel# 0 Status# OK} >> SystemView::TabletsShards [GOOD] >> SystemView::TabletsFollowers >> BuildStatsHistogram::Benchmark [GOOD] >> BuildStatsHistogram::Many_Mixed |90.7%| [TM] {asan, default-linux-x86_64, release} ydb/library/table_creator/ut/unittest >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeTableIndex [GOOD] >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeSequence >> BackupRestoreS3::TestAllPrimitiveTypes-INT8 [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-INT16 >> TColumnShardTestReadWrite::CompactionInGranule_PKString_Reboot [GOOD] >> TableCreator::CreateTables |90.7%| [TM] {asan, default-linux-x86_64, release} ydb/library/table_creator/ut/unittest >> BackupRestore::TestAllPrimitiveTypes-UINT16 [GOOD] >> BackupRestore::TestAllPrimitiveTypes-UINT32 >> BackupRestoreS3::TestAllPrimitiveTypes-UINT16 [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-UINT32 >> TFlatTableExecutor_ColumnGroups::TestManyRows [GOOD] >> TFlatTableExecutor_CompactionScan::TestCompactionScan |90.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tablet/ut/ydb-core-tablet-ut |90.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tablet/ut/ydb-core-tablet-ut |90.7%| [LD] {RESULT} $(B)/ydb/core/tablet/ut/ydb-core-tablet-ut >> KqpUserConstraint::KqpReadNull-UploadNull >> BackupRestoreS3::RestoreIndexTablePartitioningSettings [GOOD] >> BackupRestoreS3::RestoreIndexTableReadReplicasSettings >> TFlatTableExecutor_CompactionScan::TestCompactionScan [GOOD] >> TFlatTableExecutor_CompressedSelectRows::TestCompressedSelectRows [GOOD] >> TFlatTableExecutor_Exceptions::TestTabletExecuteExceptionDirect >> TFlatTableExecutor_Exceptions::TestTabletExecuteExceptionDirect [GOOD] >> TFlatTableExecutorGC::TestGCVectorDeduplicaton [GOOD] |90.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data/unittest |90.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/ncloud/impl/ut/ydb-library-ncloud-impl-ut |90.7%| [LD] {RESULT} $(B)/ydb/library/ncloud/impl/ut/ydb-library-ncloud-impl-ut |90.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/library/ncloud/impl/ut/ydb-library-ncloud-impl-ut >> 
TFlatTableExecutor_IndexLoading::PrechargeAndSeek_FlatIndex [GOOD] >> TFlatTableExecutor_IndexLoading::PrechargeAndSeek_BTreeIndex >> KqpWorkloadServiceTables::TestLeaseUpdates [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tablet_flat/ut/unittest >> TFlatTableExecutorGC::TestGCVectorDeduplicaton [GOOD] Test command err: 00000.000 II| FAKE_ENV: Born at 2025-05-07T08:54:59.451070Z 00000.010 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144 00000.010 II| FAKE_ENV: TNanny initiates TDummy tablet 72057594037927937 birth 00000.011 II| FAKE_ENV: Starting storage for BS group 0 00000.012 II| FAKE_ENV: Starting storage for BS group 1 00000.012 II| FAKE_ENV: Starting storage for BS group 2 00000.012 II| FAKE_ENV: Starting storage for BS group 3 00000.017 II| TABLET_EXECUTOR: Leader{1:2:0} activating executor 00000.018 II| TABLET_EXECUTOR: LSnap{1:2, on 2:1, 35b, wait} done, Waste{2:0, 0b +(0, 0b), 0 trc} 00000.280 II| TABLET_EXECUTOR: LSnap{1:2, on 2:301, 5682b, wait} done, Waste{2:0, 587932b +(0, 0b), 300 trc} 00000.285 II| TABLET_EXECUTOR: Leader{1:2:308} starting compaction 00000.286 II| TABLET_EXECUTOR: Leader{1:2:309} starting Scan{1 on 2, Compact{1.2.308, eph 1}} 00000.286 II| TABLET_EXECUTOR: Leader{1:2:309} started compaction 1 00000.286 II| TABLET_OPS_HOST: Scan{1 on 2, Compact{1.2.308, eph 1}} begin on TSubset{head 2, 1m 0p 0c} 00000.289 II| TABLET_OPS_HOST: Scan{1 on 2, Compact{1.2.308, eph 1}} end=0, 109r seen, TFwd{fetch=0B,saved=0B,usage=0B,after=0B,before=0B}, bio Spent{time=0.000s,wait=0.000s,interrupts=1}, trace 17 of 18 ~1p 00000.289 II| OPS_COMPACT: Compact{1.2.308, eph 1} end=0, 8 blobs 83r (max 109), put Spent{time=0.000s,wait=0.000s,interrupts=1} Part{ 1 pk, lobs 17 +6, (150155 0 209197)b }, ecr=1.000 00000.292 II| TABLET_EXECUTOR: Leader{1:2:310} Compact 1 on TGenCompactionParams{2: gen 0 epoch +inf, 0 parts} step 308, product {1 parts epoch 2} done 00000.292 II| TABLET_EXECUTOR: Leader{1:2:311} starting compaction 00000.293 II| TABLET_EXECUTOR: Leader{1:2:312} starting Scan{3 on 3, Compact{1.2.311, eph 1}} 00000.293 II| TABLET_EXECUTOR: Leader{1:2:312} started compaction 3 00000.293 II| TABLET_OPS_HOST: Scan{3 on 3, Compact{1.2.311, eph 1}} begin on TSubset{head 2, 1m 0p 0c} 00000.297 II| TABLET_OPS_HOST: Scan{3 on 3, Compact{1.2.311, eph 1}} end=0, 117r seen, TFwd{fetch=0B,saved=0B,usage=0B,after=0B,before=0B}, bio Spent{time=0.000s,wait=0.000s,interrupts=1}, trace 13 of 14 ~1p 00000.297 II| OPS_COMPACT: Compact{1.2.311, eph 1} end=0, 16 blobs 82r (max 117), put Spent{time=0.000s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 13 +12, (143310 20105 217108)b }, ecr=1.000 00000.318 II| TABLET_EXECUTOR: Leader{1:2:314} Compact 3 on TGenCompactionParams{3: gen 0 epoch +inf, 0 parts} step 311, product {1 parts epoch 2} done 00000.596 II| TABLET_EXECUTOR: LSnap{1:2, on 2:601, 8482b, wait} done, Waste{2:0, 1555126b +(143, 32919b), 300 trc} 00000.618 II| TABLET_EXECUTOR: Leader{1:2:625} starting compaction 00000.618 II| TABLET_EXECUTOR: Leader{1:2:626} starting Scan{5 on 3, Compact{1.2.625, eph 2}} 00000.618 II| TABLET_EXECUTOR: Leader{1:2:626} started compaction 5 00000.618 II| TABLET_OPS_HOST: Scan{5 on 3, Compact{1.2.625, eph 2}} begin on TSubset{head 3, 1m 0p 0c} 00000.623 II| TABLET_OPS_HOST: Scan{5 on 3, Compact{1.2.625, eph 2}} end=0, 110r seen, TFwd{fetch=0B,saved=0B,usage=0B,after=0B,before=0B}, bio Spent{time=0.000s,wait=0.000s,interrupts=1}, trace 8 of 9 ~1p 00000.624 II| 
OPS_COMPACT: Compact{1.2.625, eph 2} end=0, 18 blobs 110r (max 110), put Spent{time=0.000s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 8 +14, (188189 20834 177068)b }, ecr=1.000 00000.647 II| TABLET_EXECUTOR: Leader{1:2:627} Compact 5 on TGenCompactionParams{3: gen 0 epoch +inf, 0 parts} step 625, product {1 parts epoch 3} done 00000.649 II| TABLET_EXECUTOR: Leader{1:2:628} starting compaction 00000.649 II| TABLET_EXECUTOR: Leader{1:2:629} starting Scan{7 on 3, Compact{1.2.628, eph 2}} 00000.649 II| TABLET_EXECUTOR: Leader{1:2:629} started compaction 7 00000.649 II| TABLET_OPS_HOST: Scan{7 on 3, Compact{1.2.628, eph 2}} begin on TSubset{head 0, 0m 2p 0c} 00000.656 II| TABLET_OPS_HOST: Scan{7 on 3, Compact{1.2.628, eph 2}} end=0, 159r seen, TFwd{fetch=322KiB,saved=322KiB,usage=322KiB,after=0B,before=0B}, bio Spent{time=0.000s,wait=0.000s,interrupts=8}, trace 39 of 47 ~3p 00000.657 II| OPS_COMPACT: Compact{1.2.628, eph 2} end=0, 4 blobs 134r (max 192), put Spent{time=0.000s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 39 +0, (267998 40939 322465)b }, ecr=1.000 00000.659 II| TABLET_EXECUTOR: Leader{1:2:629} Compact 7 on TGenCompactionParams{3: gen 1 epoch 0, 2 parts} step 628, product {1 parts epoch 0} done 00000.737 II| TABLET_EXECUTOR: Leader{1:2:672} starting compaction 00000.737 II| TABLET_EXECUTOR: Leader{1:2:673} starting Scan{9 on 2, Compact{1.2.672, eph 2}} 00000.737 II| TABLET_EXECUTOR: Leader{1:2:673} started compaction 9 00000.737 II| TABLET_OPS_HOST: Scan{9 on 2, Compact{1.2.672, eph 2}} begin on TSubset{head 3, 1m 0p 0c} 00000.741 II| TABLET_OPS_HOST: Scan{9 on 2, Compact{1.2.672, eph 2}} end=0, 108r seen, TFwd{fetch=0B,saved=0B,usage=0B,after=0B,before=0B}, bio Spent{time=0.000s,wait=0.000s,interrupts=1}, trace 16 of 23 ~1p 00000.741 II| OPS_COMPACT: Compact{1.2.672, eph 2} end=0, 14 blobs 108r (max 108), put Spent{time=0.000s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 16 +10, (134549 13246 229506)b }, ecr=1.000 00000.743 II| TABLET_EXECUTOR: Leader{1:2:673} Compact 9 on TGenCompactionParams{2: gen 0 epoch +inf, 0 parts} step 672, product {1 parts epoch 3} done 00000.744 II| TABLET_EXECUTOR: Leader{1:2:674} starting compaction 00000.744 II| TABLET_EXECUTOR: Leader{1:2:675} starting Scan{11 on 2, Compact{1.2.674, eph 2}} 00000.744 II| TABLET_EXECUTOR: Leader{1:2:675} started compaction 11 00000.745 II| TABLET_OPS_HOST: Scan{11 on 2, Compact{1.2.674, eph 2}} begin on TSubset{head 0, 0m 2p 0c} 00000.751 II| TABLET_OPS_HOST: Scan{11 on 2, Compact{1.2.674, eph 2}} end=0, 162r seen, TFwd{fetch=277KiB,saved=277KiB,usage=277KiB,after=0B,before=0B}, bio Spent{time=0.000s,wait=0.000s,interrupts=6}, trace 44 of 49 ~3p 00000.751 II| OPS_COMPACT: Compact{1.2.674, eph 2} end=0, 4 blobs 130r (max 191), put Spent{time=0.000s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 44 +0, (219567 13246 392668)b }, ecr=1.000 00000.781 II| TABLET_EXECUTOR: Leader{1:2:675} Compact 11 on TGenCompactionParams{2: gen 1 epoch 0, 2 parts} step 674, product {1 parts epoch 0} done 00001.108 II| TABLET_EXECUTOR: LSnap{1:2, on 2:901, 10332b, wait} done, Waste{2:0, 2297131b +(171, 831658b), 300 trc} 00001.180 II| TABLET_EXECUTOR: Leader{1:2:956} starting compaction 00001.180 II| TABLET_EXECUTOR: Leader{1:2:957} starting Scan{13 on 3, Compact{1.2.956, eph 3}} 00001.180 II| TABLET_EXECUTOR: Leader{1:2:957} started compaction 13 00001.180 II| TABLET_OPS_HOST: Scan{13 on 3, Compact{1.2.956, eph 3}} begin on TSubset{head 4, 1m 0p 0c} 00001.184 II| TABLET_OPS_HOST: Scan{13 on 3, Compact{1.2.956, eph 3}} end=0, 108r seen, 
TFwd{fetch=0B,saved=0B,usage=0B,after=0B,before=0B}, bio Spent{time=0.000s,wait=0.000s,interrupts=1}, trace 13 of 15 ~1p 00001.185 II| OPS_COMPACT: Compact{1.2.956, eph 3} end=0, 13 blobs 108r (max 108), put Spent{time=0.000s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 13 +9, (157602 20174 195273)b }, ecr=1.000 00001.239 II| TABLET_EXECUTOR: Leader{1:2:958} Compact 13 on TGenCompactionParams{3: gen 0 epoch +inf, 0 parts} step 956, product {1 parts epoch 4} done 00001.259 II| TABLET_EXECUTOR: Leader{1:2:963} starting compaction 00001.259 II| TABLET_EXECUTOR: Leader{1:2:964} starting Scan{15 on 2, Compact{1.2.963, eph 3}} 00001.259 II| TABLET_EXECUTOR: Leader{1:2:964} started compaction 15 00001.259 II| TABLET_OPS_HOST: Scan{15 on 2, Compact{1.2.963, eph 3}} begin on TSubset{head 4, 1m 0p 0c} 00001.263 II| TABLET_OPS_HOST: Scan{15 on 2, Compact{1.2.963, eph 3}} end=0, 112r seen, TFwd{fetch=0B,saved=0B,usage=0B,after=0B,before=0B}, bio Spent{time=0.000s,wait=0.000s,interrupts=1}, trace 15 of 18 ~1p 00001.263 II| OPS_COMPACT: Compact{1.2.963, eph 3} end=0, 14 blobs 112r (max 112), put Spent{time=0.000s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 15 +10, (152316 33751 212556)b }, ecr=1.000 00001.266 II| TABLET_EXECUTOR: Leader{1:2:965} Compact 15 on TGenCompactionParams{2: gen 0 epoch +inf, 0 parts} step 963, product {1 parts epoch 4} done 00001.630 II| TABLET_EXECUTOR: LSnap{1:2, on 2:1201, 11587b, wait} done, Waste{2:0, 3394435b +(136, 63572b), 300 trc} 00001.737 II| TABLET_EXECUTOR: Leader{1:2:1279} starting compaction 00001.737 II| TABLET_EXECUTOR: Leader{1:2:1280} starting Scan{17 on 2, Compact{1.2.1279, eph 4}} 00001.737 II| TABLET_EXECUTOR: Leader{1:2:1280} started compaction 17 00001.737 II| TABLET_OPS_HOST: Scan{17 on 2, Compact{1.2.1279, eph 4}} begin on TSubset{head 5, 1m 0p 0c} 00001.740 II| TABLET_OPS_HOST: Scan{17 on 2, Compact{1.2.1279, eph 4}} end=0, 106r seen, TFwd{fetch=0B,saved=0B,usage=0B,after=0B,before=0B}, bio Spent{time=0.000s,wait=0.000s,interrupts=1}, trace 22 of 28 ~1p 00001.741 II| OPS_COMPACT: Compact{1.2.1279, eph 4} end=0, 10 blobs 106r (max 106), put Spent{time=0.000s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 22 +6, (168196 20067 250972)b }, ecr=1.000 00001.769 II| TABLET_EXECUTOR: Leader{1:2:1281} Compact 17 on TGenCompactionParams{2: gen 0 epoch +inf, 0 parts} step 1279, product {1 parts epoch 5} done 00001.770 II| TABLET_EXECUTOR: Leader{1:2:1282} starting compaction 00001.770 II| TABLET_EXECUTOR: Leader{1:2:1283} starting Scan{19 on 2, Compact{1.2.1282, eph 4}} 00001.770 II| TABLET_EXECUTOR: Leader{1:2:1283} started compaction 19 00001.770 II| TABLET_OPS_HOST: Scan{19 on 2, Compact{1.2.1282, eph 4}} begin on TSubset{head 0, 0m 2p 0c} 00001.777 II| TABLET_OPS_HOST: Scan{19 on 2, Compact{1.2.1282, eph 4}} end=0, 182r seen, TFwd{fetch=311KiB,saved=311KiB,usage=291KiB,after=6.72KiB,before=0B}, bio Spent{time=0.000s,wait=0.000s,interrupts=7}, trace 44 of 53 ~3p 00001.777 II| OPS_COMPACT: Compact{1.2.1282, eph 4} end=0, 4 blobs 182r (max 218), put Spent{time=0.000s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 44 +0, (244789 33456 384139)b }, ecr=1.000 00001.780 II| TABLET_EXECUTOR: Leader{1:2:1284} Compact 19 on TGenCompactionParams{2: gen 1 epoch 0, 2 parts} step 1282, product {1 parts epoch 0} done 00001.854 II| TABLET_EXECUTOR: Leader{1:2:1307} starting compaction 00001.854 II| TABLET_EXECUTOR: Leader{1:2:1308} starting Scan{21 on 3, Compact{1.2.1307, eph 4}} 00001.854 II| TABLET_EXECUTOR: Leader{1:2:1308} started compaction 21 00001.854 II| TABLET_OPS_HOST: 
Scan{21 on 3, Compact{1.2.1307, eph 4}} begin on TSubset{head 5, 1m 0p 0c} 00001.858 II| TABLET_OPS_HOST: Scan{21 on 3, Compact{1.2.1307, eph 4}} end=0, 107r seen, TFwd{fetch=0B,saved=0B,usage=0B,after=0B,before=0B}, bio Spent{time=0.000s,wait=0.000s,interrupts=1}, trace 12 of 15 ~1p 00001.858 II| OPS_COMPACT: Compact{1.2.1307, eph 4} end=0, 9 blobs 107r (max 107), put Spent{time=0.000s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 12 +5, (125811 13684 147310)b }, ecr=1.000 00001.861 II| TABLET_EXECUTOR: Leader{1:2:1309} Compact 21 on TGenCompactionParams{3: gen 0 epoch +inf, 0 parts} step 1307, product {1 parts epoch 5} done 00001.862 II| TABLET_EXECUTOR: Leader{1:2:1310} starting compaction 00001.862 II| TABLET_EXECUTOR: Leader{1:2:1311} starting Scan{23 on 3, Compact{1.2.1310, eph 4}} 00001.862 II| TABLET_EXECUTOR: Leader{1:2:1311} started compaction 23 00001.862 II| TABLET_OPS_HOST: Scan{23 on 3, Compact{1.2.1310, eph 4}} begin on TSubset{head 0, 0m 2p 0c} 00001.868 II| TABLET_OPS_HOST: Scan{23 on 3, Compact{1.2.1310, eph 4}} end=0, 171r seen, TFwd{fetch=276KiB,saved=276KiB,usage=269KiB,after=0B,before=0B}, bio Spent{time=0.000s,wait=0.000s,interrupts=7}, trace 32 of 39 ~3p 00001.868 II| OPS_COMPACT: Compact{1.2.1310, eph 4} end=0, 4 blobs 171r (max 215), put Spent{time=0.000s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 32 +0, (212909 27235 285820)b }, ecr=1.000 00001.882 II| TABLET_EXECUTOR: Leader{1:2:1313} Compact 23 on TGenCompactionParams{3: gen 1 epoch 0, 2 parts} step 1310, product {1 parts epoch 0} done 00002.104 II| TABLET_EXECUTOR: LSnap{1:2, on 2:1501, 13283b, wait} done, Waste{2:0, 4052398b +(201, 857250b), 300 trc} 00002.191 II| TABLET_EXECUTOR: Leader{1:2:1572} st ... USAGECACHE: Send page collection result [1:2:250:1:12288:161:0] owner [37:418:2424] class Scan pages [ 0 ] cookie 0 00000.463 DD| TABLET_SAUSAGECACHE: Add page collection [1:2:313:1:12288:161:0] owner [37:418:2424] 00000.463 TT| TABLET_SAUSAGECACHE: Request page collection [1:2:313:1:12288:161:0] owner [37:418:2424] cookie 0 class Scan from cache [ 0 ] 00000.463 TT| TABLET_SAUSAGECACHE: Send page collection result [1:2:313:1:12288:161:0] owner [37:418:2424] class Scan pages [ 0 ] cookie 0 00000.464 DD| TABLET_SAUSAGECACHE: Save page collection [1:2:315:1:12288:163:0] owner [37:419:2424] compacted pages [ 2 ] 00000.464 DD| TABLET_SAUSAGECACHE: Add page collection [1:2:315:1:12288:163:0] 00000.464 DD| TABLET_SAUSAGECACHE: Unregister owner [37:418:2424] 00000.464 DD| TABLET_SAUSAGECACHE: Remove page collection [1:2:188:1:12288:161:0] owner [37:418:2424] 00000.464 DD| TABLET_SAUSAGECACHE: Remove page collection [1:2:64:1:12288:161:0] owner [37:418:2424] 00000.464 DD| TABLET_SAUSAGECACHE: Remove page collection [1:2:313:1:12288:161:0] owner [37:418:2424] 00000.464 DD| TABLET_SAUSAGECACHE: Remove page collection [1:2:250:1:12288:161:0] owner [37:418:2424] 00000.464 DD| TABLET_SAUSAGECACHE: Remove page collection [1:2:126:1:12288:161:0] owner [37:418:2424] 00000.464 DD| TABLET_SAUSAGECACHE: Remove owner [37:418:2424] 00000.464 II| TABLET_EXECUTOR: Leader{1:2:316} Compact 63 on TGenCompactionParams{101: gen 2 epoch 0, 5 parts} step 315, product {1 parts epoch 0} done 00000.465 DD| TABLET_EXECUTOR: TGenCompactionStrategy CompactionFinished for 1: compaction 63, generation 2 00000.465 DD| TABLET_EXECUTOR: TGenCompactionStrategy CheckGeneration for 1 generation 2, state Free, final id 0, final level 2 00000.465 DD| RESOURCE_BROKER: Finish task gen2-table-101-tablet-1 (32 by [37:30:2062]) (release resources {1, 0}) 
00000.465 DD| RESOURCE_BROKER: Updated planned resource usage for queue queue_compaction_gen2 from 60.000000 to 0.000000 (remove task gen2-table-101-tablet-1 (32 by [37:30:2062])) 00000.465 DD| TABLET_SAUSAGECACHE: Attach page collection [1:2:315:1:12288:163:0] owner [37:30:2062] 00000.465 DD| TABLET_SAUSAGECACHE: Add page collection [1:2:315:1:12288:163:0] owner [37:30:2062] 00000.465 DD| TABLET_SAUSAGECACHE: Detach page collection [1:2:313:1:12288:161:0] owner [37:30:2062] 00000.465 DD| TABLET_SAUSAGECACHE: Remove page collection [1:2:313:1:12288:161:0] owner [37:30:2062] 00000.465 DD| TABLET_SAUSAGECACHE: Detach page collection [1:2:250:1:12288:161:0] owner [37:30:2062] 00000.465 DD| TABLET_SAUSAGECACHE: Remove page collection [1:2:250:1:12288:161:0] owner [37:30:2062] 00000.465 DD| TABLET_SAUSAGECACHE: Detach page collection [1:2:188:1:12288:161:0] owner [37:30:2062] 00000.465 DD| TABLET_SAUSAGECACHE: Remove page collection [1:2:188:1:12288:161:0] owner [37:30:2062] 00000.466 DD| TABLET_EXECUTOR: Leader{1:2:317} committed cookie 3 for step 316 00000.466 DD| TABLET_SAUSAGECACHE: Detach page collection [1:2:126:1:12288:161:0] owner [37:30:2062] 00000.466 DD| TABLET_SAUSAGECACHE: Remove page collection [1:2:126:1:12288:161:0] owner [37:30:2062] 00000.466 DD| TABLET_EXECUTOR: Leader{1:2:317} switch applied on followers, step 316 00000.466 DD| TABLET_SAUSAGECACHE: Detach page collection [1:2:64:1:12288:161:0] owner [37:30:2062] 00000.466 DD| TABLET_SAUSAGECACHE: Remove page collection [1:2:64:1:12288:161:0] owner [37:30:2062] 00000.466 TT| TABLET_SAUSAGECACHE: Touch page collection [1:2:315:1:12288:163:0] owner [37:30:2062] pages [ 2 ] 00000.467 DD| TABLET_SAUSAGECACHE: Add page collection [1:2:64:1:12288:161:0] owner [37:405:2414] 00000.467 TT| TABLET_SAUSAGECACHE: Request page collection [1:2:64:1:12288:161:0] owner [37:405:2414] cookie 0 class Scan from cache [ 0 ] 00000.467 TT| TABLET_SAUSAGECACHE: Send page collection result [1:2:64:1:12288:161:0] owner [37:405:2414] class Scan pages [ 0 ] cookie 0 00000.467 DD| TABLET_SAUSAGECACHE: Add page collection [1:2:126:1:12288:161:0] owner [37:405:2414] 00000.467 TT| TABLET_SAUSAGECACHE: Request page collection [1:2:126:1:12288:161:0] owner [37:405:2414] cookie 0 class Scan from cache [ 0 ] 00000.467 TT| TABLET_SAUSAGECACHE: Send page collection result [1:2:126:1:12288:161:0] owner [37:405:2414] class Scan pages [ 0 ] cookie 0 00000.467 DD| TABLET_SAUSAGECACHE: Add page collection [1:2:188:1:12288:161:0] owner [37:405:2414] 00000.467 TT| TABLET_SAUSAGECACHE: Request page collection [1:2:188:1:12288:161:0] owner [37:405:2414] cookie 0 class Scan from cache [ 0 ] 00000.467 TT| TABLET_SAUSAGECACHE: Send page collection result [1:2:188:1:12288:161:0] owner [37:405:2414] class Scan pages [ 0 ] cookie 0 00000.468 DD| TABLET_SAUSAGECACHE: Add page collection [1:2:250:1:12288:161:0] owner [37:405:2414] 00000.468 TT| TABLET_SAUSAGECACHE: Request page collection [1:2:250:1:12288:161:0] owner [37:405:2414] cookie 0 class Scan from cache [ 0 ] 00000.468 TT| TABLET_SAUSAGECACHE: Send page collection result [1:2:250:1:12288:161:0] owner [37:405:2414] class Scan pages [ 0 ] cookie 0 00000.468 DD| TABLET_SAUSAGECACHE: Add page collection [1:2:261:1:12288:161:0] owner [37:405:2414] 00000.468 TT| TABLET_SAUSAGECACHE: Request page collection [1:2:261:1:12288:161:0] owner [37:405:2414] cookie 0 class Scan from cache [ 0 ] 00000.468 TT| TABLET_SAUSAGECACHE: Send page collection result [1:2:261:1:12288:161:0] owner [37:405:2414] class Scan pages [ 0 ] cookie 0
00000.468 DD| TABLET_SAUSAGECACHE: Add page collection [1:2:273:1:12288:161:0] owner [37:405:2414] 00000.468 TT| TABLET_SAUSAGECACHE: Request page collection [1:2:273:1:12288:161:0] owner [37:405:2414] cookie 0 class Scan from cache [ 0 ] 00000.468 TT| TABLET_SAUSAGECACHE: Send page collection result [1:2:273:1:12288:161:0] owner [37:405:2414] class Scan pages [ 0 ] cookie 0 00000.468 DD| TABLET_SAUSAGECACHE: Add page collection [1:2:285:1:12288:161:0] owner [37:405:2414] 00000.468 TT| TABLET_SAUSAGECACHE: Request page collection [1:2:285:1:12288:161:0] owner [37:405:2414] cookie 0 class Scan from cache [ 0 ] 00000.468 TT| TABLET_SAUSAGECACHE: Send page collection result [1:2:285:1:12288:161:0] owner [37:405:2414] class Scan pages [ 0 ] cookie 0 00000.468 DD| TABLET_SAUSAGECACHE: Add page collection [1:2:297:1:12288:161:0] owner [37:405:2414] 00000.468 TT| TABLET_SAUSAGECACHE: Request page collection [1:2:297:1:12288:161:0] owner [37:405:2414] cookie 0 class Scan from cache [ 0 ] 00000.468 TT| TABLET_SAUSAGECACHE: Send page collection result [1:2:297:1:12288:161:0] owner [37:405:2414] class Scan pages [ 0 ] cookie 0 00000.469 DD| TABLET_SAUSAGECACHE: Unregister owner [37:405:2414] 00000.469 DD| TABLET_SAUSAGECACHE: Remove page collection [1:2:297:1:12288:161:0] owner [37:405:2414] 00000.469 DD| TABLET_SAUSAGECACHE: Remove page collection [1:2:285:1:12288:161:0] owner [37:405:2414] 00000.469 DD| TABLET_SAUSAGECACHE: Remove page collection [1:2:261:1:12288:161:0] owner [37:405:2414] 00000.469 DD| TABLET_SAUSAGECACHE: Remove page collection [1:2:188:1:12288:161:0] owner [37:405:2414] 00000.469 DD| TABLET_SAUSAGECACHE: Remove page collection [1:2:126:1:12288:161:0] owner [37:405:2414] 00000.469 DD| TABLET_SAUSAGECACHE: Remove page collection [1:2:250:1:12288:161:0] owner [37:405:2414] 00000.469 DD| TABLET_SAUSAGECACHE: Remove page collection [1:2:64:1:12288:161:0] owner [37:405:2414] 00000.469 DD| TABLET_SAUSAGECACHE: Remove page collection [1:2:273:1:12288:161:0] owner [37:405:2414] 00000.469 DD| TABLET_SAUSAGECACHE: Remove owner [37:405:2414] 00000.469 DD| RESOURCE_BROKER: Finish task Scan{58 on 101}::1 (29 by [37:30:2062]) (release resources {1, 0}) 00000.469 DD| RESOURCE_BROKER: Updated planned resource usage for queue queue_scan from 150.000000 to 0.000000 (remove task Scan{58 on 101}::1 (29 by [37:30:2062])) 00000.470 II| TABLET_EXECUTOR: Leader{1:2:317} suiciding, Waste{2:0, 7661b +(30, 11928b), 16 trc, -42337b acc} 00000.471 DD| TABLET_SAUSAGECACHE: Unregister owner [37:30:2062] 00000.471 DD| TABLET_SAUSAGECACHE: Remove page collection [1:2:315:1:12288:163:0] owner [37:30:2062] 00000.471 DD| TABLET_SAUSAGECACHE: Remove owner [37:30:2062] 00000.471 II| FAKE_ENV: Model starts hard shutdown on level 7 of 8, left 2 actors 00000.471 NN| TABLET_SAUSAGECACHE: Poison cache serviced 38 reqs hit {38 21480b} miss {0 0b} 00000.471 II| FAKE_ENV: Shut order, stopping 4 BS groups 00000.472 II| FAKE_ENV: DS.0 gone, left {1961b, 17}, put {31666b, 317} 00000.472 II| FAKE_ENV: DS.1 gone, left {23850b, 37}, put {57240b, 346} 00000.472 II| FAKE_ENV: DS.2 gone, left {0b, 0}, put {0b, 0} 00000.472 II| FAKE_ENV: DS.3 gone, left {0b, 0}, put {0b, 0} 00000.472 II| FAKE_ENV: All BS storage groups are stopped 00000.472 II| FAKE_ENV: Model stopped, hosted 3 actors, spent 0.000s 00000.472 II| FAKE_ENV: Logged {Emerg 0 Alert 0 Crit 0 Error 0 Left 2287}, stopped 00000.000 II| FAKE_ENV: Born at 2025-05-07T08:56:54.608201Z 00000.008 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 
ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144 00000.009 II| FAKE_ENV: Starting storage for BS group 0 00000.009 II| FAKE_ENV: Starting storage for BS group 1 00000.009 II| FAKE_ENV: Starting storage for BS group 2 00000.009 II| FAKE_ENV: Starting storage for BS group 3 00000.069 II| FAKE_ENV: Model starts hard shutdown on level 7 of 8, left 2 actors 00000.069 NN| TABLET_SAUSAGECACHE: Poison cache serviced 0 reqs hit {0 0b} miss {0 0b} 00000.069 II| FAKE_ENV: Shut order, stopping 4 BS groups 00000.069 II| FAKE_ENV: DS.0 gone, left {536b, 6}, put {556b, 7} 00000.069 II| FAKE_ENV: DS.1 gone, left {30495b, 8}, put {30495b, 8} 00000.069 II| FAKE_ENV: DS.2 gone, left {0b, 0}, put {0b, 0} 00000.070 II| FAKE_ENV: DS.3 gone, left {0b, 0}, put {0b, 0} 00000.070 II| FAKE_ENV: All BS storage groups are stopped 00000.070 II| FAKE_ENV: Model stopped, hosted 3 actors, spent 0.000s 00000.070 II| FAKE_ENV: Logged {Emerg 0 Alert 0 Crit 0 Error 0 Left 15}, stopped 00000.000 II| FAKE_ENV: Born at 2025-05-07T08:56:54.693265Z 00000.008 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144 00000.009 II| FAKE_ENV: Starting storage for BS group 0 00000.009 II| FAKE_ENV: Starting storage for BS group 1 00000.010 II| FAKE_ENV: Starting storage for BS group 2 00000.010 II| FAKE_ENV: Starting storage for BS group 3 00000.107 CC| TABLET_EXECUTOR: Tablet 1 unhandled exception std::runtime_error: test ??+0 (0x11617AF1) __cxa_throw+221 (0x1161791D) NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Exceptions::TTxExecuteThrowException::Execute(NKikimr::NTabletFlatExecutor::TTransactionContext&, NActors::TActorContext const&)+62 (0x1092C13E) NKikimr::NTabletFlatExecutor::TExecutor::ExecuteTransaction(NKikimr::NTabletFlatExecutor::TSeat*)+3349 (0x177C3FB5) NKikimr::NTabletFlatExecutor::TExecutor::DoExecute(TAutoPtr, NKikimr::NTabletFlatExecutor::TExecutor::ETxMode)+10645 (0x177BF955) non-virtual thunk to NKikimr::NTabletFlatExecutor::TExecutor::Execute(TAutoPtr, NActors::TActorContext const&)+54 (0x177C6C26) ??+0 (0x1092BF30) NKikimr::NFake::TDummy::Inbox(TAutoPtr&)+2810 (0x1086D1EA) NActors::IActor::Receive(TAutoPtr&)+237 (0x12ABCD3D) 00000.108 II| FAKE_ENV: Model starts hard shutdown on level 7 of 8, left 2 actors 00000.108 NN| TABLET_SAUSAGECACHE: Poison cache serviced 0 reqs hit {0 0b} miss {0 0b} 00000.108 II| FAKE_ENV: Shut order, stopping 4 BS groups 00000.108 II| FAKE_ENV: DS.0 gone, left {42b, 1}, put {62b, 2} 00000.108 II| FAKE_ENV: DS.1 gone, left {35b, 1}, put {35b, 1} 00000.108 II| FAKE_ENV: DS.2 gone, left {0b, 0}, put {0b, 0} 00000.108 II| FAKE_ENV: DS.3 gone, left {0b, 0}, put {0b, 0} 00000.108 II| FAKE_ENV: All BS storage groups are stopped 00000.108 II| FAKE_ENV: Model stopped, hosted 3 actors, spent 0.000s 00000.108 II| FAKE_ENV: Logged {Emerg 0 Alert 0 Crit 1 Error 0 Left 15}, stopped ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/pg/unittest >> PgCatalog::PgTables [GOOD] Test command err: Trying to start YDB, gRPC: 19342, MsgBus: 20675 2025-05-07T08:50:52.830949Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501623604999429123:2263];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:50:52.836040Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0020a9/r3tmp/tmpcVG92X/pdisk_1.dat 2025-05-07T08:50:53.357135Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:50:53.357249Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:50:53.359642Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:50:53.381565Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 19342, node 1 2025-05-07T08:50:53.566480Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:50:53.566505Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:50:53.566526Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:50:53.566645Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:20675 TClient is connected to server localhost:20675 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:50:54.292186Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 1042 2025-05-07T08:50:56.483121Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480
: Error: Bulk upsert to table '/Root/Coerce_pgbpchar_17472595041006102391_17823623939509273229' Typemod mismatch, got type pgbpchar for column value, type mod , but expected 2 --!syntax_pg INSERT INTO Coerce_pgbpchar_17472595041006102391_17823623939509273229 (key, value) VALUES ( '0'::int2, 'abcd'::bpchar ) 2025-05-07T08:50:56.706595Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623622179298872:2344], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:50:56.706689Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623622179298864:2341], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:50:56.706866Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:50:56.710383Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480 2025-05-07T08:50:56.728893Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501623622179298878:2345], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-05-07T08:50:56.814672Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501623622179298929:2395] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:50:57.171245Z node 1 :TX_DATASHARD CRIT: execute_kqp_data_tx_unit.cpp:443: Exception while executing KQP transaction [0:281474976710663] at 72075186224037888: ydb/core/tx/datashard/datashard_kqp_upsert_rows.cpp:87: Apply(): requirement !error failed. Incorrect value: Error while coercing value, reason: yql/essentials/minikql/mkql_terminator.cpp:47: ERROR: value too long for type character(2) 2025-05-07T08:50:57.172827Z node 1 :TX_DATASHARD ERROR: finish_propose_unit.cpp:174: Errors while proposing transaction txid 281474976710663 at tablet 72075186224037888 status: EXEC_ERROR errors: UNKNOWN (Tx was terminated: ydb/core/tx/datashard/datashard_kqp_upsert_rows.cpp:87: Apply(): requirement !error failed. Incorrect value: Error while coercing value, reason: yql/essentials/minikql/mkql_terminator.cpp:47: ERROR: value too long for type character(2) ) | 2025-05-07T08:50:57.173090Z node 1 :KQP_EXECUTER ERROR: kqp_data_executer.cpp:863: ActorId: [1:7501623626474266279:2339] TxId: 281474976710663. Ctx: { TraceId: 01jtmz1q2111phaec8mr6cf8vq, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ODY4MWRmY2EtMmRjYjFjY2UtZTMyZDg4MmItYTBkY2Y5ODI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. EXEC_ERROR: [UNKNOWN] Tx was terminated: ydb/core/tx/datashard/datashard_kqp_upsert_rows.cpp:87: Apply(): requirement !error failed. Incorrect value: Error while coercing value, reason: yql/essentials/minikql/mkql_terminator.cpp:47: ERROR: value too long for type character(2) ; 2025-05-07T08:50:57.209674Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2578: SessionId: ydb://session/3?node_id=1&id=ODY4MWRmY2EtMmRjYjFjY2UtZTMyZDg4MmItYTBkY2Y5ODI=, ActorId: [1:7501623622179298861:2339], ActorState: ExecuteState, TraceId: 01jtmz1q2111phaec8mr6cf8vq, Create QueryResponse for error on request, msg:
: Error: Error executing transaction (ExecError): Execution failed
: Error: [UNKNOWN] Tx was terminated: ydb/core/tx/datashard/datashard_kqp_upsert_rows.cpp:87: Apply(): requirement !error failed. Incorrect value: Error while coercing value, reason: yql/essentials/minikql/mkql_terminator.cpp:47: ERROR: value too long for type character(2) 2025-05-07T08:50:57.250823Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480
: Error: Bulk upsert to table '/Root/Coerce__pgbpchar_17472595041006102391_5352544928909966465' Typemod mismatch, got type _pgbpchar for column value, type mod , but expected 2 --!syntax_pg INSERT INTO Coerce__pgbpchar_17472595041006102391_5352544928909966465 (key, value) VALUES ( '0'::int2, '{abcd,abcd}'::_bpchar ) 2025-05-07T08:50:57.612015Z node 1 :TX_DATASHARD CRIT: execute_kqp_data_tx_unit.cpp:443: Exception while executing KQP transaction [0:281474976710668] at 72075186224037889: ydb/core/tx/datashard/datashard_kqp_upsert_rows.cpp:87: Apply(): requirement !error failed. Incorrect value: Error while coercing value, reason: yql/essentials/minikql/mkql_terminator.cpp:47: ERROR: value too long for type character(2) 2025-05-07T08:50:57.613701Z node 1 :TX_DATASHARD ERROR: finish_propose_unit.cpp:174: Errors while proposing transaction txid 281474976710668 at tablet 72075186224037889 status: EXEC_ERROR errors: UNKNOWN (Tx was terminated: ydb/core/tx/datashard/datashard_kqp_upsert_rows.cpp:87: Apply(): requirement !error failed. Incorrect value: Error while coercing value, reason: yql/essentials/minikql/mkql_terminator.cpp:47: ERROR: value too long for type character(2) ) | 2025-05-07T08:50:57.613936Z node 1 :KQP_EXECUTER ERROR: kqp_data_executer.cpp:863: ActorId: [1:7501623626474266412:2373] TxId: 281474976710668. Ctx: { TraceId: 01jtmz1qp6bwdbvf7qc9mz4nk7, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTBlYTI3Y2ItNjIyN2Q3ZDQtZTY2M2E3ZDQtY2U5MThjM2I=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. EXEC_ERROR: [UNKNOWN] Tx was terminated: ydb/core/tx/datashard/datashard_kqp_upsert_rows.cpp:87: Apply(): requirement !error failed. Incorrect value: Error while coercing value, reason: yql/essentials/minikql/mkql_terminator.cpp:47: ERROR: value too long for type character(2) ; 2025-05-07T08:50:57.614145Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2578: SessionId: ydb://session/3?node_id=1&id=YTBlYTI3Y2ItNjIyN2Q3ZDQtZTY2M2E3ZDQtY2U5MThjM2I=, ActorId: [1:7501623626474266369:2373], ActorState: ExecuteState, TraceId: 01jtmz1qp6bwdbvf7qc9mz4nk7, Create QueryResponse for error on request, msg:
: Error: Error executing transaction (ExecError): Execution failed
: Error: [UNKNOWN] Tx was terminated: ydb/core/tx/datashard/datashard_kqp_upsert_rows.cpp:87: Apply(): requirement !error failed. Incorrect value: Error while coercing value, reason: yql/essentials/minikql/mkql_terminator.cpp:47: ERROR: value too long for type character(2) 1042 2025-05-07T08:50:57.647439Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480
: Error: Bulk upsert to table '/Root/Coerce_pgbpchar_2169371982377735806_17823623939509273229' Typemod mismatch, got type pgbpchar for column value, type mod , but expected 4 --!syntax_pg INSERT INTO Coerce_pgbpchar_2169371982377735806_17823623939509273229 (key, value) VALUES ( '0'::int2, 'abcd'::bpchar ) 2025-05-07T08:50:57.838118Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501623604999429123:2263];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:50:57.838173Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; abcd 2025-05-07T08:50:58.166283Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710676:0, at schemeshard: 72057 ... table config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:56:10.119069Z node 11 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:56:10.119084Z node 11 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:56:10.119297Z node 11 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:1182 TClient is connected to server localhost:1182 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:56:11.973537Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:56:18.942887Z node 11 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [11:7501625007253990573:2336], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:56:18.943070Z node 11 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:56:18.943807Z node 11 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [11:7501625007253990585:2339], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:56:18.960619Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480 2025-05-07T08:56:18.991262Z node 11 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [11:7501625007253990587:2340], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-05-07T08:56:19.072901Z node 11 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [11:7501625011548957935:2349] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } Trying to start YDB, gRPC: 1127, MsgBus: 23304 2025-05-07T08:56:21.122449Z node 12 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[12:7501625019178396101:2064];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:56:21.123832Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0020a9/r3tmp/tmpaRlT0A/pdisk_1.dat 2025-05-07T08:56:21.663383Z node 12 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:56:21.784418Z node 12 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:56:21.784569Z node 12 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:56:21.786760Z node 12 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 1127, node 12 2025-05-07T08:56:21.971005Z node 12 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:56:21.971047Z node 12 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:56:21.971063Z node 12 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:56:21.971338Z node 12 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23304 TClient is connected to server localhost:23304 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:56:23.916229Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T08:56:26.134121Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[12:7501625019178396101:2064];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:56:26.141110Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:56:30.740184Z node 12 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [12:7501625057833102436:2340], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:56:30.740353Z node 12 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:56:30.740917Z node 12 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [12:7501625057833102448:2343], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:56:30.753180Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480 2025-05-07T08:56:30.810281Z node 12 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [12:7501625057833102450:2344], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-05-07T08:56:30.889263Z node 12 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [12:7501625057833102501:2349] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:56:30.987115Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 2025-05-07T08:56:31.101065Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 2025-05-07T08:56:36.624784Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7261: Cannot get console configs 2025-05-07T08:56:36.624831Z node 12 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:56:40.092599Z node 12 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 12, TabletId: 72075186224037888 not found 2025-05-07T08:56:40.240744Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480 2025-05-07T08:56:41.314761Z node 12 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:678: SelfId: [12:7501625100782776038:2468], TxId: 281474976710672, task: 1. Ctx: { SessionId : ydb://session/3?node_id=12&id=NTc5M2FlMjQtYmNmNDg0ZTktNWEzZjJiNzAtZGRiNTg1MA==. CustomerSuppliedId : . TraceId : 01jtmzc6x99g6c5tz656r3qxqk. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. InternalError: PRECONDITION_FAILED DEFAULT_ERROR: {
: Error: Terminate was called, reason(57): ERROR: invalid input syntax for type boolean: "pg_proc" }. 2025-05-07T08:56:41.358273Z node 12 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1216: SelfId: [12:7501625100782776039:2469], TxId: 281474976710672, task: 2. Ctx: { SessionId : ydb://session/3?node_id=12&id=NTc5M2FlMjQtYmNmNDg0ZTktNWEzZjJiNzAtZGRiNTg1MA==. TraceId : 01jtmzc6x99g6c5tz656r3qxqk. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Handle abort execution event from: [12:7501625100782776035:2464], status: PRECONDITION_FAILED, reason: {
: Error: Terminate execution } 2025-05-07T08:56:41.384940Z node 12 :KQP_SESSION WARN: kqp_session_actor.cpp:2578: SessionId: ydb://session/3?node_id=12&id=NTc5M2FlMjQtYmNmNDg0ZTktNWEzZjJiNzAtZGRiNTg1MA==, ActorId: [12:7501625100782776027:2464], ActorState: ExecuteState, TraceId: 01jtmzc6x99g6c5tz656r3qxqk, Create QueryResponse for error on request, msg: |90.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_sequence/ydb-core-tx-schemeshard-ut_sequence |90.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_sequence/ydb-core-tx-schemeshard-ut_sequence |90.7%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_sequence/ydb-core-tx-schemeshard-ut_sequence |90.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data/unittest >> TableCreator::CreateTables [GOOD] >> BackupRestore::TestAllSchemeObjectTypes-EPathTypeCdcStream [GOOD] >> BackupRestore::TestAllSchemeObjectTypes-EPathTypeBlobDepot [GOOD] >> BackupRestore::TestAllSchemeObjectTypes-EPathTypeExternalTable |90.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/mind/address_classification/ut/ydb-core-mind-address_classification-ut |90.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/mind/address_classification/ut/ydb-core-mind-address_classification-ut |90.7%| [LD] {RESULT} $(B)/ydb/core/mind/address_classification/ut/ydb-core-mind-address_classification-ut >> BackupRestore::TestAllIndexTypes-EIndexTypeGlobalVectorKmeansTree [GOOD] >> BackupRestore::TestAllPrimitiveTypes-BOOL >> TColumnShardTestReadWrite::CompactionInGranule_PKInt64_Reboot [GOOD] >> SystemView::AuthUsers_ResultOrder [GOOD] >> SystemView::AuthUsers_TableRange ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/workload_service/ut/unittest >> KqpWorkloadServiceTables::TestLeaseUpdates [GOOD] Test command err: 2025-05-07T08:54:40.733178Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501624585338425222:2208];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:54:40.735894Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00482c/r3tmp/tmpWrkNHY/pdisk_1.dat 2025-05-07T08:54:41.548854Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:54:41.566692Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:54:41.566794Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:54:41.575043Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 7411, node 1 2025-05-07T08:54:41.878665Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:54:41.878694Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:54:41.878706Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:54:41.878831Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:17970 
WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:54:42.343492Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:54:45.110205Z node 1 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:440: [WorkloadService] [Service] Started workload service initialization 2025-05-07T08:54:45.119423Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:223: SessionId: ydb://session/3?node_id=1&id=MTE1MmUxOTctZTZjMTg0ZGItMWFmMmVkOTMtZDY1NmRmOGY=, ActorId: [0:0:0], ActorState: unknown state, Create session actor with id MTE1MmUxOTctZTZjMTg0ZGItMWFmMmVkOTMtZDY1NmRmOGY= 2025-05-07T08:54:45.151283Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:241: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7501624606813262166:2329], Start check tables existence, number paths: 2 2025-05-07T08:54:45.151404Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:227: SessionId: ydb://session/3?node_id=1&id=MTE1MmUxOTctZTZjMTg0ZGItMWFmMmVkOTMtZDY1NmRmOGY=, ActorId: [1:7501624606813262167:2330], ActorState: unknown state, session actor bootstrapped 2025-05-07T08:54:45.151576Z node 1 :KQP_WORKLOAD_SERVICE TRACE: kqp_workload_service.cpp:125: [WorkloadService] [Service] Updated node info, node count: 1 2025-05-07T08:54:45.151598Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:100: [WorkloadService] [Service] Subscribed for config changes 2025-05-07T08:54:45.151618Z node 1 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:111: [WorkloadService] [Service] Resource pools was enabled 2025-05-07T08:54:45.158131Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:182: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7501624606813262166:2329], Describe table /Root/.metadata/workload_manager/delayed_requests status PathErrorUnknown 2025-05-07T08:54:45.158194Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:182: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7501624606813262166:2329], Describe table /Root/.metadata/workload_manager/running_requests status PathErrorUnknown 2025-05-07T08:54:45.158230Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:289: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7501624606813262166:2329], Successfully finished 2025-05-07T08:54:45.158322Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:367: [WorkloadService] [Service] Cleanup completed, tables exists: 0 2025-05-07T08:54:45.166513Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:387:
[WorkloadService] [TPoolCreatorActor] ActorId: [1:7501624606813262193:2301], DatabaseId: Root, PoolId: sample_pool_id, Start pool creating 2025-05-07T08:54:45.171642Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480 2025-05-07T08:54:45.174473Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:429: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501624606813262193:2301], DatabaseId: Root, PoolId: sample_pool_id, Subscribe on create pool tx: 281474976710658 2025-05-07T08:54:45.175142Z node 1 :KQP_WORKLOAD_SERVICE TRACE: scheme_actors.cpp:352: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501624606813262193:2301], DatabaseId: Root, PoolId: sample_pool_id, Tablet to pipe successfully connected 2025-05-07T08:54:45.184354Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501624606813262193:2301], DatabaseId: Root, PoolId: sample_pool_id, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-05-07T08:54:45.254930Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:387: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501624606813262193:2301], DatabaseId: Root, PoolId: sample_pool_id, Start pool creating 2025-05-07T08:54:45.260508Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501624606813262244:2333] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/sample_pool_id\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:54:45.260682Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:480: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501624606813262193:2301], DatabaseId: Root, PoolId: sample_pool_id, Pool successfully created 2025-05-07T08:54:45.261223Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:157: [WorkloadService] [Service] Received subscription request, DatabaseId: Root, PoolId: sample_pool_id 2025-05-07T08:54:45.261252Z node 1 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:561: [WorkloadService] [Service] Creating new database state for id Root 2025-05-07T08:54:45.261344Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:185: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501624606813262251:2332], DatabaseId: Root, PoolId: sample_pool_id, Start pool fetching 2025-05-07T08:54:45.266432Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:223: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501624606813262251:2332], DatabaseId: Root, PoolId: sample_pool_id, Pool info successfully fetched 2025-05-07T08:54:45.266501Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:253: [WorkloadService] [Service] Successfully fetched pool sample_pool_id, DatabaseId: Root 2025-05-07T08:54:45.266527Z node 1 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:571: [WorkloadService] [Service] Creating new handler for pool /Root/sample_pool_id 2025-05-07T08:54:45.266755Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: pool_handlers_actors.cpp:466: [WorkloadService] [TPoolHandlerActorBase] ActorId: [1:7501624606813262260:2333], DatabaseId: Root, PoolId: sample_pool_id, Subscribed on schemeboard notifications for path: [OwnerId: 72057594046644480, LocalPathId: 5] 2025-05-07T08:54:45.267995Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: pool_handlers_actors.cpp:274: [WorkloadService] [TPoolHandlerActorBase] ActorId: [1:7501624606813262260:2333], DatabaseId: Root, PoolId: sample_pool_id, Got watch notification 2025-05-07T08:54:45.290530Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:157: [WorkloadService] [Service] Received subscription request, DatabaseId: /Root, PoolId: default 2025-05-07T08:54:45.290555Z node 1 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:561: [WorkloadService] [Service] Creating new database state for id /Root 2025-05-07T08:54:45.290599Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:185: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501624606813262272:2335], DatabaseId: /Root, PoolId: default, Start pool fetching 2025-05-07T08:54:45.290765Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:443: SessionId: ydb://session/3?node_id=1&id=MTE1MmUxOTctZTZjMTg0ZGItMWFmMmVkOTMtZDY1NmRmOGY=, ActorId: [1:7501624606813262167:2330], ActorState: ReadyState, TraceId: 01jtmz8p979r0f8d6sjppmbbze, received request, proxyRequestId: 3 prepared: 0 tx_control: 0 action:
QUERY_ACTION_EXECUTE type: QUERY_TYPE_SQL_DDL text: DROP RESOURCE POOL sample_pool_id; rpcActor: [0:0:0] database: /Root databaseId: /Root pool id: default 2025-05-07T08:54:45.295876Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501624606813262272:2335], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:54:45.295975Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:54:45.498069Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: pool_handlers_actors.cpp:294: [WorkloadService] [TPoolHandlerActorBase] ActorId: [1:7501624606813262260:2333], DatabaseId: Root, PoolId: sample_pool_id, Got delete notification 2025-05-07T08:54:45.500729Z node 1 :KQP_SESSION INFO: kqp_session_actor.cpp:2473: SessionId: ydb://session/3?node_id=1&id=MTE1MmUxOTctZTZjMTg0ZGItMWFmMmVkOTMtZDY1NmRmOGY=, ActorId: [1:7501624606813262167:2330], ActorState: ExecuteState, TraceId: 01jtmz8p979r0f8d6sjppmbbze, Cleanup start, isFinal: 0 CleanupCtx: 1 TransactionsToBeAborted.size(): 0 WorkerId: [1:7501624606813262273:2330] WorkloadServiceCleanup: 0 2025-05-07T08:54:45.502257Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2534: SessionId: ydb://session/3?node_id=1&id=MTE1MmUxOTctZTZjMTg0ZGItMWFmMmVkOTMtZDY1NmRmOGY=, ActorId: [1:7501624606813262167:2330], ActorState: CleanupState, TraceId: 01jtmz8p979r0f8d6sjppmbbze, EndCleanup, isFinal: 0 2025-05-07T08:54:45.502324Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2270: SessionId: ydb://session/3?node_id=1&id= ... SSION INFO: kqp_session_actor.cpp:1958: SessionId: ydb://session/3?node_id=10&id=MjcwNjdkZmEtZTk2NmQ5NjMtMjhiNzc3YTMtMzNmYTAyNDc=, ActorId: [10:7501625164450396355:2549], ActorState: ExecuteState, TraceId: 01jtmzcnbe04eaxky9n7905x0m, txInfo Status: Committed Kind: ReadWrite TotalDuration: 30.374 ServerDuration: 29.78 QueriesCount: 2 2025-05-07T08:56:55.440988Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2113: SessionId: ydb://session/3?node_id=10&id=MjcwNjdkZmEtZTk2NmQ5NjMtMjhiNzc3YTMtMzNmYTAyNDc=, ActorId: [10:7501625164450396355:2549], ActorState: ExecuteState, TraceId: 01jtmzcnbe04eaxky9n7905x0m, Create QueryResponse for action: QUERY_ACTION_EXECUTE with SUCCESS status 2025-05-07T08:56:55.441063Z node 10 :KQP_SESSION INFO: kqp_session_actor.cpp:2473: SessionId: ydb://session/3?node_id=10&id=MjcwNjdkZmEtZTk2NmQ5NjMtMjhiNzc3YTMtMzNmYTAyNDc=, ActorId: [10:7501625164450396355:2549], ActorState: ExecuteState, TraceId: 01jtmzcnbe04eaxky9n7905x0m, Cleanup start, isFinal: 0 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-05-07T08:56:55.441105Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2534: SessionId: ydb://session/3?node_id=10&id=MjcwNjdkZmEtZTk2NmQ5NjMtMjhiNzc3YTMtMzNmYTAyNDc=, ActorId: [10:7501625164450396355:2549], ActorState: ExecuteState, TraceId: 01jtmzcnbe04eaxky9n7905x0m, EndCleanup, isFinal: 0 2025-05-07T08:56:55.441176Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2270: SessionId: ydb://session/3?node_id=10&id=MjcwNjdkZmEtZTk2NmQ5NjMtMjhiNzc3YTMtMzNmYTAyNDc=, ActorId: [10:7501625164450396355:2549], ActorState: ExecuteState, TraceId: 01jtmzcnbe04eaxky9n7905x0m, Sent query response back to proxy, proxyRequestId: 28, proxyId: [10:7501625052781245327:2233] 2025-05-07T08:56:55.442587Z node 10 :KQP_WORKLOAD_SERVICE DEBUG: query_actor.cpp:240: [TQueryBase] [TRefreshPoolStateQuery] TraceId: sample_pool_id, RequestDatabase: /Root, RequestSessionId: , State: Update lease, TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=10&id=MjcwNjdkZmEtZTk2NmQ5NjMtMjhiNzc3YTMtMzNmYTAyNDc=, TxId: 2025-05-07T08:56:55.442732Z node 10 :KQP_WORKLOAD_SERVICE DEBUG: query_actor.cpp:197: [TQueryBase] [TRefreshPoolStateQuery] TraceId: sample_pool_id, RequestDatabase: /Root, RequestSessionId: , State: Update lease, RunDataQuery: -- TRefreshPoolStateQuery::OnLeaseUpdated DECLARE $database_id 
AS Text; DECLARE $pool_id AS Text; SELECT COUNT(*) AS delayed_requests FROM `.metadata/workload_manager/delayed_requests` WHERE database = $database_id AND pool_id = $pool_id AND (wait_deadline IS NULL OR wait_deadline >= CurrentUtcTimestamp()) AND lease_deadline >= CurrentUtcTimestamp(); SELECT COUNT(*) AS running_requests FROM `.metadata/workload_manager/running_requests` WHERE database = $database_id AND pool_id = $pool_id AND lease_deadline >= CurrentUtcTimestamp(); 2025-05-07T08:56:55.443931Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:443: SessionId: ydb://session/3?node_id=10&id=MjcwNjdkZmEtZTk2NmQ5NjMtMjhiNzc3YTMtMzNmYTAyNDc=, ActorId: [10:7501625164450396355:2549], ActorState: ReadyState, TraceId: 01jtmzcnck9bymyvhnkgcq5vdk, received request, proxyRequestId: 29 prepared: 0 tx_control: 1 action: QUERY_ACTION_EXECUTE type: QUERY_TYPE_SQL_DML text: -- TRefreshPoolStateQuery::OnLeaseUpdated DECLARE $database_id AS Text; DECLARE $pool_id AS Text; SELECT COUNT(*) AS delayed_requests FROM `.metadata/workload_manager/delayed_requests` WHERE database = $database_id AND pool_id = $pool_id AND (wait_deadline IS NULL OR wait_deadline >= CurrentUtcTimestamp()) AND lease_deadline >= CurrentUtcTimestamp(); SELECT COUNT(*) AS running_requests FROM `.metadata/workload_manager/running_requests` WHERE database = $database_id AND pool_id = $pool_id AND lease_deadline >= CurrentUtcTimestamp(); rpcActor: [10:7501625164450396382:2555] database: /Root databaseId: /Root pool id: default 2025-05-07T08:56:55.443970Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:264: SessionId: ydb://session/3?node_id=10&id=MjcwNjdkZmEtZTk2NmQ5NjMtMjhiNzc3YTMtMzNmYTAyNDc=, ActorId: [10:7501625164450396355:2549], ActorState: ReadyState, TraceId: 01jtmzcnck9bymyvhnkgcq5vdk, request placed into pool from cache: default 2025-05-07T08:56:55.445160Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1298: SessionId: ydb://session/3?node_id=10&id=MjcwNjdkZmEtZTk2NmQ5NjMtMjhiNzc3YTMtMzNmYTAyNDc=, ActorId: [10:7501625164450396355:2549], ActorState: ExecuteState, TraceId: 01jtmzcnck9bymyvhnkgcq5vdk, ExecutePhyTx, tx: 0x000050C0002DD318 literal: 0 commit: 0 txCtx.DeferredEffects.size(): 0 2025-05-07T08:56:55.445250Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1449: SessionId: ydb://session/3?node_id=10&id=MjcwNjdkZmEtZTk2NmQ5NjMtMjhiNzc3YTMtMzNmYTAyNDc=, ActorId: [10:7501625164450396355:2549], ActorState: ExecuteState, TraceId: 01jtmzcnck9bymyvhnkgcq5vdk, Sending to Executer TraceId: 0 8 2025-05-07T08:56:55.445341Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1507: SessionId: ydb://session/3?node_id=10&id=MjcwNjdkZmEtZTk2NmQ5NjMtMjhiNzc3YTMtMzNmYTAyNDc=, ActorId: [10:7501625164450396355:2549], ActorState: ExecuteState, TraceId: 01jtmzcnck9bymyvhnkgcq5vdk, Created new KQP executer: [10:7501625164450396387:2549] isRollback: 0 2025-05-07T08:56:55.465026Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1699: SessionId: ydb://session/3?node_id=10&id=MjcwNjdkZmEtZTk2NmQ5NjMtMjhiNzc3YTMtMzNmYTAyNDc=, ActorId: [10:7501625164450396355:2549], ActorState: ExecuteState, TraceId: 01jtmzcnck9bymyvhnkgcq5vdk, TEvTxResponse, CurrentTx: 1/2 response.status: SUCCESS 2025-05-07T08:56:55.465138Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1298: SessionId: ydb://session/3?node_id=10&id=MjcwNjdkZmEtZTk2NmQ5NjMtMjhiNzc3YTMtMzNmYTAyNDc=, ActorId: [10:7501625164450396355:2549], ActorState: ExecuteState, TraceId: 01jtmzcnck9bymyvhnkgcq5vdk, ExecutePhyTx, tx: 0x000050C000294298 literal: 1 commit: 1 txCtx.DeferredEffects.size(): 0 
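For readability, this is the TRefreshPoolStateQuery lease-refresh query that appears twice in the records above, reflowed; the text is taken verbatim from the log, only the whitespace differs:

    -- TRefreshPoolStateQuery::OnLeaseUpdated
    DECLARE $database_id AS Text;
    DECLARE $pool_id AS Text;

    SELECT COUNT(*) AS delayed_requests
    FROM `.metadata/workload_manager/delayed_requests`
    WHERE database = $database_id
      AND pool_id = $pool_id
      AND (wait_deadline IS NULL OR wait_deadline >= CurrentUtcTimestamp())
      AND lease_deadline >= CurrentUtcTimestamp();

    SELECT COUNT(*) AS running_requests
    FROM `.metadata/workload_manager/running_requests`
    WHERE database = $database_id
      AND pool_id = $pool_id
      AND lease_deadline >= CurrentUtcTimestamp();

It counts the pool's delayed and running requests whose leases are still live (lease_deadline in the future), skipping delayed requests whose wait_deadline has already passed.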
2025-05-07T08:56:55.468732Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1699: SessionId: ydb://session/3?node_id=10&id=MjcwNjdkZmEtZTk2NmQ5NjMtMjhiNzc3YTMtMzNmYTAyNDc=, ActorId: [10:7501625164450396355:2549], ActorState: ExecuteState, TraceId: 01jtmzcnck9bymyvhnkgcq5vdk, TEvTxResponse, CurrentTx: 2/2 response.status: SUCCESS 2025-05-07T08:56:55.468961Z node 10 :KQP_SESSION INFO: kqp_session_actor.cpp:1958: SessionId: ydb://session/3?node_id=10&id=MjcwNjdkZmEtZTk2NmQ5NjMtMjhiNzc3YTMtMzNmYTAyNDc=, ActorId: [10:7501625164450396355:2549], ActorState: ExecuteState, TraceId: 01jtmzcnck9bymyvhnkgcq5vdk, txInfo Status: Committed Kind: ReadOnly TotalDuration: 23.941 ServerDuration: 23.812 QueriesCount: 2 2025-05-07T08:56:55.469102Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2113: SessionId: ydb://session/3?node_id=10&id=MjcwNjdkZmEtZTk2NmQ5NjMtMjhiNzc3YTMtMzNmYTAyNDc=, ActorId: [10:7501625164450396355:2549], ActorState: ExecuteState, TraceId: 01jtmzcnck9bymyvhnkgcq5vdk, Create QueryResponse for action: QUERY_ACTION_EXECUTE with SUCCESS status 2025-05-07T08:56:55.469172Z node 10 :KQP_SESSION INFO: kqp_session_actor.cpp:2473: SessionId: ydb://session/3?node_id=10&id=MjcwNjdkZmEtZTk2NmQ5NjMtMjhiNzc3YTMtMzNmYTAyNDc=, ActorId: [10:7501625164450396355:2549], ActorState: ExecuteState, TraceId: 01jtmzcnck9bymyvhnkgcq5vdk, Cleanup start, isFinal: 0 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-05-07T08:56:55.469197Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2534: SessionId: ydb://session/3?node_id=10&id=MjcwNjdkZmEtZTk2NmQ5NjMtMjhiNzc3YTMtMzNmYTAyNDc=, ActorId: [10:7501625164450396355:2549], ActorState: ExecuteState, TraceId: 01jtmzcnck9bymyvhnkgcq5vdk, EndCleanup, isFinal: 0 2025-05-07T08:56:55.469254Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2270: SessionId: ydb://session/3?node_id=10&id=MjcwNjdkZmEtZTk2NmQ5NjMtMjhiNzc3YTMtMzNmYTAyNDc=, ActorId: [10:7501625164450396355:2549], ActorState: ExecuteState, TraceId: 01jtmzcnck9bymyvhnkgcq5vdk, Sent query response back to proxy, proxyRequestId: 29, proxyId: [10:7501625052781245327:2233] 2025-05-07T08:56:55.470122Z node 10 :KQP_WORKLOAD_SERVICE DEBUG: query_actor.cpp:240: [TQueryBase] [TRefreshPoolStateQuery] TraceId: sample_pool_id, RequestDatabase: /Root, RequestSessionId: , State: Describe pool, TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=10&id=MjcwNjdkZmEtZTk2NmQ5NjMtMjhiNzc3YTMtMzNmYTAyNDc=, TxId: 2025-05-07T08:56:55.470279Z node 10 :KQP_WORKLOAD_SERVICE DEBUG: query_actor.cpp:367: [TQueryBase] [TRefreshPoolStateQuery] TraceId: sample_pool_id, RequestDatabase: /Root, RequestSessionId: , State: Describe pool, Finish with SUCCESS, SessionId: ydb://session/3?node_id=10&id=MjcwNjdkZmEtZTk2NmQ5NjMtMjhiNzc3YTMtMzNmYTAyNDc=, TxId: 2025-05-07T08:56:55.470475Z node 10 :KQP_SESSION INFO: kqp_session_actor.cpp:2315: SessionId: ydb://session/3?node_id=10&id=MjcwNjdkZmEtZTk2NmQ5NjMtMjhiNzc3YTMtMzNmYTAyNDc=, ActorId: [10:7501625164450396355:2549], ActorState: ReadyState, Session closed due to explicit close event 2025-05-07T08:56:55.470513Z node 10 :KQP_SESSION INFO: kqp_session_actor.cpp:2473: SessionId: ydb://session/3?node_id=10&id=MjcwNjdkZmEtZTk2NmQ5NjMtMjhiNzc3YTMtMzNmYTAyNDc=, ActorId: [10:7501625164450396355:2549], ActorState: ReadyState, Cleanup start, isFinal: 1 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-05-07T08:56:55.470540Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2534: SessionId: 
ydb://session/3?node_id=10&id=MjcwNjdkZmEtZTk2NmQ5NjMtMjhiNzc3YTMtMzNmYTAyNDc=, ActorId: [10:7501625164450396355:2549], ActorState: ReadyState, EndCleanup, isFinal: 1 2025-05-07T08:56:55.470570Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2546: SessionId: ydb://session/3?node_id=10&id=MjcwNjdkZmEtZTk2NmQ5NjMtMjhiNzc3YTMtMzNmYTAyNDc=, ActorId: [10:7501625164450396355:2549], ActorState: unknown state, Cleanup temp tables: 0 2025-05-07T08:56:55.470649Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2637: SessionId: ydb://session/3?node_id=10&id=MjcwNjdkZmEtZTk2NmQ5NjMtMjhiNzc3YTMtMzNmYTAyNDc=, ActorId: [10:7501625164450396355:2549], ActorState: unknown state, Session actor destroyed 2025-05-07T08:56:55.487303Z node 10 :KQP_SESSION INFO: kqp_session_actor.cpp:2315: SessionId: ydb://session/3?node_id=10&id=YzFmZjNiNDAtODQ5NzUzMzAtNmNhMjE5NS0yZWJiMWQ4OQ==, ActorId: [10:7501625078551049569:2332], ActorState: ReadyState, Session closed due to explicit close event 2025-05-07T08:56:55.487367Z node 10 :KQP_SESSION INFO: kqp_session_actor.cpp:2473: SessionId: ydb://session/3?node_id=10&id=YzFmZjNiNDAtODQ5NzUzMzAtNmNhMjE5NS0yZWJiMWQ4OQ==, ActorId: [10:7501625078551049569:2332], ActorState: ReadyState, Cleanup start, isFinal: 1 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-05-07T08:56:55.487406Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2534: SessionId: ydb://session/3?node_id=10&id=YzFmZjNiNDAtODQ5NzUzMzAtNmNhMjE5NS0yZWJiMWQ4OQ==, ActorId: [10:7501625078551049569:2332], ActorState: ReadyState, EndCleanup, isFinal: 1 2025-05-07T08:56:55.487442Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2546: SessionId: ydb://session/3?node_id=10&id=YzFmZjNiNDAtODQ5NzUzMzAtNmNhMjE5NS0yZWJiMWQ4OQ==, ActorId: [10:7501625078551049569:2332], ActorState: unknown state, Cleanup temp tables: 0 2025-05-07T08:56:55.487552Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2637: SessionId: ydb://session/3?node_id=10&id=YzFmZjNiNDAtODQ5NzUzMzAtNmNhMjE5NS0yZWJiMWQ4OQ==, ActorId: [10:7501625078551049569:2332], ActorState: unknown state, Session actor destroyed |90.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data/unittest |90.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/library/table_creator/ut/unittest >> TableCreator::CreateTables [GOOD] Test command err: 2025-05-07T08:56:53.327625Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501625157426342603:2138];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:56:53.327671Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003ae1/r3tmp/tmpgNAPxA/pdisk_1.dat 2025-05-07T08:56:53.915327Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:56:53.924210Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:56:53.924330Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:56:53.928643Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is 
connected to server localhost:18301 TServer::EnableGrpc on GrpcPort 64576, node 1 2025-05-07T08:56:54.338691Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:56:54.338731Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:56:54.338740Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:56:54.338884Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-05-07T08:56:54.510319Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T08:56:54.548156Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:2, at schemeshard: 72057594046644480 2025-05-07T08:56:54.550701Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:1, at schemeshard: 72057594046644480 >> BackupRestoreS3::TestAllPrimitiveTypes-UTF8 [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-YSON |90.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data/unittest >> SystemView::TabletsFollowers [GOOD] >> SystemView::TabletsRanges >> KqpUserConstraint::KqpReadNull-UploadNull [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-INT16 [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-INT32 |90.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data/unittest >> KqpProxy::InvalidSessionID >> TableCreation::SimpleTableCreation >> TableCreation::ConcurrentTableCreation >> BackupRestore::TestAllPrimitiveTypes-UINT32 [GOOD] >> BackupRestore::TestAllPrimitiveTypes-UINT64 >> BackupRestoreS3::TestAllPrimitiveTypes-UINT32 [GOOD] |90.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/vdisk/syncer/ut/ydb-core-blobstorage-vdisk-syncer-ut |90.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/vdisk/syncer/ut/ydb-core-blobstorage-vdisk-syncer-ut |90.7%| [LD] {RESULT} $(B)/ydb/core/blobstorage/vdisk/syncer/ut/ydb-core-blobstorage-vdisk-syncer-ut >> BackupRestoreS3::TestAllPrimitiveTypes-UINT64 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data/unittest >> KqpUserConstraint::KqpReadNull-UploadNull [GOOD] Test command err: 2025-05-07T08:56:58.049026Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:105:2151], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:56:58.049197Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:56:58.049528Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/004878/r3tmp/tmppw3gIN/pdisk_1.dat 2025-05-07T08:56:58.471516Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-05-07T08:56:58.511573Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:56:58.568603Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:56:58.568700Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:56:58.582759Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:56:58.667166Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-05-07T08:56:59.185508Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:853:2701], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:56:59.185626Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:864:2706], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:56:59.185707Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:56:59.191594Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480 2025-05-07T08:56:59.364644Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:867:2709], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-05-07T08:56:59.489349Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:936:2747] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:56:59.900510Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715660. Ctx: { TraceId: 01jtmzcs1fansegjwjetcvke23, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MmI5ODYzYTQtNWNiZTAyM2EtNGQxZmE4ODctNjdiNmFlYg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> TColumnShardTestReadWrite::CompactionInGranule_PKInt64_Reboot [GOOD] Test command err: 2025-05-07T08:50:03.753535Z node 1 :BLOB_CACHE NOTICE: ctor_logger.h:56: MaxCacheDataSize: 20971520 InFlightDataSize: 0 2025-05-07T08:50:03.865101Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-05-07T08:50:03.883785Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-05-07T08:50:03.884041Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-05-07T08:50:03.891140Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-05-07T08:50:03.891325Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-05-07T08:50:03.891547Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-05-07T08:50:03.891647Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-05-07T08:50:03.891731Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-05-07T08:50:03.891813Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanInsertionDedup; 2025-05-07T08:50:03.891911Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-05-07T08:50:03.891978Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-05-07T08:50:03.892077Z node 1 
:TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-05-07T08:50:03.892188Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-05-07T08:50:03.892272Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-05-07T08:50:03.892368Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-05-07T08:50:03.924675Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-05-07T08:50:03.924820Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:137;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=11;current_normalizer=CLASS_NAME=Granules; 2025-05-07T08:50:03.924859Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-05-07T08:50:03.925035Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-05-07T08:50:03.925245Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-05-07T08:50:03.925335Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-05-07T08:50:03.925398Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-05-07T08:50:03.925518Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-05-07T08:50:03.925588Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-05-07T08:50:03.925669Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-05-07T08:50:03.925706Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-05-07T08:50:03.925886Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-05-07T08:50:03.926003Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-05-07T08:50:03.926044Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-05-07T08:50:03.926069Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-05-07T08:50:03.926153Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-05-07T08:50:03.926220Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-05-07T08:50:03.926277Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanInsertionDedup;id=CleanInsertionDedup; 2025-05-07T08:50:03.926317Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=8;type=CleanInsertionDedup; 2025-05-07T08:50:03.926387Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanInsertionDedup;id=8; 2025-05-07T08:50:03.926429Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-05-07T08:50:03.926456Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-05-07T08:50:03.926532Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-05-07T08:50:03.926593Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-05-07T08:50:03.926634Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-05-07T08:50:03.926889Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-05-07T08:50:03.926963Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-05-07T08:50:03.927014Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-05-07T08:50:03.927226Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-05-07T08:50:03.927269Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-05-07T08:50:03.927298Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-05-07T08:50:03.927435Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-05-07T08:50:03.927481Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-05-07T08:50:03.927522Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-05-07T08:50:03.927633Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-05-07T08:50:03.927709Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-05-07T08:50:03.927786Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-05-07T08:50:03.927827Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-05-07T08:50:03.928298Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=59; 2025-05-07T08:50:03.928399Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=44; ... 
ange:[NO_BLOB:0:2696];;column_id:8;chunk_idx:40;blob_range:[NO_BLOB:0:2696];;column_id:8;chunk_idx:41;blob_range:[NO_BLOB:0:2696];;column_id:8;chunk_idx:42;blob_range:[NO_BLOB:0:2696];;column_id:8;chunk_idx:43;blob_range:[NO_BLOB:0:2696];;column_id:8;chunk_idx:44;blob_range:[NO_BLOB:0:2696];;column_id:8;chunk_idx:45;blob_range:[NO_BLOB:0:2696];;column_id:8;chunk_idx:46;blob_range:[NO_BLOB:0:2696];;column_id:8;chunk_idx:47;blob_range:[NO_BLOB:0:2696];;column_id:8;chunk_idx:48;blob_range:[NO_BLOB:0:2696];;column_id:8;chunk_idx:49;blob_range:[NO_BLOB:0:2696];;column_id:8;chunk_idx:50;blob_range:[NO_BLOB:0:2696];;column_id:8;chunk_idx:51;blob_range:[NO_BLOB:0:2696];;column_id:8;chunk_idx:52;blob_range:[NO_BLOB:0:2696];;column_id:8;chunk_idx:53;blob_range:[NO_BLOB:0:2696];;column_id:8;chunk_idx:54;blob_range:[NO_BLOB:0:2696];;column_id:8;chunk_idx:55;blob_range:[NO_BLOB:0:2696];;column_id:8;chunk_idx:56;blob_range:[NO_BLOB:0:2696];;column_id:8;chunk_idx:57;blob_range:[NO_BLOB:0:2696];;column_id:8;chunk_idx:58;blob_range:[NO_BLOB:0:2696];;column_id:8;chunk_idx:59;blob_range:[NO_BLOB:0:2688];;column_id:8;chunk_idx:60;blob_range:[NO_BLOB:0:2688];;column_id:8;chunk_idx:61;blob_range:[NO_BLOB:0:2688];;column_id:8;chunk_idx:62;blob_range:[NO_BLOB:0:2688];;column_id:8;chunk_idx:63;blob_range:[NO_BLOB:0:2688];;column_id:8;chunk_idx:64;blob_range:[NO_BLOB:0:2688];;column_id:8;chunk_idx:65;blob_range:[NO_BLOB:0:2688];;column_id:8;chunk_idx:66;blob_range:[NO_BLOB:0:2688];;column_id:8;chunk_idx:67;blob_range:[NO_BLOB:0:2688];;column_id:8;chunk_idx:68;blob_range:[NO_BLOB:0:2680];;column_id:8;chunk_idx:69;blob_range:[NO_BLOB:0:2680];;column_id:8;chunk_idx:70;blob_range:[NO_BLOB:0:2680];;column_id:8;chunk_idx:71;blob_range:[NO_BLOB:0:2672];;column_id:8;chunk_idx:72;blob_range:[NO_BLOB:0:2664];;column_id:8;chunk_idx:73;blob_range:[NO_BLOB:0:8448];;column_id:9;chunk_idx:0;blob_range:[NO_BLOB:0:2696];;column_id:9;chunk_idx:1;blob_range:[NO_BLOB:0:2696];;column_id:9;chunk_idx:2;blob_range:[NO_BLOB:0:2696];;column_id:9;chunk_idx:3;blob_range:[NO_BLOB:0:2696];;column_id:9;chunk_idx:4;blob_range:[NO_BLOB:0:2696];;column_id:9;chunk_idx:5;blob_range:[NO_BLOB:0:2696];;column_id:9;chunk_idx:6;blob_range:[NO_BLOB:0:2696];;column_id:9;chunk_idx:7;blob_range:[NO_BLOB:0:2696];;column_id:9;chunk_idx:8;blob_range:[NO_BLOB:0:2696];;column_id:9;chunk_idx:9;blob_range:[NO_BLOB:0:2696];;column_id:9;chunk_idx:10;blob_range:[NO_BLOB:0:2696];;column_id:9;chunk_idx:11;blob_range:[NO_BLOB:0:2696];;column_id:9;chunk_idx:12;blob_range:[NO_BLOB:0:2696];;column_id:9;chunk_idx:13;blob_range:[NO_BLOB:0:2696];;column_id:9;chunk_idx:14;blob_range:[NO_BLOB:0:2696];;column_id:9;chunk_idx:15;blob_range:[NO_BLOB:0:2696];;column_id:9;chunk_idx:16;blob_range:[NO_BLOB:0:2696];;column_id:9;chunk_idx:17;blob_range:[NO_BLOB:0:2696];;column_id:9;chunk_idx:18;blob_range:[NO_BLOB:0:2696];;column_id:9;chunk_idx:19;blob_range:[NO_BLOB:0:2696];;column_id:9;chunk_idx:20;blob_range:[NO_BLOB:0:2696];;column_id:9;chunk_idx:21;blob_range:[NO_BLOB:0:2696];;column_id:9;chunk_idx:22;blob_range:[NO_BLOB:0:2688];;column_id:9;chunk_idx:23;blob_range:[NO_BLOB:0:2688];;column_id:9;chunk_idx:24;blob_range:[NO_BLOB:0:2688];;column_id:9;chunk_idx:25;blob_range:[NO_BLOB:0:2688];;column_id:9;chunk_idx:26;blob_range:[NO_BLOB:0:2688];;column_id:9;chunk_idx:27;blob_range:[NO_BLOB:0:2688];;column_id:9;chunk_idx:28;blob_range:[NO_BLOB:0:2688];;column_id:9;chunk_idx:29;blob_range:[NO_BLOB:0:2688];;column_id:9;chunk_idx:30;blob_range:[NO_BLOB:0:2688];;column_id:9;chunk_idx:31;blob_ra
nge:[NO_BLOB:0:2680];;column_id:9;chunk_idx:32;blob_range:[NO_BLOB:0:2680];;column_id:9;chunk_idx:33;blob_range:[NO_BLOB:0:2680];;column_id:9;chunk_idx:34;blob_range:[NO_BLOB:0:2672];;column_id:9;chunk_idx:35;blob_range:[NO_BLOB:0:2664];;column_id:9;chunk_idx:36;blob_range:[NO_BLOB:0:8464];;column_id:9;chunk_idx:37;blob_range:[NO_BLOB:0:2696];;column_id:9;chunk_idx:38;blob_range:[NO_BLOB:0:2696];;column_id:9;chunk_idx:39;blob_range:[NO_BLOB:0:2696];;column_id:9;chunk_idx:40;blob_range:[NO_BLOB:0:2696];;column_id:9;chunk_idx:41;blob_range:[NO_BLOB:0:2696];;column_id:9;chunk_idx:42;blob_range:[NO_BLOB:0:2696];;column_id:9;chunk_idx:43;blob_range:[NO_BLOB:0:2696];;column_id:9;chunk_idx:44;blob_range:[NO_BLOB:0:2696];;column_id:9;chunk_idx:45;blob_range:[NO_BLOB:0:2696];;column_id:9;chunk_idx:46;blob_range:[NO_BLOB:0:2696];;column_id:9;chunk_idx:47;blob_range:[NO_BLOB:0:2696];;column_id:9;chunk_idx:48;blob_range:[NO_BLOB:0:2696];;column_id:9;chunk_idx:49;blob_range:[NO_BLOB:0:2696];;column_id:9;chunk_idx:50;blob_range:[NO_BLOB:0:2696];;column_id:9;chunk_idx:51;blob_range:[NO_BLOB:0:2696];;column_id:9;chunk_idx:52;blob_range:[NO_BLOB:0:2696];;column_id:9;chunk_idx:53;blob_range:[NO_BLOB:0:2696];;column_id:9;chunk_idx:54;blob_range:[NO_BLOB:0:2696];;column_id:9;chunk_idx:55;blob_range:[NO_BLOB:0:2696];;column_id:9;chunk_idx:56;blob_range:[NO_BLOB:0:2696];;column_id:9;chunk_idx:57;blob_range:[NO_BLOB:0:2696];;column_id:9;chunk_idx:58;blob_range:[NO_BLOB:0:2696];;column_id:9;chunk_idx:59;blob_range:[NO_BLOB:0:2688];;column_id:9;chunk_idx:60;blob_range:[NO_BLOB:0:2688];;column_id:9;chunk_idx:61;blob_range:[NO_BLOB:0:2688];;column_id:9;chunk_idx:62;blob_range:[NO_BLOB:0:2688];;column_id:9;chunk_idx:63;blob_range:[NO_BLOB:0:2688];;column_id:9;chunk_idx:64;blob_range:[NO_BLOB:0:2688];;column_id:9;chunk_idx:65;blob_range:[NO_BLOB:0:2688];;column_id:9;chunk_idx:66;blob_range:[NO_BLOB:0:2688];;column_id:9;chunk_idx:67;blob_range:[NO_BLOB:0:2688];;column_id:9;chunk_idx:68;blob_range:[NO_BLOB:0:2680];;column_id:9;chunk_idx:69;blob_range:[NO_BLOB:0:2680];;column_id:9;chunk_idx:70;blob_range:[NO_BLOB:0:2680];;column_id:9;chunk_idx:71;blob_range:[NO_BLOB:0:2672];;column_id:9;chunk_idx:72;blob_range:[NO_BLOB:0:2664];;column_id:9;chunk_idx:73;blob_range:[NO_BLOB:0:8448];;column_id:7;chunk_idx:0;blob_range:[NO_BLOB:0:2760];;column_id:7;chunk_idx:1;blob_range:[NO_BLOB:0:2760];;column_id:7;chunk_idx:2;blob_range:[NO_BLOB:0:2760];;column_id:7;chunk_idx:3;blob_range:[NO_BLOB:0:2760];;column_id:7;chunk_idx:4;blob_range:[NO_BLOB:0:2760];;column_id:7;chunk_idx:5;blob_range:[NO_BLOB:0:2760];;column_id:7;chunk_idx:6;blob_range:[NO_BLOB:0:2760];;column_id:7;chunk_idx:7;blob_range:[NO_BLOB:0:2760];;column_id:7;chunk_idx:8;blob_range:[NO_BLOB:0:2760];;column_id:7;chunk_idx:9;blob_range:[NO_BLOB:0:2760];;column_id:7;chunk_idx:10;blob_range:[NO_BLOB:0:2760];;column_id:7;chunk_idx:11;blob_range:[NO_BLOB:0:2760];;column_id:7;chunk_idx:12;blob_range:[NO_BLOB:0:2760];;column_id:7;chunk_idx:13;blob_range:[NO_BLOB:0:2760];;column_id:7;chunk_idx:14;blob_range:[NO_BLOB:0:2760];;column_id:7;chunk_idx:15;blob_range:[NO_BLOB:0:2760];;column_id:7;chunk_idx:16;blob_range:[NO_BLOB:0:2760];;column_id:7;chunk_idx:17;blob_range:[NO_BLOB:0:2760];;column_id:7;chunk_idx:18;blob_range:[NO_BLOB:0:2760];;column_id:7;chunk_idx:19;blob_range:[NO_BLOB:0:2752];;column_id:7;chunk_idx:20;blob_range:[NO_BLOB:0:2752];;column_id:7;chunk_idx:21;blob_range:[NO_BLOB:0:2752];;column_id:7;chunk_idx:22;blob_range:[NO_BLOB:0:2752];;column_id:7;chunk_idx:23;blob_ran
ge:[NO_BLOB:0:2752];;column_id:7;chunk_idx:24;blob_range:[NO_BLOB:0:2744];;column_id:7;chunk_idx:25;blob_range:[NO_BLOB:0:2744];;column_id:7;chunk_idx:26;blob_range:[NO_BLOB:0:9040];;column_id:7;chunk_idx:27;blob_range:[NO_BLOB:0:2760];;column_id:7;chunk_idx:28;blob_range:[NO_BLOB:0:2760];;column_id:7;chunk_idx:29;blob_range:[NO_BLOB:0:2760];;column_id:7;chunk_idx:30;blob_range:[NO_BLOB:0:2760];;column_id:7;chunk_idx:31;blob_range:[NO_BLOB:0:2760];;column_id:7;chunk_idx:32;blob_range:[NO_BLOB:0:2760];;column_id:7;chunk_idx:33;blob_range:[NO_BLOB:0:2760];;column_id:7;chunk_idx:34;blob_range:[NO_BLOB:0:2760];;column_id:7;chunk_idx:35;blob_range:[NO_BLOB:0:2760];;column_id:7;chunk_idx:36;blob_range:[NO_BLOB:0:2760];;column_id:7;chunk_idx:37;blob_range:[NO_BLOB:0:2760];;column_id:7;chunk_idx:38;blob_range:[NO_BLOB:0:2760];;column_id:7;chunk_idx:39;blob_range:[NO_BLOB:0:2760];;column_id:7;chunk_idx:40;blob_range:[NO_BLOB:0:2760];;column_id:7;chunk_idx:41;blob_range:[NO_BLOB:0:2760];;column_id:7;chunk_idx:42;blob_range:[NO_BLOB:0:2760];;column_id:7;chunk_idx:43;blob_range:[NO_BLOB:0:2760];;column_id:7;chunk_idx:44;blob_range:[NO_BLOB:0:2760];;column_id:7;chunk_idx:45;blob_range:[NO_BLOB:0:2760];;column_id:7;chunk_idx:46;blob_range:[NO_BLOB:0:2760];;column_id:7;chunk_idx:47;blob_range:[NO_BLOB:0:2752];;column_id:7;chunk_idx:48;blob_range:[NO_BLOB:0:2752];;column_id:7;chunk_idx:49;blob_range:[NO_BLOB:0:2752];;column_id:7;chunk_idx:50;blob_range:[NO_BLOB:0:2752];;column_id:7;chunk_idx:51;blob_range:[NO_BLOB:0:2744];;column_id:7;chunk_idx:52;blob_range:[NO_BLOB:0:2744];;column_id:7;chunk_idx:53;blob_range:[NO_BLOB:0:9024];;column_id:5;chunk_idx:0;blob_range:[NO_BLOB:0:2696];;column_id:5;chunk_idx:1;blob_range:[NO_BLOB:0:2696];;column_id:5;chunk_idx:2;blob_range:[NO_BLOB:0:2696];;column_id:5;chunk_idx:3;blob_range:[NO_BLOB:0:2696];;column_id:5;chunk_idx:4;blob_range:[NO_BLOB:0:2696];;column_id:5;chunk_idx:5;blob_range:[NO_BLOB:0:2696];;column_id:5;chunk_idx:6;blob_range:[NO_BLOB:0:2696];;column_id:5;chunk_idx:7;blob_range:[NO_BLOB:0:2696];;column_id:5;chunk_idx:8;blob_range:[NO_BLOB:0:2688];;column_id:5;chunk_idx:9;blob_range:[NO_BLOB:0:2688];;column_id:5;chunk_idx:10;blob_range:[NO_BLOB:0:2688];;column_id:5;chunk_idx:11;blob_range:[NO_BLOB:0:2688];;column_id:5;chunk_idx:12;blob_range:[NO_BLOB:0:2688];;column_id:5;chunk_idx:13;blob_range:[NO_BLOB:0:2680];;column_id:5;chunk_idx:14;blob_range:[NO_BLOB:0:2680];;column_id:5;chunk_idx:15;blob_range:[NO_BLOB:0:2672];;column_id:5;chunk_idx:16;blob_range:[NO_BLOB:0:9456];;column_id:5;chunk_idx:17;blob_range:[NO_BLOB:0:2696];;column_id:5;chunk_idx:18;blob_range:[NO_BLOB:0:2696];;column_id:5;chunk_idx:19;blob_range:[NO_BLOB:0:2696];;column_id:5;chunk_idx:20;blob_range:[NO_BLOB:0:2696];;column_id:5;chunk_idx:21;blob_range:[NO_BLOB:0:2696];;column_id:5;chunk_idx:22;blob_range:[NO_BLOB:0:2696];;column_id:5;chunk_idx:23;blob_range:[NO_BLOB:0:2696];;column_id:5;chunk_idx:24;blob_range:[NO_BLOB:0:2696];;column_id:5;chunk_idx:25;blob_range:[NO_BLOB:0:2688];;column_id:5;chunk_idx:26;blob_range:[NO_BLOB:0:2688];;column_id:5;chunk_idx:27;blob_range:[NO_BLOB:0:2688];;column_id:5;chunk_idx:28;blob_range:[NO_BLOB:0:2688];;column_id:5;chunk_idx:29;blob_range:[NO_BLOB:0:2688];;column_id:5;chunk_idx:30;blob_range:[NO_BLOB:0:2680];;column_id:5;chunk_idx:31;blob_range:[NO_BLOB:0:2680];;column_id:5;chunk_idx:32;blob_range:[NO_BLOB:0:2672];;column_id:5;chunk_idx:33;blob_range:[NO_BLOB:0:9448];;;;switched=(portion_id:61;path_id:1;records_count:25002;schema_version:1;level:0;;column
_size:2586528;index_size:0;meta:((produced=SPLIT_COMPACTED;)););(portion_id:57;path_id:1;records_count:25002;schema_version:1;level:0;;column_size:2167032;index_size:0;meta:((produced=INSERTED;)););; 2025-05-07T08:56:57.366428Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: event_type=NKikimr::NBlobCache::TEvBlobCache::TEvReadBlobRangeResult;tablet_id=9437184;parent_id=[1:11069:12696];fline=general_compaction.cpp:135;event=blobs_created;appended=1;switched=2; 2025-05-07T08:56:57.369311Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:11069:12696];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=columnshard__write_index.cpp:50;event=TEvWriteIndex;count=1; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=write_controller.h:65;event=IWriteController aborted;reason=TTxWriteDraft aborted before complete; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=compacted_blob_constructor.cpp:47;event=TCompactedWriteController::DoAbort;reason=TTxWriteDraft aborted before complete; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=abstract.cpp:105;event=AbortEmergency;reason=TCompactedWriteController destructed with WriteIndexEv and WriteIndexEv->IndexChanges;prev_reason=; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=manager.cpp:51;message=aborted data locks manager; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> TColumnShardTestReadWrite::CompactionInGranule_PKString_Reboot [GOOD] Test command err: 2025-05-07T08:50:08.515747Z node 1 :BLOB_CACHE NOTICE: ctor_logger.h:56: MaxCacheDataSize: 20971520 InFlightDataSize: 0 2025-05-07T08:50:08.630228Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-05-07T08:50:08.651786Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-05-07T08:50:08.652028Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-05-07T08:50:08.660321Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-05-07T08:50:08.660526Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-05-07T08:50:08.660738Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-05-07T08:50:08.660846Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-05-07T08:50:08.660927Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-05-07T08:50:08.661027Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanInsertionDedup; 2025-05-07T08:50:08.661106Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-05-07T08:50:08.661172Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-05-07T08:50:08.661246Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-05-07T08:50:08.661344Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-05-07T08:50:08.661416Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-05-07T08:50:08.661501Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-05-07T08:50:08.693549Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-05-07T08:50:08.693684Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:137;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=11;current_normalizer=CLASS_NAME=Granules; 2025-05-07T08:50:08.693726Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-05-07T08:50:08.693924Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-05-07T08:50:08.694108Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-05-07T08:50:08.694185Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-05-07T08:50:08.694233Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-05-07T08:50:08.694317Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-05-07T08:50:08.694362Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-05-07T08:50:08.694399Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-05-07T08:50:08.694423Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-05-07T08:50:08.694558Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-05-07T08:50:08.694613Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-05-07T08:50:08.694642Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-05-07T08:50:08.694663Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-05-07T08:50:08.694738Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-05-07T08:50:08.694793Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-05-07T08:50:08.694879Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanInsertionDedup;id=CleanInsertionDedup; 2025-05-07T08:50:08.694920Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=8;type=CleanInsertionDedup; 2025-05-07T08:50:08.694990Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanInsertionDedup;id=8; 2025-05-07T08:50:08.695031Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-05-07T08:50:08.695050Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-05-07T08:50:08.695119Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-05-07T08:50:08.695152Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-05-07T08:50:08.695174Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-05-07T08:50:08.695340Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-05-07T08:50:08.695385Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-05-07T08:50:08.695412Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-05-07T08:50:08.695558Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-05-07T08:50:08.695592Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-05-07T08:50:08.695613Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-05-07T08:50:08.695712Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-05-07T08:50:08.695742Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-05-07T08:50:08.695775Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-05-07T08:50:08.695854Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-05-07T08:50:08.695907Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-05-07T08:50:08.695934Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-05-07T08:50:08.695953Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-05-07T08:50:08.696276Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=34; 2025-05-07T08:50:08.696363Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=42; ... 
[chunk-by-chunk blob_range dump elided: column_id:8 chunks 26-69, column_id:9 chunks 0-69, column_id:7 chunks 0-49, column_id:5 chunks 0-31; each chunk is a [NO_BLOB:0:N] range of roughly 2664-10208 bytes];;;;switched=(portion_id:60;path_id:1;records_count:23698;schema_version:1;level:0;;column
_size:2507632;index_size:0;meta:((produced=SPLIT_COMPACTED;)););(portion_id:56;path_id:1;records_count:23698;schema_version:1;level:0;;column_size:2109896;index_size:0;meta:((produced=INSERTED;)););; 2025-05-07T08:56:49.896804Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: event_type=NKikimr::NBlobCache::TEvBlobCache::TEvReadBlobRangeResult;tablet_id=9437184;parent_id=[1:11153:12780];fline=general_compaction.cpp:135;event=blobs_created;appended=1;switched=2; 2025-05-07T08:56:49.899149Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:11153:12780];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=columnshard__write_index.cpp:50;event=TEvWriteIndex;count=1; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=write_controller.h:65;event=IWriteController aborted;reason=TTxWriteDraft aborted before complete; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=compacted_blob_constructor.cpp:47;event=TCompactedWriteController::DoAbort;reason=TTxWriteDraft aborted before complete; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=abstract.cpp:105;event=AbortEmergency;reason=TCompactedWriteController destructed with WriteIndexEv and WriteIndexEv->IndexChanges;prev_reason=; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=manager.cpp:51;message=aborted data locks manager; >> TableCreation::MultipleTablesCreation >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeSequence [GOOD] >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeReplication [GOOD] >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeView >> ScriptExecutionsTest::RunCheckLeaseStatus >> RetryPolicy::TWriteSession_RetryOnTargetCluster [GOOD] >> RetryPolicy::TWriteSession_SwitchBackToLocalCluster >> TResourceBroker::TestResubmitTask |90.7%| [TM] {asan, default-linux-x86_64, release} ydb/library/ncloud/impl/ut/unittest >> BackupRestoreS3::RestoreIndexTableReadReplicasSettings [GOOD] >> BackupRestoreS3::RestoreTableSplitBoundaries |90.7%| [TM] {asan, default-linux-x86_64, release} ydb/library/ncloud/impl/ut/unittest >> TResourceBroker::TestResubmitTask [GOOD] >> TResourceBroker::TestUpdateCookie >> ResourcePoolClassifiersSysView::TestResourcePoolClassifiersSysViewFilters [GOOD] >> BackupRestore::TestAllSchemeObjectTypes-EPathTypeExternalTable [GOOD] >> BackupRestore::TestAllSchemeObjectTypes-EPathTypeExternalDataSource >> KqpProxy::InvalidSessionID [GOOD] >> KqpProxy::LoadedMetadataAfterCompilationTimeout >> TResourceBroker::TestUpdateCookie [GOOD] >> BackupRestore::TestAllPrimitiveTypes-BOOL [GOOD] >> BackupRestore::TestAllPrimitiveTypes-INT16 |90.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TResourceBroker::TestUpdateCookie [GOOD] >> TSequence::CreateSequence >> TableCreation::SimpleTableCreation [GOOD] >> TableCreation::SimpleUpdateTable |90.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/address_classification/ut/unittest >> KqpWorkloadServiceDistributed::TestDistributedLessConcurrentQueryLimit [GOOD] >> KqpWorkloadServiceSubscriptions::TestResourcePoolSubscription >> TableCreation::ConcurrentTableCreation [GOOD] >> TableCreation::ConcurrentMultipleTablesCreation ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/workload_service/ut/unittest >> ResourcePoolClassifiersSysView::TestResourcePoolClassifiersSysViewFilters [GOOD] Test command err: 2025-05-07T08:54:38.464162Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: 
fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501624578061025628:2062];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:54:38.472565Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/004846/r3tmp/tmp18Ntwv/pdisk_1.dat 2025-05-07T08:54:38.990386Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:54:38.993250Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:54:38.993352Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:54:38.997858Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 11188, node 1 2025-05-07T08:54:39.124154Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:54:39.124175Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:54:39.124189Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:54:39.124314Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:17638 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:54:39.542885Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 
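A recurring shape in the pool-bootstrap events that follow: the CREATE for the resource pool completes, the creator schedules a retry on "Transaction ... completed, doublechecking", reissues the request, and then counts the schemereq reply "path exist, request accepts it" as success ("Pool successfully created"). A sketch of that idempotent-create-with-retry shape; this is illustrative Python with hypothetical names, not YDB SDK code:

    import time

    class AlreadyExists(Exception):
        """Stand-in for a 'path exist, request accepts it' style reply."""

    def create_idempotent(create_fn, retries=5, backoff=0.1):
        """Retry create_fn; an 'already exists' reply counts as success,
        since an earlier attempt may have committed before its ack arrived."""
        for attempt in range(retries):
            try:
                create_fn()
                return
            except AlreadyExists:
                return  # a previous attempt won the race; nothing left to do
            except TimeoutError:
                time.sleep(backoff * (2 ** attempt))  # transient; back off
        raise RuntimeError("create did not converge after retries")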
2025-05-07T08:54:39.570938Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T08:54:42.876222Z node 1 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:440: [WorkloadService] [Service] Started workload service initialization 2025-05-07T08:54:42.883730Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:223: SessionId: ydb://session/3?node_id=1&id=ZjA5YzI3OWItYWQ0MmI4ZTItODNiMGMxMDctMzc0YjkwZWM=, ActorId: [0:0:0], ActorState: unknown state, Create session actor with id ZjA5YzI3OWItYWQ0MmI4ZTItODNiMGMxMDctMzc0YjkwZWM= 2025-05-07T08:54:42.884381Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:100: [WorkloadService] [Service] Subscribed for config changes 2025-05-07T08:54:42.884414Z node 1 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:111: [WorkloadService] [Service] Resource pools was enanbled 2025-05-07T08:54:42.884456Z node 1 :KQP_WORKLOAD_SERVICE TRACE: kqp_workload_service.cpp:125: [WorkloadService] [Service] Updated node info, noode count: 1 2025-05-07T08:54:42.884496Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:241: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7501624595240895451:2329], Start check tables existence, number paths: 2 2025-05-07T08:54:42.914156Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:227: SessionId: ydb://session/3?node_id=1&id=ZjA5YzI3OWItYWQ0MmI4ZTItODNiMGMxMDctMzc0YjkwZWM=, ActorId: [1:7501624595240895452:2330], ActorState: unknown state, session actor bootstrapped 2025-05-07T08:54:42.914521Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:182: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7501624595240895451:2329], Describe table /Root/.metadata/workload_manager/delayed_requests status PathErrorUnknown 2025-05-07T08:54:42.914569Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:182: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7501624595240895451:2329], Describe table /Root/.metadata/workload_manager/running_requests status PathErrorUnknown 2025-05-07T08:54:42.914600Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:289: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7501624595240895451:2329], Successfully finished 2025-05-07T08:54:42.914747Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:367: [WorkloadService] [Service] Cleanup completed, tables exists: 0 2025-05-07T08:54:42.920226Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:387: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501624595240895478:2300], DatabaseId: Root, PoolId: sample_pool_id, Start pool creating 2025-05-07T08:54:42.925372Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480 2025-05-07T08:54:42.933158Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:429: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501624595240895478:2300], DatabaseId: Root, PoolId: sample_pool_id, Subscribe on create pool tx: 281474976710658 2025-05-07T08:54:42.943999Z node 1 :KQP_WORKLOAD_SERVICE TRACE: scheme_actors.cpp:352: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501624595240895478:2300], DatabaseId: Root, PoolId: sample_pool_id, Tablet to pipe successfully connected 2025-05-07T08:54:42.950417Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: 
[1:7501624595240895478:2300], DatabaseId: Root, PoolId: sample_pool_id, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-05-07T08:54:43.034307Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:387: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501624595240895478:2300], DatabaseId: Root, PoolId: sample_pool_id, Start pool creating 2025-05-07T08:54:43.039262Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501624599535862825:2332] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/sample_pool_id\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:54:43.039433Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:480: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501624595240895478:2300], DatabaseId: Root, PoolId: sample_pool_id, Pool successfully created 2025-05-07T08:54:43.050769Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:157: [WorkloadService] [Service] Recieved subscription request, DatabaseId: /Root, PoolId: default 2025-05-07T08:54:43.050801Z node 1 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:561: [WorkloadService] [Service] Creating new database state for id /Root 2025-05-07T08:54:43.050855Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:185: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501624599535862834:2333], DatabaseId: /Root, PoolId: default, Start pool fetching 2025-05-07T08:54:43.051081Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:443: SessionId: ydb://session/3?node_id=1&id=ZjA5YzI3OWItYWQ0MmI4ZTItODNiMGMxMDctMzc0YjkwZWM=, ActorId: [1:7501624595240895452:2330], ActorState: ReadyState, TraceId: 01jtmz8m3954cg7vpn1xt3yrst, received request, proxyRequestId: 3 prepared: 0 tx_control: 0 action: QUERY_ACTION_EXECUTE type: QUERY_TYPE_SQL_DDL text: GRANT DESCRIBE SCHEMA ON `/Root` TO `user@test`; GRANT DESCRIBE SCHEMA, SELECT ROW ON `/Root/.metadata/workload_manager/pools/sample_pool_id` TO `user@test`; rpcActor: [0:0:0] database: /Root databaseId: /Root pool id: default 2025-05-07T08:54:43.062979Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501624599535862834:2333], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:54:43.063112Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:54:43.443218Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710660:0, at schemeshard: 72057594046644480 2025-05-07T08:54:43.451489Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710661:0, at schemeshard: 72057594046644480 2025-05-07T08:54:43.454062Z node 1 :KQP_SESSION INFO: kqp_session_actor.cpp:2473: SessionId: ydb://session/3?node_id=1&id=ZjA5YzI3OWItYWQ0MmI4ZTItODNiMGMxMDctMzc0YjkwZWM=, ActorId: [1:7501624595240895452:2330], ActorState: ExecuteState, TraceId: 01jtmz8m3954cg7vpn1xt3yrst, Cleanup start, isFinal: 0 CleanupCtx: 1 TransactionsToBeAborted.size(): 0 WorkerId: [1:7501624599535862835:2330] WorkloadServiceCleanup: 0 2025-05-07T08:54:43.455839Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2534: SessionId: ydb://session/3?node_id=1&id=ZjA5YzI3OWItYWQ0MmI4ZTItODNiMGMxMDctMzc0YjkwZWM=, ActorId: [1:7501624595240895452:2330], ActorState: CleanupState, TraceId: 01jtmz8m3954cg7vpn1xt3yrst, EndCleanup, isFinal: 0 2025-05-07T08:54:43.455934Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2270: SessionId: ydb://session/3?node_id=1&id=ZjA5YzI3OWItYWQ0MmI4ZTItODNiMGMxMDctMzc0YjkwZWM=, ActorId: [1:7501624595240895452:2330], ActorState: CleanupState, TraceId: 01jtmz8m3954cg7vpn1xt3yrst, Sent query response back to proxy, proxyRequestId: 3, proxyId: [1:7501624578061025881:2277] 2025-05-07T08:54:43.463762Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:223: SessionId: ydb://session/3?node_id=1&id=NDJiY2Q2ZTUtMjY4ZWM3OGMtYTc2YjJiYjktM2E5MDRiMDc=, ActorId: [0:0:0], ActorState: unknown state, Create session actor with id NDJiY2Q2ZTUtMjY4ZWM3OGMtYTc2YjJiYjktM2E5MDRiMDc= 2025-05-07T08:54:43.463983Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:227: SessionId: ydb://session/3?node_id=1&id=NDJiY2Q2ZTUtMjY4ZWM3OGMtYTc2YjJiYjktM2E5MDRiMDc=, ActorId: [1:7501624599535862873:2336], ActorState: unknown state, session actor bootstrapped 2025-05-07T08:54:43.464098Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:157: [WorkloadService] [Service] Recieved subscription request, DatabaseId: /Root, PoolId: sample_pool_id 2025-05-07T08:54:43.464148Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:185: [WorkloadService] [TPoolFetcherActor] ActorId: [1: ... 
ESSION DEBUG: kqp_session_actor.cpp:575: SessionId: ydb://session/3?node_id=9&id=ZDY0NGQ1OS1lYjI5MWNkOC0zMmMzZGI5NS1mNDZlZTJlNg==, ActorId: [9:7501625202525100042:3015], ActorState: ExecuteState, TraceId: 01jtmzcxtr2mvzgydcf3sckq5k, Sending CompileQuery request 2025-05-07T08:57:04.105521Z node 9 :SCHEME_BOARD_SUBSCRIBER WARN: subscriber.cpp:948: [main][9:7501625120920718536:2637][/Root/test-shared/.metadata/workload_manager/classifiers/resource_pool_classifiers] Sync is done: cookie# 52, size# 3, half# 1, successes# 0, faulires# 2, partial# 1 2025-05-07T08:57:04.105587Z node 9 :SCHEME_BOARD_SUBSCRIBER WARN: subscriber.cpp:948: [main][9:7501625120920718536:2637][/Root/test-shared/.metadata/workload_manager/classifiers/resource_pool_classifiers] Sync is done: cookie# 53, size# 3, half# 1, successes# 0, faulires# 2, partial# 1 2025-05-07T08:57:04.106712Z node 9 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [9:7501625202525100045:3017], status: UNAVAILABLE, issues:
: Error: Table metadata loading, code: 1050
:1:1: Error: Failed to load metadata for table: db.[//Root/test-shared/.metadata/workload_manager/classifiers/resource_pool_classifiers]
: Error: LookupError, code: 2005 2025-05-07T08:57:04.107326Z node 9 :KQP_SESSION WARN: kqp_session_actor.cpp:2147: SessionId: ydb://session/3?node_id=9&id=ZDY0NGQ1OS1lYjI5MWNkOC0zMmMzZGI5NS1mNDZlZTJlNg==, ActorId: [9:7501625202525100042:3015], ActorState: ExecuteState, TraceId: 01jtmzcxtr2mvzgydcf3sckq5k, ReplyQueryCompileError, status UNAVAILABLE remove tx with tx_id: 2025-05-07T08:57:04.107375Z node 9 :KQP_SESSION INFO: kqp_session_actor.cpp:2473: SessionId: ydb://session/3?node_id=9&id=ZDY0NGQ1OS1lYjI5MWNkOC0zMmMzZGI5NS1mNDZlZTJlNg==, ActorId: [9:7501625202525100042:3015], ActorState: ExecuteState, TraceId: 01jtmzcxtr2mvzgydcf3sckq5k, Cleanup start, isFinal: 0 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-05-07T08:57:04.107402Z node 9 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2534: SessionId: ydb://session/3?node_id=9&id=ZDY0NGQ1OS1lYjI5MWNkOC0zMmMzZGI5NS1mNDZlZTJlNg==, ActorId: [9:7501625202525100042:3015], ActorState: ExecuteState, TraceId: 01jtmzcxtr2mvzgydcf3sckq5k, EndCleanup, isFinal: 0 2025-05-07T08:57:04.107581Z node 9 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2270: SessionId: ydb://session/3?node_id=9&id=ZDY0NGQ1OS1lYjI5MWNkOC0zMmMzZGI5NS1mNDZlZTJlNg==, ActorId: [9:7501625202525100042:3015], ActorState: ExecuteState, TraceId: 01jtmzcxtr2mvzgydcf3sckq5k, Sent query response back to proxy, proxyRequestId: 104, proxyId: [9:7501625090855946526:2211] 2025-05-07T08:57:04.108339Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=request_actor_cb.h:34;event=unexpected reply;response=operation { ready: true status: UNAVAILABLE issues { message: "Table metadata loading" issue_code: 1050 severity: 1 issues { position { row: 1 column: 1 } message: "Failed to load metadata for table: db.[//Root/test-shared/.metadata/workload_manager/classifiers/resource_pool_classifiers]" end_position { row: 1 column: 1 } severity: 1 issues { message: "LookupError" issue_code: 2005 severity: 1 } } } result { [type.googleapis.com/Ydb.Table.ExecuteQueryResult] { tx_meta { } } } } ; 2025-05-07T08:57:04.108658Z node 9 :METADATA_PROVIDER ERROR: log.h:466: accessor_snapshot_base.cpp:16 :cannot construct snapshot: on request failed:
: Error: Table metadata loading, code: 1050
:1:1: Error: Failed to load metadata for table: db.[//Root/test-shared/.metadata/workload_manager/classifiers/resource_pool_classifiers]
: Error: LookupError, code: 2005 2025-05-07T08:57:04.108794Z node 9 :KQP_SESSION INFO: kqp_session_actor.cpp:2315: SessionId: ydb://session/3?node_id=9&id=ZDY0NGQ1OS1lYjI5MWNkOC0zMmMzZGI5NS1mNDZlZTJlNg==, ActorId: [9:7501625202525100042:3015], ActorState: ReadyState, Session closed due to explicit close event 2025-05-07T08:57:04.108841Z node 9 :KQP_SESSION INFO: kqp_session_actor.cpp:2473: SessionId: ydb://session/3?node_id=9&id=ZDY0NGQ1OS1lYjI5MWNkOC0zMmMzZGI5NS1mNDZlZTJlNg==, ActorId: [9:7501625202525100042:3015], ActorState: ReadyState, Cleanup start, isFinal: 1 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-05-07T08:57:04.108878Z node 9 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2534: SessionId: ydb://session/3?node_id=9&id=ZDY0NGQ1OS1lYjI5MWNkOC0zMmMzZGI5NS1mNDZlZTJlNg==, ActorId: [9:7501625202525100042:3015], ActorState: ReadyState, EndCleanup, isFinal: 1 2025-05-07T08:57:04.108907Z node 9 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2546: SessionId: ydb://session/3?node_id=9&id=ZDY0NGQ1OS1lYjI5MWNkOC0zMmMzZGI5NS1mNDZlZTJlNg==, ActorId: [9:7501625202525100042:3015], ActorState: unknown state, Cleanup temp tables: 0 2025-05-07T08:57:04.108995Z node 9 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2637: SessionId: ydb://session/3?node_id=9&id=ZDY0NGQ1OS1lYjI5MWNkOC0zMmMzZGI5NS1mNDZlZTJlNg==, ActorId: [9:7501625202525100042:3015], ActorState: unknown state, Session actor destroyed 2025-05-07T08:57:04.177150Z node 9 :KQP_SESSION DEBUG: kqp_session_actor.cpp:223: SessionId: ydb://session/3?node_id=9&id=NGQ3YjZkYWEtZjU4ODk5YjItZDMyNDg2MjAtNjQwYjkyNA==, ActorId: [0:0:0], ActorState: unknown state, Create session actor with id NGQ3YjZkYWEtZjU4ODk5YjItZDMyNDg2MjAtNjQwYjkyNA== 2025-05-07T08:57:04.177555Z node 9 :KQP_SESSION DEBUG: kqp_session_actor.cpp:227: SessionId: ydb://session/3?node_id=9&id=NGQ3YjZkYWEtZjU4ODk5YjItZDMyNDg2MjAtNjQwYjkyNA==, ActorId: [9:7501625202525100053:3021], ActorState: unknown state, session actor bootstrapped 2025-05-07T08:57:04.178403Z node 9 :KQP_SESSION DEBUG: kqp_session_actor.cpp:443: SessionId: ydb://session/3?node_id=9&id=NGQ3YjZkYWEtZjU4ODk5YjItZDMyNDg2MjAtNjQwYjkyNA==, ActorId: [9:7501625202525100053:3021], ActorState: ReadyState, TraceId: 01jtmzcxxj917tsk99kczwn9xd, received request, proxyRequestId: 106 prepared: 0 tx_control: 1 action: QUERY_ACTION_EXECUTE type: QUERY_TYPE_SQL_DML text: SELECT * FROM `//Root/test-shared/.metadata/workload_manager/classifiers/resource_pool_classifiers`; rpcActor: [9:7501625202525100054:3022] database: /Root/test-shared databaseId: /Root/test-shared pool id: default 2025-05-07T08:57:04.178451Z node 9 :KQP_SESSION DEBUG: kqp_session_actor.cpp:264: SessionId: ydb://session/3?node_id=9&id=NGQ3YjZkYWEtZjU4ODk5YjItZDMyNDg2MjAtNjQwYjkyNA==, ActorId: [9:7501625202525100053:3021], ActorState: ReadyState, TraceId: 01jtmzcxxj917tsk99kczwn9xd, request placed into pool from cache: default 2025-05-07T08:57:04.178573Z node 9 :KQP_SESSION DEBUG: kqp_session_actor.cpp:575: SessionId: ydb://session/3?node_id=9&id=NGQ3YjZkYWEtZjU4ODk5YjItZDMyNDg2MjAtNjQwYjkyNA==, ActorId: [9:7501625202525100053:3021], ActorState: ExecuteState, TraceId: 01jtmzcxxj917tsk99kczwn9xd, Sending CompileQuery request 2025-05-07T08:57:04.197143Z node 9 :SCHEME_BOARD_SUBSCRIBER WARN: subscriber.cpp:948: [main][9:7501625120920718536:2637][/Root/test-shared/.metadata/workload_manager/classifiers/resource_pool_classifiers] Sync is done: cookie# 54, size# 3, half# 1, successes# 0, faulires# 2, partial# 1 2025-05-07T08:57:04.197263Z 
node 9 :SCHEME_BOARD_SUBSCRIBER WARN: subscriber.cpp:948: [main][9:7501625120920718536:2637][/Root/test-shared/.metadata/workload_manager/classifiers/resource_pool_classifiers] Sync is done: cookie# 55, size# 3, half# 1, successes# 0, faulires# 2, partial# 1 2025-05-07T08:57:04.198299Z node 9 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [9:7501625202525100056:3023], status: UNAVAILABLE, issues:
: Error: Table metadata loading, code: 1050
:1:1: Error: Failed to load metadata for table: db.[//Root/test-shared/.metadata/workload_manager/classifiers/resource_pool_classifiers]
: Error: LookupError, code: 2005 2025-05-07T08:57:04.200752Z node 9 :KQP_SESSION WARN: kqp_session_actor.cpp:2147: SessionId: ydb://session/3?node_id=9&id=NGQ3YjZkYWEtZjU4ODk5YjItZDMyNDg2MjAtNjQwYjkyNA==, ActorId: [9:7501625202525100053:3021], ActorState: ExecuteState, TraceId: 01jtmzcxxj917tsk99kczwn9xd, ReplyQueryCompileError, status UNAVAILABLE remove tx with tx_id: 2025-05-07T08:57:04.200804Z node 9 :KQP_SESSION INFO: kqp_session_actor.cpp:2473: SessionId: ydb://session/3?node_id=9&id=NGQ3YjZkYWEtZjU4ODk5YjItZDMyNDg2MjAtNjQwYjkyNA==, ActorId: [9:7501625202525100053:3021], ActorState: ExecuteState, TraceId: 01jtmzcxxj917tsk99kczwn9xd, Cleanup start, isFinal: 0 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-05-07T08:57:04.200830Z node 9 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2534: SessionId: ydb://session/3?node_id=9&id=NGQ3YjZkYWEtZjU4ODk5YjItZDMyNDg2MjAtNjQwYjkyNA==, ActorId: [9:7501625202525100053:3021], ActorState: ExecuteState, TraceId: 01jtmzcxxj917tsk99kczwn9xd, EndCleanup, isFinal: 0 2025-05-07T08:57:04.200963Z node 9 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2270: SessionId: ydb://session/3?node_id=9&id=NGQ3YjZkYWEtZjU4ODk5YjItZDMyNDg2MjAtNjQwYjkyNA==, ActorId: [9:7501625202525100053:3021], ActorState: ExecuteState, TraceId: 01jtmzcxxj917tsk99kczwn9xd, Sent query response back to proxy, proxyRequestId: 106, proxyId: [9:7501625090855946526:2211] 2025-05-07T08:57:04.201742Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=request_actor_cb.h:34;event=unexpected reply;response=operation { ready: true status: UNAVAILABLE issues { message: "Table metadata loading" issue_code: 1050 severity: 1 issues { position { row: 1 column: 1 } message: "Failed to load metadata for table: db.[//Root/test-shared/.metadata/workload_manager/classifiers/resource_pool_classifiers]" end_position { row: 1 column: 1 } severity: 1 issues { message: "LookupError" issue_code: 2005 severity: 1 } } } result { [type.googleapis.com/Ydb.Table.ExecuteQueryResult] { tx_meta { } } } } ; 2025-05-07T08:57:04.204418Z node 9 :METADATA_PROVIDER ERROR: log.h:466: accessor_snapshot_base.cpp:16 :cannot construct snapshot: on request failed:
: Error: Table metadata loading, code: 1050
:1:1: Error: Failed to load metadata for table: db.[//Root/test-shared/.metadata/workload_manager/classifiers/resource_pool_classifiers]
: Error: LookupError, code: 2005 2025-05-07T08:57:04.204580Z node 9 :KQP_SESSION INFO: kqp_session_actor.cpp:2315: SessionId: ydb://session/3?node_id=9&id=NGQ3YjZkYWEtZjU4ODk5YjItZDMyNDg2MjAtNjQwYjkyNA==, ActorId: [9:7501625202525100053:3021], ActorState: ReadyState, Session closed due to explicit close event 2025-05-07T08:57:04.204622Z node 9 :KQP_SESSION INFO: kqp_session_actor.cpp:2473: SessionId: ydb://session/3?node_id=9&id=NGQ3YjZkYWEtZjU4ODk5YjItZDMyNDg2MjAtNjQwYjkyNA==, ActorId: [9:7501625202525100053:3021], ActorState: ReadyState, Cleanup start, isFinal: 1 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-05-07T08:57:04.204655Z node 9 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2534: SessionId: ydb://session/3?node_id=9&id=NGQ3YjZkYWEtZjU4ODk5YjItZDMyNDg2MjAtNjQwYjkyNA==, ActorId: [9:7501625202525100053:3021], ActorState: ReadyState, EndCleanup, isFinal: 1 2025-05-07T08:57:04.204688Z node 9 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2546: SessionId: ydb://session/3?node_id=9&id=NGQ3YjZkYWEtZjU4ODk5YjItZDMyNDg2MjAtNjQwYjkyNA==, ActorId: [9:7501625202525100053:3021], ActorState: unknown state, Cleanup temp tables: 0 2025-05-07T08:57:04.204776Z node 9 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2637: SessionId: ydb://session/3?node_id=9&id=NGQ3YjZkYWEtZjU4ODk5YjItZDMyNDg2MjAtNjQwYjkyNA==, ActorId: [9:7501625202525100053:3021], ActorState: unknown state, Session actor destroyed >> TSequence::CreateSequence [GOOD] >> TSequence::CreateDropRecreate >> TableCreation::MultipleTablesCreation [GOOD] >> TableCreation::CreateOldTable |90.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/address_classification/ut/unittest >> BackupRestoreS3::TestAllPrimitiveTypes-INT32 [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-INT64 >> TSequence::CreateDropRecreate [GOOD] >> TSequence::CreateSequenceInsideSequenceNotAllowed >> BackupRestoreS3::TestAllPrimitiveTypes-YSON [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-UUID |90.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/address_classification/ut/unittest >> BackupRestore::TestAllPrimitiveTypes-UINT64 [GOOD] >> BackupRestore::TestAllPrimitiveTypes-TIMESTAMP >> TPersQueueTest::ReadFromSeveralPartitionsMigrated [GOOD] >> TPersQueueTest::Init >> GroupWriteTest::ByTableName [GOOD] >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_ttl_Uint32-pk_types9-all_types9-index9-Uint32--] [GOOD] >> TSequence::CreateSequenceInsideSequenceNotAllowed [GOOD] >> TSequence::CreateSequenceInsideIndexTableNotAllowed |90.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_erase_rows/ydb-core-tx-datashard-ut_erase_rows |90.7%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_erase_rows/ydb-core-tx-datashard-ut_erase_rows |90.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_erase_rows/ydb-core-tx-datashard-ut_erase_rows >> TNetClassifierTest::TestInitFromFile >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_ttl_DyNumber-pk_types8-all_types8-index8-DyNumber--] [GOOD] |90.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/services/ext_index/ut/ydb-services-ext_index-ut |90.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/services/ext_index/ut/ydb-services-ext_index-ut |90.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_change_exchange/ydb-core-tx-datashard-ut_change_exchange |90.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_change_exchange/ydb-core-tx-datashard-ut_change_exchange |90.7%| [LD] {RESULT} 
$(B)/ydb/services/ext_index/ut/ydb-services-ext_index-ut |90.7%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_change_exchange/ydb-core-tx-datashard-ut_change_exchange |90.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/address_classification/ut/unittest >> BackupRestoreS3::TestAllPrimitiveTypes-UINT64 [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-TIMESTAMP ------- [TM] {asan, default-linux-x86_64, release} ydb/core/load_test/ut/unittest >> GroupWriteTest::ByTableName [GOOD] Test command err: RandomSeed# 13900442400254391745 2025-05-07T08:56:28.592445Z 1 00h01m00.010512s :BS_LOAD_TEST DEBUG: TabletId# 72058428954028033 Generation# 1 is bootstrapped, going to send TEvDiscover {TabletId# 72058428954028033 MinGeneration# 1 ReadBody# false DiscoverBlockedGeneration# true ForceBlockedGeneration# 0 FromLeader# true Deadline# 18446744073709551} 2025-05-07T08:56:28.653188Z 1 00h01m00.010512s :BS_LOAD_TEST INFO: TabletId# 72058428954028033 Generation# 1 recieved TEvDiscoverResult {Status# NODATA BlockedGeneration# 0 Id# [0:0:0:0:0:0:0] Size# 0 MinGeneration# 1} 2025-05-07T08:56:28.653260Z 1 00h01m00.010512s :BS_LOAD_TEST DEBUG: TabletId# 72058428954028033 Generation# 1 going to send TEvBlock {TabletId# 72058428954028033 Generation# 1 Deadline# 18446744073709551 IsMonitored# 1} 2025-05-07T08:56:28.658258Z 1 00h01m00.010512s :BS_LOAD_TEST INFO: TabletId# 72058428954028033 Generation# 1 recieved TEvBlockResult {Status# OK} 2025-05-07T08:56:28.677195Z 1 00h01m00.010512s :BS_LOAD_TEST DEBUG: TabletId# 72058428954028033 Generation# 2 going to send TEvCollectGarbage {TabletId# 72058428954028033 RecordGeneration# 2 PerGenerationCounter# 1 Channel# 0 Deadline# 18446744073709551 Collect# true CollectGeneration# 2 CollectStep# 0 Hard# true IsMultiCollectAllowed# 0 IsMonitored# 1} 2025-05-07T08:56:28.680129Z 1 00h01m00.010512s :BS_LOAD_TEST INFO: TabletId# 72058428954028033 Generation# 2 recieved TEvCollectGarbageResult {TabletId# 72058428954028033 RecordGeneration# 2 PerGenerationCounter# 1 Channel# 0 Status# OK} 2025-05-07T08:57:09.533438Z 1 00h01m30.010512s :BS_LOAD_TEST DEBUG: Load tablet recieved PoisonPill, going to die 2025-05-07T08:57:09.533544Z 1 00h01m30.010512s :BS_LOAD_TEST DEBUG: TabletId# 72058428954028033 Generation# 2 end working, going to send TEvCollectGarbage {TabletId# 72058428954028033 RecordGeneration# 2 PerGenerationCounter# 32 Channel# 0 Deadline# 18446744073709551 Collect# true CollectGeneration# 2 CollectStep# 4294967295 Hard# true IsMultiCollectAllowed# 0 IsMonitored# 1} 2025-05-07T08:57:09.533620Z 1 00h01m30.010512s :BS_LOAD_TEST DEBUG: Load tablet recieved PoisonPill, going to die 2025-05-07T08:57:09.533660Z 1 00h01m30.010512s :BS_LOAD_TEST DEBUG: TabletId# 72058428954028033 Generation# 2 end working, going to send TEvCollectGarbage {TabletId# 72058428954028033 RecordGeneration# 2 PerGenerationCounter# 33 Channel# 0 Deadline# 18446744073709551 Collect# true CollectGeneration# 2 CollectStep# 4294967295 Hard# true IsMultiCollectAllowed# 0 IsMonitored# 1} 2025-05-07T08:57:09.601096Z 1 00h01m30.010512s :BS_LOAD_TEST INFO: TabletId# 72058428954028033 Generation# 2 recieved TEvCollectGarbageResult {TabletId# 72058428954028033 RecordGeneration# 2 PerGenerationCounter# 32 Channel# 0 Status# OK} 2025-05-07T08:57:09.601186Z 1 00h01m30.010512s :BS_LOAD_TEST INFO: TabletId# 72058428954028033 Generation# 2 recieved TEvCollectGarbageResult {TabletId# 72058428954028033 RecordGeneration# 2 PerGenerationCounter# 33 Channel# 0 Status# OK} >> 
TSequence::CreateSequenceInsideIndexTableNotAllowed [GOOD] >> TSequence::CopyTableWithSequence >> ColumnStatistics::CountMinSketchStatistics [GOOD] >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeView [GOOD] >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeResourcePool [GOOD] >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeTransfer [GOOD] >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeSysView [GOOD] >> TNetClassifierTest::TestInitFromBadlyFormattedFile ------- [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/federated_query/generic_ut/unittest >> GenericFederatedQuery::IcebergHadoopBasicSelectCount 2025-05-07 08:56:40,910 WARNING devtools.ya.test.programs.test_tool.run_test.run_test: Wrapper execution timed out 2025-05-07 08:56:41,175 WARNING devtools.ya.test.programs.test_tool.run_test.run_test: Wrapper has overrun 60 secs timeout. Process tree before termination: pid rss ref pdirt 216239 46.2M 46.2M 23.3M test_tool run_ut @/home/runner/.ya/build/build_root/zvgn/0048a5/ydb/core/kqp/ut/federated_query/generic_ut/test-results/unittest/testing_out_stuff/chunk1/testing_out_stuff/tes 216679 1.4G 1.4G 960M └─ ydb-core-kqp-ut-federated_query-generic_ut --trace-path-append /home/runner/.ya/build/build_root/zvgn/0048a5/ydb/core/kqp/ut/federated_query/generic_ut/test-results/unit Test command err: Trying to start YDB, gRPC: 62577, MsgBus: 1219 2025-05-07T08:55:43.906543Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501624853979054270:2276];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:55:43.906633Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0048a5/r3tmp/tmpB3sbnh/pdisk_1.dat 2025-05-07T08:55:44.885323Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:55:44.963274Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:55:44.963396Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:55:44.971262Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 62577, node 1 2025-05-07T08:55:45.257325Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:55:45.257374Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:55:45.257386Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:55:45.257510Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:1219 TClient is connected to server localhost:1219 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:55:46.476886Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:55:49.026123Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501624853979054270:2276];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:55:49.026254Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:55:50.379771Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501624884043825778:2335], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:55:50.379878Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:55:50.659012Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:2, at schemeshard: 72057594046644480 2025-05-07T08:55:50.815848Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501624884043825898:2348], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:55:50.815998Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:55:50.816310Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501624884043825904:2351], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:55:50.820404Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:2, at schemeshard: 72057594046644480 2025-05-07T08:55:50.850520Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501624884043825906:2352], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-05-07T08:55:50.910364Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501624884043825946:2403] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:55:51.848656Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T08:55:52.893811Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:1, at schemeshard: 72057594046644480 2025-05-07T08:55:54.091616Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480 2025-05-07T08:55:55.323199Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710678:0, at schemeshard: 72057594046644480 2025-05-07T08:55:56.626262Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710683:0, at schemeshard: 72057594046644480 2025-05-07T08:55:57.563159Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976715758:2, at schemeshard: 72057594046644480 2025-05-07T08:55:57.613099Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976715759:0, at schemeshard: 72057594046644480 2025-05-07T08:55:59.882168Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7261: Cannot get console configs 2025-05-07T08:55:59.882199Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:56:00.696084Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976710706:0, at schemeshard: 72057594046644480 2025-05-07T08:56:00.730719Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jtmzanv430vghddxwma8jab4", SessionId: ydb://session/3?node_id=1&id=ZGNlNjA4NjQtMjliZGY3MjYtNDQzNmMyZTQtZGY1NWM0Njk=, Slow query, duration: 10.355537s, status: SUCCESS, user: UNAUTHENTICATED, results: 0b, text: "\n CREATE OBJECT external_data_source_p (TYPE SECRET) WITH (value=qwerty12345);\n\n CREATE EXTERNAL DATA SOURCE external_data_source WITH (\n SOURCE_TYPE=\"Iceberg\",\n DATABASE_NAME=\"pgdb\",\n WAREHOUSE_TYPE=\"s3\",\n WAREHOUSE_S3_REGION=\"s3_region\",\n WAREHOUSE_S3_ENDPOINT=\"s3_endpoint\",\n WAREHOUSE_S3_URI=\"s3_uri\",\n \n AUTH_METHOD=\"BASIC\",\n LOGIN=\"crab\",\n PASSWORD_SECRET_NAME=\"external_data_source_p\"\n ,\n \n CATALOG_TYPE=\"hadoop\"\n ,\n USE_TLS=\"FALSE\"\n 
);\n ", parameters: 0b 2025-05-07T08:56:00.790295Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710707:0, at schemeshard: 72057594046644480 2025-05-07T08:56:00.804107Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710709:0, at schemeshard: 72057594046644480 2025-05-07T08:56:00.818215Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710708:0, at schemeshard: 72057594046644480 Call DescribeTable. data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Expected: data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Actual: data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } DescribeTable result. GRpcStatus ... fe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:56:26.659445Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T08:56:29.726106Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7501625054132886796:2333], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:56:29.726213Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:56:29.756521Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:2, at schemeshard: 72057594046644480 2025-05-07T08:56:29.833583Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7501625054132886914:2346], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:56:29.833669Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:56:29.834098Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7501625054132886921:2349], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:56:29.838493Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:2, at schemeshard: 72057594046644480 2025-05-07T08:56:29.852768Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7501625054132886923:2350], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-05-07T08:56:29.912923Z node 3 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [3:7501625054132886963:2400] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:56:30.850013Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T08:56:31.651547Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:1, at schemeshard: 72057594046644480 2025-05-07T08:56:32.523187Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480 2025-05-07T08:56:33.223309Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710676:0, at schemeshard: 72057594046644480 2025-05-07T08:56:33.888139Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710681:0, at schemeshard: 72057594046644480 2025-05-07T08:56:34.636214Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976715758:2, at schemeshard: 72057594046644480 2025-05-07T08:56:34.733479Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976715759:0, at schemeshard: 72057594046644480 2025-05-07T08:56:37.683068Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976710702:0, at schemeshard: 72057594046644480 Call DescribeTable. 
data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Expected: data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Actual: data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } DescribeTable result. GRpcStatusCode: 0 schema { columns { name: "col1" type { type_id: UINT16 } } columns { name: "col2" type { type_id: DOUBLE } } } error { status: SUCCESS } Call ListSplits. selects { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } from { table: "example_1" } } CRAB Expected: selects { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } from { table: "example_1" } } CRAB Actual: selects { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } from { table: "example_1" } } ListSplits result. GRpcStatusCode: 0 Call ReadSplits. 
splits { select { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } what { } from { table: "example_1" } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL CRAB Expected: splits { select { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } what { } from { table: "example_1" } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL CRAB Actual: splits { select { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } what { } from { table: "example_1" } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL ReadSplits result. GRpcStatusCode: 0 Traceback (most recent call last): File "library/python/testing/yatest_common/yatest/common/process.py", line 384, in wait wait_for( File "library/python/testing/yatest_common/yatest/common/process.py", line 764, in wait_for raise TimeoutError(truncate(message, MAX_MESSAGE_LEN)) yatest.common.process.TimeoutError: 60 second(s) wait timeout has expired: Command '['/home/runner/.ya/tools/v4/8580453620/test_tool', 'run_ut', '@/home/runner/.ya/build/build_root/zvgn/0048a5/ydb/core/kqp/ut/federated_query/generic_ut/test-results/unittest/testing_out_stuff/chunk1/testing_out_stuff/test_tool.args']' stopped by 60 seconds timeout During handling of the above exception, another exception occurred: Traceback (most recent call last): File "devtools/ya/test/programs/test_tool/run_test/run_test.py", line 1738, in main res.wait(check_exit_code=False, timeout=run_timeout, on_timeout=timeout_callback) File "library/python/testing/yatest_common/yatest/common/process.py", line 398, in wait raise ExecutionTimeoutError(self, str(e)) yatest.common.process.ExecutionTimeoutError: (("60 second(s) wait timeout has expired: Command '['/home/runner/.ya/tools/v4/8580453620/test_tool', 'run_ut', '@/home/runner/.ya/build/build_root/zvgn/0048a5/ydb/core/kqp/ut/federated_query/generic_ut/test-results/unittest/testing_out_stuff/chunk1/testing_out_stuff/test_tool.args']' stopped by 60 seconds timeout",), {}) 2025-05-07 08:57:11,951 WARNING library.python.cores: Core dump dir doesn't exist: /coredumps 2025-05-07 08:57:11,951 WARNING library.python.cores: Core dump dir doesn't exist: /var/tmp/cores >> SystemView::TabletsRanges [GOOD] >> SystemView::TabletsRangesPredicateExtractDisabled >> TSyncBrokerTests::ShouldProcessAfterRelease >> ScriptExecutionsTest::RunCheckLeaseStatus [GOOD] >> ScriptExecutionsTest::UpdatesLeaseAfterExpiring >> TSyncBrokerTests::ShouldReturnToken >> TSyncBrokerTests::ShouldProcessAfterRelease [GOOD] >> TSyncBrokerTests::ShouldReleaseInQueue >> TSyncBrokerTests::ShouldReturnToken [GOOD] >> TSyncBrokerTests::ShouldReleaseToken >> TSyncBrokerTests::ShouldReleaseInQueue [GOOD] >> TSyncBrokerTests::ShouldReleaseToken [GOOD] >> 
TSequence::CopyTableWithSequence [GOOD] >> TSequence::AlterSequence >> BackupRestore::TestAllSchemeObjectTypes-EPathTypeExternalDataSource [GOOD] >> BackupRestore::TestAllSchemeObjectTypes-EPathTypeBackupCollection [GOOD] >> BackupRestore::TestAllPrimitiveTypes-UINT8 >> TableCreation::ConcurrentMultipleTablesCreation [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/backup_ut/unittest >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeSysView [GOOD] Test command err: 2025-05-07T08:56:33.414009Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501625069504381652:2211];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:56:33.414212Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002242/r3tmp/tmp59q9Tx/pdisk_1.dat 2025-05-07T08:56:34.072842Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:56:34.072964Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:56:34.078389Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:56:34.081709Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 28352, node 1 2025-05-07T08:56:34.355001Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:56:34.355045Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:56:34.355052Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:56:34.355179Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23823 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:56:34.806351Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T08:56:37.825867Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501625086684251778:2341], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:56:37.826015Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:56:38.132808Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:7501625069504381767:2136] Handle TEvProposeTransaction 2025-05-07T08:56:38.132846Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:7501625069504381767:2136] TxId# 281474976710658 ProcessProposeTransaction 2025-05-07T08:56:38.132889Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:7501625069504381767:2136] Cookie# 0 userReqId# "" txid# 281474976710658 SEND to# [1:7501625090979219098:2635] 2025-05-07T08:56:38.183405Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1520: Actor# [1:7501625090979219098:2635] txid# 281474976710658 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/Root" OperationType: ESchemeOpCreateTable CreateTable { Name: "table" Columns { Name: "Key" Type: "Uint32" NotNull: false } Columns { Name: "Value" Type: "Utf8" NotNull: false } KeyColumnNames: "Key" PartitionConfig { } Temporary: false } } } UserToken: "" DatabaseName: "" 2025-05-07T08:56:38.183468Z node 1 :TX_PROXY DEBUG: schemereq.cpp:563: Actor# [1:7501625090979219098:2635] txid# 281474976710658 Bootstrap, UserSID: CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-05-07T08:56:38.184251Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1585: Actor# [1:7501625090979219098:2635] txid# 281474976710658 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P6 2025-05-07T08:56:38.184323Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1575: Actor# [1:7501625090979219098:2635] txid# 281474976710658 TEvNavigateKeySet requested from SchemeCache 2025-05-07T08:56:38.184501Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1408: Actor# [1:7501625090979219098:2635] txid# 281474976710658 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-05-07T08:56:38.184662Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1455: Actor# [1:7501625090979219098:2635] HANDLE EvNavigateKeySetResult, txid# 281474976710658 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-05-07T08:56:38.184711Z node 1 :TX_PROXY DEBUG: schemereq.cpp:102: Actor# [1:7501625090979219098:2635] txid# 281474976710658 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976710658 TabletId# 72057594046644480} 2025-05-07T08:56:38.185013Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1310: Actor# [1:7501625090979219098:2635] txid# 281474976710658 HANDLE EvClientConnected 2025-05-07T08:56:38.186511Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 2025-05-07T08:56:38.203019Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1332: Actor# [1:7501625090979219098:2635] txid# 281474976710658 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976710658} 2025-05-07T08:56:38.203087Z node 1 :TX_PROXY DEBUG: schemereq.cpp:543: Actor# [1:7501625090979219098:2635] txid# 281474976710658 SEND to# [1:7501625090979219097:2344] Source {TEvProposeTransactionStatus txid# 281474976710658 Status# 53} 2025-05-07T08:56:38.400067Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: 
fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501625069504381652:2211];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:56:38.400149Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:56:38.456478Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501625090979219261:2352], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:56:38.456588Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:56:38.456892Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501625090979219266:2355], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:56:38.457251Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:7501625069504381767:2136] Handle TEvProposeTransaction 2025-05-07T08:56:38.457274Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:7501625069504381767:2136] TxId# 281474976710659 ProcessProposeTransaction 2025-05-07T08:56:38.457324Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:7501625069504381767:2136] Cookie# 0 userReqId# "" txid# 281474976710659 SEND to# [1:7501625090979219269:2757] 2025-05-07T08:56:38.460333Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1520: Actor# [1:7501625090979219269:2757] txid# 281474976710659 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/Root/.metadata/workload_manager/pools" OperationType: ESchemeOpCreateResourcePool ModifyACL { Name: "default" DiffACL: "\n!\010\000\022\035\010\001\020\201\004\032\024all-users@well-known \003\n\031\010\000\022\025\010\001\020\201\004\032\014root@builtin \003" NewOwner: "metadata@system" } Internal: true CreateResourcePool { Name: "default" Properties { Properties { key: "concurrent_query_limit" value: "-1" } Properties { key: "database_load_cpu_threshold" value: "-1" } Properties { key: "query_cancel_after_seconds" value: "0" } Properties { key: "query_cpu_limit_percent_per_node" value: "-1" } Properties { key: "query_memory_limit_percent_per_node" value: "-1" } Properties { key: "queue_size" value: "-1" } Properties { key: "resource_weight" value: "-1" } Properties { key: "total_cpu_limit_percent_per_node" value: "-1" } } } } } UserToken: "\n\017metadata@system\022\000" DatabaseName: "/Root" 2025-05-07T08:56:38.460407Z node 1 :TX_PROXY DEBUG: schemereq.cpp:563: Actor# [1:7501625090979219269:2757] txid# 281474976710659 Bootstrap, UserSID: metadata@system CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-05-07T08:56:38.460429Z node 1 :TX_PROXY DEBUG: schemereq.cpp:572: Actor# [1:7501625090979219269:2757] txid# 281474976710659 Bootstrap, UserSID: metadata@system IsClusterAdministrator: 1 2025-05-07T08:56:38.461927Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1585: Actor# [1:7501625090979219269:2757] txid# 281474976710659 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P6 2025-05-07T08:56:38.462042Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1575: Actor# [1:7501625090979219269:2757] txid# 281474976710659 TEvNavigateKeySet requested from SchemeCache 2025-05-07T08:56:38.462215Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1408: Actor# [1:7501625090979219269:2757] txid# 281474976710659 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-05-07T08:56:38.462351Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1455: Actor# [1:7501625090979219269:2757] HANDLE EvNavigateKeySetResult, txid# 281474976710659 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-05-07T08:56:38.462393Z node 1 :TX_PROXY DEBUG: schemereq.cpp:102: Actor# [1:7501625090979219269:2757] txid# 281474976710659 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976710659 TabletId# 72057594046644480} 2025-05-07T08:56:38.462525Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1310: Actor# [1:7501625090979219269:2757] txid# 281474976710659 HANDLE EvClientConnected 2025-05-07T08:56:38.463902Z node 1 :FLAT_TX_SCHEMESHARD WARN: 
schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: ... -amz-date: 20250507T085709Z S3_MOCK::HttpServeRead: /test_bucket/view/create_view.sql / 165 2025-05-07T08:57:09.422103Z node 10 :IMPORT DEBUG: schemeshard_import_getters.cpp:228: HandleScheme TEvExternalStorage::TEvHeadObjectResponse: self# [10:7501625222592554631:2211], result# HeadObjectResult { ETag: 54623f53d68141118383b3390c4965d5 ContentLength: 165 } REQUEST: GET /test_bucket/view/create_view.sql HTTP/1.1 HEADERS: Host: localhost:17630 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: A1252A45-4402-443E-A218-FCAFFCC1CFA7 amz-sdk-request: attempt=1 authorization: AWS4-HMAC-SHA256 Credential=test_key/20250507/us-east-1/s3/aws4_request, SignedHeaders=amz-sdk-invocation-id;amz-sdk-request;content-type;host;range;x-amz-api-version;x-amz-content-sha256;x-amz-date, Signature=3db059a89ad31bb6744321d959c25fe7c985b558952b0056bc2486a9785135e5 content-type: application/xml range: bytes=0-164 user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-api-version: 2006-03-01 x-amz-content-sha256: e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 x-amz-date: 20250507T085709Z S3_MOCK::HttpServeRead: /test_bucket/view/create_view.sql / 165 2025-05-07T08:57:09.438835Z node 10 :IMPORT DEBUG: schemeshard_import_getters.cpp:352: HandleScheme TEvExternalStorage::TEvGetObjectResponse: self# [10:7501625222592554631:2211], result# 54623f53d68141118383b3390c4965d5 2025-05-07T08:57:09.525962Z node 10 :TX_PROXY DEBUG: rpc_operation_request_base.h:50: [GetImport] [10:7501625222592554642:2376] [0] Resolve database: name# /Root 2025-05-07T08:57:09.526546Z node 10 :TX_PROXY DEBUG: rpc_operation_request_base.h:66: [GetImport] [10:7501625222592554642:2376] [0] Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult: request# { ErrorCount: 0 DatabaseName: /Root DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root TableId: [72057594046644480:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-05-07T08:57:09.526576Z node 10 :TX_PROXY DEBUG: rpc_operation_request_base.h:106: [GetImport] [10:7501625222592554642:2376] [0] Send request: schemeShardId# 72057594046644480 REQUEST: HEAD /test_bucket/view/permissions.pb HTTP/1.1 HEADERS: Host: localhost:17630 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: A6D18D3A-2078-4578-8D4F-19E378DE2149 amz-sdk-request: attempt=1 authorization: AWS4-HMAC-SHA256 Credential=test_key/20250507/us-east-1/s3/aws4_request, SignedHeaders=amz-sdk-invocation-id;amz-sdk-request;content-type;host;x-amz-api-version;x-amz-content-sha256;x-amz-date, Signature=9dcf1d3dd5e526a7b34893d2225b4eb4d446b9d982aa232f95c87e1e5b33f074 content-type: application/xml user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-api-version: 2006-03-01 x-amz-content-sha256: e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 
x-amz-date: 20250507T085709Z 2025-05-07T08:57:09.543136Z node 10 :TX_PROXY DEBUG: rpc_get_operation.cpp:220: [GetImport] [10:7501625222592554642:2376] [0] Handle TEvImport::TEvGetImportResponse: record# Entry { Id: 281474976715664 Status: SUCCESS Progress: PROGRESS_PREPARING ImportFromS3Settings { endpoint: "localhost:17630" scheme: HTTP bucket: "test_bucket" items { source_prefix: "view" destination_path: "/Root/view" } } StartTime { seconds: 1746608229 } } 2025-05-07T08:57:09.543856Z node 10 :IMPORT DEBUG: schemeshard_import_getters.cpp:249: HandlePermissions TEvExternalStorage::TEvHeadObjectResponse: self# [10:7501625222592554631:2211], result# No response body. REQUEST: GET /test_bucket?prefix=view HTTP/1.1 HEADERS: Host: localhost:17630 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: F2F60AF7-4C8A-4212-B935-F2EAD0967E60 amz-sdk-request: attempt=1 authorization: AWS4-HMAC-SHA256 Credential=test_key/20250507/us-east-1/s3/aws4_request, SignedHeaders=amz-sdk-invocation-id;amz-sdk-request;content-type;host;x-amz-api-version;x-amz-content-sha256;x-amz-date, Signature=1298cea56bca048ce4e7cbcc2d254c9e1720940ebe811957f194d11fccae7a7e content-type: application/xml user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-api-version: 2006-03-01 x-amz-content-sha256: e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 x-amz-date: 20250507T085709Z S3_MOCK::HttpServeList: view 2025-05-07T08:57:09.610497Z node 10 :IMPORT DEBUG: schemeshard_import_getters.cpp:554: HandleChangefeeds TEvExternalStorage::TEvListObjectResponse: self# [10:7501625222592554631:2211], result# ListObjectsResult { } 2025-05-07T08:57:09.610560Z node 10 :IMPORT INFO: schemeshard_import_getters.cpp:587: Reply: self# [10:7501625222592554631:2211], success# 1, error# 2025-05-07T08:57:09.610686Z node 10 :IMPORT DEBUG: schemeshard_import__create.cpp:360: TImport::TTxProgress: DoExecute 2025-05-07T08:57:09.610699Z node 10 :IMPORT DEBUG: schemeshard_import__create.cpp:965: TImport::TTxProgress: OnSchemeResult: id# 281474976715664, itemIdx# 0, success# 1 2025-05-07T08:57:09.650409Z node 10 :IMPORT DEBUG: schemeshard_import__create.cpp:384: TImport::TTxProgress: DoComplete 2025-05-07T08:57:09.672469Z node 10 :IMPORT DEBUG: schemeshard_import_scheme_query_executor.cpp:77: TSchemeQueryExecutor HandleCompileResponse, self: [10:7501625222592554647:2839], status: SUCCESS 2025-05-07T08:57:09.672547Z node 10 :IMPORT INFO: schemeshard_import_scheme_query_executor.cpp:103: TSchemeQueryExecutor Reply, self: [10:7501625222592554647:2839], status: SUCCESS 2025-05-07T08:57:09.672802Z node 10 :IMPORT DEBUG: schemeshard_import_scheme_query_executor.cpp:111: TSchemeQueryExecutor Reply, self: [10:7501625222592554647:2839], status: SUCCESS, prepared query: "WorkingDir: \"/Root\" OperationType: ESchemeOpCreateView FailedOnAlreadyExists: false CreateView { Name: \"view\" QueryText: \"SELECT 1 AS Key UNION SELECT 2 AS Key UNION SELECT 3 AS Key\" CapturedContext { PathPrefix: \"/Root\" SyntaxVersion: 1 AnsiLexer: false PgParser: false Pragmas: \"AnsiInForEmptyOrNullableItemsCollections\" Pragmas: \"AnsiLike\" Pragmas: \"FlexibleTypes\" Pragmas: \"AnsiCurrentRow\" Pragmas: \"WarnOnAnsiAliasShadowing\" Pragmas: \"AnsiOptionalAs\" Pragmas: \"EmitAggApply\" } }" 2025-05-07T08:57:09.673041Z node 10 :IMPORT DEBUG: schemeshard_import__create.cpp:360: TImport::TTxProgress: DoExecute 2025-05-07T08:57:09.673067Z node 10 :IMPORT DEBUG: 
schemeshard_import__create.cpp:1118: TImport::TTxProgress: OnSchemeQueryPreparation: id# 281474976715664, itemIdx# 0, status# SUCCESS, error# 2025-05-07T08:57:09.673254Z node 10 :IMPORT INFO: schemeshard_import__create.cpp:605: TImport::TTxProgress: Allocate txId: info# { Id: 281474976715664 Uid: '' Kind: S3 DomainPathId: [OwnerId: 72057594046644480, LocalPathId: 1] UserSID: '(empty maybe)' State: Waiting Issue: '' Items: 1 }, item# { Idx: 0 DstPathName: '/Root/view' DstPathId: State: CreateSchemeObject SubState: AllocateTxId WaitTxId: 0 Issue: '' } 2025-05-07T08:57:09.675896Z node 10 :IMPORT DEBUG: schemeshard_import__create.cpp:384: TImport::TTxProgress: DoComplete 2025-05-07T08:57:09.676092Z node 10 :IMPORT DEBUG: schemeshard_import__create.cpp:360: TImport::TTxProgress: DoExecute 2025-05-07T08:57:09.676109Z node 10 :IMPORT DEBUG: schemeshard_import__create.cpp:1180: TImport::TTxProgress: OnAllocateResult: txId# 281474976710758, id# 281474976715664 2025-05-07T08:57:09.676192Z node 10 :IMPORT INFO: schemeshard_import__create.cpp:437: TImport::TTxProgress: ExecutePreparedQuery: info# { Id: 281474976715664 Uid: '' Kind: S3 DomainPathId: [OwnerId: 72057594046644480, LocalPathId: 1] UserSID: '(empty maybe)' State: Waiting Issue: '' Items: 1 }, item# { Idx: 0 DstPathName: '/Root/view' DstPathId: State: CreateSchemeObject SubState: Proposed WaitTxId: 0 Issue: '' }, txId# 281474976710758 2025-05-07T08:57:09.676295Z node 10 :IMPORT DEBUG: schemeshard_import__create.cpp:384: TImport::TTxProgress: DoComplete 2025-05-07T08:57:09.681861Z node 10 :IMPORT DEBUG: schemeshard_import__create.cpp:360: TImport::TTxProgress: DoExecute 2025-05-07T08:57:09.681890Z node 10 :IMPORT DEBUG: schemeshard_import__create.cpp:1267: TImport::TTxProgress: OnModifyResult: txId# 281474976710758, status# StatusAccepted 2025-05-07T08:57:09.682117Z node 10 :IMPORT INFO: schemeshard_import__create.cpp:619: TImport::TTxProgress: Wait for completion: info# { Id: 281474976715664 Uid: '' Kind: S3 DomainPathId: [OwnerId: 72057594046644480, LocalPathId: 1] UserSID: '(empty maybe)' State: Waiting Issue: '' Items: 1 }, item# { Idx: 0 DstPathName: '/Root/view' DstPathId: [OwnerId: 72057594046644480, LocalPathId: 8] State: CreateSchemeObject SubState: Subscribed WaitTxId: 281474976710758 Issue: '' } 2025-05-07T08:57:09.686547Z node 10 :IMPORT DEBUG: schemeshard_import__create.cpp:384: TImport::TTxProgress: DoComplete 2025-05-07T08:57:09.711707Z node 10 :IMPORT DEBUG: schemeshard_import__create.cpp:360: TImport::TTxProgress: DoExecute 2025-05-07T08:57:09.711743Z node 10 :IMPORT DEBUG: schemeshard_import__create.cpp:1425: TImport::TTxProgress: OnNotifyResult: txId# 281474976710758 2025-05-07T08:57:09.713555Z node 10 :IMPORT DEBUG: schemeshard_import__create.cpp:384: TImport::TTxProgress: DoComplete 2025-05-07T08:57:09.962921Z node 10 :TX_PROXY DEBUG: rpc_operation_request_base.h:50: [GetImport] [10:7501625222592554714:2379] [0] Resolve database: name# /Root 2025-05-07T08:57:09.963729Z node 10 :TX_PROXY DEBUG: rpc_operation_request_base.h:66: [GetImport] [10:7501625222592554714:2379] [0] Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult: request# { ErrorCount: 0 DatabaseName: /Root DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root TableId: [72057594046644480:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { 
Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-05-07T08:57:09.963760Z node 10 :TX_PROXY DEBUG: rpc_operation_request_base.h:106: [GetImport] [10:7501625222592554714:2379] [0] Send request: schemeShardId# 72057594046644480 2025-05-07T08:57:09.964904Z node 10 :TX_PROXY DEBUG: rpc_get_operation.cpp:220: [GetImport] [10:7501625222592554714:2379] [0] Handle TEvImport::TEvGetImportResponse: record# Entry { Id: 281474976715664 Status: SUCCESS Progress: PROGRESS_DONE ImportFromS3Settings { endpoint: "localhost:17630" scheme: HTTP bucket: "test_bucket" items { source_prefix: "view" destination_path: "/Root/view" } } StartTime { seconds: 1746608229 } EndTime { seconds: 1746608229 } } 2025-05-07T08:57:10.350922Z node 10 :TX_PROXY DEBUG: proxy_impl.cpp:353: actor# [10:7501625196822749531:2131] Handle TEvExecuteKqpTransaction 2025-05-07T08:57:10.350962Z node 10 :TX_PROXY DEBUG: proxy_impl.cpp:342: actor# [10:7501625196822749531:2131] TxId# 281474976715665 ProcessProposeKqpTransaction 2025-05-07T08:57:10.351538Z node 10 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715665. Ctx: { TraceId: 01jtmzd3k29amw1h6s0pk4gcbb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=10&id=YzZjYWFjZjItNDZlOTMyYjAtNTM1MjM2ZmEtYzI2NGU2NDk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/syncer/ut/unittest >> TSyncBrokerTests::ShouldReleaseInQueue [GOOD] Test command err: 2025-05-07T08:57:12.618535Z node 1 :BS_SYNCER DEBUG: blobstorage_syncer_broker.cpp:64: TEvQuerySyncToken, VDisk actor id: [0:1:1], actor id: [1:5:2052], token sent, active: 1, waiting: 0 2025-05-07T08:57:12.618714Z node 1 :BS_SYNCER DEBUG: blobstorage_syncer_broker.cpp:90: TEvQuerySyncToken, VDisk actor id: [0:1:2], actor id: [1:6:2053], enqueued, active: 1, waiting: 1 2025-05-07T08:57:12.618795Z node 1 :BS_SYNCER DEBUG: blobstorage_syncer_broker.cpp:123: TEvReleaseSyncToken, VDisk actor id: [0:1:1], actor id: [1:5:2052], token released, active: 1, waiting: 1 2025-05-07T08:57:12.618860Z node 1 :BS_SYNCER DEBUG: blobstorage_syncer_broker.cpp:105: ProcessQueue(), VDisk actor id: [0:1:2], actor id: [1:6:2053], token sent, active: 0, waiting: 1 2025-05-07T08:57:12.806219Z node 2 :BS_SYNCER DEBUG: blobstorage_syncer_broker.cpp:64: TEvQuerySyncToken, VDisk actor id: [0:1:1], actor id: [2:5:2052], token sent, active: 1, waiting: 0 2025-05-07T08:57:12.806386Z node 2 :BS_SYNCER DEBUG: blobstorage_syncer_broker.cpp:90: TEvQuerySyncToken, VDisk actor id: [0:1:2], actor id: [2:6:2053], enqueued, active: 1, waiting: 1 2025-05-07T08:57:12.806443Z node 2 :BS_SYNCER DEBUG: blobstorage_syncer_broker.cpp:146: TEvReleaseSyncToken, VDisk actor id: [0:1:2], actor id: [2:6:2053], removed from queue, active: 1, waiting: 0 |90.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/http_proxy/ut/ydb-core-http_proxy-ut >> TableCreation::SimpleUpdateTable [GOOD] >> TableCreation::CreateOldTable [GOOD] |90.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/http_proxy/ut/ydb-core-http_proxy-ut |90.8%| [LD] {RESULT} $(B)/ydb/core/http_proxy/ut/ydb-core-http_proxy-ut ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/syncer/ut/unittest >> TSyncBrokerTests::ShouldReleaseToken [GOOD] Test command err: 2025-05-07T08:57:12.746199Z node 1 :BS_SYNCER DEBUG: 
blobstorage_syncer_broker.cpp:64: TEvQuerySyncToken, VDisk actor id: [0:1:1], actor id: [1:5:2052], token sent, active: 1, waiting: 0 2025-05-07T08:57:12.858193Z node 2 :BS_SYNCER DEBUG: blobstorage_syncer_broker.cpp:64: TEvQuerySyncToken, VDisk actor id: [0:1:1], actor id: [2:5:2052], token sent, active: 1, waiting: 0 2025-05-07T08:57:12.858320Z node 2 :BS_SYNCER DEBUG: blobstorage_syncer_broker.cpp:123: TEvReleaseSyncToken, VDisk actor id: [0:1:1], actor id: [2:5:2052], token released, active: 1, waiting: 0 ------- [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/federated_query/generic_ut/unittest >> GenericFederatedQuery::IcebergHiveBasicFilterPushdown 2025-05-07 08:56:42,470 WARNING devtools.ya.test.programs.test_tool.run_test.run_test: Wrapper execution timed out 2025-05-07 08:56:42,687 WARNING devtools.ya.test.programs.test_tool.run_test.run_test: Wrapper has overrun 60 secs timeout. Process tree before termination: pid rss ref pdirt 216432 46.0M 46.0M 23.0M test_tool run_ut @/home/runner/.ya/build/build_root/zvgn/004886/ydb/core/kqp/ut/federated_query/generic_ut/test-results/unittest/testing_out_stuff/chunk4/testing_out_stuff/tes 217051 1.5G 1.5G 1.0G └─ ydb-core-kqp-ut-federated_query-generic_ut --trace-path-append /home/runner/.ya/build/build_root/zvgn/004886/ydb/core/kqp/ut/federated_query/generic_ut/test-results/unit Test command err: Trying to start YDB, gRPC: 28608, MsgBus: 26337 2025-05-07T08:55:45.228881Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501624865957120941:2205];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:55:45.230713Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/004886/r3tmp/tmpxwHgYo/pdisk_1.dat 2025-05-07T08:55:45.981895Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:55:45.995156Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:55:45.995292Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:55:46.005472Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 28608, node 1 2025-05-07T08:55:46.269317Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:55:46.269340Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:55:46.269348Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:55:46.269490Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26337 TClient is connected to server localhost:26337 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:55:47.075234Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:55:50.214162Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501624865957120941:2205];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:55:50.214259Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:55:51.343740Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501624891726925236:2335], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:55:51.343850Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:55:51.705071Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:2, at schemeshard: 72057594046644480 2025-05-07T08:55:51.864108Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501624891726925357:2348], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:55:51.864195Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:55:51.864569Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501624891726925362:2351], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:55:51.868385Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:2, at schemeshard: 72057594046644480 2025-05-07T08:55:51.882351Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501624891726925364:2352], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-05-07T08:55:51.950955Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501624891726925404:2404] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:55:53.941087Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T08:55:54.914383Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:1, at schemeshard: 72057594046644480 2025-05-07T08:55:55.740223Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710675:0, at schemeshard: 72057594046644480 2025-05-07T08:55:57.107564Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710680:0, at schemeshard: 72057594046644480 2025-05-07T08:55:57.722589Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710683:0, at schemeshard: 72057594046644480 2025-05-07T08:55:58.408702Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976715758:2, at schemeshard: 72057594046644480 2025-05-07T08:55:58.494002Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976715759:0, at schemeshard: 72057594046644480 2025-05-07T08:56:00.963363Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7261: Cannot get console configs 2025-05-07T08:56:00.963388Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:56:02.405516Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976710706:0, at schemeshard: 72057594046644480 2025-05-07T08:56:02.417014Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jtmzapscb843jywpff27cexn", SessionId: ydb://session/3?node_id=1&id=MjRlMjMxNTYtY2E2NTczY2YtMTZlODg1MWQtOGUwOGZlNTU=, Slow query, duration: 11.073228s, status: SUCCESS, user: UNAUTHENTICATED, results: 0b, text: "\n CREATE OBJECT external_data_source_p (TYPE SECRET) WITH (value=qwerty12345);\n\n CREATE EXTERNAL DATA SOURCE external_data_source WITH (\n SOURCE_TYPE=\"Iceberg\",\n DATABASE_NAME=\"pgdb\",\n WAREHOUSE_TYPE=\"s3\",\n WAREHOUSE_S3_REGION=\"s3_region\",\n WAREHOUSE_S3_ENDPOINT=\"s3_endpoint\",\n WAREHOUSE_S3_URI=\"s3_uri\",\n \n AUTH_METHOD=\"BASIC\",\n LOGIN=\"crab\",\n PASSWORD_SECRET_NAME=\"external_data_source_p\"\n ,\n \n CATALOG_TYPE=\"hive_metastore\",\n 
CATALOG_HIVE_METASTORE_URI=\"hive_metastore_uri\"\n ,\n USE_TLS=\"FALSE\"\n );\n ", parameters: 0b 2025-05-07T08:56:02.441491Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710707:0, at schemeshard: 72057594046644480 2025-05-07T08:56:02.456319Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710708:0, at schemeshard: 72057594046644480 2025-05-07T08:56:02.457561Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710709:0, at schemeshard: 72057594046644480 Call DescribeTable. data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Expected: data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Actual: data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3 ... ESchemeOpModifyACL, opId: 281474976715680:0, at schemeshard: 72057594046644480 2025-05-07T08:56:32.829364Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715685:0, at schemeshard: 72057594046644480 2025-05-07T08:56:33.546184Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710758:2, at schemeshard: 72057594046644480 2025-05-07T08:56:33.612533Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710759:0, at schemeshard: 72057594046644480 2025-05-07T08:56:36.605930Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976715706:0, at schemeshard: 72057594046644480 Call DescribeTable. 
data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Expected: data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Actual: data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } DescribeTable result. GRpcStatusCode: 0 schema { columns { name: "col1" type { type_id: UINT16 } } columns { name: "col2" type { type_id: DOUBLE } } } error { status: SUCCESS } Call ListSplits. selects { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } from { table: "example_1" } } CRAB Expected: selects { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } from { table: "example_1" } } CRAB Actual: selects { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } from { table: "example_1" } } ListSplits result. GRpcStatusCode: 0 Call ReadSplits. 
splits { select { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } what { } from { table: "example_1" } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL CRAB Expected: splits { select { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } what { } from { table: "example_1" } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL CRAB Actual: splits { select { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } what { } from { table: "example_1" } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL ReadSplits result. GRpcStatusCode: 0 2025-05-07T08:56:37.473614Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7261: Cannot get console configs 2025-05-07T08:56:37.473650Z node 3 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded Trying to start YDB, gRPC: 9143, MsgBus: 11977 2025-05-07T08:56:38.851123Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7501625090908048139:2117];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:56:38.851233Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/004886/r3tmp/tmp91B3GG/pdisk_1.dat 2025-05-07T08:56:39.061896Z node 4 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 9143, node 4 2025-05-07T08:56:39.176404Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:56:39.176432Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:56:39.176442Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:56:39.176601Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-05-07T08:56:39.196537Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:56:39.196668Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:56:39.201173Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:11977 TClient is connected to server localhost:11977 
WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:56:39.897838Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:56:39.918708Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 Traceback (most recent call last): File "library/python/testing/yatest_common/yatest/common/process.py", line 384, in wait wait_for( File "library/python/testing/yatest_common/yatest/common/process.py", line 764, in wait_for raise TimeoutError(truncate(message, MAX_MESSAGE_LEN)) yatest.common.process.TimeoutError: 60 second(s) wait timeout has expired: Command '['/home/runner/.ya/tools/v4/8580453620/test_tool', 'run_ut', '@/home/runner/.ya/build/build_root/zvgn/004886/ydb/core/kqp/ut/federated_query/generic_ut/test-results/unittest/testing_out_stuff/chunk4/testing_out_stuff/test_tool.args']' stopped by 60 seconds timeout During handling of the above exception, another exception occurred: Traceback (most recent call last): File "devtools/ya/test/programs/test_tool/run_test/run_test.py", line 1738, in main res.wait(check_exit_code=False, timeout=run_timeout, on_timeout=timeout_callback) File "library/python/testing/yatest_common/yatest/common/process.py", line 398, in wait raise ExecutionTimeoutError(self, str(e)) yatest.common.process.ExecutionTimeoutError: (("60 second(s) wait timeout has expired: Command '['/home/runner/.ya/tools/v4/8580453620/test_tool', 'run_ut', '@/home/runner/.ya/build/build_root/zvgn/004886/ydb/core/kqp/ut/federated_query/generic_ut/test-results/unittest/testing_out_stuff/chunk4/testing_out_stuff/test_tool.args']' stopped by 60 seconds timeout",), {}) 2025-05-07 08:57:13,421 WARNING library.python.cores: Core dump dir doesn't exist: /coredumps 2025-05-07 08:57:13,432 WARNING library.python.cores: Core dump dir doesn't exist: /var/tmp/cores >> TVersions::Wreck1 [GOOD] >> TVersions::Wreck1Reverse >> TQuorumTrackerTests::Erasure4Plus2BlockNotIncludingMyFailDomain_8_2 [GOOD] >> TQuorumTrackerTests::ErasureMirror3IncludingMyFailDomain_4_2 [GOOD] >> BackupRestore::TestAllPrimitiveTypes-INT16 [GOOD] >> BackupRestore::TestAllPrimitiveTypes-INT32 >> TNetClassifierTest::TestInitFromFile [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/proxy_service/ut/unittest >> TableCreation::SimpleUpdateTable [GOOD] Test command err: 2025-05-07T08:57:01.386278Z node 1 :METADATA_PROVIDER WARN: 
log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501625189864434952:2060];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:57:01.414515Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0031b2/r3tmp/tmpVrxBxU/pdisk_1.dat 2025-05-07T08:57:02.032082Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:57:02.036954Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:57:02.037104Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:57:02.042737Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:26265 TServer::EnableGrpc on GrpcPort 16243, node 1 2025-05-07T08:57:02.398008Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:57:02.398048Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:57:02.398076Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:57:02.398219Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-05-07T08:57:02.613098Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:57:04.847498Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1665: Updated YQL logs priority to current level: 4 2025-05-07T08:57:04.849172Z node 1 :KQP_PROXY INFO: kqp_proxy_service.cpp:442: Cannot start publishing usage, tenants: /dc-1, empty 2025-05-07T08:57:04.851621Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:512: Subscribed for config changes. 2025-05-07T08:57:04.851717Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:519: Updated table service config. 
2025-05-07T08:57:04.852499Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1665: Updated YQL logs priority to current level: 4 2025-05-07T08:57:04.852556Z node 1 :KQP_PROXY INFO: kqp_proxy_service.cpp:442: Cannot start publishing usage, tenants: /dc-1, empty 2025-05-07T08:57:04.853339Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:425: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-05-07T08:57:04.853390Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:425: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-05-07T08:57:04.853406Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:425: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-05-07T08:57:04.856928Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:147: Table result_sets updater. Describe result: PathErrorUnknown 2025-05-07T08:57:04.856946Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:147: Table script_executions updater. Describe result: PathErrorUnknown 2025-05-07T08:57:04.856952Z node 1 :KQP_PROXY NOTICE: table_creator.cpp:167: Table result_sets updater. Creating table 2025-05-07T08:57:04.856953Z node 1 :KQP_PROXY NOTICE: table_creator.cpp:167: Table script_executions updater. Creating table 2025-05-07T08:57:04.856988Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:100: Table result_sets updater. Full table path:/dc-1/.metadata/result_sets 2025-05-07T08:57:04.856988Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:100: Table script_executions updater. Full table path:/dc-1/.metadata/script_executions 2025-05-07T08:57:04.857340Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:147: Table script_execution_leases updater. Describe result: PathErrorUnknown 2025-05-07T08:57:04.857359Z node 1 :KQP_PROXY NOTICE: table_creator.cpp:167: Table script_execution_leases updater. Creating table 2025-05-07T08:57:04.857390Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:100: Table script_execution_leases updater. Full table path:/dc-1/.metadata/script_execution_leases 2025-05-07T08:57:04.860609Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:425: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-05-07T08:57:04.870264Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:1, at schemeshard: 72057594046644480 2025-05-07T08:57:04.872828Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 2025-05-07T08:57:04.880321Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 2025-05-07T08:57:04.889388Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:190: Table script_execution_leases updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976710659 SchemeShardStatus: 1 SchemeShardTabletId: 72057594046644480 PathId: 3 } 2025-05-07T08:57:04.889418Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:190: Table script_executions updater. 
TEvProposeTransactionStatus: { Status: 53 TxId: 281474976710658 SchemeShardStatus: 1 SchemeShardTabletId: 72057594046644480 PathId: 4 } 2025-05-07T08:57:04.889449Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:261: Table script_execution_leases updater. Subscribe on create table tx: 281474976710659 2025-05-07T08:57:04.889497Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:261: Table script_executions updater. Subscribe on create table tx: 281474976710658 2025-05-07T08:57:04.892013Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:190: Table result_sets updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976710660 SchemeShardStatus: 1 SchemeShardTabletId: 72057594046644480 PathId: 5 } 2025-05-07T08:57:04.892054Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:261: Table result_sets updater. Subscribe on create table tx: 281474976710660 2025-05-07T08:57:05.069961Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:290: Table script_execution_leases updater. Request: create. Transaction completed: 281474976710659. Doublechecking... 2025-05-07T08:57:05.137009Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:290: Table result_sets updater. Request: create. Transaction completed: 281474976710660. Doublechecking... 2025-05-07T08:57:05.144678Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:290: Table script_executions updater. Request: create. Transaction completed: 281474976710658. Doublechecking... 2025-05-07T08:57:05.161053Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:362: Table script_execution_leases updater. Column diff is empty, finishing 2025-05-07T08:57:05.194213Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:362: Table result_sets updater. Column diff is empty, finishing 2025-05-07T08:57:05.224495Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:362: Table script_executions updater. Column diff is empty, finishing 2025-05-07T08:57:05.224954Z node 1 :KQP_PROXY DEBUG: query_actor.cpp:134: [TQueryBase] [TCreateScriptOperationQuery] TraceId: 4e34b355-928c2b22-19ef320e-845753a5, Bootstrap. 
Database: /dc-1 2025-05-07T08:57:05.235111Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1468: Request has 18444997465484.316540s seconds to be completed 2025-05-07T08:57:05.238326Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1543: Created new session, sessionId: ydb://session/3?node_id=1&id=MjEzOGEwYWMtMjE2NTg1OGYtN2QyOTBkNWEtNWZlYzhhNTA=, workerId: [1:7501625207044305033:2333], database: /dc-1, longSession: 1, local sessions count: 1 2025-05-07T08:57:05.238498Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:649: Received create session request, trace_id: 2025-05-07T08:57:05.239510Z node 1 :KQP_PROXY DEBUG: query_actor.cpp:197: [TQueryBase] [TCreateScriptOperationQuery] TraceId: 4e34b355-928c2b22-19ef320e-845753a5, RunDataQuery: -- TCreateScriptOperationQuery::OnRunQuery DECLARE $database AS Text; DECLARE $execution_id AS Text; DECLARE $run_script_actor_id AS Text; DECLARE $execution_status AS Int32; DECLARE $execution_mode AS Int32; DECLARE $query_text AS Text; DECLARE $syntax AS Int32; DECLARE $meta AS JsonDocument; DECLARE $lease_duration AS Interval; DECLARE $execution_meta_ttl AS Interval; UPSERT INTO `.metadata/script_executions` (database, execution_id, run_script_actor_id, execution_status, execution_mode, start_ts, query_text, syntax, meta, expire_at) VALUES ($database, $execution_id, $run_script_actor_id, $execution_status, $execution_mode, CurrentUtcTimestamp(), $query_text, $syntax, $meta, CurrentUtcTimestamp() + $execution_meta_ttl); UPSERT INTO `.metadata/script_execution_leases` (database, execution_id, lease_deadline, lease_generation, expire_at) VALUES ($database, $execution_id, CurrentUtcTimestamp() + $lease_duration, 1, CurrentUtcTimestamp() + $execution_meta_ttl); 2025-05-07T08:57:05.240162Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:767: Ctx: { TraceId: , Database: /dc-1, DatabaseId: , SessionId: ydb://session/3?node_id=1&id=MjEzOGEwYWMtMjE2NTg1OGYtN2QyOTBkNWEtNWZlYzhhNTA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 300.000000s timeout: 0.000000s cancelAfter: 0.000000s. Send request to target, requestId: 3, targetId: [1:7501625207044305033:2333] 2025-05-07T08:57:05.240201Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1294: Scheduled timeout timer for requestId: 3 timeout: 300.000000s actor id: [1:7501625207044305035:2462] 2025-05-07T08:57:05.242235Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501625207044305036:2335], DatabaseId: /dc-1, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:57:05.242353Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to ... 2025-05-07T08:57:12.033871Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:57:12.174597Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:953: Forwarded response to sender actor, requestId: 3, sender: [2:7501625235082886934:2335], selfId: [2:7501625217903016990:2168], source: [2:7501625235082886933:2334] 2025-05-07T08:57:12.175181Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:240: [TQueryBase] [TCreateScriptOperationQuery] TraceId: bd1db975-4e4a549f-d8fb8811-827867c0, TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=MmQ1NzIxNWEtODczNDljNDgtNGEzNzBiZjUtNGNhZDgwODg=, TxId: 2025-05-07T08:57:12.175203Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:367: [TQueryBase] [TCreateScriptOperationQuery] TraceId: bd1db975-4e4a549f-d8fb8811-827867c0, Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=MmQ1NzIxNWEtODczNDljNDgtNGEzNzBiZjUtNGNhZDgwODg=, TxId: 2025-05-07T08:57:12.175231Z node 2 :KQP_PROXY DEBUG: kqp_script_executions.cpp:304: [ScriptExecutions] Create script execution operation. ExecutionId: bd1db975-4e4a549f-d8fb8811-827867c0. Result: SUCCESS. Issues: 2025-05-07T08:57:12.178414Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1543: Created new session, sessionId: ydb://session/3?node_id=2&id=ZTRmNDY2ZDYtOTYxZGVkZDQtYjE3NmU5YTktNTI2Yjk1NGQ=, workerId: [2:7501625239377854345:2353], database: dc-1, longSession: 1, local sessions count: 2 2025-05-07T08:57:12.178613Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:649: Received create session request, trace_id: 2025-05-07T08:57:12.190591Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:767: Ctx: { TraceId: , Database: dc-1, DatabaseId: , SessionId: ydb://session/3?node_id=2&id=ZTRmNDY2ZDYtOTYxZGVkZDQtYjE3NmU5YTktNTI2Yjk1NGQ=, CurrentExecutionId: bd1db975-4e4a549f-d8fb8811-827867c0, CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 604800.000000s timeout: 604800.000000s cancelAfter: 0.000000s. Send request to target, requestId: 5, targetId: [2:7501625239377854345:2353] 2025-05-07T08:57:12.190637Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1294: Scheduled timeout timer for requestId: 5 timeout: 604800.000000s actor id: [2:7501625239377854347:2522] 2025-05-07T08:57:12.191161Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1353: Session closed, sessionId: ydb://session/3?node_id=2&id=MmQ1NzIxNWEtODczNDljNDgtNGEzNzBiZjUtNGNhZDgwODg=, workerId: [2:7501625235082886933:2334], local sessions count: 1 2025-05-07T08:57:12.234956Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1468: TraceId: "01jtmzd5sa8c48349fakrn02nb", Request has 18444997465477.316684s seconds to be completed 2025-05-07T08:57:12.236923Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1543: TraceId: "01jtmzd5sa8c48349fakrn02nb", Created new session, sessionId: ydb://session/3?node_id=2&id=NWE0ZGNkYzktYmNiYzBkMmUtNjhlOTRhYmYtYzZiNGNhZTc=, workerId: [2:7501625239377854362:2363], database: /dc-1, longSession: 1, local sessions count: 2 2025-05-07T08:57:12.237077Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:649: Received create session request, trace_id: 01jtmzd5sa8c48349fakrn02nb 2025-05-07T08:57:12.241129Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:147: Table test_table updater. 
Describe result: PathErrorUnknown 2025-05-07T08:57:12.241156Z node 2 :KQP_PROXY NOTICE: table_creator.cpp:167: Table test_table updater. Creating table 2025-05-07T08:57:12.241182Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:100: Table test_table updater. Full table path:/dc-1/.test/test_table 2025-05-07T08:57:12.244167Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:1, at schemeshard: 72057594046644480 2025-05-07T08:57:12.246171Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:190: Table test_table updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976715664 SchemeShardStatus: 1 SchemeShardTabletId: 72057594046644480 PathId: 10 } 2025-05-07T08:57:12.246211Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:261: Table test_table updater. Subscribe on create table tx: 281474976715664 2025-05-07T08:57:12.266499Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:134: [TQueryBase] [TSaveScriptExecutionResultMetaQuery] TraceId: bd1db975-4e4a549f-d8fb8811-827867c0, Bootstrap. Database: /dc-1 2025-05-07T08:57:12.266852Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1468: Request has 18444997465477.284786s seconds to be completed 2025-05-07T08:57:12.268858Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1543: Created new session, sessionId: ydb://session/3?node_id=2&id=MmNmZTFiNDktZWFiZTg4YjAtNjcwNzQ1ZjUtZTZhN2M4ZGU=, workerId: [2:7501625239377854427:2366], database: /dc-1, longSession: 1, local sessions count: 3 2025-05-07T08:57:12.269006Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:649: Received create session request, trace_id: 2025-05-07T08:57:12.269124Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:953: Forwarded response to sender actor, requestId: 5, sender: [2:7501625235082886930:2458], selfId: [2:7501625217903016990:2168], source: [2:7501625239377854345:2353] 2025-05-07T08:57:12.269224Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:197: [TQueryBase] [TSaveScriptExecutionResultMetaQuery] TraceId: bd1db975-4e4a549f-d8fb8811-827867c0, RunDataQuery: -- TSaveScriptExecutionResultMetaQuery::OnRunQuery DECLARE $database AS Text; DECLARE $execution_id AS Text; DECLARE $result_set_metas AS JsonDocument; UPDATE `.metadata/script_executions` SET result_set_metas = $result_set_metas WHERE database = $database AND execution_id = $execution_id; 2025-05-07T08:57:12.269536Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:767: Ctx: { TraceId: , Database: /dc-1, DatabaseId: , SessionId: ydb://session/3?node_id=2&id=MmNmZTFiNDktZWFiZTg4YjAtNjcwNzQ1ZjUtZTZhN2M4ZGU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 300.000000s timeout: 0.000000s cancelAfter: 0.000000s. Send request to target, requestId: 8, targetId: [2:7501625239377854427:2366] 2025-05-07T08:57:12.269570Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1294: Scheduled timeout timer for requestId: 8 timeout: 300.000000s actor id: [2:7501625239377854429:2562] 2025-05-07T08:57:12.313132Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:290: Table test_table updater. Request: create. Transaction completed: 281474976715664. Doublechecking... 2025-05-07T08:57:12.412414Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table updater. Column diff is empty, finishing 2025-05-07T08:57:12.413102Z node 2 :KQP_PROXY NOTICE: table_creator.cpp:365: Table test_table updater. Adding columns. New columns: col4, col5. 
Existing columns: col1, col2, col3 2025-05-07T08:57:12.413175Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:100: Table test_table updater. Full table path:/dc-1/.test/test_table 2025-05-07T08:57:12.414909Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-05-07T08:57:12.416287Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:190: Table test_table updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976715666 SchemeShardStatus: 1 SchemeShardTabletId: 72057594046644480 } 2025-05-07T08:57:12.416361Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:261: Table test_table updater. Subscribe on create table tx: 281474976715666 2025-05-07T08:57:12.430647Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:290: Table test_table updater. Request: alter. Transaction completed: 281474976715666. Doublechecking... 2025-05-07T08:57:12.440239Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:953: Forwarded response to sender actor, requestId: 8, sender: [2:7501625239377854428:2367], selfId: [2:7501625217903016990:2168], source: [2:7501625239377854427:2366] 2025-05-07T08:57:12.440494Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:240: [TQueryBase] [TSaveScriptExecutionResultMetaQuery] TraceId: bd1db975-4e4a549f-d8fb8811-827867c0, TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=MmNmZTFiNDktZWFiZTg4YjAtNjcwNzQ1ZjUtZTZhN2M4ZGU=, TxId: 2025-05-07T08:57:12.440541Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:367: [TQueryBase] [TSaveScriptExecutionResultMetaQuery] TraceId: bd1db975-4e4a549f-d8fb8811-827867c0, Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=MmNmZTFiNDktZWFiZTg4YjAtNjcwNzQ1ZjUtZTZhN2M4ZGU=, TxId: 2025-05-07T08:57:12.440812Z node 2 :KQP_PROXY DEBUG: kqp_script_executions.cpp:1907: [ScriptExecutions] [TSaveScriptExecutionResultActor] ExecutionId: bd1db975-4e4a549f-d8fb8811-827867c0, start saving rows range [0; 1) 2025-05-07T08:57:12.440951Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:134: [TQueryBase] [TSaveScriptExecutionResultQuery] TraceId: bd1db975-4e4a549f-d8fb8811-827867c0, Bootstrap. 
Database: /dc-1 2025-05-07T08:57:12.441431Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1468: Request has 18444997465477.110210s seconds to be completed 2025-05-07T08:57:12.444500Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1543: Created new session, sessionId: ydb://session/3?node_id=2&id=MWFkYjE0NzYtM2YwMGU2Yy1lZDFkYjdkYy1kYzYwMjNiMQ==, workerId: [2:7501625239377854512:2378], database: /dc-1, longSession: 1, local sessions count: 4 2025-05-07T08:57:12.444684Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:649: Received create session request, trace_id: 2025-05-07T08:57:12.444771Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1353: Session closed, sessionId: ydb://session/3?node_id=2&id=MmNmZTFiNDktZWFiZTg4YjAtNjcwNzQ1ZjUtZTZhN2M4ZGU=, workerId: [2:7501625239377854427:2366], local sessions count: 3 2025-05-07T08:57:12.445324Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:197: [TQueryBase] [TSaveScriptExecutionResultQuery] TraceId: bd1db975-4e4a549f-d8fb8811-827867c0, RunDataQuery: -- TSaveScriptExecutionResultQuery::OnRunQuery DECLARE $database AS Text; DECLARE $execution_id AS Text; DECLARE $result_set_id AS Int32; DECLARE $expire_at AS Optional<Timestamp>; DECLARE $items AS List<Struct<row_id: Int64, result_set: String, accumulated_size: Int64>>; UPSERT INTO `.metadata/result_sets` SELECT $database as database, $execution_id as execution_id, $result_set_id as result_set_id, T.row_id as row_id, $expire_at as expire_at, T.result_set as result_set, T.accumulated_size as accumulated_size FROM AS_TABLE($items) AS T; 2025-05-07T08:57:12.445917Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:767: Ctx: { TraceId: , Database: /dc-1, DatabaseId: , SessionId: ydb://session/3?node_id=2&id=MWFkYjE0NzYtM2YwMGU2Yy1lZDFkYjdkYy1kYzYwMjNiMQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 300.000000s timeout: 0.000000s cancelAfter: 0.000000s. Send request to target, requestId: 10, targetId: [2:7501625239377854512:2378] 2025-05-07T08:57:12.445989Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1294: Scheduled timeout timer for requestId: 10 timeout: 300.000000s actor id: [2:7501625239377854514:2623] 2025-05-07T08:57:12.487910Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table updater. 
Column diff is empty, finishing 2025-05-07T08:57:12.538108Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1353: Session closed, sessionId: ydb://session/3?node_id=2&id=NWE0ZGNkYzktYmNiYzBkMmUtNjhlOTRhYmYtYzZiNGNhZTc=, workerId: [2:7501625239377854362:2363], local sessions count: 2 2025-05-07T08:57:12.623827Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:425: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/proxy_service/ut/unittest >> TableCreation::ConcurrentMultipleTablesCreation [GOOD] Test command err: 2025-05-07T08:57:01.493851Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501625191618416782:2058];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:57:01.493889Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0031d7/r3tmp/tmpcUWcvA/pdisk_1.dat 2025-05-07T08:57:02.134459Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:57:02.142604Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:57:02.142720Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:57:02.145174Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:17575 TServer::EnableGrpc on GrpcPort 17520, node 1 2025-05-07T08:57:02.462698Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:57:02.462735Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:57:02.462768Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:57:02.462940Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 
2025-05-07T08:57:02.636042Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:57:05.164162Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1665: Updated YQL logs priority to current level: 4 2025-05-07T08:57:05.165463Z node 1 :KQP_PROXY INFO: kqp_proxy_service.cpp:442: Cannot start publishing usage, tenants: /dc-1, empty 2025-05-07T08:57:05.167698Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:512: Subscribed for config changes. 2025-05-07T08:57:05.167741Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:519: Updated table service config. 2025-05-07T08:57:05.167775Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1665: Updated YQL logs priority to current level: 4 2025-05-07T08:57:05.167813Z node 1 :KQP_PROXY INFO: kqp_proxy_service.cpp:442: Cannot start publishing usage, tenants: /dc-1, empty 2025-05-07T08:57:05.167989Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:425: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-05-07T08:57:05.168043Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:425: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-05-07T08:57:05.169249Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:425: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-05-07T08:57:05.169304Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:425: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-05-07T08:57:05.173057Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:147: Table script_executions updater. Describe result: PathErrorUnknown 2025-05-07T08:57:05.173085Z node 1 :KQP_PROXY NOTICE: table_creator.cpp:167: Table script_executions updater. Creating table 2025-05-07T08:57:05.173118Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:100: Table script_executions updater. Full table path:/dc-1/.metadata/script_executions 2025-05-07T08:57:05.174641Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:147: Table result_sets updater. Describe result: PathErrorUnknown 2025-05-07T08:57:05.174661Z node 1 :KQP_PROXY NOTICE: table_creator.cpp:167: Table result_sets updater. Creating table 2025-05-07T08:57:05.174685Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:100: Table result_sets updater. Full table path:/dc-1/.metadata/result_sets 2025-05-07T08:57:05.175674Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:147: Table script_execution_leases updater. Describe result: PathErrorUnknown 2025-05-07T08:57:05.175683Z node 1 :KQP_PROXY NOTICE: table_creator.cpp:167: Table script_execution_leases updater. Creating table 2025-05-07T08:57:05.175709Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:100: Table script_execution_leases updater. 
Full table path:/dc-1/.metadata/script_execution_leases 2025-05-07T08:57:05.190874Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:1, at schemeshard: 72057594046644480 2025-05-07T08:57:05.192926Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 2025-05-07T08:57:05.198738Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 2025-05-07T08:57:05.205918Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:190: Table script_executions updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976710658 SchemeShardStatus: 1 SchemeShardTabletId: 72057594046644480 PathId: 3 } 2025-05-07T08:57:05.205937Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:190: Table script_execution_leases updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976710660 SchemeShardStatus: 1 SchemeShardTabletId: 72057594046644480 PathId: 4 } 2025-05-07T08:57:05.206000Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:261: Table script_executions updater. Subscribe on create table tx: 281474976710658 2025-05-07T08:57:05.206001Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:261: Table script_execution_leases updater. Subscribe on create table tx: 281474976710660 2025-05-07T08:57:05.210782Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:190: Table result_sets updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976710659 SchemeShardStatus: 1 SchemeShardTabletId: 72057594046644480 PathId: 5 } 2025-05-07T08:57:05.210829Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:261: Table result_sets updater. Subscribe on create table tx: 281474976710659 2025-05-07T08:57:05.332738Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:290: Table script_executions updater. Request: create. Transaction completed: 281474976710658. Doublechecking... 2025-05-07T08:57:05.372457Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:290: Table result_sets updater. Request: create. Transaction completed: 281474976710659. Doublechecking... 2025-05-07T08:57:05.380623Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:290: Table script_execution_leases updater. Request: create. Transaction completed: 281474976710660. Doublechecking... 2025-05-07T08:57:05.394597Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:362: Table script_executions updater. Column diff is empty, finishing 2025-05-07T08:57:05.437130Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:362: Table script_execution_leases updater. Column diff is empty, finishing 2025-05-07T08:57:05.467810Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:362: Table result_sets updater. Column diff is empty, finishing 2025-05-07T08:57:05.468395Z node 1 :KQP_PROXY DEBUG: query_actor.cpp:134: [TQueryBase] [TCreateScriptOperationQuery] TraceId: 8a6b8a68-43c3718d-7667d289-a721e81b, Bootstrap. 
Database: /dc-1 2025-05-07T08:57:05.513283Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1468: Request has 18444997465484.038368s seconds to be completed 2025-05-07T08:57:05.516509Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1543: Created new session, sessionId: ydb://session/3?node_id=1&id=YTAxY2UyZTktZDhjYjA0MmYtY2FiNTM2MWQtZmY3ZmYyMzY=, workerId: [1:7501625208798286867:2334], database: /dc-1, longSession: 1, local sessions count: 1 2025-05-07T08:57:05.516695Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:649: Received create session request, trace_id: 2025-05-07T08:57:05.518716Z node 1 :KQP_PROXY DEBUG: query_actor.cpp:197: [TQueryBase] [TCreateScriptOperationQuery] TraceId: 8a6b8a68-43c3718d-7667d289-a721e81b, RunDataQuery: -- TCreateScriptOperationQuery::OnRunQuery DECLARE $database AS Text; DECLARE $execution_id AS Text; DECLARE $run_script_actor_id AS Text; DECLARE $execution_status AS Int32; DECLARE $execution_mode AS Int32; DECLARE $query_text AS Text; DECLARE $syntax AS Int32; DECLARE $meta AS JsonDocument; DECLARE $lease_duration AS Interval; DECLARE $execution_meta_ttl AS Interval; UPSERT INTO `.metadata/script_executions` (database, execution_id, run_script_actor_id, execution_status, execution_mode, start_ts, query_text, syntax, meta, expire_at) VALUES ($database, $execution_id, $run_script_actor_id, $execution_status, $execution_mode, CurrentUtcTimestamp(), $query_text, $syntax, $meta, CurrentUtcTimestamp() + $execution_meta_ttl); UPSERT INTO `.metadata/script_execution_leases` (database, execution_id, lease_deadline, lease_generation, expire_at) VALUES ($database, $execution_id, CurrentUtcTimestamp() + $lease_duration, 1, CurrentUtcTimestamp() + $execution_meta_ttl); 2025-05-07T08:57:05.519474Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:767: Ctx: { TraceId: , Database: /dc-1, DatabaseId: , SessionId: ydb://session/3?node_id=1&id=YTAxY2UyZTktZDhjYjA0MmYtY2FiNTM2MWQtZmY3ZmYyMzY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 300.000000s timeout: 0.000000s cancelAfter: 0.000000s. Send request to target, requestId: 3, targetId: [1:7501625208798286867:2334] 2025-05-07T08:57:05.519512Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1294: Scheduled timeout timer for requestId: 3 timeout: 300.000000s actor id: [1:7501625208798286869:2464] 2025-05-07T08:57:05.522052Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501625208798286878:2339], DatabaseId: /dc-1, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:57:05.525085Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId ... DEBUG: table_creator.cpp:362: Table test_table1 updater. Column diff is empty, finishing 2025-05-07T08:57:12.254318Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table1 updater. Column diff is empty, finishing 2025-05-07T08:57:12.257097Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table1 updater. Column diff is empty, finishing 2025-05-07T08:57:12.257456Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table1 updater. Column diff is empty, finishing 2025-05-07T08:57:12.257624Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table0 updater. Column diff is empty, finishing 2025-05-07T08:57:12.258992Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table1 updater. Column diff is empty, finishing 2025-05-07T08:57:12.260529Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table1 updater. Column diff is empty, finishing 2025-05-07T08:57:12.262086Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table1 updater. Column diff is empty, finishing 2025-05-07T08:57:12.262140Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table0 updater. Column diff is empty, finishing 2025-05-07T08:57:12.266324Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table1 updater. Column diff is empty, finishing 2025-05-07T08:57:12.266410Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table0 updater. Column diff is empty, finishing 2025-05-07T08:57:12.274490Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table0 updater. Column diff is empty, finishing 2025-05-07T08:57:12.274576Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table0 updater. Column diff is empty, finishing 2025-05-07T08:57:12.274640Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table0 updater. Column diff is empty, finishing 2025-05-07T08:57:12.274663Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table0 updater. Column diff is empty, finishing 2025-05-07T08:57:12.275654Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table0 updater. Column diff is empty, finishing 2025-05-07T08:57:12.276172Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table0 updater. Column diff is empty, finishing 2025-05-07T08:57:12.282414Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table0 updater. Column diff is empty, finishing 2025-05-07T08:57:12.282523Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table0 updater. Column diff is empty, finishing 2025-05-07T08:57:12.282586Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table0 updater. Column diff is empty, finishing 2025-05-07T08:57:12.289441Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table0 updater. Column diff is empty, finishing 2025-05-07T08:57:12.289539Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table0 updater. Column diff is empty, finishing 2025-05-07T08:57:12.292415Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table0 updater. Column diff is empty, finishing 2025-05-07T08:57:12.293254Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table0 updater. Column diff is empty, finishing 2025-05-07T08:57:12.294336Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table0 updater. 
Column diff is empty, finishing 2025-05-07T08:57:12.295335Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table0 updater. Column diff is empty, finishing 2025-05-07T08:57:12.300968Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table0 updater. Column diff is empty, finishing 2025-05-07T08:57:12.301990Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table0 updater. Column diff is empty, finishing 2025-05-07T08:57:12.318138Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:953: Forwarded response to sender actor, requestId: 8, sender: [2:7501625238987098237:2365], selfId: [2:7501625217512260588:2265], source: [2:7501625238987098230:2364] 2025-05-07T08:57:12.318525Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:240: [TQueryBase] [TSaveScriptExecutionResultMetaQuery] TraceId: 120af5d0-a5724874-bcd49a4a-ae1b64ea, TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=OWExNzg2ZjQtZDgyMjQyYTUtNzlhZjAwMDUtY2RmNjk4NGQ=, TxId: 2025-05-07T08:57:12.318549Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:367: [TQueryBase] [TSaveScriptExecutionResultMetaQuery] TraceId: 120af5d0-a5724874-bcd49a4a-ae1b64ea, Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=OWExNzg2ZjQtZDgyMjQyYTUtNzlhZjAwMDUtY2RmNjk4NGQ=, TxId: 2025-05-07T08:57:12.318710Z node 2 :KQP_PROXY DEBUG: kqp_script_executions.cpp:1907: [ScriptExecutions] [TSaveScriptExecutionResultActor] ExecutionId: 120af5d0-a5724874-bcd49a4a-ae1b64ea, start saving rows range [0; 1) 2025-05-07T08:57:12.318767Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:134: [TQueryBase] [TSaveScriptExecutionResultQuery] TraceId: 120af5d0-a5724874-bcd49a4a-ae1b64ea, Bootstrap. Database: /dc-1 2025-05-07T08:57:12.319043Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1468: Request has 18444997465477.232589s seconds to be completed 2025-05-07T08:57:12.321019Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1543: Created new session, sessionId: ydb://session/3?node_id=2&id=MWUwYTE2MGUtZWQ0ZTlkYmQtMjg0NTc1MzEtMmMwYTdkZjg=, workerId: [2:7501625238987098523:2379], database: /dc-1, longSession: 1, local sessions count: 4 2025-05-07T08:57:12.321156Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:649: Received create session request, trace_id: 2025-05-07T08:57:12.321555Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1353: Session closed, sessionId: ydb://session/3?node_id=2&id=OWExNzg2ZjQtZDgyMjQyYTUtNzlhZjAwMDUtY2RmNjk4NGQ=, workerId: [2:7501625238987098230:2364], local sessions count: 3 2025-05-07T08:57:12.322133Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:197: [TQueryBase] [TSaveScriptExecutionResultQuery] TraceId: 120af5d0-a5724874-bcd49a4a-ae1b64ea, RunDataQuery: -- TSaveScriptExecutionResultQuery::OnRunQuery DECLARE $database AS Text; DECLARE $execution_id AS Text; DECLARE $result_set_id AS Int32; DECLARE $expire_at AS Optional<Timestamp>; DECLARE $items AS List<Struct<row_id: Int64, result_set: String, accumulated_size: Int64>>; UPSERT INTO `.metadata/result_sets` SELECT $database as database, $execution_id as execution_id, $result_set_id as result_set_id, T.row_id as row_id, $expire_at as expire_at, T.result_set as result_set, T.accumulated_size as accumulated_size FROM AS_TABLE($items) AS T; 2025-05-07T08:57:12.322531Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:767: Ctx: { TraceId: , Database: /dc-1, DatabaseId: , SessionId: ydb://session/3?node_id=2&id=MWUwYTE2MGUtZWQ0ZTlkYmQtMjg0NTc1MzEtMmMwYTdkZjg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 300.000000s timeout: 0.000000s cancelAfter: 0.000000s. 
Send request to target, requestId: 10, targetId: [2:7501625238987098523:2379] 2025-05-07T08:57:12.322576Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1294: Scheduled timeout timer for requestId: 10 timeout: 300.000000s actor id: [2:7501625238987098526:3062] 2025-05-07T08:57:12.372251Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:425: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-05-07T08:57:12.476347Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1353: Session closed, sessionId: ydb://session/3?node_id=2&id=YTc1MzQzMGYtMjI4OGU1ZGItYmE2MDQ2NDMtOWU1NDE4YjI=, workerId: [2:7501625238987097878:2362], local sessions count: 2 2025-05-07T08:57:12.551219Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:953: Forwarded response to sender actor, requestId: 10, sender: [2:7501625238987098525:2380], selfId: [2:7501625217512260588:2265], source: [2:7501625238987098523:2379] 2025-05-07T08:57:12.551675Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:240: [TQueryBase] [TSaveScriptExecutionResultQuery] TraceId: 120af5d0-a5724874-bcd49a4a-ae1b64ea, TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=MWUwYTE2MGUtZWQ0ZTlkYmQtMjg0NTc1MzEtMmMwYTdkZjg=, TxId: 2025-05-07T08:57:12.551700Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:367: [TQueryBase] [TSaveScriptExecutionResultQuery] TraceId: 120af5d0-a5724874-bcd49a4a-ae1b64ea, Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=MWUwYTE2MGUtZWQ0ZTlkYmQtMjg0NTc1MzEtMmMwYTdkZjg=, TxId: 2025-05-07T08:57:12.551825Z node 2 :KQP_PROXY DEBUG: kqp_script_executions.cpp:1939: [ScriptExecutions] [TSaveScriptExecutionResultActor] ExecutionId: 120af5d0-a5724874-bcd49a4a-ae1b64ea, result part successfully saved 2025-05-07T08:57:12.551838Z node 2 :KQP_PROXY DEBUG: kqp_script_executions.cpp:1946: [ScriptExecutions] [TSaveScriptExecutionResultActor] ExecutionId: 120af5d0-a5724874-bcd49a4a-ae1b64ea, reply SUCCESS, issues: 2025-05-07T08:57:12.552034Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:134: [TQueryBase] [TSaveScriptFinalStatusActor] TraceId: 120af5d0-a5724874-bcd49a4a-ae1b64ea, Bootstrap. 
Database: /dc-1 2025-05-07T08:57:12.552155Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1353: Session closed, sessionId: ydb://session/3?node_id=2&id=MWUwYTE2MGUtZWQ0ZTlkYmQtMjg0NTc1MzEtMmMwYTdkZjg=, workerId: [2:7501625238987098523:2379], local sessions count: 1 2025-05-07T08:57:12.552182Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1468: Request has 18444997465476.999447s seconds to be completed 2025-05-07T08:57:12.554446Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1543: Created new session, sessionId: ydb://session/3?node_id=2&id=MWVmMzMwOTgtOWUxZmFjMTAtNTk5YWUyZTYtZWRlYWNkMDE=, workerId: [2:7501625238987098563:2393], database: /dc-1, longSession: 1, local sessions count: 2 2025-05-07T08:57:12.554636Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:649: Received create session request, trace_id: 2025-05-07T08:57:12.554934Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:197: [TQueryBase] [TSaveScriptFinalStatusActor] TraceId: 120af5d0-a5724874-bcd49a4a-ae1b64ea, RunDataQuery: -- TSaveScriptFinalStatusActor::OnRunQuery DECLARE $database AS Text; DECLARE $execution_id AS Text; SELECT operation_status, finalization_status, meta, customer_supplied_id, user_token, script_sinks, script_secret_names FROM `.metadata/script_executions` WHERE database = $database AND execution_id = $execution_id AND (expire_at > CurrentUtcTimestamp() OR expire_at IS NULL); SELECT lease_generation FROM `.metadata/script_execution_leases` WHERE database = $database AND execution_id = $execution_id AND (expire_at > CurrentUtcTimestamp() OR expire_at IS NULL); 2025-05-07T08:57:12.555247Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:767: Ctx: { TraceId: , Database: /dc-1, DatabaseId: , SessionId: ydb://session/3?node_id=2&id=MWVmMzMwOTgtOWUxZmFjMTAtNTk5YWUyZTYtZWRlYWNkMDE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 300.000000s timeout: 0.000000s cancelAfter: 0.000000s. 
Send request to target, requestId: 12, targetId: [2:7501625238987098563:2393] 2025-05-07T08:57:12.555276Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1294: Scheduled timeout timer for requestId: 12 timeout: 300.000000s actor id: [2:7501625238987098565:3078] 2025-05-07T08:57:12.573844Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7501625217512260381:2060];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:57:12.573933Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=timeout; >> TSyncNeighborsTests::SerDes3 [GOOD] |90.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/syncer/ut/unittest >> TQuorumTrackerTests::ErasureMirror3IncludingMyFailDomain_4_2 [GOOD] >> TSyncBrokerTests::ShouldEnqueue ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/proxy_service/ut/unittest >> TableCreation::CreateOldTable [GOOD] Test command err: 2025-05-07T08:57:02.122574Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501625192542496896:2065];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:57:02.123166Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0031a1/r3tmp/tmpx9irSQ/pdisk_1.dat 2025-05-07T08:57:02.554919Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:57:02.609263Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:57:02.609376Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:57:02.616324Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:12957 TServer::EnableGrpc on GrpcPort 25265, node 1 2025-05-07T08:57:02.938729Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:57:02.938751Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:57:02.938758Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:57:02.938885Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-05-07T08:57:03.210618Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:57:03.226727Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-05-07T08:57:05.782606Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1665: Updated YQL logs priority to current level: 4 2025-05-07T08:57:05.784221Z node 1 :KQP_PROXY INFO: kqp_proxy_service.cpp:442: Cannot start publishing usage, tenants: /dc-1, empty 2025-05-07T08:57:05.804567Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:147: Table script_executions updater. Describe result: PathErrorUnknown 2025-05-07T08:57:05.804595Z node 1 :KQP_PROXY NOTICE: table_creator.cpp:167: Table script_executions updater. Creating table 2025-05-07T08:57:05.804648Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:100: Table script_executions updater. Full table path:/dc-1/.metadata/script_executions 2025-05-07T08:57:05.804746Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:147: Table script_execution_leases updater. Describe result: PathErrorUnknown 2025-05-07T08:57:05.804761Z node 1 :KQP_PROXY NOTICE: table_creator.cpp:167: Table script_execution_leases updater. Creating table 2025-05-07T08:57:05.804775Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:100: Table script_execution_leases updater. Full table path:/dc-1/.metadata/script_execution_leases 2025-05-07T08:57:05.804809Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:147: Table result_sets updater. Describe result: PathErrorUnknown 2025-05-07T08:57:05.804813Z node 1 :KQP_PROXY NOTICE: table_creator.cpp:167: Table result_sets updater. Creating table 2025-05-07T08:57:05.804834Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:100: Table result_sets updater. Full table path:/dc-1/.metadata/result_sets 2025-05-07T08:57:05.830369Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:512: Subscribed for config changes. 2025-05-07T08:57:05.830415Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:519: Updated table service config. 
2025-05-07T08:57:05.830449Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1665: Updated YQL logs priority to current level: 4 2025-05-07T08:57:05.831917Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:1, at schemeshard: 72057594046644480 2025-05-07T08:57:05.834107Z node 1 :KQP_PROXY INFO: kqp_proxy_service.cpp:442: Cannot start publishing usage, tenants: /dc-1, empty 2025-05-07T08:57:05.843754Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:425: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-05-07T08:57:05.843838Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:425: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-05-07T08:57:05.851480Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:425: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-05-07T08:57:05.851561Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:425: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-05-07T08:57:05.865719Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:190: Table script_executions updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976715658 SchemeShardStatus: 1 SchemeShardTabletId: 72057594046644480 PathId: 3 } 2025-05-07T08:57:05.865797Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:261: Table script_executions updater. Subscribe on create table tx: 281474976715658 2025-05-07T08:57:05.869259Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 2025-05-07T08:57:05.874657Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 2025-05-07T08:57:05.875872Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:190: Table script_execution_leases updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976715659 SchemeShardStatus: 1 SchemeShardTabletId: 72057594046644480 PathId: 4 } 2025-05-07T08:57:05.875896Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:261: Table script_execution_leases updater. Subscribe on create table tx: 281474976715659 2025-05-07T08:57:05.877313Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:190: Table result_sets updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976715660 SchemeShardStatus: 1 SchemeShardTabletId: 72057594046644480 PathId: 5 } 2025-05-07T08:57:05.877336Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:261: Table result_sets updater. Subscribe on create table tx: 281474976715660 2025-05-07T08:57:06.147778Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:290: Table script_executions updater. Request: create. Transaction completed: 281474976715658. Doublechecking... 2025-05-07T08:57:06.216489Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:290: Table result_sets updater. Request: create. Transaction completed: 281474976715660. Doublechecking... 2025-05-07T08:57:06.218898Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:362: Table script_executions updater. 
Column diff is empty, finishing 2025-05-07T08:57:06.243866Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:290: Table script_execution_leases updater. Request: create. Transaction completed: 281474976715659. Doublechecking... 2025-05-07T08:57:06.298315Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:362: Table result_sets updater. Column diff is empty, finishing 2025-05-07T08:57:06.333890Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:362: Table script_execution_leases updater. Column diff is empty, finishing 2025-05-07T08:57:06.334451Z node 1 :KQP_PROXY DEBUG: query_actor.cpp:134: [TQueryBase] [TCreateScriptOperationQuery] TraceId: c0bf5698-119dbe07-ca6f200a-91066bc2, Bootstrap. Database: /dc-1 2025-05-07T08:57:06.360496Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1468: Request has 18444997465483.191165s seconds to be completed 2025-05-07T08:57:06.364462Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1543: Created new session, sessionId: ydb://session/3?node_id=1&id=ZjM0MjA4MjMtYjZhOTk0Y2ItOGI0NzAyYjgtODY1YmZmMGY=, workerId: [1:7501625209722366964:2334], database: /dc-1, longSession: 1, local sessions count: 1 2025-05-07T08:57:06.364638Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:649: Received create session request, trace_id: 2025-05-07T08:57:06.366038Z node 1 :KQP_PROXY DEBUG: query_actor.cpp:197: [TQueryBase] [TCreateScriptOperationQuery] TraceId: c0bf5698-119dbe07-ca6f200a-91066bc2, RunDataQuery: -- TCreateScriptOperationQuery::OnRunQuery DECLARE $database AS Text; DECLARE $execution_id AS Text; DECLARE $run_script_actor_id AS Text; DECLARE $execution_status AS Int32; DECLARE $execution_mode AS Int32; DECLARE $query_text AS Text; DECLARE $syntax AS Int32; DECLARE $meta AS JsonDocument; DECLARE $lease_duration AS Interval; DECLARE $execution_meta_ttl AS Interval; UPSERT INTO `.metadata/script_executions` (database, execution_id, run_script_actor_id, execution_status, execution_mode, start_ts, query_text, syntax, meta, expire_at) VALUES ($database, $execution_id, $run_script_actor_id, $execution_status, $execution_mode, CurrentUtcTimestamp(), $query_text, $syntax, $meta, CurrentUtcTimestamp() + $execution_meta_ttl); UPSERT INTO `.metadata/script_execution_leases` (database, execution_id, lease_deadline, lease_generation, expire_at) VALUES ($database, $execution_id, CurrentUtcTimestamp() + $lease_duration, 1, CurrentUtcTimestamp() + $execution_meta_ttl); 2025-05-07T08:57:06.366803Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:767: Ctx: { TraceId: , Database: /dc-1, DatabaseId: , SessionId: ydb://session/3?node_id=1&id=ZjM0MjA4MjMtYjZhOTk0Y2ItOGI0NzAyYjgtODY1YmZmMGY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 300.000000s timeout: 0.000000s cancelAfter: 0.000000s. Send request to target, requestId: 3, targetId: [1:7501625209722366964:2334] 2025-05-07T08:57:06.366843Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1294: Scheduled timeout timer for requestId: 3 timeout: 300.000000s actor id: [1:7501625209722366966:2462] 2025-05-07T08:57:06.369552Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501625209722366967:2336], DatabaseId: /dc-1, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool ... T_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:57:12.339339Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /dc-1, status: NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:57:12.343102Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715661:2, at schemeshard: 72057594046644480 2025-05-07T08:57:12.355249Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715661, at schemeshard: 72057594046644480 2025-05-07T08:57:12.355739Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7501625235400846529:2339], DatabaseId: /dc-1, PoolId: default, Scheduled retry for error: {
<main>: Error: Transaction 281474976715661 completed, doublechecking } 2025-05-07T08:57:12.460994Z node 2 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [2:7501625235400846569:2489] txid# 281474976715662, issues: { message: "Check failed: path: \'/dc-1/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:57:12.654600Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:953: Forwarded response to sender actor, requestId: 3, sender: [2:7501625235400846513:2334], selfId: [2:7501625218220976680:2274], source: [2:7501625235400846512:2333] 2025-05-07T08:57:12.655185Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:240: [TQueryBase] [TCreateScriptOperationQuery] TraceId: 619dee4c-9d2677cb-16d111a3-62b42291, TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=MmU2OTk4Y2EtZGQ2ZTg5MjgtOGNlMzVjMWQtZmI3NjlkMmM=, TxId: 2025-05-07T08:57:12.655211Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:367: [TQueryBase] [TCreateScriptOperationQuery] TraceId: 619dee4c-9d2677cb-16d111a3-62b42291, Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=MmU2OTk4Y2EtZGQ2ZTg5MjgtOGNlMzVjMWQtZmI3NjlkMmM=, TxId: 2025-05-07T08:57:12.655223Z node 2 :KQP_PROXY DEBUG: kqp_script_executions.cpp:304: [ScriptExecutions] Create script execution operation. ExecutionId: 619dee4c-9d2677cb-16d111a3-62b42291. Result: SUCCESS. Issues: 2025-05-07T08:57:12.658582Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1543: Created new session, sessionId: ydb://session/3?node_id=2&id=OWZjNGM1YTUtNjc4NGE2Y2YtOTRkYmE2NDUtNDY0ODlkOQ==, workerId: [2:7501625235400846627:2352], database: dc-1, longSession: 1, local sessions count: 2 2025-05-07T08:57:12.658759Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:649: Received create session request, trace_id: 2025-05-07T08:57:12.658947Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1353: Session closed, sessionId: ydb://session/3?node_id=2&id=MmU2OTk4Y2EtZGQ2ZTg5MjgtOGNlMzVjMWQtZmI3NjlkMmM=, workerId: [2:7501625235400846512:2333], local sessions count: 1 2025-05-07T08:57:12.660501Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:767: Ctx: { TraceId: , Database: dc-1, DatabaseId: , SessionId: ydb://session/3?node_id=2&id=OWZjNGM1YTUtNjc4NGE2Y2YtOTRkYmE2NDUtNDY0ODlkOQ==, CurrentExecutionId: 619dee4c-9d2677cb-16d111a3-62b42291, CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 604800.000000s timeout: 604800.000000s cancelAfter: 0.000000s.
Send request to target, requestId: 5, targetId: [2:7501625235400846627:2352] 2025-05-07T08:57:12.660542Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1294: Scheduled timeout timer for requestId: 5 timeout: 604800.000000s actor id: [2:7501625235400846630:2520] 2025-05-07T08:57:12.681134Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1468: TraceId: "01jtmzd67840v8jmdms80ynngk", Request has 18444997465476.870511s seconds to be completed 2025-05-07T08:57:12.683333Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1543: TraceId: "01jtmzd67840v8jmdms80ynngk", Created new session, sessionId: ydb://session/3?node_id=2&id=MTZmOWU3ZGUtZTM5ODRjODctMzNlZTZlYTUtY2Y4YjEyNmY=, workerId: [2:7501625235400846643:2362], database: /dc-1, longSession: 1, local sessions count: 2 2025-05-07T08:57:12.683493Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:649: Received create session request, trace_id: 01jtmzd67840v8jmdms80ynngk 2025-05-07T08:57:12.686966Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:147: Table test_table updater. Describe result: PathErrorUnknown 2025-05-07T08:57:12.686989Z node 2 :KQP_PROXY NOTICE: table_creator.cpp:167: Table test_table updater. Creating table 2025-05-07T08:57:12.687031Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:100: Table test_table updater. Full table path:/dc-1/.test/test_table 2025-05-07T08:57:12.690526Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:1, at schemeshard: 72057594046644480 2025-05-07T08:57:12.692959Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:190: Table test_table updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976715664 SchemeShardStatus: 1 SchemeShardTabletId: 72057594046644480 PathId: 10 } 2025-05-07T08:57:12.692988Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:261: Table test_table updater. Subscribe on create table tx: 281474976715664 2025-05-07T08:57:12.757765Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:134: [TQueryBase] [TSaveScriptExecutionResultMetaQuery] TraceId: 619dee4c-9d2677cb-16d111a3-62b42291, Bootstrap. 
Database: /dc-1 2025-05-07T08:57:12.759154Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:953: Forwarded response to sender actor, requestId: 5, sender: [2:7501625235400846509:2457], selfId: [2:7501625218220976680:2274], source: [2:7501625235400846627:2352] 2025-05-07T08:57:12.759261Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1468: Request has 18444997465476.792368s seconds to be completed 2025-05-07T08:57:12.761716Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1543: Created new session, sessionId: ydb://session/3?node_id=2&id=ODIyNGExZTgtYmI0YzEzMDAtZjNiNDliZjgtMjMyOTYxMDg=, workerId: [2:7501625235400846706:2365], database: /dc-1, longSession: 1, local sessions count: 3 2025-05-07T08:57:12.761884Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:649: Received create session request, trace_id: 2025-05-07T08:57:12.762253Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:197: [TQueryBase] [TSaveScriptExecutionResultMetaQuery] TraceId: 619dee4c-9d2677cb-16d111a3-62b42291, RunDataQuery: -- TSaveScriptExecutionResultMetaQuery::OnRunQuery DECLARE $database AS Text; DECLARE $execution_id AS Text; DECLARE $result_set_metas AS JsonDocument; UPDATE `.metadata/script_executions` SET result_set_metas = $result_set_metas WHERE database = $database AND execution_id = $execution_id; 2025-05-07T08:57:12.763008Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:767: Ctx: { TraceId: , Database: /dc-1, DatabaseId: , SessionId: ydb://session/3?node_id=2&id=ODIyNGExZTgtYmI0YzEzMDAtZjNiNDliZjgtMjMyOTYxMDg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 300.000000s timeout: 0.000000s cancelAfter: 0.000000s. Send request to target, requestId: 8, targetId: [2:7501625235400846706:2365] 2025-05-07T08:57:12.763050Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1294: Scheduled timeout timer for requestId: 8 timeout: 300.000000s actor id: [2:7501625235400846708:2559] 2025-05-07T08:57:12.816945Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:290: Table test_table updater. Request: create. Transaction completed: 281474976715664. Doublechecking... 2025-05-07T08:57:12.890294Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table updater. Column diff is empty, finishing 2025-05-07T08:57:12.892287Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table updater. 
Column diff is empty, finishing 2025-05-07T08:57:12.959805Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1353: Session closed, sessionId: ydb://session/3?node_id=2&id=MTZmOWU3ZGUtZTM5ODRjODctMzNlZTZlYTUtY2Y4YjEyNmY=, workerId: [2:7501625235400846643:2362], local sessions count: 2 2025-05-07T08:57:13.075043Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:953: Forwarded response to sender actor, requestId: 8, sender: [2:7501625235400846707:2366], selfId: [2:7501625218220976680:2274], source: [2:7501625235400846706:2365] 2025-05-07T08:57:13.075304Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:240: [TQueryBase] [TSaveScriptExecutionResultMetaQuery] TraceId: 619dee4c-9d2677cb-16d111a3-62b42291, TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=ODIyNGExZTgtYmI0YzEzMDAtZjNiNDliZjgtMjMyOTYxMDg=, TxId: 2025-05-07T08:57:13.075329Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:367: [TQueryBase] [TSaveScriptExecutionResultMetaQuery] TraceId: 619dee4c-9d2677cb-16d111a3-62b42291, Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=ODIyNGExZTgtYmI0YzEzMDAtZjNiNDliZjgtMjMyOTYxMDg=, TxId: 2025-05-07T08:57:13.075530Z node 2 :KQP_PROXY DEBUG: kqp_script_executions.cpp:1907: [ScriptExecutions] [TSaveScriptExecutionResultActor] ExecutionId: 619dee4c-9d2677cb-16d111a3-62b42291, start saving rows range [0; 1) 2025-05-07T08:57:13.075596Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:134: [TQueryBase] [TSaveScriptExecutionResultQuery] TraceId: 619dee4c-9d2677cb-16d111a3-62b42291, Bootstrap. Database: /dc-1 2025-05-07T08:57:13.075697Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1353: Session closed, sessionId: ydb://session/3?node_id=2&id=ODIyNGExZTgtYmI0YzEzMDAtZjNiNDliZjgtMjMyOTYxMDg=, workerId: [2:7501625235400846706:2365], local sessions count: 1 2025-05-07T08:57:13.075753Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1468: Request has 18444997465476.475871s seconds to be completed 2025-05-07T08:57:13.077405Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1543: Created new session, sessionId: ydb://session/3?node_id=2&id=ZDY2ZTQ0N2ItNzEyZjg3Y2UtMmNhNTU1NGMtMzYzNGYzYQ==, workerId: [2:7501625239695814069:2378], database: /dc-1, longSession: 1, local sessions count: 2 2025-05-07T08:57:13.077511Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:649: Received create session request, trace_id: 2025-05-07T08:57:13.082919Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:197: [TQueryBase] [TSaveScriptExecutionResultQuery] TraceId: 619dee4c-9d2677cb-16d111a3-62b42291, RunDataQuery: -- TSaveScriptExecutionResultQuery::OnRunQuery DECLARE $database AS Text; DECLARE $execution_id AS Text; DECLARE $result_set_id AS Int32; DECLARE $expire_at AS Optional; DECLARE $items AS List>; UPSERT INTO `.metadata/result_sets` SELECT $database as database, $execution_id as execution_id, $result_set_id as result_set_id, T.row_id as row_id, $expire_at as expire_at, T.result_set as result_set, T.accumulated_size as accumulated_size FROM AS_TABLE($items) AS T; 2025-05-07T08:57:13.086753Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:767: Ctx: { TraceId: , Database: /dc-1, DatabaseId: , SessionId: ydb://session/3?node_id=2&id=ZDY2ZTQ0N2ItNzEyZjg3Y2UtMmNhNTU1NGMtMzYzNGYzYQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 300.000000s timeout: 0.000000s cancelAfter: 0.000000s. 
Send request to target, requestId: 10, targetId: [2:7501625239695814069:2378] 2025-05-07T08:57:13.086800Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1294: Scheduled timeout timer for requestId: 10 timeout: 300.000000s actor id: [2:7501625239695814071:2601] >> TSyncBrokerTests::ShouldEnqueue [GOOD] >> TSyncBrokerTests::ShouldEnqueueWithSameVDiskId >> TQuorumTrackerTests::ErasureNoneNeverHasQuorum_4_1 [GOOD] >> TQuorumTrackerTests::ErasureMirror3IncludingMyFailDomain_5_2 [GOOD] >> TSyncNeighborsTests::SerDes2 [GOOD] >> TSequence::AlterSequence [GOOD] >> TSequence::AlterTableSetDefaultFromSequence >> TSyncBrokerTests::ShouldEnqueueWithSameVDiskId [GOOD] >> KqpWorkloadServiceSubscriptions::TestResourcePoolSubscription [GOOD] >> KqpWorkloadServiceSubscriptions::TestResourcePoolSubscriptionAfterAlter |90.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/syncer/ut/unittest >> TSyncNeighborsTests::SerDes3 [GOOD] >> TNetClassifierTest::TestInitFromBadlyFormattedFile [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest >> ColumnStatistics::CountMinSketchStatistics [GOOD] Test command err: 2025-05-07T08:54:02.389514Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:451:2413], Scheduled retry for error: {
<main>: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:54:02.389798Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:54:02.389889Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003bfb/r3tmp/tmpmQ5WLt/pdisk_1.dat 2025-05-07T08:54:02.937962Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 4290, node 1 2025-05-07T08:54:03.455642Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:54:03.455706Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:54:03.455743Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:54:03.456225Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-05-07T08:54:03.461669Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-05-07T08:54:03.636284Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:54:03.636434Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:54:03.656903Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:21239 2025-05-07T08:54:04.324521Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-05-07T08:54:08.756734Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-05-07T08:54:08.826919Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:54:08.827042Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:54:08.872749Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-05-07T08:54:08.877473Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:54:09.168108Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-05-07T08:54:09.168693Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-05-07T08:54:09.169284Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed
2025-05-07T08:54:09.169461Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-05-07T08:54:09.169576Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-05-07T08:54:09.169834Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-05-07T08:54:09.169927Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-05-07T08:54:09.170038Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-05-07T08:54:09.170125Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-05-07T08:54:09.383225Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:54:09.383342Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:54:09.403889Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:54:09.625284Z node 2 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:54:09.756121Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-05-07T08:54:09.756261Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-05-07T08:54:09.830273Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-05-07T08:54:09.832082Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-05-07T08:54:09.832362Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-05-07T08:54:09.832441Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-05-07T08:54:09.832502Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-05-07T08:54:09.832586Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-05-07T08:54:09.832658Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-05-07T08:54:09.832722Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-05-07T08:54:09.833444Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-05-07T08:54:09.885236Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7823: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-05-07T08:54:09.885371Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7853: ConnectToSA(), pipe client id: [2:1868:2600], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-05-07T08:54:09.903453Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1878:2607] 2025-05-07T08:54:09.921155Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1921:2627] 
2025-05-07T08:54:09.921689Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1921:2627], schemeshard id = 72075186224037897 2025-05-07T08:54:09.922587Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-05-07T08:54:09.953555Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-05-07T08:54:09.953659Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-05-07T08:54:09.953749Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-05-07T08:54:09.981569Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897 2025-05-07T08:54:09.991487Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-05-07T08:54:09.991710Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-05-07T08:54:10.259226Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-05-07T08:54:10.489257Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-05-07T08:54:10.586818Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-05-07T08:54:11.893695Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2210:3056], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:54:11.896747Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:54:11.919660Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715659:0, at schemeshard: 72075186224037897 2025-05-07T08:54:12.129841Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2295:2836];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-05-07T08:54:12.130581Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2295:2836];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-05-07T08:54:12.131010Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2295:2836];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-05-07T08:54:12.131215Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2295:2836];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-05-07T08:54:12.131348Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2295:2836];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-05-07T08:54:12.131512Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2295:2836];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanInsertionDedup; 2025-05-07T08:54:12.131661Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2295:2836];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-05-07T08:54:12.131832Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2295:2836];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_reg ... chemeshard_stats.cpp:132: [72075186224037894] TTxSchemeShardStats::Complete 2025-05-07T08:56:56.437622Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-05-07T08:56:56.437732Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-05-07T08:56:56.447258Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 3] is data table. 2025-05-07T08:56:56.447394Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:723: [72075186224037894] ScheduleNextTraversal. Skip traversal for datashard table [OwnerId: 72075186224037897, LocalPathId: 3] 2025-05-07T08:56:56.457460Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap.
Database: /Root/Database 2025-05-07T08:56:56.685089Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DELETE FROM `.metadata/_statistics` WHERE owner_id = $owner_id AND local_path_id = $local_path_id; 2025-05-07T08:56:56.848200Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:6933:5142], DatabaseId: /Root/Database, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:56:56.848543Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:6943:5147], DatabaseId: /Root/Database, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:56:56.848957Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root/Database, status: NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:56:57.227769Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976720658:2, at schemeshard: 72075186224037897 2025-05-07T08:56:57.564137Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:6947:5150], DatabaseId: /Root/Database, PoolId: default, Scheduled retry for error: {
<main>: Error: Transaction 281474976720658 completed, doublechecking } 2025-05-07T08:56:57.928044Z node 2 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [2:7045:5197] txid# 281474976720659, issues: { message: "Check failed: path: \'/Root/Database/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72075186224037897, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:56:59.026198Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 1 ], ReplyToActorId[ [2:7074:5212]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-05-07T08:56:59.026832Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-05-07T08:56:59.026937Z node 2 :STATISTICS DEBUG: service_impl.cpp:1219: ConnectToSA(), pipe client id = [2:7076:5214] 2025-05-07T08:56:59.027636Z node 2 :STATISTICS DEBUG: service_impl.cpp:1248: SyncNode(), pipe client id = [2:7076:5214] 2025-05-07T08:56:59.028143Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:7077:5215] 2025-05-07T08:56:59.028253Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:7076:5214], server id = [2:7077:5215], tablet id = 72075186224037894, status = OK 2025-05-07T08:56:59.028326Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:133: [72075186224037894] EvConnectNode, pipe server id = [2:7077:5215], node id = 2, have schemeshards count = 0, need schemeshards count = 1 2025-05-07T08:56:59.029243Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:314: [72075186224037894] SendStatisticsToNode(), node id = 2, schemeshard count = 1 2025-05-07T08:56:59.030148Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-05-07T08:56:59.031076Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 1, ReplyToActorId = [2:7074:5212], StatRequests.size() = 1 2025-05-07T08:57:04.224025Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=ZGJiZTdjMmQtZTJhZDE3ZC0yY2EzMzZiMy1iMjI1OGE4Mg==, TxId: 2025-05-07T08:57:04.224137Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=ZGJiZTdjMmQtZTJhZDE3ZC0yY2EzMzZiMy1iMjI1OGE4Mg==, TxId: 2025-05-07T08:57:04.251128Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-05-07T08:57:04.275502Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037897, LocalPathId: 3] 2025-05-07T08:57:04.275593Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply.
2025-05-07T08:57:04.354706Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:217: [72075186224037894] EvFastPropagateCheck 2025-05-07T08:57:04.354819Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:357: [72075186224037894] PropagateFastStatistics(), node count = 0, schemeshard count = 0 2025-05-07T08:57:04.465549Z node 2 :STATISTICS DEBUG: service_impl.cpp:1189: EvRequestTimeout, pipe client id = [2:7076:5214], schemeshard count = 1 2025-05-07T08:57:07.066920Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-05-07T08:57:07.066997Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-05-07T08:57:07.067046Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 4] is column table. 2025-05-07T08:57:07.067110Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:732: [72075186224037894] Start schedule traversal navigate for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-05-07T08:57:07.087388Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-05-07T08:57:07.110206Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-05-07T08:57:07.110835Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-05-07T08:57:07.110944Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-05-07T08:57:07.146343Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:58: [72075186224037894] TTxResponseTabletDistribution::Execute. Node count = 1 2025-05-07T08:57:07.168089Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:92: [72075186224037894] TTxResponseTabletDistribution::Complete 2025-05-07T08:57:07.168358Z node 2 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 2, Round: 2, current Round: 0 2025-05-07T08:57:07.169073Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:7185:5274], server id = [2:7186:5275], tablet id = 72075186224037899, status = OK 2025-05-07T08:57:07.169522Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:7185:5274], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-05-07T08:57:07.434901Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037899 2025-05-07T08:57:07.435156Z node 2 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 2 2025-05-07T08:57:07.435581Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:7185:5274], server id = [2:7186:5275], tablet id = 72075186224037899 2025-05-07T08:57:07.435644Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-05-07T08:57:07.435773Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:27: [72075186224037894] TTxAggregateStatisticsResponse::Execute 2025-05-07T08:57:07.436066Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:118: [72075186224037894] TTxAggregateStatisticsResponse::Complete 2025-05-07T08:57:07.436536Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. 
Database: /Root/Database 2025-05-07T08:57:07.444593Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tags AS List; DECLARE $data AS List; UPSERT INTO `.metadata/_statistics` (owner_id, local_path_id, stat_type, column_tag, data) VALUES ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]), ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]); 2025-05-07T08:57:07.702573Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 2 ], ReplyToActorId[ [2:7206:5294]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-05-07T08:57:07.702828Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-05-07T08:57:07.702876Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 2, ReplyToActorId = [2:7206:5294], StatRequests.size() = 1 2025-05-07T08:57:08.233495Z node 2 :SYSTEM_VIEWS WARN: tx_interval_summary.cpp:212: [72075186224037891] TEvIntervalQuerySummary, time mismath: node id# 2, interval end# 1970-01-01T00:02:04.000000Z, event interval end# 2025-05-07T08:57:06.000000Z 2025-05-07T08:57:08.242710Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=YzBjMGE3ZDAtMmRjY2ViNS1kNjFhZDBhYy1lZDUzYTJm, TxId: 2025-05-07T08:57:08.242803Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=YzBjMGE3ZDAtMmRjY2ViNS1kNjFhZDBhYy1lZDUzYTJm, TxId: 2025-05-07T08:57:08.251084Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-05-07T08:57:08.262599Z node 1 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 1 ], ReplyToActorId[ [1:7219:5432]], StatType[ 2 ], StatRequestsCount[ 1 ] 2025-05-07T08:57:08.263057Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-05-07T08:57:08.263128Z node 1 :STATISTICS DEBUG: service_impl.cpp:812: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] resolve DatabasePath[ [OwnerId: 72057594046644480, LocalPathId: 2] ] 2025-05-07T08:57:08.265483Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-05-07T08:57:08.265560Z node 1 :STATISTICS DEBUG: service_impl.cpp:715: [TStatService::QueryStatistics] RequestId[ 1 ], Database[ Root/Database ], TablePath[ /Root/Database/.metadata/_statistics ] 2025-05-07T08:57:08.265648Z node 1 :STATISTICS DEBUG: service_impl.cpp:656: [TStatService::LoadStatistics] QueryId[ 1 ], PathId[ [OwnerId: 72075186224037897, LocalPathId: 4] ], StatType[ 2 ], ColumnTag[ 1 ] 2025-05-07T08:57:08.359227Z node 1 :STATISTICS DEBUG: service_impl.cpp:1152: TEvLoadStatisticsQueryResponse, request id = 1 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/address_classification/ut/unittest >> TNetClassifierTest::TestInitFromFile [GOOD] Test command err: 2025-05-07T08:57:10.476159Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501625227636237615:2064];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:57:10.476233Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path 
existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/001672/r3tmp/tmpd7ZIDE/pdisk_1.dat 2025-05-07T08:57:11.013079Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:57:11.013189Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:57:11.015679Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:57:11.060472Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:57:11.110862Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/zvgn/001672/r3tmp/yandexZsl6Wp.tmp 2025-05-07T08:57:11.110894Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/zvgn/001672/r3tmp/yandexZsl6Wp.tmp 2025-05-07T08:57:11.111199Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/zvgn/001672/r3tmp/yandexZsl6Wp.tmp 2025-05-07T08:57:11.111327Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration >> BackupRestoreS3::RestoreTableSplitBoundaries [GOOD] >> BackupRestoreS3::RestoreIndexTableSplitBoundaries |90.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/syncer/ut/unittest >> TQuorumTrackerTests::ErasureMirror3IncludingMyFailDomain_5_2 [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/syncer/ut/unittest >> TSyncBrokerTests::ShouldEnqueueWithSameVDiskId [GOOD] Test command err: 2025-05-07T08:57:15.503352Z node 1 :BS_SYNCER DEBUG: blobstorage_syncer_broker.cpp:64: TEvQuerySyncToken, VDisk actor id: [0:1:1], actor id: [1:5:2052], token sent, active: 1, waiting: 0 2025-05-07T08:57:15.503517Z node 1 :BS_SYNCER DEBUG: blobstorage_syncer_broker.cpp:90: TEvQuerySyncToken, VDisk actor id: [0:1:2], actor id: [1:6:2053], enqueued, active: 1, waiting: 1 2025-05-07T08:57:15.617165Z node 2 :BS_SYNCER DEBUG: blobstorage_syncer_broker.cpp:64: TEvQuerySyncToken, VDisk actor id: [0:1:1], actor id: [2:5:2052], token sent, active: 1, waiting: 0 2025-05-07T08:57:15.617366Z node 2 :BS_SYNCER DEBUG: blobstorage_syncer_broker.cpp:90: TEvQuerySyncToken, VDisk actor id: [0:1:2], actor id: [2:6:2053], enqueued, active: 1, waiting: 1 2025-05-07T08:57:15.617441Z node 2 :BS_SYNCER DEBUG: blobstorage_syncer_broker.cpp:79: TEvQuerySyncToken, VDisk actor id: [0:1:2], actor id: [2:7:2054], enqueued, active: 1, waiting: 1 |90.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/syncer/ut/unittest >> TSyncNeighborsTests::SerDes2 [GOOD] >> TEvLocalSyncDataTests::SqueezeBlocks1 [GOOD] >> TEvLocalSyncDataTests::SqueezeBlocks2 [GOOD] |90.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/ycloud/impl/ut/ydb-library-ycloud-impl-ut |90.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/library/ycloud/impl/ut/ydb-library-ycloud-impl-ut |90.8%| [LD] {RESULT} $(B)/ydb/library/ycloud/impl/ut/ydb-library-ycloud-impl-ut >> TSyncBrokerTests::ShouldReturnTokensWithSameVDiskId ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/address_classification/ut/unittest >> 
TNetClassifierTest::TestInitFromBadlyFormattedFile [GOOD] Test command err: 2025-05-07T08:57:12.066276Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501625239204683184:2140];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:57:12.068698Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/001632/r3tmp/tmpCDw0xh/pdisk_1.dat 2025-05-07T08:57:12.442048Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:57:12.455571Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/zvgn/001632/r3tmp/yandexivLCtZ.tmp 2025-05-07T08:57:12.455610Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/zvgn/001632/r3tmp/yandexivLCtZ.tmp 2025-05-07T08:57:12.455770Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:344: invalid NetData format 2025-05-07T08:57:12.455823Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: /home/runner/.ya/build/build_root/zvgn/001632/r3tmp/yandexivLCtZ.tmp 2025-05-07T08:57:12.455988Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-05-07T08:57:12.457889Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:57:12.458023Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:57:12.465411Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected >> TSyncBrokerTests::ShouldReturnTokensWithSameVDiskId [GOOD] >> TSyncNeighborsTests::SerDes1 [GOOD] >> TEvLocalSyncDataTests::SqueezeBlocks3 [GOOD] >> TQuorumTrackerTests::Erasure4Plus2BlockIncludingMyFailDomain_8_2 [GOOD] |90.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/syncer/ut/unittest >> TEvLocalSyncDataTests::SqueezeBlocks2 [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/syncer/ut/unittest >> TSyncNeighborsTests::SerDes1 [GOOD] Test command err: 2025-05-07T08:57:17.158806Z node 1 :BS_SYNCER DEBUG: blobstorage_syncer_broker.cpp:64: TEvQuerySyncToken, VDisk actor id: [0:1:1], actor id: [1:5:2052], token sent, active: 1, waiting: 0 2025-05-07T08:57:17.159025Z node 1 :BS_SYNCER DEBUG: blobstorage_syncer_broker.cpp:50: TEvQuerySyncToken, VDisk actor id: [0:1:1], actor id: [1:6:2053], token sent, active: 1, waiting: 0 >> BackupRestoreS3::TestAllPrimitiveTypes-INT64 [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-FLOAT >> AsyncIndexChangeExchange::SenderShouldBeActivatedOnTableWoIndexes |90.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/syncer/ut/unittest >> TQuorumTrackerTests::Erasure4Plus2BlockIncludingMyFailDomain_8_2 [GOOD] ------- [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/federated_query/generic_ut/unittest >> GenericFederatedQuery::IcebergHadoopSaFilterPushdown 2025-05-07 08:56:46,891 WARNING devtools.ya.test.programs.test_tool.run_test.run_test: Wrapper execution timed out 2025-05-07 08:56:47,065 WARNING 
devtools.ya.test.programs.test_tool.run_test.run_test: Wrapper has overrun 60 secs timeout. Process tree before termination: pid rss ref pdirt 217787 45.9M 46.0M 23.0M test_tool run_ut @/home/runner/.ya/build/build_root/zvgn/004875/ydb/core/kqp/ut/federated_query/generic_ut/test-results/unittest/testing_out_stuff/chunk2/testing_out_stuff/tes 217990 1.5G 1.5G 1.0G └─ ydb-core-kqp-ut-federated_query-generic_ut --trace-path-append /home/runner/.ya/build/build_root/zvgn/004875/ydb/core/kqp/ut/federated_query/generic_ut/test-results/unit Test command err: Trying to start YDB, gRPC: 11119, MsgBus: 17055 2025-05-07T08:55:49.948547Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501624879797068989:2191];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:55:49.948699Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/004875/r3tmp/tmpJWxlQE/pdisk_1.dat 2025-05-07T08:55:50.823020Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:55:50.823181Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:55:50.823548Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:55:50.826583Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 11119, node 1 2025-05-07T08:55:51.112314Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:55:51.112333Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:55:51.112338Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:55:51.112468Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:17055 TClient is connected to server localhost:17055 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-05-07T08:55:52.282617Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:55:54.950361Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501624879797068989:2191];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:55:54.950453Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:55:56.258884Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501624909861840609:2335], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:55:56.259011Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:55:56.696261Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:2, at schemeshard: 72057594046644480 2025-05-07T08:55:56.910697Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501624909861840729:2348], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:55:56.910850Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:55:56.911558Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501624909861840735:2351], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:55:56.915409Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:2, at schemeshard: 72057594046644480 2025-05-07T08:55:56.930013Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501624909861840737:2352], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
<main>: Error: Transaction 281474976710659 completed, doublechecking } 2025-05-07T08:55:57.021406Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501624914156808094:2410] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:55:57.899221Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T08:55:58.347253Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:1, at schemeshard: 72057594046644480 2025-05-07T08:55:58.975083Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480 2025-05-07T08:55:59.604628Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710676:0, at schemeshard: 72057594046644480 2025-05-07T08:56:00.471344Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710681:0, at schemeshard: 72057594046644480 2025-05-07T08:56:01.178230Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976715758:2, at schemeshard: 72057594046644480 2025-05-07T08:56:01.264339Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976715759:0, at schemeshard: 72057594046644480 2025-05-07T08:56:04.027779Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976710702:0, at schemeshard: 72057594046644480 2025-05-07T08:56:04.114437Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710704:0, at schemeshard: 72057594046644480 2025-05-07T08:56:04.116290Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710705:0, at schemeshard: 72057594046644480 2025-05-07T08:56:04.117819Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710703:0, at schemeshard: 72057594046644480 Call DescribeTable.
data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Expected: data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Actual: data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } DescribeTable result. GRpcStatusCode: 0 schema { columns { name: "col1" type { type_id: UINT16 } } } error { status: SUCCESS } Call ListSplits. selects { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } from { table: "example_1" } } CRAB Expected: selects { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } from { table: "example_1" } } CRAB Actual: selects { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" crede ... 
25-05-07T08:56:34.754412Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715670:1, at schemeshard: 72057594046644480 2025-05-07T08:56:35.879827Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715675:0, at schemeshard: 72057594046644480 2025-05-07T08:56:36.552793Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715680:0, at schemeshard: 72057594046644480 2025-05-07T08:56:37.320220Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715683:0, at schemeshard: 72057594046644480 2025-05-07T08:56:38.001469Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710758:2, at schemeshard: 72057594046644480 2025-05-07T08:56:38.111426Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710759:0, at schemeshard: 72057594046644480 2025-05-07T08:56:41.405798Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976715704:0, at schemeshard: 72057594046644480 Call DescribeTable. data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Expected: data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Actual: data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } DescribeTable result. GRpcStatusCode: 0 schema { columns { name: "col1" type { type_id: UINT16 } } columns { name: "col2" type { type_id: DOUBLE } } } error { status: SUCCESS } Call ListSplits. 
selects { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } from { table: "example_1" } } CRAB Expected: selects { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } from { table: "example_1" } } CRAB Actual: selects { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } from { table: "example_1" } } ListSplits result. GRpcStatusCode: 0 Call ReadSplits. splits { select { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } what { } from { table: "example_1" } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL CRAB Expected: splits { select { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } what { } from { table: "example_1" } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL CRAB Actual: splits { select { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } what { } from { table: "example_1" } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL ReadSplits result. 
GRpcStatusCode: 0 Trying to start YDB, gRPC: 13155, MsgBus: 29354 2025-05-07T08:56:43.656256Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7501625111254497284:2062];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:56:43.656370Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/004875/r3tmp/tmpWcppkH/pdisk_1.dat 2025-05-07T08:56:43.927586Z node 4 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 13155, node 4 2025-05-07T08:56:44.014215Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:56:44.014408Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:56:44.022065Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:56:44.059198Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:56:44.059225Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:56:44.059235Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:56:44.059407Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:29354 TClient is connected to server localhost:29354 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:56:44.887943Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 
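The "WaitRootIsUp 'Root'..." lines followed by a TClient::Ls request/response and "WaitRootIsUp 'Root' success." show the test client polling the scheme root until the freshly started server answers. A minimal sketch of that readiness loop, assuming a hypothetical describe_path helper standing in for the TClient::Ls call in the log:

    import time

    def wait_root_is_up(describe_path, timeout_s=30.0, interval_s=0.5):
        # Poll until the root path describes with SUCCESS or the deadline
        # passes; describe_path is a hypothetical stand-in for TClient::Ls.
        deadline = time.monotonic() + timeout_s
        while time.monotonic() < deadline:
            if describe_path("Root") == "SUCCESS":
                return
            time.sleep(interval_s)
        raise TimeoutError("Root did not come up within %.1fs" % timeout_s)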
2025-05-07T08:56:44.896433Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 Traceback (most recent call last): File "library/python/testing/yatest_common/yatest/common/process.py", line 384, in wait wait_for( File "library/python/testing/yatest_common/yatest/common/process.py", line 764, in wait_for raise TimeoutError(truncate(message, MAX_MESSAGE_LEN)) yatest.common.process.TimeoutError: 60 second(s) wait timeout has expired: Command '['/home/runner/.ya/tools/v4/8580453620/test_tool', 'run_ut', '@/home/runner/.ya/build/build_root/zvgn/004875/ydb/core/kqp/ut/federated_query/generic_ut/test-results/unittest/testing_out_stuff/chunk2/testing_out_stuff/test_tool.args']' stopped by 60 seconds timeout During handling of the above exception, another exception occurred: Traceback (most recent call last): File "devtools/ya/test/programs/test_tool/run_test/run_test.py", line 1738, in main res.wait(check_exit_code=False, timeout=run_timeout, on_timeout=timeout_callback) File "library/python/testing/yatest_common/yatest/common/process.py", line 398, in wait raise ExecutionTimeoutError(self, str(e)) yatest.common.process.ExecutionTimeoutError: (("60 second(s) wait timeout has expired: Command '['/home/runner/.ya/tools/v4/8580453620/test_tool', 'run_ut', '@/home/runner/.ya/build/build_root/zvgn/004875/ydb/core/kqp/ut/federated_query/generic_ut/test-results/unittest/testing_out_stuff/chunk2/testing_out_stuff/test_tool.args']' stopped by 60 seconds timeout",), {}) 2025-05-07 08:57:17,801 WARNING library.python.cores: Core dump dir doesn't exist: /coredumps 2025-05-07 08:57:17,801 WARNING library.python.cores: Core dump dir doesn't exist: /var/tmp/cores >> Cdc::UuidExchange[PqRunner] |90.8%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest |90.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_write/ydb-core-tx-datashard-ut_write |90.8%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_write/ydb-core-tx-datashard-ut_write >> Cdc::DocApi[PqRunner] |90.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_write/ydb-core-tx-datashard-ut_write >> JsonProtoConversion::ProtoMapToJson [GOOD] >> Cdc::KeysOnlyLog[PqRunner] >> JsonProtoConversion::JsonToProtoArray [GOOD] >> BackupRestore::TestAllPrimitiveTypes-TIMESTAMP [GOOD] >> BackupRestore::TestAllPrimitiveTypes-INTERVAL |90.8%| [TA] $(B)/ydb/core/blobstorage/vdisk/syncer/ut/test-results/unittest/{meta.json ... results_accumulator.log} |90.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/backup/impl/ut_table_writer/ydb-core-backup-impl-ut_table_writer |90.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/backup/impl/ut_table_writer/ydb-core-backup-impl-ut_table_writer |90.8%| [TA] {RESULT} $(B)/ydb/core/blobstorage/vdisk/syncer/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} |90.8%| [LD] {RESULT} $(B)/ydb/core/backup/impl/ut_table_writer/ydb-core-backup-impl-ut_table_writer >> TSequence::AlterTableSetDefaultFromSequence [GOOD] |90.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/http_proxy/ut/unittest >> JsonProtoConversion::ProtoMapToJson [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-UUID [GOOD] >> SystemView::AuthUsers_TableRange [GOOD] >> SystemView::AuthPermissions_ResultOrder ------- [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/federated_query/generic_ut/unittest >> GenericFederatedQuery::IcebergHiveSaFilterPushdown 2025-05-07 08:56:48,178 WARNING devtools.ya.test.programs.test_tool.run_test.run_test: Wrapper execution timed out 2025-05-07 08:56:48,394 WARNING devtools.ya.test.programs.test_tool.run_test.run_test: Wrapper has overrun 60 secs timeout. Process tree before termination: pid rss ref pdirt 217991 45.9M 45.9M 23.0M test_tool run_ut @/home/runner/.ya/build/build_root/zvgn/004863/ydb/core/kqp/ut/federated_query/generic_ut/test-results/unittest/testing_out_stuff/chunk5/testing_out_stuff/tes 218353 1.5G 1.5G 1.1G └─ ydb-core-kqp-ut-federated_query-generic_ut --trace-path-append /home/runner/.ya/build/build_root/zvgn/004863/ydb/core/kqp/ut/federated_query/generic_ut/test-results/unit Test command err: Trying to start YDB, gRPC: 5726, MsgBus: 12149 2025-05-07T08:55:51.368698Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501624890625431923:2127];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:55:51.368826Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/004863/r3tmp/tmpSkPbkk/pdisk_1.dat 2025-05-07T08:55:51.996644Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:55:52.040919Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:55:52.041017Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:55:52.044550Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 5726, node 1 2025-05-07T08:55:52.217489Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:55:52.217511Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:55:52.217518Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:55:52.217642Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:12149 TClient is connected to server localhost:12149 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:55:53.202545Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:55:53.250915Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T08:55:56.156143Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501624912100268999:2332], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T08:55:56.156248Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T08:55:56.370168Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501624890625431923:2127];send_to=[0:7307199536658146131:7762515];
2025-05-07T08:55:56.370273Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout;
2025-05-07T08:55:56.895577Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:2, at schemeshard: 72057594046644480
2025-05-07T08:55:57.081309Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501624916395236421:2346], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T08:55:57.081474Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T08:55:57.082088Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501624916395236427:2349], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T08:55:57.086282Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:2, at schemeshard: 72057594046644480
2025-05-07T08:55:57.100823Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501624916395236429:2350], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-05-07T08:55:57.159577Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501624916395236471:2404] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:55:58.056370Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T08:55:58.559010Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:1, at schemeshard: 72057594046644480 2025-05-07T08:55:59.124388Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710675:0, at schemeshard: 72057594046644480 2025-05-07T08:55:59.941885Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710678:0, at schemeshard: 72057594046644480 2025-05-07T08:56:01.050856Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710683:0, at schemeshard: 72057594046644480 2025-05-07T08:56:01.651837Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976715758:2, at schemeshard: 72057594046644480 2025-05-07T08:56:01.699491Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976715759:0, at schemeshard: 72057594046644480 2025-05-07T08:56:05.483378Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976710706:0, at schemeshard: 72057594046644480 2025-05-07T08:56:05.547045Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710707:0, at schemeshard: 72057594046644480 2025-05-07T08:56:05.549238Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710708:0, at schemeshard: 72057594046644480 2025-05-07T08:56:05.559465Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710709:0, at schemeshard: 72057594046644480 Call DescribeTable. 
data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Expected: data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Actual: data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } DescribeTable result. GRpcStatusCode: 0 schema { columns { name: "col1" type { type_id: UINT16 } } } error { status: SUCCESS } Call ListSplits. selects { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } from { table: "example_1" } } CRAB Expected: selects { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_option ... rt proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710680:0, at schemeshard: 72057594046644480 2025-05-07T08:56:36.264789Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710683:0, at schemeshard: 72057594046644480 2025-05-07T08:56:37.133962Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976715758:2, at schemeshard: 72057594046644480 2025-05-07T08:56:37.194251Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976715759:0, at schemeshard: 72057594046644480 2025-05-07T08:56:40.962555Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976710706:0, at schemeshard: 72057594046644480 Call DescribeTable. 
data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Expected: data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Actual: data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } DescribeTable result. GRpcStatusCode: 0 schema { columns { name: "col1" type { type_id: UINT16 } } columns { name: "col2" type { type_id: DOUBLE } } } error { status: SUCCESS } Call ListSplits. selects { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } from { table: "example_1" } } CRAB Expected: selects { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } from { table: "example_1" } } CRAB Actual: selects { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } from { table: "example_1" } } ListSplits result. GRpcStatusCode: 0 Call ReadSplits. 
splits { select { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } what { } from { table: "example_1" } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL CRAB Expected: splits { select { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } what { } from { table: "example_1" } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL CRAB Actual: splits { select { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } what { } from { table: "example_1" } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL ReadSplits result. GRpcStatusCode: 0 2025-05-07T08:56:41.698097Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7261: Cannot get console configs 2025-05-07T08:56:41.698132Z node 3 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded Trying to start YDB, gRPC: 12370, MsgBus: 20902 2025-05-07T08:56:43.494275Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7501625111119718590:2061];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:56:43.494583Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/004863/r3tmp/tmpWYXORd/pdisk_1.dat 2025-05-07T08:56:43.851914Z node 4 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:56:43.902408Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:56:43.902515Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:56:43.904674Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 12370, node 4 2025-05-07T08:56:44.154636Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:56:44.154671Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:56:44.154682Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:56:44.154823Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:20902 TClient is connected to server localhost:20902 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:56:45.303570Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:56:45.321210Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 Traceback (most recent call last): File "library/python/testing/yatest_common/yatest/common/process.py", line 384, in wait wait_for( File "library/python/testing/yatest_common/yatest/common/process.py", line 764, in wait_for raise TimeoutError(truncate(message, MAX_MESSAGE_LEN)) yatest.common.process.TimeoutError: 60 second(s) wait timeout has expired: Command '['/home/runner/.ya/tools/v4/8580453620/test_tool', 'run_ut', '@/home/runner/.ya/build/build_root/zvgn/004863/ydb/core/kqp/ut/federated_query/generic_ut/test-results/unittest/testing_out_stuff/chunk5/testing_out_stuff/test_tool.args']' stopped by 60 seconds timeout During handling of the above exception, another exception occurred: Traceback (most recent call last): File "devtools/ya/test/programs/test_tool/run_test/run_test.py", line 1738, in main res.wait(check_exit_code=False, timeout=run_timeout, on_timeout=timeout_callback) File "library/python/testing/yatest_common/yatest/common/process.py", line 398, in wait raise ExecutionTimeoutError(self, str(e)) yatest.common.process.ExecutionTimeoutError: (("60 second(s) wait timeout has expired: Command '['/home/runner/.ya/tools/v4/8580453620/test_tool', 'run_ut', '@/home/runner/.ya/build/build_root/zvgn/004863/ydb/core/kqp/ut/federated_query/generic_ut/test-results/unittest/testing_out_stuff/chunk5/testing_out_stuff/test_tool.args']' stopped by 60 seconds timeout",), {}) 2025-05-07 08:57:19,097 WARNING library.python.cores: Core dump dir doesn't exist: /coredumps 2025-05-07 08:57:19,097 WARNING library.python.cores: Core dump dir doesn't exist: /var/tmp/cores |90.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/http_proxy/ut/unittest >> JsonProtoConversion::JsonToProtoArray [GOOD] |90.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/http_proxy/ut/unittest |90.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/http_proxy/ut/unittest >> JsonProtoConversion::ProtoMapToJson_ReceiveMessageResult [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-TIMESTAMP [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-TZ_DATE [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-TZ_DATETIME [GOOD] >> 
BackupRestoreS3::TestAllPrimitiveTypes-TZ_TIMESTAMP [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-TIMESTAMP64 >> JsonProtoConversion::NlohmannJsonToProtoArray [GOOD] >> JsonProtoConversion::NlohmannJsonToProtoMap [GOOD] ------- [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/federated_query/generic_ut/unittest >> GenericFederatedQuery::IcebergHiveTokenFilterPushdown 2025-05-07 08:56:52,646 WARNING devtools.ya.test.programs.test_tool.run_test.run_test: Wrapper execution timed out 2025-05-07 08:56:52,929 WARNING devtools.ya.test.programs.test_tool.run_test.run_test: Wrapper has overrun 60 secs timeout. Process tree before termination: pid rss ref pdirt 219019 46.0M 46.0M 23.0M test_tool run_ut @/home/runner/.ya/build/build_root/zvgn/00485e/ydb/core/kqp/ut/federated_query/generic_ut/test-results/unittest/testing_out_stuff/chunk6/testing_out_stuff/tes 219099 1.6G 1.6G 1.1G └─ ydb-core-kqp-ut-federated_query-generic_ut --trace-path-append /home/runner/.ya/build/build_root/zvgn/00485e/ydb/core/kqp/ut/federated_query/generic_ut/test-results/unit Test command err: Trying to start YDB, gRPC: 9912, MsgBus: 17861 2025-05-07T08:55:55.037371Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501624901118104131:2144];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:55:55.046577Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00485e/r3tmp/tmpBm75x5/pdisk_1.dat 2025-05-07T08:55:55.651345Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:55:55.651467Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:55:55.654058Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:55:55.688666Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 9912, node 1 2025-05-07T08:55:55.811547Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:55:55.811575Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:55:55.811596Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:55:55.811721Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:17861 TClient is connected to server localhost:17861 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:55:56.620454Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:55:56.670510Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T08:55:59.402222Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501624922592941167:2332], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T08:55:59.402355Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T08:56:00.009809Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:2, at schemeshard: 72057594046644480
2025-05-07T08:56:00.042316Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501624901118104131:2144];send_to=[0:7307199536658146131:7762515];
2025-05-07T08:56:00.042406Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout;
2025-05-07T08:56:00.308571Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501624926887908585:2346], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T08:56:00.309136Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501624926887908590:2349], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T08:56:00.313326Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:2, at schemeshard: 72057594046644480
2025-05-07T08:56:00.318427Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T08:56:00.336879Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710659, at schemeshard: 72057594046644480
2025-05-07T08:56:00.339197Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501624926887908592:2350], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-05-07T08:56:00.408293Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501624926887908636:2401] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:56:01.423010Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T08:56:02.102480Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:1, at schemeshard: 72057594046644480 2025-05-07T08:56:03.054785Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710677:0, at schemeshard: 72057594046644480 2025-05-07T08:56:04.267351Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710682:0, at schemeshard: 72057594046644480 2025-05-07T08:56:05.197270Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710687:0, at schemeshard: 72057594046644480 2025-05-07T08:56:06.117011Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976715758:2, at schemeshard: 72057594046644480 2025-05-07T08:56:06.196990Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976715759:0, at schemeshard: 72057594046644480 2025-05-07T08:56:09.762725Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976710710:0, at schemeshard: 72057594046644480 2025-05-07T08:56:09.772819Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jtmzayn0dergbzh5qp9dh6em", SessionId: ydb://session/3?node_id=1&id=Y2I1ZmNjMDEtNTQ2ZmEyOWItYTNkMmNkNzEtMjEwNjJhOGY=, Slow query, duration: 10.377467s, status: SUCCESS, user: UNAUTHENTICATED, results: 0b, text: "\n CREATE OBJECT external_data_source_p (TYPE SECRET) WITH (value=qwerty12345);\n\n CREATE EXTERNAL DATA SOURCE external_data_source WITH (\n SOURCE_TYPE=\"Iceberg\",\n DATABASE_NAME=\"pgdb\",\n WAREHOUSE_TYPE=\"s3\",\n WAREHOUSE_S3_REGION=\"s3_region\",\n WAREHOUSE_S3_ENDPOINT=\"s3_endpoint\",\n WAREHOUSE_S3_URI=\"s3_uri\",\n \n AUTH_METHOD=\"TOKEN\",\n TOKEN_SECRET_NAME=\"external_data_source_p\"\n ,\n \n CATALOG_TYPE=\"hive_metastore\",\n CATALOG_HIVE_METASTORE_URI=\"hive_metastore_uri\"\n ,\n USE_TLS=\"FALSE\"\n );\n ", parameters: 0b 2025-05-07T08:56:09.799396Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, 
suboperation type: ESchemeOpCreateTable, opId: 281474976710711:0, at schemeshard: 72057594046644480 2025-05-07T08:56:09.801269Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710712:0, at schemeshard: 72057594046644480 2025-05-07T08:56:09.802731Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710713:0, at schemeshard: 72057594046644480 Call DescribeTable. data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Expected: data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Actual: data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { ... uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } from { table: "example_1" } } CRAB Actual: selects { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } from { table: "example_1" } } ListSplits result. GRpcStatusCode: 0 2025-05-07T08:56:43.231541Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7261: Cannot get console configs 2025-05-07T08:56:43.231574Z node 3 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded Call ReadSplits. 
splits { select { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } what { } from { table: "example_1" } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL CRAB Expected: splits { select { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } what { } from { table: "example_1" } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL CRAB Actual: splits { select { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } what { } from { table: "example_1" } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL ReadSplits result. GRpcStatusCode: 0 Trying to start YDB, gRPC: 24839, MsgBus: 3399 2025-05-07T08:56:44.852381Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7501625115773072578:2066];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:56:44.852429Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00485e/r3tmp/tmp0YripO/pdisk_1.dat 2025-05-07T08:56:45.070088Z node 4 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:56:45.074042Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:56:45.074143Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:56:45.075817Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 24839, node 4 2025-05-07T08:56:45.276728Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:56:45.276762Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:56:45.276772Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:56:45.276934Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3399 TClient is connected to server localhost:3399 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:56:46.243836Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:56:46.254880Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-05-07T08:56:49.856697Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7501625115773072578:2066];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:56:49.856806Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:56:49.995862Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7501625137247909709:2333], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T08:56:49.995997Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T08:56:50.017020Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:2, at schemeshard: 72057594046644480
2025-05-07T08:56:50.106673Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7501625141542877127:2346], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T08:56:50.107090Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T08:56:50.107664Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7501625141542877133:2349], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T08:56:50.112484Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:2, at schemeshard: 72057594046644480
2025-05-07T08:56:50.128238Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7501625141542877135:2350], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-05-07T08:56:50.212400Z node 4 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [4:7501625141542877175:2400] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:56:50.930696Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-05-07T08:56:51.778825Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715670:1, at schemeshard: 72057594046644480 2025-05-07T08:56:52.551708Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715675:0, at schemeshard: 72057594046644480 Traceback (most recent call last): File "library/python/testing/yatest_common/yatest/common/process.py", line 384, in wait wait_for( File "library/python/testing/yatest_common/yatest/common/process.py", line 764, in wait_for raise TimeoutError(truncate(message, MAX_MESSAGE_LEN)) yatest.common.process.TimeoutError: 60 second(s) wait timeout has expired: Command '['/home/runner/.ya/tools/v4/8580453620/test_tool', 'run_ut', '@/home/runner/.ya/build/build_root/zvgn/00485e/ydb/core/kqp/ut/federated_query/generic_ut/test-results/unittest/testing_out_stuff/chunk6/testing_out_stuff/test_tool.args']' stopped by 60 seconds timeout During handling of the above exception, another exception occurred: Traceback (most recent call last): File "devtools/ya/test/programs/test_tool/run_test/run_test.py", line 1738, in main res.wait(check_exit_code=False, timeout=run_timeout, on_timeout=timeout_callback) File "library/python/testing/yatest_common/yatest/common/process.py", line 398, in wait raise ExecutionTimeoutError(self, str(e)) yatest.common.process.ExecutionTimeoutError: (("60 second(s) wait timeout has expired: Command '['/home/runner/.ya/tools/v4/8580453620/test_tool', 'run_ut', '@/home/runner/.ya/build/build_root/zvgn/00485e/ydb/core/kqp/ut/federated_query/generic_ut/test-results/unittest/testing_out_stuff/chunk6/testing_out_stuff/test_tool.args']' stopped by 60 seconds timeout",), {}) ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_sequence/unittest >> TSequence::AlterTableSetDefaultFromSequence [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140] 2025-05-07T08:57:07.147912Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:57:07.148010Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, 
WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:57:07.148080Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:57:07.148145Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:57:07.148208Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:57:07.148241Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:57:07.148322Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:57:07.148392Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:57:07.149172Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:57:07.149567Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:57:07.266326Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:57:07.266397Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:57:07.285420Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:57:07.285687Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:57:07.285910Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:57:07.292898Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:57:07.293201Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:57:07.293959Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:57:07.294231Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:57:07.298213Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:57:07.299804Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:57:07.299881Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:57:07.299964Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:57:07.300012Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:57:07.300051Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:57:07.300337Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:57:07.308494Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T08:57:07.466242Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:57:07.466463Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:57:07.466702Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:57:07.466886Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:57:07.466931Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:57:07.474009Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:57:07.474193Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:57:07.474369Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:57:07.474429Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:57:07.474464Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:57:07.474597Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:57:07.487266Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:57:07.487358Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:57:07.487403Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:57:07.491007Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:57:07.491083Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:57:07.491156Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:57:07.491217Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:57:07.495958Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:57:07.499644Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:57:07.499936Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:57:07.501083Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:57:07.501230Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:57:07.501271Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:57:07.501628Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:57:07.501698Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:57:07.501886Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:57:07.502249Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:57:07.505615Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 
2025-05-07T08:57:07.505684Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:57:07.505915Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:57:07.505963Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 72057594046678944, ev# TxKind: TX_KIND_SCHEME Origin: 72075186233409549 Status: COMPLETE TxId: 114 Step: 5000014 OrderId: 114 ExecLatency: 0 ProposeLatency: 3 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409549 CpuTimeUsec: 1522 } } 2025-05-07T08:57:18.497036Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046678944 2025-05-07T08:57:18.499179Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269877761, Sender [7:1051:2989], Recipient [7:134:2157]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-05-07T08:57:18.499239Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4937: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-05-07T08:57:18.499268Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5713: Pipe server connected, at tablet: 72057594046678944 2025-05-07T08:57:18.499584Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269551620, Sender [7:987:2933], Recipient [7:134:2157]: NKikimrTxDataShard.TEvSchemaChanged Source { RawX1: 987 RawX2: 30064774005 } Origin: 72075186233409549 State: 2 TxId: 114 Step: 0 Generation: 2 2025-05-07T08:57:18.499649Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4872: StateWork, processing event TEvDataShard::TEvSchemaChanged 2025-05-07T08:57:18.499778Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5472: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 987 RawX2: 30064774005 } Origin: 72075186233409549 State: 2 TxId: 114 Step: 0 Generation: 2 2025-05-07T08:57:18.499841Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation FindRelatedPartByTabletId, TxId: 114, tablet: 72075186233409549, partId: 0 2025-05-07T08:57:18.500360Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 114:0, at schemeshard: 72057594046678944, message: Source { RawX1: 987 RawX2: 30064774005 } Origin: 72075186233409549 State: 2 TxId: 114 Step: 0 Generation: 2 2025-05-07T08:57:18.500451Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1005: NTableState::TProposedWaitParts operationId# 114:0 HandleReply TEvSchemaChanged at tablet: 72057594046678944 2025-05-07T08:57:18.500597Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1009: NTableState::TProposedWaitParts operationId# 114:0 HandleReply TEvSchemaChanged at tablet: 72057594046678944 message: Source { RawX1: 987 RawX2: 30064774005 } Origin: 72075186233409549 State: 2 TxId: 114 Step: 0 Generation: 2 2025-05-07T08:57:18.500706Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:655: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 114:0, shardIdx: 72057594046678944:4, datashard: 72075186233409549, left 
await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-05-07T08:57:18.500759Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:674: all shard schema changes has been received, operationId: 114:0, at schemeshard: 72057594046678944 2025-05-07T08:57:18.500807Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:686: send schema changes ack message, operation: 114:0, datashard: 72075186233409549, at schemeshard: 72057594046678944 2025-05-07T08:57:18.500860Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 114:0 129 -> 240 2025-05-07T08:57:18.501087Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046678944 2025-05-07T08:57:18.502194Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-05-07T08:57:18.502343Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 114 2025-05-07T08:57:18.502408Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-05-07T08:57:18.505529Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 114 2025-05-07T08:57:18.505583Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-05-07T08:57:18.505779Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 114:0, at schemeshard: 72057594046678944 2025-05-07T08:57:18.505815Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-05-07T08:57:18.505955Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 114:0, at schemeshard: 72057594046678944 2025-05-07T08:57:18.506046Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-05-07T08:57:18.506094Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:275: Activate send for 114:0 2025-05-07T08:57:18.506237Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:630: Send to actor: [7:987:2933] msg type: 269552132 msg: NKikimrTxDataShard.TEvSchemaChangedResult TxId: 114 at schemeshard: 72057594046678944 2025-05-07T08:57:18.506656Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 2146435072, Sender [7:134:2157], Recipient [7:134:2157]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-05-07T08:57:18.506716Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4857: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-05-07T08:57:18.506783Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 114:0, at schemeshard: 72057594046678944 2025-05-07T08:57:18.506840Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046678944] TDone opId# 114:0 ProgressState 2025-05-07T08:57:18.506977Z node 7 :FLAT_TX_SCHEMESHARD TRACE: 
schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046678944 2025-05-07T08:57:18.507015Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#114:0 progress is 1/1 2025-05-07T08:57:18.507064Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 114 ready parts: 1/1 2025-05-07T08:57:18.507121Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#114:0 progress is 1/1 2025-05-07T08:57:18.507166Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 114 ready parts: 1/1 2025-05-07T08:57:18.507218Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 114, ready parts: 1/1, is published: true 2025-05-07T08:57:18.507305Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [7:390:2358] message: TxId: 114 2025-05-07T08:57:18.507399Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 114 ready parts: 1/1 2025-05-07T08:57:18.507477Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 114:0 2025-05-07T08:57:18.507522Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 114:0 2025-05-07T08:57:18.507682Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 6] was 3 2025-05-07T08:57:18.511669Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-05-07T08:57:18.511845Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:630: Send to actor: [7:390:2358] msg type: 271124998 msg: NKikimrScheme.TEvNotifyTxCompletionResult TxId: 114 at schemeshard: 72057594046678944 2025-05-07T08:57:18.512058Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 114: got EvNotifyTxCompletionResult 2025-05-07T08:57:18.512112Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 114: satisfy waiter [7:1017:2955] 2025-05-07T08:57:18.512385Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269877764, Sender [7:1019:2957], Recipient [7:134:2157]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-05-07T08:57:18.512435Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4938: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-05-07T08:57:18.512462Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5761: Server pipe is reset, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 114 TestModificationResults wait txId: 115 2025-05-07T08:57:18.513825Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271122432, Sender [7:1058:2996], Recipient [7:134:2157]: {TEvModifySchemeTransaction txid# 115 TabletId# 72057594046678944} 2025-05-07T08:57:18.513904Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4851: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-05-07T08:57:18.518146Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: 
TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpAlterTable AlterTable { Name: "Table3" Columns { Name: "value" DefaultFromSequence: "/MyRoot/seq1" } } } TxId: 115 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:57:18.518496Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_table.cpp:508: TAlterTable Propose, path: /MyRoot/Table3, pathId: , opId: 115:0, at schemeshard: 72057594046678944 2025-05-07T08:57:18.519040Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 115:1, propose status:StatusInvalidParameter, reason: Column 'value' is of type Bool but default expression is of type Int64, at schemeshard: 72057594046678944 2025-05-07T08:57:18.519291Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046678944 2025-05-07T08:57:18.522353Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 115, response: Status: StatusInvalidParameter Reason: "Column \'value\' is of type Bool but default expression is of type Int64" TxId: 115 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:57:18.522596Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 115, database: /MyRoot, subject: , status: StatusInvalidParameter, reason: Column 'value' is of type Bool but default expression is of type Int64, operation: ALTER TABLE, path: /MyRoot/Table3 2025-05-07T08:57:18.522673Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 TestModificationResult got TxId: 115, wait until txId: 115 >> JsonProtoConversion::JsonToProtoSingleValue [GOOD] |90.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/http_proxy/ut/unittest >> JsonProtoConversion::ProtoMapToJson_ReceiveMessageResult [GOOD] >> KqpProxy::LoadedMetadataAfterCompilationTimeout [GOOD] >> KqpProxy::ExecuteScriptFailsWithoutFeatureFlag |90.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/http_proxy/ut/unittest >> JsonProtoConversion::NlohmannJsonToProtoMap [GOOD] |90.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/http_proxy/ut/unittest >> JsonProtoConversion::NlohmannJsonToProtoArray [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/backup_ut/unittest >> BackupRestoreS3::TestAllPrimitiveTypes-UUID [GOOD] Test command err: 2025-05-07T08:56:39.992084Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501625094321235150:2066];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:56:40.028602Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0020eb/r3tmp/tmpmyejxY/pdisk_1.dat 2025-05-07T08:56:41.094476Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:56:41.499806Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:56:41.517738Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) 
VolatileState: Unknown -> Disconnected 2025-05-07T08:56:41.517857Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:56:41.530775Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 27475, node 1 2025-05-07T08:56:41.794773Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:56:41.794795Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:56:41.794802Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:56:41.794925Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:31162 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:56:42.287558Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:56:44.952180Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501625094321235150:2066];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:56:44.952256Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:56:45.644187Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501625120091040061:2342], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:56:45.644327Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:56:45.967159Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:7501625098616202713:2117] Handle TEvProposeTransaction 2025-05-07T08:56:45.967195Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:7501625098616202713:2117] TxId# 281474976710658 ProcessProposeTransaction 2025-05-07T08:56:45.967246Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:7501625098616202713:2117] Cookie# 0 userReqId# "" txid# 281474976710658 SEND to# [1:7501625120091040107:2644] 2025-05-07T08:56:46.096027Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1520: Actor# [1:7501625120091040107:2644] txid# 281474976710658 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/Root" OperationType: ESchemeOpCreateTable CreateTable { Name: "table" Columns { Name: "Key" Type: "Uint32" NotNull: false } Columns { Name: "Value" Type: "Utf8" NotNull: false } KeyColumnNames: "Key" PartitionConfig { } Temporary: false } } } UserToken: "" DatabaseName: "" 2025-05-07T08:56:46.096096Z node 1 :TX_PROXY DEBUG: schemereq.cpp:563: Actor# [1:7501625120091040107:2644] txid# 281474976710658 Bootstrap, UserSID: CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-05-07T08:56:46.096438Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1585: Actor# [1:7501625120091040107:2644] txid# 281474976710658 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P6 2025-05-07T08:56:46.096520Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1575: Actor# [1:7501625120091040107:2644] txid# 281474976710658 TEvNavigateKeySet requested from SchemeCache 2025-05-07T08:56:46.097690Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1408: Actor# [1:7501625120091040107:2644] txid# 281474976710658 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-05-07T08:56:46.097865Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1455: Actor# [1:7501625120091040107:2644] HANDLE EvNavigateKeySetResult, txid# 281474976710658 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-05-07T08:56:46.097937Z node 1 :TX_PROXY DEBUG: schemereq.cpp:102: Actor# [1:7501625120091040107:2644] txid# 281474976710658 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976710658 TabletId# 72057594046644480} 2025-05-07T08:56:46.098134Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1310: Actor# [1:7501625120091040107:2644] txid# 281474976710658 HANDLE EvClientConnected 2025-05-07T08:56:46.107531Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 2025-05-07T08:56:46.111525Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1332: Actor# [1:7501625120091040107:2644] txid# 281474976710658 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976710658} 2025-05-07T08:56:46.111583Z node 1 :TX_PROXY DEBUG: schemereq.cpp:543: Actor# [1:7501625120091040107:2644] txid# 281474976710658 SEND to# [1:7501625120091040106:2346] Source {TEvProposeTransactionStatus txid# 281474976710658 Status# 53} 2025-05-07T08:56:46.334177Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501625124386007547:2354], DatabaseId: /Root, PoolId: 
default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:56:46.334261Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:56:46.426069Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:7501625098616202713:2117] Handle TEvProposeTransaction 2025-05-07T08:56:46.426102Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:7501625098616202713:2117] TxId# 281474976710659 ProcessProposeTransaction 2025-05-07T08:56:46.426145Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:7501625098616202713:2117] Cookie# 0 userReqId# "" txid# 281474976710659 SEND to# [1:7501625124386007560:2764] 2025-05-07T08:56:46.428803Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1520: Actor# [1:7501625124386007560:2764] txid# 281474976710659 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/Root" OperationType: ESchemeOpCreateCdcStream CreateCdcStream { TableName: "table" StreamDescription { Name: "a" Mode: ECdcStreamModeUpdate Format: ECdcStreamFormatJson VirtualTimestamps: false AwsRegion: "" } } } } UserToken: "" DatabaseName: "" PeerName: "" 2025-05-07T08:56:46.428838Z node 1 :TX_PROXY DEBUG: schemereq.cpp:563: Actor# [1:7501625124386007560:2764] txid# 281474976710659 Bootstrap, UserSID: CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-05-07T08:56:46.428922Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1575: Actor# [1:7501625124386007560:2764] txid# 281474976710659 TEvNavigateKeySet requested from SchemeCache 2025-05-07T08:56:46.429200Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1408: Actor# [1:7501625124386007560:2764] txid# 281474976710659 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-05-07T08:56:46.429300Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1455: Actor# [1:7501625124386007560:2764] HANDLE EvNavigateKeySetResult, txid# 281474976710659 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-05-07T08:56:46.429335Z node 1 :TX_PROXY DEBUG: schemereq.cpp:102: Actor# [1:7501625124386007560:2764] txid# 281474976710659 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976710659 TabletId# 72057594046644480} 2025-05-07T08:56:46.429490Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1310: Actor# [1:7501625124386007560:2764] txid# 281474976710659 HANDLE EvClientConnected 2025-05-07T08:56:46.443341Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1332: Actor# [1:7501625124386007560:2764] txid# 281474976710659 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976710659} 2025-05-07T08:56:46.443399Z node 1 :TX_PROXY DEBUG: schemereq.cpp:543: Actor# [1:7501625124386007560:2764] txid# 281474976710659 SEND to# [1:7501625124386007559:2359] Source {TEvProposeTransactionStatus txid# 281474976710659 Status# 53} 2025-05-07T08:56:46.577714Z node 1 :CHANGE_EXCHANGE WARN: change_sender_cdc_stream.cpp:398: [CdcChangeSenderMain][72075186224037888:1][1:7501625124386007734:2366] Failed entry at 'ResolveTopic': entry# { Path: TableId: [72057594046644480:4:0] RequestType: ByTableId Operation: OpTopic RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo } 2025-05-07T08:56:46.620554Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501625124386007840:2376], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:56:46.620636Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Servic ... Changefeeds TEvExternalStorage::TEvListObjectResponse: self# [10:7501625253245858538:2194], result# ListObjectsResult { } 2025-05-07T08:57:16.858530Z node 10 :IMPORT INFO: schemeshard_import_getters.cpp:587: Reply: self# [10:7501625253245858538:2194], success# 1, error# 2025-05-07T08:57:16.858654Z node 10 :IMPORT DEBUG: schemeshard_import__create.cpp:360: TImport::TTxProgress: DoExecute 2025-05-07T08:57:16.858672Z node 10 :IMPORT DEBUG: schemeshard_import__create.cpp:965: TImport::TTxProgress: OnSchemeResult: id# 281474976715665, itemIdx# 0, success# 1 2025-05-07T08:57:16.859062Z node 10 :IMPORT INFO: schemeshard_import__create.cpp:605: TImport::TTxProgress: Allocate txId: info# { Id: 281474976715665 Uid: '' Kind: S3 DomainPathId: [OwnerId: 72057594046644480, LocalPathId: 1] UserSID: '(empty maybe)' State: Waiting Issue: '' Items: 1 }, item# { Idx: 0 DstPathName: '/Root/UuidTable' DstPathId: State: CreateSchemeObject SubState: AllocateTxId WaitTxId: 0 Issue: '' } 2025-05-07T08:57:16.870509Z node 10 :IMPORT DEBUG: schemeshard_import__create.cpp:384: TImport::TTxProgress: DoComplete 2025-05-07T08:57:16.870639Z node 10 :IMPORT DEBUG: schemeshard_import__create.cpp:360: TImport::TTxProgress: DoExecute 2025-05-07T08:57:16.870653Z node 10 :IMPORT DEBUG: schemeshard_import__create.cpp:1180: TImport::TTxProgress: OnAllocateResult: txId# 281474976710760, id# 281474976715665 2025-05-07T08:57:16.870705Z node 10 :IMPORT INFO: schemeshard_import__create.cpp:417: TImport::TTxProgress: CreateTable propose: info# { Id: 281474976715665 Uid: '' Kind: S3 DomainPathId: [OwnerId: 72057594046644480, LocalPathId: 1] UserSID: '(empty maybe)' State: Waiting Issue: '' Items: 1 }, item# { Idx: 0 DstPathName: '/Root/UuidTable' DstPathId: State: CreateSchemeObject SubState: Proposed WaitTxId: 0 Issue: '' }, txId# 281474976710760 2025-05-07T08:57:16.870860Z node 10 :IMPORT DEBUG: schemeshard_import__create.cpp:384: TImport::TTxProgress: DoComplete 2025-05-07T08:57:16.872170Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710760:0, at schemeshard: 72057594046644480 2025-05-07T08:57:16.879033Z node 10 :IMPORT DEBUG: schemeshard_import__create.cpp:360: TImport::TTxProgress: DoExecute 2025-05-07T08:57:16.879064Z node 10 :IMPORT DEBUG: schemeshard_import__create.cpp:1267: TImport::TTxProgress: OnModifyResult: txId# 281474976710760, status# StatusAccepted 2025-05-07T08:57:16.879194Z node 10 :IMPORT INFO: schemeshard_import__create.cpp:619: TImport::TTxProgress: Wait for completion: info# { Id: 281474976715665 Uid: '' Kind: S3 DomainPathId: [OwnerId: 72057594046644480, LocalPathId: 1] UserSID: '(empty maybe)' State: Waiting Issue: '' Items: 1 }, item# { Idx: 0 DstPathName: '/Root/UuidTable' DstPathId: [OwnerId: 72057594046644480, LocalPathId: 9] State: CreateSchemeObject SubState: Subscribed WaitTxId: 281474976710760 Issue: '' } 2025-05-07T08:57:16.882543Z node 10 :TX_PROXY DEBUG: rpc_operation_request_base.h:50: [GetImport] [10:7501625253245858559:2399] [0] Resolve database: name# /Root 2025-05-07T08:57:16.883592Z node 10 :TX_PROXY DEBUG: rpc_operation_request_base.h:66: [GetImport] [10:7501625253245858559:2399] [0] Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult: request# { 
ErrorCount: 0 DatabaseName: /Root DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root TableId: [72057594046644480:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-05-07T08:57:16.883615Z node 10 :TX_PROXY DEBUG: rpc_operation_request_base.h:106: [GetImport] [10:7501625253245858559:2399] [0] Send request: schemeShardId# 72057594046644480 2025-05-07T08:57:16.887217Z node 10 :IMPORT DEBUG: schemeshard_import__create.cpp:384: TImport::TTxProgress: DoComplete 2025-05-07T08:57:16.887823Z node 10 :TX_PROXY DEBUG: rpc_get_operation.cpp:220: [GetImport] [10:7501625253245858559:2399] [0] Handle TEvImport::TEvGetImportResponse: record# Entry { Id: 281474976715665 Status: SUCCESS Progress: PROGRESS_PREPARING ImportFromS3Settings { endpoint: "localhost:23923" scheme: HTTP bucket: "test_bucket" items { source_prefix: "UuidTable" destination_path: "/Root/UuidTable" } } StartTime { seconds: 1746608236 } } 2025-05-07T08:57:16.967354Z node 10 :IMPORT DEBUG: schemeshard_import__create.cpp:360: TImport::TTxProgress: DoExecute 2025-05-07T08:57:16.967383Z node 10 :IMPORT DEBUG: schemeshard_import__create.cpp:1425: TImport::TTxProgress: OnNotifyResult: txId# 281474976710760 2025-05-07T08:57:16.967474Z node 10 :IMPORT INFO: schemeshard_import__create.cpp:605: TImport::TTxProgress: Allocate txId: info# { Id: 281474976715665 Uid: '' Kind: S3 DomainPathId: [OwnerId: 72057594046644480, LocalPathId: 1] UserSID: '(empty maybe)' State: Waiting Issue: '' Items: 1 }, item# { Idx: 0 DstPathName: '/Root/UuidTable' DstPathId: [OwnerId: 72057594046644480, LocalPathId: 9] State: Transferring SubState: AllocateTxId WaitTxId: 0 Issue: '' } 2025-05-07T08:57:16.969228Z node 10 :IMPORT DEBUG: schemeshard_import__create.cpp:384: TImport::TTxProgress: DoComplete 2025-05-07T08:57:16.969353Z node 10 :IMPORT DEBUG: schemeshard_import__create.cpp:360: TImport::TTxProgress: DoExecute 2025-05-07T08:57:16.969367Z node 10 :IMPORT DEBUG: schemeshard_import__create.cpp:1180: TImport::TTxProgress: OnAllocateResult: txId# 281474976710761, id# 281474976715665 2025-05-07T08:57:16.969416Z node 10 :IMPORT INFO: schemeshard_import__create.cpp:496: TImport::TTxProgress: Restore propose: info# { Id: 281474976715665 Uid: '' Kind: S3 DomainPathId: [OwnerId: 72057594046644480, LocalPathId: 1] UserSID: '(empty maybe)' State: Waiting Issue: '' Items: 1 }, item# { Idx: 0 DstPathName: '/Root/UuidTable' DstPathId: [OwnerId: 72057594046644480, LocalPathId: 9] State: Transferring SubState: Proposed WaitTxId: 0 Issue: '' }, txId# 281474976710761 2025-05-07T08:57:16.970151Z node 10 :IMPORT DEBUG: schemeshard_import__create.cpp:384: TImport::TTxProgress: DoComplete 2025-05-07T08:57:16.970591Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpRestore, opId: 281474976710761:0, at schemeshard: 72057594046644480 2025-05-07T08:57:16.972666Z node 10 :IMPORT DEBUG: schemeshard_import__create.cpp:360: TImport::TTxProgress: DoExecute 2025-05-07T08:57:16.972682Z node 10 :IMPORT DEBUG: schemeshard_import__create.cpp:1267: TImport::TTxProgress: 
OnModifyResult: txId# 281474976710761, status# StatusAccepted 2025-05-07T08:57:16.972775Z node 10 :IMPORT INFO: schemeshard_import__create.cpp:619: TImport::TTxProgress: Wait for completion: info# { Id: 281474976715665 Uid: '' Kind: S3 DomainPathId: [OwnerId: 72057594046644480, LocalPathId: 1] UserSID: '(empty maybe)' State: Waiting Issue: '' Items: 1 }, item# { Idx: 0 DstPathName: '/Root/UuidTable' DstPathId: [OwnerId: 72057594046644480, LocalPathId: 9] State: Transferring SubState: Subscribed WaitTxId: 281474976710761 Issue: '' } 2025-05-07T08:57:16.975920Z node 10 :IMPORT DEBUG: schemeshard_import__create.cpp:384: TImport::TTxProgress: DoComplete REQUEST: HEAD /test_bucket/UuidTable/data_00.csv HTTP/1.1 HEADERS: Host: localhost:23923 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 67754016-5B67-48FD-BD9B-CB11DD5C7899 amz-sdk-request: attempt=1 authorization: AWS4-HMAC-SHA256 Credential=test_key/20250507/us-east-1/s3/aws4_request, SignedHeaders=amz-sdk-invocation-id;amz-sdk-request;content-type;host;x-amz-api-version;x-amz-content-sha256;x-amz-date, Signature=90e2953a99e95e73779b59465ca92de0fbabab811b13e2655a6a339a4cceccad content-type: application/xml user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-api-version: 2006-03-01 x-amz-content-sha256: e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 x-amz-date: 20250507T085717Z S3_MOCK::HttpServeRead: /test_bucket/UuidTable/data_00.csv / 39 REQUEST: GET /test_bucket/UuidTable/data_00.csv HTTP/1.1 HEADERS: Host: localhost:23923 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 21BE20CF-85C9-4EC3-8F9D-C02BDEDC0D1D amz-sdk-request: attempt=1 authorization: AWS4-HMAC-SHA256 Credential=test_key/20250507/us-east-1/s3/aws4_request, SignedHeaders=amz-sdk-invocation-id;amz-sdk-request;content-type;host;range;x-amz-api-version;x-amz-content-sha256;x-amz-date, Signature=707305a7d56c44f8e19a405cea8925b55552b47bdd1c75e0e63859af8e850642 content-type: application/xml range: bytes=0-38 user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-api-version: 2006-03-01 x-amz-content-sha256: e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 x-amz-date: 20250507T085717Z S3_MOCK::HttpServeRead: /test_bucket/UuidTable/data_00.csv / 39 2025-05-07T08:57:17.081944Z node 10 :IMPORT DEBUG: schemeshard_import__create.cpp:360: TImport::TTxProgress: DoExecute 2025-05-07T08:57:17.081993Z node 10 :IMPORT DEBUG: schemeshard_import__create.cpp:1425: TImport::TTxProgress: OnNotifyResult: txId# 281474976710761 2025-05-07T08:57:17.088504Z node 10 :IMPORT DEBUG: schemeshard_import__create.cpp:384: TImport::TTxProgress: DoComplete 2025-05-07T08:57:17.311946Z node 10 :TX_PROXY DEBUG: rpc_operation_request_base.h:50: [GetImport] [10:7501625257540826053:2407] [0] Resolve database: name# /Root 2025-05-07T08:57:17.312905Z node 10 :TX_PROXY DEBUG: rpc_operation_request_base.h:66: [GetImport] [10:7501625257540826053:2407] [0] Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult: request# { ErrorCount: 0 DatabaseName: /Root DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root TableId: [72057594046644480:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, 
LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-05-07T08:57:17.312944Z node 10 :TX_PROXY DEBUG: rpc_operation_request_base.h:106: [GetImport] [10:7501625257540826053:2407] [0] Send request: schemeShardId# 72057594046644480 2025-05-07T08:57:17.313825Z node 10 :TX_PROXY DEBUG: rpc_get_operation.cpp:220: [GetImport] [10:7501625257540826053:2407] [0] Handle TEvImport::TEvGetImportResponse: record# Entry { Id: 281474976715665 Status: SUCCESS Progress: PROGRESS_DONE ImportFromS3Settings { endpoint: "localhost:23923" scheme: HTTP bucket: "test_bucket" items { source_prefix: "UuidTable" destination_path: "/Root/UuidTable" } } StartTime { seconds: 1746608236 } EndTime { seconds: 1746608237 } } 2025-05-07T08:57:17.437705Z node 10 :TX_PROXY DEBUG: proxy_impl.cpp:353: actor# [10:7501625223181085929:2138] Handle TEvExecuteKqpTransaction 2025-05-07T08:57:17.437739Z node 10 :TX_PROXY DEBUG: proxy_impl.cpp:342: actor# [10:7501625223181085929:2138] TxId# 281474976715666 ProcessProposeKqpTransaction 2025-05-07T08:57:17.438838Z node 10 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715666. Ctx: { TraceId: 01jtmzdar93bre9ffa18zmatrp, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=10&id=NGYzNDgxZTMtOTQ2Njc2NTMtZTlkMTFkNWEtOTMzMWRlODE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root |90.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/http_proxy/ut/unittest >> JsonProtoConversion::JsonToProtoSingleValue [GOOD] |90.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/http_proxy/ut/unittest >> BuildStatsHistogram::Many_Mixed [GOOD] >> BuildStatsHistogram::Many_Serial >> BackupRestore::TestAllPrimitiveTypes-UINT8 [GOOD] >> BackupRestore::TestAllPrimitiveTypes-UTF8 |90.8%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest >> ScriptExecutionsTest::UpdatesLeaseAfterExpiring [GOOD] >> JsonProtoConversion::JsonToProtoMap [GOOD] |90.9%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest |90.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/http_proxy/ut/unittest >> JsonProtoConversion::JsonToProtoMap [GOOD] >> TableWriter::Restore [GOOD] |90.9%| [TA] $(B)/ydb/core/http_proxy/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} >> AsyncIndexChangeExchange::SenderShouldBeActivatedOnTableWoIndexes [GOOD] >> AsyncIndexChangeExchange::SenderShouldBeActivatedOnTableWithSyncIndex >> TableWriter::Backup [GOOD] |90.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/backup/impl/ut_table_writer/unittest >> TableWriter::Restore [GOOD] >> SystemView::TabletsRangesPredicateExtractDisabled [GOOD] >> BackupRestore::TestAllPrimitiveTypes-INT32 [GOOD] >> BackupRestore::TestAllPrimitiveTypes-INT64 >> KqpWorkloadServiceSubscriptions::TestResourcePoolSubscriptionAfterAlter [GOOD] >> KqpWorkloadServiceSubscriptions::TestResourcePoolSubscriptionAfterAclChange |90.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/backup/impl/ut_table_writer/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/proxy_service/ut/unittest >> ScriptExecutionsTest::UpdatesLeaseAfterExpiring [GOOD] Test command err: 2025-05-07T08:57:03.508527Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501625197027945106:2061];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:57:03.508638Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003176/r3tmp/tmpKYbREa/pdisk_1.dat 2025-05-07T08:57:04.146498Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:57:04.151760Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:57:04.151876Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:57:04.155403Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:30047 TServer::EnableGrpc on GrpcPort 2796, node 1 2025-05-07T08:57:04.650661Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:57:04.650685Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:57:04.650697Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:57:04.650808Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... 
(TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-05-07T08:57:04.867918Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:57:04.888143Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T08:57:07.948942Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1665: Updated YQL logs priority to current level: 4 2025-05-07T08:57:07.950666Z node 1 :KQP_PROXY INFO: kqp_proxy_service.cpp:442: Cannot start publishing usage, tenants: /dc-1, empty 2025-05-07T08:57:07.971169Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:425: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-05-07T08:57:07.971223Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:425: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-05-07T08:57:07.971313Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:512: Subscribed for config changes. 2025-05-07T08:57:07.971342Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:519: Updated table service config. 2025-05-07T08:57:07.971373Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1665: Updated YQL logs priority to current level: 4 2025-05-07T08:57:07.972468Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:425: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-05-07T08:57:07.974051Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:147: Table script_executions updater. Describe result: PathErrorUnknown 2025-05-07T08:57:07.974066Z node 1 :KQP_PROXY NOTICE: table_creator.cpp:167: Table script_executions updater. Creating table 2025-05-07T08:57:07.974102Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:100: Table script_executions updater. Full table path:/dc-1/.metadata/script_executions 2025-05-07T08:57:07.974203Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:147: Table script_execution_leases updater. Describe result: PathErrorUnknown 2025-05-07T08:57:07.974210Z node 1 :KQP_PROXY NOTICE: table_creator.cpp:167: Table script_execution_leases updater. Creating table 2025-05-07T08:57:07.974222Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:100: Table script_execution_leases updater. Full table path:/dc-1/.metadata/script_execution_leases 2025-05-07T08:57:07.974274Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:147: Table result_sets updater. Describe result: PathErrorUnknown 2025-05-07T08:57:07.974278Z node 1 :KQP_PROXY NOTICE: table_creator.cpp:167: Table result_sets updater. Creating table 2025-05-07T08:57:07.974300Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:100: Table result_sets updater. 
Full table path:/dc-1/.metadata/result_sets 2025-05-07T08:57:07.977609Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:1, at schemeshard: 72057594046644480 2025-05-07T08:57:07.979503Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 2025-05-07T08:57:07.981841Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 2025-05-07T08:57:07.989597Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:190: Table script_execution_leases updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976710659 SchemeShardStatus: 1 SchemeShardTabletId: 72057594046644480 PathId: 3 } 2025-05-07T08:57:07.989672Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:261: Table script_execution_leases updater. Subscribe on create table tx: 281474976710659 2025-05-07T08:57:07.990065Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:190: Table result_sets updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976710660 SchemeShardStatus: 1 SchemeShardTabletId: 72057594046644480 PathId: 4 } 2025-05-07T08:57:07.990089Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:261: Table result_sets updater. Subscribe on create table tx: 281474976710660 2025-05-07T08:57:07.993128Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:190: Table script_executions updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976710658 SchemeShardStatus: 1 SchemeShardTabletId: 72057594046644480 PathId: 5 } 2025-05-07T08:57:07.993174Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:261: Table script_executions updater. Subscribe on create table tx: 281474976710658 2025-05-07T08:57:08.138934Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:290: Table script_execution_leases updater. Request: create. Transaction completed: 281474976710659. Doublechecking... 2025-05-07T08:57:08.196474Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:290: Table script_executions updater. Request: create. Transaction completed: 281474976710658. Doublechecking... 2025-05-07T08:57:08.199033Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:290: Table result_sets updater. Request: create. Transaction completed: 281474976710660. Doublechecking... 2025-05-07T08:57:08.214700Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:362: Table script_execution_leases updater. Column diff is empty, finishing 2025-05-07T08:57:08.266354Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:362: Table result_sets updater. Column diff is empty, finishing 2025-05-07T08:57:08.284054Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:362: Table script_executions updater. Column diff is empty, finishing 2025-05-07T08:57:08.284606Z node 1 :KQP_PROXY DEBUG: query_actor.cpp:134: [TQueryBase] [TCreateScriptOperationQuery] TraceId: 50367dce-df3ac362-c96fcd92-50d25da7, Bootstrap. 
Database: /dc-1 2025-05-07T08:57:08.295002Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1468: Request has 18444997465481.256641s seconds to be completed 2025-05-07T08:57:08.297559Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1543: Created new session, sessionId: ydb://session/3?node_id=1&id=YTY2YzI0NjItNGJhNjAxZTktZjBmMzUzMWItODMxZWI1N2I=, workerId: [1:7501625218502782483:2334], database: /dc-1, longSession: 1, local sessions count: 1 2025-05-07T08:57:08.297659Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:649: Received create session request, trace_id: 2025-05-07T08:57:08.298574Z node 1 :KQP_PROXY DEBUG: query_actor.cpp:197: [TQueryBase] [TCreateScriptOperationQuery] TraceId: 50367dce-df3ac362-c96fcd92-50d25da7, RunDataQuery: -- TCreateScriptOperationQuery::OnRunQuery DECLARE $database AS Text; DECLARE $execution_id AS Text; DECLARE $run_script_actor_id AS Text; DECLARE $execution_status AS Int32; DECLARE $execution_mode AS Int32; DECLARE $query_text AS Text; DECLARE $syntax AS Int32; DECLARE $meta AS JsonDocument; DECLARE $lease_duration AS Interval; DECLARE $execution_meta_ttl AS Interval; UPSERT INTO `.metadata/script_executions` (database, execution_id, run_script_actor_id, execution_status, execution_mode, start_ts, query_text, syntax, meta, expire_at) VALUES ($database, $execution_id, $run_script_actor_id, $execution_status, $execution_mode, CurrentUtcTimestamp(), $query_text, $syntax, $meta, CurrentUtcTimestamp() + $execution_meta_ttl); UPSERT INTO `.metadata/script_execution_leases` (database, execution_id, lease_deadline, lease_generation, expire_at) VALUES ($database, $execution_id, CurrentUtcTimestamp() + $lease_duration, 1, CurrentUtcTimestamp() + $execution_meta_ttl); 2025-05-07T08:57:08.299126Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:767: Ctx: { TraceId: , Database: /dc-1, DatabaseId: , SessionId: ydb://session/3?node_id=1&id=YTY2YzI0NjItNGJhNjAxZTktZjBmMzUzMWItODMxZWI1N2I=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 300.000000s timeout: 0.000000s cancelAfter: 0.000000s. Send request to target, requestId: 3, targetId: [1:7501625218502782483:2334] 2025-05-07T08:57:08.299172Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1294: Scheduled timeout timer for requestId: 3 timeout: 300.000000s actor id: [1:7501625218502782485:2464] 2025-05-07T08:57:08.301314Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501625218502782497:2339], DatabaseId: /dc-1, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:57:08.301315Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501625218502782486:2336], DatabaseId: /dc-1, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
... al; DECLARE $ast_compression_method AS Optional; DECLARE $operation_ttl AS Interval; DECLARE $customer_supplied_id AS Text; DECLARE $user_token AS Text; DECLARE $script_sinks AS Optional; DECLARE $script_secret_names AS Optional; DECLARE $applicate_script_external_effect_required AS Bool; UPDATE `.metadata/script_executions` SET operation_status = $operation_status, execution_status = $execution_status, finalization_status = IF($applicate_script_external_effect_required, $finalization_status, NULL), issues = $issues, plan = $plan, end_ts = CurrentUtcTimestamp(), stats = $stats, ast = $ast, ast_compressed = $ast_compressed, ast_compression_method = $ast_compression_method, expire_at = IF($operation_ttl > CAST(0 AS Interval), CurrentUtcTimestamp() + $operation_ttl, NULL), customer_supplied_id = IF($applicate_script_external_effect_required, $customer_supplied_id, NULL), user_token = IF($applicate_script_external_effect_required, $user_token, NULL), script_sinks = IF($applicate_script_external_effect_required, $script_sinks, NULL), script_secret_names = IF($applicate_script_external_effect_required, $script_secret_names, NULL) WHERE database = $database AND execution_id = $execution_id; DELETE FROM `.metadata/script_execution_leases` WHERE database = $database AND execution_id = $execution_id; 2025-05-07T08:57:18.943805Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:767: Ctx: { TraceId: , Database: /dc-1, DatabaseId: , SessionId: ydb://session/3?node_id=2&id=ODU1MjQ2ZDItNDgwMjZiZDEtYjk2YmQ4NTgtYzc0M2YzNzE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 300.000000s timeout: 0.000000s cancelAfter: 0.000000s. Send request to target, requestId: 18, targetId: [2:7501625262077482582:2397] 2025-05-07T08:57:18.943834Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1294: Scheduled timeout timer for requestId: 18 timeout: 300.000000s actor id: [2:7501625262077482654:2596] 2025-05-07T08:57:19.095232Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:953: Forwarded response to sender actor, requestId: 17, sender: [2:7501625262077482625:2411], selfId: [2:7501625236307677692:2070], source: [2:7501625262077482624:2410] 2025-05-07T08:57:19.096893Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:240: [TQueryBase] [TScriptLeaseUpdater] TraceId: 84079dad-da852b36-2f96ea7f-6daca3d5, State: Get lease info, TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=MzZkYTljYTAtMTllOTk0ZTMtYTYxZmFmYjAtZGI2Njg4NA==, TxId: 01jtmzdcfe8xe1bx05s01k1ybc 2025-05-07T08:57:19.097058Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:197: [TQueryBase] [TScriptLeaseUpdater] TraceId: 84079dad-da852b36-2f96ea7f-6daca3d5, State: Get lease info, RunDataQuery: -- TScriptLeaseUpdater::OnGetLeaseInfo DECLARE $database AS Text; DECLARE $execution_id AS Text; DECLARE $lease_duration AS Interval; UPDATE `.metadata/script_execution_leases` SET lease_deadline=(CurrentUtcTimestamp() + $lease_duration) WHERE database = $database AND execution_id = $execution_id; 2025-05-07T08:57:19.099104Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:767: Ctx: { TraceId: , Database: /dc-1, DatabaseId: , SessionId: ydb://session/3?node_id=2&id=MzZkYTljYTAtMTllOTk0ZTMtYTYxZmFmYjAtZGI2Njg4NA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 300.000000s timeout: 0.000000s cancelAfter: 0.000000s. 
Send request to target, requestId: 19, targetId: [2:7501625262077482624:2410] 2025-05-07T08:57:19.099159Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1294: Scheduled timeout timer for requestId: 19 timeout: 300.000000s actor id: [2:7501625266372449981:2609] 2025-05-07T08:57:19.303191Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:953: Forwarded response to sender actor, requestId: 19, sender: [2:7501625266372449980:2431], selfId: [2:7501625236307677692:2070], source: [2:7501625262077482624:2410] 2025-05-07T08:57:19.303975Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:240: [TQueryBase] [TScriptLeaseUpdater] TraceId: 84079dad-da852b36-2f96ea7f-6daca3d5, State: Update lease, TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=MzZkYTljYTAtMTllOTk0ZTMtYTYxZmFmYjAtZGI2Njg4NA==, TxId: 2025-05-07T08:57:19.304034Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:367: [TQueryBase] [TScriptLeaseUpdater] TraceId: 84079dad-da852b36-2f96ea7f-6daca3d5, State: Update lease, Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=MzZkYTljYTAtMTllOTk0ZTMtYTYxZmFmYjAtZGI2Njg4NA==, TxId: 2025-05-07T08:57:19.309224Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1353: Session closed, sessionId: ydb://session/3?node_id=2&id=MzZkYTljYTAtMTllOTk0ZTMtYTYxZmFmYjAtZGI2Njg4NA==, workerId: [2:7501625262077482624:2410], local sessions count: 3 2025-05-07T08:57:19.314554Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:767: Ctx: { TraceId: 01jtmzdcphck2a233zgk218zvp, Database: /dc-1, DatabaseId: , SessionId: ydb://session/3?node_id=2&id=NzMxNGZmZGEtOWU5YTE5NTEtZDJiMmM1ZWUtNTBmZDUyYjk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 300.000000s timeout: 0.000000s cancelAfter: 0.000000s. Send request to target, requestId: 20, targetId: [2:7501625257782515175:2362] 2025-05-07T08:57:19.314616Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1294: Scheduled timeout timer for requestId: 20 timeout: 300.000000s actor id: [2:7501625266372450008:2618] 2025-05-07T08:57:19.610974Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:953: Forwarded response to sender actor, requestId: 18, sender: [2:7501625262077482653:2420], selfId: [2:7501625236307677692:2070], source: [2:7501625262077482582:2397] 2025-05-07T08:57:19.611571Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:240: [TQueryBase] [TSaveScriptFinalStatusActor] TraceId: 1addc91b-b24fb98e-6cd30859-c5ba1ec, State: Update final status, TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=ODU1MjQ2ZDItNDgwMjZiZDEtYjk2YmQ4NTgtYzc0M2YzNzE=, TxId: 2025-05-07T08:57:19.611650Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:367: [TQueryBase] [TSaveScriptFinalStatusActor] TraceId: 1addc91b-b24fb98e-6cd30859-c5ba1ec, State: Update final status, Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=ODU1MjQ2ZDItNDgwMjZiZDEtYjk2YmQ4NTgtYzc0M2YzNzE=, TxId: 2025-05-07T08:57:19.611664Z node 2 :KQP_PROXY DEBUG: kqp_script_executions.cpp:2628: [ScriptExecutions] Finish script execution operation. ExecutionId: 1addc91b-b24fb98e-6cd30859-c5ba1ec. SUCCESS. 
Issues: 2025-05-07T08:57:19.612633Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1353: Session closed, sessionId: ydb://session/3?node_id=2&id=ODU1MjQ2ZDItNDgwMjZiZDEtYjk2YmQ4NTgtYzc0M2YzNzE=, workerId: [2:7501625262077482582:2397], local sessions count: 2 2025-05-07T08:57:19.620774Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1353: Session closed, sessionId: ydb://session/3?node_id=2&id=ZjFlMzUyY2MtZjdjYjg5MjQtMTllNzQzOWItMjM1MjljNjQ=, workerId: [2:7501625257782515159:2352], local sessions count: 1 2025-05-07T08:57:19.762360Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:425: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-05-07T08:57:20.390921Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:953: TraceId: "01jtmzdcphck2a233zgk218zvp", Forwarded response to sender actor, requestId: 20, sender: [2:7501625266372450007:2439], selfId: [2:7501625236307677692:2070], source: [2:7501625257782515175:2362] 2025-05-07T08:57:20.399496Z node 2 :KQP_PROXY DEBUG: kqp_script_executions.cpp:791: [ScriptExecutions] [TCheckLeaseStatusActor] ExecutionId: 84079dad-da852b36-2f96ea7f-6daca3d5, Bootstrap. Start TCheckLeaseStatusQueryActor 2025-05-07T08:57:20.399571Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:134: [TQueryBase] [TCheckLeaseStatusQueryActor] TraceId: 84079dad-da852b36-2f96ea7f-6daca3d5, Bootstrap. Database: /dc-1 2025-05-07T08:57:20.401194Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1468: Request has 18444997465469.150460s seconds to be completed 2025-05-07T08:57:20.403631Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1543: Created new session, sessionId: ydb://session/3?node_id=2&id=ZmRmMzRmOGQtYjJmM2U3NWMtNGNlNmFlNWUtZDhkYTQ3NTc=, workerId: [2:7501625270667417372:2457], database: /dc-1, longSession: 1, local sessions count: 2 2025-05-07T08:57:20.403881Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:649: Received create session request, trace_id: 2025-05-07T08:57:20.404234Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:197: [TQueryBase] [TCheckLeaseStatusQueryActor] TraceId: 84079dad-da852b36-2f96ea7f-6daca3d5, RunDataQuery: -- TCheckLeaseStatusQueryActor::OnRunQuery DECLARE $database AS Text; DECLARE $execution_id AS Text; SELECT operation_status, execution_status, finalization_status, issues, run_script_actor_id FROM `.metadata/script_executions` WHERE database = $database AND execution_id = $execution_id AND (expire_at > CurrentUtcTimestamp() OR expire_at IS NULL); SELECT lease_deadline FROM `.metadata/script_execution_leases` WHERE database = $database AND execution_id = $execution_id AND (expire_at > CurrentUtcTimestamp() OR expire_at IS NULL); 2025-05-07T08:57:20.404645Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:767: Ctx: { TraceId: , Database: /dc-1, DatabaseId: , SessionId: ydb://session/3?node_id=2&id=ZmRmMzRmOGQtYjJmM2U3NWMtNGNlNmFlNWUtZDhkYTQ3NTc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 300.000000s timeout: 0.000000s cancelAfter: 0.000000s. 
Send request to target, requestId: 22, targetId: [2:7501625270667417372:2457] 2025-05-07T08:57:20.404686Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1294: Scheduled timeout timer for requestId: 22 timeout: 300.000000s actor id: [2:7501625270667417374:2642] 2025-05-07T08:57:20.908168Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:953: Forwarded response to sender actor, requestId: 22, sender: [2:7501625270667417373:2458], selfId: [2:7501625236307677692:2070], source: [2:7501625270667417372:2457] 2025-05-07T08:57:20.908404Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:240: [TQueryBase] [TCheckLeaseStatusQueryActor] TraceId: 84079dad-da852b36-2f96ea7f-6daca3d5, TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=ZmRmMzRmOGQtYjJmM2U3NWMtNGNlNmFlNWUtZDhkYTQ3NTc=, TxId: 2025-05-07T08:57:20.908541Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:367: [TQueryBase] [TCheckLeaseStatusQueryActor] TraceId: 84079dad-da852b36-2f96ea7f-6daca3d5, Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=ZmRmMzRmOGQtYjJmM2U3NWMtNGNlNmFlNWUtZDhkYTQ3NTc=, TxId: 2025-05-07T08:57:20.908606Z node 2 :KQP_PROXY DEBUG: kqp_script_executions.cpp:838: [ScriptExecutions] [TCheckLeaseStatusActor] ExecutionId: 84079dad-da852b36-2f96ea7f-6daca3d5, reply success 2025-05-07T08:57:20.909108Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1353: Session closed, sessionId: ydb://session/3?node_id=2&id=ZmRmMzRmOGQtYjJmM2U3NWMtNGNlNmFlNWUtZDhkYTQ3NTc=, workerId: [2:7501625270667417372:2457], local sessions count: 1 2025-05-07T08:57:20.936141Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1353: Session closed, sessionId: ydb://session/3?node_id=2&id=NzMxNGZmZGEtOWU5YTE5NTEtZDJiMmM1ZWUtNTBmZDUyYjk=, workerId: [2:7501625257782515175:2362], local sessions count: 0 |90.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/backup/impl/ut_table_writer/unittest |90.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/services/ydb/ut/ydb-services-ydb-ut |90.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/services/ydb/ut/ydb-services-ydb-ut |90.9%| [TA] {RESULT} $(B)/ydb/core/http_proxy/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} |90.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/backup/impl/ut_table_writer/unittest >> TableWriter::Backup [GOOD] |90.9%| [LD] {RESULT} $(B)/ydb/services/ydb/ut/ydb-services-ydb-ut |90.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/backup/impl/ut_table_writer/unittest >> Cdc::UuidExchange[PqRunner] [GOOD] >> Cdc::UuidExchange[YdsRunner] |90.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_serverless/ydb-core-tx-schemeshard-ut_serverless |90.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/backup/impl/ut_table_writer/unittest |90.9%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_serverless/ydb-core-tx-schemeshard-ut_serverless |90.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_serverless/ydb-core-tx-schemeshard-ut_serverless >> Cdc::KeysOnlyLog[PqRunner] [GOOD] >> Cdc::KeysOnlyLog[YdsRunner] |90.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/backup/impl/ut_table_writer/unittest |90.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/backup/impl/ut_table_writer/unittest |90.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/backup/impl/ut_table_writer/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/sys_view/ut/unittest >> SystemView::TabletsRangesPredicateExtractDisabled [GOOD] Test command err: 2025-05-07T08:55:13.967281Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501624726484030022:2142];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:55:13.967349Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0045a9/r3tmp/tmpQ1ittU/pdisk_1.dat 2025-05-07T08:55:14.582337Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:55:14.587243Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:55:14.587358Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:55:14.595881Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 7706, node 1 2025-05-07T08:55:14.758503Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:55:14.758533Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:55:14.758541Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:55:14.758673Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:11474 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:55:15.142228Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:55:15.196848Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:55:15.222803Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:55:15.222875Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:55:15.229925Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 5 Cookie 5 2025-05-07T08:55:15.238324Z node 1 :SYSTEM_VIEWS DEBUG: sysview_service.cpp:658: Handle TEvPrivate::TEvProcessCounters: service id# [1:7501624726484029835:2072] waiting... 
2025-05-07T08:55:15.279094Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:55:15.292576Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:55:15.292654Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:55:15.300946Z node 1 :SYSTEM_VIEWS DEBUG: sysview_service.cpp:669: Handle TEvPrivate::TEvProcessLabeledCounters: service id# [1:7501624726484029835:2072] 2025-05-07T08:55:15.305728Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 4 Cookie 4 2025-05-07T08:55:15.307653Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:55:15.405305Z node 1 :SYSTEM_VIEWS DEBUG: sysview_service.cpp:669: Handle TEvPrivate::TEvProcessLabeledCounters: service id# [1:7501624726484030094:2208] 2025-05-07T08:55:15.422552Z node 5 :SYSTEM_VIEWS INFO: processor_impl.cpp:41: [72075186224037893] OnActivateExecutor 2025-05-07T08:55:15.422616Z node 5 :SYSTEM_VIEWS DEBUG: tx_init_schema.cpp:15: [72075186224037893] TTxInitSchema::Execute 2025-05-07T08:55:15.452492Z node 2 :SYSTEM_VIEWS DEBUG: sysview_service.cpp:658: Handle TEvPrivate::TEvProcessCounters: service id# [2:7501624727676237444:2063] 2025-05-07T08:55:15.457658Z node 3 :SYSTEM_VIEWS DEBUG: sysview_service.cpp:669: Handle TEvPrivate::TEvProcessLabeledCounters: service id# [3:7501624727519769445:2063] 2025-05-07T08:55:15.583899Z node 5 :SYSTEM_VIEWS DEBUG: partition_stats.cpp:32: NSysView::TPartitionStatsCollector bootstrapped 2025-05-07T08:55:15.584010Z node 5 :SYSTEM_VIEWS DEBUG: sysview_service.cpp:778: Handle TEvSysView::TEvRegisterDbCounters: service id# [5:7501624733812741216:2077], path id# [OwnerId: 72057594046644480, LocalPathId: 2], service# 2 2025-05-07T08:55:15.589252Z node 5 :SYSTEM_VIEWS DEBUG: tx_init_schema.cpp:42: [72075186224037893] TTxInitSchema::Complete 2025-05-07T08:55:15.589368Z node 5 :SYSTEM_VIEWS DEBUG: tx_init.cpp:136: [72075186224037893] TTxInit::Execute 2025-05-07T08:55:15.590885Z node 5 :SYSTEM_VIEWS DEBUG: tx_init.cpp:257: [72075186224037893] Loading interval summaries: query count# 0, node ids count# 0, total count# 0 2025-05-07T08:55:15.590929Z node 5 :SYSTEM_VIEWS DEBUG: tx_init.cpp:284: [72075186224037893] Loading interval metrics: query count# 0 2025-05-07T08:55:15.590953Z node 5 :SYSTEM_VIEWS DEBUG: tx_init.cpp:362: [72075186224037893] Loading interval query tops: total query count# 0 2025-05-07T08:55:15.593698Z node 5 :SYSTEM_VIEWS DEBUG: tx_init.cpp:408: [72075186224037893] Loading nodes to request: nodes count# 0, hashes count# 0 2025-05-07T08:55:15.593774Z node 5 :SYSTEM_VIEWS DEBUG: tx_init.cpp:51: [72075186224037893] Loading results: table# 6, result count# 0 2025-05-07T08:55:15.593817Z node 5 :SYSTEM_VIEWS DEBUG: tx_init.cpp:51: [72075186224037893] Loading results: table# 7, result count# 0 2025-05-07T08:55:15.593850Z node 5 :SYSTEM_VIEWS DEBUG: tx_init.cpp:51: [72075186224037893] Loading results: table# 8, result count# 0 2025-05-07T08:55:15.593894Z node 5 :SYSTEM_VIEWS DEBUG: tx_init.cpp:51: [72075186224037893] Loading results: table# 9, result count# 0 2025-05-07T08:55:15.593947Z node 5 :SYSTEM_VIEWS DEBUG: tx_init.cpp:51: [72075186224037893] Loading results: table# 10, result count# 0 2025-05-07T08:55:15.594466Z node 5 
:SYSTEM_VIEWS DEBUG: tx_init.cpp:51: [72075186224037893] Loading results: table# 11, result count# 0 2025-05-07T08:55:15.594520Z node 5 :SYSTEM_VIEWS DEBUG: tx_init.cpp:51: [72075186224037893] Loading results: table# 12, result count# 0 2025-05-07T08:55:15.594557Z node 5 :SYSTEM_VIEWS DEBUG: tx_init.cpp:51: [72075186224037893] Loading results: table# 13, result count# 0 2025-05-07T08:55:15.594622Z node 5 :SYSTEM_VIEWS DEBUG: tx_init.cpp:51: [72075186224037893] Loading results: table# 14, result count# 0 2025-05-07T08:55:15.594673Z node 5 :SYSTEM_VIEWS DEBUG: tx_init.cpp:51: [72075186224037893] Loading results: table# 15, result count# 0 2025-05-07T08:55:15.594324Z node 4 :SYSTEM_VIEWS DEBUG: sysview_service.cpp:658: Handle TEvPrivate::TEvProcessCounters: service id# [4:7501624727025479489:2063] 2025-05-07T08:55:15.594723Z node 5 :SYSTEM_VIEWS DEBUG: tx_init.cpp:129: [72075186224037893] Loading results: table# 16, partCount count# 0 2025-05-07T08:55:15.594814Z node 5 :SYSTEM_VIEWS DEBUG: tx_init.cpp:129: [72075186224037893] Loading results: table# 19, partCount count# 0 2025-05-07T08:55:15.594861Z node 5 :SYSTEM_VIEWS DEBUG: tx_init.cpp:82: [72075186224037893] Loading results: table# 17, result count# 0 2025-05-07T08:55:15.594939Z node 5 :SYSTEM_VIEWS DEBUG: tx_init.cpp:82: [72075186224037893] Loading results: table# 18, result count# 0 2025-05-07T08:55:15.595105Z node 5 :SYSTEM_VIEWS DEBUG: tx_init.cpp:82: [72075186224037893] Loading results: table# 20, result count# 0 2025-05-07T08:55:15.595839Z node 5 :SYSTEM_VIEWS DEBUG: tx_init.cpp:82: [72075186224037893] Loading results: table# 21, result count# 0 2025-05-07T08:55:15.596064Z node 5 :SYSTEM_VIEWS DEBUG: processor_impl.cpp:333: [72075186224037893] Reset: interval end# 2025-05-07T08:55:15.000000Z 2025-05-07T08:55:15.597554Z node 5 :SYSTEM_VIEWS INFO: sysview_service.cpp:860: Navigate by path id succeeded: service id# [5:7501624733812741216:2077], path id# [OwnerId: 72057594046644480, LocalPathId: 2], database# /Root/Tenant1 2025-05-07T08:55:15.603528Z node 5 :SYSTEM_VIEWS INFO: sysview_service.cpp:886: Navigate by database succeeded: service id# [5:7501624733812741216:2077], database# /Root/Tenant1, no sysview processor 2025-05-07T08:55:15.608903Z node 5 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:55:15.622455Z node 5 :SYSTEM_VIEWS DEBUG: tx_init.cpp:488: [72075186224037893] TTxInit::Complete 2025-05-07T08:55:15.622946Z node 5 :SYSTEM_VIEWS DEBUG: tx_aggregate.cpp:14: [72075186224037893] TTxAggregate::Execute 2025-05-07T08:55:15.623019Z node 5 :SYSTEM_VIEWS DEBUG: processor_impl.cpp:136: [72075186224037893] PersistQueryResults: interval end# 2025-05-07T08:55:15.000000Z, query count# 0 2025-05-07T08:55:15.623044Z node 5 :SYSTEM_VIEWS DEBUG: processor_impl.cpp:105: [72075186224037893] PersistQueryTopResults: table id# 8, interval end# 2025-05-07T08:55:15.000000Z, query count# 0, persisted# 0 2025-05-07T08:55:15.623061Z node 5 :SYSTEM_VIEWS DEBUG: processor_impl.cpp:105: [72075186224037893] PersistQueryTopResults: table id# 10, interval end# 2025-05-07T08:55:15.000000Z, query count# 0, persisted# 0 2025-05-07T08:55:15.623079Z node 5 :SYSTEM_VIEWS DEBUG: processor_impl.cpp:105: [72075186224037893] PersistQueryTopResults: table id# 12, interval end# 2025-05-07T08:55:15.000000Z, query count# 0, persisted# 0 2025-05-07T08:55:15.623147Z node 5 :SYSTEM_VIEWS DEBUG: processor_impl.cpp:105: [72075186224037893] PersistQueryTopResults: table id# 14, interval end# 2025-05-07T08:55:15.000000Z, query count# 0, 
persisted# 0 2025-05-07T08:55:15.623169Z node 5 :SYSTEM_VIEWS DEBUG: processor_impl.cpp:105: [72075186224037893] PersistQueryTopResults: table id# 9, interval end# 2025-05-07T09:00:00.000000Z, query count# 0, persisted# 0 2025-05-07T08:55:15.623211Z node 5 :SYSTEM_VIEWS DEBUG: processor_impl.cpp:105: [72075186224037893] PersistQueryTopResults: table id# 11, interval end# 2025-05-07T09:00:00.000000Z, query count# 0, persisted# 0 2025-05-07T08:55:15.623234Z node 5 :SYSTEM_VIEWS DEBUG: processor_impl.cpp:105: [72075186224037893] PersistQueryTopResults: table id# 13, interval end# 2025-05-07T09:00:00.000000Z, query count# 0, persisted# 0 2025-05-07T08:55:15.623262Z node 5 :SYSTEM_VIEWS DEBUG: processor_impl.cpp:105: [72075186224037893] PersistQueryTopResults: table id# 15, interval end# 2025-05-07T09:00:00.000000Z, query count# 0, persisted# 0 2025-05-07T08:55:15.627994Z node 5 :SYSTEM ... tor_base_impl.h:65: Sending scan batch, actor: [19:7501625230718294833:2442], row count: 3, finished: 1 2025-05-07T08:57:10.546120Z node 19 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:120: Scan finished, actor: [19:7501625230718294833:2442], owner: [19:7501625230718294829:2440], scan id: 0, table id: [72057594046644480:1:0:hive_tablets] 2025-05-07T08:57:10.549053Z node 19 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1746608230538, txId: 281474976710675] shutting down 2025-05-07T08:57:10.815300Z node 19 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710678. Ctx: { TraceId: 01jtmzd45cd34c9hck3551tr2g, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=19&id=YWM0OTNhOGEtZGZjODY4OWItZDQ4OTBlM2EtZGQ4OTU0ODU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:57:10.818571Z node 19 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:45: Scan started, actor: [19:7501625230718294872:2454], owner: [19:7501625230718294869:2452], scan id: 0, table id: [72057594046644480:1:0:hive_tablets] 2025-05-07T08:57:10.821669Z node 19 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:321: Scan prepared, actor: [19:7501625230718294872:2454], schemeshard id: 72057594046644480, hive id: 72057594037968897, database: /Root, database owner: root@builtin, domain key: [OwnerId: 72057594046644480, LocalPathId: 1], database node count: 1 2025-05-07T08:57:10.822209Z node 19 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:65: Sending scan batch, actor: [19:7501625230718294872:2454], row count: 4, finished: 1 2025-05-07T08:57:10.822267Z node 19 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:120: Scan finished, actor: [19:7501625230718294872:2454], owner: [19:7501625230718294869:2452], scan id: 0, table id: [72057594046644480:1:0:hive_tablets] 2025-05-07T08:57:10.825989Z node 19 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1746608230813, txId: 281474976710677] shutting down 2025-05-07T08:57:11.123042Z node 19 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710680. Ctx: { TraceId: 01jtmzd4dfcabmstkb1a71b6bw, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=19&id=YTJhNzZiMGUtZWYxMzEwMzUtZTY4ZGQ1Y2UtYjcxZDk0OGM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-05-07T08:57:11.127505Z node 19 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:45: Scan started, actor: [19:7501625235013262202:2463], owner: [19:7501625235013262198:2461], scan id: 0, table id: [72057594046644480:1:0:hive_tablets] 2025-05-07T08:57:11.129738Z node 19 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:321: Scan prepared, actor: [19:7501625235013262202:2463], schemeshard id: 72057594046644480, hive id: 72057594037968897, database: /Root, database owner: root@builtin, domain key: [OwnerId: 72057594046644480, LocalPathId: 1], database node count: 1 2025-05-07T08:57:11.130571Z node 19 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:65: Sending scan batch, actor: [19:7501625235013262202:2463], row count: 4, finished: 1 2025-05-07T08:57:11.130614Z node 19 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:120: Scan finished, actor: [19:7501625235013262202:2463], owner: [19:7501625235013262198:2461], scan id: 0, table id: [72057594046644480:1:0:hive_tablets] 2025-05-07T08:57:11.134663Z node 19 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1746608231121, txId: 281474976710679] shutting down 2025-05-07T08:57:12.639523Z node 20 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[20:7501625236913928916:2150];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0045a9/r3tmp/tmpwuToGb/pdisk_1.dat 2025-05-07T08:57:12.780350Z node 20 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-05-07T08:57:12.958179Z node 20 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:57:12.972434Z node 20 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(20, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:57:12.972538Z node 20 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(20, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:57:12.974667Z node 20 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(20, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 13833, node 20 2025-05-07T08:57:13.214777Z node 20 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:57:13.214810Z node 20 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:57:13.214822Z node 20 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:57:13.215301Z node 20 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:27751 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:57:13.890452Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:57:13.898575Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-05-07T08:57:17.637743Z node 20 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[20:7501625236913928916:2150];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:57:17.637840Z node 20 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:57:20.788520Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-05-07T08:57:21.002121Z node 20 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [20:7501625275568635418:2360], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:57:21.002268Z node 20 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:57:21.002777Z node 20 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [20:7501625275568635430:2363], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:57:21.010590Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480 2025-05-07T08:57:21.028431Z node 20 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [20:7501625275568635432:2364], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-05-07T08:57:21.127112Z node 20 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [20:7501625275568635483:2496] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:57:21.585684Z node 20 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715662. Ctx: { TraceId: 01jtmzdeb2df1dndwxyvsbytbj, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=20&id=ODNiOTJlZTktZjI3ZmE4MmEtNzYzYzQ3NDctODU2MTM1MzI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:57:21.589857Z node 20 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:45: Scan started, actor: [20:7501625275568635519:2374], owner: [20:7501625275568635517:2373], scan id: 0, table id: [72057594046644480:1:0:hive_tablets] 2025-05-07T08:57:21.593081Z node 20 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:321: Scan prepared, actor: [20:7501625275568635519:2374], schemeshard id: 72057594046644480, hive id: 72057594037968897, database: /Root, database owner: root@builtin, domain key: [OwnerId: 72057594046644480, LocalPathId: 1], database node count: 1 2025-05-07T08:57:21.594854Z node 20 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:65: Sending scan batch, actor: [20:7501625275568635519:2374], row count: 4, finished: 1 2025-05-07T08:57:21.594884Z node 20 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:120: Scan finished, actor: [20:7501625275568635519:2374], owner: [20:7501625275568635517:2373], scan id: 0, table id: [72057594046644480:1:0:hive_tablets] 2025-05-07T08:57:21.699677Z node 20 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:45: Scan started, actor: [20:7501625275568635527:2378], owner: [20:7501625275568635517:2373], scan id: 0, table id: [72057594046644480:1:0:hive_tablets] 2025-05-07T08:57:21.710493Z node 20 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:321: Scan prepared, actor: [20:7501625275568635527:2378], schemeshard id: 72057594046644480, hive id: 72057594037968897, database: /Root, database owner: root@builtin, domain key: [OwnerId: 72057594046644480, LocalPathId: 1], database node count: 1 2025-05-07T08:57:21.711171Z node 20 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:65: Sending scan batch, actor: [20:7501625275568635527:2378], row count: 4, finished: 1 2025-05-07T08:57:21.711209Z node 20 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:120: Scan finished, actor: [20:7501625275568635527:2378], owner: [20:7501625275568635517:2373], scan id: 0, table id: [72057594046644480:1:0:hive_tablets] 2025-05-07T08:57:21.715601Z node 20 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1746608241582, txId: 281474976715661] shutting down >> DataShardWrite::UpsertImmediate |90.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/backup/impl/ut_table_writer/unittest >> BackupRestoreS3::RestoreIndexTableSplitBoundaries [GOOD] >> BackupRestoreS3::RestoreIndexTableDecimalSplitBoundaries |90.9%| [TA] $(B)/ydb/core/backup/impl/ut_table_writer/test-results/unittest/{meta.json ... 
results_accumulator.log} >> DataShardWrite::WriteImmediateBadRequest >> DataShardWrite::ExecSQLUpsertImmediate-EvWrite >> BackupRestoreS3::TestAllPrimitiveTypes-FLOAT [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-DOUBLE >> RetryPolicy::TWriteSession_SeqNoShift [GOOD] >> RetryPolicy::RetryWithBatching >> KqpProxy::ExecuteScriptFailsWithoutFeatureFlag [GOOD] >> DataShardWrite::ExecSQLUpsertImmediate+EvWrite >> BackupRestore::TestAllPrimitiveTypes-INTERVAL [GOOD] >> BackupRestore::TestAllPrimitiveTypes-TZ_DATE [GOOD] >> BackupRestore::TestAllPrimitiveTypes-TZ_DATETIME [GOOD] >> BackupRestore::TestAllPrimitiveTypes-TZ_TIMESTAMP [GOOD] >> BackupRestore::TestAllPrimitiveTypes-TIMESTAMP64 >> AsyncIndexChangeExchange::SenderShouldBeActivatedOnTableWithSyncIndex [GOOD] >> AsyncIndexChangeExchange::SenderShouldBeActivatedOnTableWithAsyncIndex |90.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/dump_restore/py3test >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_ttl_Uint32-pk_types9-all_types9-index9-Uint32--] [GOOD] >> DataShardWrite::UpsertImmediateManyColumns |90.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/dump_restore/py3test >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_ttl_DyNumber-pk_types8-all_types8-index8-DyNumber--] [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/proxy_service/ut/unittest >> KqpProxy::ExecuteScriptFailsWithoutFeatureFlag [GOOD] Test command err: 2025-05-07T08:57:01.245419Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501625188317174468:2064];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:57:01.245524Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003257/r3tmp/tmpepo257/pdisk_1.dat 2025-05-07T08:57:01.751355Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:57:01.751529Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:57:01.753420Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:57:01.777806Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TClient is connected to server localhost:22932 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... 
(TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-05-07T08:57:02.105557Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:57:04.629608Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1665: Updated YQL logs priority to current level: 4 2025-05-07T08:57:04.637837Z node 1 :KQP_PROXY INFO: kqp_proxy_service.cpp:442: Cannot start publishing usage, tenants: /dc-1, empty 2025-05-07T08:57:04.642352Z node 1 :KQP_PROXY WARN: kqp_proxy_service.cpp:1557: Failed to parse session id: ydb://session/1?id=ZjY5NWRlM2EtYWMyYjA5YWEtNzQ0MTVlYTMtM2Q4ZDgzOWQ=&node_id=1234&node_id=12345 2025-05-07T08:57:04.661002Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:512: Subscribed for config changes. 2025-05-07T08:57:04.661058Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:519: Updated table service config. 2025-05-07T08:57:04.661090Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1665: Updated YQL logs priority to current level: 4 2025-05-07T08:57:04.661150Z node 1 :KQP_PROXY INFO: kqp_proxy_service.cpp:442: Cannot start publishing usage, tenants: /dc-1, empty 2025-05-07T08:57:04.661236Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:425: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-05-07T08:57:04.661339Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:425: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-05-07T08:57:04.661421Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:953: Forwarded response to sender actor, requestId: 2, sender: [1:7501625192612142329:2281], selfId: [1:7501625188317174718:2278], source: [1:7501625188317174718:2278] 2025-05-07T08:57:04.661441Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:425: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-05-07T08:57:04.670294Z node 1 :KQP_PROXY WARN: kqp_proxy_service.cpp:1557: Failed to parse session id: unknown://session/1?id=ZjY5NWRlM2EtYWMyYjA5YWEtNzQ0MTVlYTMtM2Q4ZDgzOWQ=&node_id=1234&node_id=12345 2025-05-07T08:57:04.670422Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:425: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-05-07T08:57:04.670507Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:953: Forwarded response to sender actor, requestId: 3, sender: [1:7501625192612142329:2281], selfId: [1:7501625188317174718:2278], source: [1:7501625188317174718:2278] 2025-05-07T08:57:04.671212Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501625201202076958:2310], DatabaseId: /dc-1, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:57:04.671339Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /dc-1, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:57:04.671794Z node 1 :KQP_PROXY WARN: kqp_proxy_service.cpp:1557: Failed to parse session id: ydb://session/1?id=ZjY5NWRlM2EtYWMyYjA5YWEtNzQ0MTVlYTMtM2Q4ZDgzOWQ=&node_id=eqweq 2025-05-07T08:57:04.671905Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:953: Forwarded response to sender actor, requestId: 4, sender: [1:7501625192612142329:2281], selfId: [1:7501625188317174718:2278], source: [1:7501625188317174718:2278] 2025-05-07T08:57:04.672263Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501625201202076982:2311], DatabaseId: /dc-1, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:57:04.674237Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /dc-1, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:57:09.966859Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:311:2354], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:57:09.967184Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:57:09.967308Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003257/r3tmp/tmpHWDUzO/pdisk_1.dat 2025-05-07T08:57:10.592558Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-05-07T08:57:10.658195Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:519: Updated table service config. 2025-05-07T08:57:10.658304Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1665: Updated YQL logs priority to current level: 4 2025-05-07T08:57:10.658757Z node 2 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:57:10.697912Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2663: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [2:302:2346], request# { ErrorCount: 0 DatabaseName: /Root DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo },{ Path: Root/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-05-07T08:57:10.700015Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2550: HandleNotify: self# [2:302:2346], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /Root/.metadata/workload_manager/delayed_requests PathId: Strong: 1 } 2025-05-07T08:57:10.700203Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2425: ResolveCacheItem: self# [2:302:2346], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /Root/.metadata/workload_manager/delayed_requests PathId: Strong: 1 }, by path# { Subscriber: { Subscriber: [2:610:2532] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 0 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-05-07T08:57:10.700381Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1818: FillEntry for TNavigate: self# [2:302:2346], cacheItem# { Subscriber: { Subscriber: [2:610:2532] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: Root/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-05-07T08:57:10.700538Z node 2 
:TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2550: HandleNotify: self# [2:302:2346], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /Root/.metadata/workload_manager/running_requests PathId: Strong: 1 } 2025-05-07T08:57:10.700628Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2425: ResolveCacheItem: self# [2:302:2346], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /Root/.metadata/workload_manager/running_requests PathId: Strong: 1 }, by path# { Subscriber: { Subscriber: [2:611:2533] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 0 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-05-07T08:57:10.700713Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1818: FillEntry for TNavigate: self# [2:302:2346], cacheItem# { Subscriber: { Subscriber: [2:611:2533] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: Root/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-05-07T08:57:10.700928Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:264: Send result: self# [2:624:2534], recipient# [2:311:2354], resul ... _ACTOR Captured TEvents::TSystem::Wakeup to BS_SYNC_BROKER Captured TEvents::TSystem::Wakeup to BLOB_CACHE_ACTOR Captured TEvents::TSystem::Wakeup to TICKET_PARSER_ACTOR Captured TEvents::TSystem::Wakeup to EXT_COUNTERS_UPDATER_ACTOR Captured TEvents::TSystem::Wakeup to EXT_COUNTERS_UPDATER_ACTOR Captured TEvents::TSystem::Wakeup to KQP_COMPILE_COMPUTATION_PATTERN_SERVICE Captured TEvents::TSystem::Wakeup to KQP_NODE_SERVICE Captured TEvents::TSystem::Wakeup to PROXY_SCHEME_CACHE Captured TEvents::TSystem::Wakeup to PROXY_SCHEME_CACHE Captured TEvents::TSystem::Wakeup to PROXY_SCHEME_CACHE Captured TEvents::TSystem::Wakeup to PROXY_SCHEME_CACHE Captured TEvents::TSystem::Wakeup to DS_PROXY_NODE_MON_ACTOR Captured TEvents::TSystem::Wakeup to BS_SYNC_BROKER Captured TEvents::TSystem::Wakeup to TABLET_RESPONSIVENESS_PINGER Captured TEvents::TSystem::Wakeup to TABLET_RESPONSIVENESS_PINGER Captured TEvents::TSystem::Wakeup to NKikimr::NKqp::TSchedulerActor Captured TEvents::TSystem::Wakeup to BS_SYNC_BROKER Captured TEvents::TSystem::Wakeup to BLOB_CACHE_ACTOR Captured TEvents::TSystem::Wakeup to BS_SYNC_BROKER Captured TEvents::TSystem::Wakeup to BLOB_CACHE_ACTOR Captured TEvents::TSystem::Wakeup to BS_SYNC_BROKER Captured TEvents::TSystem::Wakeup to BLOB_CACHE_ACTOR Captured TEvents::TSystem::Wakeup to BS_SYNC_BROKER Captured TEvents::TSystem::Wakeup to BLOB_CACHE_ACTOR Captured TEvents::TSystem::Wakeup to BLOB_CACHE_ACTOR Captured TEvents::TSystem::Wakeup to NKikimr::NKqp::TSchedulerActor Captured TEvents::TSystem::Wakeup to BS_SYNC_BROKER Captured TEvents::TSystem::Wakeup to BS_SYNC_BROKER Captured TEvents::TSystem::Wakeup to BLOB_CACHE_ACTOR Captured TEvents::TSystem::Wakeup to BLOB_CACHE_ACTOR Captured TEvents::TSystem::Wakeup to BS_SYNC_BROKER Captured TEvents::TSystem::Wakeup to BS_SYNC_BROKER Captured TEvents::TSystem::Wakeup to BLOB_CACHE_ACTOR Captured TEvents::TSystem::Wakeup to BS_SYNC_BROKER Captured TEvents::TSystem::Wakeup to 
BLOB_CACHE_ACTOR Captured TEvents::TSystem::Wakeup to BLOB_CACHE_ACTOR Captured TEvents::TSystem::Wakeup to TABLET_COUNTERS_AGGREGATOR Captured TEvents::TSystem::Wakeup to BSC_STAT_PROCESSOR Captured TEvents::TSystem::Wakeup to TICKET_PARSER_ACTOR Captured TEvents::TSystem::Wakeup to NKikimr::NIcNodeCache::TIcNodeCacheServiceActor Captured TEvents::TSystem::Wakeup to EXT_COUNTERS_UPDATER_ACTOR Captured TEvents::TSystem::Wakeup to EXT_COUNTERS_UPDATER_ACTOR Captured TEvents::TSystem::Wakeup to KQP_COMPILE_COMPUTATION_PATTERN_SERVICE Captured TEvents::TSystem::Wakeup to KQP_NODE_SERVICE Captured TEvents::TSystem::Wakeup to PROXY_SCHEME_CACHE Captured TEvents::TSystem::Wakeup to PROXY_SCHEME_CACHE Captured TEvents::TSystem::Wakeup to PROXY_SCHEME_CACHE Captured TEvents::TSystem::Wakeup to PROXY_SCHEME_CACHE Captured TEvents::TSystem::Wakeup to DS_PROXY_NODE_MON_ACTOR Captured TEvents::TSystem::Wakeup to BS_SYNC_BROKER Captured TEvents::TSystem::Wakeup to NKikimr::NKqp::TSchedulerActor Captured TEvents::TSystem::Wakeup to TABLET_RESPONSIVENESS_PINGER Captured TEvents::TSystem::Wakeup to TABLET_RESPONSIVENESS_PINGER Captured TEvents::TSystem::Wakeup to NKikimr::NBsController::TBlobStorageController::TSelfHealActor Captured TEvents::TSystem::Wakeup to BS_SYNC_BROKER Captured TEvents::TSystem::Wakeup to BLOB_CACHE_ACTOR Captured TEvents::TSystem::Wakeup to BS_SYNC_BROKER Captured TEvents::TSystem::Wakeup to BLOB_CACHE_ACTOR Captured TEvents::TSystem::Wakeup to BLOB_CACHE_ACTOR Captured TEvents::TSystem::Wakeup to BS_SYNC_BROKER Captured TEvents::TSystem::Wakeup to BS_SYNC_BROKER Captured TEvents::TSystem::Wakeup to BLOB_CACHE_ACTOR Captured TEvents::TSystem::Wakeup to NKikimr::NKqp::TSchedulerActor Captured TEvents::TSystem::Wakeup to BS_SYNC_BROKER Captured TEvents::TSystem::Wakeup to BLOB_CACHE_ACTOR Captured TEvents::TSystem::Wakeup to BLOB_CACHE_ACTOR Captured TEvents::TSystem::Wakeup to BS_SYNC_BROKER Captured TEvents::TSystem::Wakeup to BLOB_CACHE_ACTOR Captured TEvents::TSystem::Wakeup to BS_SYNC_BROKER Captured TEvents::TSystem::Wakeup to BS_SYNC_BROKER Captured TEvents::TSystem::Wakeup to BLOB_CACHE_ACTOR Captured TEvents::TSystem::Wakeup to BLOB_CACHE_ACTOR Captured TEvents::TSystem::Wakeup to BS_SYNC_BROKER Captured TEvents::TSystem::Wakeup to TICKET_PARSER_ACTOR Captured TEvents::TSystem::Wakeup to EXT_COUNTERS_UPDATER_ACTOR Captured TEvents::TSystem::Wakeup to EXT_COUNTERS_UPDATER_ACTOR Captured TEvents::TSystem::Wakeup to KQP_COMPILE_COMPUTATION_PATTERN_SERVICE Captured TEvents::TSystem::Wakeup to KQP_NODE_SERVICE Captured TEvents::TSystem::Wakeup to PROXY_SCHEME_CACHE Captured TEvents::TSystem::Wakeup to PROXY_SCHEME_CACHE Captured TEvents::TSystem::Wakeup to PROXY_SCHEME_CACHE Captured TEvents::TSystem::Wakeup to PROXY_SCHEME_CACHE Captured TEvents::TSystem::Wakeup to DS_PROXY_NODE_MON_ACTOR Captured TEvents::TSystem::Wakeup to BS_SYNC_BROKER Captured TEvents::TSystem::Wakeup to BLOB_CACHE_ACTOR Captured TEvents::TSystem::Wakeup to NKikimr::NKqp::TSchedulerActor Captured TEvents::TSystem::Wakeup to TABLET_RESPONSIVENESS_PINGER Captured TEvents::TSystem::Wakeup to TABLET_RESPONSIVENESS_PINGER Captured TEvents::TSystem::Wakeup to BS_SYNC_BROKER Captured TEvents::TSystem::Wakeup to BLOB_CACHE_ACTOR Captured TEvents::TSystem::Wakeup to BS_SYNC_BROKER Captured TEvents::TSystem::Wakeup to BLOB_CACHE_ACTOR Captured TEvents::TSystem::Wakeup to BS_SYNC_BROKER Captured TEvents::TSystem::Wakeup to BLOB_CACHE_ACTOR Captured TEvents::TSystem::Wakeup to BLOB_CACHE_ACTOR Captured 
TEvents::TSystem::Wakeup to BS_SYNC_BROKER Captured TEvents::TSystem::Wakeup to NKikimr::NKqp::TSchedulerActor Captured TEvents::TSystem::Wakeup to BS_SYNC_BROKER Captured TEvents::TSystem::Wakeup to BLOB_CACHE_ACTOR Captured TEvents::TSystem::Wakeup to BLOB_CACHE_ACTOR Captured TEvents::TSystem::Wakeup to BS_SYNC_BROKER Captured TEvents::TSystem::Wakeup to BS_SYNC_BROKER Captured TEvents::TSystem::Wakeup to BLOB_CACHE_ACTOR 2025-05-07T08:57:20.217717Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1313: Handle TEvPrivate::TEvOnRequestTimeout(20) 2025-05-07T08:57:20.217804Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1321: Reply timeout: requestId 20 sessionId: ydb://session/3?node_id=2&id=ZDViMWY1ZTQtYjlkMjBmZWItNWVmMmFjMTMtODc3MDZhZGY= status: TIMEOUT round: 0 2025-05-07T08:57:20.217995Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2578: SessionId: ydb://session/3?node_id=2&id=ZDViMWY1ZTQtYjlkMjBmZWItNWVmMmFjMTMtODc3MDZhZGY=, ActorId: [2:1112:2915], ActorState: ExecuteState, TraceId: 01jtmzdchw9zkey58xn248mrgm, Create QueryResponse for error on request, msg: 2025-05-07T08:57:20.218241Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:953: Forwarded response to sender actor, requestId: 20, sender: [2:593:2518], selfId: [2:57:2104], source: [2:1112:2915] Send scheduled evet back 2025-05-07T08:57:20.218387Z node 2 :KQP_COMPILE_ACTOR NOTICE: kqp_compile_actor.cpp:577: Compilation timeout, self: [2:1114:2917], cluster: db, database: , text: "SELECT * FROM `/Root/Table`;", startTime: 2025-05-07T08:57:19.165237Z 2025-05-07T08:57:20.218502Z node 2 :KQP_COMPILE_ACTOR DEBUG: kqp_compile_actor.cpp:402: Send response, self: [2:1114:2917], owner: [2:296:2340], status: TIMEOUT, issues:
: Error: Query compilation timed out. , uid: 3f8efe5b-f064b0cf-a738e433-dc349391 Send captured event back Send captured event back Send captured event back Send captured event back Send captured event back 2025-05-07T08:57:21.410843Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7501625278162699489:2143];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:57:21.410902Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003257/r3tmp/tmpUL9zZU/pdisk_1.dat 2025-05-07T08:57:21.905223Z node 3 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:57:21.931584Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:57:21.931681Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:57:21.947603Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 23488, node 3 2025-05-07T08:57:22.230228Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:57:22.230252Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:57:22.230260Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:57:22.230393Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:24096 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:57:22.704513Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:57:25.979746Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1665: Updated YQL logs priority to current level: 4 2025-05-07T08:57:25.980571Z node 3 :KQP_PROXY INFO: kqp_proxy_service.cpp:442: Cannot start publishing usage, tenants: /Root, empty 2025-05-07T08:57:25.983332Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:512: Subscribed for config changes. 
2025-05-07T08:57:25.983362Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:519: Updated table service config. 2025-05-07T08:57:25.983383Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1665: Updated YQL logs priority to current level: 4 2025-05-07T08:57:25.983426Z node 3 :KQP_PROXY INFO: kqp_proxy_service.cpp:442: Cannot start publishing usage, tenants: /Root, empty 2025-05-07T08:57:25.988702Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:425: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-05-07T08:57:25.988757Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:425: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-05-07T08:57:25.988784Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:425: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-05-07T08:57:25.995103Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:425: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 >> TPersQueueTest::Init [GOOD] >> TPersQueueTest::NoDecompressionMemoryLeaks |90.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_view/ydb-core-tx-schemeshard-ut_view |90.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_view/ydb-core-tx-schemeshard-ut_view |90.9%| [TA] {RESULT} $(B)/ydb/core/backup/impl/ut_table_writer/test-results/unittest/{meta.json ... results_accumulator.log} |90.9%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_view/ydb-core-tx-schemeshard-ut_view >> Cdc::DocApi[PqRunner] [GOOD] >> Cdc::DocApi[YdsRunner] >> Cdc::KeysOnlyLog[YdsRunner] [GOOD] >> Cdc::KeysOnlyLog[TopicRunner] >> BackupRestore::TestAllPrimitiveTypes-UTF8 [GOOD] >> BackupRestore::TestAllPrimitiveTypes-YSON >> Cdc::UuidExchange[YdsRunner] [GOOD] >> Cdc::UuidExchange[TopicRunner] >> KqpPg::TableDeleteWhere-useSink [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-TIMESTAMP64 [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-INTERVAL64 |90.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_kqp_errors/ydb-core-tx-datashard-ut_kqp_errors |90.9%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_kqp_errors/ydb-core-tx-datashard-ut_kqp_errors |90.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_kqp_errors/ydb-core-tx-datashard-ut_kqp_errors |90.9%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest >> TTableProfileTests::UseDefaultProfile >> DataShardWrite::UpsertImmediate [GOOD] >> DataShardWrite::ReplaceImmediate >> FolderServiceTest::TFolderService >> DataShardWrite::WriteImmediateBadRequest [GOOD] >> DataShardWrite::WriteImmediateSeveralOperations >> TAccessServiceTest::Authenticate |90.9%| [TM] {asan, default-linux-x86_64, release} ydb/library/ycloud/impl/ut/unittest >> BackupRestore::TestAllPrimitiveTypes-INT64 [GOOD] >> BackupRestore::TestAllPrimitiveTypes-FLOAT >> DataShardWrite::ExecSQLUpsertImmediate+EvWrite [GOOD] >> DataShardWrite::DeleteImmediate >> KqpWorkloadServiceSubscriptions::TestResourcePoolSubscriptionAfterAclChange [GOOD] >> FolderServiceTest::TFolderServiceAdapter >> TServiceAccountServiceTest::Get >> TServiceAccountServiceTest::Get [GOOD] >> DataShardWrite::ExecSQLUpsertImmediate-EvWrite [GOOD] >> DataShardWrite::ExecSQLUpsertPrepared-EvWrite-Volatile >> DataShardWrite::UpsertImmediateManyColumns [GOOD] >> DataShardWrite::UpsertPrepared+Volatile >> TTableProfileTests::ExplicitPartitionsSimple >> 
AsyncIndexChangeExchange::SenderShouldBeActivatedOnTableWithAsyncIndex [GOOD] >> AsyncIndexChangeExchange::SenderShouldShakeHandsOnce >> TAccessServiceTest::PassRequestId |90.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_serverless/unittest |90.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/http_proxy/ut/inside_ydb_ut/ydb-core-http_proxy-ut-inside_ydb_ut |90.9%| [LD] {RESULT} $(B)/ydb/core/http_proxy/ut/inside_ydb_ut/ydb-core-http_proxy-ut-inside_ydb_ut |90.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/http_proxy/ut/inside_ydb_ut/ydb-core-http_proxy-ut-inside_ydb_ut >> TCdcStreamTests::MeteringServerless [GOOD] >> TCdcStreamTests::MeteringDedicated >> Cdc::KeysOnlyLog[TopicRunner] [GOOD] >> Cdc::KeysOnlyLogDebezium >> BackupRestore::TestAllPrimitiveTypes-TIMESTAMP64 [GOOD] >> BackupRestore::TestAllPrimitiveTypes-INTERVAL64 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/pg/unittest >> KqpPg::TableDeleteWhere-useSink [GOOD] Test command err: Trying to start YDB, gRPC: 28208, MsgBus: 28783 2025-05-07T08:50:53.353161Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501623608522339420:2197];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:50:53.364190Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/001d1d/r3tmp/tmpQNV3f6/pdisk_1.dat 2025-05-07T08:50:53.937762Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:50:53.957215Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:50:53.957322Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:50:53.958941Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 28208, node 1 2025-05-07T08:50:54.042550Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:50:54.042572Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:50:54.042579Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:50:54.042686Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:28783 TClient is connected to server localhost:28783 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:50:54.721610Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:50:57.268948Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480
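The coercion errors that follow mirror standard PostgreSQL enforcement of declared length limits on fixed-size types; the pg compatibility layer raises the same diagnostics during bulk upsert. A plain-PostgreSQL sketch of the same checks (the table and column names here are illustrative, not taken from the test):

    -- Illustrative reproduction of the length checks exercised below.
    CREATE TABLE coerce_demo (c char(2), v varchar(2), b bit(2), vb varbit(2));
    INSERT INTO coerce_demo (c)  VALUES ('abcd');   -- ERROR: value too long for type character(2)
    INSERT INTO coerce_demo (v)  VALUES ('abcd');   -- ERROR: value too long for type character varying(2)
    INSERT INTO coerce_demo (b)  VALUES (B'1111');  -- ERROR: bit string length 4 does not match type bit(2)
    INSERT INTO coerce_demo (vb) VALUES (B'1111');  -- ERROR: bit string too long for type bit varying(2)
    INSERT INTO coerce_demo (c)  VALUES ('ab');     -- within the declared length, accepted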
: Error: Bulk upsert to table '/Root/Coerce_pgbpchar_17472595041006102391_17823623939509273229' Unable to coerce value for pgbpchar: Error while coercing value, reason: yql/essentials/minikql/mkql_terminator.cpp:47: ERROR: value too long for type character(2) 2025-05-07T08:50:57.499873Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480
: Error: Bulk upsert to table '/Root/Coerce__pgbpchar_17472595041006102391_5352544928909966465' Unable to coerce value for _pgbpchar: Error while coercing value, reason: yql/essentials/minikql/mkql_terminator.cpp:47: ERROR: value too long for type character(2) 2025-05-07T08:50:57.593077Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 abcd 2025-05-07T08:50:57.788646Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 {abcd,abcd} 2025-05-07T08:50:57.908369Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 abcd 2025-05-07T08:50:58.062945Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480 {"abcd ","abcd "} 2025-05-07T08:50:58.270583Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480
: Error: Bulk upsert to table '/Root/Coerce_pgvarchar_17472595041006102391_17823623939509273229' Unable to coerce value for pgvarchar: Error while coercing value, reason: yql/essentials/minikql/mkql_terminator.cpp:47: ERROR: value too long for type character varying(2) 2025-05-07T08:50:58.352215Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501623608522339420:2197];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:50:58.352291Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:50:58.371316Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480
: Error: Bulk upsert to table '/Root/Coerce__pgvarchar_17472595041006102391_5352544928909966465' Unable to coerce value for _pgvarchar: Error while coercing value, reason: yql/essentials/minikql/mkql_terminator.cpp:47: ERROR: value too long for type character varying(2) 2025-05-07T08:50:58.452167Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710674:0, at schemeshard: 72057594046644480 abcd 2025-05-07T08:50:58.612206Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710677:0, at schemeshard: 72057594046644480 {abcd,abcd} 2025-05-07T08:50:58.782292Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710680:0, at schemeshard: 72057594046644480 abcd 2025-05-07T08:50:58.948174Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710683:0, at schemeshard: 72057594046644480 {abcd,abcd} 2025-05-07T08:50:59.102075Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710686:0, at schemeshard: 72057594046644480
: Error: Bulk upsert to table '/Root/Coerce_pgbit_17472595041006102391_5866627432374416336' Unable to coerce value for pgbit: Error while coercing value, reason: yql/essentials/minikql/mkql_terminator.cpp:47: ERROR: bit string length 4 does not match type bit(2) 2025-05-07T08:50:59.203832Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710687:0, at schemeshard: 72057594046644480
: Error: Bulk upsert to table '/Root/Coerce__pgbit_17472595041006102391_11087201080355820517' Unable to coerce value for _pgbit: Error while coercing value, reason: yql/essentials/minikql/mkql_terminator.cpp:47: ERROR: bit string length 4 does not match type bit(2) 2025-05-07T08:50:59.301508Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710688:0, at schemeshard: 72057594046644480 1111 2025-05-07T08:50:59.466424Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710691:0, at schemeshard: 72057594046644480 {1111,1111} 2025-05-07T08:50:59.604718Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710694:0, at schemeshard: 72057594046644480
: Error: Bulk upsert to table '/Root/Coerce_pgbit_10103374131519304989_5866627432374416336' Unable to coerce value for pgbit: Error while coercing value, reason: yql/essentials/minikql/mkql_terminator.cpp:47: ERROR: bit string length 4 does not match type bit(6) 2025-05-07T08:50:59.752176Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710695:0, at schemeshard: 72057594046644480
: Error: Bulk upsert to table '/Root/Coerce__pgbit_10103374131519304989_11087201080355820517' Unable to coerce value for _pgbit: Error while coercing value, reason: yql/essentials/minikql/mkql_terminator.cpp:47: ERROR: bit string length 4 does not match type bit(6) 2025-05-07T08:50:59.913419Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710696:0, at schemeshard: 72057594046644480
: Error: Bulk upsert to table '/Root/Coerce_pgvarbit_17472595041006102391_5866627432374416336' Unable to coerce value for pgvarbit: Error while coercing value, reason: yql/essentials/minikql/mkql_terminator.cpp:47: ERROR: bit string too long for type bit varying(2) 2025-05-07T08:51:00.022181Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710697:0, at schemeshard: 72057594046644480
: Error: Bulk upsert to table '/Root/Coerce__pgvarbit_17472595041006102391_11087201080355820517' Unable to coerce value for _pgvarbit: Error while coercing value, reason: yql/essentials/minikql/mkql_terminator.cpp:47: ERROR: bit string too long for type bit varying(2) 2025-05-07T08:51:00.158084Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710698:0, at schemeshard: 72057594046644480 1111 2025-05-07T08:51:00.362042Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ... 281474976710819:0, at schemeshard: 72057594046644480 2025-05-07T08:57:22.949275Z node 11 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-05-07T08:57:22.953325Z node 11 :TX_DATASHARD ERROR: finish_propose_unit.cpp:245: Prepare transaction failed. txid 281474976710821 at tablet 72075186224037936 errors: WRONG_SHARD_STATE (Interrupted operation [0:281474976710821] at 72075186224037936 while waiting for stream clearance) | 2025-05-07T08:57:22.963917Z node 11 :TX_DATASHARD ERROR: finish_propose_unit.cpp:174: Errors while proposing transaction txid 281474976710821 at tablet 72075186224037936 status: ERROR errors: WRONG_SHARD_STATE (Interrupted operation [0:281474976710821] at 72075186224037936 while waiting for stream clearance) | 2025-05-07T08:57:22.982744Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710822:0, at schemeshard: 72057594046644480 603 2025-05-07T08:57:23.185773Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710823:0, at schemeshard: 72057594046644480 2025-05-07T08:57:23.384394Z node 11 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-05-07T08:57:23.422325Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710825:0, at schemeshard: 72057594046644480 602 2025-05-07T08:57:23.634234Z node 11 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-05-07T08:57:23.673883Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710827:0, at schemeshard: 72057594046644480 2025-05-07T08:57:23.810612Z node 11 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-05-07T08:57:23.843964Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710829:0, at schemeshard: 72057594046644480 604 2025-05-07T08:57:23.985096Z node 11 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-05-07T08:57:24.022677Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710831:0, at schemeshard: 72057594046644480 2025-05-07T08:57:24.242262Z node 11 :FLAT_TX_SCHEMESHARD WARN: 
schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710832:0, at schemeshard: 72057594046644480 718 2025-05-07T08:57:24.402782Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710833:0, at schemeshard: 72057594046644480 2025-05-07T08:57:24.539015Z node 11 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-05-07T08:57:24.568450Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710835:0, at schemeshard: 72057594046644480 869 2025-05-07T08:57:24.753836Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710836:0, at schemeshard: 72057594046644480 2025-05-07T08:57:24.898126Z node 11 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-05-07T08:57:24.926355Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710838:0, at schemeshard: 72057594046644480 650 2025-05-07T08:57:25.050080Z node 11 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-05-07T08:57:25.092238Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710840:0, at schemeshard: 72057594046644480 2025-05-07T08:57:25.277172Z node 11 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-05-07T08:57:25.299940Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710842:0, at schemeshard: 72057594046644480 829 2025-05-07T08:57:25.426131Z node 11 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-05-07T08:57:25.472436Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710844:0, at schemeshard: 72057594046644480 2025-05-07T08:57:25.628041Z node 11 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-05-07T08:57:25.656909Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710846:0, at schemeshard: 72057594046644480 2025-05-07T08:57:25.757607Z node 11 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 774 2025-05-07T08:57:25.789092Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710848:0, at schemeshard: 72057594046644480 2025-05-07T08:57:25.935952Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo 
unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710849:0, at schemeshard: 72057594046644480 2950 2025-05-07T08:57:26.073470Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710850:0, at schemeshard: 72057594046644480 2025-05-07T08:57:26.278906Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710851:0, at schemeshard: 72057594046644480 114 2025-05-07T08:57:26.438951Z node 11 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-05-07T08:57:26.469848Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710853:0, at schemeshard: 72057594046644480 2025-05-07T08:57:26.683115Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710854:0, at schemeshard: 72057594046644480 3802 2025-05-07T08:57:26.830177Z node 11 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-05-07T08:57:26.871217Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710856:0, at schemeshard: 72057594046644480 2025-05-07T08:57:27.027982Z node 11 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-05-07T08:57:27.070480Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710858:0, at schemeshard: 72057594046644480 4072 2025-05-07T08:57:27.225677Z node 11 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-05-07T08:57:27.259660Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710860:0, at schemeshard: 72057594046644480 2025-05-07T08:57:27.506955Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710861:0, at schemeshard: 72057594046644480 142 2025-05-07T08:57:27.716714Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710862:0, at schemeshard: 72057594046644480 2025-05-07T08:57:27.890122Z node 11 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-05-07T08:57:27.925794Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710864:0, at schemeshard: 72057594046644480 3615 2025-05-07T08:57:28.062257Z node 11 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-05-07T08:57:28.099561Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: 
Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710866:0, at schemeshard: 72057594046644480 2025-05-07T08:57:28.302525Z node 11 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-05-07T08:57:28.331890Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710868:0, at schemeshard: 72057594046644480 3614 2025-05-07T08:57:28.492984Z node 11 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-05-07T08:57:28.539634Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710870:0, at schemeshard: 72057594046644480 2025-05-07T08:57:28.694942Z node 11 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-05-07T08:57:28.729392Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710872:0, at schemeshard: 72057594046644480 22 2025-05-07T08:57:28.911630Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710873:0, at schemeshard: 72057594046644480 2025-05-07T08:57:29.107796Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710874:0, at schemeshard: 72057594046644480 2025-05-07T08:57:29.266425Z node 11 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill >> TAccessServiceTest::Authenticate [GOOD] >> DataShardWrite::WriteImmediateSeveralOperations [GOOD] >> DataShardWrite::UpsertPreparedManyTables+Volatile ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/workload_service/ut/unittest >> KqpWorkloadServiceSubscriptions::TestResourcePoolSubscriptionAfterAclChange [GOOD] Test command err: 2025-05-07T08:54:39.276248Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501624580145664943:2212];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:54:39.276726Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-05-07T08:54:39.383578Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7501624578395912768:2072];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:54:39.383744Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/004831/r3tmp/tmp4QCB68/pdisk_1.dat 2025-05-07T08:54:40.112562Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:54:40.112653Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 
Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:54:40.118349Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:54:40.118430Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:54:40.120598Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:54:40.140298Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:54:40.145408Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-05-07T08:54:40.148143Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 6920, node 1 2025-05-07T08:54:40.501398Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:54:40.501426Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:54:40.501434Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:54:40.501549Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8483 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:54:40.932457Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T08:54:44.250035Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501624580145664943:2212];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:54:44.250103Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:54:44.373173Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7501624578395912768:2072];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:54:44.373250Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:54:44.411292Z node 1 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:440: [WorkloadService] [Service] Started workload service initialization 2025-05-07T08:54:44.411403Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:241: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7501624601620502156:2335], Start check tables existence, number paths: 2 2025-05-07T08:54:44.412704Z node 2 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:440: [WorkloadService] [Service] Started workload service initialization 2025-05-07T08:54:44.426063Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:223: SessionId: ydb://session/3?node_id=1&id=ZTlmZGIyZTktMjc3ZDg2NmItY2EyYmI4MjAtMzNlYzk3MGU=, ActorId: [0:0:0], ActorState: unknown state, Create session actor with id ZTlmZGIyZTktMjc3ZDg2NmItY2EyYmI4MjAtMzNlYzk3MGU= 2025-05-07T08:54:44.438097Z node 2 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:100: [WorkloadService] [Service] Subscribed for config changes 2025-05-07T08:54:44.439183Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:227: SessionId: ydb://session/3?node_id=1&id=ZTlmZGIyZTktMjc3ZDg2NmItY2EyYmI4MjAtMzNlYzk3MGU=, ActorId: [1:7501624601620502180:2336], ActorState: unknown state, session actor bootstrapped 2025-05-07T08:54:44.439471Z node 1 :KQP_WORKLOAD_SERVICE TRACE: kqp_workload_service.cpp:125: [WorkloadService] [Service] Updated node info, node count: 2 2025-05-07T08:54:44.439632Z node 2 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:241: [WorkloadService] [TCleanupTablesActor] ActorId: [2:7501624599870749548:2307], Start check tables existence, number paths: 2 2025-05-07T08:54:44.439776Z node 2 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:111: [WorkloadService] [Service] Resource pools were enabled 2025-05-07T08:54:44.439811Z node 2 :KQP_WORKLOAD_SERVICE TRACE: kqp_workload_service.cpp:125: [WorkloadService] [Service] Updated node info, node count: 2 2025-05-07T08:54:44.439497Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:100: [WorkloadService] [Service] Subscribed for config changes 2025-05-07T08:54:44.439523Z node 1 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:111: [WorkloadService] [Service] Resource pools were enabled 2025-05-07T08:54:44.439660Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:182: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7501624601620502156:2335], Describe table /Root/.metadata/workload_manager/delayed_requests status PathErrorUnknown 2025-05-07T08:54:44.439726Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:182: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7501624601620502156:2335], Describe table
/Root/.metadata/workload_manager/running_requests status PathErrorUnknown 2025-05-07T08:54:44.439784Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:289: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7501624601620502156:2335], Successfully finished 2025-05-07T08:54:44.443105Z node 2 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:182: [WorkloadService] [TCleanupTablesActor] ActorId: [2:7501624599870749548:2307], Describe table /Root/.metadata/workload_manager/delayed_requests status PathErrorUnknown 2025-05-07T08:54:44.443165Z node 2 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:182: [WorkloadService] [TCleanupTablesActor] ActorId: [2:7501624599870749548:2307], Describe table /Root/.metadata/workload_manager/running_requests status PathErrorUnknown 2025-05-07T08:54:44.443202Z node 2 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:289: [WorkloadService] [TCleanupTablesActor] ActorId: [2:7501624599870749548:2307], Successfully finished 2025-05-07T08:54:44.443313Z node 2 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:367: [WorkloadService] [Service] Cleanup completed, tables exists: 0 2025-05-07T08:54:44.443462Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:367: [WorkloadService] [Service] Cleanup completed, tables exists: 0 2025-05-07T08:54:44.478439Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:387: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501624601620502182:2525], DatabaseId: Root, PoolId: sample_pool_id, Start pool creating 2025-05-07T08:54:44.483004Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480 2025-05-07T08:54:44.490521Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:429: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501624601620502182:2525], DatabaseId: Root, PoolId: sample_pool_id, Subscribe on create pool tx: 281474976710658 2025-05-07T08:54:44.493160Z node 1 :KQP_WORKLOAD_SERVICE TRACE: scheme_actors.cpp:352: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501624601620502182:2525], DatabaseId: Root, PoolId: sample_pool_id, Tablet to pipe successfully connected 2025-05-07T08:54:44.531088Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501624601620502182:2525], DatabaseId: Root, PoolId: sample_pool_id, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-05-07T08:54:44.588450Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:387: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501624601620502182:2525], DatabaseId: Root, PoolId: sample_pool_id, Start pool creating 2025-05-07T08:54:44.594644Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501624601620502253:2577] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/sample_pool_id\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:54:44.594804Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:480: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501624601620502182:2525], DatabaseId: Root, PoolId: sample_pool_id, Pool successfully created 2025-05-07T08:54:44.601504Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:223: SessionId: ydb://session/3?node_id=1&id=Y2Q5NDJhYS0zNTZiMTI1LTkwMmE4OGMwLTQ5YzgxNmI0, ActorId: [0:0:0], ActorState: unknown state, Create session actor with id Y2Q5NDJhYS0zNTZiMTI1LTkwMmE4OGMwLTQ5YzgxNmI0 2025-05-07T08:54:44.601898Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:157: [WorkloadService] [Service] Received subscription request, DatabaseId: /Root, PoolId: sample_pool_id 2025-05-07T08:54:44.601917Z node 1 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:561: [WorkloadService] [Service] Creating new database state for id /Root 2025-05-07T08:54:44.601933Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:227: SessionId: ydb://session/3?node_id=1&id=Y2Q5NDJhYS0zNTZiMTI1LTkwMmE4OGMwLTQ5YzgxNmI0, ActorId: [1:7501624601620502263:2337], ActorState: unknown state, session actor bootstrapped 2025-05-07T08:54:44.601 ... escription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting...
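The trace above walks through the pool lifecycle this test drives: TPoolCreatorActor creates sample_pool_id, doublechecks that the path now exists, and the service subscribes to the pool; later in the test its ACL is changed with a GRANT. A minimal YQL sketch of that sequence, assuming YDB's workload-manager DDL (the WITH settings are illustrative assumptions; only the GRANT mirrors the statement executed in this test):

    -- Assumed illustrative settings; not values taken from the test run.
    CREATE RESOURCE POOL sample_pool_id WITH (
        CONCURRENT_QUERY_LIMIT = 10,
        QUEUE_SIZE = 100
    );
    -- Pools live under .metadata/workload_manager/pools, so access is
    -- granted on that scheme path:
    GRANT ALL ON `/Root/.metadata/workload_manager/pools/sample_pool_id` TO `test@user`;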
2025-05-07T08:57:24.567565Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 2025-05-07T08:57:24.579209Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T08:57:28.606136Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[12:7501625285415630109:2060];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:57:28.606274Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:57:32.078251Z node 12 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:440: [WorkloadService] [Service] Started workload service initialization 2025-05-07T08:57:32.086967Z node 12 :KQP_SESSION DEBUG: kqp_session_actor.cpp:223: SessionId: ydb://session/3?node_id=12&id=MmJmOWY4OWEtNDY2NzgzMjUtNzlmMmIyMC02MTc3NGRiZg==, ActorId: [0:0:0], ActorState: unknown state, Create session actor with id MmJmOWY4OWEtNDY2NzgzMjUtNzlmMmIyMC02MTc3NGRiZg== 2025-05-07T08:57:32.088046Z node 12 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:241: [WorkloadService] [TCleanupTablesActor] ActorId: [12:7501625324070336431:2337], Start check tables existence, number paths: 2 2025-05-07T08:57:32.088708Z node 12 :KQP_SESSION DEBUG: kqp_session_actor.cpp:227: SessionId: ydb://session/3?node_id=12&id=MmJmOWY4OWEtNDY2NzgzMjUtNzlmMmIyMC02MTc3NGRiZg==, ActorId: [12:7501625324070336432:2338], ActorState: unknown state, session actor bootstrapped 2025-05-07T08:57:32.088846Z node 12 :KQP_WORKLOAD_SERVICE TRACE: kqp_workload_service.cpp:125: [WorkloadService] [Service] Updated node info, node count: 1 2025-05-07T08:57:32.088868Z node 12 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:100: [WorkloadService] [Service] Subscribed for config changes 2025-05-07T08:57:32.088897Z node 12 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:111: [WorkloadService] [Service] Resource pools were enabled 2025-05-07T08:57:32.098781Z node 12 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:387: [WorkloadService] [TPoolCreatorActor] ActorId: [12:7501625324070336449:2310], DatabaseId: Root, PoolId: sample_pool_id, Start pool creating 2025-05-07T08:57:32.106546Z node 12 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:182: [WorkloadService] [TCleanupTablesActor] ActorId: [12:7501625324070336431:2337], Describe table /Root/.metadata/workload_manager/delayed_requests status PathErrorUnknown 2025-05-07T08:57:32.106643Z node 12 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:182: [WorkloadService] [TCleanupTablesActor] ActorId: [12:7501625324070336431:2337], Describe table /Root/.metadata/workload_manager/running_requests status PathErrorUnknown 2025-05-07T08:57:32.106696Z node 12 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:289: [WorkloadService] [TCleanupTablesActor] ActorId: [12:7501625324070336431:2337], Successfully finished 2025-05-07T08:57:32.106776Z node 12 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:367: [WorkloadService] [Service] Cleanup completed, tables exists: 0 2025-05-07T08:57:32.110738Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type:
ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480 2025-05-07T08:57:32.126253Z node 12 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:429: [WorkloadService] [TPoolCreatorActor] ActorId: [12:7501625324070336449:2310], DatabaseId: Root, PoolId: sample_pool_id, Subscribe on create pool tx: 281474976710658 2025-05-07T08:57:32.129346Z node 12 :KQP_WORKLOAD_SERVICE TRACE: scheme_actors.cpp:352: [WorkloadService] [TPoolCreatorActor] ActorId: [12:7501625324070336449:2310], DatabaseId: Root, PoolId: sample_pool_id, Tablet to pipe successfully connected 2025-05-07T08:57:32.159237Z node 12 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [12:7501625324070336449:2310], DatabaseId: Root, PoolId: sample_pool_id, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-05-07T08:57:32.229829Z node 12 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:387: [WorkloadService] [TPoolCreatorActor] ActorId: [12:7501625324070336449:2310], DatabaseId: Root, PoolId: sample_pool_id, Start pool creating 2025-05-07T08:57:32.235190Z node 12 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [12:7501625324070336500:2342] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/sample_pool_id\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:57:32.235408Z node 12 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:480: [WorkloadService] [TPoolCreatorActor] ActorId: [12:7501625324070336449:2310], DatabaseId: Root, PoolId: sample_pool_id, Pool successfully created 2025-05-07T08:57:32.238461Z node 12 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:157: [WorkloadService] [Service] Received subscription request, DatabaseId: Root, PoolId: sample_pool_id 2025-05-07T08:57:32.238496Z node 12 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:561: [WorkloadService] [Service] Creating new database state for id Root 2025-05-07T08:57:32.238590Z node 12 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:185: [WorkloadService] [TPoolFetcherActor] ActorId: [12:7501625324070336507:2339], DatabaseId: Root, PoolId: sample_pool_id, Start pool fetching 2025-05-07T08:57:32.240632Z node 12 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:223: [WorkloadService] [TPoolFetcherActor] ActorId: [12:7501625324070336507:2339], DatabaseId: Root, PoolId: sample_pool_id, Pool info successfully fetched 2025-05-07T08:57:32.240703Z node 12 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:253: [WorkloadService] [Service] Successfully fetched pool sample_pool_id, DatabaseId: Root 2025-05-07T08:57:32.240732Z node 12 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:571: [WorkloadService] [Service] Creating new handler for pool /Root/sample_pool_id 2025-05-07T08:57:32.241247Z node 12 :KQP_WORKLOAD_SERVICE DEBUG: pool_handlers_actors.cpp:466: [WorkloadService] [TPoolHandlerActorBase] ActorId: [12:7501625324070336516:2340], DatabaseId: Root, PoolId: sample_pool_id, Subscribed on schemeboard notifications for path: [OwnerId: 72057594046644480, LocalPathId: 5] 2025-05-07T08:57:32.242920Z node 12 :KQP_WORKLOAD_SERVICE DEBUG: pool_handlers_actors.cpp:274: [WorkloadService] [TPoolHandlerActorBase] ActorId: [12:7501625324070336516:2340], DatabaseId: Root, PoolId: sample_pool_id, Got watch notification 2025-05-07T08:57:32.266707Z node 12 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:157: [WorkloadService] [Service] Received subscription request, DatabaseId: /Root, PoolId: default 2025-05-07T08:57:32.266750Z node 12 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:561: [WorkloadService] [Service] Creating new database state for id /Root 2025-05-07T08:57:32.266918Z node 12 :KQP_SESSION DEBUG: kqp_session_actor.cpp:443: SessionId: ydb://session/3?node_id=12&id=MmJmOWY4OWEtNDY2NzgzMjUtNzlmMmIyMC02MTc3NGRiZg==, ActorId: [12:7501625324070336432:2338], ActorState: ReadyState, TraceId: 01jtmzdsb6dz4at2h4nd7hgnn5, received request, proxyRequestId: 3 prepared: 0 tx_control: 0 action: QUERY_ACTION_EXECUTE type: QUERY_TYPE_SQL_DDL text: GRANT ALL ON `/Root/.metadata/workload_manager/pools/sample_pool_id` TO `test@user`; rpcActor: [0:0:0] database: /Root databaseId: /Root pool id:
default 2025-05-07T08:57:32.297398Z node 12 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:185: [WorkloadService] [TPoolFetcherActor] ActorId: [12:7501625324070336528:2342], DatabaseId: /Root, PoolId: default, Start pool fetching 2025-05-07T08:57:32.299546Z node 12 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [12:7501625324070336528:2342], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:57:32.299682Z node 12 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T08:57:32.301118Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710660:0, at schemeshard: 72057594046644480
2025-05-07T08:57:32.305211Z node 12 :KQP_WORKLOAD_SERVICE DEBUG: pool_handlers_actors.cpp:274: [WorkloadService] [TPoolHandlerActorBase] ActorId: [12:7501625324070336516:2340], DatabaseId: Root, PoolId: sample_pool_id, Got watch notification
2025-05-07T08:57:32.306291Z node 12 :KQP_SESSION INFO: kqp_session_actor.cpp:2473: SessionId: ydb://session/3?node_id=12&id=MmJmOWY4OWEtNDY2NzgzMjUtNzlmMmIyMC02MTc3NGRiZg==, ActorId: [12:7501625324070336432:2338], ActorState: ExecuteState, TraceId: 01jtmzdsb6dz4at2h4nd7hgnn5, Cleanup start, isFinal: 0 CleanupCtx: 1 TransactionsToBeAborted.size(): 0 WorkerId: [12:7501625324070336529:2338] WorkloadServiceCleanup: 0
2025-05-07T08:57:32.308493Z node 12 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2534: SessionId: ydb://session/3?node_id=12&id=MmJmOWY4OWEtNDY2NzgzMjUtNzlmMmIyMC02MTc3NGRiZg==, ActorId: [12:7501625324070336432:2338], ActorState: CleanupState, TraceId: 01jtmzdsb6dz4at2h4nd7hgnn5, EndCleanup, isFinal: 0
2025-05-07T08:57:32.308564Z node 12 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2270: SessionId: ydb://session/3?node_id=12&id=MmJmOWY4OWEtNDY2NzgzMjUtNzlmMmIyMC02MTc3NGRiZg==, ActorId: [12:7501625324070336432:2338], ActorState: CleanupState, TraceId: 01jtmzdsb6dz4at2h4nd7hgnn5, Sent query response back to proxy, proxyRequestId: 3, proxyId: [12:7501625285415630120:2069]
2025-05-07T08:57:32.338993Z node 12 :KQP_SESSION INFO: kqp_session_actor.cpp:2315: SessionId: ydb://session/3?node_id=12&id=MmJmOWY4OWEtNDY2NzgzMjUtNzlmMmIyMC02MTc3NGRiZg==, ActorId: [12:7501625324070336432:2338], ActorState: ReadyState, Session closed due to explicit close event
2025-05-07T08:57:32.339061Z node 12 :KQP_SESSION INFO: kqp_session_actor.cpp:2473: SessionId: ydb://session/3?node_id=12&id=MmJmOWY4OWEtNDY2NzgzMjUtNzlmMmIyMC02MTc3NGRiZg==, ActorId: [12:7501625324070336432:2338], ActorState: ReadyState, Cleanup start, isFinal: 1 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0
2025-05-07T08:57:32.339095Z node 12 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2534: SessionId: ydb://session/3?node_id=12&id=MmJmOWY4OWEtNDY2NzgzMjUtNzlmMmIyMC02MTc3NGRiZg==, ActorId: [12:7501625324070336432:2338], ActorState: ReadyState, EndCleanup, isFinal: 1
2025-05-07T08:57:32.339124Z node 12 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2546: SessionId: ydb://session/3?node_id=12&id=MmJmOWY4OWEtNDY2NzgzMjUtNzlmMmIyMC02MTc3NGRiZg==, ActorId: [12:7501625324070336432:2338], ActorState: unknown state, Cleanup temp tables: 0
2025-05-07T08:57:32.339230Z node 12 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2637: SessionId: ydb://session/3?node_id=12&id=MmJmOWY4OWEtNDY2NzgzMjUtNzlmMmIyMC02MTc3NGRiZg==, ActorId: [12:7501625324070336432:2338], ActorState: unknown state, Session actor destroyed
>> Cdc::UuidExchange[TopicRunner] [GOOD]
>> Cdc::UpdatesLog[PqRunner]
>> TSchemeShardServerLess::TestServerlessComputeResourcesModeFeatureFlag
>> FolderServiceTest::TFolderService [GOOD]
>> BackupRestoreS3::TestAllPrimitiveTypes-DOUBLE [GOOD]
>> BackupRestoreS3::TestAllPrimitiveTypes-DATE
>> DataShardWrite::ReplaceImmediate [GOOD]
>> DataShardWrite::ReplaceImmediate_DefaultValue
>> TUserAccountServiceTest::Get
>> DataShardWrite::DeleteImmediate [GOOD]
>> DataShardWrite::CancelImmediate
------- [TM] {asan, default-linux-x86_64, release} ydb/library/ycloud/impl/ut/unittest >> TAccessServiceTest::Authenticate [GOOD]
Test command err:
2025-05-07T08:57:32.454926Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501625321999627681:2063];send_to=[0:7307199536658146131:7762515];
2025-05-07T08:57:32.455081Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message;
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0029f4/r3tmp/tmpIQuuj9/pdisk_1.dat
2025-05-07T08:57:33.015571Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded
2025-05-07T08:57:33.020947Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2025-05-07T08:57:33.021042Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting
2025-05-07T08:57:33.027896Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected
TClient is connected to server localhost:7015
WaitRootIsUp 'Root'...
TClient::Ls request: Root
TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED)
WaitRootIsUp 'Root' success.
2025-05-07T08:57:33.343739Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480
waiting...
2025-05-07T08:57:33.412822Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [51700007be08] Connect to grpc://localhost:12529
2025-05-07T08:57:33.414140Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [51700007be08] Request AuthenticateRequest { iam_token: "**** (047D44F1)" }
2025-05-07T08:57:33.439125Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [51700007be08] Status 7 Permission Denied
2025-05-07T08:57:33.443912Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [51700007be08] Request AuthenticateRequest { iam_token: "**** (342498C1)" }
2025-05-07T08:57:33.459501Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [51700007be08] Response AuthenticateResponse { subject { user_account { id: "1234" } } }
>> BackupRestore::TestAllPrimitiveTypes-YSON [FAIL]
>> BackupRestore::TestAllPrimitiveTypes-UUID
>> TSchemeShardServerLess::StorageBillingLabels
|90.9%| [TM] {asan, default-linux-x86_64, release} ydb/library/ycloud/impl/ut/unittest
------- [TM] {asan, default-linux-x86_64, release} ydb/library/ycloud/impl/ut/unittest >> FolderServiceTest::TFolderService [GOOD]
Test command err:
2025-05-07T08:57:32.493155Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501625321383915777:2208];send_to=[0:7307199536658146131:7762515];
2025-05-07T08:57:32.493443Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message;
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0029f0/r3tmp/tmpHPgVlF/pdisk_1.dat
2025-05-07T08:57:33.313099Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded
2025-05-07T08:57:33.359743Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2025-05-07T08:57:33.359841Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting
2025-05-07T08:57:33.367040Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected
TClient is connected to server localhost:28586
WaitRootIsUp 'Root'...
TClient::Ls request: Root
TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED)
WaitRootIsUp 'Root' success.
2025-05-07T08:57:33.973477Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480
waiting...
2025-05-07T08:57:33.989571Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [51700007be08] Connect to grpc://localhost:15321 2025-05-07T08:57:33.997901Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [51700007be08] Request ResolveFoldersRequest { folder_ids: "i_am_not_exists" } 2025-05-07T08:57:34.151302Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [51700007be08] Status 14 failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:15321: Failed to connect to remote host: Connection refused 2025-05-07T08:57:34.155396Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [51700007be08] Request ResolveFoldersRequest { folder_ids: "i_am_not_exists" } 2025-05-07T08:57:34.157160Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [51700007be08] Status 14 failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:15321: Failed to connect to remote host: Connection refused 2025-05-07T08:57:35.166366Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [51700007be08] Request ResolveFoldersRequest { folder_ids: "i_am_not_exists" } 2025-05-07T08:57:35.170833Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [51700007be08] Status 5 Not Found 2025-05-07T08:57:35.171990Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [51700007be08] Request ResolveFoldersRequest { folder_ids: "i_am_exists" } 2025-05-07T08:57:35.175153Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [51700007be08] Response ResolveFoldersResponse { resolved_folders { cloud_id: "response_cloud_id" } } |90.9%| [TA] $(B)/ydb/core/kqp/ut/pg/test-results/unittest/{meta.json ... results_accumulator.log} >> FolderServiceTest::TFolderServiceTransitional >> TSchemeShardServerLess::TestServerlessComputeResourcesModeFeatureFlag [GOOD] >> BackupRestoreS3::RestoreIndexTableDecimalSplitBoundaries [GOOD] >> BackupRestoreS3::RestoreViewQueryText >> FolderServiceTest::TFolderServiceAdapter [GOOD] >> TSchemeShardServerLess::StorageBilling >> TAccessServiceTest::PassRequestId [GOOD] |90.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_serverless/unittest |90.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/query_actor/ut/ydb-library-query_actor-ut |90.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/library/query_actor/ut/ydb-library-query_actor-ut ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_proxy/ut_schemereq/unittest >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-72 [FAIL] Test command err: Starting YDB, grpc: 4173, msgbus: 6897 2025-05-07T08:51:45.519204Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501623832430722013:2095];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:51:45.943261Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002c21/r3tmp/tmp3zE6Al/pdisk_1.dat 2025-05-07T08:51:46.699295Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:51:46.715748Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:51:46.715856Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:51:46.744510Z node 1 :HIVE 
WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 4173, node 1 2025-05-07T08:51:47.050722Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:51:47.050746Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:51:47.050757Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:51:47.050880Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6897 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 2025-05-07T08:51:47.578842Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7501623832430722032:2113] Handle TEvNavigate describe path dc-1 2025-05-07T08:51:47.578914Z node 1 :TX_PROXY DEBUG: describe.cpp:227: Actor# [1:7501623841020657329:2445] HANDLE EvNavigateScheme dc-1 2025-05-07T08:51:47.579269Z node 1 :TX_PROXY DEBUG: describe.cpp:311: Actor# [1:7501623841020657329:2445] HANDLE EvNavigateKeySetResult TDescribeReq marker# P5 ErrorCount# 0 2025-05-07T08:51:47.644322Z node 1 :TX_PROXY DEBUG: describe.cpp:389: Actor# [1:7501623841020657329:2445] SEND to# 72057594046644480 shardToRequest NKikimrSchemeOp.TDescribePath Path: "dc-1" Options { ReturnBoundaries: true ShowPrivateTable: true ReturnRangeKey: true } 2025-05-07T08:51:47.672913Z node 1 :TX_PROXY DEBUG: describe.cpp:402: Actor# [1:7501623841020657329:2445] Handle TEvDescribeSchemeResult Forward to# [1:7501623841020657328:2444] Cookie: 0 TEvDescribeSchemeResult: NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 2 Record# Status: StatusSuccess Path: "dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/dc-1" } } } PathId: 1 PathOwnerId: 72057594046644480 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 
18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-05-07T08:51:47.701845Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:7501623832430722032:2113] Handle TEvProposeTransaction 2025-05-07T08:51:47.701882Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:7501623832430722032:2113] TxId# 281474976710657 ProcessProposeTransaction 2025-05-07T08:51:47.702025Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:7501623832430722032:2113] Cookie# 0 userReqId# "" txid# 281474976710657 SEND to# [1:7501623841020657336:2451] 2025-05-07T08:51:47.901686Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1520: Actor# [1:7501623841020657336:2451] txid# 281474976710657 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "dc-1" StoragePools { Name: "" Kind: "tenant-db" } StoragePools { Name: "/dc-1:test" Kind: "test" } } } } UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)" PeerName: "" 2025-05-07T08:51:47.901796Z node 1 :TX_PROXY DEBUG: schemereq.cpp:563: Actor# [1:7501623841020657336:2451] txid# 281474976710657 Bootstrap, UserSID: root@builtin CheckAdministrator: 1 CheckDatabaseAdministrator: 1 2025-05-07T08:51:47.901821Z node 1 :TX_PROXY DEBUG: schemereq.cpp:572: Actor# [1:7501623841020657336:2451] txid# 281474976710657 Bootstrap, UserSID: root@builtin IsClusterAdministrator: 1 2025-05-07T08:51:47.901909Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1575: Actor# [1:7501623841020657336:2451] txid# 281474976710657 TEvNavigateKeySet requested from SchemeCache 2025-05-07T08:51:47.902274Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1408: Actor# [1:7501623841020657336:2451] txid# 281474976710657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-05-07T08:51:47.902423Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1455: Actor# [1:7501623841020657336:2451] HANDLE EvNavigateKeySetResult, txid# 281474976710657 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# false 2025-05-07T08:51:47.902487Z node 1 :TX_PROXY DEBUG: schemereq.cpp:102: Actor# [1:7501623841020657336:2451] txid# 281474976710657 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976710657 TabletId# 72057594046644480} 2025-05-07T08:51:47.902660Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1310: Actor# [1:7501623841020657336:2451] txid# 281474976710657 HANDLE EvClientConnected 2025-05-07T08:51:47.903529Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 2025-05-07T08:51:47.911851Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1332: Actor# [1:7501623841020657336:2451] txid# 281474976710657 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976710657} 2025-05-07T08:51:47.911915Z node 1 :TX_PROXY DEBUG: schemereq.cpp:543: Actor# [1:7501623841020657336:2451] txid# 281474976710657 SEND to# 
[1:7501623841020657335:2450] Source {TEvProposeTransactionStatus txid# 281474976710657 Status# 53} waiting... 2025-05-07T08:51:47.939734Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:7501623832430722032:2113] Handle TEvProposeTransaction 2025-05-07T08:51:47.939764Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:7501623832430722032:2113] TxId# 281474976710658 ProcessProposeTransaction 2025-05-07T08:51:47.939824Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:7501623832430722032:2113] Cookie# 0 userReqId# "" txid# 281474976710658 SEND to# [1:7501623841020657374:2485] 2025-05-07T08:51:47.942265Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1520: Actor# [1:7501623841020657374:2485] txid# 281474976710658 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/" OperationType: ESchemeOpModifyACL ModifyACL { Name: "dc-1" DiffACL: "\n\032\010\000\022\026\010\001\020\377\377\003\032\014root@builtin \003" } } } UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)" PeerName: "" 2025-05-07T08:51:47.942394Z node 1 :TX_PROXY DEBUG: schemereq.cpp:563: Actor# [1:7501623841020657374:2485] txid# 281474976710658 Bootstrap, UserSID: root@builtin CheckAdministrator: 1 CheckDatabaseAdministrator: 1 2025-05-07T08:51:47.942413Z node 1 :TX_PROXY DEBUG: schemereq.cpp:572: Actor# [1:7501623841020657374:2485] txid# 281474976710658 Bootstrap, UserSID: root@builtin IsClusterAdministrator: 1 2025-05-07T08:51:47.942467Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1575: Actor# [1:7501623841020657374:2485] txid# 281474976710658 TEvNavigateKeySet requested from SchemeCache 2025-05-07T08:51:47.942799Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1408: Actor# [1:7501623841020657374:2485] txid# 281474976710658 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-05-07T08:51:47.942881Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1455: Actor# [1:7501623841020657374:2485] HANDLE EvNavigateKeySetResult, txid# 281474976710658 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-05-07T08:51:47.942919Z node 1 :TX_PROXY DEBUG: schemereq.cpp:102: Actor# [1:7501623841020657374:2485] txid# 281474976710658 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976710658 TabletId# 72057594046644480} 2025-05-07T08:51:47.943033Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1310: Actor# [1:7501623841020657374:2485] txid# 281474976710658 HANDLE EvClientConnected 2025-05-07T08:51:47.943566Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710658:0, at schemeshard: 72057594046644480 2025-05-07T08:51:47.952086Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1332: Actor# [1:7501623841020657374:2485] txid# 281474976710658 Status StatusSuccess HANDLE {TEvModifySchemeTransactionResult Status# StatusSuccess txid# 281474976710658} 2025-05-07T08:51:47.952138Z node 1 :TX_PROXY DEBUG: schemereq.cpp:543: Actor# [1:7501623841020657374:2485] txid# 281474976710658 SEND to# [1:7501623841020657373:2484] Source {TEvProposeTransactionStatus txid# 281474976710658 Status# 48} 2025-05-07T08:51:50.442100Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: 
fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501623832430722013:2095];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:51:50.442184Z node 1 :METADATA_PROVIDER ... [59:7501624892822036585:2454] Cookie: 0 TEvDescribeSchemeResult: NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 2 Record# Status: StatusSuccess Path: "dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/dc-1" } } } PathId: 1 PathOwnerId: 72057594046644480 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 
2025-05-07T08:55:52.277144Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [59:7501624884232101231:2108] Handle TEvProposeTransaction 2025-05-07T08:55:52.277180Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [59:7501624884232101231:2108] TxId# 281474976715657 ProcessProposeTransaction 2025-05-07T08:55:52.277296Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [59:7501624884232101231:2108] Cookie# 0 userReqId# "" txid# 281474976715657 SEND to# [59:7501624892822036592:2460] 2025-05-07T08:55:52.280111Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1520: Actor# [59:7501624892822036592:2460] txid# 281474976715657 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "dc-1" StoragePools { Name: "" Kind: "tenant-db" } StoragePools { Name: "/dc-1:test" Kind: "test" } } } } UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)" PeerName: "" 2025-05-07T08:55:52.280186Z node 59 :TX_PROXY DEBUG: schemereq.cpp:563: Actor# [59:7501624892822036592:2460] txid# 281474976715657 Bootstrap, UserSID: root@builtin CheckAdministrator: 1 CheckDatabaseAdministrator: 1 2025-05-07T08:55:52.280209Z node 59 :TX_PROXY DEBUG: schemereq.cpp:572: Actor# [59:7501624892822036592:2460] txid# 281474976715657 Bootstrap, UserSID: root@builtin IsClusterAdministrator: 1 2025-05-07T08:55:52.280266Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1575: Actor# [59:7501624892822036592:2460] txid# 281474976715657 TEvNavigateKeySet requested from SchemeCache 2025-05-07T08:55:52.280566Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1408: Actor# [59:7501624892822036592:2460] txid# 281474976715657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-05-07T08:55:52.280706Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1455: Actor# [59:7501624892822036592:2460] HANDLE EvNavigateKeySetResult, txid# 281474976715657 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# false 2025-05-07T08:55:52.280781Z node 59 :TX_PROXY DEBUG: schemereq.cpp:102: Actor# [59:7501624892822036592:2460] txid# 281474976715657 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715657 TabletId# 72057594046644480} 2025-05-07T08:55:52.280956Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1310: Actor# [59:7501624892822036592:2460] txid# 281474976715657 HANDLE EvClientConnected 2025-05-07T08:55:52.281707Z node 59 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-05-07T08:55:52.327515Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1332: Actor# [59:7501624892822036592:2460] txid# 281474976715657 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715657} 2025-05-07T08:55:52.327581Z node 59 :TX_PROXY DEBUG: schemereq.cpp:543: Actor# [59:7501624892822036592:2460] txid# 281474976715657 SEND to# [59:7501624892822036591:2459] Source {TEvProposeTransactionStatus txid# 281474976715657 Status# 53} waiting... 
2025-05-07T08:55:52.388521Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [59:7501624884232101231:2108] Handle TEvProposeTransaction 2025-05-07T08:55:52.388558Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [59:7501624884232101231:2108] TxId# 281474976715658 ProcessProposeTransaction 2025-05-07T08:55:52.388602Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [59:7501624884232101231:2108] Cookie# 0 userReqId# "" txid# 281474976715658 SEND to# [59:7501624892822036633:2497] 2025-05-07T08:55:52.391212Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1520: Actor# [59:7501624892822036633:2497] txid# 281474976715658 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/" OperationType: ESchemeOpModifyACL ModifyACL { Name: "dc-1" DiffACL: "\n\032\010\000\022\026\010\001\020\377\377\003\032\014root@builtin \003" } } } UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)" PeerName: "" 2025-05-07T08:55:52.391285Z node 59 :TX_PROXY DEBUG: schemereq.cpp:563: Actor# [59:7501624892822036633:2497] txid# 281474976715658 Bootstrap, UserSID: root@builtin CheckAdministrator: 1 CheckDatabaseAdministrator: 1 2025-05-07T08:55:52.391308Z node 59 :TX_PROXY DEBUG: schemereq.cpp:572: Actor# [59:7501624892822036633:2497] txid# 281474976715658 Bootstrap, UserSID: root@builtin IsClusterAdministrator: 1 2025-05-07T08:55:52.391388Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1575: Actor# [59:7501624892822036633:2497] txid# 281474976715658 TEvNavigateKeySet requested from SchemeCache 2025-05-07T08:55:52.391740Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1408: Actor# [59:7501624892822036633:2497] txid# 281474976715658 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-05-07T08:55:52.391846Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1455: Actor# [59:7501624892822036633:2497] HANDLE EvNavigateKeySetResult, txid# 281474976715658 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-05-07T08:55:52.391934Z node 59 :TX_PROXY DEBUG: schemereq.cpp:102: Actor# [59:7501624892822036633:2497] txid# 281474976715658 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715658 TabletId# 72057594046644480} 2025-05-07T08:55:52.392078Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1310: Actor# [59:7501624892822036633:2497] txid# 281474976715658 HANDLE EvClientConnected 2025-05-07T08:55:52.392626Z node 59 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-05-07T08:55:52.398627Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1332: Actor# [59:7501624892822036633:2497] txid# 281474976715658 Status StatusSuccess HANDLE {TEvModifySchemeTransactionResult Status# StatusSuccess txid# 281474976715658} 2025-05-07T08:55:52.398692Z node 59 :TX_PROXY DEBUG: schemereq.cpp:543: Actor# [59:7501624892822036633:2497] txid# 281474976715658 SEND to# [59:7501624892822036632:2496] Source {TEvProposeTransactionStatus txid# 281474976715658 Status# 48} 2025-05-07T08:56:01.287912Z node 59 :KQP_PROXY ERROR: kqp_proxy_service.cpp:1461: TraceId: "01jtmzaqwm1b1704gjjd0f58c4", Request deadline has expired for 3.820894s seconds assertion failed at 
ydb/core/tx/tx_proxy/schemereq_ut.cpp:256, void NKikimr::NTxProxyUT::CreateLocalUser(const TTestEnv &, const TString &, const TString &, const TString &): (sessionResult.IsSuccess())
: Error: GRpc error: (4): Deadline Exceeded
: Error: Grpc error response on endpoint localhost:11087
TBackTrace::Capture()+28 (0x19090D5C)
NUnitTest::NPrivate::RaiseError(char const*, TBasicString> const&, bool)+592 (0x1954C7E0)
NKikimr::NTxProxyUT::CreateLocalUser(NKikimr::NTxProxyUT::TTestEnv const&, TBasicString> const&, TBasicString> const&, TBasicString> const&)+2057 (0x18C68019)
NKikimr::NTxProxyUT::NTestSuiteSchemeReqAccess::AlterLoginProtect_RootDB(NUnitTest::TTestContext&, NKikimr::NTxProxyUT::NTestSuiteSchemeReqAccess::TAlterLoginTestCase)+1859 (0x18C7E423)
std::__y1::__bind_return, NKikimr::NTxProxyUT::NTestSuiteSchemeReqAccess::TAlterLoginTestCase>, std::__y1::tuple, __is_valid_bind_return, NKikimr::NTxProxyUT::NTestSuiteSchemeReqAccess::TAlterLoginTestCase>, std::__y1::tuple>::value>::type std::__y1::__bind const&, NKikimr::NTxProxyUT::NTestSuiteSchemeReqAccess::TAlterLoginTestCase const&>::operator()[abi:fe200000](NUnitTest::TTestContext&)+588 (0x18CBD03C)
std::__y1::__function::__func, void ()>::operator()()+280 (0x18CAD178)
TColoredProcessor::Run(std::__y1::function, TBasicString> const&, char const*, bool)+534 (0x195839E6)
NUnitTest::TTestBase::Run(std::__y1::function, TBasicString> const&, char const*, bool)+505 (0x19553369)
NKikimr::NTxProxyUT::NTestSuiteSchemeReqAccess::TCurrentTest::Execute()+1204 (0x18CAC024)
NUnitTest::TTestFactory::Execute()+2438 (0x19554C36)
NUnitTest::RunMain(int, char**)+5213 (0x1957DF5D)
??+0 (0x7FF9A0A58D90)
__libc_start_main+128 (0x7FF9A0A58E40)
_start+41 (0x16600029)
|90.9%| [TA] {RESULT} $(B)/ydb/core/kqp/ut/pg/test-results/unittest/{meta.json ... results_accumulator.log}
|90.9%| [LD] {RESULT} $(B)/ydb/library/query_actor/ut/ydb-library-query_actor-ut
|90.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/persqueue/ut/ut_with_sdk/ydb-core-persqueue-ut-ut_with_sdk
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_serverless/unittest >> TSchemeShardServerLess::TestServerlessComputeResourcesModeFeatureFlag [GOOD]
Test command err:
Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140]
IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140]
Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140]
2025-05-07T08:57:38.802949Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1
2025-05-07T08:57:38.803106Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10
2025-05-07T08:57:38.803150Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s
2025-05-07T08:57:38.803204Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration
2025-05-07T08:57:38.803250Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000
2025-05-07T08:57:38.803298Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000
2025-05-07T08:57:38.803349Z node 1 :FLAT_TX_SCHEMESHARD NOTICE:
schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:57:38.803425Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:57:38.804120Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:57:38.804469Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:57:38.884689Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:57:38.884761Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:57:38.912571Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:57:38.912814Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:57:38.912999Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:57:38.935251Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:57:38.935667Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:57:38.936395Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:57:38.936653Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:57:38.955013Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:57:38.956613Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:57:38.956714Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:57:38.956805Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:57:38.956856Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:57:38.956894Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:57:38.957136Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:57:38.968395Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T08:57:39.109867Z 
node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:57:39.110180Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:57:39.110423Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:57:39.110699Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:57:39.110767Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:57:39.114456Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:57:39.114651Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:57:39.114936Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:57:39.115016Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:57:39.115065Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:57:39.115108Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:57:39.117837Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:57:39.117906Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:57:39.118034Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:57:39.127056Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:57:39.127158Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:57:39.127230Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:57:39.127314Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation 
IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:57:39.131500Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:57:39.138949Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:57:39.139250Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:57:39.140391Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:57:39.140581Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:57:39.140661Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:57:39.141048Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:57:39.141141Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:57:39.141389Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:57:39.141503Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:57:39.146053Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:57:39.146117Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:57:39.146382Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:57:39.146451Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 
_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 104:0, at schemeshard: 72057594046678944 2025-05-07T08:57:39.971750Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 104:0, at schemeshard: 72057594046678944 2025-05-07T08:57:39.971810Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 104:0, at schemeshard: 72057594046678944 2025-05-07T08:57:39.971863Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 104:0, at tablet# 72057594046678944 2025-05-07T08:57:39.971927Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 104 ready parts: 1/1 2025-05-07T08:57:39.972091Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 104 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:57:39.983524Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 104:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:104 msg type: 269090816 2025-05-07T08:57:39.983698Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 104, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 104 at step: 5000005 FAKE_COORDINATOR: advance: minStep5000005 State->FrontStep: 5000004 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 104 at step: 5000005 2025-05-07T08:57:39.984134Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000005, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:57:39.984303Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 104 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000005 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:57:39.984375Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 104:0, at tablet# 72057594046678944 2025-05-07T08:57:39.984672Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 104:0 128 -> 240 2025-05-07T08:57:39.984738Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 104:0, at tablet# 72057594046678944 2025-05-07T08:57:39.984867Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 5 2025-05-07T08:57:39.984998Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:567: DoUpdateTenant no hasChanges, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], tenantLink: TSubDomainsLinks::TLink { DomainKey: [OwnerId: 72057594046678944, LocalPathId: 3], Generation: 2, ActorId:[1:612:2540], EffectiveACLVersion: 0, SubdomainVersion: 
2, UserAttributesVersion: 1, TenantHive: 18446744073709551615, TenantSysViewProcessor: 18446744073709551615, TenantStatisticsAggregator: 18446744073709551615, TenantGraphShard: 18446744073709551615, TenantRootACL: }, subDomain->GetVersion(): 2, actualEffectiveACLVersion: 0, actualUserAttrsVersion: 1, tenantHive: 18446744073709551615, tenantSysViewProcessor: 18446744073709551615, at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 104 2025-05-07T08:57:39.996303Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:57:39.996383Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 104, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-05-07T08:57:39.996582Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:57:39.996635Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:205:2207], at schemeshard: 72057594046678944, txId: 104, path id: 3 2025-05-07T08:57:39.996992Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 104:0, at schemeshard: 72057594046678944 2025-05-07T08:57:39.997048Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_extsubdomain.cpp:787: [72057594046678944] TSyncHive, operationId 104:0, ProgressState, NeedSyncHive: 0 2025-05-07T08:57:39.997088Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 104:0 240 -> 240 2025-05-07T08:57:40.006420Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 4 PathOwnerId: 72057594046678944, cookie: 104 2025-05-07T08:57:40.006720Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 4 PathOwnerId: 72057594046678944, cookie: 104 2025-05-07T08:57:40.006780Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 104 2025-05-07T08:57:40.006837Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 104, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 4 2025-05-07T08:57:40.006901Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 6 2025-05-07T08:57:40.007043Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 104, ready parts: 0/1, is published: true 2025-05-07T08:57:40.020707Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 104:0, at schemeshard: 72057594046678944 2025-05-07T08:57:40.020787Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046678944] TDone opId# 104:0 ProgressState 2025-05-07T08:57:40.020905Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part 
operation is done id#104:0 progress is 1/1 2025-05-07T08:57:40.020958Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-05-07T08:57:40.021015Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#104:0 progress is 1/1 2025-05-07T08:57:40.021049Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-05-07T08:57:40.021110Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 104, ready parts: 1/1, is published: true 2025-05-07T08:57:40.021166Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-05-07T08:57:40.021251Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 104:0 2025-05-07T08:57:40.021305Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 104:0 2025-05-07T08:57:40.021532Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 5 2025-05-07T08:57:40.022369Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 TestModificationResult got TxId: 104, wait until txId: 104 TestWaitNotification wait txId: 104 2025-05-07T08:57:40.028350Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 104: send EvNotifyTxCompletion 2025-05-07T08:57:40.028416Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 104 2025-05-07T08:57:40.028931Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 104, at schemeshard: 72057594046678944 2025-05-07T08:57:40.029051Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 104: got EvNotifyTxCompletionResult 2025-05-07T08:57:40.029098Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 104: satisfy waiter [1:767:2649] TestWaitNotification: OK eventTxId 104 TestModificationResults wait txId: 105 2025-05-07T08:57:40.036360Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpAlterExtSubDomain SubDomain { Name: "ServerLess0" ServerlessComputeResourcesMode: EServerlessComputeResourcesModeExclusive } } TxId: 105 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:57:40.036589Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_extsubdomain.cpp:1102: [72057594046678944] CreateCompatibleAlterExtSubDomain, opId 105:0, feature flag EnableAlterDatabaseCreateHiveFirst 1, tx WorkingDir: "/MyRoot" OperationType: ESchemeOpAlterExtSubDomain SubDomain { Name: "ServerLess0" ServerlessComputeResourcesMode: EServerlessComputeResourcesModeExclusive } 2025-05-07T08:57:40.036640Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_extsubdomain.cpp:1108: [72057594046678944] CreateCompatibleAlterExtSubDomain, opId 105:0, path /MyRoot/ServerLess0 2025-05-07T08:57:40.036796Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__operation_just_reject.cpp:47: TReject Propose, opId: 105:0, explain: Invalid AlterExtSubDomain request: Unsupported: feature flag EnableServerlessExclusiveDynamicNodes is off, at schemeshard: 72057594046678944
2025-05-07T08:57:40.036845Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 105:1, propose status:StatusPreconditionFailed, reason: Invalid AlterExtSubDomain request: Unsupported: feature flag EnableServerlessExclusiveDynamicNodes is off, at schemeshard: 72057594046678944
2025-05-07T08:57:40.048178Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 105, response: Status: StatusPreconditionFailed Reason: "Invalid AlterExtSubDomain request: Unsupported: feature flag EnableServerlessExclusiveDynamicNodes is off" TxId: 105 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944
2025-05-07T08:57:40.048369Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 105, database: /MyRoot, subject: , status: StatusPreconditionFailed, reason: Invalid AlterExtSubDomain request: Unsupported: feature flag EnableServerlessExclusiveDynamicNodes is off, operation: ALTER DATABASE, path: /MyRoot/ServerLess0
TestModificationResult got TxId: 105, wait until txId: 105
|90.9%| [LD] {RESULT} $(B)/ydb/core/persqueue/ut/ut_with_sdk/ydb-core-persqueue-ut-ut_with_sdk
|91.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/persqueue/ut/ut_with_sdk/ydb-core-persqueue-ut-ut_with_sdk
>> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_index_0__SYNC-pk_types4-all_types4-index4---SYNC] [GOOD]
>> DataShardWrite::UpsertPrepared+Volatile [GOOD]
>> DataShardWrite::UpsertPrepared-Volatile
------- [TM] {asan, default-linux-x86_64, release} ydb/library/ycloud/impl/ut/unittest >> FolderServiceTest::TFolderServiceAdapter [GOOD]
Test command err:
2025-05-07T08:57:34.026558Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501625326348535507:2079];send_to=[0:7307199536658146131:7762515];
2025-05-07T08:57:34.027492Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message;
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0029de/r3tmp/tmp2rATBi/pdisk_1.dat
2025-05-07T08:57:35.129997Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError;
2025-05-07T08:57:35.131202Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2025-05-07T08:57:35.131285Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting
2025-05-07T08:57:35.139438Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected
2025-05-07T08:57:35.256051Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded
TClient is connected to server localhost:62441
WaitRootIsUp 'Root'...
TClient::Ls request: Root
TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED)
WaitRootIsUp 'Root' success.
2025-05-07T08:57:36.510528Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480
waiting...
2025-05-07T08:57:36.595087Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [517000072088] Connect to grpc://localhost:19915
2025-05-07T08:57:36.595921Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000072088] Request ListFoldersRequest { id: "i_am_exists" }
2025-05-07T08:57:36.810537Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [517000072088] Response ListFoldersResponse { result { cloud_id: "cloud_from_old_service" } }
2025-05-07T08:57:36.819213Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [517000061d88] Connect to grpc://localhost:5886
2025-05-07T08:57:36.820112Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000061d88] Request ResolveFoldersRequest { folder_ids: "i_am_exists" }
2025-05-07T08:57:36.860413Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [517000061d88] Response ResolveFoldersResponse { resolved_folders { cloud_id: "cloud_from_new_service" } }
2025-05-07T08:57:36.862500Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000061d88] Request ResolveFoldersRequest { folder_ids: "i_am_not_exists" }
2025-05-07T08:57:36.872021Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [517000061d88] Status 5 Not Found
2025-05-07T08:57:36.874741Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000072088] Request ListFoldersRequest { id: "i_am_not_exists" }
2025-05-07T08:57:36.879473Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [517000072088] Status 5 Not Found
>> TSchemeShardServerLess::TestServerlessComputeResourcesMode
>> DataShardWrite::ExecSQLUpsertPrepared-EvWrite-Volatile [GOOD]
>> DataShardWrite::ExecSQLUpsertPrepared+EvWrite-Volatile
------- [TM] {asan, default-linux-x86_64, release} ydb/library/ycloud/impl/ut/unittest >> TAccessServiceTest::PassRequestId [GOOD]
Test command err:
2025-05-07T08:57:35.759573Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501625335103621174:2127];send_to=[0:7307199536658146131:7762515];
2025-05-07T08:57:35.759618Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message;
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0029e4/r3tmp/tmp380eXS/pdisk_1.dat
2025-05-07T08:57:36.708094Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded
2025-05-07T08:57:36.716376Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2025-05-07T08:57:36.716474Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting
2025-05-07T08:57:36.723700Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected
TClient is connected to server localhost:8773
WaitRootIsUp 'Root'...
TClient::Ls request: Root
TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED)
WaitRootIsUp 'Root' success.
2025-05-07T08:57:37.253268Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480
waiting...
2025-05-07T08:57:37.493070Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [51700007be08]{trololo} Connect to grpc://localhost:30813
2025-05-07T08:57:37.494580Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [51700007be08]{trololo} Request AuthenticateRequest { iam_token: "**** (717F937C)" }
2025-05-07T08:57:37.564790Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [51700007be08]{trololo} Response AuthenticateResponse { subject { user_account { id: "1234" } } }
>> BackupRestore::TestAllPrimitiveTypes-FLOAT [FAIL]
>> BackupRestore::TestAllPrimitiveTypes-DOUBLE
>> TSchemeShardServerLess::BaseCase-AlterDatabaseCreateHiveFirst-true
>> AsyncIndexChangeExchange::SenderShouldShakeHandsOnce [GOOD]
>> AsyncIndexChangeExchange::SenderShouldShakeHandsTwice
>> TTableProfileTests::UseDefaultProfile [GOOD]
>> TTableProfileTests::UseTableProfilePreset
>> SystemView::AuthPermissions_ResultOrder [GOOD]
>> SystemView::AuthPermissions_Selects
>> Cdc::KeysOnlyLogDebezium [GOOD]
>> Cdc::NewAndOldImagesLog[PqRunner]
>> TSchemeShardServerLess::BaseCase-AlterDatabaseCreateHiveFirst-false
>> Cdc::DocApi[YdsRunner] [GOOD]
>> Cdc::DocApi[TopicRunner]
>> BackupRestoreS3::TestAllPrimitiveTypes-INTERVAL64 [GOOD]
>> BackupRestoreS3::TestAllPrimitiveTypes-STRING
>> TUserAccountServiceTest::Get [GOOD]
>> TSchemeShardServerLess::Fake [GOOD]
>> KqpErrors::ProposeResultLost_RwTx+UseSink
|91.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_ttl/ydb-core-tx-schemeshard-ut_ttl
|91.0%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_ttl/ydb-core-tx-schemeshard-ut_ttl
|91.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_ttl/ydb-core-tx-schemeshard-ut_ttl
|91.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_serverless/unittest >> TSchemeShardServerLess::Fake [GOOD]
>> TSchemeShardServerLess::BaseCase-AlterDatabaseCreateHiveFirst-true [GOOD]
>> KqpErrors::ProposeError
>> Cdc::UpdatesLog[PqRunner] [GOOD]
>> Cdc::UpdatesLog[YdsRunner]
>> DataShardWrite::UpsertPreparedManyTables+Volatile [GOOD]
>> DataShardWrite::UpsertPreparedManyTables-Volatile
------- [TM] {asan, default-linux-x86_64, release} ydb/library/ycloud/impl/ut/unittest >> TUserAccountServiceTest::Get [GOOD]
Test command err:
2025-05-07T08:57:39.022403Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501625353116625048:2196];send_to=[0:7307199536658146131:7762515];
2025-05-07T08:57:39.022987Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message;
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0029bd/r3tmp/tmptCkJp2/pdisk_1.dat
2025-05-07T08:57:39.823629Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded
2025-05-07T08:57:39.830439Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2025-05-07T08:57:39.830535Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting
2025-05-07T08:57:39.838928Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected
TClient is connected to server localhost:9575
WaitRootIsUp 'Root'...
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:57:40.463879Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... >> TSchemeShardServerLess::TestServerlessComputeResourcesMode [GOOD] >> KqpErrors::ResolveTableError >> DataShardWrite::CancelImmediate [GOOD] >> DataShardWrite::DeletePrepared+Volatile >> FolderServiceTest::TFolderServiceTransitional [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/library/ycloud/impl/ut/unittest >> TServiceAccountServiceTest::Get [GOOD] Test command err: 2025-05-07T08:57:32.415974Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501625324215526029:2068];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:57:32.416022Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0029eb/r3tmp/tmpWqbR6I/pdisk_1.dat 2025-05-07T08:57:33.213830Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:57:33.263071Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:57:33.263167Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:57:33.271026Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:24369 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:57:33.747343Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:57:33.763018Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0029eb/r3tmp/tmpuBmdor/pdisk_1.dat 2025-05-07T08:57:39.033302Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:57:39.115365Z node 2 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:57:39.144434Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:57:39.144547Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:57:39.147211Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:62839 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:57:39.435340Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 
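In the TAccessServiceTest::PassRequestId output above, the caller-supplied tag "{trololo}" is echoed on every GRPC_CLIENT line (Connect, Request, Response), which is what lets a single request be traced through the exchange. The sketch below shows that tagging pattern in miniature; the TTaggedClient class and its stubbed response are hypothetical and stand in for the real client.

    #include <iostream>
    #include <string>
    #include <utility>

    // Minimal sketch (hypothetical names, not the real YDB client) of carrying a
    // per-request id through a client so it appears on every log line, as in the
    // PassRequestId exchange above.
    class TTaggedClient {
    public:
        TTaggedClient(std::string endpoint, std::string requestId)
            : Endpoint_(std::move(endpoint)), Tag_("{" + requestId + "}") {
            Log("Connect to grpc://" + Endpoint_);
        }

        std::string Call(const std::string& request) {
            Log("Request " + request);
            std::string response = "AuthenticateResponse { ... }"; // stubbed reply
            Log("Response " + response);
            return response;
        }

    private:
        void Log(const std::string& msg) const {
            std::cout << Tag_ << " " << msg << "\n"; // tag prefixed to every line
        }

        std::string Endpoint_;
        std::string Tag_;
    };

    int main() {
        TTaggedClient client("localhost:30813", "trololo");
        client.Call("AuthenticateRequest { iam_token: \"**** (717F937C)\" }");
    }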
2025-05-07T08:57:39.443082Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_serverless/unittest >> TSchemeShardServerLess::BaseCase-AlterDatabaseCreateHiveFirst-true [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:128:2058] recipient: [1:108:2140] 2025-05-07T08:57:42.782402Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:57:42.782506Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:57:42.782548Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:57:42.782588Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:57:42.782633Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:57:42.782663Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:57:42.782716Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:57:42.782792Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:57:42.783565Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:57:42.783899Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:57:42.871224Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:57:42.871292Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:57:42.889310Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:57:42.889445Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:57:42.889617Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:57:42.898359Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:57:42.898955Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear 
TempDirsState with owners number: 0 2025-05-07T08:57:42.899600Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:57:42.899922Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:57:42.902495Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:57:42.904089Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:57:42.904163Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:57:42.904222Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:57:42.904266Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:57:42.904306Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:57:42.904470Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:57:42.911453Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:239:2058] recipient: [1:15:2062] 2025-05-07T08:57:43.047762Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:57:43.048043Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:57:43.048311Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:57:43.048550Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:57:43.048617Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:57:43.051267Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:57:43.051443Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: 
StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:57:43.051660Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:57:43.051746Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:57:43.051789Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:57:43.051822Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:57:43.054021Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:57:43.054093Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:57:43.054131Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:57:43.056252Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:57:43.056316Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:57:43.056383Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:57:43.056431Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:57:43.060137Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:57:43.062344Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:57:43.062527Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:57:43.063491Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:57:43.063638Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 130 RawX2: 4294969450 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:57:43.063714Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:57:43.063997Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:57:43.064053Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:57:43.064220Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:57:43.064302Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:57:43.066520Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:57:43.066567Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:57:43.066773Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:57:43.066826Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... e, at schemeshard: 72057594046678944, cookie: 106 2025-05-07T08:57:44.079704Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72075186233409546] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 5 TxId_Deprecated: 5 TabletID: 72075186234409549 Forgetting tablet 72075186234409549 2025-05-07T08:57:44.080081Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5898: Free tablet reply, message: Status: OK Origin: 72075186233409546 TxId_Deprecated: 5 ShardOwnerId: 72057594046678944 ShardLocalIdx: 5, at schemeshard: 72057594046678944 2025-05-07T08:57:44.080402Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4 2025-05-07T08:57:44.081302Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:57:44.086233Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72075186233409546] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 7 TxId_Deprecated: 7 TabletID: 72075186234409551 2025-05-07T08:57:44.086889Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72075186233409546] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 6 TxId_Deprecated: 6 TabletID: 72075186234409550 2025-05-07T08:57:44.087176Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5898: Free tablet reply, message: Status: OK Origin: 72075186233409546 TxId_Deprecated: 7 ShardOwnerId: 72057594046678944 ShardLocalIdx: 7, at schemeshard: 72057594046678944 2025-05-07T08:57:44.087422Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 Forgetting tablet 72075186234409551 
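The "Change state for txid 1:0 2 -> 3", "3 -> 128", "128 -> 240" lines above trace a suboperation through its stages. The numeric codes below are copied from the log; pairing them with the TCreateParts, TConfigureParts, TPropose, and TDone stages is an assumption inferred from the adjacent messages, not a statement of YDB's real enum. A small sketch of that progression:

    #include <cstdint>
    #include <iostream>

    // Illustrative state progression matching the log: 2 -> 3 -> 128 -> 240.
    // Stage names are guessed from the surrounding ProgressState messages.
    enum class EOpState : uint16_t {
        CreateParts    = 2,   // TCreateParts ProgressState
        ConfigureParts = 3,   // NSubDomainState::TConfigureParts
        Propose        = 128, // NSubDomainState::TPropose
        Done           = 240, // TDone
    };

    EOpState Next(EOpState s) {
        switch (s) {
            case EOpState::CreateParts:    return EOpState::ConfigureParts;
            case EOpState::ConfigureParts: return EOpState::Propose;
            case EOpState::Propose:        return EOpState::Done;
            default:                       return s; // terminal state
        }
    }

    int main() {
        for (EOpState s = EOpState::CreateParts; s != EOpState::Done; ) {
            EOpState n = Next(s);
            std::cout << static_cast<int>(s) << " -> " << static_cast<int>(n) << "\n";
            s = n;
        }
        // prints: 2 -> 3, 3 -> 128, 128 -> 240, as in the trace above
    }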
2025-05-07T08:57:44.088524Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5898: Free tablet reply, message: Status: OK Origin: 72075186233409546 TxId_Deprecated: 6 ShardOwnerId: 72057594046678944 ShardLocalIdx: 6, at schemeshard: 72057594046678944 2025-05-07T08:57:44.088722Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 Forgetting tablet 72075186234409550 2025-05-07T08:57:44.089561Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:205: TTxCleanDroppedSubDomains Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-05-07T08:57:44.089621Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: PersistRemoveSubDomain for PathId# [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-05-07T08:57:44.089727Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-05-07T08:57:44.090023Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 106 2025-05-07T08:57:44.090216Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-05-07T08:57:44.090269Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-05-07T08:57:44.090342Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-05-07T08:57:44.092778Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:5 2025-05-07T08:57:44.092844Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:5 tabletId 72075186234409549 2025-05-07T08:57:44.095497Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:7 2025-05-07T08:57:44.095575Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:7 tabletId 72075186234409551 2025-05-07T08:57:44.095687Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:6 2025-05-07T08:57:44.095763Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:6 tabletId 72075186234409550 2025-05-07T08:57:44.095967Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-05-07T08:57:44.096024Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 106, wait until txId: 106 TestWaitNotification wait txId: 106 2025-05-07T08:57:44.096347Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- 
TTxNotificationSubscriber for txId 106: send EvNotifyTxCompletion 2025-05-07T08:57:44.096392Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 106 2025-05-07T08:57:44.096850Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 106, at schemeshard: 72057594046678944 2025-05-07T08:57:44.096953Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 106: got EvNotifyTxCompletionResult 2025-05-07T08:57:44.096986Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 106: satisfy waiter [1:931:2795] TestWaitNotification: OK eventTxId 106 2025-05-07T08:57:44.097591Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/ServerLess0/dir/table0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T08:57:44.097818Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/ServerLess0/dir/table0" took 239us result status StatusPathDoesNotExist 2025-05-07T08:57:44.098013Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/ServerLess0/dir/table0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/ServerLess0/dir/table0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: true } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-05-07T08:57:44.098505Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/ServerLess0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T08:57:44.098682Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/ServerLess0" took 164us result status StatusPathDoesNotExist 2025-05-07T08:57:44.098823Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/ServerLess0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/ServerLess0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: true } } PathOwnerId: 
18446744073709551615, at schemeshard: 72057594046678944 2025-05-07T08:57:44.099333Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T08:57:44.099593Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 185us result status StatusSuccess 2025-05-07T08:57:44.099984Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 9 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 9 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 7 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "SharedDB" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeExtSubDomain CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 wait until 72075186234409549 is deleted wait until 72075186234409550 is deleted wait until 72075186234409551 is deleted wait until 72075186234409552 is deleted 2025-05-07T08:57:44.100603Z node 1 :HIVE INFO: tablet_helpers.cpp:1476: [72075186233409546] TEvSubscribeToTabletDeletion, 72075186234409549 2025-05-07T08:57:44.100726Z node 1 :HIVE INFO: tablet_helpers.cpp:1476: [72075186233409546] TEvSubscribeToTabletDeletion, 72075186234409550 2025-05-07T08:57:44.100775Z node 1 :HIVE INFO: tablet_helpers.cpp:1476: [72075186233409546] TEvSubscribeToTabletDeletion, 72075186234409551 2025-05-07T08:57:44.100829Z node 1 :HIVE INFO: tablet_helpers.cpp:1476: [72075186233409546] TEvSubscribeToTabletDeletion, 72075186234409552 Deleted tabletId 72075186234409549 Deleted tabletId 72075186234409550 Deleted tabletId 72075186234409551 Deleted tabletId 72075186234409552 >> DataShardWrite::ReplaceImmediate_DefaultValue [GOOD] >> DataShardWrite::UpdateImmediate >> TSchemeShardServerLess::BaseCase-AlterDatabaseCreateHiveFirst-false [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_serverless/unittest >> TSchemeShardServerLess::TestServerlessComputeResourcesMode [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: 
[1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:128:2058] recipient: [1:108:2140] 2025-05-07T08:57:42.986063Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:57:42.986154Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:57:42.986190Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:57:42.986225Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:57:42.986271Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:57:42.986297Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:57:42.986344Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:57:42.986426Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:57:42.987153Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:57:42.987445Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:57:43.072164Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:57:43.072227Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:57:43.087879Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:57:43.088004Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:57:43.088159Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:57:43.096176Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:57:43.096593Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:57:43.097468Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:57:43.098086Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:57:43.100416Z 
node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:57:43.101757Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:57:43.101814Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:57:43.102068Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:57:43.102113Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:57:43.102152Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:57:43.102235Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:57:43.108764Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:239:2058] recipient: [1:15:2062] 2025-05-07T08:57:43.474485Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:57:43.474886Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:57:43.475102Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:57:43.475368Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:57:43.475432Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:57:43.478011Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:57:43.478225Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:57:43.478425Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:57:43.478497Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:57:43.478545Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:57:43.478578Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:57:43.480861Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:57:43.480929Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:57:43.480986Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:57:43.483046Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:57:43.483096Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:57:43.483152Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:57:43.483204Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:57:43.493393Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:57:43.495744Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:57:43.495956Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:57:43.496959Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:57:43.497120Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 130 RawX2: 4294969450 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:57:43.497193Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:57:43.497491Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:57:43.497559Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, 
operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:57:43.497741Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:57:43.497819Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:57:43.500042Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:57:43.500094Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:57:43.500308Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:57:43.500418Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... HARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72075186234409549 2025-05-07T08:57:44.272753Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:712:2614], at schemeshard: 72075186234409549, txId: 0, path id: 1 2025-05-07T08:57:44.273458Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 106:0 from tablet: 72057594046678944 to tablet: 72075186233409546 cookie: 72057594046678944:3 msg type: 268697640 2025-05-07T08:57:44.273551Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 106, partId: 0, tablet: 72075186233409546 2025-05-07T08:57:44.273665Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72075186234409549, msg: Owner: 72075186234409549 Generation: 2 LocalPathId: 1 Version: 6 PathOwnerId: 72075186234409549, cookie: 0 2025-05-07T08:57:44.274281Z node 1 :HIVE INFO: tablet_helpers.cpp:1453: [72075186233409546] TEvUpdateDomain, msg: DomainKey { SchemeShard: 72057594046678944 PathId: 3 } ServerlessComputeResourcesMode: EServerlessComputeResourcesModeShared TxId: 106 2025-05-07T08:57:44.274396Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5954: Update domain reply, message: Origin: 72075186233409546 TxId: 106, at schemeshard: 72057594046678944 2025-05-07T08:57:44.274433Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation FindRelatedPartByTabletId, TxId: 106, tablet: 72075186233409546, partId: 0 2025-05-07T08:57:44.274544Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 106:0, at schemeshard: 72057594046678944, message: Origin: 72075186233409546 TxId: 106 2025-05-07T08:57:44.274595Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_extsubdomain.cpp:822: [72057594046678944] TSyncHive, operationId 106:0, HandleReply TEvUpdateDomainReply, from hive: 72075186233409546 2025-05-07T08:57:44.274641Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 106:0 138 -> 240 2025-05-07T08:57:44.276556Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 106 2025-05-07T08:57:44.276643Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__sync_update_tenants.cpp:36: TTxSyncTenant DoComplete, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-05-07T08:57:44.277860Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 106:0, at schemeshard: 72057594046678944 2025-05-07T08:57:44.278045Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 106:0, at schemeshard: 72057594046678944 2025-05-07T08:57:44.278103Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046678944] TDone opId# 106:0 ProgressState 2025-05-07T08:57:44.278201Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#106:0 progress is 1/1 2025-05-07T08:57:44.278268Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 106 ready parts: 1/1 2025-05-07T08:57:44.278311Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#106:0 progress is 1/1 2025-05-07T08:57:44.278343Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 106 ready parts: 1/1 2025-05-07T08:57:44.278389Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 106, ready parts: 1/1, is published: true 2025-05-07T08:57:44.278459Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 106 ready parts: 1/1 2025-05-07T08:57:44.278503Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 106:0 2025-05-07T08:57:44.278533Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 106:0 2025-05-07T08:57:44.278608Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 5 TestModificationResult got TxId: 106, wait until txId: 106 TestWaitNotification wait txId: 106 2025-05-07T08:57:44.281482Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 106: send EvNotifyTxCompletion 2025-05-07T08:57:44.281530Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 106 2025-05-07T08:57:44.282096Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 106, at schemeshard: 72057594046678944 2025-05-07T08:57:44.282231Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 106: got EvNotifyTxCompletionResult 2025-05-07T08:57:44.282277Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 106: satisfy waiter [1:850:2732] TestWaitNotification: OK eventTxId 106 2025-05-07T08:57:44.282992Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/ServerLess0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 
72057594046678944 2025-05-07T08:57:44.283220Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/ServerLess0" took 289us result status StatusSuccess 2025-05-07T08:57:44.283567Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/ServerLess0" PathDescription { Self { Name: "ServerLess0" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeExtSubDomain CreateFinished: true CreateTxId: 103 CreateStep: 5000004 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 6 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 6 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 4 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 4 PlanResolution: 50 Coordinators: 72075186234409550 TimeCastBucketsPerMediator: 2 Mediators: 72075186234409551 SchemeShard: 72075186234409549 } DomainKey { SchemeShard: 72057594046678944 PathId: 3 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } SharedHive: 72075186233409546 ServerlessComputeResourcesMode: EServerlessComputeResourcesModeShared } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:57:44.284225Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/ServerLess0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72075186234409549 2025-05-07T08:57:44.284449Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72075186234409549 describe path "/MyRoot/ServerLess0" took 179us result status StatusSuccess 2025-05-07T08:57:44.284939Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/ServerLess0" PathDescription { Self { Name: "MyRoot/ServerLess0" PathId: 1 SchemeshardId: 72075186234409549 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 6 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 6 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 4 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 3 ProcessingParams { Version: 4 PlanResolution: 50 Coordinators: 72075186234409550 TimeCastBucketsPerMediator: 2 Mediators: 72075186234409551 SchemeShard: 72075186234409549 } DomainKey { SchemeShard: 72057594046678944 PathId: 3 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 
0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot/ServerLess0" } SharedHive: 72075186233409546 ServerlessComputeResourcesMode: EServerlessComputeResourcesModeShared } } PathId: 1 PathOwnerId: 72075186234409549, at schemeshard: 72075186234409549 2025-05-07T08:57:44.285605Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/ServerLess0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T08:57:44.285786Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/ServerLess0" took 189us result status StatusSuccess 2025-05-07T08:57:44.286161Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/ServerLess0" PathDescription { Self { Name: "ServerLess0" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeExtSubDomain CreateFinished: true CreateTxId: 103 CreateStep: 5000004 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 6 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 6 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 4 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 4 PlanResolution: 50 Coordinators: 72075186234409550 TimeCastBucketsPerMediator: 2 Mediators: 72075186234409551 SchemeShard: 72075186234409549 } DomainKey { SchemeShard: 72057594046678944 PathId: 3 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } SharedHive: 72075186233409546 ServerlessComputeResourcesMode: EServerlessComputeResourcesModeShared } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:57:44.286684Z node 1 :HIVE INFO: tablet_helpers.cpp:1470: [72075186233409546] TEvRequestDomainInfo, 72057594046678944:3 >> TTableProfileTests::ExplicitPartitionsSimple [GOOD] >> TTableProfileTests::ExplicitPartitionsUnordered ------- [TM] {asan, default-linux-x86_64, release} ydb/library/ycloud/impl/ut/unittest >> FolderServiceTest::TFolderServiceTransitional [GOOD] Test command err: 2025-05-07T08:57:40.634590Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501625359718336350:2199];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:57:40.635170Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0029d1/r3tmp/tmpUm6HP2/pdisk_1.dat 2025-05-07T08:57:41.236703Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:57:41.251053Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 
Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:57:41.251155Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:57:41.262166Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:26370 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:57:41.587588Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:57:41.630903Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T08:57:41.651033Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [51700007be08] Connect to grpc://localhost:19534 2025-05-07T08:57:41.727912Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [51700007be08] Request ListFoldersRequest { id: "i_am_not_exists" } 2025-05-07T08:57:41.740612Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [51700007be08] Status 14 failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:19534: Failed to connect to remote host: Connection refused 2025-05-07T08:57:41.742180Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [51700007be08] Request ListFoldersRequest { id: "i_am_not_exists" } 2025-05-07T08:57:41.750782Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [51700007be08] Status 14 failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:19534: Failed to connect to remote host: Connection refused 2025-05-07T08:57:42.754259Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [51700007be08] Request ListFoldersRequest { id: "i_am_not_exists" } 2025-05-07T08:57:42.758283Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [51700007be08] Status 5 Not Found 2025-05-07T08:57:42.759013Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [51700007be08] Request ListFoldersRequest { id: "i_am_exists" } 2025-05-07T08:57:42.762301Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [51700007be08] Response ListFoldersResponse { result { cloud_id: "response_cloud_id" } } |91.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/cms/ut_sentinel_unstable/ydb-core-cms-ut_sentinel_unstable |91.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/cms/ut_sentinel_unstable/ydb-core-cms-ut_sentinel_unstable |91.0%| [LD] {RESULT} 
$(B)/ydb/core/cms/ut_sentinel_unstable/ydb-core-cms-ut_sentinel_unstable ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_serverless/unittest >> TSchemeShardServerLess::BaseCase-AlterDatabaseCreateHiveFirst-false [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140] 2025-05-07T08:57:43.803702Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:57:43.803825Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:57:43.803876Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:57:43.803923Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:57:43.803968Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:57:43.804007Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:57:43.804059Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:57:43.804120Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:57:43.804858Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:57:43.805263Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:57:43.896314Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:57:43.896375Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:57:43.917728Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:57:43.917992Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:57:43.918190Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:57:43.925050Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:57:43.925415Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:57:43.926181Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:57:43.926394Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:57:43.931195Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:57:43.932721Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:57:43.932797Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:57:43.932886Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:57:43.932934Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:57:43.932979Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:57:43.933279Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:57:43.952084Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T08:57:44.141734Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:57:44.142009Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:57:44.142296Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:57:44.142522Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:57:44.142623Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:57:44.147234Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:57:44.147470Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:57:44.147711Z node 1 :FLAT_TX_SCHEMESHARD 
DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:57:44.147779Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:57:44.147819Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:57:44.147856Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:57:44.151617Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:57:44.151693Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:57:44.151744Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:57:44.154057Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:57:44.154127Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:57:44.154199Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:57:44.154255Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:57:44.158098Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:57:44.160546Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:57:44.160836Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:57:44.161958Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:57:44.162134Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:57:44.162204Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply 
TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:57:44.162507Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:57:44.162578Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:57:44.162770Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:57:44.162865Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:57:44.165198Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:57:44.165252Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:57:44.165513Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:57:44.165567Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 44 ShardLocalIdx: 5 TxId_Deprecated: 5 TabletID: 72075186234409546 2025-05-07T08:57:45.151823Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 Forgetting tablet 72075186234409546 2025-05-07T08:57:45.155502Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5898: Free tablet reply, message: Status: OK Origin: 72075186233409546 TxId_Deprecated: 5 ShardOwnerId: 72057594046678944 ShardLocalIdx: 5, at schemeshard: 72057594046678944 2025-05-07T08:57:45.155918Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4 2025-05-07T08:57:45.157005Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72075186233409546] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 7 TxId_Deprecated: 7 TabletID: 72075186234409548 2025-05-07T08:57:45.182267Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 106 2025-05-07T08:57:45.183673Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72075186233409546] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 6 TxId_Deprecated: 6 TabletID: 72075186234409547 Forgetting tablet 72075186234409548 2025-05-07T08:57:45.184777Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 106 Forgetting tablet 72075186234409547 2025-05-07T08:57:45.185293Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5898: Free tablet reply, message: Status: OK Origin: 72075186233409546 TxId_Deprecated: 7 ShardOwnerId: 72057594046678944 ShardLocalIdx: 7, at schemeshard: 72057594046678944 2025-05-07T08:57:45.185577Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: 
DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-05-07T08:57:45.186796Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5898: Free tablet reply, message: Status: OK Origin: 72075186233409546 TxId_Deprecated: 6 ShardOwnerId: 72057594046678944 ShardLocalIdx: 6, at schemeshard: 72057594046678944 2025-05-07T08:57:45.187031Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-05-07T08:57:45.188676Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:205: TTxCleanDroppedSubDomains Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-05-07T08:57:45.188746Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: PersistRemoveSubDomain for PathId# [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-05-07T08:57:45.188875Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-05-07T08:57:45.189384Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-05-07T08:57:45.189438Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-05-07T08:57:45.189511Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-05-07T08:57:45.195852Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:5 2025-05-07T08:57:45.195932Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:5 tabletId 72075186234409546 2025-05-07T08:57:45.196072Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:7 2025-05-07T08:57:45.196107Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:7 tabletId 72075186234409548 2025-05-07T08:57:45.202144Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:6 2025-05-07T08:57:45.202263Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:6 tabletId 72075186234409547 2025-05-07T08:57:45.202472Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-05-07T08:57:45.202642Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 106, wait until txId: 106 TestWaitNotification wait txId: 106 2025-05-07T08:57:45.202978Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 106: send EvNotifyTxCompletion 2025-05-07T08:57:45.203024Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 106 2025-05-07T08:57:45.203569Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 106, at schemeshard: 72057594046678944 2025-05-07T08:57:45.203702Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 106: got EvNotifyTxCompletionResult 2025-05-07T08:57:45.203738Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 106: satisfy waiter [1:919:2780] TestWaitNotification: OK eventTxId 106 2025-05-07T08:57:45.204434Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/ServerLess0/dir/table0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T08:57:45.206736Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/ServerLess0/dir/table0" took 278us result status StatusPathDoesNotExist 2025-05-07T08:57:45.207074Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/ServerLess0/dir/table0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/ServerLess0/dir/table0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: true } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-05-07T08:57:45.207913Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/ServerLess0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T08:57:45.208170Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/ServerLess0" took 248us result status StatusPathDoesNotExist 2025-05-07T08:57:45.208332Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/ServerLess0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/ServerLess0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: true } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-05-07T08:57:45.209049Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: 
schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T08:57:45.209305Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 260us result status StatusSuccess 2025-05-07T08:57:45.209713Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 9 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 9 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 7 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "SharedDB" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeExtSubDomain CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 wait until 72075186233409550 is deleted wait until 72075186233409551 is deleted wait until 72075186233409552 is deleted wait until 72075186233409553 is deleted 2025-05-07T08:57:45.210464Z node 1 :HIVE INFO: tablet_helpers.cpp:1476: [72075186233409546] TEvSubscribeToTabletDeletion, 72075186233409550 2025-05-07T08:57:45.210605Z node 1 :HIVE INFO: tablet_helpers.cpp:1476: [72075186233409546] TEvSubscribeToTabletDeletion, 72075186233409551 2025-05-07T08:57:45.210640Z node 1 :HIVE INFO: tablet_helpers.cpp:1476: [72075186233409546] TEvSubscribeToTabletDeletion, 72075186233409552 2025-05-07T08:57:45.210697Z node 1 :HIVE INFO: tablet_helpers.cpp:1476: [72075186233409546] TEvSubscribeToTabletDeletion, 72075186233409553 Deleted tabletId 72075186233409550 Deleted tabletId 72075186233409551 Deleted tabletId 72075186233409552 Deleted tabletId 72075186233409553 >> TestYmqHttpProxy::TestCreateQueue >> DataShardWrite::UpsertPrepared-Volatile [GOOD] >> DataShardWrite::UpsertNoLocksArbiter >> TestKinesisHttpProxy::UnauthorizedGetShardIteratorRequest >> BackupRestore::TestAllPrimitiveTypes-INTERVAL64 [GOOD] >> BackupRestore::TestAllPrimitiveTypes-STRING >> TestKinesisHttpProxy::CreateStreamInIncorrectDb >> TestKinesisHttpProxy::DifferentContentTypes >> TestKinesisHttpProxy::MissingAction >> TestYmqHttpProxy::TestCreateQueueWithSameNameAndSameParams >> DataShardWrite::ExecSQLUpsertPrepared+EvWrite-Volatile [GOOD] 
>> DataShardWrite::ExecSQLUpsertPrepared-EvWrite+Volatile >> TServiceAccountServiceTest::IssueToken [GOOD] >> BackupRestore::TestAllPrimitiveTypes-UUID [GOOD] >> Cdc::NewAndOldImagesLog[PqRunner] [GOOD] >> Cdc::NewAndOldImagesLog[YdsRunner] >> AsyncIndexChangeExchange::SenderShouldShakeHandsTwice [GOOD] >> AsyncIndexChangeExchange::SenderShouldShakeHandsAfterAddingIndex >> BuildStatsHistogram::Many_Serial [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-DATE [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-DATETIME >> DataShardWrite::DeletePrepared+Volatile [GOOD] >> DataShardWrite::DeletePrepared-Volatile >> BackupRestoreS3::RestoreViewQueryText [GOOD] >> BackupRestoreS3::RestoreViewReferenceTable >> DataShardWrite::UpsertPreparedManyTables-Volatile [GOOD] >> DataShardWrite::UpsertPreparedNoTxCache+Volatile >> TPersQueueTest::NoDecompressionMemoryLeaks [GOOD] >> TPersQueueTest::PreferredCluster_TwoEnabledClustersAndWriteSessionsWithDifferentPreferredCluster_SessionWithMismatchedClusterDiesAndOthersAlive >> DataShardWrite::UpdateImmediate [GOOD] >> DataShardWrite::RejectOnChangeQueueOverflow >> Cdc::UpdatesLog[YdsRunner] [GOOD] >> Cdc::UpdatesLog[TopicRunner] >> BackupRestore::TestAllPrimitiveTypes-DOUBLE [GOOD] >> BackupRestore::TestAllPrimitiveTypes-DATE ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tablet_flat/ut/unittest >> BuildStatsHistogram::Many_Serial [GOOD] Test command err: Got : 24000 2106439 49449 9 9 Expected: 24000 2106439 49449 9 9 { [2455, 2599), [2798, 3624), [4540, 4713), [5654, 7161), [8509, 8794), [8936, 9973), [11888, 14280), [14337, 14882), [15507, 16365), [17368, 19451), [19536, 20135), [20790, 21503), [21589, 23243) } Got : 12816 1121048 49449 9 9 Expected: 12816 1121048 49449 9 9 Got : 24000 3547100 81694 9 9 Expected: 24000 3547100 81694 9 9 { [1012, 1475), [1682, 1985), [2727, 3553), [3599, 3992), [5397, 7244), [9181, 9807), [9993, 10178), [12209, 14029), [15089, 15342), [16198, 16984), [17238, 18436), [21087, 21876), [23701, 23794) } Got : 9582 1425282 81694 9 9 Expected: 9582 1425282 81694 9 9 Got : 24000 2460139 23760 9 9 Expected: 24000 2460139 23760 9 9 { [1296, 2520), [3888, 4320), [5040, 6840), [6912, 7272), [10872, 11160), [11520, 12096), [12096, 13824), [15192, 15624), [17064, 17856), [18216, 19296), [19800, 20160), [20736, 21096), [21096, 22104) } Got : 10440 1060767 23760 9 9 Expected: 10440 1060767 23760 9 9 Got : 24000 4054050 46562 9 9 Expected: 24000 4054050 46562 9 9 { [460, 1518), [2300, 2484), [2760, 4002), [4600, 5842), [6302, 9752), [11178, 12328), [14582, 14858), [16790, 18032), [18216, 18446), [18722, 19504), [19504, 19964), [20378, 20470), [21344, 23506) } Got : 13570 2273213 46562 9 9 Expected: 13570 2273213 46562 9 9 Got : 24000 2106459 49449 9 9 Expected: 24000 2106459 49449 9 9 Got : 24000 2460219 23555 9 9 Expected: 24000 2460219 23555 9 9 Got : 24000 4054270 46543 9 9 Expected: 24000 4054270 46543 9 9 Got : 24000 2106439 25272 38 44 Expected: 24000 2106439 25272 38 44 { [2455, 2599), [2798, 3624), [4540, 4713), [5654, 7161), [8509, 8794), [8936, 9973), [11888, 14280), [14337, 14882), [15507, 16365), [17368, 19451), [19536, 20135), [20790, 21503), [21589, 23243) } Got : 12816 1121048 25272 20 23 Expected: 12816 1121048 25272 20 23 Got : 24000 3547100 49916 64 44 Expected: 24000 3547100 49916 64 44 { [1012, 1475), [1682, 1985), [2727, 3553), [3599, 3992), [5397, 7244), [9181, 9807), [9993, 10178), [12209, 14029), [15089, 15342), [16198, 16984), [17238, 18436), [21087, 21876), [23701, 23794) } Got : 9582 1425198 49916 26 
17 Expected: 9582 1425198 49916 26 17 Got : 24000 2460139 13170 42 41 Expected: 24000 2460139 13170 42 41 { [1296, 2520), [3888, 4320), [5040, 6840), [6912, 7272), [10872, 11160), [11520, 12096), [12096, 13824), [15192, 15624), [17064, 17856), [18216, 19296), [19800, 20160), [20736, 21096), [21096, 22104) } Got : 10440 1060798 13170 18 18 Expected: 10440 1060798 13170 18 18 Got : 24000 4054050 29361 68 43 Expected: 24000 4054050 29361 68 43 { [460, 1518), [2300, 2484), [2760, 4002), [4600, 5842), [6302, 9752), [11178, 12328), [14582, 14858), [16790, 18032), [18216, 18446), [18722, 19504), [19504, 19964), [20378, 20470), [21344, 23506) } Got : 13570 2277890 29361 38 24 Expected: 13570 2277890 29361 38 24 Got : 24000 2106459 25428 38 44 Expected: 24000 2106459 25428 38 44 Got : 24000 2460219 13482 41 41 Expected: 24000 2460219 13482 41 41 Got : 24000 4054270 29970 67 43 Expected: 24000 4054270 29970 67 43 Got : 24000 2106479 25458 38 44 Expected: 24000 2106479 25458 38 44 Got : 24000 2460259 13528 42 41 Expected: 24000 2460259 13528 42 41 Got : 24000 4054290 30013 67 43 Expected: 24000 4054290 30013 67 43 1 parts: [0:0:1:0:0:0:0] 240000 rows, 10181 pages, 7 levels: (159964, 53329) (319996, 106673) (479902, 159975) (639565, 213196) (799303, 266442) Checking BTree: Touched 0% bytes, 4 pages RowCountHistogram: 10% (actual 10%) key = (80152, 26725) value = 24033 (actual 24079 - 0% error) 10% (actual 10%) key = (160300, 53441) value = 48088 (actual 48136 - 0% error) 10% (actual 10%) key = (241096, 80373) value = 72280 (actual 72327 - 0% error) 10% (actual 10%) key = (321454, 107159) value = 96428 (actual 96478 - 0% error) 10% (actual 10%) key = (402202, 134075) value = 120604 (actual 120651 - 0% error) 10% (actual 10%) key = (482362, 160795) value = 144727 (actual 144775 - 0% error) 10% (actual 10%) key = (562825, 187616) value = 168893 (actual 168936 - 0% error) 10% (actual 10%) key = (642871, 214298) value = 192974 (actual 193024 - 0% error) 5% (actual 5%) key = (683260, 227761) value = 205073 (actual 205115 - 0% error) 14% (actual 14%) DataSizeHistogram: 10% (actual 10%) key = (80152, 26725) value = 2048715 (actual 2052707 - 0% error) 10% (actual 10%) key = (160300, 53441) value = 4098370 (actual 4102393 - 0% error) 10% (actual 10%) key = (241096, 80373) value = 6145924 (actual 6149966 - 0% error) 10% (actual 10%) key = (321454, 107159) value = 8194622 (actual 8198636 - 0% error) 10% (actual 10%) key = (402202, 134075) value = 10244365 (actual 10248317 - 0% error) 10% (actual 10%) key = (482362, 160795) value = 12292389 (actual 12296360 - 0% error) 10% (actual 10%) key = (562825, 187616) value = 14344066 (actual 14348128 - 0% error) 10% (actual 10%) key = (642871, 214298) value = 16393002 (actual 16396983 - 0% error) 5% (actual 5%) key = (683260, 227761) value = 17416844 (actual 17420850 - 0% error) 14% (actual 14%) Checking Flat: Touched 100% bytes, 1 pages RowCountHistogram: 10% (actual 10%) key = (80065, 26696) value = 24008 (actual 24056 - 0% error) 10% (actual 10%) key = (160045, 53356) value = 48012 (actual 48061 - 0% error) 10% (actual 10%) key = (240238, 80087) value = 72016 (actual 72061 - 0% error) 10% (actual 10%) key = (320152, 106725) value = 96035 (actual 96085 - 0% error) 10% (actual 10%) key = (400354, 133459) value = 120047 (actual 120093 - 0% error) 10% (actual 10%) key = (480133, 160052) value = 144053 (actual 144100 - 0% error) 10% (actual 10%) key = (560080, 186701) value = 168060 (actual 168102 - 0% error) 10% (actual 10%) key = (639892, 213305) value = 192073 (actual 
192119 - 0% error) 10% (actual 10%) key = (719776, 239933) value = 216090 (actual 216137 - 0% error) 9% (actual 9%) DataSizeHistogram: 10% (actual 10%) key = (79732, 26585) value = 2038706 (actual 2042645 - 0% error) 10% (actual 10%) key = (159427, 53150) value = 4076220 (actual 4080259 - 0% error) 10% (actual 10%) key = (239872, 79965) value = 6113940 (actual 6117932 - 0% error) 10% (actual 10%) key = (319834, 106619) value = 8152983 (actual 8156951 - 0% error) 10% (actual 10%) key = (400105, 133376) value = 10190566 (actual 10194584 - 0% error) 10% (actual 10%) key = (479833, 159952) value = 12228261 (actual 12232212 - 0% error) 10% (actual 10%) key = (559774, 186599) value = 14265925 (actual 14269984 - 0% error) 10% (actual 10%) key = (639385, 213136) value = 16304923 (actual 16308915 - 0% error) 10% (actual 10%) key = (719437, 239820) value = 18342658 (actual 18346641 - 0% error) 9% (actual 9%) Checking Mixed: Touched 1% bytes, 51 pages RowCountHistogram: 10% (actual 10%) key = (80152, 26725) value = 24033 (actual 24079 - 0% error) 10% (actual 10%) key = (160300, 53441) value = 48088 (actual 48136 - 0% error) 10% (actual 10%) key = (241096, 80373) value = 72280 (actual 72327 - 0% error) 10% (actual 10%) key = (321454, 107159) value = 96428 (actual 96478 - 0% error) 10% (actual 10%) key = (402202, 134075) value = 120604 (actual 120651 - 0% error) 10% (actual 10%) key = (482362, 160795) value = 144727 (actual 144775 - 0% error) 10% (actual 10%) key = (562825, 187616) value = 168893 (actual 168936 - 0% error) 10% (actual 10%) key = (642871, 214298) value = 192974 (actual 193024 - 0% error) 10% (actual 10%) key = (723403, 241142) value = 217180 (actual 217228 - 0% error) 9% (actual 9%) DataSizeHistogram: 10% (actual 10%) key = (80152, 26725) value = 2048715 (actual 2052707 - 0% error) 10% (actual 10%) key = (160300, 53441) value = 4098370 (actual 4102393 - 0% error) 10% (actual 10%) key = (241096, 80373) value = 6145924 (actual 6149966 - 0% error) 10% (actual 10%) key = (321454, 107159) value = 8194622 (actual 8198636 - 0% error) 10% (actual 10%) key = (402202, 134075) value = 10244365 (actual 10248317 - 0% error) 10% (actual 10%) key = (482362, 160795) value = 12292389 (actual 12296360 - 0% error) 10% (actual 10%) key = (562825, 187616) value = 14344066 (actual 14348128 - 0% error) 10% (actual 10%) key = (642871, 214298) value = 16393002 (actual 16396983 - 0% error) 10% (actual 10%) key = (723403, 241142) value = 18443184 (actual 18447186 - 0% error) 9% (actual 9%) { [12965, 17271), [20685, 27602), [31405, 43682), [58051, 73731), [81074, 85635), [86559, 89297), [92588, 112654), [134937, 148111), [152568, 158136), [169526, 171272), [181381, 184364), [188301, 199001), [201179, 227534) } 1 parts: [0:0:1:0:0:0:0] 240000 rows, 10181 pages, 7 levels: (159964, 53329) (319996, 106673) (479902, 159975) (639565, 213196) (799303, 266442) Checking BTree: Touched 3% bytes, 111 pages RowCountHistogram: 6% (actual 6%) key = (80152, 26725) value = 7654 (actual 7700 - 0% error) 11% (actual 11%) key = (140245, 46756) value = 21908 (actual 21959 - 0% error) 12% (actual 12%) key = (241096, 80373) value = 37729 (actual 37776 - 0% error) 5% (actual 5%) key = (291388, 97137) value = 44561 (actual 44610 - 0% error) 14% (actual 14%) key = (361831, 120618) value = 62406 (actual 62455 - 0% error) 6% (actual 6%) key = (462178, 154067) value = 70269 (actual 70314 - 0% error) 10% (actual 10%) key = (522574, 174199) value = 83950 (actual 83996 - 0% error) 9% (actual 9%) key = (647905, 215976) value = 96207 (actual 
96256 - 0% error) 11% (actual 11%) key = (703270, 234431) value = 110645 (actual 110694 - 0% error) 12% (actual 12%) DataSizeHistogram: 6% (actual 6%) key = (80152, 26725) value = 650681 (actual 654673 - 0% error) 11% (actual 11%) key = (140245, 46756) value = 1862907 (actual 1866988 - 0% error) 12% (actual 12%) key = (241096, 80373) value = 3200081 (actual 3204123 - 0% error) 5% (actual 5%) key = (291388, 97137) value = 3780473 (actual 3784554 - 0% error) 14% (actual 14%) key = (361831, 120618) value = 5294670 (actual 5298760 - 0% error) 6% (actual 6%) key = (462178, 154067) value = 5965285 (actual 5969310 - 0% error) 10% (actual 10%) key = (522574, 174199) value = 7125413 (actual 7129406 - 0% error) 9% (actual 9%) key = (647905, 215976) value = 8166922 (actual 8170966 - 0% error) 11% (actual 11%) key = (703270, 234431) value = 9391370 (actual 9395383 - 0% error) 12% (actual 12%) { [12965, 17271), [20685, 27602), [31405, 43682), [58051, 73731), [81074, 85635), [86559, 89297), [92588, 112654), [134937, 148111), [152568, 158136), [169526, 171272), [181381, 184364), [188301, 199001), [201179, 227534) } Checking Flat: Touched 100% bytes, 1 pages RowCountHistogram: 10% (actual 10%) key = (109672, 36565) value = 12716 (actual 12760 - 0% error) 10% (actual 10%) key = (200011, 66678) value = 25439 (actual 25485 - 0% error) 10% (actual 10%) key = (242497, 80840) value = 38151 (actual 38197 - 0% error) 10% (actual 10%) key = (323278, 107767) value = 50861 (actual 50910 - 0% error) 9% (actual 9%) key = (365755, 121926) value = 63568 (actual 63614 - 0% error) 10% (actual 10%) key = (482191, 160738) value = 76283 (actual 76335 - 0% error) 10% (actual 9%) key = (610882, 203635) value = 88992 (actual 89039 - 0% error) 10% (actual 10%) key = (673702, 224575) value = 101722 (actual 101768 - 0% error) 10% (actual 10%) key = (715753, 238592) value = 114435 (actual 114484 - 0% error) 9% (actual 9%) DataSizeHistogram: 10% (actual 10%) ... 
140, NULL) (311209, NULL) (311281, NULL) (311344, NULL) (311416, NULL) [0:0:935:0:0:0:0] 100 rows, 100 pages, 4 levels: (311479, NULL) (311542, NULL) (311614, NULL) (311683, NULL) (311755, NULL) [0:0:936:0:0:0:0] 100 rows, 100 pages, 4 levels: (311821, NULL) (311890, NULL) (311956, NULL) (312034, NULL) (312100, NULL) [0:0:937:0:0:0:0] 100 rows, 100 pages, 4 levels: (312172, NULL) (312232, NULL) (312301, NULL) (312370, NULL) (312439, NULL) [0:0:938:0:0:0:0] 100 rows, 100 pages, 4 levels: (312508, NULL) (312571, NULL) (312637, NULL) (312700, NULL) (312760, NULL) [0:0:939:0:0:0:0] 100 rows, 100 pages, 4 levels: (312835, NULL) (312904, NULL) (312970, NULL) (313030, NULL) (313102, NULL) [0:0:940:0:0:0:0] 100 rows, 100 pages, 4 levels: (313174, NULL) (313240, NULL) (313300, NULL) (313366, NULL) (313429, NULL) [0:0:941:0:0:0:0] 100 rows, 100 pages, 4 levels: (313498, NULL) (313573, NULL) (313639, NULL) (313699, NULL) (313768, NULL) [0:0:942:0:0:0:0] 100 rows, 100 pages, 4 levels: (313828, NULL) (313891, NULL) (313957, NULL) (314023, NULL) (314086, NULL) [0:0:943:0:0:0:0] 100 rows, 100 pages, 4 levels: (314149, NULL) (314212, NULL) (314275, NULL) (314338, NULL) (314401, NULL) [0:0:944:0:0:0:0] 100 rows, 100 pages, 4 levels: (314464, NULL) (314530, NULL) (314590, NULL) (314656, NULL) (314719, NULL) [0:0:945:0:0:0:0] 100 rows, 100 pages, 4 levels: (314788, NULL) (314854, NULL) (314920, NULL) (314983, NULL) (315046, NULL) [0:0:946:0:0:0:0] 100 rows, 100 pages, 4 levels: (315109, NULL) (315178, NULL) (315238, NULL) (315304, NULL) (315370, NULL) [0:0:947:0:0:0:0] 100 rows, 100 pages, 4 levels: (315433, NULL) (315496, NULL) (315565, NULL) (315631, NULL) (315697, NULL) [0:0:948:0:0:0:0] 100 rows, 100 pages, 4 levels: (315766, NULL) (315826, NULL) (315889, NULL) (315952, NULL) (316024, NULL) [0:0:949:0:0:0:0] 100 rows, 100 pages, 4 levels: (316087, NULL) (316156, NULL) (316222, NULL) (316288, NULL) (316357, NULL) [0:0:950:0:0:0:0] 100 rows, 100 pages, 4 levels: (316432, NULL) (316498, NULL) (316564, NULL) (316636, NULL) (316705, NULL) [0:0:951:0:0:0:0] 100 rows, 100 pages, 4 levels: (316768, NULL) (316831, NULL) (316891, NULL) (316951, NULL) (317011, NULL) [0:0:952:0:0:0:0] 100 rows, 100 pages, 4 levels: (317080, NULL) (317143, NULL) (317218, NULL) (317287, NULL) (317356, NULL) [0:0:953:0:0:0:0] 100 rows, 100 pages, 4 levels: (317422, NULL) (317497, NULL) (317563, NULL) (317632, NULL) (317701, NULL) [0:0:954:0:0:0:0] 100 rows, 100 pages, 4 levels: (317764, NULL) (317824, NULL) (317887, NULL) (317953, NULL) (318019, NULL) [0:0:955:0:0:0:0] 100 rows, 100 pages, 4 levels: (318088, NULL) (318166, NULL) (318235, NULL) (318304, NULL) (318370, NULL) [0:0:956:0:0:0:0] 100 rows, 100 pages, 4 levels: (318442, NULL) (318511, NULL) (318574, NULL) (318640, NULL) (318703, NULL) [0:0:957:0:0:0:0] 100 rows, 100 pages, 4 levels: (318772, NULL) (318838, NULL) (318898, NULL) (318970, NULL) (319036, NULL) [0:0:958:0:0:0:0] 100 rows, 100 pages, 4 levels: (319099, NULL) (319162, NULL) (319225, NULL) (319294, NULL) (319360, NULL) [0:0:959:0:0:0:0] 100 rows, 100 pages, 4 levels: (319423, NULL) (319492, NULL) (319555, NULL) (319621, NULL) (319687, NULL) [0:0:960:0:0:0:0] 100 rows, 100 pages, 4 levels: (319753, NULL) (319828, NULL) (319900, NULL) (319963, NULL) (320035, NULL) [0:0:961:0:0:0:0] 100 rows, 100 pages, 4 levels: (320104, NULL) (320164, NULL) (320233, NULL) (320299, NULL) (320365, NULL) [0:0:962:0:0:0:0] 100 rows, 100 pages, 4 levels: (320428, NULL) (320500, NULL) (320569, NULL) (320629, NULL) (320698, NULL) 
[0:0:963:0:0:0:0] 100 rows, 100 pages, 4 levels: (320764, NULL) (320833, NULL) (320893, NULL) (320959, NULL) (321019, NULL) [0:0:964:0:0:0:0] 100 rows, 100 pages, 4 levels: (321085, NULL) (321151, NULL) (321214, NULL) (321277, NULL) (321352, NULL) [0:0:965:0:0:0:0] 100 rows, 100 pages, 4 levels: (321421, NULL) (321493, NULL) (321562, NULL) (321631, NULL) (321691, NULL) [0:0:966:0:0:0:0] 100 rows, 100 pages, 4 levels: (321757, NULL) (321823, NULL) (321886, NULL) (321949, NULL) (322009, NULL) [0:0:967:0:0:0:0] 100 rows, 100 pages, 4 levels: (322081, NULL) (322159, NULL) (322225, NULL) (322294, NULL) (322363, NULL) [0:0:968:0:0:0:0] 100 rows, 100 pages, 4 levels: (322429, NULL) (322498, NULL) (322564, NULL) (322642, NULL) (322711, NULL) [0:0:969:0:0:0:0] 100 rows, 100 pages, 4 levels: (322783, NULL) (322846, NULL) (322915, NULL) (322978, NULL) (323041, NULL) [0:0:970:0:0:0:0] 100 rows, 100 pages, 4 levels: (323104, NULL) (323164, NULL) (323230, NULL) (323305, NULL) (323368, NULL) [0:0:971:0:0:0:0] 100 rows, 100 pages, 4 levels: (323434, NULL) (323506, NULL) (323569, NULL) (323632, NULL) (323707, NULL) [0:0:972:0:0:0:0] 100 rows, 100 pages, 4 levels: (323776, NULL) (323851, NULL) (323917, NULL) (323986, NULL) (324052, NULL) [0:0:973:0:0:0:0] 100 rows, 100 pages, 4 levels: (324115, NULL) (324184, NULL) (324256, NULL) (324316, NULL) (324379, NULL) [0:0:974:0:0:0:0] 100 rows, 100 pages, 4 levels: (324442, NULL) (324502, NULL) (324568, NULL) (324631, NULL) (324703, NULL) [0:0:975:0:0:0:0] 100 rows, 100 pages, 4 levels: (324769, NULL) (324838, NULL) (324904, NULL) (324973, NULL) (325033, NULL) [0:0:976:0:0:0:0] 100 rows, 100 pages, 4 levels: (325105, NULL) (325174, NULL) (325234, NULL) (325297, NULL) (325363, NULL) [0:0:977:0:0:0:0] 100 rows, 100 pages, 4 levels: (325438, NULL) (325504, NULL) (325570, NULL) (325630, NULL) (325699, NULL) [0:0:978:0:0:0:0] 100 rows, 100 pages, 4 levels: (325771, NULL) (325834, NULL) (325900, NULL) (325966, NULL) (326032, NULL) [0:0:979:0:0:0:0] 100 rows, 100 pages, 4 levels: (326101, NULL) (326170, NULL) (326233, NULL) (326296, NULL) (326359, NULL) [0:0:980:0:0:0:0] 100 rows, 100 pages, 4 levels: (326434, NULL) (326497, NULL) (326563, NULL) (326632, NULL) (326701, NULL) [0:0:981:0:0:0:0] 100 rows, 100 pages, 4 levels: (326773, NULL) (326836, NULL) (326905, NULL) (326965, NULL) (327025, NULL) [0:0:982:0:0:0:0] 100 rows, 100 pages, 4 levels: (327097, NULL) (327169, NULL) (327232, NULL) (327301, NULL) (327364, NULL) [0:0:983:0:0:0:0] 100 rows, 100 pages, 4 levels: (327430, NULL) (327496, NULL) (327559, NULL) (327622, NULL) (327682, NULL) [0:0:984:0:0:0:0] 100 rows, 100 pages, 4 levels: (327742, NULL) (327811, NULL) (327871, NULL) (327934, NULL) (327997, NULL) [0:0:985:0:0:0:0] 100 rows, 100 pages, 4 levels: (328072, NULL) (328138, NULL) (328222, NULL) (328291, NULL) (328363, NULL) [0:0:986:0:0:0:0] 100 rows, 100 pages, 4 levels: (328432, NULL) (328501, NULL) (328573, NULL) (328648, NULL) (328717, NULL) [0:0:987:0:0:0:0] 100 rows, 100 pages, 4 levels: (328783, NULL) (328849, NULL) (328915, NULL) (328978, NULL) (329044, NULL) [0:0:988:0:0:0:0] 100 rows, 100 pages, 4 levels: (329119, NULL) (329185, NULL) (329248, NULL) (329317, NULL) (329383, NULL) [0:0:989:0:0:0:0] 100 rows, 100 pages, 4 levels: (329455, NULL) (329518, NULL) (329590, NULL) (329662, NULL) (329722, NULL) [0:0:990:0:0:0:0] 100 rows, 100 pages, 4 levels: (329782, NULL) (329854, NULL) (329917, NULL) (329983, NULL) (330049, NULL) [0:0:991:0:0:0:0] 100 rows, 100 pages, 4 levels: (330118, NULL) (330187, NULL) 
(330253, NULL) (330322, NULL) (330382, NULL) [0:0:992:0:0:0:0] 100 rows, 100 pages, 4 levels: (330454, NULL) (330520, NULL) (330595, NULL) (330673, NULL) (330739, NULL) [0:0:993:0:0:0:0] 100 rows, 100 pages, 4 levels: (330808, NULL) (330874, NULL) (330940, NULL) (331003, NULL) (331072, NULL) [0:0:994:0:0:0:0] 100 rows, 100 pages, 4 levels: (331132, NULL) (331204, NULL) (331276, NULL) (331342, NULL) (331405, NULL) [0:0:995:0:0:0:0] 100 rows, 100 pages, 4 levels: (331465, NULL) (331540, NULL) (331615, NULL) (331684, NULL) (331753, NULL) [0:0:996:0:0:0:0] 100 rows, 100 pages, 4 levels: (331816, NULL) (331891, NULL) (331960, NULL) (332026, NULL) (332086, NULL) [0:0:997:0:0:0:0] 100 rows, 100 pages, 4 levels: (332152, NULL) (332215, NULL) (332284, NULL) (332350, NULL) (332419, NULL) [0:0:998:0:0:0:0] 100 rows, 100 pages, 4 levels: (332491, NULL) (332557, NULL) (332623, NULL) (332686, NULL) (332752, NULL) [0:0:999:0:0:0:0] 100 rows, 100 pages, 4 levels: (332818, NULL) (332884, NULL) (332944, NULL) (333013, NULL) (333073, NULL) [0:0:1000:0:0:0:0] 100 rows, 100 pages, 4 levels: (333148, NULL) (333214, NULL) (333274, NULL) (333340, NULL) (333403, NULL) Checking BTree: Touched 0% bytes, 0 pages RowCountHistogram: 5% (actual 6%) key = (16984, 5669) value = 5100 (actual 6998 - -1% error) 10% (actual 9%) key = (50416, 16813) value = 15100 (actual 16798 - -1% error) 10% (actual 9%) key = (83701, 27908) value = 25100 (actual 26598 - -1% error) 10% (actual 9%) key = (116986, 39003) value = 35100 (actual 36398 - -1% error) 10% (actual 9%) key = (150319, 50114) value = 45100 (actual 46198 - -1% error) 10% (actual 9%) key = (183700, 61241) value = 55100 (actual 55998 - 0% error) 10% (actual 9%) key = (217081, 72368) value = 65100 (actual 65798 - 0% error) 10% (actual 9%) key = (250486, 83503) value = 75100 (actual 75598 - 0% error) 10% (actual 9%) key = (283771, 94598) value = 85100 (actual 85398 - 0% error) 14% (actual 14%) DataSizeHistogram: 5% (actual 6%) key = (16648, 5557) value = 524891 (actual 723287 - -1% error) 10% (actual 9%) key = (50086, 16703) value = 1569936 (actual 1747238 - -1% error) 9% (actual 9%) key = (83356, 27793) value = 2610698 (actual 2767306 - -1% error) 10% (actual 9%) key = (116647, 38890) value = 3652143 (actual 3787394 - -1% error) 9% (actual 9%) key = (149656, 49893) value = 4685435 (actual 4800597 - -1% error) 10% (actual 9%) key = (183040, 61021) value = 5728420 (actual 5822785 - 0% error) 10% (actual 9%) key = (216727, 72250) value = 6776444 (actual 6848929 - 0% error) 9% (actual 9%) key = (250144, 83389) value = 7813547 (actual 7865227 - 0% error) 9% (actual 9%) key = (283444, 94489) value = 8853697 (actual 8884838 - 0% error) 14% (actual 14%) Checking Flat: Touched 100% bytes, 1000 pages RowCountHistogram: 10% (actual 11%) key = (33379, 11134) value = 10000 (actual 11800 - -1% error) 10% (actual 9%) key = (66721, 22248) value = 20000 (actual 21600 - -1% error) 10% (actual 9%) key = (100015, 33346) value = 30000 (actual 31400 - -1% error) 10% (actual 9%) key = (133258, 44427) value = 40000 (actual 41200 - -1% error) 10% (actual 9%) key = (166621, 55548) value = 50000 (actual 51000 - -1% error) 10% (actual 9%) key = (200041, 66688) value = 60000 (actual 60800 - 0% error) 10% (actual 9%) key = (233449, 77824) value = 70000 (actual 70600 - 0% error) 10% (actual 9%) key = (266824, 88949) value = 80000 (actual 80400 - 0% error) 10% (actual 9%) key = (300073, 100032) value = 90000 (actual 90200 - 0% error) 10% (actual 9%) DataSizeHistogram: 10% (actual 11%) key = (33187, NULL) 
value = 1041247 (actual 1229534 - -1% error) 10% (actual 9%) key = (66517, NULL) value = 2082456 (actual 2249844 - -1% error) 10% (actual 9%) key = (99709, NULL) value = 3123684 (actual 3270138 - -1% error) 10% (actual 9%) key = (132925, NULL) value = 4164886 (actual 4290603 - -1% error) 10% (actual 9%) key = (166246, NULL) value = 5206111 (actual 5311117 - -1% error) 10% (actual 9%) key = (199678, NULL) value = 6247321 (actual 6331068 - 0% error) 10% (actual 9%) key = (233290, NULL) value = 7288529 (actual 7350869 - 0% error) 10% (actual 9%) key = (266701, NULL) value = 8329759 (actual 8371441 - 0% error) 10% (actual 9%) key = (300052, NULL) value = 9371030 (actual 9392083 - 0% error) 9% (actual 9%) Checking Mixed: Touched 0% bytes, 0 pages RowCountHistogram: 100% (actual 100%) DataSizeHistogram: 100% (actual 100%) >> TTableProfileTests::UseTableProfilePreset [GOOD] >> TTableProfileTests::UseTableProfilePresetViaSdk >> DataShardWrite::UpsertNoLocksArbiter [GOOD] >> DataShardWrite::UpsertLostPrepareArbiter >> BackupRestoreS3::TestAllPrimitiveTypes-STRING [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-JSON ------- [TM] {asan, default-linux-x86_64, release} ydb/library/ycloud/impl/ut/unittest >> TServiceAccountServiceTest::IssueToken [GOOD] Test command err: 2025-05-07T08:57:43.031191Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501625370039210554:2268];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:57:43.031240Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0029b7/r3tmp/tmpl5lXQ3/pdisk_1.dat 2025-05-07T08:57:43.895523Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:57:43.895682Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:57:43.898068Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:57:43.991671Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TClient is connected to server localhost:63423 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-05-07T08:57:44.345444Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:57:44.367214Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T08:57:47.882202Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7501625386020221961:2060];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:57:47.882415Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0029b7/r3tmp/tmprXgLDX/pdisk_1.dat 2025-05-07T08:57:48.084708Z node 2 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:57:48.128395Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:57:48.128529Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:57:48.131563Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:63097 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:57:48.400838Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... |91.0%| [TA] $(B)/ydb/library/ycloud/impl/ut/test-results/unittest/{meta.json ... 
results_accumulator.log}
>> TTableProfileTests::ExplicitPartitionsUnordered [GOOD]
>> TTableProfileTests::ExplicitPartitionsComplex
>> DataShardWrite::ExecSQLUpsertPrepared-EvWrite+Volatile [GOOD]
>> DataShardWrite::ExecSQLUpsertPrepared+EvWrite+Volatile
|91.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/dump_restore/py3test >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_index_0__SYNC-pk_types4-all_types4-index4---SYNC] [GOOD]
>> KqpErrors::ProposeResultLost_RwTx+UseSink [GOOD]
>> KqpErrors::ProposeResultLost_RwTx-UseSink
>> TestYmqHttpProxy::TestCreateQueue [GOOD]
>> Cdc::DocApi[TopicRunner] [GOOD]
>> Cdc::HugeKey[PqRunner]
>> Cdc::NewAndOldImagesLog[YdsRunner] [GOOD]
>> Cdc::NewAndOldImagesLog[TopicRunner]
>> TestKinesisHttpProxy::CreateStreamInIncorrectDb [GOOD]
>> TestYmqHttpProxy::TestCreateQueueWithBadQueueName
>> TestKinesisHttpProxy::UnauthorizedGetShardIteratorRequest [GOOD]
>> AsyncIndexChangeExchange::SenderShouldShakeHandsAfterAddingIndex [GOOD]
>> AsyncIndexChangeExchange::ShouldDeliverChangesOnFreshTable
>> TestYmqHttpProxy::TestCreateQueueWithSameNameAndSameParams [GOOD]
>> BackupRestore::TestAllPrimitiveTypes-STRING [GOOD]
>> BackupRestore::TestAllPrimitiveTypes-JSON
>> DataShardWrite::DeletePrepared-Volatile [GOOD]
>> DataShardWrite::DelayedVolatileTxAndEvWrite
>> KqpErrors::ResolveTableError [GOOD]
>> DataShardWrite::UpsertPreparedNoTxCache+Volatile [GOOD]
>> DataShardWrite::UpsertPreparedNoTxCache-Volatile
>> TestKinesisHttpProxy::MissingAction [GOOD]
>> TestKinesisHttpProxy::CreateStreamWithInvalidName
>> Balancing::Balancing_OneTopic_TopicApi
>> TestKinesisHttpProxy::TestRequestWithWrongRegion
>> TestKinesisHttpProxy::DifferentContentTypes [GOOD]
>> TestYmqHttpProxy::TestCreateQueueWithSameNameAndDifferentParams
>> TVersions::Wreck1Reverse [GOOD]
>> TVersions::Wreck0
>> Cdc::UpdatesLog[TopicRunner] [GOOD]
>> Cdc::VirtualTimestamps[PqRunner]
>> TopicAutoscaling::PartitionSplit_PQv1
>> DataShardWrite::RejectOnChangeQueueOverflow [GOOD]
>> DataShardWrite::UpsertBrokenLockArbiter
>> TestKinesisHttpProxy::PutRecordsWithLongExplicitHashKey
>> TestKinesisHttpProxy::GoodRequestPutRecords
>> RetryPolicy::TWriteSession_SwitchBackToLocalCluster [GOOD]
>> RetryPolicy::TWriteSession_SeqNoShift
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_kqp_errors/unittest >> KqpErrors::ResolveTableError [GOOD]
Test command err:
2025-05-07T08:57:51.979782Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError;
2025-05-07T08:57:51.980526Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError;
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0047ac/r3tmp/tmpkFYKeu/pdisk_1.dat
2025-05-07T08:57:52.501750Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded
2025-05-07T08:57:52.736579Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480
2025-05-07T08:57:52.822621Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2025-05-07T08:57:52.822872Z node 1 :HIVE WARN:
node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:57:52.830661Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:57:52.830772Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:57:52.849337Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-05-07T08:57:52.849931Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:57:52.850320Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:57:53.156566Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-05-07T08:57:54.683392Z node 1 :KQP_EXECUTER TRACE: kqp_executer_impl.h:190: ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: 01jtmzeewn7wrxy4bs2d06pjhb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ODgyZjJjMzEtNzkxNjJiNDgtYzIxMWRmODctNzFmNTA0MzM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. Bootstrap done, become ReadyState 2025-05-07T08:57:54.683662Z node 1 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:584: ActorId: [1:1540:2934] TxId: 281474976715658. Ctx: { TraceId: 01jtmzeewn7wrxy4bs2d06pjhb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ODgyZjJjMzEtNzkxNjJiNDgtYzIxMWRmODctNzFmNTA0MzM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. Executing physical tx, type: 2, stages: 1 2025-05-07T08:57:54.683791Z node 1 :KQP_EXECUTER DEBUG: kqp_tasks_graph.cpp:25: StageInfo: StageId #[0,0], InputsCount: 0, OutputsCount: 1 2025-05-07T08:57:54.683942Z node 1 :KQP_EXECUTER TRACE: kqp_executer_impl.h:599: ActorId: [1:1540:2934] TxId: 281474976715658. Ctx: { TraceId: 01jtmzeewn7wrxy4bs2d06pjhb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ODgyZjJjMzEtNzkxNjJiNDgtYzIxMWRmODctNzFmNTA0MzM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. Got request, become WaitResolveState 2025-05-07T08:57:54.684224Z node 1 :KQP_EXECUTER DEBUG: kqp_table_resolver.cpp:271: TxId: 281474976715658. Resolved key sets: 1 2025-05-07T08:57:54.684407Z node 1 :KQP_EXECUTER DEBUG: kqp_table_resolver.cpp:295: TxId: 281474976715658. Resolved key: { TableId: [OwnerId: 72057594046644480, LocalPathId: 2] Access: 2 SyncVersion: false Status: OkData Kind: KindRegularTable PartitionsCount: 4 DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } From: (Uint32 : NULL) IncFrom: 1 To: () IncTo: 0 } 2025-05-07T08:57:54.684557Z node 1 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:2034: ActorId: [1:1540:2934] TxId: 281474976715658. Ctx: { TraceId: 01jtmzeewn7wrxy4bs2d06pjhb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ODgyZjJjMzEtNzkxNjJiNDgtYzIxMWRmODctNzFmNTA0MzM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. 
Stage [0,0] AST: ( (return (lambda '() (block '( (let $1 (Just (Uint32 '1))) (let $2 (Just (Uint32 '2))) (let $3 (Just (Uint32 '3))) (return (Iterator (AsList (AsStruct '('"key" $1) '('"value" $1)) (AsStruct '('"key" $2) '('"value" $2)) (AsStruct '('"key" $3) '('"value" $3))))) )))) ) 2025-05-07T08:57:54.684735Z node 1 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:1465: ActorId: [1:1540:2934] TxId: 281474976715658. Ctx: { TraceId: 01jtmzeewn7wrxy4bs2d06pjhb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ODgyZjJjMzEtNzkxNjJiNDgtYzIxMWRmODctNzFmNTA0MzM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. Stage [0,0] create compute task: 1 2025-05-07T08:57:54.684840Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715658. Ctx: { TraceId: 01jtmzeewn7wrxy4bs2d06pjhb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ODgyZjJjMzEtNzkxNjJiNDgtYzIxMWRmODctNzFmNTA0MzM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. Database not set, use /Root 2025-05-07T08:57:54.684894Z node 1 :KQP_EXECUTER DEBUG: kqp_planner.cpp:553: TxId: 281474976715658. Ctx: { TraceId: 01jtmzeewn7wrxy4bs2d06pjhb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ODgyZjJjMzEtNzkxNjJiNDgtYzIxMWRmODctNzFmNTA0MzM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. Total tasks: 0, readonly: true, 0 scan tasks on 0 nodes, localComputeTasks: 0, snapshot: {0, 0} 2025-05-07T08:57:54.685297Z node 1 :KQP_EXECUTER DEBUG: kqp_planner.cpp:793: TxId: 281474976715658. Ctx: { TraceId: 01jtmzeewn7wrxy4bs2d06pjhb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ODgyZjJjMzEtNzkxNjJiNDgtYzIxMWRmODctNzFmNTA0MzM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. Collect channels updates for task: 1 at actor [1:1543:2934] 2025-05-07T08:57:54.685380Z node 1 :KQP_EXECUTER DEBUG: kqp_planner.cpp:785: TxId: 281474976715658. Ctx: { TraceId: 01jtmzeewn7wrxy4bs2d06pjhb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ODgyZjJjMzEtNzkxNjJiNDgtYzIxMWRmODctNzFmNTA0MzM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. Sending channels info to compute actor: [1:1543:2934], channels: 0 2025-05-07T08:57:54.685447Z node 1 :KQP_EXECUTER INFO: kqp_data_executer.cpp:2800: ActorId: [1:1540:2934] TxId: 281474976715658. Ctx: { TraceId: 01jtmzeewn7wrxy4bs2d06pjhb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ODgyZjJjMzEtNzkxNjJiNDgtYzIxMWRmODctNzFmNTA0MzM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. Total tasks: 1, readonly: 0, datashardTxs: 0, evWriteTxs: 0, topicTxs: 0, volatile: 0, immediate: 1, pending compute tasks0, useFollowers: 0 2025-05-07T08:57:54.685488Z node 1 :KQP_EXECUTER TRACE: kqp_data_executer.cpp:2803: ActorId: [1:1540:2934] TxId: 281474976715658. Ctx: { TraceId: 01jtmzeewn7wrxy4bs2d06pjhb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ODgyZjJjMzEtNzkxNjJiNDgtYzIxMWRmODctNzFmNTA0MzM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. Updating channels after the creation of compute actors 2025-05-07T08:57:54.685526Z node 1 :KQP_EXECUTER DEBUG: kqp_planner.cpp:793: TxId: 281474976715658. Ctx: { TraceId: 01jtmzeewn7wrxy4bs2d06pjhb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ODgyZjJjMzEtNzkxNjJiNDgtYzIxMWRmODctNzFmNTA0MzM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. 
Collect channels updates for task: 1 at actor [1:1543:2934] 2025-05-07T08:57:54.685578Z node 1 :KQP_EXECUTER DEBUG: kqp_planner.cpp:785: TxId: 281474976715658. Ctx: { TraceId: 01jtmzeewn7wrxy4bs2d06pjhb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ODgyZjJjMzEtNzkxNjJiNDgtYzIxMWRmODctNzFmNTA0MzM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. Sending channels info to compute actor: [1:1543:2934], channels: 0 2025-05-07T08:57:54.685639Z node 1 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:644: ActorId: [1:1540:2934] TxId: 281474976715658. Ctx: { TraceId: 01jtmzeewn7wrxy4bs2d06pjhb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ODgyZjJjMzEtNzkxNjJiNDgtYzIxMWRmODctNzFmNTA0MzM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. Waiting for: CA [1:1543:2934], 2025-05-07T08:57:54.685712Z node 1 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:157: ActorId: [1:1540:2934] TxId: 281474976715658. Ctx: { TraceId: 01jtmzeewn7wrxy4bs2d06pjhb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ODgyZjJjMzEtNzkxNjJiNDgtYzIxMWRmODctNzFmNTA0MzM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. ActorState: WaitResolveState, waiting for 1 compute actor(s) and 0 datashard(s): CA [1:1543:2934], 2025-05-07T08:57:54.685767Z node 1 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:2362: ActorId: [1:1540:2934] TxId: 281474976715658. Ctx: { TraceId: 01jtmzeewn7wrxy4bs2d06pjhb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ODgyZjJjMzEtNzkxNjJiNDgtYzIxMWRmODctNzFmNTA0MzM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. ActorState: WaitResolveState, immediate tx, become ExecuteState 2025-05-07T08:57:54.697257Z node 1 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:433: ActorId: [1:1540:2934] TxId: 281474976715658. Ctx: { TraceId: 01jtmzeewn7wrxy4bs2d06pjhb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ODgyZjJjMzEtNzkxNjJiNDgtYzIxMWRmODctNzFmNTA0MzM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. ActorState: ExecuteState, got execution state from compute actor: [1:1543:2934], task: 1, state: COMPUTE_STATE_EXECUTING, stats: { } 2025-05-07T08:57:54.697381Z node 1 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:644: ActorId: [1:1540:2934] TxId: 281474976715658. Ctx: { TraceId: 01jtmzeewn7wrxy4bs2d06pjhb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ODgyZjJjMzEtNzkxNjJiNDgtYzIxMWRmODctNzFmNTA0MzM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. Waiting for: CA [1:1543:2934], 2025-05-07T08:57:54.697464Z node 1 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:157: ActorId: [1:1540:2934] TxId: 281474976715658. Ctx: { TraceId: 01jtmzeewn7wrxy4bs2d06pjhb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ODgyZjJjMzEtNzkxNjJiNDgtYzIxMWRmODctNzFmNTA0MzM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. ActorState: ExecuteState, waiting for 1 compute actor(s) and 0 datashard(s): CA [1:1543:2934], 2025-05-07T08:57:54.698981Z node 1 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:433: ActorId: [1:1540:2934] TxId: 281474976715658. Ctx: { TraceId: 01jtmzeewn7wrxy4bs2d06pjhb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ODgyZjJjMzEtNzkxNjJiNDgtYzIxMWRmODctNzFmNTA0MzM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. 
ActorState: ExecuteState, got execution state from compute actor: [1:1543:2934], task: 1, state: COMPUTE_STATE_FINISHED, stats: { CpuTimeUs: 2086 Tasks { TaskId: 1 CpuTimeUs: 960 FinishTimeMs: 1746608274698 EgressBytes: 30 EgressRows: 3 ComputeCpuTimeUs: 38 BuildCpuTimeUs: 922 HostName: "ghrun-sykirh5vua" NodeId: 1 CreateTimeMs: 1746608274686 UpdateTimeMs: 1746608274698 } MaxMemoryUsage: 1048576 } 2025-05-07T08:57:54.699137Z node 1 :KQP_EXECUTER INFO: kqp_planner.cpp:688: TxId: 281474976715658. Ctx: { TraceId: 01jtmzeewn7wrxy4bs2d06pjhb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ODgyZjJjMzEtNzkxNjJiNDgtYzIxMWRmODctNzFmNTA0MzM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. Compute actor has finished execution: [1:1543:2934] 2025-05-07T08:57:54.699218Z node 1 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:281: ActorId: [1:1540:2934] TxId: 281474976715658. Ctx: { TraceId: 01jtmzeewn7wrxy4bs2d06pjhb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ODgyZjJjMzEtNzkxNjJiNDgtYzIxMWRmODctNzFmNTA0MzM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. Send Commit to BufferActor=[1:1539:2934] 2025-05-07T08:57:54.699307Z node 1 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:838: ActorId: [1:1540:2934] TxId: 281474976715658. Ctx: { TraceId: 01jtmzeewn7wrxy4bs2d06pjhb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ODgyZjJjMzEtNzkxNjJiNDgtYzIxMWRmODctNzFmNTA0MzM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. Resource usage for last stat interval: ComputeTime: 0.002086s ReadRows: 0 ReadBytes: 0 ru: 1 rate limiter was not found force flag: 1 2025-05-07T08:57:54.760594Z node 1 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:2140: ActorId: [1:1540:2934] TxId: 281474976715658. Ctx: { TraceId: 01jtmzeewn7wrxy4bs2d06pjhb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ODgyZjJjMzEtNzkxNjJiNDgtYzIxMWRmODctNzFmNTA0MzM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. terminate execution. 2025-05-07T08:57:54.760669Z node 1 :KQP_EXECUTER TRACE: kqp_executer_impl.h:2154: ActorId: [1:1540:2934] TxId: 281474976715658. Ctx: { TraceId: 01jtmzeewn7wrxy4bs2d06pjhb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ODgyZjJjMzEtNzkxNjJiNDgtYzIxMWRmODctNzFmNTA0MzM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. Terminate, become ZombieState 2025-05-07T08:57:54.807888Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:1559:2952], status: UNAVAILABLE, issues:
<main>: Error: Table metadata loading, code: 1050
<main>:1:1: Error: Failed to load metadata for table: db.[/Root/table-1]
<main>: Error: LookupError, code: 2005
2025-05-07T08:57:54.809678Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2147: SessionId: ydb://session/3?node_id=1&id=ZGNjMTUyZDUtYWUzZTZlMzEtYzY4NGQ3N2YtOTBhZTY1NTU=, ActorId: [1:1557:2950], ActorState: ExecuteState, TraceId: 01jtmzefac9x7qhw90x7v3cmrc, ReplyQueryCompileError, status UNAVAILABLE remove tx with tx_id:
>> DataShardWrite::UpsertLostPrepareArbiter [GOOD]
>> DataShardWrite::UpsertNoLocksArbiterRestart
>> KqpErrors::ProposeError [GOOD]
>> KqpErrors::ProposeErrorEvWrite
>> BackupRestoreS3::TestAllPrimitiveTypes-DATETIME [GOOD]
>> BackupRestoreS3::TestAllPrimitiveTypes-INTERVAL
>> TopicAutoscaling::Simple_BeforeAutoscaleAwareSDK
>> BackupRestore::TestAllPrimitiveTypes-DATE [GOOD]
>> BackupRestore::TestAllPrimitiveTypes-DATETIME
>> RetryPolicy::RetryWithBatching [GOOD]
>> DataShardWrite::ExecSQLUpsertPrepared+EvWrite+Volatile [GOOD]
>> DataShardWrite::InsertImmediate
>> Cdc::NewAndOldImagesLog[TopicRunner] [GOOD]
>> Cdc::NewAndOldImagesLogDebezium
>> DataShardWrite::DelayedVolatileTxAndEvWrite [GOOD]
>> DataShardWrite::DoubleWriteUncommittedThenDoubleReadWithCommit
>> AsyncIndexChangeExchange::ShouldDeliverChangesOnFreshTable [GOOD]
>> AsyncIndexChangeExchange::ShouldDeliverChangesOnAlteredTable
>> DataShardWrite::UpsertPreparedNoTxCache-Volatile [GOOD]
>> TestYmqHttpProxy::TestCreateQueueWithBadQueueName [GOOD]
>> BackupRestoreS3::RestoreViewReferenceTable [GOOD]
>> BackupRestoreS3::RestoreViewDependentOnAnotherView
>> BackupRestoreS3::TestAllPrimitiveTypes-JSON [GOOD]
>> BackupRestoreS3::TestAllPrimitiveTypes-JSON_DOCUMENT
>> Cdc::VirtualTimestamps[PqRunner] [GOOD]
>> Cdc::VirtualTimestamps[YdsRunner]
>> DataShardWrite::UpsertBrokenLockArbiter [GOOD]
>> DataShardWrite::PreparedDistributedWritePageFault
>> TTableProfileTests::ExplicitPartitionsComplex [GOOD]
>> TTableProfileTests::ExplicitPartitionsWrongKeyFormat
>> TestYmqHttpProxy::TestCreateQueueWithEmptyName
>> test_async_replication.py::TestAsyncReplication::test_async_replication[table_all_types-pk_types7-all_types7-index7---]
>> TestYmqHttpProxy::TestCreateQueueWithSameNameAndDifferentParams [GOOD]
>> TestKinesisHttpProxy::TestRequestWithWrongRegion [GOOD]
>> Cdc::HugeKey[PqRunner] [GOOD]
>> Cdc::HugeKey[YdsRunner]
>> TestKinesisHttpProxy::CreateStreamWithInvalidName [GOOD]
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_write/unittest >> DataShardWrite::UpsertPreparedNoTxCache-Volatile [GOOD]
Test command err:
2025-05-07T08:57:30.171750Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:105:2151], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:57:30.171996Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:57:30.172393Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0034e5/r3tmp/tmptRmmj6/pdisk_1.dat 2025-05-07T08:57:30.609855Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-05-07T08:57:30.672811Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:57:30.736179Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:57:30.736385Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:57:30.750661Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:57:30.854602Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-05-07T08:57:30.915002Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3098: StateInit, received event# 268828672, Sender [1:656:2563], Recipient [1:665:2569]: NKikimr::TEvTablet::TEvBoot 2025-05-07T08:57:30.916460Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3098: StateInit, received event# 268828673, Sender [1:656:2563], Recipient [1:665:2569]: NKikimr::TEvTablet::TEvRestored 2025-05-07T08:57:30.916943Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:665:2569] 2025-05-07T08:57:30.917267Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-05-07T08:57:30.948714Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3111: StateInactive, received event# 268828684, Sender [1:656:2563], Recipient [1:665:2569]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-05-07T08:57:31.031649Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-05-07T08:57:31.031795Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-05-07T08:57:31.033602Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-05-07T08:57:31.033700Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-05-07T08:57:31.033760Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-05-07T08:57:31.034929Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-05-07T08:57:31.035119Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-05-07T08:57:31.035222Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state 
actor id [1:681:2569] in generation 1 2025-05-07T08:57:31.050529Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-05-07T08:57:31.118488Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-05-07T08:57:31.118746Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-05-07T08:57:31.118867Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:683:2579] 2025-05-07T08:57:31.118922Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-05-07T08:57:31.118962Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-05-07T08:57:31.119008Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T08:57:31.119249Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 2146435072, Sender [1:665:2569], Recipient [1:665:2569]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-05-07T08:57:31.119325Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3155: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-05-07T08:57:31.119679Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-05-07T08:57:31.119794Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-05-07T08:57:31.119894Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T08:57:31.119943Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-05-07T08:57:31.119989Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-05-07T08:57:31.120042Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-05-07T08:57:31.120087Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-05-07T08:57:31.120123Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-05-07T08:57:31.120210Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T08:57:31.120357Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269877761, Sender [1:672:2573], Recipient [1:665:2569]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-05-07T08:57:31.120396Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3166: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-05-07T08:57:31.120446Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:661:2566], serverId# [1:672:2573], sessionId# [0:0:0] 2025-05-07T08:57:31.120884Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269549568, Sender [1:410:2405], Recipient [1:672:2573] 2025-05-07T08:57:31.120941Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3136: StateWork, processing 
event TEvDataShard::TEvProposeTransaction 2025-05-07T08:57:31.121050Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-05-07T08:57:31.121339Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-05-07T08:57:31.121394Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-05-07T08:57:31.121495Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-05-07T08:57:31.121545Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-05-07T08:57:31.121587Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx 2025-05-07T08:57:31.121628Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx 2025-05-07T08:57:31.121665Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-05-07T08:57:31.126049Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayCompleteNoMoreRestarts 2025-05-07T08:57:31.126182Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit StoreSchemeTx 2025-05-07T08:57:31.126227Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit FinishPropose 2025-05-07T08:57:31.126270Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-05-07T08:57:31.126348Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayComplete 2025-05-07T08:57:31.126405Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit FinishPropose 2025-05-07T08:57:31.126453Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit WaitForPlan 2025-05-07T08:57:31.126489Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit WaitForPlan 2025-05-07T08:57:31.126519Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:281474976715657] at 72075186224037888 is not ready to execute on unit WaitForPlan 2025-05-07T08:57:31.128200Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269746185, Sender [1:684:2580], Recipient [1:665:2569]: NKikimr::TEvTxProxySchemeCache::TEvWatchNotifyUpdated 2025-05-07T08:57:31.128264Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-05-07T08:57:31.142658Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-05-07T08:57:31.142751Z node 1 
:TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-05-07T08:57:31.142809Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-05-07T08:57:31.142872Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 281474976715657 at tablet 72075186224037888 send to client, exec latency: 0 ms, propose latency: 0 ms, status: PREPARED 2025-05-07T08:57:31.142957Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-05-07T08:57:31.320372Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269877761, Sender [1:699:2589], Recipient [1:665:2569]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-05-07T08:57:31.320441Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3166: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-05-07T08:57:31.320483Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075 ... r [1500:100] at 72075186224037888 executing on unit BuildWriteOutRS 2025-05-07T08:58:01.328360Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1500:100] at 72075186224037888 to execution unit StoreAndSendWriteOutRS 2025-05-07T08:58:01.328396Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1500:100] at 72075186224037888 on unit StoreAndSendWriteOutRS 2025-05-07T08:58:01.328423Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1500:100] at 72075186224037888 is Executed 2025-05-07T08:58:01.328445Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1500:100] at 72075186224037888 executing on unit StoreAndSendWriteOutRS 2025-05-07T08:58:01.328466Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1500:100] at 72075186224037888 to execution unit PrepareWriteTxInRS 2025-05-07T08:58:01.328488Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1500:100] at 72075186224037888 on unit PrepareWriteTxInRS 2025-05-07T08:58:01.328511Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1500:100] at 72075186224037888 is Executed 2025-05-07T08:58:01.328533Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1500:100] at 72075186224037888 executing on unit PrepareWriteTxInRS 2025-05-07T08:58:01.328568Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1500:100] at 72075186224037888 to execution unit LoadAndWaitInRS 2025-05-07T08:58:01.328595Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1500:100] at 72075186224037888 on unit LoadAndWaitInRS 2025-05-07T08:58:01.328625Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1500:100] at 72075186224037888 is Executed 2025-05-07T08:58:01.328651Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1500:100] at 72075186224037888 executing on unit LoadAndWaitInRS 2025-05-07T08:58:01.328672Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1500:100] at 72075186224037888 to execution unit ExecuteWrite 2025-05-07T08:58:01.328695Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1500:100] at 72075186224037888 on unit ExecuteWrite 2025-05-07T08:58:01.328726Z node 6 :TX_DATASHARD 
DEBUG: execute_write_unit.cpp:245: Executing write operation for [1500:100] at 72075186224037888 2025-05-07T08:58:01.328857Z node 6 :TX_DATASHARD DEBUG: execute_write_unit.cpp:410: Executed write operation for [1500:100] at 72075186224037888, row count=3 2025-05-07T08:58:01.328896Z node 6 :TX_DATASHARD TRACE: execute_write_unit.cpp:47: add locks to result: 0 2025-05-07T08:58:01.328982Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1500:100] at 72075186224037888 is ExecutedNoMoreRestarts 2025-05-07T08:58:01.329021Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1500:100] at 72075186224037888 executing on unit ExecuteWrite 2025-05-07T08:58:01.329064Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1500:100] at 72075186224037888 to execution unit CompleteWrite 2025-05-07T08:58:01.329139Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1500:100] at 72075186224037888 on unit CompleteWrite 2025-05-07T08:58:01.329342Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1500:100] at 72075186224037888 is DelayComplete 2025-05-07T08:58:01.329372Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1500:100] at 72075186224037888 executing on unit CompleteWrite 2025-05-07T08:58:01.329410Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1500:100] at 72075186224037888 to execution unit CompletedOperations 2025-05-07T08:58:01.329458Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1500:100] at 72075186224037888 on unit CompletedOperations 2025-05-07T08:58:01.329502Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1500:100] at 72075186224037888 is Executed 2025-05-07T08:58:01.329527Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1500:100] at 72075186224037888 executing on unit CompletedOperations 2025-05-07T08:58:01.329562Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [1500:100] at 72075186224037888 has finished 2025-05-07T08:58:01.329599Z node 6 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-05-07T08:58:01.329631Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 72075186224037888 2025-05-07T08:58:01.329671Z node 6 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-05-07T08:58:01.329714Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 72075186224037888 2025-05-07T08:58:01.351216Z node 6 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1500} 2025-05-07T08:58:01.351343Z node 6 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T08:58:01.351408Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1500:100] at 72075186224037888 on unit CompleteWrite 2025-05-07T08:58:01.351502Z node 6 :TX_DATASHARD DEBUG: datashard.cpp:826: Complete write [1500 : 100] from 72075186224037888 at tablet 72075186224037888 send result to client [6:593:2518] 2025-05-07T08:58:01.351565Z node 6 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 
2025-05-07T08:58:01.352976Z node 6 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269877761, Sender [6:757:2627], Recipient [6:664:2568]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-05-07T08:58:01.353054Z node 6 :TX_DATASHARD TRACE: datashard_impl.h:3166: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-05-07T08:58:01.353135Z node 6 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [6:756:2626], serverId# [6:757:2627], sessionId# [0:0:0] 2025-05-07T08:58:01.353350Z node 6 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269553169, Sender [6:755:2625], Recipient [6:664:2568]: NKikimrTxDataShard.TEvGetInfoRequest 2025-05-07T08:58:01.355472Z node 6 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269877761, Sender [6:760:2630], Recipient [6:664:2568]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-05-07T08:58:01.355554Z node 6 :TX_DATASHARD TRACE: datashard_impl.h:3166: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-05-07T08:58:01.355615Z node 6 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [6:759:2629], serverId# [6:760:2630], sessionId# [0:0:0] 2025-05-07T08:58:01.355911Z node 6 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269553215, Sender [6:758:2628], Recipient [6:664:2568]: NKikimrTxDataShard.TEvRead ReadId: 1000 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 ResultFormat: FORMAT_CELLVEC RangesSize: 1 2025-05-07T08:58:01.356064Z node 6 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2435: TTxReadViaPipeline execute: at tablet# 72075186224037888, FollowerId 0 2025-05-07T08:58:01.356130Z node 6 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 72075186224037888 CompleteEdge# v1500/100 IncompleteEdge# v{min} UnprotectedReadEdge# v{min} ImmediateWriteEdge# v{min} ImmediateWriteEdgeReplied# v{min} 2025-05-07T08:58:01.356180Z node 6 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2538: 72075186224037888 changed HEAD read to non-repeatable v1500/18446744073709551615 2025-05-07T08:58:01.356261Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:3] at 72075186224037888 on unit CheckRead 2025-05-07T08:58:01.356367Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:3] at 72075186224037888 is Executed 2025-05-07T08:58:01.356413Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:3] at 72075186224037888 executing on unit CheckRead 2025-05-07T08:58:01.356457Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:3] at 72075186224037888 to execution unit BuildAndWaitDependencies 2025-05-07T08:58:01.356501Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:3] at 72075186224037888 on unit BuildAndWaitDependencies 2025-05-07T08:58:01.356554Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:3] at 72075186224037888 2025-05-07T08:58:01.356600Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:3] at 72075186224037888 is Executed 2025-05-07T08:58:01.356637Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:3] at 72075186224037888 executing on unit BuildAndWaitDependencies 2025-05-07T08:58:01.356672Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:3] at 
72075186224037888 to execution unit ExecuteRead 2025-05-07T08:58:01.356713Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:3] at 72075186224037888 on unit ExecuteRead 2025-05-07T08:58:01.356831Z node 6 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1566: 72075186224037888 Execute read# 1, request: { ReadId: 1000 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 ResultFormat: FORMAT_CELLVEC } 2025-05-07T08:58:01.357148Z node 6 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2146: 72075186224037888 Complete read# {[6:758:2628], 1000} after executionsCount# 1 2025-05-07T08:58:01.357219Z node 6 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2120: 72075186224037888 read iterator# {[6:758:2628], 1000} sends rowCount# 3, bytes# 96, quota rows left# 18446744073709551612, quota bytes left# 18446744073709551519, hasUnreadQueries# 0, total queries# 1, firstUnprocessed# 0 2025-05-07T08:58:01.357313Z node 6 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2171: 72075186224037888 read iterator# {[6:758:2628], 1000} finished in read 2025-05-07T08:58:01.357388Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:3] at 72075186224037888 is Executed 2025-05-07T08:58:01.357417Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:3] at 72075186224037888 executing on unit ExecuteRead 2025-05-07T08:58:01.357440Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:3] at 72075186224037888 to execution unit CompletedOperations 2025-05-07T08:58:01.357466Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:3] at 72075186224037888 on unit CompletedOperations 2025-05-07T08:58:01.357518Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:3] at 72075186224037888 is Executed 2025-05-07T08:58:01.357541Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:3] at 72075186224037888 executing on unit CompletedOperations 2025-05-07T08:58:01.357589Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:3] at 72075186224037888 has finished 2025-05-07T08:58:01.357657Z node 6 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2670: TTxReadViaPipeline(69) Execute with status# Executed at tablet# 72075186224037888 2025-05-07T08:58:01.357778Z node 6 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2719: TTxReadViaPipeline(69) Complete: at tablet# 72075186224037888 >> TTableProfileTests::UseTableProfilePresetViaSdk [GOOD] >> TTableProfileTests::WrongTableProfile >> TestKinesisHttpProxy::TestRequestWithIAM >> SystemView::AuthPermissions_Selects [GOOD] >> TestYmqHttpProxy::TestCreateQueueWithWrongBody >> BackupRestore::TestAllPrimitiveTypes-JSON [GOOD] >> BackupRestore::TestAllPrimitiveTypes-JSON_DOCUMENT >> TestKinesisHttpProxy::PutRecordsWithLongExplicitHashKey [GOOD] |91.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/fq/libs/row_dispatcher/format_handler/ut/ydb-core-fq-libs-row_dispatcher-format_handler-ut |91.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/fq/libs/row_dispatcher/format_handler/ut/ydb-core-fq-libs-row_dispatcher-format_handler-ut |91.0%| [TA] {RESULT} $(B)/ydb/library/ycloud/impl/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} |91.0%| [LD] {RESULT} $(B)/ydb/core/fq/libs/row_dispatcher/format_handler/ut/ydb-core-fq-libs-row_dispatcher-format_handler-ut >> TestKinesisHttpProxy::GoodRequestPutRecords [GOOD] >> DataShardWrite::UpsertNoLocksArbiterRestart [GOOD] >> DataShardWrite::UpsertLostPrepareArbiterRestart >> TestKinesisHttpProxy::CreateStreamWithDifferentRetentions >> TestKinesisHttpProxy::PutRecordsWithIncorrectHashKey >> TestKinesisHttpProxy::DoubleCreateStream >> TSchemeShardTTLTests::CreateTableShouldFailOnWrongUnit-EnableTablePgTypes-false ------- [TM] {asan, default-linux-x86_64, release} ydb/core/sys_view/ut/unittest >> SystemView::AuthPermissions_Selects [GOOD] Test command err: 2025-05-07T08:55:13.374458Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501624725691415577:2066];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:55:13.381785Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0045d4/r3tmp/tmpeC7KVo/pdisk_1.dat 2025-05-07T08:55:14.051823Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:55:14.051937Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:55:14.171548Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:55:14.187912Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 27355, node 1 2025-05-07T08:55:14.446526Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:55:14.446552Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:55:14.446562Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:55:14.446682Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:16967 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-05-07T08:55:14.964937Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:55:14.987776Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T08:55:14.999717Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:55:17.396653Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501624742871285494:2333], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T08:55:17.396808Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T08:55:17.397130Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501624742871285506:2336], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T08:55:17.404314Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480
2025-05-07T08:55:17.426762Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501624742871285508:2337], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
<main>: Error: Transaction 281474976710659 completed, doublechecking }
2025-05-07T08:55:17.489139Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501624742871285561:2390] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 }
2025-05-07T08:55:18.374610Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501624725691415577:2066];send_to=[0:7307199536658146131:7762515];
2025-05-07T08:55:18.374677Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout;
2025-05-07T08:55:19.974194Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710661. Ctx: { TraceId: 01jtmz9nmfe8zegra65hbpmjwq, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NzJmNWIwOTQtODZmMDI1Y2MtZjViNjhmMTktNTFjYjk2NGY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root
2025-05-07T08:55:21.263850Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710662. Ctx: { TraceId: 01jtmz9sc6dbxayeksmqcc4tm1, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OTEwZmY2M2ItZDg1MjVlMzUtNDlkYjcyZjEtYTllMjYwNjk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root
2025-05-07T08:55:21.804615Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710664. Ctx: { TraceId: 01jtmz9shv8jp57h8qe10fqhnh, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NjYxMGE2YjEtODBiZmJiNzUtOTVmNTk4NWEtOTQ2OWY5ZDc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}.
Database not set, use /Root 2025-05-07T08:55:21.834261Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:45: Scan started, actor: [1:7501624760051154873:2378], owner: [1:7501624760051154869:2376], scan id: 0, table id: [72057594046644480:1:0:top_queries_by_read_bytes_one_minute] 2025-05-07T08:55:21.835463Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:321: Scan prepared, actor: [1:7501624760051154873:2378], schemeshard id: 72057594046644480, hive id: 72057594037968897, database: /Root, database owner: root@builtin, domain key: [OwnerId: 72057594046644480, LocalPathId: 1], database node count: 1 2025-05-07T08:55:21.854197Z node 1 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:65: Sending scan batch, actor: [1:7501624760051154873:2378], row count: 2, finished: 1 2025-05-07T08:55:21.854247Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:120: Scan finished, actor: [1:7501624760051154873:2378], owner: [1:7501624760051154869:2376], scan id: 0, table id: [72057594046644480:1:0:top_queries_by_read_bytes_one_minute] 2025-05-07T08:55:21.884654Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1746608121797, txId: 281474976710663] shutting down 2025-05-07T08:55:22.677714Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7501624767109263248:2060];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:55:22.677760Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0045d4/r3tmp/tmpmnD1HD/pdisk_1.dat 2025-05-07T08:55:22.770329Z node 2 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 24205, node 2 2025-05-07T08:55:22.819607Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:55:22.819686Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:55:22.849804Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:55:22.906825Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:55:22.906851Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:55:22.906858Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:55:22.906955Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:4808 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-05-07T08:55:23.144708Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-05-07T08:55:23.156690Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:55:26.517384Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7501624784289133163:2333], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:55:26.517490Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Servi ... count: 1 2025-05-07T08:58:00.065338Z node 33 :SYSTEM_VIEWS DEBUG: auth_scan_base.h:99: ProceedToScan, tenant name: /Root tenant owner: root@builtin subject sid: empty require admin access: 0 is admin: 1 2025-05-07T08:58:00.065425Z node 33 :SYSTEM_VIEWS TRACE: auth_scan_base.h:209: Navigate { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-05-07T08:58:00.065763Z node 33 :SYSTEM_VIEWS TRACE: auth_scan_base.h:170: Got navigate: { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root TableId: [72057594046644480:1:0] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [{ Sid: user2 },{ Sid: user1 }] Groups: [] } Children [.metadata,Dir1,Table0,Tenant1,Tenant2] }] } 2025-05-07T08:58:00.065826Z node 33 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:65: Sending scan batch, actor: [33:7501625445519515647:2443], row count: 0, finished: 0 2025-05-07T08:58:00.066238Z node 33 :SYSTEM_VIEWS TRACE: auth_scan_base.h:209: Navigate { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Dir1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-05-07T08:58:00.066614Z node 33 :SYSTEM_VIEWS TRACE: auth_scan_base.h:170: Got navigate: { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Dir1 TableId: [72057594046644480:9:0] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } Children [SubDir1,SubDir2] }] } 2025-05-07T08:58:00.066676Z node 33 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:65: Sending scan batch, actor: [33:7501625445519515647:2443], row count: 0, finished: 0 2025-05-07T08:58:00.066889Z node 33 :SYSTEM_VIEWS TRACE: auth_scan_base.h:209: Navigate { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Dir1/SubDir1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-05-07T08:58:00.067243Z node 33 :SYSTEM_VIEWS TRACE: auth_scan_base.h:170: Got navigate: { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Dir1/SubDir1 
TableId: [72057594046644480:10:0] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } Children [] }] } 2025-05-07T08:58:00.067316Z node 33 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:65: Sending scan batch, actor: [33:7501625445519515647:2443], row count: 2, finished: 0 2025-05-07T08:58:00.067467Z node 33 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:120: Scan finished, actor: [33:7501625445519515647:2443], owner: [33:7501625445519515644:2441], scan id: 0, table id: [72057594046644480:1:0:auth_permissions] 2025-05-07T08:58:00.069681Z node 33 :SYSTEM_VIEWS TRACE: sysview_service.cpp:900: Collect query stats: service id# [33:7501625376800036602:2088], database# , query hash# 3187945588805523718, cpu time# 209697 2025-05-07T08:58:00.070595Z node 33 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1746608280056, txId: 281474976710687] shutting down 2025-05-07T08:58:00.267992Z node 33 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710690. Ctx: { TraceId: 01jtmzemgje7a8fa0gqfnh8qqx, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=33&id=YjhhNGNjYzQtYjA1MTlkMTItZjUxODUyNS03MDQyZTVjNw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:58:00.269859Z node 33 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:45: Scan started, actor: [33:7501625445519515688:2452], owner: [33:7501625445519515684:2450], scan id: 0, table id: [72057594046644480:1:0:auth_permissions] 2025-05-07T08:58:00.270730Z node 33 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:321: Scan prepared, actor: [33:7501625445519515688:2452], schemeshard id: 72057594046644480, hive id: 72057594037968897, database: /Root, database owner: root@builtin, domain key: [OwnerId: 72057594046644480, LocalPathId: 1], database node count: 1 2025-05-07T08:58:00.270755Z node 33 :SYSTEM_VIEWS DEBUG: auth_scan_base.h:99: ProceedToScan, tenant name: /Root tenant owner: root@builtin subject sid: empty require admin access: 0 is admin: 1 2025-05-07T08:58:00.270911Z node 33 :SYSTEM_VIEWS TRACE: auth_scan_base.h:209: Navigate { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-05-07T08:58:00.271249Z node 33 :SYSTEM_VIEWS TRACE: auth_scan_base.h:170: Got navigate: { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root TableId: [72057594046644480:1:0] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [{ Sid: user2 },{ Sid: user1 }] Groups: [] } Children 
[.metadata,Dir1,Table0,Tenant1,Tenant2] }] } 2025-05-07T08:58:00.271312Z node 33 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:65: Sending scan batch, actor: [33:7501625445519515688:2452], row count: 0, finished: 0 2025-05-07T08:58:00.271425Z node 33 :SYSTEM_VIEWS TRACE: auth_scan_base.h:209: Navigate { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Dir1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-05-07T08:58:00.273838Z node 33 :SYSTEM_VIEWS TRACE: auth_scan_base.h:170: Got navigate: { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Dir1 TableId: [72057594046644480:9:0] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } Children [SubDir1,SubDir2] }] } 2025-05-07T08:58:00.273941Z node 33 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:65: Sending scan batch, actor: [33:7501625445519515688:2452], row count: 0, finished: 0 2025-05-07T08:58:00.276582Z node 33 :SYSTEM_VIEWS TRACE: auth_scan_base.h:209: Navigate { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Dir1/SubDir1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-05-07T08:58:00.277344Z node 33 :SYSTEM_VIEWS TRACE: auth_scan_base.h:170: Got navigate: { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Dir1/SubDir1 TableId: [72057594046644480:10:0] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } Children [] }] } 2025-05-07T08:58:00.277416Z node 33 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:65: Sending scan batch, actor: [33:7501625445519515688:2452], row count: 1, finished: 0 2025-05-07T08:58:00.277608Z node 33 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:120: Scan finished, actor: [33:7501625445519515688:2452], owner: [33:7501625445519515684:2450], scan id: 0, table id: [72057594046644480:1:0:auth_permissions] 2025-05-07T08:58:00.280041Z node 33 :SYSTEM_VIEWS TRACE: sysview_service.cpp:900: Collect query stats: service id# [33:7501625376800036602:2088], database# , query hash# 15123460272068726277, cpu time# 181022 2025-05-07T08:58:00.280901Z node 33 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1746608280266, txId: 281474976710689] shutting down 2025-05-07T08:58:00.290382Z node 34 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 
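The auth_permissions scan above walks the scheme tree depth-first (Root, then Root/Dir1, then Root/Dir1/SubDir1), issuing one Navigate per path and sending one scan batch per visited entry — often with row count 0 — before reporting Scan finished to its owner. A minimal sketch of that traversal pattern, assuming an in-memory tree; the real scan in auth_scan_base.h resolves children asynchronously via the scheme cache, and the type and field names below are invented for illustration:

#include <functional>
#include <iostream>
#include <string>
#include <vector>

// Hypothetical node of a scheme tree; the real scan resolves children
// with TEvNavigateKeySet requests instead of holding them in memory.
struct TSchemeNode {
    std::string Path;
    std::vector<std::string> PermissionRows; // rows this path contributes
    std::vector<TSchemeNode> Children;
};

// Depth-first traversal mirroring the log: one "Navigate" and one
// "Sending scan batch" (possibly with row count 0) per visited path.
void ScanPermissions(const TSchemeNode& node,
                     const std::function<void(const std::string&, size_t)>& sendBatch) {
    sendBatch(node.Path, node.PermissionRows.size());
    for (const auto& child : node.Children) {
        ScanPermissions(child, sendBatch);
    }
}

int main() {
    TSchemeNode root{"Root", {}, {
        TSchemeNode{"Root/Dir1", {}, {
            TSchemeNode{"Root/Dir1/SubDir1", {"user1:read", "user2:read"}, {}},
        }},
    }};
    ScanPermissions(root, [](const std::string& path, size_t rows) {
        std::cout << "Sending scan batch, path: " << path
                  << ", row count: " << rows << "\n";
    });
    // A real scan would now report "Scan finished" to the owner actor.
}

Emitting a batch per visited path, even an empty one, lets the consumer observe traversal progress and keeps memory bounded regardless of how large the scheme tree grows.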
2025-05-07T08:58:00.294971Z node 37 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:58:00.292318Z node 33 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 35 2025-05-07T08:58:00.292935Z node 33 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(35, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-05-07T08:58:00.293045Z node 33 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 36 2025-05-07T08:58:00.293210Z node 33 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(36, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-05-07T08:58:00.293303Z node 33 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 34 2025-05-07T08:58:00.293610Z node 33 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(34, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-05-07T08:58:00.294446Z node 33 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 37 2025-05-07T08:58:00.295108Z node 33 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(37, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-05-07T08:58:00.299897Z node 33 :HIVE WARN: hive_impl.cpp:934: HIVE#72057594037968897 THive::Handle::TEvUndelivered Sender=[37:7501625384123844805:2099], Type=268959746 >> DataShardWrite::InsertImmediate [GOOD] >> DataShardWrite::ImmediateAndPlannedCommittedOpsRace >> KqpErrors::ProposeResultLost_RwTx-UseSink [GOOD] >> Cdc::NewAndOldImagesLogDebezium [GOOD] >> Cdc::OldImageLogDebezium |91.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/tools/kqprun/kqprun |91.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/tools/kqprun/kqprun |91.0%| [LD] {RESULT} $(B)/ydb/tests/tools/kqprun/kqprun |91.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/yql/providers/solomon/actors/ut/ydb-library-yql-providers-solomon-actors-ut |91.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/library/yql/providers/solomon/actors/ut/ydb-library-yql-providers-solomon-actors-ut |91.0%| [LD] {RESULT} $(B)/ydb/library/yql/providers/solomon/actors/ut/ydb-library-yql-providers-solomon-actors-ut >> BackupRestoreS3::TestAllPrimitiveTypes-INTERVAL [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-DATE32 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_kqp_errors/unittest >> KqpErrors::ProposeResultLost_RwTx-UseSink [GOOD] Test command err: 2025-05-07T08:57:50.116602Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:698:2413], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:57:50.117069Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:57:50.117286Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:57:50.119291Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:695:2355], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:57:50.119672Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:57:50.119724Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0047bd/r3tmp/tmpR01r7N/pdisk_1.dat 2025-05-07T08:57:50.602745Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:57:50.840651Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-05-07T08:57:50.967825Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:57:50.967962Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:57:50.972825Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:57:50.972939Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:57:50.989345Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-05-07T08:57:50.990312Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:57:50.990695Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:57:51.303179Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-05-07T08:57:52.243902Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:1582:2951], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:57:52.244032Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:1591:2956], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:57:52.244104Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:57:52.249729Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480 2025-05-07T08:57:52.810032Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:1596:2959], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-05-07T08:57:52.990041Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:1735:3038] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:57:53.332688Z node 1 :KQP_EXECUTER TRACE: kqp_executer_impl.h:190: ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: 01jtmzecvh4sy8233tnektsf23, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NzI5ZTE4N2YtYWY5ZmNlYWMtMTRhNWZlM2YtNzBjZTM5ODQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Bootstrap done, become ReadyState 2025-05-07T08:57:53.333006Z node 1 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:584: ActorId: [1:1761:2949] TxId: 281474976715660. Ctx: { TraceId: 01jtmzecvh4sy8233tnektsf23, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NzI5ZTE4N2YtYWY5ZmNlYWMtMTRhNWZlM2YtNzBjZTM5ODQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Executing physical tx, type: 2, stages: 1 2025-05-07T08:57:53.333152Z node 1 :KQP_EXECUTER DEBUG: kqp_tasks_graph.cpp:25: StageInfo: StageId #[0,0], InputsCount: 0, OutputsCount: 1 2025-05-07T08:57:53.333371Z node 1 :KQP_EXECUTER TRACE: kqp_executer_impl.h:599: ActorId: [1:1761:2949] TxId: 281474976715660. Ctx: { TraceId: 01jtmzecvh4sy8233tnektsf23, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NzI5ZTE4N2YtYWY5ZmNlYWMtMTRhNWZlM2YtNzBjZTM5ODQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Got request, become WaitResolveState 2025-05-07T08:57:53.333644Z node 1 :KQP_EXECUTER DEBUG: kqp_table_resolver.cpp:271: TxId: 281474976715660. Resolved key sets: 1 2025-05-07T08:57:53.333829Z node 1 :KQP_EXECUTER DEBUG: kqp_table_resolver.cpp:295: TxId: 281474976715660. Resolved key: { TableId: [OwnerId: 72057594046644480, LocalPathId: 2] Access: 2 SyncVersion: false Status: OkData Kind: KindRegularTable PartitionsCount: 4 DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } From: (Uint32 : NULL) IncFrom: 1 To: () IncTo: 0 } 2025-05-07T08:57:53.333991Z node 1 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:2034: ActorId: [1:1761:2949] TxId: 281474976715660. Ctx: { TraceId: 01jtmzecvh4sy8233tnektsf23, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NzI5ZTE4N2YtYWY5ZmNlYWMtMTRhNWZlM2YtNzBjZTM5ODQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Stage [0,0] AST: ( (return (lambda '() (block '( (let $1 (Just (Uint32 '1))) (let $2 (Just (Uint32 '2))) (let $3 (Just (Uint32 '3))) (return (Iterator (AsList (AsStruct '('"key" $1) '('"value" $1)) (AsStruct '('"key" $2) '('"value" $2)) (AsStruct '('"key" $3) '('"value" $3))))) )))) ) 2025-05-07T08:57:53.334147Z node 1 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:1465: ActorId: [1:1761:2949] TxId: 281474976715660. Ctx: { TraceId: 01jtmzecvh4sy8233tnektsf23, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NzI5ZTE4N2YtYWY5ZmNlYWMtMTRhNWZlM2YtNzBjZTM5ODQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Stage [0,0] create compute task: 1 2025-05-07T08:57:53.334273Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715660. Ctx: { TraceId: 01jtmzecvh4sy8233tnektsf23, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NzI5ZTE4N2YtYWY5ZmNlYWMtMTRhNWZlM2YtNzBjZTM5ODQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:57:53.334350Z node 1 :KQP_EXECUTER DEBUG: kqp_planner.cpp:553: TxId: 281474976715660. Ctx: { TraceId: 01jtmzecvh4sy8233tnektsf23, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NzI5ZTE4N2YtYWY5ZmNlYWMtMTRhNWZlM2YtNzBjZTM5ODQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Total tasks: 0, readonly: true, 0 scan tasks on 0 nodes, localComputeTasks: 0, snapshot: {0, 0} 2025-05-07T08:57:53.334769Z node 1 :KQP_EXECUTER DEBUG: kqp_planner.cpp:793: TxId: 281474976715660. Ctx: { TraceId: 01jtmzecvh4sy8233tnektsf23, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NzI5ZTE4N2YtYWY5ZmNlYWMtMTRhNWZlM2YtNzBjZTM5ODQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Collect channels updates for task: 1 at actor [1:1764:2949] 2025-05-07T08:57:53.334847Z node 1 :KQP_EXECUTER DEBUG: kqp_planner.cpp:785: TxId: 281474976715660. Ctx: { TraceId: 01jtmzecvh4sy8233tnektsf23, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NzI5ZTE4N2YtYWY5ZmNlYWMtMTRhNWZlM2YtNzBjZTM5ODQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Sending channels info to compute actor: [1:1764:2949], channels: 0 2025-05-07T08:57:53.334927Z node 1 :KQP_EXECUTER INFO: kqp_data_executer.cpp:2800: ActorId: [1:1761:2949] TxId: 281474976715660. Ctx: { TraceId: 01jtmzecvh4sy8233tnektsf23, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NzI5ZTE4N2YtYWY5ZmNlYWMtMTRhNWZlM2YtNzBjZTM5ODQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Total tasks: 1, readonly: 0, datashardTxs: 0, evWriteTxs: 0, topicTxs: 0, volatile: 0, immediate: 1, pending compute tasks0, useFollowers: 0 2025-05-07T08:57:53.334977Z node 1 :KQP_EXECUTER TRACE: kqp_data_executer.cpp:2803: ActorId: [1:1761:2949] TxId: 281474976715660. Ctx: { TraceId: 01jtmzecvh4sy8233tnektsf23, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NzI5ZTE4N2YtYWY5ZmNlYWMtMTRhNWZlM2YtNzBjZTM5ODQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Updating channels after the creation of compute actors 2025-05-07T08:57:53.335025Z node 1 :KQP_EXECUTER DEBUG: kqp_planner.cpp:793: TxId: 281474976715660. Ctx: { TraceId: 01jtmzecvh4sy8233tnektsf23, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NzI5ZTE4N2YtYWY5ZmNlYWMtMTRhNWZlM2YtNzBjZTM5ODQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Collect channels updates for task: 1 at actor [1:1764:2949] 2025-05-07T08:57:53.335072Z node 1 :KQP_EXECUTER DEBUG: kqp_planner.cpp:785: TxId: 281474976715660. Ctx: { TraceId: 01jtmzecvh4sy8233tnektsf23, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NzI5ZTE4N2YtYWY5ZmNlYWMtMTRhNWZlM2YtNzBjZTM5ODQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Sending channels info to compute actor: [1:1764:2949], channels: 0 2025-05-07T08:57:53.335171Z node 1 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:644: ActorId: [1:1761:2949] TxId: 281474976715660. 
Ctx: { TraceId: 01jtmzecvh4sy8233tnektsf23, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NzI5ZTE4N2YtYWY5ZmNlYWMtMTRhNWZlM2YtNzBjZTM5ODQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Waiting for: CA [1:1764:2949], 2025-05-07T08:57:53.335238Z node 1 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:157: ActorId: [1:1761:2949] TxId: 281474976715660. Ctx: { TraceId: 01jtmzecvh4sy82 ... nId: ydb://session/3?node_id=3&id=YjEyNzNiOTUtOTAxODBmM2UtNmE2Zjk5MjEtYTM1ODRhMmQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Waiting for: CT 1, CA [3:1822:3089], 2025-05-07T08:58:04.778751Z node 3 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:157: ActorId: [3:1814:3089] TxId: 281474976715663. Ctx: { TraceId: 01jtmzerz4cfns3gwgzzb064rs, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YjEyNzNiOTUtOTAxODBmM2UtNmE2Zjk5MjEtYTM1ODRhMmQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, waiting for 1 compute actor(s) and 0 datashard(s): CA [3:1822:3089], 2025-05-07T08:58:04.779239Z node 3 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:767: ActorId: [3:1814:3089] TxId: 281474976715663. Ctx: { TraceId: 01jtmzerz4cfns3gwgzzb064rs, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YjEyNzNiOTUtOTAxODBmM2UtNmE2Zjk5MjEtYTM1ODRhMmQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Executing task: 1 on compute actor: [4:1824:2467] 2025-05-07T08:58:04.779313Z node 3 :KQP_EXECUTER DEBUG: kqp_planner.cpp:793: TxId: 281474976715663. Ctx: { TraceId: 01jtmzerz4cfns3gwgzzb064rs, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YjEyNzNiOTUtOTAxODBmM2UtNmE2Zjk5MjEtYTM1ODRhMmQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Collect channels updates for task: 1 at actor [4:1824:2467] 2025-05-07T08:58:04.779372Z node 3 :KQP_EXECUTER DEBUG: kqp_planner.cpp:829: TxId: 281474976715663. Ctx: { TraceId: 01jtmzerz4cfns3gwgzzb064rs, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YjEyNzNiOTUtOTAxODBmM2UtNmE2Zjk5MjEtYTM1ODRhMmQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Task: 1, output channelId: 1, dst task: 2, at actor [3:1822:3089] 2025-05-07T08:58:04.779428Z node 3 :KQP_EXECUTER DEBUG: kqp_planner.cpp:785: TxId: 281474976715663. Ctx: { TraceId: 01jtmzerz4cfns3gwgzzb064rs, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YjEyNzNiOTUtOTAxODBmM2UtNmE2Zjk5MjEtYTM1ODRhMmQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Sending channels info to compute actor: [4:1824:2467], channels: 1 2025-05-07T08:58:04.779482Z node 3 :KQP_EXECUTER DEBUG: kqp_planner.cpp:785: TxId: 281474976715663. Ctx: { TraceId: 01jtmzerz4cfns3gwgzzb064rs, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YjEyNzNiOTUtOTAxODBmM2UtNmE2Zjk5MjEtYTM1ODRhMmQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Sending channels info to compute actor: [3:1822:3089], channels: 1 2025-05-07T08:58:04.779681Z node 3 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:433: ActorId: [3:1814:3089] TxId: 281474976715663. Ctx: { TraceId: 01jtmzerz4cfns3gwgzzb064rs, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YjEyNzNiOTUtOTAxODBmM2UtNmE2Zjk5MjEtYTM1ODRhMmQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
ActorState: ExecuteState, got execution state from compute actor: [4:1824:2467], task: 1, state: COMPUTE_STATE_EXECUTING, stats: { } 2025-05-07T08:58:04.779738Z node 3 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:644: ActorId: [3:1814:3089] TxId: 281474976715663. Ctx: { TraceId: 01jtmzerz4cfns3gwgzzb064rs, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YjEyNzNiOTUtOTAxODBmM2UtNmE2Zjk5MjEtYTM1ODRhMmQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Waiting for: CA [4:1824:2467], CA [3:1822:3089], 2025-05-07T08:58:04.779780Z node 3 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:157: ActorId: [3:1814:3089] TxId: 281474976715663. Ctx: { TraceId: 01jtmzerz4cfns3gwgzzb064rs, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YjEyNzNiOTUtOTAxODBmM2UtNmE2Zjk5MjEtYTM1ODRhMmQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, waiting for 2 compute actor(s) and 0 datashard(s): CA [4:1824:2467], CA [3:1822:3089], 2025-05-07T08:58:04.780443Z node 3 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:433: ActorId: [3:1814:3089] TxId: 281474976715663. Ctx: { TraceId: 01jtmzerz4cfns3gwgzzb064rs, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YjEyNzNiOTUtOTAxODBmM2UtNmE2Zjk5MjEtYTM1ODRhMmQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, got execution state from compute actor: [4:1824:2467], task: 1, state: COMPUTE_STATE_EXECUTING, stats: { CpuTimeUs: 658 Tasks { TaskId: 1 CpuTimeUs: 412 ComputeCpuTimeUs: 12 BuildCpuTimeUs: 400 HostName: "ghrun-sykirh5vua" NodeId: 4 CreateTimeMs: 1746608284777 UpdateTimeMs: 1746608284778 } MaxMemoryUsage: 1048576 } 2025-05-07T08:58:04.780568Z node 3 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:644: ActorId: [3:1814:3089] TxId: 281474976715663. Ctx: { TraceId: 01jtmzerz4cfns3gwgzzb064rs, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YjEyNzNiOTUtOTAxODBmM2UtNmE2Zjk5MjEtYTM1ODRhMmQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Waiting for: CA [4:1824:2467], CA [3:1822:3089], 2025-05-07T08:58:04.780619Z node 3 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:157: ActorId: [3:1814:3089] TxId: 281474976715663. Ctx: { TraceId: 01jtmzerz4cfns3gwgzzb064rs, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YjEyNzNiOTUtOTAxODBmM2UtNmE2Zjk5MjEtYTM1ODRhMmQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, waiting for 2 compute actor(s) and 0 datashard(s): CA [4:1824:2467], CA [3:1822:3089], 2025-05-07T08:58:04.788802Z node 3 :KQP_EXECUTER TRACE: kqp_executer_impl.h:382: ActorId: [3:1814:3089] TxId: 281474976715663. Ctx: { TraceId: 01jtmzerz4cfns3gwgzzb064rs, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YjEyNzNiOTUtOTAxODBmM2UtNmE2Zjk5MjEtYTM1ODRhMmQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Got result, channelId: 2, shardId: 0, inputIndex: 0, from: [3:1823:3089], finished: 0 2025-05-07T08:58:04.788952Z node 3 :KQP_EXECUTER TRACE: kqp_executer_impl.h:385: ActorId: [3:1814:3089] TxId: 281474976715663. Ctx: { TraceId: 01jtmzerz4cfns3gwgzzb064rs, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YjEyNzNiOTUtOTAxODBmM2UtNmE2Zjk5MjEtYTM1ODRhMmQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Send ack to channelId: 2, seqNo: 1, to: [3:1823:3089] 2025-05-07T08:58:04.796192Z node 3 :KQP_EXECUTER TRACE: kqp_executer_impl.h:382: ActorId: [3:1814:3089] TxId: 281474976715663. 
Ctx: { TraceId: 01jtmzerz4cfns3gwgzzb064rs, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YjEyNzNiOTUtOTAxODBmM2UtNmE2Zjk5MjEtYTM1ODRhMmQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Got result, channelId: 2, shardId: 0, inputIndex: 0, from: [3:1823:3089], finished: 1 2025-05-07T08:58:04.796288Z node 3 :KQP_EXECUTER TRACE: kqp_executer_impl.h:385: ActorId: [3:1814:3089] TxId: 281474976715663. Ctx: { TraceId: 01jtmzerz4cfns3gwgzzb064rs, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YjEyNzNiOTUtOTAxODBmM2UtNmE2Zjk5MjEtYTM1ODRhMmQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Send ack to channelId: 2, seqNo: 2, to: [3:1823:3089] 2025-05-07T08:58:04.797456Z node 3 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:433: ActorId: [3:1814:3089] TxId: 281474976715663. Ctx: { TraceId: 01jtmzerz4cfns3gwgzzb064rs, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YjEyNzNiOTUtOTAxODBmM2UtNmE2Zjk5MjEtYTM1ODRhMmQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, got execution state from compute actor: [3:1822:3089], task: 2, state: COMPUTE_STATE_FINISHED, stats: { CpuTimeUs: 1545 Tasks { TaskId: 2 StageId: 1 CpuTimeUs: 813 FinishTimeMs: 1746608284796 InputRows: 3 InputBytes: 12 OutputRows: 3 OutputBytes: 12 ResultRows: 3 ResultBytes: 12 ComputeCpuTimeUs: 240 BuildCpuTimeUs: 573 HostName: "ghrun-sykirh5vua" NodeId: 3 CreateTimeMs: 1746608284776 UpdateTimeMs: 1746608284796 } MaxMemoryUsage: 1048576 } 2025-05-07T08:58:04.797601Z node 3 :KQP_EXECUTER INFO: kqp_planner.cpp:688: TxId: 281474976715663. Ctx: { TraceId: 01jtmzerz4cfns3gwgzzb064rs, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YjEyNzNiOTUtOTAxODBmM2UtNmE2Zjk5MjEtYTM1ODRhMmQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Compute actor has finished execution: [3:1822:3089] 2025-05-07T08:58:04.797710Z node 3 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:644: ActorId: [3:1814:3089] TxId: 281474976715663. Ctx: { TraceId: 01jtmzerz4cfns3gwgzzb064rs, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YjEyNzNiOTUtOTAxODBmM2UtNmE2Zjk5MjEtYTM1ODRhMmQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Waiting for: CA [4:1824:2467], 2025-05-07T08:58:04.797752Z node 3 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:157: ActorId: [3:1814:3089] TxId: 281474976715663. Ctx: { TraceId: 01jtmzerz4cfns3gwgzzb064rs, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YjEyNzNiOTUtOTAxODBmM2UtNmE2Zjk5MjEtYTM1ODRhMmQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, waiting for 1 compute actor(s) and 0 datashard(s): CA [4:1824:2467], 2025-05-07T08:58:04.798202Z node 3 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:433: ActorId: [3:1814:3089] TxId: 281474976715663. Ctx: { TraceId: 01jtmzerz4cfns3gwgzzb064rs, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YjEyNzNiOTUtOTAxODBmM2UtNmE2Zjk5MjEtYTM1ODRhMmQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
ActorState: ExecuteState, got execution state from compute actor: [4:1824:2467], task: 1, state: COMPUTE_STATE_FINISHED, stats: { CpuTimeUs: 1822 DurationUs: 10000 Tasks { TaskId: 1 CpuTimeUs: 589 FinishTimeMs: 1746608284796 OutputRows: 3 OutputBytes: 12 Tables { TablePath: "/Root/table-1" ReadRows: 3 ReadBytes: 24 AffectedPartitions: 4 } IngressRows: 3 ComputeCpuTimeUs: 189 BuildCpuTimeUs: 400 WaitInputTimeUs: 8216 HostName: "ghrun-sykirh5vua" NodeId: 4 StartTimeMs: 1746608284786 CreateTimeMs: 1746608284777 UpdateTimeMs: 1746608284796 } MaxMemoryUsage: 1048576 } 2025-05-07T08:58:04.798274Z node 3 :KQP_EXECUTER INFO: kqp_planner.cpp:688: TxId: 281474976715663. Ctx: { TraceId: 01jtmzerz4cfns3gwgzzb064rs, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YjEyNzNiOTUtOTAxODBmM2UtNmE2Zjk5MjEtYTM1ODRhMmQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Compute actor has finished execution: [4:1824:2467] 2025-05-07T08:58:04.798440Z node 3 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:2140: ActorId: [3:1814:3089] TxId: 281474976715663. Ctx: { TraceId: 01jtmzerz4cfns3gwgzzb064rs, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YjEyNzNiOTUtOTAxODBmM2UtNmE2Zjk5MjEtYTM1ODRhMmQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. terminate execution. 2025-05-07T08:58:04.798489Z node 3 :KQP_EXECUTER TRACE: kqp_executer_impl.h:2154: ActorId: [3:1814:3089] TxId: 281474976715663. Ctx: { TraceId: 01jtmzerz4cfns3gwgzzb064rs, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YjEyNzNiOTUtOTAxODBmM2UtNmE2Zjk5MjEtYTM1ODRhMmQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Terminate, become ZombieState 2025-05-07T08:58:04.798540Z node 3 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:838: ActorId: [3:1814:3089] TxId: 281474976715663. Ctx: { TraceId: 01jtmzerz4cfns3gwgzzb064rs, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YjEyNzNiOTUtOTAxODBmM2UtNmE2Zjk5MjEtYTM1ODRhMmQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Resource usage for last stat interval: ComputeTime: 0.003367s ReadRows: 3 ReadBytes: 24 ru: 3 rate limiter was not found force flag: 1 { items { uint32_value: 1 } items { uint32_value: 1 } }, { items { uint32_value: 2 } items { uint32_value: 2 } }, { items { uint32_value: 3 } items { uint32_value: 3 } } >> BackupRestore::TestAllPrimitiveTypes-DATETIME [GOOD] >> BackupRestore::TestAllPrimitiveTypes-DATE32 >> TSchemeShardTTLTests::CreateTableShouldSucceedOnIndexedTable >> Cdc::VirtualTimestamps[YdsRunner] [GOOD] >> Cdc::VirtualTimestamps[TopicRunner] ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/persqueue_public/ut/unittest >> RetryPolicy::RetryWithBatching [GOOD] Test command err: 2025-05-07T08:51:40.491095Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:51:40.491128Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:51:40.491163Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-05-07T08:51:40.491625Z :ERROR: [db] [sessionid] [cluster] Got error. Status: INTERNAL_ERROR. 
Description: 2025-05-07T08:51:40.491677Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:51:40.491710Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:51:40.493015Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.009084s 2025-05-07T08:51:40.493620Z :ERROR: [db] [sessionid] [cluster] Got error. Status: INTERNAL_ERROR. Description: 2025-05-07T08:51:40.493656Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:51:40.493681Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:51:40.493722Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.008847s 2025-05-07T08:51:40.494243Z :ERROR: [db] [sessionid] [cluster] Got error. Status: INTERNAL_ERROR. Description: 2025-05-07T08:51:40.494271Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:51:40.494293Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:51:40.494340Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.005358s 2025-05-07T08:51:40.547047Z :TWriteSession_TestPolicy INFO: Random seed for debugging is 1746607900547005 2025-05-07T08:51:41.198705Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501623816780543010:2277];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:51:41.199189Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-05-07T08:51:41.265854Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7501623816001603594:2071];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:51:41.265902Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-05-07T08:51:41.680102Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0035c5/r3tmp/tmpJAsjwc/pdisk_1.dat 2025-05-07T08:51:41.743949Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-05-07T08:51:42.236216Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:51:42.271763Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:51:42.345400Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:51:42.345522Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:51:42.351184Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) 
VolatileState: Unknown -> Disconnected 2025-05-07T08:51:42.351272Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:51:42.360923Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:51:42.361101Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-05-07T08:51:42.368577Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:51:42.410906Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 13547, node 1 2025-05-07T08:51:42.635049Z node 1 :GRPC_SERVER WARN: grpc_request_proxy.cpp:509: SchemeBoardDelete /Root Strong=0 2025-05-07T08:51:42.635120Z node 1 :GRPC_SERVER WARN: grpc_request_proxy.cpp:509: SchemeBoardDelete /Root Strong=0 2025-05-07T08:51:42.686705Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/zvgn/0035c5/r3tmp/yandexFA4BqQ.tmp 2025-05-07T08:51:42.686735Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/zvgn/0035c5/r3tmp/yandexFA4BqQ.tmp 2025-05-07T08:51:42.686921Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/zvgn/0035c5/r3tmp/yandexFA4BqQ.tmp 2025-05-07T08:51:42.687068Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-05-07T08:51:42.888792Z INFO: TTestServer started on Port 18967 GrpcPort 13547 TClient is connected to server localhost:18967 PQClient connected to localhost:13547 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:51:43.551851Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... waiting... waiting... 2025-05-07T08:51:43.690332Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715659, at schemeshard: 72057594046644480 waiting... 
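The reconnect delays at the top of this test (0.000000s, 0.009084s, 0.008847s, 0.005358s) come from the write session's retry policy, which applies a randomized coefficient to an exponential backoff so that concurrent sessions do not reconnect in lockstep. A self-contained sketch of such a jittered backoff, with invented constants — the actual policy is the SDK's IRetryPolicy, whose parameters differ:

#include <algorithm>
#include <chrono>
#include <cmath>
#include <cstdio>
#include <random>

// Jittered exponential backoff: the raw delay grows by `factor` per
// attempt and is capped at `maxSeconds`; each value is then scaled by
// a random coefficient in [0, 1), which is why the observed delays in
// the log are not monotonically increasing.
std::chrono::duration<double> NextDelay(unsigned attempt,
                                        std::mt19937& rng,
                                        double baseSeconds = 0.01,
                                        double factor = 2.0,
                                        double maxSeconds = 30.0) {
    double raw = std::min(baseSeconds * std::pow(factor, attempt), maxSeconds);
    std::uniform_real_distribution<double> jitter(0.0, 1.0);
    return std::chrono::duration<double>(raw * jitter(rng));
}

int main() {
    std::mt19937 rng{42};
    for (unsigned attempt = 0; attempt < 4; ++attempt) {
        std::printf("Reconnecting session to cluster in %.6fs\n",
                    NextDelay(attempt, rng).count());
    }
}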
2025-05-07T08:51:46.206719Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501623816780543010:2277];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:51:46.206989Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:51:46.269649Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7501623816001603594:2071];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:51:46.269719Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:51:47.441647Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7501623841771407687:2313], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:51:47.441776Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7501623841771407714:2316], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:51:47.441844Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:51:47.466108Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710657:3, at schemeshard: 72057594046644480 2025-05-07T08:51:47.514232Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7501623841771407720:2317], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710657 completed, doublechecking } 2025-05-07T08:51:47.599786Z node 2 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [2:7501623841771407748:2136] txid# 281474976710658, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:51:48.066690Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 2025-05-07T08:51:48.084587Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7501623841771407763:2321], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-05-07T08:51:48.084979Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2147: SessionId: ydb://session/3?node_id=2&id=ZTM2NjU4NjktYzQ5ZTg4MTYtZWRhZWZmZDItM2I5YTAwYmE=, ActorId: [2:7501623841771407685:2312], ActorState: ExecuteState, TraceId: 01jtmz38jf5cry9jg47ta4cvst, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-05-07T08:51:48.087457Z node 2 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_E ... e 1208 2025-05-07T08:57:58.136723Z node 17 :PERSQUEUE DEBUG: cache_eviction.h:315: Caching head blob in L1. Partition 0 offset 0 count 10 size 1208 actorID [17:7501625427514591384:2630] 2025-05-07T08:57:58.136869Z node 17 :PERSQUEUE DEBUG: pq_l2_cache.cpp:120: PQ Cache (L2). Adding blob. Tablet '72075186224037892' partition 0 offset 0 partno 0 count 10 parts 0 size 1208 2025-05-07T08:57:58.136892Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:524: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 1230 WriteNewSizeFromSupportivePartitions# 0 2025-05-07T08:57:58.136968Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:58: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-05-07T08:57:58.137038Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:324: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Answering for message sourceid: '\0test-message-group-id', Topic: 'rt3.dc1--test-topic', Partition: 0, SeqNo: 1, partNo: 0, Offset: 0 is stored on disk 2025-05-07T08:57:58.137079Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:58: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-05-07T08:57:58.137121Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:324: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Answering for message sourceid: '\0test-message-group-id', Topic: 'rt3.dc1--test-topic', Partition: 0, SeqNo: 2, partNo: 0, Offset: 1 is stored on disk 2025-05-07T08:57:58.137139Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:58: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-05-07T08:57:58.137163Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:324: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Answering for message sourceid: '\0test-message-group-id', Topic: 'rt3.dc1--test-topic', Partition: 0, SeqNo: 3, partNo: 0, Offset: 2 is stored on disk 2025-05-07T08:57:58.137180Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:58: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ReplyWrite. 
Partition: 0 2025-05-07T08:57:58.137200Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:324: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Answering for message sourceid: '\0test-message-group-id', Topic: 'rt3.dc1--test-topic', Partition: 0, SeqNo: 4, partNo: 0, Offset: 3 is stored on disk 2025-05-07T08:57:58.137216Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:58: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-05-07T08:57:58.137238Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:324: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Answering for message sourceid: '\0test-message-group-id', Topic: 'rt3.dc1--test-topic', Partition: 0, SeqNo: 5, partNo: 0, Offset: 4 is stored on disk 2025-05-07T08:57:58.137261Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:58: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-05-07T08:57:58.137283Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:324: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Answering for message sourceid: '\0test-message-group-id', Topic: 'rt3.dc1--test-topic', Partition: 0, SeqNo: 6, partNo: 0, Offset: 5 is stored on disk 2025-05-07T08:57:58.137299Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:58: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-05-07T08:57:58.137322Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:324: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Answering for message sourceid: '\0test-message-group-id', Topic: 'rt3.dc1--test-topic', Partition: 0, SeqNo: 7, partNo: 0, Offset: 6 is stored on disk 2025-05-07T08:57:58.137341Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:58: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-05-07T08:57:58.137371Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:324: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Answering for message sourceid: '\0test-message-group-id', Topic: 'rt3.dc1--test-topic', Partition: 0, SeqNo: 8, partNo: 0, Offset: 7 is stored on disk 2025-05-07T08:57:58.137394Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:58: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-05-07T08:57:58.137424Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:324: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Answering for message sourceid: '\0test-message-group-id', Topic: 'rt3.dc1--test-topic', Partition: 0, SeqNo: 9, partNo: 0, Offset: 8 is stored on disk 2025-05-07T08:57:58.137446Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:58: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ReplyWrite. 
Partition: 0 2025-05-07T08:57:58.137475Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:324: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Answering for message sourceid: '\0test-message-group-id', Topic: 'rt3.dc1--test-topic', Partition: 0, SeqNo: 10, partNo: 0, Offset: 9 is stored on disk 2025-05-07T08:57:58.137708Z node 17 :PERSQUEUE DEBUG: partition_read.cpp:774: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Topic 'rt3.dc1--test-topic' partition 0 user user readTimeStamp for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 0 rrg 0 2025-05-07T08:57:58.137756Z node 17 :PERSQUEUE DEBUG: partition_read.cpp:816: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Topic 'rt3.dc1--test-topic' partition 0 user user send read request for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 1 rrg 0 2025-05-07T08:57:58.137961Z node 17 :PERSQUEUE DEBUG: pq_impl.cpp:377: Answer ok topic: 'rt3.dc1--test-topic' partition: 0 messageNo: 1 requestId: cookie: 1 2025-05-07T08:57:58.138128Z node 17 :PQ_WRITE_PROXY DEBUG: writer.cpp:538: TPartitionWriter 72075186224037892 (partition=0) Received event: NKikimr::TEvPersQueue::TEvResponse 2025-05-07T08:57:58.138681Z node 17 :PERSQUEUE DEBUG: partition_read.cpp:731: [PQ: 72075186224037892, Partition: 0, State: StateIdle] read cookie 0 Topic 'rt3.dc1--test-topic' partition 0 user user offset 0 count 1 size 1024000 endOffset 10 max time lag 0ms effective offset 0 2025-05-07T08:57:58.138729Z node 17 :PERSQUEUE DEBUG: partition_read.cpp:931: [PQ: 72075186224037892, Partition: 0, State: StateIdle] read cookie 0 added 0 blobs, size 0 count 0 last offset 0, current partition end offset: 10 2025-05-07T08:57:58.139810Z :DEBUG: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|9a739a42-a01ab547-28b719d2-740a148b_0] Write session got write response: sequence_numbers: 1 sequence_numbers: 2 sequence_numbers: 3 sequence_numbers: 4 sequence_numbers: 5 sequence_numbers: 6 sequence_numbers: 7 sequence_numbers: 8 sequence_numbers: 9 sequence_numbers: 10 offsets: 0 offsets: 1 offsets: 2 offsets: 3 offsets: 4 offsets: 5 offsets: 6 offsets: 7 offsets: 8 offsets: 9 already_written: false already_written: false already_written: false already_written: false already_written: false already_written: false already_written: false already_written: false already_written: false already_written: false write_statistics { persist_duration_ms: 13 queued_in_partition_duration_ms: 2 } 2025-05-07T08:57:58.139890Z :DEBUG: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|9a739a42-a01ab547-28b719d2-740a148b_0] Write session: acknoledged message 1 2025-05-07T08:57:58.139941Z :DEBUG: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|9a739a42-a01ab547-28b719d2-740a148b_0] Write session: acknoledged message 2 2025-05-07T08:57:58.139967Z :DEBUG: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|9a739a42-a01ab547-28b719d2-740a148b_0] Write session: acknoledged message 3 2025-05-07T08:57:58.140003Z :DEBUG: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|9a739a42-a01ab547-28b719d2-740a148b_0] Write session: acknoledged message 4 2025-05-07T08:57:58.140030Z :DEBUG: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|9a739a42-a01ab547-28b719d2-740a148b_0] Write session: acknoledged message 5 2025-05-07T08:57:58.140058Z :DEBUG: [/Root] MessageGroupId [test-message-group-id] SessionId 
[test-message-group-id|9a739a42-a01ab547-28b719d2-740a148b_0] Write session: acknowledged message 6 2025-05-07T08:57:58.140090Z :DEBUG: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|9a739a42-a01ab547-28b719d2-740a148b_0] Write session: acknowledged message 7 2025-05-07T08:57:58.140114Z :DEBUG: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|9a739a42-a01ab547-28b719d2-740a148b_0] Write session: acknowledged message 8 2025-05-07T08:57:58.140157Z :DEBUG: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|9a739a42-a01ab547-28b719d2-740a148b_0] Write session: acknowledged message 9 2025-05-07T08:57:58.140186Z :DEBUG: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|9a739a42-a01ab547-28b719d2-740a148b_0] Write session: acknowledged message 10 2025-05-07T08:57:58.138971Z node 17 :PERSQUEUE DEBUG: partition_read.cpp:948: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Reading cookie 0. All data is from uncompacted head. 2025-05-07T08:57:58.139017Z node 17 :PERSQUEUE DEBUG: partition_read.cpp:420: FormAnswer for 0 blobs 2025-05-07T08:57:58.139115Z node 17 :PERSQUEUE DEBUG: partition_read.cpp:856: Topic 'rt3.dc1--test-topic' partition 0 user user readTimeStamp done, result 1746608278122 queuesize 0 startOffset 0 2025-05-07T08:57:58.140596Z :INFO: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|9a739a42-a01ab547-28b719d2-740a148b_0] Write session: close. Timeout = 0 ms 2025-05-07T08:57:58.140661Z :INFO: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|9a739a42-a01ab547-28b719d2-740a148b_0] Write session will now close 2025-05-07T08:57:58.140715Z :DEBUG: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|9a739a42-a01ab547-28b719d2-740a148b_0] Write session: aborting 2025-05-07T08:57:58.141225Z :INFO: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|9a739a42-a01ab547-28b719d2-740a148b_0] Write session: gracefully shut down, all writes complete 2025-05-07T08:57:58.141278Z :DEBUG: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|9a739a42-a01ab547-28b719d2-740a148b_0] Write session: destroy 2025-05-07T08:57:58.145730Z node 17 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 7 sessionId: test-message-group-id|9a739a42-a01ab547-28b719d2-740a148b_0 grpc read done: success: 0 data: 2025-05-07T08:57:58.145770Z node 17 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 7 sessionId: test-message-group-id|9a739a42-a01ab547-28b719d2-740a148b_0 grpc read failed 2025-05-07T08:57:58.160039Z node 17 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:818: session v1 closed cookie: 7 sessionId: test-message-group-id|9a739a42-a01ab547-28b719d2-740a148b_0 2025-05-07T08:57:58.160101Z node 17 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 7 sessionId: test-message-group-id|9a739a42-a01ab547-28b719d2-740a148b_0 is DEAD 2025-05-07T08:57:58.161814Z node 17 :PQ_WRITE_PROXY DEBUG: writer.cpp:538: TPartitionWriter 72075186224037892 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-05-07T08:57:58.162029Z node 17 :PERSQUEUE DEBUG: pq_impl.cpp:2899: [PQ: 72075186224037892] server disconnected, pipe [17:7501625436104526194:2660] destroyed 2025-05-07T08:57:58.162088Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:138: [PQ: 72075186224037892, Partition: 0, State: StateIdle]
TPartition::DropOwner. |91.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/fq/pq_async_io/ut/ydb-tests-fq-pq_async_io-ut |91.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/fq/pq_async_io/ut/ydb-tests-fq-pq_async_io-ut |91.0%| [LD] {RESULT} $(B)/ydb/tests/fq/pq_async_io/ut/ydb-tests-fq-pq_async_io-ut >> DataShardWrite::PreparedDistributedWritePageFault [GOOD] >> TTxAllocatorClientTest::ZeroRange [GOOD] >> AsyncIndexChangeExchange::ShouldDeliverChangesOnAlteredTable [GOOD] >> AsyncIndexChangeExchange::ShouldRemoveRecordsAfterDroppingIndex >> TestYmqHttpProxy::TestCreateQueueWithEmptyName [GOOD] |91.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TPersQueueTest::PreferredCluster_TwoEnabledClustersAndWriteSessionsWithDifferentPreferredCluster_SessionWithMismatchedClusterDiesAndOthersAlive [GOOD] >> TPersQueueTest::PreferredCluster_DisabledRemoteClusterAndWriteSessionsWithDifferentPreferredClusterAndLaterRemoteClusterEnabled_SessionWithMismatchedClusterDiesAfterPreferredClusterEnabledAndOtherSessionsAlive >> TSchemeShardTTLTests::CreateTableShouldSucceedOnIndexedTable [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_allocator_client/ut/unittest >> TTxAllocatorClientTest::ZeroRange [GOOD] Test command err: 2025-05-07T08:56:08.018607Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:1904: Tablet: 72057594046447617 LockedInitializationPath Marker# TSYS32 2025-05-07T08:56:08.019061Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:917: Tablet: 72057594046447617 HandleFindLatestLogEntry, NODATA Promote Marker# TSYS19 2025-05-07T08:56:08.019816Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:221: Tablet: 72057594046447617 TTablet::WriteZeroEntry. logid# [72057594046447617:2:0:0:0:0:0] Marker# TSYS01 2025-05-07T08:56:08.021575Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:0:0:0:20:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-05-07T08:56:08.022118Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:17: tablet# 72057594046447617 OnActivateExecutor 2025-05-07T08:56:08.032382Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:1:1:28672:35:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-05-07T08:56:08.032505Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:1:0:0:42:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-05-07T08:56:08.032590Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:2:1:8192:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-05-07T08:56:08.032681Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:1381: Tablet: 72057594046447617 GcCollect 0 channel, tablet:gen:step => 2:0 Marker# TSYS28 2025-05-07T08:56:08.032807Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:2:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-05-07T08:56:08.032922Z node 1 :TX_ALLOCATOR DEBUG: txallocator__scheme.cpp:22: tablet# 72057594046447617 TTxSchema Complete 2025-05-07T08:56:08.033113Z node 1 :TABLET_MAIN INFO: tablet_sys.cpp:1015: Tablet: 72057594046447617 Active! 
Generation: 2, Type: TxAllocator started in 0msec Marker# TSYS24 2025-05-07T08:56:08.033915Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:70:2105] requested range size#5000 2025-05-07T08:56:08.034532Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:3:1:24576:70:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-05-07T08:56:08.034610Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:3:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-05-07T08:56:08.034726Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 0 Reserved to# 5000 2025-05-07T08:56:08.034794Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:70:2105] TEvAllocateResult from# 0 to# 5000 >> TestYmqHttpProxy::TestCreateQueueWithWrongBody [GOOD] >> TestYmqHttpProxy::TestCreateQueueWithAllAttributes >> TestKinesisHttpProxy::TestRequestWithIAM [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_write/unittest >> DataShardWrite::PreparedDistributedWritePageFault [GOOD] Test command err: 2025-05-07T08:57:29.792741Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:105:2151], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:57:29.792919Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:57:29.793238Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003505/r3tmp/tmpF4jLVT/pdisk_1.dat 2025-05-07T08:57:30.291255Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-05-07T08:57:30.354545Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:57:30.416312Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:57:30.416488Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:57:30.431306Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:57:30.525420Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-05-07T08:57:30.587192Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3098: StateInit, received event# 268828672, Sender [1:656:2563], Recipient [1:665:2569]: NKikimr::TEvTablet::TEvBoot 2025-05-07T08:57:30.588731Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3098: StateInit, received event# 268828673, Sender [1:656:2563], Recipient [1:665:2569]: NKikimr::TEvTablet::TEvRestored 2025-05-07T08:57:30.589273Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:665:2569] 2025-05-07T08:57:30.589568Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-05-07T08:57:30.603065Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3111: StateInactive, received event# 268828684, Sender [1:656:2563], Recipient [1:665:2569]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-05-07T08:57:30.656274Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-05-07T08:57:30.656421Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-05-07T08:57:30.658256Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-05-07T08:57:30.658351Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-05-07T08:57:30.658418Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-05-07T08:57:30.658916Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-05-07T08:57:30.659094Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-05-07T08:57:30.659190Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state 
actor id [1:681:2569] in generation 1 2025-05-07T08:57:30.670128Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-05-07T08:57:30.735005Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-05-07T08:57:30.735256Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-05-07T08:57:30.735379Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:683:2579] 2025-05-07T08:57:30.735438Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-05-07T08:57:30.735476Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-05-07T08:57:30.735515Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T08:57:30.735757Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 2146435072, Sender [1:665:2569], Recipient [1:665:2569]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-05-07T08:57:30.735809Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3155: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-05-07T08:57:30.736145Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-05-07T08:57:30.736243Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-05-07T08:57:30.736330Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T08:57:30.736374Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-05-07T08:57:30.736417Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-05-07T08:57:30.736482Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-05-07T08:57:30.736526Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-05-07T08:57:30.736562Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-05-07T08:57:30.736609Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T08:57:30.736732Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269877761, Sender [1:672:2573], Recipient [1:665:2569]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-05-07T08:57:30.736778Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3166: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-05-07T08:57:30.736828Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:661:2566], serverId# [1:672:2573], sessionId# [0:0:0] 2025-05-07T08:57:30.737214Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269549568, Sender [1:410:2405], Recipient [1:672:2573] 2025-05-07T08:57:30.737279Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3136: StateWork, processing 
event TEvDataShard::TEvProposeTransaction 2025-05-07T08:57:30.737391Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-05-07T08:57:30.737660Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-05-07T08:57:30.737720Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-05-07T08:57:30.737823Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-05-07T08:57:30.737878Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-05-07T08:57:30.737918Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx 2025-05-07T08:57:30.737988Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx 2025-05-07T08:57:30.738034Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-05-07T08:57:30.738344Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayCompleteNoMoreRestarts 2025-05-07T08:57:30.738381Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit StoreSchemeTx 2025-05-07T08:57:30.738417Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit FinishPropose 2025-05-07T08:57:30.738458Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-05-07T08:57:30.738518Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayComplete 2025-05-07T08:57:30.738557Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit FinishPropose 2025-05-07T08:57:30.738602Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit WaitForPlan 2025-05-07T08:57:30.738639Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit WaitForPlan 2025-05-07T08:57:30.738672Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:281474976715657] at 72075186224037888 is not ready to execute on unit WaitForPlan 2025-05-07T08:57:30.740169Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269746185, Sender [1:684:2580], Recipient [1:665:2569]: NKikimr::TEvTxProxySchemeCache::TEvWatchNotifyUpdated 2025-05-07T08:57:30.740228Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-05-07T08:57:30.754704Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-05-07T08:57:30.754805Z node 1 
:TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-05-07T08:57:30.754871Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-05-07T08:57:30.754926Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 281474976715657 at tablet 72075186224037888 send to client, exec latency: 0 ms, propose latency: 0 ms, status: PREPARED 2025-05-07T08:57:30.754997Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-05-07T08:57:30.924496Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269877761, Sender [1:699:2589], Recipient [1:665:2569]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-05-07T08:57:30.924565Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3166: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-05-07T08:57:30.924605Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075 ... Add [3500:1234567890011] at 72075186224037888 to execution unit LoadWriteDetails 2025-05-07T08:58:08.024134Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3500:1234567890011] at 72075186224037888 on unit LoadTxDetails 2025-05-07T08:58:08.024476Z node 7 :TX_DATASHARD TRACE: datashard_write_operation.cpp:64: Parsing write transaction for 1234567890011 at 72075186224037888, record: Operations { Type: OPERATION_UPSERT TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } ColumnIds: 1 ColumnIds: 2 PayloadIndex: 0 PayloadFormat: FORMAT_CELLVEC } TxId: 1234567890011 TxMode: MODE_PREPARE Locks { Op: Commit } 2025-05-07T08:58:08.024578Z node 7 :TX_DATASHARD TRACE: datashard_write_operation.cpp:190: Table /Root/table, shard: 72075186224037888, write point (Int32 : 1) 2025-05-07T08:58:08.024653Z node 7 :TX_DATASHARD TRACE: key_validator.cpp:54: -- AddWriteRange: (Int32 : 1) table: [72057594046644480:2:1] 2025-05-07T08:58:08.024761Z node 7 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:683: LoadWriteDetails at 72075186224037888 loaded writeOp from db 3500:1234567890011 keys extracted: 1 2025-05-07T08:58:08.024811Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3500:1234567890011] at 72075186224037888 is Executed 2025-05-07T08:58:08.024840Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3500:1234567890011] at 72075186224037888 executing on unit LoadWriteDetails 2025-05-07T08:58:08.024869Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [3500:1234567890011] at 72075186224037888 to execution unit BuildAndWaitDependencies 2025-05-07T08:58:08.024896Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3500:1234567890011] at 72075186224037888 on unit BuildAndWaitDependencies 2025-05-07T08:58:08.024964Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:455: Operation [3500:1234567890011] is the new logically complete end at 72075186224037888 2025-05-07T08:58:08.025013Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:461: Operation [3500:1234567890011] is the new logically incomplete end at 72075186224037888 2025-05-07T08:58:08.025074Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [3500:1234567890011] at 72075186224037888 2025-05-07T08:58:08.025124Z node 7 :TX_DATASHARD TRACE: 
datashard_pipeline.cpp:1862: Execution status for [3500:1234567890011] at 72075186224037888 is Executed 2025-05-07T08:58:08.025151Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3500:1234567890011] at 72075186224037888 executing on unit BuildAndWaitDependencies 2025-05-07T08:58:08.025177Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [3500:1234567890011] at 72075186224037888 to execution unit BuildWriteOutRS 2025-05-07T08:58:08.025213Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3500:1234567890011] at 72075186224037888 on unit BuildWriteOutRS 2025-05-07T08:58:08.025275Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3500:1234567890011] at 72075186224037888 is Executed 2025-05-07T08:58:08.025303Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3500:1234567890011] at 72075186224037888 executing on unit BuildWriteOutRS 2025-05-07T08:58:08.025327Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [3500:1234567890011] at 72075186224037888 to execution unit StoreAndSendWriteOutRS 2025-05-07T08:58:08.025351Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3500:1234567890011] at 72075186224037888 on unit StoreAndSendWriteOutRS 2025-05-07T08:58:08.025377Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3500:1234567890011] at 72075186224037888 is Executed 2025-05-07T08:58:08.025399Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3500:1234567890011] at 72075186224037888 executing on unit StoreAndSendWriteOutRS 2025-05-07T08:58:08.025422Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [3500:1234567890011] at 72075186224037888 to execution unit PrepareWriteTxInRS 2025-05-07T08:58:08.025445Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3500:1234567890011] at 72075186224037888 on unit PrepareWriteTxInRS 2025-05-07T08:58:08.025477Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3500:1234567890011] at 72075186224037888 is Executed 2025-05-07T08:58:08.025502Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3500:1234567890011] at 72075186224037888 executing on unit PrepareWriteTxInRS 2025-05-07T08:58:08.025523Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [3500:1234567890011] at 72075186224037888 to execution unit LoadAndWaitInRS 2025-05-07T08:58:08.025551Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3500:1234567890011] at 72075186224037888 on unit LoadAndWaitInRS 2025-05-07T08:58:08.025581Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3500:1234567890011] at 72075186224037888 is Executed 2025-05-07T08:58:08.025607Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3500:1234567890011] at 72075186224037888 executing on unit LoadAndWaitInRS 2025-05-07T08:58:08.025632Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [3500:1234567890011] at 72075186224037888 to execution unit ExecuteWrite 2025-05-07T08:58:08.025657Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3500:1234567890011] at 72075186224037888 on unit ExecuteWrite 2025-05-07T08:58:08.025704Z node 7 :TX_DATASHARD DEBUG: execute_write_unit.cpp:245: Executing write operation for [3500:1234567890011] at 72075186224037888 
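The records that follow show the page-fault path this test exercises: ExecuteWrite touches a page that is not yet in memory, the tablet reports it "is not ready" for the operation, the transaction releases its data, the pipeline records a Restart status, and once the pages are loaded the same unit restores the data and executes the write. Below is a minimal C++ sketch of that restart-on-page-fault control flow; the types and names (EStatus, TOperation, TExecuteWriteUnit) are hypothetical simplifications for illustration, not the actual NKikimr::NDataShard execution-unit code.

#include <cstdio>

// Simplified execution statuses, loosely mirroring the trace above.
enum class EStatus { Executed, Restart, DelayComplete };

struct TOperation {
    bool dataLoaded = true;
    void ReleaseData() { dataLoaded = false; }   // "tx ... released its data"
    void RestoreData() { dataLoaded = true; }    // "tx ... restored its data"
};

// A unit either completes or asks the pipeline to restart it after the
// missing pages have been loaded (simulated here by an attempt counter).
struct TExecuteWriteUnit {
    int faultsLeft = 1; // the first attempt hits a page fault
    EStatus Execute(TOperation& op) {
        if (faultsLeft > 0) {
            --faultsLeft;
            op.ReleaseData();        // cannot hold parsed data across the wait
            return EStatus::Restart; // "Execution status ... is Restart"
        }
        if (!op.dataLoaded) {
            op.RestoreData();        // re-parse the write before re-executing
        }
        return EStatus::Executed;
    }
};

int main() {
    TOperation op;
    TExecuteWriteUnit unit;
    // The pipeline re-enters the same unit until it stops returning Restart.
    while (unit.Execute(op) == EStatus::Restart) {
        std::puts("page fault: operation released its data, will restart");
    }
    std::puts("write executed after restart");
}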
2025-05-07T08:58:08.029286Z node 7 :TX_DATASHARD TRACE: execute_write_unit.cpp:122: Tablet 72075186224037888 is not ready for [3500:1234567890011] execution 2025-05-07T08:58:08.029483Z node 7 :TX_DATASHARD DEBUG: datashard_write_operation.cpp:431: tx 1234567890011 at 72075186224037888 released its data 2025-05-07T08:58:08.029558Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3500:1234567890011] at 72075186224037888 is Restart 2025-05-07T08:58:08.029608Z node 7 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-05-07T08:58:08.029679Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 72075186224037888 2025-05-07T08:58:08.029734Z node 7 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-05-07T08:58:08.029781Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 72075186224037888 2025-05-07T08:58:08.030399Z node 7 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T08:58:08.030476Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3500:1234567890011] at 72075186224037888 on unit ExecuteWrite 2025-05-07T08:58:08.030536Z node 7 :TX_DATASHARD DEBUG: execute_write_unit.cpp:245: Executing write operation for [3500:1234567890011] at 72075186224037888 2025-05-07T08:58:08.031056Z node 7 :TX_DATASHARD TRACE: datashard_write_operation.cpp:64: Parsing write transaction for 1234567890011 at 72075186224037888, record: Operations { Type: OPERATION_UPSERT TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } ColumnIds: 1 ColumnIds: 2 PayloadIndex: 0 PayloadFormat: FORMAT_CELLVEC } TxId: 1234567890011 TxMode: MODE_PREPARE Locks { Op: Commit } 2025-05-07T08:58:08.031185Z node 7 :TX_DATASHARD TRACE: datashard_write_operation.cpp:190: Table /Root/table, shard: 72075186224037888, write point (Int32 : 1) 2025-05-07T08:58:08.031256Z node 7 :TX_DATASHARD TRACE: key_validator.cpp:54: -- AddWriteRange: (Int32 : 1) table: [72057594046644480:2:1] 2025-05-07T08:58:08.031362Z node 7 :TX_DATASHARD DEBUG: datashard_write_operation.cpp:524: tx 1234567890011 at 72075186224037888 restored its data 2025-05-07T08:58:08.031579Z node 7 :TX_DATASHARD DEBUG: execute_write_unit.cpp:410: Executed write operation for [3500:1234567890011] at 72075186224037888, row count=1 2025-05-07T08:58:08.031644Z node 7 :TX_DATASHARD TRACE: locks.cpp:194: Lock 1234567890001 marked broken at v{min} 2025-05-07T08:58:08.031769Z node 7 :TX_DATASHARD TRACE: execute_write_unit.cpp:47: add locks to result: 0 2025-05-07T08:58:08.031857Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3500:1234567890011] at 72075186224037888 is ExecutedNoMoreRestarts 2025-05-07T08:58:08.031935Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3500:1234567890011] at 72075186224037888 executing on unit ExecuteWrite 2025-05-07T08:58:08.031991Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [3500:1234567890011] at 72075186224037888 to execution unit CompleteWrite 2025-05-07T08:58:08.032045Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3500:1234567890011] at 72075186224037888 on unit CompleteWrite 2025-05-07T08:58:08.032298Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status 
for [3500:1234567890011] at 72075186224037888 is DelayComplete 2025-05-07T08:58:08.032355Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3500:1234567890011] at 72075186224037888 executing on unit CompleteWrite 2025-05-07T08:58:08.032425Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [3500:1234567890011] at 72075186224037888 to execution unit CompletedOperations 2025-05-07T08:58:08.032470Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3500:1234567890011] at 72075186224037888 on unit CompletedOperations 2025-05-07T08:58:08.032511Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3500:1234567890011] at 72075186224037888 is Executed 2025-05-07T08:58:08.032550Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3500:1234567890011] at 72075186224037888 executing on unit CompletedOperations 2025-05-07T08:58:08.032600Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [3500:1234567890011] at 72075186224037888 has finished 2025-05-07T08:58:08.032657Z node 7 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-05-07T08:58:08.032702Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 72075186224037888 2025-05-07T08:58:08.032755Z node 7 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-05-07T08:58:08.032804Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 72075186224037888 2025-05-07T08:58:08.033345Z node 7 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 3500} 2025-05-07T08:58:08.034680Z node 7 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T08:58:08.034759Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [3500:1234567890011] at 72075186224037888 on unit CompleteWrite 2025-05-07T08:58:08.034842Z node 7 :TX_DATASHARD DEBUG: datashard.cpp:826: Complete write [3500 : 1234567890011] from 72075186224037888 at tablet 72075186224037888 send result to client [7:790:2649] 2025-05-07T08:58:08.034912Z node 7 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 >> TTableProfileTests::ExplicitPartitionsWrongKeyFormat [GOOD] >> TTableProfileTests::ExplicitPartitionsWrongKeyType >> TSchemeShardTTLTests::CreateTableShouldFailOnUnknownColumn ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLTests::CreateTableShouldSucceedOnIndexedTable [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140] 2025-05-07T08:58:08.597602Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:58:08.597690Z node 1 :FLAT_TX_SCHEMESHARD 
NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:58:08.597733Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:58:08.597778Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:58:08.597821Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:58:08.597858Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:58:08.597924Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:58:08.598032Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:58:08.598763Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:58:08.599123Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:58:08.687987Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:58:08.688038Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:58:08.703573Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:58:08.703758Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:58:08.703896Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:58:08.709508Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:58:08.709738Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:58:08.710271Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:08.710423Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:58:08.712924Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:58:08.713961Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:58:08.714030Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:58:08.714089Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: 
TTxServerlessStorageBilling.Execute 2025-05-07T08:58:08.714124Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:58:08.714153Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:58:08.714302Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:58:08.720401Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T08:58:08.851501Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:58:08.851773Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:08.852055Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:58:08.852294Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:58:08.852361Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:08.855580Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:08.855749Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:58:08.856001Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:08.856068Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:58:08.856140Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:58:08.856182Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:58:08.860829Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:08.860918Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts 
operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:58:08.860969Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:58:08.871776Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:08.871888Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:08.871965Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:58:08.872054Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:58:08.881123Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:58:08.884348Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:58:08.884828Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:58:08.886421Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:08.886704Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:58:08.886789Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:58:08.887178Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:58:08.887260Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:58:08.887525Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:58:08.887649Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:58:08.891425Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:58:08.891516Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:58:08.891800Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:58:08.891866Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... ns: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:09.307979Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:674: all shard schema changes has been received, operationId: 101:2, at schemeshard: 72057594046678944 2025-05-07T08:58:09.308016Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:686: send schema changes ack message, operation: 101:2, datashard: 72075186233409546, at schemeshard: 72057594046678944 2025-05-07T08:58:09.308056Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 101:2 129 -> 240 2025-05-07T08:58:09.308488Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5472: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 326 RawX2: 4294969605 } Origin: 72075186233409547 State: 2 TxId: 101 Step: 0 Generation: 2 2025-05-07T08:58:09.308521Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation FindRelatedPartByTabletId, TxId: 101, tablet: 72075186233409547, partId: 0 2025-05-07T08:58:09.308627Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 101:0, at schemeshard: 72057594046678944, message: Source { RawX1: 326 RawX2: 4294969605 } Origin: 72075186233409547 State: 2 TxId: 101 Step: 0 Generation: 2 2025-05-07T08:58:09.308664Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1005: NTableState::TProposedWaitParts operationId# 101:0 HandleReply TEvSchemaChanged at tablet: 72057594046678944 2025-05-07T08:58:09.308755Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1009: NTableState::TProposedWaitParts operationId# 101:0 HandleReply TEvSchemaChanged at tablet: 72057594046678944 message: Source { RawX1: 326 RawX2: 4294969605 } Origin: 72075186233409547 State: 2 TxId: 101 Step: 0 Generation: 2 2025-05-07T08:58:09.308808Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:655: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 101:0, shardIdx: 72057594046678944:1, datashard: 72075186233409547, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:09.308837Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:674: all shard schema changes has been received, operationId: 101:0, at schemeshard: 72057594046678944 2025-05-07T08:58:09.308868Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:686: send schema changes ack message, operation: 101:0, datashard: 72075186233409547, at schemeshard: 72057594046678944 2025-05-07T08:58:09.308893Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 101:0 129 -> 240 2025-05-07T08:58:09.312898Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-05-07T08:58:09.318370Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-05-07T08:58:09.318514Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-05-07T08:58:09.318623Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 101:2, at schemeshard: 72057594046678944 2025-05-07T08:58:09.318772Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-05-07T08:58:09.318830Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-05-07T08:58:09.318887Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 101:2, at schemeshard: 72057594046678944 2025-05-07T08:58:09.319167Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-05-07T08:58:09.319383Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 101:2, at schemeshard: 72057594046678944 2025-05-07T08:58:09.319428Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046678944] TDone opId# 101:2 ProgressState 2025-05-07T08:58:09.319542Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#101:2 progress is 2/3 2025-05-07T08:58:09.319589Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 101 ready parts: 2/3 2025-05-07T08:58:09.319632Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#101:2 progress is 2/3 2025-05-07T08:58:09.319667Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 101 ready parts: 2/3 2025-05-07T08:58:09.319702Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 101, ready parts: 2/3, is published: true 2025-05-07T08:58:09.320018Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 101:0, at schemeshard: 72057594046678944 2025-05-07T08:58:09.320054Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046678944] TDone opId# 101:0 ProgressState 2025-05-07T08:58:09.320112Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#101:0 progress is 3/3 2025-05-07T08:58:09.320135Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 101 ready parts: 3/3 2025-05-07T08:58:09.320169Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#101:0 progress is 3/3 2025-05-07T08:58:09.320189Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 101 ready parts: 3/3 2025-05-07T08:58:09.320227Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 101, ready parts: 3/3, is published: true 2025-05-07T08:58:09.320304Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:378:2346] message: TxId: 101 2025-05-07T08:58:09.320352Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 101 ready parts: 3/3 2025-05-07T08:58:09.320397Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 101:0 2025-05-07T08:58:09.320428Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 101:0 2025-05-07T08:58:09.320579Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-05-07T08:58:09.320632Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 101:1 2025-05-07T08:58:09.320653Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 101:1 2025-05-07T08:58:09.320717Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-05-07T08:58:09.320744Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 101:2 2025-05-07T08:58:09.320763Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 101:2 2025-05-07T08:58:09.320809Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 3 2025-05-07T08:58:09.323374Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-05-07T08:58:09.323423Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:379:2347] TestWaitNotification: OK eventTxId 101 2025-05-07T08:58:09.323985Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/TTLEnabledTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T08:58:09.324247Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/TTLEnabledTable" took 250us result status StatusSuccess 2025-05-07T08:58:09.324732Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/TTLEnabledTable" PathDescription { Self { Name: "TTLEnabledTable" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: true } Table { Name: "TTLEnabledTable" Columns { Name: 
"key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "modified_at" Type: "Timestamp" TypeId: 50 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableIndexes { Name: "UserDefinedIndexByExpireAt" LocalPathId: 3 Type: EIndexTypeGlobal State: EIndexStateReady KeyColumnNames: "modified_at" SchemaVersion: 1 PathOwnerId: 72057594046678944 DataSize: 0 IndexImplTableDescriptions { } } TableSchemaVersion: 1 TTLSettings { Enabled { ColumnName: "modified_at" ExpireAfterSeconds: 3600 Tiers { ApplyAfterSeconds: 3600 Delete { } } } } IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TestYmqHttpProxy::TestCreateQueueWithWrongAttribute >> KqpErrors::ProposeErrorEvWrite [GOOD] >> DataShardWrite::UpsertLostPrepareArbiterRestart [GOOD] >> TestKinesisHttpProxy::TestRequestNoAuthorization >> TestKinesisHttpProxy::CreateStreamWithDifferentRetentions [GOOD] >> YdbIndexTable::OnlineBuildWithDataColumn [GOOD] >> Cdc::HugeKey[YdsRunner] [GOOD] >> Cdc::HugeKey[TopicRunner] >> TestKinesisHttpProxy::PutRecordsWithIncorrectHashKey [GOOD] >> TestKinesisHttpProxy::DoubleCreateStream [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-JSON_DOCUMENT [GOOD] >> BackupRestore::TestAllPrimitiveTypes-JSON_DOCUMENT [GOOD] |91.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TestKinesisHttpProxy::CreateDeleteStream >> TSchemeShardTTLTests::CreateTableShouldFailOnUnknownColumn [GOOD] >> BackupRestoreS3::RestoreViewDependentOnAnotherView [GOOD] >> BackupRestoreS3::TestAllIndexTypes-EIndexTypeGlobal >> TestKinesisHttpProxy::ListShards >> TSchemeShardTTLTestsWithReboots::CreateTable >> TTableProfileTests::WrongTableProfile [GOOD] >> TYqlDateTimeTests::DateKey |91.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TestKinesisHttpProxy::GoodRequestGetRecords |91.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_reassign/ydb-core-tx-datashard-ut_reassign |91.0%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_reassign/ydb-core-tx-datashard-ut_reassign |91.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_reassign/ydb-core-tx-datashard-ut_reassign ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_kqp_errors/unittest >> KqpErrors::ProposeErrorEvWrite [GOOD] Test command 
err: 2025-05-07T08:57:52.099557Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:698:2413], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:57:52.099939Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:57:52.100228Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:57:52.102304Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:695:2355], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:57:52.102676Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:57:52.102722Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0047b9/r3tmp/tmpDM1n29/pdisk_1.dat 2025-05-07T08:57:52.625506Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:57:52.891354Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-05-07T08:57:53.039785Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:57:53.039941Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:57:53.044804Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:57:53.044905Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:57:53.059722Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-05-07T08:57:53.060251Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:57:53.060643Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:57:53.396253Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-05-07T08:57:54.540260Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:1586:2952], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:57:54.540394Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:1597:2957], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:57:54.540484Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:57:54.546797Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480 2025-05-07T08:57:55.153163Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:1600:2960], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-05-07T08:57:55.355066Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:1739:3038] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:57:55.785612Z node 1 :KQP_EXECUTER DEBUG: kqp_literal_executer.cpp:96: ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: , Database: , DatabaseId: , SessionId: , CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. Begin literal execution. Operation timeout: 0.000000s, cancelAfter: (empty maybe) 2025-05-07T08:57:55.785688Z node 1 :KQP_EXECUTER DEBUG: kqp_literal_executer.cpp:125: ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: , Database: , DatabaseId: , SessionId: , CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. Begin literal execution, txs: 1 2025-05-07T08:57:55.785757Z node 1 :KQP_EXECUTER DEBUG: kqp_tasks_graph.cpp:25: StageInfo: StageId #[0,0], InputsCount: 0, OutputsCount: 1 2025-05-07T08:57:55.785799Z node 1 :KQP_EXECUTER DEBUG: kqp_literal_executer.cpp:135: ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: , Database: , DatabaseId: , SessionId: , CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. Stage [0,0] AST: ( (return (lambda '() (block '( (let $1 (Just (Uint32 '1))) (let $2 (Just (Uint32 '2))) (let $3 (Just (Uint32 '3))) (return (ToStream (Just (AsList (AsStruct '('"key" $1) '('"value" $1)) (AsStruct '('"key" $2) '('"value" $2)) (AsStruct '('"key" $3) '('"value" $3)))))) )))) ) 2025-05-07T08:57:55.785864Z node 1 :KQP_EXECUTER DEBUG: kqp_tasks_graph.cpp:234: Create result channelId: 1 from task: 1 with index: 0 2025-05-07T08:57:55.789029Z node 1 :KQP_EXECUTER DEBUG: kqp_literal_executer.cpp:275: ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: , Database: , DatabaseId: , SessionId: , CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. Execution is complete, results: 1 2025-05-07T08:57:55.798253Z node 1 :KQP_EXECUTER DEBUG: kqp_literal_executer.cpp:96: ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: 01jtmzef39039wzrxb1mbmfj4p, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ODhhZGVjODYtMThjZjQ5ZGMtOGM2ODZkZDItZTU5NWI0NmQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Begin literal execution. Operation timeout: 299.436577s, cancelAfter: (empty maybe) 2025-05-07T08:57:55.798331Z node 1 :KQP_EXECUTER DEBUG: kqp_literal_executer.cpp:125: ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: 01jtmzef39039wzrxb1mbmfj4p, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ODhhZGVjODYtMThjZjQ5ZGMtOGM2ODZkZDItZTU5NWI0NmQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Begin literal execution, txs: 1 2025-05-07T08:57:55.798388Z node 1 :KQP_EXECUTER DEBUG: kqp_tasks_graph.cpp:25: StageInfo: StageId #[0,0], InputsCount: 0, OutputsCount: 1 2025-05-07T08:57:55.798440Z node 1 :KQP_EXECUTER DEBUG: kqp_literal_executer.cpp:135: ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: 01jtmzef39039wzrxb1mbmfj4p, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ODhhZGVjODYtMThjZjQ5ZGMtOGM2ODZkZDItZTU5NWI0NmQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Stage [0,0] AST: ( (return (lambda '() (block '( (let $1 (Just (Uint32 '1))) (let $2 (Just (Uint32 '2))) (let $3 (Just (Uint32 '3))) (return (ToStream (Just (AsList (AsStruct '('"key" $1) '('"value" $1)) (AsStruct '('"key" $2) '('"value" $2)) (AsStruct '('"key" $3) '('"value" $3)))))) )))) ) 2025-05-07T08:57:55.798506Z node 1 :KQP_EXECUTER DEBUG: kqp_tasks_graph.cpp:234: Create result channelId: 1 from task: 1 with index: 0 2025-05-07T08:57:55.799155Z node 1 :KQP_EXECUTER DEBUG: kqp_literal_executer.cpp:275: ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: 01jtmzef39039wzrxb1mbmfj4p, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ODhhZGVjODYtMThjZjQ5ZGMtOGM2ODZkZDItZTU5NWI0NmQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Execution is complete, results: 1 2025-05-07T08:57:55.799411Z node 1 :KQP_EXECUTER TRACE: kqp_executer_impl.h:190: ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: 01jtmzef39039wzrxb1mbmfj4p, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ODhhZGVjODYtMThjZjQ5ZGMtOGM2ODZkZDItZTU5NWI0NmQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Bootstrap done, become ReadyState 2025-05-07T08:57:55.799682Z node 1 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:584: ActorId: [1:1765:2950] TxId: 281474976715660. Ctx: { TraceId: 01jtmzef39039wzrxb1mbmfj4p, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ODhhZGVjODYtMThjZjQ5ZGMtOGM2ODZkZDItZTU5NWI0NmQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Executing physical tx, type: 2, stages: 1 2025-05-07T08:57:55.799733Z node 1 :KQP_EXECUTER DEBUG: kqp_tasks_graph.cpp:25: StageInfo: StageId #[0,0], InputsCount: 0, OutputsCount: 1 2025-05-07T08:57:55.799900Z node 1 :KQP_EXECUTER TRACE: kqp_executer_impl.h:599: ActorId: [1:1765:2950] TxId: 281474976715660. Ctx: { TraceId: 01jtmzef39039wzrxb1mbmfj4p, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ODhhZGVjODYtMThjZjQ5ZGMtOGM2ODZkZDItZTU5NWI0NmQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Got request, become WaitResolveState 2025-05-07T08:57:55.800175Z node 1 :KQP_EXECUTER DEBUG: kqp_table_resolver.cpp:271: TxId: 281474976715660. Resolved key sets: 1 2025-05-07T08:57:55.800368Z node 1 :KQP_EXECUTER DEBUG: kqp_table_resolver.cpp:295: TxId: 281474976715660. Resolved key: { TableId: [OwnerId: 72057594046644480, LocalPathId: 2] Access: 2 SyncVersion: false Status: OkData Kind: KindRegularTable PartitionsCount: 4 DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } From: (Uint32 : NULL) IncFrom: 1 To: () IncTo: 0 } 2025-05-07T08:57:55.800505Z node 1 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:2034: ActorId: [1:1765:2950] TxId: 281474976715660. Ctx: { TraceId: 01jtmzef39039wzrxb1mbmfj4p, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ODhhZGVjODYtMThjZjQ5ZGMtOGM2ODZkZDItZTU5NWI0NmQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Stage [0,0] AST: ( (declare %kqp%tx_result_binding_0_0 (ListType (StructType '('"key" (OptionalType (DataType 'Uint32))) '('"value" (OptionalType (DataType 'Uint32)))))) (return (lambda '() (block '( (let $1 (KqpTable '"/Root/table-1" '"72057594046644480:2" '"" '1)) (let $2 (OptionalType (DataType 'Uint32))) (return (KqpEffects (KqpUpsertRows $1 (Iterator %kqp%tx_result_binding_0_0) '('"key" '"value") '('('"Mode" '"upsert"))))) )))) ) 2025-05-07T08:57:55.800766Z node 1 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:1723: ActorId: [1:1765:2950] TxId: 281474976715660. Ctx: { TraceId: 01jtmzef39039wzrxb1mbmfj4p, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ODhhZGVjODYtMThjZjQ5ZGMtOGM2ODZkZDItZTU5NWI0NmQ=, CurrentExecutionId: , CustomerSuppliedId: ... tZWI1ZjkzZTQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:58:09.390671Z node 3 :KQP_EXECUTER DEBUG: kqp_planner.cpp:553: TxId: 281474976715672. Ctx: { TraceId: 01jtmzexhe2xrstaj360trpz22, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=NTI2MDkzYzctNGRjYjIxODUtNmY4ZGJmNzMtZWI1ZjkzZTQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Total tasks: 0, readonly: true, 0 scan tasks on 0 nodes, localComputeTasks: 0, snapshot: {0, 0} 2025-05-07T08:58:09.390976Z node 3 :KQP_EXECUTER DEBUG: kqp_planner.cpp:793: TxId: 281474976715672. Ctx: { TraceId: 01jtmzexhe2xrstaj360trpz22, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=NTI2MDkzYzctNGRjYjIxODUtNmY4ZGJmNzMtZWI1ZjkzZTQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Collect channels updates for task: 1 at actor [3:2024:3186] 2025-05-07T08:58:09.391065Z node 3 :KQP_EXECUTER DEBUG: kqp_planner.cpp:785: TxId: 281474976715672. Ctx: { TraceId: 01jtmzexhe2xrstaj360trpz22, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=NTI2MDkzYzctNGRjYjIxODUtNmY4ZGJmNzMtZWI1ZjkzZTQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Sending channels info to compute actor: [3:2024:3186], channels: 0 2025-05-07T08:58:09.391152Z node 3 :KQP_EXECUTER INFO: kqp_data_executer.cpp:2800: ActorId: [3:2021:3186] TxId: 281474976715672. Ctx: { TraceId: 01jtmzexhe2xrstaj360trpz22, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=NTI2MDkzYzctNGRjYjIxODUtNmY4ZGJmNzMtZWI1ZjkzZTQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Total tasks: 1, readonly: 0, datashardTxs: 0, evWriteTxs: 0, topicTxs: 0, volatile: 0, immediate: 1, pending compute tasks0, useFollowers: 0 2025-05-07T08:58:09.391207Z node 3 :KQP_EXECUTER TRACE: kqp_data_executer.cpp:2803: ActorId: [3:2021:3186] TxId: 281474976715672. Ctx: { TraceId: 01jtmzexhe2xrstaj360trpz22, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=NTI2MDkzYzctNGRjYjIxODUtNmY4ZGJmNzMtZWI1ZjkzZTQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Updating channels after the creation of compute actors 2025-05-07T08:58:09.391265Z node 3 :KQP_EXECUTER DEBUG: kqp_planner.cpp:793: TxId: 281474976715672. Ctx: { TraceId: 01jtmzexhe2xrstaj360trpz22, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=NTI2MDkzYzctNGRjYjIxODUtNmY4ZGJmNzMtZWI1ZjkzZTQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Collect channels updates for task: 1 at actor [3:2024:3186] 2025-05-07T08:58:09.391340Z node 3 :KQP_EXECUTER DEBUG: kqp_planner.cpp:785: TxId: 281474976715672. 
Ctx: { TraceId: 01jtmzexhe2xrstaj360trpz22, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=NTI2MDkzYzctNGRjYjIxODUtNmY4ZGJmNzMtZWI1ZjkzZTQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Sending channels info to compute actor: [3:2024:3186], channels: 0 2025-05-07T08:58:09.391411Z node 3 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:644: ActorId: [3:2021:3186] TxId: 281474976715672. Ctx: { TraceId: 01jtmzexhe2xrstaj360trpz22, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=NTI2MDkzYzctNGRjYjIxODUtNmY4ZGJmNzMtZWI1ZjkzZTQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Waiting for: CA [3:2024:3186], 2025-05-07T08:58:09.391481Z node 3 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:157: ActorId: [3:2021:3186] TxId: 281474976715672. Ctx: { TraceId: 01jtmzexhe2xrstaj360trpz22, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=NTI2MDkzYzctNGRjYjIxODUtNmY4ZGJmNzMtZWI1ZjkzZTQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: WaitResolveState, waiting for 1 compute actor(s) and 0 datashard(s): CA [3:2024:3186], 2025-05-07T08:58:09.391535Z node 3 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:2362: ActorId: [3:2021:3186] TxId: 281474976715672. Ctx: { TraceId: 01jtmzexhe2xrstaj360trpz22, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=NTI2MDkzYzctNGRjYjIxODUtNmY4ZGJmNzMtZWI1ZjkzZTQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: WaitResolveState, immediate tx, become ExecuteState 2025-05-07T08:58:09.392486Z node 3 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:433: ActorId: [3:2021:3186] TxId: 281474976715672. Ctx: { TraceId: 01jtmzexhe2xrstaj360trpz22, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=NTI2MDkzYzctNGRjYjIxODUtNmY4ZGJmNzMtZWI1ZjkzZTQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, got execution state from compute actor: [3:2024:3186], task: 1, state: COMPUTE_STATE_EXECUTING, stats: { } 2025-05-07T08:58:09.392570Z node 3 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:644: ActorId: [3:2021:3186] TxId: 281474976715672. Ctx: { TraceId: 01jtmzexhe2xrstaj360trpz22, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=NTI2MDkzYzctNGRjYjIxODUtNmY4ZGJmNzMtZWI1ZjkzZTQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Waiting for: CA [3:2024:3186], 2025-05-07T08:58:09.392634Z node 3 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:157: ActorId: [3:2021:3186] TxId: 281474976715672. Ctx: { TraceId: 01jtmzexhe2xrstaj360trpz22, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=NTI2MDkzYzctNGRjYjIxODUtNmY4ZGJmNzMtZWI1ZjkzZTQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, waiting for 1 compute actor(s) and 0 datashard(s): CA [3:2024:3186], 2025-05-07T08:58:09.393847Z node 3 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:433: ActorId: [3:2021:3186] TxId: 281474976715672. Ctx: { TraceId: 01jtmzexhe2xrstaj360trpz22, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=NTI2MDkzYzctNGRjYjIxODUtNmY4ZGJmNzMtZWI1ZjkzZTQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
ActorState: ExecuteState, got execution state from compute actor: [3:2024:3186], task: 1, state: COMPUTE_STATE_FINISHED, stats: { CpuTimeUs: 676 Tasks { TaskId: 1 CpuTimeUs: 106 FinishTimeMs: 1746608289393 EgressBytes: 10 EgressRows: 1 ComputeCpuTimeUs: 18 BuildCpuTimeUs: 88 HostName: "ghrun-sykirh5vua" NodeId: 3 CreateTimeMs: 1746608289391 UpdateTimeMs: 1746608289393 } MaxMemoryUsage: 1048576 } 2025-05-07T08:58:09.394184Z node 3 :KQP_EXECUTER INFO: kqp_planner.cpp:688: TxId: 281474976715672. Ctx: { TraceId: 01jtmzexhe2xrstaj360trpz22, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=NTI2MDkzYzctNGRjYjIxODUtNmY4ZGJmNzMtZWI1ZjkzZTQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Compute actor has finished execution: [3:2024:3186] 2025-05-07T08:58:09.394331Z node 3 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:281: ActorId: [3:2021:3186] TxId: 281474976715672. Ctx: { TraceId: 01jtmzexhe2xrstaj360trpz22, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=NTI2MDkzYzctNGRjYjIxODUtNmY4ZGJmNzMtZWI1ZjkzZTQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Send Commit to BufferActor=[3:2020:3186] 2025-05-07T08:58:09.394412Z node 3 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:838: ActorId: [3:2021:3186] TxId: 281474976715672. Ctx: { TraceId: 01jtmzexhe2xrstaj360trpz22, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=NTI2MDkzYzctNGRjYjIxODUtNmY4ZGJmNzMtZWI1ZjkzZTQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Resource usage for last stat interval: ComputeTime: 0.000676s ReadRows: 0 ReadBytes: 0 ru: 1 rate limiter was not found force flag: 1 2025-05-07T08:58:09.413192Z node 3 :KQP_COMPUTE WARN: kqp_write_actor.cpp:661: SelfId: [3:2027:3186], Table: `/Root/table-1` ([72057594046644480:2:1]), SessionActorId: [3:2011:3186]Got OUT_OF_SPACE for table `/Root/table-1`. ShardID=72075186224037888, Sink=[3:2027:3186]. Ignored this error. 2025-05-07T08:58:09.413346Z node 3 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:2833: SelfId: [3:2020:3186], SessionActorId: [3:2011:3186], statusCode=OVERLOADED. Issue=
: Error: Tablet 72075186224037888 is out of space. Table `/Root/table-1`., code: 2006 . sessionActorId=[3:2011:3186]. isRollback=0 2025-05-07T08:58:09.413725Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:1840: SessionId: ydb://session/3?node_id=3&id=NTI2MDkzYzctNGRjYjIxODUtNmY4ZGJmNzMtZWI1ZjkzZTQ=, ActorId: [3:2011:3186], ActorState: ExecuteState, TraceId: 01jtmzexhe2xrstaj360trpz22, got TEvKqpBuffer::TEvError in ExecuteState, status: OVERLOADED send to: [3:2021:3186] from: [3:2020:3186] 2025-05-07T08:58:09.414101Z node 3 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:790: ActorId: [3:2021:3186] TxId: 281474976715672. Ctx: { TraceId: 01jtmzexhe2xrstaj360trpz22, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=NTI2MDkzYzctNGRjYjIxODUtNmY4ZGJmNzMtZWI1ZjkzZTQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Got EvAbortExecution, status: OVERLOADED, message: {
: Error: Tablet 72075186224037888 is out of space. Table `/Root/table-1`., code: 2006 } 2025-05-07T08:58:09.414192Z node 3 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1944: ActorId: [3:2021:3186] TxId: 281474976715672. Ctx: { TraceId: 01jtmzexhe2xrstaj360trpz22, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=NTI2MDkzYzctNGRjYjIxODUtNmY4ZGJmNzMtZWI1ZjkzZTQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. OVERLOADED: {
: Error: Tablet 72075186224037888 is out of space. Table `/Root/table-1`., code: 2006 } 2025-05-07T08:58:09.414281Z node 3 :KQP_EXECUTER INFO: kqp_executer_impl.h:1903: ActorId: [3:2021:3186] TxId: 281474976715672. Ctx: { TraceId: 01jtmzexhe2xrstaj360trpz22, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=NTI2MDkzYzctNGRjYjIxODUtNmY4ZGJmNzMtZWI1ZjkzZTQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. task: 1, does not have the CA id yet or is already complete 2025-05-07T08:58:09.414481Z node 3 :KQP_EXECUTER TRACE: kqp_executer_impl.h:2014: ActorId: [3:2021:3186] TxId: 281474976715672. Ctx: { TraceId: 01jtmzexhe2xrstaj360trpz22, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=NTI2MDkzYzctNGRjYjIxODUtNmY4ZGJmNzMtZWI1ZjkzZTQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ReplyErrorAndDie. Response: Status: OVERLOADED Issues { message: "Tablet 72075186224037888 is out of space. Table `/Root/table-1`." issue_code: 2006 severity: 1 } Result { Stats { CpuTimeUs: 676 } } , to ActorId: [3:2011:3186] 2025-05-07T08:58:09.414539Z node 3 :KQP_EXECUTER INFO: kqp_data_executer.cpp:2866: ActorId: [3:2021:3186] TxId: 281474976715672. Ctx: { TraceId: 01jtmzexhe2xrstaj360trpz22, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=NTI2MDkzYzctNGRjYjIxODUtNmY4ZGJmNzMtZWI1ZjkzZTQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Shutdown immediately - nothing to wait 2025-05-07T08:58:09.414684Z node 3 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:2140: ActorId: [3:2021:3186] TxId: 281474976715672. Ctx: { TraceId: 01jtmzexhe2xrstaj360trpz22, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=NTI2MDkzYzctNGRjYjIxODUtNmY4ZGJmNzMtZWI1ZjkzZTQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. terminate execution. 2025-05-07T08:58:09.414765Z node 3 :KQP_EXECUTER TRACE: kqp_executer_impl.h:2154: ActorId: [3:2021:3186] TxId: 281474976715672. Ctx: { TraceId: 01jtmzexhe2xrstaj360trpz22, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=NTI2MDkzYzctNGRjYjIxODUtNmY4ZGJmNzMtZWI1ZjkzZTQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Terminate, become ZombieState 2025-05-07T08:58:09.414984Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2578: SessionId: ydb://session/3?node_id=3&id=NTI2MDkzYzctNGRjYjIxODUtNmY4ZGJmNzMtZWI1ZjkzZTQ=, ActorId: [3:2011:3186], ActorState: ExecuteState, TraceId: 01jtmzexhe2xrstaj360trpz22, Create QueryResponse for error on request, msg: >> DataShardWrite::ImmediateAndPlannedCommittedOpsRace [GOOD] >> TSchemeShardTTLUtility::GetExpireAfter [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLTests::CreateTableShouldFailOnUnknownColumn [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:128:2058] recipient: [1:108:2140] 2025-05-07T08:58:11.065305Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:58:11.065419Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:58:11.065479Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:58:11.065519Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:58:11.065566Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:58:11.065590Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:58:11.065644Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:58:11.065723Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:58:11.066512Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:58:11.066892Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:58:11.148486Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:58:11.148557Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:58:11.165658Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:58:11.165811Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:58:11.166022Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 
72057594046678944 2025-05-07T08:58:11.175916Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:58:11.176597Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:58:11.177370Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:11.177709Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:58:11.180474Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:58:11.182227Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:58:11.182305Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:58:11.182366Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:58:11.182463Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:58:11.182578Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:58:11.182856Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:58:11.190681Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:239:2058] recipient: [1:15:2062] 2025-05-07T08:58:11.329112Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:58:11.329444Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:11.329701Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:58:11.329960Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:58:11.330042Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:11.332998Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, 
response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:11.333193Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:58:11.333410Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:11.333480Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:58:11.333680Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:58:11.333715Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:58:11.336156Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:11.336229Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:58:11.336275Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:58:11.338598Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:11.338648Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:11.338709Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:58:11.338787Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:58:11.342522Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:58:11.344915Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:58:11.345165Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:58:11.346241Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:11.346393Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: 
TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 130 RawX2: 4294969450 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:58:11.346453Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:58:11.346754Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:58:11.346827Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:58:11.347025Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:58:11.347137Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:58:11.349910Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:58:11.349998Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:58:11.350259Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:58:11.350307Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:206:2208], at schemeshard: 72057594046678944, txId: 1, path id: 1 2025-05-07T08:58:11.350389Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:11.350439Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046678944] TDone opId# 1:0 ProgressState 2025-05-07T08:58:11.350559Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#1:0 progress is 1/1 2025-05-07T08:58:11.350591Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-05-07T08:58:11.350631Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#1:0 progress is 1/1 2025-05-07T08:58:11.350663Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-05-07T08:58:11.350697Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 1, ready parts: 1/1, is published: false 2025-05-07T08:58:11.350742Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-05-07T08:58:11.350801Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 1:0 2025-05-07T08:58:11.350830Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard_impl.cpp:5136: RemoveTx for txid 1:0 2025-05-07T08:58:11.350906Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-05-07T08:58:11.350942Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 1, publications: 1, subscribers: 0 2025-05-07T08:58:11.350971Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 1, [OwnerId: 72057594046678944, LocalPathId: 1], 3 2025-05-07T08:58:11.354138Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-05-07T08:58:11.354275Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-05-07T08:58:11.354341Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1 2025-05-07T08:58:11.354386Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 3 2025-05-07T08:58:11.354426Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:58:11.354543Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1, subscribers: 0 2025-05-07T08:58:11.358049Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1 2025-05-07T08:58:11.358671Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1, at schemeshard: 72057594046678944 TestModificationResults wait txId: 101 2025-05-07T08:58:11.360061Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:434: actor# [1:269:2260] Bootstrap 2025-05-07T08:58:11.374476Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:453: actor# [1:269:2260] Become StateWork (SchemeCache [1:274:2265]) 2025-05-07T08:58:11.377467Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateTable CreateTable { Name: "TTLEnabledTable" Columns { Name: "key" Type: "Uint64" } Columns { Name: "modified_at" Type: "Timestamp" } KeyColumnNames: "key" TTLSettings { Enabled { ColumnName: "created_at" } } } } TxId: 101 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:58:11.377821Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_table.cpp:426: TCreateTable Propose, path: /MyRoot/TTLEnabledTable, opId: 101:0, at schemeshard: 72057594046678944 2025-05-07T08:58:11.377956Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_create_table.cpp:433: TCreateTable Propose, path: /MyRoot/TTLEnabledTable, opId: 101:0, schema: Name: "TTLEnabledTable" Columns { Name: "key" 
Type: "Uint64" } Columns { Name: "modified_at" Type: "Timestamp" } KeyColumnNames: "key" TTLSettings { Enabled { ColumnName: "created_at" } }, at schemeshard: 72057594046678944 2025-05-07T08:58:11.378430Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 101:1, propose status:StatusSchemeError, reason: Cannot enable TTL on unknown column: 'created_at', at schemeshard: 72057594046678944 2025-05-07T08:58:11.379782Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:269:2260] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-05-07T08:58:11.384363Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 101, response: Status: StatusSchemeError Reason: "Cannot enable TTL on unknown column: \'created_at\'" TxId: 101 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:58:11.384537Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 101, database: /MyRoot, subject: , status: StatusSchemeError, reason: Cannot enable TTL on unknown column: 'created_at', operation: CREATE TABLE, path: /MyRoot/TTLEnabledTable 2025-05-07T08:58:11.385251Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 TestModificationResult got TxId: 101, wait until txId: 101 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_write/unittest >> DataShardWrite::UpsertLostPrepareArbiterRestart [GOOD] Test command err: 2025-05-07T08:57:32.533523Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:105:2151], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:57:32.533719Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:57:32.534038Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0034b3/r3tmp/tmpNl9fPY/pdisk_1.dat 2025-05-07T08:57:33.015945Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-05-07T08:57:33.138456Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:57:33.202076Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:57:33.202241Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:57:33.215211Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:57:33.310461Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-05-07T08:57:33.355433Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3098: StateInit, received event# 268828672, Sender [1:656:2563], Recipient [1:665:2569]: NKikimr::TEvTablet::TEvBoot 2025-05-07T08:57:33.356683Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3098: StateInit, received event# 268828673, Sender [1:656:2563], Recipient [1:665:2569]: NKikimr::TEvTablet::TEvRestored 2025-05-07T08:57:33.357146Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:665:2569] 2025-05-07T08:57:33.357416Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-05-07T08:57:33.369364Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3111: StateInactive, received event# 268828684, Sender [1:656:2563], Recipient [1:665:2569]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-05-07T08:57:33.411210Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-05-07T08:57:33.411349Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-05-07T08:57:33.413049Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-05-07T08:57:33.413143Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-05-07T08:57:33.413254Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-05-07T08:57:33.413653Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-05-07T08:57:33.413811Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-05-07T08:57:33.413912Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state 
actor id [1:681:2569] in generation 1 2025-05-07T08:57:33.426521Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-05-07T08:57:33.542416Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-05-07T08:57:33.542673Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-05-07T08:57:33.542782Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:683:2579] 2025-05-07T08:57:33.542833Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-05-07T08:57:33.542868Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-05-07T08:57:33.542903Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T08:57:33.543122Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 2146435072, Sender [1:665:2569], Recipient [1:665:2569]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-05-07T08:57:33.543187Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3155: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-05-07T08:57:33.543486Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-05-07T08:57:33.543579Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-05-07T08:57:33.543657Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T08:57:33.543695Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-05-07T08:57:33.543731Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-05-07T08:57:33.543787Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-05-07T08:57:33.543828Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-05-07T08:57:33.543869Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-05-07T08:57:33.543919Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T08:57:33.544052Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269877761, Sender [1:672:2573], Recipient [1:665:2569]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-05-07T08:57:33.544084Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3166: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-05-07T08:57:33.544123Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:661:2566], serverId# [1:672:2573], sessionId# [0:0:0] 2025-05-07T08:57:33.544512Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269549568, Sender [1:410:2405], Recipient [1:672:2573] 2025-05-07T08:57:33.544557Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3136: StateWork, processing 
event TEvDataShard::TEvProposeTransaction 2025-05-07T08:57:33.544650Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-05-07T08:57:33.544866Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-05-07T08:57:33.544923Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-05-07T08:57:33.545007Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-05-07T08:57:33.545131Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-05-07T08:57:33.545174Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx 2025-05-07T08:57:33.545208Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx 2025-05-07T08:57:33.545266Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-05-07T08:57:33.547669Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayCompleteNoMoreRestarts 2025-05-07T08:57:33.547721Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit StoreSchemeTx 2025-05-07T08:57:33.547758Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit FinishPropose 2025-05-07T08:57:33.547791Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-05-07T08:57:33.547859Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayComplete 2025-05-07T08:57:33.547898Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit FinishPropose 2025-05-07T08:57:33.547947Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit WaitForPlan 2025-05-07T08:57:33.547986Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit WaitForPlan 2025-05-07T08:57:33.548020Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:281474976715657] at 72075186224037888 is not ready to execute on unit WaitForPlan 2025-05-07T08:57:33.550110Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269746185, Sender [1:684:2580], Recipient [1:665:2569]: NKikimr::TEvTxProxySchemeCache::TEvWatchNotifyUpdated 2025-05-07T08:57:33.550167Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-05-07T08:57:33.564056Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-05-07T08:57:33.564134Z node 1 
:TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-05-07T08:57:33.564185Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-05-07T08:57:33.564238Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 281474976715657 at tablet 72075186224037888 send to client, exec latency: 0 ms, propose latency: 0 ms, status: PREPARED 2025-05-07T08:57:33.564303Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-05-07T08:57:33.740163Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269877761, Sender [1:699:2589], Recipient [1:665:2569]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-05-07T08:57:33.740236Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3166: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-05-07T08:57:33.740289Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075 ... : NKikimr::TEvTabletPipe::TEvServerConnected 2025-05-07T08:58:09.886128Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3166: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-05-07T08:58:09.886169Z node 7 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037890, clientId# [7:965:2780], serverId# [7:966:2781], sessionId# [0:0:0] 2025-05-07T08:58:09.886347Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269553169, Sender [7:964:2779], Recipient [7:717:2596]: NKikimrTxDataShard.TEvGetInfoRequest 2025-05-07T08:58:09.887075Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269877761, Sender [7:969:2784], Recipient [7:717:2596]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-05-07T08:58:09.887117Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3166: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-05-07T08:58:09.887147Z node 7 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037890, clientId# [7:968:2783], serverId# [7:969:2784], sessionId# [0:0:0] 2025-05-07T08:58:09.887278Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269553215, Sender [7:967:2782], Recipient [7:717:2596]: NKikimrTxDataShard.TEvRead ReadId: 1002 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 ResultFormat: FORMAT_CELLVEC RangesSize: 1 2025-05-07T08:58:09.887388Z node 7 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2435: TTxReadViaPipeline execute: at tablet# 72075186224037890, FollowerId 0 2025-05-07T08:58:09.887424Z node 7 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 72075186224037890 CompleteEdge# v1001/1000001 IncompleteEdge# v{min} UnprotectedReadEdge# v{min} ImmediateWriteEdge# v{min} ImmediateWriteEdgeReplied# v{min} 2025-05-07T08:58:09.887454Z node 7 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2538: 72075186224037890 changed HEAD read to non-repeatable v4000/18446744073709551615 2025-05-07T08:58:09.887500Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:3] at 72075186224037890 on unit CheckRead 2025-05-07T08:58:09.887566Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:3] at 72075186224037890 is Executed 2025-05-07T08:58:09.887594Z node 7 
:TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:3] at 72075186224037890 executing on unit CheckRead 2025-05-07T08:58:09.887620Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:3] at 72075186224037890 to execution unit BuildAndWaitDependencies 2025-05-07T08:58:09.887645Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:3] at 72075186224037890 on unit BuildAndWaitDependencies 2025-05-07T08:58:09.887685Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:3] at 72075186224037890 2025-05-07T08:58:09.887713Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:3] at 72075186224037890 is Executed 2025-05-07T08:58:09.887737Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:3] at 72075186224037890 executing on unit BuildAndWaitDependencies 2025-05-07T08:58:09.887757Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:3] at 72075186224037890 to execution unit ExecuteRead 2025-05-07T08:58:09.887779Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:3] at 72075186224037890 on unit ExecuteRead 2025-05-07T08:58:09.887860Z node 7 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1566: 72075186224037890 Execute read# 1, request: { ReadId: 1002 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 ResultFormat: FORMAT_CELLVEC } 2025-05-07T08:58:09.888008Z node 7 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2146: 72075186224037890 Complete read# {[7:967:2782], 1002} after executionsCount# 1 2025-05-07T08:58:09.888046Z node 7 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2120: 72075186224037890 read iterator# {[7:967:2782], 1002} sends rowCount# 0, bytes# 0, quota rows left# 18446744073709551615, quota bytes left# 18446744073709551615, hasUnreadQueries# 0, total queries# 1, firstUnprocessed# 0 2025-05-07T08:58:09.888104Z node 7 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2171: 72075186224037890 read iterator# {[7:967:2782], 1002} finished in read 2025-05-07T08:58:09.888153Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:3] at 72075186224037890 is Executed 2025-05-07T08:58:09.888181Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:3] at 72075186224037890 executing on unit ExecuteRead 2025-05-07T08:58:09.888203Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:3] at 72075186224037890 to execution unit CompletedOperations 2025-05-07T08:58:09.888228Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:3] at 72075186224037890 on unit CompletedOperations 2025-05-07T08:58:09.888266Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:3] at 72075186224037890 is Executed 2025-05-07T08:58:09.888288Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:3] at 72075186224037890 executing on unit CompletedOperations 2025-05-07T08:58:09.888311Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:3] at 72075186224037890 has finished 2025-05-07T08:58:09.888338Z node 7 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2670: TTxReadViaPipeline(69) Execute with status# Executed at tablet# 72075186224037890 2025-05-07T08:58:09.888409Z node 7 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2719: TTxReadViaPipeline(69) Complete: at tablet# 72075186224037890 
2025-05-07T08:58:09.889026Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269877761, Sender [7:972:2787], Recipient [7:714:2594]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-05-07T08:58:09.889086Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3166: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-05-07T08:58:09.889125Z node 7 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037891, clientId# [7:971:2786], serverId# [7:972:2787], sessionId# [0:0:0] 2025-05-07T08:58:09.889231Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269553169, Sender [7:970:2785], Recipient [7:714:2594]: NKikimrTxDataShard.TEvGetInfoRequest 2025-05-07T08:58:09.892745Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269877761, Sender [7:975:2790], Recipient [7:714:2594]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-05-07T08:58:09.892830Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3166: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-05-07T08:58:09.892868Z node 7 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037891, clientId# [7:974:2789], serverId# [7:975:2790], sessionId# [0:0:0] 2025-05-07T08:58:09.893117Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269553215, Sender [7:973:2788], Recipient [7:714:2594]: NKikimrTxDataShard.TEvRead ReadId: 1003 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 ResultFormat: FORMAT_CELLVEC RangesSize: 1 2025-05-07T08:58:09.893232Z node 7 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2435: TTxReadViaPipeline execute: at tablet# 72075186224037891, FollowerId 0 2025-05-07T08:58:09.893275Z node 7 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 72075186224037891 CompleteEdge# v1000/281474976715657 IncompleteEdge# v{min} UnprotectedReadEdge# v{min} ImmediateWriteEdge# v{min} ImmediateWriteEdgeReplied# v{min} 2025-05-07T08:58:09.893305Z node 7 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2538: 72075186224037891 changed HEAD read to non-repeatable v4000/18446744073709551615 2025-05-07T08:58:09.893347Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:2] at 72075186224037891 on unit CheckRead 2025-05-07T08:58:09.893411Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:2] at 72075186224037891 is Executed 2025-05-07T08:58:09.893440Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:2] at 72075186224037891 executing on unit CheckRead 2025-05-07T08:58:09.893467Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:2] at 72075186224037891 to execution unit BuildAndWaitDependencies 2025-05-07T08:58:09.893494Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:2] at 72075186224037891 on unit BuildAndWaitDependencies 2025-05-07T08:58:09.893537Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:2] at 72075186224037891 2025-05-07T08:58:09.893566Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:2] at 72075186224037891 is Executed 2025-05-07T08:58:09.893590Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:2] at 72075186224037891 executing on unit BuildAndWaitDependencies 2025-05-07T08:58:09.893610Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:2] at 
72075186224037891 to execution unit ExecuteRead 2025-05-07T08:58:09.893633Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:2] at 72075186224037891 on unit ExecuteRead 2025-05-07T08:58:09.893714Z node 7 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1566: 72075186224037891 Execute read# 1, request: { ReadId: 1003 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 ResultFormat: FORMAT_CELLVEC } 2025-05-07T08:58:09.893848Z node 7 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2146: 72075186224037891 Complete read# {[7:973:2788], 1003} after executionsCount# 1 2025-05-07T08:58:09.893896Z node 7 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2120: 72075186224037891 read iterator# {[7:973:2788], 1003} sends rowCount# 0, bytes# 0, quota rows left# 18446744073709551615, quota bytes left# 18446744073709551615, hasUnreadQueries# 0, total queries# 1, firstUnprocessed# 0 2025-05-07T08:58:09.893951Z node 7 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2171: 72075186224037891 read iterator# {[7:973:2788], 1003} finished in read 2025-05-07T08:58:09.894027Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:2] at 72075186224037891 is Executed 2025-05-07T08:58:09.894053Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:2] at 72075186224037891 executing on unit ExecuteRead 2025-05-07T08:58:09.894076Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:2] at 72075186224037891 to execution unit CompletedOperations 2025-05-07T08:58:09.894104Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:2] at 72075186224037891 on unit CompletedOperations 2025-05-07T08:58:09.894145Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:2] at 72075186224037891 is Executed 2025-05-07T08:58:09.894173Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:2] at 72075186224037891 executing on unit CompletedOperations 2025-05-07T08:58:09.894197Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:2] at 72075186224037891 has finished 2025-05-07T08:58:09.894224Z node 7 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2670: TTxReadViaPipeline(69) Execute with status# Executed at tablet# 72075186224037891 2025-05-07T08:58:09.894299Z node 7 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2719: TTxReadViaPipeline(69) Complete: at tablet# 72075186224037891 >> Cdc::OldImageLogDebezium [GOOD] >> Cdc::NewImageLogDebezium ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/backup_ut/unittest >> BackupRestore::TestAllPrimitiveTypes-JSON_DOCUMENT [GOOD] Test command err: 2025-05-07T08:56:38.669471Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501625092026101874:2202];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00218c/r3tmp/tmp7byYYx/pdisk_1.dat 2025-05-07T08:56:38.982048Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-05-07T08:56:39.207734Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:56:39.221278Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> 
Disconnected 2025-05-07T08:56:39.221374Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:56:39.227324Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 19508, node 1 2025-05-07T08:56:39.324965Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:56:39.325002Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:56:39.325010Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:56:39.325145Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:29126 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:56:39.948339Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:56:43.067445Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501625113500939264:2339], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:56:43.067545Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:56:43.067588Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501625113500939272:2342], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:56:43.078208Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480 2025-05-07T08:56:43.117430Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501625113500939278:2343], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-05-07T08:56:43.218204Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501625113500939351:2683] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:56:43.578114Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 2025-05-07T08:56:43.664657Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501625092026101874:2202];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:56:43.664721Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:56:43.879279Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710661. Ctx: { TraceId: 01jtmzc9zh5ckxgsjs3eg0caba, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MzA1ZjkxODQtYWRkZGQxZmItYWMxZDQ5OWUtNWUyOTE5OWQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:56:44.117699Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710662. Ctx: { TraceId: 01jtmzca4f769dn3knvzxddqhv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MzA1ZjkxODQtYWRkZGQxZmItYWMxZDQ5OWUtNWUyOTE5OWQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root Backup "/Root" to "/home/runner/.ya/build/build_root/zvgn/00218c/r3tmp/tmpGl3Lre/"Create temporary directory "/Root/~backup_20250507T085644" in databaseProcess "/home/runner/.ya/build/build_root/zvgn/00218c/r3tmp/tmpGl3Lre/Int8Table"Copy tables: { src: "/Root/Int8Table", dst: "/Root/~backup_20250507T085644/Int8Table" }Backup table "/Root/~backup_20250507T085644/Int8Table" to "/home/runner/.ya/build/build_root/zvgn/00218c/r3tmp/tmpGl3Lre/Int8Table"Describe table "/Root/~backup_20250507T085644/Int8Table"Write scheme into "/home/runner/.ya/build/build_root/zvgn/00218c/r3tmp/tmpGl3Lre/Int8Table/scheme.pb"Describe table "/Root/Int8Table"Write ACL into "/home/runner/.ya/build/build_root/zvgn/00218c/r3tmp/tmpGl3Lre/Int8Table/permissions.pb"Read table "/Root/~backup_20250507T085644/Int8Table"Write data into "/home/runner/.ya/build/build_root/zvgn/00218c/r3tmp/tmpGl3Lre/Int8Table/data_00.csv"Drop table "/Root/~backup_20250507T085644/Int8Table"Remove temporary directory "/Root/~backup_20250507T085644" in database2025-05-07T08:56:44.786404Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037889 not found 2025-05-07T08:56:44.789738Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpRmDir, opId: 281474976710668:0, at schemeshard: 72057594046644480 Backup completed successfullyRestore "/home/runner/.ya/build/build_root/zvgn/00218c/r3tmp/tmpGl3Lre/" to "/Root"Resolved db base path: "/Root"2025-05-07T08:56:44.941393Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037888 not found Restore folder "/home/runner/.ya/build/build_root/zvgn/00218c/r3tmp/tmpGl3Lre/" to "/Root"Process "/home/runner/.ya/build/build_root/zvgn/00218c/r3tmp/tmpGl3Lre/Int8Table"Read scheme from "/home/runner/.ya/build/build_root/zvgn/00218c/r3tmp/tmpGl3Lre/Int8Table/scheme.pb"Restore table "/home/runner/.ya/build/build_root/zvgn/00218c/r3tmp/tmpGl3Lre/Int8Table" to "/Root/Int8Table"2025-05-07T08:56:44.963330Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480 Created "/Root/Int8Table"Read data from "/home/runner/.ya/build/build_root/zvgn/00218c/r3tmp/tmpGl3Lre/Int8Table/data_00.csv"2025-05-07T08:56:45.142495Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710671. Ctx: { TraceId: 01jtmzcb7e10q8qfyaz70f528x, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OGI3OTg4YmEtZTlmM2YzMTctMjVkNjc0NDMtZmRhMmJlYTA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root Restore ACL "/home/runner/.ya/build/build_root/zvgn/00218c/r3tmp/tmpGl3Lre/Int8Table" to "/Root/Int8Table"Read ACL from "/home/runner/.ya/build/build_root/zvgn/00218c/r3tmp/tmpGl3Lre/Int8Table/permissions.pb"2025-05-07T08:56:45.181794Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710672:0, at schemeshard: 72057594046644480 Restore completed successfully2025-05-07T08:56:45.300897Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710673. 
Ctx: { TraceId: 01jtmzcbcg2bfyceaydg2q009z, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MzA1ZjkxODQtYWRkZGQxZmItYWMxZDQ5OWUtNWUyOTE5OWQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:56:46.780452Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7501625124373815107:2076];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:56:46.780524Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00218c/r3tmp/tmpmTJJ4E/pdisk_1.dat 2025-05-07T08:56:46.931981Z node 4 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:56:46.958496Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:56:46.958558Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:56:46.961359Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 19203, node 4 2025-05-07T08:56:47.080456Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:56:47.080486Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:56:47.080495Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:56:47.080638Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:21947 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { ... ble" to "/Root/JsonTable"Read ACL from "/home/runner/.ya/build/build_root/zvgn/00218c/r3tmp/tmpSJxe2K/JsonTable/permissions.pb"2025-05-07T08:58:02.182504Z node 28 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715674:0, at schemeshard: 72057594046644480 Restore completed successfully2025-05-07T08:58:02.312602Z node 28 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715675. Ctx: { TraceId: 01jtmzepjjfpmgstadqnzbtzh5, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=28&id=OGYzMDczNTQtMmE1YWQ0ZDQtMTFiNjdiY2MtM2FkY2EzNWM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-05-07T08:58:04.026509Z node 31 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[31:7501625462109851012:2074];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:58:04.026574Z node 31 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00218c/r3tmp/tmprnzyuM/pdisk_1.dat 2025-05-07T08:58:04.250639Z node 31 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:58:04.298180Z node 31 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(31, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:58:04.298296Z node 31 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(31, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:58:04.302794Z node 31 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(31, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 8386, node 31 2025-05-07T08:58:04.375033Z node 31 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:58:04.375064Z node 31 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:58:04.375074Z node 31 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:58:04.375228Z node 31 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8340 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:58:04.679077Z node 31 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:58:08.055686Z node 31 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [31:7501625479289721273:2342], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:58:08.055773Z node 31 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [31:7501625479289721265:2339], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:58:08.056102Z node 31 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:58:08.060811Z node 31 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480 2025-05-07T08:58:08.100802Z node 31 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [31:7501625479289721279:2343], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-05-07T08:58:08.195480Z node 31 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [31:7501625479289721361:2682] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:58:08.246062Z node 31 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [31:7501625479289721394:2695] txid# 281474976715660, issues: { message: "Column Key has wrong key type JsonDocument" severity: 1 } 2025-05-07T08:58:08.246377Z node 31 :KQP_SESSION WARN: kqp_session_actor.cpp:2578: SessionId: ydb://session/3?node_id=31&id=NzE1NWJkOWQtY2VhOWFiYjgtZDQzZDFiMzEtNzhiNzAxNjc=, ActorId: [31:7501625474994753939:2334], ActorState: ExecuteState, TraceId: 01jtmzew9p2w3j105e8he2a32q, Create QueryResponse for error on request, msg: 2025-05-07T08:58:08.248205Z node 31 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715661. Ctx: { TraceId: 01jtmzew9p2w3j105e8he2a32q, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=31&id=NzE1NWJkOWQtY2VhOWFiYjgtZDQzZDFiMzEtNzhiNzAxNjc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:58:08.289043Z node 31 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-05-07T08:58:08.491663Z node 31 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715663. Ctx: { TraceId: 01jtmzewmp1g3adxanmmt2ny4w, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=31&id=NzE1NWJkOWQtY2VhOWFiYjgtZDQzZDFiMzEtNzhiNzAxNjc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:58:08.664229Z node 31 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715664. Ctx: { TraceId: 01jtmzewqy75g81mj7fa6j44ef, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=31&id=NzE1NWJkOWQtY2VhOWFiYjgtZDQzZDFiMzEtNzhiNzAxNjc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root Backup "/Root" to "/home/runner/.ya/build/build_root/zvgn/00218c/r3tmp/tmpJvsROR/"Create temporary directory "/Root/~backup_20250507T085808" in databaseProcess "/home/runner/.ya/build/build_root/zvgn/00218c/r3tmp/tmpJvsROR/JsonDocumentTable"Copy tables: { src: "/Root/JsonDocumentTable", dst: "/Root/~backup_20250507T085808/JsonDocumentTable" }Backup table "/Root/~backup_20250507T085808/JsonDocumentTable" to "/home/runner/.ya/build/build_root/zvgn/00218c/r3tmp/tmpJvsROR/JsonDocumentTable"Describe table "/Root/~backup_20250507T085808/JsonDocumentTable"Write scheme into "/home/runner/.ya/build/build_root/zvgn/00218c/r3tmp/tmpJvsROR/JsonDocumentTable/scheme.pb"Describe table "/Root/JsonDocumentTable"Write ACL into "/home/runner/.ya/build/build_root/zvgn/00218c/r3tmp/tmpJvsROR/JsonDocumentTable/permissions.pb"Read table "/Root/~backup_20250507T085808/JsonDocumentTable"2025-05-07T08:58:09.031000Z node 31 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[31:7501625462109851012:2074];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:58:09.031094Z node 31 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Write data into "/home/runner/.ya/build/build_root/zvgn/00218c/r3tmp/tmpJvsROR/JsonDocumentTable/data_00.csv"Drop table "/Root/~backup_20250507T085808/JsonDocumentTable"Remove temporary directory "/Root/~backup_20250507T085808" in database2025-05-07T08:58:09.234950Z node 31 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 31, TabletId: 72075186224037889 not found 2025-05-07T08:58:09.249866Z node 31 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpRmDir, opId: 281474976715670:0, at schemeshard: 72057594046644480 Backup completed successfullyRestore "/home/runner/.ya/build/build_root/zvgn/00218c/r3tmp/tmpJvsROR/" to "/Root"Resolved db base path: "/Root"2025-05-07T08:58:09.382695Z node 31 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 31, TabletId: 72075186224037888 not found Restore folder "/home/runner/.ya/build/build_root/zvgn/00218c/r3tmp/tmpJvsROR/" to "/Root"Process "/home/runner/.ya/build/build_root/zvgn/00218c/r3tmp/tmpJvsROR/JsonDocumentTable"Read scheme from "/home/runner/.ya/build/build_root/zvgn/00218c/r3tmp/tmpJvsROR/JsonDocumentTable/scheme.pb"Restore table "/home/runner/.ya/build/build_root/zvgn/00218c/r3tmp/tmpJvsROR/JsonDocumentTable" to "/Root/JsonDocumentTable"2025-05-07T08:58:09.412049Z node 31 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480 Created "/Root/JsonDocumentTable"Read data from "/home/runner/.ya/build/build_root/zvgn/00218c/r3tmp/tmpJvsROR/JsonDocumentTable/data_00.csv"2025-05-07T08:58:09.621626Z node 31 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715673. Ctx: { TraceId: 01jtmzexr1273htgvctsjneafx, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=31&id=ODYxZGVmZGMtYWVjOGY1ZjQtNzc4YjFhNjktYzk2NjUyM2M=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root Restore ACL "/home/runner/.ya/build/build_root/zvgn/00218c/r3tmp/tmpJvsROR/JsonDocumentTable" to "/Root/JsonDocumentTable"Read ACL from "/home/runner/.ya/build/build_root/zvgn/00218c/r3tmp/tmpJvsROR/JsonDocumentTable/permissions.pb"2025-05-07T08:58:09.663518Z node 31 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715674:0, at schemeshard: 72057594046644480 Restore completed successfully2025-05-07T08:58:09.835011Z node 31 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715675. Ctx: { TraceId: 01jtmzexwnfzv8t7mdx78bkhtr, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=31&id=NzE1NWJkOWQtY2VhOWFiYjgtZDQzZDFiMzEtNzhiNzAxNjc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root |91.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLUtility::GetExpireAfter [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/idx_test/unittest >> YdbIndexTable::OnlineBuildWithDataColumn [GOOD] Test command err: Trying to start YDB, gRPC: 16293, MsgBus: 30021 2025-05-07T08:51:41.274266Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501623816614935993:2198];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:51:41.284368Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/001e78/r3tmp/tmp0I4Uvy/pdisk_1.dat 2025-05-07T08:51:41.793286Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:51:41.801353Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:51:41.801477Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:51:41.807334Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 16293, node 1 2025-05-07T08:51:41.990089Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:51:41.990112Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:51:41.990136Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:51:41.990265Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:30021 TClient is connected to server localhost:30021 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:51:42.680187Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:51:42.706177Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T08:51:42.721027Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:51:42.924548Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:51:43.104734Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:51:43.204289Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:51:45.058187Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623833794806714:2406], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:51:45.058317Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:51:45.532598Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T08:51:45.623162Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T08:51:45.674024Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T08:51:45.744920Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T08:51:45.796203Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T08:51:45.861711Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T08:51:45.917677Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T08:51:46.037797Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623838089774678:2471], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:51:46.037886Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:51:46.038285Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501623838089774683:2474], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:51:46.043486Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-05-07T08:51:46.059883Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501623838089774685:2475], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T08:51:46.151928Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501623838089774736:3420] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:51:46.246459Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501623816614935993:2198];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:51:46.246553Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:51:47.761018Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480 2025-05-07T08:51:48.443176Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710673. Ctx: { TraceId: 01jtmz39jn51egq15vq6gqka2a, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZmI4MzYyYTUtNGVlZDYzMjMtYjFmNjc3YTktZjY1MzAyMTM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:51:48.452263Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710674. Ctx: { TraceId: 01jtmz39jn51egq15vq6gqka2a, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZmI4MzYyYTUtNGVlZDYzMjMtYjFmNjc3YTktZjY1MzAyMTM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:51:48.541933Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710675. Ctx: { TraceId: 01jtmz39n911sb1atp2rm8efhj, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OGU3ZWVjOGYtN2E0OTkzNzMtNzE3M2QxY2QtMWMzZmFiYTg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:51:48.559512Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710676. Ctx: { TraceId: 01jtmz39n911sb1atp2rm8efhj, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OGU3ZWVjOGYtN2E0OTkzNzMtNzE3M2QxY2QtMWMzZmFiYTg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:51:48.618590Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710677. Ctx: { TraceId: 01jtmz39r68ap03wfr2754ryda, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZmI4MzYyYTUtNGVlZDYzMjMtYjFmNjc3YTktZjY1MzAyMTM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:51:48.625264Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710678. Ctx: { TraceId: 01jtmz39r68ap03wfr2754ryda, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZmI4MzYyYTUtNGVlZDYzMjMtYjFmNjc3YTktZjY1MzAyMTM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:51:48.695946Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710679. 
Ctx: { TraceId: 01jtmz39t10t2fwjvv905fqdnn, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OGU3ZWVjOGYtN2E0OTkzNzMtNzE3M2QxY2QtMWMzZmFiYTg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:51:48.705381Z node 1 :KQP ... Ctx: { TraceId: 01jtmzet3w7bv39hpnkwy3nxh8, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NDBkZjM4YjYtYzE3ODYwMzYtMTM5NTgxNi05OGQ1OGFjNQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:58:05.836505Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715638. Ctx: { TraceId: 01jtmzet3w7bv39hpnkwy3nxh8, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NDBkZjM4YjYtYzE3ODYwMzYtMTM5NTgxNi05OGQ1OGFjNQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:58:05.900247Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715639. Ctx: { TraceId: 01jtmzet664z2mseke2a2t9ebx, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=M2IyNjUyZWUtODBkZDUxYmQtYjRiZTMyYWYtZGFiYWFmMTc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:58:05.905749Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715640. Ctx: { TraceId: 01jtmzet664z2mseke2a2t9ebx, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=M2IyNjUyZWUtODBkZDUxYmQtYjRiZTMyYWYtZGFiYWFmMTc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:58:05.942127Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715641. Ctx: { TraceId: 01jtmzet7h08vaxdbcpxrajv11, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NDBkZjM4YjYtYzE3ODYwMzYtMTM5NTgxNi05OGQ1OGFjNQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:58:05.948341Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715642. Ctx: { TraceId: 01jtmzet7h08vaxdbcpxrajv11, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NDBkZjM4YjYtYzE3ODYwMzYtMTM5NTgxNi05OGQ1OGFjNQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:58:05.984523Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715643. Ctx: { TraceId: 01jtmzet8s7c28vhzqjtf268m5, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=M2IyNjUyZWUtODBkZDUxYmQtYjRiZTMyYWYtZGFiYWFmMTc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:58:05.992787Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715644. Ctx: { TraceId: 01jtmzet8s7c28vhzqjtf268m5, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=M2IyNjUyZWUtODBkZDUxYmQtYjRiZTMyYWYtZGFiYWFmMTc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:58:06.027145Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715645. Ctx: { TraceId: 01jtmzeta660ee3p0qxp1ae8c8, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NDBkZjM4YjYtYzE3ODYwMzYtMTM5NTgxNi05OGQ1OGFjNQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:58:06.043094Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715646. 
Ctx: { TraceId: 01jtmzeta660ee3p0qxp1ae8c8, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NDBkZjM4YjYtYzE3ODYwMzYtMTM5NTgxNi05OGQ1OGFjNQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:58:06.093595Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715647. Ctx: { TraceId: 01jtmzetc43tty1y0rfrdybrrw, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=M2IyNjUyZWUtODBkZDUxYmQtYjRiZTMyYWYtZGFiYWFmMTc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:58:06.103302Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715648. Ctx: { TraceId: 01jtmzetc43tty1y0rfrdybrrw, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=M2IyNjUyZWUtODBkZDUxYmQtYjRiZTMyYWYtZGFiYWFmMTc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:58:06.149585Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715649. Ctx: { TraceId: 01jtmzete02534ssj7bhq9rna7, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NDBkZjM4YjYtYzE3ODYwMzYtMTM5NTgxNi05OGQ1OGFjNQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:58:06.156500Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715650. Ctx: { TraceId: 01jtmzete02534ssj7bhq9rna7, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NDBkZjM4YjYtYzE3ODYwMzYtMTM5NTgxNi05OGQ1OGFjNQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:58:06.202428Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715651. Ctx: { TraceId: 01jtmzetfc28be9zx8ds8qaqsm, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=M2IyNjUyZWUtODBkZDUxYmQtYjRiZTMyYWYtZGFiYWFmMTc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:58:06.213583Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715652. Ctx: { TraceId: 01jtmzetfc28be9zx8ds8qaqsm, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=M2IyNjUyZWUtODBkZDUxYmQtYjRiZTMyYWYtZGFiYWFmMTc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:58:06.253481Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715653. Ctx: { TraceId: 01jtmzeth61f27r7mg4y0pkdag, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NDBkZjM4YjYtYzE3ODYwMzYtMTM5NTgxNi05OGQ1OGFjNQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:58:06.262304Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715654. Ctx: { TraceId: 01jtmzeth61f27r7mg4y0pkdag, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NDBkZjM4YjYtYzE3ODYwMzYtMTM5NTgxNi05OGQ1OGFjNQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:58:06.302744Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715655. Ctx: { TraceId: 01jtmzetjq0c4crdj1h77pq6zp, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=M2IyNjUyZWUtODBkZDUxYmQtYjRiZTMyYWYtZGFiYWFmMTc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:58:06.309498Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715656. 
Ctx: { TraceId: 01jtmzetjq0c4crdj1h77pq6zp, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=M2IyNjUyZWUtODBkZDUxYmQtYjRiZTMyYWYtZGFiYWFmMTc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:58:06.356845Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976720657. Ctx: { TraceId: 01jtmzetmbevqa6r8zf4ttt55a, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NDBkZjM4YjYtYzE3ODYwMzYtMTM5NTgxNi05OGQ1OGFjNQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:58:06.363622Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976720658. Ctx: { TraceId: 01jtmzetmbevqa6r8zf4ttt55a, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NDBkZjM4YjYtYzE3ODYwMzYtMTM5NTgxNi05OGQ1OGFjNQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:58:06.403757Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976720659. Ctx: { TraceId: 01jtmzetnxf7j99zm9wwer6vbp, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=M2IyNjUyZWUtODBkZDUxYmQtYjRiZTMyYWYtZGFiYWFmMTc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:58:06.410747Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976720660. Ctx: { TraceId: 01jtmzetnxf7j99zm9wwer6vbp, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=M2IyNjUyZWUtODBkZDUxYmQtYjRiZTMyYWYtZGFiYWFmMTc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:58:06.454611Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976720661. Ctx: { TraceId: 01jtmzetqjf7cam4prgxydwmny, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NDBkZjM4YjYtYzE3ODYwMzYtMTM5NTgxNi05OGQ1OGFjNQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:58:06.460813Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976720662. Ctx: { TraceId: 01jtmzetqjf7cam4prgxydwmny, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NDBkZjM4YjYtYzE3ODYwMzYtMTM5NTgxNi05OGQ1OGFjNQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:58:06.492013Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976720663. Ctx: { TraceId: 01jtmzetrqadr79gpvb27q92pm, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=M2IyNjUyZWUtODBkZDUxYmQtYjRiZTMyYWYtZGFiYWFmMTc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:58:06.501296Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976720664. Ctx: { TraceId: 01jtmzetrqadr79gpvb27q92pm, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=M2IyNjUyZWUtODBkZDUxYmQtYjRiZTMyYWYtZGFiYWFmMTc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:58:06.535515Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976720665. Ctx: { TraceId: 01jtmzetsz3c19w3yabxv8qcf6, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NDBkZjM4YjYtYzE3ODYwMzYtMTM5NTgxNi05OGQ1OGFjNQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:58:06.544314Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976720666. 
Ctx: { TraceId: 01jtmzetsz3c19w3yabxv8qcf6, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NDBkZjM4YjYtYzE3ODYwMzYtMTM5NTgxNi05OGQ1OGFjNQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:58:06.584774Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976720667. Ctx: { TraceId: 01jtmzetvj4ahxm54t05s3bjev, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=M2IyNjUyZWUtODBkZDUxYmQtYjRiZTMyYWYtZGFiYWFmMTc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:58:06.593227Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976720668. Ctx: { TraceId: 01jtmzetvj4ahxm54t05s3bjev, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=M2IyNjUyZWUtODBkZDUxYmQtYjRiZTMyYWYtZGFiYWFmMTc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:58:06.641140Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976720669. Ctx: { TraceId: 01jtmzetx7856p3a7m7kcf53vx, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NDBkZjM4YjYtYzE3ODYwMzYtMTM5NTgxNi05OGQ1OGFjNQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:58:06.652106Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976720670. Ctx: { TraceId: 01jtmzetx7856p3a7m7kcf53vx, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NDBkZjM4YjYtYzE3ODYwMzYtMTM5NTgxNi05OGQ1OGFjNQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root finished with status: SUCCESS |91.1%| [TA] $(B)/ydb/core/tx/datashard/ut_kqp_errors/test-results/unittest/{meta.json ... 
results_accumulator.log} >> TExternalDataSourceTest::RemovingReferencesFromDataSources |91.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/backup_ut/unittest >> BackupRestoreS3::TestAllPrimitiveTypes-JSON_DOCUMENT [GOOD] Test command err: 2025-05-07T08:56:36.165253Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501625083615652654:2206];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:56:36.165373Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0021ad/r3tmp/tmpH6tSpD/pdisk_1.dat 2025-05-07T08:56:36.787551Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:56:36.795387Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:56:36.795513Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:56:36.842215Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 13513, node 1 2025-05-07T08:56:37.295880Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:56:37.295923Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:56:37.295937Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:56:37.296070Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:17764 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:56:37.761329Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:56:40.291189Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501625100795522782:2340], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:56:40.291336Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:56:40.291938Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501625100795522794:2343], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:56:40.294728Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:7501625083615652757:2118] Handle TEvProposeTransaction 2025-05-07T08:56:40.294771Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:7501625083615652757:2118] TxId# 281474976710658 ProcessProposeTransaction 2025-05-07T08:56:40.294821Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:7501625083615652757:2118] Cookie# 0 userReqId# "" txid# 281474976710658 SEND to# [1:7501625100795522797:2640] 2025-05-07T08:56:40.375531Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1520: Actor# [1:7501625100795522797:2640] txid# 281474976710658 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/Root/.metadata/workload_manager/pools" OperationType: ESchemeOpCreateResourcePool ModifyACL { Name: "default" DiffACL: "\n!\010\000\022\035\010\001\020\201\004\032\024all-users@well-known \003\n\031\010\000\022\025\010\001\020\201\004\032\014root@builtin \003" NewOwner: "metadata@system" } Internal: true CreateResourcePool { Name: "default" Properties { Properties { key: "concurrent_query_limit" value: "-1" } Properties { key: "database_load_cpu_threshold" value: "-1" } Properties { key: "query_cancel_after_seconds" value: "0" } Properties { key: "query_cpu_limit_percent_per_node" value: "-1" } Properties { key: "query_memory_limit_percent_per_node" value: "-1" } Properties { key: "queue_size" value: "-1" } Properties { key: "resource_weight" value: "-1" } Properties { key: "total_cpu_limit_percent_per_node" value: "-1" } } } } } UserToken: "\n\017metadata@system\022\000" DatabaseName: "/Root" 2025-05-07T08:56:40.375627Z node 1 :TX_PROXY DEBUG: schemereq.cpp:563: Actor# [1:7501625100795522797:2640] txid# 281474976710658 Bootstrap, UserSID: metadata@system CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-05-07T08:56:40.375646Z node 1 :TX_PROXY DEBUG: schemereq.cpp:572: Actor# [1:7501625100795522797:2640] txid# 281474976710658 Bootstrap, UserSID: metadata@system IsClusterAdministrator: 1 2025-05-07T08:56:40.378712Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1585: Actor# [1:7501625100795522797:2640] txid# 281474976710658 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P6 2025-05-07T08:56:40.378838Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1575: Actor# [1:7501625100795522797:2640] txid# 281474976710658 TEvNavigateKeySet requested from SchemeCache 2025-05-07T08:56:40.379073Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1408: Actor# [1:7501625100795522797:2640] txid# 281474976710658 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-05-07T08:56:40.379249Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1455: Actor# [1:7501625100795522797:2640] HANDLE EvNavigateKeySetResult, txid# 281474976710658 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-05-07T08:56:40.379308Z node 1 :TX_PROXY DEBUG: schemereq.cpp:102: Actor# [1:7501625100795522797:2640] txid# 281474976710658 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976710658 TabletId# 72057594046644480} 2025-05-07T08:56:40.379490Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1310: Actor# [1:7501625100795522797:2640] txid# 281474976710658 HANDLE EvClientConnected 2025-05-07T08:56:40.380987Z node 1 :FLAT_TX_SCHEMESHARD WARN: 
schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480 2025-05-07T08:56:40.389412Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1332: Actor# [1:7501625100795522797:2640] txid# 281474976710658 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976710658} 2025-05-07T08:56:40.389479Z node 1 :TX_PROXY DEBUG: schemereq.cpp:543: Actor# [1:7501625100795522797:2640] txid# 281474976710658 SEND to# [1:7501625100795522796:2344] Source {TEvProposeTransactionStatus txid# 281474976710658 Status# 53} 2025-05-07T08:56:40.421189Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501625100795522796:2344], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-05-07T08:56:40.490832Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:7501625083615652757:2118] Handle TEvProposeTransaction 2025-05-07T08:56:40.490863Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:7501625083615652757:2118] TxId# 281474976710659 ProcessProposeTransaction 2025-05-07T08:56:40.490906Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:7501625083615652757:2118] Cookie# 0 userReqId# "" txid# 281474976710659 SEND to# [1:7501625100795522893:2697] 2025-05-07T08:56:40.493758Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1520: Actor# [1:7501625100795522893:2697] txid# 281474976710659 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/Root/.metadata/workload_manager/pools" OperationType: ESchemeOpCreateResourcePool ModifyACL { Name: "default" DiffACL: "\n!\010\000\022\035\010\001\020\201\004\032\024all-users@well-known \003\n\031\010\000\022\025\010\001\020\201\004\032\014root@builtin \003" NewOwner: "metadata@system" } Internal: true CreateResourcePool { Name: "default" Properties { Properties { key: "concurrent_query_limit" value: "-1" } Properties { key: "database_load_cpu_threshold" value: "-1" } Properties { key: "query_cancel_after_seconds" value: "0" } Properties { key: "query_cpu_limit_percent_per_node" value: "-1" } Properties { key: "query_memory_limit_percent_per_node" value: "-1" } Properties { key: "queue_size" value: "-1" } Properties { key: "resource_weight" value: "-1" } Properties { key: "total_cpu_limit_percent_per_node" value: "-1" } } } } } UserToken: "\n\017metadata@system\022\000" DatabaseName: "/Root" 2025-05-07T08:56:40.493813Z node 1 :TX_PROXY DEBUG: schemereq.cpp:563: Actor# [1:7501625100795522893:2697] txid# 281474976710659 Bootstrap, UserSID: metadata@system CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-05-07T08:56:40.493827Z node 1 :TX_PROXY DEBUG: schemereq.cpp:572: Actor# [1:7501625100795522893:2697] txid# 281474976710659 Bootstrap, UserSID: metadata@system IsClusterAdministrator: 1 2025-05-07T08:56:40.494948Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1585: Actor# [1:7501625100795522893:2697] txid# 281474976710659 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P6 2025-05-07T08:56:40.495038Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1575: Actor# [1:7501625100795522893:2697] txid# 281474976710659 TEvNavigateKeySet requested from SchemeCache 2025-05-07T08:56:40.495230Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1408: Actor# [1:7501625100795522893:2697] txid# 281474976710659 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-05-07T08:56:40.495358Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1455: Actor# [1:7501625100795522893:2697] HANDLE EvNavigateKeySetResult, txid# 281474976710659 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-05-07T08:56:40.495401Z node 1 :TX_PROXY DEBUG: schemereq.cpp:102: Actor# [1:7501625100795522893:2697] txid# 281474976710659 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976710659 TabletId# 72057594046644480} 2025-05-07T08:56:40.495531Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1310: Actor# [1:7501625100795522893:2697] txid# 281474976710659 HANDLE EvClientConnected 2025-05-07T08:56:40.498898Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1332: Actor# 
[1:7501625100795522893:2697] txid# 28147497671 ... 0020:2403] [0] Send request: schemeShardId# 72057594046644480 2025-05-07T08:58:09.188675Z node 28 :TX_PROXY DEBUG: rpc_get_operation.cpp:220: [GetImport] [28:7501625480716470020:2403] [0] Handle TEvImport::TEvGetImportResponse: record# Entry { Id: 281474976715667 Status: SUCCESS Progress: PROGRESS_PREPARING ImportFromS3Settings { endpoint: "localhost:26721" scheme: HTTP bucket: "test_bucket" items { source_prefix: "JsonDocumentTable" destination_path: "/Root/JsonDocumentTable" } } StartTime { seconds: 1746608288 } } REQUEST: GET /test_bucket?prefix=JsonDocumentTable HTTP/1.1 HEADERS: Host: localhost:26721 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: BADEDF6D-E318-40F0-84E9-CD1E3F3732E1 amz-sdk-request: attempt=1 authorization: AWS4-HMAC-SHA256 Credential=test_key/20250507/us-east-1/s3/aws4_request, SignedHeaders=amz-sdk-invocation-id;amz-sdk-request;content-type;host;x-amz-api-version;x-amz-content-sha256;x-amz-date, Signature=b5858df59e822e14332a8536c0da0907e194dd06465ffe6a58bc7821fd45937c content-type: application/xml user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-api-version: 2006-03-01 x-amz-content-sha256: e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 x-amz-date: 20250507T085809Z S3_MOCK::HttpServeList: JsonDocumentTable 2025-05-07T08:58:09.191133Z node 28 :IMPORT DEBUG: schemeshard_import_getters.cpp:554: HandleChangefeeds TEvExternalStorage::TEvListObjectResponse: self# [28:7501625476421502709:2206], result# ListObjectsResult { } 2025-05-07T08:58:09.191200Z node 28 :IMPORT INFO: schemeshard_import_getters.cpp:587: Reply: self# [28:7501625476421502709:2206], success# 1, error# 2025-05-07T08:58:09.191304Z node 28 :IMPORT DEBUG: schemeshard_import__create.cpp:360: TImport::TTxProgress: DoExecute 2025-05-07T08:58:09.191326Z node 28 :IMPORT DEBUG: schemeshard_import__create.cpp:965: TImport::TTxProgress: OnSchemeResult: id# 281474976715667, itemIdx# 0, success# 1 2025-05-07T08:58:09.191717Z node 28 :IMPORT INFO: schemeshard_import__create.cpp:605: TImport::TTxProgress: Allocate txId: info# { Id: 281474976715667 Uid: '' Kind: S3 DomainPathId: [OwnerId: 72057594046644480, LocalPathId: 1] UserSID: '(empty maybe)' State: Waiting Issue: '' Items: 1 }, item# { Idx: 0 DstPathName: '/Root/JsonDocumentTable' DstPathId: State: CreateSchemeObject SubState: AllocateTxId WaitTxId: 0 Issue: '' } 2025-05-07T08:58:09.207729Z node 28 :IMPORT DEBUG: schemeshard_import__create.cpp:384: TImport::TTxProgress: DoComplete 2025-05-07T08:58:09.207891Z node 28 :IMPORT DEBUG: schemeshard_import__create.cpp:360: TImport::TTxProgress: DoExecute 2025-05-07T08:58:09.207907Z node 28 :IMPORT DEBUG: schemeshard_import__create.cpp:1180: TImport::TTxProgress: OnAllocateResult: txId# 281474976710760, id# 281474976715667 2025-05-07T08:58:09.207953Z node 28 :IMPORT INFO: schemeshard_import__create.cpp:417: TImport::TTxProgress: CreateTable propose: info# { Id: 281474976715667 Uid: '' Kind: S3 DomainPathId: [OwnerId: 72057594046644480, LocalPathId: 1] UserSID: '(empty maybe)' State: Waiting Issue: '' Items: 1 }, item# { Idx: 0 DstPathName: '/Root/JsonDocumentTable' DstPathId: State: CreateSchemeObject SubState: Proposed WaitTxId: 0 Issue: '' }, txId# 281474976710760 2025-05-07T08:58:09.208084Z node 28 :IMPORT DEBUG: schemeshard_import__create.cpp:384: TImport::TTxProgress: DoComplete 2025-05-07T08:58:09.209183Z node 28 :FLAT_TX_SCHEMESHARD 
WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710760:0, at schemeshard: 72057594046644480 2025-05-07T08:58:09.212112Z node 28 :IMPORT DEBUG: schemeshard_import__create.cpp:360: TImport::TTxProgress: DoExecute 2025-05-07T08:58:09.212147Z node 28 :IMPORT DEBUG: schemeshard_import__create.cpp:1267: TImport::TTxProgress: OnModifyResult: txId# 281474976710760, status# StatusAccepted 2025-05-07T08:58:09.212309Z node 28 :IMPORT INFO: schemeshard_import__create.cpp:619: TImport::TTxProgress: Wait for completion: info# { Id: 281474976715667 Uid: '' Kind: S3 DomainPathId: [OwnerId: 72057594046644480, LocalPathId: 1] UserSID: '(empty maybe)' State: Waiting Issue: '' Items: 1 }, item# { Idx: 0 DstPathName: '/Root/JsonDocumentTable' DstPathId: [OwnerId: 72057594046644480, LocalPathId: 9] State: CreateSchemeObject SubState: Subscribed WaitTxId: 281474976710760 Issue: '' } 2025-05-07T08:58:09.218674Z node 28 :IMPORT DEBUG: schemeshard_import__create.cpp:384: TImport::TTxProgress: DoComplete 2025-05-07T08:58:09.280938Z node 28 :IMPORT DEBUG: schemeshard_import__create.cpp:360: TImport::TTxProgress: DoExecute 2025-05-07T08:58:09.280967Z node 28 :IMPORT DEBUG: schemeshard_import__create.cpp:1425: TImport::TTxProgress: OnNotifyResult: txId# 281474976710760 2025-05-07T08:58:09.281089Z node 28 :IMPORT INFO: schemeshard_import__create.cpp:605: TImport::TTxProgress: Allocate txId: info# { Id: 281474976715667 Uid: '' Kind: S3 DomainPathId: [OwnerId: 72057594046644480, LocalPathId: 1] UserSID: '(empty maybe)' State: Waiting Issue: '' Items: 1 }, item# { Idx: 0 DstPathName: '/Root/JsonDocumentTable' DstPathId: [OwnerId: 72057594046644480, LocalPathId: 9] State: Transferring SubState: AllocateTxId WaitTxId: 0 Issue: '' } 2025-05-07T08:58:09.282894Z node 28 :IMPORT DEBUG: schemeshard_import__create.cpp:384: TImport::TTxProgress: DoComplete 2025-05-07T08:58:09.282987Z node 28 :IMPORT DEBUG: schemeshard_import__create.cpp:360: TImport::TTxProgress: DoExecute 2025-05-07T08:58:09.283001Z node 28 :IMPORT DEBUG: schemeshard_import__create.cpp:1180: TImport::TTxProgress: OnAllocateResult: txId# 281474976710761, id# 281474976715667 2025-05-07T08:58:09.283052Z node 28 :IMPORT INFO: schemeshard_import__create.cpp:496: TImport::TTxProgress: Restore propose: info# { Id: 281474976715667 Uid: '' Kind: S3 DomainPathId: [OwnerId: 72057594046644480, LocalPathId: 1] UserSID: '(empty maybe)' State: Waiting Issue: '' Items: 1 }, item# { Idx: 0 DstPathName: '/Root/JsonDocumentTable' DstPathId: [OwnerId: 72057594046644480, LocalPathId: 9] State: Transferring SubState: Proposed WaitTxId: 0 Issue: '' }, txId# 281474976710761 2025-05-07T08:58:09.283792Z node 28 :IMPORT DEBUG: schemeshard_import__create.cpp:384: TImport::TTxProgress: DoComplete 2025-05-07T08:58:09.284255Z node 28 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpRestore, opId: 281474976710761:0, at schemeshard: 72057594046644480 2025-05-07T08:58:09.286437Z node 28 :IMPORT DEBUG: schemeshard_import__create.cpp:360: TImport::TTxProgress: DoExecute 2025-05-07T08:58:09.286460Z node 28 :IMPORT DEBUG: schemeshard_import__create.cpp:1267: TImport::TTxProgress: OnModifyResult: txId# 281474976710761, status# StatusAccepted 2025-05-07T08:58:09.286561Z node 28 :IMPORT INFO: schemeshard_import__create.cpp:619: TImport::TTxProgress: Wait for completion: info# { Id: 281474976715667 
Uid: '' Kind: S3 DomainPathId: [OwnerId: 72057594046644480, LocalPathId: 1] UserSID: '(empty maybe)' State: Waiting Issue: '' Items: 1 }, item# { Idx: 0 DstPathName: '/Root/JsonDocumentTable' DstPathId: [OwnerId: 72057594046644480, LocalPathId: 9] State: Transferring SubState: Subscribed WaitTxId: 281474976710761 Issue: '' } 2025-05-07T08:58:09.288232Z node 28 :IMPORT DEBUG: schemeshard_import__create.cpp:384: TImport::TTxProgress: DoComplete REQUEST: HEAD /test_bucket/JsonDocumentTable/data_00.csv HTTP/1.1 HEADERS: Host: localhost:26721 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 7A5DBF78-5261-4280-9AAD-782FFA85DACB amz-sdk-request: attempt=1 authorization: AWS4-HMAC-SHA256 Credential=test_key/20250507/us-east-1/s3/aws4_request, SignedHeaders=amz-sdk-invocation-id;amz-sdk-request;content-type;host;x-amz-api-version;x-amz-content-sha256;x-amz-date, Signature=5f60e42ea80910ee852cf697c4c4fdbbb8a63c469bea10e3a2b5f3b3836f3bae content-type: application/xml user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-api-version: 2006-03-01 x-amz-content-sha256: e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 x-amz-date: 20250507T085809Z S3_MOCK::HttpServeRead: /test_bucket/JsonDocumentTable/data_00.csv / 32 REQUEST: GET /test_bucket/JsonDocumentTable/data_00.csv HTTP/1.1 HEADERS: Host: localhost:26721 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 80F51AEA-1858-4113-B1E3-4BAD640F6733 amz-sdk-request: attempt=1 authorization: AWS4-HMAC-SHA256 Credential=test_key/20250507/us-east-1/s3/aws4_request, SignedHeaders=amz-sdk-invocation-id;amz-sdk-request;content-type;host;range;x-amz-api-version;x-amz-content-sha256;x-amz-date, Signature=5e3a5f814e5887ec2856a9d3eef5d58fbbbf9bfaf8bfa36686e770ede66a1be9 content-type: application/xml range: bytes=0-31 user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-api-version: 2006-03-01 x-amz-content-sha256: e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 x-amz-date: 20250507T085809Z S3_MOCK::HttpServeRead: /test_bucket/JsonDocumentTable/data_00.csv / 32 2025-05-07T08:58:09.406306Z node 28 :IMPORT DEBUG: schemeshard_import__create.cpp:360: TImport::TTxProgress: DoExecute 2025-05-07T08:58:09.406344Z node 28 :IMPORT DEBUG: schemeshard_import__create.cpp:1425: TImport::TTxProgress: OnNotifyResult: txId# 281474976710761 2025-05-07T08:58:09.410955Z node 28 :IMPORT DEBUG: schemeshard_import__create.cpp:384: TImport::TTxProgress: DoComplete 2025-05-07T08:58:09.599648Z node 28 :TX_PROXY DEBUG: rpc_operation_request_base.h:50: [GetImport] [28:7501625480716470221:2409] [0] Resolve database: name# /Root 2025-05-07T08:58:09.600096Z node 28 :TX_PROXY DEBUG: rpc_operation_request_base.h:66: [GetImport] [28:7501625480716470221:2409] [0] Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult: request# { ErrorCount: 0 DatabaseName: /Root DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root TableId: [72057594046644480:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: 
(empty maybe) Users: [] Groups: [] } }] } 2025-05-07T08:58:09.600115Z node 28 :TX_PROXY DEBUG: rpc_operation_request_base.h:106: [GetImport] [28:7501625480716470221:2409] [0] Send request: schemeShardId# 72057594046644480 2025-05-07T08:58:09.600843Z node 28 :TX_PROXY DEBUG: rpc_get_operation.cpp:220: [GetImport] [28:7501625480716470221:2409] [0] Handle TEvImport::TEvGetImportResponse: record# Entry { Id: 281474976715667 Status: SUCCESS Progress: PROGRESS_DONE ImportFromS3Settings { endpoint: "localhost:26721" scheme: HTTP bucket: "test_bucket" items { source_prefix: "JsonDocumentTable" destination_path: "/Root/JsonDocumentTable" } } StartTime { seconds: 1746608288 } EndTime { seconds: 1746608289 } } 2025-05-07T08:58:09.718715Z node 28 :TX_PROXY DEBUG: proxy_impl.cpp:353: actor# [28:7501625450651697239:2112] Handle TEvExecuteKqpTransaction 2025-05-07T08:58:09.718755Z node 28 :TX_PROXY DEBUG: proxy_impl.cpp:342: actor# [28:7501625450651697239:2112] TxId# 281474976715668 ProcessProposeKqpTransaction 2025-05-07T08:58:09.720280Z node 28 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715668. Ctx: { TraceId: 01jtmzext75xx8gheeep515c3s, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=28&id=NjNmMTAzN2QtM2NlYTUxODktZWUwYzJlZjYtZTAyOThiY2M=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root >> Cdc::VirtualTimestamps[TopicRunner] [GOOD] >> Cdc::Write[PqRunner] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_write/unittest >> DataShardWrite::ImmediateAndPlannedCommittedOpsRace [GOOD] Test command err: 2025-05-07T08:57:30.891844Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:105:2151], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:57:30.892076Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:57:30.892377Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0034ca/r3tmp/tmpfgmU45/pdisk_1.dat 2025-05-07T08:57:31.467073Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-05-07T08:57:31.624463Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:57:31.688307Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:57:31.688500Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:57:31.703461Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:57:31.815665Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-05-07T08:57:31.874278Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3098: StateInit, received event# 268828672, Sender [1:656:2563], Recipient [1:665:2569]: NKikimr::TEvTablet::TEvBoot 2025-05-07T08:57:31.875636Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3098: StateInit, received event# 268828673, Sender [1:656:2563], Recipient [1:665:2569]: NKikimr::TEvTablet::TEvRestored 2025-05-07T08:57:31.876201Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:665:2569] 2025-05-07T08:57:31.876494Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-05-07T08:57:31.888305Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3111: StateInactive, received event# 268828684, Sender [1:656:2563], Recipient [1:665:2569]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-05-07T08:57:31.964758Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-05-07T08:57:31.964920Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-05-07T08:57:31.975159Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-05-07T08:57:31.975299Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-05-07T08:57:31.975386Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-05-07T08:57:31.975897Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-05-07T08:57:31.976101Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-05-07T08:57:31.976241Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state 
actor id [1:681:2569] in generation 1 2025-05-07T08:57:31.990684Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-05-07T08:57:32.046941Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-05-07T08:57:32.047202Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-05-07T08:57:32.047322Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:683:2579] 2025-05-07T08:57:32.047393Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-05-07T08:57:32.047453Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-05-07T08:57:32.047493Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T08:57:32.047724Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 2146435072, Sender [1:665:2569], Recipient [1:665:2569]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-05-07T08:57:32.047787Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3155: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-05-07T08:57:32.048126Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-05-07T08:57:32.048239Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-05-07T08:57:32.048377Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T08:57:32.048426Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-05-07T08:57:32.048482Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-05-07T08:57:32.048526Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-05-07T08:57:32.048581Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-05-07T08:57:32.048623Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-05-07T08:57:32.048688Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T08:57:32.048883Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269877761, Sender [1:672:2573], Recipient [1:665:2569]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-05-07T08:57:32.048932Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3166: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-05-07T08:57:32.048994Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:661:2566], serverId# [1:672:2573], sessionId# [0:0:0] 2025-05-07T08:57:32.049481Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269549568, Sender [1:410:2405], Recipient [1:672:2573] 2025-05-07T08:57:32.049565Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3136: StateWork, processing 
event TEvDataShard::TEvProposeTransaction 2025-05-07T08:57:32.049684Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-05-07T08:57:32.049950Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-05-07T08:57:32.062201Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-05-07T08:57:32.062397Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-05-07T08:57:32.062466Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-05-07T08:57:32.062540Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx 2025-05-07T08:57:32.062606Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx 2025-05-07T08:57:32.062657Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-05-07T08:57:32.063013Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayCompleteNoMoreRestarts 2025-05-07T08:57:32.063067Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit StoreSchemeTx 2025-05-07T08:57:32.063110Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit FinishPropose 2025-05-07T08:57:32.063150Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-05-07T08:57:32.063221Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayComplete 2025-05-07T08:57:32.063259Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit FinishPropose 2025-05-07T08:57:32.063299Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit WaitForPlan 2025-05-07T08:57:32.063337Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit WaitForPlan 2025-05-07T08:57:32.063371Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:281474976715657] at 72075186224037888 is not ready to execute on unit WaitForPlan 2025-05-07T08:57:32.065094Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269746185, Sender [1:684:2580], Recipient [1:665:2569]: NKikimr::TEvTxProxySchemeCache::TEvWatchNotifyUpdated 2025-05-07T08:57:32.065155Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-05-07T08:57:32.078667Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-05-07T08:57:32.078755Z node 1 
:TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-05-07T08:57:32.078799Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-05-07T08:57:32.078884Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 281474976715657 at tablet 72075186224037888 send to client, exec latency: 0 ms, propose latency: 0 ms, status: PREPARED 2025-05-07T08:57:32.078987Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-05-07T08:57:32.260697Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269877761, Sender [1:699:2589], Recipient [1:665:2569]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-05-07T08:57:32.260798Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3166: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-05-07T08:57:32.260845Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075 ... outreadset.cpp:150: Receive RS Ack at 72075186224037888 source 72075186224037888 dest 72075186224037889 consumer 72075186224037889 txId 1234567890011 ... observed 2 more commits after readset unblock ... unblocking NKikimr::TEvBlobStorage::TEvPut from TABLET_REQ_WRITE_LOG to BS_PROXY_ACTOR ... unblocking NKikimr::TEvBlobStorage::TEvPut from TABLET_REQ_WRITE_LOG to BS_PROXY_ACTOR ... unblocking NKikimr::TEvBlobStorage::TEvPut from TABLET_REQ_WRITE_LOG to BS_PROXY_ACTOR ... unblocking NKikimr::TEvBlobStorage::TEvPut from TABLET_REQ_WRITE_LOG to BS_PROXY_ACTOR 2025-05-07T08:58:11.673879Z node 7 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T08:58:11.674003Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [2000:1234567890012] at 72075186224037888 on unit StoreAndSendWriteOutRS 2025-05-07T08:58:11.674082Z node 7 :TX_DATASHARD DEBUG: datashard.cpp:3990: Send RS 2 at 72075186224037888 from 72075186224037888 to 72075186224037889 txId 1234567890012 2025-05-07T08:58:11.674607Z node 7 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T08:58:11.674644Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [2000:1234567890012] at 72075186224037888 on unit CompleteWrite 2025-05-07T08:58:11.674714Z node 7 :TX_DATASHARD DEBUG: datashard.cpp:826: Complete write [2000 : 1234567890012] from 72075186224037888 at tablet 72075186224037888 send result to client [7:837:2689] 2025-05-07T08:58:11.674783Z node 7 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T08:58:11.674913Z node 7 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 72075186224037888 2025-05-07T08:58:11.675116Z node 7 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T08:58:11.675148Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1500:1234567890011] at 72075186224037888 on unit CompleteWrite 2025-05-07T08:58:11.675222Z node 7 :TX_DATASHARD DEBUG: datashard.cpp:826: Complete write [1500 : 1234567890011] from 72075186224037888 at tablet 72075186224037888 send result to client [7:799:2663] 
2025-05-07T08:58:11.675310Z node 7 :TX_DATASHARD DEBUG: datashard.cpp:563: Send delayed Ack RS Ack at 72075186224037888 {TEvReadSet step# 1500 txid# 1234567890011 TabletSource# 72075186224037889 TabletDest# 72075186224037888 SetTabletConsumer# 72075186224037888 Flags# 0 Seqno# 1} 2025-05-07T08:58:11.675365Z node 7 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T08:58:11.675546Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269287425, Sender [7:695:2584], Recipient [7:698:2586]: {TEvReadSet step# 2000 txid# 1234567890012 TabletSource# 72075186224037888 TabletDest# 72075186224037889 SetTabletProducer# 72075186224037888 ReadSet.Size()# 2 Seqno# 2 Flags# 0} 2025-05-07T08:58:11.675627Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3149: StateWork, processing event TEvTxProcessing::TEvReadSet 2025-05-07T08:58:11.675689Z node 7 :TX_DATASHARD DEBUG: datashard.cpp:3359: Receive RS at 72075186224037889 source 72075186224037888 dest 72075186224037889 producer 72075186224037888 txId 1234567890012 2025-05-07T08:58:11.675815Z node 7 :TX_DATASHARD DEBUG: datashard__readset.cpp:15: TTxReadSet::Execute at 72075186224037889 got read set: {TEvReadSet step# 2000 txid# 1234567890012 TabletSource# 72075186224037888 TabletDest# 72075186224037889 SetTabletProducer# 72075186224037888 ReadSet.Size()# 2 Seqno# 2 Flags# 0} 2025-05-07T08:58:11.675916Z node 7 :TX_DATASHARD TRACE: operation.cpp:67: Filled readset for [2000:1234567890012] from=72075186224037888 to=72075186224037889origin=72075186224037888 2025-05-07T08:58:11.676010Z node 7 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 72075186224037889 2025-05-07T08:58:11.676129Z node 7 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T08:58:11.676163Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:8] at 72075186224037888 on unit FinishProposeWrite 2025-05-07T08:58:11.676241Z node 7 :TX_DATASHARD TRACE: finish_propose_write_unit.cpp:163: Propose transaction complete txid 8 at tablet 72075186224037888 send to client, propose latency: 3 ms, status: STATUS_COMPLETED 2025-05-07T08:58:11.676359Z node 7 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T08:58:11.676519Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269287938, Sender [7:695:2584], Recipient [7:698:2586]: {TEvReadSet step# 1500 txid# 1234567890011 TabletSource# 72075186224037889 TabletDest# 72075186224037888 SetTabletConsumer# 72075186224037888 Flags# 0 Seqno# 1} 2025-05-07T08:58:11.676561Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3150: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-05-07T08:58:11.676624Z node 7 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 72075186224037889 source 72075186224037889 dest 72075186224037888 consumer 72075186224037888 txId 1234567890011 2025-05-07T08:58:11.676768Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 2146435072, Sender [7:698:2586], Recipient [7:698:2586]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-05-07T08:58:11.676812Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3155: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-05-07T08:58:11.676895Z node 7 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: 
TTxProgressTransaction::Execute at 72075186224037889 2025-05-07T08:58:11.676976Z node 7 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 active 1 active planned 1 immediate 0 planned 1 2025-05-07T08:58:11.677070Z node 7 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [2000:1234567890012] at 72075186224037889 for LoadAndWaitInRS 2025-05-07T08:58:11.677124Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [2000:1234567890012] at 72075186224037889 on unit LoadAndWaitInRS 2025-05-07T08:58:11.677174Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [2000:1234567890012] at 72075186224037889 is Executed 2025-05-07T08:58:11.677255Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [2000:1234567890012] at 72075186224037889 executing on unit LoadAndWaitInRS 2025-05-07T08:58:11.677309Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [2000:1234567890012] at 72075186224037889 to execution unit ExecuteWrite 2025-05-07T08:58:11.677355Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [2000:1234567890012] at 72075186224037889 on unit ExecuteWrite 2025-05-07T08:58:11.677394Z node 7 :TX_DATASHARD DEBUG: execute_write_unit.cpp:245: Executing write operation for [2000:1234567890012] at 72075186224037889 2025-05-07T08:58:11.677458Z node 7 :TX_DATASHARD TRACE: execute_write_unit.cpp:384: Operation [2000:1234567890012] at 72075186224037889 aborting because locks are not valid 2025-05-07T08:58:11.677531Z node 7 :GLOBAL WARN: log.cpp:784: fline=events.h:103;event=ev_write_error;status=STATUS_LOCKS_BROKEN;details=Operation is aborting because locks are not valid;tx_id=1234567890012; 2025-05-07T08:58:11.677622Z node 7 :TX_DATASHARD INFO: datashard_write_operation.cpp:684: Write transaction 1234567890012 at 72075186224037889 has an error: Operation is aborting because locks are not valid 2025-05-07T08:58:11.677685Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [2000:1234567890012] at 72075186224037889 is Executed 2025-05-07T08:58:11.677731Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [2000:1234567890012] at 72075186224037889 executing on unit ExecuteWrite 2025-05-07T08:58:11.677773Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [2000:1234567890012] at 72075186224037889 to execution unit CompleteWrite 2025-05-07T08:58:11.677818Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [2000:1234567890012] at 72075186224037889 on unit CompleteWrite 2025-05-07T08:58:11.678149Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [2000:1234567890012] at 72075186224037889 is DelayComplete 2025-05-07T08:58:11.678215Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [2000:1234567890012] at 72075186224037889 executing on unit CompleteWrite 2025-05-07T08:58:11.678263Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [2000:1234567890012] at 72075186224037889 to execution unit CompletedOperations 2025-05-07T08:58:11.678305Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [2000:1234567890012] at 72075186224037889 on unit CompletedOperations 2025-05-07T08:58:11.678348Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [2000:1234567890012] at 72075186224037889 is Executed 2025-05-07T08:58:11.678376Z node 7 
:TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [2000:1234567890012] at 72075186224037889 executing on unit CompletedOperations 2025-05-07T08:58:11.678409Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [2000:1234567890012] at 72075186224037889 has finished 2025-05-07T08:58:11.678469Z node 7 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-05-07T08:58:11.678531Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 72075186224037889 2025-05-07T08:58:11.678576Z node 7 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037889 has no attached operations 2025-05-07T08:58:11.678626Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 72075186224037889 2025-05-07T08:58:11.679328Z node 7 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-05-07T08:58:11.679371Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [2000:1234567890012] at 72075186224037889 on unit CompleteWrite 2025-05-07T08:58:11.679417Z node 7 :TX_DATASHARD DEBUG: datashard.cpp:826: Complete write [2000 : 1234567890012] from 72075186224037889 at tablet 72075186224037889 send result to client [7:837:2689] 2025-05-07T08:58:11.679484Z node 7 :TX_DATASHARD DEBUG: datashard.cpp:563: Send delayed Ack RS Ack at 72075186224037889 {TEvReadSet step# 2000 txid# 1234567890012 TabletSource# 72075186224037888 TabletDest# 72075186224037889 SetTabletConsumer# 72075186224037889 Flags# 0 Seqno# 2} 2025-05-07T08:58:11.679531Z node 7 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-05-07T08:58:11.679698Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269287938, Sender [7:698:2586], Recipient [7:695:2584]: {TEvReadSet step# 2000 txid# 1234567890012 TabletSource# 72075186224037888 TabletDest# 72075186224037889 SetTabletConsumer# 72075186224037889 Flags# 0 Seqno# 2} 2025-05-07T08:58:11.679733Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3150: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-05-07T08:58:11.679770Z node 7 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 72075186224037888 source 72075186224037888 dest 72075186224037889 consumer 72075186224037889 txId 1234567890012 |91.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TExternalDataSourceTest::ReplaceExternalDataSourceIfNotExistsShouldFailIfFeatureFlagIsNotSet >> TSchemeShardTTLTests::AlterTableShouldSuccess >> TExternalDataSourceTest::ReplaceExternalDataSourceIfNotExists >> AsyncIndexChangeExchange::ShouldRemoveRecordsAfterDroppingIndex [GOOD] >> AsyncIndexChangeExchange::ShouldRemoveRecordsAfterCancelIndexBuild |91.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest |91.1%| [TA] $(B)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} >> Balancing::Balancing_OneTopic_TopicApi [GOOD] >> Balancing::Balancing_OneTopic_PQv1 |91.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest |91.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/fq/libs/checkpointing/ut/ydb-core-fq-libs-checkpointing-ut |91.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/fq/libs/checkpointing/ut/ydb-core-fq-libs-checkpointing-ut |91.1%| [TA] {RESULT} $(B)/ydb/core/tx/datashard/ut_kqp_errors/test-results/unittest/{meta.json ... results_accumulator.log} >> TopicAutoscaling::PartitionSplit_PQv1 [GOOD] >> KqpWorkloadService::TestHandlerActorCleanup [GOOD] >> TSchemeShardColumnTableTTL::AlterColumnTable_Negative >> TSchemeShardTTLTests::AlterTableShouldSuccess [GOOD] >> TopicAutoscaling::PartitionSplit_PreferedPartition_BeforeAutoscaleAwareSDK >> BackupRestore::TestAllPrimitiveTypes-DATE32 [GOOD] >> BackupRestore::TestAllPrimitiveTypes-DATETIME64 >> TSchemeShardTTLTests::ConditionalErase >> DataShardWrite::DoubleWriteUncommittedThenDoubleReadWithCommit [GOOD] >> TestYmqHttpProxy::TestCreateQueueWithAllAttributes [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-DATE32 [GOOD] >> TestYmqHttpProxy::TestCreateQueueWithWrongAttribute [GOOD] >> TExternalDataSourceTest::ReplaceExternalDataSourceIfNotExistsShouldFailIfFeatureFlagIsNotSet [GOOD] >> TExternalDataSourceTest::RemovingReferencesFromDataSources [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-DATETIME64 >> TExternalDataSourceTest::ReplaceExternalDataSourceIfNotExists [GOOD] >> TestKinesisHttpProxy::TestRequestNoAuthorization [GOOD] >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_ttl_DyNumber-pk_types8-all_types8-index8-DyNumber--] >> TSchemeShardTTLTests::CreateTableShouldFailOnBeforeEpochTTL >> TestYmqHttpProxy::BillingRecordsForJsonApi ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLTests::AlterTableShouldSuccess [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140] 2025-05-07T08:58:15.224498Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:58:15.224596Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:58:15.224640Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:58:15.224724Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:58:15.224769Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:58:15.224811Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:58:15.224876Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:58:15.224966Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:58:15.225735Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:58:15.226128Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:58:15.318254Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:58:15.318333Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:58:15.339638Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:58:15.339893Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:58:15.340104Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:58:15.350616Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:58:15.351030Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:58:15.351996Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:15.352232Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:58:15.356159Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:58:15.357911Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:58:15.358017Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:58:15.358114Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:58:15.358171Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:58:15.358224Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:58:15.358585Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:58:15.368060Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: 
[1:15:2062] 2025-05-07T08:58:15.554087Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:58:15.554356Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:15.554616Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:58:15.554840Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:58:15.555094Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:15.560104Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:15.560328Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:58:15.560604Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:15.560683Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:58:15.560760Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:58:15.560807Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:58:15.569003Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:15.569131Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:58:15.569186Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:58:15.578118Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:15.578226Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:15.578299Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:58:15.578380Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:58:15.587399Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:58:15.596215Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:58:15.596542Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:58:15.597787Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:15.598010Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:58:15.598087Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:58:15.598470Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:58:15.598535Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:58:15.598764Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:58:15.598856Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:58:15.602285Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:58:15.602368Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:58:15.602608Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:58:15.602656Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 
shard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:58:16.125120Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 104, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-05-07T08:58:16.125397Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:58:16.125443Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:205:2207], at schemeshard: 72057594046678944, txId: 104, path id: 2 2025-05-07T08:58:16.125728Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 104:0, at schemeshard: 72057594046678944 2025-05-07T08:58:16.125788Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1036: NTableState::TProposedWaitParts operationId# 104:0 ProgressState at tablet: 72057594046678944 2025-05-07T08:58:16.126661Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 5 PathOwnerId: 72057594046678944, cookie: 104 2025-05-07T08:58:16.126782Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 5 PathOwnerId: 72057594046678944, cookie: 104 2025-05-07T08:58:16.126822Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 104 2025-05-07T08:58:16.126865Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 104, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 5 2025-05-07T08:58:16.126904Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-05-07T08:58:16.127013Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 104, ready parts: 0/1, is published: true 2025-05-07T08:58:16.137680Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 2025-05-07T08:58:16.150487Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6245: Handle TEvProposeTransactionResult, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 104 Step: 5000004 OrderId: 104 ExecLatency: 0 ProposeLatency: 4 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 1093 } } 2025-05-07T08:58:16.150548Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation FindRelatedPartByTabletId, TxId: 104, tablet: 72075186233409546, partId: 0 2025-05-07T08:58:16.150645Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 104:0, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 104 Step: 5000004 OrderId: 104 ExecLatency: 
0 ProposeLatency: 4 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 1093 } } 2025-05-07T08:58:16.150725Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_part.cpp:108: HandleReply TEvDataShard::TEvProposeTransactionResult Ignore message: tablet# 72057594046678944, ev# TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 104 Step: 5000004 OrderId: 104 ExecLatency: 0 ProposeLatency: 4 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 1093 } } FAKE_COORDINATOR: Erasing txId 104 2025-05-07T08:58:16.151643Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5472: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 305 RawX2: 4294969588 } Origin: 72075186233409546 State: 2 TxId: 104 Step: 0 Generation: 2 2025-05-07T08:58:16.151706Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation FindRelatedPartByTabletId, TxId: 104, tablet: 72075186233409546, partId: 0 2025-05-07T08:58:16.151844Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 104:0, at schemeshard: 72057594046678944, message: Source { RawX1: 305 RawX2: 4294969588 } Origin: 72075186233409546 State: 2 TxId: 104 Step: 0 Generation: 2 2025-05-07T08:58:16.151894Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1005: NTableState::TProposedWaitParts operationId# 104:0 HandleReply TEvSchemaChanged at tablet: 72057594046678944 2025-05-07T08:58:16.152002Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1009: NTableState::TProposedWaitParts operationId# 104:0 HandleReply TEvSchemaChanged at tablet: 72057594046678944 message: Source { RawX1: 305 RawX2: 4294969588 } Origin: 72075186233409546 State: 2 TxId: 104 Step: 0 Generation: 2 2025-05-07T08:58:16.152076Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:655: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 104:0, shardIdx: 72057594046678944:1, datashard: 72075186233409546, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:16.152116Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:674: all shard schema changes has been received, operationId: 104:0, at schemeshard: 72057594046678944 2025-05-07T08:58:16.152153Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:686: send schema changes ack message, operation: 104:0, datashard: 72075186233409546, at schemeshard: 72057594046678944 2025-05-07T08:58:16.152191Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 104:0 129 -> 240 2025-05-07T08:58:16.158182Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 104:0, at schemeshard: 72057594046678944 2025-05-07T08:58:16.158686Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 104:0, at schemeshard: 72057594046678944 2025-05-07T08:58:16.158979Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 104:0, at schemeshard: 72057594046678944 2025-05-07T08:58:16.159063Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046678944] TDone opId# 104:0 ProgressState 2025-05-07T08:58:16.159179Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#104:0 progress is 1/1 2025-05-07T08:58:16.159214Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-05-07T08:58:16.159262Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#104:0 progress is 1/1 2025-05-07T08:58:16.159311Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-05-07T08:58:16.159355Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 104, ready parts: 1/1, is published: true 2025-05-07T08:58:16.159427Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:334:2313] message: TxId: 104 2025-05-07T08:58:16.159481Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-05-07T08:58:16.159533Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 104:0 2025-05-07T08:58:16.159565Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 104:0 2025-05-07T08:58:16.159676Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-05-07T08:58:16.161655Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 104: got EvNotifyTxCompletionResult 2025-05-07T08:58:16.161735Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 104: satisfy waiter [1:441:2413] TestWaitNotification: OK eventTxId 104 2025-05-07T08:58:16.162339Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/TTLEnabledTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T08:58:16.162616Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/TTLEnabledTable" took 257us result status StatusSuccess 2025-05-07T08:58:16.163032Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/TTLEnabledTable" PathDescription { Self { Name: "TTLEnabledTable" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 3 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "TTLEnabledTable" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "modified_at" Type: "Timestamp" TypeId: 50 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 3 TTLSettings { Disabled { } } IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 
IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 1 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944
>> TestYmqHttpProxy::TestCreateQueueWithTags
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_external_data_source/unittest >> TExternalDataSourceTest::ReplaceExternalDataSourceIfNotExists [GOOD]
Test command err:
Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2141] Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:127:2058] recipient: [1:109:2141] 2025-05-07T08:58:16.307712Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:58:16.307861Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:58:16.307913Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:58:16.307958Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:58:16.317885Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:58:16.318022Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:58:16.318135Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:58:16.318215Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:58:16.318891Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# ,
AvailableExternalDataSources# 2025-05-07T08:58:16.333764Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:58:16.450509Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7610: Got new config: QueryServiceConfig { AvailableExternalDataSources: "ObjectStorage" AvailableExternalDataSources: "ClickHouse" AvailableExternalDataSources: "PostgreSQL" AvailableExternalDataSources: "MySQL" AvailableExternalDataSources: "Ydb" AvailableExternalDataSources: "YT" AvailableExternalDataSources: "Greenplum" AvailableExternalDataSources: "MsSQLServer" AvailableExternalDataSources: "Oracle" AvailableExternalDataSources: "Logging" AvailableExternalDataSources: "Solomon" } 2025-05-07T08:58:16.450614Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:58:16.451582Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# ObjectStorage, ClickHouse, PostgreSQL, MySQL, Ydb, YT, Greenplum, MsSQLServer, Oracle, Logging, Solomon 2025-05-07T08:58:16.495346Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:58:16.495984Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:58:16.496193Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:58:16.523278Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:58:16.523542Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:58:16.528431Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:16.528887Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:58:16.567920Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:58:16.576816Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:58:16.576919Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:58:16.576986Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:58:16.577058Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:58:16.577245Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:58:16.584305Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:58:16.600086Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 
Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:239:2058] recipient: [1:15:2062] 2025-05-07T08:58:16.780098Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:58:16.780335Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:16.780553Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:58:16.780764Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:58:16.780828Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:16.791704Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:16.791874Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:58:16.792067Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:16.792175Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:58:16.792233Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:58:16.792267Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:58:16.795186Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:16.795261Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:58:16.795315Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:58:16.798220Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:16.798289Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:16.798346Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 
72057594046678944 2025-05-07T08:58:16.798407Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:58:16.802419Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:58:16.804743Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:58:16.809459Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:58:16.810695Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:16.810847Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 134 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:58:16.810902Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:58:16.819307Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:58:16.819402Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:58:16.819560Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:58:16.819648Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:58:16.829146Z node 1 :FLAT_TX_SCHEMESHARD INF ... 
NATOR: Add transaction: 102 at step: 5000003 FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000002 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 102 at step: 5000003 2025-05-07T08:58:16.964060Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000003, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:16.964164Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 102 Coordinator: 72057594046316545 AckTo { RawX1: 134 RawX2: 4294969453 } } Step: 5000003 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:58:16.964217Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_external_data_source.cpp:35: [72057594046678944] TAlterExternalDataSource TPropose, operationId: 102:0HandleReply TEvOperationPlan: step# 5000003 2025-05-07T08:58:16.964335Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 102:0 128 -> 240 2025-05-07T08:58:16.964505Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:58:16.964573Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-05-07T08:58:16.964877Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-05-07T08:58:16.965398Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 FAKE_COORDINATOR: Erasing txId 102 2025-05-07T08:58:16.967090Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:58:16.967123Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 102, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:58:16.967236Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 102, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-05-07T08:58:16.967290Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 102, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-05-07T08:58:16.967379Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:58:16.967407Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:206:2208], at schemeshard: 72057594046678944, txId: 102, path id: 1 2025-05-07T08:58:16.967445Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:206:2208], at schemeshard: 72057594046678944, txId: 102, path id: 2 2025-05-07T08:58:16.967480Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: 
TTxPublishToSchemeBoard Send, to populator: [1:206:2208], at schemeshard: 72057594046678944, txId: 102, path id: 2 2025-05-07T08:58:16.967751Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-05-07T08:58:16.967795Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046678944] TDone opId# 102:0 ProgressState 2025-05-07T08:58:16.967907Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#102:0 progress is 1/1 2025-05-07T08:58:16.967944Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-05-07T08:58:16.967973Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#102:0 progress is 1/1 2025-05-07T08:58:16.967995Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-05-07T08:58:16.968023Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: false 2025-05-07T08:58:16.968075Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-05-07T08:58:16.968116Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 102:0 2025-05-07T08:58:16.968144Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 102:0 2025-05-07T08:58:16.968200Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-05-07T08:58:16.968226Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 102, publications: 2, subscribers: 0 2025-05-07T08:58:16.968257Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 102, [OwnerId: 72057594046678944, LocalPathId: 1], 7 2025-05-07T08:58:16.968286Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 102, [OwnerId: 72057594046678944, LocalPathId: 2], 3 2025-05-07T08:58:16.969026Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 102 2025-05-07T08:58:16.969103Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 102 2025-05-07T08:58:16.969136Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 102 2025-05-07T08:58:16.969175Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 7 2025-05-07T08:58:16.969215Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 
72057594046678944, LocalPathId: 1] was 2 2025-05-07T08:58:16.970199Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 102 2025-05-07T08:58:16.970302Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 102 2025-05-07T08:58:16.970341Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 102 2025-05-07T08:58:16.970368Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 3 2025-05-07T08:58:16.970396Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-05-07T08:58:16.970523Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 102, subscribers: 0 2025-05-07T08:58:16.973779Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-05-07T08:58:16.974785Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 TestModificationResult got TxId: 102, wait until txId: 102 TestWaitNotification wait txId: 102 2025-05-07T08:58:16.975008Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2025-05-07T08:58:16.975049Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 2025-05-07T08:58:16.975550Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 2025-05-07T08:58:16.975665Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-05-07T08:58:16.975716Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:334:2325] TestWaitNotification: OK eventTxId 102 2025-05-07T08:58:16.976205Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/MyExternalDataSource" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T08:58:16.976498Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/MyExternalDataSource" took 279us result status StatusSuccess 2025-05-07T08:58:16.976747Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/MyExternalDataSource" PathDescription { Self { Name: "MyExternalDataSource" PathId: 2 
SchemeshardId: 72057594046678944 PathType: EPathTypeExternalDataSource CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 ExternalDataSourceVersion: 2 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } ExternalDataSourceDescription { Name: "MyExternalDataSource" PathId { OwnerId: 72057594046678944 LocalId: 2 } Version: 2 SourceType: "ObjectStorage" Location: "https://s3.cloud.net/my_new_bucket" Installation: "" Auth { None { } } Properties { } References { } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_external_data_source/unittest >> TExternalDataSourceTest::ReplaceExternalDataSourceIfNotExistsShouldFailIfFeatureFlagIsNotSet [GOOD]
Test command err:
Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2141] Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:127:2058] recipient: [1:109:2141] 2025-05-07T08:58:16.308213Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:58:16.308404Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:58:16.308466Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:58:16.308517Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:58:16.317926Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:58:16.318039Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:58:16.318150Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:58:16.318263Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval#
604800.000000s, IsManualStartup# false 2025-05-07T08:58:16.319228Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:58:16.333547Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:58:16.451119Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7610: Got new config: QueryServiceConfig { AvailableExternalDataSources: "ObjectStorage" AvailableExternalDataSources: "ClickHouse" AvailableExternalDataSources: "PostgreSQL" AvailableExternalDataSources: "MySQL" AvailableExternalDataSources: "Ydb" AvailableExternalDataSources: "YT" AvailableExternalDataSources: "Greenplum" AvailableExternalDataSources: "MsSQLServer" AvailableExternalDataSources: "Oracle" AvailableExternalDataSources: "Logging" AvailableExternalDataSources: "Solomon" } 2025-05-07T08:58:16.451202Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:58:16.452173Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# ObjectStorage, ClickHouse, PostgreSQL, MySQL, Ydb, YT, Greenplum, MsSQLServer, Oracle, Logging, Solomon 2025-05-07T08:58:16.497049Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:58:16.497580Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:58:16.497748Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:58:16.516245Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:58:16.516633Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:58:16.528249Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:16.528835Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:58:16.565747Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:58:16.578700Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:58:16.578776Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:58:16.578886Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:58:16.578939Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:58:16.579062Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:58:16.586482Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: 
TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:58:16.604330Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:239:2058] recipient: [1:15:2062] 2025-05-07T08:58:16.777302Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:58:16.777565Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:16.777799Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:58:16.778232Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:58:16.778317Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:16.782206Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:16.782350Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:58:16.782538Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:16.782647Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:58:16.782702Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:58:16.782740Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:58:16.785199Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:16.785301Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:58:16.785346Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:58:16.787396Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:16.787479Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 
2025-05-07T08:58:16.787542Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:58:16.787594Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:58:16.795567Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:58:16.798971Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:58:16.807359Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:58:16.808784Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:16.808986Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 134 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:58:16.809071Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:58:16.818460Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:58:16.818571Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:58:16.818793Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:58:16.818904Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:58:16.824934Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:58:16.825025Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:58:16.825206Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:58:16.825245Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:206:2208], at schemeshard: 72057594046678944, txId: 1, path id: 1 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:58:16.825614Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:16.825676Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046678944] TDone opId# 1:0 ProgressState 2025-05-07T08:58:16.825777Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#1:0 progress is 1/1 2025-05-07T08:58:16.825811Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-05-07T08:58:16.825850Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#1:0 progress is 1/1 2025-05-07T08:58:16.825984Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-05-07T08:58:16.826036Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 1, ready parts: 1/1, is published: false 2025-05-07T08:58:16.826082Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-05-07T08:58:16.826135Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 1:0 2025-05-07T08:58:16.826166Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 1:0 2025-05-07T08:58:16.826249Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-05-07T08:58:16.826386Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 1, publications: 1, subscribers: 0 2025-05-07T08:58:16.826418Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 1, [OwnerId: 72057594046678944, LocalPathId: 1], 3 2025-05-07T08:58:16.828418Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-05-07T08:58:16.828548Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-05-07T08:58:16.828592Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1 2025-05-07T08:58:16.828632Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 3 2025-05-07T08:58:16.828690Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:58:16.828818Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: 
Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1, subscribers: 0 2025-05-07T08:58:16.838664Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1 2025-05-07T08:58:16.846468Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1, at schemeshard: 72057594046678944 TestModificationResults wait txId: 101 2025-05-07T08:58:16.873796Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateExternalDataSource CreateExternalDataSource { Name: "MyExternalDataSource" SourceType: "ObjectStorage" Location: "https://s3.cloud.net/my_bucket" Auth { None { } } ReplaceIfExists: true } } TxId: 101 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:58:16.874237Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_create_external_data_source.cpp:336: [72057594046678944] CreateNewExternalDataSource, opId 101:0, feature flag EnableReplaceIfExistsForExternalEntities 0, tx WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateExternalDataSource FailOnExist: false CreateExternalDataSource { Name: "MyExternalDataSource" SourceType: "ObjectStorage" Location: "https://s3.cloud.net/my_bucket" Auth { None { } } ReplaceIfExists: true } 2025-05-07T08:58:16.874358Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_just_reject.cpp:47: TReject Propose, opId: 101:0, explain: Invalid TCreateExternalDataSource request: Unsupported: feature flag EnableReplaceIfExistsForExternalEntities is off, at schemeshard: 72057594046678944 2025-05-07T08:58:16.874431Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 101:1, propose status:StatusPreconditionFailed, reason: Invalid TCreateExternalDataSource request: Unsupported: feature flag EnableReplaceIfExistsForExternalEntities is off, at schemeshard: 72057594046678944 2025-05-07T08:58:16.877228Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:434: actor# [1:269:2260] Bootstrap 2025-05-07T08:58:16.898292Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:453: actor# [1:269:2260] Become StateWork (SchemeCache [1:274:2265]) 2025-05-07T08:58:16.899104Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:269:2260] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-05-07T08:58:16.905142Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 101, response: Status: StatusPreconditionFailed Reason: "Invalid TCreateExternalDataSource request: Unsupported: feature flag EnableReplaceIfExistsForExternalEntities is off" TxId: 101 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:58:16.914490Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 101, database: /MyRoot, subject: , status: StatusPreconditionFailed, reason: Invalid TCreateExternalDataSource request: Unsupported: feature flag EnableReplaceIfExistsForExternalEntities is off, operation: CREATE EXTERNAL DATA SOURCE, path: /MyRoot/MyExternalDataSource 2025-05-07T08:58:16.915179Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 TestModificationResult got TxId: 101, wait until txId: 101 TestWaitNotification wait txId: 101 
2025-05-07T08:58:16.921293Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 101: send EvNotifyTxCompletion 2025-05-07T08:58:16.921352Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 101 2025-05-07T08:58:16.923211Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 2025-05-07T08:58:16.923305Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-05-07T08:58:16.923341Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:284:2275] TestWaitNotification: OK eventTxId 101 2025-05-07T08:58:16.923840Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/MyExternalDataSource" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T08:58:16.924030Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/MyExternalDataSource" took 197us result status StatusPathDoesNotExist 2025-05-07T08:58:16.924268Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/MyExternalDataSource\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/MyExternalDataSource" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 >> TSchemeShardColumnTableTTL::AlterColumnTable >> TopicAutoscaling::Simple_BeforeAutoscaleAwareSDK [GOOD] >> TestKinesisHttpProxy::CreateDeleteStream [GOOD] >> TestKinesisHttpProxy::TestUnauthorizedPutRecords >> TopicAutoscaling::Simple_AutoscaleAwareSDK >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_index_1__SYNC-pk_types3-all_types3-index3---SYNC] >> TSchemeShardTTLTests::CreateTableShouldFailOnBeforeEpochTTL [GOOD] >> TestKinesisHttpProxy::ListShards [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_external_data_source/unittest >> TExternalDataSourceTest::RemovingReferencesFromDataSources [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2141] Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:127:2058] recipient: [1:109:2141] 2025-05-07T08:58:16.307711Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, 
MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:58:16.307947Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:58:16.307996Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:58:16.308038Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:58:16.319551Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:58:16.319638Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:58:16.319745Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:58:16.319820Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:58:16.320661Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:58:16.333555Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:58:16.452678Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7610: Got new config: QueryServiceConfig { AvailableExternalDataSources: "ObjectStorage" AvailableExternalDataSources: "ClickHouse" AvailableExternalDataSources: "PostgreSQL" AvailableExternalDataSources: "MySQL" AvailableExternalDataSources: "Ydb" AvailableExternalDataSources: "YT" AvailableExternalDataSources: "Greenplum" AvailableExternalDataSources: "MsSQLServer" AvailableExternalDataSources: "Oracle" AvailableExternalDataSources: "Logging" AvailableExternalDataSources: "Solomon" } 2025-05-07T08:58:16.452741Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:58:16.453515Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# ObjectStorage, ClickHouse, PostgreSQL, MySQL, Ydb, YT, Greenplum, MsSQLServer, Oracle, Logging, Solomon 2025-05-07T08:58:16.495472Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:58:16.496127Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:58:16.496310Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:58:16.520185Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:58:16.520493Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:58:16.528882Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 
2025-05-07T08:58:16.529373Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:58:16.565675Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:58:16.578040Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:58:16.578151Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:58:16.578317Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:58:16.578403Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:58:16.578570Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:58:16.586202Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:58:16.599267Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:239:2058] recipient: [1:15:2062] 2025-05-07T08:58:16.762777Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:58:16.764424Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:16.765934Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:58:16.769414Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:58:16.769525Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:16.781718Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:16.781906Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:58:16.782135Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 
72057594046678944 2025-05-07T08:58:16.782318Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:58:16.782369Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:58:16.782403Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:58:16.791112Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:16.791196Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:58:16.791261Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:58:16.801541Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:16.801614Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:16.801682Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:58:16.801736Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:58:16.807587Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:58:16.814199Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:58:16.814672Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:58:16.815904Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:16.816074Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 134 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:58:16.816154Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:58:16.820256Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:58:16.820359Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:58:16.820542Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:58:16.820698Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:58:16.830010Z node 1 :FLAT_TX_SCHEMESHARD INF ... ation RegisterRelationByTabletId, TxId: 104, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 104 at step: 5000005 FAKE_COORDINATOR: advance: minStep5000005 State->FrontStep: 5000004 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 104 at step: 5000005 2025-05-07T08:58:17.074671Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000005, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:17.074837Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 104 Coordinator: 72057594046316545 AckTo { RawX1: 134 RawX2: 4294969453 } } Step: 5000005 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:58:17.074900Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_external_data_source.cpp:40: [72057594046678944] TDropExternalDataSource TPropose opId# 104:0 HandleReply TEvOperationPlan: step# 5000005 2025-05-07T08:58:17.075037Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-05-07T08:58:17.075125Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 104:0 128 -> 240 2025-05-07T08:58:17.075355Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:58:17.075433Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-05-07T08:58:17.076622Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 2025-05-07T08:58:17.083348Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 2025-05-07T08:58:17.085238Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:58:17.085301Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 104, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:58:17.085486Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, 
txId: 104, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-05-07T08:58:17.085650Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:58:17.085685Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:206:2208], at schemeshard: 72057594046678944, txId: 104, path id: 1 2025-05-07T08:58:17.085724Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:206:2208], at schemeshard: 72057594046678944, txId: 104, path id: 2 FAKE_COORDINATOR: Erasing txId 104 2025-05-07T08:58:17.086129Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 104:0, at schemeshard: 72057594046678944 2025-05-07T08:58:17.086200Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046678944] TDone opId# 104:0 ProgressState 2025-05-07T08:58:17.086316Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#104:0 progress is 1/1 2025-05-07T08:58:17.086352Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-05-07T08:58:17.086410Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#104:0 progress is 1/1 2025-05-07T08:58:17.086443Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-05-07T08:58:17.086480Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 104, ready parts: 1/1, is published: false 2025-05-07T08:58:17.086522Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-05-07T08:58:17.086562Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 104:0 2025-05-07T08:58:17.086594Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 104:0 2025-05-07T08:58:17.086675Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-05-07T08:58:17.086728Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 104, publications: 2, subscribers: 0 2025-05-07T08:58:17.086778Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 104, [OwnerId: 72057594046678944, LocalPathId: 1], 11 2025-05-07T08:58:17.086811Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 104, [OwnerId: 72057594046678944, LocalPathId: 2], 18446744073709551615 2025-05-07T08:58:17.087400Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 104 2025-05-07T08:58:17.087484Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 
LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 104 2025-05-07T08:58:17.087522Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 104 2025-05-07T08:58:17.087572Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 104, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 18446744073709551615 2025-05-07T08:58:17.087616Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-05-07T08:58:17.087916Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-05-07T08:58:17.087960Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-05-07T08:58:17.088023Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-05-07T08:58:17.088188Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 11 PathOwnerId: 72057594046678944, cookie: 104 2025-05-07T08:58:17.088275Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 11 PathOwnerId: 72057594046678944, cookie: 104 2025-05-07T08:58:17.088301Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 104 2025-05-07T08:58:17.088325Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 104, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 11 2025-05-07T08:58:17.088344Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:58:17.088411Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 104, subscribers: 0 2025-05-07T08:58:17.091761Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 2025-05-07T08:58:17.091878Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-05-07T08:58:17.091953Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 TestModificationResult got TxId: 104, wait until txId: 104 TestWaitNotification wait txId: 104 2025-05-07T08:58:17.092144Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for 
txId 104: send EvNotifyTxCompletion 2025-05-07T08:58:17.092193Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 104 2025-05-07T08:58:17.092562Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 104, at schemeshard: 72057594046678944 2025-05-07T08:58:17.092709Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 104: got EvNotifyTxCompletionResult 2025-05-07T08:58:17.092747Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 104: satisfy waiter [1:393:2384] TestWaitNotification: OK eventTxId 104 2025-05-07T08:58:17.093355Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/ExternalDataSource" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T08:58:17.093649Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/ExternalDataSource" took 279us result status StatusPathDoesNotExist 2025-05-07T08:58:17.093861Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/ExternalDataSource\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/ExternalDataSource" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 |91.1%| [TA] {RESULT} $(B)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/test-results/unittest/{meta.json ... results_accumulator.log} ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_write/unittest >> DataShardWrite::DoubleWriteUncommittedThenDoubleReadWithCommit [GOOD] Test command err: 2025-05-07T08:57:30.956515Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:105:2151], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:57:30.956698Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:57:30.956947Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0034c2/r3tmp/tmpdMx12t/pdisk_1.dat 2025-05-07T08:57:31.398262Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-05-07T08:57:31.455071Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:57:31.513482Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:57:31.513636Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:57:31.525425Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:57:31.628370Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-05-07T08:57:31.694115Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3098: StateInit, received event# 268828672, Sender [1:656:2563], Recipient [1:665:2569]: NKikimr::TEvTablet::TEvBoot 2025-05-07T08:57:31.695285Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3098: StateInit, received event# 268828673, Sender [1:656:2563], Recipient [1:665:2569]: NKikimr::TEvTablet::TEvRestored 2025-05-07T08:57:31.695717Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:665:2569] 2025-05-07T08:57:31.695969Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-05-07T08:57:31.706556Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3111: StateInactive, received event# 268828684, Sender [1:656:2563], Recipient [1:665:2569]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-05-07T08:57:31.745953Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-05-07T08:57:31.746123Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-05-07T08:57:31.747931Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-05-07T08:57:31.748038Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-05-07T08:57:31.748129Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-05-07T08:57:31.748536Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-05-07T08:57:31.748693Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-05-07T08:57:31.749684Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state 
actor id [1:681:2569] in generation 1 2025-05-07T08:57:31.762558Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-05-07T08:57:31.833334Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-05-07T08:57:31.833638Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-05-07T08:57:31.833788Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:683:2579] 2025-05-07T08:57:31.833848Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-05-07T08:57:31.833909Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-05-07T08:57:31.833956Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T08:57:31.834271Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 2146435072, Sender [1:665:2569], Recipient [1:665:2569]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-05-07T08:57:31.834342Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3155: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-05-07T08:57:31.834716Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-05-07T08:57:31.834843Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-05-07T08:57:31.834954Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T08:57:31.835021Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-05-07T08:57:31.835084Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-05-07T08:57:31.835128Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-05-07T08:57:31.835176Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-05-07T08:57:31.835228Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-05-07T08:57:31.835295Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T08:57:31.835449Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269877761, Sender [1:672:2573], Recipient [1:665:2569]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-05-07T08:57:31.835491Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3166: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-05-07T08:57:31.835549Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:661:2566], serverId# [1:672:2573], sessionId# [0:0:0] 2025-05-07T08:57:31.835999Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269549568, Sender [1:410:2405], Recipient [1:672:2573] 2025-05-07T08:57:31.836068Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3136: StateWork, processing 
event TEvDataShard::TEvProposeTransaction 2025-05-07T08:57:31.836204Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-05-07T08:57:31.836475Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-05-07T08:57:31.836545Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-05-07T08:57:31.836657Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-05-07T08:57:31.836711Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-05-07T08:57:31.836766Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx 2025-05-07T08:57:31.836818Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx 2025-05-07T08:57:31.836858Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-05-07T08:57:31.837255Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayCompleteNoMoreRestarts 2025-05-07T08:57:31.837294Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit StoreSchemeTx 2025-05-07T08:57:31.837350Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit FinishPropose 2025-05-07T08:57:31.837395Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-05-07T08:57:31.837459Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayComplete 2025-05-07T08:57:31.837493Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit FinishPropose 2025-05-07T08:57:31.837536Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit WaitForPlan 2025-05-07T08:57:31.837573Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit WaitForPlan 2025-05-07T08:57:31.837606Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:281474976715657] at 72075186224037888 is not ready to execute on unit WaitForPlan 2025-05-07T08:57:31.851748Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269746185, Sender [1:684:2580], Recipient [1:665:2569]: NKikimr::TEvTxProxySchemeCache::TEvWatchNotifyUpdated 2025-05-07T08:57:31.851852Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-05-07T08:57:31.865959Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-05-07T08:57:31.866088Z node 1 
:TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-05-07T08:57:31.866126Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-05-07T08:57:31.866194Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 281474976715657 at tablet 72075186224037888 send to client, exec latency: 0 ms, propose latency: 0 ms, status: PREPARED 2025-05-07T08:57:31.866287Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-05-07T08:57:32.048678Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269877761, Sender [1:699:2589], Recipient [1:665:2569]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-05-07T08:57:32.048756Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3166: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-05-07T08:57:32.048802Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075 ... 888 on unit CompletedOperations 2025-05-07T08:58:15.049846Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:6] at 72075186224037888 is Executed 2025-05-07T08:58:15.049873Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:6] at 72075186224037888 executing on unit CompletedOperations 2025-05-07T08:58:15.049900Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:6] at 72075186224037888 has finished 2025-05-07T08:58:15.049934Z node 8 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2670: TTxReadViaPipeline(69) Execute with status# Executed at tablet# 72075186224037888 2025-05-07T08:58:15.050052Z node 8 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2719: TTxReadViaPipeline(69) Complete: at tablet# 72075186224037888 2025-05-07T08:58:15.050590Z node 8 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269553219, Sender [8:1646:2439], Recipient [8:1302:2390]: NKikimrTxDataShard.TEvReadCancel ReadId: 0 2025-05-07T08:58:15.050650Z node 8 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3392: 72075186224037888 ReadCancel: { ReadId: 0 } 2025-05-07T08:58:15.052199Z node 8 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269553219, Sender [8:1648:2440], Recipient [8:1302:2390]: NKikimrTxDataShard.TEvReadCancel ReadId: 0 2025-05-07T08:58:15.052260Z node 8 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3392: 72075186224037888 ReadCancel: { ReadId: 0 } 2025-05-07T08:58:15.056715Z node 8 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 278003712, Sender [7:1631:2937], Recipient [8:1576:2434] 2025-05-07T08:58:15.056786Z node 8 :TX_DATASHARD TRACE: datashard__write.cpp:182: Handle TTxWrite: at tablet# 72075186224037888 2025-05-07T08:58:15.056959Z node 8 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 2146435074, Sender [8:1302:2390], Recipient [8:1302:2390]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvDelayedProposeTransaction 2025-05-07T08:58:15.057003Z node 8 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvDelayedProposeTransaction 2025-05-07T08:58:15.057098Z node 8 :TX_DATASHARD TRACE: datashard__write.cpp:28: TTxWrite:: execute at tablet# 72075186224037888 2025-05-07T08:58:15.057279Z node 8 :TX_DATASHARD TRACE: 
datashard_write_operation.cpp:64: Parsing write transaction for 0 at 72075186224037888, record: TxMode: MODE_IMMEDIATE Locks { Locks { LockId: 281474976715661 DataShard: 72075186224037888 Generation: 1 Counter: 0 SchemeShard: 72057594046644480 PathId: 2 HasWrites: true } SendingShards: 72075186224037888 ReceivingShards: 72075186224037888 Op: Commit } 2025-05-07T08:58:15.057398Z node 8 :TX_DATASHARD TRACE: key_validator.cpp:33: -- AddReadRange: (Uint64 : 281474976715661, Uint64 : 72075186224037888, Uint64 : 72057594046644480, Uint64 : 2) table: [1:997:0] 2025-05-07T08:58:15.057516Z node 8 :TX_DATASHARD TRACE: key_validator.cpp:54: -- AddWriteRange: (Uint64 : 281474976715661, Uint64 : 72075186224037888, Uint64 : 72057594046644480, Uint64 : 2) table: [1:997:0] 2025-05-07T08:58:15.057620Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:7] at 72075186224037888 on unit CheckWrite 2025-05-07T08:58:15.057671Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:7] at 72075186224037888 is Executed 2025-05-07T08:58:15.057705Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:7] at 72075186224037888 executing on unit CheckWrite 2025-05-07T08:58:15.057738Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:7] at 72075186224037888 to execution unit BuildAndWaitDependencies 2025-05-07T08:58:15.057770Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:7] at 72075186224037888 on unit BuildAndWaitDependencies 2025-05-07T08:58:15.057812Z node 8 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 72075186224037888 CompleteEdge# v1500/0 IncompleteEdge# v{min} UnprotectedReadEdge# v1500/18446744073709551615 ImmediateWriteEdge# v1500/18446744073709551615 ImmediateWriteEdgeReplied# v1500/18446744073709551615 2025-05-07T08:58:15.057883Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:7] at 72075186224037888 2025-05-07T08:58:15.057923Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:7] at 72075186224037888 is Executed 2025-05-07T08:58:15.057953Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:7] at 72075186224037888 executing on unit BuildAndWaitDependencies 2025-05-07T08:58:15.058001Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:7] at 72075186224037888 to execution unit ExecuteWrite 2025-05-07T08:58:15.058028Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:7] at 72075186224037888 on unit ExecuteWrite 2025-05-07T08:58:15.058063Z node 8 :TX_DATASHARD DEBUG: execute_write_unit.cpp:245: Executing write operation for [0:7] at 72075186224037888 2025-05-07T08:58:15.058134Z node 8 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 72075186224037888 CompleteEdge# v1500/0 IncompleteEdge# v{min} UnprotectedReadEdge# v1500/18446744073709551615 ImmediateWriteEdge# v1500/18446744073709551615 ImmediateWriteEdgeReplied# v1500/18446744073709551615 2025-05-07T08:58:15.058298Z node 8 :TX_DATASHARD TRACE: datashard_kqp.cpp:843: KqpCommitLock LockId: 281474976715661 DataShard: 72075186224037888 Generation: 1 Counter: 0 SchemeShard: 72057594046644480 PathId: 2 HasWrites: true 2025-05-07T08:58:15.058384Z node 8 :TX_DATASHARD TRACE: datashard_user_db.cpp:368: Committing changes lockId# 281474976715661 in localTid# 1001 shard# 72075186224037888 2025-05-07T08:58:15.058500Z node 8 :TX_DATASHARD DEBUG: execute_write_unit.cpp:414: Skip 
empty write operation for [0:7] at 72075186224037888 2025-05-07T08:58:15.058691Z node 8 :TX_DATASHARD TRACE: execute_write_unit.cpp:47: add locks to result: 0 2025-05-07T08:58:15.058758Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:7] at 72075186224037888 is ExecutedNoMoreRestarts 2025-05-07T08:58:15.058792Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:7] at 72075186224037888 executing on unit ExecuteWrite 2025-05-07T08:58:15.058824Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:7] at 72075186224037888 to execution unit FinishProposeWrite 2025-05-07T08:58:15.058856Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:7] at 72075186224037888 on unit FinishProposeWrite 2025-05-07T08:58:15.058937Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:7] at 72075186224037888 is DelayCompleteNoMoreRestarts 2025-05-07T08:58:15.058976Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:7] at 72075186224037888 executing on unit FinishProposeWrite 2025-05-07T08:58:15.059030Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:7] at 72075186224037888 to execution unit CompletedOperations 2025-05-07T08:58:15.059084Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:7] at 72075186224037888 on unit CompletedOperations 2025-05-07T08:58:15.059135Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:7] at 72075186224037888 is Executed 2025-05-07T08:58:15.059164Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:7] at 72075186224037888 executing on unit CompletedOperations 2025-05-07T08:58:15.059197Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:7] at 72075186224037888 has finished 2025-05-07T08:58:15.064071Z node 8 :TX_DATASHARD TRACE: datashard__write.cpp:150: TTxWrite complete: at tablet# 72075186224037888 2025-05-07T08:58:15.064156Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:7] at 72075186224037888 on unit FinishProposeWrite 2025-05-07T08:58:15.064208Z node 8 :TX_DATASHARD TRACE: finish_propose_write_unit.cpp:163: Propose transaction complete txid 7 at tablet 72075186224037888 send to client, propose latency: 1 ms, status: STATUS_COMPLETED 2025-05-07T08:58:15.064323Z node 8 :TX_DATASHARD DEBUG: datashard.cpp:2560: Waiting for PlanStep# 1501 from mediator time cast 2025-05-07T08:58:15.064422Z node 8 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T08:58:15.066556Z node 8 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 270270977, Sender [8:58:2063], Recipient [8:1302:2390]: {TEvNotifyPlanStep TabletId# 72075186224037888 PlanStep# 1501} 2025-05-07T08:58:15.066633Z node 8 :TX_DATASHARD TRACE: datashard_impl.h:3170: StateWork, processing event TEvMediatorTimecast::TEvNotifyPlanStep 2025-05-07T08:58:15.066691Z node 8 :TX_DATASHARD DEBUG: datashard.cpp:3780: Notified by mediator time cast with PlanStep# 1501 at tablet 72075186224037888 2025-05-07T08:58:15.066773Z node 8 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 { items { int64_value: 0 } items { int64_value: 1000 } }, { items { int64_value: 1 } items { int64_value: 1001 } }, { items { int64_value: 2 } items { int64_value: 1002 } 
}, { items { int64_value: 3 } items { int64_value: 1003 } }, { items { int64_value: 4 } items { int64_value: 1004 } }, { items { int64_value: 5 } items { int64_value: 1005 } }, { items { int64_value: 6 } items { int64_value: 5001 } } { items { int64_value: 0 } items { int64_value: 2000 } }, { items { int64_value: 1 } items { int64_value: 2001 } }, { items { int64_value: 2 } items { int64_value: 2002 } }, { items { int64_value: 3 } items { int64_value: 2003 } }, { items { int64_value: 4 } items { int64_value: 2004 } }, { items { int64_value: 5 } items { int64_value: 2005 } }, { items { int64_value: 6 } items { int64_value: 5002 } } result_sets { columns { name: "index" type { optional_type { item { type_id: INT64 } } } } columns { name: "value" type { optional_type { item { type_id: INT64 } } } } rows { items { int64_value: 0 } items { int64_value: 1000 } } rows { items { int64_value: 1 } items { int64_value: 1001 } } rows { items { int64_value: 2 } items { int64_value: 1002 } } rows { items { int64_value: 3 } items { int64_value: 1003 } } rows { items { int64_value: 4 } items { int64_value: 1004 } } rows { items { int64_value: 5 } items { int64_value: 1005 } } rows { items { int64_value: 6 } items { int64_value: 5001 } } } result_sets { columns { name: "index" type { optional_type { item { type_id: INT64 } } } } columns { name: "value" type { optional_type { item { type_id: INT64 } } } } rows { items { int64_value: 0 } items { int64_value: 2000 } } rows { items { int64_value: 1 } items { int64_value: 2001 } } rows { items { int64_value: 2 } items { int64_value: 2002 } } rows { items { int64_value: 3 } items { int64_value: 2003 } } rows { items { int64_value: 4 } items { int64_value: 2004 } } rows { items { int64_value: 5 } items { int64_value: 2005 } } rows { items { int64_value: 6 } items { int64_value: 5002 } } } tx_meta { } |91.1%| [LD] {RESULT} $(B)/ydb/core/fq/libs/checkpointing/ut/ydb-core-fq-libs-checkpointing-ut >> Cdc::Write[PqRunner] [GOOD] >> Cdc::Write[YdsRunner] >> TestKinesisHttpProxy::CreateDeleteStreamWithConsumer >> Cdc::HugeKey[TopicRunner] [GOOD] >> Cdc::HugeKeyDebezium >> Cdc::NewImageLogDebezium [GOOD] >> Cdc::NaN[PqRunner] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLTests::CreateTableShouldFailOnBeforeEpochTTL [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:128:2058] recipient: [1:108:2140] 2025-05-07T08:58:18.258722Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:58:18.258819Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:58:18.258856Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:58:18.258889Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:58:18.258940Z 
node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:58:18.258964Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:58:18.259009Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:58:18.259076Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:58:18.259725Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:58:18.260006Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:58:18.354991Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:58:18.355056Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:58:18.411355Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:58:18.411492Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:58:18.411667Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:58:18.430768Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:58:18.431569Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:58:18.432456Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:18.432754Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:58:18.435718Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:58:18.437515Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:58:18.437583Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:58:18.437641Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:58:18.437704Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:58:18.437761Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:58:18.438003Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:58:18.448216Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:239:2058] recipient: [1:15:2062] 2025-05-07T08:58:18.617545Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:58:18.617801Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:18.618139Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:58:18.618416Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:58:18.618489Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:18.621123Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:18.621283Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:58:18.621506Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:18.621571Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:58:18.621613Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:58:18.621648Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:58:18.623934Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:18.623994Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:58:18.624183Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:58:18.626326Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:18.626395Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose 
ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:18.626445Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:58:18.626527Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:58:18.630402Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:58:18.633218Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:58:18.633433Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:58:18.634577Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:18.634744Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 130 RawX2: 4294969450 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:58:18.634829Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:58:18.635164Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:58:18.635239Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:58:18.635451Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:58:18.635542Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:58:18.638183Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:58:18.638233Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:58:18.638468Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 
72057594046678944 2025-05-07T08:58:18.638516Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:206:2208], at schemeshard: 72057594046678944, txId: 1, path id: 1 2025-05-07T08:58:18.638601Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:18.638652Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046678944] TDone opId# 1:0 ProgressState 2025-05-07T08:58:18.638773Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#1:0 progress is 1/1 2025-05-07T08:58:18.638813Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-05-07T08:58:18.638854Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#1:0 progress is 1/1 2025-05-07T08:58:18.638886Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-05-07T08:58:18.638928Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 1, ready parts: 1/1, is published: false 2025-05-07T08:58:18.639004Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-05-07T08:58:18.639047Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 1:0 2025-05-07T08:58:18.639079Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 1:0 2025-05-07T08:58:18.639162Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-05-07T08:58:18.639203Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 1, publications: 1, subscribers: 0 2025-05-07T08:58:18.639240Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 1, [OwnerId: 72057594046678944, LocalPathId: 1], 3 2025-05-07T08:58:18.642322Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-05-07T08:58:18.642455Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-05-07T08:58:18.642500Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1 2025-05-07T08:58:18.642551Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 3 2025-05-07T08:58:18.642596Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:58:18.642713Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1, subscribers: 0 2025-05-07T08:58:18.645993Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1 2025-05-07T08:58:18.646593Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1, at schemeshard: 72057594046678944 WARNING: All log messages before y_absl::InitializeLog() is called are written to STDERR W0000 00:00:1746608298.647792 259212 text_format.cc:398] Warning parsing text-format NKikimrSchemeOp.TTableDescription: 9:35: text format contains deprecated field "ExpireAfterSeconds" TestModificationResults wait txId: 101 2025-05-07T08:58:18.648217Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:434: actor# [1:269:2260] Bootstrap 2025-05-07T08:58:18.665175Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:453: actor# [1:269:2260] Become StateWork (SchemeCache [1:274:2265]) 2025-05-07T08:58:18.668242Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateTable CreateTable { Name: "TTLEnabledTable" Columns { Name: "key" Type: "Uint64" } Columns { Name: "modified_at" Type: "Timestamp" } KeyColumnNames: "key" TTLSettings { Enabled { ColumnName: "modified_at" ExpireAfterSeconds: 3153600000 Tiers { ApplyAfterSeconds: 3153600000 Delete { } } } } } } TxId: 101 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:58:18.668629Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_table.cpp:426: TCreateTable Propose, path: /MyRoot/TTLEnabledTable, opId: 101:0, at schemeshard: 72057594046678944 2025-05-07T08:58:18.668766Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_create_table.cpp:433: TCreateTable Propose, path: /MyRoot/TTLEnabledTable, opId: 101:0, schema: Name: "TTLEnabledTable" Columns { Name: "key" Type: "Uint64" } Columns { Name: "modified_at" Type: "Timestamp" } KeyColumnNames: "key" TTLSettings { Enabled { ColumnName: "modified_at" ExpireAfterSeconds: 3153600000 Tiers { ApplyAfterSeconds: 3153600000 Delete { } } } }, at schemeshard: 72057594046678944 2025-05-07T08:58:18.669278Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 101:1, propose status:StatusSchemeError, reason: TTL should be less than 1746608298 seconds (20215 days, 55 years). The ttl behaviour is undefined before 1970., at schemeshard: 72057594046678944 2025-05-07T08:58:18.670603Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:269:2260] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-05-07T08:58:18.683834Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 101, response: Status: StatusSchemeError Reason: "TTL should be less than 1746608298 seconds (20215 days, 55 years). The ttl behaviour is undefined before 1970." TxId: 101 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:58:18.684148Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 101, database: /MyRoot, subject: , status: StatusSchemeError, reason: TTL should be less than 1746608298 seconds (20215 days, 55 years). 
The ttl behaviour is undefined before 1970., operation: CREATE TABLE, path: /MyRoot/TTLEnabledTable 2025-05-07T08:58:18.691208Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 TestModificationResult got TxId: 101, wait until txId: 101 |91.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_sequence/ydb-core-tx-datashard-ut_sequence |91.1%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_sequence/ydb-core-tx-datashard-ut_sequence |91.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_sequence/ydb-core-tx-datashard-ut_sequence >> TTableProfileTests::ExplicitPartitionsWrongKeyType [GOOD] >> TestKinesisHttpProxy::ListShardsEmptyFields |91.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest |91.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/workload_service/ut/unittest >> KqpWorkloadService::TestHandlerActorCleanup [GOOD] Test command err: 2025-05-07T08:54:38.791713Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501624575040609581:2062];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:54:38.792407Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/004836/r3tmp/tmpcQooZX/pdisk_1.dat 2025-05-07T08:54:39.392204Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:54:39.401033Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:54:39.401127Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:54:39.403608Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 27697, node 1 2025-05-07T08:54:39.555363Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:54:39.555385Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:54:39.555391Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:54:39.555507Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:5754 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:54:39.887858Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:54:42.272612Z node 1 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:440: [WorkloadService] [Service] Started workload service initialization 2025-05-07T08:54:42.287353Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:223: SessionId: ydb://session/3?node_id=1&id=YjdkOGY1YzctMWM1OWUyMWUtMTZjNmNkYjktMTcxNTBiZTc=, ActorId: [0:0:0], ActorState: unknown state, Create session actor with id YjdkOGY1YzctMWM1OWUyMWUtMTZjNmNkYjktMTcxNTBiZTc= 2025-05-07T08:54:42.329421Z node 1 :KQP_WORKLOAD_SERVICE TRACE: kqp_workload_service.cpp:125: [WorkloadService] [Service] Updated node info, noode count: 1 2025-05-07T08:54:42.329469Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:100: [WorkloadService] [Service] Subscribed for config changes 2025-05-07T08:54:42.329499Z node 1 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:111: [WorkloadService] [Service] Resource pools was enanbled 2025-05-07T08:54:42.329573Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:241: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7501624592220479396:2328], Start check tables existence, number paths: 2 2025-05-07T08:54:42.329704Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:227: SessionId: ydb://session/3?node_id=1&id=YjdkOGY1YzctMWM1OWUyMWUtMTZjNmNkYjktMTcxNTBiZTc=, ActorId: [1:7501624592220479397:2329], ActorState: unknown state, session actor bootstrapped 2025-05-07T08:54:42.336673Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:182: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7501624592220479396:2328], Describe table /Root/.metadata/workload_manager/delayed_requests status PathErrorUnknown 2025-05-07T08:54:42.339873Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:387: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501624592220479423:2296], DatabaseId: Root, PoolId: sample_pool_id, Start pool creating 2025-05-07T08:54:42.343879Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480 2025-05-07T08:54:42.345221Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:429: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501624592220479423:2296], DatabaseId: Root, PoolId: sample_pool_id, Subscribe on create pool 
tx: 281474976710658 2025-05-07T08:54:42.345461Z node 1 :KQP_WORKLOAD_SERVICE TRACE: scheme_actors.cpp:352: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501624592220479423:2296], DatabaseId: Root, PoolId: sample_pool_id, Tablet to pipe successfully connected 2025-05-07T08:54:42.345625Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:182: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7501624592220479396:2328], Describe table /Root/.metadata/workload_manager/running_requests status PathErrorUnknown 2025-05-07T08:54:42.345685Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:289: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7501624592220479396:2328], Successfully finished 2025-05-07T08:54:42.345785Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:367: [WorkloadService] [Service] Cleanup completed, tables exists: 0 2025-05-07T08:54:42.356870Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501624592220479423:2296], DatabaseId: Root, PoolId: sample_pool_id, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-05-07T08:54:42.458407Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:387: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501624592220479423:2296], DatabaseId: Root, PoolId: sample_pool_id, Start pool creating 2025-05-07T08:54:42.465422Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501624592220479474:2328] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/sample_pool_id\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:54:42.465655Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:480: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501624592220479423:2296], DatabaseId: Root, PoolId: sample_pool_id, Pool successfully created 2025-05-07T08:54:42.468293Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:223: SessionId: ydb://session/3?node_id=1&id=ZTZmMTRjMzUtZGI3Yjg1MzAtYWQ1MzU0NDQtOWM1MzNmNmI=, ActorId: [0:0:0], ActorState: unknown state, Create session actor with id ZTZmMTRjMzUtZGI3Yjg1MzAtYWQ1MzU0NDQtOWM1MzNmNmI= 2025-05-07T08:54:42.468626Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:227: SessionId: ydb://session/3?node_id=1&id=ZTZmMTRjMzUtZGI3Yjg1MzAtYWQ1MzU0NDQtOWM1MzNmNmI=, ActorId: [1:7501624592220479482:2331], ActorState: unknown state, session actor bootstrapped 2025-05-07T08:54:42.468778Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:443: SessionId: ydb://session/3?node_id=1&id=ZTZmMTRjMzUtZGI3Yjg1MzAtYWQ1MzU0NDQtOWM1MzNmNmI=, ActorId: [1:7501624592220479482:2331], ActorState: ReadyState, TraceId: 01jtmz8kh45c8b9fdxzkstkt70, received request, proxyRequestId: 3 prepared: 0 tx_control: 0 action: QUERY_ACTION_EXECUTE type: QUERY_TYPE_SQL_GENERIC_QUERY text: SELECT 42; rpcActor: [1:7501624592220479481:2334] database: Root databaseId: /Root pool id: sample_pool_id 2025-05-07T08:54:42.468833Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:157: [WorkloadService] [Service] Recieved subscription request, DatabaseId: /Root, PoolId: sample_pool_id 2025-05-07T08:54:42.468871Z node 1 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:561: [WorkloadService] [Service] Creating new database state for id /Root 2025-05-07T08:54:42.468939Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:169: [WorkloadService] [Service] Recieved new request from [1:7501624592220479482:2331], DatabaseId: /Root, PoolId: sample_pool_id, SessionId: ydb://session/3?node_id=1&id=ZTZmMTRjMzUtZGI3Yjg1MzAtYWQ1MzU0NDQtOWM1MzNmNmI= 2025-05-07T08:54:42.468987Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:185: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501624592220479484:2332], DatabaseId: /Root, PoolId: sample_pool_id, Start pool fetching 2025-05-07T08:54:42.469052Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:574: [WorkloadService] [TDatabaseFetcherActor] ActorId: [1:7501624592220479485:2333], Database: /Root, Start database fetching 2025-05-07T08:54:42.469511Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:600: [WorkloadService] [TDatabaseFetcherActor] ActorId: [1:7501624592220479485:2333], Database: /Root, Database info successfully fetched, serverless: 0 2025-05-07T08:54:42.469588Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:240: [WorkloadService] [Service] Successfully fetched database info, DatabaseId: /Root, Serverless: 0 2025-05-07T08:54:42.469652Z node 1 
:KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:44: [WorkloadService] [TPoolResolverActor] ActorId: [1:7501624592220479493:2334], DatabaseId: /Root, PoolId: sample_pool_id, SessionId: ydb://session/3?node_id=1&id=ZTZmMTRjMzUtZGI3Yjg1MzAtYWQ1MzU0NDQtOWM1MzNmNmI=, Start pool fetching 2025-05-07T08:54:42.469679Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:185: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501624592220479495:2335], DatabaseId: /Root, PoolId: sample_pool_id, Start pool fetching 2025-05-07T08:54:42.470447Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:223: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501624592220479495:2335], DatabaseId: /Root, PoolId: sample_pool_id, Pool info successfully fetched 2025-05-07T08:54:42.470500Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:223: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501624592220479484:2332], DatabaseId: /Root, PoolId: sample_pool_id, Pool info successfully fetched 2025-05-07T08:54:42.470524Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:107: [WorkloadService] [TPoolResolverActor] ActorId: [1:7501624592220479493:2334], DatabaseId: /Root, PoolId: sample_pool_id, SessionId: ydb://session/3?node_id=1&id=ZTZmMTRjMzUtZGI3Yjg1MzAtYWQ1MzU0NDQtOWM1MzNmNmI=, Pool info successfully resolved 2025-05-07T08:54:42.470552Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:253: [WorkloadService] [Service] Successfully fetched pool sample_pool_id, DatabaseId: /Root 2025-05-07T08:54:42.470569Z node 1 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:571: [WorkloadService] [Service] Creating new handler for pool /Root/sample_pool_id 2025-05-07T08:54:42.470862Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:279: [WorkloadService] [Service] Successfully fetched pool sample_pool_id, DatabaseId: /Root, SessionId: ydb://se ... 
roxyRequestId: 15, proxyId: [6:7501625093774638092:2126] 2025-05-07T08:56:50.497552Z node 6 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2546: SessionId: ydb://session/3?node_id=6&id=NDcxNWQ2YjMtOWEzNjQwMTgtYTViYzhhNjQtZjQ0OWNlYWU=, ActorId: [6:7501625141019279456:2452], ActorState: unknown state, TraceId: 01jtmzcgfgd0wfcf4xbyrse2yw, Cleanup temp tables: 0 2025-05-07T08:56:50.497857Z node 6 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2637: SessionId: ydb://session/3?node_id=6&id=NDcxNWQ2YjMtOWEzNjQwMTgtYTViYzhhNjQtZjQ0OWNlYWU=, ActorId: [6:7501625141019279456:2452], ActorState: unknown state, TraceId: 01jtmzcgfgd0wfcf4xbyrse2yw, Session actor destroyed 2025-05-07T08:56:50.509764Z node 6 :KQP_SESSION DEBUG: kqp_session_actor.cpp:443: SessionId: ydb://session/3?node_id=6&id=NTY0ZjNhMDAtZTI4YmYzNS04M2Q2MGJjMi1kY2FkNjgwYg==, ActorId: [6:7501625119544442432:2334], ActorState: ReadyState, TraceId: 01jtmzcgjddbrjy8e3pe6mr3sk, received request, proxyRequestId: 18 prepared: 0 tx_control: 0 action: QUERY_ACTION_EXECUTE type: QUERY_TYPE_SQL_DDL text: DROP RESOURCE POOL sample_pool_id; DROP RESOURCE POOL default; rpcActor: [0:0:0] database: /Root databaseId: /Root pool id: default 2025-05-07T08:56:50.546106Z node 6 :KQP_WORKLOAD_SERVICE DEBUG: pool_handlers_actors.cpp:294: [WorkloadService] [TPoolHandlerActorBase] ActorId: [6:7501625119544442521:2339], DatabaseId: /Root, PoolId: sample_pool_id, Got delete notification 2025-05-07T08:56:50.546213Z node 6 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:157: [WorkloadService] [Service] Recieved subscription request, DatabaseId: /Root, PoolId: sample_pool_id 2025-05-07T08:56:50.546336Z node 6 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:185: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7501625141019279525:2466], DatabaseId: /Root, PoolId: sample_pool_id, Start pool fetching 2025-05-07T08:56:50.547052Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7501625141019279525:2466], DatabaseId: /Root, PoolId: sample_pool_id, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool sample_pool_id not found or you don't have access permissions } 2025-05-07T08:56:50.547182Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool sample_pool_id, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool sample_pool_id not found or you don't have access permissions } 2025-05-07T08:56:50.554230Z node 6 :KQP_WORKLOAD_SERVICE DEBUG: pool_handlers_actors.cpp:294: [WorkloadService] [TPoolHandlerActorBase] ActorId: [6:7501625119544442717:2363], DatabaseId: /Root, PoolId: default, Got delete notification 2025-05-07T08:56:50.554331Z node 6 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:157: [WorkloadService] [Service] Recieved subscription request, DatabaseId: /Root, PoolId: default 2025-05-07T08:56:50.554406Z node 6 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:185: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7501625141019279545:2467], DatabaseId: /Root, PoolId: default, Start pool fetching 2025-05-07T08:56:50.555651Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7501625141019279545:2467], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:56:50.555757Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:56:50.561734Z node 6 :KQP_SESSION INFO: kqp_session_actor.cpp:2473: SessionId: ydb://session/3?node_id=6&id=NTY0ZjNhMDAtZTI4YmYzNS04M2Q2MGJjMi1kY2FkNjgwYg==, ActorId: [6:7501625119544442432:2334], ActorState: ExecuteState, TraceId: 01jtmzcgjddbrjy8e3pe6mr3sk, Cleanup start, isFinal: 0 CleanupCtx: 1 TransactionsToBeAborted.size(): 0 WorkerId: [6:7501625141019279513:2334] WorkloadServiceCleanup: 0 2025-05-07T08:56:50.564165Z node 6 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2534: SessionId: ydb://session/3?node_id=6&id=NTY0ZjNhMDAtZTI4YmYzNS04M2Q2MGJjMi1kY2FkNjgwYg==, ActorId: [6:7501625119544442432:2334], ActorState: CleanupState, TraceId: 01jtmzcgjddbrjy8e3pe6mr3sk, EndCleanup, isFinal: 0 2025-05-07T08:56:50.564236Z node 6 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2270: SessionId: ydb://session/3?node_id=6&id=NTY0ZjNhMDAtZTI4YmYzNS04M2Q2MGJjMi1kY2FkNjgwYg==, ActorId: [6:7501625119544442432:2334], ActorState: CleanupState, TraceId: 01jtmzcgjddbrjy8e3pe6mr3sk, Sent query response back to proxy, proxyRequestId: 18, proxyId: [6:7501625093774638092:2126] Wait pool handlers 0.000020s: number handlers = 2 Wait pool handlers 1.003746s: number handlers = 2 Wait pool handlers 2.007745s: number handlers = 2 Wait pool handlers 3.008075s: number handlers = 2 2025-05-07T08:56:54.529021Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7261: Cannot get console configs 2025-05-07T08:56:54.529077Z node 6 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded Wait pool handlers 4.009283s: number handlers = 2 Wait pool handlers 5.011750s: number handlers = 2 Wait pool handlers 6.011894s: number handlers = 2 Wait pool handlers 7.015740s: number handlers = 2 Wait pool handlers 8.019745s: number handlers = 2 Wait pool handlers 9.022252s: number handlers = 2 Wait pool handlers 10.022988s: number handlers = 2 Wait pool handlers 11.023149s: number handlers = 2 Wait pool handlers 12.023832s: number handlers = 2 2025-05-07T08:57:03.366635Z node 6 :KQP_WORKLOAD_SERVICE TRACE: pool_handlers_actors.cpp:689: [WorkloadService] [TPoolHandlerActorBase] ActorId: [6:7501625119544442521:2339], DatabaseId: /Root, PoolId: sample_pool_id, Try to start scheduled refresh Wait pool handlers 13.024996s: number handlers = 2 Wait pool handlers 14.029200s: number handlers = 2 Wait pool handlers 15.030627s: number handlers = 2 Wait pool handlers 16.032231s: number handlers = 2 Wait pool handlers 17.034057s: number handlers = 2 Wait pool handlers 18.039758s: number handlers = 2 Wait pool handlers 19.040106s: number handlers = 2 Wait pool handlers 20.046075s: number handlers = 2 Wait pool handlers 21.047799s: number handlers = 2 Wait pool handlers 22.049355s: number handlers = 2 Wait pool handlers 23.051758s: number handlers = 2 Wait pool handlers 24.051908s: number handlers = 2 Wait pool handlers 25.055611s: number handlers = 2 Wait pool handlers 26.055931s: number handlers = 2 Wait pool handlers 27.059764s: number handlers = 2 Wait pool handlers 28.061461s: number handlers = 2 Wait pool handlers 29.063989s: number handlers = 2 Wait pool handlers 30.067752s: number handlers = 2 Wait pool handlers 31.068342s: number handlers = 2 Wait pool handlers 32.071551s: number handlers = 2 Wait pool handlers 33.071710s: number handlers = 2 Wait pool handlers 34.073485s: number handlers = 2 Wait pool handlers 35.075434s: number handlers = 2 Wait pool handlers 36.076330s: number handlers = 2 Wait pool handlers 37.077924s: number 
handlers = 2 Wait pool handlers 38.080140s: number handlers = 2 Wait pool handlers 39.084349s: number handlers = 2 Wait pool handlers 40.087994s: number handlers = 2 Wait pool handlers 41.091764s: number handlers = 2 Wait pool handlers 42.091981s: number handlers = 2 Wait pool handlers 43.095866s: number handlers = 2 Wait pool handlers 44.098067s: number handlers = 2 Wait pool handlers 45.101841s: number handlers = 2 Wait pool handlers 46.103664s: number handlers = 2 Wait pool handlers 47.107760s: number handlers = 2 Wait pool handlers 48.107963s: number handlers = 2 Wait pool handlers 49.111223s: number handlers = 2 Wait pool handlers 50.111765s: number handlers = 2 Wait pool handlers 51.113723s: number handlers = 2 Wait pool handlers 52.115763s: number handlers = 2 Wait pool handlers 53.116858s: number handlers = 2 Wait pool handlers 54.119768s: number handlers = 2 Wait pool handlers 55.124092s: number handlers = 2 Wait pool handlers 56.127362s: number handlers = 2 Wait pool handlers 57.127720s: number handlers = 2 Wait pool handlers 58.131760s: number handlers = 2 Wait pool handlers 59.132005s: number handlers = 2 Wait pool handlers 60.133078s: number handlers = 2 Wait pool handlers 61.133755s: number handlers = 2 Wait pool handlers 62.135830s: number handlers = 2 Wait pool handlers 63.139761s: number handlers = 2 Wait pool handlers 64.139990s: number handlers = 2 Wait pool handlers 65.140153s: number handlers = 2 Wait pool handlers 66.143442s: number handlers = 2 Wait pool handlers 67.143850s: number handlers = 2 Wait pool handlers 68.144435s: number handlers = 2 Wait pool handlers 69.146064s: number handlers = 2 Wait pool handlers 70.146202s: number handlers = 2 Wait pool handlers 71.146355s: number handlers = 2 Wait pool handlers 72.146494s: number handlers = 2 Wait pool handlers 73.147786s: number handlers = 2 Wait pool handlers 74.151763s: number handlers = 2 Wait pool handlers 75.151893s: number handlers = 2 Wait pool handlers 76.155767s: number handlers = 2 Wait pool handlers 77.156585s: number handlers = 2 Wait pool handlers 78.156733s: number handlers = 2 Wait pool handlers 79.158402s: number handlers = 2 Wait pool handlers 80.159381s: number handlers = 2 Wait pool handlers 81.163753s: number handlers = 2 Wait pool handlers 82.169578s: number handlers = 2 Wait pool handlers 83.169869s: number handlers = 2 Wait pool handlers 84.170012s: number handlers = 2 2025-05-07T08:58:15.379549Z node 6 :KQP_WORKLOAD_SERVICE INFO: pool_handlers_actors.cpp:178: [WorkloadService] [TPoolHandlerActorBase] ActorId: [6:7501625119544442521:2339], DatabaseId: /Root, PoolId: sample_pool_id, Got stop pool handler request, waiting for 0 requests 2025-05-07T08:58:15.379554Z node 6 :KQP_WORKLOAD_SERVICE INFO: pool_handlers_actors.cpp:178: [WorkloadService] [TPoolHandlerActorBase] ActorId: [6:7501625119544442717:2363], DatabaseId: /Root, PoolId: default, Got stop pool handler request, waiting for 0 requests 2025-05-07T08:58:15.379755Z node 6 :KQP_WORKLOAD_SERVICE TRACE: kqp_workload_service.cpp:425: [WorkloadService] [Service] Got stop pool handler response, DatabaseId: /Root, PoolId: sample_pool_id 2025-05-07T08:58:15.379783Z node 6 :KQP_WORKLOAD_SERVICE TRACE: kqp_workload_service.cpp:425: [WorkloadService] [Service] Got stop pool handler response, DatabaseId: /Root, PoolId: default 2025-05-07T08:58:15.752003Z node 6 :KQP_SESSION INFO: kqp_session_actor.cpp:2315: SessionId: ydb://session/3?node_id=6&id=NTY0ZjNhMDAtZTI4YmYzNS04M2Q2MGJjMi1kY2FkNjgwYg==, ActorId: [6:7501625119544442432:2334], ActorState: 
ReadyState, Session closed due to explicit close event 2025-05-07T08:58:15.752082Z node 6 :KQP_SESSION INFO: kqp_session_actor.cpp:2473: SessionId: ydb://session/3?node_id=6&id=NTY0ZjNhMDAtZTI4YmYzNS04M2Q2MGJjMi1kY2FkNjgwYg==, ActorId: [6:7501625119544442432:2334], ActorState: ReadyState, Cleanup start, isFinal: 1 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-05-07T08:58:15.752129Z node 6 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2534: SessionId: ydb://session/3?node_id=6&id=NTY0ZjNhMDAtZTI4YmYzNS04M2Q2MGJjMi1kY2FkNjgwYg==, ActorId: [6:7501625119544442432:2334], ActorState: ReadyState, EndCleanup, isFinal: 1 2025-05-07T08:58:15.752167Z node 6 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2546: SessionId: ydb://session/3?node_id=6&id=NTY0ZjNhMDAtZTI4YmYzNS04M2Q2MGJjMi1kY2FkNjgwYg==, ActorId: [6:7501625119544442432:2334], ActorState: unknown state, Cleanup temp tables: 0 2025-05-07T08:58:15.752305Z node 6 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2637: SessionId: ydb://session/3?node_id=6&id=NTY0ZjNhMDAtZTI4YmYzNS04M2Q2MGJjMi1kY2FkNjgwYg==, ActorId: [6:7501625119544442432:2334], ActorState: unknown state, Session actor destroyed |91.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TestKinesisHttpProxy::GoodRequestGetRecords [GOOD] |91.1%| [TA] $(B)/ydb/core/tx/datashard/ut_write/test-results/unittest/{meta.json ... results_accumulator.log} |91.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/sequenceshard/ut/ydb-core-tx-sequenceshard-ut |91.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/sequenceshard/ut/ydb-core-tx-sequenceshard-ut >> TSchemeShardTTLTests::CreateTableShouldFailOnWrongUnit-EnableTablePgTypes-true >> TSchemeShardTTLTests::ShouldSkipDroppedColumn >> TestKinesisHttpProxy::GoodRequestGetRecordsCbor ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> TTableProfileTests::ExplicitPartitionsWrongKeyType [GOOD] Test command err: 2025-05-07T08:57:35.896023Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501625338297407117:2212];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:57:35.923330Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0028fa/r3tmp/tmp0KvNwk/pdisk_1.dat 2025-05-07T08:57:36.972184Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:57:37.014095Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:57:37.022782Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:57:37.022883Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:57:37.035718Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 4784, node 1 2025-05-07T08:57:37.397767Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 
2025-05-07T08:57:37.397788Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:57:37.397796Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:57:37.397925Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:19354 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:57:37.847795Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... TClient is connected to server localhost:19354 2025-05-07T08:57:38.410901Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateSubDomain, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:57:38.470178Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T08:57:38.982439Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7501625350810989342:2093];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:57:38.982531Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/ydb_ut_tenant/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-05-07T08:57:39.171216Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:57:39.181401Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:57:39.216815Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 3 Cookie 3 2025-05-07T08:57:39.230663Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:19354 2025-05-07T08:57:40.137644Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 2025-05-07T08:57:40.898124Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501625338297407117:2212];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:57:40.898226Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; TClient is connected to server localhost:19354 TClient::Ls request: /Root/ydb_ut_tenant/table-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "table-1" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710660 CreateStep: 1746608260720 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "table-1" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 ... 
(TRUNCATED) 2025-05-07T08:57:41.283977Z node 1 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 3 2025-05-07T08:57:41.284745Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-05-07T08:57:46.626383Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7501625381862542935:2073];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:57:46.626436Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0028fa/r3tmp/tmp942YKu/pdisk_1.dat 2025-05-07T08:57:47.029794Z node 4 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:57:47.087545Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:57:47.087639Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:57:47.099605Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 5278, node 4 2025-05-07T08:57:47.466529Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:57:47.466551Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:57:47.466558Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:57:47.466679Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:22135 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:57:47.915215Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 
TClient is connected to server localhost:22135 2025-05-07T08:57:48.424067Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateSubDomain, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-05-07T08:57:48.483830Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710659:0, at schemeshard: 72057594046644480 2025-05-07T08:57:49.006741Z node 6 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[6:7501625397012561352:2073];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:57:49.006820Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/ydb_ut_tenant/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-05-07T08:57:49.153964Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:57:49.154089Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:57:49.167909Z node 4 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 6 Cookie 6 2025-05-07T08:57:49.173501Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:22135 2025-05-07T08:57:49.786 ... 78Z node 7 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 9 2025-05-07T08:57:59.293459Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-05-07T08:58:03.095491Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7501625457857089238:2075];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:58:03.096434Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0028fa/r3tmp/tmpcnwcHw/pdisk_1.dat 2025-05-07T08:58:03.274094Z node 10 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:58:03.315419Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:58:03.315516Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:58:03.320198Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 7706, node 10 2025-05-07T08:58:03.410765Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:58:03.410793Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:58:03.410804Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from 
file: (empty maybe) 2025-05-07T08:58:03.410953Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:17947 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:58:03.764936Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... TClient is connected to server localhost:17947 2025-05-07T08:58:04.241460Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateSubDomain, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:58:04.269332Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T08:58:04.779870Z node 12 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[12:7501625459992180268:2140];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:58:04.780754Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/ydb_ut_tenant/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-05-07T08:58:04.820592Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:58:04.820687Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:58:04.826336Z node 10 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 12 Cookie 12 2025-05-07T08:58:04.827370Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:17947 2025-05-07T08:58:05.195464Z node 10 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 12 2025-05-07T08:58:05.195934Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-05-07T08:58:05.782380Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/ydb_ut_tenant/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:58:06.790807Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/ydb_ut_tenant/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:58:07.792618Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/ydb_ut_tenant/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:58:10.777881Z node 13 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[13:7501625487007367276:2076];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:58:10.777997Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0028fa/r3tmp/tmpBkDfNP/pdisk_1.dat 2025-05-07T08:58:11.136219Z node 13 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:58:11.191216Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:58:11.191360Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:58:11.195032Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 64803, node 13 2025-05-07T08:58:11.414962Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:58:11.414987Z node 13 :NET_CLASSIFIER WARN: 
net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:58:11.414993Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:58:11.415123Z node 13 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:2717 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:58:11.817226Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... TClient is connected to server localhost:2717 2025-05-07T08:58:12.363320Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateSubDomain, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:58:12.419585Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T08:58:12.939681Z node 15 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[15:7501625495584314477:2149];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:58:12.940028Z node 15 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/ydb_ut_tenant/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-05-07T08:58:13.012787Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(15, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:58:13.012930Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(15, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:58:13.030849Z node 13 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 15 Cookie 15 2025-05-07T08:58:13.038751Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(15, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:2717 2025-05-07T08:58:13.555592Z node 13 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [13:7501625499892270546:2911] txid# 281474976715660, issues: { message: "Error at split boundary 0: Value of type Uint64 expected in tuple at position 1" severity: 1 } 2025-05-07T08:58:13.587983Z node 13 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 15 2025-05-07T08:58:13.588536Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(15, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-05-07T08:58:17.940227Z node 15 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[15:7501625495584314477:2149];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:58:17.940355Z node 15 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/ydb_ut_tenant/.metadata/initialization/migrations;error=timeout; |91.1%| [TA] $(B)/ydb/core/kqp/ut/idx_test/test-results/unittest/{meta.json ... results_accumulator.log} |91.1%| [TA] {RESULT} $(B)/ydb/core/tx/datashard/ut_write/test-results/unittest/{meta.json ... results_accumulator.log} |91.1%| [LD] {RESULT} $(B)/ydb/core/tx/sequenceshard/ut/ydb-core-tx-sequenceshard-ut |91.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest |91.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/sequenceproxy/ut/ydb-core-tx-sequenceproxy-ut |91.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/sequenceproxy/ut/ydb-core-tx-sequenceproxy-ut >> TSchemeShardTTLTests::ConditionalErase [GOOD] |91.1%| [LD] {RESULT} $(B)/ydb/core/tx/sequenceproxy/ut/ydb-core-tx-sequenceproxy-ut >> AsyncIndexChangeExchange::ShouldRemoveRecordsAfterCancelIndexBuild [GOOD] >> TSchemeShardTTLTests::RacyAlterTableAndConditionalErase >> AsyncIndexChangeExchange::ShouldDeliverChangesOnSplitMerge >> TSchemeShardTTLTests::AlterTableShouldFailOnSimultaneousDropColumnAndEnableTTL >> TYqlDateTimeTests::DateKey [GOOD] |91.1%| [TA] {RESULT} $(B)/ydb/core/kqp/ut/idx_test/test-results/unittest/{meta.json ... 
results_accumulator.log} ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLTests::ConditionalErase [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140] 2025-05-07T08:58:17.363657Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:58:17.363762Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:58:17.369089Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:58:17.369172Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:58:17.369229Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:58:17.369263Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:58:17.369395Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:58:17.369514Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:58:17.370415Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:58:17.370831Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:58:17.540141Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:58:17.540203Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:58:17.572094Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:58:17.572389Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:58:17.572542Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:58:17.610690Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:58:17.610955Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:58:17.611883Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 
72057594046678944 2025-05-07T08:58:17.613502Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:58:17.629119Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:58:17.630691Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:58:17.631139Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:58:17.631249Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:58:17.631302Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:58:17.631345Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:58:17.631588Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:58:17.650456Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T08:58:17.813855Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:58:17.814132Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:17.814423Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:58:17.814726Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:58:17.814794Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:17.818952Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:17.819084Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:58:17.819297Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at 
schemeshard: 72057594046678944 2025-05-07T08:58:17.819343Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:58:17.819393Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:58:17.819423Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:58:17.822813Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:17.822905Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:58:17.822949Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:58:17.825225Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:17.825295Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:17.825345Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:58:17.825416Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:58:17.829285Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:58:17.833040Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:58:17.833271Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:58:17.834391Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:17.834573Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:58:17.834649Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:58:17.834974Z node 
1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:58:17.835059Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:58:17.835250Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:58:17.835325Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:58:17.837886Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:58:17.837943Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:58:17.838200Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:58:17.838243Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... amp: 1600463040219000 ColumnUnit: UNIT_AUTO } SchemaVersion: 1 Limits { BatchMaxBytes: 512000 BatchMinKeys: 1 BatchMaxKeys: 256 }, at schemeshard: 72057594046678944 2025-05-07T08:58:22.207559Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__conditional_erase.cpp:213: Run conditional erase, tabletId: 72075186233409551, request: TableId: 7 Expiration { ColumnId: 2 WallClockTimestamp: 1600466640219000 ColumnUnit: UNIT_MICROSECONDS } SchemaVersion: 1 Limits { BatchMaxBytes: 512000 BatchMinKeys: 1 BatchMaxKeys: 256 }, at schemeshard: 72057594046678944 2025-05-07T08:58:22.208004Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6620: Conditional erase accepted: tabletId: 72075186233409550, at schemeshard: 72057594046678944 2025-05-07T08:58:22.208595Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6620: Conditional erase accepted: tabletId: 72075186233409547, at schemeshard: 72057594046678944 2025-05-07T08:58:22.209209Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6620: Conditional erase accepted: tabletId: 72075186233409548, at schemeshard: 72057594046678944 2025-05-07T08:58:22.209396Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6620: Conditional erase accepted: tabletId: 72075186233409549, at schemeshard: 72057594046678944 2025-05-07T08:58:22.209480Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6620: Conditional erase accepted: tabletId: 72075186233409551, at schemeshard: 72057594046678944 2025-05-07T08:58:22.209620Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6620: Conditional erase accepted: tabletId: 72075186233409546, at schemeshard: 72057594046678944 2025-05-07T08:58:22.209699Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:346: TTxScheduleConditionalErase Execute: at schemeshard: 72057594046678944 2025-05-07T08:58:22.209738Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:396: Successful conditional erase: tabletId: 72075186233409547, at schemeshard: 72057594046678944 
2025-05-07T08:58:22.214628Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:346: TTxScheduleConditionalErase Execute: at schemeshard: 72057594046678944 2025-05-07T08:58:22.214677Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:396: Successful conditional erase: tabletId: 72075186233409550, at schemeshard: 72057594046678944 2025-05-07T08:58:22.215849Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:346: TTxScheduleConditionalErase Execute: at schemeshard: 72057594046678944 2025-05-07T08:58:22.215884Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:396: Successful conditional erase: tabletId: 72075186233409549, at schemeshard: 72057594046678944 2025-05-07T08:58:22.218815Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:346: TTxScheduleConditionalErase Execute: at schemeshard: 72057594046678944 2025-05-07T08:58:22.218886Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:396: Successful conditional erase: tabletId: 72075186233409548, at schemeshard: 72057594046678944 2025-05-07T08:58:22.219387Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:346: TTxScheduleConditionalErase Execute: at schemeshard: 72057594046678944 2025-05-07T08:58:22.219425Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:396: Successful conditional erase: tabletId: 72075186233409546, at schemeshard: 72057594046678944 2025-05-07T08:58:22.219853Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:452: TTxScheduleConditionalErase Complete: at schemeshard: 72057594046678944 2025-05-07T08:58:22.220025Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:56: TTxRunConditionalErase DoExecute: at schemeshard: 72057594046678944 2025-05-07T08:58:22.220078Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__conditional_erase.cpp:106: Skip conditional erase: shardIdx: 72057594046678944:2, run at: 2020-09-18T23:04:00.219000Z, at schemeshard: 72057594046678944 2025-05-07T08:58:22.223044Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:452: TTxScheduleConditionalErase Complete: at schemeshard: 72057594046678944 2025-05-07T08:58:22.223159Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:452: TTxScheduleConditionalErase Complete: at schemeshard: 72057594046678944 2025-05-07T08:58:22.223208Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:452: TTxScheduleConditionalErase Complete: at schemeshard: 72057594046678944 2025-05-07T08:58:22.223251Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:452: TTxScheduleConditionalErase Complete: at schemeshard: 72057594046678944 2025-05-07T08:58:22.223289Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:189: TTxRunConditionalErase DoComplete: at schemeshard: 72057594046678944 2025-05-07T08:58:22.223359Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:56: TTxRunConditionalErase DoExecute: at schemeshard: 72057594046678944 2025-05-07T08:58:22.223399Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__conditional_erase.cpp:106: Skip conditional erase: shardIdx: 72057594046678944:5, run at: 2020-09-18T23:04:00.220000Z, at schemeshard: 72057594046678944 2025-05-07T08:58:22.223450Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:189: TTxRunConditionalErase DoComplete: at schemeshard: 72057594046678944 2025-05-07T08:58:22.223496Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__conditional_erase.cpp:56: TTxRunConditionalErase DoExecute: at schemeshard: 72057594046678944 2025-05-07T08:58:22.223521Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__conditional_erase.cpp:106: Skip conditional erase: shardIdx: 72057594046678944:4, run at: 2020-09-18T23:04:00.220000Z, at schemeshard: 72057594046678944 2025-05-07T08:58:22.223625Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:189: TTxRunConditionalErase DoComplete: at schemeshard: 72057594046678944 2025-05-07T08:58:22.223674Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:56: TTxRunConditionalErase DoExecute: at schemeshard: 72057594046678944 2025-05-07T08:58:22.223702Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__conditional_erase.cpp:106: Skip conditional erase: shardIdx: 72057594046678944:3, run at: 2020-09-18T23:04:00.221000Z, at schemeshard: 72057594046678944 2025-05-07T08:58:22.223727Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:189: TTxRunConditionalErase DoComplete: at schemeshard: 72057594046678944 2025-05-07T08:58:22.223762Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:56: TTxRunConditionalErase DoExecute: at schemeshard: 72057594046678944 2025-05-07T08:58:22.223790Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__conditional_erase.cpp:106: Skip conditional erase: shardIdx: 72057594046678944:1, run at: 2020-09-18T23:04:00.221000Z, at schemeshard: 72057594046678944 2025-05-07T08:58:22.223815Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:189: TTxRunConditionalErase DoComplete: at schemeshard: 72057594046678944 2025-05-07T08:58:22.294541Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:588: Started TEvPersistStats at tablet 72057594046678944, queue size# 5 2025-05-07T08:58:22.294719Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 5 shard idx 72057594046678944:4 data size 43 row count 1 2025-05-07T08:58:22.294823Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186233409549 maps to shardIdx: 72057594046678944:4 followerId=0, pathId: [OwnerId: 72057594046678944, LocalPathId: 5], pathId map=TTLEnabledTable4, is column=0, is olap=0, RowCount 1, DataSize 43 2025-05-07T08:58:22.294948Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:483: Do not want to split tablet 72075186233409549 2025-05-07T08:58:22.295022Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 2 shard idx 72057594046678944:1 data size 0 row count 0 2025-05-07T08:58:22.295065Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186233409546 maps to shardIdx: 72057594046678944:1 followerId=0, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], pathId map=TTLEnabledTable1, is column=0, is olap=0, RowCount 0, DataSize 0 2025-05-07T08:58:22.295101Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:483: Do not want to split tablet 72075186233409546 2025-05-07T08:58:22.295129Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 3 shard idx 72057594046678944:2 data size 0 row count 0 2025-05-07T08:58:22.295165Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from 
datashardId(TabletID)=72075186233409547 maps to shardIdx: 72057594046678944:2 followerId=0, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], pathId map=TTLEnabledTable2, is column=0, is olap=0, RowCount 0, DataSize 0 2025-05-07T08:58:22.295195Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:483: Do not want to split tablet 72075186233409547 2025-05-07T08:58:22.295222Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 4 shard idx 72057594046678944:3 data size 603 row count 2 2025-05-07T08:58:22.295258Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186233409548 maps to shardIdx: 72057594046678944:3 followerId=0, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], pathId map=TTLEnabledTable3, is column=0, is olap=0, RowCount 2, DataSize 603 2025-05-07T08:58:22.295314Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:483: Do not want to split tablet 72075186233409548 2025-05-07T08:58:22.295347Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 6 shard idx 72057594046678944:5 data size 627 row count 2 2025-05-07T08:58:22.295384Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186233409550 maps to shardIdx: 72057594046678944:5 followerId=0, pathId: [OwnerId: 72057594046678944, LocalPathId: 6], pathId map=TTLEnabledTable5, is column=0, is olap=0, RowCount 2, DataSize 627, with borrowed parts 2025-05-07T08:58:22.295443Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:483: Do not want to split tablet 72075186233409550 2025-05-07T08:58:22.309815Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:346: TTxScheduleConditionalErase Execute: at schemeshard: 72057594046678944 2025-05-07T08:58:22.309883Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:396: Successful conditional erase: tabletId: 72075186233409551, at schemeshard: 72057594046678944 2025-05-07T08:58:22.313938Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:452: TTxScheduleConditionalErase Complete: at schemeshard: 72057594046678944 2025-05-07T08:58:22.314094Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:56: TTxRunConditionalErase DoExecute: at schemeshard: 72057594046678944 2025-05-07T08:58:22.314146Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__conditional_erase.cpp:106: Skip conditional erase: shardIdx: 72057594046678944:6, run at: 2020-09-18T23:04:00.223000Z, at schemeshard: 72057594046678944 2025-05-07T08:58:22.314208Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:189: TTxRunConditionalErase DoComplete: at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/backup_ut/unittest >> BackupRestore::TestAllPrimitiveTypes-UUID [GOOD] Test command err: 2025-05-07T08:56:43.830950Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501625113093865015:2219];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:56:43.831423Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # 
/home/runner/.ya/build/build_root/zvgn/0020db/r3tmp/tmpBXOHmb/pdisk_1.dat 2025-05-07T08:56:44.450350Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:56:44.495583Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:56:44.495706Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:56:44.503273Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 22324, node 1 2025-05-07T08:56:44.966675Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:56:44.966702Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:56:44.966715Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:56:44.966879Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:19164 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:56:45.317100Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 
Backup "/Root" to "/home/runner/.ya/build/build_root/zvgn/0020db/r3tmp/tmpCwQ2g8/"Create temporary directory "/Root/~backup_20250507T085645" in databaseProcess "/home/runner/.ya/build/build_root/zvgn/0020db/r3tmp/tmpCwQ2g8/dir"Create directory "/Root/~backup_20250507T085645/dir" in databaseWrite ACL into "/home/runner/.ya/build/build_root/zvgn/0020db/r3tmp/tmpCwQ2g8/dir/permissions.pb"Remove directory "/Root/~backup_20250507T085645/dir"2025-05-07T08:56:45.838050Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpRmDir, opId: 281474976710661:0, at schemeshard: 72057594046644480 Remove temporary directory "/Root/~backup_20250507T085645" in database2025-05-07T08:56:45.893538Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpRmDir, opId: 281474976710662:0, at schemeshard: 72057594046644480 Backup completed successfully2025-05-07T08:56:45.938459Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpRmDir, opId: 281474976710663:0, at schemeshard: 72057594046644480 Restore "/home/runner/.ya/build/build_root/zvgn/0020db/r3tmp/tmpCwQ2g8/" to "/Root"Resolved db base path: "/Root"Restore folder "/home/runner/.ya/build/build_root/zvgn/0020db/r3tmp/tmpCwQ2g8/" to "/Root"Process "/home/runner/.ya/build/build_root/zvgn/0020db/r3tmp/tmpCwQ2g8/dir"Restore empty directory "/home/runner/.ya/build/build_root/zvgn/0020db/r3tmp/tmpCwQ2g8/dir" to "/Root/dir"Restore ACL "/home/runner/.ya/build/build_root/zvgn/0020db/r3tmp/tmpCwQ2g8/dir" to "/Root/dir"Read ACL from "/home/runner/.ya/build/build_root/zvgn/0020db/r3tmp/tmpCwQ2g8/dir/permissions.pb"2025-05-07T08:56:46.083219Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710665:0, at schemeshard: 72057594046644480 Restore completed successfully 2025-05-07T08:56:49.947272Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7501625139056690112:2293];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:56:49.947329Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0020db/r3tmp/tmpgwyr1G/pdisk_1.dat 2025-05-07T08:56:50.199419Z node 4 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:56:50.250688Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:56:50.250775Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:56:50.264169Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 17862, node 4 2025-05-07T08:56:50.438316Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:56:50.438344Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to 
initialize from file: (empty maybe) 2025-05-07T08:56:50.438352Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:56:50.438488Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:30037 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:56:50.864891Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:56:53.822653Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7501625156236560121:2337], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:56:53.822802Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:56:54.149393Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-05-07T08:56:54.418999Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7501625160531527591:2348], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:56:54.419095Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:56:54.689878Z node 4 :CHANGE_EXCHANGE WARN: change_sender_cdc_stream.cpp:398: [CdcChangeSenderMain][72075186224037888:1][4:7501625160531527790:2360] Failed entry at 'ResolveTopic': entry# { Path: TableId: [72057594046644480:4:0] RequestType: ByTableId Operation: OpTopic RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo } 2025-05-07T08:56:54.755406Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7501625160531527894:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:56:54.755482Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:56:54.864365Z node 4 :CHANGE_EXCHANGE WARN: change_sender_cdc_stream.cpp:398: [CdcChangeSenderMain][72075186224037888:1][4:7501625160531528074:2381] Failed entry at 'ResolveTopic': entry# { Path: TableId: [72057594046644480:6:0] RequestType: ByTableId Operation: OpTopic RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo } 2025-05-07T08:56:54.907251Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7501625160531528170:2390], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:56:54.907328Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:56:54.947864Z node 4 :METADATA_PROVI ... arentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:57:31.816143Z node 19 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:57:35.902125Z node 19 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[19:7501625312812413923:2238];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:57:35.902226Z node 19 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:57:37.520109Z node 19 :KQP_PROXY ERROR: kqp_proxy_service.cpp:1461: TraceId: "01jtmzds32c3rhs73tgrcsx618", Request deadline has expired for 0.520093s seconds (NYdb::Dev::TContractViolation) Attempt to use result with not successfull status. TCreateSessionResult::GetSession 2025-05-07T08:57:39.774721Z node 22 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[22:7501625353787501080:2192];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0020db/r3tmp/tmpaZpR61/pdisk_1.dat 2025-05-07T08:57:40.104943Z node 22 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-05-07T08:57:40.319449Z node 22 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:57:40.363033Z node 22 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(22, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:57:40.363193Z node 22 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(22, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:57:40.369125Z node 22 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(22, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 15392, node 22 2025-05-07T08:57:40.606550Z node 22 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:57:40.606578Z node 22 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:57:40.606589Z node 22 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:57:40.606759Z node 22 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8052 WaitRootIsUp 
'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-05-07T08:57:40.927845Z node 22 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-05-07T08:57:44.692697Z node 22 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[22:7501625353787501080:2192];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:57:44.692805Z node 22 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:57:45.169211Z node 22 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [22:7501625379557305845:2340], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:57:45.169308Z node 22 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [22:7501625379557305856:2343], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:57:45.169387Z node 22 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:57:45.174526Z node 22 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480 2025-05-07T08:57:45.220615Z node 22 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [22:7501625379557305859:2344], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-05-07T08:57:45.317985Z node 22 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [22:7501625379557305932:2690] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:57:45.382217Z node 22 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 2025-05-07T08:57:45.659364Z node 22 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715661. Ctx: { TraceId: 01jtmze69y3c6gnq5wc81rnmvg, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=22&id=NzU3ZWUyNmYtYTQ2ZjIwYWItMTZiNjQzOC00NzU1ODVkYw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:57:45.832891Z node 22 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715662. Ctx: { TraceId: 01jtmze6eh37s9f5zceb28nt4j, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=22&id=NzU3ZWUyNmYtYTQ2ZjIwYWItMTZiNjQzOC00NzU1ODVkYw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root Backup "/Root" to "/home/runner/.ya/build/build_root/zvgn/0020db/r3tmp/tmpHIoY58/"Create temporary directory "/Root/~backup_20250507T085745" in databaseProcess "/home/runner/.ya/build/build_root/zvgn/0020db/r3tmp/tmpHIoY58/UuidTable"Copy tables: { src: "/Root/UuidTable", dst: "/Root/~backup_20250507T085745/UuidTable" }Backup table "/Root/~backup_20250507T085745/UuidTable" to "/home/runner/.ya/build/build_root/zvgn/0020db/r3tmp/tmpHIoY58/UuidTable"Describe table "/Root/~backup_20250507T085745/UuidTable"Write scheme into "/home/runner/.ya/build/build_root/zvgn/0020db/r3tmp/tmpHIoY58/UuidTable/scheme.pb"Describe table "/Root/UuidTable"Write ACL into "/home/runner/.ya/build/build_root/zvgn/0020db/r3tmp/tmpHIoY58/UuidTable/permissions.pb"Read table "/Root/~backup_20250507T085745/UuidTable"Write data into "/home/runner/.ya/build/build_root/zvgn/0020db/r3tmp/tmpHIoY58/UuidTable/data_00.csv"Drop table "/Root/~backup_20250507T085745/UuidTable"Remove temporary directory "/Root/~backup_20250507T085745" in database2025-05-07T08:57:46.436318Z node 22 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 22, TabletId: 72075186224037889 not found 2025-05-07T08:57:46.443004Z node 22 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpRmDir, opId: 281474976715668:0, at schemeshard: 72057594046644480 Backup completed successfullyRestore "/home/runner/.ya/build/build_root/zvgn/0020db/r3tmp/tmpHIoY58/" to "/Root"Resolved db base path: "/Root"2025-05-07T08:57:46.571721Z node 22 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 22, TabletId: 72075186224037888 not found Restore folder "/home/runner/.ya/build/build_root/zvgn/0020db/r3tmp/tmpHIoY58/" to "/Root"Process "/home/runner/.ya/build/build_root/zvgn/0020db/r3tmp/tmpHIoY58/UuidTable"Read scheme from "/home/runner/.ya/build/build_root/zvgn/0020db/r3tmp/tmpHIoY58/UuidTable/scheme.pb"Restore table "/home/runner/.ya/build/build_root/zvgn/0020db/r3tmp/tmpHIoY58/UuidTable" to 
"/Root/UuidTable"2025-05-07T08:57:46.609539Z node 22 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715670:0, at schemeshard: 72057594046644480 Created "/Root/UuidTable"Read data from "/home/runner/.ya/build/build_root/zvgn/0020db/r3tmp/tmpHIoY58/UuidTable/data_00.csv"2025-05-07T08:57:46.810749Z node 22 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715671. Ctx: { TraceId: 01jtmze7f28zbdq8dsbr6txpdf, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=22&id=OWJmZWI4YTMtODFkYTgyNS1iYjcyYzVlMS04NWNmMjExNA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root Restore ACL "/home/runner/.ya/build/build_root/zvgn/0020db/r3tmp/tmpHIoY58/UuidTable" to "/Root/UuidTable"Read ACL from "/home/runner/.ya/build/build_root/zvgn/0020db/r3tmp/tmpHIoY58/UuidTable/permissions.pb"2025-05-07T08:57:46.842137Z node 22 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715672:0, at schemeshard: 72057594046644480 Restore completed successfully2025-05-07T08:57:47.002780Z node 22 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715673. Ctx: { TraceId: 01jtmze7ksfrrp7a8xye0g28y8, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=22&id=NzU3ZWUyNmYtYTQ2ZjIwYWItMTZiNjQzOC00NzU1ODVkYw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root >> BackupRestoreS3::TestAllIndexTypes-EIndexTypeGlobal [GOOD] >> BackupRestoreS3::TestAllIndexTypes-EIndexTypeGlobalAsync >> TExternalDataSourceTest::CreateExternalDataSource >> TSchemeShardTTLTests::AlterTableShouldFailOnSimultaneousDropColumnAndEnableTTL [GOOD] |91.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kafka_proxy/ut/ydb-core-kafka_proxy-ut |91.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kafka_proxy/ut/ydb-core-kafka_proxy-ut |91.1%| [LD] {RESULT} $(B)/ydb/core/kafka_proxy/ut/ydb-core-kafka_proxy-ut ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLTests::AlterTableShouldFailOnSimultaneousDropColumnAndEnableTTL [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140] 2025-05-07T08:58:23.798486Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:58:23.798566Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:58:23.798611Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:58:23.798651Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:58:23.798696Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:58:23.798739Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:58:23.798803Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:58:23.798897Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:58:23.799675Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:58:23.800012Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:58:23.901743Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:58:23.901811Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:58:23.922337Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:58:23.922565Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:58:23.922768Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:58:23.929479Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:58:23.929853Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:58:23.930591Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:23.930827Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:58:23.934297Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:58:23.935728Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:58:23.935797Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:58:23.935878Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:58:23.935926Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:58:23.935968Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:58:23.936266Z node 1 :FLAT_TX_SCHEMESHARD 
INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:58:23.946962Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T08:58:24.086605Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:58:24.086847Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:24.087100Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:58:24.087368Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:58:24.087431Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:24.091130Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:24.091291Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:58:24.091509Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:24.091567Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:58:24.091626Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:58:24.091662Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:58:24.094173Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:24.094238Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:58:24.094285Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:58:24.097853Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:24.097916Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 
1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:24.097951Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:58:24.098036Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:58:24.100647Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:58:24.102724Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:58:24.102958Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:58:24.103903Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:24.104068Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:58:24.104124Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:58:24.104402Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:58:24.104451Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:58:24.104660Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:58:24.104758Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:58:24.107178Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:58:24.107241Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:58:24.107473Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 
2025-05-07T08:58:24.107521Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... ublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:58:24.370952Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:205:2207], at schemeshard: 72057594046678944, txId: 101, path id: 1 2025-05-07T08:58:24.371025Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:205:2207], at schemeshard: 72057594046678944, txId: 101, path id: 2 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 2025-05-07T08:58:24.371602Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 101:0, at schemeshard: 72057594046678944 2025-05-07T08:58:24.371661Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1036: NTableState::TProposedWaitParts operationId# 101:0 ProgressState at tablet: 72057594046678944 2025-05-07T08:58:24.373334Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 101 2025-05-07T08:58:24.373446Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 101 2025-05-07T08:58:24.373503Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 101 2025-05-07T08:58:24.373565Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 5 2025-05-07T08:58:24.373609Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-05-07T08:58:24.375263Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 101 2025-05-07T08:58:24.375358Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 101 2025-05-07T08:58:24.375395Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 101 2025-05-07T08:58:24.375428Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 3 2025-05-07T08:58:24.375464Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-05-07T08:58:24.375531Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 
101, ready parts: 0/1, is published: true FAKE_COORDINATOR: Erasing txId 101 2025-05-07T08:58:24.379621Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6245: Handle TEvProposeTransactionResult, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 101 Step: 5000002 OrderId: 101 ExecLatency: 0 ProposeLatency: 2 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 4100 } } 2025-05-07T08:58:24.379708Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation FindRelatedPartByTabletId, TxId: 101, tablet: 72075186233409546, partId: 0 2025-05-07T08:58:24.379881Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 101:0, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 101 Step: 5000002 OrderId: 101 ExecLatency: 0 ProposeLatency: 2 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 4100 } } 2025-05-07T08:58:24.379989Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_part.cpp:108: HandleReply TEvDataShard::TEvProposeTransactionResult Ignore message: tablet# 72057594046678944, ev# TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 101 Step: 5000002 OrderId: 101 ExecLatency: 0 ProposeLatency: 2 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 4100 } } 2025-05-07T08:58:24.381388Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5472: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 305 RawX2: 4294969588 } Origin: 72075186233409546 State: 2 TxId: 101 Step: 0 Generation: 2 2025-05-07T08:58:24.381449Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation FindRelatedPartByTabletId, TxId: 101, tablet: 72075186233409546, partId: 0 2025-05-07T08:58:24.381647Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 101:0, at schemeshard: 72057594046678944, message: Source { RawX1: 305 RawX2: 4294969588 } Origin: 72075186233409546 State: 2 TxId: 101 Step: 0 Generation: 2 2025-05-07T08:58:24.381708Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1005: NTableState::TProposedWaitParts operationId# 101:0 HandleReply TEvSchemaChanged at tablet: 72057594046678944 2025-05-07T08:58:24.381798Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1009: NTableState::TProposedWaitParts operationId# 101:0 HandleReply TEvSchemaChanged at tablet: 72057594046678944 message: Source { RawX1: 305 RawX2: 4294969588 } Origin: 72075186233409546 State: 2 TxId: 101 Step: 0 Generation: 2 2025-05-07T08:58:24.381881Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:655: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 101:0, shardIdx: 72057594046678944:1, datashard: 72075186233409546, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:24.381960Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:674: all shard schema changes has been received, operationId: 101:0, at schemeshard: 72057594046678944 2025-05-07T08:58:24.382030Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:686: send schema changes 
ack message, operation: 101:0, datashard: 72075186233409546, at schemeshard: 72057594046678944 2025-05-07T08:58:24.382080Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 101:0 129 -> 240 2025-05-07T08:58:24.387328Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-05-07T08:58:24.387703Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-05-07T08:58:24.388368Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-05-07T08:58:24.388985Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-05-07T08:58:24.389156Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 101:0, at schemeshard: 72057594046678944 2025-05-07T08:58:24.389202Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046678944] TDone opId# 101:0 ProgressState 2025-05-07T08:58:24.389328Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#101:0 progress is 1/1 2025-05-07T08:58:24.389366Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-05-07T08:58:24.389406Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#101:0 progress is 1/1 2025-05-07T08:58:24.389439Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-05-07T08:58:24.389473Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 101, ready parts: 1/1, is published: true 2025-05-07T08:58:24.389575Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:334:2313] message: TxId: 101 2025-05-07T08:58:24.389625Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-05-07T08:58:24.389668Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 101:0 2025-05-07T08:58:24.389705Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 101:0 2025-05-07T08:58:24.389849Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-05-07T08:58:24.392199Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-05-07T08:58:24.392257Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:335:2314] TestWaitNotification: OK eventTxId 101 TestModificationResults wait txId: 102 2025-05-07T08:58:24.395455Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpAlterTable AlterTable { Name: "TTLEnabledTable" DropColumns { 
Name: "modified_at" } TTLSettings { Enabled { ColumnName: "modified_at" } } } } TxId: 102 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:58:24.395715Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_table.cpp:508: TAlterTable Propose, path: /MyRoot/TTLEnabledTable, pathId: , opId: 102:0, at schemeshard: 72057594046678944 2025-05-07T08:58:24.396111Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 102:1, propose status:StatusInvalidParameter, reason: Cannot enable TTL on dropped column: 'modified_at', at schemeshard: 72057594046678944 2025-05-07T08:58:24.399066Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 102, response: Status: StatusInvalidParameter Reason: "Cannot enable TTL on dropped column: \'modified_at\'" TxId: 102 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:58:24.399251Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 102, database: /MyRoot, subject: , status: StatusInvalidParameter, reason: Cannot enable TTL on dropped column: 'modified_at', operation: ALTER TABLE, path: /MyRoot/TTLEnabledTable TestModificationResult got TxId: 102, wait until txId: 102 |91.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/coordinator/ut/ydb-core-tx-coordinator-ut |91.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/coordinator/ut/ydb-core-tx-coordinator-ut |91.1%| [LD] {RESULT} $(B)/ydb/core/tx/coordinator/ut/ydb-core-tx-coordinator-ut >> TExternalDataSourceTest::CreateExternalDataSource [GOOD] >> TExternalDataSourceTest::CreateExternalDataSourceShouldFailIfSuchEntityAlreadyExists >> TSchemeShardColumnTableTTL::AlterColumnTable_Negative [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> TYqlDateTimeTests::DateKey [GOOD] Test command err: 2025-05-07T08:57:32.458593Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501625323298471886:2201];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:57:32.458827Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00290a/r3tmp/tmp0dlWF0/pdisk_1.dat 2025-05-07T08:57:33.098967Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:57:33.099071Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:57:33.105457Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:57:33.112615Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 11939, node 1 2025-05-07T08:57:33.422161Z node 1 :GRPC_SERVER WARN: grpc_request_proxy.cpp:509: SchemeBoardDelete /Root Strong=0 2025-05-07T08:57:33.422534Z node 1 :GRPC_SERVER WARN: grpc_request_proxy.cpp:509: SchemeBoardDelete /Root Strong=0 2025-05-07T08:57:33.460368Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:57:33.460391Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: 
will try to initialize from file: (empty maybe) 2025-05-07T08:57:33.460405Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:57:33.460585Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:1131 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:57:33.829295Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... TClient is connected to server localhost:1131 2025-05-07T08:57:34.324028Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateSubDomain, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:57:34.455448Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T08:57:34.982219Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7501625330162963111:2210];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:57:34.982495Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/ydb_ut_tenant/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-05-07T08:57:35.074688Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:57:35.074777Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:57:35.084196Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 3 Cookie 3 2025-05-07T08:57:35.085425Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:1131 2025-05-07T08:57:36.426415Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 TClient is connected to server localhost:1131 TClient::Ls request: /Root/table-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "table-1" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710660 CreateStep: 1746608256764 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "table-1" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 ... 
(TRUNCATED) 2025-05-07T08:57:37.298967Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 2025-05-07T08:57:37.450835Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501625323298471886:2201];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:57:37.450904Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; TClient is connected to server localhost:1131 TClient::Ls request: /Root/table-2 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "table-2" PathId: 4 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710661 CreateStep: 1746608257464 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "table-2" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 ... (TRUNCATED) 2025-05-07T08:57:37.865019Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 TClient is connected to server localhost:1131 TClient::Ls request: /Root/table-3 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "table-3" PathId: 5 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710662 CreateStep: 1746608257975 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "table-3" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 ... 
(TRUNCATED) 2025-05-07T08:57:38.387098Z node 1 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 3 2025-05-07T08:57:38.387665Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-05-07T08:57:39.986134Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7501625330162963111:2210];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:57:39.986241Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/ydb_ut_tenant/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:57:43.516412Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7501625368638440703:2073];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:57:43.517645Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00290a/r3tmp/tmpfTUQRk/pdisk_1.dat 2025-05-07T08:57:43.839723Z node 4 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:57:43.889919Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:57:43.890041Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:57:43.893833Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 25906, node 4 2025-05-07T08:57:44.114956Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:57:44.114991Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:57:44.115002Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:57:44.115187Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributab ... use file: (empty maybe) 2025-05-07T08:58:04.468663Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:58:04.468672Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:58:04.468815Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:21650 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:58:04.904780Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... TClient is connected to server localhost:21650 2025-05-07T08:58:05.470162Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateSubDomain, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:58:05.498905Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T08:58:06.005908Z node 12 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[12:7501625468649565599:2073];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:58:06.006016Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/ydb_ut_tenant/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-05-07T08:58:06.013706Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:58:06.013839Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:58:06.022324Z node 10 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 12 Cookie 12 2025-05-07T08:58:06.078737Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:21650 2025-05-07T08:58:06.637570Z node 10 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 12 2025-05-07T08:58:06.638298Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-05-07T08:58:07.009822Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/ydb_ut_tenant/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:58:08.012760Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/ydb_ut_tenant/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:58:09.020360Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/ydb_ut_tenant/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:58:10.021376Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/ydb_ut_tenant/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:58:12.803100Z node 13 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[13:7501625493171852710:2072];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:58:12.806283Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00290a/r3tmp/tmpcFp0ii/pdisk_1.dat 2025-05-07T08:58:13.298888Z node 13 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:58:13.368060Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:58:13.368522Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:58:13.373484Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc 
on GrpcPort 23847, node 13 2025-05-07T08:58:13.774798Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:58:13.774832Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:58:13.783824Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:58:13.784136Z node 13 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:30862 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:58:14.421631Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:58:17.802159Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[13:7501625493171852710:2072];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:58:17.802298Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:58:19.968862Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-05-07T08:58:20.097204Z node 13 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [13:7501625527531592380:2349], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:58:20.097327Z node 13 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [13:7501625527531592372:2346], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:58:20.097418Z node 13 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:58:20.103801Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480 2025-05-07T08:58:20.131475Z node 13 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [13:7501625527531592386:2350], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-05-07T08:58:20.231670Z node 13 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [13:7501625527531592463:2816] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:58:20.494548Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715661. Ctx: { TraceId: 01jtmzf81yd1nyczxdbanj8xfb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=NTc5OGI5Y2UtODBkMTkwMjItZjQ4YjE4NjAtODE5ZDRlZTE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:58:20.991978Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715662. Ctx: { TraceId: 01jtmzf8ge51t8z6nphbaz1fp4, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=NTc5OGI5Y2UtODBkMTkwMjItZjQ4YjE4NjAtODE5ZDRlZTE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:58:21.347238Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715663. Ctx: { TraceId: 01jtmzf8ya4499qykzbw2rbj0j, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=NTc5OGI5Y2UtODBkMTkwMjItZjQ4YjE4NjAtODE5ZDRlZTE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:58:21.627871Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715664. Ctx: { TraceId: 01jtmzf99e7j43s40hz144nta5, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=NTc5OGI5Y2UtODBkMTkwMjItZjQ4YjE4NjAtODE5ZDRlZTE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root >> BackupRestore::TestAllPrimitiveTypes-DATETIME64 [GOOD] >> BackupRestore::TestAllPrimitiveTypes-DYNUMBER >> SystemView::PartitionStatsTtlFields [GOOD] >> SystemView::PartitionStatsLocksFields |91.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLTests::TtlTiersValidation >> TExternalDataSourceTest::CreateExternalDataSourceShouldFailIfSuchEntityAlreadyExists [GOOD] >> Cdc::Write[YdsRunner] [GOOD] >> Cdc::Write[TopicRunner] >> TestYmqHttpProxy::TestCreateQueueWithTags [GOOD] >> TestKinesisHttpProxy::TestUnauthorizedPutRecords [GOOD] |91.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_background_compaction/ydb-core-tx-datashard-ut_background_compaction |91.1%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_background_compaction/ydb-core-tx-datashard-ut_background_compaction |91.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_background_compaction/ydb-core-tx-datashard-ut_background_compaction ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_external_data_source/unittest >> TExternalDataSourceTest::CreateExternalDataSourceShouldFailIfSuchEntityAlreadyExists [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2141] Leader for TabletID 72057594046678944 is [1:126:2152] sender: [1:129:2058] recipient: [1:109:2141] 2025-05-07T08:58:24.970672Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:58:24.970772Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:58:24.970833Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:58:24.970866Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:58:24.970910Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:58:24.970947Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:58:24.971036Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:58:24.971135Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:58:24.971881Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:58:24.972237Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 
2025-05-07T08:58:25.052785Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7610: Got new config: QueryServiceConfig { AvailableExternalDataSources: "ObjectStorage" AvailableExternalDataSources: "ClickHouse" AvailableExternalDataSources: "PostgreSQL" AvailableExternalDataSources: "MySQL" AvailableExternalDataSources: "Ydb" AvailableExternalDataSources: "YT" AvailableExternalDataSources: "Greenplum" AvailableExternalDataSources: "MsSQLServer" AvailableExternalDataSources: "Oracle" AvailableExternalDataSources: "Logging" AvailableExternalDataSources: "Solomon" } 2025-05-07T08:58:25.052853Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:58:25.053571Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# ObjectStorage, ClickHouse, PostgreSQL, MySQL, Ydb, YT, Greenplum, MsSQLServer, Oracle, Logging, Solomon 2025-05-07T08:58:25.082645Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:58:25.082795Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:58:25.082975Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:58:25.089717Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:58:25.089927Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:58:25.090622Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:25.090887Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:58:25.093039Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:58:25.094517Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:58:25.094579Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:58:25.094754Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:58:25.094804Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:58:25.094850Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:58:25.095011Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:58:25.114610Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2152] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T08:58:25.306839Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:58:25.307099Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:25.307332Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:58:25.307554Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:58:25.307609Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:25.319136Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:25.319326Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:58:25.319538Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:25.319593Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:58:25.319664Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:58:25.319709Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:58:25.331220Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:25.331349Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:58:25.331400Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:58:25.338083Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:25.338175Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:25.338258Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:58:25.338313Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation 
IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:58:25.342500Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:58:25.345100Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:58:25.345404Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:58:25.346539Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:25.346695Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 131 RawX2: 4294969451 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:58:25.346762Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:58:25.347116Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:58:25.347178Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:58:25.347360Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:58:25.347472Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:58:25.350090 ... 
MESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 1], 5 2025-05-07T08:58:26.149240Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 2], 2 2025-05-07T08:58:26.150239Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 101 2025-05-07T08:58:26.150334Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 101 2025-05-07T08:58:26.150383Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 101 2025-05-07T08:58:26.150425Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 5 2025-05-07T08:58:26.150469Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-05-07T08:58:26.151259Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 2 PathOwnerId: 72057594046678944, cookie: 101 2025-05-07T08:58:26.151346Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 2 PathOwnerId: 72057594046678944, cookie: 101 2025-05-07T08:58:26.151401Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 101 2025-05-07T08:58:26.151432Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 2 2025-05-07T08:58:26.151464Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-05-07T08:58:26.151542Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 101, subscribers: 0 2025-05-07T08:58:26.154470Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-05-07T08:58:26.155745Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 TestModificationResult got TxId: 101, wait until txId: 101 TestWaitNotification wait txId: 101 2025-05-07T08:58:26.155978Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 101: send EvNotifyTxCompletion 2025-05-07T08:58:26.156024Z node 2 
:FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 101 2025-05-07T08:58:26.156495Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 2025-05-07T08:58:26.156627Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-05-07T08:58:26.156671Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [2:302:2293] TestWaitNotification: OK eventTxId 101 2025-05-07T08:58:26.157208Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/MyExternalDataSource" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T08:58:26.157432Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/MyExternalDataSource" took 268us result status StatusSuccess 2025-05-07T08:58:26.157786Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/MyExternalDataSource" PathDescription { Self { Name: "MyExternalDataSource" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeExternalDataSource CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 ExternalDataSourceVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } ExternalDataSourceDescription { Name: "MyExternalDataSource" PathId { OwnerId: 72057594046678944 LocalId: 2 } Version: 1 SourceType: "ObjectStorage" Location: "https://s3.cloud.net/my_bucket" Installation: "" Auth { None { } } Properties { } References { } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 TestModificationResults wait txId: 102 2025-05-07T08:58:26.161534Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateExternalDataSource CreateExternalDataSource { Name: "MyExternalDataSource" SourceType: "ObjectStorage" Location: "https://s3.cloud.net/my_new_bucket" Auth { None { } } } } TxId: 102 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:58:26.161901Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_create_external_data_source.cpp:336: [72057594046678944] CreateNewExternalDataSource, opId 102:0, feature flag EnableReplaceIfExistsForExternalEntities 1, tx WorkingDir: "/MyRoot" 
OperationType: ESchemeOpCreateExternalDataSource FailOnExist: false CreateExternalDataSource { Name: "MyExternalDataSource" SourceType: "ObjectStorage" Location: "https://s3.cloud.net/my_new_bucket" Auth { None { } } } 2025-05-07T08:58:26.162021Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_external_data_source.cpp:232: [72057594046678944] TCreateExternalDataSource Propose: opId# 102:0, path# /MyRoot/MyExternalDataSource 2025-05-07T08:58:26.162228Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 102:1, propose status:StatusAlreadyExists, reason: Check failed: path: '/MyRoot/MyExternalDataSource', error: path exist, request accepts it (id: [OwnerId: 72057594046678944, LocalPathId: 2], type: EPathTypeExternalDataSource, state: EPathStateNoChanges), at schemeshard: 72057594046678944 2025-05-07T08:58:26.164903Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 102, response: Status: StatusAlreadyExists Reason: "Check failed: path: \'/MyRoot/MyExternalDataSource\', error: path exist, request accepts it (id: [OwnerId: 72057594046678944, LocalPathId: 2], type: EPathTypeExternalDataSource, state: EPathStateNoChanges)" TxId: 102 SchemeshardId: 72057594046678944 PathId: 2 PathCreateTxId: 101, at schemeshard: 72057594046678944 2025-05-07T08:58:26.165092Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 102, database: /MyRoot, subject: , status: StatusAlreadyExists, reason: Check failed: path: '/MyRoot/MyExternalDataSource', error: path exist, request accepts it (id: [OwnerId: 72057594046678944, LocalPathId: 2], type: EPathTypeExternalDataSource, state: EPathStateNoChanges), operation: CREATE EXTERNAL DATA SOURCE, path: /MyRoot/MyExternalDataSource TestModificationResult got TxId: 102, wait until txId: 102 TestWaitNotification wait txId: 102 2025-05-07T08:58:26.165413Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2025-05-07T08:58:26.165462Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 2025-05-07T08:58:26.165937Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 2025-05-07T08:58:26.166078Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-05-07T08:58:26.166121Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [2:310:2301] TestWaitNotification: OK eventTxId 102 2025-05-07T08:58:26.166618Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/MyExternalDataSource" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T08:58:26.166816Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/MyExternalDataSource" took 225us result status StatusSuccess 2025-05-07T08:58:26.167166Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/MyExternalDataSource" PathDescription { Self { Name: "MyExternalDataSource" 
PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeExternalDataSource CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 ExternalDataSourceVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } ExternalDataSourceDescription { Name: "MyExternalDataSource" PathId { OwnerId: 72057594046678944 LocalId: 2 } Version: 1 SourceType: "ObjectStorage" Location: "https://s3.cloud.net/my_bucket" Installation: "" Auth { None { } } Properties { } References { } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardColumnTableTTL::AlterColumnTable_Negative [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:128:2058] recipient: [1:108:2140] 2025-05-07T08:58:16.945510Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:58:16.945626Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:58:16.945676Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:58:16.945720Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:58:16.945772Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:58:16.945807Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:58:16.945873Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:58:16.945960Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 
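An aside on the external data source run that finished above: both describe results return Version: 1 and Location: "https://s3.cloud.net/my_bucket", so the second ESchemeOpCreateExternalDataSource proposal (tx 102) changed nothing; with FailOnExist: false in the dumped transaction, StatusAlreadyExists is accepted as success and the existing object is kept. A minimal sketch of the strict variant, assuming (not verified in this log) that FailOnExist: true turns the duplicate create into a hard failure:

  Transaction {
    WorkingDir: "/MyRoot"
    OperationType: ESchemeOpCreateExternalDataSource
    FailOnExist: true  # assumption: rejects instead of "path exist, request accepts it"
    CreateExternalDataSource {
      Name: "MyExternalDataSource"
      SourceType: "ObjectStorage"
      Location: "https://s3.cloud.net/my_new_bucket"
      Auth { None { } }
    }
  }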
2025-05-07T08:58:16.946875Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:58:16.947320Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:58:17.042253Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:58:17.042330Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:58:17.060288Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:58:17.060424Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:58:17.060623Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:58:17.075266Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:58:17.076019Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:58:17.076968Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:17.077327Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:58:17.080259Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:58:17.082300Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:58:17.082383Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:58:17.082458Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:58:17.082523Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:58:17.082576Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:58:17.082847Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:58:17.091348Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:239:2058] recipient: [1:15:2062] 2025-05-07T08:58:17.268923Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:58:17.269201Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:17.269490Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:58:17.269821Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:58:17.269888Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:17.273144Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:17.273336Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:58:17.273578Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:17.273638Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:58:17.273687Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:58:17.273730Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:58:17.276345Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:17.276420Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:58:17.276468Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:58:17.279072Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:17.279142Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:17.279197Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:58:17.279293Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:58:17.283486Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:58:17.285695Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:58:17.286154Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:58:17.287242Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:17.287406Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 130 RawX2: 4294969450 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:58:17.287488Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:58:17.287800Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:58:17.287869Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:58:17.288035Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:58:17.288122Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:58:17.293589Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:58:17.293656Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:58:17.293854Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:58:17.293903Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 
:24.901552Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-05-07T08:58:24.901659Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-05-07T08:58:24.901729Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-05-07T08:58:24.901800Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-05-07T08:58:24.901878Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-05-07T08:58:24.901954Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-05-07T08:58:24.902077Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-05-07T08:58:24.902141Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-05-07T08:58:24.902215Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-05-07T08:58:24.902306Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-05-07T08:58:24.902412Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-05-07T08:58:24.902524Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 101:0, at schemeshard: 72057594046678944 2025-05-07T08:58:24.902590Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046678944] TDone opId# 101:0 ProgressState 2025-05-07T08:58:24.902719Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#101:0 progress is 1/1 2025-05-07T08:58:24.902770Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-05-07T08:58:24.902849Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#101:0 progress is 1/1 2025-05-07T08:58:24.902887Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-05-07T08:58:24.902932Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 101, ready parts: 1/1, is published: true 2025-05-07T08:58:24.903041Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:2808:4073] message: TxId: 101 2025-05-07T08:58:24.903098Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-05-07T08:58:24.903164Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 101:0 
2025-05-07T08:58:24.903223Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 101:0 2025-05-07T08:58:24.904795Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 66 2025-05-07T08:58:24.909318Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-05-07T08:58:24.909391Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:2809:4074] TestWaitNotification: OK eventTxId 101 2025-05-07T08:58:24.910073Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/TTLEnabledTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T08:58:24.910394Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/TTLEnabledTable" took 393us result status StatusSuccess 2025-05-07T08:58:24.911205Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/TTLEnabledTable" PathDescription { Self { Name: "TTLEnabledTable" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeColumnTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 ColumnTableVersion: 1 ColumnTableSchemaVersion: 1 } ChildrenExist: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 64 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } ColumnTableDescription { Name: "TTLEnabledTable" Schema { Columns { Id: 1 Name: "key" Type: "Uint64" TypeId: 4 NotNull: true StorageId: "" DefaultValue { } ColumnFamilyId: 0 } Columns { Id: 2 Name: "modified_at" Type: "Timestamp" TypeId: 50 NotNull: false StorageId: "" DefaultValue { } ColumnFamilyId: 0 } Columns { Id: 3 Name: "str" Type: "String" TypeId: 4097 NotNull: false StorageId: "" DefaultValue { } ColumnFamilyId: 0 } KeyColumnNames: "key" NextColumnId: 4 Version: 1 Options { SchemeNeedActualization: false } 
ColumnFamilies { Id: 0 Name: "default" } NextColumnFamilyId: 1 } ColumnShardCount: 64 Sharding { ColumnShards: 72075186233409546 ColumnShards: 72075186233409547 ColumnShards: 72075186233409548 ColumnShards: 72075186233409549 ColumnShards: 72075186233409550 ColumnShards: 72075186233409551 ColumnShards: 72075186233409552 ColumnShards: 72075186233409553 ColumnShards: 72075186233409554 ColumnShards: 72075186233409555 ColumnShards: 72075186233409556 ColumnShards: 72075186233409557 ColumnShards: 72075186233409558 ColumnShards: 72075186233409559 ColumnShards: 72075186233409560 ColumnShards: 72075186233409561 ColumnShards: 72075186233409562 ColumnShards: 72075186233409563 ColumnShards: 72075186233409564 ColumnShards: 72075186233409565 ColumnShards: 72075186233409566 ColumnShards: 72075186233409567 ColumnShards: 72075186233409568 ColumnShards: 72075186233409569 ColumnShards: 72075186233409570 ColumnShards: 72075186233409571 ColumnShards: 72075186233409572 ColumnShards: 72075186233409573 ColumnShards: 72075186233409574 ColumnShards: 72075186233409575 ColumnShards: 72075186233409576 ColumnShards: 72075186233409577 ColumnShards: 72075186233409578 ColumnShards: 72075186233409579 ColumnShards: 72075186233409580 ColumnShards: 72075186233409581 ColumnShards: 72075186233409582 ColumnShards: 72075186233409583 ColumnShards: 72075186233409584 ColumnShards: 72075186233409585 ColumnShards: 72075186233409586 ColumnShards: 72075186233409587 ColumnShards: 72075186233409588 ColumnShards: 72075186233409589 ColumnShards: 72075186233409590 ColumnShards: 72075186233409591 ColumnShards: 72075186233409592 ColumnShards: 72075186233409593 ColumnShards: 72075186233409594 ColumnShards: 72075186233409595 ColumnShards: 72075186233409596 ColumnShards: 72075186233409597 ColumnShards: 72075186233409598 ColumnShards: 72075186233409599 ColumnShards: 72075186233409600 ColumnShards: 72075186233409601 ColumnShards: 72075186233409602 ColumnShards: 72075186233409603 ColumnShards: 72075186233409604 ColumnShards: 72075186233409605 ColumnShards: 72075186233409606 ColumnShards: 72075186233409607 ColumnShards: 72075186233409608 ColumnShards: 72075186233409609 HashSharding { Function: HASH_FUNCTION_CONSISTENCY_64 Columns: "key" } } StorageConfig { DataChannelCount: 64 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 WARNING: All log messages before y_absl::InitializeLog() is called are written to STDERR W0000 00:00:1746608304.911963 258858 text_format.cc:398] Warning parsing text-format NKikimrSchemeOp.TAlterColumnTable: 6:35: text format contains deprecated field "ExpireAfterSeconds" TestModificationResults wait txId: 102 2025-05-07T08:58:24.915222Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpAlterColumnTable AlterColumnTable { Name: "TTLEnabledTable" AlterTtlSettings { Enabled { ColumnName: "str" ExpireAfterSeconds: 3600 Tiers { ApplyAfterSeconds: 3600 Delete { } } } } } } TxId: 102 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:58:24.915465Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: alter_table.cpp:282: TAlterColumnTable Propose, path: /MyRoot/TTLEnabledTable, opId: 102:0, at schemeshard: 72057594046678944 2025-05-07T08:58:24.916013Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 102:1, propose status:StatusSchemeError, reason: ttl update error: Unsupported column type. 
in alter constructor STANDALONE_UPDATE, at schemeshard: 72057594046678944 2025-05-07T08:58:24.919190Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 102, response: Status: StatusSchemeError Reason: "ttl update error: Unsupported column type. in alter constructor STANDALONE_UPDATE" TxId: 102 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:58:24.919390Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 102, database: /MyRoot, subject: , status: StatusSchemeError, reason: ttl update error: Unsupported column type. in alter constructor STANDALONE_UPDATE, operation: ALTER COLUMN TABLE, path: /MyRoot/TTLEnabledTable TestModificationResult got TxId: 102, wait until txId: 102 >> TestYmqHttpProxy::TestDeleteMessage >> TestKinesisHttpProxy::CreateDeleteStreamWithConsumer [GOOD] >> TestKinesisHttpProxy::TestWrongStream >> BackupRestoreS3::TestAllPrimitiveTypes-DATETIME64 [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-DYNUMBER >> TSchemeShardTTLTests::BuildAsyncIndexShouldSucceed >> TSchemeShardTTLTests::TtlTiersValidation [GOOD] >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_ttl_Uint32-pk_types9-all_types9-index9-Uint32--] >> TPersQueueTest::PreferredCluster_DisabledRemoteClusterAndWriteSessionsWithDifferentPreferredClusterAndLaterRemoteClusterEnabled_SessionWithMismatchedClusterDiesAfterPreferredClusterEnabledAndOtherSessionsAlive [GOOD] >> TPersQueueTest::PreferredCluster_EnabledRemotePreferredClusterAndCloseClientSessionWithEnabledRemotePreferredClusterDelaySec_SessionDiesOnlyAfterDelay >> TSchemeShardTTLTests::CreateTableShouldFailOnWrongColumnType >> TestKinesisHttpProxy::CreateDeleteStreamWithConsumerWithFlag >> TestKinesisHttpProxy::ListShardsEmptyFields [GOOD] >> TSchemeShardColumnTableTTL::CreateColumnTableNegative_ColumnType ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLTests::TtlTiersValidation [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140] 2025-05-07T08:58:26.769737Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:58:26.769814Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:58:26.769853Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:58:26.769889Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:58:26.769937Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:58:26.770025Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 
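The StatusSchemeError above ("ttl update error: Unsupported column type") comes from pointing AlterTtlSettings at "str", which the describe result shows as a String column (TypeId: 4097); the table's timestamp-like column is "modified_at" (Type: "Timestamp", TypeId: 50). Below is a minimal sketch of the same alter retargeted at the timestamp column and without the ExpireAfterSeconds field that text_format.cc flagged as deprecated; whether this exact request succeeds is not shown in this log, it only avoids the two issues reported above:

  Transaction {
    WorkingDir: "/MyRoot"
    OperationType: ESchemeOpAlterColumnTable
    AlterColumnTable {
      Name: "TTLEnabledTable"
      AlterTtlSettings {
        Enabled {
          ColumnName: "modified_at"  # Timestamp column from the schema above; "str" (String) was rejected
          Tiers { ApplyAfterSeconds: 3600 Delete { } }
        }
      }
    }
  }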
2025-05-07T08:58:26.770089Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:58:26.770186Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:58:26.770906Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:58:26.771211Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:58:26.854841Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:58:26.854880Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:58:26.885669Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:58:26.885876Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:58:26.886089Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:58:26.892338Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:58:26.892686Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:58:26.893437Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:26.893631Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:58:26.896819Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:58:26.898312Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:58:26.898379Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:58:26.898450Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:58:26.898512Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:58:26.898556Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:58:26.898784Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:58:26.905888Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2150] sender: 
[1:238:2058] recipient: [1:15:2062] 2025-05-07T08:58:27.070723Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:58:27.071025Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:27.071289Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:58:27.071562Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:58:27.071633Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:27.074439Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:27.074612Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:58:27.074853Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:27.074911Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:58:27.074969Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:58:27.075008Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:58:27.077690Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:27.077755Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:58:27.077796Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:58:27.086997Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:27.087087Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:27.087135Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:58:27.087215Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:58:27.105517Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:58:27.112263Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:58:27.112481Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:58:27.113550Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:27.113725Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:58:27.113790Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:58:27.114132Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:58:27.114191Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:58:27.114376Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:58:27.114498Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:58:27.121718Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:58:27.121791Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:58:27.121996Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:58:27.122041Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 
sh, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 5 2025-05-07T08:58:27.439903Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-05-07T08:58:27.441188Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 101 2025-05-07T08:58:27.441294Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 101 2025-05-07T08:58:27.441322Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 101 2025-05-07T08:58:27.441342Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 3 2025-05-07T08:58:27.441367Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-05-07T08:58:27.441423Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 101, ready parts: 0/1, is published: true FAKE_COORDINATOR: Erasing txId 101 2025-05-07T08:58:27.442917Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6245: Handle TEvProposeTransactionResult, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 101 Step: 5000002 OrderId: 101 ExecLatency: 0 ProposeLatency: 2 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 1101 } } 2025-05-07T08:58:27.442973Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation FindRelatedPartByTabletId, TxId: 101, tablet: 72075186233409546, partId: 0 2025-05-07T08:58:27.443105Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 101:0, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 101 Step: 5000002 OrderId: 101 ExecLatency: 0 ProposeLatency: 2 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 1101 } } 2025-05-07T08:58:27.443217Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_part.cpp:108: HandleReply TEvDataShard::TEvProposeTransactionResult Ignore message: tablet# 72057594046678944, ev# TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 101 Step: 5000002 OrderId: 101 ExecLatency: 0 ProposeLatency: 2 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 1101 } } 2025-05-07T08:58:27.444360Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5472: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 305 RawX2: 4294969588 } Origin: 72075186233409546 State: 2 TxId: 101 Step: 0 Generation: 2 2025-05-07T08:58:27.444410Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: 
TOperation FindRelatedPartByTabletId, TxId: 101, tablet: 72075186233409546, partId: 0 2025-05-07T08:58:27.444566Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 101:0, at schemeshard: 72057594046678944, message: Source { RawX1: 305 RawX2: 4294969588 } Origin: 72075186233409546 State: 2 TxId: 101 Step: 0 Generation: 2 2025-05-07T08:58:27.444618Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1005: NTableState::TProposedWaitParts operationId# 101:0 HandleReply TEvSchemaChanged at tablet: 72057594046678944 2025-05-07T08:58:27.444715Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1009: NTableState::TProposedWaitParts operationId# 101:0 HandleReply TEvSchemaChanged at tablet: 72057594046678944 message: Source { RawX1: 305 RawX2: 4294969588 } Origin: 72075186233409546 State: 2 TxId: 101 Step: 0 Generation: 2 2025-05-07T08:58:27.444794Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:655: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 101:0, shardIdx: 72057594046678944:1, datashard: 72075186233409546, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:27.444851Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:674: all shard schema changes has been received, operationId: 101:0, at schemeshard: 72057594046678944 2025-05-07T08:58:27.444903Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:686: send schema changes ack message, operation: 101:0, datashard: 72075186233409546, at schemeshard: 72057594046678944 2025-05-07T08:58:27.444966Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 101:0 129 -> 240 2025-05-07T08:58:27.448146Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-05-07T08:58:27.448452Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-05-07T08:58:27.449111Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-05-07T08:58:27.449633Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-05-07T08:58:27.449772Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 101:0, at schemeshard: 72057594046678944 2025-05-07T08:58:27.449845Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046678944] TDone opId# 101:0 ProgressState 2025-05-07T08:58:27.449926Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#101:0 progress is 1/1 2025-05-07T08:58:27.449951Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-05-07T08:58:27.450010Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#101:0 progress is 1/1 2025-05-07T08:58:27.450034Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-05-07T08:58:27.450064Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 101, ready parts: 1/1, is published: true 2025-05-07T08:58:27.450125Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:334:2313] message: TxId: 101 2025-05-07T08:58:27.450164Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-05-07T08:58:27.450215Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 101:0 2025-05-07T08:58:27.450246Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 101:0 2025-05-07T08:58:27.450361Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-05-07T08:58:27.451915Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-05-07T08:58:27.451955Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:335:2314] TestWaitNotification: OK eventTxId 101 TestModificationResults wait txId: 102 2025-05-07T08:58:27.454580Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpAlterTable AlterTable { Name: "TTLEnabledTable" TTLSettings { Enabled { ColumnName: "modified_at" Tiers { ApplyAfterSeconds: 3600 Delete { } } Tiers { ApplyAfterSeconds: 7200 Delete { } } } } } } TxId: 102 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:58:27.454839Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_table.cpp:508: TAlterTable Propose, path: /MyRoot/TTLEnabledTable, pathId: , opId: 102:0, at schemeshard: 72057594046678944 2025-05-07T08:58:27.455188Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 102:1, propose status:StatusInvalidParameter, reason: Tier 0: only the last tier in TTL settings can have Delete action, at schemeshard: 72057594046678944 2025-05-07T08:58:27.458001Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 102, response: Status: StatusInvalidParameter Reason: "Tier 0: only the last tier in TTL settings can have Delete action" TxId: 102 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:58:27.458173Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 102, database: /MyRoot, subject: , status: StatusInvalidParameter, reason: Tier 0: only the last tier in TTL settings can have Delete action, operation: ALTER TABLE, path: /MyRoot/TTLEnabledTable TestModificationResult got TxId: 102, wait until txId: 102 TestModificationResults wait txId: 103 2025-05-07T08:58:27.461405Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpAlterTable AlterTable { Name: "TTLEnabledTable" TTLSettings { Enabled { ColumnName: "modified_at" Tiers { ApplyAfterSeconds: 3600 EvictToExternalStorage { Storage: "/Root/abc" } } Tiers { ApplyAfterSeconds: 7200 Delete { } } } } } } TxId: 103 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 
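Tx 102 above was rejected because the first of its two tiers carries a Delete action, while the logged rule allows Delete only in the last tier. A minimal sketch of a tier layout that satisfies that rule for this row-oriented table, mirroring the message shape of the rejected proposal (acceptance of this variant is not demonstrated in this log):

  Transaction {
    WorkingDir: "/MyRoot"
    OperationType: ESchemeOpAlterTable
    AlterTable {
      Name: "TTLEnabledTable"
      TTLSettings {
        Enabled {
          ColumnName: "modified_at"
          Tiers { ApplyAfterSeconds: 7200 Delete { } }  # single tier, so the Delete action is in the last tier
        }
      }
    }
  }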
2025-05-07T08:58:27.461663Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_table.cpp:508: TAlterTable Propose, path: /MyRoot/TTLEnabledTable, pathId: , opId: 103:0, at schemeshard: 72057594046678944 2025-05-07T08:58:27.462044Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 103:1, propose status:StatusInvalidParameter, reason: Only DELETE via TTL is allowed for row-oriented tables, at schemeshard: 72057594046678944 2025-05-07T08:58:27.470819Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 103, response: Status: StatusInvalidParameter Reason: "Only DELETE via TTL is allowed for row-oriented tables" TxId: 103 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:58:27.470994Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 103, database: /MyRoot, subject: , status: StatusInvalidParameter, reason: Only DELETE via TTL is allowed for row-oriented tables, operation: ALTER TABLE, path: /MyRoot/TTLEnabledTable TestModificationResult got TxId: 103, wait until txId: 103 |91.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest |91.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/mediator/ut/ydb-core-tx-mediator-ut |91.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/mediator/ut/ydb-core-tx-mediator-ut |91.2%| [LD] {RESULT} $(B)/ydb/core/tx/mediator/ut/ydb-core-tx-mediator-ut >> TestKinesisHttpProxy::ListShardsExclusiveStartShardId |91.2%| [TA] $(B)/ydb/core/kqp/workload_service/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> TSchemeShardTTLTests::CreateTableShouldFailOnWrongColumnType [GOOD] >> TSchemeShardColumnTableTTL::AlterColumnTable [GOOD] >> TSchemeShardColumnTableTTL::CreateColumnTableNegative_UnknownColumn >> TestKinesisHttpProxy::GoodRequestGetRecordsCbor [GOOD] >> TSchemeShardColumnTableTTL::CreateColumnTableNegative_ColumnType [GOOD] >> TSchemeShardTTLTests::BuildAsyncIndexShouldSucceed [GOOD] >> Cdc::HugeKeyDebezium [GOOD] >> Cdc::Drop[PqRunner] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLTests::CreateTableShouldFailOnWrongColumnType [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:128:2058] recipient: [1:108:2140] 2025-05-07T08:58:28.817497Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:58:28.817608Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:58:28.817666Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:58:28.817707Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:58:28.817750Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:58:28.817779Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:58:28.817829Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:58:28.817902Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:58:28.818687Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:58:28.819039Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:58:28.904611Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:58:28.904662Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:58:28.919663Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:58:28.919809Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:58:28.919986Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:58:28.928356Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:58:28.928952Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:58:28.929603Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:28.929857Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:58:28.931932Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:58:28.933455Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:58:28.933514Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:58:28.933611Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:58:28.933663Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:58:28.933705Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:58:28.933916Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: 
Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:58:28.940473Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:239:2058] recipient: [1:15:2062] 2025-05-07T08:58:29.135034Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:58:29.135264Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:29.135510Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:58:29.135814Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:58:29.135893Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:29.139894Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:29.140048Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:58:29.140252Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:29.140317Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:58:29.140368Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:58:29.140405Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:58:29.142415Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:29.142484Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:58:29.142525Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:58:29.144459Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:29.144507Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 
72057594046678944 2025-05-07T08:58:29.144571Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:58:29.144653Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:58:29.156536Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:58:29.168583Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:58:29.168816Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:58:29.169887Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:29.170077Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 130 RawX2: 4294969450 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:58:29.170186Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:58:29.170525Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:58:29.170598Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:58:29.170826Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:58:29.170940Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:58:29.177501Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:58:29.177560Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:58:29.177740Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:58:29.177775Z 
node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:206:2208], at schemeshard: 72057594046678944, txId: 1, path id: 1 2025-05-07T08:58:29.177835Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:29.177870Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046678944] TDone opId# 1:0 ProgressState 2025-05-07T08:58:29.177956Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#1:0 progress is 1/1 2025-05-07T08:58:29.178013Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-05-07T08:58:29.178057Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#1:0 progress is 1/1 2025-05-07T08:58:29.178090Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-05-07T08:58:29.178128Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 1, ready parts: 1/1, is published: false 2025-05-07T08:58:29.178173Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-05-07T08:58:29.178242Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 1:0 2025-05-07T08:58:29.178277Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 1:0 2025-05-07T08:58:29.178361Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-05-07T08:58:29.178405Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 1, publications: 1, subscribers: 0 2025-05-07T08:58:29.178440Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 1, [OwnerId: 72057594046678944, LocalPathId: 1], 3 2025-05-07T08:58:29.181823Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-05-07T08:58:29.181984Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-05-07T08:58:29.182040Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1 2025-05-07T08:58:29.182088Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 3 2025-05-07T08:58:29.182137Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:58:29.182252Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: 
Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1, subscribers: 0
2025-05-07T08:58:29.185379Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1
2025-05-07T08:58:29.186010Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1, at schemeshard: 72057594046678944
TestModificationResults wait txId: 101
2025-05-07T08:58:29.187414Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:434: actor# [1:269:2260] Bootstrap
2025-05-07T08:58:29.200016Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:453: actor# [1:269:2260] Become StateWork (SchemeCache [1:274:2265])
2025-05-07T08:58:29.202222Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateTable CreateTable { Name: "TTLEnabledTable" Columns { Name: "key" Type: "Uint64" } Columns { Name: "modified_at" Type: "String" } KeyColumnNames: "key" TTLSettings { Enabled { ColumnName: "modified_at" } } } } TxId: 101 TabletId: 72057594046678944 , at schemeshard: 72057594046678944
2025-05-07T08:58:29.202485Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_table.cpp:426: TCreateTable Propose, path: /MyRoot/TTLEnabledTable, opId: 101:0, at schemeshard: 72057594046678944
2025-05-07T08:58:29.202585Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_create_table.cpp:433: TCreateTable Propose, path: /MyRoot/TTLEnabledTable, opId: 101:0, schema: Name: "TTLEnabledTable" Columns { Name: "key" Type: "Uint64" } Columns { Name: "modified_at" Type: "String" } KeyColumnNames: "key" TTLSettings { Enabled { ColumnName: "modified_at" } }, at schemeshard: 72057594046678944
2025-05-07T08:58:29.203094Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 101:1, propose status:StatusSchemeError, reason: Unsupported column type, at schemeshard: 72057594046678944
2025-05-07T08:58:29.204011Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:269:2260] HANDLE TEvClientConnected success connect from tablet# 72057594046447617
2025-05-07T08:58:29.207385Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 101, response: Status: StatusSchemeError Reason: "Unsupported column type" TxId: 101 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944
2025-05-07T08:58:29.207570Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 101, database: /MyRoot, subject: , status: StatusSchemeError, reason: Unsupported column type, operation: CREATE TABLE, path: /MyRoot/TTLEnabledTable
2025-05-07T08:58:29.208315Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617
TestModificationResult got TxId: 101, wait until txId: 101
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardColumnTableTTL::CreateColumnTableNegative_ColumnType [GOOD]
Test command err:
Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140]
IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140]
Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:128:2058] recipient: [1:108:2140]
2025-05-07T08:58:29.148372Z node 1 :FLAT_TX_SCHEMESHARD
NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:58:29.148485Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:58:29.148527Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:58:29.148573Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:58:29.148622Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:58:29.148650Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:58:29.148702Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:58:29.148776Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:58:29.149557Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:58:29.149890Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:58:29.236461Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:58:29.236525Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:58:29.251952Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:58:29.252048Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:58:29.252214Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:58:29.260688Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:58:29.261279Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:58:29.262008Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:29.262283Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:58:29.264502Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:58:29.265813Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at 
schemeshard: 72057594046678944 2025-05-07T08:58:29.265862Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:58:29.265915Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:58:29.266044Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:58:29.266118Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:58:29.266359Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:58:29.276272Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:239:2058] recipient: [1:15:2062] 2025-05-07T08:58:29.426694Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:58:29.426917Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:29.427142Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:58:29.427404Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:58:29.427474Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:29.431001Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:29.431155Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:58:29.431386Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:29.431470Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:58:29.431516Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:58:29.431558Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 
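The "Change state for txid 1:0 2 -> 3" line above, together with the "3 -> 128" and "128 -> 240" transitions that follow, traces the sub-operation state machine every schemeshard transaction in this log walks through. A rough map of the numeric states, with names inferred from the adjacent TCreateParts / TConfigureParts / TPropose / TDone entries; the enum below is an illustrative sketch, not the actual YDB declaration:

    // Sketch inferred from the "Change state for txid" lines in this log; the
    // real enum in the YDB schemeshard sources may use other names and states.
    enum ESubOperationState {
        CREATE_PARTS    = 2,   // TCreateParts: create shards (here: "no shards to create, do next state")
        CONFIGURE_PARTS = 3,   // TConfigureParts: push the new config to the created shards
        PROPOSE         = 128, // TPropose: wait for the coordinator's plan step (TEvOperationPlan)
        DONE            = 240, // TDone: publish to the scheme board and finish the operation part
    };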
2025-05-07T08:58:29.435131Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:29.435204Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:58:29.435253Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:58:29.438695Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:29.438764Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:29.438830Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:58:29.438915Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:58:29.451537Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:58:29.453859Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:58:29.454100Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:58:29.455898Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:29.456095Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 130 RawX2: 4294969450 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:58:29.456165Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:58:29.456467Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:58:29.456549Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:58:29.456759Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:58:29.456914Z 
node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:58:29.460298Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:58:29.460358Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:58:29.460567Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:58:29.460618Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:206:2208], at schemeshard: 72057594046678944, txId: 1, path id: 1 2025-05-07T08:58:29.460696Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:29.460749Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046678944] TDone opId# 1:0 ProgressState 2025-05-07T08:58:29.460876Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#1:0 progress is 1/1 2025-05-07T08:58:29.460918Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-05-07T08:58:29.460987Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#1:0 progress is 1/1 2025-05-07T08:58:29.461027Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-05-07T08:58:29.461112Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 1, ready parts: 1/1, is published: false 2025-05-07T08:58:29.461174Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-05-07T08:58:29.461223Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 1:0 2025-05-07T08:58:29.461258Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 1:0 2025-05-07T08:58:29.461344Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-05-07T08:58:29.461392Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 1, publications: 1, subscribers: 0 2025-05-07T08:58:29.461425Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 1, [OwnerId: 72057594046678944, LocalPathId: 1], 3 2025-05-07T08:58:29.472533Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-05-07T08:58:29.472722Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard 
Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-05-07T08:58:29.472773Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1 2025-05-07T08:58:29.472824Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 3 2025-05-07T08:58:29.472881Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:58:29.473020Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1, subscribers: 0 2025-05-07T08:58:29.476456Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1 2025-05-07T08:58:29.477060Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1, at schemeshard: 72057594046678944 WARNING: All log messages before y_absl::InitializeLog() is called are written to STDERR W0000 00:00:1746608309.478289 262941 text_format.cc:398] Warning parsing text-format NKikimrSchemeOp.TColumnTableDescription: 11:43: text format contains deprecated field "ExpireAfterSeconds" TestModificationResults wait txId: 101 2025-05-07T08:58:29.478751Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:434: actor# [1:269:2260] Bootstrap 2025-05-07T08:58:29.495872Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:453: actor# [1:269:2260] Become StateWork (SchemeCache [1:274:2265]) 2025-05-07T08:58:29.498809Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateColumnTable CreateColumnTable { Name: "TTLEnabledTable" Schema { Columns { Name: "key" Type: "Uint64" NotNull: true } Columns { Name: "modified_at" Type: "String" } KeyColumnNames: "key" } TtlSettings { Enabled { ColumnName: "modified_at" ExpireAfterSeconds: 3600 Tiers { ApplyAfterSeconds: 3600 Delete { } } } } } } TxId: 101 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:58:29.499215Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: create_table.cpp:593: TCreateColumnTable Propose, path: /MyRoot/TTLEnabledTable, opId: 101:0, at schemeshard: 72057594046678944 2025-05-07T08:58:29.500113Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 101:1, propose status:StatusSchemeError, reason: Unsupported column type, at schemeshard: 72057594046678944 2025-05-07T08:58:29.501477Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:269:2260] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-05-07T08:58:29.504836Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 101, response: Status: StatusSchemeError Reason: "Unsupported column type" TxId: 101 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:58:29.505086Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 101, database: /MyRoot, subject: , status: StatusSchemeError, reason: Unsupported 
column type, operation: CREATE COLUMN TABLE, path: /MyRoot/
2025-05-07T08:58:29.505719Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617
TestModificationResult got TxId: 101, wait until txId: 101
W0000 00:00:1746608309.506237 262941 text_format.cc:398] Warning parsing text-format NKikimrSchemeOp.TColumnTableDescription: 11:43: text format contains deprecated field "ExpireAfterSeconds"
TestModificationResults wait txId: 102
2025-05-07T08:58:29.508994Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateColumnTable CreateColumnTable { Name: "TTLEnabledTable" Schema { Columns { Name: "key" Type: "Uint64" NotNull: true } Columns { Name: "modified_at" Type: "DyNumber" } KeyColumnNames: "key" } TtlSettings { Enabled { ColumnName: "modified_at" ExpireAfterSeconds: 3600 Tiers { ApplyAfterSeconds: 3600 Delete { } } } } } } TxId: 102 TabletId: 72057594046678944 , at schemeshard: 72057594046678944
2025-05-07T08:58:29.509309Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: create_table.cpp:593: TCreateColumnTable Propose, path: /MyRoot/TTLEnabledTable, opId: 102:0, at schemeshard: 72057594046678944
2025-05-07T08:58:29.509540Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 102:1, propose status:StatusSchemeError, reason: Type 'DyNumber' specified for column 'modified_at' is not supported, at schemeshard: 72057594046678944
2025-05-07T08:58:29.512236Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 102, response: Status: StatusSchemeError Reason: "Type \'DyNumber\' specified for column \'modified_at\' is not supported" TxId: 102 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944
2025-05-07T08:58:29.512435Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 102, database: /MyRoot, subject: , status: StatusSchemeError, reason: Type 'DyNumber' specified for column 'modified_at' is not supported, operation: CREATE COLUMN TABLE, path: /MyRoot/
TestModificationResult got TxId: 102, wait until txId: 102
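Both CreateColumnTable proposals above fail at propose time: TTL on a "String" column is rejected with the generic "Unsupported column type" (txId 101), and on a "DyNumber" column with a type-specific message (txId 102). For contrast, a hypothetical variant of the same schema that should pass this validation: the successful run further below describes its TTL column as Type: "Timestamp" (TypeId 50), and using only the Tiers form of the settings would also avoid the text_format.cc warning about the deprecated "ExpireAfterSeconds" field. The constant name and the exact schema text are assumptions for illustration, not taken from the test sources:

    // Hypothetical counterpart to the rejected schemas above, written as the
    // text-format protobuf these tests embed; only the column type differs.
    const char* ACCEPTED_TTL_SCHEMA = R"(
        Name: "TTLEnabledTable"
        Schema {
            Columns { Name: "key" Type: "Uint64" NotNull: true }
            Columns { Name: "modified_at" Type: "Timestamp" }  # String / DyNumber are rejected here
            KeyColumnNames: "key"
        }
        TtlSettings {
            Enabled {
                ColumnName: "modified_at"
                Tiers { ApplyAfterSeconds: 3600 Delete { } }   # no deprecated ExpireAfterSeconds field
            }
        }
    )";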
>> TSchemeShardServerLess::StorageBilling [GOOD]
>> TestKinesisHttpProxy::GoodRequestGetRecordsLongStreamName
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLTests::BuildAsyncIndexShouldSucceed [GOOD]
Test command err:
Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140]
IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140]
Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140]
2025-05-07T08:58:28.085749Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1
2025-05-07T08:58:28.085833Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10
2025-05-07T08:58:28.085876Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s
2025-05-07T08:58:28.085912Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration
2025-05-07T08:58:28.085954Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000
2025-05-07T08:58:28.086322Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000
2025-05-07T08:58:28.086398Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10
2025-05-07T08:58:28.086509Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false
2025-05-07T08:58:28.087195Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources#
2025-05-07T08:58:28.087584Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute
2025-05-07T08:58:28.293941Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs
2025-05-07T08:58:28.294040Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded
2025-05-07T08:58:28.341750Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete
2025-05-07T08:58:28.341989Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute
2025-05-07T08:58:28.342191Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944
2025-05-07T08:58:28.349128Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete
2025-05-07T08:58:28.349567Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0
2025-05-07T08:58:28.350407Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944
2025-05-07T08:58:28.350658Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944
2025-05-07T08:58:28.354608Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944
2025-05-07T08:58:28.356055Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944
2025-05-07T08:58:28.356119Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944
2025-05-07T08:58:28.356194Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute
2025-05-07T08:58:28.356240Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944,
LocalPathId: 1] 2025-05-07T08:58:28.356281Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:58:28.356527Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:58:28.364245Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T08:58:28.511325Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:58:28.511578Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:28.511851Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:58:28.512113Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:58:28.512179Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:28.515061Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:28.515259Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:58:28.515507Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:28.515575Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:58:28.515640Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:58:28.515682Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:58:28.518472Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:28.518543Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:58:28.518586Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:58:28.521059Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress 
Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:28.521129Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:28.521180Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:58:28.521250Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:58:28.525758Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:58:28.528513Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:58:28.528728Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:58:28.529783Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:28.529934Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:58:28.530018Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:58:28.530406Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:58:28.530473Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:58:28.530682Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:58:28.530773Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:58:28.533729Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:58:28.533822Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: 
[OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:58:28.534096Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:58:28.534145Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 95 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:281474976710760 msg type: 269090816 2025-05-07T08:58:29.345996Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 281474976710760, partId: 4294967295, tablet: 72057594046316545 2025-05-07T08:58:29.346210Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 281474976710760, at schemeshard: 72057594046678944 2025-05-07T08:58:29.346251Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 281474976710760, ready parts: 0/1, is published: true 2025-05-07T08:58:29.346289Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 281474976710760, at schemeshard: 72057594046678944 FAKE_COORDINATOR: Add transaction: 281474976710760 at step: 5000006 FAKE_COORDINATOR: advance: minStep5000006 State->FrontStep: 5000005 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710760 at step: 5000006 2025-05-07T08:58:29.346552Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000006, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:29.346658Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 281474976710760 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000006 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:58:29.346706Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_lock.cpp:44: [72057594046678944] TDropLock TPropose opId# 281474976710760:0 HandleReply TEvOperationPlan: step# 5000006 2025-05-07T08:58:29.346757Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 281474976710760:0 128 -> 240 2025-05-07T08:58:29.348612Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 281474976710760:0, at schemeshard: 72057594046678944 2025-05-07T08:58:29.348683Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046678944] TDone opId# 281474976710760:0 ProgressState 2025-05-07T08:58:29.348771Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976710760:0 progress is 1/1 2025-05-07T08:58:29.348807Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 281474976710760 ready parts: 1/1 2025-05-07T08:58:29.348840Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976710760:0 progress is 1/1 2025-05-07T08:58:29.348869Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 281474976710760 ready parts: 1/1 2025-05-07T08:58:29.348901Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 281474976710760, ready parts: 
1/1, is published: true 2025-05-07T08:58:29.348983Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:124:2150] message: TxId: 281474976710760 2025-05-07T08:58:29.349034Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 281474976710760 ready parts: 1/1 2025-05-07T08:58:29.349095Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 281474976710760:0 2025-05-07T08:58:29.349133Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 281474976710760:0 2025-05-07T08:58:29.349197Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 FAKE_COORDINATOR: Erasing txId 281474976710760 2025-05-07T08:58:29.351426Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6706: Handle: TEvNotifyTxCompletionResult: txId# 281474976710760 2025-05-07T08:58:29.351505Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6708: Message: TxId: 281474976710760 2025-05-07T08:58:29.351599Z node 1 :BUILD_INDEX INFO: schemeshard_build_index__progress.cpp:2321: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxReply : TEvNotifyTxCompletionResult, txId# 281474976710760, buildInfoId: 102 2025-05-07T08:58:29.351700Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:2324: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxReply : TEvNotifyTxCompletionResult, txId# 281474976710760, buildInfo: TBuildInfo{ IndexBuildId: 102, Uid: , DomainPathId: [OwnerId: 72057594046678944, LocalPathId: 1], TablePathId: [OwnerId: 72057594046678944, LocalPathId: 2], IndexType: EIndexTypeGlobalAsync, IndexName: UserDefinedIndexByValue, IndexColumn: value, State: Unlocking, IsCancellationRequested: 0, Issue: , SubscribersCount: 1, CreateSender: [1:384:2356], AlterMainTableTxId: 0, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 281474976710757, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976710758, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 5000004, ApplyTxId: 281474976710759, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976710760, UnlockTxStatus: StatusAccepted, UnlockTxDone: 0, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }, Billed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }} 2025-05-07T08:58:29.357465Z node 1 :BUILD_INDEX INFO: schemeshard_build_index__progress.cpp:1105: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 102 Unlocking TBuildInfo{ IndexBuildId: 102, Uid: , DomainPathId: [OwnerId: 72057594046678944, LocalPathId: 1], TablePathId: [OwnerId: 72057594046678944, LocalPathId: 2], IndexType: EIndexTypeGlobalAsync, IndexName: UserDefinedIndexByValue, IndexColumn: value, State: Unlocking, IsCancellationRequested: 0, Issue: , SubscribersCount: 1, CreateSender: [1:384:2356], AlterMainTableTxId: 0, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 281474976710757, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976710758, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 5000004, ApplyTxId: 281474976710759, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976710760, UnlockTxStatus: StatusAccepted, 
UnlockTxDone: 1, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }, Billed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }} 2025-05-07T08:58:29.357578Z node 1 :BUILD_INDEX INFO: schemeshard_build_index_tx_base.cpp:25: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: Change state from Unlocking to Done 2025-05-07T08:58:29.363973Z node 1 :BUILD_INDEX INFO: schemeshard_build_index__progress.cpp:1105: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 102 Done TBuildInfo{ IndexBuildId: 102, Uid: , DomainPathId: [OwnerId: 72057594046678944, LocalPathId: 1], TablePathId: [OwnerId: 72057594046678944, LocalPathId: 2], IndexType: EIndexTypeGlobalAsync, IndexName: UserDefinedIndexByValue, IndexColumn: value, State: Done, IsCancellationRequested: 0, Issue: , SubscribersCount: 1, CreateSender: [1:384:2356], AlterMainTableTxId: 0, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 281474976710757, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976710758, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 5000004, ApplyTxId: 281474976710759, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976710760, UnlockTxStatus: StatusAccepted, UnlockTxDone: 1, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }, Billed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }} 2025-05-07T08:58:29.364108Z node 1 :BUILD_INDEX TRACE: schemeshard_build_index_tx_base.cpp:325: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TIndexBuildInfo SendNotifications: : id# 102, subscribers count# 1 2025-05-07T08:58:29.364270Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-05-07T08:58:29.364318Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:473:2434] TestWaitNotification: OK eventTxId 102 2025-05-07T08:58:29.364962Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/TTLEnabledTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T08:58:29.365254Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/TTLEnabledTable" took 317us result status StatusSuccess 2025-05-07T08:58:29.365805Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/TTLEnabledTable" PathDescription { Self { Name: "TTLEnabledTable" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 6 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 6 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 TableSchemaVersion: 3 TablePartitionVersion: 1 } ChildrenExist: true } Table { Name: "TTLEnabledTable" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Uint64" TypeId: 4 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "modified_at" 
Type: "Timestamp" TypeId: 50 Id: 3 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableIndexes { Name: "UserDefinedIndexByValue" LocalPathId: 3 Type: EIndexTypeGlobalAsync State: EIndexStateReady KeyColumnNames: "value" SchemaVersion: 2 PathOwnerId: 72057594046678944 DataSize: 0 IndexImplTableDescriptions { } } TableSchemaVersion: 3 TTLSettings { Enabled { ColumnName: "modified_at" ExpireAfterSeconds: 3600 Tiers { ApplyAfterSeconds: 3600 Delete { } } } } IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardColumnTableTTL::AlterColumnTable [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140] 2025-05-07T08:58:19.309794Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:58:19.309896Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:58:19.309946Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:58:19.310013Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:58:19.310063Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:58:19.310116Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:58:19.310182Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, 
InflightLimit# 10 2025-05-07T08:58:19.310284Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:58:19.311075Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:58:19.311440Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:58:19.400400Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:58:19.400464Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:58:19.418390Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:58:19.418627Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:58:19.418807Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:58:19.426976Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:58:19.427360Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:58:19.428103Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:19.428324Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:58:19.432138Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:58:19.433638Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:58:19.433709Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:58:19.433787Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:58:19.433834Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:58:19.433881Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:58:19.434171Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:58:19.443974Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T08:58:19.603733Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { 
WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:58:19.604012Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:19.604283Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:58:19.604531Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:58:19.604603Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:19.607088Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:19.607253Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:58:19.607464Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:19.607522Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:58:19.607580Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:58:19.607622Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:58:19.609820Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:19.609886Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:58:19.609927Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:58:19.612327Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:19.612438Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:19.612486Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:58:19.612554Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:58:19.622932Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:58:19.625284Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:58:19.625483Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:58:19.626586Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:19.626751Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:58:19.626850Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:58:19.627169Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:58:19.627226Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:58:19.627429Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:58:19.627510Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:58:19.631071Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:58:19.631133Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:58:19.631374Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:58:19.631418Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 
57594046678944 2025-05-07T08:58:28.658928Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 103:0, at schemeshard: 72057594046678944 2025-05-07T08:58:28.659003Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 103:0, at schemeshard: 72057594046678944 2025-05-07T08:58:28.659088Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 103:0, at schemeshard: 72057594046678944 2025-05-07T08:58:28.659154Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 103:0, at schemeshard: 72057594046678944 2025-05-07T08:58:28.659239Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 103:0, at schemeshard: 72057594046678944 2025-05-07T08:58:28.661655Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 103:0, at schemeshard: 72057594046678944 2025-05-07T08:58:28.661813Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 103:0, at schemeshard: 72057594046678944 2025-05-07T08:58:28.661895Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 103:0, at schemeshard: 72057594046678944 2025-05-07T08:58:28.662071Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 103:0, at schemeshard: 72057594046678944 2025-05-07T08:58:28.662126Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046678944] TDone opId# 103:0 ProgressState 2025-05-07T08:58:28.662271Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#103:0 progress is 1/1 2025-05-07T08:58:28.662313Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-05-07T08:58:28.662357Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#103:0 progress is 1/1 2025-05-07T08:58:28.662394Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-05-07T08:58:28.662449Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 103, ready parts: 1/1, is published: true 2025-05-07T08:58:28.662560Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:2807:4073] message: TxId: 103 2025-05-07T08:58:28.662621Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-05-07T08:58:28.662685Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 103:0 2025-05-07T08:58:28.662722Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 103:0 2025-05-07T08:58:28.664181Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 66 2025-05-07T08:58:28.669271Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 
2025-05-07T08:58:28.669344Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [1:4026:5223] TestWaitNotification: OK eventTxId 103 2025-05-07T08:58:28.670055Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/TTLEnabledTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T08:58:28.670355Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/TTLEnabledTable" took 353us result status StatusSuccess 2025-05-07T08:58:28.671015Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/TTLEnabledTable" PathDescription { Self { Name: "TTLEnabledTable" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeColumnTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 11 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 11 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 3 ColumnTableVersion: 3 ColumnTableSchemaVersion: 1 ColumnTableTtlSettingsVersion: 3 } ChildrenExist: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 64 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } ColumnTableDescription { Name: "TTLEnabledTable" Schema { Columns { Id: 1 Name: "key" Type: "Uint64" TypeId: 4 NotNull: true StorageId: "" DefaultValue { } ColumnFamilyId: 0 } Columns { Id: 2 Name: "modified_at" Type: "Timestamp" TypeId: 50 NotNull: true StorageId: "" DefaultValue { } ColumnFamilyId: 0 } Columns { Id: 3 Name: "saved_at" Type: "Datetime" TypeId: 49 NotNull: false StorageId: "" DefaultValue { } ColumnFamilyId: 0 } Columns { Id: 4 Name: "data" Type: "Utf8" TypeId: 4608 NotNull: false StorageId: "" DefaultValue { } ColumnFamilyId: 0 } KeyColumnNames: "modified_at" NextColumnId: 5 Version: 1 Options { SchemeNeedActualization: false } ColumnFamilies { Id: 0 Name: "default" } NextColumnFamilyId: 1 } TtlSettings { Disabled { } Version: 3 } ColumnShardCount: 64 Sharding { ColumnShards: 72075186233409546 ColumnShards: 72075186233409547 ColumnShards: 72075186233409548 ColumnShards: 72075186233409549 ColumnShards: 72075186233409550 ColumnShards: 
72075186233409551 ColumnShards: 72075186233409552 ColumnShards: 72075186233409553 ColumnShards: 72075186233409554 ColumnShards: 72075186233409555 ColumnShards: 72075186233409556 ColumnShards: 72075186233409557 ColumnShards: 72075186233409558 ColumnShards: 72075186233409559 ColumnShards: 72075186233409560 ColumnShards: 72075186233409561 ColumnShards: 72075186233409562 ColumnShards: 72075186233409563 ColumnShards: 72075186233409564 ColumnShards: 72075186233409565 ColumnShards: 72075186233409566 ColumnShards: 72075186233409567 ColumnShards: 72075186233409568 ColumnShards: 72075186233409569 ColumnShards: 72075186233409570 ColumnShards: 72075186233409571 ColumnShards: 72075186233409572 ColumnShards: 72075186233409573 ColumnShards: 72075186233409574 ColumnShards: 72075186233409575 ColumnShards: 72075186233409576 ColumnShards: 72075186233409577 ColumnShards: 72075186233409578 ColumnShards: 72075186233409579 ColumnShards: 72075186233409580 ColumnShards: 72075186233409581 ColumnShards: 72075186233409582 ColumnShards: 72075186233409583 ColumnShards: 72075186233409584 ColumnShards: 72075186233409585 ColumnShards: 72075186233409586 ColumnShards: 72075186233409587 ColumnShards: 72075186233409588 ColumnShards: 72075186233409589 ColumnShards: 72075186233409590 ColumnShards: 72075186233409591 ColumnShards: 72075186233409592 ColumnShards: 72075186233409593 ColumnShards: 72075186233409594 ColumnShards: 72075186233409595 ColumnShards: 72075186233409596 ColumnShards: 72075186233409597 ColumnShards: 72075186233409598 ColumnShards: 72075186233409599 ColumnShards: 72075186233409600 ColumnShards: 72075186233409601 ColumnShards: 72075186233409602 ColumnShards: 72075186233409603 ColumnShards: 72075186233409604 ColumnShards: 72075186233409605 ColumnShards: 72075186233409606 ColumnShards: 72075186233409607 ColumnShards: 72075186233409608 ColumnShards: 72075186233409609 HashSharding { Function: HASH_FUNCTION_CONSISTENCY_64 Columns: "modified_at" } } StorageConfig { DataChannelCount: 64 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 TestModificationResults wait txId: 104 2025-05-07T08:58:28.674755Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpAlterColumnTable AlterColumnTable { Name: "TTLEnabledTable" AlterSchema { AlterColumns { Name: "data" DefaultValue: "10" } } } } TxId: 104 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:58:28.674992Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: alter_table.cpp:282: TAlterColumnTable Propose, path: /MyRoot/TTLEnabledTable, opId: 104:0, at schemeshard: 72057594046678944 2025-05-07T08:58:28.675467Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 104:1, propose status:StatusSchemeError, reason: schema update error: sparsed columns are disabled, at schemeshard: 72057594046678944 2025-05-07T08:58:28.678338Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 104, response: Status: StatusSchemeError Reason: "schema update error: sparsed columns are disabled" TxId: 104 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:58:28.678525Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 104, database: /MyRoot, subject: , status: StatusSchemeError, reason: schema update error: sparsed columns are disabled, operation: ALTER COLUMN TABLE, path: /MyRoot/TTLEnabledTable 
TestModificationResult got TxId: 104, wait until txId: 104 TestWaitNotification wait txId: 104 2025-05-07T08:58:28.678895Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 104: send EvNotifyTxCompletion 2025-05-07T08:58:28.678941Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 104 2025-05-07T08:58:28.679517Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 104, at schemeshard: 72057594046678944 2025-05-07T08:58:28.679642Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 104: got EvNotifyTxCompletionResult 2025-05-07T08:58:28.679686Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 104: satisfy waiter [1:4363:5559] TestWaitNotification: OK eventTxId 104 >> TSchemeShardColumnTableTTL::CreateColumnTableNegative_UnknownColumn [GOOD] >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_index_4__SYNC-pk_types0-all_types0-index0---SYNC] [GOOD] >> TSchemeShardTTLTests::CheckCounters >> TExternalDataSourceTest::SchemeErrors >> TestYmqHttpProxy::BillingRecordsForJsonApi [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_serverless/unittest >> TSchemeShardServerLess::StorageBilling [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:128:2058] recipient: [1:108:2140] 2025-05-07T08:57:41.354415Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:57:41.354514Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:57:41.354550Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:57:41.354585Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:57:41.354627Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:57:41.354672Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:57:41.354722Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:57:41.354786Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:57:41.355489Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources 
configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:57:41.355813Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:57:41.475759Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:57:41.475822Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:57:41.496561Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:57:41.496688Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:57:41.496845Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:57:41.506654Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:57:41.507211Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:57:41.507843Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:57:41.508162Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:57:41.510512Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:57:41.512138Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:57:41.512202Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:57:41.512254Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:57:41.512317Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:57:41.512361Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:57:41.512529Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:57:41.520387Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:239:2058] recipient: [1:15:2062] 2025-05-07T08:57:41.709284Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:57:41.709499Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 
72057594046678944 2025-05-07T08:57:41.709737Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:57:41.709952Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:57:41.714150Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:57:41.719803Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:57:41.719963Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:57:41.720168Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:57:41.720240Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:57:41.720297Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:57:41.720335Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:57:41.726924Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:57:41.727002Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:57:41.727047Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:57:41.734950Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:57:41.735019Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:57:41.735086Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:57:41.735135Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:57:41.739115Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:57:41.746747Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 
from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:57:41.746937Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:57:41.747892Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:57:41.748064Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 130 RawX2: 4294969450 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:57:41.748146Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:57:41.748427Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:57:41.748487Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:57:41.748654Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:57:41.748743Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:57:41.752485Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:57:41.752543Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:57:41.752725Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:57:41.752795Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 
d: 72075186233409549, txId: 107, path id: 1 2025-05-07T08:58:29.929183Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:666:2578], at schemeshard: 72075186233409549, txId: 107, path id: 2 2025-05-07T08:58:29.929752Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 107:0, at schemeshard: 72075186233409549 2025-05-07T08:58:29.929809Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1036: NTableState::TProposedWaitParts operationId# 107:0 ProgressState at tablet: 72075186233409549 2025-05-07T08:58:29.929901Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:674: all shard schema changes has been received, operationId: 107:0, at schemeshard: 72075186233409549 2025-05-07T08:58:29.929939Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:686: send schema changes ack message, operation: 107:0, datashard: 72075186233409552, at schemeshard: 72075186233409549 2025-05-07T08:58:29.930008Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 107:0 129 -> 240 2025-05-07T08:58:29.930797Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72075186233409549, msg: Owner: 72075186233409549 Generation: 2 LocalPathId: 1 Version: 9 PathOwnerId: 72075186233409549, cookie: 107 2025-05-07T08:58:29.930909Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72075186233409549, msg: Owner: 72075186233409549 Generation: 2 LocalPathId: 1 Version: 9 PathOwnerId: 72075186233409549, cookie: 107 2025-05-07T08:58:29.930957Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72075186233409549, txId: 107 2025-05-07T08:58:29.930997Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72075186233409549, txId: 107, pathId: [OwnerId: 72075186233409549, LocalPathId: 1], version: 9 2025-05-07T08:58:29.931036Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72075186233409549, LocalPathId: 1] was 5 2025-05-07T08:58:29.932217Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72075186233409549, msg: Owner: 72075186233409549 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72075186233409549, cookie: 107 2025-05-07T08:58:29.932310Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72075186233409549, msg: Owner: 72075186233409549 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72075186233409549, cookie: 107 2025-05-07T08:58:29.932337Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72075186233409549, txId: 107 2025-05-07T08:58:29.932383Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72075186233409549, txId: 107, pathId: [OwnerId: 72075186233409549, LocalPathId: 2], version: 18446744073709551615 2025-05-07T08:58:29.932414Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72075186233409549, LocalPathId: 2] was 
4 2025-05-07T08:58:29.932474Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 107, ready parts: 0/1, is published: true 2025-05-07T08:58:29.935952Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 107:0, at schemeshard: 72075186233409549 2025-05-07T08:58:29.936010Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_table.cpp:414: TDropTable TProposedDeletePart operationId: 107:0 ProgressState, at schemeshard: 72075186233409549 2025-05-07T08:58:29.936352Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove table for pathId [OwnerId: 72075186233409549, LocalPathId: 2] was 3 2025-05-07T08:58:29.936478Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#107:0 progress is 1/1 2025-05-07T08:58:29.936514Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 107 ready parts: 1/1 2025-05-07T08:58:29.936551Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#107:0 progress is 1/1 2025-05-07T08:58:29.936584Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 107 ready parts: 1/1 2025-05-07T08:58:29.936682Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 107, ready parts: 1/1, is published: true 2025-05-07T08:58:29.936756Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:806:2687] message: TxId: 107 2025-05-07T08:58:29.936797Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 107 ready parts: 1/1 2025-05-07T08:58:29.936834Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 107:0 2025-05-07T08:58:29.936869Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 107:0 2025-05-07T08:58:29.936980Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72075186233409549, LocalPathId: 2] was 2 2025-05-07T08:58:29.937635Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72075186233409549, cookie: 107 2025-05-07T08:58:29.939593Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72075186233409549, cookie: 107 2025-05-07T08:58:29.941072Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 107: got EvNotifyTxCompletionResult 2025-05-07T08:58:29.941123Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 107: satisfy waiter [1:2182:4027] TestWaitNotification: OK eventTxId 107 2025-05-07T08:58:29.962401Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5509: Handle TEvStateChanged, at schemeshard: 72075186233409549, message: Source { RawX1: 779 RawX2: 4294969964 } TabletId: 72075186233409552 State: 4 2025-05-07T08:58:29.962530Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__state_changed_reply.cpp:78: TTxShardStateChanged DoExecute, datashard informs about state changing, datashardId: 72075186233409552, state: 
Offline, at schemeshard: 72075186233409549 2025-05-07T08:58:29.964684Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72075186233409549:4 hive 72057594037968897 at ss 72075186233409549 2025-05-07T08:58:29.965307Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72075186233409549 ShardLocalIdx: 4 TxId_Deprecated: 4 TabletID: 72075186233409552 2025-05-07T08:58:29.967564Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5898: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 4 ShardOwnerId: 72075186233409549 ShardLocalIdx: 4, at schemeshard: 72075186233409549 2025-05-07T08:58:29.967935Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72075186233409549, LocalPathId: 2] was 1 2025-05-07T08:58:29.968809Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72075186233409549 2025-05-07T08:58:29.968855Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72075186233409549, LocalPathId: 2], at schemeshard: 72075186233409549 2025-05-07T08:58:29.968919Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72075186233409549, LocalPathId: 1] was 4 2025-05-07T08:58:29.977657Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72075186233409549:4 2025-05-07T08:58:29.977762Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72075186233409549:4 tabletId 72075186233409552 2025-05-07T08:58:29.978237Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72075186233409549 2025-05-07T08:58:30.121939Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6592: Handle: TEvRunConditionalErase, at schemeshard: 72075186233409549 2025-05-07T08:58:30.122073Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:56: TTxRunConditionalErase DoExecute: at schemeshard: 72075186233409549 2025-05-07T08:58:30.122147Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:189: TTxRunConditionalErase DoComplete: at schemeshard: 72075186233409549 2025-05-07T08:58:30.122228Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6592: Handle: TEvRunConditionalErase, at schemeshard: 72075186233409546 2025-05-07T08:58:30.122284Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:56: TTxRunConditionalErase DoExecute: at schemeshard: 72075186233409546 2025-05-07T08:58:30.122326Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:189: TTxRunConditionalErase DoComplete: at schemeshard: 72075186233409546 2025-05-07T08:58:30.122376Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6592: Handle: TEvRunConditionalErase, at schemeshard: 72057594046678944 2025-05-07T08:58:30.122411Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:56: TTxRunConditionalErase DoExecute: at schemeshard: 72057594046678944 2025-05-07T08:58:30.122459Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:189: TTxRunConditionalErase DoComplete: at schemeshard: 72057594046678944 
2025-05-07T08:58:30.206443Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:58:30.206817Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:191: TTxServerlessStorageBilling: make a bill, record: '{"usage":{"start":1600452180,"quantity":59,"finish":1600452239,"type":"delta","unit":"byte*second"},"tags":{"ydb_size":0},"id":"72057594046678944-3-1600452180-1600452239-0","cloud_id":"CLOUD_ID_VAL","source_wt":1600452240,"source_id":"sless-docapi-ydb-storage","resource_id":"DATABASE_ID_VAL","schema":"ydb.serverless.v1","folder_id":"FOLDER_ID_VAL","version":"1.0.0"} ', schemeshardId: 72075186233409549, domainId: [OwnerId: 72057594046678944, LocalPathId: 3], now: 2020-09-18T18:04:00.028000Z, LastBillTime: 2020-09-18T18:02:00.000000Z, lastBilled: 2020-09-18T18:02:00.000000Z--2020-09-18T18:02:59.000000Z, toBill: 2020-09-18T18:03:00.000000Z--2020-09-18T18:03:59.000000Z, next retry at: 2020-09-18T18:05:00.000000Z 2025-05-07T08:58:30.214965Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete grabMeteringMessage has happened 2025-05-07T08:58:30.215147Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:335: tests -- TFakeMetering got TEvMetering::TEvWriteMeteringJson ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardColumnTableTTL::CreateColumnTableNegative_UnknownColumn [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:128:2058] recipient: [1:108:2140] 2025-05-07T08:58:30.148880Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:58:30.149018Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:58:30.149066Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:58:30.149106Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:58:30.149152Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:58:30.149182Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:58:30.149236Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:58:30.149323Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 
2025-05-07T08:58:30.153583Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:58:30.153995Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:58:30.258243Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:58:30.258307Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:58:30.282324Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:58:30.282491Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:58:30.282661Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:58:30.305277Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:58:30.306067Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:58:30.306779Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:30.307103Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:58:30.309714Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:58:30.311439Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:58:30.311574Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:58:30.311641Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:58:30.311708Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:58:30.311766Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:58:30.312003Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:58:30.333321Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:239:2058] recipient: [1:15:2062] 2025-05-07T08:58:30.579732Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:58:30.580005Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:30.580296Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:58:30.580609Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:58:30.580700Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:30.591750Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:30.591955Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:58:30.592241Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:30.592325Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:58:30.592375Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:58:30.592421Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:58:30.607295Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:30.607386Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:58:30.607447Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:58:30.623313Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:30.623413Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:30.623466Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:58:30.623563Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:58:30.628439Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:58:30.639262Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:58:30.639526Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:58:30.640642Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:30.640806Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 130 RawX2: 4294969450 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:58:30.640873Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:58:30.641227Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:58:30.641306Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:58:30.641566Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:58:30.641659Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:58:30.644357Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:58:30.644420Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:58:30.644635Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:58:30.644677Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:206:2208], at schemeshard: 72057594046678944, txId: 1, path id: 1 2025-05-07T08:58:30.644761Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:30.644805Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046678944] TDone opId# 1:0 ProgressState 2025-05-07T08:58:30.644952Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#1:0 progress is 1/1 2025-05-07T08:58:30.644992Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: 
TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-05-07T08:58:30.645033Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#1:0 progress is 1/1 2025-05-07T08:58:30.645064Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-05-07T08:58:30.645102Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 1, ready parts: 1/1, is published: false 2025-05-07T08:58:30.645162Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-05-07T08:58:30.645199Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 1:0 2025-05-07T08:58:30.645228Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 1:0 2025-05-07T08:58:30.645304Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-05-07T08:58:30.645340Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 1, publications: 1, subscribers: 0 2025-05-07T08:58:30.645373Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 1, [OwnerId: 72057594046678944, LocalPathId: 1], 3 2025-05-07T08:58:30.666419Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-05-07T08:58:30.666609Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-05-07T08:58:30.666654Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1 2025-05-07T08:58:30.666699Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 3 2025-05-07T08:58:30.666747Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:58:30.666855Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1, subscribers: 0 2025-05-07T08:58:30.669985Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1 2025-05-07T08:58:30.670525Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1, at schemeshard: 72057594046678944 TestModificationResults wait txId: 101 2025-05-07T08:58:30.671812Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:434: actor# [1:269:2260] Bootstrap 2025-05-07T08:58:30.697234Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:453: actor# [1:269:2260] Become StateWork (SchemeCache [1:274:2265]) 2025-05-07T08:58:30.700205Z 
node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateColumnTable CreateColumnTable { Name: "TTLEnabledTable" Schema { Columns { Name: "key" Type: "Uint64" NotNull: true } Columns { Name: "modified_at" Type: "Timestamp" } KeyColumnNames: "key" } TtlSettings { Enabled { ColumnName: "created_at" } } } } TxId: 101 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:58:30.700594Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: create_table.cpp:593: TCreateColumnTable Propose, path: /MyRoot/TTLEnabledTable, opId: 101:0, at schemeshard: 72057594046678944 2025-05-07T08:58:30.701020Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 101:1, propose status:StatusSchemeError, reason: Incorrect ttl column - not found in scheme, at schemeshard: 72057594046678944 2025-05-07T08:58:30.706715Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:269:2260] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-05-07T08:58:30.711261Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 101, response: Status: StatusSchemeError Reason: "Incorrect ttl column - not found in scheme" TxId: 101 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:58:30.711432Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 101, database: /MyRoot, subject: , status: StatusSchemeError, reason: Incorrect ttl column - not found in scheme, operation: CREATE COLUMN TABLE, path: /MyRoot/ 2025-05-07T08:58:30.712013Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 TestModificationResult got TxId: 101, wait until txId: 101 |91.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_replication/ydb-core-tx-datashard-ut_replication |91.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_replication/ydb-core-tx-datashard-ut_replication >> TestYmqHttpProxy::TestChangeMessageVisibility >> Cdc::Write[TopicRunner] [GOOD] >> Cdc::UpdateStream >> TSchemeShardTTLTestsWithReboots::AlterTable >> TExternalDataSourceTest::SchemeErrors [GOOD] >> TSchemeShardTTLTestsWithReboots::MoveTable >> TCmsTest::ManageRequestsWrong >> TCmsTenatsTest::TestNoneTenantPolicy >> TDowntimeTest::AddDowntime [GOOD] >> Cdc::NaN[PqRunner] [GOOD] >> TSchemeShardTTLTests::CreateTableShouldFailOnWrongUnit-EnableTablePgTypes-false [GOOD] >> TSchemeShardTTLTests::LegacyTtlSettingsNoTiersAlterTable >> AsyncIndexChangeExchange::ShouldDeliverChangesOnSplitMerge [GOOD] >> Cdc::NaN[YdsRunner] >> AsyncIndexChangeExchange::ShouldRejectChangesOnQueueOverflowByCount >> TDowntimeTest::HasUpcomingDowntime [GOOD] >> TDowntimeTest::CleanupOldSegments [GOOD] |91.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/cms/ut/unittest >> TDowntimeTest::CleanupOldSegments [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_external_data_source/unittest >> TExternalDataSourceTest::SchemeErrors [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2141] Leader for TabletID 72057594046678944 is [1:126:2152] sender: [1:129:2058] recipient: [1:109:2141] 
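
[editor] The CreateColumnTable proposal above is rejected with StatusSchemeError because its TtlSettings name the column "created_at" while the schema only declares "key" and "modified_at". A minimal YQL sketch of a corrected request — the 30-day interval and the column-store options are illustrative assumptions, not taken from the log:

    CREATE TABLE `/MyRoot/TTLEnabledTable` (
        key Uint64 NOT NULL,
        modified_at Timestamp,
        PRIMARY KEY (key)
    )
    PARTITION BY HASH(key)
    WITH (
        STORE = COLUMN,
        -- the TTL column must be one of the declared columns
        TTL = Interval("P30D") ON modified_at
    );
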
2025-05-07T08:58:31.975091Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:58:31.975201Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:58:31.975242Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:58:31.975279Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:58:31.975330Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:58:31.975364Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:58:31.975450Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:58:31.975538Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:58:31.976303Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:58:31.976699Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:58:32.065259Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7610: Got new config: QueryServiceConfig { AvailableExternalDataSources: "ObjectStorage" AvailableExternalDataSources: "ClickHouse" AvailableExternalDataSources: "PostgreSQL" AvailableExternalDataSources: "MySQL" AvailableExternalDataSources: "Ydb" AvailableExternalDataSources: "YT" AvailableExternalDataSources: "Greenplum" AvailableExternalDataSources: "MsSQLServer" AvailableExternalDataSources: "Oracle" AvailableExternalDataSources: "Logging" AvailableExternalDataSources: "Solomon" } 2025-05-07T08:58:32.065344Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:58:32.066184Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# ObjectStorage, ClickHouse, PostgreSQL, MySQL, Ydb, YT, Greenplum, MsSQLServer, Oracle, Logging, Solomon 2025-05-07T08:58:32.086728Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:58:32.086876Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:58:32.087075Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:58:32.094578Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:58:32.094810Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:58:32.095542Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:32.095780Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:58:32.097866Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:58:32.099338Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:58:32.099404Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:58:32.099590Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:58:32.099644Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:58:32.099694Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:58:32.099830Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:58:32.107561Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2152] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T08:58:32.285505Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:58:32.286118Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:32.286368Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:58:32.286598Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:58:32.286696Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:32.295089Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:32.295259Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: 
AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:58:32.295531Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:32.295596Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:58:32.295651Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:58:32.295688Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:58:32.302939Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:32.303030Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:58:32.303079Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:58:32.315095Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:32.315174Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:32.315253Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:58:32.315315Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:58:32.319582Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:58:32.326514Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:58:32.326824Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:58:32.328064Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:32.328235Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 131 RawX2: 4294969451 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 
2025-05-07T08:58:32.328314Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:58:32.328650Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:58:32.328720Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:58:32.328910Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:58:32.329040Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:58:32.336039 ... 03Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_create_external_data_source.cpp:336: [72057594046678944] CreateNewExternalDataSource, opId 126:0, feature flag EnableReplaceIfExistsForExternalEntities 0, tx WorkingDir: "/MyRoot/DirA" OperationType: ESchemeOpCreateExternalDataSource FailOnExist: false CreateExternalDataSource { Name: "MyExternalDataSource" SourceType: "ObjectStorage" Location: "https://s3.cloud.net/my_bucket" } 2025-05-07T08:58:32.607649Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_external_data_source.cpp:232: [72057594046678944] TCreateExternalDataSource Propose: opId# 126:0, path# /MyRoot/DirA/MyExternalDataSource 2025-05-07T08:58:32.607873Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 126:1, propose status:StatusSchemeError, reason: Authorization method isn't specified, at schemeshard: 72057594046678944 2025-05-07T08:58:32.615018Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 126, response: Status: StatusSchemeError Reason: "Authorization method isn\'t specified" TxId: 126 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:58:32.615288Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 126, database: /MyRoot, subject: , status: StatusSchemeError, reason: Authorization method isn't specified, operation: CREATE EXTERNAL DATA SOURCE, path: /MyRoot/DirA/MyExternalDataSource TestModificationResult got TxId: 126, wait until txId: 126 TestModificationResults wait txId: 127 2025-05-07T08:58:32.622988Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/DirA" OperationType: ESchemeOpCreateExternalDataSource CreateExternalDataSource { Name: "MyExternalDataSource" SourceType: "ObjectStorage" Location: 
"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" Auth { None { } } } } TxId: 127 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:58:32.623445Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_create_external_data_source.cpp:336: [72057594046678944] CreateNewExternalDataSource, opId 127:0, feature flag EnableReplaceIfExistsForExternalEntities 0, tx WorkingDir: "/MyRoot/DirA" OperationType: ESchemeOpCreateExternalDataSource FailOnExist: false CreateExternalDataSource { Name: "MyExternalDataSource" SourceType: "ObjectStorage" Location: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" Auth { None { } } } 2025-05-07T08:58:32.623557Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_external_data_source.cpp:232: [72057594046678944] TCreateExternalDataSource Propose: opId# 127:0, path# /MyRoot/DirA/MyExternalDataSource 2025-05-07T08:58:32.623765Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 127:1, propose status:StatusSchemeError, reason: Maximum length of location must be less or equal equal to 1000 but got 1001, at schemeshard: 72057594046678944 2025-05-07T08:58:32.626511Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 127, response: Status: StatusSchemeError Reason: "Maximum length of location must be less or equal equal to 1000 but got 1001" TxId: 127 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:58:32.626724Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 127, database: /MyRoot, subject: , status: 
StatusSchemeError, reason: Maximum length of location must be less or equal equal to 1000 but got 1001, operation: CREATE EXTERNAL DATA SOURCE, path: /MyRoot/DirA/MyExternalDataSource TestModificationResult got TxId: 127, wait until txId: 127 TestModificationResults wait txId: 128 2025-05-07T08:58:32.634141Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/DirA" OperationType: ESchemeOpCreateExternalDataSource CreateExternalDataSource { Name: "MyExternalDataSource" SourceType: "ObjectStorage" Installation: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" Auth { None { } } } } TxId: 128 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:58:32.634585Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_create_external_data_source.cpp:336: [72057594046678944] CreateNewExternalDataSource, opId 128:0, feature flag EnableReplaceIfExistsForExternalEntities 0, tx WorkingDir: "/MyRoot/DirA" OperationType: ESchemeOpCreateExternalDataSource FailOnExist: false CreateExternalDataSource { Name: "MyExternalDataSource" SourceType: "ObjectStorage" Installation: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" Auth { None { } } } 2025-05-07T08:58:32.634719Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_external_data_source.cpp:232: [72057594046678944] TCreateExternalDataSource Propose: opId# 128:0, path# /MyRoot/DirA/MyExternalDataSource 2025-05-07T08:58:32.634937Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 128:1, propose status:StatusSchemeError, 
reason: Maximum length of installation must be less or equal equal to 1000 but got 1001, at schemeshard: 72057594046678944 2025-05-07T08:58:32.643055Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 128, response: Status: StatusSchemeError Reason: "Maximum length of installation must be less or equal equal to 1000 but got 1001" TxId: 128 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:58:32.643377Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 128, database: /MyRoot, subject: , status: StatusSchemeError, reason: Maximum length of installation must be less or equal equal to 1000 but got 1001, operation: CREATE EXTERNAL DATA SOURCE, path: /MyRoot/DirA/MyExternalDataSource TestModificationResult got TxId: 128, wait until txId: 128 TestModificationResults wait txId: 129 2025-05-07T08:58:32.650972Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/DirA" OperationType: ESchemeOpCreateExternalDataSource CreateExternalDataSource { Name: "" SourceType: "ObjectStorage" Location: "https://s3.cloud.net/my_bucket" Auth { None { } } } } TxId: 129 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:58:32.651367Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_create_external_data_source.cpp:336: [72057594046678944] CreateNewExternalDataSource, opId 129:0, feature flag EnableReplaceIfExistsForExternalEntities 0, tx WorkingDir: "/MyRoot/DirA" OperationType: ESchemeOpCreateExternalDataSource CreateExternalDataSource { Name: "" SourceType: "ObjectStorage" Location: "https://s3.cloud.net/my_bucket" Auth { None { } } } 2025-05-07T08:58:32.651506Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_external_data_source.cpp:232: [72057594046678944] TCreateExternalDataSource Propose: opId# 129:0, path# /MyRoot/DirA/ 2025-05-07T08:58:32.651643Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 129:1, propose status:StatusSchemeError, reason: Check failed: path: '/MyRoot/DirA/', error: path part shouldn't be empty, at schemeshard: 72057594046678944 2025-05-07T08:58:32.659550Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 129, response: Status: StatusSchemeError Reason: "Check failed: path: \'/MyRoot/DirA/\', error: path part shouldn\'t be empty" TxId: 129 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:58:32.659848Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 129, database: /MyRoot, subject: , status: StatusSchemeError, reason: Check failed: path: '/MyRoot/DirA/', error: path part shouldn't be empty, operation: CREATE EXTERNAL DATA SOURCE, path: /MyRoot/DirA/ TestModificationResult got TxId: 129, wait until txId: 129 >> Balancing::Balancing_OneTopic_PQv1 [GOOD] >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_index_3__SYNC-pk_types1-all_types1-index1---SYNC] [GOOD] >> Balancing::Balancing_ManyTopics_TopicApi ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLTests::CreateTableShouldFailOnWrongUnit-EnableTablePgTypes-false [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] 
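
[editor] TExternalDataSourceTest::SchemeErrors above exercises three validations in TCreateExternalDataSource Propose: an Auth block must be present ("Authorization method isn't specified"), Location and Installation are capped at 1000 characters, and the target path part must be non-empty. A hedged YQL sketch that passes all three checks, reusing the path, source type, and location from the log (AUTH_METHOD "NONE" is assumed to mirror the Auth { None { } } block in the proto):

    CREATE EXTERNAL DATA SOURCE `/MyRoot/DirA/MyExternalDataSource` WITH (
        SOURCE_TYPE = "ObjectStorage",
        LOCATION = "https://s3.cloud.net/my_bucket",  -- must be at most 1000 characters
        AUTH_METHOD = "NONE"                          -- omitting auth fails the propose
    );
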
recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140] 2025-05-07T08:58:05.946381Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:58:05.946509Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:58:05.946555Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:58:05.946593Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:58:05.946667Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:58:05.946705Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:58:05.946758Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:58:05.946850Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:58:05.947564Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:58:05.947968Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:58:06.051393Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:58:06.051467Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:58:06.078768Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:58:06.079014Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:58:06.079219Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:58:06.098018Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:58:06.098487Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:58:06.099294Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:06.099525Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:58:06.103908Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, 
at schemeshard: 72057594046678944 2025-05-07T08:58:06.105586Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:58:06.105660Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:58:06.105747Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:58:06.105802Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:58:06.105850Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:58:06.106138Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:58:06.116398Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T08:58:06.302306Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:58:06.302551Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:06.302781Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:58:06.303064Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:58:06.303131Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:06.306398Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:06.306576Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:58:06.306800Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:06.306855Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:58:06.306915Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 
ProgressState no shards to create, do next state 2025-05-07T08:58:06.306950Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:58:06.309961Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:06.310050Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:58:06.310091Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:58:06.312929Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:06.313030Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:06.313107Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:58:06.313172Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:58:06.317412Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:58:06.320246Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:58:06.320474Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:58:06.321555Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:06.321722Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:58:06.321791Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:58:06.322152Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:58:06.322210Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:58:06.322406Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:58:06.322501Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:58:06.325310Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:58:06.325365Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:58:06.325569Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:58:06.325610Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... Root 2025-05-07T08:58:33.826032Z node 27 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:33.826148Z node 27 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:58:33.826230Z node 27 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:58:33.826302Z node 27 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:58:33.828860Z node 27 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:33.828984Z node 27 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:58:33.829072Z node 27 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:58:33.834903Z node 27 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:33.834987Z node 27 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:33.835106Z node 27 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:58:33.835216Z node 27 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:58:33.835468Z node 27 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:58:33.841723Z node 27 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:58:33.842106Z node 27 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:58:33.843388Z node 27 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:33.843620Z node 27 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 132 RawX2: 115964119147 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:58:33.843726Z node 27 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:58:33.844145Z node 27 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:58:33.844252Z node 27 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:58:33.844646Z node 27 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:58:33.844789Z node 27 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:58:33.856311Z node 27 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:58:33.856425Z node 27 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:58:33.856797Z node 27 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:58:33.856897Z node 27 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [27:206:2208], at schemeshard: 72057594046678944, txId: 1, path id: 1 2025-05-07T08:58:33.857503Z node 27 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:33.857606Z node 27 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046678944] TDone opId# 1:0 ProgressState 2025-05-07T08:58:33.857875Z node 27 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#1:0 progress is 1/1 2025-05-07T08:58:33.857962Z node 27 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation 
IsReadyToDone TxId: 1 ready parts: 1/1 2025-05-07T08:58:33.858098Z node 27 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#1:0 progress is 1/1 2025-05-07T08:58:33.858177Z node 27 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-05-07T08:58:33.858272Z node 27 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 1, ready parts: 1/1, is published: false 2025-05-07T08:58:33.858370Z node 27 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-05-07T08:58:33.858468Z node 27 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 1:0 2025-05-07T08:58:33.858537Z node 27 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 1:0 2025-05-07T08:58:33.858678Z node 27 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-05-07T08:58:33.858762Z node 27 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 1, publications: 1, subscribers: 0 2025-05-07T08:58:33.858837Z node 27 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 1, [OwnerId: 72057594046678944, LocalPathId: 1], 3 2025-05-07T08:58:33.860236Z node 27 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-05-07T08:58:33.860458Z node 27 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-05-07T08:58:33.860561Z node 27 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1 2025-05-07T08:58:33.860639Z node 27 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 3 2025-05-07T08:58:33.860732Z node 27 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:58:33.860936Z node 27 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1, subscribers: 0 2025-05-07T08:58:33.865123Z node 27 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1 2025-05-07T08:58:33.866022Z node 27 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1, at schemeshard: 72057594046678944 TestModificationResults wait txId: 101 2025-05-07T08:58:33.867575Z node 27 :TX_PROXY DEBUG: proxy_impl.cpp:434: actor# [27:269:2260] Bootstrap 2025-05-07T08:58:33.899806Z node 27 :TX_PROXY DEBUG: proxy_impl.cpp:453: actor# [27:269:2260] Become StateWork (SchemeCache [27:274:2265]) 
2025-05-07T08:58:33.904032Z node 27 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateTable CreateTable { Name: "TTLEnabledTable" Columns { Name: "key" Type: "Uint64" } Columns { Name: "modified_at" Type: "DyNumber" } KeyColumnNames: "key" TTLSettings { Enabled { ColumnName: "modified_at" ColumnUnit: UNIT_AUTO } } } } TxId: 101 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:58:33.904735Z node 27 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_table.cpp:426: TCreateTable Propose, path: /MyRoot/TTLEnabledTable, opId: 101:0, at schemeshard: 72057594046678944 2025-05-07T08:58:33.904977Z node 27 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_create_table.cpp:433: TCreateTable Propose, path: /MyRoot/TTLEnabledTable, opId: 101:0, schema: Name: "TTLEnabledTable" Columns { Name: "key" Type: "Uint64" } Columns { Name: "modified_at" Type: "DyNumber" } KeyColumnNames: "key" TTLSettings { Enabled { ColumnName: "modified_at" ColumnUnit: UNIT_AUTO } }, at schemeshard: 72057594046678944 2025-05-07T08:58:33.905750Z node 27 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 101:1, propose status:StatusSchemeError, reason: To enable TTL on integral type column 'ValueSinceUnixEpochModeSettings' should be specified, at schemeshard: 72057594046678944 2025-05-07T08:58:33.908734Z node 27 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [27:269:2260] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-05-07T08:58:33.912182Z node 27 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 101, response: Status: StatusSchemeError Reason: "To enable TTL on integral type column \'ValueSinceUnixEpochModeSettings\' should be specified" TxId: 101 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:58:33.912497Z node 27 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 101, database: /MyRoot, subject: , status: StatusSchemeError, reason: To enable TTL on integral type column 'ValueSinceUnixEpochModeSettings' should be specified, operation: CREATE TABLE, path: /MyRoot/TTLEnabledTable 2025-05-07T08:58:33.913362Z node 27 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 TestModificationResult got TxId: 101, wait until txId: 101 >> TopicAutoscaling::Simple_AutoscaleAwareSDK [GOOD] >> BackupRestore::TestAllPrimitiveTypes-DYNUMBER [GOOD] >> TVersions::Wreck0 [GOOD] >> BackupRestoreS3::TestAllIndexTypes-EIndexTypeGlobalAsync [GOOD] >> Viewer::JsonStorageListingV2PDiskIdFilter [GOOD] >> TestKinesisHttpProxy::TestWrongStream [GOOD] >> TopicAutoscaling::Simple_PQv1 >> TSchemeShardTTLTests::LegacyTtlSettingsNoTiersAlterTable [GOOD] >> TVersions::Wreck0Reverse >> TestYmqHttpProxy::TestDeleteMessage [GOOD] >> TestKinesisHttpProxy::TestWrongStream2 >> TestKinesisHttpProxy::CreateDeleteStreamWithConsumerWithFlag [GOOD] >> BackupRestoreS3::TestAllIndexTypes-EIndexTypeGlobalUnique [GOOD] >> BackupRestoreS3::TestAllIndexTypes-EIndexTypeGlobalVectorKmeansTree |91.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/async_replication/py3test |91.2%| [TA] {RESULT} $(B)/ydb/core/kqp/workload_service/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} |91.2%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_replication/ydb-core-tx-datashard-ut_replication ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLTests::LegacyTtlSettingsNoTiersAlterTable [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:128:2058] recipient: [1:108:2140] 2025-05-07T08:58:35.172902Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:58:35.173022Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:58:35.173063Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:58:35.173099Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:58:35.173143Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:58:35.173186Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:58:35.173234Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:58:35.173303Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:58:35.174471Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:58:35.174836Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:58:35.264405Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:58:35.264478Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:58:35.287232Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:58:35.287366Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:58:35.287550Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:58:35.306946Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:58:35.307842Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:58:35.308713Z node 1 
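
[editor] In the CreateTableShouldFailOnWrongUnit test above, CreateTable is rejected because ColumnUnit: UNIT_AUTO is not valid for an integral-type TTL column such as DyNumber; the unit has to be stated explicitly so ValueSinceUnixEpochModeSettings can be filled in. A sketch with an explicit unit — the interval value is illustrative, and DyNumber is assumed to carry seconds since the Unix epoch:

    CREATE TABLE `/MyRoot/TTLEnabledTable` (
        key Uint64,
        modified_at DyNumber,  -- seconds since the Unix epoch
        PRIMARY KEY (key)
    )
    WITH (
        TTL = Interval("P30D") ON modified_at AS SECONDS
    );
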
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:35.309022Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:58:35.314912Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:58:35.316866Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:58:35.316970Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:58:35.317036Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:58:35.317093Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:58:35.317152Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:58:35.317407Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:58:35.352688Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:239:2058] recipient: [1:15:2062] 2025-05-07T08:58:35.564573Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:58:35.564825Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:35.565136Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:58:35.565393Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:58:35.565454Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:35.579396Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:35.579574Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 
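Context for the StatusSchemeError above: the rejected CreateTable enables TTL on a DyNumber column with only ColumnUnit: UNIT_AUTO, and schemeshard rejects it because, per the error text, TTL on an integral-type column requires ValueSinceUnixEpochModeSettings to be specified. A minimal sketch of the shapes involved, built only from fields that appear in this log; the UNIT_SECONDS variant is an assumed fix shown for illustration, not something this run exercised:

    # Rejected in this run: integral (DyNumber) column with no usable epoch unit
    TTLSettings { Enabled { ColumnName: "modified_at" ColumnUnit: UNIT_AUTO } }

    # Accepted for a date column (seen later in this log for Timestamp): plain expiry
    TTLSettings { Enabled { ColumnName: "modified_at" ExpireAfterSeconds: 3600 } }

    # Assumed fix for an integral column: spell out a concrete epoch unit (hypothetical)
    TTLSettings { Enabled { ColumnName: "modified_at" ColumnUnit: UNIT_SECONDS ExpireAfterSeconds: 3600 } }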
2025-05-07T08:58:35.579798Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:35.579879Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:58:35.579940Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:58:35.579979Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:58:35.584122Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:35.584241Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:58:35.584288Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:58:35.590998Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:35.591088Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:35.591157Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:58:35.591244Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:58:35.603562Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:58:35.615864Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:58:35.616141Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:58:35.617338Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:35.617518Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 130 RawX2: 4294969450 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:58:35.617595Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:58:35.617937Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:58:35.622200Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:58:35.622484Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:58:35.622631Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:58:35.630229Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:58:35.630304Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:58:35.630550Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:58:35.630597Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... oSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:58:36.081069Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 102, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-05-07T08:58:36.081369Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:58:36.081417Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:206:2208], at schemeshard: 72057594046678944, txId: 102, path id: 2 2025-05-07T08:58:36.081829Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-05-07T08:58:36.081881Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1036: NTableState::TProposedWaitParts operationId# 102:0 ProgressState at tablet: 72057594046678944 2025-05-07T08:58:36.082628Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 4 PathOwnerId: 72057594046678944, cookie: 102 2025-05-07T08:58:36.082761Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 4 PathOwnerId: 72057594046678944, cookie: 102 2025-05-07T08:58:36.082821Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 102 2025-05-07T08:58:36.082871Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 4 2025-05-07T08:58:36.082926Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-05-07T08:58:36.083053Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 102, ready parts: 0/1, is published: true 2025-05-07T08:58:36.086890Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-05-07T08:58:36.102479Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6245: Handle TEvProposeTransactionResult, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 102 Step: 5000003 OrderId: 102 ExecLatency: 0 ProposeLatency: 4 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 1137 } } 2025-05-07T08:58:36.102553Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation FindRelatedPartByTabletId, TxId: 102, tablet: 72075186233409546, partId: 0 2025-05-07T08:58:36.102732Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 102:0, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 102 Step: 5000003 OrderId: 102 ExecLatency: 0 ProposeLatency: 4 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 1137 } } 2025-05-07T08:58:36.102858Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_part.cpp:108: HandleReply TEvDataShard::TEvProposeTransactionResult Ignore message: tablet# 72057594046678944, ev# TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 102 Step: 5000003 OrderId: 102 ExecLatency: 0 ProposeLatency: 4 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 1137 } } FAKE_COORDINATOR: Erasing txId 102 2025-05-07T08:58:36.103856Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5472: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 309 RawX2: 4294969592 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 2025-05-07T08:58:36.103899Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation FindRelatedPartByTabletId, TxId: 102, tablet: 72075186233409546, partId: 0 2025-05-07T08:58:36.104002Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 102:0, at schemeshard: 72057594046678944, message: Source { RawX1: 309 RawX2: 4294969592 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 2025-05-07T08:58:36.104045Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1005: NTableState::TProposedWaitParts operationId# 102:0 HandleReply TEvSchemaChanged at tablet: 72057594046678944 2025-05-07T08:58:36.104115Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1009: NTableState::TProposedWaitParts operationId# 102:0 HandleReply TEvSchemaChanged at tablet: 72057594046678944 message: Source { RawX1: 309 RawX2: 4294969592 } Origin: 72075186233409546 
State: 2 TxId: 102 Step: 0 Generation: 2 2025-05-07T08:58:36.104165Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:655: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 102:0, shardIdx: 72057594046678944:1, datashard: 72075186233409546, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:36.104222Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:674: all shard schema changes has been received, operationId: 102:0, at schemeshard: 72057594046678944 2025-05-07T08:58:36.104268Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:686: send schema changes ack message, operation: 102:0, datashard: 72075186233409546, at schemeshard: 72057594046678944 2025-05-07T08:58:36.104303Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 102:0 129 -> 240 2025-05-07T08:58:36.106269Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-05-07T08:58:36.107344Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-05-07T08:58:36.107547Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-05-07T08:58:36.107605Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046678944] TDone opId# 102:0 ProgressState 2025-05-07T08:58:36.107712Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#102:0 progress is 1/1 2025-05-07T08:58:36.107754Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-05-07T08:58:36.107794Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#102:0 progress is 1/1 2025-05-07T08:58:36.107847Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-05-07T08:58:36.107895Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: true 2025-05-07T08:58:36.107991Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:337:2316] message: TxId: 102 2025-05-07T08:58:36.108047Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-05-07T08:58:36.108098Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 102:0 2025-05-07T08:58:36.108140Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 102:0 2025-05-07T08:58:36.108293Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-05-07T08:58:36.113886Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-05-07T08:58:36.113964Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter 
[1:397:2369] TestWaitNotification: OK eventTxId 102 2025-05-07T08:58:36.114574Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/TTLEnabledTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T08:58:36.114859Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/TTLEnabledTable" took 294us result status StatusSuccess 2025-05-07T08:58:36.115423Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/TTLEnabledTable" PathDescription { Self { Name: "TTLEnabledTable" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "TTLEnabledTable" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "modified_at" Type: "Timestamp" TypeId: 50 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 2 TTLSettings { Enabled { ColumnName: "modified_at" ExpireAfterSeconds: 3600 } } IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 1 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 |91.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/async_replication/py3test >> TestYmqHttpProxy::TestDeleteMessageBatch >> Cdc::Drop[PqRunner] [GOOD] >> Cdc::Drop[YdsRunner] >> TestKinesisHttpProxy::ListShardsExclusiveStartShardId [GOOD] >> TestKinesisHttpProxy::BadRequestUnknownMethod >> TSchemeShardTTLTests::ShouldCheckQuotas >> TCdcStreamTests::MeteringDedicated [GOOD] >> TCdcStreamTests::ChangeOwner >> BackupRestoreS3::TestAllPrimitiveTypes-DYNUMBER [GOOD] >> TestKinesisHttpProxy::ListShardsTimestamp |91.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/quoter/ut/ydb-core-quoter-ut |91.2%| [LD] {RESULT} 
$(B)/ydb/core/quoter/ut/ydb-core-quoter-ut |91.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/quoter/ut/ydb-core-quoter-ut |91.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/time_cast/ut/ydb-core-tx-time_cast-ut >> TCmsTest::ManageRequestsWrong [GOOD] >> TCmsTest::ManageRequestsDry >> Cdc::UpdateStream [GOOD] >> Cdc::UpdateShardCount |91.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/time_cast/ut/ydb-core-tx-time_cast-ut |91.2%| [LD] {RESULT} $(B)/ydb/core/tx/time_cast/ut/ydb-core-tx-time_cast-ut |91.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLTests::BuildIndexShouldSucceed >> TopicAutoscaling::PartitionSplit_PreferedPartition_BeforeAutoscaleAwareSDK [GOOD] >> TopicAutoscaling::PartitionSplit_PreferedPartition_AutoscaleAwareSDK >> TCmsTenatsTest::TestNoneTenantPolicy [GOOD] >> TCmsTenatsTest::TestDefaultTenantPolicyWithSingleTenantHost ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/backup_ut/unittest >> BackupRestore::TestAllPrimitiveTypes-DYNUMBER [GOOD] Test command err: 2025-05-07T08:56:41.449707Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501625104647797842:2206];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:56:41.449885Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0020ff/r3tmp/tmpEriEqA/pdisk_1.dat 2025-05-07T08:56:42.311600Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:56:42.337715Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:56:42.337817Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:56:42.347963Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 12458, node 1 2025-05-07T08:56:42.533611Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:56:42.533637Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:56:42.533647Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:56:42.533784Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:16613 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:56:42.893793Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:56:45.887579Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501625121827667935:2338], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:56:45.887716Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:56:46.227663Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 2025-05-07T08:56:46.442812Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501625104647797842:2206];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:56:46.448356Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Backup "/Root" to "/home/runner/.ya/build/build_root/zvgn/0020ff/r3tmp/tmpdub5Xh/"Create temporary directory "/Root/~backup_20250507T085646" in databaseProcess "/home/runner/.ya/build/build_root/zvgn/0020ff/r3tmp/tmpdub5Xh/table"Copy tables: { src: "/Root/table", dst: "/Root/~backup_20250507T085646/table" }Backup table "/Root/~backup_20250507T085646/table" to "/home/runner/.ya/build/build_root/zvgn/0020ff/r3tmp/tmpdub5Xh/table"Describe table "/Root/~backup_20250507T085646/table"Write scheme into "/home/runner/.ya/build/build_root/zvgn/0020ff/r3tmp/tmpdub5Xh/table/scheme.pb"Describe table "/Root/table"Write ACL into "/home/runner/.ya/build/build_root/zvgn/0020ff/r3tmp/tmpdub5Xh/table/permissions.pb"Read table "/Root/~backup_20250507T085646/table"Write data into "/home/runner/.ya/build/build_root/zvgn/0020ff/r3tmp/tmpdub5Xh/table/data_00.csv"Drop table "/Root/~backup_20250507T085646/table"Remove temporary directory "/Root/~backup_20250507T085646" in database2025-05-07T08:56:47.255095Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037891 not found 2025-05-07T08:56:47.256080Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037890 not found 2025-05-07T08:56:47.263396Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpRmDir, opId: 281474976710664:0, at schemeshard: 72057594046644480 Backup completed successfully2025-05-07T08:56:47.326366Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501625130417603397:2398], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:56:47.326465Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:56:47.493447Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037888 not found 2025-05-07T08:56:47.493501Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037889 not found Restore "/home/runner/.ya/build/build_root/zvgn/0020ff/r3tmp/tmpdub5Xh/" to "/Root"Resolved db base path: "/Root"Restore folder "/home/runner/.ya/build/build_root/zvgn/0020ff/r3tmp/tmpdub5Xh/" to "/Root"Process "/home/runner/.ya/build/build_root/zvgn/0020ff/r3tmp/tmpdub5Xh/table"Read scheme from "/home/runner/.ya/build/build_root/zvgn/0020ff/r3tmp/tmpdub5Xh/table/scheme.pb"Restore table "/home/runner/.ya/build/build_root/zvgn/0020ff/r3tmp/tmpdub5Xh/table" to "/Root/table"2025-05-07T08:56:47.563042Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 Created "/Root/table"Read data from "/home/runner/.ya/build/build_root/zvgn/0020ff/r3tmp/tmpdub5Xh/table/data_00.csv"Restore index "byValue" on "/Root/table"2025-05-07T08:56:47.706891Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976715758:2, at schemeshard: 72057594046644480 2025-05-07T08:56:47.805614Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976715759:0, at schemeshard: 72057594046644480 Restore ACL "/home/runner/.ya/build/build_root/zvgn/0020ff/r3tmp/tmpdub5Xh/table" to "/Root/table"Read ACL from "/home/runner/.ya/build/build_root/zvgn/0020ff/r3tmp/tmpdub5Xh/table/permissions.pb"2025-05-07T08:56:48.098122Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710669:0, at schemeshard: 72057594046644480 Restore completed successfully 2025-05-07T08:56:50.365109Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7501625143375253305:2077];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:56:50.382260Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0020ff/r3tmp/tmpHCKJMk/pdisk_1.dat 2025-05-07T08:56:50.790268Z node 4 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:56:50.857687Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:56:50.857772Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:56:50.868017Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 24800, node 4 2025-05-07T08:56:51.090575Z node 4 :NET_CLASSIFIER WARN: 
net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:56:51.090607Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:56:51.090616Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:56:51.090878Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:14235 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:56:51.471310Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 7205759 ... emeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715670:0, at schemeshard: 72057594046644480 Created "/Root/Datetime64Table"Read data from "/home/runner/.ya/build/build_root/zvgn/0020ff/r3tmp/tmpmHTKYX/Datetime64Table/data_00.csv"2025-05-07T08:58:24.043593Z node 34 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715671. Ctx: { TraceId: 01jtmzfbt25j41n1fpsn8bvjna, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=34&id=ZTkyMzY1Y2ItZWZiYTg0NzctOGJkYzhkM2MtYmUxYzgwYmQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root Restore ACL "/home/runner/.ya/build/build_root/zvgn/0020ff/r3tmp/tmpmHTKYX/Datetime64Table" to "/Root/Datetime64Table"Read ACL from "/home/runner/.ya/build/build_root/zvgn/0020ff/r3tmp/tmpmHTKYX/Datetime64Table/permissions.pb"2025-05-07T08:58:24.077565Z node 34 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715672:0, at schemeshard: 72057594046644480 Restore completed successfully2025-05-07T08:58:24.419806Z node 34 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715673. Ctx: { TraceId: 01jtmzfbz0f89pt23xzt2vcevh, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=34&id=MzY2MjAyMjItNTExODdlYzgtNGZmODlmOTctNjhmYWY5Zjg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-05-07T08:58:26.347891Z node 37 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[37:7501625555465334773:2208];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:58:26.366170Z node 37 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0020ff/r3tmp/tmp1xIlXN/pdisk_1.dat 2025-05-07T08:58:26.548938Z node 37 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:58:26.595633Z node 37 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(37, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:58:26.595740Z node 37 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(37, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:58:26.611335Z node 37 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(37, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 7419, node 37 2025-05-07T08:58:26.766763Z node 37 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:58:26.766793Z node 37 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:58:26.766803Z node 37 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:58:26.766975Z node 37 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26213 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:58:27.055668Z node 37 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 
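The Backup/Restore sequences above follow a fixed on-disk layout: tables are first copied into a temporary /Root/~backup_<timestamp> directory, each copy is exported to the output directory, and the copy is then dropped. A sketch of that layout, reconstructed from the paths this log prints (only files actually mentioned in the run are shown):

    <output dir>/
      table/              one subdirectory per table (table, DyNumberTable, ...)
        scheme.pb         written by "Write scheme into ...": the table description
        permissions.pb    written by "Write ACL into ...": access rights
        data_00.csv       written by "Write data into ..." after "Read table ..."

Restore consumes the same files in reverse: scheme.pb drives the ESchemeOpCreateTable suboperation, data_00.csv is loaded back through the KQP executor (the KQP_EXECUTER TxId lines), and permissions.pb drives ESchemeOpModifyACL.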
2025-05-07T08:58:27.074505Z node 37 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T08:58:31.350116Z node 37 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[37:7501625555465334773:2208];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:58:31.350210Z node 37 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:58:31.662195Z node 37 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [37:7501625576940172183:2340], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:58:31.662288Z node 37 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:58:31.662866Z node 37 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [37:7501625576940172195:2343], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:58:31.668207Z node 37 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480 2025-05-07T08:58:31.710185Z node 37 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [37:7501625576940172197:2344], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-05-07T08:58:31.786370Z node 37 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [37:7501625576940172281:2685] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:58:31.814098Z node 37 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 2025-05-07T08:58:32.086736Z node 37 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710661. Ctx: { TraceId: 01jtmzfkmhew2gvr8axgam2eee, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=37&id=NWVlNTI5YzQtNGIxZGViODgtOWQ0ZTBhY2MtOWY1ZjI1NzI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:58:32.283187Z node 37 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710662. Ctx: { TraceId: 01jtmzfksgc11dcwntxgahw3cb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=37&id=NWVlNTI5YzQtNGIxZGViODgtOWQ0ZTBhY2MtOWY1ZjI1NzI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root Backup "/Root" to "/home/runner/.ya/build/build_root/zvgn/0020ff/r3tmp/tmpCox6NU/"Create temporary directory "/Root/~backup_20250507T085832" in databaseProcess "/home/runner/.ya/build/build_root/zvgn/0020ff/r3tmp/tmpCox6NU/DyNumberTable"Copy tables: { src: "/Root/DyNumberTable", dst: "/Root/~backup_20250507T085832/DyNumberTable" }Backup table "/Root/~backup_20250507T085832/DyNumberTable" to "/home/runner/.ya/build/build_root/zvgn/0020ff/r3tmp/tmpCox6NU/DyNumberTable"Describe table "/Root/~backup_20250507T085832/DyNumberTable"Write scheme into "/home/runner/.ya/build/build_root/zvgn/0020ff/r3tmp/tmpCox6NU/DyNumberTable/scheme.pb"Describe table "/Root/DyNumberTable"Write ACL into "/home/runner/.ya/build/build_root/zvgn/0020ff/r3tmp/tmpCox6NU/DyNumberTable/permissions.pb"Read table "/Root/~backup_20250507T085832/DyNumberTable"Write data into "/home/runner/.ya/build/build_root/zvgn/0020ff/r3tmp/tmpCox6NU/DyNumberTable/data_00.csv"Drop table "/Root/~backup_20250507T085832/DyNumberTable"2025-05-07T08:58:32.991000Z node 37 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 37, TabletId: 72075186224037889 not found Remove temporary directory "/Root/~backup_20250507T085832" in database2025-05-07T08:58:33.011666Z node 37 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpRmDir, opId: 281474976710668:0, at schemeshard: 72057594046644480 Backup completed successfullyRestore "/home/runner/.ya/build/build_root/zvgn/0020ff/r3tmp/tmpCox6NU/" to "/Root"Resolved db base path: "/Root"Restore folder "/home/runner/.ya/build/build_root/zvgn/0020ff/r3tmp/tmpCox6NU/" to "/Root"Process "/home/runner/.ya/build/build_root/zvgn/0020ff/r3tmp/tmpCox6NU/DyNumberTable"Read scheme from "/home/runner/.ya/build/build_root/zvgn/0020ff/r3tmp/tmpCox6NU/DyNumberTable/scheme.pb"Restore table "/home/runner/.ya/build/build_root/zvgn/0020ff/r3tmp/tmpCox6NU/DyNumberTable" to "/Root/DyNumberTable"2025-05-07T08:58:33.181594Z node 37 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle 
TEvLocal::TEvTabletStatus from node 37, TabletId: 72075186224037888 not found 2025-05-07T08:58:33.201437Z node 37 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480 Created "/Root/DyNumberTable"Read data from "/home/runner/.ya/build/build_root/zvgn/0020ff/r3tmp/tmpCox6NU/DyNumberTable/data_00.csv"2025-05-07T08:58:33.511457Z node 37 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710671. Ctx: { TraceId: 01jtmzfn2294vvpjqhd26wrvym, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=37&id=ODAzMGFmZWUtN2IxMDA1ODQtMWZhNDNmYTMtMmI4ODA4MWU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root Restore ACL "/home/runner/.ya/build/build_root/zvgn/0020ff/r3tmp/tmpCox6NU/DyNumberTable" to "/Root/DyNumberTable"Read ACL from "/home/runner/.ya/build/build_root/zvgn/0020ff/r3tmp/tmpCox6NU/DyNumberTable/permissions.pb"2025-05-07T08:58:33.548742Z node 37 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710672:0, at schemeshard: 72057594046644480 Restore completed successfully2025-05-07T08:58:33.806803Z node 37 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710673. Ctx: { TraceId: 01jtmzfn6zax5wv7x9s5cdsctq, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=37&id=NWVlNTI5YzQtNGIxZGViODgtOWQ0ZTBhY2MtOWY1ZjI1NzI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root |91.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/long_tx_service/ut/ydb-core-tx-long_tx_service-ut |91.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/long_tx_service/ut/ydb-core-tx-long_tx_service-ut |91.2%| [LD] {RESULT} $(B)/ydb/core/tx/long_tx_service/ut/ydb-core-tx-long_tx_service-ut >> TestKinesisHttpProxy::GoodRequestGetRecordsLongStreamName [GOOD] >> TSchemeShardTTLTests::ShouldCheckQuotas [GOOD] >> TCdcStreamTests::ChangeOwner [GOOD] >> TCdcStreamTests::DropIndexWithStream ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLTests::ShouldCheckQuotas [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:128:2058] recipient: [1:108:2140] 2025-05-07T08:58:38.883773Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:58:38.883908Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:58:38.884142Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:58:38.884199Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:58:38.884254Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:58:38.884290Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:58:38.884361Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:58:38.884463Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:58:38.885312Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:58:38.885715Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:58:38.976477Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:58:38.976531Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:58:38.994787Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:58:38.994911Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:58:38.995082Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:58:39.014389Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:58:39.015267Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:58:39.016117Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:39.016438Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:58:39.023233Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:58:39.024682Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:58:39.024743Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:58:39.024788Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:58:39.024832Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:58:39.024874Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:58:39.025082Z node 1 :FLAT_TX_SCHEMESHARD 
INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:58:39.039334Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:239:2058] recipient: [1:15:2062] 2025-05-07T08:58:39.228254Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:58:39.228508Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:39.228805Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:58:39.229106Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:58:39.229194Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:39.232238Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:39.232465Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:58:39.232711Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:39.232793Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:58:39.232836Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:58:39.232870Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:58:39.235041Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:39.235098Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:58:39.235145Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:58:39.237473Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:39.237546Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 
1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:39.237611Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:58:39.237751Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:58:39.242462Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:58:39.245222Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:58:39.245468Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:58:39.246778Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:39.246926Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 130 RawX2: 4294969450 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:58:39.246984Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:58:39.247329Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:58:39.247423Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:58:39.247632Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:58:39.247746Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:58:39.250195Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:58:39.250284Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:58:39.250489Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 
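The ShouldCheckQuotas output, like the other schemeshard traces in this file, steps through the same operation state machine. The numeric transitions below come verbatim from the "Change state for txid" lines, with the ProgressState class logged next to each; subdomain operations go straight from 128 to 240, while table operations pass through a ProposedWaitParts stage (129) first:

    2 -> 3      TCreateParts         no shards to create, do next state
    3 -> 128    TConfigureParts
    128 -> 240  TPropose             on TEvOperationPlan from the coordinator
    129 -> 240  TProposedWaitParts   on TEvSchemaChanged from every datashard
                TDone                "Part operation is done", then RemoveTx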
2025-05-07T08:58:39.250549Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... ESHARD INFO: schemeshard__operation_common.cpp:1036: NTableState::TProposedWaitParts operationId# 103:0 ProgressState at tablet: 72057594046678944 2025-05-07T08:58:39.895513Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 7 PathOwnerId: 72057594046678944, cookie: 103 2025-05-07T08:58:39.895622Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 7 PathOwnerId: 72057594046678944, cookie: 103 2025-05-07T08:58:39.895663Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 103 2025-05-07T08:58:39.895703Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 103, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 7 2025-05-07T08:58:39.895748Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 6 2025-05-07T08:58:39.896726Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 3 PathOwnerId: 72057594046678944, cookie: 103 2025-05-07T08:58:39.896834Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 3 PathOwnerId: 72057594046678944, cookie: 103 2025-05-07T08:58:39.896866Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 103 2025-05-07T08:58:39.896913Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 103, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], version: 3 2025-05-07T08:58:39.896949Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 4 2025-05-07T08:58:39.897023Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 103, ready parts: 0/1, is published: true 2025-05-07T08:58:39.897842Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6245: Handle TEvProposeTransactionResult, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409549 Status: COMPLETE TxId: 103 Step: 200 OrderId: 103 ExecLatency: 0 ProposeLatency: 3 DomainCoordinators: 72075186233409546 TxStats { PerShardStats { ShardId: 72075186233409549 CpuTimeUsec: 1372 } } 2025-05-07T08:58:39.897890Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation FindRelatedPartByTabletId, TxId: 103, tablet: 72075186233409549, partId: 0 2025-05-07T08:58:39.898063Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 103:0, at schemeshard: 
72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409549 Status: COMPLETE TxId: 103 Step: 200 OrderId: 103 ExecLatency: 0 ProposeLatency: 3 DomainCoordinators: 72075186233409546 TxStats { PerShardStats { ShardId: 72075186233409549 CpuTimeUsec: 1372 } } 2025-05-07T08:58:39.898199Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_part.cpp:108: HandleReply TEvDataShard::TEvProposeTransactionResult Ignore message: tablet# 72057594046678944, ev# TxKind: TX_KIND_SCHEME Origin: 72075186233409549 Status: COMPLETE TxId: 103 Step: 200 OrderId: 103 ExecLatency: 0 ProposeLatency: 3 DomainCoordinators: 72075186233409546 TxStats { PerShardStats { ShardId: 72075186233409549 CpuTimeUsec: 1372 } } 2025-05-07T08:58:39.898936Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5472: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 549 RawX2: 4294969790 } Origin: 72075186233409549 State: 2 TxId: 103 Step: 0 Generation: 2 2025-05-07T08:58:39.898982Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation FindRelatedPartByTabletId, TxId: 103, tablet: 72075186233409549, partId: 0 2025-05-07T08:58:39.899131Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 103:0, at schemeshard: 72057594046678944, message: Source { RawX1: 549 RawX2: 4294969790 } Origin: 72075186233409549 State: 2 TxId: 103 Step: 0 Generation: 2 2025-05-07T08:58:39.899198Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1005: NTableState::TProposedWaitParts operationId# 103:0 HandleReply TEvSchemaChanged at tablet: 72057594046678944 2025-05-07T08:58:39.899352Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1009: NTableState::TProposedWaitParts operationId# 103:0 HandleReply TEvSchemaChanged at tablet: 72057594046678944 message: Source { RawX1: 549 RawX2: 4294969790 } Origin: 72075186233409549 State: 2 TxId: 103 Step: 0 Generation: 2 2025-05-07T08:58:39.899423Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:655: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 103:0, shardIdx: 72057594046678944:4, datashard: 72075186233409549, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:39.899461Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:674: all shard schema changes has been received, operationId: 103:0, at schemeshard: 72057594046678944 2025-05-07T08:58:39.899497Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:686: send schema changes ack message, operation: 103:0, datashard: 72075186233409549, at schemeshard: 72057594046678944 2025-05-07T08:58:39.899539Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 103:0 129 -> 240 2025-05-07T08:58:39.902442Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-05-07T08:58:39.904522Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-05-07T08:58:39.904656Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 103:0, at schemeshard: 72057594046678944 2025-05-07T08:58:39.904782Z node 1 :FLAT_TX_SCHEMESHARD 
DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 103:0, at schemeshard: 72057594046678944 2025-05-07T08:58:39.905167Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 103:0, at schemeshard: 72057594046678944 2025-05-07T08:58:39.905220Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046678944] TDone opId# 103:0 ProgressState 2025-05-07T08:58:39.905331Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#103:0 progress is 1/1 2025-05-07T08:58:39.905377Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-05-07T08:58:39.905433Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#103:0 progress is 1/1 2025-05-07T08:58:39.905482Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-05-07T08:58:39.905527Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 103, ready parts: 1/1, is published: true 2025-05-07T08:58:39.905616Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:413:2380] message: TxId: 103 2025-05-07T08:58:39.905672Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-05-07T08:58:39.905709Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 103:0 2025-05-07T08:58:39.905747Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 103:0 2025-05-07T08:58:39.905873Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 3 2025-05-07T08:58:39.908082Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-05-07T08:58:39.908137Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [1:580:2517] TestWaitNotification: OK eventTxId 103 W0000 00:00:1746608319.908741 266199 text_format.cc:398] Warning parsing text-format NKikimrSchemeOp.TTableDescription: 9:35: text format contains deprecated field "ExpireAfterSeconds" TestModificationResults wait txId: 104 2025-05-07T08:58:39.911644Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/SubDomain" OperationType: ESchemeOpCreateTable CreateTable { Name: "Table4" Columns { Name: "key" Type: "Uint64" } Columns { Name: "modified_at" Type: "Timestamp" } KeyColumnNames: "key" TTLSettings { Enabled { ColumnName: "modified_at" ExpireAfterSeconds: 3600 SysSettings { RunInterval: 1799999999 } Tiers { ApplyAfterSeconds: 3600 Delete { } } } } } } TxId: 104 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:58:39.912135Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_table.cpp:426: TCreateTable Propose, path: /MyRoot/SubDomain/Table4, opId: 104:0, at schemeshard: 72057594046678944 2025-05-07T08:58:39.912265Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation_create_table.cpp:433: TCreateTable Propose, path: /MyRoot/SubDomain/Table4, opId: 104:0, schema: Name: "Table4" Columns { Name: "key" Type: "Uint64" } Columns { Name: "modified_at" Type: "Timestamp" } KeyColumnNames: "key" TTLSettings { Enabled { ColumnName: "modified_at" ExpireAfterSeconds: 3600 SysSettings { RunInterval: 1799999999 } Tiers { ApplyAfterSeconds: 3600 Delete { } } } }, at schemeshard: 72057594046678944 2025-05-07T08:58:39.912697Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 104:1, propose status:StatusSchemeError, reason: TTL run interval cannot be less than limit: 1800, at schemeshard: 72057594046678944 2025-05-07T08:58:39.915633Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 104, response: Status: StatusSchemeError Reason: "TTL run interval cannot be less than limit: 1800" TxId: 104 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:58:39.915828Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 104, database: /MyRoot/SubDomain, subject: , status: StatusSchemeError, reason: TTL run interval cannot be less than limit: 1800, operation: CREATE TABLE, path: /MyRoot/SubDomain/Table4 TestModificationResult got TxId: 104, wait until txId: 104 >> TSchemeShardTTLTests::BuildIndexShouldSucceed [GOOD] >> TestKinesisHttpProxy::ErroneousRequestGetRecords ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/backup_ut/unittest >> BackupRestoreS3::TestAllPrimitiveTypes-DYNUMBER [GOOD] Test command err: 2025-05-07T08:56:34.214556Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501625074983684356:2076];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:56:34.214650Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002204/r3tmp/tmpyEyPNw/pdisk_1.dat 2025-05-07T08:56:35.228899Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:56:35.242234Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:56:35.257839Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:56:35.258481Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:56:35.268749Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 16257, node 1 2025-05-07T08:56:35.571731Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:56:35.571757Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:56:35.571764Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:56:35.571908Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 
TClient is connected to server localhost:31849 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:56:36.301272Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:56:36.334112Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-05-07T08:56:39.216844Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501625074983684356:2076];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:56:39.216936Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:56:40.350785Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501625100753489246:2344], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:56:40.350918Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:56:40.351642Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501625100753489254:2347], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:56:40.352030Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:7501625074983684608:2135] Handle TEvProposeTransaction 2025-05-07T08:56:40.352051Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:7501625074983684608:2135] TxId# 281474976715658 ProcessProposeTransaction 2025-05-07T08:56:40.352101Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:7501625074983684608:2135] Cookie# 0 userReqId# "" txid# 281474976715658 SEND to# [1:7501625100753489261:2641] 2025-05-07T08:56:40.454064Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1520: Actor# [1:7501625100753489261:2641] txid# 281474976715658 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/Root/.metadata/workload_manager/pools" OperationType: ESchemeOpCreateResourcePool ModifyACL { Name: "default" DiffACL: "\n!\010\000\022\035\010\001\020\201\004\032\024all-users@well-known \003\n\031\010\000\022\025\010\001\020\201\004\032\014root@builtin \003" NewOwner: "metadata@system" } Internal: true CreateResourcePool { Name: "default" Properties { Properties { key: "concurrent_query_limit" value: "-1" } Properties { key: "database_load_cpu_threshold" value: "-1" } Properties { key: "query_cancel_after_seconds" value: "0" } Properties { key: "query_cpu_limit_percent_per_node" value: "-1" } Properties { key: "query_memory_limit_percent_per_node" value: "-1" } Properties { key: "queue_size" value: "-1" } Properties { key: "resource_weight" value: "-1" } Properties { key: "total_cpu_limit_percent_per_node" value: "-1" } } } } } UserToken: "\n\017metadata@system\022\000" DatabaseName: "/Root" 2025-05-07T08:56:40.454187Z node 1 :TX_PROXY DEBUG: schemereq.cpp:563: Actor# [1:7501625100753489261:2641] txid# 281474976715658 Bootstrap, UserSID: metadata@system CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-05-07T08:56:40.454214Z node 1 :TX_PROXY DEBUG: schemereq.cpp:572: Actor# [1:7501625100753489261:2641] txid# 281474976715658 Bootstrap, UserSID: metadata@system IsClusterAdministrator: 1 2025-05-07T08:56:40.456519Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1585: Actor# [1:7501625100753489261:2641] txid# 281474976715658 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P6 2025-05-07T08:56:40.456613Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1575: Actor# [1:7501625100753489261:2641] txid# 281474976715658 TEvNavigateKeySet requested from SchemeCache 2025-05-07T08:56:40.456830Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1408: Actor# [1:7501625100753489261:2641] txid# 281474976715658 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-05-07T08:56:40.456993Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1455: Actor# [1:7501625100753489261:2641] HANDLE EvNavigateKeySetResult, txid# 281474976715658 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-05-07T08:56:40.457046Z node 1 :TX_PROXY DEBUG: schemereq.cpp:102: Actor# [1:7501625100753489261:2641] txid# 281474976715658 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715658 TabletId# 72057594046644480} 2025-05-07T08:56:40.457163Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1310: Actor# [1:7501625100753489261:2641] txid# 281474976715658 HANDLE EvClientConnected 2025-05-07T08:56:40.462979Z node 1 :FLAT_TX_SCHEMESHARD WARN: 
schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480 2025-05-07T08:56:40.472580Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1332: Actor# [1:7501625100753489261:2641] txid# 281474976715658 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715658} 2025-05-07T08:56:40.472655Z node 1 :TX_PROXY DEBUG: schemereq.cpp:543: Actor# [1:7501625100753489261:2641] txid# 281474976715658 SEND to# [1:7501625100753489260:2348] Source {TEvProposeTransactionStatus txid# 281474976715658 Status# 53} 2025-05-07T08:56:40.492909Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501625100753489260:2348], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-05-07T08:56:40.586421Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:7501625074983684608:2135] Handle TEvProposeTransaction 2025-05-07T08:56:40.586452Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:7501625074983684608:2135] TxId# 281474976715659 ProcessProposeTransaction 2025-05-07T08:56:40.586497Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:7501625074983684608:2135] Cookie# 0 userReqId# "" txid# 281474976715659 SEND to# [1:7501625100753489327:2687] 2025-05-07T08:56:40.589289Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1520: Actor# [1:7501625100753489327:2687] txid# 281474976715659 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/Root/.metadata/workload_manager/pools" OperationType: ESchemeOpCreateResourcePool ModifyACL { Name: "default" DiffACL: "\n!\010\000\022\035\010\001\020\201\004\032\024all-users@well-known \003\n\031\010\000\022\025\010\001\020\201\004\032\014root@builtin \003" NewOwner: "metadata@system" } Internal: true CreateResourcePool { Name: "default" Properties { Properties { key: "concurrent_query_limit" value: "-1" } Properties { key: "database_load_cpu_threshold" value: "-1" } Properties { key: "query_cancel_after_seconds" value: "0" } Properties { key: "query_cpu_limit_percent_per_node" value: "-1" } Properties { key: "query_memory_limit_percent_per_node" value: "-1" } Properties { key: "queue_size" value: "-1" } Properties { key: "resource_weight" value: "-1" } Properties { key: "total_cpu_limit_percent_per_node" value: "-1" } } } } } UserToken: "\n\017metadata@system\022\000" DatabaseName: "/Root" 2025-05-07T08:56:40.589336Z node 1 :TX_PROXY DEBUG: schemereq.cpp:563: Actor# [1:7501625100753489327:2687] txid# 281474976715659 Bootstrap, UserSID: metadata@system CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-05-07T08:56:40.589351Z node 1 :TX_PROXY DEBUG: schemereq.cpp:572: Actor# [1:7501625100753489327:2687] txid# 281474976715659 Bootstrap, UserSID: metadata@system IsClusterAdministrator: 1 2025-05-07T08:56:40.593083Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1585: Actor# [1:7501625100753489327:2687] txid# 281474976715659 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P6 2025-05-07T08:56:40.593232Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1575: Actor# [1:7501625100753489327:2687] txid# 281474976715659 TEvNavigateKeySet requested from SchemeCache 2025-05-07T08:56:40.593572Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1408: Actor# [1:7501625100753489327:2687] txid# 281474976715659 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-05-07T08:56:40.593734Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1455: Actor# [1:75016251007 ... 
: rpc_operation_request_base.h:106: [GetImport] [37:7501625598571813188:2403] [0] Send request: schemeShardId# 72057594046644480 2025-05-07T08:58:36.662894Z node 37 :TX_PROXY DEBUG: rpc_get_operation.cpp:220: [GetImport] [37:7501625598571813188:2403] [0] Handle TEvImport::TEvGetImportResponse: record# Entry { Id: 281474976710665 Status: SUCCESS Progress: PROGRESS_PREPARING ImportFromS3Settings { endpoint: "localhost:23150" scheme: HTTP bucket: "test_bucket" items { source_prefix: "DyNumberTable" destination_path: "/Root/DyNumberTable" } } StartTime { seconds: 1746608316 } } REQUEST: GET /test_bucket?prefix=DyNumberTable HTTP/1.1 HEADERS: Host: localhost:23150 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 8170418E-5414-4B7A-886D-0205BA62A0FD amz-sdk-request: attempt=1 authorization: AWS4-HMAC-SHA256 Credential=test_key/20250507/us-east-1/s3/aws4_request, SignedHeaders=amz-sdk-invocation-id;amz-sdk-request;content-type;host;x-amz-api-version;x-amz-content-sha256;x-amz-date, Signature=ad9a7946132880c2720e69495cec1b4396bfcb9b299ba2cc0af721af910ff740 content-type: application/xml user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-api-version: 2006-03-01 x-amz-content-sha256: e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 x-amz-date: 20250507T085836Z S3_MOCK::HttpServeList: DyNumberTable 2025-05-07T08:58:36.675706Z node 37 :IMPORT DEBUG: schemeshard_import_getters.cpp:554: HandleChangefeeds TEvExternalStorage::TEvListObjectResponse: self# [37:7501625598571813177:2197], result# ListObjectsResult { } 2025-05-07T08:58:36.675776Z node 37 :IMPORT INFO: schemeshard_import_getters.cpp:587: Reply: self# [37:7501625598571813177:2197], success# 1, error# 2025-05-07T08:58:36.675896Z node 37 :IMPORT DEBUG: schemeshard_import__create.cpp:360: TImport::TTxProgress: DoExecute 2025-05-07T08:58:36.675913Z node 37 :IMPORT DEBUG: schemeshard_import__create.cpp:965: TImport::TTxProgress: OnSchemeResult: id# 281474976710665, itemIdx# 0, success# 1 2025-05-07T08:58:36.676341Z node 37 :IMPORT INFO: schemeshard_import__create.cpp:605: TImport::TTxProgress: Allocate txId: info# { Id: 281474976710665 Uid: '' Kind: S3 DomainPathId: [OwnerId: 72057594046644480, LocalPathId: 1] UserSID: '(empty maybe)' State: Waiting Issue: '' Items: 1 }, item# { Idx: 0 DstPathName: '/Root/DyNumberTable' DstPathId: State: CreateSchemeObject SubState: AllocateTxId WaitTxId: 0 Issue: '' } 2025-05-07T08:58:36.742188Z node 37 :IMPORT DEBUG: schemeshard_import__create.cpp:384: TImport::TTxProgress: DoComplete 2025-05-07T08:58:36.742401Z node 37 :IMPORT DEBUG: schemeshard_import__create.cpp:360: TImport::TTxProgress: DoExecute 2025-05-07T08:58:36.742420Z node 37 :IMPORT DEBUG: schemeshard_import__create.cpp:1180: TImport::TTxProgress: OnAllocateResult: txId# 281474976715760, id# 281474976710665 2025-05-07T08:58:36.742484Z node 37 :IMPORT INFO: schemeshard_import__create.cpp:417: TImport::TTxProgress: CreateTable propose: info# { Id: 281474976710665 Uid: '' Kind: S3 DomainPathId: [OwnerId: 72057594046644480, LocalPathId: 1] UserSID: '(empty maybe)' State: Waiting Issue: '' Items: 1 }, item# { Idx: 0 DstPathName: '/Root/DyNumberTable' DstPathId: State: CreateSchemeObject SubState: Proposed WaitTxId: 0 Issue: '' }, txId# 281474976715760 2025-05-07T08:58:36.742680Z node 37 :IMPORT DEBUG: schemeshard_import__create.cpp:384: TImport::TTxProgress: DoComplete 2025-05-07T08:58:36.744357Z node 37 :FLAT_TX_SCHEMESHARD WARN: 
schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715760:0, at schemeshard: 72057594046644480 2025-05-07T08:58:36.755695Z node 37 :IMPORT DEBUG: schemeshard_import__create.cpp:360: TImport::TTxProgress: DoExecute 2025-05-07T08:58:36.755729Z node 37 :IMPORT DEBUG: schemeshard_import__create.cpp:1267: TImport::TTxProgress: OnModifyResult: txId# 281474976715760, status# StatusAccepted 2025-05-07T08:58:36.755898Z node 37 :IMPORT INFO: schemeshard_import__create.cpp:619: TImport::TTxProgress: Wait for completion: info# { Id: 281474976710665 Uid: '' Kind: S3 DomainPathId: [OwnerId: 72057594046644480, LocalPathId: 1] UserSID: '(empty maybe)' State: Waiting Issue: '' Items: 1 }, item# { Idx: 0 DstPathName: '/Root/DyNumberTable' DstPathId: [OwnerId: 72057594046644480, LocalPathId: 9] State: CreateSchemeObject SubState: Subscribed WaitTxId: 281474976715760 Issue: '' } 2025-05-07T08:58:36.761721Z node 37 :IMPORT DEBUG: schemeshard_import__create.cpp:384: TImport::TTxProgress: DoComplete 2025-05-07T08:58:36.886353Z node 37 :IMPORT DEBUG: schemeshard_import__create.cpp:360: TImport::TTxProgress: DoExecute 2025-05-07T08:58:36.886388Z node 37 :IMPORT DEBUG: schemeshard_import__create.cpp:1425: TImport::TTxProgress: OnNotifyResult: txId# 281474976715760 2025-05-07T08:58:36.886510Z node 37 :IMPORT INFO: schemeshard_import__create.cpp:605: TImport::TTxProgress: Allocate txId: info# { Id: 281474976710665 Uid: '' Kind: S3 DomainPathId: [OwnerId: 72057594046644480, LocalPathId: 1] UserSID: '(empty maybe)' State: Waiting Issue: '' Items: 1 }, item# { Idx: 0 DstPathName: '/Root/DyNumberTable' DstPathId: [OwnerId: 72057594046644480, LocalPathId: 9] State: Transferring SubState: AllocateTxId WaitTxId: 0 Issue: '' } 2025-05-07T08:58:36.894813Z node 37 :IMPORT DEBUG: schemeshard_import__create.cpp:384: TImport::TTxProgress: DoComplete 2025-05-07T08:58:36.894978Z node 37 :IMPORT DEBUG: schemeshard_import__create.cpp:360: TImport::TTxProgress: DoExecute 2025-05-07T08:58:36.894997Z node 37 :IMPORT DEBUG: schemeshard_import__create.cpp:1180: TImport::TTxProgress: OnAllocateResult: txId# 281474976715761, id# 281474976710665 2025-05-07T08:58:36.895084Z node 37 :IMPORT INFO: schemeshard_import__create.cpp:496: TImport::TTxProgress: Restore propose: info# { Id: 281474976710665 Uid: '' Kind: S3 DomainPathId: [OwnerId: 72057594046644480, LocalPathId: 1] UserSID: '(empty maybe)' State: Waiting Issue: '' Items: 1 }, item# { Idx: 0 DstPathName: '/Root/DyNumberTable' DstPathId: [OwnerId: 72057594046644480, LocalPathId: 9] State: Transferring SubState: Proposed WaitTxId: 0 Issue: '' }, txId# 281474976715761 2025-05-07T08:58:36.896117Z node 37 :IMPORT DEBUG: schemeshard_import__create.cpp:384: TImport::TTxProgress: DoComplete 2025-05-07T08:58:36.896748Z node 37 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpRestore, opId: 281474976715761:0, at schemeshard: 72057594046644480 2025-05-07T08:58:36.900842Z node 37 :IMPORT DEBUG: schemeshard_import__create.cpp:360: TImport::TTxProgress: DoExecute 2025-05-07T08:58:36.900875Z node 37 :IMPORT DEBUG: schemeshard_import__create.cpp:1267: TImport::TTxProgress: OnModifyResult: txId# 281474976715761, status# StatusAccepted 2025-05-07T08:58:36.901024Z node 37 :IMPORT INFO: schemeshard_import__create.cpp:619: TImport::TTxProgress: Wait for completion: info# { Id: 281474976710665 Uid: '' Kind: S3 
DomainPathId: [OwnerId: 72057594046644480, LocalPathId: 1] UserSID: '(empty maybe)' State: Waiting Issue: '' Items: 1 }, item# { Idx: 0 DstPathName: '/Root/DyNumberTable' DstPathId: [OwnerId: 72057594046644480, LocalPathId: 9] State: Transferring SubState: Subscribed WaitTxId: 281474976715761 Issue: '' } 2025-05-07T08:58:36.909696Z node 37 :IMPORT DEBUG: schemeshard_import__create.cpp:384: TImport::TTxProgress: DoComplete REQUEST: HEAD /test_bucket/DyNumberTable/data_00.csv HTTP/1.1 HEADERS: Host: localhost:23150 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: F8F3EA90-1FEB-4C46-AA53-956A67A7306E amz-sdk-request: attempt=1 authorization: AWS4-HMAC-SHA256 Credential=test_key/20250507/us-east-1/s3/aws4_request, SignedHeaders=amz-sdk-invocation-id;amz-sdk-request;content-type;host;x-amz-api-version;x-amz-content-sha256;x-amz-date, Signature=63d41cc5fac287f544b5539b4f75c62a89a30d9258e2cde202004125eb5784f1 content-type: application/xml user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-api-version: 2006-03-01 x-amz-content-sha256: e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 x-amz-date: 20250507T085836Z S3_MOCK::HttpServeRead: /test_bucket/DyNumberTable/data_00.csv / 7 REQUEST: GET /test_bucket/DyNumberTable/data_00.csv HTTP/1.1 HEADERS: Host: localhost:23150 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 18056195-8AF9-4120-A1B5-3FC227D1DE4B amz-sdk-request: attempt=1 authorization: AWS4-HMAC-SHA256 Credential=test_key/20250507/us-east-1/s3/aws4_request, SignedHeaders=amz-sdk-invocation-id;amz-sdk-request;content-type;host;range;x-amz-api-version;x-amz-content-sha256;x-amz-date, Signature=c21248df201ca422ccf69964e9fdd3d168f786ed6a6be7fa32486498d77b63d2 content-type: application/xml range: bytes=0-6 user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-api-version: 2006-03-01 x-amz-content-sha256: e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 x-amz-date: 20250507T085837Z S3_MOCK::HttpServeRead: /test_bucket/DyNumberTable/data_00.csv / 7 2025-05-07T08:58:37.046989Z node 37 :IMPORT DEBUG: schemeshard_import__create.cpp:360: TImport::TTxProgress: DoExecute 2025-05-07T08:58:37.047020Z node 37 :IMPORT DEBUG: schemeshard_import__create.cpp:1425: TImport::TTxProgress: OnNotifyResult: txId# 281474976715761 2025-05-07T08:58:37.052066Z node 37 :IMPORT DEBUG: schemeshard_import__create.cpp:384: TImport::TTxProgress: DoComplete 2025-05-07T08:58:37.097372Z node 37 :TX_PROXY DEBUG: rpc_operation_request_base.h:50: [GetImport] [37:7501625602866780686:2410] [0] Resolve database: name# /Root 2025-05-07T08:58:37.098114Z node 37 :TX_PROXY DEBUG: rpc_operation_request_base.h:66: [GetImport] [37:7501625602866780686:2410] [0] Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult: request# { ErrorCount: 0 DatabaseName: /Root DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root TableId: [72057594046644480:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 
2025-05-07T08:58:37.098147Z node 37 :TX_PROXY DEBUG: rpc_operation_request_base.h:106: [GetImport] [37:7501625602866780686:2410] [0] Send request: schemeShardId# 72057594046644480 2025-05-07T08:58:37.099145Z node 37 :TX_PROXY DEBUG: rpc_get_operation.cpp:220: [GetImport] [37:7501625602866780686:2410] [0] Handle TEvImport::TEvGetImportResponse: record# Entry { Id: 281474976710665 Status: SUCCESS Progress: PROGRESS_DONE ImportFromS3Settings { endpoint: "localhost:23150" scheme: HTTP bucket: "test_bucket" items { source_prefix: "DyNumberTable" destination_path: "/Root/DyNumberTable" } } StartTime { seconds: 1746608316 } EndTime { seconds: 1746608317 } } 2025-05-07T08:58:37.326963Z node 37 :TX_PROXY DEBUG: proxy_impl.cpp:353: actor# [37:7501625564212073129:2138] Handle TEvExecuteKqpTransaction 2025-05-07T08:58:37.327008Z node 37 :TX_PROXY DEBUG: proxy_impl.cpp:342: actor# [37:7501625564212073129:2138] TxId# 281474976710666 ProcessProposeKqpTransaction 2025-05-07T08:58:37.328268Z node 37 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710666. Ctx: { TraceId: 01jtmzfrnn5z9m6s2x176kjc0x, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=37&id=NWVkMTlkZWMtNWVjYzAyYjEtODc4YTlmNGYtNmI2MTlkMzU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root >> TCmsTest::ManageRequestsDry [GOOD] >> TCmsTest::Notifications >> TSchemeShardTTLTests::AlterTableShouldSuccessOnSimultaneousAddColumnAndEnableTTL |91.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest |91.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_range_ops/ydb-core-tx-datashard-ut_range_ops |91.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_range_ops/ydb-core-tx-datashard-ut_range_ops |91.2%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_range_ops/ydb-core-tx-datashard-ut_range_ops |91.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/async_replication/py3test >> TCmsTenatsTest::TestDefaultTenantPolicyWithSingleTenantHost [GOOD] >> TCmsTenatsTest::TestLimitsWithDownNode ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLTests::BuildIndexShouldSucceed [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140] 2025-05-07T08:58:39.964642Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:58:39.964735Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:58:39.964782Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:58:39.964819Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:58:39.964862Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: 
type TxMergeTablePartition, limit 10000 2025-05-07T08:58:39.964934Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:58:39.964994Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:58:39.965087Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:58:39.965789Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:58:39.966255Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:58:40.059169Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:58:40.059258Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:58:40.076563Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:58:40.076800Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:58:40.076993Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:58:40.083423Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:58:40.083753Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:58:40.084515Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:40.084719Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:58:40.087892Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:58:40.089249Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:58:40.089321Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:58:40.089400Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:58:40.089443Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:58:40.089495Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:58:40.089718Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 
72057594046678944 2025-05-07T08:58:40.096809Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T08:58:40.249495Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:58:40.249706Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:40.249919Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:58:40.250192Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:58:40.250251Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:40.252282Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:40.252414Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:58:40.252583Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:40.252640Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:58:40.252697Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:58:40.252734Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:58:40.258929Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:40.259006Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:58:40.259051Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:58:40.261204Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:40.261281Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:40.261325Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:58:40.261385Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:58:40.265039Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:58:40.267158Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:58:40.267343Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:58:40.268308Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:40.268462Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:58:40.268520Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:58:40.268810Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:58:40.268861Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:58:40.269051Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:58:40.269127Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:58:40.271293Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:58:40.271357Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:58:40.271531Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:58:40.271589Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 74976710760:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:281474976710760 msg type: 269090816 2025-05-07T08:58:40.988136Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 281474976710760, partId: 4294967295, tablet: 72057594046316545 2025-05-07T08:58:40.988241Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 281474976710760, at schemeshard: 72057594046678944 2025-05-07T08:58:40.988263Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 281474976710760, ready parts: 0/1, is published: true 2025-05-07T08:58:40.988289Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 281474976710760, at schemeshard: 72057594046678944 FAKE_COORDINATOR: Add transaction: 281474976710760 at step: 5000006 FAKE_COORDINATOR: advance: minStep5000006 State->FrontStep: 5000005 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710760 at step: 5000006 2025-05-07T08:58:40.988499Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000006, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:40.988590Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 281474976710760 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000006 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:58:40.988638Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_lock.cpp:44: [72057594046678944] TDropLock TPropose opId# 281474976710760:0 HandleReply TEvOperationPlan: step# 5000006 2025-05-07T08:58:40.988676Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 281474976710760:0 128 -> 240 2025-05-07T08:58:40.990262Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 281474976710760:0, at schemeshard: 72057594046678944 2025-05-07T08:58:40.990313Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046678944] TDone opId# 281474976710760:0 ProgressState 2025-05-07T08:58:40.990410Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976710760:0 progress is 1/1 2025-05-07T08:58:40.990435Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 281474976710760 ready parts: 1/1 2025-05-07T08:58:40.990465Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976710760:0 progress is 1/1 2025-05-07T08:58:40.990490Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 281474976710760 ready parts: 1/1 2025-05-07T08:58:40.990524Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 281474976710760, ready parts: 1/1, is published: true 2025-05-07T08:58:40.990564Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:124:2150] message: TxId: 281474976710760 2025-05-07T08:58:40.990589Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 281474976710760 ready parts: 1/1 2025-05-07T08:58:40.990620Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 281474976710760:0 2025-05-07T08:58:40.990654Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 281474976710760:0 2025-05-07T08:58:40.990719Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 FAKE_COORDINATOR: Erasing txId 281474976710760 2025-05-07T08:58:40.992217Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6706: Handle: TEvNotifyTxCompletionResult: txId# 281474976710760 2025-05-07T08:58:40.992288Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6708: Message: TxId: 281474976710760 2025-05-07T08:58:40.992356Z node 1 :BUILD_INDEX INFO: schemeshard_build_index__progress.cpp:2321: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxReply : TEvNotifyTxCompletionResult, txId# 281474976710760, buildInfoId: 102 2025-05-07T08:58:40.992453Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:2324: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxReply : TEvNotifyTxCompletionResult, txId# 281474976710760, buildInfo: TBuildInfo{ IndexBuildId: 102, Uid: , DomainPathId: [OwnerId: 72057594046678944, LocalPathId: 1], TablePathId: [OwnerId: 72057594046678944, LocalPathId: 2], IndexType: EIndexTypeGlobal, IndexName: UserDefinedIndexByValue, IndexColumn: value, State: Unlocking, IsCancellationRequested: 0, Issue: , SubscribersCount: 1, CreateSender: [1:384:2356], AlterMainTableTxId: 0, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 281474976710757, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976710758, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 5000004, ApplyTxId: 281474976710759, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976710760, UnlockTxStatus: StatusAccepted, UnlockTxDone: 0, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }, Billed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }} 2025-05-07T08:58:40.994030Z node 1 :BUILD_INDEX INFO: schemeshard_build_index__progress.cpp:1105: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 102 Unlocking TBuildInfo{ IndexBuildId: 102, Uid: , DomainPathId: [OwnerId: 72057594046678944, LocalPathId: 1], TablePathId: [OwnerId: 72057594046678944, LocalPathId: 2], IndexType: EIndexTypeGlobal, IndexName: UserDefinedIndexByValue, IndexColumn: value, State: Unlocking, IsCancellationRequested: 0, Issue: , SubscribersCount: 1, CreateSender: [1:384:2356], AlterMainTableTxId: 0, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 281474976710757, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976710758, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 5000004, ApplyTxId: 281474976710759, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976710760, UnlockTxStatus: StatusAccepted, UnlockTxDone: 1, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }, Billed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }} 2025-05-07T08:58:40.994118Z node 1 :BUILD_INDEX INFO: 
schemeshard_build_index_tx_base.cpp:25: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: Change state from Unlocking to Done 2025-05-07T08:58:40.995876Z node 1 :BUILD_INDEX INFO: schemeshard_build_index__progress.cpp:1105: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 102 Done TBuildInfo{ IndexBuildId: 102, Uid: , DomainPathId: [OwnerId: 72057594046678944, LocalPathId: 1], TablePathId: [OwnerId: 72057594046678944, LocalPathId: 2], IndexType: EIndexTypeGlobal, IndexName: UserDefinedIndexByValue, IndexColumn: value, State: Done, IsCancellationRequested: 0, Issue: , SubscribersCount: 1, CreateSender: [1:384:2356], AlterMainTableTxId: 0, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 281474976710757, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976710758, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 5000004, ApplyTxId: 281474976710759, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976710760, UnlockTxStatus: StatusAccepted, UnlockTxDone: 1, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }, Billed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }} 2025-05-07T08:58:40.995961Z node 1 :BUILD_INDEX TRACE: schemeshard_build_index_tx_base.cpp:325: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TIndexBuildInfo SendNotifications: : id# 102, subscribers count# 1 2025-05-07T08:58:40.996090Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-05-07T08:58:40.996160Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:473:2434] TestWaitNotification: OK eventTxId 102 2025-05-07T08:58:40.996778Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/TTLEnabledTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T08:58:40.997058Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/TTLEnabledTable" took 301us result status StatusSuccess 2025-05-07T08:58:40.997555Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/TTLEnabledTable" PathDescription { Self { Name: "TTLEnabledTable" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 6 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 6 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 TableSchemaVersion: 3 TablePartitionVersion: 1 } ChildrenExist: true } Table { Name: "TTLEnabledTable" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Uint64" TypeId: 4 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "modified_at" Type: "Timestamp" TypeId: 50 Id: 3 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableIndexes { Name: "UserDefinedIndexByValue" LocalPathId: 3 Type: EIndexTypeGlobal State: EIndexStateReady KeyColumnNames: "value" SchemaVersion: 
2 PathOwnerId: 72057594046678944 DataSize: 0 IndexImplTableDescriptions { } } TableSchemaVersion: 3 TTLSettings { Enabled { ColumnName: "modified_at" ExpireAfterSeconds: 3600 Tiers { ApplyAfterSeconds: 3600 Delete { } } } } IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_index_3__SYNC-pk_types1-all_types1-index1---SYNC] >> TestYmqHttpProxy::TestChangeMessageVisibility [GOOD] >> AsyncIndexChangeExchange::ShouldRejectChangesOnQueueOverflowByCount [GOOD] >> AsyncIndexChangeExchange::ShouldRejectChangesOnQueueOverflowBySize >> TSchemeShardTTLTests::AlterTableShouldSuccessOnSimultaneousAddColumnAndEnableTTL [GOOD] |91.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/dump_restore/py3test >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_index_3__SYNC-pk_types1-all_types1-index1---SYNC] [GOOD] >> TFlatTableExecutor_IndexLoading::PrechargeAndSeek_BTreeIndex [GOOD] >> TFlatTableExecutor_IndexLoading::Scan_FlatIndex >> TCdcStreamTests::DropIndexWithStream [GOOD] >> TCdcStreamTests::DropTableWithIndexWithStream >> TSchemeShardTTLTests::CreateTableShouldFailOnUnspecifiedTTL >> TSchemeShardTTLTests::LegacyTtlSettingsNoTiers ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLTests::AlterTableShouldSuccessOnSimultaneousAddColumnAndEnableTTL [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140] 2025-05-07T08:58:42.475257Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:58:42.475359Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:58:42.475420Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: 
StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:58:42.475460Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:58:42.475505Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:58:42.475535Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:58:42.475588Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:58:42.475717Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:58:42.476501Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:58:42.476886Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:58:42.574571Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:58:42.574632Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:58:42.594424Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:58:42.594601Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:58:42.594725Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:58:42.600742Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:58:42.601089Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:58:42.601784Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:42.602004Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:58:42.605425Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:58:42.606883Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:58:42.606959Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:58:42.607041Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:58:42.607092Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 
72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:58:42.607135Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:58:42.607453Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:58:42.615156Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T08:58:42.767162Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:58:42.767387Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:42.767622Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:58:42.767853Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:58:42.767910Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:42.770587Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:42.770734Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:58:42.770921Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:42.770982Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:58:42.771039Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:58:42.771080Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:58:42.773621Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:42.773697Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:58:42.773738Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:58:42.777026Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:42.777115Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:42.777181Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:58:42.777249Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:58:42.780962Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:58:42.783888Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:58:42.784138Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:58:42.785133Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:42.785293Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:58:42.785357Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:58:42.785635Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:58:42.785687Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:58:42.785882Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:58:42.785999Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:58:42.788336Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:58:42.788388Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at 
schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:58:42.788674Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:58:42.788721Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 594046678944 2025-05-07T08:58:43.136884Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 102, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-05-07T08:58:43.137196Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:58:43.137257Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:205:2207], at schemeshard: 72057594046678944, txId: 102, path id: 2 2025-05-07T08:58:43.137863Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-05-07T08:58:43.137928Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1036: NTableState::TProposedWaitParts operationId# 102:0 ProgressState at tablet: 72057594046678944 2025-05-07T08:58:43.138673Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 4 PathOwnerId: 72057594046678944, cookie: 102 2025-05-07T08:58:43.138786Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 4 PathOwnerId: 72057594046678944, cookie: 102 2025-05-07T08:58:43.138831Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 102 2025-05-07T08:58:43.138871Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 4 2025-05-07T08:58:43.138928Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-05-07T08:58:43.139028Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 102, ready parts: 0/1, is published: true 2025-05-07T08:58:43.139760Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6245: Handle TEvProposeTransactionResult, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 102 Step: 5000003 OrderId: 102 ExecLatency: 0 ProposeLatency: 2 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 1217 } } 2025-05-07T08:58:43.139876Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation FindRelatedPartByTabletId, TxId: 102, tablet: 72075186233409546, partId: 0 2025-05-07T08:58:43.140017Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 102:0, at schemeshard: 
72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 102 Step: 5000003 OrderId: 102 ExecLatency: 0 ProposeLatency: 2 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 1217 } } 2025-05-07T08:58:43.140132Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_part.cpp:108: HandleReply TEvDataShard::TEvProposeTransactionResult Ignore message: tablet# 72057594046678944, ev# TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 102 Step: 5000003 OrderId: 102 ExecLatency: 0 ProposeLatency: 2 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 1217 } } FAKE_COORDINATOR: Erasing txId 102 2025-05-07T08:58:43.140724Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5472: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 305 RawX2: 4294969588 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 2025-05-07T08:58:43.140765Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation FindRelatedPartByTabletId, TxId: 102, tablet: 72075186233409546, partId: 0 2025-05-07T08:58:43.140905Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 102:0, at schemeshard: 72057594046678944, message: Source { RawX1: 305 RawX2: 4294969588 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 2025-05-07T08:58:43.140956Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1005: NTableState::TProposedWaitParts operationId# 102:0 HandleReply TEvSchemaChanged at tablet: 72057594046678944 2025-05-07T08:58:43.141076Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1009: NTableState::TProposedWaitParts operationId# 102:0 HandleReply TEvSchemaChanged at tablet: 72057594046678944 message: Source { RawX1: 305 RawX2: 4294969588 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 2025-05-07T08:58:43.141158Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:655: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 102:0, shardIdx: 72057594046678944:1, datashard: 72075186233409546, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:43.141194Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:674: all shard schema changes has been received, operationId: 102:0, at schemeshard: 72057594046678944 2025-05-07T08:58:43.141240Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:686: send schema changes ack message, operation: 102:0, datashard: 72075186233409546, at schemeshard: 72057594046678944 2025-05-07T08:58:43.141279Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 102:0 129 -> 240 2025-05-07T08:58:43.145539Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-05-07T08:58:43.145678Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-05-07T08:58:43.145818Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-05-07T08:58:43.146145Z node 
1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-05-07T08:58:43.146205Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046678944] TDone opId# 102:0 ProgressState 2025-05-07T08:58:43.146321Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#102:0 progress is 1/1 2025-05-07T08:58:43.146356Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-05-07T08:58:43.146409Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#102:0 progress is 1/1 2025-05-07T08:58:43.146453Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-05-07T08:58:43.146491Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: true 2025-05-07T08:58:43.146567Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:334:2313] message: TxId: 102 2025-05-07T08:58:43.146624Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-05-07T08:58:43.146668Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 102:0 2025-05-07T08:58:43.146698Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 102:0 2025-05-07T08:58:43.146824Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-05-07T08:58:43.149017Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-05-07T08:58:43.149072Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:393:2365] TestWaitNotification: OK eventTxId 102 2025-05-07T08:58:43.149645Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/TTLEnabledTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T08:58:43.149940Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/TTLEnabledTable" took 284us result status StatusSuccess 2025-05-07T08:58:43.150455Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/TTLEnabledTable" PathDescription { Self { Name: "TTLEnabledTable" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "TTLEnabledTable" Columns { Name: "key" Type: 
"Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "modified_at" Type: "Timestamp" TypeId: 50 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 2 TTLSettings { Enabled { ColumnName: "modified_at" ExpireAfterSeconds: 3600 Tiers { ApplyAfterSeconds: 3600 Delete { } } } } IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 1 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> Cdc::Drop[YdsRunner] [GOOD] >> Cdc::Drop[TopicRunner] >> TestYmqHttpProxy::TestChangeMessageVisibilityBatch >> TSchemeShardTTLTestsWithReboots::CopyTable |91.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TCmsTest::Notifications [GOOD] >> TCmsTest::Mirror3dcPermissions >> SystemView::PartitionStatsLocksFields [GOOD] >> SystemView::QueryStatsAllTables >> TCmsTenatsTest::TestLimitsWithDownNode [GOOD] >> TCmsTenatsTest::TestScheduledPermissionWithDefaultPolicy |91.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLTests::CreateTableShouldFailOnUnspecifiedTTL [GOOD] >> TestKinesisHttpProxy::TestWrongStream2 [GOOD] >> TSchemeShardTTLTests::LegacyTtlSettingsNoTiers [GOOD] |91.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLTests::CreateTableShouldFailOnUnspecifiedTTL [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:128:2058] recipient: [1:108:2140] 2025-05-07T08:58:44.270628Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:58:44.270743Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:58:44.270784Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching 
config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:58:44.270824Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:58:44.270867Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:58:44.270898Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:58:44.270960Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:58:44.271033Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:58:44.271785Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:58:44.272129Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:58:44.363194Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:58:44.363258Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:58:44.380773Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:58:44.380930Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:58:44.381104Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:58:44.395195Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:58:44.396249Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:58:44.397008Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:44.397312Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:58:44.400509Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:58:44.402251Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:58:44.402326Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:58:44.402384Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:58:44.402444Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 
72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:58:44.402533Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:58:44.402752Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:58:44.409884Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:239:2058] recipient: [1:15:2062] 2025-05-07T08:58:44.562147Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:58:44.562347Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:44.562535Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:58:44.562749Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:58:44.562793Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:44.564974Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:44.565109Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:58:44.565296Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:44.565361Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:58:44.565402Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:58:44.565435Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:58:44.568243Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:44.568307Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:58:44.568350Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:58:44.570457Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:44.570523Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:44.570571Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:58:44.570646Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:58:44.574525Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:58:44.577166Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:58:44.577371Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:58:44.578391Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:44.578529Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 130 RawX2: 4294969450 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:58:44.578592Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:58:44.578875Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:58:44.578966Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:58:44.579150Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:58:44.579234Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:58:44.581552Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:58:44.581604Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at 
schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:58:44.581807Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:58:44.581846Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:206:2208], at schemeshard: 72057594046678944, txId: 1, path id: 1 2025-05-07T08:58:44.581919Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:44.581986Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046678944] TDone opId# 1:0 ProgressState 2025-05-07T08:58:44.582104Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#1:0 progress is 1/1 2025-05-07T08:58:44.582144Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-05-07T08:58:44.582184Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#1:0 progress is 1/1 2025-05-07T08:58:44.582215Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-05-07T08:58:44.582252Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 1, ready parts: 1/1, is published: false 2025-05-07T08:58:44.582322Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-05-07T08:58:44.582357Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 1:0 2025-05-07T08:58:44.582386Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 1:0 2025-05-07T08:58:44.582454Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-05-07T08:58:44.582490Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 1, publications: 1, subscribers: 0 2025-05-07T08:58:44.582524Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 1, [OwnerId: 72057594046678944, LocalPathId: 1], 3 2025-05-07T08:58:44.585233Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-05-07T08:58:44.585358Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-05-07T08:58:44.585408Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1 2025-05-07T08:58:44.585449Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 3 
2025-05-07T08:58:44.585493Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:58:44.585602Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1, subscribers: 0 2025-05-07T08:58:44.588482Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1 2025-05-07T08:58:44.589086Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1, at schemeshard: 72057594046678944 TestModificationResults wait txId: 101 2025-05-07T08:58:44.590318Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:434: actor# [1:269:2260] Bootstrap 2025-05-07T08:58:44.605848Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:453: actor# [1:269:2260] Become StateWork (SchemeCache [1:274:2265]) 2025-05-07T08:58:44.608687Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateTable CreateTable { Name: "TTLEnabledTable" Columns { Name: "key" Type: "Uint64" } Columns { Name: "modified_at" Type: "Timestamp" } KeyColumnNames: "key" TTLSettings { } } } TxId: 101 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:58:44.609019Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_table.cpp:426: TCreateTable Propose, path: /MyRoot/TTLEnabledTable, opId: 101:0, at schemeshard: 72057594046678944 2025-05-07T08:58:44.609142Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_create_table.cpp:433: TCreateTable Propose, path: /MyRoot/TTLEnabledTable, opId: 101:0, schema: Name: "TTLEnabledTable" Columns { Name: "key" Type: "Uint64" } Columns { Name: "modified_at" Type: "Timestamp" } KeyColumnNames: "key" TTLSettings { }, at schemeshard: 72057594046678944 2025-05-07T08:58:44.609553Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 101:1, propose status:StatusSchemeError, reason: TTL status must be specified, at schemeshard: 72057594046678944 2025-05-07T08:58:44.610727Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:269:2260] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-05-07T08:58:44.613540Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 101, response: Status: StatusSchemeError Reason: "TTL status must be specified" TxId: 101 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:58:44.613734Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 101, database: /MyRoot, subject: , status: StatusSchemeError, reason: TTL status must be specified, operation: CREATE TABLE, path: /MyRoot/TTLEnabledTable 2025-05-07T08:58:44.614357Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 TestModificationResult got TxId: 101, wait until txId: 101 >> TestKinesisHttpProxy::BadRequestUnknownMethod [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLTests::LegacyTtlSettingsNoTiers [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: 
[1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140] 2025-05-07T08:58:44.473374Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:58:44.473468Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:58:44.473531Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:58:44.473568Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:58:44.473612Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:58:44.473660Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:58:44.473727Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:58:44.473828Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:58:44.474714Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:58:44.475163Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:58:44.565548Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:58:44.565603Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:58:44.584320Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:58:44.584521Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:58:44.584671Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:58:44.591610Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:58:44.592164Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:58:44.593106Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:44.593302Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 
2025-05-07T08:58:44.597557Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:58:44.599102Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:58:44.599175Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:58:44.599241Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:58:44.599281Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:58:44.599317Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:58:44.599849Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:58:44.608567Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T08:58:44.753640Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:58:44.753901Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:44.761146Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:58:44.761488Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:58:44.761584Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:44.770252Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:44.770411Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:58:44.770630Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:44.770693Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 
2025-05-07T08:58:44.770758Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:58:44.770802Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:58:44.779053Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:44.779132Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:58:44.779174Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:58:44.786859Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:44.786946Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:44.786992Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:58:44.787055Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:58:44.793018Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:58:44.802426Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:58:44.802710Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:58:44.803947Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:44.804132Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:58:44.804210Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:58:44.804575Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:58:44.804645Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose 
HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:58:44.804872Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:58:44.804960Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:58:44.811255Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:58:44.811314Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:58:44.811482Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:58:44.811511Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 8:58:45.037547Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 101 2025-05-07T08:58:45.037592Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 101 2025-05-07T08:58:45.037630Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 5 2025-05-07T08:58:45.037678Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-05-07T08:58:45.039178Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 101 2025-05-07T08:58:45.039293Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 101 2025-05-07T08:58:45.039326Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 101 2025-05-07T08:58:45.039357Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 3 2025-05-07T08:58:45.039395Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-05-07T08:58:45.039476Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 101, ready parts: 0/1, is published: true FAKE_COORDINATOR: Erasing txId 101 
2025-05-07T08:58:45.040998Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6245: Handle TEvProposeTransactionResult, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 101 Step: 5000002 OrderId: 101 ExecLatency: 0 ProposeLatency: 2 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 1350 } } 2025-05-07T08:58:45.041044Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation FindRelatedPartByTabletId, TxId: 101, tablet: 72075186233409546, partId: 0 2025-05-07T08:58:45.041182Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 101:0, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 101 Step: 5000002 OrderId: 101 ExecLatency: 0 ProposeLatency: 2 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 1350 } } 2025-05-07T08:58:45.041276Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_part.cpp:108: HandleReply TEvDataShard::TEvProposeTransactionResult Ignore message: tablet# 72057594046678944, ev# TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 101 Step: 5000002 OrderId: 101 ExecLatency: 0 ProposeLatency: 2 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 1350 } } 2025-05-07T08:58:45.042332Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5472: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 305 RawX2: 4294969588 } Origin: 72075186233409546 State: 2 TxId: 101 Step: 0 Generation: 2 2025-05-07T08:58:45.042379Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation FindRelatedPartByTabletId, TxId: 101, tablet: 72075186233409546, partId: 0 2025-05-07T08:58:45.042530Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 101:0, at schemeshard: 72057594046678944, message: Source { RawX1: 305 RawX2: 4294969588 } Origin: 72075186233409546 State: 2 TxId: 101 Step: 0 Generation: 2 2025-05-07T08:58:45.042584Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1005: NTableState::TProposedWaitParts operationId# 101:0 HandleReply TEvSchemaChanged at tablet: 72057594046678944 2025-05-07T08:58:45.042669Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1009: NTableState::TProposedWaitParts operationId# 101:0 HandleReply TEvSchemaChanged at tablet: 72057594046678944 message: Source { RawX1: 305 RawX2: 4294969588 } Origin: 72075186233409546 State: 2 TxId: 101 Step: 0 Generation: 2 2025-05-07T08:58:45.042740Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:655: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 101:0, shardIdx: 72057594046678944:1, datashard: 72075186233409546, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:45.042783Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:674: all shard schema changes has been received, operationId: 101:0, at schemeshard: 72057594046678944 2025-05-07T08:58:45.042818Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:686: send schema changes ack message, operation: 101:0, datashard: 72075186233409546, at schemeshard: 
72057594046678944 2025-05-07T08:58:45.042862Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 101:0 129 -> 240 2025-05-07T08:58:45.045926Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-05-07T08:58:45.046233Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-05-07T08:58:45.046842Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-05-07T08:58:45.047335Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-05-07T08:58:45.047524Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 101:0, at schemeshard: 72057594046678944 2025-05-07T08:58:45.047570Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046678944] TDone opId# 101:0 ProgressState 2025-05-07T08:58:45.047680Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#101:0 progress is 1/1 2025-05-07T08:58:45.047717Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-05-07T08:58:45.047758Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#101:0 progress is 1/1 2025-05-07T08:58:45.047790Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-05-07T08:58:45.047848Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 101, ready parts: 1/1, is published: true 2025-05-07T08:58:45.047929Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:334:2313] message: TxId: 101 2025-05-07T08:58:45.047977Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-05-07T08:58:45.048019Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 101:0 2025-05-07T08:58:45.048070Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 101:0 2025-05-07T08:58:45.048232Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-05-07T08:58:45.050216Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-05-07T08:58:45.050271Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:335:2314] TestWaitNotification: OK eventTxId 101 2025-05-07T08:58:45.050795Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/TTLEnabledTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T08:58:45.051060Z 
node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/TTLEnabledTable" took 300us result status StatusSuccess 2025-05-07T08:58:45.051556Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/TTLEnabledTable" PathDescription { Self { Name: "TTLEnabledTable" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "TTLEnabledTable" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "modified_at" Type: "Timestamp" TypeId: 50 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 1 TTLSettings { Enabled { ColumnName: "modified_at" ExpireAfterSeconds: 3600 } } IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 1 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 |91.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TCdcStreamTests::DropTableWithIndexWithStream [GOOD] >> TestKinesisHttpProxy::TestWrongRequest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/viewer/ut/unittest >> Viewer::JsonStorageListingV2PDiskIdFilter [GOOD] Test command err: 2025-05-07T08:50:22.246108Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:3169:2439], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:50:22.247084Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:50:22.247677Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:50:22.250033Z node 8 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [8:1531:2379], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:50:22.250221Z node 9 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [9:1534:2379], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:50:22.251818Z node 8 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:50:22.251877Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:50:22.252641Z node 8 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:50:22.252684Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:50:22.253127Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [3:3172:2382], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:50:22.253873Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:3165:2382], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:50:22.254846Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:50:22.255059Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [6:3181:2382], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:50:22.255796Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:50:22.255855Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:50:22.256018Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:50:22.256220Z node 5 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [5:3178:2382], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:50:22.256313Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:50:22.257346Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:50:22.257452Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [7:3184:2380], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:50:22.257723Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:50:22.257810Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:50:22.258563Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [4:3175:2382], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:50:22.258633Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:50:22.258689Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:50:22.259512Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:50:22.260806Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-05-07T08:50:22.813493Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:50:23.039995Z node 1 :BS_NODE WARN: {NW89@node_warden_pdisk.cpp:106} Can't write new MockDevicesConfig to file Path# /Berkanavt/kikimr/testing/mock_devices.txt 2025-05-07T08:50:23.089591Z node 1 :BS_PDISK WARN: {BSP01@blobstorage_pdisk_actor.cpp:422} Magic sector is present on disk, now going to format device PDiskId# 1000 2025-05-07T08:50:23.750023Z node 1 :BS_PDISK WARN: {BSP01@blobstorage_pdisk_actor.cpp:362} Device formatting done PDiskId# 1000 TServer::EnableGrpc on GrpcPort 1984, node 1 TClient is connected to server localhost:25066 2025-05-07T08:50:24.128358Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:50:24.128416Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:50:24.128451Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:50:24.129091Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-05-07T08:52:13.303609Z node 10 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [10:2928:2436], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:52:13.306124Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:52:13.307761Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:52:13.311130Z node 12 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [12:2931:2379], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:52:13.311382Z node 13 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [13:2934:2379], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:52:13.312718Z node 16 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [16:2943:2379], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:52:13.312918Z node 17 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [17:2946:2379], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:52:13.314470Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:52:13.314681Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:52:13.314744Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:52:13.315118Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:52:13.315326Z node 16 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:52:13.315474Z node 17 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:52:13.316653Z node 15 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [15:2940:2379], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:52:13.316762Z node 16 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:52:13.316825Z node 17 :METADATA_PROVIDER ERROR: log.cpp:784: fline=access ... correct path status: LookupError; 2025-05-07T08:54:49.640619Z node 25 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [25:2451:2379], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:54:49.640740Z node 26 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:54:49.640925Z node 27 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [27:2457:2379], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:54:49.643254Z node 20 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [20:3141:2379], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:54:49.643452Z node 25 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:54:49.643543Z node 27 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:54:49.644151Z node 25 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:54:49.644214Z node 27 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:54:49.645083Z node 20 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:54:49.645367Z node 20 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-05-07T08:54:50.463976Z node 19 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:54:50.834534Z node 19 :BS_NODE WARN: {NW89@node_warden_pdisk.cpp:106} Can't write new MockDevicesConfig to file Path# /Berkanavt/kikimr/testing/mock_devices.txt 2025-05-07T08:54:50.886884Z node 19 :BS_PDISK WARN: {BSP01@blobstorage_pdisk_actor.cpp:422} Magic sector is present on disk, now going to format device PDiskId# 1000 2025-05-07T08:54:52.336804Z node 19 :BS_PDISK WARN: {BSP01@blobstorage_pdisk_actor.cpp:362} Device formatting done PDiskId# 1000 TServer::EnableGrpc on GrpcPort 4969, node 19 TClient is connected to server localhost:9731 2025-05-07T08:54:53.660224Z node 19 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:54:53.660357Z node 19 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:54:53.660458Z node 19 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:54:53.661011Z node 19 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-05-07T08:57:56.706173Z node 28 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [28:3131:2436], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:57:56.712521Z node 28 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:57:56.713408Z node 28 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:57:56.730574Z node 32 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [32:3140:2379], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:57:56.730913Z node 34 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [34:3146:2379], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:57:56.733818Z node 31 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [31:3137:2379], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:57:56.733959Z node 32 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:57:56.735511Z node 33 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [33:3143:2379], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:57:56.735639Z node 34 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:57:56.736803Z node 32 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:57:56.736994Z node 34 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:57:56.738252Z node 29 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [29:3111:2379], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:57:56.738482Z node 30 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [30:3134:2379], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:57:56.738620Z node 31 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:57:56.738733Z node 33 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:57:56.738839Z node 33 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:57:56.738997Z node 35 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [35:3149:2379], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:57:56.739189Z node 36 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [36:3152:2379], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:57:56.740093Z node 31 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:57:56.740317Z node 30 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:57:56.740955Z node 29 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:57:56.741015Z node 29 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:57:56.741077Z node 30 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:57:56.741179Z node 35 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:57:56.741256Z node 36 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:57:56.742148Z node 35 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:57:56.742213Z node 36 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-05-07T08:57:57.950936Z node 28 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:57:58.394405Z node 28 :BS_NODE WARN: {NW89@node_warden_pdisk.cpp:106} Can't write new MockDevicesConfig to file Path# /Berkanavt/kikimr/testing/mock_devices.txt 2025-05-07T08:57:58.478403Z node 28 :BS_PDISK WARN: {BSP01@blobstorage_pdisk_actor.cpp:422} Magic sector is present on disk, now going to format device PDiskId# 1000 2025-05-07T08:58:00.034703Z node 28 :BS_PDISK WARN: {BSP01@blobstorage_pdisk_actor.cpp:362} Device formatting done PDiskId# 1000 TServer::EnableGrpc on GrpcPort 9560, node 28 TClient is connected to server localhost:8639 2025-05-07T08:58:01.051076Z node 28 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:58:01.051206Z node 28 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:58:01.051308Z node 28 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:58:01.052300Z node 28 
:NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration >> TSchemeShardTTLTests::CreateTableShouldSucceedAsyncOnIndexedTable |91.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/nodewarden/ut_sequence/ydb-core-blobstorage-nodewarden-ut_sequence |91.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/nodewarden/ut_sequence/ydb-core-blobstorage-nodewarden-ut_sequence |91.3%| [LD] {RESULT} $(B)/ydb/core/blobstorage/nodewarden/ut_sequence/ydb-core-blobstorage-nodewarden-ut_sequence >> TSchemeShardTTLTests::ShouldSkipDroppedColumn [GOOD] >> TestYmqHttpProxy::TestDeleteMessageBatch [GOOD] >> HttpProxyInsideYdb::TestIfEnvVariableSet [GOOD] >> TCmsTenatsTest::TestScheduledPermissionWithDefaultPolicy [GOOD] >> Cdc::UpdateShardCount [GOOD] >> Cdc::UpdateRetentionPeriod >> TSchemeShardTTLTests::RacyAlterTableAndConditionalErase [GOOD] >> TSchemeShardColumnTableTTL::CreateColumnTable |91.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/cms/ut/unittest >> TCmsTenatsTest::TestScheduledPermissionWithDefaultPolicy [GOOD] >> TestKinesisHttpProxy::ListShardsTimestamp [GOOD] >> TSchemeShardTTLTests::BackupCopyHasNoTtlSettings ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLTests::ShouldSkipDroppedColumn [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140] 2025-05-07T08:58:22.432674Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:58:22.432763Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:58:22.432811Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:58:22.432854Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:58:22.432904Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:58:22.432958Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:58:22.433035Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:58:22.433150Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:58:22.433963Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 
2025-05-07T08:58:22.434396Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:58:22.531737Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:58:22.531807Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:58:22.564671Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:58:22.564929Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:58:22.565171Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:58:22.572344Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:58:22.572763Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:58:22.573673Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:22.573923Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:58:22.577772Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:58:22.579412Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:58:22.579504Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:58:22.579591Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:58:22.579646Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:58:22.579695Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:58:22.579965Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:58:22.588035Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T08:58:22.824804Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:58:22.825122Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:22.825395Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:58:22.825681Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:58:22.825761Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:22.829300Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:22.829528Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:58:22.829810Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:22.829895Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:58:22.829991Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:58:22.830041Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:58:22.833016Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:22.833116Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:58:22.833167Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:58:22.838544Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:22.838636Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:22.838690Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:58:22.838768Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:58:22.857164Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:58:22.867149Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 
72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:58:22.867410Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:58:22.868533Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:22.868730Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:58:22.868800Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:58:22.869185Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:58:22.869245Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:58:22.869448Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:58:22.869533Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:58:22.872355Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:58:22.872441Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:58:22.872653Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:58:22.872697Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 
2025-05-07T08:58:23.548527Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [1:503:2464] TestWaitNotification: OK eventTxId 103 2025-05-07T08:58:29.504784Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7261: Cannot get console configs 2025-05-07T08:58:29.504856Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:58:31.525690Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:561: Got periodic table stats at tablet 72057594046678944 from shard 72075186233409546 followerId 0 pathId [OwnerId: 72057594046678944, LocalPathId: 4] state 'Ready' dataSize 0 rowCount 0 cpuUsage 0.0732 2025-05-07T08:58:31.525839Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:561: Got periodic table stats at tablet 72057594046678944 from shard 72075186233409547 followerId 0 pathId [OwnerId: 72057594046678944, LocalPathId: 2] state 'Ready' dataSize 0 rowCount 0 cpuUsage 0.1903 2025-05-07T08:58:31.574411Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:588: Started TEvPersistStats at tablet 72057594046678944, queue size# 2 2025-05-07T08:58:31.574659Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 4 shard idx 72057594046678944:2 data size 0 row count 0 2025-05-07T08:58:31.574775Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186233409546 maps to shardIdx: 72057594046678944:2 followerId=0, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], pathId map=indexImplTable, is column=0, is olap=0, RowCount 0, DataSize 0 2025-05-07T08:58:31.574893Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:483: Do not want to split tablet 72075186233409546 2025-05-07T08:58:31.574965Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 2 shard idx 72057594046678944:1 data size 0 row count 0 2025-05-07T08:58:31.575018Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186233409547 maps to shardIdx: 72057594046678944:1 followerId=0, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], pathId map=TTLEnabledTable, is column=0, is olap=0, RowCount 0, DataSize 0 2025-05-07T08:58:31.575118Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:483: Do not want to split tablet 72075186233409547 2025-05-07T08:58:31.586405Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:588: Started TEvPersistStats at tablet 72057594046678944, queue size# 0 2025-05-07T08:58:35.458960Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:561: Got periodic table stats at tablet 72057594046678944 from shard 72075186233409546 followerId 0 pathId [OwnerId: 72057594046678944, LocalPathId: 4] state 'Ready' dataSize 0 rowCount 0 cpuUsage 0.0195 2025-05-07T08:58:35.459095Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:561: Got periodic table stats at tablet 72057594046678944 from shard 72075186233409547 followerId 0 pathId [OwnerId: 72057594046678944, LocalPathId: 2] state 'Ready' dataSize 0 rowCount 0 cpuUsage 0.048 2025-05-07T08:58:35.505990Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:588: Started TEvPersistStats at tablet 72057594046678944, queue size# 2 2025-05-07T08:58:35.506202Z node 1 :FLAT_TX_SCHEMESHARD 
DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 4 shard idx 72057594046678944:2 data size 0 row count 0 2025-05-07T08:58:35.506317Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186233409546 maps to shardIdx: 72057594046678944:2 followerId=0, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], pathId map=indexImplTable, is column=0, is olap=0, RowCount 0, DataSize 0 2025-05-07T08:58:35.506422Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:483: Do not want to split tablet 72075186233409546 2025-05-07T08:58:35.506494Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 2 shard idx 72057594046678944:1 data size 0 row count 0 2025-05-07T08:58:35.506549Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186233409547 maps to shardIdx: 72057594046678944:1 followerId=0, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], pathId map=TTLEnabledTable, is column=0, is olap=0, RowCount 0, DataSize 0 2025-05-07T08:58:35.506601Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:483: Do not want to split tablet 72075186233409547 2025-05-07T08:58:35.517227Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:588: Started TEvPersistStats at tablet 72057594046678944, queue size# 0 2025-05-07T08:58:39.189849Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:561: Got periodic table stats at tablet 72057594046678944 from shard 72075186233409546 followerId 0 pathId [OwnerId: 72057594046678944, LocalPathId: 4] state 'Ready' dataSize 0 rowCount 0 cpuUsage 0.0195 2025-05-07T08:58:39.189986Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:561: Got periodic table stats at tablet 72057594046678944 from shard 72075186233409547 followerId 0 pathId [OwnerId: 72057594046678944, LocalPathId: 2] state 'Ready' dataSize 0 rowCount 0 cpuUsage 0.048 2025-05-07T08:58:39.236661Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:588: Started TEvPersistStats at tablet 72057594046678944, queue size# 2 2025-05-07T08:58:39.236822Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 4 shard idx 72057594046678944:2 data size 0 row count 0 2025-05-07T08:58:39.236927Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186233409546 maps to shardIdx: 72057594046678944:2 followerId=0, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], pathId map=indexImplTable, is column=0, is olap=0, RowCount 0, DataSize 0 2025-05-07T08:58:39.237019Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:483: Do not want to split tablet 72075186233409546 2025-05-07T08:58:39.237064Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 2 shard idx 72057594046678944:1 data size 0 row count 0 2025-05-07T08:58:39.237109Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186233409547 maps to shardIdx: 72057594046678944:1 followerId=0, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], pathId map=TTLEnabledTable, is column=0, is olap=0, RowCount 0, DataSize 0 2025-05-07T08:58:39.237144Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:483: Do not want to split tablet 72075186233409547
2025-05-07T08:58:39.248015Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:588: Started TEvPersistStats at tablet 72057594046678944, queue size# 0
2025-05-07T08:58:42.949056Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:561: Got periodic table stats at tablet 72057594046678944 from shard 72075186233409547 followerId 0 pathId [OwnerId: 72057594046678944, LocalPathId: 2] state 'Ready' dataSize 0 rowCount 0 cpuUsage 0.0126
2025-05-07T08:58:42.949181Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:561: Got periodic table stats at tablet 72057594046678944 from shard 72075186233409546 followerId 0 pathId [OwnerId: 72057594046678944, LocalPathId: 4] state 'Ready' dataSize 0 rowCount 0 cpuUsage 0.0066
2025-05-07T08:58:42.997620Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:588: Started TEvPersistStats at tablet 72057594046678944, queue size# 2
2025-05-07T08:58:42.997827Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 2 shard idx 72057594046678944:1 data size 0 row count 0
2025-05-07T08:58:42.997938Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186233409547 maps to shardIdx: 72057594046678944:1 followerId=0, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], pathId map=TTLEnabledTable, is column=0, is olap=0, RowCount 0, DataSize 0
2025-05-07T08:58:42.998071Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:483: Do not want to split tablet 72075186233409547
2025-05-07T08:58:42.998141Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 4 shard idx 72057594046678944:2 data size 0 row count 0
2025-05-07T08:58:42.998197Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186233409546 maps to shardIdx: 72057594046678944:2 followerId=0, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], pathId map=indexImplTable, is column=0, is olap=0, RowCount 0, DataSize 0
2025-05-07T08:58:42.998241Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:483: Do not want to split tablet 72075186233409546
2025-05-07T08:58:43.008908Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:588: Started TEvPersistStats at tablet 72057594046678944, queue size# 0
2025-05-07T08:58:46.636691Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6592: Handle: TEvRunConditionalErase, at schemeshard: 72057594046678944
2025-05-07T08:58:46.636865Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:56: TTxRunConditionalErase DoExecute: at schemeshard: 72057594046678944
2025-05-07T08:58:46.637110Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:189: TTxRunConditionalErase DoComplete: at schemeshard: 72057594046678944
2025-05-07T08:58:46.637403Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__conditional_erase.cpp:213: Run conditional erase, tabletId: 72075186233409547, request: TableId: 2 Expiration { ColumnId: 2 WallClockTimestamp: 60024000 ColumnUnit: UNIT_AUTO } SchemaVersion: 3 Indexes { OwnerId: 72057594046678944 PathId: 4 SchemaVersion: 1 KeyMap { IndexColumnId: 1 MainColumnId: 3 } KeyMap { IndexColumnId: 2 MainColumnId: 1 } } Limits { BatchMaxBytes: 512000 BatchMinKeys: 1 BatchMaxKeys: 256 }, at schemeshard: 72057594046678944
2025-05-07T08:58:46.638059Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6620: Conditional erase accepted: tabletId: 72075186233409547, at schemeshard: 72057594046678944
2025-05-07T08:58:46.638838Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:346: TTxScheduleConditionalErase Execute: at schemeshard: 72057594046678944
2025-05-07T08:58:46.638913Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:396: Successful conditional erase: tabletId: 72075186233409547, at schemeshard: 72057594046678944
2025-05-07T08:58:46.655657Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:452: TTxScheduleConditionalErase Complete: at schemeshard: 72057594046678944
2025-05-07T08:58:46.655961Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:56: TTxRunConditionalErase DoExecute: at schemeshard: 72057594046678944
2025-05-07T08:58:46.656056Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__conditional_erase.cpp:106: Skip conditional erase: shardIdx: 72057594046678944:1, run at: 1970-01-01T01:01:00.024000Z, at schemeshard: 72057594046678944
2025-05-07T08:58:46.656188Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:189: TTxRunConditionalErase DoComplete: at schemeshard: 72057594046678944
>> TSchemeShardTTLTests::AlterTableShouldSucceedOnAsyncIndexedTable
|91.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/cms/ut_sentinel/ydb-core-cms-ut_sentinel
|91.3%| [LD] {RESULT} $(B)/ydb/core/cms/ut_sentinel/ydb-core-cms-ut_sentinel
|91.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/cms/ut_sentinel/ydb-core-cms-ut_sentinel
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLTests::RacyAlterTableAndConditionalErase [GOOD]
Test command err:
Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140]
IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140]
Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140]
2025-05-07T08:58:23.681604Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1
2025-05-07T08:58:23.681687Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10
2025-05-07T08:58:23.681732Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s
2025-05-07T08:58:23.681773Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration
2025-05-07T08:58:23.681820Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000
2025-05-07T08:58:23.681861Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000
2025-05-07T08:58:23.681928Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10
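The "Run conditional erase" request logged above caps each erase round both by key count and by payload size (Limits { BatchMaxBytes: 512000 BatchMinKeys: 1 BatchMaxKeys: 256 }). A minimal sketch of such dual-capped batching, assuming hypothetical row shapes and helper names (an illustration of the limit semantics, not YDB's datashard code):

```python
# Hypothetical illustration of the dual batch caps from the request above
# (Limits { BatchMaxBytes: 512000 BatchMinKeys: 1 BatchMaxKeys: 256 }).
# Row layout, field units and helper names are assumptions, not YDB code.
from dataclasses import dataclass
from typing import Iterable, Iterator, List

@dataclass
class Row:
    key: int
    size_bytes: int
    ttl_value: int  # TTL column value; unit left abstract (UNIT_AUTO in the log)

def erase_batches(rows: Iterable[Row], cutoff: int,
                  max_bytes: int = 512_000,
                  min_keys: int = 1,
                  max_keys: int = 256) -> Iterator[List[Row]]:
    """Yield batches of expired rows, closing a batch as soon as adding
    another row would exceed the key cap or the byte cap."""
    batch: List[Row] = []
    batch_bytes = 0
    for row in rows:
        if row.ttl_value > cutoff:
            continue  # row not expired yet
        if batch and (len(batch) >= max_keys or batch_bytes + row.size_bytes > max_bytes):
            yield batch  # close the full batch and start a new one
            batch, batch_bytes = [], 0
        batch.append(row)
        batch_bytes += row.size_bytes
    if len(batch) >= min_keys:
        yield batch  # flush the tail batch

# 300 expired 100-byte rows against the cutoff value seen in the request:
rows = [Row(key=i, size_bytes=100, ttl_value=50_000_000) for i in range(300)]
print([len(b) for b in erase_batches(rows, cutoff=60_024_000)])  # [256, 44]
```

With 300 uniformly small expired rows the key cap closes the first batch at 256 and leaves a tail of 44; the byte cap would take over only for wide rows.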
2025-05-07T08:58:23.682404Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:58:23.683145Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:58:23.683480Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:58:23.770156Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:58:23.770209Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:58:23.787148Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:58:23.787360Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:58:23.787560Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:58:23.793488Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:58:23.793806Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:58:23.794526Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:23.794711Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:58:23.797714Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:58:23.799210Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:58:23.799279Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:58:23.799365Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:58:23.799412Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:58:23.799453Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:58:23.799678Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:58:23.811099Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T08:58:23.956175Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" 
OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:58:23.956427Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:23.956685Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:58:23.956931Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:58:23.957013Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:23.959598Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:23.959772Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:58:23.960002Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:23.960059Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:58:23.960135Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:58:23.960173Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:58:23.962330Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:23.962401Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:58:23.962441Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:58:23.964680Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:23.964742Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:23.964792Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:58:23.964856Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:58:23.975622Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: 
TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:58:23.977919Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:58:23.978174Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:58:23.979287Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:23.979482Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:58:23.979555Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:58:23.979892Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:58:23.979951Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:58:23.980146Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:58:23.980249Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:58:23.982511Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:58:23.982571Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:58:23.982781Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:58:23.982819Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 
tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 102 at step: 5000003 FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000002 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 102 at step: 5000003 FAKE_COORDINATOR: Send Plan to tablet 72075186233409546 for txId: 102 at step: 5000003 2025-05-07T08:58:46.963785Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000003, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:46.963921Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 102 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000003 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:58:46.963982Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_table.cpp:359: TAlterTable TPropose operationId# 102:0 HandleReply TEvOperationPlan, operationId: 102:0, stepId: 5000003, at schemeshard: 72057594046678944 2025-05-07T08:58:46.964274Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 102:0 128 -> 129 2025-05-07T08:58:46.964408Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000003 2025-05-07T08:58:46.975996Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:58:46.976049Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 102, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-05-07T08:58:46.976341Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:58:46.976417Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:205:2207], at schemeshard: 72057594046678944, txId: 102, path id: 2 2025-05-07T08:58:46.977078Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-05-07T08:58:46.977157Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1036: NTableState::TProposedWaitParts operationId# 102:0 ProgressState at tablet: 72057594046678944 2025-05-07T08:58:46.982548Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 4 PathOwnerId: 72057594046678944, cookie: 102 2025-05-07T08:58:46.982688Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 4 PathOwnerId: 72057594046678944, cookie: 102 2025-05-07T08:58:46.982736Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 102 2025-05-07T08:58:46.982778Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 
72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 4 2025-05-07T08:58:46.982835Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-05-07T08:58:46.982927Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 102, ready parts: 0/1, is published: true FAKE_COORDINATOR: Erasing txId 102 2025-05-07T08:58:46.985246Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6245: Handle TEvProposeTransactionResult, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 102 Step: 5000003 OrderId: 102 ExecLatency: 0 ProposeLatency: 3 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 1373 } } 2025-05-07T08:58:46.985300Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation FindRelatedPartByTabletId, TxId: 102, tablet: 72075186233409546, partId: 0 2025-05-07T08:58:46.985471Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 102:0, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 102 Step: 5000003 OrderId: 102 ExecLatency: 0 ProposeLatency: 3 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 1373 } } 2025-05-07T08:58:46.985578Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_part.cpp:108: HandleReply TEvDataShard::TEvProposeTransactionResult Ignore message: tablet# 72057594046678944, ev# TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 102 Step: 5000003 OrderId: 102 ExecLatency: 0 ProposeLatency: 3 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 1373 } } 2025-05-07T08:58:46.986474Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5472: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 305 RawX2: 4294969588 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 2025-05-07T08:58:46.986518Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation FindRelatedPartByTabletId, TxId: 102, tablet: 72075186233409546, partId: 0 2025-05-07T08:58:46.986705Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 102:0, at schemeshard: 72057594046678944, message: Source { RawX1: 305 RawX2: 4294969588 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 2025-05-07T08:58:46.986763Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1005: NTableState::TProposedWaitParts operationId# 102:0 HandleReply TEvSchemaChanged at tablet: 72057594046678944 2025-05-07T08:58:46.986849Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1009: NTableState::TProposedWaitParts operationId# 102:0 HandleReply TEvSchemaChanged at tablet: 72057594046678944 message: Source { RawX1: 305 RawX2: 4294969588 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 2025-05-07T08:58:46.986914Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:655: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 102:0, shardIdx: 72057594046678944:1, datashard: 72075186233409546, left await: 0, 
txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944
2025-05-07T08:58:46.986950Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:674: all shard schema changes has been received, operationId: 102:0, at schemeshard: 72057594046678944
2025-05-07T08:58:46.986983Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:686: send schema changes ack message, operation: 102:0, datashard: 72075186233409546, at schemeshard: 72057594046678944
2025-05-07T08:58:46.987020Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 102:0 129 -> 240
2025-05-07T08:58:46.989888Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102
2025-05-07T08:58:46.990449Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944
2025-05-07T08:58:46.990584Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944
2025-05-07T08:58:46.990722Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944
2025-05-07T08:58:46.990777Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046678944] TDone opId# 102:0 ProgressState
2025-05-07T08:58:46.990906Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#102:0 progress is 1/1
2025-05-07T08:58:46.990959Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 102 ready parts: 1/1
2025-05-07T08:58:46.990998Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#102:0 progress is 1/1
2025-05-07T08:58:46.991034Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 102 ready parts: 1/1
2025-05-07T08:58:46.991069Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: true
2025-05-07T08:58:46.991141Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:334:2313] message: TxId: 102
2025-05-07T08:58:46.991197Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 102 ready parts: 1/1
2025-05-07T08:58:46.991239Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 102:0
2025-05-07T08:58:46.991270Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 102:0
2025-05-07T08:58:46.991393Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3
2025-05-07T08:58:46.993706Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult
2025-05-07T08:58:46.993756Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:609:2564]
TestWaitNotification: OK eventTxId 102
2025-05-07T08:58:46.994301Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:346: TTxScheduleConditionalErase Execute: at schemeshard: 72057594046678944
2025-05-07T08:58:46.994361Z node 1 :FLAT_TX_SCHEMESHARD ERROR: schemeshard__conditional_erase.cpp:390: Unsuccessful conditional erase: tabletId: 72075186233409546, status: SCHEME_ERROR, error: Schema version mismatch: got 1, expected 2, retry after: 300.000000s, at schemeshard: 72057594046678944
2025-05-07T08:58:46.995988Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:452: TTxScheduleConditionalErase Complete: at schemeshard: 72057594046678944
2025-05-07T08:58:46.996089Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:56: TTxRunConditionalErase DoExecute: at schemeshard: 72057594046678944
2025-05-07T08:58:46.996143Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__conditional_erase.cpp:106: Skip conditional erase: shardIdx: 72057594046678944:1, run at: 1970-01-01T00:06:00.038500Z, at schemeshard: 72057594046678944
2025-05-07T08:58:46.996201Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:189: TTxRunConditionalErase DoComplete: at schemeshard: 72057594046678944
>> TSchemeShardTTLUtility::ValidateTiers [GOOD]
>> TSchemeShardTTLTests::CreateTableShouldSucceedAsyncOnIndexedTable [GOOD]
|91.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest
------- [TM] {asan, default-linux-x86_64, release} ydb/core/http_proxy/ut/inside_ydb_ut/unittest >> HttpProxyInsideYdb::TestIfEnvVariableSet [GOOD]
Test command err:
2025-05-07T08:57:47.590521Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501625386647683060:2062];send_to=[0:7307199536658146131:7762515];
2025-05-07T08:57:47.590933Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message;
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0017fa/r3tmp/tmp4OD4Kw/pdisk_1.dat
2025-05-07T08:57:48.345857Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2025-05-07T08:57:48.345957Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting
2025-05-07T08:57:48.359122Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected
2025-05-07T08:57:48.463521Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded
TServer::EnableGrpc on GrpcPort 12832, node 1
2025-05-07T08:57:48.738687Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe)
2025-05-07T08:57:48.738711Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe)
2025-05-07T08:57:48.738727Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe)
2025-05-07T08:57:48.738848Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration
TClient is connected to server localhost:10820
WaitRootIsUp 'Root'...
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:57:49.217951Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:57:49.243522Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 TClient is connected to server localhost:10820 2025-05-07T08:57:49.652247Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:57:49.667846Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-05-07T08:57:49.669555Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:57:49.695255Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:57:49.834372Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-05-07T08:57:49.938396Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710663, at schemeshard: 72057594046644480 2025-05-07T08:57:49.947085Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 waiting... waiting... 
2025-05-07T08:57:49.992323Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710665, at schemeshard: 72057594046644480 2025-05-07T08:57:49.997704Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:57:50.064625Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:57:50.125474Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:57:50.199001Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:57:50.231244Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:57:50.277478Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:57:50.358321Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:57:52.220584Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501625408122520943:2376], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:57:52.220693Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501625408122520954:2379], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:57:52.220747Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:57:52.224857Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710673:3, at schemeshard: 72057594046644480 2025-05-07T08:57:52.238356Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501625408122520957:2380], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710673 completed, doublechecking } 2025-05-07T08:57:52.344579Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501625408122521008:2868] txid# 281474976710674, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 18], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:57:52.591846Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501625386647683060:2062];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:57:52.591931Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:57:52.783324Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710675. Ctx: { TraceId: 01jtmzectt56p1sxcv43qdwrw5, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MmZiOTEyMjMtZjJkZWQ2ZDMtMjFlMjM4MmYtM2E1MmM0ODA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:57:52.821640Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710676:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:57:52.880028Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710677:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:57:52.950262Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710678:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:57:53.007914Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710679:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:57:53.051343Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710680:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:57:53.094084Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710681:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:57:53.143929Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710682:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:57:53.190363Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710683:0, at schemeshard: 72057594046644480 waiting... ... 
a Data { Scheme: 6 } } } } } } } Value { Struct { Optional { } } Struct { Optional { Bool: false } } } } } 2025-05-07T08:58:45.023739Z node 7 :SQS DEBUG: executor.cpp:287: Request [] Query(idx=GET_USER_SETTINGS_ID) Queue [] Attempt 1 execution duration: 16ms 2025-05-07T08:58:45.024022Z node 7 :SQS TRACE: executor.cpp:325: Request [] Query(idx=GET_USER_SETTINGS_ID) Queue [] Sending mkql execution result: { Status: 48 TxId: 281474976715685 StatusCode: SUCCESS ExecutionEngineStatus: 1 ExecutionEngineResponseStatus: 2 ExecutionEngineEvaluatedResponse { Type { Kind: Struct Struct { Member { Name: "settings" Type { Kind: Optional Optional { Item { Kind: List List { Item { Kind: Struct Struct { Member { Name: "Account" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "Name" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "Value" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } } } } } } } } Member { Name: "truncated" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } } } Value { Struct { Optional { } } Struct { Optional { Bool: false } } } } } 2025-05-07T08:58:45.024056Z node 7 :SQS TRACE: executor.cpp:327: Request [] Query(idx=GET_USER_SETTINGS_ID) Queue [] Minikql data response: {"settings": [], "truncated": false} 2025-05-07T08:58:45.024145Z node 7 :SQS DEBUG: executor.cpp:401: Request [] Query(idx=GET_USER_SETTINGS_ID) Queue [] execution duration: 21ms 2025-05-07T08:58:45.024537Z node 7 :SQS TRACE: user_settings_reader.cpp:89: Handle user settings: { Status: 48 TxId: 281474976715685 StatusCode: SUCCESS ExecutionEngineStatus: 1 ExecutionEngineResponseStatus: 2 ExecutionEngineEvaluatedResponse { Type { Kind: Struct Struct { Member { Name: "settings" Type { Kind: Optional Optional { Item { Kind: List List { Item { Kind: Struct Struct { Member { Name: "Account" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "Name" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "Value" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } } } } } } } } Member { Name: "truncated" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } } } Value { Struct { Optional { } } Struct { Optional { Bool: false } } } } } 2025-05-07T08:58:45.024882Z node 7 :SQS TRACE: executor.cpp:286: Request [] Query(idx=GET_QUEUES_LIST_ID) Queue [] HandleResponse { Status: 48 TxId: 281474976715686 StatusCode: SUCCESS ExecutionEngineStatus: 1 ExecutionEngineResponseStatus: 2 ExecutionEngineEvaluatedResponse { Type { Kind: Struct Struct { Member { Name: "queues" Type { Kind: Optional Optional { Item { Kind: List List { Item { Kind: Struct Struct { Member { Name: "Account" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "CreatedTimestamp" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "CustomQueueName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "DlqName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "FifoQueue" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } Member { Name: "FolderId" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "MasterTabletId" Type { Kind: Optional Optional 
{ Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "QueueName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "QueueState" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "Shards" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "TablesFormat" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 2 } } } } } Member { Name: "Version" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } } } } } } } } Member { Name: "truncated" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } } } Value { Struct { Optional { } } Struct { Optional { Bool: false } } } } } 2025-05-07T08:58:45.024909Z node 7 :SQS DEBUG: executor.cpp:287: Request [] Query(idx=GET_QUEUES_LIST_ID) Queue [] Attempt 1 execution duration: 16ms 2025-05-07T08:58:45.025343Z node 7 :SQS TRACE: executor.cpp:325: Request [] Query(idx=GET_QUEUES_LIST_ID) Queue [] Sending mkql execution result: { Status: 48 TxId: 281474976715686 StatusCode: SUCCESS ExecutionEngineStatus: 1 ExecutionEngineResponseStatus: 2 ExecutionEngineEvaluatedResponse { Type { Kind: Struct Struct { Member { Name: "queues" Type { Kind: Optional Optional { Item { Kind: List List { Item { Kind: Struct Struct { Member { Name: "Account" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "CreatedTimestamp" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "CustomQueueName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "DlqName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "FifoQueue" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } Member { Name: "FolderId" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "MasterTabletId" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "QueueName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "QueueState" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "Shards" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "TablesFormat" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 2 } } } } } Member { Name: "Version" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } } } } } } } } Member { Name: "truncated" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } } } Value { Struct { Optional { } } Struct { Optional { Bool: false } } } } } 2025-05-07T08:58:45.025383Z node 7 :SQS TRACE: executor.cpp:327: Request [] Query(idx=GET_QUEUES_LIST_ID) Queue [] Minikql data response: {"queues": [], "truncated": false} 2025-05-07T08:58:45.025538Z node 7 :SQS DEBUG: executor.cpp:401: Request [] Query(idx=GET_QUEUES_LIST_ID) Queue [] execution duration: 16ms 2025-05-07T08:58:45.026124Z node 7 :SQS TRACE: queues_list_reader.cpp:82: Handle queues list: { Status: 48 TxId: 281474976715686 StatusCode: SUCCESS ExecutionEngineStatus: 1 ExecutionEngineResponseStatus: 2 ExecutionEngineEvaluatedResponse { Type { Kind: Struct Struct { Member { Name: "queues" Type { Kind: Optional Optional { Item { Kind: List List { Item { Kind: Struct Struct { Member { Name: 
"Account" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "CreatedTimestamp" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "CustomQueueName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "DlqName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "FifoQueue" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } Member { Name: "FolderId" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "MasterTabletId" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "QueueName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "QueueState" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "Shards" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "TablesFormat" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 2 } } } } } Member { Name: "Version" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } } } } } } } } Member { Name: "truncated" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } } } Value { Struct { Optional { } } Struct { Optional { Bool: false } } } } } 2025-05-07T08:58:45.262590Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:552: [WorkloadService] [Service] Reply cleanup error NOT_FOUND to [7:7501625630531943845:2442]: Pool not found 2025-05-07T08:58:45.263346Z node 7 :SQS DEBUG: monitoring.cpp:60: [monitoring] Report deletion queue data lag: 0.000000s, count: 0 2025-05-07T08:58:45.702477Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:552: [WorkloadService] [Service] Reply cleanup error NOT_FOUND to [7:7501625630531943837:2441]: Pool not found 2025-05-07T08:58:45.703047Z node 7 :SQS DEBUG: cleanup_queue_data.cpp:100: [cleanup removed queues] getting queues... 2025-05-07T08:58:45.707203Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7501625634826911258:2461], DatabaseId: /Root/SQS, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:58:45.707290Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:602: [WorkloadService] [TDatabaseFetcherActor] ActorId: [7:7501625634826911259:2462], Database: /Root/SQS, Failed to fetch database info, UNSUPPORTED, issues: {
: Error: Invalid database path /Root/SQS, please check the correctness of the path }
2025-05-07T08:58:45.707348Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root/SQS, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T08:58:45.952449Z node 7 :HTTP DEBUG: http_proxy_incoming.cpp:83: (#37,[::1]:55066) incoming connection opened
2025-05-07T08:58:45.952533Z node 7 :HTTP DEBUG: http_proxy_incoming.cpp:145: (#37,[::1]:55066) -> (POST /Root)
2025-05-07T08:58:45.952724Z node 7 :HTTP_PROXY INFO: http_service.cpp:102: proxy service: incoming request from [d87f:2e00:6050:0:c07f:2e00:6050:0] request [UnknownMethodName] url [/Root] database [/Root] requestId: 9fe7f037-df2442f5-f48be00-72ea6299
2025-05-07T08:58:45.953051Z node 7 :HTTP_PROXY INFO: http_req.cpp:1211: http request [UnknownMethodName] requestId [9fe7f037-df2442f5-f48be00-72ea6299] reply with status: UNSUPPORTED message: Missing method name UnknownMethodName
2025-05-07T08:58:45.953253Z node 7 :HTTP DEBUG: http_proxy_incoming.cpp:243: (#37,[::1]:55066) <- (400 InvalidAction)
2025-05-07T08:58:45.953294Z node 7 :HTTP DEBUG: http_proxy_incoming.cpp:252: (#37,[::1]:55066) Request:
POST /Root HTTP/1.1
Host: example.amazonaws.com
X-Amz-Target: kinesisApi.UnknownMethodName
X-Amz-Date: 20150830T123600Z
Authorization:
Content-Type: application/json
Connection: Close
Transfer-Encoding: chunked

3
{ }
0
2025-05-07T08:58:45.953324Z node 7 :HTTP DEBUG: http_proxy_incoming.cpp:259: (#37,[::1]:55066) Response:
HTTP/1.1 400 InvalidAction
Connection: close
x-amzn-requestid: 9fe7f037-df2442f5-f48be00-72ea6299
x-amz-crc32: 139748724
Content-Type: application/x-amz-json-1.1
Content-Length: 76

{"__type":"InvalidAction","message":"Missing method name UnknownMethodName"}
2025-05-07T08:58:45.953403Z node 7 :HTTP DEBUG: http_proxy_incoming.cpp:296: (#37,[::1]:55066) connection closed
Http output full {"__type":"InvalidAction","message":"Missing method name UnknownMethodName"}
400 {"__type":"InvalidAction","message":"Missing method name UnknownMethodName"}
>> TestKinesisHttpProxy::ListShardsToken
|91.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLUtility::ValidateTiers [GOOD]
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLTests::CreateTableShouldSucceedAsyncOnIndexedTable [GOOD]
Test command err:
Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140]
IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140]
Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:128:2058] recipient: [1:108:2140]
2025-05-07T08:58:47.455638Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1
2025-05-07T08:58:47.455754Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10
2025-05-07T08:58:47.455806Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s
2025-05-07T08:58:47.455848Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration
2025-05-07T08:58:47.455900Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type
TxMergeTablePartition, limit 10000 2025-05-07T08:58:47.455935Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:58:47.455992Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:58:47.456119Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:58:47.456999Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:58:47.457387Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:58:47.555705Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:58:47.555786Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:58:47.569444Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:58:47.569544Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:58:47.569693Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:58:47.577403Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:58:47.578003Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:58:47.578739Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:47.579059Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:58:47.581672Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:58:47.583434Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:58:47.583504Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:58:47.583549Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:58:47.583599Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:58:47.583652Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:58:47.583888Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 
72057594046678944 2025-05-07T08:58:47.591363Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:239:2058] recipient: [1:15:2062] 2025-05-07T08:58:47.716136Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:58:47.716341Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:47.716558Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:58:47.716862Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:58:47.716941Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:47.720010Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:47.720193Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:58:47.720427Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:47.720513Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:58:47.720565Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:58:47.720604Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:58:47.723062Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:47.723135Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:58:47.723204Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:58:47.725653Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:47.725728Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:47.725786Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:58:47.725896Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:58:47.730034Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:58:47.732901Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:58:47.733137Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:58:47.734384Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:47.734571Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 130 RawX2: 4294969450 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:58:47.734644Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:58:47.735019Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:58:47.735092Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:58:47.735297Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:58:47.735400Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:58:47.738060Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:58:47.738133Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:58:47.738360Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:58:47.738411Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... , at schemeshard: 72057594046678944 2025-05-07T08:58:48.145021Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:674: all shard schema changes has been received, operationId: 101:2, at schemeshard: 72057594046678944 2025-05-07T08:58:48.145082Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:686: send schema changes ack message, operation: 101:2, datashard: 72075186233409546, at schemeshard: 72057594046678944 2025-05-07T08:58:48.145153Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 101:2 129 -> 240 2025-05-07T08:58:48.146168Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5472: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 327 RawX2: 4294969606 } Origin: 72075186233409547 State: 2 TxId: 101 Step: 0 Generation: 2 2025-05-07T08:58:48.146224Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation FindRelatedPartByTabletId, TxId: 101, tablet: 72075186233409547, partId: 0 2025-05-07T08:58:48.146380Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 101:0, at schemeshard: 72057594046678944, message: Source { RawX1: 327 RawX2: 4294969606 } Origin: 72075186233409547 State: 2 TxId: 101 Step: 0 Generation: 2 2025-05-07T08:58:48.146450Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1005: NTableState::TProposedWaitParts operationId# 101:0 HandleReply TEvSchemaChanged at tablet: 72057594046678944 2025-05-07T08:58:48.146531Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1009: NTableState::TProposedWaitParts operationId# 101:0 HandleReply TEvSchemaChanged at tablet: 72057594046678944 message: Source { RawX1: 327 RawX2: 4294969606 } Origin: 72075186233409547 State: 2 TxId: 101 Step: 0 Generation: 2 2025-05-07T08:58:48.146588Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:655: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 101:0, shardIdx: 72057594046678944:1, datashard: 72075186233409547, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:48.146640Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:674: all shard schema changes has been received, operationId: 101:0, at schemeshard: 72057594046678944 2025-05-07T08:58:48.146675Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:686: send schema changes ack message, operation: 101:0, datashard: 72075186233409547, at schemeshard: 72057594046678944 2025-05-07T08:58:48.146792Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 101:0 129 -> 240 2025-05-07T08:58:48.162743Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-05-07T08:58:48.162890Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-05-07T08:58:48.170822Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-05-07T08:58:48.171251Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 101:2, at schemeshard: 72057594046678944 2025-05-07T08:58:48.171711Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-05-07T08:58:48.172666Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-05-07T08:58:48.173062Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 101:2, at schemeshard: 72057594046678944 2025-05-07T08:58:48.173703Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-05-07T08:58:48.174113Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 101:2, at schemeshard: 72057594046678944 2025-05-07T08:58:48.174177Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046678944] TDone opId# 101:2 ProgressState 2025-05-07T08:58:48.174312Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#101:2 progress is 2/3 2025-05-07T08:58:48.174367Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 101 ready parts: 2/3 2025-05-07T08:58:48.174422Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#101:2 progress is 2/3 2025-05-07T08:58:48.174464Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 101 ready parts: 2/3 2025-05-07T08:58:48.174511Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 101, ready parts: 2/3, is published: true 2025-05-07T08:58:48.175061Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 101:0, at schemeshard: 72057594046678944 2025-05-07T08:58:48.175123Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046678944] TDone opId# 101:0 ProgressState 2025-05-07T08:58:48.175227Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#101:0 progress is 3/3 2025-05-07T08:58:48.175266Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 101 ready parts: 3/3 2025-05-07T08:58:48.175305Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#101:0 progress is 3/3 2025-05-07T08:58:48.175337Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 101 ready parts: 3/3 2025-05-07T08:58:48.175411Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 101, ready parts: 3/3, is published: true 2025-05-07T08:58:48.175514Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:377:2345] message: TxId: 101 2025-05-07T08:58:48.175577Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 101 ready parts: 3/3 2025-05-07T08:58:48.175639Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation 
id: 101:0 2025-05-07T08:58:48.175723Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 101:0 2025-05-07T08:58:48.175956Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-05-07T08:58:48.176045Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 101:1 2025-05-07T08:58:48.176270Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 101:1 2025-05-07T08:58:48.176335Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-05-07T08:58:48.176391Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 101:2 2025-05-07T08:58:48.176435Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 101:2 2025-05-07T08:58:48.176507Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 3 2025-05-07T08:58:48.180412Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-05-07T08:58:48.180472Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:378:2346] TestWaitNotification: OK eventTxId 101 2025-05-07T08:58:48.181187Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/TTLEnabledTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T08:58:48.181501Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/TTLEnabledTable" took 324us result status StatusSuccess 2025-05-07T08:58:48.182117Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/TTLEnabledTable" PathDescription { Self { Name: "TTLEnabledTable" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: true } Table { Name: "TTLEnabledTable" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "modified_at" Type: "Timestamp" TypeId: 50 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableIndexes { Name: "UserDefinedIndexByExpireAt" LocalPathId: 3 Type: EIndexTypeGlobalAsync State: EIndexStateReady KeyColumnNames: "modified_at" SchemaVersion: 1 PathOwnerId: 72057594046678944 DataSize: 0 IndexImplTableDescriptions { } } TableSchemaVersion: 1 TTLSettings { Enabled { ColumnName: "modified_at" ExpireAfterSeconds: 3600 Tiers { ApplyAfterSeconds: 3600 Delete { } } } } 
IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> Cdc::NaN[YdsRunner] [GOOD] >> Cdc::NaN[TopicRunner] >> TSchemeShardTTLTests::BackupCopyHasNoTtlSettings [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/http_proxy/ut/inside_ydb_ut/unittest >> TestYmqHttpProxy::TestDeleteMessageBatch [GOOD] Test command err: 2025-05-07T08:57:48.288249Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501625393156247873:2060];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:57:48.292174Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0017be/r3tmp/tmpBgVWbp/pdisk_1.dat 2025-05-07T08:57:49.043590Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:57:49.092485Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:57:49.092582Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:57:49.107573Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 61169, node 1 2025-05-07T08:57:49.375163Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:57:49.375185Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:57:49.375192Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:57:49.375307Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:2969 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:57:49.702325Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:57:49.717603Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 TClient is connected to server localhost:2969 2025-05-07T08:57:49.962644Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:57:49.977387Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-05-07T08:57:49.983933Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-05-07T08:57:50.032450Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 2025-05-07T08:57:50.208105Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-05-07T08:57:50.260872Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710663, at schemeshard: 72057594046644480 2025-05-07T08:57:50.266591Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-05-07T08:57:50.321488Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T08:57:50.369347Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:57:50.435758Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:57:50.475029Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:57:50.555678Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:57:50.637160Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:57:50.720593Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:57:52.367299Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501625410336118468:2378], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:57:52.367674Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501625410336118460:2375], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:57:52.367746Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:57:52.371697Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710673:3, at schemeshard: 72057594046644480 2025-05-07T08:57:52.388130Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501625410336118474:2379], DatabaseId: /Root, PoolId: default, Scheduled retry for error: { <main>
: Error: Transaction 281474976710673 completed, doublechecking } 2025-05-07T08:57:52.476917Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501625410336118527:2866] txid# 281474976710674, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 18], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:57:52.882625Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710675. Ctx: { TraceId: 01jtmzeczc833p7y68npkyy5rf, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTM4YzM3MTUtYmU5ODA2MDgtMjBlYzZiZDMtYTUwYjgxMWM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:57:52.925746Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710676:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:57:52.994760Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710677:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:57:53.040786Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710678:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:57:53.076958Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710679:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:57:53.150027Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710680:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:57:53.189665Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710681:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:57:53.229001Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710682:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:57:53.269146Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710683:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T08:57:53.288544Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501625393156247873:2060];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:57:53.288600Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:57:53.310584Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreate ... M\003?\000\004\003?\002\177t\257\312\247?e\332\252\003?\004\177w\247k\263\235 {\362\003?\006\000\003?\010\237\000\002~\252e\037/" } FlatMKQL: true } } ExecTimeoutPeriod: 60000 }. Params: {"QUEUE_ID_NUMBER": 2, "QUEUE_ID_NUMBER_HASH": 17472595041006102391, "SHARD": 0, "QUEUE_ID_NUMBER_AND_SHARD_HASH": 12311263855443095412, "TIME_FROM": 1746608326658} 2025-05-07T08:58:46.873931Z node 7 :HTTP_PROXY DEBUG: http_req.cpp:379: http request [DeleteMessageBatch] requestId [bd87c0c0-c74a038c-f8f0dcfb-aa197fd0] Got succesfult GRPC response. 2025-05-07T08:58:46.874168Z node 7 :HTTP_PROXY INFO: http_req.cpp:1207: http request [DeleteMessageBatch] requestId [bd87c0c0-c74a038c-f8f0dcfb-aa197fd0] reply ok 2025-05-07T08:58:46.874311Z node 7 :HTTP_PROXY DEBUG: http_req.cpp:1267: http request [DeleteMessageBatch] requestId [bd87c0c0-c74a038c-f8f0dcfb-aa197fd0] Send metering event. HttpStatusCode: 200 IsFifo: 0 FolderId: folder4 RequestSizeInBytes: 716 ResponseSizeInBytes: 222 SourceAddress: d8bb:a600:6050:0:c0bb:a600:6050:0 ResourceId: 000000000000000101v0 Action: DeleteMessageBatch 2025-05-07T08:58:46.874446Z node 7 :HTTP DEBUG: http_proxy_incoming.cpp:243: (#37,[::1]:52086) <- (200 ) 2025-05-07T08:58:46.874577Z node 7 :HTTP DEBUG: http_proxy_incoming.cpp:296: (#37,[::1]:52086) connection closed Http output full {"Successful":[{"Id":"Id-0"},{"Id":"Id-1"}]} 2025-05-07T08:58:46.879285Z node 7 :SQS TRACE: executor.cpp:286: Request [] Query(idx=GET_OLDEST_MESSAGE_TIMESTAMP_METRIC_ID) Queue [cloud4/000000000000000101v0] HandleResponse { Status: 48 TxId: 281474976710712 StatusCode: SUCCESS ExecutionEngineStatus: 1 ExecutionEngineResponseStatus: 2 ExecutionEngineEvaluatedResponse { Type { Kind: Struct Struct { Member { Name: "messages" Type { Kind: Optional Optional { Item { Kind: List List { Item { Kind: Struct Struct { Member { Name: "Offset" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "SentTimestamp" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } } } } } } } } } } Value { Struct { Optional { } } } } } 2025-05-07T08:58:46.879323Z node 7 :SQS DEBUG: executor.cpp:287: Request [] Query(idx=GET_OLDEST_MESSAGE_TIMESTAMP_METRIC_ID) Queue [cloud4/000000000000000101v0] Attempt 1 execution duration: 5ms 2025-05-07T08:58:46.879482Z node 7 :SQS TRACE: executor.cpp:325: Request [] Query(idx=GET_OLDEST_MESSAGE_TIMESTAMP_METRIC_ID) Queue [cloud4/000000000000000101v0] Sending mkql execution result: { Status: 48 TxId: 281474976710712 StatusCode: SUCCESS ExecutionEngineStatus: 1 ExecutionEngineResponseStatus: 2 ExecutionEngineEvaluatedResponse { Type { Kind: Struct Struct { Member { Name: "messages" Type { Kind: Optional Optional { Item { Kind: List List { Item { Kind: Struct Struct { Member { Name: "Offset" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "SentTimestamp" Type { Kind: Optional 
Optional { Item { Kind: Data Data { Scheme: 4 } } } } } } } } } } } } } } Value { Struct { Optional { } } } } } 2025-05-07T08:58:46.879510Z node 7 :SQS TRACE: executor.cpp:327: Request [] Query(idx=GET_OLDEST_MESSAGE_TIMESTAMP_METRIC_ID) Queue [cloud4/000000000000000101v0] Minikql data response: {"messages": []} 2025-05-07T08:58:46.879587Z node 7 :SQS DEBUG: executor.cpp:401: Request [] Query(idx=GET_OLDEST_MESSAGE_TIMESTAMP_METRIC_ID) Queue [cloud4/000000000000000101v0] execution duration: 7ms 2025-05-07T08:58:46.879688Z node 7 :SQS DEBUG: queue_leader.cpp:556: Request [] Sending executed reply 2025-05-07T08:58:46.879778Z node 7 :SQS DEBUG: queue_leader.cpp:1913: Handle oldest timestamp metrics for [cloud4/000000000000000101v0/0] 2025-05-07T08:58:46.888291Z node 7 :HTTP DEBUG: http_proxy_incoming.cpp:83: (#37,[::1]:52096) incoming connection opened 2025-05-07T08:58:46.888362Z node 7 :HTTP DEBUG: http_proxy_incoming.cpp:145: (#37,[::1]:52096) -> (POST /Root) 2025-05-07T08:58:46.888478Z node 7 :HTTP_PROXY INFO: http_service.cpp:102: proxy service: incoming request from [d85f:7800:6050:0:c05f:7800:6050:0] request [ReceiveMessage] url [/Root] database [/Root] requestId: 52986903-4b257210-92cd731f-ec125ec4 2025-05-07T08:58:46.888762Z node 7 :HTTP_PROXY INFO: http_req.cpp:520: http request [ReceiveMessage] requestId [52986903-4b257210-92cd731f-ec125ec4] got new request from [d85f:7800:6050:0:c05f:7800:6050:0] 2025-05-07T08:58:46.889101Z node 7 :HTTP_PROXY DEBUG: http_req.cpp:454: http request [ReceiveMessage] requestId [52986903-4b257210-92cd731f-ec125ec4] Got cloud auth response. FolderId: folder4 CloudId: cloud4 UserSid: fake_user_sid@as 2025-05-07T08:58:46.889125Z node 7 :HTTP_PROXY INFO: http_req.cpp:280: http request [ReceiveMessage] requestId [52986903-4b257210-92cd731f-ec125ec4] sending grpc request to '' database: '/Root' iam token size: 0 2025-05-07T08:58:46.889297Z node 7 :SQS DEBUG: ymq_proxy.cpp:148: Got new request in YMQ proxy. FolderId: folder4, CloudId: cloud4, UserSid: fake_user_sid@as, RequestId: 52986903-4b257210-92cd731f-ec125ec4 2025-05-07T08:58:46.889410Z node 7 :SQS DEBUG: proxy_actor.cpp:263: Request [52986903-4b257210-92cd731f-ec125ec4] Proxy actor: used user_name='cloud4', queue_name='000000000000000101v0', folder_id='folder4' 2025-05-07T08:58:46.889419Z node 7 :SQS DEBUG: proxy_actor.cpp:78: Request [52986903-4b257210-92cd731f-ec125ec4] Request proxy started 2025-05-07T08:58:46.889522Z node 7 :SQS DEBUG: service.cpp:742: Request [52986903-4b257210-92cd731f-ec125ec4] Answer configuration for queue [cloud4/000000000000000101v0] without leader 2025-05-07T08:58:46.889585Z node 7 :SQS DEBUG: proxy_actor.cpp:97: Request [52986903-4b257210-92cd731f-ec125ec4] Get configuration duration: 0ms 2025-05-07T08:58:46.889675Z node 7 :SQS DEBUG: proxy_service.cpp:246: Request [52986903-4b257210-92cd731f-ec125ec4] Send get leader node request to sqs service for cloud4/000000000000000101v0 2025-05-07T08:58:46.889696Z node 7 :SQS DEBUG: service.cpp:562: Request [52986903-4b257210-92cd731f-ec125ec4] Leader node for queue [cloud4/000000000000000101v0] is 7 2025-05-07T08:58:46.889732Z node 7 :SQS DEBUG: proxy_service.cpp:170: Request [52986903-4b257210-92cd731f-ec125ec4] Got leader node for queue response. Node id: 7. 
Status: 0 2025-05-07T08:58:46.889836Z node 7 :SQS TRACE: proxy_service.cpp:303: Request [52986903-4b257210-92cd731f-ec125ec4] Sending request from proxy to leader node 7: ReceiveMessage { Auth { UserName: "cloud4" FolderId: "folder4" UserSID: "fake_user_sid@as" } QueueName: "000000000000000101v0" } RequestId: "52986903-4b257210-92cd731f-ec125ec4" 2025-05-07T08:58:46.889904Z node 7 :SQS DEBUG: proxy_service.cpp:70: Request [52986903-4b257210-92cd731f-ec125ec4] Received Sqs Request: ReceiveMessage { Auth { UserName: "cloud4" FolderId: "folder4" UserSID: "fake_user_sid@as" } QueueName: "000000000000000101v0" } RequestId: "52986903-4b257210-92cd731f-ec125ec4" 2025-05-07T08:58:46.889981Z node 7 :SQS DEBUG: action.h:131: Request [52986903-4b257210-92cd731f-ec125ec4] Request started. Actor: [7:7501625641574372555:3708] 2025-05-07T08:58:46.890033Z node 7 :SQS TRACE: service.cpp:1453: Inc local leader ref for actor [7:7501625641574372555:3708] 2025-05-07T08:58:46.890055Z node 7 :SQS DEBUG: service.cpp:735: Request [52986903-4b257210-92cd731f-ec125ec4] Forward configuration request to queue [cloud4/000000000000000101v0] leader 2025-05-07T08:58:46.890091Z node 7 :SQS DEBUG: action.h:623: Request [52986903-4b257210-92cd731f-ec125ec4] Get configuration duration: 0ms 2025-05-07T08:58:46.890111Z node 7 :SQS TRACE: action.h:643: Request [52986903-4b257210-92cd731f-ec125ec4] Got configuration. Root url: http://ghrun-sykirh5vua.auto.internal:8771, Shards: 4, Fail: 0 2025-05-07T08:58:46.890136Z node 7 :SQS TRACE: action.h:658: Request [52986903-4b257210-92cd731f-ec125ec4] Got configuration. Attributes: { ContentBasedDeduplication: 0 DelaySeconds: 0.000000s FifoQueue: 0 MaximumMessageSize: 262144 MessageRetentionPeriod: 345600.000000s ReceiveMessageWaitTime: 0.000000s VisibilityTimeout: 30.000000s } 2025-05-07T08:58:46.890150Z node 7 :SQS TRACE: action.h:425: Request [52986903-4b257210-92cd731f-ec125ec4] DoRoutine 2025-05-07T08:58:46.890195Z node 7 :SQS TRACE: queue_leader.cpp:2424: Increment active message requests for [cloud4/000000000000000101v0/3]. ActiveMessageRequests: 1 2025-05-07T08:58:46.890218Z node 7 :SQS DEBUG: queue_leader.cpp:938: Request [52986903-4b257210-92cd731f-ec125ec4] Received empty result from shard 3 infly. Infly capacity: 0. Messages count: 0 2025-05-07T08:58:46.890230Z node 7 :SQS DEBUG: queue_leader.cpp:1162: Request [52986903-4b257210-92cd731f-ec125ec4] No known messages in this shard. Skip attempt to add messages to infly 2025-05-07T08:58:46.890256Z node 7 :SQS DEBUG: queue_leader.cpp:1168: Request [52986903-4b257210-92cd731f-ec125ec4] Already tried to add messages to infly 2025-05-07T08:58:46.890304Z node 7 :SQS TRACE: queue_leader.cpp:2434: Decrement active message requests for [[cloud4/000000000000000101v0/3]. 
ActiveMessageRequests: 0 2025-05-07T08:58:46.890373Z node 7 :SQS TRACE: action.h:262: Request [52986903-4b257210-92cd731f-ec125ec4] SendReplyAndDie from action actor { ReceiveMessage { RequestId: "52986903-4b257210-92cd731f-ec125ec4" } } 2025-05-07T08:58:46.890408Z node 7 :SQS DEBUG: queue_leader.cpp:384: Request ReceiveMessage working duration: 0ms 2025-05-07T08:58:46.890459Z node 7 :SQS TRACE: proxy_service.h:35: Request [52986903-4b257210-92cd731f-ec125ec4] Sending sqs response: { ReceiveMessage { RequestId: "52986903-4b257210-92cd731f-ec125ec4" } RequestId: "52986903-4b257210-92cd731f-ec125ec4" FolderId: "folder4" ResourceId: "000000000000000101v0" IsFifo: false } 2025-05-07T08:58:46.890563Z node 7 :SQS TRACE: proxy_service.cpp:194: HandleSqsResponse ReceiveMessage { RequestId: "52986903-4b257210-92cd731f-ec125ec4" } RequestId: "52986903-4b257210-92cd731f-ec125ec4" FolderId: "folder4" ResourceId: "000000000000000101v0" IsFifo: false 2025-05-07T08:58:46.890742Z node 7 :SQS TRACE: proxy_service.cpp:208: Sending answer to proxy actor [7:7501625641574372554:2550]: ReceiveMessage { RequestId: "52986903-4b257210-92cd731f-ec125ec4" } RequestId: "52986903-4b257210-92cd731f-ec125ec4" FolderId: "folder4" ResourceId: "000000000000000101v0" IsFifo: false 2025-05-07T08:58:46.890789Z node 7 :SQS TRACE: service.cpp:1464: Dec local leader ref for actor [7:7501625641574372555:3708]. Found: 1 2025-05-07T08:58:46.890873Z node 7 :SQS TRACE: proxy_actor.cpp:178: Request [52986903-4b257210-92cd731f-ec125ec4] HandleResponse: { ReceiveMessage { RequestId: "52986903-4b257210-92cd731f-ec125ec4" } RequestId: "52986903-4b257210-92cd731f-ec125ec4" FolderId: "folder4" ResourceId: "000000000000000101v0" IsFifo: false }, status: OK 2025-05-07T08:58:46.890953Z node 7 :SQS DEBUG: proxy_actor.cpp:147: Request [52986903-4b257210-92cd731f-ec125ec4] Sending reply from proxy actor: { ReceiveMessage { RequestId: "52986903-4b257210-92cd731f-ec125ec4" } RequestId: "52986903-4b257210-92cd731f-ec125ec4" FolderId: "folder4" ResourceId: "000000000000000101v0" IsFifo: false } Http output full {} 2025-05-07T08:58:46.891103Z node 7 :HTTP_PROXY DEBUG: http_req.cpp:379: http request [ReceiveMessage] requestId [52986903-4b257210-92cd731f-ec125ec4] Got succesfult GRPC response. 2025-05-07T08:58:46.891154Z node 7 :HTTP_PROXY INFO: http_req.cpp:1207: http request [ReceiveMessage] requestId [52986903-4b257210-92cd731f-ec125ec4] reply ok 2025-05-07T08:58:46.891237Z node 7 :HTTP_PROXY DEBUG: http_req.cpp:1267: http request [ReceiveMessage] requestId [52986903-4b257210-92cd731f-ec125ec4] Send metering event. 
HttpStatusCode: 200 IsFifo: 0 FolderId: folder4 RequestSizeInBytes: 526 ResponseSizeInBytes: 179 SourceAddress: d85f:7800:6050:0:c05f:7800:6050:0 ResourceId: 000000000000000101v0 Action: ReceiveMessage 2025-05-07T08:58:46.891332Z node 7 :HTTP DEBUG: http_proxy_incoming.cpp:243: (#37,[::1]:52096) <- (200 ) 2025-05-07T08:58:46.891453Z node 7 :HTTP DEBUG: http_proxy_incoming.cpp:296: (#37,[::1]:52096) connection closed >> BackupRestoreS3::TestAllIndexTypes-EIndexTypeGlobalVectorKmeansTree [GOOD] >> BackupRestoreS3::PrefixedVectorIndex >> TSchemeShardTTLTests::AlterTableShouldSucceedOnAsyncIndexedTable [GOOD] >> TCmsTest::Mirror3dcPermissions [GOOD] |91.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest |91.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest |91.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/dump_restore/py3test >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_index_4__SYNC-pk_types0-all_types0-index0---SYNC] [GOOD] >> TestKinesisHttpProxy::ErroneousRequestGetRecords [GOOD] >> TSchemeShardTTLTests::CreateTableShouldSucceed-EnableTablePgTypes-true ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLTests::AlterTableShouldSucceedOnAsyncIndexedTable [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140] 2025-05-07T08:58:48.643174Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:58:48.643271Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:58:48.643317Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:58:48.643375Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:58:48.643422Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:58:48.643473Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:58:48.643540Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:58:48.643649Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:58:48.644460Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 
2025-05-07T08:58:48.644815Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:58:48.728467Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:58:48.728506Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:58:48.752696Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:58:48.752920Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:58:48.753087Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:58:48.760394Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:58:48.760682Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:58:48.761408Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:48.761586Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:58:48.767568Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:58:48.768954Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:58:48.769025Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:58:48.769148Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:58:48.769202Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:58:48.769246Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:58:48.769474Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:58:48.777728Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T08:58:48.930798Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:58:48.931030Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:48.931239Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:58:48.931499Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:58:48.931561Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:48.935305Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:48.935437Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:58:48.935633Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:48.935698Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:58:48.935762Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:58:48.935805Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:58:48.940456Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:48.940537Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:58:48.940581Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:58:48.943074Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:48.943143Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:48.943188Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:58:48.943250Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:58:48.946889Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:58:48.950786Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 
72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:58:48.951026Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:58:48.952006Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:48.952160Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:58:48.952226Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:58:48.952531Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:58:48.952598Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:58:48.952779Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:58:48.952893Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:58:48.957358Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:58:48.957445Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:58:48.957665Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:58:48.957719Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 
44, LocalPathId: 2] 2025-05-07T08:58:49.330870Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:58:49.330915Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:205:2207], at schemeshard: 72057594046678944, txId: 102, path id: 2 2025-05-07T08:58:49.331531Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-05-07T08:58:49.331592Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1036: NTableState::TProposedWaitParts operationId# 102:0 ProgressState at tablet: 72057594046678944 2025-05-07T08:58:49.332245Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 4 PathOwnerId: 72057594046678944, cookie: 102 2025-05-07T08:58:49.332338Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 4 PathOwnerId: 72057594046678944, cookie: 102 2025-05-07T08:58:49.332376Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 102 2025-05-07T08:58:49.332416Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 4 2025-05-07T08:58:49.332457Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5 2025-05-07T08:58:49.332582Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 102, ready parts: 0/1, is published: true 2025-05-07T08:58:49.336329Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-05-07T08:58:49.348606Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6245: Handle TEvProposeTransactionResult, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409547 Status: COMPLETE TxId: 102 Step: 5000003 OrderId: 102 ExecLatency: 0 ProposeLatency: 4 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409547 CpuTimeUsec: 1196 } } 2025-05-07T08:58:49.348675Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation FindRelatedPartByTabletId, TxId: 102, tablet: 72075186233409547, partId: 0 2025-05-07T08:58:49.348800Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 102:0, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409547 Status: COMPLETE TxId: 102 Step: 5000003 OrderId: 102 ExecLatency: 0 ProposeLatency: 4 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409547 CpuTimeUsec: 1196 } } 2025-05-07T08:58:49.348924Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_part.cpp:108: HandleReply TEvDataShard::TEvProposeTransactionResult Ignore message: tablet# 72057594046678944, ev# 
TxKind: TX_KIND_SCHEME Origin: 72075186233409547 Status: COMPLETE TxId: 102 Step: 5000003 OrderId: 102 ExecLatency: 0 ProposeLatency: 4 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409547 CpuTimeUsec: 1196 } } FAKE_COORDINATOR: Erasing txId 102 2025-05-07T08:58:49.350003Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5472: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 326 RawX2: 4294969605 } Origin: 72075186233409547 State: 2 TxId: 102 Step: 0 Generation: 2 2025-05-07T08:58:49.350064Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation FindRelatedPartByTabletId, TxId: 102, tablet: 72075186233409547, partId: 0 2025-05-07T08:58:49.350229Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 102:0, at schemeshard: 72057594046678944, message: Source { RawX1: 326 RawX2: 4294969605 } Origin: 72075186233409547 State: 2 TxId: 102 Step: 0 Generation: 2 2025-05-07T08:58:49.350284Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1005: NTableState::TProposedWaitParts operationId# 102:0 HandleReply TEvSchemaChanged at tablet: 72057594046678944 2025-05-07T08:58:49.350409Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1009: NTableState::TProposedWaitParts operationId# 102:0 HandleReply TEvSchemaChanged at tablet: 72057594046678944 message: Source { RawX1: 326 RawX2: 4294969605 } Origin: 72075186233409547 State: 2 TxId: 102 Step: 0 Generation: 2 2025-05-07T08:58:49.350590Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:655: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 102:0, shardIdx: 72057594046678944:1, datashard: 72075186233409547, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:49.350644Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:674: all shard schema changes has been received, operationId: 102:0, at schemeshard: 72057594046678944 2025-05-07T08:58:49.350712Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:686: send schema changes ack message, operation: 102:0, datashard: 72075186233409547, at schemeshard: 72057594046678944 2025-05-07T08:58:49.350766Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 102:0 129 -> 240 2025-05-07T08:58:49.355557Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-05-07T08:58:49.355927Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-05-07T08:58:49.356066Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-05-07T08:58:49.356122Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046678944] TDone opId# 102:0 ProgressState 2025-05-07T08:58:49.356232Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#102:0 progress is 1/1 2025-05-07T08:58:49.356264Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-05-07T08:58:49.356300Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__operation_side_effects.cpp:906: Part operation is done id#102:0 progress is 1/1 2025-05-07T08:58:49.356348Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-05-07T08:58:49.356403Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: true 2025-05-07T08:58:49.356479Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:378:2346] message: TxId: 102 2025-05-07T08:58:49.356548Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-05-07T08:58:49.356585Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 102:0 2025-05-07T08:58:49.356617Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 102:0 2025-05-07T08:58:49.356751Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-05-07T08:58:49.358884Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-05-07T08:58:49.358938Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:507:2432] TestWaitNotification: OK eventTxId 102 2025-05-07T08:58:49.359482Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/TTLEnabledTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T08:58:49.359845Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/TTLEnabledTable" took 359us result status StatusSuccess 2025-05-07T08:58:49.360237Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/TTLEnabledTable" PathDescription { Self { Name: "TTLEnabledTable" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 1 } ChildrenExist: true } Table { Name: "TTLEnabledTable" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "modified_at" Type: "Timestamp" TypeId: 50 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableIndexes { Name: "UserDefinedIndexByExpireAt" LocalPathId: 3 Type: EIndexTypeGlobalAsync State: EIndexStateReady KeyColumnNames: "modified_at" SchemaVersion: 1 PathOwnerId: 72057594046678944 DataSize: 0 IndexImplTableDescriptions { } } TableSchemaVersion: 2 TTLSettings { Enabled { ColumnName: "modified_at" ExpireAfterSeconds: 3600 Tiers { ApplyAfterSeconds: 3600 Delete { } } } } IsBackup: false IsRestore: false } TableStats { DataSize: 0 
RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLTests::BackupCopyHasNoTtlSettings [GOOD]
Test command err:
Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140] 2025-05-07T08:58:48.486759Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:58:48.486851Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:58:48.486893Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:58:48.486937Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:58:48.486988Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:58:48.487030Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:58:48.487097Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:58:48.487188Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:58:48.487944Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:58:48.488297Z node 1
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:58:48.631332Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:58:48.631406Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:58:48.669933Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:58:48.674367Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:58:48.674560Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:58:48.704150Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:58:48.704457Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:58:48.705163Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:48.705338Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:58:48.709026Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:58:48.710369Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:58:48.710432Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:58:48.710510Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:58:48.710559Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:58:48.710604Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:58:48.710809Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:58:48.726807Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T08:58:48.862945Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:58:48.863173Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:48.863410Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:58:48.863649Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:58:48.863720Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:48.866327Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:48.866476Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:58:48.866695Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:48.866746Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:58:48.866803Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:58:48.866846Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:58:48.869529Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:48.869593Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:58:48.869627Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:58:48.871754Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:48.871842Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:48.871888Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:58:48.871952Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:58:48.882710Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:58:48.884878Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 
269090816 2025-05-07T08:58:48.885111Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:58:48.886034Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:48.886202Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:58:48.886276Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:58:48.886730Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:58:48.886788Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:58:48.886986Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:58:48.887065Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:58:48.889398Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:58:48.889456Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:58:48.889657Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:58:48.889693Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 
33409547 CpuTimeUsec: 1263 } } 2025-05-07T08:58:49.254695Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_part.cpp:108: HandleReply TEvDataShard::TEvProposeTransactionResult Ignore message: tablet# 72057594046678944, ev# TxKind: TX_KIND_SCHEME Origin: 72075186233409547 Status: COMPLETE TxId: 102 Step: 5000003 OrderId: 102 ExecLatency: 2 ProposeLatency: 4 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409547 CpuTimeUsec: 1263 } } 2025-05-07T08:58:49.255243Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5472: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 409 RawX2: 4294969674 } Origin: 72075186233409547 State: 2 TxId: 102 Step: 0 Generation: 2 2025-05-07T08:58:49.255295Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation FindRelatedPartByTabletId, TxId: 102, tablet: 72075186233409547, partId: 0 2025-05-07T08:58:49.255462Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 102:0, at schemeshard: 72057594046678944, message: Source { RawX1: 409 RawX2: 4294969674 } Origin: 72075186233409547 State: 2 TxId: 102 Step: 0 Generation: 2 2025-05-07T08:58:49.255515Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1005: NTableState::TProposedWaitParts operationId# 102:0 HandleReply TEvSchemaChanged at tablet: 72057594046678944 2025-05-07T08:58:49.255588Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1009: NTableState::TProposedWaitParts operationId# 102:0 HandleReply TEvSchemaChanged at tablet: 72057594046678944 message: Source { RawX1: 409 RawX2: 4294969674 } Origin: 72075186233409547 State: 2 TxId: 102 Step: 0 Generation: 2 2025-05-07T08:58:49.255645Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:655: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 102:0, shardIdx: 72057594046678944:2, datashard: 72075186233409547, left await: 1, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:49.255689Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1014: NTableState::TProposedWaitParts operationId# 102:0 HandleReply TEvSchemaChanged CollectSchemaChanged: false 2025-05-07T08:58:49.260371Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-05-07T08:58:49.260560Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-05-07T08:58:49.272752Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5472: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 305 RawX2: 4294969588 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 2025-05-07T08:58:49.272822Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation FindRelatedPartByTabletId, TxId: 102, tablet: 72075186233409546, partId: 0 2025-05-07T08:58:49.272975Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 102:0, at schemeshard: 72057594046678944, message: Source { RawX1: 305 RawX2: 4294969588 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 2025-05-07T08:58:49.273020Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__operation_common.cpp:1005: NTableState::TProposedWaitParts operationId# 102:0 HandleReply TEvSchemaChanged at tablet: 72057594046678944 2025-05-07T08:58:49.273114Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1009: NTableState::TProposedWaitParts operationId# 102:0 HandleReply TEvSchemaChanged at tablet: 72057594046678944 message: Source { RawX1: 305 RawX2: 4294969588 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 2025-05-07T08:58:49.273185Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:655: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 102:0, shardIdx: 72057594046678944:1, datashard: 72075186233409546, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:49.273227Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:674: all shard schema changes has been received, operationId: 102:0, at schemeshard: 72057594046678944 2025-05-07T08:58:49.273296Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:686: send schema changes ack message, operation: 102:0, datashard: 72075186233409546, at schemeshard: 72057594046678944 2025-05-07T08:58:49.273348Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:686: send schema changes ack message, operation: 102:0, datashard: 72075186233409547, at schemeshard: 72057594046678944 2025-05-07T08:58:49.273380Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 102:0 129 -> 240 2025-05-07T08:58:49.275330Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-05-07T08:58:49.275734Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-05-07T08:58:49.275784Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_copy_table.cpp:306: TCopyTable TCopyTableBarrier operationId: 102:0ProgressState, operation type TxCopyTable 2025-05-07T08:58:49.275854Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:1059: Set barrier, OperationId: 102:0, name: CopyTableBarrier, done: 0, blocked: 1, parts count: 1 2025-05-07T08:58:49.275926Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:1103: All parts have reached barrier, tx: 102, done: 0, blocked: 1 2025-05-07T08:58:49.276007Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_copy_table.cpp:289: TCopyTable TCopyTableBarrier operationId: 102:0 HandleReply TEvPrivate::TEvCompleteBarrier, msg: NKikimr::NSchemeShard::TEvPrivate::TEvCompleteBarrier { TxId: 102 Name: CopyTableBarrier }, at tablet# 72057594046678944 2025-05-07T08:58:49.276044Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 102:0 240 -> 240 2025-05-07T08:58:49.278631Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-05-07T08:58:49.278688Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046678944] TDone opId# 102:0 ProgressState 2025-05-07T08:58:49.278794Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#102:0 progress is 1/1 2025-05-07T08:58:49.278832Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: 
TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-05-07T08:58:49.278873Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#102:0 progress is 1/1 2025-05-07T08:58:49.278907Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-05-07T08:58:49.278954Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: true 2025-05-07T08:58:49.279047Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:334:2313] message: TxId: 102 2025-05-07T08:58:49.279107Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-05-07T08:58:49.279148Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 102:0 2025-05-07T08:58:49.279182Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 102:0 2025-05-07T08:58:49.279334Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-05-07T08:58:49.279370Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate source path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-05-07T08:58:49.281559Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-05-07T08:58:49.281620Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:438:2399] TestWaitNotification: OK eventTxId 102 2025-05-07T08:58:49.282213Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/TTLEnabledTableCopy" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T08:58:49.282490Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/TTLEnabledTableCopy" took 320us result status StatusSuccess 2025-05-07T08:58:49.282954Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/TTLEnabledTableCopy" PathDescription { Self { Name: "TTLEnabledTableCopy" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 102 CreateStep: 5000003 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "TTLEnabledTableCopy" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "ts" Type: "Timestamp" TypeId: 50 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 1 IsBackup: true IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 
LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944
|91.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/cms/ut/unittest >> TCmsTest::Mirror3dcPermissions [GOOD]
>> TFlatTableExecutor_IndexLoading::Scan_FlatIndex [GOOD]
>> TFlatTableExecutor_IndexLoading::Scan_BTreeIndex
>> TSchemeShardTTLTests::CreateTableShouldSucceed-EnableTablePgTypes-false
>> TSchemeShardTTLTests::AlterTableShouldSucceedOnIndexedTable
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_cdc_stream/unittest >> TCdcStreamTests::DropTableWithIndexWithStream [GOOD]
Test command err:
Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140] 2025-05-07T08:55:57.864048Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:55:57.864145Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:55:57.864197Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:55:57.864243Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:55:57.864287Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:55:57.864319Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:55:57.864389Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:55:57.864465Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10,
DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:55:57.865301Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:55:57.865663Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:55:57.957665Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:55:57.957750Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:55:57.976760Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:55:57.976983Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:55:57.977177Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:55:57.984290Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:55:57.984657Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:55:57.985511Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:55:57.985755Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:55:57.990007Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:55:57.991562Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:55:57.991634Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:55:57.991743Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:55:57.991794Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:55:57.991906Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:55:57.992207Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:55:58.009708Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T08:55:58.171809Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 
TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:55:58.172065Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:55:58.172325Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:55:58.172556Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:55:58.172617Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:55:58.180223Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:55:58.180405Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:55:58.180658Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:55:58.180756Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:55:58.180807Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:55:58.180845Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:55:58.187486Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:55:58.187597Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:55:58.187664Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:55:58.190222Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:55:58.190301Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:55:58.190357Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:55:58.190422Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:55:58.194386Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 
MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:55:58.197171Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:55:58.197437Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:55:58.198642Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:55:58.198814Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:55:58.198872Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:55:58.199198Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:55:58.199270Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:55:58.199464Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:55:58.199571Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:55:58.207282Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:55:58.207384Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:55:58.207599Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:55:58.207645Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 
18446744073709551615 PathOwnerId: 72057594046678944, cookie: 103 2025-05-07T08:58:45.734243Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 103 2025-05-07T08:58:45.734298Z node 19 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 103 2025-05-07T08:58:45.734984Z node 19 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 103 2025-05-07T08:58:45.735087Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 103 2025-05-07T08:58:45.735122Z node 19 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 103 2025-05-07T08:58:45.735158Z node 19 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 103, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], version: 18446744073709551615 2025-05-07T08:58:45.735212Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 4 2025-05-07T08:58:45.735321Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 103, ready parts: 4/5, is published: true 2025-05-07T08:58:45.744202Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 103:0, at schemeshard: 72057594046678944 2025-05-07T08:58:45.744312Z node 19 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_table.cpp:414: TDropTable TProposedDeletePart operationId: 103:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:58:45.744765Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove table for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-05-07T08:58:45.744949Z node 19 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#103:0 progress is 5/5 2025-05-07T08:58:45.744993Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 103 ready parts: 5/5 2025-05-07T08:58:45.745045Z node 19 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#103:0 progress is 5/5 2025-05-07T08:58:45.745079Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 103 ready parts: 5/5 2025-05-07T08:58:45.745125Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 103, ready parts: 5/5, is published: true 2025-05-07T08:58:45.745228Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [19:374:2342] message: TxId: 103 2025-05-07T08:58:45.745338Z node 19 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 103 ready parts: 5/5 2025-05-07T08:58:45.745419Z node 19 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 103:0 2025-05-07T08:58:45.745494Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 103:0 2025-05-07T08:58:45.745678Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-05-07T08:58:45.745758Z node 19 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 103:1 2025-05-07T08:58:45.745787Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 103:1 2025-05-07T08:58:45.745849Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-05-07T08:58:45.745910Z node 19 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 103:2 2025-05-07T08:58:45.745939Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 103:2 2025-05-07T08:58:45.749440Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 3 2025-05-07T08:58:45.749548Z node 19 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 103:3 2025-05-07T08:58:45.749594Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 103:3 2025-05-07T08:58:45.749646Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 2 2025-05-07T08:58:45.749675Z node 19 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 103:4 2025-05-07T08:58:45.749702Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 103:4 2025-05-07T08:58:45.749779Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 6] was 1 2025-05-07T08:58:45.751313Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-05-07T08:58:45.751437Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 6], at schemeshard: 72057594046678944 2025-05-07T08:58:45.751593Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 1 2025-05-07T08:58:45.751689Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 5], at schemeshard: 72057594046678944 2025-05-07T08:58:45.751752Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove path 
for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 2 2025-05-07T08:58:45.752283Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-05-07T08:58:45.752844Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-05-07T08:58:45.755114Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-05-07T08:58:45.755201Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-05-07T08:58:45.755331Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-05-07T08:58:45.755404Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-05-07T08:58:45.757909Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-05-07T08:58:45.758028Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [19:752:2655] 2025-05-07T08:58:45.758204Z node 19 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 2 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 103 2025-05-07T08:58:45.759101Z node 19 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/Index/indexImplTable/Stream" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-05-07T08:58:45.759485Z node 19 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/Index/indexImplTable/Stream" took 437us result status StatusPathDoesNotExist 2025-05-07T08:58:45.759735Z node 19 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/Table/Index/indexImplTable/Stream\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot/Table/Index/indexImplTable\' (id: [OwnerId: 72057594046678944, LocalPathId: 4])" Path: "/MyRoot/Table/Index/indexImplTable/Stream" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-05-07T08:58:45.760590Z node 19 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/Index/indexImplTable/Stream/streamImpl" Options { 
ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-05-07T08:58:45.760980Z node 19 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/Index/indexImplTable/Stream/streamImpl" took 425us result status StatusPathDoesNotExist 2025-05-07T08:58:45.761196Z node 19 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/Table/Index/indexImplTable/Stream/streamImpl\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot/Table/Index/indexImplTable\' (id: [OwnerId: 72057594046678944, LocalPathId: 4])" Path: "/MyRoot/Table/Index/indexImplTable/Stream/streamImpl" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944
|91.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest
>> TestKinesisHttpProxy::GoodRequestCreateStream
|91.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/ymq/actor/ut/ydb-core-ymq-actor-ut
|91.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/ymq/actor/ut/ydb-core-ymq-actor-ut
|91.3%| [LD] {RESULT} $(B)/ydb/core/ymq/actor/ut/ydb-core-ymq-actor-ut
>> Cdc::Drop[TopicRunner] [GOOD]
>> Cdc::DescribeStream
>> TCmsTest::TestKeepAvailableMode
>> CommitOffset::Commit_WithoutSession_TopPast
>> AsyncIndexChangeExchange::ShouldRejectChangesOnQueueOverflowBySize [GOOD]
>> TPersQueueTest::PreferredCluster_EnabledRemotePreferredClusterAndCloseClientSessionWithEnabledRemotePreferredClusterDelaySec_SessionDiesOnlyAfterDelay [GOOD]
>> TPersQueueTest::PreferredCluster_NonExistentPreferredCluster_SessionDiesOnlyAfterDelay
>> AsyncIndexChangeExchange::ShouldNotReorderChangesOnRace
>> TSchemeShardTTLTests::AlterTableShouldSucceedOnIndexedTable [GOOD]
>> TopicAutoscaling::PartitionSplit_ReadEmptyPartitions_BeforeAutoscaleAwareSDK
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLTests::AlterTableShouldSucceedOnIndexedTable [GOOD]
Test command err:
Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:128:2058] recipient: [1:108:2140] 2025-05-07T08:58:51.871974Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:58:51.872084Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:58:51.872129Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout#
0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:58:51.872166Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:58:51.872209Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:58:51.872236Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:58:51.872288Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:58:51.872377Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:58:51.873149Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:58:51.873492Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:58:51.956697Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:58:51.956767Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:58:51.974679Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:58:51.974804Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:58:51.974949Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:58:51.983357Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:58:51.983980Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:58:51.984627Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:51.984905Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:58:51.987055Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:58:51.988506Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:58:51.988563Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:58:51.988614Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:58:51.988660Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: 
[OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:58:51.988708Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:58:51.988962Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:58:51.995559Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:239:2058] recipient: [1:15:2062] 2025-05-07T08:58:52.136963Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:58:52.137125Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:52.137292Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:58:52.137503Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:58:52.137559Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:52.140543Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:52.140691Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:58:52.140904Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:52.140976Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:58:52.141018Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:58:52.141051Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:58:52.144477Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:52.144544Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:58:52.144584Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:58:52.146669Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: 
TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:52.146719Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:52.146758Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:58:52.146812Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:58:52.149385Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:58:52.151345Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:58:52.151504Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:58:52.152439Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:52.152576Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 130 RawX2: 4294969450 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:58:52.152637Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:58:52.152938Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:58:52.153006Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:58:52.153194Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:58:52.153284Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:58:52.156493Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:58:52.156539Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, 
txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:58:52.156698Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:58:52.156738Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 6678944, LocalPathId: 2] 2025-05-07T08:58:52.636434Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:58:52.636485Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:206:2208], at schemeshard: 72057594046678944, txId: 102, path id: 2 2025-05-07T08:58:52.637616Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-05-07T08:58:52.637697Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1036: NTableState::TProposedWaitParts operationId# 102:0 ProgressState at tablet: 72057594046678944 2025-05-07T08:58:52.638710Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 4 PathOwnerId: 72057594046678944, cookie: 102 2025-05-07T08:58:52.638823Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 4 PathOwnerId: 72057594046678944, cookie: 102 2025-05-07T08:58:52.638866Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 102 2025-05-07T08:58:52.638911Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 4 2025-05-07T08:58:52.638961Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5 2025-05-07T08:58:52.639056Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 102, ready parts: 0/1, is published: true 2025-05-07T08:58:52.650143Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-05-07T08:58:52.678661Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6245: Handle TEvProposeTransactionResult, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409547 Status: COMPLETE TxId: 102 Step: 5000003 OrderId: 102 ExecLatency: 0 ProposeLatency: 4 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409547 CpuTimeUsec: 1237 } } 2025-05-07T08:58:52.678724Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation FindRelatedPartByTabletId, TxId: 102, tablet: 72075186233409547, partId: 0 2025-05-07T08:58:52.678890Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 102:0, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409547 
Status: COMPLETE TxId: 102 Step: 5000003 OrderId: 102 ExecLatency: 0 ProposeLatency: 4 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409547 CpuTimeUsec: 1237 } } 2025-05-07T08:58:52.679060Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_part.cpp:108: HandleReply TEvDataShard::TEvProposeTransactionResult Ignore message: tablet# 72057594046678944, ev# TxKind: TX_KIND_SCHEME Origin: 72075186233409547 Status: COMPLETE TxId: 102 Step: 5000003 OrderId: 102 ExecLatency: 0 ProposeLatency: 4 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409547 CpuTimeUsec: 1237 } } FAKE_COORDINATOR: Erasing txId 102 2025-05-07T08:58:52.679893Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5472: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 327 RawX2: 4294969606 } Origin: 72075186233409547 State: 2 TxId: 102 Step: 0 Generation: 2 2025-05-07T08:58:52.679999Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation FindRelatedPartByTabletId, TxId: 102, tablet: 72075186233409547, partId: 0 2025-05-07T08:58:52.680159Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 102:0, at schemeshard: 72057594046678944, message: Source { RawX1: 327 RawX2: 4294969606 } Origin: 72075186233409547 State: 2 TxId: 102 Step: 0 Generation: 2 2025-05-07T08:58:52.680209Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1005: NTableState::TProposedWaitParts operationId# 102:0 HandleReply TEvSchemaChanged at tablet: 72057594046678944 2025-05-07T08:58:52.680297Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1009: NTableState::TProposedWaitParts operationId# 102:0 HandleReply TEvSchemaChanged at tablet: 72057594046678944 message: Source { RawX1: 327 RawX2: 4294969606 } Origin: 72075186233409547 State: 2 TxId: 102 Step: 0 Generation: 2 2025-05-07T08:58:52.680357Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:655: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 102:0, shardIdx: 72057594046678944:1, datashard: 72075186233409547, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:52.680404Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:674: all shard schema changes has been received, operationId: 102:0, at schemeshard: 72057594046678944 2025-05-07T08:58:52.680439Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:686: send schema changes ack message, operation: 102:0, datashard: 72075186233409547, at schemeshard: 72057594046678944 2025-05-07T08:58:52.680478Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 102:0 129 -> 240 2025-05-07T08:58:52.683695Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-05-07T08:58:52.689732Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-05-07T08:58:52.690160Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-05-07T08:58:52.690216Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046678944] 
TDone opId# 102:0 ProgressState 2025-05-07T08:58:52.690352Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#102:0 progress is 1/1 2025-05-07T08:58:52.690395Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-05-07T08:58:52.690438Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#102:0 progress is 1/1 2025-05-07T08:58:52.690468Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-05-07T08:58:52.690505Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: true 2025-05-07T08:58:52.690577Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:377:2345] message: TxId: 102 2025-05-07T08:58:52.690630Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-05-07T08:58:52.690672Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 102:0 2025-05-07T08:58:52.690765Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 102:0 2025-05-07T08:58:52.690949Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-05-07T08:58:52.693154Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-05-07T08:58:52.693214Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:460:2421] TestWaitNotification: OK eventTxId 102 2025-05-07T08:58:52.693776Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/TTLEnabledTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T08:58:52.694071Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/TTLEnabledTable" took 301us result status StatusSuccess 2025-05-07T08:58:52.694586Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/TTLEnabledTable" PathDescription { Self { Name: "TTLEnabledTable" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 1 } ChildrenExist: true } Table { Name: "TTLEnabledTable" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "modified_at" Type: "Timestamp" TypeId: 50 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableIndexes { Name: "UserDefinedIndexByExpireAt" 
LocalPathId: 3 Type: EIndexTypeGlobal State: EIndexStateReady KeyColumnNames: "modified_at" SchemaVersion: 1 PathOwnerId: 72057594046678944 DataSize: 0 IndexImplTableDescriptions { } } TableSchemaVersion: 2 TTLSettings { Enabled { ColumnName: "modified_at" ExpireAfterSeconds: 3600 Tiers { ApplyAfterSeconds: 3600 Delete { } } } } IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TopicAutoscaling::Simple_PQv1 [GOOD] >> TopicAutoscaling::ReadingAfterSplitTest_PreferedPartition_BeforeAutoscaleAwareSDK >> TestYmqHttpProxy::TestChangeMessageVisibilityBatch [GOOD] >> TestKinesisHttpProxy::TestWrongRequest [GOOD] >> TopicAutoscaling::ReadingAfterSplitTest_BeforeAutoscaleAwareSDK >> CommitOffset::PartitionSplit_OffsetCommit >> TCmsTest::TestKeepAvailableMode [GOOD] >> TCmsTest::TestKeepAvailableModeDisconnects |91.3%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest |91.3%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest |91.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/security/certificate_check/ut/unittest >> TCertificateCheckerTest::CheckSubjectDns >> TCertificateAuthUtilsTest::ClientCertAuthorizationParamsMatch [GOOD] >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_ttl_Uint64-pk_types10-all_types10-index10-Uint64--] >> Cdc::UpdateRetentionPeriod [GOOD] >> Cdc::SupportedTypes |91.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/async_replication/py3test |91.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/security/certificate_check/ut/unittest >> TCertificateAuthUtilsTest::ClientCertAuthorizationParamsMatch [GOOD] >> Balancing::Balancing_ManyTopics_TopicApi [GOOD] >> Balancing::Balancing_ManyTopics_PQv1 >> RetryPolicy::TWriteSession_SeqNoShift [GOOD] >> RetryPolicy::RetryWithBatching ------- [TM] {asan, default-linux-x86_64, release} ydb/core/http_proxy/ut/inside_ydb_ut/unittest >> TestYmqHttpProxy::TestChangeMessageVisibilityBatch [GOOD] Test command err: 2025-05-07T08:57:46.645730Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501625384172391938:2192];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:57:46.645865Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path 
existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0018b3/r3tmp/tmpoRCZ3U/pdisk_1.dat 2025-05-07T08:57:47.402436Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:57:47.429872Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:57:47.429946Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:57:47.439113Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 19653, node 1 2025-05-07T08:57:47.593468Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:57:47.593506Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:57:47.593521Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:57:47.593665Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:17626 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:57:48.085842Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:57:48.098596Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 TClient is connected to server localhost:17626 2025-05-07T08:57:48.443363Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:57:48.459337Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-05-07T08:57:48.466991Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T08:57:48.495774Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:57:48.788240Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-05-07T08:57:48.858315Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710663, at schemeshard: 72057594046644480 2025-05-07T08:57:48.862657Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-05-07T08:57:48.928625Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:57:48.991882Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:57:49.073686Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:57:49.141643Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:57:49.214519Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:57:49.290196Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:57:49.368500Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T08:57:51.650139Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501625384172391938:2192];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:57:51.650234Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:57:51.790633Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501625405647229678:2377], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:57:51.790726Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:57:51.790932Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501625405647229690:2380], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:57:51.794987Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710673:3, at schemeshard: 72057594046644480 2025-05-07T08:57:51.810185Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501625405647229692:2381], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710673 completed, doublechecking } 2025-05-07T08:57:51.870825Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501625405647229743:2866] txid# 281474976710674, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 18], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:57:52.222525Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710675. Ctx: { TraceId: 01jtmzecdbdh07vbnq0e3tmpwp, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NGRhMmE4YzYtZTg3ZTdlMTUtNGVlMWZlOTYtYjdhNmJkMGM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:57:52.269820Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710676:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:57:52.307641Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710677:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:57:52.352631Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710678:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:57:52.403729Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710679:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:57:52.437329Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710680:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:57:52.475311Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710681:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:57:52.516127Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710682:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:57:52.553346Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710683:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:57:52.593190Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCrea ... or actor for query(idx=CHANGE_VISIBILITY_ID). 
Mode: COMPILE_AND_EXEC 2025-05-07T08:58:54.182202Z node 7 :SQS TRACE: executor.cpp:154: Request [2d226ade-f13fcf4d-6cdd537c-2737d02d] Query(idx=CHANGE_VISIBILITY_ID) Queue [cloud4/000000000000000101v0] Serializing params: {"QUEUE_ID_NUMBER": 2, "QUEUE_ID_NUMBER_HASH": 17472595041006102391, "SHARD": 1, "QUEUE_ID_NUMBER_AND_SHARD_HASH": 5923258363543965525, "NOW": 1746608334178, "GROUPS_READ_ATTEMPT_IDS_PERIOD": 300000, "KEYS": [{"LockTimestamp": 1746608333934, "Offset": 1, "NewVisibilityDeadline": 1746608335178}, {"LockTimestamp": 1746608334005, "Offset": 2, "NewVisibilityDeadline": 1746608336178}]} 2025-05-07T08:58:54.182628Z node 7 :SQS TRACE: executor.cpp:203: Request [2d226ade-f13fcf4d-6cdd537c-2737d02d] Query(idx=CHANGE_VISIBILITY_ID) Queue [cloud4/000000000000000101v0] Execute program: { Transaction { MiniKQLTransaction { Mode: COMPILE_AND_EXEC Program { Bin: "O\034\014Exists*NewVisibilityDeadline\014Offset\006Arg\014Member\nFlags\010Name\010Args\016Payload\022Parameter\006And\032LockTimestamp$VisibilityDeadline\014Invoke\t\211\004\206\202?\000\206\202\030Extend\000\006\002?\000\t\211\004\202\203\005@\206\205\n\203\014\207\203\010\203\014\203\010?\020(ChangeConddCurrentVisibilityDeadline\002\006\n$SetResult\000\003?\006\014result\t\211\006?\024\206\205\006?\020?\020?\020.\006\n?\032?\0220MapParameter\000\t\351\000?\034\005\205\004\206\205\004\203\010\203\005@\026\032\203\005@\036\"\006\000?&\003?(\010KEYS\003&\000\t\251\000?\032\016\000\005?\022\t\211\004?\010\207\203\014?\010 Coalesce\000\t\211\004?<\207\203\014\207\203\014*\000\t\211\006?B\203\005@\203\010?\0146\000\003?J\026LessOrEqual\t\351\000?L\005\205\004\206\205\004\203\010\203\005@\026\032\203\005@\036\"\006\000?X\003?Z\006NOW\003&\000\t\211\004?\014\207\205\004\207\203\010?\014.2\203\004\022\000\t\211\n?n\203\005\004\200\205\004\203\004\203\004.2\213\010\203\010\203\010\203\004?\020\203\004$SelectRow\000\003?t \000\001\205\000\000\000\000\001\030\000\000\000\000\000\000\000?l\005?z\003?v\020\003?x\026\003\013?\202\t\351\000?|\005\205\004\206\205\004\203\010\203\005@\026\032\203\005@\036\"\006\000?\226\003?\230> TestKinesisHttpProxy::TestWrongRequest [GOOD] Test command err: 2025-05-07T08:57:47.130609Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501625389709001916:2061];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:57:47.130874Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/001855/r3tmp/tmpGIcFa3/pdisk_1.dat 2025-05-07T08:57:47.933659Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:57:47.935278Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:57:47.935347Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:57:47.956560Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 25167, node 1 2025-05-07T08:57:48.177654Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:57:48.177685Z node 1 :NET_CLASSIFIER WARN: 
net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:57:48.177716Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:57:48.177860Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:27092 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:57:48.739079Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:57:48.795074Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 TClient is connected to server localhost:27092 2025-05-07T08:57:49.300677Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:57:49.315927Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:57:49.330119Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710660, at schemeshard: 72057594046644480 2025-05-07T08:57:49.340375Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:57:49.495226Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-05-07T08:57:49.552871Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710663, at schemeshard: 72057594046644480 2025-05-07T08:57:49.558333Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 waiting... 
waiting... 2025-05-07T08:57:49.642379Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710665, at schemeshard: 72057594046644480 2025-05-07T08:57:49.653417Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:57:49.740702Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:57:49.796967Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:57:49.893701Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:57:49.984660Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:57:50.042724Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:57:50.099507Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:57:52.136432Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501625389709001916:2061];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:57:52.136500Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:57:52.321770Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501625411183839802:2377], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:57:52.321896Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:57:52.322381Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501625411183839814:2380], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:57:52.326677Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710673:3, at schemeshard: 72057594046644480 2025-05-07T08:57:52.345904Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501625411183839816:2381], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710673 completed, doublechecking } 2025-05-07T08:57:52.415839Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501625411183839867:2868] txid# 281474976710674, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 18], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:57:52.832839Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710675. Ctx: { TraceId: 01jtmzecxz1fb0r4hzcqp2wjhq, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZDgwMzI5N2YtMzg0OTMyNzQtYzZiYTVhZmYtOTA1OWNhZTU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:57:52.906607Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710676:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:57:52.941401Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710677:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:57:52.985242Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710678:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:57:53.017870Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710679:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:57:53.063028Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710680:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:57:53.100746Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710681:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:57:53.153544Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710682:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:57:53.201930Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710683:0, at schemeshard: 72057594046644480 waiting... ... 
tedResponse { Type { Kind: Struct Struct { Member { Name: "settings" Type { Kind: Optional Optional { Item { Kind: List List { Item { Kind: Struct Struct { Member { Name: "Account" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "Name" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "Value" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } } } } } } } } Member { Name: "truncated" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } } } Value { Struct { Optional { } } Struct { Optional { Bool: false } } } } } 2025-05-07T08:58:53.319409Z node 8 :SQS TRACE: executor.cpp:327: Request [] Query(idx=GET_USER_SETTINGS_ID) Queue [] Minikql data response: {"settings": [], "truncated": false} 2025-05-07T08:58:53.319491Z node 8 :SQS DEBUG: executor.cpp:401: Request [] Query(idx=GET_USER_SETTINGS_ID) Queue [] execution duration: 22ms 2025-05-07T08:58:53.319804Z node 8 :SQS TRACE: user_settings_reader.cpp:89: Handle user settings: { Status: 48 TxId: 281474976710685 StatusCode: SUCCESS ExecutionEngineStatus: 1 ExecutionEngineResponseStatus: 2 ExecutionEngineEvaluatedResponse { Type { Kind: Struct Struct { Member { Name: "settings" Type { Kind: Optional Optional { Item { Kind: List List { Item { Kind: Struct Struct { Member { Name: "Account" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "Name" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "Value" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } } } } } } } } Member { Name: "truncated" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } } } Value { Struct { Optional { } } Struct { Optional { Bool: false } } } } } 2025-05-07T08:58:53.321064Z node 8 :SQS TRACE: executor.cpp:286: Request [] Query(idx=GET_QUEUES_LIST_ID) Queue [] HandleResponse { Status: 48 TxId: 281474976710686 StatusCode: SUCCESS ExecutionEngineStatus: 1 ExecutionEngineResponseStatus: 2 ExecutionEngineEvaluatedResponse { Type { Kind: Struct Struct { Member { Name: "queues" Type { Kind: Optional Optional { Item { Kind: List List { Item { Kind: Struct Struct { Member { Name: "Account" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "CreatedTimestamp" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "CustomQueueName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "DlqName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "FifoQueue" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } Member { Name: "FolderId" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "MasterTabletId" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "QueueName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "QueueState" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "Shards" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "TablesFormat" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 2 } } } } } Member { Name: "Version" Type { Kind: Optional Optional { Item { 
Kind: Data Data { Scheme: 4 } } } } } } } } } } } } Member { Name: "truncated" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } } } Value { Struct { Optional { } } Struct { Optional { Bool: false } } } } } 2025-05-07T08:58:53.321086Z node 8 :SQS DEBUG: executor.cpp:287: Request [] Query(idx=GET_QUEUES_LIST_ID) Queue [] Attempt 1 execution duration: 21ms 2025-05-07T08:58:53.321550Z node 8 :SQS TRACE: executor.cpp:325: Request [] Query(idx=GET_QUEUES_LIST_ID) Queue [] Sending mkql execution result: { Status: 48 TxId: 281474976710686 StatusCode: SUCCESS ExecutionEngineStatus: 1 ExecutionEngineResponseStatus: 2 ExecutionEngineEvaluatedResponse { Type { Kind: Struct Struct { Member { Name: "queues" Type { Kind: Optional Optional { Item { Kind: List List { Item { Kind: Struct Struct { Member { Name: "Account" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "CreatedTimestamp" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "CustomQueueName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "DlqName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "FifoQueue" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } Member { Name: "FolderId" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "MasterTabletId" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "QueueName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "QueueState" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "Shards" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "TablesFormat" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 2 } } } } } Member { Name: "Version" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } } } } } } } } Member { Name: "truncated" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } } } Value { Struct { Optional { } } Struct { Optional { Bool: false } } } } } 2025-05-07T08:58:53.321580Z node 8 :SQS TRACE: executor.cpp:327: Request [] Query(idx=GET_QUEUES_LIST_ID) Queue [] Minikql data response: {"queues": [], "truncated": false} 2025-05-07T08:58:53.321709Z node 8 :SQS DEBUG: executor.cpp:401: Request [] Query(idx=GET_QUEUES_LIST_ID) Queue [] execution duration: 21ms 2025-05-07T08:58:53.326402Z node 8 :SQS TRACE: queues_list_reader.cpp:82: Handle queues list: { Status: 48 TxId: 281474976710686 StatusCode: SUCCESS ExecutionEngineStatus: 1 ExecutionEngineResponseStatus: 2 ExecutionEngineEvaluatedResponse { Type { Kind: Struct Struct { Member { Name: "queues" Type { Kind: Optional Optional { Item { Kind: List List { Item { Kind: Struct Struct { Member { Name: "Account" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "CreatedTimestamp" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "CustomQueueName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "DlqName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "FifoQueue" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } Member { 
Name: "FolderId" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "MasterTabletId" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "QueueName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "QueueState" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "Shards" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "TablesFormat" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 2 } } } } } Member { Name: "Version" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } } } } } } } } Member { Name: "truncated" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } } } Value { Struct { Optional { } } Struct { Optional { Bool: false } } } } } 2025-05-07T08:58:53.576459Z node 8 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:552: [WorkloadService] [Service] Reply cleanup error NOT_FOUND to [8:7501625671446988120:2443]: Pool not found 2025-05-07T08:58:53.577200Z node 8 :SQS DEBUG: monitoring.cpp:60: [monitoring] Report deletion queue data lag: 0.000000s, count: 0 2025-05-07T08:58:54.214277Z node 8 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:552: [WorkloadService] [Service] Reply cleanup error NOT_FOUND to [8:7501625671446988118:2442]: Pool not found 2025-05-07T08:58:54.215171Z node 8 :SQS DEBUG: cleanup_queue_data.cpp:100: [cleanup removed queues] getting queues... 2025-05-07T08:58:54.221995Z node 8 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:602: [WorkloadService] [TDatabaseFetcherActor] ActorId: [8:7501625675741955531:2464], Database: /Root/SQS, Failed to fetch database info, UNSUPPORTED, issues: {
: Error: Invalid database path /Root/SQS, please check the correctness of the path } 2025-05-07T08:58:54.222117Z node 8 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [8:7501625675741955530:2463], DatabaseId: /Root/SQS, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:58:54.222224Z node 8 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root/SQS, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:58:54.245394Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:83: (#37,[::1]:49282) incoming connection opened 2025-05-07T08:58:54.245470Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:145: (#37,[::1]:49282) -> (POST /) 2025-05-07T08:58:54.245633Z node 8 :HTTP_PROXY INFO: http_service.cpp:102: proxy service: incoming request from [d86c:f500:6050:0:c06c:f500:6050:0] request [CreateStream] url [/] database [] requestId: 81e356bd-28ff8045-e3851c7a-62b91b23 Http output full {"__type":"InvalidArgumentException","message":"ydb/core/http_proxy/json_proto_conversion.h:400: Unexpected json key: WrongStreamName"} 400 {"__type":"InvalidArgumentException","message":"ydb/core/http_proxy/json_proto_conversion.h:400: Unexpected json key: WrongStreamName"} 2025-05-07T08:58:54.246216Z node 8 :HTTP_PROXY WARN: http_req.cpp:948: http request [CreateStream] requestId [81e356bd-28ff8045-e3851c7a-62b91b23] got new request with incorrect json from [d86c:f500:6050:0:c06c:f500:6050:0] database '' 2025-05-07T08:58:54.246421Z node 8 :HTTP_PROXY INFO: http_req.cpp:1211: http request [CreateStream] requestId [81e356bd-28ff8045-e3851c7a-62b91b23] reply with status: BAD_REQUEST message: ydb/core/http_proxy/json_proto_conversion.h:400: Unexpected json key: WrongStreamName 2025-05-07T08:58:54.246761Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:243: (#37,[::1]:49282) <- (400 InvalidArgumentException) 2025-05-07T08:58:54.246821Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:252: (#37,[::1]:49282) Request: POST / HTTP/1.1 Host: example.amazonaws.com X-Amz-Target: kinesisApi.CreateStream X-Amz-Date: 20150830T123600Z Authorization: Content-Type: application/json Connection: Close Transfer-Encoding: chunked 57 { "ShardCount":5, "StreamName":"testtopic", "WrongStreamName":"WrongStreamName" } 0 2025-05-07T08:58:54.246862Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:259: (#37,[::1]:49282) Response: HTTP/1.1 400 InvalidArgumentException Connection: close x-amzn-requestid: 81e356bd-28ff8045-e3851c7a-62b91b23 x-amz-crc32: 3053902336 Content-Type: application/x-amz-json-1.1 Content-Length: 135 {"__type":"InvalidArgumentException","message":"ydb/core/http_proxy/json_proto_conversion.h:400: Unexpected json key: WrongStreamName"} 2025-05-07T08:58:54.246992Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:296: (#37,[::1]:49282) connection closed >> TopicAutoscaling::PartitionSplit_BeforeAutoscaleAwareSDK >> TCertificateCheckerTest::CheckSubjectDns [GOOD] >> TopicAutoscaling::ControlPlane_BackCompatibility >> TCmsTest::TestKeepAvailableModeDisconnects [GOOD] >> TCmsTest::TestForceRestartModeScheduled >> TopicAutoscaling::PartitionMerge_PreferedPartition_BeforeAutoscaleAwareSDK |91.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/security/certificate_check/ut/unittest >> TCertificateCheckerTest::CheckSubjectDns [GOOD] >> TestKinesisHttpProxy::TestPing >> TestYmqHttpProxy::TestSendMessageEmptyQueueUrl >> TestYmqHttpProxy::TestGetQueueUrl >> TestYmqHttpProxy::TestSendMessage >> TestKinesisHttpProxy::ListShardsToken [GOOD] |91.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/grpc_services/tablet/ut/ydb-core-grpc_services-tablet-ut |91.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/grpc_services/tablet/ut/ydb-core-grpc_services-tablet-ut |91.3%| [LD] {RESULT} $(B)/ydb/core/grpc_services/tablet/ut/ydb-core-grpc_services-tablet-ut |91.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/services/config/ut/ydb-services-config-ut >> 
TestKinesisHttpProxy::GoodRequestCreateStream [GOOD] |91.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/services/config/ut/ydb-services-config-ut |91.3%| [LD] {RESULT} $(B)/ydb/services/config/ut/ydb-services-config-ut >> TCmsTest::TestForceRestartModeScheduled [GOOD] >> TCmsTest::TestForceRestartModeScheduledDisconnects >> Cdc::DescribeStream [GOOD] >> Cdc::DecimalKey >> TSchemeShardServerLess::StorageBillingLabels [GOOD] >> TFlatTableExecutor_IndexLoading::Scan_BTreeIndex [GOOD] >> TFlatTableExecutor_IndexLoading::Scan_History_FlatIndex ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_serverless/unittest >> TSchemeShardServerLess::StorageBillingLabels [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140] 2025-05-07T08:57:40.478158Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:57:40.478257Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:57:40.478308Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:57:40.478356Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:57:40.478398Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:57:40.478441Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:57:40.478502Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:57:40.478576Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:57:40.479282Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:57:40.479639Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:57:40.571116Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:57:40.571184Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:57:40.588589Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:57:40.588820Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:57:40.589017Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:57:40.600652Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:57:40.600889Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:57:40.601421Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:57:40.601585Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:57:40.604810Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:57:40.606271Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:57:40.606351Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:57:40.606435Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:57:40.606482Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:57:40.606528Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:57:40.606757Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:57:40.613297Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T08:57:40.773141Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:57:40.773414Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:57:40.773644Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:57:40.773933Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:57:40.774022Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:57:40.777752Z node 1 :FLAT_TX_SCHEMESHARD 
DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:57:40.777925Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:57:40.778201Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:57:40.778280Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:57:40.778324Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:57:40.778368Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:57:40.782236Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:57:40.782352Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:57:40.782398Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:57:40.784936Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:57:40.785009Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:57:40.785113Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:57:40.785174Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:57:40.810192Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:57:40.813609Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:57:40.813870Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:57:40.815121Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:57:40.815313Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:57:40.815383Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:57:40.815735Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:57:40.815823Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:57:40.816037Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:57:40.816143Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:57:40.819171Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:57:40.819242Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:57:40.819476Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:57:40.819526Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 
ESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:205:2207], at schemeshard: 72057594046678944, txId: 105, path id: 3 FAKE_COORDINATOR: Erasing txId 105 2025-05-07T08:57:41.415246Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 5 PathOwnerId: 72057594046678944, cookie: 105 2025-05-07T08:57:41.415383Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 5 PathOwnerId: 72057594046678944, cookie: 105 2025-05-07T08:57:41.415433Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 105 2025-05-07T08:57:41.415475Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 105, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 5 2025-05-07T08:57:41.415525Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 5 2025-05-07T08:57:41.415620Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 105, subscribers: 0 2025-05-07T08:57:41.418184Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5769: Handle TEvSyncTenantSchemeShard, at schemeshard: 72057594046678944, msg: DomainSchemeShard: 72057594046678944 DomainPathId: 3 TabletID: 72075186233409549 Generation: 2 EffectiveACLVersion: 0 SubdomainVersion: 2 UserAttributesVersion: 2 TenantHive: 18446744073709551615 TenantSysViewProcessor: 18446744073709551615 TenantRootACL: "" TenantStatisticsAggregator: 18446744073709551615 TenantGraphShard: 18446744073709551615 2025-05-07T08:57:41.418276Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__sync_update_tenants.cpp:26: TTxSyncTenant DoExecute, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-05-07T08:57:41.418384Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:567: DoUpdateTenant no hasChanges, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], tenantLink: TSubDomainsLinks::TLink { DomainKey: [OwnerId: 72057594046678944, LocalPathId: 3], Generation: 2, ActorId:[1:563:2501], EffectiveACLVersion: 0, SubdomainVersion: 2, UserAttributesVersion: 2, TenantHive: 18446744073709551615, TenantSysViewProcessor: 18446744073709551615, TenantStatisticsAggregator: 18446744073709551615, TenantGraphShard: 18446744073709551615, TenantRootACL: }, subDomain->GetVersion(): 2, actualEffectiveACLVersion: 0, actualUserAttrsVersion: 2, tenantHive: 18446744073709551615, tenantSysViewProcessor: 18446744073709551615, at schemeshard: 72057594046678944 2025-05-07T08:57:41.419016Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72075186233409549 2025-05-07T08:57:41.419061Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72075186233409549, txId: 0, path id: [OwnerId: 72075186233409549, LocalPathId: 1] 2025-05-07T08:57:41.419215Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72075186233409549 2025-05-07T08:57:41.419295Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:664:2576], at schemeshard: 72075186233409549, txId: 0, path id: 1 2025-05-07T08:57:41.420173Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72075186233409549, msg: Owner: 72075186233409549 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72075186233409549, cookie: 0 2025-05-07T08:57:41.420337Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 105 2025-05-07T08:57:41.420415Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__sync_update_tenants.cpp:36: TTxSyncTenant DoComplete, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 TestModificationResult got TxId: 105, wait until txId: 105 TestWaitNotification wait txId: 105 2025-05-07T08:57:41.420702Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 105: send EvNotifyTxCompletion 2025-05-07T08:57:41.420757Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 105 2025-05-07T08:57:41.421261Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 105, at schemeshard: 72057594046678944 2025-05-07T08:57:41.421355Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 105: got EvNotifyTxCompletionResult 2025-05-07T08:57:41.421393Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 105: satisfy waiter [1:744:2636] TestWaitNotification: OK eventTxId 105 ... 
waiting for metering 2025-05-07T08:57:46.554515Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7261: Cannot get console configs 2025-05-07T08:57:46.554620Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:57:46.610269Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7261: Cannot get console configs 2025-05-07T08:57:46.610343Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:57:46.662243Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7261: Cannot get console configs 2025-05-07T08:57:46.662316Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:58:06.338769Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:58:06.339027Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:90: TTxServerlessStorageBilling: initiate at first time, schemeshardId: 72075186233409549, domainId: [OwnerId: 72057594046678944, LocalPathId: 3], now: 1970-01-01T00:01:00.000000Z, set LastBillTime: 1970-01-01T00:01:00.000000Z, next retry at: 1970-01-01T00:02:00.000000Z 2025-05-07T08:58:06.344228Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:58:06.444663Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6592: Handle: TEvRunConditionalErase, at schemeshard: 72057594046678944 2025-05-07T08:58:06.444891Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:56: TTxRunConditionalErase DoExecute: at schemeshard: 72057594046678944 2025-05-07T08:58:06.445011Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:189: TTxRunConditionalErase DoComplete: at schemeshard: 72057594046678944 2025-05-07T08:58:06.520100Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6592: Handle: TEvRunConditionalErase, at schemeshard: 72075186233409546 2025-05-07T08:58:06.520226Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:56: TTxRunConditionalErase DoExecute: at schemeshard: 72075186233409546 2025-05-07T08:58:06.520304Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:189: TTxRunConditionalErase DoComplete: at schemeshard: 72075186233409546 2025-05-07T08:58:06.563895Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6592: Handle: TEvRunConditionalErase, at schemeshard: 72075186233409549 2025-05-07T08:58:06.564000Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:56: TTxRunConditionalErase DoExecute: at schemeshard: 72075186233409549 2025-05-07T08:58:06.564050Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:189: TTxRunConditionalErase DoComplete: at schemeshard: 72075186233409549 2025-05-07T08:58:33.171107Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:58:33.171316Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:121: TTxServerlessStorageBilling: too soon call, wait until current period ends, schemeshardId: 72075186233409549, domainId: [OwnerId: 72057594046678944, LocalPathId: 3], now: 1970-01-01T00:02:00.000000Z, LastBillTime: 1970-01-01T00:01:00.000000Z, lastBilled: 1970-01-01T00:01:00.000000Z--1970-01-01T00:01:59.000000Z, toBill: 1970-01-01T00:01:00.000000Z--1970-01-01T00:01:59.000000Z, next retry at: 1970-01-01T00:03:00.000000Z 
2025-05-07T08:58:33.171431Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:58:33.258447Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6592: Handle: TEvRunConditionalErase, at schemeshard: 72057594046678944 2025-05-07T08:58:33.258588Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:56: TTxRunConditionalErase DoExecute: at schemeshard: 72057594046678944 2025-05-07T08:58:33.258666Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:189: TTxRunConditionalErase DoComplete: at schemeshard: 72057594046678944 2025-05-07T08:58:33.330615Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6592: Handle: TEvRunConditionalErase, at schemeshard: 72075186233409546 2025-05-07T08:58:33.330745Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:56: TTxRunConditionalErase DoExecute: at schemeshard: 72075186233409546 2025-05-07T08:58:33.330835Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:189: TTxRunConditionalErase DoComplete: at schemeshard: 72075186233409546 2025-05-07T08:58:33.390454Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6592: Handle: TEvRunConditionalErase, at schemeshard: 72075186233409549 2025-05-07T08:58:33.390564Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:56: TTxRunConditionalErase DoExecute: at schemeshard: 72075186233409549 2025-05-07T08:58:33.390630Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:189: TTxRunConditionalErase DoComplete: at schemeshard: 72075186233409549 2025-05-07T08:59:00.799257Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:59:00.799645Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:191: TTxServerlessStorageBilling: make a bill, record: '{"usage":{"start":120,"quantity":59,"finish":179,"type":"delta","unit":"byte*second"},"tags":{"ydb_size":0},"id":"72057594046678944-3-120-179-0","cloud_id":"CLOUD_ID_VAL","source_wt":180,"source_id":"sless-docapi-ydb-storage","resource_id":"DATABASE_ID_VAL","schema":"ydb.serverless.v1","labels":{"k":"v"},"folder_id":"FOLDER_ID_VAL","version":"1.0.0"} ', schemeshardId: 72075186233409549, domainId: [OwnerId: 72057594046678944, LocalPathId: 3], now: 1970-01-01T00:03:00.000000Z, LastBillTime: 1970-01-01T00:01:00.000000Z, lastBilled: 1970-01-01T00:01:00.000000Z--1970-01-01T00:01:59.000000Z, toBill: 1970-01-01T00:02:00.000000Z--1970-01-01T00:02:59.000000Z, next retry at: 1970-01-01T00:04:00.000000Z 2025-05-07T08:59:00.807419Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete ... blocking NKikimr::NMetering::TEvMetering::TEvWriteMeteringJson from FLAT_SCHEMESHARD_ACTOR to TFakeMetering cookie 0 ... 
waiting for metering (done) >> TSchemeShardTTLTests::CheckCounters [GOOD] >> BackupRestoreS3::PrefixedVectorIndex [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/http_proxy/ut/inside_ydb_ut/unittest >> TestKinesisHttpProxy::GoodRequestCreateStream [GOOD] Test command err: 2025-05-07T08:57:48.003819Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501625388301519784:2139];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:57:48.003960Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0017f7/r3tmp/tmp4367VD/pdisk_1.dat 2025-05-07T08:57:48.910399Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:57:48.910481Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:57:48.917040Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:57:49.059030Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 13996, node 1 2025-05-07T08:57:49.318624Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:57:49.318646Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:57:49.318653Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:57:49.318761Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:13080 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:57:49.958016Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T08:57:49.994276Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 TClient is connected to server localhost:13080 2025-05-07T08:57:50.429055Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:57:50.440914Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-05-07T08:57:50.443111Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:57:50.458167Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710660, at schemeshard: 72057594046644480 2025-05-07T08:57:50.465009Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:57:50.646853Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-05-07T08:57:50.704528Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710663, at schemeshard: 72057594046644480 2025-05-07T08:57:50.708980Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-05-07T08:57:50.815279Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-05-07T08:57:50.874318Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T08:57:50.942146Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:57:51.016432Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:57:51.073820Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T08:57:51.129395Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:57:51.196134Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:57:52.934756Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501625409776357576:2376], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:57:52.934854Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:57:52.935555Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501625409776357588:2379], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:57:52.940010Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710673:3, at schemeshard: 72057594046644480 2025-05-07T08:57:52.953214Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501625409776357590:2380], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710673 completed, doublechecking } 2025-05-07T08:57:53.002091Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501625388301519784:2139];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:57:53.002203Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:57:53.029492Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501625414071324937:2866] txid# 281474976710674, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 18], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:57:53.415629Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710675. Ctx: { TraceId: 01jtmzedh4djzzrhnbp55a2bex, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MzFmZTYxMjQtYjE2ZmMzMmUtZWM0ZWE4YWYtYmI4ZjFhMmM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:57:53.478567Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710676:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:57:53.552208Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710677:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:57:53.586728Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710678:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:57:53.627528Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710679:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:57:53.674429Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710680:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:57:53.719913Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710681:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-05-07T08:57:53.758057Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710682:0, at schemeshard: 72057594046644480 2025-05-07T08:57:53.793991Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710683:0, at schemeshard: 72057594046644480 waiting... ... 
05-07T08:59:00.057169Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2874: [PQ: 72075186224037907] server connected, pipe [8:7501625702385434059:2527], now have 1 active actors on pipe 2025-05-07T08:59:00.057205Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2874: [PQ: 72075186224037908] server connected, pipe [8:7501625702385434060:2528], now have 1 active actors on pipe 2025-05-07T08:59:00.057251Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2874: [PQ: 72075186224037909] server connected, pipe [8:7501625702385434061:2529], now have 1 active actors on pipe Http output full {"StreamDescription":{"RetentionPeriodHours":24,"WriteQuotaKbPerSec":1024,"StreamModeDetails":{"StreamMode":"ON_DEMAND"},"StreamArn":"testtopic","PartitioningSettings":{"MinActivePartitions":5,"AutoPartitioningSettings":{"Strategy":"AUTO_PARTITIONING_STRATEGY_DISABLED","PartitionWriteSpeed":{"StabilizationWindow":{"Nanos":0,"Seconds":300},"DownUtilizationPercent":30,"UpUtilizationPercent":90}},"MaxActivePartitions":5},"Shards":[{"ShardId":"shard-000000","SequenceNumberRange":{"StartingSequenceNumber":"0"},"HashKeyRange":{"EndingHashKey":"68056473384187692692674921486353642290","StartingHashKey":"0"}},{"ShardId":"shard-000001","SequenceNumberRange":{"StartingSequenceNumber":"0"},"HashKeyRange":{"EndingHashKey":"136112946768375385385349842972707284581","StartingHashKey":"68056473384187692692674921486353642291"}},{"ShardId":"shard-000002","SequenceNumberRange":{"StartingSequenceNumber":"0"},"HashKeyRange":{"EndingHashKey":"204169420152563078078024764459060926872","StartingHashKey":"136112946768375385385349842972707284582"}},{"ShardId":"shard-000003","SequenceNumberRange":{"StartingSequenceNumber":"0"},"HashKeyRange":{"EndingHashKey":"272225893536750770770699685945414569163","StartingHashKey":"204169420152563078078024764459060926873"}},{"ShardId":"shard-000004","SequenceNumberRange":{"StartingSequenceNumber":"0"},"HashKeyRange":{"EndingHashKey":"340282366920938463463374607431768211455","StartingHashKey":"272225893536750770770699685945414569164"}}],"KeyId":"","Owner":"Service1_id@as","StreamStatus":"ACTIVE","HasMoreShards":false,"EncryptionType":"ENCRYPTION_UNDEFINED","StreamCreationTimestamp":1746608340,"StorageLimitMb":0,"StreamName":"testtopic"}} 200 
{"StreamDescription":{"RetentionPeriodHours":24,"WriteQuotaKbPerSec":1024,"StreamModeDetails":{"StreamMode":"ON_DEMAND"},"StreamArn":"testtopic","PartitioningSettings":{"MinActivePartitions":5,"AutoPartitioningSettings":{"Strategy":"AUTO_PARTITIONING_STRATEGY_DISABLED","PartitionWriteSpeed":{"StabilizationWindow":{"Nanos":0,"Seconds":300},"DownUtilizationPercent":30,"UpUtilizationPercent":90}},"MaxActivePartitions":5},"Shards":[{"ShardId":"shard-000000","SequenceNumberRange":{"StartingSequenceNumber":"0"},"HashKeyRange":{"EndingHashKey":"68056473384187692692674921486353642290","StartingHashKey":"0"}},{"ShardId":"shard-000001","SequenceNumberRange":{"StartingSequenceNumber":"0"},"HashKeyRange":{"EndingHashKey":"136112946768375385385349842972707284581","StartingHashKey":"68056473384187692692674921486353642291"}},{"ShardId":"shard-000002","SequenceNumberRange":{"StartingSequenceNumber":"0"},"HashKeyRange":{"EndingHashKey":"204169420152563078078024764459060926872","StartingHashKey":"136112946768375385385349842972707284582"}},{"ShardId":"shard-000003","SequenceNumberRange":{"StartingSequenceNumber":"0"},"HashKeyRange":{"EndingHashKey":"272225893536750770770699685945414569163","StartingHashKey":"204169420152563078078024764459060926873"}},{"ShardId":"shard-000004","SequenceNumberRange":{"StartingSequenceNumber":"0"},"HashKeyRange":{"EndingHashKey":"340282366920938463463374607431768211455","StartingHashKey":"272225893536750770770699685945414569164"}}],"KeyId":"","Owner":"Service1_id@as","StreamStatus":"ACTIVE","HasMoreShards":false,"EncryptionType":"ENCRYPTION_UNDEFINED","StreamCreationTimestamp":1746608340,"StorageLimitMb":0,"StreamName":"testtopic"}} 2025-05-07T08:59:00.059691Z node 8 :HTTP_PROXY INFO: http_req.cpp:1207: http request [DescribeStream] requestId [4ca322de-923fffb6-41007a78-1195285f] reply ok 2025-05-07T08:59:00.060354Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:243: (#37,[::1]:56260) <- (200 ) 2025-05-07T08:59:00.060454Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:296: (#37,[::1]:56260) connection closed 2025-05-07T08:59:00.060761Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2899: [PQ: 72075186224037907] server disconnected, pipe [8:7501625702385434059:2527] destroyed 2025-05-07T08:59:00.060806Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2899: [PQ: 72075186224037908] server disconnected, pipe [8:7501625702385434060:2528] destroyed 2025-05-07T08:59:00.060828Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2899: [PQ: 72075186224037909] server disconnected, pipe [8:7501625702385434061:2529] destroyed 2025-05-07T08:59:00.060851Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2899: [PQ: 72075186224037910] server disconnected, pipe [8:7501625702385434062:2530] destroyed 2025-05-07T08:59:00.060872Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2899: [PQ: 72075186224037911] server disconnected, pipe [8:7501625702385434063:2531] destroyed 2025-05-07T08:59:00.062108Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:83: (#37,[::1]:56270) incoming connection opened 2025-05-07T08:59:00.062162Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:145: (#37,[::1]:56270) -> (POST /Root) 2025-05-07T08:59:00.062283Z node 8 :HTTP_PROXY INFO: http_service.cpp:102: proxy service: incoming request from [b85a:fa00:6050:0:a05a:fa00:6050:0] request [DescribeStreamSummary] url [/Root] database [/Root] requestId: abf5c27b-6c22637b-c45bd05d-f7a41745 2025-05-07T08:59:00.062686Z node 8 :HTTP_PROXY INFO: http_req.cpp:959: http request [DescribeStreamSummary] requestId [abf5c27b-6c22637b-c45bd05d-f7a41745] got new request from [b85a:fa00:6050:0:a05a:fa00:6050:0] 
database '/Root' stream 'testtopic' 2025-05-07T08:59:00.063183Z node 8 :HTTP_PROXY DEBUG: http_req.cpp:1500: http request [DescribeStreamSummary] requestId [abf5c27b-6c22637b-c45bd05d-f7a41745] [auth] Authorized successfully 2025-05-07T08:59:00.063308Z node 8 :HTTP_PROXY INFO: http_req.cpp:678: http request [DescribeStreamSummary] requestId [abf5c27b-6c22637b-c45bd05d-f7a41745] sending grpc request to '' database: '/Root' iam token size: 0 Http output full {"StreamDescriptionSummary":{"RetentionPeriodHours":24,"OpenShardCount":5,"StreamArn":"testtopic","ConsumerCount":0,"KeyId":"","StreamStatus":"ACTIVE","EncryptionType":"NONE","StreamCreationTimestamp":1746608.339,"StreamName":"testtopic"}} 200 {"StreamDescriptionSummary":{"RetentionPeriodHours":24,"OpenShardCount":5,"StreamArn":"testtopic","ConsumerCount":0,"KeyId":"","StreamStatus":"ACTIVE","EncryptionType":"NONE","StreamCreationTimestamp":1746608.339,"StreamName":"testtopic"}} 2025-05-07T08:59:00.064522Z node 8 :HTTP_PROXY INFO: http_req.cpp:1207: http request [DescribeStreamSummary] requestId [abf5c27b-6c22637b-c45bd05d-f7a41745] reply ok 2025-05-07T08:59:00.064862Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:243: (#37,[::1]:56270) <- (200 ) 2025-05-07T08:59:00.064948Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:296: (#37,[::1]:56270) connection closed 2025-05-07T08:59:00.066075Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:83: (#40,[::1]:56272) incoming connection opened 2025-05-07T08:59:00.066126Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:145: (#40,[::1]:56272) -> (POST /Root) 2025-05-07T08:59:00.066265Z node 8 :HTTP_PROXY INFO: http_service.cpp:102: proxy service: incoming request from [d8e5:5200:6050:0:c0e5:5200:6050:0] request [DescribeStream] url [/Root] database [/Root] requestId: a31c789a-dd2a333-dc4500ef-cb9ff322 2025-05-07T08:59:00.066580Z node 8 :HTTP_PROXY INFO: http_req.cpp:959: http request [DescribeStream] requestId [a31c789a-dd2a333-dc4500ef-cb9ff322] got new request from [d8e5:5200:6050:0:c0e5:5200:6050:0] database '/Root' stream 'testtopic' 2025-05-07T08:59:00.067002Z node 8 :HTTP_PROXY DEBUG: http_req.cpp:1500: http request [DescribeStream] requestId [a31c789a-dd2a333-dc4500ef-cb9ff322] [auth] Authorized successfully 2025-05-07T08:59:00.067079Z node 8 :HTTP_PROXY INFO: http_req.cpp:678: http request [DescribeStream] requestId [a31c789a-dd2a333-dc4500ef-cb9ff322] sending grpc request to '' database: '/Root' iam token size: 0 2025-05-07T08:59:00.068132Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2874: [PQ: 72075186224037908] server connected, pipe [8:7501625702385434087:2540], now have 1 active actors on pipe 2025-05-07T08:59:00.068189Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2874: [PQ: 72075186224037909] server connected, pipe [8:7501625702385434088:2541], now have 1 active actors on pipe 2025-05-07T08:59:00.068229Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2874: [PQ: 72075186224037910] server connected, pipe [8:7501625702385434089:2542], now have 1 active actors on pipe 2025-05-07T08:59:00.068260Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2874: [PQ: 72075186224037911] server connected, pipe [8:7501625702385434090:2543], now have 1 active actors on pipe 2025-05-07T08:59:00.068295Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2874: [PQ: 72075186224037907] server connected, pipe [8:7501625702385434086:2539], now have 1 active actors on pipe 2025-05-07T08:59:00.069886Z node 8 :HTTP_PROXY INFO: http_req.cpp:1207: http request [DescribeStream] requestId [a31c789a-dd2a333-dc4500ef-cb9ff322] reply ok 2025-05-07T08:59:00.070067Z node 8 :PERSQUEUE 
DEBUG: pq_impl.cpp:2899: [PQ: 72075186224037909] server disconnected, pipe [8:7501625702385434088:2541] destroyed 2025-05-07T08:59:00.070099Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2899: [PQ: 72075186224037910] server disconnected, pipe [8:7501625702385434089:2542] destroyed 2025-05-07T08:59:00.070121Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2899: [PQ: 72075186224037911] server disconnected, pipe [8:7501625702385434090:2543] destroyed Http output full {"StreamDescription":{"RetentionPeriodHours":24,"WriteQuotaKbPerSec":1024,"StreamModeDetails":{"StreamMode":"ON_DEMAND"},"StreamArn":"testtopic","PartitioningSettings":{"MinActivePartitions":5,"AutoPartitioningSettings":{"Strategy":"AUTO_PARTITIONING_STRATEGY_DISABLED","PartitionWriteSpeed":{"StabilizationWindow":{"Nanos":0,"Seconds":300},"DownUtilizationPercent":30,"UpUtilizationPercent":90}},"MaxActivePartitions":5},"Shards":[{"ShardId":"shard-000000","SequenceNumberRange":{"StartingSequenceNumber":"0"},"HashKeyRange":{"EndingHashKey":"68056473384187692692674921486353642290","StartingHashKey":"0"}},{"ShardId":"shard-000001","SequenceNumberRange":{"StartingSequenceNumber":"0"},"HashKeyRange":{"EndingHashKey":"136112946768375385385349842972707284581","StartingHashKey":"68056473384187692692674921486353642291"}},{"ShardId":"shard-000002","SequenceNumberRange":{"StartingSequenceNumber":"0"},"HashKeyRange":{"EndingHashKey":"204169420152563078078024764459060926872","StartingHashKey":"136112946768375385385349842972707284582"}},{"ShardId":"shard-000003","SequenceNumberRange":{"StartingSequenceNumber":"0"},"HashKeyRange":{"EndingHashKey":"272225893536750770770699685945414569163","StartingHashKey":"204169420152563078078024764459060926873"}},{"ShardId":"shard-000004","SequenceNumberRange":{"StartingSequenceNumber":"0"},"HashKeyRange":{"EndingHashKey":"340282366920938463463374607431768211455","StartingHashKey":"272225893536750770770699685945414569164"}}],"KeyId":"","Owner":"Service1_id@as","StreamStatus":"ACTIVE","HasMoreShards":false,"EncryptionType":"ENCRYPTION_UNDEFINED","StreamCreationTimestamp":1746608340,"StorageLimitMb":0,"StreamName":"testtopic"}} 2025-05-07T08:59:00.070190Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:243: (#40,[::1]:56272) <- (200 ) 2025-05-07T08:59:00.070274Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:296: (#40,[::1]:56272) connection closed 2025-05-07T08:59:00.072258Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2899: [PQ: 72075186224037907] server disconnected, pipe [8:7501625702385434086:2539] destroyed 2025-05-07T08:59:00.072280Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2899: [PQ: 72075186224037908] server disconnected, pipe [8:7501625702385434087:2540] destroyed ------- [TM] {asan, default-linux-x86_64, release} ydb/core/http_proxy/ut/inside_ydb_ut/unittest >> TestKinesisHttpProxy::ListShardsToken [GOOD] Test command err: 2025-05-07T08:57:48.142788Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501625390061252850:2208];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:57:48.164571Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0017c7/r3tmp/tmpopnJVD/pdisk_1.dat 2025-05-07T08:57:48.882371Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:57:48.882475Z 
node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:57:48.885379Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:57:48.955632Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 6734, node 1 2025-05-07T08:57:49.226169Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:57:49.226195Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:57:49.226203Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:57:49.226316Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23348 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:57:49.828213Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:57:49.858388Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 TClient is connected to server localhost:23348 2025-05-07T08:57:50.252830Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:57:50.260638Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-05-07T08:57:50.263144Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T08:57:50.282769Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710660, at schemeshard: 72057594046644480 2025-05-07T08:57:50.290084Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-05-07T08:57:50.477736Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:57:50.535725Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-05-07T08:57:50.607805Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-05-07T08:57:50.647747Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T08:57:50.693378Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:57:50.740193Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:57:50.781122Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-05-07T08:57:50.825508Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480 2025-05-07T08:57:50.878826Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:57:52.897209Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501625407241123265:2376], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:57:52.897334Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:57:52.897625Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501625407241123277:2379], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:57:52.902207Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710673:3, at schemeshard: 72057594046644480 2025-05-07T08:57:52.914657Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501625407241123279:2380], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710673 completed, doublechecking } 2025-05-07T08:57:53.008257Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501625411536090626:2863] txid# 281474976710674, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 18], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:57:53.142080Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501625390061252850:2208];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:57:53.142153Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:57:53.488034Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710675. Ctx: { TraceId: 01jtmzedfy5f9mtj4zbkey8f0x, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MzAwY2Y4NjAtOGY1YTU5MGUtYTA5Yzc1OTAtNjM2MmJiZTY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:57:53.526956Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710676:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:57:53.606141Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710677:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:57:53.681027Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710678:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:57:53.712721Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710679:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:57:53.744348Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710680:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:57:53.814031Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710681:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:57:53.852499Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710682:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:57:53.900252Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710683:0, at schemeshard: 72057594046644480 waiting... 
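[Editor's note on the sequence above: the "Resource pool default not found" NOT_FOUND warnings, the ESchemeOpCreateResourcePool proposal, the "Transaction ... completed, doublechecking" scheduled retry, and the TX_PROXY "path exist, request accepts it" message form one benign bootstrap sequence: on a fresh database the workload service lazily creates /Root/.metadata/workload_manager/pools/default at first query, and a creator that loses the race treats "already exists" as success. Below is a minimal, self-contained C++ sketch of that create-if-absent pattern; every name in it is an illustrative stand-in, not YDB's actual internal API.]

// Sketch of the fetch -> NOT_FOUND -> create -> "path exist" flow seen in the log.
// Assumption: EStatus, FetchPool, CreatePool, and g_scheme are simplified stand-ins.
#include <iostream>
#include <set>
#include <string>

enum class EStatus { Ok, NotFound, AlreadyExists };

std::set<std::string> g_scheme;  // pretend scheme board

EStatus FetchPool(const std::string& path) {
    return g_scheme.count(path) ? EStatus::Ok : EStatus::NotFound;
}

EStatus CreatePool(const std::string& path) {
    // insert().second is false when the path already exists -- the race that
    // surfaces in the log as "path exist, request accepts it".
    return g_scheme.insert(path).second ? EStatus::Ok : EStatus::AlreadyExists;
}

EStatus EnsureDefaultPool() {
    const std::string path = "/Root/.metadata/workload_manager/pools/default";
    if (FetchPool(path) == EStatus::Ok) return EStatus::Ok;  // fast path
    EStatus st = CreatePool(path);
    if (st == EStatus::Ok || st == EStatus::AlreadyExists)
        return FetchPool(path);  // doublecheck, like the scheduled retry above
    return st;
}

int main() {
    // Simulate a concurrent creator having won the race before we run.
    g_scheme.insert("/Root/.metadata/workload_manager/pools/default");
    std::cout << (EnsureDefaultPool() == EStatus::Ok ? "pool ready\n" : "failed\n");
}

[Practical takeaway when reading such logs: on a newly created database these warnings are expected noise from resource-pool bootstrap, not a test failure; the run below proceeds normally once the pool exists.]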
2025-05-07T08:57:53.952480Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreat ... 4037907] TxId 281474976710689, State DELETING 2025-05-07T08:58:59.759542Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:4536: [PQ: 72075186224037907] delete TxId 281474976710689 2025-05-07T08:58:59.759898Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:1225: [PQ: 72075186224037910] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-05-07T08:58:59.759914Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:4279: [PQ: 72075186224037910] Try execute txs with state DELETING 2025-05-07T08:58:59.759928Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:4324: [PQ: 72075186224037910] TxId 281474976710689, State DELETING 2025-05-07T08:58:59.759944Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:4536: [PQ: 72075186224037910] delete TxId 281474976710689 2025-05-07T08:58:59.760413Z node 8 :HTTP_PROXY INFO: http_req.cpp:1207: http request [CreateStream] requestId [b18e6bd-73b9cc3b-784ff61d-820a1ebc] reply ok 2025-05-07T08:58:59.760559Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:3081: [PQ: 72075186224037907] Registered with mediator time cast 2025-05-07T08:58:59.760594Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:3081: [PQ: 72075186224037909] Registered with mediator time cast 2025-05-07T08:58:59.760614Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:3081: [PQ: 72075186224037911] Registered with mediator time cast 2025-05-07T08:58:59.760635Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:3081: [PQ: 72075186224037910] Registered with mediator time cast 2025-05-07T08:58:59.760889Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:243: (#37,[::1]:42536) <- (200 ) 2025-05-07T08:58:59.760920Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:1225: [PQ: 72075186224037908] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-05-07T08:58:59.760933Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:4279: [PQ: 72075186224037908] Try execute txs with state DELETING 2025-05-07T08:58:59.760944Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:4324: [PQ: 72075186224037908] TxId 281474976710689, State DELETING 2025-05-07T08:58:59.760957Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:4536: [PQ: 72075186224037908] delete TxId 281474976710689 2025-05-07T08:58:59.761011Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:296: (#37,[::1]:42536) connection closed Http output full {} 200 {} 2025-05-07T08:58:59.763110Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:83: (#40,[::1]:42544) incoming connection opened 2025-05-07T08:58:59.763182Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:145: (#40,[::1]:42544) -> (POST /Root) 2025-05-07T08:58:59.763295Z node 8 :HTTP_PROXY INFO: http_service.cpp:102: proxy service: incoming request from [b8dd:6b00:6050:0:a0dd:6b00:6050:0] request [ListShards] url [/Root] database [/Root] requestId: 9f367889-6483e2d8-f5d0e0c0-4bac3335 2025-05-07T08:58:59.763737Z node 8 :HTTP_PROXY INFO: http_req.cpp:959: http request [ListShards] requestId [9f367889-6483e2d8-f5d0e0c0-4bac3335] got new request from [b8dd:6b00:6050:0:a0dd:6b00:6050:0] database '/Root' stream 'teststream' 2025-05-07T08:58:59.764060Z node 8 :HTTP_PROXY DEBUG: http_req.cpp:1500: http request [ListShards] requestId [9f367889-6483e2d8-f5d0e0c0-4bac3335] [auth] Authorized successfully 2025-05-07T08:58:59.764158Z node 8 :HTTP_PROXY INFO: http_req.cpp:678: http request [ListShards] requestId [9f367889-6483e2d8-f5d0e0c0-4bac3335] sending grpc request to '' database: '/Root' iam token size: 0 E0000 00:00:1746608339.764261 270331 message_lite.cc:131] Can't parse message of type 
"NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn 2025-05-07T08:58:59.765954Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2874: [PQ: 72075186224037907] server connected, pipe [8:7501625697769441859:2525], now have 1 active actors on pipe 2025-05-07T08:58:59.766005Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2874: [PQ: 72075186224037911] server connected, pipe [8:7501625697769441860:2526], now have 1 active actors on pipe 2025-05-07T08:58:59.766758Z node 8 :HTTP_PROXY INFO: http_req.cpp:1207: http request [ListShards] requestId [9f367889-6483e2d8-f5d0e0c0-4bac3335] reply ok 2025-05-07T08:58:59.766983Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2899: [PQ: 72075186224037911] server disconnected, pipe [8:7501625697769441860:2526] destroyed 2025-05-07T08:58:59.766994Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:243: (#40,[::1]:42544) <- (200 ) 2025-05-07T08:58:59.767009Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2899: [PQ: 72075186224037907] server disconnected, pipe [8:7501625697769441859:2525] destroyed 2025-05-07T08:58:59.767131Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:296: (#40,[::1]:42544) connection closed Http output full {"NextToken":"CLb24M/qMhACGAIiCnRlc3RzdHJlYW0=","Shards":[{"ShardId":"shard-000000","SequenceNumberRange":{"StartingSequenceNumber":"0"},"HashKeyRange":{"EndingHashKey":"68056473384187692692674921486353642290","StartingHashKey":"0"}},{"ShardId":"shard-000001","SequenceNumberRange":{"StartingSequenceNumber":"0"},"HashKeyRange":{"EndingHashKey":"136112946768375385385349842972707284581","StartingHashKey":"68056473384187692692674921486353642291"}}]} 200 {"NextToken":"CLb24M/qMhACGAIiCnRlc3RzdHJlYW0=","Shards":[{"ShardId":"shard-000000","SequenceNumberRange":{"StartingSequenceNumber":"0"},"HashKeyRange":{"EndingHashKey":"68056473384187692692674921486353642290","StartingHashKey":"0"}},{"ShardId":"shard-000001","SequenceNumberRange":{"StartingSequenceNumber":"0"},"HashKeyRange":{"EndingHashKey":"136112946768375385385349842972707284581","StartingHashKey":"68056473384187692692674921486353642291"}}]} 2025-05-07T08:58:59.768715Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:83: (#37,[::1]:42550) incoming connection opened 2025-05-07T08:58:59.768800Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:145: (#37,[::1]:42550) -> (POST /Root) 2025-05-07T08:58:59.768923Z node 8 :HTTP_PROXY INFO: http_service.cpp:102: proxy service: incoming request from [f836:6c00:6050:0:e036:6c00:6050:0] request [ListShards] url [/Root] database [/Root] requestId: ee44acf8-1f7db2d-fc2cab85-78f68b68 2025-05-07T08:58:59.769321Z node 8 :HTTP_PROXY INFO: http_req.cpp:959: http request [ListShards] requestId [ee44acf8-1f7db2d-fc2cab85-78f68b68] got new request from [f836:6c00:6050:0:e036:6c00:6050:0] database '/Root' stream 'teststream' 2025-05-07T08:58:59.769707Z node 8 :HTTP_PROXY DEBUG: http_req.cpp:1500: http request [ListShards] requestId [ee44acf8-1f7db2d-fc2cab85-78f68b68] [auth] Authorized successfully 2025-05-07T08:58:59.769793Z node 8 :HTTP_PROXY INFO: http_req.cpp:678: http request [ListShards] requestId [ee44acf8-1f7db2d-fc2cab85-78f68b68] sending grpc request to '' database: '/Root' iam token size: 0 E0000 00:00:1746608339.769860 270331 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn Http output full 
{"NextToken":"CLv24M/qMhACGAIiCnRlc3RzdHJlYW0=","Shards":[{"ShardId":"shard-000000","SequenceNumberRange":{"StartingSequenceNumber":"0"},"HashKeyRange":{"EndingHashKey":"68056473384187692692674921486353642290","StartingHashKey":"0"}},{"ShardId":"shard-000001","SequenceNumberRange":{"StartingSequenceNumber":"0"},"HashKeyRange":{"EndingHashKey":"136112946768375385385349842972707284581","StartingHashKey":"68056473384187692692674921486353642291"}}]} 200 {"NextToken":"CLv24M/qMhACGAIiCnRlc3RzdHJlYW0=","Shards":[{"ShardId":"shard-000000","SequenceNumberRange":{"StartingSequenceNumber":"0"},"HashKeyRange":{"EndingHashKey":"68056473384187692692674921486353642290","StartingHashKey":"0"}},{"ShardId":"shard-000001","SequenceNumberRange":{"StartingSequenceNumber":"0"},"HashKeyRange":{"EndingHashKey":"136112946768375385385349842972707284581","StartingHashKey":"68056473384187692692674921486353642291"}}]} 2025-05-07T08:58:59.771054Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2874: [PQ: 72075186224037907] server connected, pipe [8:7501625697769441871:2530], now have 1 active actors on pipe 2025-05-07T08:58:59.771101Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2874: [PQ: 72075186224037911] server connected, pipe [8:7501625697769441872:2531], now have 1 active actors on pipe 2025-05-07T08:58:59.772040Z node 8 :HTTP_PROXY INFO: http_req.cpp:1207: http request [ListShards] requestId [ee44acf8-1f7db2d-fc2cab85-78f68b68] reply ok 2025-05-07T08:58:59.772400Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:243: (#37,[::1]:42550) <- (200 ) 2025-05-07T08:58:59.772496Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:296: (#37,[::1]:42550) connection closed 2025-05-07T08:58:59.774123Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2899: [PQ: 72075186224037907] server disconnected, pipe [8:7501625697769441871:2530] destroyed 2025-05-07T08:58:59.774154Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2899: [PQ: 72075186224037911] server disconnected, pipe [8:7501625697769441872:2531] destroyed 2025-05-07T08:58:59.774251Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:83: (#40,[::1]:42554) incoming connection opened 2025-05-07T08:58:59.774301Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:145: (#40,[::1]:42554) -> (POST /Root) 2025-05-07T08:58:59.774456Z node 8 :HTTP_PROXY INFO: http_service.cpp:102: proxy service: incoming request from [9852:bf00:6050:0:8052:bf00:6050:0] request [ListShards] url [/Root] database [/Root] requestId: 460b5e9d-e6a0af23-975e293-967662c4 2025-05-07T08:58:59.774894Z node 8 :HTTP_PROXY INFO: http_req.cpp:959: http request [ListShards] requestId [460b5e9d-e6a0af23-975e293-967662c4] got new request from [9852:bf00:6050:0:8052:bf00:6050:0] database '/Root' stream 'teststream' E0000 00:00:1746608339.775714 270332 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn 2025-05-07T08:58:59.775524Z node 8 :HTTP_PROXY DEBUG: http_req.cpp:1500: http request [ListShards] requestId [460b5e9d-e6a0af23-975e293-967662c4] [auth] Authorized successfully 2025-05-07T08:58:59.775641Z node 8 :HTTP_PROXY INFO: http_req.cpp:678: http request [ListShards] requestId [460b5e9d-e6a0af23-975e293-967662c4] sending grpc request to '' database: '/Root' iam token size: 0 2025-05-07T08:58:59.776962Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2874: [PQ: 72075186224037907] server connected, pipe [8:7501625697769441883:2535], now have 1 active actors on pipe 2025-05-07T08:58:59.776993Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2874: [PQ: 72075186224037911] server connected, 
pipe [8:7501625697769441884:2536], now have 1 active actors on pipe 2025-05-07T08:58:59.777662Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2899: [PQ: 72075186224037907] server disconnected, pipe [8:7501625697769441883:2535] destroyed 2025-05-07T08:58:59.777693Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2899: [PQ: 72075186224037911] server disconnected, pipe [8:7501625697769441884:2536] destroyed 2025-05-07T08:58:59.777738Z node 8 :HTTP_PROXY INFO: http_req.cpp:1207: http request [ListShards] requestId [460b5e9d-e6a0af23-975e293-967662c4] reply ok 2025-05-07T08:58:59.777919Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:243: (#40,[::1]:42554) <- (200 ) Http output full {"NextToken":"CMH24M/qMhACGAIiCnRlc3RzdHJlYW0=","Shards":[{"ShardId":"shard-000000","SequenceNumberRange":{"StartingSequenceNumber":"0"},"HashKeyRange":{"EndingHashKey":"68056473384187692692674921486353642290","StartingHashKey":"0"}},{"ShardId":"shard-000001","SequenceNumberRange":{"StartingSequenceNumber":"0"},"HashKeyRange":{"EndingHashKey":"136112946768375385385349842972707284581","StartingHashKey":"68056473384187692692674921486353642291"}}]} 200 {"NextToken":"CMH24M/qMhACGAIiCnRlc3RzdHJlYW0=","Shards":[{"ShardId":"shard-000000","SequenceNumberRange":{"StartingSequenceNumber":"0"},"HashKeyRange":{"EndingHashKey":"68056473384187692692674921486353642290","StartingHashKey":"0"}},{"ShardId":"shard-000001","SequenceNumberRange":{"StartingSequenceNumber":"0"},"HashKeyRange":{"EndingHashKey":"136112946768375385385349842972707284581","StartingHashKey":"68056473384187692692674921486353642291"}}]} 2025-05-07T08:58:59.782442Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:296: (#40,[::1]:42554) connection closed >> TCmsTest::TestForceRestartModeScheduledDisconnects [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLTests::CheckCounters [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:125:2058] recipient: [1:108:2140] 2025-05-07T08:58:31.675461Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:58:31.675564Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:58:31.675611Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:58:31.675660Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:58:31.675722Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:58:31.675760Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:58:31.675830Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 
1.000000s, InflightLimit# 10 2025-05-07T08:58:31.675918Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:58:31.676726Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:58:31.677138Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:58:31.765115Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:58:31.765197Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:58:31.781151Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:58:31.781590Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:58:31.781747Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:58:31.794791Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:58:31.798639Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:58:31.799326Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:31.799524Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:58:31.805474Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:58:31.807258Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:58:31.807338Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:58:31.807397Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:58:31.807459Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:58:31.807506Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:58:31.807625Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:58:31.821132Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T08:58:32.086166Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: 
Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:58:32.086431Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:32.086702Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:58:32.086962Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:58:32.087025Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:32.092832Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:32.093042Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:58:32.093234Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:32.093340Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:58:32.093382Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:58:32.093437Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:58:32.101714Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:32.101800Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:58:32.101857Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:58:32.107343Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:32.107433Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:32.107500Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:58:32.107599Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:58:32.117054Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:58:32.120502Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:58:32.120720Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:58:32.122022Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:32.122193Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:58:32.122261Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:58:32.122584Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:58:32.122672Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:58:32.122860Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:58:32.125497Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:58:32.136623Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:58:32.136698Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:58:32.136962Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:58:32.137049Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:205:2207], at schemeshard: 7205 ... 
l.cpp:486: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-05-07T08:59:01.538550Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 107: got EvNotifyTxCompletionResult 2025-05-07T08:59:01.538632Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 107: satisfy waiter [1:1336:3237] 2025-05-07T08:59:01.539367Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 107 Name: "SchemeShard/NumShardsByTtlLag" Ranges: "0" Ranges: "900" Ranges: "1800" Ranges: "3600" Ranges: "7200" Ranges: "14400" Ranges: "28800" Ranges: "57600" Ranges: "86400" Ranges: "inf" Values: 0 Values: 2 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 2025-05-07T08:59:01.699007Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:561: Got periodic table stats at tablet 72057594046678944 from shard 72075186233409548 followerId 0 pathId [OwnerId: 72057594046678944, LocalPathId: 4] state 'Ready' dataSize 0 rowCount 0 cpuUsage 0.0002 2025-05-07T08:59:01.699563Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:561: Got periodic table stats at tablet 72057594046678944 from shard 72075186233409549 followerId 0 pathId [OwnerId: 72057594046678944, LocalPathId: 4] state 'Ready' dataSize 0 rowCount 0 cpuUsage 0.0001 2025-05-07T08:59:01.699665Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 4 shard idx 72057594046678944:3 data size 0 row count 0 2025-05-07T08:59:01.699738Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186233409548 maps to shardIdx: 72057594046678944:3 followerId=0, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], pathId map=TTLEnabledTableMoved, is column=0, is olap=0, RowCount 0, DataSize 0 2025-05-07T08:59:01.699825Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:483: Do not want to split tablet 72075186233409548 2025-05-07T08:59:01.700342Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 4 shard idx 72057594046678944:4 data size 0 row count 0 2025-05-07T08:59:01.700393Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186233409549 maps to shardIdx: 72057594046678944:4 followerId=0, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], pathId map=TTLEnabledTableMoved, is column=0, is olap=0, RowCount 0, DataSize 0 2025-05-07T08:59:01.700440Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:483: Do not want to split tablet 72075186233409549 Name: "SchemeShard/NumShardsByTtlLag" Ranges: "0" Ranges: "900" Ranges: "1800" Ranges: "3600" Ranges: "7200" Ranges: "14400" Ranges: "28800" Ranges: "57600" Ranges: "86400" Ranges: "inf" Values: 0 Values: 0 Values: 2 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 2025-05-07T08:59:01.814567Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6592: Handle: TEvRunConditionalErase, at schemeshard: 72057594046678944 2025-05-07T08:59:01.814690Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:56: 
TTxRunConditionalErase DoExecute: at schemeshard: 72057594046678944 2025-05-07T08:59:01.814817Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:189: TTxRunConditionalErase DoComplete: at schemeshard: 72057594046678944 2025-05-07T08:59:01.814984Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__conditional_erase.cpp:213: Run conditional erase, tabletId: 72075186233409549, request: TableId: 4 Expiration { ColumnId: 2 WallClockTimestamp: 1746621602343444 ColumnUnit: UNIT_AUTO } SchemaVersion: 4 Limits { BatchMaxBytes: 512000 BatchMinKeys: 1 BatchMaxKeys: 256 }, at schemeshard: 72057594046678944 2025-05-07T08:59:01.815094Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__conditional_erase.cpp:213: Run conditional erase, tabletId: 72075186233409548, request: TableId: 4 Expiration { ColumnId: 2 WallClockTimestamp: 1746621602343444 ColumnUnit: UNIT_AUTO } SchemaVersion: 4 Limits { BatchMaxBytes: 512000 BatchMinKeys: 1 BatchMaxKeys: 256 }, at schemeshard: 72057594046678944 2025-05-07T08:59:01.816093Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6620: Conditional erase accepted: tabletId: 72075186233409548, at schemeshard: 72057594046678944 2025-05-07T08:59:01.816234Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6620: Conditional erase accepted: tabletId: 72075186233409549, at schemeshard: 72057594046678944 2025-05-07T08:59:01.816585Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:346: TTxScheduleConditionalErase Execute: at schemeshard: 72057594046678944 2025-05-07T08:59:01.816637Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:396: Successful conditional erase: tabletId: 72075186233409548, at schemeshard: 72057594046678944 2025-05-07T08:59:01.817473Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:346: TTxScheduleConditionalErase Execute: at schemeshard: 72057594046678944 2025-05-07T08:59:01.817523Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:396: Successful conditional erase: tabletId: 72075186233409549, at schemeshard: 72057594046678944 2025-05-07T08:59:01.827419Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:452: TTxScheduleConditionalErase Complete: at schemeshard: 72057594046678944 2025-05-07T08:59:01.827610Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:56: TTxRunConditionalErase DoExecute: at schemeshard: 72057594046678944 2025-05-07T08:59:01.827664Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__conditional_erase.cpp:106: Skip conditional erase: shardIdx: 72057594046678944:3, run at: 2025-05-07T13:40:02.343444Z, at schemeshard: 72057594046678944 2025-05-07T08:59:01.827787Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:452: TTxScheduleConditionalErase Complete: at schemeshard: 72057594046678944 2025-05-07T08:59:01.827838Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:189: TTxRunConditionalErase DoComplete: at schemeshard: 72057594046678944 2025-05-07T08:59:01.827917Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:56: TTxRunConditionalErase DoExecute: at schemeshard: 72057594046678944 2025-05-07T08:59:01.827949Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__conditional_erase.cpp:106: Skip conditional erase: shardIdx: 72057594046678944:3, run at: 2025-05-07T13:40:02.343444Z, at schemeshard: 72057594046678944 2025-05-07T08:59:01.827988Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:189: TTxRunConditionalErase DoComplete: at 
schemeshard: 72057594046678944 2025-05-07T08:59:01.849771Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:588: Started TEvPersistStats at tablet 72057594046678944, queue size# 0 2025-05-07T08:59:01.910727Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:561: Got periodic table stats at tablet 72057594046678944 from shard 72075186233409548 followerId 0 pathId [OwnerId: 72057594046678944, LocalPathId: 4] state 'Ready' dataSize 0 rowCount 0 cpuUsage 0 2025-05-07T08:59:01.910872Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:561: Got periodic table stats at tablet 72057594046678944 from shard 72075186233409549 followerId 0 pathId [OwnerId: 72057594046678944, LocalPathId: 4] state 'Ready' dataSize 0 rowCount 0 cpuUsage 0 2025-05-07T08:59:01.910933Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 4 shard idx 72057594046678944:3 data size 0 row count 0 2025-05-07T08:59:01.911021Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186233409548 maps to shardIdx: 72057594046678944:3 followerId=0, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], pathId map=TTLEnabledTableMoved, is column=0, is olap=0, RowCount 0, DataSize 0 2025-05-07T08:59:01.911119Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:483: Do not want to split tablet 72075186233409548 2025-05-07T08:59:01.911278Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 4 shard idx 72057594046678944:4 data size 0 row count 0 2025-05-07T08:59:01.911323Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186233409549 maps to shardIdx: 72057594046678944:4 followerId=0, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], pathId map=TTLEnabledTableMoved, is column=0, is olap=0, RowCount 0, DataSize 0 2025-05-07T08:59:01.911373Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:483: Do not want to split tablet 72075186233409549 Name: "SchemeShard/NumShardsByTtlLag" Ranges: "0" Ranges: "900" Ranges: "1800" Ranges: "3600" Ranges: "7200" Ranges: "14400" Ranges: "28800" Ranges: "57600" Ranges: "86400" Ranges: "inf" Values: 2 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 2025-05-07T08:59:01.946225Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:588: Started TEvPersistStats at tablet 72057594046678944, queue size# 0 2025-05-07T08:59:02.007490Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:561: Got periodic table stats at tablet 72057594046678944 from shard 72075186233409548 followerId 0 pathId [OwnerId: 72057594046678944, LocalPathId: 4] state 'Ready' dataSize 0 rowCount 0 cpuUsage 0 2025-05-07T08:59:02.007592Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:561: Got periodic table stats at tablet 72057594046678944 from shard 72075186233409549 followerId 0 pathId [OwnerId: 72057594046678944, LocalPathId: 4] state 'Ready' dataSize 0 rowCount 0 cpuUsage 0 2025-05-07T08:59:02.007640Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 4 shard idx 72057594046678944:3 data size 0 row count 0 2025-05-07T08:59:02.007715Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from 
datashardId(TabletID)=72075186233409548 maps to shardIdx: 72057594046678944:3 followerId=0, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], pathId map=TTLEnabledTableMoved, is column=0, is olap=0, RowCount 0, DataSize 0 2025-05-07T08:59:02.007800Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:483: Do not want to split tablet 72075186233409548 2025-05-07T08:59:02.007978Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 4 shard idx 72057594046678944:4 data size 0 row count 0 2025-05-07T08:59:02.008035Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186233409549 maps to shardIdx: 72057594046678944:4 followerId=0, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], pathId map=TTLEnabledTableMoved, is column=0, is olap=0, RowCount 0, DataSize 0 2025-05-07T08:59:02.008085Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:483: Do not want to split tablet 72075186233409549 Name: "SchemeShard/NumShardsByTtlLag" Ranges: "0" Ranges: "900" Ranges: "1800" Ranges: "3600" Ranges: "7200" Ranges: "14400" Ranges: "28800" Ranges: "57600" Ranges: "86400" Ranges: "inf" Values: 0 Values: 2 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 >> AsyncIndexChangeExchange::ShouldNotReorderChangesOnRace [GOOD] >> Cdc::AreJsonsEqualReturnsTrueOnEqual [GOOD] >> Cdc::AreJsonsEqualReturnsFalseOnDifferent [GOOD] >> Cdc::AreJsonsEqualFailsOnWildcardInArray [GOOD] >> Cdc::AlterViaTopicService |91.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/cms/ut/unittest >> TCmsTest::TestForceRestartModeScheduledDisconnects [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/backup_ut/unittest >> BackupRestoreS3::PrefixedVectorIndex [GOOD] Test command err: 2025-05-07T08:56:36.857261Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501625082705264264:2074];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:56:36.857305Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0021c3/r3tmp/tmpvibFLK/pdisk_1.dat 2025-05-07T08:56:37.945633Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:56:37.958624Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:56:37.961699Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:56:37.961775Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:56:37.983085Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 10397, node 1 2025-05-07T08:56:38.379008Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:56:38.379036Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: 
(empty maybe) 2025-05-07T08:56:38.379043Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:56:38.388657Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8149 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:56:39.042873Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:56:41.864615Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501625082705264264:2074];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:56:41.864697Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:56:42.108494Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501625108475069136:2342], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:56:42.108614Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:56:42.413418Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:7501625087000231819:2135] Handle TEvProposeTransaction 2025-05-07T08:56:42.413451Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:7501625087000231819:2135] TxId# 281474976710658 ProcessProposeTransaction 2025-05-07T08:56:42.413515Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:7501625087000231819:2135] Cookie# 0 userReqId# "" txid# 281474976710658 SEND to# [1:7501625108475069164:2638] 2025-05-07T08:56:42.479633Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1520: Actor# [1:7501625108475069164:2638] txid# 281474976710658 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/Root" OperationType: ESchemeOpCreateTable CreateTable { Name: "table" Columns { Name: "Key" Type: "Uint32" NotNull: false } Columns { Name: "Value" Type: "Utf8" NotNull: false } KeyColumnNames: "Key" PartitionConfig { PartitioningPolicy { MinPartitionsCount: 10 SplitByLoadSettings { Enabled: true } } } Temporary: false } } } UserToken: "" DatabaseName: "" 2025-05-07T08:56:42.479677Z node 1 :TX_PROXY DEBUG: schemereq.cpp:563: Actor# [1:7501625108475069164:2638] txid# 281474976710658 Bootstrap, UserSID: CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-05-07T08:56:42.480097Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1585: Actor# [1:7501625108475069164:2638] txid# 281474976710658 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P6 2025-05-07T08:56:42.480182Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1575: Actor# [1:7501625108475069164:2638] txid# 281474976710658 TEvNavigateKeySet requested from SchemeCache 2025-05-07T08:56:42.480410Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1408: Actor# [1:7501625108475069164:2638] txid# 281474976710658 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-05-07T08:56:42.480558Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1455: Actor# [1:7501625108475069164:2638] HANDLE EvNavigateKeySetResult, txid# 281474976710658 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-05-07T08:56:42.480618Z node 1 :TX_PROXY DEBUG: schemereq.cpp:102: Actor# [1:7501625108475069164:2638] txid# 281474976710658 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976710658 TabletId# 72057594046644480} 2025-05-07T08:56:42.480765Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1310: Actor# [1:7501625108475069164:2638] txid# 281474976710658 HANDLE EvClientConnected 2025-05-07T08:56:42.482385Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 2025-05-07T08:56:42.491664Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1332: Actor# [1:7501625108475069164:2638] txid# 281474976710658 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976710658} 2025-05-07T08:56:42.491731Z node 1 :TX_PROXY DEBUG: schemereq.cpp:543: Actor# [1:7501625108475069164:2638] txid# 281474976710658 SEND to# [1:7501625108475069163:2345] Source {TEvProposeTransactionStatus txid# 281474976710658 Status# 53} 2025-05-07T08:56:42.730200Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7501625087000231819:2135] 
Handle TEvNavigate describe path /Root/table 2025-05-07T08:56:42.730263Z node 1 :TX_PROXY DEBUG: describe.cpp:227: Actor# [1:7501625108475069314:2752] HANDLE EvNavigateScheme /Root/table 2025-05-07T08:56:42.730565Z node 1 :TX_PROXY DEBUG: describe.cpp:311: Actor# [1:7501625108475069314:2752] HANDLE EvNavigateKeySetResult TDescribeReq marker# P5 ErrorCount# 0 2025-05-07T08:56:42.730684Z node 1 :TX_PROXY DEBUG: describe.cpp:389: Actor# [1:7501625108475069314:2752] SEND to# 72057594046644480 shardToRequest NKikimrSchemeOp.TDescribePath Path: "/Root/table" Options { ShowPrivateTable: false } 2025-05-07T08:56:42.731825Z node 1 :TX_PROXY DEBUG: describe.cpp:402: Actor# [1:7501625108475069314:2752] Handle TEvDescribeSchemeResult Forward to# [1:7501625108475069312:2351] Cookie: 0 TEvDescribeSchemeResult: NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 20 Record# Status: StatusSuccess Path: "/Root/table" PathDescription { Self { Name: "table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710658 CreateStep: 1746608202689 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "table" Columns { Name: "Key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "Key" KeyColumnIds: 1 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: 
"background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 10 SplitByLoadSettings { Enabled: true } } } TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxComplete ... 2025-05-07T08:59:01.260519Z node 37 :TX_PROXY DEBUG: describe.cpp:402: Actor# [37:7501625705929255286:4748] Handle TEvDescribeSchemeResult Forward to# [37:7501625705929255284:2456] Cookie: 0 TEvDescribeSchemeResult: NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 20 Record# Status: StatusSuccess Path: "/Root/table" PathDescription { Self { Name: "table" PathId: 9 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715760 CreateStep: 1746608338545 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 6 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 6 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 TableSchemaVersion: 3 TablePartitionVersion: 1 } ChildrenExist: true } Table { Name: "table" Columns { Name: "Key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Group" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "Value" Type: "String" TypeId: 4097 Id: 3 NotNull: false IsBuildInProgress: false } KeyColumnNames: "Key" KeyColumnIds: 1 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 
BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 0 MinPartitionsCount: 1 SplitByLoadSettings { Enabled: false } } } TableIndexes { Name: "value_idx" LocalPathId: 10 Type: EIndexTypeGlobalVectorKmeansTree State: EIndexStateReady KeyColumnNames: "Group" KeyColumnNames: "Value" SchemaVersion: 2 PathOwnerId: 72057594046644480 DataSize: 0 IndexImplTableDescriptions { PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 KeepEraseMarkers: false MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 } } } IndexImplTableDescriptions { PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 
PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 KeepEraseMarkers: false MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 } } } IndexImplTableDescriptions { PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 
1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 KeepEraseMarkers: false MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 } } } VectorIndexKmeansTreeDescription { Settings { settings { metric: SIMILARITY_INNER_PRODUCT vector_type: VECTOR_TYPE_FLOAT vector_dimension: 768 } clusters: 80 levels: 2 } } } TableSchemaVersion: 3 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 7 PathsLimit: 10000 ShardsInside: 5 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 9 PathOwnerId: 72057594046644480 >> TestKinesisHttpProxy::TestPing [GOOD] >> TopicAutoscaling::PartitionSplit_PreferedPartition_AutoscaleAwareSDK [GOOD] >> TopicAutoscaling::PartitionSplit_PreferedPartition_PQv1 >> TestYmqHttpProxy::TestSendMessageEmptyQueueUrl [GOOD] >> Cdc::NaN[TopicRunner] [GOOD] >> Cdc::RacyRebootAndSplitWithTxInflight >> TestKinesisHttpProxy::TestRequestBadJson >> TestYmqHttpProxy::TestSendMessageFifoQueue >> TestYmqHttpProxy::TestGetQueueUrl [GOOD] >> TestYmqHttpProxy::TestSendMessage [GOOD] |91.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/fq/libs/control_plane_proxy/ut/ydb-core-fq-libs-control_plane_proxy-ut |91.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/fq/libs/control_plane_proxy/ut/ydb-core-fq-libs-control_plane_proxy-ut |91.4%| [LD] {RESULT} $(B)/ydb/core/fq/libs/control_plane_proxy/ut/ydb-core-fq-libs-control_plane_proxy-ut >> TestYmqHttpProxy::TestGetQueueUrlOfNotExistingQueue >> TestYmqHttpProxy::TestReceiveMessage >> Cdc::DecimalKey [GOOD] >> Cdc::DropColumn >> TopicAutoscaling::ControlPlane_BackCompatibility [GOOD] >> TopicAutoscaling::ControlPlane_AutoscalingWithStorageSizeRetention >> TSchemeShardTTLTests::CreateTableShouldFailOnWrongUnit-EnableTablePgTypes-true [GOOD] >> Cdc::AlterViaTopicService [GOOD] >> Cdc::Alter |91.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/yql/providers/generic/actors/ut/ydb-library-yql-providers-generic-actors-ut |91.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/library/yql/providers/generic/actors/ut/ydb-library-yql-providers-generic-actors-ut |91.4%| [LD] {RESULT} 
$(B)/ydb/library/yql/providers/generic/actors/ut/ydb-library-yql-providers-generic-actors-ut >> Cdc::SupportedTypes [GOOD] >> Cdc::SplitTopicPartition_TopicAutoPartitioning ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLTests::CreateTableShouldFailOnWrongUnit-EnableTablePgTypes-true [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:128:2058] recipient: [1:108:2140] 2025-05-07T08:58:22.327243Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:58:22.327337Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:58:22.327390Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:58:22.327427Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:58:22.327467Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:58:22.327494Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:58:22.327545Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:58:22.327614Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:58:22.328321Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:58:22.328636Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:58:22.407260Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:58:22.407319Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:58:22.432009Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:58:22.432131Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:58:22.432258Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:58:22.442166Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:58:22.442809Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear 
TempDirsState with owners number: 0 2025-05-07T08:58:22.443445Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:22.443739Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:58:22.446128Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:58:22.447797Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:58:22.447876Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:58:22.447936Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:58:22.447990Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:58:22.448037Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:58:22.448265Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:58:22.455948Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:239:2058] recipient: [1:15:2062] 2025-05-07T08:58:22.623267Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:58:22.623488Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:22.623760Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:58:22.624004Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:58:22.624066Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:22.626951Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:22.627161Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: 
StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:58:22.627371Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:22.627435Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:58:22.627481Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:58:22.627512Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:58:22.630864Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:22.630945Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:58:22.630990Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:58:22.635437Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:22.635514Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:22.635576Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:58:22.635657Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:58:22.645924Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:58:22.648380Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:58:22.648583Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:58:22.649590Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:22.649738Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 130 RawX2: 4294969450 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:58:22.649797Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:58:22.650148Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:58:22.650222Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:58:22.650397Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:58:22.650483Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:58:22.652828Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:58:22.652899Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:58:22.653111Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:58:22.653159Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 2025-05-07T08:59:09.020137Z node 37 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:59:09.020279Z node 37 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:59:09.020393Z node 37 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:59:09.020497Z node 37 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:59:09.024488Z node 37 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:59:09.024585Z node 37 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:59:09.024675Z node 37 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:59:09.026555Z node 37 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:59:09.026626Z node 37 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:59:09.026727Z node 37 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:59:09.026837Z node 
37 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:59:09.027117Z node 37 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:59:09.033898Z node 37 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:59:09.034368Z node 37 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:59:09.035938Z node 37 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:59:09.036237Z node 37 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 132 RawX2: 158913792107 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:59:09.036373Z node 37 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:59:09.036911Z node 37 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:59:09.037040Z node 37 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:59:09.037502Z node 37 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:59:09.037685Z node 37 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:59:09.041693Z node 37 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:59:09.041808Z node 37 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:59:09.042229Z node 37 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:59:09.042355Z node 37 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [37:206:2208], at schemeshard: 72057594046678944, txId: 1, path id: 1 2025-05-07T08:59:09.043065Z node 37 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:59:09.043184Z node 37 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046678944] TDone opId# 1:0 ProgressState 2025-05-07T08:59:09.043496Z node 37 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#1:0 progress is 1/1 2025-05-07T08:59:09.043584Z node 37 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-05-07T08:59:09.043706Z node 37 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#1:0 progress is 1/1 2025-05-07T08:59:09.043799Z node 37 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-05-07T08:59:09.043904Z node 37 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 1, ready parts: 1/1, is published: false 2025-05-07T08:59:09.044023Z node 37 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-05-07T08:59:09.044126Z node 37 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 1:0 2025-05-07T08:59:09.044210Z node 37 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 1:0 2025-05-07T08:59:09.044376Z node 37 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-05-07T08:59:09.044477Z node 37 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 1, publications: 1, subscribers: 0 2025-05-07T08:59:09.044573Z node 37 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 1, [OwnerId: 72057594046678944, LocalPathId: 1], 3 2025-05-07T08:59:09.045625Z node 37 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-05-07T08:59:09.045867Z node 37 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-05-07T08:59:09.045987Z node 37 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1 2025-05-07T08:59:09.046102Z node 37 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 3 2025-05-07T08:59:09.046209Z node 37 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:59:09.046432Z node 37 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1, subscribers: 0 2025-05-07T08:59:09.059476Z node 37 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: 
TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1 2025-05-07T08:59:09.060485Z node 37 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1, at schemeshard: 72057594046678944 TestModificationResults wait txId: 101 2025-05-07T08:59:09.062616Z node 37 :TX_PROXY DEBUG: proxy_impl.cpp:434: actor# [37:269:2260] Bootstrap 2025-05-07T08:59:09.115493Z node 37 :TX_PROXY DEBUG: proxy_impl.cpp:453: actor# [37:269:2260] Become StateWork (SchemeCache [37:274:2265]) 2025-05-07T08:59:09.120370Z node 37 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateTable CreateTable { Name: "TTLEnabledTable" Columns { Name: "key" Type: "Uint64" } Columns { Name: "modified_at" Type: "pgint8" } KeyColumnNames: "key" TTLSettings { Enabled { ColumnName: "modified_at" ColumnUnit: UNIT_AUTO } } } } TxId: 101 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:59:09.121227Z node 37 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_table.cpp:426: TCreateTable Propose, path: /MyRoot/TTLEnabledTable, opId: 101:0, at schemeshard: 72057594046678944 2025-05-07T08:59:09.121487Z node 37 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_create_table.cpp:433: TCreateTable Propose, path: /MyRoot/TTLEnabledTable, opId: 101:0, schema: Name: "TTLEnabledTable" Columns { Name: "key" Type: "Uint64" } Columns { Name: "modified_at" Type: "pgint8" } KeyColumnNames: "key" TTLSettings { Enabled { ColumnName: "modified_at" ColumnUnit: UNIT_AUTO } }, at schemeshard: 72057594046678944 2025-05-07T08:59:09.123878Z node 37 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 101:1, propose status:StatusSchemeError, reason: To enable TTL on integral PG type column 'ValueSinceUnixEpochModeSettings' should be specified, at schemeshard: 72057594046678944 2025-05-07T08:59:09.125722Z node 37 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [37:269:2260] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-05-07T08:59:09.137657Z node 37 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 101, response: Status: StatusSchemeError Reason: "To enable TTL on integral PG type column \'ValueSinceUnixEpochModeSettings\' should be specified" TxId: 101 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:59:09.138084Z node 37 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 101, database: /MyRoot, subject: , status: StatusSchemeError, reason: To enable TTL on integral PG type column 'ValueSinceUnixEpochModeSettings' should be specified, operation: CREATE TABLE, path: /MyRoot/TTLEnabledTable 2025-05-07T08:59:09.143675Z node 37 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 TestModificationResult got TxId: 101, wait until txId: 101 |91.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_keys/ydb-core-tx-datashard-ut_keys |91.4%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_keys/ydb-core-tx-datashard-ut_keys |91.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_keys/ydb-core-tx-datashard-ut_keys >> TSchemeShardTTLTests::CreateTableShouldSucceed-EnableTablePgTypes-false [GOOD] >> TopicAutoscaling::PartitionSplit_ReadEmptyPartitions_BeforeAutoscaleAwareSDK 
[GOOD] >> TopicAutoscaling::PartitionSplit_ReadEmptyPartitions_PQv1 >> TSchemeShardViewTest::AsyncCreateSameView >> TSchemeShardViewTest::AsyncDropSameView >> TSchemeShardViewTest::EmptyName >> TSchemeShardViewTest::ReadOnlyMode >> TSchemeShardViewTest::AsyncCreateDifferentViews |91.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_view/unittest >> TSchemeShardViewTest::DropView >> TSchemeShardViewTest::CreateView >> TestKinesisHttpProxy::TestRequestBadJson [GOOD] |91.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_view/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLTests::CreateTableShouldSucceed-EnableTablePgTypes-false [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140] 2025-05-07T08:58:51.581623Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:58:51.581728Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:58:51.581774Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:58:51.581811Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:58:51.581852Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:58:51.581892Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:58:51.581946Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:58:51.585796Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:58:51.586614Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:58:51.587033Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:58:51.673658Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:58:51.673720Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:58:51.691616Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:58:51.691829Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 
2025-05-07T08:58:51.691991Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:58:51.698434Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:58:51.698801Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:58:51.699478Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:51.699686Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:58:51.702914Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:58:51.704277Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:58:51.704344Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:58:51.704414Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:58:51.704458Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:58:51.704496Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:58:51.704734Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:58:51.711910Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T08:58:51.887333Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:58:51.887576Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:51.887816Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:58:51.889339Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:58:51.889434Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 
72057594046678944 2025-05-07T08:58:51.896300Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:51.896457Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:58:51.896648Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:51.896730Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:58:51.896804Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:58:51.896854Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:58:51.900375Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:51.900445Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:58:51.900492Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:58:51.906838Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:51.906923Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:51.906968Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:58:51.907043Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:58:51.932505Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:58:51.939216Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:58:51.939468Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:58:51.940601Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in 
step: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:51.940781Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:58:51.940866Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:58:51.941154Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:58:51.941211Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:58:51.941401Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:58:51.941481Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:58:51.947402Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:58:51.947466Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:58:51.947679Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:58:51.947733Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 
8944, cookie: 101 2025-05-07T08:59:12.251666Z node 18 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 101 2025-05-07T08:59:12.251703Z node 18 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 5 2025-05-07T08:59:12.251743Z node 18 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-05-07T08:59:12.253497Z node 18 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 101 2025-05-07T08:59:12.253599Z node 18 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 101 2025-05-07T08:59:12.253636Z node 18 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 101 2025-05-07T08:59:12.253673Z node 18 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 3 2025-05-07T08:59:12.253718Z node 18 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-05-07T08:59:12.253828Z node 18 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 101, ready parts: 0/1, is published: true 2025-05-07T08:59:12.262706Z node 18 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6245: Handle TEvProposeTransactionResult, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 101 Step: 5000002 OrderId: 101 ExecLatency: 0 ProposeLatency: 2 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 1119 } } 2025-05-07T08:59:12.262772Z node 18 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation FindRelatedPartByTabletId, TxId: 101, tablet: 72075186233409546, partId: 0 2025-05-07T08:59:12.262943Z node 18 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 101:0, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 101 Step: 5000002 OrderId: 101 ExecLatency: 0 ProposeLatency: 2 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 1119 } } 2025-05-07T08:59:12.263049Z node 18 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_part.cpp:108: HandleReply TEvDataShard::TEvProposeTransactionResult Ignore message: tablet# 72057594046678944, ev# TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 101 Step: 5000002 OrderId: 101 ExecLatency: 0 ProposeLatency: 2 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 1119 } } FAKE_COORDINATOR: Erasing txId 101 2025-05-07T08:59:12.264356Z node 18 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard_impl.cpp:5472: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 308 RawX2: 77309413623 } Origin: 72075186233409546 State: 2 TxId: 101 Step: 0 Generation: 2 2025-05-07T08:59:12.264408Z node 18 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation FindRelatedPartByTabletId, TxId: 101, tablet: 72075186233409546, partId: 0 2025-05-07T08:59:12.264534Z node 18 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 101:0, at schemeshard: 72057594046678944, message: Source { RawX1: 308 RawX2: 77309413623 } Origin: 72075186233409546 State: 2 TxId: 101 Step: 0 Generation: 2 2025-05-07T08:59:12.264583Z node 18 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1005: NTableState::TProposedWaitParts operationId# 101:0 HandleReply TEvSchemaChanged at tablet: 72057594046678944 2025-05-07T08:59:12.264675Z node 18 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1009: NTableState::TProposedWaitParts operationId# 101:0 HandleReply TEvSchemaChanged at tablet: 72057594046678944 message: Source { RawX1: 308 RawX2: 77309413623 } Origin: 72075186233409546 State: 2 TxId: 101 Step: 0 Generation: 2 2025-05-07T08:59:12.264752Z node 18 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:655: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 101:0, shardIdx: 72057594046678944:1, datashard: 72075186233409546, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-05-07T08:59:12.264798Z node 18 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:674: all shard schema changes has been received, operationId: 101:0, at schemeshard: 72057594046678944 2025-05-07T08:59:12.264841Z node 18 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:686: send schema changes ack message, operation: 101:0, datashard: 72075186233409546, at schemeshard: 72057594046678944 2025-05-07T08:59:12.264887Z node 18 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 101:0 129 -> 240 2025-05-07T08:59:12.270394Z node 18 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-05-07T08:59:12.270523Z node 18 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-05-07T08:59:12.270681Z node 18 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-05-07T08:59:12.270962Z node 18 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-05-07T08:59:12.271098Z node 18 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 101:0, at schemeshard: 72057594046678944 2025-05-07T08:59:12.271159Z node 18 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046678944] TDone opId# 101:0 ProgressState 2025-05-07T08:59:12.271271Z node 18 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#101:0 progress is 1/1 2025-05-07T08:59:12.271323Z node 18 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 
2025-05-07T08:59:12.271366Z node 18 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#101:0 progress is 1/1 2025-05-07T08:59:12.271396Z node 18 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-05-07T08:59:12.271434Z node 18 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 101, ready parts: 1/1, is published: true 2025-05-07T08:59:12.271515Z node 18 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [18:334:2313] message: TxId: 101 2025-05-07T08:59:12.271572Z node 18 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-05-07T08:59:12.271621Z node 18 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 101:0 2025-05-07T08:59:12.271652Z node 18 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 101:0 2025-05-07T08:59:12.271781Z node 18 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-05-07T08:59:12.274164Z node 18 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-05-07T08:59:12.274225Z node 18 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [18:335:2314] TestWaitNotification: OK eventTxId 101 2025-05-07T08:59:12.274751Z node 18 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/TTLTableWithDyNumberColumn_UNIT_NANOSECONDS" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T08:59:12.274994Z node 18 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/TTLTableWithDyNumberColumn_UNIT_NANOSECONDS" took 283us result status StatusSuccess 2025-05-07T08:59:12.275529Z node 18 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/TTLTableWithDyNumberColumn_UNIT_NANOSECONDS" PathDescription { Self { Name: "TTLTableWithDyNumberColumn_UNIT_NANOSECONDS" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "TTLTableWithDyNumberColumn_UNIT_NANOSECONDS" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "modified_at" Type: "DyNumber" TypeId: 4866 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 1 TTLSettings { Enabled { ColumnName: "modified_at" ExpireAfterSeconds: 3600 ColumnUnit: UNIT_NANOSECONDS Tiers { ApplyAfterSeconds: 3600 Delete { } } } } IsBackup: false IsRestore: false } TableStats { DataSize: 0 
RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 1 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944
>> TestKinesisHttpProxy::TestConsumersEmptyNames
>> TestYmqHttpProxy::TestSendMessageFifoQueue [GOOD]
>> TestYmqHttpProxy::TestGetQueueUrlOfNotExistingQueue [GOOD]
>> TSchemeShardViewTest::EmptyQueryText
>> TestYmqHttpProxy::TestSendMessageWithAttributes
>> TestYmqHttpProxy::TestGetQueueUrlWithIAM
>> TPersQueueTest::PreferredCluster_NonExistentPreferredCluster_SessionDiesOnlyAfterDelay [GOOD]
>> TPersQueueTest::PreferredCluster_EnabledRemotePreferredClusterAndRemoteClusterEnabledDelaySec_SessionDiesOnlyAfterDelay
>> TSchemeShardServerLess::TestServerlessComputeResourcesModeValidation
>> TFlatTableExecutor_IndexLoading::Scan_History_FlatIndex [GOOD]
>> TFlatTableExecutor_IndexLoading::Scan_History_BTreeIndex
>> TGRpcYdbTest::ExecuteQueryImplicitSession
>> TVersions::Wreck0Reverse [GOOD]
>> TopicAutoscaling::ReadingAfterSplitTest_BeforeAutoscaleAwareSDK [GOOD]
>> TopicAutoscaling::ReadingAfterSplitTest_AutoscaleAwareSDK
>> TestYmqHttpProxy::TestReceiveMessage [GOOD]
>> Cdc::Alter [GOOD]
>> Cdc::AddColumn
|91.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/locks/ut_range_treap/ydb-core-tx-locks-ut_range_treap
|91.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/locks/ut_range_treap/ydb-core-tx-locks-ut_range_treap
|91.4%| [LD] {RESULT} $(B)/ydb/core/tx/locks/ut_range_treap/ydb-core-tx-locks-ut_range_treap
>> TopicAutoscaling::PartitionSplit_BeforeAutoscaleAwareSDK [GOOD]
>> TopicAutoscaling::PartitionSplit_AutoscaleAwareSDK
>> TestYmqHttpProxy::TestReceiveMessageWithAttributes
|91.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/fq/libs/row_dispatcher/ut/ydb-core-fq-libs-row_dispatcher-ut
|91.4%| [LD] {RESULT} $(B)/ydb/core/fq/libs/row_dispatcher/ut/ydb-core-fq-libs-row_dispatcher-ut
|91.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/fq/libs/row_dispatcher/ut/ydb-core-fq-libs-row_dispatcher-ut
>> TSchemeShardViewTest::AsyncDropSameView [GOOD]
>> Cdc::DropColumn [GOOD]
>> Cdc::DropIndex
>> Cdc::RacyRebootAndSplitWithTxInflight [GOOD]
>> Cdc::RacyActivateAndEnqueue
>> TSchemeShardViewTest::DropView [GOOD]
>> TSchemeShardViewTest::EmptyName [GOOD]
>> TSchemeShardViewTest::AsyncCreateDifferentViews [GOOD]
>> TSchemeShardViewTest::EmptyQueryText [GOOD]
>> TSchemeShardViewTest::AsyncCreateSameView [GOOD]
>> TSchemeShardViewTest::CreateView [GOOD]
>> TSchemeShardServerLess::TestServerlessComputeResourcesModeValidation [GOOD]
>> TopicAutoscaling::ReadingAfterSplitTest_PreferedPartition_BeforeAutoscaleAwareSDK [GOOD]
>> TopicAutoscaling::ReadingAfterSplitTest_PreferedPartition_PQv1
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_view/unittest >> TSchemeShardViewTest::EmptyName [GOOD]
Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140] 2025-05-07T08:59:16.966666Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:59:16.978145Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:59:16.978230Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:59:16.978287Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:59:16.978363Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:59:16.978419Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:59:16.978508Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:59:16.978584Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:59:16.984577Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:59:17.018657Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:59:17.464703Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:59:17.464803Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:59:17.508713Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:59:17.509002Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:59:17.509266Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:59:17.532512Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:59:17.532933Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:59:17.533722Z node 1
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:59:17.533957Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:59:17.559832Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:59:17.577560Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:59:17.577662Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:59:17.579729Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:59:17.579807Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:59:17.579869Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:59:17.585436Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:59:17.603422Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T08:59:17.853116Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:59:17.853343Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:59:17.853543Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:59:17.853856Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:59:17.853932Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:59:17.868923Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:59:17.869110Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 
2025-05-07T08:59:17.869361Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:59:17.869431Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:59:17.869478Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:59:17.869517Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:59:17.883596Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:59:17.883674Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:59:17.883776Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:59:17.886711Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:59:17.886770Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:59:17.886824Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:59:17.886897Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:59:17.899375Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:59:17.907393Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:59:17.907652Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:59:17.908735Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:59:17.908974Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:59:17.909028Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:59:17.909320Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:59:17.909376Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:59:17.909596Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:59:17.909699Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:59:17.912736Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:59:17.912792Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:59:17.913021Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:59:17.913072Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:205:2207], at schemeshard: 72057594046678944, txId: 1, path id: 1 2025-05-07T08:59:17.913456Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:59:17.913511Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046678944] TDone opId# 1:0 ProgressState 2025-05-07T08:59:17.913617Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#1:0 progress is 1/1 2025-05-07T08:59:17.913658Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-05-07T08:59:17.913697Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#1:0 progress is 1/1 2025-05-07T08:59:17.913730Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-05-07T08:59:17.913770Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 1, ready parts: 1/1, is published: false 2025-05-07T08:59:17.913817Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-05-07T08:59:17.913867Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 1:0 2025-05-07T08:59:17.913900Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 1:0 2025-05-07T08:59:17.915743Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-05-07T08:59:17.915832Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 1, publications: 1, subscribers: 0 2025-05-07T08:59:17.915891Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 1, [OwnerId: 72057594046678944, LocalPathId: 1], 3 2025-05-07T08:59:17.918444Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-05-07T08:59:17.918602Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-05-07T08:59:17.918650Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1 2025-05-07T08:59:17.918700Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 3 2025-05-07T08:59:17.918770Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:59:17.918893Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1, subscribers: 0 2025-05-07T08:59:17.924753Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1 2025-05-07T08:59:17.925353Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1, at schemeshard: 72057594046678944 TestModificationResults wait txId: 101 2025-05-07T08:59:17.930388Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateView CreateView { Name: "" QueryText: "Some query" } } TxId: 101 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:59:17.934206Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_view.cpp:118: [72057594046678944] TCreateView Propose, path: /MyRoot/, opId: 101:0 2025-05-07T08:59:17.934324Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_create_view.cpp:124: [72057594046678944] TCreateView Propose, path: /MyRoot/, opId: 101:0, viewDescription: Name: "" QueryText: "Some query" 2025-05-07T08:59:17.934466Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 101:1, propose status:StatusSchemeError, reason: Check failed: path: '/MyRoot/', error: path part shouldn't be empty, at schemeshard: 72057594046678944 2025-05-07T08:59:17.954388Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:434: actor# [1:268:2259] Bootstrap 2025-05-07T08:59:17.976885Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:453: actor# [1:268:2259] Become StateWork (SchemeCache [1:273:2264]) 2025-05-07T08:59:17.977635Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:268:2259] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-05-07T08:59:17.980832Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 101, response: Status: StatusSchemeError Reason: "Check failed: path: \'/MyRoot/\', error: path part shouldn\'t be empty" TxId: 101 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:59:17.981031Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 101, database: /MyRoot, subject: , status: StatusSchemeError, reason: Check failed: path: '/MyRoot/', error: path part shouldn't be empty, operation: CREATE VIEW, path: /MyRoot/ 2025-05-07T08:59:17.981608Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 TestModificationResult got TxId: 101, wait until txId: 101
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_view/unittest >> TSchemeShardViewTest::AsyncDropSameView [GOOD]
Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:128:2058] recipient: [1:108:2140] 2025-05-07T08:59:16.967149Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:59:16.979025Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:59:16.979136Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:59:16.979195Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:59:16.979267Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:59:16.979300Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:59:16.979376Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:59:16.979451Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:59:16.985312Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:59:17.016325Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:59:17.447011Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:59:17.447110Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:59:17.490129Z node 1
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:59:17.490402Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:59:17.501067Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:59:17.513125Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:59:17.513764Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:59:17.516229Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:59:17.519018Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:59:17.562996Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:59:17.579005Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:59:17.579093Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:59:17.579189Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:59:17.579250Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:59:17.579303Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:59:17.582147Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:59:17.604330Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:239:2058] recipient: [1:15:2062] 2025-05-07T08:59:17.773796Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:59:17.780417Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:59:17.792190Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:59:17.801084Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:59:17.801235Z 
node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:59:17.805873Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:59:17.811570Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:59:17.813428Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:59:17.813507Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:59:17.813547Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:59:17.813575Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:59:17.816743Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:59:17.816825Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:59:17.816882Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:59:17.819486Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:59:17.819545Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:59:17.819595Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:59:17.819665Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:59:17.842992Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:59:17.853632Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:59:17.858072Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 
72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:59:17.860872Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:59:17.861089Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 130 RawX2: 4294969450 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:59:17.861156Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:59:17.870113Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:59:17.870262Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:59:17.872945Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:59:17.873090Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:59:17.880053Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:59:17.880118Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:59:17.880375Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:59:17.880438Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 
p Execute, stepId: 5000003, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:59:18.004801Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 102 Coordinator: 72057594046316545 AckTo { RawX1: 130 RawX2: 4294969450 } } Step: 5000003 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:59:18.004872Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_view.cpp:43: [72057594046678944] TDropView TPropose, opId: 102:0 HandleReply TEvOperationPlan, step: 5000003 2025-05-07T08:59:18.005052Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 102:0 128 -> 240 2025-05-07T08:59:18.005236Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:59:18.005303Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-05-07T08:59:18.010082Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:59:18.010151Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 102, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:59:18.010319Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 102, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-05-07T08:59:18.010492Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:59:18.010546Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:206:2208], at schemeshard: 72057594046678944, txId: 102, path id: 1 2025-05-07T08:59:18.010603Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:206:2208], at schemeshard: 72057594046678944, txId: 102, path id: 2 FAKE_COORDINATOR: Erasing txId 102 2025-05-07T08:59:18.011163Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-05-07T08:59:18.011220Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046678944] TDone opId# 102:0 ProgressState 2025-05-07T08:59:18.011316Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#102:0 progress is 1/1 2025-05-07T08:59:18.011354Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-05-07T08:59:18.011414Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#102:0 progress is 1/1 2025-05-07T08:59:18.011445Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-05-07T08:59:18.011475Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/1, is 
published: false 2025-05-07T08:59:18.011522Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-05-07T08:59:18.011562Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 102:0 2025-05-07T08:59:18.011593Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 102:0 2025-05-07T08:59:18.011675Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-05-07T08:59:18.011713Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 102, publications: 2, subscribers: 0 2025-05-07T08:59:18.011743Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 102, [OwnerId: 72057594046678944, LocalPathId: 1], 5 2025-05-07T08:59:18.011773Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 102, [OwnerId: 72057594046678944, LocalPathId: 2], 18446744073709551615 2025-05-07T08:59:18.012425Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 102 2025-05-07T08:59:18.012534Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 102 2025-05-07T08:59:18.012593Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 102 2025-05-07T08:59:18.012637Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 5 2025-05-07T08:59:18.012710Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-05-07T08:59:18.013245Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 102 2025-05-07T08:59:18.013328Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 102 2025-05-07T08:59:18.013359Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 102 2025-05-07T08:59:18.013402Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 18446744073709551615 2025-05-07T08:59:18.013447Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove 
publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-05-07T08:59:18.013533Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 102, subscribers: 0 2025-05-07T08:59:18.014269Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-05-07T08:59:18.014339Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-05-07T08:59:18.014412Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:59:18.018786Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-05-07T08:59:18.018918Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-05-07T08:59:18.018992Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 102, wait until txId: 102 TestModificationResults wait txId: 103 TestModificationResult got TxId: 103, wait until txId: 103 TestModificationResults wait txId: 104 TestModificationResult got TxId: 104, wait until txId: 104 TestWaitNotification wait txId: 103 2025-05-07T08:59:18.019360Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 103: send EvNotifyTxCompletion 2025-05-07T08:59:18.019418Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 103 TestWaitNotification wait txId: 104 2025-05-07T08:59:18.019520Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 104: send EvNotifyTxCompletion 2025-05-07T08:59:18.019550Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 104 2025-05-07T08:59:18.020056Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 103, at schemeshard: 72057594046678944 2025-05-07T08:59:18.020192Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-05-07T08:59:18.020250Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [1:327:2318] 2025-05-07T08:59:18.020398Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 104, at schemeshard: 72057594046678944 2025-05-07T08:59:18.020544Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 104: got EvNotifyTxCompletionResult 2025-05-07T08:59:18.020569Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 104: satisfy waiter [1:327:2318] TestWaitNotification: OK eventTxId 103 TestWaitNotification: OK eventTxId 104 2025-05-07T08:59:18.021097Z node 1 
:SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/MyView" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T08:59:18.021297Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/MyView" took 238us result status StatusPathDoesNotExist 2025-05-07T08:59:18.021488Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/MyView\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/MyView" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944
>> CommitOffset::PartitionSplit_OffsetCommit [GOOD]
>> CommitOffset::DistributedTxCommit
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_view/unittest >> TSchemeShardViewTest::DropView [GOOD]
Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140] 2025-05-07T08:59:16.966865Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:59:16.980086Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:59:16.980167Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:59:16.980201Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:59:16.980292Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:59:16.980336Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:59:16.980389Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:59:16.980442Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s,
CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:59:16.984952Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:59:17.014149Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:59:17.484112Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:59:17.484170Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:59:17.534504Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:59:17.534761Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:59:17.534944Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:59:17.561023Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:59:17.561294Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:59:17.561802Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:59:17.561953Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:59:17.572905Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:59:17.577311Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:59:17.577391Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:59:17.578766Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:59:17.578829Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:59:17.578885Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:59:17.584414Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:59:17.597578Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T08:59:17.936555Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 
2025-05-07T08:59:17.936777Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:59:17.936981Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:59:17.937207Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:59:17.937275Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:59:17.944144Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:59:17.944314Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:59:17.944545Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:59:17.944622Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:59:17.944658Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:59:17.944702Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:59:17.946490Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:59:17.946542Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:59:17.946578Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:59:17.950780Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:59:17.950851Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:59:17.950896Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:59:17.950958Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:59:17.956635Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 
72057594046316545 2025-05-07T08:59:17.966895Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:59:17.967116Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:59:17.968225Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:59:17.968369Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:59:17.968419Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:59:17.968721Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:59:17.968784Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:59:17.968977Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:59:17.969075Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:59:17.982966Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:59:17.983021Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:59:17.983253Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:59:17.983299Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 
hard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 102 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:59:18.072120Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 102:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:102 msg type: 269090816 2025-05-07T08:59:18.072252Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 102, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 102 at step: 5000003 FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000002 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 102 at step: 5000003 2025-05-07T08:59:18.072577Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000003, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:59:18.072710Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 102 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000003 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:59:18.072759Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_view.cpp:43: [72057594046678944] TDropView TPropose, opId: 102:0 HandleReply TEvOperationPlan, step: 5000003 2025-05-07T08:59:18.072879Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 102:0 128 -> 240 2025-05-07T08:59:18.073029Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:59:18.073098Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 FAKE_COORDINATOR: Erasing txId 102 2025-05-07T08:59:18.075389Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:59:18.075437Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 102, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:59:18.075570Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 102, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-05-07T08:59:18.075740Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:59:18.075804Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:205:2207], at schemeshard: 72057594046678944, txId: 102, path id: 1 2025-05-07T08:59:18.075851Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:205:2207], at schemeshard: 72057594046678944, txId: 102, path id: 2 2025-05-07T08:59:18.076145Z 
node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-05-07T08:59:18.076194Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046678944] TDone opId# 102:0 ProgressState 2025-05-07T08:59:18.076310Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#102:0 progress is 1/1 2025-05-07T08:59:18.076347Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-05-07T08:59:18.076388Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#102:0 progress is 1/1 2025-05-07T08:59:18.076422Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-05-07T08:59:18.076456Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: false 2025-05-07T08:59:18.076500Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-05-07T08:59:18.076534Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 102:0 2025-05-07T08:59:18.076576Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 102:0 2025-05-07T08:59:18.076738Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-05-07T08:59:18.076778Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 102, publications: 2, subscribers: 0 2025-05-07T08:59:18.076810Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 102, [OwnerId: 72057594046678944, LocalPathId: 1], 5 2025-05-07T08:59:18.076842Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 102, [OwnerId: 72057594046678944, LocalPathId: 2], 18446744073709551615 2025-05-07T08:59:18.077488Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 102 2025-05-07T08:59:18.077588Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 102 2025-05-07T08:59:18.077643Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 102 2025-05-07T08:59:18.077693Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 5 2025-05-07T08:59:18.077744Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-05-07T08:59:18.078492Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: 
Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 102 2025-05-07T08:59:18.078643Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 102 2025-05-07T08:59:18.078679Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 102 2025-05-07T08:59:18.078706Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 18446744073709551615 2025-05-07T08:59:18.078761Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-05-07T08:59:18.078871Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 102, subscribers: 0 2025-05-07T08:59:18.079694Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-05-07T08:59:18.079766Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-05-07T08:59:18.079861Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:59:18.087209Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-05-07T08:59:18.088119Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-05-07T08:59:18.088614Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 102, wait until txId: 102 TestWaitNotification wait txId: 102 2025-05-07T08:59:18.088851Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2025-05-07T08:59:18.088893Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 2025-05-07T08:59:18.089358Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 2025-05-07T08:59:18.089441Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-05-07T08:59:18.089495Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:322:2313] TestWaitNotification: OK eventTxId 102 2025-05-07T08:59:18.090063Z node 1 
:SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/MyView" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T08:59:18.090250Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/MyView" took 231us result status StatusPathDoesNotExist 2025-05-07T08:59:18.090447Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/MyView\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/MyView" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_view/unittest >> TSchemeShardViewTest::AsyncCreateSameView [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140] 2025-05-07T08:59:16.967405Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:59:16.980997Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:59:16.981091Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:59:16.981139Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:59:16.981180Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:59:16.981228Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:59:16.981310Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:59:16.981380Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:59:16.985117Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:59:17.015323Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:59:17.431448Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:59:17.431509Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:59:17.525680Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:59:17.534230Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:59:17.534520Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:59:17.569552Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:59:17.569943Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:59:17.570782Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:59:17.571011Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:59:17.580025Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:59:17.581527Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:59:17.581602Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:59:17.581691Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:59:17.581738Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:59:17.581781Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:59:17.582046Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:59:17.589324Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T08:59:17.852421Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:59:17.852694Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: 
TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:59:17.852921Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:59:17.853182Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:59:17.853253Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:59:17.857304Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:59:17.857481Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:59:17.857700Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:59:17.857783Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:59:17.857825Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:59:17.857859Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:59:17.861099Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:59:17.861173Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:59:17.861215Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:59:17.866490Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:59:17.866556Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:59:17.866607Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:59:17.866671Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:59:17.877747Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:59:17.883037Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:59:17.883268Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:59:17.884416Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:59:17.884606Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:59:17.884654Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:59:17.884962Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:59:17.885014Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:59:17.885217Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:59:17.885299Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:59:17.894608Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:59:17.894665Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:59:17.894899Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:59:17.894945Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 
TCreateView::TPropose, opId: 101:0 HandleReply TEvPrivate::TEvOperationPlan, step: 5000002 2025-05-07T08:59:17.974228Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 101:0 128 -> 240 2025-05-07T08:59:17.974482Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:59:17.974548Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 FAKE_COORDINATOR: Erasing txId 101 2025-05-07T08:59:17.977727Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:59:17.977790Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 101, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:59:17.977988Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 101, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-05-07T08:59:17.978134Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:59:17.978204Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:205:2207], at schemeshard: 72057594046678944, txId: 101, path id: 1 2025-05-07T08:59:17.978290Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:205:2207], at schemeshard: 72057594046678944, txId: 101, path id: 2 2025-05-07T08:59:17.978598Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 101:0, at schemeshard: 72057594046678944 2025-05-07T08:59:17.978642Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046678944] TDone opId# 101:0 ProgressState 2025-05-07T08:59:17.978744Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#101:0 progress is 1/1 2025-05-07T08:59:17.978776Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-05-07T08:59:17.978834Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#101:0 progress is 1/1 2025-05-07T08:59:17.978866Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-05-07T08:59:17.978912Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 101, ready parts: 1/1, is published: false 2025-05-07T08:59:17.978971Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-05-07T08:59:17.979010Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 101:0 2025-05-07T08:59:17.979041Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 101:0 2025-05-07T08:59:17.979109Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-05-07T08:59:17.979145Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 101, publications: 2, subscribers: 0 2025-05-07T08:59:17.979182Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 1], 4 2025-05-07T08:59:17.979213Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 2], 2 2025-05-07T08:59:17.980035Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 4 PathOwnerId: 72057594046678944, cookie: 101 2025-05-07T08:59:17.980156Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 4 PathOwnerId: 72057594046678944, cookie: 101 2025-05-07T08:59:17.980200Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 101 2025-05-07T08:59:17.980254Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 4 2025-05-07T08:59:17.980298Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-05-07T08:59:17.981033Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 2 PathOwnerId: 72057594046678944, cookie: 101 2025-05-07T08:59:17.981109Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 2 PathOwnerId: 72057594046678944, cookie: 101 2025-05-07T08:59:17.981146Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 101 2025-05-07T08:59:17.981185Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 2 2025-05-07T08:59:17.981213Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-05-07T08:59:17.981292Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 101, subscribers: 0 2025-05-07T08:59:17.984495Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-05-07T08:59:17.984606Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: 
TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 TestModificationResult got TxId: 101, wait until txId: 101 TestModificationResults wait txId: 102 TestModificationResult got TxId: 102, wait until txId: 102 TestModificationResults wait txId: 103 TestModificationResult got TxId: 103, wait until txId: 103 TestWaitNotification wait txId: 101 2025-05-07T08:59:17.984906Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 101: send EvNotifyTxCompletion 2025-05-07T08:59:17.984951Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 101 TestWaitNotification wait txId: 102 2025-05-07T08:59:17.985050Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2025-05-07T08:59:17.985070Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 TestWaitNotification wait txId: 103 2025-05-07T08:59:17.985133Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 103: send EvNotifyTxCompletion 2025-05-07T08:59:17.985153Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 103 2025-05-07T08:59:17.985716Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 2025-05-07T08:59:17.985863Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-05-07T08:59:17.985901Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:302:2293] 2025-05-07T08:59:17.986057Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 2025-05-07T08:59:17.986163Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 103, at schemeshard: 72057594046678944 2025-05-07T08:59:17.986236Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-05-07T08:59:17.986265Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:302:2293] 2025-05-07T08:59:17.986364Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-05-07T08:59:17.986386Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [1:302:2293] TestWaitNotification: OK eventTxId 101 TestWaitNotification: OK eventTxId 102 TestWaitNotification: OK eventTxId 103 2025-05-07T08:59:17.986875Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/MyView" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T08:59:17.987057Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/MyView" took 209us result status StatusSuccess 2025-05-07T08:59:17.987396Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: 
schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/MyView" PathDescription { Self { Name: "MyView" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeView CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 ViewVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } ViewDescription { Name: "MyView" PathId { OwnerId: 72057594046678944 LocalId: 2 } Version: 1 QueryText: "Some query" CapturedContext { } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TSchemeShardViewTest::ReadOnlyMode [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_view/unittest >> TSchemeShardViewTest::CreateView [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140] 2025-05-07T08:59:16.966274Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:59:16.974973Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:59:16.975071Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:59:16.975128Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:59:16.975228Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:59:16.975296Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:59:16.975389Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:59:16.975479Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, 
IsManualStartup# false 2025-05-07T08:59:16.984120Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:59:17.011568Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:59:17.418084Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:59:17.418207Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:59:17.528093Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:59:17.528325Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:59:17.528549Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:59:17.548587Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:59:17.548976Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:59:17.549702Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:59:17.549929Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:59:17.561804Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:59:17.578283Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:59:17.578364Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:59:17.578463Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:59:17.578518Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:59:17.578566Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:59:17.587202Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:59:17.604701Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T08:59:17.967784Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:59:17.968044Z node 1 :FLAT_TX_SCHEMESHARD 
NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:59:17.968274Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:59:17.968578Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:59:17.968713Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:59:17.975191Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:59:17.975452Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:59:17.975717Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:59:17.975801Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:59:17.975850Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:59:17.975890Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:59:17.980360Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:59:17.980430Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:59:17.980500Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:59:17.991326Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:59:17.991408Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:59:17.991459Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:59:17.991536Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:59:17.995981Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:59:17.999144Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:59:17.999410Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:59:18.000589Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:59:18.000808Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:59:18.000864Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:59:18.001239Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:59:18.001311Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:59:18.001523Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:59:18.001617Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:59:18.005618Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:59:18.005681Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:59:18.005908Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:59:18.005954Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 
eration.cpp:1638: TOperation IsReadyToPropose , TxId: 100 ready parts: 1/1 2025-05-07T08:59:18.061557Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 100 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:59:18.062491Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 2025-05-07T08:59:18.064248Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 100:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:100 msg type: 269090816 2025-05-07T08:59:18.064429Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 100, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 100 at step: 5000002 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000001 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 100 at step: 5000002 2025-05-07T08:59:18.064843Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000002, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:59:18.064987Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 100 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000002 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:59:18.065072Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_create_view.cpp:45: [72057594046678944] TCreateView::TPropose, opId: 100:0 HandleReply TEvPrivate::TEvOperationPlan, step: 5000002 2025-05-07T08:59:18.065252Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 100:0 128 -> 240 2025-05-07T08:59:18.065484Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:59:18.065569Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 FAKE_COORDINATOR: Erasing txId 100 2025-05-07T08:59:18.067809Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:59:18.067864Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 100, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:59:18.068056Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 100, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-05-07T08:59:18.068156Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:59:18.068206Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: 
TTxPublishToSchemeBoard Send, to populator: [1:205:2207], at schemeshard: 72057594046678944, txId: 100, path id: 1 2025-05-07T08:59:18.068296Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:205:2207], at schemeshard: 72057594046678944, txId: 100, path id: 2 2025-05-07T08:59:18.068810Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 100:0, at schemeshard: 72057594046678944 2025-05-07T08:59:18.068882Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046678944] TDone opId# 100:0 ProgressState 2025-05-07T08:59:18.068988Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#100:0 progress is 1/1 2025-05-07T08:59:18.069027Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 100 ready parts: 1/1 2025-05-07T08:59:18.069082Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#100:0 progress is 1/1 2025-05-07T08:59:18.069125Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 100 ready parts: 1/1 2025-05-07T08:59:18.069178Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 100, ready parts: 1/1, is published: false 2025-05-07T08:59:18.069226Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 100 ready parts: 1/1 2025-05-07T08:59:18.069266Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 100:0 2025-05-07T08:59:18.069301Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 100:0 2025-05-07T08:59:18.069372Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-05-07T08:59:18.069422Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 100, publications: 2, subscribers: 0 2025-05-07T08:59:18.069459Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 100, [OwnerId: 72057594046678944, LocalPathId: 1], 4 2025-05-07T08:59:18.069525Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 100, [OwnerId: 72057594046678944, LocalPathId: 2], 2 2025-05-07T08:59:18.070269Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 4 PathOwnerId: 72057594046678944, cookie: 100 2025-05-07T08:59:18.070400Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 4 PathOwnerId: 72057594046678944, cookie: 100 2025-05-07T08:59:18.070460Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 100 2025-05-07T08:59:18.070504Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 100, pathId: 
[OwnerId: 72057594046678944, LocalPathId: 1], version: 4 2025-05-07T08:59:18.070554Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-05-07T08:59:18.071336Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 2 PathOwnerId: 72057594046678944, cookie: 100 2025-05-07T08:59:18.071421Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 2 PathOwnerId: 72057594046678944, cookie: 100 2025-05-07T08:59:18.071454Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 100 2025-05-07T08:59:18.071487Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 100, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 2 2025-05-07T08:59:18.071520Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-05-07T08:59:18.071587Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 100, subscribers: 0 2025-05-07T08:59:18.075661Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 100 2025-05-07T08:59:18.076090Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 100 TestModificationResult got TxId: 100, wait until txId: 100 TestWaitNotification wait txId: 101 2025-05-07T08:59:18.076319Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 101: send EvNotifyTxCompletion 2025-05-07T08:59:18.076368Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 101 2025-05-07T08:59:18.076840Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 2025-05-07T08:59:18.076941Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-05-07T08:59:18.076980Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:298:2289] TestWaitNotification: OK eventTxId 101 2025-05-07T08:59:18.077502Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/MyView" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T08:59:18.077686Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/MyView" took 212us result status StatusSuccess 2025-05-07T08:59:18.078119Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: 
schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/MyView" PathDescription { Self { Name: "MyView" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeView CreateFinished: true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 ViewVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } ViewDescription { Name: "MyView" PathId { OwnerId: 72057594046678944 LocalId: 2 } Version: 1 QueryText: "Some query" CapturedContext { } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_view/unittest >> TSchemeShardViewTest::AsyncCreateDifferentViews [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140] 2025-05-07T08:59:16.967290Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:59:16.979547Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:59:16.979648Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:59:16.979702Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:59:16.979756Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:59:16.979826Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:59:16.979909Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:59:16.979999Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 
2025-05-07T08:59:16.984780Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:59:17.018353Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:59:17.517276Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:59:17.517364Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:59:17.553561Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:59:17.553813Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:59:17.554376Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:59:17.573079Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:59:17.573395Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:59:17.574246Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:59:17.574518Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:59:17.578827Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:59:17.581530Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:59:17.581605Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:59:17.581689Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:59:17.581745Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:59:17.581794Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:59:17.582063Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:59:17.598976Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T08:59:17.931897Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:59:17.932155Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:59:17.932404Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:59:17.932741Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:59:17.932836Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:59:17.939513Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:59:17.939684Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:59:17.939936Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:59:17.940017Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:59:17.940054Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:59:17.940082Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:59:17.942441Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:59:17.942499Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:59:17.942588Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:59:17.946507Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:59:17.946558Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:59:17.946608Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:59:17.946675Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:59:17.954819Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:59:17.957074Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:59:17.957340Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:59:17.958451Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:59:17.958639Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:59:17.958692Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:59:17.959019Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:59:17.959086Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:59:17.959298Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:59:17.959554Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:59:17.961818Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:59:17.961865Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:59:17.962102Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:59:17.962159Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 
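The entries above trace the scheme board publication handshake: TTxPublishToSchemeBoard describes each path and sends it to the populator, each incoming TEvUpdateAck decrements the per-transaction in-flight count, and the transaction is notified once that count reaches zero ("Publication complete, notify & remove"). Below is a minimal standalone sketch of that ack-counting pattern; the types and names are hypothetical illustrations, not the actual TSchemeShard implementation.

// Sketch (hypothetical types) of the publish/ack bookkeeping visible in the
// log: a transaction records the path versions it published, drops an entry
// on each sufficiently recent ack, and completes when nothing is in flight.
#include <cstdint>
#include <iostream>
#include <map>
#include <utility>

struct TPathId {
    uint64_t OwnerId;
    uint64_t LocalPathId;
    bool operator<(const TPathId& o) const {
        return std::pair(OwnerId, LocalPathId) < std::pair(o.OwnerId, o.LocalPathId);
    }
};

class TPublicationTracker {
    // txId -> (pathId -> published version still waiting for an ack)
    std::map<uint64_t, std::map<TPathId, uint64_t>> InFlight;

public:
    void StartPublish(uint64_t txId, TPathId pathId, uint64_t version) {
        InFlight[txId][pathId] = version;
    }

    // Mirrors "Handle TEvUpdateAck" / "AckPublish": erase the entry once the
    // acked version covers the published one, then check for completion.
    void AckPublish(uint64_t txId, TPathId pathId, uint64_t ackedVersion) {
        auto tx = InFlight.find(txId);
        if (tx == InFlight.end()) return;
        auto path = tx->second.find(pathId);
        if (path != tx->second.end() && ackedVersion >= path->second)
            tx->second.erase(path);
        std::cout << "Publication in-flight, count: " << tx->second.size()
                  << ", txId: " << txId << '\n';
        if (tx->second.empty()) {
            std::cout << "Publication complete, notify & remove, txId: "
                      << txId << '\n';
            InFlight.erase(tx);
        }
    }
};

int main() {
    TPublicationTracker tracker;
    // Path versions taken from the txId 101 entries in the log above.
    tracker.StartPublish(101, {72057594046678944, 1}, 4);
    tracker.StartPublish(101, {72057594046678944, 2}, 2);
    tracker.AckPublish(101, {72057594046678944, 1}, 4); // count drops to 1
    tracker.AckPublish(101, {72057594046678944, 2}, 2); // publication complete
}

Run against the versions from txId 101 above, the sketch prints the same in-flight countdown the log shows.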
Ack, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 5 PathOwnerId: 72057594046678944, cookie: 102 2025-05-07T08:59:18.084586Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 5 PathOwnerId: 72057594046678944, cookie: 102 2025-05-07T08:59:18.084628Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 102 2025-05-07T08:59:18.084662Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 5 2025-05-07T08:59:18.084717Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-05-07T08:59:18.085480Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 102 2025-05-07T08:59:18.085569Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 102 2025-05-07T08:59:18.085605Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 102 2025-05-07T08:59:18.085633Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 2 2025-05-07T08:59:18.085684Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-05-07T08:59:18.085755Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 102, subscribers: 0 2025-05-07T08:59:18.089497Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-05-07T08:59:18.094862Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 TestModificationResult got TxId: 101, wait until txId: 101 TestModificationResults wait txId: 102 TestModificationResult got TxId: 102, wait until txId: 102 TestModificationResults wait txId: 103 TestModificationResult got TxId: 103, wait until txId: 103 TestWaitNotification wait txId: 101 2025-05-07T08:59:18.095221Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 101: send EvNotifyTxCompletion 2025-05-07T08:59:18.095268Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 101 TestWaitNotification wait txId: 102 2025-05-07T08:59:18.095366Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- 
TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2025-05-07T08:59:18.095386Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 TestWaitNotification wait txId: 103 2025-05-07T08:59:18.095416Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 103: send EvNotifyTxCompletion 2025-05-07T08:59:18.095430Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 103 2025-05-07T08:59:18.095859Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 2025-05-07T08:59:18.095935Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-05-07T08:59:18.095965Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:335:2326] 2025-05-07T08:59:18.096083Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 2025-05-07T08:59:18.096164Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-05-07T08:59:18.096191Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:335:2326] 2025-05-07T08:59:18.096258Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 103, at schemeshard: 72057594046678944 2025-05-07T08:59:18.096301Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-05-07T08:59:18.096318Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [1:335:2326] TestWaitNotification: OK eventTxId 101 TestWaitNotification: OK eventTxId 102 TestWaitNotification: OK eventTxId 103 2025-05-07T08:59:18.096783Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/SomeDir" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T08:59:18.096943Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/SomeDir" took 183us result status StatusSuccess 2025-05-07T08:59:18.097641Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/SomeDir" PathDescription { Self { Name: "SomeDir" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 4 } ChildrenExist: true } Children { Name: "FirstView" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeView CreateFinished: true CreateTxId: 102 CreateStep: 5000004 ParentPathId: 2 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } 
Children { Name: "SecondView" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeView CreateFinished: true CreateTxId: 103 CreateStep: 5000003 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:59:18.098269Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/SomeDir/FirstView" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T08:59:18.098465Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/SomeDir/FirstView" took 207us result status StatusSuccess 2025-05-07T08:59:18.098829Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/SomeDir/FirstView" PathDescription { Self { Name: "FirstView" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeView CreateFinished: true CreateTxId: 102 CreateStep: 5000004 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 ViewVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } ViewDescription { Name: "FirstView" PathId { OwnerId: 72057594046678944 LocalId: 3 } Version: 1 QueryText: "First query" CapturedContext { } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:59:18.099474Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/SomeDir/SecondView" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T08:59:18.099719Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/SomeDir/SecondView" took 211us result status StatusSuccess 2025-05-07T08:59:18.099998Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: 
schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/SomeDir/SecondView" PathDescription { Self { Name: "SecondView" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeView CreateFinished: true CreateTxId: 103 CreateStep: 5000003 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 ViewVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } ViewDescription { Name: "SecondView" PathId { OwnerId: 72057594046678944 LocalId: 4 } Version: 1 QueryText: "Second query" CapturedContext { } } } PathId: 4 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_view/unittest >> TSchemeShardViewTest::EmptyQueryText [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:128:2058] recipient: [1:108:2140] 2025-05-07T08:59:16.966515Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:59:16.976677Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:59:16.976791Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:59:16.976847Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:59:16.976915Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:59:16.976950Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:59:16.977026Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:59:16.977099Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 
2025-05-07T08:59:16.984390Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:59:17.012919Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:59:17.456933Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:59:17.457017Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:59:17.500152Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:59:17.500289Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:59:17.508396Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:59:17.543295Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:59:17.548671Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:59:17.549744Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:59:17.550144Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:59:17.559274Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:59:17.579282Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:59:17.579381Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:59:17.579459Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:59:17.579526Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:59:17.579588Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:59:17.584573Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:59:17.599738Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:239:2058] recipient: [1:15:2062] 2025-05-07T08:59:17.987315Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:59:17.987589Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:59:17.987854Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:59:17.988154Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:59:17.988256Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:59:17.998951Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:59:17.999194Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:59:17.999425Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:59:17.999536Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:59:17.999602Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:59:17.999642Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:59:18.006154Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:59:18.006258Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:59:18.006309Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:59:18.018939Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:59:18.019020Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:59:18.019086Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:59:18.019158Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:59:18.023407Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:59:18.028110Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:59:18.028342Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:59:18.029477Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:59:18.029714Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 130 RawX2: 4294969450 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:59:18.029777Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:59:18.030158Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:59:18.030226Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:59:18.030434Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:59:18.030531Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:59:18.036401Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:59:18.036474Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:59:18.036703Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:59:18.036764Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 
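The repeated "Change state for txid 1:0 2 -> 3 -> 128 -> 240" entries walk one suboperation through its lifecycle: create its parts, configure them, propose to the coordinator, and finish once the plan step arrives via TEvOperationPlan. A compact sketch of that progression follows; the numeric values are copied from the log, while the enum names are assumptions added only for readability.

// Illustrative state machine for the txid progression seen in the log.
// 2, 3, 128, 240 are the raw logged values; the names are assumed labels.
#include <iostream>

enum class ETxState : int {
    CreateParts    = 2,    // "TCreateParts ... ProgressState"
    ConfigureParts = 3,    // "TConfigureParts ... ProgressState"
    Propose        = 128,  // waiting on the coordinator's plan step
    Done           = 240,  // "TDone ... ProgressState"
};

// Advance one step, as each TTxOperationProgress execution does.
ETxState Next(ETxState s) {
    switch (s) {
        case ETxState::CreateParts:    return ETxState::ConfigureParts;
        case ETxState::ConfigureParts: return ETxState::Propose;
        case ETxState::Propose:        return ETxState::Done; // on TEvOperationPlan
        case ETxState::Done:           return ETxState::Done;
    }
    return ETxState::Done;
}

int main() {
    ETxState s = ETxState::CreateParts;
    while (s != ETxState::Done) {
        ETxState n = Next(s);
        std::cout << "Change state for txid 1:0 "
                  << static_cast<int>(s) << " -> " << static_cast<int>(n) << '\n';
        s = n;
    }
}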
44 TestModificationResults wait txId: 101 2025-05-07T08:59:18.054325Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:434: actor# [1:269:2260] Bootstrap 2025-05-07T08:59:18.070594Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:453: actor# [1:269:2260] Become StateWork (SchemeCache [1:274:2265]) 2025-05-07T08:59:18.073355Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateView CreateView { Name: "MyView" QueryText: "" } } TxId: 101 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:59:18.073693Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_view.cpp:118: [72057594046678944] TCreateView Propose, path: /MyRoot/MyView, opId: 101:0 2025-05-07T08:59:18.073774Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_create_view.cpp:124: [72057594046678944] TCreateView Propose, path: /MyRoot/MyView, opId: 101:0, viewDescription: Name: "MyView" QueryText: "" 2025-05-07T08:59:18.073916Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:319: AttachChild: child attached as only one child to the parent, parent id: [OwnerId: 72057594046678944, LocalPathId: 1], parent name: MyRoot, child name: MyView, child id: [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-05-07T08:59:18.074143Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 0 2025-05-07T08:59:18.074214Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 101:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:59:18.075410Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:269:2260] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-05-07T08:59:18.078218Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 101, response: Status: StatusAccepted TxId: 101 SchemeshardId: 72057594046678944 PathId: 2, at schemeshard: 72057594046678944 2025-05-07T08:59:18.078395Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 101, database: /MyRoot, subject: , status: StatusAccepted, operation: CREATE VIEW, path: /MyRoot/MyView 2025-05-07T08:59:18.078584Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 101:0, at schemeshard: 72057594046678944 2025-05-07T08:59:18.078635Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_create_view.cpp:30: [72057594046678944] TCreateView::TPropose, opId: 101:0 ProgressState 2025-05-07T08:59:18.078717Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 101 ready parts: 1/1 2025-05-07T08:59:18.078845Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 101 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:59:18.079617Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 2025-05-07T08:59:18.081106Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 101:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:101 msg type: 269090816 2025-05-07T08:59:18.081300Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 101, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 101 at step: 5000002 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000001 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 101 at step: 5000002 2025-05-07T08:59:18.081671Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000002, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:59:18.081799Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 101 Coordinator: 72057594046316545 AckTo { RawX1: 130 RawX2: 4294969450 } } Step: 5000002 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:59:18.081862Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_create_view.cpp:45: [72057594046678944] TCreateView::TPropose, opId: 101:0 HandleReply TEvPrivate::TEvOperationPlan, step: 5000002 2025-05-07T08:59:18.082029Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 101:0 128 -> 240 2025-05-07T08:59:18.082238Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:59:18.082316Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 FAKE_COORDINATOR: Erasing txId 101 2025-05-07T08:59:18.084334Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:59:18.084386Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 101, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:59:18.084560Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 101, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-05-07T08:59:18.084724Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:59:18.084778Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:206:2208], at schemeshard: 72057594046678944, txId: 101, path id: 1 2025-05-07T08:59:18.084838Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:206:2208], at schemeshard: 72057594046678944, txId: 101, path id: 2 2025-05-07T08:59:18.085120Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 101:0, at schemeshard: 72057594046678944 2025-05-07T08:59:18.085197Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046678944] TDone opId# 101:0 ProgressState 2025-05-07T08:59:18.085353Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#101:0 progress is 1/1 2025-05-07T08:59:18.085395Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-05-07T08:59:18.085442Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#101:0 progress is 1/1 2025-05-07T08:59:18.085477Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-05-07T08:59:18.085530Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 101, ready parts: 1/1, is published: false 2025-05-07T08:59:18.085578Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-05-07T08:59:18.085617Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 101:0 2025-05-07T08:59:18.085650Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 101:0 2025-05-07T08:59:18.085727Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-05-07T08:59:18.085771Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 101, publications: 2, subscribers: 0 2025-05-07T08:59:18.085871Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 1], 4 2025-05-07T08:59:18.085904Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 2], 2 2025-05-07T08:59:18.086960Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 4 PathOwnerId: 72057594046678944, cookie: 101 2025-05-07T08:59:18.087069Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 4 PathOwnerId: 72057594046678944, cookie: 101 2025-05-07T08:59:18.087109Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 101 2025-05-07T08:59:18.087156Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 4 2025-05-07T08:59:18.087197Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-05-07T08:59:18.088545Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 2 PathOwnerId: 72057594046678944, cookie: 101 2025-05-07T08:59:18.088647Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 
72057594046678944 Generation: 2 LocalPathId: 2 Version: 2 PathOwnerId: 72057594046678944, cookie: 101 2025-05-07T08:59:18.088698Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 101 2025-05-07T08:59:18.088729Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 2 2025-05-07T08:59:18.088762Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-05-07T08:59:18.088846Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 101, subscribers: 0 2025-05-07T08:59:18.091373Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-05-07T08:59:18.092562Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 TestModificationResult got TxId: 101, wait until txId: 101 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_serverless/unittest >> TSchemeShardServerLess::TestServerlessComputeResourcesModeValidation [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:128:2058] recipient: [1:108:2140] 2025-05-07T08:59:16.999679Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:59:16.999772Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:59:16.999816Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:59:16.999859Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:59:16.999908Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:59:16.999942Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:59:17.000015Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:59:17.000095Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, 
IsManualStartup# false 2025-05-07T08:59:17.000897Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:59:17.001270Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:59:17.121404Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:59:17.121483Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:59:17.143951Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:59:17.144061Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:59:17.144234Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:59:17.158913Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:59:17.166284Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:59:17.167003Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:59:17.167398Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:59:17.182768Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:59:17.184545Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:59:17.184627Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:59:17.184710Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:59:17.184758Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:59:17.184801Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:59:17.184961Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:59:17.202672Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:239:2058] recipient: [1:15:2062] 2025-05-07T08:59:17.432423Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:59:17.432700Z node 1 :FLAT_TX_SCHEMESHARD 
NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:59:17.432943Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:59:17.433185Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:59:17.433243Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:59:17.443474Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:59:17.443657Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:59:17.443864Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:59:17.443942Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:59:17.443983Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:59:17.444027Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:59:17.446355Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:59:17.446428Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:59:17.446478Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:59:17.448618Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:59:17.448695Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:59:17.448760Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:59:17.448814Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:59:17.466840Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:59:17.474859Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:59:17.478857Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:59:17.480141Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:59:17.480327Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 130 RawX2: 4294969450 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:59:17.480401Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:59:17.480774Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:59:17.480836Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:59:17.481020Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:59:17.481125Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:59:17.495413Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:59:17.495478Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:59:17.495679Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:59:17.495739Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 
4
2025-05-07T08:59:18.224808Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 104:0 128 -> 240
2025-05-07T08:59:18.225057Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 104:0, at tablet# 72057594046678944
2025-05-07T08:59:18.225192Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 5
2025-05-07T08:59:18.225328Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:567: DoUpdateTenant no hasChanges, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], tenantLink: TSubDomainsLinks::TLink { DomainKey: [OwnerId: 72057594046678944, LocalPathId: 3], Generation: 2, ActorId:[1:613:2542], EffectiveACLVersion: 0, SubdomainVersion: 2, UserAttributesVersion: 1, TenantHive: 18446744073709551615, TenantSysViewProcessor: 18446744073709551615, TenantStatisticsAggregator: 18446744073709551615, TenantGraphShard: 18446744073709551615, TenantRootACL: }, subDomain->GetVersion(): 2, actualEffectiveACLVersion: 0, actualUserAttrsVersion: 1, tenantHive: 18446744073709551615, tenantSysViewProcessor: 18446744073709551615, at schemeshard: 72057594046678944
FAKE_COORDINATOR: Erasing txId 104
2025-05-07T08:59:18.231866Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944
2025-05-07T08:59:18.231917Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 104, path id: [OwnerId: 72057594046678944, LocalPathId: 3]
2025-05-07T08:59:18.232098Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944
2025-05-07T08:59:18.232136Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:206:2208], at schemeshard: 72057594046678944, txId: 104, path id: 3
2025-05-07T08:59:18.232414Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 104:0, at schemeshard: 72057594046678944
2025-05-07T08:59:18.232464Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_extsubdomain.cpp:787: [72057594046678944] TSyncHive, operationId 104:0, ProgressState, NeedSyncHive: 0
2025-05-07T08:59:18.232493Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 104:0 240 -> 240
2025-05-07T08:59:18.233265Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 4 PathOwnerId: 72057594046678944, cookie: 104
2025-05-07T08:59:18.233391Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 4 PathOwnerId: 72057594046678944, cookie: 104
2025-05-07T08:59:18.233436Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 104
2025-05-07T08:59:18.233469Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 104, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 4
2025-05-07T08:59:18.233511Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 6
2025-05-07T08:59:18.233587Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 104, ready parts: 0/1, is published: true
2025-05-07T08:59:18.242031Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 104:0, at schemeshard: 72057594046678944
2025-05-07T08:59:18.242119Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046678944] TDone opId# 104:0 ProgressState
2025-05-07T08:59:18.242244Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#104:0 progress is 1/1
2025-05-07T08:59:18.242302Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 104 ready parts: 1/1
2025-05-07T08:59:18.242346Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#104:0 progress is 1/1
2025-05-07T08:59:18.242405Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 104 ready parts: 1/1
2025-05-07T08:59:18.242466Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 104, ready parts: 1/1, is published: true
2025-05-07T08:59:18.242512Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 104 ready parts: 1/1
2025-05-07T08:59:18.242580Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 104:0
2025-05-07T08:59:18.242616Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 104:0
2025-05-07T08:59:18.243056Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 5
2025-05-07T08:59:18.243616Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104
TestModificationResult got TxId: 104, wait until txId: 104
TestWaitNotification wait txId: 104
2025-05-07T08:59:18.246718Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 104: send EvNotifyTxCompletion
2025-05-07T08:59:18.246770Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 104
2025-05-07T08:59:18.247892Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 104, at schemeshard: 72057594046678944
2025-05-07T08:59:18.248017Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 104: got EvNotifyTxCompletionResult
2025-05-07T08:59:18.248063Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 104: satisfy waiter [1:768:2650]
TestWaitNotification: OK eventTxId 104
TestModificationResults wait txId: 105
2025-05-07T08:59:18.252150Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpAlterExtSubDomain SubDomain { Name: "SharedDB" ServerlessComputeResourcesMode: EServerlessComputeResourcesModeShared } } TxId: 105 TabletId: 72057594046678944 , at schemeshard: 72057594046678944
2025-05-07T08:59:18.252368Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_extsubdomain.cpp:1102: [72057594046678944] CreateCompatibleAlterExtSubDomain, opId 105:0, feature flag EnableAlterDatabaseCreateHiveFirst 1, tx WorkingDir: "/MyRoot" OperationType: ESchemeOpAlterExtSubDomain SubDomain { Name: "SharedDB" ServerlessComputeResourcesMode: EServerlessComputeResourcesModeShared }
2025-05-07T08:59:18.252429Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_extsubdomain.cpp:1108: [72057594046678944] CreateCompatibleAlterExtSubDomain, opId 105:0, path /MyRoot/SharedDB
2025-05-07T08:59:18.252563Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_just_reject.cpp:47: TReject Propose, opId: 105:0, explain: Invalid AlterExtSubDomain request: Invalid ExtSubDomain request: ServerlessComputeResourcesMode can be changed only for serverless, at schemeshard: 72057594046678944
2025-05-07T08:59:18.252603Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 105:1, propose status:StatusInvalidParameter, reason: Invalid AlterExtSubDomain request: Invalid ExtSubDomain request: ServerlessComputeResourcesMode can be changed only for serverless, at schemeshard: 72057594046678944
2025-05-07T08:59:18.258898Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 105, response: Status: StatusInvalidParameter Reason: "Invalid AlterExtSubDomain request: Invalid ExtSubDomain request: ServerlessComputeResourcesMode can be changed only for serverless" TxId: 105 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944
2025-05-07T08:59:18.259074Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 105, database: /MyRoot, subject: , status: StatusInvalidParameter, reason: Invalid AlterExtSubDomain request: Invalid ExtSubDomain request: ServerlessComputeResourcesMode can be changed only for serverless, operation: ALTER DATABASE, path: /MyRoot/SharedDB
TestModificationResult got TxId: 105, wait until txId: 105
TestModificationResults wait txId: 106
2025-05-07T08:59:18.262070Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpAlterExtSubDomain SubDomain { Name: "ServerLess0" ServerlessComputeResourcesMode: EServerlessComputeResourcesModeUnspecified } } TxId: 106 TabletId: 72057594046678944 , at schemeshard: 72057594046678944
2025-05-07T08:59:18.262223Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_extsubdomain.cpp:1102: [72057594046678944] CreateCompatibleAlterExtSubDomain, opId 106:0, feature flag EnableAlterDatabaseCreateHiveFirst 1, tx WorkingDir: "/MyRoot" OperationType: ESchemeOpAlterExtSubDomain SubDomain { Name: "ServerLess0" ServerlessComputeResourcesMode: EServerlessComputeResourcesModeUnspecified }
2025-05-07T08:59:18.262277Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_extsubdomain.cpp:1108: [72057594046678944] CreateCompatibleAlterExtSubDomain, opId 106:0, path /MyRoot/ServerLess0
2025-05-07T08:59:18.262419Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_just_reject.cpp:47: TReject Propose, opId: 106:0, explain: Invalid AlterExtSubDomain request: Invalid ExtSubDomain request: can not set ServerlessComputeResourcesMode to EServerlessComputeResourcesModeUnspecified, at schemeshard: 72057594046678944
2025-05-07T08:59:18.262458Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 106:1, propose status:StatusInvalidParameter, reason: Invalid AlterExtSubDomain request: Invalid ExtSubDomain request: can not set ServerlessComputeResourcesMode to EServerlessComputeResourcesModeUnspecified, at schemeshard: 72057594046678944
2025-05-07T08:59:18.269230Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 106, response: Status: StatusInvalidParameter Reason: "Invalid AlterExtSubDomain request: Invalid ExtSubDomain request: can not set ServerlessComputeResourcesMode to EServerlessComputeResourcesModeUnspecified" TxId: 106 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944
2025-05-07T08:59:18.269463Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 106, database: /MyRoot, subject: , status: StatusInvalidParameter, reason: Invalid AlterExtSubDomain request: Invalid ExtSubDomain request: can not set ServerlessComputeResourcesMode to EServerlessComputeResourcesModeUnspecified, operation: ALTER DATABASE, path: /MyRoot/ServerLess0
TestModificationResult got TxId: 106, wait until txId: 106
>> Cdc::SplitTopicPartition_TopicAutoPartitioning [GOOD]
>> Cdc::ShouldDeliverChangesOnSplitMerge
>> CommitOffset::Commit_WithoutSession_TopPast [GOOD]
>> CommitOffset::Commit_WithWrongSession_ToParent
>> Balancing::Balancing_ManyTopics_PQv1 [GOOD]
>> CommitOffset::Commit_Flat_WithWrongSession
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_view/unittest >> TSchemeShardViewTest::ReadOnlyMode [GOOD]
Test command err:
Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140]
IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140]
Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140]
2025-05-07T08:59:16.966985Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1
2025-05-07T08:59:16.981036Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10
2025-05-07T08:59:16.981117Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s
2025-05-07T08:59:16.981173Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration
2025-05-07T08:59:16.981367Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000
2025-05-07T08:59:16.981426Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000
2025-05-07T08:59:16.981502Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10
2025-05-07T08:59:16.981590Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false
2025-05-07T08:59:16.985486Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources#
2025-05-07T08:59:17.017521Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute
2025-05-07T08:59:17.472236Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs
2025-05-07T08:59:17.472337Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded
2025-05-07T08:59:17.521047Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete
2025-05-07T08:59:17.521299Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute
2025-05-07T08:59:17.521524Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944
2025-05-07T08:59:17.542411Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete
2025-05-07T08:59:17.542768Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0
2025-05-07T08:59:17.543543Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944
2025-05-07T08:59:17.543759Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944
2025-05-07T08:59:17.554388Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944
2025-05-07T08:59:17.577960Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944
2025-05-07T08:59:17.578071Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944
2025-05-07T08:59:17.578189Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute
2025-05-07T08:59:17.578261Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1]
2025-05-07T08:59:17.578312Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete
2025-05-07T08:59:17.580674Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944
2025-05-07T08:59:17.607010Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0
Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062]
2025-05-07T08:59:17.897149Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944
2025-05-07T08:59:17.897413Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944
2025-05-07T08:59:17.897636Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0
2025-05-07T08:59:17.897929Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944
2025-05-07T08:59:17.898062Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944
2025-05-07T08:59:17.903913Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944
2025-05-07T08:59:17.904147Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot
2025-05-07T08:59:17.904422Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944
2025-05-07T08:59:17.904504Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944
2025-05-07T08:59:17.904554Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state
2025-05-07T08:59:17.904591Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3
2025-05-07T08:59:17.907205Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944
2025-05-07T08:59:17.907281Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944
2025-05-07T08:59:17.907323Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128
2025-05-07T08:59:17.909463Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944
2025-05-07T08:59:17.909518Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944
2025-05-07T08:59:17.909570Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944
2025-05-07T08:59:17.909637Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1
2025-05-07T08:59:17.913952Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545
2025-05-07T08:59:17.916189Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816
2025-05-07T08:59:17.916446Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545
FAKE_COORDINATOR: Add transaction: 1 at step: 5000001
FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0
FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001
2025-05-07T08:59:17.917590Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944
2025-05-07T08:59:17.917782Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944
2025-05-07T08:59:17.917838Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944
2025-05-07T08:59:17.918201Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240
2025-05-07T08:59:17.918268Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944
2025-05-07T08:59:17.918475Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1
2025-05-07T08:59:17.918650Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944
FAKE_COORDINATOR: Erasing txId 1
2025-05-07T08:59:17.922567Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944
2025-05-07T08:59:17.922625Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1]
2025-05-07T08:59:17.922853Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944
2025-05-07T08:59:17.923004Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ...
ng: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1]
2025-05-07T08:59:19.017259Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete
2025-05-07T08:59:19.017435Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944
TestModificationResults wait txId: 103
Leader for TabletID 72057594046678944 is [1:379:2348] sender: [1:434:2058] recipient: [1:15:2062]
2025-05-07T08:59:19.057248Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateView CreateView { Name: "ThirdView" QueryText: "Some query" } } TxId: 103 TabletId: 72057594046678944 , at schemeshard: 72057594046678944
2025-05-07T08:59:19.057555Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_view.cpp:118: [72057594046678944] TCreateView Propose, path: /MyRoot/ThirdView, opId: 103:0
2025-05-07T08:59:19.057631Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_create_view.cpp:124: [72057594046678944] TCreateView Propose, path: /MyRoot/ThirdView, opId: 103:0, viewDescription: Name: "ThirdView" QueryText: "Some query"
2025-05-07T08:59:19.057760Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:319: AttachChild: child attached as only one child to the parent, parent id: [OwnerId: 72057594046678944, LocalPathId: 1], parent name: MyRoot, child name: ThirdView, child id: [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944
2025-05-07T08:59:19.057902Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 0
2025-05-07T08:59:19.057960Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 103:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944
2025-05-07T08:59:19.066973Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 103, response: Status: StatusAccepted TxId: 103 SchemeshardId: 72057594046678944 PathId: 3, at schemeshard: 72057594046678944
2025-05-07T08:59:19.067175Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 103, database: /MyRoot, subject: , status: StatusAccepted, operation: CREATE VIEW, path: /MyRoot/ThirdView
2025-05-07T08:59:19.067368Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 103:0, at schemeshard: 72057594046678944
2025-05-07T08:59:19.067421Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_create_view.cpp:30: [72057594046678944] TCreateView::TPropose, opId: 103:0 ProgressState
2025-05-07T08:59:19.067486Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 103 ready parts: 1/1
2025-05-07T08:59:19.067638Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 103 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545
2025-05-07T08:59:19.078874Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 103:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:103 msg type: 269090816
2025-05-07T08:59:19.079185Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 103, partId: 4294967295, tablet: 72057594046316545
FAKE_COORDINATOR: Add transaction: 103 at step: 5000003
FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000002
FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 103 at step: 5000003
2025-05-07T08:59:19.080010Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000003, transactions count in step: 1, at schemeshard: 72057594046678944
2025-05-07T08:59:19.080156Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 103 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000003 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944
2025-05-07T08:59:19.080221Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_create_view.cpp:45: [72057594046678944] TCreateView::TPropose, opId: 103:0 HandleReply TEvPrivate::TEvOperationPlan, step: 5000003
2025-05-07T08:59:19.080385Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 103:0 128 -> 240
2025-05-07T08:59:19.080577Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2
2025-05-07T08:59:19.080657Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1
FAKE_COORDINATOR: Erasing txId 103
2025-05-07T08:59:19.083178Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944
2025-05-07T08:59:19.083230Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 103, path id: [OwnerId: 72057594046678944, LocalPathId: 1]
2025-05-07T08:59:19.083442Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 103, path id: [OwnerId: 72057594046678944, LocalPathId: 3]
2025-05-07T08:59:19.083551Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944
2025-05-07T08:59:19.083597Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:428:2386], at schemeshard: 72057594046678944, txId: 103, path id: 1
2025-05-07T08:59:19.083644Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:428:2386], at schemeshard: 72057594046678944, txId: 103, path id: 3
2025-05-07T08:59:19.084149Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 103:0, at schemeshard: 72057594046678944
2025-05-07T08:59:19.084207Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046678944] TDone opId# 103:0 ProgressState
2025-05-07T08:59:19.084323Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#103:0 progress is 1/1
2025-05-07T08:59:19.084365Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 103 ready parts: 1/1
2025-05-07T08:59:19.084406Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#103:0 progress is 1/1
2025-05-07T08:59:19.084446Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 103 ready parts: 1/1
2025-05-07T08:59:19.084486Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 103, ready parts: 1/1, is published: false
2025-05-07T08:59:19.084539Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 103 ready parts: 1/1
2025-05-07T08:59:19.084595Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 103:0
2025-05-07T08:59:19.084637Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 103:0
2025-05-07T08:59:19.084753Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2
2025-05-07T08:59:19.084816Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 103, publications: 2, subscribers: 0
2025-05-07T08:59:19.084860Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 103, [OwnerId: 72057594046678944, LocalPathId: 1], 5
2025-05-07T08:59:19.084894Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 103, [OwnerId: 72057594046678944, LocalPathId: 3], 2
2025-05-07T08:59:19.085530Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 4 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 103
2025-05-07T08:59:19.085683Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 4 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 103
2025-05-07T08:59:19.085734Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 103
2025-05-07T08:59:19.085789Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 103, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 5
2025-05-07T08:59:19.085853Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3
2025-05-07T08:59:19.091203Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 4 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 103
2025-05-07T08:59:19.091317Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 4 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 103
2025-05-07T08:59:19.091359Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 103
2025-05-07T08:59:19.091397Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 103, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 2
2025-05-07T08:59:19.091453Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1
2025-05-07T08:59:19.091548Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 103, subscribers: 0
2025-05-07T08:59:19.104119Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103
2025-05-07T08:59:19.104289Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103
TestModificationResult got TxId: 103, wait until txId: 103
>> TopicAutoscaling::ControlPlane_AutoscalingWithStorageSizeRetention [GOOD]
>> TopicAutoscaling::CDC_PartitionSplit_AutosplitByLoad
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tablet_flat/ut/unittest >> TVersions::Wreck0Reverse [GOOD]
Test command err:
00000.000 II| FAKE_ENV: Born at 2025-05-07T08:54:57.707117Z
00000.012 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144
00000.013 II| FAKE_ENV: Starting storage for BS group 0
00000.013 II| FAKE_ENV: Starting storage for BS group 1
00000.014 II| FAKE_ENV: Starting storage for BS group 2
00000.014 II| FAKE_ENV: Starting storage for BS group 3
00000.021 II| TABLET_EXECUTOR: Leader{1:2:0} activating executor
00000.022 II| TABLET_EXECUTOR: LSnap{1:2, on 2:1, 35b, wait} done, Waste{2:0, 0b +(0, 0b), 0 trc}
00000.022 DD| TABLET_EXECUTOR: Leader{1:2:2} commited cookie 2 for step 1
00000.023 DD| TABLET_EXECUTOR: Leader{1:2:2} Tx{1, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxInitSchema} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxInitSchema
00000.023 DD| TABLET_EXECUTOR: Leader{1:2:2} Tx{1, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxInitSchema} took 4194304b of static mem, Memory{4194304 dyn 0}
00000.024 DD| TABLET_EXECUTOR: Leader{1:2:2} Tx{1, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxInitSchema} hope 1 -> done Change{2, redo 0b alter 209b annex 0, ~{ } -{ }, 0 gb}
00000.024 DD| TABLET_EXECUTOR: Leader{1:2:2} Tx{1, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxInitSchema} release 4194304b of static, Memory{0 dyn 0}
00000.024 DD| TABLET_EXECUTOR: Leader{1:2:3} commited cookie 1 for step 2
00000.024 NN| TABLET_SAUSAGECACHE: Update config MemoryLimit: 8388608 ReplacementPolicy: ThreeLeveledLRU
00000.024 NN| TABLET_SAUSAGECACHE: Switch replacement policy from S3FIFO to ThreeLeveledLRU
00000.024 NN| TABLET_SAUSAGECACHE: Switch replacement policy done from S3FIFO to ThreeLeveledLRU
00000.025 II| TABLET_SAUSAGECACHE: Limit memory consumer with 16777216TiB
00000.025 DD| TABLET_EXECUTOR: Leader{1:2:3} Tx{2, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow
00000.025 DD| TABLET_EXECUTOR: Leader{1:2:3} Tx{2, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0}
00000.026 DD| TABLET_EXECUTOR: Leader{1:2:3} Tx{2, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{2, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb}
00000.026 DD| TABLET_EXECUTOR: Leader{1:2:3} Tx{2, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} release 4194304b of static, Memory{0 dyn 0}
00000.026 DD| TABLET_EXECUTOR: Leader{1:2:4} commited cookie 1 for step 3
00000.027 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{3, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow
00000.027 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{3, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0}
00000.027 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{3, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{3, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb}
00000.027 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{3, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} release 4194304b of static, Memory{0 dyn 0}
00000.028 DD| TABLET_EXECUTOR: Leader{1:2:5} commited cookie 1 for step 4
00000.028 DD| TABLET_EXECUTOR: Leader{1:2:5} Tx{4, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow
00000.028 DD| TABLET_EXECUTOR: Leader{1:2:5} Tx{4, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0}
00000.029 DD| TABLET_EXECUTOR: Leader{1:2:5} Tx{4, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{4, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb}
00000.029 DD| TABLET_EXECUTOR: Leader{1:2:5} Tx{4, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} release 4194304b of static, Memory{0 dyn 0}
00000.029 DD| TABLET_EXECUTOR: Leader{1:2:6} commited cookie 1 for step 5
00000.030 DD| TABLET_EXECUTOR: Leader{1:2:6} Tx{5, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow
00000.030 DD| TABLET_EXECUTOR: Leader{1:2:6} Tx{5, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0}
00000.030 DD| TABLET_EXECUTOR: Leader{1:2:6} Tx{5, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{5, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb}
00000.030 DD| TABLET_EXECUTOR: Leader{1:2:6} Tx{5, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} release 4194304b of static, Memory{0 dyn 0}
00000.031 DD| TABLET_EXECUTOR: Leader{1:2:7} commited cookie 1 for step 6
00000.031 DD| TABLET_EXECUTOR: Leader{1:2:7} Tx{6, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow
00000.031 DD| TABLET_EXECUTOR: Leader{1:2:7} Tx{6, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0}
00000.032 DD| TABLET_EXECUTOR: Leader{1:2:7} Tx{6, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{6, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb}
00000.032 DD| TABLET_EXECUTOR: Leader{1:2:7} Tx{6, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} release 4194304b of static, Memory{0 dyn 0}
00000.032 DD| TABLET_EXECUTOR: Leader{1:2:8} commited cookie 1 for step 7
00000.033 DD| TABLET_EXECUTOR: Leader{1:2:8} Tx{7, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow
00000.033 DD| TABLET_EXECUTOR: Leader{1:2:8} Tx{7, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0}
00000.033 DD| TABLET_EXECUTOR: Leader{1:2:8} Tx{7, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{7, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb}
00000.033 DD| TABLET_EXECUTOR: Leader{1:2:8} Tx{7, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} release 4194304b of static, Memory{0 dyn 0}
00000.034 DD| TABLET_EXECUTOR: Leader{1:2:9} commited cookie 1 for step 8
00000.034 DD| TABLET_EXECUTOR: Leader{1:2:9} Tx{8, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow
00000.034 DD| TABLET_EXECUTOR: Leader{1:2:9} Tx{8, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0}
00000.035 DD| TABLET_EXECUTOR: Leader{1:2:9} Tx{8, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{8, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb}
00000.035 DD| TABLET_EXECUTOR: Leader{1:2:9} Tx{8, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} release 4194304b of static, Memory{0 dyn 0}
00000.035 DD| TABLET_EXECUTOR: Leader{1:2:10} commited cookie 1 for step 9
00000.036 DD| TABLET_EXECUTOR: Leader{1:2:10} Tx{9, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow
00000.036 DD| TABLET_EXECUTOR: Leader{1:2:10} Tx{9, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0}
00000.036 DD| TABLET_EXECUTOR: Leader{1:2:10} Tx{9, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{9, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb}
00000.036 DD| TABLET_EXECUTOR: Leader{1:2:10} Tx{9, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} release 4194304b of static, Memory{0 dyn 0}
00000.036 DD| TABLET_EXECUTOR: Leader{1:2:11} commited cookie 1 for step 10
00000.037 DD| TABLET_EXECUTOR: Leader{1:2:11} Tx{10, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow
00000.037 DD| TABLET_EXECUTOR: Leader{1:2:11} Tx{10, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0}
00000.037 DD| TABLET_EXECUTOR: Leader{1:2:11} Tx{10, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{10, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb}
00000.037 DD| TABLET_EXECUTOR: Leader{1:2:11} Tx{10, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} release 4194304b of static, Memory{0 dyn 0}
00000.038 DD| TABLET_EXECUTOR: Leader{1:2:12} commited cookie 1 for step 11
00000.038 DD| TABLET_EXECUTOR: Leader{1:2:12} Tx{11, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow
00000.038 DD| TABLET_EXECUTOR: Leader{1:2:12} Tx{11, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0}
00000.038 DD| TABLET_EXECUTOR: Leader{1:2:12} Tx{11, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{11, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb}
00000.038 DD| TABLET_EXECUTOR: Leader{1:2:12} Tx{11, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} release 4194304b of static, Memory{0 dyn 0}
00000.039 DD| TABLET_EXECUTOR: Leader{1:2:13} commited cookie 1 for step 12
00000.039 DD| TABLET_EXECUTOR: Leader{1:2:13} Tx{12, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow
00000.039 DD| TABLET_EXECUTOR: Leader{1:2:13} Tx{12, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0}
00000.040 DD| TABLET_EXECUTOR: Leader{1:2:13} Tx{12, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{12, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb}
00000.040 DD| TABLET_EXECUTOR: Leader{1:2:13} Tx{12, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} release 4194304b of static, Memory{0 dyn 0}
00000.040 DD| TABLET_EXECUTOR: Leader{1:2:14} commited cookie 1 for step 13
00000.041 DD| TABLET_EXECUTOR: Leader{1:2:14} Tx{13, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow
00000.041 DD| TABLET_EXECUTOR: Leader{1:2:14} Tx{13, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0}
00000.041 DD| TABLET_EXECUTOR: Leader{1:2:14} Tx{13, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{13, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb}
00000.041 DD| TABLET_EXECUTOR: Leader{1:2:14} Tx{13, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} release 4194304b of static, Memory{0 dyn 0}
00000.042 DD| TABLET_EXECUTOR: Leader{1:2:15} commited cookie 1 for step 14
00000.042 DD| TABLET_EXECUTOR: Leader{1:2:15} Tx{14, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow
00000.042 DD| TABLET_EXECUTOR: Leader{1:2:15} Tx{14, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0}
00000.043 DD| TABLET_EXECUTOR: Leader{1:2:15} Tx{14, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{14, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb}
00000.043 DD| TABLET_EXECUTOR: Leader{1:2:15} Tx{14, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} release 4194304b of static, Memory{0 dyn 0}
00000.043 DD| TABLET_EXECUTOR: Leader{ ... ECACHE NOTICE: shared_sausagecache.cpp:1261: Bootstrap with config MemoryLimit: 456 AsyncQueueInFlyLimit: 19
... waiting for NKikimr::NSharedCache::TEvRequest
2025-05-07T08:55:08.577353Z node 34 :TABLET_SAUSAGECACHE DEBUG: shared_sausagecache.cpp:385: Add page collection [1:0:256:0:0:0:1]
2025-05-07T08:55:08.577420Z node 34 :TABLET_SAUSAGECACHE DEBUG: shared_sausagecache.cpp:400: Add page collection [1:0:256:0:0:0:1] owner [34:5:2052]
2025-05-07T08:55:08.577526Z node 34 :TABLET_SAUSAGECACHE TRACE: shared_sausagecache.cpp:597: Request page collection [1:0:256:0:0:0:1] owner [34:5:2052] cookie 1 class Online from cache [ ] already requested [ ] to request [ 1 ]
... waiting for NKikimr::NSharedCache::TEvRequest (done)
... waiting for NKikimr::NSharedCache::TEvRequest
... blocking NKikimr::NTabletFlatExecutor::NBlockIO::TEvFetch from SAUSAGE_CACHE to SAUSAGE_BIO_A cookie 0
2025-05-07T08:55:08.577774Z node 34 :TABLET_SAUSAGECACHE DEBUG: shared_sausagecache.cpp:385: Add page collection [1:0:256:0:0:0:2]
2025-05-07T08:55:08.577828Z node 34 :TABLET_SAUSAGECACHE DEBUG: shared_sausagecache.cpp:400: Add page collection [1:0:256:0:0:0:2] owner [34:5:2052]
2025-05-07T08:55:08.577955Z node 34 :TABLET_SAUSAGECACHE TRACE: shared_sausagecache.cpp:597: Request page collection [1:0:256:0:0:0:2] owner [34:5:2052] cookie 2 class Online from cache [ ] already requested [ ] to request [ 2 ]
... waiting for NKikimr::NSharedCache::TEvRequest (done)
... waiting for NKikimr::NSharedCache::TEvRequest
2025-05-07T08:55:08.578169Z node 34 :TABLET_SAUSAGECACHE DEBUG: shared_sausagecache.cpp:400: Add page collection [1:0:256:0:0:0:1] owner [34:6:2053]
2025-05-07T08:55:08.578238Z node 34 :TABLET_SAUSAGECACHE TRACE: shared_sausagecache.cpp:597: Request page collection [1:0:256:0:0:0:1] owner [34:6:2053] cookie 3 class Online from cache [ ] already requested [ 1 ] to request [ ]
... waiting for NKikimr::NSharedCache::TEvRequest (done)
... waiting for NKikimr::NSharedCache::TEvRequest
2025-05-07T08:55:08.578400Z node 34 :TABLET_SAUSAGECACHE TRACE: shared_sausagecache.cpp:597: Request page collection [1:0:256:0:0:0:1] owner [34:6:2053] cookie 4 class Online from cache [ ] already requested [ 1 ] to request [ 3 ]
... waiting for NKikimr::NSharedCache::TEvRequest (done)
... waiting for fetches #4
... blocking NKikimr::NTabletFlatExecutor::NBlockIO::TEvFetch from SAUSAGE_CACHE to SAUSAGE_BIO_A cookie 0
... blocking NKikimr::NTabletFlatExecutor::NBlockIO::TEvFetch from SAUSAGE_CACHE to SAUSAGE_BIO_A cookie 0
... waiting for fetches #4 (done)
Checking fetches#4
Expected:
PageCollection: [1:0:256:0:0:0:1] Pages: [ 1 ] Cookie: 10
PageCollection: [1:0:256:0:0:0:1] Pages: [ 3 ] Cookie: 10
PageCollection: [1:0:256:0:0:0:2] Pages: [ 2 ] Cookie: 10
Actual:
PageCollection: [1:0:256:0:0:0:1] Pages: [ 1 ] Cookie: 10
PageCollection: [1:0:256:0:0:0:1] Pages: [ 3 ] Cookie: 10
PageCollection: [1:0:256:0:0:0:2] Pages: [ 2 ] Cookie: 10
... waiting for NKikimr::NSharedCache::TEvUnregister
2025-05-07T08:55:08.578958Z node 34 :TABLET_SAUSAGECACHE DEBUG: shared_sausagecache.cpp:799: Unregister owner [34:5:2052]
2025-05-07T08:55:08.579026Z node 34 :TABLET_SAUSAGECACHE DEBUG: shared_sausagecache.cpp:1068: Send page collection error [1:0:256:0:0:0:2] owner [34:5:2052] class Online error RACE cookie 2
2025-05-07T08:55:08.579091Z node 34 :TABLET_SAUSAGECACHE DEBUG: shared_sausagecache.cpp:812: Remove page collection [1:0:256:0:0:0:2] owner [34:5:2052]
2025-05-07T08:55:08.579139Z node 34 :TABLET_SAUSAGECACHE DEBUG: shared_sausagecache.cpp:1068: Send page collection error [1:0:256:0:0:0:1] owner [34:5:2052] class Online error RACE cookie 1
2025-05-07T08:55:08.579167Z node 34 :TABLET_SAUSAGECACHE DEBUG: shared_sausagecache.cpp:812: Remove page collection [1:0:256:0:0:0:1] owner [34:5:2052]
2025-05-07T08:55:08.579197Z node 34 :TABLET_SAUSAGECACHE DEBUG: shared_sausagecache.cpp:819: Remove owner [34:5:2052]
... waiting for NKikimr::NSharedCache::TEvUnregister (done)
... waiting for results #4
... waiting for results #4 (done)
Checking results#4
Expected:
PageCollection: [1:0:256:0:0:0:1] Pages: [ ] Cookie: 1
PageCollection: [1:0:256:0:0:0:2] Pages: [ ] Cookie: 2
Actual:
PageCollection: [1:0:256:0:0:0:1] Pages: [ ] Cookie: 1
PageCollection: [1:0:256:0:0:0:2] Pages: [ ] Cookie: 2
... waiting for results #4
2025-05-07T08:55:08.579580Z node 34 :TABLET_SAUSAGECACHE TRACE: shared_sausagecache.cpp:863: Receive page collection [1:0:256:0:0:0:1] status OK pages [ 1 ]
2025-05-07T08:55:08.579642Z node 34 :TABLET_SAUSAGECACHE TRACE: shared_sausagecache.cpp:1046: Send page collection result [1:0:256:0:0:0:1] owner [34:6:2053] class Online pages [ 1 ] cookie 3
... waiting for results #4 (done)
Checking results#4
Expected:
PageCollection: [1:0:256:0:0:0:1] Pages: [ 1 ] Cookie: 3
Actual:
PageCollection: [1:0:256:0:0:0:1] Pages: [ 1 ] Cookie: 3
... waiting for results #4
2025-05-07T08:55:08.579915Z node 34 :TABLET_SAUSAGECACHE TRACE: shared_sausagecache.cpp:863: Receive page collection [1:0:256:0:0:0:2] status OK pages [ 2 ]
2025-05-07T08:55:08.579980Z node 34 :TABLET_SAUSAGECACHE TRACE: shared_sausagecache.cpp:863: Receive page collection [1:0:256:0:0:0:1] status OK pages [ 3 ]
2025-05-07T08:55:08.580021Z node 34 :TABLET_SAUSAGECACHE TRACE: shared_sausagecache.cpp:1046: Send page collection result [1:0:256:0:0:0:1] owner [34:6:2053] class Online pages [ 1 3 ] cookie 4
... waiting for results #4 (done)
Checking results#4
Expected:
PageCollection: [1:0:256:0:0:0:1] Pages: [ 1 3 ] Cookie: 4
Actual:
PageCollection: [1:0:256:0:0:0:1] Pages: [ 1 3 ] Cookie: 4
2025-05-07T08:55:08.715236Z node 35 :TABLET_SAUSAGECACHE NOTICE: shared_sausagecache.cpp:1261: Bootstrap with config MemoryLimit: 456 AsyncQueueInFlyLimit: 19
... waiting for NKikimr::NSharedCache::TEvRequest
2025-05-07T08:55:08.715877Z node 35 :TABLET_SAUSAGECACHE DEBUG: shared_sausagecache.cpp:385: Add page collection [1:0:256:0:0:0:1]
2025-05-07T08:55:08.715963Z node 35 :TABLET_SAUSAGECACHE DEBUG: shared_sausagecache.cpp:400: Add page collection [1:0:256:0:0:0:1] owner [35:5:2052]
2025-05-07T08:55:08.716105Z node 35 :TABLET_SAUSAGECACHE TRACE: shared_sausagecache.cpp:597: Request page collection [1:0:256:0:0:0:1] owner [35:5:2052] cookie 1 class AsyncLoad from cache [ ] already requested [ ] to request [ 1 2 3 4 5 ]
2025-05-07T08:55:08.716168Z node 35 :TABLET_SAUSAGECACHE TRACE: shared_sausagecache.cpp:681: Request page collection [1:0:256:0:0:0:1] async queue pages [ 1 2 ]
... waiting for NKikimr::NSharedCache::TEvRequest (done)
... waiting for NKikimr::NSharedCache::TEvRequest
... blocking NKikimr::NTabletFlatExecutor::NBlockIO::TEvFetch from SAUSAGE_CACHE to SAUSAGE_BIO_A cookie 0
2025-05-07T08:55:08.716449Z node 35 :TABLET_SAUSAGECACHE TRACE: shared_sausagecache.cpp:597: Request page collection [1:0:256:0:0:0:1] owner [35:5:2052] cookie 2 class AsyncLoad from cache [ ] already requested [ ] to request [ 6 7 ]
... waiting for NKikimr::NSharedCache::TEvRequest (done)
... waiting for NKikimr::NSharedCache::TEvRequest
2025-05-07T08:55:08.716661Z node 35 :TABLET_SAUSAGECACHE DEBUG: shared_sausagecache.cpp:385: Add page collection [1:0:256:0:0:0:2]
2025-05-07T08:55:08.716723Z node 35 :TABLET_SAUSAGECACHE DEBUG: shared_sausagecache.cpp:400: Add page collection [1:0:256:0:0:0:2] owner [35:5:2052]
2025-05-07T08:55:08.716838Z node 35 :TABLET_SAUSAGECACHE TRACE: shared_sausagecache.cpp:597: Request page collection [1:0:256:0:0:0:2] owner [35:5:2052] cookie 3 class AsyncLoad from cache [ ] already requested [ ] to request [ 10 11 12 ]
... waiting for NKikimr::NSharedCache::TEvRequest (done)
... waiting for NKikimr::NSharedCache::TEvRequest
2025-05-07T08:55:08.716990Z node 35 :TABLET_SAUSAGECACHE DEBUG: shared_sausagecache.cpp:400: Add page collection [1:0:256:0:0:0:1] owner [35:6:2053]
2025-05-07T08:55:08.717090Z node 35 :TABLET_SAUSAGECACHE TRACE: shared_sausagecache.cpp:597: Request page collection [1:0:256:0:0:0:1] owner [35:6:2053] cookie 4 class AsyncLoad from cache [ ] already requested [ 1 5 ] to request [ 9 10 ]
... waiting for NKikimr::NSharedCache::TEvRequest (done)
Checking fetches#4
Expected:
PageCollection: [1:0:256:0:0:0:1] Pages: [ 1 2 ] Cookie: 20
Actual:
PageCollection: [1:0:256:0:0:0:1] Pages: [ 1 2 ] Cookie: 20
... waiting for NKikimr::NSharedCache::TEvUnregister
2025-05-07T08:55:08.717406Z node 35 :TABLET_SAUSAGECACHE DEBUG: shared_sausagecache.cpp:799: Unregister owner [35:5:2052]
2025-05-07T08:55:08.717482Z node 35 :TABLET_SAUSAGECACHE DEBUG: shared_sausagecache.cpp:1068: Send page collection error [1:0:256:0:0:0:1] owner [35:5:2052] class AsyncLoad error RACE cookie 1
2025-05-07T08:55:08.717612Z node 35 :TABLET_SAUSAGECACHE DEBUG: shared_sausagecache.cpp:1068: Send page collection error [1:0:256:0:0:0:1] owner [35:5:2052] class AsyncLoad error RACE cookie 2
2025-05-07T08:55:08.717651Z node 35 :TABLET_SAUSAGECACHE DEBUG: shared_sausagecache.cpp:812: Remove page collection [1:0:256:0:0:0:1] owner [35:5:2052]
2025-05-07T08:55:08.717707Z node 35 :TABLET_SAUSAGECACHE DEBUG: shared_sausagecache.cpp:1068: Send page collection error [1:0:256:0:0:0:2] owner [35:5:2052] class AsyncLoad error RACE cookie 3
2025-05-07T08:55:08.717741Z node 35 :TABLET_SAUSAGECACHE DEBUG: shared_sausagecache.cpp:812: Remove page collection [1:0:256:0:0:0:2] owner [35:5:2052]
2025-05-07T08:55:08.717772Z node 35 :TABLET_SAUSAGECACHE DEBUG: shared_sausagecache.cpp:819: Remove owner [35:5:2052]
... waiting for NKikimr::NSharedCache::TEvUnregister (done)
... waiting for results #4
... waiting for results #4 (done)
Checking results#4
Expected:
PageCollection: [1:0:256:0:0:0:1] Pages: [ ] Cookie: 1
PageCollection: [1:0:256:0:0:0:1] Pages: [ ] Cookie: 2
PageCollection: [1:0:256:0:0:0:2] Pages: [ ] Cookie: 3
Actual:
PageCollection: [1:0:256:0:0:0:1] Pages: [ ] Cookie: 1
PageCollection: [1:0:256:0:0:0:1] Pages: [ ] Cookie: 2
PageCollection: [1:0:256:0:0:0:2] Pages: [ ] Cookie: 3
... waiting for fetches #4
2025-05-07T08:55:08.718225Z node 35 :TABLET_SAUSAGECACHE TRACE: shared_sausagecache.cpp:863: Receive page collection [1:0:256:0:0:0:1] status OK pages [ 1 2 ]
2025-05-07T08:55:08.718324Z node 35 :TABLET_SAUSAGECACHE TRACE: shared_sausagecache.cpp:681: Request page collection [1:0:256:0:0:0:1] async queue pages [ 5 9 ]
... blocking NKikimr::NTabletFlatExecutor::NBlockIO::TEvFetch from SAUSAGE_CACHE to SAUSAGE_BIO_A cookie 0
... waiting for fetches #4 (done)
Checking fetches#4
Expected:
PageCollection: [1:0:256:0:0:0:1] Pages: [ 5 9 ] Cookie: 20
Actual:
PageCollection: [1:0:256:0:0:0:1] Pages: [ 5 9 ] Cookie: 20
... waiting for fetches #4
2025-05-07T08:55:08.718775Z node 35 :TABLET_SAUSAGECACHE TRACE: shared_sausagecache.cpp:863: Receive page collection [1:0:256:0:0:0:1] status OK pages [ 5 9 ]
2025-05-07T08:55:08.718872Z node 35 :TABLET_SAUSAGECACHE TRACE: shared_sausagecache.cpp:681: Request page collection [1:0:256:0:0:0:1] async queue pages [ 10 ]
2025-05-07T08:55:08.719019Z node 35 :TABLET_SAUSAGECACHE DEBUG: shared_sausagecache.cpp:1008: Drop page collection [1:0:256:0:0:0:1] pages [ 2 ] owner [35:6:2053]
... blocking NKikimr::NTabletFlatExecutor::NBlockIO::TEvFetch from SAUSAGE_CACHE to SAUSAGE_BIO_A cookie 0
... waiting for fetches #4 (done)
Checking fetches#4
Expected:
PageCollection: [1:0:256:0:0:0:1] Pages: [ 10 ] Cookie: 10
Actual:
PageCollection: [1:0:256:0:0:0:1] Pages: [ 10 ] Cookie: 10
... waiting for results #4
2025-05-07T08:55:08.719329Z node 35 :TABLET_SAUSAGECACHE TRACE: shared_sausagecache.cpp:863: Receive page collection [1:0:256:0:0:0:1] status OK pages [ 10 ]
2025-05-07T08:55:08.719397Z node 35 :TABLET_SAUSAGECACHE TRACE: shared_sausagecache.cpp:1046: Send page collection result [1:0:256:0:0:0:1] owner [35:6:2053] class AsyncLoad pages [ 1 5 9 10 ] cookie 4
... waiting for results #4 (done)
Checking results#4
Expected:
PageCollection: [1:0:256:0:0:0:1] Pages: [ 1 5 9 10 ] Cookie: 4
Actual:
PageCollection: [1:0:256:0:0:0:1] Pages: [ 1 5 9 10 ] Cookie: 4
|91.4%| [TA] $(B)/ydb/core/tx/schemeshard/ut_serverless/test-results/unittest/{meta.json ... results_accumulator.log}
|91.4%| [TA] $(B)/ydb/core/tx/schemeshard/ut_view/test-results/unittest/{meta.json ... results_accumulator.log}
|91.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/security/certificate_check/ut/unittest
|91.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/security/certificate_check/ut/unittest
>> TClusterInfoTest::DeviceId [GOOD]
>> TClusterInfoTest::FillInfo [GOOD]
>> TCmsTenatsTest::CollectInfo
>> TRegisterNodeOverDiscoveryService::ServerWithCertVerification_ClientWithCorrectCerts_EmptyAllowedSids
>> YdbOlapStore::LogLast50ByResource
>> YdbQueryService::TestCreateAndAttachSession
>> ClientStatsCollector::PrepareQuery
>> TGRpcYdbTest::ExecuteQueryImplicitSession [GOOD]
>> TGRpcYdbTest::ExecuteQueryWithUuid
>> TestKinesisHttpProxy::TestConsumersEmptyNames [GOOD]
>> TopicAutoscaling::PartitionMerge_PreferedPartition_BeforeAutoscaleAwareSDK [GOOD]
>> TopicAutoscaling::PartitionMerge_PreferedPartition_AutoscaleAwareSDK
>> TestYmqHttpProxy::TestGetQueueUrlWithIAM [GOOD]
|91.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/security/certificate_check/ut/unittest
>> YdbMonitoring::SelfCheck
>> TestKinesisHttpProxy::TestListStreamConsumers
>> TCmsTenatsTest::CollectInfo [GOOD]
>> TCmsTenatsTest::RequestRestartServices
>> TestYmqHttpProxy::TestGetQueueAttributes
>> TestYmqHttpProxy::TestSendMessageWithAttributes [GOOD]
>> TSchemeShardTTLTests::CreateTableShouldSucceed-EnableTablePgTypes-true [GOOD]
>> test_async_replication.py::TestAsyncReplication::test_async_replication[table_ttl_Timestamp-pk_types12-all_types12-index12-Timestamp--]
>> test_async_replication.py::TestAsyncReplication::test_async_replication[table_index_1__ASYNC-pk_types5-all_types5-index5---ASYNC]
>> TestYmqHttpProxy::TestSetQueueAttributes
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLTests::CreateTableShouldSucceed-EnableTablePgTypes-true [GOOD]
Test command err:
Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140]
IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140]
Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:128:2058] recipient: [1:108:2140]
2025-05-07T08:58:51.183676Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1
2025-05-07T08:58:51.183786Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10
2025-05-07T08:58:51.183856Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s
2025-05-07T08:58:51.183901Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration
2025-05-07T08:58:51.183950Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000
2025-05-07T08:58:51.183980Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000
2025-05-07T08:58:51.184043Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10
2025-05-07T08:58:51.184130Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:58:51.184943Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:58:51.185296Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:58:51.270697Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:58:51.270758Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:58:51.293349Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:58:51.293448Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:58:51.293580Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:58:51.310826Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:58:51.311437Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:58:51.312075Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:51.312348Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:58:51.318294Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:58:51.320048Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:58:51.320123Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:58:51.320181Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:58:51.320231Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:58:51.320290Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:58:51.320568Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:58:51.330876Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:239:2058] recipient: [1:15:2062] 2025-05-07T08:58:51.460333Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" 
OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:58:51.460549Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:51.460779Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:58:51.461073Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:58:51.461129Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:51.467557Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:51.467744Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:58:51.467978Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:51.468060Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:58:51.468118Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:58:51.468158Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:58:51.471369Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:51.471453Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:58:51.471500Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:58:51.491876Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:51.492021Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:51.492119Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:58:51.492226Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:58:51.497733Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: 
TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:58:51.501082Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:58:51.501650Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:58:51.502896Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:51.503067Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 130 RawX2: 4294969450 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:58:51.503140Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:58:51.503468Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:58:51.503560Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:58:51.503790Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:58:51.504007Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:58:51.511718Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:58:51.511798Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:58:51.512066Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:58:51.512135Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 
2025-05-07T08:59:24.220481Z node 28 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 101 2025-05-07T08:59:24.220520Z node 28 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 5 2025-05-07T08:59:24.220563Z node 28 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 FAKE_COORDINATOR: Erasing txId 101 2025-05-07T08:59:24.229507Z node 28 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 101 2025-05-07T08:59:24.229638Z node 28 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 101 2025-05-07T08:59:24.229676Z node 28 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 101 2025-05-07T08:59:24.229718Z node 28 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 3 2025-05-07T08:59:24.229761Z node 28 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-05-07T08:59:24.229872Z node 28 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 101, ready parts: 0/1, is published: true 2025-05-07T08:59:24.231742Z node 28 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6245: Handle TEvProposeTransactionResult, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 101 Step: 5000002 OrderId: 101 ExecLatency: 0 ProposeLatency: 3 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 1399 } } 2025-05-07T08:59:24.231795Z node 28 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation FindRelatedPartByTabletId, TxId: 101, tablet: 72075186233409546, partId: 0 2025-05-07T08:59:24.231941Z node 28 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 101:0, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 101 Step: 5000002 OrderId: 101 ExecLatency: 0 ProposeLatency: 3 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 1399 } } 2025-05-07T08:59:24.232045Z node 28 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_part.cpp:108: HandleReply TEvDataShard::TEvProposeTransactionResult Ignore message: tablet# 72057594046678944, ev# TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 101 Step: 5000002 OrderId: 101 ExecLatency: 0 ProposeLatency: 3 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 1399 } } 2025-05-07T08:59:24.246311Z node 28 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard_impl.cpp:5472: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 307 RawX2: 120259086582 } Origin: 72075186233409546 State: 2 TxId: 101 Step: 0 Generation: 2 2025-05-07T08:59:24.246419Z node 28 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation FindRelatedPartByTabletId, TxId: 101, tablet: 72075186233409546, partId: 0 2025-05-07T08:59:24.246626Z node 28 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 101:0, at schemeshard: 72057594046678944, message: Source { RawX1: 307 RawX2: 120259086582 } Origin: 72075186233409546 State: 2 TxId: 101 Step: 0 Generation: 2 2025-05-07T08:59:24.246683Z node 28 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1005: NTableState::TProposedWaitParts operationId# 101:0 HandleReply TEvSchemaChanged at tablet: 72057594046678944 2025-05-07T08:59:24.246786Z node 28 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1009: NTableState::TProposedWaitParts operationId# 101:0 HandleReply TEvSchemaChanged at tablet: 72057594046678944 message: Source { RawX1: 307 RawX2: 120259086582 } Origin: 72075186233409546 State: 2 TxId: 101 Step: 0 Generation: 2 2025-05-07T08:59:24.246858Z node 28 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:655: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 101:0, shardIdx: 72057594046678944:1, datashard: 72075186233409546, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-05-07T08:59:24.246901Z node 28 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:674: all shard schema changes has been received, operationId: 101:0, at schemeshard: 72057594046678944 2025-05-07T08:59:24.246947Z node 28 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:686: send schema changes ack message, operation: 101:0, datashard: 72075186233409546, at schemeshard: 72057594046678944 2025-05-07T08:59:24.246997Z node 28 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 101:0 129 -> 240 2025-05-07T08:59:24.248680Z node 28 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-05-07T08:59:24.248784Z node 28 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-05-07T08:59:24.251387Z node 28 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-05-07T08:59:24.251573Z node 28 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-05-07T08:59:24.252004Z node 28 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 101:0, at schemeshard: 72057594046678944 2025-05-07T08:59:24.252065Z node 28 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046678944] TDone opId# 101:0 ProgressState 2025-05-07T08:59:24.252182Z node 28 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#101:0 progress is 1/1 2025-05-07T08:59:24.252222Z node 28 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 
2025-05-07T08:59:24.252268Z node 28 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#101:0 progress is 1/1 2025-05-07T08:59:24.252303Z node 28 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-05-07T08:59:24.252344Z node 28 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 101, ready parts: 1/1, is published: true 2025-05-07T08:59:24.252436Z node 28 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [28:335:2314] message: TxId: 101 2025-05-07T08:59:24.252490Z node 28 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-05-07T08:59:24.252533Z node 28 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 101:0 2025-05-07T08:59:24.252567Z node 28 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 101:0 2025-05-07T08:59:24.252731Z node 28 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-05-07T08:59:24.256662Z node 28 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-05-07T08:59:24.256733Z node 28 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [28:336:2315] TestWaitNotification: OK eventTxId 101 2025-05-07T08:59:24.257314Z node 28 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/TTLTableWithpgint8Column_UNIT_NANOSECONDS" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T08:59:24.257559Z node 28 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/TTLTableWithpgint8Column_UNIT_NANOSECONDS" took 286us result status StatusSuccess 2025-05-07T08:59:24.258190Z node 28 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/TTLTableWithpgint8Column_UNIT_NANOSECONDS" PathDescription { Self { Name: "TTLTableWithpgint8Column_UNIT_NANOSECONDS" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "TTLTableWithpgint8Column_UNIT_NANOSECONDS" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "modified_at" Type: "pgint8" TypeId: 12288 Id: 2 NotNull: false TypeInfo { PgTypeId: 20 } IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 1 TTLSettings { Enabled { ColumnName: "modified_at" ExpireAfterSeconds: 3600 ColumnUnit: UNIT_NANOSECONDS Tiers { ApplyAfterSeconds: 3600 Delete { } } } } IsBackup: false IsRestore: false } 
TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 1 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TestYmqHttpProxy::TestReceiveMessageWithAttributes [GOOD] >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_ttl_Datetime-pk_types11-all_types11-index11-Datetime--] >> TestYmqHttpProxy::TestReceiveMessageWithAttemptId |91.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/ymq/actor/yc_search_ut/ydb-core-ymq-actor-yc_search_ut |91.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/ymq/actor/yc_search_ut/ydb-core-ymq-actor-yc_search_ut |91.4%| [TA] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_serverless/test-results/unittest/{meta.json ... results_accumulator.log} |91.4%| [TA] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_view/test-results/unittest/{meta.json ... 
results_accumulator.log} |91.4%| [LD] {RESULT} $(B)/ydb/core/ymq/actor/yc_search_ut/ydb-core-ymq-actor-yc_search_ut >> Cdc::AddColumn [GOOD] >> Cdc::AddColumn_TopicAutoPartitioning >> TFlatTest::CopyTableAndReturnPartAfterCompaction >> YdbImport::Simple >> YdbQueryService::TestCreateAndAttachSession [GOOD] >> YdbQueryService::TestAttachTwice >> TSchemeShardColumnTableTTL::CreateColumnTable [GOOD] >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_index_0__SYNC-pk_types4-all_types4-index4---SYNC] >> YdbMonitoring::SelfCheck [GOOD] >> YdbMonitoring::SelfCheckWithNodesDying >> TCmsTenatsTest::RequestRestartServices [GOOD] >> Cdc::RacyActivateAndEnqueue [GOOD] >> Cdc::RacyCreateAndSend >> ClientStatsCollector::PrepareQuery [GOOD] >> ClientStatsCollector::CounterCacheMiss >> RetryPolicy::RetryWithBatching [GOOD] |91.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/cms/ut/unittest >> TCmsTenatsTest::RequestRestartServices [GOOD] >> TGRpcYdbTest::ExecuteQueryWithUuid [GOOD] >> TGRpcYdbTest::ExecuteQueryWithParametersBadRequest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardColumnTableTTL::CreateColumnTable [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:128:2058] recipient: [1:108:2140] 2025-05-07T08:58:48.140221Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:58:48.140329Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:58:48.140374Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:58:48.140411Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:58:48.140453Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:58:48.140481Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:58:48.140531Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:58:48.140601Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:58:48.141352Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:58:48.141720Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 
2025-05-07T08:58:48.227436Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T08:58:48.227499Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:58:48.243005Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:58:48.243132Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:58:48.243306Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:58:48.252826Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:58:48.253545Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:58:48.254254Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:48.254563Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:58:48.256979Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:58:48.258614Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:58:48.258692Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:58:48.258748Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:58:48.258799Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:58:48.258851Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:58:48.259167Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T08:58:48.266277Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:239:2058] recipient: [1:15:2062] 2025-05-07T08:58:48.427050Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:58:48.427215Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:48.427384Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 
72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:58:48.427555Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:58:48.427608Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:48.430665Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:48.430806Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:58:48.431020Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:48.431104Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:58:48.431151Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:58:48.431186Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:58:48.433339Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:48.433404Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:58:48.433450Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:58:48.435532Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:48.435600Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:48.435648Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:58:48.435729Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:58:48.438705Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:58:48.440714Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:58:48.440968Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:58:48.442032Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:48.442176Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 130 RawX2: 4294969450 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T08:58:48.442237Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:58:48.442514Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T08:58:48.442588Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:58:48.442777Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T08:58:48.442873Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T08:58:48.445200Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:58:48.445260Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:58:48.445462Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:58:48.445508Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 
72057594046678944 2025-05-07T08:59:27.868805Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-05-07T08:59:27.875404Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-05-07T08:59:27.875832Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-05-07T08:59:27.876151Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-05-07T08:59:27.876281Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-05-07T08:59:27.876377Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-05-07T08:59:27.876465Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-05-07T08:59:27.876525Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-05-07T08:59:27.876593Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-05-07T08:59:27.877266Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-05-07T08:59:27.877346Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-05-07T08:59:27.877425Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-05-07T08:59:27.877513Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-05-07T08:59:27.877578Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-05-07T08:59:27.877641Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-05-07T08:59:27.877713Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-05-07T08:59:27.877766Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-05-07T08:59:27.877824Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-05-07T08:59:27.877946Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-05-07T08:59:27.878164Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 101:0, at schemeshard: 
72057594046678944 2025-05-07T08:59:27.878242Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046678944] TDone opId# 101:0 ProgressState 2025-05-07T08:59:27.878372Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#101:0 progress is 1/1 2025-05-07T08:59:27.878417Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-05-07T08:59:27.878458Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#101:0 progress is 1/1 2025-05-07T08:59:27.878492Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-05-07T08:59:27.878525Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 101, ready parts: 1/1, is published: true 2025-05-07T08:59:27.878604Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [4:2783:4049] message: TxId: 101 2025-05-07T08:59:27.878650Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-05-07T08:59:27.878724Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 101:0 2025-05-07T08:59:27.878760Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 101:0 2025-05-07T08:59:27.879825Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 66 2025-05-07T08:59:27.882986Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-05-07T08:59:27.883042Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [4:2784:4050] TestWaitNotification: OK eventTxId 101 2025-05-07T08:59:27.883786Z node 4 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/TTLEnabledTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T08:59:27.884130Z node 4 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/TTLEnabledTable" took 378us result status StatusSuccess 2025-05-07T08:59:27.884857Z node 4 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/TTLEnabledTable" PathDescription { Self { Name: "TTLEnabledTable" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeColumnTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 ColumnTableVersion: 1 ColumnTableSchemaVersion: 1 ColumnTableTtlSettingsVersion: 1 } ChildrenExist: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 
TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 64 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } ColumnTableDescription { Name: "TTLEnabledTable" Schema { Columns { Id: 1 Name: "key" Type: "Uint64" TypeId: 4 NotNull: true StorageId: "" DefaultValue { } ColumnFamilyId: 0 } Columns { Id: 2 Name: "modified_at" Type: "Uint64" TypeId: 4 NotNull: true StorageId: "" DefaultValue { } ColumnFamilyId: 0 } KeyColumnNames: "modified_at" NextColumnId: 3 Version: 1 Options { SchemeNeedActualization: false } ColumnFamilies { Id: 0 Name: "default" } NextColumnFamilyId: 1 } TtlSettings { Enabled { ColumnName: "modified_at" ExpireAfterSeconds: 3600 ColumnUnit: UNIT_SECONDS Tiers { ApplyAfterSeconds: 3600 Delete { } } } Version: 1 } ColumnShardCount: 64 Sharding { ColumnShards: 72075186233409546 ColumnShards: 72075186233409547 ColumnShards: 72075186233409548 ColumnShards: 72075186233409549 ColumnShards: 72075186233409550 ColumnShards: 72075186233409551 ColumnShards: 72075186233409552 ColumnShards: 72075186233409553 ColumnShards: 72075186233409554 ColumnShards: 72075186233409555 ColumnShards: 72075186233409556 ColumnShards: 72075186233409557 ColumnShards: 72075186233409558 ColumnShards: 72075186233409559 ColumnShards: 72075186233409560 ColumnShards: 72075186233409561 ColumnShards: 72075186233409562 ColumnShards: 72075186233409563 ColumnShards: 72075186233409564 ColumnShards: 72075186233409565 ColumnShards: 72075186233409566 ColumnShards: 72075186233409567 ColumnShards: 72075186233409568 ColumnShards: 72075186233409569 ColumnShards: 72075186233409570 ColumnShards: 72075186233409571 ColumnShards: 72075186233409572 ColumnShards: 72075186233409573 ColumnShards: 72075186233409574 ColumnShards: 72075186233409575 ColumnShards: 72075186233409576 ColumnShards: 72075186233409577 ColumnShards: 72075186233409578 ColumnShards: 72075186233409579 ColumnShards: 72075186233409580 ColumnShards: 72075186233409581 ColumnShards: 72075186233409582 ColumnShards: 72075186233409583 ColumnShards: 72075186233409584 ColumnShards: 72075186233409585 ColumnShards: 72075186233409586 ColumnShards: 72075186233409587 ColumnShards: 72075186233409588 ColumnShards: 72075186233409589 ColumnShards: 72075186233409590 ColumnShards: 72075186233409591 ColumnShards: 72075186233409592 ColumnShards: 72075186233409593 ColumnShards: 72075186233409594 ColumnShards: 72075186233409595 ColumnShards: 72075186233409596 ColumnShards: 72075186233409597 ColumnShards: 72075186233409598 ColumnShards: 72075186233409599 ColumnShards: 72075186233409600 ColumnShards: 72075186233409601 ColumnShards: 72075186233409602 ColumnShards: 72075186233409603 ColumnShards: 72075186233409604 ColumnShards: 
72075186233409605 ColumnShards: 72075186233409606 ColumnShards: 72075186233409607 ColumnShards: 72075186233409608 ColumnShards: 72075186233409609 HashSharding { Function: HASH_FUNCTION_CONSISTENCY_64 Columns: "modified_at" } } StorageConfig { DataChannelCount: 64 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> Cdc::DropIndex [GOOD] >> Cdc::DisableStream ------- [TM] {asan, default-linux-x86_64, release} ydb/core/viewer/ut/unittest >> Viewer::JsonStorageListingV1PDiskIdFilter 2025-05-07 08:59:15,958 WARNING devtools.ya.test.programs.test_tool.run_test.run_test: Wrapper execution timed out 2025-05-07 08:59:16,338 WARNING devtools.ya.test.programs.test_tool.run_test.run_test: Wrapper has overrun 600 secs timeout. Process tree before termination: pid rss ref pdirt 125430 46.0M 45.8M 23.1M test_tool run_ut @/home/runner/.ya/build/build_root/zvgn/0038f4/ydb/core/viewer/ut/test-results/unittest/testing_out_stuff/chunk4/testing_out_stuff/test_tool.args 126936 3.5G 3.4G 3.4G └─ ydb-core-viewer-ut --trace-path-append /home/runner/.ya/build/build_root/zvgn/0038f4/ydb/core/viewer/ut/test-results/unittest/testing_out_stuff/chunk4/ytest.report.trace Test command err: 2025-05-07T08:49:35.258014Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:337:2380], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:49:35.258177Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:49:35.258238Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] TServer::EnableGrpc on GrpcPort 13209, node 1 TClient is connected to server localhost:28299 2025-05-07T08:50:43.620295Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:3183:2439], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:50:43.622056Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:50:43.622807Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:50:43.623293Z node 10 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [10:1373:2379], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:50:43.624845Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:50:43.625718Z node 9 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [9:1370:2248], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:50:43.626101Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:50:43.626831Z node 5 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [5:3136:2379], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:50:43.627049Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [7:3142:2379], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:50:43.628189Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:50:43.628737Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:50:43.628920Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [6:3139:2379], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:50:43.629016Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:50:43.629095Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:50:43.629853Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:50:43.630029Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:50:43.630503Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [4:3129:2379], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:50:43.630603Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:50:43.631357Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:50:43.632094Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:50:43.632510Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:50:43.632683Z node 8 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [8:3145:2379], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:50:43.634761Z node 8 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:50:43.634913Z node 8 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:50:43.637313Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [3:3179:2382], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:50:43.638191Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:50:43.639085Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-05-07T08:50:44.111979Z node 2 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:50:44.453679Z node 2 :BS_NODE WARN: {NW89@node_warden_pdisk.cpp:106} Can't write new MockDevicesConfig to file Path# /Berkanavt/kikimr/testing/mock_devices.txt 2025-05-07T08:50:44.506597Z node 2 :BS_PDISK WARN: {BSP01@blobstorage_pdisk_actor.cpp:422} Magic sector is present on disk, now going to format device PDiskId# 1000 2025-05-07T08:50:45.593084Z node 2 :BS_PDISK WARN: {BSP01@blobstorage_pdisk_actor.cpp:362} Device formatting done PDiskId# 1000 TServer::EnableGrpc on GrpcPort 1539, node 2 TClient is connected to server localhost:6211 2025-05-07T08:50:46.207567Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:50:46.207632Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:50:46.207674Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:50:46.208213Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-05-07T08:52:46.146180Z node 11 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [11:3146:2436], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:52:46.149016Z node 11 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:52:46.149589Z node 11 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:52:46.153777Z node 18 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [18:1762:2379], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:52:46.154671Z node 14 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [14:3152:2379], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:52:46.154963Z node 17 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [17:1759:2379], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:52:46.157057Z node 18 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:52:46.157140Z node 18 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:52:46.157338Z node 17 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:52:46.158376Z node 12 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [12:3142:2379], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:52:46.158687Z node 13 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [13:3149:2379], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:52:46.158832Z node 14 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initializatio ... OR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:55:25.367146Z node 28 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:55:25.368732Z node 21 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [21:1492:2182], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:55:25.369032Z node 21 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:55:25.371115Z node 21 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-05-07T08:55:25.980969Z node 20 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:55:26.321265Z node 20 :BS_NODE WARN: {NW89@node_warden_pdisk.cpp:106} Can't write new MockDevicesConfig to file Path# /Berkanavt/kikimr/testing/mock_devices.txt 2025-05-07T08:55:26.357140Z node 20 :BS_PDISK WARN: {BSP01@blobstorage_pdisk_actor.cpp:422} Magic sector is present on disk, now going to format device PDiskId# 1000 2025-05-07T08:55:27.492609Z node 20 :BS_PDISK WARN: {BSP01@blobstorage_pdisk_actor.cpp:362} Device formatting done PDiskId# 1000 TServer::EnableGrpc on GrpcPort 62511, node 20 TClient is connected to server localhost:63850 2025-05-07T08:55:28.499726Z node 20 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:55:28.499847Z node 20 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:55:28.499935Z node 20 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:55:28.500653Z node 20 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-05-07T08:58:38.508475Z node 29 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [29:1328:2238], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:58:38.511115Z node 29 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:58:38.512863Z node 29 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:58:38.517334Z node 33 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [33:2723:2379], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:58:38.517710Z node 36 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [36:2732:2379], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:58:38.517878Z node 37 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [37:2735:2379], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:58:38.518607Z node 30 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [30:3159:2379], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:58:38.519251Z node 37 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:58:38.519337Z node 36 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:58:38.520008Z node 33 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:58:38.520258Z node 35 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [35:2729:2379], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:58:38.520488Z node 33 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:58:38.520606Z node 34 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [34:2726:2379], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:58:38.520748Z node 36 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:58:38.521266Z node 30 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:58:38.521632Z node 30 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:58:38.521859Z node 32 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [32:2720:2379], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:58:38.522199Z node 37 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:58:38.522336Z node 34 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:58:38.522391Z node 35 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:58:38.522428Z node 35 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:58:38.522910Z node 31 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [31:2717:2379], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:58:38.523223Z node 31 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:58:38.523292Z node 32 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:58:38.523364Z node 34 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:58:38.523699Z node 31 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:58:38.523739Z node 32 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-05-07T08:58:39.374076Z node 29 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:58:39.696607Z node 29 :BS_NODE WARN: {NW89@node_warden_pdisk.cpp:106} Can't write new MockDevicesConfig to file Path# /Berkanavt/kikimr/testing/mock_devices.txt 2025-05-07T08:58:39.770488Z node 29 :BS_PDISK WARN: {BSP01@blobstorage_pdisk_actor.cpp:422} Magic sector is present on disk, now going to format device PDiskId# 1000 2025-05-07T08:58:41.123981Z node 29 :BS_PDISK WARN: {BSP01@blobstorage_pdisk_actor.cpp:362} Device formatting done PDiskId# 1000 TServer::EnableGrpc on GrpcPort 2713, node 29 TClient is connected to server localhost:27286 2025-05-07T08:58:42.324809Z node 29 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:58:42.324994Z node 29 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:58:42.325107Z node 29 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:58:42.326353Z node 29 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration Traceback (most recent call last): File "library/python/testing/yatest_common/yatest/common/process.py", line 384, in wait wait_for( File "library/python/testing/yatest_common/yatest/common/process.py", line 764, in wait_for raise TimeoutError(truncate(message, MAX_MESSAGE_LEN)) yatest.common.process.TimeoutError: 600 second(s) wait timeout has expired: Command '['/home/runner/.ya/tools/v4/8580453620/test_tool', 'run_ut', '@/home/runner/.ya/build/build_root/zvgn/0038f4/ydb/core/viewer/ut/test-results/unittest/testing_out_stuff/chunk4/testing_out_stuff/test_tool.args']' stopped by 600 seconds timeout During handling of the above exception, another exception occurred: Traceback (most recent call last): File "devtools/ya/test/programs/test_tool/run_test/run_test.py", line 1738, in main res.wait(check_exit_code=False, timeout=run_timeout, on_timeout=timeout_callback) File 
"library/python/testing/yatest_common/yatest/common/process.py", line 398, in wait raise ExecutionTimeoutError(self, str(e)) yatest.common.process.ExecutionTimeoutError: (("600 second(s) wait timeout has expired: Command '['/home/runner/.ya/tools/v4/8580453620/test_tool', 'run_ut', '@/home/runner/.ya/build/build_root/zvgn/0038f4/ydb/core/viewer/ut/test-results/unittest/testing_out_stuff/chunk4/testing_out_stuff/test_tool.args']' stopped by 600 seconds timeout",), {}) >> Cdc::ShouldDeliverChangesOnSplitMerge [GOOD] >> Cdc::ResolvedTimestamps >> TestYmqHttpProxy::TestGetQueueAttributes [GOOD] >> TopicAutoscaling::PartitionSplit_ReadEmptyPartitions_PQv1 [GOOD] >> TopicAutoscaling::PartitionSplit_ReadNotEmptyPartitions_BeforeAutoscaleAwareSDK >> TestKinesisHttpProxy::TestListStreamConsumers [GOOD] >> YdbTableBulkUpsert::ValidRetry >> TRegisterNodeOverDiscoveryService::ServerWithCertVerification_ClientWithCorrectCerts_EmptyAllowedSids [GOOD] >> TRegisterNodeOverDiscoveryService::ServerWithCertVerification_ClientWithCorrectCerts_AllowOnlyDefaultGroup >> TopicAutoscaling::CDC_PartitionSplit_AutosplitByLoad [GOOD] >> TopicAutoscaling::ControlPlane_CDC >> TGRpcClientLowTest::SimpleRequest >> TestYmqHttpProxy::TestDeleteQueue >> TopicAutoscaling::PartitionSplit_PreferedPartition_PQv1 [GOOD] >> TopicAutoscaling::PartitionSplit_ReadEmptyPartitions_AutoscaleAwareSDK >> TestKinesisHttpProxy::TestListStreamConsumersWithMaxResults |91.4%| [TA] $(B)/ydb/core/viewer/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> TestYmqHttpProxy::TestSetQueueAttributes [GOOD] |91.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/ut/view/ydb-core-kqp-ut-view |91.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/view/ydb-core-kqp-ut-view |91.4%| [TA] {RESULT} $(B)/ydb/core/viewer/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} |91.4%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/view/ydb-core-kqp-ut-view >> TestYmqHttpProxy::TestTagQueue >> TestYmqHttpProxy::TestReceiveMessageWithAttemptId [GOOD] >> YdbQueryService::TestAttachTwice [GOOD] >> YdbQueryService::TestForbidExecuteWithoutAttach >> TRegisterNodeOverLegacyService::ServerWithCertVerification_ClientWithCorrectCerts_AccessDenied >> TFlatTest::CopyTableAndReturnPartAfterCompaction [GOOD] >> TFlatTest::CopyTableDropOriginalAndReturnPartAfterCompaction >> TestYmqHttpProxy::TestListQueues >> YdbImport::Simple [GOOD] >> YdbIndexTable::AlterIndexImplBySuperUser >> ClientStatsCollector::CounterCacheMiss [GOOD] >> ClientStatsCollector::CounterRetryOperation >> TFlatTableExecutor_IndexLoading::Scan_History_BTreeIndex [GOOD] >> TFlatTableExecutor_IndexLoading::Scan_Groups_FlatIndex >> TGRpcYdbTest::ExecuteQueryWithParametersBadRequest [GOOD] >> TGRpcYdbTest::ExecuteQueryWithParametersExplicitSession >> Cdc::RacyCreateAndSend [GOOD] >> Cdc::RacySplitAndDropTable >> TopicAutoscaling::ReadingAfterSplitTest_AutoscaleAwareSDK [GOOD] >> TopicAutoscaling::ReadingAfterSplitTest_AutoscaleAwareSDK_AutoCommit >> Cdc::DisableStream [GOOD] >> Cdc::InitialScan >> YdbTableBulkUpsert::ValidRetry [GOOD] >> YdbTableBulkUpsert::Types >> TGRpcClientLowTest::SimpleRequest [GOOD] >> TGRpcClientLowTest::SimpleRequestDummyService >> TFlatTest::CopyTableDropOriginalAndReturnPartAfterCompaction [GOOD] >> TPersQueueTest::PreferredCluster_EnabledRemotePreferredClusterAndRemoteClusterEnabledDelaySec_SessionDiesOnlyAfterDelay [GOOD] >> TPersQueueTest::PreferredCluster_RemotePreferredClusterEnabledWhileSessionInitializing_SessionDiesOnlyAfterInitializationAndDelay ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/persqueue_public/ut/with_offset_ranges_mode_ut/unittest >> RetryPolicy::RetryWithBatching [GOOD] Test command err: 2025-05-07T08:53:15.717776Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:53:15.717817Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:53:15.717850Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-05-07T08:53:15.718419Z :ERROR: [db] [sessionid] [cluster] Got error. Status: INTERNAL_ERROR. Description: 2025-05-07T08:53:15.718486Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:53:15.718518Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:53:15.719864Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.005191s 2025-05-07T08:53:15.720643Z :ERROR: [db] [sessionid] [cluster] Got error. Status: INTERNAL_ERROR. Description: 2025-05-07T08:53:15.720678Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:53:15.720744Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:53:15.720788Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.009721s 2025-05-07T08:53:15.734245Z :ERROR: [db] [sessionid] [cluster] Got error. Status: INTERNAL_ERROR. 
Description: 2025-05-07T08:53:15.734295Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:53:15.734344Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-05-07T08:53:15.734417Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.008339s 2025-05-07T08:53:15.756685Z :TWriteSession_TestPolicy INFO: Random seed for debugging is 1746607995756641 2025-05-07T08:53:16.420929Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501624224453719722:2073];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:53:16.421006Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-05-07T08:53:16.640368Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7501624225802943086:2212];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:53:16.646242Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00345c/r3tmp/tmpCzknuJ/pdisk_1.dat 2025-05-07T08:53:16.979777Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-05-07T08:53:16.958070Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-05-07T08:53:17.468923Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:53:17.620889Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:53:17.620980Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:53:17.632178Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-05-07T08:53:17.637314Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:53:17.658873Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:53:17.658944Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:53:17.674213Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:53:17.683533Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:53:17.725516Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 22644, node 1 2025-05-07T08:53:17.918580Z node 1 :GRPC_SERVER WARN: grpc_request_proxy.cpp:509: SchemeBoardDelete /Root Strong=0 2025-05-07T08:53:17.918597Z node 1 :GRPC_SERVER WARN: 
grpc_request_proxy.cpp:509: SchemeBoardDelete /Root Strong=0 2025-05-07T08:53:18.093904Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/zvgn/00345c/r3tmp/yandexu25Kz9.tmp 2025-05-07T08:53:18.093928Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/zvgn/00345c/r3tmp/yandexu25Kz9.tmp 2025-05-07T08:53:18.094127Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/zvgn/00345c/r3tmp/yandexu25Kz9.tmp 2025-05-07T08:53:18.094496Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-05-07T08:53:18.410898Z INFO: TTestServer started on Port 23210 GrpcPort 22644 TClient is connected to server localhost:23210 PQClient connected to localhost:22644 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:53:19.181896Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... waiting... waiting... waiting... 2025-05-07T08:53:21.426127Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501624224453719722:2073];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:53:21.426245Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:53:21.574130Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7501624225802943086:2212];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:53:21.574195Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:53:23.784432Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7501624255867714351:2318], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:53:23.784534Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7501624255867714319:2315], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:53:23.784805Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:53:23.804303Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715657:3, at schemeshard: 72057594046644480 2025-05-07T08:53:23.844847Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7501624255867714356:2319], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715657 completed, doublechecking } 2025-05-07T08:53:23.920543Z node 2 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [2:7501624255867714384:2136] txid# 281474976715658, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:53:24.419094Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 2025-05-07T08:53:24.449397Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7501624255867714391:2323], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-05-07T08:53:24.450314Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2147: SessionId: ydb://session/3?node_id=2&id=Nzg2OTNlYmQtNTNjM2E4ZmMtODVlYjRlODYtMzE4MzQwNzI=, ActorId: [2:7501624255867714316:2313], ActorState: ExecuteState, TraceId: 01jtmz66nq4g023x724qc6ts8k, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-05-07T08:53:24.452666Z node 2 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-05-07T08:53:24.455979Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7501624254518491929:2349], status: SCHEME_ERROR, is ... 0 count 10 size 1208 2025-05-07T08:59:26.998272Z node 17 :PERSQUEUE DEBUG: cache_eviction.h:315: Caching head blob in L1. Partition 0 offset 0 count 10 size 1208 actorID [17:7501625807175975684:2629] 2025-05-07T08:59:26.998409Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:524: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 1230 WriteNewSizeFromSupportivePartitions# 0 2025-05-07T08:59:26.998465Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:58: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-05-07T08:59:26.998532Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:324: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Answering for message sourceid: '\0test-message-group-id', Topic: 'rt3.dc1--test-topic', Partition: 0, SeqNo: 1, partNo: 0, Offset: 0 is stored on disk 2025-05-07T08:59:26.998570Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:58: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-05-07T08:59:26.998600Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:324: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Answering for message sourceid: '\0test-message-group-id', Topic: 'rt3.dc1--test-topic', Partition: 0, SeqNo: 2, partNo: 0, Offset: 1 is stored on disk 2025-05-07T08:59:26.998620Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:58: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-05-07T08:59:26.998648Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:324: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Answering for message sourceid: '\0test-message-group-id', Topic: 'rt3.dc1--test-topic', Partition: 0, SeqNo: 3, partNo: 0, Offset: 2 is stored on disk 2025-05-07T08:59:26.998669Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:58: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ReplyWrite. 
Partition: 0 2025-05-07T08:59:26.998698Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:324: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Answering for message sourceid: '\0test-message-group-id', Topic: 'rt3.dc1--test-topic', Partition: 0, SeqNo: 4, partNo: 0, Offset: 3 is stored on disk 2025-05-07T08:59:26.998718Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:58: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-05-07T08:59:26.998747Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:324: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Answering for message sourceid: '\0test-message-group-id', Topic: 'rt3.dc1--test-topic', Partition: 0, SeqNo: 5, partNo: 0, Offset: 4 is stored on disk 2025-05-07T08:59:26.998767Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:58: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-05-07T08:59:26.998797Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:324: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Answering for message sourceid: '\0test-message-group-id', Topic: 'rt3.dc1--test-topic', Partition: 0, SeqNo: 6, partNo: 0, Offset: 5 is stored on disk 2025-05-07T08:59:26.998815Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:58: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-05-07T08:59:26.998845Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:324: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Answering for message sourceid: '\0test-message-group-id', Topic: 'rt3.dc1--test-topic', Partition: 0, SeqNo: 7, partNo: 0, Offset: 6 is stored on disk 2025-05-07T08:59:26.998866Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:58: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-05-07T08:59:26.998895Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:324: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Answering for message sourceid: '\0test-message-group-id', Topic: 'rt3.dc1--test-topic', Partition: 0, SeqNo: 8, partNo: 0, Offset: 7 is stored on disk 2025-05-07T08:59:26.998913Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:58: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-05-07T08:59:26.998943Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:324: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Answering for message sourceid: '\0test-message-group-id', Topic: 'rt3.dc1--test-topic', Partition: 0, SeqNo: 9, partNo: 0, Offset: 8 is stored on disk 2025-05-07T08:59:26.998964Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:58: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ReplyWrite. 
Partition: 0 2025-05-07T08:59:26.998992Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:324: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Answering for message sourceid: '\0test-message-group-id', Topic: 'rt3.dc1--test-topic', Partition: 0, SeqNo: 10, partNo: 0, Offset: 9 is stored on disk 2025-05-07T08:59:26.999220Z node 17 :PERSQUEUE DEBUG: partition_read.cpp:774: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Topic 'rt3.dc1--test-topic' partition 0 user user readTimeStamp for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 0 rrg 0 2025-05-07T08:59:26.999264Z node 17 :PERSQUEUE DEBUG: partition_read.cpp:816: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Topic 'rt3.dc1--test-topic' partition 0 user user send read request for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 1 rrg 0 2025-05-07T08:59:26.999550Z node 17 :PERSQUEUE DEBUG: pq_impl.cpp:377: Answer ok topic: 'rt3.dc1--test-topic' partition: 0 messageNo: 1 requestId: cookie: 1 2025-05-07T08:59:26.999744Z node 17 :PQ_WRITE_PROXY DEBUG: writer.cpp:538: TPartitionWriter 72075186224037892 (partition=0) Received event: NKikimr::TEvPersQueue::TEvResponse 2025-05-07T08:59:27.000271Z node 17 :PERSQUEUE DEBUG: partition_read.cpp:731: [PQ: 72075186224037892, Partition: 0, State: StateIdle] read cookie 0 Topic 'rt3.dc1--test-topic' partition 0 user user offset 0 count 1 size 1024000 endOffset 10 max time lag 0ms effective offset 0 2025-05-07T08:59:27.000338Z node 17 :PERSQUEUE DEBUG: partition_read.cpp:931: [PQ: 72075186224037892, Partition: 0, State: StateIdle] read cookie 0 added 0 blobs, size 0 count 0 last offset 0, current partition end offset: 10 2025-05-07T08:59:27.000601Z node 17 :PERSQUEUE DEBUG: partition_read.cpp:948: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Reading cookie 0. All data is from uncompacted head. 2025-05-07T08:59:27.000642Z node 17 :PERSQUEUE DEBUG: partition_read.cpp:420: FormAnswer for 0 blobs 2025-05-07T08:59:27.000747Z node 17 :PERSQUEUE DEBUG: partition_read.cpp:856: Topic 'rt3.dc1--test-topic' partition 0 user user readTimeStamp done, result 1746608366979 queuesize 0 startOffset 0 2025-05-07T08:59:27.002223Z node 17 :PERSQUEUE DEBUG: pq_l2_cache.cpp:120: PQ Cache (L2). Adding blob. 
Tablet '72075186224037892' partition 0 offset 0 partno 0 count 10 parts 0 size 1208 2025-05-07T08:59:27.010327Z :DEBUG: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|ca8dac80-a8e5636-d2892bda-5e14eb88_0] Write session got write response: sequence_numbers: 1 sequence_numbers: 2 sequence_numbers: 3 sequence_numbers: 4 sequence_numbers: 5 sequence_numbers: 6 sequence_numbers: 7 sequence_numbers: 8 sequence_numbers: 9 sequence_numbers: 10 offsets: 0 offsets: 1 offsets: 2 offsets: 3 offsets: 4 offsets: 5 offsets: 6 offsets: 7 offsets: 8 offsets: 9 already_written: false already_written: false already_written: false already_written: false already_written: false already_written: false already_written: false already_written: false already_written: false already_written: false write_statistics { persist_duration_ms: 13 queued_in_partition_duration_ms: 6 } 2025-05-07T08:59:27.010411Z :DEBUG: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|ca8dac80-a8e5636-d2892bda-5e14eb88_0] Write session: acknoledged message 1 2025-05-07T08:59:27.010473Z :DEBUG: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|ca8dac80-a8e5636-d2892bda-5e14eb88_0] Write session: acknoledged message 2 2025-05-07T08:59:27.010504Z :DEBUG: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|ca8dac80-a8e5636-d2892bda-5e14eb88_0] Write session: acknoledged message 3 2025-05-07T08:59:27.010532Z :DEBUG: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|ca8dac80-a8e5636-d2892bda-5e14eb88_0] Write session: acknoledged message 4 2025-05-07T08:59:27.010562Z :DEBUG: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|ca8dac80-a8e5636-d2892bda-5e14eb88_0] Write session: acknoledged message 5 2025-05-07T08:59:27.010612Z :DEBUG: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|ca8dac80-a8e5636-d2892bda-5e14eb88_0] Write session: acknoledged message 6 2025-05-07T08:59:27.010641Z :DEBUG: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|ca8dac80-a8e5636-d2892bda-5e14eb88_0] Write session: acknoledged message 7 2025-05-07T08:59:27.010671Z :DEBUG: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|ca8dac80-a8e5636-d2892bda-5e14eb88_0] Write session: acknoledged message 8 2025-05-07T08:59:27.010701Z :DEBUG: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|ca8dac80-a8e5636-d2892bda-5e14eb88_0] Write session: acknoledged message 9 2025-05-07T08:59:27.010736Z :DEBUG: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|ca8dac80-a8e5636-d2892bda-5e14eb88_0] Write session: acknoledged message 10 2025-05-07T08:59:27.013383Z :INFO: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|ca8dac80-a8e5636-d2892bda-5e14eb88_0] Write session: close. 
Timeout = 0 ms 2025-05-07T08:59:27.013447Z :INFO: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|ca8dac80-a8e5636-d2892bda-5e14eb88_0] Write session will now close 2025-05-07T08:59:27.013507Z :DEBUG: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|ca8dac80-a8e5636-d2892bda-5e14eb88_0] Write session: aborting 2025-05-07T08:59:27.014092Z :INFO: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|ca8dac80-a8e5636-d2892bda-5e14eb88_0] Write session: gracefully shut down, all writes complete 2025-05-07T08:59:27.014156Z :DEBUG: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|ca8dac80-a8e5636-d2892bda-5e14eb88_0] Write session: destroy 2025-05-07T08:59:27.018090Z node 17 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 7 sessionId: test-message-group-id|ca8dac80-a8e5636-d2892bda-5e14eb88_0 grpc read done: success: 0 data: 2025-05-07T08:59:27.018149Z node 17 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 7 sessionId: test-message-group-id|ca8dac80-a8e5636-d2892bda-5e14eb88_0 grpc read failed 2025-05-07T08:59:27.018216Z node 17 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 7 sessionId: test-message-group-id|ca8dac80-a8e5636-d2892bda-5e14eb88_0 grpc closed 2025-05-07T08:59:27.018253Z node 17 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 7 sessionId: test-message-group-id|ca8dac80-a8e5636-d2892bda-5e14eb88_0 is DEAD 2025-05-07T08:59:27.019571Z node 17 :PQ_WRITE_PROXY DEBUG: writer.cpp:538: TPartitionWriter 72075186224037892 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-05-07T08:59:27.021064Z node 17 :PERSQUEUE DEBUG: pq_impl.cpp:2899: [PQ: 72075186224037892] server disconnected, pipe [17:7501625811470943232:2656] destroyed 2025-05-07T08:59:27.021163Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:138: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::DropOwner. 
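The RetryPolicy::RetryWithBatching output above traces one pattern end to end: the write session hits INTERNAL_ERROR, schedules a reconnect with a small growing delay (0.005s, 0.009s, 0.008s), and once reconnected writes the whole batch, receiving one ack per message with matching sequence_numbers/offsets before closing gracefully. Below is a minimal sketch of that retry-with-backoff loop. All names here (RetryableError, send_batch, write_batch_with_retry) are hypothetical illustrations, not the SDK's actual API.

import random
import time

class RetryableError(Exception):
    """Stands in for a transient session error (e.g. the INTERNAL_ERROR above)."""

def write_batch_with_retry(send_batch, batch, max_attempts=5, base_delay=0.005):
    """Sketch of a retry-with-backoff loop for a batched write.

    `send_batch` is a hypothetical callable that either returns a list of
    (seq_no, offset) acks for the whole batch or raises RetryableError.
    """
    for attempt in range(max_attempts):
        try:
            acks = send_batch(batch)
            # One ack per message, in order -- like the sequence_numbers /
            # offsets pairs in the write response logged above.
            assert len(acks) == len(batch)
            return acks
        except RetryableError:
            if attempt == max_attempts - 1:
                raise
            # Jittered, roughly exponential delay, similar in spirit to the
            # reconnect intervals the session logs between attempts.
            time.sleep(base_delay * (2 ** attempt) * random.uniform(0.5, 1.5))

Note the batch is retried as a unit rather than message by message, which is the property the test name suggests it exercises: after a reconnect, the acks still come back as a single ordered set for the whole batch.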
>> Cdc::AddColumn_TopicAutoPartitioning [GOOD] >> Cdc::AddIndex >> TopicAutoscaling::PartitionSplit_AutoscaleAwareSDK [GOOD] >> TopicAutoscaling::PartitionMerge_PreferedPartition_PQv1 >> YdbQueryService::TestForbidExecuteWithoutAttach [GOOD] >> YdbQueryService::TestCreateDropAttachSession >> TSchemeShardTTLTestsWithReboots::AlterTable [GOOD] >> CommitOffset::Commit_Flat_WithWrongSession [GOOD] >> CommitOffset::Commit_Flat_WithWrongSession_ToPast >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_ttl_DyNumber-pk_types8-all_types8-index8-DyNumber--] [GOOD] >> YdbYqlClient::TestYqlWrongTable >> TRegisterNodeOverDiscoveryService::ServerWithCertVerification_ClientWithCorrectCerts_AllowOnlyDefaultGroup [GOOD] >> TRegisterNodeOverDiscoveryService::ServerWithIssuerVerification_ClientWithSameIssuer >> TRegisterNodeOverLegacyService::ServerWithCertVerification_ClientWithCorrectCerts_AccessDenied [GOOD] >> TRegisterNodeOverLegacyService::ServerWithoutCertVerification_ClientProvidesCorrectCerts >> TopicAutoscaling::ReadingAfterSplitTest_PreferedPartition_PQv1 [GOOD] >> TopicAutoscaling::WithDir_PartitionSplit_AutosplitByLoad >> TestKinesisHttpProxy::TestListStreamConsumersWithMaxResults [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLTestsWithReboots::AlterTable [GOOD] Test command err: ==== RunWithTabletReboots =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2141] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2141] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:116:2058] recipient: [1:111:2142] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:116:2058] recipient: [1:111:2142] Leader for TabletID 72057594046678944 is [1:123:2149] sender: [1:126:2058] recipient: [1:108:2140] Leader for TabletID 72057594046447617 is [1:129:2154] sender: [1:131:2058] recipient: [1:109:2141] Leader for TabletID 72057594046316545 is [1:134:2157] sender: [1:136:2058] recipient: [1:111:2142] 2025-05-07T08:58:34.152216Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:58:34.152335Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:58:34.152383Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:58:34.152418Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:58:34.152468Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:58:34.152501Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:58:34.152579Z 
node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:58:34.152658Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:58:34.153465Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:58:34.153867Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:58:34.273396Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7610: Got new config: QueryServiceConfig { AvailableExternalDataSources: "ObjectStorage" AvailableExternalDataSources: "ClickHouse" AvailableExternalDataSources: "PostgreSQL" AvailableExternalDataSources: "MySQL" AvailableExternalDataSources: "Ydb" AvailableExternalDataSources: "YT" AvailableExternalDataSources: "Greenplum" AvailableExternalDataSources: "MsSQLServer" AvailableExternalDataSources: "Oracle" AvailableExternalDataSources: "Logging" AvailableExternalDataSources: "Solomon" } 2025-05-07T08:58:34.273467Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:58:34.274262Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# ObjectStorage, ClickHouse, PostgreSQL, MySQL, Ydb, YT, Greenplum, MsSQLServer, Oracle, Logging, Solomon Leader for TabletID 72057594046447617 is [1:129:2154] sender: [1:175:2058] recipient: [1:15:2062] 2025-05-07T08:58:34.308406Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:58:34.308664Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:58:34.308823Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:58:34.315971Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:58:34.316255Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:58:34.316967Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:34.317204Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:58:34.319610Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:58:34.321007Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:58:34.321068Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:58:34.321253Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: 
TTxServerlessStorageBilling.Execute 2025-05-07T08:58:34.321325Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:58:34.321377Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:58:34.321529Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:213:2058] recipient: [1:211:2212] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:213:2058] recipient: [1:211:2212] Leader for TabletID 72057594037968897 is [1:217:2216] sender: [1:218:2058] recipient: [1:211:2212] 2025-05-07T08:58:34.329516Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:123:2149] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T08:58:34.481293Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:58:34.481534Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:34.481813Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:58:34.482611Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:58:34.482699Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:34.485554Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:34.485706Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:58:34.485939Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:34.486009Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:58:34.486073Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:58:34.486119Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:58:34.488503Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:34.488582Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:58:34.488632Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:58:34.491383Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:34.491447Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:34.491492Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:58:34.491568Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:58:34.495736Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:58:34.498278Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:58:34.498482Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:134:2157] sender: [1:253:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:58:34.499563Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:34.499737Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 134 RawX2: 4294969453 } } St ... 
HEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [51:205:2207], at schemeshard: 72057594046678944, txId: 1003, path id: 3 FAKE_COORDINATOR: Erasing txId 1003 2025-05-07T08:59:43.263711Z node 51 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1003:0, at schemeshard: 72057594046678944 2025-05-07T08:59:43.263766Z node 51 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1036: NTableState::TProposedWaitParts operationId# 1003:0 ProgressState at tablet: 72057594046678944 2025-05-07T08:59:43.264494Z node 51 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 4 PathOwnerId: 72057594046678944, cookie: 1003 2025-05-07T08:59:43.264608Z node 51 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 4 PathOwnerId: 72057594046678944, cookie: 1003 2025-05-07T08:59:43.264645Z node 51 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 1003 2025-05-07T08:59:43.264681Z node 51 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1003, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 4 2025-05-07T08:59:43.264719Z node 51 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4 2025-05-07T08:59:43.264810Z node 51 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 1003, ready parts: 0/1, is published: true 2025-05-07T08:59:43.265165Z node 51 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6245: Handle TEvProposeTransactionResult, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 1003 Step: 5000004 OrderId: 1003 ExecLatency: 0 ProposeLatency: 2 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 5393 } } 2025-05-07T08:59:43.265206Z node 51 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation FindRelatedPartByTabletId, TxId: 1003, tablet: 72075186233409546, partId: 0 2025-05-07T08:59:43.265329Z node 51 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 1003:0, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 1003 Step: 5000004 OrderId: 1003 ExecLatency: 0 ProposeLatency: 2 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 5393 } } 2025-05-07T08:59:43.265425Z node 51 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_part.cpp:108: HandleReply TEvDataShard::TEvProposeTransactionResult Ignore message: tablet# 72057594046678944, ev# TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 1003 Step: 5000004 OrderId: 1003 ExecLatency: 0 ProposeLatency: 2 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 5393 } } 2025-05-07T08:59:43.266018Z node 51 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5472: Handle TEvSchemaChanged, 
tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 328 RawX2: 219043334411 } Origin: 72075186233409546 State: 2 TxId: 1003 Step: 0 Generation: 2 2025-05-07T08:59:43.266060Z node 51 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation FindRelatedPartByTabletId, TxId: 1003, tablet: 72075186233409546, partId: 0 2025-05-07T08:59:43.266211Z node 51 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 1003:0, at schemeshard: 72057594046678944, message: Source { RawX1: 328 RawX2: 219043334411 } Origin: 72075186233409546 State: 2 TxId: 1003 Step: 0 Generation: 2 2025-05-07T08:59:43.266266Z node 51 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1005: NTableState::TProposedWaitParts operationId# 1003:0 HandleReply TEvSchemaChanged at tablet: 72057594046678944 2025-05-07T08:59:43.266350Z node 51 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1009: NTableState::TProposedWaitParts operationId# 1003:0 HandleReply TEvSchemaChanged at tablet: 72057594046678944 message: Source { RawX1: 328 RawX2: 219043334411 } Origin: 72075186233409546 State: 2 TxId: 1003 Step: 0 Generation: 2 2025-05-07T08:59:43.266404Z node 51 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:655: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 1003:0, shardIdx: 72057594046678944:1, datashard: 72075186233409546, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-05-07T08:59:43.266443Z node 51 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:674: all shard schema changes has been received, operationId: 1003:0, at schemeshard: 72057594046678944 2025-05-07T08:59:43.266481Z node 51 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:686: send schema changes ack message, operation: 1003:0, datashard: 72075186233409546, at schemeshard: 72057594046678944 2025-05-07T08:59:43.266520Z node 51 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1003:0 129 -> 240 2025-05-07T08:59:43.277130Z node 51 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 2025-05-07T08:59:43.277311Z node 51 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 1003:0, at schemeshard: 72057594046678944 2025-05-07T08:59:43.277482Z node 51 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 1003:0, at schemeshard: 72057594046678944 2025-05-07T08:59:43.277947Z node 51 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1003:0, at schemeshard: 72057594046678944 2025-05-07T08:59:43.278023Z node 51 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046678944] TDone opId# 1003:0 ProgressState 2025-05-07T08:59:43.278136Z node 51 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#1003:0 progress is 1/1 2025-05-07T08:59:43.278174Z node 51 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 1003 ready parts: 1/1 2025-05-07T08:59:43.278219Z node 51 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#1003:0 progress is 1/1 2025-05-07T08:59:43.278253Z node 51 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 1003 ready parts: 1/1 2025-05-07T08:59:43.278292Z node 51 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 1003, ready parts: 1/1, is published: true 2025-05-07T08:59:43.278337Z node 51 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 1003 ready parts: 1/1 2025-05-07T08:59:43.278375Z node 51 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 1003:0 2025-05-07T08:59:43.278406Z node 51 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 1003:0 2025-05-07T08:59:43.278532Z node 51 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 TestModificationResult got TxId: 1003, wait until txId: 1003 TestWaitNotification wait txId: 1003 2025-05-07T08:59:43.285062Z node 51 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 1003: send EvNotifyTxCompletion 2025-05-07T08:59:43.285131Z node 51 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 1003 2025-05-07T08:59:43.285502Z node 51 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1003, at schemeshard: 72057594046678944 2025-05-07T08:59:43.285601Z node 51 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 1003: got EvNotifyTxCompletionResult 2025-05-07T08:59:43.285638Z node 51 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 1003: satisfy waiter [51:448:2421] TestWaitNotification: OK eventTxId 1003 2025-05-07T08:59:43.286106Z node 51 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/TTLEnabledTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T08:59:43.286329Z node 51 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/TTLEnabledTable" took 258us result status StatusSuccess 2025-05-07T08:59:43.286787Z node 51 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/TTLEnabledTable" PathDescription { Self { Name: "TTLEnabledTable" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 1002 CreateStep: 5000003 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "TTLEnabledTable" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "modified_at" Type: "Timestamp" TypeId: 50 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 2 TTLSettings { Enabled { ColumnName: "modified_at" ExpireAfterSeconds: 3600 Tiers { ApplyAfterSeconds: 3600 Delete { } } } } IsBackup: false IsRestore: false } TableStats { 
DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 1 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944
>> TestKinesisHttpProxy::TestListStreamConsumersWithToken
>> YdbIndexTable::AlterIndexImplBySuperUser [GOOD]
>> YdbIndexTable::CreateTableAddIndex
>> TGRpcYdbTest::ExecuteQueryWithParametersExplicitSession [GOOD]
>> TGRpcYdbTest::ExplainQuery
------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TFlatTest::CopyTableDropOriginalAndReturnPartAfterCompaction [GOOD]
Test command err: 2025-05-07T08:59:29.035942Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501625827112646684:2203];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:59:29.039144Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/004a1b/r3tmp/tmp42UkAQ/pdisk_1.dat 2025-05-07T08:59:30.086383Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:59:30.290677Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:59:30.290780Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:59:30.291368Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:59:30.307404Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:29272 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-05-07T08:59:31.184146Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-05-07T08:59:31.335413Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:59:32.800179Z node 1 :OPS_COMPACT INFO: Compact{72075186224037888.1.11, eph 1} end=0, 4 blobs 3r (max 3), put Spent{time=0.050s,wait=0.001s,interrupts=1} Part{ 2 pk, lobs 2 +0, (1265 647 2154)b }, ecr=1.000 2025-05-07T08:59:32.802479Z node 1 :OPS_COMPACT INFO: Compact{72075186224037889.1.11, eph 1} end=0, 4 blobs 3r (max 3), put Spent{time=0.042s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 2 +0, (1139 521 2626)b }, ecr=1.000 2025-05-07T08:59:32.841690Z node 1 :OPS_COMPACT INFO: Compact{72075186224037888.1.16, eph 2} end=0, 4 blobs 6r (max 6), put Spent{time=0.014s,wait=0.002s,interrupts=1} Part{ 2 pk, lobs 5 +0, (1573 647 6413)b }, ecr=1.000 2025-05-07T08:59:32.845779Z node 1 :OPS_COMPACT INFO: Compact{72075186224037889.1.16, eph 2} end=0, 4 blobs 6r (max 6), put Spent{time=0.012s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 4 +0, (2326 1432 5183)b }, ecr=1.000 TClient::Ls request: /dc-1/Dir/TableOld TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "TableOld" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710659 CreateStep: 1746608371704 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "TableOld" Columns { Name: "unused004" Type: "Float" TypeId: 33 Id: 7 NotNull: false IsBuildInProgress: false } Columns { Name: "Key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name... 
(TRUNCATED) Copy TableOld to Table 2025-05-07T08:59:33.035206Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/dc-1/Dir" OperationType: ESchemeOpCreateTable CreateTable { Name: "Table" PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 100000 InMemStepsToSnapshot: 2 InMemForceStepsToSnapshot: 3 InMemForceSizeToSnapshot: 1000000 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 200000 ReadAheadLoThreshold: 100000 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 10000 CountToCompact: 2 ForceCountToCompact: 2 ForceSizeToCompact: 20000 CompactionBrokerQueue: 1 KeepInCache: true } } ColumnFamilies { Id: 0 ColumnCache: ColumnCacheNone Storage: ColumnStorageTest_1_2_1k } } CopyFromTable: "/dc-1/Dir/TableOld" } } TxId: 281474976710676 TabletId: 72057594046644480 PeerName: "" , at schemeshard: 72057594046644480 2025-05-07T08:59:33.035585Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_copy_table.cpp:383: TCopyTable Propose, path: /dc-1/Dir/Table, opId: 281474976710676:0, at schemeshard: 72057594046644480 2025-05-07T08:59:33.036030Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:319: AttachChild: child attached as only one child to the parent, parent id: [OwnerId: 72057594046644480, LocalPathId: 2], parent name: Dir, child name: Table, child id: [OwnerId: 72057594046644480, LocalPathId: 4], at schemeshard: 72057594046644480 2025-05-07T08:59:33.036089Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 4] was 0 2025-05-07T08:59:33.036103Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction source path for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 3 2025-05-07T08:59:33.036134Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason new shard created for pathId [OwnerId: 72057594046644480, LocalPathId: 4] was 1 2025-05-07T08:59:33.036150Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason new shard created for pathId [OwnerId: 72057594046644480, LocalPathId: 4] was 2 2025-05-07T08:59:33.046202Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046644480, LocalPathId: 4] was 3 2025-05-07T08:59:33.046385Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 281474976710676:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-05-07T08:59:33.070301Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 2 2025-05-07T08:59:33.070359Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046644480, LocalPathId: 4] was 4 waiting... 
2025-05-07T08:59:33.077986Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 281474976710676, response: Status: StatusAccepted TxId: 281474976710676 SchemeshardId: 72057594046644480 PathId: 4, at schemeshard: 72057594046644480 2025-05-07T08:59:33.078140Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976710676, database: /dc-1, subject: , status: StatusAccepted, operation: CREATE TABLE, path: /dc-1/Dir/Table 2025-05-07T08:59:33.078400Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046644480 2025-05-07T08:59:33.078418Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046644480, txId: 281474976710676, path id: [OwnerId: 72057594046644480, LocalPathId: 2] 2025-05-07T08:59:33.078557Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046644480, txId: 281474976710676, path id: [OwnerId: 72057594046644480, LocalPathId: 4] 2025-05-07T08:59:33.078650Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046644480 2025-05-07T08:59:33.078670Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:7501625831407614362:2253], at schemeshard: 72057594046644480, txId: 281474976710676, path id: 2 2025-05-07T08:59:33.078686Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:7501625831407614362:2253], at schemeshard: 72057594046644480, txId: 281474976710676, path id: 4 2025-05-07T08:59:33.078730Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 281474976710676:0, at schemeshard: 72057594046644480 2025-05-07T08:59:33.078777Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 281474976710676:0 ProgressState, operation type: TxCopyTable, at tablet# 72057594046644480 2025-05-07T08:59:33.079187Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:357: TCreateParts opId# 281474976710676:0 CreateRequest Event to Hive: 72057594037968897 msg: Owner: 72057594046644480 OwnerIdx: 3 TabletType: DataShard FollowerCount: 0 ObjectDomain { SchemeShard: 72057594046644480 PathId: 1 } ObjectId: 4 BindedChannels { StoragePoolName: "/dc-1:test" } BindedChannels { StoragePoolName: "/dc-1:test" } BindedChannels { StoragePoolName: "/dc-1:test" } AllowedDomains { SchemeShard: 72057594046644480 PathId: 1 } 2025-05-07T08:59:33.079354Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:357: TCreateParts opId# 281474976710676:0 CreateRequest Event to Hive: 72057594037968897 msg: Owner: 72057594046644480 OwnerIdx: 4 TabletType: DataShard FollowerCount: 0 ObjectDomain { SchemeShard: 72057594046644480 PathId: 1 } ObjectId: 4 BindedChannels { StoragePoolName: "/dc-1:test" } BindedChannels { StoragePoolName: "/dc-1:test" } BindedChannels { StoragePoolName: "/dc-1:test" } AllowedDomains { SchemeShard: 72057594046644480 PathId: 1 } 2025-05-07T08:59:33.084444Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 Generation: 2 
LocalPathId: 2 Version: 6 PathOwnerId: 72057594046644480, cookie: 281474976710676 2025-05-07T08:59:33.084628Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 Generation: 2 LocalPath ... schemeshard 72057594046644480 2025-05-07T08:59:39.668475Z node 2 :TX_DATASHARD DEBUG: datashard_loans.cpp:220: 72075186224037890 in PreOffline state HasSharedBobs: 0 SchemaOperations: [ ] OutReadSets count: 0 ChangesQueue size: 0 ChangeExchangeSplit: 1 siblings to be activated: wait to activation from: 2025-05-07T08:59:39.668515Z node 2 :TX_DATASHARD INFO: datashard_loans.cpp:177: 72075186224037890 Initiating switch from PreOffline to Offline state 2025-05-07T08:59:39.670659Z node 2 :HIVE WARN: hive_impl.cpp:1929: HIVE#72057594037968897 Can't find the tablet from RequestHiveInfo(TabletID=72075186224037888) Check that tablet 72075186224037889 was deleted 2025-05-07T08:59:39.671277Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037889, clientId# [2:7501625869391562334:2691], serverId# [2:7501625869391562339:3432], sessionId# [0:0:0] 2025-05-07T08:59:39.671353Z node 2 :TX_DATASHARD INFO: datashard_impl.h:3307: 72075186224037890 Reporting state Offline to schemeshard 72057594046644480 2025-05-07T08:59:39.671436Z node 2 :TX_DATASHARD INFO: datashard_impl.h:3307: 72075186224037891 Reporting state Offline to schemeshard 72057594046644480 2025-05-07T08:59:39.671458Z node 2 :TX_DATASHARD INFO: datashard_impl.h:3307: 72075186224037891 Reporting state Offline to schemeshard 72057594046644480 2025-05-07T08:59:39.672092Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5509: Handle TEvStateChanged, at schemeshard: 72057594046644480, message: Source { RawX1: 7501625860801626033 RawX2: 4503608217307387 } TabletId: 72075186224037889 State: 4 2025-05-07T08:59:39.672141Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__state_changed_reply.cpp:78: TTxShardStateChanged DoExecute, datashard informs about state changing, datashardId: 72075186224037889, state: Offline, at schemeshard: 72057594046644480 2025-05-07T08:59:39.672519Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046644480:2 hive 72057594037968897 at ss 72057594046644480 2025-05-07T08:59:39.673102Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2962: Handle TEvStateChangedResult datashard 72075186224037889 state Offline 2025-05-07T08:59:39.673278Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5509: Handle TEvStateChanged, at schemeshard: 72057594046644480, message: Source { RawX1: 7501625860801626327 RawX2: 4503608217307442 } TabletId: 72075186224037891 State: 4 2025-05-07T08:59:39.673317Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__state_changed_reply.cpp:78: TTxShardStateChanged DoExecute, datashard informs about state changing, datashardId: 72075186224037891, state: Offline, at schemeshard: 72057594046644480 2025-05-07T08:59:39.673466Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5509: Handle TEvStateChanged, at schemeshard: 72057594046644480, message: Source { RawX1: 7501625860801626327 RawX2: 4503608217307442 } TabletId: 72075186224037891 State: 4 2025-05-07T08:59:39.673484Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__state_changed_reply.cpp:78: TTxShardStateChanged DoExecute, datashard informs about state changing, datashardId: 72075186224037891, state: Offline, at schemeshard: 72057594046644480 
2025-05-07T08:59:39.673562Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5509: Handle TEvStateChanged, at schemeshard: 72057594046644480, message: Source { RawX1: 7501625860801626310 RawX2: 4503608217307441 } TabletId: 72075186224037890 State: 4 2025-05-07T08:59:39.673587Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__state_changed_reply.cpp:78: TTxShardStateChanged DoExecute, datashard informs about state changing, datashardId: 72075186224037890, state: Offline, at schemeshard: 72057594046644480 2025-05-07T08:59:39.674151Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5898: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 2 ShardOwnerId: 72057594046644480 ShardLocalIdx: 2, at schemeshard: 72057594046644480 2025-05-07T08:59:39.674362Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 1 2025-05-07T08:59:39.674535Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046644480 2025-05-07T08:59:39.674550Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046644480, LocalPathId: 3], at schemeshard: 72057594046644480 2025-05-07T08:59:39.674586Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 2 2025-05-07T08:59:39.675048Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046644480:4 hive 72057594037968897 at ss 72057594046644480 2025-05-07T08:59:39.675105Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046644480:4 hive 72057594037968897 at ss 72057594046644480 2025-05-07T08:59:39.675137Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046644480:3 hive 72057594037968897 at ss 72057594046644480 2025-05-07T08:59:39.675705Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:2 2025-05-07T08:59:39.675723Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:2 tabletId 72075186224037889 2025-05-07T08:59:39.675759Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046644480 2025-05-07T08:59:39.676747Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5898: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 4 ShardOwnerId: 72057594046644480 ShardLocalIdx: 4, at schemeshard: 72057594046644480 2025-05-07T08:59:39.676931Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 4] was 2 2025-05-07T08:59:39.677092Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5898: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 4 ShardOwnerId: 72057594046644480 ShardLocalIdx: 4, at schemeshard: 72057594046644480 2025-05-07T08:59:39.677217Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5898: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 3 
ShardOwnerId: 72057594046644480 ShardLocalIdx: 3, at schemeshard: 72057594046644480 2025-05-07T08:59:39.677337Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 4] was 1 2025-05-07T08:59:39.677424Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046644480 2025-05-07T08:59:39.677438Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046644480, LocalPathId: 4], at schemeshard: 72057594046644480 2025-05-07T08:59:39.677472Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 1 2025-05-07T08:59:39.680681Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:4 2025-05-07T08:59:39.680710Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:4 tabletId 72075186224037891 2025-05-07T08:59:39.681457Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:4 2025-05-07T08:59:39.681488Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:3 2025-05-07T08:59:39.681506Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:3 tabletId 72075186224037890 2025-05-07T08:59:39.681539Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046644480 2025-05-07T08:59:39.682276Z node 2 :TX_DATASHARD INFO: datashard.cpp:197: OnTabletStop: 72075186224037889 reason = ReasonStop 2025-05-07T08:59:39.682320Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037889, clientId# [2:7501625860801626155:2391], serverId# [2:7501625860801626156:2392], sessionId# [0:0:0] 2025-05-07T08:59:39.682349Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2962: Handle TEvStateChangedResult datashard 72075186224037891 state Offline 2025-05-07T08:59:39.682365Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2962: Handle TEvStateChangedResult datashard 72075186224037891 state Offline 2025-05-07T08:59:39.682375Z node 2 :TX_DATASHARD INFO: datashard.cpp:197: OnTabletStop: 72075186224037891 reason = ReasonStop 2025-05-07T08:59:39.682387Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2962: Handle TEvStateChangedResult datashard 72075186224037890 state Offline 2025-05-07T08:59:39.682399Z node 2 :TX_DATASHARD INFO: datashard.cpp:197: OnTabletStop: 72075186224037890 reason = ReasonStop 2025-05-07T08:59:39.682417Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037890, clientId# [2:7501625860801626394:2546], serverId# [2:7501625860801626395:2547], sessionId# [0:0:0] 2025-05-07T08:59:39.682440Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037890, clientId# [2:7501625865096593801:2623], serverId# [2:7501625865096593802:2624], sessionId# [0:0:0] 2025-05-07T08:59:39.683244Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus 
from node 2, TabletId: 72075186224037889 not found 2025-05-07T08:59:39.683267Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037891 not found 2025-05-07T08:59:39.683284Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037890 not found 2025-05-07T08:59:39.683580Z node 2 :TX_DATASHARD INFO: datashard.cpp:257: OnTabletDead: 72075186224037889 2025-05-07T08:59:39.683674Z node 2 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186224037889 2025-05-07T08:59:39.685244Z node 2 :TX_DATASHARD INFO: datashard.cpp:257: OnTabletDead: 72075186224037891 2025-05-07T08:59:39.685309Z node 2 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186224037891 2025-05-07T08:59:39.686650Z node 2 :TX_DATASHARD INFO: datashard.cpp:257: OnTabletDead: 72075186224037890 2025-05-07T08:59:39.686702Z node 2 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186224037890 Check that tablet 72075186224037890 was deleted Check that tablet 72075186224037891 was deleted 2025-05-07T08:59:39.975099Z node 2 :HIVE WARN: hive_impl.cpp:1929: HIVE#72057594037968897 Can't find the tablet from RequestHiveInfo(TabletID=72075186224037889) 2025-05-07T08:59:39.976108Z node 2 :HIVE WARN: hive_impl.cpp:1929: HIVE#72057594037968897 Can't find the tablet from RequestHiveInfo(TabletID=72075186224037890) 2025-05-07T08:59:39.977101Z node 2 :HIVE WARN: hive_impl.cpp:1929: HIVE#72057594037968897 Can't find the tablet from RequestHiveInfo(TabletID=72075186224037891)
>> TFlatTableExecutor_IndexLoading::Scan_Groups_FlatIndex [GOOD]
>> TFlatTableExecutor_IndexLoading::Scan_Groups_BTreeIndex
>> TopicAutoscaling::ControlPlane_CDC [GOOD]
>> TopicAutoscaling::ControlPlane_CDC_Disable
>> TestYmqHttpProxy::TestTagQueue [GOOD]
>> YdbTableBulkUpsert::Types [GOOD]
>> YdbTableBulkUpsert::Uint8
>> Cdc::ResolvedTimestamps [GOOD]
>> Cdc::ResolvedTimestampsMultiplePartitions
>> TestYmqHttpProxy::TestListQueues [GOOD]
>> CommitOffset::Commit_WithWrongSession_ToParent [GOOD]
>> CommitOffset::Commit_WithoutSession_ParentNotFinished
>> TGRpcClientLowTest::SimpleRequestDummyService [GOOD]
>> TGRpcClientLowTest::MultipleSimpleRequests
>> TestYmqHttpProxy::TestUntagQueue
>> TestYmqHttpProxy::TestPurgeQueue
>> TGRpcLdapAuthentication::LdapAuthWithInvalidRobouserLogin
>> ClientStatsCollector::CounterRetryOperation [GOOD]
>> ClientStatsCollector::ExternalMetricRegistryByRawPtr
>> YdbTableBulkUpsertOlap::UpsertCsvBug
>> TopicAutoscaling::PartitionMerge_PreferedPartition_AutoscaleAwareSDK [GOOD]
>> TopicAutoscaling::ControlPlane_CreateAlterDescribe
>> TRegisterNodeOverLegacyService::ServerWithoutCertVerification_ClientProvidesCorrectCerts [GOOD]
>> TRegisterNodeOverLegacyService::ServerWithoutCertVerification_ClientProvidesEmptyClientCerts
>> Cdc::RacySplitAndDropTable [GOOD]
>> Cdc::RenameTable
>> Cdc::InitialScan [GOOD]
>> Cdc::InitialScanDebezium
|91.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/client/minikql_compile/ut/ydb-core-client-minikql_compile-ut
|91.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/client/minikql_compile/ut/ydb-core-client-minikql_compile-ut
|91.4%| [LD] {RESULT} $(B)/ydb/core/client/minikql_compile/ut/ydb-core-client-minikql_compile-ut
>> YdbQueryService::TestCreateDropAttachSession [GOOD]
>> YdbQueryService::TestCreateAttachAndDropAttachedSession
|91.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_export/ydb-core-tx-datashard-ut_export
|91.4%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_export/ydb-core-tx-datashard-ut_export
|91.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_export/ydb-core-tx-datashard-ut_export
>> YdbYqlClient::TestYqlWrongTable [GOOD]
>> YdbYqlClient::TraceId
>> TSchemeShardTTLTestsWithReboots::CreateTable [GOOD]
>> CommitOffset::DistributedTxCommit [GOOD]
>> CommitOffset::DistributedTxCommit_ChildFirst
>> test_async_replication.py::TestAsyncReplication::test_async_replication[table_ttl_Uint32-pk_types9-all_types9-index9-Uint32--] [GOOD]
>> TestYmqHttpProxy::TestDeleteQueue [GOOD]
>> YdbIndexTable::CreateTableAddIndex [GOOD]
>> YdbIndexTable::AlterTableAddIndex
|91.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_followers/ydb-core-tx-datashard-ut_followers
|91.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/apps/etcd_proxy/service/ut/ydb-apps-etcd_proxy-service-ut
|91.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_followers/ydb-core-tx-datashard-ut_followers
|91.4%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_followers/ydb-core-tx-datashard-ut_followers
|91.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/apps/etcd_proxy/service/ut/ydb-apps-etcd_proxy-service-ut
|91.5%| [LD] {RESULT} $(B)/ydb/apps/etcd_proxy/service/ut/ydb-apps-etcd_proxy-service-ut
>> YdbMonitoring::SelfCheckWithNodesDying [GOOD]
>> YdbOlapStore::BulkUpsert
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLTestsWithReboots::CreateTable [GOOD]
Test command err: ==== RunWithTabletReboots =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2141] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2141] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:116:2058] recipient: [1:111:2142] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:116:2058] recipient: [1:111:2142] Leader for TabletID 72057594046678944 is [1:123:2149] sender: [1:126:2058] recipient: [1:108:2140] Leader for TabletID 72057594046447617 is [1:129:2154] sender: [1:131:2058] recipient: [1:109:2141] Leader for TabletID 72057594046316545 is [1:134:2157] sender: [1:136:2058] recipient: [1:111:2142] 2025-05-07T08:58:12.524814Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:58:12.524916Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:58:12.524956Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:58:12.524990Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:58:12.525054Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing 
config: type TxMergeTablePartition, limit 10000 2025-05-07T08:58:12.525080Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:58:12.525131Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:58:12.525213Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:58:12.525952Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:58:12.526298Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:58:12.595187Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7610: Got new config: QueryServiceConfig { AvailableExternalDataSources: "ObjectStorage" AvailableExternalDataSources: "ClickHouse" AvailableExternalDataSources: "PostgreSQL" AvailableExternalDataSources: "MySQL" AvailableExternalDataSources: "Ydb" AvailableExternalDataSources: "YT" AvailableExternalDataSources: "Greenplum" AvailableExternalDataSources: "MsSQLServer" AvailableExternalDataSources: "Oracle" AvailableExternalDataSources: "Logging" AvailableExternalDataSources: "Solomon" } 2025-05-07T08:58:12.595266Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:58:12.595991Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# ObjectStorage, ClickHouse, PostgreSQL, MySQL, Ydb, YT, Greenplum, MsSQLServer, Oracle, Logging, Solomon Leader for TabletID 72057594046447617 is [1:129:2154] sender: [1:175:2058] recipient: [1:15:2062] 2025-05-07T08:58:12.617515Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:58:12.617751Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:58:12.617941Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:58:12.630305Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:58:12.630567Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:58:12.631257Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:12.631498Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:58:12.633560Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:58:12.634716Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:58:12.634760Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:58:12.634905Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:58:12.634943Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:58:12.634991Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:58:12.635089Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:213:2058] recipient: [1:211:2212] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:213:2058] recipient: [1:211:2212] Leader for TabletID 72057594037968897 is [1:217:2216] sender: [1:218:2058] recipient: [1:211:2212] 2025-05-07T08:58:12.641563Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:123:2149] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T08:58:12.834971Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:58:12.835234Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:12.835494Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:58:12.835746Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:58:12.835806Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:12.839496Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:12.839685Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:58:12.839896Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:12.839951Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:58:12.840014Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: 
TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:58:12.840056Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:58:12.844096Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:12.844168Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:58:12.844209Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:58:12.848868Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:12.848951Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:12.849003Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:58:12.849103Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:58:12.854773Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:58:12.861797Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:58:12.862061Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:134:2157] sender: [1:253:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:58:12.863145Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:12.863334Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 134 RawX2: 4294969453 } } St ... 
AT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1002, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 7 2025-05-07T08:59:51.233239Z node 72 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-05-07T08:59:51.234286Z node 72 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 3 PathOwnerId: 72057594046678944, cookie: 1002 2025-05-07T08:59:51.234402Z node 72 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 3 PathOwnerId: 72057594046678944, cookie: 1002 2025-05-07T08:59:51.234442Z node 72 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 1002 2025-05-07T08:59:51.234489Z node 72 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1002, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 3 2025-05-07T08:59:51.234537Z node 72 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4 2025-05-07T08:59:51.234634Z node 72 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 1002, ready parts: 0/1, is published: true 2025-05-07T08:59:51.236452Z node 72 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6245: Handle TEvProposeTransactionResult, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 1002 Step: 5000003 OrderId: 1002 ExecLatency: 0 ProposeLatency: 3 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 11353 } } 2025-05-07T08:59:51.236531Z node 72 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation FindRelatedPartByTabletId, TxId: 1002, tablet: 72075186233409546, partId: 0 2025-05-07T08:59:51.236700Z node 72 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 1002:0, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 1002 Step: 5000003 OrderId: 1002 ExecLatency: 0 ProposeLatency: 3 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 11353 } } 2025-05-07T08:59:51.236822Z node 72 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_part.cpp:108: HandleReply TEvDataShard::TEvProposeTransactionResult Ignore message: tablet# 72057594046678944, ev# TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 1002 Step: 5000003 OrderId: 1002 ExecLatency: 0 ProposeLatency: 3 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 11353 } } 2025-05-07T08:59:51.237568Z node 72 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5472: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 329 RawX2: 309237647628 } Origin: 72075186233409546 State: 2 TxId: 1002 Step: 0 Generation: 2 
2025-05-07T08:59:51.237624Z node 72 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation FindRelatedPartByTabletId, TxId: 1002, tablet: 72075186233409546, partId: 0 2025-05-07T08:59:51.237761Z node 72 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 1002:0, at schemeshard: 72057594046678944, message: Source { RawX1: 329 RawX2: 309237647628 } Origin: 72075186233409546 State: 2 TxId: 1002 Step: 0 Generation: 2 2025-05-07T08:59:51.237845Z node 72 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1005: NTableState::TProposedWaitParts operationId# 1002:0 HandleReply TEvSchemaChanged at tablet: 72057594046678944 2025-05-07T08:59:51.237946Z node 72 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1009: NTableState::TProposedWaitParts operationId# 1002:0 HandleReply TEvSchemaChanged at tablet: 72057594046678944 message: Source { RawX1: 329 RawX2: 309237647628 } Origin: 72075186233409546 State: 2 TxId: 1002 Step: 0 Generation: 2 2025-05-07T08:59:51.238096Z node 72 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:655: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 1002:0, shardIdx: 72057594046678944:1, datashard: 72075186233409546, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-05-07T08:59:51.238147Z node 72 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:674: all shard schema changes has been received, operationId: 1002:0, at schemeshard: 72057594046678944 2025-05-07T08:59:51.238196Z node 72 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:686: send schema changes ack message, operation: 1002:0, datashard: 72075186233409546, at schemeshard: 72057594046678944 2025-05-07T08:59:51.238243Z node 72 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1002:0 129 -> 240 2025-05-07T08:59:51.244259Z node 72 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1002 2025-05-07T08:59:51.244410Z node 72 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1002 2025-05-07T08:59:51.245351Z node 72 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 1002:0, at schemeshard: 72057594046678944 2025-05-07T08:59:51.245490Z node 72 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 1002:0, at schemeshard: 72057594046678944 2025-05-07T08:59:51.246162Z node 72 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1002:0, at schemeshard: 72057594046678944 2025-05-07T08:59:51.246235Z node 72 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046678944] TDone opId# 1002:0 ProgressState 2025-05-07T08:59:51.246357Z node 72 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#1002:0 progress is 1/1 2025-05-07T08:59:51.246396Z node 72 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 1002 ready parts: 1/1 2025-05-07T08:59:51.246444Z node 72 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#1002:0 progress is 1/1 2025-05-07T08:59:51.246479Z node 72 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 1002 ready parts: 1/1 2025-05-07T08:59:51.246523Z node 72 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 1002, ready parts: 1/1, is published: true 2025-05-07T08:59:51.246573Z node 72 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 1002 ready parts: 1/1 2025-05-07T08:59:51.246619Z node 72 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 1002:0 2025-05-07T08:59:51.246655Z node 72 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 1002:0 2025-05-07T08:59:51.246857Z node 72 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 TestModificationResult got TxId: 1002, wait until txId: 1002 TestWaitNotification wait txId: 1002 2025-05-07T08:59:51.255982Z node 72 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 1002: send EvNotifyTxCompletion 2025-05-07T08:59:51.256058Z node 72 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 1002 2025-05-07T08:59:51.256546Z node 72 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1002, at schemeshard: 72057594046678944 2025-05-07T08:59:51.256679Z node 72 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 1002: got EvNotifyTxCompletionResult 2025-05-07T08:59:51.256725Z node 72 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 1002: satisfy waiter [72:404:2377] TestWaitNotification: OK eventTxId 1002 2025-05-07T08:59:51.261547Z node 72 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/TTLEnabledTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T08:59:51.265281Z node 72 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/TTLEnabledTable" took 3.73ms result status StatusSuccess 2025-05-07T08:59:51.266024Z node 72 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/TTLEnabledTable" PathDescription { Self { Name: "TTLEnabledTable" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 1002 CreateStep: 5000003 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "TTLEnabledTable" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "modified_at" Type: "Timestamp" TypeId: 50 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 1 TTLSettings { Enabled { ColumnName: "modified_at" ExpireAfterSeconds: 3600 Tiers { ApplyAfterSeconds: 3600 Delete { } } } } IsBackup: false IsRestore: false } TableStats { 
DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 1 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TestYmqHttpProxy::TestListDeadLetterSourceQueues >> YdbOlapStore::LogLast50ByResource [GOOD] >> YdbOlapStore::LogNonExistingRequest |91.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/build_index/ut/ydb-core-tx-datashard-build_index-ut |91.5%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/build_index/ut/ydb-core-tx-datashard-build_index-ut |91.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/build_index/ut/ydb-core-tx-datashard-build_index-ut >> YdbYqlClient::RetryOperationAsync >> TGRpcLdapAuthentication::LdapAuthWithInvalidRobouserLogin [GOOD] >> TGRpcLdapAuthentication::LdapAuthWithInvalidRobouserPassword >> Cdc::AddIndex [GOOD] >> Cdc::AddStream >> TGRpcYdbTest::ExplainQuery [GOOD] >> TestKinesisHttpProxy::TestListStreamConsumersWithToken [GOOD] >> TRegisterNodeOverDiscoveryService::ServerWithIssuerVerification_ClientWithSameIssuer [GOOD] >> TRegisterNodeOverDiscoveryService::ServerWithOutCertVerification_ClientProvidesExpiredCert >> TRegisterNodeOverLegacyService::ServerWithoutCertVerification_ClientProvidesEmptyClientCerts [GOOD] >> TTableProfileTests::DescribeTableWithPartitioningPolicy |91.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_data_cleanup/ydb-core-tx-datashard-ut_data_cleanup |91.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_data_cleanup/ydb-core-tx-datashard-ut_data_cleanup |91.5%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_data_cleanup/ydb-core-tx-datashard-ut_data_cleanup >> TFlatTableExecutor_IndexLoading::Scan_Groups_BTreeIndex [GOOD] >> TFlatTableExecutor_IndexLoading::Scan_Groups_BTreeIndex_Empty >> YdbYqlClient::TraceId [GOOD] >> YdbYqlClient::Utf8DatabasePassViaHeader >> TFlatTableExecutor_IndexLoading::Scan_Groups_BTreeIndex_Empty [GOOD] >> TFlatTableExecutor_KeepEraseMarkers::TestKeepEraseMarkers >> TFlatTableExecutor_KeepEraseMarkers::TestKeepEraseMarkers [GOOD] >> TFlatTableExecutor_LongTx::MemTableLongTx >> TGRpcClientLowTest::MultipleSimpleRequests [GOOD] >> TGRpcLdapAuthentication::LdapAuthServerIsUnavailable >> TFlatTableExecutor_LongTx::MemTableLongTx [GOOD] >> TFlatTableExecutor_LongTx::CompactUncommittedLongTx >> TestKinesisHttpProxy::TestCounters >> TFlatTableExecutor_LongTx::CompactUncommittedLongTx [GOOD] >> TFlatTableExecutor_LongTx::CompactCommittedLongTx >> ClientStatsCollector::ExternalMetricRegistryByRawPtr 
[GOOD] >> ClientStatsCollector::ExternalMetricRegistryStdSharedPtr >> YdbTableBulkUpsertOlap::UpsertCsvBug [GOOD] >> YdbTableBulkUpsertOlap::UpsertCSV ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> TGRpcYdbTest::ExplainQuery [GOOD] Test command err: 2025-05-07T08:59:17.165243Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501625773878162001:2076];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:59:17.165555Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0028e4/r3tmp/tmph5H6nU/pdisk_1.dat 2025-05-07T08:59:17.913716Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:59:17.949408Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:59:17.949557Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:59:17.979777Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 20244, node 1 2025-05-07T08:59:18.214916Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:59:18.214933Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:59:18.214943Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:59:18.215046Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:14305 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:59:18.537230Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T08:59:23.146570Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7501625800804291206:2077];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:59:23.150774Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0028e4/r3tmp/tmplAnrqj/pdisk_1.dat 2025-05-07T08:59:23.533347Z node 4 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:59:23.586695Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:59:23.586805Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:59:23.596538Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 19398, node 4 2025-05-07T08:59:23.745607Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:59:23.745638Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:59:23.745646Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:59:23.745897Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:15575 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:59:24.212920Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:59:27.568763Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7501625817984161436:2338], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:59:27.568839Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7501625817984161425:2335], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:59:27.568998Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:59:27.575064Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480 2025-05-07T08:59:27.636026Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7501625817984161439:2339], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-05-07T08:59:27.716315Z node 4 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [4:7501625817984161509:2675] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:59:28.147696Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7501625800804291206:2077];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:59:28.147794Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:59:30.860688Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7501625832051832698:2099];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:59:30.861061Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0028e4/r3tmp/tmpxLITNH/pdisk_1.dat 2025-05-07T08:59:31.196065Z node 7 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:59:31.235298Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:59:31.235399Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:59:31.253512Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 7271, node 7 2025-05-07T08:59:31.474769Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:59:31.474800Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:59:31.474811Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:59:31.474988Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:7560 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-05-07T08:59:31.927691Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:59:35.858163Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[7:7501625832051832698:2099];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:59:35.858245Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;actio ... ydb://session/3?node_id=7&id=NzQ0MGM4ZDMtMzRkNWQwYjItMTRlNTEwZTktM2EwODQzZDg=, ActorId: [7:7501625857821637496:2333], ActorState: ExecuteState, TraceId: 01jtmzhkd681zp3ts4jzqy2p2z, Create QueryResponse for error on request, msg: ydb/core/kqp/session_actor/kqp_session_actor.cpp:1003: ydb/library/mkql_proto/mkql_proto.cpp:1435: Unknown protobuf type:
: Error: Unsupported protobuf type:
: Error: ydb/core/kqp/session_actor/kqp_session_actor.cpp:1003: ydb/library/mkql_proto/mkql_proto.cpp:1435: Unknown protobuf type: 2025-05-07T08:59:37.315412Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715661. Ctx: { TraceId: 01jtmzhkd681zp3ts4jzqy2p2z, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=NzQ0MGM4ZDMtMzRkNWQwYjItMTRlNTEwZTktM2EwODQzZDg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:59:39.522403Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7501625869576762784:2074];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:59:39.542375Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0028e4/r3tmp/tmpoftAr8/pdisk_1.dat 2025-05-07T08:59:40.082542Z node 10 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:59:40.211460Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:59:40.211566Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:59:40.226137Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 27117, node 10 2025-05-07T08:59:40.674774Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:59:40.674803Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:59:40.674812Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:59:40.674992Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:15885 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:59:41.113547Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T08:59:43.548650Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7501625886756633019:2334], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:59:43.548768Z node 10 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:59:43.549197Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7501625886756633031:2337], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:59:43.555353Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480 2025-05-07T08:59:43.640472Z node 10 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [10:7501625886756633033:2338], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-05-07T08:59:43.746771Z node 10 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [10:7501625886756633129:2680] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:59:47.017814Z node 13 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[13:7501625902465774174:2097];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:59:47.017954Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0028e4/r3tmp/tmpVpcpCU/pdisk_1.dat 2025-05-07T08:59:47.618165Z node 13 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:59:47.748792Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:59:47.754091Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:59:47.771855Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 11334, node 13 2025-05-07T08:59:48.138175Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:59:48.138209Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:59:48.138222Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:59:48.138433Z node 13 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:61326 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:59:49.016165Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T08:59:49.150211Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 2025-05-07T08:59:52.018063Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[13:7501625902465774174:2097];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:59:52.018190Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:59:53.446394Z node 13 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [13:7501625928235579166:2344], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:59:53.446486Z node 13 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:59:53.447391Z node 13 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [13:7501625928235579178:2347], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:59:53.451960Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480 2025-05-07T08:59:53.473470Z node 13 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [13:7501625928235579180:2348], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-05-07T08:59:53.572191Z node 13 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [13:7501625928235579259:2820] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:59:53.806838Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710661. Ctx: { TraceId: 01jtmzj37459ds4hscg057x3c6, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=YTdkODZhM2YtNzJhZDM2MjQtZmI5YzkyNWQtNDc0MjVlM2U=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root >> TFlatTableExecutor_LongTx::CompactCommittedLongTx [GOOD] >> TFlatTableExecutor_LongTx::CompactedLongTxRestart >> TFlatTableExecutor_LongTx::CompactedLongTxRestart [GOOD] >> TFlatTableExecutor_LongTx::CompactMultipleChanges >> YdbQueryService::TestCreateAttachAndDropAttachedSession [GOOD] >> YdbIndexTable::AlterTableAddIndex [GOOD] >> YdbLogStore::AlterLogStore >> TFlatTableExecutor_LongTx::CompactMultipleChanges [GOOD] >> TFlatTableExecutor_LongTx::LongTxBorrow >> Cdc::ResolvedTimestampsMultiplePartitions [GOOD] >> Cdc::ResolvedTimestampsVolatileOutOfOrder >> TFlatTableExecutor_LongTx::LongTxBorrow [GOOD] >> TFlatTableExecutor_LongTx::MemTableLongTxRead >> TestYmqHttpProxy::TestUntagQueue [GOOD] >> TestYmqHttpProxy::TestPurgeQueue [GOOD] >> TFlatTableExecutor_LongTx::MemTableLongTxRead [GOOD] >> TFlatTableExecutor_LongTx::CompactedTxIdReuse >> TopicAutoscaling::ControlPlane_CDC_Disable [GOOD] >> TopicAutoscaling::BalancingAfterSplit_sessionsWithPartition >> TopicAutoscaling::PartitionSplit_ReadNotEmptyPartitions_BeforeAutoscaleAwareSDK [GOOD] >> TopicAutoscaling::PartitionSplit_ReadNotEmptyPartitions_PQv1 >> TFlatTableExecutor_LongTx::CompactedTxIdReuse [GOOD] >> TFlatTableExecutor_LongTx::MergeSkewedCommitted >> TFlatTableExecutor_LongTx::MergeSkewedCommitted [GOOD] >> TFlatTableExecutor_LongTxAndBlobs::SmallValues >> TestYmqHttpProxy::TestSendMessageBatch >> TFlatTableExecutor_LongTxAndBlobs::SmallValues [GOOD] >> TFlatTableExecutor_LongTxAndBlobs::OuterBlobValues >> TestYmqHttpProxy::TestTagQueueMultipleQueriesInflight ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> YdbQueryService::TestCreateAttachAndDropAttachedSession [GOOD] Test command err: 2025-05-07T08:59:22.993126Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501625797742939568:2072];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:59:22.993169Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0028c6/r3tmp/tmpaxWREm/pdisk_1.dat 2025-05-07T08:59:23.775393Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:59:23.775487Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:59:23.784804Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:59:23.864865Z node 1 
:IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 14706, node 1 2025-05-07T08:59:24.130244Z node 1 :GRPC_SERVER WARN: grpc_request_proxy.cpp:509: SchemeBoardDelete /Root Strong=0 2025-05-07T08:59:24.158554Z node 1 :GRPC_SERVER WARN: grpc_request_proxy.cpp:509: SchemeBoardDelete /Root Strong=0 2025-05-07T08:59:24.159208Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:59:24.159220Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:59:24.159228Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:59:24.159338Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-05-07T08:59:24.245905Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; TClient is connected to server localhost:31277 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:59:24.762894Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T08:59:27.440454Z node 1 :KQP_PROXY WARN: kqp_proxy_service.cpp:1557: Failed to parse session id: unknownSesson 2025-05-07T08:59:29.214878Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7501625827078909489:2140];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:59:29.214937Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0028c6/r3tmp/tmpYHvabQ/pdisk_1.dat 2025-05-07T08:59:29.644154Z node 4 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:59:29.723692Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:59:29.723776Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:59:29.739351Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 4442, node 4 2025-05-07T08:59:30.026611Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:59:30.026637Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:59:30.026647Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:59:30.026787Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:11211 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:59:30.495949Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T08:59:36.696550Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7501625857947212944:2207];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:59:36.702432Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0028c6/r3tmp/tmp8tiRYk/pdisk_1.dat 2025-05-07T08:59:37.168920Z node 7 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:59:37.260235Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:59:37.260314Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:59:37.263563Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 31670, node 7 2025-05-07T08:59:37.426607Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:59:37.426636Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:59:37.426651Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:59:37.426825Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:25002 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:59:37.936592Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T08:59:41.698229Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[7:7501625857947212944:2207];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:59:41.698316Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0028c6/r3tmp/tmpxThJax/pdisk_1.dat 2025-05-07T08:59:44.459255Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:59:44.572887Z node 10 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:59:44.624484Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:59:44.624593Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:59:44.637258Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 21231, node 10 2025-05-07T08:59:44.886964Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:59:44.886994Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:59:44.887004Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:59:44.887141Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:1770 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:59:45.217613Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T08:59:51.485116Z node 13 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[13:7501625920172880818:2092];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:59:51.507599Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0028c6/r3tmp/tmpPx2AXw/pdisk_1.dat 2025-05-07T08:59:51.971384Z node 13 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:59:52.049381Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:59:52.049486Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:59:52.064782Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 61907, node 13 2025-05-07T08:59:52.367483Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:59:52.367516Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:59:52.367525Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:59:52.367678Z node 13 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:63345 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:59:53.019913Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T08:59:56.150444Z node 13 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1665: Updated YQL logs priority to current level: 4 2025-05-07T08:59:56.151617Z node 13 :KQP_PROXY INFO: kqp_proxy_service.cpp:442: Cannot start publishing usage, tenants: /Root, empty 2025-05-07T08:59:56.154282Z node 13 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1468: TraceId: "01jtmzj2z3fergjf1xx3127272", Request has 18444997465313.397375s seconds to be completed 2025-05-07T08:59:56.157675Z node 13 :KQP_SESSION DEBUG: kqp_session_actor.cpp:223: SessionId: ydb://session/3?node_id=13&id=NzE4ZDA4NjYtODI2OGJjZmQtM2RkYmNhNWItN2UxMTQ0OA==, ActorId: [0:0:0], ActorState: unknown state, Create session actor with id NzE4ZDA4NjYtODI2OGJjZmQtM2RkYmNhNWItN2UxMTQ0OA== 2025-05-07T08:59:56.157774Z node 13 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1543: TraceId: "01jtmzj2z3fergjf1xx3127272", Created new session, sessionId: ydb://session/3?node_id=13&id=NzE4ZDA4NjYtODI2OGJjZmQtM2RkYmNhNWItN2UxMTQ0OA==, workerId: [13:7501625941647718322:2332], database: , longSession: 1, local sessions count: 1 2025-05-07T08:59:56.157820Z node 13 :KQP_PROXY INFO: kqp_proxy_service.cpp:442: Cannot start publishing usage, tenants: /Root, empty 2025-05-07T08:59:56.158101Z node 13 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:649: Received create session request, trace_id: 01jtmzj2z3fergjf1xx3127272 2025-05-07T08:59:56.158180Z node 13 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:512: Subscribed for config changes. 2025-05-07T08:59:56.158214Z node 13 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:519: Updated table service config. 2025-05-07T08:59:56.158237Z node 13 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1665: Updated YQL logs priority to current level: 4 2025-05-07T08:59:56.158282Z node 13 :KQP_PROXY INFO: kqp_proxy_service.cpp:442: Cannot start publishing usage, tenants: /Root, empty 2025-05-07T08:59:56.158349Z node 13 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:425: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-05-07T08:59:56.158411Z node 13 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:425: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-05-07T08:59:56.159285Z node 13 :KQP_SESSION DEBUG: kqp_session_actor.cpp:227: SessionId: ydb://session/3?node_id=13&id=NzE4ZDA4NjYtODI2OGJjZmQtM2RkYmNhNWItN2UxMTQ0OA==, ActorId: [13:7501625941647718322:2332], ActorState: unknown state, session actor bootstrapped 2025-05-07T08:59:56.162630Z node 13 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:425: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-05-07T08:59:56.162714Z node 13 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:425: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-05-07T08:59:56.162754Z node 13 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:425: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-05-07T08:59:56.178865Z node 13 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:835: Received ping session request, has local session: ydb://session/3?node_id=13&id=NzE4ZDA4NjYtODI2OGJjZmQtM2RkYmNhNWItN2UxMTQ0OA==, rpc ctrl: [13:7501625941647718347:2334], sameNode: 1, trace_id: 2025-05-07T08:59:56.178914Z node 13 :KQP_PROXY TRACE: kqp_proxy_service.cpp:857: Attach local session: [13:7501625941647718322:2332] to rpc: [13:7501625941647718347:2334] on same node 2025-05-07T08:59:56.194649Z node 13 :KQP_SESSION INFO: kqp_session_actor.cpp:2315: SessionId: 
ydb://session/3?node_id=13&id=NzE4ZDA4NjYtODI2OGJjZmQtM2RkYmNhNWItN2UxMTQ0OA==, ActorId: [13:7501625941647718322:2332], ActorState: ReadyState, Session closed due to explicit close event 2025-05-07T08:59:56.194703Z node 13 :KQP_SESSION INFO: kqp_session_actor.cpp:2473: SessionId: ydb://session/3?node_id=13&id=NzE4ZDA4NjYtODI2OGJjZmQtM2RkYmNhNWItN2UxMTQ0OA==, ActorId: [13:7501625941647718322:2332], ActorState: ReadyState, Cleanup start, isFinal: 1 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-05-07T08:59:56.194739Z node 13 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2534: SessionId: ydb://session/3?node_id=13&id=NzE4ZDA4NjYtODI2OGJjZmQtM2RkYmNhNWItN2UxMTQ0OA==, ActorId: [13:7501625941647718322:2332], ActorState: ReadyState, EndCleanup, isFinal: 1 2025-05-07T08:59:56.194770Z node 13 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2546: SessionId: ydb://session/3?node_id=13&id=NzE4ZDA4NjYtODI2OGJjZmQtM2RkYmNhNWItN2UxMTQ0OA==, ActorId: [13:7501625941647718322:2332], ActorState: unknown state, Cleanup temp tables: 0 2025-05-07T08:59:56.194860Z node 13 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2637: SessionId: ydb://session/3?node_id=13&id=NzE4ZDA4NjYtODI2OGJjZmQtM2RkYmNhNWItN2UxMTQ0OA==, ActorId: [13:7501625941647718322:2332], ActorState: unknown state, Session actor destroyed 2025-05-07T08:59:56.195227Z node 13 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1353: Session closed, sessionId: ydb://session/3?node_id=13&id=NzE4ZDA4NjYtODI2OGJjZmQtM2RkYmNhNWItN2UxMTQ0OA==, workerId: [13:7501625941647718322:2332], local sessions count: 0 2025-05-07T08:59:56.210926Z node 13 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:881: Received ping session request, request_id: 3, sender: [13:7501625941647718350:2336], trace_id: 2025-05-07T08:59:56.211104Z node 13 :KQP_PROXY NOTICE: kqp_proxy_service.cpp:1564: Session not found: ydb://session/3?node_id=13&id=NzE4ZDA4NjYtODI2OGJjZmQtM2RkYmNhNWItN2UxMTQ0OA== 2025-05-07T08:59:56.211195Z node 13 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:953: Forwarded response to sender actor, requestId: 3, sender: [13:7501625941647718350:2336], selfId: [13:7501625920172880934:2206], source: [13:7501625920172880934:2206] >> TopicAutoscaling::PartitionSplit_ReadEmptyPartitions_AutoscaleAwareSDK [GOOD] >> TopicAutoscaling::PartitionSplit_ManySession_PQv1 >> TFlatTableExecutor_LongTxAndBlobs::OuterBlobValues [GOOD] >> TFlatTableExecutor_LongTxAndBlobs::ExternalBlobValues >> TFlatTableExecutor_LongTxAndBlobs::ExternalBlobValues [GOOD] >> TFlatTableExecutor_LowPriorityTxs::TestEnqueueCancel >> TopicAutoscaling::WithDir_PartitionSplit_AutosplitByLoad [GOOD] >> WithSDK::DescribeConsumer >> TFlatTableExecutor_LowPriorityTxs::TestEnqueueCancel [GOOD] >> TFlatTableExecutor_LowPriorityTxs::TestLowPriority [GOOD] >> TFlatTableExecutor_LowPriorityTxs::TestLowPriorityCancel >> TFlatTableExecutor_LowPriorityTxs::TestLowPriorityCancel [GOOD] >> TFlatTableExecutor_LowPriorityTxs::TestLowPriorityAllocatingCancel >> YdbYqlClient::TestDoubleKey >> TFlatTableExecutor_LowPriorityTxs::TestLowPriorityAllocatingCancel [GOOD] >> TFlatTableExecutor_MoveTableData::TestMoveSnapshot >> TFlatTableExecutor_MoveTableData::TestMoveSnapshot [GOOD] >> TFlatTableExecutor_MoveTableData::TestMoveSnapshotFollower |91.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/services/rate_limiter/ut/ydb-services-rate_limiter-ut |91.5%| [LD] {RESULT} $(B)/ydb/services/rate_limiter/ut/ydb-services-rate_limiter-ut |91.5%| [LD] {BAZEL_UPLOAD, SKIPPED} 
$(B)/ydb/services/rate_limiter/ut/ydb-services-rate_limiter-ut >> TFlatTableExecutor_MoveTableData::TestMoveSnapshotFollower [GOOD] >> TFlatTableExecutor_PostponedScan::TestPostponedScan >> Cdc::RenameTable [GOOD] >> Cdc::InitialScan_WithTopicSchemeTx >> Cdc::InitialScanDebezium [GOOD] >> Cdc::InitialScanRacyCompleteAndRequest >> TFlatTableExecutor_PostponedScan::TestPostponedScan [GOOD] >> TFlatTableExecutor_PostponedScan::TestCancelFinishedScan >> TFlatTableExecutor_PostponedScan::TestCancelFinishedScan [GOOD] >> TFlatTableExecutor_PostponedScan::TestCancelRunningPostponedScan >> TopicAutoscaling::ControlPlane_CreateAlterDescribe [GOOD] >> TopicAutoscaling::ControlPlane_DisableAutoPartitioning >> TFlatTableExecutor_PostponedScan::TestCancelRunningPostponedScan [GOOD] >> TFlatTableExecutor_PostponedScan::TestPostponedScanSnapshotMVCC >> TFlatTableExecutor_PostponedScan::TestPostponedScanSnapshotMVCC [GOOD] >> TFlatTableExecutor_Reboot::TestSchemeGcAfterReassign >> YdbYqlClient::Utf8DatabasePassViaHeader [GOOD] >> YdbYqlClient::TestYqlTypesFromPreparedQuery >> TRegisterNodeOverDiscoveryService::ServerWithoutCertVerification_ClientProvidesCorrectCerts >> TGRpcLdapAuthentication::LdapAuthWithInvalidRobouserPassword [GOOD] >> TGRpcLdapAuthentication::LdapAuthWithInvalidLogin >> TopicAutoscaling::ReadingAfterSplitTest_AutoscaleAwareSDK_AutoCommit [GOOD] >> TopicAutoscaling::ReadingAfterSplitTest_PQv1 >> TestYmqHttpProxy::TestListDeadLetterSourceQueues [GOOD] >> TGRpcLdapAuthentication::LdapAuthServerIsUnavailable [GOOD] >> TGRpcLdapAuthentication::DisableBuiltinAuthMechanism >> YdbLogStore::AlterLogStore [GOOD] >> TFlatTableExecutor_Reboot::TestSchemeGcAfterReassign [GOOD] >> TFlatTableExecutor_RejectProbability::MaxedOutRejectProbability >> YdbTableBulkUpsert::Uint8 [GOOD] >> YdbTableBulkUpsert::Timeout >> TestYmqHttpProxy::TestListQueueTags >> TFlatTableExecutor_RejectProbability::MaxedOutRejectProbability [GOOD] >> TFlatTableExecutor_RejectProbability::SomeRejectProbability |91.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_rs/ydb-core-tx-datashard-ut_rs |91.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_rs/ydb-core-tx-datashard-ut_rs |91.5%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_rs/ydb-core-tx-datashard-ut_rs ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> YdbLogStore::AlterLogStore [GOOD] Test command err: 2025-05-07T08:59:29.082712Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501625826999804958:2074];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:59:29.082755Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0028b0/r3tmp/tmpyJPfb6/pdisk_1.dat 2025-05-07T08:59:30.071770Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:59:30.071871Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:59:30.095123Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:59:30.112258Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path 
existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:59:30.116831Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 22473, node 1 2025-05-07T08:59:30.539804Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:59:30.539826Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:59:30.539838Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:59:30.539967Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:1176 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:59:30.865850Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:59:34.090130Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501625826999804958:2074];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:59:34.090226Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:59:34.789057Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 SUCCESS 3 rows in 0.040546s 2025-05-07T08:59:35.030723Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501625852769609949:2346], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:59:35.030808Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501625852769609959:2349], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:59:35.030871Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:59:35.034702Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480 2025-05-07T08:59:35.061689Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501625852769609963:2350], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-05-07T08:59:35.147849Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501625852769610042:2809] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:59:36.479277Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710661. Ctx: { TraceId: 01jtmzhh7k1y0kvvdhp9q8gg46, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MTFkZDA5YzgtMzA1YjQ3NTUtN2YwMzg0ZGEtOWEzMTk1YmQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root SUCCESS count returned 3 rows 2025-05-07T08:59:38.150266Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7501625863154350640:2212];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:59:38.150563Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0028b0/r3tmp/tmp1voLOF/pdisk_1.dat 2025-05-07T08:59:38.645776Z node 4 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:59:38.719825Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:59:38.719906Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:59:38.735052Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 5341, node 4 2025-05-07T08:59:39.053128Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:59:39.053153Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:59:39.053162Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:59:39.053294Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:18603 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-05-07T08:59:39.475360Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... TClient is connected to server localhost:18603 2025-05-07T08:59:43.146191Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7501625863154350640:2212];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:59:43.146290Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:59:43.510305Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-05-07T08:59:44.045923Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_table.cpp:508: TAlterTable Propose, path: Root/Foo/TimestampIndex/indexImplTable, pathId: , opId: 281474976715659:0, at schemeshard: 72057594046644480 2025-05-07T08:59:44.046070Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 281474976715659:1, propose status:StatusNameConflict, reason: Check failed: path: '/Root/Foo/TimestampIndex/indexImplTable', error: path is not a common path (id: [OwnerId: 72057594046644480, LocalPathId: 4], type: EPathTypeTable, state: EPathStateNoChanges), at schemeshard: 72057594046644480 2025-05-07T08:59:44.051216Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976715659, database: /Root, subject: , status: StatusNameConflict, reason: Check failed: path: '/Root/Foo/TimestampIndex/indexImplTable', error: path is not a common path (id: [OwnerId: 72057594046644480, LocalPathId: 4], type: EPathTypeTable, state: EPathStateNoChanges), operation: ALTER TABLE, path: Root/Foo/TimestampIndex/indexImplTable 2025-05-07T08:59:44.051460Z node 4 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [4:7501625888924155814:2967] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/Foo/TimestampIndex/indexImplTable\', error: path is not a common path (id: [OwnerId: 72057594046644480, LocalPathId: 4], type: EPathTypeTable, state: EPathStateNoChanges)" severity: 1 } Error 128: Administrative access denied TClient::Ls request: /Root/Foo/TimestampIndex/indexImplTable TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "indexImplTable" PathId: 4 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true ... 
ing columns in index table is not supported, at schemeshard: 72057594046644480 2025-05-07T08:59:44.419100Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:151: Abort operation: IgniteOperation fail to propose a part, opId: 281474976715663:1, at schemeshard: 72057594046644480, already accepted parts: 1, propose result status: StatusInvalidParameter, with reason: Adding or dropping columns in index table is not supported, tx message: Transaction { WorkingDir: "/Root/Foo/TimestampIndex" OperationType: ESchemeOpAlterTable AlterTable { Name: "indexImplTable" DropColumns { Name: "Timestamp" } } } TxId: 281474976715663 TabletId: 72057594046644480 Owner: "root@builtin" UserToken: "***" PeerName: "" 2025-05-07T08:59:44.419209Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_index.cpp:219: TAlterTableIndex AbortPropose, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-05-07T08:59:44.423508Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976715663, database: /Root, subject: root@builtin, status: StatusInvalidParameter, reason: Adding or dropping columns in index table is not supported, operation: ALTER TABLE, path: /Root/Foo/TimestampIndex/indexImplTable 2025-05-07T08:59:44.423816Z node 4 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [4:7501625888924155990:3120] txid# 281474976715663, issues: { message: "Adding or dropping columns in index table is not supported" severity: 1 } Error 128: Adding or dropping columns in index table is not supported 2025-05-07T08:59:46.499134Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7501625900689340620:2074];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:59:46.499315Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0028b0/r3tmp/tmpJXZQTf/pdisk_1.dat 2025-05-07T08:59:46.760448Z node 7 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:59:46.822047Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:59:46.822151Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:59:46.828026Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 27800, node 7 2025-05-07T08:59:46.938733Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:59:46.938759Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:59:46.938768Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:59:46.938920Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:11045 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:59:47.214376Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:59:47.280197Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 2025-05-07T08:59:53.358152Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7501625927157106885:2073];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:59:53.365096Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0028b0/r3tmp/tmpYkzP1o/pdisk_1.dat 2025-05-07T08:59:53.561086Z node 10 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:59:53.600200Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:59:53.600257Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:59:53.602905Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 15632, node 10 2025-05-07T08:59:53.894612Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:59:53.894638Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:59:53.894648Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:59:53.894793Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:27182 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:59:54.220184Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:59:54.297792Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 2025-05-07T08:59:54.520119Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976715758:2, at schemeshard: 72057594046644480 2025-05-07T08:59:54.762198Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976715759:0, at schemeshard: 72057594046644480 2025-05-07T08:59:59.086565Z node 13 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[13:7501625954284963958:2277];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:59:59.086632Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0028b0/r3tmp/tmpFtcNRg/pdisk_1.dat 2025-05-07T08:59:59.350818Z node 13 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:59:59.438096Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:59:59.438193Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 64647, node 13 2025-05-07T08:59:59.482401Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:59:59.651463Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:59:59.651486Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:59:59.651494Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from 
file: (empty maybe) 2025-05-07T08:59:59.651645Z node 13 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:22966 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:00:00.017822Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... |91.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/memory_controller/ut/ydb-core-memory_controller-ut |91.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/memory_controller/ut/ydb-core-memory_controller-ut |91.5%| [LD] {RESULT} $(B)/ydb/core/memory_controller/ut/ydb-core-memory_controller-ut |91.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_compaction/ydb-core-tx-datashard-ut_compaction |91.5%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_compaction/ydb-core-tx-datashard-ut_compaction |91.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_compaction/ydb-core-tx-datashard-ut_compaction >> ClientStatsCollector::ExternalMetricRegistryStdSharedPtr [GOOD] >> TFlatTableExecutor_RejectProbability::SomeRejectProbability [GOOD] >> TFlatTableExecutor_RejectProbability::ZeroRejectProbability >> TSchemeShardTTLTestsWithReboots::MoveTable [GOOD] |91.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/graph/ut/ydb-core-graph-ut |91.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/graph/ut/ydb-core-graph-ut |91.5%| [LD] {RESULT} $(B)/ydb/core/graph/ut/ydb-core-graph-ut >> TFlatTableExecutor_RejectProbability::ZeroRejectProbability [GOOD] >> TFlatTableExecutor_RejectProbability::ZeroRejectProbabilityMultipleTables >> CommitOffset::Commit_Flat_WithWrongSession_ToPast [GOOD] >> CommitOffset::Commit_WithSession_ParentNotFinished_OtherSession >> Cdc::AddStream [GOOD] >> Cdc::AwsRegion >> TPersQueueTest::PreferredCluster_RemotePreferredClusterEnabledWhileSessionInitializing_SessionDiesOnlyAfterInitializationAndDelay [GOOD] >> TPersQueueTest::PartitionsMapping >> TFlatTableExecutor_RejectProbability::ZeroRejectProbabilityMultipleTables [GOOD] >> TFlatTableExecutor_Reschedule::TestExecuteReschedule [GOOD] >> TFlatTableExecutor_ResourceProfile::TestExecutorSetResourceProfile >> TopicAutoscaling::PartitionMerge_PreferedPartition_PQv1 [GOOD] >> TopicAutoscaling::PartitionSplit_ManySession_BeforeAutoscaleAwareSDK >> TFlatTableExecutor_ResourceProfile::TestExecutorSetResourceProfile [GOOD] >> TFlatTableExecutor_ResourceProfile::TestExecutorRequestTxData >> 
TTableProfileTests::DescribeTableWithPartitioningPolicy [GOOD] >> TTableProfileTests::DescribeTableOptions >> YdbYqlClient::TestDoubleKey [GOOD] >> YdbYqlClient::TestMultipleModifications >> TFlatTableExecutor_ResourceProfile::TestExecutorRequestTxData [GOOD] >> TFlatTableExecutor_ResourceProfile::TestExecutorReuseStaticMemory ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> ClientStatsCollector::ExternalMetricRegistryStdSharedPtr [GOOD] Test command err: 2025-05-07T08:59:23.034873Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501625801985061887:2076];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:59:23.034925Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0028cf/r3tmp/tmpGbOjXW/pdisk_1.dat 2025-05-07T08:59:24.139206Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:59:24.143803Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:59:24.143916Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:59:24.163700Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:59:24.227956Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 11360, node 1 2025-05-07T08:59:24.369048Z node 1 :GRPC_SERVER WARN: grpc_request_proxy.cpp:509: SchemeBoardDelete /Root Strong=0 2025-05-07T08:59:24.377150Z node 1 :GRPC_SERVER WARN: grpc_request_proxy.cpp:509: SchemeBoardDelete /Root Strong=0 2025-05-07T08:59:24.603195Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:59:24.603224Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:59:24.603232Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:59:24.603374Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:11648 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:59:25.062887Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:59:27.817987Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501625819164932113:2338], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:59:27.818078Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501625819164932121:2341], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:59:27.818134Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:59:27.821697Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480 2025-05-07T08:59:27.847592Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501625819164932127:2342], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-05-07T08:59:27.905994Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501625819164932196:2681] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:59:28.042119Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501625801985061887:2076];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:59:28.042210Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:59:28.386409Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710660. Ctx: { TraceId: , Database: , DatabaseId: , SessionId: ydb://session/3?node_id=1&id=ZmI0ZGRjMzEtYzIzMDU3MDMtY2I0MjJjNWItMzNkNzFjNDU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. Database not set, use /Root 2025-05-07T08:59:30.366779Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7501625830398499947:2073];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:59:30.366843Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0028cf/r3tmp/tmpLr7bwg/pdisk_1.dat 2025-05-07T08:59:30.817018Z node 4 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:59:30.915766Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:59:30.915873Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:59:30.933800Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 30012, node 4 2025-05-07T08:59:31.091255Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:59:31.091282Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:59:31.091291Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:59:31.091407Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23182 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:59:31.723612Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:59:35.370374Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7501625830398499947:2073];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:59:35.370444Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:59:36.261491Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7501625856168304811:2339], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:59:36.261581Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:59:36.262153Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7501625856168304823:2342], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:59:36.266830Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480 2025-05-07T08:59:36.331548Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7501625856168304825:2343], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-05-07T08:59:36.410197Z node 4 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [4:7501625856168304903:2691] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:59:39.246941Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7501625869159347544:2073];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:59:39.250352Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect p ... 36], ActorState: ExecuteState, TraceId: 01jtmzhwhjems0k25zevz62agj, got TEvKqpBuffer::TEvError in ExecuteState, status: ABORTED send to: [7:7501625899224120381:2336] from: [7:7501625899224120351:2336] 2025-05-07T08:59:46.621845Z node 7 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1944: ActorId: [7:7501625899224120381:2336] TxId: 281474976710678. Ctx: { TraceId: 01jtmzhwhjems0k25zevz62agj, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=ZTQ4MGJmNmMtZTAxNTU2YjAtNTM1ODJjMDctOTlmYzFiNTU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ABORTED: {
: Error: Transaction locks invalidated. Table: `Root/names`., code: 2001 subissue: {
: Error: Operation is aborting because locks are not valid, code: 2001 } } 2025-05-07T08:59:46.622047Z node 7 :KQP_SESSION WARN: kqp_session_actor.cpp:2578: SessionId: ydb://session/3?node_id=7&id=ZTQ4MGJmNmMtZTAxNTU2YjAtNTM1ODJjMDctOTlmYzFiNTU=, ActorId: [7:7501625886339217782:2336], ActorState: ExecuteState, TraceId: 01jtmzhwhjems0k25zevz62agj, Create QueryResponse for error on request, msg: 2025-05-07T08:59:49.915154Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7501625912703402083:2073];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:59:49.974492Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0028cf/r3tmp/tmpxuCojG/pdisk_1.dat 2025-05-07T08:59:50.633590Z node 10 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:59:50.682790Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:59:50.682899Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:59:50.690477Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 19802, node 10 2025-05-07T08:59:50.957885Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:59:50.957916Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:59:50.957927Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:59:50.958092Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:15887 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:59:51.376849Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T08:59:54.915274Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[10:7501625912703402083:2073];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:59:54.915379Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:59:55.926273Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7501625938473206977:2340], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:59:55.926427Z node 10 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:59:55.927022Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7501625938473206990:2343], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:59:55.931999Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480 2025-05-07T08:59:55.990170Z node 10 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [10:7501625938473206992:2344], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-05-07T08:59:56.082763Z node 10 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [10:7501625942768174375:2698] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:59:58.806170Z node 13 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[13:7501625949197165077:2073];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:59:58.806277Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0028cf/r3tmp/tmpv1uB2S/pdisk_1.dat 2025-05-07T08:59:59.358518Z node 13 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:59:59.447201Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:59:59.447310Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:59:59.457011Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 7041, node 13 2025-05-07T08:59:59.762716Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:59:59.762746Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:59:59.762759Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:59:59.762919Z node 13 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:13937 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:00:00.583771Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T09:00:03.774111Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[13:7501625949197165077:2073];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:00:03.774193Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:00:04.807493Z node 13 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [13:7501625974966969954:2340], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:00:04.807625Z node 13 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:00:04.808017Z node 13 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [13:7501625974966969966:2343], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:00:04.819808Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480 2025-05-07T09:00:04.909253Z node 13 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [13:7501625974966969968:2344], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-05-07T09:00:04.999834Z node 13 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [13:7501625974966970063:2688] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } |91.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_init/ydb-core-tx-datashard-ut_init |91.5%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_init/ydb-core-tx-datashard-ut_init |91.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_init/ydb-core-tx-datashard-ut_init ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLTestsWithReboots::MoveTable [GOOD] Test command err: ==== RunWithTabletReboots =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2141] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2141] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:116:2058] recipient: [1:111:2142] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:116:2058] recipient: [1:111:2142] Leader for TabletID 72057594046678944 is [1:123:2149] sender: [1:126:2058] recipient: [1:108:2140] Leader for TabletID 72057594046447617 is [1:129:2154] sender: [1:131:2058] recipient: [1:109:2141] Leader for TabletID 72057594046316545 is [1:134:2157] sender: [1:136:2058] recipient: [1:111:2142] 2025-05-07T08:58:34.006349Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:58:34.006465Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:58:34.006525Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:58:34.006582Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:58:34.006629Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T08:58:34.006656Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:58:34.006718Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:58:34.006804Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, 
IsManualStartup# false 2025-05-07T08:58:34.007573Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:58:34.007926Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:58:34.124047Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7610: Got new config: QueryServiceConfig { AvailableExternalDataSources: "ObjectStorage" AvailableExternalDataSources: "ClickHouse" AvailableExternalDataSources: "PostgreSQL" AvailableExternalDataSources: "MySQL" AvailableExternalDataSources: "Ydb" AvailableExternalDataSources: "YT" AvailableExternalDataSources: "Greenplum" AvailableExternalDataSources: "MsSQLServer" AvailableExternalDataSources: "Oracle" AvailableExternalDataSources: "Logging" AvailableExternalDataSources: "Solomon" } 2025-05-07T08:58:34.124116Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:58:34.124885Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# ObjectStorage, ClickHouse, PostgreSQL, MySQL, Ydb, YT, Greenplum, MsSQLServer, Oracle, Logging, Solomon Leader for TabletID 72057594046447617 is [1:129:2154] sender: [1:175:2058] recipient: [1:15:2062] 2025-05-07T08:58:34.158110Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:58:34.158344Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:58:34.158517Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:58:34.170382Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:58:34.170665Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:58:34.171343Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:34.171576Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:58:34.174295Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:58:34.175656Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:58:34.175720Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:58:34.175945Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:58:34.176012Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:58:34.176066Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:58:34.176240Z 
node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:213:2058] recipient: [1:211:2212] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:213:2058] recipient: [1:211:2212] Leader for TabletID 72057594037968897 is [1:217:2216] sender: [1:218:2058] recipient: [1:211:2212] 2025-05-07T08:58:34.184618Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:123:2149] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T08:58:34.332618Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:58:34.332868Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:34.333149Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:58:34.333392Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:58:34.333457Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:34.336345Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:34.336491Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:58:34.336801Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:34.336854Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:58:34.336930Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T08:58:34.336979Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:58:34.348113Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:34.348184Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:58:34.348237Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 
2025-05-07T08:58:34.350669Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:34.350792Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:34.350834Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:58:34.350913Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:58:34.354896Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:58:34.365082Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:58:34.365360Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:134:2157] sender: [1:253:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:58:34.366492Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:34.366676Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 134 RawX2: 4294969453 } } St ... 
FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1003, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:00:07.245007Z node 62 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1003, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-05-07T09:00:07.245158Z node 62 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:00:07.245196Z node 62 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [62:205:2207], at schemeshard: 72057594046678944, txId: 1003, path id: 1 2025-05-07T09:00:07.245240Z node 62 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [62:205:2207], at schemeshard: 72057594046678944, txId: 1003, path id: 3 2025-05-07T09:00:07.245798Z node 62 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1003:0, at schemeshard: 72057594046678944 2025-05-07T09:00:07.245855Z node 62 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1036: NTableState::TProposedWaitParts operationId# 1003:0 ProgressState at tablet: 72057594046678944 2025-05-07T09:00:07.245959Z node 62 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:674: all shard schema changes has been received, operationId: 1003:0, at schemeshard: 72057594046678944 2025-05-07T09:00:07.246044Z node 62 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:686: send schema changes ack message, operation: 1003:0, datashard: 72075186233409546, at schemeshard: 72057594046678944 2025-05-07T09:00:07.246084Z node 62 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1003:0 129 -> 240 2025-05-07T09:00:07.247017Z node 62 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 11 PathOwnerId: 72057594046678944, cookie: 1003 2025-05-07T09:00:07.247117Z node 62 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 11 PathOwnerId: 72057594046678944, cookie: 1003 2025-05-07T09:00:07.247149Z node 62 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 1003 2025-05-07T09:00:07.247186Z node 62 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1003, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 11 2025-05-07T09:00:07.247227Z node 62 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 4 2025-05-07T09:00:07.248369Z node 62 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 1003 2025-05-07T09:00:07.248493Z node 62 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 1003 2025-05-07T09:00:07.248529Z node 62 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 1003 2025-05-07T09:00:07.248565Z node 62 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1003, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 18446744073709551615 2025-05-07T09:00:07.248605Z node 62 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-05-07T09:00:07.248693Z node 62 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 1003, ready parts: 0/1, is published: true 2025-05-07T09:00:07.265632Z node 62 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1003:0, at schemeshard: 72057594046678944 2025-05-07T09:00:07.265724Z node 62 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_move_table.cpp:482: TMoveTable TDone, operationId: 1003:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T09:00:07.265772Z node 62 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_move_table.cpp:492: TMoveTable TDone, operationId: 1003:0 ProgressState, SourcePathId: [OwnerId: 72057594046678944, LocalPathId: 3], TargetPathId: [OwnerId: 72057594046678944, LocalPathId: 4], at schemeshard: 72057594046678944 2025-05-07T09:00:07.265882Z node 62 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#1003:0 progress is 1/1 2025-05-07T09:00:07.265914Z node 62 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 1003 ready parts: 1/1 2025-05-07T09:00:07.265954Z node 62 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#1003:0 progress is 1/1 2025-05-07T09:00:07.266004Z node 62 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 1003 ready parts: 1/1 2025-05-07T09:00:07.266039Z node 62 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 1003, ready parts: 1/1, is published: true 2025-05-07T09:00:07.266090Z node 62 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 1003 ready parts: 1/1 2025-05-07T09:00:07.266131Z node 62 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 1003:0 2025-05-07T09:00:07.266160Z node 62 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 1003:0 2025-05-07T09:00:07.266320Z node 62 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 3 2025-05-07T09:00:07.266357Z node 62 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate source path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-05-07T09:00:07.266989Z node 62 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at 
schemeshard: 72057594046678944 2025-05-07T09:00:07.267041Z node 62 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-05-07T09:00:07.267109Z node 62 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-05-07T09:00:07.267557Z node 62 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 2025-05-07T09:00:07.268115Z node 62 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 2025-05-07T09:00:07.276600Z node 62 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 1003, wait until txId: 1003 TestWaitNotification wait txId: 1003 2025-05-07T09:00:07.276939Z node 62 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 1003: send EvNotifyTxCompletion 2025-05-07T09:00:07.276986Z node 62 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 1003 2025-05-07T09:00:07.277369Z node 62 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1003, at schemeshard: 72057594046678944 2025-05-07T09:00:07.277482Z node 62 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 1003: got EvNotifyTxCompletionResult 2025-05-07T09:00:07.277517Z node 62 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 1003: satisfy waiter [62:469:2442] TestWaitNotification: OK eventTxId 1003 2025-05-07T09:00:07.278080Z node 62 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/TTLEnabledTableMoved" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T09:00:07.278334Z node 62 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/TTLEnabledTableMoved" took 300us result status StatusSuccess 2025-05-07T09:00:07.278849Z node 62 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/TTLEnabledTableMoved" PathDescription { Self { Name: "TTLEnabledTableMoved" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 1003 CreateStep: 5000004 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 2 } ChildrenExist: false } Table { Name: "TTLEnabledTableMoved" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "modified_at" Type: "Timestamp" TypeId: 50 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 
TableSchemaVersion: 2 TTLSettings { Enabled { ColumnName: "modified_at" ExpireAfterSeconds: 3600 Tiers { ApplyAfterSeconds: 3600 Delete { } } } } IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 1 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 4 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TRegisterNodeOverDiscoveryService::ServerWithCertVerification_ClientProvideIncorrectCerts >> TestKinesisHttpProxy::TestCounters [GOOD] >> Cdc::InitialScanRacyCompleteAndRequest [GOOD] >> Cdc::InitialScanAndLimits >> TestYmqHttpProxy::TestTagQueueMultipleQueriesInflight [GOOD] >> YdbYqlClient::TestYqlTypesFromPreparedQuery [GOOD] >> TestYmqHttpProxy::TestSendMessageBatch [GOOD] >> TGRpcLdapAuthentication::DisableBuiltinAuthMechanism [GOOD] >> TGRpcLdapAuthentication::LdapAuthWithInvalidLogin [GOOD] >> TGRpcLdapAuthentication::LdapAuthWithInvalidPassword >> TRegisterNodeOverDiscoveryService::ServerWithCertVerification_ClientWithCorrectCerts >> TestKinesisHttpProxy::TestEmptyHttpBody >> TFlatTableExecutor_ResourceProfile::TestExecutorReuseStaticMemory [GOOD] >> TFlatTableExecutor_ResourceProfile::TestExecutorRequestPages >> YdbYqlClient::TestDecimal1 ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> YdbYqlClient::TestYqlTypesFromPreparedQuery [GOOD] Test command err: 2025-05-07T08:59:44.860548Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501625891097069261:2080];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:59:44.867002Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0028a5/r3tmp/tmpNzxAx9/pdisk_1.dat 2025-05-07T08:59:45.595920Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:59:45.596038Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:59:45.605931Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:59:45.709998Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 12222, node 1 
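For reference, the TTLSettings block in the DescribeScheme result for /MyRoot/TTLEnabledTableMoved above (Enabled { ColumnName: "modified_at" ExpireAfterSeconds: 3600 ... Delete { } }) describes a one-hour row-deletion TTL that survived the table move intact, which is what the test verifies. As a sketch only, a table with that behavior could be declared in YQL roughly as follows (assuming the TTL syntax of current YDB releases; table and column names are taken from the log above):

    CREATE TABLE TTLEnabledTableMoved (
        key Uint64,
        modified_at Timestamp,
        PRIMARY KEY (key)
    ) WITH (
        -- one hour corresponds to ExpireAfterSeconds: 3600 in the describe result
        TTL = Interval("PT1H") ON modified_at
    );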
2025-05-07T08:59:46.050615Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:59:46.050636Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:59:46.050643Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:59:46.050776Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:28317 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:59:46.339828Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:59:49.422710Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501625912571906810:2338], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:59:49.422883Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:59:49.700336Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501625912571906831:2632] txid# 281474976710658, issues: { message: "Column Key has wrong key type Json" severity: 1 } 2025-05-07T08:59:49.738519Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501625912571906841:2343], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:59:49.738599Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:59:49.752248Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501625912571906848:2642] txid# 281474976710659, issues: { message: "Column Key has wrong key type Yson" severity: 1 } test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0028a5/r3tmp/tmp7yadkh/pdisk_1.dat TServer::EnableGrpc on GrpcPort 15829, node 4 TClient is connected to server localhost:18531 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0028a5/r3tmp/tmp8NO1nT/pdisk_1.dat TServer::EnableGrpc on GrpcPort 21430, node 7 TClient is connected to server localhost:12655 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 
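The two schemereq.cpp errors above — "Column Key has wrong key type Json" and "Column Key has wrong key type Yson" — are the failures this test deliberately provokes: YDB does not accept Json or Yson columns in a primary key. A minimal YQL statement that would be rejected for the same reason (the table name is illustrative, not from the test; the exact error text may surface from a different layer depending on the API used):

    CREATE TABLE BadKeyTable (
        Key Json,          -- Json is not a comparable type, so it cannot be a key column
        Value String,
        PRIMARY KEY (Key)
    );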
2025-05-07T09:00:03.596153Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7501625971162992068:2075];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:00:03.596239Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0028a5/r3tmp/tmpZHRJjG/pdisk_1.dat 2025-05-07T09:00:03.911125Z node 10 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:00:03.935670Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:00:03.935760Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:00:03.940042Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 23762, node 10 2025-05-07T09:00:04.266979Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:00:04.267003Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:00:04.267012Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:00:04.267181Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:22162 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:00:04.808072Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T09:00:08.594118Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[10:7501625971162992068:2075];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:00:08.594216Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:00:10.303606Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7501626001227764283:2343], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:00:10.303734Z node 10 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:00:10.304315Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7501626001227764295:2346], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:00:10.308943Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480 2025-05-07T09:00:10.354292Z node 10 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [10:7501626001227764297:2347], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-05-07T09:00:10.427100Z node 10 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [10:7501626001227764368:2693] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> TGRpcLdapAuthentication::DisableBuiltinAuthMechanism [GOOD] Test command err: 2025-05-07T08:59:34.474669Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501625846373122205:2111];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:59:34.474725Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0028ae/r3tmp/tmpNppLKg/pdisk_1.dat 2025-05-07T08:59:35.474342Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:59:35.476817Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:59:35.563087Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:59:35.563186Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:59:35.592925Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 29034, node 1 2025-05-07T08:59:35.966394Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:59:35.966422Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:59:35.966431Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:59:35.966574Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:13505 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-05-07T08:59:36.662468Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:59:39.477841Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501625846373122205:2111];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:59:39.477920Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:59:42.306585Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7501625880082450159:2073];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:59:42.306694Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0028ae/r3tmp/tmpeiuHx0/pdisk_1.dat 2025-05-07T08:59:42.808560Z node 4 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:59:42.836445Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:59:42.836552Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:59:42.840970Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 14076, node 4 2025-05-07T08:59:43.098459Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:59:43.098487Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:59:43.098494Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:59:43.098643Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:21324 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-05-07T08:59:43.445112Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:59:48.106391Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7501625906224497283:2073];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:59:48.106468Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0028ae/r3tmp/tmphCo5lt/pdisk_1.dat 2025-05-07T08:59:48.510998Z node 7 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:59:48.537735Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:59:48.538000Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:59:48.551617Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 7155, node 7 2025-05-07T08:59:48.712151Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:59:48.712177Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:59:48.712185Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:59:48.712326Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3943 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:59:49.347506Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T08:59:53.102100Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[7:7501625906224497283:2073];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:59:53.102209Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:59:58.410635Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7501625948499778342:2073];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:59:58.410709Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0028ae/r3tmp/tmpRt1E5Y/pdisk_1.dat 2025-05-07T08:59:58.817933Z node 10 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:59:58.889806Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:59:58.889903Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:59:58.902230Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 24753, node 10 2025-05-07T08:59:59.037947Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:59:59.037999Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:59:59.038009Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:59:59.038169Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:5097 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:59:59.646018Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T09:00:05.458236Z node 13 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[13:7501625978609432307:2074];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:00:05.458392Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0028ae/r3tmp/tmpSaMPNZ/pdisk_1.dat 2025-05-07T09:00:06.072055Z node 13 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 20179, node 13 2025-05-07T09:00:06.334567Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:00:06.334698Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:00:06.344129Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:00:06.375075Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:00:06.375117Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:00:06.375126Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:00:06.375280Z node 13 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6844 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:00:06.835151Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 
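A pattern recurring across the test outputs above: on the first query against a fresh database, KQP_WORKLOAD_SERVICE logs NOT_FOUND for the default resource pool, schedules a retry with "Transaction ... completed, doublechecking", and a concurrent creation attempt then fails benignly with "path exist, request accepts it" for /Root/.metadata/workload_manager/pools/default. As the log suggests, this appears to be lazy bootstrap of the default pool racing with itself rather than a real error; any first query triggers it, e.g. in YQL:

    SELECT 1;  -- the first query in a fresh database lazily creates the default resource pool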
------- [TM] {asan, default-linux-x86_64, release} ydb/core/http_proxy/ut/inside_ydb_ut/unittest >> TestYmqHttpProxy::TestTagQueueMultipleQueriesInflight [GOOD] Test command err: 2025-05-07T08:58:58.893152Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501625693902667564:2134];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:58:58.893547Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/001773/r3tmp/tmpuVZHi8/pdisk_1.dat 2025-05-07T08:58:59.441537Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:58:59.441640Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:58:59.616918Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:58:59.647962Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 20774, node 1 2025-05-07T08:58:59.758421Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:58:59.758444Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:58:59.758451Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:58:59.758556Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:63336 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:59:00.408711Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T08:59:00.431060Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 TClient is connected to server localhost:63336 2025-05-07T08:59:00.676993Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:59:00.684535Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-05-07T08:59:00.686363Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:59:00.695188Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710660, at schemeshard: 72057594046644480 2025-05-07T08:59:00.700622Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:59:00.865842Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-05-07T08:59:00.953067Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710663, at schemeshard: 72057594046644480 2025-05-07T08:59:00.957821Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-05-07T08:59:01.004030Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:59:01.063086Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:59:01.115207Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:59:01.155421Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:59:01.201001Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480 waiting... waiting... 
2025-05-07T08:59:01.255556Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480 2025-05-07T08:59:01.307284Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:59:03.105051Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501625715377505363:2376], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:59:03.105170Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:59:03.105590Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501625715377505375:2379], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:59:03.110127Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710673:3, at schemeshard: 72057594046644480 2025-05-07T08:59:03.138785Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501625715377505377:2380], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710673 completed, doublechecking } 2025-05-07T08:59:03.227751Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501625715377505428:2867] txid# 281474976710674, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 18], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:59:03.550868Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710675. Ctx: { TraceId: 01jtmzgj1y7vfq7p41xrfcx8gm, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OGFlZmE5Yi1lYTYwMTA0LTZkZjI0ODkyLTY0MDU3Y2U3, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:59:03.584741Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710676:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:59:03.623530Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710677:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:59:03.696084Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710678:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:59:03.739351Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710679:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:59:03.817009Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710680:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:59:03.892956Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501625693902667564:2134];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:59:03.893019Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:59:03.901800Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710681:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:59:03.933830Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710682:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:59:03.983547Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710683:0, at schemeshard: 72057594046644480 waiting... 2025 ... 
{ Scheme: 4 } } } } } Member { Name: "ShowDetailedCountersDeadline" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "VisibilityTimeout" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } } } } } } } } Member { Name: "queueExists" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } Member { Name: "tags" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } } } Value { Struct { Optional { Optional { Struct { Optional { Bool: false } } Struct { Optional { Uint64: 0 } } Struct { Optional { Text: "" } } Struct { Optional { Text: "" } } Struct { Optional { Bool: true } } Struct { Optional { Uint64: 0 } } Struct { Optional { Uint64: 262144 } } Struct { Optional { Uint64: 345600000 } } Struct { Optional { Uint64: 0 } } Struct { } Struct { Optional { Uint64: 30000 } } } } } Struct { Optional { Bool: true } } Struct { Optional { Text: "{\"k0\":\"v\"}" } } } } } 2025-05-07T09:00:10.787651Z node 7 :SQS DEBUG: executor.cpp:287: Request [27154d32-916d79e7-add93f07-14865683] Query(idx=INTERNAL_GET_QUEUE_ATTRIBUTES_ID) Queue [cloud4/000000000000000301v0] Attempt 1 execution duration: 69ms 2025-05-07T09:00:10.788224Z node 7 :SQS TRACE: executor.cpp:325: Request [27154d32-916d79e7-add93f07-14865683] Query(idx=INTERNAL_GET_QUEUE_ATTRIBUTES_ID) Queue [cloud4/000000000000000301v0] Sending mkql execution result: { Status: 48 TxId: 281474976710863 Step: 1746608410820 StatusCode: SUCCESS ExecutionEngineStatus: 1 ExecutionEngineResponseStatus: 2 ExecutionEngineEvaluatedResponse { Type { Kind: Struct Struct { Member { Name: "attrs" Type { Kind: Optional Optional { Item { Kind: Optional Optional { Item { Kind: Struct Struct { Member { Name: "ContentBasedDeduplication" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } Member { Name: "DelaySeconds" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "DlqArn" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "DlqName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "FifoQueue" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } Member { Name: "MaxReceiveCount" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "MaximumMessageSize" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "MessageRetentionPeriod" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "ReceiveMessageWaitTime" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "ShowDetailedCountersDeadline" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "VisibilityTimeout" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } } } } } } } } Member { Name: "queueExists" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } Member { Name: "tags" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } } } Value { Struct { Optional { Optional { Struct { Optional { Bool: false } } Struct { Optional { Uint64: 0 } } Struct { Optional { Text: "" } } Struct { Optional { Text: "" } } Struct { Optional { Bool: true } } Struct { Optional { Uint64: 0 } } Struct { Optional { Uint64: 262144 } } Struct { Optional { Uint64: 345600000 } } Struct 
{ Optional { Uint64: 0 } } Struct { } Struct { Optional { Uint64: 30000 } } } } } Struct { Optional { Bool: true } } Struct { Optional { Text: "{\"k0\":\"v\"}" } } } } } 2025-05-07T09:00:10.788326Z node 7 :SQS TRACE: executor.cpp:327: Request [27154d32-916d79e7-add93f07-14865683] Query(idx=INTERNAL_GET_QUEUE_ATTRIBUTES_ID) Queue [cloud4/000000000000000301v0] Minikql data response: {"attrs": {"ContentBasedDeduplication": false, "DelaySeconds": 0, "DlqArn": "", "DlqName": "", "FifoQueue": true, "MaxReceiveCount": 0, "MaximumMessageSize": 262144, "MessageRetentionPeriod": 345600000, "ReceiveMessageWaitTime": 0, "ShowDetailedCountersDeadline": null, "VisibilityTimeout": 30000}, "queueExists": true, "tags": "{\"k0\":\"v\"}"} 2025-05-07T09:00:10.788534Z node 7 :SQS DEBUG: executor.cpp:401: Request [27154d32-916d79e7-add93f07-14865683] Query(idx=INTERNAL_GET_QUEUE_ATTRIBUTES_ID) Queue [cloud4/000000000000000301v0] execution duration: 69ms 2025-05-07T09:00:10.788718Z node 7 :HTTP DEBUG: http_proxy_incoming.cpp:83: (#37,[::1]:46724) incoming connection opened 2025-05-07T09:00:10.788806Z node 7 :HTTP DEBUG: http_proxy_incoming.cpp:145: (#37,[::1]:46724) -> (POST /Root) 2025-05-07T09:00:10.789150Z node 7 :SQS DEBUG: queue_leader.cpp:556: Request [27154d32-916d79e7-add93f07-14865683] Sending executed reply 2025-05-07T09:00:10.789701Z node 7 :HTTP_PROXY INFO: http_service.cpp:102: proxy service: incoming request from [5889:da00:6050:0:4089:da00:6050:0] request [ListQueueTags] url [/Root] database [/Root] requestId: beb8a6a5-34816fc4-d994e00b-9ad25c8a 2025-05-07T09:00:10.790451Z node 7 :HTTP_PROXY INFO: http_req.cpp:520: http request [ListQueueTags] requestId [beb8a6a5-34816fc4-d994e00b-9ad25c8a] got new request from [5889:da00:6050:0:4089:da00:6050:0] 2025-05-07T09:00:10.791082Z node 7 :HTTP_PROXY DEBUG: http_req.cpp:454: http request [ListQueueTags] requestId [beb8a6a5-34816fc4-d994e00b-9ad25c8a] Got cloud auth response. FolderId: folder4 CloudId: cloud4 UserSid: fake_user_sid@as 2025-05-07T09:00:10.791101Z node 7 :HTTP_PROXY INFO: http_req.cpp:280: http request [ListQueueTags] requestId [beb8a6a5-34816fc4-d994e00b-9ad25c8a] sending grpc request to '' database: '/Root' iam token size: 0 2025-05-07T09:00:10.791318Z node 7 :SQS DEBUG: ymq_proxy.cpp:148: Got new request in YMQ proxy. 
FolderId: folder4, CloudId: cloud4, UserSid: fake_user_sid@as, RequestId: beb8a6a5-34816fc4-d994e00b-9ad25c8a 2025-05-07T09:00:10.791435Z node 7 :SQS DEBUG: proxy_actor.cpp:263: Request [beb8a6a5-34816fc4-d994e00b-9ad25c8a] Proxy actor: used user_name='cloud4', queue_name='000000000000000301v0', folder_id='folder4' 2025-05-07T09:00:10.791447Z node 7 :SQS DEBUG: proxy_actor.cpp:78: Request [beb8a6a5-34816fc4-d994e00b-9ad25c8a] Request proxy started 2025-05-07T09:00:10.791604Z node 7 :SQS DEBUG: service.cpp:742: Request [beb8a6a5-34816fc4-d994e00b-9ad25c8a] Answer configuration for queue [cloud4/000000000000000301v0] without leader 2025-05-07T09:00:10.791684Z node 7 :SQS DEBUG: proxy_actor.cpp:97: Request [beb8a6a5-34816fc4-d994e00b-9ad25c8a] Get configuration duration: 1ms 2025-05-07T09:00:10.791793Z node 7 :SQS DEBUG: proxy_service.cpp:246: Request [beb8a6a5-34816fc4-d994e00b-9ad25c8a] Send get leader node request to sqs service for cloud4/000000000000000301v0 2025-05-07T09:00:10.791817Z node 7 :SQS DEBUG: service.cpp:562: Request [beb8a6a5-34816fc4-d994e00b-9ad25c8a] Leader node for queue [cloud4/000000000000000301v0] is 7 2025-05-07T09:00:10.791837Z node 7 :SQS DEBUG: proxy_service.cpp:170: Request [beb8a6a5-34816fc4-d994e00b-9ad25c8a] Got leader node for queue response. Node id: 7. Status: 0 2025-05-07T09:00:10.791937Z node 7 :SQS TRACE: proxy_service.cpp:303: Request [beb8a6a5-34816fc4-d994e00b-9ad25c8a] Sending request from proxy to leader node 7: ListQueueTags { Auth { UserName: "cloud4" FolderId: "folder4" UserSID: "fake_user_sid@as" } QueueName: "000000000000000301v0" } RequestId: "beb8a6a5-34816fc4-d994e00b-9ad25c8a" 2025-05-07T09:00:10.792006Z node 7 :SQS DEBUG: proxy_service.cpp:70: Request [beb8a6a5-34816fc4-d994e00b-9ad25c8a] Received Sqs Request: ListQueueTags { Auth { UserName: "cloud4" FolderId: "folder4" UserSID: "fake_user_sid@as" } QueueName: "000000000000000301v0" } RequestId: "beb8a6a5-34816fc4-d994e00b-9ad25c8a" 2025-05-07T09:00:10.792048Z node 7 :SQS DEBUG: action.h:131: Request [beb8a6a5-34816fc4-d994e00b-9ad25c8a] Request started. Actor: [7:7501626002489319218:4885] 2025-05-07T09:00:10.792078Z node 7 :SQS TRACE: service.cpp:1453: Inc local leader ref for actor [7:7501626002489319218:4885] 2025-05-07T09:00:10.792094Z node 7 :SQS DEBUG: service.cpp:735: Request [beb8a6a5-34816fc4-d994e00b-9ad25c8a] Forward configuration request to queue [cloud4/000000000000000301v0] leader 2025-05-07T09:00:10.792129Z node 7 :SQS DEBUG: action.h:623: Request [beb8a6a5-34816fc4-d994e00b-9ad25c8a] Get configuration duration: 0ms 2025-05-07T09:00:10.792143Z node 7 :SQS TRACE: action.h:643: Request [beb8a6a5-34816fc4-d994e00b-9ad25c8a] Got configuration. 
Root url: http://ghrun-sykirh5vua.auto.internal:8771, Shards: 1, Fail: 0 2025-05-07T09:00:10.792156Z node 7 :SQS TRACE: action.h:425: Request [beb8a6a5-34816fc4-d994e00b-9ad25c8a] DoRoutine 2025-05-07T09:00:10.792228Z node 7 :SQS TRACE: action.h:262: Request [beb8a6a5-34816fc4-d994e00b-9ad25c8a] SendReplyAndDie from action actor { ListQueueTags { RequestId: "beb8a6a5-34816fc4-d994e00b-9ad25c8a" Tags { Key: "k0" Value: "v" } } } 2025-05-07T09:00:10.792334Z node 7 :SQS TRACE: proxy_service.h:35: Request [beb8a6a5-34816fc4-d994e00b-9ad25c8a] Sending sqs response: { ListQueueTags { RequestId: "beb8a6a5-34816fc4-d994e00b-9ad25c8a" Tags { Key: "k0" Value: "v" } } RequestId: "beb8a6a5-34816fc4-d994e00b-9ad25c8a" FolderId: "folder4" ResourceId: "000000000000000301v0" IsFifo: true QueueTags { Key: "k0" Value: "v" } } 2025-05-07T09:00:10.792491Z node 7 :SQS TRACE: proxy_service.cpp:194: HandleSqsResponse ListQueueTags { RequestId: "beb8a6a5-34816fc4-d994e00b-9ad25c8a" Tags { Key: "k0" Value: "v" } } RequestId: "beb8a6a5-34816fc4-d994e00b-9ad25c8a" FolderId: "folder4" ResourceId: "000000000000000301v0" IsFifo: true QueueTags { Key: "k0" Value: "v" } 2025-05-07T09:00:10.792561Z node 7 :SQS TRACE: proxy_service.cpp:208: Sending answer to proxy actor [7:7501626002489319217:2717]: ListQueueTags { RequestId: "beb8a6a5-34816fc4-d994e00b-9ad25c8a" Tags { Key: "k0" Value: "v" } } RequestId: "beb8a6a5-34816fc4-d994e00b-9ad25c8a" FolderId: "folder4" ResourceId: "000000000000000301v0" IsFifo: true QueueTags { Key: "k0" Value: "v" } 2025-05-07T09:00:10.792606Z node 7 :SQS TRACE: service.cpp:1464: Dec local leader ref for actor [7:7501626002489319218:4885]. Found: 1 2025-05-07T09:00:10.793091Z node 7 :SQS TRACE: proxy_actor.cpp:178: Request [beb8a6a5-34816fc4-d994e00b-9ad25c8a] HandleResponse: { ListQueueTags { RequestId: "beb8a6a5-34816fc4-d994e00b-9ad25c8a" Tags { Key: "k0" Value: "v" } } RequestId: "beb8a6a5-34816fc4-d994e00b-9ad25c8a" FolderId: "folder4" ResourceId: "000000000000000301v0" IsFifo: true QueueTags { Key: "k0" Value: "v" } }, status: OK 2025-05-07T09:00:10.793185Z node 7 :SQS DEBUG: proxy_actor.cpp:147: Request [beb8a6a5-34816fc4-d994e00b-9ad25c8a] Sending reply from proxy actor: { ListQueueTags { RequestId: "beb8a6a5-34816fc4-d994e00b-9ad25c8a" Tags { Key: "k0" Value: "v" } } RequestId: "beb8a6a5-34816fc4-d994e00b-9ad25c8a" FolderId: "folder4" ResourceId: "000000000000000301v0" IsFifo: true QueueTags { Key: "k0" Value: "v" } } 2025-05-07T09:00:10.793486Z node 7 :HTTP_PROXY DEBUG: http_req.cpp:379: http request [ListQueueTags] requestId [beb8a6a5-34816fc4-d994e00b-9ad25c8a] Got successful GRPC response. 2025-05-07T09:00:10.793571Z node 7 :HTTP_PROXY INFO: http_req.cpp:1207: http request [ListQueueTags] requestId [beb8a6a5-34816fc4-d994e00b-9ad25c8a] reply ok 2025-05-07T09:00:10.793693Z node 7 :HTTP_PROXY DEBUG: http_req.cpp:1267: http request [ListQueueTags] requestId [beb8a6a5-34816fc4-d994e00b-9ad25c8a] Send metering event.
HttpStatusCode: 200 IsFifo: 1 FolderId: folder4 RequestSizeInBytes: 530 ResponseSizeInBytes: 197 SourceAddress: 5889:da00:6050:0:4089:da00:6050:0 ResourceId: 000000000000000301v0 Action: ListQueueTags 2025-05-07T09:00:10.793803Z node 7 :HTTP DEBUG: http_proxy_incoming.cpp:243: (#37,[::1]:46724) <- (200 ) 2025-05-07T09:00:10.793934Z node 7 :HTTP DEBUG: http_proxy_incoming.cpp:296: (#37,[::1]:46724) connection closed Http output full {"Tags":{"k0":"v"}} >> YdbTableBulkUpsert::Timeout [GOOD] >> YdbTableBulkUpsert::ZeroRows >> YdbYqlClient::RetryOperationAsync [GOOD] >> YdbYqlClient::QueryLimits >> TFlatTableExecutor_ResourceProfile::TestExecutorRequestPages [GOOD] >> TFlatTableExecutor_ResourceProfile::TestExecutorPageLimitExceeded >> TFlatTableExecutor_ResourceProfile::TestExecutorPageLimitExceeded [GOOD] >> TFlatTableExecutor_ResourceProfile::TestExecutorRequestMemory >> Cdc::InitialScan_WithTopicSchemeTx [GOOD] >> Cdc::InitialScan_TopicAutoPartitioning >> TFlatTableExecutor_ResourceProfile::TestExecutorRequestMemory [GOOD] >> TFlatTableExecutor_ResourceProfile::TestExecutorRequestMemoryFollower >> TYqlDateTimeTests::SimpleUpsertSelect >> YdbOlapStore::ManyTables >> TRegisterNodeOverDiscoveryService::ServerWithoutCertVerification_ClientProvidesCorrectCerts [GOOD] >> TRegisterNodeOverDiscoveryService::ServerWithoutCertVerification_ClientProvidesEmptyClientCerts ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> TColumnShardTestReadWrite::CompactionSplitGranuleStrKey_PKUtf8 2025-05-07 09:00:10,419 WARNING devtools.ya.test.programs.test_tool.run_test.run_test: Wrapper execution timed out 2025-05-07 09:00:11,058 WARNING devtools.ya.test.programs.test_tool.run_test.run_test: Wrapper has overrun 600 secs timeout. Process tree before termination: pid rss ref pdirt 137523 46.4M 46.3M 23.5M test_tool run_ut @/home/runner/.ya/build/build_root/zvgn/003a2f/ydb/core/tx/columnshard/ut_rw/test-results/unittest/testing_out_stuff/chunk27/testing_out_stuff/test_tool.args 137646 1.5G 1.5G 1.5G └─ ydb-core-tx-columnshard-ut_rw --trace-path-append /home/runner/.ya/build/build_root/zvgn/003a2f/ydb/core/tx/columnshard/ut_rw/test-results/unittest/testing_out_stuff/chu Test command err: 2025-05-07T08:50:11.987583Z node 1 :BLOB_CACHE NOTICE: ctor_logger.h:56: MaxCacheDataSize: 20971520 InFlightDataSize: 0 2025-05-07T08:50:12.098031Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-05-07T08:50:12.122741Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-05-07T08:50:12.123050Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-05-07T08:50:12.131071Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-05-07T08:50:12.131275Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-05-07T08:50:12.131466Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 
2025-05-07T08:50:12.131568Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-05-07T08:50:12.131643Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-05-07T08:50:12.131720Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanInsertionDedup; 2025-05-07T08:50:12.131799Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-05-07T08:50:12.131895Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-05-07T08:50:12.131978Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-05-07T08:50:12.132061Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-05-07T08:50:12.132141Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-05-07T08:50:12.132215Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-05-07T08:50:12.157820Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-05-07T08:50:12.158012Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:137;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=11;current_normalizer=CLASS_NAME=Granules; 2025-05-07T08:50:12.158068Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-05-07T08:50:12.158234Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-05-07T08:50:12.158391Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-05-07T08:50:12.158460Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-05-07T08:50:12.158497Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-05-07T08:50:12.158565Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-05-07T08:50:12.158609Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-05-07T08:50:12.158647Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-05-07T08:50:12.158675Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-05-07T08:50:12.158870Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-05-07T08:50:12.158955Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-05-07T08:50:12.159003Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-05-07T08:50:12.159064Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-05-07T08:50:12.159163Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-05-07T08:50:12.159217Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-05-07T08:50:12.159267Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanInsertionDedup;id=CleanInsertionDedup; 2025-05-07T08:50:12.159298Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=8;type=CleanInsertionDedup; 2025-05-07T08:50:12.159387Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanInsertionDedup;id=8; 2025-05-07T08:50:12.159447Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-05-07T08:50:12.159478Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-05-07T08:50:12.159544Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 
2025-05-07T08:50:12.159588Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-05-07T08:50:12.159616Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-05-07T08:50:12.159868Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-05-07T08:50:12.159925Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-05-07T08:50:12.159972Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-05-07T08:50:12.160197Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-05-07T08:50:12.160238Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-05-07T08:50:12.160290Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-05-07T08:50:12.160422Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-05-07T08:50:12.160469Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-05-07T08:50:12.160503Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-05-07T08:50:12.160588Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-05-07T08:50:12.160659Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-05-07T08:50:12.160701Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-05-07T08:50:12.160731Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-05-07T08:50:12.161176Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=53; 2025-05-07T08:50:12.161273Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=40; ... 6;method=PutObject;id=[9437184:2:70:255:704:2768:0]; 2025-05-07T08:59:54.752902Z node 1 :S3_WRAPPER DEBUG: log.cpp:784: fline=fake_storage.cpp:106;method=PutObject;id=[9437184:2:70:255:705:2768:0]; 2025-05-07T08:59:54.752965Z node 1 :S3_WRAPPER DEBUG: log.cpp:784: fline=fake_storage.cpp:106;method=PutObject;id=[9437184:2:70:255:706:9080:0]; 2025-05-07T08:59:54.756436Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;parent=[1:139:2171];ev_type=NKikimr::NOlap::NResourceBroker::NSubscribe::TEvStartTask;fline=actor.cpp:38;event=ask_resources;task=cpu=0;mem=3651081;external_task_id=a4c28048-2b2111f0-bdd4f0fd-e77a722f;type=CS::INDEXATION;priority=0;; 2025-05-07T08:59:54.758648Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;parent=[1:139:2171];ev_type=NKikimr::NOlap::NResourceBroker::NSubscribe::TEvStartTask;fline=actor.cpp:38;event=ask_resources;task=cpu=0;mem=206265;external_task_id=a4c2d7be-2b2111f0-ae38b4ae-5d361b7f;type=CS::INDEXATION;priority=0;; 2025-05-07T08:59:54.759895Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;parent=[1:139:2171];ev_type=NKikimr::NResourceBroker::TEvResourceBroker::TEvResourceAllocated;fline=actor.cpp:29;event=result_resources;task_id=71;task=cpu=0;mem=3650994;external_task_id=a4c20456-2b2111f0-9a286b0d-cb6502d0;type=CS::INDEXATION;priority=0;; 2025-05-07T08:59:54.759971Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;parent=[1:139:2171];ev_type=NKikimr::NResourceBroker::TEvResourceBroker::TEvResourceAllocated;fline=task.cpp:9;event=resource_allocated;external_task_id=a4c20456-2b2111f0-9a286b0d-cb6502d0;mem=3650994;cpu=0; 2025-05-07T08:59:54.760035Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;parent=[1:139:2171];ev_type=NKikimr::NResourceBroker::TEvResourceBroker::TEvResourceAllocated;fline=task.cpp:40;event=allocate_resources;external_task_id=a4c20456-2b2111f0-9a286b0d-cb6502d0;task_id=71;mem=3650994;cpu=0; 2025-05-07T08:59:54.762353Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: external_task_id=a4c20456-2b2111f0-9a286b0d-cb6502d0;fline=task.cpp:110;event=OnDataReady;task=agents_waiting=0;additional_info=();;external_task_id=a4c20456-2b2111f0-9a286b0d-cb6502d0; 2025-05-07T08:59:57.880029Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: external_task_id=a4c20456-2b2111f0-9a286b0d-cb6502d0;fline=actor.cpp:48;task=agents_waiting=0;additional_info=();; 2025-05-07T08:59:57.882615Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=columnshard__write_index.cpp:50;event=TEvWriteIndex;count=1; 2025-05-07T08:59:57.896326Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;parent=[1:139:2171];ev_type=NKikimr::NResourceBroker::TEvResourceBroker::TEvResourceAllocated;fline=actor.cpp:29;event=result_resources;task_id=72;task=cpu=0;mem=3651081;external_task_id=a4c28048-2b2111f0-bdd4f0fd-e77a722f;type=CS::INDEXATION;priority=0;; 2025-05-07T08:59:57.896406Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;parent=[1:139:2171];ev_type=NKikimr::NResourceBroker::TEvResourceBroker::TEvResourceAllocated;fline=task.cpp:9;event=resource_allocated;external_task_id=a4c28048-2b2111f0-bdd4f0fd-e77a722f;mem=3651081;cpu=0; 2025-05-07T08:59:57.896477Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: 
tablet_id=9437184;parent=[1:139:2171];ev_type=NKikimr::NResourceBroker::TEvResourceBroker::TEvResourceAllocated;fline=task.cpp:40;event=allocate_resources;external_task_id=a4c28048-2b2111f0-bdd4f0fd-e77a722f;task_id=72;mem=3651081;cpu=0; 2025-05-07T08:59:57.898926Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: external_task_id=a4c28048-2b2111f0-bdd4f0fd-e77a722f;fline=task.cpp:110;event=OnDataReady;task=agents_waiting=0;additional_info=();;external_task_id=a4c28048-2b2111f0-bdd4f0fd-e77a722f; 2025-05-07T09:00:01.038684Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: external_task_id=a4c28048-2b2111f0-bdd4f0fd-e77a722f;fline=actor.cpp:48;task=agents_waiting=0;additional_info=();; 2025-05-07T09:00:01.040976Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=columnshard__write_index.cpp:50;event=TEvWriteIndex;count=1; 2025-05-07T09:00:01.064477Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;parent=[1:139:2171];ev_type=NKikimr::NResourceBroker::TEvResourceBroker::TEvResourceAllocated;fline=actor.cpp:29;event=result_resources;task_id=73;task=cpu=0;mem=206265;external_task_id=a4c2d7be-2b2111f0-ae38b4ae-5d361b7f;type=CS::INDEXATION;priority=0;; 2025-05-07T09:00:01.064564Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;parent=[1:139:2171];ev_type=NKikimr::NResourceBroker::TEvResourceBroker::TEvResourceAllocated;fline=task.cpp:9;event=resource_allocated;external_task_id=a4c2d7be-2b2111f0-ae38b4ae-5d361b7f;mem=206265;cpu=0; 2025-05-07T09:00:01.064619Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;parent=[1:139:2171];ev_type=NKikimr::NResourceBroker::TEvResourceBroker::TEvResourceAllocated;fline=task.cpp:40;event=allocate_resources;external_task_id=a4c2d7be-2b2111f0-ae38b4ae-5d361b7f;task_id=73;mem=206265;cpu=0; 2025-05-07T09:00:01.071203Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: external_task_id=a4c2d7be-2b2111f0-ae38b4ae-5d361b7f;fline=task.cpp:110;event=OnDataReady;task=agents_waiting=0;additional_info=();;external_task_id=a4c2d7be-2b2111f0-ae38b4ae-5d361b7f; 2025-05-07T09:00:01.166413Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: external_task_id=a4c2d7be-2b2111f0-ae38b4ae-5d361b7f;fline=actor.cpp:48;task=agents_waiting=0;additional_info=();; 2025-05-07T09:00:01.167382Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=columnshard__write_index.cpp:50;event=TEvWriteIndex;count=1; 2025-05-07T09:00:06.088254Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: WriteIndex at tablet 9437184 2025-05-07T09:00:06.088442Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=constructor_meta.cpp:54;memory_size=102;data_size=82;sum=21488;count=407; 2025-05-07T09:00:06.088658Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=constructor_meta.cpp:75;memory_size=20358;data_size=20354;sum=4259216;count=408;size_of_meta=144; 2025-05-07T09:00:06.088747Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=constructor_portion.cpp:40;memory_size=20430;data_size=20426;sum=4273904;count=204;size_of_portion=216; 2025-05-07T09:00:06.089245Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: 
tablet_id=9437184;self_id=[1:139:2171];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=constructor_meta.cpp:54;memory_size=110;data_size=88;sum=21598;count=409; 2025-05-07T09:00:06.089324Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=constructor_meta.cpp:75;memory_size=20238;data_size=20232;sum=4279454;count=410;size_of_meta=144; 2025-05-07T09:00:06.089389Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=constructor_portion.cpp:40;memory_size=20310;data_size=20304;sum=4294214;count=205;size_of_portion=216; 2025-05-07T09:00:06.089837Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=constructor_meta.cpp:54;memory_size=110;data_size=88;sum=21708;count=411; 2025-05-07T09:00:06.089908Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=constructor_meta.cpp:75;memory_size=20366;data_size=20360;sum=4299820;count=412;size_of_meta=144; 2025-05-07T09:00:06.089964Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=constructor_portion.cpp:40;memory_size=20438;data_size=20432;sum=4314652;count=206;size_of_portion=216; 2025-05-07T09:00:06.090770Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=constructor_meta.cpp:54;memory_size=110;data_size=85;sum=21818;count=413; 2025-05-07T09:00:06.090842Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=constructor_meta.cpp:75;memory_size=20206;data_size=20197;sum=4320026;count=414;size_of_meta=144; 2025-05-07T09:00:06.090903Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=constructor_portion.cpp:40;memory_size=20278;data_size=20269;sum=4334930;count=207;size_of_portion=216; 2025-05-07T09:00:06.091370Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=constructor_meta.cpp:54;memory_size=110;data_size=82;sum=21928;count=415; 2025-05-07T09:00:06.091436Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=constructor_meta.cpp:75;memory_size=10222;data_size=10210;sum=4330248;count=416;size_of_meta=144; 2025-05-07T09:00:06.091496Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=constructor_portion.cpp:40;memory_size=10294;data_size=10282;sum=4345224;count=208;size_of_portion=216; 2025-05-07T09:00:06.091886Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxWriteIndex[214] (CS::GENERAL) apply at tablet 9437184 2025-05-07T09:00:06.827234Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager on execute at tablet 9437184 Save Batch GenStep: 2:88 Blob count: 2113 2025-05-07T09:00:06.909251Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Index: tables 1 inserted {blob_bytes=9148544;raw_bytes=13830820;count=5;records=129260} compacted 
{blob_bytes=0;raw_bytes=0;count=0;records=0} s-compacted {blob_bytes=86904864;raw_bytes=127738630;count=44;records=1245740} inactive {blob_bytes=166747952;raw_bytes=241592181;count=83;records=2378453} evicted {blob_bytes=0;raw_bytes=0;count=0;records=0} at tablet 9437184 Traceback (most recent call last): File "library/python/testing/yatest_common/yatest/common/process.py", line 384, in wait wait_for( File "library/python/testing/yatest_common/yatest/common/process.py", line 764, in wait_for raise TimeoutError(truncate(message, MAX_MESSAGE_LEN)) yatest.common.process.TimeoutError: 600 second(s) wait timeout has expired: Command '['/home/runner/.ya/tools/v4/8580453620/test_tool', 'run_ut', '@/home/runner/.ya/build/build_root/zvgn/003a2f/ydb/core/tx/columnshard/ut_rw/test-results/unittest/testing_out_stuff/chunk27/testing_out_stuff/test_tool.args']' stopped by 600 seconds timeout During handling of the above exception, another exception occurred: Traceback (most recent call last): File "devtools/ya/test/programs/test_tool/run_test/run_test.py", line 1738, in main res.wait(check_exit_code=False, timeout=run_timeout, on_timeout=timeout_callback) File "library/python/testing/yatest_common/yatest/common/process.py", line 398, in wait raise ExecutionTimeoutError(self, str(e)) yatest.common.process.ExecutionTimeoutError: (("600 second(s) wait timeout has expired: Command '['/home/runner/.ya/tools/v4/8580453620/test_tool', 'run_ut', '@/home/runner/.ya/build/build_root/zvgn/003a2f/ydb/core/tx/columnshard/ut_rw/test-results/unittest/testing_out_stuff/chunk27/testing_out_stuff/test_tool.args']' stopped by 600 seconds timeout",), {}) |91.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/services/keyvalue/ut/ydb-services-keyvalue-ut |91.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/services/keyvalue/ut/ydb-services-keyvalue-ut |91.5%| [LD] {RESULT} $(B)/ydb/services/keyvalue/ut/ydb-services-keyvalue-ut >> TestYmqHttpProxy::TestListQueueTags [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/http_proxy/ut/inside_ydb_ut/unittest >> TestYmqHttpProxy::TestSendMessageBatch [GOOD] Test command err: 2025-05-07T08:58:59.860922Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501625698597279802:2065];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:58:59.860992Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00171b/r3tmp/tmpfY2Dcg/pdisk_1.dat 2025-05-07T08:59:00.465174Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:59:00.467655Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:59:00.467742Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:59:00.472449Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 10040, node 1 2025-05-07T08:59:00.592431Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:59:00.592458Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from 
file: (empty maybe) 2025-05-07T08:59:00.592469Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:59:00.592588Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:15783 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:59:01.027506Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... TClient is connected to server localhost:15783 2025-05-07T08:59:01.229837Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:59:01.243464Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-05-07T08:59:01.245655Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:59:01.259161Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710660, at schemeshard: 72057594046644480 2025-05-07T08:59:01.266344Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:59:01.421874Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-05-07T08:59:01.537423Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 waiting... waiting... 
2025-05-07T08:59:01.598015Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710665, at schemeshard: 72057594046644480 2025-05-07T08:59:01.603770Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:59:01.675855Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:59:01.720850Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:59:01.767882Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:59:01.884560Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:59:01.940085Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:59:01.975297Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:59:03.776136Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501625715777150378:2375], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:59:03.776283Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:59:03.776602Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501625715777150390:2378], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:59:03.781265Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710673:3, at schemeshard: 72057594046644480 2025-05-07T08:59:03.795409Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501625715777150392:2379], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710673 completed, doublechecking } 2025-05-07T08:59:03.878453Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501625715777150445:2865] txid# 281474976710674, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 18], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:59:04.358590Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710675. Ctx: { TraceId: 01jtmzgjpv2wgwhhza5t63qses, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MjhmZTNmODktN2M3ZTNlNmMtMmU5Yzk1ZjAtZDNkZmRhYzY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:59:04.454407Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710676:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:59:04.525365Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710677:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:59:04.613558Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710678:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:59:04.649278Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710679:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:59:04.682114Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710680:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:59:04.732641Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710681:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:59:04.772001Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710682:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:59:04.851083Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710683:0, at schemeshard: 72057594046644480 2025-05-07T08:59:04.861910Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501625698597279802:2065];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:59:04.862014Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; waiting... 
2025-05-07T08:59:04.909245Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCrea ... ponse: {"state": [{"CreatedTimestamp": 1746608411541, "InflyCount": 0, "InflyVersion": null, "MessageCount": 2, "RetentionBoundary": 0}]} 2025-05-07T09:00:12.343442Z node 7 :SQS DEBUG: executor.cpp:401: Request [] Query(idx=GET_STATE_ID) Queue [cloud4/000000000000000101v0] execution duration: 21ms 2025-05-07T09:00:12.343899Z node 7 :SQS DEBUG: queue_leader.cpp:556: Request [] Sending executed reply 2025-05-07T09:00:12.344057Z node 7 :SQS DEBUG: queue_leader.cpp:288: Handle state for [cloud4/000000000000000101v0] 2025-05-07T09:00:12.344212Z node 7 :SQS DEBUG: executor.cpp:114: Request [] Sending execute request for query(idx=GET_OLDEST_MESSAGE_TIMESTAMP_METRIC_ID) to queue leader 2025-05-07T09:00:12.344274Z node 7 :SQS DEBUG: queue_leader.cpp:514: Request [] Executing compiled query(idx=GET_OLDEST_MESSAGE_TIMESTAMP_METRIC_ID) 2025-05-07T09:00:12.344343Z node 7 :SQS DEBUG: executor.cpp:83: Request [] Starting executor actor for query(idx=GET_OLDEST_MESSAGE_TIMESTAMP_METRIC_ID). Mode: COMPILE_AND_EXEC 2025-05-07T09:00:12.344448Z node 7 :SQS TRACE: executor.cpp:154: Request [] Query(idx=GET_OLDEST_MESSAGE_TIMESTAMP_METRIC_ID) Queue [cloud4/000000000000000101v0] Serializing params: {"QUEUE_ID_NUMBER": 2, "QUEUE_ID_NUMBER_HASH": 17472595041006102391, "SHARD": 0, "QUEUE_ID_NUMBER_AND_SHARD_HASH": 12311263855443095412, "TIME_FROM": 1746608411898} 2025-05-07T09:00:12.344822Z node 7 :SQS TRACE: executor.cpp:203: Request [] Query(idx=GET_OLDEST_MESSAGE_TIMESTAMP_METRIC_ID) Queue [cloud4/000000000000000101v0] Execute program: { Transaction { MiniKQLTransaction { Mode: COMPILE_AND_EXEC Program { Bin: "\037\016\nFlags\010Name\010Args\016Payload\022Parameter\014Offset\032SentTimestamp\006\002\206\202\t\211\004\202\203\005@\206\205\004\207\203\010\207\203\010\026\032$SetResult\000\003?\002\020messages\t\211\004?\016\205\004?\016\203\014\020List$Truncated\203\004\030Member\000\t\211\026?\026\203\005\004\200\205\004\203\004\203\004\026\032\213\010\203\010\203\010\203\010\203\010\213\010?$?&\203\010\203\010\203\004\203\010\203\010\203\004\206\203\014\203\014,SelectRange\000\003?\034 \000\001\205\000\000\000\000\001\032\000\000\000\000\000\000\000?\014\005?\"\003?\036\010\003? \006\003\013?,\t\351\000?$\005\205\004\206\205\004\203\010\203\005@\002\006\203\005@\n\016\006\000?R\003?T(QUEUE_ID_NUMBER_HASH\003\022\000\t\351\000?&\005\205\004\206\205\004\203\010\203\005@\002\006\203\005@\n\016\006\000?h\003?j\036QUEUE_ID_NUMBER\003\022\000\t\351\000?(\005\205\004\206\205\004\203\010\203\005@\002\006\203\005@\n\016\006\000?~\003?\200\022TIME_FROM\003\022\000\003?*\000\010\013?2?`?v\003?.\177\377\377\377\377\377\377\377\377\003?0\177\377\377\377\377\377\377\377\377\014\003?4\000\003?6\002\003?8\000\003?:\000\006\010?>\003\203\014\000\003\203\014\000\003\203\014\000\003\203\014\000\017\003?@\000\377\007\003?\030\000\002\001\000/" } Params { Bin: "\037\000\005\205\n\203\010\203\010\203\010\203\004\203\010U~\252e\037/" } FlatMKQL: true } } ExecTimeoutPeriod: 60000 }. 
Params: {"QUEUE_ID_NUMBER": 2, "QUEUE_ID_NUMBER_HASH": 17472595041006102391, "SHARD": 0, "QUEUE_ID_NUMBER_AND_SHARD_HASH": 12311263855443095412, "TIME_FROM": 1746608411898} 2025-05-07T09:00:12.364782Z node 7 :SQS TRACE: executor.cpp:286: Request [863e23e4-da415ecd-81312105-4dc59f14] Query(idx=INTERNAL_GET_QUEUE_ATTRIBUTES_ID) Queue [cloud4/000000000000000101v0] HandleResponse { Status: 48 TxId: 281474976710703 Step: 1746608412381 StatusCode: SUCCESS ExecutionEngineStatus: 1 ExecutionEngineResponseStatus: 2 ExecutionEngineEvaluatedResponse { Type { Kind: Struct Struct { Member { Name: "attrs" Type { Kind: Optional Optional { Item { Kind: Optional Optional { Item { Kind: Struct Struct { Member { Name: "ContentBasedDeduplication" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } Member { Name: "DelaySeconds" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "DlqArn" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "DlqName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "FifoQueue" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } Member { Name: "MaxReceiveCount" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "MaximumMessageSize" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "MessageRetentionPeriod" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "ReceiveMessageWaitTime" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "ShowDetailedCountersDeadline" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "VisibilityTimeout" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } } } } } } } } Member { Name: "queueExists" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } Member { Name: "tags" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } } } Value { Struct { Optional { Optional { Struct { Optional { Bool: true } } Struct { Optional { Uint64: 0 } } Struct { Optional { Text: "" } } Struct { Optional { Text: "" } } Struct { Optional { Bool: true } } Struct { Optional { Uint64: 0 } } Struct { Optional { Uint64: 262144 } } Struct { Optional { Uint64: 345600000 } } Struct { Optional { Uint64: 0 } } Struct { } Struct { Optional { Uint64: 30000 } } } } } Struct { Optional { Bool: true } } Struct { Optional { Text: "{}" } } } } } 2025-05-07T09:00:12.364833Z node 7 :SQS DEBUG: executor.cpp:287: Request [863e23e4-da415ecd-81312105-4dc59f14] Query(idx=INTERNAL_GET_QUEUE_ATTRIBUTES_ID) Queue [cloud4/000000000000000101v0] Attempt 1 execution duration: 52ms 2025-05-07T09:00:12.365470Z node 7 :SQS TRACE: executor.cpp:325: Request [863e23e4-da415ecd-81312105-4dc59f14] Query(idx=INTERNAL_GET_QUEUE_ATTRIBUTES_ID) Queue [cloud4/000000000000000101v0] Sending mkql execution result: { Status: 48 TxId: 281474976710703 Step: 1746608412381 StatusCode: SUCCESS ExecutionEngineStatus: 1 ExecutionEngineResponseStatus: 2 ExecutionEngineEvaluatedResponse { Type { Kind: Struct Struct { Member { Name: "attrs" Type { Kind: Optional Optional { Item { Kind: Optional Optional { Item { Kind: Struct Struct { Member { Name: "ContentBasedDeduplication" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } Member { Name: 
"DelaySeconds" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "DlqArn" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "DlqName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "FifoQueue" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } Member { Name: "MaxReceiveCount" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "MaximumMessageSize" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "MessageRetentionPeriod" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "ReceiveMessageWaitTime" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "ShowDetailedCountersDeadline" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "VisibilityTimeout" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } } } } } } } } Member { Name: "queueExists" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } Member { Name: "tags" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } } } Value { Struct { Optional { Optional { Struct { Optional { Bool: true } } Struct { Optional { Uint64: 0 } } Struct { Optional { Text: "" } } Struct { Optional { Text: "" } } Struct { Optional { Bool: true } } Struct { Optional { Uint64: 0 } } Struct { Optional { Uint64: 262144 } } Struct { Optional { Uint64: 345600000 } } Struct { Optional { Uint64: 0 } } Struct { } Struct { Optional { Uint64: 30000 } } } } } Struct { Optional { Bool: true } } Struct { Optional { Text: "{}" } } } } } 2025-05-07T09:00:12.365594Z node 7 :SQS TRACE: executor.cpp:327: Request [863e23e4-da415ecd-81312105-4dc59f14] Query(idx=INTERNAL_GET_QUEUE_ATTRIBUTES_ID) Queue [cloud4/000000000000000101v0] Minikql data response: {"attrs": {"ContentBasedDeduplication": true, "DelaySeconds": 0, "DlqArn": "", "DlqName": "", "FifoQueue": true, "MaxReceiveCount": 0, "MaximumMessageSize": 262144, "MessageRetentionPeriod": 345600000, "ReceiveMessageWaitTime": 0, "ShowDetailedCountersDeadline": null, "VisibilityTimeout": 30000}, "queueExists": true, "tags": "{}"} 2025-05-07T09:00:12.365824Z node 7 :SQS DEBUG: executor.cpp:401: Request [863e23e4-da415ecd-81312105-4dc59f14] Query(idx=INTERNAL_GET_QUEUE_ATTRIBUTES_ID) Queue [cloud4/000000000000000101v0] execution duration: 60ms 2025-05-07T09:00:12.366698Z node 7 :SQS DEBUG: queue_leader.cpp:556: Request [863e23e4-da415ecd-81312105-4dc59f14] Sending executed reply 2025-05-07T09:00:12.367300Z node 7 :SQS TRACE: executor.cpp:286: Request [] Query(idx=GET_OLDEST_MESSAGE_TIMESTAMP_METRIC_ID) Queue [cloud4/000000000000000101v0] HandleResponse { Status: 48 TxId: 281474976710705 StatusCode: SUCCESS ExecutionEngineStatus: 1 ExecutionEngineResponseStatus: 2 ExecutionEngineEvaluatedResponse { Type { Kind: Struct Struct { Member { Name: "messages" Type { Kind: Optional Optional { Item { Kind: List List { Item { Kind: Struct Struct { Member { Name: "Offset" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "SentTimestamp" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } } } } } } } } } } Value { Struct { Optional { List { Struct { Optional { Uint64: 1 } } Struct { Optional { Uint64: 1746608411898 } } } } } } } } 
2025-05-07T09:00:12.367322Z node 7 :SQS DEBUG: executor.cpp:287: Request [] Query(idx=GET_OLDEST_MESSAGE_TIMESTAMP_METRIC_ID) Queue [cloud4/000000000000000101v0] Attempt 1 execution duration: 22ms
2025-05-07T09:00:12.367509Z node 7 :SQS TRACE: executor.cpp:325: Request [] Query(idx=GET_OLDEST_MESSAGE_TIMESTAMP_METRIC_ID) Queue [cloud4/000000000000000101v0] Sending mkql execution result: { Status: 48 TxId: 281474976710705 StatusCode: SUCCESS ExecutionEngineStatus: 1 ExecutionEngineResponseStatus: 2 ExecutionEngineEvaluatedResponse { Type { Kind: Struct Struct { Member { Name: "messages" Type { Kind: Optional Optional { Item { Kind: List List { Item { Kind: Struct Struct { Member { Name: "Offset" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "SentTimestamp" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } } } } } } } } } } Value { Struct { Optional { List { Struct { Optional { Uint64: 1 } } Struct { Optional { Uint64: 1746608411898 } } } } } } } }
2025-05-07T09:00:12.367556Z node 7 :SQS TRACE: executor.cpp:327: Request [] Query(idx=GET_OLDEST_MESSAGE_TIMESTAMP_METRIC_ID) Queue [cloud4/000000000000000101v0] Minikql data response: {"messages": [{"Offset": 1, "SentTimestamp": 1746608411898}]}
2025-05-07T09:00:12.367624Z node 7 :SQS DEBUG: executor.cpp:401: Request [] Query(idx=GET_OLDEST_MESSAGE_TIMESTAMP_METRIC_ID) Queue [cloud4/000000000000000101v0] execution duration: 23ms
2025-05-07T09:00:12.367774Z node 7 :SQS DEBUG: queue_leader.cpp:556: Request [] Sending executed reply
2025-05-07T09:00:12.367869Z node 7 :SQS DEBUG: queue_leader.cpp:1913: Handle oldest timestamp metrics for [cloud4/000000000000000101v0/0]
>> YdbLogStore::LogStore
>> TFlatTableExecutor_ResourceProfile::TestExecutorRequestMemoryFollower [GOOD]
>> TFlatTableExecutor_ResourceProfile::TestExecutorMemoryLimitExceeded
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> TColumnShardTestReadWrite::CompactionSplitGranule_PKInt64
2025-05-07 09:00:07,013 WARNING devtools.ya.test.programs.test_tool.run_test.run_test: Wrapper execution timed out
2025-05-07 09:00:07,239 WARNING devtools.ya.test.programs.test_tool.run_test.run_test: Wrapper has overrun 600 secs timeout.
Process tree before termination: pid rss ref pdirt 136970 46.8M 46.1M 23.9M test_tool run_ut @/home/runner/.ya/build/build_root/zvgn/003a85/ydb/core/tx/columnshard/ut_rw/test-results/unittest/testing_out_stuff/chunk30/testing_out_stuff/test_tool.args 137051 1.4G 1.4G 1.4G └─ ydb-core-tx-columnshard-ut_rw --trace-path-append /home/runner/.ya/build/build_root/zvgn/003a85/ydb/core/tx/columnshard/ut_rw/test-results/unittest/testing_out_stuff/chu Test command err: 2025-05-07T08:50:09.034732Z node 1 :BLOB_CACHE NOTICE: ctor_logger.h:56: MaxCacheDataSize: 20971520 InFlightDataSize: 0 2025-05-07T08:50:09.134220Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-05-07T08:50:09.155652Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-05-07T08:50:09.155956Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-05-07T08:50:09.164621Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-05-07T08:50:09.164858Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-05-07T08:50:09.165150Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-05-07T08:50:09.165283Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-05-07T08:50:09.165357Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-05-07T08:50:09.165438Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanInsertionDedup; 2025-05-07T08:50:09.165542Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-05-07T08:50:09.165682Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-05-07T08:50:09.165783Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-05-07T08:50:09.165875Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-05-07T08:50:09.165989Z node 1 
:TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-05-07T08:50:09.166131Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-05-07T08:50:09.193889Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-05-07T08:50:09.194089Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:137;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=11;current_normalizer=CLASS_NAME=Granules; 2025-05-07T08:50:09.194136Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-05-07T08:50:09.194293Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-05-07T08:50:09.194497Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-05-07T08:50:09.194583Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-05-07T08:50:09.194620Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-05-07T08:50:09.194695Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-05-07T08:50:09.194754Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-05-07T08:50:09.194842Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-05-07T08:50:09.194888Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-05-07T08:50:09.195100Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-05-07T08:50:09.195160Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-05-07T08:50:09.195193Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-05-07T08:50:09.195217Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 
2025-05-07T08:50:09.195308Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-05-07T08:50:09.195353Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-05-07T08:50:09.195401Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanInsertionDedup;id=CleanInsertionDedup; 2025-05-07T08:50:09.195427Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=8;type=CleanInsertionDedup; 2025-05-07T08:50:09.195488Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanInsertionDedup;id=8; 2025-05-07T08:50:09.195520Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-05-07T08:50:09.195538Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-05-07T08:50:09.195607Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-05-07T08:50:09.195641Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-05-07T08:50:09.195662Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-05-07T08:50:09.195832Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-05-07T08:50:09.195879Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-05-07T08:50:09.195912Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-05-07T08:50:09.196064Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-05-07T08:50:09.196098Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-05-07T08:50:09.196118Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-05-07T08:50:09.196208Z 
node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-05-07T08:50:09.196249Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-05-07T08:50:09.196287Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-05-07T08:50:09.196350Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-05-07T08:50:09.196434Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-05-07T08:50:09.196464Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-05-07T08:50:09.196483Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-05-07T08:50:09.196795Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=36; 2025-05-07T08:50:09.196875Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=38; ... 
ScanId=0;TxId=122;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:187;stage=start;iterator=ready_results:(count:425;records_count:1787930;schema=timestamp: int64 message: string;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,6;column_names=message,timestamp;);;program_input=(column_ids=1,6;column_names=message,timestamp;);;;); 2025-05-07T09:00:07.111024Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:9524:11511];TabletId=9437184;ScanId=0;TxId=122;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:73;event=DoExtractReadyResults;result=0;count=0;finished=0; 2025-05-07T09:00:07.111070Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:9524:11511];TabletId=9437184;ScanId=0;TxId=122;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:198;stage=limit exhausted;limit=limits:(bytes=0;chunks=0);; 2025-05-07T09:00:07.118627Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: external_task_id=;fline=actor.cpp:48;task=agents_waiting=1;additional_info=();; 2025-05-07T09:00:07.121654Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:9524:11511];TabletId=9437184;ScanId=0;TxId=122;ScanGen=0;task_identifier=;fline=actor.cpp:84;event=TEvTaskProcessedResult; 2025-05-07T09:00:07.121738Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:9524:11511];TabletId=9437184;ScanId=0;TxId=122;ScanGen=0;task_identifier=;fline=fetching.cpp:17;event=apply; 2025-05-07T09:00:07.121786Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:9524:11511];TabletId=9437184;ScanId=0;TxId=122;ScanGen=0;task_identifier=;fline=interval.cpp:28;event=fetched;interval_idx=555; 2025-05-07T09:00:07.121841Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:9524:11511];TabletId=9437184;ScanId=0;TxId=122;ScanGen=0;task_identifier=;fline=interval.cpp:17;event=start_construct_result;interval_idx=555;interval_id=556;memory=8403280;count=1; 2025-05-07T09:00:07.122689Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:9524:11511];TabletId=9437184;ScanId=0;TxId=122;ScanGen=0;task_identifier=;fline=merge.cpp:60;event=update_memory_merger;before_data=1406;before_memory=1468;after_memory=1468;after_data=1406;guard=8403280; 2025-05-07T09:00:07.122790Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:9524:11511];TabletId=9437184;ScanId=0;TxId=122;ScanGen=0;task_identifier=;fline=source.cpp:51;event=source_ready;intervals_count=1;source_idx=486; 2025-05-07T09:00:07.122964Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:9524:11511];TabletId=9437184;ScanId=0;TxId=122;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:187;stage=start;iterator=ready_results:(count:425;records_count:1787930;schema=timestamp: int64 message: string;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,6;column_names=message,timestamp;);;program_input=(column_ids=1,6;column_names=message,timestamp;);;;); 2025-05-07T09:00:07.123014Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:9524:11511];TabletId=9437184;ScanId=0;TxId=122;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:73;event=DoExtractReadyResults;result=0;count=0;finished=0; 2025-05-07T09:00:07.123058Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: 
SelfId=[1:9524:11511];TabletId=9437184;ScanId=0;TxId=122;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:198;stage=limit exhausted;limit=limits:(bytes=0;chunks=0);; 2025-05-07T09:00:07.131115Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: event_type=NKikimr::NBlobCache::TEvBlobCache::TEvReadBlobRangeResult;fline=task.cpp:110;event=OnDataReady;task=agents_waiting=0;additional_info=();;external_task_id=; 2025-05-07T09:00:07.131280Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: event_type=NKikimr::NBlobCache::TEvBlobCache::TEvReadBlobRangeResult;fline=fetching.cpp:67;scan_step=name=ASSEMBLER::LAST;details={columns=(column_ids=1,6;column_names=message,timestamp;);;};;scan_step_idx=2;source_id=25003; 2025-05-07T09:00:07.131877Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: event_type=NKikimr::NBlobCache::TEvBlobCache::TEvReadBlobRangeResult;fline=fetching.cpp:67;scan_step=name=BUILD_STAGE_RESULT;details={};;scan_step_idx=3;source_id=25003; 2025-05-07T09:00:07.134557Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:9524:11511];TabletId=9437184;ScanId=0;TxId=122;ScanGen=0;task_identifier=;fline=actor.cpp:84;event=TEvTaskProcessedResult; 2025-05-07T09:00:07.134773Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:9524:11511];TabletId=9437184;ScanId=0;TxId=122;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:187;stage=start;iterator=ready_results:(count:425;records_count:1787930;schema=timestamp: int64 message: string;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,6;column_names=message,timestamp;);;program_input=(column_ids=1,6;column_names=message,timestamp;);;;); 2025-05-07T09:00:07.134833Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:9524:11511];TabletId=9437184;ScanId=0;TxId=122;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:73;event=DoExtractReadyResults;result=0;count=0;finished=0; 2025-05-07T09:00:07.134878Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:9524:11511];TabletId=9437184;ScanId=0;TxId=122;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:198;stage=limit exhausted;limit=limits:(bytes=0;chunks=0);; 2025-05-07T09:00:07.137885Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:9524:11511];TabletId=9437184;ScanId=0;TxId=122;ScanGen=0;task_identifier=;fline=actor.cpp:84;event=TEvTaskProcessedResult; 2025-05-07T09:00:07.142026Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:9524:11511];TabletId=9437184;ScanId=0;TxId=122;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:187;stage=start;iterator=ready_results:(count:425;records_count:1787930;schema=timestamp: int64 message: string;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,6;column_names=message,timestamp;);;program_input=(column_ids=1,6;column_names=message,timestamp;);;;); 2025-05-07T09:00:07.142106Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:9524:11511];TabletId=9437184;ScanId=0;TxId=122;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:73;event=DoExtractReadyResults;result=0;count=0;finished=0; 2025-05-07T09:00:07.142153Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:9524:11511];TabletId=9437184;ScanId=0;TxId=122;ScanGen=0;task_identifier=;method=produce 
result;fline=actor.cpp:198;stage=limit exhausted;limit=limits:(bytes=0;chunks=0);;
2025-05-07T09:00:07.143572Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: event_type=NKikimr::NBlobCache::TEvBlobCache::TEvReadBlobRangeResult;fline=task.cpp:110;event=OnDataReady;task=agents_waiting=0;additional_info=();;external_task_id=;
2025-05-07T09:00:07.159624Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: event_type=NKikimr::NBlobCache::TEvBlobCache::TEvReadBlobRangeResult;fline=fetching.cpp:67;scan_step=name=ASSEMBLER::LAST;details={columns=(column_ids=1,6;column_names=message,timestamp;);;};;scan_step_idx=2;source_id=25004;
2025-05-07T09:00:07.160322Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: event_type=NKikimr::NBlobCache::TEvBlobCache::TEvReadBlobRangeResult;fline=fetching.cpp:67;scan_step=name=BUILD_STAGE_RESULT;details={};;scan_step_idx=3;source_id=25004;
2025-05-07T09:00:07.162687Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:9524:11511];TabletId=9437184;ScanId=0;TxId=122;ScanGen=0;task_identifier=;fline=actor.cpp:84;event=TEvTaskProcessedResult;
2025-05-07T09:00:07.162927Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:9524:11511];TabletId=9437184;ScanId=0;TxId=122;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:187;stage=start;iterator=ready_results:(count:425;records_count:1787930;schema=timestamp: int64 message: string;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,6;column_names=message,timestamp;);;program_input=(column_ids=1,6;column_names=message,timestamp;);;;);
2025-05-07T09:00:07.162991Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:9524:11511];TabletId=9437184;ScanId=0;TxId=122;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:73;event=DoExtractReadyResults;result=0;count=0;finished=0;
2025-05-07T09:00:07.163038Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:9524:11511];TabletId=9437184;ScanId=0;TxId=122;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:198;stage=limit exhausted;limit=limits:(bytes=0;chunks=0);;
2025-05-07T09:00:07.186066Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:9524:11511];TabletId=9437184;ScanId=0;TxId=122;ScanGen=0;task_identifier=;fline=actor.cpp:84;event=TEvTaskProcessedResult;
2025-05-07T09:00:07.186304Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:9524:11511];TabletId=9437184;ScanId=0;TxId=122;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:187;stage=start;iterator=ready_results:(count:425;records_count:1787930;schema=timestamp: int64 message: string;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,6;column_names=message,timestamp;);;program_input=(column_ids=1,6;column_names=message,timestamp;);;;);
2025-05-07T09:00:07.186357Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:9524:11511];TabletId=9437184;ScanId=0;TxId=122;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:73;event=DoExtractReadyResults;result=0;count=0;finished=0;
2025-05-07T09:00:07.186404Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:9524:11511];TabletId=9437184;ScanId=0;TxId=122;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:198;stage=limit exhausted;limit=limits:(bytes=0;chunks=0);;
Traceback (most recent call last):
  File "library/python/testing/yatest_common/yatest/common/process.py", line 384, in wait
    wait_for(
  File "library/python/testing/yatest_common/yatest/common/process.py", line 764, in wait_for
    raise TimeoutError(truncate(message, MAX_MESSAGE_LEN))
yatest.common.process.TimeoutError: 600 second(s) wait timeout has expired: Command '['/home/runner/.ya/tools/v4/8580453620/test_tool', 'run_ut', '@/home/runner/.ya/build/build_root/zvgn/003a85/ydb/core/tx/columnshard/ut_rw/test-results/unittest/testing_out_stuff/chunk30/testing_out_stuff/test_tool.args']' stopped by 600 seconds timeout

During handling of the above exception, another exception occurred:

Traceback (most recent call last):
  File "devtools/ya/test/programs/test_tool/run_test/run_test.py", line 1738, in main
    res.wait(check_exit_code=False, timeout=run_timeout, on_timeout=timeout_callback)
  File "library/python/testing/yatest_common/yatest/common/process.py", line 398, in wait
    raise ExecutionTimeoutError(self, str(e))
yatest.common.process.ExecutionTimeoutError: (("600 second(s) wait timeout has expired: Command '['/home/runner/.ya/tools/v4/8580453620/test_tool', 'run_ut', '@/home/runner/.ya/build/build_root/zvgn/003a85/ydb/core/tx/columnshard/ut_rw/test-results/unittest/testing_out_stuff/chunk30/testing_out_stuff/test_tool.args']' stopped by 600 seconds timeout",), {})
>> YdbYqlClient::TestMultipleModifications [GOOD]
>> YdbYqlClient::TestDescribeTableWithShardStats
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> TColumnShardTestReadWrite::CompactionSplitGranule_PKTimestamp
2025-05-07 09:00:02,789 WARNING devtools.ya.test.programs.test_tool.run_test.run_test: Wrapper execution timed out
2025-05-07 09:00:03,047 WARNING devtools.ya.test.programs.test_tool.run_test.run_test: Wrapper has overrun 600 secs timeout.
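[Editor's note: the CompactionSplitGranule_PKInt64 timeout above (and the PKTimestamp one further down) dies through the same two-level chain shown in the tracebacks: an inner yatest wait loop raises TimeoutError at 600 s, and run_test.py converts it into ExecutionTimeoutError after firing a timeout callback, which is what emits the "Process tree before termination" dump that follows. A minimal sketch of that pattern; apart from the two exception names visible in the tracebacks, all names are hypothetical, and this is not yatest's actual implementation:

# Minimal sketch of the timeout chain in the tracebacks above: an inner
# polling wait raises TimeoutError, the outer runner fires an on_timeout
# hook (e.g. dumping the process tree) and re-raises ExecutionTimeoutError.
import subprocess
import time


class ExecutionTimeoutError(RuntimeError):
    """Outer error wrapping the inner TimeoutError, as in run_test.py."""


def wait_for(proc, timeout, poll_interval=0.1):
    deadline = time.monotonic() + timeout
    while proc.poll() is None:  # child still running
        if time.monotonic() >= deadline:
            raise TimeoutError(
                f"{timeout} second(s) wait timeout has expired: "
                f"Command {proc.args!r} stopped by {timeout} seconds timeout")
        time.sleep(poll_interval)
    return proc.returncode


def run_with_timeout(cmd, run_timeout, on_timeout=None):
    proc = subprocess.Popen(cmd)
    try:
        return wait_for(proc, run_timeout)
    except TimeoutError as e:
        if on_timeout is not None:
            on_timeout(proc)  # e.g. log the process tree before termination
        proc.kill()
        raise ExecutionTimeoutError(str(e)) from e
]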
Process tree before termination: pid rss ref pdirt 136044 46.7M 42.1M 23.9M test_tool run_ut @/home/runner/.ya/build/build_root/zvgn/003ab8/ydb/core/tx/columnshard/ut_rw/test-results/unittest/testing_out_stuff/chunk31/testing_out_stuff/test_tool.args 136268 1.6G 1.5G 1.5G └─ ydb-core-tx-columnshard-ut_rw --trace-path-append /home/runner/.ya/build/build_root/zvgn/003ab8/ydb/core/tx/columnshard/ut_rw/test-results/unittest/testing_out_stuff/chu Test command err: 2025-05-07T08:50:04.358474Z node 1 :BLOB_CACHE NOTICE: ctor_logger.h:56: MaxCacheDataSize: 20971520 InFlightDataSize: 0 2025-05-07T08:50:04.487775Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-05-07T08:50:04.521269Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-05-07T08:50:04.521532Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-05-07T08:50:04.531431Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-05-07T08:50:04.531627Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-05-07T08:50:04.531842Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-05-07T08:50:04.531941Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-05-07T08:50:04.532035Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-05-07T08:50:04.532111Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanInsertionDedup; 2025-05-07T08:50:04.532176Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-05-07T08:50:04.532249Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-05-07T08:50:04.532332Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-05-07T08:50:04.532419Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-05-07T08:50:04.532487Z node 1 
:TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-05-07T08:50:04.532565Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-05-07T08:50:04.567695Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-05-07T08:50:04.567941Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:137;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=11;current_normalizer=CLASS_NAME=Granules; 2025-05-07T08:50:04.568036Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-05-07T08:50:04.568263Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-05-07T08:50:04.568467Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-05-07T08:50:04.568559Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-05-07T08:50:04.568609Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-05-07T08:50:04.568710Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-05-07T08:50:04.568777Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-05-07T08:50:04.568844Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-05-07T08:50:04.568887Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-05-07T08:50:04.569092Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-05-07T08:50:04.569169Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-05-07T08:50:04.569218Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-05-07T08:50:04.569265Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 
2025-05-07T08:50:04.569357Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-05-07T08:50:04.569416Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-05-07T08:50:04.569484Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanInsertionDedup;id=CleanInsertionDedup; 2025-05-07T08:50:04.569539Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=8;type=CleanInsertionDedup; 2025-05-07T08:50:04.569639Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanInsertionDedup;id=8; 2025-05-07T08:50:04.569691Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-05-07T08:50:04.569721Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-05-07T08:50:04.569794Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-05-07T08:50:04.569836Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-05-07T08:50:04.569869Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-05-07T08:50:04.571320Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-05-07T08:50:04.571424Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-05-07T08:50:04.571476Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-05-07T08:50:04.571713Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-05-07T08:50:04.571771Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-05-07T08:50:04.571810Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-05-07T08:50:04.571970Z 
node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-05-07T08:50:04.572022Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-05-07T08:50:04.572062Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-05-07T08:50:04.572169Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-05-07T08:50:04.572257Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-05-07T08:50:04.572318Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-05-07T08:50:04.572357Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-05-07T08:50:04.572837Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=52; 2025-05-07T08:50:04.572929Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=39; ... 
unt=1; 2025-05-07T09:00:02.857821Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:9524:11511];TabletId=9437184;ScanId=0;TxId=122;ScanGen=0;task_identifier=;fline=merge.cpp:60;event=update_memory_merger;before_data=1387;before_memory=1392;after_memory=1392;after_data=1387;guard=8402624; 2025-05-07T09:00:02.857892Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:9524:11511];TabletId=9437184;ScanId=0;TxId=122;ScanGen=0;task_identifier=;fline=source.cpp:51;event=source_ready;intervals_count=1;source_idx=621; 2025-05-07T09:00:02.858093Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:9524:11511];TabletId=9437184;ScanId=0;TxId=122;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:187;stage=start;iterator=ready_results:(count:491;records_count:1792781;schema=timestamp: timestamp[us] message: string;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,6;column_names=message,timestamp;);;program_input=(column_ids=1,6;column_names=message,timestamp;);;;); 2025-05-07T09:00:02.858148Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:9524:11511];TabletId=9437184;ScanId=0;TxId=122;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:73;event=DoExtractReadyResults;result=0;count=0;finished=0; 2025-05-07T09:00:02.858198Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:9524:11511];TabletId=9437184;ScanId=0;TxId=122;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:198;stage=limit exhausted;limit=limits:(bytes=0;chunks=0);; 2025-05-07T09:00:03.025756Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: event_type=NKikimr::NBlobCache::TEvBlobCache::TEvReadBlobRangeResult;fline=task.cpp:110;event=OnDataReady;task=agents_waiting=0;additional_info=();;external_task_id=; 2025-05-07T09:00:03.025962Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: event_type=NKikimr::NBlobCache::TEvBlobCache::TEvReadBlobRangeResult;fline=fetching.cpp:67;scan_step=name=ASSEMBLER::LAST;details={columns=(column_ids=1,6;column_names=message,timestamp;);;};;scan_step_idx=2;source_id=25134; 2025-05-07T09:00:03.027314Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: event_type=NKikimr::NBlobCache::TEvBlobCache::TEvReadBlobRangeResult;fline=fetching.cpp:67;scan_step=name=BUILD_STAGE_RESULT;details={};;scan_step_idx=3;source_id=25134; 2025-05-07T09:00:03.028644Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:9524:11511];TabletId=9437184;ScanId=0;TxId=122;ScanGen=0;task_identifier=;fline=actor.cpp:84;event=TEvTaskProcessedResult; 2025-05-07T09:00:03.028711Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:9524:11511];TabletId=9437184;ScanId=0;TxId=122;ScanGen=0;task_identifier=;fline=merge.cpp:74;event=DoApply;interval_idx=560; 2025-05-07T09:00:03.028764Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:9524:11511];TabletId=9437184;ScanId=0;TxId=122;ScanGen=0;task_identifier=;fline=scanner.cpp:21;event=interval_result_received;interval_idx=560;intervalId=561; 2025-05-07T09:00:03.028843Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:9524:11511];TabletId=9437184;ScanId=0;TxId=122;ScanGen=0;task_identifier=;fline=scanner.cpp:47;event=interval_result;interval_idx=560;count=73;merger=0;interval_id=561; 2025-05-07T09:00:03.028899Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: 
SelfId=[1:9524:11511];TabletId=9437184;ScanId=0;TxId=122;ScanGen=0;task_identifier=;fline=scanner.cpp:43;event=interval_result_absent;interval_idx=561;merger=0;interval_id=562; 2025-05-07T09:00:03.028938Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:9524:11511];TabletId=9437184;ScanId=0;TxId=122;ScanGen=0;task_identifier=;fline=scanner.cpp:67;event=wait_interval;remained=255;interval_idx=561; 2025-05-07T09:00:03.029107Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:9524:11511];TabletId=9437184;ScanId=0;TxId=122;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:187;stage=start;iterator=ready_results:(count:491;records_count:1792781;schema=timestamp: timestamp[us] message: string;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,6;column_names=message,timestamp;);;program_input=(column_ids=1,6;column_names=message,timestamp;);;;); 2025-05-07T09:00:03.029154Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:9524:11511];TabletId=9437184;ScanId=0;TxId=122;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:73;event=DoExtractReadyResults;result=1;count=73;finished=0; 2025-05-07T09:00:03.029201Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:9524:11511];TabletId=9437184;ScanId=0;TxId=122;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:198;stage=limit exhausted;limit=limits:(bytes=0;chunks=0);; 2025-05-07T09:00:03.029247Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:9524:11511];TabletId=9437184;ScanId=0;TxId=122;ScanGen=0;task_identifier=;fline=scanner.h:52;add_source=747; 2025-05-07T09:00:03.029318Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:9524:11511];TabletId=9437184;ScanId=0;TxId=122;ScanGen=0;task_identifier=;fline=interval.cpp:52;event=register_source;interval_idx=816;interval_id=817; 2025-05-07T09:00:03.029460Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:9524:11511];TabletId=9437184;ScanId=0;TxId=122;ScanGen=0;task_identifier=;fline=source.cpp:33;InitFetchingPlan={branch:simple;steps:[{name=ALLOCATE_MEMORY::FETCHING;details={stage=FETCHING;column_ids=[Blob:1,Blob:6,Raw:1,Raw:6];};};{name=FETCHING_COLUMNS;details={columns=1,6;};};{name=ASSEMBLER::LAST;details={columns=(column_ids=1,6;column_names=message,timestamp;);;};};{name=BUILD_STAGE_RESULT;details={};};]};source_idx=747; 2025-05-07T09:00:03.029561Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:9524:11511];TabletId=9437184;ScanId=0;TxId=122;ScanGen=0;task_identifier=;source=747;method=InitFetchingPlan;fline=fetching.cpp:67;scan_step=name=ALLOCATE_MEMORY::FETCHING;details={stage=FETCHING;column_ids=[Blob:1,Blob:6,Raw:1,Raw:6];};;scan_step_idx=0;source_id=25138; 2025-05-07T09:00:03.029628Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:9524:11511];TabletId=9437184;ScanId=0;TxId=122;ScanGen=0;task_identifier=;source=747;method=InitFetchingPlan;fline=fetching.cpp:67;scan_step=name=FETCHING_COLUMNS;details={columns=1,6;};;scan_step_idx=1;source_id=25138; 2025-05-07T09:00:03.029681Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:9524:11511];TabletId=9437184;ScanId=0;TxId=122;ScanGen=0;task_identifier=;source=747;method=InitFetchingPlan;fline=source.cpp:219;event=FETCHING_COLUMNS;fetching_info=name=FETCHING_COLUMNS;details={columns=1,6;};; 2025-05-07T09:00:03.029834Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: 
SelfId=[1:9524:11511];TabletId=9437184;ScanId=0;TxId=122;ScanGen=0;task_identifier=;fline=interval.cpp:11;event=skip_construct_result;interval_idx=816;count=1;ready=0;interval_id=817; 2025-05-07T09:00:03.030812Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:9524:11511];TabletId=9437184;ScanId=0;TxId=122;ScanGen=0;task_identifier=;fline=scanner.cpp:162;event=new_interval;interval_idx=816;interval={"sources":[{"start":{"sorting":{"sorting_columns":[{"name":"timestamp","value":"42"},{"name":"resource_type","value":"1811597"},{"name":"resource_id","value":"1811597"},{"name":"uid","value":"1811597"}],"fields":["timestamp: timestamp[us]","resource_type: timestamp[us]","resource_id: string","uid: string"]},"reverse":false,"position":0,"records_count":1},"finish":{"sorting":{"sorting_columns":[{"name":"timestamp","value":"42"},{"name":"resource_type","value":"1811669"},{"name":"resource_id","value":"1811669"},{"name":"uid","value":"1811669"}],"fields":["timestamp: timestamp[us]","resource_type: timestamp[us]","resource_id: string","uid: string"]},"reverse":false,"position":0,"records_count":1},"source_idx":747,"source_id":25138,"specific":{"type":"commit","info":"{ Blob: DS:0:[9437184:2:129:2:0:8294464:0] Offset: 5034744 Size: 7008 };snapshot=plan_step=1746607810027;tx_id=122;;write_id=25138"}}],"merging_context":{"start":{"sorting":{"sorting_columns":[{"name":"timestamp","value":"42"},{"name":"resource_type","value":"1811597"},{"name":"resource_id","value":"1811597"},{"name":"uid","value":"1811597"}],"fields":["timestamp: timestamp[us]","resource_type: timestamp[us]","resource_id: string","uid: string"]},"reverse":false,"position":0,"records_count":1},"exclusive":true,"finish":{"sorting":{"sorting_columns":[{"name":"timestamp","value":"42"},{"name":"resource_type","value":"1811669"},{"name":"resource_id","value":"1811669"},{"name":"uid","value":"1811669"}],"fields":["timestamp: timestamp[us]","resource_type: timestamp[us]","resource_id: string","uid: string"]},"reverse":false,"position":0,"records_count":1},"include_finish":true,"idx":816}}; 2025-05-07T09:00:03.030877Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:9524:11511];TabletId=9437184;ScanId=0;TxId=122;ScanGen=0;task_identifier=;fline=scanner.h:59;remove_source=747; 2025-05-07T09:00:03.032824Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:9524:11511];TabletId=9437184;ScanId=0;TxId=122;ScanGen=0;task_identifier=;fline=actor.cpp:84;event=TEvTaskProcessedResult; 2025-05-07T09:00:03.033010Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:9524:11511];TabletId=9437184;ScanId=0;TxId=122;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:187;stage=start;iterator=ready_results:(count:492;records_count:1792854;schema=timestamp: timestamp[us] message: string;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,6;column_names=message,timestamp;);;program_input=(column_ids=1,6;column_names=message,timestamp;);;;); 2025-05-07T09:00:03.033062Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:9524:11511];TabletId=9437184;ScanId=0;TxId=122;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:73;event=DoExtractReadyResults;result=0;count=0;finished=0; 2025-05-07T09:00:03.038279Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:9524:11511];TabletId=9437184;ScanId=0;TxId=122;ScanGen=0;task_identifier=;method=produce 
result;fline=actor.cpp:198;stage=limit exhausted;limit=limits:(bytes=0;chunks=0);;
Traceback (most recent call last):
  File "library/python/testing/yatest_common/yatest/common/process.py", line 384, in wait
    wait_for(
  File "library/python/testing/yatest_common/yatest/common/process.py", line 764, in wait_for
    raise TimeoutError(truncate(message, MAX_MESSAGE_LEN))
yatest.common.process.TimeoutError: 600 second(s) wait timeout has expired: Command '['/home/runner/.ya/tools/v4/8580453620/test_tool', 'run_ut', '@/home/runner/.ya/build/build_root/zvgn/003ab8/ydb/core/tx/columnshard/ut_rw/test-results/unittest/testing_out_stuff/chunk31/testing_out_stuff/test_tool.args']' stopped by 600 seconds timeout

During handling of the above exception, another exception occurred:

Traceback (most recent call last):
  File "devtools/ya/test/programs/test_tool/run_test/run_test.py", line 1738, in main
    res.wait(check_exit_code=False, timeout=run_timeout, on_timeout=timeout_callback)
  File "library/python/testing/yatest_common/yatest/common/process.py", line 398, in wait
    raise ExecutionTimeoutError(self, str(e))
yatest.common.process.ExecutionTimeoutError: (("600 second(s) wait timeout has expired: Command '['/home/runner/.ya/tools/v4/8580453620/test_tool', 'run_ut', '@/home/runner/.ya/build/build_root/zvgn/003ab8/ydb/core/tx/columnshard/ut_rw/test-results/unittest/testing_out_stuff/chunk31/testing_out_stuff/test_tool.args']' stopped by 600 seconds timeout",), {})
>> TFlatTableExecutor_ResourceProfile::TestExecutorMemoryLimitExceeded [GOOD]
>> TFlatTableExecutor_ResourceProfile::TestExecutorPreserveTxData
>> TopicAutoscaling::ControlPlane_DisableAutoPartitioning [GOOD]
>> TopicAutoscaling::ControlPlane_PauseAutoPartitioning
>> TFlatTableExecutor_ResourceProfile::TestExecutorPreserveTxData [GOOD]
>> Secret::Validation
>> TCmsTenatsTest::TestTenantLimit
>> Cdc::AwsRegion [GOOD]
>> CommitOffset::Commit_WithoutSession_ParentNotFinished [GOOD]
>> CommitOffset::Commit_WithoutSession_ToPastParentPartition
|91.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/security/certificate_check/ut/unittest
>> TTableProfileTests::DescribeTableOptions [GOOD]
>> YdbTableBulkUpsert::ZeroRows [GOOD]
>> TRegisterNodeOverDiscoveryService::ServerWithCertVerification_ClientProvideIncorrectCerts [GOOD]
>> TRegisterNodeOverDiscoveryService::ServerWithCertVerification_ClientDoesNotProvideAnyCerts
------- [TM] {asan, default-linux-x86_64, release} ydb/core/http_proxy/ut/inside_ydb_ut/unittest >> TestYmqHttpProxy::TestListQueueTags [GOOD]
Test command err:
2025-05-07T08:58:59.025201Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501625692366344138:2199];send_to=[0:7307199536658146131:7762515];
2025-05-07T08:58:59.090598Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message;
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/001776/r3tmp/tmpalSC11/pdisk_1.dat
2025-05-07T08:58:59.822779Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2025-05-07T08:58:59.822889Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting
2025-05-07T08:58:59.882627Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected
2025-05-07T08:58:59.927187Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 13890, node 1 2025-05-07T08:59:00.258267Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:59:00.258292Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:59:00.258299Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:59:00.258416Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:25802 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:59:00.664388Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... TClient is connected to server localhost:25802 waiting... 2025-05-07T08:59:00.930389Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976710658:0, at schemeshard: 72057594046644480 2025-05-07T08:59:00.936091Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-05-07T08:59:00.937831Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:59:00.973682Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:59:01.161636Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-05-07T08:59:01.224268Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 waiting... waiting... 
2025-05-07T08:59:01.285517Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:59:01.322715Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:59:01.400251Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:59:01.493086Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:59:01.554840Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:59:01.630148Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:59:01.661260Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:59:03.505085Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501625713841181869:2376], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:59:03.505443Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:59:03.505896Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501625713841181881:2379], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:59:03.510867Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710673:3, at schemeshard: 72057594046644480 2025-05-07T08:59:03.524015Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501625713841181883:2380], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710673 completed, doublechecking } 2025-05-07T08:59:03.622921Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501625713841181934:2867] txid# 281474976710674, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 18], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:59:03.986093Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501625692366344138:2199];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:59:03.986182Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:59:04.076125Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710675. Ctx: { TraceId: 01jtmzgjedb0tv3q74krrpnp40, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MjU5YTE5YjktODRmYWQwOWQtZTA5NTEzYTYtNzQ1MDlkZg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:59:04.141410Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710676:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:59:04.224780Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710677:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:59:04.280543Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710678:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:59:04.329357Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710679:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-05-07T08:59:04.388471Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710680:0, at schemeshard: 72057594046644480 2025-05-07T08:59:04.478961Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710681:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-05-07T08:59:04.566806Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710682:0, at schemeshard: 72057594046644480 waiting... 
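The sequence above — repeated "Resource pool default not found" fetch failures, a CreateResourcePool proposal, a "completed, doublechecking" retry, and finally a "path exist, request accepts it" error from a concurrent creator — is a create-if-missing race resolved idempotently. A hedged sketch of the pattern (the client calls and the exception type are hypothetical stand-ins):

    class PathExistsError(Exception):
        """Stand-in for the 'path exist, request accepts it' status (sketch)."""

    def ensure_default_pool(client, database: str) -> None:
        # Fetch first; on a miss, create and treat "already exists" as
        # success, mirroring the TPoolFetcherActor / TPoolCreatorActor
        # exchange in the log.
        if client.fetch_pool(database, "default") is not None:  # hypothetical
            return
        try:
            tx_id = client.create_resource_pool(database, "default")
            client.wait_tx(tx_id)  # the "completed, doublechecking" step
        except PathExistsError:
            pass                   # a concurrent creator won the race
        # Re-fetch to confirm the pool is now visible.
        assert client.fetch_pool(database, "default") is not None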
2025-05-07T08:59:04.620487Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710683:0, at schemeshard: 72057594046644480 2025-05-07T08:59:04.697401Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710684:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:59:04.818340Z node 1 :HTTP INFO: http_proxy_acceptor.cpp:88: Listening on http://127.0.0.1:7835 2025-05-07T08:59:05.823093Z node 1 :SQS INFO: service.cpp:378: Start SQS service actor 2025-05-07T08:59:05.825372Z node 1 :SQS INFO: proxy_service.cpp:53: Start SQS proxy service actor 202 ... Query(idx=INTERNAL_GET_QUEUE_ATTRIBUTES_ID) Queue [cloud4/000000000000000301v0] HandleResponse { Status: 48 TxId: 281474976710713 Step: 1746608417113 StatusCode: SUCCESS ExecutionEngineStatus: 1 ExecutionEngineResponseStatus: 2 ExecutionEngineEvaluatedResponse { Type { Kind: Struct Struct { Member { Name: "attrs" Type { Kind: Optional Optional { Item { Kind: Optional Optional { Item { Kind: Struct Struct { Member { Name: "ContentBasedDeduplication" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } Member { Name: "DelaySeconds" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "DlqArn" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "DlqName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "FifoQueue" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } Member { Name: "MaxReceiveCount" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "MaximumMessageSize" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "MessageRetentionPeriod" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "ReceiveMessageWaitTime" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "ShowDetailedCountersDeadline" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "VisibilityTimeout" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } } } } } } } } Member { Name: "queueExists" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } Member { Name: "tags" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } } } Value { Struct { Optional { Optional { Struct { Optional { Bool: false } } Struct { Optional { Uint64: 0 } } Struct { Optional { Text: "" } } Struct { Optional { Text: "" } } Struct { Optional { Bool: true } } Struct { Optional { Uint64: 0 } } Struct { Optional { Uint64: 262144 } } Struct { Optional { Uint64: 345600000 } } Struct { Optional { Uint64: 0 } } Struct { } Struct { Optional { Uint64: 30000 } } } } } Struct { Optional { Bool: true } } Struct { Optional { Text: "{}" } } } } } 2025-05-07T09:00:17.078195Z node 7 :SQS DEBUG: executor.cpp:287: Request [e27e03f1-652ac51e-9911aa56-a3fbfdeb] Query(idx=INTERNAL_GET_QUEUE_ATTRIBUTES_ID) Queue [cloud4/000000000000000301v0] Attempt 1 execution duration: 32ms 2025-05-07T09:00:17.078744Z node 7 :SQS TRACE: executor.cpp:325: Request [e27e03f1-652ac51e-9911aa56-a3fbfdeb] 
Query(idx=INTERNAL_GET_QUEUE_ATTRIBUTES_ID) Queue [cloud4/000000000000000301v0] Sending mkql execution result: { Status: 48 TxId: 281474976710713 Step: 1746608417113 StatusCode: SUCCESS ExecutionEngineStatus: 1 ExecutionEngineResponseStatus: 2 ExecutionEngineEvaluatedResponse { Type { Kind: Struct Struct { Member { Name: "attrs" Type { Kind: Optional Optional { Item { Kind: Optional Optional { Item { Kind: Struct Struct { Member { Name: "ContentBasedDeduplication" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } Member { Name: "DelaySeconds" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "DlqArn" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "DlqName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "FifoQueue" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } Member { Name: "MaxReceiveCount" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "MaximumMessageSize" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "MessageRetentionPeriod" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "ReceiveMessageWaitTime" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "ShowDetailedCountersDeadline" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "VisibilityTimeout" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } } } } } } } } Member { Name: "queueExists" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } Member { Name: "tags" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } } } Value { Struct { Optional { Optional { Struct { Optional { Bool: false } } Struct { Optional { Uint64: 0 } } Struct { Optional { Text: "" } } Struct { Optional { Text: "" } } Struct { Optional { Bool: true } } Struct { Optional { Uint64: 0 } } Struct { Optional { Uint64: 262144 } } Struct { Optional { Uint64: 345600000 } } Struct { Optional { Uint64: 0 } } Struct { } Struct { Optional { Uint64: 30000 } } } } } Struct { Optional { Bool: true } } Struct { Optional { Text: "{}" } } } } } 2025-05-07T09:00:17.078835Z node 7 :SQS TRACE: executor.cpp:327: Request [e27e03f1-652ac51e-9911aa56-a3fbfdeb] Query(idx=INTERNAL_GET_QUEUE_ATTRIBUTES_ID) Queue [cloud4/000000000000000301v0] Minikql data response: {"attrs": {"ContentBasedDeduplication": false, "DelaySeconds": 0, "DlqArn": "", "DlqName": "", "FifoQueue": true, "MaxReceiveCount": 0, "MaximumMessageSize": 262144, "MessageRetentionPeriod": 345600000, "ReceiveMessageWaitTime": 0, "ShowDetailedCountersDeadline": null, "VisibilityTimeout": 30000}, "queueExists": true, "tags": "{}"} 2025-05-07T09:00:17.078999Z node 7 :SQS DEBUG: executor.cpp:401: Request [e27e03f1-652ac51e-9911aa56-a3fbfdeb] Query(idx=INTERNAL_GET_QUEUE_ATTRIBUTES_ID) Queue [cloud4/000000000000000301v0] execution duration: 36ms 2025-05-07T09:00:17.079508Z node 7 :SQS DEBUG: queue_leader.cpp:2036: Created new Deduplication cleanup actor for queue [cloud4/000000000000000301v0]. Actor id: [7:7501626030567133763:3679] 2025-05-07T09:00:17.079533Z node 7 :SQS DEBUG: queue_leader.cpp:2036: Created new Reads cleanup actor for queue [cloud4/000000000000000301v0]. 
Actor id: [7:7501626030567133764:3680] 2025-05-07T09:00:17.079563Z node 7 :SQS DEBUG: queue_leader.cpp:2048: Created new retention actor for queue [cloud4/000000000000000301v0]. Actor id: [7:7501626030567133765:3681] 2025-05-07T09:00:17.079592Z node 7 :SQS DEBUG: queue_leader.cpp:2052: Created new purge actor for queue [cloud4/000000000000000301v0]. Actor id: [7:7501626030567133766:3682] 2025-05-07T09:00:17.079604Z node 7 :SQS DEBUG: queue_leader.cpp:556: Request [e27e03f1-652ac51e-9911aa56-a3fbfdeb] Sending executed reply 2025-05-07T09:00:17.079934Z node 7 :SQS INFO: fifo_cleanup.cpp:31: Request [96aed848-eb6384ef-ba217e90-1d088eb6] Bootstrap cleanup actor for queue [cloud4/000000000000000301v0] 2025-05-07T09:00:17.079951Z node 7 :SQS INFO: fifo_cleanup.cpp:31: Request [6254f0fd-1037294f-9e25ded-cf503fea] Bootstrap cleanup actor for queue [cloud4/000000000000000301v0] 2025-05-07T09:00:17.079970Z node 7 :SQS INFO: retention.cpp:30: Request [e4d07c75-710bddb9-9fb6695c-7870c2b4] Bootstrap retention actor for queue [cloud4/000000000000000301v0] 2025-05-07T09:00:17.079989Z node 7 :SQS INFO: purge.cpp:35: Request [8a148a6e-17916cc8-647f78c2-ad230ce7] Create purge actor for queue /Root/SQS/cloud4/000000000000000301v0 2025-05-07T09:00:17.080094Z node 7 :SQS DEBUG: action.h:623: Request [9cfb6494-4c94ca3-4111292c-7f27f35b] Get configuration duration: 44ms 2025-05-07T09:00:17.080109Z node 7 :SQS TRACE: action.h:643: Request [9cfb6494-4c94ca3-4111292c-7f27f35b] Got configuration. Root url: http://ghrun-sykirh5vua.auto.internal:8771, Shards: 1, Fail: 0 2025-05-07T09:00:17.080132Z node 7 :SQS TRACE: action.h:425: Request [9cfb6494-4c94ca3-4111292c-7f27f35b] DoRoutine 2025-05-07T09:00:17.080202Z node 7 :SQS TRACE: action.h:262: Request [9cfb6494-4c94ca3-4111292c-7f27f35b] SendReplyAndDie from action actor { ListQueueTags { RequestId: "9cfb6494-4c94ca3-4111292c-7f27f35b" } } 2025-05-07T09:00:17.080276Z node 7 :SQS TRACE: proxy_service.h:35: Request [9cfb6494-4c94ca3-4111292c-7f27f35b] Sending sqs response: { ListQueueTags { RequestId: "9cfb6494-4c94ca3-4111292c-7f27f35b" } RequestId: "9cfb6494-4c94ca3-4111292c-7f27f35b" FolderId: "folder4" ResourceId: "000000000000000301v0" IsFifo: true } 2025-05-07T09:00:17.080464Z node 7 :SQS TRACE: proxy_service.cpp:194: HandleSqsResponse ListQueueTags { RequestId: "9cfb6494-4c94ca3-4111292c-7f27f35b" } RequestId: "9cfb6494-4c94ca3-4111292c-7f27f35b" FolderId: "folder4" ResourceId: "000000000000000301v0" IsFifo: true 2025-05-07T09:00:17.080511Z node 7 :SQS TRACE: proxy_service.cpp:208: Sending answer to proxy actor [7:7501626030567133740:2550]: ListQueueTags { RequestId: "9cfb6494-4c94ca3-4111292c-7f27f35b" } RequestId: "9cfb6494-4c94ca3-4111292c-7f27f35b" FolderId: "folder4" ResourceId: "000000000000000301v0" IsFifo: true 2025-05-07T09:00:17.080567Z node 7 :SQS TRACE: service.cpp:1464: Dec local leader ref for actor [7:7501626030567133743:3666]. 
Found: 1 2025-05-07T09:00:17.080932Z node 7 :SQS TRACE: proxy_actor.cpp:178: Request [9cfb6494-4c94ca3-4111292c-7f27f35b] HandleResponse: { ListQueueTags { RequestId: "9cfb6494-4c94ca3-4111292c-7f27f35b" } RequestId: "9cfb6494-4c94ca3-4111292c-7f27f35b" FolderId: "folder4" ResourceId: "000000000000000301v0" IsFifo: true }, status: OK 2025-05-07T09:00:17.080993Z node 7 :SQS DEBUG: proxy_actor.cpp:147: Request [9cfb6494-4c94ca3-4111292c-7f27f35b] Sending reply from proxy actor: { ListQueueTags { RequestId: "9cfb6494-4c94ca3-4111292c-7f27f35b" } RequestId: "9cfb6494-4c94ca3-4111292c-7f27f35b" FolderId: "folder4" ResourceId: "000000000000000301v0" IsFifo: true } Http output full {} 2025-05-07T09:00:17.085442Z node 7 :HTTP_PROXY DEBUG: http_req.cpp:379: http request [ListQueueTags] requestId [9cfb6494-4c94ca3-4111292c-7f27f35b] Got succesfult GRPC response. 2025-05-07T09:00:17.085547Z node 7 :HTTP_PROXY INFO: http_req.cpp:1207: http request [ListQueueTags] requestId [9cfb6494-4c94ca3-4111292c-7f27f35b] reply ok 2025-05-07T09:00:17.085685Z node 7 :HTTP_PROXY DEBUG: http_req.cpp:1267: http request [ListQueueTags] requestId [9cfb6494-4c94ca3-4111292c-7f27f35b] Send metering event. HttpStatusCode: 200 IsFifo: 1 FolderId: folder4 RequestSizeInBytes: 530 ResponseSizeInBytes: 178 SourceAddress: 5828:b01:6050:0:4028:b01:6050:0 ResourceId: 000000000000000301v0 Action: ListQueueTags 2025-05-07T09:00:17.085934Z node 7 :HTTP DEBUG: http_proxy_incoming.cpp:243: (#37,[::1]:50332) <- (200 ) 2025-05-07T09:00:17.086110Z node 7 :HTTP DEBUG: http_proxy_incoming.cpp:296: (#37,[::1]:50332) connection closed 2025-05-07T09:00:17.098399Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:552: [WorkloadService] [Service] Reply cleanup error NOT_FOUND to [7:7501626021977198530:2448]: Pool not found 2025-05-07T09:00:17.099363Z node 7 :SQS DEBUG: cleanup_queue_data.cpp:100: [cleanup removed queues] getting queues... 2025-05-07T09:00:17.109947Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7501626030567133780:2558], DatabaseId: /Root/SQS, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:00:17.110083Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:602: [WorkloadService] [TDatabaseFetcherActor] ActorId: [7:7501626030567133781:2559], Database: /Root/SQS, Failed to fetch database info, UNSUPPORTED, issues: {
: Error: Invalid database path /Root/SQS, please check the correctness of the path } 2025-05-07T09:00:17.110133Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root/SQS, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } |91.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/graph/shard/ut/ydb-core-graph-shard-ut |91.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/graph/shard/ut/ydb-core-graph-shard-ut |91.6%| [LD] {RESULT} $(B)/ydb/core/graph/shard/ut/ydb-core-graph-shard-ut >> Cdc::ResolvedTimestampsVolatileOutOfOrder [GOOD] >> Cdc::SequentialSplitMerge ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> TColumnShardTestReadWrite::CompactionSplitGranuleStrKey_PKString 2025-05-07 09:00:13,902 WARNING devtools.ya.test.programs.test_tool.run_test.run_test: Wrapper execution timed out 2025-05-07 09:00:14,377 WARNING devtools.ya.test.programs.test_tool.run_test.run_test: Wrapper has overrun 600 secs timeout. Process tree before termination: pid rss ref pdirt 137938 46.5M 46.4M 23.8M test_tool run_ut @/home/runner/.ya/build/build_root/zvgn/003a12/ydb/core/tx/columnshard/ut_rw/test-results/unittest/testing_out_stuff/chunk26/testing_out_stuff/test_tool.args 138213 1.5G 1.4G 1.4G └─ ydb-core-tx-columnshard-ut_rw --trace-path-append /home/runner/.ya/build/build_root/zvgn/003a12/ydb/core/tx/columnshard/ut_rw/test-results/unittest/testing_out_stuff/chu Test command err: 2025-05-07T08:50:15.789160Z node 1 :BLOB_CACHE NOTICE: ctor_logger.h:56: MaxCacheDataSize: 20971520 InFlightDataSize: 0 2025-05-07T08:50:15.922482Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-05-07T08:50:15.946498Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-05-07T08:50:15.946833Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-05-07T08:50:15.954666Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-05-07T08:50:15.954898Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-05-07T08:50:15.955143Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-05-07T08:50:15.955223Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-05-07T08:50:15.955296Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-05-07T08:50:15.955371Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanInsertionDedup; 2025-05-07T08:50:15.955447Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-05-07T08:50:15.955563Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-05-07T08:50:15.955672Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-05-07T08:50:15.955748Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-05-07T08:50:15.955859Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-05-07T08:50:15.955975Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-05-07T08:50:15.985565Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-05-07T08:50:15.985743Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:137;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=11;current_normalizer=CLASS_NAME=Granules; 2025-05-07T08:50:15.985803Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-05-07T08:50:15.986045Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-05-07T08:50:15.986236Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-05-07T08:50:15.986333Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-05-07T08:50:15.986381Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-05-07T08:50:15.986488Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-05-07T08:50:15.986550Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-05-07T08:50:15.986595Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-05-07T08:50:15.986624Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-05-07T08:50:15.986762Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-05-07T08:50:15.986838Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-05-07T08:50:15.986869Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-05-07T08:50:15.986901Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-05-07T08:50:15.986974Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-05-07T08:50:15.987020Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-05-07T08:50:15.987063Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanInsertionDedup;id=CleanInsertionDedup; 2025-05-07T08:50:15.987114Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=8;type=CleanInsertionDedup; 2025-05-07T08:50:15.987227Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanInsertionDedup;id=8; 2025-05-07T08:50:15.987301Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-05-07T08:50:15.987339Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-05-07T08:50:15.987413Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-05-07T08:50:15.987456Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-05-07T08:50:15.987494Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-05-07T08:50:15.987736Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-05-07T08:50:15.987818Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-05-07T08:50:15.987863Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-05-07T08:50:15.988103Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-05-07T08:50:15.988173Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-05-07T08:50:15.988223Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-05-07T08:50:15.988407Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-05-07T08:50:15.988472Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-05-07T08:50:15.988521Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-05-07T08:50:15.988605Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-05-07T08:50:15.988688Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-05-07T08:50:15.988742Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-05-07T08:50:15.988774Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-05-07T08:50:15.989282Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=55; 2025-05-07T08:50:15.989395Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=45; ... 
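The normalizer_init / normalizer_finished / normalizer_switched records above trace TTxUpdateSchema walking an ordered chain of 11 normalizers (Granules, Chunks, TablesCleaner, CleanGranuleId, CleanInsertionDedup, GCCountersNormalizer, SyncPortionFromChunks, RestoreV1Chunks_V2, RestoreV2Chunks, CleanDeprecatedSnapshot, RestoreV0ChunksMeta), each run exactly once before control switches to the next. A toy sketch of that sequential chain (names taken from the log; the structure is illustrative, not the columnshard implementation):

    from typing import Callable, List, Tuple

    # Ordered chain as in the log (abridged); each normalizer reports how
    # many chunks it found, then the executor switches to the next one.
    NORMALIZERS: List[Tuple[str, Callable[[], int]]] = [
        ("Granules", lambda: 0),
        ("Chunks", lambda: 0),
        ("TablesCleaner", lambda: 0),
        ("CleanGranuleId", lambda: 0),
        ("RestoreV0ChunksMeta", lambda: 0),
    ]

    def run_update_schema() -> None:
        for name, normalize in NORMALIZERS:
            print(f"event=normalizer_init;type={name}")
            found = normalize()
            print(f"message={found} chunks found")
            print(f"event=normalizer_finished;description=CLASS_NAME={name}")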
:2848:0]; 2025-05-07T08:59:59.504946Z node 1 :S3_WRAPPER DEBUG: log.cpp:784: fline=fake_storage.cpp:106;method=PutObject;id=[9437184:2:70:255:672:2856:0]; 2025-05-07T08:59:59.505015Z node 1 :S3_WRAPPER DEBUG: log.cpp:784: fline=fake_storage.cpp:106;method=PutObject;id=[9437184:2:70:255:673:2840:0]; 2025-05-07T08:59:59.505095Z node 1 :S3_WRAPPER DEBUG: log.cpp:784: fline=fake_storage.cpp:106;method=PutObject;id=[9437184:2:70:255:674:2848:0]; 2025-05-07T08:59:59.505183Z node 1 :S3_WRAPPER DEBUG: log.cpp:784: fline=fake_storage.cpp:106;method=PutObject;id=[9437184:2:70:255:675:2864:0]; 2025-05-07T08:59:59.505252Z node 1 :S3_WRAPPER DEBUG: log.cpp:784: fline=fake_storage.cpp:106;method=PutObject;id=[9437184:2:70:255:676:2784:0]; 2025-05-07T08:59:59.505315Z node 1 :S3_WRAPPER DEBUG: log.cpp:784: fline=fake_storage.cpp:106;method=PutObject;id=[9437184:2:70:255:677:2792:0]; 2025-05-07T08:59:59.505403Z node 1 :S3_WRAPPER DEBUG: log.cpp:784: fline=fake_storage.cpp:106;method=PutObject;id=[9437184:2:70:255:678:2792:0]; 2025-05-07T08:59:59.505478Z node 1 :S3_WRAPPER DEBUG: log.cpp:784: fline=fake_storage.cpp:106;method=PutObject;id=[9437184:2:70:255:679:2784:0]; 2025-05-07T08:59:59.505550Z node 1 :S3_WRAPPER DEBUG: log.cpp:784: fline=fake_storage.cpp:106;method=PutObject;id=[9437184:2:70:255:680:2784:0]; 2025-05-07T08:59:59.505618Z node 1 :S3_WRAPPER DEBUG: log.cpp:784: fline=fake_storage.cpp:106;method=PutObject;id=[9437184:2:70:255:681:2784:0]; 2025-05-07T08:59:59.505681Z node 1 :S3_WRAPPER DEBUG: log.cpp:784: fline=fake_storage.cpp:106;method=PutObject;id=[9437184:2:70:255:682:2864:0]; 2025-05-07T08:59:59.505767Z node 1 :S3_WRAPPER DEBUG: log.cpp:784: fline=fake_storage.cpp:106;method=PutObject;id=[9437184:2:70:255:683:2792:0]; 2025-05-07T08:59:59.505881Z node 1 :S3_WRAPPER DEBUG: log.cpp:784: fline=fake_storage.cpp:106;method=PutObject;id=[9437184:2:70:255:684:2800:0]; 2025-05-07T08:59:59.505988Z node 1 :S3_WRAPPER DEBUG: log.cpp:784: fline=fake_storage.cpp:106;method=PutObject;id=[9437184:2:70:255:685:2824:0]; 2025-05-07T08:59:59.506064Z node 1 :S3_WRAPPER DEBUG: log.cpp:784: fline=fake_storage.cpp:106;method=PutObject;id=[9437184:2:70:255:686:2784:0]; 2025-05-07T08:59:59.506154Z node 1 :S3_WRAPPER DEBUG: log.cpp:784: fline=fake_storage.cpp:106;method=PutObject;id=[9437184:2:70:255:687:2792:0]; 2025-05-07T08:59:59.506247Z node 1 :S3_WRAPPER DEBUG: log.cpp:784: fline=fake_storage.cpp:106;method=PutObject;id=[9437184:2:70:255:688:2792:0]; 2025-05-07T08:59:59.506342Z node 1 :S3_WRAPPER DEBUG: log.cpp:784: fline=fake_storage.cpp:106;method=PutObject;id=[9437184:2:70:255:689:2832:0]; 2025-05-07T08:59:59.506427Z node 1 :S3_WRAPPER DEBUG: log.cpp:784: fline=fake_storage.cpp:106;method=PutObject;id=[9437184:2:70:255:690:2784:0]; 2025-05-07T08:59:59.506515Z node 1 :S3_WRAPPER DEBUG: log.cpp:784: fline=fake_storage.cpp:106;method=PutObject;id=[9437184:2:70:255:691:2776:0]; 2025-05-07T08:59:59.506593Z node 1 :S3_WRAPPER DEBUG: log.cpp:784: fline=fake_storage.cpp:106;method=PutObject;id=[9437184:2:70:255:692:2840:0]; 2025-05-07T08:59:59.506667Z node 1 :S3_WRAPPER DEBUG: log.cpp:784: fline=fake_storage.cpp:106;method=PutObject;id=[9437184:2:70:255:693:2784:0]; 2025-05-07T08:59:59.506758Z node 1 :S3_WRAPPER DEBUG: log.cpp:784: fline=fake_storage.cpp:106;method=PutObject;id=[9437184:2:70:255:694:2784:0]; 2025-05-07T08:59:59.506823Z node 1 :S3_WRAPPER DEBUG: log.cpp:784: fline=fake_storage.cpp:106;method=PutObject;id=[9437184:2:70:255:695:2792:0]; 2025-05-07T08:59:59.506928Z node 1 :S3_WRAPPER DEBUG: 
log.cpp:784: fline=fake_storage.cpp:106;method=PutObject;id=[9437184:2:70:255:696:2800:0]; 2025-05-07T08:59:59.507017Z node 1 :S3_WRAPPER DEBUG: log.cpp:784: fline=fake_storage.cpp:106;method=PutObject;id=[9437184:2:70:255:697:2792:0]; 2025-05-07T08:59:59.507129Z node 1 :S3_WRAPPER DEBUG: log.cpp:784: fline=fake_storage.cpp:106;method=PutObject;id=[9437184:2:70:255:698:2784:0]; 2025-05-07T08:59:59.507197Z node 1 :S3_WRAPPER DEBUG: log.cpp:784: fline=fake_storage.cpp:106;method=PutObject;id=[9437184:2:70:255:699:2848:0]; 2025-05-07T08:59:59.507277Z node 1 :S3_WRAPPER DEBUG: log.cpp:784: fline=fake_storage.cpp:106;method=PutObject;id=[9437184:2:70:255:700:2768:0]; 2025-05-07T08:59:59.507369Z node 1 :S3_WRAPPER DEBUG: log.cpp:784: fline=fake_storage.cpp:106;method=PutObject;id=[9437184:2:70:255:701:2768:0]; 2025-05-07T08:59:59.507449Z node 1 :S3_WRAPPER DEBUG: log.cpp:784: fline=fake_storage.cpp:106;method=PutObject;id=[9437184:2:70:255:702:2776:0]; 2025-05-07T08:59:59.507518Z node 1 :S3_WRAPPER DEBUG: log.cpp:784: fline=fake_storage.cpp:106;method=PutObject;id=[9437184:2:70:255:703:2776:0]; 2025-05-07T08:59:59.507610Z node 1 :S3_WRAPPER DEBUG: log.cpp:784: fline=fake_storage.cpp:106;method=PutObject;id=[9437184:2:70:255:704:2768:0]; 2025-05-07T08:59:59.507700Z node 1 :S3_WRAPPER DEBUG: log.cpp:784: fline=fake_storage.cpp:106;method=PutObject;id=[9437184:2:70:255:705:2768:0]; 2025-05-07T08:59:59.507769Z node 1 :S3_WRAPPER DEBUG: log.cpp:784: fline=fake_storage.cpp:106;method=PutObject;id=[9437184:2:70:255:706:9080:0]; 2025-05-07T08:59:59.511200Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;parent=[1:139:2171];ev_type=NKikimr::NOlap::NResourceBroker::NSubscribe::TEvStartTask;fline=actor.cpp:38;event=ask_resources;task=cpu=0;mem=3651081;external_task_id=a78d0244-2b2111f0-a8fb4f12-83a0552e;type=CS::INDEXATION;priority=0;; 2025-05-07T08:59:59.511994Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;parent=[1:139:2171];ev_type=NKikimr::NOlap::NResourceBroker::NSubscribe::TEvStartTask;fline=actor.cpp:38;event=ask_resources;task=cpu=0;mem=206265;external_task_id=a78d4a06-2b2111f0-88fa738c-af6f2af1;type=CS::INDEXATION;priority=0;; 2025-05-07T08:59:59.514654Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;parent=[1:139:2171];ev_type=NKikimr::NResourceBroker::TEvResourceBroker::TEvResourceAllocated;fline=actor.cpp:29;event=result_resources;task_id=71;task=cpu=0;mem=3650994;external_task_id=a78ca434-2b2111f0-9d140d8d-c545899b;type=CS::INDEXATION;priority=0;; 2025-05-07T08:59:59.514717Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;parent=[1:139:2171];ev_type=NKikimr::NResourceBroker::TEvResourceBroker::TEvResourceAllocated;fline=task.cpp:9;event=resource_allocated;external_task_id=a78ca434-2b2111f0-9d140d8d-c545899b;mem=3650994;cpu=0; 2025-05-07T08:59:59.514775Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;parent=[1:139:2171];ev_type=NKikimr::NResourceBroker::TEvResourceBroker::TEvResourceAllocated;fline=task.cpp:40;event=allocate_resources;external_task_id=a78ca434-2b2111f0-9d140d8d-c545899b;task_id=71;mem=3650994;cpu=0; 2025-05-07T08:59:59.516477Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: external_task_id=a78ca434-2b2111f0-9d140d8d-c545899b;fline=task.cpp:110;event=OnDataReady;task=agents_waiting=0;additional_info=();;external_task_id=a78ca434-2b2111f0-9d140d8d-c545899b; 2025-05-07T09:00:02.952543Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: 
external_task_id=a78ca434-2b2111f0-9d140d8d-c545899b;fline=actor.cpp:48;task=agents_waiting=0;additional_info=();; 2025-05-07T09:00:02.959070Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=columnshard__write_index.cpp:50;event=TEvWriteIndex;count=1; 2025-05-07T09:00:02.972385Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;parent=[1:139:2171];ev_type=NKikimr::NResourceBroker::TEvResourceBroker::TEvResourceAllocated;fline=actor.cpp:29;event=result_resources;task_id=72;task=cpu=0;mem=3651081;external_task_id=a78d0244-2b2111f0-a8fb4f12-83a0552e;type=CS::INDEXATION;priority=0;; 2025-05-07T09:00:02.972477Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;parent=[1:139:2171];ev_type=NKikimr::NResourceBroker::TEvResourceBroker::TEvResourceAllocated;fline=task.cpp:9;event=resource_allocated;external_task_id=a78d0244-2b2111f0-a8fb4f12-83a0552e;mem=3651081;cpu=0; 2025-05-07T09:00:02.972527Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;parent=[1:139:2171];ev_type=NKikimr::NResourceBroker::TEvResourceBroker::TEvResourceAllocated;fline=task.cpp:40;event=allocate_resources;external_task_id=a78d0244-2b2111f0-a8fb4f12-83a0552e;task_id=72;mem=3651081;cpu=0; 2025-05-07T09:00:02.975121Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: external_task_id=a78d0244-2b2111f0-a8fb4f12-83a0552e;fline=task.cpp:110;event=OnDataReady;task=agents_waiting=0;additional_info=();;external_task_id=a78d0244-2b2111f0-a8fb4f12-83a0552e; 2025-05-07T09:00:08.018312Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: external_task_id=a78d0244-2b2111f0-a8fb4f12-83a0552e;fline=actor.cpp:48;task=agents_waiting=0;additional_info=();; 2025-05-07T09:00:08.020602Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:139:2171];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=columnshard__write_index.cpp:50;event=TEvWriteIndex;count=1; 2025-05-07T09:00:08.049795Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;parent=[1:139:2171];ev_type=NKikimr::NResourceBroker::TEvResourceBroker::TEvResourceAllocated;fline=actor.cpp:29;event=result_resources;task_id=73;task=cpu=0;mem=206265;external_task_id=a78d4a06-2b2111f0-88fa738c-af6f2af1;type=CS::INDEXATION;priority=0;; 2025-05-07T09:00:08.049891Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;parent=[1:139:2171];ev_type=NKikimr::NResourceBroker::TEvResourceBroker::TEvResourceAllocated;fline=task.cpp:9;event=resource_allocated;external_task_id=a78d4a06-2b2111f0-88fa738c-af6f2af1;mem=206265;cpu=0; 2025-05-07T09:00:08.049947Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;parent=[1:139:2171];ev_type=NKikimr::NResourceBroker::TEvResourceBroker::TEvResourceAllocated;fline=task.cpp:40;event=allocate_resources;external_task_id=a78d4a06-2b2111f0-88fa738c-af6f2af1;task_id=73;mem=206265;cpu=0; 2025-05-07T09:00:08.051422Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: external_task_id=a78d4a06-2b2111f0-88fa738c-af6f2af1;fline=task.cpp:110;event=OnDataReady;task=agents_waiting=0;additional_info=();;external_task_id=a78d4a06-2b2111f0-88fa738c-af6f2af1; 2025-05-07T09:00:08.143206Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: external_task_id=a78d4a06-2b2111f0-88fa738c-af6f2af1;fline=actor.cpp:48;task=agents_waiting=0;additional_info=();; 2025-05-07T09:00:08.145577Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: 
tablet_id=9437184;self_id=[1:139:2171];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=columnshard__write_index.cpp:50;event=TEvWriteIndex;count=1; Traceback (most recent call last): File "library/python/testing/yatest_common/yatest/common/process.py", line 384, in wait wait_for( File "library/python/testing/yatest_common/yatest/common/process.py", line 764, in wait_for raise TimeoutError(truncate(message, MAX_MESSAGE_LEN)) yatest.common.process.TimeoutError: 600 second(s) wait timeout has expired: Command '['/home/runner/.ya/tools/v4/8580453620/test_tool', 'run_ut', '@/home/runner/.ya/build/build_root/zvgn/003a12/ydb/core/tx/columnshard/ut_rw/test-results/unittest/testing_out_stuff/chunk26/testing_out_stuff/test_tool.args']' stopped by 600 seconds timeout During handling of the above exception, another exception occurred: Traceback (most recent call last): File "devtools/ya/test/programs/test_tool/run_test/run_test.py", line 1738, in main res.wait(check_exit_code=False, timeout=run_timeout, on_timeout=timeout_callback) File "library/python/testing/yatest_common/yatest/common/process.py", line 398, in wait raise ExecutionTimeoutError(self, str(e)) yatest.common.process.ExecutionTimeoutError: (("600 second(s) wait timeout has expired: Command '['/home/runner/.ya/tools/v4/8580453620/test_tool', 'run_ut', '@/home/runner/.ya/build/build_root/zvgn/003a12/ydb/core/tx/columnshard/ut_rw/test-results/unittest/testing_out_stuff/chunk26/testing_out_stuff/test_tool.args']' stopped by 600 seconds timeout",), {}) >> TGRpcLdapAuthentication::LdapAuthWithInvalidPassword [GOOD] >> TGRpcLdapAuthentication::LdapAuthWithEmptyPassword |91.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/security/certificate_check/ut/unittest >> TCmsTenatsTest::TestTenantLimit [GOOD] >> TCmsTenatsTest::TestScheduledPermissionWithNonePolicy |91.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/olap/high_load/ydb-tests-olap-high_load |91.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/olap/high_load/ydb-tests-olap-high_load |91.6%| [LD] {RESULT} $(B)/ydb/tests/olap/high_load/ydb-tests-olap-high_load >> YdbYqlClient::TestDecimal1 [GOOD] >> YdbYqlClient::TestDecimal35 >> Cdc::InitialScanAndLimits [GOOD] >> Cdc::InitialScanComplete ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> TTableProfileTests::DescribeTableOptions [GOOD] Test command err: 2025-05-07T08:59:37.985179Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501625861487689012:2140];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:59:37.985239Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0028a7/r3tmp/tmpAy17hf/pdisk_1.dat 2025-05-07T08:59:39.003281Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:59:39.111660Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:59:39.111756Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:59:39.122913Z node 1 :HIVE WARN: 
node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:59:39.129734Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 30013, node 1 2025-05-07T08:59:39.495293Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:59:39.495317Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:59:39.495324Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:59:39.495427Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:18602 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:59:40.169471Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... Trying to register node Register node result Status { Code: UNAUTHORIZED Reason: "Cannot authorize node. 
Access denied" } 2025-05-07T08:59:45.112606Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7501625893036159716:2144];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:59:45.113480Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0028a7/r3tmp/tmpzE4fBf/pdisk_1.dat 2025-05-07T08:59:45.519304Z node 4 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:59:45.551207Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:59:45.551300Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:59:45.560039Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 3396, node 4 2025-05-07T08:59:45.794756Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:59:45.794778Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:59:45.794787Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:59:45.794922Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26307 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:59:46.188588Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 
Trying to register node Register node result Status { Code: OK } NodeId: 1024 DomainPath: "Root" Expire: 1746615585509248 Nodes { NodeId: 1024 Host: "localhost" Port: 25763 ResolveHost: "localhost" Address: "localhost" Location { DataCenter: "DataCenter" Rack: "Rack" Unit: "Body" } Expire: 1746615585509248 } Nodes { NodeId: 4 Host: "::1" Port: 12001 ResolveHost: "::1" Address: "::1" Location { DataCenterNum: 49 RoomNum: 1 RackNum: 1 BodyNum: 1 DataCenter: "1" Module: "1" Rack: "1" Unit: "1" } } Nodes { NodeId: 5 Host: "::1" Port: 12002 ResolveHost: "::1" Address: "::1" Location { DataCenterNum: 50 RoomNum: 2 RackNum: 2 BodyNum: 2 DataCenter: "2" Module: "2" Rack: "2" Unit: "2" } } Nodes { NodeId: 6 Host: "::1" Port: 12003 ResolveHost: "::1" Address: "::1" Location { DataCenterNum: 51 RoomNum: 3 RackNum: 3 BodyNum: 3 DataCenter: "3" Module: "3" Rack: "3" Unit: "3" } } 2025-05-07T08:59:51.012695Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7501625920836656921:2074];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:59:51.012757Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0028a7/r3tmp/tmpmqGZwP/pdisk_1.dat 2025-05-07T08:59:51.542995Z node 7 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:59:51.560864Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:59:51.561155Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:59:51.569461Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 61170, node 7 2025-05-07T08:59:51.714684Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:59:51.714711Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:59:51.714721Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:59:51.714883Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:28987 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
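A successful registration reply, as above, carries the assigned NodeId (1024 here), a lease Expire value that appears to be microseconds since the Unix epoch (1746615585509248 falls roughly two hours past the log's timestamps), and the full node list with physical locations. A small sketch of reading such a reply into host records (field names taken from the log; the dict-based parsing is illustrative):

    from dataclasses import dataclass

    @dataclass
    class NodeRecord:
        node_id: int
        host: str
        port: int
        data_center: str
        rack: str

    def nodes_from_reply(reply: dict) -> list:
        # 'reply' is the registration result rendered as a dict; the keys
        # (Nodes, NodeId, Host, Port, Location) mirror the textual dump above.
        records = []
        for n in reply.get("Nodes", []):
            loc = n.get("Location", {})
            records.append(NodeRecord(
                node_id=n["NodeId"],
                host=n["Host"],
                port=n["Port"],
                data_center=loc.get("DataCenter", ""),
                rack=loc.get("Rack", ""),
            ))
        return records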
2025-05-07T08:59:52.227862Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... Trying to register node Register node result Status { Code: OK } NodeId: 1024 DomainPath: "Root" Expire: 1746615591489333 Nodes { NodeId: 1024 Host: "localhost" Port: 8191 ResolveHost: "localhost" Address: "localhost" Location { DataCenter: "DataCenter" Rack: "Rack" Unit: "Body" } Expire: 1746615591489333 } Nodes { NodeId: 7 Host: "::1" Port: 12001 ResolveHost: "::1" Address: "::1" Location { DataCenterNum: 49 RoomNum: 1 RackNum: 1 BodyNum: 1 DataCenter: "1" Module: "1" Rack: "1" Unit: "1" } } Nodes { NodeId: 8 Host: "::1" Port: 12002 ResolveHost: "::1" Address: "::1" Location { DataCenterNum: 50 RoomNum: 2 RackNum: 2 BodyNum: 2 DataCenter: "2" Module: "2" Rack: "2" Unit: "2" } } Nodes { NodeId: 9 Host: "::1" Port: 12003 ResolveHost: "::1" Address: "::1" Location { DataCenterNum: 51 RoomNum: 3 RackNum: 3 BodyNum: 3 DataCenter: "3" Module: "3" Rack: "3" Unit: "3" } } 2025-05-07T08:59:57.715130Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7501625946171486972:2074];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:59:57.715238Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0028a ... oot@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:59:59.220608Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... TClient is connected to server localhost:24963 2025-05-07T09:00:00.032925Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateSubDomain, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:00:00.117124Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... waiting... 
2025-05-07T09:00:01.141306Z node 12 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[12:7501625963225141647:2140];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:00:01.141884Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/ydb_ut_tenant/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-05-07T09:00:01.233498Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:00:01.233589Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:00:01.243424Z node 10 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 12 Cookie 12 2025-05-07T09:00:01.243696Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:24963 2025-05-07T09:00:02.715627Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[10:7501625946171486972:2074];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:00:02.715733Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:00:05.142303Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 2025-05-07T09:00:06.141258Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[12:7501625963225141647:2140];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:00:06.141367Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/ydb_ut_tenant/.metadata/initialization/migrations;error=timeout; TClient is connected to server localhost:24963 TClient::Ls request: /Root/ydb_ut_tenant/table-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "table-1" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710660 CreateStep: 1746608405680 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "table-1" Columns { Name: "Data" Type: "String" TypeId: 4097 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "KeyHash" Type: "Uint64" TypeId: 4 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name... 
(TRUNCATED) 2025-05-07T09:00:06.606193Z node 10 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 12 2025-05-07T09:00:06.629413Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-05-07T09:00:07.978753Z node 12 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [12:7501625988994946527:2369], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T09:00:07.978968Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/ydb_ut_tenant/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T09:00:08.077088Z node 12 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [12:7501625988994946527:2369], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T09:00:08.199556Z node 12 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [12:7501625988994946527:2369], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T09:00:09.926199Z node 13 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[13:7501625997638041895:2072];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:00:09.994260Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0028a7/r3tmp/tmp2aKbwp/pdisk_1.dat 2025-05-07T09:00:10.592809Z node 13 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:00:10.784657Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:00:10.784771Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:00:10.800016Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 61685, node 13 2025-05-07T09:00:11.230633Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:00:11.230664Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:00:11.230674Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:00:11.230857Z node 13 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26432 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:00:11.980286Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... TClient is connected to server localhost:26432 2025-05-07T09:00:12.606739Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateSubDomain, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T09:00:12.673668Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:00:13.195686Z node 15 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[15:7501626016018615692:2198];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:00:13.195769Z node 15 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/ydb_ut_tenant/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-05-07T09:00:13.303606Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(15, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:00:13.303779Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(15, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:00:13.332035Z node 13 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 15 Cookie 15 2025-05-07T09:00:13.355307Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(15, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:26432 2025-05-07T09:00:14.108115Z node 13 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 15 2025-05-07T09:00:14.108682Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(15, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-05-07T09:00:18.202669Z node 15 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[15:7501626016018615692:2198];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:00:18.202756Z node 15 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/ydb_ut_tenant/.metadata/initialization/migrations;error=timeout; ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> YdbTableBulkUpsert::ZeroRows [GOOD] Test command err: 2025-05-07T08:59:33.805537Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501625841066306915:2142];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:59:33.805961Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0028aa/r3tmp/tmpvTaPLD/pdisk_1.dat 2025-05-07T08:59:34.557649Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:59:34.557744Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:59:34.575866Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:59:34.582067Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 25102, node 1 2025-05-07T08:59:34.752509Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will 
use file: (empty maybe) 2025-05-07T08:59:34.752536Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:59:34.752549Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:59:34.752717Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:16424 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:59:35.186037Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:59:37.695003Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 2025-05-07T08:59:38.814102Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501625841066306915:2142];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:59:38.814210Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; CLIENT_DEADLINE_EXCEEDED 2025-05-07T08:59:38.951661Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501625862541146325:2440], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:59:38.951731Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501625862541146334:2443], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:59:38.951790Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:59:38.956855Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480 2025-05-07T08:59:38.990176Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501625862541146342:2444], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-05-07T08:59:39.093183Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501625866836113740:4179] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:59:39.609068Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710661. Ctx: { TraceId: 01jtmzhn25dqeg2f24zcpn0nf2, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OWE1Yjg2M2YtMmYxZWUwNTgtYTc5NmRjNmYtYjgyZjcxOTI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:59:41.714242Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7501625878553889730:2147];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:59:41.719005Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0028aa/r3tmp/tmpUUSf7T/pdisk_1.dat 2025-05-07T08:59:41.957696Z node 4 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:59:42.022391Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:59:42.022483Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:59:42.028293Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 11645, node 4 2025-05-07T08:59:42.175143Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:59:42.175171Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:59:42.175178Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:59:42.175312Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:25037 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-05-07T08:59:42.500036Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:59:44.967553Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-05-07T08:59:47.082024Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7501625902754292605:2075];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:59:47.082128Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0028aa/r3tmp/tmp1tuhS4/pdisk_1.dat 2025-05-07T08:59:47.296848Z node 7 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 10632, node 7 2025-05-07T08:59:47.386866Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:59:47.386950Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:59:47.422521Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:59:47.424272Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:59:47.424284Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:59:47.424293Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:59:47.424439Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23680 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:59:47.699339Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T08:59:50.603708Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-05-07T08:59:52.066905Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[7:7501625902754292605:2075];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:59:52.066985Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:00:02.278132Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7261: Cannot get console configs 2025-05-07T09:00:02.278170Z node 7 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:00:06.346105Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7501625986832284429:2079];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:00:06.355137Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0028aa/r3tmp/tmpQ6Zb9k/pdisk_1.dat 2025-05-07T09:00:06.645213Z node 10 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:00:06.688048Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:00:06.688542Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 7825, node 10 2025-05-07T09:00:06.698592Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:00:06.953797Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:00:06.953821Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:00:06.953829Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:00:06.954022Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:29940 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... 
(TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:00:07.424957Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:00:11.066403Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 2025-05-07T09:00:11.637745Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[10:7501625986832284429:2079];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:00:11.712266Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 1 usec
: Error: Bulk upsert to table '/Root/ui32' longTx ydb://long-tx/read-only timed out, duration: 0 sec 2 usec
: Error: Bulk upsert to table '/Root/ui32' longTx ydb://long-tx/read-only timed out, duration: 0 sec 4 usec
: Error: Bulk upsert to table '/Root/ui32' longTx ydb://long-tx/read-only timed out, duration: 0 sec 8 usec
: Error: Bulk upsert to table '/Root/ui32' longTx ydb://long-tx/read-only timed out, duration: 0 sec 16 usec
: Error: Bulk upsert to table '/Root/ui32' longTx ydb://long-tx/read-only timed out, duration: 0 sec 32 usec
: Error: Bulk upsert to table '/Root/ui32' longTx ydb://long-tx/read-only timed out, duration: 0 sec 64 usec
: Error: Bulk upsert to table '/Root/ui32' longTx ydb://long-tx/read-only timed out, duration: 0 sec 128 usec
: Error: Bulk upsert to table '/Root/ui32' longTx ydb://long-tx/read-only timed out, duration: 0 sec 256 usec
: Error: Bulk upsert to table '/Root/ui32' longTx ydb://long-tx/read-only timed out, duration: 0 sec 512 usec
: Error: Bulk upsert to table '/Root/ui32' longTx ydb://long-tx/read-only timed out, duration: 0 sec 1024 usec
: Error: Bulk upsert to table '/Root/ui32' longTx ydb://long-tx/read-only timed out, duration: 0 sec 2048 usec
: Error: Bulk upsert to table '/Root/ui32' longTx ydb://long-tx/read-only timed out, duration: 0 sec 4096 usec
: Error: Bulk upsert to table '/Root/ui32' longTx ydb://long-tx/read-only timed out, duration: 0 sec 8192 usec
: Error: Bulk upsert to table '/Root/ui32' Deadline exceeded 16384 usec
: Error: Bulk upsert to table '/Root/ui32' longTx ydb://long-tx/read-only timed out, duration: 0 sec 32768 usec
: Error: Bulk upsert to table '/Root/ui32' longTx ydb://long-tx/read-only timed out, duration: 0 sec 65536 usec 2025-05-07T09:00:14.646243Z node 13 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[13:7501626017335692646:2072];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:00:14.646346Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0028aa/r3tmp/tmpYdFaQv/pdisk_1.dat 2025-05-07T09:00:14.871705Z node 13 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:00:14.957589Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:00:14.957703Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:00:14.963911Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 25094, node 13 2025-05-07T09:00:15.146684Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:00:15.146712Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:00:15.146722Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:00:15.146871Z node 13 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:7570 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:00:15.479015Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T09:00:18.848481Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 >> YdbYqlClient::QueryLimits [GOOD] >> YdbYqlClient::QueryStats >> TCmsTest::RequestReplaceBrokenDevices >> YdbTableBulkUpsertOlap::UpsertCSV [GOOD] >> YdbTableBulkUpsertOlap::UpsertCSV_DataShard |91.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tablet_flat/benchmark/core_tablet_flat_benchmark |91.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tablet_flat/benchmark/core_tablet_flat_benchmark |91.6%| [LD] {RESULT} $(B)/ydb/core/tablet_flat/benchmark/core_tablet_flat_benchmark >> TCmsTenatsTest::TestScheduledPermissionWithNonePolicy [GOOD] >> TCmsTenatsTest::TestTenantLimitForceRestartMode >> TestKinesisHttpProxy::TestEmptyHttpBody [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tablet_flat/ut/unittest >> TFlatTableExecutor_ResourceProfile::TestExecutorPreserveTxData [GOOD] Test command err: 00000.000 II| FAKE_ENV: Born at 2025-05-07T08:55:01.511431Z 00000.010 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144 00000.011 II| FAKE_ENV: Starting storage for BS group 0 00000.011 II| FAKE_ENV: Starting storage for BS group 1 00000.011 II| FAKE_ENV: Starting storage for BS group 2 00000.012 II| FAKE_ENV: Starting storage for BS group 3 00000.074 C1| TABLET_EXECUTOR: Tablet 1 unhandled exception std::runtime_error: test ??+0 (0x11617AF1) __cxa_throw+221 (0x1161791D) NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Exceptions::TTxExecuteThrowException::Execute(NKikimr::NTabletFlatExecutor::TTransactionContext&, NActors::TActorContext const&)+62 (0x1092C13E) NKikimr::NTabletFlatExecutor::TExecutor::ExecuteTransaction(NKikimr::NTabletFlatExecutor::TSeat*)+3349 (0x177C3FB5) NKikimr::NTabletFlatExecutor::TExecutor::StateWork(TAutoPtr&)+504 (0x17788118) NActors::IActor::Receive(TAutoPtr&)+237 (0x12ABCD3D) 00000.075 II| FAKE_ENV: Model starts hard shutdown on level 7 of 8, left 2 actors 00000.075 NN| TABLET_SAUSAGECACHE: Poison cache serviced 0 reqs hit {0 0b} miss {0 0b} 00000.075 II| FAKE_ENV: Shut order, stopping 4 BS groups 00000.075 II| FAKE_ENV: DS.0 gone, left {42b, 1}, put {62b, 2} 00000.075 II| FAKE_ENV: DS.1 gone, left {35b, 1}, put {35b, 1} 00000.075 II| FAKE_ENV: DS.2 gone, left {0b, 0}, put {0b, 0} 00000.075 II| FAKE_ENV: DS.3 gone, left {0b, 0}, put {0b, 0} 00000.075 II| FAKE_ENV: All BS storage groups are stopped 00000.075 II| FAKE_ENV: Model stopped, hosted 3 actors, spent 0.000s 00000.075 II| FAKE_ENV: Logged {Emerg 0 Alert 0 Crit 1 Error 0 Left 15}, stopped 00000.000 II| FAKE_ENV: Born at 2025-05-07T08:55:01.595370Z 00000.024 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144 00000.025 II| FAKE_ENV: Starting storage for BS group 0 00000.025 II| FAKE_ENV: Starting storage for BS group 1 00000.025 II| FAKE_ENV: Starting storage for BS group 2 00000.025 II| FAKE_ENV: Starting storage for BS group 3 00000.038 II| FAKE_ENV: Model starts hard shutdown on level 7 of 8, left 2 actors 00000.038 NN| TABLET_SAUSAGECACHE: Poison cache serviced 0 reqs hit {0 0b} miss {0 0b} 00000.038 II| FAKE_ENV: Shut order, stopping 4 BS groups 00000.038 II| FAKE_ENV: DS.0 gone, left {111b, 2}, put {131b, 3} 00000.038 II| FAKE_ENV: DS.1 gone, left {42b, 2}, put {42b, 2} 
00000.038 II| FAKE_ENV: DS.2 gone, left {0b, 0}, put {0b, 0} 00000.039 II| FAKE_ENV: DS.3 gone, left {0b, 0}, put {0b, 0} 00000.039 II| FAKE_ENV: All BS storage groups are stopped 00000.039 II| FAKE_ENV: Model stopped, hosted 3 actors, spent 0.000s 00000.039 II| FAKE_ENV: Logged {Emerg 0 Alert 0 Crit 0 Error 0 Left 15}, stopped 00000.000 II| FAKE_ENV: Born at 2025-05-07T08:55:01.644686Z 00000.016 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144 00000.016 II| FAKE_ENV: Starting storage for BS group 0 00000.017 II| FAKE_ENV: Starting storage for BS group 1 00000.017 II| FAKE_ENV: Starting storage for BS group 2 00000.017 II| FAKE_ENV: Starting storage for BS group 3 00000.040 II| FAKE_ENV: Model starts hard shutdown on level 7 of 8, left 4 actors 00000.041 NN| TABLET_SAUSAGECACHE: Poison cache serviced 0 reqs hit {0 0b} miss {0 0b} 00000.041 II| FAKE_ENV: Shut order, stopping 4 BS groups 00000.041 II| FAKE_ENV: DS.0 gone, left {561b, 14}, put {623b, 16} 00000.041 II| FAKE_ENV: DS.1 gone, left {693b, 8}, put {693b, 8} 00000.041 II| FAKE_ENV: DS.2 gone, left {0b, 0}, put {0b, 0} 00000.041 II| FAKE_ENV: DS.3 gone, left {0b, 0}, put {0b, 0} 00000.041 II| FAKE_ENV: All BS storage groups are stopped 00000.041 II| FAKE_ENV: Model stopped, hosted 4 actors, spent 0.000s 00000.041 II| FAKE_ENV: Logged {Emerg 0 Alert 0 Crit 0 Error 0 Left 15}, stopped 00000.000 II| FAKE_ENV: Born at 2025-05-07T08:55:01.720754Z 00000.015 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144 00000.016 II| FAKE_ENV: Starting storage for BS group 0 00000.016 II| FAKE_ENV: Starting storage for BS group 1 00000.016 II| FAKE_ENV: Starting storage for BS group 2 00000.016 II| FAKE_ENV: Starting storage for BS group 3 00000.027 II| FAKE_ENV: Model starts hard shutdown on level 7 of 8, left 4 actors 00000.028 NN| TABLET_SAUSAGECACHE: Poison cache serviced 0 reqs hit {0 0b} miss {0 0b} 00000.028 II| FAKE_ENV: Shut order, stopping 4 BS groups 00000.028 II| FAKE_ENV: DS.0 gone, left {141b, 4}, put {669b, 13} 00000.028 II| FAKE_ENV: DS.1 gone, left {868b, 8}, put {987b, 10} 00000.028 II| FAKE_ENV: DS.2 gone, left {0b, 0}, put {0b, 0} 00000.028 II| FAKE_ENV: DS.3 gone, left {0b, 0}, put {0b, 0} 00000.028 II| FAKE_ENV: All BS storage groups are stopped 00000.028 II| FAKE_ENV: Model stopped, hosted 5 actors, spent 0.000s 00000.028 II| FAKE_ENV: Logged {Emerg 0 Alert 0 Crit 0 Error 0 Left 15}, stopped 00000.000 II| FAKE_ENV: Born at 2025-05-07T08:55:01.754625Z 00000.008 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144 00000.008 II| FAKE_ENV: Starting storage for BS group 0 00000.008 II| FAKE_ENV: Starting storage for BS group 1 00000.008 II| FAKE_ENV: Starting storage for BS group 2 00000.009 II| FAKE_ENV: Starting storage for BS group 3 00000.010 II| TABLET_EXECUTOR: Leader{1:2:0} activating executor 00000.010 II| TABLET_EXECUTOR: LSnap{1:2, on 2:1, 35b, wait} done, Waste{2:0, 0b +(0, 0b), 0 trc} 00000.010 DD| TABLET_EXECUTOR: Leader{1:2:2} commited cookie 2 for step 1 ... 
initializing schema 00000.011 DD| TABLET_EXECUTOR: Leader{1:2:2} Tx{1, NKikimr::NTabletFlatExecutor::TRowsModel::TTxSchema} queued, type NKikimr::NTabletFlatExecutor::TRowsModel::TTxSchema 00000.011 DD| TABLET_EXECUTOR: Leader{1:2:2} Tx{1, NKikimr::NTabletFlatExecutor::TRowsModel::TTxSchema} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.011 DD| TABLET_EXECUTOR: Leader{1:2:2} Tx{1, NKikimr::NTabletFlatExecutor::TRowsModel::TTxSchema} hope 1 -> done Change{2, redo 0b alter 209b annex 0, ~{ } -{ }, 0 gb} 00000.011 DD| TABLET_EXECUTOR: Leader{1:2:2} Tx{1, NKikimr::NTabletFlatExecutor::TRowsModel::TTxSchema} release 4194304b of static, Memory{0 dyn 0} 00000.012 DD| TABLET_EXECUTOR: Leader{1:2:3} commited cookie 1 for step 2 ... inserting rows 00000.012 DD| TABLET_EXECUTOR: Leader{1:2:3} Tx{2, NKikimr::NTabletFlatExecutor::TRowsModel::TTxAddRows} queued, type NKikimr::NTabletFlatExecutor::TRowsModel::TTxAddRows 00000.012 DD| TABLET_EXECUTOR: Leader{1:2:3} Tx{2, NKikimr::NTabletFlatExecutor::TRowsModel::TTxAddRows} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.012 DD| TABLET_EXECUTOR: Leader{1:2:3} Tx{2, NKikimr::NTabletFlatExecutor::TRowsModel::TTxAddRows} hope 1 -> done Change{2, redo 512b alter 0b annex 0, ~{ 101 } -{ }, 0 gb} 00000.012 DD| TABLET_EXECUTOR: Leader{1:2:3} Tx{2, NKikimr::NTabletFlatExecutor::TRowsModel::TTxAddRows} release 4194304b of static, Memory{0 dyn 0} 00000.013 DD| TABLET_EXECUTOR: Leader{1:2:4} commited cookie 1 for step 3 ... starting follower ... waiting for follower attach ... blocking NKikimr::TEvTablet::TEvNewFollowerAttached from TABLET_ACTOR to NKikimr::NTabletFlatExecutor::TTestFlatTablet cookie 0 ... waiting for follower attach (done) ... spamming QueueScan transactions 00000.013 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{3, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Follower::TTxQueueScan} queued, type NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Follower::TTxQueueScan 00000.013 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{3, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Follower::TTxQueueScan} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.014 II| TABLET_EXECUTOR: Leader{1:2:5} starting Scan{2 on 101, TEmptyScan{}} 00000.014 DD| TABLET_EXECUTOR: Leader{1:2:5} Tx{3, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Follower::TTxQueueScan} hope 1 -> done Change{3, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 00000.014 DD| TABLET_EXECUTOR: Leader{1:2:5} Tx{3, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Follower::TTxQueueScan} release 4194304b of static, Memory{0 dyn 0} 00000.014 DD| TABLET_EXECUTOR: Leader{1:2:5} commited cookie 8 for step 4 00000.014 DD| TABLET_EXECUTOR: Leader{1:2:5} Tx{4, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Follower::TTxQueueScan} queued, type NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Follower::TTxQueueScan 00000.014 DD| TABLET_EXECUTOR: Leader{1:2:5} Tx{4, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Follower::TTxQueueScan} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.014 II| TABLET_EXECUTOR: Leader{1:2:6} starting Scan{4 on 101, TEmptyScan{}} 00000.015 DD| TABLET_EXECUTOR: Leader{1:2:6} Tx{4, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Follower::TTxQueueScan} hope 1 -> done Change{3, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 00000.015 DD| TABLET_EXECUTOR: Leader{1:2:6} Tx{4, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Follower::TTxQueueScan} release 4194304b of 
static, Memory{0 dyn 0} 00000.015 DD| TABLET_EXECUTOR: Leader{1:2:6} commited cookie 8 for step 5 00000.015 DD| TABLET_EXECUTOR: Leader{1:2:6} Tx{5, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Follower::TTxQueueScan} queued, type NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Follower::TTxQueueScan 00000.015 DD| TABLET_EXECUTOR: Leader{1:2:6} Tx{5, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Follower::TTxQueueScan} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.015 II| TABLET_EXECUTOR: Leader{1:2:7} starting Scan{6 on 101, TEmptyScan{}} 00000.015 DD| TABLET_EXECUTOR: Leader{1:2:7} Tx{5, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Follower::TTxQueueScan} hope 1 -> done Change{3, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 00000.015 DD| TABLET_EXECUTOR: Leader{1:2:7} Tx{5, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Follower::TTxQueueScan} release 4194304b of static, Memory{0 dyn 0} 00000.015 DD| TABLET_EXECUTOR: Leader{1:2:7} commited cookie 8 for step 6 00000.016 DD| TABLET_EXECUTOR: Leader{1:2:7} Tx{6, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Follower::TTxQueueScan} queued, type NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Follower::TTxQueueScan 00000.016 DD| TABLET_EXECUTOR: Leader{1:2:7} Tx{6, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Follower::TTxQueueScan} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.016 II| TABLET_EXECUTOR: Leader{1:2:8} starting Scan{8 on 101, TEmptyScan{}} 00000.016 DD| TABLET_EXECUTOR: Leader{1:2:8} Tx{6, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Follower::TTxQueueScan} hope 1 -> done Change{3, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 00000.016 DD| TABLET_EXECUTOR: Leader{1:2:8} Tx{6, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Follower::TTxQueueScan} release 4194304b of static, Memory{0 dyn 0} 00000.016 DD| TABLET_EXECUTOR: Leader{1:2:8} commited cookie 8 for step 7 00000.016 DD| TABLET_EXECUTOR: Leader{1:2:8} Tx{7, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Follower::TTxQueueScan} queued, type NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Follower::TTxQueueScan 00000.016 DD| TABLET_EXECUTOR: Leader{1:2:8} Tx{7, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Follower::TTxQueueScan} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.017 II| TABLET_EXECUTOR: Leader{1:2:9} starting Scan{10 on 101, TEmptyScan{}} 00000.017 DD| TABLET_EXECUTOR: Leader{1:2:9} Tx{7, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Follower::TTxQueueScan} hope 1 -> done Change{3, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 00000.044 DD| TABLET_ ... 
ange{2, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 00001.017 DD| TABLET_EXECUTOR: Leader{1:2:4} found attached Res{10 20480b} 00001.017 DD| TABLET_EXECUTOR: release 10240b of static tx data due to attached res 10, Memory{0 dyn 20480} 00001.017 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{24, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} touch new 0b, 0b lo load (0b in total), 524267520b requested for data (524288000b in total) 00001.017 EE| TABLET_EXECUTOR: Leader{1:2:4} Tx{24, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} mem 524288000b terminated, limit 314572800b is exceeded 00001.018 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{24, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} release Res{10 20480b}, Memory{0 dyn 0} 00001.018 DD| RESOURCE_BROKER: Update cookie for task Tx{23, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (10 by [56:30:2062]) 00001.018 DD| RESOURCE_BROKER: Finish task Tx{23, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (10 by [56:30:2062]) (release resources {0, 20480}) 00001.018 DD| RESOURCE_BROKER: Updated planned resource usage for queue queue_default from 0.001311 to 0.000000 (remove task Tx{23, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (10 by [56:30:2062])) 00001.018 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{25, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} queued, type NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory 00001.018 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{25, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} took 1024b of static mem, Memory{1024 dyn 0} 00001.019 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{25, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} hope 1 -> retry Change{2, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 00001.019 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{25, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} touch new 0b, 0b lo load (0b in total), 19456b requested for data (20480b in total) 00001.019 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{25, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} release 1024b of static, Memory{0 dyn 0} 00001.019 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{25, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} release tx data 00001.019 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{25, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} request Res{11 20480b} type small_transaction 00001.019 DD| RESOURCE_BROKER: Submitted new unknown task Tx{25, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (11 by [56:30:2062]) priority=5 resources={0, 20480} 00001.019 EE| RESOURCE_BROKER: Assigning waiting task 'Tx{25, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (11 by [56:30:2062])' of unknown type 'small_transaction' to default queue 00001.019 DD| RESOURCE_BROKER: Allocate resources {0, 20480} for task Tx{25, 
NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (11 by [56:30:2062]) from queue queue_default 00001.019 EE| RESOURCE_BROKER: Assigning in-fly task 'Tx{25, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (11 by [56:30:2062])' of unknown type 'small_transaction' to default queue 00001.019 DD| RESOURCE_BROKER: Updated planned resource usage for queue queue_default from 0.000000 to 0.001192 (insert task Tx{25, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (11 by [56:30:2062])) 00001.019 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{25, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} acquired dyn mem Res{11 20480b}, Memory{0 dyn 20480} 00001.118 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{25, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} hope 2 -> done Change{2, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 00001.119 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{25, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} update resource task 11 releasing 0b, Memory{0 dyn 20480} 00001.119 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{25, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} captured Res{11 20480b} 00001.119 DD| RESOURCE_BROKER: Update task Tx{25, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (11 by [56:30:2062]) (priority=5 type=small_transaction resources={0, 20480} resubmit=0) 00001.119 EE| RESOURCE_BROKER: Assigning in-fly task 'Tx{25, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (11 by [56:30:2062])' of unknown type 'small_transaction' to default queue 00001.119 DD| RESOURCE_BROKER: Updated planned resource usage for queue queue_default from 0.000000 to 0.001192 (insert task Tx{25, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (11 by [56:30:2062])) 00001.119 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{26, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} queued, type NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory 00001.119 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{26, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} took 1024b of static mem, Memory{1024 dyn 20480} 00001.119 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{26, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} hope 1 -> retry Change{2, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 00001.119 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{26, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} touch new 0b, 0b lo load (0b in total), 19456b requested for data (20480b in total) 00001.119 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{26, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} release 1024b of static, Memory{0 dyn 20480} 00001.119 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{26, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} release tx data 00001.119 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{26, 
NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} request Res{12 20480b} type small_transaction 00001.119 DD| RESOURCE_BROKER: Submitted new unknown task Tx{26, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (12 by [56:30:2062]) priority=5 resources={0, 20480} 00001.119 EE| RESOURCE_BROKER: Assigning waiting task 'Tx{26, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (12 by [56:30:2062])' of unknown type 'small_transaction' to default queue 00001.120 DD| RESOURCE_BROKER: Allocate resources {0, 20480} for task Tx{26, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (12 by [56:30:2062]) from queue queue_default 00001.120 EE| RESOURCE_BROKER: Assigning in-fly task 'Tx{26, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (12 by [56:30:2062])' of unknown type 'small_transaction' to default queue 00001.120 DD| RESOURCE_BROKER: Updated planned resource usage for queue queue_default from 0.001192 to 0.002384 (insert task Tx{26, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (12 by [56:30:2062])) 00001.120 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{26, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} acquired dyn mem Res{12 20480b}, Memory{0 dyn 40960} 00001.120 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{26, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} hope 2 -> retry Change{2, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 00001.120 DD| TABLET_EXECUTOR: Leader{1:2:4} found attached Res{11 20480b} 00001.120 DD| TABLET_EXECUTOR: Leader{1:2:4} moving tx data from attached Res{11 20480b} to Res{12 ...} 00001.120 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{26, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} touch new 0b, 0b lo load (0b in total), 524267520b requested for data (524288000b in total) 00001.120 EE| TABLET_EXECUTOR: Leader{1:2:4} Tx{26, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} mem 524288000b terminated, limit 314572800b is exceeded 00001.120 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{26, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} release Res{12 40960b}, Memory{0 dyn 0} 00001.120 DD| RESOURCE_BROKER: Update task Tx{26, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (12 by [56:30:2062]) (priority=5 type=small_transaction resources={0, 40960} resubmit=0) 00001.120 EE| RESOURCE_BROKER: Assigning in-fly task 'Tx{26, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (12 by [56:30:2062])' of unknown type 'small_transaction' to default queue 00001.120 DD| RESOURCE_BROKER: Updated planned resource usage for queue queue_default from 0.001192 to 0.003576 (insert task Tx{26, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (12 by [56:30:2062])) 00001.120 DD| RESOURCE_BROKER: Finish task Tx{25, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (11 by [56:30:2062]) (release resources {0, 20480}) 00001.120 DD| RESOURCE_BROKER: Updated 
planned resource usage for queue queue_default from 0.003576 to 0.002384 (remove task Tx{25, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (11 by [56:30:2062])) 00001.120 DD| RESOURCE_BROKER: Finish task Tx{26, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (12 by [56:30:2062]) (release resources {0, 40960}) 00001.120 DD| RESOURCE_BROKER: Updated planned resource usage for queue queue_default from 0.002384 to 0.000000 (remove task Tx{26, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (12 by [56:30:2062])) 00001.121 II| TABLET_EXECUTOR: Leader{1:2:4} suiciding, Waste{2:0, 317b +(0, 0b), 3 trc, -0b acc} 00001.123 II| FAKE_ENV: Model starts hard shutdown on level 7 of 8, left 2 actors 00001.123 NN| TABLET_SAUSAGECACHE: Poison cache serviced 0 reqs hit {0 0b} miss {0 0b} 00001.123 II| FAKE_ENV: Shut order, stopping 4 BS groups 00001.123 II| FAKE_ENV: DS.0 gone, left {180b, 3}, put {200b, 4} 00001.124 II| FAKE_ENV: DS.1 gone, left {352b, 3}, put {352b, 3} 00001.124 II| FAKE_ENV: DS.2 gone, left {0b, 0}, put {0b, 0} 00001.124 II| FAKE_ENV: DS.3 gone, left {0b, 0}, put {0b, 0} 00001.124 II| FAKE_ENV: All BS storage groups are stopped 00001.124 II| FAKE_ENV: Model stopped, hosted 3 actors, spent 0.000s 00001.124 II| FAKE_ENV: Logged {Emerg 0 Alert 0 Crit 0 Error 45 Left 401}, stopped >> YdbYqlClient::TestDescribeTableWithShardStats [GOOD] >> YdbYqlClient::TestExplicitPartitioning |91.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/async_replication/py3test >> TRegisterNodeOverDiscoveryService::ServerWithCertVerification_ClientWithCorrectCerts [GOOD] >> TRegisterNodeOverDiscoveryService::ServerWithCertVerification_ClientProvidesEmptyClientCerts >> TRegisterNodeOverDiscoveryService::ServerWithOutCertVerification_ClientProvidesExpiredCert [GOOD] >> TRegisterNodeOverDiscoveryService::ServerWithoutCertVerification_ClientDoesNotProvideClientCerts ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_change_exchange/unittest >> Cdc::AwsRegion [GOOD] Test command err: 2025-05-07T08:57:21.495429Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:105:2151], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:57:21.495654Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:57:21.496046Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003122/r3tmp/tmpfIliVt/pdisk_1.dat 2025-05-07T08:57:21.949098Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-05-07T08:57:22.001764Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:57:22.065911Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:57:22.066089Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:57:22.078281Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:57:22.180501Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-05-07T08:57:22.224561Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:665:2569] 2025-05-07T08:57:22.224869Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-05-07T08:57:22.280069Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-05-07T08:57:22.280216Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-05-07T08:57:22.282122Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-05-07T08:57:22.282221Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-05-07T08:57:22.282284Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-05-07T08:57:22.282716Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-05-07T08:57:22.282898Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-05-07T08:57:22.283008Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:681:2569] in generation 1 2025-05-07T08:57:22.294553Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-05-07T08:57:22.339417Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-05-07T08:57:22.339685Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-05-07T08:57:22.339823Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 
72075186224037888, actorId: [1:683:2579] 2025-05-07T08:57:22.339864Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-05-07T08:57:22.339906Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-05-07T08:57:22.339946Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T08:57:22.340454Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-05-07T08:57:22.340565Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-05-07T08:57:22.340670Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T08:57:22.340740Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-05-07T08:57:22.340785Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-05-07T08:57:22.340847Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T08:57:22.340965Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:661:2566], serverId# [1:672:2573], sessionId# [0:0:0] 2025-05-07T08:57:22.341536Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-05-07T08:57:22.341811Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-05-07T08:57:22.341912Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-05-07T08:57:22.343923Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-05-07T08:57:22.354851Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-05-07T08:57:22.354996Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-05-07T08:57:22.536222Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:697:2587], serverId# [1:699:2589], sessionId# [0:0:0] 2025-05-07T08:57:22.542432Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715657 at step 1000 at tablet 72075186224037888 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1000 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2025-05-07T08:57:22.542542Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T08:57:22.542848Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T08:57:22.542903Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 
planned 1 2025-05-07T08:57:22.542986Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000:281474976715657] in PlanQueue unit at 72075186224037888 2025-05-07T08:57:22.543219Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1000:281474976715657 keys extracted: 0 2025-05-07T08:57:22.543358Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-05-07T08:57:22.543932Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T08:57:22.544002Z node 1 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-05-07T08:57:22.546447Z node 1 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-05-07T08:57:22.547007Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-05-07T08:57:22.549253Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-05-07T08:57:22.549329Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T08:57:22.552917Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-05-07T08:57:22.553025Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T08:57:22.554561Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T08:57:22.554619Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-05-07T08:57:22.554706Z node 1 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-05-07T08:57:22.554792Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [1:410:2405], exec latency: 0 ms, propose latency: 0 ms 2025-05-07T08:57:22.554865Z node 1 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-05-07T08:57:22.554979Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T08:57:22.556608Z node 1 :CHANGE_EXCHANGE DEBUG: change_sender.cpp:153: [ChangeSender][72075186224037888:1][1:683:2579][Inactive] Handle NKikimrChangeExchange.TEvActivateSender 2025-05-07T08:57:22.561765Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-05-07T08:57:22.563816Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-05-07T08:57:22.564008Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle 
TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-05-07T08:57:22.564095Z node 1 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-05-07T08:57:26.892583Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:311:2354], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T08:57:26.892852Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T08:57:26.892962Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003122/r3tmp/tmpykgXqO/pdisk_1.dat 2025-05-07T08:57:27.260348Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp ... Partition: 0, State: StateIdle] === DumpKeyValueRequest === 2025-05-07T09:00:18.616668Z node 21 :PERSQUEUE DEBUG: partition.cpp:2183: [PQ: 72075186224037889, Partition: 0, State: StateIdle] --- delete ---------------- 2025-05-07T09:00:18.616713Z node 21 :PERSQUEUE DEBUG: partition.cpp:2189: [PQ: 72075186224037889, Partition: 0, State: StateIdle] [x0000000000, x0000000001) 2025-05-07T09:00:18.616756Z node 21 :PERSQUEUE DEBUG: partition.cpp:2191: [PQ: 72075186224037889, Partition: 0, State: StateIdle] --- write ----------------- 2025-05-07T09:00:18.616797Z node 21 :PERSQUEUE DEBUG: partition.cpp:2194: [PQ: 72075186224037889, Partition: 0, State: StateIdle] m0000000000p72075186224037888 2025-05-07T09:00:18.616834Z node 21 :PERSQUEUE DEBUG: partition.cpp:2194: [PQ: 72075186224037889, Partition: 0, State: StateIdle] d0000000000_00000000000000000000_00000_0000000001_00000| 2025-05-07T09:00:18.616866Z node 21 :PERSQUEUE DEBUG: partition.cpp:2194: [PQ: 72075186224037889, Partition: 0, State: StateIdle] i0000000000 2025-05-07T09:00:18.616905Z node 21 :PERSQUEUE DEBUG: partition.cpp:2196: [PQ: 72075186224037889, Partition: 0, State: StateIdle] --- rename ---------------- 2025-05-07T09:00:18.616948Z node 21 :PERSQUEUE DEBUG: partition.cpp:2201: [PQ: 72075186224037889, Partition: 0, State: StateIdle] =========================== 2025-05-07T09:00:18.617189Z node 21 :PERSQUEUE DEBUG: read.h:262: CacheProxy. Passthrough write request to KV 2025-05-07T09:00:18.617293Z node 21 :PERSQUEUE DEBUG: read.h:300: CacheProxy. Passthrough blob. Partition 0 offset 0 partNo 0 count 1 size 427 2025-05-07T09:00:18.619223Z node 21 :PERSQUEUE DEBUG: cache_eviction.h:315: Caching head blob in L1. Partition 0 offset 0 count 1 size 426 actorID [21:962:2758] 2025-05-07T09:00:18.619709Z node 21 :PERSQUEUE DEBUG: pq_l2_cache.cpp:120: PQ Cache (L2). Adding blob. Tablet '72075186224037891' partition 0 offset 0 partno 0 count 1 parts 0 size 426 2025-05-07T09:00:18.620173Z node 21 :PERSQUEUE DEBUG: cache_eviction.h:315: Caching head blob in L1. Partition 0 offset 0 count 1 size 427 actorID [21:790:2654] 2025-05-07T09:00:18.620318Z node 21 :PERSQUEUE DEBUG: pq_l2_cache.cpp:120: PQ Cache (L2). Adding blob. 
Tablet '72075186224037889' partition 0 offset 0 partno 0 count 1 parts 0 size 427 >>>>> GetRecords path=/Root/Table/Stream1 partitionId=0 2025-05-07T09:00:18.622207Z node 21 :PERSQUEUE DEBUG: pq_impl.cpp:342: Handle TEvRequest topic: 'streamImpl' requestId: 2025-05-07T09:00:18.622367Z node 21 :PERSQUEUE DEBUG: pq_impl.cpp:2788: [PQ: 72075186224037889] got client message batch for topic 'Table/Stream1/streamImpl' partition 0 2025-05-07T09:00:18.623536Z node 21 :PERSQUEUE DEBUG: partition_read.cpp:731: [PQ: 72075186224037889, Partition: 0, State: StateIdle] read cookie 0 Topic 'Table/Stream1/streamImpl' partition 0 user $without_consumer offset 0 count 10000 size 26214400 endOffset 0 max time lag 0ms effective offset 0 2025-05-07T09:00:18.623645Z node 21 :PERSQUEUE DEBUG: subscriber.cpp:68: waiting read cookie 0 partition 0 user $without_consumer offset 0 count 10000 size 26214400 timeout 0 2025-05-07T09:00:18.623828Z node 21 :PERSQUEUE DEBUG: partition_read.cpp:420: FormAnswer for 0 blobs 2025-05-07T09:00:18.623970Z node 21 :PERSQUEUE DEBUG: partition_read.cpp:600: [PQ: 72075186224037889, Partition: 0, State: StateIdle] waiting read cookie 0 partition 0 read timeout for $without_consumer offset 0 2025-05-07T09:00:18.624139Z node 21 :PERSQUEUE DEBUG: pq_impl.cpp:377: Answer ok topic: 'streamImpl' partition: 0 messageNo: 0 requestId: cookie: 0 2025-05-07T09:00:18.636506Z node 21 :PERSQUEUE DEBUG: partition_write.cpp:524: [PQ: 72075186224037891, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 341 WriteNewSizeFromSupportivePartitions# 0 2025-05-07T09:00:18.636794Z node 21 :PERSQUEUE DEBUG: partition_write.cpp:58: [PQ: 72075186224037891, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-05-07T09:00:18.637018Z node 21 :PERSQUEUE DEBUG: partition_write.cpp:324: [PQ: 72075186224037891, Partition: 0, State: StateIdle] Answering for message sourceid: '\00072075186224037888', Topic: 'Table/Stream2/streamImpl', Partition: 0, SeqNo: 2, partNo: 0, Offset: 0 is stored on disk 2025-05-07T09:00:18.637627Z node 21 :PERSQUEUE DEBUG: partition_write.cpp:524: [PQ: 72075186224037889, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 342 WriteNewSizeFromSupportivePartitions# 0 2025-05-07T09:00:18.637708Z node 21 :PERSQUEUE DEBUG: partition_write.cpp:58: [PQ: 72075186224037889, Partition: 0, State: StateIdle] TPartition::ReplyWrite. 
Partition: 0 2025-05-07T09:00:18.637780Z node 21 :PERSQUEUE DEBUG: partition_write.cpp:324: [PQ: 72075186224037889, Partition: 0, State: StateIdle] Answering for message sourceid: '\00072075186224037888', Topic: 'Table/Stream1/streamImpl', Partition: 0, SeqNo: 1, partNo: 0, Offset: 0 is stored on disk 2025-05-07T09:00:18.638029Z node 21 :PERSQUEUE DEBUG: partition_read.cpp:774: [PQ: 72075186224037889, Partition: 0, State: StateIdle] Topic 'Table/Stream1/streamImpl' partition 0 user $without_consumer readTimeStamp for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 0 rrg 1 2025-05-07T09:00:18.638187Z node 21 :PERSQUEUE DEBUG: partition_read.cpp:816: [PQ: 72075186224037889, Partition: 0, State: StateIdle] Topic 'Table/Stream1/streamImpl' partition 0 user $without_consumer send read request for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 1 rrg 1 2025-05-07T09:00:18.638407Z node 21 :PERSQUEUE DEBUG: pq_impl.cpp:377: Answer ok topic: 'streamImpl' partition: 0 messageNo: 1 requestId: cookie: 1 2025-05-07T09:00:18.638722Z node 21 :PERSQUEUE DEBUG: pq_impl.cpp:377: Answer ok topic: 'streamImpl' partition: 0 messageNo: 1 requestId: cookie: 1 2025-05-07T09:00:18.638892Z node 21 :PERSQUEUE DEBUG: partition_read.cpp:731: [PQ: 72075186224037889, Partition: 0, State: StateIdle] read cookie 1 Topic 'Table/Stream1/streamImpl' partition 0 user $without_consumer offset 0 count 1 size 1024000 endOffset 1 max time lag 0ms effective offset 0 2025-05-07T09:00:18.639054Z node 21 :PERSQUEUE DEBUG: partition_read.cpp:931: [PQ: 72075186224037889, Partition: 0, State: StateIdle] read cookie 1 added 0 blobs, size 0 count 0 last offset 0, current partition end offset: 1 2025-05-07T09:00:18.639231Z node 21 :PERSQUEUE DEBUG: partition_read.cpp:948: [PQ: 72075186224037889, Partition: 0, State: StateIdle] Reading cookie 1. All data is from uncompacted head. 
2025-05-07T09:00:18.639352Z node 21 :PERSQUEUE DEBUG: partition_read.cpp:420: FormAnswer for 0 blobs 2025-05-07T09:00:18.639881Z node 21 :CHANGE_EXCHANGE DEBUG: change_sender_cdc_stream.cpp:160: [CdcChangeSenderPartition][72075186224037888:1][0][72075186224037889][21:1162:2704] Handle NKikimrClient.TResponse { SessionId: TxId: Success { Response: Status: 1 ErrorCode: OK PartitionResponse { CmdWriteResult { AlreadyWritten: false SourceId: "\00072075186224037888" SeqNo: 1 Offset: 0 WriteTimestampMS: 2535 PartitionQuotedTimeMs: 0 TotalTimeInPartitionQueueMs: 0 WriteTimeMs: 0 TopicQuotedTimeMs: 0 WrittenInTx: false } Cookie: 1 } } } 2025-05-07T09:00:18.640168Z node 21 :CHANGE_EXCHANGE DEBUG: change_sender_cdc_stream.cpp:643: [CdcChangeSenderMain][72075186224037888:1][21:866:2704] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 0 } 2025-05-07T09:00:18.640429Z node 21 :CHANGE_EXCHANGE DEBUG: change_sender_cdc_stream.cpp:160: [CdcChangeSenderPartition][72075186224037888:1][0][72075186224037891][21:1163:2802] Handle NKikimrClient.TResponse { SessionId: TxId: Success { Response: Status: 1 ErrorCode: OK PartitionResponse { CmdWriteResult { AlreadyWritten: false SourceId: "\00072075186224037888" SeqNo: 2 Offset: 0 WriteTimestampMS: 2535 PartitionQuotedTimeMs: 0 TotalTimeInPartitionQueueMs: 0 WriteTimeMs: 0 TopicQuotedTimeMs: 0 WrittenInTx: false } Cookie: 1 } } } 2025-05-07T09:00:18.640574Z node 21 :PERSQUEUE DEBUG: partition_read.cpp:856: Topic 'Table/Stream1/streamImpl' partition 0 user $without_consumer readTimeStamp done, result 2535 queuesize 0 startOffset 0 2025-05-07T09:00:18.640864Z node 21 :TX_DATASHARD INFO: datashard_change_sending.cpp:310: TTxRemoveChangeRecords Execute: records# 1, at tablet# 72075186224037888 2025-05-07T09:00:18.640971Z node 21 :TX_DATASHARD DEBUG: datashard.cpp:1087: RemoveChangeRecord: order: 1, at tablet: 72075186224037888 2025-05-07T09:00:18.641456Z node 21 :CHANGE_EXCHANGE DEBUG: change_sender_cdc_stream.cpp:643: [CdcChangeSenderMain][72075186224037888:1][21:1020:2802] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 0 } 2025-05-07T09:00:18.652550Z node 21 :TX_DATASHARD INFO: datashard_change_sending.cpp:335: TTxRemoveChangeRecords Complete: removed# 1, left# 1, at tablet# 72075186224037888 2025-05-07T09:00:18.652790Z node 21 :TX_DATASHARD INFO: datashard_change_sending.cpp:310: TTxRemoveChangeRecords Execute: records# 1, at tablet# 72075186224037888 2025-05-07T09:00:18.652875Z node 21 :TX_DATASHARD DEBUG: datashard.cpp:1087: RemoveChangeRecord: order: 2, at tablet: 72075186224037888 2025-05-07T09:00:18.667891Z node 21 :TX_DATASHARD INFO: datashard_change_sending.cpp:335: TTxRemoveChangeRecords Complete: removed# 1, left# 0, at tablet# 72075186224037888 >>>>> GetRecords path=/Root/Table/Stream1 partitionId=0 2025-05-07T09:00:18.993785Z node 21 :PERSQUEUE DEBUG: pq_impl.cpp:342: Handle TEvRequest topic: 'streamImpl' requestId: 2025-05-07T09:00:18.993877Z node 21 :PERSQUEUE DEBUG: pq_impl.cpp:2788: [PQ: 72075186224037889] got client message batch for topic 'Table/Stream1/streamImpl' partition 0 2025-05-07T09:00:18.994102Z node 21 :PERSQUEUE DEBUG: partition_read.cpp:731: [PQ: 72075186224037889, Partition: 0, State: StateIdle] read cookie 2 Topic 'Table/Stream1/streamImpl' partition 0 user $without_consumer offset 0 count 10000 size 26214400 endOffset 1 max time lag 0ms effective offset 0 2025-05-07T09:00:18.994214Z node 21 :PERSQUEUE DEBUG: partition_read.cpp:931: [PQ: 72075186224037889, Partition: 
0, State: StateIdle] read cookie 2 added 0 blobs, size 0 count 0 last offset 0, current partition end offset: 1 2025-05-07T09:00:18.994386Z node 21 :PERSQUEUE DEBUG: partition_read.cpp:948: [PQ: 72075186224037889, Partition: 0, State: StateIdle] Reading cookie 2. All data is from uncompacted head. 2025-05-07T09:00:18.994502Z node 21 :PERSQUEUE DEBUG: partition_read.cpp:420: FormAnswer for 0 blobs 2025-05-07T09:00:18.995409Z node 21 :PERSQUEUE DEBUG: pq_impl.cpp:377: Answer ok topic: 'streamImpl' partition: 0 messageNo: 0 requestId: cookie: 0 >>>>> GetRecords path=/Root/Table/Stream2 partitionId=0 2025-05-07T09:00:18.997382Z node 21 :PERSQUEUE DEBUG: pq_impl.cpp:342: Handle TEvRequest topic: 'streamImpl' requestId: 2025-05-07T09:00:18.997518Z node 21 :PERSQUEUE DEBUG: pq_impl.cpp:2788: [PQ: 72075186224037891] got client message batch for topic 'Table/Stream2/streamImpl' partition 0 2025-05-07T09:00:19.007024Z node 21 :PERSQUEUE DEBUG: partition_read.cpp:731: [PQ: 72075186224037891, Partition: 0, State: StateIdle] read cookie 0 Topic 'Table/Stream2/streamImpl' partition 0 user $without_consumer offset 0 count 10000 size 26214400 endOffset 1 max time lag 0ms effective offset 0 2025-05-07T09:00:19.007199Z node 21 :PERSQUEUE DEBUG: partition_read.cpp:931: [PQ: 72075186224037891, Partition: 0, State: StateIdle] read cookie 0 added 0 blobs, size 0 count 0 last offset 0, current partition end offset: 1 2025-05-07T09:00:19.007384Z node 21 :PERSQUEUE DEBUG: partition_read.cpp:948: [PQ: 72075186224037891, Partition: 0, State: StateIdle] Reading cookie 0. All data is from uncompacted head. 2025-05-07T09:00:19.007501Z node 21 :PERSQUEUE DEBUG: partition_read.cpp:420: FormAnswer for 0 blobs 2025-05-07T09:00:19.008444Z node 21 :PERSQUEUE DEBUG: pq_impl.cpp:377: Answer ok topic: 'streamImpl' partition: 0 messageNo: 0 requestId: cookie: 0 >> TCmsTest::RequestReplaceBrokenDevices [GOOD] >> TCmsTest::PermissionDuration >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_index_0__ASYNC-pk_types6-all_types6-index6---ASYNC] >> TCmsTenatsTest::TestTenantLimitForceRestartMode [GOOD] >> TCmsTenatsTest::TestTenantLimitForceRestartModeScheduled >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_ttl_Date-pk_types13-all_types13-index13-Date--] >> TopicAutoscaling::BalancingAfterSplit_sessionsWithPartition [GOOD] >> TPersQueueMirrorer::ValidStartStream >> TYqlDateTimeTests::SimpleUpsertSelect [GOOD] >> TYqlDateTimeTests::DatetimeKey |91.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/async_replication/py3test >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_ttl_DyNumber-pk_types8-all_types8-index8-DyNumber--] [GOOD] |91.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_stats/ydb-core-tx-datashard-ut_stats |91.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_stats/ydb-core-tx-datashard-ut_stats |91.6%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_stats/ydb-core-tx-datashard-ut_stats >> TCmsTest::PermissionDuration [GOOD] >> TCmsTest::RacyStartCollecting >> TGRpcYdbTest::MakeListRemoveDirectory ------- [TM] {asan, default-linux-x86_64, release} ydb/core/http_proxy/ut/inside_ydb_ut/unittest >> TestKinesisHttpProxy::TestEmptyHttpBody [GOOD] Test command err: 2025-05-07T08:58:58.842603Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501625691333783097:2203];send_to=[0:7307199536658146131:7762515]; 
2025-05-07T08:58:58.856680Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/001728/r3tmp/tmp5KqgS1/pdisk_1.dat 2025-05-07T08:58:59.599623Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:58:59.599819Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:58:59.603227Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:58:59.682990Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 19633, node 1 2025-05-07T08:58:59.938435Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:58:59.938455Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:58:59.938462Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:58:59.938578Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:64130 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:59:00.374736Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... TClient is connected to server localhost:64130 2025-05-07T08:59:00.670784Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:59:00.677021Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-05-07T08:59:00.678600Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T08:59:00.697283Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:59:00.849660Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-05-07T08:59:00.942929Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710663, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:59:00.947808Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:59:01.031942Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:59:01.077859Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:59:01.129225Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:59:01.173864Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:59:01.224680Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:59:01.285856Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:59:01.331080Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:59:02.878564Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501625708513653526:2375], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:59:02.878564Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501625708513653534:2378], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:59:02.878658Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:59:02.883248Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710673:3, at schemeshard: 72057594046644480 2025-05-07T08:59:02.900506Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501625708513653540:2379], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710673 completed, doublechecking } 2025-05-07T08:59:02.996229Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501625708513653593:2866] txid# 281474976710674, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 18], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:59:03.375022Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710675. Ctx: { TraceId: 01jtmzghtvdftppy8hd17cffr1, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YmY3ZDhjNGItN2FkYmRhMzUtMjY1Njk1ZDAtOGIzNTdiMDY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:59:03.428378Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710676:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:59:03.480633Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710677:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:59:03.540862Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710678:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:59:03.588898Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710679:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:59:03.630900Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710680:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:59:03.690363Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710681:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:59:03.781426Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710682:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:59:03.819779Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710683:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T08:59:03.837994Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501625691333783097:2203];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:59:03.838099Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:59:03.856799Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710684:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:59:03.969358Z node 1 :HTTP INFO: http_proxy_acceptor.cpp:88: Listening on http://127.0.0.1:8175 ... } } Struct { Optional { Bool: false } } } } } 2025-05-07T09:00:25.338263Z node 8 :SQS DEBUG: executor.cpp:287: Request [] Query(idx=GET_USER_SETTINGS_ID) Queue [] Attempt 1 execution duration: 41ms 2025-05-07T09:00:25.338598Z node 8 :SQS TRACE: executor.cpp:325: Request [] Query(idx=GET_USER_SETTINGS_ID) Queue [] Sending mkql execution result: { Status: 48 TxId: 281474976710685 StatusCode: SUCCESS ExecutionEngineStatus: 1 ExecutionEngineResponseStatus: 2 ExecutionEngineEvaluatedResponse { Type { Kind: Struct Struct { Member { Name: "settings" Type { Kind: Optional Optional { Item { Kind: List List { Item { Kind: Struct Struct { Member { Name: "Account" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "Name" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "Value" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } } } } } } } } Member { Name: "truncated" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } } } Value { Struct { Optional { } } Struct { Optional { Bool: false } } } } } 2025-05-07T09:00:25.338612Z node 8 :SQS TRACE: executor.cpp:286: Request [] Query(idx=GET_QUEUES_LIST_ID) Queue [] HandleResponse { Status: 48 TxId: 281474976710686 StatusCode: SUCCESS ExecutionEngineStatus: 1 ExecutionEngineResponseStatus: 2 ExecutionEngineEvaluatedResponse { Type { Kind: Struct Struct { Member { Name: "queues" Type { Kind: Optional Optional { Item { Kind: List List { Item { Kind: Struct Struct { Member { Name: "Account" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "CreatedTimestamp" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "CustomQueueName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "DlqName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "FifoQueue" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } Member { Name: "FolderId" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "MasterTabletId" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "QueueName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "QueueState" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "Shards" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "TablesFormat" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 2 } } } } } 
Member { Name: "Version" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } } } } } } } } Member { Name: "truncated" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } } } Value { Struct { Optional { } } Struct { Optional { Bool: false } } } } } 2025-05-07T09:00:25.338636Z node 8 :SQS DEBUG: executor.cpp:287: Request [] Query(idx=GET_QUEUES_LIST_ID) Queue [] Attempt 1 execution duration: 35ms 2025-05-07T09:00:25.338651Z node 8 :SQS TRACE: executor.cpp:327: Request [] Query(idx=GET_USER_SETTINGS_ID) Queue [] Minikql data response: {"settings": [], "truncated": false} 2025-05-07T09:00:25.338768Z node 8 :SQS DEBUG: executor.cpp:401: Request [] Query(idx=GET_USER_SETTINGS_ID) Queue [] execution duration: 41ms 2025-05-07T09:00:25.339110Z node 8 :SQS TRACE: executor.cpp:325: Request [] Query(idx=GET_QUEUES_LIST_ID) Queue [] Sending mkql execution result: { Status: 48 TxId: 281474976710686 StatusCode: SUCCESS ExecutionEngineStatus: 1 ExecutionEngineResponseStatus: 2 ExecutionEngineEvaluatedResponse { Type { Kind: Struct Struct { Member { Name: "queues" Type { Kind: Optional Optional { Item { Kind: List List { Item { Kind: Struct Struct { Member { Name: "Account" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "CreatedTimestamp" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "CustomQueueName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "DlqName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "FifoQueue" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } Member { Name: "FolderId" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "MasterTabletId" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "QueueName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "QueueState" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "Shards" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "TablesFormat" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 2 } } } } } Member { Name: "Version" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } } } } } } } } Member { Name: "truncated" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } } } Value { Struct { Optional { } } Struct { Optional { Bool: false } } } } } 2025-05-07T09:00:25.339142Z node 8 :SQS TRACE: executor.cpp:327: Request [] Query(idx=GET_QUEUES_LIST_ID) Queue [] Minikql data response: {"queues": [], "truncated": false} 2025-05-07T09:00:25.339145Z node 8 :SQS TRACE: user_settings_reader.cpp:89: Handle user settings: { Status: 48 TxId: 281474976710685 StatusCode: SUCCESS ExecutionEngineStatus: 1 ExecutionEngineResponseStatus: 2 ExecutionEngineEvaluatedResponse { Type { Kind: Struct Struct { Member { Name: "settings" Type { Kind: Optional Optional { Item { Kind: List List { Item { Kind: Struct Struct { Member { Name: "Account" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "Name" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "Value" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } 
} } } } } } } } } } Member { Name: "truncated" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } } } Value { Struct { Optional { } } Struct { Optional { Bool: false } } } } } 2025-05-07T09:00:25.339284Z node 8 :SQS DEBUG: executor.cpp:401: Request [] Query(idx=GET_QUEUES_LIST_ID) Queue [] execution duration: 41ms 2025-05-07T09:00:25.339870Z node 8 :SQS TRACE: queues_list_reader.cpp:82: Handle queues list: { Status: 48 TxId: 281474976710686 StatusCode: SUCCESS ExecutionEngineStatus: 1 ExecutionEngineResponseStatus: 2 ExecutionEngineEvaluatedResponse { Type { Kind: Struct Struct { Member { Name: "queues" Type { Kind: Optional Optional { Item { Kind: List List { Item { Kind: Struct Struct { Member { Name: "Account" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "CreatedTimestamp" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "CustomQueueName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "DlqName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "FifoQueue" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } Member { Name: "FolderId" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "MasterTabletId" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "QueueName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "QueueState" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "Shards" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "TablesFormat" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 2 } } } } } Member { Name: "Version" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } } } } } } } } Member { Name: "truncated" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } } } Value { Struct { Optional { } } Struct { Optional { Bool: false } } } } } 2025-05-07T09:00:26.061325Z node 8 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:552: [WorkloadService] [Service] Reply cleanup error NOT_FOUND to [8:7501626066218166431:2466]: Pool not found 2025-05-07T09:00:26.063957Z node 8 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:552: [WorkloadService] [Service] Reply cleanup error NOT_FOUND to [8:7501626066218166427:2463]: Pool not found 2025-05-07T09:00:26.064984Z node 8 :SQS DEBUG: monitoring.cpp:60: [monitoring] Report deletion queue data lag: 0.000000s, count: 0 2025-05-07T09:00:26.065086Z node 8 :SQS DEBUG: cleanup_queue_data.cpp:100: [cleanup removed queues] getting queues... 2025-05-07T09:00:26.069874Z node 8 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [8:7501626070513133849:2488], DatabaseId: /Root/SQS, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:00:26.070003Z node 8 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:602: [WorkloadService] [TDatabaseFetcherActor] ActorId: [8:7501626070513133850:2489], Database: /Root/SQS, Failed to fetch database info, UNSUPPORTED, issues: {
: Error: Invalid database path /Root/SQS, please check the correctness of the path } 2025-05-07T09:00:26.070069Z node 8 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root/SQS, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:00:26.202423Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:83: (#37,[::1]:53804) incoming connection opened 2025-05-07T09:00:26.202511Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:145: (#37,[::1]:53804) -> (POST /Root) 2025-05-07T09:00:26.202711Z node 8 :HTTP_PROXY INFO: http_service.cpp:102: proxy service: incoming request from [5873:501:6050:0:4073:501:6050:0] request [CreateStream] url [/Root] database [/Root] requestId: f2e7adba-4cc11a06-739aedd-da119971 2025-05-07T09:00:26.203383Z node 8 :HTTP_PROXY INFO: http_req.cpp:1211: http request [CreateStream] requestId [f2e7adba-4cc11a06-739aedd-da119971] reply with status: BAD_REQUEST message: ydb/core/http_proxy/json_proto_conversion.h:395: Top level of json value is not a map 2025-05-07T09:00:26.203522Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:243: (#37,[::1]:53804) <- (400 MissingParameter) 2025-05-07T09:00:26.203599Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:252: (#37,[::1]:53804) Request: POST /Root HTTP/1.1 Host: example.amazonaws.com X-Amz-Target: kinesisApi.CreateStream X-Amz-Date: 20150830T123600Z Authorization: Content-Type: application/json Connection: Close Transfer-Encoding: chunked 4 null 0 Http output full {"__type":"MissingParameter","message":"ydb/core/http_proxy/json_proto_conversion.h:395: Top level of json value is not a map"} 2025-05-07T09:00:26.203640Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:259: (#37,[::1]:53804) Response: HTTP/1.1 400 MissingParameter Connection: close x-amzn-requestid: f2e7adba-4cc11a06-739aedd-da119971 x-amz-crc32: 851558042 Content-Type: application/x-amz-json-1.1 Content-Length: 127 {"__type":"MissingParameter","message":"ydb/core/http_proxy/json_proto_conversion.h:395: Top level of json value is not a map"} 2025-05-07T09:00:26.203770Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:296: (#37,[::1]:53804) connection closed |91.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_column_stats/ydb-core-tx-datashard-ut_column_stats |91.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_column_stats/ydb-core-tx-datashard-ut_column_stats |91.6%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_column_stats/ydb-core-tx-datashard-ut_column_stats >> TCmsTenatsTest::TestTenantLimitForceRestartModeScheduled [GOOD] |91.6%| [TA] $(B)/ydb/core/tablet_flat/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} >> Cdc::InitialScan_TopicAutoPartitioning [GOOD] >> Cdc::InitialScanUpdatedRows >> TGRpcLdapAuthentication::LdapAuthWithEmptyPassword [GOOD] >> TRegisterNodeOverDiscoveryService::ServerWithoutCertVerification_ClientProvidesEmptyClientCerts [GOOD] >> TRegisterNodeOverLegacyService::ServerWithCertVerification_ClientWithCorrectCerts >> WithSDK::DescribeConsumer [GOOD] |91.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/cms/ut/unittest >> TCmsTenatsTest::TestTenantLimitForceRestartModeScheduled [GOOD] |91.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_sequence_reboots/ydb-core-tx-schemeshard-ut_sequence_reboots |91.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_sequence_reboots/ydb-core-tx-schemeshard-ut_sequence_reboots >> TopicAutoscaling::PartitionSplit_ManySession_PQv1 [GOOD] >> TopicAutoscaling::PartitionSplit_ManySession_existed_AutoscaleAwareSDK >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_ttl_Uint64-pk_types10-all_types10-index10-Uint64--] [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> TGRpcLdapAuthentication::LdapAuthWithEmptyPassword [GOOD] Test command err: 2025-05-07T08:59:49.003273Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501625906422900020:2210];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:59:49.003560Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0028a3/r3tmp/tmpjCbMiO/pdisk_1.dat 2025-05-07T08:59:49.970669Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:59:49.981450Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:59:49.981577Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:59:49.997133Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 63881, node 1 2025-05-07T08:59:50.462660Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:59:50.462681Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:59:50.462688Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:59:50.462802Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:31547 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:59:51.071456Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:59:56.731366Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7501625940675116441:2220];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:59:56.731497Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0028a3/r3tmp/tmpfE8fti/pdisk_1.dat 2025-05-07T08:59:57.032981Z node 4 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:59:57.076834Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:59:57.076916Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:59:57.091273Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 4748, node 4 2025-05-07T08:59:57.349257Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:59:57.362076Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:59:57.362099Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:59:57.362313Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:10311 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:59:57.872202Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:00:04.210166Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7501625975893836065:2210];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:00:04.210689Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0028a3/r3tmp/tmpRimThb/pdisk_1.dat 2025-05-07T09:00:04.791151Z node 7 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:00:04.880541Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:00:04.898284Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:00:04.920579Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 11501, node 7 2025-05-07T09:00:05.259492Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:00:05.259517Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:00:05.259524Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:00:05.259663Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:17412 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:00:05.953789Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:00:13.145375Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7501626014917061720:2082];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:00:13.190290Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0028a3/r3tmp/tmpzkYAm0/pdisk_1.dat 2025-05-07T09:00:13.594393Z node 10 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:00:13.674840Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:00:13.674933Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:00:13.683721Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 6847, node 10 2025-05-07T09:00:14.088175Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:00:14.088200Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:00:14.088213Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:00:14.088376Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:7435 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:00:14.629596Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:00:23.572150Z node 13 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[13:7501626059956963562:2192];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:00:23.572229Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0028a3/r3tmp/tmpabimUH/pdisk_1.dat 2025-05-07T09:00:24.127940Z node 13 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:00:24.202578Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:00:24.202696Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:00:24.209915Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 9194, node 13 2025-05-07T09:00:24.527482Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:00:24.527512Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:00:24.527520Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:00:24.527714Z node 13 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:10040 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-05-07T09:00:25.317137Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 |91.6%| [TA] {RESULT} $(B)/ydb/core/tablet_flat/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> YdbYqlClient::TestDecimal35 [GOOD] >> YdbYqlClient::TestDecimalFullStack |91.6%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_sequence_reboots/ydb-core-tx-schemeshard-ut_sequence_reboots |91.6%| [TA] $(B)/ydb/core/http_proxy/ut/inside_ydb_ut/test-results/unittest/{meta.json ... results_accumulator.log} >> CommitOffset::DistributedTxCommit_ChildFirst [GOOD] >> CommitOffset::DistributedTxCommit_CheckSessionResetAfterCommit >> TCmsTest::RacyStartCollecting [GOOD] >> TCmsTest::PriorityRange >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_index_2__SYNC-pk_types2-all_types2-index2---SYNC] >> YdbYqlClient::TestTzTypesFullStack >> TopicAutoscaling::PartitionSplit_ReadNotEmptyPartitions_PQv1 [GOOD] >> TopicAutoscaling::PartitionSplit_ReadNotEmptyPartitions_AutoscaleAwareSDK >> TopicAutoscaling::ReadingAfterSplitTest_PQv1 [GOOD] >> TopicAutoscaling::ReadingAfterSplitTest_PreferedPartition_AutoscaleAwareSDK >> YdbTableBulkUpsert::Nulls >> TGRpcClientLowTest::GrpcRequestProxy >> TGRpcYdbTest::ExecuteQueryBadRequest >> Cdc::InitialScanComplete [GOOD] >> Cdc::InitialScanEnqueuesZeroRecords >> TPersQueueTest::PartitionsMapping [GOOD] >> TPersQueueTest::MessageMetadata >> TCmsTest::PriorityRange [GOOD] |91.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tools/query_replay/ydb_query_replay |91.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tools/query_replay/ydb_query_replay >> YdbTableBulkUpsertOlap::UpsertCSV_DataShard [GOOD] >> YdbTableBulkUpsertOlap::UpsertMixed ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/ut_with_sdk/unittest >> WithSDK::DescribeConsumer [GOOD] Test command err: 2025-05-07T08:58:00.193889Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501625445418086405:2067];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:58:00.193930Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-05-07T08:58:00.404357Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # 
/home/runner/.ya/build/build_root/zvgn/004027/r3tmp/tmpOxTKYZ/pdisk_1.dat 2025-05-07T08:58:00.666477Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:58:00.680090Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:58:00.680189Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:58:00.681677Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 7193, node 1 2025-05-07T08:58:00.751561Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/zvgn/004027/r3tmp/yandex1fHqvd.tmp 2025-05-07T08:58:00.751590Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/zvgn/004027/r3tmp/yandex1fHqvd.tmp 2025-05-07T08:58:00.751794Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/zvgn/004027/r3tmp/yandex1fHqvd.tmp 2025-05-07T08:58:00.751929Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-05-07T08:58:00.804245Z INFO: TTestServer started on Port 3360 GrpcPort 7193 TClient is connected to server localhost:3360 PQClient connected to localhost:7193 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:58:01.087303Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... waiting... waiting... 2025-05-07T08:58:01.119497Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-05-07T08:58:03.242003Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501625458302989101:2339], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:58:03.242220Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:58:03.242634Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501625458302989113:2342], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:58:03.246802Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710662:3, at schemeshard: 72057594046644480 2025-05-07T08:58:03.255151Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501625458302989146:2346], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:58:03.255246Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:58:03.262282Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501625458302989115:2343], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710662 completed, doublechecking } 2025-05-07T08:58:03.635644Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501625458302989171:2439] txid# 281474976710663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:58:03.693906Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T08:58:03.768219Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T08:58:03.846773Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7501625458302989187:2349], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:5:17: Error: At function: KiReadTable!
:5:17: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Versions]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-05-07T08:58:03.848856Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2147: SessionId: ydb://session/3?node_id=1&id=YmY3NjBhYTctOTdkMmUyNjItYWQ3MzJiNTMtMjk1YTg2NWM=, ActorId: [1:7501625458302989098:2337], ActorState: ExecuteState, TraceId: 01jtmzeqk12ehh8pfh89t20bpw, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-05-07T08:58:03.851164Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 5 column: 17 } message: "At function: KiReadTable!" end_position { row: 5 column: 17 } severity: 1 issues { position { row: 5 column: 17 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Versions]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 5 column: 17 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-05-07T08:58:03.890992Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); === CheckClustersList. Subcribe to ClusterTracker from [1:7501625462597956772:2617] 2025-05-07T08:58:05.198089Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501625445418086405:2067];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:58:05.198210Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; === CheckClustersList. 
Ok 2025-05-07T08:58:10.377502Z :TopicSplitMerge INFO: TTopicSdkTestSetup started 2025-05-07T08:58:10.412241Z node 1 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:132: new create topic request 2025-05-07T08:58:10.413207Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269877761, Sender [1:7501625488367760742:2690], Recipient [1:7501625445418086813:2184]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-05-07T08:58:10.413234Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4937: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-05-07T08:58:10.413251Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5713: Pipe server connected, at tablet: 72057594046644480 2025-05-07T08:58:10.413278Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271122432, Sender [1:7501625488367760738:2687], Recipient [1:7501625445418086813:2184]: {TEvModifySchemeTransaction txid# 281474976710673 TabletId# 72057594046644480} 2025-05-07T08:58:10.413287Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4851: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-05-07T08:58:10.495531Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/Root" OperationType: ESchemeOpCreatePersQueueGroup CreatePersQueueGroup { Name: "test-topic" TotalGroupCount: 1 PartitionPerTablet: 1 PQTabletConfig { PartitionConfig { MaxCountInPartition: 2147483647 LifetimeSeconds: 86400 SourceIdLifetimeSeconds: 1382400 WriteSpeedInBytesPerSecond: 1048576 BurstSize: 1048576 ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } SourceIdMaxCounts: 6000000 } RequireAuthWrite: true RequireAuthRead: true FormatVersion: 0 Codecs { } PartitionStrategy { MinPartitionCount: 1 MaxPartitionCount: 100 ScaleThresholdSeconds: 300 ScaleUpPartitionWriteSpeedThresholdPercent: 90 ScaleDownPartitionWriteSpeedThresholdPercent: 30 PartitionStrategyType: CAN_SPLIT } Consumers { Name: "test-consumer" ReadFromTimestampsMs: 0 FormatVersion: 0 Codec { } ServiceType: "data-streams" Version: 0 } } } } TxId: 281474976710673 ... 
ic] pipe [7:7501626075152431526:2845] disconnected; active server actors: 1 2025-05-07T09:00:27.686770Z node 7 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1673: [72075186224037893][test-topic] pipe [7:7501626075152431526:2845] client test-consumer disconnected session test-consumer_7_1_4951138396332053187_v1 2025-05-07T09:00:27.694190Z node 7 :PERSQUEUE TRACE: partition.h:561: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7501626015022888077:2445], Partition 0, Sender [0:0:0], Recipient [7:7501626015022888139:2449], Cookie: 0 2025-05-07T09:00:27.694269Z node 7 :PERSQUEUE TRACE: partition.h:563: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7501626015022888139:2449]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-05-07T09:00:27.694311Z node 7 :PERSQUEUE TRACE: partition.h:589: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-05-07T09:00:27.694366Z node 7 :PERSQUEUE TRACE: partition.cpp:398: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete old stuff 2025-05-07T09:00:27.694433Z node 7 :PERSQUEUE TRACE: partition.cpp:407: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-05-07T09:00:27.694460Z node 7 :PERSQUEUE TRACE: partition_write.cpp:163: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ProcessReserveRequests. 2025-05-07T09:00:27.694496Z node 7 :PERSQUEUE TRACE: partition_write.cpp:252: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-05-07T09:00:27.727449Z node 7 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:159: new Describe consumer request 2025-05-07T09:00:27.727585Z node 7 :PQ_READ_PROXY DEBUG: schema_actors.cpp:473: TDescribeConsumerActor for request operation_params { } path: "test-topic" consumer: "test-consumer" include_stats: true include_location: true 2025-05-07T09:00:27.728621Z node 7 :PQ_READ_PROXY DEBUG: schema_actors.cpp:657: DescribeTopicImpl [7:7501626075152431540:2854]: Request location 2025-05-07T09:00:27.728685Z node 7 :PQ_READ_PROXY DEBUG: schema_actors.cpp:686: DescribeTopicImpl [7:7501626075152431540:2854]: Request sessions 2025-05-07T09:00:27.729420Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5200: HandleHook, received event# 269877761, Sender [7:7501626075152431544:3241], Recipient [7:7501626015022888077:2445]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-05-07T09:00:27.729457Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5218: HandleHook, processing event TEvTabletPipe::TEvServerConnected 2025-05-07T09:00:27.729481Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:2869: [PQ: 72075186224037892] Handle TEvTabletPipe::TEvServerConnected 2025-05-07T09:00:27.729524Z node 7 :PERSQUEUE DEBUG: pq_impl.cpp:2874: [PQ: 72075186224037892] server connected, pipe [7:7501626075152431542:2855], now have 1 active actors on pipe 2025-05-07T09:00:27.729600Z node 7 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72075186224037893][test-topic] pipe [7:7501626075152431543:2856] connected; active server actors: 1 2025-05-07T09:00:27.729944Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5200: HandleHook, received event# 271187975, Sender [7:7501626075152431540:2854], Recipient [7:7501626015022888077:2445]: NKikimrPQ.TStatus ClientId: "test-consumer" 2025-05-07T09:00:27.729993Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5208: HandleHook, processing event TEvPersQueue::TEvStatus 2025-05-07T09:00:27.730014Z node 7 :PERSQUEUE TRACE: 
pq_impl.cpp:1797: [PQ: 72075186224037892] Handle TEvPersQueue::TEvStatus 2025-05-07T09:00:27.730082Z node 7 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037893][test-topic] addPartitionToResponse tabletId 72075186224037892, partitionId 0, NodeId 7, Generation 1 2025-05-07T09:00:27.730183Z node 7 :PERSQUEUE TRACE: partition.h:561: StateIdle event# 271188491 (NKikimr::TEvPQ::TEvPartitionStatus), Tablet [7:7501626015022888077:2445], Partition 0, Sender [7:7501626015022888077:2445], Recipient [7:7501626015022888139:2449], Cookie: 0 2025-05-07T09:00:27.730239Z node 7 :PERSQUEUE TRACE: partition.h:563: StateIdle, received event# 271188491, Sender [7:7501626015022888077:2445], Recipient [7:7501626015022888139:2449]: NKikimr::TEvPQ::TEvPartitionStatus 2025-05-07T09:00:27.730266Z node 7 :PERSQUEUE TRACE: partition.h:581: StateIdle, processing event TEvPQ::TEvPartitionStatus 2025-05-07T09:00:27.730510Z node 7 :PERSQUEUE DEBUG: partition.cpp:855: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Topic PartitionStatus PartitionSize: 0 UsedReserveSize: 0 ReserveSize: 0 PartitionConfig{ MaxCountInPartition: 2147483647 LifetimeSeconds: 86400 SourceIdLifetimeSeconds: 1382400 WriteSpeedInBytesPerSecond: 1048576 BurstSize: 1048576 TotalPartitions: 1 ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } SourceIdMaxCounts: 6000000 } 2025-05-07T09:00:27.730613Z node 7 :PQ_READ_PROXY DEBUG: schema_actors.cpp:750: DescribeTopicImpl [7:7501626075152431540:2854]: Got location 2025-05-07T09:00:27.730644Z node 7 :PQ_READ_PROXY DEBUG: schema_actors.cpp:729: DescribeTopicImpl [7:7501626075152431540:2854]: Got sessions 2025-05-07T09:00:27.733999Z node 7 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037893][test-topic] pipe [7:7501626075152431543:2856] disconnected; active server actors: 1 2025-05-07T09:00:27.734035Z node 7 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1688: [72075186224037893][test-topic] pipe [7:7501626075152431543:2856] disconnected no session 2025-05-07T09:00:27.734132Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5200: HandleHook, received event# 269877764, Sender [7:7501626075152431544:3241], Recipient [7:7501626015022888077:2445]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-05-07T09:00:27.734162Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5219: HandleHook, processing event TEvTabletPipe::TEvServerDisconnected 2025-05-07T09:00:27.734184Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:2882: [PQ: 72075186224037892] Handle TEvTabletPipe::TEvServerDisconnected 2025-05-07T09:00:27.734212Z node 7 :PERSQUEUE DEBUG: pq_impl.cpp:2899: [PQ: 72075186224037892] server disconnected, pipe [7:7501626075152431542:2855] destroyed 2025-05-07T09:00:27.796974Z node 7 :PERSQUEUE TRACE: partition.h:561: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7501626015022888077:2445], Partition 0, Sender [0:0:0], Recipient [7:7501626015022888139:2449], Cookie: 0 2025-05-07T09:00:27.797063Z node 7 :PERSQUEUE TRACE: partition.h:563: StateIdle, received event# 
271188505, Sender [0:0:0], Recipient [7:7501626015022888139:2449]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-05-07T09:00:27.797099Z node 7 :PERSQUEUE TRACE: partition.h:589: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-05-07T09:00:27.797155Z node 7 :PERSQUEUE TRACE: partition.cpp:398: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete old stuff 2025-05-07T09:00:27.797261Z node 7 :PERSQUEUE TRACE: partition.cpp:407: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-05-07T09:00:27.797293Z node 7 :PERSQUEUE TRACE: partition_write.cpp:163: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ProcessReserveRequests. 2025-05-07T09:00:27.797332Z node 7 :PERSQUEUE TRACE: partition_write.cpp:252: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-05-07T09:00:27.902257Z node 7 :PERSQUEUE TRACE: partition.h:561: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7501626015022888077:2445], Partition 0, Sender [0:0:0], Recipient [7:7501626015022888139:2449], Cookie: 0 2025-05-07T09:00:27.902350Z node 7 :PERSQUEUE TRACE: partition.h:563: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7501626015022888139:2449]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-05-07T09:00:27.902380Z node 7 :PERSQUEUE TRACE: partition.h:589: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-05-07T09:00:27.902437Z node 7 :PERSQUEUE TRACE: partition.cpp:398: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete old stuff 2025-05-07T09:00:27.902519Z node 7 :PERSQUEUE TRACE: partition.cpp:407: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-05-07T09:00:27.902545Z node 7 :PERSQUEUE TRACE: partition_write.cpp:163: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ProcessReserveRequests. 2025-05-07T09:00:27.902577Z node 7 :PERSQUEUE TRACE: partition_write.cpp:252: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-05-07T09:00:28.000847Z node 7 :PERSQUEUE TRACE: partition.h:561: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7501626015022888077:2445], Partition 0, Sender [0:0:0], Recipient [7:7501626015022888139:2449], Cookie: 0 2025-05-07T09:00:28.000947Z node 7 :PERSQUEUE TRACE: partition.h:563: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7501626015022888139:2449]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-05-07T09:00:28.000984Z node 7 :PERSQUEUE TRACE: partition.h:589: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-05-07T09:00:28.001056Z node 7 :PERSQUEUE TRACE: partition.cpp:398: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete old stuff 2025-05-07T09:00:28.001140Z node 7 :PERSQUEUE TRACE: partition.cpp:407: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-05-07T09:00:28.001171Z node 7 :PERSQUEUE TRACE: partition_write.cpp:163: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ProcessReserveRequests. 
2025-05-07T09:00:28.001207Z node 7 :PERSQUEUE TRACE: partition_write.cpp:252: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-05-07T09:00:28.102559Z node 7 :PERSQUEUE TRACE: partition.h:561: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7501626015022888077:2445], Partition 0, Sender [0:0:0], Recipient [7:7501626015022888139:2449], Cookie: 0 2025-05-07T09:00:28.102648Z node 7 :PERSQUEUE TRACE: partition.h:563: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7501626015022888139:2449]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-05-07T09:00:28.102681Z node 7 :PERSQUEUE TRACE: partition.h:589: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-05-07T09:00:28.102734Z node 7 :PERSQUEUE TRACE: partition.cpp:398: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete old stuff 2025-05-07T09:00:28.102810Z node 7 :PERSQUEUE TRACE: partition.cpp:407: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-05-07T09:00:28.102841Z node 7 :PERSQUEUE TRACE: partition_write.cpp:163: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ProcessReserveRequests. 2025-05-07T09:00:28.102892Z node 7 :PERSQUEUE TRACE: partition_write.cpp:252: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 |91.6%| [TA] {RESULT} $(B)/ydb/core/http_proxy/ut/inside_ydb_ut/test-results/unittest/{meta.json ... results_accumulator.log} |91.6%| [LD] {RESULT} $(B)/ydb/tools/query_replay/ydb_query_replay >> TopicAutoscaling::ControlPlane_PauseAutoPartitioning [GOOD] >> TopicAutoscaling::ControlPlane_CDC_Enable ------- [TM] {asan, default-linux-x86_64, release} ydb/core/cms/ut/unittest >> TCmsTest::PriorityRange [GOOD] Test command err: 2025-05-07T09:00:32.249769Z node 17 :CMS ERROR: cluster_info.cpp:489: Cannot update state for unknown PDisk 17:17 2025-05-07T09:00:32.249893Z node 17 :CMS ERROR: cluster_info.cpp:489: Cannot update state for unknown PDisk 18:18 2025-05-07T09:00:32.249923Z node 17 :CMS ERROR: cluster_info.cpp:489: Cannot update state for unknown PDisk 19:19 2025-05-07T09:00:32.249947Z node 17 :CMS ERROR: cluster_info.cpp:489: Cannot update state for unknown PDisk 20:20 2025-05-07T09:00:32.249990Z node 17 :CMS ERROR: cluster_info.cpp:489: Cannot update state for unknown PDisk 21:21 2025-05-07T09:00:32.250038Z node 17 :CMS ERROR: cluster_info.cpp:489: Cannot update state for unknown PDisk 22:22 2025-05-07T09:00:32.250068Z node 17 :CMS ERROR: cluster_info.cpp:489: Cannot update state for unknown PDisk 23:23 2025-05-07T09:00:32.250093Z node 17 :CMS ERROR: cluster_info.cpp:489: Cannot update state for unknown PDisk 24:24 2025-05-07T09:00:32.259747Z node 17 :CMS ERROR: cluster_info.cpp:489: Cannot update state for unknown PDisk 17:17 2025-05-07T09:00:32.259846Z node 17 :CMS ERROR: cluster_info.cpp:489: Cannot update state for unknown PDisk 18:18 2025-05-07T09:00:32.259884Z node 17 :CMS ERROR: cluster_info.cpp:489: Cannot update state for unknown PDisk 19:19 2025-05-07T09:00:32.259908Z node 17 :CMS ERROR: cluster_info.cpp:489: Cannot update state for unknown PDisk 20:20 2025-05-07T09:00:32.259931Z node 17 :CMS ERROR: cluster_info.cpp:489: Cannot update state for unknown PDisk 21:21 2025-05-07T09:00:32.259953Z node 17 :CMS ERROR: cluster_info.cpp:489: Cannot update state for unknown PDisk 22:22 2025-05-07T09:00:32.260015Z node 
17 :CMS ERROR: cluster_info.cpp:489: Cannot update state for unknown PDisk 23:23 2025-05-07T09:00:32.260049Z node 17 :CMS ERROR: cluster_info.cpp:489: Cannot update state for unknown PDisk 24:24 2025-05-07T09:00:32.338790Z node 17 :CMS ERROR: cluster_info.cpp:489: Cannot update state for unknown PDisk 17:17 2025-05-07T09:00:32.338868Z node 17 :CMS ERROR: cluster_info.cpp:489: Cannot update state for unknown PDisk 18:18 2025-05-07T09:00:32.338894Z node 17 :CMS ERROR: cluster_info.cpp:489: Cannot update state for unknown PDisk 19:19 2025-05-07T09:00:32.338918Z node 17 :CMS ERROR: cluster_info.cpp:489: Cannot update state for unknown PDisk 20:20 2025-05-07T09:00:32.338942Z node 17 :CMS ERROR: cluster_info.cpp:489: Cannot update state for unknown PDisk 21:21 2025-05-07T09:00:32.338964Z node 17 :CMS ERROR: cluster_info.cpp:489: Cannot update state for unknown PDisk 22:22 2025-05-07T09:00:32.338986Z node 17 :CMS ERROR: cluster_info.cpp:489: Cannot update state for unknown PDisk 23:23 2025-05-07T09:00:32.339017Z node 17 :CMS ERROR: cluster_info.cpp:489: Cannot update state for unknown PDisk 24:24 >> TopicAutoscaling::PartitionSplit_ManySession_BeforeAutoscaleAwareSDK [GOOD] >> TopicAutoscaling::PartitionSplit_ManySession_AutoscaleAwareSDK |91.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_subdomain_reboots/ydb-core-tx-schemeshard-ut_subdomain_reboots |91.6%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_subdomain_reboots/ydb-core-tx-schemeshard-ut_subdomain_reboots |91.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_subdomain_reboots/ydb-core-tx-schemeshard-ut_subdomain_reboots |91.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/async_replication/py3test >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_ttl_Uint32-pk_types9-all_types9-index9-Uint32--] [GOOD] >> TGRpcYdbTest::MakeListRemoveDirectory [GOOD] >> TGRpcYdbTest::GetOperationBadRequest >> Cdc::SequentialSplitMerge [GOOD] >> Cdc::ShouldBreakLocksOnConcurrentSchemeTx >> TGRpcAuthentication::ValidCredentials >> TRegisterNodeOverDiscoveryService::ServerWithCertVerification_ClientProvidesEmptyClientCerts [GOOD] >> TRegisterNodeOverDiscoveryService::ServerWithCertVerification_ClientProvidesServerCerts >> TYqlDateTimeTests::DatetimeKey [GOOD] >> TYqlDateTimeTests::TimestampKey >> YdbYqlClient::TestTzTypesFullStack [GOOD] >> YdbYqlClient::TestVariant >> YdbOlapStore::LogNonExistingRequest [GOOD] >> YdbOlapStore::LogNonExistingUserId >> TGRpcClientLowTest::GrpcRequestProxy [GOOD] >> TGRpcClientLowTest::GrpcRequestProxyWithoutToken >> TGRpcYdbTest::CreateTableBadRequest >> YdbYqlClient::DiscoveryLocationOverride |91.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/ut/scan/ydb-core-kqp-ut-scan |91.6%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/scan/ydb-core-kqp-ut-scan |91.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/scan/ydb-core-kqp-ut-scan >> TRegisterNodeOverDiscoveryService::ServerWithCertVerification_ClientDoesNotProvideAnyCerts [GOOD] >> TRegisterNodeOverDiscoveryService::ServerWithCertVerification_ClientProvidesCorruptedCert >> TRegisterNodeOverLegacyService::ServerWithCertVerification_ClientWithCorrectCerts [GOOD] >> TRegisterNodeOverLegacyService::ServerWithCertVerification_ClientProvidesEmptyClientCerts >> TGRpcYdbTest::GetOperationBadRequest [GOOD] >> TGRpcYdbTest::OperationTimeout >> YdbYqlClient::QueryStats [GOOD] >> YdbYqlClient::RenameTables |91.6%| [TM] {asan, default-linux-x86_64, release} 
ydb/core/security/certificate_check/ut/unittest >> TGRpcYdbTest::ExecuteQueryBadRequest [GOOD] >> TGRpcYdbTest::ExecuteQueryExplicitSession >> Cdc::InitialScanUpdatedRows [GOOD] >> Cdc::MustNotLoseSchemaSnapshot >> CommitOffset::Commit_WithSession_ParentNotFinished_OtherSession [GOOD] >> CommitOffset::Commit_FromSession_ToNewChild_WithoutCommitToParent |91.6%| [TA] $(B)/ydb/core/security/certificate_check/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> YdbTableBulkUpsertOlap::UpsertMixed [GOOD] >> YdbYqlClient::AlterTableAddIndex >> Cdc::InitialScanEnqueuesZeroRecords [GOOD] >> Cdc::InitialScanRacyProgressAndDrop |91.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/external_sources/s3/ut/ydb-core-external_sources-s3-ut |91.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/external_sources/s3/ut/ydb-core-external_sources-s3-ut |91.6%| [TA] {RESULT} $(B)/ydb/core/security/certificate_check/ut/test-results/unittest/{meta.json ... results_accumulator.log} |91.6%| [LD] {RESULT} $(B)/ydb/core/external_sources/s3/ut/ydb-core-external_sources-s3-ut >> YdbYqlClient::TestDecimalFullStack [GOOD] >> YdbYqlClient::TestDescribeDirectory >> YdbYqlClient::TestVariant [GOOD] >> YdbYqlClient::TestTransactionQueryError >> TGRpcClientLowTest::GrpcRequestProxyWithoutToken [GOOD] >> TGRpcClientLowTest::GrpcRequestProxyCheckTokenWhenItIsSpecified_Ignore |91.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_user_attributes_reboots/core-tx-schemeshard-ut_user_attributes_reboots |91.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_user_attributes_reboots/core-tx-schemeshard-ut_user_attributes_reboots |91.6%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_user_attributes_reboots/core-tx-schemeshard-ut_user_attributes_reboots >> TPersQueueMirrorer::ValidStartStream [GOOD] >> overlapping_portions.py::TestOverlappingPortions::test >> TGRpcYdbTest::OperationTimeout [GOOD] >> TGRpcYdbTest::OperationCancelAfter >> YdbYqlClient::DiscoveryLocationOverride [GOOD] >> YdbYqlClient::DeleteTableWithDeletedIndex >> TRegisterNodeOverDiscoveryService::ServerWithoutCertVerification_ClientDoesNotProvideClientCerts [GOOD] >> TGRpcAuthentication::ValidCredentials [GOOD] >> TGRpcAuthentication::NoConnectRights |91.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/quoter/quoter_service_bandwidth_test/quoter_service_bandwidth_test |91.6%| [LD] {RESULT} $(B)/ydb/core/quoter/quoter_service_bandwidth_test/quoter_service_bandwidth_test |91.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/quoter/quoter_service_bandwidth_test/quoter_service_bandwidth_test ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/ut_with_sdk/unittest >> TPersQueueMirrorer::ValidStartStream [GOOD] Test command err: 2025-05-07T08:58:58.161260Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501625691548083693:2063];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:58:58.161324Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-05-07T08:58:58.364888Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003ff0/r3tmp/tmpDeU4NM/pdisk_1.dat 2025-05-07T08:58:58.827974Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not 
loaded 2025-05-07T08:58:58.835984Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:58:58.836070Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:58:58.843371Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 64114, node 1 2025-05-07T08:58:58.995036Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/zvgn/003ff0/r3tmp/yandexn06isu.tmp 2025-05-07T08:58:58.995058Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/zvgn/003ff0/r3tmp/yandexn06isu.tmp 2025-05-07T08:58:58.995206Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/zvgn/003ff0/r3tmp/yandexn06isu.tmp 2025-05-07T08:58:58.995321Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-05-07T08:58:59.045669Z INFO: TTestServer started on Port 62658 GrpcPort 64114 TClient is connected to server localhost:62658 PQClient connected to localhost:64114 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:58:59.389288Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:58:59.429700Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:58:59.454268Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-05-07T08:58:59.592129Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-05-07T08:58:59.814363Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710660, at schemeshard: 72057594046644480 waiting... 
2025-05-07T08:58:59.834654Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710661, at schemeshard: 72057594046644480 2025-05-07T08:59:02.201697Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501625708727953667:2338], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:59:02.201879Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:59:02.202317Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501625708727953705:2343], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:59:02.215293Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710662:3, at schemeshard: 72057594046644480 2025-05-07T08:59:02.219635Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501625708727953737:2346], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:59:02.219757Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:59:02.232105Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501625708727953707:2344], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710662 completed, doublechecking } 2025-05-07T08:59:02.431522Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501625708727953763:2443] txid# 281474976710663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:59:02.471036Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T08:59:02.524702Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T08:59:02.581694Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7501625708727953772:2350], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:5:17: Error: At function: KiReadTable!
:5:17: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Versions]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-05-07T08:59:02.583444Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2147: SessionId: ydb://session/3?node_id=1&id=ZmRlNDJkNmUtMTA3YWU0ZTEtYTQyZTE3YmEtOTVmMGZiY2M=, ActorId: [1:7501625708727953663:2335], ActorState: ExecuteState, TraceId: 01jtmzgh5g4razkksh9zsyw44t, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-05-07T08:59:02.585900Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 5 column: 17 } message: "At function: KiReadTable!" end_position { row: 5 column: 17 } severity: 1 issues { position { row: 5 column: 17 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Versions]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 5 column: 17 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-05-07T08:59:02.642248Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); === CheckClustersList. Subcribe to ClusterTracker from [1:7501625708727954068:2621] 2025-05-07T08:59:03.162140Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501625691548083693:2063];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:59:03.162244Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; === CheckClustersList. 
Ok 2025-05-07T08:59:07.999668Z :TopicSplitMerge INFO: TTopicSdkTestSetup started 2025-05-07T08:59:08.028396Z node 1 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:132: new create topic request 2025-05-07T08:59:08.029714Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269877761, Sender [1:7501625734497758000:2680], Recipient [1:7501625691548084125:2194]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-05-07T08:59:08.029752Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4937: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-05-07T08:59:08.029772Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5713: Pipe server connected, at tablet: 72057594046644480 2025-05-07T08:59:08.029816Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271122432, Sender [1:7501625734497757996:2677], Recipient [1:7501625691548084125:2194]: {TEvModifySchemeTransaction txid# 281474976710672 TabletId# 72057594046644480} 2025-05-07T08:59:08.029833Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4851: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-05-07T08:59:08.130441Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/Root" OperationType: ESchemeOpCreatePersQueueGroup CreatePersQueueGroup { Name: "back-compatibility-test" TotalGroupCount: 3 PartitionPerTablet: 1 PQTabletConfig { PartitionConfig { MaxCountInPartition: 2147483647 LifetimeSeconds: 86400 SourceIdLifetimeSeconds: 1382400 WriteSpeedInBytesPerSecond: 1048576 BurstSize: 1048576 ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelPro ... 656855_v1 sending to client partition status 2025-05-07T09:00:47.382766Z :INFO: [] [] [4f3663c8-19408fad-9c2be59f-96dfe73e] [] Confirm partition stream create. Partition stream id: 1. Cluster: "-". Topic: "/topic1". Partition: 0. 
Read offset: 5 2025-05-07T09:00:47.386225Z node 7 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 2 consumer shared/user session shared/user_7_2_12751805137500656855_v1 grpc read done: success# 1, data# { start_partition_session_response { partition_session_id: 1 read_offset: 5 } } 2025-05-07T09:00:47.386490Z node 7 :PQ_READ_PROXY INFO: read_session_actor.cpp:533: session cookie 2 consumer shared/user session shared/user_7_2_12751805137500656855_v1 got StartRead from client: partition# TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1), readOffset# 5, commitOffset# (empty maybe) 2025-05-07T09:00:47.386545Z node 7 :PQ_READ_PROXY INFO: partition_actor.cpp:1002: session cookie 2 consumer shared/user session shared/user_7_2_12751805137500656855_v1 Start reading TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1) EndOffset 10 readOffset 0 committedOffset 0 clientCommitOffset (empty maybe) clientReadOffset 5 2025-05-07T09:00:47.386576Z node 7 :PQ_READ_PROXY DEBUG: partition_actor.cpp:948: session cookie 2 consumer shared/user session shared/user_7_2_12751805137500656855_v1 TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1) ready for read with readOffset 5 endOffset 10 2025-05-07T09:00:47.386644Z node 7 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2309: session cookie 2 consumer shared/user session shared/user_7_2_12751805137500656855_v1 partition ready for read: partition# TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1), readOffset# 5, endOffset# 10, WTime# 1746608446690, sizeLag# 1439 2025-05-07T09:00:47.386662Z node 7 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2320: session cookie 2 consumer shared/user session shared/user_7_2_12751805137500656855_v1TEvPartitionReady. 
Aval parts: 1 2025-05-07T09:00:47.386712Z node 7 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2243: session cookie 2 consumer shared/user session shared/user_7_2_12751805137500656855_v1 performing read request: guid# bdb22d8f-24a1a05d-5efdbf8a-c75320e3, from# TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1), count# 6, size# 1726, partitionsAsked# 1, maxTimeLag# 0ms 2025-05-07T09:00:47.386806Z node 7 :PQ_READ_PROXY DEBUG: partition_actor.cpp:1369: session cookie 2 consumer shared/user session shared/user_7_2_12751805137500656855_v1 READ FROM TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1)maxCount 6 maxSize 1726 maxTimeLagMs 0 readTimestampMs 0 readOffset 5 EndOffset 10 ClientCommitOffset 0 committedOffset 0 Guid bdb22d8f-24a1a05d-5efdbf8a-c75320e3 2025-05-07T09:00:47.387667Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:342: Handle TEvRequest topic: 'rt3.dc1--topic1' requestId: 2025-05-07T09:00:47.387715Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2788: [PQ: 72075186224037892] got client message batch for topic 'rt3.dc1--topic1' partition 0 2025-05-07T09:00:47.387851Z node 8 :PERSQUEUE DEBUG: partition_read.cpp:731: [PQ: 72075186224037892, Partition: 0, State: StateIdle] read cookie 2 Topic 'rt3.dc1--topic1' partition 0 user user offset 5 count 6 size 1726 endOffset 10 max time lag 0ms effective offset 5 2025-05-07T09:00:47.387881Z node 8 :PERSQUEUE DEBUG: partition_read.cpp:931: [PQ: 72075186224037892, Partition: 0, State: StateIdle] read cookie 2 added 0 blobs, size 0 count 0 last offset 5, current partition end offset: 10 2025-05-07T09:00:47.389188Z node 7 :PQ_READ_PROXY DEBUG: partition_actor.cpp:652: session cookie 2 consumer shared/user session shared/user_7_2_12751805137500656855_v1 TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1) initDone 1 event { CmdReadResult { MaxOffset: 10 Result { Offset: 5 Data: "... 94 bytes ..." SourceId: "\000src-id-test" SeqNo: 6 WriteTimestampMS: 1746608447014 CreateTimestampMS: 1746608447009 UncompressedSize: 10 PartitionKey: "" ExplicitHash: "" } Result { Offset: 6 Data: "... 94 bytes ..." SourceId: "\000src-id-test" SeqNo: 7 WriteTimestampMS: 1746608447024 CreateTimestampMS: 1746608447009 UncompressedSize: 10 PartitionKey: "" ExplicitHash: "" } Result { Offset: 7 Data: "... 94 bytes ..." SourceId: "\000src-id-test" SeqNo: 8 WriteTimestampMS: 1746608447034 CreateTimestampMS: 1746608447009 UncompressedSize: 10 PartitionKey: "" ExplicitHash: "" } Result { Offset: 8 Data: "... 94 bytes ..." SourceId: "\000src-id-test" SeqNo: 9 WriteTimestampMS: 1746608447057 CreateTimestampMS: 1746608447009 UncompressedSize: 10 PartitionKey: "" ExplicitHash: "" } Result { Offset: 9 Data: "... 94 bytes ..." 
SourceId: "\000src-id-test" SeqNo: 10 WriteTimestampMS: 1746608447218 CreateTimestampMS: 1746608447009 UncompressedSize: 10 PartitionKey: "" ExplicitHash: "" } BlobsFromDisk: 0 BlobsFromCache: 0 SizeLag: 63 RealReadOffset: 9 WaitQuotaTimeMs: 0 EndOffset: 10 StartOffset: 0 } Cookie: 5 } 2025-05-07T09:00:47.389450Z node 7 :PQ_READ_PROXY DEBUG: partition_actor.cpp:1252: session cookie 2 consumer shared/user session shared/user_7_2_12751805137500656855_v1 TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1) wait data in partition inited, cookie 1 from offset10 2025-05-07T09:00:47.389495Z node 7 :PQ_READ_PROXY DEBUG: partition_actor.cpp:880: session cookie 2 consumer shared/user session shared/user_7_2_12751805137500656855_v1 after read state TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1) EndOffset 10 ReadOffset 10 ReadGuid bdb22d8f-24a1a05d-5efdbf8a-c75320e3 has messages 1 2025-05-07T09:00:47.389633Z node 7 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:1917: session cookie 2 consumer shared/user session shared/user_7_2_12751805137500656855_v1 read done: guid# bdb22d8f-24a1a05d-5efdbf8a-c75320e3, partition# TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1), size# 917 2025-05-07T09:00:47.389666Z node 7 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2079: session cookie 2 consumer shared/user session shared/user_7_2_12751805137500656855_v1 response to read: guid# bdb22d8f-24a1a05d-5efdbf8a-c75320e3 2025-05-07T09:00:47.389987Z node 7 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2122: session cookie 2 consumer shared/user session shared/user_7_2_12751805137500656855_v1 Process answer. Aval parts: 0 2025-05-07T09:00:47.388066Z node 8 :PERSQUEUE DEBUG: partition_read.cpp:948: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Reading cookie 2. All data is from uncompacted head. 2025-05-07T09:00:47.388099Z node 8 :PERSQUEUE DEBUG: partition_read.cpp:420: FormAnswer for 0 blobs 2025-05-07T09:00:47.388282Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:377: Answer ok topic: 'rt3.dc1--topic1' partition: 0 messageNo: 0 requestId: cookie: 5 2025-05-07T09:00:47.394997Z :DEBUG: [] [] [4f3663c8-19408fad-9c2be59f-96dfe73e] [] Got ReadResponse, serverBytesSize = 917, now ReadSizeBudget = 0, ReadSizeServerDelta = 52427883 2025-05-07T09:00:47.395216Z :DEBUG: [] [] [4f3663c8-19408fad-9c2be59f-96dfe73e] [] In ContinueReadingDataImpl, ReadSizeBudget = 0, ReadSizeServerDelta = 52427883 2025-05-07T09:00:47.398132Z :DEBUG: [] Decompression task done. 
Partition/PartitionSessionId: 1 (5-9) 2025-05-07T09:00:47.398211Z :DEBUG: [] [] [4f3663c8-19408fad-9c2be59f-96dfe73e] [] Returning serverBytesSize = 917 to budget 2025-05-07T09:00:47.398253Z :DEBUG: [] [] [4f3663c8-19408fad-9c2be59f-96dfe73e] [] In ContinueReadingDataImpl, ReadSizeBudget = 917, ReadSizeServerDelta = 52427883 2025-05-07T09:00:47.398681Z :DEBUG: [] [] [4f3663c8-19408fad-9c2be59f-96dfe73e] [] After sending read request: ReadSizeBudget = 0, ReadSizeServerDelta = 52428800 2025-05-07T09:00:47.399953Z node 7 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 2 consumer shared/user session shared/user_7_2_12751805137500656855_v1 grpc read done: success# 1, data# { read_request { bytes_size: 917 } } 2025-05-07T09:00:47.400114Z node 7 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:1816: session cookie 2 consumer shared/user session shared/user_7_2_12751805137500656855_v1 got read request: guid# 8a596e10-4708f181-ae572ab4-23eb36ff 2025-05-07T09:00:47.402212Z :DEBUG: [] Take Data. Partition 0. Read: {0, 0} (5-5) 2025-05-07T09:00:47.402305Z :DEBUG: [] Take Data. Partition 0. Read: {1, 0} (6-6) 2025-05-07T09:00:47.402350Z :DEBUG: [] Take Data. Partition 0. Read: {2, 0} (7-7) 2025-05-07T09:00:47.402390Z :DEBUG: [] Take Data. Partition 0. Read: {3, 0} (8-8) 2025-05-07T09:00:47.402433Z :DEBUG: [] Take Data. Partition 0. Read: {4, 0} (9-9) 2025-05-07T09:00:47.402495Z :DEBUG: [] [] [4f3663c8-19408fad-9c2be59f-96dfe73e] [] The application data is transferred to the client. Number of messages 5, size 115 bytes 2025-05-07T09:00:47.402560Z :DEBUG: [] [] [4f3663c8-19408fad-9c2be59f-96dfe73e] [] Returning serverBytesSize = 0 to budget 2025-05-07T09:00:47.402770Z :INFO: [] [] [4f3663c8-19408fad-9c2be59f-96dfe73e] Closing read session. Close timeout: 0.000000s 2025-05-07T09:00:47.402823Z :INFO: [] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): -:/topic1:0:1:9:0 2025-05-07T09:00:47.402886Z :INFO: [] [] [4f3663c8-19408fad-9c2be59f-96dfe73e] Counters: { Errors: 0 CurrentSessionLifetimeMs: 85 BytesRead: 115 MessagesRead: 5 BytesReadCompressed: 115 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-05-07T09:00:47.403018Z :NOTICE: [] [] [4f3663c8-19408fad-9c2be59f-96dfe73e] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Close with zero timeout " } 2025-05-07T09:00:47.403070Z :DEBUG: [] [] [4f3663c8-19408fad-9c2be59f-96dfe73e] [] Abort session to cluster 2025-05-07T09:00:47.405182Z node 7 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 2 consumer shared/user session shared/user_7_2_12751805137500656855_v1 grpc read done: success# 0, data# { } 2025-05-07T09:00:47.405243Z node 7 :PQ_READ_PROXY INFO: read_session_actor.cpp:125: session cookie 2 consumer shared/user session shared/user_7_2_12751805137500656855_v1 grpc read failed 2025-05-07T09:00:47.405277Z node 7 :PQ_READ_PROXY INFO: read_session_actor.cpp:92: session cookie 2 consumer shared/user session shared/user_7_2_12751805137500656855_v1 grpc closed 2025-05-07T09:00:47.405317Z node 7 :PQ_READ_PROXY INFO: read_session_actor.cpp:377: session cookie 2 consumer shared/user session shared/user_7_2_12751805137500656855_v1 is DEAD 2025-05-07T09:00:47.406788Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2433: [PQ: 72075186224037892] Destroy direct read session shared/user_7_2_12751805137500656855_v1 2025-05-07T09:00:47.406844Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2899: [PQ: 72075186224037892] server disconnected, pipe [7:7501626162922617909:2538] destroyed 2025-05-07T09:00:47.406905Z node 8 :PQ_READ_PROXY DEBUG: caching_service.cpp:138: Direct read cache: server session deregistered: shared/user_7_2_12751805137500656855_v1 2025-05-07T09:00:47.407376Z node 7 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037893][rt3.dc1--topic1] pipe [7:7501626162922617902:2535] disconnected; active server actors: 1 2025-05-07T09:00:47.407412Z node 7 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1673: [72075186224037893][rt3.dc1--topic1] pipe [7:7501626162922617902:2535] client user disconnected session shared/user_7_2_12751805137500656855_v1 2025-05-07T09:00:47.407780Z :NOTICE: [] [] [4f3663c8-19408fad-9c2be59f-96dfe73e] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2025-05-07T09:00:47.411261Z :DEBUG: [] MessageGroupId [src-id-test] SessionId [src-id-test|1df6325c-9fd173ce-bb194151-b7c7ec42_0] Write session: destroy >> TGRpcYdbTest::CreateTableBadRequest [GOOD] >> TGRpcYdbTest::CreateTableBadRequest2 >> TGRpcClientLowTest::GrpcRequestProxyCheckTokenWhenItIsSpecified_Ignore [GOOD] >> TGRpcClientLowTest::GrpcRequestProxyCheckTokenWhenItIsSpecified_Check >> TYqlDateTimeTests::TimestampKey [GOOD] >> TYqlDateTimeTests::IntervalKey >> CommitOffset::Commit_WithoutSession_ToPastParentPartition [GOOD] >> CommitOffset::Commit_WithSession_ParentNotFinished_SameSession >> Cdc::ShouldBreakLocksOnConcurrentSchemeTx [GOOD] >> Cdc::ResolvedTimestampsContinueAfterMerge ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> TRegisterNodeOverDiscoveryService::ServerWithoutCertVerification_ClientDoesNotProvideClientCerts [GOOD] Test command err: 2025-05-07T08:59:22.696767Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501625794705404292:2207];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:59:22.696897Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0028d8/r3tmp/tmpfjUGf8/pdisk_1.dat 2025-05-07T08:59:23.330960Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:59:23.331092Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:59:23.335989Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:59:23.378587Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 20838, node 1 2025-05-07T08:59:23.443705Z node 1 :GRPC_SERVER WARN: grpc_request_proxy.cpp:509: SchemeBoardDelete /Root Strong=0 2025-05-07T08:59:23.443730Z node 1 :GRPC_SERVER WARN: grpc_request_proxy.cpp:509: SchemeBoardDelete /Root Strong=0 2025-05-07T08:59:23.530929Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:59:23.530954Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:59:23.530966Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:59:23.531088Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:29954 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:59:24.107349Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:59:24.245817Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1784: Ticket 48C6A5B7F476F534633F7E77F719149DF1096412360F4CF074979DFBB9E21501 (ipv6:[::1]:46218) has now valid token of C=RU,ST=MSK,L=MSK,O=YA,OU=UtTest,CN=localhost@cert 2025-05-07T08:59:24.576919Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1784: Ticket **** (B6C6F477) (ipv6:[::1]:46228) has now valid token of root@builtin 2025-05-07T08:59:24.809184Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:758: CanInitLoginToken, domain db /Root, request db /Root, token db , DomainLoginOnly 1 2025-05-07T08:59:24.809235Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:763: CanInitLoginToken, target database candidates(1): /Root 2025-05-07T08:59:24.809246Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:816: CanInitLoginToken, database /Root, A6 error 2025-05-07T08:59:24.809300Z node 1 :TICKET_PARSER ERROR: ticket_parser_impl.h:969: Ticket **** (0C093832): Could not find correct token validator 2025-05-07T08:59:28.367094Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7501625821563261774:2074];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:59:28.367167Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0028d8/r3tmp/tmp62Zf0K/pdisk_1.dat 2025-05-07T08:59:28.548057Z node 4 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:59:28.577184Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:59:28.577294Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:59:28.587159Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 13532, node 4 2025-05-07T08:59:28.730573Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:59:28.730601Z node 4 :NET_CLASSIFIER WARN: 
net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:59:28.730611Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:59:28.730758Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:31941 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:59:29.019988Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:59:29.040275Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T08:59:29.108725Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1784: Ticket 48C6A5B7F476F534633F7E77F719149DF1096412360F4CF074979DFBB9E21501 (ipv6:[::1]:53984) has now valid token of C=RU,ST=MSK,L=MSK,O=YA,OU=UtTest,CN=localhost@cert 2025-05-07T08:59:29.209490Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1784: Ticket **** (B6C6F477) (ipv6:[::1]:54006) has now valid token of root@builtin 2025-05-07T08:59:29.322038Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:758: CanInitLoginToken, domain db /Root, request db /Root, token db , DomainLoginOnly 1 2025-05-07T08:59:29.322064Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:763: CanInitLoginToken, target database candidates(1): /Root 2025-05-07T08:59:29.322073Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:816: CanInitLoginToken, database /Root, A6 error 2025-05-07T08:59:29.322103Z node 4 :TICKET_PARSER ERROR: ticket_parser_impl.h:969: Ticket **** (0C093832): Could not find correct token validator 2025-05-07T08:59:33.774040Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7501625842860588028:2072];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:59:33.774132Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0028d8/r3tmp/tmpd97OwW/pdisk_1.dat 2025-05-07T08:59:34.014089Z node 7 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:59:34.051417Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> 
Disconnected 2025-05-07T08:59:34.051508Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:59:34.056790Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 4208, node 7 2025-05-07T08:59:34.230836Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:59:34.230860Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:59:34.230867Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:59:34.230994Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:27666 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:59:34.529188Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:59:34.636462Z node 7 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1784: Ticket 4516827AC389B35100C1431CD7A9EAB46A9B15A2E9E0999170AECBC529F7D25A (ipv6:[::1]:35772) has now valid token of C=RU,ST=MSK,L=MSK,O=YA,OU=UtTest,CN=localhost@cer ... 
ccessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0028d8/r3tmp/tmpKxwrsP/pdisk_1.dat 2025-05-07T09:00:12.642343Z node 22 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:00:12.836858Z node 22 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(22, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:00:12.836978Z node 22 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(22, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:00:12.842453Z node 22 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(22, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 21333, node 22 2025-05-07T09:00:13.161189Z node 22 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:00:13.161225Z node 22 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:00:13.161236Z node 22 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:00:13.161437Z node 22 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:9329 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:00:14.078686Z node 22 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T09:00:16.786110Z node 22 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[22:7501626006210570208:2074];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:00:16.786220Z node 22 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:00:24.516253Z node 22 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1784: Ticket **** (B6C6F477) (ipv6:[::1]:55472) has now valid token of root@builtin 2025-05-07T09:00:24.771155Z node 22 :TICKET_PARSER TRACE: ticket_parser_impl.h:758: CanInitLoginToken, domain db /Root, request db /Root, token db , DomainLoginOnly 1 2025-05-07T09:00:24.771208Z node 22 :TICKET_PARSER TRACE: ticket_parser_impl.h:763: CanInitLoginToken, target database candidates(1): /Root 2025-05-07T09:00:24.771223Z node 22 :TICKET_PARSER TRACE: ticket_parser_impl.h:816: CanInitLoginToken, database /Root, A6 error 2025-05-07T09:00:24.771275Z node 22 :TICKET_PARSER ERROR: ticket_parser_impl.h:969: Ticket **** (0C093832): Could not find correct token validator 2025-05-07T09:00:29.180316Z node 25 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[25:7501626083698494453:2203];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:00:29.180405Z node 25 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0028d8/r3tmp/tmpxwj9rx/pdisk_1.dat 2025-05-07T09:00:30.163828Z node 25 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:00:30.250795Z node 25 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T09:00:30.292870Z node 25 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(25, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:00:30.292999Z node 25 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(25, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:00:30.304775Z node 25 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(25, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 26761, node 25 2025-05-07T09:00:30.843166Z node 25 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:00:30.843204Z node 25 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:00:30.843217Z node 25 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:00:30.843438Z node 25 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3576 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:00:31.664720Z node 25 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:00:32.089087Z node 25 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1784: Ticket **** (B6C6F477) (ipv6:[::1]:43266) has now valid token of root@builtin 2025-05-07T09:00:32.376236Z node 25 :TICKET_PARSER TRACE: ticket_parser_impl.h:758: CanInitLoginToken, domain db /Root, request db /Root, token db , DomainLoginOnly 1 2025-05-07T09:00:32.376304Z node 25 :TICKET_PARSER TRACE: ticket_parser_impl.h:763: CanInitLoginToken, target database candidates(1): /Root 2025-05-07T09:00:32.376318Z node 25 :TICKET_PARSER TRACE: ticket_parser_impl.h:816: CanInitLoginToken, database /Root, A6 error 2025-05-07T09:00:32.376371Z node 25 :TICKET_PARSER ERROR: ticket_parser_impl.h:969: Ticket **** (0C093832): Could not find correct token validator 2025-05-07T09:00:43.253056Z node 28 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[28:7501626145374846391:2076];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:00:43.253870Z node 28 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0028d8/r3tmp/tmp1jbVsh/pdisk_1.dat 2025-05-07T09:00:43.772822Z node 28 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:00:43.878930Z node 28 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(28, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:00:43.879071Z node 28 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(28, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:00:43.884037Z node 28 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(28, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 62257, node 28 2025-05-07T09:00:44.243137Z node 28 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:00:44.243173Z node 28 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:00:44.243185Z node 28 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:00:44.243354Z node 28 
:NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:11491 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:00:45.002454Z node 28 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:00:45.318404Z node 28 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1784: Ticket **** (B6C6F477) (ipv6:[::1]:44058) has now valid token of root@builtin 2025-05-07T09:00:45.458336Z node 28 :TICKET_PARSER TRACE: ticket_parser_impl.h:758: CanInitLoginToken, domain db /Root, request db /Root, token db , DomainLoginOnly 1 2025-05-07T09:00:45.458380Z node 28 :TICKET_PARSER TRACE: ticket_parser_impl.h:763: CanInitLoginToken, target database candidates(1): /Root 2025-05-07T09:00:45.458391Z node 28 :TICKET_PARSER TRACE: ticket_parser_impl.h:816: CanInitLoginToken, database /Root, A6 error 2025-05-07T09:00:45.458431Z node 28 :TICKET_PARSER ERROR: ticket_parser_impl.h:969: Ticket **** (0C093832): Could not find correct token validator
>> YdbYqlClient::RenameTables [GOOD]
>> TopicAutoscaling::ControlPlane_CDC_Enable [GOOD]
>> TopicAutoscaling::MidOfRange [GOOD]
>> YdbYqlClient::TestDescribeDirectory [GOOD]
>> SystemView::QueryStatsAllTables [GOOD]
>> SystemView::QueryStatsRetries
>> YdbYqlClient::TestTransactionQueryError [GOOD]
>> YdbYqlClient::TestReadWrongTable
>> TGRpcYdbTest::ExecuteQueryExplicitSession [GOOD]
>> TGRpcYdbTest::ExecuteDmlQuery
>> TFlatTest::SelectRangeReverseItemsLimit
------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> YdbYqlClient::RenameTables [GOOD]
Test command err:
2025-05-07T08:59:56.418437Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501625941272597031:2076];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:59:56.418484Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00289e/r3tmp/tmpJ2YaIx/pdisk_1.dat 2025-05-07T08:59:57.108078Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:59:57.132675Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:59:57.132771Z node 1 :HIVE WARN:
node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:59:57.142660Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 2093, node 1 2025-05-07T08:59:57.419973Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:59:57.419993Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:59:57.420003Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:59:57.420109Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:64432 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:59:57.907448Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:00:01.420912Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501625941272597031:2076];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:00:01.420983Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Previous query attempt was finished with unsuccessful status OVERLOADED: Sending retry attempt 1 of 5 Previous query attempt was finished with unsuccessful status CLIENT_RESOURCE_EXHAUSTED: Sending retry attempt 2 of 5 Previous query attempt was finished with unsuccessful status UNAVAILABLE: Sending retry attempt 3 of 5 Previous query attempt was finished with unsuccessful status BAD_SESSION: Sending retry attempt 4 of 5 Previous query attempt was finished with unsuccessful status SESSION_BUSY: Sending retry attempt 5 of 5 2025-05-07T09:00:07.202413Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501625988517238516:2356], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:00:07.202534Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:00:07.203273Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501625988517238528:2359], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:00:07.207960Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480 2025-05-07T09:00:07.268223Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501625988517238530:2360], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-05-07T09:00:07.371181Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501625988517238606:2738] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } Previous query attempt was finished with unsuccessful status OVERLOADED: Sending retry attempt 1 of 5 Previous query attempt was finished with unsuccessful status CLIENT_RESOURCE_EXHAUSTED: Sending retry attempt 2 of 5 Previous query attempt was finished with unsuccessful status UNAVAILABLE: Sending retry attempt 3 of 5 Previous query attempt was finished with unsuccessful status BAD_SESSION: Sending retry attempt 4 of 5 Previous query attempt was finished with unsuccessful status SESSION_BUSY: Sending retry attempt 5 of 5 Previous query attempt was finished with unsuccessful status NOT_FOUND: Sending retry attempt 1 of 1 2025-05-07T09:00:12.100697Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7261: Cannot get console configs 2025-05-07T09:00:12.100752Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded Previous query attempt was finished with unsuccessful status NOT_FOUND: Sending retry attempt 1 of 1 Previous query attempt was finished with unsuccessful status UNDETERMINED: Sending retry attempt 1 of 1 Previous query attempt was finished with unsuccessful status UNDETERMINED: Sending retry attempt 1 of 1 Previous query attempt was finished with unsuccessful status TRANSPORT_UNAVAILABLE: Sending retry attempt 1 of 1 Previous query attempt was finished with unsuccessful status TRANSPORT_UNAVAILABLE: Sending retry attempt 1 of 1 2025-05-07T09:00:14.699104Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7501626019287512296:2085];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:00:14.807329Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00289e/r3tmp/tmptNqDgb/pdisk_1.dat 2025-05-07T09:00:14.998227Z node 4 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:00:15.060629Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:00:15.060706Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:00:15.066818Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 30524, node 4 2025-05-07T09:00:15.262104Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:00:15.262255Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:00:15.262272Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:00:15.262440Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server 
localhost:20895 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:00:15.741960Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:00:19.687431Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7501626019287512296:2085];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:00:19.687545Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:00:22.118143Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7501626053647251795:2345], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:00:22.118267Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:00:22.207220Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 2025-05-07T09:00:22.623230Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7501626053647251974:2355], DatabaseId: /Root, PoolId: default, Failed to fetch ... 0: IgniteOperation, opId: 281474976715672:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-05-07T09:00:54.009721Z node 13 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976715672, database: /Root, subject: , status: StatusAccepted, operation: DROP TABLE, path: Root/Table-1 2025-05-07T09:00:54.010600Z node 13 :TX_PROXY DEBUG: schemereq.cpp:1332: Actor# [13:7501626190015644969:3547] txid# 281474976715672 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715672} 2025-05-07T09:00:54.010639Z node 13 :TX_PROXY DEBUG: schemereq.cpp:543: Actor# [13:7501626190015644969:3547] txid# 281474976715672 SEND to# [13:7501626185720677672:2401] Source {TEvProposeTransactionStatus txid# 281474976715672 Status# 53} 2025-05-07T09:00:54.013877Z node 13 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:469: SchemeBoardUpdate /Root 2025-05-07T09:00:54.014026Z node 13 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:498: Can't update SecurityState for /Root - no PublicKeys 2025-05-07T09:00:54.014306Z node 13 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:469: SchemeBoardUpdate /Root 2025-05-07T09:00:54.014386Z node 13 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:498: Can't update SecurityState for /Root - no PublicKeys 2025-05-07T09:00:54.051375Z node 13 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 1746608454087, transactions count in step: 1, at schemeshard: 72057594046644480 2025-05-07T09:00:54.060309Z node 13 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:1103: All parts have reached barrier, tx: 281474976715672, done: 0, blocked: 1 2025-05-07T09:00:54.070120Z node 13 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 281474976715672:0 2025-05-07T09:00:54.070237Z node 13 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 281474976715672, publications: 2, subscribers: 1 2025-05-07T09:00:54.071029Z node 13 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046644480, txId: 281474976715672, subscribers: 1 2025-05-07T09:00:54.072296Z node 13 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:469: SchemeBoardUpdate /Root 2025-05-07T09:00:54.072408Z node 13 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:498: Can't update SecurityState for /Root - no PublicKeys 2025-05-07T09:00:54.072418Z node 13 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:469: SchemeBoardUpdate /Root 2025-05-07T09:00:54.072470Z node 13 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:498: Can't update SecurityState for /Root - no PublicKeys 2025-05-07T09:00:54.087335Z node 13 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:575: Got grpc request# DropTableRequest, traceId# 01jtmzkye72t6r8yse95n4gtq6, sdkBuildInfo# ydb-cpp-sdk/dev, state# 
AS_NOT_PERFORMED, database# undef, peer# ipv6:[::1]:50308, grpcInfo# grpc-c++/1.54.3 grpc-c/31.0.0 (linux; chttp2), timeout# undef 2025-05-07T09:00:54.087575Z node 13 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [13:7501626155655904515:2114] Handle TEvProposeTransaction 2025-05-07T09:00:54.087592Z node 13 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [13:7501626155655904515:2114] TxId# 281474976715673 ProcessProposeTransaction 2025-05-07T09:00:54.087621Z node 13 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [13:7501626155655904515:2114] Cookie# 0 userReqId# "" txid# 281474976715673 SEND to# [13:7501626190015645035:3609] 2025-05-07T09:00:54.090433Z node 13 :TX_PROXY DEBUG: schemereq.cpp:1520: Actor# [13:7501626190015645035:3609] txid# 281474976715673 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "Root" OperationType: ESchemeOpDropTable Drop { Name: "Table-2" } } } DatabaseName: "" RequestType: "" PeerName: "ipv6:[::1]:50308" 2025-05-07T09:00:54.090480Z node 13 :TX_PROXY DEBUG: schemereq.cpp:563: Actor# [13:7501626190015645035:3609] txid# 281474976715673 Bootstrap, UserSID: CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-05-07T09:00:54.090535Z node 13 :TX_PROXY DEBUG: schemereq.cpp:1575: Actor# [13:7501626190015645035:3609] txid# 281474976715673 TEvNavigateKeySet requested from SchemeCache 2025-05-07T09:00:54.090855Z node 13 :TX_PROXY DEBUG: schemereq.cpp:1408: Actor# [13:7501626190015645035:3609] txid# 281474976715673 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-05-07T09:00:54.090943Z node 13 :TX_PROXY DEBUG: schemereq.cpp:1455: Actor# [13:7501626190015645035:3609] HANDLE EvNavigateKeySetResult, txid# 281474976715673 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-05-07T09:00:54.090977Z node 13 :TX_PROXY DEBUG: schemereq.cpp:102: Actor# [13:7501626190015645035:3609] txid# 281474976715673 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715673 TabletId# 72057594046644480} 2025-05-07T09:00:54.091113Z node 13 :TX_PROXY DEBUG: schemereq.cpp:1310: Actor# [13:7501626190015645035:3609] txid# 281474976715673 HANDLE EvClientConnected 2025-05-07T09:00:54.091348Z node 13 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_drop_table.cpp:492: TDropTable Propose, path: Root/Table-2, pathId: 0, opId: 281474976715673:0, at schemeshard: 72057594046644480 2025-05-07T09:00:54.091489Z node 13 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 281474976715673:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-05-07T09:00:54.095180Z node 13 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976715673, database: /Root, subject: , status: StatusAccepted, operation: DROP TABLE, path: Root/Table-2 2025-05-07T09:00:54.095970Z node 13 :TX_PROXY DEBUG: schemereq.cpp:1332: Actor# [13:7501626190015645035:3609] txid# 281474976715673 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715673} 2025-05-07T09:00:54.096015Z node 13 :TX_PROXY DEBUG: schemereq.cpp:543: Actor# [13:7501626190015645035:3609] txid# 281474976715673 SEND to# [13:7501626190015645034:2404] Source {TEvProposeTransactionStatus txid# 281474976715673 Status# 53} 2025-05-07T09:00:54.102735Z node 13 :GRPC_SERVER 
DEBUG: grpc_request_proxy.cpp:469: SchemeBoardUpdate /Root 2025-05-07T09:00:54.102879Z node 13 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:498: Can't update SecurityState for /Root - no PublicKeys 2025-05-07T09:00:54.102907Z node 13 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:469: SchemeBoardUpdate /Root 2025-05-07T09:00:54.102966Z node 13 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:498: Can't update SecurityState for /Root - no PublicKeys 2025-05-07T09:00:54.109337Z node 13 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 13, TabletId: 72075186224037890 not found 2025-05-07T09:00:54.121440Z node 13 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046644480 2025-05-07T09:00:54.127911Z node 13 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 1746608454171, transactions count in step: 1, at schemeshard: 72057594046644480 2025-05-07T09:00:54.137186Z node 13 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:1103: All parts have reached barrier, tx: 281474976715673, done: 0, blocked: 1 2025-05-07T09:00:54.144422Z node 13 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:469: SchemeBoardUpdate /Root 2025-05-07T09:00:54.144543Z node 13 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:498: Can't update SecurityState for /Root - no PublicKeys 2025-05-07T09:00:54.144553Z node 13 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:469: SchemeBoardUpdate /Root 2025-05-07T09:00:54.144601Z node 13 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:498: Can't update SecurityState for /Root - no PublicKeys 2025-05-07T09:00:54.150611Z node 13 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 281474976715673:0 2025-05-07T09:00:54.164797Z node 13 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000078c80] received request Name# SchemeOperation ok# false data# peer# current inflight# 0 2025-05-07T09:00:54.165134Z node 13 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000015680] received request Name# SchemeOperationStatus ok# false data# peer# current inflight# 0 2025-05-07T09:00:54.165349Z node 13 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000083a80] received request Name# SchemeDescribe ok# false data# peer# current inflight# 0 2025-05-07T09:00:54.165545Z node 13 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0001a2880] received request Name# ChooseProxy ok# false data# peer# current inflight# 0 2025-05-07T09:00:54.165765Z node 13 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a00016d480] received request Name# PersQueueRequest ok# false data# peer# current inflight# 0 2025-05-07T09:00:54.165960Z node 13 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0000b4080] received request Name# SchemeInitRoot ok# false data# peer# current inflight# 0 2025-05-07T09:00:54.166194Z node 13 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0001a2280] received request Name# ResolveNode ok# false data# peer# current inflight# 0 2025-05-07T09:00:54.166406Z node 13 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0001a1c80] received request Name# FillNode ok# false data# peer# current inflight# 0 2025-05-07T09:00:54.166607Z node 13 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0001a1680] received request Name# DrainNode ok# false data# peer# current inflight# 0 2025-05-07T09:00:54.166796Z node 13 :GRPC_SERVER DEBUG: grpc_server.cpp:283: 
[0x51a000152a80] received request Name# BlobStorageConfig ok# false data# peer# current inflight# 0 2025-05-07T09:00:54.166986Z node 13 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000189080] received request Name# HiveCreateTablet ok# false data# peer# current inflight# 0 2025-05-07T09:00:54.167177Z node 13 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000078080] received request Name# TestShardControl ok# false data# peer# current inflight# 0 2025-05-07T09:00:54.167383Z node 13 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0000c6c80] received request Name# RegisterNode ok# false data# peer# current inflight# 0 2025-05-07T09:00:54.167578Z node 13 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0000eca80] received request Name# CmsRequest ok# false data# peer# current inflight# 0 2025-05-07T09:00:54.167790Z node 13 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000091280] received request Name# ConsoleRequest ok# false data# peer# current inflight# 0 2025-05-07T09:00:54.167990Z node 13 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000188a80] received request Name# InterconnectDebug ok# false data# peer# current inflight# 0 2025-05-07T09:00:54.168196Z node 13 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000181280] received request Name# TabletStateRequest ok# false data# peer# current inflight# 0 2025-05-07T09:00:54.180593Z node 13 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 13, TabletId: 72075186224037889 not found 2025-05-07T09:00:54.183591Z node 13 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046644480
>> Secret::SimpleQueryService
------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> YdbYqlClient::TestDescribeDirectory [GOOD]
Test command err:
2025-05-07T09:00:13.846728Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501626015881798700:2086];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:00:13.846796Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00288f/r3tmp/tmpCfm8Uq/pdisk_1.dat 2025-05-07T09:00:14.971389Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T09:00:15.208136Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:00:15.211807Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:00:15.211934Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:00:15.218898Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 61763, node 1 2025-05-07T09:00:15.730674Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:00:15.730693Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize
from file: (empty maybe) 2025-05-07T09:00:15.730701Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:00:15.730809Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:13865 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:00:16.587649Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:00:18.847017Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501626015881798700:2086];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:00:18.847087Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:00:20.408415Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501626045946570878:2341], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:00:20.408537Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:00:20.409011Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501626045946570890:2344], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:00:20.418974Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480 2025-05-07T09:00:20.476305Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501626045946570892:2345], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-05-07T09:00:20.581297Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501626045946570974:2695] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:00:24.838663Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7501626063970176857:2279];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:00:24.838719Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00288f/r3tmp/tmp5LB2jj/pdisk_1.dat 2025-05-07T09:00:25.469441Z node 4 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:00:25.583440Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:00:25.583538Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:00:25.593332Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 12391, node 4 2025-05-07T09:00:25.958821Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:00:25.958849Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:00:25.958857Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:00:25.958998Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6020 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:00:26.755720Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T09:00:29.839272Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7501626063970176857:2279];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:00:29.839347Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:00:31.361486Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7501626094034948813:2341], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:00:31.361606Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:00:31.362241Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7501626094034948825:2344], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:00:31.366753Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480 2025-05-07T09:00:31.423636Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7501626094034948827:2345], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-05-07T09:00:31.494279Z node 4 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [4:7501626094034948899:2699] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:00:34.296233Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7501626105238396427:2211];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:00:34.314487Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00288f/r3tmp/tmpNjuGt9/pdisk_1.dat 2025-05-07T09:00:34.774246Z node 7 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:00:34.847781Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:00:34.847878Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:00:34.858036Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 7929, node 7 2025-05-07T09:00:35.118987Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:00:35.119013Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:00:35.119022Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:00:35.119174Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:16968 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:00:35.743833Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T09:00:39.327137Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[7:7501626105238396427:2211];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:00:39.327206Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:00:42.145947Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 2025-05-07T09:00:42.448698Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7501626139598135975:2353], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:00:42.448797Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:00:42.450057Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7501626139598135987:2356], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:00:42.459321Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480 2025-05-07T09:00:42.507357Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7501626139598135989:2357], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-05-07T09:00:42.610189Z node 7 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [7:7501626139598136067:2821] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:00:42.879589Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710661. Ctx: { TraceId: 01jtmzkk2e0tk2derb2kpf97h4, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=ZjBiNDUwYS1lYjBiNjEzNy00NDIyN2Q1Ni1jYTJkZjZlYw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:00:43.086112Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710662. Ctx: { TraceId: 01jtmzkkgv8g3bfddwwn4nspz2, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=ZjBiNDUwYS1lYjBiNjEzNy00NDIyN2Q1Ni1jYTJkZjZlYw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:00:43.334192Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710663. Ctx: { TraceId: 01jtmzkkrr55ps44y5btpzq9km, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=ZjBiNDUwYS1lYjBiNjEzNy00NDIyN2Q1Ni1jYTJkZjZlYw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:00:43.660947Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710664. Ctx: { TraceId: 01jtmzkm0p0cvbqzxzw04j2f6t, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=ZjBiNDUwYS1lYjBiNjEzNy00NDIyN2Q1Ni1jYTJkZjZlYw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:00:43.947259Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710665. Ctx: { TraceId: 01jtmzkm9w4cjedqmm9fapjhwf, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=ZjBiNDUwYS1lYjBiNjEzNy00NDIyN2Q1Ni1jYTJkZjZlYw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:00:46.205421Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710666. Ctx: { TraceId: 01jtmzkmjmefwrtkp3ctfwf25r, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=ZjBiNDUwYS1lYjBiNjEzNy00NDIyN2Q1Ni1jYTJkZjZlYw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:00:46.273190Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710667. Ctx: { TraceId: 01jtmzkmjmefwrtkp3ctfwf25r, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=ZjBiNDUwYS1lYjBiNjEzNy00NDIyN2Q1Ni1jYTJkZjZlYw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-05-07T09:00:49.153216Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7501626169702201773:2080];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:00:49.153285Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00288f/r3tmp/tmpHiyVG8/pdisk_1.dat 2025-05-07T09:00:49.486706Z node 10 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:00:49.577607Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:00:49.577723Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:00:49.582536Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 11890, node 10 2025-05-07T09:00:49.754983Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:00:49.755016Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:00:49.755028Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:00:49.755197Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:5044 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:00:50.189524Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T09:00:54.153355Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[10:7501626169702201773:2080];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:00:54.153460Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:00:54.491729Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7501626191177039338:2341], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:00:54.491856Z node 10 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:00:54.534084Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 >> TRegisterNodeOverLegacyService::ServerWithCertVerification_ClientProvidesEmptyClientCerts [GOOD] >> TRegisterNodeOverLegacyService::ServerWithCertVerification_ClientDoesNotProvideCorrectCerts >> Cdc::InitialScanRacyProgressAndDrop [GOOD] >> Cdc::EnqueueRequestProcessSend |91.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_login_large/ydb-core-tx-schemeshard-ut_login_large |91.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_login_large/ydb-core-tx-schemeshard-ut_login_large |91.7%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_login_large/ydb-core-tx-schemeshard-ut_login_large >> TGRpcAuthentication::NoConnectRights [GOOD] >> TGRpcAuthentication::NoDescribeRights ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/ut_with_sdk/unittest >> TopicAutoscaling::MidOfRange [GOOD] Test command err: 2025-05-07T08:58:58.224923Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501625694220267292:2063];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:58:58.225480Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-05-07T08:58:58.467732Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003ff5/r3tmp/tmpcfQ7F1/pdisk_1.dat 2025-05-07T08:58:58.900057Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:58:58.900274Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:58:58.902223Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:58:58.914208Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 6274, node 1 2025-05-07T08:58:59.185456Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/zvgn/003ff5/r3tmp/yandexp2Spll.tmp 2025-05-07T08:58:59.185480Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/zvgn/003ff5/r3tmp/yandexp2Spll.tmp 2025-05-07T08:58:59.185637Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/zvgn/003ff5/r3tmp/yandexp2Spll.tmp 2025-05-07T08:58:59.185776Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-05-07T08:58:59.351520Z INFO: TTestServer started on Port 16224 GrpcPort 6274 TClient is connected to server localhost:16224 PQClient connected to localhost:6274 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:58:59.993507Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-05-07T08:59:00.026747Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-05-07T08:59:00.032629Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-05-07T08:59:00.216740Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710660, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:59:00.229404Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710661, at schemeshard: 72057594046644480 2025-05-07T08:59:02.437618Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501625711400137285:2340], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:59:02.437712Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:59:02.437947Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501625711400137304:2343], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:59:02.442997Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710662:3, at schemeshard: 72057594046644480 2025-05-07T08:59:02.456454Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501625711400137306:2344], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710662 completed, doublechecking } 2025-05-07T08:59:02.799490Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501625711400137370:2444] txid# 281474976710663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:59:02.833235Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T08:59:02.882248Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T08:59:02.988579Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7501625711400137385:2350], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:5:17: Error: At function: KiReadTable!
:5:17: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Versions]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-05-07T08:59:02.990837Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2147: SessionId: ydb://session/3?node_id=1&id=ODI2OGY3OTEtN2VhNDllMTMtNTNlZTE5ZmUtNDMwZjU1ZDI=, ActorId: [1:7501625711400137274:2338], ActorState: ExecuteState, TraceId: 01jtmzghcvfg542kjms64s9t1v, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-05-07T08:59:02.993367Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 5 column: 17 } message: "At function: KiReadTable!" end_position { row: 5 column: 17 } severity: 1 issues { position { row: 5 column: 17 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Versions]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 5 column: 17 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-05-07T08:59:03.044986Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); 2025-05-07T08:59:03.225389Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501625694220267292:2063];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:59:03.225461Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; === CheckClustersList. Subcribe to ClusterTracker from [1:7501625715695104970:2623] === CheckClustersList. 
Ok 2025-05-07T08:59:09.736563Z :TopicSplitMerge INFO: TTopicSdkTestSetup started 2025-05-07T08:59:09.765140Z node 1 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:132: new create topic request 2025-05-07T08:59:09.766436Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269877761, Sender [1:7501625741464908941:2698], Recipient [1:7501625694220267756:2208]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-05-07T08:59:09.766473Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4937: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-05-07T08:59:09.766488Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5713: Pipe server connected, at tablet: 72057594046644480 2025-05-07T08:59:09.766532Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271122432, Sender [1:7501625741464908937:2695], Recipient [1:7501625694220267756:2208]: {TEvModifySchemeTransaction txid# 281474976710673 TabletId# 72057594046644480} 2025-05-07T08:59:09.766547Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4851: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-05-07T08:59:09.845388Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/Root" OperationType: ESchemeOpCreatePersQueueGroup CreatePersQueueGroup { Name: "test-topic" TotalGroupCount: 2 PartitionPerTablet: 1 PQTabletConfig { PartitionConfig { MaxCountInPartition: 2147483647 LifetimeSeconds: 86400 SourceIdLifetimeSeconds: 1382400 WriteSpeedInBytesPerSecond: 1048576 BurstSize: 1048576 ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } SourceIdMaxCounts: 6000000 } RequireAuthWrite: true RequireAuthRead: true FormatVersion: 0 Codecs { } PartitionStrategy { MinPartitionCount: 2 MaxPartitionCount: 100 ScaleThresholdSeconds: 300 ScaleUpPartitionWriteSpeedThresholdPercent: 90 ScaleDownPartitionWriteSpeedThresholdPercent: 30 PartitionStrategyType: CAN_SPLIT } Consumers { Name: "test-consumer" ReadFromTimestampsMs: 0 FormatVersion: 0 Codec { } ServiceType: "data-streams" Version: 0 } } } } TxId: 281474976710673 TabletId: 72057594046644480 Owner: "root@builtin ... 
Operation and all the parts is done, operation id: 281474976710673:1 2025-05-07T09:00:54.428664Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 281474976710673:1 2025-05-07T09:00:54.428745Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 13] was 4 2025-05-07T09:00:54.428773Z node 6 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 281474976710673:2 2025-05-07T09:00:54.428782Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 281474976710673:2 2025-05-07T09:00:54.428836Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 15] was 5 2025-05-07T09:00:54.428865Z node 6 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 281474976710673, publications: 2, subscribers: 1 2025-05-07T09:00:54.428881Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 281474976710673, [OwnerId: 72057594046644480, LocalPathId: 14], 4 2025-05-07T09:00:54.428892Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 281474976710673, [OwnerId: 72057594046644480, LocalPathId: 15], 2 2025-05-07T09:00:54.429478Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 274137603, Sender [6:7501626123670092016:2235], Recipient [6:7501626119375124545:2135]: NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046644480 Generation: 2 PathId: [OwnerId: 72057594046644480, LocalPathId: 14] Version: 4 } 2025-05-07T09:00:54.429528Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4924: StateWork, processing event NSchemeBoard::NSchemeshardEvents::TEvUpdateAck 2025-05-07T09:00:54.429610Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 Generation: 2 LocalPathId: 14 Version: 4 PathOwnerId: 72057594046644480, cookie: 281474976710673 2025-05-07T09:00:54.429726Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 Generation: 2 LocalPathId: 14 Version: 4 PathOwnerId: 72057594046644480, cookie: 281474976710673 2025-05-07T09:00:54.429747Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046644480, txId: 281474976710673 2025-05-07T09:00:54.429766Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046644480, txId: 281474976710673, pathId: [OwnerId: 72057594046644480, LocalPathId: 14], version: 4 2025-05-07T09:00:54.429793Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046644480, LocalPathId: 14] was 3 2025-05-07T09:00:54.429881Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-05-07T09:00:54.430030Z node 6 :PERSQUEUE TRACE: pq_impl.cpp:5200: HandleHook, received event# 270794756, Sender [6:7501626192389569763:2487], 
Recipient [6:7501626192389569763:2487]: NKikimr::TEvKeyValue::TEvCollect 2025-05-07T09:00:54.430096Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 274137603, Sender [6:7501626123670092016:2235], Recipient [6:7501626119375124545:2135]: NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046644480 Generation: 2 PathId: [OwnerId: 72057594046644480, LocalPathId: 15] Version: 2 } 2025-05-07T09:00:54.430115Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4924: StateWork, processing event NSchemeBoard::NSchemeshardEvents::TEvUpdateAck 2025-05-07T09:00:54.430184Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 Generation: 2 LocalPathId: 15 Version: 2 PathOwnerId: 72057594046644480, cookie: 281474976710673 2025-05-07T09:00:54.430265Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 Generation: 2 LocalPathId: 15 Version: 2 PathOwnerId: 72057594046644480, cookie: 281474976710673 2025-05-07T09:00:54.430291Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046644480, txId: 281474976710673 2025-05-07T09:00:54.430307Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046644480, txId: 281474976710673, pathId: [OwnerId: 72057594046644480, LocalPathId: 15], version: 2 2025-05-07T09:00:54.430322Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046644480, LocalPathId: 15] was 4 2025-05-07T09:00:54.430389Z node 6 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046644480, txId: 281474976710673, subscribers: 1 2025-05-07T09:00:54.430422Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:212: TTxAckPublishToSchemeBoard Notify send TEvNotifyTxCompletionResult, at schemeshard: 72057594046644480, to actorId: [6:7501626192389569735:2485] 2025-05-07T09:00:54.430447Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-05-07T09:00:54.430462Z node 6 :PERSQUEUE TRACE: pq_impl.cpp:5200: HandleHook, received event# 270794760, Sender [6:7501626192389569886:2498], Recipient [6:7501626192389569763:2487]: NKikimr::TEvKeyValue::TEvCompleteGC 2025-05-07T09:00:54.430596Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-05-07T09:00:54.430799Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046644480, cookie: 281474976710673 2025-05-07T09:00:54.430815Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-05-07T09:00:54.430955Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046644480, cookie: 281474976710673 2025-05-07T09:00:54.430965Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at 
tablet# 72057594046644480 2025-05-07T09:00:54.431039Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:630: Send to actor: [6:7501626192389569735:2485] msg type: 271124998 msg: NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976710673 at schemeshard: 72057594046644480 2025-05-07T09:00:54.434439Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269877764, Sender [6:7501626192389569742:2759], Recipient [6:7501626119375124545:2135]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-05-07T09:00:54.434485Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4938: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-05-07T09:00:54.434502Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5761: Server pipe is reset, at schemeshard: 72057594046644480 2025-05-07T09:00:54.452104Z node 6 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:141: new alter topic request 2025-05-07T09:00:54.518397Z node 6 :PERSQUEUE TRACE: partition.h:561: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [6:7501626192389569763:2487], Partition 0, Sender [0:0:0], Recipient [6:7501626192389569833:2493], Cookie: 0 2025-05-07T09:00:54.518491Z node 6 :PERSQUEUE TRACE: partition.h:563: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [6:7501626192389569833:2493]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-05-07T09:00:54.518530Z node 6 :PERSQUEUE TRACE: partition.h:589: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-05-07T09:00:54.518599Z node 6 :PERSQUEUE TRACE: partition.cpp:398: [PQ: 72075186224037893, Partition: 0, State: StateIdle] Have 0 items to delete old stuff 2025-05-07T09:00:54.518697Z node 6 :PERSQUEUE TRACE: partition.cpp:407: [PQ: 72075186224037893, Partition: 0, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-05-07T09:00:54.518739Z node 6 :PERSQUEUE TRACE: partition_write.cpp:163: [PQ: 72075186224037893, Partition: 0, State: StateIdle] TPartition::ProcessReserveRequests. 2025-05-07T09:00:54.518949Z node 6 :PERSQUEUE TRACE: partition_write.cpp:252: [PQ: 72075186224037893, Partition: 0, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-05-07T09:00:54.615648Z node 6 :PERSQUEUE TRACE: partition.h:561: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [6:7501626192389569763:2487], Partition 0, Sender [0:0:0], Recipient [6:7501626192389569833:2493], Cookie: 0 2025-05-07T09:00:54.615755Z node 6 :PERSQUEUE TRACE: partition.h:563: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [6:7501626192389569833:2493]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-05-07T09:00:54.615794Z node 6 :PERSQUEUE TRACE: partition.h:589: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-05-07T09:00:54.615847Z node 6 :PERSQUEUE TRACE: partition.cpp:398: [PQ: 72075186224037893, Partition: 0, State: StateIdle] Have 0 items to delete old stuff 2025-05-07T09:00:54.615937Z node 6 :PERSQUEUE TRACE: partition.cpp:407: [PQ: 72075186224037893, Partition: 0, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-05-07T09:00:54.615966Z node 6 :PERSQUEUE TRACE: partition_write.cpp:163: [PQ: 72075186224037893, Partition: 0, State: StateIdle] TPartition::ProcessReserveRequests. 
2025-05-07T09:00:54.616002Z node 6 :PERSQUEUE TRACE: partition_write.cpp:252: [PQ: 72075186224037893, Partition: 0, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-05-07T09:00:54.718300Z node 6 :PERSQUEUE TRACE: partition.h:561: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [6:7501626192389569763:2487], Partition 0, Sender [0:0:0], Recipient [6:7501626192389569833:2493], Cookie: 0 2025-05-07T09:00:54.718392Z node 6 :PERSQUEUE TRACE: partition.h:563: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [6:7501626192389569833:2493]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-05-07T09:00:54.718448Z node 6 :PERSQUEUE TRACE: partition.h:589: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-05-07T09:00:54.718503Z node 6 :PERSQUEUE TRACE: partition.cpp:398: [PQ: 72075186224037893, Partition: 0, State: StateIdle] Have 0 items to delete old stuff 2025-05-07T09:00:54.718615Z node 6 :PERSQUEUE TRACE: partition.cpp:407: [PQ: 72075186224037893, Partition: 0, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-05-07T09:00:54.718648Z node 6 :PERSQUEUE TRACE: partition_write.cpp:163: [PQ: 72075186224037893, Partition: 0, State: StateIdle] TPartition::ProcessReserveRequests. 2025-05-07T09:00:54.718686Z node 6 :PERSQUEUE TRACE: partition_write.cpp:252: [PQ: 72075186224037893, Partition: 0, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 >> TRegisterNodeOverDiscoveryService::ServerWithCertVerification_ClientProvidesCorruptedCert [GOOD] >> TRegisterNodeOverDiscoveryService::ServerWithCertVerification_ClientDoesNotProvideClientCerts >> Cdc::MustNotLoseSchemaSnapshot [GOOD] >> Cdc::MustNotLoseSchemaSnapshotWithVolatileTx >> TCmsTest::RequestRestartServicesRejectSecond >> TRegisterNodeOverDiscoveryService::ServerWithCertVerification_ClientProvidesServerCerts [GOOD] >> TRegisterNodeOverDiscoveryService::ServerWithCertVerification_ClientProvidesCorruptedPrivatekey >> YdbYqlClient::AlterTableAddIndex [GOOD] >> TGRpcYdbTest::CreateTableBadRequest2 [GOOD] >> TGRpcYdbTest::CreateTableBadRequest3 >> YdbYqlClient::TestExplicitPartitioning [GOOD] >> TPersQueueTest::MessageMetadata [GOOD] >> TPersQueueTest::LOGBROKER_7820 >> TCmsTest::RequestRestartServicesRejectSecond [GOOD] >> TCmsTest::RequestRestartServicesWrongHost >> TGRpcClientLowTest::GrpcRequestProxyCheckTokenWhenItIsSpecified_Check [GOOD] >> TGRpcClientLowTest::ChangeAcl >> YdbYqlClient::DeleteTableWithDeletedIndex [GOOD] >> YdbYqlClient::CreateTableWithUniformPartitions >> TGRpcYdbTest::OperationCancelAfter [GOOD] >> TGRpcYdbTest::KeepAlive ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> YdbYqlClient::AlterTableAddIndex [GOOD] Test command err: 2025-05-07T08:59:49.352584Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501625911423004955:2074];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:59:49.352629Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0028a0/r3tmp/tmpJnpay0/pdisk_1.dat 2025-05-07T08:59:50.265300Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:59:50.265427Z node 1 :HIVE 
WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:59:50.279666Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:59:50.320028Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:59:50.378837Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; TServer::EnableGrpc on GrpcPort 29950, node 1 2025-05-07T08:59:50.682085Z node 1 :GRPC_SERVER WARN: grpc_request_proxy.cpp:509: SchemeBoardDelete /Root Strong=0 2025-05-07T08:59:50.694467Z node 1 :GRPC_SERVER WARN: grpc_request_proxy.cpp:509: SchemeBoardDelete /Root Strong=0 2025-05-07T08:59:50.719002Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:59:50.719026Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:59:50.719033Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:59:50.719163Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:9309 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:59:51.473682Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T08:59:54.358121Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501625911423004955:2074];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:59:54.358239Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:59:54.654619Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:1, at schemeshard: 72057594046644480 SUCCESS 2025-05-07T08:59:54.972787Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501625932897842719:2347], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:59:54.972869Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:59:54.973477Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501625932897842731:2350], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:59:54.977355Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480 2025-05-07T08:59:55.014151Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501625932897842733:2351], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-05-07T08:59:55.091547Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501625937192810100:2827] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:59:55.534263Z node 1 :BS_CONTROLLER ERROR: {BSC07@impl.h:2175} ProcessControllerEvent event processing took too much time Type# 2146435072 Duration# 0.212074s 2025-05-07T08:59:55.534304Z node 1 :BS_CONTROLLER ERROR: {BSC00@bsc.cpp:666} StateWork event processing took too much time Type# 2146435078 Duration# 0.212146s 2025-05-07T08:59:55.673453Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710662. Ctx: { TraceId: 01jtmzj4pr6pk98gq4av98ybdj, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YzZjNTk5NjctNzI2ZmU2M2QtYzIxN2FkMDAtNGQwZTRlNDU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T08:59:55.687842Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1746608395707, txId: 281474976710661] shutting down 2025-05-07T08:59:55.788952Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037888 not found 2025-05-07T08:59:55.792822Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 BAD_REQUEST 2025-05-07T08:59:55.993827Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T08:59:56.015279Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037889 not found SUCCESS 2025-05-07T08:59:56.262172Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037890 not found 2025-05-07T08:59:58.674614Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7501625949488731930:2072];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:59:58.678016Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0028a0/r3tmp/tmpXF849x/pdisk_1.dat 2025-05-07T08:59:59.051627Z node 4 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:59:59.121766Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:59:59.121890Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:59:59.131653Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 24802, node 4 
2025-05-07T08:59:59.393102Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:59:59.393126Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:59:59.393136Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:59:59.393633Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:25439 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:59:59.816535Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... TClient is connected to server localhost:25439 2025-05-07T09:00:00.079723Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnStore, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:00:00.202778Z node 4 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=72075186224037891;self_id=[4:7501625958078667591:2322];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-05-07T09:00:00.240546Z node 4 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=72075186224037891;self_id=[4:7501625958078667591:2322];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-05-07T09:00:00 ... 
,"id":5},{"name":"message","id":6},{"name":"json_payload","id":7},{"name":"ingested_at","id":8},{"name":"saved_at","id":9},{"name":"request_id","id":10},{"name":"flt","id":11},{"name":"dbl","id":12}]},"o":"1,2,3,4,5,6,7,8,9,10,11,12","t":"FetchOriginalData"},"w":24,"id":25},"6":{"p":{"i":"8","p":{"address":{"name":"ingested_at","id":8}},"o":"8","t":"AssembleOriginalData"},"w":29,"id":6},"22":{"p":{"i":"1","p":{"address":{"name":"timestamp","id":1}},"o":"1","t":"AssembleOriginalData"},"w":29,"id":22},"12":{"p":{"i":"6","p":{"address":{"name":"message","id":6}},"o":"6","t":"AssembleOriginalData"},"w":29,"id":12},"26":{"p":{"p":{"data":[{"name":"timestamp","id":1},{"name":"resource_type","id":2},{"name":"resource_id","id":3},{"name":"uid","id":4},{"name":"level","id":5},{"name":"message","id":6},{"name":"json_payload","id":7},{"name":"ingested_at","id":8},{"name":"saved_at","id":9},{"name":"request_id","id":10},{"name":"flt","id":11},{"name":"dbl","id":12}]},"o":"0","t":"ReserveMemory"},"w":0,"id":26}}}; 2025-05-07T09:00:45.252939Z node 10 :TX_COLUMNSHARD INFO: log.cpp:784: self_id=[10:7501626131798172927:2336];tablet_id=72075186224037888;parent=[10:7501626131798172821:2321];fline=manager.cpp:82;event=ask_data;request=request_id=16;3={portions_count=1};; 2025-05-07T09:00:45.253122Z node 10 :TX_COLUMNSHARD DEBUG: log.cpp:784: external_task_id=;fline=actor.cpp:48;task=agents_waiting=1;additional_info=();; 2025-05-07T09:00:45.255053Z node 10 :TX_COLUMNSHARD DEBUG: log.cpp:784: external_task_id=;fline=actor.cpp:48;task=agents_waiting=1;additional_info=();; 2025-05-07T09:00:45.257773Z node 10 :TX_COLUMNSHARD DEBUG: log.cpp:784: event_type=NKikimr::NBlobCache::TEvBlobCache::TEvReadBlobRangeResult;fline=task.cpp:110;event=OnDataReady;task=agents_waiting=0;additional_info=();;external_task_id=; 2025-05-07T09:00:45.261844Z node 10 :TX_COLUMNSHARD DEBUG: log.cpp:784: event_type=NKikimr::NBlobCache::TEvBlobCache::TEvReadBlobRangeResult;fline=task.cpp:110;event=OnDataReady;task=agents_waiting=0;additional_info=();;external_task_id=; 2025-05-07T09:00:45.267971Z node 10 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Finished read cookie: 1 at tablet 72075186224037888 2025-05-07T09:00:45.310992Z node 10 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1746608445001, txId: 18446744073709551615] shutting down 2025-05-07T09:00:45.364837Z node 10 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037888;parent=[10:7501626131798172821:2321];fline=actor.cpp:33;event=skip_flush_writing; 2025-05-07T09:00:45.386722Z node 10 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037890;parent=[10:7501626131798172846:2324];fline=actor.cpp:33;event=skip_flush_writing; 2025-05-07T09:00:45.386786Z node 10 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037891;parent=[10:7501626131798172829:2323];fline=actor.cpp:33;event=skip_flush_writing; 2025-05-07T09:00:45.386813Z node 10 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037889;parent=[10:7501626131798172827:2322];fline=actor.cpp:33;event=skip_flush_writing; 2025-05-07T09:00:45.398206Z node 10 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037889;self_id=[10:7501626131798172827:2322];ev=NActors::TEvents::TEvWakeup;fline=columnshard.cpp:253;event=TEvPrivate::TEvPeriodicWakeup::MANUAL;tablet_id=72075186224037889; 2025-05-07T09:00:45.410302Z node 10 :TX_COLUMNSHARD DEBUG: log.cpp:784: 
tablet_id=72075186224037888;self_id=[10:7501626131798172821:2321];ev=NActors::TEvents::TEvWakeup;fline=columnshard.cpp:253;event=TEvPrivate::TEvPeriodicWakeup::MANUAL;tablet_id=72075186224037888; 2025-05-07T09:00:45.410394Z node 10 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037891;self_id=[10:7501626131798172829:2323];ev=NActors::TEvents::TEvWakeup;fline=columnshard.cpp:253;event=TEvPrivate::TEvPeriodicWakeup::MANUAL;tablet_id=72075186224037891; 2025-05-07T09:00:45.434178Z node 10 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037890;self_id=[10:7501626131798172846:2324];ev=NActors::TEvents::TEvWakeup;fline=columnshard.cpp:253;event=TEvPrivate::TEvPeriodicWakeup::MANUAL;tablet_id=72075186224037890; 2025-05-07T09:00:48.463523Z node 13 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[13:7501626163945473680:2218];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0028a0/r3tmp/tmpJhM9Pa/pdisk_1.dat 2025-05-07T09:00:48.675355Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-05-07T09:00:48.923356Z node 13 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:00:48.976895Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:00:48.977049Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:00:48.989911Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 22947, node 13 2025-05-07T09:00:49.346104Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:00:49.346138Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:00:49.346152Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:00:49.346353Z node 13 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:4683 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-05-07T09:00:50.368919Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:00:53.462484Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[13:7501626163945473680:2218];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:00:53.462585Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:00:57.059830Z node 13 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [13:7501626202600180366:2350], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:00:57.059962Z node 13 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:00:57.148603Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-05-07T09:00:57.418863Z node 13 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [13:7501626202600180546:2360], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:00:57.419098Z node 13 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:00:57.419837Z node 13 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [13:7501626202600180551:2363], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:00:57.431508Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480 2025-05-07T09:00:57.530488Z node 13 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [13:7501626202600180553:2364], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-05-07T09:00:57.624814Z node 13 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [13:7501626202600180629:2815] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:00:58.280233Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715661. Ctx: { TraceId: 01jtmzm1p7f4hqdxcpmpth0amg, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=ZTUxMjE3MzAtZjM2OWVmYjYtYzljNmY2NTEtYjE1YWY3Zjg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:00:58.412921Z node 13 :TX_PROXY WARN: rpc_alter_table.cpp:329: [AlterTableAddIndex [13:7501626206895148003:2382] TxId# 281474976715663] Access check failed 2025-05-07T09:00:58.516454Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710758:2, at schemeshard: 72057594046644480 2025-05-07T09:00:58.669011Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710759:0, at schemeshard: 72057594046644480 2025-05-07T09:00:58.890579Z node 13 :TX_PROXY ERROR: rpc_alter_table.cpp:274: [AlterTableAddIndex [13:7501626206895148385:2399] TxId# 281474976715665] Unable to navigate: Root/WrongPath status: PathErrorUnknown 2025-05-07T09:00:59.148741Z node 13 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 13, TabletId: 72075186224037889 not found |91.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/fq/control_plane_storage/ydb-tests-fq-control_plane_storage |91.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/fq/control_plane_storage/ydb-tests-fq-control_plane_storage |91.7%| [LD] {RESULT} $(B)/ydb/tests/fq/control_plane_storage/ydb-tests-fq-control_plane_storage ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> YdbYqlClient::TestExplicitPartitioning [GOOD] Test command err: 2025-05-07T09:00:01.559018Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501625962085773360:2140];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:00:01.559065Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00289c/r3tmp/tmp4iobhB/pdisk_1.dat 2025-05-07T09:00:02.306505Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:00:02.362186Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:00:02.362353Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:00:02.379516Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 9459, node 1 
2025-05-07T09:00:02.842624Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:00:02.842647Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:00:02.842653Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:00:02.842770Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:62038 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:00:03.393707Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:00:06.562296Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501625962085773360:2140];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:00:06.562402Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:00:06.747888Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501625983560610869:2339], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:00:06.747996Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:00:07.670795Z node 1 :BS_CONTROLLER ERROR: {BSC07@impl.h:2175} ProcessControllerEvent event processing took too much time Type# 2146435072 Duration# 0.244273s 2025-05-07T09:00:07.670848Z node 1 :BS_CONTROLLER ERROR: {BSC00@bsc.cpp:666} StateWork event processing took too much time Type# 2146435078 Duration# 0.244437s 2025-05-07T09:00:07.703529Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501625987855578214:2647] txid# 281474976710658, issues: { message: "Column Key has wrong key type Double" severity: 1 } 2025-05-07T09:00:09.611264Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7501625998816114396:2075];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:00:09.611350Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00289c/r3tmp/tmp7uAuG5/pdisk_1.dat 2025-05-07T09:00:10.056190Z node 4 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:00:10.173362Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:00:10.173468Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:00:10.179938Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 29008, node 4 2025-05-07T09:00:10.465115Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:00:10.465149Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:00:10.465158Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:00:10.465333Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:24028 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-05-07T09:00:11.568843Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:00:14.620704Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7501625998816114396:2075];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:00:14.620784Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:00:16.049113Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7501626028880886586:2361], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:00:16.049363Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:00:16.078450Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7501626028880886626:2373], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:00:16.078680Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:00:16.108513Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-05-07T09:00:16.117379Z node 4 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [4:7501626028880886669:2674] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/Test\', error: path exists but creating right now (id: [OwnerId: 72057594046644480, LocalPathId: 2], type: EPathTypeTable, state: EPathStateCreate)" severity: 1 } 2025-05-07T09:00:16.123389Z node 4 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [4:7501626028880886686:2689] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/Test\', error: path exists but creating right now (id: [OwnerId: 72057594046644480, LocalPathId: 2], type: EPathTypeTable, state: EPathStateCreate)" severity: 1 } 2025-05-07T09:00:16.133760Z node 4 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [4:7501626028880886710:2705] txid# 281474976715663, issues: { message: "Check failed: path: \'/Root/Test\', error: path exists but creating right now (id: [OwnerId: 72057594046644480, LocalPathId: 2], type: EPathTypeTable, state: EPathStateCreate)" severity: 1 } 2025-05-07T09:00:16.135570Z node 4 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [4:7501626028880886708:2703] txid# 281474976715661, issues: { message: "Check failed: path: \'/Root/Test\', error: path exists but creating right now (id: [OwnerId: 72057594046644480, LocalPathId: 2], type: EPathTypeTable, state: EPathStateCreate)" severity: 1 } 2025-05-07T09:00:16.138352Z node 4 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [4:7501626028880886709:2704] txid# 281474976715662, issues: { message: "Check failed: path: \'/Root/Test\', error: path exists but creating right now (id: [OwnerId: 72057594046644480, LocalPathId: 2], type: EPathTypeTable, state: EPathStateCreate)" severity: 1 } 2025-05-07T09:00:16.144509Z node 4 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [4:7501626028880886727:2717] txid# 281474976715665, issues: { message: "Check failed: path: \'/Root/Test\', error: path exists but creating right now (id: [OwnerId: 72057594046644480, LocalPathId: 2], type: EPathTypeTable, state: EPathStateCreate)" severity: 1 } 2025-05-07T09:00:16.144675Z node 4 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [4:7501626028880886726:2716] txid# 281474976715664, issues: { message: "Check failed: path: \'/Root/Test\', error: path exists but creating ri ... 
N: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:00:19.195780Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:00:19.204720Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 29871, node 7 2025-05-07T09:00:19.462812Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:00:19.462841Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:00:19.462850Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:00:19.463022Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:14354 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:00:20.037851Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:00:23.682092Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[7:7501626036246418528:2150];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:00:23.682175Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:00:23.880021Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-05-07T09:00:24.061502Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7501626062016223513:2348], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:00:24.061623Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:00:24.062264Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7501626062016223525:2351], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:00:24.067208Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480 2025-05-07T09:00:24.107964Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7501626062016223527:2352], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-05-07T09:00:24.174328Z node 7 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [7:7501626062016223594:2836] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:00:24.324523Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715661. Ctx: { TraceId: 01jtmzk13v33ndnetjv3v86kdh, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=NTY5ODYwYWUtZTVlMjE4YS01MjhmNTQwOC0xNTc5ODFkYg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:00:27.104592Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7501626074714644618:2073];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:00:27.104681Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00289c/r3tmp/tmpo4xjLw/pdisk_1.dat 2025-05-07T09:00:27.586277Z node 10 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:00:27.672830Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:00:27.672932Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:00:27.687928Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 3268, node 10 2025-05-07T09:00:28.018776Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:00:28.018818Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:00:28.018848Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:00:28.019074Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:65311 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-05-07T09:00:28.587523Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:00:32.104316Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[10:7501626074714644618:2073];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:00:32.104392Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:00:32.463181Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-05-07T09:00:42.530663Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7261: Cannot get console configs 2025-05-07T09:00:42.530700Z node 10 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:00:58.624159Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7501626207858633158:2541], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:00:58.624279Z node 10 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:00:58.625225Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7501626207858633170:2544], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:00:58.629956Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480 2025-05-07T09:00:58.702204Z node 10 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [10:7501626207858633172:2545], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-05-07T09:00:58.791278Z node 10 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [10:7501626207858633247:3194] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:00:58.933193Z node 10 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715661. Ctx: { TraceId: 01jtmzm2vx6xrc5f57he117hp5, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=10&id=OWY0YmEzZTMtZjQ5MmRkNzYtYjUxZjg0OS1iOWMyODQ4YQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:01:00.123222Z node 10 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715662. Ctx: { TraceId: 01jtmzm371cbew3rtvz9gyjxzk, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=10&id=OWY0YmEzZTMtZjQ5MmRkNzYtYjUxZjg0OS1iOWMyODQ4YQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root >> TCmsTest::RequestRestartServicesWrongHost [GOOD] >> TCmsTest::RestartNodeInDownState >> TopicAutoscaling::PartitionSplit_ManySession_existed_AutoscaleAwareSDK [GOOD] >> TFlatTest::SelectRangeReverseItemsLimit [GOOD] >> TFlatTest::SelectRangeReverseIncludeKeys >> TGRpcAuthentication::NoDescribeRights [GOOD] >> TGRpcClientLowTest::BiStreamPing >> TGRpcLdapAuthentication::LdapAuthSettingsWithEmptyHosts >> YdbYqlClient::TestReadWrongTable [GOOD] >> TYqlDateTimeTests::IntervalKey [GOOD] >> TYqlDateTimeTests::SimpleOperations >> TCmsTest::RestartNodeInDownState [GOOD] >> TCmsTest::SamePriorityRequest >> YdbYqlClient::TestReadTableMultiShard >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_ttl_Timestamp-pk_types12-all_types12-index12-Timestamp--] [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> YdbYqlClient::TestReadWrongTable [GOOD] Test command err: 2025-05-07T09:00:35.464440Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501626111422165061:2207];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:00:35.464584Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002882/r3tmp/tmpiZLgvV/pdisk_1.dat 2025-05-07T09:00:36.079401Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:00:36.146105Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:00:36.146244Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:00:36.155359Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 7233, node 1 2025-05-07T09:00:36.229120Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:00:36.229151Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 
2025-05-07T09:00:36.229163Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:00:36.229303Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:18904 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:00:36.701204Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:00:39.703678Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501626128602035155:2338], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:00:39.703770Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501626128602035166:2341], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:00:39.703850Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:00:39.707956Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480 2025-05-07T09:00:39.745187Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501626128602035169:2342], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-05-07T09:00:39.819358Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501626128602035238:2678] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:00:40.470100Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501626111422165061:2207];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:00:40.470191Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:00:42.447056Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7501626140530679171:2071];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:00:42.450974Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002882/r3tmp/tmpUgg8iY/pdisk_1.dat 2025-05-07T09:00:43.028931Z node 4 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:00:43.067924Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:00:43.068027Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:00:43.075815Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 27122, node 4 2025-05-07T09:00:43.186034Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:00:43.186063Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:00:43.186070Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:00:43.186225Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:12886 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-05-07T09:00:43.565491Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:00:47.450123Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7501626140530679171:2071];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:00:47.450196Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:00:47.580471Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7501626162005516756:2339], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:00:47.580619Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:00:47.580863Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7501626162005516768:2342], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:00:47.585510Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480 2025-05-07T09:00:47.676850Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7501626162005516770:2343], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-05-07T09:00:47.766450Z node 4 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [4:7501626162005516845:2685] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:00:49.935563Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7501626169629134099:2141];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:00:49.936145Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002882/r3tmp/tmpJz4qsT/pdisk_1.dat 2025-05-07T09:00:50.111761Z node 7 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:00:50.160271Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:00:50.160374Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:00:50.176365Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort ... _actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7501626191103971721:2348], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:00:54.623021Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:00:54.624063Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7501626191103971726:2351], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:00:54.628926Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480 2025-05-07T09:00:54.665102Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7501626191103971728:2352], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-05-07T09:00:54.762734Z node 7 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [7:7501626191103971806:2795] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:00:54.927171Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715661. Ctx: { TraceId: 01jtmzkyyt2h74tn8zpmhzrby1, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=YTM5ODc0NDYtNjJhOTI4ZjEtMzBlZGMxOGYtZWE5YzNiOTY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:00:54.936318Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[7:7501626169629134099:2141];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:00:54.936565Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:00:55.041167Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715662. Ctx: { TraceId: 01jtmzkz9we0fa6rjdrqs33me7, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=NDFlMDhhZjAtMTY5MDI2NzctMTY3ODlkZjYtOWIyNzdhNzk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:00:55.192594Z node 7 :KQP_SESSION WARN: kqp_session_actor.cpp:2578: SessionId: ydb://session/3?node_id=7&id=YTM5ODc0NDYtNjJhOTI4ZjEtMzBlZGMxOGYtZWE5YzNiOTY=, ActorId: [7:7501626191103971529:2336], ActorState: ExecuteState, TraceId: 01jtmzkzcv2ea4z5b9gfn415hg, Create QueryResponse for error on request, msg: 2025-05-07T09:00:57.406378Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7501626202974076920:2072];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:00:57.406438Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002882/r3tmp/tmpyTtqda/pdisk_1.dat 2025-05-07T09:00:57.591820Z node 10 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:00:57.636010Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:00:57.636126Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:00:57.640210Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 65220, node 10 2025-05-07T09:00:57.882875Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:00:57.882899Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:00:57.882937Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:00:57.883087Z node 10 :NET_CLASSIFIER ERROR: 
net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:29195 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:00:58.285751Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:00:58.378262Z node 10 :GRPC_SERVER INFO: grpc_request_proxy.cpp:572: Got grpc request# ListEndpointsRequest, traceId# 01jtmzm2m9db20mxs4bc5errcc, sdkBuildInfo# ydb-cpp-sdk/dev, state# AS_NOT_PERFORMED, database# undef, peer# ipv6:[::1]:44804, grpcInfo# grpc-c++/1.54.3 grpc-c/31.0.0 (linux; chttp2), timeout# 9.997763s 2025-05-07T09:00:58.391027Z node 10 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:575: Got grpc request# CreateSessionRequest, traceId# 01jtmzm2mp9nrexxynrzbwepy3, sdkBuildInfo# ydb-cpp-sdk/dev, state# AS_NOT_PERFORMED, database# undef, peer# ipv6:[::1]:44810, grpcInfo# grpc-c++/1.54.3 grpc-c/31.0.0 (linux; chttp2), timeout# undef 2025-05-07T09:01:02.407140Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[10:7501626202974076920:2072];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:02.407219Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:01:03.382328Z node 10 :GRPC_SERVER INFO: grpc_request_proxy.cpp:572: Got grpc request# ListEndpointsRequest, traceId# 01jtmzm7gn31qwqfrx9vcp0y6p, sdkBuildInfo# ydb-cpp-sdk/dev, state# AS_NOT_PERFORMED, database# undef, peer# ipv6:[::1]:44810, grpcInfo# grpc-c++/1.54.3 grpc-c/31.0.0 (linux; chttp2), timeout# 9.997681s 2025-05-07T09:01:04.763540Z node 10 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:575: Got grpc request# ReadTableRequest, traceId# 01jtmzm8vv9h5nhfxwv0khrvyk, sdkBuildInfo# undef, state# AS_NOT_PERFORMED, database# undef, peer# ipv6:[::1]:55612, grpcInfo# grpc-c++/1.54.3 grpc-c/31.0.0 (linux; chttp2), timeout# undef 2025-05-07T09:01:04.775967Z node 10 :TX_PROXY ERROR: read_table_impl.cpp:567: [ReadTable [10:7501626233038849112:2344] TxId# 281474976710658] Navigate request failed for table 'Root/NoTable' 2025-05-07T09:01:04.800193Z node 10 :TX_PROXY ERROR: read_table_impl.cpp:2919: [ReadTable [10:7501626233038849112:2344] TxId# 281474976710658] RESPONSE Status# ResolveError shard: 0 table: Root/NoTable 2025-05-07T09:01:04.800951Z node 10 :READ_TABLE_API NOTICE: rpc_read_table.cpp:531: 
[10:7501626233038849111:2344] Finish grpc stream, status: 400070
: Error: Failed to resolve table Root/NoTable, code: 200400
: Error: Got ResolveError response from TxProxy
: Error: Failed to resolve table Root/NoTable 2025-05-07T09:01:04.809514Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000021080] received request Name# SchemeOperation ok# false data# peer# current inflight# 0 2025-05-07T09:01:04.809766Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a00007aa80] received request Name# SchemeOperationStatus ok# false data# peer# current inflight# 0 2025-05-07T09:01:04.809939Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000021680] received request Name# SchemeDescribe ok# false data# peer# current inflight# 0 2025-05-07T09:01:04.810144Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000079e80] received request Name# ChooseProxy ok# false data# peer# current inflight# 0 2025-05-07T09:01:04.810320Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000020a80] received request Name# PersQueueRequest ok# false data# peer# current inflight# 0 2025-05-07T09:01:04.810529Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a00016da80] received request Name# SchemeInitRoot ok# false data# peer# current inflight# 0 2025-05-07T09:01:04.810704Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000079880] received request Name# ResolveNode ok# false data# peer# current inflight# 0 2025-05-07T09:01:04.810872Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000078c80] received request Name# FillNode ok# false data# peer# current inflight# 0 2025-05-07T09:01:04.811053Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000161480] received request Name# DrainNode ok# false data# peer# current inflight# 0 2025-05-07T09:01:04.811211Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a00005b880] received request Name# BlobStorageConfig ok# false data# peer# current inflight# 0 2025-05-07T09:01:04.811382Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000041480] received request Name# HiveCreateTablet ok# false data# peer# current inflight# 0 2025-05-07T09:01:04.811556Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a00004f280] received request Name# TestShardControl ok# false data# peer# current inflight# 0 2025-05-07T09:01:04.811710Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a00016d480] received request Name# RegisterNode ok# false data# peer# current inflight# 0 2025-05-07T09:01:04.811871Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a00016e080] received request Name# CmsRequest ok# false data# peer# current inflight# 0 2025-05-07T09:01:04.812022Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a00016e680] received request Name# ConsoleRequest ok# false data# peer# current inflight# 0 2025-05-07T09:01:04.812209Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000160880] received request Name# InterconnectDebug ok# false data# peer# current inflight# 0 2025-05-07T09:01:04.812363Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a00006a280] received request Name# TabletStateRequest ok# false data# peer# current inflight# 0 >> KqpIndexes::UpsertWithoutExtraNullDelete+UseSink >> KqpUniqueIndex::UpdateOnFkSelectResultSameValue >> KqpUniqueIndex::InsertFkPartialColumnSet |91.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/async_replication/py3test >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_ttl_Uint64-pk_types10-all_types10-index10-Uint64--] [GOOD] >> TGRpcYdbTest::ExecuteDmlQuery [GOOD] >> TGRpcYdbTest::ExecutePreparedQuery >> 
TRegisterNodeOverLegacyService::ServerWithCertVerification_ClientDoesNotProvideCorrectCerts [GOOD] >> YdbYqlClient::CreateTableWithUniformPartitions [GOOD] >> YdbYqlClient::CreateTableWithUniformPartitionsAndAutoPartitioning >> TCmsTest::SamePriorityRequest [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/ut_with_sdk/unittest >> TopicAutoscaling::PartitionSplit_ManySession_existed_AutoscaleAwareSDK [GOOD] Test command err: 2025-05-07T08:57:57.303197Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501625432071360333:2070];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:57:57.307079Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-05-07T08:57:57.570006Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00402b/r3tmp/tmp2yvnsv/pdisk_1.dat 2025-05-07T08:57:57.866139Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:57:57.875712Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:57:57.875788Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:57:57.878116Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 27370, node 1 2025-05-07T08:57:57.982312Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/zvgn/00402b/r3tmp/yandexVfKSjU.tmp 2025-05-07T08:57:57.982349Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/zvgn/00402b/r3tmp/yandexVfKSjU.tmp 2025-05-07T08:57:57.982544Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/zvgn/00402b/r3tmp/yandexVfKSjU.tmp 2025-05-07T08:57:57.982680Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-05-07T08:57:58.062694Z INFO: TTestServer started on Port 23717 GrpcPort 27370 TClient is connected to server localhost:23717 PQClient connected to localhost:27370 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-05-07T08:57:58.517019Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-05-07T08:57:58.587093Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-05-07T08:57:58.748322Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710660, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:57:58.770256Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710661, at schemeshard: 72057594046644480 2025-05-07T08:58:00.874509Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501625444956263020:2339], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:58:00.874636Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:58:00.874750Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501625444956263033:2342], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:58:00.879210Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710662:3, at schemeshard: 72057594046644480 2025-05-07T08:58:00.883378Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501625444956263070:2346], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:58:00.883458Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:58:00.889184Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501625444956263035:2343], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710662 completed, doublechecking } 2025-05-07T08:58:01.167686Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501625444956263091:2443] txid# 281474976710663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:58:01.197081Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T08:58:01.238273Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T08:58:01.293371Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7501625449251230403:2349], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:5:17: Error: At function: KiReadTable!
:5:17: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Versions]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-05-07T08:58:01.293632Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2147: SessionId: ydb://session/3?node_id=1&id=MTlkNGE2NTAtODJkNDIwZWYtMTU0NTM3Y2EtNDZiNGNhYWM=, ActorId: [1:7501625444956263003:2337], ActorState: ExecuteState, TraceId: 01jtmzen8c1p6qc0ghz6knmfqs, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-05-07T08:58:01.295810Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 5 column: 17 } message: "At function: KiReadTable!" end_position { row: 5 column: 17 } severity: 1 issues { position { row: 5 column: 17 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Versions]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 5 column: 17 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-05-07T08:58:01.346550Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); === CheckClustersList. Subcribe to ClusterTracker from [1:7501625449251230694:2622] 2025-05-07T08:58:02.303162Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501625432071360333:2070];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:58:02.303229Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; === CheckClustersList. 
Ok 2025-05-07T08:58:07.801719Z :TopicSplitMerge INFO: TTopicSdkTestSetup started 2025-05-07T08:58:07.822716Z node 1 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:132: new create topic request 2025-05-07T08:58:07.824140Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269877761, Sender [1:7501625475021034662:2695], Recipient [1:7501625432071360732:2188]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-05-07T08:58:07.824176Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4937: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-05-07T08:58:07.824189Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5713: Pipe server connected, at tablet: 72057594046644480 2025-05-07T08:58:07.824234Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271122432, Sender [1:7501625475021034658:2692], Recipient [1:7501625432071360732:2188]: {TEvModifySchemeTransaction txid# 281474976710673 TabletId# 72057594046644480} 2025-05-07T08:58:07.824248Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4851: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-05-07T08:58:07.881292Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/Root" OperationType: ESchemeOpCreatePersQueueGroup CreatePersQueueGroup { Name: "test-topic" TotalGroupCount: 1 PartitionPerTablet: 1 PQTabletConfig { PartitionConfig { MaxCountInPartition: 2147483647 LifetimeSeconds: 86400 SourceIdLifetimeSeconds: 1382400 WriteSpeedInBytesPerSecond: 1048576 BurstSize: 1048576 ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } SourceIdMaxCounts: 6000000 } RequireAuthWrite: true RequireAuthRead: true FormatVersion: 0 Cod ... 55461:2443]: NKikimr::TEvPQ::TEvPartitionLabeledCounters 2025-05-07T09:01:01.545074Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5214: HandleHook, processing event TEvPQ::TEvPartitionLabeledCounters 2025-05-07T09:01:01.550179Z node 7 :PERSQUEUE TRACE: partition.h:561: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7501626211569831584:2813], Partition 1, Sender [0:0:0], Recipient [7:7501626211569831690:2828], Cookie: 0 2025-05-07T09:01:01.550263Z node 7 :PERSQUEUE TRACE: partition.h:563: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7501626211569831690:2828]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-05-07T09:01:01.550296Z node 7 :PERSQUEUE TRACE: partition.h:589: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-05-07T09:01:01.550354Z node 7 :PERSQUEUE TRACE: partition.cpp:398: [PQ: 72075186224037897, Partition: 1, State: StateIdle] Have 0 items to delete old stuff 2025-05-07T09:01:01.550442Z node 7 :PERSQUEUE TRACE: partition.cpp:407: [PQ: 72075186224037897, Partition: 1, State: StateIdle] Have 0 items to delete all stuff. 
Delete command NKikimrClient.TKeyValueRequest 2025-05-07T09:01:01.550474Z node 7 :PERSQUEUE TRACE: partition_write.cpp:163: [PQ: 72075186224037897, Partition: 1, State: StateIdle] TPartition::ProcessReserveRequests. 2025-05-07T09:01:01.550505Z node 7 :PERSQUEUE TRACE: partition_write.cpp:252: [PQ: 72075186224037897, Partition: 1, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-05-07T09:01:01.550572Z node 7 :PERSQUEUE TRACE: partition.h:561: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7501626155735255461:2443], Partition 0, Sender [0:0:0], Recipient [7:7501626155735255521:2447], Cookie: 0 2025-05-07T09:01:01.550606Z node 7 :PERSQUEUE TRACE: partition.h:563: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7501626155735255521:2447]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-05-07T09:01:01.550622Z node 7 :PERSQUEUE TRACE: partition.h:589: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-05-07T09:01:01.550655Z node 7 :PERSQUEUE TRACE: partition.cpp:398: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete old stuff 2025-05-07T09:01:01.550695Z node 7 :PERSQUEUE TRACE: partition.cpp:407: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-05-07T09:01:01.550712Z node 7 :PERSQUEUE TRACE: partition_write.cpp:163: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ProcessReserveRequests. 2025-05-07T09:01:01.550731Z node 7 :PERSQUEUE TRACE: partition_write.cpp:252: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-05-07T09:01:01.550783Z node 7 :PERSQUEUE TRACE: partition.h:561: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7501626211569831582:2812], Partition 2, Sender [0:0:0], Recipient [7:7501626211569831696:2831], Cookie: 0 2025-05-07T09:01:01.550817Z node 7 :PERSQUEUE TRACE: partition.h:563: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7501626211569831696:2831]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-05-07T09:01:01.550830Z node 7 :PERSQUEUE TRACE: partition.h:589: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-05-07T09:01:01.550853Z node 7 :PERSQUEUE TRACE: partition.cpp:398: [PQ: 72075186224037896, Partition: 2, State: StateIdle] Have 0 items to delete old stuff 2025-05-07T09:01:01.550887Z node 7 :PERSQUEUE TRACE: partition.cpp:407: [PQ: 72075186224037896, Partition: 2, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-05-07T09:01:01.550902Z node 7 :PERSQUEUE TRACE: partition_write.cpp:163: [PQ: 72075186224037896, Partition: 2, State: StateIdle] TPartition::ProcessReserveRequests. 2025-05-07T09:01:01.550918Z node 7 :PERSQUEUE TRACE: partition_write.cpp:252: [PQ: 72075186224037896, Partition: 2, State: StateIdle] TPartition::AnswerCurrentWrites. 
Responses.size()=0 2025-05-07T09:01:01.651162Z node 7 :PERSQUEUE TRACE: partition.h:561: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7501626211569831584:2813], Partition 1, Sender [0:0:0], Recipient [7:7501626211569831690:2828], Cookie: 0 2025-05-07T09:01:01.651243Z node 7 :PERSQUEUE TRACE: partition.h:563: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7501626211569831690:2828]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-05-07T09:01:01.651273Z node 7 :PERSQUEUE TRACE: partition.h:589: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-05-07T09:01:01.651337Z node 7 :PERSQUEUE TRACE: partition.cpp:398: [PQ: 72075186224037897, Partition: 1, State: StateIdle] Have 0 items to delete old stuff 2025-05-07T09:01:01.651420Z node 7 :PERSQUEUE TRACE: partition.cpp:407: [PQ: 72075186224037897, Partition: 1, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-05-07T09:01:01.651460Z node 7 :PERSQUEUE TRACE: partition_write.cpp:163: [PQ: 72075186224037897, Partition: 1, State: StateIdle] TPartition::ProcessReserveRequests. 2025-05-07T09:01:01.651494Z node 7 :PERSQUEUE TRACE: partition_write.cpp:252: [PQ: 72075186224037897, Partition: 1, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-05-07T09:01:01.651567Z node 7 :PERSQUEUE TRACE: partition.h:561: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7501626155735255461:2443], Partition 0, Sender [0:0:0], Recipient [7:7501626155735255521:2447], Cookie: 0 2025-05-07T09:01:01.651603Z node 7 :PERSQUEUE TRACE: partition.h:563: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7501626155735255521:2447]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-05-07T09:01:01.651620Z node 7 :PERSQUEUE TRACE: partition.h:589: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-05-07T09:01:01.651648Z node 7 :PERSQUEUE TRACE: partition.cpp:398: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete old stuff 2025-05-07T09:01:01.651686Z node 7 :PERSQUEUE TRACE: partition.cpp:407: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-05-07T09:01:01.651704Z node 7 :PERSQUEUE TRACE: partition_write.cpp:163: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ProcessReserveRequests. 2025-05-07T09:01:01.651724Z node 7 :PERSQUEUE TRACE: partition_write.cpp:252: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-05-07T09:01:01.651765Z node 7 :PERSQUEUE TRACE: partition.h:561: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7501626211569831582:2812], Partition 2, Sender [0:0:0], Recipient [7:7501626211569831696:2831], Cookie: 0 2025-05-07T09:01:01.651801Z node 7 :PERSQUEUE TRACE: partition.h:563: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7501626211569831696:2831]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-05-07T09:01:01.651818Z node 7 :PERSQUEUE TRACE: partition.h:589: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-05-07T09:01:01.651841Z node 7 :PERSQUEUE TRACE: partition.cpp:398: [PQ: 72075186224037896, Partition: 2, State: StateIdle] Have 0 items to delete old stuff 2025-05-07T09:01:01.651876Z node 7 :PERSQUEUE TRACE: partition.cpp:407: [PQ: 72075186224037896, Partition: 2, State: StateIdle] Have 0 items to delete all stuff. 
Delete command NKikimrClient.TKeyValueRequest 2025-05-07T09:01:01.651894Z node 7 :PERSQUEUE TRACE: partition_write.cpp:163: [PQ: 72075186224037896, Partition: 2, State: StateIdle] TPartition::ProcessReserveRequests. 2025-05-07T09:01:01.651913Z node 7 :PERSQUEUE TRACE: partition_write.cpp:252: [PQ: 72075186224037896, Partition: 2, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-05-07T09:01:01.754473Z node 7 :PERSQUEUE TRACE: partition.h:561: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7501626211569831584:2813], Partition 1, Sender [0:0:0], Recipient [7:7501626211569831690:2828], Cookie: 0 2025-05-07T09:01:01.754575Z node 7 :PERSQUEUE TRACE: partition.h:563: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7501626211569831690:2828]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-05-07T09:01:01.754607Z node 7 :PERSQUEUE TRACE: partition.h:589: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-05-07T09:01:01.754665Z node 7 :PERSQUEUE TRACE: partition.cpp:398: [PQ: 72075186224037897, Partition: 1, State: StateIdle] Have 0 items to delete old stuff 2025-05-07T09:01:01.754752Z node 7 :PERSQUEUE TRACE: partition.cpp:407: [PQ: 72075186224037897, Partition: 1, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-05-07T09:01:01.754781Z node 7 :PERSQUEUE TRACE: partition_write.cpp:163: [PQ: 72075186224037897, Partition: 1, State: StateIdle] TPartition::ProcessReserveRequests. 2025-05-07T09:01:01.754826Z node 7 :PERSQUEUE TRACE: partition_write.cpp:252: [PQ: 72075186224037897, Partition: 1, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-05-07T09:01:01.754900Z node 7 :PERSQUEUE TRACE: partition.h:561: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7501626155735255461:2443], Partition 0, Sender [0:0:0], Recipient [7:7501626155735255521:2447], Cookie: 0 2025-05-07T09:01:01.754940Z node 7 :PERSQUEUE TRACE: partition.h:563: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7501626155735255521:2447]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-05-07T09:01:01.754955Z node 7 :PERSQUEUE TRACE: partition.h:589: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-05-07T09:01:01.754985Z node 7 :PERSQUEUE TRACE: partition.cpp:398: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete old stuff 2025-05-07T09:01:01.755022Z node 7 :PERSQUEUE TRACE: partition.cpp:407: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-05-07T09:01:01.755038Z node 7 :PERSQUEUE TRACE: partition_write.cpp:163: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ProcessReserveRequests. 2025-05-07T09:01:01.755058Z node 7 :PERSQUEUE TRACE: partition_write.cpp:252: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::AnswerCurrentWrites. 
Responses.size()=0 2025-05-07T09:01:01.755102Z node 7 :PERSQUEUE TRACE: partition.h:561: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7501626211569831582:2812], Partition 2, Sender [0:0:0], Recipient [7:7501626211569831696:2831], Cookie: 0 2025-05-07T09:01:01.755139Z node 7 :PERSQUEUE TRACE: partition.h:563: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7501626211569831696:2831]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-05-07T09:01:01.755154Z node 7 :PERSQUEUE TRACE: partition.h:589: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-05-07T09:01:01.755179Z node 7 :PERSQUEUE TRACE: partition.cpp:398: [PQ: 72075186224037896, Partition: 2, State: StateIdle] Have 0 items to delete old stuff 2025-05-07T09:01:01.755211Z node 7 :PERSQUEUE TRACE: partition.cpp:407: [PQ: 72075186224037896, Partition: 2, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-05-07T09:01:01.755229Z node 7 :PERSQUEUE TRACE: partition_write.cpp:163: [PQ: 72075186224037896, Partition: 2, State: StateIdle] TPartition::ProcessReserveRequests. 2025-05-07T09:01:01.755247Z node 7 :PERSQUEUE TRACE: partition_write.cpp:252: [PQ: 72075186224037896, Partition: 2, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 >> TopicAutoscaling::PartitionSplit_ReadNotEmptyPartitions_AutoscaleAwareSDK [GOOD] >> TopicAutoscaling::ReBalancingAfterSplit_sessionsWithPartition >> TFlatTest::SelectRangeReverseIncludeKeys [GOOD] >> TGRpcYdbTest::CreateTableBadRequest3 [GOOD] >> TGRpcYdbTest::CreateAlterCopyAndDropTable >> TopicAutoscaling::ReadingAfterSplitTest_PreferedPartition_AutoscaleAwareSDK [GOOD] >> TopicAutoscaling::ReadFromTimestamp_BeforeAutoscaleAwareSDK |91.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/cms/ut/unittest >> TCmsTest::SamePriorityRequest [GOOD] >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_index_1__SYNC-pk_types3-all_types3-index3---SYNC] [GOOD] >> Cdc::MustNotLoseSchemaSnapshotWithVolatileTx [GOOD] >> Cdc::ResolvedTimestampForDisplacedUpsert ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> TRegisterNodeOverLegacyService::ServerWithCertVerification_ClientDoesNotProvideCorrectCerts [GOOD] Test command err: 2025-05-07T09:00:04.651556Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501625974768941566:2206];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:00:04.651686Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002898/r3tmp/tmpOPKaJB/pdisk_1.dat 2025-05-07T09:00:05.426067Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:00:05.429302Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:00:05.463463Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:00:05.600803Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 29174, node 1 2025-05-07T09:00:05.723583Z node 1 :GRPC_SERVER WARN: grpc_request_proxy.cpp:509: 
SchemeBoardDelete /Root Strong=0 2025-05-07T09:00:05.723669Z node 1 :GRPC_SERVER WARN: grpc_request_proxy.cpp:509: SchemeBoardDelete /Root Strong=0 2025-05-07T09:00:05.723719Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T09:00:06.029660Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:00:06.029683Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:00:06.029695Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:00:06.029789Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:18988 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:00:06.446553Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T09:00:06.777374Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1784: Ticket **** (B6C6F477) (ipv6:[::1]:38612) has now valid token of root@builtin 2025-05-07T09:00:06.988285Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:758: CanInitLoginToken, domain db /Root, request db /Root, token db , DomainLoginOnly 1 2025-05-07T09:00:06.988309Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:763: CanInitLoginToken, target database candidates(1): /Root 2025-05-07T09:00:06.988321Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:816: CanInitLoginToken, database /Root, A6 error 2025-05-07T09:00:06.988349Z node 1 :TICKET_PARSER ERROR: ticket_parser_impl.h:969: Ticket **** (0C093832): Could not find correct token validator 2025-05-07T09:00:10.875061Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7501626001286038994:2213];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:00:10.875562Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002898/r3tmp/tmptpvywy/pdisk_1.dat 2025-05-07T09:00:11.189224Z node 4 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:00:11.233205Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:00:11.233677Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:00:11.238462Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 15309, node 4 2025-05-07T09:00:11.475744Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:00:11.475773Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:00:11.475782Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:00:11.477301Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8015 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-05-07T09:00:12.152656Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:00:12.487896Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1784: Ticket **** (B6C6F477) (ipv6:[::1]:59936) has now valid token of root@builtin 2025-05-07T09:00:12.598365Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:758: CanInitLoginToken, domain db /Root, request db /Root, token db , DomainLoginOnly 1 2025-05-07T09:00:12.598397Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:763: CanInitLoginToken, target database candidates(1): /Root 2025-05-07T09:00:12.598407Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:816: CanInitLoginToken, database /Root, A6 error 2025-05-07T09:00:12.598450Z node 4 :TICKET_PARSER ERROR: ticket_parser_impl.h:969: Ticket **** (0C093832): Could not find correct token validator 2025-05-07T09:00:17.716212Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7501626032271187944:2077];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:00:17.716293Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002898/r3tmp/tmp0QmXEX/pdisk_1.dat 2025-05-07T09:00:18.001073Z node 7 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:00:18.012604Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:00:18.012704Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:00:18.018698Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 25596, node 7 2025-05-07T09:00:18.263918Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:00:18.263941Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:00:18.263947Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:00:18.264071Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:61026 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:00:18.602136Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:00:18.873768Z node 7 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1784: Ticket **** (B6C6F477) (ipv6:[::1]:49216) has now valid token of root@builtin 2025-05-07T09:00:18.986603Z node 7 :TICKET_PARSER TRACE: ticket_parser_impl.h:758: CanInitLoginToken, domain db /Root, request db /Root, token db , DomainLoginOnly 1 2025-05-07T09:00:18.986638Z node 7 :TICKET_PARSER TRACE: ticket_parser_impl.h:763: CanInitLoginToken, target database candidates(1): /Root 2025-05-07T09:00:18.986646Z node 7 :TICKET_PARSER TRACE: ticket_parser_impl.h:816: CanInitLoginToken, database /Root, A6 error 2025-05-07T09:00:18.986682Z node 7 :TICKET_PARSER ERROR: ticket_parser_impl.h:969: Ticket **** (0C093832 ... 
oken db , DomainLoginOnly 1 2025-05-07T09:00:26.833900Z node 10 :TICKET_PARSER TRACE: ticket_parser_impl.h:763: CanInitLoginToken, target database candidates(1): /Root 2025-05-07T09:00:26.833911Z node 10 :TICKET_PARSER TRACE: ticket_parser_impl.h:816: CanInitLoginToken, database /Root, A6 error 2025-05-07T09:00:26.833952Z node 10 :TICKET_PARSER ERROR: ticket_parser_impl.h:969: Ticket **** (0C093832): Could not find correct token validator 2025-05-07T09:00:33.058180Z node 13 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[13:7501626099300651321:2072];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:00:33.058311Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002898/r3tmp/tmpyUbWk5/pdisk_1.dat 2025-05-07T09:00:33.733821Z node 13 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:00:33.821514Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:00:33.821779Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 18268, node 13 2025-05-07T09:00:33.860772Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:00:34.155112Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:00:34.155148Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:00:34.155160Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:00:34.155329Z node 13 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26710 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:00:35.167731Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 
Trying to register node Register node result Status { Code: OK } NodeId: 1024 DomainPath: "Root" Expire: 1746615633550871 Nodes { NodeId: 1024 Host: "localhost" Port: 17769 ResolveHost: "localhost" Address: "localhost" Location { DataCenter: "DataCenter" Rack: "Rack" Unit: "Body" } Expire: 1746615633550871 } Nodes { NodeId: 13 Host: "::1" Port: 12001 ResolveHost: "::1" Address: "::1" Location { DataCenterNum: 49 RoomNum: 1 RackNum: 1 BodyNum: 1 DataCenter: "1" Module: "1" Rack: "1" Unit: "1" } } Nodes { NodeId: 14 Host: "::1" Port: 12002 ResolveHost: "::1" Address: "::1" Location { DataCenterNum: 50 RoomNum: 2 RackNum: 2 BodyNum: 2 DataCenter: "2" Module: "2" Rack: "2" Unit: "2" } } Nodes { NodeId: 15 Host: "::1" Port: 12003 ResolveHost: "::1" Address: "::1" Location { DataCenterNum: 51 RoomNum: 3 RackNum: 3 BodyNum: 3 DataCenter: "3" Module: "3" Rack: "3" Unit: "3" } } 2025-05-07T09:00:45.359320Z node 16 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[16:7501626152474389133:2080];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:00:45.431037Z node 16 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002898/r3tmp/tmpLcZNs6/pdisk_1.dat 2025-05-07T09:00:45.987836Z node 16 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:00:46.143714Z node 16 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(16, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:00:46.143842Z node 16 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(16, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:00:46.159819Z node 16 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(16, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 3188, node 16 2025-05-07T09:00:46.742818Z node 16 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:00:46.742862Z node 16 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:00:46.742873Z node 16 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:00:46.743081Z node 16 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:5605 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... 
(TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:00:47.964569Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... Trying to register node Register node result Status { Code: OK } NodeId: 1024 DomainPath: "Root" Expire: 1746615646050190 Nodes { NodeId: 1024 Host: "localhost" Port: 23997 ResolveHost: "localhost" Address: "localhost" Location { DataCenter: "DataCenter" Rack: "Rack" Unit: "Body" } Expire: 1746615646050190 } Nodes { NodeId: 16 Host: "::1" Port: 12001 ResolveHost: "::1" Address: "::1" Location { DataCenterNum: 49 RoomNum: 1 RackNum: 1 BodyNum: 1 DataCenter: "1" Module: "1" Rack: "1" Unit: "1" } } Nodes { NodeId: 17 Host: "::1" Port: 12002 ResolveHost: "::1" Address: "::1" Location { DataCenterNum: 50 RoomNum: 2 RackNum: 2 BodyNum: 2 DataCenter: "2" Module: "2" Rack: "2" Unit: "2" } } Nodes { NodeId: 18 Host: "::1" Port: 12003 ResolveHost: "::1" Address: "::1" Location { DataCenterNum: 51 RoomNum: 3 RackNum: 3 BodyNum: 3 DataCenter: "3" Module: "3" Rack: "3" Unit: "3" } } 2025-05-07T09:00:58.708427Z node 19 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[19:7501626208517959518:2072];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:00:58.708487Z node 19 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002898/r3tmp/tmpHxkphZ/pdisk_1.dat 2025-05-07T09:00:59.289794Z node 19 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:00:59.523498Z node 19 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(19, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:00:59.523637Z node 19 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(19, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:00:59.538824Z node 19 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(19, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 16217, node 19 2025-05-07T09:01:00.014285Z node 19 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:01:00.014319Z node 19 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:01:00.014330Z node 19 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:01:00.014540Z node 19 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8379 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:01:01.303872Z node 19 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... Trying to register node 2025-05-07T09:01:01.761243Z node 19 :TICKET_PARSER ERROR: ticket_parser_impl.h:969: Ticket 70BCD7F6D7B2E983233247E8B44761238BF8989F59BC90DFA5FCBC90C1D85D4C: Cannot create token from certificate. Client certificate failed verification Register node result Status { Code: ERROR Reason: "Cannot create token from certificate. Client certificate failed verification" } >> YdbYqlClient::SecurityTokenAuth >> TGRpcYdbTest::KeepAlive [GOOD] >> YdbTableBulkUpsertOlap::UpsertArrowBatch >> TGRpcClientLowTest::BiStreamPing [GOOD] >> TGRpcClientLowTest::BiStreamCancelled >> TAuthenticationWithSqlExecution::CreateAlterUserWithHash >> TGRpcClientLowTest::ChangeAcl [GOOD] >> TRegisterNodeOverDiscoveryService::ServerWithCertVerification_ClientDoesNotProvideClientCerts [GOOD] >> TRegisterNodeOverDiscoveryService::ServerWithCertVerification_AuthNotRequired >> Cdc::ResolvedTimestampsContinueAfterMerge [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TFlatTest::SelectRangeReverseIncludeKeys [GOOD] Test command err: 2025-05-07T09:00:59.751162Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501626210429356548:2269];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:00:59.751655Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/004a1a/r3tmp/tmpEiiWJ2/pdisk_1.dat 2025-05-07T09:01:01.610175Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T09:01:01.742367Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:01:01.974548Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:01:01.974669Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:01:02.005402Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, 
(0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:01:02.377592Z node 1 :BS_CONTROLLER ERROR: {BSC07@impl.h:2175} ProcessControllerEvent event processing took too much time Type# 268637706 Duration# 0.211319s 2025-05-07T09:01:02.377693Z node 1 :BS_CONTROLLER ERROR: {BSC00@bsc.cpp:666} StateWork event processing took too much time Type# 2146435078 Duration# 0.211452s TClient is connected to server localhost:18065 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-05-07T09:01:02.959670Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... waiting... waiting... 2025-05-07T09:01:03.271681Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 2025-05-07T09:01:04.748055Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501626210429356548:2269];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:04.748159Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:01:05.699486Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7501626238367502119:2135];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:05.735630Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/004a1a/r3tmp/tmp8sFGFs/pdisk_1.dat 2025-05-07T09:01:05.936914Z node 2 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:01:05.957137Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:01:05.957228Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:01:05.958926Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:4911 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-05-07T09:01:06.147322Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-05-07T09:01:06.169509Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... >> TGRpcNewClient::TestAuth >> CommitOffset::DistributedTxCommit_CheckSessionResetAfterCommit [GOOD] >> CommitOffset::DistributedTxCommit_CheckOffsetCommitForDifferentCases >> TGRpcLdapAuthentication::LdapAuthSettingsWithEmptyHosts [GOOD] >> TGRpcLdapAuthentication::LdapAuthSettingsWithEmptyBaseDn >> TopicAutoscaling::PartitionSplit_ManySession_AutoscaleAwareSDK [GOOD] >> TopicAutoscaling::PartitionSplit_AutosplitByLoad >> TGRpcNewCoordinationClient::SessionSemaphoreInfiniteTimeout ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> TGRpcYdbTest::KeepAlive [GOOD] Test command err: 2025-05-07T09:00:31.618790Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501626094321998943:2279];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:00:31.618842Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002883/r3tmp/tmpwDRhgg/pdisk_1.dat 2025-05-07T09:00:32.465702Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:00:32.465795Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:00:32.506524Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:00:32.534517Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 64133, node 1 2025-05-07T09:00:32.667287Z node 1 :GRPC_SERVER WARN: grpc_request_proxy.cpp:509: SchemeBoardDelete /Root Strong=0 2025-05-07T09:00:32.674724Z node 1 :GRPC_SERVER WARN: grpc_request_proxy.cpp:509: SchemeBoardDelete /Root Strong=0 2025-05-07T09:00:32.946753Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: 
distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:00:32.946783Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:00:32.946791Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:00:32.946949Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:18469 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 Pa... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:00:33.661264Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:00:33.852825Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpRmDir, opId: 281474976710659:0, at schemeshard: 72057594046644480 2025-05-07T09:00:39.994606Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7501626126011073794:2074];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:00:39.994654Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002883/r3tmp/tmpuBHIPY/pdisk_1.dat 2025-05-07T09:00:40.350743Z node 4 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 14004, node 4 2025-05-07T09:00:40.449786Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:00:40.449875Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:00:40.462213Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:00:40.486824Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:00:40.486846Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:00:40.486854Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:00:40.486994Z node 4 
:NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:22246 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:00:40.777221Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:00:45.917058Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7501626154205904330:2208];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:00:45.917398Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002883/r3tmp/tmpBxyBfc/pdisk_1.dat 2025-05-07T09:00:46.431119Z node 7 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:00:46.455040Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:00:46.455126Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:00:46.471172Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 12158, node 7 2025-05-07T09:00:46.665540Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:00:46.665566Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:00:46.665578Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:00:46.665710Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:15016 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:00:47.527496Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting...
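Note the repeated WaitRootIsUp 'Root'... / "WaitRootIsUp 'Root' success." pairs above: the test client polls the scheme root until it becomes visible before running the test body. A minimal std-only C++ sketch of that polling pattern (all names here are hypothetical illustrations, not the actual TClient API):

#include <chrono>
#include <functional>
#include <thread>

// Poll `isUp` until it returns true or `budget` elapses.
// Mirrors the WaitRootIsUp behaviour seen in the log; purely illustrative.
bool WaitUntilUp(const std::function<bool()>& isUp,
                 std::chrono::milliseconds budget,
                 std::chrono::milliseconds step = std::chrono::milliseconds(100)) {
    const auto deadline = std::chrono::steady_clock::now() + budget;
    while (std::chrono::steady_clock::now() < deadline) {
        if (isUp()) {
            return true;   // corresponds to "WaitRootIsUp 'Root' success."
        }
        std::this_thread::sleep_for(step);
    }
    return false;          // caller would report a timeout instead of success
}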
: Error: Operation timeout. test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002883/r3tmp/tmpgIPWf1/pdisk_1.dat 2025-05-07T09:00:53.279077Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T09:00:53.836068Z node 10 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:00:53.905792Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:00:53.905900Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:00:53.915789Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 27504, node 10 2025-05-07T09:00:54.225200Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:00:54.225233Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:00:54.225245Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:00:54.225414Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8474 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:00:54.931832Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting...
: Error: Operation cancelled. 2025-05-07T09:01:03.883696Z node 13 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[13:7501626228498941094:2075];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:03.883789Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002883/r3tmp/tmpkVeNe4/pdisk_1.dat 2025-05-07T09:01:04.315046Z node 13 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:01:04.354676Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:01:04.354781Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:01:04.360756Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 5421, node 13 2025-05-07T09:01:04.544083Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:01:04.544110Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:01:04.544119Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:01:04.544254Z node 13 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:27847 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 
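The two errors above (": Error: Operation timeout." and ": Error: Operation cancelled.") are the terminal outcomes the TGRpcYdbTest::KeepAlive run provokes: one call exceeds its client-side deadline, the other is cancelled before completing. A hedged std-only sketch of how a client might distinguish the two outcomes (hypothetical names, not the YDB SDK API):

#include <chrono>

enum class EStatus { Success, Timeout, Cancelled };

struct TCallState {
    std::chrono::steady_clock::time_point Deadline;
    bool CancelRequested = false;
};

// Decide which terminal status a pending call should get; illustrative only.
EStatus Classify(const TCallState& call) {
    if (call.CancelRequested) {
        return EStatus::Cancelled;               // -> "Operation cancelled."
    }
    if (std::chrono::steady_clock::now() >= call.Deadline) {
        return EStatus::Timeout;                 // -> "Operation timeout."
    }
    return EStatus::Success;
}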
2025-05-07T09:01:05.018632Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-05-07T09:01:08.886103Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[13:7501626228498941094:2075];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:08.886206Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> SystemView::QueryStatsRetries [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> TGRpcClientLowTest::ChangeAcl [GOOD] Test command err: 2025-05-07T09:00:36.455879Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501626114042180580:2073];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:00:36.455931Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00287c/r3tmp/tmpGXpEde/pdisk_1.dat 2025-05-07T09:00:37.080747Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:00:37.108709Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:00:37.108826Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:00:37.115386Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 20438, node 1 2025-05-07T09:00:37.429910Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:00:37.429934Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:00:37.429940Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:00:37.430086Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:12114 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-05-07T09:00:38.336074Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... TestRequest(database="/Root", token="root@builtin") => {SUCCESS, 0} 2025-05-07T09:00:41.164180Z node 1 :GRPC_SERVER WARN: grpc_request_proxy.cpp:509: SchemeBoardDelete /blabla Strong=1 TestRequest(database="/blabla", token="root@builtin") => {STATUS_CODE_UNSPECIFIED, 16} 2025-05-07T09:00:41.176837Z node 1 :GRPC_SERVER WARN: grpc_request_proxy.cpp:509: SchemeBoardDelete /blabla Strong=1 TestRequest(database="blabla", token="root@builtin") => {STATUS_CODE_UNSPECIFIED, 16} 2025-05-07T09:00:43.171949Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7501626142183127552:2206];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:00:43.172000Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00287c/r3tmp/tmpat0Jzm/pdisk_1.dat 2025-05-07T09:00:43.486135Z node 4 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 62877, node 4 2025-05-07T09:00:43.646132Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:00:43.646250Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:00:43.855606Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:00:43.919804Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:00:43.919824Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:00:43.919831Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:00:43.919964Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:1273 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-05-07T09:00:44.247278Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... TestRequest(database="/Root", token="") => {STATUS_CODE_UNSPECIFIED, 16} 2025-05-07T09:00:44.394968Z node 4 :GRPC_SERVER WARN: grpc_request_proxy.cpp:509: SchemeBoardDelete /blabla Strong=1 TestRequest(database="/blabla", token="") => {STATUS_CODE_UNSPECIFIED, 16} 2025-05-07T09:00:44.422963Z node 4 :GRPC_SERVER WARN: grpc_request_proxy.cpp:509: SchemeBoardDelete /blabla Strong=1 TestRequest(database="blabla", token="") => {STATUS_CODE_UNSPECIFIED, 16} 2025-05-07T09:00:49.895797Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7501626170884715106:2074];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:00:49.895910Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00287c/r3tmp/tmp5VJoiS/pdisk_1.dat 2025-05-07T09:00:50.106179Z node 7 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:00:50.170820Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:00:50.170910Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:00:50.186473Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 15418, node 7 2025-05-07T09:00:50.384762Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:00:50.384787Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:00:50.384794Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:00:50.384918Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6485 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
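The TestRequest(database=..., token=...) => {status, code} lines in the TGRpcClientLowTest::ChangeAcl output above form a small matrix: each (database, token) pair has one expected status. A table-driven sketch of that kind of check in plain C++ (the struct, statuses, and the testRequest callback are hypothetical stand-ins for the test's own types; the expected values are copied from the log lines above for the root@builtin configuration):

#include <cassert>
#include <functional>
#include <string>
#include <vector>

struct TAuthCase {
    std::string Database;
    std::string Token;
    std::string Expected;  // e.g. "SUCCESS" or "STATUS_CODE_UNSPECIFIED"
};

// Expected outcomes taken from the log above.
const std::vector<TAuthCase> Cases = {
    {"/Root",   "root@builtin", "SUCCESS"},
    {"/blabla", "root@builtin", "STATUS_CODE_UNSPECIFIED"},
    {"blabla",  "root@builtin", "STATUS_CODE_UNSPECIFIED"},
};

void RunAll(const std::function<std::string(const std::string&,
                                            const std::string&)>& testRequest) {
    for (const auto& c : Cases) {
        assert(testRequest(c.Database, c.Token) == c.Expected);
    }
}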
2025-05-07T09:00:50.655165Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... TestRequest(database="/Root", token="") => {SUCCESS, 0} TestRequest(database="/blabla", token="") => {SUCCESS, 0} TestRequest(database="blabla", token="") => {SUCCESS, 0} TestRequest(database="/Root", token="root@builtin") => {SUCCESS, 0} 2025-05-07T09:00:53.147384Z node 7 :GRPC_SERVER WARN: grpc_request_proxy.cpp:509: SchemeBoardDelete /blabla Strong=1 TestRequest(database="/blabla", token="root@builtin") => {STATUS_CODE_UNSPECIFIED, 16} 2025-05-07T09:00:53.171235Z node 7 :GRPC_SERVER WARN: grpc_request_proxy.cpp:509: SchemeBoardDelete /blabla Strong=1 TestRequest(database="blabla", token="root@builtin") => {STATUS_CODE_UNSPECIFIED, 16} 2025-05-07T09:00:53.185103Z node 7 :TICKET_PARSER ERROR: ticket_parser_impl.h:969: Ticket **** (717F937C): Unknown token TestRequest(database="/Root", token="invalid token") => {UNAUTHORIZED, 0} 2025-05-07T09:00:53.231052Z node 7 :GRPC_SERVER WARN: grpc_request_proxy.cpp:509: SchemeBoardDelete /blabla Strong=1 TestRequest(database="/blabla", token="invalid token") => {STATUS_CODE_UNSPECIFIED, 16} 2025-05-07T09:00:53.251328Z node 7 :GRPC_SERVER WARN: grpc_request_proxy.cpp:509: SchemeBoardDelete /blabla Strong=1 TestRequest(database="blabla", token="invalid token") => {STATUS_CODE_UNSPECIFIED, 16} 2025-05-07T09:00:55.363510Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7501626193220707376:2118];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:00:55.438598Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00287c/r3tmp/tmpAHu07H/pdisk_1.dat 2025-05-07T09:00:55.663146Z node 10 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:00:55.735569Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:00:55.735660Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 22335, node 10 2025-05-07T09:00:55.778166Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:00:55.846530Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:00:55.846551Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:00:55.846560Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:00:55.847404Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:31794 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:00:56.015672Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... TestRequest(database="/Root", token="") => {SUCCESS, 0} TestRequest(database="/blabla", token="") => {SUCCESS, 0} TestRequest(database="blabla", token="") => {SUCCESS, 0} TestRequest(database="/Root", token="root@builtin") => {SUCCESS, 0} 2025-05-07T09:00:59.965637Z node 10 :GRPC_SERVER WARN: grpc_request_proxy.cpp:509: SchemeBoardDelete /blabla Strong=1 TestRequest(database="/blabla", token="root@builtin") => {STATUS_CODE_UNSPECIFIED, 16} TestRequest(database="blabla", token="root@builtin") => {STATUS_CODE_UNSPECIFIED, 16} 2025-05-07T09:00:59.994180Z node 10 :GRPC_SERVER WARN: grpc_request_proxy.cpp:509: SchemeBoardDelete /blabla Strong=1 2025-05-07T09:01:00.019956Z node 10 :TICKET_PARSER ERROR: ticket_parser_impl.h:969: Ticket **** (717F937C): Unknown token TestRequest(database="/Root", token="invalid token") => {STATUS_CODE_UNSPECIFIED, 16} 2025-05-07T09:01:00.043184Z node 10 :GRPC_SERVER WARN: grpc_request_proxy.cpp:509: SchemeBoardDelete /blabla Strong=1 TestRequest(database="/blabla", token="invalid token") => {STATUS_CODE_UNSPECIFIED, 16} 2025-05-07T09:01:00.078306Z node 10 :GRPC_SERVER WARN: grpc_request_proxy.cpp:509: SchemeBoardDelete /blabla Strong=1 TestRequest(database="blabla", token="invalid token") => {STATUS_CODE_UNSPECIFIED, 16} 2025-05-07T09:01:03.238838Z node 13 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[13:7501626228117397115:2072];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:03.238939Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00287c/r3tmp/tmpVQiIJb/pdisk_1.dat 2025-05-07T09:01:03.808048Z node 13 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:01:03.988084Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:01:03.988195Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:01:04.001667Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: 
Connecting -> Connected TServer::EnableGrpc on GrpcPort 13515, node 13 2025-05-07T09:01:04.302703Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:01:04.302729Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:01:04.302738Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:01:04.302915Z node 13 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:22349 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:01:04.859828Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 
TClient is connected to server localhost:22349 2025-05-07T09:01:05.509193Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710660:0, at schemeshard: 72057594046644480 |91.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_split_merge_reboots/ydb-core-tx-schemeshard-ut_split_merge_reboots |91.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_split_merge_reboots/ydb-core-tx-schemeshard-ut_split_merge_reboots |91.7%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_split_merge_reboots/ydb-core-tx-schemeshard-ut_split_merge_reboots >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_ttl_Datetime-pk_types11-all_types11-index11-Datetime--] [GOOD] >> TRegisterNodeOverDiscoveryService::ServerWithCertVerification_ClientProvidesCorruptedPrivatekey [GOOD] >> TRegisterNodeOverDiscoveryService::ServerWithCertVerification_ClientProvidesExpiredCert |91.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_export_reboots_s3/ydb-core-tx-schemeshard-ut_export_reboots_s3 |91.7%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_export_reboots_s3/ydb-core-tx-schemeshard-ut_export_reboots_s3 |91.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_export_reboots_s3/ydb-core-tx-schemeshard-ut_export_reboots_s3 >> YdbYqlClient::CreateTableWithUniformPartitionsAndAutoPartitioning [GOOD] >> YdbYqlClient::CreateTableWithPartitionAtKeysAndAutoPartitioning >> YdbYqlClient::BuildInfo >> YdbOlapStore::LogLast50 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_change_exchange/unittest >> Cdc::ResolvedTimestampsContinueAfterMerge [GOOD] Test command err: 2025-05-07T08:57:18.319815Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501625262664744583:2063];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:57:18.319882Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00306d/r3tmp/tmpu2ZzSh/pdisk_1.dat 2025-05-07T08:57:18.872530Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:57:18.875834Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:57:18.875951Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:57:18.880202Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 27075, node 1 2025-05-07T08:57:19.162130Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:57:19.162157Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:57:19.162166Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:57:19.162296Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-05-07T08:57:19.343845Z node 1 :FLAT_TX_SCHEMESHARD 
WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-05-07T08:57:19.377065Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710657:0, at schemeshard: 72057594046644480 2025-05-07T08:57:19.447656Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:7501625266959712504:2309] 2025-05-07T08:57:19.447921Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-05-07T08:57:19.481340Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-05-07T08:57:19.481438Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-05-07T08:57:19.483450Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-05-07T08:57:19.483506Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-05-07T08:57:19.483552Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-05-07T08:57:19.483907Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-05-07T08:57:19.483961Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-05-07T08:57:19.483988Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:7501625266959712520:2309] in generation 1 2025-05-07T08:57:19.490393Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-05-07T08:57:19.592587Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-05-07T08:57:19.592739Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-05-07T08:57:19.592797Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:7501625266959712522:2310] 2025-05-07T08:57:19.592822Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-05-07T08:57:19.592838Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-05-07T08:57:19.592852Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T08:57:19.593026Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-05-07T08:57:19.593103Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-05-07T08:57:19.593126Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T08:57:19.593154Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-05-07T08:57:19.593169Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 
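The TX_DATASHARD lines above show the shard refusing work until its schema transaction completes: "not sending time cast registration request in state WaitScheme" and "Cannot activate change sender: ... state: WaitScheme", with activation happening only later, once the shard reaches its ready state. A tiny sketch of that state gating (the enum and member names are illustrative, not the datashard's actual types):

enum class EShardState { WaitScheme, Ready };

struct TShard {
    EShardState State = EShardState::WaitScheme;
    bool ChangeSenderActive = false;

    // Activation is a no-op until the schema transaction has completed,
    // matching "Cannot activate change sender ... state: WaitScheme" above.
    void TryActivateChangeSender() {
        if (State == EShardState::Ready) {
            ChangeSenderActive = true;   // later: "Change sender activated"
        }
    }
};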
2025-05-07T08:57:19.593185Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T08:57:19.596403Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:7501625266959712501:2302], serverId# [1:7501625266959712518:2309], sessionId# [0:0:0] 2025-05-07T08:57:19.596594Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-05-07T08:57:19.596880Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976710657 ssId 72057594046644480 seqNo 2:1 2025-05-07T08:57:19.597518Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976710657 at tablet 72075186224037888 2025-05-07T08:57:19.613139Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-05-07T08:57:19.614644Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-05-07T08:57:19.614748Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-05-07T08:57:19.620810Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:7501625266959712536:2319], serverId# [1:7501625266959712538:2321], sessionId# [0:0:0] 2025-05-07T08:57:19.631266Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976710657 at step 1746608239663 at tablet 72075186224037888 { Transactions { TxId: 281474976710657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1746608239663 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2025-05-07T08:57:19.631318Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T08:57:19.632070Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-05-07T08:57:19.632171Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T08:57:19.632191Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-05-07T08:57:19.632221Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1746608239663:281474976710657] in PlanQueue unit at 72075186224037888 2025-05-07T08:57:19.632525Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1746608239663:281474976710657 keys extracted: 0 2025-05-07T08:57:19.632752Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-05-07T08:57:19.632874Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T08:57:19.632925Z node 1 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-05-07T08:57:19.672181Z node 1 :TX_DATASHARD INFO: 
datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-05-07T08:57:19.672735Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-05-07T08:57:19.677042Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 1746608239662 2025-05-07T08:57:19.677326Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T08:57:19.677394Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1746608239663} 2025-05-07T08:57:19.677458Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T08:57:19.677510Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T08:57:19.677527Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-05-07T08:57:19.677544Z node 1 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-05-07T08:57:19.677597Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1746608239663 : 281474976710657] from 72075186224037888 at tablet 72075186224037888 send result to client [1:7501625262664745044:2206], exec latency: 39 ms, propose latency: 44 ms 2025-05-07T08:57:19.677629Z node 1 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976710657 state Ready TxInFly 0 2025-05-07T08:57:19.677670Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T08:57:19.677750Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1746608239670 2025-05-07T08:57:19.679417Z node 1 :CHANGE_EXCHANGE DEBUG: change_sender.cpp:153: [ChangeSender][72075186224037888:1][1:7501625266959712522:2310][Inactive] Handle NKikimrChangeExchange.TEvActivateSender 2025-05-07T08:57:19.696717Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976710657 datashard 72075186224037888 state Ready 2025-05-07T08:57:19.696809Z node 1 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-05-07T08:57:19.710613Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-05-07T08:57:19.713485Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-05-07T08:57:19.713603Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose ... 
89, Partition: 0, State: StateIdle] m0000000000p72075186224037893 2025-05-07T09:01:10.546646Z node 24 :PERSQUEUE DEBUG: partition.cpp:2194: [PQ: 72075186224037889, Partition: 0, State: StateIdle] i0000000000 2025-05-07T09:01:10.546713Z node 24 :PERSQUEUE DEBUG: partition.cpp:2196: [PQ: 72075186224037889, Partition: 0, State: StateIdle] --- rename ---------------- 2025-05-07T09:01:10.546815Z node 24 :PERSQUEUE DEBUG: partition.cpp:2201: [PQ: 72075186224037889, Partition: 0, State: StateIdle] =========================== 2025-05-07T09:01:10.547067Z node 24 :PERSQUEUE DEBUG: read.h:262: CacheProxy. Passthrough write request to KV 2025-05-07T09:01:10.548902Z node 24 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715660, at schemeshard: 72057594046644480 ... wait for final heartbeat >>>>> GetRecords path=/Root/Table/Stream partitionId=0 2025-05-07T09:01:10.553326Z node 24 :PERSQUEUE DEBUG: pq_impl.cpp:342: Handle TEvRequest topic: 'streamImpl' requestId: 2025-05-07T09:01:10.553493Z node 24 :PERSQUEUE DEBUG: pq_impl.cpp:2788: [PQ: 72075186224037889] got client message batch for topic 'Table/Stream/streamImpl' partition 0 2025-05-07T09:01:10.554606Z node 24 :PERSQUEUE DEBUG: partition_read.cpp:731: [PQ: 72075186224037889, Partition: 0, State: StateIdle] read cookie 0 Topic 'Table/Stream/streamImpl' partition 0 user $without_consumer offset 0 count 10000 size 26214400 endOffset 2 max time lag 0ms effective offset 0 2025-05-07T09:01:10.554746Z node 24 :PERSQUEUE DEBUG: partition_read.cpp:931: [PQ: 72075186224037889, Partition: 0, State: StateIdle] read cookie 0 added 0 blobs, size 0 count 0 last offset 0, current partition end offset: 2 2025-05-07T09:01:10.554908Z node 24 :PERSQUEUE DEBUG: partition_read.cpp:948: [PQ: 72075186224037889, Partition: 0, State: StateIdle] Reading cookie 0. All data is from uncompacted head. 
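The partition read above starts from "user $without_consumer offset 0 count 10000 ... endOffset 2 ... effective offset 0": the requested count is an upper bound, and the read never goes past the partition's end offset. The clamping arithmetic, written out as a hedged std-only sketch (field names are illustrative):

#include <algorithm>
#include <cstdint>

// How many messages a read like the one above can actually return:
// start at the requested offset (clamped into the partition) and stop
// at endOffset. With offset=0, count=10000, endOffset=2 this yields 2.
uint64_t MessagesToRead(uint64_t requestedOffset, uint64_t count,
                        uint64_t startOffset, uint64_t endOffset) {
    const uint64_t effective = std::max(requestedOffset, startOffset);
    if (effective >= endOffset) {
        return 0;
    }
    return std::min<uint64_t>(count, endOffset - effective);
}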
2025-05-07T09:01:10.555010Z node 24 :PERSQUEUE DEBUG: partition_read.cpp:420: FormAnswer for 0 blobs 2025-05-07T09:01:10.555910Z node 24 :PERSQUEUE DEBUG: pq_impl.cpp:377: Answer ok topic: 'streamImpl' partition: 0 messageNo: 0 requestId: cookie: 0 2025-05-07T09:01:10.570267Z node 24 :PERSQUEUE DEBUG: partition_write.cpp:524: [PQ: 72075186224037889, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0 2025-05-07T09:01:10.570609Z node 24 :PERSQUEUE DEBUG: pq_impl.cpp:377: Answer ok topic: 'streamImpl' partition: 0 messageNo: 0 requestId: cookie: 0 2025-05-07T09:01:10.570958Z node 24 :PERSQUEUE DEBUG: pq_impl.cpp:342: Handle TEvRequest topic: 'streamImpl' requestId: 2025-05-07T09:01:10.571105Z node 24 :PERSQUEUE DEBUG: pq_impl.cpp:2788: [PQ: 72075186224037889] got client message batch for topic 'Table/Stream/streamImpl' partition 0 2025-05-07T09:01:10.571383Z node 24 :PERSQUEUE DEBUG: pq_impl.cpp:377: Answer ok topic: 'streamImpl' partition: 0 messageNo: 0 requestId: cookie: 0 2025-05-07T09:01:10.571830Z node 24 :CHANGE_EXCHANGE DEBUG: change_sender_cdc_stream.cpp:61: [CdcChangeSenderPartition][72075186224037893:1][0][72075186224037889][24:1297:3021] Handle NKikimr::NPQ::TEvPartitionWriter::TEvInitResult { SessionId: TxId: Success { OwnerCookie: 72075186224037893|f8e6bb83-d90e57c3-5b571c80-5cec3da3_0 SourceIdInfo: SourceId: "\00072075186224037893" SeqNo: 0 Offset: 2 WriteTimestampMS: 0 Explicit: true State: STATE_REGISTERED } } 2025-05-07T09:01:10.572090Z node 24 :CHANGE_EXCHANGE DEBUG: change_sender_cdc_stream.cpp:643: [CdcChangeSenderMain][72075186224037893:1][24:1294:3021] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 0 } 2025-05-07T09:01:10.572378Z node 24 :CHANGE_EXCHANGE DEBUG: change_sender_cdc_stream.cpp:111: [CdcChangeSenderPartition][72075186224037893:1][0][72075186224037889][24:1297:3021] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRecords { Records [{ Order: 1 Group: 0 Step: 6000 TxId: 0 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] Kind: CdcHeartbeat Source: Unspecified Body: 0b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 0 LockId: 0 LockOffset: 0 }] } 2025-05-07T09:01:10.572976Z node 24 :PERSQUEUE DEBUG: pq_impl.cpp:342: Handle TEvRequest topic: 'streamImpl' requestId: 2025-05-07T09:01:10.573038Z node 24 :PERSQUEUE DEBUG: pq_impl.cpp:2788: [PQ: 72075186224037889] got client message batch for topic 'Table/Stream/streamImpl' partition 0 2025-05-07T09:01:10.573190Z node 24 :PERSQUEUE DEBUG: pq_impl.cpp:377: Answer ok topic: 'streamImpl' partition: 0 messageNo: 0 requestId: cookie: 1 2025-05-07T09:01:10.573355Z node 24 :PERSQUEUE DEBUG: pq_impl.cpp:342: Handle TEvRequest topic: 'streamImpl' requestId: 2025-05-07T09:01:10.573390Z node 24 :PERSQUEUE DEBUG: pq_impl.cpp:2788: [PQ: 72075186224037889] got client message batch for topic 'Table/Stream/streamImpl' partition 0 2025-05-07T09:01:10.573501Z node 24 :PERSQUEUE DEBUG: pq_impl.cpp:2192: [PQ: 72075186224037889] got client message topic: Table/Stream/streamImpl partition: 0 SourceId: '\00072075186224037893' SeqNo: 1 partNo : 0 messageNo: 1 size 26 offset: -1 2025-05-07T09:01:10.573797Z node 24 :PERSQUEUE DEBUG: partition_write.cpp:1162: [PQ: 72075186224037889, Partition: 0, State: StateIdle] Topic 'Table/Stream/streamImpl' partition 0 process heartbeat sourceId '\00072075186224037893' version v6000/0 2025-05-07T09:01:10.574018Z node 24 :PERSQUEUE INFO: partition_write.cpp:1658: [PQ: 
72075186224037889, Partition: 0, State: StateIdle] Topic 'Table/Stream/streamImpl' partition 0 emit heartbeat v6000/0 2025-05-07T09:01:10.574306Z node 24 :PERSQUEUE DEBUG: partition_write.cpp:1233: [PQ: 72075186224037889, Partition: 0, State: StateIdle] Topic 'Table/Stream/streamImpl' partition 0 part blob processing sourceId '\00072075186224037889' seqNo 0 partNo 0 2025-05-07T09:01:10.575620Z node 24 :PERSQUEUE DEBUG: partition_write.cpp:1333: [PQ: 72075186224037889, Partition: 0, State: StateIdle] Topic 'Table/Stream/streamImpl' partition 0 part blob complete sourceId '\00072075186224037889' seqNo 0 partNo 0 FormedBlobsCount 0 NewHead: Offset 2 PartNo 0 PackedSize 107 count 1 nextOffset 3 batches 1 2025-05-07T09:01:10.577288Z node 24 :PERSQUEUE DEBUG: partition_write.cpp:1623: [PQ: 72075186224037889, Partition: 0, State: StateIdle] Add new write blob: topic 'Table/Stream/streamImpl' partition 0 compactOffset 2,1 HeadOffset 0 endOffset 2 curOffset 3 d0000000000_00000000000000000002_00000_0000000001_00000| size 93 WTime 6505 2025-05-07T09:01:10.577699Z node 24 :PERSQUEUE DEBUG: partition.cpp:2182: [PQ: 72075186224037889, Partition: 0, State: StateIdle] === DumpKeyValueRequest === 2025-05-07T09:01:10.579762Z node 24 :PERSQUEUE DEBUG: partition.cpp:2183: [PQ: 72075186224037889, Partition: 0, State: StateIdle] --- delete ---------------- 2025-05-07T09:01:10.579924Z node 24 :PERSQUEUE DEBUG: partition.cpp:2189: [PQ: 72075186224037889, Partition: 0, State: StateIdle] [x0000000000, x0000000001) 2025-05-07T09:01:10.580090Z node 24 :PERSQUEUE DEBUG: partition.cpp:2191: [PQ: 72075186224037889, Partition: 0, State: StateIdle] --- write ----------------- 2025-05-07T09:01:10.580211Z node 24 :PERSQUEUE DEBUG: partition.cpp:2194: [PQ: 72075186224037889, Partition: 0, State: StateIdle] m0000000000p72075186224037889 2025-05-07T09:01:10.580296Z node 24 :PERSQUEUE DEBUG: partition.cpp:2194: [PQ: 72075186224037889, Partition: 0, State: StateIdle] d0000000000_00000000000000000002_00000_0000000001_00000| 2025-05-07T09:01:10.580333Z node 24 :PERSQUEUE DEBUG: partition.cpp:2194: [PQ: 72075186224037889, Partition: 0, State: StateIdle] i0000000000 2025-05-07T09:01:10.580403Z node 24 :PERSQUEUE DEBUG: partition.cpp:2196: [PQ: 72075186224037889, Partition: 0, State: StateIdle] --- rename ---------------- 2025-05-07T09:01:10.580556Z node 24 :PERSQUEUE DEBUG: partition.cpp:2201: [PQ: 72075186224037889, Partition: 0, State: StateIdle] =========================== 2025-05-07T09:01:10.580825Z node 24 :PERSQUEUE DEBUG: read.h:262: CacheProxy. Passthrough write request to KV 2025-05-07T09:01:10.581117Z node 24 :PERSQUEUE DEBUG: read.h:300: CacheProxy. Passthrough blob. Partition 0 offset 2 partNo 0 count 1 size 93 2025-05-07T09:01:10.583410Z node 24 :PERSQUEUE DEBUG: cache_eviction.h:315: Caching head blob in L1. Partition 0 offset 2 count 1 size 93 actorID [24:1266:3001] 2025-05-07T09:01:10.583835Z node 24 :PERSQUEUE DEBUG: pq_l2_cache.cpp:120: PQ Cache (L2). Adding blob. Tablet '72075186224037889' partition 0 offset 2 partno 0 count 1 parts 0 size 93 2025-05-07T09:01:10.594456Z node 24 :PERSQUEUE DEBUG: partition_write.cpp:524: [PQ: 72075186224037889, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 44 WriteNewSizeFromSupportivePartitions# 0 2025-05-07T09:01:10.594703Z node 24 :PERSQUEUE DEBUG: partition_write.cpp:58: [PQ: 72075186224037889, Partition: 0, State: StateIdle] TPartition::ReplyWrite. 
Partition: 0 2025-05-07T09:01:10.594913Z node 24 :PERSQUEUE DEBUG: partition_write.cpp:324: [PQ: 72075186224037889, Partition: 0, State: StateIdle] Answering for message sourceid: '\00072075186224037893', Topic: 'Table/Stream/streamImpl', Partition: 0, SeqNo: 1, partNo: 0, Offset: 2 is stored on disk 2025-05-07T09:01:10.595510Z node 24 :PERSQUEUE DEBUG: pq_impl.cpp:377: Answer ok topic: 'streamImpl' partition: 0 messageNo: 1 requestId: cookie: 1 2025-05-07T09:01:10.595990Z node 24 :CHANGE_EXCHANGE DEBUG: change_sender_cdc_stream.cpp:160: [CdcChangeSenderPartition][72075186224037893:1][0][72075186224037889][24:1297:3021] Handle NKikimrClient.TResponse { SessionId: TxId: Success { Response: Status: 1 ErrorCode: OK PartitionResponse { CmdWriteResult { AlreadyWritten: false SourceId: "\00072075186224037893" SeqNo: 1 Offset: 2 WriteTimestampMS: 6505 PartitionQuotedTimeMs: 0 TotalTimeInPartitionQueueMs: 0 WriteTimeMs: 0 TopicQuotedTimeMs: 0 WrittenInTx: false } Cookie: 1 } } } 2025-05-07T09:01:10.596197Z node 24 :CHANGE_EXCHANGE DEBUG: change_sender_cdc_stream.cpp:643: [CdcChangeSenderMain][72075186224037893:1][24:1294:3021] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 0 } 2025-05-07T09:01:10.596482Z node 24 :TX_DATASHARD INFO: datashard_change_sending.cpp:310: TTxRemoveChangeRecords Execute: records# 1, at tablet# 72075186224037893 2025-05-07T09:01:10.596587Z node 24 :TX_DATASHARD DEBUG: datashard.cpp:1087: RemoveChangeRecord: order: 1, at tablet: 72075186224037893 2025-05-07T09:01:10.611010Z node 24 :TX_DATASHARD INFO: datashard_change_sending.cpp:335: TTxRemoveChangeRecords Complete: removed# 1, left# 0, at tablet# 72075186224037893 >>>>> GetRecords path=/Root/Table/Stream partitionId=0 2025-05-07T09:01:11.101985Z node 24 :PERSQUEUE DEBUG: pq_impl.cpp:342: Handle TEvRequest topic: 'streamImpl' requestId: 2025-05-07T09:01:11.102082Z node 24 :PERSQUEUE DEBUG: pq_impl.cpp:2788: [PQ: 72075186224037889] got client message batch for topic 'Table/Stream/streamImpl' partition 0 2025-05-07T09:01:11.102294Z node 24 :PERSQUEUE DEBUG: partition_read.cpp:731: [PQ: 72075186224037889, Partition: 0, State: StateIdle] read cookie 1 Topic 'Table/Stream/streamImpl' partition 0 user $without_consumer offset 0 count 10000 size 26214400 endOffset 3 max time lag 0ms effective offset 0 2025-05-07T09:01:11.102355Z node 24 :PERSQUEUE DEBUG: partition_read.cpp:931: [PQ: 72075186224037889, Partition: 0, State: StateIdle] read cookie 1 added 0 blobs, size 0 count 0 last offset 0, current partition end offset: 3 2025-05-07T09:01:11.102443Z node 24 :PERSQUEUE DEBUG: partition_read.cpp:948: [PQ: 72075186224037889, Partition: 0, State: StateIdle] Reading cookie 1. All data is from uncompacted head. 
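The write path above logs "NewHead: Offset 2 PartNo 0 PackedSize 107 count 1 nextOffset 3": after appending a batch, the next write position is simply the batch's base offset plus its message count. A one-line sketch of that accounting (illustrative names, not the PERSQUEUE code):

#include <cstdint>

// After a blob with base offset 2 holding 1 message, the next offset is 3,
// matching "compactOffset 2,1 ... curOffset 3" / "nextOffset 3" above.
uint64_t NextOffset(uint64_t baseOffset, uint64_t messageCount) {
    return baseOffset + messageCount;
}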
2025-05-07T09:01:11.102489Z node 24 :PERSQUEUE DEBUG: partition_read.cpp:420: FormAnswer for 0 blobs 2025-05-07T09:01:11.102708Z node 24 :PERSQUEUE DEBUG: pq_impl.cpp:377: Answer ok topic: 'streamImpl' partition: 0 messageNo: 0 requestId: cookie: 0 >> YdbYqlClient::TestReadTableMultiShard [GOOD] >> YdbYqlClient::TestReadTableMultiShardUseSnapshot >> Cdc::EnqueueRequestProcessSend [GOOD] >> Cdc::InitialScanAndResolvedTimestamps >> GrpcConnectionStringParserTest::NoDatabaseFlag >> TGRpcYdbTest::CreateAlterCopyAndDropTable [GOOD] >> TGRpcYdbTest::CreateDeleteYqlSession >> TGRpcYdbTest::ExecutePreparedQuery [GOOD] >> TGRpcYdbTest::ExecuteQueryCache |91.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/dsproxy/ut_ftol/ydb-core-blobstorage-dsproxy-ut_ftol |91.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/dsproxy/ut_ftol/ydb-core-blobstorage-dsproxy-ut_ftol |91.7%| [LD] {RESULT} $(B)/ydb/core/blobstorage/dsproxy/ut_ftol/ydb-core-blobstorage-dsproxy-ut_ftol >> TGRpcLdapAuthentication::LdapAuthSettingsWithEmptyBaseDn [GOOD] >> TGRpcLdapAuthentication::LdapAuthSettingsWithEmptyBindDn >> TGRpcNewClient::TestAuth [GOOD] >> TGRpcNewClient::YqlQueryWithParams >> YdbYqlClient::SecurityTokenAuth [GOOD] >> YdbYqlClient::RetryOperationTemplate >> TGRpcNewCoordinationClient::SessionSemaphoreInfiniteTimeout [GOOD] >> TGRpcNewCoordinationClient::SessionReconnectReattach >> TYqlDateTimeTests::SimpleOperations [GOOD] >> CommitOffset::Commit_FromSession_ToNewChild_WithoutCommitToParent [GOOD] >> YdbYqlClient::CreateTableWithPartitionAtKeys >> TGRpcClientLowTest::BiStreamCancelled [GOOD] >> TRegisterNodeOverDiscoveryService::ServerWithCertVerification_AuthNotRequired [GOOD] >> YdbTableBulkUpsertOlap::UpsertArrowBatch [GOOD] >> YdbTableBulkUpsertOlap::UpsertArrowDupField |91.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> TYqlDateTimeTests::SimpleOperations [GOOD] Test command err: 2025-05-07T09:00:18.135878Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501626038499158235:2211];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:00:18.136216Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002888/r3tmp/tmpiICjhU/pdisk_1.dat 2025-05-07T09:00:19.222229Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T09:00:19.279389Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:00:19.279483Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:00:19.297653Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:00:19.361178Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 25968, node 1 2025-05-07T09:00:19.570125Z node 1 :GRPC_SERVER WARN: grpc_request_proxy.cpp:509: SchemeBoardDelete /Root Strong=0 
2025-05-07T09:00:19.570225Z node 1 :GRPC_SERVER WARN: grpc_request_proxy.cpp:509: SchemeBoardDelete /Root Strong=0 2025-05-07T09:00:19.690656Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:00:19.690685Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:00:19.690695Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:00:19.690839Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:4442 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:00:20.505931Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:00:23.129988Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501626038499158235:2211];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:00:23.130076Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:00:25.422650Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 2025-05-07T09:00:25.841122Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501626068563930432:2346], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:00:25.841251Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:00:25.846146Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501626068563930444:2349], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:00:25.858774Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480 2025-05-07T09:00:25.932558Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501626068563930446:2350], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-05-07T09:00:26.004973Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501626068563930514:2812] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:00:27.186877Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710661. Ctx: { TraceId: 01jtmzk2ve425648n6hjm3v784, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OWEyZWQ0ZGMtZmUzOTdjODMtMjAwNGUzYWUtYzY4ZGRmMGE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:00:27.635648Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710662. Ctx: { TraceId: 01jtmzk47e7xa68tcjrvszmrbx, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OWEyZWQ0ZGMtZmUzOTdjODMtMjAwNGUzYWUtYzY4ZGRmMGE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:00:27.816381Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710663. Ctx: { TraceId: 01jtmzk4mj5m3geyvt9t7eqvz2, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OWEyZWQ0ZGMtZmUzOTdjODMtMjAwNGUzYWUtYzY4ZGRmMGE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:00:27.966350Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710664. Ctx: { TraceId: 01jtmzk4skd3aj94xdvmwkt5gj, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OWEyZWQ0ZGMtZmUzOTdjODMtMjAwNGUzYWUtYzY4ZGRmMGE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:00:28.139081Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710665. Ctx: { TraceId: 01jtmzk4yf4p1jrxc3pxgkv2fe, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OWEyZWQ0ZGMtZmUzOTdjODMtMjAwNGUzYWUtYzY4ZGRmMGE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-05-07T09:00:30.666053Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7501626087675984864:2141];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:00:30.707056Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002888/r3tmp/tmpFqwF9s/pdisk_1.dat 2025-05-07T09:00:31.042363Z node 4 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:00:31.083275Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:00:31.083352Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:00:31.090533Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 5916, node 4 2025-05-07T09:00:31.343754Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:00:31.343773Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:00:31.343779Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:00:31.343897Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:13238 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:00:32.001209Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T09:00:35.670116Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7501626087675984864:2141];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:00:35.670189Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:00:36.897200Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-05-07T09:00:37.095285Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7501626117740757127:2344], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resour ... [WorkloadService] [TPoolCreatorActor] ActorId: [10:7501626228551499389:2350], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-05-07T09:01:03.555852Z node 10 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [10:7501626228551499464:2810] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:01:03.687443Z node 10 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715661. Ctx: { TraceId: 01jtmzm7ja4hr6z448s0t7x9x1, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=10&id=YmUwMjUwMTctNmE3OTdiNGEtNTY3MjBlYTAtNTVmOWM4MzY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:01:03.895231Z node 10 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715662. Ctx: { TraceId: 01jtmzm7tt1tqz7rzd3k5290bm, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=10&id=YmUwMjUwMTctNmE3OTdiNGEtNTY3MjBlYTAtNTVmOWM4MzY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:01:04.104324Z node 10 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715663. Ctx: { TraceId: 01jtmzm80y9vk0mk59vpm42fa3, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=10&id=YmUwMjUwMTctNmE3OTdiNGEtNTY3MjBlYTAtNTVmOWM4MzY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:01:04.342470Z node 10 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715664. Ctx: { TraceId: 01jtmzm87g467rkmabdfycx7t5, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=10&id=YmUwMjUwMTctNmE3OTdiNGEtNTY3MjBlYTAtNTVmOWM4MzY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002888/r3tmp/tmptLSlxz/pdisk_1.dat 2025-05-07T09:01:07.090703Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T09:01:07.211433Z node 13 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:01:07.270654Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:01:07.270772Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:01:07.275373Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 1653, node 13 2025-05-07T09:01:07.644157Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:01:07.644183Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:01:07.644194Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:01:07.644373Z node 13 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:13760 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:01:08.116494Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:01:12.799297Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 2025-05-07T09:01:12.997481Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 2025-05-07T09:01:13.118335Z node 13 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [13:7501626272451215476:2348], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:13.118487Z node 13 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:13.119881Z node 13 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [13:7501626272451215488:2351], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:13.125843Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710660:3, at schemeshard: 72057594046644480 2025-05-07T09:01:13.168830Z node 13 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [13:7501626272451215490:2352], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710660 completed, doublechecking } 2025-05-07T09:01:13.245531Z node 13 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [13:7501626272451215560:2900] txid# 281474976710661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:01:13.344480Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710662. Ctx: { TraceId: 01jtmzmh0ta9x4drc5v8bqsr2z, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=ZjNlZjg3MDktNTA5ZDg0NDgtODg3ZDFhNzctNWVkNTExMDU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:01:13.601957Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710663. Ctx: { TraceId: 01jtmzmh8g43g44gmenp18b0s0, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=ZjNlZjg3MDktNTA5ZDg0NDgtODg3ZDFhNzctNWVkNTExMDU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:01:14.543187Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710664. Ctx: { TraceId: 01jtmzmhgsd2ga53txwz1w6tqw, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=ZjNlZjg3MDktNTA5ZDg0NDgtODg3ZDFhNzctNWVkNTExMDU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:01:14.555249Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710665. Ctx: { TraceId: 01jtmzmhgsd2ga53txwz1w6tqw, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=ZjNlZjg3MDktNTA5ZDg0NDgtODg3ZDFhNzctNWVkNTExMDU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:01:15.237468Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710666. Ctx: { TraceId: 01jtmzmjefdszpq512br11nhr2, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=ZjNlZjg3MDktNTA5ZDg0NDgtODg3ZDFhNzctNWVkNTExMDU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:01:15.257019Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710667. Ctx: { TraceId: 01jtmzmjefdszpq512br11nhr2, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=ZjNlZjg3MDktNTA5ZDg0NDgtODg3ZDFhNzctNWVkNTExMDU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:01:15.674446Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710668. Ctx: { TraceId: 01jtmzmk5y8v7qmary0d3mwz9y, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=ZjNlZjg3MDktNTA5ZDg0NDgtODg3ZDFhNzctNWVkNTExMDU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:01:15.874893Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710669. Ctx: { TraceId: 01jtmzmkh6dr74s5tpsfjz6gkg, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=ZjNlZjg3MDktNTA5ZDg0NDgtODg3ZDFhNzctNWVkNTExMDU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:01:16.130976Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710670. 
Ctx: { TraceId: 01jtmzmkqf3ewype0mfkk4rsa1, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=ZjNlZjg3MDktNTA5ZDg0NDgtODg3ZDFhNzctNWVkNTExMDU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:01:16.347342Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710671. Ctx: { TraceId: 01jtmzmkzm6xxme72y8dtk16jq, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=ZjNlZjg3MDktNTA5ZDg0NDgtODg3ZDFhNzctNWVkNTExMDU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:01:16.599938Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710672. Ctx: { TraceId: 01jtmzmm642m196xe5z2bynhar, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=ZjNlZjg3MDktNTA5ZDg0NDgtODg3ZDFhNzctNWVkNTExMDU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:01:16.991270Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710673. Ctx: { TraceId: 01jtmzmme63bfz17qq94jmf040, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=ZjNlZjg3MDktNTA5ZDg0NDgtODg3ZDFhNzctNWVkNTExMDU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:01:16.996760Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710674. Ctx: { TraceId: 01jtmzmme63bfz17qq94jmf040, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=ZjNlZjg3MDktNTA5ZDg0NDgtODg3ZDFhNzctNWVkNTExMDU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> TGRpcClientLowTest::BiStreamCancelled [GOOD] Test command err: 2025-05-07T09:00:42.138787Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501626137656031441:2078];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:00:42.139576Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00287b/r3tmp/tmpDB4tNJ/pdisk_1.dat 2025-05-07T09:00:43.190127Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T09:00:43.357115Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:00:43.357225Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:00:43.371792Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:00:43.457091Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 14947, node 1 2025-05-07T09:00:43.594774Z node 1 :GRPC_SERVER WARN: grpc_request_proxy.cpp:509: SchemeBoardDelete /Root Strong=0 2025-05-07T09:00:43.594796Z node 1 :GRPC_SERVER WARN: grpc_request_proxy.cpp:509: SchemeBoardDelete /Root Strong=0 2025-05-07T09:00:44.006611Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is 
empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:00:44.006631Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:00:44.006638Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:00:44.006738Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23482 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:00:44.485876Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... TClient is connected to server localhost:23482 TClient is connected to server localhost:23482 2025-05-07T09:00:45.389579Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710659:0, at schemeshard: 72057594046644480 2025-05-07T09:00:47.110313Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501626137656031441:2078];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:00:47.110397Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:00:49.203461Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501626167720803644:2345], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:00:49.203605Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:00:49.203985Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501626167720803656:2348], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:00:49.208573Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710660:3, at schemeshard: 72057594046644480 2025-05-07T09:00:49.255967Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501626167720803659:2350], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710660 completed, doublechecking } 2025-05-07T09:00:49.332571Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501626167720803741:2736] txid# 281474976710661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } TClient is connected to server localhost:23482 TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1746608444553 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "\n\016\010\001\020\200\204\002\032\004user \003" EffectiveACL: "\n\016\010\001\020\200\204\002\032\004user \003" PathVersion: 10 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 10 ACLVersion: 1 EffectiveACLVersion: 1 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 2 } ChildrenExist: true } Children { Name: ".metadata" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 281474976710660 CreateStep: 1746608449278 ParentPathId: 1 PathState: EPathStateCreate Owner: "met... (TRUNCATED) 2025-05-07T09:00:52.754592Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7501626183956578349:2074];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:00:52.754825Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00287b/r3tmp/tmpzbuNzh/pdisk_1.dat 2025-05-07T09:00:53.024324Z node 4 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:00:53.068213Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:00:53.068312Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:00:53.073174Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 18118, node 4 2025-05-07T09:00:53.387099Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:00:53.387144Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:00:53.387154Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:00:53.387298Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:10331 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:00:53.927849Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... TClient is connected to server localhost:10331 2025-05-07T09:00:59.248258Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7501626211977113648:2242];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00287b/r3tmp/tmpZ4ekFZ/pdisk_1.dat 2025-05-07T09:00:59.293425Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-05-07T09:00:59.366269Z node 7 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:00:59.449929Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:00:59.450035Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:00:59.462225Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 22429, node 7 2025-05-07T09:00:59.748077Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:00:59.748123Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:00:59.748137Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:00:59.748306Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3762 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:01:00.203668Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... TClient is connected to server localhost:3762 TClient is connected to server localhost:3762 2025-05-07T09:01:00.815162Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715659:0, at schemeshard: 72057594046644480 2025-05-07T09:01:03.768468Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7501626229156983787:2341], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:03.768636Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:03.770366Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7501626229156983799:2344], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:03.778605Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715660:3, at schemeshard: 72057594046644480 2025-05-07T09:01:03.816925Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7501626229156983801:2345], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715660 completed, doublechecking } 2025-05-07T09:01:03.881414Z node 7 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [7:7501626229156983874:2716] txid# 281474976715661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:01:04.168486Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[7:7501626211977113648:2242];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:04.168582Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; TClient is connected to server localhost:3762 TClient::Ls request: Root 2025-05-07T09:01:04.226336Z node 7 :TX_PROXY ERROR: describe.cpp:350: Access denied for user with access DescribeSchema to path Root TClient::Ls response: Status: 128 StatusCode: ERROR Issues { message: "Default error" severity: 1 } SchemeStatus: 12 ErrorReason: "Access denied" 2025-05-07T09:01:06.270392Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7501626243667732010:2076];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:06.270461Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00287b/r3tmp/tmpXCcgyA/pdisk_1.dat 2025-05-07T09:01:06.526762Z node 10 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:01:06.586088Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:01:06.586191Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:01:06.592324Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 20152, node 10 2025-05-07T09:01:06.698612Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:01:06.698633Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:01:06.698641Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:01:06.698761Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:7495 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:01:07.244947Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:01:07.458645Z node 10 :TICKET_PARSER ERROR: ticket_parser_impl.h:969: Ticket some****oken (BB86510A): Could not find correct token validator 2025-05-07T09:01:07.458801Z node 10 :GRPC_SERVER ERROR: ydb_dummy.cpp:94: Received TEvRefreshTokenResponse, Authenticated = 0 2025-05-07T09:01:12.349413Z node 13 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[13:7501626266240971929:2072];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:12.349500Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00287b/r3tmp/tmptBiKfy/pdisk_1.dat 2025-05-07T09:01:13.067881Z node 13 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:01:13.110274Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:01:13.110403Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:01:13.139574Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 13581, node 13 2025-05-07T09:01:13.371034Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:01:13.371070Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:01:13.371086Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:01:13.371281Z node 13 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6583 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:01:13.851578Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... >> TAuthenticationWithSqlExecution::CreateAlterUserWithHash [GOOD] >> TDatabaseQuotas::DisableWritesToDatabase >> YdbYqlClient::CreateTableWithPartitionAtKeysAndAutoPartitioning [GOOD] >> YdbYqlClient::BuildInfo [GOOD] >> YdbYqlClient::AlterTableAddIndexAsyncOp |91.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/dsproxy/ut/ydb-core-blobstorage-dsproxy-ut |91.7%| [LD] {RESULT} $(B)/ydb/core/blobstorage/dsproxy/ut/ydb-core-blobstorage-dsproxy-ut |91.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/dsproxy/ut/ydb-core-blobstorage-dsproxy-ut ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> TRegisterNodeOverDiscoveryService::ServerWithCertVerification_AuthNotRequired [GOOD] Test command err: 2025-05-07T09:00:11.203663Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501626008421600208:2279];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:00:11.203722Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002892/r3tmp/tmp1i3qwh/pdisk_1.dat 2025-05-07T09:00:11.967116Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:00:11.982456Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:00:11.982547Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:00:11.988361Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 1917, node 1 2025-05-07T09:00:12.106567Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:00:12.106591Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:00:12.106616Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:00:12.106783Z node 1 
:NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6892 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:00:12.501816Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:00:12.662787Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1815: Ticket 050ECFAC7D2AFB3394DC8EF840A8BA423BA8BC2D433621A25D75EB83BBC46322 (ipv6:[::1]:48912) has now permanent error message 'Cannot create token from certificate. Client certificate failed verification' 2025-05-07T09:00:12.663749Z node 1 :TICKET_PARSER ERROR: ticket_parser_impl.h:969: Ticket 050ECFAC7D2AFB3394DC8EF840A8BA423BA8BC2D433621A25D75EB83BBC46322: Cannot create token from certificate. 
Client certificate failed verification 2025-05-07T09:00:12.786278Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1784: Ticket **** (B6C6F477) (ipv6:[::1]:48922) has now valid token of root@builtin 2025-05-07T09:00:12.910837Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:758: CanInitLoginToken, domain db /Root, request db /Root, token db , DomainLoginOnly 1 2025-05-07T09:00:12.910886Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:763: CanInitLoginToken, target database candidates(1): /Root 2025-05-07T09:00:12.910896Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:816: CanInitLoginToken, database /Root, A6 error 2025-05-07T09:00:12.910945Z node 1 :TICKET_PARSER ERROR: ticket_parser_impl.h:969: Ticket **** (0C093832): Could not find correct token validator 2025-05-07T09:00:16.842068Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7501626026245289405:2072];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:00:16.843005Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002892/r3tmp/tmpNxgmfE/pdisk_1.dat 2025-05-07T09:00:17.076969Z node 4 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:00:17.113349Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:00:17.113407Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:00:17.117008Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 28650, node 4 2025-05-07T09:00:17.287243Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:00:17.287267Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:00:17.287274Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:00:17.287406Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:64489 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-05-07T09:00:17.603728Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:00:17.748753Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1815: Ticket 050ECFAC7D2AFB3394DC8EF840A8BA423BA8BC2D433621A25D75EB83BBC46322 (ipv6:[::1]:53970) has now permanent error message 'Cannot create token from certificate. Client certificate failed verification' 2025-05-07T09:00:17.749387Z node 4 :TICKET_PARSER ERROR: ticket_parser_impl.h:969: Ticket 050ECFAC7D2AFB3394DC8EF840A8BA423BA8BC2D433621A25D75EB83BBC46322: Cannot create token from certificate. Client certificate failed verification 2025-05-07T09:00:17.882300Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1784: Ticket **** (B6C6F477) (ipv6:[::1]:53978) has now valid token of root@builtin 2025-05-07T09:00:18.035548Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:758: CanInitLoginToken, domain db /Root, request db /Root, token db , DomainLoginOnly 1 2025-05-07T09:00:18.035594Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:763: CanInitLoginToken, target database candidates(1): /Root 2025-05-07T09:00:18.035604Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:816: CanInitLoginToken, database /Root, A6 error 2025-05-07T09:00:18.035660Z node 4 :TICKET_PARSER ERROR: ticket_parser_impl.h:969: Ticket **** (0C093832): Could not find correct token validator 2025-05-07T09:00:22.803869Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7501626052541528489:2161];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:00:22.804025Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002892/r3tmp/tmphEEsnB/pdisk_1.dat 2025-05-07T09:00:23.380263Z node 7 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:00:23.525492Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:00:23.534190Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:00:23.552191Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 7651, node 7 2025-05-07T09:00:23.858680Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:00:23.858716Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:00:23.858727Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:00:23.858917Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6293 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:00:24.596328Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... E0507 09:00:24.743424778 303059 ssl_transport_security.cc:1431] Handshake failed with fatal error SSL_ERROR_SSL: error:140 ... meout_ms=10000, grpc.max_receive_message_length=64000000, grpc.max_send_message_length=64000000, grpc.primary_user_agent=grpc-c++/1.54.3, grpc.resource_quota=0x504000195650, grpc.server_uri=dns:///localhost:10038} 2025-05-07T09:00:59.615646Z node 19 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[19:7501626210864731647:2073];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:00:59.615712Z node 19 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002892/r3tmp/tmprbcWEM/pdisk_1.dat 2025-05-07T09:01:00.050474Z node 19 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:01:00.090709Z node 19 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(19, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:01:00.090822Z node 19 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(19, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:01:00.096055Z node 19 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(19, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 25462, node 19 2025-05-07T09:01:00.339936Z node 19 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:01:00.339964Z node 19 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:01:00.339975Z node 19 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:01:00.340185Z node 19 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:12470 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:01:00.804799Z node 19 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:01:00.983942Z node 19 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1784: Ticket **** (B6C6F477) (ipv6:[::1]:57738) has now valid token of root@builtin 2025-05-07T09:01:01.070101Z node 19 :TICKET_PARSER TRACE: ticket_parser_impl.h:758: CanInitLoginToken, domain db /Root, request db /Root, token db , DomainLoginOnly 1 2025-05-07T09:01:01.070140Z node 19 :TICKET_PARSER TRACE: ticket_parser_impl.h:763: CanInitLoginToken, target database candidates(1): /Root 2025-05-07T09:01:01.070153Z node 19 :TICKET_PARSER TRACE: ticket_parser_impl.h:816: CanInitLoginToken, database /Root, A6 error 2025-05-07T09:01:01.070188Z node 19 :TICKET_PARSER ERROR: ticket_parser_impl.h:969: Ticket **** (0C093832): Could not find correct token validator 2025-05-07T09:01:05.886344Z node 22 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[22:7501626239916214171:2073];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:05.886405Z node 22 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002892/r3tmp/tmpbQ7gec/pdisk_1.dat 2025-05-07T09:01:06.143453Z node 22 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:01:06.187042Z node 22 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(22, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:01:06.187168Z node 22 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(22, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:01:06.190939Z node 22 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(22, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 8626, node 22 2025-05-07T09:01:06.379973Z node 22 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:01:06.380001Z node 22 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:01:06.380011Z node 22 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:01:06.380190Z node 22 
:NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:12835 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:01:06.826049Z node 22 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:01:07.019726Z node 22 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1784: Ticket **** (B6C6F477) (ipv6:[::1]:44904) has now valid token of root@builtin 2025-05-07T09:01:07.090370Z node 22 :TICKET_PARSER TRACE: ticket_parser_impl.h:758: CanInitLoginToken, domain db /Root, request db /Root, token db , DomainLoginOnly 1 2025-05-07T09:01:07.090409Z node 22 :TICKET_PARSER TRACE: ticket_parser_impl.h:763: CanInitLoginToken, target database candidates(1): /Root 2025-05-07T09:01:07.090421Z node 22 :TICKET_PARSER TRACE: ticket_parser_impl.h:816: CanInitLoginToken, database /Root, A6 error 2025-05-07T09:01:07.090461Z node 22 :TICKET_PARSER ERROR: ticket_parser_impl.h:969: Ticket **** (0C093832): Could not find correct token validator 2025-05-07T09:01:12.838883Z node 25 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[25:7501626266907815010:2073];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:12.838988Z node 25 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002892/r3tmp/tmpneCv2t/pdisk_1.dat 2025-05-07T09:01:13.117292Z node 25 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 12767, node 25 2025-05-07T09:01:13.201872Z node 25 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(25, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:01:13.202022Z node 25 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(25, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:01:13.352471Z node 25 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(25, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:01:13.431100Z node 25 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:01:13.431137Z node 25 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 
2025-05-07T09:01:13.431150Z node 25 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:01:13.431324Z node 25 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3604 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:01:13.908214Z node 25 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:01:14.107876Z node 25 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1784: Ticket 0EFFC76764645F953150F96ED2BE9C7FAAA23DC61A0942AB9DE4BAB5A930C99C (ipv6:[::1]:52520) has now valid token of C=RU,ST=MSK,L=MSK,O=YA,OU=UtTest,CN=localhost@cert 2025-05-07T09:01:14.338077Z node 25 :TICKET_PARSER ERROR: ticket_parser_impl.h:969: Ticket **** (717F937C): Unknown token 2025-05-07T09:01:14.501730Z node 25 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1815: Ticket DEA36AEC6F39D17FA9B60E7513ADF0C7BBCA4CC09C4153CB8D331382780DCEA8 (ipv6:[::1]:52574) has now permanent error message 'Cannot create token from certificate. Client certificate failed verification' 2025-05-07T09:01:14.507002Z node 25 :TICKET_PARSER ERROR: ticket_parser_impl.h:969: Ticket DEA36AEC6F39D17FA9B60E7513ADF0C7BBCA4CC09C4153CB8D331382780DCEA8: Cannot create token from certificate. 
Client certificate failed verification
>> GrpcConnectionStringParserTest::NoDatabaseFlag [GOOD]
>> GrpcConnectionStringParserTest::IncorrectConnectionString
>> TGRpcNewCoordinationClient::SessionMethods
>> GrpcConnectionStringParserTest::IncorrectConnectionString [GOOD]
>> GrpcConnectionStringParserTest::CommonClientSettingsFromConnectionString
>> YdbImport::EmptyData
>> TGRpcLdapAuthentication::LdapAuthSettingsWithEmptyBindDn [GOOD]
>> TGRpcLdapAuthentication::LdapAuthSettingsWithEmptyBindPassword
>> TTableProfileTests::OverwriteCompactionPolicy
>> Cdc::ResolvedTimestampForDisplacedUpsert [GOOD]
------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> YdbYqlClient::CreateTableWithPartitionAtKeysAndAutoPartitioning [GOOD]
Test command err:
2025-05-07T09:00:44.627032Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501626147429492640:2072];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:00:44.627088Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002876/r3tmp/tmppnrz52/pdisk_1.dat 2025-05-07T09:00:45.676832Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T09:00:45.705641Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:00:45.712223Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:00:45.712333Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:00:45.733848Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 22731, node 1 2025-05-07T09:00:45.985830Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:00:45.985855Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:00:45.985862Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:00:45.985995Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:18994 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:00:47.022470Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:00:49.630192Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501626147429492640:2072];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:00:49.630273Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:00:53.184682Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7501626186866467582:2073];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:00:53.184740Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002876/r3tmp/tmpnl3FiQ/pdisk_1.dat 2025-05-07T09:00:53.782608Z node 4 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:00:53.965254Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:00:53.965350Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:00:53.999914Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 31171, node 4 2025-05-07T09:00:54.391805Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:00:54.391831Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:00:54.391856Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:00:54.392016Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:22806 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:00:55.164008Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:00:58.190144Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7501626186866467582:2073];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:00:58.190213Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:00:59.932548Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-05-07T09:01:00.679996Z node 4 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 4, TabletId: 72075186224037889 not found 2025-05-07T09:01:00.782302Z node 4 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 4, TabletId: 72075186224037888 not found 2025-05-07T09:01:02.888571Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7501626226608180684:2076];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:02.888619Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002876/r3tmp/tmpSPQCpf/pdisk_1.dat 2025-05-07T09:01:03.306986Z node 7 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:01:03.343361Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:01:03.343459Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:01:03.351489Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 19837, node 7 2025-05-07T09:01:03.618156Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:01:03.618190Z 
node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:01:03.618199Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:01:03.618350Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:21322 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:01:03.941304Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:01:07.145863Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-05-07T09:01:09.349198Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7501626256850524010:2210];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:09.350712Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002876/r3tmp/tmp7nGjce/pdisk_1.dat 2025-05-07T09:01:09.507897Z node 10 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:01:09.543541Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:01:09.543635Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:01:09.546637Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 23336, node 10 2025-05-07T09:01:09.601881Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:01:09.601904Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:01:09.601915Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:01:09.602075Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable 
configuration TClient is connected to server localhost:9580 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:01:09.967795Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:01:13.049808Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-05-07T09:01:15.575849Z node 13 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[13:7501626283283614318:2074];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:15.575911Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002876/r3tmp/tmpmJGrdd/pdisk_1.dat 2025-05-07T09:01:15.923706Z node 13 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:01:15.966220Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:01:15.966317Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:01:15.981773Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 28830, node 13 2025-05-07T09:01:16.098678Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:01:16.098703Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:01:16.098715Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:01:16.098861Z node 13 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:22562 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:01:16.518165Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:01:20.058237Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480
>> TGRpcNewCoordinationClient::SessionReconnectReattach [GOOD]
>> TGRpcNewCoordinationClientAuth::OwnersAndPermissions
>> TGRpcNewClient::YqlQueryWithParams [GOOD]
>> TGRpcNewClient::YqlExplainDataQuery
>> TGRpcYdbTest::ExecuteQueryCache [GOOD]
>> TGRpcYdbTest::CreateDeleteYqlSession [GOOD]
>> TSchemeShardTTLTestsWithReboots::CopyTable [GOOD]
------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/ut_with_sdk/unittest >> CommitOffset::Commit_FromSession_ToNewChild_WithoutCommitToParent [GOOD]
Test command err:
2025-05-07T08:57:56.758915Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501625428101758005:2059];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:57:56.766645Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-05-07T08:57:56.944417Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/004034/r3tmp/tmp5MWMZp/pdisk_1.dat 2025-05-07T08:57:57.163995Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:57:57.164112Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:57:57.166357Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:57:57.189677Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 63888, node 1 2025-05-07T08:57:57.291149Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/zvgn/004034/r3tmp/yandexYGzGwE.tmp 2025-05-07T08:57:57.291181Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to 
initialize from file: /home/runner/.ya/build/build_root/zvgn/004034/r3tmp/yandexYGzGwE.tmp 2025-05-07T08:57:57.291362Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/zvgn/004034/r3tmp/yandexYGzGwE.tmp 2025-05-07T08:57:57.291483Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-05-07T08:57:57.338519Z INFO: TTestServer started on Port 31907 GrpcPort 63888 TClient is connected to server localhost:31907 PQClient connected to localhost:63888 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:57:57.659955Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-05-07T08:57:57.703450Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... waiting... waiting... 2025-05-07T08:58:00.045925Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501625445281627995:2339], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:58:00.045942Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501625445281628014:2342], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:58:00.046115Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:58:00.051084Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710662:3, at schemeshard: 72057594046644480 2025-05-07T08:58:00.068809Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501625445281628016:2343], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710662 completed, doublechecking } 2025-05-07T08:58:00.367897Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501625445281628080:2439] txid# 281474976710663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:58:00.402165Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T08:58:00.453835Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T08:58:00.493631Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7501625445281628088:2349], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:5:17: Error: At function: KiReadTable!
:5:17: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Versions]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-05-07T08:58:00.494831Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2147: SessionId: ydb://session/3?node_id=1&id=MjdiYjM4ZTUtOTU5M2MzNjQtMjdhOWUzYzEtMzJhNzUxM2I=, ActorId: [1:7501625445281627984:2337], ActorState: ExecuteState, TraceId: 01jtmzemebc8d8t37zwrsw7yb1, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-05-07T08:58:00.497235Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 5 column: 17 } message: "At function: KiReadTable!" end_position { row: 5 column: 17 } severity: 1 issues { position { row: 5 column: 17 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Versions]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 5 column: 17 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-05-07T08:58:00.561034Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); === CheckClustersList. Subcribe to ClusterTracker from [1:7501625445281628382:2617] 2025-05-07T08:58:01.750911Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501625428101758005:2059];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:58:01.751029Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; === CheckClustersList. 
Ok 2025-05-07T08:58:07.008982Z :TopicSplitMerge INFO: TTopicSdkTestSetup started 2025-05-07T08:58:07.039023Z node 1 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:132: new create topic request 2025-05-07T08:58:07.040330Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269877761, Sender [1:7501625475346399648:2690], Recipient [1:7501625432396725729:2194]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-05-07T08:58:07.040362Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4937: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-05-07T08:58:07.040374Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5713: Pipe server connected, at tablet: 72057594046644480 2025-05-07T08:58:07.040404Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271122432, Sender [1:7501625475346399644:2687], Recipient [1:7501625432396725729:2194]: {TEvModifySchemeTransaction txid# 281474976710673 TabletId# 72057594046644480} 2025-05-07T08:58:07.040417Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4851: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-05-07T08:58:07.090276Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/Root" OperationType: ESchemeOpCreatePersQueueGroup CreatePersQueueGroup { Name: "test-topic" TotalGroupCount: 10 PartitionPerTablet: 1 PQTabletConfig { PartitionConfig { MaxCountInPartition: 2147483647 LifetimeSeconds: 86400 SourceIdLifetimeSeconds: 1382400 WriteSpeedInBytesPerSecond: 1048576 BurstSize: 1048576 ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } SourceIdMaxCounts: 6000000 } RequireAuthWrite: true RequireAuthRead: true FormatVersion: 0 Codecs { } Consumers { Name: "test-consumer" ReadFromTimestampsMs: 0 FormatVersion: 0 Codec { } ServiceType: "data-streams" Version: 0 } } } } TxId: 281474976710673 TabletId: 72057594046644480 Owner: "root@builtin" UserToken: "***" PeerName: "" , at schemeshard: 72057594046644480 2025-05-07T08:58:07.090767Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_pq.cpp:307: TCreatePQ Propose, path: /Root/test-topic, opId: 281474976710673:0, at schemeshard: 72057594046644480 2025-05-07T08:58:07.091068Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:319: AttachChild: child attached as only one child to the parent, parent id: [OwnerId: 72057594046644480, LocalPathId: 1], parent name: Root, child name: test-topic, child id: [OwnerId: 72057594046644480, LocalPathId: 13], at schemeshard: 72057594046644480 2025-05-07T08:58:07.091113Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 720 ... 
itChannelProfiles { PoolKind: "test" } SourceIdMaxCounts: 6000000 } 2025-05-07T09:01:17.710914Z node 8 :PERSQUEUE TRACE: partition.h:561: StateIdle event# 271188536 (NKikimr::TEvPQ::TEvSubDomainStatus), Tablet [8:7501626262997256072:2809], Partition 2, Sender [8:7501626262997256072:2809], Recipient [8:7501626262997256147:2815], Cookie: 0 2025-05-07T09:01:17.710972Z node 8 :PERSQUEUE TRACE: partition.h:563: StateIdle, received event# 271188536, Sender [8:7501626262997256072:2809], Recipient [8:7501626262997256147:2815]: NKikimrPQ.TEvSubDomainStatus SubDomainOutOfSpace: false 2025-05-07T09:01:17.710991Z node 8 :PERSQUEUE TRACE: partition.h:605: StateIdle, processing event TEvPQ::TEvSubDomainStatus 2025-05-07T09:01:17.711030Z node 8 :PERSQUEUE TRACE: partition.h:561: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [8:7501626262997256072:2809], Partition 2, Sender [0:0:0], Recipient [8:7501626262997256147:2815], Cookie: 0 2025-05-07T09:01:17.711065Z node 8 :PERSQUEUE TRACE: partition.h:563: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [8:7501626262997256147:2815]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-05-07T09:01:17.711086Z node 8 :PERSQUEUE TRACE: partition.h:589: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-05-07T09:01:17.711131Z node 8 :PERSQUEUE TRACE: partition.cpp:398: [PQ: 72075186224037896, Partition: 2, State: StateIdle] Have 0 items to delete old stuff 2025-05-07T09:01:17.711202Z node 8 :PERSQUEUE TRACE: partition.cpp:407: [PQ: 72075186224037896, Partition: 2, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-05-07T09:01:17.711230Z node 8 :PERSQUEUE TRACE: partition_write.cpp:163: [PQ: 72075186224037896, Partition: 2, State: StateIdle] TPartition::ProcessReserveRequests. 2025-05-07T09:01:17.711239Z node 8 :PERSQUEUE TRACE: pq_impl.cpp:5200: HandleHook, received event# 271188503, Sender [8:7501626262997256147:2815], Recipient [8:7501626262997256072:2809]: NKikimr::TEvPQ::TEvPartitionLabeledCounters 2025-05-07T09:01:17.711261Z node 8 :PERSQUEUE TRACE: partition_write.cpp:252: [PQ: 72075186224037896, Partition: 2, State: StateIdle] TPartition::AnswerCurrentWrites. 
Responses.size()=0 2025-05-07T09:01:17.711270Z node 8 :PERSQUEUE TRACE: pq_impl.cpp:5214: HandleHook, processing event TEvPQ::TEvPartitionLabeledCounters 2025-05-07T09:01:17.711314Z node 8 :PERSQUEUE TRACE: partition.h:561: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [8:7501626220047581884:2454], Partition 0, Sender [0:0:0], Recipient [8:7501626220047581946:2458], Cookie: 0 2025-05-07T09:01:17.711347Z node 8 :PERSQUEUE TRACE: partition.h:563: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [8:7501626220047581946:2458]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-05-07T09:01:17.711356Z node 8 :PERSQUEUE TRACE: partition.h:561: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [8:7501626262997256070:2808], Partition 1, Sender [0:0:0], Recipient [8:7501626262997256149:2817], Cookie: 0 2025-05-07T09:01:17.711364Z node 8 :PERSQUEUE TRACE: partition.h:589: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-05-07T09:01:17.711402Z node 8 :PERSQUEUE TRACE: partition.h:563: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [8:7501626262997256149:2817]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-05-07T09:01:17.711404Z node 8 :PERSQUEUE TRACE: partition.cpp:398: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete old stuff 2025-05-07T09:01:17.711427Z node 8 :PERSQUEUE TRACE: partition.h:589: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-05-07T09:01:17.711439Z node 8 :PERSQUEUE TRACE: partition.cpp:407: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-05-07T09:01:17.711456Z node 8 :PERSQUEUE TRACE: partition_write.cpp:163: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ProcessReserveRequests. 2025-05-07T09:01:17.711477Z node 8 :PERSQUEUE TRACE: partition_write.cpp:252: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-05-07T09:01:17.711780Z node 8 :PERSQUEUE TRACE: partition.cpp:398: [PQ: 72075186224037897, Partition: 1, State: StateIdle] Have 0 items to delete old stuff 2025-05-07T09:01:17.711847Z node 8 :PERSQUEUE TRACE: partition.cpp:407: [PQ: 72075186224037897, Partition: 1, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-05-07T09:01:17.711871Z node 8 :PERSQUEUE TRACE: partition_write.cpp:163: [PQ: 72075186224037897, Partition: 1, State: StateIdle] TPartition::ProcessReserveRequests. 2025-05-07T09:01:17.711874Z node 8 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:688: [72075186224037893][test-topic] Send TEvPeriodicTopicStats PathId: 13 Generation: 1 StatsReportRound: 16 DataSize: 0 UsedReserveSize: 0 2025-05-07T09:01:17.711896Z node 8 :PERSQUEUE TRACE: partition_write.cpp:252: [PQ: 72075186224037897, Partition: 1, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-05-07T09:01:17.712083Z node 8 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1823: [72075186224037893][test-topic] ProcessPendingStats. 
PendingUpdates size 3 2025-05-07T09:01:17.712688Z node 8 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271188001, Sender [8:7501626220047581889:2455], Recipient [8:7501626159918038722:2146]: NKikimrPQ.TEvPeriodicTopicStats PathId: 13 Generation: 1 Round: 16 DataSize: 0 UsedReserveSize: 0 SubDomainOutOfSpace: false 2025-05-07T09:01:17.712720Z node 8 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4878: StateWork, processing event TEvPersQueue::TEvPeriodicTopicStats 2025-05-07T09:01:17.712742Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard__pq_stats.cpp:100: Got periodic topic stats at partition [OwnerId: 72057594046644480, LocalPathId: 13] DataSize 0 UsedReserveSize 0 2025-05-07T09:01:17.712773Z node 8 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__pq_stats.cpp:141: Will delay TTxStoreTopicStats on# 0.099995s, queue# 1 2025-05-07T09:01:17.742373Z node 8 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271122945, Sender [8:7501626220047581889:2455], Recipient [8:7501626159918038722:2146]: NKikimrSchemeOp.TDescribePath PathId: 13 SchemeshardId: 72057594046644480 2025-05-07T09:01:17.742430Z node 8 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4852: StateWork, processing event TEvSchemeShard::TEvDescribeScheme 2025-05-07T09:01:17.812403Z node 8 :PERSQUEUE TRACE: partition.h:561: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [8:7501626262997256072:2809], Partition 2, Sender [0:0:0], Recipient [8:7501626262997256147:2815], Cookie: 0 2025-05-07T09:01:17.812501Z node 8 :PERSQUEUE TRACE: partition.h:563: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [8:7501626262997256147:2815]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-05-07T09:01:17.812531Z node 8 :PERSQUEUE TRACE: partition.h:589: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-05-07T09:01:17.812585Z node 8 :PERSQUEUE TRACE: partition.cpp:398: [PQ: 72075186224037896, Partition: 2, State: StateIdle] Have 0 items to delete old stuff 2025-05-07T09:01:17.812674Z node 8 :PERSQUEUE TRACE: partition.cpp:407: [PQ: 72075186224037896, Partition: 2, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-05-07T09:01:17.812703Z node 8 :PERSQUEUE TRACE: partition_write.cpp:163: [PQ: 72075186224037896, Partition: 2, State: StateIdle] TPartition::ProcessReserveRequests. 2025-05-07T09:01:17.812737Z node 8 :PERSQUEUE TRACE: partition_write.cpp:252: [PQ: 72075186224037896, Partition: 2, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-05-07T09:01:17.812798Z node 8 :PERSQUEUE TRACE: partition.h:561: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [8:7501626220047581884:2454], Partition 0, Sender [0:0:0], Recipient [8:7501626220047581946:2458], Cookie: 0 2025-05-07T09:01:17.812835Z node 8 :PERSQUEUE TRACE: partition.h:563: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [8:7501626220047581946:2458]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-05-07T09:01:17.812848Z node 8 :PERSQUEUE TRACE: partition.h:589: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-05-07T09:01:17.812873Z node 8 :PERSQUEUE TRACE: partition.cpp:398: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete old stuff 2025-05-07T09:01:17.812905Z node 8 :PERSQUEUE TRACE: partition.cpp:407: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete all stuff. 
Delete command NKikimrClient.TKeyValueRequest 2025-05-07T09:01:17.812922Z node 8 :PERSQUEUE TRACE: partition_write.cpp:163: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ProcessReserveRequests. 2025-05-07T09:01:17.812939Z node 8 :PERSQUEUE TRACE: partition_write.cpp:252: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-05-07T09:01:17.812980Z node 8 :PERSQUEUE TRACE: partition.h:561: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [8:7501626262997256070:2808], Partition 1, Sender [0:0:0], Recipient [8:7501626262997256149:2817], Cookie: 0 2025-05-07T09:01:17.813007Z node 8 :PERSQUEUE TRACE: partition.h:563: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [8:7501626262997256149:2817]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-05-07T09:01:17.813019Z node 8 :PERSQUEUE TRACE: partition.h:589: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-05-07T09:01:17.813039Z node 8 :PERSQUEUE TRACE: partition.cpp:398: [PQ: 72075186224037897, Partition: 1, State: StateIdle] Have 0 items to delete old stuff 2025-05-07T09:01:17.813067Z node 8 :PERSQUEUE TRACE: partition.cpp:407: [PQ: 72075186224037897, Partition: 1, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-05-07T09:01:17.813078Z node 8 :PERSQUEUE TRACE: partition_write.cpp:163: [PQ: 72075186224037897, Partition: 1, State: StateIdle] TPartition::ProcessReserveRequests. 2025-05-07T09:01:17.813093Z node 8 :PERSQUEUE TRACE: partition_write.cpp:252: [PQ: 72075186224037897, Partition: 1, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-05-07T09:01:17.822800Z node 8 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 2146435095, Sender [0:0:0], Recipient [8:7501626159918038722:2146]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTopicStats 2025-05-07T09:01:17.822865Z node 8 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5016: StateWork, processing event TEvPrivate::TEvPersistTopicStats 2025-05-07T09:01:17.822892Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__pq_stats.cpp:119: Started TEvPersistStats at tablet 72057594046644480, queue size# 1 2025-05-07T09:01:17.822906Z node 8 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__pq_stats.cpp:128: Will execute TTxStoreStats, queue# 1 2025-05-07T09:01:17.822973Z node 8 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__pq_stats.cpp:141: Will delay TTxStoreTopicStats on# 0.000000s, queue# 1 2025-05-07T09:01:17.823346Z node 8 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 2146435095, Sender [0:0:0], Recipient [8:7501626159918038722:2146]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTopicStats 2025-05-07T09:01:17.823368Z node 8 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5016: StateWork, processing event TEvPrivate::TEvPersistTopicStats 2025-05-07T09:01:17.823384Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__pq_stats.cpp:119: Started TEvPersistStats at tablet 72057594046644480, queue size# 0
------- [TM] {asan, default-linux-x86_64, release} ydb/core/sys_view/ut/unittest >> SystemView::QueryStatsRetries [GOOD]
Test command err:
2025-05-07T08:55:13.805647Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501624727586299806:2250];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:55:13.817626Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: 
fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0045ac/r3tmp/tmp9jUw1k/pdisk_1.dat 2025-05-07T08:55:14.442337Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:55:14.442448Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:55:14.450945Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:55:14.482508Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 22090, node 1 2025-05-07T08:55:14.623020Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:55:14.623153Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:55:14.623163Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:55:14.623524Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:7032 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:55:15.069695Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:55:15.111141Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateSubDomain, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T08:55:15.146319Z node 5 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[5:7501624735370683260:2074];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:55:15.146393Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/Tenant1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-05-07T08:55:15.177916Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:55:15.294279Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:55:15.294398Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:55:15.309607Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 4 Cookie 4 2025-05-07T08:55:15.314781Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:55:15.332325Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:55:15.337896Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:55:15.350055Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 5 Cookie 5 2025-05-07T08:55:15.364601Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:55:15.556535Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateSubDomain, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:55:15.595271Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7501624737057028106:2071];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:55:15.595347Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/Tenant2/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-05-07T08:55:15.605992Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7501624735446244465:2139];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:55:15.606041Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/Tenant2/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-05-07T08:55:15.638663Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T08:55:15.811891Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:55:15.812342Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:55:15.838767Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-05-07T08:55:15.851311Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:55:15.864940Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:55:15.865017Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:55:15.901698Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 3 Cookie 3 2025-05-07T08:55:15.931234Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:55:18.800298Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501624727586299806:2250];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:55:18.800421Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:55:20.146100Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[5:7501624735370683260:2074];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:55:20.146197Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/Tenant1/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:55:20.598099Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7501624737057028106:2071];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:55:20.598169Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/Tenant2/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:55:20.612525Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7501624735446244465:2139];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:55:20.612639Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/Tenant2/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:55:21.035347Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T08:55:21.271333Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501624761946039441:2352], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:55:21.271427Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501624761946039433:2349], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:55:21.271971Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:55:21.278853Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710663:3, at schemeshard: 72057594046644480 2025-05-07T08:55:21.322948Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501624761946039447:2353], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710663 completed, doublechecking } 2025-05-07T08:55:21.410912Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501624761946039515:3043] txid# 281474976710664, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:55:22.096471Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710665. Ctx: { TraceId: 01jtmz9sdm230jc3qzbgvdzqys, Database: , DatabaseId: /Root, Ses ... [71:7501626099770593419:2071];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:00:38.590247Z node 71 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:00:49.338082Z node 71 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7261: Cannot get console configs 2025-05-07T09:00:49.338134Z node 71 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:00:51.950383Z node 71 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [71:7501626177080005948:2373], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:00:51.950668Z node 71 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:00:51.956870Z node 71 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [71:7501626177080005960:2376], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:00:51.969304Z node 71 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480 2025-05-07T09:00:52.090483Z node 71 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [71:7501626177080005965:2377], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-05-07T09:00:52.154014Z node 71 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [71:7501626181374973335:2778] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:00:52.465121Z node 71 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715661. Ctx: { TraceId: 01jtmzkwb801zq1q3tnm84zy3v, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=71&id=Njk5NzYwMzUtZjUxOWNiOGItMzNmNzRkODEtOGYxOGNhYTA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:00:52.838008Z node 71 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715663. Ctx: { TraceId: 01jtmzkwwk6db904nv792tj09b, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=71&id=ZGE1Zjk5LTlmMGVhNmEyLTJjNzg3ODc3LTNlOGQ1ZmQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:00:52.840799Z node 71 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:45: Scan started, actor: [71:7501626181374973416:2397], owner: [71:7501626181374973412:2395], scan id: 0, table id: [72057594046644480:1:0:top_queries_by_request_units_one_hour] 2025-05-07T09:00:52.844609Z node 71 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:321: Scan prepared, actor: [71:7501626181374973416:2397], schemeshard id: 72057594046644480, hive id: 72057594037968897, database: /Root, database owner: root@builtin, domain key: [OwnerId: 72057594046644480, LocalPathId: 1], database node count: 1 2025-05-07T09:00:52.850937Z node 71 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:65: Sending scan batch, actor: [71:7501626181374973416:2397], row count: 1, finished: 1 2025-05-07T09:00:52.851001Z node 71 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:120: Scan finished, actor: [71:7501626181374973416:2397], owner: [71:7501626181374973412:2395], scan id: 0, table id: [72057594046644480:1:0:top_queries_by_request_units_one_hour] 2025-05-07T09:00:52.858443Z node 71 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1746608452831, txId: 281474976715662] shutting down 2025-05-07T09:00:57.510667Z node 76 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[76:7501626202116394227:2074];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:00:57.510784Z node 76 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0045ac/r3tmp/tmpszPGgN/pdisk_1.dat 2025-05-07T09:00:58.161720Z node 76 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:00:58.281785Z node 76 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(76, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:00:58.283343Z node 76 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(76, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:00:58.292307Z node 76 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(76, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 29709, node 76 
2025-05-07T09:00:58.595940Z node 76 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:00:58.595978Z node 76 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:00:58.595994Z node 76 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:00:58.596276Z node 76 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:65229 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:00:59.410570Z node 76 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:00:59.545997Z node 76 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:01:02.510693Z node 76 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[76:7501626202116394227:2074];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:02.510827Z node 76 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:01:09.210693Z node 76 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [76:7501626253656002890:2358], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:09.210819Z node 76 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [76:7501626253656002884:2355], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:09.211130Z node 76 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:09.218617Z node 76 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480 2025-05-07T09:01:09.270824Z node 76 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [76:7501626253656002898:2359], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-05-07T09:01:09.381163Z node 76 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [76:7501626253656002970:2739] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:01:10.019881Z node 76 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715661. Ctx: { TraceId: 01jtmzmd6q15qcd430yt7kphty, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=76&id=ZmNmNWZiYzItYzc0ZGIwYjAtMmUwYTFmYjEtNjk1N2NhOWM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:01:10.482801Z node 76 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715663. Ctx: { TraceId: 01jtmzme1r4ev0cp43edgwzeah, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=76&id=NThhNjQ1MjEtZGRkNjE0YjAtNDk1OWYzMzMtOTYwNjk3MQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:01:10.495817Z node 76 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:45: Scan started, actor: [76:7501626257950970359:2382], owner: [76:7501626257950970356:2380], scan id: 0, table id: [72057594046644480:1:0:top_queries_by_read_bytes_one_minute] 2025-05-07T09:01:10.551053Z node 76 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:321: Scan prepared, actor: [76:7501626257950970359:2382], schemeshard id: 72057594046644480, hive id: 72057594037968897, database: /Root, database owner: root@builtin, domain key: [OwnerId: 72057594046644480, LocalPathId: 1], database node count: 1 2025-05-07T09:01:10.551890Z node 76 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:65: Sending scan batch, actor: [76:7501626257950970359:2382], row count: 1, finished: 1 2025-05-07T09:01:10.551947Z node 76 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:120: Scan finished, actor: [76:7501626257950970359:2382], owner: [76:7501626257950970356:2380], scan id: 0, table id: [72057594046644480:1:0:top_queries_by_read_bytes_one_minute] 2025-05-07T09:01:10.558449Z node 76 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1746608470466, txId: 281474976715662] shutting down >> YdbYqlClient::RetryOperationTemplate [GOOD] >> YdbYqlClient::RetryOperationSync >> YdbYqlClient::TestReadTableMultiShardUseSnapshot [GOOD] >> YdbYqlClient::TestReadTableMultiShardOneRow ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> TGRpcYdbTest::ExecuteQueryCache [GOOD] Test command err: 2025-05-07T09:00:37.730249Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501626116300022030:2078];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:00:37.730356Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00287d/r3tmp/tmpHdoHdm/pdisk_1.dat 2025-05-07T09:00:38.817036Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T09:00:38.977769Z node 1 :IMPORT WARN: 
schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:00:39.022824Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:00:39.023829Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:00:39.064091Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 22124, node 1 2025-05-07T09:00:39.402482Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:00:39.402506Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:00:39.402514Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:00:39.402641Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:15206 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:00:40.372906Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T09:00:42.710113Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501626116300022030:2078];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:00:42.710188Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:00:47.915179Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7501626162699300570:2076];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:00:47.915220Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00287d/r3tmp/tmpOXu05r/pdisk_1.dat 2025-05-07T09:00:48.269310Z node 4 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:00:48.347131Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:00:48.347207Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:00:48.373786Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 24731, node 4 2025-05-07T09:00:48.870742Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:00:48.870770Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:00:48.870778Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:00:48.870915Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:16332 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:00:49.572608Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T09:00:52.918347Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7501626162699300570:2076];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:00:52.918459Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:00:54.771386Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7501626192764072725:2339], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:00:54.771550Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:00:54.771824Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7501626192764072737:2342], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:00:54.776116Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480 2025-05-07T09:00:54.840002Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7501626192764072739:2343], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-05-07T09:00:54.951206Z node 4 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [4:7501626192764072828:2683] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:00:58.473202Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7501626209651936872:2206];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00287d/r3tmp/tmpjqrzUh/pdisk_1.dat 2025-05-07T09:00:58.756352Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-05-07T09:00:58.915127Z node 7 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:00:59.006119Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:00:59.006218Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:00:59.023492Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 13980, node 7 2025-05-07T09:00:59.328823Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:00:59.328848Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:00:59.328856Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:00:59.329007Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:30159 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Vers ... 06.435511Z node 7 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [7:7501626244011676481:2824] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:01:06.783887Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710661. 
Ctx: { TraceId: 01jtmzmas7b3ve01d4nzjsxrks, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=MjRiMzU4MDQtMmUzODg4NTYtMzA5NTZlZGMtZmQ5YzQ0MzA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:01:06.982863Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710662. Ctx: { TraceId: 01jtmzmb143zdsxce62rq3f5sw, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=MjRiMzU4MDQtMmUzODg4NTYtMzA5NTZlZGMtZmQ5YzQ0MzA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:01:09.590287Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7501626254336096563:2076];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:09.590349Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00287d/r3tmp/tmplW7qzH/pdisk_1.dat 2025-05-07T09:01:10.046288Z node 10 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:01:10.159930Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:01:10.160051Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:01:10.176050Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 18688, node 10 2025-05-07T09:01:10.574610Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:01:10.574636Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:01:10.574647Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:01:10.574816Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23178 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-05-07T09:01:11.291799Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:01:14.594171Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[10:7501626254336096563:2076];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:14.594266Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:01:15.233514Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7501626280105901426:2339], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:15.233595Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7501626280105901418:2336], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:15.233754Z node 10 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:15.237694Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480 2025-05-07T09:01:15.289167Z node 10 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [10:7501626280105901432:2340], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-05-07T09:01:15.347459Z node 10 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [10:7501626280105901499:2682] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:01:17.677002Z node 13 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[13:7501626291067511281:2208];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:17.677182Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00287d/r3tmp/tmpKGVr27/pdisk_1.dat 2025-05-07T09:01:18.137777Z node 13 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:01:18.182395Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:01:18.182508Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:01:18.189268Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 10178, node 13 2025-05-07T09:01:18.434004Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:01:18.434033Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:01:18.434044Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:01:18.434242Z node 13 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:24822 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:01:18.892544Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T09:01:22.293772Z node 13 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [13:7501626312542348681:2335], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:22.293878Z node 13 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [13:7501626312542348692:2338], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:22.293951Z node 13 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:22.299292Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480 2025-05-07T09:01:22.361718Z node 13 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [13:7501626312542348695:2339], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-05-07T09:01:22.440391Z node 13 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [13:7501626312542348784:2667] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:01:22.678191Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[13:7501626291067511281:2208];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:22.678301Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> YdbOlapStore::LogNonExistingUserId [GOOD] >> YdbOlapStore::LogPagingBefore >> TGRpcNewCoordinationClient::CreateDropDescribe ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> TGRpcYdbTest::CreateDeleteYqlSession [GOOD] Test command err: 2025-05-07T09:00:44.276294Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501626148197896047:2268];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:00:44.276354Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002877/r3tmp/tmpeak80S/pdisk_1.dat 2025-05-07T09:00:45.250140Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:00:45.250238Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:00:45.268731Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:00:45.308105Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T09:00:45.315258Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 25542, node 1 2025-05-07T09:00:45.808885Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:00:45.808912Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:00:45.808920Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:00:45.809080Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:7258 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:00:46.566965Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002877/r3tmp/tmpyQfz6d/pdisk_1.dat 2025-05-07T09:00:55.453301Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T09:00:55.724461Z node 4 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:00:55.796192Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:00:55.796289Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:00:55.885626Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 3264, node 4 2025-05-07T09:00:56.206652Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:00:56.206678Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:00:56.206686Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:00:56.206818Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:16411 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:00:56.563466Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:00:56.726555Z node 4 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [4:7501626197895168513:2608] txid# 281474976715658, issues: { message: "Unknown column \'BlaBla\' specified in key column list" severity: 1 } 2025-05-07T09:01:02.588856Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7501626225742566239:2208];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:02.589143Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002877/r3tmp/tmpBQsylu/pdisk_1.dat 2025-05-07T09:01:03.002399Z node 7 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:01:03.068463Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:01:03.068543Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:01:03.077040Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 30451, node 7 2025-05-07T09:01:03.282807Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:01:03.282837Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:01:03.282845Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:01:03.282957Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:18238 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:01:03.854810Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:01:10.306569Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7501626261567586368:2241];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002877/r3tmp/tmpodivGR/pdisk_1.dat 2025-05-07T09:01:10.493644Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-05-07T09:01:10.653147Z node 10 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:01:10.711527Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:01:10.711625Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:01:10.718396Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 2560, node 10 2025-05-07T09:01:10.982127Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:01:10.982154Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:01:10.982167Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:01:10.982323Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3004 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:01:11.347910Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:01:11.468938Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-05-07T09:01:11.741127Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 2025-05-07T09:01:12.262233Z node 10 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 10, TabletId: 72075186224037888 not found 2025-05-07T09:01:17.381283Z node 13 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[13:7501626290929008362:2072];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:17.381337Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002877/r3tmp/tmpHQZhRF/pdisk_1.dat 2025-05-07T09:01:17.823030Z node 13 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:01:17.851954Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:01:17.852080Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:01:17.856722Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 7414, node 13 2025-05-07T09:01:18.194733Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:01:18.194767Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:01:18.194778Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:01:18.194937Z node 13 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad 
distributable configuration TClient is connected to server localhost:19350 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:01:18.609393Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:01:22.382177Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[13:7501626290929008362:2072];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:22.382273Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> YdbTableBulkUpsertOlap::UpsertArrowDupField [GOOD] >> YdbTableBulkUpsertOlap::ParquetImportBug ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_change_exchange/unittest >> Cdc::ResolvedTimestampForDisplacedUpsert [GOOD] Test command err: 2025-05-07T08:57:18.839062Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501625262767749439:2064];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:57:18.839133Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003045/r3tmp/tmpNAplCv/pdisk_1.dat 2025-05-07T08:57:19.635384Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:57:19.672530Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:57:19.672629Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:57:19.676321Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 15179, node 1 2025-05-07T08:57:19.983066Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:57:19.983091Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:57:19.983097Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 
2025-05-07T08:57:19.983210Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-05-07T08:57:20.057888Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-05-07T08:57:20.088430Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710657:0, at schemeshard: 72057594046644480 2025-05-07T08:57:20.161760Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:7501625271357684649:2309] 2025-05-07T08:57:20.162182Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-05-07T08:57:20.176803Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-05-07T08:57:20.176864Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-05-07T08:57:20.179333Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-05-07T08:57:20.179404Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-05-07T08:57:20.179443Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-05-07T08:57:20.179885Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-05-07T08:57:20.179947Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-05-07T08:57:20.179999Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:7501625271357684663:2309] in generation 1 2025-05-07T08:57:20.181503Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-05-07T08:57:20.219375Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-05-07T08:57:20.219560Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-05-07T08:57:20.219658Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:7501625271357684665:2310] 2025-05-07T08:57:20.219681Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-05-07T08:57:20.219697Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-05-07T08:57:20.219709Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T08:57:20.219873Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-05-07T08:57:20.219967Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-05-07T08:57:20.220005Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T08:57:20.220026Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 
immediate 0 planned 0 2025-05-07T08:57:20.220043Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-05-07T08:57:20.220076Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T08:57:20.246743Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:7501625271357684639:2299], serverId# [1:7501625271357684668:2313], sessionId# [0:0:0] 2025-05-07T08:57:20.246977Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-05-07T08:57:20.247243Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976710657 ssId 72057594046644480 seqNo 2:1 2025-05-07T08:57:20.247338Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976710657 at tablet 72075186224037888 2025-05-07T08:57:20.249000Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-05-07T08:57:20.250652Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-05-07T08:57:20.250733Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-05-07T08:57:20.254899Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:7501625271357684681:2320], serverId# [1:7501625271357684684:2323], sessionId# [0:0:0] 2025-05-07T08:57:20.254960Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-05-07T08:57:20.260093Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976710657 at step 1746608240300 at tablet 72075186224037888 { Transactions { TxId: 281474976710657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1746608240300 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2025-05-07T08:57:20.260136Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T08:57:20.260319Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T08:57:20.260335Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-05-07T08:57:20.260381Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1746608240300:281474976710657] in PlanQueue unit at 72075186224037888 2025-05-07T08:57:20.260654Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1746608240300:281474976710657 keys extracted: 0 2025-05-07T08:57:20.260843Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-05-07T08:57:20.260941Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T08:57:20.260993Z node 1 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE 
TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-05-07T08:57:20.263810Z node 1 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-05-07T08:57:20.265925Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-05-07T08:57:20.267284Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 1746608240299 2025-05-07T08:57:20.267299Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T08:57:20.267328Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1746608240307 2025-05-07T08:57:20.270626Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1746608240300} 2025-05-07T08:57:20.270679Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T08:57:20.270739Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T08:57:20.270768Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-05-07T08:57:20.270782Z node 1 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-05-07T08:57:20.270817Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1746608240300 : 281474976710657] from 72075186224037888 at tablet 72075186224037888 send result to client [1:7501625267062717143:2189], exec latency: 4 ms, propose latency: 10 ms 2025-05-07T08:57:20.270845Z node 1 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976710657 state Ready TxInFly 0 2025-05-07T08:57:20.270876Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T08:57:20.277225Z node 1 :CHANGE_EXCHANGE DEBUG: change_sender.cpp:153: [ChangeSender][72075186224037888:1][1:7501625271357684665:2310][Inactive] Handle NKikimrChangeExchange.TEvActivateSender 2025-05-07T08:57:20.282922Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976710657 datashard 72075186224037888 state Ready 2025-05-07T08:57:20.282977Z node 1 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-05-07T08:57:20.304329Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-05-07T08:57:20.304495Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-05-07T08:57:20.304629Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose ... Timecast::TEvGranularUpdate from TX_MEDIATOR_TABLET_QUEUE_ACTOR to TX_MEDIATOR_TIMECAST_ACTOR ... unblocking NKikimr::TEvMediatorTimecast::TEvGranularUpdate from TX_MEDIATOR_TABLET_QUEUE_ACTOR to TX_MEDIATOR_TIMECAST_ACTOR ... 
unblocking NKikimr::TEvMediatorTimecast::TEvUpdate from TX_MEDIATOR_TABLET_QUEUE_ACTOR to TX_MEDIATOR_TIMECAST_ACTOR ... unblocking NKikimr::TEvMediatorTimecast::TEvUpdate from TX_MEDIATOR_TABLET_QUEUE_ACTOR to TX_MEDIATOR_TIMECAST_ACTOR 2025-05-07T09:01:22.280550Z node 27 :TX_DATASHARD DEBUG: datashard.cpp:3780: Notified by mediator time cast with PlanStep# 9000 at tablet 72075186224037888 2025-05-07T09:01:22.280751Z node 27 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:01:22.280934Z node 27 :TX_DATASHARD INFO: cdc_stream_heartbeat.cpp:42: [CdcStreamHeartbeat] Emit change records: edge# v9000/18446744073709551615, at tablet# 72075186224037888 2025-05-07T09:01:22.281196Z node 27 :TX_DATASHARD DEBUG: datashard.cpp:874: PersistChangeRecord: record: { Order: 6 Group: 0 Step: 9000 TxId: 0 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] Kind: CdcHeartbeat Source: Unspecified Body: 0b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 0 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037888 2025-05-07T09:01:22.285799Z node 27 :TX_DATASHARD INFO: cdc_stream_heartbeat.cpp:78: [CdcStreamHeartbeat] Enqueue 1 change record(s): at tablet# 72075186224037888 2025-05-07T09:01:22.286013Z node 27 :TX_DATASHARD DEBUG: datashard.cpp:1170: EnqueueChangeRecords: at tablet: 72075186224037888, records: { Order: 6 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] BodySize: 0 TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 0 } 2025-05-07T09:01:22.286161Z node 27 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:01:22.286299Z node 27 :TX_DATASHARD DEBUG: datashard.cpp:3812: Waiting for PlanStep# 12000 from mediator time cast 2025-05-07T09:01:22.286598Z node 27 :CHANGE_EXCHANGE DEBUG: change_sender.cpp:71: [ChangeSender][72075186224037888:1][27:682:2578] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvEnqueueRecords { Records [{ Order: 6 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] BodySize: 0 }] } 2025-05-07T09:01:22.286877Z node 27 :CHANGE_EXCHANGE DEBUG: change_sender_cdc_stream.cpp:628: [CdcChangeSenderMain][72075186224037888:1][27:843:2686] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvEnqueueRecords { Records [{ Order: 6 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] BodySize: 0 }] } 2025-05-07T09:01:22.287595Z node 27 :TX_DATASHARD INFO: datashard_change_sending.cpp:215: TTxRequestChangeRecords Execute: at tablet# 72075186224037888 2025-05-07T09:01:22.288036Z node 27 :TX_DATASHARD DEBUG: datashard_change_sending.cpp:235: Send 1 change records: to# [27:843:2686], at tablet# 72075186224037888 2025-05-07T09:01:22.288157Z node 27 :TX_DATASHARD INFO: datashard_change_sending.cpp:260: TTxRequestChangeRecords Complete: sent# 1, forgotten# 0, left# 0, at tablet# 72075186224037888 2025-05-07T09:01:22.288424Z node 27 :CHANGE_EXCHANGE DEBUG: change_sender_cdc_stream.cpp:633: [CdcChangeSenderMain][72075186224037888:1][27:843:2686] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRecords { Records [{ Order: 6 Group: 0 Step: 9000 TxId: 0 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] Kind: CdcHeartbeat Source: Unspecified Body: 0b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 0 LockId: 0 LockOffset: 0 }] } 2025-05-07T09:01:22.288966Z node 27 :CHANGE_EXCHANGE DEBUG: change_sender_cdc_stream.cpp:111: 
[CdcChangeSenderPartition][72075186224037888:1][0][72075186224037889][27:928:2686] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRecords { Records [{ Order: 6 Group: 0 Step: 9000 TxId: 0 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] Kind: CdcHeartbeat Source: Unspecified Body: 0b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 0 LockId: 0 LockOffset: 0 }] } 2025-05-07T09:01:22.289520Z node 27 :PERSQUEUE DEBUG: pq_impl.cpp:342: Handle TEvRequest topic: 'streamImpl' requestId: 2025-05-07T09:01:22.289666Z node 27 :PERSQUEUE DEBUG: pq_impl.cpp:2788: [PQ: 72075186224037889] got client message batch for topic 'Table/Stream/streamImpl' partition 0 2025-05-07T09:01:22.289874Z node 27 :PERSQUEUE DEBUG: pq_impl.cpp:377: Answer ok topic: 'streamImpl' partition: 0 messageNo: 10 requestId: cookie: 6 2025-05-07T09:01:22.290153Z node 27 :PERSQUEUE DEBUG: pq_impl.cpp:342: Handle TEvRequest topic: 'streamImpl' requestId: 2025-05-07T09:01:22.290207Z node 27 :PERSQUEUE DEBUG: pq_impl.cpp:2788: [PQ: 72075186224037889] got client message batch for topic 'Table/Stream/streamImpl' partition 0 2025-05-07T09:01:22.290334Z node 27 :PERSQUEUE DEBUG: pq_impl.cpp:2192: [PQ: 72075186224037889] got client message topic: Table/Stream/streamImpl partition: 0 SourceId: '\00072075186224037888' SeqNo: 6 partNo : 0 messageNo: 11 size 26 offset: -1 2025-05-07T09:01:22.290636Z node 27 :PERSQUEUE DEBUG: partition_write.cpp:1162: [PQ: 72075186224037889, Partition: 0, State: StateIdle] Topic 'Table/Stream/streamImpl' partition 0 process heartbeat sourceId '\00072075186224037888' version v9000/0 2025-05-07T09:01:22.290821Z node 27 :PERSQUEUE INFO: partition_write.cpp:1658: [PQ: 72075186224037889, Partition: 0, State: StateIdle] Topic 'Table/Stream/streamImpl' partition 0 emit heartbeat v9000/0 2025-05-07T09:01:22.291132Z node 27 :PERSQUEUE DEBUG: partition_write.cpp:1233: [PQ: 72075186224037889, Partition: 0, State: StateIdle] Topic 'Table/Stream/streamImpl' partition 0 part blob processing sourceId '\00072075186224037889' seqNo 0 partNo 0 2025-05-07T09:01:22.292272Z node 27 :PERSQUEUE DEBUG: partition_write.cpp:1333: [PQ: 72075186224037889, Partition: 0, State: StateIdle] Topic 'Table/Stream/streamImpl' partition 0 part blob complete sourceId '\00072075186224037889' seqNo 0 partNo 0 FormedBlobsCount 0 NewHead: Offset 5 PartNo 0 PackedSize 107 count 1 nextOffset 6 batches 1 2025-05-07T09:01:22.293924Z node 27 :PERSQUEUE DEBUG: partition_write.cpp:1623: [PQ: 72075186224037889, Partition: 0, State: StateIdle] Add new write blob: topic 'Table/Stream/streamImpl' partition 0 compactOffset 5,1 HeadOffset 0 endOffset 5 curOffset 6 d0000000000_00000000000000000005_00000_0000000001_00000| size 93 WTime 8979 2025-05-07T09:01:22.294395Z node 27 :PERSQUEUE DEBUG: partition.cpp:2182: [PQ: 72075186224037889, Partition: 0, State: StateIdle] === DumpKeyValueRequest === 2025-05-07T09:01:22.294525Z node 27 :PERSQUEUE DEBUG: partition.cpp:2183: [PQ: 72075186224037889, Partition: 0, State: StateIdle] --- delete ---------------- 2025-05-07T09:01:22.294677Z node 27 :PERSQUEUE DEBUG: partition.cpp:2189: [PQ: 72075186224037889, Partition: 0, State: StateIdle] [x0000000000, x0000000001) 2025-05-07T09:01:22.294799Z node 27 :PERSQUEUE DEBUG: partition.cpp:2191: [PQ: 72075186224037889, Partition: 0, State: StateIdle] --- write ----------------- 2025-05-07T09:01:22.294928Z node 27 :PERSQUEUE DEBUG: partition.cpp:2194: [PQ: 72075186224037889, Partition: 0, State: StateIdle] m0000000000p72075186224037889 
2025-05-07T09:01:22.295019Z node 27 :PERSQUEUE DEBUG: partition.cpp:2194: [PQ: 72075186224037889, Partition: 0, State: StateIdle] d0000000000_00000000000000000005_00000_0000000001_00000| 2025-05-07T09:01:22.295060Z node 27 :PERSQUEUE DEBUG: partition.cpp:2194: [PQ: 72075186224037889, Partition: 0, State: StateIdle] i0000000000 2025-05-07T09:01:22.295124Z node 27 :PERSQUEUE DEBUG: partition.cpp:2196: [PQ: 72075186224037889, Partition: 0, State: StateIdle] --- rename ---------------- 2025-05-07T09:01:22.295244Z node 27 :PERSQUEUE DEBUG: partition.cpp:2201: [PQ: 72075186224037889, Partition: 0, State: StateIdle] =========================== 2025-05-07T09:01:22.295491Z node 27 :PERSQUEUE DEBUG: read.h:262: CacheProxy. Passthrough write request to KV 2025-05-07T09:01:22.295764Z node 27 :PERSQUEUE DEBUG: read.h:300: CacheProxy. Passthrough blob. Partition 0 offset 5 partNo 0 count 1 size 93 2025-05-07T09:01:22.297522Z node 27 :PERSQUEUE DEBUG: cache_eviction.h:315: Caching head blob in L1. Partition 0 offset 5 count 1 size 93 actorID [27:798:2659] 2025-05-07T09:01:22.297867Z node 27 :PERSQUEUE DEBUG: pq_l2_cache.cpp:120: PQ Cache (L2). Adding blob. Tablet '72075186224037889' partition 0 offset 5 partno 0 count 1 parts 0 size 93 2025-05-07T09:01:22.308598Z node 27 :PERSQUEUE DEBUG: partition_write.cpp:524: [PQ: 72075186224037889, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 44 WriteNewSizeFromSupportivePartitions# 0 2025-05-07T09:01:22.308865Z node 27 :PERSQUEUE DEBUG: partition_write.cpp:58: [PQ: 72075186224037889, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-05-07T09:01:22.309075Z node 27 :PERSQUEUE DEBUG: partition_write.cpp:324: [PQ: 72075186224037889, Partition: 0, State: StateIdle] Answering for message sourceid: '\00072075186224037888', Topic: 'Table/Stream/streamImpl', Partition: 0, SeqNo: 6, partNo: 0, Offset: 5 is stored on disk 2025-05-07T09:01:22.309664Z node 27 :PERSQUEUE DEBUG: pq_impl.cpp:377: Answer ok topic: 'streamImpl' partition: 0 messageNo: 11 requestId: cookie: 6 2025-05-07T09:01:22.310136Z node 27 :CHANGE_EXCHANGE DEBUG: change_sender_cdc_stream.cpp:160: [CdcChangeSenderPartition][72075186224037888:1][0][72075186224037889][27:928:2686] Handle NKikimrClient.TResponse { SessionId: TxId: Success { Response: Status: 1 ErrorCode: OK PartitionResponse { CmdWriteResult { AlreadyWritten: false SourceId: "\00072075186224037888" SeqNo: 6 Offset: 5 WriteTimestampMS: 8979 PartitionQuotedTimeMs: 0 TotalTimeInPartitionQueueMs: 0 WriteTimeMs: 0 TopicQuotedTimeMs: 0 WrittenInTx: false } Cookie: 6 } } } 2025-05-07T09:01:22.310344Z node 27 :CHANGE_EXCHANGE DEBUG: change_sender_cdc_stream.cpp:643: [CdcChangeSenderMain][72075186224037888:1][27:843:2686] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 0 } 2025-05-07T09:01:22.310654Z node 27 :TX_DATASHARD INFO: datashard_change_sending.cpp:310: TTxRemoveChangeRecords Execute: records# 1, at tablet# 72075186224037888 2025-05-07T09:01:22.310750Z node 27 :TX_DATASHARD DEBUG: datashard.cpp:1087: RemoveChangeRecord: order: 6, at tablet: 72075186224037888 2025-05-07T09:01:22.311604Z node 27 :TX_DATASHARD INFO: datashard_change_sending.cpp:335: TTxRemoveChangeRecords Complete: removed# 1, left# 0, at tablet# 72075186224037888 ... 
checking the update is logged before the new resolved timestamp >>>>> GetRecords path=/Root/Table/Stream partitionId=0 2025-05-07T09:01:22.424266Z node 27 :PERSQUEUE DEBUG: pq_impl.cpp:342: Handle TEvRequest topic: 'streamImpl' requestId: 2025-05-07T09:01:22.424435Z node 27 :PERSQUEUE DEBUG: pq_impl.cpp:2788: [PQ: 72075186224037889] got client message batch for topic 'Table/Stream/streamImpl' partition 0 2025-05-07T09:01:22.424736Z node 27 :PERSQUEUE DEBUG: partition_read.cpp:731: [PQ: 72075186224037889, Partition: 0, State: StateIdle] read cookie 8 Topic 'Table/Stream/streamImpl' partition 0 user $without_consumer offset 0 count 10000 size 26214400 endOffset 6 max time lag 0ms effective offset 0 2025-05-07T09:01:22.424874Z node 27 :PERSQUEUE DEBUG: partition_read.cpp:931: [PQ: 72075186224037889, Partition: 0, State: StateIdle] read cookie 8 added 0 blobs, size 0 count 0 last offset 0, current partition end offset: 6 2025-05-07T09:01:22.425086Z node 27 :PERSQUEUE DEBUG: partition_read.cpp:948: [PQ: 72075186224037889, Partition: 0, State: StateIdle] Reading cookie 8. All data is from uncompacted head. 2025-05-07T09:01:22.425189Z node 27 :PERSQUEUE DEBUG: partition_read.cpp:420: FormAnswer for 0 blobs 2025-05-07T09:01:22.425554Z node 27 :PERSQUEUE DEBUG: pq_impl.cpp:377: Answer ok topic: 'streamImpl' partition: 0 messageNo: 0 requestId: cookie: 0 >> TGRpcYdbTest::DropTableBadRequest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLTestsWithReboots::CopyTable [GOOD] Test command err: ==== RunWithTabletReboots =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2141] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2141] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:116:2058] recipient: [1:111:2142] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:116:2058] recipient: [1:111:2142] Leader for TabletID 72057594046678944 is [1:123:2149] sender: [1:126:2058] recipient: [1:108:2140] Leader for TabletID 72057594046447617 is [1:129:2154] sender: [1:131:2058] recipient: [1:109:2141] Leader for TabletID 72057594046316545 is [1:134:2157] sender: [1:136:2058] recipient: [1:111:2142] 2025-05-07T08:58:45.156441Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T08:58:45.156570Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:58:45.156608Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-05-07T08:58:45.156645Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T08:58:45.156712Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 
2025-05-07T08:58:45.156752Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T08:58:45.156813Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T08:58:45.156911Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T08:58:45.157671Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T08:58:45.159971Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T08:58:45.271005Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7610: Got new config: QueryServiceConfig { AvailableExternalDataSources: "ObjectStorage" AvailableExternalDataSources: "ClickHouse" AvailableExternalDataSources: "PostgreSQL" AvailableExternalDataSources: "MySQL" AvailableExternalDataSources: "Ydb" AvailableExternalDataSources: "YT" AvailableExternalDataSources: "Greenplum" AvailableExternalDataSources: "MsSQLServer" AvailableExternalDataSources: "Oracle" AvailableExternalDataSources: "Logging" AvailableExternalDataSources: "Solomon" } 2025-05-07T08:58:45.271091Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:58:45.271913Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# ObjectStorage, ClickHouse, PostgreSQL, MySQL, Ydb, YT, Greenplum, MsSQLServer, Oracle, Logging, Solomon Leader for TabletID 72057594046447617 is [1:129:2154] sender: [1:175:2058] recipient: [1:15:2062] 2025-05-07T08:58:45.293361Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T08:58:45.293641Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T08:58:45.293834Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T08:58:45.347346Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T08:58:45.347655Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T08:58:45.348437Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:45.348711Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T08:58:45.367066Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:58:45.368922Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T08:58:45.369016Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: 
TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T08:58:45.369230Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T08:58:45.369293Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T08:58:45.369377Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T08:58:45.369543Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:213:2058] recipient: [1:211:2212] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:213:2058] recipient: [1:211:2212] Leader for TabletID 72057594037968897 is [1:217:2216] sender: [1:218:2058] recipient: [1:211:2212] 2025-05-07T08:58:45.415238Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:123:2149] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T08:58:45.651553Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T08:58:45.651808Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:45.652065Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T08:58:45.652314Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T08:58:45.652377Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:45.656714Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:45.656902Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T08:58:45.657084Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:45.657137Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T08:58:45.657195Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to 
create, do next state 2025-05-07T08:58:45.657237Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T08:58:45.662876Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:45.662955Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T08:58:45.662996Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T08:58:45.667516Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:45.667595Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T08:58:45.667645Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T08:58:45.667728Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T08:58:45.671467Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T08:58:45.680735Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T08:58:45.680995Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:134:2157] sender: [1:253:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T08:58:45.682047Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T08:58:45.682225Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 134 RawX2: 4294969453 } } St ... 
plete, at schemeshard: 72057594046678944, cookie: 1003 2025-05-07T09:01:23.820659Z node 97 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 2025-05-07T09:01:23.824118Z node 97 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6245: Handle TEvProposeTransactionResult, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409547 Status: COMPLETE TxId: 1003 Step: 5000004 OrderId: 1003 ExecLatency: 3 ProposeLatency: 4 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409547 CpuTimeUsec: 1670 } } 2025-05-07T09:01:23.824189Z node 97 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation FindRelatedPartByTabletId, TxId: 1003, tablet: 72075186233409547, partId: 0 2025-05-07T09:01:23.824347Z node 97 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 1003:0, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409547 Status: COMPLETE TxId: 1003 Step: 5000004 OrderId: 1003 ExecLatency: 3 ProposeLatency: 4 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409547 CpuTimeUsec: 1670 } } 2025-05-07T09:01:23.824450Z node 97 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_part.cpp:108: HandleReply TEvDataShard::TEvProposeTransactionResult Ignore message: tablet# 72057594046678944, ev# TxKind: TX_KIND_SCHEME Origin: 72075186233409547 Status: COMPLETE TxId: 1003 Step: 5000004 OrderId: 1003 ExecLatency: 3 ProposeLatency: 4 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409547 CpuTimeUsec: 1670 } } 2025-05-07T09:01:23.824971Z node 97 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 1003:0, at schemeshard: 72057594046678944 2025-05-07T09:01:23.825445Z node 97 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5472: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 434 RawX2: 416611830116 } Origin: 72075186233409547 State: 2 TxId: 1003 Step: 0 Generation: 2 2025-05-07T09:01:23.825492Z node 97 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation FindRelatedPartByTabletId, TxId: 1003, tablet: 72075186233409547, partId: 0 2025-05-07T09:01:23.825619Z node 97 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 1003:0, at schemeshard: 72057594046678944, message: Source { RawX1: 434 RawX2: 416611830116 } Origin: 72075186233409547 State: 2 TxId: 1003 Step: 0 Generation: 2 2025-05-07T09:01:23.825669Z node 97 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1005: NTableState::TProposedWaitParts operationId# 1003:0 HandleReply TEvSchemaChanged at tablet: 72057594046678944 2025-05-07T09:01:23.825754Z node 97 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1009: NTableState::TProposedWaitParts operationId# 1003:0 HandleReply TEvSchemaChanged at tablet: 72057594046678944 message: Source { RawX1: 434 RawX2: 416611830116 } Origin: 72075186233409547 State: 2 TxId: 1003 Step: 0 Generation: 2 2025-05-07T09:01:23.825811Z node 97 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:655: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 1003:0, shardIdx: 72057594046678944:2, datashard: 72075186233409547, left await: 0, txState.State: ProposedWaitParts, 
txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-05-07T09:01:23.825852Z node 97 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:674: all shard schema changes has been received, operationId: 1003:0, at schemeshard: 72057594046678944 2025-05-07T09:01:23.825897Z node 97 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:686: send schema changes ack message, operation: 1003:0, datashard: 72075186233409546, at schemeshard: 72057594046678944 2025-05-07T09:01:23.825937Z node 97 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:686: send schema changes ack message, operation: 1003:0, datashard: 72075186233409547, at schemeshard: 72057594046678944 2025-05-07T09:01:23.826093Z node 97 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1003:0 129 -> 240 2025-05-07T09:01:23.830104Z node 97 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 1003:0, at schemeshard: 72057594046678944 2025-05-07T09:01:23.830511Z node 97 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 1003:0, at schemeshard: 72057594046678944 2025-05-07T09:01:23.830981Z node 97 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1003:0, at schemeshard: 72057594046678944 2025-05-07T09:01:23.831044Z node 97 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_copy_table.cpp:306: TCopyTable TCopyTableBarrier operationId: 1003:0ProgressState, operation type TxCopyTable 2025-05-07T09:01:23.831097Z node 97 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:1059: Set barrier, OperationId: 1003:0, name: CopyTableBarrier, done: 0, blocked: 1, parts count: 1 2025-05-07T09:01:23.831138Z node 97 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:1103: All parts have reached barrier, tx: 1003, done: 0, blocked: 1 2025-05-07T09:01:23.831208Z node 97 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_copy_table.cpp:289: TCopyTable TCopyTableBarrier operationId: 1003:0 HandleReply TEvPrivate::TEvCompleteBarrier, msg: NKikimr::NSchemeShard::TEvPrivate::TEvCompleteBarrier { TxId: 1003 Name: CopyTableBarrier }, at tablet# 72057594046678944 2025-05-07T09:01:23.831251Z node 97 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1003:0 240 -> 240 2025-05-07T09:01:23.839842Z node 97 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1003:0, at schemeshard: 72057594046678944 2025-05-07T09:01:23.839927Z node 97 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046678944] TDone opId# 1003:0 ProgressState 2025-05-07T09:01:23.840067Z node 97 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#1003:0 progress is 1/1 2025-05-07T09:01:23.840107Z node 97 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 1003 ready parts: 1/1 2025-05-07T09:01:23.840153Z node 97 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#1003:0 progress is 1/1 2025-05-07T09:01:23.840184Z node 97 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 1003 ready parts: 1/1 2025-05-07T09:01:23.840223Z node 97 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 1003, ready parts: 1/1, is published: true 
2025-05-07T09:01:23.840268Z node 97 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 1003 ready parts: 1/1 2025-05-07T09:01:23.840316Z node 97 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 1003:0 2025-05-07T09:01:23.840350Z node 97 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 1003:0 2025-05-07T09:01:23.840530Z node 97 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 3 2025-05-07T09:01:23.840571Z node 97 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate source path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 TestModificationResult got TxId: 1003, wait until txId: 1003 TestWaitNotification wait txId: 1003 2025-05-07T09:01:23.851433Z node 97 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 1003: send EvNotifyTxCompletion 2025-05-07T09:01:23.851503Z node 97 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 1003 2025-05-07T09:01:23.851929Z node 97 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1003, at schemeshard: 72057594046678944 2025-05-07T09:01:23.852098Z node 97 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 1003: got EvNotifyTxCompletionResult 2025-05-07T09:01:23.852134Z node 97 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 1003: satisfy waiter [97:528:2489] TestWaitNotification: OK eventTxId 1003 2025-05-07T09:01:23.852654Z node 97 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/TTLEnabledTableCopy" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T09:01:23.852929Z node 97 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/TTLEnabledTableCopy" took 286us result status StatusSuccess 2025-05-07T09:01:23.853601Z node 97 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/TTLEnabledTableCopy" PathDescription { Self { Name: "TTLEnabledTableCopy" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 1003 CreateStep: 5000004 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "TTLEnabledTableCopy" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "modified_at" Type: "Timestamp" TypeId: 50 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 1 TTLSettings { Enabled { ColumnName: "modified_at" ExpireAfterSeconds: 3600 Tiers { ApplyAfterSeconds: 3600 Delete { } } } } IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 
0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 4 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> YdbYqlClient::CreateTableWithPartitionAtKeys [GOOD] >> YdbYqlClient::CreateAndAltertTableWithPartitioningBySize >> YdbS3Internal::TestS3Listing >> YdbTableBulkUpsert::Simple >> Cdc::InitialScanAndResolvedTimestamps [GOOD] >> GrpcConnectionStringParserTest::CommonClientSettingsFromConnectionString [GOOD] >> LocalityOperation::LocksFromAnotherTenants+UseSink >> YdbYqlClient::ConnectDbAclIsStrictlyChecked >> YdbImport::EmptyData [GOOD] >> YdbImport::ImportFromS3ToExistingTable >> YdbYqlClient::TestYqlIssues >> YdbYqlClient::TestColumnOrder >> TGRpcLdapAuthentication::LdapAuthWithValidCredentials >> TGRpcNewCoordinationClientAuth::OwnersAndPermissions [GOOD] >> TGRpcYdbTest::AlterTableAddIndexBadRequest >> TGRpcLdapAuthentication::LdapAuthSettingsWithEmptyBindPassword [GOOD] >> TGRpcLdapAuthentication::LdapAuthSetIncorrectDomain >> KqpUniqueIndex::InsertFkPartialColumnSet [GOOD] >> KqpUniqueIndex::InsertFkPkOverlap >> TGRpcNewClient::YqlExplainDataQuery [GOOD] >> TGRpcNewCoordinationClient::CheckUnauthorized >> CommitOffset::Commit_WithSession_ParentNotFinished_SameSession [GOOD] >> CommitOffset::Commit_WithSession_ParentNotFinished_OtherSession_ParentCommittedToEnd >> TGRpcNewCoordinationClient::SessionMethods [GOOD] >> TGRpcNewCoordinationClient::SessionDescribeWatchData >> YdbYqlClient::AlterTableAddIndexAsyncOp [GOOD] >> YdbYqlClient::AlterTableAddIndexWithDataColumn >> TTableProfileTests::OverwriteCompactionPolicy [GOOD] >> TTableProfileTests::OverwriteExecutionPolicy ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_change_exchange/unittest >> Cdc::InitialScanAndResolvedTimestamps [GOOD] Test command err: 2025-05-07T08:57:18.365838Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501625262580190267:2130];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:57:18.365882Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003019/r3tmp/tmphlMKij/pdisk_1.dat 2025-05-07T08:57:18.828448Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:57:18.828517Z node 1 :HIVE WARN: 
node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:57:18.830287Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:57:18.863525Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 18275, node 1 2025-05-07T08:57:19.050729Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:57:19.050754Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:57:19.050760Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:57:19.050882Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-05-07T08:57:19.098474Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-05-07T08:57:19.148795Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710657:0, at schemeshard: 72057594046644480 2025-05-07T08:57:19.185416Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:7501625266875158102:2308] 2025-05-07T08:57:19.185670Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-05-07T08:57:19.198405Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-05-07T08:57:19.198470Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-05-07T08:57:19.201316Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-05-07T08:57:19.201372Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-05-07T08:57:19.201419Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-05-07T08:57:19.201806Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-05-07T08:57:19.201863Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-05-07T08:57:19.201890Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:7501625266875158116:2308] in generation 1 2025-05-07T08:57:19.206681Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-05-07T08:57:19.260300Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-05-07T08:57:19.260471Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-05-07T08:57:19.260520Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:7501625266875158120:2309] 2025-05-07T08:57:19.260533Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-05-07T08:57:19.260546Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot 
activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-05-07T08:57:19.260554Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T08:57:19.260749Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-05-07T08:57:19.260836Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-05-07T08:57:19.260853Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T08:57:19.260874Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-05-07T08:57:19.260888Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-05-07T08:57:19.260901Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T08:57:19.262092Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:7501625266875158097:2297], serverId# [1:7501625266875158119:2308], sessionId# [0:0:0] 2025-05-07T08:57:19.262204Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-05-07T08:57:19.262476Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976710657 ssId 72057594046644480 seqNo 2:1 2025-05-07T08:57:19.262557Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976710657 at tablet 72075186224037888 2025-05-07T08:57:19.264648Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-05-07T08:57:19.264988Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-05-07T08:57:19.265042Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-05-07T08:57:19.269559Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:7501625266875158134:2316], serverId# [1:7501625266875158136:2318], sessionId# [0:0:0] 2025-05-07T08:57:19.269607Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-05-07T08:57:19.274977Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976710657 at step 1746608239313 at tablet 72075186224037888 { Transactions { TxId: 281474976710657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1746608239313 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2025-05-07T08:57:19.275027Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T08:57:19.275272Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T08:57:19.275288Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 
active planned 0 immediate 0 planned 1 2025-05-07T08:57:19.275310Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1746608239313:281474976710657] in PlanQueue unit at 72075186224037888 2025-05-07T08:57:19.275552Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1746608239313:281474976710657 keys extracted: 0 2025-05-07T08:57:19.275705Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-05-07T08:57:19.275822Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T08:57:19.275888Z node 1 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-05-07T08:57:19.278756Z node 1 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-05-07T08:57:19.279197Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-05-07T08:57:19.280974Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-05-07T08:57:19.280990Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T08:57:19.281057Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1746608239313} 2025-05-07T08:57:19.281123Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T08:57:19.281205Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T08:57:19.281228Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-05-07T08:57:19.281242Z node 1 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-05-07T08:57:19.281287Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1746608239313 : 281474976710657] from 72075186224037888 at tablet 72075186224037888 send result to client [1:7501625262580190631:2197], exec latency: 3 ms, propose latency: 5 ms 2025-05-07T08:57:19.281332Z node 1 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976710657 state Ready TxInFly 0 2025-05-07T08:57:19.281397Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T08:57:19.300304Z node 1 :CHANGE_EXCHANGE DEBUG: change_sender.cpp:153: [ChangeSender][72075186224037888:1][1:7501625266875158120:2309][Inactive] Handle NKikimrChangeExchange.TEvActivateSender 2025-05-07T08:57:19.300774Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1746608239320 2025-05-07T08:57:19.306441Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976710657 datashard 72075186224037888 state Ready 2025-05-07T08:57:19.306573Z node 1 
:TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-05-07T08:57:19.320687Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-05-07T08:57:19.334571Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-05-07T08:57:19.334692Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transa ... 24037888 2025-05-07T09:01:26.399422Z node 27 :TX_DATASHARD INFO: cdc_stream_heartbeat.cpp:42: [CdcStreamHeartbeat] Emit change records: edge# v7500/18446744073709551615, at tablet# 72075186224037888 2025-05-07T09:01:26.399771Z node 27 :TX_DATASHARD DEBUG: datashard.cpp:874: PersistChangeRecord: record: { Order: 4 Group: 0 Step: 6000 TxId: 0 PathId: [OwnerId: 72057594046644480, LocalPathId: 7] Kind: CdcHeartbeat Source: Unspecified Body: 0b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 0 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037888 2025-05-07T09:01:26.401025Z node 27 :TX_DATASHARD INFO: cdc_stream_heartbeat.cpp:42: [CdcStreamHeartbeat] Emit change records: edge# v7500/18446744073709551615, at tablet# 72075186224037888 2025-05-07T09:01:26.408872Z node 27 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715662 datashard 72075186224037888 state Ready 2025-05-07T09:01:26.409078Z node 27 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-05-07T09:01:26.422832Z node 27 :TX_DATASHARD INFO: cdc_stream_heartbeat.cpp:78: [CdcStreamHeartbeat] Enqueue 1 change record(s): at tablet# 72075186224037888 2025-05-07T09:01:26.423089Z node 27 :TX_DATASHARD DEBUG: datashard.cpp:1170: EnqueueChangeRecords: at tablet: 72075186224037888, records: { Order: 4 PathId: [OwnerId: 72057594046644480, LocalPathId: 7] BodySize: 0 TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 0 } 2025-05-07T09:01:26.423258Z node 27 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:01:26.423440Z node 27 :TX_DATASHARD DEBUG: datashard.cpp:3812: Waiting for PlanStep# 9000 from mediator time cast 2025-05-07T09:01:26.423603Z node 27 :TX_DATASHARD INFO: cdc_stream_heartbeat.cpp:78: [CdcStreamHeartbeat] Enqueue 0 change record(s): at tablet# 72075186224037888 2025-05-07T09:01:26.423741Z node 27 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:01:26.424091Z node 27 :CHANGE_EXCHANGE DEBUG: change_sender.cpp:71: [ChangeSender][72075186224037888:1][27:682:2578] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvEnqueueRecords { Records [{ Order: 4 PathId: [OwnerId: 72057594046644480, LocalPathId: 7] BodySize: 0 }] } 2025-05-07T09:01:26.424228Z node 27 :CHANGE_EXCHANGE DEBUG: change_sender_cdc_stream.cpp:628: [CdcChangeSenderMain][72075186224037888:1][27:971:2769] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvEnqueueRecords { Records [{ Order: 4 PathId: [OwnerId: 72057594046644480, LocalPathId: 7] BodySize: 0 }] } 2025-05-07T09:01:26.424940Z node 27 :TX_DATASHARD INFO: datashard_change_sending.cpp:215: TTxRequestChangeRecords Execute: at tablet# 72075186224037888 2025-05-07T09:01:26.425212Z node 27 :TX_DATASHARD DEBUG: 
datashard_change_sending.cpp:235: Send 1 change records: to# [27:971:2769], at tablet# 72075186224037888 2025-05-07T09:01:26.425272Z node 27 :TX_DATASHARD INFO: datashard_change_sending.cpp:260: TTxRequestChangeRecords Complete: sent# 1, forgotten# 0, left# 0, at tablet# 72075186224037888 2025-05-07T09:01:26.425461Z node 27 :CHANGE_EXCHANGE DEBUG: change_sender_cdc_stream.cpp:633: [CdcChangeSenderMain][72075186224037888:1][27:971:2769] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRecords { Records [{ Order: 4 Group: 0 Step: 6000 TxId: 0 PathId: [OwnerId: 72057594046644480, LocalPathId: 7] Kind: CdcHeartbeat Source: Unspecified Body: 0b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 0 LockId: 0 LockOffset: 0 }] } 2025-05-07T09:01:26.425753Z node 27 :CHANGE_EXCHANGE DEBUG: change_sender_cdc_stream.cpp:111: [CdcChangeSenderPartition][72075186224037888:1][0][72075186224037889][27:1052:2769] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRecords { Records [{ Order: 4 Group: 0 Step: 6000 TxId: 0 PathId: [OwnerId: 72057594046644480, LocalPathId: 7] Kind: CdcHeartbeat Source: Unspecified Body: 0b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 0 LockId: 0 LockOffset: 0 }] } 2025-05-07T09:01:26.426680Z node 27 :PERSQUEUE DEBUG: pq_impl.cpp:342: Handle TEvRequest topic: 'streamImpl' requestId: 2025-05-07T09:01:26.426743Z node 27 :PERSQUEUE DEBUG: pq_impl.cpp:2788: [PQ: 72075186224037889] got client message batch for topic 'Table/Stream/streamImpl' partition 0 2025-05-07T09:01:26.426865Z node 27 :PERSQUEUE DEBUG: pq_impl.cpp:377: Answer ok topic: 'streamImpl' partition: 0 messageNo: 2 requestId: cookie: 2 2025-05-07T09:01:26.427057Z node 27 :PERSQUEUE DEBUG: pq_impl.cpp:342: Handle TEvRequest topic: 'streamImpl' requestId: 2025-05-07T09:01:26.427091Z node 27 :PERSQUEUE DEBUG: pq_impl.cpp:2788: [PQ: 72075186224037889] got client message batch for topic 'Table/Stream/streamImpl' partition 0 2025-05-07T09:01:26.427149Z node 27 :PERSQUEUE DEBUG: pq_impl.cpp:2192: [PQ: 72075186224037889] got client message topic: Table/Stream/streamImpl partition: 0 SourceId: '\00072075186224037888' SeqNo: 4 partNo : 0 messageNo: 3 size 26 offset: -1 2025-05-07T09:01:26.427428Z node 27 :PERSQUEUE DEBUG: partition_write.cpp:1162: [PQ: 72075186224037889, Partition: 0, State: StateIdle] Topic 'Table/Stream/streamImpl' partition 0 process heartbeat sourceId '\00072075186224037888' version v6000/0 2025-05-07T09:01:26.427631Z node 27 :PERSQUEUE INFO: partition_write.cpp:1658: [PQ: 72075186224037889, Partition: 0, State: StateIdle] Topic 'Table/Stream/streamImpl' partition 0 emit heartbeat v6000/0 2025-05-07T09:01:26.428002Z node 27 :PERSQUEUE DEBUG: partition_write.cpp:1233: [PQ: 72075186224037889, Partition: 0, State: StateIdle] Topic 'Table/Stream/streamImpl' partition 0 part blob processing sourceId '\00072075186224037889' seqNo 0 partNo 0 2025-05-07T09:01:26.429182Z node 27 :PERSQUEUE DEBUG: partition_write.cpp:1333: [PQ: 72075186224037889, Partition: 0, State: StateIdle] Topic 'Table/Stream/streamImpl' partition 0 part blob complete sourceId '\00072075186224037889' seqNo 0 partNo 0 FormedBlobsCount 0 NewHead: Offset 3 PartNo 0 PackedSize 107 count 1 nextOffset 4 batches 1 2025-05-07T09:01:26.429845Z node 27 :PERSQUEUE DEBUG: partition_write.cpp:1623: [PQ: 72075186224037889, Partition: 0, State: StateIdle] Add new write blob: topic 'Table/Stream/streamImpl' partition 0 compactOffset 3,1 HeadOffset 0 endOffset 3 curOffset 4 
d0000000000_00000000000000000003_00000_0000000001_00000| size 93 WTime 7451 2025-05-07T09:01:26.430871Z node 27 :PERSQUEUE DEBUG: partition.cpp:2182: [PQ: 72075186224037889, Partition: 0, State: StateIdle] === DumpKeyValueRequest === 2025-05-07T09:01:26.430946Z node 27 :PERSQUEUE DEBUG: partition.cpp:2183: [PQ: 72075186224037889, Partition: 0, State: StateIdle] --- delete ---------------- 2025-05-07T09:01:26.431007Z node 27 :PERSQUEUE DEBUG: partition.cpp:2189: [PQ: 72075186224037889, Partition: 0, State: StateIdle] [x0000000000, x0000000001) 2025-05-07T09:01:26.431072Z node 27 :PERSQUEUE DEBUG: partition.cpp:2191: [PQ: 72075186224037889, Partition: 0, State: StateIdle] --- write ----------------- 2025-05-07T09:01:26.431125Z node 27 :PERSQUEUE DEBUG: partition.cpp:2194: [PQ: 72075186224037889, Partition: 0, State: StateIdle] m0000000000p72075186224037889 2025-05-07T09:01:26.431201Z node 27 :PERSQUEUE DEBUG: partition.cpp:2194: [PQ: 72075186224037889, Partition: 0, State: StateIdle] d0000000000_00000000000000000003_00000_0000000001_00000| 2025-05-07T09:01:26.431285Z node 27 :PERSQUEUE DEBUG: partition.cpp:2194: [PQ: 72075186224037889, Partition: 0, State: StateIdle] i0000000000 2025-05-07T09:01:26.431364Z node 27 :PERSQUEUE DEBUG: partition.cpp:2196: [PQ: 72075186224037889, Partition: 0, State: StateIdle] --- rename ---------------- 2025-05-07T09:01:26.431414Z node 27 :PERSQUEUE DEBUG: partition.cpp:2201: [PQ: 72075186224037889, Partition: 0, State: StateIdle] =========================== 2025-05-07T09:01:26.431589Z node 27 :PERSQUEUE DEBUG: read.h:262: CacheProxy. Passthrough write request to KV 2025-05-07T09:01:26.431709Z node 27 :PERSQUEUE DEBUG: read.h:300: CacheProxy. Passthrough blob. Partition 0 offset 3 partNo 0 count 1 size 93 2025-05-07T09:01:26.433109Z node 27 :PERSQUEUE DEBUG: cache_eviction.h:315: Caching head blob in L1. Partition 0 offset 3 count 1 size 93 actorID [27:912:2724] 2025-05-07T09:01:26.433295Z node 27 :PERSQUEUE DEBUG: pq_l2_cache.cpp:120: PQ Cache (L2). Adding blob. Tablet '72075186224037889' partition 0 offset 3 partno 0 count 1 parts 0 size 93 2025-05-07T09:01:26.446351Z node 27 :PERSQUEUE DEBUG: partition_write.cpp:524: [PQ: 72075186224037889, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 44 WriteNewSizeFromSupportivePartitions# 0 2025-05-07T09:01:26.446564Z node 27 :PERSQUEUE DEBUG: partition_write.cpp:58: [PQ: 72075186224037889, Partition: 0, State: StateIdle] TPartition::ReplyWrite. 
Partition: 0 2025-05-07T09:01:26.446676Z node 27 :PERSQUEUE DEBUG: partition_write.cpp:324: [PQ: 72075186224037889, Partition: 0, State: StateIdle] Answering for message sourceid: '\00072075186224037888', Topic: 'Table/Stream/streamImpl', Partition: 0, SeqNo: 4, partNo: 0, Offset: 3 is stored on disk 2025-05-07T09:01:26.447008Z node 27 :PERSQUEUE DEBUG: pq_impl.cpp:377: Answer ok topic: 'streamImpl' partition: 0 messageNo: 3 requestId: cookie: 2 2025-05-07T09:01:26.447364Z node 27 :CHANGE_EXCHANGE DEBUG: change_sender_cdc_stream.cpp:160: [CdcChangeSenderPartition][72075186224037888:1][0][72075186224037889][27:1052:2769] Handle NKikimrClient.TResponse { SessionId: TxId: Success { Response: Status: 1 ErrorCode: OK PartitionResponse { CmdWriteResult { AlreadyWritten: false SourceId: "\00072075186224037888" SeqNo: 4 Offset: 3 WriteTimestampMS: 7451 PartitionQuotedTimeMs: 0 TotalTimeInPartitionQueueMs: 0 WriteTimeMs: 0 TopicQuotedTimeMs: 0 WrittenInTx: false } Cookie: 2 } } } 2025-05-07T09:01:26.447485Z node 27 :CHANGE_EXCHANGE DEBUG: change_sender_cdc_stream.cpp:643: [CdcChangeSenderMain][72075186224037888:1][27:971:2769] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 0 } 2025-05-07T09:01:26.447787Z node 27 :TX_DATASHARD INFO: datashard_change_sending.cpp:310: TTxRemoveChangeRecords Execute: records# 1, at tablet# 72075186224037888 2025-05-07T09:01:26.447836Z node 27 :TX_DATASHARD DEBUG: datashard.cpp:1087: RemoveChangeRecord: order: 4, at tablet: 72075186224037888 2025-05-07T09:01:26.459112Z node 27 :TX_DATASHARD INFO: datashard_change_sending.cpp:335: TTxRemoveChangeRecords Complete: removed# 1, left# 0, at tablet# 72075186224037888 >>>>> GetRecords path=/Root/Table/Stream partitionId=0 2025-05-07T09:01:26.605189Z node 27 :PERSQUEUE DEBUG: pq_impl.cpp:342: Handle TEvRequest topic: 'streamImpl' requestId: 2025-05-07T09:01:26.605265Z node 27 :PERSQUEUE DEBUG: pq_impl.cpp:2788: [PQ: 72075186224037889] got client message batch for topic 'Table/Stream/streamImpl' partition 0 2025-05-07T09:01:26.605436Z node 27 :PERSQUEUE DEBUG: partition_read.cpp:731: [PQ: 72075186224037889, Partition: 0, State: StateIdle] read cookie 2 Topic 'Table/Stream/streamImpl' partition 0 user $without_consumer offset 0 count 10000 size 26214400 endOffset 4 max time lag 0ms effective offset 0 2025-05-07T09:01:26.605526Z node 27 :PERSQUEUE DEBUG: partition_read.cpp:931: [PQ: 72075186224037889, Partition: 0, State: StateIdle] read cookie 2 added 0 blobs, size 0 count 0 last offset 0, current partition end offset: 4 2025-05-07T09:01:26.606002Z node 27 :PERSQUEUE DEBUG: partition_read.cpp:948: [PQ: 72075186224037889, Partition: 0, State: StateIdle] Reading cookie 2. All data is from uncompacted head. 
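[Editor's note] The entries above trace one full lifecycle of a CDC heartbeat change record: the datashard persists it (PersistChangeRecord), enqueues it to the change sender (EnqueueChangeRecords), the CdcChangeSenderMain requests and forwards it to the PersQueue partition, and only after the write is acknowledged (TEvReady) does TTxRemoveChangeRecords delete it ("removed# 1, left# 0"). The following is a minimal C++ model of that at-least-once hand-off; all types and names are hypothetical simplifications, not YDB's actual actor code, and the read that serves GetRecords continues in the log just below.

// Simplified model of the change-record hand-off seen in the log above.
// All names are illustrative; the real flow is actor-based and transactional.
#include <cstdint>
#include <iostream>
#include <map>
#include <vector>

struct TChangeRecord {
    uint64_t Order;     // monotonically increasing, e.g. "Order: 4" in the log
    uint64_t BodySize;  // 0 for CdcHeartbeat records
};

class TChangeQueueModel {
    std::map<uint64_t, TChangeRecord> Pending;  // persisted, not yet acknowledged
public:
    // Models PersistChangeRecord + EnqueueChangeRecords: the record is kept
    // until the downstream consumer confirms it.
    void Enqueue(const TChangeRecord& rec) { Pending.emplace(rec.Order, rec); }

    // Models TTxRequestChangeRecords: hand pending records to the sender.
    std::vector<TChangeRecord> Snapshot() const {
        std::vector<TChangeRecord> v;
        for (const auto& kv : Pending) v.push_back(kv.second);
        return v;
    }

    // Models TTxRemoveChangeRecords: only an explicit ack (TEvReady after a
    // successful partition write) deletes state, making delivery at-least-once.
    void Ack(uint64_t order) { Pending.erase(order); }

    size_t Left() const { return Pending.size(); }
};

int main() {
    TChangeQueueModel q;
    q.Enqueue({4, 0});                            // heartbeat record, Order 4
    for (const auto& rec : q.Snapshot()) {        // "Send 1 change records"
        std::cout << "send order=" << rec.Order << "\n";
        q.Ack(rec.Order);                         // write stored on disk -> TEvReady
    }
    std::cout << "left=" << q.Left() << "\n";     // mirrors "removed# 1, left# 0"
}

The design point the log illustrates is that removal is driven by the consumer's acknowledgement, not by the send itself: if the partition write were lost, the record would stay pending and be re-requested.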
2025-05-07T09:01:26.606128Z node 27 :PERSQUEUE DEBUG: partition_read.cpp:420: FormAnswer for 0 blobs 2025-05-07T09:01:26.607173Z node 27 :PERSQUEUE DEBUG: pq_impl.cpp:377: Answer ok topic: 'streamImpl' partition: 0 messageNo: 0 requestId: cookie: 0 |91.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_replication/core-blobstorage-ut_blobstorage-ut_replication |91.7%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_replication/core-blobstorage-ut_blobstorage-ut_replication |91.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_replication/core-blobstorage-ut_blobstorage-ut_replication >> KqpIndexes::UpsertWithoutExtraNullDelete+UseSink [GOOD] >> KqpIndexes::UpsertWithoutExtraNullDelete-UseSink >> YdbYqlClient::CreateAndAltertTableWithPartitioningBySize [GOOD] >> YdbYqlClient::CreateAndAltertTableWithPartitioningByLoad >> TGRpcYdbTest::DropTableBadRequest [GOOD] >> TGRpcYdbTest::CreateTableWithIndex >> YdbTableBulkUpsertOlap::ParquetImportBug [GOOD] >> YdbTableBulkUpsertOlap::ParquetImportBug_Datashard >> TGRpcNewCoordinationClient::CreateDropDescribe [GOOD] >> TGRpcNewCoordinationClient::CreateAlter >> YdbYqlClient::ConnectDbAclIsStrictlyChecked [GOOD] >> YdbYqlClient::ConnectDbAclIsOffWhenYdbRequestsWithoutDatabase >> YdbYqlClient::TestReadTableMultiShardWholeTable >> TPersQueueTest::LOGBROKER_7820 [GOOD] >> YdbImport::ImportFromS3ToExistingTable [GOOD] >> TYqlDecimalTests::SimpleUpsertSelect >> YdbYqlClient::TestReadTableMultiShardOneRow [GOOD] >> YdbYqlClient::TestReadTableBatchLimits >> YdbOlapStore::ManyTables [GOOD] >> YdbOlapStore::LogPagingBetween |91.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_external_data_source_reboots/schemeshard-ut_external_data_source_reboots |91.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_external_data_source_reboots/schemeshard-ut_external_data_source_reboots |91.7%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_external_data_source_reboots/schemeshard-ut_external_data_source_reboots |91.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_filestore_reboots/ydb-core-tx-schemeshard-ut_filestore_reboots |91.7%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_filestore_reboots/ydb-core-tx-schemeshard-ut_filestore_reboots |91.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_filestore_reboots/ydb-core-tx-schemeshard-ut_filestore_reboots >> KqpUniqueIndex::UpdateOnFkSelectResultSameValue [GOOD] >> KqpUniqueIndex::UpdateOnHidenChanges+DataColumn >> TGRpcYdbTest::AlterTableAddIndexBadRequest [GOOD] >> TGRpcYdbTest::BeginTxRequestError >> YdbYqlClient::RetryOperationSync [GOOD] >> YdbYqlClient::RetryOperationLimitedDuration >> TGRpcNewCoordinationClient::SessionDescribeWatchData [GOOD] >> TGRpcNewCoordinationClient::SessionDescribeWatchOwners >> TGRpcLdapAuthentication::LdapAuthWithValidCredentials [GOOD] >> TGRpcLdapAuthentication::LdapAuthWithInvalidSearchFilter >> YdbTableBulkUpsert::Nulls [GOOD] >> YdbTableBulkUpsert::NotNulls >> YdbTableBulkUpsert::Simple [GOOD] >> YdbTableBulkUpsert::SyncIndexShouldSucceed >> TGRpcNewCoordinationClient::CheckUnauthorized [GOOD] >> TGRpcNewCoordinationClient::BasicMethods >> LocalityOperation::LocksFromAnotherTenants+UseSink [GOOD] >> LocalityOperation::LocksFromAnotherTenants-UseSink |91.7%| [TA] $(B)/ydb/core/tx/datashard/ut_change_exchange/test-results/unittest/{meta.json ... 
results_accumulator.log} >> YdbYqlClient::AlterTableAddIndexWithDataColumn [GOOD] >> YdbYqlClient::CheckDefaultTableSettings1 >> YdbYqlClient::TestYqlIssues [GOOD] >> YdbYqlClient::TestYqlSessionClosed |91.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/sys_view/ut_large/ydb-core-sys_view-ut_large |91.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/sys_view/ut_large/ydb-core-sys_view-ut_large >> YdbS3Internal::TestS3Listing [GOOD] >> YdbS3Internal::TestAccessCheck >> TTableProfileTests::OverwriteExecutionPolicy [GOOD] >> TTableProfileTests::OverwritePartitioningPolicy >> YdbYqlClient::CreateAndAltertTableWithPartitioningByLoad [GOOD] >> YdbYqlClient::CreateAndAltertTableWithReadReplicasSettings >> TGRpcLdapAuthentication::LdapAuthSetIncorrectDomain [GOOD] >> YdbYqlClient::ConnectDbAclIsOffWhenYdbRequestsWithoutDatabase [GOOD] >> YdbYqlClient::TestColumnOrder [GOOD] >> YdbYqlClient::CopyTables >> YdbYqlClient::TestDecimal >> YdbTableBulkUpsertOlap::ParquetImportBug_Datashard [GOOD] >> YdbTableBulkUpsertOlap::UpsertArrowBatch_DataShard |91.8%| [TA] {RESULT} $(B)/ydb/core/tx/datashard/ut_change_exchange/test-results/unittest/{meta.json ... results_accumulator.log} |91.8%| [LD] {RESULT} $(B)/ydb/core/sys_view/ut_large/ydb-core-sys_view-ut_large >> TGRpcLdapAuthentication::LdapAuthWithInvalidSearchFilter [GOOD] >> TGRpcNewClient::SimpleYqlQuery >> TopicAutoscaling::ReBalancingAfterSplit_sessionsWithPartition [GOOD] >> TopicAutoscaling::ReadFromTimestamp_AutoscaleAwareSDK ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> TGRpcLdapAuthentication::LdapAuthSetIncorrectDomain [GOOD] Test command err: 2025-05-07T09:01:06.993279Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501626242202956987:2274];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:06.993333Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002875/r3tmp/tmpQ8iSjx/pdisk_1.dat 2025-05-07T09:01:07.610585Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:01:07.610746Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:01:07.627838Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:01:07.656177Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 4114, node 1 2025-05-07T09:01:07.692939Z node 1 :GRPC_SERVER WARN: grpc_request_proxy.cpp:509: SchemeBoardDelete /Root Strong=0 2025-05-07T09:01:07.696817Z node 1 :GRPC_SERVER WARN: grpc_request_proxy.cpp:509: SchemeBoardDelete /Root Strong=0 2025-05-07T09:01:07.973002Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:01:07.973033Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:01:07.973041Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:01:07.973152Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad 
distributable configuration TClient is connected to server localhost:2310 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:01:08.441666Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:01:13.085039Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7501626271651400261:2077];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:13.085128Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002875/r3tmp/tmpCVXa5q/pdisk_1.dat 2025-05-07T09:01:13.443146Z node 4 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:01:13.492032Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:01:13.492132Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:01:13.503977Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 21834, node 4 2025-05-07T09:01:13.762703Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:01:13.762729Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:01:13.762737Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:01:13.762878Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:18990 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:01:14.205135Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:01:18.422757Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7501626296110206174:2073];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:18.425584Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002875/r3tmp/tmprhXR3B/pdisk_1.dat 2025-05-07T09:01:18.687864Z node 7 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:01:18.725285Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:01:18.725378Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:01:18.731006Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 12165, node 7 2025-05-07T09:01:18.883271Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:01:18.883297Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:01:18.883305Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:01:18.883440Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3431 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:01:19.186125Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:01:24.324925Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7501626318424105508:2075];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:24.324994Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002875/r3tmp/tmpnmK5Ze/pdisk_1.dat 2025-05-07T09:01:24.545408Z node 10 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:01:24.667498Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:01:24.667599Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 4580, node 10 2025-05-07T09:01:24.868460Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:01:24.925482Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:01:24.925521Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:01:24.925531Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:01:24.925688Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:17238 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:01:25.301470Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:01:30.715290Z node 13 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[13:7501626346909400443:2076];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:30.716017Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002875/r3tmp/tmpwamC11/pdisk_1.dat 2025-05-07T09:01:31.121638Z node 13 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:01:31.185693Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:01:31.185799Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:01:31.193815Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 5547, node 13 2025-05-07T09:01:31.463407Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:01:31.463434Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:01:31.463445Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:01:31.463602Z node 13 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:29547 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:01:32.275775Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... >> TGRpcNewCoordinationClient::CreateAlter [GOOD] >> TGRpcNewCoordinationClient::NodeNotFound >> TGRpcNewCoordinationClient::SessionDescribeWatchOwners [GOOD] >> TGRpcNewCoordinationClient::SessionDescribeWatchReplace >> TopicAutoscaling::ReadFromTimestamp_BeforeAutoscaleAwareSDK [GOOD] >> TopicAutoscaling::ReadFromTimestamp_PQv1 >> TYqlDecimalTests::SimpleUpsertSelect [GOOD] >> TYqlDecimalTests::NegativeValues >> YdbLogStore::LogStore [GOOD] >> YdbLogStore::LogStoreNegative |91.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/async_replication/py3test >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_ttl_Timestamp-pk_types12-all_types12-index12-Timestamp--] [GOOD] |91.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_data_erasure_reboots/ydb-core-tx-schemeshard-ut_data_erasure_reboots |91.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_data_erasure_reboots/ydb-core-tx-schemeshard-ut_data_erasure_reboots |91.8%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_data_erasure_reboots/ydb-core-tx-schemeshard-ut_data_erasure_reboots |91.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/ut/federated_query/s3/ydb-core-kqp-ut-federated_query-s3 |91.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/federated_query/s3/ydb-core-kqp-ut-federated_query-s3 |91.8%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/federated_query/s3/ydb-core-kqp-ut-federated_query-s3 >> TGRpcNewCoordinationClient::BasicMethods [GOOD] >> KqpUniqueIndex::InsertFkPkOverlap [GOOD] >> YdbTableBulkUpsert::SyncIndexShouldSucceed [GOOD] >> YdbTableBulkUpsert::Overload >> TRegisterNodeOverDiscoveryService::ServerWithCertVerification_ClientProvidesExpiredCert [GOOD] >> TDatabaseQuotas::DisableWritesToDatabase [GOOD] >> TGRpcAuthentication::InvalidPassword >> YdbScripting::MultiResults >> YdbOlapStore::LogLast50 [GOOD] >> YdbOlapStore::LogGrepNonExisting >> TGRpcYdbTest::CreateTableWithIndex [GOOD] >> TGRpcYdbTest::CreateYqlSession ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> TGRpcNewCoordinationClient::BasicMethods [GOOD] Test command err: 2025-05-07T09:01:12.951698Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: 
fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501626266159772774:2276];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:12.951795Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00286a/r3tmp/tmpcOLD9t/pdisk_1.dat 2025-05-07T09:01:13.733124Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:01:13.739569Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:01:13.739664Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:01:13.749233Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 25622, node 1 2025-05-07T09:01:14.134845Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:01:14.134870Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:01:14.134876Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:01:14.134983Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:63857 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:01:14.459844Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T09:01:18.784195Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7501626294865089834:2075];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:18.784249Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00286a/r3tmp/tmpoLHUAS/pdisk_1.dat 2025-05-07T09:01:19.067761Z node 4 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 13391, node 4 2025-05-07T09:01:19.169555Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:01:19.169632Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:01:19.270028Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:01:19.372730Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:01:19.372749Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:01:19.372756Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:01:19.372873Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3522 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:01:19.708600Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:01:22.142614Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7501626312044960046:2340], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:22.143591Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7501626312044960038:2337], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:22.143678Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:22.146675Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480 2025-05-07T09:01:22.173015Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7501626312044960052:2341], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-05-07T09:01:22.245740Z node 4 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [4:7501626312044960129:2677] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:01:24.610135Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7501626318156456488:2072];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:24.610664Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00286a/r3tmp/tmpNPj9zk/pdisk_1.dat 2025-05-07T09:01:24.774032Z node 7 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:01:24.814251Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:01:24.814338Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:01:24.819526Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 26283, node 7 2025-05-07T09:01:24.888957Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:01:24.888973Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:01:24.888978Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:01:24.889098Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8889 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 
2025-05-07T09:01:25.154084Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-05-07T09:01:28.896163Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7501626335336326737:2338], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:28.896254Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:28.940340Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-05-07T09:01:29.131063Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7501626339631294202:2348], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:29.131167Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:29.131416Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7501626339631294207:2351], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:29.135083Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480 2025-05-07T09:01:29.156631Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7501626339631294209:2352], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-05-07T09:01:29.275344Z node 7 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [7:7501626339631294287:2798] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:01:30.888020Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7501626347720062957:2074];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:30.888070Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00286a/r3tmp/tmpLnyc7q/pdisk_1.dat 2025-05-07T09:01:31.170221Z node 10 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 28432, node 10 2025-05-07T09:01:31.243942Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:01:31.244036Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:01:31.328277Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:01:31.335799Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:01:31.335820Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:01:31.335828Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:01:31.336251Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:63516 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:01:31.628880Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T09:01:31.722458Z node 10 :TX_PROXY ERROR: schemereq.cpp:1030: Actor# [10:7501626352015031193:2587] txid# 281474976710658, Access denied for bad@builtin on path /Root, with access CreateTable 2025-05-07T09:01:31.722606Z node 10 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [10:7501626352015031193:2587] txid# 281474976710658, issues: { message: "Access denied for bad@builtin on path /Root" issue_code: 200000 severity: 1 } 2025-05-07T09:01:37.811884Z node 13 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[13:7501626375212582417:2074];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:37.813147Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00286a/r3tmp/tmpKkRsfk/pdisk_1.dat 2025-05-07T09:01:38.026294Z node 13 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 16174, node 13 2025-05-07T09:01:38.147788Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:01:38.147902Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:01:38.177118Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:01:38.194362Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:01:38.194388Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:01:38.194396Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:01:38.194543Z node 13 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:20279 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:01:38.558052Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 
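[Editor's note] A pattern that recurs throughout these test logs (see the WorkloadService entries a few records above) is: fetching the default resource pool returns NOT_FOUND, the test proposes creating it, a concurrent creator may win the race ("Check failed: path ... error: path exist, request accepts it"), and the loser "doublechecks" before proceeding or scheduling a retry. Below is a hedged sketch of that create-then-doublecheck idiom; the function names and statuses are stand-ins invented for illustration, not the real YDB workload-service API.

// Hypothetical sketch of the create-then-doublecheck pattern; all names
// and signatures are illustrative placeholders.
#include <iostream>
#include <string>

enum class EStatus { Ok, NotFound, AlreadyExists };

// Stand-ins for scheme operations; a real implementation would issue
// scheme transactions and describe requests against the schemeshard.
EStatus FetchPool(const std::string&)    { return EStatus::NotFound; }
EStatus CreatePool(const std::string&)   { return EStatus::AlreadyExists; } // lost the race
EStatus DescribePath(const std::string&) { return EStatus::Ok; }            // someone else created it

EStatus EnsureDefaultPool(const std::string& path) {
    if (FetchPool(path) == EStatus::Ok)
        return EStatus::Ok;                  // fast path: pool already visible
    switch (CreatePool(path)) {
        case EStatus::Ok:
            return EStatus::Ok;              // we created it ourselves
        case EStatus::AlreadyExists:
            // "path exist, request accepts it": a concurrent creator won,
            // so double-check the path is really there and proceed.
            return DescribePath(path);
        default:
            return EStatus::NotFound;        // caller schedules a retry
    }
}

int main() {
    auto st = EnsureDefaultPool("/Root/.metadata/workload_manager/pools/default");
    std::cout << (st == EStatus::Ok ? "pool ready\n" : "retry scheduled\n");
}

The idiom makes concurrent test bootstraps idempotent: every racer converges on the same existing pool instead of treating "already exists" as a failure.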
2025-05-07T09:01:38.669146Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateKesus, opId: 281474976710658:0, at schemeshard: 72057594046644480 >> YdbYqlClient::CheckDefaultTableSettings1 [GOOD] >> YdbYqlClient::CheckDefaultTableSettings2 >> YdbTableBulkUpsert::NotNulls [GOOD] >> YdbTableBulkUpsert::Errors >> TGRpcYdbTest::BeginTxRequestError [GOOD] >> YdbYqlClient::TestYqlSessionClosed [GOOD] >> YdbYqlClient::TestYqlLongSessionPrepareError ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpUniqueIndex::InsertFkPkOverlap [GOOD] Test command err: Trying to start YDB, gRPC: 4356, MsgBus: 8007 2025-05-07T09:01:09.777206Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501626255726860885:2068];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:09.777265Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003c1d/r3tmp/tmpnCYVT8/pdisk_1.dat 2025-05-07T09:01:11.024656Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T09:01:11.165388Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:01:11.310458Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:01:11.310544Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:01:11.314480Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:01:11.528967Z node 1 :BS_CONTROLLER ERROR: {BSC07@impl.h:2175} ProcessControllerEvent event processing took too much time Type# 268637706 Duration# 0.106669s 2025-05-07T09:01:11.529049Z node 1 :BS_CONTROLLER ERROR: {BSC00@bsc.cpp:666} StateWork event processing took too much time Type# 2146435078 Duration# 0.106781s TServer::EnableGrpc on GrpcPort 4356, node 1 2025-05-07T09:01:12.819327Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:01:12.819353Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:01:12.819360Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:01:12.819516Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-05-07T09:01:14.778202Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501626255726860885:2068];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:14.778273Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; TClient is connected to server localhost:8007 TClient is connected to server localhost:8007 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:01:15.791887Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:01:15.942624Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:01:16.312503Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:01:16.563612Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:01:16.617594Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:01:17.144273Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501626290086600922:2414], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:17.144406Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:20.676236Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T09:01:20.771852Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T09:01:20.932062Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T09:01:20.970717Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T09:01:21.011312Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T09:01:21.071246Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T09:01:21.136885Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T09:01:21.402334Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501626307266470798:2489], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:21.402437Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:21.402703Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501626307266470803:2492], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:21.443449Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-05-07T09:01:21.466189Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501626307266470805:2493], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T09:01:21.565900Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501626307266470864:3441] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:01:26.159071Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7261: Cannot get console configs 2025-05-07T09:01:26.159117Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:01:26.548473Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480 waiting... Trying to start YDB, gRPC: 4754, MsgBus: 20169 2025-05-07T09:01:30.697345Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7501626345281553883:2203];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:30.698011Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003c1d/r3tmp/tmpYNKFLA/pdisk_1.dat 2025-05-07T09:01:31.074992Z node 2 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:01:31.130642Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:01:31.130720Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:01:31.136181Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 4754, node 2 2025-05-07T09:01:31.286929Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:01:31.286955Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:01:31.286963Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:01:31.287087Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:20169 TClient is connected to server localhost:20169 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:01:32.300630Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:01:32.321670Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:01:32.417011Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:01:32.780297Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:01:32.900594Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:01:35.698271Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7501626345281553883:2203];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:35.698357Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:01:37.547628Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7501626375346326472:2410], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:37.547733Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:37.658605Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T09:01:37.720136Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T09:01:37.794066Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T09:01:37.860410Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T09:01:37.919744Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T09:01:37.979673Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T09:01:38.069014Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T09:01:38.234540Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7501626379641294450:2475], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:38.234683Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:38.235238Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7501626379641294455:2478], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:38.240927Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-05-07T09:01:38.276480Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7501626379641294457:2479], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T09:01:38.380373Z node 2 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [2:7501626379641294508:3422] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:01:40.070328Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480 waiting... ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> TRegisterNodeOverDiscoveryService::ServerWithCertVerification_ClientProvidesExpiredCert [GOOD] Test command err: 2025-05-07T09:00:13.436144Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501626016347585758:2279];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:00:13.436289Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00288d/r3tmp/tmpUGNo6m/pdisk_1.dat 2025-05-07T09:00:13.882441Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:00:13.902501Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:00:13.902603Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:00:13.916562Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 4645, node 1 2025-05-07T09:00:14.139314Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:00:14.139341Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:00:14.139350Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:00:14.139514Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:16492 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... 
(TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:00:14.652919Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:00:14.841656Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1784: Ticket B8B5C3C451AC1770E383E9BA77E986B66D1683CF18A303DC2401F134D41401E4 (ipv6:[::1]:53598) has now valid token of C=RU,ST=MSK,L=MSK,O=YA,OU=UtTest,CN=localhost@cert 2025-05-07T09:00:14.980734Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1784: Ticket **** (B6C6F477) (ipv6:[::1]:53622) has now valid token of root@builtin 2025-05-07T09:00:15.130505Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:758: CanInitLoginToken, domain db /Root, request db /Root, token db , DomainLoginOnly 1 2025-05-07T09:00:15.130560Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:763: CanInitLoginToken, target database candidates(1): /Root 2025-05-07T09:00:15.130576Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:816: CanInitLoginToken, database /Root, A6 error 2025-05-07T09:00:15.130626Z node 1 :TICKET_PARSER ERROR: ticket_parser_impl.h:969: Ticket **** (0C093832): Could not find correct token validator 2025-05-07T09:00:20.380298Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7501626045344494812:2074];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:00:20.380374Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00288d/r3tmp/tmps9cDWS/pdisk_1.dat 2025-05-07T09:00:20.607176Z node 4 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 14413, node 4 2025-05-07T09:00:20.702809Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:00:20.702884Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:00:20.813590Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:00:20.882905Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:00:20.882933Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:00:20.882941Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:00:20.883077Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:12734 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:00:21.438068Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:00:21.625810Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1784: Ticket B8B5C3C451AC1770E383E9BA77E986B66D1683CF18A303DC2401F134D41401E4 (ipv6:[::1]:34556) has now valid token of C=RU,ST=MSK,L=MSK,O=YA,OU=UtTest,CN=localhost@cert 2025-05-07T09:00:21.792030Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1784: Ticket **** (B6C6F477) (ipv6:[::1]:34580) has now valid token of root@builtin 2025-05-07T09:00:21.951676Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:758: CanInitLoginToken, domain db /Root, request db /Root, token db , DomainLoginOnly 1 2025-05-07T09:00:21.951706Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:763: CanInitLoginToken, target database candidates(1): /Root 2025-05-07T09:00:21.951714Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:816: CanInitLoginToken, database /Root, A6 error 2025-05-07T09:00:21.951746Z node 4 :TICKET_PARSER ERROR: ticket_parser_impl.h:969: Ticket **** (0C093832): Could not find correct token validator 2025-05-07T09:00:28.770474Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7501626077906641592:2074];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:00:28.770553Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00288d/r3tmp/tmpXd96wN/pdisk_1.dat 2025-05-07T09:00:29.253053Z node 7 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:00:29.359120Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:00:29.359198Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:00:29.365179Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 14469, node 7 2025-05-07T09:00:29.781792Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:00:29.781820Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:00:29.781829Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:00:29.781961Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:64509 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:00:30.103467Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:00:30.352048Z node 7 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1784: Ticket **** (B6C6F477) (ipv6:[::1]:33666) has now valid token of root@builtin 2025-05-07T09:00:30.448810Z node 7 :TICKET_PARSER TRACE: ticket_parser_impl.h:758: CanInitLoginToken, domain db /Root, request db /Root, token db , DomainLoginOnly 1 2025-05-07T09:00:30.448852Z node 7 :TICKET_PARSER TRACE: ticket_parser_impl.h:763: CanInitLoginToken, target database candidates(1): /Root 2025-05-07T09:00:30.448864Z node 7 :TICKET_PARSER TRACE: ticket_parser_impl.h:816: CanInitLoginToken, database /Root, A6 error 2025-05-07T09:00:30.448900Z node 7 :TICKET_PARSER ERROR: ticket_parser_i ...
784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[25:7501626283614419933:2072];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:16.050420Z node 25 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00288d/r3tmp/tmpkIM4Ip/pdisk_1.dat 2025-05-07T09:01:16.468112Z node 25 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:01:16.561032Z node 25 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(25, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:01:16.561169Z node 25 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(25, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:01:16.572025Z node 25 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(25, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 10587, node 25 2025-05-07T09:01:16.875577Z node 25 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:01:16.875610Z node 25 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:01:16.875626Z node 25 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:01:16.875818Z node 25 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:1825 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:01:17.577885Z node 25 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:01:21.038694Z node 25 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[25:7501626283614419933:2072];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:21.038806Z node 25 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; E0507 09:01:27.786378166 321099 ssl_transport_security.cc:1431] Handshake failed with fatal error SSL_ERROR_SSL: error:1417C086:SSL routines:tls_process_client_certificate:certificate verify failed. 
E0507 09:01:27.827918395 321099 ssl_transport_security.cc:1431] Handshake failed with fatal error SSL_ERROR_SSL: error:1417C086:SSL routines:tls_process_client_certificate:certificate verify failed. E0507 09:01:27.928265587 321098 ssl_transport_security.cc:1431] Handshake failed with fatal error SSL_ERROR_SSL: error:1417C086:SSL routines:tls_process_client_certificate:certificate verify failed. E0507 09:01:27.978668578 321099 ssl_transport_security.cc:1431] Handshake failed with fatal error SSL_ERROR_SSL: error:1417C086:SSL routines:tls_process_client_certificate:certificate verify failed. E0507 09:01:28.095899773 321379 ssl_transport_security.cc:1431] Handshake failed with fatal error SSL_ERROR_SSL: error:1417C086:SSL routines:tls_process_client_certificate:certificate verify failed. E0507 09:01:28.165945823 321098 ssl_transport_security.cc:1431] Handshake failed with fatal error SSL_ERROR_SSL: error:1417C086:SSL routines:tls_process_client_certificate:certificate verify failed. E0507 09:01:28.250002386 321377 ssl_transport_security.cc:1431] Handshake failed with fatal error SSL_ERROR_SSL: error:1417C086:SSL routines:tls_process_client_certificate:certificate verify failed. E0507 09:01:28.303123509 325294 ssl_transport_security.cc:1431] Handshake failed with fatal error SSL_ERROR_SSL: error:1417C086:SSL routines:tls_process_client_certificate:certificate verify failed. E0507 09:01:28.375892613 321377 ssl_transport_security.cc:1431] Handshake failed with fatal error SSL_ERROR_SSL: error:1417C086:SSL routines:tls_process_client_certificate:certificate verify failed. E0507 09:01:28.419194860 321377 ssl_transport_security.cc:1431] Handshake failed with fatal error SSL_ERROR_SSL: error:1417C086:SSL routines:tls_process_client_certificate:certificate verify failed. E0507 09:01:28.497429445 321377 ssl_transport_security.cc:1431] Handshake failed with fatal error SSL_ERROR_SSL: error:1417C086:SSL routines:tls_process_client_certificate:certificate verify failed. E0507 09:01:28.532196272 325491 ssl_transport_security.cc:1431] Handshake failed with fatal error SSL_ERROR_SSL: error:1417C086:SSL routines:tls_process_client_certificate:certificate verify failed. 
2025-05-07T09:01:30.770255Z node 28 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[28:7501626344206098476:2072];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:30.770410Z node 28 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00288d/r3tmp/tmp7pX9KD/pdisk_1.dat 2025-05-07T09:01:31.168473Z node 28 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:01:31.223225Z node 28 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(28, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:01:31.223350Z node 28 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(28, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:01:31.234945Z node 28 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(28, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 25421, node 28 2025-05-07T09:01:31.512791Z node 28 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:01:31.512825Z node 28 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:01:31.512837Z node 28 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:01:31.513021Z node 28 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:27927 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:01:32.010943Z node 28 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 
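Every node bring-up in these logs (node 28 just above, and each node before it) walks the same Hive chain printed from node_info.cpp:25: VolatileState: Unknown -> Disconnected -> Connecting -> Connected. A compact sketch of that progression as a state machine, assuming only what the log lines show; the enum and Advance function are illustrative, not the actual Hive types:

#include <iostream>

// States exactly as they appear in the HIVE WARN lines above.
enum class EVolatileState { Unknown, Disconnected, Connecting, Connected };

const char* ToString(EVolatileState s) {
    switch (s) {
        case EVolatileState::Unknown:      return "Unknown";
        case EVolatileState::Disconnected: return "Disconnected";
        case EVolatileState::Connecting:   return "Connecting";
        case EVolatileState::Connected:    return "Connected";
    }
    return "?";
}

// Each HIVE WARN line in the log records one step of this chain.
EVolatileState Advance(EVolatileState s) {
    switch (s) {
        case EVolatileState::Unknown:      return EVolatileState::Disconnected;
        case EVolatileState::Disconnected: return EVolatileState::Connecting;
        case EVolatileState::Connecting:   return EVolatileState::Connected;
        case EVolatileState::Connected:    return EVolatileState::Connected;
    }
    return s;
}

int main() {
    auto s = EVolatileState::Unknown;
    while (s != EVolatileState::Connected) {
        auto next = Advance(s);
        std::cout << "VolatileState: " << ToString(s) << " -> " << ToString(next) << "\n";
        s = next;
    }
    return 0;
}

Running it prints the same three transitions that appear once per registering node in the log.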
2025-05-07T09:01:35.776744Z node 28 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[28:7501626344206098476:2072];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:35.776875Z node 28 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; E0507 09:01:42.139750876 326620 ssl_transport_security.cc:1431] Handshake failed with fatal error SSL_ERROR_SSL: error:1417C086:SSL routines:tls_process_client_certificate:certificate verify failed. E0507 09:01:42.167785983 331139 ssl_transport_security.cc:1431] Handshake failed with fatal error SSL_ERROR_SSL: error:1417C086:SSL routines:tls_process_client_certificate:certificate verify failed. E0507 09:01:42.214042984 327167 ssl_transport_security.cc:1431] Handshake failed with fatal error SSL_ERROR_SSL: error:1417C086:SSL routines:tls_process_client_certificate:certificate verify failed. E0507 09:01:42.245957757 327168 ssl_transport_security.cc:1431] Handshake failed with fatal error SSL_ERROR_SSL: error:1417C086:SSL routines:tls_process_client_certificate:certificate verify failed. E0507 09:01:42.315424658 327167 ssl_transport_security.cc:1431] Handshake failed with fatal error SSL_ERROR_SSL: error:1417C086:SSL routines:tls_process_client_certificate:certificate verify failed. E0507 09:01:42.339175346 326620 ssl_transport_security.cc:1431] Handshake failed with fatal error SSL_ERROR_SSL: error:1417C086:SSL routines:tls_process_client_certificate:certificate verify failed. E0507 09:01:42.395941790 326620 ssl_transport_security.cc:1431] Handshake failed with fatal error SSL_ERROR_SSL: error:1417C086:SSL routines:tls_process_client_certificate:certificate verify failed. E0507 09:01:42.445576948 327167 ssl_transport_security.cc:1431] Handshake failed with fatal error SSL_ERROR_SSL: error:1417C086:SSL routines:tls_process_client_certificate:certificate verify failed. E0507 09:01:42.525805230 326620 ssl_transport_security.cc:1431] Handshake failed with fatal error SSL_ERROR_SSL: error:1417C086:SSL routines:tls_process_client_certificate:certificate verify failed. E0507 09:01:42.553026864 326620 ssl_transport_security.cc:1431] Handshake failed with fatal error SSL_ERROR_SSL: error:1417C086:SSL routines:tls_process_client_certificate:certificate verify failed. E0507 09:01:42.602177320 327167 ssl_transport_security.cc:1431] Handshake failed with fatal error SSL_ERROR_SSL: error:1417C086:SSL routines:tls_process_client_certificate:certificate verify failed. E0507 09:01:42.634525202 331408 ssl_transport_security.cc:1431] Handshake failed with fatal error SSL_ERROR_SSL: error:1417C086:SSL routines:tls_process_client_certificate:certificate verify failed. 
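The bursts of E0507 ... ssl_transport_security.cc:1431 lines above (after both the node 25 and node 28 runs) are the behavior under test: TRegisterNodeOverDiscoveryService::ServerWithCertVerification_ClientProvidesExpiredCert passes ([GOOD]) precisely because the server keeps aborting the TLS handshake on the expired client certificate. A minimal sketch of the client side of that handshake using the public gRPC C++ API; the PEM file names are placeholders, and the port is the node 28 GrpcPort (25421) purely for illustration:

#include <fstream>
#include <sstream>
#include <string>
#include <grpcpp/grpcpp.h>

// Read an entire PEM file into a string (helper for the credentials below).
static std::string ReadPem(const std::string& path) {
    std::ifstream in(path);
    std::stringstream buf;
    buf << in.rdbuf();
    return buf.str();
}

int main() {
    grpc::SslCredentialsOptions opts;
    opts.pem_root_certs  = ReadPem("ca.pem");      // CA used to verify the server
    opts.pem_private_key = ReadPem("client.key");  // client private key
    opts.pem_cert_chain  = ReadPem("client.pem");  // client certificate; when it is
                                                   // expired, the server-side verifier
                                                   // fails with "certificate verify failed"
                                                   // and the handshake is aborted
    auto channel = grpc::CreateChannel("localhost:25421", grpc::SslCredentials(opts));
    // No RPC on this channel can succeed while the handshake keeps failing;
    // each rejected attempt surfaces as one E0507 ssl_transport_security.cc
    // line in the server's log, as seen above.
    (void)channel;
    return 0;
}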
------- [TM] {asan, default-linux-x86_64, release} ydb/services/persqueue_v1/ut/unittest >> TPersQueueTest::LOGBROKER_7820 [GOOD] Test command err: 2025-05-07T08:56:24.487056Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7501625032392924020:2069];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:56:24.487104Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-05-07T08:56:25.100624Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-05-07T08:56:25.141850Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501625032718178602:2215];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:56:25.146677Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-05-07T08:56:25.146954Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00263e/r3tmp/tmptxLh15/pdisk_1.dat 2025-05-07T08:56:25.554360Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:56:25.824042Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:56:25.863109Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:56:25.863250Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:56:25.865326Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:56:25.865420Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:56:25.879199Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-05-07T08:56:25.879427Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:56:25.884488Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 31708, node 1 2025-05-07T08:56:26.130465Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/zvgn/00263e/r3tmp/yandexyn8rHW.tmp 2025-05-07T08:56:26.130497Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/zvgn/00263e/r3tmp/yandexyn8rHW.tmp 2025-05-07T08:56:26.130696Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/zvgn/00263e/r3tmp/yandexyn8rHW.tmp 2025-05-07T08:56:26.130831Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-05-07T08:56:26.200528Z INFO: TTestServer started on Port 63860 GrpcPort 31708 TClient is connected to server localhost:63860 PQClient connected to localhost:31708 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:56:27.108881Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-05-07T08:56:27.203108Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-05-07T08:56:27.241683Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... waiting... waiting... 2025-05-07T08:56:29.458098Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501625032718178602:2215];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:56:29.458187Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:56:29.490086Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7501625032392924020:2069];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:56:29.490167Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:56:31.017463Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501625058487983347:2341], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:56:31.017662Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501625062782950680:2345], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:56:31.017737Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:56:31.026224Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710662:3, at schemeshard: 72057594046644480 2025-05-07T08:56:31.074201Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501625062782950682:2346], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710662 completed, doublechecking } 2025-05-07T08:56:31.570343Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501625062782950766:2768] txid# 281474976710663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:56:31.621156Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T08:56:31.628387Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7501625062457695513:2322], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-05-07T08:56:31.632012Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7501625062782950783:2354], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-05-07T08:56:31.633703Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2147: SessionId: ydb://session/3?node_id=1&id=MjhkNjMxMWItODk4NWRmMTItODI0NDEyNmEtNjBjOTA5ZTg=, ActorId: [1:7501625058487983344:2339], ActorState: ExecuteState, TraceId: 01jtmzbxfkcf3d8g9drp2t34md, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-05-07T08:56:31.634076Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-05-07T08:56:31.630312Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2147: SessionId: ydb://session/3?node_id=2&id=OTE0MWIzODMtNmVmMjk0OTMtNWJlNTAyMGUtM2RiMjUzYg==, ActorId: [2:7501625062457695481:2316], ActorState: ExecuteState, TraceId: 01jtmzbxp73y5at0hvawm3b38s, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-05-07T08:56:31.633564Z node 2 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-05-07T08:56:31.774607Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T08:56:31.926001Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local ... 5-07T09:01:29.851752Z :DEBUG: [] Take Data. Partition 0. Read: {18, 1} (662-662) 2025-05-07T09:01:29.851783Z :DEBUG: [] Take Data. Partition 0. Read: {18, 2} (663-663) 2025-05-07T09:01:29.851809Z :DEBUG: [] Take Data. Partition 0. Read: {18, 3} (664-664) 2025-05-07T09:01:29.851837Z :DEBUG: [] Take Data. Partition 0. Read: {18, 4} (665-665) 2025-05-07T09:01:29.851925Z :DEBUG: [] Take Data. Partition 0. Read: {18, 5} (666-666) 2025-05-07T09:01:29.851953Z :DEBUG: [] Take Data. Partition 0. Read: {18, 6} (667-667) 2025-05-07T09:01:29.851980Z :DEBUG: [] Take Data. Partition 0. Read: {18, 7} (668-668) 2025-05-07T09:01:29.852007Z :DEBUG: [] Take Data. Partition 0. Read: {18, 8} (669-669) 2025-05-07T09:01:29.852037Z :DEBUG: [] Take Data. Partition 0. Read: {18, 9} (670-670) 2025-05-07T09:01:29.852070Z :DEBUG: [] Take Data. Partition 0. Read: {18, 10} (671-671) 2025-05-07T09:01:29.852099Z :DEBUG: [] Take Data. Partition 0. Read: {18, 11} (672-672) 2025-05-07T09:01:29.852128Z :DEBUG: [] Take Data. Partition 0. Read: {18, 12} (673-673) 2025-05-07T09:01:29.852160Z :DEBUG: [] Take Data. Partition 0. Read: {18, 13} (674-674) 2025-05-07T09:01:29.852192Z :DEBUG: [] Take Data. Partition 0. Read: {18, 14} (675-675) 2025-05-07T09:01:29.852223Z :DEBUG: [] Take Data. Partition 0. Read: {19, 0} (676-676) 2025-05-07T09:01:29.852247Z :DEBUG: [] Take Data. Partition 0. Read: {19, 1} (677-677) 2025-05-07T09:01:29.852278Z :DEBUG: [] Take Data. Partition 0. Read: {19, 2} (678-678) 2025-05-07T09:01:29.852308Z :DEBUG: [] Take Data. Partition 0. Read: {19, 3} (679-679) 2025-05-07T09:01:29.852338Z :DEBUG: [] Take Data. Partition 0. Read: {20, 0} (680-680) 2025-05-07T09:01:29.852368Z :DEBUG: [] Take Data. Partition 0. Read: {21, 0} (681-681) 2025-05-07T09:01:29.852465Z :DEBUG: [] Take Data. Partition 0. Read: {21, 1} (682-682) 2025-05-07T09:01:29.852494Z :DEBUG: [] Take Data. Partition 0. Read: {21, 2} (683-683) 2025-05-07T09:01:29.852521Z :DEBUG: [] Take Data. Partition 0. Read: {21, 3} (684-684) 2025-05-07T09:01:29.852550Z :DEBUG: [] Take Data. Partition 0. Read: {21, 4} (685-685) 2025-05-07T09:01:29.852583Z :DEBUG: [] Take Data. Partition 0. Read: {21, 5} (686-686) 2025-05-07T09:01:29.852610Z :DEBUG: [] Take Data. Partition 0. Read: {21, 6} (687-687) 2025-05-07T09:01:29.852639Z :DEBUG: [] Take Data. Partition 0. Read: {22, 0} (688-688) 2025-05-07T09:01:29.852666Z :DEBUG: [] Take Data. Partition 0. Read: {22, 1} (689-689) 2025-05-07T09:01:29.852700Z :DEBUG: [] Take Data. Partition 0. Read: {22, 2} (690-690) 2025-05-07T09:01:29.852733Z :DEBUG: [] Take Data. Partition 0. Read: {22, 3} (691-691) 2025-05-07T09:01:29.852762Z :DEBUG: [] Take Data. Partition 0. Read: {22, 4} (692-692) 2025-05-07T09:01:29.852788Z :DEBUG: [] Take Data. Partition 0. Read: {22, 5} (693-693) 2025-05-07T09:01:29.852818Z :DEBUG: [] Take Data. Partition 0. Read: {22, 6} (694-694) 2025-05-07T09:01:29.852849Z :DEBUG: [] Take Data. Partition 0. Read: {23, 0} (695-695) 2025-05-07T09:01:29.852878Z :DEBUG: [] Take Data. Partition 0. Read: {24, 0} (696-696) 2025-05-07T09:01:29.852907Z :DEBUG: [] Take Data. Partition 0. Read: {24, 1} (697-697) 2025-05-07T09:01:29.852936Z :DEBUG: [] Take Data. Partition 0. Read: {24, 2} (698-698) 2025-05-07T09:01:29.852965Z :DEBUG: [] Take Data. Partition 0. Read: {24, 3} (699-699) 2025-05-07T09:01:30.814205Z node 25 :PQ_READ_PROXY DEBUG: partition_actor.cpp:1252: session cookie 1 consumer shared/user session shared/user_25_1_5033164845511819713_v1 TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1) wait data in partition inited, cookie 3 from offset700 2025-05-07T09:01:30.840509Z :DEBUG: [] [] [882370d5-ad73af5c-aea480da-e3ce2e1c] [null] Commit offsets [496, 650). Partition stream id: 1 2025-05-07T09:01:30.840730Z :DEBUG: [] [] [882370d5-ad73af5c-aea480da-e3ce2e1c] [null] The application data is transferred to the client. Number of messages 154, size 308000 bytes 2025-05-07T09:01:30.840981Z :DEBUG: [] [] [882370d5-ad73af5c-aea480da-e3ce2e1c] [null] Commit offsets [650, 700). Partition stream id: 1 2025-05-07T09:01:30.841455Z :DEBUG: [] [] [882370d5-ad73af5c-aea480da-e3ce2e1c] [null] The application data is transferred to the client. Number of messages 50, size 100000 bytes 2025-05-07T09:01:30.841898Z node 25 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 1 consumer shared/user session shared/user_25_1_5033164845511819713_v1 grpc read done: success# 1, data# { commit { cookies { assign_id: 1 partition_cookie: 2 } } } 2025-05-07T09:01:30.842077Z node 25 :PQ_READ_PROXY DEBUG: partition_actor.cpp:1423: session cookie 1 consumer shared/user session shared/user_25_1_5033164845511819713_v1 commit request from client for 2 in TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1) 2025-05-07T09:01:30.842105Z node 25 :PQ_READ_PROXY DEBUG: partition_actor.cpp:129: session cookie 1 consumer shared/user session shared/user_25_1_5033164845511819713_v1 commit request from 2 to 2 in TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1) 2025-05-07T09:01:30.842152Z node 25 :PQ_READ_PROXY DEBUG: partition_actor.cpp:192: session cookie 1 consumer shared/user session shared/user_25_1_5033164845511819713_v1 TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1) committing to position 700 prev 496 end 700 by cookie 2 2025-05-07T09:01:30.842875Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:342: Handle TEvRequest topic: 'rt3.dc1--topic1' requestId: 2025-05-07T09:01:30.842944Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:2788: [PQ: 72075186224037892] got client message batch for topic 'rt3.dc1--topic1' partition 0 2025-05-07T09:01:30.843121Z node 26 :PERSQUEUE DEBUG: partition.cpp:3264: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Topic 'rt3.dc1--topic1' partition 0 user user offset is set to 700 (startOffset 0) session shared/user_25_1_5033164845511819713_v1 2025-05-07T09:01:30.843350Z node 26 :PERSQUEUE DEBUG: partition.cpp:2182: [PQ: 72075186224037892, Partition: 0, State: StateIdle] === DumpKeyValueRequest === 2025-05-07T09:01:30.843388Z node 26 :PERSQUEUE DEBUG: partition.cpp:2183: [PQ: 72075186224037892, Partition: 0, State: StateIdle] --- delete ---------------- 2025-05-07T09:01:30.843422Z node 26 :PERSQUEUE DEBUG: partition.cpp:2191: [PQ: 72075186224037892, Partition: 0, State: StateIdle] --- write ----------------- 2025-05-07T09:01:30.843460Z node 26 :PERSQUEUE DEBUG: partition.cpp:2194: [PQ: 72075186224037892, Partition: 0, State: StateIdle] i0000000000 2025-05-07T09:01:30.843477Z node 26 :PERSQUEUE DEBUG: partition.cpp:2194: [PQ: 72075186224037892, Partition: 0, State: StateIdle] m0000000000cuser 2025-05-07T09:01:30.843491Z node 26 :PERSQUEUE DEBUG: partition.cpp:2194: [PQ: 72075186224037892, Partition: 0, State: StateIdle] m0000000000uuser 2025-05-07T09:01:30.843524Z node 26 :PERSQUEUE DEBUG: partition.cpp:2196: [PQ: 72075186224037892, Partition: 0, State: StateIdle] --- rename ---------------- 2025-05-07T09:01:30.843565Z node 26 :PERSQUEUE DEBUG: partition.cpp:2201: [PQ: 72075186224037892, Partition: 0, State: StateIdle] =========================== 2025-05-07T09:01:30.843601Z node 26 :PERSQUEUE DEBUG: read.h:262: CacheProxy. Passthrough write request to KV 2025-05-07T09:01:30.851526Z node 26 :PERSQUEUE DEBUG: partition_read.cpp:774: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Topic 'rt3.dc1--topic1' partition 0 user user readTimeStamp for offset 700 initiated queuesize 0 startOffset 0 ReadingTimestamp 0 rrg 0 2025-05-07T09:01:30.851635Z node 26 :PERSQUEUE DEBUG: partition_write.cpp:524: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0 2025-05-07T09:01:30.851716Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:377: Answer ok topic: 'rt3.dc1--topic1' partition: 0 messageNo: 0 requestId: cookie: 2 2025-05-07T09:01:30.852078Z node 25 :PQ_READ_PROXY DEBUG: partition_actor.cpp:652: session cookie 1 consumer shared/user session shared/user_25_1_5033164845511819713_v1 TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1) initDone 1 event { Cookie: 2 } 2025-05-07T09:01:30.852150Z node 25 :PQ_READ_PROXY DEBUG: partition_actor.cpp:940: session cookie 1 consumer shared/user session shared/user_25_1_5033164845511819713_v1 TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1) commit done to position 700 endOffset 700 with cookie 2 2025-05-07T09:01:30.852192Z node 25 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:696: session cookie 1 consumer shared/user session shared/user_25_1_5033164845511819713_v1 replying for commits: assignId# 1, from# 2, to# 2, offset# 700 2025-05-07T09:01:30.858275Z :DEBUG: [] [] [882370d5-ad73af5c-aea480da-e3ce2e1c] [null] Committed response: { cookies { assign_id: 1 partition_cookie: 2 } } 2025-05-07T09:01:31.857018Z :INFO: [] [] [882370d5-ad73af5c-aea480da-e3ce2e1c] Closing read session. Close timeout: 10.000000s 2025-05-07T09:01:31.857139Z :INFO: [] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): null:topic1:0:1:699:700 2025-05-07T09:01:31.857227Z :INFO: [] [] [882370d5-ad73af5c-aea480da-e3ce2e1c] Counters: { Errors: 0 CurrentSessionLifetimeMs: 10074 BytesRead: 1400000 MessagesRead: 700 BytesReadCompressed: 1400000 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-05-07T09:01:31.859957Z :INFO: [] [] [882370d5-ad73af5c-aea480da-e3ce2e1c] Closing read session. Close timeout: 0.000000s 2025-05-07T09:01:31.860053Z :INFO: [] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): null:topic1:0:1:699:700 2025-05-07T09:01:31.860142Z :INFO: [] [] [882370d5-ad73af5c-aea480da-e3ce2e1c] Counters: { Errors: 0 CurrentSessionLifetimeMs: 10077 BytesRead: 1400000 MessagesRead: 700 BytesReadCompressed: 1400000 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-05-07T09:01:31.860336Z :NOTICE: [] [] [882370d5-ad73af5c-aea480da-e3ce2e1c] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " }
2025-05-07T09:01:31.862368Z node 25 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 1 consumer shared/user session shared/user_25_1_5033164845511819713_v1 grpc read done: success# 0, data# { }
2025-05-07T09:01:31.862440Z node 25 :PQ_READ_PROXY INFO: read_session_actor.cpp:125: session cookie 1 consumer shared/user session shared/user_25_1_5033164845511819713_v1 grpc read failed
2025-05-07T09:01:31.862506Z node 25 :PQ_READ_PROXY INFO: read_session_actor.cpp:92: session cookie 1 consumer shared/user session shared/user_25_1_5033164845511819713_v1 grpc closed
2025-05-07T09:01:31.862567Z node 25 :PQ_READ_PROXY INFO: read_session_actor.cpp:377: session cookie 1 consumer shared/user session shared/user_25_1_5033164845511819713_v1 is DEAD
2025-05-07T09:01:31.865762Z node 25 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037893][rt3.dc1--topic1] pipe [25:7501626308511217381:2507] disconnected; active server actors: 1
2025-05-07T09:01:31.865864Z node 25 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1673: [72075186224037893][rt3.dc1--topic1] pipe [25:7501626308511217381:2507] client user disconnected session shared/user_25_1_5033164845511819713_v1
2025-05-07T09:01:31.870325Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:2433: [PQ: 72075186224037892] Destroy direct read session shared/user_25_1_5033164845511819713_v1
2025-05-07T09:01:31.870474Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:2899: [PQ: 72075186224037892] server disconnected, pipe [25:7501626308511217384:2510] destroyed
2025-05-07T09:01:31.870630Z node 26 :PQ_READ_PROXY DEBUG: caching_service.cpp:138: Direct read cache: server session deregistered: shared/user_25_1_5033164845511819713_v1
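The trace above shows the full commit round trip: the SDK reports "Commit offsets [496, 650)" and "[650, 700)", PQ_READ_PROXY forwards the client's cookie to the partition actor, PERSQUEUE persists the consumer offset ("offset is set to 700"), and the SDK finally receives the Committed response before the session is closed. For orientation, here is a minimal sketch of the read-and-commit loop such tests exercise, assuming the public C++ SDK (NYdb::NTopic); the endpoint, database and topic name are illustrative and error handling is elided:

// Sketch only: read from a topic as consumer "user" and commit what was read,
// which produces the "commit request from client" / "commit done to position"
// pairs seen in the log above.
#include <ydb/public/sdk/cpp/client/ydb_topic/topic.h>

int main() {
    NYdb::TDriver driver(NYdb::TDriverConfig()
        .SetEndpoint("grpc://localhost:2135")   // assumption: local single-node cluster
        .SetDatabase("/Root"));
    NYdb::NTopic::TTopicClient client(driver);

    auto settings = NYdb::NTopic::TReadSessionSettings()
        .ConsumerName("user")                   // the log reads as consumer shared/user
        .AppendTopics(TString("topic1"));
    auto session = client.CreateReadSession(settings);

    while (auto event = session->GetEvent(/* block = */ true)) {
        if (auto* data = std::get_if<NYdb::NTopic::TReadSessionEvent::TDataReceivedEvent>(&*event)) {
            for (const auto& msg : data->GetMessages()) {
                (void)msg;                      // process msg.GetData() here
            }
            data->Commit();                     // ack everything delivered in this event
        } else if (auto* start = std::get_if<NYdb::NTopic::TReadSessionEvent::TStartPartitionSessionEvent>(&*event)) {
            start->Confirm();
        } else if (std::get_if<NYdb::NTopic::TSessionClosedEvent>(&*event)) {
            break;                              // mirrors the SessionClosed / ABORTED path above
        }
    }
    driver.Stop(true);
}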
>> YdbYqlClient::TestReadTableMultiShardWholeTable [GOOD]
>> YdbYqlClient::TestReadTableMultiShardWholeTableUseSnapshot
>> YdbYqlClient::CreateAndAltertTableWithReadReplicasSettings [GOOD]
>> YdbYqlClient::CreateTableWithMESettings
>> YdbYqlClient::TestReadTableOneBatch
>> TGRpcNewCoordinationClient::SessionDescribeWatchReplace [GOOD]
>> TGRpcNewCoordinationClient::SessionCreateUpdateDeleteSemaphore
------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> TGRpcYdbTest::BeginTxRequestError [GOOD]
Test command err:
2025-05-07T09:01:13.490854Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501626270736716413:2074];send_to=[0:7307199536658146131:7762515];
2025-05-07T09:01:13.510156Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message;
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002869/r3tmp/tmpx5Z6lq/pdisk_1.dat
2025-05-07T09:01:14.227916Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded
2025-05-07T09:01:14.352061Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2025-05-07T09:01:14.352164Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting
TServer::EnableGrpc on GrpcPort 10231, node 1
2025-05-07T09:01:14.359094Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected
2025-05-07T09:01:14.521155Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe)
2025-05-07T09:01:14.521176Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe)
2025-05-07T09:01:14.521182Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe)
2025-05-07T09:01:14.521287Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration
TClient is connected to server localhost:13947
WaitRootIsUp 'Root'...
TClient::Ls request: Root
TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED)
WaitRootIsUp 'Root' success.
2025-05-07T09:01:14.866150Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480
waiting...
2025-05-07T09:01:15.023341Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateKesus, opId: 281474976710658:0, at schemeshard: 72057594046644480
2025-05-07T09:01:19.082307Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7501626296401222734:2074];send_to=[0:7307199536658146131:7762515];
2025-05-07T09:01:19.082389Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message;
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002869/r3tmp/tmpVx1zdl/pdisk_1.dat
2025-05-07T09:01:19.346718Z node 4 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded
TServer::EnableGrpc on GrpcPort 63978, node 4
2025-05-07T09:01:19.456715Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2025-05-07T09:01:19.456797Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting
2025-05-07T09:01:19.490015Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected
2025-05-07T09:01:19.669637Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe)
2025-05-07T09:01:19.669658Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe)
2025-05-07T09:01:19.669665Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe)
2025-05-07T09:01:19.669791Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration
TClient is connected to server localhost:13428
WaitRootIsUp 'Root'...
TClient::Ls request: Root
TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED)
WaitRootIsUp 'Root' success.
2025-05-07T09:01:20.048501Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480
waiting...
2025-05-07T09:01:20.126224Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateKesus, opId: 281474976715658:0, at schemeshard: 72057594046644480
2025-05-07T09:01:24.596272Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7501626318970707488:2076];send_to=[0:7307199536658146131:7762515];
2025-05-07T09:01:24.609051Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message;
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002869/r3tmp/tmpQwmG3i/pdisk_1.dat
2025-05-07T09:01:24.950286Z node 7 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded
2025-05-07T09:01:24.988655Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2025-05-07T09:01:24.988747Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting
2025-05-07T09:01:24.992568Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected
TServer::EnableGrpc on GrpcPort 26667, node 7
2025-05-07T09:01:25.114738Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe)
2025-05-07T09:01:25.114760Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe)
2025-05-07T09:01:25.114771Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe)
2025-05-07T09:01:25.114912Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration
TClient is connected to server localhost:2346
WaitRootIsUp 'Root'...
TClient::Ls request: Root
TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED)
WaitRootIsUp 'Root' success.
2025-05-07T09:01:25.355638Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480
waiting...
2025-05-07T09:01:25.496557Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480
2025-05-07T09:01:25.590821Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateKesus, opId: 281474976715659:0, at schemeshard: 72057594046644480
2025-05-07T09:01:25.719652Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateKesus, opId: 281474976715660:0, at schemeshard: 72057594046644480
2025-05-07T09:01:25.825654Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateKesus, opId: 281474976715661:0, at schemeshard: 72057594046644480
2025-05-07T09:01:25.945748Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateKesus, opId: 281474976715662:0, at schemeshard: 72057594046644480
2025-05-07T09:01:26.373491Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715663:0, at schemeshard: 72057594046644480
2025-05-07T09:01:26.446871Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715664:0, at schemeshard: 72057594046644480
2025-05-07T09:01:26.515123Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715665:0, at schemeshard: 72057594046644480
2025-05-07T09:01:30.323311Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7501626344438348836:2074];send_to=[0:7307199536658146131:7762515];
2025-05-07T09:01:30.323399Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message;
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002869/r3tmp/tmp32zBrh/pdisk_1.dat
2025-05-07T09:01:30.628709Z node 10 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded
2025-05-07T09:01:30.657085Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2025-05-07T09:01:30.657177Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting
2025-05-07T09:01:30.659975Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected
TServer::EnableGrpc on GrpcPort 24207, node 10
2025-05-07T09:01:30.851294Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe)
2025-05-07T09:01:30.851315Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe)
2025-05-07T09:01:30.851322Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe)
2025-05-07T09:01:30.851428Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration
TClient is connected to server localhost:13305
WaitRootIsUp 'Root'...
TClient::Ls request: Root
TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED)
WaitRootIsUp 'Root' success.
2025-05-07T09:01:31.150167Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480
waiting...
2025-05-07T09:01:31.344912Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480
2025-05-07T09:01:37.062682Z node 13 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[13:7501626375550102990:2072];send_to=[0:7307199536658146131:7762515];
2025-05-07T09:01:37.062822Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message;
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002869/r3tmp/tmp5anMFH/pdisk_1.dat
2025-05-07T09:01:37.882493Z node 13 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded
2025-05-07T09:01:37.984360Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2025-05-07T09:01:37.984470Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Disconnected -> Connecting
2025-05-07T09:01:38.007955Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Connecting -> Connected
TServer::EnableGrpc on GrpcPort 2342, node 13
2025-05-07T09:01:38.410802Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe)
2025-05-07T09:01:38.410831Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe)
2025-05-07T09:01:38.410844Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe)
2025-05-07T09:01:38.411041Z node 13 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration
TClient is connected to server localhost:21080
WaitRootIsUp 'Root'...
TClient::Ls request: Root
TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED)
WaitRootIsUp 'Root' success.
2025-05-07T09:01:38.948554Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480
waiting...
2025-05-07T09:01:42.046118Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[13:7501626375550102990:2072];send_to=[0:7307199536658146131:7762515];
2025-05-07T09:01:42.046222Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout;
2025-05-07T09:01:43.785980Z node 13 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [13:7501626401319907870:2341], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T09:01:43.785984Z node 13 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [13:7501626401319907859:2338], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T09:01:43.786079Z node 13 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T09:01:43.791982Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480
2025-05-07T09:01:43.862284Z node 13 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [13:7501626401319907873:2342], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking }
2025-05-07T09:01:43.939953Z node 13 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [13:7501626401319907948:2691] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 }
2025-05-07T09:01:43.941551Z node 13 :KQP_SESSION WARN: kqp_session_actor.cpp:2147: SessionId: ydb://session/3?node_id=13&id=NDgwYzdjNC04YjY4OGNiYS01MzZhY2EzYy1hYmU3YWU3MQ==, ActorId: [13:7501626401319907856:2336], ActorState: ExecuteState, TraceId: 01jtmznez76m7wy483866an8vy, ReplyQueryCompileError, status NOT_FOUND remove tx with tx_id:
2025-05-07T09:01:43.947743Z node 13 :KQP_SESSION WARN: kqp_session_actor.cpp:2147: SessionId: ydb://session/3?node_id=13&id=NDgwYzdjNC04YjY4OGNiYS01MzZhY2EzYy1hYmU3YWU3MQ==, ActorId: [13:7501626401319907856:2336], ActorState: ExecuteState, TraceId: 01jtmznf4a7nsbtak6hdeyjddt, ReplyQueryCompileError, status NOT_FOUND remove tx with tx_id:
2025-05-07T09:01:43.951453Z node 13 :KQP_SESSION WARN: kqp_session_actor.cpp:2147: SessionId: ydb://session/3?node_id=13&id=NDgwYzdjNC04YjY4OGNiYS01MzZhY2EzYy1hYmU3YWU3MQ==, ActorId: [13:7501626401319907856:2336], ActorState: ExecuteState, TraceId: 01jtmznf4e9ca6ektnq1ze4pqb, ReplyQueryCompileError, status NOT_FOUND remove tx with tx_id:
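TGRpcYdbTest::BeginTxRequestError exercises the raw BeginTransaction gRPC path; the NOT_FOUND compile errors above come from queries issued while the default resource pool is still being auto-created (the pool fetch fails, ESchemeOpCreateResourcePool runs, and the retry then finds the path already present). For reference, a minimal sketch of the same explicit-transaction flow through the C++ table client, assuming the public NYdb::NTable API; the endpoint and database are again illustrative:

// Sketch only: begin, use and commit an explicit transaction. A malformed or
// stale transaction id in the request is the error case this test probes.
#include <ydb/public/sdk/cpp/client/ydb_table/table.h>

int main() {
    NYdb::TDriver driver(NYdb::TDriverConfig()
        .SetEndpoint("grpc://localhost:2135")
        .SetDatabase("/Root"));
    NYdb::NTable::TTableClient client(driver);

    auto status = client.RetryOperationSync([](NYdb::NTable::TSession session) -> NYdb::TStatus {
        auto begin = session.BeginTransaction(
            NYdb::NTable::TTxSettings::SerializableRW()).GetValueSync();
        if (!begin.IsSuccess()) {
            return begin;                  // surface the error, e.g. NOT_FOUND as in the log
        }
        NYdb::NTable::TTransaction tx = begin.GetTransaction();
        return tx.Commit().GetValueSync(); // or tx.Rollback() on application failure
    });
    driver.Stop(true);
    return status.IsSuccess() ? 0 : 1;
}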
>> YdbYqlClient::SecurityTokenAuthMultiTenantSDK
>> KqpIndexes::UpsertWithoutExtraNullDelete-UseSink [GOOD]
>> KqpIndexes::UpsertWithNullKeysSimple
>> YdbYqlClient::ConnectDbAclIsOffWhenTokenIsOptionalAndNull
>> KqpUniqueIndex::UpdateOnHidenChanges+DataColumn [GOOD]
>> YdbS3Internal::TestAccessCheck [GOOD]
>> YdbS3Internal::BadRequests
>> YdbYqlClient::TestDecimal [GOOD]
>> YdbYqlClient::TestBusySession
>> LocalityOperation::LocksFromAnotherTenants-UseSink [GOOD]
>> YdbYqlClient::TestReadTableBatchLimits [GOOD]
>> TGRpcNewCoordinationClient::NodeNotFound [GOOD]
>> TGRpcNewCoordinationClient::MultipleSessionsSemaphores
>> YdbTableBulkUpsertOlap::UpsertArrowBatch_DataShard [GOOD]
>> CommitOffset::DistributedTxCommit_CheckOffsetCommitForDifferentCases [GOOD]
>> CommitOffset::DistributedTxCommit_Flat_CheckOffsetCommitForDifferentCases
------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpUniqueIndex::UpdateOnHidenChanges+DataColumn [GOOD]
Test command err:
Trying to start YDB, gRPC: 9488, MsgBus: 9743
2025-05-07T09:01:09.778850Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501626253294151118:2199];send_to=[0:7307199536658146131:7762515];
2025-05-07T09:01:09.778989Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message;
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003c23/r3tmp/tmpLlkC8G/pdisk_1.dat
2025-05-07T09:01:11.009105Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError;
2025-05-07T09:01:11.177921Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded
2025-05-07T09:01:11.258539Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2025-05-07T09:01:11.269081Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting
2025-05-07T09:01:11.296770Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected
2025-05-07T09:01:11.530641Z node 1 :BS_CONTROLLER ERROR: {BSC07@impl.h:2175} ProcessControllerEvent event processing took too much time Type# 268637706 Duration# 0.108094s
2025-05-07T09:01:11.530720Z node 1 :BS_CONTROLLER ERROR: {BSC00@bsc.cpp:666} StateWork event processing took too much time Type# 2146435078 Duration# 0.108191s
TServer::EnableGrpc on GrpcPort 9488, node 1
2025-05-07T09:01:12.792168Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe)
2025-05-07T09:01:12.792191Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe)
2025-05-07T09:01:12.792204Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe)
2025-05-07T09:01:12.792375Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration
2025-05-07T09:01:14.778635Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501626253294151118:2199];send_to=[0:7307199536658146131:7762515];
2025-05-07T09:01:14.778692Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout;
TClient is connected to server localhost:9743
TClient is connected to server localhost:9743
WaitRootIsUp 'Root'...
TClient::Ls request: Root
TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED)
WaitRootIsUp 'Root' success.
2025-05-07T09:01:15.795633Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480
waiting...
2025-05-07T09:01:15.946868Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480
waiting...
2025-05-07T09:01:16.368082Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480
waiting...
2025-05-07T09:01:16.546663Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480
waiting...
2025-05-07T09:01:16.657793Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480
waiting...
2025-05-07T09:01:17.142976Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501626287653891026:2415], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T09:01:17.143168Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T09:01:20.679660Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480
2025-05-07T09:01:20.743219Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480
2025-05-07T09:01:20.817749Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480
2025-05-07T09:01:20.866474Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480
2025-05-07T09:01:20.940571Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480
2025-05-07T09:01:21.051687Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480
2025-05-07T09:01:21.134037Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480
2025-05-07T09:01:21.401733Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501626304833760898:2490], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T09:01:21.401815Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T09:01:21.402215Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501626304833760903:2493], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T09:01:21.442340Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480
2025-05-07T09:01:21.459758Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710669, at schemeshard: 72057594046644480
2025-05-07T09:01:21.460122Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501626304833760905:2494], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking }
2025-05-07T09:01:21.564001Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501626304833760964:3444] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 }
2025-05-07T09:01:26.128326Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7261: Cannot get console configs
2025-05-07T09:01:26.128366Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded
2025-05-07T09:01:26.545466Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480
waiting...
Trying to start YDB, gRPC: 10735, MsgBus: 25121
2025-05-07T09:01:36.330667Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7501626369277607779:2059];send_to=[0:7307199536658146131:7762515];
2025-05-07T09:01:36.330754Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message;
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003c23/r3tmp/tmpu8HAOm/pdisk_1.dat
2025-05-07T09:01:36.524901Z node 2 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded
2025-05-07T09:01:36.561296Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2025-05-07T09:01:36.561411Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting
2025-05-07T09:01:36.563104Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected
TServer::EnableGrpc on GrpcPort 10735, node 2
2025-05-07T09:01:36.626673Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe)
2025-05-07T09:01:36.626708Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe)
2025-05-07T09:01:36.626718Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe)
2025-05-07T09:01:36.626864Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration
TClient is connected to server localhost:25121
TClient is connected to server localhost:25121
WaitRootIsUp 'Root'...
TClient::Ls request: Root
TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED)
WaitRootIsUp 'Root' success.
2025-05-07T09:01:37.214855Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480
waiting...
2025-05-07T09:01:37.224533Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480
2025-05-07T09:01:37.240925Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480
waiting...
2025-05-07T09:01:37.355419Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480
waiting...
2025-05-07T09:01:37.597308Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480
waiting...
2025-05-07T09:01:37.697929Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480
waiting...
2025-05-07T09:01:40.925790Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7501626386457478611:2407], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T09:01:40.925916Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T09:01:40.993465Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480
2025-05-07T09:01:41.041192Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480
2025-05-07T09:01:41.132810Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480
2025-05-07T09:01:41.182856Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480
2025-05-07T09:01:41.275174Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480
2025-05-07T09:01:41.330213Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7501626369277607779:2059];send_to=[0:7307199536658146131:7762515];
2025-05-07T09:01:41.330272Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout;
2025-05-07T09:01:41.414820Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480
2025-05-07T09:01:41.500696Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480
2025-05-07T09:01:41.649237Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7501626390752446574:2471], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T09:01:41.649385Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T09:01:41.649929Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7501626390752446579:2474], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T09:01:41.654337Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480
2025-05-07T09:01:41.673642Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7501626390752446581:2475], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking }
2025-05-07T09:01:41.733380Z node 2 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [2:7501626390752446632:3425] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 }
2025-05-07T09:01:43.168707Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480
waiting...
2025-05-07T09:01:46.594611Z node 2 :KQP_EXECUTER ERROR: kqp_literal_executer.cpp:107: ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: 01jtmzngkpeyyq6fj140qb4fc8, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=Nzg1Yzg1NWYtMjhkMDZmYzQtODczYzE3ZjYtZTA0MDQ3MjE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. TKqpLiteralExecuter, TKqpEnsure failed.
2025-05-07T09:01:46.606753Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2578: SessionId: ydb://session/3?node_id=2&id=Nzg1Yzg1NWYtMjhkMDZmYzQtODczYzE3ZjYtZTA0MDQ3MjE=, ActorId: [2:7501626399342382270:2575], ActorState: ExecuteState, TraceId: 01jtmzngkpeyyq6fj140qb4fc8, Create QueryResponse for error on request, msg:
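The KqpUniqueIndex::UpdateOnHidenChanges+DataColumn run above ends with a deliberate "TKqpEnsure failed" in TKqpLiteralExecuter: KQP enforces uniqueness of a unique secondary index at runtime and aborts the transaction when an update would introduce a duplicate indexed value. Below is a hedged sketch of the kind of statement involved; the table, column and index names are hypothetical, since the test's actual schema is not visible in this log:

// Sketch only. Assumed schema, for illustration:
//   CREATE TABLE `/Root/TestTable` (Key Uint64, Value Utf8, PRIMARY KEY (Key),
//       INDEX ValueIndex GLOBAL UNIQUE SYNC ON (Value));
#include <ydb/public/sdk/cpp/client/ydb_table/table.h>

NYdb::TStatus UpdateValue(NYdb::NTable::TSession session) {
    // If another row already holds Value = "dup", the runtime uniqueness check
    // fails and KQP replies with a "TKqpEnsure failed" error, as logged above.
    auto result = session.ExecuteDataQuery(R"(
        --!syntax_v1
        UPDATE `/Root/TestTable` SET Value = "dup" WHERE Key = 1;
    )", NYdb::NTable::TTxControl::BeginTx(
            NYdb::NTable::TTxSettings::SerializableRW()).CommitTx()
    ).GetValueSync();
    return result;
}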
>> TGRpcNewClient::SimpleYqlQuery [GOOD]
>> TGRpcNewClient::CreateAlterUpsertDrop
|91.8%| [TA] $(B)/ydb/core/tx/schemeshard/ut_ttl/test-results/unittest/{meta.json ... results_accumulator.log}
>> YdbLogStore::LogStoreNegative [GOOD]
>> YdbLogStore::Dirs
------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> LocalityOperation::LocksFromAnotherTenants-UseSink [GOOD]
Test command err:
2025-05-07T09:01:17.440815Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501626290787789812:2207];send_to=[0:7307199536658146131:7762515];
2025-05-07T09:01:17.441490Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message;
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002863/r3tmp/tmp1pqivJ/pdisk_1.dat
2025-05-07T09:01:18.161075Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2025-05-07T09:01:18.161164Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting
2025-05-07T09:01:18.176383Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected
2025-05-07T09:01:18.178772Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded
TServer::EnableGrpc on GrpcPort 25085, node 1
2025-05-07T09:01:18.618494Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe)
2025-05-07T09:01:18.618515Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe)
2025-05-07T09:01:18.618522Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe)
2025-05-07T09:01:18.618632Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration
TClient is connected to server localhost:2553
WaitRootIsUp 'Root'...
TClient::Ls request: Root
TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 Pa... (TRUNCATED)
WaitRootIsUp 'Root' success.
2025-05-07T09:01:19.139695Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480
waiting...
2025-05-07T09:01:23.404132Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7501626314063865462:2072];send_to=[0:7307199536658146131:7762515];
2025-05-07T09:01:23.404186Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message;
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002863/r3tmp/tmpn88ZD3/pdisk_1.dat
2025-05-07T09:01:23.629925Z node 4 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded
2025-05-07T09:01:23.659382Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2025-05-07T09:01:23.659460Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting
2025-05-07T09:01:23.663820Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected
TServer::EnableGrpc on GrpcPort 10074, node 4
2025-05-07T09:01:23.711330Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe)
2025-05-07T09:01:23.711356Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe)
2025-05-07T09:01:23.711362Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe)
2025-05-07T09:01:23.711487Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration
TClient is connected to server localhost:2370
WaitRootIsUp 'Root'...
TClient::Ls request: Root
TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 Pa... (TRUNCATED)
WaitRootIsUp 'Root' success.
2025-05-07T09:01:23.937160Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480
waiting...
2025-05-07T09:01:28.082378Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7501626336726365702:2075];send_to=[0:7307199536658146131:7762515];
2025-05-07T09:01:28.082476Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message;
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002863/r3tmp/tmpHtcqqz/pdisk_1.dat
2025-05-07T09:01:28.287063Z node 7 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded
2025-05-07T09:01:28.332292Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2025-05-07T09:01:28.332361Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting
2025-05-07T09:01:28.338305Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected
TServer::EnableGrpc on GrpcPort 6209, node 7
2025-05-07T09:01:28.505756Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe)
2025-05-07T09:01:28.505783Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe)
2025-05-07T09:01:28.505790Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe)
2025-05-07T09:01:28.505916Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration
TClient is connected to server localhost:17316
WaitRootIsUp 'Root'...
TClient::Ls request: Root
TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED)
WaitRootIsUp 'Root' success.
2025-05-07T09:01:28.767536Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480
waiting...
TClient is connected to server localhost:17316
2025-05-07T09:01:29.323316Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateSubDomain, opId: 281474976710658:0, at schemeshard: 72057594046644480
waiting...
2025-05-07T09:01:29.383414Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710659:0, at schemeshard: 72057594046644480
waiting...
2025-05-07T09:01:29.894371Z node 9 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[9:7501626342906925139:2071];send_to=[0:7307199536658146131:7762515];
2025-05-07T09:01:29.894479Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/ydb_tenant_0/.metadata/initialization/migrations;error=scheme_cache_undelivered_message;
2025-05-07T09:01:30.036218Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2025-05-07T09:01:30.036302Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Disconnected -> Connecting
2025-05-07T09:01:30.051578Z node 7 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 9 Cookie 9
2025-05-07T09:01:30.056867Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Connecting -> Connected
2025-05-07T09:01:30.167197Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateSubDomain, opId: 281474976710660:0, at schemeshard: 72057594046644480
waiting...
2025-05-07T09:01:30.231604Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710661:0, at schemeshard: 72057594046644480
waiting...
2025-05-07T09:01:30.745371Z node 8 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[8:7501626344097862982:2211];send_to=[0:7307199536658146131:7762515];
2025-05-07T09:01:30.745476Z node 8 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/ydb_tenant_1/.metadata/initialization/migrations;error=scheme_cache_undelive ... tRootIsUp 'Root' success.
2025-05-07T09:01:39.239486Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480
waiting...
TClient is connected to server localhost:32555
2025-05-07T09:01:39.761246Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateSubDomain, opId: 281474976710658:0, at schemeshard: 72057594046644480
waiting...
2025-05-07T09:01:39.842981Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710659:0, at schemeshard: 72057594046644480
waiting...
2025-05-07T09:01:40.356493Z node 12 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[12:7501626388917376098:2074];send_to=[0:7307199536658146131:7762515];
2025-05-07T09:01:40.356564Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/ydb_tenant_0/.metadata/initialization/migrations;error=scheme_cache_undelivered_message;
2025-05-07T09:01:40.368326Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2025-05-07T09:01:40.368409Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Disconnected -> Connecting
2025-05-07T09:01:40.379120Z node 10 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 12 Cookie 12
2025-05-07T09:01:40.493770Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Connecting -> Connected
2025-05-07T09:01:40.588894Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateSubDomain, opId: 281474976710660:0, at schemeshard: 72057594046644480
waiting...
2025-05-07T09:01:40.661120Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710661:0, at schemeshard: 72057594046644480
waiting...
2025-05-07T09:01:41.356042Z node 11 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[11:7501626393670162173:2221];send_to=[0:7307199536658146131:7762515];
2025-05-07T09:01:41.410929Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(11, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2025-05-07T09:01:41.411039Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(11, (0,0,0,0)) VolatileState: Disconnected -> Connecting
2025-05-07T09:01:41.417084Z node 10 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 11 Cookie 11
2025-05-07T09:01:41.418487Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(11, (0,0,0,0)) VolatileState: Connecting -> Connected
2025-05-07T09:01:41.484812Z node 11 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/ydb_tenant_1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message;
2025-05-07T09:01:43.172722Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[10:7501626378585101349:2071];send_to=[0:7307199536658146131:7762515];
2025-05-07T09:01:43.172790Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout;
2025-05-07T09:01:43.859227Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480
2025-05-07T09:01:44.099995Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480
2025-05-07T09:01:44.306929Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7501626404354907283:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:44.307035Z node 10 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:44.307418Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7501626404354907295:2372], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:44.312110Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710664:3, at schemeshard: 72057594046644480 2025-05-07T09:01:44.367407Z node 10 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [10:7501626404354907297:2373], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710664 completed, doublechecking } 2025-05-07T09:01:44.443550Z node 10 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [10:7501626404354907372:3415] txid# 281474976710665, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:01:44.693648Z node 10 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710666. Ctx: { TraceId: 01jtmznffhbaz6at48r68981yf, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=10&id=OGE3ODNkNjUtMTUxNDFhNmQtY2FkMWNhOTEtNTg5OGU3Njc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:01:44.945234Z node 10 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710667. Ctx: { TraceId: 01jtmznfxtffec8nfabsvnwp3x, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=10&id=OGE3ODNkNjUtMTUxNDFhNmQtY2FkMWNhOTEtNTg5OGU3Njc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:01:45.216655Z node 10 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710668. Ctx: { TraceId: 01jtmzng7mcm9mmr56g12xdtbr, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=10&id=OGE3ODNkNjUtMTUxNDFhNmQtY2FkMWNhOTEtNTg5OGU3Njc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:01:45.358651Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[12:7501626388917376098:2074];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:45.358743Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/ydb_tenant_0/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:01:46.215619Z node 11 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[11:7501626393670162173:2221];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:46.215682Z node 11 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/ydb_tenant_1/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:01:46.997477Z node 10 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710669. Ctx: { TraceId: 01jtmzng7mcm9mmr56g12xdtbr, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=10&id=OGE3ODNkNjUtMTUxNDFhNmQtY2FkMWNhOTEtNTg5OGU3Njc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:01:47.010340Z node 10 :KQP_EXECUTER ERROR: kqp_data_executer.cpp:817: ActorId: [10:7501626412944842225:2361] TxId: 281474976710669. Ctx: { TraceId: 01jtmzng7mcm9mmr56g12xdtbr, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=10&id=OGE3ODNkNjUtMTUxNDFhNmQtY2FkMWNhOTEtNTg5OGU3Njc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Handle TEvProposeTransactionResult: unable to select coordinator. 
Tx canceled, actorId: [10:7501626412944842225:2361], previously selected coordinator: 72075186224037888, coordinator selected at propose result: 72075186224037890 2025-05-07T09:01:47.014093Z node 10 :KQP_SESSION WARN: kqp_session_actor.cpp:2578: SessionId: ydb://session/3?node_id=10&id=OGE3ODNkNjUtMTUxNDFhNmQtY2FkMWNhOTEtNTg5OGU3Njc=, ActorId: [10:7501626400059939807:2361], ActorState: ExecuteState, TraceId: 01jtmzng7mcm9mmr56g12xdtbr, Create QueryResponse for error on request, msg: 2025-05-07T09:01:47.015287Z node 10 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710670. Ctx: { TraceId: 01jtmzng7mcm9mmr56g12xdtbr, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=10&id=OGE3ODNkNjUtMTUxNDFhNmQtY2FkMWNhOTEtNTg5OGU3Njc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:01:47.026840Z node 10 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 11 2025-05-07T09:01:47.027353Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(11, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-05-07T09:01:47.027747Z node 10 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 12 2025-05-07T09:01:47.028135Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-05-07T09:01:48.424327Z node 11 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [11:7501626423734933563:2323], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T09:01:48.424494Z node 11 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/ydb_tenant_1/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T09:01:48.488270Z node 11 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [11:7501626423734933563:2323], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T09:01:48.602479Z node 11 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [11:7501626423734933563:2323], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T09:01:48.925012Z node 11 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [11:7501626423734933563:2323], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } >> YdbYqlClient::CopyTables [GOOD] >> YdbYqlClient::CreateAndAltertTableWithCompactionPolicy ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> YdbYqlClient::TestReadTableBatchLimits [GOOD] Test command err: 2025-05-07T09:01:07.859071Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501626246699404016:2279];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:07.859112Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002871/r3tmp/tmpJp2rfR/pdisk_1.dat 2025-05-07T09:01:08.845917Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:01:08.847743Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:01:08.848266Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:01:08.857075Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:01:08.870926Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; TServer::EnableGrpc on GrpcPort 27682, node 1 2025-05-07T09:01:09.422602Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:01:09.422642Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:01:09.422653Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:01:09.422779Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:20182 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:01:09.922686Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T09:01:10.057115Z node 1 :GRPC_SERVER INFO: grpc_request_proxy.cpp:572: Got grpc request# ListEndpointsRequest, traceId# 01jtmzme1603zhptetbsw651yq, sdkBuildInfo# ydb-cpp-sdk/dev, state# AS_NOT_PERFORMED, database# undef, peer# ipv6:[::1]:58432, grpcInfo# grpc-c++/1.54.3 grpc-c/31.0.0 (linux; chttp2), timeout# 9.984908s 2025-05-07T09:01:10.108968Z node 1 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:575: Got grpc request# CreateSessionRequest, traceId# 01jtmzme2655sm47v6mwj26nn9, sdkBuildInfo# ydb-cpp-sdk/dev, state# AS_NOT_PERFORMED, database# undef, peer# ipv6:[::1]:58448, grpcInfo# grpc-c++/1.54.3 grpc-c/31.0.0 (linux; chttp2), timeout# undef 2025-05-07T09:01:12.812447Z node 1 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:575: Got grpc request# CreateTableRequest, traceId# 01jtmzmgqc8zmxhbw8f517a41n, sdkBuildInfo# ydb-cpp-sdk/dev, state# AS_NOT_PERFORMED, database# undef, peer# ipv6:[::1]:49132, grpcInfo# grpc-c++/1.54.3 grpc-c/31.0.0 (linux; chttp2), timeout# undef 2025-05-07T09:01:12.813142Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:7501626246699404020:2139] Handle TEvProposeTransaction 2025-05-07T09:01:12.813165Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:7501626246699404020:2139] TxId# 281474976710658 ProcessProposeTransaction 2025-05-07T09:01:12.813214Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:7501626246699404020:2139] Cookie# 0 userReqId# "" txid# 281474976710658 SEND to# [1:7501626268174241326:2630] 2025-05-07T09:01:12.861237Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501626246699404016:2279];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:12.861319Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:01:12.950665Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1520: Actor# [1:7501626268174241326:2630] txid# 281474976710658 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/Root" OperationType: ESchemeOpCreateTable CreateTable { Name: "Test" Columns { Name: "Key" Type: "Uint32" NotNull: false } Columns { Name: "Fk" Type: "Uint64" NotNull: false } Columns { Name: "Value" Type: "String" NotNull: false } KeyColumnNames: "Key" KeyColumnNames: "Fk" UniformPartitionsCount: 16 PartitionConfig { } Temporary: false } CreateIndexedTable { } } } DatabaseName: "" RequestType: "" PeerName: "ipv6:[::1]:49132" 2025-05-07T09:01:12.950726Z node 1 :TX_PROXY DEBUG: schemereq.cpp:563: Actor# [1:7501626268174241326:2630] txid# 281474976710658 Bootstrap, UserSID: CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-05-07T09:01:12.951077Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1585: Actor# [1:7501626268174241326:2630] txid# 281474976710658 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P6 2025-05-07T09:01:12.951166Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1575: Actor# [1:7501626268174241326:2630] txid# 281474976710658 TEvNavigateKeySet requested from SchemeCache 2025-05-07T09:01:12.951324Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1408: Actor# [1:7501626268174241326:2630] txid# 281474976710658 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-05-07T09:01:12.951485Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1455: Actor# [1:7501626268174241326:2630] HANDLE EvNavigateKeySetResult, txid# 281474976710658 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] 
DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-05-07T09:01:12.951539Z node 1 :TX_PROXY DEBUG: schemereq.cpp:102: Actor# [1:7501626268174241326:2630] txid# 281474976710658 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976710658 TabletId# 72057594046644480} 2025-05-07T09:01:12.951931Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1310: Actor# [1:7501626268174241326:2630] txid# 281474976710658 HANDLE EvClientConnected 2025-05-07T09:01:12.955579Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 2025-05-07T09:01:12.964610Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1332: Actor# [1:7501626268174241326:2630] txid# 281474976710658 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976710658} 2025-05-07T09:01:12.964754Z node 1 :TX_PROXY DEBUG: schemereq.cpp:543: Actor# [1:7501626268174241326:2630] txid# 281474976710658 SEND to# [1:7501626268174241325:2337] Source {TEvProposeTransactionStatus txid# 281474976710658 Status# 53} 2025-05-07T09:01:12.965817Z node 1 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:469: SchemeBoardUpdate /Root 2025-05-07T09:01:12.965926Z node 1 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:498: Can't update SecurityState for /Root - no PublicKeys 2025-05-07T09:01:12.965938Z node 1 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:469: SchemeBoardUpdate /Root 2025-05-07T09:01:12.966000Z node 1 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:498: Can't update SecurityState for /Root - no PublicKeys 2025-05-07T09:01:13.054283Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3098: StateInit, received event# 268828672, Sender [1:7501626268174241369:2667], Recipient [1:7501626272469208777:2341]: NKikimr::TEvTablet::TEvBoot 2025-05-07T09:01:13.072937Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3098: StateInit, received event# 268828672, Sender [1:7501626268174241370:2668], Recipient [1:7501626272469208787:2344]: NKikimr::TEvTablet::TEvBoot 2025-05-07T09:01:13.075390Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3098: StateInit, received event# 268828672, Sender [1:7501626268174241371:2669], Recipient [1:7501626272469208782:2342]: NKikimr::TEvTablet::TEvBoot 2025-05-07T09:01:13.076162Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3098: StateInit, received event# 268828672, Sender [1:7501626268174241372:2670], Recipient [1:7501626272469208783:2343]: NKikimr::TEvTablet::TEvBoot 2025-05-07T09:01:13.083334Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3098: StateInit, received event# 268828672, Sender [1:7501626268174241362:2660], Recipient [1:7501626272469208829:2346]: NKikimr::TEvTablet::TEvBoot 2025-05-07T09:01:13.084054Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3098: StateInit, received event# 268828672, Sender [1:7501626268174241363:2661], Recipient [1:7501626272469208857:2352]: NKikimr::TEvTablet::TEvBoot 2025-05-07T09:01:13.084585Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3098: StateInit, received event# 268828672, Sender [1:7501626268174241377:2675], Recipient [1:7501626272469208858:2353]: NKikimr::TEvTablet::TEvBoot 2025-05-07T09:01:13.085016Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3098: StateInit, received event# 268828673, Sender [1:7501626268174241369:2667], Recipient [1:7501626272469208777:2341]: 
NKikimr::TEvTablet::TEvRestored 2025-05-07T09:01:13.085496Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037900 actor [1:7501626272469208777:2341] 2025-05-07T09:01:13.085769Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-05-07T09:01:13.091420Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3098: StateInit, received event# 268828672, Sender [1:7501626268174241373:2671], Recipient [1:7501626272469208849:2348]: NKikimr::TEvTablet::TEvBoot 2025-05-07T09:01:13.092150Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3098: StateInit, received event# 268828672, Sender [1:7501626268174241364:2662], Recipient [1:7501626272469208860:2355]: NKikimr::TEvTablet::TEvBoot 2025-05-07T09:01:13.092621Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3098: StateInit, received event# 268828673, Sender [1:7501626268174241370:2668], Recipient [1:7501626272469208787:2344]: NKikimr::TEvTablet::TEvRestored 2025-05-07T09:01:13.092981Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037899 actor [1:7501626272469208787:2344] 2025-05-07T09:01:13.093178Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-05-07T09:01:13.113821Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3098: StateInit, received event# 268828673, Sender [1:7501626268174241372:2670], Recipient [1:7501626272469208783:2343]: NKikimr::TEvTablet::TEvRestored 2025-05-07T09:01:13.114333Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnA ... tch end ---- ---- batch start ---- [[11u];[22u];["A"]] ---- batch end ---- ---- batch start ---- [[12u];[24u];["A"]] ---- batch end ---- ---- batch start ---- [[13u];[26u];["A"]] ---- batch end ---- ---- batch start ---- [[14u];[28u];["A"]] ---- batch end ---- ---- batch start ---- [[15u];[30u];["A"]] ---- batch end ---- ---- batch start ---- [[16u];[32u];["A"]] ---- batch end ---- ---- batch start ---- [[17u];[34u];["A"]] ---- batch end ---- ---- batch start ---- [[18u];[36u];["A"]] ---- batch end ---- ---- batch start ---- [[19u];[38u];["A"]] ---- batch end ---- ---- batch start ---- [[20u];[40u];["A"]] ---- batch end ---- ---- batch start ---- [[21u];[42u];["A"]] ---- batch end ---- ---- batch start ---- [[22u];[44u];["A"]] ---- batch end ---- ---- batch start ---- [[23u];[46u];["A"]] ---- batch end ---- ---- batch start ---- [[24u];[48u];["A"]] ---- batch end ---- ---- batch start ---- [[25u];[50u];["A"]] ---- batch end ---- ---- batch start ---- [[26u];[52u];["A"]] ---- batch end ---- ---- batch start ---- [[27u];[54u];["A"]] ---- batch end ---- ---- batch start ---- [[28u];[56u];["A"]] ---- batch end ---- ---- batch start ---- [[29u];[58u];["A"]] ---- batch end ---- ---- batch start ---- [[30u];[60u];["A"]] ---- batch end ---- ---- batch start ---- [[31u];[62u];["A"]] ---- batch end ---- ---- batch start ---- [[32u];[64u];["A"]] ---- batch end ---- ---- batch start ---- [[33u];[66u];["A"]] ---- batch end ---- ---- batch start ---- [[34u];[68u];["A"]] ---- batch end ---- ---- batch start ---- [[35u];[70u];["A"]] ---- batch end ---- ---- batch start ---- [[36u];[72u];["A"]] ---- batch end ---- ---- batch start ---- [[37u];[74u];["A"]] ---- batch end ---- ---- batch start ---- [[38u];[76u];["A"]] ---- batch end ---- ---- batch start ---- [[39u];[78u];["A"]] ---- batch end ---- ---- batch start ---- [[40u];[80u];["A"]] ---- batch end ---- ---- batch start ---- [[41u];[82u];["A"]] ---- batch end ---- ---- batch start ---- [[42u];[84u];["A"]] ---- batch end ---- 
---- batch start ---- [[43u];[86u];["A"]] ---- batch end ---- ---- batch start ---- [[44u];[88u];["A"]] ---- batch end ---- ---- batch start ---- [[45u];[90u];["A"]] ---- batch end ---- ---- batch start ---- [[46u];[92u];["A"]] ---- batch end ---- ---- batch start ---- [[47u];[94u];["A"]] ---- batch end ---- ---- batch start ---- [[48u];[96u];["A"]] ---- batch end ---- ---- batch start ---- [[49u];[98u];["A"]] ---- batch end ---- ---- batch start ---- [[50u];[100u];["A"]] ---- batch end ---- ---- batch start ---- [[51u];[102u];["A"]] ---- batch end ---- ---- batch start ---- [[52u];[104u];["A"]] ---- batch end ---- ---- batch start ---- [[53u];[106u];["A"]] ---- batch end ---- ---- batch start ---- [[54u];[108u];["A"]] ---- batch end ---- ---- batch start ---- [[55u];[110u];["A"]] ---- batch end ---- ---- batch start ---- [[56u];[112u];["A"]] ---- batch end ---- ---- batch start ---- [[57u];[114u];["A"]] ---- batch end ---- ---- batch start ---- [[58u];[116u];["A"]] ---- batch end ---- ---- batch start ---- [[59u];[118u];["A"]] ---- batch end ---- ---- batch start ---- [[60u];[120u];["A"]] ---- batch end ---- ---- batch start ---- [[61u];[122u];["A"]] ---- batch end ---- ---- batch start ---- [[62u];[124u];["A"]] ---- batch end ---- ---- batch start ---- [[63u];[126u];["A"]] ---- batch end ---- ---- batch start ---- [[64u];[128u];["A"]] ---- batch end ---- ---- batch start ---- [[65u];[130u];["A"]] ---- batch end ---- ---- batch start ---- [[66u];[132u];["A"]] ---- batch end ---- ---- batch start ---- [[67u];[134u];["A"]] ---- batch end ---- ---- batch start ---- [[68u];[136u];["A"]] ---- batch end ---- ---- batch start ---- [[69u];[138u];["A"]] ---- batch end ---- ---- batch start ---- [[70u];[140u];["A"]] ---- batch end ---- ---- batch start ---- [[71u];[142u];["A"]] ---- batch end ---- ---- batch start ---- [[72u];[144u];["A"]] ---- batch end ---- ---- batch start ---- [[73u];[146u];["A"]] ---- batch end ---- ---- batch start ---- [[74u];[148u];["A"]] ---- batch end ---- ---- batch start ---- [[75u];[150u];["A"]] ---- batch end ---- ---- batch start ---- [[76u];[152u];["A"]] ---- batch end ---- ---- batch start ---- [[77u];[154u];["A"]] ---- batch end ---- ---- batch start ---- [[78u];[156u];["A"]] ---- batch end ---- ---- batch start ---- [[79u];[158u];["A"]] ---- batch end ---- ---- batch start ---- [[80u];[160u];["A"]] ---- batch end ---- ---- batch start ---- [[81u];[162u];["A"]] ---- batch end ---- ---- batch start ---- [[82u];[164u];["A"]] ---- batch end ---- ---- batch start ---- [[83u];[166u];["A"]] ---- batch end ---- ---- batch start ---- [[84u];[168u];["A"]] ---- batch end ---- ---- batch start ---- [[85u];[170u];["A"]] ---- batch end ---- ---- batch start ---- [[86u];[172u];["A"]] ---- batch end ---- ---- batch start ---- [[87u];[174u];["A"]] ---- batch end ---- ---- batch start ---- [[88u];[176u];["A"]] ---- batch end ---- ---- batch start ---- [[89u];[178u];["A"]] ---- batch end ---- ---- batch start ---- [[90u];[180u];["A"]] ---- batch end ---- ---- batch start ---- [[91u];[182u];["A"]] ---- batch end ---- ---- batch start ---- [[92u];[184u];["A"]] ---- batch end ---- ---- batch start ---- [[93u];[186u];["A"]] ---- batch end ---- ---- batch start ---- [[94u];[188u];["A"]] ---- batch end ---- ---- batch start ---- [[95u];[190u];["A"]] ---- batch end ---- ---- batch start ---- [[96u];[192u];["A"]] ---- batch end ---- ---- batch start ---- [[97u];[194u];["A"]] ---- batch end ---- ---- batch start ---- [[98u];[196u];["A"]] ---- batch end ---- ---- batch start ---- 
[[99u];[198u];["A"]] ---- batch end ---- 2025-05-07T09:01:48.321762Z node 10 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269553190, Sender [10:7501626421457364645:2412], Recipient [10:7501626399982526324:2347]: NKikimrTxDataShard.TEvDiscardVolatileSnapshotRequest OwnerId: 72057594046644480 PathId: 2 Step: 1746608508218 TxId: 281474976715678 2025-05-07T09:01:48.322063Z node 10 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269553190, Sender [10:7501626421457364645:2412], Recipient [10:7501626399982526340:2350]: NKikimrTxDataShard.TEvDiscardVolatileSnapshotRequest OwnerId: 72057594046644480 PathId: 2 Step: 1746608508218 TxId: 281474976715678 2025-05-07T09:01:48.322241Z node 10 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269553190, Sender [10:7501626421457364645:2412], Recipient [10:7501626399982526345:2352]: NKikimrTxDataShard.TEvDiscardVolatileSnapshotRequest OwnerId: 72057594046644480 PathId: 2 Step: 1746608508218 TxId: 281474976715678 2025-05-07T09:01:48.322386Z node 10 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269553190, Sender [10:7501626421457364645:2412], Recipient [10:7501626399982526301:2344]: NKikimrTxDataShard.TEvDiscardVolatileSnapshotRequest OwnerId: 72057594046644480 PathId: 2 Step: 1746608508218 TxId: 281474976715678 2025-05-07T09:01:48.322525Z node 10 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269553190, Sender [10:7501626421457364645:2412], Recipient [10:7501626399982526300:2343]: NKikimrTxDataShard.TEvDiscardVolatileSnapshotRequest OwnerId: 72057594046644480 PathId: 2 Step: 1746608508218 TxId: 281474976715678 2025-05-07T09:01:48.322659Z node 10 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269553190, Sender [10:7501626421457364645:2412], Recipient [10:7501626399982526325:2348]: NKikimrTxDataShard.TEvDiscardVolatileSnapshotRequest OwnerId: 72057594046644480 PathId: 2 Step: 1746608508218 TxId: 281474976715678 2025-05-07T09:01:48.322782Z node 10 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269553190, Sender [10:7501626421457364645:2412], Recipient [10:7501626399982526311:2345]: NKikimrTxDataShard.TEvDiscardVolatileSnapshotRequest OwnerId: 72057594046644480 PathId: 2 Step: 1746608508218 TxId: 281474976715678 2025-05-07T09:01:48.322914Z node 10 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269553190, Sender [10:7501626421457364645:2412], Recipient [10:7501626399982526323:2346]: NKikimrTxDataShard.TEvDiscardVolatileSnapshotRequest OwnerId: 72057594046644480 PathId: 2 Step: 1746608508218 TxId: 281474976715678 2025-05-07T09:01:48.323036Z node 10 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269553190, Sender [10:7501626421457364645:2412], Recipient [10:7501626399982526343:2351]: NKikimrTxDataShard.TEvDiscardVolatileSnapshotRequest OwnerId: 72057594046644480 PathId: 2 Step: 1746608508218 TxId: 281474976715678 2025-05-07T09:01:48.323167Z node 10 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269553190, Sender [10:7501626421457364645:2412], Recipient [10:7501626399982526329:2349]: NKikimrTxDataShard.TEvDiscardVolatileSnapshotRequest OwnerId: 72057594046644480 PathId: 2 Step: 1746608508218 TxId: 281474976715678 2025-05-07T09:01:48.328156Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0000a2080] received request Name# SchemeOperation ok# false data# peer# current inflight# 0 2025-05-07T09:01:48.328429Z node 10 :GRPC_SERVER 
DEBUG: grpc_server.cpp:283: [0x51a00009f680] received request Name# SchemeOperationStatus ok# false data# peer# current inflight# 0 2025-05-07T09:01:48.328629Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000085e80] received request Name# SchemeDescribe ok# false data# peer# current inflight# 0 2025-05-07T09:01:48.328844Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000023a80] received request Name# ChooseProxy ok# false data# peer# current inflight# 0 2025-05-07T09:01:48.329072Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0001a4680] received request Name# PersQueueRequest ok# false data# peer# current inflight# 0 2025-05-07T09:01:48.329270Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000109880] received request Name# SchemeInitRoot ok# false data# peer# current inflight# 0 2025-05-07T09:01:48.329450Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000171c80] received request Name# ResolveNode ok# false data# peer# current inflight# 0 2025-05-07T09:01:48.329629Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000024080] received request Name# FillNode ok# false data# peer# current inflight# 0 2025-05-07T09:01:48.329826Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000105080] received request Name# DrainNode ok# false data# peer# current inflight# 0 2025-05-07T09:01:48.330061Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000171680] received request Name# BlobStorageConfig ok# false data# peer# current inflight# 0 2025-05-07T09:01:48.330252Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0000a5080] received request Name# HiveCreateTablet ok# false data# peer# current inflight# 0 2025-05-07T09:01:48.330443Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0000dbc80] received request Name# TestShardControl ok# false data# peer# current inflight# 0 2025-05-07T09:01:48.330620Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0000ffc80] received request Name# RegisterNode ok# false data# peer# current inflight# 0 2025-05-07T09:01:48.330818Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000109280] received request Name# CmsRequest ok# false data# peer# current inflight# 0 2025-05-07T09:01:48.331022Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0001a5e80] received request Name# ConsoleRequest ok# false data# peer# current inflight# 0 2025-05-07T09:01:48.331216Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000106280] received request Name# InterconnectDebug ok# false data# peer# current inflight# 0 2025-05-07T09:01:48.331397Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000105c80] received request Name# TabletStateRequest ok# false data# peer# current inflight# 0 >> TGRpcYdbTest::RemoveNotExistedDirectory ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> YdbTableBulkUpsertOlap::UpsertArrowBatch_DataShard [GOOD] Test command err: 2025-05-07T09:01:12.312713Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501626270273232472:2279];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:12.312803Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00286f/r3tmp/tmp34rvHE/pdisk_1.dat 2025-05-07T09:01:12.865018Z node 1 :HIVE WARN: 
node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:01:12.865134Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:01:12.870748Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:01:12.878397Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 28948, node 1 2025-05-07T09:01:13.151967Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:01:13.151987Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:01:13.151993Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:01:13.152139Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:17908 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:01:13.765311Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... TClient is connected to server localhost:17908 2025-05-07T09:01:14.233523Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnStore, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T09:01:14.367988Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7501626278863167936:2322];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-05-07T09:01:14.400077Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7501626278863167936:2322];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-05-07T09:01:14.400277Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 72075186224037890 2025-05-07T09:01:14.409531Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7501626278863167936:2322];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-05-07T09:01:14.410374Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7501626278863167936:2322];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-05-07T09:01:14.410750Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7501626278863167936:2322];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-05-07T09:01:14.410934Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7501626278863167936:2322];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-05-07T09:01:14.411065Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7501626278863167936:2322];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-05-07T09:01:14.411213Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7501626278863167936:2322];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanInsertionDedup; 2025-05-07T09:01:14.411337Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7501626278863167936:2322];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-05-07T09:01:14.411453Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7501626278863167936:2322];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-05-07T09:01:14.411585Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7501626278863167936:2322];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-05-07T09:01:14.411691Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7501626278863167936:2322];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-05-07T09:01:14.411822Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037890;self_id=[1:7501626278863167936:2322];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-05-07T09:01:14.411932Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7501626278863167936:2322];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-05-07T09:01:14.418739Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7501626278863167941:2323];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-05-07T09:01:14.452018Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7501626278863167941:2323];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-05-07T09:01:14.452222Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 72075186224037889 2025-05-07T09:01:14.459828Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7501626278863167941:2323];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-05-07T09:01:14.459915Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7501626278863167941:2323];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-05-07T09:01:14.460201Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7501626278863167941:2323];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-05-07T09:01:14.460332Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7501626278863167941:2323];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-05-07T09:01:14.460461Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7501626278863167941:2323];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-05-07T09:01:14.460598Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7501626278863167941:2323];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanInsertionDedup; 2025-05-07T09:01:14.460724Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7501626278863167941:2323];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-05-07T09:01:14.460836Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7501626278863167941:2323];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-05-07T09:01:14.460947Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037889;self_id=[1:7501626278863167941:2323];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-05-07T09:01:14.461062Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7501626278863167941:2323];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-05-07T09:01:14.461180Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7501626278863167941:2323];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-05-07T09:01:14.461305Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7501626278863167941:2323];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-05-07T09:01:14.464602Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7501626278863167932:2320];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-05-07T09:01:14.501400Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7501626278863167932:2320];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-05-07T09:01:14.501540Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 72075186224037891 2025-05-07T09:01:14.507753Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7501626278863167932:2320];tablet_id=72075186224037891 ... 
beats: at tablet# 72075186224037888 2025-05-07T09:01:47.032403Z node 13 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-05-07T09:01:47.040240Z node 13 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1746608507028} 2025-05-07T09:01:47.040306Z node 13 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T09:01:47.040355Z node 13 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T09:01:47.040379Z node 13 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-05-07T09:01:47.040420Z node 13 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-05-07T09:01:47.040483Z node 13 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1746608507028 : 281474976715658] from 72075186224037888 at tablet 72075186224037888 send result to client [13:7501626392126645468:2203], exec latency: 1 ms, propose latency: 11 ms 2025-05-07T09:01:47.040531Z node 13 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715658 state Ready TxInFly 0 2025-05-07T09:01:47.040575Z node 13 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:01:47.040699Z node 13 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1746608507049 2025-05-07T09:01:47.052216Z node 13 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715658 datashard 72075186224037888 state Ready 2025-05-07T09:01:47.052299Z node 13 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=8984;columns=10; 2025-05-07T09:01:47.096252Z node 13 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [13:7501626417896450126:2742], serverId# [13:7501626417896450127:2743], sessionId# [0:0:0] 2025-05-07T09:01:47.096432Z node 13 :TX_DATASHARD INFO: datashard__op_rows.cpp:26: TTxDirectBase(36) Execute: at tablet# 72075186224037888 2025-05-07T09:01:47.107183Z node 13 :TX_DATASHARD INFO: datashard__op_rows.cpp:80: TTxDirectBase(36) Complete: at tablet# 72075186224037888 2025-05-07T09:01:47.107234Z node 13 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 SUCCESS Upsert done: 0.044390s 2025-05-07T09:01:47.128361Z node 13 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [13:7501626417896450143:2350], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:47.128462Z node 13 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [13:7501626417896450135:2347], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:47.128591Z node 13 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:47.138915Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480 2025-05-07T09:01:47.163168Z node 13 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-05-07T09:01:47.186064Z node 13 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-05-07T09:01:47.206176Z node 13 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [13:7501626417896450149:2351], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-05-07T09:01:47.303513Z node 13 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [13:7501626417896450219:2802] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:01:47.447709Z node 13 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-05-07T09:01:47.447866Z node 13 :TX_DATASHARD DEBUG: check_snapshot_tx_unit.cpp:153: Prepared Snapshot transaction txId 281474976715661 at tablet 72075186224037888 2025-05-07T09:01:47.451076Z node 13 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-05-07T09:01:47.454212Z node 13 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715661 at step 1746608507497 at tablet 72075186224037888 { Transactions { TxId: 281474976715661 AckTo { RawX1: 0 RawX2: 0 } } Step: 1746608507497 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2025-05-07T09:01:47.454244Z node 13 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:01:47.454384Z node 13 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T09:01:47.454400Z node 13 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-05-07T09:01:47.454422Z node 13 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1746608507497:281474976715661] in PlanQueue unit at 72075186224037888 2025-05-07T09:01:47.454578Z node 13 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1746608507497:281474976715661 keys extracted: 0 2025-05-07T09:01:47.454914Z node 13 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-05-07T09:01:47.456979Z node 13 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1746608507497} 2025-05-07T09:01:47.457026Z node 13 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T09:01:47.457072Z node 13 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1746608507497 : 281474976715661] from 72075186224037888 at tablet 72075186224037888 send result to client [13:7501626417896450254:2820], exec latency: 0 ms, propose latency: 2 ms 2025-05-07T09:01:47.457107Z node 13 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:01:47.460474Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715662. Ctx: { TraceId: 01jtmznj7kbwgjhzmfd8nvrsz1, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=MzZiZWNlOS1hNTRlMDg1OC04ZmU2NGIzNS00OTU1NjkzYQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-05-07T09:01:47.463682Z node 13 :TX_DATASHARD INFO: datashard__kqp_scan.cpp:214: Start scan, at: [13:7501626417896450281:2164], tablet: [13:7501626413601482708:2341], scanId: 4, table: /Root/LogsX, gen: 1, deadline: 2025-05-07T09:11:47.463414Z 2025-05-07T09:01:47.463849Z node 13 :TX_DATASHARD DEBUG: datashard__kqp_scan.cpp:109: Got ScanDataAck, at: [13:7501626417896450281:2164], scanId: 4, table: /Root/LogsX, gen: 1, tablet: [13:7501626413601482708:2341], freeSpace: 8388608;limits:(bytes=0;chunks=0); 2025-05-07T09:01:47.463878Z node 13 :TX_DATASHARD DEBUG: datashard__kqp_scan.cpp:124: Wakeup driver at: [13:7501626417896450281:2164] 2025-05-07T09:01:47.465275Z node 13 :TX_DATASHARD DEBUG: datashard__kqp_scan.cpp:311: Range 0 of 1 exhausted: try next one. table: /Root/LogsX range: [(Utf8 : NULL, Timestamp : NULL) ; ()) next range: 2025-05-07T09:01:47.465301Z node 13 :TX_DATASHARD DEBUG: datashard__kqp_scan.cpp:226: TableRanges is over, at: [13:7501626417896450281:2164], scanId: 4, table: /Root/LogsX 2025-05-07T09:01:47.465333Z node 13 :TX_DATASHARD DEBUG: datashard__kqp_scan.cpp:340: Finish scan, at: [13:7501626417896450281:2164], scanId: 4, table: /Root/LogsX, reason: 0, abortEvent: 2025-05-07T09:01:47.465365Z node 13 :TX_DATASHARD DEBUG: datashard__kqp_scan.cpp:453: Send ScanData, from: [13:7501626417896450281:2164], to: [13:7501626417896450278:2364], scanId: 4, table: /Root/LogsX, bytes: 11000, rows: 100, page faults: 0, finished: 1, pageFault: 0 2025-05-07T09:01:47.465718Z node 13 :TX_DATASHARD DEBUG: datashard.cpp:4471: FullScan complete at 72075186224037888 2025-05-07T09:01:47.465811Z node 13 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T09:01:47.465828Z node 13 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-05-07T09:01:47.465849Z node 13 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-05-07T09:01:47.465885Z node 13 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T09:01:47.477195Z node 13 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1746608507497, txId: 281474976715661] shutting down 2025-05-07T09:01:48.600839Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715663. Ctx: { TraceId: 01jtmznjkk0vrnnq49hsdntrzh, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=ZmQzM2Q3NS0xNmQxNjEzOS0zYTJjZTNiOC05ZDQ4MzEwOA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root SUCCESS count returned 100 rows Negative (wrong format): BAD_REQUEST Negative (wrong data): SCHEME_ERROR FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=8016;columns=9; 2025-05-07T09:01:48.714940Z node 13 :ARROW_HELPER ERROR: log.cpp:784: fline=arrow_helpers.cpp:142;event=cannot_parse;message=Invalid: Ran out of field metadata, likely malformed;schema_columns_count=10;schema_columns=timestamp,resource_type,resource_id,uid,level,message,json_payload,ingested_at,saved_at,request_id; Negative (less columns): BAD_REQUEST FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=8984;columns=10; 2025-05-07T09:01:48.736285Z node 13 :ARROW_HELPER ERROR: log.cpp:784: fline=arrow_helpers.cpp:142;event=cannot_parse;message=Serialization error: batch is not valid: Invalid: Offsets buffer size (bytes): 400 isn't large enough for length: 100;schema_columns_count=10;schema_columns=timestamp,resource_type,resource_id,uid,level,message,json_payload,ingested_at,saved_at,request_id; Negative (reordered columns): BAD_REQUEST
>> YdbYqlClient::CheckDefaultTableSettings2 [GOOD]
>> TGRpcYdbTest::CreateYqlSession [GOOD]
>> TGRpcAuthentication::InvalidPassword [GOOD]
>> TGRpcYdbTest::CreateYqlSessionExecuteQuery
>> YdbYqlClient::TestYqlLongSessionPrepareError [GOOD]
>> TGRpcNewCoordinationClient::SessionCreateUpdateDeleteSemaphore [GOOD]
>> YdbYqlClient::TestYqlLongSessionMultipleErrors
>> TGRpcAuthentication::DisableLoginAuthentication
>> YdbTableBulkUpsert::Overload [GOOD]
>> YdbYqlClient::ConnectDbAclIsOffWhenTokenIsOptionalAndNull [GOOD]
>> YdbYqlClient::ColumnFamiliesWithStorageAndIndex
>> TYqlDecimalTests::NegativeValues [GOOD]
>> YdbYqlClient::TestReadTableMultiShardWholeTableUseSnapshot [GOOD]
>> YdbTableBulkUpsert::Errors [GOOD]
>> YdbYqlClient::CreateTableWithMESettings [GOOD]
>> YdbScripting::MultiResults [GOOD]
>> TTableProfileTests::OverwritePartitioningPolicy [GOOD]
>> YdbYqlClient::SecurityTokenAuthMultiTenantSDK [GOOD]
|91.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/ut/olap/ydb-core-kqp-ut-olap
>> TGRpcNewCoordinationClient::MultipleSessionsSemaphores [GOOD]
>> YdbYqlClient::TestBusySession [GOOD]
|91.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/olap/ydb-core-kqp-ut-olap
|91.8%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/olap/ydb-core-kqp-ut-olap
|91.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/async_replication/py3test >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_ttl_Datetime-pk_types11-all_types11-index11-Datetime--] [GOOD]
>> YdbYqlClient::TestReadTableOneBatch [GOOD]
>> YdbS3Internal::BadRequests [GOOD]
>> YdbTableBulkUpsert::Limits
>> YdbScripting::Params
>> YdbYqlClient::TestReadTableMultiShardWithDescribe
>> TGRpcYdbTest::RemoveNotExistedDirectory [GOOD]
>> Secret::Validation [FAIL]
>> YdbYqlClient::CreateAndAltertTableWithCompactionPolicy [GOOD]
>> TGRpcNewClient::CreateAlterUpsertDrop [GOOD]
>> YdbYqlClient::RetryOperationLimitedDuration [GOOD]
>> YdbLogStore::Dirs [GOOD]
>> YdbYqlClient::TestConstraintViolation
>> YdbTableBulkUpsert::RetryOperationSync
>> YdbYqlClient::TestReadTableNotNullBorder
>> TYqlDecimalTests::DecimalKey
>> TTableProfileTests::OverwriteStoragePolicy
>> TGRpcNewCoordinationClient::SessionAcquireAcceptedCallback
>> YdbYqlClient::SecurityTokenAuthMultiTenantSDKAsync
|91.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/async_replication/py3test >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_index_1__SYNC-pk_types3-all_types3-index3---SYNC] [GOOD]
>> TGRpcYdbTest::SdkUuid
>> YdbScripting::BasicV0
>> TGRpcNewClient::InMemoryTables
>> YdbLogStore::LogTable
>> YdbYqlClient::CreateAndAltertTableWithKeyBloomFilter
>> TGRpcYdbTest::CreateYqlSessionExecuteQuery [GOOD]
>> TGRpcYdbTest::DeleteFromAfterCreate
>> TGRpcAuthentication::DisableLoginAuthentication [GOOD]
>> KqpIndexes::UpsertWithNullKeysSimple [GOOD]
>> ReadRows::KillTabletDuringRead
------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> YdbYqlClient::CreateTableWithMESettings [GOOD]
Test command err: 2025-05-07T09:01:20.087133Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501626300908194593:2209];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:20.087274Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00285f/r3tmp/tmpRqLorj/pdisk_1.dat 2025-05-07T09:01:21.128892Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T09:01:21.233850Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:01:21.240717Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:01:21.258403Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:01:21.261180Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 15740, node 1 2025-05-07T09:01:21.530662Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:01:21.530684Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:01:21.530695Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:01:21.530795Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8017 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... 
(TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:01:22.164068Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:01:25.083734Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501626300908194593:2209];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:25.083813Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:01:25.104445Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-05-07T09:01:27.760461Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7501626334109325365:2076];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:27.760516Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00285f/r3tmp/tmpmoT1Fb/pdisk_1.dat 2025-05-07T09:01:27.944716Z node 4 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 25006, node 4 2025-05-07T09:01:28.049183Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:01:28.049206Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:01:28.049217Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:01:28.049358Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-05-07T09:01:28.076109Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:01:28.076224Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:01:28.127751Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:19068 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:01:28.341137Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:01:31.299189Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 2025-05-07T09:01:31.495407Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 2025-05-07T09:01:31.587733Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 2025-05-07T09:01:31.646945Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 2025-05-07T09:01:33.867139Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7501626358994465963:2163];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:33.868659Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00285f/r3tmp/tmpeeX2O3/pdisk_1.dat 2025-05-07T09:01:34.227355Z node 7 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:01:34.264317Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:01:34.264432Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:01:34.267533Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 27245, node 7 2025-05-07T09:01:34.370339Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:01:34.370371Z 
node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:01:34.370379Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:01:34.370518Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:20227 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:01:34.731579Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:01:37.662545Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-05-07T09:01:37.877509Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 2025-05-07T09:01:37.941854Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 2025-05-07T09:01:40.096138Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7501626389682906333:2074];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:40.096213Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00285f/r3tmp/tmpmD3ti2/pdisk_1.dat 2025-05-07T09:01:40.427584Z node 10 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:01:40.477707Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:01:40.477797Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:01:40.483685Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 4660, node 10 
2025-05-07T09:01:40.693025Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:01:40.693076Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:01:40.693093Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:01:40.693247Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:14451 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:01:41.070377Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T09:01:45.010758Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-05-07T09:01:45.102258Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[10:7501626389682906333:2074];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:45.102338Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:01:45.154252Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 2025-05-07T09:01:45.164601Z node 10 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 10, TabletId: 72075186224037888 not found 2025-05-07T09:01:45.164631Z node 10 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 10, TabletId: 72075186224037888 not found 2025-05-07T09:01:47.277358Z node 13 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[13:7501626419362169363:2075];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:47.277785Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00285f/r3tmp/tmpFqlk87/pdisk_1.dat 2025-05-07T09:01:47.609234Z node 13 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:01:47.688473Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:01:47.688588Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:01:47.713669Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 30006, node 13 2025-05-07T09:01:47.907477Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:01:47.907502Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:01:47.907512Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:01:47.907665Z node 13 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:14540 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:01:48.392787Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:01:52.272015Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[13:7501626419362169363:2075];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:52.272097Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:01:52.768897Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480
>> YdbYqlClient::TestYqlLongSessionMultipleErrors [GOOD]
>> YdbYqlClient::SecurityTokenAuthMultiTenantSDKAsync [GOOD]
>> YdbYqlClient::SimpleColumnFamilies
>> YdbYqlClient::TestReadTableMultiShardWithDescribe [GOOD]
|91.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_pq_reboots/ydb-core-tx-schemeshard-ut_pq_reboots
|91.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tools/query_replay_yt/query_replay_yt
>> YdbYqlClient::TestReadTableMultiShardWithDescribeAndRowLimit
>> YdbYqlClient::ColumnFamiliesWithStorageAndIndex [GOOD]
|91.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/ut/scheme/ydb-core-kqp-ut-scheme
|91.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_move_reboots/ydb-core-tx-schemeshard-ut_move_reboots
>> TGRpcNewCoordinationClient::SessionAcquireAcceptedCallback [GOOD]
>> TYqlDecimalTests::DecimalKey [GOOD]
>> YdbYqlClient::TestConstraintViolation [GOOD]
>> YdbYqlClient::TestReadTableNotNullBorder [GOOD]
>> YdbTableBulkUpsert::RetryOperationSync [GOOD]
>> TGRpcYdbTest::SdkUuid [GOOD]
>> YdbScripting::Params [GOOD]
>> YdbTableBulkUpsert::DataValidation
|91.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_pq_reboots/ydb-core-tx-schemeshard-ut_pq_reboots
>> test_async_replication.py::TestAsyncReplication::test_async_replication[table_ttl_Date-pk_types13-all_types13-index13-Date--] [GOOD]
>> TGRpcNewClient::InMemoryTables [GOOD]
>> YdbScripting::BasicV0 [GOOD]
>> YdbYqlClient::CreateAndAltertTableWithKeyBloomFilter [GOOD]
>> TGRpcYdbTest::SdkUuidViaParams
>> TGRpcYdbTest::DeleteFromAfterCreate [GOOD]
>> YdbYqlClient::ColumnFamiliesDescriptionWithStorageAndIndex
>> ReadRows::KillTabletDuringRead [GOOD]
>> YdbTableBulkUpsert::RetryOperation
>> TTableProfileTests::OverwriteStoragePolicy [GOOD]
>> TopicAutoscaling::PartitionSplit_AutosplitByLoad [GOOD]
>> TTableProfileTests::OverwriteCachingPolicy
>> YdbScripting::BasicV1
>> YdbYqlClient::TestReadTableNotNullBorder2
>> YdbTableBulkUpsert::DataValidation [GOOD]
>> YdbYqlClient::TestReadTableMultiShardWithDescribeAndRowLimit [GOOD]
>> TopicAutoscaling::PartitionSplit_AutosplitByLoad_AfterAlter
>> YdbTableBulkUpsert::AsyncIndexShouldFail
>> test_async_replication.py::TestAsyncReplication::test_async_replication[table_index_1__ASYNC-pk_types5-all_types5-index5---ASYNC] [GOOD]
|91.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tools/query_replay_yt/query_replay_yt
|91.8%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_pq_reboots/ydb-core-tx-schemeshard-ut_pq_reboots
|91.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/scheme/ydb-core-kqp-ut-scheme
|91.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_move_reboots/ydb-core-tx-schemeshard-ut_move_reboots
|91.8%| [LD] {RESULT} $(B)/ydb/tools/query_replay_yt/query_replay_yt
|91.8%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/scheme/ydb-core-kqp-ut-scheme
|91.8%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_move_reboots/ydb-core-tx-schemeshard-ut_move_reboots
|91.8%| [TA] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_ttl/test-results/unittest/{meta.json ... results_accumulator.log}
------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> TYqlDecimalTests::DecimalKey [GOOD]
Test command err: 2025-05-07T09:01:23.621405Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501626315068211962:2078];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:23.621543Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00285d/r3tmp/tmpbXvtni/pdisk_1.dat 2025-05-07T09:01:24.233078Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:01:24.233153Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:01:24.241373Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:01:24.263546Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 15824, node 1 2025-05-07T09:01:24.408748Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:01:24.408775Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:01:24.408786Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:01:24.408904Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:1616 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:01:24.766171Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:01:27.710830Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 2025-05-07T09:01:29.630106Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7501626342034300907:2176];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:29.630568Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00285d/r3tmp/tmpIvZbdR/pdisk_1.dat 2025-05-07T09:01:29.792242Z node 4 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:01:29.817573Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:01:29.818083Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:01:29.822720Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 10015, node 4 2025-05-07T09:01:30.004151Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:01:30.004178Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:01:30.004185Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:01:30.004322Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:30541 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-05-07T09:01:30.192308Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-05-07T09:01:33.568739Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-05-07T09:01:35.870046Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7501626369073860144:2223];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:35.886866Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00285d/r3tmp/tmpVxXiSC/pdisk_1.dat 2025-05-07T09:01:35.988742Z node 7 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:01:36.050530Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:01:36.050628Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:01:36.055045Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 7027, node 7 2025-05-07T09:01:36.150067Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:01:36.150089Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:01:36.150097Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:01:36.150229Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3534 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:01:36.430150Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:01:38.934262Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-05-07T09:01:39.075604Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7501626386253730311:2340], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:39.075702Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:39.076270Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7501626386253730323:2343], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:39.080606Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480 2025-05-07T09:01:39.120551Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7501626386253730325:2344], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-05-07T09:01:39.177604Z node 7 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [7:7501626386253730409:2782] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges ... 1.254061Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480 2025-05-07T09:01:51.303554Z node 10 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [10:7501626436867980208:2352], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-05-07T09:01:51.383546Z node 10 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [10:7501626436867980282:2808] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:01:51.540631Z node 10 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710661. Ctx: { TraceId: 01jtmznp8a6ffhcamm1mdd6kb6, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=10&id=OWU2MTBkNzktNGFjMzY0MmMtMmY3MDg3NWQtNjBmNTJmNDU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:01:51.711579Z node 10 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710662. Ctx: { TraceId: 01jtmznpja91vserk4826wz2yr, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=10&id=OWU2MTBkNzktNGFjMzY0MmMtMmY3MDg3NWQtNjBmNTJmNDU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:01:51.896308Z node 10 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710663. Ctx: { TraceId: 01jtmznpqa36npfccc94g8nb3n, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=10&id=OWU2MTBkNzktNGFjMzY0MmMtMmY3MDg3NWQtNjBmNTJmNDU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:01:52.022801Z node 10 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710664. Ctx: { TraceId: 01jtmznpwz4bsdt1n9gwv3f1kb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=10&id=OWU2MTBkNzktNGFjMzY0MmMtMmY3MDg3NWQtNjBmNTJmNDU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:01:52.231873Z node 10 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710665. Ctx: { TraceId: 01jtmznq0x9m6vx9jvdkb47945, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=10&id=OWU2MTBkNzktNGFjMzY0MmMtMmY3MDg3NWQtNjBmNTJmNDU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-05-07T09:01:55.126070Z node 13 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[13:7501626451188640875:2074];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:55.126705Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00285d/r3tmp/tmpuImMfP/pdisk_1.dat 2025-05-07T09:01:55.372081Z node 13 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:01:55.603966Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:01:55.611819Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:01:55.628519Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 7106, node 13 2025-05-07T09:01:55.908675Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:01:55.908711Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:01:55.908723Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:01:55.908887Z node 13 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:11372 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:01:56.639017Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:01:59.930022Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-05-07T09:02:00.052630Z node 13 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [13:7501626472663478558:2341], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:02:00.052754Z node 13 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:02:00.053262Z node 13 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [13:7501626472663478570:2344], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:02:00.059437Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480 2025-05-07T09:02:00.104350Z node 13 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [13:7501626472663478572:2345], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-05-07T09:02:00.126202Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[13:7501626451188640875:2074];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:02:00.126299Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:02:00.170401Z node 13 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [13:7501626472663478650:2804] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:02:00.276579Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715661. Ctx: { TraceId: 01jtmznyvj7h49s72k24wzmbfh, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=OGVhOWQzNzctNjhlYjAwOTUtYWZiYzExNTktZjlhNWRlMGQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:02:00.436700Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715662. Ctx: { TraceId: 01jtmznz3a6ppbfa6947sp80dr, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=OGVhOWQzNzctNjhlYjAwOTUtYWZiYzExNTktZjlhNWRlMGQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:02:00.560904Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715663. Ctx: { TraceId: 01jtmznz7wcb03nwp6ep3hj361, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=OGVhOWQzNzctNjhlYjAwOTUtYWZiYzExNTktZjlhNWRlMGQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:02:00.745611Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715664. Ctx: { TraceId: 01jtmznzbtbx3kd5ag7qqvrs8f, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=OGVhOWQzNzctNjhlYjAwOTUtYWZiYzExNTktZjlhNWRlMGQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:02:00.882183Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715665. Ctx: { TraceId: 01jtmznzhg4pba19tbfvnjaye9, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=OGVhOWQzNzctNjhlYjAwOTUtYWZiYzExNTktZjlhNWRlMGQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:02:01.021245Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715666. Ctx: { TraceId: 01jtmznznt7jgnmpe9qypka4qt, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=OGVhOWQzNzctNjhlYjAwOTUtYWZiYzExNTktZjlhNWRlMGQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:02:01.142560Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715667. Ctx: { TraceId: 01jtmznzt52nsnv7f09taxjssh, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=OGVhOWQzNzctNjhlYjAwOTUtYWZiYzExNTktZjlhNWRlMGQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:02:01.397165Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715668. 
Ctx: { TraceId: 01jtmznzy18rg7q2dwvk7wvy21, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=OGVhOWQzNzctNjhlYjAwOTUtYWZiYzExNTktZjlhNWRlMGQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:02:01.634026Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715669. Ctx: { TraceId: 01jtmzp05w1vpsb76dra400z7p, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=OGVhOWQzNzctNjhlYjAwOTUtYWZiYzExNTktZjlhNWRlMGQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:02:01.974101Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715670. Ctx: { TraceId: 01jtmzp0daf4qhe48ja12ryg62, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=OGVhOWQzNzctNjhlYjAwOTUtYWZiYzExNTktZjlhNWRlMGQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root
------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> YdbYqlClient::TestConstraintViolation [GOOD]
Test command err: 2025-05-07T09:01:30.427726Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501626343861255604:2210];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:30.428031Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002848/r3tmp/tmpq8tQJu/pdisk_1.dat 2025-05-07T09:01:31.447255Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T09:01:31.632210Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:01:31.632300Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:01:31.639184Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:01:31.649675Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 11844, node 1 2025-05-07T09:01:32.170568Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:01:32.170600Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:01:32.170607Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:01:32.170712Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23497 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:01:32.605250Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:01:35.410730Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501626343861255604:2210];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:35.410822Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:01:37.214841Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501626373926027670:2341], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:37.214927Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:37.682184Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 2025-05-07T09:01:38.039707Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501626378220995158:2355], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:38.039811Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:38.040373Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501626378220995163:2358], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:38.044821Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480 2025-05-07T09:01:38.083942Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501626378220995165:2359], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-05-07T09:01:38.191317Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501626378220995241:2823] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:01:38.663821Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710661. Ctx: { TraceId: 01jtmzn9bpat6je9wkreqfyt7q, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjU1Yzg0YjctOGRlNDc3OS03OGMzMjk3MS1lMjhiNGY2Ng==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:01:39.063698Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710662. Ctx: { TraceId: 01jtmzna07ex6kjygpwa39d0cf, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjU1Yzg0YjctOGRlNDc3OS03OGMzMjk3MS1lMjhiNGY2Ng==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:01:41.638487Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7501626392139153792:2225];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002848/r3tmp/tmph8uCTm/pdisk_1.dat 2025-05-07T09:01:41.750947Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-05-07T09:01:42.055995Z node 4 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:01:42.108395Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:01:42.108515Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:01:42.118598Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 13676, node 4 2025-05-07T09:01:42.573076Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:01:42.573109Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:01:42.573126Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:01:42.573277Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:20796 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:01:42.999769Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:01:46.550124Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7501626392139153792:2225];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:46.550188Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:01:47.754715Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7501626417908958484:2343], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:47.754795Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7501626417908958473:2340], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:47.755092Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:47.760175Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 720 ... ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480 2025-05-07T09:01:54.472184Z node 7 :KQP_SESSION WARN: kqp_session_actor.cpp:2191: SessionId: ydb://session/3?node_id=7&id=OGM0NmYzMmEtNjdjNzBiZTctOWQ2NmNlMDEtNDAyNTJiNjI=, ActorId: [7:7501626450063197789:2334], ActorState: ExecuteState, TraceId: 01jtmznsb9edk4qxwrwajz826a, Reply query error, msg: Pending previous query completion proxyRequestId: 8 2025-05-07T09:01:54.483112Z node 7 :KQP_SESSION WARN: kqp_session_actor.cpp:2191: SessionId: ydb://session/3?node_id=7&id=OGM0NmYzMmEtNjdjNzBiZTctOWQ2NmNlMDEtNDAyNTJiNjI=, ActorId: [7:7501626450063197789:2334], ActorState: ExecuteState, TraceId: 01jtmznsb9edk4qxwrwajz826a, Reply query error, msg: Pending previous query completion proxyRequestId: 9 2025-05-07T09:01:54.483193Z node 7 :KQP_SESSION WARN: kqp_session_actor.cpp:2191: SessionId: ydb://session/3?node_id=7&id=OGM0NmYzMmEtNjdjNzBiZTctOWQ2NmNlMDEtNDAyNTJiNjI=, ActorId: [7:7501626450063197789:2334], ActorState: ExecuteState, TraceId: 01jtmznsb9edk4qxwrwajz826a, Reply query error, msg: Pending previous query completion proxyRequestId: 10 2025-05-07T09:01:54.483224Z node 7 :KQP_SESSION WARN: kqp_session_actor.cpp:2191: SessionId: ydb://session/3?node_id=7&id=OGM0NmYzMmEtNjdjNzBiZTctOWQ2NmNlMDEtNDAyNTJiNjI=, ActorId: [7:7501626450063197789:2334], ActorState: ExecuteState, TraceId: 01jtmznsb9edk4qxwrwajz826a, Reply query error, msg: Pending previous query completion proxyRequestId: 11 2025-05-07T09:01:54.484137Z node 7 :KQP_SESSION WARN: kqp_session_actor.cpp:2191: SessionId: ydb://session/3?node_id=7&id=OGM0NmYzMmEtNjdjNzBiZTctOWQ2NmNlMDEtNDAyNTJiNjI=, ActorId: [7:7501626450063197789:2334], ActorState: ExecuteState, TraceId: 01jtmznsb9edk4qxwrwajz826a, Reply query error, msg: Pending previous query completion proxyRequestId: 12 2025-05-07T09:01:54.520134Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7501626450063197825:2346], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-05-07T09:01:54.604267Z node 7 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [7:7501626450063197924:2691] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:01:57.191366Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7501626461782938076:2140];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:57.191441Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002848/r3tmp/tmpp2XigY/pdisk_1.dat 2025-05-07T09:01:57.557087Z node 10 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:01:57.621151Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:01:57.621243Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:01:57.624214Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 28802, node 10 2025-05-07T09:01:57.762726Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:01:57.762755Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:01:57.762765Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:01:57.762922Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:15971 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:01:58.077563Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 
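
The repeating pattern above — TPoolFetcherActor reporting NOT_FOUND for pool "default", TPoolCreatorActor proposing ESchemeOpCreateResourcePool, a "Scheduled retry ... doublechecking" warning, and finally TX_PROXY noting "path exist, request accepts it" — is the workload service lazily creating /Root/.metadata/workload_manager/pools/default on first use; racing creators resolve benignly once the path exists. Resource pools can also be created explicitly. A minimal hedged YQL sketch (the pool name and WITH parameters are illustrative assumptions, not taken from this log):

    -- Assumption: explicit resource pool DDL; the "default" pool in the traces
    -- above is auto-created by the workload service, not by user DDL.
    CREATE RESOURCE POOL test_pool WITH (
        CONCURRENT_QUERY_LIMIT = 10,  -- assumed cap on in-flight queries
        QUEUE_SIZE = 100              -- assumed bound on queued queries
    );
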
2025-05-07T09:02:01.346137Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7501626478962808258:2338], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:02:01.346307Z node 10 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:02:01.365262Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-05-07T09:02:01.537137Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7501626478962808426:2348], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:02:01.537233Z node 10 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:02:01.537613Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7501626478962808431:2351], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:02:01.542389Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480 2025-05-07T09:02:01.580342Z node 10 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [10:7501626478962808433:2352], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-05-07T09:02:01.677244Z node 10 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [10:7501626478962808503:2794] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:02:01.766312Z node 10 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715661. Ctx: { TraceId: 01jtmzp09z4c3xahe1rnht7azm, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=10&id=MTM1NWVlYzYtM2IyNDk1ZjctMjI0ODg0Ny1mNDY5YzA4NA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:02:01.914966Z node 10 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715662. Ctx: { TraceId: 01jtmzp0j50p8dzfk1rnv3qq9j, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=10&id=MTM1NWVlYzYtM2IyNDk1ZjctMjI0ODg0Ny1mNDY5YzA4NA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:02:01.936364Z node 10 :GLOBAL WARN: log.cpp:784: fline=events.h:103;event=ev_write_error;status=STATUS_CONSTRAINT_VIOLATION;details=Conflict with existing key.;tx_id=3; 2025-05-07T09:02:01.960034Z node 10 :TX_DATASHARD ERROR: finish_propose_write_unit.cpp:226: Prepare transaction failed. txid 3 at tablet 72075186224037888 errors: Status: STATUS_CONSTRAINT_VIOLATION Issues: { message: "Conflict with existing key." issue_code: 2012 severity: 1 } 2025-05-07T09:02:01.960243Z node 10 :TX_DATASHARD ERROR: finish_propose_write_unit.cpp:168: Errors while proposing transaction txid 3 at tablet 72075186224037888 Status: STATUS_CONSTRAINT_VIOLATION Issues: { message: "Conflict with existing key." issue_code: 2012 severity: 1 } 2025-05-07T09:02:01.960510Z node 10 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:765: SelfId: [10:7501626478962808577:2336], Table: `Root/Test` ([72057594046644480:2:1]), SessionActorId: [10:7501626478962808240:2336]Got CONSTRAINT VIOLATION for table `Root/Test`. ShardID=72075186224037888, Sink=[10:7501626478962808577:2336].{
: Error: Conflict with existing key., code: 2012 } 2025-05-07T09:02:01.961175Z node 10 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:2833: SelfId: [10:7501626478962808570:2336], SessionActorId: [10:7501626478962808240:2336], statusCode=PRECONDITION_FAILED. Issue=
: Error: Constraint violated. Table: `Root/Test`., code: 2012
: Error: Conflict with existing key., code: 2012 . sessionActorId=[10:7501626478962808240:2336]. isRollback=0 2025-05-07T09:02:01.961484Z node 10 :KQP_SESSION WARN: kqp_session_actor.cpp:1840: SessionId: ydb://session/3?node_id=10&id=MTM1NWVlYzYtM2IyNDk1ZjctMjI0ODg0Ny1mNDY5YzA4NA==, ActorId: [10:7501626478962808240:2336], ActorState: ExecuteState, TraceId: 01jtmzp0j50p8dzfk1rnv3qq9j, got TEvKqpBuffer::TEvError in ExecuteState, status: PRECONDITION_FAILED send to: [10:7501626478962808571:2336] from: [10:7501626478962808570:2336] 2025-05-07T09:02:01.961588Z node 10 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1944: ActorId: [10:7501626478962808571:2336] TxId: 281474976715662. Ctx: { TraceId: 01jtmzp0j50p8dzfk1rnv3qq9j, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=10&id=MTM1NWVlYzYtM2IyNDk1ZjctMjI0ODg0Ny1mNDY5YzA4NA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. PRECONDITION_FAILED: {
: Error: Constraint violated. Table: `Root/Test`., code: 2012 subissue: {
: Error: Conflict with existing key., code: 2012 } } 2025-05-07T09:02:01.961840Z node 10 :KQP_SESSION WARN: kqp_session_actor.cpp:2578: SessionId: ydb://session/3?node_id=10&id=MTM1NWVlYzYtM2IyNDk1ZjctMjI0ODg0Ny1mNDY5YzA4NA==, ActorId: [10:7501626478962808240:2336], ActorState: ExecuteState, TraceId: 01jtmzp0j50p8dzfk1rnv3qq9j, Create QueryResponse for error on request, msg: ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> YdbYqlClient::CreateAndAltertTableWithKeyBloomFilter [GOOD] Test command err: 2025-05-07T09:01:28.722060Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501626335900082683:2206];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:28.722193Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002851/r3tmp/tmpZTCvd5/pdisk_1.dat 2025-05-07T09:01:29.545403Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:01:29.545545Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:01:29.550654Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:01:29.569558Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 10556, node 1 2025-05-07T09:01:29.738578Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:01:29.738654Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:01:29.738665Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:01:29.738777Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:22972 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:01:30.061834Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 
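
The PRECONDITION_FAILED trace for TxId 281474976715662 above is a primary-key uniqueness violation: the datashard rejects the prepared write with STATUS_CONSTRAINT_VIOLATION (issue code 2012, "Conflict with existing key"), the KQP write actor wraps it as "Constraint violated. Table: `Root/Test`.", and the session turns it into an error QueryResponse. A minimal hedged YQL sketch of the pattern that produces it, assuming the key Uint64 / value Utf8 schema shown in the Ls output further down:

    -- Sketch reproducing issue code 2012 under the assumed Root/Test schema.
    INSERT INTO `Root/Test` (key, value) VALUES (1ul, "a"u);  -- first write succeeds
    INSERT INTO `Root/Test` (key, value) VALUES (1ul, "b"u);  -- fails: key 1 already exists
    -- UPSERT is a blind write and would overwrite instead of conflicting:
    UPSERT INTO `Root/Test` (key, value) VALUES (1ul, "b"u);
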
2025-05-07T09:01:30.135646Z node 1 :GRPC_PROXY_NO_CONNECT_ACCESS DEBUG: grpc_request_check_actor.h:538: Skip check permission connect db, AllowYdbRequestsWithoutDatabase is off, there is no db provided from user, database: /Root, user: root@builtin, from ip: ipv6:[::1]:56472 Call 2025-05-07T09:01:30.160186Z node 1 :GRPC_PROXY_NO_CONNECT_ACCESS DEBUG: grpc_request_check_actor.h:538: Skip check permission connect db, AllowYdbRequestsWithoutDatabase is off, there is no db provided from user, database: /Root, user: root@builtin, from ip: ipv6:[::1]:56478 2025-05-07T09:01:33.299925Z node 1 :GRPC_PROXY_NO_CONNECT_ACCESS DEBUG: grpc_request_check_actor.h:538: Skip check permission connect db, AllowYdbRequestsWithoutDatabase is off, there is no db provided from user, database: /Root, user: root@builtin, from ip: ipv6:[::1]:48762 Call Call 2025-05-07T09:01:33.367639Z node 1 :GRPC_PROXY_NO_CONNECT_ACCESS DEBUG: grpc_request_check_actor.h:574: Skip check permission connect db, user is a admin, database: /Root, user: root@builtin, from ip: ipv6:[::1]:48792 2025-05-07T09:01:33.386414Z node 1 :GRPC_PROXY_NO_CONNECT_ACCESS DEBUG: grpc_request_check_actor.h:574: Skip check permission connect db, user is a admin, database: /Root, user: root@builtin, from ip: ipv6:[::1]:48794 2025-05-07T09:01:33.388289Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710658:0, at schemeshard: 72057594046644480 2025-05-07T09:01:35.581401Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7501626365332843460:2199];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:35.600900Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002851/r3tmp/tmp9vQUbl/pdisk_1.dat 2025-05-07T09:01:35.815045Z node 4 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 61823, node 4 2025-05-07T09:01:35.919923Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:01:35.920017Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:01:36.023466Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:01:36.101401Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:01:36.101425Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:01:36.101432Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:01:36.101602Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:12895 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:01:36.384169Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:01:40.612539Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7501626389477112461:2074];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:40.614466Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002851/r3tmp/tmp2eX8mb/pdisk_1.dat 2025-05-07T09:01:40.918639Z node 7 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:01:40.951087Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:01:40.951153Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:01:40.957377Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 5794, node 7 2025-05-07T09:01:41.113843Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:01:41.113870Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:01:41.113877Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:01:41.114032Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:4774 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 Pa... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:01:41.488838Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:01:45.618852Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[7:7501626389477112461:2074];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:45.618940Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:01:45.996888Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_table.cpp:426: TCreateTable Propose, path: /Root/Table-1, opId: 281474976710658:0, at schemeshard: 72057594046644480 2025-05-07T09:01:45.998180Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 281474976710658:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-05-07T09:01:45.998222Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 202 ... 
_TX_SCHEMESHARD NOTICE: schemeshard__operation_drop_table.cpp:492: TDropTable Propose, path: Root/Table-8, pathId: 0, opId: 281474976710688:0, at schemeshard: 72057594046644480 2025-05-07T09:01:50.343364Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 281474976710688:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-05-07T09:01:50.347389Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976710688, database: /Root, subject: , status: StatusAccepted, operation: DROP TABLE, path: Root/Table-8 2025-05-07T09:01:50.350535Z node 7 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 7, TabletId: 72075186224037894 not found 2025-05-07T09:01:50.356308Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046644480 2025-05-07T09:01:50.376827Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 1746608510423, transactions count in step: 1, at schemeshard: 72057594046644480 2025-05-07T09:01:50.387123Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:1103: All parts have reached barrier, tx: 281474976710688, done: 0, blocked: 1 2025-05-07T09:01:50.407967Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 281474976710688:0 2025-05-07T09:01:50.427716Z node 7 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 7, TabletId: 72075186224037892 not found 2025-05-07T09:01:50.451848Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046644480 2025-05-07T09:01:52.333123Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7501626440341199195:2078];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:52.333223Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002851/r3tmp/tmp0zjfXu/pdisk_1.dat 2025-05-07T09:01:52.590496Z node 10 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:01:52.664765Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:01:52.664857Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:01:52.670715Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 28429, node 10 2025-05-07T09:01:52.866677Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:01:52.866701Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:01:52.866709Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from 
file: (empty maybe) 2025-05-07T09:01:52.866858Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:19808 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:01:53.359949Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... TClient is connected to server localhost:19808 2025-05-07T09:01:57.071382Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-05-07T09:01:57.333236Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[10:7501626440341199195:2078];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:57.333310Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; TClient is connected to server localhost:19808 TClient::Ls request: Root/Test TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Test" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1746608517255 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Test" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyCo... 
(TRUNCATED) 2025-05-07T09:01:57.516235Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 TClient is connected to server localhost:19808 TClient::Ls request: Root/Test TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Test" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1746608517255 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Test" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyCo... (TRUNCATED) 2025-05-07T09:01:59.530161Z node 13 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[13:7501626471867733767:2072];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:59.530276Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002851/r3tmp/tmpXwNoNe/pdisk_1.dat 2025-05-07T09:01:59.811534Z node 13 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:01:59.859547Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:01:59.859654Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:01:59.867083Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 8353, node 13 2025-05-07T09:02:00.045667Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:02:00.045697Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:02:00.045719Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:02:00.045891Z node 13 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:64010 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:02:00.504708Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:02:04.008393Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-05-07T09:02:04.153812Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> YdbYqlClient::CheckDefaultTableSettings2 [GOOD] Test command err: test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002865/r3tmp/tmpb0aZbE/pdisk_1.dat TServer::EnableGrpc on GrpcPort 7327, node 1 TClient is connected to server localhost:2442 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 
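
The CreateAndAltertTableWithKeyBloomFilter trace that ends above pairs ESchemeOpCreateTable with ESchemeOpAlterTable on `Root/Test`, and its two Ls responses show TableSchemaVersion moving from 1 to 2. A hedged YQL sketch of what the test name suggests (the exact statements are assumptions; only the column schema and the schema-version bump come from this log):

    -- Assumed DDL: enable the key bloom filter at creation, then toggle it.
    CREATE TABLE `Root/Test` (
        key Uint64,
        value Utf8,
        PRIMARY KEY (key)
    ) WITH (KEY_BLOOM_FILTER = ENABLED);

    ALTER TABLE `Root/Test` SET (KEY_BLOOM_FILTER = DISABLED);  -- bumps TableSchemaVersion
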
2025-05-07T09:01:22.769409Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7501626312601274024:2075];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:22.769474Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002865/r3tmp/tmp0WNCTw/pdisk_1.dat 2025-05-07T09:01:23.275564Z node 4 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:01:23.295927Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:01:23.296026Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:01:23.305095Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 8361, node 4 2025-05-07T09:01:23.610425Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:01:23.610449Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:01:23.610458Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:01:23.617343Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:19693 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:01:24.208127Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:01:27.616459Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7501626334076111595:2341], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:27.616560Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:27.770803Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7501626312601274024:2075];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:27.770860Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:01:28.033299Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-05-07T09:01:28.275915Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7501626338371079078:2352], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:28.276100Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:28.276740Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7501626338371079083:2355], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:28.281357Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480 2025-05-07T09:01:28.315732Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7501626338371079085:2356], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-05-07T09:01:28.408765Z node 4 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [4:7501626338371079163:2814] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:01:28.763090Z node 4 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715661. Ctx: { TraceId: 01jtmzmztjfxtrnxyyn4j12za8, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=4&id=YzM0NDkyZDMtNGNmZDJmN2UtMWEzZTAzYTEtNjYyYzU5OGU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:01:28.920245Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710758:2, at schemeshard: 72057594046644480 2025-05-07T09:01:29.070791Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710759:0, at schemeshard: 72057594046644480 2025-05-07T09:01:29.306545Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-05-07T09:01:31.253227Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7501626351655074622:2072];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:31.253522Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002865/r3tmp/tmpXyid2c/pdisk_1.dat 2025-05-07T09:01:31.453627Z node 7 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:01:31.507513Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:01:31.507603Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:01:31.513460Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 15531, node 7 2025-05-07T09:01:31.734609Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:01:31.734640Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:01:31.734647Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:01:31.735291Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:16513 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:01:32.117149Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:01:35.338134Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7501626368834944885:2338], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:35.338255Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:35.370673Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 2025-05-07T09:01:35.514336Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7501626368834945050:2348], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:35.514436Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:35.514736Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7501626368834945055:2351], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:35.519391Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480 2025-05-07T09:01:35.552778Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7501626368834945057:2352], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-05-07T09:01:35.650834Z node 7 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [7:7501626368834945138:2794] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:01:35.728713Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710661. Ctx: { TraceId: 01jtmzn6ws1sadjsrwrgv7sb88, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=YTZkYjkxNzAtYjQwZDAzNjItZmIxMTA1OTItZjIzZmEzNDU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:01:35.805088Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976715758:2, at schemeshard: 72057594046644480 2025-05-07T09:01:35.938276Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976715759:0, at schemeshard: 72057594046644480 2025-05-07T09:01:36.253004Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[7:7501626351655074622:2072];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:36.253096Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:01:38.306633Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7501626380673869544:2075];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:38.306772Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002865/r3tmp/tmphm4Igg/pdisk_1.dat 2025-05-07T09:01:38.632639Z node 10 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:01:38.670306Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:01:38.670411Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:01:38.678803Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 5548, node 10 2025-05-07T09:01:38.836581Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:01:38.836608Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:01:38.836618Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:01:38.836780Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:5689 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:01:39.273760Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:01:43.310813Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[10:7501626380673869544:2075];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:43.310888Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:01:43.405692Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 2025-05-07T09:01:45.751542Z node 13 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[13:7501626411299867074:2219];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002865/r3tmp/tmp4O0VUQ/pdisk_1.dat 2025-05-07T09:01:45.906556Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-05-07T09:01:46.116234Z node 13 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:01:46.147592Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:01:46.147695Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:01:46.154590Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 10964, node 13 2025-05-07T09:01:46.469656Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:01:46.469691Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:01:46.469701Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:01:46.469869Z node 13 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad 
distributable configuration TClient is connected to server localhost:18531 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:01:46.959015Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:01:50.652597Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 2025-05-07T09:01:50.724374Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[13:7501626411299867074:2219];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:50.724474Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> TGRpcNewCoordinationClient::SessionCreateUpdateDeleteSemaphore [GOOD] Test command err: 2025-05-07T09:01:23.535285Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501626315225562094:2139];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:23.535497Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00285e/r3tmp/tmp85AVLb/pdisk_1.dat 2025-05-07T09:01:24.327192Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:01:24.362962Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:01:24.363089Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:01:24.368145Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 63440, node 1 2025-05-07T09:01:24.912455Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:01:24.912478Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to 
initialize from file: (empty maybe) 2025-05-07T09:01:24.912486Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:01:24.912615Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8973 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:01:25.790553Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:01:25.996590Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateKesus, opId: 281474976710658:0, at schemeshard: 72057594046644480 2025-05-07T09:01:31.170470Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7501626351745955466:2251];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:31.170861Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00285e/r3tmp/tmpmgUe2L/pdisk_1.dat 2025-05-07T09:01:31.393602Z node 4 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:01:31.427113Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:01:31.427211Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:01:31.435917Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 4973, node 4 2025-05-07T09:01:31.594528Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:01:31.594550Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:01:31.594557Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:01:31.594689Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:20020 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:01:31.807626Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:01:31.861658Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateKesus, opId: 281474976710658:0, at schemeshard: 72057594046644480 2025-05-07T09:01:36.802271Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7501626372165279873:2082];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:36.802327Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00285e/r3tmp/tmpRfBvOU/pdisk_1.dat 2025-05-07T09:01:37.107178Z node 7 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 7055, node 7 2025-05-07T09:01:37.173521Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:01:37.173612Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:01:37.252810Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:01:37.328141Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:01:37.328159Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:01:37.328164Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:01:37.328276Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23271 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:01:37.591438Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:01:37.760423Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateKesus, opId: 281474976710658:0, at schemeshard: 72057594046644480 2025-05-07T09:01:42.430284Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7501626398998974157:2073];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:42.430342Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00285e/r3tmp/tmprfafCU/pdisk_1.dat 2025-05-07T09:01:42.740099Z node 10 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:01:42.799936Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:01:42.800053Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:01:42.815554Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 29859, node 10 2025-05-07T09:01:43.047216Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:01:43.047250Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:01:43.047260Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:01:43.047428Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:10603 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:01:43.400304Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:01:43.519000Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateKesus, opId: 281474976710658:0, at schemeshard: 72057594046644480 2025-05-07T09:01:48.237314Z node 13 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[13:7501626422902308151:2073];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:48.237403Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00285e/r3tmp/tmp2dfVTE/pdisk_1.dat 2025-05-07T09:01:48.830281Z node 13 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:01:48.995637Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:01:48.995756Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 14286, node 13 2025-05-07T09:01:49.013570Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:01:49.106751Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:01:49.106776Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:01:49.106785Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:01:49.106915Z node 13 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:9677 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:01:49.486273Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:01:49.574184Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateKesus, opId: 281474976715658:0, at schemeshard: 72057594046644480 ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> YdbYqlClient::TestYqlLongSessionMultipleErrors [GOOD] Test command err: 2025-05-07T09:01:30.558287Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501626346976602573:2075];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:30.558361Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00284d/r3tmp/tmpr11W3s/pdisk_1.dat 2025-05-07T09:01:31.390163Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:01:31.390254Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:01:31.399765Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:01:31.461078Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 19678, node 1 2025-05-07T09:01:31.692183Z node 1 :GRPC_SERVER WARN: grpc_request_proxy.cpp:509: SchemeBoardDelete /Root Strong=0 2025-05-07T09:01:31.692371Z node 1 :GRPC_SERVER WARN: grpc_request_proxy.cpp:509: SchemeBoardDelete /Root Strong=0 2025-05-07T09:01:31.700191Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T09:01:31.734631Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:01:31.734653Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:01:31.734661Z node 1 
:NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:01:31.734743Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:20181 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:01:32.139735Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:01:34.948155Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501626364156472808:2338], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:34.948264Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:35.467980Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 2025-05-07T09:01:35.545842Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501626346976602573:2075];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:35.545886Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:01:35.649706Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501626368451440276:2348], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:35.649776Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:35.650214Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501626368451440281:2351], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:35.654659Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480 2025-05-07T09:01:35.681587Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501626368451440283:2352], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-05-07T09:01:35.752503Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501626368451440365:2822] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:01:35.844122Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7501626368451440389:2357], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:2:25: Error: At function: KiWriteTable!
:2:43: Error: Failed to convert type: Struct<'Key':String,'Value':String> to Struct<'Key':Uint32?,'Value':String?>
:2:43: Error: Failed to convert 'Key': String to Optional
:2:43: Error: Failed to convert input columns types to scheme types, code: 2031 2025-05-07T09:01:35.845008Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2147: SessionId: ydb://session/3?node_id=1&id=Y2JiODVjODctMzI2NzkzYjgtNDk4NjUzNmEtZDJhODE4YTc=, ActorId: [1:7501626364156472782:2336], ActorState: ExecuteState, TraceId: 01jtmzn710b7cb07656egy4h7q, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: 2025-05-07T09:01:38.730181Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7501626380888149825:2075];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:38.746188Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00284d/r3tmp/tmpEXZIXm/pdisk_1.dat 2025-05-07T09:01:39.138181Z node 4 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:01:39.199782Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:01:39.199886Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:01:39.203432Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 1681, node 4 2025-05-07T09:01:39.475219Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:01:39.475250Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:01:39.475260Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:01:39.475437Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:10282 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:01:39.935456Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 
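The GENERIC_ERROR above (code 2031, "Failed to convert input columns types to scheme types") is what the query compiler returns when literal column types do not match the table schema. Below is a minimal repro sketch with the YDB C++ SDK — not the exact statement the test runs; the endpoint, database, and a `/Root/Test` table with `Key Uint32, Value String` are assumptions:

// Hedged sketch: supplying a String literal for a Uint32 key column makes the
// compiler fail with the "Failed to convert 'Key': String to Optional" issue
// seen above. Endpoint, database and table schema are placeholder assumptions.
#include <ydb/public/sdk/cpp/client/ydb_driver/driver.h>
#include <ydb/public/sdk/cpp/client/ydb_table/table.h>
#include <iostream>

int main() {
    NYdb::TDriver driver(NYdb::TDriverConfig()
        .SetEndpoint("localhost:2136")   // placeholder endpoint
        .SetDatabase("/Root"));
    NYdb::NTable::TTableClient client(driver);

    auto status = client.RetryOperationSync([](NYdb::NTable::TSession session) -> NYdb::TStatus {
        // Key is Uint32 in the assumed schema; the String literal cannot be converted.
        return session.ExecuteDataQuery(
            R"(UPSERT INTO `/Root/Test` (Key, Value) VALUES ("not-a-number", "v");)",
            NYdb::NTable::TTxControl::BeginTx().CommitTx()).GetValueSync();
    });
    if (!status.IsSuccess()) {
        std::cerr << status.GetIssues().ToString();  // expect issue code 2031
    }
    driver.Stop(true);
    return 0;
}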
2025-05-07T09:01:43.716180Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7501626380888149825:2075];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:43.716258Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:01:43.979165Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7501626402362987409:2340], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:43.979280Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:46.202317Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_ ... 5-07T09:01:51.538244Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 2025-05-07T09:01:51.721152Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7501626437924220340:2349], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:51.721243Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:51.721641Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7501626437924220345:2352], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:51.727523Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480 2025-05-07T09:01:51.775974Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7501626437924220347:2353], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-05-07T09:01:51.850823Z node 7 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [7:7501626437924220417:2795] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:01:51.928351Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710661. Ctx: { TraceId: 01jtmznpq80pewm6epj1qj50wt, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=ZTUxMDkwOTUtZTNkNjQzMmQtMmZhNzgzZWQtM2RiZDA1MWY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:01:52.117175Z node 7 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [7:7501626437924220479:2364], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:2:13: Error: At function: KiReadTable!
:2:13: Error: Cannot find table 'db.[Root/BadTable]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-05-07T09:01:52.118873Z node 7 :KQP_SESSION WARN: kqp_session_actor.cpp:2147: SessionId: ydb://session/3?node_id=7&id=ZTUxMDkwOTUtZTNkNjQzMmQtMmZhNzgzZWQtM2RiZDA1MWY=, ActorId: [7:7501626437924220173:2337], ActorState: ExecuteState, TraceId: 01jtmznpyz37cme767bs66hmq6, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-05-07T09:01:52.209309Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710662. Ctx: { TraceId: 01jtmznq45700da48yt2baa5h5, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=ZTUxMDkwOTUtZTNkNjQzMmQtMmZhNzgzZWQtM2RiZDA1MWY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:01:52.417531Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710663. Ctx: { TraceId: 01jtmznq7f6mfdb0t7wdpjrfdc, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=ZTUxMDkwOTUtZTNkNjQzMmQtMmZhNzgzZWQtM2RiZDA1MWY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:01:54.523042Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7501626450068358057:2073];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:54.534141Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00284d/r3tmp/tmpOnBI7C/pdisk_1.dat 2025-05-07T09:01:54.961159Z node 10 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:01:55.093488Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:01:55.094690Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:01:55.103373Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 5802, node 10 2025-05-07T09:01:55.565668Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:01:55.565698Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:01:55.565709Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:01:55.565948Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:19649 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:01:56.223727Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:01:59.039118Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7501626471543195596:2338], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:59.039209Z node 10 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:59.064802Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 2025-05-07T09:01:59.208659Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7501626471543195762:2348], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:59.208788Z node 10 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:59.209315Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7501626471543195767:2351], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:59.213218Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480 2025-05-07T09:01:59.240846Z node 10 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [10:7501626471543195769:2352], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-05-07T09:01:59.351517Z node 10 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [10:7501626471543195845:2812] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:01:59.372088Z node 10 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [10:7501626471543195856:2356], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:2:25: Error: At function: KiWriteTable!
:2:25: Error: Cannot find table 'db.[Root/BadTable1]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-05-07T09:01:59.374964Z node 10 :KQP_SESSION WARN: kqp_session_actor.cpp:2147: SessionId: ydb://session/3?node_id=10&id=NDg0OTQ0NWEtZTBhYjE2Y2EtZGEyNmU0MjEtOGU3ZWJjYzE=, ActorId: [10:7501626471543195578:2336], ActorState: ExecuteState, TraceId: 01jtmzny179wsg05hecdj53tnj, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-05-07T09:01:59.438753Z node 10 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [10:7501626471543195890:2362], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:2:25: Error: At function: KiWriteTable!
:2:25: Error: Cannot find table 'db.[Root/BadTable2]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-05-07T09:01:59.440208Z node 10 :KQP_SESSION WARN: kqp_session_actor.cpp:2147: SessionId: ydb://session/3?node_id=10&id=NDg0OTQ0NWEtZTBhYjE2Y2EtZGEyNmU0MjEtOGU3ZWJjYzE=, ActorId: [10:7501626471543195578:2336], ActorState: ExecuteState, TraceId: 01jtmzny7gbsg613k07wq0mmek, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-05-07T09:01:59.518095Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[10:7501626450068358057:2073];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:59.518171Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> YdbYqlClient::RetryOperationLimitedDuration [GOOD] Test command err: 2025-05-07T09:01:11.000380Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501626257870622451:2211];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:11.026048Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002870/r3tmp/tmpLcRNKs/pdisk_1.dat 2025-05-07T09:01:11.543707Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:01:11.543838Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:01:11.547266Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:01:11.586733Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 31009, node 1 2025-05-07T09:01:11.648871Z node 1 :GRPC_SERVER WARN: grpc_request_proxy.cpp:509: SchemeBoardDelete /Root Strong=0 2025-05-07T09:01:11.649012Z node 1 :GRPC_SERVER WARN: grpc_request_proxy.cpp:509: SchemeBoardDelete /Root Strong=0 2025-05-07T09:01:11.858648Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:01:11.858670Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:01:11.858677Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:01:11.858813Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:27270 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:01:12.431634Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:01:15.828364Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501626279345459844:2339], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:15.828485Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:15.969724Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501626257870622451:2211];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:15.969796Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:01:16.150752Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 2025-05-07T09:01:16.549232Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710659:0, at schemeshard: 72057594046644480 2025-05-07T09:01:16.814693Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501626283640427358:2359], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:16.814807Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:16.815175Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501626283640427363:2362], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:16.819915Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710660:3, at schemeshard: 72057594046644480 2025-05-07T09:01:16.879525Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501626283640427365:2363], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710660 completed, doublechecking } 2025-05-07T09:01:16.987320Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501626283640427446:2820] txid# 281474976710661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:01:17.070790Z node 1 :TX_PROXY_SCHEME_CACHE WARN: cache.cpp:303: Access denied: self# [1:7501626287935394766:2832], for# test_user@builtin, access# DescribeSchema 2025-05-07T09:01:17.070824Z node 1 :TX_PROXY_SCHEME_CACHE WARN: cache.cpp:303: Access denied: self# [1:7501626287935394766:2832], for# test_user@builtin, access# DescribeSchema 2025-05-07T09:01:17.085093Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7501626283640427457:2368], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:2:21: Error: At function: KiReadTable!
:2:21: Error: Cannot find table 'db.[Root/Test]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-05-07T09:01:17.086404Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2147: SessionId: ydb://session/3?node_id=1&id=ZDUxM2IxYmUtZmVlODlmZDctZTFjZWJlN2UtNmU4OTViMGY=, ActorId: [1:7501626283640427343:2356], ActorState: ExecuteState, TraceId: 01jtmzmmm6epx1j8pc6pmdcan7, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-05-07T09:01:18.889735Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7501626295390451207:2208];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:18.889840Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002870/r3tmp/tmp9EaGSo/pdisk_1.dat 2025-05-07T09:01:19.071907Z node 4 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:01:19.092653Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:01:19.092730Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:01:19.095881Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 21219, node 4 2025-05-07T09:01:19.319717Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:01:19.319755Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:01:19.319763Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:01:19.319883Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:21180 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:01:19.579117Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 
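The SCHEME_ERROR above (code 2003) is deliberately ambiguous — "does not exist or you do not have access permissions" — and is returned both for genuinely missing tables and for tables the caller cannot describe; here it follows the "Access denied ... DescribeSchema" warnings for test_user@builtin. A hedged sketch of the access-denied case, assuming the builtin token string is accepted via SetAuthToken and lacks DescribeSchema on the table:

// Hedged sketch: reading an existing table under a low-privilege builtin token.
// The token value and the expectation that it lacks DescribeSchema are assumptions.
#include <ydb/public/sdk/cpp/client/ydb_driver/driver.h>
#include <ydb/public/sdk/cpp/client/ydb_table/table.h>
#include <iostream>

int main() {
    NYdb::TDriver driver(NYdb::TDriverConfig()
        .SetEndpoint("localhost:2136")        // placeholder endpoint
        .SetDatabase("/Root")
        .SetAuthToken("test_user@builtin"));  // assumed builtin token without DescribeSchema
    NYdb::NTable::TTableClient client(driver);

    auto status = client.RetryOperationSync([](NYdb::NTable::TSession session) -> NYdb::TStatus {
        return session.ExecuteDataQuery(
            "SELECT * FROM `/Root/Test`;",
            NYdb::NTable::TTxControl::BeginTx().CommitTx()).GetValueSync();
    });
    // Expected here: NYdb::EStatus::SCHEME_ERROR with the code-2003 issue above.
    if (!status.IsSuccess()) {
        std::cerr << status.GetIssues().ToString();
    }
    driver.Stop(true);
    return 0;
}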
2025-05-07T09:01:22.319103Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7501626312570321272:2337], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:22.319206Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:22.335934Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-05-07T09:01:22.462920Z node 4 :KQP_WORKLOA ... aceId: 01jtmzmtea9zgbxd9hm0y7hebk, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=4&id=ZTk2YTRhZmQtZGVkMDc1MC05MWZjNDFhNS1jNTE2YjNlZg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:01:25.541597Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7501626322302888731:2086];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:25.548577Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002870/r3tmp/tmpCkR1hb/pdisk_1.dat 2025-05-07T09:01:25.691928Z node 7 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:01:25.735010Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:01:25.735105Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:01:25.738066Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 7494, node 7 2025-05-07T09:01:25.924631Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:01:25.924658Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:01:25.924667Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:01:25.924823Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:30253 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-05-07T09:01:26.220661Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... Previous query attempt was finished with unsuccessful status OVERLOADED: Sending retry attempt 1 of 5 2025-05-07T09:01:30.540308Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[7:7501626322302888731:2086];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:30.540396Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Previous query attempt was finished with unsuccessful status CLIENT_RESOURCE_EXHAUSTED: Sending retry attempt 2 of 5 Previous query attempt was finished with unsuccessful status UNAVAILABLE: Sending retry attempt 3 of 5 Previous query attempt was finished with unsuccessful status BAD_SESSION: Sending retry attempt 4 of 5 Previous query attempt was finished with unsuccessful status SESSION_BUSY: Sending retry attempt 5 of 5 2025-05-07T09:01:30.626764Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7501626343777726262:2343], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:30.626841Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7501626343777726254:2340], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:30.627134Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:30.631254Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480 2025-05-07T09:01:30.665770Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7501626343777726268:2344], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-05-07T09:01:30.771093Z node 7 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [7:7501626343777726349:2687] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } Previous query attempt was finished with unsuccessful status OVERLOADED: Sending retry attempt 1 of 5 Previous query attempt was finished with unsuccessful status CLIENT_RESOURCE_EXHAUSTED: Sending retry attempt 2 of 5 Previous query attempt was finished with unsuccessful status UNAVAILABLE: Sending retry attempt 3 of 5 Previous query attempt was finished with unsuccessful status BAD_SESSION: Sending retry attempt 4 of 5 Previous query attempt was finished with unsuccessful status SESSION_BUSY: Sending retry attempt 5 of 5 Previous query attempt was finished with unsuccessful status NOT_FOUND: Sending retry attempt 1 of 1 Previous query attempt was finished with unsuccessful status NOT_FOUND: Sending retry attempt 1 of 1 Previous query attempt was finished with unsuccessful status UNDETERMINED: Sending retry attempt 1 of 1 Previous query attempt was finished with unsuccessful status UNDETERMINED: Sending retry attempt 1 of 1 Previous query attempt was finished with unsuccessful status TRANSPORT_UNAVAILABLE: Sending retry attempt 1 of 1 Previous query attempt was finished with unsuccessful status TRANSPORT_UNAVAILABLE: Sending retry attempt 1 of 1 2025-05-07T09:01:36.772549Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7501626371277714276:2072];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:36.772646Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002870/r3tmp/tmpmiV8rr/pdisk_1.dat 2025-05-07T09:01:37.060610Z node 10 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:01:37.105018Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:01:37.105120Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:01:37.118790Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 2491, node 10 2025-05-07T09:01:37.307680Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:01:37.307714Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:01:37.307724Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:01:37.307889Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:30920 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:01:37.680565Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... Previous query attempt was finished with unsuccessful status OVERLOADED: Sending retry attempt 1 of 3 2025-05-07T09:01:41.774710Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[10:7501626371277714276:2072];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:41.774826Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Previous query attempt was finished with unsuccessful status OVERLOADED: Sending retry attempt 2 of 3 Previous query attempt was finished with unsuccessful status OVERLOADED: Sending retry attempt 3 of 3 Previous query attempt was finished with unsuccessful status OVERLOADED: Sending retry attempt 1 of 3 2025-05-07T09:01:52.026667Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7261: Cannot get console configs 2025-05-07T09:01:52.026703Z node 10 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded Previous query attempt was finished with unsuccessful status OVERLOADED: Sending retry attempt 1 of 3 Previous query attempt was finished with unsuccessful status OVERLOADED: Sending retry attempt 2 of 3 Previous query attempt was finished with unsuccessful status OVERLOADED: Sending retry attempt 3 of 3 Previous query attempt was finished with unsuccessful status OVERLOADED: Sending retry attempt 1 of 3 ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> TGRpcNewCoordinationClient::SessionAcquireAcceptedCallback [GOOD] Test command err: 2025-05-07T09:01:26.930519Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501626328323903344:2141];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:26.948369Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002858/r3tmp/tmp2RzRfk/pdisk_1.dat 2025-05-07T09:01:27.940923Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:01:27.941024Z 
node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:01:27.964635Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:01:27.965060Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T09:01:27.986087Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 16681, node 1 2025-05-07T09:01:28.330731Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:01:28.330753Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:01:28.330760Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:01:28.330871Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:4631 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:01:29.020942Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 
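Aside: the "Sending retry attempt N of M" lines above come from a status-driven retry loop: OVERLOADED, CLIENT_RESOURCE_EXHAUSTED, UNAVAILABLE, BAD_SESSION and SESSION_BUSY are all treated as retryable, up to a fixed attempt budget. A minimal self-contained sketch of such a loop, assuming hypothetical EStatus and backoff details rather than the actual YDB SDK retry policy:

#include <algorithm>
#include <chrono>
#include <cstddef>
#include <functional>
#include <iostream>
#include <thread>
#include <vector>

enum class EStatus { SUCCESS, OVERLOADED, CLIENT_RESOURCE_EXHAUSTED,
                     UNAVAILABLE, BAD_SESSION, SESSION_BUSY };

const char* ToString(EStatus s) {
    switch (s) {
        case EStatus::SUCCESS: return "SUCCESS";
        case EStatus::OVERLOADED: return "OVERLOADED";
        case EStatus::CLIENT_RESOURCE_EXHAUSTED: return "CLIENT_RESOURCE_EXHAUSTED";
        case EStatus::UNAVAILABLE: return "UNAVAILABLE";
        case EStatus::BAD_SESSION: return "BAD_SESSION";
        case EStatus::SESSION_BUSY: return "SESSION_BUSY";
    }
    return "UNKNOWN";
}

// Re-runs `query` until it succeeds or the attempt budget is spent,
// doubling a (hypothetical) backoff delay between attempts.
EStatus RunWithRetries(const std::function<EStatus()>& query, int maxAttempts) {
    using namespace std::chrono_literals;
    auto delay = 50ms;  // assumed base backoff, not taken from the SDK
    EStatus st = query();
    for (int attempt = 1; st != EStatus::SUCCESS && attempt <= maxAttempts; ++attempt) {
        std::cout << "Previous query attempt was finished with unsuccessful status "
                  << ToString(st) << ": Sending retry attempt " << attempt
                  << " of " << maxAttempts << "\n";
        std::this_thread::sleep_for(delay);
        delay *= 2;
        st = query();
    }
    return st;
}

int main() {
    // Toy query: fails with a different retryable status a few times, then succeeds.
    std::vector<EStatus> script = {EStatus::OVERLOADED, EStatus::CLIENT_RESOURCE_EXHAUSTED,
                                   EStatus::UNAVAILABLE, EStatus::SUCCESS};
    std::size_t i = 0;
    auto query = [&] { return script[std::min(i++, script.size() - 1)]; };
    return RunWithRetries(query, 5) == EStatus::SUCCESS ? 0 : 1;
}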
2025-05-07T09:01:29.142693Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateKesus, opId: 281474976710658:0, at schemeshard: 72057594046644480 2025-05-07T09:01:29.303392Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateKesus, opId: 281474976710659:0, at schemeshard: 72057594046644480 2025-05-07T09:01:29.498077Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpDropKesus, opId: 281474976710660:0, at schemeshard: 72057594046644480 2025-05-07T09:01:29.506183Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037888 not found 2025-05-07T09:01:29.512057Z node 1 :HIVE WARN: tx__delete_tablet.cpp:91: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,1) wasn't found 2025-05-07T09:01:34.738215Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7501626362592210226:2074];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:34.738271Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002858/r3tmp/tmp5jeuAQ/pdisk_1.dat 2025-05-07T09:01:35.103927Z node 4 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:01:35.132498Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:01:35.132579Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:01:35.143246Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 32055, node 4 2025-05-07T09:01:35.454615Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:01:35.454639Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:01:35.454646Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:01:35.454785Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:11256 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:01:36.102853Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:01:36.247672Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateKesus, opId: 281474976710658:0, at schemeshard: 72057594046644480 2025-05-07T09:01:36.365879Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterKesus, opId: 281474976710659:0, at schemeshard: 72057594046644480 2025-05-07T09:01:36.466288Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterKesus, opId: 281474976710660:0, at schemeshard: 72057594046644480 2025-05-07T09:01:36.546647Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterKesus, opId: 281474976710661:0, at schemeshard: 72057594046644480 2025-05-07T09:01:42.703810Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7501626398512173917:2112];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:42.882315Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002858/r3tmp/tmp6zSR8q/pdisk_1.dat 2025-05-07T09:01:43.242494Z node 7 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:01:43.328611Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:01:43.328706Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:01:43.334198Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 8649, node 7 2025-05-07T09:01:43.682385Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:01:43.682410Z node 
7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:01:43.682420Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:01:43.682677Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:24064 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:01:44.088463Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:01:50.714412Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7501626433529240589:2074];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:50.714466Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002858/r3tmp/tmp7aMAma/pdisk_1.dat 2025-05-07T09:01:50.990011Z node 10 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 5895, node 10 2025-05-07T09:01:51.090081Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:01:51.090255Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:01:51.110985Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:01:51.263857Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:01:51.263884Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:01:51.263896Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:01:51.264069Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:5316 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:01:51.795489Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:01:51.946402Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateKesus, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-05-07T09:01:56.982383Z node 13 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[13:7501626455527132897:2074];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:56.994952Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002858/r3tmp/tmpt0zd0F/pdisk_1.dat 2025-05-07T09:01:57.337035Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:01:57.337120Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:01:57.343884Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:01:57.346474Z node 13 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 3898, node 13 2025-05-07T09:01:57.496326Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:01:57.496350Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:01:57.496360Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:01:57.496505Z node 13 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:19963 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:01:57.800397Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:01:57.912361Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateKesus, opId: 281474976710658:0, at schemeshard: 72057594046644480 ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> TGRpcNewClient::InMemoryTables [GOOD] Test command err: 2025-05-07T09:01:30.638480Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501626343724705796:2076];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:30.638530Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00284c/r3tmp/tmpOPyI9o/pdisk_1.dat 2025-05-07T09:01:31.271607Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:01:31.271700Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:01:31.276050Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:01:31.321735Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 27122, node 1 2025-05-07T09:01:31.500550Z node 1 :GRPC_SERVER WARN: grpc_request_proxy.cpp:509: SchemeBoardDelete /Root Strong=0 2025-05-07T09:01:31.500597Z node 1 :GRPC_SERVER WARN: grpc_request_proxy.cpp:509: SchemeBoardDelete /Root Strong=0 2025-05-07T09:01:31.669368Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:01:31.669404Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:01:31.669415Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:01:31.669521Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:63950 
WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:01:32.173852Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:01:37.120508Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7501626376068776510:2206];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:37.120575Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00284c/r3tmp/tmpV18aSh/pdisk_1.dat 2025-05-07T09:01:37.384577Z node 4 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:01:37.434211Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:01:37.434285Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:01:37.445809Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 15423, node 4 2025-05-07T09:01:37.573679Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:01:37.573709Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:01:37.573716Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:01:37.573843Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:11905 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:01:37.867281Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:01:42.370224Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7501626398720741617:2074];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:42.370309Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00284c/r3tmp/tmpCOeKGM/pdisk_1.dat 2025-05-07T09:01:42.852818Z node 7 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:01:42.922913Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:01:42.923004Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:01:42.930682Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 1312, node 7 2025-05-07T09:01:43.172873Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:01:43.172895Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:01:43.172903Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:01:43.173059Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8997 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:01:43.772767Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:01:47.370462Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[7:7501626398720741617:2074];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:47.370553Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:01:48.457224Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 2025-05-07T09:01:48.823750Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7501626424490546632:2347], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:48.823900Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:48.824405Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7501626424490546644:2350], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:48.829135Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480 2025-05-07T09:01:48.864342Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7501626424490546646:2351], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-05-07T09:01:48.931322Z node 7 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [7:7501626424490546719:2803] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:01:52.045294Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7501626441339765048:2074];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:52.045369Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00284c/r3tmp/tmpt0zWAs/pdisk_1.dat 2025-05-07T09:01:52.458528Z node 10 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:01:52.536298Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:01:52.536404Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:01:52.551271Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 26747, node 10 2025-05-07T09:01:52.893337Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:01:52.893362Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:01:52.893371Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:01:52.893521Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:7936 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 Pa... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:01:53.411597Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 
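Aside: the sequence "Scheduled retry for error: Transaction ... completed, doublechecking" followed by the proxy's "path exist, request accepts it" above shows an idempotent create: when two actors race to create the default resource pool, an already-existing path is accepted as success rather than reported as failure. A sketch of that ensure-exists pattern, with hypothetical ECreateStatus and createFn stand-ins rather than the real schemeshard API:

#include <functional>
#include <iostream>
#include <string>

enum class ECreateStatus { OK, ALREADY_EXISTS, TRANSIENT_ERROR };

// Returns true if the path exists after the call, treating ALREADY_EXISTS as
// success the same way the proxy response above "accepts" an existing path.
bool EnsureExists(const std::string& path,
                  const std::function<ECreateStatus(const std::string&)>& createFn,
                  int maxAttempts = 3) {
    for (int attempt = 1; attempt <= maxAttempts; ++attempt) {
        switch (createFn(path)) {
            case ECreateStatus::OK:
                return true;  // freshly created
            case ECreateStatus::ALREADY_EXISTS:
                return true;  // lost the race, but the desired state holds
            case ECreateStatus::TRANSIENT_ERROR:
                std::cout << "Scheduled retry " << attempt << " of "
                          << maxAttempts << " for " << path << "\n";
                break;        // e.g. "completed, doublechecking" before re-reading
        }
    }
    return false;
}

int main() {
    int calls = 0;
    // Toy backend: first call looks transient, second reports the path already there.
    auto createFn = [&calls](const std::string&) {
        return ++calls == 1 ? ECreateStatus::TRANSIENT_ERROR
                            : ECreateStatus::ALREADY_EXISTS;
    };
    return EnsureExists("/Root/.metadata/workload_manager/pools/default",
                        createFn) ? 0 : 1;
}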
2025-05-07T09:01:57.012507Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 2025-05-07T09:01:57.050174Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[10:7501626441339765048:2074];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:57.050252Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:01:57.232010Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 2025-05-07T09:01:57.295274Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7501626462814602858:2358], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:57.295348Z node 10 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:57.295604Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7501626462814602870:2361], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:57.299887Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710661:3, at schemeshard: 72057594046644480 2025-05-07T09:01:57.348434Z node 10 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [10:7501626462814602872:2362], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710661 completed, doublechecking } 2025-05-07T09:01:57.413561Z node 10 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [10:7501626462814602942:2891] txid# 281474976710662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:01:57.540117Z node 10 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710663. Ctx: { TraceId: 01jtmznw59bgnw4zpk01vsw4fn, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=10&id=YTdhN2JmM2MtMWNlNTgwY2YtYzA1NmVjNmEtOThmMjliMjc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:01:57.682204Z node 10 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 10, TabletId: 72075186224037888 not found 2025-05-07T09:01:59.408015Z node 13 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[13:7501626468813751624:2075];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:59.408072Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00284c/r3tmp/tmp5B4iwp/pdisk_1.dat 2025-05-07T09:01:59.544665Z node 13 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:01:59.582019Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:01:59.582119Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:01:59.585955Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 24821, node 13 2025-05-07T09:01:59.686588Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:01:59.686616Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:01:59.686626Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:01:59.686766Z node 13 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3902 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 Pa... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:02:00.123973Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:02:03.142250Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-05-07T09:02:03.296519Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 2025-05-07T09:02:03.437098Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> TGRpcYdbTest::DeleteFromAfterCreate [GOOD] Test command err: 2025-05-07T09:01:27.591151Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501626333511147110:2076];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:27.591205Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002857/r3tmp/tmpHwe6eU/pdisk_1.dat 2025-05-07T09:01:28.497320Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 17496, node 1 2025-05-07T09:01:28.534543Z node 1 :GRPC_SERVER WARN: grpc_request_proxy.cpp:509: SchemeBoardDelete /Root Strong=0 2025-05-07T09:01:28.536389Z node 1 :GRPC_SERVER WARN: grpc_request_proxy.cpp:509: SchemeBoardDelete /Root Strong=0 2025-05-07T09:01:28.632040Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:01:28.632328Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:01:28.775537Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:01:28.794025Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: 
distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:01:28.794048Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:01:28.794054Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:01:28.794199Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:24459 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:01:29.525087Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:01:29.769958Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501626342101082684:2615] txid# 281474976710658, issues: { message: "Path does not exist" issue_code: 200200 severity: 1 } 2025-05-07T09:01:34.755111Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7501626364367584284:2080];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:34.775929Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002857/r3tmp/tmpbHFbGW/pdisk_1.dat 2025-05-07T09:01:35.222589Z node 4 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:01:35.423202Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:01:35.423288Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:01:35.431356Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 24807, node 4 2025-05-07T09:01:35.674757Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:01:35.674779Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:01:35.674794Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:01:35.674947Z node 4 :NET_CLASSIFIER ERROR: 
net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:14122 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:01:36.106512Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:01:36.207722Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-05-07T09:01:36.599532Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 2025-05-07T09:01:39.590256Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7501626364367584284:2080];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:39.590314Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:01:39.930064Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7501626385842422285:2354], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:39.930237Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:39.933147Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7501626385842422297:2357], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:39.937848Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715660:3, at schemeshard: 72057594046644480 2025-05-07T09:01:39.994372Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7501626385842422299:2358], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715660 completed, doublechecking } 2025-05-07T09:01:40.078698Z node 4 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [4:7501626390137389668:3034] txid# 281474976715661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 11], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:01:41.910424Z node 4 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715662. Ctx: { TraceId: 01jtmznb6p41jtncq17nccertw, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=4&id=MTQyNDNkOGMtZjlhZjdmZGEtZjE4ZmZmYTUtOWQ1Nzk4ZGM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:01:41.964743Z node 4 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715663. Ctx: { TraceId: 01jtmznb6p41jtncq17nccertw, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=4&id=MTQyNDNkOGMtZjlhZjdmZGEtZjE4ZmZmYTUtOWQ1Nzk4ZGM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:01:42.279179Z node 4 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715664. Ctx: { TraceId: 01jtmznd8kcpc2384hfsz6evpv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=4&id=YjY3NTc0YzMtYjMxMmQyOGQtOGRlZjFkMDItOTZhNjE5MWU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:01:45.462750Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7501626408224438677:2074];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:45.462810Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002857/r3tmp/tmpjZKLIQ/pdisk_1.dat 2025-05-07T09:01:46.067116Z node 7 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:01:46.171586Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:01:46.171675Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:01:46.183539Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 14191, node 7 2025-05-07T09:01:46.566705Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:01:46.566736Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty ... 
xistsActor;event=undelivered;self_id=[10:7501626446928620135:2205];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:54.029332Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002857/r3tmp/tmpMBRv88/pdisk_1.dat 2025-05-07T09:01:54.394581Z node 10 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:01:54.434696Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:01:54.434785Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:01:54.446811Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 8937, node 10 2025-05-07T09:01:54.645292Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:01:54.645310Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:01:54.645318Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:01:54.645444Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:4829 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:01:55.047503Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:01:58.343104Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7501626464108490260:2335], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:58.343208Z node 10 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:58.343421Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7501626464108490272:2338], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:58.347548Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480 2025-05-07T09:01:58.378870Z node 10 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [10:7501626464108490274:2339], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-05-07T09:01:58.460891Z node 10 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [10:7501626464108490358:2696] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:01:58.655070Z node 10 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [10:7501626464108490387:2349], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:1:1: Error: At function: KiReadTable!
:1:1: Error: Cannot find table 'db.[Root/NotFound]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-05-07T09:01:58.656334Z node 10 :KQP_SESSION WARN: kqp_session_actor.cpp:2147: SessionId: ydb://session/3?node_id=10&id=ZDZiNmY3ZDktN2IyMTgzZjQtYmY2NzllZjUtNmI5MGNmMWY=, ActorId: [10:7501626464108490232:2331], ActorState: ExecuteState, TraceId: 01jtmznxepexhhfnwmc7mzbeck, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-05-07T09:02:00.712134Z node 13 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[13:7501626475943802588:2074];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:02:00.712211Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002857/r3tmp/tmpOTXyYb/pdisk_1.dat 2025-05-07T09:02:00.882925Z node 13 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:02:00.928038Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:02:00.928151Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:02:00.933469Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 4673, node 13 2025-05-07T09:02:01.017747Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:02:01.017776Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:02:01.017786Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:02:01.017944Z node 13 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:10040 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:02:01.387710Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T09:02:01.455165Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-05-07T09:02:01.569639Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 2025-05-07T09:02:04.913613Z node 13 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [13:7501626493123673054:2346], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:02:04.913613Z node 13 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [13:7501626493123673064:2349], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:02:04.913684Z node 13 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:02:04.918193Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715660:3, at schemeshard: 72057594046644480 2025-05-07T09:02:04.944056Z node 13 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [13:7501626493123673070:2350], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715660 completed, doublechecking } 2025-05-07T09:02:05.019422Z node 13 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [13:7501626497418640439:2891] txid# 281474976715661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:02:05.142920Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715662. Ctx: { TraceId: 01jtmzp3kf8v27vxyn6a5z2qg9, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=NzExNzFlZmYtYzdkMGYyZWYtMzhkOTk0NmUtYzQwNzgzYjc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:02:05.261836Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715663. Ctx: { TraceId: 01jtmzp3v0fdd3sxmyk6acqgvx, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=NzExNzFlZmYtYzdkMGYyZWYtMzhkOTk0NmUtYzQwNzgzYjc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpIndexes::UpsertWithNullKeysSimple [GOOD] Test command err: Trying to start YDB, gRPC: 25727, MsgBus: 16845 2025-05-07T09:01:09.774639Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501626254844990271:2067];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:09.774712Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003c22/r3tmp/tmpVnW6HK/pdisk_1.dat 2025-05-07T09:01:10.998671Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T09:01:11.135370Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:01:11.297573Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:01:11.297736Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:01:11.309208Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:01:11.532412Z node 1 :BS_CONTROLLER ERROR: {BSC07@impl.h:2175} ProcessControllerEvent event processing took too much time Type# 268637706 Duration# 0.109693s 2025-05-07T09:01:11.532472Z node 1 :BS_CONTROLLER ERROR: {BSC00@bsc.cpp:666} StateWork event processing took too much time Type# 2146435078 Duration# 0.109769s TServer::EnableGrpc on GrpcPort 25727, node 1 2025-05-07T09:01:12.783419Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:01:12.783460Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:01:12.783471Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:01:12.783651Z node 1 :NET_CLASSIFIER ERROR: 
net_classifier.cpp:228: got bad distributable configuration 2025-05-07T09:01:14.778175Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501626254844990271:2067];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:14.778244Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; TClient is connected to server localhost:16845 TClient is connected to server localhost:16845 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:01:15.793811Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:01:15.944156Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:01:16.333841Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:01:16.598768Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:01:16.694371Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:01:17.140166Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501626289204730307:2415], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:17.140361Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:20.673351Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T09:01:20.745599Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T09:01:20.840054Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T09:01:20.935872Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T09:01:20.980453Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T09:01:21.088805Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T09:01:21.162008Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T09:01:21.403351Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501626306384600191:2490], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:21.403445Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:21.403851Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501626306384600196:2493], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:21.442355Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-05-07T09:01:21.459725Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501626306384600198:2494], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T09:01:21.567881Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501626306384600257:3451] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:01:26.107435Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7261: Cannot get console configs 2025-05-07T09:01:26.107467Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:01:26.560756Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269877761, Sender [1:7501626327859437068:3643], Recipient [1:7501626259139957969:2181]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-05-07T09:01:26.560821Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4937: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-05-07T09:01:26.560843Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5713: Pipe server connected, at tablet: 72057594046644480 2025-05-07T09:01:26.560950Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269877761, Sender [1:7501626327859437069:3644], Recipient [1:7501626259139957969:2181]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-05-07T09:01:26.560961Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4937: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-05-07T09:01:26.560968Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5713: Pipe server connected, at tablet: 72057594046644480 2025-05-07T09:01:26.560999Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269877761, Sender [1:7501626327859437070:3645], Recipient [1:7501626259139957969:2181]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-05-07T09:01:26.561023Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4937: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-05-07T09:01:26.561031Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5713: Pipe server connected, at tablet: 72057594046644480 2025-05-07T09:01:26.561061Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269877761, Sender [1:7501626327859437071:3646], Recipient [1:7501626259139957969:2181]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-05-07T09:01:26.561070Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4937: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-05-07T09:01:26.561081Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5713: Pipe server connected, at tablet: 72057594046644480 2025-05-07T ... 
parts: 1/1, is published: true 2025-05-07T09:01:57.991784Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [3:7501626421576005098:2140] message: TxId: 281474976715760 2025-05-07T09:01:57.991807Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 281474976715760 ready parts: 1/1 2025-05-07T09:01:57.991819Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 281474976715760:0 2025-05-07T09:01:57.991828Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 281474976715760:0 2025-05-07T09:01:57.991862Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 17] was 4 2025-05-07T09:01:57.992119Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-05-07T09:01:57.992172Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:630: Send to actor: [3:7501626421576005098:2140] msg type: 271124998 msg: NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976715760 at schemeshard: 72057594046644480 2025-05-07T09:01:57.992272Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271124998, Sender [3:7501626421576005098:2140], Recipient [3:7501626421576005098:2140]: NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976715760 2025-05-07T09:01:57.992292Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4997: StateWork, processing event TEvSchemeShard::TEvNotifyTxCompletionResult 2025-05-07T09:01:57.992306Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6706: Handle: TEvNotifyTxCompletionResult: txId# 281474976715760 2025-05-07T09:01:57.992320Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6708: Message: TxId: 281474976715760 2025-05-07T09:01:57.992353Z node 3 :BUILD_INDEX INFO: schemeshard_build_index__progress.cpp:2321: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxReply : TEvNotifyTxCompletionResult, txId# 281474976715760, buildInfoId: 281474976710674 2025-05-07T09:01:57.992404Z node 3 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:2324: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxReply : TEvNotifyTxCompletionResult, txId# 281474976715760, buildInfo: TBuildInfo{ IndexBuildId: 281474976710674, Uid: , DomainPathId: [OwnerId: 72057594046644480, LocalPathId: 1], TablePathId: [OwnerId: 72057594046644480, LocalPathId: 17], IndexType: EIndexTypeGlobal, IndexName: IndexName, IndexColumn: IndexColumn, State: Unlocking, IsCancellationRequested: 0, Issue: , SubscribersCount: 0, CreateSender: [3:7501626460230713203:2535], AlterMainTableTxId: 0, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 281474976715757, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976715758, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 1746608517801, ApplyTxId: 281474976715759, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976715760, UnlockTxStatus: StatusAccepted, UnlockTxDone: 0, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 2, upload bytes: 51, read rows: 2, read bytes: 51 }, Billed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }} 2025-05-07T09:01:57.992440Z node 3 :FLAT_TX_SCHEMESHARD 
TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-05-07T09:01:57.992663Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-05-07T09:01:57.992762Z node 3 :BUILD_INDEX INFO: schemeshard_build_index__progress.cpp:1105: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 281474976710674 Unlocking TBuildInfo{ IndexBuildId: 281474976710674, Uid: , DomainPathId: [OwnerId: 72057594046644480, LocalPathId: 1], TablePathId: [OwnerId: 72057594046644480, LocalPathId: 17], IndexType: EIndexTypeGlobal, IndexName: IndexName, IndexColumn: IndexColumn, State: Unlocking, IsCancellationRequested: 0, Issue: , SubscribersCount: 0, CreateSender: [3:7501626460230713203:2535], AlterMainTableTxId: 0, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 281474976715757, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976715758, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 1746608517801, ApplyTxId: 281474976715759, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976715760, UnlockTxStatus: StatusAccepted, UnlockTxDone: 1, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 2, upload bytes: 51, read rows: 2, read bytes: 51 }, Billed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }} 2025-05-07T09:01:57.992793Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-05-07T09:01:57.992805Z node 3 :BUILD_INDEX INFO: schemeshard_build_index_tx_base.cpp:25: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: Change state from Unlocking to Done 2025-05-07T09:01:57.993008Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-05-07T09:01:57.993101Z node 3 :BUILD_INDEX INFO: schemeshard_build_index__progress.cpp:1105: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 281474976710674 Done TBuildInfo{ IndexBuildId: 281474976710674, Uid: , DomainPathId: [OwnerId: 72057594046644480, LocalPathId: 1], TablePathId: [OwnerId: 72057594046644480, LocalPathId: 17], IndexType: EIndexTypeGlobal, IndexName: IndexName, IndexColumn: IndexColumn, State: Done, IsCancellationRequested: 0, Issue: , SubscribersCount: 0, CreateSender: [3:7501626460230713203:2535], AlterMainTableTxId: 0, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 281474976715757, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976715758, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 1746608517801, ApplyTxId: 281474976715759, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976715760, UnlockTxStatus: StatusAccepted, UnlockTxDone: 1, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 2, upload bytes: 51, read rows: 2, read bytes: 51 }, Billed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }} 2025-05-07T09:01:57.993126Z node 3 :BUILD_INDEX TRACE: schemeshard_build_index_tx_base.cpp:325: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TIndexBuildInfo SendNotifications: : id# 281474976710674, subscribers count# 0 2025-05-07T09:01:57.993135Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-05-07T09:01:57.993158Z node 3 
:FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-05-07T09:01:58.122985Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269877761, Sender [3:7501626464525680691:3814], Recipient [3:7501626421576005098:2140]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-05-07T09:01:58.123022Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4937: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-05-07T09:01:58.123034Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5713: Pipe server connected, at tablet: 72057594046644480 2025-05-07T09:01:58.123256Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 274792450, Sender [3:7501626464525680688:2548], Recipient [3:7501626421576005098:2140]: NKikimrIndexBuilder.TEvGetRequest DatabaseName: "/Root" IndexBuildId: 281474976710674 2025-05-07T09:01:58.123271Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4974: StateWork, processing event TEvIndexBuilder::TEvGetRequest 2025-05-07T09:01:58.123354Z node 3 :BUILD_INDEX DEBUG: schemeshard_build_index__get.cpp:19: TIndexBuilder::TXTYPE_GET_INDEX_BUILD: DoExecute DatabaseName: "/Root" IndexBuildId: 281474976710674 2025-05-07T09:01:58.123539Z node 3 :BUILD_INDEX DEBUG: schemeshard_build_index_tx_base.h:93: TIndexBuilder::TXTYPE_GET_INDEX_BUILD: Reply Status: SUCCESS IndexBuild { Id: 281474976710674 State: STATE_DONE Settings { source_path: "/Root/TestTable" index { name: "IndexName" index_columns: "IndexColumn" global_index { } } max_shards_in_flight: 32 ScanSettings { } } Progress: 100 } 2025-05-07T09:01:58.123556Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-05-07T09:01:58.123613Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-05-07T09:01:58.123731Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:630: Send to actor: [3:7501626464525680688:2548] msg type: 274792451 msg: NKikimrIndexBuilder.TEvGetResponse Status: SUCCESS IndexBuild { Id: 281474976710674 State: STATE_DONE Settings { source_path: "/Root/TestTable" index { name: "IndexName" index_columns: "IndexColumn" global_index { } } max_shards_in_flight: 32 ScanSettings { } } Progress: 100 } at schemeshard: 72057594046644480 2025-05-07T09:01:58.124586Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269877764, Sender [3:7501626464525680691:3814], Recipient [3:7501626421576005098:2140]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-05-07T09:01:58.124606Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4938: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-05-07T09:01:58.124617Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5761: Server pipe is reset, at schemeshard: 72057594046644480 2025-05-07T09:01:58.174078Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271125000, Sender [0:0:0], Recipient [3:7501626421576005098:2140]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T09:01:58.174119Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4848: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T09:01:58.174166Z node 3 :FLAT_TX_SCHEMESHARD TRACE: 
schemeshard_impl.cpp:4840: StateWork, received event# 271124999, Sender [3:7501626421576005098:2140], Recipient [3:7501626421576005098:2140]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T09:01:58.174183Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4847: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T09:01:59.178150Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271125000, Sender [0:0:0], Recipient [3:7501626421576005098:2140]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T09:01:59.178193Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4848: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T09:01:59.178237Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271124999, Sender [3:7501626421576005098:2140], Recipient [3:7501626421576005098:2140]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T09:01:59.178251Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4847: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T09:01:59.643108Z node 3 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill |91.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_osiris/ydb-core-blobstorage-ut_blobstorage-ut_osiris |91.8%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_osiris/ydb-core-blobstorage-ut_blobstorage-ut_osiris |91.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_osiris/ydb-core-blobstorage-ut_blobstorage-ut_osiris >> YdbYqlClient::ColumnFamiliesDescriptionWithStorageAndIndex [GOOD] >> YdbYqlClient::ColumnFamiliesExternalBlobsWithoutDefaultProfile >> YdbYqlClient::TestReadTableNotNullBorder2 [GOOD] >> YdbYqlClient::TestReadTableSnapshot ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> ReadRows::KillTabletDuringRead [GOOD] Test command err: 2025-05-07T09:01:12.483418Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501626270410240951:2110];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:12.490269Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00286b/r3tmp/tmpblZC5F/pdisk_1.dat 2025-05-07T09:01:13.359121Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:01:13.401052Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:01:13.401161Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:01:13.421350Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 32091, node 1 2025-05-07T09:01:13.733783Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:01:13.733808Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty 
maybe) 2025-05-07T09:01:13.733816Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:01:13.733949Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:13137 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:01:14.610291Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... TClient is connected to server localhost:13137 TClient is connected to server localhost:13137 2025-05-07T09:01:15.928378Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710659:0, at schemeshard: 72057594046644480 2025-05-07T09:01:17.485434Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501626270410240951:2110];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:17.485503Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:01:18.869123Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501626296180045821:2344], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:18.869227Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501626296180045833:2347], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:18.869269Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:18.874663Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710660:3, at schemeshard: 72057594046644480 2025-05-07T09:01:18.920779Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501626296180045836:2348], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710660 completed, doublechecking } 2025-05-07T09:01:18.992836Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501626296180045909:2716] txid# 281474976710661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } TClient is connected to server localhost:13137 2025-05-07T09:01:19.935045Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T09:01:27.252709Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [4:451:2413], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T09:01:27.253090Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T09:01:27.253205Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00286b/r3tmp/tmpFKHY1v/pdisk_1.dat 2025-05-07T09:01:28.219880Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-05-07T09:01:28.316996Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:01:28.317185Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:01:28.332688Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:01:28.709408Z node 4 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269877761, Sender [4:999:2803], Recipient [4:551:2465]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-05-07T09:01:28.709526Z node 4 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4937: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-05-07T09:01:28.709575Z node 4 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5713: Pipe server connected, at tablet: 72057594046644480 2025-05-07T09:01:28.709724Z node 4 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271122432, Sender [4:996:2801], Recipient [4:551:2465]: {TEvModifySchemeTransaction txid# 281474976715657 TabletId# 72057594046644480} 2025-05-07T09:01:28.709761Z node 4 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4851: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-05-07T09:01:28.928661Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/Root" OperationType: ESchemeOpCreateSubDomain SubDomain { Name: "tenant" } } TxId: 281474976715657 TabletId: 72057594046644480 PeerName: "" , at schemeshard: 72057594046644480 2025-05-07T09:01:28.928990Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_subdomain.cpp:92: TCreateSubDomain Propose, path: /Root/tenant, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-05-07T09:01:28.929118Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:319: AttachChild: child attached as only one child to the parent, parent id: [OwnerId: 72057594046644480, LocalPathId: 1], parent name: Root, child name: tenant, child id: [OwnerId: 72057594046644480, LocalPathId: 2], at schemeshard: 72057594046644480 2025-05-07T09:01:28.929317Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 0 2025-05-07T09:01:28.929557Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046644480, 
LocalPathId: 2] was 1 2025-05-07T09:01:28.929707Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 281474976715657:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-05-07T09:01:28.929781Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-05-07T09:01:28.929883Z node 4 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-05-07T09:01:28.929958Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 1 2025-05-07T09:01:28.933232Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 2 2025-05-07T09:01:28.946517Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 281474976715657, response: Status: StatusAccepted TxId: 281474976715657 SchemeshardId: 72057594046644480 PathId: 2, at schemeshard: 72057594046644480 2025-05-07T09:01:28.946771Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976715657, database: /Root, subject: , status: StatusAccepted, operation: CREATE DATABASE, path: /Root/tenant 2025-05-07T09:01:28.946860Z node 4 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-05-07T09:01:28.946912Z node 4 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:275: Activate send for 281474976715657:0 2025-05-07T09:01:28.947284Z node 4 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 2146435072, Sender [4:551:2465], Recipient [4:551:2465]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-05-07T09:01:28.947334Z node 4 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4857: ... 594046644480, LocalPathId: 3] state 'Ready' dataSize 75 rowCount 1 cpuUsage 0 2025-05-07T09:01:42.496211Z node 4 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:568: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037890 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 3] raw table stats: DataSize: 75 RowCount: 1 IndexSize: 0 InMemSize: 0 LastAccessTime: 2017 LastUpdateTime: 2017 ImmediateTxCompleted: 1 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 1 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 1 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 SearchHeight: 1 LastFullCompactionTs: 2 HasLoanedParts: false Channels { Channel: 1 DataSize: 30 IndexSize: 0 } Channels { Channel: 2 DataSize: 45 IndexSize: 0 } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 2025-05-07T09:01:42.496278Z node 4 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:608: Will delay TTxStoreTableStats on# 0.100000s, queue# 1 2025-05-07T09:01:42.608038Z node 4 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715664. 
Ctx: { TraceId: 01jtmzndq453yr2m4g1w42vbdq, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=4&id=YTIzYjU5ZGEtNWMwMDI5NGUtYTJkNzViZTQtMmJkOTdlNGE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:01:45.274862Z node 6 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[6:7501626411800347288:2076];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:45.290084Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00286b/r3tmp/tmpYJX4Nz/pdisk_1.dat 2025-05-07T09:01:45.670935Z node 6 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:01:45.755598Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:01:45.755703Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:01:45.769675Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 32381, node 6 2025-05-07T09:01:46.041005Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:01:46.041033Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:01:46.041042Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:01:46.041183Z node 6 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:1074 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:01:46.576552Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 
TClient is connected to server localhost:1074 2025-05-07T09:01:54.328393Z node 9 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[9:7501626449632530158:2073];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:54.344200Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00286b/r3tmp/tmp1mdwC4/pdisk_1.dat 2025-05-07T09:01:54.658703Z node 9 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:01:54.727198Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:01:54.727303Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:01:54.732334Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 62607, node 9 2025-05-07T09:01:54.996105Z node 9 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:01:54.996131Z node 9 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:01:54.996140Z node 9 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:01:54.996303Z node 9 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:11802 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:01:55.931996Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:02:05.102736Z node 12 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [12:324:2367], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T09:02:05.103023Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T09:02:05.103241Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00286b/r3tmp/tmpy2mkhA/pdisk_1.dat 2025-05-07T09:02:05.468670Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-05-07T09:02:05.507226Z node 12 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:02:05.560325Z node 12 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:02:05.560521Z node 12 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:02:05.572391Z node 12 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:02:05.669589Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-05-07T09:02:05.970082Z node 12 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [12:731:2613], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:02:05.970216Z node 12 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [12:741:2618], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:02:05.970324Z node 12 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:02:05.977073Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480 2025-05-07T09:02:06.152802Z node 12 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [12:745:2621], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-05-07T09:02:06.187641Z node 12 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [12:815:2660] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:02:06.272530Z node 12 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715660. Ctx: { TraceId: 01jtmzp4mgfet70ehps6jv1w24, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=12&id=NDZlODAyNDAtNTc5MTAxNjItZDIwM2JiN2ItYzQxZjA5MQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root Stoping tablet id: 72075186224037888 2025-05-07T09:02:06.316503Z node 12 :RPC_REQUEST ERROR: rpc_read_rows.cpp:731: TReadRowsRPC ReplyWithError: Failed to connect to shard 72075186224037888 ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> YdbYqlClient::TestReadTableMultiShardWithDescribeAndRowLimit [GOOD] Test command err: 2025-05-07T09:01:36.014634Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501626368206885087:2207];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:36.015281Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002847/r3tmp/tmp7Nx7UY/pdisk_1.dat 2025-05-07T09:01:37.051904Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:01:37.102575Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T09:01:37.131473Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:01:37.131573Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:01:37.141566Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 16069, node 1 2025-05-07T09:01:37.538910Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:01:37.538936Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:01:37.538943Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:01:37.539065Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:20788 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:01:38.312481Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:01:38.420662Z node 1 :GRPC_SERVER INFO: grpc_request_proxy.cpp:572: Got grpc request# ListEndpointsRequest, traceId# 01jtmzn9qj7t4za9802fsb1411, sdkBuildInfo# ydb-cpp-sdk/dev, state# AS_NOT_PERFORMED, database# undef, peer# ipv6:[::1]:46568, grpcInfo# grpc-c++/1.54.3 grpc-c/31.0.0 (linux; chttp2), timeout# 9.982343s 2025-05-07T09:01:38.486653Z node 1 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:575: Got grpc request# CreateSessionRequest, traceId# 01jtmzn9rd9jf11akkf86hfwm0, sdkBuildInfo# ydb-cpp-sdk/dev, state# AS_NOT_PERFORMED, database# undef, peer# ipv6:[::1]:46580, grpcInfo# grpc-c++/1.54.3 grpc-c/31.0.0 (linux; chttp2), timeout# undef 2025-05-07T09:01:40.986167Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501626368206885087:2207];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:40.986240Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:01:42.130837Z node 1 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:575: Got grpc request# CreateTableRequest, traceId# 01jtmzndbj0ps4h7thcq5j2v5q, sdkBuildInfo# ydb-cpp-sdk/dev, state# AS_NOT_PERFORMED, database# undef, peer# ipv6:[::1]:48852, grpcInfo# grpc-c++/1.54.3 grpc-c/31.0.0 (linux; chttp2), timeout# undef 2025-05-07T09:01:42.131730Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:7501626372501852474:2141] Handle TEvProposeTransaction 2025-05-07T09:01:42.131754Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:7501626372501852474:2141] TxId# 281474976715658 ProcessProposeTransaction 2025-05-07T09:01:42.131797Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:7501626372501852474:2141] Cookie# 0 userReqId# "" txid# 281474976715658 SEND to# [1:7501626398271657094:2632] 2025-05-07T09:01:42.360547Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1520: Actor# [1:7501626398271657094:2632] txid# 281474976715658 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/Root" OperationType: ESchemeOpCreateTable CreateTable { Name: "Test" Columns { Name: "Key" Type: "Uint32" NotNull: false } Columns { Name: "Fk" Type: "Uint64" NotNull: false } Columns { Name: "Value" Type: "String" NotNull: false } KeyColumnNames: "Key" 
KeyColumnNames: "Fk" UniformPartitionsCount: 16 PartitionConfig { } Temporary: false } CreateIndexedTable { } } } DatabaseName: "" RequestType: "" PeerName: "ipv6:[::1]:48852" 2025-05-07T09:01:42.360601Z node 1 :TX_PROXY DEBUG: schemereq.cpp:563: Actor# [1:7501626398271657094:2632] txid# 281474976715658 Bootstrap, UserSID: CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-05-07T09:01:42.360937Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1585: Actor# [1:7501626398271657094:2632] txid# 281474976715658 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P6 2025-05-07T09:01:42.361006Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1575: Actor# [1:7501626398271657094:2632] txid# 281474976715658 TEvNavigateKeySet requested from SchemeCache 2025-05-07T09:01:42.361140Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1408: Actor# [1:7501626398271657094:2632] txid# 281474976715658 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-05-07T09:01:42.361261Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1455: Actor# [1:7501626398271657094:2632] HANDLE EvNavigateKeySetResult, txid# 281474976715658 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-05-07T09:01:42.361309Z node 1 :TX_PROXY DEBUG: schemereq.cpp:102: Actor# [1:7501626398271657094:2632] txid# 281474976715658 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715658 TabletId# 72057594046644480} 2025-05-07T09:01:42.361461Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1310: Actor# [1:7501626398271657094:2632] txid# 281474976715658 HANDLE EvClientConnected 2025-05-07T09:01:42.380086Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-05-07T09:01:42.391878Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1332: Actor# [1:7501626398271657094:2632] txid# 281474976715658 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715658} 2025-05-07T09:01:42.391948Z node 1 :TX_PROXY DEBUG: schemereq.cpp:543: Actor# [1:7501626398271657094:2632] txid# 281474976715658 SEND to# [1:7501626398271657093:2340] Source {TEvProposeTransactionStatus txid# 281474976715658 Status# 53} 2025-05-07T09:01:42.394391Z node 1 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:469: SchemeBoardUpdate /Root 2025-05-07T09:01:42.394478Z node 1 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:498: Can't update SecurityState for /Root - no PublicKeys 2025-05-07T09:01:42.394489Z node 1 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:469: SchemeBoardUpdate /Root 2025-05-07T09:01:42.394520Z node 1 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:498: Can't update SecurityState for /Root - no PublicKeys 2025-05-07T09:01:42.569184Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3098: StateInit, received event# 268828672, Sender [1:7501626398271657137:2667], Recipient [1:7501626398271657254:2343]: NKikimr::TEvTablet::TEvBoot 2025-05-07T09:01:42.570082Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3098: StateInit, received event# 268828672, Sender [1:7501626398271657130:2660], Recipient [1:7501626398271657274:2349]: NKikimr::TEvTablet::TEvBoot 2025-05-07T09:01:42.570589Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3098: StateInit, received event# 268828672, Sender 
[1:7501626398271657128:2658], Recipient [1:7501626398271657296:2357]: NKikimr::TEvTablet::TEvBoot 2025-05-07T09:01:42.570973Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3098: StateInit, received event# 268828672, Sender [1:7501626398271657143:2673], Recipient [1:7501626398271657259:2345]: NKikimr::TEvTablet::TEvBoot 2025-05-07T09:01:42.571344Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3098: StateInit, received event# 268828672, Sender [1:7501626398271657141:2671], Recipient [1:7501626398271657279:2353]: NKikimr::TEvTablet::TEvBoot 2025-05-07T09:01:42.571672Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3098: StateInit, received event# 268828672, Sender [1:7501626398271657134:2664], Recipient [1:7501626398271657276:2351]: NKikimr::TEvTablet::TEvBoot 2025-05-07T09:01:42.572075Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3098: StateInit, received event# 268828672, Sender [1:7501626398271657129:2659], Recipient [1:7501626398271657264:2346]: NKikimr::TEvTablet::TEvBoot 2025-05-07T09:01:42.572422Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3098: StateInit, received event# 268828672, Sender [1:7501626398271657132:2662], Recipient [1:7501626398271657282:2356]: NKikimr::TEvTablet::TEvBoot 2025-05-07T09:01:42.572739Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3098: StateInit, received event# 268828672, Sender [1:7501626398271657136:2666], Recipient [1:7501626398271657277:2352]: NKikimr::TEvTablet::TEvBoot 2025-05-07T09:01:42.573015Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3098: StateInit, received event# 268828672, Sender [1:7501626398271657142:2672], Recipient [1:7501626398271657255:2344]: NKikimr::TEvTablet::TEvBoot 2025-05-07T09:01:42.574140Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3098: StateInit, received event# 268828672, Sender [1:7501626398271657138:2668], Recipient [1:7501626398271657280:2354]: NKikimr::TEvTablet::TEvBoot 2025-05-07T09:01:42.574801Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3098: StateInit, received event# 268828672, Sender [1:7501626398271657139:2669], Recipient [1:7501626398271657265:2347]: NKikimr::TEvTablet::TEvBoot 2025-05-07T09:01:42.575323Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3098: StateInit, received event# 268828672, Sender [1:7501626398271657140:2670], Recipient [1:7501626398271657273:2348]: NKikimr::TEvTablet::TEvBoot 2025-05-07T09:01:42.575860Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3098: StateInit, received event# 268828672, Sender [1:7501626398271657131:2661], Recipient [1:7501626398271657275:2350]: NKikimr::TEvTablet::TEvBoot 2025-05-07T09:01:42.576422Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3098: StateInit, received event# 268828672, Sender [1:7501626398271657135:2665], Recipient [1:7501626398271657281:2355]: ... 
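The CreateTableRequest recorded above (columns Key: Uint32, Fk: Uint64, Value: String; composite key Key+Fk; UniformPartitionsCount: 16) maps onto an ordinary C++ SDK CreateTable call. The following is only a sketch under assumptions: it uses the public NYdb::NTable API rather than the test's own helpers, header paths differ between the in-tree and standalone SDK layouts, and the endpoint/database values are placeholders, not anything taken from this run.

// Sketch only; not the test's code. Header paths vary by SDK layout.
#include <ydb-cpp-sdk/client/driver/driver.h>
#include <ydb-cpp-sdk/client/table/table.h>

using namespace NYdb;
using namespace NYdb::NTable;

int main() {
    // Placeholder connection parameters, not values from this run.
    TDriver driver(TDriverConfig()
        .SetEndpoint("grpc://localhost:2136")
        .SetDatabase("/Root"));
    TTableClient client(driver);

    TStatus status = client.RetryOperationSync([](TSession session) {
        auto desc = TTableBuilder()
            .AddNullableColumn("Key", EPrimitiveType::Uint32)
            .AddNullableColumn("Fk", EPrimitiveType::Uint64)
            .AddNullableColumn("Value", EPrimitiveType::String)
            .SetPrimaryKeyColumns({"Key", "Fk"})
            .Build();
        // UniformPartitionsCount: 16, as in the logged request.
        return session.CreateTable("/Root/Test", std::move(desc),
            TCreateTableSettings().PartitioningPolicy(
                TPartitioningPolicy().UniformPartitions(16))).GetValueSync();
    });

    driver.Stop(true);
    return status.IsSuccess() ? 0 : 1;
}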
p:272: GetNextActiveOp at 72075186224037897 (dry run) active 1 active planned 0 immediate 1 planned 0 2025-05-07T09:02:06.481827Z node 10 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 72075186224037897 2025-05-07T09:02:06.481839Z node 10 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037897 has no attached operations 2025-05-07T09:02:06.481847Z node 10 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 72075186224037897 2025-05-07T09:02:06.481873Z node 10 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037897 2025-05-07T09:02:06.482929Z node 10 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 2146435082, Sender [10:7501626501723788092:2137], Recipient [10:7501626497428819816:2347]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvRegisterScanActor 2025-05-07T09:02:06.482960Z node 10 :TX_DATASHARD TRACE: datashard_impl.h:3160: StateWork, processing event TEvPrivate::TEvRegisterScanActor 2025-05-07T09:02:06.483025Z node 10 :READ_TABLE_API DEBUG: rpc_read_table.cpp:267: [10:7501626501723788073:2400] Adding quota request to queue ShardId: 0, TxId: 281474976710680 2025-05-07T09:02:06.483054Z node 10 :READ_TABLE_API DEBUG: rpc_read_table.cpp:629: [10:7501626501723788073:2400] Assign stream quota to Shard 0, Quota 5, TxId 281474976710680 Reserved: 5 of 25, Queued: 0 2025-05-07T09:02:06.483188Z node 10 :TX_DATASHARD DEBUG: read_table_scan.cpp:514: Got quota for read table scan ShardId: 72075186224037897, TxId: 281474976710681, MessageQuota: 5 2025-05-07T09:02:06.483389Z node 10 :TX_DATASHARD DEBUG: read_table_scan.cpp:662: Send response data ShardId: 72075186224037897, TxId: 281474976710681, Size: 54, Rows: 0, PendingAcks: 1, MessageQuota: 4 2025-05-07T09:02:06.483433Z node 10 :TX_DATASHARD DEBUG: read_table_scan.cpp:483: Got stream data ack ShardId: 72075186224037897, TxId: 281474976710681, PendingAcks: 0 2025-05-07T09:02:06.483450Z node 10 :TX_DATASHARD DEBUG: read_table_scan.cpp:717: Finish scan ShardId: 72075186224037897, TxId: 281474976710681, MessageQuota: 4 2025-05-07T09:02:06.483566Z node 10 :READ_TABLE_API DEBUG: rpc_read_table.cpp:647: [10:7501626501723788073:2400] got stream part, size: 75, RU required: 128 rate limiter absent 2025-05-07T09:02:06.483983Z node 10 :READ_TABLE_API DEBUG: rpc_read_table.cpp:563: [10:7501626501723788073:2400] Starting inactivity timer for 600.000000s with tag 3 2025-05-07T09:02:06.486340Z node 10 :TX_DATASHARD DEBUG: datashard.cpp:4471: FullScan complete at 72075186224037897 2025-05-07T09:02:06.486356Z node 10 :TX_DATASHARD DEBUG: datashard.cpp:4477: Found op: cookie: 281474976710681, at: 72075186224037897 2025-05-07T09:02:06.486470Z node 10 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269549569, Sender [10:7501626501723788074:2400], Recipient [10:7501626497428819816:2347]: NKikimrTxDataShard.TEvCancelTransactionProposal TxId: 281474976710681 2025-05-07T09:02:06.486489Z node 10 :TX_DATASHARD TRACE: datashard_impl.h:3172: StateWork, processing event TEvDataShard::TEvCancelTransactionProposal 2025-05-07T09:02:06.486500Z node 10 :TX_DATASHARD DEBUG: datashard__cancel_tx_proposal.cpp:73: Got TEvDataShard::TEvCancelTransactionProposal 72075186224037897 txId 281474976710681 2025-05-07T09:02:06.486532Z node 10 :TX_DATASHARD DEBUG: datashard__cancel_tx_proposal.cpp:44: Start TTxCancelTransactionProposal at tablet 72075186224037897 txId 281474976710681 
2025-05-07T09:02:06.486604Z node 10 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269287431, Sender [10:7501626501723788074:2400], Recipient [10:7501626497428819816:2347]: NKikimrTx.TEvInterruptTransaction TxId: 281474976710681 2025-05-07T09:02:06.486617Z node 10 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, processing event TEvTxProcessing::TEvInterruptTransaction 2025-05-07T09:02:06.486677Z node 10 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269553190, Sender [10:7501626501723788074:2400], Recipient [10:7501626497428819816:2347]: NKikimrTxDataShard.TEvDiscardVolatileSnapshotRequest OwnerId: 72057594046644480 PathId: 2 Step: 1746608526523 TxId: 281474976710680 2025-05-07T09:02:06.486833Z node 10 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 2146435072, Sender [10:7501626497428819816:2347], Recipient [10:7501626497428819816:2347]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-05-07T09:02:06.486861Z node 10 :TX_DATASHARD TRACE: datashard_impl.h:3155: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-05-07T09:02:06.486885Z node 10 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037897 2025-05-07T09:02:06.486902Z node 10 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037897 active 1 active planned 0 immediate 1 planned 0 2025-05-07T09:02:06.486926Z node 10 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976710681] at 72075186224037897 for ReadTableScan 2025-05-07T09:02:06.486937Z node 10 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976710681] at 72075186224037897 on unit ReadTableScan 2025-05-07T09:02:06.486955Z node 10 :TX_DATASHARD TRACE: read_table_scan_unit.cpp:158: ReadTable scan complete for [0:281474976710681] at 72075186224037897 error: , IsFatalError: 0 2025-05-07T09:02:06.486983Z node 10 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976710681] at 72075186224037897 is Executed 2025-05-07T09:02:06.486997Z node 10 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976710681] at 72075186224037897 executing on unit ReadTableScan 2025-05-07T09:02:06.487008Z node 10 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976710681] at 72075186224037897 to execution unit FinishPropose 2025-05-07T09:02:06.487020Z node 10 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976710681] at 72075186224037897 on unit FinishPropose 2025-05-07T09:02:06.487055Z node 10 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976710681] at 72075186224037897 is DelayCompleteNoMoreRestarts 2025-05-07T09:02:06.487075Z node 10 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976710681] at 72075186224037897 executing on unit FinishPropose 2025-05-07T09:02:06.487085Z node 10 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976710681] at 72075186224037897 to execution unit CompletedOperations 2025-05-07T09:02:06.487094Z node 10 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976710681] at 72075186224037897 on unit CompletedOperations 2025-05-07T09:02:06.487124Z node 10 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976710681] at 72075186224037897 is Executed 2025-05-07T09:02:06.487133Z node 10 
:TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976710681] at 72075186224037897 executing on unit CompletedOperations 2025-05-07T09:02:06.487144Z node 10 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:281474976710681] at 72075186224037897 has finished 2025-05-07T09:02:06.487155Z node 10 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037897 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-05-07T09:02:06.487164Z node 10 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 72075186224037897 2025-05-07T09:02:06.487175Z node 10 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037897 has no attached operations 2025-05-07T09:02:06.487186Z node 10 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 72075186224037897 2025-05-07T09:02:06.488736Z node 10 :READ_TABLE_API NOTICE: rpc_read_table.cpp:531: [10:7501626501723788073:2400] Finish grpc stream, status: 400000 2025-05-07T09:02:06.490387Z node 10 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037897 2025-05-07T09:02:06.490415Z node 10 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976710681] at 72075186224037897 on unit FinishPropose 2025-05-07T09:02:06.490435Z node 10 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 281474976710681 at tablet 72075186224037897 send to client, exec latency: 6 ms, propose latency: 9 ms, status: COMPLETE 2025-05-07T09:02:06.490502Z node 10 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037897 2025-05-07T09:02:06.496060Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0000f4e80] received request Name# SchemeOperation ok# false data# peer# current inflight# 0 2025-05-07T09:02:06.496374Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000171080] received request Name# SchemeOperationStatus ok# false data# peer# current inflight# 0 2025-05-07T09:02:06.496781Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0000f3680] received request Name# SchemeDescribe ok# false data# peer# current inflight# 0 2025-05-07T09:02:06.496913Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0000fe480] received request Name# ChooseProxy ok# false data# peer# current inflight# 0 2025-05-07T09:02:06.496986Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000032a80] received request Name# PersQueueRequest ok# false data# peer# current inflight# 0 2025-05-07T09:02:06.497185Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000036080] received request Name# SchemeInitRoot ok# false data# peer# current inflight# 0 2025-05-07T09:02:06.497249Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0000c7e80] received request Name# ResolveNode ok# false data# peer# current inflight# 0 2025-05-07T09:02:06.497359Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0000b0a80] received request Name# FillNode ok# false data# peer# current inflight# 0 2025-05-07T09:02:06.497486Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a00006d880] received request Name# DrainNode ok# false data# peer# current inflight# 0 2025-05-07T09:02:06.497516Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000155480] received request Name# BlobStorageConfig ok# false data# peer# current inflight# 0 2025-05-07T09:02:06.497688Z node 10 
:GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a00001ec80] received request Name# HiveCreateTablet ok# false data# peer# current inflight# 0 2025-05-07T09:02:06.497695Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000107480] received request Name# TestShardControl ok# false data# peer# current inflight# 0 2025-05-07T09:02:06.497843Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0000f8a80] received request Name# RegisterNode ok# false data# peer# current inflight# 0 2025-05-07T09:02:06.497885Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000108080] received request Name# CmsRequest ok# false data# peer# current inflight# 0 2025-05-07T09:02:06.498104Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0000f5480] received request Name# ConsoleRequest ok# false data# peer# current inflight# 0 2025-05-07T09:02:06.498290Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0000f9080] received request Name# InterconnectDebug ok# false data# peer# current inflight# 0 2025-05-07T09:02:06.498396Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0000b1c80] received request Name# TabletStateRequest ok# false data# peer# current inflight# 0 |91.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_cdc_stream_reboots/ydb-core-tx-schemeshard-ut_cdc_stream_reboots |91.9%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_cdc_stream_reboots/ydb-core-tx-schemeshard-ut_cdc_stream_reboots >> TGRpcYdbTest::SdkUuidViaParams [GOOD] >> TGRpcYdbTest::ReadTable |91.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_cdc_stream_reboots/ydb-core-tx-schemeshard-ut_cdc_stream_reboots |91.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tools/stress_tool/ut/ydb-tools-stress_tool-ut |91.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tools/stress_tool/ut/ydb-tools-stress_tool-ut |91.9%| [LD] {RESULT} $(B)/ydb/tools/stress_tool/ut/ydb-tools-stress_tool-ut >> YdbTableBulkUpsert::RetryOperation [GOOD] >> YdbOlapStore::LogPagingBefore [GOOD] >> YdbOlapStore::LogPagingAfter ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> YdbTableBulkUpsert::RetryOperation [GOOD] Test command err: 2025-05-07T09:01:28.332138Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501626338946567969:2225];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:28.332196Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002853/r3tmp/tmpbcSRkh/pdisk_1.dat 2025-05-07T09:01:29.448299Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T09:01:29.464459Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:01:29.464570Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:01:29.471756Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 1406, node 1 2025-05-07T09:01:29.578761Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table 
profiles were not loaded 2025-05-07T09:01:29.647591Z node 1 :GRPC_SERVER WARN: grpc_request_proxy.cpp:509: SchemeBoardDelete /Root Strong=0 2025-05-07T09:01:29.651149Z node 1 :GRPC_SERVER WARN: grpc_request_proxy.cpp:509: SchemeBoardDelete /Root Strong=0 2025-05-07T09:01:29.983001Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:01:29.983027Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:01:29.983040Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:01:29.983163Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:29067 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:01:30.636599Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:01:33.331750Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501626338946567969:2225];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:33.331808Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:01:33.521391Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 SUCCESS 3 rows in 0.031843s 2025-05-07T09:01:34.444294Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501626364716374588:2440], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:34.444424Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:34.444833Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501626364716374608:2443], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:34.449241Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480 2025-05-07T09:01:34.480075Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501626364716374610:2444], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-05-07T09:01:34.567227Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501626364716374695:4169] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:01:35.427905Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710661. Ctx: { TraceId: 01jtmzn5v8c7fb59z27v4kd9hp, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZTk0YzE1OGQtYWI1ZDE4NjctOTUxZmExYjMtMTI5MDM4ZTI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root SUCCESS count returned 3 rows 2025-05-07T09:01:37.707049Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7501626377354792364:2140];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:37.709151Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002853/r3tmp/tmpHwNYx0/pdisk_1.dat 2025-05-07T09:01:38.111615Z node 4 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:01:38.168131Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:01:38.168251Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:01:38.175443Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 26268, node 4 2025-05-07T09:01:38.370397Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:01:38.370421Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:01:38.370428Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:01:38.374681Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:64499 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-05-07T09:01:38.741547Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:01:41.812813Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480
: Error: Bulk upsert to table '/Root/ui8' Only async-indexed tables are supported by BulkUpsert
: Error: Bulk upsert to table '/Root/ui8/Value_index/indexImplTable' unknown table 2025-05-07T09:01:44.579800Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7501626407329893446:2072];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:44.579905Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002853/r3tmp/tmp0njMkB/pdisk_1.dat 2025-05-07T09:01:44.814578Z node 7 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 27221, node 7 2025-05-07T09:01:44.913958Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:01:44.914133Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:01:45.093218Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:01:45.137462Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:01:45.137495Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:01:45.137504Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:01:45.137658Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:29200 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription ... 7762515]; 2025-05-07T09:01:55.026143Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002853/r3tmp/tmpD2RO3E/pdisk_1.dat 2025-05-07T09:01:55.627924Z node 10 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:01:55.698750Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:01:55.698856Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:01:55.708365Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 2534, node 10 2025-05-07T09:01:55.966123Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:01:55.966146Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:01:55.966155Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:01:55.966352Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:28346 WaitRootIsUp 'Root'... 
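The two ': Error:' lines above are the expected rejections this test provokes: BulkUpsert accepts only async-indexed tables, and index implementation tables cannot be written to directly. For reference, a plain BulkUpsert against an unindexed table looks roughly like the sketch below; the table name, row shape, and values are illustrative placeholders, not the test's fixtures, and the client is assumed to be built as in the CreateTable sketch earlier.

// Sketch: building rows and issuing a plain BulkUpsert against a
// non-indexed table. Table name and values are placeholders.
#include <cstdint>
#include <iostream>
#include <ydb-cpp-sdk/client/table/table.h>

using namespace NYdb;
using namespace NYdb::NTable;

TValue MakeRows() {
    TValueBuilder rows;
    rows.BeginList();
    for (uint32_t key = 0; key < 3; ++key) {
        rows.AddListItem()
            .BeginStruct()
                .AddMember("Key").Uint32(key)
                .AddMember("Value").String("payload")
            .EndStruct();
    }
    rows.EndList();
    return rows.Build();
}

void UpsertDemo(TTableClient& client) {
    auto result = client.BulkUpsert("/Root/BulkDemo", MakeRows()).GetValueSync();
    if (!result.IsSuccess()) {
        // On a sync-indexed table this is where the rejections above surface.
        std::cerr << result.GetIssues().ToString() << std::endl;
    }
}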
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:01:56.420043Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:01:59.965127Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-05-07T09:02:00.019395Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[10:7501626453361873582:2074];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:02:00.019490Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Injecting ABORTED 10 times Result: ABORTED Injecting ABORTED 6 times Result: ABORTED Injecting ABORTED 5 times Result: SUCCESS Injecting ABORTED 3 times Result: SUCCESS Injecting ABORTED 0 times Result: SUCCESS Injecting OVERLOADED 10 times Result: OVERLOADED Injecting OVERLOADED 6 times Result: OVERLOADED Injecting OVERLOADED 5 times Result: SUCCESS Injecting OVERLOADED 3 times Result: SUCCESS Injecting OVERLOADED 0 times Result: SUCCESS Injecting CLIENT_RESOURCE_EXHAUSTED 10 times Result: CLIENT_RESOURCE_EXHAUSTED Injecting CLIENT_RESOURCE_EXHAUSTED 6 times Result: CLIENT_RESOURCE_EXHAUSTED Injecting CLIENT_RESOURCE_EXHAUSTED 5 times Result: SUCCESS Injecting CLIENT_RESOURCE_EXHAUSTED 3 times Result: SUCCESS Injecting CLIENT_RESOURCE_EXHAUSTED 0 times Result: SUCCESS Injecting UNAVAILABLE 10 times Result: UNAVAILABLE Injecting UNAVAILABLE 6 times Result: UNAVAILABLE Injecting UNAVAILABLE 5 times Result: SUCCESS Injecting UNAVAILABLE 3 times Result: SUCCESS Injecting UNAVAILABLE 0 times Result: SUCCESS Injecting BAD_SESSION 10 times Result: BAD_SESSION Injecting BAD_SESSION 6 times Result: BAD_SESSION Injecting BAD_SESSION 5 times Result: SUCCESS Injecting BAD_SESSION 3 times Result: SUCCESS Injecting BAD_SESSION 0 times Result: SUCCESS Injecting SESSION_BUSY 10 times Result: SESSION_BUSY Injecting SESSION_BUSY 6 times Result: SESSION_BUSY Injecting SESSION_BUSY 5 times Result: SUCCESS Injecting SESSION_BUSY 3 times Result: SUCCESS Injecting SESSION_BUSY 0 times Result: SUCCESS Injecting NOT_FOUND 10 times Result: NOT_FOUND Injecting NOT_FOUND 6 times Result: NOT_FOUND Injecting NOT_FOUND 5 times Result: SUCCESS Injecting NOT_FOUND 3 times Result: 
SUCCESS Injecting NOT_FOUND 0 times Result: SUCCESS Injecting UNDETERMINED 10 times Result: UNDETERMINED Injecting UNDETERMINED 6 times Result: UNDETERMINED Injecting UNDETERMINED 5 times Result: SUCCESS Injecting UNDETERMINED 3 times Result: SUCCESS Injecting UNDETERMINED 0 times Result: SUCCESS Injecting TRANSPORT_UNAVAILABLE 10 times Result: TRANSPORT_UNAVAILABLE Injecting TRANSPORT_UNAVAILABLE 6 times Result: TRANSPORT_UNAVAILABLE Injecting TRANSPORT_UNAVAILABLE 5 times Result: SUCCESS Injecting TRANSPORT_UNAVAILABLE 3 times Result: SUCCESS Injecting TRANSPORT_UNAVAILABLE 0 times Result: SUCCESS 2025-05-07T09:02:03.266806Z node 13 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[13:7501626486494806430:2077];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:02:03.266891Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002853/r3tmp/tmpvwThhM/pdisk_1.dat 2025-05-07T09:02:03.653876Z node 13 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:02:03.663920Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:02:03.664016Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:02:03.666654Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 14874, node 13 2025-05-07T09:02:03.789376Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:02:03.789405Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:02:03.789416Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:02:03.789599Z node 13 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:14191 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
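The 'Injecting <STATUS> N times ... Result:' pairs above and below record the retry contract this test checks: because BulkUpsert is idempotent, a budget of five retries absorbs up to five injected failures, while runs with six or more surface the injected status. Note the test retries even statuses such as NOT_FOUND that a production policy would usually treat as terminal. A minimal retry loop in this spirit is sketched below; it is written against the public C++ SDK under stated assumptions and is not the test's actual helper.

// Sketch of an idempotent-retry wrapper; not the test's helper.
#include <chrono>
#include <thread>
#include <ydb-cpp-sdk/client/table/table.h>

using namespace NYdb;
using namespace NYdb::NTable;

bool IsRetryable(EStatus s) {
    switch (s) {
        case EStatus::ABORTED:
        case EStatus::OVERLOADED:
        case EStatus::UNAVAILABLE:
        case EStatus::BAD_SESSION:
        case EStatus::SESSION_BUSY:
        case EStatus::CLIENT_RESOURCE_EXHAUSTED:
        case EStatus::UNDETERMINED:          // safe only because BulkUpsert is idempotent
        case EStatus::TRANSPORT_UNAVAILABLE:
            return true;
        default:
            return false;
    }
}

TStatus BulkUpsertWithRetry(TTableClient& client, const TString& table,
                            const TValue& rows, int maxRetries = 5) {
    for (int attempt = 0; ; ++attempt) {
        auto result = client.BulkUpsert(table, TValue(rows)).GetValueSync();
        if (result.IsSuccess() || attempt >= maxRetries ||
            !IsRetryable(result.GetStatus())) {
            return result;  // up to five failures absorbed, matching the log
        }
        // Exponential backoff between attempts: 50ms, 100ms, 200ms, ...
        std::this_thread::sleep_for(std::chrono::milliseconds(50 << attempt));
    }
}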
2025-05-07T09:02:04.148324Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:02:07.233024Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 Injecting ABORTED 10 times Result: ABORTED Injecting ABORTED 6 times Result: ABORTED Injecting ABORTED 5 times Result: SUCCESS Injecting ABORTED 3 times Result: SUCCESS Injecting ABORTED 0 times Result: SUCCESS Injecting OVERLOADED 10 times Result: OVERLOADED Injecting OVERLOADED 6 times Result: OVERLOADED Injecting OVERLOADED 5 times Result: SUCCESS Injecting OVERLOADED 3 times Result: SUCCESS Injecting OVERLOADED 0 times Result: SUCCESS Injecting CLIENT_RESOURCE_EXHAUSTED 10 times Result: CLIENT_RESOURCE_EXHAUSTED Injecting CLIENT_RESOURCE_EXHAUSTED 6 times Result: CLIENT_RESOURCE_EXHAUSTED Injecting CLIENT_RESOURCE_EXHAUSTED 5 times Result: SUCCESS Injecting CLIENT_RESOURCE_EXHAUSTED 3 times Result: SUCCESS Injecting CLIENT_RESOURCE_EXHAUSTED 0 times Result: SUCCESS Injecting UNAVAILABLE 10 times Result: UNAVAILABLE Injecting UNAVAILABLE 6 times Result: UNAVAILABLE Injecting UNAVAILABLE 5 times Result: SUCCESS Injecting UNAVAILABLE 3 times Result: SUCCESS Injecting UNAVAILABLE 0 times Result: SUCCESS Injecting BAD_SESSION 10 times Result: BAD_SESSION Injecting BAD_SESSION 6 times Result: BAD_SESSION Injecting BAD_SESSION 5 times Result: SUCCESS Injecting BAD_SESSION 3 times Result: SUCCESS Injecting BAD_SESSION 0 times Result: SUCCESS Injecting SESSION_BUSY 10 times Result: SESSION_BUSY Injecting SESSION_BUSY 6 times Result: SESSION_BUSY Injecting SESSION_BUSY 5 times Result: SUCCESS Injecting SESSION_BUSY 3 times Result: SUCCESS Injecting SESSION_BUSY 0 times Result: SUCCESS Injecting NOT_FOUND 10 times Result: NOT_FOUND Injecting NOT_FOUND 6 times Result: NOT_FOUND Injecting NOT_FOUND 5 times Result: SUCCESS Injecting NOT_FOUND 3 times Result: SUCCESS Injecting NOT_FOUND 0 times Result: SUCCESS Injecting UNDETERMINED 10 times Result: UNDETERMINED Injecting UNDETERMINED 6 times 2025-05-07T09:02:08.267042Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[13:7501626486494806430:2077];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:02:08.267153Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Result: UNDETERMINED Injecting UNDETERMINED 5 times Result: SUCCESS Injecting UNDETERMINED 3 times Result: SUCCESS Injecting UNDETERMINED 0 times Result: SUCCESS Injecting TRANSPORT_UNAVAILABLE 10 times Result: TRANSPORT_UNAVAILABLE Injecting TRANSPORT_UNAVAILABLE 6 times Result: TRANSPORT_UNAVAILABLE Injecting TRANSPORT_UNAVAILABLE 5 times Result: SUCCESS Injecting TRANSPORT_UNAVAILABLE 3 times Result: SUCCESS Injecting TRANSPORT_UNAVAILABLE 0 times Result: SUCCESS >> CommitOffset::Commit_WithSession_ParentNotFinished_OtherSession_ParentCommittedToEnd [GOOD] >> CommitOffset::Commit_WithSession_ToPastParentPartition >> GroupWriteTest::WriteHardRateDispatcher [GOOD] >> YdbOlapStore::LogPagingBetween [GOOD] >> YdbOlapStore::LogWithUnionAllAscending >> YdbScripting::BasicV1 
[GOOD] >> TopicAutoscaling::ReadFromTimestamp_PQv1 [GOOD] |91.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/public/sdk/cpp/src/client/topic/ut/ydb-public-sdk-cpp-src-client-topic-ut |91.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/public/sdk/cpp/src/client/topic/ut/ydb-public-sdk-cpp-src-client-topic-ut |91.9%| [LD] {RESULT} $(B)/ydb/public/sdk/cpp/src/client/topic/ut/ydb-public-sdk-cpp-src-client-topic-ut >> YdbOlapStore::LogGrepNonExisting [GOOD] >> YdbOlapStore::LogGrepExisting >> YdbTableBulkUpsert::Limits [GOOD] >> YdbTableBulkUpsert::DecimalPK |91.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_rtmr_reboots/ydb-core-tx-schemeshard-ut_rtmr_reboots |91.9%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_rtmr_reboots/ydb-core-tx-schemeshard-ut_rtmr_reboots |91.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_rtmr_reboots/ydb-core-tx-schemeshard-ut_rtmr_reboots >> YdbTableBulkUpsert::AsyncIndexShouldFail [GOOD] >> YdbTableBulkUpsert::AsyncIndexShouldSucceed ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> YdbScripting::BasicV1 [GOOD] Test command err: 2025-05-07T09:01:28.154472Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501626337848376940:2084];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:28.154553Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002852/r3tmp/tmpBwAIxZ/pdisk_1.dat 2025-05-07T09:01:29.260613Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:01:29.265907Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T09:01:29.281910Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:01:29.287251Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:01:29.296269Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 5153, node 1 2025-05-07T09:01:29.722778Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:01:29.722807Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:01:29.722817Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:01:29.722978Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:1924 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:01:30.396412Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:01:33.162097Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501626337848376940:2084];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:33.162161Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:01:33.832445Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 2025-05-07T09:01:34.894450Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501626363618183706:2441], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:34.894558Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:34.895177Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501626363618183718:2444], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:34.899067Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480 2025-05-07T09:01:34.947778Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501626363618183720:2445], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-05-07T09:01:35.037136Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501626367913151099:4125] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:01:36.174606Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710661. Ctx: { TraceId: 01jtmzn69ccrnz6pevhd70hhhv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MmU3MTU0MzItMjc4MzhkN2ItOTM0NjBjMmQtZTUzMGQxNmQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root SUCCESS 2025-05-07T09:01:39.322709Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7501626385575029771:2076];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:39.330129Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002852/r3tmp/tmpXVVHN1/pdisk_1.dat 2025-05-07T09:01:39.776822Z node 4 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:01:39.816628Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:01:39.816713Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:01:39.824974Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 15146, node 4 2025-05-07T09:01:40.208899Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:01:40.208934Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:01:40.208964Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:01:40.209089Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:4100 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-05-07T09:01:40.639511Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:01:44.323138Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7501626385575029771:2076];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:44.323197Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:01:44.851252Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-05-07T09:01:45.947219Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7501626411344836593:2443], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:45.947306Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:45.948358Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7501626411344836605:2446], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:45.952579Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480 2025-05-07T09:01:46.015916Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7501626411344836607:2447], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-05-07T09:01:46.097627Z node 4 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [4:7501626415639803998:4153] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:01:46.30 ... nges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:01:59.350784Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:02:02.894145Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7501626481720752708:2337], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:02:02.894275Z node 10 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:02:03.052095Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 2025-05-07T09:02:03.170546Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7501626486015720188:2348], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:02:03.170661Z node 10 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:02:03.170889Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7501626486015720193:2351], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:02:03.174515Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480 2025-05-07T09:02:03.196331Z node 10 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [10:7501626486015720195:2352], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-05-07T09:02:03.303550Z node 10 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [10:7501626486015720270:2811] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:02:03.385072Z node 10 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710661. Ctx: { TraceId: 01jtmznyaqb0wsda2er5j2175c, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=10&id=NmZhM2VlZTktNTI1ZWEyNjctODQzMzQ0NmMtYmU0NTVmY2Y=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:02:03.517878Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[10:7501626464540882630:2257];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:02:03.517944Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:02:03.533014Z node 10 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710663. Ctx: { TraceId: 01jtmzp24j1eb4p7rzjzpt7ehw, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=10&id=ZmI4MjgzMmMtMjM4ZmE1My03NjgwMTFmLWYzNTQxMWU2, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:02:03.541666Z node 10 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1746608523569, txId: 281474976710662] shutting down 2025-05-07T09:02:05.372082Z node 13 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[13:7501626497381323855:2076];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:02:05.372189Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002852/r3tmp/tmpW24yvC/pdisk_1.dat 2025-05-07T09:02:05.523439Z node 13 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:02:05.563283Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:02:05.563378Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:02:05.566978Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 30071, node 13 2025-05-07T09:02:05.660746Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:02:05.660783Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:02:05.660793Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:02:05.660952Z node 13 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:12758 WaitRootIsUp 
'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:02:05.825538Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:02:09.572283Z node 13 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [13:7501626514561194057:2337], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:02:09.572371Z node 13 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:02:09.650736Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 2025-05-07T09:02:09.744885Z node 13 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [13:7501626514561194237:2348], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:02:09.744965Z node 13 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:02:09.745192Z node 13 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [13:7501626514561194242:2351], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:02:09.749211Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480 2025-05-07T09:02:09.766866Z node 13 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [13:7501626514561194244:2352], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-05-07T09:02:09.829722Z node 13 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [13:7501626514561194317:2783] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:02:09.876784Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710661. Ctx: { TraceId: 01jtmzp4jb8kejbbce9j6sth5t, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=NzEyNDg0MjgtZjQ1NTBjZWEtYzA5NmJmOWUtMTFkMjY0MWU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:02:09.959923Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710663. Ctx: { TraceId: 01jtmzp8ey73jhrvmyvk9hjht7, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=M2RiM2NlZDktMzIyMTI2Zi00ZWU2YmJkNC0zOGFmM2NjZQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:02:09.963884Z node 13 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1746608530002, txId: 281474976710662] shutting down |91.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_restore/ydb-core-tx-schemeshard-ut_restore |91.9%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_restore/ydb-core-tx-schemeshard-ut_restore |91.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_restore/ydb-core-tx-schemeshard-ut_restore >> YdbYqlClient::TestReadTableSnapshot [GOOD] |91.9%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest |91.9%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest |91.9%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest |91.9%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest >> TopicAutoscaling::ReadFromTimestamp_AutoscaleAwareSDK [GOOD] |91.9%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest |91.9%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest |91.9%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest |91.9%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest |91.9%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest |91.9%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest |91.9%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest |91.9%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest |91.9%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest |91.9%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest |91.9%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest |91.9%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest |91.9%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest |91.9%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest |91.9%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest |91.9%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest ------- [TM] 
{asan, default-linux-x86_64, release} ydb/core/persqueue/ut/ut_with_sdk/unittest >> TopicAutoscaling::ReadFromTimestamp_PQv1 [GOOD] Test command err: 2025-05-07T08:58:54.771069Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501625676996055241:2062];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:58:54.771114Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-05-07T08:58:55.101213Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003ffb/r3tmp/tmpmGAVTA/pdisk_1.dat 2025-05-07T08:58:55.486955Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:58:55.549049Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:58:55.549174Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:58:55.554951Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 13634, node 1 2025-05-07T08:58:55.894606Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/zvgn/003ffb/r3tmp/yandexloaEdG.tmp 2025-05-07T08:58:55.894659Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/zvgn/003ffb/r3tmp/yandexloaEdG.tmp 2025-05-07T08:58:55.894842Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/zvgn/003ffb/r3tmp/yandexloaEdG.tmp 2025-05-07T08:58:55.894965Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-05-07T08:58:55.953623Z INFO: TTestServer started on Port 9801 GrpcPort 13634 TClient is connected to server localhost:9801 PQClient connected to localhost:13634 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:58:56.449794Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T08:58:56.499000Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:58:56.525830Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-05-07T08:58:56.772849Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710660, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:58:56.802963Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710661, at schemeshard: 72057594046644480 2025-05-07T08:58:59.438299Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501625698470892514:2339], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:58:59.442881Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:58:59.443709Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501625698470892550:2343], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:58:59.499266Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710662:3, at schemeshard: 72057594046644480 2025-05-07T08:58:59.525372Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501625698470892552:2344], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710662 completed, doublechecking } 2025-05-07T08:58:59.619088Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501625698470892616:2444] txid# 281474976710663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:58:59.862252Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501625676996055241:2062];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:58:59.862537Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:58:59.889284Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T08:58:59.924065Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7501625698470892624:2350], status: SCHEME_ERROR, issues:
<main>: Error: Type annotation, code: 1030
<main>:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-05-07T08:58:59.924467Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2147: SessionId: ydb://session/3?node_id=1&id=MmNiNDdkNGMtZjVjMzAxMjMtY2M1YzU2OGYtNjk0MTYyZWI=, ActorId: [1:7501625698470892511:2337], ActorState: ExecuteState, TraceId: 01jtmzgef13wczcs0edwa3zyka, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-05-07T08:58:59.935015Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-05-07T08:58:59.964180Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T08:59:00.076293Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); === CheckClustersList. Subcribe to ClusterTracker from [1:7501625702765860206:2621] === CheckClustersList. 
Ok 2025-05-07T08:59:06.666237Z :TopicSplitMerge INFO: TTopicSdkTestSetup started 2025-05-07T08:59:06.708387Z node 1 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:132: new create topic request 2025-05-07T08:59:06.710208Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269877761, Sender [1:7501625728535664177:2695], Recipient [1:7501625681291022961:2196]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-05-07T08:59:06.710249Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4937: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-05-07T08:59:06.710263Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5713: Pipe server connected, at tablet: 72057594046644480 2025-05-07T08:59:06.710307Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271122432, Sender [1:7501625728535664173:2692], Recipient [1:7501625681291022961:2196]: {TEvModifySchemeTransaction txid# 281474976710673 TabletId# 72057594046644480} 2025-05-07T08:59:06.710322Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4851: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-05-07T08:59:06.811644Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/Root" OperationType: ESchemeOpCreatePersQueueGroup CreatePersQueueGroup { Name: "test-topic" TotalGroupCount: 1 PartitionPerTablet: 1 PQTabletConfig { PartitionConfig { MaxCountInPartition: 2147483647 LifetimeSeconds: 86400 SourceIdLifetimeSeconds: 1382400 WriteSpeedInBytesPerSecond: 1048576 BurstSize: 1048576 ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } SourceIdMaxCounts: 6000000 } RequireAuthWrite: true RequireAuthRead: true FormatVersion: 0 Codecs { } PartitionStrategy { MinPartitionCount: 1 MaxPartitionCount: 100 ScaleThresholdSeconds: 300 ScaleUpPartitionWriteSpeedThresholdPercent: 90 ScaleDownPartitionWriteSpeedThresholdPercent: 30 PartitionStrategyType: CAN_SPLIT } Consumers { Name: "test-consumer" ReadFromTimestampsMs: 0 FormatVersion: 0 Codec { } ServiceType: "data-streams" Version: 0 } } } } TxId: 281474976710673 TabletId: 72057594046644480 Owner: "root@builtin" ... 
21518:2462], Partition 0, Sender [7:7501626458568721582:2468], Recipient [7:7501626458568721579:2466], Cookie: 0 2025-05-07T09:02:10.759895Z node 7 :PERSQUEUE TRACE: partition.h:563: StateIdle, received event# 271188544, Sender [7:7501626458568721582:2468], Recipient [7:7501626458568721579:2466]: NKikimr::NPQ::NReadQuoterEvents::TEvQuotaCountersUpdated 2025-05-07T09:02:10.759927Z node 7 :PERSQUEUE TRACE: partition.h:609: StateIdle, processing event NReadQuoterEvents::TEvQuotaCountersUpdated 2025-05-07T09:02:10.760033Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5200: HandleHook, received event# 271187975, Sender [7:7501626458568721519:2463], Recipient [7:7501626510108330510:2867]: NKikimrPQ.TStatus GetStatForAllConsumers: true 2025-05-07T09:02:10.760064Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5208: HandleHook, processing event TEvPersQueue::TEvStatus 2025-05-07T09:02:10.760083Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:1797: [PQ: 72075186224037897] Handle TEvPersQueue::TEvStatus 2025-05-07T09:02:10.760167Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5200: HandleHook, received event# 271188536, Sender [7:7501626458568721519:2463], Recipient [7:7501626510108330510:2867]: NKikimrPQ.TEvSubDomainStatus SubDomainOutOfSpace: false 2025-05-07T09:02:10.760197Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5233: HandleHook, processing event TEvPQ::TEvSubDomainStatus 2025-05-07T09:02:10.760259Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5200: HandleHook, received event# 271187975, Sender [7:7501626458568721519:2463], Recipient [7:7501626510108330509:2866]: NKikimrPQ.TStatus GetStatForAllConsumers: true 2025-05-07T09:02:10.760273Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5208: HandleHook, processing event TEvPersQueue::TEvStatus 2025-05-07T09:02:10.760284Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:1797: [PQ: 72075186224037896] Handle TEvPersQueue::TEvStatus 2025-05-07T09:02:10.760338Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5200: HandleHook, received event# 271188536, Sender [7:7501626458568721519:2463], Recipient [7:7501626510108330509:2866]: NKikimrPQ.TEvSubDomainStatus SubDomainOutOfSpace: false 2025-05-07T09:02:10.760356Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5233: HandleHook, processing event TEvPQ::TEvSubDomainStatus 2025-05-07T09:02:10.760402Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5200: HandleHook, received event# 271187975, Sender [7:7501626458568721519:2463], Recipient [7:7501626458568721518:2462]: NKikimrPQ.TStatus GetStatForAllConsumers: true 2025-05-07T09:02:10.760420Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5208: HandleHook, processing event TEvPersQueue::TEvStatus 2025-05-07T09:02:10.760434Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:1797: [PQ: 72075186224037892] Handle TEvPersQueue::TEvStatus 2025-05-07T09:02:10.760486Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5200: HandleHook, received event# 271188536, Sender [7:7501626458568721519:2463], Recipient [7:7501626458568721518:2462]: NKikimrPQ.TEvSubDomainStatus SubDomainOutOfSpace: false 2025-05-07T09:02:10.760504Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5233: HandleHook, processing event TEvPQ::TEvSubDomainStatus 2025-05-07T09:02:10.760558Z node 7 :PERSQUEUE TRACE: partition.h:561: StateIdle event# 271188491 (NKikimr::TEvPQ::TEvPartitionStatus), Tablet [7:7501626510108330510:2867], Partition 1, Sender [7:7501626510108330510:2867], Recipient [7:7501626510108330587:2873], Cookie: 0 2025-05-07T09:02:10.760603Z node 7 :PERSQUEUE TRACE: partition.h:563: StateIdle, received event# 271188491, Sender [7:7501626510108330510:2867], Recipient [7:7501626510108330587:2873]: NKikimr::TEvPQ::TEvPartitionStatus 
2025-05-07T09:02:10.760628Z node 7 :PERSQUEUE TRACE: partition.h:581: StateIdle, processing event TEvPQ::TEvPartitionStatus 2025-05-07T09:02:10.760869Z node 7 :PERSQUEUE DEBUG: partition.cpp:855: [PQ: 72075186224037897, Partition: 1, State: StateIdle] Topic PartitionStatus PartitionSize: 0 UsedReserveSize: 0 ReserveSize: 0 PartitionConfig{ MaxCountInPartition: 2147483647 LifetimeSeconds: 86400 SourceIdLifetimeSeconds: 1382400 WriteSpeedInBytesPerSecond: 1048576 BurstSize: 1048576 TotalPartitions: 3 ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } SourceIdMaxCounts: 6000000 } 2025-05-07T09:02:10.761040Z node 7 :PERSQUEUE TRACE: partition.h:561: StateIdle event# 271188536 (NKikimr::TEvPQ::TEvSubDomainStatus), Tablet [7:7501626510108330510:2867], Partition 1, Sender [7:7501626510108330510:2867], Recipient [7:7501626510108330587:2873], Cookie: 0 2025-05-07T09:02:10.761099Z node 7 :PERSQUEUE TRACE: partition.h:563: StateIdle, received event# 271188536, Sender [7:7501626510108330510:2867], Recipient [7:7501626510108330587:2873]: NKikimrPQ.TEvSubDomainStatus SubDomainOutOfSpace: false 2025-05-07T09:02:10.761123Z node 7 :PERSQUEUE TRACE: partition.h:605: StateIdle, processing event TEvPQ::TEvSubDomainStatus 2025-05-07T09:02:10.761166Z node 7 :PERSQUEUE TRACE: partition.h:561: StateIdle event# 271188491 (NKikimr::TEvPQ::TEvPartitionStatus), Tablet [7:7501626510108330509:2866], Partition 2, Sender [7:7501626510108330509:2866], Recipient [7:7501626510108330590:2875], Cookie: 0 2025-05-07T09:02:10.761223Z node 7 :PERSQUEUE TRACE: partition.h:563: StateIdle, received event# 271188491, Sender [7:7501626510108330509:2866], Recipient [7:7501626510108330590:2875]: NKikimr::TEvPQ::TEvPartitionStatus 2025-05-07T09:02:10.761244Z node 7 :PERSQUEUE TRACE: partition.h:581: StateIdle, processing event TEvPQ::TEvPartitionStatus 2025-05-07T09:02:10.761419Z node 7 :PERSQUEUE DEBUG: partition.cpp:855: [PQ: 72075186224037896, Partition: 2, State: StateIdle] Topic PartitionStatus PartitionSize: 0 UsedReserveSize: 0 ReserveSize: 0 PartitionConfig{ MaxCountInPartition: 2147483647 LifetimeSeconds: 86400 SourceIdLifetimeSeconds: 1382400 WriteSpeedInBytesPerSecond: 1048576 BurstSize: 1048576 TotalPartitions: 3 ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } SourceIdMaxCounts: 6000000 } 2025-05-07T09:02:10.761493Z node 7 :PERSQUEUE TRACE: partition.h:561: StateIdle event# 271188536 (NKikimr::TEvPQ::TEvSubDomainStatus), Tablet [7:7501626510108330509:2866], Partition 2, Sender [7:7501626510108330509:2866], Recipient [7:7501626510108330590:2875], Cookie: 0 
2025-05-07T09:02:10.761523Z node 7 :PERSQUEUE TRACE: partition.h:563: StateIdle, received event# 271188536, Sender [7:7501626510108330509:2866], Recipient [7:7501626510108330590:2875]: NKikimrPQ.TEvSubDomainStatus SubDomainOutOfSpace: false 2025-05-07T09:02:10.761532Z node 7 :PERSQUEUE TRACE: partition.h:605: StateIdle, processing event TEvPQ::TEvSubDomainStatus 2025-05-07T09:02:10.761561Z node 7 :PERSQUEUE TRACE: partition.h:561: StateIdle event# 271188491 (NKikimr::TEvPQ::TEvPartitionStatus), Tablet [7:7501626458568721518:2462], Partition 0, Sender [7:7501626458568721518:2462], Recipient [7:7501626458568721579:2466], Cookie: 0 2025-05-07T09:02:10.761582Z node 7 :PERSQUEUE TRACE: partition.h:563: StateIdle, received event# 271188491, Sender [7:7501626458568721518:2462], Recipient [7:7501626458568721579:2466]: NKikimr::TEvPQ::TEvPartitionStatus 2025-05-07T09:02:10.761590Z node 7 :PERSQUEUE TRACE: partition.h:581: StateIdle, processing event TEvPQ::TEvPartitionStatus 2025-05-07T09:02:10.761736Z node 7 :PERSQUEUE DEBUG: partition.cpp:855: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Topic PartitionStatus PartitionSize: 0 UsedReserveSize: 0 ReserveSize: 0 PartitionConfig{ MaxCountInPartition: 2147483647 LifetimeSeconds: 86400 SourceIdLifetimeSeconds: 1382400 WriteSpeedInBytesPerSecond: 1048576 BurstSize: 1048576 TotalPartitions: 3 ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } SourceIdMaxCounts: 6000000 } 2025-05-07T09:02:10.761842Z node 7 :PERSQUEUE TRACE: partition.h:561: StateIdle event# 271188536 (NKikimr::TEvPQ::TEvSubDomainStatus), Tablet [7:7501626458568721518:2462], Partition 0, Sender [7:7501626458568721518:2462], Recipient [7:7501626458568721579:2466], Cookie: 0 2025-05-07T09:02:10.761886Z node 7 :PERSQUEUE TRACE: partition.h:563: StateIdle, received event# 271188536, Sender [7:7501626458568721518:2462], Recipient [7:7501626458568721579:2466]: NKikimrPQ.TEvSubDomainStatus SubDomainOutOfSpace: false 2025-05-07T09:02:10.761900Z node 7 :PERSQUEUE TRACE: partition.h:605: StateIdle, processing event TEvPQ::TEvSubDomainStatus 2025-05-07T09:02:10.762058Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5200: HandleHook, received event# 271188503, Sender [7:7501626510108330587:2873], Recipient [7:7501626510108330510:2867]: NKikimr::TEvPQ::TEvPartitionLabeledCounters 2025-05-07T09:02:10.762073Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5214: HandleHook, processing event TEvPQ::TEvPartitionLabeledCounters 2025-05-07T09:02:10.762200Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5200: HandleHook, received event# 271188503, Sender [7:7501626510108330590:2875], Recipient [7:7501626510108330509:2866]: NKikimr::TEvPQ::TEvPartitionLabeledCounters 2025-05-07T09:02:10.762215Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5214: HandleHook, processing event TEvPQ::TEvPartitionLabeledCounters 2025-05-07T09:02:10.762252Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5200: HandleHook, received event# 271188503, Sender [7:7501626458568721579:2466], Recipient [7:7501626458568721518:2462]: NKikimr::TEvPQ::TEvPartitionLabeledCounters 
2025-05-07T09:02:10.762260Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5214: HandleHook, processing event TEvPQ::TEvPartitionLabeledCounters 2025-05-07T09:02:10.762558Z node 7 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:688: [72075186224037893][test-topic] Send TEvPeriodicTopicStats PathId: 13 Generation: 1 StatsReportRound: 14 DataSize: 0 UsedReserveSize: 0 2025-05-07T09:02:10.762650Z node 7 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1823: [72075186224037893][test-topic] ProcessPendingStats. PendingUpdates size 3 2025-05-07T09:02:10.762821Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271188001, Sender [7:7501626458568721519:2463], Recipient [7:7501626398439178371:2174]: NKikimrPQ.TEvPeriodicTopicStats PathId: 13 Generation: 1 Round: 14 DataSize: 0 UsedReserveSize: 0 SubDomainOutOfSpace: false 2025-05-07T09:02:10.762849Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4878: StateWork, processing event TEvPersQueue::TEvPeriodicTopicStats 2025-05-07T09:02:10.762887Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__pq_stats.cpp:100: Got periodic topic stats at partition [OwnerId: 72057594046644480, LocalPathId: 13] DataSize 0 UsedReserveSize 0 2025-05-07T09:02:10.762929Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__pq_stats.cpp:141: Will delay TTxStoreTopicStats on# 0.099995s, queue# 1 |91.9%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest |91.9%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest |91.9%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest >> ExternalIndex::Simple >> TGRpcYdbTest::ReadTable [GOOD] >> TGRpcYdbTest::ReadTablePg ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> YdbYqlClient::TestReadTableSnapshot [GOOD] Test command err: 2025-05-07T09:01:48.244162Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501626422515881594:2279];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:48.244413Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002841/r3tmp/tmpxmKJJ0/pdisk_1.dat 2025-05-07T09:01:49.188780Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:01:49.188882Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:01:49.194661Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:01:49.222837Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 17612, node 1 2025-05-07T09:01:49.347047Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T09:01:49.358800Z node 1 :GRPC_SERVER WARN: grpc_request_proxy.cpp:509: SchemeBoardDelete /Root Strong=0 2025-05-07T09:01:49.358859Z node 1 :GRPC_SERVER WARN: grpc_request_proxy.cpp:509: SchemeBoardDelete /Root Strong=0 2025-05-07T09:01:49.601902Z node 1 :NET_CLASSIFIER WARN: 
net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:01:49.601923Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:01:49.601935Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:01:49.602094Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:19549 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:01:50.003039Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:01:53.246282Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501626422515881594:2279];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:53.246355Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:01:53.624229Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501626443990718944:2339], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:53.624341Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:54.027798Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-05-07T09:01:54.242229Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501626448285686414:2349], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:54.242305Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:54.246195Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501626448285686419:2352], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:54.251690Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480 2025-05-07T09:01:54.278949Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501626448285686421:2353], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-05-07T09:01:54.363337Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501626448285686493:2820] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:01:54.566178Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715661. Ctx: { TraceId: 01jtmzns619w1716egnhgxhmx5, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=N2FhY2MwYmUtZjNkNzdjNGEtZjIzN2I5MjMtNDQxOGNiNQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:01:54.787370Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715662. Ctx: { TraceId: 01jtmznsh1bzaeh07r5zfzz3z6, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=N2FhY2MwYmUtZjNkNzdjNGEtZjIzN2I5MjMtNDQxOGNiNQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:01:54.815711Z node 1 :TX_PROXY ERROR: read_table_impl.cpp:2919: [ReadTable [1:7501626448285686575:2372] TxId# 281474976715663] RESPONSE Status# ResolveError shard: 0 table: Root/Test 2025-05-07T09:01:54.825818Z node 1 :TX_PROXY ERROR: read_table_impl.cpp:2919: [ReadTable [1:7501626448285686578:2373] TxId# 281474976715664] RESPONSE Status# ResolveError shard: 0 table: Root/Test 2025-05-07T09:01:56.877213Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7501626455692785473:2073];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:56.877297Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002841/r3tmp/tmp5Cyi89/pdisk_1.dat 2025-05-07T09:01:56.990331Z node 4 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:01:57.026479Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:01:57.026564Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:01:57.028796Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 14465, node 4 2025-05-07T09:01:57.086084Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:01:57.086106Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:01:57.086112Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:01:57.086217Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:15358 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:01:57.386949Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:02:00.091151Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7501626472872655685:2337], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:02:00.091284Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:02:00.111360Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-05-07T09:02:00.230207Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] Ac ... on.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480 2025-05-07T09:02:06.687053Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7501626500225080757:2351], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-05-07T09:02:06.752757Z node 7 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [7:7501626500225080832:2802] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:02:06.815004Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710661. Ctx: { TraceId: 01jtmzp59tbhb6zfcferb338ms, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=NDdkYzM0NzktNjI3NDc1ZmQtMmY3MGY2MGYtNmI5MGFkYTU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:02:06.963731Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710662. Ctx: { TraceId: 01jtmzp5fh1ea19wgfk8ghyamn, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=NDdkYzM0NzktNjI3NDc1ZmQtMmY3MGY2MGYtNmI5MGFkYTU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:02:07.010095Z node 7 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-05-07T09:02:08.847745Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7501626508770268641:2075];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:02:08.847884Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002841/r3tmp/tmpgB9CFD/pdisk_1.dat 2025-05-07T09:02:09.036040Z node 10 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:02:09.058161Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:02:09.058405Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:02:09.064284Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 63261, node 10 2025-05-07T09:02:09.139336Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:02:09.139388Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:02:09.139397Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:02:09.139589Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:28475 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:02:09.424965Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:02:09.470705Z node 10 :GRPC_SERVER INFO: grpc_request_proxy.cpp:572: Got grpc request# ListEndpointsRequest, traceId# 01jtmzp81y81vw951nz1r0ty12, sdkBuildInfo# ydb-cpp-sdk/dev, state# AS_NOT_PERFORMED, database# undef, peer# ipv6:[::1]:35316, grpcInfo# grpc-c++/1.54.3 grpc-c/31.0.0 (linux; chttp2), timeout# 9.998296s 2025-05-07T09:02:09.478088Z node 10 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:575: Got grpc request# CreateSessionRequest, traceId# 01jtmzp8256tzm0vxt6jxt9ry1, sdkBuildInfo# ydb-cpp-sdk/dev, state# AS_NOT_PERFORMED, database# undef, peer# ipv6:[::1]:35318, grpcInfo# grpc-c++/1.54.3 grpc-c/31.0.0 (linux; chttp2), timeout# undef 2025-05-07T09:02:12.152554Z node 10 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:575: Got grpc request# ExecuteSchemeQueryRequest, traceId# 01jtmzpanrde833n1bvqbbhv5e, sdkBuildInfo# ydb-cpp-sdk/dev, state# AS_NOT_PERFORMED, database# undef, peer# ipv6:[::1]:48092, grpcInfo# grpc-c++/1.54.3 grpc-c/31.0.0 (linux; chttp2), timeout# undef 2025-05-07T09:02:12.154933Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7501626525950138858:2337], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:02:12.155042Z node 10 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:02:12.173099Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 2025-05-07T09:02:12.181206Z node 10 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:469: SchemeBoardUpdate /Root 2025-05-07T09:02:12.181297Z node 10 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:469: SchemeBoardUpdate /Root 2025-05-07T09:02:12.181314Z node 10 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:498: Can't update SecurityState for /Root - no PublicKeys 2025-05-07T09:02:12.181353Z node 10 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:498: Can't update SecurityState for /Root - no PublicKeys 2025-05-07T09:02:12.263421Z node 10 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:469: SchemeBoardUpdate /Root 2025-05-07T09:02:12.263569Z node 10 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:498: Can't update SecurityState for /Root - no PublicKeys 2025-05-07T09:02:12.263584Z node 10 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:469: SchemeBoardUpdate /Root 2025-05-07T09:02:12.263624Z node 10 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:498: Can't update SecurityState for /Root - no PublicKeys 2025-05-07T09:02:12.287231Z node 10 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:575: Got grpc request# ReadTableRequest, traceId# 01jtmzpaszameccwebaysvpcqn, sdkBuildInfo# undef, state# AS_NOT_PERFORMED, database# undef, peer# ipv6:[::1]:48098, grpcInfo# grpc-c++/1.54.3 grpc-c/31.0.0 (linux; chttp2), timeout# undef 2025-05-07T09:02:12.306128Z node 10 :READ_TABLE_API DEBUG: rpc_read_table.cpp:267: [10:7501626525950139020:2346] Adding quota request to queue ShardId: 0, TxId: 281474976710659 2025-05-07T09:02:12.306174Z node 10 :READ_TABLE_API DEBUG: rpc_read_table.cpp:629: [10:7501626525950139020:2346] Assign stream quota to Shard 0, Quota 5, TxId 281474976710659 Reserved: 5 of 25, Queued: 0 2025-05-07T09:02:12.307345Z node 10 :READ_TABLE_API DEBUG: rpc_read_table.cpp:647: [10:7501626525950139020:2346] got stream part, size: 35, RU required: 128 rate limiter absent 2025-05-07T09:02:12.307802Z node 10 :READ_TABLE_API DEBUG: rpc_read_table.cpp:563: [10:7501626525950139020:2346] Starting inactivity timer for 600.000000s with tag 3 2025-05-07T09:02:12.307864Z node 10 :READ_TABLE_API NOTICE: rpc_read_table.cpp:531: [10:7501626525950139020:2346] Finish grpc stream, status: 400000 2025-05-07T09:02:12.322276Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000080480] received request Name# SchemeOperation ok# false data# peer# current inflight# 0 2025-05-07T09:02:12.322436Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a00001f280] received request Name# SchemeOperationStatus ok# false data# peer# current inflight# 0 2025-05-07T09:02:12.322755Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0000f5a80] received request Name# SchemeDescribe ok# false data# peer# current inflight# 0 2025-05-07T09:02:12.323804Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000080a80] received request Name# PersQueueRequest ok# false data# peer# current inflight# 0 2025-05-07T09:02:12.323987Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000081080] received request Name# SchemeInitRoot ok# false data# peer# current inflight# 0 2025-05-07T09:02:12.324161Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000034280] received request Name# ResolveNode ok# false data# peer# current 
inflight# 0 2025-05-07T09:02:12.324326Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000073880] received request Name# FillNode ok# false data# peer# current inflight# 0 2025-05-07T09:02:12.324480Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000075c80] received request Name# DrainNode ok# false data# peer# current inflight# 0 2025-05-07T09:02:12.324642Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0000c6c80] received request Name# BlobStorageConfig ok# false data# peer# current inflight# 0 2025-05-07T09:02:12.324812Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a00015fc80] received request Name# HiveCreateTablet ok# false data# peer# current inflight# 0 2025-05-07T09:02:12.324982Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000075080] received request Name# TestShardControl ok# false data# peer# current inflight# 0 2025-05-07T09:02:12.325142Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000020a80] received request Name# RegisterNode ok# false data# peer# current inflight# 0 2025-05-07T09:02:12.325295Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a00001fe80] received request Name# CmsRequest ok# false data# peer# current inflight# 0 2025-05-07T09:02:12.325458Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000029480] received request Name# ConsoleRequest ok# false data# peer# current inflight# 0 2025-05-07T09:02:12.325627Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000072080] received request Name# InterconnectDebug ok# false data# peer# current inflight# 0 2025-05-07T09:02:12.325780Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000034880] received request Name# TabletStateRequest ok# false data# peer# current inflight# 0 2025-05-07T09:02:12.327297Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000048c80] received request Name# ChooseProxy ok# false data# peer# current inflight# 0 >> YdbYqlClient::ColumnFamiliesExternalBlobsWithoutDefaultProfile [GOOD] >> YdbYqlClient::CheckDefaultTableSettings3 >> CommitOffset::DistributedTxCommit_Flat_CheckOffsetCommitForDifferentCases [GOOD] >> TPersQueueMirrorer::TestBasicRemote |91.9%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/ut_with_sdk/unittest >> TopicAutoscaling::ReadFromTimestamp_AutoscaleAwareSDK [GOOD] Test command err: 2025-05-07T08:58:53.442580Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501625670873290106:2196];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:58:53.443243Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-05-07T08:58:53.916735Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/004015/r3tmp/tmpvXy1Y2/pdisk_1.dat 2025-05-07T08:58:54.337437Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:58:54.351264Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:58:54.351358Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:58:54.353337Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 3688, node 1 2025-05-07T08:58:54.498672Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/zvgn/004015/r3tmp/yandexFYQEvW.tmp 2025-05-07T08:58:54.498705Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/zvgn/004015/r3tmp/yandexFYQEvW.tmp 2025-05-07T08:58:54.498863Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/zvgn/004015/r3tmp/yandexFYQEvW.tmp 2025-05-07T08:58:54.498985Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-05-07T08:58:54.571766Z INFO: TTestServer started on Port 10470 GrpcPort 3688 TClient is connected to server localhost:10470 PQClient connected to localhost:3688 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-05-07T08:58:54.979411Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:58:55.006465Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:58:55.022173Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-05-07T08:58:55.028230Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... waiting... waiting... 2025-05-07T08:58:57.513287Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501625688053159946:2339], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:58:57.513624Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:58:57.514074Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501625688053159981:2343], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:58:57.523405Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710662:3, at schemeshard: 72057594046644480 2025-05-07T08:58:57.544153Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501625688053159983:2344], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710662 completed, doublechecking } 2025-05-07T08:58:57.642845Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501625688053160047:2444] txid# 281474976710663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:58:58.092128Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T08:58:58.123725Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7501625688053160055:2350], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-05-07T08:58:58.125029Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2147: SessionId: ydb://session/3?node_id=1&id=MTVmMWQ4ZjEtYWUxNDA1MzMtM2IyNTM2YjgtOTkxMmUxNw==, ActorId: [1:7501625688053159943:2338], ActorState: ExecuteState, TraceId: 01jtmzgck68wwvp933vben803g, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-05-07T08:58:58.126731Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-05-07T08:58:58.133228Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T08:58:58.239380Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); 2025-05-07T08:58:58.431603Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501625670873290106:2196];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:58:58.431689Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; === CheckClustersList. Subcribe to ClusterTracker from [1:7501625692348127640:2621] === CheckClustersList. 
Ok 2025-05-07T08:59:03.687131Z :TopicSplitMerge INFO: TTopicSdkTestSetup started 2025-05-07T08:59:03.719940Z node 1 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:132: new create topic request 2025-05-07T08:59:03.723040Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269877761, Sender [1:7501625713822964271:2677], Recipient [1:7501625675168257695:2194]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-05-07T08:59:03.723104Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4937: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-05-07T08:59:03.723119Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5713: Pipe server connected, at tablet: 72057594046644480 2025-05-07T08:59:03.723164Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271122432, Sender [1:7501625713822964267:2674], Recipient [1:7501625675168257695:2194]: {TEvModifySchemeTransaction txid# 281474976710672 TabletId# 72057594046644480} 2025-05-07T08:59:03.723179Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4851: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-05-07T08:59:03.861520Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/Root" OperationType: ESchemeOpCreatePersQueueGroup CreatePersQueueGroup { Name: "test-topic" TotalGroupCount: 1 PartitionPerTablet: 1 PQTabletConfig { PartitionConfig { MaxCountInPartition: 2147483647 LifetimeSeconds: 86400 SourceIdLifetimeSeconds: 1382400 WriteSpeedInBytesPerSecond: 1048576 BurstSize: 1048576 ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } SourceIdMaxCounts: 6000000 } RequireAuthWrite: true RequireAuthRead: true FormatVersion: 0 Codecs { } PartitionStrategy { MinPartitionCount: 1 MaxPartitionCount: 100 ScaleThresholdSeconds: 300 ScaleUpPartitionWriteSpeedThresholdPercent: 90 ScaleDownPartitionWriteSpeedThresholdPercent: 30 PartitionStrategyType: CAN_SPLIT } Consumers { Name: "test-consumer" ReadFromTimestampsMs: 0 FormatVersion: 0 Codec { } ServiceType: "data-streams" Version: 0 } } } } TxId: 281474976710672 TabletId: 72057594046644480 Owner: "root@builtin" UserToken: "***" PeerName: "" , at schemeshard: 72057594046644480 2025-05-07T08:59:03.862049Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_pq.cpp:307: TCreatePQ Propose ... 
2075186224037897] Destroy direct read session test-consumer_7_1_5224659408702291272_v1 2025-05-07T09:02:13.438852Z node 7 :PERSQUEUE DEBUG: pq_impl.cpp:2899: [PQ: 72075186224037897] server disconnected, pipe [7:7501626531974262910:2918] destroyed 2025-05-07T09:02:13.438896Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5200: HandleHook, received event# 269877764, Sender [7:7501626531974262904:3436], Recipient [7:7501626471844719055:2459]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-05-07T09:02:13.438910Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5219: HandleHook, processing event TEvTabletPipe::TEvServerDisconnected 2025-05-07T09:02:13.438920Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:2882: [PQ: 72075186224037892] Handle TEvTabletPipe::TEvServerDisconnected 2025-05-07T09:02:13.438933Z node 7 :PERSQUEUE DEBUG: pq_impl.cpp:2433: [PQ: 72075186224037892] Destroy direct read session test-consumer_7_1_5224659408702291272_v1 2025-05-07T09:02:13.438954Z node 7 :PERSQUEUE DEBUG: pq_impl.cpp:2899: [PQ: 72075186224037892] server disconnected, pipe [7:7501626531974262903:2917] destroyed 2025-05-07T09:02:13.439024Z node 7 :PERSQUEUE TRACE: partition.h:561: StateIdle event# 271188506 (NKikimr::TEvPQ::TEvPipeDisconnected), Tablet [7:7501626523384327978:2843], Partition 2, Sender [7:7501626523384327978:2843], Recipient [7:7501626523384328057:2852], Cookie: 0 2025-05-07T09:02:13.439088Z node 7 :PERSQUEUE TRACE: partition.h:563: StateIdle, received event# 271188506, Sender [7:7501626523384327978:2843], Recipient [7:7501626523384328057:2852]: NKikimr::TEvPQ::TEvPipeDisconnected 2025-05-07T09:02:13.439111Z node 7 :PERSQUEUE TRACE: partition.h:591: StateIdle, processing event TEvPQ::TEvPipeDisconnected 2025-05-07T09:02:13.439144Z node 7 :PERSQUEUE DEBUG: partition_write.cpp:138: [PQ: 72075186224037896, Partition: 2, State: StateIdle] TPartition::DropOwner. 2025-05-07T09:02:13.439186Z node 7 :PERSQUEUE TRACE: partition_write.cpp:854: [PQ: 72075186224037896, Partition: 2, State: StateIdle] TPartition::ProcessChangeOwnerRequests. 2025-05-07T09:02:13.439228Z node 7 :PERSQUEUE TRACE: partition.cpp:398: [PQ: 72075186224037896, Partition: 2, State: StateIdle] Have 0 items to delete old stuff 2025-05-07T09:02:13.439303Z node 7 :PERSQUEUE TRACE: partition.cpp:407: [PQ: 72075186224037896, Partition: 2, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-05-07T09:02:13.439330Z node 7 :PERSQUEUE TRACE: partition_write.cpp:163: [PQ: 72075186224037896, Partition: 2, State: StateIdle] TPartition::ProcessReserveRequests. 2025-05-07T09:02:13.439361Z node 7 :PERSQUEUE TRACE: partition_write.cpp:252: [PQ: 72075186224037896, Partition: 2, State: StateIdle] TPartition::AnswerCurrentWrites. 
Responses.size()=0 2025-05-07T09:02:13.439415Z node 7 :PQ_READ_PROXY DEBUG: caching_service.cpp:138: Direct read cache: server session deregistered: test-consumer_7_1_5224659408702291272_v1 2025-05-07T09:02:13.439436Z node 7 :PQ_READ_PROXY DEBUG: caching_service.cpp:138: Direct read cache: server session deregistered: test-consumer_7_1_5224659408702291272_v1 2025-05-07T09:02:13.439454Z node 7 :PQ_READ_PROXY DEBUG: caching_service.cpp:138: Direct read cache: server session deregistered: test-consumer_7_1_5224659408702291272_v1 2025-05-07T09:02:13.483262Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271125000, Sender [0:0:0], Recipient [7:7501626398830273965:2142]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T09:02:13.483322Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4848: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T09:02:13.483383Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271124999, Sender [7:7501626398830273965:2142], Recipient [7:7501626398830273965:2142]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T09:02:13.483409Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4847: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T09:02:13.487284Z node 7 :PERSQUEUE TRACE: partition.h:561: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7501626523384327979:2844], Partition 1, Sender [0:0:0], Recipient [7:7501626523384328054:2850], Cookie: 0 2025-05-07T09:02:13.487399Z node 7 :PERSQUEUE TRACE: partition.h:563: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7501626523384328054:2850]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-05-07T09:02:13.487427Z node 7 :PERSQUEUE TRACE: partition.h:589: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-05-07T09:02:13.487485Z node 7 :PERSQUEUE TRACE: partition.cpp:398: [PQ: 72075186224037897, Partition: 1, State: StateIdle] Have 0 items to delete old stuff 2025-05-07T09:02:13.487593Z node 7 :PERSQUEUE TRACE: partition.cpp:407: [PQ: 72075186224037897, Partition: 1, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-05-07T09:02:13.487624Z node 7 :PERSQUEUE TRACE: partition_write.cpp:163: [PQ: 72075186224037897, Partition: 1, State: StateIdle] TPartition::ProcessReserveRequests. 2025-05-07T09:02:13.487670Z node 7 :PERSQUEUE TRACE: partition_write.cpp:252: [PQ: 72075186224037897, Partition: 1, State: StateIdle] TPartition::AnswerCurrentWrites. 
Responses.size()=0 2025-05-07T09:02:13.491402Z node 7 :PERSQUEUE TRACE: partition.h:561: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7501626523384327978:2843], Partition 2, Sender [0:0:0], Recipient [7:7501626523384328057:2852], Cookie: 0 2025-05-07T09:02:13.491517Z node 7 :PERSQUEUE TRACE: partition.h:563: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7501626523384328057:2852]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-05-07T09:02:13.491547Z node 7 :PERSQUEUE TRACE: partition.h:589: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-05-07T09:02:13.491607Z node 7 :PERSQUEUE TRACE: partition.cpp:398: [PQ: 72075186224037896, Partition: 2, State: StateIdle] Have 0 items to delete old stuff 2025-05-07T09:02:13.491724Z node 7 :PERSQUEUE TRACE: partition.cpp:407: [PQ: 72075186224037896, Partition: 2, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-05-07T09:02:13.491765Z node 7 :PERSQUEUE TRACE: partition_write.cpp:163: [PQ: 72075186224037896, Partition: 2, State: StateIdle] TPartition::ProcessReserveRequests. 2025-05-07T09:02:13.491810Z node 7 :PERSQUEUE TRACE: partition_write.cpp:252: [PQ: 72075186224037896, Partition: 2, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-05-07T09:02:13.499044Z node 7 :PERSQUEUE TRACE: partition.h:561: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7501626471844719055:2459], Partition 0, Sender [0:0:0], Recipient [7:7501626471844719111:2462], Cookie: 0 2025-05-07T09:02:13.499158Z node 7 :PERSQUEUE TRACE: partition.h:563: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7501626471844719111:2462]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-05-07T09:02:13.499186Z node 7 :PERSQUEUE TRACE: partition.h:589: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-05-07T09:02:13.499249Z node 7 :PERSQUEUE TRACE: partition.cpp:398: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete old stuff 2025-05-07T09:02:13.499352Z node 7 :PERSQUEUE TRACE: partition.cpp:407: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-05-07T09:02:13.499397Z node 7 :PERSQUEUE TRACE: partition_write.cpp:163: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ProcessReserveRequests. 2025-05-07T09:02:13.499433Z node 7 :PERSQUEUE TRACE: partition_write.cpp:252: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-05-07T09:02:13.587622Z node 7 :PERSQUEUE TRACE: partition.h:561: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7501626523384327979:2844], Partition 1, Sender [0:0:0], Recipient [7:7501626523384328054:2850], Cookie: 0 2025-05-07T09:02:13.587744Z node 7 :PERSQUEUE TRACE: partition.h:563: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7501626523384328054:2850]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-05-07T09:02:13.587777Z node 7 :PERSQUEUE TRACE: partition.h:589: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-05-07T09:02:13.587838Z node 7 :PERSQUEUE TRACE: partition.cpp:398: [PQ: 72075186224037897, Partition: 1, State: StateIdle] Have 0 items to delete old stuff 2025-05-07T09:02:13.587928Z node 7 :PERSQUEUE TRACE: partition.cpp:407: [PQ: 72075186224037897, Partition: 1, State: StateIdle] Have 0 items to delete all stuff. 
Delete command NKikimrClient.TKeyValueRequest 2025-05-07T09:02:13.587967Z node 7 :PERSQUEUE TRACE: partition_write.cpp:163: [PQ: 72075186224037897, Partition: 1, State: StateIdle] TPartition::ProcessReserveRequests. 2025-05-07T09:02:13.588011Z node 7 :PERSQUEUE TRACE: partition_write.cpp:252: [PQ: 72075186224037897, Partition: 1, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-05-07T09:02:13.591723Z node 7 :PERSQUEUE TRACE: partition.h:561: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7501626523384327978:2843], Partition 2, Sender [0:0:0], Recipient [7:7501626523384328057:2852], Cookie: 0 2025-05-07T09:02:13.591819Z node 7 :PERSQUEUE TRACE: partition.h:563: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7501626523384328057:2852]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-05-07T09:02:13.591861Z node 7 :PERSQUEUE TRACE: partition.h:589: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-05-07T09:02:13.591920Z node 7 :PERSQUEUE TRACE: partition.cpp:398: [PQ: 72075186224037896, Partition: 2, State: StateIdle] Have 0 items to delete old stuff 2025-05-07T09:02:13.592013Z node 7 :PERSQUEUE TRACE: partition.cpp:407: [PQ: 72075186224037896, Partition: 2, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-05-07T09:02:13.592043Z node 7 :PERSQUEUE TRACE: partition_write.cpp:163: [PQ: 72075186224037896, Partition: 2, State: StateIdle] TPartition::ProcessReserveRequests. 2025-05-07T09:02:13.592080Z node 7 :PERSQUEUE TRACE: partition_write.cpp:252: [PQ: 72075186224037896, Partition: 2, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-05-07T09:02:13.602192Z node 7 :PERSQUEUE TRACE: partition.h:561: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7501626471844719055:2459], Partition 0, Sender [0:0:0], Recipient [7:7501626471844719111:2462], Cookie: 0 2025-05-07T09:02:13.602286Z node 7 :PERSQUEUE TRACE: partition.h:563: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7501626471844719111:2462]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-05-07T09:02:13.602314Z node 7 :PERSQUEUE TRACE: partition.h:589: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-05-07T09:02:13.602367Z node 7 :PERSQUEUE TRACE: partition.cpp:398: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete old stuff 2025-05-07T09:02:13.602460Z node 7 :PERSQUEUE TRACE: partition.cpp:407: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-05-07T09:02:13.602498Z node 7 :PERSQUEUE TRACE: partition_write.cpp:163: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ProcessReserveRequests. 2025-05-07T09:02:13.602547Z node 7 :PERSQUEUE TRACE: partition_write.cpp:252: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::AnswerCurrentWrites. 
Responses.size()=0 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/load_test/ut/unittest >> GroupWriteTest::WriteHardRateDispatcher [GOOD] Test command err: RandomSeed# 14142374164533693802 2025-05-07T08:56:24.265031Z 1 00h01m00.010512s :BS_LOAD_TEST DEBUG: TabletId# 5 Generation# 1 is bootstrapped, going to send TEvDiscover {TabletId# 5 MinGeneration# 1 ReadBody# false DiscoverBlockedGeneration# true ForceBlockedGeneration# 0 FromLeader# true Deadline# 18446744073709551} 2025-05-07T08:56:24.290072Z 1 00h01m00.010512s :BS_LOAD_TEST INFO: TabletId# 5 Generation# 1 recieved TEvDiscoverResult {Status# NODATA BlockedGeneration# 0 Id# [0:0:0:0:0:0:0] Size# 0 MinGeneration# 1} 2025-05-07T08:56:24.290243Z 1 00h01m00.010512s :BS_LOAD_TEST DEBUG: TabletId# 5 Generation# 1 going to send TEvBlock {TabletId# 5 Generation# 1 Deadline# 18446744073709551 IsMonitored# 1} 2025-05-07T08:56:24.293264Z 1 00h01m00.010512s :BS_LOAD_TEST INFO: TabletId# 5 Generation# 1 recieved TEvBlockResult {Status# OK} 2025-05-07T08:56:24.308838Z 1 00h01m00.010512s :BS_LOAD_TEST DEBUG: TabletId# 5 Generation# 2 going to send TEvCollectGarbage {TabletId# 5 RecordGeneration# 2 PerGenerationCounter# 1 Channel# 0 Deadline# 18446744073709551 Collect# true CollectGeneration# 2 CollectStep# 0 Hard# true IsMultiCollectAllowed# 0 IsMonitored# 1} 2025-05-07T08:56:24.311724Z 1 00h01m00.010512s :BS_LOAD_TEST INFO: TabletId# 5 Generation# 2 recieved TEvCollectGarbageResult {TabletId# 5 RecordGeneration# 2 PerGenerationCounter# 1 Channel# 0 Status# OK} 2025-05-07T09:02:06.661108Z 1 00h01m10.000000s :BS_CONTROLLER ERROR: {BSC07@impl.h:2175} ProcessControllerEvent event processing took too much time Type# 268637720 Duration# 0.318769s 2025-05-07T09:02:06.697845Z 1 00h01m10.000000s :BS_CONTROLLER ERROR: {BSC00@bsc.cpp:666} StateWork event processing took too much time Type# 2146435078 Duration# 0.408311s 2025-05-07T09:02:07.523205Z 1 00h01m10.010512s :BS_LOAD_TEST DEBUG: Load tablet recieved PoisonPill, going to die 2025-05-07T09:02:07.539220Z 1 00h01m10.010512s :BS_LOAD_TEST DEBUG: TabletId# 5 Generation# 2 end working, going to send TEvCollectGarbage {TabletId# 5 RecordGeneration# 2 PerGenerationCounter# 12 Channel# 0 Deadline# 18446744073709551 Collect# true CollectGeneration# 2 CollectStep# 4294967295 Hard# true IsMultiCollectAllowed# 0 IsMonitored# 1} 2025-05-07T09:02:07.549786Z 1 00h01m10.010512s :BS_LOAD_TEST DEBUG: Load tablet recieved PoisonPill, going to die 2025-05-07T09:02:07.549882Z 1 00h01m10.010512s :BS_LOAD_TEST DEBUG: TabletId# 5 Generation# 2 end working, going to send TEvCollectGarbage {TabletId# 5 RecordGeneration# 2 PerGenerationCounter# 13 Channel# 0 Deadline# 18446744073709551 Collect# true CollectGeneration# 2 CollectStep# 4294967295 Hard# true IsMultiCollectAllowed# 0 IsMonitored# 1} 2025-05-07T09:02:07.962147Z 1 00h01m10.010512s :BS_LOAD_TEST INFO: TabletId# 5 Generation# 2 recieved TEvCollectGarbageResult {TabletId# 5 RecordGeneration# 2 PerGenerationCounter# 12 Channel# 0 Status# OK} 2025-05-07T09:02:07.962387Z 1 00h01m10.010512s :BS_LOAD_TEST INFO: TabletId# 5 Generation# 2 recieved TEvCollectGarbageResult {TabletId# 5 RecordGeneration# 2 PerGenerationCounter# 13 Channel# 0 Status# OK} |92.0%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest >> TTableProfileTests::OverwriteCachingPolicy [GOOD] |92.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/apps/pgwire/pgwire |92.0%| [LD] {RESULT} $(B)/ydb/apps/pgwire/pgwire |92.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/apps/pgwire/pgwire >> YdbTableBulkUpsert::AsyncIndexShouldSucceed [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> TTableProfileTests::OverwriteCachingPolicy [GOOD] Test command err: 2025-05-07T09:01:23.776375Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501626316037759800:2142];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:23.777445Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002859/r3tmp/tmpnlrGtg/pdisk_1.dat 2025-05-07T09:01:24.182923Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:01:24.209120Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:01:24.209231Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:01:24.214393Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 12151, node 1 2025-05-07T09:01:24.457789Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:01:24.457825Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:01:24.457841Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:01:24.458010Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:7831 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:01:24.873319Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... TClient is connected to server localhost:7831 2025-05-07T09:01:25.160454Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateSubDomain, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:01:25.189697Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:01:25.700077Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7501626325989171404:2074];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:25.700184Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/ydb_ut_tenant/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-05-07T09:01:25.897129Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:01:25.897218Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:01:25.906984Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 3 Cookie 3 2025-05-07T09:01:25.908384Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:7831 2025-05-07T09:01:26.278320Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 TClient is connected to server localhost:7831 TClient::Ls request: /Root/ydb_ut_tenant/table-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "table-1" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710660 CreateStep: 1746608487240 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" 
ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "table-1" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 ... (TRUNCATED) 2025-05-07T09:01:27.824835Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 TClient is connected to server localhost:7831 TClient::Ls request: /Root/ydb_ut_tenant/table-2 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "table-2" PathId: 4 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710661 CreateStep: 1746608488260 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "table-2" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 ... (TRUNCATED) 2025-05-07T09:01:28.555614Z node 1 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 3 2025-05-07T09:01:28.558750Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-05-07T09:01:28.776081Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501626316037759800:2142];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:28.776170Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:01:31.842213Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7501626350255541670:2073];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:31.846166Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002859/r3tmp/tmp5b7ASv/pdisk_1.dat 2025-05-07T09:01:32.155219Z node 4 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 2343, node 4 2025-05-07T09:01:32.218649Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:01:32.218725Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:01:32.237887Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting 
-> Connected 2025-05-07T09:01:32.362705Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:01:32.362733Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:01:32.362743Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:01:32.362910Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:15805 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:01:32.606176Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... TClient is connected to server localhost:15805 2025-05-07T09:01:33.241440Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateSubDomain, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:01:33.276484Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710659:0, at schemeshard: 7205 ... 
71;action=cannot detect path existence;path=//Root/ydb_ut_tenant/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:02:02.994070Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 TClient is connected to server localhost:7357 TClient::Ls request: /Root/ydb_ut_tenant/table-4 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "table-4" PathId: 6 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715663 CreateStep: 1746608523520 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "table-4" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 ... (TRUNCATED) 2025-05-07T09:02:04.115880Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 TClient is connected to server localhost:7357 TClient::Ls request: /Root/ydb_ut_tenant/table-5 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "table-5" PathId: 7 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715664 CreateStep: 1746608524630 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "table-5" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 ... 
(TRUNCATED) 2025-05-07T09:02:05.218111Z node 10 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 12 2025-05-07T09:02:05.229195Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-05-07T09:02:07.825400Z node 13 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[13:7501626503989939940:2074];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:02:07.825468Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002859/r3tmp/tmpkbOqXP/pdisk_1.dat 2025-05-07T09:02:08.242990Z node 13 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:02:08.289836Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:02:08.289994Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:02:08.293805Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 7529, node 13 2025-05-07T09:02:08.451040Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:02:08.451079Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:02:08.451096Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:02:08.451321Z node 13 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:15393 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:02:08.980845Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 
TClient is connected to server localhost:15393 2025-05-07T09:02:09.519734Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateSubDomain, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:02:09.554737Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:02:10.061535Z node 15 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[15:7501626518577880133:2071];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:02:10.061632Z node 15 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/ydb_ut_tenant/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-05-07T09:02:10.066139Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(15, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:02:10.066270Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(15, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:02:10.075097Z node 13 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 15 Cookie 15 2025-05-07T09:02:10.082230Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(15, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:15393 2025-05-07T09:02:10.673681Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 TClient is connected to server localhost:15393 TClient::Ls request: /Root/ydb_ut_tenant/table-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "table-1" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710660 CreateStep: 1746608531120 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "table-1" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 ... 
(TRUNCATED) 2025-05-07T09:02:11.656656Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 TClient is connected to server localhost:15393 TClient::Ls request: /Root/ydb_ut_tenant/table-2 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "table-2" PathId: 4 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710661 CreateStep: 1746608532090 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "table-2" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 ... (TRUNCATED) 2025-05-07T09:02:12.757420Z node 13 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 15 2025-05-07T09:02:12.761536Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(15, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-05-07T09:02:12.829885Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[13:7501626503989939940:2074];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:02:12.830015Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:02:15.061871Z node 15 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[15:7501626518577880133:2071];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:02:15.062018Z node 15 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/ydb_ut_tenant/.metadata/initialization/migrations;error=timeout;
>> YdbTableBulkUpsert::DecimalPK [GOOD]
------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> YdbTableBulkUpsert::AsyncIndexShouldSucceed [GOOD]
Test command err:
2025-05-07T09:01:45.342616Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501626409918565706:2141];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:45.342727Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002842/r3tmp/tmpXVaLoR/pdisk_1.dat 2025-05-07T09:01:46.374961Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:01:46.382233Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T09:01:46.388842Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1,
(0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:01:46.388967Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:01:46.415247Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 16133, node 1 2025-05-07T09:01:46.782653Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:01:46.782677Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:01:46.782684Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:01:46.782820Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:24724 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:01:47.481028Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:01:50.339119Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501626409918565706:2141];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:50.339200Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:01:50.919687Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501626431393403182:2338], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:50.919879Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:51.900303Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-05-07T09:01:52.209603Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501626439983337974:2352], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:52.209705Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:52.210258Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501626439983337979:2355], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:52.215419Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480 2025-05-07T09:01:52.255988Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501626439983337981:2356], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-05-07T09:01:52.320467Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501626439983338052:2818] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:01:52.481693Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715661. Ctx: { TraceId: 01jtmznjsp1qnr17ck627j5t9p, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MWVkMTNlNWYtNGRhNjk4Ny00MWZkZjcyNS0zMmU4MDRjYg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:01:52.724998Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715662. Ctx: { TraceId: 01jtmznjsp1qnr17ck627j5t9p, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZDYyNTc0OTItMTc5MmQ2YjQtZDNlNGMyNmMtNTNiOTFmZmI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:01:52.785168Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715663. Ctx: { TraceId: 01jtmznjsp1qnr17ck627j5t9p, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MzdiOTQwZWMtOTcxOWMwZi0zMTA1Nzg4NC04ZmUyNjQ5Ng==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:01:53.095024Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715664. Ctx: { TraceId: 01jtmznjsp1qnr17ck627j5t9p, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MTM4NzU1ZGQtOTUxZjU1YjgtZTczNzBiMDQtYTMwNzQ1YWE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-05-07T09:01:55.605330Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7501626454528736285:2075];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:55.605388Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002842/r3tmp/tmp88FbZo/pdisk_1.dat 2025-05-07T09:01:56.024848Z node 4 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:01:56.088491Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:01:56.088597Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:01:56.098630Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 2544, node 4 2025-05-07T09:01:56.366703Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:01:56.366731Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:01:56.366739Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:01:56.366884Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:1055 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:01:56.804686Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:01:59.815413Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7501626471708606499:2337], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:59.815506Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:59.944419Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 2025-05-07T09:02:00.057075Z node 4 :KQP_WO ... # /home/runner/.ya/build/build_root/zvgn/002842/r3tmp/tmpS5fiRn/pdisk_1.dat 2025-05-07T09:02:02.781411Z node 7 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:02:02.834999Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:02:02.835086Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:02:02.851474Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 9825, node 7 2025-05-07T09:02:02.966656Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:02:02.966682Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:02:02.966690Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:02:02.966838Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:14009 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:02:03.300254Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:02:06.066508Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 BAD_REQUEST
: Error: Bulk upsert to table '/Root/TestInvalidData' Invalid Decimal(22,9) value BAD_REQUEST
: Error: Bulk upsert to table '/Root/TestInvalidData' Invalid Date value BAD_REQUEST
: Error: Bulk upsert to table '/Root/TestInvalidData' Invalid Datetime value BAD_REQUEST
: Error: Bulk upsert to table '/Root/TestInvalidData' Invalid Timestamp value BAD_REQUEST
: Error: Bulk upsert to table '/Root/TestInvalidData' Invalid Interval value CLIENT_INTERNAL_ERROR
: Error: GRpc error: (13): Unable to parse request
: Error: Grpc error response on endpoint localhost:9825 BAD_REQUEST
: Error: Bulk upsert to table '/Root/TestInvalidData' Invalid Yson value BAD_REQUEST
: Error: Bulk upsert to table '/Root/TestInvalidData' Invalid Json value BAD_REQUEST
: Error: Bulk upsert to table '/Root/TestInvalidData' Invalid JSON for JsonDocument provided: TAPE_ERROR: The JSON document has an improper structure: missing or superfluous commas, braces, missing keys, etc. This is a fatal and unrecoverable error. BAD_REQUEST
: Error: Bulk upsert to table '/Root/TestInvalidData' Invalid DyNumber string representation 2025-05-07T09:02:07.662642Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7501626505208255786:2073];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:02:07.662705Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002842/r3tmp/tmp0YANqH/pdisk_1.dat 2025-05-07T09:02:07.867383Z node 10 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:02:07.915660Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:02:07.915773Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:02:07.922470Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 3763, node 10 2025-05-07T09:02:08.155083Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:02:08.155113Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:02:08.155125Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:02:08.155303Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6412 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:02:08.443011Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:02:10.968038Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480
: Error: Bulk upsert to table '/Root/ui8' Only async-indexed tables are supported by BulkUpsert
: Error: Bulk upsert to table '/Root/ui8/Value_index/indexImplTable' unknown table 2025-05-07T09:02:12.766224Z node 13 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[13:7501626526982912989:2183];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:02:12.826330Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002842/r3tmp/tmpZnJkEx/pdisk_1.dat 2025-05-07T09:02:12.961354Z node 13 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 20551, node 13 2025-05-07T09:02:13.104771Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:02:13.104916Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:02:13.126981Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:02:13.127009Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:02:13.127031Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:02:13.130405Z node 13 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-05-07T09:02:13.131904Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:20546 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:02:13.311687Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T09:02:17.001278Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-05-07T09:02:17.178379Z node 13 :CHANGE_EXCHANGE WARN: change_sender_async_index.cpp:195: [AsyncIndexChangeSenderMain][72075186224037888:1][13:7501626548457750552:2346] Failed entry at 'ResolveUserTable': entry# { Path: TableId: [72057594046644480:2:0] RequestType: ByTableId Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo } 2025-05-07T09:02:17.762826Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[13:7501626526982912989:2183];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:02:17.762934Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout;
: Error: Bulk upsert to table '/Root/ui8/Value_index/indexImplTable' unknown table
>> test_async_replication.py::TestAsyncReplication::test_async_replication[table_index_0__SYNC-pk_types4-all_types4-index4---SYNC] [GOOD]
>> TGRpcYdbTest::ReadTablePg [GOOD]
------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> YdbTableBulkUpsert::DecimalPK [GOOD]
Test command err:
2025-05-07T09:00:36.058723Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501626112553993031:2075];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:00:36.058791Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002881/r3tmp/tmpzBwIyT/pdisk_1.dat 2025-05-07T09:00:37.126528Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:00:37.126631Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:00:37.128919Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T09:00:37.136353Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:00:37.201182Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 12278, node 1 2025-05-07T09:00:37.347750Z node 1 :GRPC_SERVER WARN: grpc_request_proxy.cpp:509: SchemeBoardDelete /Root Strong=0 2025-05-07T09:00:37.347783Z node 1 :GRPC_SERVER WARN: grpc_request_proxy.cpp:509: SchemeBoardDelete /Root Strong=0 2025-05-07T09:00:37.637910Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:00:37.637945Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:00:37.637953Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:00:37.660989Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:17059 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success.
2025-05-07T09:00:38.369595Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:00:41.058961Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501626112553993031:2075];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:00:41.059043Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:00:42.205075Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 SUCCESS 2025-05-07T09:00:42.490117Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501626138323798055:2348], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:00:42.490195Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:00:42.490565Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501626138323798067:2351], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:00:42.493911Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480 2025-05-07T09:00:42.529794Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501626138323798069:2352], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-05-07T09:00:42.594500Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501626138323798154:2829] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:00:44.001652Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710661. Ctx: { TraceId: 01jtmzkk3r541bx6cegd8crv0x, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MTFhNmRkZWUtNzVmN2QwYTItODk0YmQ0OWQtYjYyYTg0ZTU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root SUCCESS count returned 0 rows 2025-05-07T09:00:44.771479Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710662. Ctx: { TraceId: 01jtmzkmm68g53gt6twawneees, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MTFhNmRkZWUtNzVmN2QwYTItODk0YmQ0OWQtYjYyYTg0ZTU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root SUCCESS count returned 1 rows 2025-05-07T09:00:44.915442Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037888 not found 2025-05-07T09:00:44.948051Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 SUCCESS 2025-05-07T09:00:45.840936Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710665. Ctx: { TraceId: 01jtmzknrf8z3k82z70kgn0a8v, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OGM5OTc3YmYtZmQzM2I2ZjItYTAxYTc2ZGMtMmU5NmRkZjE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root SUCCESS count returned 0 rows 2025-05-07T09:00:47.091507Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710666. Ctx: { TraceId: 01jtmzkpd720fpcx31vwtjxdc8, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OGM5OTc3YmYtZmQzM2I2ZjItYTAxYTc2ZGMtMmU5NmRkZjE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root SUCCESS count returned 1 rows 2025-05-07T09:00:47.248869Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037889 not found 2025-05-07T09:00:47.273788Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 SUCCESS 2025-05-07T09:00:48.440950Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710669. Ctx: { TraceId: 01jtmzkqzwa9sszz2tynsmwcar, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MjE2YjYwOC00NmI4MTZhYS1lNzg1ODA1OS1kMzZkODMyYg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root SUCCESS count returned 0 rows 2025-05-07T09:00:48.956575Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710670. 
Ctx: { TraceId: 01jtmzkryka11a98tf4yajf7fc, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MjE2YjYwOC00NmI4MTZhYS1lNzg1ODA1OS1kMzZkODMyYg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root SUCCESS count returned 1 rows 2025-05-07T09:00:49.105913Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037890 not found 2025-05-07T09:00:49.152071Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480 SUCCESS 2025-05-07T09:00:50.273386Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710673. Ctx: { TraceId: 01jtmzksr274ra0k994ngzxgpr, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OGIzMjkyYTQtNTdjN2MyODItOTY1NDFlNmMtYzRmMWMwZQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root SUCCESS count returned 0 rows 2025-05-07T09:00:51.686354Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710674. Ctx: { TraceId: 01jtmzktqy5kydth78hxg5en09, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OGIzMjkyYTQtNTdjN2MyODItOTY1NDFlNmMtYzRmMWMwZQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root SUCCESS count returned 1 rows 2025-05-07T09:00:51.799302Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037891 not found 2025-05-07T09:00:51.896488Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710676:0, at schemeshard: 72057594046644480 SUCCESS 2025-05-07T09:00:52.172587Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7261: Cannot get console configs 2025-05-07T09:00:52.172638Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:00:53.657728Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710677. Ctx: { TraceId: 01jtmzkwfcaqcgh9dqwycqywf3, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OTVlZGJkNTYtMWQwY2YzMWMtYjkzOGZhYTktZjVmMmI2ZmU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root SUCCESS count returned 0 rows 2025-05-07T09:00:54.335338Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710678. Ctx: { TraceId: 01jtmzky1pbn8rqg1r251rtdze, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OTVlZGJkNTYtMWQwY2YzMWMtYjkzOGZhYTktZjVmMmI2ZmU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root SUCCESS count returned 1 rows 2025- ... thId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:01:47.332644Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T09:01:51.131027Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[7:7501626414848876267:2073];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:51.131111Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:01:51.240101Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480
: Error: Bulk upsert to table '/Root/Traces' unknown table
: Error: Bulk upsert to table '/Root/Logs' Missing key columns: Timestamp
: Error: Bulk upsert to table '/Root/Logs' Missing key columns: Shard
: Error: Bulk upsert to table '/Root/Logs' Type mismatch, got type Uint64 for column App, but expected Utf8
: Error: Bulk upsert to table '/Root/Logs' Type mismatch, got type Uint64 for column Message, but expected Utf8
: Error: Bulk upsert to table '/Root/Logs' Unknown column: HttpCode 2025-05-07T09:01:55.279275Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7501626454741457806:2141];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:55.279344Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002881/r3tmp/tmpYA6EPN/pdisk_1.dat 2025-05-07T09:01:55.820171Z node 10 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:01:55.855643Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:01:55.856237Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:01:55.869198Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 30720, node 10 2025-05-07T09:01:56.152955Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:01:56.152979Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:01:56.152988Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:01:56.153142Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:29745 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:01:56.565083Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T09:02:00.282123Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[10:7501626454741457806:2141];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:02:00.282220Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:02:00.328455Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480
: Error: Bulk upsert to table '/Root/Limits' Row key size of 1100002 bytes is larger than the allowed threshold 1049600
: Error: Bulk upsert to table '/Root/Limits' Row key size of 1100002 bytes is larger than the allowed threshold 1049600
: Error: Bulk upsert to table '/Root/Limits' Row key size of 1100000 bytes is larger than the allowed threshold 1049600
: Error: Bulk upsert to table '/Root/Limits' Row cell size of 17000022 bytes is larger than the allowed threshold 16777216 2025-05-07T09:02:12.785350Z node 13 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[13:7501626526127362987:2073];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:02:12.785482Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002881/r3tmp/tmpVmHHmM/pdisk_1.dat 2025-05-07T09:02:13.162602Z node 13 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:02:13.190625Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:02:13.190794Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:02:13.198686Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 5543, node 13 2025-05-07T09:02:13.296358Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:02:13.296394Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:02:13.296412Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:02:13.296610Z node 13 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:16834 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:02:13.635997Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T09:02:17.785715Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[13:7501626526127362987:2073];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:02:17.785810Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:02:18.317331Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 2025-05-07T09:02:18.464499Z node 13 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [13:7501626551897168060:2346], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:02:18.464618Z node 13 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:02:18.464656Z node 13 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [13:7501626551897168072:2349], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:02:18.469344Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480 2025-05-07T09:02:18.510670Z node 13 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [13:7501626551897168074:2350], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-05-07T09:02:18.583800Z node 13 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [13:7501626551897168152:2833] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:02:19.021161Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710661. Ctx: { TraceId: 01jtmzpgty3xvpdnthd60ge5e2, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=MmJmODc2MzUtY2VjOWJiYzItM2MxNzQ0OWItMmIyYjgzZDk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root
------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> TGRpcYdbTest::ReadTablePg [GOOD]
Test command err:
2025-05-07T09:01:53.198298Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501626444335210837:2210];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:53.198646Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002836/r3tmp/tmpyGEoRk/pdisk_1.dat 2025-05-07T09:01:53.702116Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:01:53.702208Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:01:53.705654Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:01:53.713673Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 17962, node 1 2025-05-07T09:01:53.800051Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:01:53.800072Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:01:53.800083Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:01:53.800192Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:27577 WaitRootIsUp 'Root'...
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:01:54.116923Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:01:54.197547Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501626448630178912:2602] txid# 281474976710658, issues: { message: "Path does not exist" issue_code: 200200 severity: 1 } 2025-05-07T09:01:58.574064Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7501626464444252950:2075];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:58.574130Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002836/r3tmp/tmp0kXgTy/pdisk_1.dat 2025-05-07T09:01:58.766815Z node 4 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:01:58.810273Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:01:58.810347Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:01:58.813052Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 62684, node 4 2025-05-07T09:01:58.868155Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:01:58.868179Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:01:58.868185Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:01:58.868301Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:31680 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:01:59.114618Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:02:02.222434Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7501626481624123212:2340], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:02:02.222511Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7501626481624123201:2337], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:02:02.222655Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:02:02.227075Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480 2025-05-07T09:02:02.262188Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7501626481624123215:2341], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-05-07T09:02:02.362340Z node 4 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [4:7501626481624123292:2681] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:02:04.605333Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7501626490853249090:2074];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:02:04.606803Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002836/r3tmp/tmpuOuQQ3/pdisk_1.dat 2025-05-07T09:02:04.789620Z node 7 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:02:04.814948Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:02:04.815047Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:02:04.821763Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 20024, node 7 2025-05-07T09:02:04.890197Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:02:04.890224Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:02:04.890232Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:02:04.890378Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6289 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:02:05.109612Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T09:02:07.658027Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7501626503738151988:2337], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:02:07.658086Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7501626503738151999:2340], DatabaseId: /Root, PoolId: default, Failed to fetch poo ... SecurityState for /Root - no PublicKeys 2025-05-07T09:02:17.166694Z node 13 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:469: SchemeBoardUpdate /Root 2025-05-07T09:02:17.166844Z node 13 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:498: Can't update SecurityState for /Root - no PublicKeys 2025-05-07T09:02:17.166872Z node 13 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:469: SchemeBoardUpdate /Root 2025-05-07T09:02:17.166925Z node 13 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:498: Can't update SecurityState for /Root - no PublicKeys 2025-05-07T09:02:17.196670Z node 13 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:575: Got grpc request# CreateSessionRequest, traceId# 01jtmzpfkcdys20g57qgderars, sdkBuildInfo# undef, state# AS_NOT_PERFORMED, database# undef, peer# ipv6:[::1]:51610, grpcInfo# grpc-c++/1.54.3 grpc-c/31.0.0 (linux; chttp2), timeout# undef 2025-05-07T09:02:20.397484Z node 13 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:575: Got grpc request# ExecuteDataQueryRequest, traceId# 01jtmzpjqde9f4g8cw8rzr8y28, sdkBuildInfo# undef, state# AS_NOT_PERFORMED, database# undef, peer# ipv6:[::1]:51610, grpcInfo# grpc-c++/1.54.3 grpc-c/31.0.0 (linux; chttp2), timeout# undef 2025-05-07T09:02:20.399510Z node 13 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [13:7501626559498876499:2341], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:02:20.399629Z node 13 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:02:20.400066Z node 13 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [13:7501626559498876511:2344], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:02:20.405256Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480 2025-05-07T09:02:20.411894Z node 13 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:469: SchemeBoardUpdate /Root 2025-05-07T09:02:20.412065Z node 13 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:498: Can't update SecurityState for /Root - no PublicKeys 2025-05-07T09:02:20.412335Z node 13 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:469: SchemeBoardUpdate /Root 2025-05-07T09:02:20.412415Z node 13 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:498: Can't update SecurityState for /Root - no PublicKeys 2025-05-07T09:02:20.432675Z node 13 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:469: SchemeBoardUpdate /Root 2025-05-07T09:02:20.432817Z node 13 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:498: Can't update SecurityState for /Root - no PublicKeys 2025-05-07T09:02:20.438450Z node 13 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:469: SchemeBoardUpdate /Root 2025-05-07T09:02:20.438592Z node 13 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:498: Can't update SecurityState for /Root - no PublicKeys 2025-05-07T09:02:20.444071Z node 13 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [13:7501626559498876513:2345], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-05-07T09:02:20.456638Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[13:7501626538024038847:2139];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:02:20.456724Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:02:20.522910Z node 13 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [13:7501626559498876593:2801] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:02:20.723156Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715661. Ctx: { TraceId: 01jtmzpjqde9f4g8cw8rzr8y28, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=MjVmMTI0MmUtZTRmMzIwNGQtNzhlNjlhNzUtYzk5YTdkZDA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:02:20.819529Z node 13 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:575: Got grpc request# ReadTableRequest, traceId# 01jtmzpk4ke0e2n224a701rpra, sdkBuildInfo# undef, state# AS_NOT_PERFORMED, database# undef, peer# ipv6:[::1]:51610, grpcInfo# grpc-c++/1.54.3 grpc-c/31.0.0 (linux; chttp2), timeout# undef 2025-05-07T09:02:20.820145Z node 13 :READ_TABLE_API NOTICE: rpc_read_table.cpp:531: [13:7501626559498876640:2353] Finish grpc stream, status: 400010 2025-05-07T09:02:20.823263Z node 13 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:575: Got grpc request# ReadTableRequest, traceId# 01jtmzpk4qfzrdg4mv3wz5g729, sdkBuildInfo# undef, state# AS_NOT_PERFORMED, database# undef, peer# ipv6:[::1]:51610, grpcInfo# grpc-c++/1.54.3 grpc-c/31.0.0 (linux; chttp2), timeout# undef 2025-05-07T09:02:20.842236Z node 13 :READ_TABLE_API DEBUG: rpc_read_table.cpp:267: [13:7501626559498876641:2354] Adding quota request to queue ShardId: 0, TxId: 281474976715662 2025-05-07T09:02:20.842286Z node 13 :READ_TABLE_API DEBUG: rpc_read_table.cpp:629: [13:7501626559498876641:2354] Assign stream quota to Shard 0, Quota 5, TxId 281474976715662 Reserved: 5 of 25, Queued: 0 2025-05-07T09:02:20.859187Z node 13 :READ_TABLE_API DEBUG: rpc_read_table.cpp:647: [13:7501626559498876641:2354] got stream part, size: 246, RU required: 128 rate limiter absent 2025-05-07T09:02:20.859652Z node 13 :READ_TABLE_API DEBUG: rpc_read_table.cpp:563: [13:7501626559498876641:2354] Starting inactivity timer for 600.000000s with tag 3 2025-05-07T09:02:20.962143Z node 13 :READ_TABLE_API NOTICE: rpc_read_table.cpp:531: [13:7501626559498876641:2354] Finish grpc stream, status: 400000 2025-05-07T09:02:20.963203Z node 13 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:575: Got grpc request# ReadTableRequest, traceId# 01jtmzpk926pbxe5p2q11hcgje, sdkBuildInfo# undef, state# AS_NOT_PERFORMED, database# undef, peer# ipv6:[::1]:51610, grpcInfo# grpc-c++/1.54.3 grpc-c/31.0.0 (linux; chttp2), timeout# undef 2025-05-07T09:02:20.989705Z node 13 :READ_TABLE_API DEBUG: rpc_read_table.cpp:267: [13:7501626559498876663:2356] Adding quota request to queue ShardId: 0, TxId: 281474976715664 2025-05-07T09:02:20.989758Z node 13 :READ_TABLE_API DEBUG: rpc_read_table.cpp:629: [13:7501626559498876663:2356] Assign stream quota to Shard 0, Quota 5, TxId 
281474976715664 Reserved: 5 of 25, Queued: 0 2025-05-07T09:02:20.990527Z node 13 :READ_TABLE_API DEBUG: rpc_read_table.cpp:647: [13:7501626559498876663:2356] got stream part, size: 84, RU required: 128 rate limiter absent 2025-05-07T09:02:20.990888Z node 13 :READ_TABLE_API DEBUG: rpc_read_table.cpp:563: [13:7501626559498876663:2356] Starting inactivity timer for 600.000000s with tag 3 2025-05-07T09:02:20.993735Z node 13 :READ_TABLE_API NOTICE: rpc_read_table.cpp:531: [13:7501626559498876663:2356] Finish grpc stream, status: 400000 2025-05-07T09:02:21.003639Z node 13 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:575: Got grpc request# ReadTableRequest, traceId# 01jtmzpkab8fwmphhp0t32dgjc, sdkBuildInfo# undef, state# AS_NOT_PERFORMED, database# undef, peer# ipv6:[::1]:51610, grpcInfo# grpc-c++/1.54.3 grpc-c/31.0.0 (linux; chttp2), timeout# undef 2025-05-07T09:02:21.018961Z node 13 :READ_TABLE_API DEBUG: rpc_read_table.cpp:267: [13:7501626563793843983:2358] Adding quota request to queue ShardId: 0, TxId: 281474976715666 2025-05-07T09:02:21.019018Z node 13 :READ_TABLE_API DEBUG: rpc_read_table.cpp:629: [13:7501626563793843983:2358] Assign stream quota to Shard 0, Quota 5, TxId 281474976715666 Reserved: 5 of 25, Queued: 0 2025-05-07T09:02:21.020744Z node 13 :READ_TABLE_API DEBUG: rpc_read_table.cpp:647: [13:7501626563793843983:2358] got stream part, size: 210, RU required: 128 rate limiter absent 2025-05-07T09:02:21.021144Z node 13 :READ_TABLE_API DEBUG: rpc_read_table.cpp:563: [13:7501626563793843983:2358] Starting inactivity timer for 600.000000s with tag 3 2025-05-07T09:02:21.099627Z node 13 :READ_TABLE_API NOTICE: rpc_read_table.cpp:531: [13:7501626563793843983:2358] Finish grpc stream, status: 400000 2025-05-07T09:02:21.102354Z node 13 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000031280] received request Name# SchemeOperation ok# false data# peer# current inflight# 0 2025-05-07T09:02:21.102711Z node 13 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000070e80] received request Name# SchemeOperationStatus ok# false data# peer# current inflight# 0 2025-05-07T09:02:21.102959Z node 13 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a00002fa80] received request Name# SchemeDescribe ok# false data# peer# current inflight# 0 2025-05-07T09:02:21.103208Z node 13 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000042680] received request Name# ChooseProxy ok# false data# peer# current inflight# 0 2025-05-07T09:02:21.103445Z node 13 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000031880] received request Name# PersQueueRequest ok# false data# peer# current inflight# 0 2025-05-07T09:02:21.103557Z node 13 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000031e80] received request Name# SchemeInitRoot ok# false data# peer# current inflight# 0 2025-05-07T09:02:21.103698Z node 13 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000075080] received request Name# ResolveNode ok# false data# peer# current inflight# 0 2025-05-07T09:02:21.103833Z node 13 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000073e80] received request Name# FillNode ok# false data# peer# current inflight# 0 2025-05-07T09:02:21.103951Z node 13 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0000a2680] received request Name# DrainNode ok# false data# peer# current inflight# 0 2025-05-07T09:02:21.104055Z node 13 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a00006fc80] received request Name# BlobStorageConfig ok# false data# peer# current inflight# 0 2025-05-07T09:02:21.104187Z node 13 :GRPC_SERVER DEBUG: grpc_server.cpp:283: 
[0x51a000070280] received request Name# HiveCreateTablet ok# false data# peer# current inflight# 0 2025-05-07T09:02:21.104293Z node 13 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000051680] received request Name# TestShardControl ok# false data# peer# current inflight# 0 2025-05-07T09:02:21.104406Z node 13 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0000c3080] received request Name# RegisterNode ok# false data# peer# current inflight# 0 2025-05-07T09:02:21.104517Z node 13 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a00004f280] received request Name# CmsRequest ok# false data# peer# current inflight# 0 2025-05-07T09:02:21.104623Z node 13 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000032480] received request Name# ConsoleRequest ok# false data# peer# current inflight# 0 2025-05-07T09:02:21.104724Z node 13 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0000a9880] received request Name# InterconnectDebug ok# false data# peer# current inflight# 0 2025-05-07T09:02:21.104871Z node 13 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000070880] received request Name# TabletStateRequest ok# false data# peer# current inflight# 0 >> YdbYqlClient::CheckDefaultTableSettings3 [GOOD] >> YdbOlapStore::BulkUpsert [GOOD] >> YdbOlapStore::DuplicateRows |92.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tools/stress_tool/ydb_stress_tool |92.0%| [LD] {RESULT} $(B)/ydb/tools/stress_tool/ydb_stress_tool |92.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tools/stress_tool/ydb_stress_tool |92.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kesus/tablet/quoter_performance_test/quoter_performance_test |92.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kesus/tablet/quoter_performance_test/quoter_performance_test |92.0%| [LD] {RESULT} $(B)/ydb/core/kesus/tablet/quoter_performance_test/quoter_performance_test ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> YdbYqlClient::CheckDefaultTableSettings3 [GOOD] Test command err: 2025-05-07T09:01:49.134311Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501626427198767133:2074];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:49.134368Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00283e/r3tmp/tmpTW9O1a/pdisk_1.dat 2025-05-07T09:01:50.162117Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T09:01:50.167568Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:01:50.167687Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:01:50.171757Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 63633, node 1 2025-05-07T09:01:50.248077Z node 1 :GRPC_SERVER WARN: grpc_request_proxy.cpp:509: SchemeBoardDelete /Root Strong=0 2025-05-07T09:01:50.248637Z node 1 :GRPC_SERVER WARN: grpc_request_proxy.cpp:509: SchemeBoardDelete /Root Strong=0 2025-05-07T09:01:50.266287Z node 1 :IMPORT WARN: 
schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:01:50.586786Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:01:50.586819Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:01:50.586827Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:01:50.586947Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:7267 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:01:51.185511Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T09:01:54.138329Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501626427198767133:2074];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:54.138379Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:01:56.026433Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7501626457104943162:2092];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:56.045559Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00283e/r3tmp/tmpc1XNWJ/pdisk_1.dat 2025-05-07T09:01:56.205618Z node 4 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:01:56.244033Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:01:56.244127Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:01:56.248308Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 30739, node 4 2025-05-07T09:01:56.339869Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:01:56.339929Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:01:56.339937Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:01:56.340074Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:10464 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:01:56.593617Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 
TClient is connected to server localhost:10464 2025-05-07T09:01:57.018941Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_subdomain.cpp:92: TCreateSubDomain Propose, path: /Root/ydb_ut_tenant, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-05-07T09:01:57.019384Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 281474976715658:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-05-07T09:01:57.019407Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateSubDomain, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-05-07T09:01:57.026231Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976715658, database: /Root, subject: , status: StatusAccepted, operation: CREATE DATABASE, path: /Root/ydb_ut_tenant waiting... 2025-05-07T09:01:57.039993Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 1746608517087, transactions count in step: 1, at schemeshard: 72057594046644480 2025-05-07T09:01:57.041676Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 281474976715658:0 2025-05-07T09:01:57.041737Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 281474976715658, publications: 2, subscribers: 1 2025-05-07T09:01:57.042990Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046644480, txId: 281474976715658, subscribers: 1 2025-05-07T09:01:57.047985Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: /Root/ydb_ut_tenant, opId: 281474976715659:0, at schemeshard: 72057594046644480 2025-05-07T09:01:57.048566Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 281474976715659:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-05-07T09:01:57.048584Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715659:0, at schemeshard: 72057594046644480 2025-05-07T09:01:57.052844Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976715659, database: /Root, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: /Root/ydb_ut_tenant waiting... 
2025-05-07T09:01:57.559141Z node 6 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[6:7501626460839721020:2074];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:57.559774Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/ydb_ut_tenant/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-05-07T09:01:57.583406Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:01:57.583480Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:01:57.585536Z node 4 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 6 Cookie 6 2025-05-07T09:01:57.586499Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# ObjectStorage, ObjectStorage, PostgreSQL 2025-05-07T09:01:57.610058Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:01:58.054831Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 1746608518102, transactions count in step: 1, at schemeshard: 72057594046644480 2025-05-07T09:01:58.063617Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 281474976715659:0 2025-05-07T09:01:58.063868Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 281474976715659, publications: 1, subscribers: 1 2025-05-07T09:01:58.066329Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046644480, txId: 281474976715659, subscribers: 1 2025-05-07T09:01:59.910421Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: sc ... ssifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:02:09.119133Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:02:09.119141Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:02:09.119286Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:10720 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:02:09.426902Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... TClient is connected to server localhost:10720 2025-05-07T09:02:09.880255Z node 10 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_subdomain.cpp:92: TCreateSubDomain Propose, path: /Root/ydb_ut_tenant, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-05-07T09:02:09.880654Z node 10 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 281474976715658:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-05-07T09:02:09.880685Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateSubDomain, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-05-07T09:02:09.883316Z node 10 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976715658, database: /Root, subject: , status: StatusAccepted, operation: CREATE DATABASE, path: /Root/ydb_ut_tenant waiting... 
2025-05-07T09:02:09.893613Z node 10 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 1746608529939, transactions count in step: 1, at schemeshard: 72057594046644480 2025-05-07T09:02:09.895766Z node 10 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 281474976715658:0 2025-05-07T09:02:09.895824Z node 10 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 281474976715658, publications: 2, subscribers: 1 2025-05-07T09:02:09.896755Z node 10 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046644480, txId: 281474976715658, subscribers: 1 2025-05-07T09:02:09.901359Z node 10 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: /Root/ydb_ut_tenant, opId: 281474976715659:0, at schemeshard: 72057594046644480 2025-05-07T09:02:09.901994Z node 10 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 281474976715659:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-05-07T09:02:09.902031Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715659:0, at schemeshard: 72057594046644480 2025-05-07T09:02:09.903752Z node 10 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976715659, database: /Root, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: /Root/ydb_ut_tenant waiting... 2025-05-07T09:02:10.406259Z node 12 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[12:7501626516369208147:2071];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:02:10.406350Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/ydb_ut_tenant/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-05-07T09:02:10.409223Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:02:10.409351Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:02:10.412890Z node 10 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 12 Cookie 12 2025-05-07T09:02:10.420261Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:02:10.809396Z node 10 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 1746608530856, transactions count in step: 1, at schemeshard: 72057594046644480 2025-05-07T09:02:10.815741Z node 10 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 281474976715659:0 2025-05-07T09:02:10.815959Z node 10 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 281474976715659, publications: 1, subscribers: 1 2025-05-07T09:02:10.817441Z node 10 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & 
remove, at schemeshard: 72057594046644480, txId: 281474976715659, subscribers: 1 2025-05-07T09:02:13.103449Z node 10 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_table.cpp:426: TCreateTable Propose, path: /Root/ydb_ut_tenant/Table-1, opId: 281474976715660:0, at schemeshard: 72057594046644480 2025-05-07T09:02:13.105098Z node 10 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 281474976715660:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-05-07T09:02:13.105129Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 2025-05-07T09:02:13.108153Z node 10 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976715660, database: /Root/ydb_ut_tenant, subject: , status: StatusAccepted, operation: CREATE TABLE, path: /Root/ydb_ut_tenant/Table-1 2025-05-07T09:02:13.200347Z node 10 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 1746608533240, transactions count in step: 1, at schemeshard: 72057594046644480 2025-05-07T09:02:13.210679Z node 10 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 281474976715660:0 2025-05-07T09:02:13.239215Z node 10 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 12 2025-05-07T09:02:13.239801Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-05-07T09:02:16.604008Z node 13 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[13:7501626544391820217:2073];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:02:16.604106Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00283e/r3tmp/tmpSmkFV8/pdisk_1.dat 2025-05-07T09:02:17.012443Z node 13 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:02:17.068335Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:02:17.068454Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:02:17.082902Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 15809, node 13 2025-05-07T09:02:17.420761Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:02:17.420795Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:02:17.420805Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:02:17.421015Z node 13 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:17171 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:02:18.110492Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:02:21.603201Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[13:7501626544391820217:2073];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:02:21.603296Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:02:22.528487Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 |92.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_external_table_reboots/ydb-core-tx-schemeshard-ut_external_table_reboots |92.0%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_external_table_reboots/ydb-core-tx-schemeshard-ut_external_table_reboots |92.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_external_table_reboots/ydb-core-tx-schemeshard-ut_external_table_reboots |92.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_reboots/ydb-core-tx-schemeshard-ut_reboots |92.0%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_reboots/ydb-core-tx-schemeshard-ut_reboots |92.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_reboots/ydb-core-tx-schemeshard-ut_reboots |92.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_bsvolume_reboots/ydb-core-tx-schemeshard-ut_bsvolume_reboots |92.1%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_bsvolume_reboots/ydb-core-tx-schemeshard-ut_bsvolume_reboots |92.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_bsvolume_reboots/ydb-core-tx-schemeshard-ut_bsvolume_reboots |92.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_extsubdomain_reboots/ydb-core-tx-schemeshard-ut_extsubdomain_reboots |92.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_extsubdomain_reboots/ydb-core-tx-schemeshard-ut_extsubdomain_reboots |92.1%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_extsubdomain_reboots/ydb-core-tx-schemeshard-ut_extsubdomain_reboots |92.1%| [LD] {default-linux-x86_64, release, asan} 
$(B)/ydb/core/tx/schemeshard/ut_split_merge/ydb-core-tx-schemeshard-ut_split_merge |92.1%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_split_merge/ydb-core-tx-schemeshard-ut_split_merge |92.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_split_merge/ydb-core-tx-schemeshard-ut_split_merge |92.1%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest >> TPersQueueMirrorer::TestBasicRemote [GOOD] |92.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_base_reboots/ydb-core-tx-schemeshard-ut_base_reboots |92.1%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_base_reboots/ydb-core-tx-schemeshard-ut_base_reboots |92.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_base_reboots/ydb-core-tx-schemeshard-ut_base_reboots ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/ut_with_sdk/unittest >> TPersQueueMirrorer::TestBasicRemote [GOOD] Test command err: 2025-05-07T08:58:55.079248Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501625679541328432:2198];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:58:55.079934Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/004007/r3tmp/tmp2y00fI/pdisk_1.dat 2025-05-07T08:58:55.395829Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-05-07T08:58:55.772605Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:58:55.772723Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:58:55.779541Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:58:55.855591Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 18848, node 1 2025-05-07T08:58:56.014309Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/zvgn/004007/r3tmp/yandexv3eCaP.tmp 2025-05-07T08:58:56.014342Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/zvgn/004007/r3tmp/yandexv3eCaP.tmp 2025-05-07T08:58:56.014514Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/zvgn/004007/r3tmp/yandexv3eCaP.tmp 2025-05-07T08:58:56.014641Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-05-07T08:58:56.071457Z INFO: TTestServer started on Port 26886 GrpcPort 18848 TClient is connected to server localhost:26886 PQClient connected to localhost:18848 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:58:56.509107Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:58:56.539725Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:58:56.555630Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-05-07T08:58:56.567621Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-05-07T08:58:56.743879Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710660, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:58:56.766539Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710661, at schemeshard: 72057594046644480 2025-05-07T08:58:59.575694Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501625696721198300:2343], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:58:59.575949Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501625696721198296:2340], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:58:59.576178Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:58:59.581195Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710662:3, at schemeshard: 72057594046644480 2025-05-07T08:58:59.623096Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501625696721198310:2344], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710662 completed, doublechecking } 2025-05-07T08:58:59.875448Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501625696721198374:2444] txid# 281474976710663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:58:59.954465Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T08:59:00.070434Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501625679541328432:2198];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:59:00.070493Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:59:00.076002Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T08:59:00.144749Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7501625696721198382:2350], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:5:17: Error: At function: KiReadTable!
:5:17: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Versions]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-05-07T08:59:00.145758Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2147: SessionId: ydb://session/3?node_id=1&id=Zjg0MmViYS01Yjk5M2JhMS1kYjc5OTkyLThlNDMwNjE=, ActorId: [1:7501625696721198278:2338], ActorState: ExecuteState, TraceId: 01jtmzgej74s8rx687prvg048g, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-05-07T08:59:00.147888Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 5 column: 17 } message: "At function: KiReadTable!" end_position { row: 5 column: 17 } severity: 1 issues { position { row: 5 column: 17 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Versions]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 5 column: 17 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-05-07T08:59:00.241791Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); === CheckClustersList. Subcribe to ClusterTracker from [1:7501625701016165981:2626] === CheckClustersList. 
Ok 2025-05-07T08:59:05.648428Z :TopicSplitMerge INFO: TTopicSdkTestSetup started 2025-05-07T08:59:05.664076Z node 1 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:132: new create topic request 2025-05-07T08:59:05.665559Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269877761, Sender [1:7501625722491002619:2685], Recipient [1:7501625679541328762:2209]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-05-07T08:59:05.665599Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4937: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-05-07T08:59:05.665619Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5713: Pipe server connected, at tablet: 72057594046644480 2025-05-07T08:59:05.665690Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271122432, Sender [1:7501625722491002615:2682], Recipient [1:7501625679541328762:2209]: {TEvModifySchemeTransaction txid# 281474976710672 TabletId# 72057594046644480} 2025-05-07T08:59:05.665711Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4851: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-05-07T08:59:05.755343Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/Root" OperationType: ESchemeOpCreatePersQueueGroup CreatePersQueueGroup { Name: "test-topic" TotalGroupCount: 1 PartitionPerTablet: 1 PQTabletConfig { PartitionConfig { MaxCountInPartition: 2147483647 LifetimeSeconds: 86400 SourceIdLifetimeSeconds: 1382400 WriteSpeedInBytesPerSecond: 1048576 BurstSize: 1048576 ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } SourceIdMaxCounts: 6000000 } RequireAuthWrite: true RequireAuthRead: true FormatVersion: 0 Codecs { } PartitionStrategy { MinPartitionCount: 1 MaxPartitionCount: 100 ScaleThresholdSeconds: 300 ScaleUpPartitionWriteSpeedThresholdPercent: 90 ScaleDownPartitionWriteSpeedThresholdPercent: 30 PartitionStrategyType: CAN_SPLIT } Consumers { Na ... g was scheduled 2025-05-07T09:02:34.212103Z node 7 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1255: [72075186224037893][rt3.dc1--topic2] consumer some_user balancing. Sessions=1, Families=2, UnradableFamilies=1 [2 (1), ], RequireBalancing=0 [] 2025-05-07T09:02:34.212130Z node 7 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1292: [72075186224037893][rt3.dc1--topic2] consumer some_user balancing of the family=2 (Status=Free, Partitions=[1]) failed because there are no suitable reading sessions. 
2025-05-07T09:02:34.212150Z node 7 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1399: [72075186224037893][rt3.dc1--topic2] consumer some_user balancing duration: 0.000025s 2025-05-07T09:02:34.212202Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2433: [PQ: 72075186224037892] Destroy direct read session shared/some_user_7_1_15247479292643248149_v1 2025-05-07T09:02:34.212262Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2899: [PQ: 72075186224037892] server disconnected, pipe [7:7501626612117486101:2539] destroyed 2025-05-07T09:02:34.212319Z node 8 :PQ_READ_PROXY DEBUG: caching_service.cpp:138: Direct read cache: server session deregistered: shared/some_user_7_1_15247479292643248149_v1 2025-05-07T09:02:34.212484Z node 7 :PQ_MIRRORER ERROR: actor.cpp:37: [mirrorer for Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 1][reader 1] [] [c09ff02f-85f6e525-5958f28a-cb58f009] [] Got error. Status: CLIENT_CANCELLED. Description:
: Error: GRpc error: (1): Cancelled on the server side 2025-05-07T09:02:34.212686Z node 7 :PQ_MIRRORER DEBUG: actor.cpp:37: [mirrorer for Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 1][reader 1] [] [c09ff02f-85f6e525-5958f28a-cb58f009] [] In Reconnect, ReadSizeBudget = 0, ReadSizeServerDelta = 8388608 2025-05-07T09:02:34.212725Z node 7 :PQ_MIRRORER DEBUG: actor.cpp:37: [mirrorer for Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 1][reader 1] [] [c09ff02f-85f6e525-5958f28a-cb58f009] [] New values: ReadSizeBudget = 8388608, ReadSizeServerDelta = 0 2025-05-07T09:02:34.212799Z node 7 :PQ_MIRRORER INFO: actor.cpp:37: [mirrorer for Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 1][reader 1] [] [c09ff02f-85f6e525-5958f28a-cb58f009] [] Closing session to cluster: SessionClosed { Status: CLIENT_CANCELLED Issues: "
: Error: GRpc error: (1): Cancelled on the server side " } 2025-05-07T09:02:34.212967Z node 7 :PQ_MIRRORER NOTICE: actor.cpp:37: [mirrorer for Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 1][reader 1] [] [c09ff02f-85f6e525-5958f28a-cb58f009] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2025-05-07T09:02:34.213026Z node 7 :PQ_MIRRORER DEBUG: actor.cpp:37: [mirrorer for Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 1][reader 1] [] [c09ff02f-85f6e525-5958f28a-cb58f009] [] Abort session to cluster 2025-05-07T09:02:34.213064Z node 8 :PQ_MIRRORER DEBUG: mirrorer.cpp:601: [mirrorer for Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 1] got next reader event: 1 2025-05-07T09:02:34.213182Z node 7 :PQ_MIRRORER INFO: actor.cpp:37: [mirrorer for Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 1][reader 1] [] [c09ff02f-85f6e525-5958f28a-cb58f009] Closing read session. Close timeout: 0.000000s 2025-05-07T09:02:34.213226Z node 7 :PQ_MIRRORER INFO: actor.cpp:37: [mirrorer for Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 1][reader 1] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): -:/topic2:1:1:11:12 2025-05-07T09:02:34.213225Z node 8 :PQ_MIRRORER ERROR: partition.cpp:929: [mirrorer for Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 1]: read session closed: SessionClosed { Status: CLIENT_CANCELLED Issues: "
: Error: GRpc error: (1): Cancelled on the server side " } 2025-05-07T09:02:34.213276Z node 7 :PQ_MIRRORER INFO: actor.cpp:37: [mirrorer for Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 1][reader 1] [] [c09ff02f-85f6e525-5958f28a-cb58f009] Counters: { Errors: 1 CurrentSessionLifetimeMs: 1688 BytesRead: 255 MessagesRead: 12 BytesReadCompressed: 255 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-05-07T09:02:34.213310Z node 7 :PQ_MIRRORER INFO: actor.cpp:37: [mirrorer for Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 1][reader 1] [] [c09ff02f-85f6e525-5958f28a-cb58f009] Closing read session. Close timeout: 0.000000s 2025-05-07T09:02:34.213338Z node 7 :PQ_MIRRORER INFO: actor.cpp:37: [mirrorer for Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 1][reader 1] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): -:/topic2:1:1:11:12 2025-05-07T09:02:34.213375Z node 7 :PQ_MIRRORER INFO: actor.cpp:37: [mirrorer for Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 1][reader 1] [] [c09ff02f-85f6e525-5958f28a-cb58f009] Counters: { Errors: 1 CurrentSessionLifetimeMs: 1688 BytesRead: 255 MessagesRead: 12 BytesReadCompressed: 255 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-05-07T09:02:34.213446Z node 7 :PQ_MIRRORER NOTICE: actor.cpp:37: [mirrorer for Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 1][reader 1] [] [c09ff02f-85f6e525-5958f28a-cb58f009] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2025-05-07T09:02:34.214119Z node 8 :PQ_MIRRORER NOTICE: mirrorer.cpp:546: [mirrorer for Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 1] schedule consumer creation 2025-05-07T09:02:34.214333Z node 7 :PQ_MIRRORER ERROR: actor.cpp:37: [mirrorer for Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0][reader 1] [] [5494252-4b273380-2638fb6b-45398f24] [] Got error. Status: CLIENT_CANCELLED. Description:
: Error: GRpc error: (1): Cancelled on the server side 2025-05-07T09:02:34.214514Z node 7 :PQ_MIRRORER DEBUG: actor.cpp:37: [mirrorer for Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0][reader 1] [] [5494252-4b273380-2638fb6b-45398f24] [] In Reconnect, ReadSizeBudget = 0, ReadSizeServerDelta = 8388608 2025-05-07T09:02:34.214542Z node 7 :PQ_MIRRORER DEBUG: actor.cpp:37: [mirrorer for Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0][reader 1] [] [5494252-4b273380-2638fb6b-45398f24] [] New values: ReadSizeBudget = 8388608, ReadSizeServerDelta = 0 2025-05-07T09:02:34.214583Z node 7 :PQ_MIRRORER INFO: actor.cpp:37: [mirrorer for Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0][reader 1] [] [5494252-4b273380-2638fb6b-45398f24] [] Closing session to cluster: SessionClosed { Status: CLIENT_CANCELLED Issues: "
: Error: GRpc error: (1): Cancelled on the server side " } 2025-05-07T09:02:34.214666Z node 7 :PQ_MIRRORER NOTICE: actor.cpp:37: [mirrorer for Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0][reader 1] [] [5494252-4b273380-2638fb6b-45398f24] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2025-05-07T09:02:34.214689Z node 7 :PQ_MIRRORER DEBUG: actor.cpp:37: [mirrorer for Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0][reader 1] [] [5494252-4b273380-2638fb6b-45398f24] [] Abort session to cluster 2025-05-07T09:02:34.214708Z node 8 :PQ_MIRRORER DEBUG: mirrorer.cpp:601: [mirrorer for Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0] got next reader event: 1 2025-05-07T09:02:34.214760Z node 7 :PQ_MIRRORER INFO: actor.cpp:37: [mirrorer for Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0][reader 1] [] [5494252-4b273380-2638fb6b-45398f24] Closing read session. Close timeout: 0.000000s 2025-05-07T09:02:34.214788Z node 7 :PQ_MIRRORER INFO: actor.cpp:37: [mirrorer for Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0][reader 1] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): -:/topic2:0:1:15:16 2025-05-07T09:02:34.214807Z node 7 :PQ_MIRRORER INFO: actor.cpp:37: [mirrorer for Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0][reader 1] [] [5494252-4b273380-2638fb6b-45398f24] Counters: { Errors: 1 CurrentSessionLifetimeMs: 1685 BytesRead: 530 MessagesRead: 16 BytesReadCompressed: 530 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-05-07T09:02:34.214789Z node 8 :PQ_MIRRORER ERROR: partition.cpp:929: [mirrorer for Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0]: read session closed: SessionClosed { Status: CLIENT_CANCELLED Issues: "
: Error: GRpc error: (1): Cancelled on the server side " } 2025-05-07T09:02:34.214820Z node 7 :PQ_MIRRORER INFO: actor.cpp:37: [mirrorer for Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0][reader 1] [] [5494252-4b273380-2638fb6b-45398f24] Closing read session. Close timeout: 0.000000s 2025-05-07T09:02:34.214842Z node 7 :PQ_MIRRORER INFO: actor.cpp:37: [mirrorer for Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0][reader 1] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): -:/topic2:0:1:15:16 2025-05-07T09:02:34.214858Z node 7 :PQ_MIRRORER INFO: actor.cpp:37: [mirrorer for Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0][reader 1] [] [5494252-4b273380-2638fb6b-45398f24] Counters: { Errors: 1 CurrentSessionLifetimeMs: 1685 BytesRead: 530 MessagesRead: 16 BytesReadCompressed: 530 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-05-07T09:02:34.214882Z node 7 :PQ_MIRRORER NOTICE: actor.cpp:37: [mirrorer for Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0][reader 1] [] [5494252-4b273380-2638fb6b-45398f24] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2025-05-07T09:02:34.216373Z node 8 :PQ_MIRRORER NOTICE: mirrorer.cpp:546: [mirrorer for Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0] schedule consumer creation 2025-05-07T09:02:34.224882Z node 7 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 2 consumer shared/some_user session shared/some_user_7_2_15241466959848133550_v1 grpc read done: success# 0, data# { } 2025-05-07T09:02:34.224911Z node 7 :PQ_READ_PROXY INFO: read_session_actor.cpp:125: session cookie 2 consumer shared/some_user session shared/some_user_7_2_15241466959848133550_v1 grpc read failed 2025-05-07T09:02:34.224933Z node 7 :PQ_READ_PROXY INFO: read_session_actor.cpp:1645: session cookie 2 consumer shared/some_user session shared/some_user_7_2_15241466959848133550_v1 closed 2025-05-07T09:02:34.225032Z node 7 :PQ_READ_PROXY INFO: read_session_actor.cpp:377: session cookie 2 consumer shared/some_user session shared/some_user_7_2_15241466959848133550_v1 is DEAD 2025-05-07T09:02:34.225374Z node 7 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037893][rt3.dc1--topic2] pipe [7:7501626612117486097:2536] disconnected; active server actors: 1 2025-05-07T09:02:34.225393Z node 7 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1673: [72075186224037893][rt3.dc1--topic2] pipe [7:7501626612117486097:2536] client some_user disconnected session shared/some_user_7_2_15241466959848133550_v1 2025-05-07T09:02:34.225626Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2433: [PQ: 72075186224037892] Destroy direct read session shared/some_user_7_2_15241466959848133550_v1 2025-05-07T09:02:34.225689Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2899: [PQ: 72075186224037892] server disconnected, pipe [7:7501626612117486102:2540] destroyed 2025-05-07T09:02:34.225723Z node 8 :PQ_READ_PROXY DEBUG: caching_service.cpp:138: Direct read cache: server session deregistered: shared/some_user_7_2_15241466959848133550_v1 >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_index_0__ASYNC-pk_types6-all_types6-index6---ASYNC] [GOOD] >> CommitOffset::Commit_WithSession_ToPastParentPartition [GOOD] >> YdbOlapStore::LogGrepExisting [GOOD] >> YdbOlapStore::LogExistingRequest >> TNebiusAccessServiceTest::Authenticate |92.1%| [TM] {asan, default-linux-x86_64, release} ydb/library/ncloud/impl/ut/unittest |92.1%| [TM] {asan, default-linux-x86_64, release} ydb/library/ncloud/impl/ut/unittest |92.1%| [TM] {asan, default-linux-x86_64, release} ydb/library/ncloud/impl/ut/unittest |92.1%| [TM] {asan, default-linux-x86_64, release} ydb/library/ncloud/impl/ut/unittest |92.1%| [TM] {asan, default-linux-x86_64, release} ydb/library/ncloud/impl/ut/unittest >> TNebiusAccessServiceTest::PassRequestId [GOOD] >> TNebiusAccessServiceTest::Authenticate [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/library/ncloud/impl/ut/unittest >> TNebiusAccessServiceTest::PassRequestId [GOOD] Test command err: 2025-05-07T09:02:40.438811Z node 2 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [517000003908]{reqId} Connect to grpc://localhost:1697 2025-05-07T09:02:40.441932Z node 2 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000003908]{reqId} Request AuthenticateRequest { iam_token: "**** (717F937C)" } 2025-05-07T09:02:40.476512Z node 2 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [517000003908]{reqId} Response AuthenticateResponse { account { user_account { id: "1234" } } } ------- [TM] {asan, default-linux-x86_64, release} ydb/library/ncloud/impl/ut/unittest >> 
TNebiusAccessServiceTest::Authenticate [GOOD] Test command err: 2025-05-07T09:02:40.364980Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [517000002b08] Connect to grpc://localhost:14398 2025-05-07T09:02:40.402848Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000002b08] Request AuthenticateRequest { iam_token: "**** (3C4833B6)" } 2025-05-07T09:02:40.469650Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [517000002b08] Status 7 Permission Denied 2025-05-07T09:02:40.470046Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000002b08] Request AuthenticateRequest { iam_token: "**** (86DDB286)" } 2025-05-07T09:02:40.476573Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [517000002b08] Response AuthenticateResponse { account { user_account { id: "1234" } } } >> TNebiusAccessServiceTest::Authorize [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/library/ncloud/impl/ut/unittest >> TNebiusAccessServiceTest::Authorize [GOOD] Test command err: 2025-05-07T09:02:41.098374Z node 3 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [517000004388] Connect to grpc://localhost:21360 2025-05-07T09:02:41.106700Z node 3 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000004388] Request AuthorizeRequest { checks { key: 0 value { permission { name: "perm" } resource_path { path { id: "path_id" } } iam_token: "**** (717F937C)" } } } 2025-05-07T09:02:41.121792Z node 3 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [517000004388] Response AuthorizeResponse { results { key: 0 value { account { user_account { id: "user_id" } } } } } 2025-05-07T09:02:41.122426Z node 3 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000004388] Request AuthorizeRequest { checks { key: 0 value { permission { name: "perm" } resource_path { path { id: "path_id" } } iam_token: "**** (79225CA9)" } } } 2025-05-07T09:02:41.124263Z node 3 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [517000004388] Status 7 Permission Denied 2025-05-07T09:02:41.124636Z node 3 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000004388] Request AuthorizeRequest { checks { key: 0 value { permission { name: "denied" } resource_path { path { id: "path_id" } } iam_token: "**** (717F937C)" } } } 2025-05-07T09:02:41.125900Z node 3 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [517000004388] Status 7 Permission Denied 2025-05-07T09:02:41.126297Z node 3 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000004388] Request AuthorizeRequest { checks { key: 0 value { permission { name: "perm" } resource_path { path { id: "p" } } iam_token: "**** (717F937C)" } } } 2025-05-07T09:02:41.127781Z node 3 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [517000004388] Status 7 Permission Denied >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_all_types-pk_types7-all_types7-index7---] [GOOD] |92.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/external_sources/hive_metastore/ut/ydb-core-external_sources-hive_metastore-ut |92.1%| [LD] {RESULT} $(B)/ydb/core/external_sources/hive_metastore/ut/ydb-core-external_sources-hive_metastore-ut |92.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/external_sources/hive_metastore/ut/ydb-core-external_sources-hive_metastore-ut |92.1%| [TA] $(B)/ydb/library/ncloud/impl/ut/test-results/unittest/{meta.json ... results_accumulator.log} |92.1%| [TA] {RESULT} $(B)/ydb/library/ncloud/impl/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} |92.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/async_replication/py3test >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_index_1__ASYNC-pk_types5-all_types5-index5---ASYNC] [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/ut_with_sdk/unittest >> CommitOffset::Commit_WithSession_ToPastParentPartition [GOOD] Test command err: 2025-05-07T08:58:52.539170Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501625668908839873:2063];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:58:52.539209Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-05-07T08:58:53.055760Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/004022/r3tmp/tmprSXJCP/pdisk_1.dat 2025-05-07T08:58:53.684172Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:58:53.756783Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:58:53.756903Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:58:53.767501Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T08:58:53.775417Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 6481, node 1 2025-05-07T08:58:54.006573Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/zvgn/004022/r3tmp/yandexceVMMS.tmp 2025-05-07T08:58:54.006597Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/zvgn/004022/r3tmp/yandexceVMMS.tmp 2025-05-07T08:58:54.006765Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/zvgn/004022/r3tmp/yandexceVMMS.tmp 2025-05-07T08:58:54.006885Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-05-07T08:58:54.164790Z INFO: TTestServer started on Port 24984 GrpcPort 6481 TClient is connected to server localhost:24984 PQClient connected to localhost:6481 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:58:54.661030Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-05-07T08:58:54.714300Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-05-07T08:58:54.892488Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710660, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:58:54.904517Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710661, at schemeshard: 72057594046644480 2025-05-07T08:58:57.542098Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501625668908839873:2063];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:58:57.542175Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:58:58.353797Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501625694678644472:2341], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:58:58.353996Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:58:58.360234Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501625694678644484:2344], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:58:58.371805Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501625694678644515:2347], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:58:58.371889Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:58:58.374367Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710662:3, at schemeshard: 72057594046644480 2025-05-07T08:58:58.403687Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501625694678644486:2345], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710662 completed, doublechecking } 2025-05-07T08:58:58.467498Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501625694678644542:2447] txid# 281474976710663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:58:58.820655Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T08:58:58.833306Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7501625694678644551:2351], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-05-07T08:58:58.834190Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2147: SessionId: ydb://session/3?node_id=1&id=YjAyZDA3OGMtN2QxMzY2NTMtNzY2NGFhNDYtYzNmYTRmYjU=, ActorId: [1:7501625694678644469:2339], ActorState: ExecuteState, TraceId: 01jtmzgdbx5kpwrjz94gs89gtc, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-05-07T08:58:58.836196Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-05-07T08:58:58.919415Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T08:58:59.033873Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); === CheckClustersList. Subcribe to ClusterTracker from [1:7501625698973612147:2627] === CheckClustersList. 
Ok 2025-05-07T08:59:05.599439Z :TopicSplitMerge INFO: TTopicSdkTestSetup started 2025-05-07T08:59:05.616551Z node 1 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:132: new create topic request 2025-05-07T08:59:05.617962Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269877761, Sender [1:7501625724743416106:2695], Recipient [1:7501625673203807566:2179]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-05-07T08:59:05.618050Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4937: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-05-07T08:59:05.618089Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5713: Pipe server connected, at tablet: 72057594046644480 2025-05-07T08:59:05.618142Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271122432, Sender [1:7501625724743416102:2692], Recipient [1:7501625673203807566:2179]: {TEvModifySchemeTransaction txid# 281474976710673 TabletId# 72057594046644480} 2025-05-07T08:59:05.618159Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4851: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-05-07T08:59:05.777883Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/Root" OperationType: ESchemeOpCreatePersQueueGroup CreatePersQueueGroup { Name: "test-topic" TotalGroupCount: 1 PartitionPerTablet: 1 PQTabletConfig { PartitionConfig { MaxCountInPartition: 2147483647 LifetimeSeconds: 86400 SourceIdLifetimeSeconds: 1382400 WriteSpeedInBytesPerSecond: 1048576 BurstSize: 1048576 ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" ... PERSQUEUE TRACE: partition_write.cpp:252: [PQ: 72075186224037899, Partition: 3, State: StateIdle] TPartition::AnswerCurrentWrites. 
Responses.size()=0 2025-05-07T09:02:38.195996Z node 7 :PERSQUEUE TRACE: partition.h:561: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7501626580931907892:2463], Partition 0, Sender [0:0:0], Recipient [7:7501626580931907952:2467], Cookie: 0 2025-05-07T09:02:38.195996Z node 7 :PERSQUEUE TRACE: partition.h:561: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7501626610996680077:2775], Partition 1, Sender [0:0:0], Recipient [7:7501626610996680151:2782], Cookie: 0 2025-05-07T09:02:38.196047Z node 7 :PERSQUEUE TRACE: partition.h:563: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7501626610996680151:2782]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-05-07T09:02:38.196069Z node 7 :PERSQUEUE TRACE: partition.h:563: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7501626580931907952:2467]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-05-07T09:02:38.196074Z node 7 :PERSQUEUE TRACE: partition.h:589: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-05-07T09:02:38.196087Z node 7 :PERSQUEUE TRACE: partition.h:589: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-05-07T09:02:38.196111Z node 7 :PERSQUEUE TRACE: partition.cpp:398: [PQ: 72075186224037897, Partition: 1, State: StateIdle] Have 0 items to delete old stuff 2025-05-07T09:02:38.196116Z node 7 :PERSQUEUE TRACE: partition.cpp:398: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete old stuff 2025-05-07T09:02:38.196187Z node 7 :PERSQUEUE TRACE: partition.cpp:407: [PQ: 72075186224037897, Partition: 1, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-05-07T09:02:38.196198Z node 7 :PERSQUEUE TRACE: partition.cpp:407: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-05-07T09:02:38.196212Z node 7 :PERSQUEUE TRACE: partition_write.cpp:163: [PQ: 72075186224037897, Partition: 1, State: StateIdle] TPartition::ProcessReserveRequests. 2025-05-07T09:02:38.196215Z node 7 :PERSQUEUE TRACE: partition_write.cpp:163: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ProcessReserveRequests. 2025-05-07T09:02:38.196245Z node 7 :PERSQUEUE TRACE: partition_write.cpp:252: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-05-07T09:02:38.196246Z node 7 :PERSQUEUE TRACE: partition_write.cpp:252: [PQ: 72075186224037897, Partition: 1, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-05-07T09:02:38.196291Z node 7 :PERSQUEUE TRACE: partition.h:561: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7501626610996680073:2774], Partition 2, Sender [0:0:0], Recipient [7:7501626610996680150:2781], Cookie: 0 2025-05-07T09:02:38.196320Z node 7 :PERSQUEUE TRACE: partition.h:563: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7501626610996680150:2781]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-05-07T09:02:38.196330Z node 7 :PERSQUEUE TRACE: partition.h:589: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-05-07T09:02:38.196355Z node 7 :PERSQUEUE TRACE: partition.cpp:398: [PQ: 72075186224037896, Partition: 2, State: StateIdle] Have 0 items to delete old stuff 2025-05-07T09:02:38.196379Z node 7 :PERSQUEUE TRACE: partition.cpp:407: [PQ: 72075186224037896, Partition: 2, State: StateIdle] Have 0 items to delete all stuff. 
Delete command NKikimrClient.TKeyValueRequest 2025-05-07T09:02:38.196390Z node 7 :PERSQUEUE TRACE: partition_write.cpp:163: [PQ: 72075186224037896, Partition: 2, State: StateIdle] TPartition::ProcessReserveRequests. 2025-05-07T09:02:38.196401Z node 7 :PERSQUEUE TRACE: partition_write.cpp:252: [PQ: 72075186224037896, Partition: 2, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-05-07T09:02:38.289709Z node 7 :PERSQUEUE TRACE: partition.h:561: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7501626623881582389:2894], Partition 4, Sender [0:0:0], Recipient [7:7501626623881582474:2904], Cookie: 0 2025-05-07T09:02:38.289709Z node 7 :PERSQUEUE TRACE: partition.h:561: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7501626623881582391:2895], Partition 3, Sender [0:0:0], Recipient [7:7501626623881582473:2903], Cookie: 0 2025-05-07T09:02:38.289765Z node 7 :PERSQUEUE TRACE: partition.h:563: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7501626623881582473:2903]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-05-07T09:02:38.289769Z node 7 :PERSQUEUE TRACE: partition.h:563: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7501626623881582474:2904]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-05-07T09:02:38.289793Z node 7 :PERSQUEUE TRACE: partition.h:589: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-05-07T09:02:38.289793Z node 7 :PERSQUEUE TRACE: partition.h:589: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-05-07T09:02:38.289831Z node 7 :PERSQUEUE TRACE: partition.cpp:398: [PQ: 72075186224037898, Partition: 4, State: StateIdle] Have 0 items to delete old stuff 2025-05-07T09:02:38.289832Z node 7 :PERSQUEUE TRACE: partition.cpp:398: [PQ: 72075186224037899, Partition: 3, State: StateIdle] Have 0 items to delete old stuff 2025-05-07T09:02:38.289900Z node 7 :PERSQUEUE TRACE: partition.cpp:407: [PQ: 72075186224037899, Partition: 3, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-05-07T09:02:38.289900Z node 7 :PERSQUEUE TRACE: partition.cpp:407: [PQ: 72075186224037898, Partition: 4, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-05-07T09:02:38.289923Z node 7 :PERSQUEUE TRACE: partition_write.cpp:163: [PQ: 72075186224037899, Partition: 3, State: StateIdle] TPartition::ProcessReserveRequests. 2025-05-07T09:02:38.289923Z node 7 :PERSQUEUE TRACE: partition_write.cpp:163: [PQ: 72075186224037898, Partition: 4, State: StateIdle] TPartition::ProcessReserveRequests. 2025-05-07T09:02:38.289953Z node 7 :PERSQUEUE TRACE: partition_write.cpp:252: [PQ: 72075186224037899, Partition: 3, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-05-07T09:02:38.289953Z node 7 :PERSQUEUE TRACE: partition_write.cpp:252: [PQ: 72075186224037898, Partition: 4, State: StateIdle] TPartition::AnswerCurrentWrites. 
Responses.size()=0 2025-05-07T09:02:38.296358Z node 7 :PERSQUEUE TRACE: partition.h:561: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7501626610996680077:2775], Partition 1, Sender [0:0:0], Recipient [7:7501626610996680151:2782], Cookie: 0 2025-05-07T09:02:38.296366Z node 7 :PERSQUEUE TRACE: partition.h:561: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7501626580931907892:2463], Partition 0, Sender [0:0:0], Recipient [7:7501626580931907952:2467], Cookie: 0 2025-05-07T09:02:38.296429Z node 7 :PERSQUEUE TRACE: partition.h:563: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7501626580931907952:2467]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-05-07T09:02:38.296436Z node 7 :PERSQUEUE TRACE: partition.h:563: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7501626610996680151:2782]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-05-07T09:02:38.296455Z node 7 :PERSQUEUE TRACE: partition.h:589: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-05-07T09:02:38.296458Z node 7 :PERSQUEUE TRACE: partition.h:589: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-05-07T09:02:38.296493Z node 7 :PERSQUEUE TRACE: partition.cpp:398: [PQ: 72075186224037897, Partition: 1, State: StateIdle] Have 0 items to delete old stuff 2025-05-07T09:02:38.296493Z node 7 :PERSQUEUE TRACE: partition.cpp:398: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete old stuff 2025-05-07T09:02:38.296558Z node 7 :PERSQUEUE TRACE: partition.cpp:407: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-05-07T09:02:38.296564Z node 7 :PERSQUEUE TRACE: partition.cpp:407: [PQ: 72075186224037897, Partition: 1, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-05-07T09:02:38.296580Z node 7 :PERSQUEUE TRACE: partition_write.cpp:163: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ProcessReserveRequests. 2025-05-07T09:02:38.296584Z node 7 :PERSQUEUE TRACE: partition_write.cpp:163: [PQ: 72075186224037897, Partition: 1, State: StateIdle] TPartition::ProcessReserveRequests. 2025-05-07T09:02:38.296610Z node 7 :PERSQUEUE TRACE: partition_write.cpp:252: [PQ: 72075186224037897, Partition: 1, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-05-07T09:02:38.296611Z node 7 :PERSQUEUE TRACE: partition_write.cpp:252: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-05-07T09:02:38.296649Z node 7 :PERSQUEUE TRACE: partition.h:561: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7501626610996680073:2774], Partition 2, Sender [0:0:0], Recipient [7:7501626610996680150:2781], Cookie: 0 2025-05-07T09:02:38.296670Z node 7 :PERSQUEUE TRACE: partition.h:563: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7501626610996680150:2781]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-05-07T09:02:38.296680Z node 7 :PERSQUEUE TRACE: partition.h:589: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-05-07T09:02:38.296696Z node 7 :PERSQUEUE TRACE: partition.cpp:398: [PQ: 72075186224037896, Partition: 2, State: StateIdle] Have 0 items to delete old stuff 2025-05-07T09:02:38.296715Z node 7 :PERSQUEUE TRACE: partition.cpp:407: [PQ: 72075186224037896, Partition: 2, State: StateIdle] Have 0 items to delete all stuff. 
Delete command NKikimrClient.TKeyValueRequest 2025-05-07T09:02:38.296724Z node 7 :PERSQUEUE TRACE: partition_write.cpp:163: [PQ: 72075186224037896, Partition: 2, State: StateIdle] TPartition::ProcessReserveRequests. 2025-05-07T09:02:38.296734Z node 7 :PERSQUEUE TRACE: partition_write.cpp:252: [PQ: 72075186224037896, Partition: 2, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-05-07T09:02:38.344993Z node 7 :PERSQUEUE TRACE: partition.h:561: StateIdle event# 271188544 (NKikimr::NPQ::NReadQuoterEvents::TEvQuotaCountersUpdated), Tablet [7:7501626623881582389:2894], Partition 4, Sender [7:7501626623881582488:2907], Recipient [7:7501626623881582474:2904], Cookie: 0 2025-05-07T09:02:38.345059Z node 7 :PERSQUEUE TRACE: partition.h:563: StateIdle, received event# 271188544, Sender [7:7501626623881582488:2907], Recipient [7:7501626623881582474:2904]: NKikimr::NPQ::NReadQuoterEvents::TEvQuotaCountersUpdated 2025-05-07T09:02:38.345084Z node 7 :PERSQUEUE TRACE: partition.h:609: StateIdle, processing event NReadQuoterEvents::TEvQuotaCountersUpdated 2025-05-07T09:02:38.346460Z node 7 :PERSQUEUE TRACE: partition.h:561: StateIdle event# 271188544 (NKikimr::NPQ::NReadQuoterEvents::TEvQuotaCountersUpdated), Tablet [7:7501626623881582391:2895], Partition 3, Sender [7:7501626623881582492:2909], Recipient [7:7501626623881582473:2903], Cookie: 0 2025-05-07T09:02:38.346512Z node 7 :PERSQUEUE TRACE: partition.h:563: StateIdle, received event# 271188544, Sender [7:7501626623881582492:2909], Recipient [7:7501626623881582473:2903]: NKikimr::NPQ::NReadQuoterEvents::TEvQuotaCountersUpdated 2025-05-07T09:02:38.346524Z node 7 :PERSQUEUE TRACE: partition.h:609: StateIdle, processing event NReadQuoterEvents::TEvQuotaCountersUpdated >> TopicAutoscaling::PartitionSplit_AutosplitByLoad_AfterAlter [GOOD] |92.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/async_replication/py3test >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_ttl_Date-pk_types13-all_types13-index13-Date--] [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/ut_with_sdk/unittest >> TopicAutoscaling::PartitionSplit_AutosplitByLoad_AfterAlter [GOOD] Test command err: 2025-05-07T08:58:57.277619Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501625690413114350:2061];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:58:57.277835Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-05-07T08:58:57.583153Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003ff7/r3tmp/tmpW08Wjq/pdisk_1.dat 2025-05-07T08:58:57.990257Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:58:57.995341Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:58:57.995452Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:58:58.002285Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 22925, node 1 2025-05-07T08:58:58.108055Z node 1 :NET_CLASSIFIER 
WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/zvgn/003ff7/r3tmp/yandexYvlJUH.tmp 2025-05-07T08:58:58.108113Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/zvgn/003ff7/r3tmp/yandexYvlJUH.tmp 2025-05-07T08:58:58.108296Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/zvgn/003ff7/r3tmp/yandexYvlJUH.tmp 2025-05-07T08:58:58.108437Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-05-07T08:58:58.164317Z INFO: TTestServer started on Port 24177 GrpcPort 22925 TClient is connected to server localhost:24177 PQClient connected to localhost:22925 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:58:58.565106Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-05-07T08:58:58.596012Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-05-07T08:58:58.601908Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-05-07T08:58:58.758995Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710660, at schemeshard: 72057594046644480 waiting... 2025-05-07T08:59:01.430246Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501625707592984349:2340], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:59:01.430351Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501625707592984361:2343], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:59:01.430451Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T08:59:01.434891Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710662:3, at schemeshard: 72057594046644480 2025-05-07T08:59:01.460843Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501625707592984363:2344], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710662 completed, doublechecking } 2025-05-07T08:59:01.686632Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501625707592984427:2445] txid# 281474976710663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T08:59:01.716156Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T08:59:01.779246Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T08:59:01.866980Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7501625707592984435:2350], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:5:17: Error: At function: KiReadTable!
:5:17: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Versions]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-05-07T08:59:01.868495Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2147: SessionId: ydb://session/3?node_id=1&id=N2M0MmNhM2MtMzUwYTMzNmYtMzM5MmIyOWYtZmI2ZGNmYWM=, ActorId: [1:7501625707592984346:2338], ActorState: ExecuteState, TraceId: 01jtmzggd56wx32y6fb27z9z81, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-05-07T08:59:01.871638Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 5 column: 17 } message: "At function: KiReadTable!" end_position { row: 5 column: 17 } severity: 1 issues { position { row: 5 column: 17 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Versions]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 5 column: 17 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-05-07T08:59:01.909756Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); === CheckClustersList. Subcribe to ClusterTracker from [1:7501625711887952025:2622] 2025-05-07T08:59:02.274446Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501625690413114350:2061];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:59:02.274551Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; === CheckClustersList. 
Ok 2025-05-07T08:59:07.281149Z :TopicSplitMerge INFO: TTopicSdkTestSetup started 2025-05-07T08:59:07.293249Z node 1 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:132: new create topic request 2025-05-07T08:59:07.294336Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269877761, Sender [1:7501625733362788663:2681], Recipient [1:7501625690413114750:2180]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-05-07T08:59:07.294376Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4937: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-05-07T08:59:07.294390Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5713: Pipe server connected, at tablet: 72057594046644480 2025-05-07T08:59:07.294421Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271122432, Sender [1:7501625733362788659:2678], Recipient [1:7501625690413114750:2180]: {TEvModifySchemeTransaction txid# 281474976710672 TabletId# 72057594046644480} 2025-05-07T08:59:07.294433Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4851: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-05-07T08:59:07.342078Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/Root" OperationType: ESchemeOpCreatePersQueueGroup CreatePersQueueGroup { Name: "test-topic" TotalGroupCount: 1 PartitionPerTablet: 1 PQTabletConfig { PartitionConfig { MaxCountInPartition: 2147483647 LifetimeSeconds: 86400 SourceIdLifetimeSeconds: 1382400 WriteSpeedInBytesPerSecond: 1048576 BurstSize: 1048576 ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } SourceIdMaxCounts: 6000000 } RequireAuthWrite: true RequireAuthRead: true FormatVersion: 0 Codecs { } PartitionStrategy { MinPartitionCount: 1 MaxPartitionCount: 100 ScaleThresholdSeconds: 300 ScaleUpPartitionWriteSpeedThresholdPercent: 90 ScaleDownPartitionWriteSpeedThresholdPercent: 30 PartitionStrategyType: CAN_SPLIT } Consumers { Name: "test-consumer" ReadFromTimestampsMs: 0 FormatVersion: 0 Codec { } ServiceType: "data-streams" Version: 0 } } } } TxId: 281474976710672 TabletId: 72057594046644480 Owner: "root@builtin" UserToken: "***" PeerName: "" , at schemeshard: 72057594046644480 2025-05-07T08:59:07.342570Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_pq.cpp:307: TCreatePQ Pr ... 
SQUEUE TRACE: pq_impl.cpp:2882: [PQ: 72075186224037892] Handle TEvTabletPipe::TEvServerDisconnected 2025-05-07T09:02:42.334443Z node 7 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 1 sessionId: producer-1|18db4b49-f2b65296-86e05b00-846efb7b_0 grpc read done: success: 0 data: 2025-05-07T09:02:42.334455Z node 7 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 1 sessionId: producer-1|18db4b49-f2b65296-86e05b00-846efb7b_0 grpc read failed 2025-05-07T09:02:42.334472Z node 7 :PERSQUEUE DEBUG: pq_impl.cpp:2899: [PQ: 72075186224037892] server disconnected, pipe [7:7501626590367317378:2483] destroyed 2025-05-07T09:02:42.334492Z node 7 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:818: session v1 closed cookie: 1 sessionId: producer-1|18db4b49-f2b65296-86e05b00-846efb7b_0 2025-05-07T09:02:42.334501Z node 7 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 1 sessionId: producer-1|18db4b49-f2b65296-86e05b00-846efb7b_0 is DEAD 2025-05-07T09:02:42.334536Z node 7 :PERSQUEUE TRACE: partition.h:561: StateIdle event# 271188506 (NKikimr::TEvPQ::TEvPipeDisconnected), Tablet [7:7501626556007577931:2456], Partition 0, Sender [7:7501626556007577931:2456], Recipient [7:7501626556007577992:2460], Cookie: 0 2025-05-07T09:02:42.334577Z node 7 :PERSQUEUE TRACE: partition.h:563: StateIdle, received event# 271188506, Sender [7:7501626556007577931:2456], Recipient [7:7501626556007577992:2460]: NKikimr::TEvPQ::TEvPipeDisconnected 2025-05-07T09:02:42.334599Z node 7 :PERSQUEUE TRACE: partition.h:591: StateIdle, processing event TEvPQ::TEvPipeDisconnected 2025-05-07T09:02:42.334628Z node 7 :PERSQUEUE DEBUG: partition_write.cpp:138: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::DropOwner. 2025-05-07T09:02:42.334657Z node 7 :PERSQUEUE TRACE: partition_write.cpp:854: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ProcessChangeOwnerRequests. 2025-05-07T09:02:42.334688Z node 7 :PERSQUEUE TRACE: partition.cpp:398: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete old stuff 2025-05-07T09:02:42.334746Z node 7 :PERSQUEUE TRACE: partition.cpp:407: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-05-07T09:02:42.334772Z node 7 :PERSQUEUE TRACE: partition_write.cpp:163: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ProcessReserveRequests. 2025-05-07T09:02:42.334787Z node 7 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_impl__abstract_chooser_actor.h:278: StateIdle, received event# 65543, Sender [7:7501626556007578080:2478], Recipient [7:7501626556007578100:2478]: NActors::TEvents::TEvPoison 2025-05-07T09:02:42.334796Z node 7 :PERSQUEUE TRACE: partition_write.cpp:252: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::AnswerCurrentWrites. 
Responses.size()=0 2025-05-07T09:02:42.334837Z node 7 :PQ_WRITE_PROXY DEBUG: writer.cpp:538: TPartitionWriter 72075186224037892 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-05-07T09:02:42.334928Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5200: HandleHook, received event# 269877764, Sender [7:7501626590367317385:3180], Recipient [7:7501626556007577931:2456]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-05-07T09:02:42.334945Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5219: HandleHook, processing event TEvTabletPipe::TEvServerDisconnected 2025-05-07T09:02:42.334954Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:2882: [PQ: 72075186224037892] Handle TEvTabletPipe::TEvServerDisconnected 2025-05-07T09:02:42.334989Z node 7 :PERSQUEUE DEBUG: pq_impl.cpp:2899: [PQ: 72075186224037892] server disconnected, pipe [7:7501626590367317384:2478] destroyed 2025-05-07T09:02:42.335040Z node 7 :PERSQUEUE TRACE: partition.h:561: StateIdle event# 271188506 (NKikimr::TEvPQ::TEvPipeDisconnected), Tablet [7:7501626556007577931:2456], Partition 0, Sender [7:7501626556007577931:2456], Recipient [7:7501626556007577992:2460], Cookie: 0 2025-05-07T09:02:42.340199Z node 7 :PERSQUEUE TRACE: partition.h:563: StateIdle, received event# 271188506, Sender [7:7501626556007577931:2456], Recipient [7:7501626556007577992:2460]: NKikimr::TEvPQ::TEvPipeDisconnected 2025-05-07T09:02:42.340257Z node 7 :PERSQUEUE TRACE: partition.h:591: StateIdle, processing event TEvPQ::TEvPipeDisconnected 2025-05-07T09:02:42.340295Z node 7 :PERSQUEUE DEBUG: partition_write.cpp:138: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::DropOwner. 2025-05-07T09:02:42.340326Z node 7 :PERSQUEUE TRACE: partition_write.cpp:854: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ProcessChangeOwnerRequests. 2025-05-07T09:02:42.340357Z node 7 :PERSQUEUE TRACE: partition.cpp:398: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete old stuff 2025-05-07T09:02:42.340414Z node 7 :PERSQUEUE TRACE: partition.cpp:407: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-05-07T09:02:42.340445Z node 7 :PERSQUEUE TRACE: partition_write.cpp:163: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ProcessReserveRequests. 2025-05-07T09:02:42.340470Z node 7 :PERSQUEUE TRACE: partition_write.cpp:252: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::AnswerCurrentWrites. 
Responses.size()=0
2025-05-07T09:02:42.346434Z node 7 :PERSQUEUE TRACE: partition.h:561: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7501626633316991237:2994], Partition 3, Sender [0:0:0], Recipient [7:7501626633316991330:3004], Cookie: 0
2025-05-07T09:02:42.346430Z node 7 :PERSQUEUE TRACE: partition.h:561: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7501626633316991236:2993], Partition 4, Sender [0:0:0], Recipient [7:7501626633316991327:3002], Cookie: 0
2025-05-07T09:02:42.346509Z node 7 :PERSQUEUE TRACE: partition.h:563: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7501626633316991327:3002]: NKikimr::TEvPQ::TEvUpdateAvailableSize
2025-05-07T09:02:42.346536Z node 7 :PERSQUEUE TRACE: partition.h:563: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7501626633316991330:3004]: NKikimr::TEvPQ::TEvUpdateAvailableSize
2025-05-07T09:02:42.346549Z node 7 :PERSQUEUE TRACE: partition.h:589: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize
2025-05-07T09:02:42.346572Z node 7 :PERSQUEUE TRACE: partition.h:589: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize
2025-05-07T09:02:42.346618Z node 7 :PERSQUEUE TRACE: partition.cpp:398: [PQ: 72075186224037898, Partition: 4, State: StateIdle] Have 0 items to delete old stuff
2025-05-07T09:02:42.346633Z node 7 :PERSQUEUE TRACE: partition.cpp:398: [PQ: 72075186224037899, Partition: 3, State: StateIdle] Have 0 items to delete old stuff
2025-05-07T09:02:42.346704Z node 7 :PERSQUEUE TRACE: partition.cpp:407: [PQ: 72075186224037898, Partition: 4, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest
2025-05-07T09:02:42.346719Z node 7 :PERSQUEUE TRACE: partition.cpp:407: [PQ: 72075186224037899, Partition: 3, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest
2025-05-07T09:02:42.346750Z node 7 :PERSQUEUE TRACE: partition_write.cpp:163: [PQ: 72075186224037898, Partition: 4, State: StateIdle] TPartition::ProcessReserveRequests.
2025-05-07T09:02:42.346752Z node 7 :PERSQUEUE TRACE: partition_write.cpp:163: [PQ: 72075186224037899, Partition: 3, State: StateIdle] TPartition::ProcessReserveRequests.
2025-05-07T09:02:42.346788Z node 7 :PERSQUEUE TRACE: partition_write.cpp:252: [PQ: 72075186224037898, Partition: 4, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0
2025-05-07T09:02:42.346792Z node 7 :PERSQUEUE TRACE: partition_write.cpp:252: [PQ: 72075186224037899, Partition: 3, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0
2025-05-07T09:02:42.351655Z node 7 :PERSQUEUE TRACE: partition.h:561: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7501626611842154234:2857], Partition 1, Sender [0:0:0], Recipient [7:7501626611842154313:2866], Cookie: 0
2025-05-07T09:02:42.351759Z node 7 :PERSQUEUE TRACE: partition.h:563: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7501626611842154313:2866]: NKikimr::TEvPQ::TEvUpdateAvailableSize
2025-05-07T09:02:42.351801Z node 7 :PERSQUEUE TRACE: partition.h:589: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize
2025-05-07T09:02:42.351873Z node 7 :PERSQUEUE TRACE: partition.cpp:398: [PQ: 72075186224037897, Partition: 1, State: StateIdle] Have 0 items to delete old stuff
2025-05-07T09:02:42.351982Z node 7 :PERSQUEUE TRACE: partition.cpp:407: [PQ: 72075186224037897, Partition: 1, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest
2025-05-07T09:02:42.352019Z node 7 :PERSQUEUE TRACE: partition_write.cpp:163: [PQ: 72075186224037897, Partition: 1, State: StateIdle] TPartition::ProcessReserveRequests.
2025-05-07T09:02:42.352020Z node 7 :PERSQUEUE TRACE: partition.h:561: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7501626556007577931:2456], Partition 0, Sender [0:0:0], Recipient [7:7501626556007577992:2460], Cookie: 0
2025-05-07T09:02:42.352054Z node 7 :PERSQUEUE TRACE: partition_write.cpp:252: [PQ: 72075186224037897, Partition: 1, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0
2025-05-07T09:02:42.352082Z node 7 :PERSQUEUE TRACE: partition.h:563: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7501626556007577992:2460]: NKikimr::TEvPQ::TEvUpdateAvailableSize
2025-05-07T09:02:42.352110Z node 7 :PERSQUEUE TRACE: partition.h:589: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize
2025-05-07T09:02:42.352153Z node 7 :PERSQUEUE TRACE: partition.cpp:398: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete old stuff
2025-05-07T09:02:42.352221Z node 7 :PERSQUEUE TRACE: partition.cpp:407: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest
2025-05-07T09:02:42.352246Z node 7 :PERSQUEUE TRACE: partition_write.cpp:163: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ProcessReserveRequests.
2025-05-07T09:02:42.352274Z node 7 :PERSQUEUE TRACE: partition_write.cpp:252: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0
2025-05-07T09:02:42.352499Z node 7 :PERSQUEUE TRACE: partition.h:561: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7501626611842154235:2858], Partition 2, Sender [0:0:0], Recipient [7:7501626611842154310:2864], Cookie: 0
2025-05-07T09:02:42.352573Z node 7 :PERSQUEUE TRACE: partition.h:563: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7501626611842154310:2864]: NKikimr::TEvPQ::TEvUpdateAvailableSize
2025-05-07T09:02:42.352613Z node 7 :PERSQUEUE TRACE: partition.h:589: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize
2025-05-07T09:02:42.352658Z node 7 :PERSQUEUE TRACE: partition.cpp:398: [PQ: 72075186224037896, Partition: 2, State: StateIdle] Have 0 items to delete old stuff
2025-05-07T09:02:42.352703Z node 7 :PERSQUEUE TRACE: partition.cpp:407: [PQ: 72075186224037896, Partition: 2, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest
2025-05-07T09:02:42.352741Z node 7 :PERSQUEUE TRACE: partition_write.cpp:163: [PQ: 72075186224037896, Partition: 2, State: StateIdle] TPartition::ProcessReserveRequests.
2025-05-07T09:02:42.352772Z node 7 :PERSQUEUE TRACE: partition_write.cpp:252: [PQ: 72075186224037896, Partition: 2, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0
>> YdbOlapStore::LogPagingAfter [GOOD]
>> TResourceBroker::TestErrors
>> TTabletLabeledCountersAggregator::HeavyAggregation
>> TFlatMetrics::TimeSeriesKV2 [GOOD]
>> TPipeCacheTest::TestAutoConnect
>> TTabletPipeTest::TestTwoNodes
>> TTabletPipeTest::TestConsumerSidePipeReset
>> TTabletPipeTest::TestSendWithoutWaitOpenToWrongTablet
>> TFlatMetrics::TimeSeriesAvg16x60 [GOOD]
>> TFlatMetrics::TimeSeriesAvg16Signed [GOOD]
>> TTabletPipeTest::TestKillClientBeforServerIdKnown
>> TTabletPipeTest::TestPipeWithVersionInfo
>> TFlatMetrics::TimeSeriesAvg4 [GOOD]
>> TFlatMetrics::MaximumValue1 [GOOD]
>> TFlatMetrics::MaximumValue2 [GOOD]
>> TFlatMetrics::TimeSeriesKV [GOOD]
>> TTabletLabeledCountersAggregator::SimpleAggregation
>> TTabletPipeTest::TestSendAfterReboot
>> TTabletPipeTest::TestRebootUsingTabletWithoutAcceptor
>> TTabletCountersPercentile::WithoutZero [GOOD]
>> TTabletCountersPercentile::StartFromZero [GOOD]
>> TTabletPipeTest::TestSendBeforeBootTarget
>> TResourceBroker::TestRealUsage
|92.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TTabletCountersPercentile::StartFromZero [GOOD]
|92.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TFlatMetrics::TimeSeriesAvg16Signed [GOOD]
|92.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TFlatMetrics::MaximumValue2 [GOOD]
|92.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TFlatMetrics::TimeSeriesKV [GOOD]
>> TTabletLabeledCountersAggregator::SimpleAggregation [GOOD]
>> TTabletLabeledCountersAggregator::Version3Aggregation
>> TTabletLabeledCountersAggregator::Version3Aggregation [GOOD]
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TTabletLabeledCountersAggregator::Version3Aggregation [GOOD]
Test command err:
{ LabeledCountersByGroup { Group: "aba/caba/daba|man" LabeledCounter { Value: 13 AggregateFunc: EAF_SUM Type: CT_SIMPLE NameId: 0 } Delimiter: "|" } LabeledCountersByGroup { Group: "cons/aaa|1|aba/caba/daba|man" LabeledCounter { Value: 13 AggregateFunc: EAF_SUM Type: CT_SIMPLE NameId: 0 } Delimiter: "|" } CounterNames: "value1" }
>> TTabletPipeTest::TestSendAfterOpenUsingTabletWithoutAcceptor
>> TTabletPipeTest::TestOpen
>> TTabletPipeTest::TestShutdown
>> TFlatMetrics::MaximumValue3 [GOOD]
>> TFlatMetrics::MaximumValue4 [GOOD]
>> TResourceBroker::TestErrors [GOOD]
>> TResourceBroker::TestExecutionStat
>> TResourceBroker::TestRealUsage [GOOD]
>> TResourceBroker::TestRandomQueue
>> TTabletPipeTest::TestSendAfterOpenUsingTabletWithoutAcceptor [GOOD]
|92.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TFlatMetrics::MaximumValue4 [GOOD]
>> TResourceBroker::TestExecutionStat [GOOD]
>> TTabletPipeTest::TestShutdown [GOOD]
>> TTabletPipeTest::TestConsumerSidePipeReset [GOOD]
>> TTabletPipeTest::TestInterconnectSession
>> TTabletPipeTest::TestSendWithoutWaitOpenToWrongTablet [GOOD]
>> TTabletPipeTest::TestKillClientBeforServerIdKnown [GOOD]
>> TTabletPipeTest::TestPipeWithVersionInfo [GOOD]
>> TTabletPipeTest::TestOpen [GOOD]
>> TResourceBroker::TestRandomQueue [GOOD]
>> TTabletPipeTest::TestSendAfterReboot [GOOD]
>> TTabletPipeTest::TestRebootUsingTabletWithoutAcceptor [GOOD]
>> TTabletPipeTest::TestTwoNodes [GOOD]
|92.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TTabletPipeTest::TestSendAfterOpenUsingTabletWithoutAcceptor [GOOD]
|92.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TTabletPipeTest::TestSendWithoutWaitOpenToWrongTablet [GOOD]
|92.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TTabletPipeTest::TestPipeWithVersionInfo [GOOD]
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TResourceBroker::TestRandomQueue [GOOD]
Test command err:
2025-05-07T09:02:49.505607Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-1 (1 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.505660Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-1 (1 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.505934Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-18 (18 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.505984Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-19 (19 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.506024Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-20 (20 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.506090Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-25 (25 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.506132Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-28 (28 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.506176Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-31 (31 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.506214Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-32 (32 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.506301Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-38 (38 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.506327Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-39 (39 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.506419Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-44 (44 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.506446Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-45 (45 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.506468Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-46 (46 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.506619Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-55 (55 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.506662Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-57 (57 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.506715Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-61 (61 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.506825Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-69 (69 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.506905Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-73 (73 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.506934Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-74 (74 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.506977Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-77 (77 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.507001Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-78 (78 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.507032Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-80 (80 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.507058Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-81 (81 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.507099Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-83 (83 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.507136Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-85 (85 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.507176Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-88 (88 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.507245Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-92 (92 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.507292Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-94 (94 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.507316Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-95 (95 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.507391Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-99 (99 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.507420Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-100 (100 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.507506Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-104 (104 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.507547Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-106 (106 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.507610Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-111 (111 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.507635Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-112 (112 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.507659Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-113 (113 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.507743Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-120 (120 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.507767Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-121 (121 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.507827Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-124 (124 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.507852Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-125 (125 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.507899Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-128 (128 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.507951Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-131 (131 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.507994Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-132 (132 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.508057Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-135 (135 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.508093Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-137 (137 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.508126Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-138 (138 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.508186Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-142 (142 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.508255Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-146 (146 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.508282Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-147 (147 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.508316Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-148 (148 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.508361Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-151 (151 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.508383Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-152 (152 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.508427Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-154 (154 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.508481Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-156 (156 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.508547Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-159 (159 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.508601Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-163 (163 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.508731Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-174 (174 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.508869Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-184 (184 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.508917Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-186 (186 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.509108Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-201 (201 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.509161Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-204 (204 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.509225Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-208 (208 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.509259Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-210 (210 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.509282Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-211 (211 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.509305Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-212 (212 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.509360Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-215 (215 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05 ... e 'wrong' to default queue
2025-05-07T09:02:49.539051Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-18 (18 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.539080Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-19 (19 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.539113Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-20 (20 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.539149Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-38 (38 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.539183Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-39 (39 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.539227Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-69 (69 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.539245Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-74 (74 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.539293Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-77 (77 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.539310Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-80 (80 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.539366Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-94 (94 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.539394Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-100 (100 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.539444Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-120 (120 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.539490Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-132 (132 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.539506Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-137 (137 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.539563Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-151 (151 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.539587Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-163 (163 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.539622Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-186 (186 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.539647Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-204 (204 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.539693Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-223 (223 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.539749Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-250 (250 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.539776Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-268 (268 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.539829Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-308 (308 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.539877Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-319 (319 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.539948Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-343 (343 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.539978Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-346 (346 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.540004Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-348 (348 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.540031Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-356 (356 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.540067Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-358 (358 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.540092Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-368 (368 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.540123Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-378 (378 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.540152Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-409 (409 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.540179Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-410 (410 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.540228Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-441 (441 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.540253Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-457 (457 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.540299Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-487 (487 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.540323Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-489 (489 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.540350Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-495 (495 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.540384Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-509 (509 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.540420Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-520 (520 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.540460Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-526 (526 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.540479Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-555 (555 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.540537Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-560 (560 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.540588Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-566 (566 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.540614Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-576 (576 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.540652Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-577 (577 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.540694Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-636 (636 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.540718Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-641 (641 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.540743Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-647 (647 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.540779Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-662 (662 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.540823Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-673 (673 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.540850Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-675 (675 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.540895Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-699 (699 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.540943Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-724 (724 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.540977Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-731 (731 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.541014Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-754 (754 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.541070Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-783 (783 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.541137Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-796 (796 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.541176Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-814 (814 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.541211Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-816 (816 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.541242Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-818 (818 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.541306Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-842 (842 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.541333Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-846 (846 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.541383Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-903 (903 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.541452Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-918 (918 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.541498Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-973 (973 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.541538Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-981 (981 by [2:99:2134])' of unknown type 'wrong' to default queue
2025-05-07T09:02:49.541585Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-990 (990 by [2:99:2134])' of unknown type 'wrong' to default queue
|92.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TResourceBroker::TestExecutionStat [GOOD]
|92.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TTabletPipeTest::TestOpen [GOOD]
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TTabletPipeTest::TestKillClientBeforServerIdKnown [GOOD]
Test command err:
2025-05-07T09:02:49.215369Z node 1 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:315: [9437185] Detach
2025-05-07T09:02:49.521461Z node 1 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:338: [9437185] Activate
2025-05-07T09:02:49.537749Z node 1 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:338: [9437185] Activate
2025-05-07T09:02:49.547929Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[9437185] ::Bootstrap [1:128:2154]
2025-05-07T09:02:49.548002Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[9437185] lookup [1:128:2154]
2025-05-07T09:02:49.548260Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:190: TClient[9437185] forward result local node, try to connect [1:128:2154]
2025-05-07T09:02:49.548315Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:671: TClient[9437185]::SendEvent [1:128:2154]
2025-05-07T09:02:49.548390Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:387: TClient[9437185] poison pill while connecting [1:128:2154]
2025-05-07T09:02:49.548413Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:498: TClient[9437185] connect failed [1:128:2154]
2025-05-07T09:02:49.548485Z node 1 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:291: [9437185] Accept Connect Originator# [1:128:2154]
2025-05-07T09:02:49.548584Z node 1 :PIPE_SERVER INFO: tablet_pipe_server.cpp:236: [9437185] Undelivered Target# [1:128:2154] Type# 269877249 Reason# ActorUnknown
2025-05-07T09:02:49.548731Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[9437185] ::Bootstrap [1:131:2156]
2025-05-07T09:02:49.548765Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[9437185] lookup [1:131:2156]
2025-05-07T09:02:49.548820Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:190: TClient[9437185] forward result local node, try to connect [1:131:2156]
2025-05-07T09:02:49.548841Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:671: TClient[9437185]::SendEvent [1:131:2156]
2025-05-07T09:02:49.548868Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:387: TClient[9437185] poison pill while connecting [1:131:2156]
2025-05-07T09:02:49.548885Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:498: TClient[9437185] connect failed [1:131:2156]
2025-05-07T09:02:49.548914Z node 1 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:291: [9437185] Accept Connect Originator# [1:131:2156]
2025-05-07T09:02:49.549021Z node 1 :PIPE_SERVER INFO: tablet_pipe_server.cpp:236: [9437185] Undelivered Target# [1:131:2156] Type# 269877249 Reason# ActorUnknown
2025-05-07T09:02:49.549119Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[9437185] ::Bootstrap [1:133:2158]
2025-05-07T09:02:49.549136Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[9437185] lookup [1:133:2158]
2025-05-07T09:02:49.549170Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:190: TClient[9437185] forward result local node, try to connect [1:133:2158]
2025-05-07T09:02:49.549195Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:671: TClient[9437185]::SendEvent [1:133:2158]
2025-05-07T09:02:49.549227Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:387: TClient[9437185] poison pill while connecting [1:133:2158]
2025-05-07T09:02:49.549250Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:498: TClient[9437185] connect failed [1:133:2158]
2025-05-07T09:02:49.549303Z node 1 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:291: [9437185] Accept Connect Originator# [1:133:2158]
2025-05-07T09:02:49.549354Z node 1 :PIPE_SERVER INFO: tablet_pipe_server.cpp:236: [9437185] Undelivered Target# [1:133:2158] Type# 269877249 Reason# ActorUnknown
|92.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TTabletPipeTest::TestShutdown [GOOD]
>> TTabletPipeTest::TestInterconnectSession [GOOD]
>> TTabletPipeTest::TestRewriteSameNode
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TTabletPipeTest::TestSendAfterReboot [GOOD]
Test command err:
Leader for TabletID 9437184 is [0:0:0] sender: [1:108:2057] recipient: [1:104:2137]
IGNORE Leader for TabletID 9437184 is [0:0:0] sender: [1:108:2057] recipient: [1:104:2137]
Leader for TabletID 9437185 is [0:0:0] sender: [1:109:2057] recipient: [1:106:2138]
IGNORE Leader for TabletID 9437185 is [0:0:0] sender: [1:109:2057] recipient: [1:106:2138]
Leader for TabletID 9437184 is [1:116:2145] sender: [1:117:2057] recipient: [1:104:2137]
Leader for TabletID 9437185 is [1:119:2147] sender: [1:121:2057] recipient: [1:106:2138]
Leader for TabletID 9437184 is [1:116:2145] sender: [1:156:2057] recipient: [1:14:2061]
Leader for TabletID 9437185 is [1:119:2147] sender: [1:158:2057] recipient: [1:14:2061]
Leader for TabletID 9437185 is [1:119:2147] sender: [1:161:2057] recipient: [1:101:2136]
Leader for TabletID 9437185 is [1:119:2147] sender: [1:163:2057] recipient: [1:14:2061]
Leader for TabletID 9437185 is [1:119:2147] sender: [1:165:2057] recipient: [1:164:2176]
Leader for TabletID 9437185 is [1:166:2177] sender: [1:167:2057] recipient: [1:164:2176]
Leader for TabletID 9437185 is [1:166:2177] sender: [1:195:2057] recipient: [1:14:2061]
Leader for TabletID 9437184 is [1:116:2145] sender: [1:198:2057] recipient: [1:100:2135]
Leader for TabletID 9437184 is [1:116:2145] sender: [1:201:2057] recipient: [1:14:2061]
Leader for TabletID 9437184 is [1:116:2145] sender: [1:202:2057] recipient: [1:200:2200]
Leader for TabletID 9437184 is [1:203:2201] sender: [1:204:2057] recipient: [1:200:2200]
Leader for TabletID 9437184 is [1:203:2201] sender: [1:232:2057] recipient: [1:14:2061]
|92.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TTabletPipeTest::TestTwoNodes [GOOD]
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TTabletPipeTest::TestRebootUsingTabletWithoutAcceptor [GOOD]
Test command err:
Leader for TabletID 9437184 is [0:0:0] sender: [1:108:2057] recipient: [1:104:2137]
IGNORE Leader for TabletID 9437184 is [0:0:0] sender: [1:108:2057] recipient: [1:104:2137]
Leader for TabletID 9437185 is [0:0:0] sender: [1:109:2057] recipient: [1:106:2138]
IGNORE Leader for TabletID 9437185 is [0:0:0] sender: [1:109:2057] recipient: [1:106:2138]
Leader for TabletID 9437184 is [1:116:2145] sender: [1:117:2057] recipient: [1:104:2137]
Leader for TabletID 9437185 is [1:119:2147] sender: [1:121:2057] recipient: [1:106:2138]
Leader for TabletID 9437184 is [1:116:2145] sender: [1:156:2057] recipient: [1:14:2061]
Leader for TabletID 9437185 is [1:119:2147] sender: [1:158:2057] recipient: [1:14:2061]
Leader for TabletID 9437185 is [1:119:2147] sender: [1:160:2057] recipient: [1:101:2136]
Leader for TabletID 9437185 is [1:119:2147] sender: [1:163:2057] recipient: [1:14:2061]
Leader for TabletID 9437185 is [1:119:2147] sender: [1:165:2057] recipient: [1:164:2176]
Leader for TabletID 9437185 is [1:166:2177] sender: [1:167:2057] recipient: [1:164:2176]
Leader for TabletID 9437185 is [1:166:2177] sender: [1:195:2057] recipient: [1:14:2061]
Leader for TabletID 9437184 is [1:116:2145] sender: [1:198:2057] recipient: [1:100:2135]
Leader for TabletID 9437184 is [1:116:2145] sender: [1:201:2057] recipient: [1:14:2061]
Leader for TabletID 9437184 is [1:116:2145] sender: [1:202:2057] recipient: [1:200:2200]
Leader for TabletID 9437184 is [1:203:2201] sender: [1:204:2057] recipient: [1:200:2200]
Leader for TabletID 9437184 is [1:203:2201] sender: [1:232:2057] recipient: [1:14:2061]
|92.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_olap_reboots/ydb-core-tx-schemeshard-ut_olap_reboots
|92.2%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_olap_reboots/ydb-core-tx-schemeshard-ut_olap_reboots
|92.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_olap_reboots/ydb-core-tx-schemeshard-ut_olap_reboots
|92.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TTabletPipeTest::TestInterconnectSession [GOOD]
>> TPipeCacheTest::TestAutoConnect [GOOD]
>> TTabletPipeTest::TestRewriteSameNode [GOOD]
>> TFlatMetrics::TimeSeriesAvg16 [GOOD]
>> TFlatMetrics::TimeSeriesAVG [GOOD]
|92.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/address_classification/ut/unittest
>> EraseRowsTests::ConditionalEraseRowsShouldEraseOnDyNumberMicroSeconds
>> DistributedEraseTests::ConditionalEraseRowsShouldErase
>> DistributedEraseTests::DistributedEraseTxShouldFailOnVariousErrors
>> EraseRowsTests::ConditionalEraseRowsShouldEraseOnUint32
>> DistributedEraseTests::ConditionalEraseRowsShouldEraseOnUint32
>> EraseRowsTests::ConditionalEraseRowsShouldEraseOnDyNumberSeconds
>> DistributedEraseTests::ConditionalEraseRowsShouldNotErase
>> EraseRowsTests::ConditionalEraseRowsShouldNotEraseModifiedRows
>> DistributedEraseTests::ConditionalEraseRowsShouldSuccessOnShardedIndex
>> EraseRowsTests::EraseRowsShouldSuccess
>> EraseRowsTests::ConditionalEraseRowsShouldEraseOnUint64Seconds
|92.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/address_classification/ut/unittest
|92.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/address_classification/ut/unittest
>> EraseRowsTests::ConditionalEraseRowsShouldNotErase
>> EraseRowsTests::ConditionalEraseRowsShouldEraseOnUint64MilliSeconds
>> EraseRowsTests::ConditionalEraseRowsShouldEraseOnPgInt8Seconds
>> EraseRowsTests::ConditionalEraseRowsShouldEraseOnPgInt4Seconds
>> TNetClassifierTest::TestInitFromRemoteSource
|92.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TPipeCacheTest::TestAutoConnect [GOOD]
|92.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_serverless_reboots/ydb-core-tx-schemeshard-ut_serverless_reboots
|92.2%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_serverless_reboots/ydb-core-tx-schemeshard-ut_serverless_reboots
>> TTabletPipeTest::TestSendBeforeBootTarget [GOOD]
>> TPipeTrackerTest::TestSimpleAdd [GOOD]
>> TResourceBroker::TestAutoTaskId
|92.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_serverless_reboots/ydb-core-tx-schemeshard-ut_serverless_reboots
|92.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TTabletPipeTest::TestRewriteSameNode [GOOD]
------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> YdbOlapStore::LogPagingAfter [GOOD]
Test command err:
2025-05-07T08:59:22.463412Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501625796721554124:2206];send_to=[0:7307199536658146131:7762515];
2025-05-07T08:59:22.463565Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message;
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0028bd/r3tmp/tmp0A2Bqu/pdisk_1.dat
2025-05-07T08:59:23.018085Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded
2025-05-07T08:59:23.056699Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2025-05-07T08:59:23.060469Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting
2025-05-07T08:59:23.067668Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected
TServer::EnableGrpc on GrpcPort 14215, node 1
2025-05-07T08:59:23.376045Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe)
2025-05-07T08:59:23.376067Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe)
2025-05-07T08:59:23.376074Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe)
2025-05-07T08:59:23.376193Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration
TClient is connected to server localhost:32284
WaitRootIsUp 'Root'...
TClient::Ls request: Root
TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED)
WaitRootIsUp 'Root' success.
2025-05-07T08:59:23.793636Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480
waiting...
TClient is connected to server localhost:32284
2025-05-07T08:59:24.127503Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnStore, opId: 281474976710658:0, at schemeshard: 72057594046644480
waiting...
2025-05-07T08:59:24.362294Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7501625805311489648:2326];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0;
2025-05-07T08:59:24.362618Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7501625805311489648:2326];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules;
2025-05-07T08:59:24.362923Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7501625805311489648:2326];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks;
2025-05-07T08:59:24.363123Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7501625805311489648:2326];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner;
2025-05-07T08:59:24.363222Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7501625805311489648:2326];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId;
2025-05-07T08:59:24.363621Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7501625805311489648:2326];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanInsertionDedup;
2025-05-07T08:59:24.363750Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7501625805311489648:2326];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer;
2025-05-07T08:59:24.363906Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7501625805311489648:2326];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks;
2025-05-07T08:59:24.364029Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7501625805311489648:2326];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2;
2025-05-07T08:59:24.364472Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7501625805311489648:2326];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks;
2025-05-07T08:59:24.364625Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7501625805311489648:2326];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot;
2025-05-07T08:59:24.364756Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7501625805311489648:2326];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta;
2025-05-07T08:59:24.437659Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7501625805311489644:2325];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0;
2025-05-07T08:59:24.437731Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7501625805311489644:2325];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules;
2025-05-07T08:59:24.437998Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7501625805311489644:2325];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks;
2025-05-07T08:59:24.438117Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7501625805311489644:2325];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner;
2025-05-07T08:59:24.438209Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7501625805311489644:2325];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId;
2025-05-07T08:59:24.439831Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7501625805311489644:2325];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanInsertionDedup;
2025-05-07T08:59:24.440005Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7501625805311489644:2325];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer;
2025-05-07T08:59:24.440103Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7501625805311489644:2325];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks;
2025-05-07T08:59:24.440263Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7501625805311489644:2325];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2;
2025-05-07T08:59:24.440368Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7501625805311489644:2325];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks;
2025-05-07T08:59:24.440525Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7501625805311489644:2325];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot;
2025-05-07T08:59:24.440636Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7501625805311489644:2325];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta;
2025-05-07T08:59:24.530646Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7501625805311489642:2324];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0;
2025-05-07T08:59:24.530726Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7501625805311489642:2324];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules;
2025-05-07T08:59:24.530985Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7501625805311489642:2324];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks;
2025-05-07T08:59:24.531151Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7501625805311489642:2324];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner;
2025-05-07T08:59:24.531272Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7501625805311489642:2324];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId;
2025-05-07T08:59:24.531377Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7501625805311489642:2324];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanInsertionDedup;
2025-05-07T08:59:24.531483Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7501625805311489642:2324];tablet_id=7207518622403789 ... gwYmItY2ExMDk4Y2MtZTE0Mjc2NTgtODJjMDZkMWY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, got execution state from compute actor: [28:7501626664438520203:3190], task: 64, state: COMPUTE_STATE_FINISHED, stats: { CpuTimeUs: 1398 Tasks { TaskId: 64 CpuTimeUs: 595 FinishTimeMs: 1746608565011 Tables { TablePath: "/Root/OlapStore/log1" } ComputeCpuTimeUs: 35 BuildCpuTimeUs: 560 Sources { IngressName: "CS" Ingress { } } HostName: "ghrun-sykirh5vua" NodeId: 28 CreateTimeMs: 1746608564922 UpdateTimeMs: 1746608565011 } MaxMemoryUsage: 1048576 }
2025-05-07T09:02:45.027356Z node 28 :KQP_EXECUTER INFO: kqp_planner.cpp:688: TxId: 281474976710670. Ctx: { TraceId: 01jtmzq9pje5s621sqhtkqxs2n, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=28&id=OTdlMjgwYmItY2ExMDk4Y2MtZTE0Mjc2NTgtODJjMDZkMWY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Compute actor has finished execution: [28:7501626664438520203:3190]
2025-05-07T09:02:45.027383Z node 28 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:644: ActorId: [28:7501626664438520113:3117] TxId: 281474976710670. Ctx: { TraceId: 01jtmzq9pje5s621sqhtkqxs2n, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=28&id=OTdlMjgwYmItY2ExMDk4Y2MtZTE0Mjc2NTgtODJjMDZkMWY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Waiting for: CA [28:7501626664438520205:3191], CA [28:7501626664438520206:3192],
2025-05-07T09:02:45.027636Z node 28 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:348: ActorId: [28:7501626664438520113:3117] TxId: 281474976710670. Ctx: { TraceId: 01jtmzq9pje5s621sqhtkqxs2n, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=28&id=OTdlMjgwYmItY2ExMDk4Y2MtZTE0Mjc2NTgtODJjMDZkMWY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Send TEvStreamData to [28:7501626660143552768:3117], seqNo: 1, nRows: 0
2025-05-07T09:02:45.027755Z node 28 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:433: ActorId: [28:7501626664438520113:3117] TxId: 281474976710670. Ctx: { TraceId: 01jtmzq9pje5s621sqhtkqxs2n, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=28&id=OTdlMjgwYmItY2ExMDk4Y2MtZTE0Mjc2NTgtODJjMDZkMWY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, got execution state from compute actor: [28:7501626664438520206:3192], task: 66, state: COMPUTE_STATE_EXECUTING, stats: { CpuTimeUs: 912 Tasks { TaskId: 66 StageId: 2 CpuTimeUs: 187 FinishTimeMs: 1746608565011 ComputeCpuTimeUs: 49 BuildCpuTimeUs: 138 HostName: "ghrun-sykirh5vua" NodeId: 28 CreateTimeMs: 1746608564936 CurrentWaitOutputTimeUs: 50 UpdateTimeMs: 1746608565011 } MaxMemoryUsage: 1048576 }
2025-05-07T09:02:45.027798Z node 28 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:644: ActorId: [28:7501626664438520113:3117] TxId: 281474976710670. Ctx: { TraceId: 01jtmzq9pje5s621sqhtkqxs2n, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=28&id=OTdlMjgwYmItY2ExMDk4Y2MtZTE0Mjc2NTgtODJjMDZkMWY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Waiting for: CA [28:7501626664438520205:3191], CA [28:7501626664438520206:3192],
2025-05-07T09:02:45.027867Z node 28 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:433: ActorId: [28:7501626664438520113:3117] TxId: 281474976710670. Ctx: { TraceId: 01jtmzq9pje5s621sqhtkqxs2n, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=28&id=OTdlMjgwYmItY2ExMDk4Y2MtZTE0Mjc2NTgtODJjMDZkMWY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, got execution state from compute actor: [28:7501626664438520205:3191], task: 65, state: COMPUTE_STATE_FINISHED, stats: { CpuTimeUs: 8621 DurationUs: 18000 Tasks { TaskId: 65 StageId: 1 CpuTimeUs: 534 FinishTimeMs: 1746608565011 ComputeCpuTimeUs: 201 BuildCpuTimeUs: 333 HostName: "ghrun-sykirh5vua" NodeId: 28 StartTimeMs: 1746608564993 CreateTimeMs: 1746608564923 UpdateTimeMs: 1746608565012 } MaxMemoryUsage: 1048576 }
2025-05-07T09:02:45.027900Z node 28 :KQP_EXECUTER INFO: kqp_planner.cpp:688: TxId: 281474976710670. Ctx: { TraceId: 01jtmzq9pje5s621sqhtkqxs2n, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=28&id=OTdlMjgwYmItY2ExMDk4Y2MtZTE0Mjc2NTgtODJjMDZkMWY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Compute actor has finished execution: [28:7501626664438520205:3191]
2025-05-07T09:02:45.027926Z node 28 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:644: ActorId: [28:7501626664438520113:3117] TxId: 281474976710670. Ctx: { TraceId: 01jtmzq9pje5s621sqhtkqxs2n, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=28&id=OTdlMjgwYmItY2ExMDk4Y2MtZTE0Mjc2NTgtODJjMDZkMWY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Waiting for: CA [28:7501626664438520206:3192],
2025-05-07T09:02:45.027985Z node 28 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1797: SessionId: ydb://session/3?node_id=28&id=OTdlMjgwYmItY2ExMDk4Y2MtZTE0Mjc2NTgtODJjMDZkMWY=, ActorId: [28:7501626660143552768:3117], ActorState: ExecuteState, TraceId: 01jtmzq9pje5s621sqhtkqxs2n, Forwarded TEvStreamData to [28:7501626660143552766:3116]
2025-05-07T09:02:45.028566Z node 28 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:414: TxId: 281474976710670, send ack to channelId: 66, seqNo: 1, enough: 0, freeSpace: 8388469, to: [28:7501626664438520250:3192]
2025-05-07T09:02:45.028634Z node 28 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:143: SelfId: [28:7501626664438520206:3192], TxId: 281474976710670, task: 66. Ctx: { TraceId : 01jtmzq9pje5s621sqhtkqxs2n. SessionId : ydb://session/3?node_id=28&id=OTdlMjgwYmItY2ExMDk4Y2MtZTE0Mjc2NTgtODJjMDZkMWY=. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. CA StateFunc 271646922
2025-05-07T09:02:45.028740Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976710670, task: 66. Tasks execution finished, don't wait for ack delivery in input channelId: 65, seqNo: [1]
2025-05-07T09:02:45.028760Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:675: TxId: 281474976710670, task: 66. Tasks execution finished
2025-05-07T09:02:45.028788Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:510: SelfId: [28:7501626664438520206:3192], TxId: 281474976710670, task: 66. Ctx: { TraceId : 01jtmzq9pje5s621sqhtkqxs2n. SessionId : ydb://session/3?node_id=28&id=OTdlMjgwYmItY2ExMDk4Y2MtZTE0Mjc2NTgtODJjMDZkMWY=. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Compute state finished. All channels and sinks finished
2025-05-07T09:02:45.028931Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:494: TxId: 281474976710670, task: 66. pass away
2025-05-07T09:02:45.029018Z node 28 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:433: ActorId: [28:7501626664438520113:3117] TxId: 281474976710670. Ctx: { TraceId: 01jtmzq9pje5s621sqhtkqxs2n, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=28&id=OTdlMjgwYmItY2ExMDk4Y2MtZTE0Mjc2NTgtODJjMDZkMWY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, got execution state from compute actor: [28:7501626664438520206:3192], task: 66, state: COMPUTE_STATE_FINISHED, stats: { CpuTimeUs: 1465 Tasks { TaskId: 66 StageId: 2 CpuTimeUs: 193 FinishTimeMs: 1746608565028 ComputeCpuTimeUs: 55 BuildCpuTimeUs: 138 HostName: "ghrun-sykirh5vua" NodeId: 28 CreateTimeMs: 1746608564936 UpdateTimeMs: 1746608565028 } MaxMemoryUsage: 1048576 }
2025-05-07T09:02:45.029062Z node 28 :KQP_EXECUTER INFO: kqp_planner.cpp:688: TxId: 281474976710670. Ctx: { TraceId: 01jtmzq9pje5s621sqhtkqxs2n, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=28&id=OTdlMjgwYmItY2ExMDk4Y2MtZTE0Mjc2NTgtODJjMDZkMWY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Compute actor has finished execution: [28:7501626664438520206:3192]
2025-05-07T09:02:45.029100Z node 28 :KQP_COMPUTE DEBUG: log.cpp:784: fline=kqp_compute_actor_factory.cpp:66;problem=finish_compute_actor;tx_id=281474976710670;task_id=66;success=1;message={ : Error: COMPUTE_STATE_FINISHED };
2025-05-07T09:02:45.029200Z node 28 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:2140: ActorId: [28:7501626664438520113:3117] TxId: 281474976710670. Ctx: { TraceId: 01jtmzq9pje5s621sqhtkqxs2n, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=28&id=OTdlMjgwYmItY2ExMDk4Y2MtZTE0Mjc2NTgtODJjMDZkMWY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. terminate execution.
2025-05-07T09:02:45.029265Z node 28 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:838: ActorId: [28:7501626664438520113:3117] TxId: 281474976710670. Ctx: { TraceId: 01jtmzq9pje5s621sqhtkqxs2n, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=28&id=OTdlMjgwYmItY2ExMDk4Y2MtZTE0Mjc2NTgtODJjMDZkMWY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Resource usage for last stat interval: ComputeTime: 0.083318s ReadRows: 0 ReadBytes: 0 ru: 55 rate limiter was not found force flag: 1
2025-05-07T09:02:45.029360Z node 28 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1699: SessionId: ydb://session/3?node_id=28&id=OTdlMjgwYmItY2ExMDk4Y2MtZTE0Mjc2NTgtODJjMDZkMWY=, ActorId: [28:7501626660143552768:3117], ActorState: ExecuteState, TraceId: 01jtmzq9pje5s621sqhtkqxs2n, TEvTxResponse, CurrentTx: 2/2 response.status: SUCCESS
2025-05-07T09:02:45.029724Z node 28 :KQP_SESSION INFO: kqp_session_actor.cpp:1958: SessionId: ydb://session/3?node_id=28&id=OTdlMjgwYmItY2ExMDk4Y2MtZTE0Mjc2NTgtODJjMDZkMWY=, ActorId: [28:7501626660143552768:3117], ActorState: ExecuteState, TraceId: 01jtmzq9pje5s621sqhtkqxs2n, txInfo Status: Active Kind: ReadOnly TotalDuration: 0 ServerDuration: 157.978 QueriesCount: 1
2025-05-07T09:02:45.029796Z node 28 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2113: SessionId: ydb://session/3?node_id=28&id=OTdlMjgwYmItY2ExMDk4Y2MtZTE0Mjc2NTgtODJjMDZkMWY=, ActorId: [28:7501626660143552768:3117], ActorState: ExecuteState, TraceId: 01jtmzq9pje5s621sqhtkqxs2n, Create QueryResponse for action: QUERY_ACTION_EXECUTE with SUCCESS status
2025-05-07T09:02:45.029905Z node 28 :KQP_SESSION INFO: kqp_session_actor.cpp:2473: SessionId: ydb://session/3?node_id=28&id=OTdlMjgwYmItY2ExMDk4Y2MtZTE0Mjc2NTgtODJjMDZkMWY=, ActorId: [28:7501626660143552768:3117], ActorState: ExecuteState, TraceId: 01jtmzq9pje5s621sqhtkqxs2n, Cleanup start, isFinal: 1 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0
2025-05-07T09:02:45.029951Z node 28 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2534: SessionId: ydb://session/3?node_id=28&id=OTdlMjgwYmItY2ExMDk4Y2MtZTE0Mjc2NTgtODJjMDZkMWY=, ActorId: [28:7501626660143552768:3117], ActorState: ExecuteState, TraceId: 01jtmzq9pje5s621sqhtkqxs2n, EndCleanup, isFinal: 1
2025-05-07T09:02:45.030046Z node 28 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2270: SessionId: ydb://session/3?node_id=28&id=OTdlMjgwYmItY2ExMDk4Y2MtZTE0Mjc2NTgtODJjMDZkMWY=, ActorId: [28:7501626660143552768:3117], ActorState: ExecuteState, TraceId: 01jtmzq9pje5s621sqhtkqxs2n, Sent query response back to proxy, proxyRequestId: 5, proxyId: [28:7501626612898908802:2280]
2025-05-07T09:02:45.030090Z node 28 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2546: SessionId: ydb://session/3?node_id=28&id=OTdlMjgwYmItY2ExMDk4Y2MtZTE0Mjc2NTgtODJjMDZkMWY=, ActorId: [28:7501626660143552768:3117], ActorState: unknown state, TraceId: 01jtmzq9pje5s621sqhtkqxs2n, Cleanup temp tables: 0
2025-05-07T09:02:45.033592Z node 28 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1746608564000, txId: 18446744073709551615] shutting down
2025-05-07T09:02:45.033771Z node 28 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2637: SessionId: ydb://session/3?node_id=28&id=OTdlMjgwYmItY2ExMDk4Y2MtZTE0Mjc2NTgtODJjMDZkMWY=, ActorId: [28:7501626660143552768:3117], ActorState: unknown state, TraceId: 01jtmzq9pje5s621sqhtkqxs2n, Session actor destroyed
>> TTabletPipeTest::TestPipeConnectToHint
>> TTabletCountersAggregator::IntegralPercentileAggregationHistNamedSingleBucket
>> TResourceBroker::TestQueueWithConfigure
>> TTabletPipeTest::TestSendWithoutWaitOpen
>> TTabletResolver::TabletResolvePriority [GOOD]
|92.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TFlatMetrics::TimeSeriesAVG [GOOD]
>> TTabletPipeTest::TestTwoNodesAndRebootOfProducer
>> TResourceBroker::TestOverusage
>> TTabletCountersAggregator::IntegralPercentileAggregationHistNamedSingleBucket [GOOD]
>> TTabletCountersAggregator::IntegralPercentileAggregationHistNamedNoOverflowCheck
>> TBlockBlobStorageTest::DelayedErrorsNotIgnored
>> TTabletPipeTest::TestTwoNodesAndRebootOfConsumer
>> TResourceBroker::TestAutoTaskId [GOOD]
>> TResourceBrokerConfig::UpdateTasks [GOOD]
>> TResourceBrokerInstant::Test
>> TTabletPipeTest::TestPipeConnectToHint [GOOD]
>> TResourceBroker::TestQueueWithConfigure [GOOD]
>> TResourceBroker::TestOverusageDifferentResources
|92.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TTabletResolver::TabletResolvePriority [GOOD]
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TTabletPipeTest::TestSendBeforeBootTarget [GOOD]
Test command err:
Leader for TabletID 9437184 is [0:0:0] sender: [1:104:2057] recipient: [1:102:2136]
IGNORE Leader for TabletID 9437184 is [0:0:0] sender: [1:104:2057] recipient: [1:102:2136]
Leader for TabletID 9437184 is [1:108:2140] sender: [1:109:2057] recipient: [1:102:2136]
Leader for TabletID 9437184 is [1:108:2140] sender: [1:128:2057] recipient: [1:14:2061]
Leader for TabletID 9437185 is [0:0:0] sender: [1:163:2057] recipient: [1:161:2168]
IGNORE Leader for TabletID 9437185 is [0:0:0] sender: [1:163:2057] recipient: [1:161:2168]
Leader for TabletID 9437185 is [1:167:2172] sender: [1:168:2057] recipient: [1:161:2168]
Leader for TabletID 9437185 is [1:167:2172] sender: [1:187:2057] recipient: [1:14:2061]
>> TTabletPipeTest::TestSendWithoutWaitOpen [GOOD]
>> TTabletCountersAggregator::IntegralPercentileAggregationHistNamedNoOverflowCheck [GOOD]
>> TResourceBroker::TestOverusage [GOOD]
>> TResourceBroker::TestNotifyActorDied
>> TTabletCountersAggregator::IntegralPercentileAggregationRegularCheckSingleTablet
>> TResourceBrokerInstant::Test [GOOD]
>> TResourceBroker::TestOverusageDifferentResources [GOOD]
|92.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TResourceBroker::TestAutoTaskId [GOOD]
|92.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TTabletPipeTest::TestSendWithoutWaitOpen [GOOD]
|92.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TTabletPipeTest::TestPipeConnectToHint [GOOD]
>> TTabletPipeTest::TestTwoNodesAndRebootOfProducer [GOOD]
>> TTabletCountersAggregator::IntegralPercentileAggregationHistNamed
>> TTabletCountersAggregator::IntegralPercentileAggregationRegularCheckSingleTablet [GOOD]
>> TTabletCountersAggregator::IntegralPercentileAggregationRegular
>> TResourceBroker::TestNotifyActorDied [GOOD]
|92.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >>
TTabletCountersAggregator::IntegralPercentileAggregationHistNamedNoOverflowCheck [GOOD] >> TPipeTrackerTest::TestShareTablet [GOOD] >> TPipeTrackerTest::TestIdempotentAttachDetach [GOOD] >> TTabletPipeTest::TestTwoNodesAndRebootOfConsumer [GOOD] >> TTabletPipeTest::TestSendAfterOpen >> TTabletCountersAggregator::IntegralPercentileAggregationHistNamed [GOOD] >> TTabletCountersAggregator::ColumnShardCounters >> TTabletPipeTest::TestConnectReject >> TTabletCountersAggregator::IntegralPercentileAggregationRegular [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TResourceBroker::TestOverusageDifferentResources [GOOD] Test command err: 2025-05-07T09:02:52.083839Z node 1 :RESOURCE_BROKER ERROR: resource_broker.cpp:1240: Configure result: Success: false Message: "task \'compaction1\' uses unknown queue \'queue_default1\'" 2025-05-07T09:02:52.084063Z node 1 :RESOURCE_BROKER ERROR: resource_broker.cpp:1240: Configure result: Success: false Message: "task \'unknown\' is required" 2025-05-07T09:02:52.084232Z node 1 :RESOURCE_BROKER ERROR: resource_broker.cpp:1240: Configure result: Success: false Message: "task \'unknown\' uses unknown queue \'queue_default\'" |92.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TResourceBrokerInstant::Test [GOOD] >> TResourceBrokerConfig::UpdateQueues [GOOD] >> TResourceBrokerConfig::UpdateResourceLimit [GOOD] |92.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TTabletPipeTest::TestTwoNodesAndRebootOfProducer [GOOD] >> TResourceBrokerInstant::TestErrors >> TTabletCountersAggregator::ColumnShardCounters [GOOD] |92.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TPipeTrackerTest::TestIdempotentAttachDetach [GOOD] |92.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TResourceBroker::TestNotifyActorDied [GOOD] >> TPipeTrackerTest::TestAddSameTabletTwice [GOOD] >> TPipeTrackerTest::TestAddTwoTablets [GOOD] >> TTabletPipeTest::TestSendAfterOpen [GOOD] |92.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TTabletCountersAggregator::IntegralPercentileAggregationRegular [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TTabletPipeTest::TestTwoNodesAndRebootOfConsumer [GOOD] Test command err: Leader for TabletID 9437184 is [0:0:0] sender: [1:159:2058] recipient: [1:157:2137] IGNORE Leader for TabletID 9437184 is [0:0:0] sender: [1:159:2058] recipient: [1:157:2137] Leader for TabletID 9437184 is [1:165:2141] sender: [1:166:2058] recipient: [1:157:2137] Leader for TabletID 9437185 is [0:0:0] sender: [2:169:2049] recipient: [2:160:2095] IGNORE Leader for TabletID 9437185 is [0:0:0] sender: [2:169:2049] recipient: [2:160:2095] Leader for TabletID 9437185 is [2:181:2098] sender: [2:183:2049] recipient: [2:160:2095] Leader for TabletID 9437184 is [1:165:2141] sender: [1:209:2058] recipient: [1:15:2062] Leader for TabletID 9437185 is [2:181:2098] sender: [1:211:2058] recipient: [1:15:2062] Leader for TabletID 9437185 is [2:181:2098] sender: [2:213:2049] recipient: [2:42:2053] Leader for TabletID 9437185 is [2:181:2098] sender: [2:214:2049] recipient: [2:154:2094] Leader for TabletID 9437185 is [2:181:2098] sender: [1:217:2058] recipient: [1:15:2062] Leader for TabletID 9437185 is [2:181:2098] sender: [2:218:2049] recipient: [2:42:2053] Leader for TabletID 9437185 is [2:181:2098] sender: [2:220:2049] recipient: [2:219:2111] Leader for TabletID 9437185 is 
[2:221:2112] sender: [2:222:2049] recipient: [2:219:2111] Leader for TabletID 9437185 is [2:221:2112] sender: [1:251:2058] recipient: [1:15:2062] |92.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TResourceBrokerConfig::UpdateResourceLimit [GOOD] >> TTabletPipeTest::TestConnectReject [GOOD] >> TTabletPipeTest::TestClientDisconnectAfterPipeOpen >> TResourceBrokerInstant::TestErrors [GOOD] >> TResourceBrokerInstant::TestMerge |92.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TPipeTrackerTest::TestAddTwoTablets [GOOD] |92.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TTabletCountersAggregator::ColumnShardCounters [GOOD] >> TTabletCountersAggregator::IntegralPercentileAggregationRegularNoOverflowCheck >> BootstrapperTest::LoneBootstrapper >> TTabletLabeledCountersAggregator::HeavyAggregation [GOOD] >> TTabletLabeledCountersAggregator::DbAggregation |92.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TTabletPipeTest::TestSendAfterOpen [GOOD] >> TTabletCountersAggregator::IntegralPercentileAggregationRegularNoOverflowCheck [GOOD] >> TTabletCountersPercentile::SingleBucket [GOOD] >> TResourceBrokerInstant::TestMerge [GOOD] >> TTabletLabeledCountersAggregator::DbAggregation [GOOD] >> TTabletPipeTest::TestClientDisconnectAfterPipeOpen [GOOD] >> BootstrapperTest::RestartUnavailableTablet >> BootstrapperTest::LoneBootstrapper [GOOD] >> BootstrapperTest::MultipleBootstrappers >> BootstrapperTest::KeepExistingTablet >> TResourceBroker::TestCounters >> TTabletResolver::NodeProblem >> TBlockBlobStorageTest::DelayedErrorsNotIgnored [GOOD] >> TFlatMetrics::DecayingAverageAvg [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TResourceBrokerInstant::TestMerge [GOOD] Test command err: 2025-05-07T09:02:53.671714Z node 1 :RESOURCE_BROKER ERROR: resource_broker.cpp:1080: FinishTaskInstant failed for task 2: cannot finish unknown task |92.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TTabletCountersPercentile::SingleBucket [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TTabletPipeTest::TestClientDisconnectAfterPipeOpen [GOOD] Test command err: 2025-05-07T09:02:53.960669Z node 3 :PIPE_SERVER ERROR: tablet_pipe_server.cpp:228: [9437185] NodeDisconnected NodeId# 2 >> TResourceBroker::TestCounters [GOOD] >> TResourceBroker::TestChangeTaskType ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TFlatMetrics::DecayingAverageAvg [GOOD] Test command err: ... waiting for all block results ... passing block result OK for [1:101:2135] ... blocking block result NO_GROUP for [1:102:2135] ... blocking block result NO_GROUP for [1:103:2135] ... 
blocking block result NO_GROUP for [1:104:2135] >> TResourceBroker::TestChangeTaskType [GOOD] >> BootstrapperTest::RestartUnavailableTablet [GOOD] >> BootstrapperTest::UnavailableStateStorage >> TTabletResolver::NodeProblem [GOOD] |92.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_replication_reboots/ydb-core-tx-schemeshard-ut_replication_reboots |92.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_replication_reboots/ydb-core-tx-schemeshard-ut_replication_reboots |92.2%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_replication_reboots/ydb-core-tx-schemeshard-ut_replication_reboots ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TTabletLabeledCountersAggregator::DbAggregation [GOOD] Test command err: 2025-05-07T09:02:48.237116Z node 1 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1983: aggregator new request V2 [1:7:2054] 2025-05-07T09:02:48.250844Z node 1 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1981: aggregator new request V2 Initiator [1:7:2054] self [1:8:2055] worker 0 2025-05-07T09:02:48.250927Z node 1 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1981: aggregator new request V2 Initiator [1:7:2054] self [1:9:2056] worker 1 2025-05-07T09:02:48.250959Z node 1 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1981: aggregator new request V2 Initiator [1:7:2054] self [1:10:2057] worker 2 2025-05-07T09:02:48.250986Z node 1 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1981: aggregator new request V2 Initiator [1:7:2054] self [1:11:2058] worker 3 2025-05-07T09:02:48.251014Z node 1 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1981: aggregator new request V2 Initiator [1:7:2054] self [1:12:2059] worker 4 2025-05-07T09:02:48.251058Z node 1 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1981: aggregator new request V2 Initiator [1:7:2054] self [1:13:2060] worker 5 2025-05-07T09:02:48.251103Z node 1 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1981: aggregator new request V2 Initiator [1:7:2054] self [1:14:2061] worker 6 2025-05-07T09:02:48.251130Z node 1 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1981: aggregator new request V2 Initiator [1:7:2054] self [1:15:2062] worker 7 2025-05-07T09:02:48.251155Z node 1 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1981: aggregator new request V2 Initiator [1:7:2054] self [1:16:2063] worker 8 2025-05-07T09:02:48.251178Z node 1 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1981: aggregator new request V2 Initiator [1:7:2054] self [1:17:2064] worker 9 Sending message to [1:9:2056] from [1:7:2054] id 1 Sending message to [1:10:2057] from [1:7:2054] id 2 Sending message to [1:11:2058] from [1:7:2054] id 3 Sending message to [1:12:2059] from [1:7:2054] id 4 Sending message to [1:13:2060] from [1:7:2054] id 5 Sending message to [1:14:2061] from [1:7:2054] id 6 Sending message to [1:15:2062] from [1:7:2054] id 7 Sending message to [1:16:2063] from [1:7:2054] id 8 Sending message to [1:17:2064] from [1:7:2054] id 9 Sending message to [1:8:2055] from [1:7:2054] id 10 2025-05-07T09:02:48.801876Z node 1 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1964: aggregator actor request to node 2 [1:10:2057] 2025-05-07T09:02:48.801947Z node 1 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1964: aggregator actor request to node 3 [1:11:2058] 2025-05-07T09:02:48.801996Z node 1 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1964: aggregator actor request 
to node 4 [1:12:2059] 2025-05-07T09:02:48.802033Z node 1 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1964: aggregator actor request to node 5 [1:13:2060] 2025-05-07T09:02:48.802092Z node 1 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1964: aggregator actor request to node 6 [1:14:2061] 2025-05-07T09:02:48.802118Z node 1 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1964: aggregator actor request to node 7 [1:15:2062] 2025-05-07T09:02:48.802147Z node 1 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1964: aggregator actor request to node 8 [1:16:2063] 2025-05-07T09:02:48.802174Z node 1 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1964: aggregator actor request to node 9 [1:17:2064] 2025-05-07T09:02:48.802426Z node 1 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1964: aggregator actor request to node 10 [1:8:2055] 2025-05-07T09:02:48.802458Z node 1 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1964: aggregator actor request to node 1 [1:9:2056] 2025-05-07T09:02:48.802487Z node 1 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 6 [1:14:2061] 2025-05-07T09:02:48.803446Z node 1 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 6 [1:14:2061] 2025-05-07T09:02:48.825106Z node 1 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2072: aggregator request processed [1:14:2061] Initiator [1:7:2054] 2025-05-07T09:02:48.838011Z node 1 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 7 [1:15:2062] 2025-05-07T09:02:48.838999Z node 1 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 7 [1:15:2062] 2025-05-07T09:02:48.861223Z node 1 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2072: aggregator request processed [1:15:2062] Initiator [1:7:2054] 2025-05-07T09:02:48.877915Z node 1 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 8 [1:16:2063] 2025-05-07T09:02:48.878883Z node 1 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 8 [1:16:2063] 2025-05-07T09:02:48.900815Z node 1 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2072: aggregator request processed [1:16:2063] Initiator [1:7:2054] 2025-05-07T09:02:48.913796Z node 1 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 9 [1:17:2064] 2025-05-07T09:02:48.914796Z node 1 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 9 [1:17:2064] 2025-05-07T09:02:48.936403Z node 1 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2072: aggregator request processed [1:17:2064] Initiator [1:7:2054] 2025-05-07T09:02:48.949465Z node 1 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 6 [1:7:2054] 2025-05-07T09:02:48.949555Z node 1 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 6 [1:7:2054] 2025-05-07T09:02:48.954679Z node 1 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 10 [1:8:2055] 2025-05-07T09:02:48.956415Z node 1 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 10 [1:8:2055] 2025-05-07T09:02:48.996634Z node 1 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2072: aggregator request 
processed [1:8:2055] Initiator [1:7:2054] 2025-05-07T09:02:49.019609Z node 1 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 1 [1:9:2056] 2025-05-07T09:02:49.020644Z node 1 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 1 [1:9:2056] 2025-05-07T09:02:49.042586Z node 1 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2072: aggregator request processed [1:9:2056] Initiator [1:7:2054] 2025-05-07T09:02:49.055344Z node 1 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 2 [1:10:2057] 2025-05-07T09:02:49.056339Z node 1 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 2 [1:10:2057] 2025-05-07T09:02:49.077307Z node 1 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2072: aggregator request processed [1:10:2057] Initiator [1:7:2054] 2025-05-07T09:02:49.089894Z node 1 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 3 [1:11:2058] 2025-05-07T09:02:49.090870Z node 1 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 3 [1:11:2058] 2025-05-07T09:02:49.111840Z node 1 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2072: aggregator request processed [1:11:2058] Initiator [1:7:2054] 2025-05-07T09:02:49.124389Z node 1 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 4 [1:12:2059] 2025-05-07T09:02:49.125334Z node 1 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 4 [1:12:2059] 2025-05-07T09:02:49.146545Z node 1 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2072: aggregator request processed [1:12:2059] Initiator [1:7:2054] 2025-05-07T09:02:49.159234Z node 1 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 5 [1:13:2060] 2025-05-07T09:02:49.160225Z node 1 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 5 [1:13:2060] 2025-05-07T09:02:49.181353Z node 1 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2072: aggregator request processed [1:13:2060] Initiator [1:7:2054] 2025-05-07T09:02:49.193872Z node 1 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 7 [1:7:2054] 2025-05-07T09:02:49.194002Z node 1 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 7 [1:7:2054] 2025-05-07T09:02:49.197529Z node 1 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 8 [1:7:2054] 2025-05-07T09:02:49.197613Z node 1 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 8 [1:7:2054] 2025-05-07T09:02:49.200827Z node 1 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 9 [1:7:2054] 2025-05-07T09:02:49.200920Z node 1 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 9 [1:7:2054] 2025-05-07T09:02:49.204675Z node 1 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 0 [1:7:2054] 2025-05-07T09:02:49.204783Z node 1 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 0 [1:7:2054] 2025-05-07T09:02:49.208447Z node 1 :TABLET_AGGREGATOR INFO: 
tablet_counters_aggregator.cpp:2045: aggregator actor got response node 1 [1:7:2054] 2025-05-07T09:02:49.208544Z node 1 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 1 [1:7:2054] 2025-05-07T09:02:49.212001Z node 1 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 2 [1:7:2054] 2025-05-07T09:02:49.212090Z node 1 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 2 [1:7:2054] 2025-05-07T09:02:49.217038Z node 1 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 3 [1:7:2054] 2025-05-07T09:02:49.217149Z node 1 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 3 [1:7:2054] 2025-05-07T09:02:49.220531Z node 1 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 4 [1:7:2054] 2025-05-07T09:02:49.220614Z node 1 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 4 [1:7:2054] 2025-05-07T09:02:49.224147Z node 1 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 5 [1:7:2054] 2025-05-07T09:02:49.224234Z node 1 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 5 [1:7:2054] 2025-05-07T09:02:49.227760Z node 1 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2072: aggregator request processed [1:7:2054] Initiator [1:6:2053] TEST 2 10 duration 1.729926s 2025-05-07T09:02:49.454173Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1983: aggregator new request V2 [2:7:2054] 2025-05-07T09:02:49.454494Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1981: aggregator new request V2 Initiator [2:7:2054] self [2:8:2055] worker 0 2025-05-07T09:02:49.454532Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1981: aggregator new request V2 Initiator [2:7:2054] self [2:9:2056] worker 1 2025-05-07T09:02:49.454564Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregato ... 
or got response node 8 [2:7:2054] 2025-05-07T09:02:50.516042Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 8 [2:7:2054] 2025-05-07T09:02:50.520818Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 9 [2:7:2054] 2025-05-07T09:02:50.520964Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 9 [2:7:2054] 2025-05-07T09:02:50.526816Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 10 [2:7:2054] 2025-05-07T09:02:50.526983Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 10 [2:7:2054] 2025-05-07T09:02:50.532837Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2072: aggregator request processed [2:7:2054] Initiator [2:6:2053] TEST 2 20 duration 1.210083s 2025-05-07T09:02:50.755888Z node 3 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1983: aggregator new request V2 [3:7:2054] 2025-05-07T09:02:50.756117Z node 3 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1981: aggregator new request V2 Initiator [3:7:2054] self [3:8:2055] worker 0 Sending message to [3:8:2055] from [3:7:2054] id 1 Sending message to [3:8:2055] from [3:7:2054] id 2 Sending message to [3:8:2055] from [3:7:2054] id 3 Sending message to [3:8:2055] from [3:7:2054] id 4 Sending message to [3:8:2055] from [3:7:2054] id 5 Sending message to [3:8:2055] from [3:7:2054] id 6 Sending message to [3:8:2055] from [3:7:2054] id 7 Sending message to [3:8:2055] from [3:7:2054] id 8 Sending message to [3:8:2055] from [3:7:2054] id 9 Sending message to [3:8:2055] from [3:7:2054] id 10 2025-05-07T09:02:51.489709Z node 3 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1964: aggregator actor request to node 1 [3:8:2055] 2025-05-07T09:02:51.489762Z node 3 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1964: aggregator actor request to node 2 [3:8:2055] 2025-05-07T09:02:51.489779Z node 3 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1964: aggregator actor request to node 3 [3:8:2055] 2025-05-07T09:02:51.489843Z node 3 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1964: aggregator actor request to node 4 [3:8:2055] 2025-05-07T09:02:51.489886Z node 3 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1964: aggregator actor request to node 5 [3:8:2055] 2025-05-07T09:02:51.489911Z node 3 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1964: aggregator actor request to node 6 [3:8:2055] 2025-05-07T09:02:51.489944Z node 3 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1964: aggregator actor request to node 7 [3:8:2055] 2025-05-07T09:02:51.489986Z node 3 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1964: aggregator actor request to node 8 [3:8:2055] 2025-05-07T09:02:51.490020Z node 3 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1964: aggregator actor request to node 9 [3:8:2055] 2025-05-07T09:02:51.490051Z node 3 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1964: aggregator actor request to node 10 [3:8:2055] 2025-05-07T09:02:51.490435Z node 3 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 1 [3:8:2055] 2025-05-07T09:02:51.491745Z node 3 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 1 [3:8:2055] 2025-05-07T09:02:51.525252Z node 3 :TABLET_AGGREGATOR INFO: 
tablet_counters_aggregator.cpp:2045: aggregator actor got response node 2 [3:8:2055] 2025-05-07T09:02:51.526490Z node 3 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 2 [3:8:2055] 2025-05-07T09:02:51.558092Z node 3 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 3 [3:8:2055] 2025-05-07T09:02:51.559483Z node 3 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 3 [3:8:2055] 2025-05-07T09:02:51.593857Z node 3 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 4 [3:8:2055] 2025-05-07T09:02:51.595241Z node 3 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 4 [3:8:2055] 2025-05-07T09:02:51.624609Z node 3 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 5 [3:8:2055] 2025-05-07T09:02:51.626088Z node 3 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 5 [3:8:2055] 2025-05-07T09:02:51.668265Z node 3 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 6 [3:8:2055] 2025-05-07T09:02:51.669675Z node 3 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 6 [3:8:2055] 2025-05-07T09:02:51.701998Z node 3 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 7 [3:8:2055] 2025-05-07T09:02:51.703526Z node 3 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 7 [3:8:2055] 2025-05-07T09:02:51.733040Z node 3 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 8 [3:8:2055] 2025-05-07T09:02:51.734092Z node 3 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 8 [3:8:2055] 2025-05-07T09:02:51.758061Z node 3 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 9 [3:8:2055] 2025-05-07T09:02:51.759121Z node 3 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 9 [3:8:2055] 2025-05-07T09:02:51.785215Z node 3 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 10 [3:8:2055] 2025-05-07T09:02:51.786310Z node 3 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 10 [3:8:2055] 2025-05-07T09:02:51.832567Z node 3 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2072: aggregator request processed [3:8:2055] Initiator [3:7:2054] 2025-05-07T09:02:52.076120Z node 3 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 0 [3:7:2054] 2025-05-07T09:02:52.077087Z node 3 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 0 [3:7:2054] 2025-05-07T09:02:52.136804Z node 3 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2072: aggregator request processed [3:7:2054] Initiator [3:6:2053] TEST 2 1 duration 1.597291s 2025-05-07T09:02:52.454906Z node 4 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1981: aggregator new request V2 Initiator [4:6:2053] self [4:7:2054] worker 0 Sending message to [4:7:2054] from [4:7:2054] id 1 Sending message to [4:7:2054] from [4:7:2054] id 2 Sending message to [4:7:2054] from [4:7:2054] id 3 Sending message 
to [4:7:2054] from [4:7:2054] id 4 Sending message to [4:7:2054] from [4:7:2054] id 5 Sending message to [4:7:2054] from [4:7:2054] id 6 Sending message to [4:7:2054] from [4:7:2054] id 7 Sending message to [4:7:2054] from [4:7:2054] id 8 Sending message to [4:7:2054] from [4:7:2054] id 9 Sending message to [4:7:2054] from [4:7:2054] id 10 2025-05-07T09:02:53.110140Z node 4 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1964: aggregator actor request to node 1 [4:7:2054] 2025-05-07T09:02:53.110203Z node 4 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1964: aggregator actor request to node 2 [4:7:2054] 2025-05-07T09:02:53.110230Z node 4 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1964: aggregator actor request to node 3 [4:7:2054] 2025-05-07T09:02:53.110253Z node 4 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1964: aggregator actor request to node 4 [4:7:2054] 2025-05-07T09:02:53.110336Z node 4 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1964: aggregator actor request to node 5 [4:7:2054] 2025-05-07T09:02:53.110374Z node 4 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1964: aggregator actor request to node 6 [4:7:2054] 2025-05-07T09:02:53.110407Z node 4 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1964: aggregator actor request to node 7 [4:7:2054] 2025-05-07T09:02:53.110440Z node 4 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1964: aggregator actor request to node 8 [4:7:2054] 2025-05-07T09:02:53.110471Z node 4 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1964: aggregator actor request to node 9 [4:7:2054] 2025-05-07T09:02:53.110505Z node 4 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1964: aggregator actor request to node 10 [4:7:2054] 2025-05-07T09:02:53.110791Z node 4 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 1 [4:7:2054] 2025-05-07T09:02:53.112459Z node 4 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 1 [4:7:2054] 2025-05-07T09:02:53.142807Z node 4 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 2 [4:7:2054] 2025-05-07T09:02:53.143952Z node 4 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 2 [4:7:2054] 2025-05-07T09:02:53.174870Z node 4 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 3 [4:7:2054] 2025-05-07T09:02:53.176393Z node 4 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 3 [4:7:2054] 2025-05-07T09:02:53.212663Z node 4 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 4 [4:7:2054] 2025-05-07T09:02:53.214177Z node 4 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 4 [4:7:2054] 2025-05-07T09:02:53.242971Z node 4 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 5 [4:7:2054] 2025-05-07T09:02:53.244653Z node 4 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 5 [4:7:2054] 2025-05-07T09:02:53.284817Z node 4 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 6 [4:7:2054] 2025-05-07T09:02:53.285895Z node 4 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 6 [4:7:2054] 
2025-05-07T09:02:53.310289Z node 4 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 7 [4:7:2054] 2025-05-07T09:02:53.311906Z node 4 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 7 [4:7:2054] 2025-05-07T09:02:53.339473Z node 4 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 8 [4:7:2054] 2025-05-07T09:02:53.340553Z node 4 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 8 [4:7:2054] 2025-05-07T09:02:53.367199Z node 4 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 9 [4:7:2054] 2025-05-07T09:02:53.368566Z node 4 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 9 [4:7:2054] 2025-05-07T09:02:53.398384Z node 4 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 10 [4:7:2054] 2025-05-07T09:02:53.399658Z node 4 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 10 [4:7:2054] 2025-05-07T09:02:53.450473Z node 4 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2072: aggregator request processed [4:7:2054] Initiator [4:6:2053] TEST 2 1 duration 1.427826s >> TNetClassifierTest::TestInitFromRemoteSource [GOOD] |92.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TResourceBroker::TestChangeTaskType [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TTabletResolver::NodeProblem [GOOD] Test command err: 2025-05-07T09:02:54.808062Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 123 entry.State: StInit ev: {EvForward TabletID: 123 Ev: nullptr Flags: 1:2:0} 2025-05-07T09:02:54.808257Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:610: Handle TEvInfo tabletId: 123 entry.State: StInitResolve success: true ev: {EvInfo Status: 0 TabletID: 123 Cookie: 0 CurrentLeader: [1:207:2136] CurrentLeaderTablet: [1:208:2137] CurrentGeneration: 1 CurrentStep: 0 Locked: false LockedFor: 0 SignatureSz: 3 Signature: {3, 6, 0}} 2025-05-07T09:02:54.808287Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:354: ApplyEntry leader tabletId: 123 followers: 0 2025-05-07T09:02:54.808337Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:279: SelectForward node 1 selfDC 1 leaderDC 1 1:2:0 local 1 localDc 1 other 0 disallowed 0 tabletId: 123 followers: 0 countLeader 1 allowFollowers 0 winner: [1:207:2136] 2025-05-07T09:02:54.808485Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 234 entry.State: StInit ev: {EvForward TabletID: 234 Ev: nullptr Flags: 1:2:0} 2025-05-07T09:02:54.808666Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:610: Handle TEvInfo tabletId: 234 entry.State: StInitResolve success: true ev: {EvInfo Status: 0 TabletID: 234 Cookie: 0 CurrentLeader: [1:213:2140] CurrentLeaderTablet: [1:214:2141] CurrentGeneration: 1 CurrentStep: 0 Locked: false LockedFor: 0 SignatureSz: 3 Signature: {3, 6, 0}} 2025-05-07T09:02:54.808699Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:354: ApplyEntry leader tabletId: 234 followers: 0 2025-05-07T09:02:54.808744Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:279: SelectForward node 1 selfDC 1 leaderDC 1 1:2:0 local 1 localDc 1 other 0 disallowed 0 tabletId: 234 followers: 0 countLeader 1 allowFollowers 0 winner: [1:213:2140] 
2025-05-07T09:02:54.809673Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 123 entry.State: StNormal ev: {EvForward TabletID: 123 Ev: nullptr Flags: 1:2:0} 2025-05-07T09:02:54.809725Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:279: SelectForward node 1 selfDC 1 leaderDC 1 1:2:0 local 1 localDc 1 other 0 disallowed 0 tabletId: 123 followers: 0 countLeader 1 allowFollowers 0 winner: [1:207:2136] 2025-05-07T09:02:54.809886Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 234 entry.State: StNormal ev: {EvForward TabletID: 234 Ev: nullptr Flags: 1:2:0} 2025-05-07T09:02:54.809928Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:279: SelectForward node 1 selfDC 1 leaderDC 1 1:2:0 local 1 localDc 1 other 0 disallowed 0 tabletId: 234 followers: 0 countLeader 1 allowFollowers 0 winner: [1:213:2140] 2025-05-07T09:02:54.810100Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:583: Handle TEvNodeProblem nodeId: 1 max(problemEpoch): 2 2025-05-07T09:02:54.810138Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:429: Delayed invalidation of tabletId: 123 leader: [1:207:2136] by NodeId 2025-05-07T09:02:54.810187Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 123 entry.State: StProblemResolve ev: {EvForward TabletID: 123 Ev: nullptr Flags: 1:2:0} 2025-05-07T09:02:54.810361Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:610: Handle TEvInfo tabletId: 123 entry.State: StProblemResolve success: true ev: {EvInfo Status: 0 TabletID: 123 Cookie: 0 CurrentLeader: [2:223:2094] CurrentLeaderTablet: [2:224:2095] CurrentGeneration: 2 CurrentStep: 0 Locked: false LockedFor: 0 SignatureSz: 3 Signature: {3, 6, 0}} 2025-05-07T09:02:54.810409Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:354: ApplyEntry leader tabletId: 123 followers: 0 2025-05-07T09:02:54.810444Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:279: SelectForward node 1 selfDC 1 leaderDC 2 1:2:0 local 0 localDc 0 other 1 disallowed 0 tabletId: 123 followers: 0 countLeader 1 allowFollowers 0 winner: [2:223:2094] 2025-05-07T09:02:54.810613Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:429: Delayed invalidation of tabletId: 234 leader: [1:213:2140] by NodeId 2025-05-07T09:02:54.810664Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 234 entry.State: StProblemResolve ev: {EvForward TabletID: 234 Ev: nullptr Flags: 1:2:0} 2025-05-07T09:02:54.810811Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:610: Handle TEvInfo tabletId: 234 entry.State: StProblemResolve success: true ev: {EvInfo Status: 0 TabletID: 234 Cookie: 0 CurrentLeader: [2:229:2096] CurrentLeaderTablet: [2:230:2097] CurrentGeneration: 2 CurrentStep: 0 Locked: false LockedFor: 0 SignatureSz: 3 Signature: {3, 6, 0}} 2025-05-07T09:02:54.810843Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:354: ApplyEntry leader tabletId: 234 followers: 0 2025-05-07T09:02:54.810885Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:279: SelectForward node 1 selfDC 1 leaderDC 2 1:2:0 local 0 localDc 0 other 1 disallowed 0 tabletId: 234 followers: 0 countLeader 1 allowFollowers 0 winner: [2:229:2096] 2025-05-07T09:02:54.811997Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:583: Handle TEvNodeProblem nodeId: 2 max(problemEpoch): 2 2025-05-07T09:02:54.812069Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 123 entry.State: StNormal ev: {EvForward TabletID: 123 Ev: nullptr Flags: 1:2:0} 
2025-05-07T09:02:54.812107Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:279: SelectForward node 1 selfDC 1 leaderDC 2 1:2:0 local 0 localDc 0 other 1 disallowed 0 tabletId: 123 followers: 0 countLeader 1 allowFollowers 0 winner: [2:223:2094] 2025-05-07T09:02:54.812277Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 234 entry.State: StNormal ev: {EvForward TabletID: 234 Ev: nullptr Flags: 1:2:0} 2025-05-07T09:02:54.812312Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:279: SelectForward node 1 selfDC 1 leaderDC 2 1:2:0 local 0 localDc 0 other 1 disallowed 0 tabletId: 234 followers: 0 countLeader 1 allowFollowers 0 winner: [2:229:2096] 2025-05-07T09:02:54.812468Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:583: Handle TEvNodeProblem nodeId: 2 max(problemEpoch): 4 2025-05-07T09:02:54.812513Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:429: Delayed invalidation of tabletId: 123 leader: [2:223:2094] by NodeId 2025-05-07T09:02:54.812559Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 123 entry.State: StProblemResolve ev: {EvForward TabletID: 123 Ev: nullptr Flags: 1:2:0} 2025-05-07T09:02:54.812801Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:610: Handle TEvInfo tabletId: 123 entry.State: StProblemResolve success: true ev: {EvInfo Status: 0 TabletID: 123 Cookie: 0 CurrentLeader: [3:241:2094] CurrentLeaderTablet: [3:242:2095] CurrentGeneration: 3 CurrentStep: 0 Locked: false LockedFor: 0 SignatureSz: 3 Signature: {3, 6, 0}} 2025-05-07T09:02:54.812850Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:354: ApplyEntry leader tabletId: 123 followers: 0 2025-05-07T09:02:54.812896Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:279: SelectForward node 1 selfDC 1 leaderDC 3 1:2:0 local 0 localDc 0 other 1 disallowed 0 tabletId: 123 followers: 0 countLeader 1 allowFollowers 0 winner: [3:241:2094] 2025-05-07T09:02:54.813171Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 234 entry.State: StNormal ev: {EvForward TabletID: 234 Ev: nullptr Flags: 1:2:0} 2025-05-07T09:02:54.813214Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:279: SelectForward node 1 selfDC 1 leaderDC 2 1:2:0 local 0 localDc 0 other 1 disallowed 0 tabletId: 234 followers: 0 countLeader 1 allowFollowers 0 winner: [2:229:2096] 2025-05-07T09:02:54.813361Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:583: Handle TEvNodeProblem nodeId: 2 max(problemEpoch): 5 2025-05-07T09:02:54.813411Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 123 entry.State: StNormal ev: {EvForward TabletID: 123 Ev: nullptr Flags: 1:2:0} 2025-05-07T09:02:54.813448Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:279: SelectForward node 1 selfDC 1 leaderDC 3 1:2:0 local 0 localDc 0 other 1 disallowed 0 tabletId: 123 followers: 0 countLeader 1 allowFollowers 0 winner: [3:241:2094] 2025-05-07T09:02:54.813622Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:429: Delayed invalidation of tabletId: 234 leader: [2:229:2096] by NodeId 2025-05-07T09:02:54.813665Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 234 entry.State: StProblemResolve ev: {EvForward TabletID: 234 Ev: nullptr Flags: 1:2:0} 2025-05-07T09:02:54.813832Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:610: Handle TEvInfo tabletId: 234 entry.State: StProblemResolve success: true ev: {EvInfo Status: 0 TabletID: 234 Cookie: 0 CurrentLeader: [3:247:2096] CurrentLeaderTablet: 
[3:248:2097] CurrentGeneration: 3 CurrentStep: 0 Locked: false LockedFor: 0 SignatureSz: 3 Signature: {3, 6, 0}} 2025-05-07T09:02:54.813876Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:354: ApplyEntry leader tabletId: 234 followers: 0 2025-05-07T09:02:54.813923Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:279: SelectForward node 1 selfDC 1 leaderDC 3 1:2:0 local 0 localDc 0 other 1 disallowed 0 tabletId: 234 followers: 0 countLeader 1 allowFollowers 0 winner: [3:247:2096] >> BootstrapperTest::KeepExistingTablet [GOOD] >> BootstrapperTest::DuplicateNodes ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/address_classification/ut/unittest >> TNetClassifierTest::TestInitFromRemoteSource [GOOD] Test command err: 2025-05-07T09:02:52.075852Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501626697670228717:2066];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:02:52.075914Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00162f/r3tmp/tmpUV09yh/pdisk_1.dat 2025-05-07T09:02:52.656975Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:02:52.702233Z node 1 :HTTP ERROR: http_proxy_outgoing.cpp:75: (#26,[::1]:18292) connection closed with error: Connection refused 2025-05-07T09:02:52.702740Z node 1 :CMS_CONFIGS ERROR: net_classifier_updater.cpp:278: NetClassifierUpdater failed to get subnets: Connection refused 2025-05-07T09:02:52.707911Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:02:52.708054Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:02:52.713084Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:02:52.786029Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:02:52.786054Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:02:52.786066Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:02:52.786197Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration |92.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_backup_collection_reboots/tx-schemeshard-ut_backup_collection_reboots >> BootstrapperTest::UnavailableStateStorage [GOOD] |92.2%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_backup_collection_reboots/tx-schemeshard-ut_backup_collection_reboots |92.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_backup_collection_reboots/tx-schemeshard-ut_backup_collection_reboots ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> BootstrapperTest::UnavailableStateStorage [GOOD] Test command err: ... waiting for pipe to connect ... waiting for blocked connect attempt ... blocking NKikimr::TEvTabletPipe::TEvConnect from TABLET_PIPE_CLIENT to TABLET_ACTOR cookie 1 ... waiting for blocked connect attempt (done) ... disconnecting nodes 2 <-> 1 ... 
waiting for pipe to disconnect ... waiting for pipe to connect ... waiting for pipe to connect ... waiting for multiple state storage lookup attempts 2025-05-07T09:02:55.529745Z node 6 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:193: tablet: 9437184, type: Dummy, begin new cycle (lookup in state storage) ... disconnecting nodes 2 <-> 0 ({EvReplicaLookup TabletID: 9437184 Cookie: 0} for [4:3:2050]) ... blocking NKikimr::TEvStateStorage::TEvReplicaLookup from SS_PROXY_REQUEST to SS_REPLICA cookie 0 ... disconnecting nodes 2 <-> 0 ({EvReplicaLookup TabletID: 9437184 Cookie: 1} for [4:6:2053]) ... blocking NKikimr::TEvStateStorage::TEvReplicaLookup from SS_PROXY_REQUEST to SS_REPLICA cookie 1 ... disconnecting nodes 2 <-> 0 ({EvReplicaLookup TabletID: 9437184 Cookie: 2} for [4:9:2056]) ... blocking NKikimr::TEvStateStorage::TEvReplicaLookup from SS_PROXY_REQUEST to SS_REPLICA cookie 2 2025-05-07T09:02:55.530490Z node 6 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:215: tablet: 9437184, type: Dummy, lookup: ERROR, leader: [0:0:0] 2025-05-07T09:02:55.530544Z node 6 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:242: tablet: 9437184, type: Dummy, state storage unavailable, sleeping for 0.148014s 2025-05-07T09:02:55.646305Z node 6 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:193: tablet: 9437184, type: Dummy, begin new cycle (lookup in state storage) ... disconnecting nodes 2 <-> 0 ({EvReplicaLookup TabletID: 9437184 Cookie: 0} for [4:3:2050]) ... blocking NKikimr::TEvStateStorage::TEvReplicaLookup from SS_PROXY_REQUEST to SS_REPLICA cookie 0 ... disconnecting nodes 2 <-> 0 ({EvReplicaLookup TabletID: 9437184 Cookie: 1} for [4:6:2053]) ... blocking NKikimr::TEvStateStorage::TEvReplicaLookup from SS_PROXY_REQUEST to SS_REPLICA cookie 1 ... disconnecting nodes 2 <-> 0 ({EvReplicaLookup TabletID: 9437184 Cookie: 2} for [4:9:2056]) ... blocking NKikimr::TEvStateStorage::TEvReplicaLookup from SS_PROXY_REQUEST to SS_REPLICA cookie 2 ... waiting for multiple state storage lookup attempts (done) >> BootstrapperTest::DuplicateNodes [GOOD] |92.2%| [TA] $(B)/ydb/core/mind/address_classification/ut/test-results/unittest/{meta.json ... results_accumulator.log} ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> BootstrapperTest::DuplicateNodes [GOOD] Test command err: ... waiting for pipe to connect ... sleeping (original instance should be preserved) ... waiting for original instance to stop ... waiting for original instance to stop (done) ... 
waiting for pipe to connect 2025-05-07T09:02:55.953347Z node 4 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:193: tablet: 9437184, type: Dummy, begin new cycle (lookup in state storage) 2025-05-07T09:02:55.953422Z node 5 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:193: tablet: 9437184, type: Dummy, begin new cycle (lookup in state storage) 2025-05-07T09:02:55.954019Z node 4 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:215: tablet: 9437184, type: Dummy, lookup: NODATA, leader: [0:0:0] 2025-05-07T09:02:55.954065Z node 4 :BOOTSTRAPPER INFO: bootstrapper.cpp:330: tablet:9437184, type: Dummy, begin new round, seed: 12552810490399048506 2025-05-07T09:02:55.954155Z node 5 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:215: tablet: 9437184, type: Dummy, lookup: NODATA, leader: [0:0:0] 2025-05-07T09:02:55.954185Z node 5 :BOOTSTRAPPER INFO: bootstrapper.cpp:330: tablet:9437184, type: Dummy, begin new round, seed: 15249746964198841502 2025-05-07T09:02:55.954815Z node 4 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:428: tablet: 9437184, type: Dummy, apply alien 5 state: FREE 2025-05-07T09:02:55.954854Z node 4 :BOOTSTRAPPER NOTICE: bootstrapper.cpp:680: tablet: 9437184, type: Dummy, boot 2025-05-07T09:02:55.954979Z node 5 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:428: tablet: 9437184, type: Dummy, apply alien 4 state: FREE 2025-05-07T09:02:55.955001Z node 5 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:499: tablet: 9437184, type: Dummy, lost round, wait for 0.139961s 2025-05-07T09:02:56.097661Z node 5 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:193: tablet: 9437184, type: Dummy, begin new cycle (lookup in state storage) 2025-05-07T09:02:56.098226Z node 5 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:215: tablet: 9437184, type: Dummy, lookup: OK, leader: [4:212:2095] 2025-05-07T09:02:56.098597Z node 5 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:266: tablet: 9437184, type: Dummy, connect: OK 2025-05-07T09:02:56.098627Z node 5 :BOOTSTRAPPER INFO: bootstrapper.cpp:277: tablet: 9437184, type: Dummy, connected to leader, waiting |92.3%| [TA] {RESULT} $(B)/ydb/core/mind/address_classification/ut/test-results/unittest/{meta.json ... results_accumulator.log} |92.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_vector_index_build_reboots/tx-schemeshard-ut_vector_index_build_reboots >> TSequence::CreateSequenceParallel |92.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_vector_index_build_reboots/tx-schemeshard-ut_vector_index_build_reboots |92.3%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_vector_index_build_reboots/tx-schemeshard-ut_vector_index_build_reboots >> DistributedEraseTests::DistributedEraseTxShouldFailOnVariousErrors [GOOD] >> EraseRowsTests::ConditionalEraseRowsShouldErase |92.3%| [TA] $(B)/ydb/core/persqueue/ut/ut_with_sdk/test-results/unittest/{meta.json ... results_accumulator.log} >> BootstrapperTest::MultipleBootstrappers [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> BootstrapperTest::MultipleBootstrappers [GOOD] Test command err: ... waiting for pipe to connect ... stopping current instance ... waiting for pipe to disconnect ... waiting for pipe to connect ... 
sleeping for 2 seconds 2025-05-07T09:02:54.697692Z node 3 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:193: tablet: 9437184, type: Dummy, begin new cycle (lookup in state storage) 2025-05-07T09:02:54.697774Z node 4 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:193: tablet: 9437184, type: Dummy, begin new cycle (lookup in state storage) 2025-05-07T09:02:54.697816Z node 5 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:193: tablet: 9437184, type: Dummy, begin new cycle (lookup in state storage) 2025-05-07T09:02:54.698815Z node 3 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:215: tablet: 9437184, type: Dummy, lookup: NODATA, leader: [0:0:0] 2025-05-07T09:02:54.698869Z node 3 :BOOTSTRAPPER INFO: bootstrapper.cpp:330: tablet:9437184, type: Dummy, begin new round, seed: 15249746964198841502 2025-05-07T09:02:54.699231Z node 4 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:215: tablet: 9437184, type: Dummy, lookup: NODATA, leader: [0:0:0] 2025-05-07T09:02:54.699264Z node 4 :BOOTSTRAPPER INFO: bootstrapper.cpp:330: tablet:9437184, type: Dummy, begin new round, seed: 838756400823690829 2025-05-07T09:02:54.699370Z node 5 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:215: tablet: 9437184, type: Dummy, lookup: NODATA, leader: [0:0:0] 2025-05-07T09:02:54.699416Z node 5 :BOOTSTRAPPER INFO: bootstrapper.cpp:330: tablet:9437184, type: Dummy, begin new round, seed: 2303809724928703835 2025-05-07T09:02:54.700372Z node 3 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:428: tablet: 9437184, type: Dummy, apply alien 5 state: UNKNOWN 2025-05-07T09:02:54.700863Z node 3 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:428: tablet: 9437184, type: Dummy, apply alien 5 state: FREE 2025-05-07T09:02:54.700970Z node 5 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:428: tablet: 9437184, type: Dummy, apply alien 4 state: FREE 2025-05-07T09:02:54.701082Z node 3 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:428: tablet: 9437184, type: Dummy, apply alien 4 state: FREE 2025-05-07T09:02:54.701114Z node 3 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:499: tablet: 9437184, type: Dummy, lost round, wait for 0.149198s 2025-05-07T09:02:54.701174Z node 4 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:428: tablet: 9437184, type: Dummy, apply alien 3 state: FREE 2025-05-07T09:02:54.701236Z node 4 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:428: tablet: 9437184, type: Dummy, apply alien 5 state: FREE 2025-05-07T09:02:54.701262Z node 4 :BOOTSTRAPPER NOTICE: bootstrapper.cpp:680: tablet: 9437184, type: Dummy, boot 2025-05-07T09:02:54.701436Z node 5 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:428: tablet: 9437184, type: Dummy, apply alien 3 state: FREE 2025-05-07T09:02:54.701478Z node 5 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:499: tablet: 9437184, type: Dummy, lost round, wait for 0.190190s 2025-05-07T09:02:54.895332Z node 3 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:193: tablet: 9437184, type: Dummy, begin new cycle (lookup in state storage) 2025-05-07T09:02:54.896032Z node 3 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:215: tablet: 9437184, type: Dummy, lookup: OK, leader: [4:274:2096] 2025-05-07T09:02:54.896464Z node 3 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:266: tablet: 9437184, type: Dummy, connect: OK 2025-05-07T09:02:54.896509Z node 3 :BOOTSTRAPPER INFO: bootstrapper.cpp:277: tablet: 9437184, type: Dummy, connected to leader, waiting 2025-05-07T09:02:54.939162Z node 5 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:193: tablet: 9437184, type: Dummy, begin new cycle (lookup in state storage) 2025-05-07T09:02:54.939867Z node 5 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:215: tablet: 9437184, type: Dummy, lookup: OK, leader: [4:274:2096] 2025-05-07T09:02:54.940375Z node 5 
:BOOTSTRAPPER DEBUG: bootstrapper.cpp:266: tablet: 9437184, type: Dummy, connect: OK 2025-05-07T09:02:54.940416Z node 5 :BOOTSTRAPPER INFO: bootstrapper.cpp:277: tablet: 9437184, type: Dummy, connected to leader, waiting ... waiting for pipe to connect ... tablet initially started on node 4 (idx 2) in gen 2 ... disconnecting other nodes ... sleeping for 2 seconds (tablet expected to survive) 2025-05-07T09:02:55.700506Z node 4 :PIPE_SERVER ERROR: tablet_pipe_server.cpp:228: [9437184] NodeDisconnected NodeId# 5 2025-05-07T09:02:55.700745Z node 4 :PIPE_SERVER ERROR: tablet_pipe_server.cpp:228: [9437184] NodeDisconnected NodeId# 3 2025-05-07T09:02:55.700895Z node 5 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:303: tablet: 9437184, type: Dummy, disconnected 2025-05-07T09:02:55.700935Z node 5 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:193: tablet: 9437184, type: Dummy, begin new cycle (lookup in state storage) 2025-05-07T09:02:55.701435Z node 3 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:303: tablet: 9437184, type: Dummy, disconnected 2025-05-07T09:02:55.701469Z node 3 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:193: tablet: 9437184, type: Dummy, begin new cycle (lookup in state storage) 2025-05-07T09:02:55.703126Z node 5 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:215: tablet: 9437184, type: Dummy, lookup: OK, leader: [4:274:2096] 2025-05-07T09:02:55.703805Z node 3 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:215: tablet: 9437184, type: Dummy, lookup: OK, leader: [4:274:2096] 2025-05-07T09:02:55.704865Z node 3 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:266: tablet: 9437184, type: Dummy, connect: OK 2025-05-07T09:02:55.704907Z node 3 :BOOTSTRAPPER INFO: bootstrapper.cpp:277: tablet: 9437184, type: Dummy, connected to leader, waiting 2025-05-07T09:02:55.704968Z node 5 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:266: tablet: 9437184, type: Dummy, connect: OK 2025-05-07T09:02:55.704989Z node 5 :BOOTSTRAPPER INFO: bootstrapper.cpp:277: tablet: 9437184, type: Dummy, connected to leader, waiting ... disconnecting other nodes (new tablet connections fail) ... sleeping for 2 seconds (tablet expected to survive) 2025-05-07T09:02:56.399044Z node 4 :PIPE_SERVER ERROR: tablet_pipe_server.cpp:228: [9437184] NodeDisconnected NodeId# 5 2025-05-07T09:02:56.399104Z node 4 :PIPE_SERVER ERROR: tablet_pipe_server.cpp:228: [9437184] NodeDisconnected NodeId# 3 2025-05-07T09:02:56.399297Z node 5 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:303: tablet: 9437184, type: Dummy, disconnected 2025-05-07T09:02:56.399331Z node 5 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:193: tablet: 9437184, type: Dummy, begin new cycle (lookup in state storage) 2025-05-07T09:02:56.399531Z node 3 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:303: tablet: 9437184, type: Dummy, disconnected 2025-05-07T09:02:56.399549Z node 3 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:193: tablet: 9437184, type: Dummy, begin new cycle (lookup in state storage) 2025-05-07T09:02:56.401077Z node 5 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:215: tablet: 9437184, type: Dummy, lookup: OK, leader: [4:274:2096] 2025-05-07T09:02:56.401179Z node 3 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:215: tablet: 9437184, type: Dummy, lookup: OK, leader: [4:274:2096] ... disconnecting nodes 2 <-> 3 (tablet connect attempt) ... blocking NKikimr::TEvTabletPipe::TEvConnect from TABLET_PIPE_CLIENT to TABLET_ACTOR cookie 1 ... disconnecting nodes 2 <-> 1 (tablet connect attempt) ... 
blocking NKikimr::TEvTabletPipe::TEvConnect from TABLET_PIPE_CLIENT to TABLET_ACTOR cookie 1 2025-05-07T09:02:56.401958Z node 5 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:266: tablet: 9437184, type: Dummy, connect: ERROR 2025-05-07T09:02:56.402034Z node 5 :BOOTSTRAPPER INFO: bootstrapper.cpp:330: tablet:9437184, type: Dummy, begin new round, seed: 6528562917658346564 2025-05-07T09:02:56.402375Z node 3 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:266: tablet: 9437184, type: Dummy, connect: ERROR 2025-05-07T09:02:56.402395Z node 3 :BOOTSTRAPPER INFO: bootstrapper.cpp:330: tablet:9437184, type: Dummy, begin new round, seed: 16349739802483488852 2025-05-07T09:02:56.403079Z node 5 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:428: tablet: 9437184, type: Dummy, apply alien 4 state: OWNER 2025-05-07T09:02:56.403111Z node 5 :BOOTSTRAPPER INFO: bootstrapper.cpp:571: tablet: 9437184, type: Dummy, become watch on node 4 (owner) 2025-05-07T09:02:56.403204Z node 3 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:428: tablet: 9437184, type: Dummy, apply alien 4 state: OWNER 2025-05-07T09:02:56.403236Z node 3 :BOOTSTRAPPER INFO: bootstrapper.cpp:571: tablet: 9437184, type: Dummy, become watch on node 4 (owner) ... disconnect other nodes (new owner expected) ... sleeping for 2 seconds (new tablet expected to start once) 2025-05-07T09:02:57.084855Z node 5 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:643: tablet: 9437184, type: Dummy, disconnected from 4, round 16045690984833335029 2025-05-07T09:02:57.084932Z node 5 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:193: tablet: 9437184, type: Dummy, begin new cycle (lookup in state storage) 2025-05-07T09:02:57.085035Z node 3 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:643: tablet: 9437184, type: Dummy, disconnected from 4, round 16045690984833335029 2025-05-07T09:02:57.085074Z node 3 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:193: tablet: 9437184, type: Dummy, begin new cycle (lookup in state storage) 2025-05-07T09:02:57.085726Z node 5 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:215: tablet: 9437184, type: Dummy, lookup: OK, leader: [4:274:2096] 2025-05-07T09:02:57.085873Z node 3 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:215: tablet: 9437184, type: Dummy, lookup: OK, leader: [4:274:2096] ... disconnecting nodes 2 <-> 3 (tablet connect attempt) ... blocking NKikimr::TEvTabletPipe::TEvConnect from TABLET_PIPE_CLIENT to TABLET_ACTOR cookie 1 ... disconnecting nodes 2 <-> 1 (tablet connect attempt) ... blocking NKikimr::TEvTabletPipe::TEvConnect from TABLET_PIPE_CLIENT to TABLET_ACTOR cookie 1 2025-05-07T09:02:57.086301Z node 5 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:266: tablet: 9437184, type: Dummy, connect: ERROR 2025-05-07T09:02:57.086332Z node 5 :BOOTSTRAPPER INFO: bootstrapper.cpp:330: tablet:9437184, type: Dummy, begin new round, seed: 13164802727073798053 2025-05-07T09:02:57.086503Z node 3 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:266: tablet: 9437184, type: Dummy, connect: ERROR 2025-05-07T09:02:57.086519Z node 3 :BOOTSTRAPPER INFO: bootstrapper.cpp:330: tablet:9437184, type: Dummy, begin new round, seed: 10171326560769670008 ... disconnecting nodes 2 <-> 3 (bootstrap watch attempt) ... blocking NKikimr::TEvBootstrapper::TEvWatch from TABLET_BOOTSTRAPPER to TABLET_BOOTSTRAPPER cookie 16045690984833335031 2025-05-07T09:02:57.086769Z node 5 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:403: tablet: 9437184, type: Dummy, disconnected from 4, round 16045690984833335031 2025-05-07T09:02:57.086795Z node 5 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:428: tablet: 9437184, type: Dummy, apply alien 4 state: DISCONNECTED ... 
disconnecting nodes 2 <-> 1 (bootstrap watch attempt) ... blocking NKikimr::TEvBootstrapper::TEvWatch from TABLET_BOOTSTRAPPER to TABLET_BOOTSTRAPPER cookie 16045690984833335031 2025-05-07T09:02:57.086947Z node 3 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:428: tablet: 9437184, type: Dummy, apply alien 5 state: FREE 2025-05-07T09:02:57.087009Z node 5 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:428: tablet: 9437184, type: Dummy, apply alien 3 state: FREE 2025-05-07T09:02:57.087029Z node 5 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:499: tablet: 9437184, type: Dummy, lost round, wait for 0.116418s 2025-05-07T09:02:57.087070Z node 3 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:403: tablet: 9437184, type: Dummy, disconnected from 4, round 16045690984833335031 2025-05-07T09:02:57.087084Z node 3 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:428: tablet: 9437184, type: Dummy, apply alien 4 state: DISCONNECTED 2025-05-07T09:02:57.087099Z node 3 :BOOTSTRAPPER NOTICE: bootstrapper.cpp:680: tablet: 9437184, type: Dummy, boot 2025-05-07T09:02:57.088673Z node 4 :BOOTSTRAPPER INFO: bootstrapper.cpp:715: tablet: 9437184, type: Dummy, tablet dead 2025-05-07T09:02:57.088739Z node 4 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:193: tablet: 9437184, type: Dummy, begin new cycle (lookup in state storage) 2025-05-07T09:02:57.090938Z node 4 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:215: tablet: 9437184, type: Dummy, lookup: OK, leader: [3:394:2096] 2025-05-07T09:02:57.103570Z node 4 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:266: tablet: 9437184, type: Dummy, connect: OK 2025-05-07T09:02:57.103613Z node 4 :BOOTSTRAPPER INFO: bootstrapper.cpp:277: tablet: 9437184, type: Dummy, connected to leader, waiting 2025-05-07T09:02:57.168332Z node 5 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:193: tablet: 9437184, type: Dummy, begin new cycle (lookup in state storage) 2025-05-07T09:02:57.168951Z node 5 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:215: tablet: 9437184, type: Dummy, lookup: OK, leader: [3:394:2096] 2025-05-07T09:02:57.169402Z node 5 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:266: tablet: 9437184, type: Dummy, connect: OK 2025-05-07T09:02:57.169438Z node 5 :BOOTSTRAPPER INFO: bootstrapper.cpp:277: tablet: 9437184, type: Dummy, connected to leader, waiting ... waiting for pipe to connect ... disconnecting nodes 2 <-> 0 (tablet connect attempt) ... blocking NKikimr::TEvTabletPipe::TEvConnect from TABLET_PIPE_CLIENT to cookie 1 |92.3%| [TA] {RESULT} $(B)/ydb/core/persqueue/ut/ut_with_sdk/test-results/unittest/{meta.json ... 
results_accumulator.log}
>> test_async_replication.py::TestAsyncReplication::test_async_replication[table_index_3__SYNC-pk_types1-all_types1-index1---SYNC] [GOOD]
>> YdbOlapStore::LogExistingRequest [GOOD]
>> YdbOlapStore::LogExistingUserId
>> YdbLogStore::LogTable [GOOD]
>> YdbLogStore::AlterLogTable
>> TSequence::CreateSequenceParallel [GOOD]
>> TSequence::CreateSequenceSequential
>> EraseRowsTests::ConditionalEraseRowsShouldEraseOnPgInt4Seconds [GOOD]
>> EraseRowsTests::ConditionalEraseRowsShouldEraseOnDyNumberSeconds [GOOD]
>> EraseRowsTests::ConditionalEraseRowsShouldEraseOnPgInt8Microseconds
>> EraseRowsTests::ConditionalEraseRowsShouldEraseOnDyNumberMilliSeconds
>> EraseRowsTests::ConditionalEraseRowsShouldNotEraseModifiedRows [GOOD]
>> EraseRowsTests::EraseRowsFromReplicatedTable
>> EraseRowsTests::ConditionalEraseRowsShouldNotErase [GOOD]
>> EraseRowsTests::ConditionalEraseRowsShouldFailOnVariousErrors
>> EraseRowsTests::EraseRowsShouldSuccess [GOOD]
>> EraseRowsTests::EraseRowsShouldFailOnVariousErrors
>> EraseRowsTests::ConditionalEraseRowsShouldEraseOnUint64MilliSeconds [GOOD]
>> EraseRowsTests::ConditionalEraseRowsShouldEraseOnUint64MicroSeconds
>> EraseRowsTests::ConditionalEraseRowsShouldEraseOnDyNumberMicroSeconds [GOOD]
>> EraseRowsTests::ConditionalEraseRowsShouldEraseOnDate32
>> EraseRowsTests::ConditionalEraseRowsShouldEraseOnUint32 [GOOD]
>> EraseRowsTests::ConditionalEraseRowsShouldEraseOnTimestamp64
>> EraseRowsTests::ConditionalEraseRowsShouldEraseOnPgInt8Seconds [GOOD]
>> EraseRowsTests::ConditionalEraseRowsShouldEraseOnPgInt8Milliseconds
>> EraseRowsTests::ConditionalEraseRowsShouldEraseOnUint64Seconds [GOOD]
>> EraseRowsTests::ConditionalEraseRowsShouldEraseOnUint64NanoSeconds
>> EraseRowsTests::ConditionalEraseRowsShouldErase [GOOD]
>> EraseRowsTests::ConditionalEraseRowsShouldBreakLocks
>> TSequence::CreateSequenceSequential [GOOD]
>> TSequence::CreateSequenceInsideTableThenDropSequence
>> DistributedEraseTests::ConditionalEraseRowsShouldNotErase [GOOD]
>> DistributedEraseTests::ConditionalEraseRowsShouldFailOnVariousErrors
>> DistributedEraseTests::ConditionalEraseRowsShouldEraseOnUint32 [GOOD]
>> DistributedEraseTests::ConditionalEraseRowsShouldFailOnSchemeTx
>> DistributedEraseTests::ConditionalEraseRowsShouldErase [GOOD]
>> DistributedEraseTests::ConditionalEraseRowsCheckLimits
>> TSequence::CreateSequenceInsideTableThenDropSequence [GOOD]
>> TSequence::CreateSequenceInsideTableThenDropTable
>> DistributedEraseTests::ConditionalEraseRowsShouldSuccessOnShardedIndex [GOOD]
>> DistributedEraseTests::ConditionalEraseRowsShouldNotEraseModifiedRows
>> TSequence::CreateSequenceInsideTableThenDropTable [GOOD]
>> TSequence::CreateSequencesWithIndexedTable
>> EraseRowsTests::EraseRowsShouldFailOnVariousErrors [GOOD]
>> TSequence::CreateSequencesWithIndexedTable [GOOD]
>> TSequence::CreateTableWithDefaultFromSequence
>> EraseRowsTests::EraseRowsFromReplicatedTable [GOOD]
>> EraseRowsTests::ConditionalEraseRowsShouldEraseOnDyNumberMilliSeconds [GOOD]
>> EraseRowsTests::ConditionalEraseRowsShouldEraseOnDyNumberNanoSeconds
>> EraseRowsTests::ConditionalEraseRowsShouldEraseOnTimestamp64 [GOOD]
>> EraseRowsTests::ConditionalEraseRowsShouldEraseOnPgInt8Microseconds [GOOD]
>> EraseRowsTests::ConditionalEraseRowsShouldEraseOnPgDate
>> EraseRowsTests::ConditionalEraseRowsShouldEraseOnUint64NanoSeconds [GOOD]
>> EraseRowsTests::ConditionalEraseRowsShouldEraseOnUint64MicroSeconds [GOOD]
>> EraseRowsTests::ConditionalEraseRowsShouldEraseOnDate32 [GOOD]
>> EraseRowsTests::ConditionalEraseRowsShouldEraseOnDatetime64
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_erase_rows/unittest >> EraseRowsTests::EraseRowsShouldFailOnVariousErrors [GOOD]
Test command err: 2025-05-07T09:02:55.075119Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:105:2151], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T09:02:55.075256Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T09:02:55.075512Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00291e/r3tmp/tmp1umSi5/pdisk_1.dat 2025-05-07T09:02:56.437948Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-05-07T09:02:56.541257Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:02:56.652196Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:02:56.658074Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:02:56.681635Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:02:56.879675Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-05-07T09:02:57.015025Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:665:2569] 2025-05-07T09:02:57.015284Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-05-07T09:02:57.078501Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-05-07T09:02:57.078599Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-05-07T09:02:57.089308Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-05-07T09:02:57.089401Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-05-07T09:02:57.089452Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-05-07T09:02:57.104852Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-05-07T09:02:57.105039Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-05-07T09:02:57.105143Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:681:2569] in generation 1 2025-05-07T09:02:57.115804Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-05-07T09:02:57.150845Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-05-07T09:02:57.151052Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-05-07T09:02:57.151153Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 
72075186224037888, actorId: [1:683:2579] 2025-05-07T09:02:57.151193Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-05-07T09:02:57.151235Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-05-07T09:02:57.151269Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:02:57.151655Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-05-07T09:02:57.151751Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-05-07T09:02:57.151842Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T09:02:57.151880Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-05-07T09:02:57.151927Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-05-07T09:02:57.151963Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T09:02:57.152106Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:661:2566], serverId# [1:672:2573], sessionId# [0:0:0] 2025-05-07T09:02:57.164280Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-05-07T09:02:57.164543Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-05-07T09:02:57.164635Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-05-07T09:02:57.166356Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-05-07T09:02:57.177025Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-05-07T09:02:57.186616Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-05-07T09:02:57.333742Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:697:2587], serverId# [1:699:2589], sessionId# [0:0:0] 2025-05-07T09:02:57.337145Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715657 at step 1000 at tablet 72075186224037888 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1000 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2025-05-07T09:02:57.337216Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:02:57.337477Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T09:02:57.337543Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 
planned 1 2025-05-07T09:02:57.337578Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000:281474976715657] in PlanQueue unit at 72075186224037888 2025-05-07T09:02:57.337812Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1000:281474976715657 keys extracted: 0 2025-05-07T09:02:57.337933Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-05-07T09:02:57.338438Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T09:02:57.338500Z node 1 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-05-07T09:02:57.350161Z node 1 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-05-07T09:02:57.364115Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-05-07T09:02:57.366151Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-05-07T09:02:57.366198Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:02:57.366643Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-05-07T09:02:57.366696Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T09:02:57.367426Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T09:02:57.367457Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-05-07T09:02:57.367490Z node 1 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-05-07T09:02:57.367536Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [1:410:2405], exec latency: 0 ms, propose latency: 0 ms 2025-05-07T09:02:57.367616Z node 1 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-05-07T09:02:57.367797Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:02:57.370862Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-05-07T09:02:57.372072Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-05-07T09:02:57.372188Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-05-07T09:02:57.372238Z node 1 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got 
TEvSchemaChangedResult from SS at 72075186224037888 2025-05-07T09:02:57.520552Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:731:2613], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:02:57.520680Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:741:2618], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:02:57.520793Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:02:57.548502Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474 ... shard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-05-07T09:03:05.681371Z node 2 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-05-07T09:03:05.681411Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:03:05.681686Z node 2 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-05-07T09:03:05.681759Z node 2 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-05-07T09:03:05.681837Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T09:03:05.681873Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-05-07T09:03:05.681912Z node 2 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-05-07T09:03:05.681948Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T09:03:05.682342Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [2:661:2566], serverId# [2:671:2572], sessionId# [0:0:0] 2025-05-07T09:03:05.682449Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-05-07T09:03:05.682605Z node 2 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-05-07T09:03:05.682662Z node 2 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-05-07T09:03:05.683895Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-05-07T09:03:05.694423Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-05-07T09:03:05.694523Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-05-07T09:03:05.841444Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [2:697:2587], serverId# [2:699:2589], sessionId# [0:0:0] 2025-05-07T09:03:05.841878Z node 2 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715657 at step 1000 at tablet 72075186224037888 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1000 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2025-05-07T09:03:05.841918Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:03:05.842075Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: 
TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T09:03:05.842108Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-05-07T09:03:05.842147Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000:281474976715657] in PlanQueue unit at 72075186224037888 2025-05-07T09:03:05.842373Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1000:281474976715657 keys extracted: 0 2025-05-07T09:03:05.842477Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-05-07T09:03:05.842605Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T09:03:05.842650Z node 2 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-05-07T09:03:05.842999Z node 2 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-05-07T09:03:05.843309Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-05-07T09:03:05.844927Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-05-07T09:03:05.844970Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:03:05.845696Z node 2 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-05-07T09:03:05.845786Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T09:03:05.846645Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T09:03:05.846683Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-05-07T09:03:05.846717Z node 2 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-05-07T09:03:05.846770Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [2:409:2404], exec latency: 0 ms, propose latency: 0 ms 2025-05-07T09:03:05.846814Z node 2 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-05-07T09:03:05.846895Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:03:05.847841Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-05-07T09:03:05.849259Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-05-07T09:03:05.849315Z node 2 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got 
TEvSchemaChangedResult from SS at 72075186224037888 2025-05-07T09:03:05.849800Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-05-07T09:03:05.853814Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [2:733:2615], serverId# [2:734:2616], sessionId# [0:0:0] 2025-05-07T09:03:05.853955Z node 2 :TX_DATASHARD INFO: datashard__op_rows.cpp:26: TTxDirectBase(48) Execute: at tablet# 72075186224037888 2025-05-07T09:03:05.875047Z node 2 :TX_DATASHARD INFO: datashard__op_rows.cpp:80: TTxDirectBase(48) Complete: at tablet# 72075186224037888 2025-05-07T09:03:05.875117Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:03:05.875386Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037888, clientId# [2:733:2615], serverId# [2:734:2616], sessionId# [0:0:0] 2025-05-07T09:03:05.877145Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [2:739:2621], serverId# [2:740:2622], sessionId# [0:0:0] 2025-05-07T09:03:05.877310Z node 2 :TX_DATASHARD INFO: datashard__op_rows.cpp:26: TTxDirectBase(48) Execute: at tablet# 72075186224037888 2025-05-07T09:03:05.877496Z node 2 :TX_DATASHARD INFO: datashard__op_rows.cpp:80: TTxDirectBase(48) Complete: at tablet# 72075186224037888 2025-05-07T09:03:05.877536Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:03:05.877713Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037888, clientId# [2:739:2621], serverId# [2:740:2622], sessionId# [0:0:0] 2025-05-07T09:03:05.879158Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [2:744:2626], serverId# [2:745:2627], sessionId# [0:0:0] 2025-05-07T09:03:05.879277Z node 2 :TX_DATASHARD INFO: datashard__op_rows.cpp:26: TTxDirectBase(48) Execute: at tablet# 72075186224037888 2025-05-07T09:03:05.879434Z node 2 :TX_DATASHARD INFO: datashard__op_rows.cpp:80: TTxDirectBase(48) Complete: at tablet# 72075186224037888 2025-05-07T09:03:05.879489Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:03:05.879659Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037888, clientId# [2:744:2626], serverId# [2:745:2627], sessionId# [0:0:0] 2025-05-07T09:03:05.881088Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [2:749:2631], serverId# [2:750:2632], sessionId# [0:0:0] 2025-05-07T09:03:05.881218Z node 2 :TX_DATASHARD INFO: datashard__op_rows.cpp:26: TTxDirectBase(48) Execute: at tablet# 72075186224037888 2025-05-07T09:03:05.881364Z node 2 :TX_DATASHARD INFO: datashard__op_rows.cpp:80: TTxDirectBase(48) Complete: at tablet# 72075186224037888 2025-05-07T09:03:05.881405Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:03:05.881566Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037888, clientId# [2:749:2631], serverId# [2:750:2632], sessionId# 
[0:0:0] 2025-05-07T09:03:05.882857Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [2:754:2636], serverId# [2:755:2637], sessionId# [0:0:0] 2025-05-07T09:03:05.882959Z node 2 :TX_DATASHARD INFO: datashard__op_rows.cpp:26: TTxDirectBase(48) Execute: at tablet# 72075186224037888 2025-05-07T09:03:05.893391Z node 2 :TX_DATASHARD INFO: datashard__op_rows.cpp:80: TTxDirectBase(48) Complete: at tablet# 72075186224037888 2025-05-07T09:03:05.893460Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:03:05.893704Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037888, clientId# [2:754:2636], serverId# [2:755:2637], sessionId# [0:0:0] 2025-05-07T09:03:05.895254Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [2:759:2641], serverId# [2:760:2642], sessionId# [0:0:0] 2025-05-07T09:03:05.895418Z node 2 :TX_DATASHARD INFO: datashard__op_rows.cpp:26: TTxDirectBase(48) Execute: at tablet# 72075186224037888 2025-05-07T09:03:05.895572Z node 2 :TX_DATASHARD INFO: datashard__op_rows.cpp:80: TTxDirectBase(48) Complete: at tablet# 72075186224037888 2025-05-07T09:03:05.895608Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:03:05.895756Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037888, clientId# [2:759:2641], serverId# [2:760:2642], sessionId# [0:0:0] >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnPgInt8Milliseconds [GOOD] >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnPgTimestamp ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_erase_rows/unittest >> EraseRowsTests::EraseRowsFromReplicatedTable [GOOD] Test command err: 2025-05-07T09:02:55.069924Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:105:2151], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T09:02:55.070101Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T09:02:55.070393Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0029b3/r3tmp/tmpJ46g4y/pdisk_1.dat 2025-05-07T09:02:56.438018Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-05-07T09:02:56.541220Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:02:56.652329Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:02:56.658239Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:02:56.681855Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:02:56.879639Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-05-07T09:02:57.014943Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:665:2569] 2025-05-07T09:02:57.015204Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-05-07T09:02:57.083143Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-05-07T09:02:57.083283Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-05-07T09:02:57.089269Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-05-07T09:02:57.089347Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-05-07T09:02:57.089398Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-05-07T09:02:57.104826Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-05-07T09:02:57.105059Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-05-07T09:02:57.105159Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:681:2569] in generation 1 2025-05-07T09:02:57.115807Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-05-07T09:02:57.139733Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-05-07T09:02:57.143951Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-05-07T09:02:57.144099Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 
72075186224037888, actorId: [1:683:2579] 2025-05-07T09:02:57.144131Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-05-07T09:02:57.144161Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-05-07T09:02:57.144199Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:02:57.150750Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-05-07T09:02:57.150838Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-05-07T09:02:57.150930Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T09:02:57.150968Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-05-07T09:02:57.151005Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-05-07T09:02:57.151060Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T09:02:57.151174Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:661:2566], serverId# [1:672:2573], sessionId# [0:0:0] 2025-05-07T09:02:57.164190Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-05-07T09:02:57.164416Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-05-07T09:02:57.164506Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-05-07T09:02:57.165860Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-05-07T09:02:57.176461Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-05-07T09:02:57.186562Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-05-07T09:02:57.334167Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:697:2587], serverId# [1:699:2589], sessionId# [0:0:0] 2025-05-07T09:02:57.342237Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715657 at step 1000 at tablet 72075186224037888 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1000 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2025-05-07T09:02:57.342299Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:02:57.342520Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T09:02:57.342550Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 
planned 1 2025-05-07T09:02:57.342585Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000:281474976715657] in PlanQueue unit at 72075186224037888 2025-05-07T09:02:57.342811Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1000:281474976715657 keys extracted: 0 2025-05-07T09:02:57.342944Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-05-07T09:02:57.343466Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T09:02:57.343519Z node 1 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-05-07T09:02:57.350163Z node 1 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-05-07T09:02:57.364094Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-05-07T09:02:57.366157Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-05-07T09:02:57.366201Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:02:57.366722Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-05-07T09:02:57.366793Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T09:02:57.367795Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T09:02:57.367832Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-05-07T09:02:57.367893Z node 1 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-05-07T09:02:57.367940Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [1:410:2405], exec latency: 0 ms, propose latency: 0 ms 2025-05-07T09:02:57.368063Z node 1 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-05-07T09:02:57.368156Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:02:57.371740Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-05-07T09:02:57.373256Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-05-07T09:02:57.373416Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-05-07T09:02:57.373477Z node 1 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got 
TEvSchemaChangedResult from SS at 72075186224037888 2025-05-07T09:02:57.520553Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:731:2613], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:02:57.520697Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:741:2618], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:02:57.520827Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:02:57.548596Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474 ... operation [0:281474976715662] at 72075186224037888 for ReadTableScan 2025-05-07T09:03:02.743753Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-05-07T09:03:02.743810Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T09:03:02.743861Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:03:05.462925Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:311:2354], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T09:03:05.463184Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T09:03:05.463293Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0029b3/r3tmp/tmpPwsoYR/pdisk_1.dat 2025-05-07T09:03:05.685315Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-05-07T09:03:05.713320Z node 2 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:03:05.758785Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:03:05.758884Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:03:05.770403Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:03:05.848269Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-05-07T09:03:05.867250Z node 2 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [2:664:2568] 2025-05-07T09:03:05.867467Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-05-07T09:03:05.902161Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-05-07T09:03:05.902264Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-05-07T09:03:05.903562Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-05-07T09:03:05.903664Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-05-07T09:03:05.903716Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-05-07T09:03:05.904014Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-05-07T09:03:05.904108Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-05-07T09:03:05.904167Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [2:680:2568] in generation 1 2025-05-07T09:03:05.914827Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-05-07T09:03:05.914928Z node 2 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-05-07T09:03:05.915039Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-05-07T09:03:05.915131Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 
72075186224037888, actorId: [2:682:2578] 2025-05-07T09:03:05.915172Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-05-07T09:03:05.915208Z node 2 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-05-07T09:03:05.915252Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:03:05.915612Z node 2 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-05-07T09:03:05.915746Z node 2 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-05-07T09:03:05.915859Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T09:03:05.915898Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-05-07T09:03:05.915935Z node 2 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-05-07T09:03:05.915975Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T09:03:05.916356Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [2:661:2566], serverId# [2:671:2572], sessionId# [0:0:0] 2025-05-07T09:03:05.916465Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-05-07T09:03:05.916648Z node 2 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-05-07T09:03:05.916718Z node 2 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-05-07T09:03:05.918486Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-05-07T09:03:05.929265Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-05-07T09:03:05.929379Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-05-07T09:03:06.076507Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [2:697:2587], serverId# [2:699:2589], sessionId# [0:0:0] 2025-05-07T09:03:06.077051Z node 2 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715657 at step 1000 at tablet 72075186224037888 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1000 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2025-05-07T09:03:06.077111Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:03:06.077289Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T09:03:06.077331Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 
planned 1 2025-05-07T09:03:06.077382Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000:281474976715657] in PlanQueue unit at 72075186224037888 2025-05-07T09:03:06.077627Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1000:281474976715657 keys extracted: 0 2025-05-07T09:03:06.077725Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-05-07T09:03:06.077847Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T09:03:06.077907Z node 2 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-05-07T09:03:06.078331Z node 2 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-05-07T09:03:06.078632Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-05-07T09:03:06.080213Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-05-07T09:03:06.080263Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:03:06.080886Z node 2 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-05-07T09:03:06.080942Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T09:03:06.081810Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T09:03:06.081843Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-05-07T09:03:06.081884Z node 2 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-05-07T09:03:06.081941Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [2:409:2404], exec latency: 0 ms, propose latency: 0 ms 2025-05-07T09:03:06.082014Z node 2 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-05-07T09:03:06.082091Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:03:06.082895Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-05-07T09:03:06.084377Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-05-07T09:03:06.084431Z node 2 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-05-07T09:03:06.084903Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 
72057594046316545 last step 0 next step 1000 2025-05-07T09:03:06.089075Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [2:733:2615], serverId# [2:734:2616], sessionId# [0:0:0] 2025-05-07T09:03:06.089163Z node 2 :TX_DATASHARD NOTICE: datashard__op_rows.cpp:168: Rejecting erase request on datashard: tablet# 72075186224037888, error# Can't execute erase at replicated table 2025-05-07T09:03:06.089293Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037888, clientId# [2:733:2615], serverId# [2:734:2616], sessionId# [0:0:0] |92.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/async_replication/py3test >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_index_0__SYNC-pk_types4-all_types4-index4---SYNC] [GOOD] >> DistributedEraseTests::ConditionalEraseRowsShouldFailOnVariousErrors [GOOD] >> DistributedEraseTests::ConditionalEraseRowsShouldFailOnSplit >> TSequence::CreateTableWithDefaultFromSequence [GOOD] >> TSequence::CreateTableWithDefaultFromSequenceAndIndex ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_erase_rows/unittest >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnTimestamp64 [GOOD] Test command err: 2025-05-07T09:02:55.070094Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:105:2151], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T09:02:55.070323Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T09:02:55.070644Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00293c/r3tmp/tmpAgtOx5/pdisk_1.dat 2025-05-07T09:02:56.443018Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-05-07T09:02:56.541281Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:02:56.652231Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:02:56.658067Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:02:56.681670Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:02:56.879645Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-05-07T09:02:57.014990Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:665:2569] 2025-05-07T09:02:57.015240Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-05-07T09:02:57.078609Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-05-07T09:02:57.078693Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-05-07T09:02:57.089065Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-05-07T09:02:57.089147Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-05-07T09:02:57.089199Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-05-07T09:02:57.104769Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-05-07T09:02:57.104959Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-05-07T09:02:57.105041Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:681:2569] in generation 1 2025-05-07T09:02:57.115656Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-05-07T09:02:57.140896Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-05-07T09:02:57.144092Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-05-07T09:02:57.144242Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 
72075186224037888, actorId: [1:683:2579] 2025-05-07T09:02:57.144283Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-05-07T09:02:57.144313Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-05-07T09:02:57.144348Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:02:57.150740Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-05-07T09:02:57.150840Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-05-07T09:02:57.150926Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T09:02:57.150962Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-05-07T09:02:57.151023Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-05-07T09:02:57.151078Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T09:02:57.151197Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:661:2566], serverId# [1:672:2573], sessionId# [0:0:0] 2025-05-07T09:02:57.164282Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-05-07T09:02:57.164559Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-05-07T09:02:57.164667Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-05-07T09:02:57.166463Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-05-07T09:02:57.177080Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-05-07T09:02:57.186621Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-05-07T09:02:57.334500Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:697:2587], serverId# [1:699:2589], sessionId# [0:0:0] 2025-05-07T09:02:57.338776Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715657 at step 1000 at tablet 72075186224037888 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1000 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2025-05-07T09:02:57.338851Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:02:57.339106Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T09:02:57.339147Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 
planned 1 2025-05-07T09:02:57.339203Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000:281474976715657] in PlanQueue unit at 72075186224037888 2025-05-07T09:02:57.339426Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1000:281474976715657 keys extracted: 0 2025-05-07T09:02:57.339578Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-05-07T09:02:57.340132Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T09:02:57.340209Z node 1 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-05-07T09:02:57.350163Z node 1 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-05-07T09:02:57.364171Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-05-07T09:02:57.366301Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-05-07T09:02:57.366364Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:02:57.366971Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-05-07T09:02:57.367041Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T09:02:57.368034Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T09:02:57.368080Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-05-07T09:02:57.368126Z node 1 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-05-07T09:02:57.368188Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [1:410:2405], exec latency: 0 ms, propose latency: 0 ms 2025-05-07T09:02:57.368289Z node 1 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-05-07T09:02:57.368387Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:02:57.372284Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-05-07T09:02:57.373828Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-05-07T09:02:57.373987Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-05-07T09:02:57.374051Z node 1 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got 
TEvSchemaChangedResult from SS at 72075186224037888
2025-05-07T09:02:57.522109Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:731:2613], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T09:02:57.522257Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:741:2618], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T09:02:57.522387Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:02:57.548623Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474 ... letID: 72075186224037888 } 2025-05-07T09:03:06.080129Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:03:06.080295Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T09:03:06.080329Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-05-07T09:03:06.080358Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000:281474976715657] in PlanQueue unit at 72075186224037888 2025-05-07T09:03:06.080539Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1000:281474976715657 keys extracted: 0 2025-05-07T09:03:06.080646Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-05-07T09:03:06.080783Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T09:03:06.080841Z node 2 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-05-07T09:03:06.081159Z node 2 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-05-07T09:03:06.081475Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-05-07T09:03:06.082968Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-05-07T09:03:06.083017Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:03:06.083677Z node 2 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-05-07T09:03:06.083729Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T09:03:06.084479Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T09:03:06.084559Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-05-07T09:03:06.084608Z node 2 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-05-07T09:03:06.084661Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [2:409:2404], exec latency: 0 ms, propose latency: 0 ms 2025-05-07T09:03:06.084701Z node 2 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-05-07T09:03:06.084762Z node 2 :TX_DATASHARD DEBUG: 
cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:03:06.085591Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-05-07T09:03:06.087155Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-05-07T09:03:06.087209Z node 2 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-05-07T09:03:06.087666Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-05-07T09:03:06.093316Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:731:2613], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T09:03:06.093402Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:742:2618], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T09:03:06.093461Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T09:03:06.097090Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480
2025-05-07T09:03:06.101288Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888
2025-05-07T09:03:06.256222Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888
2025-05-07T09:03:06.258611Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:745:2621], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-05-07T09:03:06.292250Z node 2 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [2:815:2660] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:03:06.370699Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715660. Ctx: { TraceId: 01jtmzqzbc1afqcwjn70q2p9nk, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YWI1NWE1NjgtMTNhYTI1OS01ZjgxMDZjLWViNTBhZjc5, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:03:06.375924Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [2:846:2677], serverId# [2:847:2678], sessionId# [0:0:0] 2025-05-07T09:03:06.376278Z node 2 :TX_DATASHARD DEBUG: execute_write_unit.cpp:245: Executing write operation for [0:2] at 72075186224037888 2025-05-07T09:03:06.376453Z node 2 :TX_DATASHARD DEBUG: execute_write_unit.cpp:410: Executed write operation for [0:2] at 72075186224037888, row count=5 2025-05-07T09:03:06.387162Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:03:06.389859Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [2:854:2684], serverId# [2:855:2685], sessionId# [0:0:0] 2025-05-07T09:03:06.390566Z node 2 :TX_DATASHARD INFO: datashard__op_rows.cpp:26: TTxDirectBase(48) Execute: at tablet# 72075186224037888 2025-05-07T09:03:06.401395Z node 2 :TX_DATASHARD INFO: datashard__op_rows.cpp:80: TTxDirectBase(48) Complete: at tablet# 72075186224037888 2025-05-07T09:03:06.401461Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:03:06.401665Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:4471: FullScan complete at 72075186224037888 2025-05-07T09:03:06.401701Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:4487: Conditional erase complete: cookie: 3, at: 72075186224037888 2025-05-07T09:03:06.401940Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T09:03:06.402001Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-05-07T09:03:06.402050Z node 2 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-05-07T09:03:06.402105Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T09:03:06.402179Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037888, clientId# [2:854:2684], serverId# [2:855:2685], sessionId# [0:0:0] 2025-05-07T09:03:06.402973Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-05-07T09:03:06.403281Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-05-07T09:03:06.403465Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T09:03:06.403525Z 
node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 1 active planned 0 immediate 1 planned 0 2025-05-07T09:03:06.403568Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715661] at 72075186224037888 for WaitForStreamClearance 2025-05-07T09:03:06.403761Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 0 immediate 1 planned 0 2025-05-07T09:03:06.403817Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T09:03:06.404372Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:514: Got quota for read table scan ShardId: 72075186224037888, TxId: 281474976715661, MessageQuota: 1 2025-05-07T09:03:06.404602Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:662: Send response data ShardId: 72075186224037888, TxId: 281474976715661, Size: 36, Rows: 0, PendingAcks: 1, MessageQuota: 0 2025-05-07T09:03:06.404714Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:483: Got stream data ack ShardId: 72075186224037888, TxId: 281474976715661, PendingAcks: 0 2025-05-07T09:03:06.404753Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:717: Finish scan ShardId: 72075186224037888, TxId: 281474976715661, MessageQuota: 0 2025-05-07T09:03:06.406133Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:4471: FullScan complete at 72075186224037888 2025-05-07T09:03:06.406176Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:4477: Found op: cookie: 281474976715661, at: 72075186224037888 2025-05-07T09:03:06.406421Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T09:03:06.406445Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 1 active planned 0 immediate 1 planned 0 2025-05-07T09:03:06.406472Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715661] at 72075186224037888 for ReadTableScan 2025-05-07T09:03:06.406574Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-05-07T09:03:06.406612Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T09:03:06.406642Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_erase_rows/unittest >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnUint64NanoSeconds [GOOD] Test command err: 2025-05-07T09:02:55.070040Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:105:2151], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T09:02:55.070293Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T09:02:55.070623Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0029a5/r3tmp/tmp99Xa2c/pdisk_1.dat 2025-05-07T09:02:56.437503Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-05-07T09:02:56.541296Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:02:56.652320Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:02:56.658223Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:02:56.681654Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:02:56.879663Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-05-07T09:02:57.014949Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:665:2569] 2025-05-07T09:02:57.015228Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-05-07T09:02:57.090494Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-05-07T09:02:57.090612Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-05-07T09:02:57.092306Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-05-07T09:02:57.092379Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-05-07T09:02:57.092432Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-05-07T09:02:57.104905Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-05-07T09:02:57.105080Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-05-07T09:02:57.105202Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:681:2569] in generation 1 2025-05-07T09:02:57.115933Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-05-07T09:02:57.156144Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-05-07T09:02:57.156354Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-05-07T09:02:57.156476Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 
72075186224037888, actorId: [1:683:2579] 2025-05-07T09:02:57.156519Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-05-07T09:02:57.156550Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-05-07T09:02:57.156584Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:02:57.156994Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-05-07T09:02:57.157087Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-05-07T09:02:57.157206Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T09:02:57.157246Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-05-07T09:02:57.157298Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-05-07T09:02:57.157355Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T09:02:57.157485Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:661:2566], serverId# [1:672:2573], sessionId# [0:0:0] 2025-05-07T09:02:57.164270Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-05-07T09:02:57.164544Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-05-07T09:02:57.164655Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-05-07T09:02:57.166394Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-05-07T09:02:57.177112Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-05-07T09:02:57.186635Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-05-07T09:02:57.334327Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:697:2587], serverId# [1:699:2589], sessionId# [0:0:0] 2025-05-07T09:02:57.338781Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715657 at step 1000 at tablet 72075186224037888 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1000 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2025-05-07T09:02:57.338861Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:02:57.339107Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T09:02:57.339147Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 
planned 1 2025-05-07T09:02:57.339201Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000:281474976715657] in PlanQueue unit at 72075186224037888 2025-05-07T09:02:57.339429Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1000:281474976715657 keys extracted: 0 2025-05-07T09:02:57.339577Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-05-07T09:02:57.340131Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T09:02:57.340220Z node 1 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-05-07T09:02:57.350189Z node 1 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-05-07T09:02:57.364149Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-05-07T09:02:57.366414Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-05-07T09:02:57.366466Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:02:57.367122Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-05-07T09:02:57.367202Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T09:02:57.368249Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T09:02:57.368293Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-05-07T09:02:57.368342Z node 1 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-05-07T09:02:57.368405Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [1:410:2405], exec latency: 0 ms, propose latency: 0 ms 2025-05-07T09:02:57.368517Z node 1 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-05-07T09:02:57.368623Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:02:57.372851Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-05-07T09:02:57.374601Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-05-07T09:02:57.374747Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-05-07T09:02:57.374806Z node 1 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got 
TEvSchemaChangedResult from SS at 72075186224037888
2025-05-07T09:02:57.520614Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:731:2613], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T09:02:57.520752Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:741:2618], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T09:02:57.520880Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:02:57.548660Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474 ... D: 72075186224037888 } 2025-05-07T09:03:06.273038Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:03:06.273178Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T09:03:06.273214Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-05-07T09:03:06.273253Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000:281474976715657] in PlanQueue unit at 72075186224037888 2025-05-07T09:03:06.273448Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1000:281474976715657 keys extracted: 0 2025-05-07T09:03:06.273595Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-05-07T09:03:06.273770Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T09:03:06.273860Z node 2 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-05-07T09:03:06.274262Z node 2 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-05-07T09:03:06.274589Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-05-07T09:03:06.276162Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-05-07T09:03:06.276227Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:03:06.276880Z node 2 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-05-07T09:03:06.276940Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T09:03:06.277741Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T09:03:06.277794Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-05-07T09:03:06.277835Z node 2 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-05-07T09:03:06.277884Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [2:409:2404], exec latency: 0 ms, propose latency: 0 ms 2025-05-07T09:03:06.277926Z node 2 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-05-07T09:03:06.278015Z node 2 :TX_DATASHARD DEBUG: 
cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:03:06.278844Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-05-07T09:03:06.280488Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-05-07T09:03:06.280543Z node 2 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-05-07T09:03:06.281030Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-05-07T09:03:06.286974Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:731:2613], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T09:03:06.287065Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:742:2618], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T09:03:06.287129Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T09:03:06.291032Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480
2025-05-07T09:03:06.295744Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888
2025-05-07T09:03:06.453084Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888
2025-05-07T09:03:06.456256Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:745:2621], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-05-07T09:03:06.490565Z node 2 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [2:815:2660] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:03:06.580115Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715660. Ctx: { TraceId: 01jtmzqzhd3ewtaehxbkc00maa, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=OTNhNTJjNjktZTFhYTU4MGQtYzAwNmVkYjctOTgxYzAyYTQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:03:06.582670Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [2:846:2677], serverId# [2:847:2678], sessionId# [0:0:0] 2025-05-07T09:03:06.583107Z node 2 :TX_DATASHARD DEBUG: execute_write_unit.cpp:245: Executing write operation for [0:2] at 72075186224037888 2025-05-07T09:03:06.583265Z node 2 :TX_DATASHARD DEBUG: execute_write_unit.cpp:410: Executed write operation for [0:2] at 72075186224037888, row count=4 2025-05-07T09:03:06.594193Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:03:06.596949Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [2:854:2684], serverId# [2:855:2685], sessionId# [0:0:0] 2025-05-07T09:03:06.597629Z node 2 :TX_DATASHARD INFO: datashard__op_rows.cpp:26: TTxDirectBase(48) Execute: at tablet# 72075186224037888 2025-05-07T09:03:06.608559Z node 2 :TX_DATASHARD INFO: datashard__op_rows.cpp:80: TTxDirectBase(48) Complete: at tablet# 72075186224037888 2025-05-07T09:03:06.608624Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:03:06.608810Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:4471: FullScan complete at 72075186224037888 2025-05-07T09:03:06.608846Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:4487: Conditional erase complete: cookie: 3, at: 72075186224037888 2025-05-07T09:03:06.609038Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T09:03:06.609074Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-05-07T09:03:06.609113Z node 2 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-05-07T09:03:06.609155Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T09:03:06.609217Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037888, clientId# [2:854:2684], serverId# [2:855:2685], sessionId# [0:0:0] 2025-05-07T09:03:06.609889Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-05-07T09:03:06.610150Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-05-07T09:03:06.610278Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 
2025-05-07T09:03:06.610324Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 1 active planned 0 immediate 1 planned 0 2025-05-07T09:03:06.610368Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715661] at 72075186224037888 for WaitForStreamClearance 2025-05-07T09:03:06.610533Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 0 immediate 1 planned 0 2025-05-07T09:03:06.610578Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T09:03:06.611007Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:514: Got quota for read table scan ShardId: 72075186224037888, TxId: 281474976715661, MessageQuota: 1 2025-05-07T09:03:06.611178Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:662: Send response data ShardId: 72075186224037888, TxId: 281474976715661, Size: 36, Rows: 0, PendingAcks: 1, MessageQuota: 0 2025-05-07T09:03:06.611265Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:483: Got stream data ack ShardId: 72075186224037888, TxId: 281474976715661, PendingAcks: 0 2025-05-07T09:03:06.611310Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:717: Finish scan ShardId: 72075186224037888, TxId: 281474976715661, MessageQuota: 0 2025-05-07T09:03:06.612603Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:4471: FullScan complete at 72075186224037888 2025-05-07T09:03:06.612641Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:4477: Found op: cookie: 281474976715661, at: 72075186224037888 2025-05-07T09:03:06.612910Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T09:03:06.612939Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 1 active planned 0 immediate 1 planned 0 2025-05-07T09:03:06.612966Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715661] at 72075186224037888 for ReadTableScan 2025-05-07T09:03:06.613055Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-05-07T09:03:06.613086Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T09:03:06.613118Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_erase_rows/unittest >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnUint64MicroSeconds [GOOD] Test command err: 2025-05-07T09:02:55.073600Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:105:2151], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T09:02:55.074000Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T09:02:55.074302Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0029bc/r3tmp/tmp7oTNOr/pdisk_1.dat 2025-05-07T09:02:56.438127Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-05-07T09:02:56.541240Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:02:56.652322Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:02:56.658223Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:02:56.681661Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:02:56.879724Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-05-07T09:02:57.014944Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:665:2569] 2025-05-07T09:02:57.015202Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-05-07T09:02:57.077527Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-05-07T09:02:57.077612Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-05-07T09:02:57.089067Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-05-07T09:02:57.089153Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-05-07T09:02:57.089225Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-05-07T09:02:57.104789Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-05-07T09:02:57.105020Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-05-07T09:02:57.105104Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:681:2569] in generation 1 2025-05-07T09:02:57.115814Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-05-07T09:02:57.151922Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-05-07T09:02:57.152145Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-05-07T09:02:57.152277Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 
72075186224037888, actorId: [1:683:2579] 2025-05-07T09:02:57.152320Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-05-07T09:02:57.152360Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-05-07T09:02:57.152392Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:02:57.152684Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-05-07T09:02:57.152736Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-05-07T09:02:57.152797Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T09:02:57.152823Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-05-07T09:02:57.152860Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-05-07T09:02:57.152897Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T09:02:57.152989Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:661:2566], serverId# [1:672:2573], sessionId# [0:0:0] 2025-05-07T09:02:57.164268Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-05-07T09:02:57.164539Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-05-07T09:02:57.164616Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-05-07T09:02:57.166349Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-05-07T09:02:57.176956Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-05-07T09:02:57.186552Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-05-07T09:02:57.334645Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:697:2587], serverId# [1:699:2589], sessionId# [0:0:0] 2025-05-07T09:02:57.344475Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715657 at step 1000 at tablet 72075186224037888 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1000 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2025-05-07T09:02:57.344535Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:02:57.344748Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T09:02:57.344780Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 
planned 1 2025-05-07T09:02:57.344813Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000:281474976715657] in PlanQueue unit at 72075186224037888 2025-05-07T09:02:57.344979Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1000:281474976715657 keys extracted: 0 2025-05-07T09:02:57.345088Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-05-07T09:02:57.345574Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T09:02:57.345630Z node 1 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-05-07T09:02:57.350170Z node 1 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-05-07T09:02:57.364123Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-05-07T09:02:57.366212Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-05-07T09:02:57.366264Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:02:57.366894Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-05-07T09:02:57.366975Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T09:02:57.367959Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T09:02:57.368002Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-05-07T09:02:57.368044Z node 1 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-05-07T09:02:57.368102Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [1:410:2405], exec latency: 0 ms, propose latency: 0 ms 2025-05-07T09:02:57.368202Z node 1 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-05-07T09:02:57.368303Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:02:57.372052Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-05-07T09:02:57.373621Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-05-07T09:02:57.373752Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-05-07T09:02:57.373804Z node 1 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got 
TEvSchemaChangedResult from SS at 72075186224037888 2025-05-07T09:02:57.522073Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:731:2613], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:02:57.522212Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:741:2618], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:02:57.522335Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:02:57.548562Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474 ... D: 72075186224037888 } 2025-05-07T09:03:06.183393Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:03:06.183557Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T09:03:06.183600Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-05-07T09:03:06.183650Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000:281474976715657] in PlanQueue unit at 72075186224037888 2025-05-07T09:03:06.183886Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1000:281474976715657 keys extracted: 0 2025-05-07T09:03:06.184080Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-05-07T09:03:06.184266Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T09:03:06.184342Z node 2 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-05-07T09:03:06.184741Z node 2 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-05-07T09:03:06.185171Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-05-07T09:03:06.187352Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-05-07T09:03:06.187424Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:03:06.188247Z node 2 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-05-07T09:03:06.188321Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T09:03:06.189406Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T09:03:06.189468Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-05-07T09:03:06.189515Z node 2 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-05-07T09:03:06.189573Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [2:409:2404], exec latency: 0 ms, propose latency: 0 ms 2025-05-07T09:03:06.189622Z node 2 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-05-07T09:03:06.189704Z node 2 :TX_DATASHARD DEBUG: 
cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:03:06.190746Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-05-07T09:03:06.192841Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-05-07T09:03:06.192910Z node 2 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-05-07T09:03:06.193480Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-05-07T09:03:06.201095Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:731:2613], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:03:06.201191Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:742:2618], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:03:06.201294Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:03:06.206521Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480 2025-05-07T09:03:06.212499Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-05-07T09:03:06.380739Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-05-07T09:03:06.382932Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:745:2621], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-05-07T09:03:06.416904Z node 2 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [2:815:2660] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:03:06.470194Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715660. Ctx: { TraceId: 01jtmzqzeqcca6m9nh8em2h3km, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NDcxODk2YzUtYzgzMzNlMTQtOTZmZTcyMDQtOGEyMzlmYzk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:03:06.472000Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [2:846:2677], serverId# [2:847:2678], sessionId# [0:0:0] 2025-05-07T09:03:06.472298Z node 2 :TX_DATASHARD DEBUG: execute_write_unit.cpp:245: Executing write operation for [0:2] at 72075186224037888 2025-05-07T09:03:06.472463Z node 2 :TX_DATASHARD DEBUG: execute_write_unit.cpp:410: Executed write operation for [0:2] at 72075186224037888, row count=4 2025-05-07T09:03:06.483272Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:03:06.486190Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [2:854:2684], serverId# [2:855:2685], sessionId# [0:0:0] 2025-05-07T09:03:06.486875Z node 2 :TX_DATASHARD INFO: datashard__op_rows.cpp:26: TTxDirectBase(48) Execute: at tablet# 72075186224037888 2025-05-07T09:03:06.497718Z node 2 :TX_DATASHARD INFO: datashard__op_rows.cpp:80: TTxDirectBase(48) Complete: at tablet# 72075186224037888 2025-05-07T09:03:06.497781Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:03:06.497944Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:4471: FullScan complete at 72075186224037888 2025-05-07T09:03:06.498007Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:4487: Conditional erase complete: cookie: 3, at: 72075186224037888 2025-05-07T09:03:06.498210Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T09:03:06.498247Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-05-07T09:03:06.498284Z node 2 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-05-07T09:03:06.498323Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T09:03:06.498377Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037888, clientId# [2:854:2684], serverId# [2:855:2685], sessionId# [0:0:0] 2025-05-07T09:03:06.498970Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-05-07T09:03:06.499213Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-05-07T09:03:06.499340Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 
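
The "Scheduled retry for error" and "path exist, request accepts it" lines above are a create-if-missing race: two actors attempt to create the default resource pool, one loses, and the loser treats the already-existing path as success after a doublecheck. Below is a minimal, self-contained C++ sketch of that generic retry pattern; EStatus, CreateResourcePoolStub, and the backoff parameters are invented for illustration and are not YDB's actual API.

#include <chrono>
#include <functional>
#include <iostream>
#include <thread>

enum class EStatus { Ok, NotFound, AlreadyExists, Retryable };

// Stub standing in for the real operation: fails twice with NotFound, then
// reports AlreadyExists, as when a concurrent actor created the pool first.
EStatus CreateResourcePoolStub() {
    static int calls = 0;
    return ++calls < 3 ? EStatus::NotFound : EStatus::AlreadyExists;
}

bool CreateWithRetry(const std::function<EStatus()>& op, int maxAttempts = 5,
                     std::chrono::milliseconds backoff = std::chrono::milliseconds(100)) {
    for (int attempt = 1; attempt <= maxAttempts; ++attempt) {
        switch (op()) {
            case EStatus::Ok:
            case EStatus::AlreadyExists: // losing the creation race still counts as success
                return true;
            case EStatus::NotFound:      // metadata not bootstrapped yet
            case EStatus::Retryable:     // e.g. "... completed, doublechecking"
                std::this_thread::sleep_for(backoff * attempt); // linear backoff
                break;
        }
    }
    return false;
}

int main() {
    std::cout << (CreateWithRetry(CreateResourcePoolStub) ? "created" : "gave up") << '\n';
}
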
2025-05-07T09:03:06.499369Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 1 active planned 0 immediate 1 planned 0 2025-05-07T09:03:06.499402Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715661] at 72075186224037888 for WaitForStreamClearance 2025-05-07T09:03:06.499559Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 0 immediate 1 planned 0 2025-05-07T09:03:06.499621Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T09:03:06.500118Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:514: Got quota for read table scan ShardId: 72075186224037888, TxId: 281474976715661, MessageQuota: 1 2025-05-07T09:03:06.500280Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:662: Send response data ShardId: 72075186224037888, TxId: 281474976715661, Size: 36, Rows: 0, PendingAcks: 1, MessageQuota: 0 2025-05-07T09:03:06.500366Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:483: Got stream data ack ShardId: 72075186224037888, TxId: 281474976715661, PendingAcks: 0 2025-05-07T09:03:06.500402Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:717: Finish scan ShardId: 72075186224037888, TxId: 281474976715661, MessageQuota: 0 2025-05-07T09:03:06.501795Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:4471: FullScan complete at 72075186224037888 2025-05-07T09:03:06.501832Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:4477: Found op: cookie: 281474976715661, at: 72075186224037888 2025-05-07T09:03:06.502116Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T09:03:06.502146Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 1 active planned 0 immediate 1 planned 0 2025-05-07T09:03:06.502174Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715661] at 72075186224037888 for ReadTableScan 2025-05-07T09:03:06.502259Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-05-07T09:03:06.502292Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T09:03:06.502320Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 >> EraseRowsTests::ConditionalEraseRowsShouldFailOnVariousErrors [GOOD] >> YdbLogStore::AlterLogTable [FAIL] >> EraseRowsTests::ConditionalEraseRowsShouldBreakLocks [GOOD] |92.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_index_build_reboots/ydb-core-tx-schemeshard-ut_index_build_reboots |92.3%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_index_build_reboots/ydb-core-tx-schemeshard-ut_index_build_reboots |92.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_index_build_reboots/ydb-core-tx-schemeshard-ut_index_build_reboots >> DistributedEraseTests::ConditionalEraseRowsShouldFailOnSchemeTx [GOOD] >> DistributedEraseTests::ConditionalEraseRowsShouldFailOnDeadShard >> TSequence::CreateTableWithDefaultFromSequenceAndIndex [GOOD] >> YdbOlapStore::LogWithUnionAllAscending [GOOD] >> YdbOlapStore::LogWithUnionAllDescending ------- [TM] {asan, default-linux-x86_64, release} 
ydb/core/tx/datashard/ut_erase_rows/unittest >> EraseRowsTests::ConditionalEraseRowsShouldFailOnVariousErrors [GOOD] Test command err: 2025-05-07T09:02:55.073456Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:105:2151], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T09:02:55.073730Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T09:02:55.074064Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0029ee/r3tmp/tmpzcKkTp/pdisk_1.dat 2025-05-07T09:02:56.437709Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-05-07T09:02:56.541196Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:02:56.652292Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:02:56.658200Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:02:56.681910Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:02:56.879786Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-05-07T09:02:57.015020Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:665:2569] 2025-05-07T09:02:57.015373Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-05-07T09:02:57.078100Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-05-07T09:02:57.078192Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-05-07T09:02:57.089473Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-05-07T09:02:57.089572Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-05-07T09:02:57.089630Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-05-07T09:02:57.104908Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-05-07T09:02:57.105086Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-05-07T09:02:57.105172Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:681:2569] in generation 1 2025-05-07T09:02:57.115780Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-05-07T09:02:57.137355Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-05-07T09:02:57.143960Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-05-07T09:02:57.144153Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 
72075186224037888, actorId: [1:683:2579] 2025-05-07T09:02:57.144197Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-05-07T09:02:57.144229Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-05-07T09:02:57.144267Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:02:57.150739Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-05-07T09:02:57.150851Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-05-07T09:02:57.150941Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T09:02:57.150974Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-05-07T09:02:57.151016Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-05-07T09:02:57.151052Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T09:02:57.151226Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:661:2566], serverId# [1:672:2573], sessionId# [0:0:0] 2025-05-07T09:02:57.164327Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-05-07T09:02:57.164575Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-05-07T09:02:57.164683Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-05-07T09:02:57.166398Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-05-07T09:02:57.177109Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-05-07T09:02:57.186639Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-05-07T09:02:57.334641Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:697:2587], serverId# [1:699:2589], sessionId# [0:0:0] 2025-05-07T09:02:57.339240Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715657 at step 1000 at tablet 72075186224037888 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1000 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2025-05-07T09:02:57.339313Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:02:57.339564Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T09:02:57.339608Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 
planned 1 2025-05-07T09:02:57.339664Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000:281474976715657] in PlanQueue unit at 72075186224037888 2025-05-07T09:02:57.339882Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1000:281474976715657 keys extracted: 0 2025-05-07T09:02:57.340033Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-05-07T09:02:57.340605Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T09:02:57.340691Z node 1 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-05-07T09:02:57.350211Z node 1 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-05-07T09:02:57.364191Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-05-07T09:02:57.366459Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-05-07T09:02:57.366523Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:02:57.367172Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-05-07T09:02:57.367255Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T09:02:57.368406Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T09:02:57.368451Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-05-07T09:02:57.368496Z node 1 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-05-07T09:02:57.368567Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [1:410:2405], exec latency: 0 ms, propose latency: 0 ms 2025-05-07T09:02:57.368622Z node 1 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-05-07T09:02:57.368729Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:02:57.372917Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-05-07T09:02:57.374582Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-05-07T09:02:57.374755Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-05-07T09:02:57.374811Z node 1 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got 
TEvSchemaChangedResult from SS at 72075186224037888 2025-05-07T09:02:57.520668Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:731:2613], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:02:57.520837Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:741:2618], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:02:57.520988Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:02:57.548650Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474 ... main_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037892 2025-05-07T09:03:07.314423Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037893 2025-05-07T09:03:07.314462Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-05-07T09:03:07.351677Z node 2 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037894 actor [2:1249:3028] 2025-05-07T09:03:07.351872Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-05-07T09:03:07.359790Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-05-07T09:03:07.359954Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-05-07T09:03:07.361412Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037894 2025-05-07T09:03:07.361493Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037894 2025-05-07T09:03:07.361540Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037894 2025-05-07T09:03:07.361916Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-05-07T09:03:07.362073Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-05-07T09:03:07.362144Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037894 persisting started state actor id [2:1266:3028] in generation 1 2025-05-07T09:03:07.383254Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-05-07T09:03:07.383349Z node 2 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037894 2025-05-07T09:03:07.383447Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037894 not sending time cast registration request in state WaitScheme: missing processing params 2025-05-07T09:03:07.383523Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037894, actorId: [2:1268:3038] 2025-05-07T09:03:07.383549Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037894 2025-05-07T09:03:07.383578Z node 2 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037894, state: WaitScheme 2025-05-07T09:03:07.383607Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037894 2025-05-07T09:03:07.383981Z node 2 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037894 2025-05-07T09:03:07.384083Z node 2 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037894 2025-05-07T09:03:07.384171Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: 
TTxProgressTransaction::Execute at 72075186224037894 2025-05-07T09:03:07.384204Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037894 active 0 active planned 0 immediate 0 planned 0 2025-05-07T09:03:07.384240Z node 2 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037894 TxInFly 0 2025-05-07T09:03:07.384273Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037894 2025-05-07T09:03:07.384352Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037894, clientId# [2:1247:3026], serverId# [2:1255:3030], sessionId# [0:0:0] 2025-05-07T09:03:07.384731Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037894 2025-05-07T09:03:07.384942Z node 2 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037894 txId 281474976715663 ssId 72057594046644480 seqNo 2:7 2025-05-07T09:03:07.385044Z node 2 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715663 at tablet 72075186224037894 2025-05-07T09:03:07.385489Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037894 2025-05-07T09:03:07.396191Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037894 2025-05-07T09:03:07.396278Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037894 not sending time cast registration request in state WaitScheme 2025-05-07T09:03:07.533813Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037894, clientId# [2:1274:3044], serverId# [2:1276:3046], sessionId# [0:0:0] 2025-05-07T09:03:07.534261Z node 2 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715663 at step 4000 at tablet 72075186224037894 { Transactions { TxId: 281474976715663 AckTo { RawX1: 0 RawX2: 0 } } Step: 4000 MediatorID: 72057594046382081 TabletID: 72075186224037894 } 2025-05-07T09:03:07.534307Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037894 2025-05-07T09:03:07.534687Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037894 2025-05-07T09:03:07.534729Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037894 active 0 active planned 0 immediate 0 planned 1 2025-05-07T09:03:07.534771Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [4000:281474976715663] in PlanQueue unit at 72075186224037894 2025-05-07T09:03:07.535012Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037894 loaded tx from db 4000:281474976715663 keys extracted: 0 2025-05-07T09:03:07.535148Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037894 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-05-07T09:03:07.535820Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037894 2025-05-07T09:03:07.535888Z node 2 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037894 tableId# [OwnerId: 72057594046644480, LocalPathId: 8] schema version# 1 
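
Nearly every entry in this dump follows the same shape: an ISO-8601 timestamp, a node id, a :COMPONENT tag, a severity, a source location, and the message. A small standard-library-only filter, assuming exactly that shape (the struct and field names are mine), can keep just the WARN/ERROR entries when skimming a dump like this one; continuation fragments and test-status markers simply fail to parse and are skipped.

#include <iostream>
#include <optional>
#include <regex>
#include <string>

struct TLogEntry {
    std::string Timestamp, Component, Level, Location, Message;
    int Node = 0;
};

std::optional<TLogEntry> ParseLine(const std::string& line) {
    // "<ts>Z node <N> :<COMPONENT> <LEVEL>: <file>:<line>: <message>"
    static const std::regex re(
        R"(^(\S+Z) node (\d+) :(\w+) (\w+): ([\w.]+:\d+): (.*)$)");
    std::smatch m;
    if (!std::regex_match(line, m, re))
        return std::nullopt; // wrapped fragments, ">> Test [GOOD]" markers, etc.
    return TLogEntry{m[1], m[3], m[4], m[5], m[6], std::stoi(m[2])};
}

int main() {
    std::string line;
    while (std::getline(std::cin, line))
        if (auto e = ParseLine(line))
            if (e->Level == "ERROR" || e->Level == "WARN")
                std::cout << e->Component << ' ' << e->Message << '\n';
}
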
2025-05-07T09:03:07.536249Z node 2 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037894 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-05-07T09:03:07.536618Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037894 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-05-07T09:03:07.538411Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037894 time 3500 2025-05-07T09:03:07.538454Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037894 2025-05-07T09:03:07.538993Z node 2 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037894 step# 4000} 2025-05-07T09:03:07.539051Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037894 2025-05-07T09:03:07.540578Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037894 2025-05-07T09:03:07.540621Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037894 2025-05-07T09:03:07.540659Z node 2 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037894 2025-05-07T09:03:07.540742Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [4000 : 281474976715663] from 72075186224037894 at tablet 72075186224037894 send result to client [2:409:2404], exec latency: 0 ms, propose latency: 0 ms 2025-05-07T09:03:07.540787Z node 2 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037894 Sending notify to schemeshard 72057594046644480 txId 281474976715663 state Ready TxInFly 0 2025-05-07T09:03:07.540852Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037894 2025-05-07T09:03:07.541621Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037894 2025-05-07T09:03:07.541696Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037889 2025-05-07T09:03:07.541933Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037890 2025-05-07T09:03:07.542129Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037891 2025-05-07T09:03:07.542187Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037892 2025-05-07T09:03:07.542243Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037893 2025-05-07T09:03:07.542291Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-05-07T09:03:07.542790Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got 
TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037894 coordinator 72057594046316545 last step 0 next step 4000 2025-05-07T09:03:07.543673Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715663 datashard 72075186224037894 state Ready 2025-05-07T09:03:07.543724Z node 2 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037894 Got TEvSchemaChangedResult from SS at 72075186224037894 2025-05-07T09:03:07.548353Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037894, clientId# [2:1303:3067], serverId# [2:1304:3068], sessionId# [0:0:0] 2025-05-07T09:03:07.548553Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037894, clientId# [2:1303:3067], serverId# [2:1304:3068], sessionId# [0:0:0] 2025-05-07T09:03:07.549856Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037894, clientId# [2:1308:3072], serverId# [2:1309:3073], sessionId# [0:0:0] 2025-05-07T09:03:07.550074Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037894, clientId# [2:1308:3072], serverId# [2:1309:3073], sessionId# [0:0:0] 2025-05-07T09:03:07.551645Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037894, clientId# [2:1313:3077], serverId# [2:1314:3078], sessionId# [0:0:0] 2025-05-07T09:03:07.551872Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037894, clientId# [2:1313:3077], serverId# [2:1314:3078], sessionId# [0:0:0] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_erase_rows/unittest >> EraseRowsTests::ConditionalEraseRowsShouldBreakLocks [GOOD] Test command err: 2025-05-07T09:02:55.069612Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:105:2151], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T09:02:55.069791Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T09:02:55.070068Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0029cc/r3tmp/tmpZ12NU7/pdisk_1.dat 2025-05-07T09:02:56.441791Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-05-07T09:02:56.541233Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:02:56.652259Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:02:56.658213Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:02:56.681823Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:02:56.879623Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-05-07T09:02:57.014991Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:687:2585] 2025-05-07T09:02:57.015208Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-05-07T09:02:57.077952Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-05-07T09:02:57.078123Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-05-07T09:02:57.089041Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-05-07T09:02:57.089145Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-05-07T09:02:57.089207Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-05-07T09:02:57.104768Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-05-07T09:02:57.105087Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-05-07T09:02:57.105151Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:712:2585] in generation 1 2025-05-07T09:02:57.106622Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037889 actor [1:689:2587] 2025-05-07T09:02:57.106747Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-05-07T09:02:57.113165Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037890 actor [1:693:2589] 2025-05-07T09:02:57.113304Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-05-07T09:02:57.119861Z node 1 :TX_DATASHARD DEBUG: 
datashard__init.cpp:696: TxInitSchema.Complete 2025-05-07T09:02:57.119944Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-05-07T09:02:57.121075Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037889 2025-05-07T09:02:57.121134Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037889 2025-05-07T09:02:57.121171Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037889 2025-05-07T09:02:57.121399Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-05-07T09:02:57.121476Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-05-07T09:02:57.121525Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037889 persisting started state actor id [1:736:2587] in generation 1 2025-05-07T09:02:57.121850Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-05-07T09:02:57.121915Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-05-07T09:02:57.122968Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037890 2025-05-07T09:02:57.123009Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037890 2025-05-07T09:02:57.123039Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037890 2025-05-07T09:02:57.123237Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-05-07T09:02:57.123306Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-05-07T09:02:57.123340Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037890 persisting started state actor id [1:737:2589] in generation 1 2025-05-07T09:02:57.134069Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-05-07T09:02:57.155896Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-05-07T09:02:57.156093Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-05-07T09:02:57.156193Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:741:2615] 2025-05-07T09:02:57.156253Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-05-07T09:02:57.156287Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-05-07T09:02:57.156335Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:02:57.156801Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-05-07T09:02:57.156844Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037889 2025-05-07T09:02:57.156898Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037889 not sending time cast registration request in state WaitScheme: missing processing params 2025-05-07T09:02:57.156971Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037889, 
actorId: [1:742:2616] 2025-05-07T09:02:57.156993Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037889 2025-05-07T09:02:57.157009Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037889, state: WaitScheme 2025-05-07T09:02:57.157036Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-05-07T09:02:57.157085Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-05-07T09:02:57.157104Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037890 2025-05-07T09:02:57.157135Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037890 not sending time cast registration request in state WaitScheme: missing processing params 2025-05-07T09:02:57.157168Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037890, actorId: [1:743:2617] 2025-05-07T09:02:57.157181Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037890 2025-05-07T09:02:57.157204Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037890, state: WaitScheme 2025-05-07T09:02:57.157217Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037890 2025-05-07T09:02:57.157333Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-05-07T09:02:57.157401Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-05-07T09:02:57.157529Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T09:02:57.157562Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-05-07T09:02:57.157595Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-05-07T09:02:57.157625Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T09:02:57.157723Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:676:2580], serverId# [1:702:2593], sessionId# [0:0:0] 2025-05-07T09:02:57.157753Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037889 2025-05-07T09:02:57.157801Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037889 2025-05-07T09:02:57.157827Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037890 2025-05-07T09:02:57.157855Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037890 2025-05-07T09:02:57.157955Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-05-07T09:02:57.164434Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 
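
The WaitScheme messages above encode a simple invariant: a freshly created datashard stays in WaitScheme until its first scheme transaction is applied, and the change sender can only be activated once the shard reaches Ready ("Cannot activate change sender ... state: WaitScheme", later "Change sender activated"). A deliberately reduced two-state model of that rule follows; the real state machine in datashard.cpp has many more states, so this only mirrors the transitions named in the log.

#include <cassert>
#include <iostream>

enum class EShardState { WaitScheme, Ready };

struct TShardModel {
    EShardState State = EShardState::WaitScheme; // "Switched to work state WaitScheme"
    bool ChangeSenderActive = false;

    bool TryActivateChangeSender() {
        if (State != EShardState::Ready)
            return false;          // "Cannot activate change sender ... state: WaitScheme"
        ChangeSenderActive = true; // "Change sender activated"
        return true;
    }

    // First scheme tx (e.g. ESchemeOpCreateTable) completes -> shard is Ready.
    void ApplySchemeTx() { State = EShardState::Ready; }
};

int main() {
    TShardModel shard;
    assert(!shard.TryActivateChangeSender()); // rejected while WaitScheme
    shard.ApplySchemeTx();                    // like txId 281474976715657 above
    assert(shard.TryActivateChangeSender());
    std::cout << std::boolalpha << shard.ChangeSenderActive << '\n';
}
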
2025-05-07T09:02:57.164566Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-05-07T09:02:57.165009Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037889 2025-05-07T09:02:57.165051Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 active 0 active planned 0 immediate 0 planned 0 2025-05-07T09:02:57.165083Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037889 TxInFly 0 2025-05-07T09:02:57.165125Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-05-07T09:02:57.165206Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037890 2025-05-07T09:02:57.165230Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037890 active 0 active planned 0 immediate 0 planned 0 2025-05-07T09:02:57.165254Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186 ... dcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:03:06.406166Z node 3 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-05-07T09:03:06.406226Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T09:03:06.407162Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T09:03:06.407195Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-05-07T09:03:06.407250Z node 3 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-05-07T09:03:06.407298Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [3:419:2412], exec latency: 0 ms, propose latency: 0 ms 2025-05-07T09:03:06.407351Z node 3 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-05-07T09:03:06.407416Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:03:06.407887Z node 3 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-05-07T09:03:06.409671Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-05-07T09:03:06.409813Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-05-07T09:03:06.409859Z node 3 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-05-07T09:03:06.416812Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:731:2613], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:03:06.416907Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:742:2618], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:03:06.416975Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:03:06.421812Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480 2025-05-07T09:03:06.427337Z node 3 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-05-07T09:03:06.584813Z node 3 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-05-07T09:03:06.587168Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:745:2621], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-05-07T09:03:06.620788Z node 3 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [3:815:2660] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:03:06.673338Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715660. Ctx: { TraceId: 01jtmzqznf46jr0cv23ykcheyp, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=ODZlZGVjZmQtZTA5MmEzNTYtNmE3OTUyNS04NTI4YjVlNA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:03:06.675476Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [3:846:2677], serverId# [3:847:2678], sessionId# [0:0:0] 2025-05-07T09:03:06.675805Z node 3 :TX_DATASHARD DEBUG: execute_write_unit.cpp:245: Executing write operation for [0:2] at 72075186224037888 2025-05-07T09:03:06.675931Z node 3 :TX_DATASHARD DEBUG: execute_write_unit.cpp:410: Executed write operation for [0:2] at 72075186224037888, row count=3 2025-05-07T09:03:06.686709Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:03:07.526900Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715661. Ctx: { TraceId: 01jtmzqzzq5cey1bcy09bjtxbc, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=ZTZjMmIyZGEtZjA5MThlMDgtMmNmN2JiZmUtYTVlYmU3MzI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:03:07.539446Z node 3 :TX_DATASHARD DEBUG: datashard__read_iterator.cpp:2410: 72075186224037888 Acquired lock# 281474976715661, counter# 0 for [OwnerId: 72057594046644480, LocalPathId: 2] { items { uint64_value: 0 } } 2025-05-07T09:03:07.566105Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [3:885:2708], serverId# [3:886:2709], sessionId# [0:0:0] 2025-05-07T09:03:07.567290Z node 3 :TX_DATASHARD INFO: datashard__op_rows.cpp:26: TTxDirectBase(48) Execute: at tablet# 72075186224037888 2025-05-07T09:03:07.578857Z node 3 :TX_DATASHARD INFO: datashard__op_rows.cpp:80: TTxDirectBase(48) Complete: at tablet# 72075186224037888 2025-05-07T09:03:07.578963Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:03:07.579035Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:2560: Waiting for PlanStep# 1501 from mediator time cast 2025-05-07T09:03:07.579878Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3780: Notified by mediator time cast with PlanStep# 1501 at tablet 72075186224037888 2025-05-07T09:03:07.579962Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:03:07.580155Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4471: FullScan complete at 72075186224037888 2025-05-07T09:03:07.580227Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4487: Conditional erase complete: cookie: 4, at: 72075186224037888 2025-05-07T09:03:07.580612Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T09:03:07.580670Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: 
GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-05-07T09:03:07.580729Z node 3 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-05-07T09:03:07.580797Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T09:03:07.580909Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037888, clientId# [3:885:2708], serverId# [3:886:2709], sessionId# [0:0:0] 2025-05-07T09:03:07.638475Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715662. Ctx: { TraceId: 01jtmzr0sx7sccajtcrstk0s7w, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=ZTZjMmIyZGEtZjA5MThlMDgtMmNmN2JiZmUtYTVlYmU3MzI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:03:07.640562Z node 3 :TX_DATASHARD DEBUG: execute_write_unit.cpp:245: Executing write operation for [0:6] at 72075186224037888 2025-05-07T09:03:07.640696Z node 3 :GLOBAL WARN: log.cpp:784: fline=events.h:103;event=ev_write_error;status=STATUS_LOCKS_BROKEN;details=Operation is aborting because locks are not valid;tx_id=6; 2025-05-07T09:03:07.646730Z node 3 :TX_DATASHARD INFO: datashard_write_operation.cpp:684: Write transaction 6 at 72075186224037888 has an error: Operation is aborting because locks are not valid 2025-05-07T09:03:07.646937Z node 3 :TX_DATASHARD ERROR: finish_propose_write_unit.cpp:226: Prepare transaction failed. txid 6 at tablet 72075186224037888 errors: Status: STATUS_LOCKS_BROKEN Issues: { message: "Operation is aborting because locks are not valid" issue_code: 2001 severity: 1 } 2025-05-07T09:03:07.647105Z node 3 :TX_DATASHARD ERROR: finish_propose_write_unit.cpp:168: Errors while proposing transaction txid 6 at tablet 72075186224037888 Status: STATUS_LOCKS_BROKEN Issues: { message: "Operation is aborting because locks are not valid" issue_code: 2001 severity: 1 } 2025-05-07T09:03:07.647175Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:03:07.647387Z node 3 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:748: SelfId: [3:907:2683], Table: `/Root/table-1` ([72057594046644480:2:1]), SessionActorId: [3:853:2683]Got LOCKS BROKEN for table `/Root/table-1`. ShardID=72075186224037888, Sink=[3:907:2683].{
: Error: Operation is aborting because locks are not valid, code: 2001 } 2025-05-07T09:03:07.647848Z node 3 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:2833: SelfId: [3:900:2683], SessionActorId: [3:853:2683], statusCode=ABORTED. Issue=
: Error: Transaction locks invalidated. Table: `/Root/table-1`., code: 2001
: Error: Operation is aborting because locks are not valid, code: 2001 . sessionActorId=[3:853:2683]. isRollback=0 2025-05-07T09:03:07.648215Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:1840: SessionId: ydb://session/3?node_id=3&id=ZTZjMmIyZGEtZjA5MThlMDgtMmNmN2JiZmUtYTVlYmU3MzI=, ActorId: [3:853:2683], ActorState: ExecuteState, TraceId: 01jtmzr0sx7sccajtcrstk0s7w, got TEvKqpBuffer::TEvError in ExecuteState, status: ABORTED send to: [3:901:2683] from: [3:900:2683] 2025-05-07T09:03:07.648355Z node 3 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1944: ActorId: [3:901:2683] TxId: 281474976715662. Ctx: { TraceId: 01jtmzr0sx7sccajtcrstk0s7w, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=ZTZjMmIyZGEtZjA5MThlMDgtMmNmN2JiZmUtYTVlYmU3MzI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ABORTED: {
: Error: Transaction locks invalidated. Table: `/Root/table-1`., code: 2001 subissue: {
: Error: Operation is aborting because locks are not valid, code: 2001 } } 2025-05-07T09:03:07.648559Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2578: SessionId: ydb://session/3?node_id=3&id=ZTZjMmIyZGEtZjA5MThlMDgtMmNmN2JiZmUtYTVlYmU3MzI=, ActorId: [3:853:2683], ActorState: ExecuteState, TraceId: 01jtmzr0sx7sccajtcrstk0s7w, Create QueryResponse for error on request, msg: 2025-05-07T09:03:07.649208Z node 3 :TX_DATASHARD DEBUG: execute_write_unit.cpp:245: Executing write operation for [0:7] at 72075186224037888 2025-05-07T09:03:07.649255Z node 3 :TX_DATASHARD DEBUG: execute_write_unit.cpp:414: Skip empty write operation for [0:7] at 72075186224037888 2025-05-07T09:03:07.649375Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_sequence/unittest >> TSequence::CreateTableWithDefaultFromSequenceAndIndex [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140] 2025-05-07T09:03:01.346639Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T09:03:01.346752Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:03:01.346791Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T09:03:01.346827Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T09:03:01.352660Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T09:03:01.352724Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T09:03:01.352809Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:03:01.352890Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T09:03:01.353545Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T09:03:01.376069Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T09:03:01.527790Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T09:03:01.527865Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:03:01.565863Z node 
1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T09:03:01.566062Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T09:03:01.566214Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T09:03:01.590110Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T09:03:01.590429Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T09:03:01.637768Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T09:03:01.638092Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T09:03:01.677452Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:03:01.741301Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:03:01.741386Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:03:01.741455Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T09:03:01.741497Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:03:01.741577Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T09:03:01.748216Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T09:03:01.754923Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T09:03:01.906673Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T09:03:01.914169Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:03:01.935904Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T09:03:01.945616Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T09:03:01.945754Z 
node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:03:01.961635Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T09:03:01.961774Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T09:03:01.961924Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:03:01.962057Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T09:03:01.962098Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T09:03:01.962135Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T09:03:01.963564Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:03:01.963609Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T09:03:01.963641Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T09:03:01.964748Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:03:01.964793Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:03:01.964833Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:03:01.964868Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T09:03:01.977385Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T09:03:01.979225Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T09:03:01.992661Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 
72057594046678944 for txId: 1 at step: 5000001 2025-05-07T09:03:01.993635Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T09:03:01.993755Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:03:01.993793Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:03:02.003408Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T09:03:02.003491Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:03:02.003691Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T09:03:02.003803Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T09:03:02.006013Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:03:02.006096Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:03:02.006290Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:03:02.006336Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 
102 ready parts: 3/4 2025-05-07T09:03:08.340660Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#102:2 progress is 3/4 2025-05-07T09:03:08.340695Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 102 ready parts: 3/4 2025-05-07T09:03:08.340730Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 102, ready parts: 3/4, is published: true 2025-05-07T09:03:08.341290Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-05-07T09:03:08.341314Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:275: Activate send for 102:0 2025-05-07T09:03:08.341361Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:630: Send to actor: [7:340:2318] msg type: 269552132 msg: NKikimrTxDataShard.TEvSchemaChangedResult TxId: 102 at schemeshard: 72057594046678944 2025-05-07T09:03:08.341644Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-05-07T09:03:08.341673Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-05-07T09:03:08.341848Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 2146435072, Sender [7:134:2157], Recipient [7:134:2157]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-05-07T09:03:08.341871Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4857: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-05-07T09:03:08.341902Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-05-07T09:03:08.341928Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_table.cpp:414: TDropTable TProposedDeletePart operationId: 102:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T09:03:08.342125Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove table for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5 2025-05-07T09:03:08.342201Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046678944 2025-05-07T09:03:08.342220Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#102:0 progress is 4/4 2025-05-07T09:03:08.342237Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 102 ready parts: 4/4 2025-05-07T09:03:08.342262Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#102:0 progress is 4/4 2025-05-07T09:03:08.342279Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 102 ready parts: 4/4 2025-05-07T09:03:08.342298Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 102, ready parts: 4/4, is published: true 2025-05-07T09:03:08.342348Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [7:411:2369] message: TxId: 102 2025-05-07T09:03:08.342389Z node 7 :FLAT_TX_SCHEMESHARD 
DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 102 ready parts: 4/4 2025-05-07T09:03:08.342427Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 102:0 2025-05-07T09:03:08.342462Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 102:0 2025-05-07T09:03:08.342568Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-05-07T09:03:08.342606Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 102:1 2025-05-07T09:03:08.342622Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 102:1 2025-05-07T09:03:08.342650Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-05-07T09:03:08.342671Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 102:2 2025-05-07T09:03:08.342685Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 102:2 2025-05-07T09:03:08.342714Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 2 2025-05-07T09:03:08.342734Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 102:3 2025-05-07T09:03:08.342747Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 102:3 2025-05-07T09:03:08.342780Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 1 2025-05-07T09:03:08.343053Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-05-07T09:03:08.343075Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-05-07T09:03:08.343197Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 2146435084, Sender [7:134:2157], Recipient [7:134:2157]: NKikimr::NSchemeShard::TEvPrivate::TEvCleanDroppedPaths 2025-05-07T09:03:08.343225Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5011: StateWork, processing event TEvPrivate::TEvCleanDroppedPaths 2025-05-07T09:03:08.343285Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-05-07T09:03:08.343339Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 5], at schemeshard: 72057594046678944 2025-05-07T09:03:08.343405Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-05-07T09:03:08.343652Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at 
schemeshard: 72057594046678944, cookie: 102 2025-05-07T09:03:08.343691Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-05-07T09:03:08.343731Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-05-07T09:03:08.343763Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-05-07T09:03:08.343800Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-05-07T09:03:08.343816Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-05-07T09:03:08.344848Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-05-07T09:03:08.344877Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-05-07T09:03:08.346115Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-05-07T09:03:08.346180Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-05-07T09:03:08.346242Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:630: Send to actor: [7:411:2369] msg type: 271124998 msg: NKikimrScheme.TEvNotifyTxCompletionResult TxId: 102 at schemeshard: 72057594046678944 2025-05-07T09:03:08.346364Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-05-07T09:03:08.346404Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [7:516:2467] 2025-05-07T09:03:08.346478Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-05-07T09:03:08.346629Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269877764, Sender [7:518:2469], Recipient [7:134:2157]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-05-07T09:03:08.346658Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4938: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-05-07T09:03:08.346680Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5761: Server pipe is reset, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 102 2025-05-07T09:03:08.347036Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271122945, Sender [7:595:2546], Recipient [7:134:2157]: NKikimrSchemeOp.TDescribePath Path: "/MyRoot/Table" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false } 2025-05-07T09:03:08.347081Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4852: StateWork, processing event TEvSchemeShard::TEvDescribeScheme 2025-05-07T09:03:08.347177Z node 7 :SCHEMESHARD_DESCRIBE DEBUG: 
schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T09:03:08.347382Z node 7 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table" took 206us result status StatusPathDoesNotExist 2025-05-07T09:03:08.347514Z node 7 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/Table\', error: path has been deleted (id: [OwnerId: 72057594046678944, LocalPathId: 2], type: EPathTypeTable, state: EPathStateNotExist), drop stepId: 5000003, drop txId: 102" Path: "/MyRoot/Table" PathId: 2 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 |92.3%| [TA] $(B)/ydb/core/tx/schemeshard/ut_sequence/test-results/unittest/{meta.json ... results_accumulator.log} |92.3%| [TA] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_sequence/test-results/unittest/{meta.json ... results_accumulator.log} >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnDyNumberNanoSeconds [GOOD] >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnPgDate [GOOD] >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnPgTimestamp [GOOD] >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnDatetime64 [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_erase_rows/unittest >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnDyNumberNanoSeconds [GOOD] Test command err: 2025-05-07T09:02:55.070191Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:105:2151], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T09:02:55.070360Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T09:02:55.070672Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0029ab/r3tmp/tmprLPyvs/pdisk_1.dat 2025-05-07T09:02:56.437573Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-05-07T09:02:56.541273Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:02:56.652355Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:02:56.658248Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:02:56.681890Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:02:56.879822Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-05-07T09:02:57.015028Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:665:2569] 2025-05-07T09:02:57.015322Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-05-07T09:02:57.091301Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-05-07T09:02:57.091444Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-05-07T09:02:57.093249Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-05-07T09:02:57.093346Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-05-07T09:02:57.093400Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-05-07T09:02:57.104928Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-05-07T09:02:57.105122Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-05-07T09:02:57.105214Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:681:2569] in generation 1 2025-05-07T09:02:57.115969Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-05-07T09:02:57.151008Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-05-07T09:02:57.151258Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-05-07T09:02:57.151428Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 
72075186224037888, actorId: [1:683:2579] 2025-05-07T09:02:57.151476Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-05-07T09:02:57.151513Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-05-07T09:02:57.151551Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:02:57.151969Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-05-07T09:02:57.152055Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-05-07T09:02:57.152148Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T09:02:57.152197Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-05-07T09:02:57.152256Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-05-07T09:02:57.152318Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T09:02:57.152449Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:661:2566], serverId# [1:672:2573], sessionId# [0:0:0] 2025-05-07T09:02:57.164284Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-05-07T09:02:57.164565Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-05-07T09:02:57.164653Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-05-07T09:02:57.166464Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-05-07T09:02:57.177201Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-05-07T09:02:57.186634Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-05-07T09:02:57.334361Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:697:2587], serverId# [1:699:2589], sessionId# [0:0:0] 2025-05-07T09:02:57.338750Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715657 at step 1000 at tablet 72075186224037888 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1000 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2025-05-07T09:02:57.338829Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:02:57.339081Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T09:02:57.339123Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 
planned 1 2025-05-07T09:02:57.339190Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000:281474976715657] in PlanQueue unit at 72075186224037888 2025-05-07T09:02:57.339467Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1000:281474976715657 keys extracted: 0 2025-05-07T09:02:57.339606Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-05-07T09:02:57.340171Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T09:02:57.340256Z node 1 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-05-07T09:02:57.350194Z node 1 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-05-07T09:02:57.364099Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-05-07T09:02:57.366231Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-05-07T09:02:57.366290Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:02:57.366856Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-05-07T09:02:57.366927Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T09:02:57.367790Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T09:02:57.367843Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-05-07T09:02:57.367891Z node 1 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-05-07T09:02:57.367965Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [1:410:2405], exec latency: 0 ms, propose latency: 0 ms 2025-05-07T09:02:57.368079Z node 1 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-05-07T09:02:57.368168Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:02:57.372067Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-05-07T09:02:57.373661Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-05-07T09:02:57.373803Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-05-07T09:02:57.373870Z node 1 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got 
TEvSchemaChangedResult from SS at 72075186224037888 2025-05-07T09:02:57.520679Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:731:2613], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:02:57.520828Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:741:2618], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:02:57.520971Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:02:57.548253Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474 ... D: 72075186224037888 } 2025-05-07T09:03:09.567405Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:03:09.567537Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T09:03:09.567580Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-05-07T09:03:09.567619Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000:281474976715657] in PlanQueue unit at 72075186224037888 2025-05-07T09:03:09.567846Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1000:281474976715657 keys extracted: 0 2025-05-07T09:03:09.567959Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-05-07T09:03:09.568093Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T09:03:09.568141Z node 3 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-05-07T09:03:09.568497Z node 3 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-05-07T09:03:09.568852Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-05-07T09:03:09.570680Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-05-07T09:03:09.570725Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:03:09.571261Z node 3 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-05-07T09:03:09.571332Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T09:03:09.572434Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T09:03:09.572481Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-05-07T09:03:09.572534Z node 3 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-05-07T09:03:09.572599Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [3:419:2412], exec latency: 0 ms, propose latency: 0 ms 2025-05-07T09:03:09.572652Z node 3 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-05-07T09:03:09.572739Z node 3 :TX_DATASHARD DEBUG: 
cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:03:09.573353Z node 3 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-05-07T09:03:09.575730Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-05-07T09:03:09.575931Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-05-07T09:03:09.575995Z node 3 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-05-07T09:03:09.584857Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:731:2613], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:03:09.584957Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:742:2618], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:03:09.585054Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:03:09.590716Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480 2025-05-07T09:03:09.596991Z node 3 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-05-07T09:03:09.757465Z node 3 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-05-07T09:03:09.760697Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:745:2621], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-05-07T09:03:09.794569Z node 3 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [3:815:2660] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:03:09.852654Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715660. Ctx: { TraceId: 01jtmzr2rfccxgrt31xf58acs7, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=ZDNmOThkNWItNjAzMjQ1ZDEtNTAwNjAxMWMtYjQ0YWI4ZmI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:03:09.854749Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [3:846:2677], serverId# [3:847:2678], sessionId# [0:0:0] 2025-05-07T09:03:09.855069Z node 3 :TX_DATASHARD DEBUG: execute_write_unit.cpp:245: Executing write operation for [0:2] at 72075186224037888 2025-05-07T09:03:09.855196Z node 3 :TX_DATASHARD DEBUG: execute_write_unit.cpp:410: Executed write operation for [0:2] at 72075186224037888, row count=4 2025-05-07T09:03:09.866125Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:03:09.869425Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [3:854:2684], serverId# [3:855:2685], sessionId# [0:0:0] 2025-05-07T09:03:09.870266Z node 3 :TX_DATASHARD INFO: datashard__op_rows.cpp:26: TTxDirectBase(48) Execute: at tablet# 72075186224037888 2025-05-07T09:03:09.881378Z node 3 :TX_DATASHARD INFO: datashard__op_rows.cpp:80: TTxDirectBase(48) Complete: at tablet# 72075186224037888 2025-05-07T09:03:09.881467Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:03:09.881721Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4471: FullScan complete at 72075186224037888 2025-05-07T09:03:09.881771Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4487: Conditional erase complete: cookie: 3, at: 72075186224037888 2025-05-07T09:03:09.882092Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T09:03:09.882150Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-05-07T09:03:09.882213Z node 3 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-05-07T09:03:09.882286Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T09:03:09.882389Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037888, clientId# [3:854:2684], serverId# [3:855:2685], sessionId# [0:0:0] 2025-05-07T09:03:09.883410Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-05-07T09:03:09.883781Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-05-07T09:03:09.883989Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 
2025-05-07T09:03:09.884041Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 1 active planned 0 immediate 1 planned 0 2025-05-07T09:03:09.884090Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715661] at 72075186224037888 for WaitForStreamClearance 2025-05-07T09:03:09.884317Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 0 immediate 1 planned 0 2025-05-07T09:03:09.884390Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T09:03:09.885117Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:514: Got quota for read table scan ShardId: 72075186224037888, TxId: 281474976715661, MessageQuota: 1 2025-05-07T09:03:09.885398Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:662: Send response data ShardId: 72075186224037888, TxId: 281474976715661, Size: 37, Rows: 0, PendingAcks: 1, MessageQuota: 0 2025-05-07T09:03:09.885532Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:483: Got stream data ack ShardId: 72075186224037888, TxId: 281474976715661, PendingAcks: 0 2025-05-07T09:03:09.885602Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:717: Finish scan ShardId: 72075186224037888, TxId: 281474976715661, MessageQuota: 0 2025-05-07T09:03:09.887291Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4471: FullScan complete at 72075186224037888 2025-05-07T09:03:09.887363Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4477: Found op: cookie: 281474976715661, at: 72075186224037888 2025-05-07T09:03:09.887770Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T09:03:09.887813Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 1 active planned 0 immediate 1 planned 0 2025-05-07T09:03:09.887852Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715661] at 72075186224037888 for ReadTableScan 2025-05-07T09:03:09.887977Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-05-07T09:03:09.888045Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T09:03:09.888105Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_erase_rows/unittest >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnPgDate [GOOD] Test command err: 2025-05-07T09:02:55.073504Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:105:2151], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T09:02:55.073735Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T09:02:55.074096Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0029c3/r3tmp/tmp82QM6y/pdisk_1.dat 2025-05-07T09:02:56.441984Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-05-07T09:02:56.541266Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:02:56.652217Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:02:56.658075Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:02:56.681738Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:02:56.879816Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-05-07T09:02:57.014973Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:665:2569] 2025-05-07T09:02:57.015254Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-05-07T09:02:57.076948Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-05-07T09:02:57.077050Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-05-07T09:02:57.089120Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-05-07T09:02:57.089208Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-05-07T09:02:57.089260Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-05-07T09:02:57.104898Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-05-07T09:02:57.105087Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-05-07T09:02:57.105160Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:681:2569] in generation 1 2025-05-07T09:02:57.115799Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-05-07T09:02:57.135305Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-05-07T09:02:57.143950Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-05-07T09:02:57.144107Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 
72075186224037888, actorId: [1:683:2579] 2025-05-07T09:02:57.144137Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-05-07T09:02:57.144187Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-05-07T09:02:57.144220Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:02:57.150739Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-05-07T09:02:57.150829Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-05-07T09:02:57.150893Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T09:02:57.150932Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-05-07T09:02:57.150975Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-05-07T09:02:57.151005Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T09:02:57.151129Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:661:2566], serverId# [1:672:2573], sessionId# [0:0:0] 2025-05-07T09:02:57.164176Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-05-07T09:02:57.164421Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-05-07T09:02:57.164521Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-05-07T09:02:57.165873Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-05-07T09:02:57.176449Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-05-07T09:02:57.186529Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-05-07T09:02:57.334577Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:697:2587], serverId# [1:699:2589], sessionId# [0:0:0] 2025-05-07T09:02:57.339496Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715657 at step 1000 at tablet 72075186224037888 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1000 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2025-05-07T09:02:57.339565Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:02:57.339867Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T09:02:57.339923Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 
planned 1 2025-05-07T09:02:57.339970Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000:281474976715657] in PlanQueue unit at 72075186224037888 2025-05-07T09:02:57.340242Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1000:281474976715657 keys extracted: 0 2025-05-07T09:02:57.340393Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-05-07T09:02:57.341068Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T09:02:57.341147Z node 1 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-05-07T09:02:57.350202Z node 1 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-05-07T09:02:57.364112Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-05-07T09:02:57.366251Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-05-07T09:02:57.366312Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:02:57.366824Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-05-07T09:02:57.366878Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T09:02:57.367747Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T09:02:57.367779Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-05-07T09:02:57.367812Z node 1 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-05-07T09:02:57.367857Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [1:410:2405], exec latency: 0 ms, propose latency: 0 ms 2025-05-07T09:02:57.367960Z node 1 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-05-07T09:02:57.368041Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:02:57.371196Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-05-07T09:02:57.372761Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-05-07T09:02:57.372906Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-05-07T09:02:57.372956Z node 1 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got 
TEvSchemaChangedResult from SS at 72075186224037888 2025-05-07T09:02:57.520685Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:731:2613], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:02:57.520823Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:741:2618], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:02:57.520967Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:02:57.548656Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474 ... D: 72075186224037888 } 2025-05-07T09:03:09.675522Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:03:09.675676Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T09:03:09.675715Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-05-07T09:03:09.675749Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000:281474976715657] in PlanQueue unit at 72075186224037888 2025-05-07T09:03:09.675961Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1000:281474976715657 keys extracted: 0 2025-05-07T09:03:09.676077Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-05-07T09:03:09.676207Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T09:03:09.676257Z node 3 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-05-07T09:03:09.676606Z node 3 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-05-07T09:03:09.676929Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-05-07T09:03:09.678623Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-05-07T09:03:09.678682Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:03:09.679325Z node 3 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-05-07T09:03:09.679387Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T09:03:09.680375Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T09:03:09.680412Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-05-07T09:03:09.680456Z node 3 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-05-07T09:03:09.680509Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [3:419:2412], exec latency: 0 ms, propose latency: 0 ms 2025-05-07T09:03:09.680552Z node 3 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-05-07T09:03:09.680617Z node 3 :TX_DATASHARD DEBUG: 
cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:03:09.681099Z node 3 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-05-07T09:03:09.682814Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-05-07T09:03:09.682958Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-05-07T09:03:09.683006Z node 3 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-05-07T09:03:09.689551Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:731:2613], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:03:09.689627Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:742:2618], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:03:09.689680Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:03:09.693660Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480 2025-05-07T09:03:09.697799Z node 3 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-05-07T09:03:09.855216Z node 3 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-05-07T09:03:09.858470Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:745:2621], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-05-07T09:03:09.893216Z node 3 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [3:815:2660] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:03:10.123067Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715660. Ctx: { TraceId: 01jtmzr2vr8a2n6zfd0f93h32a, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=MjQwMGYyMDgtOTNjNTk0YTAtODQzNjA5ZDAtNzcyNGEzMWQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:03:10.133404Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [3:846:2677], serverId# [3:847:2678], sessionId# [0:0:0] 2025-05-07T09:03:10.133749Z node 3 :TX_DATASHARD DEBUG: execute_write_unit.cpp:245: Executing write operation for [0:2] at 72075186224037888 2025-05-07T09:03:10.133922Z node 3 :TX_DATASHARD DEBUG: execute_write_unit.cpp:410: Executed write operation for [0:2] at 72075186224037888, row count=5 2025-05-07T09:03:10.144622Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:03:10.147738Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [3:854:2684], serverId# [3:855:2685], sessionId# [0:0:0] 2025-05-07T09:03:10.158168Z node 3 :TX_DATASHARD INFO: datashard__op_rows.cpp:26: TTxDirectBase(48) Execute: at tablet# 72075186224037888 2025-05-07T09:03:10.169436Z node 3 :TX_DATASHARD INFO: datashard__op_rows.cpp:80: TTxDirectBase(48) Complete: at tablet# 72075186224037888 2025-05-07T09:03:10.169508Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:03:10.169700Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4471: FullScan complete at 72075186224037888 2025-05-07T09:03:10.169738Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4487: Conditional erase complete: cookie: 3, at: 72075186224037888 2025-05-07T09:03:10.170009Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T09:03:10.170059Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-05-07T09:03:10.170104Z node 3 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-05-07T09:03:10.170149Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T09:03:10.170220Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037888, clientId# [3:854:2684], serverId# [3:855:2685], sessionId# [0:0:0] 2025-05-07T09:03:10.170930Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-05-07T09:03:10.171194Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-05-07T09:03:10.171410Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 
2025-05-07T09:03:10.171448Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 1 active planned 0 immediate 1 planned 0 2025-05-07T09:03:10.171486Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715661] at 72075186224037888 for WaitForStreamClearance 2025-05-07T09:03:10.171668Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 0 immediate 1 planned 0 2025-05-07T09:03:10.171718Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T09:03:10.172173Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:514: Got quota for read table scan ShardId: 72075186224037888, TxId: 281474976715661, MessageQuota: 1 2025-05-07T09:03:10.172471Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:662: Send response data ShardId: 72075186224037888, TxId: 281474976715661, Size: 43, Rows: 0, PendingAcks: 1, MessageQuota: 0 2025-05-07T09:03:10.172589Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:483: Got stream data ack ShardId: 72075186224037888, TxId: 281474976715661, PendingAcks: 0 2025-05-07T09:03:10.172633Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:717: Finish scan ShardId: 72075186224037888, TxId: 281474976715661, MessageQuota: 0 2025-05-07T09:03:10.173907Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4471: FullScan complete at 72075186224037888 2025-05-07T09:03:10.173948Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4477: Found op: cookie: 281474976715661, at: 72075186224037888 2025-05-07T09:03:10.174260Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T09:03:10.174289Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 1 active planned 0 immediate 1 planned 0 2025-05-07T09:03:10.174314Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715661] at 72075186224037888 for ReadTableScan 2025-05-07T09:03:10.174400Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-05-07T09:03:10.174437Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T09:03:10.174467Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_erase_rows/unittest >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnDatetime64 [GOOD] Test command err: 2025-05-07T09:02:55.068680Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:105:2151], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T09:02:55.068831Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T09:02:55.069079Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002946/r3tmp/tmpG3L75j/pdisk_1.dat 2025-05-07T09:02:56.437313Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-05-07T09:02:56.541288Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:02:56.652263Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:02:56.658198Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:02:56.681862Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:02:56.879742Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-05-07T09:02:57.014966Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:665:2569] 2025-05-07T09:02:57.015261Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-05-07T09:02:57.090184Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-05-07T09:02:57.090317Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-05-07T09:02:57.092002Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-05-07T09:02:57.092078Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-05-07T09:02:57.092127Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-05-07T09:02:57.104880Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-05-07T09:02:57.105072Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-05-07T09:02:57.105158Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:681:2569] in generation 1 2025-05-07T09:02:57.115897Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-05-07T09:02:57.143861Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-05-07T09:02:57.144029Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-05-07T09:02:57.144105Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 
72075186224037888, actorId: [1:683:2579] 2025-05-07T09:02:57.144131Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-05-07T09:02:57.144156Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-05-07T09:02:57.144180Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:02:57.150747Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-05-07T09:02:57.150838Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-05-07T09:02:57.150899Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T09:02:57.150927Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-05-07T09:02:57.151014Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-05-07T09:02:57.151061Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T09:02:57.151186Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:661:2566], serverId# [1:672:2573], sessionId# [0:0:0] 2025-05-07T09:02:57.164286Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-05-07T09:02:57.164527Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-05-07T09:02:57.164620Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-05-07T09:02:57.166336Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-05-07T09:02:57.177006Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-05-07T09:02:57.186615Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-05-07T09:02:57.334067Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:697:2587], serverId# [1:699:2589], sessionId# [0:0:0] 2025-05-07T09:02:57.338240Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715657 at step 1000 at tablet 72075186224037888 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1000 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2025-05-07T09:02:57.338317Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:02:57.338552Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T09:02:57.338595Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 
planned 1 2025-05-07T09:02:57.338635Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000:281474976715657] in PlanQueue unit at 72075186224037888 2025-05-07T09:02:57.338857Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1000:281474976715657 keys extracted: 0 2025-05-07T09:02:57.339010Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-05-07T09:02:57.339626Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T09:02:57.339698Z node 1 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-05-07T09:02:57.350160Z node 1 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-05-07T09:02:57.364113Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-05-07T09:02:57.366337Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-05-07T09:02:57.366398Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:02:57.367047Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-05-07T09:02:57.367134Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T09:02:57.368145Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T09:02:57.368200Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-05-07T09:02:57.368252Z node 1 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-05-07T09:02:57.368325Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [1:410:2405], exec latency: 0 ms, propose latency: 0 ms 2025-05-07T09:02:57.368432Z node 1 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-05-07T09:02:57.368526Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:02:57.372002Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-05-07T09:02:57.373318Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-05-07T09:02:57.373442Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-05-07T09:02:57.373493Z node 1 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got 
TEvSchemaChangedResult from SS at 72075186224037888 2025-05-07T09:02:57.520578Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:731:2613], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:02:57.520727Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:741:2618], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:02:57.520859Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:02:57.548920Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474 ... D: 72075186224037888 } 2025-05-07T09:03:09.999933Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:03:10.000119Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T09:03:10.000171Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-05-07T09:03:10.000221Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000:281474976715657] in PlanQueue unit at 72075186224037888 2025-05-07T09:03:10.000456Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1000:281474976715657 keys extracted: 0 2025-05-07T09:03:10.000600Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-05-07T09:03:10.000782Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T09:03:10.000854Z node 3 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-05-07T09:03:10.001307Z node 3 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-05-07T09:03:10.001734Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-05-07T09:03:10.004145Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-05-07T09:03:10.004208Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:03:10.004993Z node 3 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-05-07T09:03:10.005080Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T09:03:10.006575Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T09:03:10.006630Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-05-07T09:03:10.006683Z node 3 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-05-07T09:03:10.006758Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [3:419:2412], exec latency: 0 ms, propose latency: 0 ms 2025-05-07T09:03:10.006813Z node 3 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-05-07T09:03:10.006906Z node 3 :TX_DATASHARD DEBUG: 
cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:03:10.007560Z node 3 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-05-07T09:03:10.010023Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-05-07T09:03:10.010236Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-05-07T09:03:10.010306Z node 3 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-05-07T09:03:10.019265Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:731:2613], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:03:10.019391Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:742:2618], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:03:10.019471Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:03:10.025154Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480 2025-05-07T09:03:10.031367Z node 3 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-05-07T09:03:10.188205Z node 3 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-05-07T09:03:10.190297Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:745:2621], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-05-07T09:03:10.223768Z node 3 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [3:815:2660] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:03:10.282424Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715660. Ctx: { TraceId: 01jtmzr3615gybvj6epxt6v4a8, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YzU2MGYxNTEtYTJmNzkxNzItNjc4NjhmZTItODc5YTlhZmU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:03:10.284585Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [3:846:2677], serverId# [3:847:2678], sessionId# [0:0:0] 2025-05-07T09:03:10.284957Z node 3 :TX_DATASHARD DEBUG: execute_write_unit.cpp:245: Executing write operation for [0:2] at 72075186224037888 2025-05-07T09:03:10.285091Z node 3 :TX_DATASHARD DEBUG: execute_write_unit.cpp:410: Executed write operation for [0:2] at 72075186224037888, row count=5 2025-05-07T09:03:10.295943Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:03:10.299137Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [3:854:2684], serverId# [3:855:2685], sessionId# [0:0:0] 2025-05-07T09:03:10.299923Z node 3 :TX_DATASHARD INFO: datashard__op_rows.cpp:26: TTxDirectBase(48) Execute: at tablet# 72075186224037888 2025-05-07T09:03:10.311040Z node 3 :TX_DATASHARD INFO: datashard__op_rows.cpp:80: TTxDirectBase(48) Complete: at tablet# 72075186224037888 2025-05-07T09:03:10.311116Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:03:10.311337Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4471: FullScan complete at 72075186224037888 2025-05-07T09:03:10.311377Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4487: Conditional erase complete: cookie: 3, at: 72075186224037888 2025-05-07T09:03:10.311612Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T09:03:10.311651Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-05-07T09:03:10.311693Z node 3 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-05-07T09:03:10.311747Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T09:03:10.311826Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037888, clientId# [3:854:2684], serverId# [3:855:2685], sessionId# [0:0:0] 2025-05-07T09:03:10.312575Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-05-07T09:03:10.312875Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-05-07T09:03:10.313016Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 
2025-05-07T09:03:10.313050Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 1 active planned 0 immediate 1 planned 0 2025-05-07T09:03:10.313086Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715661] at 72075186224037888 for WaitForStreamClearance 2025-05-07T09:03:10.313316Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 0 immediate 1 planned 0 2025-05-07T09:03:10.313380Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T09:03:10.313873Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:514: Got quota for read table scan ShardId: 72075186224037888, TxId: 281474976715661, MessageQuota: 1 2025-05-07T09:03:10.314085Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:662: Send response data ShardId: 72075186224037888, TxId: 281474976715661, Size: 36, Rows: 0, PendingAcks: 1, MessageQuota: 0 2025-05-07T09:03:10.314202Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:483: Got stream data ack ShardId: 72075186224037888, TxId: 281474976715661, PendingAcks: 0 2025-05-07T09:03:10.314243Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:717: Finish scan ShardId: 72075186224037888, TxId: 281474976715661, MessageQuota: 0 2025-05-07T09:03:10.315648Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4471: FullScan complete at 72075186224037888 2025-05-07T09:03:10.315690Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4477: Found op: cookie: 281474976715661, at: 72075186224037888 2025-05-07T09:03:10.316007Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T09:03:10.316045Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 1 active planned 0 immediate 1 planned 0 2025-05-07T09:03:10.316086Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715661] at 72075186224037888 for ReadTableScan 2025-05-07T09:03:10.316191Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-05-07T09:03:10.316232Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T09:03:10.316269Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_erase_rows/unittest >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnPgTimestamp [GOOD] Test command err: 2025-05-07T09:02:55.069309Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:105:2151], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T09:02:55.069453Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T09:02:55.069724Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0029d8/r3tmp/tmpTzvZmt/pdisk_1.dat 2025-05-07T09:02:56.437943Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-05-07T09:02:56.541278Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:02:56.652375Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:02:56.658264Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:02:56.681956Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:02:56.879867Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-05-07T09:02:57.014903Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:665:2569] 2025-05-07T09:02:57.015142Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-05-07T09:02:57.076085Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-05-07T09:02:57.076190Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-05-07T09:02:57.089225Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-05-07T09:02:57.089321Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-05-07T09:02:57.089387Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-05-07T09:02:57.104868Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-05-07T09:02:57.105169Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-05-07T09:02:57.105252Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:681:2569] in generation 1 2025-05-07T09:02:57.115926Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-05-07T09:02:57.149863Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-05-07T09:02:57.150100Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-05-07T09:02:57.150218Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 
72075186224037888, actorId: [1:683:2579] 2025-05-07T09:02:57.150254Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-05-07T09:02:57.150324Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-05-07T09:02:57.150367Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:02:57.150778Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-05-07T09:02:57.150851Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-05-07T09:02:57.150915Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T09:02:57.150945Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-05-07T09:02:57.150983Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-05-07T09:02:57.151011Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T09:02:57.151092Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:661:2566], serverId# [1:672:2573], sessionId# [0:0:0] 2025-05-07T09:02:57.164192Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-05-07T09:02:57.164415Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-05-07T09:02:57.164508Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-05-07T09:02:57.165851Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-05-07T09:02:57.176447Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-05-07T09:02:57.186530Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-05-07T09:02:57.334302Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:697:2587], serverId# [1:699:2589], sessionId# [0:0:0] 2025-05-07T09:02:57.338786Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715657 at step 1000 at tablet 72075186224037888 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1000 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2025-05-07T09:02:57.338853Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:02:57.339136Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T09:02:57.339183Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 
planned 1 2025-05-07T09:02:57.339242Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000:281474976715657] in PlanQueue unit at 72075186224037888 2025-05-07T09:02:57.339521Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1000:281474976715657 keys extracted: 0 2025-05-07T09:02:57.339659Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-05-07T09:02:57.340273Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T09:02:57.340356Z node 1 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-05-07T09:02:57.350178Z node 1 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-05-07T09:02:57.364145Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-05-07T09:02:57.366211Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-05-07T09:02:57.366302Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:02:57.366887Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-05-07T09:02:57.366955Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T09:02:57.367925Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T09:02:57.367962Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-05-07T09:02:57.368006Z node 1 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-05-07T09:02:57.368060Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [1:410:2405], exec latency: 0 ms, propose latency: 0 ms 2025-05-07T09:02:57.368154Z node 1 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-05-07T09:02:57.368229Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:02:57.371605Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-05-07T09:02:57.373096Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-05-07T09:02:57.373218Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-05-07T09:02:57.373261Z node 1 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got 
TEvSchemaChangedResult from SS at 72075186224037888 2025-05-07T09:02:57.521996Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:731:2613], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:02:57.522175Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:741:2618], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:02:57.522308Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:02:57.548474Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474 ... D: 72075186224037888 } 2025-05-07T09:03:09.998082Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:03:09.998239Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T09:03:09.998284Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-05-07T09:03:09.998325Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000:281474976715657] in PlanQueue unit at 72075186224037888 2025-05-07T09:03:09.998548Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1000:281474976715657 keys extracted: 0 2025-05-07T09:03:09.998661Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-05-07T09:03:09.998806Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T09:03:09.998863Z node 3 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-05-07T09:03:09.999266Z node 3 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-05-07T09:03:09.999613Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-05-07T09:03:10.001322Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-05-07T09:03:10.001369Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:03:10.001965Z node 3 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-05-07T09:03:10.002059Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T09:03:10.003227Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T09:03:10.003277Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-05-07T09:03:10.003339Z node 3 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-05-07T09:03:10.003391Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [3:419:2412], exec latency: 0 ms, propose latency: 0 ms 2025-05-07T09:03:10.003428Z node 3 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-05-07T09:03:10.003495Z node 3 :TX_DATASHARD DEBUG: 
cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:03:10.003933Z node 3 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-05-07T09:03:10.005547Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-05-07T09:03:10.005676Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-05-07T09:03:10.005732Z node 3 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-05-07T09:03:10.012596Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:731:2613], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:03:10.012666Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:742:2618], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:03:10.012717Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:03:10.016390Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480 2025-05-07T09:03:10.021288Z node 3 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-05-07T09:03:10.178140Z node 3 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-05-07T09:03:10.180281Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:745:2621], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-05-07T09:03:10.213689Z node 3 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [3:815:2660] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:03:10.315766Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715660. Ctx: { TraceId: 01jtmzr35v53agr3w3efv9pmdz, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=NzRkZGEwYWItMWJlOWUwODctNTZhZWRhMTYtNzg2MDg5YWQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:03:10.329656Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [3:846:2677], serverId# [3:847:2678], sessionId# [0:0:0] 2025-05-07T09:03:10.330160Z node 3 :TX_DATASHARD DEBUG: execute_write_unit.cpp:245: Executing write operation for [0:2] at 72075186224037888 2025-05-07T09:03:10.330298Z node 3 :TX_DATASHARD DEBUG: execute_write_unit.cpp:410: Executed write operation for [0:2] at 72075186224037888, row count=5 2025-05-07T09:03:10.341143Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:03:10.344458Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [3:854:2684], serverId# [3:855:2685], sessionId# [0:0:0] 2025-05-07T09:03:10.345457Z node 3 :TX_DATASHARD INFO: datashard__op_rows.cpp:26: TTxDirectBase(48) Execute: at tablet# 72075186224037888 2025-05-07T09:03:10.356626Z node 3 :TX_DATASHARD INFO: datashard__op_rows.cpp:80: TTxDirectBase(48) Complete: at tablet# 72075186224037888 2025-05-07T09:03:10.356693Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:03:10.356884Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4471: FullScan complete at 72075186224037888 2025-05-07T09:03:10.356921Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4487: Conditional erase complete: cookie: 3, at: 72075186224037888 2025-05-07T09:03:10.357156Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T09:03:10.357200Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-05-07T09:03:10.357242Z node 3 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-05-07T09:03:10.357293Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T09:03:10.357360Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037888, clientId# [3:854:2684], serverId# [3:855:2685], sessionId# [0:0:0] 2025-05-07T09:03:10.358158Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-05-07T09:03:10.358516Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-05-07T09:03:10.358763Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 
2025-05-07T09:03:10.358802Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 1 active planned 0 immediate 1 planned 0 2025-05-07T09:03:10.358840Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715661] at 72075186224037888 for WaitForStreamClearance 2025-05-07T09:03:10.359037Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 0 immediate 1 planned 0 2025-05-07T09:03:10.359092Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T09:03:10.359617Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:514: Got quota for read table scan ShardId: 72075186224037888, TxId: 281474976715661, MessageQuota: 1 2025-05-07T09:03:10.359875Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:662: Send response data ShardId: 72075186224037888, TxId: 281474976715661, Size: 48, Rows: 0, PendingAcks: 1, MessageQuota: 0 2025-05-07T09:03:10.359986Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:483: Got stream data ack ShardId: 72075186224037888, TxId: 281474976715661, PendingAcks: 0 2025-05-07T09:03:10.360030Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:717: Finish scan ShardId: 72075186224037888, TxId: 281474976715661, MessageQuota: 0 2025-05-07T09:03:10.361428Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4471: FullScan complete at 72075186224037888 2025-05-07T09:03:10.361473Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4477: Found op: cookie: 281474976715661, at: 72075186224037888 2025-05-07T09:03:10.361793Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T09:03:10.361822Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 1 active planned 0 immediate 1 planned 0 2025-05-07T09:03:10.361850Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715661] at 72075186224037888 for ReadTableScan 2025-05-07T09:03:10.361943Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-05-07T09:03:10.362007Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T09:03:10.362045Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 >> DistributedEraseTests::ConditionalEraseRowsShouldFailOnSplit [GOOD] >> DistributedEraseTests::ConditionalEraseRowsShouldFailOnDeadShard [GOOD] >> YdbOlapStore::DuplicateRows [GOOD] >> YdbOlapStore::LogCountByResource ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_erase_rows/unittest >> DistributedEraseTests::ConditionalEraseRowsShouldFailOnSplit [GOOD] Test command err: 2025-05-07T09:02:55.069745Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:105:2151], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T09:02:55.069888Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T09:02:55.070196Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002933/r3tmp/tmpA9ePPh/pdisk_1.dat 2025-05-07T09:02:56.437366Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-05-07T09:02:56.541299Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:02:56.652238Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:02:56.658120Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:02:56.681750Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:02:56.879793Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-05-07T09:02:57.015065Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:687:2585] 2025-05-07T09:02:57.015311Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-05-07T09:02:57.090325Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-05-07T09:02:57.090552Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-05-07T09:02:57.092269Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-05-07T09:02:57.092358Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-05-07T09:02:57.092410Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-05-07T09:02:57.104888Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-05-07T09:02:57.105174Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-05-07T09:02:57.105257Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:712:2585] in generation 1 2025-05-07T09:02:57.107203Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037889 actor [1:689:2587] 2025-05-07T09:02:57.107416Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-05-07T09:02:57.116845Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037890 actor [1:693:2589] 2025-05-07T09:02:57.117037Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-05-07T09:02:57.125937Z node 1 :TX_DATASHARD DEBUG: 
datashard__init.cpp:696: TxInitSchema.Complete 2025-05-07T09:02:57.126093Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-05-07T09:02:57.127619Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037889 2025-05-07T09:02:57.127685Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037889 2025-05-07T09:02:57.127731Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037889 2025-05-07T09:02:57.128013Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-05-07T09:02:57.128126Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-05-07T09:02:57.128182Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037889 persisting started state actor id [1:736:2587] in generation 1 2025-05-07T09:02:57.128565Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-05-07T09:02:57.128645Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-05-07T09:02:57.129878Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037890 2025-05-07T09:02:57.129938Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037890 2025-05-07T09:02:57.130001Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037890 2025-05-07T09:02:57.130335Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-05-07T09:02:57.130434Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-05-07T09:02:57.130475Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037890 persisting started state actor id [1:737:2589] in generation 1 2025-05-07T09:02:57.141356Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-05-07T09:02:57.166649Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-05-07T09:02:57.166819Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-05-07T09:02:57.166905Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:741:2615] 2025-05-07T09:02:57.166933Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-05-07T09:02:57.166960Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-05-07T09:02:57.167003Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:02:57.167399Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-05-07T09:02:57.167429Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037889 2025-05-07T09:02:57.167467Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037889 not sending time cast registration request in state WaitScheme: missing processing params 2025-05-07T09:02:57.167513Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037889, 
actorId: [1:742:2616] 2025-05-07T09:02:57.167530Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037889 2025-05-07T09:02:57.167546Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037889, state: WaitScheme 2025-05-07T09:02:57.167562Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-05-07T09:02:57.167599Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-05-07T09:02:57.167615Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037890 2025-05-07T09:02:57.167650Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037890 not sending time cast registration request in state WaitScheme: missing processing params 2025-05-07T09:02:57.167683Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037890, actorId: [1:743:2617] 2025-05-07T09:02:57.167695Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037890 2025-05-07T09:02:57.167719Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037890, state: WaitScheme 2025-05-07T09:02:57.167732Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037890 2025-05-07T09:02:57.167849Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-05-07T09:02:57.167919Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-05-07T09:02:57.168048Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T09:02:57.168076Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-05-07T09:02:57.168110Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-05-07T09:02:57.168145Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T09:02:57.168240Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:676:2580], serverId# [1:702:2593], sessionId# [0:0:0] 2025-05-07T09:02:57.168270Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037889 2025-05-07T09:02:57.168329Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037889 2025-05-07T09:02:57.168358Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037890 2025-05-07T09:02:57.168392Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037890 2025-05-07T09:02:57.168485Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-05-07T09:02:57.168642Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 
2025-05-07T09:02:57.168711Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-05-07T09:02:57.169012Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037889 2025-05-07T09:02:57.169038Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 active 0 active planned 0 immediate 0 planned 0 2025-05-07T09:02:57.169055Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037889 TxInFly 0 2025-05-07T09:02:57.169077Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-05-07T09:02:57.169113Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037890 2025-05-07T09:02:57.169127Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037890 active 0 active planned 0 immediate 0 planned 0 2025-05-07T09:02:57.169140Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186 ... node 3 :TX_DATASHARD DEBUG: datashard__compaction.cpp:203: CompactionComplete of tablet# 72075186224037889, table# 7, finished edge# 0, ts 1970-01-01T00:00:00.000000Z 2025-05-07T09:03:11.636918Z node 3 :TX_DATASHARD DEBUG: datashard__compaction.cpp:240: ReplyCompactionWaiters of tablet# 72075186224037889, table# 7, finished edge# 0, front# 0 2025-05-07T09:03:11.638221Z node 3 :TX_DATASHARD DEBUG: datashard__compaction.cpp:203: CompactionComplete of tablet# 72075186224037889, table# 8, finished edge# 0, ts 1970-01-01T00:00:00.000000Z 2025-05-07T09:03:11.638249Z node 3 :TX_DATASHARD DEBUG: datashard__compaction.cpp:240: ReplyCompactionWaiters of tablet# 72075186224037889, table# 8, finished edge# 0, front# 0 2025-05-07T09:03:11.638810Z node 3 :TX_DATASHARD DEBUG: datashard__compaction.cpp:203: CompactionComplete of tablet# 72075186224037889, table# 1001, finished edge# 0, ts 1970-01-01T00:00:00.000000Z 2025-05-07T09:03:11.638836Z node 3 :TX_DATASHARD DEBUG: datashard__compaction.cpp:240: ReplyCompactionWaiters of tablet# 72075186224037889, table# 1001, finished edge# 0, front# 0 2025-05-07T09:03:11.639185Z node 3 :TX_DATASHARD DEBUG: datashard_split_src.cpp:256: 72075186224037889 snapshot complete for split OpId 281474976715663 2025-05-07T09:03:11.639360Z node 3 :TX_DATASHARD DEBUG: datashard_split_src.cpp:332: 72075186224037889 BorrowSnapshot: table 3 snapshot size is 12 total snapshot size is 12 for split OpId 281474976715663 2025-05-07T09:03:11.639417Z node 3 :TX_DATASHARD DEBUG: datashard_split_src.cpp:332: 72075186224037889 BorrowSnapshot: table 4 snapshot size is 12 total snapshot size is 24 for split OpId 281474976715663 2025-05-07T09:03:11.639442Z node 3 :TX_DATASHARD DEBUG: datashard_split_src.cpp:332: 72075186224037889 BorrowSnapshot: table 7 snapshot size is 12 total snapshot size is 36 for split OpId 281474976715663 2025-05-07T09:03:11.639465Z node 3 :TX_DATASHARD DEBUG: datashard_split_src.cpp:332: 72075186224037889 BorrowSnapshot: table 8 snapshot size is 12 total snapshot size is 48 for split OpId 281474976715663 2025-05-07T09:03:11.639620Z node 3 :TX_DATASHARD DEBUG: datashard_split_src.cpp:332: 72075186224037889 BorrowSnapshot: table 1001 snapshot size is 146 total snapshot size is 194 for split OpId 281474976715663 2025-05-07T09:03:11.639754Z node 3 :TX_DATASHARD DEBUG: datashard_split_src.cpp:332: 72075186224037889 
BorrowSnapshot: table 3 snapshot size is 12 total snapshot size is 206 for split OpId 281474976715663 2025-05-07T09:03:11.639781Z node 3 :TX_DATASHARD DEBUG: datashard_split_src.cpp:332: 72075186224037889 BorrowSnapshot: table 4 snapshot size is 12 total snapshot size is 218 for split OpId 281474976715663 2025-05-07T09:03:11.639806Z node 3 :TX_DATASHARD DEBUG: datashard_split_src.cpp:332: 72075186224037889 BorrowSnapshot: table 7 snapshot size is 12 total snapshot size is 230 for split OpId 281474976715663 2025-05-07T09:03:11.639828Z node 3 :TX_DATASHARD DEBUG: datashard_split_src.cpp:332: 72075186224037889 BorrowSnapshot: table 8 snapshot size is 12 total snapshot size is 242 for split OpId 281474976715663 2025-05-07T09:03:11.639941Z node 3 :TX_DATASHARD DEBUG: datashard_split_src.cpp:332: 72075186224037889 BorrowSnapshot: table 1001 snapshot size is 155 total snapshot size is 397 for split OpId 281474976715663 2025-05-07T09:03:11.640393Z node 3 :TX_DATASHARD DEBUG: datashard_split_src.cpp:424: 72075186224037889 Sending snapshots from src for split OpId 281474976715663 2025-05-07T09:03:11.640548Z node 3 :TX_DATASHARD DEBUG: datashard_impl.h:2340: Sending snapshot for split opId 281474976715663 from datashard 72075186224037889 to datashard 72075186224037892 size 221 2025-05-07T09:03:11.640635Z node 3 :TX_DATASHARD DEBUG: datashard_impl.h:2340: Sending snapshot for split opId 281474976715663 from datashard 72075186224037889 to datashard 72075186224037891 size 215 2025-05-07T09:03:11.640857Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037891, clientId# [3:1192:2902], serverId# [3:1193:2903], sessionId# [0:0:0] 2025-05-07T09:03:11.640883Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037892, clientId# [3:1191:2901], serverId# [3:1194:2904], sessionId# [0:0:0] 2025-05-07T09:03:11.641035Z node 3 :TX_DATASHARD DEBUG: datashard_split_dst.cpp:175: 72075186224037891 Received snapshot for split/merge TxId 281474976715663 from tabletId 72075186224037889 2025-05-07T09:03:11.641641Z node 3 :TX_DATASHARD DEBUG: datashard_split_dst.cpp:175: 72075186224037892 Received snapshot for split/merge TxId 281474976715663 from tabletId 72075186224037889 2025-05-07T09:03:11.642945Z node 3 :TX_DATASHARD DEBUG: datashard_split_dst.cpp:304: 72075186224037891 ack snapshot OpId 281474976715663 2025-05-07T09:03:11.643073Z node 3 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state Ready tabletId 72075186224037891 2025-05-07T09:03:11.643175Z node 3 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037891 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-05-07T09:03:11.643268Z node 3 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186224037891 2025-05-07T09:03:11.643351Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037891, actorId: [3:1197:2907] 2025-05-07T09:03:11.643380Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037891 2025-05-07T09:03:11.643417Z node 3 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037891 2025-05-07T09:03:11.643448Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037891 2025-05-07T09:03:11.643569Z node 3 :TX_DATASHARD DEBUG: datashard_split_src.cpp:461: 72075186224037889
Received snapshot Ack from dst 72075186224037891 for split OpId 281474976715663 2025-05-07T09:03:11.644093Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037891, clientId# [3:1192:2902], serverId# [3:1193:2903], sessionId# [0:0:0] 2025-05-07T09:03:11.644192Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037891 time 2000 2025-05-07T09:03:11.644224Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037891 2025-05-07T09:03:11.644431Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037891 2025-05-07T09:03:11.644456Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037891 active 0 active planned 0 immediate 0 planned 0 2025-05-07T09:03:11.644479Z node 3 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037891 TxInFly 0 2025-05-07T09:03:11.644510Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037891 2025-05-07T09:03:11.644553Z node 3 :TX_DATASHARD DEBUG: datashard_split_dst.cpp:304: 72075186224037892 ack snapshot OpId 281474976715663 2025-05-07T09:03:11.644626Z node 3 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state Ready tabletId 72075186224037892 2025-05-07T09:03:11.644691Z node 3 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037892 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-05-07T09:03:11.644737Z node 3 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186224037892 2025-05-07T09:03:11.644770Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037892, actorId: [3:1199:2909] 2025-05-07T09:03:11.644786Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037892 2025-05-07T09:03:11.644808Z node 3 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037892 2025-05-07T09:03:11.644825Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037892 2025-05-07T09:03:11.644979Z node 3 :TX_DATASHARD DEBUG: datashard_split_src.cpp:461: 72075186224037889 Received snapshot Ack from dst 72075186224037892 for split OpId 281474976715663 2025-05-07T09:03:11.645329Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037892 2025-05-07T09:03:11.645353Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037892 active 0 active planned 0 immediate 0 planned 0 2025-05-07T09:03:11.645381Z node 3 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037892 TxInFly 0 2025-05-07T09:03:11.645404Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037892 2025-05-07T09:03:11.645607Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037892 time 2000 2025-05-07T09:03:11.645633Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037892 2025-05-07T09:03:11.645694Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got 
TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037891 coordinator 72057594046316545 last step 1500 next step 2000 2025-05-07T09:03:11.645757Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:2812: CheckMediatorStateRestored at 72075186224037891: waitStep# 2000 readStep# 2000 observedStep# 2000 2025-05-07T09:03:11.645834Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037892, clientId# [3:1191:2901], serverId# [3:1194:2904], sessionId# [0:0:0] 2025-05-07T09:03:11.646057Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037892 coordinator 72057594046316545 last step 1500 next step 2000 2025-05-07T09:03:11.646081Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:2812: CheckMediatorStateRestored at 72075186224037892: waitStep# 2000 readStep# 2000 observedStep# 2000 2025-05-07T09:03:11.656699Z node 3 :TX_DATASHARD DEBUG: datashard_split_src.cpp:485: 72075186224037889 ack split to schemeshard 281474976715663 2025-05-07T09:03:11.659097Z node 3 :TX_DATASHARD DEBUG: datashard_split_src.cpp:565: Got TEvSplitPartitioningChanged: opId: 281474976715663, at datashard: 72075186224037889, state: SplitSrcWaitForPartitioningChanged 2025-05-07T09:03:11.661194Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4471: FullScan complete at 72075186224037889 2025-05-07T09:03:11.661255Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4487: Conditional erase complete: cookie: 4, at: 72075186224037889 2025-05-07T09:03:11.661538Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037889, clientId# [3:1081:2821], serverId# [3:1082:2822], sessionId# [0:0:0] 2025-05-07T09:03:11.661628Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037889 2025-05-07T09:03:11.661660Z node 3 :TX_DATASHARD INFO: datashard__progress_tx.cpp:21: Progress tx at non-ready tablet 72075186224037889 state 5 2025-05-07T09:03:11.661792Z node 3 :TX_DATASHARD DEBUG: datashard_split_src.cpp:532: 72075186224037889 ack split partitioning changed to schemeshard 281474976715663 2025-05-07T09:03:11.661838Z node 3 :TX_DATASHARD DEBUG: datashard_loans.cpp:220: 72075186224037889 in PreOffline state HasSharedBobs: 1 SchemaOperations: [ ] OutReadSets count: 0 ChangesQueue size: 0 ChangeExchangeSplit: 1 siblings to be activated: wait to activation from: 2025-05-07T09:03:11.661876Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 >> TPipeCacheTest::TestIdleRefresh ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_erase_rows/unittest >> DistributedEraseTests::ConditionalEraseRowsShouldFailOnDeadShard [GOOD] Test command err: 2025-05-07T09:02:55.073457Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:105:2151], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T09:02:55.073731Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T09:02:55.074063Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0029a2/r3tmp/tmp7ZUSi7/pdisk_1.dat 2025-05-07T09:02:56.437830Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-05-07T09:02:56.541260Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:02:56.652329Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:02:56.658201Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:02:56.681887Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:02:56.879642Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-05-07T09:02:57.015075Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:687:2585] 2025-05-07T09:02:57.015317Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-05-07T09:02:57.077061Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-05-07T09:02:57.077232Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-05-07T09:02:57.089540Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-05-07T09:02:57.089655Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-05-07T09:02:57.089717Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-05-07T09:02:57.104929Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-05-07T09:02:57.105220Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-05-07T09:02:57.105290Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:712:2585] in generation 1 2025-05-07T09:02:57.107192Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037889 actor [1:689:2587] 2025-05-07T09:02:57.107405Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-05-07T09:02:57.116956Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037890 actor [1:693:2589] 2025-05-07T09:02:57.117151Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-05-07T09:02:57.125930Z node 1 :TX_DATASHARD DEBUG: 
datashard__init.cpp:696: TxInitSchema.Complete 2025-05-07T09:02:57.126079Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-05-07T09:02:57.127580Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037889 2025-05-07T09:02:57.127649Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037889 2025-05-07T09:02:57.127695Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037889 2025-05-07T09:02:57.127967Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-05-07T09:02:57.128089Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-05-07T09:02:57.128142Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037889 persisting started state actor id [1:736:2587] in generation 1 2025-05-07T09:02:57.128535Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-05-07T09:02:57.128617Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-05-07T09:02:57.129854Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037890 2025-05-07T09:02:57.129911Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037890 2025-05-07T09:02:57.129961Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037890 2025-05-07T09:02:57.130251Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-05-07T09:02:57.130359Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-05-07T09:02:57.130418Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037890 persisting started state actor id [1:737:2589] in generation 1 2025-05-07T09:02:57.141242Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-05-07T09:02:57.175362Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-05-07T09:02:57.175570Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-05-07T09:02:57.175668Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:741:2615] 2025-05-07T09:02:57.175698Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-05-07T09:02:57.175730Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-05-07T09:02:57.175775Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:02:57.176174Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-05-07T09:02:57.176205Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037889 2025-05-07T09:02:57.176242Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037889 not sending time cast registration request in state WaitScheme: missing processing params 2025-05-07T09:02:57.176280Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037889, 
actorId: [1:742:2616] 2025-05-07T09:02:57.176300Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037889 2025-05-07T09:02:57.176318Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037889, state: WaitScheme 2025-05-07T09:02:57.176342Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-05-07T09:02:57.176395Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-05-07T09:02:57.176422Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037890 2025-05-07T09:02:57.176463Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037890 not sending time cast registration request in state WaitScheme: missing processing params 2025-05-07T09:02:57.176513Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037890, actorId: [1:743:2617] 2025-05-07T09:02:57.176528Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037890 2025-05-07T09:02:57.176554Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037890, state: WaitScheme 2025-05-07T09:02:57.176570Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037890 2025-05-07T09:02:57.176708Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-05-07T09:02:57.176785Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-05-07T09:02:57.176888Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T09:02:57.176917Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-05-07T09:02:57.176959Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-05-07T09:02:57.176995Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T09:02:57.177105Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:676:2580], serverId# [1:702:2593], sessionId# [0:0:0] 2025-05-07T09:02:57.177134Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037889 2025-05-07T09:02:57.177192Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037889 2025-05-07T09:02:57.177237Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037890 2025-05-07T09:02:57.177270Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037890 2025-05-07T09:02:57.177376Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-05-07T09:02:57.177629Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 
2025-05-07T09:02:57.177710Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-05-07T09:02:57.178052Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037889 2025-05-07T09:02:57.178080Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 active 0 active planned 0 immediate 0 planned 0 2025-05-07T09:02:57.178102Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037889 TxInFly 0 2025-05-07T09:02:57.178128Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-05-07T09:02:57.178167Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037890 2025-05-07T09:02:57.178183Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037890 active 0 active planned 0 immediate 0 planned 0 2025-05-07T09:02:57.178198Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186 ... d__readset.cpp:91: TTxReadSet::Complete at 72075186224037888 2025-05-07T09:03:11.867688Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-05-07T09:03:11.867776Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037889 2025-05-07T09:03:11.867828Z node 3 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 72075186224037890 source 72075186224037890 dest 72075186224037888 consumer 72075186224037888 txId 281474976715661 2025-05-07T09:03:11.867886Z node 3 :TX_DATASHARD DEBUG: datashard_distributed_erase.cpp:784: [DistEraser] [3:1087:2826] HandlePropose TEvDataShard::TEvProposeTransactionResult: txId# 281474976715662, shard# 72075186224037888, status# 1 2025-05-07T09:03:11.867949Z node 3 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 72075186224037889 source 72075186224037889 dest 72075186224037888 consumer 72075186224037888 txId 281474976715661 2025-05-07T09:03:11.867975Z node 3 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 72075186224037890 2025-05-07T09:03:11.867993Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037890 2025-05-07T09:03:11.868034Z node 3 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 72075186224037888 source 72075186224037888 dest 72075186224037890 consumer 72075186224037890 txId 281474976715661 2025-05-07T09:03:11.868056Z node 3 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 72075186224037889 source 72075186224037889 dest 72075186224037890 consumer 72075186224037890 txId 281474976715661 2025-05-07T09:03:11.868078Z node 3 :TX_DATASHARD DEBUG: datashard_distributed_erase.cpp:784: [DistEraser] [3:1087:2826] HandlePropose TEvDataShard::TEvProposeTransactionResult: txId# 281474976715662, shard# 72075186224037889, status# 1 2025-05-07T09:03:11.868099Z node 3 :TX_DATASHARD DEBUG: datashard_distributed_erase.cpp:784: [DistEraser] [3:1087:2826] HandlePropose TEvDataShard::TEvProposeTransactionResult: txId# 281474976715662, shard# 72075186224037890, status# 1 2025-05-07T09:03:11.868120Z node 3 :TX_DATASHARD DEBUG: datashard_distributed_erase.cpp:901: [DistEraser] [3:1087:2826] Register plan: txId# 281474976715662, 
minStep# 1512, maxStep# 31512 2025-05-07T09:03:11.880083Z node 3 :TX_DATASHARD INFO: datashard.cpp:190: OnDetach: 72075186224037888 2025-05-07T09:03:11.886057Z node 3 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186224037888 2025-05-07T09:03:11.887959Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3635: Client pipe to tablet 72075186224037888 from 72075186224037889 is reset 2025-05-07T09:03:11.888004Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3635: Client pipe to tablet 72075186224037888 from 72075186224037890 is reset 2025-05-07T09:03:11.888139Z node 3 :TX_DATASHARD ERROR: datashard_distributed_erase.cpp:167: [DistEraser] [3:1087:2826] Reply: txId# 281474976715662, status# SHARD_UNKNOWN, error# Tx state unknown: reason# lost pipe while waiting for reply (plan), txId# 281474976715662, shard# 72075186224037888 2025-05-07T09:03:11.888750Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4471: FullScan complete at 72075186224037889 2025-05-07T09:03:11.888792Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4487: Conditional erase complete: cookie: 4, at: 72075186224037889 2025-05-07T09:03:11.889176Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037889 2025-05-07T09:03:11.889214Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 active 0 active planned 0 immediate 0 planned 1 2025-05-07T09:03:11.889263Z node 3 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037889 TxInFly 1 2025-05-07T09:03:11.889304Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-05-07T09:03:11.889452Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037889, clientId# [3:1081:2821], serverId# [3:1082:2822], sessionId# [0:0:0] 2025-05-07T09:03:11.906169Z node 3 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [3:1098:2836] 2025-05-07T09:03:11.906355Z node 3 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-05-07T09:03:11.909122Z node 3 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-05-07T09:03:11.909829Z node 3 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-05-07T09:03:11.911280Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-05-07T09:03:11.911355Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-05-07T09:03:11.911394Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-05-07T09:03:11.911673Z node 3 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-05-07T09:03:11.911921Z node 3 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-05-07T09:03:11.911963Z node 3 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [3:1113:2836] in generation 2 2025-05-07T09:03:11.933119Z node 3 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-05-07T09:03:11.933263Z node 3 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state Ready tabletId 72075186224037888 2025-05-07T09:03:11.933381Z node 3 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators 
count is 1 coordinators count is 1 buckets per mediator 2 2025-05-07T09:03:11.933690Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [3:1116:2844] 2025-05-07T09:03:11.933734Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-05-07T09:03:11.933786Z node 3 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-05-07T09:03:11.933827Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:03:11.934092Z node 3 :TX_DATASHARD DEBUG: datashard__init.cpp:711: TxInitSchemaDefaults.Execute 2025-05-07T09:03:11.934278Z node 3 :TX_DATASHARD DEBUG: datashard__init.cpp:723: TxInitSchemaDefaults.Complete 2025-05-07T09:03:11.935429Z node 3 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-05-07T09:03:11.935536Z node 3 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-05-07T09:03:11.935666Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 1511 2025-05-07T09:03:11.935712Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:03:11.935880Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T09:03:11.935923Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-05-07T09:03:11.935969Z node 3 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 1 2025-05-07T09:03:11.936019Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T09:03:11.936118Z node 3 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-05-07T09:03:11.936230Z node 3 :TX_DATASHARD DEBUG: datashard__progress_resend_rs.cpp:14: Start TTxProgressResendRS at tablet 72075186224037888 2025-05-07T09:03:11.936274Z node 3 :TX_DATASHARD INFO: datashard.cpp:4101: Resend RS at 72075186224037888 from 72075186224037888 to 72075186224037889 txId 281474976715661 2025-05-07T09:03:11.936314Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3990: Send RS 1 at 72075186224037888 from 72075186224037888 to 72075186224037889 txId 281474976715661 2025-05-07T09:03:11.936488Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3359: Receive RS at 72075186224037889 source 72075186224037888 dest 72075186224037889 producer 72075186224037888 txId 281474976715661 2025-05-07T09:03:11.936567Z node 3 :TX_DATASHARD DEBUG: datashard__readset.cpp:15: TTxReadSet::Execute at 72075186224037889 got read set: {TEvReadSet step# 1511 txid# 281474976715661 TabletSource# 72075186224037888 TabletDest# 72075186224037889 SetTabletProducer# 72075186224037888 ReadSet.Size()# 2 Seqno# 1 Flags# 0} 2025-05-07T09:03:11.936621Z node 3 :TX_DATASHARD NOTICE: datashard_pipeline.cpp:734: Outdated readset for 1511:281474976715661 at 72075186224037889 2025-05-07T09:03:11.936668Z node 3 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 72075186224037889 
2025-05-07T09:03:11.936722Z node 3 :TX_DATASHARD DEBUG: datashard__readset.cpp:99: Send RS Ack at 72075186224037889 {TEvReadSet step# 1511 txid# 281474976715661 TabletSource# 72075186224037888 TabletDest# 72075186224037889 SetTabletProducer# 72075186224037888 ReadSet.Size()# 2 Seqno# 1 Flags# 0}
2025-05-07T09:03:11.936814Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 1500 next step 1511
2025-05-07T09:03:11.936892Z node 3 :TX_DATASHARD DEBUG: datashard__progress_resend_rs.cpp:14: Start TTxProgressResendRS at tablet 72075186224037888
2025-05-07T09:03:11.936918Z node 3 :TX_DATASHARD INFO: datashard.cpp:4101: Resend RS at 72075186224037888 from 72075186224037888 to 72075186224037890 txId 281474976715661
2025-05-07T09:03:11.936944Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3990: Send RS 2 at 72075186224037888 from 72075186224037888 to 72075186224037890 txId 281474976715661
2025-05-07T09:03:11.937155Z node 3 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 72075186224037888 source 72075186224037888 dest 72075186224037889 consumer 72075186224037889 txId 281474976715661
2025-05-07T09:03:11.937220Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3359: Receive RS at 72075186224037890 source 72075186224037888 dest 72075186224037890 producer 72075186224037888 txId 281474976715661
2025-05-07T09:03:11.937272Z node 3 :TX_DATASHARD DEBUG: datashard__readset.cpp:15: TTxReadSet::Execute at 72075186224037890 got read set: {TEvReadSet step# 1511 txid# 281474976715661 TabletSource# 72075186224037888 TabletDest# 72075186224037890 SetTabletProducer# 72075186224037888 ReadSet.Size()# 2 Seqno# 2 Flags# 0}
2025-05-07T09:03:11.937304Z node 3 :TX_DATASHARD NOTICE: datashard_pipeline.cpp:734: Outdated readset for 1511:281474976715661 at 72075186224037890
2025-05-07T09:03:11.937335Z node 3 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 72075186224037890
2025-05-07T09:03:11.937371Z node 3 :TX_DATASHARD DEBUG: datashard__readset.cpp:99: Send RS Ack at 72075186224037890 {TEvReadSet step# 1511 txid# 281474976715661 TabletSource# 72075186224037888 TabletDest# 72075186224037890 SetTabletProducer# 72075186224037888 ReadSet.Size()# 2 Seqno# 2 Flags# 0}
2025-05-07T09:03:11.937436Z node 3 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 72075186224037888 source 72075186224037888 dest 72075186224037890 consumer 72075186224037890 txId 281474976715661
>> TPipeCacheTest::TestIdleRefresh [GOOD]
>> TPipeCacheTest::TestTabletNode
>> TPipeCacheTest::TestTabletNode [GOOD]
|92.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TPipeCacheTest::TestTabletNode [GOOD]
>> TReplicaTest::UpdateWithoutHandshake
>> TReplicaTest::HandshakeWithStaleGeneration
>> TReplicaTest::CommitWithoutHandshake
>> TReplicaTest::Update
>> TReplicaTest::Unsubscribe
>> TReplicaTest::Merge
>> TReplicaTest::Handshake
>> DistributedEraseTests::ConditionalEraseRowsShouldNotEraseModifiedRows [GOOD]
>> DistributedEraseTests::ConditionalEraseRowsShouldNotFailOnMissingRows
|92.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/async_replication/py3test >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_index_0__ASYNC-pk_types6-all_types6-index6---ASYNC] [GOOD]
|92.3%| [TA] $(B)/ydb/core/tablet/ut/test-results/unittest/{meta.json ... results_accumulator.log}
|92.3%| [TA] {RESULT} $(B)/ydb/core/tablet/ut/test-results/unittest/{meta.json ... results_accumulator.log}
>> TReplicaCombinationTest::UpdatesCombinationsDomainRoot
>> TReplicaTest::HandshakeWithStaleGeneration [GOOD]
>> TReplicaTest::IdempotencyUpdatesAliveSubscriber
>> TReplicaTest::Handshake [GOOD]
>> TReplicaTest::DoubleUnsubscribe
>> TReplicaTest::CommitWithoutHandshake [GOOD]
>> TReplicaTest::CommitWithStaleGeneration
>> TReplicaTest::Update [GOOD]
>> TReplicaTest::UnsubscribeWithoutSubscribe
>> TReplicaTest::Unsubscribe [GOOD]
>> TReplicaTest::UnsubscribeUnknownPath
>> TReplicaTest::Merge [GOOD]
>> TReplicaTest::IdempotencyUpdatesWithoutSubscribers
>> TReplicaTest::UpdateWithoutHandshake [GOOD]
>> TReplicaTest::UpdateWithStaleGeneration
>> TReplicaCombinationTest::UpdatesCombinationsDomainRoot [GOOD]
>> TReplicaCombinationTest::UpdatesCombinationsMigratedPath
>> TReplicaTest::UnsubscribeWithoutSubscribe [GOOD]
>> TReplicaTest::UnsubscribeUnknownPath [GOOD]
>> TReplicaTest::IdempotencyUpdatesWithoutSubscribers [GOOD]
>> TReplicaTest::StrongNotificationAfterCommit
>> TReplicaTest::UpdateWithStaleGeneration [GOOD]
>> TReplicaTest::IdempotencyUpdatesAliveSubscriber [GOOD]
>> TReplicaTest::IdempotencyUpdatesVariant2
>> TReplicaTest::DoubleUnsubscribe [GOOD]
>> TReplicaTest::DoubleDelete
>> TReplicaTest::CommitWithStaleGeneration [GOOD]
>> TReplicaTest::Delete
>> DistributedEraseTests::ConditionalEraseRowsCheckLimits [GOOD]
>> DistributedEraseTests::ConditionalEraseRowsAsyncIndex
>> TReplicaCombinationTest::UpdatesCombinationsMigratedPath [GOOD]
>> TReplicaCombinationTest::MigratedPathRecreation
>> TReplicaTest::DoubleDelete [GOOD]
>> TReplicaTest::Delete [GOOD]
>> TReplicaTest::StrongNotificationAfterCommit [GOOD]
>> TReplicaTest::IdempotencyUpdatesVariant2 [GOOD]
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_replica/unittest >> TReplicaTest::UnsubscribeWithoutSubscribe [GOOD]
Test command err:
2025-05-07T09:03:15.650130Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [1:6:2053] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 1 Generation: 1 }: sender# [1:7:2054]
2025-05-07T09:03:15.650183Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [1:6:2053] Successful handshake: owner# 1, generation# 1
2025-05-07T09:03:15.691384Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [1:6:2053] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [1:7:2054], cookie# 0, event size# 72
2025-05-07T09:03:15.691443Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [1:6:2053] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], deletion# false
2025-05-07T09:03:15.696073Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:550: [1:6:2053] Upsert description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], pathDescription# {Status StatusSuccess, Path path, PathId [OwnerId: 1, LocalPathId: 1], PathVersion 1, SubdomainPathId , PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 30}
2025-05-07T09:03:15.696187Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:6:2053] Handle NKikimrSchemeBoard.TEvSubscribe { Path: path DomainOwnerId: 0 }: sender# [1:7:2054]
2025-05-07T09:03:15.701902Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:6:2053] Subscribe: subscriber# [1:7:2054], path# path, domainOwnerId# 0, capabilities#
2025-05-07T09:03:15.702054Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1075: [1:6:2053] Handle NKikimrSchemeBoard.TEvUnsubscribe { Path: path }:
sender# [1:7:2054] 2025-05-07T09:03:15.712762Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:662: [1:6:2053] Unsubscribe: subscriber# [1:7:2054], path# path 2025-05-07T09:03:15.712871Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:6:2053] Handle NKikimrSchemeBoard.TEvSubscribe { PathId: [OwnerId: 1, LocalPathId: 1] DomainOwnerId: 0 }: sender# [1:7:2054] 2025-05-07T09:03:15.712917Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:6:2053] Subscribe: subscriber# [1:7:2054], path# [OwnerId: 1, LocalPathId: 1], domainOwnerId# 0, capabilities# 2025-05-07T09:03:15.713007Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1075: [1:6:2053] Handle NKikimrSchemeBoard.TEvUnsubscribe { PathId: [OwnerId: 1, LocalPathId: 1] }: sender# [1:7:2054] 2025-05-07T09:03:15.713042Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:662: [1:6:2053] Unsubscribe: subscriber# [1:7:2054], path# [OwnerId: 1, LocalPathId: 1] 2025-05-07T09:03:15.850759Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [2:6:2053] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 1 Generation: 1 }: sender# [2:7:2054] 2025-05-07T09:03:15.850824Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [2:6:2053] Successful handshake: owner# 1, generation# 1 2025-05-07T09:03:15.850894Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [2:6:2053] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [2:7:2054], cookie# 0, event size# 72 2025-05-07T09:03:15.850938Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [2:6:2053] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], deletion# false 2025-05-07T09:03:15.850978Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:550: [2:6:2053] Upsert description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], pathDescription# {Status StatusSuccess, Path path, PathId [OwnerId: 1, LocalPathId: 1], PathVersion 1, SubdomainPathId , PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 30} 2025-05-07T09:03:15.851031Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1075: [2:6:2053] Handle NKikimrSchemeBoard.TEvUnsubscribe { Path: path }: sender# [2:7:2054] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_replica/unittest >> TReplicaTest::UnsubscribeUnknownPath [GOOD] Test command err: 2025-05-07T09:03:15.672242Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [1:6:2053] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 1 Generation: 1 }: sender# [1:7:2054] 2025-05-07T09:03:15.672308Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [1:6:2053] Successful handshake: owner# 1, generation# 1 2025-05-07T09:03:15.672394Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:6:2053] Handle NKikimrSchemeBoard.TEvSubscribe { Path: path DomainOwnerId: 0 }: sender# [1:8:2055] 2025-05-07T09:03:15.672417Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [1:6:2053] Upsert description: path# path 2025-05-07T09:03:15.701906Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:6:2053] Subscribe: subscriber# [1:8:2055], path# path, domainOwnerId# 0, capabilities# 2025-05-07T09:03:15.702083Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:6:2053] Handle NKikimrSchemeBoard.TEvSubscribe { Path: path DomainOwnerId: 0 }: sender# [1:9:2056] 2025-05-07T09:03:15.702124Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:6:2053] Subscribe: subscriber# [1:9:2056], path# path, domainOwnerId# 0, capabilities# 2025-05-07T09:03:15.702213Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [1:6:2053] 
Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [1:7:2054], cookie# 0, event size# 72 2025-05-07T09:03:15.702247Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [1:6:2053] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], deletion# false 2025-05-07T09:03:15.706741Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:550: [1:6:2053] Upsert description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], pathDescription# {Status StatusSuccess, Path path, PathId [OwnerId: 1, LocalPathId: 1], PathVersion 1, SubdomainPathId , PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 30} 2025-05-07T09:03:15.727130Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1075: [1:6:2053] Handle NKikimrSchemeBoard.TEvUnsubscribe { Path: path }: sender# [1:8:2055] 2025-05-07T09:03:15.727184Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:662: [1:6:2053] Unsubscribe: subscriber# [1:8:2055], path# path 2025-05-07T09:03:15.727274Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [1:6:2053] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [1:7:2054], cookie# 0, event size# 40 2025-05-07T09:03:15.727318Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [1:6:2053] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], deletion# true 2025-05-07T09:03:15.727343Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:575: [1:6:2053] Delete description: path# path, pathId# [OwnerId: 1, LocalPathId: 1] 2025-05-07T09:03:15.851001Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1075: [2:6:2053] Handle NKikimrSchemeBoard.TEvUnsubscribe { Path: path }: sender# [2:7:2054] >> TReplicaCombinationTest::MigratedPathRecreation [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_replica/unittest >> TReplicaTest::UpdateWithStaleGeneration [GOOD] Test command err: 2025-05-07T09:03:15.692727Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [1:6:2053] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [1:7:2054], cookie# 0, event size# 72 2025-05-07T09:03:15.692779Z node 1 :SCHEME_BOARD_REPLICA ERROR: replica.cpp:797: [1:6:2053] Reject update from unknown populator: sender# [1:7:2054], owner# 1, generation# 1 2025-05-07T09:03:15.692852Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:6:2053] Handle NKikimrSchemeBoard.TEvSubscribe { Path: path DomainOwnerId: 0 }: sender# [1:7:2054] 2025-05-07T09:03:15.692877Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [1:6:2053] Upsert description: path# path 2025-05-07T09:03:15.701917Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:6:2053] Subscribe: subscriber# [1:7:2054], path# path, domainOwnerId# 0, capabilities# 2025-05-07T09:03:15.702067Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1075: [1:6:2053] Handle NKikimrSchemeBoard.TEvUnsubscribe { Path: path }: sender# [1:7:2054] 2025-05-07T09:03:15.712769Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:662: [1:6:2053] Unsubscribe: subscriber# [1:7:2054], path# path 2025-05-07T09:03:15.712873Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:6:2053] Handle NKikimrSchemeBoard.TEvSubscribe { PathId: [OwnerId: 1, LocalPathId: 1] DomainOwnerId: 0 }: sender# [1:7:2054] 2025-05-07T09:03:15.712905Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [1:6:2053] Upsert description: path# [OwnerId: 1, LocalPathId: 1] 2025-05-07T09:03:15.712957Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:6:2053] Subscribe: subscriber# [1:7:2054], path# [OwnerId: 1, 
LocalPathId: 1], domainOwnerId# 0, capabilities# 2025-05-07T09:03:15.713027Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1075: [1:6:2053] Handle NKikimrSchemeBoard.TEvUnsubscribe { PathId: [OwnerId: 1, LocalPathId: 1] }: sender# [1:7:2054] 2025-05-07T09:03:15.713060Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:662: [1:6:2053] Unsubscribe: subscriber# [1:7:2054], path# [OwnerId: 1, LocalPathId: 1] 2025-05-07T09:03:15.850380Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [2:6:2053] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 1 Generation: 1 }: sender# [2:7:2054] 2025-05-07T09:03:15.850468Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [2:6:2053] Successful handshake: owner# 1, generation# 1 2025-05-07T09:03:15.850547Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [2:6:2053] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 0 }: sender# [2:7:2054], cookie# 0, event size# 72 2025-05-07T09:03:15.850574Z node 2 :SCHEME_BOARD_REPLICA ERROR: replica.cpp:805: [2:6:2053] Reject update from stale populator: sender# [2:7:2054], owner# 1, generation# 0, pending generation# 1 2025-05-07T09:03:15.850643Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [2:6:2053] Handle NKikimrSchemeBoard.TEvSubscribe { Path: path DomainOwnerId: 0 }: sender# [2:7:2054] 2025-05-07T09:03:15.850673Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [2:6:2053] Upsert description: path# path 2025-05-07T09:03:15.850712Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [2:6:2053] Subscribe: subscriber# [2:7:2054], path# path, domainOwnerId# 0, capabilities# 2025-05-07T09:03:15.850770Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1075: [2:6:2053] Handle NKikimrSchemeBoard.TEvUnsubscribe { Path: path }: sender# [2:7:2054] 2025-05-07T09:03:15.850798Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:662: [2:6:2053] Unsubscribe: subscriber# [2:7:2054], path# path 2025-05-07T09:03:15.850845Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [2:6:2053] Handle NKikimrSchemeBoard.TEvSubscribe { PathId: [OwnerId: 1, LocalPathId: 1] DomainOwnerId: 0 }: sender# [2:7:2054] 2025-05-07T09:03:15.850869Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [2:6:2053] Upsert description: path# [OwnerId: 1, LocalPathId: 1] 2025-05-07T09:03:15.850904Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [2:6:2053] Subscribe: subscriber# [2:7:2054], path# [OwnerId: 1, LocalPathId: 1], domainOwnerId# 0, capabilities# 2025-05-07T09:03:15.850945Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1075: [2:6:2053] Handle NKikimrSchemeBoard.TEvUnsubscribe { PathId: [OwnerId: 1, LocalPathId: 1] }: sender# [2:7:2054] 2025-05-07T09:03:15.850989Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:662: [2:6:2053] Unsubscribe: subscriber# [2:7:2054], path# [OwnerId: 1, LocalPathId: 1] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_replica/unittest >> TReplicaTest::Delete [GOOD] Test command err: 2025-05-07T09:03:15.640657Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:958: [1:6:2053] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 1 Generation: 1 }: sender# [1:7:2054] 2025-05-07T09:03:15.640754Z node 1 :SCHEME_BOARD_REPLICA ERROR: replica.cpp:969: [1:6:2053] Reject commit from unknown populator: sender# [1:7:2054], owner# 1, generation# 1 2025-05-07T09:03:15.648818Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [1:6:2053] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 1 Generation: 1 }: sender# [1:7:2054] 2025-05-07T09:03:15.648875Z node 1 :SCHEME_BOARD_REPLICA 
NOTICE: replica.cpp:769: [1:6:2053] Successful handshake: owner# 1, generation# 1 2025-05-07T09:03:15.850139Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [2:6:2053] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 1 Generation: 0 }: sender# [2:7:2054] 2025-05-07T09:03:15.850198Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [2:6:2053] Successful handshake: owner# 1, generation# 0 2025-05-07T09:03:15.850268Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [2:6:2053] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 1 Generation: 1 }: sender# [2:8:2055] 2025-05-07T09:03:15.850293Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [2:6:2053] Successful handshake: owner# 1, generation# 1 2025-05-07T09:03:15.850366Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:958: [2:6:2053] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 1 Generation: 1 }: sender# [2:8:2055] 2025-05-07T09:03:15.850403Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:985: [2:6:2053] Commit generation: owner# 1, generation# 1 2025-05-07T09:03:15.850445Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:958: [2:6:2053] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 1 Generation: 0 }: sender# [2:7:2054] 2025-05-07T09:03:15.850472Z node 2 :SCHEME_BOARD_REPLICA ERROR: replica.cpp:979: [2:6:2053] Reject commit from stale populator: sender# [2:7:2054], owner# 1, generation# 0, pending generation# 1 2025-05-07T09:03:15.850518Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [2:6:2053] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 1 Generation: 2 }: sender# [2:7:2054] 2025-05-07T09:03:15.850582Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [2:6:2053] Successful handshake: owner# 1, generation# 2 2025-05-07T09:03:16.110374Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [3:6:2053] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 1 Generation: 1 }: sender# [3:7:2054] 2025-05-07T09:03:16.110421Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [3:6:2053] Successful handshake: owner# 1, generation# 1 2025-05-07T09:03:16.110523Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [3:6:2053] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [3:7:2054], cookie# 0, event size# 72 2025-05-07T09:03:16.110554Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [3:6:2053] Update description: path# path, pathId# [OwnerId: 42, LocalPathId: 1], deletion# false 2025-05-07T09:03:16.115849Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:550: [3:6:2053] Upsert description: path# path, pathId# [OwnerId: 42, LocalPathId: 1], pathDescription# {Status StatusSuccess, Path path, PathId [OwnerId: 42, LocalPathId: 1], PathVersion 1, SubdomainPathId , PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 30} 2025-05-07T09:03:16.115995Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [3:6:2053] Handle NKikimrSchemeBoard.TEvSubscribe { Path: path DomainOwnerId: 0 }: sender# [3:8:2055] 2025-05-07T09:03:16.116081Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [3:6:2053] Subscribe: subscriber# [3:8:2055], path# path, domainOwnerId# 0, capabilities# 2025-05-07T09:03:16.116224Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [3:6:2053] Handle NKikimrSchemeBoard.TEvSubscribe { PathId: [OwnerId: 42, LocalPathId: 1] DomainOwnerId: 0 }: sender# [3:9:2056] 2025-05-07T09:03:16.116277Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [3:6:2053] Subscribe: subscriber# [3:9:2056], path# [OwnerId: 42, LocalPathId: 1], domainOwnerId# 0, 
capabilities# 2025-05-07T09:03:16.116373Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [3:6:2053] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [3:7:2054], cookie# 0, event size# 40 2025-05-07T09:03:16.116407Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [3:6:2053] Update description: path# path, pathId# [OwnerId: 42, LocalPathId: 1], deletion# true 2025-05-07T09:03:16.116434Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:575: [3:6:2053] Delete description: path# path, pathId# [OwnerId: 42, LocalPathId: 1] 2025-05-07T09:03:16.116558Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [3:6:2053] Handle NKikimrSchemeBoard.TEvSubscribe { Path: path DomainOwnerId: 0 }: sender# [3:10:2057] 2025-05-07T09:03:16.116595Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [3:6:2053] Subscribe: subscriber# [3:10:2057], path# path, domainOwnerId# 0, capabilities# 2025-05-07T09:03:16.116676Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [3:6:2053] Handle NKikimrSchemeBoard.TEvSubscribe { PathId: [OwnerId: 42, LocalPathId: 1] DomainOwnerId: 0 }: sender# [3:11:2058] 2025-05-07T09:03:16.116714Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [3:6:2053] Subscribe: subscriber# [3:11:2058], path# [OwnerId: 42, LocalPathId: 1], domainOwnerId# 0, capabilities# 2025-05-07T09:03:16.116801Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [3:6:2053] Handle NKikimrSchemeBoard.TEvSubscribe { Path: path DomainOwnerId: 0 }: sender# [3:12:2059] 2025-05-07T09:03:16.116831Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [3:6:2053] Subscribe: subscriber# [3:12:2059], path# path, domainOwnerId# 0, capabilities# ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_replica/unittest >> TReplicaTest::StrongNotificationAfterCommit [GOOD] Test command err: 2025-05-07T09:03:15.671935Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:6:2053] Handle NKikimrSchemeBoard.TEvSubscribe { Path: path DomainOwnerId: 0 }: sender# [1:8:2055] 2025-05-07T09:03:15.672030Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [1:6:2053] Upsert description: path# path 2025-05-07T09:03:15.701915Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:6:2053] Subscribe: subscriber# [1:8:2055], path# path, domainOwnerId# 0, capabilities# 2025-05-07T09:03:15.702056Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:6:2053] Handle NKikimrSchemeBoard.TEvSubscribe { PathId: [OwnerId: 1, LocalPathId: 1] DomainOwnerId: 0 }: sender# [1:9:2056] 2025-05-07T09:03:15.702094Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [1:6:2053] Upsert description: path# [OwnerId: 1, LocalPathId: 1] 2025-05-07T09:03:15.702135Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:6:2053] Subscribe: subscriber# [1:9:2056], path# [OwnerId: 1, LocalPathId: 1], domainOwnerId# 0, capabilities# 2025-05-07T09:03:15.702197Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [1:6:2053] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 1 Generation: 1 }: sender# [1:7:2054] 2025-05-07T09:03:15.702225Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [1:6:2053] Successful handshake: owner# 1, generation# 1 2025-05-07T09:03:15.702297Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [1:6:2053] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [1:7:2054], cookie# 0, event size# 72 2025-05-07T09:03:15.702329Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [1:6:2053] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], 
deletion# false 2025-05-07T09:03:15.707034Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:550: [1:6:2053] Upsert description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], pathDescription# {Status StatusSuccess, Path path, PathId [OwnerId: 1, LocalPathId: 1], PathVersion 1, SubdomainPathId , PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 30} 2025-05-07T09:03:15.727181Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [1:6:2053] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [1:7:2054], cookie# 0, event size# 40 2025-05-07T09:03:15.727220Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [1:6:2053] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], deletion# true 2025-05-07T09:03:15.727249Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:575: [1:6:2053] Delete description: path# path, pathId# [OwnerId: 1, LocalPathId: 1] 2025-05-07T09:03:15.853963Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [2:6:2053] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 1 Generation: 1 }: sender# [2:7:2054] 2025-05-07T09:03:15.854027Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [2:6:2053] Successful handshake: owner# 1, generation# 1 2025-05-07T09:03:15.854102Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [2:6:2053] Handle NKikimrSchemeBoard.TEvSubscribe { PathId: [OwnerId: 1, LocalPathId: 1] DomainOwnerId: 0 }: sender# [2:8:2055] 2025-05-07T09:03:15.854127Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [2:6:2053] Upsert description: path# [OwnerId: 1, LocalPathId: 1] 2025-05-07T09:03:15.854183Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [2:6:2053] Subscribe: subscriber# [2:8:2055], path# [OwnerId: 1, LocalPathId: 1], domainOwnerId# 0, capabilities# 2025-05-07T09:03:15.854254Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [2:6:2053] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [2:7:2054], cookie# 0, event size# 72 2025-05-07T09:03:15.854277Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [2:6:2053] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], deletion# false 2025-05-07T09:03:15.854326Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:550: [2:6:2053] Upsert description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], pathDescription# {Status StatusSuccess, Path path, PathId [OwnerId: 1, LocalPathId: 1], PathVersion 1, SubdomainPathId , PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 30} 2025-05-07T09:03:15.854419Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [2:6:2053] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [2:7:2054], cookie# 0, event size# 40 2025-05-07T09:03:15.854442Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [2:6:2053] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], deletion# true 2025-05-07T09:03:15.854461Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:575: [2:6:2053] Delete description: path# path, pathId# [OwnerId: 1, LocalPathId: 1] 2025-05-07T09:03:15.854511Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1075: [2:6:2053] Handle NKikimrSchemeBoard.TEvUnsubscribe { PathId: [OwnerId: 1, LocalPathId: 1] }: sender# [2:8:2055] 2025-05-07T09:03:15.854551Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:662: [2:6:2053] Unsubscribe: subscriber# [2:8:2055], path# [OwnerId: 1, LocalPathId: 1] 2025-05-07T09:03:15.854593Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [2:6:2053] Handle 
NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [2:7:2054], cookie# 0, event size# 72 2025-05-07T09:03:15.854626Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [2:6:2053] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], deletion# false 2025-05-07T09:03:15.854648Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:834: [2:6:2053] Path was explicitly deleted, ignoring: path# path, pathId# [OwnerId: 1, LocalPathId: 1] 2025-05-07T09:03:15.854686Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [2:6:2053] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [2:7:2054], cookie# 0, event size# 72 2025-05-07T09:03:15.854712Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [2:6:2053] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 2], deletion# false 2025-05-07T09:03:15.854747Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:550: [2:6:2053] Upsert description: path# path, pathId# [OwnerId: 1, LocalPathId: 2], pathDescription# {Status StatusSuccess, Path path, PathId [OwnerId: 1, LocalPathId: 2], PathVersion 1, SubdomainPathId , PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 30} 2025-05-07T09:03:15.854817Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [2:6:2053] Handle NKikimrSchemeBoard.TEvSubscribe { PathId: [OwnerId: 1, LocalPathId: 2] DomainOwnerId: 0 }: sender# [2:9:2056] 2025-05-07T09:03:15.854847Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [2:6:2053] Subscribe: subscriber# [2:9:2056], path# [OwnerId: 1, LocalPathId: 2], domainOwnerId# 0, capabilities# 2025-05-07T09:03:16.111368Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [3:6:2053] Handle NKikimrSchemeBoard.TEvSubscribe { Path: path DomainOwnerId: 1 }: sender# [3:8:2055] 2025-05-07T09:03:16.111405Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [3:6:2053] Upsert description: path# path 2025-05-07T09:03:16.111459Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [3:6:2053] Subscribe: subscriber# [3:8:2055], path# path, domainOwnerId# 1, capabilities# 2025-05-07T09:03:16.111553Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [3:6:2053] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 1 Generation: 1 }: sender# [3:7:2054] 2025-05-07T09:03:16.111576Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [3:6:2053] Successful handshake: owner# 1, generation# 1 2025-05-07T09:03:16.111624Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:958: [3:6:2053] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 1 Generation: 1 }: sender# [3:7:2054] 2025-05-07T09:03:16.111651Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:985: [3:6:2053] Commit generation: owner# 1, generation# 1 2025-05-07T09:03:16.111725Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:997: [3:6:2053] Handle NKikimr::NSchemeBoard::TReplica::TEvPrivate::TEvSendStrongNotifications { Owner: 1 } ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_replica/unittest >> TReplicaTest::IdempotencyUpdatesVariant2 [GOOD] Test command err: 2025-05-07T09:03:15.650319Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [1:6:2053] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 1 Generation: 2 }: sender# [1:7:2054] 2025-05-07T09:03:15.650370Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [1:6:2053] Successful handshake: owner# 1, generation# 2 2025-05-07T09:03:15.650438Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [1:6:2053] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 1 Generation: 1 }: 
sender# [1:7:2054] 2025-05-07T09:03:15.650478Z node 1 :SCHEME_BOARD_REPLICA ERROR: replica.cpp:763: [1:6:2053] Reject handshake from stale populator: sender# [1:7:2054], owner# 1, generation# 1, pending generation# 2 2025-05-07T09:03:15.855233Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [2:6:2053] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 1 Generation: 1 }: sender# [2:7:2054] 2025-05-07T09:03:15.855306Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [2:6:2053] Successful handshake: owner# 1, generation# 1 2025-05-07T09:03:15.855409Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [2:6:2053] Handle NKikimrSchemeBoard.TEvSubscribe { PathId: [OwnerId: 1, LocalPathId: 1] DomainOwnerId: 0 }: sender# [2:8:2055] 2025-05-07T09:03:15.855454Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [2:6:2053] Upsert description: path# [OwnerId: 1, LocalPathId: 1] 2025-05-07T09:03:15.855538Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [2:6:2053] Subscribe: subscriber# [2:8:2055], path# [OwnerId: 1, LocalPathId: 1], domainOwnerId# 0, capabilities# 2025-05-07T09:03:15.855648Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [2:6:2053] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [2:7:2054], cookie# 0, event size# 72 2025-05-07T09:03:15.855673Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [2:6:2053] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], deletion# false 2025-05-07T09:03:15.860255Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:550: [2:6:2053] Upsert description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], pathDescription# {Status StatusSuccess, Path path, PathId [OwnerId: 1, LocalPathId: 1], PathVersion 1, SubdomainPathId , PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 30} 2025-05-07T09:03:15.860390Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [2:6:2053] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [2:7:2054], cookie# 0, event size# 40 2025-05-07T09:03:15.860423Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [2:6:2053] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], deletion# true 2025-05-07T09:03:15.860456Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:575: [2:6:2053] Delete description: path# path, pathId# [OwnerId: 1, LocalPathId: 1] 2025-05-07T09:03:15.860575Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [2:6:2053] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [2:7:2054], cookie# 0, event size# 72 2025-05-07T09:03:15.860609Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [2:6:2053] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], deletion# false 2025-05-07T09:03:15.860632Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:834: [2:6:2053] Path was explicitly deleted, ignoring: path# path, pathId# [OwnerId: 1, LocalPathId: 1] 2025-05-07T09:03:15.860690Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [2:6:2053] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [2:7:2054], cookie# 0, event size# 72 2025-05-07T09:03:15.860722Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [2:6:2053] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 2], deletion# false 2025-05-07T09:03:15.860756Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:550: [2:6:2053] Upsert description: path# path, pathId# [OwnerId: 1, LocalPathId: 2], pathDescription# {Status StatusSuccess, Path path, PathId [OwnerId: 1, 
LocalPathId: 2], PathVersion 1, SubdomainPathId , PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 30} 2025-05-07T09:03:15.860822Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [2:6:2053] Handle NKikimrSchemeBoard.TEvSubscribe { PathId: [OwnerId: 1, LocalPathId: 2] DomainOwnerId: 0 }: sender# [2:9:2056] 2025-05-07T09:03:15.860874Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [2:6:2053] Subscribe: subscriber# [2:9:2056], path# [OwnerId: 1, LocalPathId: 2], domainOwnerId# 0, capabilities# 2025-05-07T09:03:16.105998Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [3:6:2053] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 1 Generation: 1 }: sender# [3:7:2054] 2025-05-07T09:03:16.106053Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [3:6:2053] Successful handshake: owner# 1, generation# 1 2025-05-07T09:03:16.106146Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [3:6:2053] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [3:7:2054], cookie# 0, event size# 72 2025-05-07T09:03:16.106171Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [3:6:2053] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], deletion# false 2025-05-07T09:03:16.106224Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:550: [3:6:2053] Upsert description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], pathDescription# {Status StatusSuccess, Path path, PathId [OwnerId: 1, LocalPathId: 1], PathVersion 1, SubdomainPathId , PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 30} 2025-05-07T09:03:16.106291Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [3:6:2053] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [3:7:2054], cookie# 0, event size# 72 2025-05-07T09:03:16.106325Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [3:6:2053] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 2], deletion# false 2025-05-07T09:03:16.106348Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:575: [3:6:2053] Delete description: path# path, pathId# [OwnerId: 1, LocalPathId: 1] 2025-05-07T09:03:16.106385Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:550: [3:6:2053] Upsert description: path# path, pathId# [OwnerId: 1, LocalPathId: 2], pathDescription# {Status StatusSuccess, Path path, PathId [OwnerId: 1, LocalPathId: 2], PathVersion 1, SubdomainPathId , PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 30} 2025-05-07T09:03:16.106432Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [3:6:2053] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [3:7:2054], cookie# 0, event size# 40 2025-05-07T09:03:16.106454Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [3:6:2053] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 2], deletion# true 2025-05-07T09:03:16.106474Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:575: [3:6:2053] Delete description: path# path, pathId# [OwnerId: 1, LocalPathId: 2] 2025-05-07T09:03:16.106514Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [3:6:2053] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [3:7:2054], cookie# 0, event size# 72 2025-05-07T09:03:16.106543Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [3:6:2053] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], deletion# false 2025-05-07T09:03:16.106572Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:834: [3:6:2053] Path was explicitly deleted, ignoring: 
path# path, pathId# [OwnerId: 1, LocalPathId: 1] 2025-05-07T09:03:16.106630Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [3:6:2053] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [3:7:2054], cookie# 0, event size# 72 2025-05-07T09:03:16.106662Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [3:6:2053] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 2], deletion# false 2025-05-07T09:03:16.106687Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:834: [3:6:2053] Path was explicitly deleted, ignoring: path# path, pathId# [OwnerId: 1, LocalPathId: 2] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_replica/unittest >> TReplicaTest::DoubleDelete [GOOD] Test command err: 2025-05-07T09:03:15.651037Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [1:6:2053] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 1 Generation: 1 }: sender# [1:7:2054] 2025-05-07T09:03:15.651128Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [1:6:2053] Successful handshake: owner# 1, generation# 1 2025-05-07T09:03:15.872113Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [2:6:2053] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 1 Generation: 1 }: sender# [2:7:2054] 2025-05-07T09:03:15.872177Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [2:6:2053] Successful handshake: owner# 1, generation# 1 2025-05-07T09:03:15.872278Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [2:6:2053] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [2:7:2054], cookie# 0, event size# 72 2025-05-07T09:03:15.872311Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [2:6:2053] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], deletion# false 2025-05-07T09:03:15.876506Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:550: [2:6:2053] Upsert description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], pathDescription# {Status StatusSuccess, Path path, PathId [OwnerId: 1, LocalPathId: 1], PathVersion 1, SubdomainPathId , PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 30} 2025-05-07T09:03:15.876631Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [2:6:2053] Handle NKikimrSchemeBoard.TEvSubscribe { Path: path DomainOwnerId: 0 }: sender# [2:7:2054] 2025-05-07T09:03:15.876717Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [2:6:2053] Subscribe: subscriber# [2:7:2054], path# path, domainOwnerId# 0, capabilities# 2025-05-07T09:03:15.876814Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1075: [2:6:2053] Handle NKikimrSchemeBoard.TEvUnsubscribe { Path: path }: sender# [2:7:2054] 2025-05-07T09:03:15.876857Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:662: [2:6:2053] Unsubscribe: subscriber# [2:7:2054], path# path 2025-05-07T09:03:15.876907Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1075: [2:6:2053] Handle NKikimrSchemeBoard.TEvUnsubscribe { Path: path }: sender# [2:7:2054] 2025-05-07T09:03:16.130017Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [3:6:2053] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 1 Generation: 1 }: sender# [3:7:2054] 2025-05-07T09:03:16.130070Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [3:6:2053] Successful handshake: owner# 1, generation# 1 2025-05-07T09:03:16.130305Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [3:6:2053] Handle NKikimrSchemeBoard.TEvSubscribe { Path: path DomainOwnerId: 0 }: sender# [3:8:2055] 2025-05-07T09:03:16.130336Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: 
[3:6:2053] Upsert description: path# path 2025-05-07T09:03:16.130389Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [3:6:2053] Subscribe: subscriber# [3:8:2055], path# path, domainOwnerId# 0, capabilities# 2025-05-07T09:03:16.130514Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [3:6:2053] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [3:7:2054], cookie# 0, event size# 72 2025-05-07T09:03:16.130555Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [3:6:2053] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], deletion# false 2025-05-07T09:03:16.130597Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:550: [3:6:2053] Upsert description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], pathDescription# {Status StatusSuccess, Path path, PathId [OwnerId: 1, LocalPathId: 1], PathVersion 1, SubdomainPathId , PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 30} 2025-05-07T09:03:16.130738Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [3:6:2053] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [3:7:2054], cookie# 0, event size# 40 2025-05-07T09:03:16.130770Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [3:6:2053] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], deletion# true 2025-05-07T09:03:16.130795Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:575: [3:6:2053] Delete description: path# path, pathId# [OwnerId: 1, LocalPathId: 1] 2025-05-07T09:03:16.130903Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [3:6:2053] Handle NKikimrSchemeBoard.TEvSubscribe { Path: path DomainOwnerId: 0 }: sender# [3:9:2056] 2025-05-07T09:03:16.130939Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [3:6:2053] Subscribe: subscriber# [3:9:2056], path# path, domainOwnerId# 0, capabilities# 2025-05-07T09:03:16.131003Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [3:6:2053] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [3:7:2054], cookie# 0, event size# 40 2025-05-07T09:03:16.131037Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [3:6:2053] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], deletion# true ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_replica/unittest >> TReplicaCombinationTest::MigratedPathRecreation [GOOD] Test command err: 2025-05-07T09:03:15.649905Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [1:6:2053] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 800 Generation: 1 }: sender# [1:7:2054] 2025-05-07T09:03:15.649962Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [1:6:2053] Successful handshake: owner# 800, generation# 1 2025-05-07T09:03:15.650062Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:958: [1:6:2053] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 800 Generation: 1 }: sender# [1:7:2054] 2025-05-07T09:03:15.650101Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:985: [1:6:2053] Commit generation: owner# 800, generation# 1 2025-05-07T09:03:15.650157Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [1:6:2053] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 800 Generation: 1 }: sender# [1:8:2055] 2025-05-07T09:03:15.650178Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [1:6:2053] Successful handshake: owner# 800, generation# 1 2025-05-07T09:03:15.650230Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:958: [1:6:2053] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 800 Generation: 1 }: sender# 
[1:8:2055] 2025-05-07T09:03:15.650261Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:985: [1:6:2053] Commit generation: owner# 800, generation# 1 2025-05-07T09:03:15.691381Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [1:6:2053] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 800 Generation: 1 }: sender# [1:7:2054], cookie# 0, event size# 103 2025-05-07T09:03:15.691442Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [1:6:2053] Update description: path# /Root/Tenant, pathId# [OwnerId: 800, LocalPathId: 2], deletion# false 2025-05-07T09:03:15.696074Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:550: [1:6:2053] Upsert description: path# /Root/Tenant, pathId# [OwnerId: 800, LocalPathId: 2], pathDescription# {Status StatusSuccess, Path /Root/Tenant, PathId [OwnerId: 800, LocalPathId: 2], PathVersion 1, SubdomainPathId [OwnerId: 800, LocalPathId: 2], PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 60} 2025-05-07T09:03:15.696214Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [1:6:2053] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 800 Generation: 1 }: sender# [1:8:2055], cookie# 0, event size# 103 2025-05-07T09:03:15.696250Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [1:6:2053] Update description: path# /Root/Tenant, pathId# [OwnerId: 800, LocalPathId: 2], deletion# false 2025-05-07T09:03:15.696296Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:550: [1:6:2053] Upsert description: path# /Root/Tenant, pathId# [OwnerId: 800, LocalPathId: 2], pathDescription# {Status StatusSuccess, Path /Root/Tenant, PathId [OwnerId: 800, LocalPathId: 2], PathVersion 1, SubdomainPathId [OwnerId: 800, LocalPathId: 2], PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 60} 2025-05-07T09:03:15.696389Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:6:2053] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /Root/Tenant DomainOwnerId: 0 }: sender# [1:9:2056] 2025-05-07T09:03:15.701901Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:6:2053] Subscribe: subscriber# [1:9:2056], path# /Root/Tenant, domainOwnerId# 0, capabilities# =========== Path: "/Root/Tenant" PathDescription { Self { PathVersion: 1 } DomainDescription { DomainKey { SchemeShard: 800 PathId: 2 } } } PathId: 2 PathOwnerId: 800 =========== Path: "/Root/Tenant" PathDescription { Self { PathVersion: 1 } DomainDescription { DomainKey { SchemeShard: 800 PathId: 2 } } } PathId: 2 PathOwnerId: 800 2025-05-07T09:03:15.744216Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [1:10:2057] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 800 Generation: 1 }: sender# [1:11:2058] 2025-05-07T09:03:15.744274Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [1:10:2057] Successful handshake: owner# 800, generation# 1 2025-05-07T09:03:15.744337Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:958: [1:10:2057] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 800 Generation: 1 }: sender# [1:11:2058] 2025-05-07T09:03:15.744357Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:985: [1:10:2057] Commit generation: owner# 800, generation# 1 2025-05-07T09:03:15.744394Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [1:10:2057] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 900 Generation: 1 }: sender# [1:12:2059] 2025-05-07T09:03:15.744409Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [1:10:2057] Successful handshake: owner# 900, generation# 1 2025-05-07T09:03:15.744445Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:958: [1:10:2057] Handle 
NKikimrSchemeBoard.TEvCommitGeneration { Owner: 900 Generation: 1 }: sender# [1:12:2059] 2025-05-07T09:03:15.744465Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:985: [1:10:2057] Commit generation: owner# 900, generation# 1 2025-05-07T09:03:15.744533Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [1:10:2057] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 800 Generation: 1 }: sender# [1:11:2058], cookie# 0, event size# 103 2025-05-07T09:03:15.744556Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [1:10:2057] Update description: path# /Root/Tenant, pathId# [OwnerId: 800, LocalPathId: 2], deletion# false 2025-05-07T09:03:15.744585Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:550: [1:10:2057] Upsert description: path# /Root/Tenant, pathId# [OwnerId: 800, LocalPathId: 2], pathDescription# {Status StatusSuccess, Path /Root/Tenant, PathId [OwnerId: 800, LocalPathId: 2], PathVersion 1, SubdomainPathId [OwnerId: 800, LocalPathId: 2], PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 60} 2025-05-07T09:03:15.744642Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [1:10:2057] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 900 Generation: 1 }: sender# [1:12:2059], cookie# 0, event size# 103 2025-05-07T09:03:15.744666Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [1:10:2057] Update description: path# /Root/Tenant, pathId# [OwnerId: 900, LocalPathId: 1], deletion# false 2025-05-07T09:03:15.744719Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:884: [1:10:2057] Replace GSS by TSS description: path# /Root/Tenant, pathId# [OwnerId: 900, LocalPathId: 1], domainId# [OwnerId: 800, LocalPathId: 2], curPathId# [OwnerId: 800, LocalPathId: 2], curDomainId# [OwnerId: 800, LocalPathId: 2] 2025-05-07T09:03:15.744759Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:550: [1:10:2057] Upsert description: path# /Root/Tenant, pathId# [OwnerId: 900, LocalPathId: 1], pathDescription# {Status StatusSuccess, Path /Root/Tenant, PathId [OwnerId: 900, LocalPathId: 1], PathVersion 1, SubdomainPathId [OwnerId: 800, LocalPathId: 2], PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 60} 2025-05-07T09:03:15.744825Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:10:2057] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /Root/Tenant DomainOwnerId: 0 }: sender# [1:13:2060] 2025-05-07T09:03:15.744858Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:10:2057] Subscribe: subscriber# [1:13:2060], path# /Root/Tenant, domainOwnerId# 0, capabilities# =========== Path: "/Root/Tenant" PathDescription { Self { PathVersion: 1 } DomainDescription { DomainKey { SchemeShard: 800 PathId: 2 } } } PathId: 2 PathOwnerId: 800 =========== Path: "/Root/Tenant" PathDescription { Self { PathVersion: 1 } DomainDescription { DomainKey { SchemeShard: 800 PathId: 2 } } } PathId: 1 PathOwnerId: 900 2025-05-07T09:03:15.745066Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [1:14:2061] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 800 Generation: 1 }: sender# [1:15:2062] 2025-05-07T09:03:15.745091Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [1:14:2061] Successful handshake: owner# 800, generation# 1 2025-05-07T09:03:15.745142Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:958: [1:14:2061] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 800 Generation: 1 }: sender# [1:15:2062] 2025-05-07T09:03:15.745166Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:985: [1:14:2061] Commit generation: owner# 800, generation# 1 2025-05-07T09:03:15.745205Z 
node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [1:14:2061] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 800 Generation: 1 }: sender# [1:16:2063] 2025-05-07T09:03:15.745219Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [1:14:2061] Successful handshake: owner# 800, generation# 1 2025-05-07T09:03:15.745254Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:958: [1:14:2061] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 800 Generation: 1 }: sender# [1:16:2063] 2025-05-07T09:03:15.745269Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:985: [1:14:2061] Commit generation: owner# 800, generation# 1 2025-05-07T09:03:15.745305Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [1:14:2061] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 800 Generation: 1 }: sender# [1:15:2062], cookie# 0, event size# 103 2025-05-07T09:03:15.745323Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [1:14:2061] Update description: path# /Root/Tenant, pathId# [OwnerId: 800, LocalPathId: 2], deletion# false 2025-05-07T09:03:15.745345Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:550: [1:14:2061] Upsert description: path# /Root/Tenant, pathId# [OwnerId: 800, LocalPathId: 2], pathDescription# {Status StatusSuccess, Path /Root/Tenant, PathId [OwnerId: 800, LocalPathId: 2], PathVersion 1, SubdomainPathId [OwnerId: 800, LocalPathId: 2], PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 60} 2025-05-07T09:03:15.745388Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [1:14:2061] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 800 Generation: 1 }: sender# [1:16:2063], cookie# 0, event size# 103 2025-05-07T09:03:15.745407Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [1:14:2061] Update description: path# /Root/Tenant, pathId# [OwnerId: 800, LocalPathId: 2], deletion# false 2025-05-07T09:03:15.745429Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:550: [1:14:2061] Upsert description: path# /Root/Tenant, pathId# [OwnerId: 800, LocalPathId: 2], pathDescription# {Status StatusSuccess, Path /Root/Tenant, PathId [OwnerId: 800, LocalPathId: 2], PathVersion 2, SubdomainPathId [OwnerId: 800, LocalPathId: 2], PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 60} 2025-05-07T09:03:15.745464Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:14:2061] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /Root/Tenant DomainOwnerId: 0 }: sender# [1:17:2064] 2025-05-07T09:03:15.745492Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:14:2061] Subscribe: subscriber# [1:17:2064], path# /Root/Tenant, domainOwnerId# 0, capabilities# =========== Path: "/Root/Tenant" PathDescription { Self { PathVersion: 1 } DomainDescription { DomainKey { SchemeShard: 800 PathId: 2 } } } PathId: 2 PathOwnerId: 800 =========== Path: "/Root/Tenant" PathDescription { Self { PathVersion: 2 } DomainDescription { DomainKey { SchemeShard: 800 PathId: 2 } } } PathId: 2 PathOwnerId: 800 2025-05-07T09:03:15.745690Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [1:18:2065] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 800 Generation: 1 }: sender# [1:19:2066] 2025-05-07T09:03:15.745708Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [1:18:2065] Successful handshake: owner# 800, generation# 1 2025-05-07T09:03:15.745737Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:958: [1:18:2065] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 800 Generation: 1 }: sender# [1:19:2066] 2025-05-07T09:03:15.745750Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:985: 
[1:18:2065] Commit generation: owner# 800, generation# 1 2025-05-07T09:03:15.745773Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [1:18:2065] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 900 Generat ... { DomainKey { SchemeShard: 800 PathId: 333 } } } PathId: 9 PathOwnerId: 910 =========== super id == DomainId: [OwnerId: 800, LocalPathId: 333] IsDeletion: 1 PathId: [OwnerId: 910, LocalPathId: 9] Verions: 18446744073709551615 =========== WIN ==/Root/Tenant/table_inside PathID: [OwnerId: 0, LocalPathId: 0] deleted: 1 version: 0 domainId: [OwnerId: 0, LocalPathId: 0] 2025-05-07T09:03:16.154349Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [2:398:2445] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 910 Generation: 1 }: sender# [2:399:2446] 2025-05-07T09:03:16.154372Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [2:398:2445] Successful handshake: owner# 910, generation# 1 2025-05-07T09:03:16.154397Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:958: [2:398:2445] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 910 Generation: 1 }: sender# [2:399:2446] 2025-05-07T09:03:16.154410Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:985: [2:398:2445] Commit generation: owner# 910, generation# 1 2025-05-07T09:03:16.154445Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [2:398:2445] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 910 Generation: 1 }: sender# [2:400:2447] 2025-05-07T09:03:16.154459Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [2:398:2445] Successful handshake: owner# 910, generation# 1 2025-05-07T09:03:16.154493Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:958: [2:398:2445] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 910 Generation: 1 }: sender# [2:400:2447] 2025-05-07T09:03:16.154508Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:985: [2:398:2445] Commit generation: owner# 910, generation# 1 2025-05-07T09:03:16.154541Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [2:398:2445] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 910 Generation: 1 }: sender# [2:399:2446], cookie# 0, event size# 64 2025-05-07T09:03:16.154558Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [2:398:2445] Update description: path# /Root/Tenant/table_inside, pathId# [OwnerId: 910, LocalPathId: 9], deletion# true 2025-05-07T09:03:16.154571Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [2:398:2445] Upsert description: path# [OwnerId: 910, LocalPathId: 9] 2025-05-07T09:03:16.154614Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [2:398:2445] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 910 Generation: 1 }: sender# [2:400:2447], cookie# 0, event size# 130 2025-05-07T09:03:16.154638Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [2:398:2445] Update description: path# /Root/Tenant/table_inside, pathId# [OwnerId: 910, LocalPathId: 9], deletion# false 2025-05-07T09:03:16.154654Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:834: [2:398:2445] Path was explicitly deleted, ignoring: path# /Root/Tenant/table_inside, pathId# [OwnerId: 910, LocalPathId: 9] 2025-05-07T09:03:16.154686Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [2:398:2445] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /Root/Tenant/table_inside DomainOwnerId: 0 }: sender# [2:401:2448] 2025-05-07T09:03:16.154702Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [2:398:2445] Upsert description: path# /Root/Tenant/table_inside 2025-05-07T09:03:16.154737Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [2:398:2445] Subscribe: subscriber# 
[2:401:2448], path# /Root/Tenant/table_inside, domainOwnerId# 0, capabilities# =========== Left ==Path: "/Root/Tenant/table_inside" PathDescription { Self { PathVersion: 18446744073709551615 } DomainDescription { DomainKey { SchemeShard: 800 PathId: 333 } } } PathId: 9 PathOwnerId: 910 =========== Right ==Path: "/Root/Tenant/table_inside" PathDescription { Self { PathVersion: 2 } DomainDescription { DomainKey { SchemeShard: 800 PathId: 333 } } } PathId: 9 PathOwnerId: 910 =========== super id == DomainId: [OwnerId: 800, LocalPathId: 333] IsDeletion: 1 PathId: [OwnerId: 910, LocalPathId: 9] Verions: 18446744073709551615 =========== WIN ==/Root/Tenant/table_inside PathID: [OwnerId: 0, LocalPathId: 0] deleted: 1 version: 0 domainId: [OwnerId: 0, LocalPathId: 0] 2025-05-07T09:03:16.156123Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [2:402:2449] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 910 Generation: 1 }: sender# [2:403:2450] 2025-05-07T09:03:16.156145Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [2:402:2449] Successful handshake: owner# 910, generation# 1 2025-05-07T09:03:16.156172Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:958: [2:402:2449] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 910 Generation: 1 }: sender# [2:403:2450] 2025-05-07T09:03:16.156195Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:985: [2:402:2449] Commit generation: owner# 910, generation# 1 2025-05-07T09:03:16.156223Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [2:402:2449] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 910 Generation: 1 }: sender# [2:404:2451] 2025-05-07T09:03:16.156242Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [2:402:2449] Successful handshake: owner# 910, generation# 1 2025-05-07T09:03:16.156273Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:958: [2:402:2449] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 910 Generation: 1 }: sender# [2:404:2451] 2025-05-07T09:03:16.156293Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:985: [2:402:2449] Commit generation: owner# 910, generation# 1 2025-05-07T09:03:16.156328Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [2:402:2449] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 910 Generation: 1 }: sender# [2:403:2450], cookie# 0, event size# 64 2025-05-07T09:03:16.156350Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [2:402:2449] Update description: path# /Root/Tenant/table_inside, pathId# [OwnerId: 910, LocalPathId: 9], deletion# true 2025-05-07T09:03:16.156366Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [2:402:2449] Upsert description: path# [OwnerId: 910, LocalPathId: 9] 2025-05-07T09:03:16.156413Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [2:402:2449] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 910 Generation: 1 }: sender# [2:404:2451], cookie# 0, event size# 64 2025-05-07T09:03:16.156426Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [2:402:2449] Update description: path# /Root/Tenant/table_inside, pathId# [OwnerId: 910, LocalPathId: 9], deletion# true 2025-05-07T09:03:16.156457Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [2:402:2449] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /Root/Tenant/table_inside DomainOwnerId: 0 }: sender# [2:405:2452] 2025-05-07T09:03:16.156482Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [2:402:2449] Upsert description: path# /Root/Tenant/table_inside 2025-05-07T09:03:16.156507Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [2:402:2449] Subscribe: subscriber# [2:405:2452], path# 
/Root/Tenant/table_inside, domainOwnerId# 0, capabilities# =========== Left ==Path: "/Root/Tenant/table_inside" PathDescription { Self { PathVersion: 18446744073709551615 } DomainDescription { DomainKey { SchemeShard: 800 PathId: 333 } } } PathId: 9 PathOwnerId: 910 =========== Right ==Path: "/Root/Tenant/table_inside" PathDescription { Self { PathVersion: 18446744073709551615 } DomainDescription { DomainKey { SchemeShard: 800 PathId: 333 } } } PathId: 9 PathOwnerId: 910 =========== super id == DomainId: [OwnerId: 800, LocalPathId: 333] IsDeletion: 1 PathId: [OwnerId: 910, LocalPathId: 9] Verions: 18446744073709551615 =========== WIN ==/Root/Tenant/table_inside PathID: [OwnerId: 0, LocalPathId: 0] deleted: 1 version: 0 domainId: [OwnerId: 0, LocalPathId: 0] 2025-05-07T09:03:16.290310Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [3:6:2053] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 800 Generation: 1 }: sender# [3:7:2054] 2025-05-07T09:03:16.290376Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [3:6:2053] Successful handshake: owner# 800, generation# 1 2025-05-07T09:03:16.290435Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:958: [3:6:2053] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 800 Generation: 1 }: sender# [3:7:2054] 2025-05-07T09:03:16.290468Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:985: [3:6:2053] Commit generation: owner# 800, generation# 1 2025-05-07T09:03:16.290523Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [3:6:2053] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 900 Generation: 1 }: sender# [3:8:2055] 2025-05-07T09:03:16.290558Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [3:6:2053] Successful handshake: owner# 900, generation# 1 2025-05-07T09:03:16.290601Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:958: [3:6:2053] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 900 Generation: 1 }: sender# [3:8:2055] 2025-05-07T09:03:16.290635Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:985: [3:6:2053] Commit generation: owner# 900, generation# 1 2025-05-07T09:03:16.290711Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [3:6:2053] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 800 Generation: 1 }: sender# [3:7:2054], cookie# 0, event size# 118 2025-05-07T09:03:16.290738Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [3:6:2053] Update description: path# /root/db/dir_inside, pathId# [OwnerId: 800, LocalPathId: 1111], deletion# false 2025-05-07T09:03:16.290780Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:550: [3:6:2053] Upsert description: path# /root/db/dir_inside, pathId# [OwnerId: 800, LocalPathId: 1111], pathDescription# {Status StatusSuccess, Path /root/db/dir_inside, PathId [OwnerId: 800, LocalPathId: 1111], PathVersion 1, SubdomainPathId [OwnerId: 800, LocalPathId: 1], PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 67} 2025-05-07T09:03:16.290841Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [3:6:2053] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 900 Generation: 1 }: sender# [3:8:2055], cookie# 0, event size# 117 2025-05-07T09:03:16.290879Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [3:6:2053] Update description: path# /root/db/dir_inside, pathId# [OwnerId: 900, LocalPathId: 11], deletion# false 2025-05-07T09:03:16.290910Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:884: [3:6:2053] Update description by newest path form tenant schemeshard: path# /root/db/dir_inside, pathId# [OwnerId: 900, LocalPathId: 11], domainId# [OwnerId: 800, 
LocalPathId: 1], curPathId# [OwnerId: 800, LocalPathId: 1111], curDomainId# [OwnerId: 800, LocalPathId: 1] 2025-05-07T09:03:16.290934Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:575: [3:6:2053] Delete description: path# /root/db/dir_inside, pathId# [OwnerId: 800, LocalPathId: 1111] 2025-05-07T09:03:16.290969Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:550: [3:6:2053] Upsert description: path# /root/db/dir_inside, pathId# [OwnerId: 900, LocalPathId: 11], pathDescription# {Status StatusSuccess, Path /root/db/dir_inside, PathId [OwnerId: 900, LocalPathId: 11], PathVersion 1, SubdomainPathId [OwnerId: 800, LocalPathId: 1], PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 67} 2025-05-07T09:03:16.291027Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [3:6:2053] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /root/db/dir_inside DomainOwnerId: 0 }: sender# [3:9:2056] 2025-05-07T09:03:16.291077Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [3:6:2053] Subscribe: subscriber# [3:9:2056], path# /root/db/dir_inside, domainOwnerId# 0, capabilities# =========== Path: "/root/db/dir_inside" PathDescription { Self { PathVersion: 1 } DomainDescription { DomainKey { SchemeShard: 800 PathId: 1 } } } PathId: 1111 PathOwnerId: 800 =========== Path: "/root/db/dir_inside" PathDescription { Self { PathVersion: 1 } DomainDescription { DomainKey { SchemeShard: 800 PathId: 1 } } } PathId: 11 PathOwnerId: 900 =========== DomainId: [OwnerId: 800, LocalPathId: 1] IsDeletion: 0 PathId: [OwnerId: 900, LocalPathId: 11] Versions: 1 |92.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/async_replication/py3test >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_all_types-pk_types7-all_types7-index7---] [GOOD] |92.3%| [TA] $(B)/ydb/core/tx/scheme_board/ut_replica/test-results/unittest/{meta.json ... results_accumulator.log} |92.3%| [TA] {RESULT} $(B)/ydb/core/tx/scheme_board/ut_replica/test-results/unittest/{meta.json ... results_accumulator.log} >> DistributedEraseTests::ConditionalEraseRowsShouldNotFailOnMissingRows [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_erase_rows/unittest >> DistributedEraseTests::ConditionalEraseRowsShouldNotFailOnMissingRows [GOOD] Test command err: 2025-05-07T09:02:55.069575Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:105:2151], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T09:02:55.069715Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T09:02:55.070007Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0029a8/r3tmp/tmpEoPfzG/pdisk_1.dat 2025-05-07T09:02:56.437833Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-05-07T09:02:56.541255Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:02:56.652276Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:02:56.658201Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:02:56.681756Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:02:56.879718Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-05-07T09:02:57.014974Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:687:2585] 2025-05-07T09:02:57.015233Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-05-07T09:02:57.077814Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-05-07T09:02:57.078002Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-05-07T09:02:57.089021Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-05-07T09:02:57.089120Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-05-07T09:02:57.089184Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-05-07T09:02:57.104841Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-05-07T09:02:57.105077Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-05-07T09:02:57.105128Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:712:2585] in generation 1 2025-05-07T09:02:57.106577Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037889 actor [1:689:2587] 2025-05-07T09:02:57.106723Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-05-07T09:02:57.113237Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037890 actor [1:693:2589] 2025-05-07T09:02:57.113373Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-05-07T09:02:57.119441Z node 1 :TX_DATASHARD DEBUG: 
datashard__init.cpp:696: TxInitSchema.Complete 2025-05-07T09:02:57.119525Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-05-07T09:02:57.120553Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037889 2025-05-07T09:02:57.120596Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037889 2025-05-07T09:02:57.120628Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037889 2025-05-07T09:02:57.120886Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-05-07T09:02:57.120980Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-05-07T09:02:57.121031Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037889 persisting started state actor id [1:736:2587] in generation 1 2025-05-07T09:02:57.121447Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-05-07T09:02:57.121505Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-05-07T09:02:57.122561Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037890 2025-05-07T09:02:57.122639Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037890 2025-05-07T09:02:57.122691Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037890 2025-05-07T09:02:57.123019Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-05-07T09:02:57.123130Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-05-07T09:02:57.123185Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037890 persisting started state actor id [1:737:2589] in generation 1 2025-05-07T09:02:57.133866Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-05-07T09:02:57.158607Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-05-07T09:02:57.158753Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-05-07T09:02:57.158828Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:741:2615] 2025-05-07T09:02:57.158867Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-05-07T09:02:57.158894Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-05-07T09:02:57.158950Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:02:57.159289Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-05-07T09:02:57.159317Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037889 2025-05-07T09:02:57.159372Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037889 not sending time cast registration request in state WaitScheme: missing processing params 2025-05-07T09:02:57.159430Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037889, 
actorId: [1:742:2616] 2025-05-07T09:02:57.159446Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037889 2025-05-07T09:02:57.159460Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037889, state: WaitScheme 2025-05-07T09:02:57.159476Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-05-07T09:02:57.159513Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-05-07T09:02:57.159537Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037890 2025-05-07T09:02:57.159573Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037890 not sending time cast registration request in state WaitScheme: missing processing params 2025-05-07T09:02:57.159609Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037890, actorId: [1:743:2617] 2025-05-07T09:02:57.159625Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037890 2025-05-07T09:02:57.159651Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037890, state: WaitScheme 2025-05-07T09:02:57.159672Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037890 2025-05-07T09:02:57.159830Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-05-07T09:02:57.159907Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-05-07T09:02:57.160045Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T09:02:57.160080Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-05-07T09:02:57.160124Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-05-07T09:02:57.160174Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T09:02:57.160293Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:676:2580], serverId# [1:702:2593], sessionId# [0:0:0] 2025-05-07T09:02:57.160325Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037889 2025-05-07T09:02:57.160377Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037889 2025-05-07T09:02:57.160421Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037890 2025-05-07T09:02:57.160452Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037890 2025-05-07T09:02:57.160537Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-05-07T09:02:57.164440Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 
2025-05-07T09:02:57.164573Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-05-07T09:02:57.165029Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037889 2025-05-07T09:02:57.165069Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 active 0 active planned 0 immediate 0 planned 0 2025-05-07T09:02:57.165098Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037889 TxInFly 0 2025-05-07T09:02:57.165136Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-05-07T09:02:57.165205Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037890 2025-05-07T09:02:57.165230Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037890 active 0 active planned 0 immediate 0 planned 0 2025-05-07T09:02:57.165253Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186 ... 037888 2025-05-07T09:03:18.877539Z node 3 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 72075186224037889 source 72075186224037889 dest 72075186224037888 consumer 72075186224037888 txId 281474976715662 2025-05-07T09:03:18.877608Z node 3 :TX_DATASHARD DEBUG: datashard_distributed_erase.cpp:978: [DistEraser] [3:1087:2826] HandlePlan TEvDataShard::TEvProposeTransactionResult: txId# 281474976715662, shard# 72075186224037888, status# 2 2025-05-07T09:03:18.877693Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037890 2025-05-07T09:03:18.877732Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [2000 : 281474976715662] from 72075186224037890 at tablet 72075186224037890 send result to client [3:1087:2826], exec latency: 0 ms, propose latency: 1 ms 2025-05-07T09:03:18.877779Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:563: Send delayed Ack RS Ack at 72075186224037890 {TEvReadSet step# 2000 txid# 281474976715662 TabletSource# 72075186224037889 TabletDest# 72075186224037890 SetTabletConsumer# 72075186224037890 Flags# 0 Seqno# 6} 2025-05-07T09:03:18.877807Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037890 2025-05-07T09:03:18.877880Z node 3 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 72075186224037889 source 72075186224037889 dest 72075186224037890 consumer 72075186224037890 txId 281474976715662 2025-05-07T09:03:18.877913Z node 3 :TX_DATASHARD DEBUG: datashard_distributed_erase.cpp:978: [DistEraser] [3:1087:2826] HandlePlan TEvDataShard::TEvProposeTransactionResult: txId# 281474976715662, shard# 72075186224037890, status# 2 2025-05-07T09:03:18.877953Z node 3 :TX_DATASHARD DEBUG: datashard_distributed_erase.cpp:165: [DistEraser] [3:1087:2826] Reply: txId# 281474976715662, status# OK, error# 2025-05-07T09:03:18.878257Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4471: FullScan complete at 72075186224037889 2025-05-07T09:03:18.878310Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4487: Conditional erase complete: cookie: 4, at: 72075186224037889 2025-05-07T09:03:18.878566Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037889 2025-05-07T09:03:18.878601Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: 
GetNextActiveOp at 72075186224037889 active 0 active planned 0 immediate 0 planned 0 2025-05-07T09:03:18.878630Z node 3 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037889 TxInFly 0 2025-05-07T09:03:18.878703Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-05-07T09:03:18.878787Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037889, clientId# [3:1081:2821], serverId# [3:1082:2822], sessionId# [0:0:0] 2025-05-07T09:03:18.879860Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037889 2025-05-07T09:03:18.880188Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037889 2025-05-07T09:03:18.880369Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037889 2025-05-07T09:03:18.880413Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 active 1 active planned 0 immediate 1 planned 0 2025-05-07T09:03:18.880463Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715664] at 72075186224037889 for WaitForStreamClearance 2025-05-07T09:03:18.880728Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 (dry run) active 1 active planned 0 immediate 1 planned 0 2025-05-07T09:03:18.880792Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-05-07T09:03:18.881401Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:514: Got quota for read table scan ShardId: 72075186224037889, TxId: 281474976715664, MessageQuota: 1 2025-05-07T09:03:18.881649Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:662: Send response data ShardId: 72075186224037889, TxId: 281474976715664, Size: 70, Rows: 0, PendingAcks: 1, MessageQuota: 0 2025-05-07T09:03:18.881784Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:483: Got stream data ack ShardId: 72075186224037889, TxId: 281474976715664, PendingAcks: 0 2025-05-07T09:03:18.881834Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:717: Finish scan ShardId: 72075186224037889, TxId: 281474976715664, MessageQuota: 0 2025-05-07T09:03:18.884383Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4471: FullScan complete at 72075186224037889 2025-05-07T09:03:18.884434Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4477: Found op: cookie: 281474976715664, at: 72075186224037889 2025-05-07T09:03:18.884816Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037889 2025-05-07T09:03:18.884849Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 active 1 active planned 0 immediate 1 planned 0 2025-05-07T09:03:18.884879Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715664] at 72075186224037889 for ReadTableScan 2025-05-07T09:03:18.884983Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-05-07T09:03:18.885026Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-05-07T09:03:18.885067Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit 
heartbeats: at tablet# 72075186224037889 2025-05-07T09:03:18.887577Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-05-07T09:03:18.887880Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-05-07T09:03:18.888064Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T09:03:18.888109Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 1 active planned 0 immediate 1 planned 0 2025-05-07T09:03:18.888161Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715665] at 72075186224037888 for WaitForStreamClearance 2025-05-07T09:03:18.888367Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 0 immediate 1 planned 0 2025-05-07T09:03:18.888428Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T09:03:18.888959Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:514: Got quota for read table scan ShardId: 72075186224037888, TxId: 281474976715665, MessageQuota: 1 2025-05-07T09:03:18.889193Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:662: Send response data ShardId: 72075186224037888, TxId: 281474976715665, Size: 35, Rows: 0, PendingAcks: 1, MessageQuota: 0 2025-05-07T09:03:18.889318Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:483: Got stream data ack ShardId: 72075186224037888, TxId: 281474976715665, PendingAcks: 0 2025-05-07T09:03:18.889365Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:717: Finish scan ShardId: 72075186224037888, TxId: 281474976715665, MessageQuota: 0 2025-05-07T09:03:18.916680Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4471: FullScan complete at 72075186224037888 2025-05-07T09:03:18.916759Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4477: Found op: cookie: 281474976715665, at: 72075186224037888 2025-05-07T09:03:18.916960Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T09:03:18.916998Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 1 active planned 0 immediate 1 planned 0 2025-05-07T09:03:18.917036Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715665] at 72075186224037888 for ReadTableScan 2025-05-07T09:03:18.917159Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-05-07T09:03:18.917220Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T09:03:18.917263Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:03:18.920092Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037890 2025-05-07T09:03:18.920453Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037890 2025-05-07T09:03:18.920636Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037890 2025-05-07T09:03:18.920681Z node 3 :TX_DATASHARD DEBUG: 
datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037890 active 1 active planned 0 immediate 1 planned 0 2025-05-07T09:03:18.920733Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715666] at 72075186224037890 for WaitForStreamClearance 2025-05-07T09:03:18.920971Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037890 (dry run) active 1 active planned 0 immediate 1 planned 0 2025-05-07T09:03:18.921037Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037890 2025-05-07T09:03:18.921677Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:514: Got quota for read table scan ShardId: 72075186224037890, TxId: 281474976715666, MessageQuota: 1 2025-05-07T09:03:18.921907Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:662: Send response data ShardId: 72075186224037890, TxId: 281474976715666, Size: 35, Rows: 0, PendingAcks: 1, MessageQuota: 0 2025-05-07T09:03:18.922065Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:483: Got stream data ack ShardId: 72075186224037890, TxId: 281474976715666, PendingAcks: 0 2025-05-07T09:03:18.922112Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:717: Finish scan ShardId: 72075186224037890, TxId: 281474976715666, MessageQuota: 0 2025-05-07T09:03:18.957395Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4471: FullScan complete at 72075186224037890 2025-05-07T09:03:18.957486Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4477: Found op: cookie: 281474976715666, at: 72075186224037890 2025-05-07T09:03:18.957931Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037890 2025-05-07T09:03:18.957993Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037890 active 1 active planned 0 immediate 1 planned 0 2025-05-07T09:03:18.958033Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715666] at 72075186224037890 for ReadTableScan 2025-05-07T09:03:18.958159Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037890 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-05-07T09:03:18.958222Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037890 2025-05-07T09:03:18.958267Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037890 >> DistributedEraseTests::ConditionalEraseRowsAsyncIndex [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_erase_rows/unittest >> DistributedEraseTests::ConditionalEraseRowsAsyncIndex [GOOD] Test command err: 2025-05-07T09:02:55.077781Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:105:2151], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T09:02:55.077948Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T09:02:55.078279Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0029b1/r3tmp/tmp91WLdD/pdisk_1.dat 2025-05-07T09:02:56.437471Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-05-07T09:02:56.541288Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:02:56.652233Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:02:56.658087Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:02:56.681926Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:02:56.879894Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-05-07T09:02:57.014900Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:687:2585] 2025-05-07T09:02:57.015106Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-05-07T09:02:57.076790Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-05-07T09:02:57.076961Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-05-07T09:02:57.089023Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-05-07T09:02:57.089126Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-05-07T09:02:57.089170Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-05-07T09:02:57.104821Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-05-07T09:02:57.105130Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-05-07T09:02:57.105181Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:712:2585] in generation 1 2025-05-07T09:02:57.106730Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037889 actor [1:689:2587] 2025-05-07T09:02:57.106868Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-05-07T09:02:57.113558Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037890 actor [1:693:2589] 2025-05-07T09:02:57.113704Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-05-07T09:02:57.119734Z node 1 :TX_DATASHARD DEBUG: 
datashard__init.cpp:696: TxInitSchema.Complete 2025-05-07T09:02:57.119813Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-05-07T09:02:57.120835Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037889 2025-05-07T09:02:57.120885Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037889 2025-05-07T09:02:57.120917Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037889 2025-05-07T09:02:57.121110Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-05-07T09:02:57.121195Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-05-07T09:02:57.121237Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037889 persisting started state actor id [1:736:2587] in generation 1 2025-05-07T09:02:57.121562Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-05-07T09:02:57.121622Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-05-07T09:02:57.122477Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037890 2025-05-07T09:02:57.122527Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037890 2025-05-07T09:02:57.122553Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037890 2025-05-07T09:02:57.122786Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-05-07T09:02:57.122858Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-05-07T09:02:57.122901Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037890 persisting started state actor id [1:737:2589] in generation 1 2025-05-07T09:02:57.133672Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-05-07T09:02:57.156704Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-05-07T09:02:57.156880Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-05-07T09:02:57.156962Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:741:2615] 2025-05-07T09:02:57.156986Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-05-07T09:02:57.157012Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-05-07T09:02:57.157052Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:02:57.157452Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-05-07T09:02:57.157482Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037889 2025-05-07T09:02:57.157519Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037889 not sending time cast registration request in state WaitScheme: missing processing params 2025-05-07T09:02:57.157557Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037889, 
actorId: [1:742:2616] 2025-05-07T09:02:57.157578Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037889 2025-05-07T09:02:57.157592Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037889, state: WaitScheme 2025-05-07T09:02:57.157607Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-05-07T09:02:57.157640Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-05-07T09:02:57.157657Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037890 2025-05-07T09:02:57.157682Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037890 not sending time cast registration request in state WaitScheme: missing processing params 2025-05-07T09:02:57.157720Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037890, actorId: [1:743:2617] 2025-05-07T09:02:57.157737Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037890 2025-05-07T09:02:57.157759Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037890, state: WaitScheme 2025-05-07T09:02:57.157773Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037890 2025-05-07T09:02:57.157902Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-05-07T09:02:57.157984Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-05-07T09:02:57.158093Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T09:02:57.158119Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-05-07T09:02:57.158154Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-05-07T09:02:57.158180Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T09:02:57.158289Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:676:2580], serverId# [1:702:2593], sessionId# [0:0:0] 2025-05-07T09:02:57.158323Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037889 2025-05-07T09:02:57.158374Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037889 2025-05-07T09:02:57.158410Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037890 2025-05-07T09:02:57.158438Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037890 2025-05-07T09:02:57.158538Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-05-07T09:02:57.164429Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 
2025-05-07T09:02:57.164535Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-05-07T09:02:57.164887Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037889 2025-05-07T09:02:57.164924Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 active 0 active planned 0 immediate 0 planned 0 2025-05-07T09:02:57.164951Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037889 TxInFly 0 2025-05-07T09:02:57.164980Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-05-07T09:02:57.165031Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037890 2025-05-07T09:02:57.165048Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037890 active 0 active planned 0 immediate 0 planned 0 2025-05-07T09:02:57.165064Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186 ... datashard.cpp:3990: Send RS 2 at 72075186224037891 from 72075186224037891 to 72075186224037893 txId 281474976715666 2025-05-07T09:03:20.573419Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037891 2025-05-07T09:03:20.573481Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [2500 : 281474976715666] from 72075186224037891 at tablet 72075186224037891 send result to client [3:1407:3040], exec latency: 0 ms, propose latency: 0 ms 2025-05-07T09:03:20.573590Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:1170: EnqueueChangeRecords: at tablet: 72075186224037891, records: { Order: 4 PathId: [OwnerId: 72057594046644480, LocalPathId: 14] BodySize: 28 TableId: [OwnerId: 72057594046644480, LocalPathId: 11] SchemaVersion: 1 }, { Order: 5 PathId: [OwnerId: 72057594046644480, LocalPathId: 14] BodySize: 28 TableId: [OwnerId: 72057594046644480, LocalPathId: 11] SchemaVersion: 1 }, { Order: 6 PathId: [OwnerId: 72057594046644480, LocalPathId: 14] BodySize: 28 TableId: [OwnerId: 72057594046644480, LocalPathId: 11] SchemaVersion: 1 } 2025-05-07T09:03:20.573649Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037891 2025-05-07T09:03:20.573812Z node 3 :TX_DATASHARD DEBUG: datashard_distributed_erase.cpp:978: [DistEraser] [3:1407:3040] HandlePlan TEvDataShard::TEvProposeTransactionResult: txId# 281474976715666, shard# 72075186224037891, status# 2 2025-05-07T09:03:20.573916Z node 3 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037893 step# 2500} 2025-05-07T09:03:20.573956Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037893 2025-05-07T09:03:20.574276Z node 3 :TX_DATASHARD INFO: datashard_change_sending.cpp:215: TTxRequestChangeRecords Execute: at tablet# 72075186224037891 2025-05-07T09:03:20.574530Z node 3 :TX_DATASHARD DEBUG: datashard_change_sending.cpp:235: Send 3 change records: to# [3:1206:2921], at tablet# 72075186224037891 2025-05-07T09:03:20.574574Z node 3 :TX_DATASHARD INFO: datashard_change_sending.cpp:260: TTxRequestChangeRecords Complete: sent# 3, forgotten# 0, left# 0, at tablet# 72075186224037891 2025-05-07T09:03:20.574635Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3359: Receive RS at 72075186224037893 
source 72075186224037891 dest 72075186224037893 producer 72075186224037891 txId 281474976715666 2025-05-07T09:03:20.574704Z node 3 :TX_DATASHARD DEBUG: datashard__readset.cpp:15: TTxReadSet::Execute at 72075186224037893 got read set: {TEvReadSet step# 2500 txid# 281474976715666 TabletSource# 72075186224037891 TabletDest# 72075186224037893 SetTabletProducer# 72075186224037891 ReadSet.Size()# 19 Seqno# 2 Flags# 0} 2025-05-07T09:03:20.574776Z node 3 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 72075186224037893 2025-05-07T09:03:20.575199Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037893 2025-05-07T09:03:20.575248Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037893 active 1 active planned 1 immediate 0 planned 1 2025-05-07T09:03:20.575286Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [2500:281474976715666] at 72075186224037893 for LoadAndWaitInRS 2025-05-07T09:03:20.575597Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037893 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-05-07T09:03:20.575984Z node 3 :TX_DATASHARD DEBUG: datashard_change_receiving.cpp:470: Handle TEvChangeExchange::TEvApplyRecords: origin# 72075186224037891, generation# 1, at tablet# 72075186224037892 2025-05-07T09:03:20.586895Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037893 2025-05-07T09:03:20.586969Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [2500 : 281474976715666] from 72075186224037893 at tablet 72075186224037893 send result to client [3:1407:3040], exec latency: 0 ms, propose latency: 1 ms 2025-05-07T09:03:20.587052Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:563: Send delayed Ack RS Ack at 72075186224037893 {TEvReadSet step# 2500 txid# 281474976715666 TabletSource# 72075186224037891 TabletDest# 72075186224037893 SetTabletConsumer# 72075186224037893 Flags# 0 Seqno# 2} 2025-05-07T09:03:20.587105Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037893 2025-05-07T09:03:20.587203Z node 3 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 72075186224037891 source 72075186224037891 dest 72075186224037893 consumer 72075186224037893 txId 281474976715666 2025-05-07T09:03:20.587278Z node 3 :TX_DATASHARD DEBUG: datashard_distributed_erase.cpp:978: [DistEraser] [3:1407:3040] HandlePlan TEvDataShard::TEvProposeTransactionResult: txId# 281474976715666, shard# 72075186224037893, status# 2 2025-05-07T09:03:20.587322Z node 3 :TX_DATASHARD DEBUG: datashard_distributed_erase.cpp:165: [DistEraser] [3:1407:3040] Reply: txId# 281474976715666, status# OK, error# 2025-05-07T09:03:20.587554Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4471: FullScan complete at 72075186224037891 2025-05-07T09:03:20.587592Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4487: Conditional erase complete: cookie: 4, at: 72075186224037891 2025-05-07T09:03:20.587827Z node 3 :TX_DATASHARD INFO: datashard_change_sending.cpp:310: TTxRemoveChangeRecords Execute: records# 3, at tablet# 72075186224037891 2025-05-07T09:03:20.587855Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:1087: RemoveChangeRecord: order: 4, at tablet: 72075186224037891 2025-05-07T09:03:20.587937Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:1087: RemoveChangeRecord: order: 5, at tablet: 72075186224037891 
2025-05-07T09:03:20.587963Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:1087: RemoveChangeRecord: order: 6, at tablet: 72075186224037891 2025-05-07T09:03:20.588067Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037891 2025-05-07T09:03:20.588097Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037891 active 0 active planned 0 immediate 0 planned 0 2025-05-07T09:03:20.588136Z node 3 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037891 TxInFly 0 2025-05-07T09:03:20.588212Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037891, clientId# [3:1402:3036], serverId# [3:1403:3037], sessionId# [0:0:0] 2025-05-07T09:03:20.589070Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037893 2025-05-07T09:03:20.589366Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037893 2025-05-07T09:03:20.589510Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037893 2025-05-07T09:03:20.589552Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037893 active 1 active planned 0 immediate 1 planned 0 2025-05-07T09:03:20.589596Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715667] at 72075186224037893 for WaitForStreamClearance 2025-05-07T09:03:20.589779Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037893 (dry run) active 1 active planned 0 immediate 1 planned 0 2025-05-07T09:03:20.589831Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037893 2025-05-07T09:03:20.590292Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:514: Got quota for read table scan ShardId: 72075186224037893, TxId: 281474976715667, MessageQuota: 1 2025-05-07T09:03:20.590405Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:717: Finish scan ShardId: 72075186224037893, TxId: 281474976715667, MessageQuota: 1 2025-05-07T09:03:20.591645Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4471: FullScan complete at 72075186224037893 2025-05-07T09:03:20.591685Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4477: Found op: cookie: 281474976715667, at: 72075186224037893 2025-05-07T09:03:20.591850Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037893 2025-05-07T09:03:20.591876Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037893 active 1 active planned 0 immediate 1 planned 0 2025-05-07T09:03:20.591903Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715667] at 72075186224037893 for ReadTableScan 2025-05-07T09:03:20.591994Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037893 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-05-07T09:03:20.592030Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037893 2025-05-07T09:03:20.592067Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037893 2025-05-07T09:03:20.593010Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: 
TTxProposeTransactionBase::Execute at 72075186224037892 2025-05-07T09:03:20.593227Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037892 2025-05-07T09:03:20.593341Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037892 2025-05-07T09:03:20.593373Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037892 active 1 active planned 0 immediate 1 planned 0 2025-05-07T09:03:20.593405Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715668] at 72075186224037892 for WaitForStreamClearance 2025-05-07T09:03:20.593535Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037892 (dry run) active 1 active planned 0 immediate 1 planned 0 2025-05-07T09:03:20.593567Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037892 2025-05-07T09:03:20.593910Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:514: Got quota for read table scan ShardId: 72075186224037892, TxId: 281474976715668, MessageQuota: 1 2025-05-07T09:03:20.594005Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:717: Finish scan ShardId: 72075186224037892, TxId: 281474976715668, MessageQuota: 1 2025-05-07T09:03:20.622557Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4471: FullScan complete at 72075186224037892 2025-05-07T09:03:20.622611Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4477: Found op: cookie: 281474976715668, at: 72075186224037892 2025-05-07T09:03:20.622781Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037892 2025-05-07T09:03:20.622813Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037892 active 1 active planned 0 immediate 1 planned 0 2025-05-07T09:03:20.622847Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715668] at 72075186224037892 for ReadTableScan 2025-05-07T09:03:20.622951Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037892 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-05-07T09:03:20.623006Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037892 2025-05-07T09:03:20.623042Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037892 |92.3%| [TA] $(B)/ydb/core/tx/datashard/ut_erase_rows/test-results/unittest/{meta.json ... results_accumulator.log} |92.3%| [TA] {RESULT} $(B)/ydb/core/tx/datashard/ut_erase_rows/test-results/unittest/{meta.json ... 
results_accumulator.log} |92.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/load_test/ut/unittest |92.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/load_test/ut/unittest >> YdbOlapStore::LogExistingUserId [GOOD] |92.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_blob_depot_fat/blobstorage-ut_blobstorage-ut_blob_depot_fat |92.3%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_blob_depot_fat/blobstorage-ut_blobstorage-ut_blob_depot_fat |92.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_blob_depot_fat/blobstorage-ut_blobstorage-ut_blob_depot_fat ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> YdbOlapStore::LogExistingUserId [GOOD] Test command err: 2025-05-07T09:01:16.343773Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501626286263381140:2071];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:01:16.343839Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002864/r3tmp/tmpESR2Ch/pdisk_1.dat 2025-05-07T09:01:17.244027Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:01:17.320649Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:01:17.320745Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:01:17.339978Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 13474, node 1 2025-05-07T09:01:17.802790Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:01:17.802830Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:01:17.802843Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:01:17.802941Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8433 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-05-07T09:01:18.544813Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... TClient is connected to server localhost:8433 2025-05-07T09:01:19.010995Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnStore, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:01:19.321991Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7501626299148284133:2326];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-05-07T09:01:19.322263Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7501626299148284133:2326];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-05-07T09:01:19.322592Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7501626299148284133:2326];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-05-07T09:01:19.322717Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7501626299148284133:2326];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-05-07T09:01:19.322861Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7501626299148284133:2326];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-05-07T09:01:19.323015Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7501626299148284133:2326];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanInsertionDedup; 2025-05-07T09:01:19.323128Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7501626299148284133:2326];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-05-07T09:01:19.323229Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7501626299148284133:2326];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-05-07T09:01:19.323336Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7501626299148284133:2326];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-05-07T09:01:19.323450Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7501626299148284133:2326];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-05-07T09:01:19.323562Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037891;self_id=[1:7501626299148284133:2326];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-05-07T09:01:19.323659Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7501626299148284133:2326];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-05-07T09:01:19.454121Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7501626299148284131:2325];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-05-07T09:01:19.454202Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7501626299148284131:2325];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-05-07T09:01:19.454493Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7501626299148284131:2325];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-05-07T09:01:19.454610Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7501626299148284131:2325];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-05-07T09:01:19.454718Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7501626299148284131:2325];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-05-07T09:01:19.454843Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7501626299148284131:2325];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanInsertionDedup; 2025-05-07T09:01:19.454966Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7501626299148284131:2325];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-05-07T09:01:19.455108Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7501626299148284131:2325];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-05-07T09:01:19.455220Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7501626299148284131:2325];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-05-07T09:01:19.455318Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7501626299148284131:2325];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-05-07T09:01:19.455409Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037890;self_id=[1:7501626299148284131:2325];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-05-07T09:01:19.455527Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7501626299148284131:2325];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-05-07T09:01:19.468557Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-05-07T09:01:19.468633Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-05-07T09:01:19.468734Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-05-07T09:01:19.468759Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-05-07T09:01:19.468974Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-05-07T09:01:19.469001Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-05-07T09:01:19.469093Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-05-07T09:01:19.469126Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanInsertionDedup;id=CleanInsertionDedup; 2025 ... KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976715670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 38, seqNo: [1] 2025-05-07T09:03:23.124916Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976715670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 39, seqNo: [1] 2025-05-07T09:03:23.124932Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976715670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 40, seqNo: [1] 2025-05-07T09:03:23.124948Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976715670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 41, seqNo: [1] 2025-05-07T09:03:23.124965Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976715670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 42, seqNo: [1] 2025-05-07T09:03:23.124982Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976715670, task: 65. 
Tasks execution finished, don't wait for ack delivery in input channelId: 43, seqNo: [1] 2025-05-07T09:03:23.124998Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976715670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 44, seqNo: [1] 2025-05-07T09:03:23.125012Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976715670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 45, seqNo: [1] 2025-05-07T09:03:23.125026Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976715670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 46, seqNo: [1] 2025-05-07T09:03:23.125041Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976715670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 47, seqNo: [1] 2025-05-07T09:03:23.125055Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976715670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 48, seqNo: [1] 2025-05-07T09:03:23.125069Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976715670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 49, seqNo: [1] 2025-05-07T09:03:23.125086Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976715670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 50, seqNo: [1] 2025-05-07T09:03:23.125101Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976715670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 51, seqNo: [1] 2025-05-07T09:03:23.125117Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976715670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 52, seqNo: [1] 2025-05-07T09:03:23.125133Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976715670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 53, seqNo: [1] 2025-05-07T09:03:23.125149Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976715670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 54, seqNo: [1] 2025-05-07T09:03:23.125166Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976715670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 55, seqNo: [1] 2025-05-07T09:03:23.125180Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976715670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 56, seqNo: [1] 2025-05-07T09:03:23.125196Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976715670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 57, seqNo: [1] 2025-05-07T09:03:23.125211Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976715670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 58, seqNo: [1] 2025-05-07T09:03:23.125226Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976715670, task: 65. 
Tasks execution finished, don't wait for ack delivery in input channelId: 59, seqNo: [1] 2025-05-07T09:03:23.125241Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976715670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 60, seqNo: [1] 2025-05-07T09:03:23.125257Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976715670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 61, seqNo: [1] 2025-05-07T09:03:23.125273Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976715670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 62, seqNo: [1] 2025-05-07T09:03:23.125287Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976715670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 63, seqNo: [1] 2025-05-07T09:03:23.125303Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976715670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 64, seqNo: [1] 2025-05-07T09:03:23.125324Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:675: TxId: 281474976715670, task: 65. Tasks execution finished 2025-05-07T09:03:23.125354Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:510: SelfId: [28:7501626824650572962:3171], TxId: 281474976715670, task: 65. Ctx: { SessionId : ydb://session/3?node_id=28&id=MWY1M2M0YmEtYjI5ZjU4YmItODRiN2JjODMtYjQ4NWE2NTQ=. TraceId : 01jtmzreyn1g13ykm9z2mrq9sy. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Compute state finished. All channels and sinks finished 2025-05-07T09:03:23.125544Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:494: TxId: 281474976715670, task: 65. pass away 2025-05-07T09:03:23.125683Z node 28 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:433: ActorId: [28:7501626824650572881:3101] TxId: 281474976715670. Ctx: { TraceId: 01jtmzreyn1g13ykm9z2mrq9sy, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=28&id=MWY1M2M0YmEtYjI5ZjU4YmItODRiN2JjODMtYjQ4NWE2NTQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, got execution state from compute actor: [28:7501626824650572962:3171], task: 65, state: COMPUTE_STATE_FINISHED, stats: { CpuTimeUs: 7453 Tasks { TaskId: 65 StageId: 1 CpuTimeUs: 517 FinishTimeMs: 1746608603124 InputRows: 1 InputBytes: 310 OutputRows: 1 OutputBytes: 310 ResultRows: 1 ResultBytes: 310 ComputeCpuTimeUs: 269 BuildCpuTimeUs: 248 HostName: "ghrun-sykirh5vua" NodeId: 28 CreateTimeMs: 1746608602600 UpdateTimeMs: 1746608603125 } MaxMemoryUsage: 1048576 } 2025-05-07T09:03:23.125726Z node 28 :KQP_EXECUTER INFO: kqp_planner.cpp:688: TxId: 281474976715670. Ctx: { TraceId: 01jtmzreyn1g13ykm9z2mrq9sy, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=28&id=MWY1M2M0YmEtYjI5ZjU4YmItODRiN2JjODMtYjQ4NWE2NTQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Compute actor has finished execution: [28:7501626824650572962:3171] 2025-05-07T09:03:23.125748Z node 28 :KQP_COMPUTE DEBUG: log.cpp:784: fline=kqp_compute_actor_factory.cpp:66;problem=finish_compute_actor;tx_id=281474976715670;task_id=65;success=1;message={
: Error: COMPUTE_STATE_FINISHED }; 2025-05-07T09:03:23.125844Z node 28 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:2140: ActorId: [28:7501626824650572881:3101] TxId: 281474976715670. Ctx: { TraceId: 01jtmzreyn1g13ykm9z2mrq9sy, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=28&id=MWY1M2M0YmEtYjI5ZjU4YmItODRiN2JjODMtYjQ4NWE2NTQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. terminate execution. 2025-05-07T09:03:23.125901Z node 28 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:838: ActorId: [28:7501626824650572881:3101] TxId: 281474976715670. Ctx: { TraceId: 01jtmzreyn1g13ykm9z2mrq9sy, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=28&id=MWY1M2M0YmEtYjI5ZjU4YmItODRiN2JjODMtYjQ4NWE2NTQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Resource usage for last stat interval: ComputeTime: 0.092337s ReadRows: 50 ReadBytes: 16000 ru: 61 rate limiter was not found force flag: 1 2025-05-07T09:03:23.125994Z node 28 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1699: SessionId: ydb://session/3?node_id=28&id=MWY1M2M0YmEtYjI5ZjU4YmItODRiN2JjODMtYjQ4NWE2NTQ=, ActorId: [28:7501626824650572843:3101], ActorState: ExecuteState, TraceId: 01jtmzreyn1g13ykm9z2mrq9sy, TEvTxResponse, CurrentTx: 1/1 response.status: SUCCESS 2025-05-07T09:03:23.126279Z node 28 :KQP_SESSION INFO: kqp_session_actor.cpp:1958: SessionId: ydb://session/3?node_id=28&id=MWY1M2M0YmEtYjI5ZjU4YmItODRiN2JjODMtYjQ4NWE2NTQ=, ActorId: [28:7501626824650572843:3101], ActorState: ExecuteState, TraceId: 01jtmzreyn1g13ykm9z2mrq9sy, txInfo Status: Active Kind: ReadOnly TotalDuration: 0 ServerDuration: 573.432 QueriesCount: 1 2025-05-07T09:03:23.126338Z node 28 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2113: SessionId: ydb://session/3?node_id=28&id=MWY1M2M0YmEtYjI5ZjU4YmItODRiN2JjODMtYjQ4NWE2NTQ=, ActorId: [28:7501626824650572843:3101], ActorState: ExecuteState, TraceId: 01jtmzreyn1g13ykm9z2mrq9sy, Create QueryResponse for action: QUERY_ACTION_EXECUTE with SUCCESS status 2025-05-07T09:03:23.126439Z node 28 :KQP_SESSION INFO: kqp_session_actor.cpp:2473: SessionId: ydb://session/3?node_id=28&id=MWY1M2M0YmEtYjI5ZjU4YmItODRiN2JjODMtYjQ4NWE2NTQ=, ActorId: [28:7501626824650572843:3101], ActorState: ExecuteState, TraceId: 01jtmzreyn1g13ykm9z2mrq9sy, Cleanup start, isFinal: 1 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-05-07T09:03:23.126480Z node 28 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2534: SessionId: ydb://session/3?node_id=28&id=MWY1M2M0YmEtYjI5ZjU4YmItODRiN2JjODMtYjQ4NWE2NTQ=, ActorId: [28:7501626824650572843:3101], ActorState: ExecuteState, TraceId: 01jtmzreyn1g13ykm9z2mrq9sy, EndCleanup, isFinal: 1 2025-05-07T09:03:23.126547Z node 28 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2270: SessionId: ydb://session/3?node_id=28&id=MWY1M2M0YmEtYjI5ZjU4YmItODRiN2JjODMtYjQ4NWE2NTQ=, ActorId: [28:7501626824650572843:3101], ActorState: ExecuteState, TraceId: 01jtmzreyn1g13ykm9z2mrq9sy, Sent query response back to proxy, proxyRequestId: 5, proxyId: [28:7501626790290830887:2280] 2025-05-07T09:03:23.126589Z node 28 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2546: SessionId: ydb://session/3?node_id=28&id=MWY1M2M0YmEtYjI5ZjU4YmItODRiN2JjODMtYjQ4NWE2NTQ=, ActorId: [28:7501626824650572843:3101], ActorState: unknown state, TraceId: 01jtmzreyn1g13ykm9z2mrq9sy, Cleanup temp tables: 0 2025-05-07T09:03:23.129764Z node 28 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1746608602095, txId: 
18446744073709551615] shutting down 2025-05-07T09:03:23.129935Z node 28 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2637: SessionId: ydb://session/3?node_id=28&id=MWY1M2M0YmEtYjI5ZjU4YmItODRiN2JjODMtYjQ4NWE2NTQ=, ActorId: [28:7501626824650572843:3101], ActorState: unknown state, TraceId: 01jtmzreyn1g13ykm9z2mrq9sy, Session actor destroyed 2025-05-07T09:03:23.136049Z node 28 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037888;parent=[28:7501626794585798987:2324];fline=actor.cpp:33;event=skip_flush_writing; >> KqpProxy::NoLocalSessionExecution >> KqpProxy::CalcPeerStats [GOOD] >> KqpProxy::CreatesScriptExecutionsTable >> TableCreation::ConcurrentTableCreationWithDifferentVersions >> KqpProxy::PingNotExistedSession >> KqpProxy::PassErrroViaSessionActor |92.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/load_test/ut/unittest |92.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data/unittest |92.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data/unittest >> KqpUserConstraint::KqpReadNull+UploadNull |92.4%| [TM] {asan, default-linux-x86_64, release} ydb/library/table_creator/ut/unittest |92.4%| [TM] {asan, default-linux-x86_64, release} ydb/library/table_creator/ut/unittest |92.4%| [TM] {asan, default-linux-x86_64, release} ydb/library/table_creator/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest >> Secret::Validation [FAIL] Test command err: 2025-05-07T09:00:29.929374Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:105:2151], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T09:00:29.938144Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T09:00:29.938698Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002673/r3tmp/tmpMzVFBV/pdisk_1.dat 2025-05-07T09:00:33.814411Z node 1 :BS_CONTROLLER ERROR: {BSC07@impl.h:2175} ProcessControllerEvent event processing took too much time Type# 268637706 Duration# 0.199919s 2025-05-07T09:00:33.814557Z node 1 :BS_CONTROLLER ERROR: {BSC00@bsc.cpp:666} StateWork event processing took too much time Type# 2146435078 Duration# 0.200114s TServer::EnableGrpc on GrpcPort 1767, node 1 TClient is connected to server localhost:14842 2025-05-07T09:00:39.413868Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-05-07T09:00:39.766894Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:00:39.816103Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:00:39.816298Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:00:39.816355Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:00:39.816747Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-05-07T09:00:39.902765Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:00:39.922726Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:00:39.986603Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected Initialization finished REQUEST=CREATE OBJECT secret-1 (TYPE SECRET) WITH value = `100`;EXPECTATION=0;WAITING=1 2025-05-07T09:00:54.931189Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:748:2627], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:00:54.931449Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } REQUEST=CREATE OBJECT secret-1 (TYPE SECRET) WITH value = `100`;RESULT=
:1:20: Error: mismatched input '-' expecting '(' ;EXPECTATION=0 FINISHED_REQUEST=CREATE OBJECT secret-1 (TYPE SECRET) WITH value = `100`;EXPECTATION=0;WAITING=1 REQUEST=ALTER OBJECT secret1 (TYPE SECRET) SET value = `abcde`;EXPECTATION=0;WAITING=1 2025-05-07T09:01:07.187363Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:772:2640], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:07.187524Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:07.490276Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:2, at schemeshard: 72057594046644480 2025-05-07T09:01:08.465423Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:883:2717], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:08.465587Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:08.465876Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:888:2722], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:08.539988Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:2, at schemeshard: 72057594046644480 2025-05-07T09:01:08.719118Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:890:2724], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-05-07T09:01:12.971487Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:988:2793] txid# 281474976715661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:01:15.840806Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-05-07T09:01:16.441532Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:1, at schemeshard: 72057594046644480 2025-05-07T09:01:17.333729Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715670:0, at schemeshard: 72057594046644480 2025-05-07T09:01:18.186988Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715673:0, at schemeshard: 72057594046644480 2025-05-07T09:01:18.859559Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715676:0, at schemeshard: 72057594046644480 2025-05-07T09:01:25.120653Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710758:2, at schemeshard: 72057594046644480 REQUEST=ALTER OBJECT secret1 (TYPE SECRET) SET value = `abcde`;RESULT=
: Error: GRpc error: (1): Cancelled on the server side
: Error: Grpc error response on endpoint localhost:1767 ;EXPECTATION=0 GRpc shutdown warning: left infly: 1, spent: 3.631698 sec GRpc shutdown warning: left infly: 1, spent: 7.329487 sec GRpc shutdown warning: left infly: 1, spent: 10.949153 sec GRpc shutdown warning: left infly: 1, spent: 14.548449 sec GRpc shutdown warning: left infly: 1, spent: 18.237026 sec GRpc shutdown warning: left infly: 1, spent: 21.917084 sec GRpc shutdown warning: left infly: 1, spent: 25.436466 sec GRpc shutdown warning: left infly: 1, spent: 28.91938 sec GRpc shutdown warning: failed to shutdown all connections, left infly: 1, spent: 30.003659 sec assertion failed at ydb/core/testlib/common_helper.cpp:167, void NKikimr::Tests::NCommon::THelper::StartSchemaRequestTableServiceImpl(const TString &, const bool, const bool) const: (*rrPtr) TBackTrace::Capture()+28 (0x19022BBC) NUnitTest::NPrivate::RaiseError(char const*, TBasicString> const&, bool)+592 (0x194DE640) NKikimr::Tests::NCommon::THelper::StartSchemaRequestTableServiceImpl(TBasicString> const&, bool, bool) const+3762 (0x3601A492) NKikimr::NTestSuiteSecret::ValidationImpl(bool)+3138 (0x18C74892) std::__y1::__function::__func, void ()>::operator()()+280 (0x18C7F8A8) TColoredProcessor::Run(std::__y1::function, TBasicString> const&, char const*, bool)+534 (0x19515846) NUnitTest::TTestBase::Run(std::__y1::function, TBasicString> const&, char const*, bool)+505 (0x194E51C9) NKikimr::NTestSuiteSecret::TCurrentTest::Execute()+1204 (0x18C7E754) NUnitTest::TTestFactory::Execute()+2438 (0x194E6A96) NUnitTest::RunMain(int, char**)+5213 (0x1950FDBD) ??+0 (0x7FA802AA6D90) __libc_start_main+128 (0x7FA802AA6E40) _start+41 (0x1660B029) ================================================================= ==302183==ERROR: LeakSanitizer: detected memory leaks Indirect leak of 26080 byte(s) in 1 object(s) allocated from: #0 0x18d59cdd in operator new(unsigned long) /-S/contrib/libs/clang18-rt/lib/asan/asan_new_delete.cpp:86:3 #1 0x1f3be0a8 in __libcpp_operator_new /-S/contrib/libs/cxxsupp/libcxx/include/new:271:10 #2 0x1f3be0a8 in __libcpp_allocate /-S/contrib/libs/cxxsupp/libcxx/include/new:295:10 #3 0x1f3be0a8 in allocate /-S/contrib/libs/cxxsupp/libcxx/include/__memory/allocator.h:103:32 #4 0x1f3be0a8 in __allocate_at_least > /-S/contrib/libs/cxxsupp/libcxx/include/__memory/allocate_at_least.h:41:19 #5 0x1f3be0a8 in __vallocate /-S/contrib/libs/cxxsupp/libcxx/include/vector:807:25 #6 0x1f3be0a8 in vector /-S/contrib/libs/cxxsupp/libcxx/include/vector:461:7 #7 0x1f3be0a8 in make_unique >, unsigned long &> /-S/contrib/libs/cxxsupp/libcxx/include/__memory/unique_ptr.h:642:30 #8 0x1f3be0a8 in grpc_core::Server::ChannelData::InitTransport(grpc_core::RefCountedPtr, grpc_core::RefCountedPtr, unsigned long, grpc_transport*, long) /-S/contrib/libs/grpc/src/core/lib/surface/server.cc:1155:9 #9 0x1f3bd91d in grpc_core::Server::SetupTransport(grpc_transport*, grpc_pollset*, grpc_core::ChannelArgs const&, grpc_core::RefCountedPtr const&) /-S/contrib/libs/grpc/src/core/lib/surface/server.cc:763:10 #10 0x27312148 in grpc_core::(anonymous namespace)::Chttp2ServerListener::ActiveConnection::HandshakingState::OnHandshakeDone(void*, y_absl::lts_y_20240722::Status) /-S/contrib/libs/grpc/src/core/ext/transport/chttp2/server/chttp2_server.cc:479:52 # ... 
in TColoredProcessor::Run(std::__y1::function, TBasicString> const&, char const*, bool) /-S/library/cpp/testing/unittest/utmain.cpp:525:20 #36 0x194e51c8 in NUnitTest::TTestBase::Run(std::__y1::function, TBasicString> const&, char const*, bool) /-S/library/cpp/testing/unittest/registar.cpp:373:18 #37 0x18c7e753 in NKikimr::NTestSuiteSecret::TCurrentTest::Execute() /-S/ydb/services/metadata/secret/ut/ut_secret.cpp:28:1 #38 0x194e6a95 in NUnitTest::TTestFactory::Execute() /-S/library/cpp/testing/unittest/registar.cpp:494:19 #39 0x1950fdbc in NUnitTest::RunMain(int, char**) /-S/library/cpp/testing/unittest/utmain.cpp:872:44 #40 0x7fa802aa6d8f (/lib/x86_64-linux-gnu/libc.so.6+0x29d8f) (BuildId: cd410b710f0f094c6832edd95931006d883af48e) Indirect leak of 8 byte(s) in 1 object(s) allocated from: #0 0x18d59cdd in operator new(unsigned long) /-S/contrib/libs/clang18-rt/lib/asan/asan_new_delete.cpp:86:3 #1 0x48a15f76 in __libcpp_operator_new /-S/contrib/libs/cxxsupp/libcxx/include/new:271:10 #2 0x48a15f76 in __libcpp_allocate /-S/contrib/libs/cxxsupp/libcxx/include/new:295:10 #3 0x48a15f76 in allocate /-S/contrib/libs/cxxsupp/libcxx/include/__memory/allocator.h:103:32 #4 0x48a15f76 in __allocate_at_least *> > /-S/contrib/libs/cxxsupp/libcxx/include/__memory/allocate_at_least.h:41:19 #5 0x48a15f76 in __split_buffer /-S/contrib/libs/cxxsupp/libcxx/include/__split_buffer:354:25 #6 0x48a15f76 in std::__y1::deque, std::__y1::allocator>>::__add_back_capacity() /-S/contrib/libs/cxxsupp/libcxx/include/deque:2186:51 #7 0x48a10945 in emplace_back &> /-S/contrib/libs/cxxsupp/libcxx/include/deque:1611:5 #8 0x48a10945 in NKikimr::NMetadata::NInitializer::TDSAccessorInitialized::OnPreparationFinished(TVector, std::__y1::allocator>> const&) /-S/ydb/services/metadata/initializer/accessor_init.cpp:70:19 #9 0x23d6e16b in NKikimr::NMetadata::NSecret::TSecretInitializer::DoPrepare(std::__y1::shared_ptr) const /-S/ydb/services/metadata/secret/initializer.cpp:49:17 #10 0x48a1397d in Prepare /-S/ydb/services/metadata/abstract/initialization.h:14:16 #11 0x48a1397d in NKikimr::NMetadata::NInitializer::TDSAccessorInitialized::Execute(NKikimr::NMetadata::NRequest::TConfig const&, TBasicString> const&, std::__y1::shared_ptr, std::__y1::shared_ptr, std::__y1::shared_ptr const&) /-S/ydb/services/metadata/initializer/accessor_init.cpp:109:30 #12 0x48a07873 in NKikimr::NMetadata::NProvider::TBehaviourRegistrator::Handle(TAutoPtr, TDelete>&) /-S/ydb/services/metadata/ds_table/behaviour_registrator_actor.cpp:45:5 #13 0x48a08ee4 in NKikimr::NMetadata::NProvider::TBehaviourRegistrator::StateMain(TAutoPtr&) /-S/ydb/services/metadata/ds_table/behaviour_registrator_actor.h:47:13 #14 0x1a24ff2c in NActors::IActor::Receive(TAutoPtr&) /-S/ydb/library/actors/core/actor.cpp:280:13 #15 0x35de7444 in NActors::TTestActorRuntimeBase::SendInternal(TAutoPtr, unsigned int, bool) /-S/ydb/library/actors/testlib/test_runtime.cpp:1702:33 #16 0x35ddfcb9 in NActors::TTestActorRuntimeBase::DispatchEventsInternal(NActors::TDispatchOptions const&, TInstant) /-S/ydb/library/actors/testlib/test_runtime.cpp:1295:45 #17 0x35dea033 in NActors::TTestActorRuntimeBase::WaitForEdgeEvents(std::__y1::function&)>, TSet, std::__y1::allocator> const&, TDuration) /-S/ydb/library/actors/testlib/test_runtime.cpp:1554:22 #18 0x35fbb583 in NActors::TEvents::TEvWakeup::TPtr NActors::TTestActorRuntimeBase::GrabEdgeEventIf(TSet, std::__y1::allocator> const&, std::__y1::function const&, TDuration) /-S/ydb/library/actors/testlib/test_runtime.h:477:13 #19 0x35fba6a2 in GrabEdgeEvent 
/-S/ydb/library/actors/testlib/test_runtime.h:526:20 #20 0x35fba6a2 in NActors::TEvents::TEvWakeup::TPtr NActors::TTestActorRuntimeBase::GrabEdgeEvent(NActors::TActorId const&, TDuration) /-S/ydb/library/actors/testlib/test_runtime.h:532:20 #21 0x35fb28d2 in NActors::TEvents::TEvWakeup::TPtr NActors::TTestActorRuntimeBase::GrabEdgeEventRethrow(NActors::TActorId const&, TDuration) /-S/ydb/library/actors/testlib/test_runtime.h:577:24 #22 0x35fb24aa in NActors::TTestActorRuntime::SimulateSleep(TDuration) /-S/ydb/core/testlib/actors/test_runtime.cpp:301:9 #23 0x3601a16e in NKikimr::Tests::NCommon::THelper::StartSchemaRequestTableServiceImpl(TBasicString> const&, bool, bool) const /-S/ydb/core/testlib/common_helper.cpp:165:34 #24 0x18c74891 in NKikimr::NTestSuiteSecret::ValidationImpl(bool) /-S/ydb/services/metadata/secret/ut/ut_secret.cpp:285:21 #25 0x18c7f8a7 in operator() /-S/ydb/services/metadata/secret/ut/ut_secret.cpp:28:1 #26 0x18c7f8a7 in __invoke<(lambda at /-S/ydb/services/metadata/secret/ut/ut_secret.cpp:28:1) &> /-S/contrib/libs/cxxsupp/libcxx/include/__type_traits/invoke.h:149:25 #27 0x18c7f8a7 in __call<(lambda at /-S/ydb/services/metadata/secret/ut/ut_secret.cpp:28:1) &> /-S/contrib/libs/cxxsupp/libcxx/include/__type_traits/invoke.h:224:5 #28 0x18c7f8a7 in operator() /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:169:12 #29 0x18c7f8a7 in std::__y1::__function::__func, void ()>::operator()() /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:314:10 #30 0x19515845 in operator() /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:431:12 #31 0x19515845 in operator() /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:990:10 #32 0x19515845 in TColoredProcessor::Run(std::__y1::function, TBasicString> const&, char const*, bool) /-S/library/cpp/testing/unittest/utmain.cpp:525:20 #33 0x194e51c8 in NUnitTest::TTestBase::Run(std::__y1::function, TBasicString> const&, char const*, bool) /-S/library/cpp/testing/unittest/registar.cpp:373:18 #34 0x18c7e753 in NKikimr::NTestSuiteSecret::TCurrentTest::Execute() /-S/ydb/services/metadata/secret/ut/ut_secret.cpp:28:1 #35 0x194e6a95 in NUnitTest::TTestFactory::Execute() /-S/library/cpp/testing/unittest/registar.cpp:494:19 #36 0x1950fdbc in NUnitTest::RunMain(int, char**) /-S/library/cpp/testing/unittest/utmain.cpp:872:44 #37 0x7fa802aa6d8f (/lib/x86_64-linux-gnu/libc.so.6+0x29d8f) (BuildId: cd410b710f0f094c6832edd95931006d883af48e) Indirect leak of 8 byte(s) in 1 object(s) allocated from: #0 0x18d59cdd in operator new(unsigned long) /-S/contrib/libs/clang18-rt/lib/asan/asan_new_delete.cpp:86:3 #1 0x1ea4c4f1 in grpc_core::internal::StatusAllocHeapPtr(y_absl::lts_y_20240722::Status) /-S/contrib/libs/grpc/src/core/lib/gprpp/status_helper.cc:427:25 #2 0x1eb9ab62 in grpc_core::CallCombiner::Cancel(y_absl::lts_y_20240722::Status) /-S/contrib/libs/grpc/src/core/lib/iomgr/call_combiner.cc:233:25 #3 0x1eb4318e in grpc_core::FilterStackCall::CancelWithError(y_absl::lts_y_20240722::Status) /-S/contrib/libs/grpc/src/core/lib/surface/call.cc:1037:18 #4 0x1eb3e8fc in grpc_core::Call::CancelWithStatus(grpc_status_code, char const*) /-S/contrib/libs/grpc/src/core/lib/surface/call.cc:366:3 #5 0x1eb603d3 in grpc_call_cancel_with_status /-S/contrib/libs/grpc/src/core/lib/surface/call.cc:3499:30 #6 0x272cfe76 in grpc::ServerContextBase::TryCancel() const /-S/contrib/libs/grpc/src/cpp/server/server_context.cc:347:7 #7 0x2729009c in NYdbGrpc::TGrpcServiceProtectiable::StopService() 
/-S/ydb/library/grpc/server/grpc_server.cpp:64:26 #8 0x2729711e in NYdbGrpc::TGRpcServer::Stop() /-S/ydb/library/grpc/server/grpc_server.cpp:277:18 #9 0x3682e0e5 in Shutdown /-S/ydb/core/testlib/test_client.h:400:33 #10 0x3682e0e5 in NKikimr::Tests::TServer::ShutdownGRpc() /-S/ydb/core/testlib/test_client.h:356:22 #11 0x3682d8e9 in NKikimr::Tests::TServer::~TServer() /-S/ydb/core/testlib/test_client.cpp:1700:9 #12 0x3682e1ed in NKikimr::Tests::TServer::~TServer() /-S/ydb/core/testlib/test_client.cpp:1699:25 #13 0x18c77047 in CheckedDelete /-S/util/generic/ptr.h:36:5 #14 0x18c77047 in Destroy /-S/util/generic/ptr.h:57:9 #15 0x18c77047 in UnRef /-S/util/generic/ptr.h:421:13 #16 0x18c77047 in UnRef /-S/util/generic/ptr.h:426:9 #17 0x18c77047 in UnRef /-S/util/generic/ptr.h:497:12 #18 0x18c77047 in UnRef /-S/util/generic/ptr.h:641:13 #19 0x18c77047 in ~TIntrusivePtr /-S/util/generic/ptr.h:539:9 #20 0x18c77047 in NKikimr::NTestSuiteSecret::ValidationImpl(bool) /-S/ydb/services/metadata/secret/ut/ut_secret.cpp:306:5 #21 0x18c7f8a7 in operator() /-S/ydb/services/metadata/secret/ut/ut_secret.cpp:28:1 #22 0x18c7f8a7 in __invoke<(lambda at /-S/ydb/services/metadata/secret/ut/ut_secret.cpp:28:1) &> /-S/contrib/libs/cxxsupp/libcxx/include/__type_traits/invoke.h:149:25 #23 0x18c7f8a7 in __call<(lambda at /-S/ydb/services/metadata/secret/ut/ut_secret.cpp:28:1) &> /-S/contrib/libs/cxxsupp/libcxx/include/__type_traits/invoke.h:224:5 #24 0x18c7f8a7 in operator() /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:169:12 #25 0x18c7f8a7 in std::__y1::__function::__func, void ()>::operator()() /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:314:10 #26 0x19515845 in operator() /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:431:12 #27 0x19515845 in operator() /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:990:10 #28 0x19515845 in TColoredProcessor::Run(std::__y1::function, TBasicString> const&, char const*, bool) /-S/library/cpp/testing/unittest/utmain.cpp:525:20 #29 0x194e51c8 in NUnitTest::TTestBase::Run(std::__y1::function, TBasicString> const&, char const*, bool) /-S/library/cpp/testing/unittest/registar.cpp:373:18 #30 0x18c7e753 in NKikimr::NTestSuiteSecret::TCurrentTest::Execute() /-S/ydb/services/metadata/secret/ut/ut_secret.cpp:28:1 #31 0x194e6a95 in NUnitTest::TTestFactory::Execute() /-S/library/cpp/testing/unittest/registar.cpp:494:19 #32 0x1950fdbc in NUnitTest::RunMain(int, char**) /-S/library/cpp/testing/unittest/utmain.cpp:872:44 #33 0x7fa802aa6d8f (/lib/x86_64-linux-gnu/libc.so.6+0x29d8f) (BuildId: cd410b710f0f094c6832edd95931006d883af48e) SUMMARY: AddressSanitizer: 414241 byte(s) leaked in 5090 allocation(s). |92.4%| [TA] $(B)/ydb/core/load_test/ut/test-results/unittest/{meta.json ... results_accumulator.log} |92.4%| [TA] {RESULT} $(B)/ydb/core/load_test/ut/test-results/unittest/{meta.json ... results_accumulator.log} |92.4%| [TA] $(B)/ydb/library/table_creator/ut/test-results/unittest/{meta.json ... results_accumulator.log} |92.4%| [TA] {RESULT} $(B)/ydb/library/table_creator/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} >> TKeyValueTracingTest::WriteHuge |92.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/keyvalue/ut_trace/unittest >> KqpProxy::PassErrroViaSessionActor [GOOD] >> KqpProxy::NodeDisconnectedTest >> TKeyValueTracingTest::ReadSmall >> TKeyValueTracingTest::ReadHuge >> TKeyValueTracingTest::WriteSmall >> Secret::SimpleQueryService [GOOD] >> BackupRestore::RestoreTablePartitioningSettings >> BackupRestore::TestAllSchemeObjectTypes-EPathTypeInvalid [GOOD] >> BackupRestore::TestAllSchemeObjectTypes-EPathTypeTable ------- [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest >> Secret::SimpleQueryService [GOOD] Test command err: 2025-05-07T09:01:03.064777Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:105:2151], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T09:01:03.064965Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T09:01:03.065237Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002670/r3tmp/tmpkUIFFL/pdisk_1.dat TServer::EnableGrpc on GrpcPort 25523, node 1 TClient is connected to server localhost:14559 2025-05-07T09:01:03.899523Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-05-07T09:01:03.946349Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:01:03.951437Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:01:03.951515Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:01:03.951566Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:01:03.951912Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-05-07T09:01:04.003772Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:01:04.003931Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:01:04.015517Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected snapshot->GetSecrets().size() incorrect: SECRETS:ACCESS: Initialization finished REQUEST=CREATE OBJECT secret1 (TYPE SECRET) WITH value = `100`;EXPECTATION=1;WAITING=1 2025-05-07T09:01:16.366806Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:805:2673], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:16.366977Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:816:2678], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:16.374899Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:01:16.384402Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715657:3, at schemeshard: 72057594046644480 2025-05-07T09:01:16.432874Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:819:2681], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715657 completed, doublechecking } 2025-05-07T09:01:16.515990Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:870:2713] txid# 281474976715658, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:01:16.817989Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:1, at schemeshard: 72057594046644480 2025-05-07T09:01:17.891152Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-05-07T09:01:18.373418Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:1, at schemeshard: 72057594046644480 2025-05-07T09:01:19.356038Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715670:0, at schemeshard: 72057594046644480 2025-05-07T09:01:20.188519Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715673:0, at schemeshard: 72057594046644480 2025-05-07T09:01:20.738333Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715676:0, at schemeshard: 72057594046644480 2025-05-07T09:01:25.275313Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710758:2, at schemeshard: 72057594046644480 snapshot->GetSecrets().size() incorrect: SECRETS:ACCESS: 2025-05-07T09:01:26.066297Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710759:0, at schemeshard: 72057594046644480 2025-05-07T09:01:31.673008Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jtmzmm4v5wxef6bn94dd81yq", SessionId: ydb://session/3?node_id=1&id=YWI0YWEzMjUtNGIyYjViZDUtNGE4NmZmODItNzE0NGY3Njk=, Slow query, duration: 15.308943s, status: STATUS_CODE_UNSPECIFIED, user: root@builtin, results: 0b, text: "CREATE OBJECT secret1 (TYPE SECRET) WITH value = `100`", parameters: 0b REQUEST=CREATE OBJECT secret1 (TYPE SECRET) WITH value = `100`;RESULT=;EXPECTATION=1 FINISHED_REQUEST=CREATE OBJECT secret1 (TYPE SECRET) WITH value = `100`;EXPECTATION=1;WAITING=1 REQUEST=UPSERT OBJECT secret1_1 (TYPE SECRET) WITH value = `100`;EXPECTATION=1;WAITING=1 snapshot->GetAccess().size() incorrect: SECRETS:root@builtin:secret1:100;ACCESS: REQUEST=UPSERT OBJECT secret1_1 (TYPE SECRET) WITH value = `100`;RESULT=;EXPECTATION=1 snapshot->GetSecrets().size() incorrect: SECRETS:root@builtin:secret1:100;root@builtin:secret1_1:100;ACCESS: 2025-05-07T09:01:46.968808Z node 1 
:FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7261: Cannot get console configs 2025-05-07T09:01:46.968892Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded FINISHED_REQUEST=UPSERT OBJECT secret1_1 (TYPE SECRET) WITH value = `100`;EXPECTATION=1;WAITING=1 REQUEST=UPSERT OBJECT secret1_1 (TYPE SECRET) WITH value = `200`;EXPECTATION=1;WAITING=1 REQUEST=UPSERT OBJECT secret1_1 (TYPE SECRET) WITH value = `200`;RESULT=;EXPECTATION=1 snapshot->GetSecrets().size() incorrect: SECRETS:root@builtin:secret1:100;root@builtin:secret1_1:200;ACCESS: FINISHED_REQUEST=UPSERT OBJECT secret1_1 (TYPE SECRET) WITH value = `200`;EXPECTATION=1;WAITING=1 2025-05-07T09:02:13.910826Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715719. Ctx: { TraceId: 01jtmzpbqf22kbskkrxv27d9g1, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZTg4ZDA1Zi0xNzlmZjMwNi0yNTczNTMwLWIwMmM3OGRj, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root REQUEST=SELECT COUNT(*) FROM `/Root/.metadata/initialization/migrations`;RESULT=;EXPECTATION=1 REQUEST=SELECT COUNT(*) FROM `/Root/.metadata/initialization/migrations`;EXPECTATION=1 REQUEST=ALTER OBJECT secret1 (TYPE SECRET) SET value = `abcde`;EXPECTATION=1;WAITING=1 REQUEST=ALTER OBJECT secret1 (TYPE SECRET) SET value = `abcde`;RESULT=;EXPECTATION=1 FINISHED_REQUEST=ALTER OBJECT secret1 (TYPE SECRET) SET value = `abcde`;EXPECTATION=1;WAITING=1 REQUEST=CREATE OBJECT `secret1:test@test1` (TYPE SECRET_ACCESS);EXPECTATION=1;WAITING=1 2025-05-07T09:02:38.312107Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715736:0, at schemeshard: 72057594046644480 2025-05-07T09:02:39.297591Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715743:0, at schemeshard: 72057594046644480 2025-05-07T09:02:40.668612Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715754:0, at schemeshard: 72057594046644480 2025-05-07T09:02:41.102452Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715757:0, at schemeshard: 72057594046644480 REQUEST=CREATE OBJECT `secret1:test@test1` (TYPE SECRET_ACCESS);RESULT=;EXPECTATION=1 snapshot->GetAccess().size() incorrect (zero expects): SECRETS:root@builtin:secret1:abcde;root@builtin:secret1_1:200;ACCESS:root@builtin:secret1:test@test1; FINISHED_REQUEST=CREATE OBJECT `secret1:test@test1` (TYPE SECRET_ACCESS);EXPECTATION=1;WAITING=1 2025-05-07T09:02:53.535518Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715769. Ctx: { TraceId: 01jtmzqjrzdyexy8n7wqhtfnwp, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZDFiMWUxODAtNTUwNjA0MDMtMzNjYjQ3NzktZGMxZGMzYTI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root REQUEST=SELECT COUNT(*) FROM `/Root/.metadata/initialization/migrations`;RESULT=;EXPECTATION=1 REQUEST=SELECT COUNT(*) FROM `/Root/.metadata/initialization/migrations`;EXPECTATION=1 REQUEST=DROP OBJECT `secret1:test@test1` (TYPE SECRET_ACCESS);EXPECTATION=1;WAITING=1 REQUEST=DROP OBJECT `secret1:test@test1` (TYPE SECRET_ACCESS);RESULT=;EXPECTATION=1 snapshot->GetAccess().size() incorrect: SECRETS:root@builtin:secret1:abcde;root@builtin:secret1_1:200;ACCESS: FINISHED_REQUEST=DROP OBJECT `secret1:test@test1` (TYPE SECRET_ACCESS);EXPECTATION=1;WAITING=1 REQUEST=DROP OBJECT `secret1` (TYPE SECRET);EXPECTATION=1;WAITING=1 REQUEST=DROP OBJECT `secret1` (TYPE SECRET);RESULT=;EXPECTATION=1 snapshot->GetSecrets().size() incorrect: SECRETS:root@builtin:secret1_1:200;ACCESS: FINISHED_REQUEST=DROP OBJECT `secret1` (TYPE SECRET);EXPECTATION=1;WAITING=1 2025-05-07T09:03:30.321702Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715809. Ctx: { TraceId: 01jtmzrpvfag3hacsz2sm6y7zd, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MjUyM2VhYzgtMjQ5Y2QyYTktZGEwN2Q5ZTctYzBlMTFhYmU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root REQUEST=SELECT * FROM `/Root/.metadata/initialization/migrations`;RESULT=;EXPECTATION=1 REQUEST=SELECT * FROM `/Root/.metadata/initialization/migrations`;EXPECTATION=1 >> KqpProxy::PingNotExistedSession [GOOD] >> ScriptExecutionsTest::AttemptToUpdateDeletedLease >> YdbOlapStore::LogCountByResource [GOOD] >> KqpProxy::NoLocalSessionExecution [GOOD] >> KqpProxy::NoUserAccessToScriptExecutionsTable >> TableCreation::ConcurrentTableCreationWithDifferentVersions [GOOD] >> TableCreation::ConcurrentUpdateTable |92.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/async_replication/py3test >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_index_3__SYNC-pk_types1-all_types1-index1---SYNC] [GOOD] >> TKeyValueTracingTest::ReadSmall [FAIL] >> TKeyValueTracingTest::ReadHuge [FAIL] >> TKeyValueTracingTest::WriteSmall [FAIL] >> TKeyValueTracingTest::WriteHuge [FAIL] >> KqpProxy::CreatesScriptExecutionsTable [GOOD] >> KqpProxy::DatabasesCacheForServerless >> TPersQueueTest::UpdatePartitionLocation >> TPersQueueTest::SetupLockSession2 >> TopicService::OneConsumer_TheRangesDoNotOverlap >> TPersQueueTest::WriteExisting >> TPersQueueTest::SchemeshardRestart >> TPersQueueTest::BadTopic >> TPersQueueTest::DirectReadPreCached >> DemoTx::Scenario_1 >> TPartitionWriterCacheActorTests::WriteReplyOrder |92.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_populator/unittest |92.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_populator/unittest |92.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_populator/unittest |92.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_populator/unittest >> KqpProxy::NodeDisconnectedTest [GOOD] >> TPopulatorTest::Boot >> TPopulatorTestWithResets::UpdateAck >> TPopulatorTest::RemoveDir >> TPopulatorTest::MakeDir |92.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_populator/unittest |92.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_populator/unittest >> TPartitionWriterCacheActorTests::WriteReplyOrder [GOOD] >> TPartitionWriterCacheActorTests::DropOldWriter >> TPartitionWriterCacheActorTests::DropOldWriter [GOOD] >> 
TPersQueueCommonTest::Auth_CreateGrpcStreamWithInvalidTokenInInitialMetadata_SessionClosedWithUnauthenticatedError >> TableCreation::ConcurrentUpdateTable [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> YdbOlapStore::LogCountByResource [GOOD] Test command err: 2025-05-07T08:59:24.075923Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501625805497385040:2075];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:59:24.075980Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0028b5/r3tmp/tmpRpXEMP/pdisk_1.dat 2025-05-07T08:59:24.968618Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:59:24.996465Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:59:24.996530Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:59:25.002650Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 8362, node 1 2025-05-07T08:59:25.164884Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:59:25.164920Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:59:25.164929Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:59:25.165082Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:2082 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:59:25.542234Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 
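NOTE: in the secrets-test output further up, the REQUEST=/RESULT=/EXPECTATION=/WAITING= markers are emitted by the test harness; each REQUEST is a plain YQL statement executed against the cluster. Stripped of the harness markers, the lifecycle that test replays is the following (statements copied verbatim from the REQUEST lines above; the comments are this editor's reading of the adjacent snapshot->GetSecrets()/GetAccess() lines, not an authoritative syntax reference):

    CREATE OBJECT secret1 (TYPE SECRET) WITH value = `100`;    -- create a secret; snapshot then shows root@builtin:secret1:100
    UPSERT OBJECT secret1_1 (TYPE SECRET) WITH value = `100`;  -- create-or-replace a second secret
    UPSERT OBJECT secret1_1 (TYPE SECRET) WITH value = `200`;  -- overwrite its value in place
    ALTER OBJECT secret1 (TYPE SECRET) SET value = `abcde`;    -- change the first secret's value
    CREATE OBJECT `secret1:test@test1` (TYPE SECRET_ACCESS);   -- apparently grants test@test1 access to secret1 (snapshot: ACCESS:root@builtin:secret1:test@test1)
    DROP OBJECT `secret1:test@test1` (TYPE SECRET_ACCESS);     -- revoke that access
    DROP OBJECT `secret1` (TYPE SECRET);                       -- delete the secret; only secret1_1 remains in the snapshot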
self_check_result: GOOD issue_log { id: "YELLOW-f489-1231c6b1" status: YELLOW message: "Database has compute issues" location { database { name: "/Root" } } reason: "YELLOW-1ba8-1231c6b1" type: "DATABASE" level: 1 } issue_log { id: "YELLOW-1ba8-1231c6b1" status: YELLOW message: "Compute is overloaded" location { database { name: "/Root" } } reason: "YELLOW-e9e2-1231c6b1-1" reason: "YELLOW-e9e2-1231c6b1-2" reason: "YELLOW-e9e2-1231c6b1-3" type: "COMPUTE" level: 2 } issue_log { id: "YELLOW-e9e2-1231c6b1-1" status: YELLOW message: "LoadAverage above 100%" location { compute { node { id: 1 host: "::1" port: 12001 } } database { name: "/Root" } } type: "LOAD_AVERAGE" level: 4 } issue_log { id: "YELLOW-e9e2-1231c6b1-2" status: YELLOW message: "LoadAverage above 100%" location { compute { node { id: 2 host: "::1" port: 12002 } } database { name: "/Root" } } type: "LOAD_AVERAGE" level: 4 } issue_log { id: "YELLOW-e9e2-1231c6b1-3" status: YELLOW message: "LoadAverage above 100%" location { compute { node { id: 3 host: "::1" port: 12003 } } database { name: "/Root" } } type: "LOAD_AVERAGE" level: 4 } location { id: 1 host: "::1" port: 12001 } 2025-05-07T08:59:29.808053Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7501625825048100033:2073];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:59:29.808137Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-05-07T08:59:29.954722Z node 6 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[6:7501625825549363543:2082];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:59:29.954787Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-05-07T08:59:30.015441Z node 5 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[5:7501625825965840330:2092];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0028b5/r3tmp/tmpIlPjMh/pdisk_1.dat 2025-05-07T08:59:30.442680Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-05-07T08:59:30.686141Z node 4 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:59:30.731729Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:59:30.731817Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:59:30.734312Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:59:30.734386Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:59:30.734805Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T08:59:30.734867Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, 
(0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T08:59:30.735974Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:59:30.738644Z node 4 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 5 Cookie 5 2025-05-07T08:59:30.738675Z node 4 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 6 Cookie 6 2025-05-07T08:59:30.740580Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T08:59:30.740786Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 26213, node 4 2025-05-07T08:59:30.917354Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T08:59:30.917382Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T08:59:30.917389Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T08:59:30.917516Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:2296 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T08:59:31.147893Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 
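NOTE: the self_check_result blob near the top of this test's output is a flattened issue graph: each issue_log entry names its children through reason ids. Unrolled along those reason links, the YELLOW verdict reads:

    YELLOW-f489-1231c6b1    DATABASE      /Root               "Database has compute issues"
      YELLOW-1ba8-1231c6b1  COMPUTE                           "Compute is overloaded"
        YELLOW-e9e2-1231c6b1-1  LOAD_AVERAGE  node 1 (::1:12001)  "LoadAverage above 100%"
        YELLOW-e9e2-1231c6b1-2  LOAD_AVERAGE  node 2 (::1:12002)  "LoadAverage above 100%"
        YELLOW-e9e2-1231c6b1-3  LOAD_AVERAGE  node 3 (::1:12003)  "LoadAverage above 100%"

Only the ids, types, messages, and locations printed in the blob are used here; nothing is inferred beyond the reason links.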
2025-05-07T08:59:34.810930Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7501625825048100033:2073];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:59:34.813058Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:59:34.946751Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[5:7501625825965840330:2092];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:59:34.946805Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T08:59:34.955578Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[6:7501625825549363543:2082];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:59:34.955641Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Killing node 4 Killing node 5 2025-05-07T08:59:45.668940Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7261: Cannot get console configs 2025-05-07T08:59:45.668976Z node 4 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded Killing node 6 2025-05-07T08:59:54.220317Z node 8 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[8:7501625931661507194:2280];send_to=[0:7307199536658146131:7762515]; 2025-05-07T08:59:54.220368Z node 8 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0028b5/r3tmp/tmpordPTL/pdisk_1.dat 2025-05-07T08:59:54.584229Z node 8 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T08:59:54.607865Z node 8 : ... node 47 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:644: ActorId: [47:7501626868823971258:3104] TxId: 281474976715674. Ctx: { TraceId: 01jtmzrrdx4gdwr6npgasbsk52, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=47&id=YjUzZTg1YmItMmYxN2UwZWItYWIwMTIyNGEtZDdiZjFiNTg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Waiting for: CA [47:7501626868823971261:3384], 2025-05-07T09:03:32.862775Z node 47 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:1074: SelfId: [47:7501626868823971261:3384], TxId: 281474976715674, task: 1. Ctx: { SessionId : ydb://session/3?node_id=47&id=YjUzZTg1YmItMmYxN2UwZWItYWIwMTIyNGEtZDdiZjFiNTg=. TraceId : 01jtmzrrdx4gdwr6npgasbsk52. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Received channels info: Update { Id: 1 TransportVersion: DATA_TRANSPORT_OOB_PICKLE_1_0 SrcTaskId: 1 SrcEndpoint { ActorId { RawX1: 7501626868823971261 RawX2: 4503801490836792 } } DstEndpoint { ActorId { RawX1: 7501626868823971258 RawX2: 4503801490836512 } } InMemory: true } 2025-05-07T09:03:32.862941Z node 47 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:502: SelfId: [47:7501626868823971261:3384], TxId: 281474976715674, task: 1. 
Ctx: { SessionId : ydb://session/3?node_id=47&id=YjUzZTg1YmItMmYxN2UwZWItYWIwMTIyNGEtZDdiZjFiNTg=. TraceId : 01jtmzrrdx4gdwr6npgasbsk52. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Continue execution, either output buffers are not empty or not all channels are ready, hasDataToSend: 1, channelsReady: 1 2025-05-07T09:03:32.863083Z node 47 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:348: ActorId: [47:7501626868823971258:3104] TxId: 281474976715674. Ctx: { TraceId: 01jtmzrrdx4gdwr6npgasbsk52, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=47&id=YjUzZTg1YmItMmYxN2UwZWItYWIwMTIyNGEtZDdiZjFiNTg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Send TEvStreamData to [47:7501626864529003166:3104], seqNo: 1, nRows: 1 2025-05-07T09:03:32.863087Z node 47 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:2027: SelfId: [47:7501626868823971261:3384], TxId: 281474976715674, task: 1. Ctx: { SessionId : ydb://session/3?node_id=47&id=YjUzZTg1YmItMmYxN2UwZWItYWIwMTIyNGEtZDdiZjFiNTg=. TraceId : 01jtmzrrdx4gdwr6npgasbsk52. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Send stats to executor actor [47:7501626868823971258:3104] TaskId: 1 Stats: CpuTimeUs: 630 Tasks { TaskId: 1 CpuTimeUs: 218 FinishTimeMs: 1746608612862 OutputRows: 1 OutputBytes: 3 ResultRows: 1 ResultBytes: 3 ComputeCpuTimeUs: 42 BuildCpuTimeUs: 176 HostName: "ghrun-sykirh5vua" NodeId: 47 CreateTimeMs: 1746608612862 CurrentWaitOutputTimeUs: 44 UpdateTimeMs: 1746608612862 } MaxMemoryUsage: 1048576 2025-05-07T09:03:32.863112Z node 47 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:143: SelfId: [47:7501626868823971261:3384], TxId: 281474976715674, task: 1. Ctx: { SessionId : ydb://session/3?node_id=47&id=YjUzZTg1YmItMmYxN2UwZWItYWIwMTIyNGEtZDdiZjFiNTg=. TraceId : 01jtmzrrdx4gdwr6npgasbsk52. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. CA StateFunc 271646922 2025-05-07T09:03:32.863144Z node 47 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:670: TxId: 281474976715674, task: 1. Tasks execution finished, waiting for chunk delivery in output channelId: 1, seqNo: [1] 2025-05-07T09:03:32.863254Z node 47 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:433: ActorId: [47:7501626868823971258:3104] TxId: 281474976715674. Ctx: { TraceId: 01jtmzrrdx4gdwr6npgasbsk52, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=47&id=YjUzZTg1YmItMmYxN2UwZWItYWIwMTIyNGEtZDdiZjFiNTg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, got execution state from compute actor: [47:7501626868823971261:3384], task: 1, state: COMPUTE_STATE_EXECUTING, stats: { CpuTimeUs: 630 Tasks { TaskId: 1 CpuTimeUs: 218 FinishTimeMs: 1746608612862 OutputRows: 1 OutputBytes: 3 ResultRows: 1 ResultBytes: 3 ComputeCpuTimeUs: 42 BuildCpuTimeUs: 176 HostName: "ghrun-sykirh5vua" NodeId: 47 CreateTimeMs: 1746608612862 CurrentWaitOutputTimeUs: 44 UpdateTimeMs: 1746608612862 } MaxMemoryUsage: 1048576 } 2025-05-07T09:03:32.863329Z node 47 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:644: ActorId: [47:7501626868823971258:3104] TxId: 281474976715674. Ctx: { TraceId: 01jtmzrrdx4gdwr6npgasbsk52, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=47&id=YjUzZTg1YmItMmYxN2UwZWItYWIwMTIyNGEtZDdiZjFiNTg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Waiting for: CA [47:7501626868823971261:3384], 2025-05-07T09:03:32.863373Z node 47 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1797: SessionId: ydb://session/3?node_id=47&id=YjUzZTg1YmItMmYxN2UwZWItYWIwMTIyNGEtZDdiZjFiNTg=, ActorId: [47:7501626864529003166:3104], ActorState: ExecuteState, TraceId: 01jtmzrrdx4gdwr6npgasbsk52, Forwarded TEvStreamData to [47:7501626864529003164:3103] 2025-05-07T09:03:32.863813Z node 47 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:414: TxId: 281474976715674, send ack to channelId: 1, seqNo: 1, enough: 0, freeSpace: 8388552, to: [47:7501626868823971262:3384] 2025-05-07T09:03:32.863876Z node 47 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:143: SelfId: [47:7501626868823971261:3384], TxId: 281474976715674, task: 1. Ctx: { SessionId : ydb://session/3?node_id=47&id=YjUzZTg1YmItMmYxN2UwZWItYWIwMTIyNGEtZDdiZjFiNTg=. TraceId : 01jtmzrrdx4gdwr6npgasbsk52. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. CA StateFunc 271646922 2025-05-07T09:03:32.863917Z node 47 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:675: TxId: 281474976715674, task: 1. Tasks execution finished 2025-05-07T09:03:32.863934Z node 47 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:510: SelfId: [47:7501626868823971261:3384], TxId: 281474976715674, task: 1. Ctx: { SessionId : ydb://session/3?node_id=47&id=YjUzZTg1YmItMmYxN2UwZWItYWIwMTIyNGEtZDdiZjFiNTg=. TraceId : 01jtmzrrdx4gdwr6npgasbsk52. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Compute state finished. All channels and sinks finished 2025-05-07T09:03:32.864003Z node 47 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:494: TxId: 281474976715674, task: 1. pass away 2025-05-07T09:03:32.864089Z node 47 :KQP_COMPUTE DEBUG: log.cpp:784: fline=kqp_compute_actor_factory.cpp:66;problem=finish_compute_actor;tx_id=281474976715674;task_id=1;success=1;message={
: Error: COMPUTE_STATE_FINISHED }; 2025-05-07T09:03:32.864137Z node 47 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:433: ActorId: [47:7501626868823971258:3104] TxId: 281474976715674. Ctx: { TraceId: 01jtmzrrdx4gdwr6npgasbsk52, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=47&id=YjUzZTg1YmItMmYxN2UwZWItYWIwMTIyNGEtZDdiZjFiNTg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, got execution state from compute actor: [47:7501626868823971261:3384], task: 1, state: COMPUTE_STATE_FINISHED, stats: { CpuTimeUs: 1270 Tasks { TaskId: 1 CpuTimeUs: 221 FinishTimeMs: 1746608612863 OutputRows: 1 OutputBytes: 3 ResultRows: 1 ResultBytes: 3 ComputeCpuTimeUs: 45 BuildCpuTimeUs: 176 HostName: "ghrun-sykirh5vua" NodeId: 47 CreateTimeMs: 1746608612862 UpdateTimeMs: 1746608612863 } MaxMemoryUsage: 1048576 } 2025-05-07T09:03:32.864197Z node 47 :KQP_EXECUTER INFO: kqp_planner.cpp:688: TxId: 281474976715674. Ctx: { TraceId: 01jtmzrrdx4gdwr6npgasbsk52, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=47&id=YjUzZTg1YmItMmYxN2UwZWItYWIwMTIyNGEtZDdiZjFiNTg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Compute actor has finished execution: [47:7501626868823971261:3384] 2025-05-07T09:03:32.864288Z node 47 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:2140: ActorId: [47:7501626868823971258:3104] TxId: 281474976715674. Ctx: { TraceId: 01jtmzrrdx4gdwr6npgasbsk52, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=47&id=YjUzZTg1YmItMmYxN2UwZWItYWIwMTIyNGEtZDdiZjFiNTg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. terminate execution. 2025-05-07T09:03:32.864335Z node 47 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:838: ActorId: [47:7501626868823971258:3104] TxId: 281474976715674. Ctx: { TraceId: 01jtmzrrdx4gdwr6npgasbsk52, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=47&id=YjUzZTg1YmItMmYxN2UwZWItYWIwMTIyNGEtZDdiZjFiNTg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Resource usage for last stat interval: ComputeTime: 0.001270s ReadRows: 0 ReadBytes: 0 ru: 1 rate limiter was not found force flag: 1 2025-05-07T09:03:32.864405Z node 47 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1699: SessionId: ydb://session/3?node_id=47&id=YjUzZTg1YmItMmYxN2UwZWItYWIwMTIyNGEtZDdiZjFiNTg=, ActorId: [47:7501626864529003166:3104], ActorState: ExecuteState, TraceId: 01jtmzrrdx4gdwr6npgasbsk52, TEvTxResponse, CurrentTx: 2/2 response.status: SUCCESS 2025-05-07T09:03:32.864667Z node 47 :KQP_SESSION INFO: kqp_session_actor.cpp:1958: SessionId: ydb://session/3?node_id=47&id=YjUzZTg1YmItMmYxN2UwZWItYWIwMTIyNGEtZDdiZjFiNTg=, ActorId: [47:7501626864529003166:3104], ActorState: ExecuteState, TraceId: 01jtmzrrdx4gdwr6npgasbsk52, txInfo Status: Active Kind: ReadOnly TotalDuration: 0 ServerDuration: 522.914 QueriesCount: 1 2025-05-07T09:03:32.864725Z node 47 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2113: SessionId: ydb://session/3?node_id=47&id=YjUzZTg1YmItMmYxN2UwZWItYWIwMTIyNGEtZDdiZjFiNTg=, ActorId: [47:7501626864529003166:3104], ActorState: ExecuteState, TraceId: 01jtmzrrdx4gdwr6npgasbsk52, Create QueryResponse for action: QUERY_ACTION_EXECUTE with SUCCESS status 2025-05-07T09:03:32.864812Z node 47 :KQP_SESSION INFO: kqp_session_actor.cpp:2473: SessionId: ydb://session/3?node_id=47&id=YjUzZTg1YmItMmYxN2UwZWItYWIwMTIyNGEtZDdiZjFiNTg=, ActorId: [47:7501626864529003166:3104], ActorState: ExecuteState, TraceId: 01jtmzrrdx4gdwr6npgasbsk52, Cleanup start, isFinal: 1 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-05-07T09:03:32.864848Z node 47 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2534: SessionId: ydb://session/3?node_id=47&id=YjUzZTg1YmItMmYxN2UwZWItYWIwMTIyNGEtZDdiZjFiNTg=, ActorId: [47:7501626864529003166:3104], ActorState: ExecuteState, TraceId: 01jtmzrrdx4gdwr6npgasbsk52, EndCleanup, isFinal: 1 2025-05-07T09:03:32.864899Z node 47 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2270: SessionId: ydb://session/3?node_id=47&id=YjUzZTg1YmItMmYxN2UwZWItYWIwMTIyNGEtZDdiZjFiNTg=, ActorId: [47:7501626864529003166:3104], ActorState: ExecuteState, TraceId: 01jtmzrrdx4gdwr6npgasbsk52, Sent query response back to proxy, proxyRequestId: 5, proxyId: [47:7501626830169261160:2280] 2025-05-07T09:03:32.864934Z node 47 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2546: SessionId: ydb://session/3?node_id=47&id=YjUzZTg1YmItMmYxN2UwZWItYWIwMTIyNGEtZDdiZjFiNTg=, ActorId: [47:7501626864529003166:3104], ActorState: unknown state, TraceId: 01jtmzrrdx4gdwr6npgasbsk52, Cleanup temp tables: 0 2025-05-07T09:03:32.865401Z node 47 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1746608612000, txId: 18446744073709551615] shutting down 2025-05-07T09:03:32.865497Z node 47 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2637: SessionId: ydb://session/3?node_id=47&id=YjUzZTg1YmItMmYxN2UwZWItYWIwMTIyNGEtZDdiZjFiNTg=, ActorId: [47:7501626864529003166:3104], ActorState: unknown state, TraceId: 01jtmzrrdx4gdwr6npgasbsk52, Session actor destroyed RESULT: [[3u]] --------------------- STATS: total CPU: 964 duration: 514976 usec cpu: 347695 usec { name: "/Root/OlapStore/log1" reads { rows: 2 bytes: 16 } } duration: 3020 usec cpu: 4294 usec ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/proxy_service/ut/unittest >> KqpProxy::NodeDisconnectedTest [GOOD] Test command err: 2025-05-07T09:03:29.139859Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: 
fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501626856733191588:2067];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:03:29.139919Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00315f/r3tmp/tmp9JV8lx/pdisk_1.dat 2025-05-07T09:03:30.206024Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:03:30.251756Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:03:30.268554Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:03:30.276098Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:03:30.286104Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T09:03:30.409755Z node 1 :BS_CONTROLLER ERROR: {BSC07@impl.h:2175} ProcessControllerEvent event processing took too much time Type# 268637706 Duration# 0.101781s 2025-05-07T09:03:30.409830Z node 1 :BS_CONTROLLER ERROR: {BSC00@bsc.cpp:666} StateWork event processing took too much time Type# 2146435078 Duration# 0.101875s TClient is connected to server localhost:17204 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-05-07T09:03:30.812246Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T09:03:31.299822Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1665: Updated YQL logs priority to current level: 4 2025-05-07T09:03:31.309338Z node 1 :KQP_PROXY INFO: kqp_proxy_service.cpp:442: Cannot start publishing usage, tenants: /dc-1, empty 2025-05-07T09:03:31.392170Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1543: Created new session, sessionId: ydb://session/3?node_id=1&id=YmI5ZGYyNjAtZDdiNjg0ZmMtZDVhMTRiZjctYTBjY2JkODM=, workerId: [1:7501626865323126786:2309], database: , longSession: 0, local sessions count: 1 2025-05-07T09:03:31.392225Z node 1 :KQP_PROXY INFO: kqp_proxy_service.cpp:442: Cannot start publishing usage, tenants: /dc-1, empty 2025-05-07T09:03:31.402058Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:767: Ctx: { TraceId: , Database: , DatabaseId: , SessionId: ydb://session/3?node_id=1&id=YmI5ZGYyNjAtZDdiNjg0ZmMtZDVhMTRiZjctYTBjY2JkODM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 0.010000s timeout: 0.010000s cancelAfter: 0.000000s. Send request to target, requestId: 2, targetId: [1:7501626865323126786:2309] 2025-05-07T09:03:31.402096Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1294: Scheduled timeout timer for requestId: 2 timeout: 0.010000s actor id: [0:0:0] 2025-05-07T09:03:31.402137Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:512: Subscribed for config changes. 2025-05-07T09:03:31.402170Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:519: Updated table service config. 2025-05-07T09:03:31.402226Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1665: Updated YQL logs priority to current level: 4 2025-05-07T09:03:31.402297Z node 1 :KQP_PROXY INFO: kqp_proxy_service.cpp:442: Cannot start publishing usage, tenants: /dc-1, empty 2025-05-07T09:03:31.402413Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:425: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-05-07T09:03:31.402461Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:425: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-05-07T09:03:31.402505Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:425: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-05-07T09:03:31.402529Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:425: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-05-07T09:03:31.402549Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:425: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-05-07T09:03:31.412372Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501626865323126787:2310], DatabaseId: /dc-1, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:03:31.412467Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1313: Handle TEvPrivate::TEvOnRequestTimeout(2) 2025-05-07T09:03:31.412484Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1321: Reply timeout: requestId 2 sessionId: ydb://session/3?node_id=1&id=YmI5ZGYyNjAtZDdiNjg0ZmMtZDVhMTRiZjctYTBjY2JkODM= status: TIMEOUT round: 0 2025-05-07T09:03:31.420266Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2191: SessionId: ydb://session/3?node_id=1&id=YmI5ZGYyNjAtZDdiNjg0ZmMtZDVhMTRiZjctYTBjY2JkODM=, ActorId: [1:7501626865323126786:2309], ActorState: ReadyState, Reply query error, msg:
: Error: SomeUniqTextForUt proxyRequestId: 2 2025-05-07T09:03:31.420688Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:953: Forwarded response to sender actor, requestId: 2, sender: [1:7501626861028159444:2283], selfId: [1:7501626856733191831:2278], source: [1:7501626865323126786:2309] 2025-05-07T09:03:31.420782Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /dc-1, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:03:31.470813Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1313: Handle TEvPrivate::TEvOnRequestTimeout(2) 2025-05-07T09:03:31.470841Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1316: Invalid request info while on request timeout handle. RequestId: 2 2025-05-07T09:03:35.651422Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:700:2413], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T09:03:35.651727Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T09:03:35.651975Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T09:03:35.652267Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [3:697:2355], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T09:03:35.652393Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T09:03:35.652499Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00315f/r3tmp/tmpavX1g7/pdisk_1.dat 2025-05-07T09:03:35.882586Z node 2 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TClient is connected to server localhost:15006 KQP PROXY1 [2:8678280833929343339:121] KQP PROXY2 [3:8678280833929343339:121] SENDER [2:1142:2688] 2025-05-07T09:03:36.128220Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1543: Created new session, sessionId: ydb://session/3?node_id=3&id=YTc0YjVkM2ItNWZmNTEyMWUtZDhlYmI4MzUtZjIyNmU1MA==, workerId: [3:1143:2375], database: , longSession: 1, local sessions count: 1 2025-05-07T09:03:36.128362Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:649: Received create session request, trace_id: Created session ydb://session/3?node_id=3&id=YTc0YjVkM2ItNWZmNTEyMWUtZDhlYmI4MzUtZjIyNmU1MA== 2025-05-07T09:03:36.128797Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:767: Ctx: { TraceId: , Database: , DatabaseId: , SessionId: ydb://session/3?node_id=3&id=YTc0YjVkM2ItNWZmNTEyMWUtZDhlYmI4MzUtZjIyNmU1MA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 0.001000s timeout: 0.001000s cancelAfter: 0.000000s. Send request to target, requestId: 2, targetId: [3:8678280833929343339:121] 2025-05-07T09:03:36.128839Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1294: Scheduled timeout timer for requestId: 2 timeout: 0.001000s actor id: [0:0:0] 2025-05-07T09:03:36.129314Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:767: Ctx: { TraceId: , Database: , DatabaseId: , SessionId: ydb://session/3?node_id=3&id=YTc0YjVkM2ItNWZmNTEyMWUtZDhlYmI4MzUtZjIyNmU1MA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 0.001000s timeout: 0.001000s cancelAfter: 0.000000s. Send request to target, requestId: 3, targetId: [3:1143:2375] 2025-05-07T09:03:36.129347Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1294: Scheduled timeout timer for requestId: 3 timeout: 0.001000s actor id: [0:0:0] 2025-05-07T09:03:36.306222Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:1144:2689], DatabaseId: /dc-1, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:03:36.306332Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /dc-1, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:03:36.306608Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:1146:2376], DatabaseId: /dc-1, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:03:36.30665 ... _service.cpp:1543: Created new session, sessionId: ydb://session/3?node_id=3&id=ODA3YjRhM2QtZTU0NTJkYmItNjQ0ZWNmMTMtMTFlMGU3YmQ=, workerId: [3:1413:2519], database: , longSession: 1, local sessions count: 57 2025-05-07T09:03:37.460096Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:649: Received create session request, trace_id: Created session ydb://session/3?node_id=3&id=ODA3YjRhM2QtZTU0NTJkYmItNjQ0ZWNmMTMtMTFlMGU3YmQ= 2025-05-07T09:03:37.460388Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:767: Ctx: { TraceId: , Database: , DatabaseId: , SessionId: ydb://session/3?node_id=3&id=ODA3YjRhM2QtZTU0NTJkYmItNjQ0ZWNmMTMtMTFlMGU3YmQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 0.001000s timeout: 0.001000s cancelAfter: 0.000000s. Send request to target, requestId: 58, targetId: [3:8678280833929343339:121] 2025-05-07T09:03:37.460425Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1294: Scheduled timeout timer for requestId: 58 timeout: 0.001000s actor id: [0:0:0] 2025-05-07T09:03:37.460699Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:767: Ctx: { TraceId: , Database: , DatabaseId: , SessionId: ydb://session/3?node_id=3&id=ODA3YjRhM2QtZTU0NTJkYmItNjQ0ZWNmMTMtMTFlMGU3YmQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 0.001000s timeout: 0.001000s cancelAfter: 0.000000s. Send request to target, requestId: 87, targetId: [3:1413:2519] 2025-05-07T09:03:37.460721Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1294: Scheduled timeout timer for requestId: 87 timeout: 0.001000s actor id: [0:0:0] 2025-05-07T09:03:37.461485Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:1414:2751], DatabaseId: /dc-1, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:03:37.461592Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /dc-1, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:03:37.479910Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:1416:2520], DatabaseId: /dc-1, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:03:37.479985Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /dc-1, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:03:37.490445Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1313: Handle TEvPrivate::TEvOnRequestTimeout(87) 2025-05-07T09:03:37.490521Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1321: Reply timeout: requestId 87 sessionId: ydb://session/3?node_id=3&id=ODA3YjRhM2QtZTU0NTJkYmItNjQ0ZWNmMTMtMTFlMGU3YmQ= status: TIMEOUT round: 0 2025-05-07T09:03:37.490603Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1313: Handle TEvPrivate::TEvOnRequestTimeout(58) 2025-05-07T09:03:37.490623Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1321: Reply timeout: requestId 58 sessionId: ydb://session/3?node_id=3&id=ODA3YjRhM2QtZTU0NTJkYmItNjQ0ZWNmMTMtMTFlMGU3YmQ= status: TIMEOUT round: 0 2025-05-07T09:03:37.490731Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2578: SessionId: ydb://session/3?node_id=3&id=ODA3YjRhM2QtZTU0NTJkYmItNjQ0ZWNmMTMtMTFlMGU3YmQ=, ActorId: [3:1413:2519], ActorState: ExecuteState, TraceId: 01jtmzrxzm5wgzwn9hyd7cxerf, Create QueryResponse for error on request, msg: 2025-05-07T09:03:37.490814Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:953: Forwarded response to sender actor, requestId: 58, sender: [2:1142:2688], selfId: [2:206:2171], source: [2:206:2171] 2025-05-07T09:03:37.492132Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:953: Forwarded response to sender actor, requestId: 87, sender: [2:206:2171], selfId: [3:236:2127], source: [3:1413:2519] 2025-05-07T09:03:37.492260Z node 2 :KQP_PROXY ERROR: kqp_proxy_service.cpp:936: Unknown sender for proxy response, requestId: 58 2025-05-07T09:03:37.493696Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1543: Created new session, sessionId: ydb://session/3?node_id=3&id=NTdkMWRiYTEtNWJjYjY2YmEtZDk0MGRmZTUtNTg0M2UyZGY=, workerId: [3:1420:2523], database: , longSession: 1, local sessions count: 58 2025-05-07T09:03:37.493827Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:649: Received create session request, trace_id: 2025-05-07T09:03:37.494118Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:881: Received ping session request, request_id: 59, sender: [2:1142:2688], trace_id: 2025-05-07T09:03:37.494199Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1294: Scheduled timeout timer for requestId: 59 timeout: 0.001000s actor id: [0:0:0] 2025-05-07T09:03:37.494282Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:835: Received ping session request, has local session: ydb://session/3?node_id=3&id=NTdkMWRiYTEtNWJjYjY2YmEtZDk0MGRmZTUtNTg0M2UyZGY=, rpc ctrl: [0:0:0], sameNode: 0, trace_id: 2025-05-07T09:03:37.494355Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:953: Forwarded response to sender actor, requestId: 59, sender: [2:1142:2688], selfId: [2:206:2171], source: [3:236:2127] 2025-05-07T09:03:37.495575Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1543: Created new session, sessionId: ydb://session/3?node_id=3&id=ZGIyYjhiN2YtMTcyMWUwZC05N2ExYzk1ZS0yYThiMDBmNQ==, workerId: [3:1421:2524], database: , longSession: 1, local sessions count: 59 2025-05-07T09:03:37.495666Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:649: Received create session request, trace_id: Created session ydb://session/3?node_id=3&id=ZGIyYjhiN2YtMTcyMWUwZC05N2ExYzk1ZS0yYThiMDBmNQ== 2025-05-07T09:03:37.495948Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:767: Ctx: { TraceId: , Database: , DatabaseId: , SessionId: ydb://session/3?node_id=3&id=ZGIyYjhiN2YtMTcyMWUwZC05N2ExYzk1ZS0yYThiMDBmNQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. 
TEvQueryRequest, set timer for: 0.001000s timeout: 0.001000s cancelAfter: 0.000000s. Send request to target, requestId: 60, targetId: [3:8678280833929343339:121] 2025-05-07T09:03:37.495983Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1294: Scheduled timeout timer for requestId: 60 timeout: 0.001000s actor id: [0:0:0] 2025-05-07T09:03:37.496184Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:767: Ctx: { TraceId: , Database: , DatabaseId: , SessionId: ydb://session/3?node_id=3&id=ZGIyYjhiN2YtMTcyMWUwZC05N2ExYzk1ZS0yYThiMDBmNQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 0.001000s timeout: 0.001000s cancelAfter: 0.000000s. Send request to target, requestId: 90, targetId: [3:1421:2524] 2025-05-07T09:03:37.496223Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1294: Scheduled timeout timer for requestId: 90 timeout: 0.001000s actor id: [0:0:0] 2025-05-07T09:03:37.497164Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:1422:2753], DatabaseId: /dc-1, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:03:37.497242Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /dc-1, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:03:37.515642Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:1423:2525], DatabaseId: /dc-1, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:03:37.515841Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /dc-1, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:03:37.526270Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1313: Handle TEvPrivate::TEvOnRequestTimeout(90) 2025-05-07T09:03:37.526337Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1321: Reply timeout: requestId 90 sessionId: ydb://session/3?node_id=3&id=ZGIyYjhiN2YtMTcyMWUwZC05N2ExYzk1ZS0yYThiMDBmNQ== status: TIMEOUT round: 0 2025-05-07T09:03:37.526407Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1313: Handle TEvPrivate::TEvOnRequestTimeout(59) 2025-05-07T09:03:37.526426Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1316: Invalid request info while on request timeout handle. RequestId: 59 2025-05-07T09:03:37.526493Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2578: SessionId: ydb://session/3?node_id=3&id=ZGIyYjhiN2YtMTcyMWUwZC05N2ExYzk1ZS0yYThiMDBmNQ==, ActorId: [3:1421:2524], ActorState: ExecuteState, TraceId: 01jtmzry0rf8wcqnhhj2x0wdrs, Create QueryResponse for error on request, msg: 2025-05-07T09:03:37.526566Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1313: Handle TEvPrivate::TEvOnRequestTimeout(60) 2025-05-07T09:03:37.526581Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1321: Reply timeout: requestId 60 sessionId: ydb://session/3?node_id=3&id=ZGIyYjhiN2YtMTcyMWUwZC05N2ExYzk1ZS0yYThiMDBmNQ== status: TIMEOUT round: 0 2025-05-07T09:03:37.527789Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:953: Forwarded response to sender actor, requestId: 60, sender: [2:1142:2688], selfId: [2:206:2171], source: [2:206:2171] 2025-05-07T09:03:37.527914Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:953: Forwarded response to sender actor, requestId: 90, sender: [2:206:2171], selfId: [3:236:2127], source: [3:1421:2524] 2025-05-07T09:03:37.528012Z node 2 :KQP_PROXY ERROR: kqp_proxy_service.cpp:936: Unknown sender for proxy response, requestId: 60 2025-05-07T09:03:37.529481Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1543: Created new session, sessionId: ydb://session/3?node_id=3&id=NDE2MjY2ZGQtNjczZmVlZjMtMmNmZTY0YzEtZjY3MzEzNjg=, workerId: [3:1428:2528], database: , longSession: 1, local sessions count: 60 2025-05-07T09:03:37.529578Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:649: Received create session request, trace_id: 2025-05-07T09:03:37.529819Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:881: Received ping session request, request_id: 61, sender: [2:1142:2688], trace_id: 2025-05-07T09:03:37.529898Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1294: Scheduled timeout timer for requestId: 61 timeout: 0.001000s actor id: [0:0:0] 2025-05-07T09:03:37.542448Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:03:37.542570Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:03:37.545046Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:03:37.545102Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:03:37.555789Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1313: Handle TEvPrivate::TEvOnRequestTimeout(61) 2025-05-07T09:03:37.555849Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1321: Reply timeout: requestId 61 sessionId: ydb://session/3?node_id=3&id=NDE2MjY2ZGQtNjczZmVlZjMtMmNmZTY0YzEtZjY3MzEzNjg= status: TIMEOUT round: 0 2025-05-07T09:03:37.555949Z 
node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:953: Forwarded response to sender actor, requestId: 61, sender: [2:1142:2688], selfId: [2:206:2171], source: [2:206:2171] >> ScriptExecutionsTest::AttemptToUpdateDeletedLease [GOOD] >> TPopulatorTest::Boot [GOOD] >> KqpProxy::NoUserAccessToScriptExecutionsTable [GOOD] >> TPopulatorTestWithResets::UpdateAck [GOOD] >> KqpUserConstraint::KqpReadNull+UploadNull [GOOD] >> TPopulatorTest::RemoveDir [GOOD] >> TPopulatorTest::MakeDir [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_populator/unittest >> TPopulatorTest::Boot [GOOD] Test command err: 2025-05-07T09:03:39.528711Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T09:03:39.528779Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_populator/unittest >> TPopulatorTestWithResets::UpdateAck [GOOD] Test command err: 2025-05-07T09:03:39.527840Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T09:03:39.527912Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TestModificationResults wait txId: 100 2025-05-07T09:03:39.655499Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:658: [1:95:2121] Handle TEvSchemeShard::TEvDescribeSchemeResult { Status: StatusSuccess Path: "/Root" PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 2 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: true } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/Root" } } } PathId: 1 PathOwnerId: 72057594046678944 }: sender# [1:70:2109], cookie# 100, event size# 330, preserialized size# 51 2025-05-07T09:03:39.655603Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:675: [1:95:2121] Update description: owner# 72057594046678944, pathId# [OwnerId: 72057594046678944, LocalPathId: 1], cookie# 100, is deletion# false, version: 3 2025-05-07T09:03:39.660359Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:96:2122] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:95:2121], cookie# 100 2025-05-07T09:03:39.660451Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:97:2123] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:95:2121], cookie# 100 2025-05-07T09:03:39.660503Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:98:2124] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:95:2121], cookie# 100 
2025-05-07T09:03:39.661010Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:658: [1:95:2121] Handle TEvSchemeShard::TEvDescribeSchemeResult { Status: StatusSuccess Path: "/Root/DirC" PathDescription { Self { Name: "DirC" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: false CreateTxId: 100 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 2 PathOwnerId: 72057594046678944 }: sender# [1:70:2109], cookie# 100, event size# 220, preserialized size# 2 2025-05-07T09:03:39.661048Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:675: [1:95:2121] Update description: owner# 72057594046678944, pathId# [OwnerId: 72057594046678944, LocalPathId: 2], cookie# 100, is deletion# false, version: 2 FAKE_COORDINATOR: Add transaction: 100 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 100 at step: 5000001 FAKE_COORDINATOR: Erasing txId 100 2025-05-07T09:03:39.665002Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:658: [1:95:2121] Handle TEvSchemeShard::TEvDescribeSchemeResult { Status: StatusSuccess Path: "/Root" PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 3 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: true } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/Root" } } } PathId: 1 PathOwnerId: 72057594046678944 }: sender# [1:70:2109], cookie# 100, event size# 340, preserialized size# 56 2025-05-07T09:03:39.665053Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:675: [1:95:2121] Update description: owner# 72057594046678944, pathId# [OwnerId: 72057594046678944, LocalPathId: 1], cookie# 100, is deletion# false, version: 4 2025-05-07T09:03:39.665325Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:658: [1:95:2121] Handle TEvSchemeShard::TEvDescribeSchemeResult { Status: 
StatusSuccess Path: "/Root/DirC" PathDescription { Self { Name: "DirC" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 100 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 2 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 2 PathOwnerId: 72057594046678944 }: sender# [1:70:2109], cookie# 100, event size# 225, preserialized size# 2 2025-05-07T09:03:39.665384Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:675: [1:95:2121] Update description: owner# 72057594046678944, pathId# [OwnerId: 72057594046678944, LocalPathId: 2], cookie# 100, is deletion# false, version: 3 TestModificationResult got TxId: 100, wait until txId: 100 TestWaitNotification wait txId: 100 2025-05-07T09:03:39.695804Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:231: [1:96:2122] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 72057594046678944 Generation: 2 }: sender# [1:12:2059] 2025-05-07T09:03:39.695885Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:243: [1:96:2122] Successful handshake: replica# [1:12:2059] 2025-05-07T09:03:39.695923Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:252: [1:96:2122] Resume sync: replica# [1:12:2059], fromPathId# [OwnerId: 72057594046678944, LocalPathId: 2] 2025-05-07T09:03:39.695987Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:231: [1:97:2123] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 72057594046678944 Generation: 2 }: sender# [1:15:2062] 2025-05-07T09:03:39.696009Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:243: [1:97:2123] Successful handshake: replica# [1:15:2062] 2025-05-07T09:03:39.696030Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:252: [1:97:2123] Resume sync: replica# [1:15:2062], fromPathId# [OwnerId: 72057594046678944, LocalPathId: 2] 2025-05-07T09:03:39.696064Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:231: [1:98:2124] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 72057594046678944 Generation: 2 }: sender# [1:18:2065] 2025-05-07T09:03:39.696090Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:243: [1:98:2124] Successful handshake: replica# [1:18:2065] 2025-05-07T09:03:39.696122Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:252: [1:98:2124] Resume sync: replica# [1:18:2065], fromPathId# [OwnerId: 72057594046678944, LocalPathId: 2] 2025-05-07T09:03:39.696199Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:526: [1:95:2121] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestDescribe { PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Replica: [1:24339059:0] }: sender# [1:96:2122] 2025-05-07T09:03:39.696272Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:263: [1:96:2122] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvDescribeResult { Commit: false DeletedPathBegin: 0 
DeletedPathEnd: 0 { Path: /Root/DirC PathId: [OwnerId: 72057594046678944, LocalPathId: 2] PathVersion: 3 } }: sender# [1:95:2121] 2025-05-07T09:03:39.696372Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:620: [1:95:2121] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestUpdate { PathId: [OwnerId: 72057594046678944, LocalPathId: 1] }: sender# [1:96:2122] 2025-05-07T09:03:39.696501Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:96:2122] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:95:2121], cookie# 0 2025-05-07T09:03:39.696627Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:620: [1:95:2121] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestUpdate { PathId: [OwnerId: 72057594046678944, LocalPathId: 2] }: sender# [1:96:2122] 2025-05-07T09:03:39.696669Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:96:2122] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 4 }: sender# [1:12:2059], cookie# 0 2025-05-07T09:03:39.696738Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:526: [1:95:2121] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestDescribe { PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Replica: [1:1099535966835:0] }: sender# [1:97:2123] 2025-05-07T09:03:39.696773Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:96:2122] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:95:2121], cookie# 0 2025-05-07T09:03:39.696828Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:263: [1:97:2123] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvDescribeResult { Commit: false DeletedPathBegin: 0 DeletedPathEnd: 0 { Path: /Root/DirC PathId: [OwnerId: 72057594046678944, LocalPathId: 2] PathVersion: 3 } }: sender# [1:95:2121] 2025-05-07T09:03:39.696946Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:620: [1:95:2121] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestUpdate { PathId: [OwnerId: 72057594046678944, LocalPathId: 1] }: sender# [1:97:2123] 2025-05-07T09:03:39.697015Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:96:2122] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 3 }: sender# [1:12:2059], cookie# 0 2025-05-07T09:03:39.697061Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:97:2123] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:95:2121], cookie# 0 2025-05-07T09:03:39.697138Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:620: [1:95:2121] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestUpdate { PathId: [OwnerId: 72057594046678944, LocalPathId: 2] }: sender# [1:97:2123] 2025-05-07T09:03:39.697178Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:97:2123] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 4 }: sender# [1:15:2062], cookie# 0 2025-05-07T09:03:39.697222Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:526: [1:95:2121] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestDescribe { PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Replica: [1:2199047594611:0] }: sender# [1:98:2124] 2025-05-07T09:03:39.697245Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:97:2123] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 
72057594046678944 Generation: 2 }: sender# [1:95:2121], cookie# 0 2025-05-07T09:03:39.697301Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:263: [1:98:2124] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvDescribeResult { Commit: false DeletedPathBegin: 0 DeletedPathEnd: 0 { Path: /Root/DirC PathId: [OwnerId: 72057594046678944, LocalPathId: 2] PathVersion: 3 } }: sender# [1:95:2121] 2025-05-07T09:03:39.697353Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:620: [1:95:2121] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestUpdate { PathId: [OwnerId: 72057594046678944, LocalPathId: 1] }: sender# [1:98:2124] 2025-05-07T09:03:39.697377Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:97:2123] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 3 }: sender# [1:15:2062], cookie# 0 2025-05-07T09:03:39.697409Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:98:2124] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:95:2121], cookie# 0 2025-05-07T09:03:39.697464Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:620: [1:95:2121] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestUpdate { PathId: [OwnerId: 72057594046678944, LocalPathId: 2] }: sender# [1:98:2124] 2025-05-07T09:03:39.697504Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:98:2124] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 4 }: sender# [1:18:2065], cookie# 0 2025-05-07T09:03:39.697540Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:526: [1:95:2121] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestDescribe { PathId: [OwnerId: 72057594046678944, LocalPathId: 3] Replica: [1:24339059:0] }: sender# [1:96:2122] 2025-05-07T09:03:39.697567Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:263: [1:96:2122] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvDescribeResult { Commit: true DeletedPathBegin: 0 DeletedPathEnd: 0 }: sender# [1:95:2121] 2025-05-07T09:03:39.697588Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:98:2124] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:95:2121], cookie# 0 2025-05-07T09:03:39.697662Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:713: [1:95:2121] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 4 }: sender# [1:96:2122], cookie# 0 2025-05-07T09:03:39.697697Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:719: [1:95:2121] Ack for unknown update (already acked?): sender# [1:96:2122], cookie# 0 2025-05-07T09:03:39.697759Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:297: [1:96:2122] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 72057594046678944 Generation: 2 }: sender# [1:12:2059] 2025-05-07T09:03:39.697835Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:98:2124] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 3 }: sender# [1:18:2065], cookie# 0 2025-05-07T09:03:39.697889Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:713: [1:95:2121] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 4 }: sender# [1:96:2122], cookie# 100 
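The repeated "Ack for unknown update (already acked?)" lines in this trace are expected rather than errors: judging by the log, the populator acks the schemeshard as soon as a majority of the three replica populators confirm a given (cookie, pathId) and then drops its bookkeeping, so the remaining ack finds no pending entry. Below is a hedged sketch of that quorum bookkeeping, assuming a simple majority rule; the class and method names are hypothetical and the real populator.cpp logic may differ in detail.

// Hypothetical sketch: ack-quorum tracking with late-ack deduplication.
#include <cstdint>
#include <iostream>
#include <map>
#include <set>
#include <tuple>
#include <utility>

struct PathId {
    uint64_t OwnerId = 0;
    uint64_t LocalId = 0;
    bool operator<(const PathId& o) const {
        return std::tie(OwnerId, LocalId) < std::tie(o.OwnerId, o.LocalId);
    }
};

class AckQuorumTracker {
    // (cookie, path) -> set of replicas that have acked so far
    std::map<std::pair<uint64_t, PathId>, std::set<int>> Pending;
    const int ReplicaCount;
public:
    explicit AckQuorumTracker(int replicas) : ReplicaCount(replicas) {}

    void Track(uint64_t cookie, PathId path) { Pending[{cookie, path}]; }

    // Returns true exactly once, when a majority of replicas have acked;
    // the entry is erased, so later acks look like already-acked duplicates.
    bool HandleAck(uint64_t cookie, PathId path, int replica) {
        auto it = Pending.find({cookie, path});
        if (it == Pending.end()) {
            std::cout << "Ack for unknown update (already acked?), cookie# "
                      << cookie << "\n";
            return false;
        }
        it->second.insert(replica);
        if (static_cast<int>(it->second.size()) * 2 > ReplicaCount) {
            Pending.erase(it); // quorum reached: ack the owner, drop state
            return true;
        }
        return false;
    }
};

int main() {
    AckQuorumTracker tracker(3);
    PathId root{72057594046678944ULL, 1};
    tracker.Track(100, root);
    for (int replica = 0; replica < 3; ++replica) {
        // The second ack reaches the 2-of-3 quorum; the third is "unknown".
        if (tracker.HandleAck(100, root, replica))
            std::cout << "Ack update: ack to owner, cookie# 100\n";
    }
}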
2025-05-07T09:03:39.697921Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:526: [1:95:2121] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestDescribe { PathId: [OwnerId: 72057594046678944, LocalPathId: 3] Replica: [1:1099535966835:0] }: sender# [1:97:2123] 2025-05-07T09:03:39.697948Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:263: [1:97:2123] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvDescribeResult { Commit: true DeletedPathBegin: 0 DeletedPathEnd: 0 }: sender# [1:95:2121] 2025-05-07T09:03:39.698010Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:713: [1:95:2121] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 3 }: sender# [1:96:2122], cookie# 0 2025-05-07T09:03:39.698034Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:719: [1:95:2121] Ack for unknown update (already acked?): sender# [1:96:2122], cookie# 0 2025-05-07T09:03:39.698062Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:297: [1:97:2123] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 72057594046678944 Generation: 2 }: sender# [1:15:2062] 2025-05-07T09:03:39.698094Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:713: [1:95:2121] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 3 }: sender# [1:96:2122], cookie# 100 2025-05-07T09:03:39.698128Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:713: [1:95:2121] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 4 }: sender# [1:97:2123], cookie# 0 2025-05-07T09:03:39.698147Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:719: [1:95:2121] Ack for unknown update (already acked?): sender# [1:97:2123], cookie# 0 2025-05-07T09:03:39.698177Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:713: [1:95:2121] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 4 }: sender# [1:97:2123], cookie# 100 2025-05-07T09:03:39.698200Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:735: [1:95:2121] Ack update: ack to# [1:70:2109], cookie# 100, pathId# [OwnerId: 72057594046678944, LocalPathId: 1], version# 3 2025-05-07T09:03:39.698240Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:735: [1:95:2121] Ack update: ack to# [1:70:2109], cookie# 100, pathId# [OwnerId: 72057594046678944, LocalPathId: 1], version# 4 2025-05-07T09:03:39.698547Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:526: [1:95:2121] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestDescribe { PathId: [OwnerId: 72057594046678944, LocalPathId: 3] Replica: [1:2199047594611:0] }: sender# [1:98:2124] 2025-05-07T09:03:39.698600Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:263: [1:98:2124] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvDescribeResult { Commit: true DeletedPathBegin: 0 DeletedPathEnd: 0 }: sender# [1:95:2121] 2025-05-07T09:03:39.698680Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:713: [1:95:2121] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 3 }: sender# [1:97:2123], cookie# 0 2025-05-07T09:03:39.698724Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:719: [1:95:2121] Ack for unknown update (already acked?): sender# [1:97:2123], cookie# 0 2025-05-07T09:03:39.698756Z 
node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:297: [1:98:2124] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 72057594046678944 Generation: 2 }: sender# [1:18:2065] 2025-05-07T09:03:39.699080Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:713: [1:95:2121] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 3 }: sender# [1:97:2123], cookie# 100 2025-05-07T09:03:39.699128Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:735: [1:95:2121] Ack update: ack to# [1:70:2109], cookie# 100, pathId# [OwnerId: 72057594046678944, LocalPathId: 2], version# 2 2025-05-07T09:03:39.699152Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:735: [1:95:2121] Ack update: ack to# [1:70:2109], cookie# 100, pathId# [OwnerId: 72057594046678944, LocalPathId: 2], version# 3 2025-05-07T09:03:39.699460Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:713: [1:95:2121] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 4 }: sender# [1:98:2124], cookie# 0 2025-05-07T09:03:39.699491Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:719: [1:95:2121] Ack for unknown update (already acked?): sender# [1:98:2124], cookie# 0 2025-05-07T09:03:39.699543Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:713: [1:95:2121] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 4 }: sender# [1:98:2124], cookie# 100 2025-05-07T09:03:39.699563Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:719: [1:95:2121] Ack for unknown update (already acked?): sender# [1:98:2124], cookie# 100 2025-05-07T09:03:39.699775Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:713: [1:95:2121] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 3 }: sender# [1:98:2124], cookie# 0 2025-05-07T09:03:39.699803Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:719: [1:95:2121] Ack for unknown update (already acked?): sender# [1:98:2124], cookie# 0 2025-05-07T09:03:39.699912Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:713: [1:95:2121] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 3 }: sender# [1:98:2124], cookie# 100 2025-05-07T09:03:39.699939Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:719: [1:95:2121] Ack for unknown update (already acked?): sender# [1:98:2124], cookie# 100 TestWaitNotification: OK eventTxId 100 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_populator/unittest >> TPopulatorTest::RemoveDir [GOOD] Test command err: 2025-05-07T09:03:39.523200Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T09:03:39.523296Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TestModificationResults wait txId: 100 2025-05-07T09:03:39.655977Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:658: [1:95:2121] Handle TEvSchemeShard::TEvDescribeSchemeResult { Status: StatusSuccess Path: "/Root" PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" 
PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 2 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: true } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/Root" } } } PathId: 1 PathOwnerId: 72057594046678944 }: sender# [1:70:2109], cookie# 100, event size# 330, preserialized size# 51 2025-05-07T09:03:39.656069Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:675: [1:95:2121] Update description: owner# 72057594046678944, pathId# [OwnerId: 72057594046678944, LocalPathId: 1], cookie# 100, is deletion# false, version: 3 2025-05-07T09:03:39.657592Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:96:2122] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:95:2121], cookie# 100 2025-05-07T09:03:39.657715Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:97:2123] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:95:2121], cookie# 100 2025-05-07T09:03:39.657770Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:98:2124] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:95:2121], cookie# 100 2025-05-07T09:03:39.659494Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:658: [1:95:2121] Handle TEvSchemeShard::TEvDescribeSchemeResult { Status: StatusSuccess Path: "/Root/DirB" PathDescription { Self { Name: "DirB" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: false CreateTxId: 100 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 2 PathOwnerId: 72057594046678944 }: sender# [1:70:2109], cookie# 100, event size# 220, preserialized size# 2 2025-05-07T09:03:39.659575Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:675: [1:95:2121] Update description: owner# 72057594046678944, pathId# [OwnerId: 72057594046678944, LocalPathId: 2], cookie# 100, is deletion# false, version: 2 2025-05-07T09:03:39.659695Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:96:2122] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 
72057594046678944, LocalPathId: 1] Version: 3 }: sender# [1:12:2059], cookie# 100 2025-05-07T09:03:39.659756Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:97:2123] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 3 }: sender# [1:15:2062], cookie# 100 2025-05-07T09:03:39.659795Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:98:2124] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 3 }: sender# [1:18:2065], cookie# 100 2025-05-07T09:03:39.659989Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:713: [1:95:2121] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 3 }: sender# [1:96:2122], cookie# 100 2025-05-07T09:03:39.660046Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:96:2122] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:95:2121], cookie# 100 2025-05-07T09:03:39.660100Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:97:2123] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:95:2121], cookie# 100 2025-05-07T09:03:39.660143Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:98:2124] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:95:2121], cookie# 100 2025-05-07T09:03:39.660318Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:713: [1:95:2121] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 3 }: sender# [1:97:2123], cookie# 100 2025-05-07T09:03:39.660359Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:735: [1:95:2121] Ack update: ack to# [1:70:2109], cookie# 100, pathId# [OwnerId: 72057594046678944, LocalPathId: 1], version# 3 2025-05-07T09:03:39.660410Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:96:2122] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 2 }: sender# [1:12:2059], cookie# 100 2025-05-07T09:03:39.660549Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:97:2123] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 2 }: sender# [1:15:2062], cookie# 100 2025-05-07T09:03:39.660591Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:98:2124] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 2 }: sender# [1:18:2065], cookie# 100 2025-05-07T09:03:39.660931Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:713: [1:95:2121] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 3 }: sender# [1:98:2124], cookie# 100 2025-05-07T09:03:39.661119Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:713: [1:95:2121] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 2 }: sender# [1:96:2122], cookie# 100 2025-05-07T09:03:39.661214Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:713: [1:95:2121] Handle NKikimrSchemeBoard.TEvUpdateAck { 
Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 2 }: sender# [1:97:2123], cookie# 100 2025-05-07T09:03:39.661245Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:735: [1:95:2121] Ack update: ack to# [1:70:2109], cookie# 100, pathId# [OwnerId: 72057594046678944, LocalPathId: 2], version# 2 2025-05-07T09:03:39.661524Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:713: [1:95:2121] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 2 }: sender# [1:98:2124], cookie# 100 2025-05-07T09:03:39.661564Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:719: [1:95:2121] Ack for unknown update (already acked?): sender# [1:98:2124], cookie# 100 FAKE_COORDINATOR: Add transaction: 100 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 100 at step: 5000001 FAKE_COORDINATOR: Erasing txId 100 2025-05-07T09:03:39.665053Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:658: [1:95:2121] Handle TEvSchemeShard::TEvDescribeSchemeResult { Status: StatusSuccess Path: "/Root" PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 3 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: true } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/Root" } } } PathId: 1 PathOwnerId: 72057594046678944 }: sender# [1:70:2109], cookie# 100, event size# 340, preserialized size# 56 2025-05-07T09:03:39.665106Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:675: [1:95:2121] Update description: owner# 72057594046678944, pathId# [OwnerId: 72057594046678944, LocalPathId: 1], cookie# 100, is deletion# false, version: 4 2025-05-07T09:03:39.665225Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:96:2122] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:95:2121], cookie# 100 2025-05-07T09:03:39.665287Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:97:2123] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:95:2121], cookie# 100 2025-05-07T09:03:39.665352Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:98:2124] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:95:2121], cookie# 100 2025-05-07T09:03:39.667413Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:658: [1:95:2121] Handle TEvSchemeShard::TEvDescribeSchemeResult { Status: StatusSuccess Path: "/Root/DirB" PathDescription { Self { Name: "DirB" PathId: 2 SchemeshardId: 
72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 100 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 2 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsI ... [1:95:2121] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 5 }: sender# [1:96:2122], cookie# 101 2025-05-07T09:03:39.680054Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:96:2122] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:95:2121], cookie# 101 2025-05-07T09:03:39.680095Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:97:2123] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:95:2121], cookie# 101 2025-05-07T09:03:39.680127Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:98:2124] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:95:2121], cookie# 101 2025-05-07T09:03:39.680279Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:713: [1:95:2121] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 5 }: sender# [1:97:2123], cookie# 101 2025-05-07T09:03:39.680324Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:735: [1:95:2121] Ack update: ack to# [1:70:2109], cookie# 101, pathId# [OwnerId: 72057594046678944, LocalPathId: 1], version# 5 2025-05-07T09:03:39.680379Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:96:2122] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 3 }: sender# [1:12:2059], cookie# 101 2025-05-07T09:03:39.680453Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:97:2123] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 3 }: sender# [1:15:2062], cookie# 101 2025-05-07T09:03:39.680495Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:98:2124] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 3 }: sender# [1:18:2065], cookie# 101 2025-05-07T09:03:39.680795Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:713: [1:95:2121] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 5 }: sender# [1:98:2124], cookie# 101 2025-05-07T09:03:39.680892Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:713: [1:95:2121] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 3 }: sender# [1:96:2122], cookie# 101 2025-05-07T09:03:39.681239Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:713: [1:95:2121] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 
72057594046678944, LocalPathId: 2] Version: 3 }: sender# [1:97:2123], cookie# 101 2025-05-07T09:03:39.681278Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:735: [1:95:2121] Ack update: ack to# [1:70:2109], cookie# 101, pathId# [OwnerId: 72057594046678944, LocalPathId: 2], version# 3 FAKE_COORDINATOR: Add transaction: 101 at step: 5000002 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000001 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 101 at step: 5000002 2025-05-07T09:03:39.681651Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:713: [1:95:2121] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 3 }: sender# [1:98:2124], cookie# 101 2025-05-07T09:03:39.681703Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:719: [1:95:2121] Ack for unknown update (already acked?): sender# [1:98:2124], cookie# 101 2025-05-07T09:03:39.683281Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:658: [1:95:2121] Handle TEvSchemeShard::TEvDescribeSchemeResult { Status: StatusSuccess Path: "/Root" PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 6 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 6 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/Root" } } } PathId: 1 PathOwnerId: 72057594046678944 }: sender# [1:70:2109], cookie# 101, event size# 232, preserialized size# 2 2025-05-07T09:03:39.683328Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:675: [1:95:2121] Update description: owner# 72057594046678944, pathId# [OwnerId: 72057594046678944, LocalPathId: 1], cookie# 101, is deletion# false, version: 6 2025-05-07T09:03:39.683446Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:96:2122] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:95:2121], cookie# 101 2025-05-07T09:03:39.683500Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:97:2123] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:95:2121], cookie# 101 2025-05-07T09:03:39.683549Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:98:2124] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:95:2121], cookie# 101 FAKE_COORDINATOR: Erasing txId 101 2025-05-07T09:03:39.683828Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:658: [1:95:2121] Handle TEvSchemeShard::TEvDescribeSchemeResult { Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/Root/DirB\', error: path has been deleted (id: [OwnerId: 72057594046678944, LocalPathId: 2], type: 
EPathTypeDir, state: EPathStateNotExist), drop stepId: 5000002, drop txId: 101" Path: "/Root/DirB" PathId: 2 LastExistedPrefixPath: "/Root" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 72057594046678944 }: sender# [1:70:2109], cookie# 101, event size# 306, preserialized size# 0 2025-05-07T09:03:39.683868Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:675: [1:95:2121] Update description: owner# 72057594046678944, pathId# [OwnerId: 72057594046678944, LocalPathId: 2], cookie# 101, is deletion# true, version: 0 2025-05-07T09:03:39.683959Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:96:2122] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 6 }: sender# [1:12:2059], cookie# 101 2025-05-07T09:03:39.684014Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:97:2123] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 6 }: sender# [1:15:2062], cookie# 101 2025-05-07T09:03:39.684053Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:98:2124] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 6 }: sender# [1:18:2065], cookie# 101 2025-05-07T09:03:39.684117Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:713: [1:95:2121] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 6 }: sender# [1:96:2122], cookie# 101 2025-05-07T09:03:39.684157Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:96:2122] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:95:2121], cookie# 101 2025-05-07T09:03:39.684198Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:97:2123] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:95:2121], cookie# 101 2025-05-07T09:03:39.684269Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:98:2124] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:95:2121], cookie# 101 2025-05-07T09:03:39.684330Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:713: [1:95:2121] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 6 }: sender# [1:97:2123], cookie# 101 2025-05-07T09:03:39.684358Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:735: [1:95:2121] Ack update: ack to# [1:70:2109], cookie# 101, pathId# [OwnerId: 72057594046678944, LocalPathId: 1], version# 6 2025-05-07T09:03:39.684850Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:713: [1:95:2121] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 6 }: sender# [1:98:2124], cookie# 101 2025-05-07T09:03:39.684894Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:96:2122] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, 
LocalPathId: 2] Version: 18446744073709551615 }: sender# [1:12:2059], cookie# 101 2025-05-07T09:03:39.684939Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:97:2123] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 18446744073709551615 }: sender# [1:15:2062], cookie# 101 2025-05-07T09:03:39.684975Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:98:2124] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 18446744073709551615 }: sender# [1:18:2065], cookie# 101 2025-05-07T09:03:39.685106Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:713: [1:95:2121] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 18446744073709551615 }: sender# [1:96:2122], cookie# 101 2025-05-07T09:03:39.685196Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:713: [1:95:2121] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 18446744073709551615 }: sender# [1:97:2123], cookie# 101 2025-05-07T09:03:39.685222Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:735: [1:95:2121] Ack update: ack to# [1:70:2109], cookie# 101, pathId# [OwnerId: 72057594046678944, LocalPathId: 2], version# 18446744073709551615 2025-05-07T09:03:39.685509Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:713: [1:95:2121] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 18446744073709551615 }: sender# [1:98:2124], cookie# 101 2025-05-07T09:03:39.685543Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:719: [1:95:2121] Ack for unknown update (already acked?): sender# [1:98:2124], cookie# 101 TestModificationResult got TxId: 101, wait until txId: 101 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_populator/unittest >> TPopulatorTest::MakeDir [GOOD] Test command err: 2025-05-07T09:03:39.526899Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T09:03:39.526969Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TestModificationResults wait txId: 100 2025-05-07T09:03:39.658631Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:658: [1:95:2121] Handle TEvSchemeShard::TEvDescribeSchemeResult { Status: StatusSuccess Path: "/Root" PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 2 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: true } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { 
ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/Root" } } } PathId: 1 PathOwnerId: 72057594046678944 }: sender# [1:70:2109], cookie# 100, event size# 330, preserialized size# 51 2025-05-07T09:03:39.658712Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:675: [1:95:2121] Update description: owner# 72057594046678944, pathId# [OwnerId: 72057594046678944, LocalPathId: 1], cookie# 100, is deletion# false, version: 3 2025-05-07T09:03:39.660097Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:96:2122] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:95:2121], cookie# 100 2025-05-07T09:03:39.660184Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:97:2123] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:95:2121], cookie# 100 2025-05-07T09:03:39.660232Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:98:2124] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:95:2121], cookie# 100 2025-05-07T09:03:39.660790Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:658: [1:95:2121] Handle TEvSchemeShard::TEvDescribeSchemeResult { Status: StatusSuccess Path: "/Root/DirA" PathDescription { Self { Name: "DirA" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: false CreateTxId: 100 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 2 PathOwnerId: 72057594046678944 }: sender# [1:70:2109], cookie# 100, event size# 220, preserialized size# 2 2025-05-07T09:03:39.660862Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:675: [1:95:2121] Update description: owner# 72057594046678944, pathId# [OwnerId: 72057594046678944, LocalPathId: 2], cookie# 100, is deletion# false, version: 2 2025-05-07T09:03:39.660961Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:96:2122] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 3 }: sender# [1:12:2059], cookie# 100 2025-05-07T09:03:39.661014Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:97:2123] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 3 }: sender# [1:15:2062], cookie# 100 2025-05-07T09:03:39.661047Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:98:2124] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 3 }: sender# [1:18:2065], cookie# 100 2025-05-07T09:03:39.661180Z node 1 
:SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:713: [1:95:2121] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 3 }: sender# [1:96:2122], cookie# 100 2025-05-07T09:03:39.661227Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:96:2122] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:95:2121], cookie# 100 2025-05-07T09:03:39.661290Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:97:2123] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:95:2121], cookie# 100 2025-05-07T09:03:39.661327Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:98:2124] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:95:2121], cookie# 100 2025-05-07T09:03:39.661470Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:713: [1:95:2121] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 3 }: sender# [1:97:2123], cookie# 100 2025-05-07T09:03:39.661499Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:735: [1:95:2121] Ack update: ack to# [1:70:2109], cookie# 100, pathId# [OwnerId: 72057594046678944, LocalPathId: 1], version# 3 2025-05-07T09:03:39.661543Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:96:2122] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 2 }: sender# [1:12:2059], cookie# 100 2025-05-07T09:03:39.661589Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:97:2123] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 2 }: sender# [1:15:2062], cookie# 100 2025-05-07T09:03:39.661640Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:98:2124] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 2 }: sender# [1:18:2065], cookie# 100 2025-05-07T09:03:39.662033Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:713: [1:95:2121] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 3 }: sender# [1:98:2124], cookie# 100 2025-05-07T09:03:39.662178Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:713: [1:95:2121] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 2 }: sender# [1:96:2122], cookie# 100 2025-05-07T09:03:39.662247Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:713: [1:95:2121] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 2 }: sender# [1:97:2123], cookie# 100 2025-05-07T09:03:39.662280Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:735: [1:95:2121] Ack update: ack to# [1:70:2109], cookie# 100, pathId# [OwnerId: 72057594046678944, LocalPathId: 2], version# 2 2025-05-07T09:03:39.662508Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:713: [1:95:2121] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 2 }: sender# [1:98:2124], cookie# 100 2025-05-07T09:03:39.662544Z node 1 
:SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:719: [1:95:2121] Ack for unknown update (already acked?): sender# [1:98:2124], cookie# 100 FAKE_COORDINATOR: Add transaction: 100 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 100 at step: 5000001 FAKE_COORDINATOR: Erasing txId 100 2025-05-07T09:03:39.664970Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:658: [1:95:2121] Handle TEvSchemeShard::TEvDescribeSchemeResult { Status: StatusSuccess Path: "/Root" PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 3 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: true } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/Root" } } } PathId: 1 PathOwnerId: 72057594046678944 }: sender# [1:70:2109], cookie# 100, event size# 340, preserialized size# 56 2025-05-07T09:03:39.665013Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:675: [1:95:2121] Update description: owner# 72057594046678944, pathId# [OwnerId: 72057594046678944, LocalPathId: 1], cookie# 100, is deletion# false, version: 4 2025-05-07T09:03:39.665116Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:96:2122] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:95:2121], cookie# 100 2025-05-07T09:03:39.665167Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:97:2123] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:95:2121], cookie# 100 2025-05-07T09:03:39.665222Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:98:2124] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:95:2121], cookie# 100 2025-05-07T09:03:39.667394Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:658: [1:95:2121] Handle TEvSchemeShard::TEvDescribeSchemeResult { Status: StatusSuccess Path: "/Root/DirA" PathDescription { Self { Name: "DirA" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 100 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 2 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 
ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 2 PathOwnerId: 72057594046678944 }: sender# [1:70:2109], cookie# 100, event size# 225, preserialized size# 2 2025-05-07T09:03:39.667460Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:675: [1:95:2121] Update description: owner# 72057594046678944, pathId# [OwnerId: 72057594046678944, LocalPathId: 2], cookie# 100, is deletion# false, version: 3 2025-05-07T09:03:39.667546Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:96:2122] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 4 }: sender# [1:12:2059], cookie# 100 2025-05-07T09:03:39.667587Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:97:2123] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 4 }: sender# [1:15:2062], cookie# 100 2025-05-07T09:03:39.667659Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:98:2124] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 4 }: sender# [1:18:2065], cookie# 100 2025-05-07T09:03:39.667829Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:713: [1:95:2121] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 4 }: sender# [1:96:2122], cookie# 100 2025-05-07T09:03:39.667876Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:96:2122] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:95:2121], cookie# 100 2025-05-07T09:03:39.667911Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:97:2123] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:95:2121], cookie# 100 2025-05-07T09:03:39.667941Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:98:2124] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:95:2121], cookie# 100 2025-05-07T09:03:39.668057Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:713: [1:95:2121] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 4 }: sender# [1:97:2123], cookie# 100 2025-05-07T09:03:39.668085Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:735: [1:95:2121] Ack update: ack to# [1:70:2109], cookie# 100, pathId# [OwnerId: 72057594046678944, LocalPathId: 1], version# 4 2025-05-07T09:03:39.668127Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:96:2122] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 3 }: sender# [1:12:2059], cookie# 100 2025-05-07T09:03:39.668170Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:97:2123] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 3 }: sender# [1:15:2062], cookie# 100 2025-05-07T09:03:39.668398Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:713: [1:95:2121] Handle NKikimrSchemeBoard.TEvUpdateAck { 
Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 4 }: sender# [1:98:2124], cookie# 100 2025-05-07T09:03:39.668430Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:98:2124] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 3 }: sender# [1:18:2065], cookie# 100 2025-05-07T09:03:39.668474Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:713: [1:95:2121] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 3 }: sender# [1:96:2122], cookie# 100 2025-05-07T09:03:39.668715Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:713: [1:95:2121] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 3 }: sender# [1:97:2123], cookie# 100 2025-05-07T09:03:39.668746Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:735: [1:95:2121] Ack update: ack to# [1:70:2109], cookie# 100, pathId# [OwnerId: 72057594046678944, LocalPathId: 2], version# 3 2025-05-07T09:03:39.668956Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:713: [1:95:2121] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 3 }: sender# [1:98:2124], cookie# 100 2025-05-07T09:03:39.668993Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:719: [1:95:2121] Ack for unknown update (already acked?): sender# [1:98:2124], cookie# 100 TestModificationResult got TxId: 100, wait until txId: 100 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/proxy_service/ut/unittest >> TableCreation::ConcurrentUpdateTable [GOOD] Test command err: 2025-05-07T09:03:29.139772Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501626854758392504:2067];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:03:29.139821Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00314d/r3tmp/tmpP3lr3Y/pdisk_1.dat 2025-05-07T09:03:30.196026Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:03:30.251721Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:03:30.268497Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:03:30.276649Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:03:30.286032Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T09:03:30.409718Z node 1 :BS_CONTROLLER ERROR: {BSC07@impl.h:2175} ProcessControllerEvent event processing took too much time Type# 268637706 Duration# 0.101736s 2025-05-07T09:03:30.409799Z node 1 :BS_CONTROLLER ERROR: {BSC00@bsc.cpp:666} StateWork event processing took too much time Type# 2146435078 Duration# 0.101833s TClient is connected to server 
localhost:25333 TServer::EnableGrpc on GrpcPort 24165, node 1 2025-05-07T09:03:31.309903Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1665: Updated YQL logs priority to current level: 4 2025-05-07T09:03:31.317839Z node 1 :KQP_PROXY INFO: kqp_proxy_service.cpp:442: Cannot start publishing usage, tenants: /dc-1, empty 2025-05-07T09:03:31.318410Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:512: Subscribed for config changes. 2025-05-07T09:03:31.318447Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:519: Updated table service config. 2025-05-07T09:03:31.318489Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1665: Updated YQL logs priority to current level: 4 2025-05-07T09:03:31.318520Z node 1 :KQP_PROXY INFO: kqp_proxy_service.cpp:442: Cannot start publishing usage, tenants: /dc-1, empty 2025-05-07T09:03:31.318624Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:425: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-05-07T09:03:31.318662Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:425: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-05-07T09:03:31.318678Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:425: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-05-07T09:03:31.318692Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:425: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-05-07T09:03:31.401893Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:03:31.401920Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:03:31.401937Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:03:31.402066Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-05-07T09:03:32.529294Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:03:32.538808Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:147: Table script_executions updater. Describe result: PathErrorUnknown 2025-05-07T09:03:32.538841Z node 1 :KQP_PROXY NOTICE: table_creator.cpp:167: Table script_executions updater. 
Creating table 2025-05-07T09:03:32.538894Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:100: Table script_executions updater. Full table path:/dc-1/.metadata/script_executions 2025-05-07T09:03:32.538971Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:147: Table script_execution_leases updater. Describe result: PathErrorUnknown 2025-05-07T09:03:32.538984Z node 1 :KQP_PROXY NOTICE: table_creator.cpp:167: Table script_execution_leases updater. Creating table 2025-05-07T09:03:32.539008Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:100: Table script_execution_leases updater. Full table path:/dc-1/.metadata/script_execution_leases 2025-05-07T09:03:32.539214Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:147: Table result_sets updater. Describe result: PathErrorUnknown 2025-05-07T09:03:32.539230Z node 1 :KQP_PROXY NOTICE: table_creator.cpp:167: Table result_sets updater. Creating table 2025-05-07T09:03:32.539261Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:100: Table result_sets updater. Full table path:/dc-1/.metadata/result_sets 2025-05-07T09:03:32.541352Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:1, at schemeshard: 72057594046644480 2025-05-07T09:03:32.542986Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 2025-05-07T09:03:32.543860Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 2025-05-07T09:03:32.557494Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:190: Table result_sets updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976710660 SchemeShardStatus: 1 SchemeShardTabletId: 72057594046644480 PathId: 3 } 2025-05-07T09:03:32.557510Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:190: Table script_executions updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976710658 SchemeShardStatus: 1 SchemeShardTabletId: 72057594046644480 PathId: 4 } 2025-05-07T09:03:32.557546Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:261: Table script_executions updater. Subscribe on create table tx: 281474976710658 2025-05-07T09:03:32.557546Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:261: Table result_sets updater. Subscribe on create table tx: 281474976710660 2025-05-07T09:03:32.557601Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:190: Table script_execution_leases updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976710659 SchemeShardStatus: 1 SchemeShardTabletId: 72057594046644480 PathId: 5 } 2025-05-07T09:03:32.557609Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:261: Table script_execution_leases updater. Subscribe on create table tx: 281474976710659 2025-05-07T09:03:32.667829Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:290: Table result_sets updater. Request: create. Transaction completed: 281474976710660. Doublechecking... 2025-05-07T09:03:32.688971Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:290: Table script_execution_leases updater. Request: create. Transaction completed: 281474976710659. Doublechecking... 2025-05-07T09:03:32.690575Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:290: Table script_executions updater. Request: create. Transaction completed: 281474976710658. Doublechecking... 
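
The "Creating table" -> "Subscribe on create table tx" -> "Doublechecking" sequence above traces the table-creator flow this test exercises, and the entries that follow show concurrent updaters hitting "path is under operation" and falling back. A minimal sketch of that propose-or-fall-back pattern, assuming invented names throughout (TProposeResult, ProposeAlter, the status codes modeled as plain ints — this is not YDB's actual table_creator.cpp API, just an illustration of the race the log records):

    #include <iostream>
    #include <vector>

    struct TProposeResult {
        int Status;      // 53 = proposal accepted, 52 = path is under operation
        bool Subscribed; // the winner subscribes to its transaction's completion
    };

    // Simulated scheme shard: only the first proposal against a free path wins.
    TProposeResult ProposeAlter(bool& pathBusy) {
        if (!pathBusy) {
            pathBusy = true;
            return {53, true};
        }
        return {52, false}; // "Check failed: path ... is under operation"
    }

    int main() {
        bool pathBusy = false;
        std::vector<TProposeResult> results;

        // Several updaters race to alter the same table.
        for (int updater = 0; updater < 4; ++updater) {
            results.push_back(ProposeAlter(pathBusy));
        }

        // The winner's transaction completes and frees the path.
        pathBusy = false;

        for (size_t i = 0; i < results.size(); ++i) {
            if (results[i].Status == 52) {
                // Loser: cannot subscribe to the concurrent transaction,
                // so it falls back to re-describing the table.
                std::cout << "updater " << i
                          << ": falling back, doublechecking...\n";
            }
            // Everyone converges once the observed schema matches the goal.
            std::cout << "updater " << i
                      << ": column diff is empty, finishing\n";
        }
    }

Under this model the single accepted proposal (status 53) drives the schema change, while every other updater converges by polling describe until the column diff is empty — matching the repeated "Column diff is empty, finishing" entries below.
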
2025-05-07T09:03:32.745553Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:362: Table result_sets updater. Column diff is empty, finishing 2025-05-07T09:03:32.761406Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:362: Table script_executions updater. Column diff is empty, finishing 2025-05-07T09:03:32.764313Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:362: Table script_execution_leases updater. Column diff is empty, finishing 2025-05-07T09:03:32.764654Z node 1 :KQP_PROXY DEBUG: query_actor.cpp:134: [TQueryBase] [TCreateScriptOperationQuery] TraceId: f389b71b-4dc09374-dcc92551-7c097d50, Bootstrap. Database: /dc-1 2025-05-07T09:03:32.777291Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1468: Request has 18444997465096.774353s seconds to be completed 2025-05-07T09:03:32.779922Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1543: Created new session, sessionId: ydb://session/3?node_id=1&id=OTM0NTQzZjEtNjJhZjIxY2MtYWY2MzRkNjItZTJkMjFiMWI=, workerId: [1:7501626867643295277:2334], database: /dc-1, longSession: 1, local sessions count: 1 2025-05-07T09:03:32.780078Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:649: Received create session request, trace_id: 2025-05-07T09:03:32.780127Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:425: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-05-07T09:03:32.780779Z node 1 :KQP_PROXY DEBUG: query_actor.cpp:197: [TQueryBase] [TCreateScriptOperationQuery] TraceId: f389b71b-4dc09374-dcc92551-7c097d50, RunDataQuery: -- TCreateScriptOperationQuery::OnRunQuery DECLARE $database AS Text; DECLARE $execution_id AS Text; DECLARE $run_script_actor_id AS Text; DECLARE $execution_status AS Int32; DECLARE $execution_mode AS Int32; DECLARE $query_text AS Text; DECLARE $syntax AS Int32; DECLARE $meta AS JsonDocument; DECLARE $lease_duration AS Interval; DECLARE $execution_meta_ttl AS Interval; UPSERT INTO `.metadata/script_executions` (database, execution_id, run_script_actor_id, execution_status, execution_mode, start_ts, query_text, syntax, meta, expire_at) VALUES ($database, $execution_id, $run_script_actor_id, $execution_status, $execution_mode, CurrentUtcTimestamp(), $query_text, $syntax, $meta, CurrentUtcTimestamp() + $execution_meta_ttl); UPSERT INTO `.metadata/script_execution_leases` (database, execution_id, lease_deadline, lease_generation, expire_at) VALUES ($database, $execution_id, CurrentUtcTimestamp() + $lease_duration, 1, CurrentUtcTimestamp() + $execution_meta_ttl); 2025-05-07T09:03:32.781273Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:767: Ctx: { TraceId: , Database: /dc-1, DatabaseId: , SessionId: ydb://session/3?node_id=1&id=OTM0NTQzZjEtNjJhZjIxY2MtYWY2MzRkNjItZTJkMjFiMWI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 300.000000s timeout: 0.000000s c ... athStateAlter)" severity: 1 } SchemeShardStatus: 8 SchemeShardReason: "Check failed: path: \'/dc-1/.test/test_table\', error: path is under operation (id: [OwnerId: 72057594046644480, LocalPathId: 10], type: EPathTypeTable, state: EPathStateAlter)" SchemeShardTabletId: 72057594046644480 } 2025-05-07T09:03:38.810314Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:249: Table test_table updater. Unable to subscribe to concurrent transaction, falling back 2025-05-07T09:03:38.810407Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:190: Table test_table updater. 
TEvProposeTransactionStatus: { Status: 52 TxId: 281474976715672 Issues { message: "Check failed: path: \'/dc-1/.test/test_table\', error: path is under operation (id: [OwnerId: 72057594046644480, LocalPathId: 10], type: EPathTypeTable, state: EPathStateAlter)" severity: 1 } SchemeShardStatus: 8 SchemeShardReason: "Check failed: path: \'/dc-1/.test/test_table\', error: path is under operation (id: [OwnerId: 72057594046644480, LocalPathId: 10], type: EPathTypeTable, state: EPathStateAlter)" SchemeShardTabletId: 72057594046644480 } 2025-05-07T09:03:38.810416Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:249: Table test_table updater. Unable to subscribe to concurrent transaction, falling back 2025-05-07T09:03:38.810425Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:190: Table test_table updater. TEvProposeTransactionStatus: { Status: 52 TxId: 281474976715670 Issues { message: "Check failed: path: \'/dc-1/.test/test_table\', error: path is under operation (id: [OwnerId: 72057594046644480, LocalPathId: 10], type: EPathTypeTable, state: EPathStateAlter)" severity: 1 } SchemeShardStatus: 8 SchemeShardReason: "Check failed: path: \'/dc-1/.test/test_table\', error: path is under operation (id: [OwnerId: 72057594046644480, LocalPathId: 10], type: EPathTypeTable, state: EPathStateAlter)" SchemeShardTabletId: 72057594046644480 } 2025-05-07T09:03:38.810440Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:249: Table test_table updater. Unable to subscribe to concurrent transaction, falling back 2025-05-07T09:03:38.810534Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:190: Table test_table updater. TEvProposeTransactionStatus: { Status: 52 TxId: 281474976715666 Issues { message: "Check failed: path: \'/dc-1/.test/test_table\', error: path is under operation (id: [OwnerId: 72057594046644480, LocalPathId: 10], type: EPathTypeTable, state: EPathStateAlter)" severity: 1 } SchemeShardStatus: 8 SchemeShardReason: "Check failed: path: \'/dc-1/.test/test_table\', error: path is under operation (id: [OwnerId: 72057594046644480, LocalPathId: 10], type: EPathTypeTable, state: EPathStateAlter)" SchemeShardTabletId: 72057594046644480 } 2025-05-07T09:03:38.810538Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:249: Table test_table updater. Unable to subscribe to concurrent transaction, falling back 2025-05-07T09:03:38.844839Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:290: Table test_table updater. Request: alter. Transaction completed: 281474976715668. Doublechecking... 2025-05-07T09:03:38.863501Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table updater. Column diff is empty, finishing 2025-05-07T09:03:38.864386Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table updater. Column diff is empty, finishing 2025-05-07T09:03:38.865894Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table updater. Column diff is empty, finishing 2025-05-07T09:03:38.880711Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table updater. 
Column diff is empty, finishing 2025-05-07T09:03:38.882121Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:953: Forwarded response to sender actor, requestId: 8, sender: [2:7501626894937720454:2366], selfId: [2:7501626886347784975:2267], source: [2:7501626894937720453:2365] 2025-05-07T09:03:38.882296Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:240: [TQueryBase] [TSaveScriptExecutionResultMetaQuery] TraceId: 1ea240e7-94c74fac-1d2993fc-753ca853, TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=ZTY1NjY4MWUtNWZlMjEyMTMtMjljZTdhZTMtNzExMmVlYzk=, TxId: 2025-05-07T09:03:38.882324Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:367: [TQueryBase] [TSaveScriptExecutionResultMetaQuery] TraceId: 1ea240e7-94c74fac-1d2993fc-753ca853, Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=ZTY1NjY4MWUtNWZlMjEyMTMtMjljZTdhZTMtNzExMmVlYzk=, TxId: 2025-05-07T09:03:38.882525Z node 2 :KQP_PROXY DEBUG: kqp_script_executions.cpp:1907: [ScriptExecutions] [TSaveScriptExecutionResultActor] ExecutionId: 1ea240e7-94c74fac-1d2993fc-753ca853, start saving rows range [0; 1) 2025-05-07T09:03:38.882611Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:134: [TQueryBase] [TSaveScriptExecutionResultQuery] TraceId: 1ea240e7-94c74fac-1d2993fc-753ca853, Bootstrap. Database: /dc-1 2025-05-07T09:03:38.882760Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1353: Session closed, sessionId: ydb://session/3?node_id=2&id=ZTY1NjY4MWUtNWZlMjEyMTMtMjljZTdhZTMtNzExMmVlYzk=, workerId: [2:7501626894937720453:2365], local sessions count: 2 2025-05-07T09:03:38.882779Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table updater. Column diff is empty, finishing 2025-05-07T09:03:38.882795Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1468: Request has 18444997465090.668831s seconds to be completed 2025-05-07T09:03:38.884509Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1543: Created new session, sessionId: ydb://session/3?node_id=2&id=OTQyNGMyOWYtMzE3MzViZDQtOTEzYWUyYzctMzBkNzcz, workerId: [2:7501626894937720588:2375], database: /dc-1, longSession: 1, local sessions count: 3 2025-05-07T09:03:38.884655Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:649: Received create session request, trace_id: 2025-05-07T09:03:38.885017Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:197: [TQueryBase] [TSaveScriptExecutionResultQuery] TraceId: 1ea240e7-94c74fac-1d2993fc-753ca853, RunDataQuery: -- TSaveScriptExecutionResultQuery::OnRunQuery DECLARE $database AS Text; DECLARE $execution_id AS Text; DECLARE $result_set_id AS Int32; DECLARE $expire_at AS Optional; DECLARE $items AS List>; UPSERT INTO `.metadata/result_sets` SELECT $database as database, $execution_id as execution_id, $result_set_id as result_set_id, T.row_id as row_id, $expire_at as expire_at, T.result_set as result_set, T.accumulated_size as accumulated_size FROM AS_TABLE($items) AS T; 2025-05-07T09:03:38.885400Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:767: Ctx: { TraceId: , Database: /dc-1, DatabaseId: , SessionId: ydb://session/3?node_id=2&id=OTQyNGMyOWYtMzE3MzViZDQtOTEzYWUyYzctMzBkNzcz, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 300.000000s timeout: 0.000000s cancelAfter: 0.000000s. 
Send request to target, requestId: 10, targetId: [2:7501626894937720588:2375] 2025-05-07T09:03:38.885433Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1294: Scheduled timeout timer for requestId: 10 timeout: 300.000000s actor id: [2:7501626894937720590:2684] 2025-05-07T09:03:38.887436Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table updater. Column diff is empty, finishing 2025-05-07T09:03:38.887442Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table updater. Column diff is empty, finishing 2025-05-07T09:03:38.889469Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table updater. Column diff is empty, finishing 2025-05-07T09:03:38.902878Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table updater. Column diff is empty, finishing 2025-05-07T09:03:38.910611Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table updater. Column diff is empty, finishing 2025-05-07T09:03:38.929532Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1353: Session closed, sessionId: ydb://session/3?node_id=2&id=MTI1ZGQ4MGYtNDNhMzBkN2YtMzkwMDZiMS1mMmFlMDkzZQ==, workerId: [2:7501626894937720362:2360], local sessions count: 2 2025-05-07T09:03:39.061527Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:953: Forwarded response to sender actor, requestId: 10, sender: [2:7501626894937720589:2376], selfId: [2:7501626886347784975:2267], source: [2:7501626894937720588:2375] 2025-05-07T09:03:39.061839Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:240: [TQueryBase] [TSaveScriptExecutionResultQuery] TraceId: 1ea240e7-94c74fac-1d2993fc-753ca853, TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=OTQyNGMyOWYtMzE3MzViZDQtOTEzYWUyYzctMzBkNzcz, TxId: 2025-05-07T09:03:39.061876Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:367: [TQueryBase] [TSaveScriptExecutionResultQuery] TraceId: 1ea240e7-94c74fac-1d2993fc-753ca853, Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=OTQyNGMyOWYtMzE3MzViZDQtOTEzYWUyYzctMzBkNzcz, TxId: 2025-05-07T09:03:39.061998Z node 2 :KQP_PROXY DEBUG: kqp_script_executions.cpp:1939: [ScriptExecutions] [TSaveScriptExecutionResultActor] ExecutionId: 1ea240e7-94c74fac-1d2993fc-753ca853, result part successfully saved 2025-05-07T09:03:39.062025Z node 2 :KQP_PROXY DEBUG: kqp_script_executions.cpp:1946: [ScriptExecutions] [TSaveScriptExecutionResultActor] ExecutionId: 1ea240e7-94c74fac-1d2993fc-753ca853, reply SUCCESS, issues: 2025-05-07T09:03:39.062277Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1353: Session closed, sessionId: ydb://session/3?node_id=2&id=OTQyNGMyOWYtMzE3MzViZDQtOTEzYWUyYzctMzBkNzcz, workerId: [2:7501626894937720588:2375], local sessions count: 1 2025-05-07T09:03:39.062313Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:134: [TQueryBase] [TSaveScriptFinalStatusActor] TraceId: 1ea240e7-94c74fac-1d2993fc-753ca853, Bootstrap. 
Database: /dc-1 2025-05-07T09:03:39.062426Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1468: Request has 18444997465090.489209s seconds to be completed 2025-05-07T09:03:39.064433Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1543: Created new session, sessionId: ydb://session/3?node_id=2&id=ZjVlNDE4NWEtNmI3YmI2YTQtM2VjNmJhLTU0ZjFhNTRm, workerId: [2:7501626899232687922:2387], database: /dc-1, longSession: 1, local sessions count: 2 2025-05-07T09:03:39.064577Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:649: Received create session request, trace_id: 2025-05-07T09:03:39.064801Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:197: [TQueryBase] [TSaveScriptFinalStatusActor] TraceId: 1ea240e7-94c74fac-1d2993fc-753ca853, RunDataQuery: -- TSaveScriptFinalStatusActor::OnRunQuery DECLARE $database AS Text; DECLARE $execution_id AS Text; SELECT operation_status, finalization_status, meta, customer_supplied_id, user_token, script_sinks, script_secret_names FROM `.metadata/script_executions` WHERE database = $database AND execution_id = $execution_id AND (expire_at > CurrentUtcTimestamp() OR expire_at IS NULL); SELECT lease_generation FROM `.metadata/script_execution_leases` WHERE database = $database AND execution_id = $execution_id AND (expire_at > CurrentUtcTimestamp() OR expire_at IS NULL); 2025-05-07T09:03:39.065096Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:767: Ctx: { TraceId: , Database: /dc-1, DatabaseId: , SessionId: ydb://session/3?node_id=2&id=ZjVlNDE4NWEtNmI3YmI2YTQtM2VjNmJhLTU0ZjFhNTRm, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 300.000000s timeout: 0.000000s cancelAfter: 0.000000s. Send request to target, requestId: 12, targetId: [2:7501626899232687922:2387] 2025-05-07T09:03:39.065130Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1294: Scheduled timeout timer for requestId: 12 timeout: 300.000000s actor id: [2:7501626899232687924:2701] |92.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/grpc_services/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data/unittest >> KqpUserConstraint::KqpReadNull+UploadNull [GOOD] Test command err: 2025-05-07T09:03:33.130212Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:105:2151], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T09:03:33.130429Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T09:03:33.130790Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/004803/r3tmp/tmpdHQWL1/pdisk_1.dat 2025-05-07T09:03:34.660314Z node 1 :BS_CONTROLLER ERROR: {BSC07@impl.h:2175} ProcessControllerEvent event processing took too much time Type# 268637706 Duration# 0.101120s 2025-05-07T09:03:34.660404Z node 1 :BS_CONTROLLER ERROR: {BSC00@bsc.cpp:666} StateWork event processing took too much time Type# 2146435078 Duration# 0.101225s 2025-05-07T09:03:34.746077Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-05-07T09:03:34.864097Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:03:34.977234Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:03:34.977338Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:03:35.003420Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:03:35.221067Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-05-07T09:03:36.101519Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:853:2701], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:03:36.101670Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:864:2706], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:03:36.101797Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:03:36.160675Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480 2025-05-07T09:03:36.317122Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:867:2709], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-05-07T09:03:36.397403Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:936:2747] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:03:39.444437Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715660. Ctx: { TraceId: 01jtmzrwm84mfdhdvhf94xvp4y, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NmExNTczZTgtOWQzMDVjYTctMTIyNGFjY2EtNmZmYzhmMjU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:03:39.506696Z node 1 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1552: SelfId: [1:967:2768], TxId: 281474976715660, task: 1. Ctx: { TraceId : 01jtmzrwm84mfdhdvhf94xvp4y. SessionId : ydb://session/3?node_id=1&id=NmExNTczZTgtOWQzMDVjYTctMTIyNGFjY2EtNmZmYzhmMjU=. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. Source[0] fatal error: {
: Fatal: Read from column index 1: got NULL from NOT NULL column, code: 2012 } 2025-05-07T09:03:39.509423Z node 1 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:678: SelfId: [1:967:2768], TxId: 281474976715660, task: 1. Ctx: { TraceId : 01jtmzrwm84mfdhdvhf94xvp4y. SessionId : ydb://session/3?node_id=1&id=NmExNTczZTgtOWQzMDVjYTctMTIyNGFjY2EtNmZmYzhmMjU=. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. InternalError: INTERNAL_ERROR KIKIMR_CONSTRAINT_VIOLATION: {
: Fatal: Read from column index 1: got NULL from NOT NULL column, code: 2012 }. 2025-05-07T09:03:39.521273Z node 1 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:678: SelfId: [1:968:2769], TxId: 281474976715660, task: 2. Ctx: { CustomerSuppliedId : . TraceId : 01jtmzrwm84mfdhdvhf94xvp4y. SessionId : ydb://session/3?node_id=1&id=NmExNTczZTgtOWQzMDVjYTctMTIyNGFjY2EtNmZmYzhmMjU=. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. InternalError: INTERNAL_ERROR DEFAULT_ERROR: {
: Error: Terminate execution }. 2025-05-07T09:03:39.529861Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2578: SessionId: ydb://session/3?node_id=1&id=NmExNTczZTgtOWQzMDVjYTctMTIyNGFjY2EtNmZmYzhmMjU=, ActorId: [1:851:2699], ActorState: ExecuteState, TraceId: 01jtmzrwm84mfdhdvhf94xvp4y, Create QueryResponse for error on request, msg: 2025-05-07T09:03:39.535170Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715661. Ctx: { TraceId: 01jtmzrwm84mfdhdvhf94xvp4y, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NmExNTczZTgtOWQzMDVjYTctMTIyNGFjY2EtNmZmYzhmMjU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root >> SplitPathTests::WithDatabaseShouldFail >> SplitPathTests::WithoutDatabaseShouldSuccess [GOOD] |92.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/grpc_services/ut/unittest |92.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/grpc_services/ut/unittest |92.4%| [TA] $(B)/ydb/core/tx/scheme_board/ut_populator/test-results/unittest/{meta.json ... results_accumulator.log} |92.4%| [TA] {RESULT} $(B)/ydb/core/tx/scheme_board/ut_populator/test-results/unittest/{meta.json ... results_accumulator.log} ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/proxy_service/ut/unittest >> ScriptExecutionsTest::AttemptToUpdateDeletedLease [GOOD] Test command err: 2025-05-07T09:03:29.214828Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501626858141439725:2073];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:03:29.214897Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00307f/r3tmp/tmpYIuWN5/pdisk_1.dat 2025-05-07T09:03:30.286077Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T09:03:30.329870Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:03:30.329943Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:03:30.409156Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:03:30.409956Z node 1 :BS_CONTROLLER ERROR: {BSC07@impl.h:2175} ProcessControllerEvent event processing took too much time Type# 268637706 Duration# 0.102273s 2025-05-07T09:03:30.410060Z node 1 :BS_CONTROLLER ERROR: {BSC00@bsc.cpp:666} StateWork event processing took too much time Type# 2146435078 Duration# 0.102368s 2025-05-07T09:03:30.435128Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 14093, node 1 2025-05-07T09:03:31.401851Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:03:31.401875Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:03:31.401880Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:03:31.402024Z node 1 
:NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:63705 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:03:32.627719Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:03:32.754913Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:881: Received ping session request, request_id: 2, sender: [1:7501626871026342709:2332], trace_id: 01jtmzrsc6cjnv654wfb9y0bck 2025-05-07T09:03:32.755131Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1294: Scheduled timeout timer for requestId: 2 timeout: 5.000000s actor id: [0:0:0] 2025-05-07T09:03:32.755470Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:559: Session not found, targetId: [2:8678280833929343339:121] requestId: 2 2025-05-07T09:03:32.757322Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:953: TraceId: "01jtmzrsc6cjnv654wfb9y0bck", Forwarded response to sender actor, requestId: 2, sender: [1:7501626871026342709:2332], selfId: [1:7501626858141439979:2281], source: [1:7501626858141439979:2281] 2025-05-07T09:03:32.757411Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:425: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-05-07T09:03:33.718030Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7501626871773401026:2059];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:03:33.718094Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00307f/r3tmp/tmplGoK1W/pdisk_1.dat 2025-05-07T09:03:33.779624Z node 4 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:03:33.838321Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:03:33.838385Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:03:33.839903Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:16810 TServer::EnableGrpc on GrpcPort 5578, node 4 2025-05-07T09:03:33.928165Z node 4 
:NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:03:33.928185Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:03:33.928192Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:03:33.928305Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-05-07T09:03:33.958506Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:03:35.574986Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1665: Updated YQL logs priority to current level: 4 2025-05-07T09:03:35.576051Z node 4 :KQP_PROXY INFO: kqp_proxy_service.cpp:442: Cannot start publishing usage, tenants: /dc-1, empty 2025-05-07T09:03:35.576729Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:512: Subscribed for config changes. 2025-05-07T09:03:35.576761Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:519: Updated table service config. 2025-05-07T09:03:35.576776Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1665: Updated YQL logs priority to current level: 4 2025-05-07T09:03:35.576804Z node 4 :KQP_PROXY INFO: kqp_proxy_service.cpp:442: Cannot start publishing usage, tenants: /dc-1, empty 2025-05-07T09:03:35.576876Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:425: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-05-07T09:03:35.576908Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:425: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-05-07T09:03:35.576921Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:425: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-05-07T09:03:35.576942Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:425: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-05-07T09:03:35.577393Z node 4 :KQP_PROXY DEBUG: table_creator.cpp:147: Table script_execution_leases updater. Describe result: PathErrorUnknown 2025-05-07T09:03:35.577430Z node 4 :KQP_PROXY NOTICE: table_creator.cpp:167: Table script_execution_leases updater. 
Creating table 2025-05-07T09:03:35.577466Z node 4 :KQP_PROXY DEBUG: table_creator.cpp:100: Table script_execution_leases updater. Full table path:/dc-1/.metadata/script_execution_leases 2025-05-07T09:03:35.577554Z node 4 :KQP_PROXY DEBUG: table_creator.cpp:147: Table script_executions updater. Describe result: PathErrorUnknown 2025-05-07T09:03:35.577564Z node 4 :KQP_PROXY NOTICE: table_creator.cpp:167: Table script_executions updater. Creating table 2025-05-07T09:03:35.577582Z node 4 :KQP_PROXY DEBUG: table_creator.cpp:100: Table script_executions updater. Full table path:/dc-1/.metadata/script_executions 2025-05-07T09:03:35.578109Z node 4 :KQP_PROXY DEBUG: table_creator.cpp:147: Table result_sets updater. Describe result: PathErrorUnknown 2025-05-07T09:03:35.578121Z node 4 :KQP_PROXY NOTICE: table_creator.cpp:167: Table result_sets updater. Creating table 2025-05-07T09:03:35.578135Z node 4 :KQP_PROXY DEBUG: table_creator.cpp:100: Table result_sets updater. Full table path:/dc-1/.metadata/result_sets 2025-05-07T09:03:35.581589Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:1, at schemeshard: 72057594046644480 2025-05-07T09:03:35.583084Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-05-07T09:03:35.584826Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 2025-05-07T09:03:35.594866Z node 4 :KQP_PROXY DEBUG: table_creator.cpp:190: Table script_executions updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976715659 SchemeShardStatus: 1 SchemeShardTabletId: 72057594046644480 PathId: 3 } 2025-05-07T09:03:35.594878Z node 4 :KQP_PROXY DEBUG: table_creator.cpp:190: Table script_execution_leases updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976715658 SchemeShardStatus: 1 SchemeShardTabletId: 72057594046644480 PathId: 4 } 2025-05-07T09:03:35.594928Z node 4 :KQP_PROXY DEBUG: table_creator.cpp:261: Table script_executions updater. Subscribe on create table tx: 281474976715659 2025-05-07T09:03:35.594929Z node 4 :KQP_PROXY DEBUG: table_creator.c ... e: /dc-1, DatabaseId: , SessionId: ydb://session/3?node_id=4&id=OWVkM2EzNTUtNDNjNjVkYmEtM2IwZjBhNjMtYTg4MDc1OA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 300.000000s timeout: 0.000000s cancelAfter: 0.000000s. 
Send request to target, requestId: 20, targetId: [4:7501626893248238764:2442] 2025-05-07T09:03:38.541229Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1294: Scheduled timeout timer for requestId: 20 timeout: 300.000000s actor id: [4:7501626893248238766:2606] 2025-05-07T09:03:38.546527Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:953: Forwarded response to sender actor, requestId: 20, sender: [4:7501626893248238765:2443], selfId: [4:7501626871773401238:2267], source: [4:7501626893248238764:2442] 2025-05-07T09:03:38.546708Z node 4 :KQP_PROXY DEBUG: query_actor.cpp:240: [TQueryBase] [TSaveScriptFinalStatusActor] TraceId: 32175673-abbfb28-7f8c8a1b-c31da680, State: Get operation info, TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=4&id=OWVkM2EzNTUtNDNjNjVkYmEtM2IwZjBhNjMtYTg4MDc1OA==, TxId: 01jtmzrz1demmc9eph5be2gmwg 2025-05-07T09:03:38.547145Z node 4 :KQP_PROXY DEBUG: query_actor.cpp:197: [TQueryBase] [TSaveScriptFinalStatusActor] TraceId: 32175673-abbfb28-7f8c8a1b-c31da680, State: Get operation info, RunDataQuery: -- TSaveScriptFinalStatusActor::FinishScriptExecution DECLARE $database AS Text; DECLARE $execution_id AS Text; DECLARE $operation_status AS Int32; DECLARE $execution_status AS Int32; DECLARE $finalization_status AS Int32; DECLARE $issues AS JsonDocument; DECLARE $plan AS JsonDocument; DECLARE $stats AS JsonDocument; DECLARE $ast AS Optional; DECLARE $ast_compressed AS Optional; DECLARE $ast_compression_method AS Optional; DECLARE $operation_ttl AS Interval; DECLARE $customer_supplied_id AS Text; DECLARE $user_token AS Text; DECLARE $script_sinks AS Optional; DECLARE $script_secret_names AS Optional; DECLARE $applicate_script_external_effect_required AS Bool; UPDATE `.metadata/script_executions` SET operation_status = $operation_status, execution_status = $execution_status, finalization_status = IF($applicate_script_external_effect_required, $finalization_status, NULL), issues = $issues, plan = $plan, end_ts = CurrentUtcTimestamp(), stats = $stats, ast = $ast, ast_compressed = $ast_compressed, ast_compression_method = $ast_compression_method, expire_at = IF($operation_ttl > CAST(0 AS Interval), CurrentUtcTimestamp() + $operation_ttl, NULL), customer_supplied_id = IF($applicate_script_external_effect_required, $customer_supplied_id, NULL), user_token = IF($applicate_script_external_effect_required, $user_token, NULL), script_sinks = IF($applicate_script_external_effect_required, $script_sinks, NULL), script_secret_names = IF($applicate_script_external_effect_required, $script_secret_names, NULL) WHERE database = $database AND execution_id = $execution_id; DELETE FROM `.metadata/script_execution_leases` WHERE database = $database AND execution_id = $execution_id; 2025-05-07T09:03:38.547533Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:767: Ctx: { TraceId: , Database: /dc-1, DatabaseId: , SessionId: ydb://session/3?node_id=4&id=OWVkM2EzNTUtNDNjNjVkYmEtM2IwZjBhNjMtYTg4MDc1OA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 300.000000s timeout: 0.000000s cancelAfter: 0.000000s. 
Send request to target, requestId: 21, targetId: [4:7501626893248238764:2442] 2025-05-07T09:03:38.547566Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1294: Scheduled timeout timer for requestId: 21 timeout: 300.000000s actor id: [4:7501626893248238787:2611] 2025-05-07T09:03:38.560650Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:953: Forwarded response to sender actor, requestId: 21, sender: [4:7501626893248238786:2449], selfId: [4:7501626871773401238:2267], source: [4:7501626893248238764:2442] 2025-05-07T09:03:38.561273Z node 4 :KQP_PROXY DEBUG: query_actor.cpp:240: [TQueryBase] [TSaveScriptFinalStatusActor] TraceId: 32175673-abbfb28-7f8c8a1b-c31da680, State: Update final status, TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=4&id=OWVkM2EzNTUtNDNjNjVkYmEtM2IwZjBhNjMtYTg4MDc1OA==, TxId: 2025-05-07T09:03:38.561357Z node 4 :KQP_PROXY DEBUG: query_actor.cpp:367: [TQueryBase] [TSaveScriptFinalStatusActor] TraceId: 32175673-abbfb28-7f8c8a1b-c31da680, State: Update final status, Finish with SUCCESS, SessionId: ydb://session/3?node_id=4&id=OWVkM2EzNTUtNDNjNjVkYmEtM2IwZjBhNjMtYTg4MDc1OA==, TxId: 2025-05-07T09:03:38.561406Z node 4 :KQP_PROXY DEBUG: kqp_script_executions.cpp:2628: [ScriptExecutions] Finish script execution operation. ExecutionId: 32175673-abbfb28-7f8c8a1b-c31da680. UNAVAILABLE. Issues: {
: Error: Lease expired } 2025-05-07T09:03:38.561550Z node 4 :KQP_PROXY DEBUG: kqp_script_executions.cpp:633: [ScriptExecutions] [TCheckLeaseStatusActor] ExecutionId: 32175673-abbfb28-7f8c8a1b-c31da680, successfully finalized script execution operation 2025-05-07T09:03:38.561577Z node 4 :KQP_PROXY DEBUG: kqp_script_executions.cpp:838: [ScriptExecutions] [TCheckLeaseStatusActor] ExecutionId: 32175673-abbfb28-7f8c8a1b-c31da680, reply success 2025-05-07T09:03:38.561621Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1353: Session closed, sessionId: ydb://session/3?node_id=4&id=OWVkM2EzNTUtNDNjNjVkYmEtM2IwZjBhNjMtYTg4MDc1OA==, workerId: [4:7501626893248238764:2442], local sessions count: 1 2025-05-07T09:03:38.569903Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:767: Ctx: { TraceId: 01jtmzrz29d78q32483xy9gd0s, Database: /dc-1, DatabaseId: , SessionId: ydb://session/3?node_id=4&id=YTVmN2Y2YjQtNjhhN2M0ODEtZDg5YzRiYTMtYWQ1ZmY3ZDY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 300.000000s timeout: 0.000000s cancelAfter: 0.000000s. Send request to target, requestId: 22, targetId: [4:7501626884658303925:2360] 2025-05-07T09:03:38.569943Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1294: Scheduled timeout timer for requestId: 22 timeout: 300.000000s actor id: [4:7501626893248238812:2618] 2025-05-07T09:03:38.743152Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7501626871773401026:2059];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:03:38.743525Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:03:38.980440Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:953: TraceId: "01jtmzrz29d78q32483xy9gd0s", Forwarded response to sender actor, requestId: 22, sender: [4:7501626893248238811:2454], selfId: [4:7501626871773401238:2267], source: [4:7501626884658303925:2360] 2025-05-07T09:03:38.981940Z node 4 :KQP_PROXY DEBUG: query_actor.cpp:134: [TQueryBase] [TScriptLeaseUpdater] TraceId: 32175673-abbfb28-7f8c8a1b-c31da680, Bootstrap. Database: /dc-1 2025-05-07T09:03:38.982090Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1468: Request has 18444997465090.569545s seconds to be completed 2025-05-07T09:03:38.983924Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1543: Created new session, sessionId: ydb://session/3?node_id=4&id=YzIwZTM0ZjYtODg3YTVhMTAtZGEyYTkxZWItOGUxYTFlOWI=, workerId: [4:7501626893248238858:2468], database: /dc-1, longSession: 1, local sessions count: 2 2025-05-07T09:03:38.984057Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:649: Received create session request, trace_id: 2025-05-07T09:03:38.984256Z node 4 :KQP_PROXY DEBUG: query_actor.cpp:197: [TQueryBase] [TScriptLeaseUpdater] TraceId: 32175673-abbfb28-7f8c8a1b-c31da680, RunDataQuery: -- TScriptLeaseUpdater::OnRunQuery DECLARE $database AS Text; DECLARE $execution_id AS Text; SELECT lease_deadline FROM `.metadata/script_execution_leases` WHERE database = $database AND execution_id = $execution_id AND (expire_at > CurrentUtcTimestamp() OR expire_at IS NULL); 2025-05-07T09:03:38.984488Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:767: Ctx: { TraceId: , Database: /dc-1, DatabaseId: , SessionId: ydb://session/3?node_id=4&id=YzIwZTM0ZjYtODg3YTVhMTAtZGEyYTkxZWItOGUxYTFlOWI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. 
TEvQueryRequest, set timer for: 300.000000s timeout: 0.000000s cancelAfter: 0.000000s. Send request to target, requestId: 24, targetId: [4:7501626893248238858:2468] 2025-05-07T09:03:38.984519Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1294: Scheduled timeout timer for requestId: 24 timeout: 300.000000s actor id: [4:7501626893248238860:2638] 2025-05-07T09:03:39.175290Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:953: Forwarded response to sender actor, requestId: 24, sender: [4:7501626893248238859:2469], selfId: [4:7501626871773401238:2267], source: [4:7501626893248238858:2468] 2025-05-07T09:03:39.175510Z node 4 :KQP_PROXY DEBUG: query_actor.cpp:240: [TQueryBase] [TScriptLeaseUpdater] TraceId: 32175673-abbfb28-7f8c8a1b-c31da680, State: Get lease info, TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=4&id=YzIwZTM0ZjYtODg3YTVhMTAtZGEyYTkxZWItOGUxYTFlOWI=, TxId: 01jtmzrzn35g896a964s14n3sc 2025-05-07T09:03:39.175628Z node 4 :KQP_PROXY WARN: query_actor.cpp:372: [TQueryBase] [TScriptLeaseUpdater] TraceId: 32175673-abbfb28-7f8c8a1b-c31da680, State: Get lease info, Finish with BAD_REQUEST, Issues: {
: Error: No such execution }, SessionId: ydb://session/3?node_id=4&id=YzIwZTM0ZjYtODg3YTVhMTAtZGEyYTkxZWItOGUxYTFlOWI=, TxId: 01jtmzrzn35g896a964s14n3sc 2025-05-07T09:03:39.175687Z node 4 :KQP_PROXY DEBUG: query_actor.cpp:428: [TQueryBase] [TScriptLeaseUpdater] TraceId: 32175673-abbfb28-7f8c8a1b-c31da680, State: Get lease info, Rollback transaction: 01jtmzrzn35g896a964s14n3sc 2025-05-07T09:03:39.177219Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:767: Ctx: { TraceId: , Database: /dc-1, DatabaseId: , SessionId: ydb://session/3?node_id=4&id=YzIwZTM0ZjYtODg3YTVhMTAtZGEyYTkxZWItOGUxYTFlOWI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 600.000000s timeout: 0.000000s cancelAfter: 0.000000s. Send request to target, requestId: 25, targetId: [4:7501626893248238858:2468] 2025-05-07T09:03:39.177265Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1294: Scheduled timeout timer for requestId: 25 timeout: 600.000000s actor id: [4:7501626897543206179:2647] 2025-05-07T09:03:39.178198Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:953: Forwarded response to sender actor, requestId: 25, sender: [4:7501626897543206178:2476], selfId: [4:7501626871773401238:2267], source: [4:7501626893248238858:2468] 2025-05-07T09:03:39.178304Z node 4 :KQP_PROXY DEBUG: query_actor.cpp:437: [TQueryBase] [TScriptLeaseUpdater] TraceId: 32175673-abbfb28-7f8c8a1b-c31da680, State: Get lease info, RollbackTransactionResult: SUCCESS. Issues: 2025-05-07T09:03:39.178499Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1353: Session closed, sessionId: ydb://session/3?node_id=4&id=YzIwZTM0ZjYtODg3YTVhMTAtZGEyYTkxZWItOGUxYTFlOWI=, workerId: [4:7501626893248238858:2468], local sessions count: 1 2025-05-07T09:03:39.188769Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1353: Session closed, sessionId: ydb://session/3?node_id=4&id=YTVmN2Y2YjQtNjhhN2M0ODEtZDg5YzRiYTMtYWQ1ZmY3ZDY=, workerId: [4:7501626884658303925:2360], local sessions count: 0 >> SplitPathTests::WithDatabaseShouldFail [GOOD] >> OperationMapping::IndexBuildSuccess [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/proxy_service/ut/unittest >> KqpProxy::NoUserAccessToScriptExecutionsTable [GOOD] Test command err: 2025-05-07T09:03:29.190362Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501626857749206234:2066];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:03:29.190434Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-05-07T09:03:29.413167Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7501626858579421801:2072];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:03:29.413334Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003123/r3tmp/tmpN1cVwF/pdisk_1.dat 2025-05-07T09:03:30.286062Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T09:03:30.329707Z node 1 :HIVE WARN: node_info.cpp:25: 
HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:03:30.329816Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:03:30.332279Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:03:30.409291Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:03:30.409760Z node 1 :BS_CONTROLLER ERROR: {BSC07@impl.h:2175} ProcessControllerEvent event processing took too much time Type# 268637706 Duration# 0.101958s 2025-05-07T09:03:30.409829Z node 1 :BS_CONTROLLER ERROR: {BSC00@bsc.cpp:666} StateWork event processing took too much time Type# 2146435078 Duration# 0.102043s 2025-05-07T09:03:30.422326Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:03:30.422388Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:03:30.424423Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-05-07T09:03:30.425290Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:4416 2025-05-07T09:03:31.480233Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1665: Updated YQL logs priority to current level: 4 2025-05-07T09:03:31.481526Z node 2 :KQP_PROXY INFO: kqp_proxy_service.cpp:442: Cannot start publishing usage, tenants: /dc-1, empty 2025-05-07T09:03:31.493583Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1543: Created new session, sessionId: ydb://session/3?node_id=2&id=MmIxY2M2MzgtMjYyM2FkMjctMTQzODEyYzYtZTM1OTQ5OTU=, workerId: [2:7501626867169356713:2307], database: , longSession: 1, local sessions count: 1 2025-05-07T09:03:31.493620Z node 2 :KQP_PROXY INFO: kqp_proxy_service.cpp:442: Cannot start publishing usage, tenants: /dc-1, empty 2025-05-07T09:03:31.493763Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:649: Received create session request, trace_id: 2025-05-07T09:03:31.493818Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:512: Subscribed for config changes. 2025-05-07T09:03:31.493850Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:519: Updated table service config. 
2025-05-07T09:03:31.493878Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1665: Updated YQL logs priority to current level: 4 2025-05-07T09:03:31.493916Z node 2 :KQP_PROXY INFO: kqp_proxy_service.cpp:442: Cannot start publishing usage, tenants: /dc-1, empty 2025-05-07T09:03:31.494033Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:425: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-05-07T09:03:31.494070Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:425: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-05-07T09:03:31.494099Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:425: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-05-07T09:03:31.494122Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:425: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-05-07T09:03:31.494141Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:425: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-05-07T09:03:31.569953Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1665: Updated YQL logs priority to current level: 4 2025-05-07T09:03:31.570507Z node 1 :KQP_PROXY INFO: kqp_proxy_service.cpp:442: Cannot start publishing usage, tenants: /dc-1, empty 2025-05-07T09:03:31.571077Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:767: Ctx: { TraceId: , Database: , DatabaseId: , SessionId: ydb://session/3?node_id=2&id=MmIxY2M2MzgtMjYyM2FkMjctMTQzODEyYzYtZTM1OTQ5OTU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 600.000000s timeout: 0.000000s cancelAfter: 0.000000s. Send request to target, requestId: 2, targetId: [2:8678280833929343339:121] 2025-05-07T09:03:31.571108Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1294: Scheduled timeout timer for requestId: 2 timeout: 600.000000s actor id: [1:7501626866339141631:2460] 2025-05-07T09:03:31.571153Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:512: Subscribed for config changes. 2025-05-07T09:03:31.571178Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:519: Updated table service config. 2025-05-07T09:03:31.571187Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1665: Updated YQL logs priority to current level: 4 2025-05-07T09:03:31.571211Z node 1 :KQP_PROXY INFO: kqp_proxy_service.cpp:442: Cannot start publishing usage, tenants: /dc-1, empty 2025-05-07T09:03:31.571280Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:425: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-05-07T09:03:31.571315Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:425: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-05-07T09:03:31.571336Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:425: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-05-07T09:03:31.571348Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:425: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-05-07T09:03:31.571430Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:767: Ctx: { TraceId: , Database: , DatabaseId: , SessionId: ydb://session/3?node_id=2&id=MmIxY2M2MzgtMjYyM2FkMjctMTQzODEyYzYtZTM1OTQ5OTU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. 
TEvQueryRequest, set timer for: 600.000000s timeout: 0.000000s cancelAfter: 0.000000s. Send request to target, requestId: 3, targetId: [2:7501626867169356713:2307] 2025-05-07T09:03:31.571458Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1294: Scheduled timeout timer for requestId: 3 timeout: 600.000000s actor id: [2:7501626867169356714:2118] 2025-05-07T09:03:31.573672Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501626866339141635:2312], DatabaseId: /dc-1, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:03:31.573673Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7501626867169356715:2308], DatabaseId: /dc-1, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:03:31.573820Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /dc-1, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:03:31.573820Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /dc-1, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:03:33.749287Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1543: TraceId: "01jtmzrr7k81gaxzah2g1gffk4", Created new session, sessionId: ydb://session/3?node_id=2&id=ZGY1ZTljYTUtNGQ2ZjNlNjAtYjVkZjM2MGQtYWExM2U1YjE=, workerId: [2:7501626875759291326:2314], database: , longSession: 0, local sessions count: 2 2025-05-07T09:03:33.749561Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:767: Ctx: { TraceId: 01jtmzrr7k81gaxzah2g1gffk4, Database: , DatabaseId: , SessionId: ydb://session/3?node_id=2&id=ZGY1ZTljYTUtNGQ2ZjNlNjAtYjVkZjM2MGQtYWExM2U1YjE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 300.000000s timeout: 0.000000s cancelAfter: 0.000000s. Send request to target, requestId: 4, targetId: [2:7501626875759291326:2314] 2025-05-07T09:03:33.749588Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1294: Scheduled timeout timer for requestId: 4 timeout: 300.000000s actor id: [2:7501626875759291327:2125] 2025-05-07T09:03:33.749619Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:425: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-05-07T09:03:33.750096Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7501626875759291328:2315], DatabaseId: /dc-1, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:03:33.750151Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /dc-1, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:03:33.750307Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7501626875759291333:2318], DatabaseId: /dc-1, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:03:33.755314Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976720657:3, at schemeshard: 72057594046644480 2025-05-07T09:03:33.778070Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7501626875759291335:2319], DatabaseId: /dc-1, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976720657 completed, doublechecking } 2025-05-07T09:03:33.923361Z node 2 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [2:7501626875759291363:2135] txid# 281474976720658, issues: { message: "Check failed: path: \'/dc-1/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:03:34.190458Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor; ... tself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715662:2, at schemeshard: 72057594046644480 2025-05-07T09:03:38.117421Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7501626897209810278:2363], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715662 completed, doublechecking } 2025-05-07T09:03:38.174325Z node 3 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [3:7501626897209810339:3006] txid# 281474976715663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:03:38.318031Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:953: Forwarded response to sender actor, requestId: 5, sender: [3:7501626897209810269:2358], selfId: [3:7501626884324907150:2280], source: [3:7501626897209810268:2357] 2025-05-07T09:03:38.318264Z node 3 :KQP_PROXY DEBUG: query_actor.cpp:240: [TQueryBase] [TCreateScriptOperationQuery] TraceId: ae6700d9-542a5e1a-cc704319-8f85cf12, TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=3&id=YzMzY2EzNDEtZjI3ZWQ0YzktNGRmMDk2YTMtODcxZTU3NGI=, TxId: 2025-05-07T09:03:38.318297Z node 3 :KQP_PROXY DEBUG: query_actor.cpp:367: [TQueryBase] [TCreateScriptOperationQuery] TraceId: ae6700d9-542a5e1a-cc704319-8f85cf12, Finish with SUCCESS, SessionId: ydb://session/3?node_id=3&id=YzMzY2EzNDEtZjI3ZWQ0YzktNGRmMDk2YTMtODcxZTU3NGI=, TxId: 2025-05-07T09:03:38.318308Z node 3 :KQP_PROXY DEBUG: kqp_script_executions.cpp:304: [ScriptExecutions] Create script execution operation. ExecutionId: ae6700d9-542a5e1a-cc704319-8f85cf12. Result: SUCCESS. Issues: 2025-05-07T09:03:38.320508Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1543: Created new session, sessionId: ydb://session/3?node_id=3&id=ZGE5ZTIxOTMtYTM2ZGU0MTMtYWFmZGI1NmMtODAzNmIyNGI=, workerId: [3:7501626897209810406:2375], database: /Root, longSession: 1, local sessions count: 2 2025-05-07T09:03:38.320657Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:649: Received create session request, trace_id: 2025-05-07T09:03:38.320936Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1353: Session closed, sessionId: ydb://session/3?node_id=3&id=YzMzY2EzNDEtZjI3ZWQ0YzktNGRmMDk2YTMtODcxZTU3NGI=, workerId: [3:7501626897209810268:2357], local sessions count: 1 2025-05-07T09:03:38.321052Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:767: Ctx: { TraceId: 01jtmzrycf61909e19ws7tbcf1, Database: /Root, DatabaseId: , SessionId: ydb://session/3?node_id=3&id=ZGE5ZTIxOTMtYTM2ZGU0MTMtYWFmZGI1NmMtODAzNmIyNGI=, CurrentExecutionId: ae6700d9-542a5e1a-cc704319-8f85cf12, CustomerSuppliedId: 01jtmzrycf61909e19ws7tbcf1, PoolId: }. TEvQueryRequest, set timer for: 604800.000000s timeout: 604800.000000s cancelAfter: 0.000000s. 
Send request to target, requestId: 7, targetId: [3:7501626897209810406:2375] 2025-05-07T09:03:38.321084Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1294: Scheduled timeout timer for requestId: 7 timeout: 604800.000000s actor id: [3:7501626897209810409:3047] 2025-05-07T09:03:38.335860Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1468: TraceId: "01jtmzrytz9yss5ckgbpv7115w", Request has 18444997465091.215780s seconds to be completed 2025-05-07T09:03:38.337632Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1543: TraceId: "01jtmzrytz9yss5ckgbpv7115w", Created new session, sessionId: ydb://session/3?node_id=3&id=Y2M0OWNhMjItZTcxYjE2ZDMtMzJkNGViZTktM2MzODY1Yjk=, workerId: [3:7501626897209810418:2381], database: /Root, longSession: 1, local sessions count: 2 2025-05-07T09:03:38.337766Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:649: Received create session request, trace_id: 01jtmzrytz9yss5ckgbpv7115w 2025-05-07T09:03:38.343331Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:767: Ctx: { TraceId: 01jtmzryv61957fydrqg1mw3cd, Database: /Root, DatabaseId: , SessionId: ydb://session/3?node_id=3&id=Y2M0OWNhMjItZTcxYjE2ZDMtMzJkNGViZTktM2MzODY1Yjk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 300.000000s timeout: 0.000000s cancelAfter: 0.000000s. Send request to target, requestId: 9, targetId: [3:7501626897209810418:2381] 2025-05-07T09:03:38.343374Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1294: Scheduled timeout timer for requestId: 9 timeout: 300.000000s actor id: [3:7501626897209810422:3052] 2025-05-07T09:03:38.364327Z node 3 :TX_PROXY_SCHEME_CACHE WARN: cache.cpp:303: Access denied: self# [3:7501626897209810427:3055], for# user@builtin, access# DescribeSchema 2025-05-07T09:03:38.364357Z node 3 :TX_PROXY_SCHEME_CACHE WARN: cache.cpp:303: Access denied: self# [3:7501626897209810427:3055], for# user@builtin, access# DescribeSchema 2025-05-07T09:03:38.374220Z node 3 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [3:7501626897209810423:2383], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:1:1: Error: At function: KiReadTable!
:1:1: Error: Cannot find table 'db.[/Root/.metadata/script_executions]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-05-07T09:03:38.374412Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2147: SessionId: ydb://session/3?node_id=3&id=Y2M0OWNhMjItZTcxYjE2ZDMtMzJkNGViZTktM2MzODY1Yjk=, ActorId: [3:7501626897209810418:2381], ActorState: ExecuteState, TraceId: 01jtmzryv61957fydrqg1mw3cd, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-05-07T09:03:38.374602Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:953: TraceId: "01jtmzryv61957fydrqg1mw3cd", Forwarded response to sender actor, requestId: 9, sender: [3:7501626897209810421:2382], selfId: [3:7501626884324907150:2280], source: [3:7501626897209810418:2381] 2025-05-07T09:03:38.374887Z node 3 :KQP_PROXY DEBUG: query_actor.cpp:134: [TQueryBase] [TSaveScriptExecutionResultMetaQuery] TraceId: ae6700d9-542a5e1a-cc704319-8f85cf12, Bootstrap. Database: /Root 2025-05-07T09:03:38.375350Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1468: Request has 18444997465091.176280s seconds to be completed 2025-05-07T09:03:38.377096Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1543: Created new session, sessionId: ydb://session/3?node_id=3&id=NzA1ZjBlMzctZjQwYzVjMTYtNzgyNWQxOTMtY2Q4YjA4MGM=, workerId: [3:7501626897209810436:2386], database: /Root, longSession: 1, local sessions count: 3 2025-05-07T09:03:38.377244Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:649: Received create session request, trace_id: 2025-05-07T09:03:38.377331Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:953: TraceId: "01jtmzrycf61909e19ws7tbcf1", Forwarded response to sender actor, requestId: 7, sender: [3:7501626897209810265:2954], selfId: [3:7501626884324907150:2280], source: [3:7501626897209810406:2375] 2025-05-07T09:03:38.377473Z node 3 :KQP_PROXY DEBUG: query_actor.cpp:197: [TQueryBase] [TSaveScriptExecutionResultMetaQuery] TraceId: ae6700d9-542a5e1a-cc704319-8f85cf12, RunDataQuery: -- TSaveScriptExecutionResultMetaQuery::OnRunQuery DECLARE $database AS Text; DECLARE $execution_id AS Text; DECLARE $result_set_metas AS JsonDocument; UPDATE `.metadata/script_executions` SET result_set_metas = $result_set_metas WHERE database = $database AND execution_id = $execution_id; 2025-05-07T09:03:38.377796Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:767: Ctx: { TraceId: , Database: /Root, DatabaseId: , SessionId: ydb://session/3?node_id=3&id=NzA1ZjBlMzctZjQwYzVjMTYtNzgyNWQxOTMtY2Q4YjA4MGM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 300.000000s timeout: 0.000000s cancelAfter: 0.000000s. 
Send request to target, requestId: 11, targetId: [3:7501626897209810436:2386] 2025-05-07T09:03:38.377833Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1294: Scheduled timeout timer for requestId: 11 timeout: 300.000000s actor id: [3:7501626897209810438:3059] 2025-05-07T09:03:38.380168Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1353: Session closed, sessionId: ydb://session/3?node_id=3&id=Y2M0OWNhMjItZTcxYjE2ZDMtMzJkNGViZTktM2MzODY1Yjk=, workerId: [3:7501626897209810418:2381], local sessions count: 2 2025-05-07T09:03:38.545575Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:953: Forwarded response to sender actor, requestId: 11, sender: [3:7501626897209810437:2387], selfId: [3:7501626884324907150:2280], source: [3:7501626897209810436:2386] 2025-05-07T09:03:38.545769Z node 3 :KQP_PROXY DEBUG: query_actor.cpp:240: [TQueryBase] [TSaveScriptExecutionResultMetaQuery] TraceId: ae6700d9-542a5e1a-cc704319-8f85cf12, TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=3&id=NzA1ZjBlMzctZjQwYzVjMTYtNzgyNWQxOTMtY2Q4YjA4MGM=, TxId: 2025-05-07T09:03:38.545815Z node 3 :KQP_PROXY DEBUG: query_actor.cpp:367: [TQueryBase] [TSaveScriptExecutionResultMetaQuery] TraceId: ae6700d9-542a5e1a-cc704319-8f85cf12, Finish with SUCCESS, SessionId: ydb://session/3?node_id=3&id=NzA1ZjBlMzctZjQwYzVjMTYtNzgyNWQxOTMtY2Q4YjA4MGM=, TxId: 2025-05-07T09:03:38.545954Z node 3 :KQP_PROXY DEBUG: kqp_script_executions.cpp:1907: [ScriptExecutions] [TSaveScriptExecutionResultActor] ExecutionId: ae6700d9-542a5e1a-cc704319-8f85cf12, start saving rows range [0; 1) 2025-05-07T09:03:38.546061Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1353: Session closed, sessionId: ydb://session/3?node_id=3&id=NzA1ZjBlMzctZjQwYzVjMTYtNzgyNWQxOTMtY2Q4YjA4MGM=, workerId: [3:7501626897209810436:2386], local sessions count: 1 2025-05-07T09:03:38.546071Z node 3 :KQP_PROXY DEBUG: query_actor.cpp:134: [TQueryBase] [TSaveScriptExecutionResultQuery] TraceId: ae6700d9-542a5e1a-cc704319-8f85cf12, Bootstrap. Database: /Root 2025-05-07T09:03:38.546157Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1468: Request has 18444997465091.005469s seconds to be completed 2025-05-07T09:03:38.547913Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1543: Created new session, sessionId: ydb://session/3?node_id=3&id=NDNhYzlkNjYtZGNlYmE3Y2UtYTg4ODc5ZTEtN2VhNWI3Mjk=, workerId: [3:7501626897209810472:2396], database: /Root, longSession: 1, local sessions count: 2 2025-05-07T09:03:38.548042Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:649: Received create session request, trace_id: 2025-05-07T09:03:38.548367Z node 3 :KQP_PROXY DEBUG: query_actor.cpp:197: [TQueryBase] [TSaveScriptExecutionResultQuery] TraceId: ae6700d9-542a5e1a-cc704319-8f85cf12, RunDataQuery: -- TSaveScriptExecutionResultQuery::OnRunQuery DECLARE $database AS Text; DECLARE $execution_id AS Text; DECLARE $result_set_id AS Int32; DECLARE $expire_at AS Optional; DECLARE $items AS List>; UPSERT INTO `.metadata/result_sets` SELECT $database as database, $execution_id as execution_id, $result_set_id as result_set_id, T.row_id as row_id, $expire_at as expire_at, T.result_set as result_set, T.accumulated_size as accumulated_size FROM AS_TABLE($items) AS T; 2025-05-07T09:03:38.548732Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:767: Ctx: { TraceId: , Database: /Root, DatabaseId: , SessionId: ydb://session/3?node_id=3&id=NDNhYzlkNjYtZGNlYmE3Y2UtYTg4ODc5ZTEtN2VhNWI3Mjk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. 
TEvQueryRequest, set timer for: 300.000000s timeout: 0.000000s cancelAfter: 0.000000s. Send request to target, requestId: 13, targetId: [3:7501626897209810472:2396] 2025-05-07T09:03:38.548765Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1294: Scheduled timeout timer for requestId: 13 timeout: 300.000000s actor id: [3:7501626897209810474:3073] |92.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/grpc_services/ut/unittest >> SplitPathTests::WithDatabaseShouldFail [GOOD] |92.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/grpc_services/ut/unittest >> SplitPathTests::WithoutDatabaseShouldSuccess [GOOD] |92.4%| [TA] $(B)/ydb/core/kqp/ut/data/test-results/unittest/{meta.json ... results_accumulator.log} >> TExternalTableTest::ParallelCreateSameExternalTable >> TExternalTableTest::ReplaceExternalTableShouldFailIfEntityOfAnotherTypeWithSameNameExists >> TExternalTableTest::SchemeErrors >> TExternalTableTest::DropTableTwice >> TExternalTableTest::ParallelCreateExternalTable |92.4%| [TA] {RESULT} $(B)/ydb/core/kqp/ut/data/test-results/unittest/{meta.json ... results_accumulator.log} >> TExternalTableTest::CreateExternalTable |92.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/grpc_services/ut/unittest >> OperationMapping::IndexBuildSuccess [GOOD] >> BackupRestore::RestoreTablePartitioningSettings [GOOD] >> BackupRestore::RestoreIndexTablePartitioningSettings >> OperationMapping::IndexBuildRejected [GOOD] |92.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/grpc_services/ut/unittest >> OperationMapping::IndexBuildRejected [GOOD] >> BackupRestore::TestAllSchemeObjectTypes-EPathTypeTable [GOOD] >> BackupRestore::TestAllSchemeObjectTypes-EPathTypePersQueueGroup >> TExternalTableTest::SchemeErrors [GOOD] >> TExternalTableTest::DropTableTwice [GOOD] >> TExternalTableTest::ParallelCreateExternalTable [GOOD] >> TExternalTableTest::ParallelCreateSameExternalTable [GOOD] >> TExternalTableTest::ReplaceExternalTableShouldFailIfEntityOfAnotherTypeWithSameNameExists [GOOD] >> TExternalTableTest::CreateExternalTable [GOOD] >> TExternalTableTest::CreateExternalTableShouldFailIfSuchEntityAlreadyExists |92.4%| [TA] $(B)/ydb/core/grpc_services/ut/test-results/unittest/{meta.json ... results_accumulator.log} |92.4%| [TA] {RESULT} $(B)/ydb/core/grpc_services/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_external_table/unittest >> TExternalTableTest::DropTableTwice [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2141] Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:127:2058] recipient: [1:109:2141] 2025-05-07T09:03:42.101841Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T09:03:42.101938Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:03:42.102010Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T09:03:42.102073Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T09:03:42.103448Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T09:03:42.103519Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T09:03:42.103597Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:03:42.103678Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T09:03:42.104456Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T09:03:42.106969Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T09:03:42.183548Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7610: Got new config: QueryServiceConfig { AvailableExternalDataSources: "ObjectStorage" AvailableExternalDataSources: "ClickHouse" AvailableExternalDataSources: "PostgreSQL" AvailableExternalDataSources: "MySQL" AvailableExternalDataSources: "Ydb" AvailableExternalDataSources: "YT" AvailableExternalDataSources: "Greenplum" AvailableExternalDataSources: "MsSQLServer" AvailableExternalDataSources: "Oracle" AvailableExternalDataSources: "Logging" AvailableExternalDataSources: "Solomon" } 2025-05-07T09:03:42.183615Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:03:42.184227Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# ObjectStorage, ClickHouse, PostgreSQL, MySQL, Ydb, YT, Greenplum, MsSQLServer, Oracle, Logging, Solomon 2025-05-07T09:03:42.198188Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: 
TxInitSchema.Complete 2025-05-07T09:03:42.198642Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T09:03:42.198771Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T09:03:42.206948Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T09:03:42.207138Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T09:03:42.210071Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T09:03:42.210350Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T09:03:42.216331Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:03:42.223737Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:03:42.223809Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:03:42.223931Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T09:03:42.223986Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:03:42.224099Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T09:03:42.225324Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T09:03:42.231830Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:239:2058] recipient: [1:15:2062] 2025-05-07T09:03:42.352669Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T09:03:42.352873Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:03:42.353081Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T09:03:42.353302Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T09:03:42.353376Z node 1 :FLAT_TX_SCHEMESHARD WARN: 
schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:03:42.355572Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T09:03:42.355699Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T09:03:42.355882Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:03:42.355937Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T09:03:42.355996Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T09:03:42.356030Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T09:03:42.357734Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:03:42.357792Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T09:03:42.357831Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T09:03:42.359392Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:03:42.359436Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:03:42.359481Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:03:42.359547Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T09:03:42.363246Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T09:03:42.364961Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T09:03:42.365186Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 
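For orientation: the NOT_FOUND warnings for pool "default" followed by ESchemeOpCreateResourcePool, the "path exist, request accepts it" TX_PROXY error, and the "Transaction ... completed, doublechecking" retry above appear to be the expected first-use race, where each node tries to create the default workload-manager pool, one wins, and the others accept the existing path after a retry. The TExternalTableTest cases dumped here exercise external-table DDL through the schemeshard; a hedged sketch of the equivalent statements via the C++ SDK follows. The data-source path, Location "/", and the key/value Uint64 columns mirror the ExternalTableDescription printed further down; SOURCE_TYPE "ObjectStorage" matches the log, while AUTH_METHOD and the bucket URL are assumptions:

// Sketch only: issue the external data source / external table DDL that these
// unit tests drive through the schemeshard, using ExecuteSchemeQuery.
#include <ydb/public/sdk/cpp/client/ydb_table/table.h>

NYdb::TStatus CreateExternalTable(NYdb::NTable::TSession session) {
    // Data source first; the table below references it by path.
    auto st = session.ExecuteSchemeQuery(R"sql(
        CREATE EXTERNAL DATA SOURCE `/MyRoot/ExternalDataSource` WITH (
            SOURCE_TYPE = "ObjectStorage",
            LOCATION = "https://bucket.example.net/",
            AUTH_METHOD = "NONE"
        );
    )sql").GetValueSync();
    if (!st.IsSuccess()) {
        return st;
    }
    // Columns and Location "/" as in the ExternalTableDescription in this log.
    return session.ExecuteSchemeQuery(R"sql(
        CREATE EXTERNAL TABLE `/MyRoot/ExternalTable` (
            key Uint64,
            value Uint64
        ) WITH (
            DATA_SOURCE = "/MyRoot/ExternalDataSource",
            LOCATION = "/"
        );
    )sql").GetValueSync();
}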
2025-05-07T09:03:42.366237Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T09:03:42.366383Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 134 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:03:42.366435Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:03:42.366732Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T09:03:42.366798Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:03:42.366982Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T09:03:42.367086Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T09:03:42.368828Z node 1 :FLAT_TX_SCHEMESHARD INF ... .449579Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:03:42.449605Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 103, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:03:42.449701Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 103, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-05-07T09:03:42.449791Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 103, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-05-07T09:03:42.449855Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:03:42.449905Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:206:2208], at schemeshard: 72057594046678944, txId: 103, path id: 1 2025-05-07T09:03:42.449936Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:206:2208], at schemeshard: 72057594046678944, txId: 103, path id: 3 2025-05-07T09:03:42.449982Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:206:2208], at schemeshard: 72057594046678944, txId: 103, path id: 2 2025-05-07T09:03:42.450144Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 103:0, at schemeshard: 72057594046678944 2025-05-07T09:03:42.450176Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046678944] TDone opId# 103:0 ProgressState 2025-05-07T09:03:42.450250Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#103:0 progress is 1/1 2025-05-07T09:03:42.450299Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-05-07T09:03:42.450332Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#103:0 progress is 1/1 2025-05-07T09:03:42.450357Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-05-07T09:03:42.450385Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 103, ready parts: 1/1, is published: false 2025-05-07T09:03:42.450415Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-05-07T09:03:42.450446Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 103:0 2025-05-07T09:03:42.450469Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 103:0 2025-05-07T09:03:42.450511Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-05-07T09:03:42.450547Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate source path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-05-07T09:03:42.450579Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 103, publications: 3, subscribers: 0 2025-05-07T09:03:42.450615Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 103, [OwnerId: 72057594046678944, LocalPathId: 1], 9 2025-05-07T09:03:42.450654Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 103, [OwnerId: 72057594046678944, LocalPathId: 2], 2 2025-05-07T09:03:42.450677Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 103, [OwnerId: 72057594046678944, LocalPathId: 3], 18446744073709551615 2025-05-07T09:03:42.450915Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 103 2025-05-07T09:03:42.450984Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 103 2025-05-07T09:03:42.451012Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 3, at schemeshard: 72057594046678944, txId: 103 2025-05-07T09:03:42.451044Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 103, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 18446744073709551615 2025-05-07T09:03:42.451072Z 
node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-05-07T09:03:42.451490Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-05-07T09:03:42.451541Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-05-07T09:03:42.451591Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-05-07T09:03:42.452007Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 9 PathOwnerId: 72057594046678944, cookie: 103 2025-05-07T09:03:42.452072Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 9 PathOwnerId: 72057594046678944, cookie: 103 2025-05-07T09:03:42.452099Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 103 2025-05-07T09:03:42.452130Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 103, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 9 2025-05-07T09:03:42.452165Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-05-07T09:03:42.453028Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 2 PathOwnerId: 72057594046678944, cookie: 103 2025-05-07T09:03:42.453093Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 2 PathOwnerId: 72057594046678944, cookie: 103 2025-05-07T09:03:42.453118Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 103 2025-05-07T09:03:42.453139Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 103, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 2 2025-05-07T09:03:42.453162Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-05-07T09:03:42.453242Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 103, subscribers: 0 2025-05-07T09:03:42.455406Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-05-07T09:03:42.455700Z 
node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-05-07T09:03:42.456087Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-05-07T09:03:42.456489Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 TestModificationResult got TxId: 103, wait until txId: 103 TestWaitNotification wait txId: 103 2025-05-07T09:03:42.456774Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 103: send EvNotifyTxCompletion 2025-05-07T09:03:42.456808Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 103 2025-05-07T09:03:42.457131Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 103, at schemeshard: 72057594046678944 2025-05-07T09:03:42.457193Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-05-07T09:03:42.457218Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [1:371:2362] TestWaitNotification: OK eventTxId 103 2025-05-07T09:03:42.457656Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/ExternalTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T09:03:42.457843Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/ExternalTable" took 190us result status StatusPathDoesNotExist 2025-05-07T09:03:42.457995Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/ExternalTable\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/ExternalTable" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: true } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_external_table/unittest >> TExternalTableTest::ParallelCreateSameExternalTable [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2141] Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:127:2058] recipient: [1:109:2141] 2025-05-07T09:03:42.101815Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 
600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T09:03:42.101887Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:03:42.101932Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T09:03:42.101960Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T09:03:42.103407Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T09:03:42.103454Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T09:03:42.103554Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:03:42.103613Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T09:03:42.104275Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T09:03:42.106963Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T09:03:42.201795Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7610: Got new config: QueryServiceConfig { AvailableExternalDataSources: "ObjectStorage" AvailableExternalDataSources: "ClickHouse" AvailableExternalDataSources: "PostgreSQL" AvailableExternalDataSources: "MySQL" AvailableExternalDataSources: "Ydb" AvailableExternalDataSources: "YT" AvailableExternalDataSources: "Greenplum" AvailableExternalDataSources: "MsSQLServer" AvailableExternalDataSources: "Oracle" AvailableExternalDataSources: "Logging" AvailableExternalDataSources: "Solomon" } 2025-05-07T09:03:42.201860Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:03:42.202662Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# ObjectStorage, ClickHouse, PostgreSQL, MySQL, Ydb, YT, Greenplum, MsSQLServer, Oracle, Logging, Solomon 2025-05-07T09:03:42.217038Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T09:03:42.217527Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T09:03:42.217700Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T09:03:42.224684Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T09:03:42.224867Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T09:03:42.225411Z node 1 :FLAT_TX_SCHEMESHARD 
NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T09:03:42.225594Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T09:03:42.228022Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:03:42.229312Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:03:42.229379Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:03:42.229491Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T09:03:42.229538Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:03:42.229662Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T09:03:42.229875Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T09:03:42.236489Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:239:2058] recipient: [1:15:2062] 2025-05-07T09:03:42.340947Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T09:03:42.341120Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:03:42.341264Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T09:03:42.341416Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T09:03:42.341470Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:03:42.343288Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T09:03:42.343370Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T09:03:42.343504Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:03:42.343550Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T09:03:42.343586Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T09:03:42.343611Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T09:03:42.344967Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:03:42.345015Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T09:03:42.345044Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T09:03:42.346320Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:03:42.346352Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:03:42.346386Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:03:42.346443Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T09:03:42.350052Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T09:03:42.351511Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T09:03:42.352650Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T09:03:42.353587Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T09:03:42.353712Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 134 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:03:42.353762Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: 
NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:03:42.354892Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T09:03:42.354969Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:03:42.355150Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T09:03:42.355246Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T09:03:42.356978Z node 1 :FLAT_TX_SCHEMESHARD INF ... ults wait txId: 127 TestModificationResult got TxId: 127, wait until txId: 127 2025-05-07T09:03:42.445694Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/NilNoviSubLuna" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T09:03:42.445837Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/NilNoviSubLuna" took 146us result status StatusSuccess 2025-05-07T09:03:42.446067Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/NilNoviSubLuna" PathDescription { Self { Name: "NilNoviSubLuna" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeExternalTable CreateFinished: true CreateTxId: 125 CreateStep: 5000003 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 ExternalTableVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } ExternalTableDescription { Name: "NilNoviSubLuna" PathId { OwnerId: 72057594046678944 LocalId: 3 } Version: 1 SourceType: "ObjectStorage" DataSourcePath: "/MyRoot/ExternalDataSource" Location: "/" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false } Columns { Name: "value" Type: "Uint64" TypeId: 4 Id: 2 NotNull: false } Content: "" } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:03:42.446443Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/NilNoviSubLuna" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at 
schemeshard: 72057594046678944 2025-05-07T09:03:42.446535Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/NilNoviSubLuna" took 96us result status StatusSuccess 2025-05-07T09:03:42.446695Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/NilNoviSubLuna" PathDescription { Self { Name: "NilNoviSubLuna" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeExternalTable CreateFinished: true CreateTxId: 125 CreateStep: 5000003 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 ExternalTableVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } ExternalTableDescription { Name: "NilNoviSubLuna" PathId { OwnerId: 72057594046678944 LocalId: 3 } Version: 1 SourceType: "ObjectStorage" DataSourcePath: "/MyRoot/ExternalDataSource" Location: "/" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false } Columns { Name: "value" Type: "Uint64" TypeId: 4 Id: 2 NotNull: false } Content: "" } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 TestWaitNotification wait txId: 125 2025-05-07T09:03:42.446898Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 125: send EvNotifyTxCompletion 2025-05-07T09:03:42.446927Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 125 TestWaitNotification wait txId: 126 2025-05-07T09:03:42.446982Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 126: send EvNotifyTxCompletion 2025-05-07T09:03:42.446996Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 126 TestWaitNotification wait txId: 127 2025-05-07T09:03:42.447026Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 127: send EvNotifyTxCompletion 2025-05-07T09:03:42.447038Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 127 2025-05-07T09:03:42.447406Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 125, at schemeshard: 72057594046678944 2025-05-07T09:03:42.447483Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 125: got EvNotifyTxCompletionResult 2025-05-07T09:03:42.447511Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 125: satisfy waiter [1:343:2334] 2025-05-07T09:03:42.447650Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 
126, at schemeshard: 72057594046678944 2025-05-07T09:03:42.447714Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 126: got EvNotifyTxCompletionResult 2025-05-07T09:03:42.447731Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 126: satisfy waiter [1:343:2334] 2025-05-07T09:03:42.447792Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 127, at schemeshard: 72057594046678944 2025-05-07T09:03:42.447823Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 127: got EvNotifyTxCompletionResult 2025-05-07T09:03:42.447836Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 127: satisfy waiter [1:343:2334] TestWaitNotification: OK eventTxId 125 TestWaitNotification: OK eventTxId 126 TestWaitNotification: OK eventTxId 127 2025-05-07T09:03:42.448214Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/NilNoviSubLuna" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T09:03:42.448381Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/NilNoviSubLuna" took 158us result status StatusSuccess 2025-05-07T09:03:42.448577Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/NilNoviSubLuna" PathDescription { Self { Name: "NilNoviSubLuna" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeExternalTable CreateFinished: true CreateTxId: 125 CreateStep: 5000003 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 ExternalTableVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } ExternalTableDescription { Name: "NilNoviSubLuna" PathId { OwnerId: 72057594046678944 LocalId: 3 } Version: 1 SourceType: "ObjectStorage" DataSourcePath: "/MyRoot/ExternalDataSource" Location: "/" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false } Columns { Name: "value" Type: "Uint64" TypeId: 4 Id: 2 NotNull: false } Content: "" } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 TestModificationResults wait txId: 128 2025-05-07T09:03:42.460270Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateExternalTable CreateExternalTable { Name: "NilNoviSubLuna" SourceType: "General" DataSourcePath: "/MyRoot/ExternalDataSource" 
Location: "/" Columns { Name: "key" Type: "Uint64" } Columns { Name: "value" Type: "Uint64" } } } TxId: 128 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T09:03:42.460518Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_create_external_table.cpp:427: [72057594046678944] CreateNewExternalTable, opId 128:0, feature flag EnableReplaceIfExistsForExternalEntities 0, tx WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateExternalTable FailOnExist: false CreateExternalTable { Name: "NilNoviSubLuna" SourceType: "General" DataSourcePath: "/MyRoot/ExternalDataSource" Location: "/" Columns { Name: "key" Type: "Uint64" } Columns { Name: "value" Type: "Uint64" } } 2025-05-07T09:03:42.460579Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_external_table.cpp:300: [72057594046678944] TCreateExternalTable Propose: opId# 128:0, path# /MyRoot/NilNoviSubLuna 2025-05-07T09:03:42.460677Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 128:1, propose status:StatusAlreadyExists, reason: Check failed: path: '/MyRoot/NilNoviSubLuna', error: path exist, request accepts it (id: [OwnerId: 72057594046678944, LocalPathId: 3], type: EPathTypeExternalTable, state: EPathStateNoChanges), at schemeshard: 72057594046678944 2025-05-07T09:03:42.462392Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 128, response: Status: StatusAlreadyExists Reason: "Check failed: path: \'/MyRoot/NilNoviSubLuna\', error: path exist, request accepts it (id: [OwnerId: 72057594046678944, LocalPathId: 3], type: EPathTypeExternalTable, state: EPathStateNoChanges)" TxId: 128 SchemeshardId: 72057594046678944 PathId: 3 PathCreateTxId: 125, at schemeshard: 72057594046678944 2025-05-07T09:03:42.462493Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 128, database: /MyRoot, subject: , status: StatusAlreadyExists, reason: Check failed: path: '/MyRoot/NilNoviSubLuna', error: path exist, request accepts it (id: [OwnerId: 72057594046678944, LocalPathId: 3], type: EPathTypeExternalTable, state: EPathStateNoChanges), operation: CREATE EXTERNAL TABLE, path: /MyRoot/NilNoviSubLuna TestModificationResult got TxId: 128, wait until txId: 128 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_external_table/unittest >> TExternalTableTest::ReplaceExternalTableShouldFailIfEntityOfAnotherTypeWithSameNameExists [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2141] Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:127:2058] recipient: [1:109:2141] 2025-05-07T09:03:42.101775Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T09:03:42.101861Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:03:42.101896Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T09:03:42.101938Z 
node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T09:03:42.103429Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T09:03:42.103461Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T09:03:42.103523Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:03:42.103634Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T09:03:42.104357Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T09:03:42.106984Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T09:03:42.183533Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7610: Got new config: QueryServiceConfig { AvailableExternalDataSources: "ObjectStorage" AvailableExternalDataSources: "ClickHouse" AvailableExternalDataSources: "PostgreSQL" AvailableExternalDataSources: "MySQL" AvailableExternalDataSources: "Ydb" AvailableExternalDataSources: "YT" AvailableExternalDataSources: "Greenplum" AvailableExternalDataSources: "MsSQLServer" AvailableExternalDataSources: "Oracle" AvailableExternalDataSources: "Logging" AvailableExternalDataSources: "Solomon" } 2025-05-07T09:03:42.183591Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:03:42.184163Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# ObjectStorage, ClickHouse, PostgreSQL, MySQL, Ydb, YT, Greenplum, MsSQLServer, Oracle, Logging, Solomon 2025-05-07T09:03:42.201306Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T09:03:42.201798Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T09:03:42.201940Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T09:03:42.209379Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T09:03:42.209594Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T09:03:42.210253Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T09:03:42.210490Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T09:03:42.216588Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:03:42.223879Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: 
TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:03:42.223970Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:03:42.224102Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T09:03:42.224150Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:03:42.224277Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T09:03:42.225391Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T09:03:42.231503Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:239:2058] recipient: [1:15:2062] 2025-05-07T09:03:42.337287Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T09:03:42.338369Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:03:42.339355Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T09:03:42.340555Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T09:03:42.340607Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:03:42.342940Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T09:03:42.343027Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T09:03:42.343169Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:03:42.343239Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T09:03:42.343268Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T09:03:42.343292Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change 
state for txid 1:0 2 -> 3 2025-05-07T09:03:42.344892Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:03:42.344962Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T09:03:42.345012Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T09:03:42.346718Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:03:42.346772Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:03:42.346813Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:03:42.346881Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T09:03:42.351414Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T09:03:42.353216Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T09:03:42.353421Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T09:03:42.354418Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T09:03:42.354541Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 134 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:03:42.354597Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:03:42.354887Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T09:03:42.354955Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:03:42.355130Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 
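(Aside: the propose/plan records above are the schemeshard bootstrap plus the external-table fixture under test. Expressed as client-side YQL the fixture is roughly the following — a sketch only: the paths, column types, ObjectStorage source type, Location, and "Auth { None { } }" are taken from the DescribeScheme output in this trace, but the trace never shows the actual DDL, so treat the statements as an approximation rather than the test's real input.

    -- Approximate YQL equivalent of the fixture seen in the trace.
    -- Paths and column types mirror the log; AUTH_METHOD = "NONE"
    -- corresponds to the "Auth { None { } }" in the describe output.
    CREATE EXTERNAL DATA SOURCE `/MyRoot/ExternalDataSource` WITH (
        SOURCE_TYPE = "ObjectStorage",
        LOCATION = "https://s3.cloud.net/my_bucket",
        AUTH_METHOD = "NONE"
    );

    CREATE EXTERNAL TABLE `/MyRoot/NilNoviSubLuna` (
        key Uint64,
        value Uint64
    ) WITH (
        DATA_SOURCE = "/MyRoot/ExternalDataSource",
        LOCATION = "/"
    );

Re-issuing the same CREATE — txId 128 earlier in the trace — is what produces StatusAlreadyExists with PathCreateTxId: 125: the duplicate is rejected at IgniteOperation, before any of the 2 -> 3 -> 128 -> 240 state transitions seen here are scheduled.)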
2025-05-07T09:03:42.355229Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T09:03:42.357189Z node 1 :FLAT_TX_SCHEMESHARD INF ... blish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:206:2208], at schemeshard: 72057594046678944, txId: 102, path id: 3 2025-05-07T09:03:42.421193Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-05-07T09:03:42.421218Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046678944] TDone opId# 102:0 ProgressState 2025-05-07T09:03:42.421284Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#102:0 progress is 1/1 2025-05-07T09:03:42.421323Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-05-07T09:03:42.421353Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#102:0 progress is 1/1 2025-05-07T09:03:42.421380Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-05-07T09:03:42.421411Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: false 2025-05-07T09:03:42.421447Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-05-07T09:03:42.421482Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 102:0 2025-05-07T09:03:42.421503Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 102:0 2025-05-07T09:03:42.421557Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-05-07T09:03:42.421583Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 102, publications: 2, subscribers: 0 2025-05-07T09:03:42.421605Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 102, [OwnerId: 72057594046678944, LocalPathId: 1], 6 2025-05-07T09:03:42.421634Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 102, [OwnerId: 72057594046678944, LocalPathId: 3], 2 2025-05-07T09:03:42.422338Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 6 PathOwnerId: 72057594046678944, cookie: 102 2025-05-07T09:03:42.422403Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 6 PathOwnerId: 72057594046678944, cookie: 102 2025-05-07T09:03:42.422434Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 102 2025-05-07T09:03:42.422473Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 6 2025-05-07T09:03:42.422517Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-05-07T09:03:42.423300Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 102 2025-05-07T09:03:42.423391Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 102 2025-05-07T09:03:42.423415Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 102 2025-05-07T09:03:42.423433Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 2 2025-05-07T09:03:42.423452Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-05-07T09:03:42.423503Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 102, subscribers: 0 2025-05-07T09:03:42.424991Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-05-07T09:03:42.425715Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 TestModificationResult got TxId: 102, wait until txId: 102 TestWaitNotification wait txId: 102 2025-05-07T09:03:42.425857Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2025-05-07T09:03:42.425887Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 2025-05-07T09:03:42.426238Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 2025-05-07T09:03:42.426295Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-05-07T09:03:42.426318Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:326:2317] TestWaitNotification: OK eventTxId 102 2025-05-07T09:03:42.426623Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/ExternalDataSource" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T09:03:42.426762Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path 
"/MyRoot/ExternalDataSource" took 159us result status StatusSuccess 2025-05-07T09:03:42.426962Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/ExternalDataSource" PathDescription { Self { Name: "ExternalDataSource" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeExternalDataSource CreateFinished: true CreateTxId: 102 CreateStep: 5000003 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 ExternalDataSourceVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } ExternalDataSourceDescription { Name: "ExternalDataSource" PathId { OwnerId: 72057594046678944 LocalId: 3 } Version: 1 SourceType: "ObjectStorage" Location: "https://s3.cloud.net/my_bucket" Installation: "" Auth { None { } } Properties { } References { } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 TestModificationResults wait txId: 103 2025-05-07T09:03:42.429250Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateExternalTable CreateExternalTable { Name: "UniqueName" SourceType: "General" DataSourcePath: "/MyRoot/ExternalDataSource" Location: "/" Columns { Name: "key" Type: "Uint64" } ReplaceIfExists: true } } TxId: 103 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T09:03:42.429454Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_create_external_table.cpp:427: [72057594046678944] CreateNewExternalTable, opId 103:0, feature flag EnableReplaceIfExistsForExternalEntities 1, tx WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateExternalTable FailOnExist: false CreateExternalTable { Name: "UniqueName" SourceType: "General" DataSourcePath: "/MyRoot/ExternalDataSource" Location: "/" Columns { Name: "key" Type: "Uint64" } ReplaceIfExists: true } 2025-05-07T09:03:42.429591Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_external_table.cpp:312: [72057594046678944] TAlterExternalTable Propose: opId# 103:0, path# /MyRoot/UniqueName, ReplaceIfExists: 1 2025-05-07T09:03:42.429688Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 103:1, propose status:StatusNameConflict, reason: Check failed: path: '/MyRoot/UniqueName', error: unexpected path type (id: [OwnerId: 72057594046678944, LocalPathId: 2], type: EPathTypeView, state: EPathStateNoChanges), expected types: EPathTypeExternalTable, at schemeshard: 72057594046678944 2025-05-07T09:03:42.431215Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 103, response: Status: StatusNameConflict Reason: "Check failed: path: \'/MyRoot/UniqueName\', error: 
unexpected path type (id: [OwnerId: 72057594046678944, LocalPathId: 2], type: EPathTypeView, state: EPathStateNoChanges), expected types: EPathTypeExternalTable" TxId: 103 SchemeshardId: 72057594046678944 PathId: 2 PathCreateTxId: 101, at schemeshard: 72057594046678944 2025-05-07T09:03:42.431314Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 103, database: /MyRoot, subject: , status: StatusNameConflict, reason: Check failed: path: '/MyRoot/UniqueName', error: unexpected path type (id: [OwnerId: 72057594046678944, LocalPathId: 2], type: EPathTypeView, state: EPathStateNoChanges), expected types: EPathTypeExternalTable, operation: CREATE EXTERNAL TABLE, path: /MyRoot/UniqueName TestModificationResult got TxId: 103, wait until txId: 103 TestWaitNotification wait txId: 103 2025-05-07T09:03:42.431484Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 103: send EvNotifyTxCompletion 2025-05-07T09:03:42.431517Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 103 2025-05-07T09:03:42.431801Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 103, at schemeshard: 72057594046678944 2025-05-07T09:03:42.431864Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-05-07T09:03:42.431903Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [1:334:2325] TestWaitNotification: OK eventTxId 103 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_external_table/unittest >> TExternalTableTest::SchemeErrors [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2141] Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:127:2058] recipient: [1:109:2141] 2025-05-07T09:03:42.101781Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T09:03:42.101873Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:03:42.101943Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T09:03:42.102017Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T09:03:42.103423Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T09:03:42.103469Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T09:03:42.103544Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:03:42.103624Z node 1 :FLAT_TX_SCHEMESHARD 
NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T09:03:42.104286Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T09:03:42.106954Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T09:03:42.201794Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7610: Got new config: QueryServiceConfig { AvailableExternalDataSources: "ObjectStorage" AvailableExternalDataSources: "ClickHouse" AvailableExternalDataSources: "PostgreSQL" AvailableExternalDataSources: "MySQL" AvailableExternalDataSources: "Ydb" AvailableExternalDataSources: "YT" AvailableExternalDataSources: "Greenplum" AvailableExternalDataSources: "MsSQLServer" AvailableExternalDataSources: "Oracle" AvailableExternalDataSources: "Logging" AvailableExternalDataSources: "Solomon" } 2025-05-07T09:03:42.201860Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:03:42.202662Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# ObjectStorage, ClickHouse, PostgreSQL, MySQL, Ydb, YT, Greenplum, MsSQLServer, Oracle, Logging, Solomon 2025-05-07T09:03:42.217038Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T09:03:42.217530Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T09:03:42.217686Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T09:03:42.224812Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T09:03:42.225019Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T09:03:42.225570Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T09:03:42.225768Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T09:03:42.228530Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:03:42.229651Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:03:42.229707Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:03:42.229821Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T09:03:42.229870Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:03:42.229990Z 
node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T09:03:42.230181Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T09:03:42.236489Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:239:2058] recipient: [1:15:2062] 2025-05-07T09:03:42.349027Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T09:03:42.349215Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:03:42.349390Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T09:03:42.349567Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T09:03:42.349610Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:03:42.351669Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T09:03:42.351788Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T09:03:42.351950Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:03:42.352000Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T09:03:42.352042Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T09:03:42.352070Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T09:03:42.353627Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:03:42.353677Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T09:03:42.353716Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T09:03:42.355330Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 
72057594046678944 2025-05-07T09:03:42.355375Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:03:42.355419Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:03:42.355474Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T09:03:42.365325Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T09:03:42.366865Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T09:03:42.367036Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T09:03:42.367891Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T09:03:42.368001Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 134 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:03:42.368042Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:03:42.368245Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T09:03:42.368291Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:03:42.368414Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T09:03:42.368484Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T09:03:42.370320Z node 1 :FLAT_TX_SCHEMESHARD INF ... 
025-05-07T09:03:42.437130Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_external_table.cpp:300: [72057594046678944] TCreateExternalTable Propose: opId# 126:0, path# /MyRoot/DirA/Table2 2025-05-07T09:03:42.437361Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 126:1, propose status:StatusSchemeError, reason: Type 'BlaBlaType' specified for column 'RowId' is not supported by storage, at schemeshard: 72057594046678944 2025-05-07T09:03:42.439017Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 126, response: Status: StatusSchemeError Reason: "Type \'BlaBlaType\' specified for column \'RowId\' is not supported by storage" TxId: 126 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:03:42.439219Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 126, database: /MyRoot, subject: , status: StatusSchemeError, reason: Type 'BlaBlaType' specified for column 'RowId' is not supported by storage, operation: CREATE EXTERNAL TABLE, path: /MyRoot/DirA/Table2 TestModificationResult got TxId: 126, wait until txId: 126 TestModificationResults wait txId: 127 2025-05-07T09:03:42.441250Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/DirA" OperationType: ESchemeOpCreateExternalTable CreateExternalTable { Name: "Table2" SourceType: "General" DataSourcePath: "/MyRoot/ExternalDataSource" Location: "/" Columns { Name: "" Type: "Uint64" } } } TxId: 127 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T09:03:42.441507Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_create_external_table.cpp:427: [72057594046678944] CreateNewExternalTable, opId 127:0, feature flag EnableReplaceIfExistsForExternalEntities 0, tx WorkingDir: "/MyRoot/DirA" OperationType: ESchemeOpCreateExternalTable FailOnExist: false CreateExternalTable { Name: "Table2" SourceType: "General" DataSourcePath: "/MyRoot/ExternalDataSource" Location: "/" Columns { Name: "" Type: "Uint64" } } 2025-05-07T09:03:42.441590Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_external_table.cpp:300: [72057594046678944] TCreateExternalTable Propose: opId# 127:0, path# /MyRoot/DirA/Table2 2025-05-07T09:03:42.441689Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 127:1, propose status:StatusSchemeError, reason: Columns cannot have an empty name, at schemeshard: 72057594046678944 2025-05-07T09:03:42.443182Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 127, response: Status: StatusSchemeError Reason: "Columns cannot have an empty name" TxId: 127 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:03:42.443334Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 127, database: /MyRoot, subject: , status: StatusSchemeError, reason: Columns cannot have an empty name, operation: CREATE EXTERNAL TABLE, path: /MyRoot/DirA/Table2 TestModificationResult got TxId: 127, wait until txId: 127 TestModificationResults wait txId: 128 2025-05-07T09:03:42.445369Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/DirA" OperationType: ESchemeOpCreateExternalTable CreateExternalTable { Name: "Table2" SourceType: "General" 
DataSourcePath: "/MyRoot/ExternalDataSource" Location: "/" Columns { Name: "RowId" TypeId: 27 } } } TxId: 128 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T09:03:42.445595Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_create_external_table.cpp:427: [72057594046678944] CreateNewExternalTable, opId 128:0, feature flag EnableReplaceIfExistsForExternalEntities 0, tx WorkingDir: "/MyRoot/DirA" OperationType: ESchemeOpCreateExternalTable FailOnExist: false CreateExternalTable { Name: "Table2" SourceType: "General" DataSourcePath: "/MyRoot/ExternalDataSource" Location: "/" Columns { Name: "RowId" TypeId: 27 } } 2025-05-07T09:03:42.445704Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_external_table.cpp:300: [72057594046678944] TCreateExternalTable Propose: opId# 128:0, path# /MyRoot/DirA/Table2 2025-05-07T09:03:42.445807Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 128:1, propose status:StatusSchemeError, reason: Cannot set TypeId for column 'RowId', use Type, at schemeshard: 72057594046678944 2025-05-07T09:03:42.447437Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 128, response: Status: StatusSchemeError Reason: "Cannot set TypeId for column \'RowId\', use Type" TxId: 128 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:03:42.447575Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 128, database: /MyRoot, subject: , status: StatusSchemeError, reason: Cannot set TypeId for column 'RowId', use Type, operation: CREATE EXTERNAL TABLE, path: /MyRoot/DirA/Table2 TestModificationResult got TxId: 128, wait until txId: 128 TestModificationResults wait txId: 129 2025-05-07T09:03:42.449507Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/DirA" OperationType: ESchemeOpCreateExternalTable CreateExternalTable { Name: "Table2" SourceType: "General" DataSourcePath: "/MyRoot/ExternalDataSource" Location: "/" Columns { Name: "RowId" } } } TxId: 129 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T09:03:42.449733Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_create_external_table.cpp:427: [72057594046678944] CreateNewExternalTable, opId 129:0, feature flag EnableReplaceIfExistsForExternalEntities 0, tx WorkingDir: "/MyRoot/DirA" OperationType: ESchemeOpCreateExternalTable FailOnExist: false CreateExternalTable { Name: "Table2" SourceType: "General" DataSourcePath: "/MyRoot/ExternalDataSource" Location: "/" Columns { Name: "RowId" } } 2025-05-07T09:03:42.449812Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_external_table.cpp:300: [72057594046678944] TCreateExternalTable Propose: opId# 129:0, path# /MyRoot/DirA/Table2 2025-05-07T09:03:42.449900Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 129:1, propose status:StatusSchemeError, reason: Missing Type for column 'RowId', at schemeshard: 72057594046678944 2025-05-07T09:03:42.451423Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 129, response: Status: StatusSchemeError Reason: "Missing Type for column \'RowId\'" TxId: 129 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:03:42.451555Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 
129, database: /MyRoot, subject: , status: StatusSchemeError, reason: Missing Type for column 'RowId', operation: CREATE EXTERNAL TABLE, path: /MyRoot/DirA/Table2 TestModificationResult got TxId: 129, wait until txId: 129 TestModificationResults wait txId: 130 2025-05-07T09:03:42.453671Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/DirA" OperationType: ESchemeOpCreateExternalTable CreateExternalTable { Name: "Table2" SourceType: "General" DataSourcePath: "/MyRoot/ExternalDataSource" Location: "/" Columns { Name: "RowId" Type: "Uint64" Id: 2 } Columns { Name: "RowId2" Type: "Uint64" Id: 2 } } } TxId: 130 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T09:03:42.453884Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_create_external_table.cpp:427: [72057594046678944] CreateNewExternalTable, opId 130:0, feature flag EnableReplaceIfExistsForExternalEntities 0, tx WorkingDir: "/MyRoot/DirA" OperationType: ESchemeOpCreateExternalTable FailOnExist: false CreateExternalTable { Name: "Table2" SourceType: "General" DataSourcePath: "/MyRoot/ExternalDataSource" Location: "/" Columns { Name: "RowId" Type: "Uint64" Id: 2 } Columns { Name: "RowId2" Type: "Uint64" Id: 2 } } 2025-05-07T09:03:42.453986Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_external_table.cpp:300: [72057594046678944] TCreateExternalTable Propose: opId# 130:0, path# /MyRoot/DirA/Table2 2025-05-07T09:03:42.454143Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 130:1, propose status:StatusSchemeError, reason: Duplicate column id: 2, at schemeshard: 72057594046678944 2025-05-07T09:03:42.455734Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 130, response: Status: StatusSchemeError Reason: "Duplicate column id: 2" TxId: 130 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:03:42.455857Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 130, database: /MyRoot, subject: , status: StatusSchemeError, reason: Duplicate column id: 2, operation: CREATE EXTERNAL TABLE, path: /MyRoot/DirA/Table2 TestModificationResult got TxId: 130, wait until txId: 130 TestModificationResults wait txId: 131 2025-05-07T09:03:42.457832Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/DirA" OperationType: ESchemeOpCreateExternalTable CreateExternalTable { Name: "Table2" SourceType: "General" DataSourcePath: "/MyRoot/ExternalDataSource1" Location: "/" Columns { Name: "RowId" Type: "Uint64" } Columns { Name: "Value" Type: "Utf8" } } } TxId: 131 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T09:03:42.458119Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_create_external_table.cpp:427: [72057594046678944] CreateNewExternalTable, opId 131:0, feature flag EnableReplaceIfExistsForExternalEntities 0, tx WorkingDir: "/MyRoot/DirA" OperationType: ESchemeOpCreateExternalTable FailOnExist: false CreateExternalTable { Name: "Table2" SourceType: "General" DataSourcePath: "/MyRoot/ExternalDataSource1" Location: "/" Columns { Name: "RowId" Type: "Uint64" } Columns { Name: "Value" Type: "Utf8" } } 2025-05-07T09:03:42.458194Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_external_table.cpp:300: [72057594046678944] 
TCreateExternalTable Propose: opId# 131:0, path# /MyRoot/DirA/Table2 2025-05-07T09:03:42.458327Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 131:1, propose status:StatusPathDoesNotExist, reason: Check failed: path: '/MyRoot/ExternalDataSource1', error: path hasn't been resolved, nearest resolved path: '/MyRoot' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), at schemeshard: 72057594046678944 2025-05-07T09:03:42.459973Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 131, response: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/ExternalDataSource1\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" TxId: 131 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:03:42.460089Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 131, database: /MyRoot, subject: , status: StatusPathDoesNotExist, reason: Check failed: path: '/MyRoot/ExternalDataSource1', error: path hasn't been resolved, nearest resolved path: '/MyRoot' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), operation: CREATE EXTERNAL TABLE, path: /MyRoot/DirA/Table2 TestModificationResult got TxId: 131, wait until txId: 131 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_external_table/unittest >> TExternalTableTest::ParallelCreateExternalTable [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2141] Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:127:2058] recipient: [1:109:2141] 2025-05-07T09:03:42.101854Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T09:03:42.101994Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:03:42.102051Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T09:03:42.102093Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T09:03:42.103477Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T09:03:42.103565Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T09:03:42.103651Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:03:42.103747Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 
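(Aside: txIds 126 through 131 above are deliberately malformed CreateExternalTable proposals, each rejected at propose time: unsupported column type, empty column name, TypeId set instead of Type, missing Type, duplicate column id, and a nonexistent data source. Only the first and last of these have a direct YQL spelling; the sketch below assumes the YQL layer routes to the same propose path, while the empty-name, TypeId, and duplicate-id cases are protobuf-level constructions with no YQL equivalent.

    -- Mirrors txId 126: expected to fail with StatusSchemeError,
    -- "Type 'BlaBlaType' specified for column 'RowId' is not supported by storage".
    CREATE EXTERNAL TABLE `/MyRoot/DirA/Table2` (
        RowId BlaBlaType
    ) WITH (
        DATA_SOURCE = "/MyRoot/ExternalDataSource",
        LOCATION = "/"
    );

    -- Mirrors txId 131: the data source path does not resolve, so the
    -- propose fails with StatusPathDoesNotExist before any parts ignite.
    CREATE EXTERNAL TABLE `/MyRoot/DirA/Table2` (
        RowId Uint64,
        Value Utf8
    ) WITH (
        DATA_SOURCE = "/MyRoot/ExternalDataSource1",
        LOCATION = "/"
    );
)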
2025-05-07T09:03:42.104588Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T09:03:42.106989Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T09:03:42.183579Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7610: Got new config: QueryServiceConfig { AvailableExternalDataSources: "ObjectStorage" AvailableExternalDataSources: "ClickHouse" AvailableExternalDataSources: "PostgreSQL" AvailableExternalDataSources: "MySQL" AvailableExternalDataSources: "Ydb" AvailableExternalDataSources: "YT" AvailableExternalDataSources: "Greenplum" AvailableExternalDataSources: "MsSQLServer" AvailableExternalDataSources: "Oracle" AvailableExternalDataSources: "Logging" AvailableExternalDataSources: "Solomon" } 2025-05-07T09:03:42.183659Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:03:42.184415Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# ObjectStorage, ClickHouse, PostgreSQL, MySQL, Ydb, YT, Greenplum, MsSQLServer, Oracle, Logging, Solomon 2025-05-07T09:03:42.199063Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T09:03:42.199605Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T09:03:42.199768Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T09:03:42.207590Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T09:03:42.207809Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T09:03:42.210075Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T09:03:42.210358Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T09:03:42.216510Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:03:42.223722Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:03:42.223824Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:03:42.223974Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T09:03:42.224032Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:03:42.224161Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T09:03:42.225350Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 
72057594046678944 2025-05-07T09:03:42.232064Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:239:2058] recipient: [1:15:2062] 2025-05-07T09:03:42.339441Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T09:03:42.339619Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:03:42.339763Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T09:03:42.340557Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T09:03:42.340605Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:03:42.342929Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T09:03:42.343022Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T09:03:42.343181Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:03:42.343230Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T09:03:42.343264Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T09:03:42.343291Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T09:03:42.344655Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:03:42.344704Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T09:03:42.344744Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T09:03:42.346085Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:03:42.346122Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:03:42.346158Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:03:42.346225Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T09:03:42.349955Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T09:03:42.351401Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T09:03:42.352615Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T09:03:42.353429Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T09:03:42.353522Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 134 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:03:42.353566Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:03:42.354909Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T09:03:42.354976Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:03:42.355128Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T09:03:42.355193Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T09:03:42.356663Z node 1 :FLAT_TX_SCHEMESHARD INF ... 
ter [1:373:2364] 2025-05-07T09:03:42.460461Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 127: got EvNotifyTxCompletionResult 2025-05-07T09:03:42.460474Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 127: satisfy waiter [1:373:2364] TestWaitNotification: OK eventTxId 125 TestWaitNotification: OK eventTxId 126 TestWaitNotification: OK eventTxId 127 2025-05-07T09:03:42.460889Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/DirA/ExternalTable1" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T09:03:42.461059Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/DirA/ExternalTable1" took 164us result status StatusSuccess 2025-05-07T09:03:42.461308Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/DirA/ExternalTable1" PathDescription { Self { Name: "ExternalTable1" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeExternalTable CreateFinished: true CreateTxId: 126 CreateStep: 5000005 ParentPathId: 3 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 ExternalTableVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } ExternalTableDescription { Name: "ExternalTable1" PathId { OwnerId: 72057594046678944 LocalId: 4 } Version: 1 SourceType: "ObjectStorage" DataSourcePath: "/MyRoot/ExternalDataSource" Location: "/" Columns { Name: "RowId" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false } Columns { Name: "Value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false } Content: "" } } PathId: 4 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:03:42.461883Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/DirA/ExternalTable2" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T09:03:42.462032Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/DirA/ExternalTable2" took 166us result status StatusSuccess 2025-05-07T09:03:42.462280Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/DirA/ExternalTable2" PathDescription { Self { Name: "ExternalTable2" PathId: 5 SchemeshardId: 72057594046678944 PathType: 
EPathTypeExternalTable CreateFinished: true CreateTxId: 127 CreateStep: 5000004 ParentPathId: 3 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 ExternalTableVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } ExternalTableDescription { Name: "ExternalTable2" PathId { OwnerId: 72057594046678944 LocalId: 5 } Version: 1 SourceType: "ObjectStorage" DataSourcePath: "/MyRoot/ExternalDataSource" Location: "/" Columns { Name: "key1" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false } Columns { Name: "key2" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false } Columns { Name: "RowId" Type: "Uint64" TypeId: 4 Id: 3 NotNull: false } Columns { Name: "Value" Type: "Utf8" TypeId: 4608 Id: 4 NotNull: false } Content: "" } } PathId: 5 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:03:42.462891Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/DirA" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T09:03:42.463010Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/DirA" took 128us result status StatusSuccess 2025-05-07T09:03:42.464475Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/DirA" PathDescription { Self { Name: "DirA" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 125 CreateStep: 5000003 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 6 } ChildrenExist: true } Children { Name: "ExternalTable1" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeExternalTable CreateFinished: true CreateTxId: 126 CreateStep: 5000005 ParentPathId: 3 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: "ExternalTable2" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeExternalTable CreateFinished: true CreateTxId: 127 CreateStep: 5000004 ParentPathId: 3 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 
72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:03:42.464906Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/DirA/ExternalTable1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T09:03:42.465054Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/DirA/ExternalTable1" took 147us result status StatusSuccess 2025-05-07T09:03:42.465273Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/DirA/ExternalTable1" PathDescription { Self { Name: "ExternalTable1" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeExternalTable CreateFinished: true CreateTxId: 126 CreateStep: 5000005 ParentPathId: 3 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 ExternalTableVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } ExternalTableDescription { Name: "ExternalTable1" PathId { OwnerId: 72057594046678944 LocalId: 4 } Version: 1 SourceType: "ObjectStorage" DataSourcePath: "/MyRoot/ExternalDataSource" Location: "/" Columns { Name: "RowId" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false } Columns { Name: "Value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false } Content: "" } } PathId: 4 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:03:42.465682Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/DirA/ExternalTable2" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T09:03:42.465847Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/DirA/ExternalTable2" took 137us result status StatusSuccess 2025-05-07T09:03:42.466078Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/DirA/ExternalTable2" PathDescription { Self { Name: "ExternalTable2" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeExternalTable CreateFinished: true CreateTxId: 127 CreateStep: 5000004 ParentPathId: 3 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" 
EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 ExternalTableVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } ExternalTableDescription { Name: "ExternalTable2" PathId { OwnerId: 72057594046678944 LocalId: 5 } Version: 1 SourceType: "ObjectStorage" DataSourcePath: "/MyRoot/ExternalDataSource" Location: "/" Columns { Name: "key1" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false } Columns { Name: "key2" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false } Columns { Name: "RowId" Type: "Uint64" TypeId: 4 Id: 3 NotNull: false } Columns { Name: "Value" Type: "Utf8" TypeId: 4608 Id: 4 NotNull: false } Content: "" } } PathId: 5 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TExternalTableTest::CreateExternalTableShouldFailIfSuchEntityAlreadyExists [GOOD] >> TBackupCollectionTests::HiddenByFeatureFlag >> TBackupCollectionTests::CreateAbsolutePath ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_external_table/unittest >> TExternalTableTest::CreateExternalTableShouldFailIfSuchEntityAlreadyExists [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2141] Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:127:2058] recipient: [1:109:2141] 2025-05-07T09:03:42.101811Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T09:03:42.101910Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:03:42.101945Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T09:03:42.102030Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T09:03:42.103461Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T09:03:42.103512Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T09:03:42.103588Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:03:42.103658Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: 
[RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T09:03:42.104320Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T09:03:42.106933Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T09:03:42.183546Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7610: Got new config: QueryServiceConfig { AvailableExternalDataSources: "ObjectStorage" AvailableExternalDataSources: "ClickHouse" AvailableExternalDataSources: "PostgreSQL" AvailableExternalDataSources: "MySQL" AvailableExternalDataSources: "Ydb" AvailableExternalDataSources: "YT" AvailableExternalDataSources: "Greenplum" AvailableExternalDataSources: "MsSQLServer" AvailableExternalDataSources: "Oracle" AvailableExternalDataSources: "Logging" AvailableExternalDataSources: "Solomon" } 2025-05-07T09:03:42.183607Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:03:42.184169Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# ObjectStorage, ClickHouse, PostgreSQL, MySQL, Ydb, YT, Greenplum, MsSQLServer, Oracle, Logging, Solomon 2025-05-07T09:03:42.198147Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T09:03:42.198537Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T09:03:42.198685Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T09:03:42.206465Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T09:03:42.206751Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T09:03:42.210093Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T09:03:42.210337Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T09:03:42.216097Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:03:42.223615Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:03:42.223682Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:03:42.223779Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T09:03:42.223831Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:03:42.224018Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T09:03:42.225301Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T09:03:42.230976Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:239:2058] recipient: [1:15:2062] 2025-05-07T09:03:42.337360Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T09:03:42.338384Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:03:42.339349Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T09:03:42.340554Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T09:03:42.340620Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:03:42.342915Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T09:03:42.343010Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T09:03:42.343169Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:03:42.343266Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T09:03:42.343302Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T09:03:42.343329Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T09:03:42.344807Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:03:42.344849Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T09:03:42.344887Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T09:03:42.346078Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 
2025-05-07T09:03:42.346112Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:03:42.346159Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:03:42.346201Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T09:03:42.350398Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T09:03:42.351953Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T09:03:42.352621Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T09:03:42.353329Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T09:03:42.353437Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 134 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:03:42.353488Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:03:42.354840Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T09:03:42.354890Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:03:42.355055Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T09:03:42.355148Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T09:03:42.356733Z node 1 :FLAT_TX_SCHEMESHARD INF ... 
pp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-05-07T09:03:43.121077Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 102 2025-05-07T09:03:43.121131Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 102 2025-05-07T09:03:43.121155Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 102 2025-05-07T09:03:43.121176Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 2 2025-05-07T09:03:43.121198Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-05-07T09:03:43.121800Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 2 PathOwnerId: 72057594046678944, cookie: 102 2025-05-07T09:03:43.121861Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 2 PathOwnerId: 72057594046678944, cookie: 102 2025-05-07T09:03:43.121882Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 102 2025-05-07T09:03:43.121902Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 2 2025-05-07T09:03:43.121921Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-05-07T09:03:43.121986Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 102, subscribers: 0 2025-05-07T09:03:43.123639Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-05-07T09:03:43.123832Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-05-07T09:03:43.124754Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 TestModificationResult got TxId: 102, wait until txId: 102 TestWaitNotification wait txId: 102 2025-05-07T09:03:43.124920Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2025-05-07T09:03:43.124953Z node 2 :FLAT_TX_SCHEMESHARD 
DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 2025-05-07T09:03:43.125272Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 2025-05-07T09:03:43.125353Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-05-07T09:03:43.125384Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [2:332:2323] TestWaitNotification: OK eventTxId 102 2025-05-07T09:03:43.125724Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/ExternalTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T09:03:43.125904Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/ExternalTable" took 173us result status StatusSuccess 2025-05-07T09:03:43.126170Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/ExternalTable" PathDescription { Self { Name: "ExternalTable" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeExternalTable CreateFinished: true CreateTxId: 102 CreateStep: 5000003 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 ExternalTableVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } ExternalTableDescription { Name: "ExternalTable" PathId { OwnerId: 72057594046678944 LocalId: 3 } Version: 1 SourceType: "ObjectStorage" DataSourcePath: "/MyRoot/ExternalDataSource" Location: "/" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false } Content: "" } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 TestModificationResults wait txId: 103 2025-05-07T09:03:43.128566Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateExternalTable CreateExternalTable { Name: "ExternalTable" SourceType: "General" DataSourcePath: "/MyRoot/ExternalDataSource" Location: "/new_location" Columns { Name: "key" Type: "Uint64" } Columns { Name: "value" Type: "Uint64" } } } TxId: 103 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T09:03:43.128866Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_create_external_table.cpp:427: [72057594046678944] CreateNewExternalTable, opId 103:0, feature flag EnableReplaceIfExistsForExternalEntities 1, tx 
WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateExternalTable FailOnExist: false CreateExternalTable { Name: "ExternalTable" SourceType: "General" DataSourcePath: "/MyRoot/ExternalDataSource" Location: "/new_location" Columns { Name: "key" Type: "Uint64" } Columns { Name: "value" Type: "Uint64" } } 2025-05-07T09:03:43.128952Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_external_table.cpp:300: [72057594046678944] TCreateExternalTable Propose: opId# 103:0, path# /MyRoot/ExternalTable 2025-05-07T09:03:43.129072Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 103:1, propose status:StatusAlreadyExists, reason: Check failed: path: '/MyRoot/ExternalTable', error: path exist, request accepts it (id: [OwnerId: 72057594046678944, LocalPathId: 3], type: EPathTypeExternalTable, state: EPathStateNoChanges), at schemeshard: 72057594046678944 2025-05-07T09:03:43.131013Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 103, response: Status: StatusAlreadyExists Reason: "Check failed: path: \'/MyRoot/ExternalTable\', error: path exist, request accepts it (id: [OwnerId: 72057594046678944, LocalPathId: 3], type: EPathTypeExternalTable, state: EPathStateNoChanges)" TxId: 103 SchemeshardId: 72057594046678944 PathId: 3 PathCreateTxId: 102, at schemeshard: 72057594046678944 2025-05-07T09:03:43.131160Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 103, database: /MyRoot, subject: , status: StatusAlreadyExists, reason: Check failed: path: '/MyRoot/ExternalTable', error: path exist, request accepts it (id: [OwnerId: 72057594046678944, LocalPathId: 3], type: EPathTypeExternalTable, state: EPathStateNoChanges), operation: CREATE EXTERNAL TABLE, path: /MyRoot/ExternalTable TestModificationResult got TxId: 103, wait until txId: 103 TestWaitNotification wait txId: 103 2025-05-07T09:03:43.131403Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 103: send EvNotifyTxCompletion 2025-05-07T09:03:43.131459Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 103 2025-05-07T09:03:43.131786Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 103, at schemeshard: 72057594046678944 2025-05-07T09:03:43.131864Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-05-07T09:03:43.131894Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [2:340:2331] TestWaitNotification: OK eventTxId 103 2025-05-07T09:03:43.132302Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/ExternalTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T09:03:43.132458Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/ExternalTable" took 166us result status StatusSuccess 2025-05-07T09:03:43.132688Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/ExternalTable" PathDescription { Self { Name: "ExternalTable" PathId: 3 
SchemeshardId: 72057594046678944 PathType: EPathTypeExternalTable CreateFinished: true CreateTxId: 102 CreateStep: 5000003 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 ExternalTableVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } ExternalTableDescription { Name: "ExternalTable" PathId { OwnerId: 72057594046678944 LocalId: 3 } Version: 1 SourceType: "ObjectStorage" DataSourcePath: "/MyRoot/ExternalDataSource" Location: "/" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false } Content: "" } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TExportToS3Tests::DropSourceTableBeforeTransferring >> TExportToS3Tests::ShouldOmitNonStrictStorageSettings >> TExportToS3Tests::CancelUponCreatingExportDirShouldSucceed >> TExportToS3Tests::ShouldPreserveIncrBackupFlag >> TExportToS3Tests::CheckItemProgress >> TExportToS3Tests::RebootDuringCompletion |92.5%| [TA] $(B)/ydb/core/tx/schemeshard/ut_external_table/test-results/unittest/{meta.json ... results_accumulator.log} |92.5%| [TA] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_external_table/test-results/unittest/{meta.json ... 
results_accumulator.log} >> TExportToS3Tests::ShouldSucceedOnSingleShardTable >> TExportToS3Tests::DropCopiesBeforeTransferring1 >> TExportToS3Tests::RebootDuringAbortion >> TExportToS3Tests::CancelUponTransferringSingleShardTableShouldSucceed >> TExportToS3Tests::ShouldSucceedOnMultiShardTable >> overlapping_portions.py::TestOverlappingPortions::test [GOOD] >> TBackupCollectionTests::CreateAbsolutePath [GOOD] >> TBackupCollectionTests::Create >> TBackupCollectionTests::HiddenByFeatureFlag [GOOD] >> TBackupCollectionTests::DisallowedPath >> TBackupCollectionTests::Create [GOOD] >> TBackupCollectionTests::CreateTwice >> TBackupCollectionTests::DisallowedPath [GOOD] >> TBackupCollectionTests::ParallelCreate |92.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/executer_actor/ut/unittest |92.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/executer_actor/ut/unittest |92.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/executer_actor/ut/unittest |92.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/executer_actor/ut/unittest |92.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/executer_actor/ut/unittest |92.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/executer_actor/ut/unittest |92.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/executer_actor/ut/unittest >> BackupRestore::RestoreIndexTablePartitioningSettings [GOOD] >> BackupRestore::RestoreIndexTableReadReplicasSettings >> TBackupCollectionTests::ParallelCreate [GOOD] >> TBackupCollectionTests::Drop >> TBackupCollectionTests::CreateTwice [GOOD] >> TBackupCollectionTests::BackupAbsentCollection >> TExportToS3Tests::CancelUponCreatingExportDirShouldSucceed [GOOD] >> TBackupCollectionTests::Drop [GOOD] >> TBackupCollectionTests::DropTwice >> TBackupCollectionTests::BackupAbsentCollection [GOOD] >> TBackupCollectionTests::BackupDroppedCollection >> TExportToS3Tests::CancelUponCopyingTablesShouldSucceed >> TExportToS3Tests::DropCopiesBeforeTransferring1 [GOOD] >> TExportToS3Tests::ShouldSucceedOnSingleShardTable [GOOD] >> TExportToS3Tests::ShouldOmitNonStrictStorageSettings [GOOD] >> TExportToS3Tests::ShouldPreserveIncrBackupFlag [GOOD] >> TExportToS3Tests::DropSourceTableBeforeTransferring [GOOD] >> TExportToS3Tests::ShouldSucceedOnMultiShardTable [GOOD] >> TExportToS3Tests::ShouldRestartOnScanErrors >> TExportToS3Tests::UidAsIdempotencyKey >> TExportToS3Tests::CorruptedDyNumber >> TExportToS3Tests::RebootDuringAbortion [GOOD] >> TExportToS3Tests::ShouldExcludeBackupTableFromStats >> TExportToS3Tests::RebootDuringCompletion [GOOD] |92.5%| [TM] {asan, default-linux-x86_64, release} ydb/services/persqueue_cluster_discovery/ut/unittest >> BackupRestore::TestAllSchemeObjectTypes-EPathTypePersQueueGroup [GOOD] >> BackupRestore::TestAllSchemeObjectTypes-EPathTypeSubDomain [GOOD] >> BackupRestore::TestAllSchemeObjectTypes-EPathTypeRtmrVolume [GOOD] >> BackupRestore::TestAllSchemeObjectTypes-EPathTypeKesus >> TPQCDTest::TestCloudClientsAreConsistentlyDistributed >> TBackupCollectionTests::DropTwice [GOOD] >> TBackupCollectionTests::TableWithSystemColumns >> TExportToS3Tests::DropCopiesBeforeTransferring2 >> TExportToS3Tests::ExportStartTime >> TExportToS3Tests::ShouldSucceedOnManyTables |92.5%| [TM] {asan, default-linux-x86_64, release} ydb/services/persqueue_cluster_discovery/ut/unittest |92.5%| [TM] {asan, default-linux-x86_64, release} ydb/services/persqueue_cluster_discovery/ut/unittest >> TPQCDTest::TestPrioritizeLocalDatacenter >> 
TBackupCollectionTests::BackupDroppedCollection [GOOD] >> TBackupCollectionTests::BackupAbsentDirs >> TPQCDTest::TestUnavailableWithoutClustersList >> TPQCDTest::TestRelatedServicesAreRunning >> TPQCDTest::TestUnavailableWithoutNetClassifier >> TPQCDTest::TestUnavailableWithoutBoth >> TExportToS3Tests::SchemaMapping >> TExportToS3Tests::CheckItemProgress [GOOD] >> TExportToS3Tests::UidAsIdempotencyKey [GOOD] >> TExportToS3Tests::Checksums >> TExportToS3Tests::CorruptedDyNumber [GOOD] >> TopicService::OneConsumer_TheRangesDoNotOverlap [GOOD] >> TExportToS3Tests::UserSID >> TExportToS3Tests::CompletedExportEndTime |92.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/executer_actor/ut/unittest >> TBackupCollectionTests::BackupAbsentDirs [GOOD] >> TBackupCollectionTests::BackupNonIncrementalCollection >> TExportToS3Tests::ExportStartTime [GOOD] >> TExportToS3Tests::ExportPartitioningSettings >> TExportToS3Tests::DropCopiesBeforeTransferring2 [GOOD] >> TExportToS3Tests::ShouldRestartOnScanErrors [GOOD] >> TBackupCollectionTests::TableWithSystemColumns [GOOD] >> DemoTx::Scenario_1 [GOOD] >> TExportToS3Tests::ShouldSucceedOnManyTables [GOOD] |92.5%| [TA] $(B)/ydb/core/kqp/executer_actor/ut/test-results/unittest/{meta.json ... results_accumulator.log} |92.5%| [TA] {RESULT} $(B)/ydb/core/kqp/executer_actor/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> TExportToS3Tests::ShouldSucceedOnConcurrentExport >> TExportToS3Tests::UserSID [GOOD] >> TExportToS3Tests::EnableChecksumsPersistance >> TExportToS3Tests::ShouldSucceedOnConcurrentTxs >> TExportToS3Tests::Checksums [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/keyvalue/ut_trace/unittest >> TKeyValueTracingTest::ReadSmall [FAIL] Test command err: equal assertion failed at ydb/core/keyvalue/keyvalue_ut_trace.cpp:124, void TestOneRead(TString, TString): env.WilsonUploader->Traces.size() == 1 TBackTrace::Capture()+28 (0x103BC38C) NUnitTest::NPrivate::RaiseError(char const*, TBasicString> const&, bool)+592 (0x10877E10) TestOneRead(TBasicString>, TBasicString>)+4828 (0x1000C10C) NTestSuiteTKeyValueTracingTest::TTestCaseReadSmall::Execute_(NUnitTest::TTestContext&)+318 (0x100128FE) std::__y1::__function::__func, void ()>::operator()()+280 (0x10025F28) TColoredProcessor::Run(std::__y1::function, TBasicString> const&, char const*, bool)+534 (0x108A5F36) NUnitTest::TTestBase::Run(std::__y1::function, TBasicString> const&, char const*, bool)+505 (0x1087E999) NTestSuiteTKeyValueTracingTest::TCurrentTest::Execute()+1204 (0x10024DD4) NUnitTest::TTestFactory::Execute()+2438 (0x10880266) NUnitTest::RunMain(int, char**)+5213 (0x108A04AD) ??+0 (0x7F9269D2ED90) __libc_start_main+128 (0x7F9269D2EE40) _start+41 (0xD9A6029) ------- [TM] {asan, default-linux-x86_64, release} ydb/core/keyvalue/ut_trace/unittest >> TKeyValueTracingTest::ReadHuge [FAIL] Test command err: equal assertion failed at ydb/core/keyvalue/keyvalue_ut_trace.cpp:124, void TestOneRead(TString, TString): env.WilsonUploader->Traces.size() == 1 TBackTrace::Capture()+28 (0x103BC38C) NUnitTest::NPrivate::RaiseError(char const*, TBasicString> const&, bool)+592 (0x10877E10) TestOneRead(TBasicString>, TBasicString>)+4828 (0x1000C10C) NTestSuiteTKeyValueTracingTest::TTestCaseReadHuge::Execute_(NUnitTest::TTestContext&)+318 (0x10012CEE) std::__y1::__function::__func, void ()>::operator()()+280 (0x10025F28) TColoredProcessor::Run(std::__y1::function, TBasicString> const&, char const*, bool)+534 (0x108A5F36) NUnitTest::TTestBase::Run(std::__y1::function, 
TBasicString> const&, char const*, bool)+505 (0x1087E999) NTestSuiteTKeyValueTracingTest::TCurrentTest::Execute()+1204 (0x10024DD4) NUnitTest::TTestFactory::Execute()+2438 (0x10880266) NUnitTest::RunMain(int, char**)+5213 (0x108A04AD) ??+0 (0x7F63FA446D90) __libc_start_main+128 (0x7F63FA446E40) _start+41 (0xD9A6029) ------- [TM] {asan, default-linux-x86_64, release} ydb/core/keyvalue/ut_trace/unittest >> TKeyValueTracingTest::WriteSmall [FAIL] Test command err: assertion failed at ydb/core/keyvalue/keyvalue_ut_trace.cpp:103, void TestOneWrite(TString, TVector &&): (env.WilsonUploader->Traces.size() == 1) failed: (2 != 1) TBackTrace::Capture()+28 (0x103BC38C) NUnitTest::NPrivate::RaiseError(char const*, TBasicString> const&, bool)+592 (0x10877E10) TestOneWrite(TBasicString>, TVector>, std::__y1::allocator>>>&&)+4253 (0x1000673D) NTestSuiteTKeyValueTracingTest::TTestCaseWriteSmall::Execute_(NUnitTest::TTestContext&)+216 (0x10012278) std::__y1::__function::__func, void ()>::operator()()+280 (0x10025F28) TColoredProcessor::Run(std::__y1::function, TBasicString> const&, char const*, bool)+534 (0x108A5F36) NUnitTest::TTestBase::Run(std::__y1::function, TBasicString> const&, char const*, bool)+505 (0x1087E999) NTestSuiteTKeyValueTracingTest::TCurrentTest::Execute()+1204 (0x10024DD4) NUnitTest::TTestFactory::Execute()+2438 (0x10880266) NUnitTest::RunMain(int, char**)+5213 (0x108A04AD) ??+0 (0x7FEDA46A2D90) __libc_start_main+128 (0x7FEDA46A2E40) _start+41 (0xD9A6029) ------- [TM] {asan, default-linux-x86_64, release} ydb/core/keyvalue/ut_trace/unittest >> TKeyValueTracingTest::WriteHuge [FAIL] Test command err: assertion failed at ydb/core/keyvalue/keyvalue_ut_trace.cpp:103, void TestOneWrite(TString, TVector &&): (env.WilsonUploader->Traces.size() == 1) failed: (2 != 1) TBackTrace::Capture()+28 (0x103BC38C) NUnitTest::NPrivate::RaiseError(char const*, TBasicString> const&, bool)+592 (0x10877E10) TestOneWrite(TBasicString>, TVector>, std::__y1::allocator>>>&&)+4253 (0x1000673D) NTestSuiteTKeyValueTracingTest::TTestCaseWriteHuge::Execute_(NUnitTest::TTestContext&)+216 (0x10012588) std::__y1::__function::__func, void ()>::operator()()+280 (0x10025F28) TColoredProcessor::Run(std::__y1::function, TBasicString> const&, char const*, bool)+534 (0x108A5F36) NUnitTest::TTestBase::Run(std::__y1::function, TBasicString> const&, char const*, bool)+505 (0x1087E999) NTestSuiteTKeyValueTracingTest::TCurrentTest::Execute()+1204 (0x10024DD4) NUnitTest::TTestFactory::Execute()+2438 (0x10880266) NUnitTest::RunMain(int, char**)+5213 (0x108A04AD) ??+0 (0x7FE127A58D90) __libc_start_main+128 (0x7FE127A58E40) _start+41 (0xD9A6029) >> TExportToS3Tests::SchemaMapping [GOOD] >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_index_2__SYNC-pk_types2-all_types2-index2---SYNC] [GOOD] >> TExportToS3Tests::ChecksumsWithCompression >> TExportToS3Tests::SchemaMappingEncryption >> TExportToS3Tests::TablePermissions >> TopicService::OneConsumer_TheRangesOverlap ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_backup_collection/unittest >> TBackupCollectionTests::TableWithSystemColumns [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140] 2025-05-07T09:03:44.211099Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T09:03:44.211252Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:03:44.211297Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T09:03:44.211336Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T09:03:44.212513Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T09:03:44.212563Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T09:03:44.212619Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:03:44.212677Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T09:03:44.213255Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T09:03:44.215592Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T09:03:44.283284Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T09:03:44.283343Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:03:44.304022Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T09:03:44.304239Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T09:03:44.304396Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T09:03:44.311643Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T09:03:44.311962Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T09:03:44.316676Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T09:03:44.317018Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T09:03:44.323753Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:03:44.329927Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at 
schemeshard: 72057594046678944 2025-05-07T09:03:44.330021Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:03:44.330111Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T09:03:44.330161Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:03:44.330222Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T09:03:44.330898Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T09:03:44.336290Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T09:03:44.466054Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T09:03:44.467168Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:03:44.468224Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T09:03:44.469811Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T09:03:44.469886Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:03:44.472923Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T09:03:44.473059Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T09:03:44.473237Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:03:44.473366Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T09:03:44.473405Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T09:03:44.473440Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 
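Note: the records around this point trace one schemeshard suboperation through its numbered states: TCreateParts moves txid 1:0 from 2 to 3, TConfigureParts from 3 to 128, and NSubDomainState::TPropose from 128 to 240 on TEvOperationPlan. A minimal self-contained C++ sketch of that progression follows; the state numbers are copied from the "Change state for txid" records in this trace, and the stage names are taken from the adjacent ProgressState messages (the naming is illustrative, not the schemeshard's actual enum).

    #include <cstdio>

    // State numbers as they appear in this trace; naming is illustrative only.
    enum ETxState {
        CreateParts    = 2,   // "TCreateParts opId# 1:0 ProgressState"       -> 2 -> 3
        ConfigureParts = 3,   // "TConfigureParts operationId# 1:0"           -> 3 -> 128
        Propose        = 128, // "TPropose HandleReply TEvOperationPlan"      -> 128 -> 240
        Done           = 240  // terminal state once all parts report done
    };

    int main() {
        const ETxState path[] = {CreateParts, ConfigureParts, Propose, Done};
        // Reproduces the "Change state for txid" lines seen in this log.
        for (int i = 0; i + 1 < 4; ++i) {
            std::printf("Change state for txid 1:0 %d -> %d\n", path[i], path[i + 1]);
        }
        return 0;
    }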
2025-05-07T09:03:44.475216Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:03:44.475263Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T09:03:44.475301Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T09:03:44.476695Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:03:44.476737Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:03:44.476782Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:03:44.476827Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T09:03:44.481526Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T09:03:44.483473Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T09:03:44.484352Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T09:03:44.485235Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T09:03:44.485343Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:03:44.485394Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:03:44.487240Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T09:03:44.487325Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:03:44.487504Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T09:03:44.487584Z 
node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T09:03:44.489721Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:03:44.489770Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:03:44.489928Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:03:44.489964Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 8] Version: 3 } 2025-05-07T09:03:48.578586Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4924: StateWork, processing event NSchemeBoard::NSchemeshardEvents::TEvUpdateAck 2025-05-07T09:03:48.578617Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 8 Version: 3 PathOwnerId: 72057594046678944, cookie: 106 2025-05-07T09:03:48.578660Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 8 Version: 3 PathOwnerId: 72057594046678944, cookie: 106 2025-05-07T09:03:48.578676Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 106 2025-05-07T09:03:48.578693Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 106, pathId: [OwnerId: 72057594046678944, LocalPathId: 8], version: 3 2025-05-07T09:03:48.578711Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 8] was 4 2025-05-07T09:03:48.578752Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 106, ready parts: 1/2, is published: true 2025-05-07T09:03:48.578792Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046678944 2025-05-07T09:03:48.579138Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269877761, Sender [6:654:2603], Recipient [6:124:2150]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-05-07T09:03:48.579174Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4937: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-05-07T09:03:48.579200Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5713: Pipe server connected, at tablet: 72057594046678944 2025-05-07T09:03:48.579374Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269551620, Sender [6:593:2550], Recipient [6:124:2150]: NKikimrTxDataShard.TEvSchemaChanged Source { RawX1: 593 RawX2: 25769806326 } Origin: 72075186233409548 
State: 2 TxId: 106 Step: 0 Generation: 2 2025-05-07T09:03:48.579408Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4872: StateWork, processing event TEvDataShard::TEvSchemaChanged 2025-05-07T09:03:48.579491Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5472: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 593 RawX2: 25769806326 } Origin: 72075186233409548 State: 2 TxId: 106 Step: 0 Generation: 2 2025-05-07T09:03:48.579536Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation FindRelatedPartByTabletId, TxId: 106, tablet: 72075186233409548, partId: 1 2025-05-07T09:03:48.579660Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 106:1, at schemeshard: 72057594046678944, message: Source { RawX1: 593 RawX2: 25769806326 } Origin: 72075186233409548 State: 2 TxId: 106 Step: 0 Generation: 2 2025-05-07T09:03:48.579711Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1005: NTableState::TProposedWaitParts operationId# 106:1 HandleReply TEvSchemaChanged at tablet: 72057594046678944 2025-05-07T09:03:48.579786Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1009: NTableState::TProposedWaitParts operationId# 106:1 HandleReply TEvSchemaChanged at tablet: 72057594046678944 message: Source { RawX1: 593 RawX2: 25769806326 } Origin: 72075186233409548 State: 2 TxId: 106 Step: 0 Generation: 2 2025-05-07T09:03:48.579853Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:655: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 106:1, shardIdx: 72057594046678944:3, datashard: 72075186233409548, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-05-07T09:03:48.579899Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:674: all shard schema changes has been received, operationId: 106:1, at schemeshard: 72057594046678944 2025-05-07T09:03:48.579930Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:686: send schema changes ack message, operation: 106:1, datashard: 72075186233409548, at schemeshard: 72057594046678944 2025-05-07T09:03:48.579962Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 106:1 129 -> 240 2025-05-07T09:03:48.580101Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046678944 2025-05-07T09:03:48.583987Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-05-07T09:03:48.584131Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-05-07T09:03:48.584213Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 106 2025-05-07T09:03:48.584257Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-05-07T09:03:48.586826Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 106 2025-05-07T09:03:48.586881Z node 6 :FLAT_TX_SCHEMESHARD TRACE: 
schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-05-07T09:03:48.587015Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 106:1, at schemeshard: 72057594046678944 2025-05-07T09:03:48.587066Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-05-07T09:03:48.587166Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 106 2025-05-07T09:03:48.587191Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-05-07T09:03:48.587316Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 106:1, at schemeshard: 72057594046678944 2025-05-07T09:03:48.587361Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-05-07T09:03:48.587400Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:275: Activate send for 106:1 2025-05-07T09:03:48.587517Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:630: Send to actor: [6:593:2550] msg type: 269552132 msg: NKikimrTxDataShard.TEvSchemaChangedResult TxId: 106 at schemeshard: 72057594046678944 2025-05-07T09:03:48.587877Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 2146435072, Sender [6:124:2150], Recipient [6:124:2150]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-05-07T09:03:48.587922Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4857: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-05-07T09:03:48.587973Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 106:1, at schemeshard: 72057594046678944 2025-05-07T09:03:48.588036Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046678944] TDone opId# 106:1 ProgressState 2025-05-07T09:03:48.588199Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046678944 2025-05-07T09:03:48.588239Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#106:1 progress is 2/2 2025-05-07T09:03:48.588280Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 106 ready parts: 2/2 2025-05-07T09:03:48.588324Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#106:1 progress is 2/2 2025-05-07T09:03:48.588373Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 106 ready parts: 2/2 2025-05-07T09:03:48.588414Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 106, ready parts: 2/2, is published: true 2025-05-07T09:03:48.588492Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [6:303:2294] message: TxId: 106 2025-05-07T09:03:48.588556Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 106 ready parts: 2/2 2025-05-07T09:03:48.588600Z 
node 6 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 106:0 2025-05-07T09:03:48.588628Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 106:0 2025-05-07T09:03:48.588695Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 7] was 2 2025-05-07T09:03:48.588734Z node 6 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 106:1 2025-05-07T09:03:48.588755Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 106:1 2025-05-07T09:03:48.588840Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 8] was 3 2025-05-07T09:03:48.590470Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-05-07T09:03:48.590546Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:630: Send to actor: [6:303:2294] msg type: 271124998 msg: NKikimrScheme.TEvNotifyTxCompletionResult TxId: 106 at schemeshard: 72057594046678944 2025-05-07T09:03:48.590684Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 106: got EvNotifyTxCompletionResult 2025-05-07T09:03:48.590717Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 106: satisfy waiter [6:621:2570] 2025-05-07T09:03:48.590895Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269877764, Sender [6:623:2572], Recipient [6:124:2150]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-05-07T09:03:48.590923Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4938: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-05-07T09:03:48.590940Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5761: Server pipe is reset, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 106 |92.5%| [TA] $(B)/ydb/core/keyvalue/ut_trace/test-results/unittest/{meta.json ... results_accumulator.log} |92.5%| [TA] {RESULT} $(B)/ydb/core/keyvalue/ut_trace/test-results/unittest/{meta.json ... 
results_accumulator.log}
|92.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_allocator_client/ut/unittest
>> TTxAllocatorClientTest::InitiatingRequest
|92.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_allocator_client/ut/unittest
|92.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_allocator_client/ut/unittest
|92.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_allocator_client/ut/unittest
|92.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_allocator_client/ut/unittest
>> TTxAllocatorClientTest::Boot
>> TPersQueueTest::SchemeshardRestart [GOOD]
>> TPersQueueTest::SameOffset
>> TExportToS3Tests::CompletedExportEndTime [GOOD]
>> TExportToS3Tests::ExportPartitioningSettings [GOOD]
>> TBackupCollectionTests::BackupNonIncrementalCollection [GOOD]
------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> YdbLogStore::AlterLogTable [FAIL]
Test command err:
2025-05-07T09:00:18.970189Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501626038164085207:2139];send_to=[0:7307199536658146131:7762515];
2025-05-07T09:00:18.970245Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message;
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002889/r3tmp/tmpVYzVkD/pdisk_1.dat
2025-05-07T09:00:20.074867Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError;
2025-05-07T09:00:20.267872Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded
2025-05-07T09:00:20.364007Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2025-05-07T09:00:20.364113Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting
2025-05-07T09:00:20.388100Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected
TServer::EnableGrpc on GrpcPort 28483, node 1
2025-05-07T09:00:20.933494Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe)
2025-05-07T09:00:20.933521Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe)
2025-05-07T09:00:20.933528Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe)
2025-05-07T09:00:20.933642Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration
TClient is connected to server localhost:27282
WaitRootIsUp 'Root'...
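Note: the ESchemeOpCreateColumnStore propose record a few lines below carries the full schema for /Root/LogStore. The following declarations restate that payload in readable form for scanning; the columns, key, codec, and shard count are copied from the propose record itself (this first run declares timestamp as Uint8), and the struct is only an illustration, not a YDB API.

    // Columns copied from the CreateColumnStore propose record below.
    // NotNull defaults to false where the record omits it.
    struct TColumnSpec { const char* Name; const char* Type; bool NotNull; };

    static const TColumnSpec LogStoreSchema[] = {
        {"timestamp",     "Uint8",        true},
        {"resource_type", "Utf8",         true},
        {"resource_id",   "Utf8",         true},
        {"uid",           "Utf8",         true},
        {"level",         "Int32",        false},
        {"message",       "Utf8",         false},
        {"json_payload",  "JsonDocument", false},
        {"request_id",    "Utf8",         false},
        {"ingested_at",   "Timestamp",    false},
        {"saved_at",      "Timestamp",    false},
    };
    // KeyColumnNames: timestamp, resource_type, resource_id, uid.
    // DefaultCompression: ColumnCodecLZ4; ColumnShardCount: 4.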
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:00:21.561001Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:00:22.010872Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/Root" OperationType: ESchemeOpCreateColumnStore CreateColumnStore { Name: "LogStore" ColumnShardCount: 4 SchemaPresets { Name: "default" Schema { Columns { Name: "timestamp" Type: "Uint8" NotNull: true } Columns { Name: "resource_type" Type: "Utf8" NotNull: true } Columns { Name: "resource_id" Type: "Utf8" NotNull: true } Columns { Name: "uid" Type: "Utf8" NotNull: true } Columns { Name: "level" Type: "Int32" } Columns { Name: "message" Type: "Utf8" } Columns { Name: "json_payload" Type: "JsonDocument" } Columns { Name: "request_id" Type: "Utf8" } Columns { Name: "ingested_at" Type: "Timestamp" } Columns { Name: "saved_at" Type: "Timestamp" } KeyColumnNames: "timestamp" KeyColumnNames: "resource_type" KeyColumnNames: "resource_id" KeyColumnNames: "uid" DefaultCompression { Codec: ColumnCodecLZ4 } } } } } TxId: 281474976710658 TabletId: 72057594046644480 PeerName: "ipv6:[::1]:38910" , at schemeshard: 72057594046644480 2025-05-07T09:00:22.011354Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: create_store.cpp:331: TCreateOlapStore Propose, path: /Root/LogStore, opId: 281474976710658:0, at schemeshard: 72057594046644480 2025-05-07T09:00:22.011894Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:319: AttachChild: child attached as only one child to the parent, parent id: [OwnerId: 72057594046644480, LocalPathId: 1], parent name: Root, child name: LogStore, child id: [OwnerId: 72057594046644480, LocalPathId: 2], at schemeshard: 72057594046644480 2025-05-07T09:00:22.011936Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 0 2025-05-07T09:00:22.012013Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason new shard created for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 1 2025-05-07T09:00:22.012069Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason new shard created for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 2 2025-05-07T09:00:22.012133Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason new shard created 
for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 3 2025-05-07T09:00:22.012171Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason new shard created for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 4 2025-05-07T09:00:22.012517Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 5 2025-05-07T09:00:22.018591Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 281474976710658:0 1 -> 2 2025-05-07T09:00:22.022499Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 281474976710658:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-05-07T09:00:22.022527Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnStore, opId: 281474976710658:0, at schemeshard: 72057594046644480 2025-05-07T09:00:22.022670Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 1 2025-05-07T09:00:22.022707Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 6 2025-05-07T09:00:22.027317Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 281474976710658, response: Status: StatusAccepted TxId: 281474976710658 SchemeshardId: 72057594046644480 PathId: 2, at schemeshard: 72057594046644480 2025-05-07T09:00:22.027513Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976710658, database: /Root, subject: , status: StatusAccepted, operation: CREATE COLUMN STORE, path: /Root/LogStore 2025-05-07T09:00:22.034634Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046644480 2025-05-07T09:00:22.034670Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046644480, txId: 281474976710658, path id: [OwnerId: 72057594046644480, LocalPathId: 1] 2025-05-07T09:00:22.034857Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046644480, txId: 281474976710658, path id: [OwnerId: 72057594046644480, LocalPathId: 2] 2025-05-07T09:00:22.034959Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046644480 2025-05-07T09:00:22.034975Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:7501626046754020389:2393], at schemeshard: 72057594046644480, txId: 281474976710658, path id: 1 2025-05-07T09:00:22.034990Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:7501626046754020389:2393], at schemeshard: 72057594046644480, txId: 281474976710658, path id: 2 2025-05-07T09:00:22.035034Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 281474976710658:0, at schemeshard: 72057594046644480 2025-05-07T09:00:22.035081Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 281474976710658:0 ProgressState, operation type: TxCreateOlapStore, at tablet# 72057594046644480 2025-05-07T09:00:22.035797Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:357: TCreateParts opId# 281474976710658:0 CreateRequest Event to Hive: 72057594037968897 msg: Owner: 72057594046644480 OwnerIdx: 1 TabletType: ColumnShard ObjectDomain { SchemeShard: 72057594046644480 PathId: 1 } ObjectId: 2 BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedCh ... 
DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 4 2025-05-07T09:02:58.952566Z node 64 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5898: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 4 ShardOwnerId: 72057594046644480 ShardLocalIdx: 4, at schemeshard: 72057594046644480 2025-05-07T09:02:58.952619Z node 64 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 64, TabletId: 72075186224037889 not found 2025-05-07T09:02:58.952643Z node 64 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 64, TabletId: 72075186224037891 not found 2025-05-07T09:02:58.952661Z node 64 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 64, TabletId: 72075186224037888 not found 2025-05-07T09:02:58.953318Z node 64 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 64, TabletId: 72075186224037890 not found 2025-05-07T09:02:58.953453Z node 64 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 3 2025-05-07T09:02:58.953607Z node 64 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5898: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 1 ShardOwnerId: 72057594046644480 ShardLocalIdx: 1, at schemeshard: 72057594046644480 2025-05-07T09:02:58.954315Z node 64 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 2 2025-05-07T09:02:58.954441Z node 64 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5898: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 3 ShardOwnerId: 72057594046644480 ShardLocalIdx: 3, at schemeshard: 72057594046644480 2025-05-07T09:02:58.954948Z node 64 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 1 2025-05-07T09:02:58.955116Z node 64 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046644480 2025-05-07T09:02:58.955137Z node 64 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046644480, LocalPathId: 2], at schemeshard: 72057594046644480 2025-05-07T09:02:58.955169Z node 64 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 1 2025-05-07T09:02:58.956135Z node 64 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[64:7501626722223183986:2327];ev=NKikimr::TEvTablet::TEvTabletDead;fline=columnshard_impl.cpp:1153;event=tablet_die; 2025-05-07T09:02:58.960152Z node 64 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[64:7501626722223183953:2326];ev=NKikimr::TEvTablet::TEvTabletDead;fline=columnshard_impl.cpp:1153;event=tablet_die; 2025-05-07T09:02:58.964316Z node 64 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[64:7501626722223183947:2324];ev=NKikimr::TEvTablet::TEvTabletDead;fline=columnshard_impl.cpp:1153;event=tablet_die; 2025-05-07T09:02:58.967875Z node 64 :FLAT_TX_SCHEMESHARD 
DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:2 2025-05-07T09:02:58.967912Z node 64 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:2 tabletId 72075186224037889 2025-05-07T09:02:58.967964Z node 64 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:4 2025-05-07T09:02:58.967972Z node 64 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:4 tabletId 72075186224037891 2025-05-07T09:02:58.977026Z node 64 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:1 2025-05-07T09:02:58.977069Z node 64 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:1 tabletId 72075186224037888 2025-05-07T09:02:58.977122Z node 64 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:3 2025-05-07T09:02:58.977142Z node 64 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:3 tabletId 72075186224037890 2025-05-07T09:02:58.977182Z node 64 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046644480 2025-05-07T09:03:02.836734Z node 67 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[67:7501626739123664625:2073];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:03:02.836807Z node 67 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002889/r3tmp/tmpltG61h/pdisk_1.dat 2025-05-07T09:03:02.969751Z node 67 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:03:03.006775Z node 67 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(67, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:03:03.006858Z node 67 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(67, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:03:03.009021Z node 67 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(67, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 29059, node 67 2025-05-07T09:03:03.067255Z node 67 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:03:03.067277Z node 67 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:03:03.067285Z node 67 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:03:03.067420Z node 67 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:25015 WaitRootIsUp 'Root'... 
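Note: the second environment below (node 67) answers the CreateColumnStore proposal with StatusPreconditionFailed, "Column stores are not supported", and that status is what trips the assertion at ydb_logstore_ut.cpp:435 further down. A self-contained C++ mock of the failing comparison follows; the EStatus names and the message format are taken from the assertion text in this log, while everything else is illustrative rather than the test's actual code.

    #include <cstdio>

    // Mock of the check reported below: the test expects EStatus::SUCCESS,
    // but the schemeshard proposal returns PRECONDITION_FAILED.
    enum class EStatus { SUCCESS, PRECONDITION_FAILED };

    static const char* ToString(EStatus s) {
        return s == EStatus::SUCCESS ? "SUCCESS" : "PRECONDITION_FAILED";
    }

    int main() {
        const EStatus expected = EStatus::SUCCESS;
        const EStatus actual = EStatus::PRECONDITION_FAILED; // "Column stores are not supported"
        if (actual != expected) {
            std::printf("(res.GetStatus() == EStatus::SUCCESS) failed: (%s != %s)\n",
                        ToString(actual), ToString(expected));
        }
        return 0;
    }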
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:03:03.348742Z node 67 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:03:03.418799Z node 67 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/Root" OperationType: ESchemeOpCreateColumnStore CreateColumnStore { Name: "LogStore" ColumnShardCount: 4 SchemaPresets { Name: "default" Schema { Columns { Name: "timestamp" Type: "Timestamp" NotNull: true } Columns { Name: "resource_type" Type: "Utf8" NotNull: true } Columns { Name: "resource_id" Type: "Utf8" NotNull: true } Columns { Name: "uid" Type: "Utf8" NotNull: true } Columns { Name: "level" Type: "Int32" } Columns { Name: "message" Type: "Utf8" } Columns { Name: "json_payload" Type: "JsonDocument" } Columns { Name: "request_id" Type: "Utf8" } Columns { Name: "ingested_at" Type: "Timestamp" } Columns { Name: "saved_at" Type: "Timestamp" } KeyColumnNames: "timestamp" KeyColumnNames: "resource_type" KeyColumnNames: "resource_id" KeyColumnNames: "uid" DefaultCompression { Codec: ColumnCodecLZ4 } } } } } TxId: 281474976715658 TabletId: 72057594046644480 PeerName: "ipv6:[::1]:53400" , at schemeshard: 72057594046644480 2025-05-07T09:03:03.419259Z node 67 :FLAT_TX_SCHEMESHARD NOTICE: create_store.cpp:331: TCreateOlapStore Propose, path: /Root/LogStore, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-05-07T09:03:03.419308Z node 67 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 281474976715658:1, propose status:StatusPreconditionFailed, reason: Column stores are not supported, at schemeshard: 72057594046644480 2025-05-07T09:03:03.421679Z node 67 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 281474976715658, response: Status: StatusPreconditionFailed Reason: "Column stores are not supported" TxId: 281474976715658 SchemeshardId: 72057594046644480, at schemeshard: 72057594046644480 2025-05-07T09:03:03.421883Z node 67 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976715658, database: /Root, subject: , status: StatusPreconditionFailed, reason: Column stores are not supported, operation: CREATE COLUMN STORE, path: /Root/LogStore 2025-05-07T09:03:03.422121Z node 67 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [67:7501626743418632839:2600] txid# 281474976715658, issues: { message: "Column stores are not supported" severity: 1 } assertion 
failed at ydb/services/ydb/ydb_logstore_ut.cpp:435, virtual void NTestSuiteYdbLogStore::TTestCaseAlterLogTable::Execute_(NUnitTest::TTestContext &): (res.GetStatus() == EStatus::SUCCESS) failed: (PRECONDITION_FAILED != SUCCESS)
: Error: Column stores are not supported
, with diff: (PRE|SUC)C(ONDITION_FAIL|)E(D|SS)
TBackTrace::Capture()+28 (0x1CDBA80C)
NUnitTest::NPrivate::RaiseError(char const*, TBasicString> const&, bool)+592 (0x1D2774B0)
NTestSuiteYdbLogStore::TTestCaseAlterLogTable::Execute_(NUnitTest::TTestContext&)+8888 (0x1C8E00A8)
std::__y1::__function::__func, void ()>::operator()()+280 (0x1C909538)
TColoredProcessor::Run(std::__y1::function, TBasicString> const&, char const*, bool)+534 (0x1D2AE696)
NUnitTest::TTestBase::Run(std::__y1::function, TBasicString> const&, char const*, bool)+505 (0x1D27E039)
NTestSuiteYdbLogStore::TCurrentTest::Execute()+1204 (0x1C908704)
NUnitTest::TTestFactory::Execute()+2438 (0x1D27F906)
NUnitTest::RunMain(int, char**)+5213 (0x1D2A8C0D)
??+0 (0x7EFC51149D90)
__libc_start_main+128 (0x7EFC51149E40)
_start+41 (0x19692029)
>> TExportToS3Tests::DisableAutoDropping
>> TExportToS3Tests::ExportIndexTablePartitioningSettings
>> TTxAllocatorClientTest::Boot [GOOD]
>> TExportToS3Tests::EnableChecksumsPersistance [GOOD]
>> TTxAllocatorClientTest::InitiatingRequest [GOOD]
>> DemoTx::Scenario_2
>> TExportToS3Tests::ShouldSucceedOnConcurrentTxs [GOOD]
>> TExportToS3Tests::ChecksumsWithCompression [GOOD]
>> TExportToS3Tests::TablePermissions [GOOD]
>> TExportToS3Tests::EncryptedExport
>> TPersQueueTest::UpdatePartitionLocation [GOOD]
>> TPersQueueTest::TopicServiceCommitOffset
>> TExportToS3Tests::ShouldSucceedOnConcurrentImport
>> TExportToS3Tests::Changefeeds
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_backup_collection/unittest >> TBackupCollectionTests::BackupNonIncrementalCollection [GOOD]
Test command err:
Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140]
IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140]
Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140]
2025-05-07T09:03:44.211119Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1
2025-05-07T09:03:44.211244Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10
2025-05-07T09:03:44.211284Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s
2025-05-07T09:03:44.211327Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration
2025-05-07T09:03:44.212555Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000
2025-05-07T09:03:44.212620Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000
2025-05-07T09:03:44.212688Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10
2025-05-07T09:03:44.212760Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, 
DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false
2025-05-07T09:03:44.213474Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources#
2025-05-07T09:03:44.215640Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute
2025-05-07T09:03:44.298767Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs
2025-05-07T09:03:44.298826Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded
2025-05-07T09:03:44.315508Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete
2025-05-07T09:03:44.315766Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute
2025-05-07T09:03:44.315931Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944
2025-05-07T09:03:44.321871Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete
2025-05-07T09:03:44.322175Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0
2025-05-07T09:03:44.322826Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944
2025-05-07T09:03:44.323031Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944
2025-05-07T09:03:44.325786Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944
2025-05-07T09:03:44.330099Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944
2025-05-07T09:03:44.330168Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944
2025-05-07T09:03:44.330243Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute
2025-05-07T09:03:44.330284Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1]
2025-05-07T09:03:44.330335Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete
2025-05-07T09:03:44.330958Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944
2025-05-07T09:03:44.337090Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0
Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062]
2025-05-07T09:03:44.472065Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944
2025-05-07T09:03:44.472301Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944
2025-05-07T09:03:44.472509Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0
2025-05-07T09:03:44.472732Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944
2025-05-07T09:03:44.472792Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944
2025-05-07T09:03:44.474941Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944
2025-05-07T09:03:44.475075Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot
2025-05-07T09:03:44.475272Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944
2025-05-07T09:03:44.475336Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944
2025-05-07T09:03:44.475385Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state
2025-05-07T09:03:44.475435Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3
2025-05-07T09:03:44.476996Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944
2025-05-07T09:03:44.477066Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944
2025-05-07T09:03:44.477104Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128
2025-05-07T09:03:44.478672Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944
2025-05-07T09:03:44.478726Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944
2025-05-07T09:03:44.478784Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944
2025-05-07T09:03:44.478835Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1
2025-05-07T09:03:44.490027Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545
2025-05-07T09:03:44.492205Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816
2025-05-07T09:03:44.492443Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545
FAKE_COORDINATOR: Add transaction: 1 at step: 5000001
FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0
FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001
2025-05-07T09:03:44.493435Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944
2025-05-07T09:03:44.493581Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944
2025-05-07T09:03:44.493631Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944
2025-05-07T09:03:44.493956Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240
2025-05-07T09:03:44.494036Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944
2025-05-07T09:03:44.494233Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1
2025-05-07T09:03:44.494320Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944
FAKE_COORDINATOR: Erasing txId 1
2025-05-07T09:03:44.496306Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944
2025-05-07T09:03:44.496375Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1]
2025-05-07T09:03:44.496567Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944
2025-05-07T09:03:44.496609Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ...
UG: schemeshard__operation.cpp:1617: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [7:304:2295] message: TxId: 105
2025-05-07T09:03:49.441815Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 105 ready parts: 2/2
2025-05-07T09:03:49.441859Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 105:0
2025-05-07T09:03:49.441894Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 105:0
2025-05-07T09:03:49.441986Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 6] was 2
2025-05-07T09:03:49.442023Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 105:1
2025-05-07T09:03:49.442047Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 105:1
2025-05-07T09:03:49.442125Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 7] was 3
2025-05-07T09:03:49.442147Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate source path for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 3
2025-05-07T09:03:49.443813Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944
2025-05-07T09:03:49.443920Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:630: Send to actor: [7:304:2295] msg type: 271124998 msg: NKikimrScheme.TEvNotifyTxCompletionResult TxId: 105 at schemeshard: 72057594046678944
2025-05-07T09:03:49.444098Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 105: got EvNotifyTxCompletionResult
2025-05-07T09:03:49.444143Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 105: satisfy waiter [7:534:2495]
2025-05-07T09:03:49.444352Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269877764, Sender [7:536:2497], Recipient [7:134:2157]: NKikimr::TEvTabletPipe::TEvServerDisconnected
2025-05-07T09:03:49.444390Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4938: StateWork, processing event TEvTabletPipe::TEvServerDisconnected
2025-05-07T09:03:49.444418Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5761: Server pipe is reset, at schemeshard: 72057594046678944
TestWaitNotification: OK eventTxId 105
TestModificationResults wait txId: 106
2025-05-07T09:03:49.444945Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271122432, Sender [7:602:2561], Recipient [7:134:2157]: {TEvModifySchemeTransaction txid# 106 TabletId# 72057594046678944}
2025-05-07T09:03:49.445004Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4851: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction
2025-05-07T09:03:49.447397Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpBackupIncrementalBackupCollection BackupIncrementalBackupCollection { Name: ".backups/collections/MyCollection1" } } TxId: 106 TabletId: 72057594046678944 , at schemeshard: 72057594046678944
2025-05-07T09:03:49.447813Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_mkdir.cpp:115: TMkDir Propose, path: /MyRoot/.backups/collections/MyCollection1/19700101000000Z_incremental, operationId: 106:0, at schemeshard: 72057594046678944
2025-05-07T09:03:49.448013Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:319: AttachChild: child attached as only one child to the parent, parent id: [OwnerId: 72057594046678944, LocalPathId: 4], parent name: MyCollection1, child name: 19700101000000Z_incremental, child id: [OwnerId: 72057594046678944, LocalPathId: 8], at schemeshard: 72057594046678944
2025-05-07T09:03:49.448071Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 8] was 0
2025-05-07T09:03:49.448141Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 106:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944
2025-05-07T09:03:49.448251Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_just_reject.cpp:47: TReject Propose, opId: 106:1, explain: Incremental backup is disabled on this collection, at schemeshard: 72057594046678944
2025-05-07T09:03:49.448312Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 106:2, propose status:StatusInvalidParameter, reason: Incremental backup is disabled on this collection, at schemeshard: 72057594046678944
2025-05-07T09:03:49.450691Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:151: Abort operation: IgniteOperation fail to propose a part, opId: 106:1, at schemeshard: 72057594046678944, already accepted parts: 1, propose result status: StatusInvalidParameter, with reason: Incremental backup is disabled on this collection, tx message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpBackupIncrementalBackupCollection BackupIncrementalBackupCollection { Name: ".backups/collections/MyCollection1" } } TxId: 106 TabletId: 72057594046678944
2025-05-07T09:03:49.450841Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_mkdir.cpp:275: MkDir AbortPropose, opId: 106:0, at schemeshard: 72057594046678944
2025-05-07T09:03:49.451088Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046678944
2025-05-07T09:03:49.453840Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 106, response: Status: StatusInvalidParameter Reason: "Incremental backup is disabled on this collection" TxId: 106 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944
2025-05-07T09:03:49.454060Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 106, database: /MyRoot, subject: , status: StatusInvalidParameter, reason: Incremental backup is disabled on this collection, operation: BACKUP INCREMENTAL, path: /MyRoot/.backups/collections/MyCollection1
2025-05-07T09:03:49.454130Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944
TestModificationResult got TxId: 106, wait until txId: 106
TestWaitNotification wait txId: 106
2025-05-07T09:03:49.454478Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 106: send EvNotifyTxCompletion
2025-05-07T09:03:49.454532Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 106
2025-05-07T09:03:49.455001Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269877761, Sender [7:608:2567], Recipient [7:134:2157]: NKikimr::TEvTabletPipe::TEvServerConnected
2025-05-07T09:03:49.455087Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4937: StateWork, processing event TEvTabletPipe::TEvServerConnected
2025-05-07T09:03:49.455133Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5713: Pipe server connected, at tablet: 72057594046678944
2025-05-07T09:03:49.455302Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271124996, Sender [7:304:2295], Recipient [7:134:2157]: NKikimrScheme.TEvNotifyTxCompletion TxId: 106
2025-05-07T09:03:49.455349Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4853: StateWork, processing event TEvSchemeShard::TEvNotifyTxCompletion
2025-05-07T09:03:49.455446Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 106, at schemeshard: 72057594046678944
2025-05-07T09:03:49.455579Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 106: got EvNotifyTxCompletionResult
2025-05-07T09:03:49.455624Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 106: satisfy waiter [7:606:2565]
2025-05-07T09:03:49.455828Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269877764, Sender [7:608:2567], Recipient [7:134:2157]: NKikimr::TEvTabletPipe::TEvServerDisconnected
2025-05-07T09:03:49.455866Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4938: StateWork, processing event TEvTabletPipe::TEvServerDisconnected
2025-05-07T09:03:49.455910Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5761: Server pipe is reset, at schemeshard: 72057594046678944
TestWaitNotification: OK eventTxId 106
2025-05-07T09:03:49.456445Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271122945, Sender [7:609:2568], Recipient [7:134:2157]: NKikimrSchemeOp.TDescribePath Path: "/MyRoot/.backups/collections/MyCollection1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }
2025-05-07T09:03:49.456515Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4852: StateWork, processing event TEvSchemeShard::TEvDescribeScheme
2025-05-07T09:03:49.456633Z node 7 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/.backups/collections/MyCollection1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944
2025-05-07T09:03:49.456895Z node 7 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/.backups/collections/MyCollection1" took 259us result status StatusSuccess
2025-05-07T09:03:49.457377Z node 7 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/.backups/collections/MyCollection1" PathDescription { Self { Name: "MyCollection1" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeBackupCollection CreateFinished: true CreateTxId: 103 CreateStep: 5000004 ParentPathId: 3 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 BackupCollectionVersion: 0 } ChildrenExist: true } Children { Name: "19700101000000Z_full" PathId: 6 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 105 CreateStep: 5000006 ParentPathId: 4 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: true } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 6 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } BackupCollectionDescription { Name: "MyCollection1" ExplicitEntryList { Entries { Type: ETypeTable Path: "/MyRoot/Table1" } } Cluster { } } } PathId: 4 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944
>> TExportToS3Tests::ShouldSucceedOnConcurrentExport [GOOD]
>> TPersQueueTest::WriteExisting [GOOD]
>> TPersQueueTest::WriteExistingBigValue
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_allocator_client/ut/unittest >> TTxAllocatorClientTest::Boot [GOOD]
Test command err:
2025-05-07T09:03:49.913997Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:1904: Tablet: 72057594046447617 LockedInitializationPath Marker# TSYS32
2025-05-07T09:03:49.917339Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:917: Tablet: 72057594046447617 HandleFindLatestLogEntry, NODATA Promote Marker# TSYS19
2025-05-07T09:03:49.919941Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:221: Tablet: 72057594046447617 TTablet::WriteZeroEntry. logid# [72057594046447617:2:0:0:0:0:0] Marker# TSYS01
2025-05-07T09:03:49.938072Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:0:0:0:20:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0}
2025-05-07T09:03:49.948243Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:17: tablet# 72057594046447617 OnActivateExecutor
2025-05-07T09:03:49.972502Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:1:1:28672:35:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0}
2025-05-07T09:03:49.972622Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:1:0:0:42:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0}
2025-05-07T09:03:49.972699Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:2:1:8192:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0}
2025-05-07T09:03:49.972791Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:1381: Tablet: 72057594046447617 GcCollect 0 channel, tablet:gen:step => 2:0 Marker# TSYS28
2025-05-07T09:03:49.972939Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:2:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0}
2025-05-07T09:03:49.973058Z node 1 :TX_ALLOCATOR DEBUG: txallocator__scheme.cpp:22: tablet# 72057594046447617 TTxSchema Complete
2025-05-07T09:03:49.973218Z node 1 :TABLET_MAIN INFO: tablet_sys.cpp:1015: Tablet: 72057594046447617 Active! Generation: 2, Type: TxAllocator started in 0msec Marker# TSYS24
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_allocator_client/ut/unittest >> TTxAllocatorClientTest::InitiatingRequest [GOOD]
Test command err:
2025-05-07T09:03:49.913936Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:1904: Tablet: 72057594046447617 LockedInitializationPath Marker# TSYS32
2025-05-07T09:03:49.918001Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:917: Tablet: 72057594046447617 HandleFindLatestLogEntry, NODATA Promote Marker# TSYS19
2025-05-07T09:03:49.919909Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:221: Tablet: 72057594046447617 TTablet::WriteZeroEntry. logid# [72057594046447617:2:0:0:0:0:0] Marker# TSYS01
2025-05-07T09:03:49.937527Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:0:0:0:20:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0}
2025-05-07T09:03:49.947062Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:17: tablet# 72057594046447617 OnActivateExecutor
2025-05-07T09:03:49.968528Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:1:1:28672:35:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0}
2025-05-07T09:03:49.968642Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:1:0:0:42:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0}
2025-05-07T09:03:49.968707Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:2:1:8192:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0}
2025-05-07T09:03:49.968787Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:1381: Tablet: 72057594046447617 GcCollect 0 channel, tablet:gen:step => 2:0 Marker# TSYS28
2025-05-07T09:03:49.968910Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:2:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0}
2025-05-07T09:03:49.969026Z node 1 :TX_ALLOCATOR DEBUG: txallocator__scheme.cpp:22: tablet# 72057594046447617 TTxSchema Complete
2025-05-07T09:03:49.969179Z node 1 :TABLET_MAIN INFO: tablet_sys.cpp:1015: Tablet: 72057594046447617 Active! Generation: 2, Type: TxAllocator started in 0msec Marker# TSYS24
2025-05-07T09:03:49.970764Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:70:2105] requested range size#5000
2025-05-07T09:03:49.972826Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:3:1:24576:70:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0}
2025-05-07T09:03:49.972894Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:3:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0}
2025-05-07T09:03:49.972988Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 0 Reserved to# 5000
2025-05-07T09:03:49.973039Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:70:2105] TEvAllocateResult from# 0 to# 5000
>> TExportToS3Tests::SchemaMappingEncryption [GOOD]
>> TPersQueueTest::BadTopic [GOOD]
>> TPersQueueTest::CloseActiveWriteSessionOnClusterDisable
>> TExportToS3Tests::ShouldRetryAtFinalStage
|92.5%| [TS] {asan, default-linux-x86_64, release} ydb/core/tx/replication/service/ut_json_change_record/unittest
|92.5%| [TS] {asan, default-linux-x86_64, release} ydb/core/tx/replication/service/ut_json_change_record/unittest
|92.5%| [TS] {asan, default-linux-x86_64, release} ydb/core/tx/replication/service/ut_json_change_record/unittest
|92.5%| [TS] {asan, default-linux-x86_64, release} ydb/core/tx/replication/service/ut_json_change_record/unittest
>> TExportToS3Tests::SchemaMappingEncryptionIncorrectKey
>> TPersQueueTest::DirectReadPreCached [GOOD]
>> TPersQueueTest::DirectReadNotCached
>> TPersQueueCommonTest::Auth_CreateGrpcStreamWithInvalidTokenInInitialMetadata_SessionClosedWithUnauthenticatedError [GOOD]
>> TPersQueueCommonTest::Auth_MultipleUpdateTokenRequestIterationsWithValidToken_GotUpdateTokenResponseForEachRequest
|92.5%| [TS] {asan, default-linux-x86_64, release} ydb/core/tx/replication/service/ut_json_change_record/unittest
|92.5%| [TA] $(B)/ydb/core/tx/schemeshard/ut_backup_collection/test-results/unittest/{meta.json ... results_accumulator.log}
|92.5%| [TA] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_backup_collection/test-results/unittest/{meta.json ... results_accumulator.log}
>> TExportToS3Tests::DisableAutoDropping [GOOD]
>> TExportToS3Tests::CancelUponTransferringSingleShardTableShouldSucceed [GOOD]
>> TExportToS3Tests::ExportIndexTablePartitioningSettings [GOOD]
|92.5%| [TA] $(B)/ydb/core/tx/tx_allocator_client/ut/test-results/unittest/{meta.json ... results_accumulator.log}
|92.5%| [TA] {RESULT} $(B)/ydb/core/tx/tx_allocator_client/ut/test-results/unittest/{meta.json ... results_accumulator.log}
>> TPersQueueTest::SetupLockSession2 [GOOD]
>> TPersQueueTest::SetupLockSession
>> TExportToS3Tests::CancelUponTransferringMultiShardTableShouldSucceed
>> BackupRestore::RestoreIndexTableReadReplicasSettings [GOOD]
>> BackupRestore::RestoreTableSplitBoundaries
>> TExportToS3Tests::CancelUponCopyingTablesShouldSucceed [GOOD]
|92.5%| [TS] {asan, default-linux-x86_64, release} ydb/core/tx/replication/service/ut_json_change_record/unittest
>> TExportToS3Tests::SchemaMappingEncryptionIncorrectKey [GOOD]
>> TExportToS3Tests::AuditCompletedExport
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_export/unittest >> TExportToS3Tests::TablePermissions [GOOD]
Test command err:
Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140]
IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140]
Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140]
2025-05-07T09:03:46.199995Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1
2025-05-07T09:03:46.200077Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10
2025-05-07T09:03:46.200115Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s
2025-05-07T09:03:46.200169Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration
2025-05-07T09:03:46.201467Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000
2025-05-07T09:03:46.201513Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000
2025-05-07T09:03:46.201566Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10
2025-05-07T09:03:46.201611Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false
2025-05-07T09:03:46.202266Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources#
2025-05-07T09:03:46.205704Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute
2025-05-07T09:03:46.274671Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs
2025-05-07T09:03:46.274723Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded
2025-05-07T09:03:46.290755Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete
2025-05-07T09:03:46.290943Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute
2025-05-07T09:03:46.291141Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944
2025-05-07T09:03:46.298354Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete
2025-05-07T09:03:46.298629Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0
2025-05-07T09:03:46.303818Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944
2025-05-07T09:03:46.304034Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944
2025-05-07T09:03:46.311977Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944
2025-05-07T09:03:46.321269Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944
2025-05-07T09:03:46.321350Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944
2025-05-07T09:03:46.321426Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute
2025-05-07T09:03:46.321474Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1]
2025-05-07T09:03:46.321532Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete
2025-05-07T09:03:46.322486Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944
2025-05-07T09:03:46.329116Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0
Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062]
2025-05-07T09:03:46.461167Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944
2025-05-07T09:03:46.461373Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944
2025-05-07T09:03:46.461577Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0
2025-05-07T09:03:46.461829Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944
2025-05-07T09:03:46.461878Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944
2025-05-07T09:03:46.464072Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944
2025-05-07T09:03:46.464188Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot
2025-05-07T09:03:46.464355Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944
2025-05-07T09:03:46.464421Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944
2025-05-07T09:03:46.464462Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state
2025-05-07T09:03:46.464513Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3
2025-05-07T09:03:46.466174Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944
2025-05-07T09:03:46.466223Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944
2025-05-07T09:03:46.466258Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128
2025-05-07T09:03:46.467634Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944
2025-05-07T09:03:46.467679Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944
2025-05-07T09:03:46.467715Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944
2025-05-07T09:03:46.467772Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1
2025-05-07T09:03:46.470666Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545
2025-05-07T09:03:46.472809Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816
2025-05-07T09:03:46.472993Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545
FAKE_COORDINATOR: Add transaction: 1 at step: 5000001
FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0
FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001
2025-05-07T09:03:46.473825Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944
2025-05-07T09:03:46.473956Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944
2025-05-07T09:03:46.474025Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944
2025-05-07T09:03:46.474251Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240
2025-05-07T09:03:46.474292Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944
2025-05-07T09:03:46.474463Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1
2025-05-07T09:03:46.474544Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944
FAKE_COORDINATOR: Erasing txId 1
2025-05-07T09:03:46.476367Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944
2025-05-07T09:03:46.476425Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1]
2025-05-07T09:03:46.476589Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944
2025-05-07T09:03:46.476639Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ...
0005 FAKE_COORDINATOR: advance: minStep5000005 State->FrontStep: 5000004
FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710759 at step: 5000005
FAKE_COORDINATOR: Send Plan to tablet 72075186233409547 for txId: 281474976710759 at step: 5000005
2025-05-07T09:03:50.187594Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000005, transactions count in step: 1, at schemeshard: 72057594046678944
2025-05-07T09:03:50.187705Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 281474976710759 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 17179871340 } } Step: 5000005 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944
2025-05-07T09:03:50.187770Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:412: TBackup TPropose, opId: 281474976710759:0 HandleReply TEvOperationPlan, stepId: 5000005, at schemeshard: 72057594046678944
2025-05-07T09:03:50.187896Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 281474976710759:0 128 -> 129
2025-05-07T09:03:50.188058Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 3
FAKE_COORDINATOR: advance: minStep5000005 State->FrontStep: 5000005
REQUEST: PUT /metadata.json HTTP/1.1
HEADERS:
Host: localhost:31565
Accept: */*
Connection: Upgrade, HTTP2-Settings
Upgrade: h2c
HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA
amz-sdk-invocation-id: ED20F8C1-A752-4325-BF26-EA7CE10AD5AB
amz-sdk-request: attempt=1
content-length: 73
content-md5: q/ySd5GvS6I/qOVxS/4Thg==
content-type: binary/octet-stream
user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8
2025-05-07T09:03:50.224981Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944
S3_MOCK::HttpServeWrite: /metadata.json / / 73
2025-05-07T09:03:50.225070Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 281474976710759, path id: [OwnerId: 72057594046678944, LocalPathId: 4]
2025-05-07T09:03:50.225308Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944
2025-05-07T09:03:50.225348Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [4:204:2206], at schemeshard: 72057594046678944, txId: 281474976710759, path id: 4
2025-05-07T09:03:50.227424Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 281474976710759:0, at schemeshard: 72057594046678944
2025-05-07T09:03:50.227501Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:258: TBackup TProposedWaitParts, opId: 281474976710759:0 ProgressState, at schemeshard: 72057594046678944
FAKE_COORDINATOR: Erasing txId 281474976710759
2025-05-07T09:03:50.228231Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 3 PathOwnerId: 72057594046678944, cookie: 281474976710759
2025-05-07T09:03:50.228323Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 3 PathOwnerId: 72057594046678944, cookie: 281474976710759
2025-05-07T09:03:50.228353Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 281474976710759
2025-05-07T09:03:50.228387Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 281474976710759, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], version: 3
2025-05-07T09:03:50.228422Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 4
2025-05-07T09:03:50.228511Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 281474976710759, ready parts: 0/1, is published: true
REQUEST: PUT /permissions.pb HTTP/1.1
HEADERS:
Host: localhost:31565
Accept: */*
Connection: Upgrade, HTTP2-Settings
Upgrade: h2c
HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA
amz-sdk-invocation-id: AE88E358-0E96-4186-A4CA-D30857A76FCA
amz-sdk-request: attempt=1
content-length: 137
content-md5: WeIr3D5bqIjvqMGEjx2JrA==
content-type: binary/octet-stream
user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8
S3_MOCK::HttpServeWrite: /permissions.pb / / 137
2025-05-07T09:03:50.232041Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710759
REQUEST: PUT /scheme.pb HTTP/1.1
HEADERS:
Host: localhost:31565
Accept: */*
Connection: Upgrade, HTTP2-Settings
Upgrade: h2c
HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA
amz-sdk-invocation-id: B2A3F38D-66D5-47D0-BF9D-9E733A5103B1
amz-sdk-request: attempt=1
content-length: 355
content-md5: 4DhJNWgTpoG3PVvZ0uCHUA==
content-type: binary/octet-stream
user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8
S3_MOCK::HttpServeWrite: /scheme.pb / / 355
REQUEST: PUT /data_00.csv HTTP/1.1
HEADERS:
Host: localhost:31565
Accept: */*
Connection: Upgrade, HTTP2-Settings
Upgrade: h2c
HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA
amz-sdk-invocation-id: 7C182AE7-B186-4F37-8FB7-CE3B8E366C65
amz-sdk-request: attempt=1
content-length: 0
content-md5: 1B2M2Y8AsgTpgAmY7PhCfg==
content-type: binary/octet-stream
user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8
S3_MOCK::HttpServeWrite: /data_00.csv / / 0
2025-05-07T09:03:50.249262Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5472: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 452 RawX2: 17179871605 } Origin: 72075186233409547 State: 2 TxId: 281474976710759 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 0 RowsProcessed: 0 }
2025-05-07T09:03:50.249322Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation FindRelatedPartByTabletId, TxId: 281474976710759, tablet: 72075186233409547, partId: 0
2025-05-07T09:03:50.249452Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 281474976710759:0, at schemeshard: 72057594046678944, message: Source { RawX1: 452 RawX2: 17179871605 } Origin: 72075186233409547 State: 2 TxId: 281474976710759 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 0 RowsProcessed: 0 }
2025-05-07T09:03:50.249572Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:233: TBackup TProposedWaitParts, opId: 281474976710759:0 HandleReply TEvSchemaChanged at tablet# 72057594046678944 message# Source { RawX1: 452 RawX2: 17179871605 } Origin: 72075186233409547 State: 2 TxId: 281474976710759 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 0 RowsProcessed: 0 }
2025-05-07T09:03:50.249654Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:655: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 281474976710759:0, shardIdx: 72057594046678944:2, datashard: 72075186233409547, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944
2025-05-07T09:03:50.249702Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:674: all shard schema changes has been received, operationId: 281474976710759:0, at schemeshard: 72057594046678944
2025-05-07T09:03:50.249753Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:686: send schema changes ack message, operation: 281474976710759:0, datashard: 72075186233409547, at schemeshard: 72057594046678944
2025-05-07T09:03:50.249804Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 281474976710759:0 129 -> 240
2025-05-07T09:03:50.250007Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:116: Unable to make a bill: kind# TBackup, opId# 281474976710759:0, reason# domain is not a serverless db, domain# /MyRoot, domainPathId# [OwnerId: 72057594046678944, LocalPathId: 1], IsDomainSchemeShard: 1, ParentDomainId: [OwnerId: 72057594046678944, LocalPathId: 1], ResourcesDomainId: [OwnerId: 72057594046678944, LocalPathId: 1]
2025-05-07T09:03:50.251988Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 281474976710759:0, at schemeshard: 72057594046678944
2025-05-07T09:03:50.252139Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 281474976710759:0, at schemeshard: 72057594046678944
2025-05-07T09:03:50.252178Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046678944] TDone opId# 281474976710759:0 ProgressState
2025-05-07T09:03:50.252296Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976710759:0 progress is 1/1
2025-05-07T09:03:50.252335Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 281474976710759 ready parts: 1/1
2025-05-07T09:03:50.252370Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976710759:0 progress is 1/1
2025-05-07T09:03:50.252398Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 281474976710759 ready parts: 1/1
2025-05-07T09:03:50.252428Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 281474976710759, ready parts: 1/1, is published: true
2025-05-07T09:03:50.252485Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [4:124:2150] message: TxId: 281474976710759
2025-05-07T09:03:50.252525Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 281474976710759 ready parts: 1/1
2025-05-07T09:03:50.252568Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 281474976710759:0
2025-05-07T09:03:50.252599Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 281474976710759:0
2025-05-07T09:03:50.252705Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 3
2025-05-07T09:03:50.254522Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6706: Handle: TEvNotifyTxCompletionResult: txId# 281474976710759
2025-05-07T09:03:50.254586Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6708: Message: TxId: 281474976710759
2025-05-07T09:03:50.256288Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult
2025-05-07T09:03:50.256341Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [4:482:2443]
TestWaitNotification: OK eventTxId 103
>> BackupRestore::TestAllSchemeObjectTypes-EPathTypeKesus [GOOD]
>> BackupRestore::TestAllSchemeObjectTypes-EPathTypeSolomonVolume [GOOD]
>> BackupRestore::TestAllSchemeObjectTypes-EPathTypeTableIndex
>> TExportToS3Tests::ShouldSucceedOnConcurrentImport [GOOD]
|92.6%| [TA] $(B)/ydb/core/tx/replication/service/ut_json_change_record/test-results/unittest/{meta.json ... results_accumulator.log}
|92.6%| [TA] {RESULT} $(B)/ydb/core/tx/replication/service/ut_json_change_record/test-results/unittest/{meta.json ... results_accumulator.log}
>> TExportToS3Tests::EncryptedExport [GOOD]
>> TPQCDTest::TestUnavailableWithoutClustersList [GOOD]
>> TPQCDTest::TestUnavailableWithoutBoth [GOOD]
>> TExportToS3Tests::Changefeeds [GOOD]
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_export/unittest >> TExportToS3Tests::ExportIndexTablePartitioningSettings [GOOD]
Test command err:
Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140]
IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140]
Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140]
2025-05-07T09:03:46.200032Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1
2025-05-07T09:03:46.200122Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10
2025-05-07T09:03:46.200164Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s
2025-05-07T09:03:46.200202Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration
2025-05-07T09:03:46.201447Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000
2025-05-07T09:03:46.201513Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000
2025-05-07T09:03:46.201599Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10
2025-05-07T09:03:46.201678Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false
2025-05-07T09:03:46.202412Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources#
2025-05-07T09:03:46.205723Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute
2025-05-07T09:03:46.288537Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs
2025-05-07T09:03:46.288594Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded
2025-05-07T09:03:46.302621Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete
2025-05-07T09:03:46.302782Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute
2025-05-07T09:03:46.302896Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944
2025-05-07T09:03:46.307352Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete
2025-05-07T09:03:46.307561Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0
2025-05-07T09:03:46.308004Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944
2025-05-07T09:03:46.308124Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944
2025-05-07T09:03:46.311375Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944
2025-05-07T09:03:46.321001Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944
2025-05-07T09:03:46.321051Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944
2025-05-07T09:03:46.321101Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute
2025-05-07T09:03:46.321143Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1]
2025-05-07T09:03:46.321188Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete
2025-05-07T09:03:46.322417Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944
2025-05-07T09:03:46.327664Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0
Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062]
2025-05-07T09:03:46.464218Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944
2025-05-07T09:03:46.464438Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944
2025-05-07T09:03:46.464664Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0
2025-05-07T09:03:46.464923Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944
2025-05-07T09:03:46.464982Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944
2025-05-07T09:03:46.467206Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944
2025-05-07T09:03:46.467343Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot
2025-05-07T09:03:46.467537Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944
2025-05-07T09:03:46.467605Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944
2025-05-07T09:03:46.467662Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state
2025-05-07T09:03:46.467705Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3
2025-05-07T09:03:46.469585Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944
2025-05-07T09:03:46.469639Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944
2025-05-07T09:03:46.469676Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128
2025-05-07T09:03:46.471002Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944
2025-05-07T09:03:46.471036Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944
2025-05-07T09:03:46.471095Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944
2025-05-07T09:03:46.471142Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1
2025-05-07T09:03:46.473877Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545
2025-05-07T09:03:46.475461Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816
2025-05-07T09:03:46.475649Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545
FAKE_COORDINATOR: Add transaction: 1 at step: 5000001
FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0
FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001
2025-05-07T09:03:46.476591Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944
2025-05-07T09:03:46.476705Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944
2025-05-07T09:03:46.476749Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944
2025-05-07T09:03:46.476978Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240
2025-05-07T09:03:46.477025Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944
2025-05-07T09:03:46.477149Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1
2025-05-07T09:03:46.477201Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944
FAKE_COORDINATOR: Erasing txId 1
2025-05-07T09:03:46.479162Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944
2025-05-07T09:03:46.479204Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1]
2025-05-07T09:03:46.479419Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944
2025-05-07T09:03:46.479465Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ...
shard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 281474976710759:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:281474976710759 msg type: 269090816 2025-05-07T09:03:50.924359Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 281474976710759, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 281474976710759 at step: 5000005 FAKE_COORDINATOR: advance: minStep5000005 State->FrontStep: 5000004 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710759 at step: 5000005 FAKE_COORDINATOR: Send Plan to tablet 72075186233409548 for txId: 281474976710759 at step: 5000005 2025-05-07T09:03:50.925524Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000005, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T09:03:50.925642Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 281474976710759 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 17179871340 } } Step: 5000005 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:03:50.925700Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:412: TBackup TPropose, opId: 281474976710759:0 HandleReply TEvOperationPlan, stepId: 5000005, at schemeshard: 72057594046678944 2025-05-07T09:03:50.925832Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 281474976710759:0 128 -> 129 2025-05-07T09:03:50.926020Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 6] was 3 2025-05-07T09:03:50.961562Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:03:50.961606Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 281474976710759, path id: [OwnerId: 72057594046678944, LocalPathId: 6] 2025-05-07T09:03:50.961796Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:03:50.961839Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [4:204:2206], at schemeshard: 72057594046678944, txId: 281474976710759, path id: 6 FAKE_COORDINATOR: advance: minStep5000005 State->FrontStep: 5000005 2025-05-07T09:03:50.962315Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 281474976710759:0, at schemeshard: 72057594046678944 2025-05-07T09:03:50.962361Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:258: TBackup TProposedWaitParts, opId: 281474976710759:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T09:03:50.962845Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 6 Version: 3 PathOwnerId: 72057594046678944, cookie: 281474976710759 2025-05-07T09:03:50.962908Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 6 Version: 3 PathOwnerId: 72057594046678944, cookie: 281474976710759 2025-05-07T09:03:50.962928Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 281474976710759 2025-05-07T09:03:50.962951Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 281474976710759, pathId: [OwnerId: 72057594046678944, LocalPathId: 6], version: 3 2025-05-07T09:03:50.962983Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 6] was 4 2025-05-07T09:03:50.963079Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 281474976710759, ready parts: 0/1, is published: true FAKE_COORDINATOR: Erasing txId 281474976710759 2025-05-07T09:03:50.964753Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710759 REQUEST: PUT /metadata.json HTTP/1.1 HEADERS: Host: localhost:18062 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 92DFECAB-151C-4EFD-B52D-907335B5C722 amz-sdk-request: attempt=1 content-length: 73 content-md5: q/ySd5GvS6I/qOVxS/4Thg== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 S3_MOCK::HttpServeWrite: /metadata.json / / 73 REQUEST: PUT /scheme.pb HTTP/1.1 HEADERS: Host: localhost:18062 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: B0326B7D-49C8-4390-85E2-5B1B98E011B1 amz-sdk-request: attempt=1 content-length: 602 content-md5: GgrERoUcI3sF1n0Je2MTCQ== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 S3_MOCK::HttpServeWrite: /scheme.pb / / 602 REQUEST: PUT /data_00.csv HTTP/1.1 HEADERS: Host: localhost:18062 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 41B95EE5-1116-483F-BF71-FD83E9ED0A75 amz-sdk-request: attempt=1 content-length: 0 content-md5: 1B2M2Y8AsgTpgAmY7PhCfg== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 S3_MOCK::HttpServeWrite: /data_00.csv / / 0 2025-05-07T09:03:50.991322Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5472: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 507 RawX2: 17179871649 } Origin: 72075186233409548 State: 2 TxId: 281474976710759 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 0 RowsProcessed: 0 } 2025-05-07T09:03:50.991376Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation FindRelatedPartByTabletId, TxId: 281474976710759, tablet: 72075186233409548, partId: 0 2025-05-07T09:03:50.991513Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 281474976710759:0, at schemeshard: 72057594046678944, message: Source { RawX1: 507 RawX2: 17179871649 } Origin: 72075186233409548 
State: 2 TxId: 281474976710759 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 0 RowsProcessed: 0 } 2025-05-07T09:03:50.991616Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:233: TBackup TProposedWaitParts, opId: 281474976710759:0 HandleReply TEvSchemaChanged at tablet# 72057594046678944 message# Source { RawX1: 507 RawX2: 17179871649 } Origin: 72075186233409548 State: 2 TxId: 281474976710759 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 0 RowsProcessed: 0 } 2025-05-07T09:03:50.991676Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:655: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 281474976710759:0, shardIdx: 72057594046678944:3, datashard: 72075186233409548, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-05-07T09:03:50.991720Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:674: all shard schema changes has been received, operationId: 281474976710759:0, at schemeshard: 72057594046678944 2025-05-07T09:03:50.991756Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:686: send schema changes ack message, operation: 281474976710759:0, datashard: 72075186233409548, at schemeshard: 72057594046678944 2025-05-07T09:03:50.991803Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 281474976710759:0 129 -> 240 2025-05-07T09:03:50.991962Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:116: Unable to make a bill: kind# TBackup, opId# 281474976710759:0, reason# domain is not a serverless db, domain# /MyRoot, domainPathId# [OwnerId: 72057594046678944, LocalPathId: 1], IsDomainSchemeShard: 1, ParentDomainId: [OwnerId: 72057594046678944, LocalPathId: 1], ResourcesDomainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:03:50.994332Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 281474976710759:0, at schemeshard: 72057594046678944 2025-05-07T09:03:50.994597Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 281474976710759:0, at schemeshard: 72057594046678944 2025-05-07T09:03:50.994649Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046678944] TDone opId# 281474976710759:0 ProgressState 2025-05-07T09:03:50.994748Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976710759:0 progress is 1/1 2025-05-07T09:03:50.994771Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 281474976710759 ready parts: 1/1 2025-05-07T09:03:50.994815Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976710759:0 progress is 1/1 2025-05-07T09:03:50.994838Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 281474976710759 ready parts: 1/1 2025-05-07T09:03:50.994872Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 281474976710759, ready parts: 1/1, is published: true 2025-05-07T09:03:50.994927Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [4:124:2150] message: TxId: 281474976710759 
2025-05-07T09:03:50.994971Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 281474976710759 ready parts: 1/1 2025-05-07T09:03:50.994998Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 281474976710759:0 2025-05-07T09:03:50.995018Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 281474976710759:0 2025-05-07T09:03:50.995131Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 6] was 3 2025-05-07T09:03:50.996654Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6706: Handle: TEvNotifyTxCompletionResult: txId# 281474976710759 2025-05-07T09:03:50.996701Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6708: Message: TxId: 281474976710759 2025-05-07T09:03:50.998386Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-05-07T09:03:50.998465Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [4:537:2487] TestWaitNotification: OK eventTxId 102 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_export/unittest >> TExportToS3Tests::DisableAutoDropping [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140] 2025-05-07T09:03:46.199993Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T09:03:46.200107Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:03:46.200153Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T09:03:46.200187Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T09:03:46.201460Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T09:03:46.201528Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T09:03:46.201606Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:03:46.201669Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T09:03:46.202434Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T09:03:46.205697Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T09:03:46.287646Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T09:03:46.287701Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:03:46.303307Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T09:03:46.303497Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T09:03:46.303644Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T09:03:46.308849Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T09:03:46.309115Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T09:03:46.309668Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T09:03:46.309810Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T09:03:46.312170Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:03:46.321030Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:03:46.321097Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:03:46.321161Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T09:03:46.321217Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:03:46.321274Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T09:03:46.322498Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T09:03:46.327736Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T09:03:46.442012Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T09:03:46.442260Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: 
//MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:03:46.442451Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T09:03:46.442679Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T09:03:46.442738Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:03:46.445012Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T09:03:46.445158Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T09:03:46.445359Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:03:46.445445Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T09:03:46.445507Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T09:03:46.445547Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T09:03:46.447372Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:03:46.447462Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T09:03:46.447512Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T09:03:46.449113Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:03:46.449150Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:03:46.449201Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:03:46.449245Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T09:03:46.452239Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T09:03:46.454489Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet 
strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T09:03:46.454693Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T09:03:46.455744Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T09:03:46.455890Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:03:46.455933Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:03:46.456238Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T09:03:46.456291Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:03:46.456469Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T09:03:46.456541Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T09:03:46.458618Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:03:46.458678Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:03:46.458900Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:03:46.458948Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 
T09:03:50.950424Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 281474976710761 2025-05-07T09:03:50.950451Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 281474976710761, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 7 2025-05-07T09:03:50.950479Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-05-07T09:03:50.950551Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 281474976710761, ready parts: 0/1, is published: true 2025-05-07T09:03:50.955058Z node 4 :EXPORT DEBUG: schemeshard_export__create.cpp:329: TExport::TTxProgress: DoComplete 2025-05-07T09:03:50.955204Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 281474976710761, at schemeshard: 72057594046678944 2025-05-07T09:03:50.955248Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 281474976710761, ready parts: 0/1, is published: true 2025-05-07T09:03:50.955292Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 281474976710761, at schemeshard: 72057594046678944 2025-05-07T09:03:50.955847Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 281474976710761:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:281474976710761 msg type: 269090816 2025-05-07T09:03:50.955923Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 281474976710761, partId: 4294967295, tablet: 72057594046316545 2025-05-07T09:03:50.956049Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710761 2025-05-07T09:03:50.956097Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710761 FAKE_COORDINATOR: Add transaction: 281474976710761 at step: 5000007 FAKE_COORDINATOR: advance: minStep5000007 State->FrontStep: 5000006 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710761 at step: 5000007 2025-05-07T09:03:50.956262Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000007, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T09:03:50.956346Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 281474976710761 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 17179871340 } } Step: 5000007 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:03:50.956383Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_rmdir.cpp:129: TRmDir HandleReply TEvOperationPlan, opId: 281474976710761:0, step: 5000007, at schemeshard: 72057594046678944 2025-05-07T09:03:50.956472Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_rmdir.cpp:180: RmDir is done, opId: 281474976710761:0, at schemeshard: 72057594046678944 
2025-05-07T09:03:50.956529Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976710761:0 progress is 1/1 2025-05-07T09:03:50.956561Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 281474976710761 ready parts: 1/1 2025-05-07T09:03:50.956607Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976710761:0 progress is 1/1 2025-05-07T09:03:50.956634Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 281474976710761 ready parts: 1/1 2025-05-07T09:03:50.956681Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-05-07T09:03:50.956729Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-05-07T09:03:50.956774Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 281474976710761, ready parts: 1/1, is published: false 2025-05-07T09:03:50.956819Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 281474976710761 ready parts: 1/1 2025-05-07T09:03:50.956856Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 281474976710761:0 2025-05-07T09:03:50.956887Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 281474976710761:0 2025-05-07T09:03:50.956938Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-05-07T09:03:50.956977Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 281474976710761, publications: 2, subscribers: 1 2025-05-07T09:03:50.957008Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 281474976710761, [OwnerId: 72057594046678944, LocalPathId: 1], 11 2025-05-07T09:03:50.957037Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 281474976710761, [OwnerId: 72057594046678944, LocalPathId: 3], 18446744073709551615 FAKE_COORDINATOR: Erasing txId 281474976710761 2025-05-07T09:03:50.960106Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:03:50.960149Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 281474976710761, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:03:50.960289Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 281474976710761, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-05-07T09:03:50.960417Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:03:50.960450Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: 
TTxPublishToSchemeBoard Send, to populator: [4:204:2206], at schemeshard: 72057594046678944, txId: 281474976710761, path id: 1 2025-05-07T09:03:50.960489Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [4:204:2206], at schemeshard: 72057594046678944, txId: 281474976710761, path id: 3 2025-05-07T09:03:50.961224Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 11 PathOwnerId: 72057594046678944, cookie: 281474976710761 2025-05-07T09:03:50.961324Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 11 PathOwnerId: 72057594046678944, cookie: 281474976710761 2025-05-07T09:03:50.961368Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 281474976710761 2025-05-07T09:03:50.961426Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 281474976710761, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 11 2025-05-07T09:03:50.961472Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-05-07T09:03:50.962225Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 281474976710761 2025-05-07T09:03:50.962304Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 281474976710761 2025-05-07T09:03:50.962330Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 281474976710761 2025-05-07T09:03:50.962358Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 281474976710761, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 18446744073709551615 2025-05-07T09:03:50.962385Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-05-07T09:03:50.962471Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 281474976710761, subscribers: 1 2025-05-07T09:03:50.962531Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:212: TTxAckPublishToSchemeBoard Notify send TEvNotifyTxCompletionResult, at schemeshard: 72057594046678944, to actorId: [4:124:2150] 2025-05-07T09:03:50.967788Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710761 2025-05-07T09:03:50.968177Z node 4 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710761 2025-05-07T09:03:50.968259Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6706: Handle: TEvNotifyTxCompletionResult: txId# 281474976710761 2025-05-07T09:03:50.968318Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6708: Message: TxId: 281474976710761 2025-05-07T09:03:50.968366Z node 4 :EXPORT DEBUG: schemeshard_export__create.cpp:309: TExport::TTxProgress: DoExecute 2025-05-07T09:03:50.968395Z node 4 :EXPORT DEBUG: schemeshard_export__create.cpp:1239: TExport::TTxProgress: OnNotifyResult: txId# 281474976710761 2025-05-07T09:03:50.968431Z node 4 :EXPORT DEBUG: schemeshard_export__create.cpp:1270: TExport::TTxProgress: OnNotifyResult: txId# 281474976710761, id# 102, itemIdx# 4294967295 2025-05-07T09:03:50.970056Z node 4 :EXPORT DEBUG: schemeshard_export__create.cpp:329: TExport::TTxProgress: DoComplete 2025-05-07T09:03:50.970128Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-05-07T09:03:50.970186Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [4:610:2567] TestWaitNotification: OK eventTxId 102 >> TPQCDTest::TestPrioritizeLocalDatacenter [GOOD] >> TPQCDTest::TestCloudClientsAreConsistentlyDistributed [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_export/unittest >> TExportToS3Tests::SchemaMappingEncryptionIncorrectKey [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140] 2025-05-07T09:03:46.200040Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T09:03:46.200138Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:03:46.200187Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T09:03:46.200226Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T09:03:46.201496Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T09:03:46.201569Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T09:03:46.201649Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:03:46.201728Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 
604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T09:03:46.202451Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T09:03:46.205696Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T09:03:46.291597Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T09:03:46.291652Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:03:46.307606Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T09:03:46.307845Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T09:03:46.308017Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T09:03:46.313418Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T09:03:46.313710Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T09:03:46.314401Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T09:03:46.314601Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T09:03:46.317336Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:03:46.321010Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:03:46.321056Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:03:46.321106Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T09:03:46.321149Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:03:46.321195Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T09:03:46.322434Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T09:03:46.327740Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T09:03:46.422905Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 
, at schemeshard: 72057594046678944 2025-05-07T09:03:46.424454Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:03:46.426920Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T09:03:46.428315Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T09:03:46.428394Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:03:46.431255Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T09:03:46.431345Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T09:03:46.431496Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:03:46.431605Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T09:03:46.431638Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T09:03:46.431663Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T09:03:46.433333Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:03:46.433381Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T09:03:46.433410Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T09:03:46.434638Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:03:46.434679Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:03:46.434717Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:03:46.434759Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T09:03:46.438138Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 
IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T09:03:46.439638Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T09:03:46.441040Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T09:03:46.441877Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T09:03:46.442036Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:03:46.442072Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:03:46.443134Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T09:03:46.443193Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:03:46.443317Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T09:03:46.443394Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T09:03:46.445156Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:03:46.445214Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:03:46.445389Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:03:46.445432Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 
hemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 281474976710758, at schemeshard: 72057594046678944 2025-05-07T09:03:51.719170Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 281474976710758:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:281474976710758 msg type: 269090816 2025-05-07T09:03:51.719328Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 281474976710758, partId: 4294967295, tablet: 72057594046316545 2025-05-07T09:03:51.719959Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710758 FAKE_COORDINATOR: Add transaction: 281474976710758 at step: 5000005 FAKE_COORDINATOR: advance: minStep5000005 State->FrontStep: 5000004 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710758 at step: 5000005 2025-05-07T09:03:51.721847Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000005, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T09:03:51.722049Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 281474976710758 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 17179871340 } } Step: 5000005 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:03:51.722121Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_rmdir.cpp:129: TRmDir HandleReply TEvOperationPlan, opId: 281474976710758:0, step: 5000005, at schemeshard: 72057594046678944 2025-05-07T09:03:51.722267Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_rmdir.cpp:180: RmDir is done, opId: 281474976710758:0, at schemeshard: 72057594046678944 2025-05-07T09:03:51.722341Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976710758:0 progress is 1/1 2025-05-07T09:03:51.722382Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 281474976710758 ready parts: 1/1 2025-05-07T09:03:51.722447Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976710758:0 progress is 1/1 2025-05-07T09:03:51.722490Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 281474976710758 ready parts: 1/1 2025-05-07T09:03:51.722552Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-05-07T09:03:51.722635Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 1 2025-05-07T09:03:51.722688Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 281474976710758, ready parts: 1/1, is published: false 2025-05-07T09:03:51.722746Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 281474976710758 ready parts: 1/1 2025-05-07T09:03:51.722791Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 281474976710758:0 
2025-05-07T09:03:51.722834Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 281474976710758:0 2025-05-07T09:03:51.722896Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 2 2025-05-07T09:03:51.722933Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 281474976710758, publications: 2, subscribers: 1 2025-05-07T09:03:51.722977Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 281474976710758, [OwnerId: 72057594046678944, LocalPathId: 1], 11 2025-05-07T09:03:51.723020Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 281474976710758, [OwnerId: 72057594046678944, LocalPathId: 4], 18446744073709551615 2025-05-07T09:03:51.723787Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710758 FAKE_COORDINATOR: Erasing txId 281474976710758 2025-05-07T09:03:51.725417Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:03:51.725454Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 281474976710758, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:03:51.725595Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 281474976710758, path id: [OwnerId: 72057594046678944, LocalPathId: 4] 2025-05-07T09:03:51.725727Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:03:51.725758Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [4:204:2206], at schemeshard: 72057594046678944, txId: 281474976710758, path id: 1 2025-05-07T09:03:51.725814Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [4:204:2206], at schemeshard: 72057594046678944, txId: 281474976710758, path id: 4 2025-05-07T09:03:51.726601Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 11 PathOwnerId: 72057594046678944, cookie: 281474976710758 2025-05-07T09:03:51.726684Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 11 PathOwnerId: 72057594046678944, cookie: 281474976710758 2025-05-07T09:03:51.726717Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 281474976710758 2025-05-07T09:03:51.726781Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 281474976710758, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 11 
2025-05-07T09:03:51.726827Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 4 2025-05-07T09:03:51.727331Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 281474976710758 2025-05-07T09:03:51.727406Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 281474976710758 2025-05-07T09:03:51.727435Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 281474976710758 2025-05-07T09:03:51.727463Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 281474976710758, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], version: 18446744073709551615 2025-05-07T09:03:51.727490Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 1 2025-05-07T09:03:51.727561Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 281474976710758, subscribers: 1 2025-05-07T09:03:51.727614Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:212: TTxAckPublishToSchemeBoard Notify send TEvNotifyTxCompletionResult, at schemeshard: 72057594046678944, to actorId: [4:124:2150] 2025-05-07T09:03:51.727913Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-05-07T09:03:51.727967Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 4], at schemeshard: 72057594046678944 2025-05-07T09:03:51.728038Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-05-07T09:03:51.730817Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710758 2025-05-07T09:03:51.732252Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710758 2025-05-07T09:03:51.732356Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6706: Handle: TEvNotifyTxCompletionResult: txId# 281474976710758 2025-05-07T09:03:51.732431Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6708: Message: TxId: 281474976710758 2025-05-07T09:03:51.732493Z node 4 :EXPORT DEBUG: schemeshard_export__create.cpp:309: TExport::TTxProgress: DoExecute 2025-05-07T09:03:51.732532Z node 4 :EXPORT DEBUG: schemeshard_export__create.cpp:1239: TExport::TTxProgress: OnNotifyResult: txId# 281474976710758 2025-05-07T09:03:51.732579Z node 4 :EXPORT DEBUG: 
schemeshard_export__create.cpp:1270: TExport::TTxProgress: OnNotifyResult: txId# 281474976710758, id# 103, itemIdx# 4294967295 2025-05-07T09:03:51.732963Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-05-07T09:03:51.734197Z node 4 :EXPORT DEBUG: schemeshard_export__create.cpp:329: TExport::TTxProgress: DoComplete TestWaitNotification wait txId: 103 2025-05-07T09:03:51.734410Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 103: send EvNotifyTxCompletion 2025-05-07T09:03:51.734454Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 103 2025-05-07T09:03:51.734882Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 103, at schemeshard: 72057594046678944 2025-05-07T09:03:51.734970Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-05-07T09:03:51.735015Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [4:544:2503] TestWaitNotification: OK eventTxId 103 >> TPQCDTest::TestRelatedServicesAreRunning [GOOD] >> TExportToS3Tests::AuditCompletedExport [GOOD] >> KqpProxy::DatabasesCacheForServerless [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/services/persqueue_cluster_discovery/ut/unittest >> TPQCDTest::TestUnavailableWithoutClustersList [GOOD] Test command err: 2025-05-07T09:03:47.803743Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501626935739445244:2276];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:03:47.803802Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003d7f/r3tmp/tmpmY2awk/pdisk_1.dat 2025-05-07T09:03:48.160640Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:03:48.220670Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:03:48.220829Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 32386, node 1 2025-05-07T09:03:48.222795Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:03:48.366319Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/zvgn/003d7f/r3tmp/yandex4BfGX1.tmp 2025-05-07T09:03:48.366342Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/zvgn/003d7f/r3tmp/yandex4BfGX1.tmp 2025-05-07T09:03:48.369653Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/zvgn/003d7f/r3tmp/yandex4BfGX1.tmp 2025-05-07T09:03:48.369843Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-05-07T09:03:50.644971Z 
node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501626948624347595:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:03:50.645166Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501626948624347608:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:03:50.645265Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:03:50.651613Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710657:3, at schemeshard: 72057594046644480 2025-05-07T09:03:50.675298Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501626948624347618:2370], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710657 completed, doublechecking } 2025-05-07T09:03:50.799726Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501626948624347681:2356] txid# 281474976710658, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:03:51.161180Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7501626948624347698:2377], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-05-07T09:03:51.161493Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2147: SessionId: ydb://session/3?node_id=1&id=NTc5NWMxZjEtYmNlNTM4ZTgtNTAxYWE2MTEtZGRiZTVjZGU=, ActorId: [1:7501626948624347587:2365], ActorState: ExecuteState, TraceId: 01jtmzsavj2hh8dw9retzpdf97, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-05-07T09:03:51.197234Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } ------- [TM] {asan, default-linux-x86_64, release} ydb/services/persqueue_cluster_discovery/ut/unittest >> TPQCDTest::TestUnavailableWithoutBoth [GOOD] Test command err: 2025-05-07T09:03:47.767881Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501626934489481234:2139];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:03:47.776555Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003d2d/r3tmp/tmpaLZWYr/pdisk_1.dat 2025-05-07T09:03:48.151962Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:03:48.219589Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:03:48.219708Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:03:48.221096Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 13185, node 1 2025-05-07T09:03:48.365940Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:03:48.365987Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:03:48.365995Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:03:48.366133Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-05-07T09:03:50.548520Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501626947374383756:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:03:50.548541Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501626947374383744:2364], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:03:50.548664Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:03:50.556956Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715657:3, at schemeshard: 72057594046644480 2025-05-07T09:03:50.593686Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501626947374383758:2368], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715657 completed, doublechecking } 2025-05-07T09:03:50.712548Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501626947374383819:2353] txid# 281474976715658, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:03:51.076440Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7501626947374383828:2374], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-05-07T09:03:51.076645Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2147: SessionId: ydb://session/3?node_id=1&id=MjY5YjU0OTctOGM3NThhYjgtN2UyZDMwZjgtZWZhNjE4Yzk=, ActorId: [1:7501626947374383742:2363], ActorState: ExecuteState, TraceId: 01jtmzsarhf39w4k3n29fr71sa, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-05-07T09:03:51.086153Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } >> TExportToS3Tests::AuditCancelledExport ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_export/unittest >> TExportToS3Tests::ShouldSucceedOnConcurrentImport [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140] 2025-05-07T09:03:46.200011Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T09:03:46.200129Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:03:46.200162Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T09:03:46.200203Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T09:03:46.201429Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T09:03:46.201489Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T09:03:46.201578Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:03:46.201652Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T09:03:46.202305Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources 
configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T09:03:46.205706Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T09:03:46.277001Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T09:03:46.277054Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:03:46.290343Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T09:03:46.290472Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T09:03:46.290653Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T09:03:46.298327Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T09:03:46.298598Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T09:03:46.303770Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T09:03:46.304042Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T09:03:46.311918Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:03:46.321200Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:03:46.321292Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:03:46.321365Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T09:03:46.321410Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:03:46.321451Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T09:03:46.322517Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T09:03:46.328890Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T09:03:46.431049Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T09:03:46.431244Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 
72057594046678944 2025-05-07T09:03:46.431447Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T09:03:46.431626Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T09:03:46.431664Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:03:46.433475Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T09:03:46.433583Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T09:03:46.433738Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:03:46.433786Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T09:03:46.433812Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T09:03:46.433836Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T09:03:46.435250Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:03:46.435292Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T09:03:46.435316Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T09:03:46.436469Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:03:46.436501Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:03:46.436549Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:03:46.436592Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T09:03:46.439185Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T09:03:46.440461Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 
from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T09:03:46.441054Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T09:03:46.441743Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T09:03:46.441842Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:03:46.441877Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:03:46.443135Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T09:03:46.443192Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:03:46.443347Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T09:03:46.443409Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T09:03:46.445009Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:03:46.445050Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:03:46.445185Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:03:46.445232Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 
ablet strongly msg operationId: 281474976710765:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:281474976710765 msg type: 269090816 2025-05-07T09:03:52.052953Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 281474976710765, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 281474976710765 at step: 5000010 FAKE_COORDINATOR: advance: minStep5000010 State->FrontStep: 5000009 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710765 at step: 5000010 FAKE_COORDINATOR: Send Plan to tablet 72075186233409549 for txId: 281474976710765 at step: 5000010 2025-05-07T09:03:52.053349Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000010, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T09:03:52.053457Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 281474976710765 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 17179871340 } } Step: 5000010 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:03:52.053522Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:412: TBackup TPropose, opId: 281474976710765:0 HandleReply TEvOperationPlan, stepId: 5000010, at schemeshard: 72057594046678944 2025-05-07T09:03:52.053648Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 281474976710765:0 128 -> 129 2025-05-07T09:03:52.053801Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 7] was 3 REQUEST: PUT /Backup2/metadata.json HTTP/1.1 HEADERS: Host: localhost:8697 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 0F0FD647-1CDD-4D73-8CEA-662EC89DA140 amz-sdk-request: attempt=1 content-length: 73 content-md5: 5UnTthDw7DG9u0TfCJZu+w== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 S3_MOCK::HttpServeWrite: /Backup2/metadata.json / / 73 FAKE_COORDINATOR: advance: minStep5000010 State->FrontStep: 5000010 2025-05-07T09:03:52.084367Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:03:52.084417Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 281474976710765, path id: [OwnerId: 72057594046678944, LocalPathId: 7] 2025-05-07T09:03:52.084653Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:03:52.084696Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [4:204:2206], at schemeshard: 72057594046678944, txId: 281474976710765, path id: 7 2025-05-07T09:03:52.084774Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 281474976710765:0, at schemeshard: 72057594046678944 2025-05-07T09:03:52.084835Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:258: TBackup TProposedWaitParts, 
opId: 281474976710765:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T09:03:52.086153Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 7 Version: 3 PathOwnerId: 72057594046678944, cookie: 281474976710765 2025-05-07T09:03:52.086292Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 7 Version: 3 PathOwnerId: 72057594046678944, cookie: 281474976710765 2025-05-07T09:03:52.086330Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 281474976710765 2025-05-07T09:03:52.086364Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 281474976710765, pathId: [OwnerId: 72057594046678944, LocalPathId: 7], version: 3 2025-05-07T09:03:52.086401Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 7] was 4 2025-05-07T09:03:52.086504Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 281474976710765, ready parts: 0/1, is published: true REQUEST: PUT /Backup2/scheme.pb HTTP/1.1 HEADERS: Host: localhost:8697 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 7C87312D-DD60-4020-AE6E-95F1C304746C amz-sdk-request: attempt=1 content-length: 355 content-md5: 4DhJNWgTpoG3PVvZ0uCHUA== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 S3_MOCK::HttpServeWrite: /Backup2/scheme.pb / / 355 FAKE_COORDINATOR: Erasing txId 281474976710765 2025-05-07T09:03:52.089963Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710765 REQUEST: PUT /Backup2/data_00.csv HTTP/1.1 HEADERS: Host: localhost:8697 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: FBA2C086-ED1C-47C0-A0B5-3B23C27BE381 amz-sdk-request: attempt=1 content-length: 0 content-md5: 1B2M2Y8AsgTpgAmY7PhCfg== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 S3_MOCK::HttpServeWrite: /Backup2/data_00.csv / / 0 2025-05-07T09:03:52.099933Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5472: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 811 RawX2: 17179871930 } Origin: 72075186233409549 State: 2 TxId: 281474976710765 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 0 RowsProcessed: 0 } 2025-05-07T09:03:52.099995Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation FindRelatedPartByTabletId, TxId: 281474976710765, tablet: 72075186233409549, partId: 0 2025-05-07T09:03:52.100142Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 281474976710765:0, at schemeshard: 72057594046678944, message: Source { RawX1: 811 RawX2: 17179871930 } Origin: 72075186233409549 State: 2 TxId: 281474976710765 Step: 
0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 0 RowsProcessed: 0 } 2025-05-07T09:03:52.100259Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:233: TBackup TProposedWaitParts, opId: 281474976710765:0 HandleReply TEvSchemaChanged at tablet# 72057594046678944 message# Source { RawX1: 811 RawX2: 17179871930 } Origin: 72075186233409549 State: 2 TxId: 281474976710765 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 0 RowsProcessed: 0 } 2025-05-07T09:03:52.100334Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:655: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 281474976710765:0, shardIdx: 72057594046678944:4, datashard: 72075186233409549, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-05-07T09:03:52.100383Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:674: all shard schema changes has been received, operationId: 281474976710765:0, at schemeshard: 72057594046678944 2025-05-07T09:03:52.100429Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:686: send schema changes ack message, operation: 281474976710765:0, datashard: 72075186233409549, at schemeshard: 72057594046678944 2025-05-07T09:03:52.100474Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 281474976710765:0 129 -> 240 2025-05-07T09:03:52.100628Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:116: Unable to make a bill: kind# TBackup, opId# 281474976710765:0, reason# domain is not a serverless db, domain# /MyRoot, domainPathId# [OwnerId: 72057594046678944, LocalPathId: 1], IsDomainSchemeShard: 1, ParentDomainId: [OwnerId: 72057594046678944, LocalPathId: 1], ResourcesDomainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:03:52.102352Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 281474976710765:0, at schemeshard: 72057594046678944 2025-05-07T09:03:52.102651Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 281474976710765:0, at schemeshard: 72057594046678944 2025-05-07T09:03:52.102696Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046678944] TDone opId# 281474976710765:0 ProgressState 2025-05-07T09:03:52.102820Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976710765:0 progress is 1/1 2025-05-07T09:03:52.102855Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 281474976710765 ready parts: 1/1 2025-05-07T09:03:52.102904Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976710765:0 progress is 1/1 2025-05-07T09:03:52.102936Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 281474976710765 ready parts: 1/1 2025-05-07T09:03:52.102978Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 281474976710765, ready parts: 1/1, is published: true 2025-05-07T09:03:52.103054Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [4:124:2150] message: TxId: 281474976710765 2025-05-07T09:03:52.103101Z node 4 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 281474976710765 ready parts: 1/1 2025-05-07T09:03:52.103136Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 281474976710765:0 2025-05-07T09:03:52.103164Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 281474976710765:0 2025-05-07T09:03:52.103279Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 7] was 3 2025-05-07T09:03:52.104808Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6706: Handle: TEvNotifyTxCompletionResult: txId# 281474976710765 2025-05-07T09:03:52.104869Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6708: Message: TxId: 281474976710765 2025-05-07T09:03:52.106576Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 104: got EvNotifyTxCompletionResult 2025-05-07T09:03:52.106627Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 104: satisfy waiter [4:842:2769] TestWaitNotification: OK eventTxId 104 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_export/unittest >> TExportToS3Tests::EncryptedExport [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140] 2025-05-07T09:03:46.200019Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T09:03:46.200129Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:03:46.200192Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T09:03:46.200231Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T09:03:46.201460Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T09:03:46.201531Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T09:03:46.201612Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:03:46.201685Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T09:03:46.202431Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: 
HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T09:03:46.205703Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T09:03:46.276506Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T09:03:46.276565Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:03:46.292852Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T09:03:46.293087Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T09:03:46.293253Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T09:03:46.299091Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T09:03:46.299385Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T09:03:46.303765Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T09:03:46.303991Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T09:03:46.311902Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:03:46.321290Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:03:46.321364Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:03:46.321439Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T09:03:46.321506Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:03:46.321584Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T09:03:46.322517Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T09:03:46.327872Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T09:03:46.441999Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T09:03:46.442220Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 
2025-05-07T09:03:46.442455Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T09:03:46.442729Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T09:03:46.442800Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:03:46.445180Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T09:03:46.445313Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T09:03:46.445511Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:03:46.445589Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T09:03:46.445640Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T09:03:46.445676Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T09:03:46.447695Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:03:46.447761Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T09:03:46.447814Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T09:03:46.449575Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:03:46.449636Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:03:46.449689Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:03:46.449749Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T09:03:46.453436Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T09:03:46.455570Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 
72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T09:03:46.455781Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T09:03:46.456857Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T09:03:46.456991Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:03:46.457040Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:03:46.457346Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T09:03:46.457414Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:03:46.457605Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T09:03:46.457709Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T09:03:46.459865Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:03:46.459934Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:03:46.460125Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:03:46.460197Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 
9:03:52.204385Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 281474976710763 2025-05-07T09:03:52.204413Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 281474976710763, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], version: 11 2025-05-07T09:03:52.204443Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 4 2025-05-07T09:03:52.204530Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 281474976710763, ready parts: 0/1, is published: true 2025-05-07T09:03:52.205611Z node 4 :EXPORT DEBUG: schemeshard_export__create.cpp:329: TExport::TTxProgress: DoComplete 2025-05-07T09:03:52.205950Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 281474976710763, at schemeshard: 72057594046678944 2025-05-07T09:03:52.206029Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 281474976710763, ready parts: 0/1, is published: true 2025-05-07T09:03:52.206074Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 281474976710763, at schemeshard: 72057594046678944 2025-05-07T09:03:52.207416Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 281474976710763:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:281474976710763 msg type: 269090816 2025-05-07T09:03:52.207529Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 281474976710763, partId: 4294967295, tablet: 72057594046316545 2025-05-07T09:03:52.207707Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710763 FAKE_COORDINATOR: Add transaction: 281474976710763 at step: 5000010 FAKE_COORDINATOR: advance: minStep5000010 State->FrontStep: 5000009 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710763 at step: 5000010 2025-05-07T09:03:52.208072Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000010, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T09:03:52.208199Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 281474976710763 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 17179871340 } } Step: 5000010 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:03:52.208243Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_rmdir.cpp:129: TRmDir HandleReply TEvOperationPlan, opId: 281474976710763:0, step: 5000010, at schemeshard: 72057594046678944 2025-05-07T09:03:52.208389Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_rmdir.cpp:180: RmDir is done, opId: 281474976710763:0, at schemeshard: 72057594046678944 2025-05-07T09:03:52.208457Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976710763:0 progress is 1/1 2025-05-07T09:03:52.208497Z node 4 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 281474976710763 ready parts: 1/1 2025-05-07T09:03:52.208558Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976710763:0 progress is 1/1 2025-05-07T09:03:52.208597Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 281474976710763 ready parts: 1/1 2025-05-07T09:03:52.208662Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-05-07T09:03:52.208738Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 3 2025-05-07T09:03:52.208780Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 281474976710763, ready parts: 1/1, is published: false 2025-05-07T09:03:52.208838Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 281474976710763 ready parts: 1/1 2025-05-07T09:03:52.208884Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 281474976710763:0 2025-05-07T09:03:52.208924Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 281474976710763:0 2025-05-07T09:03:52.208984Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 4 2025-05-07T09:03:52.209024Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 281474976710763, publications: 2, subscribers: 1 2025-05-07T09:03:52.209065Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 281474976710763, [OwnerId: 72057594046678944, LocalPathId: 1], 13 2025-05-07T09:03:52.209108Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 281474976710763, [OwnerId: 72057594046678944, LocalPathId: 4], 18446744073709551615 2025-05-07T09:03:52.210179Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710763 2025-05-07T09:03:52.211679Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:03:52.211719Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 281474976710763, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:03:52.211864Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 281474976710763, path id: [OwnerId: 72057594046678944, LocalPathId: 4] 2025-05-07T09:03:52.211981Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:03:52.212028Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [4:204:2206], at 
schemeshard: 72057594046678944, txId: 281474976710763, path id: 1 2025-05-07T09:03:52.212066Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [4:204:2206], at schemeshard: 72057594046678944, txId: 281474976710763, path id: 4 FAKE_COORDINATOR: Erasing txId 281474976710763 2025-05-07T09:03:52.212964Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 13 PathOwnerId: 72057594046678944, cookie: 281474976710763 2025-05-07T09:03:52.213049Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 13 PathOwnerId: 72057594046678944, cookie: 281474976710763 2025-05-07T09:03:52.213105Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 281474976710763 2025-05-07T09:03:52.213166Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 281474976710763, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 13 2025-05-07T09:03:52.213211Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 4 2025-05-07T09:03:52.214022Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 281474976710763 2025-05-07T09:03:52.214110Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 281474976710763 2025-05-07T09:03:52.214152Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 281474976710763 2025-05-07T09:03:52.214187Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 281474976710763, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], version: 18446744073709551615 2025-05-07T09:03:52.214228Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 3 2025-05-07T09:03:52.214302Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 281474976710763, subscribers: 1 2025-05-07T09:03:52.214352Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:212: TTxAckPublishToSchemeBoard Notify send TEvNotifyTxCompletionResult, at schemeshard: 72057594046678944, to actorId: [4:124:2150] 2025-05-07T09:03:52.216890Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710763 2025-05-07T09:03:52.217234Z node 4 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710763 2025-05-07T09:03:52.217317Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6706: Handle: TEvNotifyTxCompletionResult: txId# 281474976710763 2025-05-07T09:03:52.217362Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6708: Message: TxId: 281474976710763 2025-05-07T09:03:52.217401Z node 4 :EXPORT DEBUG: schemeshard_export__create.cpp:309: TExport::TTxProgress: DoExecute 2025-05-07T09:03:52.217431Z node 4 :EXPORT DEBUG: schemeshard_export__create.cpp:1239: TExport::TTxProgress: OnNotifyResult: txId# 281474976710763 2025-05-07T09:03:52.217474Z node 4 :EXPORT DEBUG: schemeshard_export__create.cpp:1270: TExport::TTxProgress: OnNotifyResult: txId# 281474976710763, id# 103, itemIdx# 4294967295 2025-05-07T09:03:52.218876Z node 4 :EXPORT DEBUG: schemeshard_export__create.cpp:329: TExport::TTxProgress: DoComplete 2025-05-07T09:03:52.218959Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-05-07T09:03:52.219012Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [4:1137:3015] TestWaitNotification: OK eventTxId 103 ------- [TM] {asan, default-linux-x86_64, release} ydb/services/persqueue_cluster_discovery/ut/unittest >> TPQCDTest::TestPrioritizeLocalDatacenter [GOOD] Test command err: 2025-05-07T09:03:47.772553Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501626932634855825:2141];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:03:47.772823Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003d71/r3tmp/tmpIQ9Gd2/pdisk_1.dat 2025-05-07T09:03:48.150491Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:03:48.217709Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:03:48.217818Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:03:48.220891Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 21001, node 1 2025-05-07T09:03:48.367856Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/zvgn/003d71/r3tmp/yandexYiKyGi.tmp 2025-05-07T09:03:48.367916Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/zvgn/003d71/r3tmp/yandexYiKyGi.tmp 2025-05-07T09:03:48.368118Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/zvgn/003d71/r3tmp/yandexYiKyGi.tmp 2025-05-07T09:03:48.368269Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:12103 PQClient connected to localhost:21001 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:03:48.826291Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... waiting... waiting... waiting... 2025-05-07T09:03:50.701557Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501626945519758343:2333], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:03:50.701729Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:03:50.703022Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501626945519758370:2336], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:03:50.709408Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710661:3, at schemeshard: 72057594046644480 2025-05-07T09:03:50.713331Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501626945519758404:2340], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:03:50.713416Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:03:50.729234Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501626945519758372:2337], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710661 completed, doublechecking } 2025-05-07T09:03:50.972736Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501626945519758429:2381] txid# 281474976710662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:03:51.069451Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7501626945519758448:2344], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-05-07T09:03:51.070754Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2147: SessionId: ydb://session/3?node_id=1&id=N2I3OTQyMmUtNzhiMzU1YjUtMTQ3YzNhMzktM2NmMmU5NGM=, ActorId: [1:7501626945519758340:2331], ActorState: ExecuteState, TraceId: 01jtmzsax1e84ydep1zqbja3p2, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-05-07T09:03:51.072872Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-05-07T09:03:51.159092Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T09:03:51.277348Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T09:03:51.361956Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); 2025-05-07T09:03:51.888500Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710666. Ctx: { TraceId: 01jtmzsbmd22c5vs5xam40jb9d, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NDVlOWNjNDItOTY0YTQ4Yy04YWVmYjgyZS02NzcxM2I1ZQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_export/unittest >> TExportToS3Tests::Changefeeds [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140] 2025-05-07T09:03:46.200009Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T09:03:46.200119Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:03:46.200151Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T09:03:46.200180Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T09:03:46.201455Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T09:03:46.201507Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T09:03:46.201582Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:03:46.201655Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T09:03:46.202331Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T09:03:46.205613Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T09:03:46.274731Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T09:03:46.274802Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:03:46.290252Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T09:03:46.290419Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T09:03:46.290575Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T09:03:46.297733Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T09:03:46.298145Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T09:03:46.303794Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 
72057594046678944 2025-05-07T09:03:46.304068Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T09:03:46.311992Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:03:46.321051Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:03:46.321110Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:03:46.321174Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T09:03:46.321234Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:03:46.321297Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T09:03:46.322501Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T09:03:46.327703Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T09:03:46.434316Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T09:03:46.434520Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:03:46.434720Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T09:03:46.434979Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T09:03:46.435030Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:03:46.436890Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T09:03:46.436995Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T09:03:46.437133Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at 
schemeshard: 72057594046678944 2025-05-07T09:03:46.437183Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T09:03:46.437215Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T09:03:46.437247Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T09:03:46.438841Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:03:46.438883Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T09:03:46.438914Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T09:03:46.440530Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:03:46.440580Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:03:46.440626Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:03:46.440714Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T09:03:46.444746Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T09:03:46.446667Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T09:03:46.446872Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T09:03:46.447816Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T09:03:46.447952Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:03:46.447997Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:03:46.448201Z node 
1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T09:03:46.448240Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:03:46.448359Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T09:03:46.448412Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T09:03:46.450034Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:03:46.450073Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:03:46.450203Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:03:46.450249Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 09:03:52.335172Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 281474976710761 2025-05-07T09:03:52.335201Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 281474976710761, pathId: [OwnerId: 72057594046678944, LocalPathId: 9], version: 7 2025-05-07T09:03:52.335229Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 9] was 3 2025-05-07T09:03:52.335296Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 281474976710761, ready parts: 0/1, is published: true 2025-05-07T09:03:52.337747Z node 4 :EXPORT DEBUG: schemeshard_export__create.cpp:329: TExport::TTxProgress: DoComplete 2025-05-07T09:03:52.337886Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 281474976710761, at schemeshard: 72057594046678944 2025-05-07T09:03:52.337920Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 281474976710761, ready parts: 0/1, is published: true 2025-05-07T09:03:52.337991Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 281474976710761, at schemeshard: 72057594046678944 2025-05-07T09:03:52.338592Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 281474976710761:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:281474976710761 msg type: 269090816 2025-05-07T09:03:52.338698Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 281474976710761, partId: 4294967295, tablet: 72057594046316545 2025-05-07T09:03:52.338831Z node 4 :FLAT_TX_SCHEMESHARD 
DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710761 FAKE_COORDINATOR: Add transaction: 281474976710761 at step: 5000010 FAKE_COORDINATOR: advance: minStep5000010 State->FrontStep: 5000009 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710761 at step: 5000010 2025-05-07T09:03:52.339148Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000010, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T09:03:52.339231Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 281474976710761 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 17179871340 } } Step: 5000010 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:03:52.339274Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_rmdir.cpp:129: TRmDir HandleReply TEvOperationPlan, opId: 281474976710761:0, step: 5000010, at schemeshard: 72057594046678944 2025-05-07T09:03:52.339391Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_rmdir.cpp:180: RmDir is done, opId: 281474976710761:0, at schemeshard: 72057594046678944 2025-05-07T09:03:52.339449Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976710761:0 progress is 1/1 2025-05-07T09:03:52.339488Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 281474976710761 ready parts: 1/1 2025-05-07T09:03:52.339534Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976710761:0 progress is 1/1 2025-05-07T09:03:52.339566Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 281474976710761 ready parts: 1/1 2025-05-07T09:03:52.339626Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-05-07T09:03:52.339685Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 9] was 2 2025-05-07T09:03:52.339733Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 281474976710761, ready parts: 1/1, is published: false 2025-05-07T09:03:52.339774Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 281474976710761 ready parts: 1/1 2025-05-07T09:03:52.339803Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 281474976710761:0 2025-05-07T09:03:52.339834Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 281474976710761:0 2025-05-07T09:03:52.339908Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 9] was 3 2025-05-07T09:03:52.339943Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 281474976710761, publications: 2, subscribers: 1 2025-05-07T09:03:52.339976Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 
281474976710761, [OwnerId: 72057594046678944, LocalPathId: 1], 12 2025-05-07T09:03:52.340018Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 281474976710761, [OwnerId: 72057594046678944, LocalPathId: 9], 18446744073709551615 2025-05-07T09:03:52.341014Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710761 2025-05-07T09:03:52.342282Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:03:52.342314Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 281474976710761, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:03:52.342430Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 281474976710761, path id: [OwnerId: 72057594046678944, LocalPathId: 9] 2025-05-07T09:03:52.342516Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:03:52.342541Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [4:204:2206], at schemeshard: 72057594046678944, txId: 281474976710761, path id: 1 2025-05-07T09:03:52.342578Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [4:204:2206], at schemeshard: 72057594046678944, txId: 281474976710761, path id: 9 FAKE_COORDINATOR: Erasing txId 281474976710761 2025-05-07T09:03:52.343195Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 12 PathOwnerId: 72057594046678944, cookie: 281474976710761 2025-05-07T09:03:52.343261Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 12 PathOwnerId: 72057594046678944, cookie: 281474976710761 2025-05-07T09:03:52.343301Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 281474976710761 2025-05-07T09:03:52.343344Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 281474976710761, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 12 2025-05-07T09:03:52.343410Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-05-07T09:03:52.343812Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 9 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 281474976710761 2025-05-07T09:03:52.343877Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 
72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 9 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 281474976710761 2025-05-07T09:03:52.343907Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 281474976710761 2025-05-07T09:03:52.343938Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 281474976710761, pathId: [OwnerId: 72057594046678944, LocalPathId: 9], version: 18446744073709551615 2025-05-07T09:03:52.343960Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 9] was 2 2025-05-07T09:03:52.344032Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 281474976710761, subscribers: 1 2025-05-07T09:03:52.344070Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:212: TTxAckPublishToSchemeBoard Notify send TEvNotifyTxCompletionResult, at schemeshard: 72057594046678944, to actorId: [4:124:2150] 2025-05-07T09:03:52.346367Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710761 2025-05-07T09:03:52.346686Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710761 2025-05-07T09:03:52.346762Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6706: Handle: TEvNotifyTxCompletionResult: txId# 281474976710761 2025-05-07T09:03:52.346809Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6708: Message: TxId: 281474976710761 2025-05-07T09:03:52.346865Z node 4 :EXPORT DEBUG: schemeshard_export__create.cpp:309: TExport::TTxProgress: DoExecute 2025-05-07T09:03:52.346895Z node 4 :EXPORT DEBUG: schemeshard_export__create.cpp:1239: TExport::TTxProgress: OnNotifyResult: txId# 281474976710761 2025-05-07T09:03:52.346921Z node 4 :EXPORT DEBUG: schemeshard_export__create.cpp:1270: TExport::TTxProgress: OnNotifyResult: txId# 281474976710761, id# 105, itemIdx# 4294967295 2025-05-07T09:03:52.348344Z node 4 :EXPORT DEBUG: schemeshard_export__create.cpp:329: TExport::TTxProgress: DoComplete 2025-05-07T09:03:52.348408Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 105: got EvNotifyTxCompletionResult 2025-05-07T09:03:52.348459Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 105: satisfy waiter [4:1378:3168] TestWaitNotification: OK eventTxId 105 ------- [TM] {asan, default-linux-x86_64, release} ydb/services/persqueue_cluster_discovery/ut/unittest >> TPQCDTest::TestCloudClientsAreConsistentlyDistributed [GOOD] Test command err: 2025-05-07T09:03:47.755925Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501626933635812620:2067];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:03:47.756030Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # 
/home/runner/.ya/build/build_root/zvgn/003da8/r3tmp/tmpQyIkrA/pdisk_1.dat 2025-05-07T09:03:48.157953Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:03:48.175440Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:03:48.176232Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:03:48.180663Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 8302, node 1 2025-05-07T09:03:48.365924Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/zvgn/003da8/r3tmp/yandextVlHEi.tmp 2025-05-07T09:03:48.365956Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/zvgn/003da8/r3tmp/yandextVlHEi.tmp 2025-05-07T09:03:48.368401Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/zvgn/003da8/r3tmp/yandextVlHEi.tmp 2025-05-07T09:03:48.368572Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:27861 PQClient connected to localhost:8302 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:03:48.831277Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... waiting... waiting... waiting... 2025-05-07T09:03:50.633891Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501626946520715231:2336], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:03:50.633890Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501626946520715223:2333], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:03:50.634117Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:03:50.637087Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501626946520715240:2339], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:03:50.637186Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:03:50.639717Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710661:3, at schemeshard: 72057594046644480 2025-05-07T09:03:50.654925Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501626946520715238:2338], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710661 completed, doublechecking } 2025-05-07T09:03:50.735242Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501626946520715293:2381] txid# 281474976710662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:03:51.062970Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7501626946520715310:2343], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-05-07T09:03:51.069857Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2147: SessionId: ydb://session/3?node_id=1&id=MmU0YTMzOWQtOGEyNDg3MzMtYWMwYTQ4MjktYjljYWE0OA==, ActorId: [1:7501626946520715220:2331], ActorState: ExecuteState, TraceId: 01jtmzsatmabzt05vwxs8wtchd, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-05-07T09:03:51.073149Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-05-07T09:03:51.159081Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T09:03:51.272587Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T09:03:51.373432Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); 2025-05-07T09:03:51.888521Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710666. Ctx: { TraceId: 01jtmzsbmrc1ymaamhhdmajjdw, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NmNjYTUxNDUtZDM5ODVmZWYtYWViNGQ5YWUtMzM0ODc5ZTc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root ------- [TM] {asan, default-linux-x86_64, release} ydb/services/persqueue_cluster_discovery/ut/unittest >> TPQCDTest::TestRelatedServicesAreRunning [GOOD] Test command err: 2025-05-07T09:03:47.755935Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501626932725971738:2067];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:03:47.756031Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003d1c/r3tmp/tmpSK2j8T/pdisk_1.dat 2025-05-07T09:03:48.163211Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:03:48.195949Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:03:48.196082Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:03:48.197914Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 26681, node 1 2025-05-07T09:03:48.370664Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/zvgn/003d1c/r3tmp/yandexcGbaBp.tmp 2025-05-07T09:03:48.370688Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/zvgn/003d1c/r3tmp/yandexcGbaBp.tmp 2025-05-07T09:03:48.370851Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/zvgn/003d1c/r3tmp/yandexcGbaBp.tmp 2025-05-07T09:03:48.370987Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:16323 PQClient connected to localhost:26681 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:03:48.826485Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... waiting... waiting... waiting... 
2025-05-07T09:03:50.892893Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501626945610874318:2331], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:03:50.893094Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:03:50.893182Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501626945610874354:2335], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:03:50.898279Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710661:3, at schemeshard: 72057594046644480 2025-05-07T09:03:50.917320Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501626945610874356:2336], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710661 completed, doublechecking } 2025-05-07T09:03:51.133233Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501626945610874421:2384] txid# 281474976710662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:03:51.161912Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T09:03:51.279968Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T09:03:51.283688Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7501626949905841725:2342], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:5:17: Error: At function: KiReadTable!
:5:17: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Versions]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-05-07T09:03:51.283934Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2147: SessionId: ydb://session/3?node_id=1&id=MzE0MmFlZGYtZTIyNWQ2ZDAtN2ViNjRjOGItZDE5NDYyZTc=, ActorId: [1:7501626945610874314:2328], ActorState: ExecuteState, TraceId: 01jtmzsb2s4a4brc7hf4mvxad4, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-05-07T09:03:51.286130Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 5 column: 17 } message: "At function: KiReadTable!" end_position { row: 5 column: 17 } severity: 1 issues { position { row: 5 column: 17 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Versions]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 5 column: 17 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-05-07T09:03:51.368482Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); 2025-05-07T09:03:51.888500Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710666. Ctx: { TraceId: 01jtmzsbme9as364r5b1h5ynf9, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ODAyYjQ4YjktNDVhYmQxMTQtNjcyZTNlYmYtNWZlM2ZkYg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root |92.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest >> ColumnShardTiers::DSConfigsStub >> S3SettingsConversion::StyleDeduction [GOOD] |92.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest >> S3SettingsConversion::FoldersStyleDeduction [GOOD] >> S3SettingsConversion::Basic [GOOD] |92.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/proxy_service/ut/unittest >> KqpProxy::DatabasesCacheForServerless [GOOD] Test command err: 2025-05-07T09:03:29.264046Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501626858675004509:2074];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:03:29.264180Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-05-07T09:03:29.413522Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7501626857570667051:2074];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:03:29.413578Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-05-07T09:03:29.416479Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7501626857552704139:2073];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:03:29.416913Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-05-07T09:03:29.421509Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7501626856205342707:2141];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:03:29.422986Z node 5 :METADATA_PROVIDER WARN: log.cpp:784:
fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[5:7501626858653344267:2071];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:03:29.423228Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-05-07T09:03:29.427807Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003142/r3tmp/tmpYxkiGv/pdisk_1.dat 2025-05-07T09:03:30.295192Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:03:30.295259Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:03:30.297123Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T09:03:30.307822Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:03:30.409648Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:03:30.409676Z node 1 :BS_CONTROLLER ERROR: {BSC07@impl.h:2175} ProcessControllerEvent event processing took too much time Type# 268637706 Duration# 0.101398s 2025-05-07T09:03:30.409723Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:03:30.409767Z node 1 :BS_CONTROLLER ERROR: {BSC00@bsc.cpp:666} StateWork event processing took too much time Type# 2146435078 Duration# 0.101464s 2025-05-07T09:03:30.412055Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 5 Cookie 5 2025-05-07T09:03:30.412932Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:03:30.413406Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:03:30.413484Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:03:30.415472Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 3 Cookie 3 2025-05-07T09:03:30.416332Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:03:30.417794Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:03:30.417880Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:03:30.419900Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 4 Cookie 4 2025-05-07T09:03:30.420785Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:03:30.444252Z node 1 :IMPORT WARN: 
schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:03:30.466298Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:03:30.466378Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:03:30.468896Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-05-07T09:03:30.469435Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:16968 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-05-07T09:03:30.812834Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:03:31.856522Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1665: Updated YQL logs priority to current level: 4 2025-05-07T09:03:31.858231Z node 4 :KQP_PROXY INFO: kqp_proxy_service.cpp:442: Cannot start publishing usage, tenants: /dc-1, empty 2025-05-07T09:03:31.865713Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:512: Subscribed for config changes. 2025-05-07T09:03:31.865748Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:519: Updated table service config. 2025-05-07T09:03:31.865772Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1665: Updated YQL logs priority to current level: 4 2025-05-07T09:03:31.865821Z node 4 :KQP_PROXY INFO: kqp_proxy_service.cpp:442: Cannot start publishing usage, tenants: /dc-1, empty 2025-05-07T09:03:31.866048Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:425: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-05-07T09:03:31.866075Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:425: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-05-07T09:03:31.866137Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:425: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-05-07T09:03:31.866520Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:425: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-05-07T09:03:31.867601Z node 4 :KQP_PROXY DEBUG: table_creator.cpp:147: Table script_executions updater. 
Describe result: PathErrorUnknown 2025-05-07T09:03:31.867616Z node 4 :KQP_PROXY NOTICE: table_creator.cpp:167: Table script_executions updater. Creating table 2025-05-07T09:03:31.867622Z node 4 :KQP_PROXY DEBUG: table_creator.cpp:147: Table script_execution_leases updater. Describe result: PathErrorUnknown 2025-05-07T09:03:31.867629Z node 4 :KQP_PROXY NOTICE: table_creator.cpp:167: Table script_execution_leases updater. Creating table 2025-05-07T09:03:31.867640Z node 4 :KQP_PROXY DEBUG: table_creator.cpp:100: Table script_executions updater. Full table path:/dc-1/.metadata/script_executions 2025-05-07T09:03:31.867642Z node 4 :KQP_PROXY DEBUG: table_creator.cpp:100: Table script_execution_leases updater. Full table path:/dc-1/.metadata/script_execution_leases 2025-05-07T09:03:31.868168Z node 4 :KQP_PROXY DEBUG: table_creator.cpp:147: Table result_sets updater. Describe result: PathErrorUnknown 2025-05-07T09:03:31.868179Z node 4 :KQP_PROXY NOTICE: table_creator.cpp:167: Table result_sets updater. Creating table 2025-05-07T09:03:31.868195Z node 4 :KQP_PROXY DEBUG: table_creator.cpp:100: Table result_sets updater. Full table path:/dc-1/.metadata/result_sets 2025-05-07T09:03:31.881819Z node 5 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1665: Updated YQL logs priority to current level: 4 2025-05-07T09:03:31.883123Z node 5 :KQP_PROXY INFO: kqp_proxy_service.cpp:442: Cannot start publishing usage, tenants: /dc-1, empty 2025-05-07T09:03:31.883701Z node 5 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:512: Subscribed for config changes. 2025-05-07T09:03:31.883729Z node 5 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:519: Updated table service config. 2025-05-07T09:03:31.883742Z node 5 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1665: Updated YQL logs priority to current level: 4 2025-05-07T09:03:31.883762Z node 5 :KQP_PROXY INFO: kqp_proxy_service.cpp:442: Cannot start publishing usage, tenants: /dc-1, empty 2025-05-07T09:03:31.883813Z node 5 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:425: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-05-07T09:03:31.883839Z node 5 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:425: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-05-07T09:03:31.883939Z node 5 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:425: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-05-07T09:03:31.883958Z node 5 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:425: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-05-07T09:03:31.885098Z node 5 :KQP_PROXY DEBUG: table_creator.cpp:147: Table script_executions updater. Describe result: PathErrorUnknown 2025-05-07T09:03:31.885103Z node 5 :KQP_PROXY DEBUG: table_creator.cpp:147: Table script_execution_leases updater. Describe result: PathErrorUnknown 2025-05-07T09:03:31.885108Z node 5 :KQP_PROXY NOTICE: table_creator.cpp:167: Table script_execution_leases updater. Creating table 2025-05-07T09:03:31.885113Z node 5 :KQP_PROXY NOTICE: table_creator.cpp:167: Table script_executions updater. Creati ... 
et::Execute CreateTablet Postponed 2025-05-07T09:03:40.639232Z node 8 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-05-07T09:03:40.639337Z node 8 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-05-07T09:03:40.639425Z node 8 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-05-07T09:03:40.639487Z node 8 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-05-07T09:03:40.639552Z node 8 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-05-07T09:03:40.762925Z node 8 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:03:40.772773Z node 8 :STATISTICS WARN: tx_init.cpp:287: [72075186224037894] TTxInit::Complete. EnableColumnStatistics=false 2025-05-07T09:03:40.885604Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715660:0, at schemeshard: 72057594046644480 2025-05-07T09:03:40.910243Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7501626903395075305:2073];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:03:40.910351Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/test-shared/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-05-07T09:03:40.948244Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:03:40.948347Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:03:40.963377Z node 6 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 7 Cookie 7 2025-05-07T09:03:40.964445Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:03:41.062335Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72075186224038889 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:03:41.062461Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72075186224038889 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:03:41.066504Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72075186224038889 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:03:41.081313Z node 7 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224038889 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-05-07T09:03:41.081551Z node 7 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224038889 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-05-07T09:03:41.081638Z node 7 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224038889 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-05-07T09:03:41.081747Z node 7 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224038889 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-05-07T09:03:41.081850Z node 7 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224038889 THive::TTxCreateTablet::Execute CreateTablet 
Postponed 2025-05-07T09:03:41.081934Z node 7 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224038889 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-05-07T09:03:41.082050Z node 7 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224038889 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-05-07T09:03:41.082131Z node 7 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224038889 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-05-07T09:03:41.082251Z node 7 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224038889 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-05-07T09:03:41.292188Z node 7 :STATISTICS WARN: tx_init.cpp:287: [72075186224038895] TTxInit::Complete. EnableColumnStatistics=false 2025-05-07T09:03:41.293211Z node 7 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:03:41.430288Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-05-07T09:03:41.531711Z node 7 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:03:41.647847Z node 7 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:574: [WorkloadService] [TDatabaseFetcherActor] ActorId: [7:7501626907690043560:2543], Database: /Root/test-serverless, Start database fetching 2025-05-07T09:03:41.648093Z node 7 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:600: [WorkloadService] [TDatabaseFetcherActor] ActorId: [7:7501626907690043560:2543], Database: /Root/test-serverless, Database info successfully fetched, serverless: 1 2025-05-07T09:03:42.511976Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[6:7501626891202820723:2073];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:03:42.512047Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:03:43.792923Z node 8 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:440: [WorkloadService] [Service] Started workload service initialization 2025-05-07T09:03:43.793235Z node 8 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:241: [WorkloadService] [TCleanupTablesActor] ActorId: [8:7501626914724748987:2345], Start check tables existence, number paths: 2 2025-05-07T09:03:43.793424Z node 8 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:100: [WorkloadService] [Service] Subscribed for config changes 2025-05-07T09:03:43.793464Z node 8 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:111: [WorkloadService] [Service] Resource pools were enabled 2025-05-07T09:03:43.793650Z node 8 :KQP_WORKLOAD_SERVICE TRACE: kqp_workload_service.cpp:125: [WorkloadService] [Service] Updated node info, node count: 3 2025-05-07T09:03:43.794920Z node 8 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:182: [WorkloadService] [TCleanupTablesActor] ActorId: [8:7501626914724748987:2345], Describe table /Root/test-dedicated/.metadata/workload_manager/delayed_requests status PathErrorUnknown 2025-05-07T09:03:43.795026Z node 8 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:182: [WorkloadService] [TCleanupTablesActor] ActorId: [8:7501626914724748987:2345], Describe table /Root/test-dedicated/.metadata/workload_manager/running_requests status PathErrorUnknown 2025-05-07T09:03:43.795072Z node 8
:KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:289: [WorkloadService] [TCleanupTablesActor] ActorId: [8:7501626914724748987:2345], Successfully finished 2025-05-07T09:03:43.795150Z node 8 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:367: [WorkloadService] [Service] Cleanup completed, tables exist: 0 2025-05-07T09:03:44.163505Z node 7 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:440: [WorkloadService] [Service] Started workload service initialization 2025-05-07T09:03:44.163985Z node 7 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:241: [WorkloadService] [TCleanupTablesActor] ActorId: [7:7501626920574945540:2371], Start check tables existence, number paths: 2 2025-05-07T09:03:44.164076Z node 7 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:100: [WorkloadService] [Service] Subscribed for config changes 2025-05-07T09:03:44.164094Z node 7 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:111: [WorkloadService] [Service] Resource pools were enabled 2025-05-07T09:03:44.164116Z node 7 :KQP_WORKLOAD_SERVICE TRACE: kqp_workload_service.cpp:125: [WorkloadService] [Service] Updated node info, node count: 3 2025-05-07T09:03:44.165585Z node 7 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:182: [WorkloadService] [TCleanupTablesActor] ActorId: [7:7501626920574945540:2371], Describe table /Root/test-shared/.metadata/workload_manager/delayed_requests status PathErrorUnknown 2025-05-07T09:03:44.165679Z node 7 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:182: [WorkloadService] [TCleanupTablesActor] ActorId: [7:7501626920574945540:2371], Describe table /Root/test-shared/.metadata/workload_manager/running_requests status PathErrorUnknown 2025-05-07T09:03:44.165742Z node 7 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:289: [WorkloadService] [TCleanupTablesActor] ActorId: [7:7501626920574945540:2371], Successfully finished 2025-05-07T09:03:44.165818Z node 7 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:367: [WorkloadService] [Service] Cleanup completed, tables exist: 0 2025-05-07T09:03:45.449851Z node 8 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[8:7501626901839846331:2071];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:03:45.449929Z node 8 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/test-dedicated/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:03:45.910685Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[7:7501626903395075305:2073];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:03:45.910763Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/test-shared/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:03:51.662405Z node 6 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 8 2025-05-07T09:03:51.664127Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-05-07T09:03:51.664430Z node 6 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 7 2025-05-07T09:03:51.664634Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-05-07T09:03:51.679541Z node 6 :KQP_SESSION INFO:
kqp_session_actor.cpp:2315: SessionId: ydb://session/3?node_id=6&id=MmRiZDhkMGUtMmRlNGZmZDMtMzM2YWM1Y2ItZjcxY2RjYzQ=, ActorId: [6:7501626904087723433:2333], ActorState: ReadyState, Session closed due to explicit close event 2025-05-07T09:03:51.679593Z node 6 :KQP_SESSION INFO: kqp_session_actor.cpp:2473: SessionId: ydb://session/3?node_id=6&id=MmRiZDhkMGUtMmRlNGZmZDMtMzM2YWM1Y2ItZjcxY2RjYzQ=, ActorId: [6:7501626904087723433:2333], ActorState: ReadyState, Cleanup start, isFinal: 1 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-05-07T09:03:51.679624Z node 6 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2534: SessionId: ydb://session/3?node_id=6&id=MmRiZDhkMGUtMmRlNGZmZDMtMzM2YWM1Y2ItZjcxY2RjYzQ=, ActorId: [6:7501626904087723433:2333], ActorState: ReadyState, EndCleanup, isFinal: 1 2025-05-07T09:03:51.679663Z node 6 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2546: SessionId: ydb://session/3?node_id=6&id=MmRiZDhkMGUtMmRlNGZmZDMtMzM2YWM1Y2ItZjcxY2RjYzQ=, ActorId: [6:7501626904087723433:2333], ActorState: unknown state, Cleanup temp tables: 0 2025-05-07T09:03:51.679741Z node 6 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2637: SessionId: ydb://session/3?node_id=6&id=MmRiZDhkMGUtMmRlNGZmZDMtMzM2YWM1Y2ItZjcxY2RjYzQ=, ActorId: [6:7501626904087723433:2333], ActorState: unknown state, Session actor destroyed >> ColumnShardTiers::DSConfigsWithQueryServiceDdl >> TExportToS3Tests::AuditCancelledExport [GOOD] |92.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest |92.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest >> S3SettingsConversion::StyleDeduction [GOOD] |92.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest >> S3SettingsConversion::FoldersStyleDeduction [GOOD] >> TExportToS3Tests::AutoDropping |92.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest >> S3SettingsConversion::Basic [GOOD] >> ColumnShardTiers::DSConfigs |92.6%| [TA] $(B)/ydb/core/kqp/proxy_service/ut/test-results/unittest/{meta.json ... results_accumulator.log} |92.6%| [TA] {RESULT} $(B)/ydb/core/kqp/proxy_service/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} |92.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest |92.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest |92.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest >> TPQCDTest::TestUnavailableWithoutNetClassifier [GOOD] |92.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest >> ColumnShardTiers::TTLUsage |92.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest |92.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest |92.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest |92.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest |92.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest |92.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest |92.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest |92.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest |92.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest |92.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest >> TExportToS3Tests::AutoDropping [GOOD] >> ColumnShardTiers::TieringUsage |92.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest |92.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest |92.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest |92.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest >> S3SettingsConversion::FoldersStrictStyle [GOOD] >> S3SettingsConversion::Port [GOOD] |92.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest |92.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest |92.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest |92.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest |92.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest >> TExportToS3Tests::CancelUponTransferringMultiShardTableShouldSucceed [GOOD] |92.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest >> S3SettingsConversion::FoldersStrictStyle [GOOD] |92.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/services/persqueue_cluster_discovery/ut/unittest >> TPQCDTest::TestUnavailableWithoutNetClassifier [GOOD] Test command err: 2025-05-07T09:03:47.773658Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501626933028354282:2065];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:03:47.773881Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003d0a/r3tmp/tmpK8ZPEk/pdisk_1.dat 2025-05-07T09:03:48.173780Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:03:48.177286Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:03:48.177385Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 
Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:03:48.183621Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 29560, node 1 2025-05-07T09:03:48.371962Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:03:48.371986Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:03:48.371995Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:03:48.372116Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8322 PQClient connected to localhost:29560 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:03:48.841317Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... waiting... waiting... waiting... 2025-05-07T09:03:50.569529Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501626945913256902:2336], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:03:50.569530Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501626945913256877:2333], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:03:50.569674Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:03:50.574411Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710661:3, at schemeshard: 72057594046644480 2025-05-07T09:03:50.594168Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501626945913256906:2337], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710661 completed, doublechecking } 2025-05-07T09:03:50.672410Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501626945913256971:2383] txid# 281474976710662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:03:51.063504Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7501626945913256986:2343], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-05-07T09:03:51.078646Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2147: SessionId: ydb://session/3?node_id=1&id=N2FjMjUwMTYtMzc1MmUzNTMtZTA2NjFkZDMtNDc3ZjAwYmU=, ActorId: [1:7501626945913256874:2331], ActorState: ExecuteState, TraceId: 01jtmzsarva2641c76yja73jym, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-05-07T09:03:51.095594Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-05-07T09:03:51.159546Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T09:03:51.292521Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T09:03:51.389109Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); 2025-05-07T09:03:51.888524Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710666. Ctx: { TraceId: 01jtmzsbnf2zpab00g614cqb6n, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NTc3ODNmY2ItMWZhY2UwOTgtNTE0MGVmMDItZjZiZjM0Zjc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-05-07T09:03:52.773608Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501626933028354282:2065];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:03:52.773713Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; |92.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest >> S3SettingsConversion::Port [GOOD] |92.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest >> TExportToS3Tests::CancelUponTransferringSingleTableShouldSucceed [GOOD] >> TExportToS3Tests::CancelUponTransferringManyTablesShouldSucceed >> BackupRestore::RestoreTableSplitBoundaries [GOOD] >> BackupRestore::ImportDataShouldHandleErrors |92.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest |92.7%| [TA] $(B)/ydb/services/persqueue_cluster_discovery/ut/test-results/unittest/{meta.json ... results_accumulator.log} |92.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest |92.7%| [TA] {RESULT} $(B)/ydb/services/persqueue_cluster_discovery/ut/test-results/unittest/{meta.json ... results_accumulator.log} |92.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest |92.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_export/unittest >> TExportToS3Tests::AutoDropping [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2141] Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:127:2058] recipient: [1:109:2141] 2025-05-07T09:03:46.199958Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T09:03:46.200137Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:03:46.200196Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T09:03:46.200261Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T09:03:46.201451Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T09:03:46.201510Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T09:03:46.201564Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:03:46.201612Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, 
DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T09:03:46.202216Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T09:03:46.205661Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T09:03:46.290373Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T09:03:46.290425Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:03:46.302158Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T09:03:46.302297Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T09:03:46.302438Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T09:03:46.307159Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T09:03:46.307413Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T09:03:46.307942Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T09:03:46.308066Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T09:03:46.311430Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:03:46.320973Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:03:46.321028Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:03:46.321096Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T09:03:46.321134Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:03:46.321195Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T09:03:46.322370Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T09:03:46.327666Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:239:2058] recipient: [1:15:2062] 2025-05-07T09:03:46.424096Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at 
schemeshard: 72057594046678944 2025-05-07T09:03:46.424465Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:03:46.426912Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T09:03:46.428324Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T09:03:46.428387Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:03:46.431208Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T09:03:46.431352Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T09:03:46.431513Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:03:46.431566Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T09:03:46.431599Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T09:03:46.431622Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T09:03:46.433328Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:03:46.433372Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T09:03:46.433399Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T09:03:46.434749Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:03:46.434786Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:03:46.434819Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:03:46.434855Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T09:03:46.438467Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 
IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T09:03:46.440034Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T09:03:46.441053Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T09:03:46.441735Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T09:03:46.441861Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 134 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:03:46.441917Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:03:46.443103Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T09:03:46.443152Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:03:46.443336Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T09:03:46.443419Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T09:03:46.445189Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:03:46.445231Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:03:46.445374Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:03:46.445416Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 
G: schemeshard__publish_to_scheme_board.cpp:212: TTxAckPublishToSchemeBoard Notify send TEvNotifyTxCompletionResult, at schemeshard: 72057594046678944, to actorId: [5:124:2150] 2025-05-07T09:03:55.870720Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710761 2025-05-07T09:03:55.871083Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710761 2025-05-07T09:03:55.871183Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6706: Handle: TEvNotifyTxCompletionResult: txId# 281474976710761 2025-05-07T09:03:55.871239Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6708: Message: TxId: 281474976710761 2025-05-07T09:03:55.871287Z node 5 :EXPORT DEBUG: schemeshard_export__create.cpp:309: TExport::TTxProgress: DoExecute 2025-05-07T09:03:55.871316Z node 5 :EXPORT DEBUG: schemeshard_export__create.cpp:1239: TExport::TTxProgress: OnNotifyResult: txId# 281474976710761 2025-05-07T09:03:55.871368Z node 5 :EXPORT DEBUG: schemeshard_export__create.cpp:1270: TExport::TTxProgress: OnNotifyResult: txId# 281474976710761, id# 102, itemIdx# 4294967295 2025-05-07T09:03:55.873373Z node 5 :EXPORT DEBUG: schemeshard_export__create.cpp:329: TExport::TTxProgress: DoComplete 2025-05-07T09:03:55.873460Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-05-07T09:03:55.873513Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [5:465:2426] TestWaitNotification: OK eventTxId 102 2025-05-07T09:03:55.874680Z node 5 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T09:03:55.874874Z node 5 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 249us result status StatusSuccess 2025-05-07T09:03:55.875374Z node 5 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 11 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 11 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 9 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Table" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } 
PathsInside: 1 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 desc: 1 2025-05-07T09:03:55.875988Z node 5 :EXPORT DEBUG: schemeshard_export__forget.cpp:79: TExport::TTxForget, dropping export tables, info: { Id: 102 Uid: '' Kind: S3 DomainPathId: [OwnerId: 72057594046678944, LocalPathId: 1] ExportPathId: [OwnerId: 72057594046678944, LocalPathId: 3] UserSID: '(empty maybe)' PeerName: '' State: Done WaitTxId: 281474976710761 Issue: '' Items: 1 PendingItems: 0 PendingDropItems: 0 } 2025-05-07T09:03:55.878582Z node 5 :EXPORT DEBUG: schemeshard_export__create.cpp:309: TExport::TTxProgress: DoExecute 2025-05-07T09:03:55.878661Z node 5 :EXPORT DEBUG: schemeshard_export__create.cpp:739: TExport::TTxProgress: Resume: id# 102 2025-05-07T09:03:55.878741Z node 5 :EXPORT INFO: schemeshard_export__create.cpp:537: TExport::TTxProgress: Allocate txId: info# { Id: 102 Uid: '' Kind: S3 DomainPathId: [OwnerId: 72057594046678944, LocalPathId: 1] ExportPathId: [OwnerId: 72057594046678944, LocalPathId: 3] UserSID: '(empty maybe)' PeerName: '' State: Dropping WaitTxId: 0 Issue: '' Items: 1 PendingItems: 0 PendingDropItems: 0 } 2025-05-07T09:03:55.878811Z node 5 :EXPORT DEBUG: schemeshard_export__create.cpp:329: TExport::TTxProgress: DoComplete 2025-05-07T09:03:55.878912Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 102, at schemeshard: 72057594046678944 2025-05-07T09:03:55.878975Z node 5 :EXPORT DEBUG: schemeshard_export__create.cpp:309: TExport::TTxProgress: DoExecute 2025-05-07T09:03:55.879038Z node 5 :EXPORT DEBUG: schemeshard_export__create.cpp:859: TExport::TTxProgress: OnAllocateResult: txId# 281474976710762, id# 102 2025-05-07T09:03:55.879109Z node 5 :EXPORT INFO: schemeshard_export__create.cpp:529: TExport::TTxProgress: Drop propose: info# { Id: 102 Uid: '' Kind: S3 DomainPathId: [OwnerId: 72057594046678944, LocalPathId: 1] ExportPathId: [OwnerId: 72057594046678944, LocalPathId: 3] UserSID: '(empty maybe)' PeerName: '' State: Dropping WaitTxId: 0 Issue: '' Items: 1 PendingItems: 0 PendingDropItems: 0 }, txId# 281474976710762 2025-05-07T09:03:55.879225Z node 5 :EXPORT DEBUG: schemeshard_export__create.cpp:329: TExport::TTxProgress: DoComplete 2025-05-07T09:03:55.881829Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpRmDir Drop { Name: "export-102" } Internal: true } TxId: 281474976710762 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T09:03:55.882001Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_rmdir.cpp:29: TRmDir Propose, path: /MyRoot/export-102, pathId: 0, opId: 281474976710762:0, at schemeshard: 72057594046678944 2025-05-07T09:03:55.882162Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 281474976710762:1, propose status:StatusPathDoesNotExist, reason: Check failed: path: '/MyRoot/export-102', error: path has been deleted (id: [OwnerId: 72057594046678944, LocalPathId: 3], type: EPathTypeDir, state: EPathStateNotExist), drop stepId: 5000007, drop txId: 281474976710761, at 
schemeshard: 72057594046678944 2025-05-07T09:03:55.884587Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 281474976710762, response: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/export-102\', error: path has been deleted (id: [OwnerId: 72057594046678944, LocalPathId: 3], type: EPathTypeDir, state: EPathStateNotExist), drop stepId: 5000007, drop txId: 281474976710761" TxId: 281474976710762 SchemeshardId: 72057594046678944 PathId: 3 PathDropTxId: 281474976710761, at schemeshard: 72057594046678944 2025-05-07T09:03:55.884872Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976710762, database: /MyRoot, subject: , status: StatusPathDoesNotExist, reason: Check failed: path: '/MyRoot/export-102', error: path has been deleted (id: [OwnerId: 72057594046678944, LocalPathId: 3], type: EPathTypeDir, state: EPathStateNotExist), drop stepId: 5000007, drop txId: 281474976710761, operation: DROP DIRECTORY, path: /MyRoot/export-102 2025-05-07T09:03:55.885042Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6657: Handle: TEvModifySchemeTransactionResult: txId# 281474976710762, status# StatusPathDoesNotExist 2025-05-07T09:03:55.885162Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6659: Message: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/export-102\', error: path has been deleted (id: [OwnerId: 72057594046678944, LocalPathId: 3], type: EPathTypeDir, state: EPathStateNotExist), drop stepId: 5000007, drop txId: 281474976710761" TxId: 281474976710762 SchemeshardId: 72057594046678944 PathId: 3 PathDropTxId: 281474976710761 2025-05-07T09:03:55.885232Z node 5 :EXPORT DEBUG: schemeshard_export__create.cpp:309: TExport::TTxProgress: DoExecute 2025-05-07T09:03:55.885279Z node 5 :EXPORT DEBUG: schemeshard_export__create.cpp:920: TExport::TTxProgress: OnModifyResult: txId# 281474976710762, status# StatusPathDoesNotExist 2025-05-07T09:03:55.885362Z node 5 :EXPORT TRACE: schemeshard_export__create.cpp:921: Message: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/export-102\', error: path has been deleted (id: [OwnerId: 72057594046678944, LocalPathId: 3], type: EPathTypeDir, state: EPathStateNotExist), drop stepId: 5000007, drop txId: 281474976710761" TxId: 281474976710762 SchemeshardId: 72057594046678944 PathId: 3 PathDropTxId: 281474976710761 2025-05-07T09:03:55.885511Z node 5 :EXPORT INFO: schemeshard_export__create.cpp:1102: TExport::TTxProgress: Wait for completion: info# { Id: 102 Uid: '' Kind: S3 DomainPathId: [OwnerId: 72057594046678944, LocalPathId: 1] ExportPathId: [OwnerId: 72057594046678944, LocalPathId: 3] UserSID: '(empty maybe)' PeerName: '' State: Dropping WaitTxId: 281474976710761 Issue: '' Items: 1 PendingItems: 0 PendingDropItems: 0 }, itemIdx# 4294967295, txId# 281474976710761 2025-05-07T09:03:55.887488Z node 5 :EXPORT DEBUG: schemeshard_export__create.cpp:329: TExport::TTxProgress: DoComplete 2025-05-07T09:03:55.887635Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710761, at schemeshard: 72057594046678944 2025-05-07T09:03:55.887775Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6706: Handle: TEvNotifyTxCompletionResult: txId# 281474976710761 2025-05-07T09:03:55.887841Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6708: Message: TxId: 281474976710761 2025-05-07T09:03:55.887908Z node 5 :EXPORT DEBUG: 
schemeshard_export__create.cpp:309: TExport::TTxProgress: DoExecute 2025-05-07T09:03:55.887957Z node 5 :EXPORT DEBUG: schemeshard_export__create.cpp:1239: TExport::TTxProgress: OnNotifyResult: txId# 281474976710761 2025-05-07T09:03:55.888014Z node 5 :EXPORT DEBUG: schemeshard_export__create.cpp:1270: TExport::TTxProgress: OnNotifyResult: txId# 281474976710761, id# 102, itemIdx# 4294967295 2025-05-07T09:03:55.889860Z node 5 :EXPORT DEBUG: schemeshard_export__create.cpp:329: TExport::TTxProgress: DoComplete TestWaitNotification wait txId: 102 2025-05-07T09:03:55.890121Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2025-05-07T09:03:55.890178Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 2025-05-07T09:03:55.890639Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 2025-05-07T09:03:55.890729Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-05-07T09:03:55.890769Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [5:683:2640] TestWaitNotification: OK eventTxId 102 |92.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest >> BackupRestore::TestAllSchemeObjectTypes-EPathTypeTableIndex [GOOD] >> BackupRestore::TestAllSchemeObjectTypes-EPathTypeSequence |92.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest >> TopicService::OneConsumer_TheRangesOverlap [GOOD] >> TopicService::DifferentConsumers_TheRangesOverlap >> DemoTx::Scenario_2 [GOOD] >> TContinuousBackupTests::Basic >> TContinuousBackupTests::TakeIncrementalBackup >> DstCreator::GlobalConsistency >> DstCreator::NonExistentSrc >> DstCreator::WithIntermediateDir >> DstCreator::SameOwner >> DstCreator::ExistingDst >> DataShardVolatile::DistributedWriteThenImmediateUpsert >> DataShardVolatile::DistributedWrite >> DstCreator::ReplicationModeMismatch >> DstCreator::ColumnsSizeMismatch >> DstCreator::Basic >> DstCreator::WithSyncIndex >> TExportToS3Tests::CancelUponTransferringManyTablesShouldSucceed [GOOD] >> DemoTx::Scenario_3 |92.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_external_blobs/unittest |92.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_external_blobs/unittest |92.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_external_blobs/unittest |92.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_external_blobs/unittest |92.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_external_blobs/unittest >> TExportToS3Tests::CancelledExportEndTime >> BackupRestore::ImportDataShouldHandleErrors [GOOD] >> BackupRestore::BackupUuid >> ExternalBlobsMultipleChannels::WithNewColumnFamilyAndCompaction >> ExternalBlobsMultipleChannels::ExtBlobsMultipleColumns >> ExternalBlobsMultipleChannels::WithCompaction >> TSchemeShardSubDomainTest::Delete >> ExternalBlobsMultipleChannels::Simple >> TSchemeShardSubDomainTest::SimultaneousDeclareAndCreateTable >> ExternalBlobsMultipleChannels::SingleChannel >> TSchemeShardSubDomainTest::CreateSubDomainsInSeparateDir >> TSchemeShardSubDomainTest::SchemeLimitsRejects >> TPersQueueTest::TopicServiceCommitOffset [GOOD] >> 
TPersQueueTest::TopicServiceCommitOffsetBadOffsets |92.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/olap/oom/py3test >> overlapping_portions.py::TestOverlappingPortions::test [GOOD] >> BackupRestore::TestAllSchemeObjectTypes-EPathTypeSequence [GOOD] >> BackupRestore::TestAllSchemeObjectTypes-EPathTypeReplication >> TPersQueueCommonTest::Auth_MultipleUpdateTokenRequestIterationsWithValidToken_GotUpdateTokenResponseForEachRequest [GOOD] >> TPersQueueCommonTest::Auth_WriteSessionWithValidTokenAndACEAndThenRemoveACEAndSendWriteRequest_SessionClosedWithUnauthorizedErrorAfterSuccessfullWriteResponse >> TExportToS3Tests::CancelledExportEndTime [GOOD] >> TCdcStreamTests::VirtualTimestamps >> TPersQueueTest::CloseActiveWriteSessionOnClusterDisable [GOOD] >> TContinuousBackupTests::TakeIncrementalBackup [GOOD] >> TPersQueueTest::Cache >> TContinuousBackupTests::Basic [GOOD] >> TSchemeShardSubDomainTest::SetSchemeLimits >> TSchemeShardSubDomainTest::SchemeDatabaseQuotaRejects >> TSchemeShardSubDomainTest::Create >> TSchemeShardSubDomainTest::SimultaneousCreateForceDropTwice >> TSchemeShardSubDomainTest::CreateItemsInsideSubdomain >> TSchemeShardSubDomainTest::SimultaneousDeclareAndCreateTable [GOOD] >> TSchemeShardSubDomainTest::Delete [GOOD] >> TSchemeShardSubDomainTest::CreateSubDomainsInSeparateDir [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_continuous_backup/unittest >> TContinuousBackupTests::TakeIncrementalBackup [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140] 2025-05-07T09:04:01.573109Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T09:04:01.573219Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:04:01.573259Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T09:04:01.573320Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T09:04:01.574253Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T09:04:01.574311Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T09:04:01.574399Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:04:01.574478Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T09:04:01.575210Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T09:04:01.577118Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T09:04:01.669472Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T09:04:01.669535Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:04:01.691604Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T09:04:01.691856Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T09:04:01.692032Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T09:04:01.700431Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T09:04:01.700763Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T09:04:01.707774Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:01.708083Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T09:04:01.715969Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:01.725483Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:01.725572Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:01.725656Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T09:04:01.725704Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:01.725789Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T09:04:01.726674Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T09:04:01.733302Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T09:04:01.865264Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T09:04:01.874469Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: 
TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:01.888276Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T09:04:01.900513Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T09:04:01.900616Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:01.915077Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:01.915216Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T09:04:01.915395Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:01.915457Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T09:04:01.915509Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T09:04:01.915541Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T09:04:01.918227Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:01.918290Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T09:04:01.918329Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T09:04:01.920322Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:01.920387Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:01.920433Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:01.920501Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T09:04:01.926480Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T09:04:01.928711Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T09:04:01.935816Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T09:04:01.937071Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:01.937249Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:04:01.937300Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:01.943936Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T09:04:01.944051Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:01.944275Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T09:04:01.944382Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T09:04:01.946925Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:01.947006Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:01.947197Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:01.947262Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 
rd__operation.cpp:1629: TOperation IsReadyToDone TxId: 103 ready parts: 4/4 2025-05-07T09:04:03.062399Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#103:1 progress is 4/4 2025-05-07T09:04:03.062439Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 103 ready parts: 4/4 2025-05-07T09:04:03.062487Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 103, ready parts: 4/4, is published: true 2025-05-07T09:04:03.062561Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:334:2313] message: TxId: 103 2025-05-07T09:04:03.062608Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 103 ready parts: 4/4 2025-05-07T09:04:03.062650Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 103:0 2025-05-07T09:04:03.062681Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 103:0 2025-05-07T09:04:03.062765Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-05-07T09:04:03.062800Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 103:1 2025-05-07T09:04:03.062818Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 103:1 2025-05-07T09:04:03.062880Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-05-07T09:04:03.062907Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 103:2 2025-05-07T09:04:03.062925Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 103:2 2025-05-07T09:04:03.062982Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 3 2025-05-07T09:04:03.063006Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 103:3 2025-05-07T09:04:03.063023Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 103:3 2025-05-07T09:04:03.063109Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 4 2025-05-07T09:04:03.065550Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-05-07T09:04:03.065610Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [1:726:2630] TestWaitNotification: OK eventTxId 103 2025-05-07T09:04:03.066261Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/IncrBackupImpl" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 
2025-05-07T09:04:03.066526Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/IncrBackupImpl" took 288us result status StatusSuccess 2025-05-07T09:04:03.066999Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/IncrBackupImpl" PathDescription { Self { Name: "IncrBackupImpl" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 103 CreateStep: 5000004 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "IncrBackupImpl" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Uint64" TypeId: 4 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "__ydb_incrBackupImpl_deleted" Type: "Bool" TypeId: 6 Id: 3 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 1 IsBackup: false ReplicationConfig { Mode: REPLICATION_MODE_READ_ONLY ConsistencyLevel: CONSISTENCY_LEVEL_ROW } IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 1 PQPartitionsLimit: 1000000 } UserAttributes { Key: "__incremental_backup" Value: "{}" } UserAttributes { Key: "__async_replica" Value: "true" } } PathId: 5 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:04:03.067622Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/continuousBackupImpl/streamImpl" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-05-07T09:04:03.067818Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/continuousBackupImpl/streamImpl" took 211us result status StatusSuccess 2025-05-07T09:04:03.068270Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Table/continuousBackupImpl/streamImpl" PathDescription { Self { Name: 
"streamImpl" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 102 CreateStep: 5000003 ParentPathId: 3 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeStreamImpl Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 2 } ChildrenExist: false BalancerTabletID: 72075186233409548 } PersQueueGroup { Name: "streamImpl" PathId: 4 TotalGroupCount: 1 PartitionPerTablet: 2 PQTabletConfig { PartitionConfig { MaxCountInPartition: 2147483647 LifetimeSeconds: 86400 WriteSpeedInBytesPerSecond: 1048576 BurstSize: 1048576 } TopicName: "continuousBackupImpl" TopicPath: "/MyRoot/Table/continuousBackupImpl/streamImpl" YdbDatabasePath: "/MyRoot" PartitionKeySchema { Name: "key" TypeId: 4 } MeteringMode: METERING_MODE_REQUEST_UNITS OffloadConfig { IncrementalBackup { DstPath: "/MyRoot/IncrBackupImpl" DstPathId { OwnerId: 72057594046678944 LocalId: 5 } } } } Partitions { PartitionId: 0 TabletId: 72075186233409547 Status: Active } AlterVersion: 2 BalancerTabletID: 72075186233409548 NextPartitionId: 1 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 1 PQPartitionsLimit: 1000000 } } PathId: 4 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:04:03.070676Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/IncrBackupImpl" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-05-07T09:04:03.070976Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/IncrBackupImpl" took 244us result status StatusSuccess 2025-05-07T09:04:03.071392Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/IncrBackupImpl" PathDescription { Self { Name: "IncrBackupImpl" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 103 CreateStep: 5000004 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "IncrBackupImpl" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Uint64" TypeId: 4 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "__ydb_incrBackupImpl_deleted" Type: "Bool" TypeId: 6 Id: 3 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 1 IsBackup: false ReplicationConfig { Mode: REPLICATION_MODE_READ_ONLY ConsistencyLevel: 
CONSISTENCY_LEVEL_ROW } IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 1 PQPartitionsLimit: 1000000 } UserAttributes { Key: "__incremental_backup" Value: "{}" } UserAttributes { Key: "__async_replica" Value: "true" } } PathId: 5 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> YdbOlapStore::LogWithUnionAllDescending [GOOD] >> YdbOlapStore::LogTsRangeDescending ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_continuous_backup/unittest >> TContinuousBackupTests::Basic [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140] 2025-05-07T09:04:01.573081Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T09:04:01.573247Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:04:01.573291Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T09:04:01.573326Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T09:04:01.574243Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T09:04:01.574311Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T09:04:01.574401Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:04:01.574466Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, 
CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T09:04:01.575112Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T09:04:01.577084Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T09:04:01.669472Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T09:04:01.669538Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:04:01.692130Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T09:04:01.692343Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T09:04:01.692502Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T09:04:01.700434Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T09:04:01.700820Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T09:04:01.707778Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:01.708172Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T09:04:01.715833Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:01.725561Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:01.725636Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:01.725712Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T09:04:01.725760Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:01.725804Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T09:04:01.726751Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T09:04:01.736347Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T09:04:01.865227Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 
2025-05-07T09:04:01.874460Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:01.888279Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T09:04:01.900493Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T09:04:01.900615Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:01.913207Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:01.913367Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T09:04:01.913560Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:01.913695Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T09:04:01.913738Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T09:04:01.913774Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T09:04:01.915735Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:01.915788Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T09:04:01.915825Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T09:04:01.917508Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:01.917558Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:01.917599Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:01.917666Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T09:04:01.926441Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 
72057594046316545 2025-05-07T09:04:01.928687Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T09:04:01.935953Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T09:04:01.937075Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:01.937243Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:04:01.937301Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:01.944288Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T09:04:01.944380Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:01.944550Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T09:04:01.944633Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T09:04:01.946984Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:01.947062Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:01.947270Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:01.947330Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 
07T09:04:02.954045Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 2025-05-07T09:04:02.976881Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6245: Handle TEvProposeTransactionResult, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 104 Step: 5000005 OrderId: 104 ExecLatency: 0 ProposeLatency: 5 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 726 } } 2025-05-07T09:04:02.976963Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation FindRelatedPartByTabletId, TxId: 104, tablet: 72075186233409546, partId: 0 2025-05-07T09:04:02.977128Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 104:0, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 104 Step: 5000005 OrderId: 104 ExecLatency: 0 ProposeLatency: 5 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 726 } } 2025-05-07T09:04:02.977225Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_part.cpp:108: HandleReply TEvDataShard::TEvProposeTransactionResult Ignore message: tablet# 72057594046678944, ev# TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 104 Step: 5000005 OrderId: 104 ExecLatency: 0 ProposeLatency: 5 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 726 } } FAKE_COORDINATOR: Erasing txId 104 2025-05-07T09:04:02.978281Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5472: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 305 RawX2: 4294969588 } Origin: 72075186233409546 State: 2 TxId: 104 Step: 0 Generation: 2 2025-05-07T09:04:02.978330Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation FindRelatedPartByTabletId, TxId: 104, tablet: 72075186233409546, partId: 0 2025-05-07T09:04:02.978465Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 104:0, at schemeshard: 72057594046678944, message: Source { RawX1: 305 RawX2: 4294969588 } Origin: 72075186233409546 State: 2 TxId: 104 Step: 0 Generation: 2 2025-05-07T09:04:02.978522Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1005: NTableState::TProposedWaitParts operationId# 104:0 HandleReply TEvSchemaChanged at tablet: 72057594046678944 2025-05-07T09:04:02.978640Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1009: NTableState::TProposedWaitParts operationId# 104:0 HandleReply TEvSchemaChanged at tablet: 72057594046678944 message: Source { RawX1: 305 RawX2: 4294969588 } Origin: 72075186233409546 State: 2 TxId: 104 Step: 0 Generation: 2 2025-05-07T09:04:02.978703Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:655: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 104:0, shardIdx: 72057594046678944:1, datashard: 72075186233409546, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:02.978782Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:674: all shard schema changes has been received, operationId: 104:0, at schemeshard: 
72057594046678944 2025-05-07T09:04:02.978821Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:686: send schema changes ack message, operation: 104:0, datashard: 72075186233409546, at schemeshard: 72057594046678944 2025-05-07T09:04:02.978872Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 104:0 129 -> 240 2025-05-07T09:04:02.981864Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 104:0, at schemeshard: 72057594046678944 2025-05-07T09:04:02.982326Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 104:0, at schemeshard: 72057594046678944 2025-05-07T09:04:02.982628Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 104:0, at schemeshard: 72057594046678944 2025-05-07T09:04:02.982696Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046678944] TDone opId# 104:0 ProgressState 2025-05-07T09:04:02.982804Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#104:0 progress is 3/3 2025-05-07T09:04:02.982842Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 104 ready parts: 3/3 2025-05-07T09:04:02.982906Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#104:0 progress is 3/3 2025-05-07T09:04:02.982939Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 104 ready parts: 3/3 2025-05-07T09:04:02.983012Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 104, ready parts: 3/3, is published: true 2025-05-07T09:04:02.983103Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:334:2313] message: TxId: 104 2025-05-07T09:04:02.983156Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 104 ready parts: 3/3 2025-05-07T09:04:02.983196Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 104:0 2025-05-07T09:04:02.983230Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 104:0 2025-05-07T09:04:02.983347Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-05-07T09:04:02.983387Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 104:1 2025-05-07T09:04:02.983407Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 104:1 2025-05-07T09:04:02.983437Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-05-07T09:04:02.983466Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 104:2 2025-05-07T09:04:02.983497Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 104:2 2025-05-07T09:04:02.983560Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove 
txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 1 2025-05-07T09:04:02.983925Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-05-07T09:04:02.983970Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 4], at schemeshard: 72057594046678944 2025-05-07T09:04:02.984034Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-05-07T09:04:02.984075Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-05-07T09:04:02.984110Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-05-07T09:04:02.986413Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 104: got EvNotifyTxCompletionResult 2025-05-07T09:04:02.986465Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 104: satisfy waiter [1:728:2643] 2025-05-07T09:04:02.987347Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 2 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 104 2025-05-07T09:04:02.987838Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/continuousBackupImpl" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-05-07T09:04:02.988084Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/continuousBackupImpl" took 261us result status StatusPathDoesNotExist 2025-05-07T09:04:02.988242Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/Table/continuousBackupImpl\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot/Table\' (id: [OwnerId: 72057594046678944, LocalPathId: 2])" Path: "/MyRoot/Table/continuousBackupImpl" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot/Table" LastExistedPrefixPathId: 2 LastExistedPrefixDescription { Self { Name: "Table" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-05-07T09:04:02.988697Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/continuousBackupImpl/streamImpl" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 
2025-05-07T09:04:02.988873Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/continuousBackupImpl/streamImpl" took 176us result status StatusPathDoesNotExist 2025-05-07T09:04:02.989018Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/Table/continuousBackupImpl/streamImpl\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot/Table\' (id: [OwnerId: 72057594046678944, LocalPathId: 2])" Path: "/MyRoot/Table/continuousBackupImpl/streamImpl" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot/Table" LastExistedPrefixPathId: 2 LastExistedPrefixDescription { Self { Name: "Table" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 >> TSchemeShardSubDomainTest::SimultaneousDefineAndCreateTable ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::SimultaneousDeclareAndCreateTable [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140] 2025-05-07T09:04:03.087172Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T09:04:03.087314Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:04:03.087365Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T09:04:03.087407Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T09:04:03.088289Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T09:04:03.088364Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T09:04:03.088446Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:04:03.088525Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T09:04:03.089368Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , 
AvailableExternalDataSources#
2025-05-07T09:04:03.093304Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute
2025-05-07T09:04:03.182699Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs
2025-05-07T09:04:03.182750Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded
2025-05-07T09:04:03.208161Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete
2025-05-07T09:04:03.208359Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute
2025-05-07T09:04:03.208659Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944
2025-05-07T09:04:03.215355Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete
2025-05-07T09:04:03.217138Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0
2025-05-07T09:04:03.221086Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944
2025-05-07T09:04:03.222081Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944
2025-05-07T09:04:03.231011Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944
2025-05-07T09:04:03.246341Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944
2025-05-07T09:04:03.246436Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944
2025-05-07T09:04:03.246514Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute
2025-05-07T09:04:03.246562Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1]
2025-05-07T09:04:03.246659Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete
2025-05-07T09:04:03.246892Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944
2025-05-07T09:04:03.254549Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0
Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062]
2025-05-07T09:04:03.388799Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944
2025-05-07T09:04:03.390535Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944
2025-05-07T09:04:03.392170Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0
2025-05-07T09:04:03.392760Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944
2025-05-07T09:04:03.392845Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944
2025-05-07T09:04:03.397332Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944
2025-05-07T09:04:03.397510Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot
2025-05-07T09:04:03.397729Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944
2025-05-07T09:04:03.397786Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944
2025-05-07T09:04:03.397827Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state
2025-05-07T09:04:03.397859Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3
2025-05-07T09:04:03.404071Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944
2025-05-07T09:04:03.404139Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944
2025-05-07T09:04:03.404184Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128
2025-05-07T09:04:03.411390Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944
2025-05-07T09:04:03.411486Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944
2025-05-07T09:04:03.411549Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944
2025-05-07T09:04:03.411617Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1
2025-05-07T09:04:03.416456Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545
2025-05-07T09:04:03.420338Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816
2025-05-07T09:04:03.420568Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545
FAKE_COORDINATOR: Add transaction: 1 at step: 5000001
FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0
FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001
2025-05-07T09:04:03.422414Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944
2025-05-07T09:04:03.422557Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944
2025-05-07T09:04:03.422609Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944
2025-05-07T09:04:03.423681Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240
2025-05-07T09:04:03.423746Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944
2025-05-07T09:04:03.425003Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1
2025-05-07T09:04:03.425097Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944
FAKE_COORDINATOR: Erasing txId 1
2025-05-07T09:04:03.427470Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944
2025-05-07T09:04:03.427535Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1]
2025-05-07T09:04:03.427704Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944
2025-05-07T09:04:03.427742Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ...
TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944
FAKE_COORDINATOR: Erasing txId 100
2025-05-07T09:04:03.491016Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944
2025-05-07T09:04:03.491054Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 100, path id: [OwnerId: 72057594046678944, LocalPathId: 1]
2025-05-07T09:04:03.491175Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 100, path id: [OwnerId: 72057594046678944, LocalPathId: 2]
2025-05-07T09:04:03.491299Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944
2025-05-07T09:04:03.491365Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:205:2207], at schemeshard: 72057594046678944, txId: 100, path id: 1
2025-05-07T09:04:03.491418Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:205:2207], at schemeshard: 72057594046678944, txId: 100, path id: 2
2025-05-07T09:04:03.491725Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 100:0, at schemeshard: 72057594046678944
2025-05-07T09:04:03.491767Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046678944] TDone opId# 100:0 ProgressState
2025-05-07T09:04:03.491856Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#100:0 progress is 1/1
2025-05-07T09:04:03.491950Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 100 ready parts: 1/1
2025-05-07T09:04:03.492004Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#100:0 progress is 1/1
2025-05-07T09:04:03.492039Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 100 ready parts: 1/1
2025-05-07T09:04:03.492085Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 100, ready parts: 1/1, is published: false
2025-05-07T09:04:03.492137Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 100 ready parts: 1/1
2025-05-07T09:04:03.492178Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 100:0
2025-05-07T09:04:03.492215Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 100:0
2025-05-07T09:04:03.492276Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3
2025-05-07T09:04:03.492311Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 100, publications: 2, subscribers: 0
2025-05-07T09:04:03.492345Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 100, [OwnerId: 72057594046678944, LocalPathId: 1], 5
2025-05-07T09:04:03.492369Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 100, [OwnerId: 72057594046678944, LocalPathId: 2], 3
2025-05-07T09:04:03.493026Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 100
2025-05-07T09:04:03.493103Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 100
2025-05-07T09:04:03.493137Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 100
2025-05-07T09:04:03.493190Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 100, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 5
2025-05-07T09:04:03.493235Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2
2025-05-07T09:04:03.493945Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 100
2025-05-07T09:04:03.494276Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 100
2025-05-07T09:04:03.494313Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 100
2025-05-07T09:04:03.494340Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 100, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 3
2025-05-07T09:04:03.494388Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2
2025-05-07T09:04:03.494466Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 100, subscribers: 0
2025-05-07T09:04:03.497992Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 100
2025-05-07T09:04:03.498330Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 100
TestModificationResult got TxId: 100, wait until txId: 100
TestModificationResults wait txId: 101
TestModificationResult got TxId: 101, wait until txId: 101
TestWaitNotification wait txId: 100
2025-05-07T09:04:03.498621Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 100: send EvNotifyTxCompletion
2025-05-07T09:04:03.498718Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 100
TestWaitNotification wait txId: 101
2025-05-07T09:04:03.504512Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 101: send EvNotifyTxCompletion
2025-05-07T09:04:03.504556Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 101
2025-05-07T09:04:03.505036Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 100, at schemeshard: 72057594046678944
2025-05-07T09:04:03.505204Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 100: got EvNotifyTxCompletionResult
2025-05-07T09:04:03.505258Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 100: satisfy waiter [1:310:2301]
2025-05-07T09:04:03.505439Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944
2025-05-07T09:04:03.505530Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult
2025-05-07T09:04:03.505551Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:310:2301]
TestWaitNotification: OK eventTxId 100
TestWaitNotification: OK eventTxId 101
2025-05-07T09:04:03.505938Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944
2025-05-07T09:04:03.506203Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 249us result status StatusSuccess
2025-05-07T09:04:03.509226Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0" PathDescription { Self { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 1 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 0 TimeCastBucketsPerMediator: 0 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944
2025-05-07T09:04:03.513286Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0/table_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944
2025-05-07T09:04:03.513487Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0/table_0" took 209us result status StatusPathDoesNotExist
2025-05-07T09:04:03.513649Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0/table_0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot/USER_0\' (id: [OwnerId: 72057594046678944, LocalPathId: 2])" Path: "/MyRoot/USER_0/table_0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot/USER_0" LastExistedPrefixPathId: 2 LastExistedPrefixDescription { Self { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944
|92.7%| [TA] $(B)/ydb/core/tx/schemeshard/ut_continuous_backup/test-results/unittest/{meta.json ... results_accumulator.log}
|92.7%| [TA] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_continuous_backup/test-results/unittest/{meta.json ... results_accumulator.log}
>> TPersQueueTest::SameOffset [GOOD]
>> TPersQueueTest::SchemeOperationsTest
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::CreateSubDomainsInSeparateDir [GOOD]
Test command err:
Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140]
IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140]
Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140]
2025-05-07T09:04:03.087674Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1
2025-05-07T09:04:03.087762Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10
2025-05-07T09:04:03.087839Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s
2025-05-07T09:04:03.087875Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration
2025-05-07T09:04:03.089634Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000
2025-05-07T09:04:03.089703Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000
2025-05-07T09:04:03.089789Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10
2025-05-07T09:04:03.089894Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false
2025-05-07T09:04:03.090707Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources#
2025-05-07T09:04:03.091664Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute
2025-05-07T09:04:03.181663Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs
2025-05-07T09:04:03.181738Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded
2025-05-07T09:04:03.204611Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete
2025-05-07T09:04:03.204814Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute
2025-05-07T09:04:03.204971Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944
2025-05-07T09:04:03.219739Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete
2025-05-07T09:04:03.220075Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0
2025-05-07T09:04:03.221069Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944
2025-05-07T09:04:03.222341Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944
2025-05-07T09:04:03.228855Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944
2025-05-07T09:04:03.245580Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944
2025-05-07T09:04:03.245674Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944
2025-05-07T09:04:03.245786Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute
2025-05-07T09:04:03.245849Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1]
2025-05-07T09:04:03.245953Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete
2025-05-07T09:04:03.247629Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944
2025-05-07T09:04:03.259300Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0
Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062]
2025-05-07T09:04:03.409650Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944
2025-05-07T09:04:03.409910Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944
2025-05-07T09:04:03.410163Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0
2025-05-07T09:04:03.410462Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944
2025-05-07T09:04:03.410554Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944
2025-05-07T09:04:03.412957Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944
2025-05-07T09:04:03.413131Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot
2025-05-07T09:04:03.413330Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944
2025-05-07T09:04:03.413399Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944
2025-05-07T09:04:03.413444Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state
2025-05-07T09:04:03.413489Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3
2025-05-07T09:04:03.416366Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944
2025-05-07T09:04:03.416430Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944
2025-05-07T09:04:03.416480Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128
2025-05-07T09:04:03.421445Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944
2025-05-07T09:04:03.421519Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944
2025-05-07T09:04:03.421574Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944
2025-05-07T09:04:03.421641Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1
2025-05-07T09:04:03.425531Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545
2025-05-07T09:04:03.427837Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816
2025-05-07T09:04:03.428048Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545
FAKE_COORDINATOR: Add transaction: 1 at step: 5000001
FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0
FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001
2025-05-07T09:04:03.429066Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944
2025-05-07T09:04:03.429214Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944
2025-05-07T09:04:03.429265Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944
2025-05-07T09:04:03.429575Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240
2025-05-07T09:04:03.429644Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944
2025-05-07T09:04:03.429825Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1
2025-05-07T09:04:03.429900Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944
FAKE_COORDINATOR: Erasing txId 1
2025-05-07T09:04:03.432371Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944
2025-05-07T09:04:03.432430Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1]
2025-05-07T09:04:03.432646Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944
2025-05-07T09:04:03.432692Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ...
72057594046678944 Generation: 2 LocalPathId: 2 Version: 7 PathOwnerId: 72057594046678944, cookie: 102
2025-05-07T09:04:03.766250Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 7 PathOwnerId: 72057594046678944, cookie: 102
2025-05-07T09:04:03.766289Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 102
2025-05-07T09:04:03.766331Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 7
2025-05-07T09:04:03.766389Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3
2025-05-07T09:04:03.767117Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 3 PathOwnerId: 72057594046678944, cookie: 102
2025-05-07T09:04:03.767191Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 3 PathOwnerId: 72057594046678944, cookie: 102
2025-05-07T09:04:03.767239Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 102
2025-05-07T09:04:03.767280Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], version: 3
2025-05-07T09:04:03.767322Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 8
2025-05-07T09:04:03.767386Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 102, subscribers: 0
2025-05-07T09:04:03.770452Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102
2025-05-07T09:04:03.770559Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102
TestModificationResult got TxId: 102, wait until txId: 102
TestWaitNotification wait txId: 100
2025-05-07T09:04:03.770828Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 100: send EvNotifyTxCompletion
2025-05-07T09:04:03.770878Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 100
TestWaitNotification wait txId: 101
2025-05-07T09:04:03.770993Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 101: send EvNotifyTxCompletion
2025-05-07T09:04:03.771016Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 101
TestWaitNotification wait txId: 102
2025-05-07T09:04:03.771061Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion
2025-05-07T09:04:03.771082Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102
2025-05-07T09:04:03.771622Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 100, at schemeshard: 72057594046678944
2025-05-07T09:04:03.771776Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 100: got EvNotifyTxCompletionResult
2025-05-07T09:04:03.771816Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 100: satisfy waiter [1:929:2763]
2025-05-07T09:04:03.771976Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944
2025-05-07T09:04:03.772036Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944
2025-05-07T09:04:03.772088Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult
2025-05-07T09:04:03.772113Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:929:2763]
2025-05-07T09:04:03.772222Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult
2025-05-07T09:04:03.772248Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:929:2763]
TestWaitNotification: OK eventTxId 100
TestWaitNotification: OK eventTxId 101
TestWaitNotification: OK eventTxId 102
2025-05-07T09:04:03.772715Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/SubDomains/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944
2025-05-07T09:04:03.772916Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/SubDomains/USER_0" took 234us result status StatusSuccess
2025-05-07T09:04:03.773371Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/SubDomains/USER_0" PathDescription { Self { Name: "USER_0" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 101 CreateStep: 5000003 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 1 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409546 Coordinators: 72075186233409547 Coordinators: 72075186233409548 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409549 Mediators: 72075186233409550 Mediators: 72075186233409551 } DomainKey { SchemeShard: 72057594046678944 PathId: 3 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 6 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 3 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944
2025-05-07T09:04:03.774000Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/SubDomains/USER_1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944
2025-05-07T09:04:03.774214Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/SubDomains/USER_1" took 211us result status StatusSuccess
2025-05-07T09:04:03.774550Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/SubDomains/USER_1" PathDescription { Self { Name: "USER_1" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 102 CreateStep: 5000004 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 1 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409552 Coordinators: 72075186233409553 Coordinators: 72075186233409554 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409555 Mediators: 72075186233409556 Mediators: 72075186233409557 } DomainKey { SchemeShard: 72057594046678944 PathId: 4 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 6 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 4 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } } } PathId: 4 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944
2025-05-07T09:04:03.775030Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/SubDomains" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944
2025-05-07T09:04:03.775181Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/SubDomains" took 184us result status StatusSuccess
2025-05-07T09:04:03.775522Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/SubDomains" PathDescription { Self { Name: "SubDomains" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 6 } ChildrenExist: true } Children { Name: "USER_0" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 101 CreateStep: 5000003 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" } Children { Name: "USER_1" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 102 CreateStep: 5000004 ParentPathId: 2 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::Delete [GOOD]
Test command err:
Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140]
IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140]
Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140]
2025-05-07T09:04:03.090129Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1
2025-05-07T09:04:03.090226Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10
2025-05-07T09:04:03.090261Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s
2025-05-07T09:04:03.090294Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration
2025-05-07T09:04:03.090350Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000
2025-05-07T09:04:03.090393Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000
2025-05-07T09:04:03.090443Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10
2025-05-07T09:04:03.090514Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false
2025-05-07T09:04:03.091246Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources#
2025-05-07T09:04:03.091620Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute
2025-05-07T09:04:03.183588Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs
2025-05-07T09:04:03.183648Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded
2025-05-07T09:04:03.210786Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete
2025-05-07T09:04:03.211071Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute
2025-05-07T09:04:03.211286Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944
2025-05-07T09:04:03.227057Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete
2025-05-07T09:04:03.227398Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0
2025-05-07T09:04:03.228134Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944
2025-05-07T09:04:03.228317Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944
2025-05-07T09:04:03.232336Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944
2025-05-07T09:04:03.245314Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944
2025-05-07T09:04:03.245528Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944
2025-05-07T09:04:03.245630Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute
2025-05-07T09:04:03.245684Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1]
2025-05-07T09:04:03.245794Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete
2025-05-07T09:04:03.246612Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944
2025-05-07T09:04:03.255506Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0
Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062]
2025-05-07T09:04:03.388781Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944
2025-05-07T09:04:03.390581Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944
2025-05-07T09:04:03.391438Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0
2025-05-07T09:04:03.392743Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944
2025-05-07T09:04:03.392855Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944
2025-05-07T09:04:03.402462Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944
2025-05-07T09:04:03.402658Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot
2025-05-07T09:04:03.402905Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944
2025-05-07T09:04:03.402991Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944
2025-05-07T09:04:03.404490Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state
2025-05-07T09:04:03.404552Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3
2025-05-07T09:04:03.410925Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944
2025-05-07T09:04:03.411015Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944
2025-05-07T09:04:03.411071Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128
2025-05-07T09:04:03.415885Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944
2025-05-07T09:04:03.415967Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944
2025-05-07T09:04:03.416033Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944
2025-05-07T09:04:03.416103Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1
2025-05-07T09:04:03.420265Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545
2025-05-07T09:04:03.427725Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816
2025-05-07T09:04:03.427978Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545
FAKE_COORDINATOR: Add transaction: 1 at step: 5000001
FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0
FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001
2025-05-07T09:04:03.429004Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944
2025-05-07T09:04:03.429154Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944
2025-05-07T09:04:03.429202Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944
2025-05-07T09:04:03.429510Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240
2025-05-07T09:04:03.429568Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944
2025-05-07T09:04:03.429748Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1
2025-05-07T09:04:03.429816Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944
FAKE_COORDINATOR: Erasing txId 1
2025-05-07T09:04:03.435283Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944
2025-05-07T09:04:03.435347Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1]
2025-05-07T09:04:03.435523Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944
2025-05-07T09:04:03.435575Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ...
TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-05-07T09:04:03.645040Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 1 TxId_Deprecated: 1 TabletID: 72075186233409546 Forgetting tablet 72075186233409546 2025-05-07T09:04:03.647791Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5898: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 1 ShardOwnerId: 72057594046678944 ShardLocalIdx: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:03.648226Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-05-07T09:04:03.648807Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 3 TxId_Deprecated: 3 TabletID: 72075186233409548 2025-05-07T09:04:03.652094Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 2 TxId_Deprecated: 2 TabletID: 72075186233409547 2025-05-07T09:04:03.652437Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5898: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 3 ShardOwnerId: 72057594046678944 ShardLocalIdx: 3, at schemeshard: 72057594046678944 2025-05-07T09:04:03.652758Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 Forgetting tablet 72075186233409548 2025-05-07T09:04:03.654028Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5898: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 2 ShardOwnerId: 72057594046678944 ShardLocalIdx: 2, at schemeshard: 72057594046678944 2025-05-07T09:04:03.654235Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 Forgetting tablet 72075186233409547 2025-05-07T09:04:03.655179Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:205: TTxCleanDroppedSubDomains Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-05-07T09:04:03.655250Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: PersistRemoveSubDomain for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-05-07T09:04:03.655476Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-05-07T09:04:03.656216Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-05-07T09:04:03.656276Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-05-07T09:04:03.656376Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T09:04:03.656706Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-05-07T09:04:03.658020Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:1 2025-05-07T09:04:03.658079Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:1 tabletId 72075186233409546 2025-05-07T09:04:03.660150Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:3 2025-05-07T09:04:03.660192Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:3 tabletId 72075186233409548 2025-05-07T09:04:03.660257Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2 2025-05-07T09:04:03.660306Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:2 tabletId 72075186233409547 2025-05-07T09:04:03.660395Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-05-07T09:04:03.660488Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 101, wait until txId: 101 TestWaitNotification wait txId: 101 2025-05-07T09:04:03.660684Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 101: send EvNotifyTxCompletion 2025-05-07T09:04:03.660717Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 101 2025-05-07T09:04:03.661034Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 2025-05-07T09:04:03.661105Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-05-07T09:04:03.661131Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:494:2448] TestWaitNotification: OK eventTxId 101 2025-05-07T09:04:03.661578Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T09:04:03.661726Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 163us result status StatusPathDoesNotExist 2025-05-07T09:04:03.661863Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/USER_0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" 
PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-05-07T09:04:03.662410Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T09:04:03.662579Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 163us result status StatusSuccess 2025-05-07T09:04:03.662918Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 wait until 72075186233409546 is deleted wait until 72075186233409547 is deleted wait until 72075186233409548 is deleted 2025-05-07T09:04:03.663507Z node 1 :HIVE INFO: tablet_helpers.cpp:1476: [72057594037968897] TEvSubscribeToTabletDeletion, 72075186233409546 2025-05-07T09:04:03.664939Z node 1 :HIVE INFO: tablet_helpers.cpp:1476: [72057594037968897] TEvSubscribeToTabletDeletion, 72075186233409547 2025-05-07T09:04:03.664991Z node 1 :HIVE INFO: tablet_helpers.cpp:1476: [72057594037968897] TEvSubscribeToTabletDeletion, 72075186233409548 Deleted tabletId 72075186233409546 Deleted tabletId 72075186233409547 Deleted tabletId 72075186233409548 2025-05-07T09:04:03.666634Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T09:04:03.666871Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 214us result status StatusSuccess 2025-05-07T09:04:03.667242Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: 
schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944
>> TSchemeShardSubDomainTest::SimultaneousCreateForceDropTwice [GOOD]
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_export/unittest >> TExportToS3Tests::CancelledExportEndTime [GOOD]
Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2141] Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:127:2058] recipient: [1:109:2141] 2025-05-07T09:03:46.200024Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T09:03:46.200128Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:03:46.200181Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T09:03:46.200223Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T09:03:46.201459Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T09:03:46.201526Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T09:03:46.201602Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:03:46.201666Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 
600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T09:03:46.202431Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T09:03:46.205694Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T09:03:46.287443Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T09:03:46.287502Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:03:46.302957Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T09:03:46.303187Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T09:03:46.303333Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T09:03:46.308341Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T09:03:46.308615Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T09:03:46.309250Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T09:03:46.309392Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T09:03:46.312039Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:03:46.321108Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:03:46.321180Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:03:46.321233Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T09:03:46.321264Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:03:46.321297Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T09:03:46.322423Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T09:03:46.328054Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:239:2058] recipient: [1:15:2062] 2025-05-07T09:03:46.464906Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 
2025-05-07T09:03:46.465098Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:03:46.465336Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T09:03:46.465551Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T09:03:46.465613Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:03:46.467664Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T09:03:46.467824Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T09:03:46.468050Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:03:46.468105Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T09:03:46.468147Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T09:03:46.468178Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T09:03:46.469930Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:03:46.470040Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T09:03:46.470082Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T09:03:46.471749Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:03:46.471803Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:03:46.471845Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:03:46.471924Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T09:03:46.481354Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 
72057594046316545 2025-05-07T09:03:46.483549Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T09:03:46.483741Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T09:03:46.484628Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T09:03:46.484782Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 134 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:03:46.484826Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:03:46.485086Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T09:03:46.485162Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:03:46.485321Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T09:03:46.485393Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T09:03:46.487512Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:03:46.487558Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:03:46.487699Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:03:46.487733Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 
58:0 2025-05-07T09:04:03.148264Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 3 2025-05-07T09:04:03.148314Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate source path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-05-07T09:04:03.150294Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6706: Handle: TEvNotifyTxCompletionResult: txId# 281474976710758 2025-05-07T09:04:03.150396Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6708: Message: TxId: 281474976710758 2025-05-07T09:04:03.153992Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 102, at schemeshard: 72057594046678944 TestWaitNotification wait txId: 102 2025-05-07T09:04:03.170591Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2025-05-07T09:04:03.170665Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 2025-05-07T09:04:03.174600Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/export-102" OperationType: ESchemeOpBackup Backup { TableName: "0" NumberOfRetries: 0 S3Settings { Endpoint: "localhost:2434" Scheme: HTTP Bucket: "" ObjectKeyPattern: "" AccessKey: "" SecretKey: "" StorageClass: STORAGE_CLASS_UNSPECIFIED UseVirtualAddressing: true } Table { Self { Name: "Table" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table" Columns { Name: "key" Type: "Utf8" TypeId: 4608 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 
ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 1 } } TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } NeedToBill: true SnapshotStep: 0 SnapshotTxId: 0 EnableChecksums: false EnablePermissions: false } Internal: true } TxId: 281474976710759 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T09:04:03.175262Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_backup_restore_common.h:586: TBackup Propose, path: /MyRoot/export-102/0, opId: 281474976710759:0, at schemeshard: 72057594046678944 2025-05-07T09:04:03.175410Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 2 2025-05-07T09:04:03.176294Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 281474976710759:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T09:04:03.176364Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpBackup, opId: 281474976710759:0, at schemeshard: 72057594046678944 2025-05-07T09:04:03.178847Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:62: NotifyTxCompletion export in-flight, txId: 102, at schemeshard: 72057594046678944 2025-05-07T09:04:03.178914Z node 4 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 102, at schemeshard: 72057594046678944 2025-05-07T09:04:03.179657Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 281474976710759, response: Status: StatusAccepted TxId: 281474976710759 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:04:03.179952Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976710759, database: /MyRoot, subject: , status: StatusAccepted, operation: BACKUP TABLE, path: /MyRoot/export-102/0 2025-05-07T09:04:03.180231Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6657: Handle: TEvModifySchemeTransactionResult: txId# 281474976710759, status# StatusAccepted 2025-05-07T09:04:03.180311Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6659: Message: Status: StatusAccepted TxId: 281474976710759 SchemeshardId: 72057594046678944 2025-05-07T09:04:03.180658Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 281474976710759:0, at schemeshard: 72057594046678944 2025-05-07T09:04:03.180744Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 281474976710759:0 ProgressState, operation type: TxBackup, at tablet# 72057594046678944 2025-05-07T09:04:03.180809Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 281474976710759:0 ProgressState no shards to create, do next state 2025-05-07T09:04:03.180853Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 281474976710759:0 2 -> 3 2025-05-07T09:04:03.184145Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:66: TTxOperationProposeCancelTx Execute, at schemeshard: 72057594046678944, message: TargetTxId: 281474976710759 TxId: 102 2025-05-07T09:04:03.184215Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_cancel_tx.cpp:37: Execute cancel tx: opId# 102:0, target opId# 281474976710759:0 2025-05-07T09:04:03.185082Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 281474976710759:0, at schemeshard: 72057594046678944 2025-05-07T09:04:03.185139Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_backup_restore_common.h:58: TBackup TConfigurePart ProgressState, opId: 281474976710759:0, at schemeshard: 72057594046678944 2025-05-07T09:04:03.185333Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_create_backup.cpp:41: Propose backup to datashard 72075186233409547 txid 281474976710759:0 at schemeshard 72057594046678944 2025-05-07T09:04:03.188032Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:83: TTxOperationProposeCancelTx Complete, at schemeshard: 72057594046678944 2025-05-07T09:04:03.188234Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 281474976710759:0, at schemeshard: 72057594046678944 2025-05-07T09:04:03.188276Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_backup_restore_common.h:58: TBackup TConfigurePart ProgressState, opId: 281474976710759:0, at schemeshard: 72057594046678944 2025-05-07T09:04:03.188419Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_create_backup.cpp:41: Propose backup to datashard 72075186233409547 txid 281474976710759:0 at schemeshard 72057594046678944 2025-05-07T09:04:03.189336Z node 4 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard_impl.cpp:6744: Handle: TEvCancelTxResult: Cookie: 102, at schemeshard: 72057594046678944 2025-05-07T09:04:03.189471Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6746: Message: Status: StatusAccepted Result: "Cancelled at SchemeShard" TargetTxId: 281474976710759 TxId: 102 2025-05-07T09:04:03.190881Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 281474976710759:0 from tablet: 72057594046678944 to tablet: 72075186233409547 cookie: 72057594046678944:2 msg type: 269549568 2025-05-07T09:04:03.191068Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 281474976710759, partId: 0, tablet: 72075186233409547 2025-05-07T09:04:03.193245Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 281474976710759:0 from tablet: 72057594046678944 to tablet: 72075186233409547 cookie: 72057594046678944:2 msg type: 269549568 2025-05-07T09:04:03.194662Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-05-07T09:04:03.194722Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [4:557:2516] TestWaitNotification: OK eventTxId 102
>> DstCreator::Basic [GOOD]
>> DstCreator::CannotFindColumn
>> TSchemeShardSubDomainTest::SetSchemeLimits [GOOD]
>> TSchemeShardSubDomainTest::Create [GOOD]
>> TSchemeShardSubDomainTest::CreateAlterNbsChannels
>> DstCreator::NonExistentSrc [GOOD]
>> DstCreator::KeyColumnsSizeMismatch
>> DstCreator::ReplicationModeMismatch [GOOD]
>> DstCreator::ReplicationConsistencyLevelMismatch
>> DstCreator::GlobalConsistency [GOOD]
>> DstCreator::KeyColumnNameMismatch
>> DstCreator::ExistingDst [GOOD]
>> DstCreator::EmptyReplicationConfig
>> DstCreator::SameOwner [GOOD]
>> DstCreator::SamePartitionCount
>> TSchemeShardSubDomainTest::CreateItemsInsideSubdomain [GOOD]
>> DstCreator::ColumnsSizeMismatch [GOOD]
>> DstCreator::ColumnTypeMismatch
>> DstCreator::WithIntermediateDir [GOOD]
>> DstCreator::WithAsyncIndex
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::SimultaneousCreateForceDropTwice [GOOD]
Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:128:2058] recipient: [1:108:2140] 2025-05-07T09:04:04.222641Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T09:04:04.222726Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:04:04.222764Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T09:04:04.222812Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 
2025-05-07T09:04:04.222868Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T09:04:04.222899Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T09:04:04.222970Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:04:04.223062Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T09:04:04.223794Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T09:04:04.224180Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T09:04:04.309456Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T09:04:04.309521Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:04:04.324228Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T09:04:04.324385Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T09:04:04.324560Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T09:04:04.333113Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T09:04:04.333743Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T09:04:04.334503Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:04.334822Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T09:04:04.337130Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:04.338731Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:04.338796Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:04.338855Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T09:04:04.338903Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:04.339037Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 
2025-05-07T09:04:04.339264Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T09:04:04.346170Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:239:2058] recipient: [1:15:2062] 2025-05-07T09:04:04.481849Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T09:04:04.482095Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:04.482336Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T09:04:04.482633Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T09:04:04.482699Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:04.485935Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:04.486112Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T09:04:04.486315Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:04.486408Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T09:04:04.486456Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T09:04:04.486512Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T09:04:04.488842Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:04.488910Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T09:04:04.488966Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T09:04:04.491260Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:04.491313Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: 
NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:04.491361Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:04.491451Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T09:04:04.501625Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T09:04:04.503919Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T09:04:04.504108Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T09:04:04.505129Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:04.505286Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 130 RawX2: 4294969450 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:04:04.505340Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:04.505661Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T09:04:04.505727Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:04.505899Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T09:04:04.506010Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T09:04:04.508259Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:04.508345Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:04.508524Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard 
DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:04.508583Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... ode 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 102, subscribers: 1 2025-05-07T09:04:04.570507Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:212: TTxAckPublishToSchemeBoard Notify send TEvNotifyTxCompletionResult, at schemeshard: 72057594046678944, to actorId: [1:276:2267] 2025-05-07T09:04:04.572835Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:5 hive 72057594037968897 at ss 72057594046678944 2025-05-07T09:04:04.572877Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:1 hive 72057594037968897 at ss 72057594046678944 2025-05-07T09:04:04.572895Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:3 hive 72057594037968897 at ss 72057594046678944 2025-05-07T09:04:04.572911Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:6 hive 72057594037968897 at ss 72057594046678944 2025-05-07T09:04:04.572925Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:2 hive 72057594037968897 at ss 72057594046678944 2025-05-07T09:04:04.572940Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:4 hive 72057594037968897 at ss 72057594046678944 2025-05-07T09:04:04.573671Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-05-07T09:04:04.574667Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 5 TxId_Deprecated: 5 2025-05-07T09:04:04.574878Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-05-07T09:04:04.575020Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-05-07T09:04:04.575055Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:277:2268] 2025-05-07T09:04:04.575139Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 1 TxId_Deprecated: 1 2025-05-07T09:04:04.575350Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5898: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 5 ShardOwnerId: 72057594046678944 ShardLocalIdx: 5, at schemeshard: 72057594046678944 2025-05-07T09:04:04.575699Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 7 2025-05-07T09:04:04.576015Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 3 TxId_Deprecated: 3 2025-05-07T09:04:04.576213Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5898: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 1 
ShardOwnerId: 72057594046678944 ShardLocalIdx: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:04.576436Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 6 2025-05-07T09:04:04.576676Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 6 TxId_Deprecated: 6 2025-05-07T09:04:04.576816Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 2 TxId_Deprecated: 2 2025-05-07T09:04:04.576983Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5898: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 3 ShardOwnerId: 72057594046678944 ShardLocalIdx: 3, at schemeshard: 72057594046678944 2025-05-07T09:04:04.577291Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5 2025-05-07T09:04:04.577546Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 4 TxId_Deprecated: 4 2025-05-07T09:04:04.577912Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5898: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 6 ShardOwnerId: 72057594046678944 ShardLocalIdx: 6, at schemeshard: 72057594046678944 2025-05-07T09:04:04.578111Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-05-07T09:04:04.578572Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5898: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 2 ShardOwnerId: 72057594046678944 ShardLocalIdx: 2, at schemeshard: 72057594046678944 2025-05-07T09:04:04.578744Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-05-07T09:04:04.579273Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5898: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 4 ShardOwnerId: 72057594046678944 ShardLocalIdx: 4, at schemeshard: 72057594046678944 2025-05-07T09:04:04.579431Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-05-07T09:04:04.579607Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:205: TTxCleanDroppedSubDomains Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-05-07T09:04:04.579666Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: PersistRemoveSubDomain for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-05-07T09:04:04.579829Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-05-07T09:04:04.580898Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-05-07T09:04:04.580967Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: 
TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-05-07T09:04:04.581083Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T09:04:04.581875Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:5 2025-05-07T09:04:04.584731Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:1 2025-05-07T09:04:04.584836Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:3 2025-05-07T09:04:04.585073Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:6 2025-05-07T09:04:04.585725Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2 2025-05-07T09:04:04.588431Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:4 2025-05-07T09:04:04.588697Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-05-07T09:04:04.588769Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 100 TestWaitNotification: OK eventTxId 101 TestWaitNotification: OK eventTxId 102 2025-05-07T09:04:04.594820Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T09:04:04.595049Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 275us result status StatusPathDoesNotExist 2025-05-07T09:04:04.595242Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/USER_0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-05-07T09:04:04.595706Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T09:04:04.595884Z node 1 :SCHEMESHARD_DESCRIBE INFO: 
schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 184us result status StatusSuccess 2025-05-07T09:04:04.596260Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944
>> TSchemeShardSubDomainTest::SchemeDatabaseQuotaRejects [GOOD]
>> DstCreator::WithSyncIndex [GOOD]
>> TCdcStreamTests::VirtualTimestamps [GOOD]
>> TCdcStreamTests::ResolvedTimestamps
>> TSchemeShardSubDomainTest::CreateDropNbs
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::SetSchemeLimits [GOOD]
Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140] 2025-05-07T09:04:04.274259Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T09:04:04.274348Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:04:04.274410Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T09:04:04.274456Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T09:04:04.274514Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T09:04:04.274545Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T09:04:04.274598Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: 
BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:04:04.274683Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T09:04:04.275433Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T09:04:04.275802Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T09:04:04.357833Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T09:04:04.357887Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:04:04.374165Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T09:04:04.374381Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T09:04:04.374564Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T09:04:04.394802Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T09:04:04.395189Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T09:04:04.395913Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:04.396101Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T09:04:04.417866Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:04.419443Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:04.419510Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:04.419583Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T09:04:04.419632Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:04.419739Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T09:04:04.420008Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T09:04:04.428588Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T09:04:04.579421Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T09:04:04.579674Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:04.579906Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T09:04:04.580170Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T09:04:04.580266Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:04.582495Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:04.582638Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T09:04:04.582819Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:04.582896Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T09:04:04.582934Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T09:04:04.582976Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T09:04:04.584860Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:04.584905Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T09:04:04.584956Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T09:04:04.586528Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:04.586586Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:04.586616Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:04.586659Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation 
IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T09:04:04.589567Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T09:04:04.594984Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T09:04:04.595194Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T09:04:04.596215Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:04.596363Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:04:04.596413Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:04.596758Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T09:04:04.596813Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:04.596979Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T09:04:04.597059Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T09:04:04.599128Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:04.599187Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:04.599354Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:04.599391Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 
sh path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-05-07T09:04:04.833181Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 100 2025-05-07T09:04:04.836327Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:04.836385Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 100, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:04.836541Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 100, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-05-07T09:04:04.836664Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:04.836702Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:337:2313], at schemeshard: 72057594046678944, txId: 100, path id: 1 2025-05-07T09:04:04.836747Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:337:2313], at schemeshard: 72057594046678944, txId: 100, path id: 2 2025-05-07T09:04:04.837067Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 100:0, at schemeshard: 72057594046678944 2025-05-07T09:04:04.837112Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046678944] TDone opId# 100:0 ProgressState 2025-05-07T09:04:04.837233Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#100:0 progress is 1/1 2025-05-07T09:04:04.837270Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 100 ready parts: 1/1 2025-05-07T09:04:04.837314Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#100:0 progress is 1/1 2025-05-07T09:04:04.837347Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 100 ready parts: 1/1 2025-05-07T09:04:04.837406Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 100, ready parts: 1/1, is published: false 2025-05-07T09:04:04.837457Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 100 ready parts: 1/1 2025-05-07T09:04:04.837496Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 100:0 2025-05-07T09:04:04.837530Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 100:0 2025-05-07T09:04:04.837760Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5 2025-05-07T09:04:04.837807Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 100, publications: 2, 
subscribers: 0 2025-05-07T09:04:04.837856Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 100, [OwnerId: 72057594046678944, LocalPathId: 1], 5 2025-05-07T09:04:04.837889Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 100, [OwnerId: 72057594046678944, LocalPathId: 2], 3 2025-05-07T09:04:04.838621Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 3 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 100 2025-05-07T09:04:04.838707Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 3 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 100 2025-05-07T09:04:04.838743Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 100 2025-05-07T09:04:04.838803Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 100, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 5 2025-05-07T09:04:04.838849Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-05-07T09:04:04.839677Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 3 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 100 2025-05-07T09:04:04.839775Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 3 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 100 2025-05-07T09:04:04.839808Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 100 2025-05-07T09:04:04.839839Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 100, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 3 2025-05-07T09:04:04.839872Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-05-07T09:04:04.839939Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 100, subscribers: 0 2025-05-07T09:04:04.845521Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 100 2025-05-07T09:04:04.845635Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 100 TestModificationResult got TxId: 100, wait until txId: 100 TestWaitNotification wait txId: 100 2025-05-07T09:04:04.845957Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 100: send 
EvNotifyTxCompletion 2025-05-07T09:04:04.846026Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 100 2025-05-07T09:04:04.846479Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 100, at schemeshard: 72057594046678944 2025-05-07T09:04:04.846575Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 100: got EvNotifyTxCompletionResult 2025-05-07T09:04:04.846611Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 100: satisfy waiter [1:473:2422] TestWaitNotification: OK eventTxId 100 2025-05-07T09:04:04.847116Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T09:04:04.847317Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 227us result status StatusSuccess 2025-05-07T09:04:04.847692Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0" PathDescription { Self { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 1 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 3 ShardsInside: 2 ShardsLimit: 3 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 300 DatabaseQuotas { data_stream_shards_quota: 3 } SecurityState { } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:04:04.848222Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T09:04:04.848393Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 149us result status StatusSuccess 2025-05-07T09:04:04.848754Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 
CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 3 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 1 PathsLimit: 3 ShardsInside: 0 ShardsLimit: 3 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 300 SecurityState { Audience: "/MyRoot" } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944
>> TSchemeShardSubDomainTest::CreateDropSolomon
>> TSchemeShardSubDomainTest::SimultaneousDefineAndCreateTable [GOOD]
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::CreateItemsInsideSubdomain [GOOD]
Test command err:
Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140]
IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140]
Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140]
2025-05-07T09:04:04.242968Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1
2025-05-07T09:04:04.243048Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10
2025-05-07T09:04:04.243097Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s
2025-05-07T09:04:04.243132Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration
2025-05-07T09:04:04.243191Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000
2025-05-07T09:04:04.243224Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000
2025-05-07T09:04:04.243290Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10
2025-05-07T09:04:04.243381Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s,
DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T09:04:04.244106Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T09:04:04.244366Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T09:04:04.307318Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T09:04:04.307375Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:04:04.343632Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T09:04:04.343827Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T09:04:04.344028Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T09:04:04.350020Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T09:04:04.350291Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T09:04:04.350994Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:04.351183Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T09:04:04.354100Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:04.355521Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:04.355590Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:04.355681Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T09:04:04.355729Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:04.355830Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T09:04:04.356080Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T09:04:04.361474Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T09:04:04.492753Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at 
schemeshard: 72057594046678944 2025-05-07T09:04:04.493003Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:04.493224Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T09:04:04.493509Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T09:04:04.493589Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:04.496287Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:04.496465Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T09:04:04.496736Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:04.496818Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T09:04:04.496872Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T09:04:04.496905Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T09:04:04.499245Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:04.499330Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T09:04:04.499379Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T09:04:04.501349Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:04.501410Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:04.501465Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:04.501523Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T09:04:04.505291Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 
IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T09:04:04.507407Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T09:04:04.507608Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T09:04:04.508634Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:04.508768Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:04:04.508819Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:04.509135Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T09:04:04.509197Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:04.509377Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T09:04:04.509468Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T09:04:04.511535Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:04.511607Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:04.511793Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:04.511834Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 
de 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#103:0 progress is 1/1 2025-05-07T09:04:05.033181Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-05-07T09:04:05.033206Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#103:0 progress is 1/1 2025-05-07T09:04:05.033225Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-05-07T09:04:05.033262Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 103, ready parts: 1/1, is published: true 2025-05-07T09:04:05.033320Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:485:2442] message: TxId: 103 2025-05-07T09:04:05.033353Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-05-07T09:04:05.033382Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 103:0 2025-05-07T09:04:05.033402Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 103:0 2025-05-07T09:04:05.033476Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 3 2025-05-07T09:04:05.034882Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-05-07T09:04:05.034916Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [1:486:2443] TestWaitNotification: OK eventTxId 103 2025-05-07T09:04:05.035373Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T09:04:05.035530Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 181us result status StatusSuccess 2025-05-07T09:04:05.035912Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0" PathDescription { Self { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 8 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 8 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 6 SubDomainVersion: 1 SecurityStateVersion: 0 } } Children { Name: "dir_0" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 102 CreateStep: 150 ParentPathId: 2 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: true } Children { Name: "table_0" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 150 ParentPathId: 2 PathState: EPathStateCreate Owner: 
"root@builtin" ACL: "" ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 3 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:04:05.037098Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0/table_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T09:04:05.037285Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0/table_0" took 194us result status StatusSuccess 2025-05-07T09:04:05.037591Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0/table_0" PathDescription { Self { Name: "table_0" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 150 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "table_0" Columns { Name: "RowId" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "RowId" KeyColumnIds: 1 TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 3 PathOwnerId: 
72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:04:05.038206Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0/dir_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T09:04:05.038515Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0/dir_0" took 267us result status StatusSuccess 2025-05-07T09:04:05.038833Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0/dir_0" PathDescription { Self { Name: "dir_0" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 102 CreateStep: 150 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 4 } ChildrenExist: true } Children { Name: "table_1" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 103 CreateStep: 200 ParentPathId: 4 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 4 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:04:05.039392Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0/dir_0/table_1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T09:04:05.039600Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0/dir_0/table_1" took 218us result status StatusSuccess 2025-05-07T09:04:05.039984Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0/dir_0/table_1" PathDescription { Self { Name: "table_1" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 103 CreateStep: 200 ParentPathId: 4 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "table_1" Columns { Name: "RowId" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Value" Type: "Utf8" TypeId: 4608 Id: 2 
NotNull: false IsBuildInProgress: false } KeyColumnNames: "RowId" KeyColumnIds: 1 TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 5 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::SchemeDatabaseQuotaRejects [GOOD]
Test command err:
Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140]
IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140]
Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140]
2025-05-07T09:04:04.230435Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1
2025-05-07T09:04:04.230513Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10
2025-05-07T09:04:04.230548Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s
2025-05-07T09:04:04.230574Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration
2025-05-07T09:04:04.230629Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000
2025-05-07T09:04:04.230653Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000
2025-05-07T09:04:04.230701Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10
2025-05-07T09:04:04.230775Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false
2025-05-07T09:04:04.231381Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T09:04:04.231658Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T09:04:04.293117Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T09:04:04.293168Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:04:04.306921Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T09:04:04.307132Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T09:04:04.307315Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T09:04:04.312871Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T09:04:04.313159Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T09:04:04.313720Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:04.313861Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T09:04:04.316241Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:04.317508Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:04.317566Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:04.317627Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T09:04:04.317667Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:04.317763Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T09:04:04.317994Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T09:04:04.323677Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T09:04:04.486044Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T09:04:04.486321Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:04.486573Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T09:04:04.486868Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T09:04:04.486930Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:04.490000Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:04.490171Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T09:04:04.490406Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:04.490470Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T09:04:04.490515Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T09:04:04.490555Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T09:04:04.492696Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:04.492768Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T09:04:04.492817Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T09:04:04.494800Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:04.494861Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:04.494906Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:04.494986Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T09:04:04.499199Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T09:04:04.501253Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T09:04:04.501455Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T09:04:04.502510Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:04.502648Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:04:04.502702Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:04.503015Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T09:04:04.503070Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:04.503247Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T09:04:04.503329Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T09:04:04.505361Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:04.505430Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:04.505635Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:04.505695Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 
:04:05.136752Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:5 hive 72057594037968897 at ss 72057594046678944 2025-05-07T09:04:05.136788Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:1 hive 72057594037968897 at ss 72057594046678944 2025-05-07T09:04:05.136823Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:3 hive 72057594037968897 at ss 72057594046678944 2025-05-07T09:04:05.136840Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:2 hive 72057594037968897 at ss 72057594046678944 2025-05-07T09:04:05.136869Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:4 hive 72057594037968897 at ss 72057594046678944 2025-05-07T09:04:05.137941Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 106 2025-05-07T09:04:05.138671Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 5 TxId_Deprecated: 5 TabletID: 72075186233409550 2025-05-07T09:04:05.141235Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 1 TxId_Deprecated: 1 TabletID: 72075186233409546 2025-05-07T09:04:05.141544Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5898: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 5 ShardOwnerId: 72057594046678944 ShardLocalIdx: 5, at schemeshard: 72057594046678944 2025-05-07T09:04:05.141884Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 Forgetting tablet 72075186233409550 2025-05-07T09:04:05.142323Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5898: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 1 ShardOwnerId: 72057594046678944 ShardLocalIdx: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:05.142470Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-05-07T09:04:05.143649Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 3 TxId_Deprecated: 3 TabletID: 72075186233409548 Forgetting tablet 72075186233409546 2025-05-07T09:04:05.144604Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5898: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 3 ShardOwnerId: 72057594046678944 ShardLocalIdx: 3, at schemeshard: 72057594046678944 2025-05-07T09:04:05.144739Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-05-07T09:04:05.145489Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 2 TxId_Deprecated: 2 TabletID: 72075186233409547 2025-05-07T09:04:05.145945Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5898: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 2 ShardOwnerId: 72057594046678944 ShardLocalIdx: 2, 
at schemeshard: 72057594046678944 2025-05-07T09:04:05.146092Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-05-07T09:04:05.146607Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 4 TxId_Deprecated: 4 TabletID: 72075186233409549 Forgetting tablet 72075186233409548 2025-05-07T09:04:05.147221Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5898: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 4 ShardOwnerId: 72057594046678944 ShardLocalIdx: 4, at schemeshard: 72057594046678944 2025-05-07T09:04:05.147383Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 Forgetting tablet 72075186233409547 Forgetting tablet 72075186233409549 2025-05-07T09:04:05.148257Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 106 2025-05-07T09:04:05.148445Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-05-07T09:04:05.148492Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-05-07T09:04:05.148556Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-05-07T09:04:05.148830Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 106 2025-05-07T09:04:05.149257Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:205: TTxCleanDroppedSubDomains Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-05-07T09:04:05.149297Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: PersistRemoveSubDomain for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-05-07T09:04:05.149397Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-05-07T09:04:05.151091Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:5 2025-05-07T09:04:05.151143Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:5 tabletId 72075186233409550 2025-05-07T09:04:05.151223Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:1 2025-05-07T09:04:05.151245Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:1 tabletId 72075186233409546 2025-05-07T09:04:05.153195Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:3 2025-05-07T09:04:05.153231Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 
72057594046678944:3 tabletId 72075186233409548
2025-05-07T09:04:05.153320Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2
2025-05-07T09:04:05.153345Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:2 tabletId 72075186233409547
2025-05-07T09:04:05.153382Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:4
2025-05-07T09:04:05.153409Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:4 tabletId 72075186233409549
2025-05-07T09:04:05.153813Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 1 candidates, at schemeshard: 72057594046678944
2025-05-07T09:04:05.154043Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944
2025-05-07T09:04:05.154103Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944
2025-05-07T09:04:05.154176Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1
2025-05-07T09:04:05.155157Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944
2025-05-07T09:04:05.156249Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944
TestModificationResult got TxId: 106, wait until txId: 106
TestWaitNotification wait txId: 106
2025-05-07T09:04:05.156522Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 106: send EvNotifyTxCompletion
2025-05-07T09:04:05.156560Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 106
2025-05-07T09:04:05.157013Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 106, at schemeshard: 72057594046678944
2025-05-07T09:04:05.157103Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 106: got EvNotifyTxCompletionResult
2025-05-07T09:04:05.157134Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 106: satisfy waiter [1:896:2797]
TestWaitNotification: OK eventTxId 106
2025-05-07T09:04:05.157711Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944
2025-05-07T09:04:05.157897Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 179us result status StatusSuccess
2025-05-07T09:04:05.158186Z node 1 :SCHEMESHARD_DESCRIBE DEBUG:
schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944
>> TSchemeShardSubDomainTest::RedefineErrors
>> TStoragePoolsQuotasTest::DisableWritesToDatabase-IsExternalSubdomain-true
>> TSchemeShardSubDomainTest::ConcurrentCreateSubDomainAndDescribe
>> TSchemeShardSubDomainTest::CreateAlterNbsChannels [GOOD]
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::SimultaneousDefineAndCreateTable [GOOD]
Test command err:
Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140]
IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140]
Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140]
2025-05-07T09:04:04.763737Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1
2025-05-07T09:04:04.763816Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10
2025-05-07T09:04:04.763859Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s
2025-05-07T09:04:04.763895Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration
2025-05-07T09:04:04.763967Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000
2025-05-07T09:04:04.764015Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000
2025-05-07T09:04:04.764087Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10
2025-05-07T09:04:04.764170Z node 1
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T09:04:04.764945Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T09:04:04.765306Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T09:04:04.849011Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T09:04:04.849091Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:04:04.867894Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T09:04:04.868106Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T09:04:04.868294Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T09:04:04.874242Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T09:04:04.874557Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T09:04:04.875309Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:04.875489Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T09:04:04.882299Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:04.883762Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:04.883827Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:04.883905Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T09:04:04.883952Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:04.884068Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T09:04:04.884324Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T09:04:04.890981Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T09:04:05.024974Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: 
ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T09:04:05.025247Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:05.025498Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T09:04:05.025764Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T09:04:05.025827Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:05.028357Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:05.028509Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T09:04:05.028724Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:05.028789Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T09:04:05.028829Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T09:04:05.028872Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T09:04:05.030867Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:05.030932Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T09:04:05.030994Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T09:04:05.032768Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:05.032822Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:05.032870Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:05.032957Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T09:04:05.036686Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation 
DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T09:04:05.038625Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T09:04:05.038830Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T09:04:05.039890Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:05.040033Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:04:05.040087Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:05.040397Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T09:04:05.040456Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:05.040652Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T09:04:05.040733Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T09:04:05.042792Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:05.042854Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:05.043073Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:05.043118Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 
nerId: 72057594046678944, LocalPathId: 2], version: 6 2025-05-07T09:04:05.423355Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 6 2025-05-07T09:04:05.423862Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5472: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 502 RawX2: 4294969751 } Origin: 72075186233409549 State: 2 TxId: 102 Step: 0 Generation: 2 2025-05-07T09:04:05.423910Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation FindRelatedPartByTabletId, TxId: 102, tablet: 72075186233409549, partId: 0 2025-05-07T09:04:05.424045Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 102:0, at schemeshard: 72057594046678944, message: Source { RawX1: 502 RawX2: 4294969751 } Origin: 72075186233409549 State: 2 TxId: 102 Step: 0 Generation: 2 2025-05-07T09:04:05.424096Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1005: NTableState::TProposedWaitParts operationId# 102:0 HandleReply TEvSchemaChanged at tablet: 72057594046678944 2025-05-07T09:04:05.424183Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1009: NTableState::TProposedWaitParts operationId# 102:0 HandleReply TEvSchemaChanged at tablet: 72057594046678944 message: Source { RawX1: 502 RawX2: 4294969751 } Origin: 72075186233409549 State: 2 TxId: 102 Step: 0 Generation: 2 2025-05-07T09:04:05.424250Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:655: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 102:0, shardIdx: 72057594046678944:4, datashard: 72075186233409549, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:05.424320Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:674: all shard schema changes has been received, operationId: 102:0, at schemeshard: 72057594046678944 2025-05-07T09:04:05.424368Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:686: send schema changes ack message, operation: 102:0, datashard: 72075186233409549, at schemeshard: 72057594046678944 2025-05-07T09:04:05.424432Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 102:0 129 -> 240 2025-05-07T09:04:05.425108Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 3 PathOwnerId: 72057594046678944, cookie: 102 2025-05-07T09:04:05.425190Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 3 PathOwnerId: 72057594046678944, cookie: 102 2025-05-07T09:04:05.425221Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 102 2025-05-07T09:04:05.425252Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 3 2025-05-07T09:04:05.425288Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove 
publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4 2025-05-07T09:04:05.425356Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 102, ready parts: 0/1, is published: true 2025-05-07T09:04:05.432683Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-05-07T09:04:05.432786Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-05-07T09:04:05.432875Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-05-07T09:04:05.432957Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-05-07T09:04:05.433242Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-05-07T09:04:05.433279Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046678944] TDone opId# 102:0 ProgressState 2025-05-07T09:04:05.433382Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#102:0 progress is 1/1 2025-05-07T09:04:05.433420Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-05-07T09:04:05.433473Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#102:0 progress is 1/1 2025-05-07T09:04:05.433503Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-05-07T09:04:05.433533Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: true 2025-05-07T09:04:05.433588Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:305:2296] message: TxId: 102 2025-05-07T09:04:05.433628Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-05-07T09:04:05.433659Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 102:0 2025-05-07T09:04:05.433700Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 102:0 2025-05-07T09:04:05.433803Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-05-07T09:04:05.442826Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-05-07T09:04:05.442884Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:530:2475] TestWaitNotification: OK eventTxId 102 2025-05-07T09:04:05.443503Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false 
ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T09:04:05.443734Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 266us result status StatusSuccess 2025-05-07T09:04:05.444234Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0" PathDescription { Self { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 6 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 6 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 3 SubDomainVersion: 2 SecurityStateVersion: 0 } } Children { Name: "table_0" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 102 CreateStep: 130 ParentPathId: 2 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 2 PlanResolution: 10 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 Mediators: 72075186233409548 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 1 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:04:05.450607Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0/table_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T09:04:05.450896Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0/table_0" took 299us result status StatusSuccess 2025-05-07T09:04:05.451370Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0/table_0" PathDescription { Self { Name: "table_0" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 102 CreateStep: 130 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "table_0" Columns { Name: "RowId" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "RowId" KeyColumnIds: 1 TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 
IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 2 PlanResolution: 10 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 Mediators: 72075186233409548 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944
>> TSchemeShardSubDomainTest::RestartAtInFly
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/controller/ut_dst_creator/unittest >> DstCreator::WithSyncIndex [GOOD]
Test command err:
2025-05-07T09:04:01.790619Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501626993203659778:2201];send_to=[0:7307199536658146131:7762515];
2025-05-07T09:04:01.792254Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message;
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00295e/r3tmp/tmp2mQdJT/pdisk_1.dat
2025-05-07T09:04:02.391242Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded
2025-05-07T09:04:02.408769Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2025-05-07T09:04:02.408905Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting
2025-05-07T09:04:02.431189Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected
TClient is connected to server localhost:16855
TServer::EnableGrpc on GrpcPort 26544, node 1
2025-05-07T09:04:02.778681Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe)
2025-05-07T09:04:02.778705Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe)
2025-05-07T09:04:02.778725Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe)
2025-05-07T09:04:02.778820Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration
TClient is connected to server localhost:16855
WaitRootIsUp 'Root'...
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:04:03.342972Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:04:03.360922Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... TClient::Ls request: /Root/Table TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710658 CreateStep: 1746608643682 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: true } Table { Name: "Table" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyCo... (TRUNCATED) TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1746608643402 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 6 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 6 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 4 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710658 CreateStep: 1746608643682 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: true } Children { Name: ".sys" PathId: 18446... 
(TRUNCATED) 2025-05-07T09:04:03.726833Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-05-07T09:04:03.726977Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-05-07T09:04:03.726992Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:87: [DstCreator][rid 1][tid 1] Get table profiles 2025-05-07T09:04:03.727354Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:106: [DstCreator][rid 1][tid 1] Handle NKikimr::NConsole::TEvConfigsDispatcher::TEvGetConfigResponse 2025-05-07T09:04:04.873678Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:158: [DstCreator][rid 1][tid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribeTableResponse { Result: { name: Table, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1746608643682, tx_id: 281474976710658 } } } 2025-05-07T09:04:04.874186Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:249: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxUserProxy::TEvAllocateTxIdResult 2025-05-07T09:04:04.876556Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 2025-05-07T09:04:04.882494Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:279: [DstCreator][rid 1][tid 1] Handle {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976710659} 2025-05-07T09:04:04.882535Z node 1 :REPLICATION_CONTROLLER DEBUG: dst_creator.cpp:306: [DstCreator][rid 1][tid 1] Subscribe tx: txId# 281474976710659 2025-05-07T09:04:04.953731Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:311: [DstCreator][rid 1][tid 1] Handle NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976710659 2025-05-07T09:04:04.955302Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:335: [DstCreator][rid 1][tid 1] Handle NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 20 Record# Status: StatusSuccess Path: "/Root/Replicated" PathDescription { Self { Name: "Replicated" PathId: 5 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710659 CreateStep: 1746608644991 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 
PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: true } Table { Name: "Replicated" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 0 MinPartitionsCount: 1 SplitByLoadSettings { Enabled: false } } ColumnFamilies { Id: 0 StorageConfig { SysLog { PreferredPoolKind: "test" } Log { PreferredPoolKind: "test" } Data { PreferredPoolKind: "test" } } } } Ta ... 
untToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 SplitByLoadSettings { Enabled: false } } ColumnFamilies { Id: 0 StorageConfig { SysLog { PreferredPoolKind: "test" } Log { PreferredPoolKind: "test" } Data { PreferredPoolKind: "test" } } } } TableSchemaVersion: 1 IsBackup: false ReplicationConfig { Mode: REPLICATION_MODE_READ_ONLY ConsistencyLevel: CONSISTENCY_LEVEL_ROW } IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186224037905 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 6 PathsLimit: 10000 ShardsInside: 19 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } UserAttributes { Key: "__async_replica" Value: "true" } } PathId: 7 PathOwnerId: 72057594046644480 } 2025-05-07T09:04:04.971066Z node 1 :REPLICATION_CONTROLLER INFO: dst_creator.cpp:585: [DstCreator][rid 1][tid 2] Success: dstPathId# [OwnerId: 72057594046644480, LocalPathId: 7] TClient::Ls request: /Root/Replicated/index_by_value TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "index_by_value" PathId: 6 SchemeshardId: 72057594046644480 PathType: EPathTypeTableIndex 
CreateFinished: true CreateTxId: 281474976710659 CreateStep: 1746608644991 ParentPathId: 5 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableIndexVersion: 1 } ChildrenExist: true } Children { Name: "indexImplTable" PathId: 7 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710659 CreateStep: 1746608644991 ParentPathId: 6 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" PathSubType: EPathSubTypeSyncIndexImplTable Version { ... (TRUNCATED) TClient::Ls request: /Root/Replicated/index_by_value/indexImplTable TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "indexImplTable" PathId: 7 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710659 CreateStep: 1746608644991 ParentPathId: 6 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeSyncIndexImplTable Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "indexImplTable" Columns { Name: "value" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } ... (TRUNCATED) Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "indexImplTable" PathId: 7 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710659 CreateStep: 1746608644991 ParentPathId: 6 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeSyncIndexImplTable Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "indexImplTable" Columns { Name: "value" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "value" KeyColumnNames: "key" KeyColumnIds: 1 KeyColumnIds: 2 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" 
ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 SplitByLoadSettings { Enabled: false } } ColumnFamilies { Id: 0 StorageConfig { SysLog { PreferredPoolKind: "test" } Log { PreferredPoolKind: "test" } Data { PreferredPoolKind: "test" } } } } TableSchemaVersion: 1 IsBackup: false ReplicationConfig { Mode: REPLICATION_MODE_READ_ONLY ConsistencyLevel: CONSISTENCY_LEVEL_ROW } IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186224037905 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 6 PathsLimit: 10000 ShardsInside: 19 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } UserAttributes { Key: "__async_replica" Value: "true" } } Path: "/Root/Replicated/index_by_value/indexImplTable"
>> TSchemeShardSubDomainTest::SchemeLimitsRejects [GOOD]
>> TSchemeShardSubDomainTest::CreateDropNbs [GOOD]
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::CreateAlterNbsChannels [GOOD]
Test command err:
Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140]
IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140]
Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140]
2025-05-07T09:04:04.279138Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue
configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T09:04:04.279241Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:04:04.279309Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T09:04:04.279367Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T09:04:04.279419Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T09:04:04.279449Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T09:04:04.279502Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:04:04.279591Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T09:04:04.280292Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T09:04:04.280624Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T09:04:04.361422Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T09:04:04.361476Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:04:04.378116Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T09:04:04.378280Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T09:04:04.378452Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T09:04:04.384420Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T09:04:04.384640Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T09:04:04.385103Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:04.385236Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T09:04:04.387920Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:04.389341Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:04.389403Z 
node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:04.389477Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T09:04:04.389524Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:04.389633Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T09:04:04.389861Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T09:04:04.396422Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T09:04:04.547550Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T09:04:04.547798Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:04.548013Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T09:04:04.548288Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T09:04:04.548366Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:04.550900Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:04.551074Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T09:04:04.551248Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:04.551316Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T09:04:04.551359Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T09:04:04.551409Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T09:04:04.555193Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:04.555264Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T09:04:04.555314Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T09:04:04.557349Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:04.557402Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:04.557453Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:04.557536Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T09:04:04.567342Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T09:04:04.572039Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T09:04:04.572268Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T09:04:04.573222Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:04.573373Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:04:04.573426Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:04.573744Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T09:04:04.573806Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:04.573998Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T09:04:04.574109Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T09:04:04.579068Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:04.579160Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:04.579337Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:04.579393Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5 2025-05-07T09:04:06.025350Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 105, subscribers: 0 2025-05-07T09:04:06.026680Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:1 hive 72057594037968897 at ss 72057594046678944 2025-05-07T09:04:06.026730Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:3 hive 72057594037968897 at ss 72057594046678944 2025-05-07T09:04:06.026759Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:2 hive 72057594037968897 at ss 72057594046678944 2025-05-07T09:04:06.026785Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:4 hive 72057594037968897 at ss 72057594046678944 2025-05-07T09:04:06.027353Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 105 2025-05-07T09:04:06.028277Z node 2 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 1 TxId_Deprecated: 1 TabletID: 72075186233409546 2025-05-07T09:04:06.028467Z node 2 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 3 TxId_Deprecated: 3 TabletID: 72075186233409548 2025-05-07T09:04:06.028863Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5898: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 1 ShardOwnerId: 72057594046678944 ShardLocalIdx: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:06.029140Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 Forgetting tablet 72075186233409546 2025-05-07T09:04:06.030879Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5898: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 3 ShardOwnerId: 72057594046678944 ShardLocalIdx: 3, at schemeshard: 72057594046678944 2025-05-07T09:04:06.033475Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, 
LocalPathId: 3] was 2 Forgetting tablet 72075186233409548 2025-05-07T09:04:06.034543Z node 2 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 2 TxId_Deprecated: 2 TabletID: 72075186233409547 2025-05-07T09:04:06.034689Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5898: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 2 ShardOwnerId: 72057594046678944 ShardLocalIdx: 2, at schemeshard: 72057594046678944 2025-05-07T09:04:06.034860Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-05-07T09:04:06.035786Z node 2 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 4 TxId_Deprecated: 4 TabletID: 72075186233409549 2025-05-07T09:04:06.036002Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5898: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 4 ShardOwnerId: 72057594046678944 ShardLocalIdx: 4, at schemeshard: 72057594046678944 2025-05-07T09:04:06.036162Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 Forgetting tablet 72075186233409547 2025-05-07T09:04:06.036720Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-05-07T09:04:06.036770Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-05-07T09:04:06.036842Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 Forgetting tablet 72075186233409549 2025-05-07T09:04:06.037609Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:205: TTxCleanDroppedSubDomains Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-05-07T09:04:06.037666Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: PersistRemoveSubDomain for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-05-07T09:04:06.037791Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-05-07T09:04:06.038230Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 105 2025-05-07T09:04:06.038330Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 105 2025-05-07T09:04:06.040760Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:1 2025-05-07T09:04:06.040823Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:1 tabletId 72075186233409546 2025-05-07T09:04:06.040913Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:3 
2025-05-07T09:04:06.040937Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:3 tabletId 72075186233409548 2025-05-07T09:04:06.043275Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2 2025-05-07T09:04:06.043317Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:2 tabletId 72075186233409547 2025-05-07T09:04:06.043395Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:4 2025-05-07T09:04:06.043438Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:4 tabletId 72075186233409549 2025-05-07T09:04:06.043598Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 1 candidates, at schemeshard: 72057594046678944 2025-05-07T09:04:06.043688Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-05-07T09:04:06.043757Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-05-07T09:04:06.043807Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-05-07T09:04:06.043898Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T09:04:06.045474Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 105, wait until txId: 105 TestWaitNotification wait txId: 105 2025-05-07T09:04:06.045744Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 105: send EvNotifyTxCompletion 2025-05-07T09:04:06.045788Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 105 2025-05-07T09:04:06.046276Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 105, at schemeshard: 72057594046678944 2025-05-07T09:04:06.046386Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 105: got EvNotifyTxCompletionResult 2025-05-07T09:04:06.046424Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 105: satisfy waiter [2:648:2601] TestWaitNotification: OK eventTxId 105 2025-05-07T09:04:06.047004Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0/BSVolume" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T09:04:06.047189Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe 
path "/MyRoot/USER_0/BSVolume" took 247us result status StatusPathDoesNotExist 2025-05-07T09:04:06.047367Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0/BSVolume\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/USER_0/BSVolume" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-05-07T09:04:06.047957Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T09:04:06.048111Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 166us result status StatusPathDoesNotExist 2025-05-07T09:04:06.048240Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/USER_0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 >> TCdcStreamTests::ResolvedTimestamps [GOOD] >> TCdcStreamTests::RetentionPeriod >> TSchemeShardSubDomainTest::CreateWithNoEqualName >> TSchemeShardSubDomainTest::RedefineErrors [GOOD] >> TSchemeShardSubDomainTest::DeclareAndForbidTableInside >> TSchemeShardSubDomainTest::DiskSpaceUsage >> TSchemeShardSubDomainTest::ConcurrentCreateSubDomainAndDescribe [GOOD] >> TSchemeShardSubDomainTest::ColumnSchemeLimitsRejects ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::CreateDropNbs [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140] 2025-05-07T09:04:06.202217Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T09:04:06.202301Z 
node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:04:06.202340Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T09:04:06.202374Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T09:04:06.202446Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T09:04:06.202479Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T09:04:06.202535Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:04:06.202624Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T09:04:06.203364Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T09:04:06.203720Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T09:04:06.282993Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T09:04:06.283049Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:04:06.300083Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T09:04:06.300279Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T09:04:06.300474Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T09:04:06.306439Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T09:04:06.306781Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T09:04:06.307489Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:06.307662Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T09:04:06.310385Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:06.311816Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:06.311879Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:06.311957Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T09:04:06.312007Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:06.312168Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T09:04:06.312433Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T09:04:06.319657Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T09:04:06.442302Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T09:04:06.442539Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:06.442750Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T09:04:06.442986Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T09:04:06.443070Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:06.445081Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:06.445214Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T09:04:06.445402Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:06.445463Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T09:04:06.445501Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T09:04:06.445531Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T09:04:06.447525Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:06.447578Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T09:04:06.447617Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T09:04:06.449240Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:06.449293Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:06.449330Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:06.449387Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T09:04:06.452577Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T09:04:06.454248Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T09:04:06.454404Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T09:04:06.455253Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:06.455378Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:04:06.455417Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:06.455649Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T09:04:06.455687Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:06.455823Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T09:04:06.455882Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing 
txId 1 2025-05-07T09:04:06.457456Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:06.457504Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:06.457634Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:06.457658Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5 2025-05-07T09:04:06.690506Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 102, subscribers: 0 2025-05-07T09:04:06.692105Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:1 hive 72057594037968897 at ss 72057594046678944 2025-05-07T09:04:06.692176Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:3 hive 72057594037968897 at ss 72057594046678944 2025-05-07T09:04:06.692218Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:2 hive 72057594037968897 at ss 72057594046678944 2025-05-07T09:04:06.692244Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:4 hive 72057594037968897 at ss 72057594046678944 2025-05-07T09:04:06.692912Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-05-07T09:04:06.693744Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 1 TxId_Deprecated: 1 TabletID: 72075186233409546 Forgetting tablet 72075186233409546 2025-05-07T09:04:06.695123Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5898: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 1 ShardOwnerId: 72057594046678944 ShardLocalIdx: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:06.695429Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-05-07T09:04:06.696544Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 3 TxId_Deprecated: 3 TabletID: 72075186233409548 2025-05-07T09:04:06.696950Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 2 TxId_Deprecated: 2 TabletID: 72075186233409547 2025-05-07T09:04:06.697082Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5898: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 3 ShardOwnerId: 72057594046678944 ShardLocalIdx: 3, at schemeshard: 72057594046678944 2025-05-07T09:04:06.697332Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, 
LocalPathId: 3] was 2 Forgetting tablet 72075186233409548 2025-05-07T09:04:06.697762Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5898: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 2 ShardOwnerId: 72057594046678944 ShardLocalIdx: 2, at schemeshard: 72057594046678944 2025-05-07T09:04:06.697939Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 Forgetting tablet 72075186233409547 2025-05-07T09:04:06.699177Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 4 TxId_Deprecated: 4 TabletID: 72075186233409549 Forgetting tablet 72075186233409549 2025-05-07T09:04:06.699744Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5898: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 4 ShardOwnerId: 72057594046678944 ShardLocalIdx: 4, at schemeshard: 72057594046678944 2025-05-07T09:04:06.699944Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-05-07T09:04:06.700600Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-05-07T09:04:06.700652Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-05-07T09:04:06.700726Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-05-07T09:04:06.701109Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:205: TTxCleanDroppedSubDomains Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-05-07T09:04:06.701167Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: PersistRemoveSubDomain for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-05-07T09:04:06.701293Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-05-07T09:04:06.701558Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-05-07T09:04:06.702269Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-05-07T09:04:06.704087Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:1 2025-05-07T09:04:06.704167Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:1 tabletId 72075186233409546 2025-05-07T09:04:06.704292Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:3 2025-05-07T09:04:06.704318Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:3 tabletId 72075186233409548 2025-05-07T09:04:06.704396Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2 2025-05-07T09:04:06.704437Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:2 tabletId 72075186233409547 2025-05-07T09:04:06.706784Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:4 2025-05-07T09:04:06.706850Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:4 tabletId 72075186233409549 2025-05-07T09:04:06.707063Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 1 candidates, at schemeshard: 72057594046678944 2025-05-07T09:04:06.707157Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-05-07T09:04:06.707231Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-05-07T09:04:06.707280Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-05-07T09:04:06.707378Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T09:04:06.709017Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 102, wait until txId: 102 TestWaitNotification wait txId: 102 2025-05-07T09:04:06.709302Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2025-05-07T09:04:06.709345Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 2025-05-07T09:04:06.709803Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 2025-05-07T09:04:06.709906Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-05-07T09:04:06.709948Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:534:2490] TestWaitNotification: OK eventTxId 102 2025-05-07T09:04:06.725427Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0/BSVolume" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T09:04:06.725678Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0/BSVolume" took 255us result status StatusPathDoesNotExist 2025-05-07T09:04:06.725881Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: 
Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0/BSVolume\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/USER_0/BSVolume" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-05-07T09:04:06.726591Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T09:04:06.726759Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 152us result status StatusPathDoesNotExist 2025-05-07T09:04:06.726882Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/USER_0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944
>> TSchemeShardSubDomainTest::RestartAtInFly [GOOD]
>> TSchemeShardSubDomainTest::SimultaneousDefine
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::SchemeLimitsRejects [GOOD]
Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140] 2025-05-07T09:04:03.088455Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T09:04:03.088570Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:04:03.088633Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T09:04:03.088675Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration
2025-05-07T09:04:03.088733Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T09:04:03.088767Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T09:04:03.088819Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:04:03.089655Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T09:04:03.090534Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T09:04:03.094077Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T09:04:03.184740Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T09:04:03.184801Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:04:03.203068Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T09:04:03.203253Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T09:04:03.203388Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T09:04:03.214647Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T09:04:03.217134Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T09:04:03.221068Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:03.226048Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T09:04:03.231405Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:03.245331Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:03.245432Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:03.245524Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T09:04:03.245601Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:03.245762Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 
2025-05-07T09:04:03.246576Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T09:04:03.255503Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T09:04:03.409987Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T09:04:03.410257Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:03.410484Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T09:04:03.410718Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T09:04:03.410788Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:03.413528Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:03.413664Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T09:04:03.413871Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:03.413955Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T09:04:03.414015Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T09:04:03.414054Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T09:04:03.415912Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:03.415977Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T09:04:03.416022Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T09:04:03.417877Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:03.417934Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: 
NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:03.418000Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:03.418063Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T09:04:03.421754Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T09:04:03.423658Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T09:04:03.423868Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T09:04:03.424902Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:03.425049Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:04:03.425095Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:03.425395Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T09:04:03.425454Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:03.425630Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T09:04:03.425702Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T09:04:03.427621Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:03.427682Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:03.427842Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard 
DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:03.427880Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 6Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:1 hive 72057594037968897 at ss 72057594046678944 2025-05-07T09:04:06.729515Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:15 hive 72057594037968897 at ss 72057594046678944 2025-05-07T09:04:06.729540Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:14 hive 72057594037968897 at ss 72057594046678944 2025-05-07T09:04:06.729565Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:2 hive 72057594037968897 at ss 72057594046678944 2025-05-07T09:04:06.729591Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:16 hive 72057594037968897 at ss 72057594046678944 2025-05-07T09:04:06.730824Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 139 2025-05-07T09:04:06.731530Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 139 2025-05-07T09:04:06.731644Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 1 TxId_Deprecated: 1 TabletID: 72075186233409546 2025-05-07T09:04:06.732916Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5898: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 1 ShardOwnerId: 72057594046678944 ShardLocalIdx: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:06.733193Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-05-07T09:04:06.733487Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 15 TxId_Deprecated: 15 TabletID: 72075186233409556 Forgetting tablet 72075186233409546 2025-05-07T09:04:06.734684Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5898: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 15 ShardOwnerId: 72057594046678944 ShardLocalIdx: 15, at schemeshard: 72057594046678944 2025-05-07T09:04:06.734871Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 16] was 3 2025-05-07T09:04:06.735230Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 14 TxId_Deprecated: 14 TabletID: 72075186233409555 2025-05-07T09:04:06.737283Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5898: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 14 ShardOwnerId: 72057594046678944 ShardLocalIdx: 14, at schemeshard: 72057594046678944 2025-05-07T09:04:06.737485Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 16] was 2 Forgetting tablet 72075186233409556 2025-05-07T09:04:06.738329Z node 1 
:HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 2 TxId_Deprecated: 2 TabletID: 72075186233409547 Forgetting tablet 72075186233409555 2025-05-07T09:04:06.739412Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 16 TxId_Deprecated: 16 TabletID: 72075186233409557 2025-05-07T09:04:06.739813Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5898: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 2 ShardOwnerId: 72057594046678944 ShardLocalIdx: 2, at schemeshard: 72057594046678944 2025-05-07T09:04:06.740004Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 Forgetting tablet 72075186233409547 Forgetting tablet 72075186233409557 2025-05-07T09:04:06.741499Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5898: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 16 ShardOwnerId: 72057594046678944 ShardLocalIdx: 16, at schemeshard: 72057594046678944 2025-05-07T09:04:06.741718Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 16] was 1 2025-05-07T09:04:06.742190Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 139 2025-05-07T09:04:06.742730Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-05-07T09:04:06.742786Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 16], at schemeshard: 72057594046678944 2025-05-07T09:04:06.742859Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-05-07T09:04:06.743230Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:205: TTxCleanDroppedSubDomains Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-05-07T09:04:06.743277Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: PersistRemoveSubDomain for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-05-07T09:04:06.743401Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-05-07T09:04:06.745300Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:1 2025-05-07T09:04:06.745356Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:1 tabletId 72075186233409546 2025-05-07T09:04:06.745445Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:15 2025-05-07T09:04:06.745467Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:15 tabletId 72075186233409556 2025-05-07T09:04:06.745528Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:14 2025-05-07T09:04:06.745551Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:14 tabletId 72075186233409555 2025-05-07T09:04:06.747867Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2 2025-05-07T09:04:06.747926Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:2 tabletId 72075186233409547 2025-05-07T09:04:06.748023Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:16 2025-05-07T09:04:06.748072Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:16 tabletId 72075186233409557 2025-05-07T09:04:06.748293Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 1 candidates, at schemeshard: 72057594046678944 2025-05-07T09:04:06.748386Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-05-07T09:04:06.748461Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-05-07T09:04:06.748525Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-05-07T09:04:06.748623Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T09:04:06.750129Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 139, wait until txId: 139 TestWaitNotification wait txId: 139 2025-05-07T09:04:06.750847Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 139: send EvNotifyTxCompletion 2025-05-07T09:04:06.750892Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 139 2025-05-07T09:04:06.751823Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 139, at schemeshard: 72057594046678944 2025-05-07T09:04:06.751920Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 139: got EvNotifyTxCompletionResult 2025-05-07T09:04:06.751947Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 139: satisfy waiter [1:2267:4038] TestWaitNotification: OK eventTxId 139 2025-05-07T09:04:06.753207Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T09:04:06.753403Z node 1 :SCHEMESHARD_DESCRIBE INFO: 
schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 205us result status StatusSuccess 2025-05-07T09:04:06.753659Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 5 ShardsInside: 0 ShardsLimit: 6 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 20 SecurityState { Audience: "/MyRoot" } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TSchemeShardSubDomainTest::CreateDropSolomon [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::RedefineErrors [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140] 2025-05-07T09:04:06.649274Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T09:04:06.649352Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:04:06.649383Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T09:04:06.649408Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T09:04:06.649471Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T09:04:06.649492Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T09:04:06.649532Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:04:06.649583Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T09:04:06.650198Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T09:04:06.650477Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T09:04:06.713582Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T09:04:06.713636Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:04:06.729737Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T09:04:06.729925Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T09:04:06.730129Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T09:04:06.736068Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T09:04:06.736466Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T09:04:06.737065Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:06.737236Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T09:04:06.739925Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:06.741260Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:06.741338Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:06.741417Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T09:04:06.741467Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:06.741578Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T09:04:06.741843Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T09:04:06.748138Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T09:04:06.852358Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" 
StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T09:04:06.852599Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:06.852813Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T09:04:06.853073Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T09:04:06.853145Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:06.855444Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:06.855586Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T09:04:06.855838Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:06.855910Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T09:04:06.855947Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T09:04:06.855979Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T09:04:06.858195Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:06.858263Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T09:04:06.858309Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T09:04:06.860579Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:06.860658Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:06.860708Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:06.860801Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T09:04:06.863900Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 
72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T09:04:06.866152Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T09:04:06.866386Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T09:04:06.867510Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:06.867667Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:04:06.867712Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:06.868033Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T09:04:06.868100Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:06.868272Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T09:04:06.868342Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T09:04:06.870696Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:06.870763Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:06.870962Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:06.871003Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 
ation.cpp:485: TTxOperationProgress Execute, operationId: 108:0, at schemeshard: 72057594046678944 2025-05-07T09:04:07.127896Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 108:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T09:04:07.127954Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 108:0 ProgressState no shards to create, do next state 2025-05-07T09:04:07.128003Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 108:0 2 -> 3 2025-05-07T09:04:07.129896Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 108:0, at schemeshard: 72057594046678944 2025-05-07T09:04:07.129956Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 108:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T09:04:07.130021Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 108:0 3 -> 128 2025-05-07T09:04:07.131775Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 108:0, at schemeshard: 72057594046678944 2025-05-07T09:04:07.131828Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 108:0, at schemeshard: 72057594046678944 2025-05-07T09:04:07.131871Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 108:0, at tablet# 72057594046678944 2025-05-07T09:04:07.131923Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 108 ready parts: 1/1 2025-05-07T09:04:07.132086Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 108 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T09:04:07.133679Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 108:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:108 msg type: 269090816 2025-05-07T09:04:07.133822Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 108, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 108 at step: 5000007 FAKE_COORDINATOR: advance: minStep5000007 State->FrontStep: 5000006 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 108 at step: 5000007 2025-05-07T09:04:07.134174Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000007, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:07.134315Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 108 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000007 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:04:07.134375Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose 
HandleReply TEvOperationPlan, operationId 108:0, at tablet# 72057594046678944 2025-05-07T09:04:07.134693Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 108:0 128 -> 240 2025-05-07T09:04:07.134750Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 108:0, at tablet# 72057594046678944 2025-05-07T09:04:07.134948Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5 2025-05-07T09:04:07.135028Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 108 2025-05-07T09:04:07.136992Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:07.137045Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 108, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-05-07T09:04:07.137224Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:07.137284Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:205:2207], at schemeshard: 72057594046678944, txId: 108, path id: 2 2025-05-07T09:04:07.137704Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 108:0, at schemeshard: 72057594046678944 2025-05-07T09:04:07.137754Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046678944] TDone opId# 108:0 ProgressState 2025-05-07T09:04:07.137860Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#108:0 progress is 1/1 2025-05-07T09:04:07.137899Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 108 ready parts: 1/1 2025-05-07T09:04:07.137949Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#108:0 progress is 1/1 2025-05-07T09:04:07.138007Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 108 ready parts: 1/1 2025-05-07T09:04:07.138043Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 108, ready parts: 1/1, is published: false 2025-05-07T09:04:07.138102Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 108 ready parts: 1/1 2025-05-07T09:04:07.138149Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 108:0 2025-05-07T09:04:07.138195Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 108:0 2025-05-07T09:04:07.138281Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 6 2025-05-07T09:04:07.138325Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 108, publications: 1, subscribers: 0 2025-05-07T09:04:07.138360Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 108, [OwnerId: 72057594046678944, LocalPathId: 2], 8 2025-05-07T09:04:07.138871Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 8 PathOwnerId: 72057594046678944, cookie: 108 2025-05-07T09:04:07.139009Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 8 PathOwnerId: 72057594046678944, cookie: 108 2025-05-07T09:04:07.139054Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 108 2025-05-07T09:04:07.139097Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 108, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 8 2025-05-07T09:04:07.139145Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5 2025-05-07T09:04:07.139248Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 108, subscribers: 0 2025-05-07T09:04:07.142634Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 108 TestModificationResult got TxId: 108, wait until txId: 108 TestWaitNotification wait txId: 108 2025-05-07T09:04:07.142979Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 108: send EvNotifyTxCompletion 2025-05-07T09:04:07.143034Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 108 2025-05-07T09:04:07.143551Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 108, at schemeshard: 72057594046678944 2025-05-07T09:04:07.143646Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 108: got EvNotifyTxCompletionResult 2025-05-07T09:04:07.143685Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 108: satisfy waiter [1:595:2549] TestWaitNotification: OK eventTxId 108 2025-05-07T09:04:07.144325Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T09:04:07.144570Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 235us result status StatusSuccess 2025-05-07T09:04:07.144949Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0" PathDescription { Self { Name: 
"USER_0" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 8 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 8 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 6 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 6 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 Mediators: 72075186233409548 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } StoragePools { Name: "pool-hdd-1" Kind: "hdd-1" } StoragePools { Name: "pool-hdd-2" Kind: "hdd-1" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TSchemeShardSubDomainTest::CreateAndWait >> TPersQueueTest::DirectReadNotCached [GOOD] >> TPersQueueTest::DirectReadBadCases >> TCdcStreamTests::RetentionPeriod [GOOD] >> TCdcStreamTests::TopicPartitions ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::RestartAtInFly [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140] 2025-05-07T09:04:06.974968Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T09:04:06.975067Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:04:06.975109Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T09:04:06.975146Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T09:04:06.975218Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T09:04:06.975254Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T09:04:06.975312Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:04:06.975393Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: 
[RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T09:04:06.976215Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T09:04:06.976602Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T09:04:07.063428Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T09:04:07.063485Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:04:07.080661Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T09:04:07.080880Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T09:04:07.081101Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T09:04:07.088134Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T09:04:07.088448Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T09:04:07.089114Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:07.089304Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T09:04:07.092109Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:07.093513Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:07.093572Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:07.093642Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T09:04:07.093689Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:07.093792Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T09:04:07.094056Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T09:04:07.100548Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T09:04:07.230982Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: 
"pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T09:04:07.231227Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:07.231440Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T09:04:07.231698Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T09:04:07.231764Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:07.233934Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:07.234099Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T09:04:07.234294Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:07.234358Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T09:04:07.234398Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T09:04:07.234435Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T09:04:07.236440Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:07.236506Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T09:04:07.236555Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T09:04:07.238331Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:07.238382Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:07.238425Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:07.238492Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T09:04:07.242395Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet 
{ TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T09:04:07.244391Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T09:04:07.244595Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T09:04:07.245584Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:07.245727Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:04:07.245777Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:07.246104Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T09:04:07.246163Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:07.246337Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T09:04:07.246408Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T09:04:07.248451Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:07.248511Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:07.248676Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:07.248721Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 
122: TTxUpgradeSchema.Complete 2025-05-07T09:04:07.422072Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T09:04:07.422121Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:04:07.422405Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T09:04:07.423291Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1383: TTxInit for Paths, read records: 2, at schemeshard: 72057594046678944 2025-05-07T09:04:07.423383Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:319: AttachChild: child attached as only one child to the parent, parent id: [OwnerId: 72057594046678944, LocalPathId: 1], parent name: MyRoot, child name: USER_0, child id: [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-05-07T09:04:07.423480Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1457: TTxInit for UserAttributes, read records: 0, at schemeshard: 72057594046678944 2025-05-07T09:04:07.423559Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1483: TTxInit for UserAttributesAlterData, read records: 0, at schemeshard: 72057594046678944 2025-05-07T09:04:07.423808Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 0 2025-05-07T09:04:07.424080Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1785: TTxInit for Tables, read records: 0, at schemeshard: 72057594046678944 2025-05-07T09:04:07.424173Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__root_data_erasure_manager.cpp:450: [RootDataErasureManager] Restore: Generation# 0, Status# 0, WakeupInterval# 604800 s, NumberDataErasureTenantsInRunning# 0 2025-05-07T09:04:07.424370Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2011: TTxInit for Columns, read records: 0, at schemeshard: 72057594046678944 2025-05-07T09:04:07.424512Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2071: TTxInit for ColumnsAlters, read records: 0, at schemeshard: 72057594046678944 2025-05-07T09:04:07.424624Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2129: TTxInit for Shards, read records: 3, at schemeshard: 72057594046678944 2025-05-07T09:04:07.424664Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-05-07T09:04:07.424692Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-05-07T09:04:07.424732Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-05-07T09:04:07.424831Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2215: TTxInit for TablePartitions, read records: 0, at schemeshard: 72057594046678944 2025-05-07T09:04:07.424958Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2281: TTxInit for TableShardPartitionConfigs, read records: 0, at schemeshard: 72057594046678944 2025-05-07T09:04:07.425167Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2431: TTxInit for ChannelsBinding, read records: 9, at schemeshard: 72057594046678944 2025-05-07T09:04:07.425535Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2810: TTxInit for TableIndexes, read records: 0, at 
schemeshard: 72057594046678944 2025-05-07T09:04:07.425656Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2889: TTxInit for TableIndexKeys, read records: 0, at schemeshard: 72057594046678944 2025-05-07T09:04:07.426090Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3387: TTxInit for KesusInfos, read records: 0, at schemeshard: 72057594046678944 2025-05-07T09:04:07.426171Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3423: TTxInit for KesusAlters, read records: 0, at schemeshard: 72057594046678944 2025-05-07T09:04:07.426386Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3637: TTxInit for TxShards, read records: 0, at schemeshard: 72057594046678944 2025-05-07T09:04:07.426476Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3782: TTxInit for ShardToDelete, read records: 0, at schemeshard: 72057594046678944 2025-05-07T09:04:07.426572Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3799: TTxInit for BackupSettings, read records: 0, at schemeshard: 72057594046678944 2025-05-07T09:04:07.426771Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3959: TTxInit for ShardBackupStatus, read records: 0, at schemeshard: 72057594046678944 2025-05-07T09:04:07.426924Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3975: TTxInit for CompletedBackup, read records: 0, at schemeshard: 72057594046678944 2025-05-07T09:04:07.427131Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4260: TTxInit for Publications, read records: 0, at schemeshard: 72057594046678944 2025-05-07T09:04:07.427356Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4558: IndexBuild , records: 0, at schemeshard: 72057594046678944 2025-05-07T09:04:07.427535Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4705: SnapshotTables: snapshots: 0 tables: 0, at schemeshard: 72057594046678944 2025-05-07T09:04:07.427587Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4732: SnapshotSteps: snapshots: 0, at schemeshard: 72057594046678944 2025-05-07T09:04:07.427636Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4759: LongLocks: records: 0, at schemeshard: 72057594046678944 2025-05-07T09:04:07.435510Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:07.435601Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:07.435728Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T09:04:07.435780Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:07.435825Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T09:04:07.435919Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 TestWaitNotification wait txId: 100 2025-05-07T09:04:07.488789Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 100: send EvNotifyTxCompletion 2025-05-07T09:04:07.488884Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- 
TTxNotificationSubscriber, SendToSchemeshard, txId 100 Leader for TabletID 72057594046678944 is [1:459:2409] sender: [1:519:2058] recipient: [1:15:2062] 2025-05-07T09:04:07.489720Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 100, at schemeshard: 72057594046678944 2025-05-07T09:04:07.489860Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 100: got EvNotifyTxCompletionResult 2025-05-07T09:04:07.489929Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 100: satisfy waiter [1:517:2454] TestWaitNotification: OK eventTxId 100 2025-05-07T09:04:07.490494Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T09:04:07.490743Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 236us result status StatusSuccess 2025-05-07T09:04:07.491202Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0" PathDescription { Self { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 1 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 Mediators: 72075186233409548 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:04:07.491780Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T09:04:07.491944Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 180us result status StatusSuccess 2025-05-07T09:04:07.492319Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 
ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 3 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TSchemeShardSubDomainTest::CreateWithNoEqualName [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::CreateDropSolomon [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140] 2025-05-07T09:04:06.321850Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T09:04:06.321944Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:04:06.322004Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T09:04:06.322063Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T09:04:06.322137Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T09:04:06.322173Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T09:04:06.322232Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:04:06.322316Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 
604800.000000s, IsManualStartup# false 2025-05-07T09:04:06.323091Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T09:04:06.323447Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T09:04:06.409501Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T09:04:06.409566Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:04:06.427125Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T09:04:06.427359Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T09:04:06.427551Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T09:04:06.433667Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T09:04:06.434027Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T09:04:06.434660Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:06.434869Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T09:04:06.437835Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:06.439319Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:06.439380Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:06.439458Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T09:04:06.439502Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:06.439629Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T09:04:06.439906Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T09:04:06.446365Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T09:04:06.561903Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T09:04:06.562120Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:06.562314Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T09:04:06.562483Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T09:04:06.562548Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:06.564670Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:06.564868Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T09:04:06.565094Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:06.565222Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T09:04:06.565261Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T09:04:06.565292Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T09:04:06.567207Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:06.567263Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T09:04:06.567303Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T09:04:06.568731Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:06.568768Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:06.568808Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:06.568856Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T09:04:06.571550Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 
2025-05-07T09:04:06.572983Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T09:04:06.573195Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T09:04:06.574104Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:06.574277Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:04:06.574329Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:06.574623Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T09:04:06.574676Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:06.574856Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T09:04:06.574961Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T09:04:06.576539Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:06.576595Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:06.576784Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:06.576814Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 
de 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-05-07T09:04:07.575536Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 104:0 2025-05-07T09:04:07.575559Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 104:0 2025-05-07T09:04:07.575671Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5 2025-05-07T09:04:07.575697Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 104, publications: 2, subscribers: 0 2025-05-07T09:04:07.575722Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 104, [OwnerId: 72057594046678944, LocalPathId: 1], 7 2025-05-07T09:04:07.575743Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 104, [OwnerId: 72057594046678944, LocalPathId: 2], 18446744073709551615 2025-05-07T09:04:07.576246Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 104 2025-05-07T09:04:07.576312Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 104 2025-05-07T09:04:07.576341Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 104 2025-05-07T09:04:07.576368Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 104, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 7 2025-05-07T09:04:07.576397Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-05-07T09:04:07.576852Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 104 2025-05-07T09:04:07.576923Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 104 2025-05-07T09:04:07.576953Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 104 2025-05-07T09:04:07.576970Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 104, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 18446744073709551615 2025-05-07T09:04:07.576989Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, 
LocalPathId: 2] was 4 2025-05-07T09:04:07.577029Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 104, subscribers: 0 2025-05-07T09:04:07.578537Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:1 hive 72057594037968897 at ss 72057594046678944 2025-05-07T09:04:07.578584Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:2 hive 72057594037968897 at ss 72057594046678944 2025-05-07T09:04:07.579611Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 1 TxId_Deprecated: 1 TabletID: 72075186233409546 2025-05-07T09:04:07.580672Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5898: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 1 ShardOwnerId: 72057594046678944 ShardLocalIdx: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:07.580884Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 Forgetting tablet 72075186233409546 2025-05-07T09:04:07.581312Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 2 TxId_Deprecated: 2 TabletID: 72075186233409547 2025-05-07T09:04:07.582136Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5898: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 2 ShardOwnerId: 72057594046678944 ShardLocalIdx: 2, at schemeshard: 72057594046678944 2025-05-07T09:04:07.582305Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 Forgetting tablet 72075186233409547 2025-05-07T09:04:07.582662Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 2025-05-07T09:04:07.582850Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:205: TTxCleanDroppedSubDomains Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-05-07T09:04:07.582887Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: PersistRemoveSubDomain for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-05-07T09:04:07.582981Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-05-07T09:04:07.583258Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 2025-05-07T09:04:07.583383Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-05-07T09:04:07.583433Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-05-07T09:04:07.583499Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 
72057594046678944, LocalPathId: 1] was 1 2025-05-07T09:04:07.586736Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:1 2025-05-07T09:04:07.586798Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:1 tabletId 72075186233409546 2025-05-07T09:04:07.586908Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2 2025-05-07T09:04:07.586946Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:2 tabletId 72075186233409547 2025-05-07T09:04:07.587149Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-05-07T09:04:07.587307Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 104, wait until txId: 104 TestWaitNotification wait txId: 104 2025-05-07T09:04:07.587549Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 104: send EvNotifyTxCompletion 2025-05-07T09:04:07.587583Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 104 2025-05-07T09:04:07.588023Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 104, at schemeshard: 72057594046678944 2025-05-07T09:04:07.588159Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 104: got EvNotifyTxCompletionResult 2025-05-07T09:04:07.588205Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 104: satisfy waiter [1:2091:3697] TestWaitNotification: OK eventTxId 104 2025-05-07T09:04:07.594533Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0/Solomon" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T09:04:07.594768Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0/Solomon" took 265us result status StatusPathDoesNotExist 2025-05-07T09:04:07.594902Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0/Solomon\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/USER_0/Solomon" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-05-07T09:04:07.595461Z node 1 
:SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T09:04:07.595622Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 142us result status StatusPathDoesNotExist 2025-05-07T09:04:07.595757Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/USER_0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944
>> TSchemeShardSubDomainTest::DeclareAndForbidTableInside [GOOD]
>> DstCreator::CannotFindColumn [GOOD]
>> TSchemeShardSubDomainTest::SimultaneousCreateTenantTable
>> TSchemeShardSubDomainTest::CreateSubDomainWithoutTablets
>> DstCreator::ReplicationConsistencyLevelMismatch [GOOD]
>> TSchemeShardSubDomainTest::SimultaneousDefine [GOOD]
>> DstCreator::KeyColumnNameMismatch [GOOD]
>> DstCreator::WithAsyncIndex [GOOD]
>> DstCreator::SamePartitionCount [GOOD]
>> DstCreator::EmptyReplicationConfig [GOOD]
>> DstCreator::ColumnTypeMismatch [GOOD]
>> TSchemeShardSubDomainTest::CreateAndWait [GOOD]
>> DstCreator::KeyColumnsSizeMismatch [GOOD]
>> TStoragePoolsQuotasTest::DisableWritesToDatabase-IsExternalSubdomain-false
>> TSchemeShardSubDomainTest::ColumnSchemeLimitsRejects [GOOD]
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::CreateWithNoEqualName [GOOD]
Test command err:
Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140] 2025-05-07T09:04:07.446770Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T09:04:07.446834Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:04:07.446869Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T09:04:07.446907Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T09:04:07.446972Z node 1 :FLAT_TX_SCHEMESHARD NOTICE:
schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T09:04:07.446996Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T09:04:07.447044Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:04:07.447113Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T09:04:07.447799Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T09:04:07.448108Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T09:04:07.523169Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T09:04:07.523231Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:04:07.541441Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T09:04:07.541673Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T09:04:07.541894Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T09:04:07.548337Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T09:04:07.548664Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T09:04:07.549389Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:07.549614Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T09:04:07.552933Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:07.554499Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:07.554570Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:07.554665Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T09:04:07.554738Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:07.554847Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T09:04:07.555145Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: 
Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T09:04:07.562540Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T09:04:07.703273Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T09:04:07.703540Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:07.703807Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T09:04:07.704082Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T09:04:07.704161Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:07.706845Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:07.707014Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T09:04:07.707246Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:07.707325Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T09:04:07.707381Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T09:04:07.707419Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T09:04:07.709609Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:07.709691Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T09:04:07.709741Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T09:04:07.712011Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:07.712062Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 
72057594046678944 2025-05-07T09:04:07.712118Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:07.712171Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T09:04:07.714760Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T09:04:07.716619Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T09:04:07.716803Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T09:04:07.717696Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:07.717813Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:04:07.717863Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:07.718191Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T09:04:07.718252Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:07.718406Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T09:04:07.718482Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T09:04:07.720380Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:07.720433Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:07.720584Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:07.720620Z 
node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... ready parts: 1/1 2025-05-07T09:04:08.125933Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#102:0 progress is 1/1 2025-05-07T09:04:08.125958Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-05-07T09:04:08.126002Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: true 2025-05-07T09:04:08.126069Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:628:2561] message: TxId: 102 2025-05-07T09:04:08.126113Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-05-07T09:04:08.126145Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 102:0 2025-05-07T09:04:08.126182Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 102:0 2025-05-07T09:04:08.126273Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-05-07T09:04:08.128222Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-05-07T09:04:08.128270Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:629:2562] TestWaitNotification: OK eventTxId 102 TestModificationResults wait txId: 108 2025-05-07T09:04:08.131325Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpMkDir MkDir { Name: "USER_3" } } TxId: 108 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T09:04:08.131581Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_mkdir.cpp:115: TMkDir Propose, path: /MyRoot/USER_3, operationId: 108:0, at schemeshard: 72057594046678944 2025-05-07T09:04:08.131713Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 108:1, propose status:StatusAlreadyExists, reason: Check failed: path: '/MyRoot/USER_3', error: path exist, request accepts it (id: [OwnerId: 72057594046678944, LocalPathId: 5], type: EPathTypeSubDomain, state: EPathStateNoChanges), at schemeshard: 72057594046678944 2025-05-07T09:04:08.133992Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 108, response: Status: StatusAlreadyExists Reason: "Check failed: path: \'/MyRoot/USER_3\', error: path exist, request accepts it (id: [OwnerId: 72057594046678944, LocalPathId: 5], type: EPathTypeSubDomain, state: EPathStateNoChanges)" TxId: 108 SchemeshardId: 72057594046678944 PathId: 5 PathCreateTxId: 106, at schemeshard: 72057594046678944 2025-05-07T09:04:08.134137Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 108, database: /MyRoot, subject: , status: StatusAlreadyExists, reason: Check failed: path: '/MyRoot/USER_3', error: path exist, request accepts it (id: [OwnerId: 72057594046678944, LocalPathId: 5], type: EPathTypeSubDomain, state: 
EPathStateNoChanges), operation: CREATE DIRECTORY, path: /MyRoot/USER_3 TestModificationResult got TxId: 108, wait until txId: 108 2025-05-07T09:04:08.134745Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T09:04:08.134956Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 189us result status StatusSuccess 2025-05-07T09:04:08.135331Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0" PathDescription { Self { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 1 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:04:08.135899Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T09:04:08.136116Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_1" took 186us result status StatusSuccess 2025-05-07T09:04:08.136545Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_1" PathDescription { Self { Name: "USER_1" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 102 CreateStep: 5000005 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "USER_1" Columns { Name: "RowId" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "RowId" 
KeyColumnIds: 1 TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 1 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:04:08.137240Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_2" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T09:04:08.137403Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_2" took 158us result status StatusSuccess 2025-05-07T09:04:08.137651Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_2" PathDescription { Self { Name: "USER_2" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 104 CreateStep: 5000003 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 2 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 1 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 4 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:04:08.138252Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_3" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T09:04:08.138436Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_3" took 
177us result status StatusSuccess 2025-05-07T09:04:08.138748Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_3" PathDescription { Self { Name: "USER_3" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 106 CreateStep: 5000004 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 1 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409549 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409550 } DomainKey { SchemeShard: 72057594046678944 PathId: 5 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 5 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } } } PathId: 5 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::DeclareAndForbidTableInside [GOOD]
Test command err:
Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140] 2025-05-07T09:04:07.968610Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T09:04:07.968698Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:04:07.968739Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T09:04:07.968792Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T09:04:07.968851Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T09:04:07.968884Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T09:04:07.968952Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:04:07.969037Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10,
DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T09:04:07.969781Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T09:04:07.970162Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T09:04:08.049288Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T09:04:08.049351Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:04:08.062059Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T09:04:08.062210Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T09:04:08.062347Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T09:04:08.067793Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T09:04:08.068089Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T09:04:08.068748Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:08.068926Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T09:04:08.071774Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:08.073108Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:08.073172Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:08.073247Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T09:04:08.073295Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:08.073392Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T09:04:08.073667Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T09:04:08.080427Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T09:04:08.209266Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 
TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T09:04:08.209510Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:08.209734Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T09:04:08.209998Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T09:04:08.210074Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:08.212284Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:08.212460Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T09:04:08.212669Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:08.212740Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T09:04:08.212780Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T09:04:08.212818Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T09:04:08.214720Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:08.214785Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T09:04:08.214835Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T09:04:08.216814Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:08.216876Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:08.216936Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:08.217030Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T09:04:08.221076Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 
MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T09:04:08.223284Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T09:04:08.223511Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T09:04:08.224621Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:08.224776Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:04:08.224833Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:08.225204Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T09:04:08.225269Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:08.225472Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T09:04:08.225570Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T09:04:08.228099Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:08.228162Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:08.228355Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:08.228399Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 
: 72057594046678944, cookie: 101 2025-05-07T09:04:08.290832Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 101 2025-05-07T09:04:08.290866Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 101 2025-05-07T09:04:08.290895Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 2 2025-05-07T09:04:08.290947Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-05-07T09:04:08.291030Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 101, ready parts: 0/1, is published: true 2025-05-07T09:04:08.293034Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 101:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:101 msg type: 269090816 2025-05-07T09:04:08.293180Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 101, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 101 at step: 5000003 FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000002 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 101 at step: 5000003 2025-05-07T09:04:08.293853Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000003, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:08.294019Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 101 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000003 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:04:08.294076Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_mkdir.cpp:33: MkDir::TPropose operationId# 101:0 HandleReply TEvPrivate::TEvOperationPlan, step: 5000003, at schemeshard: 72057594046678944 2025-05-07T09:04:08.294215Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 101:0 128 -> 240 2025-05-07T09:04:08.294396Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-05-07T09:04:08.294475Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-05-07T09:04:08.295289Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-05-07T09:04:08.296814Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 FAKE_COORDINATOR: Erasing txId 101 2025-05-07T09:04:08.298071Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:08.298123Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 101, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-05-07T09:04:08.298287Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 101, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-05-07T09:04:08.298379Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:08.298415Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:205:2207], at schemeshard: 72057594046678944, txId: 101, path id: 2 2025-05-07T09:04:08.298452Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:205:2207], at schemeshard: 72057594046678944, txId: 101, path id: 3 2025-05-07T09:04:08.298721Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 101:0, at schemeshard: 72057594046678944 2025-05-07T09:04:08.298778Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046678944] TDone opId# 101:0 ProgressState 2025-05-07T09:04:08.298889Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#101:0 progress is 1/1 2025-05-07T09:04:08.298948Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-05-07T09:04:08.298992Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#101:0 progress is 1/1 2025-05-07T09:04:08.299028Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-05-07T09:04:08.299067Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 101, ready parts: 1/1, is published: false 2025-05-07T09:04:08.299131Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-05-07T09:04:08.299171Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 101:0 2025-05-07T09:04:08.299211Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 101:0 2025-05-07T09:04:08.299288Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-05-07T09:04:08.299329Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 101, publications: 2, subscribers: 0 2025-05-07T09:04:08.299362Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 2], 5 2025-05-07T09:04:08.299405Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 3], 3 
2025-05-07T09:04:08.300073Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 5 PathOwnerId: 72057594046678944, cookie: 101 2025-05-07T09:04:08.300176Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 5 PathOwnerId: 72057594046678944, cookie: 101 2025-05-07T09:04:08.300223Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 101 2025-05-07T09:04:08.300276Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 5 2025-05-07T09:04:08.300319Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-05-07T09:04:08.301085Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 3 PathOwnerId: 72057594046678944, cookie: 101 2025-05-07T09:04:08.301160Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 3 PathOwnerId: 72057594046678944, cookie: 101 2025-05-07T09:04:08.301188Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 101 2025-05-07T09:04:08.301213Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 3 2025-05-07T09:04:08.301245Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-05-07T09:04:08.301329Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 101, subscribers: 0 2025-05-07T09:04:08.304669Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-05-07T09:04:08.304784Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 TestModificationResult got TxId: 101, wait until txId: 101 TestModificationResults wait txId: 102 2025-05-07T09:04:08.308452Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/USER_0/dir" OperationType: ESchemeOpCreateTable CreateTable { Name: "table_0" Columns { Name: "RowId" Type: "Uint64" } Columns { Name: "Value" Type: "Utf8" } KeyColumnNames: "RowId" } } TxId: 102 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T09:04:08.308853Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__operation_create_table.cpp:426: TCreateTable Propose, path: /MyRoot/USER_0/dir/table_0, opId: 102:0, at schemeshard: 72057594046678944 2025-05-07T09:04:08.308982Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_create_table.cpp:433: TCreateTable Propose, path: /MyRoot/USER_0/dir/table_0, opId: 102:0, schema: Name: "table_0" Columns { Name: "RowId" Type: "Uint64" } Columns { Name: "Value" Type: "Utf8" } KeyColumnNames: "RowId", at schemeshard: 72057594046678944 2025-05-07T09:04:08.309125Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 102:1, propose status:StatusNameConflict, reason: Inclusive subDomain do not support shared transactions, at schemeshard: 72057594046678944 2025-05-07T09:04:08.311498Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 102, response: Status: StatusNameConflict Reason: "Inclusive subDomain do not support shared transactions" TxId: 102 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:04:08.311707Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 102, database: /MyRoot/USER_0, subject: , status: StatusNameConflict, reason: Inclusive subDomain do not support shared transactions, operation: CREATE TABLE, path: /MyRoot/USER_0/dir/table_0 TestModificationResult got TxId: 102, wait until txId: 102
>> TSchemeShardSubDomainTest::CreateSubDomainWithoutTablets [GOOD]
>> DataShardVolatile::DistributedWriteThenImmediateUpsert [GOOD]
>> DataShardVolatile::DistributedWriteThenSplit
>> TSchemeShardSubDomainTest::DeclareDefineAndDelete
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::SimultaneousDefine [GOOD]
Test command err:
Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140] 2025-05-07T09:04:08.214137Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T09:04:08.214242Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:04:08.214283Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T09:04:08.214321Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T09:04:08.214388Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T09:04:08.214417Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T09:04:08.214466Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:04:08.214528Z node 1
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T09:04:08.215271Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T09:04:08.215652Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T09:04:08.295499Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T09:04:08.295555Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:04:08.310155Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T09:04:08.310335Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T09:04:08.310470Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T09:04:08.314706Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T09:04:08.314975Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T09:04:08.315443Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:08.315587Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T09:04:08.317964Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:08.319489Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:08.319557Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:08.319629Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T09:04:08.319681Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:08.319777Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T09:04:08.320021Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T09:04:08.326740Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T09:04:08.470462Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: 
ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T09:04:08.470727Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:08.470999Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T09:04:08.471268Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T09:04:08.471343Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:08.473918Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:08.474114Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T09:04:08.474348Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:08.474409Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T09:04:08.474444Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T09:04:08.474482Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T09:04:08.477810Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:08.477890Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T09:04:08.477933Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T09:04:08.480051Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:08.480119Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:08.480170Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:08.480232Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T09:04:08.483976Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation 
DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T09:04:08.486133Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T09:04:08.486334Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T09:04:08.487457Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:08.487610Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:04:08.487657Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:08.487962Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T09:04:08.488016Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:08.488194Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T09:04:08.488277Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T09:04:08.490718Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:08.490779Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:08.490993Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:08.491032Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 
__operation.cpp:619: TTxOperationReply execute, operationId: 101:0, at schemeshard: 72057594046678944, message: Status: SUCCESS OnTabletId: 72075186233409548 2025-05-07T09:04:08.630357Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:84: NSubDomainState::TConfigureParts operationId# 101:0 HandleReply TEvConfigureStatus operationId:101:0 at schemeshard:72057594046678944 2025-05-07T09:04:08.630424Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:120: NSubDomainState::TConfigureParts operationId# 101:0 Got OK TEvConfigureStatus from tablet# 72075186233409548 shardIdx# 72057594046678944:3 at schemeshard# 72057594046678944 2025-05-07T09:04:08.630469Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 101:0 3 -> 128 2025-05-07T09:04:08.630719Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-05-07T09:04:08.633329Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-05-07T09:04:08.633814Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-05-07T09:04:08.633919Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 101:0, at schemeshard: 72057594046678944 2025-05-07T09:04:08.633952Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 101:0, at schemeshard: 72057594046678944 2025-05-07T09:04:08.634043Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 101:0, at tablet# 72057594046678944 2025-05-07T09:04:08.634096Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 101 ready parts: 1/1 2025-05-07T09:04:08.634196Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 101 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T09:04:08.635657Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 101:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:101 msg type: 269090816 2025-05-07T09:04:08.635760Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 101, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 101 at step: 5000003 FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000002 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 101 at step: 5000003 2025-05-07T09:04:08.636098Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000003, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:08.636203Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 101 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 
5000003 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:04:08.636244Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 101:0, at tablet# 72057594046678944 2025-05-07T09:04:08.636481Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 101:0 128 -> 240 2025-05-07T09:04:08.636522Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 101:0, at tablet# 72057594046678944 2025-05-07T09:04:08.636686Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5 2025-05-07T09:04:08.636770Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 101 2025-05-07T09:04:08.638424Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:08.638459Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 101, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-05-07T09:04:08.638666Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:08.638715Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:205:2207], at schemeshard: 72057594046678944, txId: 101, path id: 2 2025-05-07T09:04:08.638796Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 101:0, at schemeshard: 72057594046678944 2025-05-07T09:04:08.638840Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046678944] TDone opId# 101:0 ProgressState 2025-05-07T09:04:08.638919Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#101:0 progress is 1/1 2025-05-07T09:04:08.638975Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-05-07T09:04:08.639008Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#101:0 progress is 1/1 2025-05-07T09:04:08.639040Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-05-07T09:04:08.639072Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 101, ready parts: 1/1, is published: false 2025-05-07T09:04:08.639111Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-05-07T09:04:08.639146Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 101:0 2025-05-07T09:04:08.639182Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 101:0 2025-05-07T09:04:08.639356Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 6 2025-05-07T09:04:08.639395Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 101, publications: 1, subscribers: 1 2025-05-07T09:04:08.639430Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 2], 4 2025-05-07T09:04:08.640171Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 4 PathOwnerId: 72057594046678944, cookie: 101 2025-05-07T09:04:08.640289Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 4 PathOwnerId: 72057594046678944, cookie: 101 2025-05-07T09:04:08.640338Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 101 2025-05-07T09:04:08.640369Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 4 2025-05-07T09:04:08.640396Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5 2025-05-07T09:04:08.640465Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 101, subscribers: 1 2025-05-07T09:04:08.640502Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:212: TTxAckPublishToSchemeBoard Notify send TEvNotifyTxCompletionResult, at schemeshard: 72057594046678944, to actorId: [1:305:2296] 2025-05-07T09:04:08.643737Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-05-07T09:04:08.643849Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-05-07T09:04:08.643879Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:312:2303] TestWaitNotification: OK eventTxId 100 TestWaitNotification: OK eventTxId 101 2025-05-07T09:04:08.644415Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T09:04:08.644636Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 237us result status StatusSuccess 2025-05-07T09:04:08.644963Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0" PathDescription { Self { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain 
CreateFinished: true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 2 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 2 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 Mediators: 72075186233409548 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944
>> BackupRestore::BackupUuid [GOOD]
>> BackupRestore::RestoreViewQueryText
>> TSchemeShardSubDomainTest::CreateForceDropSolomon
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::CreateAndWait [GOOD]
Test command err:
Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140] 2025-05-07T09:04:08.434490Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T09:04:08.434568Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:04:08.434596Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T09:04:08.434623Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T09:04:08.434664Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T09:04:08.434684Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T09:04:08.434723Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:04:08.434784Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T09:04:08.435368Z node 1 :FLAT_TX_SCHEMESHARD NOTICE:
schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T09:04:08.435615Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T09:04:08.499667Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T09:04:08.499721Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:04:08.512950Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T09:04:08.513124Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T09:04:08.513298Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T09:04:08.518489Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T09:04:08.518764Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T09:04:08.519388Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:08.519589Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T09:04:08.522134Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:08.523478Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:08.523538Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:08.523614Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T09:04:08.523661Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:08.523752Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T09:04:08.523974Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T09:04:08.530202Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T09:04:08.645831Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T09:04:08.646053Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: 
//MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:08.646265Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T09:04:08.646455Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T09:04:08.646523Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:08.650802Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:08.650955Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T09:04:08.651121Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:08.651166Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T09:04:08.651201Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T09:04:08.651239Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T09:04:08.654733Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:08.654785Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T09:04:08.654831Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T09:04:08.657049Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:08.657091Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:08.657124Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:08.657177Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T09:04:08.659840Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T09:04:08.661142Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet 
strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T09:04:08.661326Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T09:04:08.662077Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:08.662172Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:04:08.662215Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:08.662464Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T09:04:08.662514Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:08.662644Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T09:04:08.662702Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T09:04:08.664202Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:08.664243Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:08.664364Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:08.664403Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 
to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 101, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-05-07T09:04:08.718027Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:08.718078Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:205:2207], at schemeshard: 72057594046678944, txId: 101, path id: 2 2025-05-07T09:04:08.718124Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:205:2207], at schemeshard: 72057594046678944, txId: 101, path id: 3 2025-05-07T09:04:08.718322Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 101:0, at schemeshard: 72057594046678944 2025-05-07T09:04:08.718358Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046678944] TDone opId# 101:0 ProgressState 2025-05-07T09:04:08.718409Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#101:0 progress is 1/1 2025-05-07T09:04:08.718426Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-05-07T09:04:08.718447Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#101:0 progress is 1/1 2025-05-07T09:04:08.718464Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-05-07T09:04:08.718494Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 101, ready parts: 1/1, is published: false 2025-05-07T09:04:08.718518Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-05-07T09:04:08.718536Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 101:0 2025-05-07T09:04:08.718578Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 101:0 2025-05-07T09:04:08.718618Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-05-07T09:04:08.718640Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 101, publications: 2, subscribers: 0 2025-05-07T09:04:08.718668Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 2], 5 2025-05-07T09:04:08.718683Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 3], 3 2025-05-07T09:04:08.719125Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 5 PathOwnerId: 72057594046678944, cookie: 101 2025-05-07T09:04:08.719188Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 
Generation: 2 LocalPathId: 2 Version: 5 PathOwnerId: 72057594046678944, cookie: 101 2025-05-07T09:04:08.719210Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 101 2025-05-07T09:04:08.719235Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 5 2025-05-07T09:04:08.719256Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-05-07T09:04:08.719744Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 3 PathOwnerId: 72057594046678944, cookie: 101 2025-05-07T09:04:08.719792Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 3 PathOwnerId: 72057594046678944, cookie: 101 2025-05-07T09:04:08.719820Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 101 2025-05-07T09:04:08.719844Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 3 2025-05-07T09:04:08.719864Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-05-07T09:04:08.719902Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 101, subscribers: 0 2025-05-07T09:04:08.721896Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-05-07T09:04:08.722646Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 TestModificationResult got TxId: 100, wait until txId: 101 TestModificationResults wait txId: 101 TestModificationResult got TxId: 101, wait until txId: 101 TestWaitNotification wait txId: 100 2025-05-07T09:04:08.722825Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 100: send EvNotifyTxCompletion 2025-05-07T09:04:08.722865Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 100 TestWaitNotification wait txId: 101 2025-05-07T09:04:08.722946Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 101: send EvNotifyTxCompletion 2025-05-07T09:04:08.722959Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 101 2025-05-07T09:04:08.723279Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 100, at schemeshard: 72057594046678944 2025-05-07T09:04:08.723362Z node 1 :FLAT_TX_SCHEMESHARD WARN: 
schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 2025-05-07T09:04:08.723391Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 100: got EvNotifyTxCompletionResult 2025-05-07T09:04:08.723417Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 100: satisfy waiter [1:330:2321] 2025-05-07T09:04:08.723455Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-05-07T09:04:08.723479Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:330:2321] TestWaitNotification: OK eventTxId 100 TestWaitNotification: OK eventTxId 101 2025-05-07T09:04:08.723856Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/dir/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T09:04:08.723997Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/dir/USER_0" took 155us result status StatusSuccess 2025-05-07T09:04:08.724307Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/dir/USER_0" PathDescription { Self { Name: "USER_0" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 101 CreateStep: 5000003 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 1 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 0 TimeCastBucketsPerMediator: 0 } DomainKey { SchemeShard: 72057594046678944 PathId: 3 } StoragePools { Name: "/dc-1/users/tenant-1:hdd" Kind: "hdd" } StoragePools { Name: "/dc-1/users/tenant-1:hdd-1" Kind: "hdd-1" } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 3 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:04:08.724663Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/dir" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T09:04:08.724776Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/dir" took 109us result status StatusSuccess 2025-05-07T09:04:08.724985Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: 
Status: StatusSuccess Path: "/MyRoot/dir" PathDescription { Self { Name: "dir" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 4 } ChildrenExist: true } Children { Name: "USER_0" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 101 CreateStep: 5000003 ParentPathId: 2 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TSchemeShardSubDomainTest::SimultaneousCreateDelete >> TSchemeShardSubDomainTest::SimultaneousCreateTenantTable [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/controller/ut_dst_creator/unittest >> DstCreator::ReplicationConsistencyLevelMismatch [GOOD] Test command err: 2025-05-07T09:04:01.684390Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501626993288760200:2067];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:04:01.684459Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002964/r3tmp/tmpC59pfW/pdisk_1.dat 2025-05-07T09:04:02.384904Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:04:02.385029Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:04:02.387316Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:04:02.434759Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TClient is connected to server localhost:11486 TServer::EnableGrpc on GrpcPort 11067, node 1 2025-05-07T09:04:02.774722Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:04:02.774787Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:04:02.774800Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:04:02.778018Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:11486 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:04:03.327670Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:04:03.350244Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:04:03.511752Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1746608643388 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710659 CreateStep: 1746608643591 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: "Src" PathId: 2 S... (TRUNCATED) TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1746608643388 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710659 CreateStep: 1746608643591 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: "Src" PathId: 2 S... 
(TRUNCATED) 2025-05-07T09:04:03.571425Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-05-07T09:04:03.571619Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-05-07T09:04:03.571640Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:87: [DstCreator][rid 1][tid 1] Get table profiles 2025-05-07T09:04:03.572482Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:106: [DstCreator][rid 1][tid 1] Handle NKikimr::NConsole::TEvConfigsDispatcher::TEvGetConfigResponse 2025-05-07T09:04:04.634419Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:158: [DstCreator][rid 1][tid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribeTableResponse { Result: { name: Src, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1746608643472, tx_id: 281474976710658 } } } 2025-05-07T09:04:04.634807Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:249: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxUserProxy::TEvAllocateTxIdResult 2025-05-07T09:04:04.638500Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:279: [DstCreator][rid 1][tid 1] Handle {TEvModifySchemeTransactionResult Status# StatusAlreadyExists txid# 281474976710660 Reason# Check failed: path: '/Root/Dst', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 3], type: EPathTypeTable, state: EPathStateNoChanges)} 2025-05-07T09:04:04.640491Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:335: [DstCreator][rid 1][tid 1] Handle NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 20 Record# Status: StatusSuccess Path: "/Root/Dst" PathDescription { Self { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710659 CreateStep: 1746608643591 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Dst" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } 
KeyColumnNames: "key" KeyColumnIds: 1 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 1 } } TableSchemaVersion: 1 IsBackup: false ReplicationConfig { Mode: REPLICATION_MODE_NONE ConsistencyLevel: CONSISTENCY_LEVEL_UNKNOWN } IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 Row ... classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:04:05.758039Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:04:05.758046Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:04:05.758163Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:18442 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:04:06.053047Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:04:06.062773Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:04:06.132570Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1746608646104 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1746608646195 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: "Src" PathId: 2 S... (TRUNCATED) TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1746608646104 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1746608646195 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: "Src" PathId: 2 S... 
(TRUNCATED) 2025-05-07T09:04:06.165733Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-05-07T09:04:06.165911Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-05-07T09:04:06.165933Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:87: [DstCreator][rid 1][tid 1] Get table profiles 2025-05-07T09:04:06.166417Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:106: [DstCreator][rid 1][tid 1] Handle NKikimr::NConsole::TEvConfigsDispatcher::TEvGetConfigResponse 2025-05-07T09:04:08.235220Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:158: [DstCreator][rid 1][tid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribeTableResponse { Result: { name: Src, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1746608646167, tx_id: 281474976715658 } } } 2025-05-07T09:04:08.235477Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:249: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxUserProxy::TEvAllocateTxIdResult 2025-05-07T09:04:08.236789Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:279: [DstCreator][rid 1][tid 1] Handle {TEvModifySchemeTransactionResult Status# StatusAlreadyExists txid# 281474976715660 Reason# Check failed: path: '/Root/Dst', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 3], type: EPathTypeTable, state: EPathStateNoChanges)} 2025-05-07T09:04:08.237760Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:335: [DstCreator][rid 1][tid 1] Handle NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 20 Record# Status: StatusSuccess Path: "/Root/Dst" PathDescription { Self { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1746608646195 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Dst" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } 
KeyColumnNames: "key" KeyColumnIds: 1 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 1 } } TableSchemaVersion: 1 IsBackup: false ReplicationConfig { Mode: REPLICATION_MODE_READ_ONLY ConsistencyLevel: CONSISTENCY_LEVEL_GLOBAL } IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } 
UserAttributes { Key: "__async_replica" Value: "true" } } PathId: 3 PathOwnerId: 72057594046644480 2025-05-07T09:04:08.237949Z node 2 :REPLICATION_CONTROLLER ERROR: dst_creator.cpp:594: [DstCreator][rid 1][tid 1] Error: status# StatusSchemeError, reason# Replication consistency level mismatch: expected: CONSISTENCY_LEVEL_ROW, got: 1 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::ColumnSchemeLimitsRejects [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140] 2025-05-07T09:04:06.786968Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T09:04:06.787048Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:04:06.787111Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T09:04:06.787150Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T09:04:06.787192Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T09:04:06.787218Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T09:04:06.787267Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:04:06.787336Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T09:04:06.787986Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T09:04:06.788278Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T09:04:06.854204Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T09:04:06.854257Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:04:06.872621Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T09:04:06.872814Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T09:04:06.872991Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T09:04:06.879171Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: 
TTxUpgradeSchema.Complete 2025-05-07T09:04:06.879508Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T09:04:06.880244Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:06.880440Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T09:04:06.885543Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:06.887295Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:06.887374Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:06.887458Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T09:04:06.887509Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:06.887635Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T09:04:06.887918Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T09:04:06.896314Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T09:04:07.029149Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T09:04:07.029341Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:07.029511Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T09:04:07.029728Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T09:04:07.029793Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:07.031880Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 
2025-05-07T09:04:07.032013Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T09:04:07.032185Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:07.032244Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T09:04:07.032281Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T09:04:07.032309Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T09:04:07.034025Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:07.034106Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T09:04:07.034152Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T09:04:07.035911Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:07.035958Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:07.035999Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:07.036057Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T09:04:07.039406Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T09:04:07.041500Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T09:04:07.041742Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T09:04:07.042868Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:07.043032Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } 
Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:04:07.043104Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:07.043442Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T09:04:07.043509Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:07.043702Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T09:04:07.043803Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T09:04:07.046219Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:07.046307Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:07.046500Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:07.046544Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 
: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 108 Coordinator: 72057594046316545 AckTo { RawX1: 131 RawX2: 8589936747 } } Step: 5000004 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:04:08.853121Z node 2 :FLAT_TX_SCHEMESHARD INFO: alter_store.cpp:199: TAlterOlapStore TPropose operationId# 108:0 HandleReply TEvOperationPlan at tablet: 72057594046678944, stepId: 5000004 2025-05-07T09:04:08.853301Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 108:0 128 -> 129 2025-05-07T09:04:08.853489Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-05-07T09:04:08.853553Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 3 2025-05-07T09:04:08.854221Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186233409549;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=108;fline=tx_controller.cpp:214;event=finished_tx;tx_id=108; FAKE_COORDINATOR: advance: minStep5000004 State->FrontStep: 5000004 2025-05-07T09:04:08.855898Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:08.855956Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 108, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:08.856130Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 108, path id: [OwnerId: 72057594046678944, LocalPathId: 5] 2025-05-07T09:04:08.856289Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:08.856336Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [2:335:2311], at schemeshard: 72057594046678944, txId: 108, path id: 1 2025-05-07T09:04:08.856398Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [2:335:2311], at schemeshard: 72057594046678944, txId: 108, path id: 5 2025-05-07T09:04:08.856473Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 108:0, at schemeshard: 72057594046678944 2025-05-07T09:04:08.856531Z node 2 :FLAT_TX_SCHEMESHARD INFO: alter_store.cpp:305: TAlterOlapStore TProposedWaitParts operationId# 108:0 ProgressState at tablet: 72057594046678944 2025-05-07T09:04:08.856598Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: alter_store.cpp:332: TAlterOlapStore TProposedWaitParts operationId# 108:0 ProgressState wait for NotifyTxCompletionResult tabletId: 72075186233409549 2025-05-07T09:04:08.857685Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 3 LocalPathId: 1 Version: 8 PathOwnerId: 72057594046678944, cookie: 108 2025-05-07T09:04:08.857782Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 
72057594046678944, msg: Owner: 72057594046678944 Generation: 3 LocalPathId: 1 Version: 8 PathOwnerId: 72057594046678944, cookie: 108 2025-05-07T09:04:08.857821Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 108 2025-05-07T09:04:08.857862Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 108, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 8 2025-05-07T09:04:08.857931Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-05-07T09:04:08.858867Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 3 LocalPathId: 5 Version: 5 PathOwnerId: 72057594046678944, cookie: 108 2025-05-07T09:04:08.858962Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 3 LocalPathId: 5 Version: 5 PathOwnerId: 72057594046678944, cookie: 108 2025-05-07T09:04:08.858994Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 108 2025-05-07T09:04:08.859022Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 108, pathId: [OwnerId: 72057594046678944, LocalPathId: 5], version: 5 2025-05-07T09:04:08.859051Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 4 2025-05-07T09:04:08.859117Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 108, ready parts: 0/1, is published: true 2025-05-07T09:04:08.860916Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 108:0 from tablet: 72057594046678944 to tablet: 72075186233409549 cookie: 72057594046678944:4 msg type: 275382275 2025-05-07T09:04:08.862485Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 108 2025-05-07T09:04:08.863531Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 108 2025-05-07T09:04:08.875862Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6106: Handle TEvNotifyTxCompletionResult, at schemeshard: 72057594046678944, message: Origin: 72075186233409549 TxId: 108 2025-05-07T09:04:08.875930Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation FindRelatedPartByTabletId, TxId: 108, tablet: 72075186233409549, partId: 0 2025-05-07T09:04:08.876065Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 108:0, at schemeshard: 72057594046678944, message: Origin: 72075186233409549 TxId: 108 2025-05-07T09:04:08.876118Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 108:0 129 -> 240 FAKE_COORDINATOR: Erasing txId 108 2025-05-07T09:04:08.878165Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 108:0, at schemeshard: 72057594046678944 2025-05-07T09:04:08.878300Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 108:0, at schemeshard: 72057594046678944 2025-05-07T09:04:08.878331Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046678944] TDone opId# 108:0 ProgressState 2025-05-07T09:04:08.878434Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#108:0 progress is 1/1 2025-05-07T09:04:08.878465Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 108 ready parts: 1/1 2025-05-07T09:04:08.878501Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#108:0 progress is 1/1 2025-05-07T09:04:08.878533Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 108 ready parts: 1/1 2025-05-07T09:04:08.878567Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 108, ready parts: 1/1, is published: true 2025-05-07T09:04:08.878624Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [2:490:2439] message: TxId: 108 2025-05-07T09:04:08.878664Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 108 ready parts: 1/1 2025-05-07T09:04:08.878707Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 108:0 2025-05-07T09:04:08.878731Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 108:0 2025-05-07T09:04:08.878839Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 3 2025-05-07T09:04:08.880491Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 108: got EvNotifyTxCompletionResult 2025-05-07T09:04:08.880536Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 108: satisfy waiter [2:879:2792] TestWaitNotification: OK eventTxId 108 TestModificationResults wait txId: 109 2025-05-07T09:04:08.883011Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpAlterColumnStore AlterColumnStore { Name: "OlapStore1" AlterSchemaPresets { Name: "default" AlterSchema { AddColumns { Name: "comment2" Type: "Utf8" } } } } } TxId: 109 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T09:04:08.883191Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: alter_store.cpp:465: TAlterOlapStore Propose, path: /MyRoot/OlapStore1, opId: 109:0, at schemeshard: 72057594046678944 2025-05-07T09:04:08.883478Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 109:1, propose status:StatusSchemeError, reason: Too many columns. new: 4. Limit: 3, at schemeshard: 72057594046678944 2025-05-07T09:04:08.885413Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 109, response: Status: StatusSchemeError Reason: "Too many columns. new: 4. 
Limit: 3" TxId: 109 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:04:08.885615Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 109, database: /MyRoot, subject: , status: StatusSchemeError, reason: Too many columns. new: 4. Limit: 3, operation: ALTER COLUMN STORE, path: /MyRoot/OlapStore1 TestModificationResult got TxId: 109, wait until txId: 109 TestWaitNotification wait txId: 109 2025-05-07T09:04:08.885935Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 109: send EvNotifyTxCompletion 2025-05-07T09:04:08.885985Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 109 2025-05-07T09:04:08.886393Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 109, at schemeshard: 72057594046678944 2025-05-07T09:04:08.886471Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 109: got EvNotifyTxCompletionResult 2025-05-07T09:04:08.886506Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 109: satisfy waiter [2:914:2827] TestWaitNotification: OK eventTxId 109 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/controller/ut_dst_creator/unittest >> DstCreator::CannotFindColumn [GOOD] Test command err: 2025-05-07T09:04:01.706355Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501626994736970012:2069];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:04:01.706473Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002942/r3tmp/tmplgh6Wd/pdisk_1.dat 2025-05-07T09:04:02.387777Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:04:02.389658Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:04:02.389753Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:04:02.425891Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:4921 TServer::EnableGrpc on GrpcPort 8104, node 1 2025-05-07T09:04:02.775165Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:04:02.775218Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:04:02.775234Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:04:02.775358Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:4921 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:04:03.329922Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:04:03.348153Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... TClient::Ls request: /Root/Table TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710658 CreateStep: 1746608643479 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" Key... (TRUNCATED) TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1746608643388 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 3 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710658 CreateStep: 1746608643479 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: ".sys" PathId: 1844... 
(TRUNCATED) 2025-05-07T09:04:03.528315Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-05-07T09:04:03.528570Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-05-07T09:04:03.528588Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:87: [DstCreator][rid 1][tid 1] Get table profiles 2025-05-07T09:04:03.529358Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:106: [DstCreator][rid 1][tid 1] Handle NKikimr::NConsole::TEvConfigsDispatcher::TEvGetConfigResponse 2025-05-07T09:04:04.511436Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:158: [DstCreator][rid 1][tid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribeTableResponse { Result: { name: Table, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1746608643479, tx_id: 281474976710658 } } } 2025-05-07T09:04:04.513779Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:249: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxUserProxy::TEvAllocateTxIdResult 2025-05-07T09:04:04.515615Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 2025-05-07T09:04:04.516569Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:279: [DstCreator][rid 1][tid 1] Handle {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976710659} 2025-05-07T09:04:04.516593Z node 1 :REPLICATION_CONTROLLER DEBUG: dst_creator.cpp:306: [DstCreator][rid 1][tid 1] Subscribe tx: txId# 281474976710659 2025-05-07T09:04:04.544409Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:311: [DstCreator][rid 1][tid 1] Handle NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976710659 2025-05-07T09:04:04.544437Z node 1 :REPLICATION_CONTROLLER INFO: dst_creator.cpp:585: [DstCreator][rid 1][tid 1] Success: dstPathId# [OwnerId: 72057594046644480, LocalPathId: 3] TClient::Ls request: /Root/Replicated TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Replicated" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710659 CreateStep: 1746608644585 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" 
ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Replicated" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "ke... (TRUNCATED) 2025-05-07T09:04:05.179830Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7501627010625681931:2065];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:04:05.179876Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002942/r3tmp/tmpnCoxem/pdisk_1.dat 2025-05-07T09:04:05.287272Z node 2 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:04:05.331126Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:04:05.331249Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:04:05.334498Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:16446 TServer::EnableGrpc on GrpcPort 21308, node 2 2025-05-07T09:04:05.518872Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:04:05.518896Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:04:05.518903Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:04:05.519041Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:16446 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:04:05.786054Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T09:04:05.792059Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-05-07T09:04:05.794838Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:04:05.898053Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1746608645838 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1746608645964 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: "Src" PathId: 2 S... (TRUNCATED) TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1746608645838 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1746608645964 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: "Src" PathId: 2 S... 
(TRUNCATED) 2025-05-07T09:04:05.933180Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-05-07T09:04:05.933364Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-05-07T09:04:05.933379Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:87: [DstCreator][rid 1][tid 1] Get table profiles 2025-05-07T09:04:05.933851Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:106: [DstCreator][rid 1][tid 1] Handle NKikimr::NConsole::TEvConfigsDispatcher::TEvGetConfigResponse 2025-05-07T09:04:08.093989Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:158: [DstCreator][rid 1][tid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribeTableResponse { Result: { name: Src, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1746608645901, tx_id: 281474976715658 } } } 2025-05-07T09:04:08.094227Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:249: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxUserProxy::TEvAllocateTxIdResult 2025-05-07T09:04:08.095657Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:279: [DstCreator][rid 1][tid 1] Handle {TEvModifySchemeTransactionResult Status# StatusAlreadyExists txid# 281474976715660 Reason# Check failed: path: '/Root/Dst', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 3], type: EPathTypeTable, state: EPathStateNoChanges)} 2025-05-07T09:04:08.096690Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:335: [DstCreator][rid 1][tid 1] Handle NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 20 Record# Status: StatusSuccess Path: "/Root/Dst" PathDescription { Self { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1746608645964 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Dst" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value2" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } 
KeyColumnNames: "key" KeyColumnIds: 1 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 1 } } TableSchemaVersion: 1 IsBackup: false ReplicationConfig { Mode: REPLICATION_MODE_READ_ONLY ConsistencyLevel: CONSISTENCY_LEVEL_ROW } IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } 
UserAttributes { Key: "__async_replica" Value: "true" } } PathId: 3 PathOwnerId: 72057594046644480 2025-05-07T09:04:08.096947Z node 2 :REPLICATION_CONTROLLER ERROR: dst_creator.cpp:594: [DstCreator][rid 1][tid 1] Error: status# StatusSchemeError, reason# Cannot find column: name: value ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::CreateSubDomainWithoutTablets [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140] 2025-05-07T09:04:08.966188Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T09:04:08.966265Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:04:08.966307Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T09:04:08.966342Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T09:04:08.966396Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T09:04:08.966425Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T09:04:08.966491Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:04:08.966575Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T09:04:08.967298Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T09:04:08.967663Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T09:04:09.034338Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T09:04:09.034412Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:04:09.046751Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T09:04:09.046897Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T09:04:09.047060Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T09:04:09.051734Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 
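The StatusSchemeError above ("Cannot find column: name: value") is DstCreator rejecting a destination table whose column set does not cover the source's (dst has "value2" where src has "value"). The real check lives in dst_creator.cpp (reported at line 594 in the log); the following is only a minimal standalone C++ sketch of that kind of comparison, with hypothetical names (TColumn, CheckColumnsPresent) that are not the actual NKikimr API:

// Hypothetical sketch (not the actual dst_creator.cpp logic): verify that every
// column of the source table exists in the destination with the same type,
// reporting the first missing name the way the StatusSchemeError above does.
#include <iostream>
#include <optional>
#include <string>
#include <unordered_map>
#include <vector>

struct TColumn {
    std::string Name;
    std::string Type;  // e.g. "Uint32", "Utf8"
};

// Returns an error reason if some source column is absent from dst,
// e.g. "Cannot find column: name: value".
std::optional<std::string> CheckColumnsPresent(
        const std::vector<TColumn>& src, const std::vector<TColumn>& dst) {
    std::unordered_map<std::string, std::string> dstByName;
    for (const auto& c : dst) {
        dstByName.emplace(c.Name, c.Type);
    }
    for (const auto& c : src) {
        auto it = dstByName.find(c.Name);
        if (it == dstByName.end()) {
            return "Cannot find column: name: " + c.Name;
        }
        if (it->second != c.Type) {
            return "Column type mismatch: name: " + c.Name;
        }
    }
    return std::nullopt;  // schemas are compatible
}

int main() {
    // Mirrors the failing case: src has (key, value), dst has (key, value2).
    std::vector<TColumn> src{{"key", "Uint32"}, {"value", "Utf8"}};
    std::vector<TColumn> dst{{"key", "Uint32"}, {"value2", "Utf8"}};
    if (auto err = CheckColumnsPresent(src, dst)) {
        std::cout << "StatusSchemeError, reason# " << *err << "\n";
    }
}

Compiled with any C++17 compiler, this prints the same reason string the test asserts on; the actual controller additionally compares types, key layout, and replication attributes.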
2025-05-07T09:04:09.051979Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T09:04:09.052464Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:09.052638Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T09:04:09.054890Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:09.056124Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:09.056189Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:09.056256Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T09:04:09.056300Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:09.056408Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T09:04:09.056652Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T09:04:09.062340Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T09:04:09.160888Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T09:04:09.161107Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:09.161306Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T09:04:09.161566Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T09:04:09.161617Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:09.164065Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:09.164200Z node 
1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T09:04:09.164420Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:09.164490Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T09:04:09.164531Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T09:04:09.164564Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T09:04:09.166832Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:09.166905Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T09:04:09.166967Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T09:04:09.169168Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:09.169245Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:09.169307Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:09.169378Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T09:04:09.173216Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T09:04:09.175586Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T09:04:09.175799Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T09:04:09.176914Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:09.177025Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 
TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:04:09.177065Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:09.177358Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T09:04:09.177423Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:09.177613Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T09:04:09.177688Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T09:04:09.179799Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:09.179862Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:09.180022Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:09.180053Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 
_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-05-07T09:04:09.217887Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-05-07T09:04:09.219383Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:09.219414Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 100, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:09.219533Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 100, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-05-07T09:04:09.219591Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:09.219612Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:205:2207], at schemeshard: 72057594046678944, txId: 100, path id: 1 2025-05-07T09:04:09.219655Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:205:2207], at schemeshard: 72057594046678944, txId: 100, path id: 2 FAKE_COORDINATOR: Erasing txId 100 2025-05-07T09:04:09.219907Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 100:0, at schemeshard: 72057594046678944 2025-05-07T09:04:09.219939Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046678944] TDone opId# 100:0 ProgressState 2025-05-07T09:04:09.220008Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#100:0 progress is 1/1 2025-05-07T09:04:09.220031Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 100 ready parts: 1/1 2025-05-07T09:04:09.220064Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#100:0 progress is 1/1 2025-05-07T09:04:09.220087Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 100 ready parts: 1/1 2025-05-07T09:04:09.220115Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 100, ready parts: 1/1, is published: false 2025-05-07T09:04:09.220156Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 100 ready parts: 1/1 2025-05-07T09:04:09.220185Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 100:0 2025-05-07T09:04:09.220213Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 100:0 2025-05-07T09:04:09.220263Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-05-07T09:04:09.220308Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 100, publications: 2, subscribers: 0 2025-05-07T09:04:09.220332Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 100, [OwnerId: 72057594046678944, LocalPathId: 1], 5 2025-05-07T09:04:09.220349Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 100, [OwnerId: 72057594046678944, LocalPathId: 2], 3 2025-05-07T09:04:09.220790Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 100 2025-05-07T09:04:09.220867Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 100 2025-05-07T09:04:09.220894Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 100 2025-05-07T09:04:09.220941Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 100, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 5 2025-05-07T09:04:09.220977Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-05-07T09:04:09.221294Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 100 2025-05-07T09:04:09.221338Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 100 2025-05-07T09:04:09.221359Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 100 2025-05-07T09:04:09.221419Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 100, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 3 2025-05-07T09:04:09.221438Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-05-07T09:04:09.221484Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 100, subscribers: 0 2025-05-07T09:04:09.224309Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 100 2025-05-07T09:04:09.224395Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 100 TestModificationResult got TxId: 100, wait until txId: 100 TestWaitNotification wait txId: 100 2025-05-07T09:04:09.224579Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 100: send EvNotifyTxCompletion 2025-05-07T09:04:09.224618Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 100 2025-05-07T09:04:09.225066Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 100, at schemeshard: 72057594046678944 2025-05-07T09:04:09.225195Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 100: got EvNotifyTxCompletionResult 2025-05-07T09:04:09.225232Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 100: satisfy waiter [1:306:2297] TestWaitNotification: OK eventTxId 100 2025-05-07T09:04:09.225706Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T09:04:09.226020Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 235us result status StatusSuccess 2025-05-07T09:04:09.226423Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0" PathDescription { Self { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 1 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:04:09.226944Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T09:04:09.227116Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 187us result status StatusSuccess 2025-05-07T09:04:09.227473Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 
CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 3 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/controller/ut_dst_creator/unittest >> DstCreator::KeyColumnNameMismatch [GOOD] Test command err: 2025-05-07T09:04:01.684503Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501626993928835838:2068];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:04:01.684553Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002959/r3tmp/tmpgQUU9t/pdisk_1.dat 2025-05-07T09:04:02.380380Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:04:02.409918Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:04:02.410141Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:04:02.427644Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:3224 TServer::EnableGrpc on GrpcPort 17251, node 1 2025-05-07T09:04:02.773906Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:04:02.773932Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:04:02.773943Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:04:02.774157Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3224 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:04:03.349288Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:04:03.375984Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... TClient::Ls request: /Root/Table TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710658 CreateStep: 1746608643486 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" Key... (TRUNCATED) TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1746608643416 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 3 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710658 CreateStep: 1746608643486 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: ".sys" PathId: 1844... 
(TRUNCATED) 2025-05-07T09:04:03.528671Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-05-07T09:04:03.528805Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-05-07T09:04:03.528820Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:87: [DstCreator][rid 1][tid 1] Get table profiles 2025-05-07T09:04:03.529564Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:106: [DstCreator][rid 1][tid 1] Handle NKikimr::NConsole::TEvConfigsDispatcher::TEvGetConfigResponse 2025-05-07T09:04:04.738403Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:158: [DstCreator][rid 1][tid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribeTableResponse { Result: { name: Table, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1746608643486, tx_id: 281474976710658 } } } 2025-05-07T09:04:04.738839Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:249: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxUserProxy::TEvAllocateTxIdResult 2025-05-07T09:04:04.740476Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 2025-05-07T09:04:04.741699Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:279: [DstCreator][rid 1][tid 1] Handle {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976710659} 2025-05-07T09:04:04.741710Z node 1 :REPLICATION_CONTROLLER DEBUG: dst_creator.cpp:306: [DstCreator][rid 1][tid 1] Subscribe tx: txId# 281474976710659 2025-05-07T09:04:04.771633Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:311: [DstCreator][rid 1][tid 1] Handle NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976710659 2025-05-07T09:04:04.771667Z node 1 :REPLICATION_CONTROLLER INFO: dst_creator.cpp:585: [DstCreator][rid 1][tid 1] Success: dstPathId# [OwnerId: 72057594046644480, LocalPathId: 3] TClient::Ls request: /Root/Replicated TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Replicated" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710659 CreateStep: 1746608644809 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" 
ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Replicated" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "ke... (TRUNCATED) 2025-05-07T09:04:05.325121Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7501627012061700930:2060];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:04:05.329003Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002959/r3tmp/tmpn5ZTLk/pdisk_1.dat 2025-05-07T09:04:05.508655Z node 2 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:04:05.512960Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:04:05.513049Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:04:05.514882Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:15749 TServer::EnableGrpc on GrpcPort 17897, node 2 2025-05-07T09:04:05.798412Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:04:05.798435Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:04:05.798441Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:04:05.798556Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:15749 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:04:06.091513Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T09:04:06.100680Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:04:06.168471Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1746608646139 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1746608646237 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: "Src" PathId: 2 S... (TRUNCATED) TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1746608646139 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1746608646237 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: "Src" PathId: 2 S... 
(TRUNCATED) 2025-05-07T09:04:06.200996Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-05-07T09:04:06.201141Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-05-07T09:04:06.201155Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:87: [DstCreator][rid 1][tid 1] Get table profiles 2025-05-07T09:04:06.202508Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:106: [DstCreator][rid 1][tid 1] Handle NKikimr::NConsole::TEvConfigsDispatcher::TEvGetConfigResponse 2025-05-07T09:04:08.436659Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:158: [DstCreator][rid 1][tid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribeTableResponse { Result: { name: Src, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1746608646209, tx_id: 281474976715658 } } } 2025-05-07T09:04:08.436928Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:249: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxUserProxy::TEvAllocateTxIdResult 2025-05-07T09:04:08.438281Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:279: [DstCreator][rid 1][tid 1] Handle {TEvModifySchemeTransactionResult Status# StatusAlreadyExists txid# 281474976715660 Reason# Check failed: path: '/Root/Dst', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 3], type: EPathTypeTable, state: EPathStateNoChanges)} 2025-05-07T09:04:08.439312Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:335: [DstCreator][rid 1][tid 1] Handle NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 20 Record# Status: StatusSuccess Path: "/Root/Dst" PathDescription { Self { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1746608646237 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Dst" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } 
KeyColumnNames: "value" KeyColumnIds: 2 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 1 } } TableSchemaVersion: 1 IsBackup: false ReplicationConfig { Mode: REPLICATION_MODE_READ_ONLY ConsistencyLevel: CONSISTENCY_LEVEL_ROW } IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } 
UserAttributes { Key: "__async_replica" Value: "true" } } PathId: 3 PathOwnerId: 72057594046644480 2025-05-07T09:04:08.439521Z node 2 :REPLICATION_CONTROLLER ERROR: dst_creator.cpp:594: [DstCreator][rid 1][tid 1] Error: status# StatusSchemeError, reason# Key column name mismatch: position: 0, expected: key, got: value >> DataShardVolatile::DistributedWrite [GOOD] >> DataShardVolatile::DistributedWriteBrokenLock >> TSchemeShardSubDomainTest::CreateWithoutPlanResolution ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/controller/ut_dst_creator/unittest >> DstCreator::WithAsyncIndex [GOOD] Test command err: 2025-05-07T09:04:01.684714Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501626995332048068:2066];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:04:01.684831Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002950/r3tmp/tmpfSdIui/pdisk_1.dat 2025-05-07T09:04:02.384821Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:04:02.386865Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:04:02.386938Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:04:02.418943Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:11376 TServer::EnableGrpc on GrpcPort 24619, node 1 2025-05-07T09:04:02.774025Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:04:02.774052Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:04:02.774059Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:04:02.774228Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:11376 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
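The StatusSchemeError above ("Key column name mismatch: position: 0, expected: key, got: value") is DstCreator refusing to reuse a pre-created destination table whose primary key differs from the source's: both tables contain columns named key and value, but the destination is keyed by value. A minimal standalone sketch of a positional key-column check that yields this kind of message (illustrative names and signatures only, not the actual dst_creator.cpp implementation):

    #include <cstddef>
    #include <iostream>
    #include <optional>
    #include <string>
    #include <vector>

    // Positional comparison of key columns: the destination must list the same
    // key column names, in the same order, as the source.
    std::optional<std::string> CheckKeyColumns(
            const std::vector<std::string>& expected,
            const std::vector<std::string>& got) {
        if (expected.size() != got.size()) {
            return "Key columns size mismatch: expected: " +
                   std::to_string(expected.size()) +
                   ", got: " + std::to_string(got.size());
        }
        for (std::size_t i = 0; i < expected.size(); ++i) {
            if (expected[i] != got[i]) {
                return "Key column name mismatch: position: " + std::to_string(i) +
                       ", expected: " + expected[i] + ", got: " + got[i];
            }
        }
        return std::nullopt; // schemas agree on the primary key
    }

    int main() {
        // Mirrors the failing case above: source keyed by "key",
        // pre-created destination keyed by "value".
        if (auto err = CheckKeyColumns({"key"}, {"value"})) {
            std::cout << "Error: status# StatusSchemeError, reason# " << *err << '\n';
        }
        return 0;
    }

Comparing by position rather than by set membership is why the report says position: 0 even though both schemas contain the same column names.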
2025-05-07T09:04:03.327608Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:04:03.347300Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... TClient::Ls request: /Root/Table TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710658 CreateStep: 1746608643465 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" Key... (TRUNCATED) TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1746608643388 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 3 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710658 CreateStep: 1746608643465 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: ".sys" PathId: 1844... 
(TRUNCATED) 2025-05-07T09:04:03.496361Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-05-07T09:04:03.496495Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-05-07T09:04:03.496509Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:87: [DstCreator][rid 1][tid 1] Get table profiles 2025-05-07T09:04:03.497124Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:106: [DstCreator][rid 1][tid 1] Handle NKikimr::NConsole::TEvConfigsDispatcher::TEvGetConfigResponse 2025-05-07T09:04:04.718436Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:158: [DstCreator][rid 1][tid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribeTableResponse { Result: { name: Table, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1746608643465, tx_id: 281474976710658 } } } 2025-05-07T09:04:04.718957Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:249: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxUserProxy::TEvAllocateTxIdResult 2025-05-07T09:04:04.720790Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:1, at schemeshard: 72057594046644480 2025-05-07T09:04:04.722643Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:279: [DstCreator][rid 1][tid 1] Handle {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976710659} 2025-05-07T09:04:04.722672Z node 1 :REPLICATION_CONTROLLER DEBUG: dst_creator.cpp:306: [DstCreator][rid 1][tid 1] Subscribe tx: txId# 281474976710659 2025-05-07T09:04:04.789346Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:311: [DstCreator][rid 1][tid 1] Handle NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976710659 2025-05-07T09:04:04.789378Z node 1 :REPLICATION_CONTROLLER INFO: dst_creator.cpp:585: [DstCreator][rid 1][tid 1] Success: dstPathId# [OwnerId: 72057594046644480, LocalPathId: 4] TClient::Ls request: /Root/Dir/Replicated TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Replicated" PathId: 4 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710659 CreateStep: 1746608644830 ParentPathId: 3 PathState: EPathStateNoChanges Owner: "root@builtin" 
ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Replicated" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "ke... (TRUNCATED) 2025-05-07T09:04:05.519885Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7501627011021003498:2067];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:04:05.520019Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002950/r3tmp/tmplSPy8e/pdisk_1.dat 2025-05-07T09:04:05.632126Z node 2 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:04:05.656322Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:04:05.656410Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:04:05.658246Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:1063 TServer::EnableGrpc on GrpcPort 25394, node 2 2025-05-07T09:04:05.846625Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:04:05.846652Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:04:05.846661Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:04:05.846770Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:1063 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:04:06.129080Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T09:04:06.136142Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... TClient::Ls request: /Root/Table TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1746608646433 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: true } Table { Name: "Table" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyCo... (TRUNCATED) TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1746608646174 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 6 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 6 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 4 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1746608646433 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: true } Children { Name: ".sys" PathId: 18446... 
(TRUNCATED) 2025-05-07T09:04:06.441237Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-05-07T09:04:06.441356Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-05-07T09:04:06.441375Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:87: [DstCreator][rid 1][tid 1] Get table profiles 2025-05-07T09:04:06.441791Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:106: [DstCreator][rid 1][tid 1] Handle NKikimr::NConsole::TEvConfigsDispatcher::TEvGetConfigResponse 2025-05-07T09:04:08.372276Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:158: [DstCreator][rid 1][tid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribeTableResponse { Result: { name: Table, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1746608646433, tx_id: 281474976715658 } } } 2025-05-07T09:04:08.372642Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:249: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxUserProxy::TEvAllocateTxIdResult 2025-05-07T09:04:08.374210Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 2025-05-07T09:04:08.374968Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:279: [DstCreator][rid 1][tid 1] Handle {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715659} 2025-05-07T09:04:08.375005Z node 2 :REPLICATION_CONTROLLER DEBUG: dst_creator.cpp:306: [DstCreator][rid 1][tid 1] Subscribe tx: txId# 281474976715659 2025-05-07T09:04:08.401028Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:311: [DstCreator][rid 1][tid 1] Handle NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976715659 2025-05-07T09:04:08.401060Z node 2 :REPLICATION_CONTROLLER INFO: dst_creator.cpp:585: [DstCreator][rid 1][tid 1] Success: dstPathId# [OwnerId: 72057594046644480, LocalPathId: 5] TClient::Ls request: /Root/Replicated TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Replicated" PathId: 5 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1746608648442 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" 
ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Replicated" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key... (TRUNCATED) ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/controller/ut_dst_creator/unittest >> DstCreator::SamePartitionCount [GOOD] Test command err: 2025-05-07T09:04:01.684324Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501626992799525306:2066];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:04:01.684385Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002978/r3tmp/tmpg3pSMd/pdisk_1.dat 2025-05-07T09:04:02.383348Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:04:02.383498Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:04:02.386268Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:04:02.428415Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TClient is connected to server localhost:18661 TServer::EnableGrpc on GrpcPort 4843, node 1 2025-05-07T09:04:02.776340Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:04:02.776368Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:04:02.776377Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:04:02.776496Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:18661 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-05-07T09:04:03.328346Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:04:03.346173Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710658:0, at schemeshard: 72057594046644480 2025-05-07T09:04:03.351644Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... TClient::Ls request: /Root/Table TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710659 CreateStep: 1746608643486 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" Key... (TRUNCATED) TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1746608643388 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "user@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710659 CreateStep: 1746608643486 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: ".sys" PathId: 1844... 
(TRUNCATED) 2025-05-07T09:04:03.509603Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-05-07T09:04:03.509717Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-05-07T09:04:03.509729Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:87: [DstCreator][rid 1][tid 1] Get table profiles 2025-05-07T09:04:03.510373Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:106: [DstCreator][rid 1][tid 1] Handle NKikimr::NConsole::TEvConfigsDispatcher::TEvGetConfigResponse 2025-05-07T09:04:04.630305Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:158: [DstCreator][rid 1][tid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribeTableResponse { Result: { name: Table, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1746608643486, tx_id: 281474976710659 } } } 2025-05-07T09:04:04.630659Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:249: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxUserProxy::TEvAllocateTxIdResult 2025-05-07T09:04:04.632291Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 2025-05-07T09:04:04.633457Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:279: [DstCreator][rid 1][tid 1] Handle {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976710660} 2025-05-07T09:04:04.633468Z node 1 :REPLICATION_CONTROLLER DEBUG: dst_creator.cpp:306: [DstCreator][rid 1][tid 1] Subscribe tx: txId# 281474976710660 2025-05-07T09:04:04.704139Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:311: [DstCreator][rid 1][tid 1] Handle NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976710660 2025-05-07T09:04:04.704169Z node 1 :REPLICATION_CONTROLLER INFO: dst_creator.cpp:585: [DstCreator][rid 1][tid 1] Success: dstPathId# [OwnerId: 72057594046644480, LocalPathId: 3] TClient::Ls request: /Root/Replicated TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Replicated" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710660 CreateStep: 1746608644746 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "user@builtin" 
ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Replicated" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "ke... (TRUNCATED) 2025-05-07T09:04:05.377602Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7501627011939401964:2068];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:04:05.377650Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002978/r3tmp/tmpP0prVa/pdisk_1.dat 2025-05-07T09:04:05.509444Z node 2 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:04:05.531362Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:04:05.531463Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:04:05.532900Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:22452 TServer::EnableGrpc on GrpcPort 10139, node 2 2025-05-07T09:04:05.720469Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:04:05.720490Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:04:05.720495Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:04:05.720589Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:22452 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:04:05.997481Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T09:04:06.005922Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... TClient::Ls request: /Root/Table TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1746608646111 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" Key... (TRUNCATED) TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1746608646048 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 3 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1746608646111 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: ".sys" PathId: 1844... 
(TRUNCATED) 2025-05-07T09:04:06.083154Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-05-07T09:04:06.083244Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-05-07T09:04:06.083253Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:87: [DstCreator][rid 1][tid 1] Get table profiles 2025-05-07T09:04:06.083626Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:106: [DstCreator][rid 1][tid 1] Handle NKikimr::NConsole::TEvConfigsDispatcher::TEvGetConfigResponse 2025-05-07T09:04:08.442705Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:158: [DstCreator][rid 1][tid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribeTableResponse { Result: { name: Table, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1746608646111, tx_id: 281474976715658 } } } 2025-05-07T09:04:08.443134Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:249: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxUserProxy::TEvAllocateTxIdResult 2025-05-07T09:04:08.444704Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 2025-05-07T09:04:08.445482Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:279: [DstCreator][rid 1][tid 1] Handle {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715659} 2025-05-07T09:04:08.445503Z node 2 :REPLICATION_CONTROLLER DEBUG: dst_creator.cpp:306: [DstCreator][rid 1][tid 1] Subscribe tx: txId# 281474976715659 2025-05-07T09:04:08.479976Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:311: [DstCreator][rid 1][tid 1] Handle NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976715659 2025-05-07T09:04:08.480005Z node 2 :REPLICATION_CONTROLLER INFO: dst_creator.cpp:585: [DstCreator][rid 1][tid 1] Success: dstPathId# [OwnerId: 72057594046644480, LocalPathId: 3] TClient::Ls request: /Root/Table TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1746608646111 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" 
EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" Key... (TRUNCATED) TClient::Ls request: /Root/Replicated TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Replicated" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1746608648519 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Replicated" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "ke... (TRUNCATED) >> TopicService::DifferentConsumers_TheRangesOverlap [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/controller/ut_dst_creator/unittest >> DstCreator::ColumnTypeMismatch [GOOD] Test command err: 2025-05-07T09:04:01.688725Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501626993929849328:2068];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:04:01.688813Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00297f/r3tmp/tmpSajjMO/pdisk_1.dat 2025-05-07T09:04:02.383792Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:04:02.383922Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:04:02.387591Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:04:02.449372Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TClient is connected to server localhost:27823 TServer::EnableGrpc on GrpcPort 29652, node 1 2025-05-07T09:04:02.773983Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:04:02.774017Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:04:02.774023Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:04:02.774135Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:27823 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:04:03.341849Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:04:03.357013Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T09:04:03.360624Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:04:03.525730Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1746608643402 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710659 CreateStep: 1746608643598 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: "Src" PathId: 2 S... 
(TRUNCATED) TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1746608643402 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710659 CreateStep: 1746608643598 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: "Src" PathId: 2 S... (TRUNCATED) 2025-05-07T09:04:03.561640Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-05-07T09:04:03.561764Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-05-07T09:04:03.561781Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:87: [DstCreator][rid 1][tid 1] Get table profiles 2025-05-07T09:04:03.562242Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:106: [DstCreator][rid 1][tid 1] Handle NKikimr::NConsole::TEvConfigsDispatcher::TEvGetConfigResponse 2025-05-07T09:04:04.785769Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:158: [DstCreator][rid 1][tid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribeTableResponse { Result: { name: Src, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1746608643472, tx_id: 281474976710658 } } } 2025-05-07T09:04:04.786256Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:249: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxUserProxy::TEvAllocateTxIdResult 2025-05-07T09:04:04.787878Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:279: [DstCreator][rid 1][tid 1] Handle {TEvModifySchemeTransactionResult Status# StatusAlreadyExists txid# 281474976710660 Reason# Check failed: path: '/Root/Dst', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 3], type: EPathTypeTable, state: EPathStateNoChanges)} 2025-05-07T09:04:04.790035Z node 1 :REPLICATION_CONTROLLER TRACE: 
dst_creator.cpp:335: [DstCreator][rid 1][tid 1] Handle NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 20 Record# Status: StatusSuccess Path: "/Root/Dst" PathDescription { Self { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710659 CreateStep: 1746608643598 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Dst" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "extra" Type: "Utf8" TypeId: 4608 Id: 3 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 1 } } TableSchemaVersion: 1 IsBackup: false ReplicationConfig { Mode: REPLICATION_MODE_REA ... 
CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:04:05.794639Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:04:05.794647Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:04:05.794754Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:22616 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:04:06.100189Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:04:06.106614Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:04:06.138786Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1746608646146 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1746608646209 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: "Src" PathId: 2 S... 
(TRUNCATED) TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1746608646146 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1746608646209 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: "Src" PathId: 2 S... (TRUNCATED) 2025-05-07T09:04:06.175308Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-05-07T09:04:06.175460Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-05-07T09:04:06.175471Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:87: [DstCreator][rid 1][tid 1] Get table profiles 2025-05-07T09:04:06.175949Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:106: [DstCreator][rid 1][tid 1] Handle NKikimr::NConsole::TEvConfigsDispatcher::TEvGetConfigResponse 2025-05-07T09:04:08.369706Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:158: [DstCreator][rid 1][tid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribeTableResponse { Result: { name: Src, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1746608646174, tx_id: 281474976715658 } } } 2025-05-07T09:04:08.369949Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:249: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxUserProxy::TEvAllocateTxIdResult 2025-05-07T09:04:08.371461Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:279: [DstCreator][rid 1][tid 1] Handle {TEvModifySchemeTransactionResult Status# StatusAlreadyExists txid# 281474976715660 Reason# Check failed: path: '/Root/Dst', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 3], type: EPathTypeTable, state: EPathStateNoChanges)} 2025-05-07T09:04:08.372513Z node 2 :REPLICATION_CONTROLLER TRACE: 
dst_creator.cpp:335: [DstCreator][rid 1][tid 1] Handle NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 20 Record# Status: StatusSuccess Path: "/Root/Dst" PathDescription { Self { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1746608646209 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Dst" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 1 } } TableSchemaVersion: 1 IsBackup: false ReplicationConfig { Mode: REPLICATION_MODE_READ_ONLY ConsistencyLevel: CONSISTENCY_LEVEL_ROW } IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 
RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } UserAttributes { Key: "__async_replica" Value: "true" } } PathId: 3 PathOwnerId: 72057594046644480 2025-05-07T09:04:08.372741Z node 2 :REPLICATION_CONTROLLER ERROR: dst_creator.cpp:594: [DstCreator][rid 1][tid 1] Error: status# StatusSchemeError, reason# Column type mismatch: name: value, expected: Utf8, got: Uint32 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/controller/ut_dst_creator/unittest >> DstCreator::KeyColumnsSizeMismatch [GOOD] Test command err: 2025-05-07T09:04:01.684256Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501626995080144318:2067];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:04:01.684375Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002979/r3tmp/tmptCeRjp/pdisk_1.dat 2025-05-07T09:04:02.390355Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:04:02.411412Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:04:02.411511Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:04:02.426330Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:26808 TServer::EnableGrpc on GrpcPort 25684, node 1 2025-05-07T09:04:02.778647Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:04:02.778677Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:04:02.778704Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:04:02.778824Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26808 WaitRootIsUp 'Root'... 
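
The "Column type mismatch: name: value, expected: Utf8, got: Uint32" failure above is DstCreator's schema comparison at work: the pre-existing /Root/Dst declares column "value" as Uint32 (see the describe result), while the source table's "value" is Utf8, so destination creation aborts with StatusSchemeError rather than silently adopting an incompatible table. Below is a minimal, self-contained C++ sketch of that kind of per-column type check; the types and function names are hypothetical illustrations, not YDB's actual dst_creator.cpp internals.

    // Hypothetical sketch of a per-column type compatibility check.
    #include <iostream>
    #include <optional>
    #include <string>
    #include <vector>

    struct TColumn {
        std::string Name;
        std::string Type; // e.g. "Uint32", "Utf8"
    };

    // Returns an error string in the same shape as the log message when a
    // destination column's type disagrees with the source column's type.
    std::optional<std::string> CheckColumnTypes(const std::vector<TColumn>& expected,
                                                const std::vector<TColumn>& got) {
        for (const auto& src : expected) {
            for (const auto& dst : got) {
                if (dst.Name == src.Name && dst.Type != src.Type) {
                    return "Column type mismatch: name: " + src.Name +
                           ", expected: " + src.Type + ", got: " + dst.Type;
                }
            }
        }
        return std::nullopt;
    }

    int main() {
        std::vector<TColumn> src = {{"key", "Uint32"}, {"value", "Utf8"}};
        std::vector<TColumn> dst = {{"key", "Uint32"}, {"value", "Uint32"}};
        if (auto err = CheckColumnTypes(src, dst)) {
            std::cout << *err << '\n'; // prints the same reason as the test log
        }
    }
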
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:04:03.336639Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:04:03.350590Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1746608643395 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version... (TRUNCATED) TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1746608643395 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version... 
(TRUNCATED) 2025-05-07T09:04:03.357404Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-05-07T09:04:03.357580Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-05-07T09:04:03.357602Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:87: [DstCreator][rid 1][tid 1] Get table profiles 2025-05-07T09:04:03.358281Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:106: [DstCreator][rid 1][tid 1] Handle NKikimr::NConsole::TEvConfigsDispatcher::TEvGetConfigResponse 2025-05-07T09:04:04.679913Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:158: [DstCreator][rid 1][tid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribeTableResponse { Result: { status: SCHEME_ERROR, issues: } } 2025-05-07T09:04:04.679982Z node 1 :REPLICATION_CONTROLLER ERROR: dst_creator.cpp:594: [DstCreator][rid 1][tid 1] Error: status# StatusSchemeError, reason# Cannot describe table: status: SCHEME_ERROR, issue: 2025-05-07T09:04:05.306314Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7501627009649716757:2060];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:04:05.306376Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002979/r3tmp/tmpXYcibN/pdisk_1.dat 2025-05-07T09:04:05.436959Z node 2 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:04:05.486441Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:04:05.486535Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:04:05.488196Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:13000 TServer::EnableGrpc on GrpcPort 10650, node 2 2025-05-07T09:04:05.681625Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:04:05.681646Z node 2 
:NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:04:05.681654Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:04:05.681797Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:13000 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:04:05.952530Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:04:05.963573Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:04:06.067838Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1746608646006 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1746608646132 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: "Src" PathId: 2 S... 
(TRUNCATED) TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1746608646006 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1746608646132 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: "Src" PathId: 2 S... (TRUNCATED) 2025-05-07T09:04:06.093654Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-05-07T09:04:06.093831Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-05-07T09:04:06.093855Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:87: [DstCreator][rid 1][tid 1] Get table profiles 2025-05-07T09:04:06.094237Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:106: [DstCreator][rid 1][tid 1] Handle NKikimr::NConsole::TEvConfigsDispatcher::TEvGetConfigResponse 2025-05-07T09:04:08.508703Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:158: [DstCreator][rid 1][tid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribeTableResponse { Result: { name: Src, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1746608646069, tx_id: 281474976715658 } } } 2025-05-07T09:04:08.509089Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:249: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxUserProxy::TEvAllocateTxIdResult 2025-05-07T09:04:08.510717Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:279: [DstCreator][rid 1][tid 1] Handle {TEvModifySchemeTransactionResult Status# StatusAlreadyExists txid# 281474976715660 Reason# Check failed: path: '/Root/Dst', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 3], type: EPathTypeTable, state: EPathStateNoChanges)} 2025-05-07T09:04:08.512704Z node 2 :REPLICATION_CONTROLLER TRACE: 
dst_creator.cpp:335: [DstCreator][rid 1][tid 1] Handle NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 20 Record# Status: StatusSuccess Path: "/Root/Dst" PathDescription { Self { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1746608646132 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Dst" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnNames: "value" KeyColumnIds: 1 KeyColumnIds: 2 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 1 } } TableSchemaVersion: 1 IsBackup: false ReplicationConfig { Mode: REPLICATION_MODE_READ_ONLY ConsistencyLevel: CONSISTENCY_LEVEL_ROW } IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 
RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } UserAttributes { Key: "__async_replica" Value: "true" } } PathId: 3 PathOwnerId: 72057594046644480 2025-05-07T09:04:08.512971Z node 2 :REPLICATION_CONTROLLER ERROR: dst_creator.cpp:594: [DstCreator][rid 1][tid 1] Error: status# StatusSchemeError, reason# Key columns size mismatch: expected: 1, got: 2 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/controller/ut_dst_creator/unittest >> DstCreator::EmptyReplicationConfig [GOOD] Test command err: 2025-05-07T09:04:01.684390Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501626994682715711:2067];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:04:01.684462Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00296c/r3tmp/tmppzElrI/pdisk_1.dat 2025-05-07T09:04:02.385859Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:04:02.388422Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:04:02.388496Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:04:02.425783Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:5253 TServer::EnableGrpc on GrpcPort 26684, node 1 2025-05-07T09:04:02.773991Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:04:02.774017Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:04:02.774026Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:04:02.774152Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:5253 WaitRootIsUp 'Root'... 
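
The "Key columns size mismatch: expected: 1, got: 2" error above is the analogous count check: the pre-existing /Root/Dst was described with KeyColumnNames: "key" KeyColumnNames: "value" (two key columns) while the source table keys on "key" alone. A sketch of that check follows, with the same caveat that the function is illustrative rather than YDB's real API.

    // Hypothetical sketch of the key-column-count check.
    #include <iostream>
    #include <optional>
    #include <string>
    #include <vector>

    std::optional<std::string> CheckKeyColumns(const std::vector<std::string>& expected,
                                               const std::vector<std::string>& got) {
        if (expected.size() != got.size()) {
            return "Key columns size mismatch: expected: " +
                   std::to_string(expected.size()) +
                   ", got: " + std::to_string(got.size());
        }
        return std::nullopt;
    }

    int main() {
        // Source keys on "key"; the existing destination keys on "key", "value".
        if (auto err = CheckKeyColumns({"key"}, {"key", "value"})) {
            std::cout << *err << '\n';
        }
    }
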
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:04:03.345678Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:04:03.363517Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-05-07T09:04:03.506928Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1746608643402 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710659 CreateStep: 1746608643584 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: "Src" PathId: 2 S... (TRUNCATED) TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1746608643402 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710659 CreateStep: 1746608643584 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: "Src" PathId: 2 S... 
(TRUNCATED) 2025-05-07T09:04:03.544967Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-05-07T09:04:03.545122Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-05-07T09:04:03.545137Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:87: [DstCreator][rid 1][tid 1] Get table profiles 2025-05-07T09:04:03.545819Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:106: [DstCreator][rid 1][tid 1] Handle NKikimr::NConsole::TEvConfigsDispatcher::TEvGetConfigResponse 2025-05-07T09:04:04.777485Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:158: [DstCreator][rid 1][tid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribeTableResponse { Result: { name: Src, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1746608643472, tx_id: 281474976710658 } } } 2025-05-07T09:04:04.777898Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:249: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxUserProxy::TEvAllocateTxIdResult 2025-05-07T09:04:04.779432Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:279: [DstCreator][rid 1][tid 1] Handle {TEvModifySchemeTransactionResult Status# StatusAlreadyExists txid# 281474976710660 Reason# Check failed: path: '/Root/Dst', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 3], type: EPathTypeTable, state: EPathStateNoChanges)} 2025-05-07T09:04:04.781385Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:335: [DstCreator][rid 1][tid 1] Handle NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 20 Record# Status: StatusSuccess Path: "/Root/Dst" PathDescription { Self { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710659 CreateStep: 1746608643584 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Dst" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } 
KeyColumnNames: "key" KeyColumnIds: 1 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 1 } } TableSchemaVersion: 1 IsBackup: false ReplicationConfig { Mode: REPLICATION_MODE_READ_ONLY ConsistencyLevel: CONSISTENCY_LEVEL_ROW } IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowU ... e(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:17789 TServer::EnableGrpc on GrpcPort 15508, node 2 2025-05-07T09:04:05.846598Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:04:05.846631Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:04:05.846638Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:04:05.846755Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:17789 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:04:06.166387Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:04:06.173904Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:04:06.207977Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1746608646216 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1746608646272 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: "Src" PathId: 2 S... (TRUNCATED) TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1746608646216 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1746608646272 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: "Src" PathId: 2 S... 
(TRUNCATED) 2025-05-07T09:04:06.237224Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-05-07T09:04:06.237418Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-05-07T09:04:06.237441Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:87: [DstCreator][rid 1][tid 1] Get table profiles 2025-05-07T09:04:06.237918Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:106: [DstCreator][rid 1][tid 1] Handle NKikimr::NConsole::TEvConfigsDispatcher::TEvGetConfigResponse 2025-05-07T09:04:08.471320Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:158: [DstCreator][rid 1][tid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribeTableResponse { Result: { name: Src, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1746608646244, tx_id: 281474976715658 } } } 2025-05-07T09:04:08.471667Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:249: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxUserProxy::TEvAllocateTxIdResult 2025-05-07T09:04:08.473187Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:279: [DstCreator][rid 1][tid 1] Handle {TEvModifySchemeTransactionResult Status# StatusAlreadyExists txid# 281474976715660 Reason# Check failed: path: '/Root/Dst', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 3], type: EPathTypeTable, state: EPathStateNoChanges)} 2025-05-07T09:04:08.474219Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:335: [DstCreator][rid 1][tid 1] Handle NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 20 Record# Status: StatusSuccess Path: "/Root/Dst" PathDescription { Self { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1746608646272 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Dst" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } 
KeyColumnNames: "key" KeyColumnIds: 1 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 1 } } TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 3 PathOwnerId: 72057594046644480 2025-05-07T09:04:08.474414Z node 2 
:REPLICATION_CONTROLLER ERROR: dst_creator.cpp:594: [DstCreator][rid 1][tid 1] Error: status# StatusSchemeError, reason# Empty replication config ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::SimultaneousCreateTenantTable [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140] 2025-05-07T09:04:08.949687Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T09:04:08.949765Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:04:08.949790Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T09:04:08.949814Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T09:04:08.949860Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T09:04:08.949897Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T09:04:08.949994Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:04:08.950072Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T09:04:08.950719Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T09:04:08.951100Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T09:04:09.008656Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T09:04:09.008702Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:04:09.022016Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T09:04:09.022252Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T09:04:09.022427Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T09:04:09.028267Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T09:04:09.028619Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 
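
The "Empty replication config" error above covers a third precondition: an existing table can only be adopted as an async-replica destination if its describe result carries a ReplicationConfig (the earlier describe results include Mode: REPLICATION_MODE_READ_ONLY ConsistencyLevel: CONSISTENCY_LEVEL_ROW; the failing one has none at all). A hedged sketch of that presence check, again with hypothetical types:

    // Hypothetical sketch of the replication-config presence check.
    #include <iostream>
    #include <optional>
    #include <string>

    enum class EReplicationMode { None, ReadOnly };

    struct TReplicationConfig {
        EReplicationMode Mode = EReplicationMode::None;
    };

    std::optional<std::string> CheckReplicationConfig(
            const std::optional<TReplicationConfig>& cfg) {
        if (!cfg || cfg->Mode == EReplicationMode::None) {
            return "Empty replication config";
        }
        return std::nullopt;
    }

    int main() {
        std::optional<TReplicationConfig> missing; // described table had no ReplicationConfig
        if (auto err = CheckReplicationConfig(missing)) {
            std::cout << *err << '\n';
        }
    }
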
2025-05-07T09:04:09.029262Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:09.029451Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T09:04:09.032423Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:09.033898Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:09.033962Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:09.034062Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T09:04:09.034106Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:09.034251Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T09:04:09.034511Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T09:04:09.041470Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T09:04:09.191216Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T09:04:09.191420Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:09.191628Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T09:04:09.191866Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T09:04:09.191920Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:09.194246Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:09.194388Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: 
//MyRoot 2025-05-07T09:04:09.194584Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:09.194640Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T09:04:09.194677Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T09:04:09.194709Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T09:04:09.196622Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:09.196677Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T09:04:09.196716Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T09:04:09.198476Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:09.198533Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:09.198584Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:09.198642Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T09:04:09.207645Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T09:04:09.209662Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T09:04:09.209862Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T09:04:09.210885Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:09.211049Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:04:09.211098Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:09.211387Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T09:04:09.211438Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:09.211594Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T09:04:09.211660Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T09:04:09.213651Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:09.213702Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:09.213881Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:09.213918Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... rs: 72075186233409547 DomainCoordinators: 72075186233409548 TxStats { PerShardStats { ShardId: 72075186233409552 CpuTimeUsec: 1236 } } 2025-05-07T09:04:09.606677Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation FindRelatedPartByTabletId, TxId: 101, tablet: 72075186233409552, partId: 0 2025-05-07T09:04:09.606789Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 101:0, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409552 Status: COMPLETE TxId: 101 Step: 140 OrderId: 101 ExecLatency: 0 ProposeLatency: 2 DomainCoordinators: 72075186233409546 DomainCoordinators: 72075186233409547 DomainCoordinators: 72075186233409548 TxStats { PerShardStats { ShardId: 72075186233409552 CpuTimeUsec: 1236 } } 2025-05-07T09:04:09.606894Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_part.cpp:108: HandleReply TEvDataShard::TEvProposeTransactionResult Ignore message: tablet# 72057594046678944, ev# TxKind: TX_KIND_SCHEME Origin: 72075186233409552 Status: COMPLETE TxId: 101 Step: 140 OrderId: 101 ExecLatency: 0 ProposeLatency: 2 DomainCoordinators: 72075186233409546 DomainCoordinators: 72075186233409547 DomainCoordinators: 72075186233409548 TxStats { PerShardStats { ShardId: 72075186233409552 CpuTimeUsec: 1236 } } 2025-05-07T09:04:09.608208Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5472: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 619 RawX2: 4294969824 } Origin: 72075186233409552 State: 2 TxId: 101 Step: 0 Generation: 2 2025-05-07T09:04:09.608276Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation FindRelatedPartByTabletId, TxId: 101, tablet: 72075186233409552, partId: 0 
2025-05-07T09:04:09.608413Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 101:0, at schemeshard: 72057594046678944, message: Source { RawX1: 619 RawX2: 4294969824 } Origin: 72075186233409552 State: 2 TxId: 101 Step: 0 Generation: 2 2025-05-07T09:04:09.608474Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1005: NTableState::TProposedWaitParts operationId# 101:0 HandleReply TEvSchemaChanged at tablet: 72057594046678944 2025-05-07T09:04:09.608567Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1009: NTableState::TProposedWaitParts operationId# 101:0 HandleReply TEvSchemaChanged at tablet: 72057594046678944 message: Source { RawX1: 619 RawX2: 4294969824 } Origin: 72075186233409552 State: 2 TxId: 101 Step: 0 Generation: 2 2025-05-07T09:04:09.608652Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:655: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 101:0, shardIdx: 72057594046678944:7, datashard: 72075186233409552, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:09.608689Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:674: all shard schema changes has been received, operationId: 101:0, at schemeshard: 72057594046678944 2025-05-07T09:04:09.608740Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:686: send schema changes ack message, operation: 101:0, datashard: 72075186233409552, at schemeshard: 72057594046678944 2025-05-07T09:04:09.608790Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 101:0 129 -> 240 2025-05-07T09:04:09.612478Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-05-07T09:04:09.612875Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-05-07T09:04:09.613111Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-05-07T09:04:09.614362Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-05-07T09:04:09.614749Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 101:0, at schemeshard: 72057594046678944 2025-05-07T09:04:09.614799Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046678944] TDone opId# 101:0 ProgressState 2025-05-07T09:04:09.614943Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#101:0 progress is 1/1 2025-05-07T09:04:09.615014Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-05-07T09:04:09.615049Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#101:0 progress is 1/1 2025-05-07T09:04:09.615080Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-05-07T09:04:09.615124Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 101, ready 
parts: 1/1, is published: true 2025-05-07T09:04:09.615190Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:274:2265] message: TxId: 101 2025-05-07T09:04:09.615239Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-05-07T09:04:09.615276Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 101:0 2025-05-07T09:04:09.615325Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 101:0 2025-05-07T09:04:09.615440Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-05-07T09:04:09.617036Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-05-07T09:04:09.617079Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:275:2266] TestWaitNotification: OK eventTxId 101 2025-05-07T09:04:09.617554Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T09:04:09.617806Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 278us result status StatusSuccess 2025-05-07T09:04:09.618284Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0" PathDescription { Self { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 3 SubDomainVersion: 1 SecurityStateVersion: 0 } } Children { Name: "table_0" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 140 ParentPathId: 2 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 10 Coordinators: 72075186233409546 Coordinators: 72075186233409547 Coordinators: 72075186233409548 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409549 Mediators: 72075186233409550 Mediators: 72075186233409551 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 1 PathsLimit: 10000 ShardsInside: 7 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 
72057594046678944 2025-05-07T09:04:09.618976Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0/table_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T09:04:09.619230Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0/table_0" took 214us result status StatusSuccess 2025-05-07T09:04:09.619582Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0/table_0" PathDescription { Self { Name: "table_0" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 140 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "table_0" Columns { Name: "RowId" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "RowId" KeyColumnIds: 1 TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 10 Coordinators: 72075186233409546 Coordinators: 72075186233409547 Coordinators: 72075186233409548 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409549 Mediators: 72075186233409550 Mediators: 72075186233409551 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 7 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944
>> TCdcStreamTests::TopicPartitions [GOOD]
>> TCdcStreamTests::ReplicationAttribute
>> TSchemeShardSubDomainTest::DeclareDefineAndDelete [GOOD]
>> TSchemeShardSubDomainTest::ForceDropTwice
|92.8%| [TA] $(B)/ydb/core/tx/replication/controller/ut_dst_creator/test-results/unittest/{meta.json ... results_accumulator.log}
|92.8%| [TA] {RESULT} $(B)/ydb/core/tx/replication/controller/ut_dst_creator/test-results/unittest/{meta.json ... results_accumulator.log}
>> TSchemeShardSubDomainTest::CreateSubDomainWithoutTabletsThenMkDir
>> TSchemeShardSubDomainTest::CreateWithoutPlanResolution [GOOD]
>> TSchemeShardSubDomainTest::SimultaneousCreateDelete [GOOD]
>> TSchemeShardSubDomainTest::DeleteAdd
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::DeclareDefineAndDelete [GOOD]
Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140] 2025-05-07T09:04:09.738004Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T09:04:09.738195Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:04:09.738239Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T09:04:09.738279Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T09:04:09.738329Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T09:04:09.738358Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T09:04:09.738432Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:04:09.738507Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T09:04:09.739291Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T09:04:09.739660Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T09:04:09.822932Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T09:04:09.822983Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:04:09.845171Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T09:04:09.845442Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T09:04:09.845660Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T09:04:09.852801Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T09:04:09.853140Z node 1 :FLAT_TX_SCHEMESHARD
INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T09:04:09.853956Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:09.854210Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T09:04:09.857257Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:09.858784Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:09.858846Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:09.858937Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T09:04:09.858991Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:09.859102Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T09:04:09.859349Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T09:04:09.866691Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T09:04:10.018981Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T09:04:10.019280Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:10.019535Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T09:04:10.019826Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T09:04:10.019918Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:10.022708Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:10.022874Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T09:04:10.023145Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:10.023223Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T09:04:10.023269Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T09:04:10.023306Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T09:04:10.025543Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:10.025608Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T09:04:10.025665Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T09:04:10.027847Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:10.027921Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:10.027971Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:10.028052Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T09:04:10.032189Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T09:04:10.034582Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T09:04:10.034791Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T09:04:10.035839Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:10.035982Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at 
schemeshard: 72057594046678944 2025-05-07T09:04:10.036036Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:10.036387Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T09:04:10.036448Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:10.036634Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T09:04:10.036705Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T09:04:10.039112Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:10.039173Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:10.039371Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:10.039420Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 
y parts: 1/1 2025-05-07T09:04:10.207193Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: false 2025-05-07T09:04:10.207240Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-05-07T09:04:10.207294Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 102:0 2025-05-07T09:04:10.207336Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 102:0 2025-05-07T09:04:10.207522Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 6 2025-05-07T09:04:10.207562Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 102, publications: 2, subscribers: 0 2025-05-07T09:04:10.207611Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 102, [OwnerId: 72057594046678944, LocalPathId: 1], 7 2025-05-07T09:04:10.207644Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 102, [OwnerId: 72057594046678944, LocalPathId: 2], 18446744073709551615 2025-05-07T09:04:10.208294Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 102 2025-05-07T09:04:10.208375Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 102 2025-05-07T09:04:10.208432Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 102 2025-05-07T09:04:10.208476Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 7 2025-05-07T09:04:10.208518Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-05-07T09:04:10.209183Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 102 2025-05-07T09:04:10.209261Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 102 2025-05-07T09:04:10.209288Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 102 2025-05-07T09:04:10.209313Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, 
LocalPathId: 2], version: 18446744073709551615 2025-05-07T09:04:10.209346Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5 2025-05-07T09:04:10.209428Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 102, subscribers: 0 2025-05-07T09:04:10.211860Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:1 hive 72057594037968897 at ss 72057594046678944 2025-05-07T09:04:10.211911Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:3 hive 72057594037968897 at ss 72057594046678944 2025-05-07T09:04:10.211934Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:2 hive 72057594037968897 at ss 72057594046678944 2025-05-07T09:04:10.212504Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-05-07T09:04:10.213204Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 1 TxId_Deprecated: 1 TabletID: 72075186233409546 2025-05-07T09:04:10.213867Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5898: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 1 ShardOwnerId: 72057594046678944 ShardLocalIdx: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:10.214247Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 Forgetting tablet 72075186233409546 2025-05-07T09:04:10.216168Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 3 TxId_Deprecated: 3 TabletID: 72075186233409548 2025-05-07T09:04:10.216355Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5898: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 3 ShardOwnerId: 72057594046678944 ShardLocalIdx: 3, at schemeshard: 72057594046678944 2025-05-07T09:04:10.216578Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-05-07T09:04:10.216801Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 2 TxId_Deprecated: 2 TabletID: 72075186233409547 2025-05-07T09:04:10.217338Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5898: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 2 ShardOwnerId: 72057594046678944 ShardLocalIdx: 2, at schemeshard: 72057594046678944 2025-05-07T09:04:10.217496Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 Forgetting tablet 72075186233409548 2025-05-07T09:04:10.218411Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:205: TTxCleanDroppedSubDomains Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-05-07T09:04:10.218459Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: 
PersistRemoveSubDomain for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-05-07T09:04:10.218567Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 Forgetting tablet 72075186233409547 2025-05-07T09:04:10.219301Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-05-07T09:04:10.219351Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-05-07T09:04:10.219426Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T09:04:10.219957Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-05-07T09:04:10.221593Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:1 2025-05-07T09:04:10.221648Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:1 tabletId 72075186233409546 2025-05-07T09:04:10.223838Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:3 2025-05-07T09:04:10.223879Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:3 tabletId 72075186233409548 2025-05-07T09:04:10.223940Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2 2025-05-07T09:04:10.223964Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:2 tabletId 72075186233409547 2025-05-07T09:04:10.224070Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-05-07T09:04:10.224124Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 102, wait until txId: 102 TestWaitNotification wait txId: 102 2025-05-07T09:04:10.224301Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2025-05-07T09:04:10.224330Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 2025-05-07T09:04:10.224727Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 2025-05-07T09:04:10.224808Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-05-07T09:04:10.224834Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:516:2470] TestWaitNotification: OK eventTxId 102 2025-05-07T09:04:10.225189Z node 1 
:SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T09:04:10.225313Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 149us result status StatusPathDoesNotExist 2025-05-07T09:04:10.225447Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/USER_0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944
>> TSchemeShardSubDomainTest::RmDir
>> TSchemeShardSubDomainTest::TopicDiskSpaceQuotas
>> TSchemeShardSubDomainTest::CreateForceDropSolomon [GOOD]
>> TopicService::UnknownConsumer
>> TStoragePoolsQuotasTest::DifferentQuotasInteraction-IsExternalSubdomain-EnableSeparateQuotas
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::CreateWithoutPlanResolution [GOOD]
Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140] 2025-05-07T09:04:10.403438Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T09:04:10.403507Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:04:10.403534Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T09:04:10.403557Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T09:04:10.403598Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T09:04:10.403619Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T09:04:10.403670Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:04:10.403722Z node 1 :FLAT_TX_SCHEMESHARD NOTICE:
schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T09:04:10.404222Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T09:04:10.404460Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T09:04:10.460114Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T09:04:10.460161Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:04:10.472338Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T09:04:10.472485Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T09:04:10.472620Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T09:04:10.477686Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T09:04:10.477938Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T09:04:10.478543Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:10.478704Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T09:04:10.480930Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:10.482041Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:10.482087Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:10.482155Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T09:04:10.482191Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:10.482274Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T09:04:10.482498Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T09:04:10.487418Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T09:04:10.600372Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" 
StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T09:04:10.600585Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:10.600772Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T09:04:10.601006Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T09:04:10.601073Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:10.602844Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:10.602969Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T09:04:10.603113Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:10.603160Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T09:04:10.603193Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T09:04:10.603226Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T09:04:10.604736Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:10.604791Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T09:04:10.604830Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T09:04:10.606058Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:10.606095Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:10.606129Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:10.606178Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T09:04:10.613525Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 
72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T09:04:10.615143Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T09:04:10.615293Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T09:04:10.616046Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:10.616140Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:04:10.616190Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:10.616456Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T09:04:10.616512Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:10.616660Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T09:04:10.616722Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T09:04:10.618317Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:10.618358Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:10.618484Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:10.618517Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:205:2207], at schemeshard: 72057594046678944, txId: 1, path id: 1 2025-05-07T09:04:10.618787Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:10.618820Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046678944] TDone opId# 1:0 ProgressState 
2025-05-07T09:04:10.618892Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#1:0 progress is 1/1 2025-05-07T09:04:10.618930Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-05-07T09:04:10.618960Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#1:0 progress is 1/1 2025-05-07T09:04:10.618981Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-05-07T09:04:10.619006Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 1, ready parts: 1/1, is published: false 2025-05-07T09:04:10.619047Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-05-07T09:04:10.619094Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 1:0 2025-05-07T09:04:10.619122Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 1:0 2025-05-07T09:04:10.619175Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-05-07T09:04:10.619201Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 1, publications: 1, subscribers: 0 2025-05-07T09:04:10.619235Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 1, [OwnerId: 72057594046678944, LocalPathId: 1], 3 2025-05-07T09:04:10.625603Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-05-07T09:04:10.625726Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-05-07T09:04:10.625755Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1 2025-05-07T09:04:10.625787Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 3 2025-05-07T09:04:10.625817Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T09:04:10.625919Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1, subscribers: 0 2025-05-07T09:04:10.628794Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1 2025-05-07T09:04:10.629230Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1, at schemeshard: 72057594046678944 TestModificationResults wait txId: 100 
2025-05-07T09:04:10.632203Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateSubDomain SubDomain { Coordinators: 1 Mediators: 1 Name: "USER_0" TimeCastBucketsPerMediator: 2 StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 100 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T09:04:10.632456Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_subdomain.cpp:92: TCreateSubDomain Propose, path: /MyRoot/USER_0, opId: 100:0, at schemeshard: 72057594046678944 2025-05-07T09:04:10.632580Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 100:1, propose status:StatusInvalidParameter, reason: Malformed subdomain request: plan resolution is 0, at schemeshard: 72057594046678944 2025-05-07T09:04:10.632982Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:434: actor# [1:268:2259] Bootstrap 2025-05-07T09:04:10.647298Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:453: actor# [1:268:2259] Become StateWork (SchemeCache [1:273:2264]) 2025-05-07T09:04:10.647964Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:268:2259] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-05-07T09:04:10.650783Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 100, response: Status: StatusInvalidParameter Reason: "Malformed subdomain request: plan resolution is 0" TxId: 100 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:04:10.650954Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 100, database: /MyRoot, subject: , status: StatusInvalidParameter, reason: Malformed subdomain request: plan resolution is 0, operation: CREATE DATABASE, path: /MyRoot/USER_0 2025-05-07T09:04:10.651519Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 TestModificationResult got TxId: 100, wait until txId: 100 TestWaitNotification wait txId: 100 2025-05-07T09:04:10.651706Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 100: send EvNotifyTxCompletion 2025-05-07T09:04:10.651770Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 100 2025-05-07T09:04:10.652158Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 100, at schemeshard: 72057594046678944 2025-05-07T09:04:10.652254Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 100: got EvNotifyTxCompletionResult 2025-05-07T09:04:10.652299Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 100: satisfy waiter [1:283:2274] TestWaitNotification: OK eventTxId 100 2025-05-07T09:04:10.652683Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T09:04:10.652864Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 
144us result status StatusPathDoesNotExist 2025-05-07T09:04:10.653150Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/USER_0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944
>> TStoragePoolsQuotasTest::DifferentQuotasInteraction-EnableSeparateQuotas
>> TSchemeShardSubDomainTest::SimultaneousCreateForceDrop
>> TPersQueueTest::WriteExistingBigValue [GOOD]
>> TPersQueueTest::WriteEmptyData
>> TSchemeShardSubDomainTest::DeclareAndDelete
>> TStoragePoolsQuotasTest::QuoteNonexistentPool-IsExternalSubdomain-true
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::SimultaneousCreateDelete [GOOD]
Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140] 2025-05-07T09:04:10.148279Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T09:04:10.148364Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:04:10.148408Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T09:04:10.148446Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T09:04:10.148538Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T09:04:10.148570Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T09:04:10.148637Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:04:10.148735Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T09:04:10.149479Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# ,
AvailableExternalDataSources# 2025-05-07T09:04:10.149862Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T09:04:10.219624Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T09:04:10.219684Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:04:10.233938Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T09:04:10.234154Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T09:04:10.234327Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T09:04:10.239588Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T09:04:10.239822Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T09:04:10.240404Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:10.240545Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T09:04:10.243008Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:10.244209Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:10.244261Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:10.244333Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T09:04:10.244369Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:10.244451Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T09:04:10.244628Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T09:04:10.250756Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T09:04:10.362143Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T09:04:10.362403Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 
2025-05-07T09:04:10.362648Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T09:04:10.362892Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T09:04:10.363003Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:10.365459Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:10.365624Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T09:04:10.365865Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:10.365931Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T09:04:10.366010Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T09:04:10.366057Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T09:04:10.368294Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:10.368370Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T09:04:10.368419Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T09:04:10.370391Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:10.370451Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:10.370498Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:10.370563Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T09:04:10.374451Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T09:04:10.376735Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 
72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T09:04:10.377014Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T09:04:10.378137Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:10.378313Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:04:10.378375Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:10.378703Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T09:04:10.378769Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:10.379004Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T09:04:10.379084Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T09:04:10.381376Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:10.381435Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:10.381622Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:10.381663Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 
TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 101 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T09:04:10.601824Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 101:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:101 msg type: 269090816 2025-05-07T09:04:10.602004Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 101, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 101 at step: 5000002 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000001 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 101 at step: 5000002 2025-05-07T09:04:10.602348Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000002, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:10.602471Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 101 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000002 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:04:10.602517Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 101:0, at tablet# 72057594046678944 2025-05-07T09:04:10.602928Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 101:0 128 -> 240 2025-05-07T09:04:10.602991Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 101:0, at tablet# 72057594046678944 2025-05-07T09:04:10.603162Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T09:04:10.603223Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 8 2025-05-07T09:04:10.603276Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 101 2025-05-07T09:04:10.605377Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:10.605446Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 101, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:10.605648Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 101, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-05-07T09:04:10.605769Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:10.605810Z 
node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:205:2207], at schemeshard: 72057594046678944, txId: 101, path id: 1 2025-05-07T09:04:10.605861Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:205:2207], at schemeshard: 72057594046678944, txId: 101, path id: 2 2025-05-07T09:04:10.606154Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 101:0, at schemeshard: 72057594046678944 2025-05-07T09:04:10.606202Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046678944] TDone opId# 101:0 ProgressState 2025-05-07T09:04:10.606321Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#101:0 progress is 1/1 2025-05-07T09:04:10.606369Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-05-07T09:04:10.606414Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#101:0 progress is 1/1 2025-05-07T09:04:10.606448Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-05-07T09:04:10.606519Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 101, ready parts: 1/1, is published: false 2025-05-07T09:04:10.606563Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-05-07T09:04:10.606621Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 101:0 2025-05-07T09:04:10.606656Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 101:0 2025-05-07T09:04:10.606888Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 9 2025-05-07T09:04:10.606983Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 101, publications: 2, subscribers: 1 2025-05-07T09:04:10.607045Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 1], 5 2025-05-07T09:04:10.607073Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 2], 3 2025-05-07T09:04:10.607844Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 101 2025-05-07T09:04:10.607962Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 101 2025-05-07T09:04:10.608007Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 101 2025-05-07T09:04:10.608062Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 5 2025-05-07T09:04:10.608116Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-05-07T09:04:10.608816Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 101 2025-05-07T09:04:10.608920Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 101 2025-05-07T09:04:10.608961Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 101 2025-05-07T09:04:10.608994Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 3 2025-05-07T09:04:10.609026Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 8 2025-05-07T09:04:10.609093Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 101, subscribers: 1 2025-05-07T09:04:10.609158Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:212: TTxAckPublishToSchemeBoard Notify send TEvNotifyTxCompletionResult, at schemeshard: 72057594046678944, to actorId: [1:565:2474] 2025-05-07T09:04:10.612515Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-05-07T09:04:10.613576Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-05-07T09:04:10.613669Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-05-07T09:04:10.613704Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:566:2475] TestWaitNotification: OK eventTxId 101 2025-05-07T09:04:10.614229Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T09:04:10.614465Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 238us result status StatusSuccess 2025-05-07T09:04:10.614978Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true 
CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 3 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944
>> TStoragePoolsQuotasTest::DifferentQuotasInteraction-IsExternalSubdomain
>> TCdcStreamTests::ReplicationAttribute [GOOD]
>> TCdcStreamTests::RebootSchemeShard
>> TSchemeShardSubDomainTest::CreateSubDomainWithoutTabletsThenMkDir [GOOD]
>> TSchemeShardSubDomainTest::SchemeLimitsCreatePq
>> TStoragePoolsQuotasTest::DisableWritesToDatabase-IsExternalSubdomain-true [GOOD]
>> TSchemeShardSubDomainTest::Restart
>> TSchemeShardSubDomainTest::ForceDropTwice [GOOD]
>> TSchemeShardSubDomainTest::CreateSubDomainWithoutSomeTablets
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest
>> TSchemeShardSubDomainTest::CreateForceDropSolomon [GOOD]
Test command err:
Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140] 2025-05-07T09:04:09.922112Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T09:04:09.922192Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:04:09.922223Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T09:04:09.922255Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T09:04:09.922325Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T09:04:09.922355Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 
10000 2025-05-07T09:04:09.922418Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:04:09.922496Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T09:04:09.923179Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T09:04:09.923526Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T09:04:09.997743Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T09:04:09.997807Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:04:10.016140Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T09:04:10.016394Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T09:04:10.016579Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T09:04:10.025291Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T09:04:10.025633Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T09:04:10.026323Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:10.026519Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T09:04:10.029833Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:10.030972Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:10.031022Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:10.031073Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T09:04:10.031115Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:10.031196Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T09:04:10.031386Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T09:04:10.037415Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2150] 
sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T09:04:10.143116Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T09:04:10.143360Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:10.143606Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T09:04:10.143842Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T09:04:10.143926Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:10.146153Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:10.146303Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T09:04:10.146503Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:10.146571Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T09:04:10.146616Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T09:04:10.146648Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T09:04:10.148423Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:10.148477Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T09:04:10.148582Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T09:04:10.150394Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:10.150452Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:10.150494Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:10.150555Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T09:04:10.153654Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T09:04:10.155394Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T09:04:10.155571Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T09:04:10.156460Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:10.156590Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:04:10.156645Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:10.156954Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T09:04:10.157023Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:10.157162Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T09:04:10.157213Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T09:04:10.159247Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:10.159324Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:10.159489Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:10.159536Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 
blet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:38 tabletId 72075186233409583 2025-05-07T09:04:11.043107Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:8 2025-05-07T09:04:11.043132Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:8 tabletId 72075186233409553 2025-05-07T09:04:11.044179Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:12 2025-05-07T09:04:11.044224Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:12 tabletId 72075186233409557 2025-05-07T09:04:11.044301Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:7 2025-05-07T09:04:11.044335Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:7 tabletId 72075186233409552 2025-05-07T09:04:11.044483Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:16 2025-05-07T09:04:11.044509Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:16 tabletId 72075186233409561 2025-05-07T09:04:11.044594Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:25 2025-05-07T09:04:11.044618Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:25 tabletId 72075186233409570 2025-05-07T09:04:11.044714Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:20 2025-05-07T09:04:11.044751Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:20 tabletId 72075186233409565 2025-05-07T09:04:11.046998Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:29 2025-05-07T09:04:11.047038Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:29 tabletId 72075186233409574 2025-05-07T09:04:11.047142Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:33 2025-05-07T09:04:11.047166Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:33 tabletId 72075186233409578 2025-05-07T09:04:11.048541Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2 2025-05-07T09:04:11.048581Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:2 tabletId 72075186233409547 2025-05-07T09:04:11.048692Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:37 2025-05-07T09:04:11.048711Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:37 tabletId 72075186233409582 2025-05-07T09:04:11.048789Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:42 2025-05-07T09:04:11.048815Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:42 tabletId 72075186233409587 2025-05-07T09:04:11.048873Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:6 2025-05-07T09:04:11.048887Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:6 tabletId 72075186233409551 2025-05-07T09:04:11.048936Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:11 2025-05-07T09:04:11.048951Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:11 tabletId 72075186233409556 2025-05-07T09:04:11.049004Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:15 2025-05-07T09:04:11.049027Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:15 tabletId 72075186233409560 2025-05-07T09:04:11.049986Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:19 2025-05-07T09:04:11.050023Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:19 tabletId 72075186233409564 2025-05-07T09:04:11.050191Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:24 2025-05-07T09:04:11.050263Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:24 tabletId 72075186233409569 2025-05-07T09:04:11.050377Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:23 2025-05-07T09:04:11.050405Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:23 tabletId 72075186233409568 2025-05-07T09:04:11.050459Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:28 2025-05-07T09:04:11.050480Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:28 tabletId 72075186233409573 2025-05-07T09:04:11.050544Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:32 2025-05-07T09:04:11.050571Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:32 tabletId 72075186233409577 2025-05-07T09:04:11.050628Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:1 2025-05-07T09:04:11.050650Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:1 tabletId 72075186233409546 2025-05-07T09:04:11.050706Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:36 2025-05-07T09:04:11.050752Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:36 tabletId 72075186233409581 2025-05-07T09:04:11.055037Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done 
PersistRemovePath for 1 paths, skipped 0, left 1 candidates, at schemeshard: 72057594046678944 2025-05-07T09:04:11.055180Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-05-07T09:04:11.055266Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-05-07T09:04:11.055327Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-05-07T09:04:11.055429Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T09:04:11.057955Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 103, wait until txId: 103 TestWaitNotification wait txId: 103 2025-05-07T09:04:11.058295Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 103: send EvNotifyTxCompletion 2025-05-07T09:04:11.058343Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 103 2025-05-07T09:04:11.058929Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 103, at schemeshard: 72057594046678944 2025-05-07T09:04:11.059053Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-05-07T09:04:11.059093Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [1:2056:3658] TestWaitNotification: OK eventTxId 103 2025-05-07T09:04:11.059618Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0/Solomon" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T09:04:11.059885Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0/Solomon" took 243us result status StatusPathDoesNotExist 2025-05-07T09:04:11.060086Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0/Solomon\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/USER_0/Solomon" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-05-07T09:04:11.060693Z 
node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T09:04:11.060883Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 186us result status StatusPathDoesNotExist 2025-05-07T09:04:11.061032Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/USER_0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944
>> TSchemeShardSubDomainTest::SchemeQuotas
>> TSchemeShardSubDomainTest::RmDir [GOOD]
>> TSchemeShardSubDomainTest::DeleteAdd [GOOD]
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest
>> TSchemeShardSubDomainTest::CreateSubDomainWithoutTabletsThenMkDir [GOOD]
Test command err:
Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140] 2025-05-07T09:04:11.211453Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T09:04:11.211544Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:04:11.211582Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T09:04:11.211615Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T09:04:11.211680Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T09:04:11.211713Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T09:04:11.211770Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:04:11.211861Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, 
DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T09:04:11.212572Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T09:04:11.212959Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T09:04:11.286265Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T09:04:11.286315Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:04:11.301211Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T09:04:11.301364Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T09:04:11.301510Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T09:04:11.306426Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T09:04:11.306694Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T09:04:11.307197Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:11.307340Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T09:04:11.309564Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:11.310691Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:11.310732Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:11.310783Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T09:04:11.310833Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:11.310926Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T09:04:11.311130Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T09:04:11.316641Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T09:04:11.411915Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 
TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T09:04:11.412135Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:11.412333Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T09:04:11.412575Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T09:04:11.412632Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:11.414709Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:11.414840Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T09:04:11.415059Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:11.415104Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T09:04:11.415130Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T09:04:11.415154Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T09:04:11.416812Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:11.416871Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T09:04:11.416910Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T09:04:11.418371Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:11.418413Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:11.418451Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:11.418507Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T09:04:11.421310Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 
MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T09:04:11.423107Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T09:04:11.423284Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T09:04:11.424156Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:11.424300Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:04:11.424344Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:11.424554Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T09:04:11.424592Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:11.424732Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T09:04:11.424802Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T09:04:11.426457Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:11.426497Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:11.426649Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:11.426697Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 
# 101:0 ProgressState
2025-05-07T09:04:11.488624Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#101:0 progress is 1/1
2025-05-07T09:04:11.488660Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 101 ready parts: 1/1
2025-05-07T09:04:11.488699Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#101:0 progress is 1/1
2025-05-07T09:04:11.488744Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 101 ready parts: 1/1
2025-05-07T09:04:11.488791Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 101, ready parts: 1/1, is published: false
2025-05-07T09:04:11.488849Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 101 ready parts: 1/1
2025-05-07T09:04:11.488886Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 101:0
2025-05-07T09:04:11.488920Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 101:0
2025-05-07T09:04:11.488998Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2
2025-05-07T09:04:11.489044Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 101, publications: 2, subscribers: 0
2025-05-07T09:04:11.489084Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 2], 5
2025-05-07T09:04:11.489118Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 3], 3
2025-05-07T09:04:11.489727Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 5 PathOwnerId: 72057594046678944, cookie: 101
2025-05-07T09:04:11.489811Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 5 PathOwnerId: 72057594046678944, cookie: 101
2025-05-07T09:04:11.489848Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 101
2025-05-07T09:04:11.489893Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 5
2025-05-07T09:04:11.489937Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3
2025-05-07T09:04:11.490419Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 3 PathOwnerId: 72057594046678944, cookie: 101
2025-05-07T09:04:11.490472Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 3 PathOwnerId: 72057594046678944, cookie: 101
2025-05-07T09:04:11.490494Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 101
2025-05-07T09:04:11.490520Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 3
2025-05-07T09:04:11.490544Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1
2025-05-07T09:04:11.490616Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 101, subscribers: 0
2025-05-07T09:04:11.493562Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101
2025-05-07T09:04:11.493674Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101
TestModificationResult got TxId: 101, wait until txId: 101
TestWaitNotification wait txId: 101
2025-05-07T09:04:11.493922Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 101: send EvNotifyTxCompletion
2025-05-07T09:04:11.493981Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 101
2025-05-07T09:04:11.494413Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944
2025-05-07T09:04:11.494495Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult
2025-05-07T09:04:11.494524Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:332:2323]
TestWaitNotification: OK eventTxId 101
2025-05-07T09:04:11.494923Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944
2025-05-07T09:04:11.495127Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 240us result status StatusSuccess
2025-05-07T09:04:11.495580Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 3 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944
2025-05-07T09:04:11.496098Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944
2025-05-07T09:04:11.496246Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 157us result status StatusSuccess
2025-05-07T09:04:11.496517Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0" PathDescription { Self { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 3 SubDomainVersion: 1 SecurityStateVersion: 0 } } Children { Name: "MyDir" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 101 CreateStep: 5000003 ParentPathId: 2 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944
2025-05-07T09:04:11.496911Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0/MyDir" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944
2025-05-07T09:04:11.497076Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0/MyDir" took 125us result status StatusSuccess
2025-05-07T09:04:11.497317Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0/MyDir" PathDescription { Self { Name: "MyDir" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 101 CreateStep: 5000003 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 2 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944
>> TSchemeShardSubDomainTest::DeclareAndDelete [GOOD]
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::ForceDropTwice [GOOD]
Test command err:
Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140]
IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140]
Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140]
2025-05-07T09:04:11.107297Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1
2025-05-07T09:04:11.107394Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10
2025-05-07T09:04:11.107591Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s
2025-05-07T09:04:11.107625Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration
2025-05-07T09:04:11.107689Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000
2025-05-07T09:04:11.107715Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000
2025-05-07T09:04:11.107767Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10
2025-05-07T09:04:11.107845Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false
2025-05-07T09:04:11.108559Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources#
2025-05-07T09:04:11.108893Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute
2025-05-07T09:04:11.191117Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs
2025-05-07T09:04:11.191188Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded
2025-05-07T09:04:11.207652Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete
2025-05-07T09:04:11.207845Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute
2025-05-07T09:04:11.208095Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944
2025-05-07T09:04:11.214350Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete
2025-05-07T09:04:11.214652Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0
2025-05-07T09:04:11.215333Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944
2025-05-07T09:04:11.215533Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944
2025-05-07T09:04:11.218278Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944
2025-05-07T09:04:11.219636Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944
2025-05-07T09:04:11.219707Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944
2025-05-07T09:04:11.219794Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute
2025-05-07T09:04:11.219841Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1]
2025-05-07T09:04:11.219938Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete
2025-05-07T09:04:11.220190Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944
2025-05-07T09:04:11.226726Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0
Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062]
2025-05-07T09:04:11.343128Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944
2025-05-07T09:04:11.343364Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944
2025-05-07T09:04:11.343605Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0
2025-05-07T09:04:11.343843Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944
2025-05-07T09:04:11.343917Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944
2025-05-07T09:04:11.346167Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944
2025-05-07T09:04:11.346327Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot
2025-05-07T09:04:11.346550Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944
2025-05-07T09:04:11.346611Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944
2025-05-07T09:04:11.346653Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state
2025-05-07T09:04:11.346687Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3
2025-05-07T09:04:11.348637Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944
2025-05-07T09:04:11.348701Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944
2025-05-07T09:04:11.348741Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128
2025-05-07T09:04:11.350413Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944
2025-05-07T09:04:11.350461Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944
2025-05-07T09:04:11.350508Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944
2025-05-07T09:04:11.350563Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1
2025-05-07T09:04:11.354086Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545
2025-05-07T09:04:11.355811Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816
2025-05-07T09:04:11.356042Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545
FAKE_COORDINATOR: Add transaction: 1 at step: 5000001
FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0
FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001
2025-05-07T09:04:11.357046Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944
2025-05-07T09:04:11.357192Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944
2025-05-07T09:04:11.357236Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944
2025-05-07T09:04:11.357526Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240
2025-05-07T09:04:11.357573Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944
2025-05-07T09:04:11.357736Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1
2025-05-07T09:04:11.357837Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944
FAKE_COORDINATOR: Erasing txId 1
2025-05-07T09:04:11.359886Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944
2025-05-07T09:04:11.359939Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1]
2025-05-07T09:04:11.360131Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944
2025-05-07T09:04:11.360169Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ...
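In the ForceDropTwice trace that follows, the dropped subdomain's path entry starts with a DB reference count of 6; each "Free tablet reply" for a deleted shard decrements it ("was 6", "was 5", ...), and TTxCleanDroppedPaths persists the removal only once the count drains. A toy C++ sketch of that bookkeeping, assuming a plain counter per path (the real logic is spread across schemeshard_impl.cpp and schemeshard__clean_pathes.cpp):

    #include <cstdio>

    // Toy model: a dropped path may be erased only after its DB ref count reaches zero.
    struct TPathEntry {
        int DbRefCount = 0;
        bool Dropped = false;
    };

    void DecrementPathDbRefCount(TPathEntry& path, const char* reason) {
        std::printf("DecrementPathDbRefCount reason %s was %d\n", reason, path.DbRefCount);
        --path.DbRefCount;
    }

    int main() {
        TPathEntry subdomain{/*DbRefCount=*/6, /*Dropped=*/true};  // six references, as in the log
        for (int shard = 1; shard <= 6; ++shard) {
            DecrementPathDbRefCount(subdomain, "shard deleted");
        }
        if (subdomain.Dropped && subdomain.DbRefCount == 0) {
            std::printf("TTxCleanDroppedPaths: PersistRemovePath, path erased from local DB\n");
        }
    }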
Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5898: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 1 ShardOwnerId: 72057594046678944 ShardLocalIdx: 1, at schemeshard: 72057594046678944
2025-05-07T09:04:11.630603Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 6
2025-05-07T09:04:11.630991Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 3 TxId_Deprecated: 3 TabletID: 72075186233409548
Forgetting tablet 72075186233409546
2025-05-07T09:04:11.632478Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5898: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 3 ShardOwnerId: 72057594046678944 ShardLocalIdx: 3, at schemeshard: 72057594046678944
2025-05-07T09:04:11.632651Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5
2025-05-07T09:04:11.633005Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 6 TxId_Deprecated: 6 TabletID: 72075186233409551
2025-05-07T09:04:11.633776Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 2 TxId_Deprecated: 2 TabletID: 72075186233409547
2025-05-07T09:04:11.634193Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5898: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 6 ShardOwnerId: 72057594046678944 ShardLocalIdx: 6, at schemeshard: 72057594046678944
2025-05-07T09:04:11.634350Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4
Forgetting tablet 72075186233409548
2025-05-07T09:04:11.636139Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 4 TxId_Deprecated: 4 TabletID: 72075186233409549
2025-05-07T09:04:11.636353Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5898: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 2 ShardOwnerId: 72057594046678944 ShardLocalIdx: 2, at schemeshard: 72057594046678944
2025-05-07T09:04:11.636527Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3
Forgetting tablet 72075186233409551
Forgetting tablet 72075186233409547
Forgetting tablet 72075186233409549
2025-05-07T09:04:11.638274Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5898: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 4 ShardOwnerId: 72057594046678944 ShardLocalIdx: 4, at schemeshard: 72057594046678944
2025-05-07T09:04:11.638440Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2
2025-05-07T09:04:11.639117Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:205: TTxCleanDroppedSubDomains Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944
2025-05-07T09:04:11.639168Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: PersistRemoveSubDomain for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944
2025-05-07T09:04:11.639318Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1
2025-05-07T09:04:11.639778Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944
2025-05-07T09:04:11.639824Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944
2025-05-07T09:04:11.639901Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1
2025-05-07T09:04:11.640889Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:5
2025-05-07T09:04:11.640958Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:5 tabletId 72075186233409550
2025-05-07T09:04:11.643066Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:1
2025-05-07T09:04:11.643105Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:1 tabletId 72075186233409546
2025-05-07T09:04:11.643246Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:3
2025-05-07T09:04:11.643283Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:3 tabletId 72075186233409548
2025-05-07T09:04:11.643401Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:6
2025-05-07T09:04:11.643427Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:6 tabletId 72075186233409551
2025-05-07T09:04:11.645645Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2
2025-05-07T09:04:11.645686Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:2 tabletId 72075186233409547
2025-05-07T09:04:11.645758Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:4
2025-05-07T09:04:11.645818Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:4 tabletId 72075186233409549
2025-05-07T09:04:11.646086Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944
2025-05-07T09:04:11.646160Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944
TestWaitNotification wait txId: 102
2025-05-07T09:04:11.646388Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion
2025-05-07T09:04:11.646426Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102
TestWaitNotification wait txId: 103
2025-05-07T09:04:11.646527Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 103: send EvNotifyTxCompletion
2025-05-07T09:04:11.646551Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 103
2025-05-07T09:04:11.646976Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944
2025-05-07T09:04:11.647079Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 103, at schemeshard: 72057594046678944
2025-05-07T09:04:11.647142Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult
2025-05-07T09:04:11.647170Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:670:2570]
2025-05-07T09:04:11.647305Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult
2025-05-07T09:04:11.647329Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [1:670:2570]
TestWaitNotification: OK eventTxId 102
TestWaitNotification: OK eventTxId 103
2025-05-07T09:04:11.647811Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944
2025-05-07T09:04:11.647989Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 202us result status StatusPathDoesNotExist
2025-05-07T09:04:11.648133Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/USER_0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944
2025-05-07T09:04:11.648558Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944
2025-05-07T09:04:11.648720Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 160us result status StatusSuccess
2025-05-07T09:04:11.649053Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 8 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 8 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 6 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TStoragePoolsQuotasTest::DisableWritesToDatabase-IsExternalSubdomain-true [GOOD]
Test command err:
Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140]
IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140]
Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140]
2025-05-07T09:04:06.812144Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1
2025-05-07T09:04:06.812236Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10
2025-05-07T09:04:06.812277Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s
2025-05-07T09:04:06.812309Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration
2025-05-07T09:04:06.812369Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000
2025-05-07T09:04:06.812401Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000
2025-05-07T09:04:06.812450Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10
2025-05-07T09:04:06.812503Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false
2025-05-07T09:04:06.813158Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources#
2025-05-07T09:04:06.813420Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute
2025-05-07T09:04:06.879970Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs
2025-05-07T09:04:06.880019Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded
2025-05-07T09:04:06.895272Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete
2025-05-07T09:04:06.895403Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute
2025-05-07T09:04:06.895526Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944
2025-05-07T09:04:06.901480Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete
2025-05-07T09:04:06.901742Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0
2025-05-07T09:04:06.902408Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944
2025-05-07T09:04:06.902571Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944
2025-05-07T09:04:06.905258Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944
2025-05-07T09:04:06.906593Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944
2025-05-07T09:04:06.906651Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944
2025-05-07T09:04:06.906721Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute
2025-05-07T09:04:06.906768Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1]
2025-05-07T09:04:06.906881Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete
2025-05-07T09:04:06.907158Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944
2025-05-07T09:04:06.913305Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0
Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062]
2025-05-07T09:04:07.059533Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944
2025-05-07T09:04:07.059743Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944
2025-05-07T09:04:07.059945Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0
2025-05-07T09:04:07.060200Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944
2025-05-07T09:04:07.060257Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944
2025-05-07T09:04:07.062481Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944
2025-05-07T09:04:07.062632Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot
2025-05-07T09:04:07.062816Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944
2025-05-07T09:04:07.062876Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944
2025-05-07T09:04:07.062925Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state
2025-05-07T09:04:07.062981Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3
2025-05-07T09:04:07.064842Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944
2025-05-07T09:04:07.064907Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944
2025-05-07T09:04:07.064956Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128
2025-05-07T09:04:07.066602Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944
2025-05-07T09:04:07.066655Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944
2025-05-07T09:04:07.066711Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944
2025-05-07T09:04:07.066778Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1
2025-05-07T09:04:07.070454Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545
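A detail worth noting in the propose message just above: MinStep: 0 together with MaxStep: 18446744073709551615. That MaxStep is 2^64 - 1, the largest value an unsigned 64-bit field can hold, i.e. the transaction is planned with effectively no upper deadline. A quick self-contained check (assuming only that the field is a uint64):

    #include <cstdint>
    #include <iostream>
    #include <limits>

    int main() {
        // 18446744073709551615 == 2^64 - 1: the sentinel used when no MaxStep deadline applies.
        static_assert(std::numeric_limits<uint64_t>::max() == 18446744073709551615ULL,
                      "uint64 max is the MaxStep sentinel seen in the log");
        std::cout << std::numeric_limits<uint64_t>::max() << "\n";  // prints 18446744073709551615
    }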
2025-05-07T09:04:07.072260Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816
2025-05-07T09:04:07.072443Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545
FAKE_COORDINATOR: Add transaction: 1 at step: 5000001
FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0
FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001
2025-05-07T09:04:07.073441Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944
2025-05-07T09:04:07.073582Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944
2025-05-07T09:04:07.073630Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944
2025-05-07T09:04:07.073913Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240
2025-05-07T09:04:07.073992Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944
2025-05-07T09:04:07.074151Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1
2025-05-07T09:04:07.074260Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944
FAKE_COORDINATOR: Erasing txId 1
2025-05-07T09:04:07.076119Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944
2025-05-07T09:04:07.076177Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1]
2025-05-07T09:04:07.076370Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944
2025-05-07T09:04:07.076410Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [ ...
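The tail of this test's log (below) exercises the publish/ack accounting: TTxPublishToSchemeBoard sends one DescribePath per touched path, each TEvUpdateAck retires one in-flight publication, and the "Publication in-flight, count: N" counter shrinks until "Publication complete, notify & remove". A rough C++ model of that accounting, purely illustrative and not YDB's actual implementation:

    #include <cstdint>
    #include <cstdio>
    #include <map>

    // Toy model: a tx tracks pathId -> expected version; an ack with version >= expected
    // retires the entry, and the publication completes when nothing is left in flight.
    int main() {
        std::map<uint64_t, uint64_t> inFlight = {{1, 9}, {2, 18446744073709551615ULL}};
        auto ack = [&](uint64_t pathId, uint64_t version) {
            auto it = inFlight.find(pathId);
            if (it != inFlight.end() && version >= it->second) {
                inFlight.erase(it);
            }
            std::printf("AckPublish pathId %llu version %llu, in-flight: %zu\n",
                        (unsigned long long)pathId, (unsigned long long)version, inFlight.size());
        };
        ack(1, 9);                        // LocalPathId 1 published at version 9, as in the log
        ack(2, 18446744073709551615ULL);  // the dropped path is acked at version uint64-max
        if (inFlight.empty()) {
            std::printf("Publication complete, notify & remove\n");
        }
    }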
SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72075186233409546, LocalPathId: 1] was 4 2025-05-07T09:04:11.657160Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72075186233409546, LocalPathId: 2] was 3 2025-05-07T09:04:11.658582Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 104:0, at schemeshard: 72075186233409546 2025-05-07T09:04:11.658695Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 104:0, at schemeshard: 72075186233409546 2025-05-07T09:04:11.659880Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72075186233409546 2025-05-07T09:04:11.659942Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72075186233409546, txId: 104, path id: [OwnerId: 72075186233409546, LocalPathId: 1] 2025-05-07T09:04:11.660115Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72075186233409546, txId: 104, path id: [OwnerId: 72075186233409546, LocalPathId: 2] 2025-05-07T09:04:11.660245Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72075186233409546 2025-05-07T09:04:11.660285Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:443:2395], at schemeshard: 72075186233409546, txId: 104, path id: 1 2025-05-07T09:04:11.660322Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:443:2395], at schemeshard: 72075186233409546, txId: 104, path id: 2 2025-05-07T09:04:11.660564Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 104:0, at schemeshard: 72075186233409546 2025-05-07T09:04:11.660600Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1036: NTableState::TProposedWaitParts operationId# 104:0 ProgressState at tablet: 72075186233409546 2025-05-07T09:04:11.660672Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:674: all shard schema changes has been received, operationId: 104:0, at schemeshard: 72075186233409546 2025-05-07T09:04:11.660702Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:686: send schema changes ack message, operation: 104:0, datashard: 72075186233409549, at schemeshard: 72075186233409546 2025-05-07T09:04:11.660736Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 104:0 129 -> 240 2025-05-07T09:04:11.661483Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72075186233409546, msg: Owner: 72075186233409546 Generation: 2 LocalPathId: 1 Version: 9 PathOwnerId: 72075186233409546, cookie: 104 2025-05-07T09:04:11.661567Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72075186233409546, msg: Owner: 72075186233409546 Generation: 2 LocalPathId: 1 Version: 9 PathOwnerId: 72075186233409546, cookie: 104 2025-05-07T09:04:11.661606Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72075186233409546, txId: 104 2025-05-07T09:04:11.661635Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72075186233409546, txId: 104, pathId: [OwnerId: 72075186233409546, LocalPathId: 1], version: 9 2025-05-07T09:04:11.661678Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72075186233409546, LocalPathId: 1] was 5 2025-05-07T09:04:11.662296Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72075186233409546, msg: Owner: 72075186233409546 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72075186233409546, cookie: 104 2025-05-07T09:04:11.662356Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72075186233409546, msg: Owner: 72075186233409546 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72075186233409546, cookie: 104 2025-05-07T09:04:11.662372Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72075186233409546, txId: 104 2025-05-07T09:04:11.662393Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72075186233409546, txId: 104, pathId: [OwnerId: 72075186233409546, LocalPathId: 2], version: 18446744073709551615 2025-05-07T09:04:11.662445Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72075186233409546, LocalPathId: 2] was 4 2025-05-07T09:04:11.662500Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 104, ready parts: 0/1, is published: true 2025-05-07T09:04:11.664614Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 104:0, at schemeshard: 72075186233409546 2025-05-07T09:04:11.664658Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_table.cpp:414: TDropTable TProposedDeletePart operationId: 104:0 ProgressState, at schemeshard: 72075186233409546 2025-05-07T09:04:11.664996Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove table for pathId [OwnerId: 72075186233409546, LocalPathId: 2] was 3 2025-05-07T09:04:11.665177Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#104:0 progress is 1/1 2025-05-07T09:04:11.665207Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-05-07T09:04:11.665236Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#104:0 progress is 1/1 2025-05-07T09:04:11.665261Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-05-07T09:04:11.665297Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 104, ready parts: 1/1, is published: true 2025-05-07T09:04:11.665369Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:552:2491] message: TxId: 104 2025-05-07T09:04:11.665416Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-05-07T09:04:11.665442Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 104:0 2025-05-07T09:04:11.665463Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 104:0 2025-05-07T09:04:11.665545Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72075186233409546, LocalPathId: 2] was 2 2025-05-07T09:04:11.666510Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72075186233409546 2025-05-07T09:04:11.666550Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72075186233409546, txId: 0, path id: [OwnerId: 72075186233409546, LocalPathId: 1] 2025-05-07T09:04:11.667293Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72075186233409546, cookie: 104 2025-05-07T09:04:11.667417Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72075186233409546, cookie: 104 2025-05-07T09:04:11.668190Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72075186233409546 2025-05-07T09:04:11.668252Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:443:2395], at schemeshard: 72075186233409546, txId: 0, path id: 1 2025-05-07T09:04:11.668323Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 104: got EvNotifyTxCompletionResult 2025-05-07T09:04:11.668350Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 104: satisfy waiter [1:744:2661] 2025-05-07T09:04:11.668863Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72075186233409546, msg: Owner: 72075186233409546 Generation: 2 LocalPathId: 1 Version: 10 PathOwnerId: 72075186233409546, cookie: 0 TestWaitNotification: OK eventTxId 104 2025-05-07T09:04:11.669539Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/SomeDatabase" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72075186233409546 2025-05-07T09:04:11.669683Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72075186233409546 describe path "/MyRoot/SomeDatabase" took 137us result status StatusSuccess 2025-05-07T09:04:11.670048Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/SomeDatabase" PathDescription { Self { Name: "MyRoot/SomeDatabase" PathId: 1 SchemeshardId: 72075186233409546 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 10 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 10 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 
SubDomainVersion: 2 SubDomainStateVersion: 2 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 2 ProcessingParams { Version: 2 PlanResolution: 50 Coordinators: 72075186233409547 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409548 SchemeShard: 72075186233409546 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "quoted_storage_pool" Kind: "quoted_storage_pool_kind" } StoragePools { Name: "unquoted_storage_pool" Kind: "unquoted_storage_pool_kind" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } StoragePoolsUsage { PoolKind: "unquoted_storage_pool_kind" TotalSize: 0 DataSize: 0 IndexSize: 0 } StoragePoolsUsage { PoolKind: "quoted_storage_pool_kind" TotalSize: 0 DataSize: 0 IndexSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 DatabaseQuotas { storage_quotas { unit_kind: "quoted_storage_pool_kind" data_size_hard_quota: 1 } } SecurityState { Audience: "/MyRoot/SomeDatabase" } } } PathId: 1 PathOwnerId: 72075186233409546, at schemeshard: 72075186233409546
>> TStoragePoolsQuotasTest::QuoteNonexistentPool-IsExternalSubdomain-true [GOOD]
>> TStoragePoolsQuotasTest::DifferentQuotasInteraction
>> TSchemeShardSubDomainTest::SimultaneousCreateForceDrop [GOOD]
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::RmDir [GOOD]
Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140] 2025-05-07T09:04:11.476746Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T09:04:11.476828Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:04:11.476887Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T09:04:11.476928Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T09:04:11.476991Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T09:04:11.477023Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T09:04:11.477072Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:04:11.477133Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval#
604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T09:04:11.477709Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T09:04:11.478002Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T09:04:11.541122Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T09:04:11.541172Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:04:11.553468Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T09:04:11.553626Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T09:04:11.553765Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T09:04:11.558551Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T09:04:11.558816Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T09:04:11.559355Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:11.559491Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T09:04:11.562034Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:11.563267Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:11.563320Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:11.563373Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T09:04:11.563409Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:11.563501Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T09:04:11.563709Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T09:04:11.569796Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T09:04:11.685125Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 
, at schemeshard: 72057594046678944 2025-05-07T09:04:11.685386Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:11.685615Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T09:04:11.685883Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T09:04:11.685987Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:11.689660Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:11.689810Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T09:04:11.690053Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:11.690127Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T09:04:11.690168Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T09:04:11.690204Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T09:04:11.692414Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:11.692474Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T09:04:11.692527Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T09:04:11.694415Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:11.694474Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:11.694519Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:11.694589Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T09:04:11.698583Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 
IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T09:04:11.700781Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T09:04:11.701001Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T09:04:11.702141Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:11.702301Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:04:11.702352Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:11.702677Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T09:04:11.702738Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:11.702947Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T09:04:11.703022Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T09:04:11.705378Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:11.705438Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:11.705637Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:11.705686Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 
meshard_impl.cpp:2492: Change state for txid 100:0 128 -> 240 2025-05-07T09:04:11.983115Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 100:0, at tablet# 72057594046678944 2025-05-07T09:04:11.983297Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T09:04:11.983499Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 8 2025-05-07T09:04:11.983562Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 100 2025-05-07T09:04:11.985881Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:11.985949Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 100, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:11.986186Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 100, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-05-07T09:04:11.986320Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:11.986416Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:205:2207], at schemeshard: 72057594046678944, txId: 100, path id: 1 2025-05-07T09:04:11.986472Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:205:2207], at schemeshard: 72057594046678944, txId: 100, path id: 2 2025-05-07T09:04:11.986542Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 100:0, at schemeshard: 72057594046678944 2025-05-07T09:04:11.986589Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046678944] TDone opId# 100:0 ProgressState 2025-05-07T09:04:11.986696Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#100:0 progress is 1/1 2025-05-07T09:04:11.986734Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 100 ready parts: 1/1 2025-05-07T09:04:11.986779Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#100:0 progress is 1/1 2025-05-07T09:04:11.986813Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 100 ready parts: 1/1 2025-05-07T09:04:11.986860Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 100, ready parts: 1/1, is published: false 2025-05-07T09:04:11.986944Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 100 ready parts: 1/1 2025-05-07T09:04:11.987000Z node 1 :FLAT_TX_SCHEMESHARD 
NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 100:0 2025-05-07T09:04:11.987047Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 100:0 2025-05-07T09:04:11.987325Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 9 2025-05-07T09:04:11.987374Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 100, publications: 2, subscribers: 1 2025-05-07T09:04:11.987431Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 100, [OwnerId: 72057594046678944, LocalPathId: 1], 5 2025-05-07T09:04:11.987465Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 100, [OwnerId: 72057594046678944, LocalPathId: 2], 3 2025-05-07T09:04:11.988425Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 100 2025-05-07T09:04:11.988825Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 100 2025-05-07T09:04:11.988876Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 100 2025-05-07T09:04:11.988926Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 100, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 5 2025-05-07T09:04:11.988970Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-05-07T09:04:11.989771Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 100 2025-05-07T09:04:11.989852Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 100 2025-05-07T09:04:11.989894Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 100 2025-05-07T09:04:11.989925Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 100, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 3 2025-05-07T09:04:11.989958Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 8 2025-05-07T09:04:11.990148Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 100, subscribers: 1 
2025-05-07T09:04:11.990202Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:212: TTxAckPublishToSchemeBoard Notify send TEvNotifyTxCompletionResult, at schemeshard: 72057594046678944, to actorId: [1:547:2459] 2025-05-07T09:04:11.993491Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 100 2025-05-07T09:04:11.994267Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 100 2025-05-07T09:04:11.994356Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 100: got EvNotifyTxCompletionResult 2025-05-07T09:04:11.994390Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 100: satisfy waiter [1:548:2460] TestWaitNotification: OK eventTxId 100 2025-05-07T09:04:11.994942Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T09:04:11.995192Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 296us result status StatusSuccess 2025-05-07T09:04:11.995752Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0" PathDescription { Self { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 1 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409546 Coordinators: 72075186233409547 Coordinators: 72075186233409548 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409549 Mediators: 72075186233409550 Mediators: 72075186233409551 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 6 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 TestModificationResults wait txId: 101 2025-05-07T09:04:12.002790Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpRmDir Drop { Name: "USER_0" } } TxId: 101 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T09:04:12.003002Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_rmdir.cpp:29: TRmDir Propose, path: 
/MyRoot/USER_0, pathId: 0, opId: 101:0, at schemeshard: 72057594046678944 2025-05-07T09:04:12.003169Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 101:1, propose status:StatusPathIsNotDirectory, reason: Check failed: path: '/MyRoot/USER_0', error: path is not a directory (id: [OwnerId: 72057594046678944, LocalPathId: 2], type: EPathTypeSubDomain, state: EPathStateNoChanges), at schemeshard: 72057594046678944 2025-05-07T09:04:12.010106Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 101, response: Status: StatusPathIsNotDirectory Reason: "Check failed: path: \'/MyRoot/USER_0\', error: path is not a directory (id: [OwnerId: 72057594046678944, LocalPathId: 2], type: EPathTypeSubDomain, state: EPathStateNoChanges)" TxId: 101 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:04:12.010264Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 101, database: /MyRoot, subject: , status: StatusPathIsNotDirectory, reason: Check failed: path: '/MyRoot/USER_0', error: path is not a directory (id: [OwnerId: 72057594046678944, LocalPathId: 2], type: EPathTypeSubDomain, state: EPathStateNoChanges), operation: DROP DIRECTORY, path: /MyRoot/USER_0 TestModificationResult got TxId: 101, wait until txId: 101
>> TSchemeShardSubDomainTest::CreateSubDomainWithoutSomeTablets [GOOD]
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::DeleteAdd [GOOD]
Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140] 2025-05-07T09:04:11.355040Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T09:04:11.355111Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:04:11.355146Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T09:04:11.355182Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T09:04:11.355244Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T09:04:11.355289Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T09:04:11.355344Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:04:11.355414Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s,
CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T09:04:11.356122Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T09:04:11.356480Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T09:04:11.421159Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T09:04:11.421212Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:04:11.433376Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T09:04:11.433528Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T09:04:11.433675Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T09:04:11.438469Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T09:04:11.438704Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T09:04:11.439242Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:11.439375Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T09:04:11.441551Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:11.442633Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:11.442682Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:11.442747Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T09:04:11.442792Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:11.442866Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T09:04:11.443102Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T09:04:11.449470Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T09:04:11.578299Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 
2025-05-07T09:04:11.578523Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:11.578709Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T09:04:11.578928Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T09:04:11.579002Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:11.581160Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:11.581342Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T09:04:11.581629Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:11.581703Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T09:04:11.581747Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T09:04:11.581785Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T09:04:11.583750Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:11.583801Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T09:04:11.583966Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T09:04:11.586210Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:11.586275Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:11.586322Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:11.586396Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T09:04:11.590420Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 
72057594046316545 2025-05-07T09:04:11.592521Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T09:04:11.592756Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T09:04:11.593667Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:11.594007Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:04:11.594068Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:11.594294Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T09:04:11.594338Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:11.594468Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T09:04:11.594561Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T09:04:11.596430Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:11.596474Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:11.596597Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:11.596629Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 
75: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T09:04:12.139486Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 8 2025-05-07T09:04:12.139544Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 102 2025-05-07T09:04:12.142068Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:12.142116Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 102, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:12.142290Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 102, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-05-07T09:04:12.142416Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:12.142461Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:205:2207], at schemeshard: 72057594046678944, txId: 102, path id: 1 2025-05-07T09:04:12.142507Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:205:2207], at schemeshard: 72057594046678944, txId: 102, path id: 3 2025-05-07T09:04:12.142576Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-05-07T09:04:12.142634Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046678944] TDone opId# 102:0 ProgressState 2025-05-07T09:04:12.142768Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#102:0 progress is 1/1 2025-05-07T09:04:12.142808Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-05-07T09:04:12.142862Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#102:0 progress is 1/1 2025-05-07T09:04:12.142928Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-05-07T09:04:12.142978Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: false 2025-05-07T09:04:12.143033Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-05-07T09:04:12.143082Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 102:0 2025-05-07T09:04:12.143119Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 102:0 2025-05-07T09:04:12.143370Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target 
path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 9 2025-05-07T09:04:12.143415Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 102, publications: 2, subscribers: 1 2025-05-07T09:04:12.143468Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 102, [OwnerId: 72057594046678944, LocalPathId: 1], 9 2025-05-07T09:04:12.143504Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 102, [OwnerId: 72057594046678944, LocalPathId: 3], 3 2025-05-07T09:04:12.144401Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 9 PathOwnerId: 72057594046678944, cookie: 102 2025-05-07T09:04:12.144550Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 9 PathOwnerId: 72057594046678944, cookie: 102 2025-05-07T09:04:12.144602Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 102 2025-05-07T09:04:12.144647Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 9 2025-05-07T09:04:12.144700Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-05-07T09:04:12.145669Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 3 PathOwnerId: 72057594046678944, cookie: 102 2025-05-07T09:04:12.145749Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 3 PathOwnerId: 72057594046678944, cookie: 102 2025-05-07T09:04:12.145778Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 102 2025-05-07T09:04:12.145815Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 3 2025-05-07T09:04:12.145855Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 8 2025-05-07T09:04:12.145933Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 102, subscribers: 1 2025-05-07T09:04:12.146036Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:212: TTxAckPublishToSchemeBoard Notify send TEvNotifyTxCompletionResult, at schemeshard: 72057594046678944, to actorId: [1:547:2459] 2025-05-07T09:04:12.148952Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at 
schemeshard: 72057594046678944, cookie: 102 2025-05-07T09:04:12.150304Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-05-07T09:04:12.150407Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-05-07T09:04:12.150450Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:966:2786] TestWaitNotification: OK eventTxId 102 2025-05-07T09:04:12.151042Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T09:04:12.151282Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 214us result status StatusSuccess 2025-05-07T09:04:12.151693Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0" PathDescription { Self { Name: "USER_0" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 102 CreateStep: 5000004 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 1 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409552 Coordinators: 72075186233409553 Coordinators: 72075186233409554 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409555 Mediators: 72075186233409556 Mediators: 72075186233409557 } DomainKey { SchemeShard: 72057594046678944 PathId: 3 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 6 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 3 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:04:12.152266Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T09:04:12.152478Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 227us result status StatusSuccess 2025-05-07T09:04:12.152856Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 
ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 9 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 9 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 7 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "USER_0" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 102 CreateStep: 5000004 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::DeclareAndDelete [GOOD]
Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140] 2025-05-07T09:04:11.960386Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T09:04:11.960463Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:04:11.960503Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T09:04:11.960544Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T09:04:11.960602Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T09:04:11.960632Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T09:04:11.960702Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:04:11.960776Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T09:04:11.961547Z node 1
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T09:04:11.961917Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T09:04:12.044876Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T09:04:12.044925Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:04:12.061712Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T09:04:12.061930Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T09:04:12.062158Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T09:04:12.068663Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T09:04:12.069005Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T09:04:12.069636Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:12.069817Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T09:04:12.072853Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:12.074242Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:12.074306Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:12.074380Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T09:04:12.074428Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:12.074581Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T09:04:12.074812Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T09:04:12.081414Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T09:04:12.224998Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T09:04:12.225211Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: 
TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:12.225395Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T09:04:12.225616Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T09:04:12.225677Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:12.228322Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:12.228466Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T09:04:12.228694Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:12.228751Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T09:04:12.228785Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T09:04:12.228816Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T09:04:12.230647Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:12.230706Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T09:04:12.230772Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T09:04:12.232344Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:12.232393Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:12.232431Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:12.232489Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T09:04:12.235907Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T09:04:12.237525Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T09:04:12.237682Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T09:04:12.238648Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:12.238778Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:04:12.238850Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:12.239160Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T09:04:12.239222Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:12.239404Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T09:04:12.239480Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T09:04:12.243290Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:12.243349Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:12.243566Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:12.243610Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 
LAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5180: ExamineTreeVFS visit path id [OwnerId: 72057594046678944, LocalPathId: 2] name: USER_0 type: EPathTypeSubDomain state: EPathStateDrop stepDropped: 0 droppedTxId: 101 parent: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:12.298960Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5196: ExamineTreeVFS run path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-05-07T09:04:12.299031Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 101:0 128 -> 130 2025-05-07T09:04:12.299118Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T09:04:12.299174Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-05-07T09:04:12.299665Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-05-07T09:04:12.300642Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 FAKE_COORDINATOR: Erasing txId 101 2025-05-07T09:04:12.301855Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:12.301894Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 101, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:12.302038Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 101, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-05-07T09:04:12.302161Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:12.302193Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:205:2207], at schemeshard: 72057594046678944, txId: 101, path id: 1 2025-05-07T09:04:12.302228Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:205:2207], at schemeshard: 72057594046678944, txId: 101, path id: 2 2025-05-07T09:04:12.302555Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 101:0, at schemeshard: 72057594046678944 2025-05-07T09:04:12.302602Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:416: [72057594046678944] TDeleteParts opId# 101:0 ProgressState 2025-05-07T09:04:12.302669Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#101:0 progress is 1/1 2025-05-07T09:04:12.302706Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-05-07T09:04:12.302747Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#101:0 progress is 1/1 2025-05-07T09:04:12.302775Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-05-07T09:04:12.302811Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 101, ready parts: 1/1, is published: false 2025-05-07T09:04:12.302845Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-05-07T09:04:12.302878Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 101:0 2025-05-07T09:04:12.302944Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 101:0 2025-05-07T09:04:12.303021Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-05-07T09:04:12.303071Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 101, publications: 2, subscribers: 0 2025-05-07T09:04:12.303102Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 1], 7 2025-05-07T09:04:12.303135Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 2], 18446744073709551615 2025-05-07T09:04:12.303705Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 101 2025-05-07T09:04:12.303782Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 101 2025-05-07T09:04:12.303811Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 101 2025-05-07T09:04:12.303847Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 7 2025-05-07T09:04:12.303894Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-05-07T09:04:12.304552Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 101 2025-05-07T09:04:12.304660Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 101 2025-05-07T09:04:12.304702Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 101 2025-05-07T09:04:12.304731Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 
72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 18446744073709551615 2025-05-07T09:04:12.304758Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-05-07T09:04:12.304826Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 101, subscribers: 0 2025-05-07T09:04:12.305185Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:205: TTxCleanDroppedSubDomains Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-05-07T09:04:12.305234Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: PersistRemoveSubDomain for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-05-07T09:04:12.305321Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-05-07T09:04:12.305670Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-05-07T09:04:12.305713Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-05-07T09:04:12.305769Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T09:04:12.308814Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-05-07T09:04:12.309125Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-05-07T09:04:12.310304Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-05-07T09:04:12.310398Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 101, wait until txId: 101 TestWaitNotification wait txId: 101 2025-05-07T09:04:12.310614Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 101: send EvNotifyTxCompletion 2025-05-07T09:04:12.310659Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 101 2025-05-07T09:04:12.311103Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 2025-05-07T09:04:12.311193Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-05-07T09:04:12.311230Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:337:2328] TestWaitNotification: OK eventTxId 101 
2025-05-07T09:04:12.311675Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T09:04:12.311860Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 159us result status StatusPathDoesNotExist 2025-05-07T09:04:12.312045Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/USER_0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 >> TSchemeShardSubDomainTest::SimultaneousCreateTenantTableForceDrop >> TSchemeShardSubDomainTest::Restart [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TStoragePoolsQuotasTest::QuoteNonexistentPool-IsExternalSubdomain-true [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140] 2025-05-07T09:04:12.044807Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T09:04:12.044929Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:04:12.044972Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T09:04:12.045014Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T09:04:12.045086Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T09:04:12.045124Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T09:04:12.045186Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:04:12.045527Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, 
InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T09:04:12.046496Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T09:04:12.046917Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T09:04:12.139679Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T09:04:12.139742Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:04:12.159936Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T09:04:12.160124Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T09:04:12.160327Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T09:04:12.166258Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T09:04:12.166566Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T09:04:12.167331Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:12.167539Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T09:04:12.171253Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:12.172676Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:12.172739Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:12.172815Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T09:04:12.172866Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:12.172977Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T09:04:12.173259Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T09:04:12.180375Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T09:04:12.305284Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } 
} } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T09:04:12.305478Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:12.305676Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T09:04:12.305903Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T09:04:12.305983Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:12.307929Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:12.308053Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T09:04:12.308201Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:12.308273Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T09:04:12.308313Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T09:04:12.308340Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T09:04:12.309997Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:12.310043Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T09:04:12.310082Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T09:04:12.311579Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:12.311641Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:12.311691Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:12.311758Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T09:04:12.314635Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 
MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T09:04:12.317266Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T09:04:12.317487Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T09:04:12.318486Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:12.318629Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:04:12.318679Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:12.318970Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T09:04:12.319029Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:12.319229Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T09:04:12.319301Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T09:04:12.321252Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:12.321307Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:12.321470Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:12.321505Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 
meBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-05-07T09:04:12.354052Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-05-07T09:04:12.355268Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:12.355310Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 101, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:12.355444Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 101, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-05-07T09:04:12.355561Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:12.355603Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:205:2207], at schemeshard: 72057594046678944, txId: 101, path id: 1 2025-05-07T09:04:12.355658Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:205:2207], at schemeshard: 72057594046678944, txId: 101, path id: 2 FAKE_COORDINATOR: Erasing txId 101 2025-05-07T09:04:12.356093Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 101:0, at schemeshard: 72057594046678944 2025-05-07T09:04:12.356142Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046678944] TDone opId# 101:0 ProgressState 2025-05-07T09:04:12.356238Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#101:0 progress is 1/1 2025-05-07T09:04:12.356278Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-05-07T09:04:12.356342Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#101:0 progress is 1/1 2025-05-07T09:04:12.356385Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-05-07T09:04:12.356428Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 101, ready parts: 1/1, is published: false 2025-05-07T09:04:12.356474Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-05-07T09:04:12.356517Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 101:0 2025-05-07T09:04:12.356553Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 101:0 2025-05-07T09:04:12.356618Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-05-07T09:04:12.356670Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 101, publications: 2, subscribers: 0 2025-05-07T09:04:12.356721Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 1], 5 2025-05-07T09:04:12.356754Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 2], 3 2025-05-07T09:04:12.357292Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 101 2025-05-07T09:04:12.357389Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 101 2025-05-07T09:04:12.357433Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 101 2025-05-07T09:04:12.357475Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 5 2025-05-07T09:04:12.357524Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-05-07T09:04:12.358266Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 101 2025-05-07T09:04:12.358323Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 101 2025-05-07T09:04:12.358349Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 101 2025-05-07T09:04:12.358378Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 3 2025-05-07T09:04:12.358409Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-05-07T09:04:12.358480Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 101, subscribers: 0 2025-05-07T09:04:12.360919Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-05-07T09:04:12.361843Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 TestModificationResult got TxId: 101, wait until txId: 101 TestModificationResults wait txId: 102 2025-05-07T09:04:12.364456Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: 
ESchemeOpAlterExtSubDomain SubDomain { PlanResolution: 50 Coordinators: 1 Mediators: 1 Name: "SomeDatabase" TimeCastBucketsPerMediator: 2 ExternalSchemeShard: true DatabaseQuotas { storage_quotas { unit_kind: "nonexistent_storage_kind" data_size_hard_quota: 1 } } } } TxId: 102 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T09:04:12.364647Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_extsubdomain.cpp:1102: [72057594046678944] CreateCompatibleAlterExtSubDomain, opId 102:0, feature flag EnableAlterDatabaseCreateHiveFirst 1, tx WorkingDir: "/MyRoot" OperationType: ESchemeOpAlterExtSubDomain SubDomain { PlanResolution: 50 Coordinators: 1 Mediators: 1 Name: "SomeDatabase" TimeCastBucketsPerMediator: 2 ExternalSchemeShard: true DatabaseQuotas { storage_quotas { unit_kind: "nonexistent_storage_kind" data_size_hard_quota: 1 } } } 2025-05-07T09:04:12.364684Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_extsubdomain.cpp:1108: [72057594046678944] CreateCompatibleAlterExtSubDomain, opId 102:0, path /MyRoot/SomeDatabase 2025-05-07T09:04:12.364809Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_just_reject.cpp:47: TReject Propose, opId: 102:0, explain: Invalid AlterExtSubDomain request: Invalid ExtSubDomain request: Malformed subdomain request: cannot set storage quotas of the following kinds: nonexistent_storage_kind, because no storage pool in the subdomain SomeDatabase has the specified kinds. Existing storage kinds are: , at schemeshard: 72057594046678944 2025-05-07T09:04:12.364849Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 102:1, propose status:StatusInvalidParameter, reason: Invalid AlterExtSubDomain request: Invalid ExtSubDomain request: Malformed subdomain request: cannot set storage quotas of the following kinds: nonexistent_storage_kind, because no storage pool in the subdomain SomeDatabase has the specified kinds. Existing storage kinds are: , at schemeshard: 72057594046678944 2025-05-07T09:04:12.366913Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 102, response: Status: StatusInvalidParameter Reason: "Invalid AlterExtSubDomain request: Invalid ExtSubDomain request: Malformed subdomain request: cannot set storage quotas of the following kinds: nonexistent_storage_kind, because no storage pool in the subdomain SomeDatabase has the specified kinds. Existing storage kinds are: " TxId: 102 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:04:12.367056Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 102, database: /MyRoot, subject: , status: StatusInvalidParameter, reason: Invalid AlterExtSubDomain request: Invalid ExtSubDomain request: Malformed subdomain request: cannot set storage quotas of the following kinds: nonexistent_storage_kind, because no storage pool in the subdomain SomeDatabase has the specified kinds. 
Existing storage kinds are: , operation: ALTER DATABASE, path: /MyRoot/SomeDatabase TestModificationResult got TxId: 102, wait until txId: 102 TestWaitNotification wait txId: 101 2025-05-07T09:04:12.367347Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 101: send EvNotifyTxCompletion 2025-05-07T09:04:12.367425Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 101 TestWaitNotification wait txId: 102 2025-05-07T09:04:12.367548Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2025-05-07T09:04:12.367570Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 2025-05-07T09:04:12.368078Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 2025-05-07T09:04:12.368212Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-05-07T09:04:12.368251Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:306:2297] 2025-05-07T09:04:12.368405Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 2025-05-07T09:04:12.368503Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-05-07T09:04:12.368533Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:306:2297] TestWaitNotification: OK eventTxId 101 TestWaitNotification: OK eventTxId 102 >> TSchemeShardSubDomainTest::SimultaneousDeclare ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::SimultaneousCreateForceDrop [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140] 2025-05-07T09:04:12.004967Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T09:04:12.005056Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:04:12.005104Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T09:04:12.005142Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T09:04:12.005197Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T09:04:12.005229Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing 
config: type TxSplitTablePartition, limit 10000 2025-05-07T09:04:12.005304Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:04:12.005386Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T09:04:12.006105Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T09:04:12.006471Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T09:04:12.080538Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T09:04:12.080601Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:04:12.097069Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T09:04:12.097268Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T09:04:12.097432Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T09:04:12.102547Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T09:04:12.102745Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T09:04:12.103238Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:12.103447Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T09:04:12.105495Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:12.106529Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:12.106581Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:12.106638Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T09:04:12.106669Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:12.106741Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T09:04:12.106957Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T09:04:12.112473Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for 
TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T09:04:12.200745Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T09:04:12.200966Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:12.201170Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T09:04:12.201392Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T09:04:12.201467Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:12.203714Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:12.203875Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T09:04:12.204084Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:12.204165Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T09:04:12.204203Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T09:04:12.204235Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T09:04:12.205736Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:12.205777Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T09:04:12.205809Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T09:04:12.207266Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:12.207317Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:12.207356Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 
72057594046678944 2025-05-07T09:04:12.207416Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T09:04:12.210162Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T09:04:12.211644Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T09:04:12.211795Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T09:04:12.212546Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:12.212663Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:04:12.212723Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:12.212946Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T09:04:12.212984Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:12.213121Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T09:04:12.213203Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T09:04:12.214948Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:12.214993Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:12.215120Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:12.215148Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 
ee tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 3 ShardOwnerId: 72057594046678944 ShardLocalIdx: 3, at schemeshard: 72057594046678944 2025-05-07T09:04:12.421176Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5 Forgetting tablet 72075186233409546 2025-05-07T09:04:12.421861Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-05-07T09:04:12.422166Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 2 TxId_Deprecated: 2 TabletID: 72075186233409547 2025-05-07T09:04:12.422815Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5898: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 6 ShardOwnerId: 72057594046678944 ShardLocalIdx: 6, at schemeshard: 72057594046678944 2025-05-07T09:04:12.423015Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 Forgetting tablet 72075186233409548 Forgetting tablet 72075186233409551 2025-05-07T09:04:12.424591Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5898: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 2 ShardOwnerId: 72057594046678944 ShardLocalIdx: 2, at schemeshard: 72057594046678944 2025-05-07T09:04:12.424756Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-05-07T09:04:12.425442Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 4 TxId_Deprecated: 4 TabletID: 72075186233409549 Forgetting tablet 72075186233409547 2025-05-07T09:04:12.426625Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5898: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 4 ShardOwnerId: 72057594046678944 ShardLocalIdx: 4, at schemeshard: 72057594046678944 2025-05-07T09:04:12.426853Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-05-07T09:04:12.427307Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:205: TTxCleanDroppedSubDomains Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-05-07T09:04:12.427365Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: PersistRemoveSubDomain for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-05-07T09:04:12.427512Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 Forgetting tablet 72075186233409549 2025-05-07T09:04:12.428832Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-05-07T09:04:12.428908Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 
2025-05-07T09:04:12.428992Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T09:04:12.430327Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:5 2025-05-07T09:04:12.430387Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:5 tabletId 72075186233409550 2025-05-07T09:04:12.430985Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5699: Failed to connect, to tablet: 72075186233409550, at schemeshard: 72057594046678944 2025-05-07T09:04:12.432822Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:1 2025-05-07T09:04:12.432869Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:1 tabletId 72075186233409546 2025-05-07T09:04:12.433009Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:3 2025-05-07T09:04:12.433036Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:3 tabletId 72075186233409548 2025-05-07T09:04:12.433169Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5699: Failed to connect, to tablet: 72075186233409548, at schemeshard: 72057594046678944 2025-05-07T09:04:12.436246Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:6 2025-05-07T09:04:12.436296Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:6 tabletId 72075186233409551 2025-05-07T09:04:12.436435Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5699: Failed to connect, to tablet: 72075186233409551, at schemeshard: 72057594046678944 2025-05-07T09:04:12.436510Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2 2025-05-07T09:04:12.436536Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:2 tabletId 72075186233409547 2025-05-07T09:04:12.436638Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:4 2025-05-07T09:04:12.436684Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:4 tabletId 72075186233409549 2025-05-07T09:04:12.436924Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-05-07T09:04:12.437025Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5699: Failed to connect, to tablet: 72075186233409549, at schemeshard: 72057594046678944 2025-05-07T09:04:12.437117Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 101, wait until txId: 101 TestWaitNotification wait txId: 100 2025-05-07T09:04:12.437359Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 100: send EvNotifyTxCompletion 
2025-05-07T09:04:12.437406Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 100 TestWaitNotification wait txId: 101 2025-05-07T09:04:12.437529Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 101: send EvNotifyTxCompletion 2025-05-07T09:04:12.437554Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 101 2025-05-07T09:04:12.438101Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 100, at schemeshard: 72057594046678944 2025-05-07T09:04:12.438217Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 100: got EvNotifyTxCompletionResult 2025-05-07T09:04:12.438342Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 100: satisfy waiter [1:596:2503] 2025-05-07T09:04:12.438527Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 2025-05-07T09:04:12.438638Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-05-07T09:04:12.438666Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:596:2503] TestWaitNotification: OK eventTxId 100 TestWaitNotification: OK eventTxId 101 2025-05-07T09:04:12.439128Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T09:04:12.439308Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 198us result status StatusPathDoesNotExist 2025-05-07T09:04:12.439514Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/USER_0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-05-07T09:04:12.439933Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T09:04:12.440136Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 180us result status StatusSuccess 2025-05-07T09:04:12.440551Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: 
TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 6 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 6 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 4 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944
>> TCdcStreamTests::RebootSchemeShard [GOOD]
>> TCdcStreamTests::StreamOnIndexTableNegative
>> TSchemeShardSubDomainTest::SchemeLimitsCreatePq [GOOD]
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::CreateSubDomainWithoutSomeTablets [GOOD]
Test command err:
Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140] 2025-05-07T09:04:12.471035Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T09:04:12.471118Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:04:12.471162Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T09:04:12.471200Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T09:04:12.471267Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T09:04:12.471297Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T09:04:12.471365Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:04:12.471449Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10,
DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T09:04:12.472179Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T09:04:12.472571Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T09:04:12.558396Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T09:04:12.558460Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:04:12.575780Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T09:04:12.576011Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T09:04:12.576197Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T09:04:12.582870Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T09:04:12.583262Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T09:04:12.584097Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:12.584313Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T09:04:12.587748Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:12.589357Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:12.589426Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:12.589505Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T09:04:12.589550Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:12.589661Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T09:04:12.589911Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T09:04:12.596901Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T09:04:12.732763Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 
TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T09:04:12.732940Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:12.733097Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T09:04:12.733267Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T09:04:12.733317Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:12.738786Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:12.738937Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T09:04:12.739098Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:12.739146Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T09:04:12.739174Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T09:04:12.739201Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T09:04:12.740969Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:12.741017Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T09:04:12.741050Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T09:04:12.742347Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:12.742400Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:12.742434Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:12.742490Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T09:04:12.745120Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 
MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T09:04:12.746677Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T09:04:12.746954Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T09:04:12.747820Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:12.747923Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:04:12.747958Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:12.748172Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T09:04:12.748214Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:12.748363Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T09:04:12.748439Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T09:04:12.750118Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:12.750174Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:12.750307Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:12.750344Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 
EMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-05-07T09:04:12.752893Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1 2025-05-07T09:04:12.752920Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 3 2025-05-07T09:04:12.752951Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T09:04:12.753078Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1, subscribers: 0 2025-05-07T09:04:12.756035Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1 2025-05-07T09:04:12.756583Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1, at schemeshard: 72057594046678944 TestModificationResults wait txId: 100 2025-05-07T09:04:12.759664Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateSubDomain SubDomain { PlanResolution: 50 Coordinators: 1 Name: "USER_1" TimeCastBucketsPerMediator: 2 StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 100 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T09:04:12.759910Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_subdomain.cpp:92: TCreateSubDomain Propose, path: /MyRoot/USER_1, opId: 100:0, at schemeshard: 72057594046678944 2025-05-07T09:04:12.760009Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 100:1, propose status:StatusInvalidParameter, reason: Malformed subdomain request: cant create subdomain with coordinators, but no mediators, at schemeshard: 72057594046678944 2025-05-07T09:04:12.760404Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:434: actor# [1:268:2259] Bootstrap 2025-05-07T09:04:12.774953Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:453: actor# [1:268:2259] Become StateWork (SchemeCache [1:273:2264]) 2025-05-07T09:04:12.775580Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:268:2259] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-05-07T09:04:12.778471Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 100, response: Status: StatusInvalidParameter Reason: "Malformed subdomain request: cant create subdomain with coordinators, but no mediators" TxId: 100 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:04:12.778613Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 100, database: /MyRoot, subject: , status: StatusInvalidParameter, reason: Malformed subdomain request: cant create subdomain with coordinators, but no mediators, operation: CREATE DATABASE, path: /MyRoot/USER_1 
2025-05-07T09:04:12.779213Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 TestModificationResult got TxId: 100, wait until txId: 100 TestModificationResults wait txId: 101 2025-05-07T09:04:12.782075Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateSubDomain SubDomain { PlanResolution: 50 Mediators: 1 Name: "USER_2" TimeCastBucketsPerMediator: 2 StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 101 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T09:04:12.782326Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_subdomain.cpp:92: TCreateSubDomain Propose, path: /MyRoot/USER_2, opId: 101:0, at schemeshard: 72057594046678944 2025-05-07T09:04:12.782442Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 101:1, propose status:StatusInvalidParameter, reason: Malformed subdomain request: cant create subdomain with mediators, but no coordinators, at schemeshard: 72057594046678944 2025-05-07T09:04:12.784378Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 101, response: Status: StatusInvalidParameter Reason: "Malformed subdomain request: cant create subdomain with mediators, but no coordinators" TxId: 101 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:04:12.784523Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 101, database: /MyRoot, subject: , status: StatusInvalidParameter, reason: Malformed subdomain request: cant create subdomain with mediators, but no coordinators, operation: CREATE DATABASE, path: /MyRoot/USER_2 TestModificationResult got TxId: 101, wait until txId: 101 TestWaitNotification wait txId: 100 2025-05-07T09:04:12.784803Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 100: send EvNotifyTxCompletion 2025-05-07T09:04:12.784842Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 100 TestWaitNotification wait txId: 101 2025-05-07T09:04:12.784939Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 101: send EvNotifyTxCompletion 2025-05-07T09:04:12.784962Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 101 2025-05-07T09:04:12.785474Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 100, at schemeshard: 72057594046678944 2025-05-07T09:04:12.785574Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 100: got EvNotifyTxCompletionResult 2025-05-07T09:04:12.785606Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 100: satisfy waiter [1:287:2278] 2025-05-07T09:04:12.785732Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 2025-05-07T09:04:12.785795Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-05-07T09:04:12.785830Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:287:2278] TestWaitNotification: OK eventTxId 100 TestWaitNotification: OK eventTxId 101 2025-05-07T09:04:12.786238Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T09:04:12.786420Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_1" took 202us result status StatusPathDoesNotExist 2025-05-07T09:04:12.786635Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_1\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/USER_1" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-05-07T09:04:12.787155Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_2" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T09:04:12.787311Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_2" took 153us result status StatusPathDoesNotExist 2025-05-07T09:04:12.787441Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_2\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/USER_2" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-05-07T09:04:12.787859Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T09:04:12.788041Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 191us result status StatusSuccess 2025-05-07T09:04:12.788375Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: 
schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944
>> TSchemeShardSubDomainTest::CreateWithoutTimeCastBuckets
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::Restart [GOOD]
Test command err:
Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140] 2025-05-07T09:04:12.402041Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T09:04:12.402144Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:04:12.402186Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T09:04:12.402225Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T09:04:12.402294Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T09:04:12.402341Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T09:04:12.402402Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:04:12.402485Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s,
CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T09:04:12.403288Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T09:04:12.403665Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T09:04:12.491364Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T09:04:12.491425Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:04:12.509865Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T09:04:12.510080Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T09:04:12.510242Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T09:04:12.523459Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T09:04:12.523808Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T09:04:12.524481Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:12.524675Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T09:04:12.528400Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:12.529812Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:12.529875Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:12.529950Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T09:04:12.530018Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:12.530326Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T09:04:12.530583Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T09:04:12.537806Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T09:04:12.683218Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 
2025-05-07T09:04:12.683451Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:12.683657Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T09:04:12.683894Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T09:04:12.683976Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:12.686303Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:12.686451Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T09:04:12.686647Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:12.686704Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T09:04:12.686744Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T09:04:12.686775Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T09:04:12.688795Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:12.688857Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T09:04:12.688897Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T09:04:12.690710Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:12.690760Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:12.690803Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:12.690859Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T09:04:12.694422Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 
72057594046316545 2025-05-07T09:04:12.696408Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T09:04:12.696628Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T09:04:12.697612Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:12.697761Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:04:12.697806Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:12.698094Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T09:04:12.698153Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:12.698319Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T09:04:12.698396Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T09:04:12.700649Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:12.700710Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:12.700901Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:12.700937Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 
__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T09:04:12.880443Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T09:04:12.880769Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T09:04:12.893699Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T09:04:12.894854Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T09:04:12.895031Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T09:04:12.895210Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T09:04:12.895257Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:04:12.895485Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T09:04:12.896197Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1383: TTxInit for Paths, read records: 2, at schemeshard: 72057594046678944 2025-05-07T09:04:12.896333Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:319: AttachChild: child attached as only one child to the parent, parent id: [OwnerId: 72057594046678944, LocalPathId: 1], parent name: MyRoot, child name: USER_0, child id: [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-05-07T09:04:12.896429Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1457: TTxInit for UserAttributes, read records: 0, at schemeshard: 72057594046678944 2025-05-07T09:04:12.896495Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1483: TTxInit for UserAttributesAlterData, read records: 0, at schemeshard: 72057594046678944 2025-05-07T09:04:12.896701Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 0 2025-05-07T09:04:12.896958Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1785: TTxInit for Tables, read records: 0, at schemeshard: 72057594046678944 2025-05-07T09:04:12.897045Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__root_data_erasure_manager.cpp:450: [RootDataErasureManager] Restore: Generation# 0, Status# 0, WakeupInterval# 604800 s, NumberDataErasureTenantsInRunning# 0 2025-05-07T09:04:12.897261Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2011: TTxInit for Columns, read records: 0, at schemeshard: 72057594046678944 2025-05-07T09:04:12.897393Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2071: TTxInit for ColumnsAlters, read records: 0, at schemeshard: 72057594046678944 2025-05-07T09:04:12.897494Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2129: TTxInit for Shards, read records: 3, at schemeshard: 72057594046678944 2025-05-07T09:04:12.897538Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-05-07T09:04:12.897565Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, 
LocalPathId: 2] was 2 2025-05-07T09:04:12.897601Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-05-07T09:04:12.897685Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2215: TTxInit for TablePartitions, read records: 0, at schemeshard: 72057594046678944 2025-05-07T09:04:12.897787Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2281: TTxInit for TableShardPartitionConfigs, read records: 0, at schemeshard: 72057594046678944 2025-05-07T09:04:12.897996Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2431: TTxInit for ChannelsBinding, read records: 9, at schemeshard: 72057594046678944 2025-05-07T09:04:12.898288Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2810: TTxInit for TableIndexes, read records: 0, at schemeshard: 72057594046678944 2025-05-07T09:04:12.898423Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2889: TTxInit for TableIndexKeys, read records: 0, at schemeshard: 72057594046678944 2025-05-07T09:04:12.898826Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3387: TTxInit for KesusInfos, read records: 0, at schemeshard: 72057594046678944 2025-05-07T09:04:12.898910Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3423: TTxInit for KesusAlters, read records: 0, at schemeshard: 72057594046678944 2025-05-07T09:04:12.899097Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3637: TTxInit for TxShards, read records: 0, at schemeshard: 72057594046678944 2025-05-07T09:04:12.899191Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3782: TTxInit for ShardToDelete, read records: 0, at schemeshard: 72057594046678944 2025-05-07T09:04:12.899292Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3799: TTxInit for BackupSettings, read records: 0, at schemeshard: 72057594046678944 2025-05-07T09:04:12.899480Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3959: TTxInit for ShardBackupStatus, read records: 0, at schemeshard: 72057594046678944 2025-05-07T09:04:12.899556Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3975: TTxInit for CompletedBackup, read records: 0, at schemeshard: 72057594046678944 2025-05-07T09:04:12.899686Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4260: TTxInit for Publications, read records: 0, at schemeshard: 72057594046678944 2025-05-07T09:04:12.899917Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4558: IndexBuild , records: 0, at schemeshard: 72057594046678944 2025-05-07T09:04:12.900093Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4705: SnapshotTables: snapshots: 0 tables: 0, at schemeshard: 72057594046678944 2025-05-07T09:04:12.900145Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4732: SnapshotSteps: snapshots: 0, at schemeshard: 72057594046678944 2025-05-07T09:04:12.900198Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4759: LongLocks: records: 0, at schemeshard: 72057594046678944 2025-05-07T09:04:12.906978Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:12.907065Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:12.907412Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 
2025-05-07T09:04:12.907464Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:12.907517Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T09:04:12.908558Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594046678944 is [1:465:2415] sender: [1:525:2058] recipient: [1:15:2062] 2025-05-07T09:04:12.961541Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T09:04:12.961768Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 251us result status StatusSuccess 2025-05-07T09:04:12.962192Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0" PathDescription { Self { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 1 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 Mediators: 72075186233409548 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:04:12.962707Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T09:04:12.962928Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 177us result status StatusSuccess 2025-05-07T09:04:12.963244Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 
ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 3 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944
>> ExternalBlobsMultipleChannels::WithNewColumnFamilyAndCompaction [GOOD]
>> ExternalBlobsMultipleChannels::SingleChannel [GOOD]
>> ExternalBlobsMultipleChannels::ExtBlobsMultipleColumns [GOOD]
>> TSchemeShardSubDomainTest::SimultaneousDeclareAndDefine
>> TPersQueueTest::SetupLockSession [GOOD]
>> TPersQueueTest::StreamReadCreateAndDestroyMsgs
>> ExternalBlobsMultipleChannels::Simple [GOOD]
>> TSchemeShardSubDomainTest::SimultaneousDeclare [GOOD]
>> TSchemeShardSubDomainTest::CreateSubDomainWithoutTabletsThenDrop
>> TSchemeShardSubDomainTest::CopyRejects
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::SchemeLimitsCreatePq [GOOD]
Test command err:
Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140] 2025-05-07T09:04:12.339164Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T09:04:12.339256Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:04:12.339301Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T09:04:12.339337Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T09:04:12.339409Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T09:04:12.339442Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit
10000 2025-05-07T09:04:12.339495Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:04:12.339571Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T09:04:12.340288Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T09:04:12.340646Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T09:04:12.429341Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T09:04:12.429396Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:04:12.447412Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T09:04:12.447578Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T09:04:12.447763Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T09:04:12.453193Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T09:04:12.453487Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T09:04:12.454214Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:12.454385Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T09:04:12.457028Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:12.458445Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:12.458514Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:12.458591Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T09:04:12.458636Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:12.458748Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T09:04:12.459012Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T09:04:12.465310Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2150] 
sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T09:04:12.601423Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T09:04:12.601651Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:12.601863Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T09:04:12.602146Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T09:04:12.602207Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:12.606932Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:12.607093Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T09:04:12.607305Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:12.607370Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T09:04:12.607406Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T09:04:12.607458Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T09:04:12.612977Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:12.613060Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T09:04:12.613118Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T09:04:12.619041Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:12.619113Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:12.619160Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:12.619224Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T09:04:12.622885Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T09:04:12.624824Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T09:04:12.625035Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T09:04:12.626129Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:12.626302Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:04:12.626347Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:12.626573Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T09:04:12.626620Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:12.626792Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T09:04:12.626863Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T09:04:12.628913Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:12.628972Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:12.629133Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:12.629173Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 
esult> execute, operationId: 104:0, at schemeshard: 72057594046678944, message: TabletId: 72075186233409551 TxId: 104 Status: OK 2025-05-07T09:04:13.255939Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_pq.cpp:643: NPQState::TPropose operationId# 104:0 HandleReply TEvProposeTransactionAttachResult triggers early, at schemeshard: 72057594046678944 message# TabletId: 72075186233409551 TxId: 104 Status: OK 2025-05-07T09:04:13.255977Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_pq.cpp:648: NPQState::TPropose operationId# 104:0 HandleReply TEvProposeTransactionAttachResult CollectPQConfigChanged: false 2025-05-07T09:04:13.256012Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_pq.cpp:754: NPQState::TPropose operationId# 104:0 can't persist state: ShardsInProgress is not empty, remain: 2 FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000003 2025-05-07T09:04:13.261497Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 104:0, at schemeshard: 72057594046678944 2025-05-07T09:04:13.261614Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 104:0, at schemeshard: 72057594046678944 FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000003 FAKE_COORDINATOR: Erasing txId 104 2025-05-07T09:04:13.355906Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation FindRelatedPartByTabletId, TxId: 104, tablet: 72075186233409550, partId: 0 2025-05-07T09:04:13.356071Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 104:0, at schemeshard: 72057594046678944, message: Origin: 72075186233409550 Status: COMPLETE TxId: 104 Step: 5000003 2025-05-07T09:04:13.356142Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_pq.cpp:624: NPQState::TPropose operationId# 104:0 HandleReply TEvProposeTransactionResult triggers early, at schemeshard: 72057594046678944 message# Origin: 72075186233409550 Status: COMPLETE TxId: 104 Step: 5000003 2025-05-07T09:04:13.356211Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_pq.cpp:265: CollectPQConfigChanged accept TEvPersQueue::TEvProposeTransactionResult, operationId: 104:0, shardIdx: 72057594046678944:5, shard: 72075186233409550, left await: 1, txState.State: Propose, txState.ReadyForNotifications: 0, at schemeshard: 72057594046678944 2025-05-07T09:04:13.356255Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_pq.cpp:629: NPQState::TPropose operationId# 104:0 HandleReply TEvProposeTransactionResult CollectPQConfigChanged: false 2025-05-07T09:04:13.356289Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_pq.cpp:754: NPQState::TPropose operationId# 104:0 can't persist state: ShardsInProgress is not empty, remain: 1 2025-05-07T09:04:13.356708Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation FindRelatedPartByTabletId, TxId: 104, tablet: 72075186233409551, partId: 0 2025-05-07T09:04:13.356804Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 104:0, at schemeshard: 72057594046678944, message: Origin: 72075186233409551 Status: COMPLETE TxId: 104 Step: 5000003 2025-05-07T09:04:13.356849Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_pq.cpp:624: NPQState::TPropose operationId# 104:0 HandleReply TEvProposeTransactionResult triggers early, at schemeshard: 
72057594046678944 message# Origin: 72075186233409551 Status: COMPLETE TxId: 104 Step: 5000003 2025-05-07T09:04:13.356879Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_pq.cpp:265: CollectPQConfigChanged accept TEvPersQueue::TEvProposeTransactionResult, operationId: 104:0, shardIdx: 72057594046678944:6, shard: 72075186233409551, left await: 0, txState.State: Propose, txState.ReadyForNotifications: 0, at schemeshard: 72057594046678944 2025-05-07T09:04:13.356907Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_pq.cpp:629: NPQState::TPropose operationId# 104:0 HandleReply TEvProposeTransactionResult CollectPQConfigChanged: true 2025-05-07T09:04:13.357029Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 104:0 128 -> 240 2025-05-07T09:04:13.357249Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-05-07T09:04:13.357343Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 5 2025-05-07T09:04:13.361509Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 104:0, at schemeshard: 72057594046678944 2025-05-07T09:04:13.361745Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 104:0, at schemeshard: 72057594046678944 2025-05-07T09:04:13.361869Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:13.361893Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 104, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:13.362076Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 104, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-05-07T09:04:13.362299Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:13.362327Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:337:2313], at schemeshard: 72057594046678944, txId: 104, path id: 1 2025-05-07T09:04:13.362356Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:337:2313], at schemeshard: 72057594046678944, txId: 104, path id: 3 2025-05-07T09:04:13.362729Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 104:0, at schemeshard: 72057594046678944 2025-05-07T09:04:13.362762Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046678944] TDone opId# 104:0 ProgressState 2025-05-07T09:04:13.362847Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#104:0 progress is 1/1 2025-05-07T09:04:13.362881Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-05-07T09:04:13.362934Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__operation_side_effects.cpp:906: Part operation is done id#104:0 progress is 1/1 2025-05-07T09:04:13.362965Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-05-07T09:04:13.363001Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 104, ready parts: 1/1, is published: false 2025-05-07T09:04:13.363044Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-05-07T09:04:13.363111Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 104:0 2025-05-07T09:04:13.363152Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 104:0 2025-05-07T09:04:13.363291Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 6 2025-05-07T09:04:13.363320Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 104, publications: 2, subscribers: 0 2025-05-07T09:04:13.363347Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 104, [OwnerId: 72057594046678944, LocalPathId: 1], 7 2025-05-07T09:04:13.363374Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 104, [OwnerId: 72057594046678944, LocalPathId: 3], 2 2025-05-07T09:04:13.364030Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 3 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 104 2025-05-07T09:04:13.364087Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 3 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 104 2025-05-07T09:04:13.364120Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 104 2025-05-07T09:04:13.364173Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 104, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 7 2025-05-07T09:04:13.364225Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-05-07T09:04:13.364657Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 3 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 104 2025-05-07T09:04:13.364715Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 3 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 104 2025-05-07T09:04:13.364736Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 104 2025-05-07T09:04:13.364754Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 104, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 2 2025-05-07T09:04:13.364774Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 5 2025-05-07T09:04:13.364822Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 104, subscribers: 0 2025-05-07T09:04:13.376181Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 2025-05-07T09:04:13.376487Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 TestModificationResult got TxId: 104, wait until txId: 104 >> TSchemeShardSubDomainTest::SimultaneousCreateTenantTableForceDrop [GOOD] >> TCdcStreamTests::StreamOnIndexTableNegative [GOOD] >> TCdcStreamTests::StreamOnIndexTable >> TSchemeShardSubDomainTest::SchemeLimitsRejectsWithIndexedTables >> DemoTx::Scenario_3 [GOOD] >> TSchemeShardSubDomainTest::CreateSubDomainWithoutTabletsThenForceDrop >> ExternalBlobsMultipleChannels::WithCompaction [GOOD] >> TStoragePoolsQuotasTest::QuoteNonexistentPool-IsExternalSubdomain-false >> TStoragePoolsQuotasTest::DisableWritesToDatabase-IsExternalSubdomain-false [GOOD] >> TSchemeShardSubDomainTest::CreateWithoutTimeCastBuckets [GOOD] >> TSchemeShardSubDomainTest::SimultaneousCreateTableForceDrop >> TSchemeShardSubDomainTest::DeleteAndRestart ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::SimultaneousDeclare [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140] 2025-05-07T09:04:13.666756Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T09:04:13.666844Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:04:13.666886Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T09:04:13.666950Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T09:04:13.667016Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T09:04:13.667054Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T09:04:13.667104Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue 
configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:04:13.667182Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T09:04:13.667926Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T09:04:13.668311Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T09:04:13.751435Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T09:04:13.751494Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:04:13.770575Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T09:04:13.770783Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T09:04:13.770992Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T09:04:13.776913Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T09:04:13.777256Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T09:04:13.777950Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:13.778176Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T09:04:13.781028Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:13.782535Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:13.782600Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:13.782668Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T09:04:13.782710Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:13.782817Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T09:04:13.783080Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T09:04:13.789834Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T09:04:13.938259Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T09:04:13.938521Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:13.938755Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T09:04:13.939039Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T09:04:13.939103Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:13.941499Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:13.941640Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T09:04:13.941864Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:13.941931Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T09:04:13.941993Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T09:04:13.942029Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T09:04:13.943968Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:13.944027Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T09:04:13.944087Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T09:04:13.945808Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:13.945865Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:13.945927Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:13.946012Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 
1/1 2025-05-07T09:04:13.949817Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T09:04:13.951976Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T09:04:13.952181Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T09:04:13.953226Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:13.953362Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:04:13.953408Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:13.953733Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T09:04:13.953796Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:13.954002Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T09:04:13.954080Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T09:04:13.956192Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:13.956255Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:13.956437Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:13.956496Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 
:04:14.001058Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 100 ready parts: 1/1 2025-05-07T09:04:14.001191Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 100 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T09:04:14.002872Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 100:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:100 msg type: 269090816 2025-05-07T09:04:14.003021Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 100, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 100 at step: 5000002 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000001 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 100 at step: 5000002 2025-05-07T09:04:14.003476Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000002, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:14.003591Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 100 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000002 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:04:14.003635Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 100:0, at tablet# 72057594046678944 2025-05-07T09:04:14.003884Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 100:0 128 -> 240 2025-05-07T09:04:14.003934Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 100:0, at tablet# 72057594046678944 2025-05-07T09:04:14.004107Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T09:04:14.004167Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-05-07T09:04:14.004215Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 100 2025-05-07T09:04:14.006201Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:14.006239Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 100, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:14.006393Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 100, path id: [OwnerId: 
72057594046678944, LocalPathId: 2] 2025-05-07T09:04:14.006533Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:14.006580Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:205:2207], at schemeshard: 72057594046678944, txId: 100, path id: 1 2025-05-07T09:04:14.006622Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:205:2207], at schemeshard: 72057594046678944, txId: 100, path id: 2 2025-05-07T09:04:14.006950Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 100:0, at schemeshard: 72057594046678944 2025-05-07T09:04:14.006999Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046678944] TDone opId# 100:0 ProgressState 2025-05-07T09:04:14.007095Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#100:0 progress is 1/1 2025-05-07T09:04:14.007129Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 100 ready parts: 1/1 2025-05-07T09:04:14.007165Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#100:0 progress is 1/1 2025-05-07T09:04:14.007195Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 100 ready parts: 1/1 2025-05-07T09:04:14.007237Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 100, ready parts: 1/1, is published: false 2025-05-07T09:04:14.007285Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 100 ready parts: 1/1 2025-05-07T09:04:14.007330Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 100:0 2025-05-07T09:04:14.007382Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 100:0 2025-05-07T09:04:14.007456Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-05-07T09:04:14.007490Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 100, publications: 2, subscribers: 1 2025-05-07T09:04:14.007524Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 100, [OwnerId: 72057594046678944, LocalPathId: 1], 5 2025-05-07T09:04:14.007551Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 100, [OwnerId: 72057594046678944, LocalPathId: 2], 3 2025-05-07T09:04:14.008365Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 100 2025-05-07T09:04:14.008516Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 100 2025-05-07T09:04:14.008557Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 100 2025-05-07T09:04:14.008596Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 100, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 5 2025-05-07T09:04:14.008651Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-05-07T09:04:14.009768Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 100 2025-05-07T09:04:14.009897Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 100 2025-05-07T09:04:14.009931Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 100 2025-05-07T09:04:14.009958Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 100, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 3 2025-05-07T09:04:14.010021Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-05-07T09:04:14.010110Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 100, subscribers: 1 2025-05-07T09:04:14.010152Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:212: TTxAckPublishToSchemeBoard Notify send TEvNotifyTxCompletionResult, at schemeshard: 72057594046678944, to actorId: [1:274:2265] 2025-05-07T09:04:14.013150Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 100 2025-05-07T09:04:14.013713Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 100 2025-05-07T09:04:14.013814Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 100: got EvNotifyTxCompletionResult 2025-05-07T09:04:14.013847Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 100: satisfy waiter [1:275:2266] TestWaitNotification: OK eventTxId 101 TestWaitNotification: OK eventTxId 100 2025-05-07T09:04:14.014414Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T09:04:14.014668Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 218us result status StatusSuccess 2025-05-07T09:04:14.015114Z node 1 
:SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0" PathDescription { Self { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 1 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 0 TimeCastBucketsPerMediator: 0 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::SimultaneousCreateTenantTableForceDrop [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140] 2025-05-07T09:04:13.566163Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T09:04:13.566259Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:04:13.566304Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T09:04:13.566341Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T09:04:13.566406Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T09:04:13.566447Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T09:04:13.566514Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:04:13.566604Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 
2025-05-07T09:04:13.567437Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T09:04:13.567828Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T09:04:13.648827Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T09:04:13.648899Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:04:13.666524Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T09:04:13.666731Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T09:04:13.666952Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T09:04:13.673183Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T09:04:13.673559Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T09:04:13.674303Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:13.674526Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T09:04:13.677550Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:13.679101Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:13.679177Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:13.679257Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T09:04:13.679305Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:13.679426Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T09:04:13.679713Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T09:04:13.687176Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T09:04:13.815625Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T09:04:13.815879Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:13.816134Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T09:04:13.816403Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T09:04:13.816482Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:13.819214Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:13.819371Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T09:04:13.819594Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:13.819666Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T09:04:13.819708Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T09:04:13.819743Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T09:04:13.823806Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:13.823876Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T09:04:13.823928Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T09:04:13.826033Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:13.826096Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:13.826138Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:13.826219Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T09:04:13.829937Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T09:04:13.832290Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T09:04:13.832518Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T09:04:13.833768Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:13.833931Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:04:13.834004Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:13.834297Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T09:04:13.834355Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:13.834533Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T09:04:13.834606Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T09:04:13.838260Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:13.838326Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:13.838539Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:13.838586Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 
n candidate queue, at schemeshard: 72057594046678944 2025-05-07T09:04:14.072387Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: PersistRemoveSubDomain for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-05-07T09:04:14.072536Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-05-07T09:04:14.078392Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:5 2025-05-07T09:04:14.078480Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:5 tabletId 72075186233409550 2025-05-07T09:04:14.078745Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5699: Failed to connect, to tablet: 72075186233409550, at schemeshard: 72057594046678944 2025-05-07T09:04:14.078864Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:7 2025-05-07T09:04:14.082846Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:1 2025-05-07T09:04:14.082942Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:1 tabletId 72075186233409546 2025-05-07T09:04:14.083104Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 1 candidates, at schemeshard: 72057594046678944 2025-05-07T09:04:14.083217Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-05-07T09:04:14.083263Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-05-07T09:04:14.083373Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T09:04:14.083585Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:3 2025-05-07T09:04:14.083628Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:3 tabletId 72075186233409548 2025-05-07T09:04:14.083751Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:6 2025-05-07T09:04:14.083776Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:6 tabletId 72075186233409551 2025-05-07T09:04:14.083882Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5699: Failed to connect, to tablet: 72075186233409548, at schemeshard: 72057594046678944 2025-05-07T09:04:14.084002Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2 2025-05-07T09:04:14.084034Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:2 tabletId 72075186233409547 2025-05-07T09:04:14.084193Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5699: Failed to connect, to tablet: 
72075186233409551, at schemeshard: 72057594046678944 2025-05-07T09:04:14.084284Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:4 2025-05-07T09:04:14.084322Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:4 tabletId 72075186233409549 2025-05-07T09:04:14.084527Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-05-07T09:04:14.084626Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5699: Failed to connect, to tablet: 72075186233409549, at schemeshard: 72057594046678944 2025-05-07T09:04:14.086597Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 102, wait until txId: 102 TestWaitNotification wait txId: 100 2025-05-07T09:04:14.086914Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 100: send EvNotifyTxCompletion 2025-05-07T09:04:14.086980Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 100 TestWaitNotification wait txId: 101 2025-05-07T09:04:14.087099Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 101: send EvNotifyTxCompletion 2025-05-07T09:04:14.087126Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 101 TestWaitNotification wait txId: 102 2025-05-07T09:04:14.087174Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2025-05-07T09:04:14.087194Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 2025-05-07T09:04:14.087770Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 100, at schemeshard: 72057594046678944 2025-05-07T09:04:14.087877Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 2025-05-07T09:04:14.087930Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 100: got EvNotifyTxCompletionResult 2025-05-07T09:04:14.087966Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 100: satisfy waiter [1:611:2516] 2025-05-07T09:04:14.088102Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-05-07T09:04:14.088132Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:611:2516] 2025-05-07T09:04:14.088253Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 2025-05-07T09:04:14.088341Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-05-07T09:04:14.088365Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: 
satisfy waiter [1:611:2516] TestWaitNotification: OK eventTxId 100 TestWaitNotification: OK eventTxId 101 TestWaitNotification: OK eventTxId 102 2025-05-07T09:04:14.088889Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T09:04:14.089113Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 256us result status StatusPathDoesNotExist 2025-05-07T09:04:14.089331Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/USER_0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-05-07T09:04:14.089858Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0/table_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T09:04:14.090081Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0/table_0" took 196us result status StatusPathDoesNotExist 2025-05-07T09:04:14.090244Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0/table_0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/USER_0/table_0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-05-07T09:04:14.090770Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T09:04:14.090984Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 216us result status StatusSuccess 2025-05-07T09:04:14.091397Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: 
schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 6 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 6 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 4 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_external_blobs/unittest >> ExternalBlobsMultipleChannels::WithNewColumnFamilyAndCompaction [GOOD] Test command err: 2025-05-07T09:04:05.537212Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:105:2151], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T09:04:05.537369Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T09:04:05.537630Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003624/r3tmp/tmp1QrY9f/pdisk_1.dat 2025-05-07T09:04:06.072776Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-05-07T09:04:06.119318Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:04:06.178093Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:04:06.179256Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:04:06.192830Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:04:06.286760Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-05-07T09:04:06.674913Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 100:0, at schemeshard: 72057594046644480 2025-05-07T09:04:06.953830Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:808:2666], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:04:06.954003Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:817:2671], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:04:06.954137Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:04:06.960060Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480 2025-05-07T09:04:07.107668Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:822:2674], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-05-07T09:04:07.180411Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:878:2711] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:04:07.552367Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715660. Ctx: { TraceId: 01jtmzsts7565texrcxk6vmy26, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MTYyNDQ2NDYtYmU2YjIyMGItODMzNWM2MzgtNTgxNjQyMzI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:07.656227Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715661. Ctx: { TraceId: 01jtmzsvdp150p8n0s30mskqcs, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZGVlNjc3MTgtOTE0OWY1MmItMjQ0ZWE4MjktMzM3NGY3YWE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:07.707354Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715662. Ctx: { TraceId: 01jtmzsvfd5ew0n7560cfaxn8j, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OTg2OTgxZjgtZTZkZGIwMDQtMzJhYzFjMjgtZDRkNmNiNjM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:07.761299Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715663. Ctx: { TraceId: 01jtmzsvh130rjhzwhbs9n1zts, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NjY2ZTc1MzMtOTU3MDRjZmItYTNjMzJjNmMtNjgzN2FlN2M=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:07.816241Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715664. Ctx: { TraceId: 01jtmzsvjqdxx961d5pq3n4mnf, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NWM4MzQyMmEtZmFkNTA0NTAtNTNmN2E1YTgtNzM3ZTE3MDk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:07.872320Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715665. Ctx: { TraceId: 01jtmzsvme2e5bwaaf16fdx5py, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZWQxMTM1YmYtNWQ5NzA5YS1hZTk1YzViNy1jMmU2MmRlNw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:07.917521Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715666. Ctx: { TraceId: 01jtmzsvp60sw10k08zapcwmsb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OWRmNTdhYWItZjk1YzQxNjYtMjIyM2U0YWItOWQ2OTY0ZmQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:07.963166Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715667. Ctx: { TraceId: 01jtmzsvqj73s1p9hfdsj061t5, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MTAwMjE4My0xMjc2NWZmYy05MzlkN2I0LWRlNTY0ZDY1, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:08.005575Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715668. 
Ctx: { TraceId: 01jtmzsvrz5er3ebq1v67asvgz, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZjQxNzQzODctNTU5ZGFlYTMtNzJjZDYwMTktODdjN2NlMzg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:08.056439Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715669. Ctx: { TraceId: 01jtmzsvtadaw4jcmr2273n9ce, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NTFjZWQxZC1hNmQ3NzAzZS0zMjhhZjA5NS1jNGMyN2ZkZA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:08.104182Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715670. Ctx: { TraceId: 01jtmzsvvx0qq00td3sav4sxg4, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NWZiYzY0NTMtNTEyNzc5NTQtOGU2ZjRmNDUtNmJmZjk0NDk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:08.148301Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715671. Ctx: { TraceId: 01jtmzsvxddkkpn14mkmtt9j74, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NTQyNzE5MDctNjk1NDdhZTEtY2UwMjc2ZTEtZDc3OTBkYTk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:08.192630Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715672. Ctx: { TraceId: 01jtmzsvysbkp7qm7av07f1xcz, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NzhhZjNlMmEtYmUyMmU0OGMtOTg4NjNlOTctYTkyMWIzZjM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:08.235809Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715673. Ctx: { TraceId: 01jtmzsw06bb2zzdfbmspbqwza, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YmVmYjlhODItNGZhNWY3ODQtNzJiYzYyMTktYmU4YTIyOWI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:08.284401Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715674. Ctx: { TraceId: 01jtmzsw1gf8h20r6wg1001xkw, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NzA5YWU1NjQtMzJlZDAwYzUtYjlmOTU1MGItMzVhNjlhZg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:08.376698Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715675. Ctx: { TraceId: 01jtmzsw4523ywrpp6v9cf8paz, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NmQ3MmRjZmYtNjM3YWY3NjQtNmViZDI5NTItMTZkOTVmNjk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:08.425188Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715676. Ctx: { TraceId: 01jtmzsw5zf91p16ys3x17t47w, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NGU4NGQxNTktNjQ2MGUwOTktYmVlMjkyYzAtYjJjYzBmYTc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:08.468887Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715677. Ctx: { TraceId: 01jtmzsw7e94rpz7gdgqyd2hzr, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZWFjOGVlZjgtOTgyNzUwYzMtMjEyMWIyNjAtMTNlYjZiMDY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:08.514489Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715678. 
Ctx: { TraceId: 01jtmzsw8vcgd9e209t67kx963, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjE5NDRhOTgtNTU1ZWYwM2UtZTMwYjgzNzQtOWVjNzRkODA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:08.562154Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715679. Ctx: { TraceId: 01jtmzswa7ayk946vh99qjtnz1, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTYxMzNiNjQtM2VhY2VkMTItOTQyOWRhNzYtZmRkZGQzZjk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:08.612876Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715680. Ctx: { TraceId: 01jtmzswbr30kn4vdtxv6pqh88, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MmZhYzhmNGQtNGI3YWQyM2QtYzM1ZmEwY2MtMjg2MWE5ZTg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:08.657634Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715681. Ctx: { TraceId: 01jtmzswd95jcay10178px35n3, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjA5NzA3ODAtNjdhMjNmN2UtYzUwYTQ2LTgwZjU3NmIw, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:08.699784Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715682. Ctx: { TraceId: 01jtmzswep3hbkva782f4w2y0s, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id ... r.cpp:119: TxId: 281474976715727. Ctx: { TraceId: 01jtmzsys6cpxnv3s7aqcf86jk, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OWE0NzQ5NDYtYzc5NGY0NzktODZjNWI0OTktNDQwYTdjN2U=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:11.152000Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715728. Ctx: { TraceId: 01jtmzsytycj5tpr5tkymhmv69, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NzJlNzA5ZTAtYjg0MWFmN2MtNDQwYzY3OGYtYTc4MGJkMmQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:11.207687Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715729. Ctx: { TraceId: 01jtmzsywp5vp93j1m8w25phcy, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZWJjOTkzNDUtODc2YmU5NmMtOGRiZGNkMDAtN2JlMzQ1YTM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:11.263328Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715730. Ctx: { TraceId: 01jtmzsyye19m3a9ytm6z88dr9, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OGJmNDZhMDctMzgxN2JkOTctZjY4NDZmZC1jZDE1MjdiZA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:11.315734Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715731. Ctx: { TraceId: 01jtmzsz053r49szhrcz27ed41, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OGY5ZTU3MzctZTAxYTQ5NTItN2RmZmQ1MzEtZGJhMTg4NDU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:11.357507Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715732. 
Ctx: { TraceId: 01jtmzsz1r4xdg2jkp1d51zfy2, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NTIxOTA1ZDktNGRkMTAzNGMtZjlmZmRhMGUtZGZkYWFkNzk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:11.413153Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715733. Ctx: { TraceId: 01jtmzsz33b6d8wscng8vwm26r, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MjcyMTgwODMtNWY3N2M5ZGItZWM1ZGJjMmItNGJhNWQ5ZmM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:11.496110Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715734. Ctx: { TraceId: 01jtmzsz5x2z9xv3eg68sds7pg, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MjIwYTM4LWNmZDVjM2JlLWE4YTIzNGM5LTQ5ZTdjNGIx, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:11.543785Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715735. Ctx: { TraceId: 01jtmzsz7dan0wmj0wrh7t7knv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MzUyNzljN2MtM2RkNDBiMGEtZjkxM2Q1OTQtY2YzNTIyZjI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:11.586763Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715736. Ctx: { TraceId: 01jtmzsz8w2jfvvsmzq99m1mcy, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjFhYTU2NTAtNGVmODg4YjEtOGFkNGVjZmYtNjQ1NDc5NDk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:11.627982Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715737. Ctx: { TraceId: 01jtmzsza9cbmrrv1psvns4pf4, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OWM4ZmNjODYtZDdlZDU3MTItM2QxMWQwZWEtODczNjczN2I=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:11.676459Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715738. Ctx: { TraceId: 01jtmzszbg67vxcf6pb1vrz9c7, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZWJjM2Q1YmMtMTBjYzc0MjctYmU4OWI4MzQtOGVmOGVmMDg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:11.731027Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715739. Ctx: { TraceId: 01jtmzszd2acfw9mtq5tax91y9, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZTJhY2RhODktYTM3MzkwMmUtMzM2ZjNmYTctYTgyZWFiZmY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:11.787613Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715740. Ctx: { TraceId: 01jtmzszes6bz8hgekn124p4ga, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NTQ2MGUzNjUtYzUyZDlkNTktMmFhOGQ0ZjMtMmViNGRlZDU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:11.844140Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715741. Ctx: { TraceId: 01jtmzszgj59cbenx70mg3ygsj, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZjYzMTZjZGYtODFmM2UwMTMtZTgyZTk5NS1jYTdjZjBlZA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:11.902619Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715742. 
Ctx: { TraceId: 01jtmzszjbdw6zp2440j2mvtrs, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YmEzYzUzYTYtZjQwNmViZDUtMTA2MTZjODUtODU5YjQ1MA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:11.962697Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715743. Ctx: { TraceId: 01jtmzszm5cv5jjhf9f3qxpfn6, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MTI3OTQ0M2MtYjI4NzQxZWEtYWRhYTBkZDctODllYWRhY2M=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:12.021852Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715744. Ctx: { TraceId: 01jtmzszp1138m3f3pqqrw8agb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NzExYjJiZDctN2M4Yzg4MjQtN2IzMTI4MWUtMmY3ZjZhYzU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:12.100046Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715745. Ctx: { TraceId: 01jtmzszrh02a720f3cjhxt4mx, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NGMyODNhMGQtOTk4ZDA2ZDAtNDk1ZTFkMDUtODFiZjJhZTc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:12.158838Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715746. Ctx: { TraceId: 01jtmzsztbahcfh3penwnbsxwt, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MjU1ZTY5NjQtYTZhMGU2Y2QtOGM4OWY3ZTUtYmU4NDc2OWU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:12.214484Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715747. Ctx: { TraceId: 01jtmzszw690yxm67sp2eyk0kj, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZGUxOWMzMDUtNzk3ZWMyN2ItYWZiNTg4ZjUtYWE0MzM1NDU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:12.271892Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715748. Ctx: { TraceId: 01jtmzszxx6pk4qv8zxgeq818j, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZWZiN2ExMmItNThjODY1NGYtNmQ0Y2U4OTgtYjBmNDllMGU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:12.322169Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715749. Ctx: { TraceId: 01jtmzszzp56637xmc14f5qbft, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NjRmOTg4ZWItZGY1OTNlNWEtNTQxMzM4OTMtYzgyMmFjM2M=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:12.375108Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715750. Ctx: { TraceId: 01jtmzt0173stvfctej5gc5z7m, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MTUyNjI4NjctNGMxYjIzNTktYTU2MWY4NjQtZWIwOTBkZGY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:12.426104Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715751. Ctx: { TraceId: 01jtmzt02y70a0e6s8p0dkwe5n, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZjU0OWUwNjItZWRmNjFjNWMtZmVmNDVlOTYtNWNmYWE3Yjk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:12.478011Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715752. 
Ctx: { TraceId: 01jtmzt04g0eky8f8gpyxjpjjx, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZjE3N2RhNjAtODZjNDg5OTgtMzFkNjk5MmMtNDdiMjljMDg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:12.535232Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715753. Ctx: { TraceId: 01jtmzt065bmw1htyxt327j8h6, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OWM3ZjE0YjAtOTg5ZjI0ZDItODQ2OGU3Y2QtZjBjOGU3ODA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:12.590414Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715754. Ctx: { TraceId: 01jtmzt07x2baxfxr6c162bc4p, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=N2JmNGNhNmMtZmUyY2IyYzMtNThkYjJkNmMtMTNlMWE4MmM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:12.648730Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715755. Ctx: { TraceId: 01jtmzt09n3yhysx90nreybvtn, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YWM3NzBlYi02YTZhNGZlNi1iNWVkZWExMC1kNzk2ZWE5ZQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:12.720398Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715756. Ctx: { TraceId: 01jtmzt0bf79fazcqn47c78m9m, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ODdkOGIxMjktZTY1ZDY5MzEtOTc1NDMyZDQtODRlNWQzMWY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:12.771049Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715757. Ctx: { TraceId: 01jtmzt0dq9egh8px00mz768gb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NzU4YjlkZDAtZDUwMDYwNmUtMmRmNzRiNy1lMDg0NTMzNQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:12.818668Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715758. Ctx: { TraceId: 01jtmzt0f8596ew7a649s6g5f3, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NDYzNmEyM2QtZGY1OGQxZjQtOGJmMWVhMTUtMjVlZTBlMw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:12.867051Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715759. Ctx: { TraceId: 01jtmzt0gr0n1j9g8gzxskm04p, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MjFkMjUyZDctZWVjYjE3MWYtYzNlMGNiZjEtZjU1MTA5NDE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:13.270453Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715760. Ctx: { TraceId: 01jtmzt0pv4qg02pk3hrpaakyf, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NGY0ODJlYjgtNzQxMTI2YTItYTY1MjVkODAtNDM5ZDllMDY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_external_blobs/unittest >> ExternalBlobsMultipleChannels::SingleChannel [GOOD] Test command err: 2025-05-07T09:04:05.603281Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:105:2151], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T09:04:05.603463Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T09:04:05.603748Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0035d9/r3tmp/tmpmDDFOS/pdisk_1.dat 2025-05-07T09:04:06.080912Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-05-07T09:04:06.125740Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:04:06.178965Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:04:06.179239Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:04:06.192053Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:04:06.286747Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-05-07T09:04:06.632458Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:736:2617], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:04:06.632593Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:747:2622], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:04:06.632678Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:04:06.645072Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480 2025-05-07T09:04:06.815037Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:750:2625], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-05-07T09:04:06.894834Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:820:2664] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:04:07.552425Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715660. Ctx: { TraceId: 01jtmzstf6dsgdw79b1vf0t3ym, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZmIyOWIxMzQtMTBkYjZiZWEtZmM2Yjc4MDMtYjNkMDMyNTY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:07.663776Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715661. Ctx: { TraceId: 01jtmzsvdpce06ntfx114e64ya, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NjIyMTc0ZGMtNDQ1OTY4MmYtN2E1OTg5ZjAtYWRkNzg1NWI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:07.727801Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715662. Ctx: { TraceId: 01jtmzsvfnb5jam4syk8nzhn4y, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MWYwZTM0ZGUtYmY4M2FjM2QtZmFlZTUzNjAtMTIzODFmMDI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:07.789818Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715663. Ctx: { TraceId: 01jtmzsvhn852madjk2aka6d5p, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZDQyYjliZGItMjI4ZGU1Ny1lOGNjNjczNy0xOWNkNmNmZQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:07.852664Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715664. Ctx: { TraceId: 01jtmzsvkkff8gpz3pect4hrph, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=N2JhYTNmYTYtMmQxZmRjZTktZDRlNWUwZmEtODkzODJlNDQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:07.915672Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715665. Ctx: { TraceId: 01jtmzsvnj8fswh42qvdw2zhph, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZWMwMDg3ZTgtNTNlY2UzMWYtYzY3YjQyOTYtMjAyZmUwNGU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:07.977066Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715666. Ctx: { TraceId: 01jtmzsvqhdeekkt65zsqvnspv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MzNhMzRjNjItZGU2ZWJmMzctMjJjODExYmMtYWZjNWVjZTI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:08.037502Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715667. Ctx: { TraceId: 01jtmzsvse4s79jkaw1q4qwxjv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NDMzZGI0Yi1iZGZmZjMzYi1mYzljOTgwMS01ZGUwNGQ0Nw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:08.100523Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715668. 
Ctx: { TraceId: 01jtmzsvvban3ndx5bpx7v4wpe, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NjQ0OWVmZmEtNDQyODI3NTYtMTY0NWViZTctYzA4NGU3ZmE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:08.163015Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715669. Ctx: { TraceId: 01jtmzsvxactvj5epr037es2f1, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NzU4OWViYWItZWQ4NDdjODQtNmVhYTFiODAtM2Y2NjM5MTY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:08.225677Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715670. Ctx: { TraceId: 01jtmzsvz94nd9z6hj79krtmqw, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZDMwZDA5YjktODE2MWRkNTMtZTUxYWY4OGUtNjIxOTI3OTE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:08.288564Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715671. Ctx: { TraceId: 01jtmzsw179kzc4yw14jpdxj6b, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NjZkYTY3MDUtNmZlMTZkNWEtZTgyNDEwOS03YWE3ODZiOQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:08.353247Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715672. Ctx: { TraceId: 01jtmzsw3639ede1z2c67b8s9a, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=M2JmYjhmMS1hN2ViYmUxNS0yYjQ3N2E2ZS1lMjQzN2U4MA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:08.417294Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715673. Ctx: { TraceId: 01jtmzsw572r8c2nvkmfap8wnw, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NGJlOGIwMWMtYzQ5MzMxYzAtYWE1NTUzOWYtNWNkZmFhNmY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:08.482366Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715674. Ctx: { TraceId: 01jtmzsw77dt4zcrgrs48eap49, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Nzk5ZTVkNjgtZmNmOGNlNjgtY2I0M2EyNC0zNDE4NGQyMQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:08.549188Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715675. Ctx: { TraceId: 01jtmzsw9a48gfytxbgknm7cn7, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MWNkZTY1OTItYWU3NDdiZDgtYmQ3YWNmZmQtZmYwOTBkZjU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:08.613040Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715676. Ctx: { TraceId: 01jtmzswbb9jg7zcc089aevpbp, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=N2M1Y2QzYzgtNDM4YzhmZTctNmU4OTY5MGYtY2RlMDc3YWE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:08.675281Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715677. Ctx: { TraceId: 01jtmzswdad50s81hp3jk89nmq, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YzU4YWZjODAtMzA4ZjgzYTUtY2RlYWE5MzQtZmUwNThkY2E=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:08.738415Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715678. 
Ctx: { TraceId: 01jtmzswf95e82hx4f5es76xvd, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MTBmNTFkM2ItNTYxYzNkY2QtYWYyZGY4OWQtMTFlNDBhOTE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:08.800155Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715679. Ctx: { TraceId: 01jtmzswh8dktg1q6yxxztbsn0, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NWYzNGQ3OTAtM2M2M2U2NTctMjk2OWRmNTItOTIzOWIwMjA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:08.865007Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715680. Ctx: { TraceId: 01jtmzswk611adw3zrp4pdn924, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NzMyNTllYTQtNjU0MzIyNmUtZDBhODhmNGItMjY0ZjBmNzg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:08.905943Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715681. Ctx: { TraceId: 01jtmzswn721adpjzwc3kh0n7s, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZDQzNDZiNGItODJiMmY3Ni02ODI5OTRkYy1lNmE4M2JiMQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:08.947509Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715682. Ctx: { TraceId: 01jtmzswpedjcr0rp576af9hne, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YzI3ODRlZWQtMTI2OGYzYmItZTQ5NjhlM2UtODc3N2NjZmQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:08.990425Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 2814749767156 ... r.cpp:119: TxId: 281474976715727. Ctx: { TraceId: 01jtmzsz0ba5tr5hxbvjnpmbga, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MmM1OGZkOTEtNGFhYTliNjItYjQ1NzhmOGItMzRmZTFhODQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:11.391257Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715728. Ctx: { TraceId: 01jtmzsz29291n1mgzwr0k4jcf, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OGEzN2NkZTgtMjk1ODY3N2YtODNkODdjNzMtZjc0MThjOGM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:11.451281Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715729. Ctx: { TraceId: 01jtmzsz459pbggpgcn6sw8648, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OWY1MmUxZTktOGMxNmYzMmUtM2E0MjMxOTAtYmI3ZjM4ZWQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:11.502233Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715730. Ctx: { TraceId: 01jtmzsz622e82kdzqrqsfrzj0, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZDgxNWE5YzAtM2JhMDU4OWMtOGUxNTJhNzYtMzU3YzQwNzU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:11.550404Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715731. Ctx: { TraceId: 01jtmzsz7ka0cn4dp5tcr17q19, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YmM3OTVlNTYtMjUzNjY5NWQtYzE1ZDUzYzYtZTBlM2IzMGE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-05-07T09:04:11.620116Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715732. Ctx: { TraceId: 01jtmzsz9369ykpd5xfs137h55, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZDcyYWE2MzktYTZmNjRiZjAtOGU2NGMzYTUtM2JmNjEyZWI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:11.676229Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715733. Ctx: { TraceId: 01jtmzszba13bg6t7gt024f77b, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjgzMmM4Yi00MGUwNTRmZi03NTFkYTAwZi01YTBkNjUwYQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:11.726317Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715734. Ctx: { TraceId: 01jtmzszd107ek6fgk2r0hhr91, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTg5YzBhMDQtNWU3ZjllYmYtNjAyZTBiMmEtNjcxYzIzNA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:11.775709Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715735. Ctx: { TraceId: 01jtmzszemf39a3ew0grbkcnrf, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YmM5Zjc4LTRjNDMwNDU0LTE2ZDE3ODAyLWI1OGI3MTJk, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:11.834346Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715736. Ctx: { TraceId: 01jtmzszg5b15gmbkj4489vfm0, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MWVjYmViOTEtODgzZDZkNDktMmFmYmExZGUtN2RjZDZhYmY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:11.886960Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715737. Ctx: { TraceId: 01jtmzszj16b9ve5h59pnqkp70, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NmE2YmM1NmQtZGRhNDU3ODktOTk2ZTk1NmMtMWQ4YjQ3YWU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:11.947159Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715738. Ctx: { TraceId: 01jtmzszkm29wkzxnvwmt9xk64, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OWU5MTdmYzEtNDBiYWI0ZjQtZDE1Mjg2NjItOWQ4NzIzNWI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:12.003013Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715739. Ctx: { TraceId: 01jtmzsznj5pcg2d1k80ny09vh, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YWI0YjYwMWYtOTAzNTllOTEtNWY5YzU2OTAtNGNmZThkMjU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:12.067847Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715740. Ctx: { TraceId: 01jtmzszqa3k0jssk8kjjbcmvq, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NWM3MTg5ZmYtZGZiZjg5NzItYzQyYmRhMzItNTJhY2I3YTU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:12.120387Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715741. Ctx: { TraceId: 01jtmzszs985kgk4pm3cdexjt0, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MWU0Yjk1ZDMtNzM1MWRiMTQtMTEyYTU0YTAtMWU3YmY5NDc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-05-07T09:04:12.187519Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715742. Ctx: { TraceId: 01jtmzszty7qwset33v5degz6g, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NDBhNWYzOGYtY2ZkY2YyODUtMzE2NGYzNGQtNDY4YzU4ZjI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:12.248738Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715743. Ctx: { TraceId: 01jtmzszx217xh992dee3m9yn0, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MjZkZTIyYWEtYmY0MzliMjUtYmMyOTc0N2MtMjExYWRhOA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:12.307271Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715744. Ctx: { TraceId: 01jtmzszyza5st163g438s46yf, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NmZlMWM3OWUtZWNhYjZhMWUtNDU1OWIwMjctODcwNTIzMzg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:12.368236Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715745. Ctx: { TraceId: 01jtmzt00s4y3tzn1pyh05qrjj, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZjhjMzA2ZjktMWEyM2YzMTQtOWUzNDBhMjQtZjU4YTVkNWY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:12.431027Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715746. Ctx: { TraceId: 01jtmzt02p3ky8gkwacfysztpa, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Y2MzNWFkNTctYzBhODBjNmYtOWIxMDE1ZjMtNGJhZTBlMTg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:12.492789Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715747. Ctx: { TraceId: 01jtmzt04n8f3jhw8nqa7whexb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NWMwOTc0NjQtYzk0ZjI1NjEtNjE3ZDBmYWUtNjg0MzlkZDg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:12.556572Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715748. Ctx: { TraceId: 01jtmzt06k95gngttamdpp3q1w, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTc0NWFhODEtMzdjNmRlOGUtZDI3NDcyMmEtMzc1MmJlMjM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:12.618416Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715749. Ctx: { TraceId: 01jtmzt08ka6szenjz27r82tee, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Y2M0MzVkNjYtNzMyZjU2MjctMjc4Y2FhN2QtNTI1ZDhkYjQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:12.681202Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715750. Ctx: { TraceId: 01jtmzt0ah3v838vvk0d4pb6mm, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=M2IzZGUwNy1mZGQ4ZGY4Mi1mZDcxNmM5Yi0xOWM5ZmY2Zg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:12.743440Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715751. Ctx: { TraceId: 01jtmzt0cf5z5sdqxdsfyam3q6, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZjNiMzY5NjAtYTQ3Yzc5M2QtNmY0NTkyMzQtMjI0NWY1MjI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-05-07T09:04:12.820520Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715752. Ctx: { TraceId: 01jtmzt0ee3yrtw6be2cyxekb6, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NzAxOTU0MmMtZjcwMzQzMDUtYzRlOGRlZDItOGUyZTljZTA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:12.876840Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715753. Ctx: { TraceId: 01jtmzt0gt9wjrfbnj9tf6f2t6, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OGU0M2ViYTgtZjFmY2MzNjctM2YyZThiN2YtNmU3MmFiMmE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:12.934713Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715754. Ctx: { TraceId: 01jtmzt0jkan2ste9mkd82kj2r, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZDE1MWJhZjgtYWI4ZDUzZWYtNmJkMzE5NjMtZWNmMTdiYjE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:12.990246Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715755. Ctx: { TraceId: 01jtmzt0mdfss9w18ktnrt3kj3, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NGE4M2QxNzEtZjljYTRjNjYtZmI5M2JjNDItZTU5ZTMwNTk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:13.045229Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715756. Ctx: { TraceId: 01jtmzt0p4712m690qzzjs8d0f, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MjNjNWNjNmUtNTVjNGZlZDQtMTg1MmMyN2QtZTY0MmI0MjE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:13.102857Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715757. Ctx: { TraceId: 01jtmzt0qvdx6ex6dje8gmywe4, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MzRhZDE5NjctYjI0ZDExMmMtZTczNWI1MmEtOWMwN2JmZTQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:13.162658Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715758. Ctx: { TraceId: 01jtmzt0sna1rt1tsrj595psz7, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=N2ZhYTMyMGYtYmE3YTcyNjctNzFmODE4M2QtM2YwY2NmNg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:13.221767Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715759. Ctx: { TraceId: 01jtmzt0vh43kzm8n7dh4smz81, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YmZlODA0NjgtOTQ3MWUxZTAtYzgxM2IyZDYtYzJlZmJhNjA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:13.400836Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715760. Ctx: { TraceId: 01jtmzt0yfaf9w520hbf0ngc6w, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZmY0ZTUxNDMtYmM5N2JkMDUtNGUwZjU2ODctZTk3NjlkOTI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root >> TSchemeShardSubDomainTest::SchemeQuotas [GOOD] >> DataShardVolatile::DistributedWriteThenSplit [GOOD] >> DataShardVolatile::DistributedWriteThenReadIterator >> TSchemeShardSubDomainTest::SimultaneousDeclareAndDefine [GOOD] >> TSchemeShardSubDomainTest::CreateSubDomainWithoutTabletsThenDrop [GOOD] >> TSchemeShardSubDomainTest::LS ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::CreateWithoutTimeCastBuckets [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140] 2025-05-07T09:04:14.230986Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T09:04:14.231072Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:04:14.231120Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T09:04:14.231153Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T09:04:14.231219Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T09:04:14.231251Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T09:04:14.231324Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:04:14.231406Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T09:04:14.232120Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T09:04:14.232456Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T09:04:14.319994Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T09:04:14.320057Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:04:14.338821Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T09:04:14.339070Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T09:04:14.339261Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T09:04:14.346186Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T09:04:14.346537Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T09:04:14.347242Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:14.347452Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T09:04:14.350668Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:14.352216Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:14.352295Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:14.352375Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T09:04:14.352441Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:14.352579Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T09:04:14.352904Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T09:04:14.360613Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T09:04:14.487861Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T09:04:14.488096Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:14.488359Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T09:04:14.488609Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T09:04:14.488691Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:14.491315Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at 
schemeshard: 72057594046678944 2025-05-07T09:04:14.491478Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T09:04:14.491712Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:14.491782Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T09:04:14.491836Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T09:04:14.491878Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T09:04:14.494186Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:14.494262Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T09:04:14.494315Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T09:04:14.496417Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:14.496484Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:14.496539Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:14.496604Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T09:04:14.500240Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T09:04:14.502817Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T09:04:14.503055Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T09:04:14.504075Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:14.504227Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { 
RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:04:14.504273Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:14.504636Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T09:04:14.504702Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:14.504899Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T09:04:14.504980Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T09:04:14.507432Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:14.507499Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:14.507731Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:14.507777Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:205:2207], at schemeshard: 72057594046678944, txId: 1, path id: 1 2025-05-07T09:04:14.508163Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:14.508214Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046678944] TDone opId# 1:0 ProgressState 2025-05-07T09:04:14.508309Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#1:0 progress is 1/1 2025-05-07T09:04:14.508340Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-05-07T09:04:14.508381Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#1:0 progress is 1/1 2025-05-07T09:04:14.508411Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-05-07T09:04:14.508453Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 1, ready parts: 1/1, is published: false 2025-05-07T09:04:14.508514Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-05-07T09:04:14.508558Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 1:0 2025-05-07T09:04:14.508594Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 1:0 2025-05-07T09:04:14.508674Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-05-07T09:04:14.508710Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 1, publications: 1, subscribers: 0 2025-05-07T09:04:14.508759Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 1, [OwnerId: 72057594046678944, LocalPathId: 1], 3 2025-05-07T09:04:14.510640Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-05-07T09:04:14.510765Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-05-07T09:04:14.510804Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1 2025-05-07T09:04:14.510844Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 3 2025-05-07T09:04:14.510904Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T09:04:14.511048Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1, subscribers: 0 2025-05-07T09:04:14.518209Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1 2025-05-07T09:04:14.518770Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1, at schemeshard: 72057594046678944 TestModificationResults wait txId: 100 2025-05-07T09:04:14.521931Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateSubDomain SubDomain { PlanResolution: 50 Coordinators: 1 Mediators: 1 Name: "USER_0" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 100 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T09:04:14.522240Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_subdomain.cpp:92: TCreateSubDomain Propose, path: /MyRoot/USER_0, opId: 100:0, at schemeshard: 72057594046678944 2025-05-07T09:04:14.522360Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 100:1, propose status:StatusInvalidParameter, reason: Malformed subdomain request: TimeCastBucketsPerMediator is 0, at schemeshard: 72057594046678944 2025-05-07T09:04:14.522786Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:434: actor# [1:268:2259] Bootstrap 2025-05-07T09:04:14.537682Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:453: actor# [1:268:2259] Become StateWork (SchemeCache [1:273:2264]) 2025-05-07T09:04:14.538321Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:268:2259] 
HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-05-07T09:04:14.540984Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 100, response: Status: StatusInvalidParameter Reason: "Malformed subdomain request: TimeCastBucketsPerMediator is 0" TxId: 100 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:04:14.541124Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 100, database: /MyRoot, subject: , status: StatusInvalidParameter, reason: Malformed subdomain request: TimeCastBucketsPerMediator is 0, operation: CREATE DATABASE, path: /MyRoot/USER_0 2025-05-07T09:04:14.541570Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 TestModificationResult got TxId: 100, wait until txId: 100 TestWaitNotification wait txId: 100 2025-05-07T09:04:14.541727Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 100: send EvNotifyTxCompletion 2025-05-07T09:04:14.541771Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 100 2025-05-07T09:04:14.542122Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 100, at schemeshard: 72057594046678944 2025-05-07T09:04:14.542286Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 100: got EvNotifyTxCompletionResult 2025-05-07T09:04:14.542327Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 100: satisfy waiter [1:283:2274] TestWaitNotification: OK eventTxId 100 2025-05-07T09:04:14.542827Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T09:04:14.543066Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 214us result status StatusPathDoesNotExist 2025-05-07T09:04:14.543343Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/USER_0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_external_blobs/unittest >> ExternalBlobsMultipleChannels::ExtBlobsMultipleColumns [GOOD] Test command err: 2025-05-07T09:04:05.596616Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:105:2151], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T09:04:05.596764Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T09:04:05.597021Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0035d1/r3tmp/tmp1FhmuE/pdisk_1.dat 2025-05-07T09:04:06.073283Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-05-07T09:04:06.122713Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:04:06.178079Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:04:06.179295Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:04:06.192082Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:04:06.286991Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-05-07T09:04:06.629876Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:736:2617], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:04:06.630012Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:747:2622], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:04:06.630107Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:04:06.645135Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480 2025-05-07T09:04:06.823338Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:750:2625], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-05-07T09:04:06.885398Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:821:2665] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:04:07.552358Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715660. Ctx: { TraceId: 01jtmzstf25y1dfcs5c5vpypr5, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NjY4NDVhY2EtZjcwY2U2YjctMTkxMzliYTktNmFiZDJiMmY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:07.667616Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715661. Ctx: { TraceId: 01jtmzsvdpdbnd8n6ef0e05nnf, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MWNkOWFiMTktYzBkYjI5ZDgtMzkzZTAxMC0xMWE3Zjg1, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:07.731762Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715662. Ctx: { TraceId: 01jtmzsvfs7ky7421etn0zk8jb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MzBiZDRkYjItM2JhMWFhNjgtNGI5MWRmYTEtMmM1YmIwODg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:07.796685Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715663. Ctx: { TraceId: 01jtmzsvhsa36pd6eff5gncyeg, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OWVkNDUxOTEtNmQxZTM4NmItNTI2N2JmMjAtOGJmNzk1NWQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:07.861453Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715664. Ctx: { TraceId: 01jtmzsvktd6rr3d87dsyrv983, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NGIwNzhkZWYtYmFlMmU0OTUtOTA2NmQzNTItYmRiNmYzNmQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:07.917800Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715665. Ctx: { TraceId: 01jtmzsvnv4nm5yv50hqa21f4w, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YWI1OWRhOGQtYWYyOGQxMzEtZDEyMTNhNWYtMTU1MWQxNDY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:07.966663Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715666. Ctx: { TraceId: 01jtmzsvqj3sb8ezmy24b823t1, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=N2QwZWRkNS1jODc0MDcxMy02YzZlNWIzOC1hZjA3ZWJjZg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:08.016902Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715667. Ctx: { TraceId: 01jtmzsvs2aemjkh602v4tvftm, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=N2RjMTdiOTktMTdjMWExZmItZDUxMTZiYjEtZmQ4NThlNzE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:08.065685Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715668. 
Ctx: { TraceId: 01jtmzsvtpexk34ywhc2x1fd01, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZWY3YzQxMzAtYTY1NjkzZmYtZGNmYjA2ZGMtNzA1MTZiNWM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:08.122909Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715669. Ctx: { TraceId: 01jtmzsvw51j7e8tt39655b6w6, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NDIzZGJlMGMtZDc1YWQzZTEtM2FmMzFhMzEtM2Y2MDQ1OTM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:08.169791Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715670. Ctx: { TraceId: 01jtmzsvxyd9srtdtj5sw7beqm, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MThhM2ZkYzgtNTE0NjZlNDEtNTQyOGRmOTUtZGEzMWZiMA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:08.224144Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715671. Ctx: { TraceId: 01jtmzsvzfc2n0w52gfhtgmknq, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MmZkYjA5LWFhZmNmN2Y3LTczNzI5NjFmLTExNGVlOTI0, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:08.272687Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715672. Ctx: { TraceId: 01jtmzsw14eyy4z9p93dydahc1, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTUyNTFiYWUtMWIyZjA2NWYtNTc2YjNmMDctYzM3YjkwMzA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:08.330555Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715673. Ctx: { TraceId: 01jtmzsw2nbwgxa8yxpt91cqss, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZDljZjgwMDYtZDlmZjAyN2QtNTU0MWY1OTctNzNlODQ1ZDg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:08.384747Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715674. Ctx: { TraceId: 01jtmzsw4e253j3phq4p6amphj, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjFkOTQyODMtZDUxZDA0OGEtZmIxMTQ4NTMtMjVhZDQ0NDA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:08.450782Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715675. Ctx: { TraceId: 01jtmzsw67dn50jaab58men17h, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZTE1NTlmNjgtNTAyNWUzNjQtYWU4YTIzNTQtYmJjZDZlYjQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:08.516917Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715676. Ctx: { TraceId: 01jtmzsw889xc8atfqwaahhpkc, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NjhmMmUxY2EtMzU4ZWE1YTEtZmE2N2FkY2UtMjc0NzljNjY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:08.580626Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715677. Ctx: { TraceId: 01jtmzswa991yhhdvfx6m68ja2, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ODUwOTM5ZTUtMmE3N2M2OTQtMmRiMmI2ZTItZGZlMWRkMzA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:08.645512Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715678. 
Ctx: { TraceId: 01jtmzswcad88xxdc78945qkkn, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZWNiZGIzZjUtZWU1MWQ2YWQtZDQzNmEzYWYtOWI4YWE2ZWY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:08.712917Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715679. Ctx: { TraceId: 01jtmzswebbwag8vf3k6z1be2z, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MjlhZWRlZGMtMzc1MzFlYTItNWY0ODdkZWEtMmZjZDllZWI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:08.779120Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715680. Ctx: { TraceId: 01jtmzswgec5sh8fqb24ddkpr9, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MjU3NjNiOTItYTFlMmUyMTUtZTlhNDVlNzYtNTczY2I3MDk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:08.844164Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715681. Ctx: { TraceId: 01jtmzswjgc8npa574kpxpepvm, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjZiYzBjMS02OTZjZDA2ZS1lZmNlZjRjMi0zM2M4ZmY2Mg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:08.909949Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715682. Ctx: { TraceId: 01jtmzswmj6ym1y0p05nw5gwbw, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MzMwMTUyYTMtZWExZmU1YjYtYTRiY2JhOGUtZDhhNzc2OTk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:08.964906Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715683. Ctx: ... p:119: TxId: 281474976715727. Ctx: { TraceId: 01jtmzsz520yf0ch9ax8jf5am0, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZWI5OTQ1ZWUtNGU1MTI3MmMtZTc2ZWY3YjAtMWJkZDk5NjY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:11.534916Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715728. Ctx: { TraceId: 01jtmzsz75a0m9xx47edtdwgda, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MWQwNjRlY2MtODRiOTE2ZGYtZWM2ZmY4NjMtMTgyNDM1MjY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:11.585037Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715729. Ctx: { TraceId: 01jtmzsz8k8n18ga7djy1p6vsp, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YmQzNTM2ZWMtNGUwYjFiMWYtYjcxZmVhNWMtYTdlNjY0M2E=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:11.643369Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715730. Ctx: { TraceId: 01jtmzsza52cbap772z0wdk1fv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YmFjYjM1OTYtNGJmZTA2OGQtZjYxNWNlMjAtOGJjMmM4OTA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:11.703783Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715731. Ctx: { TraceId: 01jtmzszbz4hzcqp37bs0bms74, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=N2Y1ZmQ4YzctYzRlMGQ2ZDItODUzYzFhOWEtMWM0YmJhNjE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-05-07T09:04:11.765300Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715732. Ctx: { TraceId: 01jtmzszdx0h0b7vsffvgdannw, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Yzc5MWFhOGYtY2E1MGI1NDktM2Q5OGZhZC04NmYwZDFkMQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:11.825766Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715733. Ctx: { TraceId: 01jtmzszfv9h137qbvkz2zbv67, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjI0NDA4ZS1jYzI0MjA3NS02OGNlNWRjNy01YmNkOTUwMQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:11.887119Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715734. Ctx: { TraceId: 01jtmzszhr57rk12prrpravsad, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MTMyZmY5YzktZGZiZWJiNjUtZjcxMTkyNzktOGNiZmFlODc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:11.958699Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715735. Ctx: { TraceId: 01jtmzszm49nehrppkbx4cb055, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NWIyOWJlODktMjg4NDBmODUtYjBmOTgyN2EtNzkyMGVkZjU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:12.022197Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715736. Ctx: { TraceId: 01jtmzsznwegqv55ngkkheejj0, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MjMxZWMxMGMtMWJjOGFkNDgtZTVhYTY1YzgtMzFkZDI0MGQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:12.078299Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715737. Ctx: { TraceId: 01jtmzszqwbd6spbv4gpa1hzd3, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NGIzNWFkMTQtMzI1NjA1ODctNDEyMDAyMGQtMTY4NDBmY2U=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:12.133424Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715738. Ctx: { TraceId: 01jtmzszsk0b5hnhbkngwev9va, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NjAyODM4OWEtMTdkZWJhOTItMjIwZWIxODgtZDJjYjYyMTY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:12.199638Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715739. Ctx: { TraceId: 01jtmzszvcc6p3vz325wk2chcd, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YzZiMTlhM2YtZDliYWM0NTEtY2Q0NGIyN2ItOTdlNGQ1NDA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:12.268618Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715740. Ctx: { TraceId: 01jtmzszxe9pys8kjejyg7vezw, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZmY2ZTlmNDEtYjUwZDYyZjItOTMyYjI3NmEtMmNkZmI1MjQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:12.322537Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715741. Ctx: { TraceId: 01jtmzszzkamcdhv0h16vr59z5, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZDE0MTkwMzgtYzZiMDg4Y2MtNmE2YThkZDMtOWYxMTljMWU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-05-07T09:04:12.383948Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715742. Ctx: { TraceId: 01jtmzt017ahkgccbwjkje2ncp, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YzczODlmYmEtNGJmOTU5Y2YtMjdkMDc1OWUtODEwNTFjYg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:12.473268Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715743. Ctx: { TraceId: 01jtmzt036266sa539226nk2sg, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZWZhOTc4MWMtYzg3YzdhNDEtYjZiZWExZDctYWNhNjJhY2Q=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:12.535560Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715744. Ctx: { TraceId: 01jtmzt05yfgkd7r7nnypsmmg5, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OTNlNGQ4NDktNzBiMzg4ZTAtMThkMzU4MTgtNGY2MWI2MjQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:12.586809Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715745. Ctx: { TraceId: 01jtmzt07xft4c5e9g0yewmcpn, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=N2I2OTM0NzEtMjUzNTE3OTAtZmU1MTAwY2QtYWFjMDZhYTg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:12.639919Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715746. Ctx: { TraceId: 01jtmzt09g6sx3105w2gtspe1y, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OTA3NDg2OGQtMTgyNDJjOWYtNTM1MDE5ZGMtMTI2NjA1Zjc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:12.694666Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715747. Ctx: { TraceId: 01jtmzt0b5eg4p7xca9rkyhyg8, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NWRlZGNjMjItMTZiMzZiNTItODdiOWM4MTYtOTFmNmZlOGM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:12.754421Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715748. Ctx: { TraceId: 01jtmzt0cw9m8enn6ppsxh6evt, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjdlNmM4YmYtNDJiMzI5MDQtMzZhZTFkNjgtNTUzZWQwNzg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:12.817520Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715749. Ctx: { TraceId: 01jtmzt0er03p78fc07atnae7b, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjU2MjAxYy00N2Q3MDc4MC0yNDk5YWQ1Mi0zODFiNWQ1Yw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:12.868463Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715750. Ctx: { TraceId: 01jtmzt0gqcjcap66dwje1z40k, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OTI0YmU1YWUtYmYyNWY2MGItMTE0MzQyZjktOWRiZjYzMzg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:12.942870Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715751. Ctx: { TraceId: 01jtmzt0jy3c63c06k8z3cefww, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MWJmYzhmZDAtYzI2MmJmNjItZDljY2RiZTctM2IxMjIyOTA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-05-07T09:04:12.995712Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715752. Ctx: { TraceId: 01jtmzt0mm49cgzxb5x9j1tx7q, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ODk0ZDI3ZjAtODViOTE1MzItNmE4NDAxMmUtYmU1NTAzMjA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:13.047143Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715753. Ctx: { TraceId: 01jtmzt0p9fh7hngvpqdz32d03, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NTM0NDU0ZDQtMmQxOTU5MmItOWJjNjRiOWUtYmNmNTAxYw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:13.107831Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715754. Ctx: { TraceId: 01jtmzt0qw2ck6ye37683hm0fs, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Y2YxOTg2NTEtYmJkZWQ0ZTktMTA4MGZiNzktODhjNmE5NjE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:13.174014Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715755. Ctx: { TraceId: 01jtmzt0stbyg5v671g4atcmwd, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZjhiM2E0MS02MGU5NTA2Yy0xZGQ3NDFkZS04YzFlZjM0Yw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:13.240194Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715756. Ctx: { TraceId: 01jtmzt0vw1pt2x7whaaqq8byw, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ODFkM2QzNDItZWJlNzk1Y2UtYWUyNTdiODktOWI5NmRiMDk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:13.304224Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715757. Ctx: { TraceId: 01jtmzt0xy3wnk5mtz36pdan84, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZjA3NmMyMDAtMTQ1OTJkZTQtODhiZDA2ZjEtZjkwNzNjMTU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:13.370038Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715758. Ctx: { TraceId: 01jtmzt0zzf80ef815fp8jr4g4, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ODI0YTcxNWQtMTQwZDc0YzUtYmI0NDM0OWYtNzRkNmEzYWM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:13.462004Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715759. Ctx: { TraceId: 01jtmzt120bn86a2nhc1qg51zr, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjhlNmFiYS0zMjE5MjBjOC0yNzQ0Mjk4Yy0xZGQ5NTMzZA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:13.624040Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715760. Ctx: { TraceId: 01jtmzt15dc69j5wcq0tg1pe40, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NWQwOTA1Yi1lYWViMjBjOC01ZDNiMDkzMC1iNzZlN2MwYg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TStoragePoolsQuotasTest::DisableWritesToDatabase-IsExternalSubdomain-false [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140] 2025-05-07T09:04:09.551030Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T09:04:09.551133Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:04:09.551177Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-05-07T09:04:09.551220Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T09:04:09.551285Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T09:04:09.551323Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T09:04:09.551384Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:04:09.551470Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T09:04:09.552492Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T09:04:09.552932Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T09:04:09.644104Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T09:04:09.644177Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:04:09.662997Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T09:04:09.663248Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T09:04:09.663455Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T09:04:09.670264Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T09:04:09.670605Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T09:04:09.671250Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been 
configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:09.671429Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T09:04:09.674818Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:09.676498Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:09.676570Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:09.676649Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T09:04:09.676692Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:09.676828Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T09:04:09.677221Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T09:04:09.684481Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T09:04:09.806441Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T09:04:09.806669Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:09.806878Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T09:04:09.807176Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T09:04:09.807248Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:09.809680Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:09.809852Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T09:04:09.810073Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: 
TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:09.810133Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T09:04:09.810178Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T09:04:09.810216Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T09:04:09.812293Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:09.812356Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T09:04:09.812407Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T09:04:09.814209Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:09.814263Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:09.814310Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:09.814386Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T09:04:09.818178Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T09:04:09.820315Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T09:04:09.820509Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T09:04:09.821457Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:09.821562Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:04:09.821610Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 
72057594046678944 2025-05-07T09:04:09.821864Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T09:04:09.821923Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:09.822108Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T09:04:09.822183Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T09:04:09.824412Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:09.824470Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:09.824678Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:09.824725Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [ ... p:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-05-07T09:04:14.471784Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-05-07T09:04:14.473276Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 103:0, at schemeshard: 72057594046678944 2025-05-07T09:04:14.473453Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 103:0, at schemeshard: 72057594046678944 2025-05-07T09:04:14.475299Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:14.475358Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 103, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-05-07T09:04:14.475592Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 103, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-05-07T09:04:14.475775Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:14.475824Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:205:2207], at schemeshard: 72057594046678944, txId: 103, path id: 2 2025-05-07T09:04:14.475875Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:205:2207], at schemeshard: 72057594046678944, txId: 103, path id: 3 
2025-05-07T09:04:14.475959Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 103:0, at schemeshard: 72057594046678944 2025-05-07T09:04:14.476011Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1036: NTableState::TProposedWaitParts operationId# 103:0 ProgressState at tablet: 72057594046678944 2025-05-07T09:04:14.476132Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:674: all shard schema changes has been received, operationId: 103:0, at schemeshard: 72057594046678944 2025-05-07T09:04:14.476199Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:686: send schema changes ack message, operation: 103:0, datashard: 72075186233409548, at schemeshard: 72057594046678944 2025-05-07T09:04:14.476253Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 103:0 129 -> 240 2025-05-07T09:04:14.477916Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 8 PathOwnerId: 72057594046678944, cookie: 103 2025-05-07T09:04:14.478062Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 8 PathOwnerId: 72057594046678944, cookie: 103 2025-05-07T09:04:14.478110Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 103 2025-05-07T09:04:14.478170Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 103, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 8 2025-05-07T09:04:14.478222Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5 2025-05-07T09:04:14.479284Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 103 2025-05-07T09:04:14.479382Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 103 2025-05-07T09:04:14.479414Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 103 2025-05-07T09:04:14.479456Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 103, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 18446744073709551615 2025-05-07T09:04:14.479501Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4 2025-05-07T09:04:14.479577Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 103, ready parts: 0/1, is published: true 2025-05-07T09:04:14.483445Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 103:0, at schemeshard: 72057594046678944 2025-05-07T09:04:14.483531Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_table.cpp:414: TDropTable TProposedDeletePart operationId: 103:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T09:04:14.483954Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove table for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-05-07T09:04:14.484221Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#103:0 progress is 1/1 2025-05-07T09:04:14.484263Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-05-07T09:04:14.484311Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#103:0 progress is 1/1 2025-05-07T09:04:14.484346Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-05-07T09:04:14.484389Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 103, ready parts: 1/1, is published: true 2025-05-07T09:04:14.484492Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:406:2374] message: TxId: 103 2025-05-07T09:04:14.484552Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-05-07T09:04:14.484597Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 103:0 2025-05-07T09:04:14.484633Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 103:0 2025-05-07T09:04:14.484745Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-05-07T09:04:14.485490Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:14.485532Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 0, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-05-07T09:04:14.486706Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-05-07T09:04:14.487374Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-05-07T09:04:14.489064Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:14.489119Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:205:2207], at schemeshard: 72057594046678944, txId: 0, path id: 2 2025-05-07T09:04:14.489220Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-05-07T09:04:14.489266Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [1:659:2594] 2025-05-07T09:04:14.490243Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 9 PathOwnerId: 72057594046678944, cookie: 0 TestWaitNotification: OK eventTxId 103 2025-05-07T09:04:14.491374Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/SomeDatabase" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T09:04:14.491699Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/SomeDatabase" took 274us result status StatusSuccess 2025-05-07T09:04:14.492216Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/SomeDatabase" PathDescription { Self { Name: "SomeDatabase" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 9 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 9 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SubDomainStateVersion: 2 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "unquoted_storage_pool" Kind: "unquoted_storage_pool_kind" } StoragePools { Name: "quoted_storage_pool" Kind: "quoted_storage_pool_kind" } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } StoragePoolsUsage { PoolKind: "unquoted_storage_pool_kind" TotalSize: 0 DataSize: 0 IndexSize: 0 } StoragePoolsUsage { PoolKind: "quoted_storage_pool_kind" TotalSize: 0 DataSize: 0 IndexSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 DatabaseQuotas { storage_quotas { unit_kind: "quoted_storage_pool_kind" data_size_hard_quota: 1 } } SecurityState { } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TSchemeShardSubDomainTest::SimultaneousCreateTenantDirTable ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_external_blobs/unittest >> ExternalBlobsMultipleChannels::Simple [GOOD] Test command err: 2025-05-07T09:04:05.620510Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:105:2151], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T09:04:05.620684Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T09:04:05.620944Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0035fe/r3tmp/tmpzhJE2L/pdisk_1.dat 2025-05-07T09:04:06.074023Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-05-07T09:04:06.119335Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:04:06.178034Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:04:06.179291Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:04:06.192595Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:04:06.286953Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-05-07T09:04:06.629783Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:736:2617], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:04:06.629961Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:747:2622], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:04:06.630111Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:04:06.645157Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480 2025-05-07T09:04:06.824481Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:750:2625], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-05-07T09:04:06.890607Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:821:2665] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:04:07.552369Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715660. Ctx: { TraceId: 01jtmzstf2a7gx5k8743watkgp, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YmZjNzE5OGUtNGNmZjFhZDEtMjAwNGFjNTQtNDYxMzU3OTA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:07.657612Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715661. Ctx: { TraceId: 01jtmzsvdp0wvyzykm9pq01vnf, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YmI2OWI3NzEtOTcyNDM0MzEtZTQ5NTA0MS1iMDdlMjk4Mg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:07.707762Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715662. Ctx: { TraceId: 01jtmzsvfecfpfn14m7rkkhzgc, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Yjg1OTI3MjItZTg0YzlmMzUtNmIyMTUxNTQtYzI4ZmIzNDU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:07.758599Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715663. Ctx: { TraceId: 01jtmzsvh0edz6ebks71pg0bx4, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZjUwNjU5MjAtMjQ1MjgxM2EtZDM1OTRlYjEtNTJjMzZiMmU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:07.811766Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715664. Ctx: { TraceId: 01jtmzsvjkfd667c3zwsvz6tb7, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NTVkYzkxMTQtZjczNzdkYjQtZDk5ODU5MGItYjhkNzcyMTU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:07.863341Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715665. Ctx: { TraceId: 01jtmzsvm9038dw8082dxzyx0q, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NDNjNjc0NTMtODg2YjZkOGEtN2E1YzUxMjMtOWViYzllNjU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:07.914122Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715666. Ctx: { TraceId: 01jtmzsvnweq83bt26de7gw4h7, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MmVhYmVkZTgtMmMwNWNiYzctNTcyMzRjOTUtNDEwMzdiNDE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:07.971465Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715667. Ctx: { TraceId: 01jtmzsvqf1xzffgtpfdce8byx, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NWVlZWZhNTMtNTg0MGQ0YjctNjIwY2M2Zi1lMGQzNzA3NA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:08.024254Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715668. 
Ctx: { TraceId: 01jtmzsvs7bgg4fexjscmm464y, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OTdmNzgxNTctZWVhMzViM2EtNDZjMTZjOGEtYmI5ZDU1MTE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:08.080872Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715669. Ctx: { TraceId: 01jtmzsvtyfzeayyf4cqwb07qt, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MjRlZmY0ZjgtNDkyMjM0ODgtZDBlOGZiMGMtMTJkZmIyMw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:08.143668Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715670. Ctx: { TraceId: 01jtmzsvwq7fk9ck2p6zxs581m, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZWZhNDU1MWItOTFjNDJlNzAtZGYxN2ZlNDYtMzBmMDgwZGE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:08.199896Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715671. Ctx: { TraceId: 01jtmzsvypcy2707harmm68jyy, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ODc2ZTIyYjItNzk5MTU5M2ItYjg1YWVjM2QtYzM0MTc0NjI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:08.261490Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715672. Ctx: { TraceId: 01jtmzsw0c10h7nqsth2nbjrge, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjVkZDAzZWItOWI0NDIzYTktN2JjNDNkZjAtNjllZjQ5NmE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:08.325480Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715673. Ctx: { TraceId: 01jtmzsw2bf1gxxx24sqbetkyr, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=M2YxNTI5MjctOGU4ZGMyOWQtOTI5NDU4Yi03NTUzOTViYg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:08.388629Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715674. Ctx: { TraceId: 01jtmzsw4b2j9hxaghps2dkhd6, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZjUwZGEwNjMtNzU5OGMwZmYtZjA0MzA1YTUtNTJlZWEwNQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:08.454101Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715675. Ctx: { TraceId: 01jtmzsw6b7cbtsc3dnjavnmcw, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTYyYzkyZmItMTU1OTM5ZDEtZDk3ZWExMGYtMTRkM2VjNGU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:08.516551Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715676. Ctx: { TraceId: 01jtmzsw8ceben2g63jbkamhvw, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZDY3MDgyMjktNmFmYjM2ZTMtMmYyOGJjNTctMTAzNTc2MDk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:08.565586Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715677. Ctx: { TraceId: 01jtmzswa93y14vdqhzap7ka7n, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZGRmNjhkOGQtMmRmYTdiMTMtYTkwN2U0MGMtODU0MmMzYzc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:08.620355Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715678. 
Ctx: { TraceId: 01jtmzswbv25ghaxe2bgbanwhf, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OGJjMzk3MWYtZTRmM2UzYWEtYzlhMDYzZWItMmFhYjYzNw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:08.671427Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715679. Ctx: { TraceId: 01jtmzswdh440dj6a7pfv792jj, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NGMyNWVjODctZDk0Njc4YmItYTBhNmZhMS1mODA4NTA4NQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:08.717914Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715680. Ctx: { TraceId: 01jtmzswf49jm67sekd1v7atm4, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjliYWM0NDgtZDkzNjcwMjMtODYzNGZhOTQtZjIzMWFmNGM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:08.765240Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715681. Ctx: { TraceId: 01jtmzswgkcvvbnsj73kqayz3r, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=N2I2NDQxZGQtOThmNGI2NjYtNDg1ZmM1NDItOWFjMjg4Yzk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:08.809833Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715682. Ctx: { TraceId: 01jtmzswj29p3n7paw4hs4xakx, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjZhZmRhMWYtMjAyM2ZjOGItNWJjMTU4YTgtNmJkOTUzOTc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:08.856937Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 2814749767156 ... r.cpp:119: TxId: 281474976715727. Ctx: { TraceId: 01jtmzsz5d4ggdq7knt1hv4hj7, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjZhM2ZmZGEtZTQyNWE3NDEtOTA0MTU0ZTUtNTZjNDQ1YQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:11.541549Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715728. Ctx: { TraceId: 01jtmzsz756pzts8zdg3v7a1tx, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MjRlZWZkMzAtYWM4NmNmNjItZjQ1NzcyMDAtMzJhYTViMDQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:11.594330Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715729. Ctx: { TraceId: 01jtmzsz8vfzczyayq1nx3p8j3, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NjU2ZWU4MjctYjdmMjNhMGItY2I2MWY2ZTctMmMyOWM4YTM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:11.641830Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715730. Ctx: { TraceId: 01jtmzszag315t477myem9ayax, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjM4N2VjOWMtZmU3ZWQyMjUtYThmZDc2NTMtMWNiZjhlOGM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:11.691351Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715731. Ctx: { TraceId: 01jtmzszbzah3fk2yzxnq4jxx9, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZWQ1ZDI5Y2EtNGI3NmYxNDgtNzg5YTQ1MWMtZDY5YWE5OGQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-05-07T09:04:11.768317Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715732. Ctx: { TraceId: 01jtmzszdg3zh9w7qfk1y1rs46, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NmY1M2FiMC0xYjZjYmE3MC1kYjFkOWI3YS01NmU2ODUyMw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:11.829746Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715733. Ctx: { TraceId: 01jtmzszfz5yv3ztrk9zvjzy9v, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NTVkNTVjOWEtZWQ5NmQ3NDItNWFmNDYwZDYtN2Q3NmI5NTk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:11.893362Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715734. Ctx: { TraceId: 01jtmzszhw3f1dmms6fqmxph6k, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MTUzNDI5MzgtYzVjZTU3MzUtZDk4ZjRlMTAtMmIxYzY3NWE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:11.963280Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715735. Ctx: { TraceId: 01jtmzszkwas0aj5759pgjzjh9, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YWZjYmUyZTktMWMzYjBlYWItYjJjZTU5NWMtMWRhMzNhNDg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:12.038996Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715736. Ctx: { TraceId: 01jtmzszp18xx06rc1bmbkk6a8, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=N2Q3ZWNkY2YtN2FkYjRiM2EtYmFiMDU1OTEtOWM1YzU0OGE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:12.105621Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715737. Ctx: { TraceId: 01jtmzszrf2dtef1cxj9131mey, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NzJlMTI2MzMtODQ4NGJkZTAtZDE5M2VjMjYtNTcyZWNiYTI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:12.164816Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715738. Ctx: { TraceId: 01jtmzsztffegyydhkjar9w4zn, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ODFjZTc0OGItNzkyNTk0ZTktYmU3ZDI4YWEtZGU0MmJhZmQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:12.229539Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715739. Ctx: { TraceId: 01jtmzszwc58sabzrdsct38sp3, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MjVkMzMzYTgtODBlMTI1N2MtYTA0M2E3OTYtM2RhODUxMw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:12.289628Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715740. Ctx: { TraceId: 01jtmzszyb2j90mjsa96cg4y7r, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NDFjODM1ZjQtMzgwY2YzYjAtMzNmZmViOTItN2QzN2E5ODE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:12.344945Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715741. Ctx: { TraceId: 01jtmzt0088xzcvy5eqk9scpsn, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YzY0NTJiNTEtNGYwZDE3MmEtODEwMGM5NTItNGJmM2Q4NTg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-05-07T09:04:12.418741Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715742. Ctx: { TraceId: 01jtmzt01y8knjrxzzkd9h0ak5, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NmEzYTk5NTAtZTFjMGNkZTMtYjljNjc0NmQtMWJmYTVkZjc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:12.482727Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715743. Ctx: { TraceId: 01jtmzt0499nf88b7ay6cq6dby, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NmZkMjA4MDAtY2FkYThmOWEtODVhMDU3MjUtZTI4Y2I0YjA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:12.544364Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715744. Ctx: { TraceId: 01jtmzt06856gqp4ez0rpm7m7h, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NDA5YzU5NWItNDA1YjRmZS0zYTUyOWVjZS0yNDBlY2RhMw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:12.608534Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715745. Ctx: { TraceId: 01jtmzt086fddsc4hk8mk16nx7, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NDg5MDQwNzctZjE5NmQwZjgtMWRiOTI4ZmItOGFkM2Q4MjQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:12.672644Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715746. Ctx: { TraceId: 01jtmzt0a8d8fbx6ggsb20md3k, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZmZlZTMwMTktM2M2NGM3MGYtY2ZlMzY1YzYtOTQ4OWVlYWI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:12.736900Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715747. Ctx: { TraceId: 01jtmzt0c78gyefkvph5rz876y, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZjZjYjY2LWVhMmM3ZTFkLWY1MmFkZWIwLTIwYjRlM2Vm, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:12.797576Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715748. Ctx: { TraceId: 01jtmzt0e77wvwsa0g31048g2c, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NGExNGVhZTAtMTg1ZmM0M2EtODcyYmY1ZjEtNmY2OGFlOWQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:12.861332Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715749. Ctx: { TraceId: 01jtmzt0g467jm49bt3bwbf8yr, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OTI4YTEyOWQtN2UzNGUzMzQtMzA0ZGNjMzUtYjQ0ZTc0ODI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:12.911507Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715750. Ctx: { TraceId: 01jtmzt0j42cwxpnwzqntbqghs, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZDAxNWM2OGYtNGVhOWU5MWItMmJiNDk0YmItYzlhNzI5Y2Q=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:12.961211Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715751. Ctx: { TraceId: 01jtmzt0kmb33cd8c1dtnq5ts3, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MzEyYjcyZTUtNzAxNGI3YTItNTI4ODVmYjUtOGYxZTRiOTg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-05-07T09:04:13.032124Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715752. Ctx: { TraceId: 01jtmzt0n7egx17hcxnr2zvr2w, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YWFlOGM4MGUtOWE2Y2U5ZDgtY2FkZTQ0YzctYTJhYzliOWU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:13.097045Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715753. Ctx: { TraceId: 01jtmzt0qfcgs3da0ngvsr5rjr, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NmRkMWQxNWItMTZjODMzNTMtNWZkYjI2Y2EtMjNhYmM2YTY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:13.163412Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715754. Ctx: { TraceId: 01jtmzt0sgbja88ej9487m1r1x, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NDZmY2RiNDktNWQ1MzM4YWMtMTBlNjc0YmUtY2MzMzUyZGM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:13.228367Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715755. Ctx: { TraceId: 01jtmzt0vh7t28bgvcbpd462ex, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZWY1NGNjNjMtNWEzMmM2ZjMtZDU4M2I1OTEtOWIzMTgwNTA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:13.295514Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715756. Ctx: { TraceId: 01jtmzt0xkfp9qq0z97y5phwqr, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Yjk3MmZjYjktYzM2MjViMGEtYTA5MWU5NmQtYTE2Yjc1Yzk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:13.362363Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715757. Ctx: { TraceId: 01jtmzt0zpd5h4trb0z53gq5gt, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjUzOGJhMTgtNTVlY2RkYTMtZDExMWUxZGEtYzBlMGMxODI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:13.410693Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715758. Ctx: { TraceId: 01jtmzt11s20yyenbpm10ax4vh, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NzVjNTFiMWItNzg0OGEwOTMtODEyYjBhZjItZmMxMTZiZDM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:13.460887Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715759. Ctx: { TraceId: 01jtmzt1381g8kq0n6wkd3sr6w, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZWE2YWJiN2EtY2I2ZGVhNDctZjZlNTliNWUtZjIxYjgyMTk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:13.777541Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715760. Ctx: { TraceId: 01jtmzt19n7ebser429d33dn3n, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NGU0M2JiNzAtZDczYzk5NzAtMWY4OTMwNjAtNmQxMjgwN2Y=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root >> TSchemeShardSubDomainTest::CopyRejects [GOOD] >> TSchemeShardSubDomainTest::ConsistentCopyRejects >> TStoragePoolsQuotasTest::QuoteNonexistentPool-IsExternalSubdomain-false [GOOD] >> BackupRestore::RestoreViewQueryText [GOOD] >> BackupRestore::RestoreViewReferenceTable ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::SimultaneousDeclareAndDefine [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:128:2058] recipient: [1:108:2140] 2025-05-07T09:04:14.656439Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T09:04:14.656540Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:04:14.656579Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T09:04:14.656614Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T09:04:14.656663Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T09:04:14.656695Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T09:04:14.656772Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:04:14.656860Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T09:04:14.657594Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T09:04:14.658018Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T09:04:14.734623Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T09:04:14.734690Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:04:14.747580Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T09:04:14.747704Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T09:04:14.747877Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T09:04:14.754986Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 
2025-05-07T09:04:14.755388Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T09:04:14.755860Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:14.756076Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T09:04:14.757660Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:14.759006Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:14.759060Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:14.759109Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T09:04:14.759146Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:14.759225Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T09:04:14.759335Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T09:04:14.765003Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:239:2058] recipient: [1:15:2062] 2025-05-07T09:04:14.883902Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T09:04:14.884132Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:14.884346Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T09:04:14.884601Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T09:04:14.884682Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:14.887062Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:14.887218Z node 
1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T09:04:14.887435Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:14.887502Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T09:04:14.887547Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T09:04:14.887581Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T09:04:14.889154Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:14.889195Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T09:04:14.889266Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T09:04:14.890927Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:14.890961Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:14.891006Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:14.891063Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T09:04:14.899941Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T09:04:14.902054Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T09:04:14.902237Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T09:04:14.903092Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:14.903223Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 130 RawX2: 4294969450 } } Step: 5000001 MediatorID: 0 
TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:04:14.903268Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:14.903575Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T09:04:14.903617Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:14.903764Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T09:04:14.903824Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T09:04:14.905610Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:14.905643Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:14.905775Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:14.905827Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 
tionPlanStep Execute, stepId: 5000002, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:14.949734Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 100 Coordinator: 72057594046316545 AckTo { RawX1: 130 RawX2: 4294969450 } } Step: 5000002 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:04:14.949767Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 100:0, at tablet# 72057594046678944 2025-05-07T09:04:14.949943Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 100:0 128 -> 240 2025-05-07T09:04:14.950002Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 100:0, at tablet# 72057594046678944 2025-05-07T09:04:14.950155Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T09:04:14.950208Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-05-07T09:04:14.950245Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 100 2025-05-07T09:04:14.951860Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:14.951903Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 100, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:14.952014Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 100, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-05-07T09:04:14.952112Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:14.952160Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:206:2208], at schemeshard: 72057594046678944, txId: 100, path id: 1 2025-05-07T09:04:14.952209Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:206:2208], at schemeshard: 72057594046678944, txId: 100, path id: 2 2025-05-07T09:04:14.952554Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 100:0, at schemeshard: 72057594046678944 2025-05-07T09:04:14.952602Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046678944] TDone opId# 100:0 ProgressState 2025-05-07T09:04:14.952690Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#100:0 progress is 1/1 2025-05-07T09:04:14.952720Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone 
TxId: 100 ready parts: 1/1 2025-05-07T09:04:14.952762Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#100:0 progress is 1/1 2025-05-07T09:04:14.952789Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 100 ready parts: 1/1 2025-05-07T09:04:14.952821Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 100, ready parts: 1/1, is published: false 2025-05-07T09:04:14.952888Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 100 ready parts: 1/1 2025-05-07T09:04:14.952930Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 100:0 2025-05-07T09:04:14.952960Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 100:0 2025-05-07T09:04:14.953029Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-05-07T09:04:14.953073Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 100, publications: 2, subscribers: 0 2025-05-07T09:04:14.953111Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 100, [OwnerId: 72057594046678944, LocalPathId: 1], 5 2025-05-07T09:04:14.953143Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 100, [OwnerId: 72057594046678944, LocalPathId: 2], 3 2025-05-07T09:04:14.953529Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 100 2025-05-07T09:04:14.953609Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 100 2025-05-07T09:04:14.953642Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 100 2025-05-07T09:04:14.953684Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 100, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 5 2025-05-07T09:04:14.953718Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-05-07T09:04:14.954081Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 100 2025-05-07T09:04:14.954156Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 100 2025-05-07T09:04:14.954197Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, 
at schemeshard: 72057594046678944, txId: 100 2025-05-07T09:04:14.954219Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 100, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 3 2025-05-07T09:04:14.954244Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-05-07T09:04:14.954370Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 100, subscribers: 0 2025-05-07T09:04:14.957573Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 100 2025-05-07T09:04:14.957716Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 100 TestModificationResult got TxId: 100, wait until txId: 100 TestModificationResults wait txId: 101 TestModificationResult got TxId: 101, wait until txId: 101 TestWaitNotification wait txId: 100 2025-05-07T09:04:14.957997Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 100: send EvNotifyTxCompletion 2025-05-07T09:04:14.958044Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 100 TestWaitNotification wait txId: 101 2025-05-07T09:04:14.958145Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 101: send EvNotifyTxCompletion 2025-05-07T09:04:14.958260Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 101 2025-05-07T09:04:14.958703Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 100, at schemeshard: 72057594046678944 2025-05-07T09:04:14.958797Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 100: got EvNotifyTxCompletionResult 2025-05-07T09:04:14.958826Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 100: satisfy waiter [1:311:2302] 2025-05-07T09:04:14.959014Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 2025-05-07T09:04:14.959123Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-05-07T09:04:14.959144Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:311:2302] TestWaitNotification: OK eventTxId 100 TestWaitNotification: OK eventTxId 101 2025-05-07T09:04:14.959528Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T09:04:14.959722Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 203us result status StatusSuccess 2025-05-07T09:04:14.960048Z node 1 
:SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0" PathDescription { Self { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 1 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 0 TimeCastBucketsPerMediator: 0 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::CreateSubDomainWithoutTabletsThenDrop [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140] 2025-05-07T09:04:14.701221Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T09:04:14.701324Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:04:14.701363Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T09:04:14.701395Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T09:04:14.701447Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T09:04:14.701468Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T09:04:14.701520Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:04:14.701600Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 
2025-05-07T09:04:14.702262Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T09:04:14.702609Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T09:04:14.776389Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T09:04:14.776471Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:04:14.791790Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T09:04:14.791957Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T09:04:14.792124Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T09:04:14.797755Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T09:04:14.798077Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T09:04:14.798542Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:14.798683Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T09:04:14.801100Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:14.802414Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:14.802465Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:14.802525Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T09:04:14.802573Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:14.802661Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T09:04:14.802911Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T09:04:14.808945Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T09:04:14.907752Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T09:04:14.907958Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:14.908150Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T09:04:14.908357Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T09:04:14.908515Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:14.910514Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:14.910633Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T09:04:14.910810Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:14.910872Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T09:04:14.910922Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T09:04:14.910953Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T09:04:14.912359Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:14.912397Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T09:04:14.912428Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T09:04:14.913723Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:14.913770Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:14.913800Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:14.913855Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T09:04:14.916867Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T09:04:14.918532Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T09:04:14.918705Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T09:04:14.919688Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:14.919786Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:04:14.919823Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:14.920114Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T09:04:14.920181Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:14.920329Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T09:04:14.920388Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T09:04:14.922609Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:14.922662Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:14.922825Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:14.922861Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 
7594046678944, txId: 101, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-05-07T09:04:14.991817Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:14.991854Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:205:2207], at schemeshard: 72057594046678944, txId: 101, path id: 1 2025-05-07T09:04:14.991884Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:205:2207], at schemeshard: 72057594046678944, txId: 101, path id: 2 2025-05-07T09:04:14.992113Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 101:0, at schemeshard: 72057594046678944 2025-05-07T09:04:14.992141Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:416: [72057594046678944] TDeleteParts opId# 101:0 ProgressState 2025-05-07T09:04:14.992174Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#101:0 progress is 1/1 2025-05-07T09:04:14.992197Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-05-07T09:04:14.992271Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#101:0 progress is 1/1 2025-05-07T09:04:14.992297Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-05-07T09:04:14.992324Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 101, ready parts: 1/1, is published: false 2025-05-07T09:04:14.992375Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-05-07T09:04:14.992402Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 101:0 2025-05-07T09:04:14.992424Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 101:0 2025-05-07T09:04:14.992468Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-05-07T09:04:14.992494Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 101, publications: 2, subscribers: 0 2025-05-07T09:04:14.992516Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 1], 7 2025-05-07T09:04:14.992540Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 2], 18446744073709551615 2025-05-07T09:04:14.992954Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 101 2025-05-07T09:04:14.993038Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 7 PathOwnerId: 
72057594046678944, cookie: 101 2025-05-07T09:04:14.993086Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 101 2025-05-07T09:04:14.993124Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 7 2025-05-07T09:04:14.993160Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-05-07T09:04:14.993789Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 101 2025-05-07T09:04:14.993884Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 101 2025-05-07T09:04:14.993915Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 101 2025-05-07T09:04:14.993951Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 18446744073709551615 2025-05-07T09:04:14.994018Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-05-07T09:04:14.994096Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 101, subscribers: 0 2025-05-07T09:04:14.994360Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:205: TTxCleanDroppedSubDomains Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-05-07T09:04:14.994419Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: PersistRemoveSubDomain for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-05-07T09:04:14.994487Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-05-07T09:04:14.995285Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-05-07T09:04:14.995341Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-05-07T09:04:14.995417Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T09:04:14.997438Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-05-07T09:04:14.997546Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-05-07T09:04:14.998876Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-05-07T09:04:14.998999Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 101, wait until txId: 101 TestWaitNotification wait txId: 101 2025-05-07T09:04:14.999203Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 101: send EvNotifyTxCompletion 2025-05-07T09:04:14.999239Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 101 2025-05-07T09:04:14.999635Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 2025-05-07T09:04:14.999726Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-05-07T09:04:14.999768Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:338:2329] TestWaitNotification: OK eventTxId 101 2025-05-07T09:04:15.000219Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T09:04:15.000399Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 158us result status StatusPathDoesNotExist 2025-05-07T09:04:15.000561Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/USER_0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-05-07T09:04:15.001038Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T09:04:15.001183Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 175us result status StatusSuccess 2025-05-07T09:04:15.001577Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: 
schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::SchemeQuotas [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140] 2025-05-07T09:04:12.494553Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T09:04:12.494653Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:04:12.494710Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T09:04:12.494747Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T09:04:12.494802Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T09:04:12.494834Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T09:04:12.494885Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:04:12.494978Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# 
false 2025-05-07T09:04:12.495732Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T09:04:12.496098Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T09:04:12.590720Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T09:04:12.590772Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:04:12.607712Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T09:04:12.607924Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T09:04:12.608119Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T09:04:12.614807Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T09:04:12.615136Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T09:04:12.615801Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:12.616014Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T09:04:12.618834Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:12.620196Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:12.620260Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:12.620338Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T09:04:12.620383Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:12.620484Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T09:04:12.620739Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T09:04:12.627960Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T09:04:12.774380Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T09:04:12.774596Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:12.774817Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T09:04:12.775089Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T09:04:12.775155Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:12.782810Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:12.782986Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T09:04:12.783223Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:12.783294Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T09:04:12.783335Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T09:04:12.783374Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T09:04:12.785305Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:12.785374Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T09:04:12.785424Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T09:04:12.787236Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:12.787292Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:12.787336Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:12.787397Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T09:04:12.797295Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T09:04:12.799463Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T09:04:12.799682Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T09:04:12.800914Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:12.801074Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:04:12.801132Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:12.801420Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T09:04:12.801486Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:12.801671Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T09:04:12.801775Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T09:04:12.804000Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:12.804058Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:12.804247Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:12.804291Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 
CHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 137:0, at schemeshard: 72057594046678944 2025-05-07T09:04:14.788041Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 11 2025-05-07T09:04:14.788096Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 10] was 3 2025-05-07T09:04:14.790921Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 137, response: Status: StatusAccepted TxId: 137 SchemeshardId: 72057594046678944 PathId: 10, at schemeshard: 72057594046678944 2025-05-07T09:04:14.791089Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 137, database: /MyRoot/USER_0, subject: , status: StatusAccepted, operation: CREATE TABLE, path: /MyRoot/USER_0/Table11 2025-05-07T09:04:14.791340Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:14.791380Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 137, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-05-07T09:04:14.791605Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 137, path id: [OwnerId: 72057594046678944, LocalPathId: 10] 2025-05-07T09:04:14.791711Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:14.791752Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:1018:2881], at schemeshard: 72057594046678944, txId: 137, path id: 2 2025-05-07T09:04:14.791827Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:1018:2881], at schemeshard: 72057594046678944, txId: 137, path id: 10 2025-05-07T09:04:14.791906Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 137:0, at schemeshard: 72057594046678944 2025-05-07T09:04:14.791981Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 137:0 ProgressState, operation type: TxCreateTable, at tablet# 72057594046678944 2025-05-07T09:04:14.792177Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:357: TCreateParts opId# 137:0 CreateRequest Event to Hive: 72057594037968897 msg: Owner: 72057594046678944 OwnerIdx: 10 TabletType: DataShard ObjectDomain { SchemeShard: 72057594046678944 PathId: 2 } ObjectId: 10 BindedChannels { StoragePoolName: "pool-1" } BindedChannels { StoragePoolName: "pool-1" } BindedChannels { StoragePoolName: "pool-1" } AllowedDomains { SchemeShard: 72057594046678944 PathId: 2 } 2025-05-07T09:04:14.793492Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 4 LocalPathId: 2 Version: 18 PathOwnerId: 72057594046678944, cookie: 137 2025-05-07T09:04:14.793645Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 4 LocalPathId: 2 Version: 18 PathOwnerId: 72057594046678944, cookie: 137 2025-05-07T09:04:14.793692Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 137 2025-05-07T09:04:14.793732Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 137, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 18 2025-05-07T09:04:14.793775Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 12 2025-05-07T09:04:14.795356Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 4 LocalPathId: 10 Version: 1 PathOwnerId: 72057594046678944, cookie: 137 2025-05-07T09:04:14.795443Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 4 LocalPathId: 10 Version: 1 PathOwnerId: 72057594046678944, cookie: 137 2025-05-07T09:04:14.795493Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 137 2025-05-07T09:04:14.795521Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 137, pathId: [OwnerId: 72057594046678944, LocalPathId: 10], version: 1 2025-05-07T09:04:14.795549Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 10] was 4 2025-05-07T09:04:14.795637Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 137, ready parts: 0/1, is published: true 2025-05-07T09:04:14.798599Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 137:0 from tablet: 72057594046678944 to tablet: 72057594037968897 cookie: 72057594046678944:10 msg type: 268697601 2025-05-07T09:04:14.798764Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 137, partId: 0, tablet: 72057594037968897 2025-05-07T09:04:14.798807Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1767: TOperation RegisterRelationByShardIdx, TxId: 137, shardIdx: 72057594046678944:10, partId: 0 2025-05-07T09:04:14.799304Z node 1 :HIVE INFO: tablet_helpers.cpp:1181: [72057594037968897] TEvCreateTablet, msg: Owner: 72057594046678944 OwnerIdx: 10 TabletType: DataShard ObjectDomain { SchemeShard: 72057594046678944 PathId: 2 } ObjectId: 10 BindedChannels { StoragePoolName: "pool-1" } BindedChannels { StoragePoolName: "pool-1" } BindedChannels { StoragePoolName: "pool-1" } AllowedDomains { SchemeShard: 72057594046678944 PathId: 2 } 2025-05-07T09:04:14.799502Z node 1 :HIVE INFO: tablet_helpers.cpp:1245: [72057594037968897] TEvCreateTablet, Owner 72057594046678944, OwnerIdx 10, type DataShard, boot OK, tablet id 72075186233409555 2025-05-07T09:04:14.799631Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard_impl.cpp:5827: Handle TEvCreateTabletReply at schemeshard: 72057594046678944 message: Status: OK Owner: 72057594046678944 OwnerIdx: 10 TabletID: 72075186233409555 Origin: 72057594037968897 2025-05-07T09:04:14.799673Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1781: TOperation FindRelatedPartByShardIdx, TxId: 137, shardIdx: 72057594046678944:10, partId: 0 2025-05-07T09:04:14.799779Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 137:0, at schemeshard: 72057594046678944, message: Status: OK Owner: 72057594046678944 OwnerIdx: 10 TabletID: 72075186233409555 Origin: 72057594037968897 2025-05-07T09:04:14.799839Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:175: TCreateParts opId# 137:0 HandleReply TEvCreateTabletReply, at tabletId: 72057594046678944 2025-05-07T09:04:14.799914Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:178: TCreateParts opId# 137:0 HandleReply TEvCreateTabletReply, message: Status: OK Owner: 72057594046678944 OwnerIdx: 10 TabletID: 72075186233409555 Origin: 72057594037968897 2025-05-07T09:04:14.799999Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 137:0 2 -> 3 2025-05-07T09:04:14.800877Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 137 2025-05-07T09:04:14.802504Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 137 2025-05-07T09:04:14.803948Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 137:0, at schemeshard: 72057594046678944 2025-05-07T09:04:14.804174Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 137:0, at schemeshard: 72057594046678944 2025-05-07T09:04:14.804245Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_create_table.cpp:200: TCreateTable TConfigureParts operationId# 137:0 ProgressState at tabletId# 72057594046678944 2025-05-07T09:04:14.804334Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_create_table.cpp:220: TCreateTable TConfigureParts operationId# 137:0 ProgressState Propose modify scheme on datashard datashardId: 72075186233409555 seqNo: 4:5 2025-05-07T09:04:14.804659Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_create_table.cpp:236: TCreateTable TConfigureParts operationId# 137:0 ProgressState Propose modify scheme on datashard datashardId: 72075186233409555 message: TxKind: TX_KIND_SCHEME SourceDeprecated { RawX1: 966 RawX2: 4294970136 } TxBody: "\n\236\004\n\007Table11\020\n\032\r\n\003key\030\002 \001(\000@\000\032\020\n\005Value\030\200$ \002(\000@\000(\001:\262\003\022\253\003\010\200\200\200\002\020\254\002\030\364\003 \200\200\200\010(\0000\200\200\200 8\200\200\200\010@\2008H\000RX\010\000\020\000\030\010 \010(\200\200\200@0\377\377\377\377\0178\001B$\010e\020d\031\000\000\000\000\000\000\360?*\025background_compactionJ\017compaction_gen1P\nX\200\200\001`nh\000p\000Rb\010\001\020\200\200\200\024\030\005 \020(\200\200\200\200\0020\377\377\377\377\0178\000B$\010e\020d\031\000\000\000\000\000\000\360?*\025background_compactionJ\017compaction_gen2P\nX\200\200\001`nh\200\200\200\004p\200\200\200\004Rc\010\002\020\200\200\200\310\001\030\005 
\020(\200\200\200\200@0\377\377\377\377\0178\000B$\010e\020d\031\000\000\000\000\000\000\360?*\025background_compactionJ\017compaction_gen3P\nX\200\200\001`nh\200\200\200(p\200\200\200(X\001`\005j$\010e\020d\031\000\000\000\000\000\000\360?*\025background_compactionr\017compaction_gen0z\017compaction_gen0\202\001\004scan\210\001\200\200\200\010\220\001\364\003\230\0012\270\001\2008\300\001\006R\002\020\001J\026/MyRoot/USER_0/Table11\242\001\006\001\000\000\000\000\200\252\001\000\260\001\001\270\001\000\210\002\001\222\002\013\t\240\207\205\000\000\000\000\001\020\n:\004\010\004\020\005" TxId: 137 ExecLevel: 0 Flags: 0 SchemeShardId: 72057594046678944 ProcessingParams { Version: 2 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 } SubDomainPathId: 2 2025-05-07T09:04:14.808133Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 137:0 from tablet: 72057594046678944 to tablet: 72075186233409555 cookie: 72057594046678944:10 msg type: 269549568 2025-05-07T09:04:14.808264Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 137, partId: 0, tablet: 72075186233409555 TestModificationResult got TxId: 137, wait until txId: 137 >> TSchemeShardSubDomainTest::CreateSubDomainWithoutTabletsThenForceDrop [GOOD] >> TStoragePoolsQuotasTest::DifferentQuotasInteraction-IsExternalSubdomain [GOOD] >> TCdcStreamTests::StreamOnIndexTable [GOOD] >> TCdcStreamTests::StreamOnBuildingIndexTable ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_external_blobs/unittest >> ExternalBlobsMultipleChannels::WithCompaction [GOOD] Test command err: 2025-05-07T09:04:05.478913Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:105:2151], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T09:04:05.479110Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T09:04:05.479391Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003658/r3tmp/tmp0rid8r/pdisk_1.dat 2025-05-07T09:04:06.069570Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-05-07T09:04:06.119346Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:04:06.179868Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:04:06.179987Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:04:06.192059Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:04:06.286747Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-05-07T09:04:06.633596Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:736:2617], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:04:06.633689Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:747:2622], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:04:06.633776Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:04:06.645023Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480 2025-05-07T09:04:06.822225Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:750:2625], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-05-07T09:04:06.898214Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:821:2665] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:04:07.552380Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715660. Ctx: { TraceId: 01jtmzstf88bnfjynrdvyacrs4, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Mjc4ZTcyMTYtYjE4YWZjYmMtYWRiYzRiZmMtYzIwYTNhYWY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:07.649551Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715661. Ctx: { TraceId: 01jtmzsvdt3deqppyvgvegdz7r, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjhhZWZmN2YtYWVmZTViNGQtNmU0N2IyNjktYTFjNjc1YzA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:07.711686Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715662. Ctx: { TraceId: 01jtmzsvfha11x47mhpwjn3dv8, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NmM1YmY2OTctNjAzYmNkMGMtNDY3MTY5NTItMzMyMjNmN2E=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:07.773247Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715663. Ctx: { TraceId: 01jtmzsvhf6pqqaetdzaqybmmp, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NDE4MWMzZDctZDdlYjY4NGEtOTBjYzg3ZWMtM2FlMDA1OGI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:07.834738Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715664. Ctx: { TraceId: 01jtmzsvkdcnh0t4ss65c4dpns, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjM1YTU0OTUtMzE0NTQ4NWQtOGI0YWU3Y2ItODFmYjFkYWE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:07.895959Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715665. Ctx: { TraceId: 01jtmzsvnb34mgqjzg3x59mmwv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ODZjN2ZhNDYtMjNhOWM2YTYtNjlhZWQyNzMtOWI5ZGY4M2E=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:07.959212Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715666. Ctx: { TraceId: 01jtmzsvq7fq3jasd3akvk737b, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZDI4ZmRjZTAtZDg4MzBjYzEtY2E2OGYxOGEtODg2ZWU5YmE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:08.011242Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715667. Ctx: { TraceId: 01jtmzsvs6b3ekv4zyjcpa1zys, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NDIzZTk5ODEtM2YyY2I5ZTMtNmRkYjNjZjgtNjI1YWI3Yzk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:08.063239Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715668. 
Ctx: { TraceId: 01jtmzsvtt6y490sk76st08e0e, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZmUwNGQyNS05MDM0ODQ1My1iZWNkM2RjZi1jZTg1OTU0, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:08.111054Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715669. Ctx: { TraceId: 01jtmzsvwd3f6z3zfps70ayqan, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MWRhYWNlNmMtYmM5YzdiMjMtOWZkYmFjYTMtZDgxZGUwYTI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:08.173915Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715670. Ctx: { TraceId: 01jtmzsvxy8fh4tvknvag9whce, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=N2Y2MTdkNDAtNGMxMjdlYzEtMTRhYzJjYzktMmFjZWVhZjM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:08.237691Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715671. Ctx: { TraceId: 01jtmzsvzy48fvv81btcvydg1r, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NmFiMWM2YWUtNTUxY2FiNmUtNDVhZWE4OGItODJkYzViOGU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:08.303211Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715672. Ctx: { TraceId: 01jtmzsw1y5cdj8n95p0nqdf0t, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MTEwZmZiYi1iMTVkMjI0ZS1kMGIyNWEzLTk5NzM3MzNj, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:08.369219Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715673. Ctx: { TraceId: 01jtmzsw3zb5jbn9amh28hq6qs, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTdmYTUxZWMtMTM4NTcyZS02OGJiMzc0Ny1lOTU2MDFiMg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:08.435056Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715674. Ctx: { TraceId: 01jtmzsw6166fxr76pzbw0gvvs, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YzUyOWEzY2EtOTYxOTU3ZGMtZmRlNGRmOTktZGRhOGZlZg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:08.491983Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715675. Ctx: { TraceId: 01jtmzsw848dga84adybfqkk0q, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NjExMGE2NWQtOTRlYWExYTgtOTI3ODFkMTAtNGEyMjNkMA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:08.549100Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715676. Ctx: { TraceId: 01jtmzsw9waakkqdz17ryxhkns, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OTZmZDIyNmQtYzZkODFmMjQtMzI5YmVjNTMtODIwYzFlZDc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:08.609105Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715677. Ctx: { TraceId: 01jtmzswbm72gacyp2rydpve1j, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MTZlZDIyYTAtNzYzZWRhNTAtODRkNDcxNTctNTRkYjAyNTY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:08.668044Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715678. 
Ctx: { TraceId: 01jtmzswdhdsdqp9bnvx979e04, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZTA2ZTUzYzUtZjE5ODFjMDktMWU2MzNjODgtYWFkMjAxMg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:08.727161Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715679. Ctx: { TraceId: 01jtmzswfb9hnmg4gqtweeh8h8, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NWU4ZmVkZmUtODEwYjM0ZTMtNjI3OTNmOGUtYTA2MjllOTM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:08.775941Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715680. Ctx: { TraceId: 01jtmzswh751nv7gr4cfbxmxqb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Y2RlMjZiODEtNGMzM2ZmY2UtNGQzOGYyZDItMWJhNTYxOQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:08.821991Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715681. Ctx: { TraceId: 01jtmzswjpfscxvyaj2bd35qfv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTFlZDQ2MGItOTU2ODVkMDItNzA5OWY4YjMtOGFhMWY1ZDg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:08.872139Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715682. Ctx: { TraceId: 01jtmzswm58m2ad9why45s152v, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZGZjYWJiZjQtNjg3MjdjOTgtYWY2MTdlYTEtZjUyNGZhYzI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:08.922629Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715683. Ctx: ... olId: default}. Database not set, use /Root 2025-05-07T09:04:11.688087Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715728. Ctx: { TraceId: 01jtmzszbz2n1bcm53hat1a4r5, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZDFlZTcyODItYWFjOTRmOTctMTdjMmM0YS03M2ZjZjIwZQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:11.746793Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715729. Ctx: { TraceId: 01jtmzszdrd4cyt0fedhkrpwhw, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YzliZjRkMDUtOWE3NWViODctZjBlYzRmOTktNDQ4ZmRlYzk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:11.805364Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715730. Ctx: { TraceId: 01jtmzszfk7bhn8hmqwsgtr790, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YzQ5YWJmNWMtYjQ4NmVjMDItYTVlNTFmOC1iOWIzMDQyNQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:11.874112Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715731. Ctx: { TraceId: 01jtmzszhf34r6x539sdcwpr5k, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Njk0YzNjZDEtNDRiMWIyNjAtZmE2MTIzN2ItNzZkYTBlMTc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:11.940487Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715732. 
Ctx: { TraceId: 01jtmzszkk7hqm3y45130jh709, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=M2ZmNDAwMzYtZTQ4NDVjNGYtNDVjZjc3ZDItMmQwMzhlMmM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:12.052387Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715733. Ctx: { TraceId: 01jtmzsznn9n5kxxg0er161g7d, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MzEzMGIxZmItYTI3Mzg4NDctZmNhOWQ5MTAtZTcxYTgwN2U=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:12.110551Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715734. Ctx: { TraceId: 01jtmzszs6ab1w3249qnzqgz4a, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OWE4NzIwMzItYmMxMzIxNzgtZjY3NDZhYzEtOTYxYWFmMGE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:12.180743Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715735. Ctx: { TraceId: 01jtmzszv0bk3n4adxr15ew6h8, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YzM0MzI3OGItZWM2ZTIwY2EtODgxZDY0MTYtYzI1ZGJjZDc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:12.248827Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715736. Ctx: { TraceId: 01jtmzszx6b06z5d9hn8f2qcr4, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YmI2ZDVkMmItYjVhOTE0ODctZDE5YWUxYjYtMWFlNmM1ZDM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:12.320605Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715737. Ctx: { TraceId: 01jtmzszzac4xd9r74ngbea6nj, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZmVmZmNmZjgtNzMyNTcwYmUtMjM5Mjk5Ni0yMDNmOTkyNg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:12.394862Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715738. Ctx: { TraceId: 01jtmzt01j0b9scvqz9tp4fddw, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OWRkN2I0OGEtOGNkNGMyNTgtOTQyY2FlNjMtN2NkYTc3NmI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:12.465361Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715739. Ctx: { TraceId: 01jtmzt03x5jp3fcypazxbg5nm, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OGYyOGQ4MzEtYTQ3MzZjNTctN2MxMzlmMmMtMzkxZjg5YjE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:12.528915Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715740. Ctx: { TraceId: 01jtmzt0638wvk14bqmjyxw9yg, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTZkOGIxZDItOTFjMzM2ZTEtOWMzNTczZjItN2JhMmNjNDA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:12.599150Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715741. Ctx: { TraceId: 01jtmzt0827tr6tqjpcga44kaf, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YzcxMGY0ZWUtNmZiZDBjYjktMzliYTgzNDktM2JkN2I4NGY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:12.668105Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715742. 
Ctx: { TraceId: 01jtmzt0a80p111zwx02hyzefe, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZmQ1OWM0YmQtYWYwMTRlMWMtMjA3Y2Y5MjItZDE2ZmUwNDM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:12.741116Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715743. Ctx: { TraceId: 01jtmzt0cf5nzn84ps66tfpv4j, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NTE2N2I2MmUtYjVjZmRlYmMtY2YzMWIxNzYtMjhiYmE4NGY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:12.813490Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715744. Ctx: { TraceId: 01jtmzt0eq196q1hcgtdww1yq0, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MTFlMDFkY2UtZjdjMGE3OTEtNjQ4ZDljOGYtMzk5NzBkOQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:12.904505Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715745. Ctx: { TraceId: 01jtmzt0hnasv29yep772ep0bv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YWIwYjU0ODYtYmEyNWI2NzktMWRiMmU4NGMtNGVkMTAzMzY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:12.972706Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715746. Ctx: { TraceId: 01jtmzt0kt16bzcz0w1x7nthyz, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NjEwNjIwYWUtMjZlZTM4YjctY2IwMjQxMC1kMDI4MDExMw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:13.041572Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715747. Ctx: { TraceId: 01jtmzt0ny9h13e037bww5y6hb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YmVkYTdmMWUtZWIxYThmOTUtYzFhZTFkZmEtZTY2YzExYjc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:13.109208Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715748. Ctx: { TraceId: 01jtmzt0r3d2n80b9jtkf3edx7, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ODhlODFlZjUtNzk3YzgwNTktYzJlMDY1N2YtZGI4MTg5M2Y=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:13.178928Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715749. Ctx: { TraceId: 01jtmzt0t7a94ny34vdgzpap9p, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NjkwZWEyZDItOTI0NDM2OC1lNTVkZTQzNi0zODA4MWFiOQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:13.252462Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715750. Ctx: { TraceId: 01jtmzt0wffktwepdvb0b27k3y, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjVhNTQwZmUtNWU0ZTYxMzMtNTE0NTNjZjAtNjk5MmY3NGI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:13.312890Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715751. Ctx: { TraceId: 01jtmzt0ynd2c24bwwn8wmz55a, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZmFiNGUyZjQtMzdkZDU4MzAtOGNhNzJhLTM0MTRiM2Y0, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:13.378301Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715752. 
Ctx: { TraceId: 01jtmzt10k8g1y62gytr4rpd05, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZGZhOGQzZDktOWExZWUxZjYtZDkwYTcxYzItYTA4OGYyZTk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:13.448790Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715753. Ctx: { TraceId: 01jtmzt12meh57c9b796j0qqwt, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NGE5MjhjNWEtZDY5ZTAyNTktNDBmNjhmMzgtZGU4NjVjYQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:13.517702Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715754. Ctx: { TraceId: 01jtmzt14t22caqn7xx5qe171w, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZmJjZTRiMjQtOWMyMTU4MjgtZjE0NGVhMTktYjMwMzNkOWY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:13.583710Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715755. Ctx: { TraceId: 01jtmzt16z98w52z2970h9jkm4, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NzYzMTVlNGEtOWUyN2Y3NTUtNzJmZWY1ZTYtYjQ4M2YyODk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:13.664994Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715756. Ctx: { TraceId: 01jtmzt191fvynmfdd2vq05jve, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=M2YwMzFjZjUtMTEzMzYwMS1kMWYyMTc1OS1iOWI3NDc1OA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:13.727112Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715757. Ctx: { TraceId: 01jtmzt1bjctqk4xe72pfw9m88, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZmU2MzliMGItOTRhYTlmZDYtOWQ3YTYxNjQtODMzNTEyZmU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:13.801010Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715758. Ctx: { TraceId: 01jtmzt1dh008c8fpx68yqfs2c, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZDk4MWIyYjktZWRlNDJkYjAtN2NhZjE3ZTItMTM1ZWQwYzc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:13.865364Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715759. Ctx: { TraceId: 01jtmzt1fv1s1w1y5cx4jbegyz, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Y2YyMTQwZmYtNTU2MzQ4MS03NDNhNTBlYS02NTY5OTQ3MA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:13.885927Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 100:0, at schemeshard: 72057594046644480 2025-05-07T09:04:14.271858Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715760. Ctx: { TraceId: 01jtmzt1sp9mj67ryans88a9vk, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ODc5OGE2NmQtYmZlNzYxMmYtNDkwMTQ4NGYtNDUxMTYwMzI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root >> TSchemeShardSubDomainTest::CreateItemsInsideSubdomainWithStoragePools >> DemoTx::Scenario_4 >> DataShardVolatile::DistributedWriteBrokenLock [GOOD] >> DataShardVolatile::DistributedWriteShardRestartBeforePlan+UseSink >> TSchemeShardSubDomainTest::SchemeLimitsRejectsWithIndexedTables [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::CreateSubDomainWithoutTabletsThenForceDrop [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:128:2058] recipient: [1:108:2140] 2025-05-07T09:04:15.286592Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T09:04:15.286687Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:04:15.286722Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T09:04:15.286758Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T09:04:15.286812Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T09:04:15.286857Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T09:04:15.286947Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:04:15.287037Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T09:04:15.287680Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T09:04:15.288057Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T09:04:15.369726Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T09:04:15.369784Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:04:15.391678Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T09:04:15.391777Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T09:04:15.391948Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T09:04:15.403642Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: 
TTxUpgradeSchema.Complete 2025-05-07T09:04:15.404380Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T09:04:15.405063Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:15.405361Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T09:04:15.407835Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:15.409475Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:15.409537Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:15.409595Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T09:04:15.409648Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:15.409791Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T09:04:15.410003Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T09:04:15.416700Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:239:2058] recipient: [1:15:2062] 2025-05-07T09:04:15.532264Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T09:04:15.532472Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:15.532665Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T09:04:15.532887Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T09:04:15.532954Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:15.535270Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 
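(A note on the KQP_EXECUTER entries at the top of this excerpt: each query there logs "Database not set, use /Root" because its session context carries an empty Database field, so the planner falls back to the root database. Below is a minimal client-side sketch, assuming the in-tree YDB C++ SDK headers and a locally reachable endpoint — both assumptions, not taken from this log — of how a client pins the database explicitly so that fallback never triggers:

    // Sketch only: explicit Database on the driver config avoids the
    // "Database not set, use /Root" fallback logged by kqp_planner.cpp.
    #include <ydb/public/sdk/cpp/client/ydb_driver/driver.h>
    #include <ydb/public/sdk/cpp/client/ydb_table/table.h>

    int main() {
        auto config = NYdb::TDriverConfig()
            .SetEndpoint("grpc://localhost:2136") // assumed local endpoint
            .SetDatabase("/Root");                // explicit database, no fallback
        NYdb::TDriver driver(config);
        NYdb::NTable::TTableClient client(driver);
        // Sessions created from this client now carry Database=/Root in their Ctx.
        driver.Stop(true);
        return 0;
    }
)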
2025-05-07T09:04:15.535400Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T09:04:15.535610Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:15.535693Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T09:04:15.535737Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T09:04:15.535781Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T09:04:15.538144Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:15.538199Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T09:04:15.538248Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T09:04:15.540811Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:15.540864Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:15.540905Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:15.540965Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T09:04:15.550419Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T09:04:15.553091Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T09:04:15.553283Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T09:04:15.554280Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:15.554418Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 130 RawX2: 4294969450 } } 
Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:04:15.554467Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:15.554772Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T09:04:15.554827Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:15.555023Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T09:04:15.555102Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T09:04:15.557368Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:15.557414Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:15.557603Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:15.557692Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 
[OwnerId: 72057594046678944, LocalPathId: 2] 2025-05-07T09:04:15.631140Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:15.631161Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:206:2208], at schemeshard: 72057594046678944, txId: 101, path id: 1 2025-05-07T09:04:15.631186Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:206:2208], at schemeshard: 72057594046678944, txId: 101, path id: 2 FAKE_COORDINATOR: Erasing txId 101 2025-05-07T09:04:15.631374Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 101:0, at schemeshard: 72057594046678944 2025-05-07T09:04:15.631401Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:416: [72057594046678944] TDeleteParts opId# 101:0 ProgressState 2025-05-07T09:04:15.631464Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#101:0 progress is 1/1 2025-05-07T09:04:15.631502Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-05-07T09:04:15.631543Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#101:0 progress is 1/1 2025-05-07T09:04:15.631569Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-05-07T09:04:15.631593Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 101, ready parts: 1/1, is published: false 2025-05-07T09:04:15.631674Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-05-07T09:04:15.631699Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 101:0 2025-05-07T09:04:15.631725Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 101:0 2025-05-07T09:04:15.631781Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-05-07T09:04:15.631809Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 101, publications: 2, subscribers: 0 2025-05-07T09:04:15.631839Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 1], 7 2025-05-07T09:04:15.631865Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 2], 18446744073709551615 2025-05-07T09:04:15.632259Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 101 2025-05-07T09:04:15.632320Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 7 PathOwnerId: 
72057594046678944, cookie: 101 2025-05-07T09:04:15.632342Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 101 2025-05-07T09:04:15.632373Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 7 2025-05-07T09:04:15.632405Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-05-07T09:04:15.632879Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 101 2025-05-07T09:04:15.632928Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 101 2025-05-07T09:04:15.632983Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 101 2025-05-07T09:04:15.633003Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 18446744073709551615 2025-05-07T09:04:15.633022Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-05-07T09:04:15.633082Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 101, subscribers: 0 2025-05-07T09:04:15.633402Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:205: TTxCleanDroppedSubDomains Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-05-07T09:04:15.633442Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: PersistRemoveSubDomain for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-05-07T09:04:15.633513Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-05-07T09:04:15.633655Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-05-07T09:04:15.633700Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-05-07T09:04:15.633755Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T09:04:15.635846Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-05-07T09:04:15.637793Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-05-07T09:04:15.637908Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-05-07T09:04:15.638022Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 101, wait until txId: 101 TestWaitNotification wait txId: 101 2025-05-07T09:04:15.638245Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 101: send EvNotifyTxCompletion 2025-05-07T09:04:15.638284Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 101 2025-05-07T09:04:15.638695Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 2025-05-07T09:04:15.638776Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-05-07T09:04:15.638813Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:343:2334] TestWaitNotification: OK eventTxId 101 2025-05-07T09:04:15.639300Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T09:04:15.639479Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 195us result status StatusPathDoesNotExist 2025-05-07T09:04:15.639686Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/USER_0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-05-07T09:04:15.640182Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T09:04:15.640331Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 143us result status StatusSuccess 2025-05-07T09:04:15.640760Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: 
schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 |92.9%| [TA] $(B)/ydb/core/tx/datashard/ut_external_blobs/test-results/unittest/{meta.json ... results_accumulator.log} |92.9%| [TA] {RESULT} $(B)/ydb/core/tx/datashard/ut_external_blobs/test-results/unittest/{meta.json ... results_accumulator.log} >> TSchemeShardSubDomainTest::SimultaneousCreateTableForceDrop [GOOD] >> TSchemeShardSubDomainTest::LS [GOOD] >> TSchemeShardSubDomainTest::DeleteAndRestart [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TStoragePoolsQuotasTest::QuoteNonexistentPool-IsExternalSubdomain-false [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140] 2025-05-07T09:04:15.296129Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T09:04:15.296232Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:04:15.296280Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T09:04:15.296325Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T09:04:15.296396Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T09:04:15.296433Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T09:04:15.296505Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:04:15.296613Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T09:04:15.297525Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T09:04:15.297930Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T09:04:15.393275Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T09:04:15.393343Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:04:15.415373Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T09:04:15.415590Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T09:04:15.415777Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T09:04:15.422701Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T09:04:15.423070Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T09:04:15.423816Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:15.424004Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T09:04:15.427998Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:15.429398Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:15.429482Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:15.429571Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T09:04:15.429631Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:15.429764Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T09:04:15.430067Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T09:04:15.437509Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T09:04:15.602820Z 
node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T09:04:15.603097Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:15.603342Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T09:04:15.603603Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T09:04:15.603671Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:15.611198Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:15.611395Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T09:04:15.611649Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:15.611731Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T09:04:15.611784Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T09:04:15.611845Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T09:04:15.614278Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:15.614375Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T09:04:15.614428Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T09:04:15.616563Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:15.616631Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:15.616696Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:15.616766Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation 
IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T09:04:15.620959Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T09:04:15.624073Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T09:04:15.624295Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T09:04:15.625337Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:15.625501Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:04:15.625555Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:15.626569Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T09:04:15.626643Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:15.626847Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T09:04:15.626952Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T09:04:15.629171Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:15.629305Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:15.629518Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:15.629565Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 
ons { TxId: 101 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000002 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:04:15.681320Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 101:0, at tablet# 72057594046678944 2025-05-07T09:04:15.681572Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 101:0 128 -> 240 2025-05-07T09:04:15.681677Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 101:0, at tablet# 72057594046678944 2025-05-07T09:04:15.681872Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T09:04:15.681942Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-05-07T09:04:15.682019Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-05-07T09:04:15.686608Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:15.686663Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 101, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:15.686831Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 101, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-05-07T09:04:15.686985Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:15.687041Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:205:2207], at schemeshard: 72057594046678944, txId: 101, path id: 1 2025-05-07T09:04:15.687091Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:205:2207], at schemeshard: 72057594046678944, txId: 101, path id: 2 FAKE_COORDINATOR: Erasing txId 101 2025-05-07T09:04:15.687491Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 101:0, at schemeshard: 72057594046678944 2025-05-07T09:04:15.687541Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046678944] TDone opId# 101:0 ProgressState 2025-05-07T09:04:15.687660Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#101:0 progress is 1/1 2025-05-07T09:04:15.687702Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-05-07T09:04:15.687753Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#101:0 progress is 1/1 2025-05-07T09:04:15.687791Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-05-07T09:04:15.687833Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 101, ready parts: 1/1, is published: false 2025-05-07T09:04:15.687885Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-05-07T09:04:15.687934Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 101:0 2025-05-07T09:04:15.687975Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 101:0 2025-05-07T09:04:15.688075Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-05-07T09:04:15.688129Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 101, publications: 2, subscribers: 0 2025-05-07T09:04:15.688170Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 1], 5 2025-05-07T09:04:15.688220Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 2], 3 2025-05-07T09:04:15.689025Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 101 2025-05-07T09:04:15.689187Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 101 2025-05-07T09:04:15.689231Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 101 2025-05-07T09:04:15.689283Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 5 2025-05-07T09:04:15.689351Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-05-07T09:04:15.689957Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 101 2025-05-07T09:04:15.690061Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 101 2025-05-07T09:04:15.690098Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 101 2025-05-07T09:04:15.690131Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 
72057594046678944, LocalPathId: 2], version: 3 2025-05-07T09:04:15.690162Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-05-07T09:04:15.690253Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 101, subscribers: 0 2025-05-07T09:04:15.694013Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-05-07T09:04:15.694122Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 TestModificationResult got TxId: 101, wait until txId: 101 TestModificationResults wait txId: 102 2025-05-07T09:04:15.697950Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpAlterSubDomain SubDomain { PlanResolution: 50 Coordinators: 1 Mediators: 1 Name: "SomeDatabase" TimeCastBucketsPerMediator: 2 DatabaseQuotas { storage_quotas { unit_kind: "nonexistent_storage_kind" data_size_hard_quota: 1 } } } } TxId: 102 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T09:04:15.698179Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: /MyRoot/SomeDatabase, opId: 102:0, at schemeshard: 72057594046678944 2025-05-07T09:04:15.698399Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 102:1, propose status:StatusInvalidParameter, reason: Malformed subdomain request: cannot set storage quotas of the following kinds: nonexistent_storage_kind, because no storage pool in the subdomain /MyRoot/SomeDatabase has the specified kinds. Existing storage kinds are: pool-kind-1, pool-kind-2, at schemeshard: 72057594046678944 2025-05-07T09:04:15.700961Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 102, response: Status: StatusInvalidParameter Reason: "Malformed subdomain request: cannot set storage quotas of the following kinds: nonexistent_storage_kind, because no storage pool in the subdomain /MyRoot/SomeDatabase has the specified kinds. Existing storage kinds are: pool-kind-1, pool-kind-2" TxId: 102 SchemeshardId: 72057594046678944 PathId: 2, at schemeshard: 72057594046678944 2025-05-07T09:04:15.701138Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 102, database: /MyRoot, subject: , status: StatusInvalidParameter, reason: Malformed subdomain request: cannot set storage quotas of the following kinds: nonexistent_storage_kind, because no storage pool in the subdomain /MyRoot/SomeDatabase has the specified kinds. 
Existing storage kinds are: pool-kind-1, pool-kind-2, operation: ALTER DATABASE, path: /MyRoot/SomeDatabase TestModificationResult got TxId: 102, wait until txId: 102 TestWaitNotification wait txId: 101 2025-05-07T09:04:15.701624Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 101: send EvNotifyTxCompletion 2025-05-07T09:04:15.701680Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 101 TestWaitNotification wait txId: 102 2025-05-07T09:04:15.701809Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2025-05-07T09:04:15.701838Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 2025-05-07T09:04:15.702322Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 2025-05-07T09:04:15.702513Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-05-07T09:04:15.702567Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:310:2301] 2025-05-07T09:04:15.702776Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 2025-05-07T09:04:15.702863Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-05-07T09:04:15.702912Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:310:2301] TestWaitNotification: OK eventTxId 101 TestWaitNotification: OK eventTxId 102 >> TSchemeShardSubDomainTest::TableDiskSpaceQuotas ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TStoragePoolsQuotasTest::DifferentQuotasInteraction-IsExternalSubdomain [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:128:2058] recipient: [1:108:2140] 2025-05-07T09:04:12.159386Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T09:04:12.159470Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:04:12.159504Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-05-07T09:04:12.159531Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T09:04:12.159568Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T09:04:12.159599Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T09:04:12.159667Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:04:12.159751Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T09:04:12.160347Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T09:04:12.160637Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T09:04:12.228811Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T09:04:12.228871Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:04:12.248052Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T09:04:12.248192Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T09:04:12.248363Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T09:04:12.257347Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T09:04:12.257990Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T09:04:12.258631Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:12.259000Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T09:04:12.261442Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:12.263155Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:12.263229Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:12.263291Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T09:04:12.263342Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:12.263448Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T09:04:12.263630Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T09:04:12.274431Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: 
[72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:239:2058] recipient: [1:15:2062] 2025-05-07T09:04:12.441158Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T09:04:12.441429Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:12.441669Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T09:04:12.441915Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T09:04:12.442248Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:12.446995Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:12.447155Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T09:04:12.447363Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:12.447452Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T09:04:12.447504Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T09:04:12.447546Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T09:04:12.449656Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:12.449721Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T09:04:12.449769Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T09:04:12.452027Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:12.452081Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:12.452125Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose 
ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:12.452193Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T09:04:12.456391Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T09:04:12.458861Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T09:04:12.459104Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T09:04:12.460088Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:12.460238Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 130 RawX2: 4294969450 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:04:12.460293Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:12.460645Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T09:04:12.460734Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:12.460945Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T09:04:12.461013Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T09:04:12.463053Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:12.463102Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:12.463279Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:12.463346Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [ ... 
ason publish path for pathId [OwnerId: 72075186233409546, LocalPathId: 1] was 4 2025-05-07T09:04:15.737519Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72075186233409546, LocalPathId: 2] was 3 2025-05-07T09:04:15.739458Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 104:0, at schemeshard: 72075186233409546 2025-05-07T09:04:15.739601Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 104:0, at schemeshard: 72075186233409546 2025-05-07T09:04:15.741247Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72075186233409546 2025-05-07T09:04:15.741339Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72075186233409546, txId: 104, path id: [OwnerId: 72075186233409546, LocalPathId: 1] 2025-05-07T09:04:15.741522Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72075186233409546, txId: 104, path id: [OwnerId: 72075186233409546, LocalPathId: 2] 2025-05-07T09:04:15.741735Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72075186233409546 2025-05-07T09:04:15.741803Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:449:2401], at schemeshard: 72075186233409546, txId: 104, path id: 1 2025-05-07T09:04:15.741856Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:449:2401], at schemeshard: 72075186233409546, txId: 104, path id: 2 2025-05-07T09:04:15.742249Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 104:0, at schemeshard: 72075186233409546 2025-05-07T09:04:15.742312Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1036: NTableState::TProposedWaitParts operationId# 104:0 ProgressState at tablet: 72075186233409546 2025-05-07T09:04:15.742422Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:674: all shard schema changes has been received, operationId: 104:0, at schemeshard: 72075186233409546 2025-05-07T09:04:15.742471Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:686: send schema changes ack message, operation: 104:0, datashard: 72075186233409549, at schemeshard: 72075186233409546 2025-05-07T09:04:15.742513Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 104:0 129 -> 240 2025-05-07T09:04:15.743539Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72075186233409546, msg: Owner: 72075186233409546 Generation: 2 LocalPathId: 1 Version: 9 PathOwnerId: 72075186233409546, cookie: 104 2025-05-07T09:04:15.743676Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72075186233409546, msg: Owner: 72075186233409546 Generation: 2 LocalPathId: 1 Version: 9 PathOwnerId: 72075186233409546, cookie: 104 2025-05-07T09:04:15.743724Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72075186233409546, 
txId: 104 2025-05-07T09:04:15.743774Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72075186233409546, txId: 104, pathId: [OwnerId: 72075186233409546, LocalPathId: 1], version: 9 2025-05-07T09:04:15.743837Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72075186233409546, LocalPathId: 1] was 5 2025-05-07T09:04:15.744676Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72075186233409546, msg: Owner: 72075186233409546 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72075186233409546, cookie: 104 2025-05-07T09:04:15.744752Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72075186233409546, msg: Owner: 72075186233409546 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72075186233409546, cookie: 104 2025-05-07T09:04:15.744778Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72075186233409546, txId: 104 2025-05-07T09:04:15.744820Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72075186233409546, txId: 104, pathId: [OwnerId: 72075186233409546, LocalPathId: 2], version: 18446744073709551615 2025-05-07T09:04:15.744853Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72075186233409546, LocalPathId: 2] was 4 2025-05-07T09:04:15.744956Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 104, ready parts: 0/1, is published: true 2025-05-07T09:04:15.748289Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 104:0, at schemeshard: 72075186233409546 2025-05-07T09:04:15.748350Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_table.cpp:414: TDropTable TProposedDeletePart operationId: 104:0 ProgressState, at schemeshard: 72075186233409546 2025-05-07T09:04:15.748729Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove table for pathId [OwnerId: 72075186233409546, LocalPathId: 2] was 3 2025-05-07T09:04:15.748902Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#104:0 progress is 1/1 2025-05-07T09:04:15.748931Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-05-07T09:04:15.748989Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#104:0 progress is 1/1 2025-05-07T09:04:15.749035Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-05-07T09:04:15.749072Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 104, ready parts: 1/1, is published: true 2025-05-07T09:04:15.749129Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:555:2494] message: TxId: 104 2025-05-07T09:04:15.749162Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-05-07T09:04:15.749192Z 
node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 104:0 2025-05-07T09:04:15.749216Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 104:0 2025-05-07T09:04:15.749299Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72075186233409546, LocalPathId: 2] was 2 2025-05-07T09:04:15.750135Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72075186233409546 2025-05-07T09:04:15.750174Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72075186233409546, txId: 0, path id: [OwnerId: 72075186233409546, LocalPathId: 1] 2025-05-07T09:04:15.751611Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72075186233409546, cookie: 104 2025-05-07T09:04:15.752149Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72075186233409546, cookie: 104 2025-05-07T09:04:15.753935Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72075186233409546 2025-05-07T09:04:15.754026Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:449:2401], at schemeshard: 72075186233409546, txId: 0, path id: 1 2025-05-07T09:04:15.754143Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 104: got EvNotifyTxCompletionResult 2025-05-07T09:04:15.754189Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 104: satisfy waiter [1:817:2735] 2025-05-07T09:04:15.782220Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72075186233409546, msg: Owner: 72075186233409546 Generation: 2 LocalPathId: 1 Version: 10 PathOwnerId: 72075186233409546, cookie: 0 TestWaitNotification: OK eventTxId 104 2025-05-07T09:04:15.783494Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/SomeDatabase" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72075186233409546 2025-05-07T09:04:15.783707Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72075186233409546 describe path "/MyRoot/SomeDatabase" took 235us result status StatusSuccess 2025-05-07T09:04:15.784111Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/SomeDatabase" PathDescription { Self { Name: "MyRoot/SomeDatabase" PathId: 1 SchemeshardId: 72075186233409546 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 10 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 10 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 2 SubDomainStateVersion: 2 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 
72057594046678944 PathId_Depricated: 2 ProcessingParams { Version: 2 PlanResolution: 50 Coordinators: 72075186233409547 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409548 SchemeShard: 72075186233409546 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "fast" Kind: "fast_kind" } StoragePools { Name: "large" Kind: "large_kind" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } StoragePoolsUsage { PoolKind: "large_kind" TotalSize: 0 DataSize: 0 IndexSize: 0 } StoragePoolsUsage { PoolKind: "fast_kind" TotalSize: 0 DataSize: 0 IndexSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 DatabaseQuotas { data_size_hard_quota: 2800 data_size_soft_quota: 2200 storage_quotas { unit_kind: "fast_kind" data_size_hard_quota: 600 data_size_soft_quota: 500 } storage_quotas { unit_kind: "large_kind" data_size_hard_quota: 2200 data_size_soft_quota: 1700 } } SecurityState { Audience: "/MyRoot/SomeDatabase" } } } PathId: 1 PathOwnerId: 72075186233409546, at schemeshard: 72075186233409546 >> TExportToS3Tests::ShouldExcludeBackupTableFromStats [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::LS [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140] 2025-05-07T09:04:15.891332Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T09:04:15.891455Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:04:15.891511Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T09:04:15.891548Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T09:04:15.891606Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T09:04:15.891639Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T09:04:15.891692Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:04:15.891767Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T09:04:15.892516Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T09:04:15.892866Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T09:04:15.975101Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T09:04:15.975155Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:04:15.991790Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T09:04:15.991984Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T09:04:15.992179Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T09:04:15.998556Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T09:04:15.998910Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T09:04:15.999621Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:15.999828Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T09:04:16.002854Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:16.004304Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:16.004372Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:16.004452Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T09:04:16.004497Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:16.004612Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T09:04:16.004887Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T09:04:16.012086Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T09:04:16.147010Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T09:04:16.147265Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: 
//MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:16.147505Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T09:04:16.147769Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T09:04:16.147845Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:16.150371Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:16.150522Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T09:04:16.150731Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:16.150804Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T09:04:16.150845Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T09:04:16.150897Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T09:04:16.152959Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:16.153023Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T09:04:16.153068Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T09:04:16.154990Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:16.155051Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:16.155104Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:16.155171Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T09:04:16.165892Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T09:04:16.168201Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet 
strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T09:04:16.168426Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T09:04:16.169485Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:16.169635Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:04:16.169681Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:16.170028Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T09:04:16.170090Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:16.170278Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T09:04:16.170357Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T09:04:16.172459Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:16.172527Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:16.172754Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:16.172801Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 
for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5 2025-05-07T09:04:16.281363Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 100 2025-05-07T09:04:16.283317Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:16.283374Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 100, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:16.283505Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 100, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-05-07T09:04:16.283608Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:16.283636Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:205:2207], at schemeshard: 72057594046678944, txId: 100, path id: 1 2025-05-07T09:04:16.283669Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:205:2207], at schemeshard: 72057594046678944, txId: 100, path id: 2 2025-05-07T09:04:16.283967Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 100:0, at schemeshard: 72057594046678944 2025-05-07T09:04:16.284009Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046678944] TDone opId# 100:0 ProgressState 2025-05-07T09:04:16.284109Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#100:0 progress is 1/1 2025-05-07T09:04:16.284145Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 100 ready parts: 1/1 2025-05-07T09:04:16.284181Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#100:0 progress is 1/1 2025-05-07T09:04:16.284208Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 100 ready parts: 1/1 2025-05-07T09:04:16.284244Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 100, ready parts: 1/1, is published: false 2025-05-07T09:04:16.284276Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 100 ready parts: 1/1 2025-05-07T09:04:16.284302Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 100:0 2025-05-07T09:04:16.284326Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 100:0 2025-05-07T09:04:16.284493Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 6 2025-05-07T09:04:16.284552Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 100, publications: 2, 
subscribers: 0 2025-05-07T09:04:16.284582Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 100, [OwnerId: 72057594046678944, LocalPathId: 1], 5 2025-05-07T09:04:16.284602Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 100, [OwnerId: 72057594046678944, LocalPathId: 2], 3 2025-05-07T09:04:16.285346Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 100 2025-05-07T09:04:16.285440Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 100 2025-05-07T09:04:16.285500Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 100 2025-05-07T09:04:16.285546Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 100, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 5 2025-05-07T09:04:16.285619Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-05-07T09:04:16.286164Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 100 2025-05-07T09:04:16.286227Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 100 2025-05-07T09:04:16.286252Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 100 2025-05-07T09:04:16.286275Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 100, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 3 2025-05-07T09:04:16.286301Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5 2025-05-07T09:04:16.286345Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 100, subscribers: 0 2025-05-07T09:04:16.289192Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 100 2025-05-07T09:04:16.290223Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 100 TestModificationResult got TxId: 100, wait until txId: 100 TestWaitNotification wait txId: 100 2025-05-07T09:04:16.290379Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 100: send 
EvNotifyTxCompletion 2025-05-07T09:04:16.290409Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 100 2025-05-07T09:04:16.290785Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 100, at schemeshard: 72057594046678944 2025-05-07T09:04:16.290858Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 100: got EvNotifyTxCompletionResult 2025-05-07T09:04:16.290907Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 100: satisfy waiter [1:454:2408] TestWaitNotification: OK eventTxId 100 2025-05-07T09:04:16.291302Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T09:04:16.291539Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 231us result status StatusSuccess 2025-05-07T09:04:16.291917Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0" PathDescription { Self { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 1 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 Mediators: 72075186233409548 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:04:16.292459Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T09:04:16.292631Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 176us result status StatusSuccess 2025-05-07T09:04:16.292991Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 
5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 3 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::SchemeLimitsRejectsWithIndexedTables [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140] 2025-05-07T09:04:15.146862Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T09:04:15.146967Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:04:15.147012Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T09:04:15.147057Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T09:04:15.147107Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T09:04:15.147131Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T09:04:15.147185Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:04:15.147247Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 
2025-05-07T09:04:15.147889Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T09:04:15.148260Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T09:04:15.224404Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T09:04:15.224463Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:04:15.238266Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T09:04:15.238447Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T09:04:15.238593Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T09:04:15.244746Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T09:04:15.245080Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T09:04:15.245749Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:15.245953Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T09:04:15.248982Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:15.250328Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:15.250391Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:15.250467Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T09:04:15.250511Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:15.250596Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T09:04:15.250805Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T09:04:15.256647Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T09:04:15.380930Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T09:04:15.381213Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:15.381496Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T09:04:15.381825Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T09:04:15.381890Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:15.384495Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:15.384624Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T09:04:15.384802Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:15.384863Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T09:04:15.384902Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T09:04:15.384934Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T09:04:15.386674Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:15.386727Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T09:04:15.386763Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T09:04:15.388522Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:15.388578Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:15.388627Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:15.388690Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T09:04:15.391662Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T09:04:15.393596Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T09:04:15.393774Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T09:04:15.394622Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:15.394779Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:04:15.394821Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:15.395103Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T09:04:15.395147Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:15.395284Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T09:04:15.395353Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T09:04:15.396983Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:15.397028Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:15.397164Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:15.397195Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 
CHEMESHARD DEBUG: schemeshard_impl.cpp:5472: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 598 RawX2: 4294969832 } Origin: 72075186233409549 State: 2 TxId: 107 Step: 0 Generation: 2 2025-05-07T09:04:16.095016Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation FindRelatedPartByTabletId, TxId: 107, tablet: 72075186233409549, partId: 2 2025-05-07T09:04:16.095116Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 107:2, at schemeshard: 72057594046678944, message: Source { RawX1: 598 RawX2: 4294969832 } Origin: 72075186233409549 State: 2 TxId: 107 Step: 0 Generation: 2 2025-05-07T09:04:16.095149Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1005: NTableState::TProposedWaitParts operationId# 107:2 HandleReply TEvSchemaChanged at tablet: 72057594046678944 2025-05-07T09:04:16.095233Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1009: NTableState::TProposedWaitParts operationId# 107:2 HandleReply TEvSchemaChanged at tablet: 72057594046678944 message: Source { RawX1: 598 RawX2: 4294969832 } Origin: 72075186233409549 State: 2 TxId: 107 Step: 0 Generation: 2 2025-05-07T09:04:16.095278Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:655: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 107:2, shardIdx: 72057594046678944:4, datashard: 72075186233409549, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:16.095316Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:674: all shard schema changes has been received, operationId: 107:2, at schemeshard: 72057594046678944 2025-05-07T09:04:16.095351Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:686: send schema changes ack message, operation: 107:2, datashard: 72075186233409549, at schemeshard: 72057594046678944 2025-05-07T09:04:16.095388Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 107:2 129 -> 240 2025-05-07T09:04:16.098750Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 107 2025-05-07T09:04:16.098866Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 107 2025-05-07T09:04:16.101760Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 107 2025-05-07T09:04:16.101891Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 107 2025-05-07T09:04:16.101985Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 107:0, at schemeshard: 72057594046678944 2025-05-07T09:04:16.102078Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 107:2, at schemeshard: 72057594046678944 2025-05-07T09:04:16.102149Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 107:0, at schemeshard: 72057594046678944 2025-05-07T09:04:16.102466Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 107:0, at schemeshard: 72057594046678944 2025-05-07T09:04:16.102517Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046678944] TDone opId# 107:0 ProgressState 2025-05-07T09:04:16.102612Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#107:0 progress is 2/3 2025-05-07T09:04:16.102660Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 107 ready parts: 2/3 2025-05-07T09:04:16.102692Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#107:0 progress is 2/3 2025-05-07T09:04:16.102721Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 107 ready parts: 2/3 2025-05-07T09:04:16.102748Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 107, ready parts: 2/3, is published: true 2025-05-07T09:04:16.103004Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 107:2, at schemeshard: 72057594046678944 2025-05-07T09:04:16.103184Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 107:2, at schemeshard: 72057594046678944 2025-05-07T09:04:16.103208Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046678944] TDone opId# 107:2 ProgressState 2025-05-07T09:04:16.103285Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#107:2 progress is 3/3 2025-05-07T09:04:16.103328Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 107 ready parts: 3/3 2025-05-07T09:04:16.103357Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#107:2 progress is 3/3 2025-05-07T09:04:16.103379Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 107 ready parts: 3/3 2025-05-07T09:04:16.103401Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 107, ready parts: 3/3, is published: true 2025-05-07T09:04:16.103472Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:472:2421] message: TxId: 107 2025-05-07T09:04:16.103515Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 107 ready parts: 3/3 2025-05-07T09:04:16.103557Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 107:0 2025-05-07T09:04:16.103590Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 107:0 2025-05-07T09:04:16.103730Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 4 2025-05-07T09:04:16.103767Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 107:1 2025-05-07T09:04:16.103783Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 107:1 2025-05-07T09:04:16.103810Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: 
DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 3 2025-05-07T09:04:16.103831Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 107:2 2025-05-07T09:04:16.103852Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 107:2 2025-05-07T09:04:16.103896Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 6] was 3 2025-05-07T09:04:16.105668Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 107: got EvNotifyTxCompletionResult 2025-05-07T09:04:16.105713Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 107: satisfy waiter [1:528:2477] TestWaitNotification: OK eventTxId 107 TestModificationResults wait txId: 108 2025-05-07T09:04:16.109268Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/USER_0" OperationType: ESchemeOpCreateIndexedTable CreateIndexedTable { TableDescription { Name: "Table7" Columns { Name: "RowId" Type: "Uint64" } Columns { Name: "Value0" Type: "Utf8" } Columns { Name: "Value1" Type: "Utf8" } Columns { Name: "Value2" Type: "Utf8" } Columns { Name: "Value3" Type: "Utf8" } Columns { Name: "Value4" Type: "Utf8" } KeyColumnNames: "RowId" } IndexDescription { Name: "UserDefinedIndexByValue0" KeyColumnNames: "Value0" } IndexDescription { Name: "UserDefinedIndexByValue1" KeyColumnNames: "Value1" } IndexDescription { Name: "UserDefinedIndexByValue2" KeyColumnNames: "Value2" } IndexDescription { Name: "UserDefinedIndexByValue3" KeyColumnNames: "Value3" } IndexDescription { Name: "UserDefinedIndexByValue4" KeyColumnNames: "Value4" } } } TxId: 108 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T09:04:16.109676Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_create_indexed_table.cpp:101: TCreateTableIndex construct operation table path: /MyRoot/USER_0/Table7 domain path id: [OwnerId: 72057594046678944, LocalPathId: 2] domain path: /MyRoot/USER_0 shardsToCreate: 6 GetShardsInside: 4 MaxShards: 7 2025-05-07T09:04:16.109768Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_just_reject.cpp:47: TReject Propose, opId: 108:0, explain: indexes count has reached maximum value in the table, children limit for dir in domain: 4, intention to create new children: 5, at schemeshard: 72057594046678944 2025-05-07T09:04:16.109822Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 108:1, propose status:StatusResourceExhausted, reason: indexes count has reached maximum value in the table, children limit for dir in domain: 4, intention to create new children: 5, at schemeshard: 72057594046678944 2025-05-07T09:04:16.112022Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 108, response: Status: StatusResourceExhausted Reason: "indexes count has reached maximum value in the table, children limit for dir in domain: 4, intention to create new children: 5" TxId: 108 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:04:16.112191Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 108, database: /MyRoot/USER_0, subject: , status: 
StatusResourceExhausted, reason: indexes count has reached maximum value in the table, children limit for dir in domain: 4, intention to create new children: 5, operation: CREATE TABLE WITH INDEXES, path: /MyRoot/USER_0/Table7 TestModificationResult got TxId: 108, wait until txId: 108 TestWaitNotification wait txId: 108 2025-05-07T09:04:16.112547Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 108: send EvNotifyTxCompletion 2025-05-07T09:04:16.112593Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 108 2025-05-07T09:04:16.113015Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 108, at schemeshard: 72057594046678944 2025-05-07T09:04:16.113111Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 108: got EvNotifyTxCompletionResult 2025-05-07T09:04:16.113148Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 108: satisfy waiter [1:722:2638] TestWaitNotification: OK eventTxId 108 >> TSchemeShardSubDomainTest::SimultaneousCreateTenantDirTable [GOOD] >> TExportToS3Tests::ShouldCheckQuotasExportsLimited >> TStoragePoolsQuotasTest::DifferentQuotasInteraction [GOOD] >> TSchemeShardSubDomainTest::ConsistentCopyRejects [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::SimultaneousCreateTableForceDrop [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:128:2058] recipient: [1:108:2140] 2025-05-07T09:04:15.377036Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T09:04:15.377129Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:04:15.377180Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T09:04:15.377224Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T09:04:15.377278Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T09:04:15.377307Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T09:04:15.377365Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:04:15.377456Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, 
CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T09:04:15.378410Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T09:04:15.378797Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T09:04:15.451568Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T09:04:15.451641Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:04:15.467062Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T09:04:15.467184Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T09:04:15.467339Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T09:04:15.496295Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T09:04:15.497543Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T09:04:15.498341Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:15.498681Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T09:04:15.502820Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:15.504648Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:15.504722Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:15.504785Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T09:04:15.504857Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:15.504980Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T09:04:15.505239Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T09:04:15.519183Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:239:2058] recipient: [1:15:2062] 2025-05-07T09:04:15.649121Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 
2025-05-07T09:04:15.649305Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:15.649480Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T09:04:15.649661Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T09:04:15.649730Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:15.652602Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:15.652748Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T09:04:15.652927Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:15.653013Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T09:04:15.653074Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T09:04:15.653114Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T09:04:15.655343Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:15.655433Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T09:04:15.655493Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T09:04:15.657444Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:15.657514Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:15.657576Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:15.657656Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T09:04:15.667062Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 
72057594046316545 2025-05-07T09:04:15.671484Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T09:04:15.671720Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T09:04:15.672739Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:15.672894Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 130 RawX2: 4294969450 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:04:15.672950Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:15.673275Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T09:04:15.673338Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:15.673527Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T09:04:15.673613Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T09:04:15.676056Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:15.676121Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:15.676345Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:15.676407Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 
at schemeshard: 72057594046678944, cookie: 102 Forgetting tablet 72075186233409551 Forgetting tablet 72075186233409547 2025-05-07T09:04:16.120156Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5898: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 2 ShardOwnerId: 72057594046678944 ShardLocalIdx: 2, at schemeshard: 72057594046678944 2025-05-07T09:04:16.120364Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 Forgetting tablet 72075186233409549 2025-05-07T09:04:16.122329Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5898: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 4 ShardOwnerId: 72057594046678944 ShardLocalIdx: 4, at schemeshard: 72057594046678944 2025-05-07T09:04:16.122514Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-05-07T09:04:16.123248Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:205: TTxCleanDroppedSubDomains Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-05-07T09:04:16.123312Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: PersistRemoveSubDomain for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-05-07T09:04:16.123464Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-05-07T09:04:16.125833Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:5 2025-05-07T09:04:16.125899Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:5 tabletId 72075186233409550 2025-05-07T09:04:16.126042Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:7 2025-05-07T09:04:16.126069Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:7 tabletId 72075186233409552 2025-05-07T09:04:16.126178Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5699: Failed to connect, to tablet: 72075186233409552, at schemeshard: 72057594046678944 2025-05-07T09:04:16.126324Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 1 candidates, at schemeshard: 72057594046678944 2025-05-07T09:04:16.127045Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-05-07T09:04:16.127097Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-05-07T09:04:16.127173Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T09:04:16.129229Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:1 2025-05-07T09:04:16.129269Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:1 tabletId 72075186233409546 2025-05-07T09:04:16.129344Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:3 2025-05-07T09:04:16.129366Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:3 tabletId 72075186233409548 2025-05-07T09:04:16.129443Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:6 2025-05-07T09:04:16.129480Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:6 tabletId 72075186233409551 2025-05-07T09:04:16.129562Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2 2025-05-07T09:04:16.129587Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:2 tabletId 72075186233409547 2025-05-07T09:04:16.131445Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:4 2025-05-07T09:04:16.131503Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:4 tabletId 72075186233409549 2025-05-07T09:04:16.131748Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-05-07T09:04:16.133259Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 102, wait until txId: 102 TestWaitNotification wait txId: 101 2025-05-07T09:04:16.133522Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 101: send EvNotifyTxCompletion 2025-05-07T09:04:16.133585Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 101 TestWaitNotification wait txId: 102 2025-05-07T09:04:16.133680Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2025-05-07T09:04:16.133700Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 2025-05-07T09:04:16.134186Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 2025-05-07T09:04:16.134401Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 2025-05-07T09:04:16.134488Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-05-07T09:04:16.134525Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:720:2608] 2025-05-07T09:04:16.134670Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-05-07T09:04:16.134691Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- 
TTxNotificationSubscriber for txId 102: satisfy waiter [1:720:2608] TestWaitNotification: OK eventTxId 101 TestWaitNotification: OK eventTxId 102 2025-05-07T09:04:16.135263Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T09:04:16.135498Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 290us result status StatusPathDoesNotExist 2025-05-07T09:04:16.135711Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/USER_0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-05-07T09:04:16.136178Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0/table_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T09:04:16.136346Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0/table_0" took 175us result status StatusPathDoesNotExist 2025-05-07T09:04:16.136502Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0/table_0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/USER_0/table_0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-05-07T09:04:16.136925Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T09:04:16.137090Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 170us result status StatusSuccess 2025-05-07T09:04:16.137469Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: 
schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::DeleteAndRestart [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140] 2025-05-07T09:04:15.502589Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T09:04:15.502674Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:04:15.502712Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T09:04:15.502750Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T09:04:15.502822Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T09:04:15.502855Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T09:04:15.502929Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:04:15.503010Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# 
false 2025-05-07T09:04:15.503796Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T09:04:15.504152Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T09:04:15.591567Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T09:04:15.591626Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:04:15.615658Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T09:04:15.615836Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T09:04:15.616008Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T09:04:15.629827Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T09:04:15.630163Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T09:04:15.630797Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:15.630983Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T09:04:15.637561Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:15.639128Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:15.639189Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:15.639293Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T09:04:15.639348Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:15.639460Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T09:04:15.639703Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T09:04:15.651062Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T09:04:15.768548Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T09:04:15.768804Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:15.769034Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T09:04:15.769274Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T09:04:15.769342Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:15.777187Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:15.777369Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T09:04:15.777563Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:15.777622Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T09:04:15.777658Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T09:04:15.777702Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T09:04:15.779618Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:15.779689Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T09:04:15.779763Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T09:04:15.781677Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:15.781739Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:15.781789Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:15.781872Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T09:04:15.785501Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T09:04:15.787553Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T09:04:15.787760Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T09:04:15.788747Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:15.788862Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:04:15.788898Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:15.789168Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T09:04:15.789219Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:15.789355Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T09:04:15.789439Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T09:04:15.792029Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:15.792082Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:15.792233Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:15.792275Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 
ground_cleaning.cpp:445: Clear TempDirsState with owners number: 0 Leader for TabletID 72057594046678944 is [1:562:2493] sender: [1:626:2058] recipient: [1:102:2137] Leader for TabletID 72057594046678944 is [1:562:2493] sender: [1:629:2058] recipient: [1:15:2062] Leader for TabletID 72057594046678944 is [1:562:2493] sender: [1:630:2058] recipient: [1:628:2544] Leader for TabletID 72057594046678944 is [1:631:2545] sender: [1:632:2058] recipient: [1:628:2544] 2025-05-07T09:04:16.188247Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T09:04:16.188332Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:04:16.188367Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T09:04:16.188404Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T09:04:16.188440Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T09:04:16.188466Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T09:04:16.188529Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:04:16.188610Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T09:04:16.189311Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T09:04:16.189634Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T09:04:16.201701Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T09:04:16.202934Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T09:04:16.203065Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T09:04:16.203179Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T09:04:16.203217Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:04:16.203650Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T09:04:16.204283Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1383: TTxInit for Paths, read records: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:16.204364Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1457: TTxInit for UserAttributes, read records: 0, at schemeshard: 72057594046678944 
2025-05-07T09:04:16.204426Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1483: TTxInit for UserAttributesAlterData, read records: 0, at schemeshard: 72057594046678944
2025-05-07T09:04:16.204727Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1785: TTxInit for Tables, read records: 0, at schemeshard: 72057594046678944
2025-05-07T09:04:16.204805Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__root_data_erasure_manager.cpp:450: [RootDataErasureManager] Restore: Generation# 0, Status# 0, WakeupInterval# 604800 s, NumberDataErasureTenantsInRunning# 0
2025-05-07T09:04:16.204966Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2011: TTxInit for Columns, read records: 0, at schemeshard: 72057594046678944
2025-05-07T09:04:16.205038Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2071: TTxInit for ColumnsAlters, read records: 0, at schemeshard: 72057594046678944
2025-05-07T09:04:16.205123Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2129: TTxInit for Shards, read records: 0, at schemeshard: 72057594046678944
2025-05-07T09:04:16.205201Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2215: TTxInit for TablePartitions, read records: 0, at schemeshard: 72057594046678944
2025-05-07T09:04:16.205286Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2281: TTxInit for TableShardPartitionConfigs, read records: 0, at schemeshard: 72057594046678944
2025-05-07T09:04:16.205424Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2431: TTxInit for ChannelsBinding, read records: 0, at schemeshard: 72057594046678944
2025-05-07T09:04:16.205690Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2810: TTxInit for TableIndexes, read records: 0, at schemeshard: 72057594046678944
2025-05-07T09:04:16.205792Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2889: TTxInit for TableIndexKeys, read records: 0, at schemeshard: 72057594046678944
2025-05-07T09:04:16.206210Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3387: TTxInit for KesusInfos, read records: 0, at schemeshard: 72057594046678944
2025-05-07T09:04:16.206282Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3423: TTxInit for KesusAlters, read records: 0, at schemeshard: 72057594046678944
2025-05-07T09:04:16.206506Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3637: TTxInit for TxShards, read records: 0, at schemeshard: 72057594046678944
2025-05-07T09:04:16.206623Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3782: TTxInit for ShardToDelete, read records: 0, at schemeshard: 72057594046678944
2025-05-07T09:04:16.206757Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3799: TTxInit for BackupSettings, read records: 0, at schemeshard: 72057594046678944
2025-05-07T09:04:16.206942Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3959: TTxInit for ShardBackupStatus, read records: 0, at schemeshard: 72057594046678944
2025-05-07T09:04:16.207044Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3975: TTxInit for CompletedBackup, read records: 0, at schemeshard: 72057594046678944
2025-05-07T09:04:16.207182Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4260: TTxInit for Publications, read records: 0, at schemeshard: 72057594046678944
2025-05-07T09:04:16.207479Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4558: IndexBuild , records: 0, at schemeshard: 72057594046678944
2025-05-07T09:04:16.207692Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4705: SnapshotTables: snapshots: 0 tables: 0, at schemeshard: 72057594046678944
2025-05-07T09:04:16.207748Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4732: SnapshotSteps: snapshots: 0, at schemeshard: 72057594046678944
2025-05-07T09:04:16.207793Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4759: LongLocks: records: 0, at schemeshard: 72057594046678944
2025-05-07T09:04:16.215236Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944
2025-05-07T09:04:16.215307Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944
2025-05-07T09:04:16.215386Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute
2025-05-07T09:04:16.215437Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1]
2025-05-07T09:04:16.215484Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete
2025-05-07T09:04:16.216709Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944
Leader for TabletID 72057594046678944 is [1:631:2545] sender: [1:689:2058] recipient: [1:15:2062]
2025-05-07T09:04:16.260233Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944
2025-05-07T09:04:16.260506Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 281us result status StatusPathDoesNotExist
2025-05-07T09:04:16.260702Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/USER_0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944
2025-05-07T09:04:16.261281Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944
2025-05-07T09:04:16.261467Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 183us result status StatusSuccess
2025-05-07T09:04:16.261831Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944
>> TPersQueueTest::TopicServiceCommitOffsetBadOffsets [GOOD]
>> TPersQueueTest::TopicServiceReadBudget
>> TSchemeShardSubDomainTest::CreateItemsInsideSubdomainWithStoragePools [GOOD]
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::SimultaneousCreateTenantDirTable [GOOD]
Test command err:
Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140]
IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140]
Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140]
2025-05-07T09:04:16.001638Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1
2025-05-07T09:04:16.001728Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10
2025-05-07T09:04:16.001775Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s
2025-05-07T09:04:16.001809Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration
2025-05-07T09:04:16.001875Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000
2025-05-07T09:04:16.001910Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000
2025-05-07T09:04:16.001996Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10
2025-05-07T09:04:16.002075Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false
2025-05-07T09:04:16.002778Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources#
2025-05-07T09:04:16.003158Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute
2025-05-07T09:04:16.079566Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs
2025-05-07T09:04:16.079632Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded
2025-05-07T09:04:16.097468Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete
2025-05-07T09:04:16.097684Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute
2025-05-07T09:04:16.097854Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944
2025-05-07T09:04:16.103929Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete
2025-05-07T09:04:16.104252Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0
2025-05-07T09:04:16.104867Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944
2025-05-07T09:04:16.105030Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944
2025-05-07T09:04:16.107976Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944
2025-05-07T09:04:16.109486Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944
2025-05-07T09:04:16.109549Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944
2025-05-07T09:04:16.109643Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute
2025-05-07T09:04:16.109689Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1]
2025-05-07T09:04:16.109790Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete
2025-05-07T09:04:16.110060Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944
2025-05-07T09:04:16.116839Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0
Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062]
2025-05-07T09:04:16.258763Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944
2025-05-07T09:04:16.259018Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944
2025-05-07T09:04:16.259254Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0
2025-05-07T09:04:16.259505Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944
2025-05-07T09:04:16.259561Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944
2025-05-07T09:04:16.261627Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944
2025-05-07T09:04:16.261766Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot
2025-05-07T09:04:16.262003Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944
2025-05-07T09:04:16.262068Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944
2025-05-07T09:04:16.262110Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state
2025-05-07T09:04:16.262144Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3
2025-05-07T09:04:16.264015Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944
2025-05-07T09:04:16.264083Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944
2025-05-07T09:04:16.264124Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128
2025-05-07T09:04:16.265788Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944
2025-05-07T09:04:16.265836Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944
2025-05-07T09:04:16.265880Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944
2025-05-07T09:04:16.265938Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1
2025-05-07T09:04:16.269586Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545
2025-05-07T09:04:16.271393Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816
2025-05-07T09:04:16.271577Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545
FAKE_COORDINATOR: Add transaction: 1 at step: 5000001
FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0
FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001
2025-05-07T09:04:16.272549Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944
2025-05-07T09:04:16.272681Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944
2025-05-07T09:04:16.272728Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944
2025-05-07T09:04:16.272992Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240
2025-05-07T09:04:16.273047Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944
2025-05-07T09:04:16.273214Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1
2025-05-07T09:04:16.273303Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944
FAKE_COORDINATOR: Erasing txId 1
2025-05-07T09:04:16.275212Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944
2025-05-07T09:04:16.275266Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1]
2025-05-07T09:04:16.275441Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944
2025-05-07T09:04:16.275478Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ...
7] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 2 TxId_Deprecated: 2 TabletID: 72075186233409547
Forgetting tablet 72075186233409548
2025-05-07T09:04:16.753798Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944
2025-05-07T09:04:16.753862Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 4], at schemeshard: 72057594046678944
2025-05-07T09:04:16.753949Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1
2025-05-07T09:04:16.754032Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944
2025-05-07T09:04:16.754073Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 6
2025-05-07T09:04:16.754844Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 4 TxId_Deprecated: 4 TabletID: 72075186233409549
Forgetting tablet 72075186233409551
Forgetting tablet 72075186233409547
2025-05-07T09:04:16.756631Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5898: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 3 ShardOwnerId: 72057594046678944 ShardLocalIdx: 3, at schemeshard: 72057594046678944
2025-05-07T09:04:16.756839Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5
Forgetting tablet 72075186233409549
2025-05-07T09:04:16.757169Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5898: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 6 ShardOwnerId: 72057594046678944 ShardLocalIdx: 6, at schemeshard: 72057594046678944
2025-05-07T09:04:16.757325Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4
2025-05-07T09:04:16.757855Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5898: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 2 ShardOwnerId: 72057594046678944 ShardLocalIdx: 2, at schemeshard: 72057594046678944
2025-05-07T09:04:16.758039Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3
2025-05-07T09:04:16.758671Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5898: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 4 ShardOwnerId: 72057594046678944 ShardLocalIdx: 4, at schemeshard: 72057594046678944
2025-05-07T09:04:16.758829Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2
2025-05-07T09:04:16.759349Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103
2025-05-07T09:04:16.760237Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:205: TTxCleanDroppedSubDomains Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944
2025-05-07T09:04:16.760295Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: PersistRemoveSubDomain for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944
2025-05-07T09:04:16.760423Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1
2025-05-07T09:04:16.762700Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:5
2025-05-07T09:04:16.762758Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:5 tabletId 72075186233409550
2025-05-07T09:04:16.762852Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:7
2025-05-07T09:04:16.762914Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:7 tabletId 72075186233409552
2025-05-07T09:04:16.763021Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:1
2025-05-07T09:04:16.763052Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:1 tabletId 72075186233409546
2025-05-07T09:04:16.765795Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 2 paths, skipped 0, left 1 candidates, at schemeshard: 72057594046678944
2025-05-07T09:04:16.765939Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944
2025-05-07T09:04:16.766010Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944
2025-05-07T09:04:16.766112Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1
2025-05-07T09:04:16.766301Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:3
2025-05-07T09:04:16.766337Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:3 tabletId 72075186233409548
2025-05-07T09:04:16.766755Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:6
2025-05-07T09:04:16.766791Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:6 tabletId 72075186233409551
2025-05-07T09:04:16.766849Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2
2025-05-07T09:04:16.766891Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:2 tabletId 72075186233409547
2025-05-07T09:04:16.767730Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:4
2025-05-07T09:04:16.767800Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:4 tabletId 72075186233409549
2025-05-07T09:04:16.768363Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944
2025-05-07T09:04:16.770126Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944
TestModificationResult got TxId: 103, wait until txId: 103
TestWaitNotification wait txId: 103
2025-05-07T09:04:16.770389Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 103: send EvNotifyTxCompletion
2025-05-07T09:04:16.770437Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 103
2025-05-07T09:04:16.770935Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 103, at schemeshard: 72057594046678944
2025-05-07T09:04:16.771049Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult
2025-05-07T09:04:16.771090Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [1:792:2679]
TestWaitNotification: OK eventTxId 103
2025-05-07T09:04:16.771661Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944
2025-05-07T09:04:16.771945Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 262us result status StatusPathDoesNotExist
2025-05-07T09:04:16.772126Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/USER_0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944
2025-05-07T09:04:16.772594Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944
2025-05-07T09:04:16.772782Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 197us result status StatusSuccess
2025-05-07T09:04:16.773162Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TStoragePoolsQuotasTest::DifferentQuotasInteraction [GOOD]
Test command err:
Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140]
IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140]
Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140]
2025-05-07T09:04:13.220253Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1
2025-05-07T09:04:13.220345Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10
2025-05-07T09:04:13.220392Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s
2025-05-07T09:04:13.220425Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration
2025-05-07T09:04:13.220517Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000
2025-05-07T09:04:13.220549Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000
2025-05-07T09:04:13.220607Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10
2025-05-07T09:04:13.220692Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false
2025-05-07T09:04:13.221416Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources#
2025-05-07T09:04:13.221774Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute
2025-05-07T09:04:13.297962Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs
2025-05-07T09:04:13.298046Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded
2025-05-07T09:04:13.315726Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete
2025-05-07T09:04:13.315942Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute
2025-05-07T09:04:13.316116Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944
2025-05-07T09:04:13.321960Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete
2025-05-07T09:04:13.322293Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0
2025-05-07T09:04:13.322977Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944
2025-05-07T09:04:13.323159Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944
2025-05-07T09:04:13.325930Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944
2025-05-07T09:04:13.327347Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944
2025-05-07T09:04:13.327414Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944
2025-05-07T09:04:13.327493Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute
2025-05-07T09:04:13.327546Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1]
2025-05-07T09:04:13.327648Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete
2025-05-07T09:04:13.327904Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944
2025-05-07T09:04:13.338232Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0
Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062]
2025-05-07T09:04:13.469905Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944
2025-05-07T09:04:13.470126Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944
2025-05-07T09:04:13.470356Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0
2025-05-07T09:04:13.470585Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944
2025-05-07T09:04:13.470649Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944
2025-05-07T09:04:13.472935Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944
2025-05-07T09:04:13.473105Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot
2025-05-07T09:04:13.473314Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944
2025-05-07T09:04:13.473395Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944
2025-05-07T09:04:13.473436Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state
2025-05-07T09:04:13.473474Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3
2025-05-07T09:04:13.475287Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944
2025-05-07T09:04:13.475347Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944
2025-05-07T09:04:13.475390Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128
2025-05-07T09:04:13.477144Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944
2025-05-07T09:04:13.477198Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944
2025-05-07T09:04:13.477236Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944
2025-05-07T09:04:13.477310Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1
2025-05-07T09:04:13.480019Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545
2025-05-07T09:04:13.481656Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816
2025-05-07T09:04:13.481822Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545
FAKE_COORDINATOR: Add transaction: 1 at step: 5000001
FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0
FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001
2025-05-07T09:04:13.482679Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944
2025-05-07T09:04:13.482815Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944
2025-05-07T09:04:13.482868Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944
2025-05-07T09:04:13.483142Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240
2025-05-07T09:04:13.483182Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944
2025-05-07T09:04:13.483337Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1
2025-05-07T09:04:13.483402Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944
FAKE_COORDINATOR: Erasing txId 1
2025-05-07T09:04:13.485034Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944
2025-05-07T09:04:13.485097Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1]
2025-05-07T09:04:13.485225Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944
2025-05-07T09:04:13.485256Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [ ...
72057594046678944, LocalPathId: 2] was 4
2025-05-07T09:04:16.791514Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3
2025-05-07T09:04:16.794398Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 103:0, at schemeshard: 72057594046678944
2025-05-07T09:04:16.795883Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 103:0, at schemeshard: 72057594046678944
2025-05-07T09:04:16.796118Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944
2025-05-07T09:04:16.796157Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 103, path id: [OwnerId: 72057594046678944, LocalPathId: 2]
2025-05-07T09:04:16.796354Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 103, path id: [OwnerId: 72057594046678944, LocalPathId: 3]
2025-05-07T09:04:16.796512Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944
2025-05-07T09:04:16.796545Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:205:2207], at schemeshard: 72057594046678944, txId: 103, path id: 2
2025-05-07T09:04:16.796594Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:205:2207], at schemeshard: 72057594046678944, txId: 103, path id: 3
2025-05-07T09:04:16.797009Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 103:0, at schemeshard: 72057594046678944
2025-05-07T09:04:16.797060Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1036: NTableState::TProposedWaitParts operationId# 103:0 ProgressState at tablet: 72057594046678944
2025-05-07T09:04:16.797158Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:674: all shard schema changes has been received, operationId: 103:0, at schemeshard: 72057594046678944
2025-05-07T09:04:16.797198Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:686: send schema changes ack message, operation: 103:0, datashard: 72075186233409548, at schemeshard: 72057594046678944
2025-05-07T09:04:16.797243Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 103:0 129 -> 240
2025-05-07T09:04:16.798111Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 8 PathOwnerId: 72057594046678944, cookie: 103
2025-05-07T09:04:16.798225Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 8 PathOwnerId: 72057594046678944, cookie: 103
2025-05-07T09:04:16.798262Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 103
2025-05-07T09:04:16.798300Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 103, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 8
2025-05-07T09:04:16.798339Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5
2025-05-07T09:04:16.799139Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 103
2025-05-07T09:04:16.799219Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 103
2025-05-07T09:04:16.799248Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 103
2025-05-07T09:04:16.799306Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 103, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 18446744073709551615
2025-05-07T09:04:16.799346Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4
2025-05-07T09:04:16.799419Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 103, ready parts: 0/1, is published: true
2025-05-07T09:04:16.802689Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 103:0, at schemeshard: 72057594046678944
2025-05-07T09:04:16.802755Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_table.cpp:414: TDropTable TProposedDeletePart operationId: 103:0 ProgressState, at schemeshard: 72057594046678944
2025-05-07T09:04:16.803184Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove table for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3
2025-05-07T09:04:16.803382Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#103:0 progress is 1/1
2025-05-07T09:04:16.803421Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 103 ready parts: 1/1
2025-05-07T09:04:16.803467Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#103:0 progress is 1/1
2025-05-07T09:04:16.803501Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 103 ready parts: 1/1
2025-05-07T09:04:16.803541Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 103, ready parts: 1/1, is published: true
2025-05-07T09:04:16.803618Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:406:2374] message: TxId: 103
2025-05-07T09:04:16.803724Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 103 ready parts: 1/1
2025-05-07T09:04:16.803768Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 103:0
2025-05-07T09:04:16.803799Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 103:0
2025-05-07T09:04:16.803903Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2
2025-05-07T09:04:16.804437Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944
2025-05-07T09:04:16.804477Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 0, path id: [OwnerId: 72057594046678944, LocalPathId: 2]
2025-05-07T09:04:16.805260Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103
2025-05-07T09:04:16.806786Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103
2025-05-07T09:04:16.808404Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944
2025-05-07T09:04:16.808463Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:205:2207], at schemeshard: 72057594046678944, txId: 0, path id: 2
2025-05-07T09:04:16.808546Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult
2025-05-07T09:04:16.808586Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [1:727:2663]
2025-05-07T09:04:16.809469Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 9 PathOwnerId: 72057594046678944, cookie: 0
TestWaitNotification: OK eventTxId 103
2025-05-07T09:04:16.811110Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/SomeDatabase" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944
2025-05-07T09:04:16.811346Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/SomeDatabase" took 240us result status StatusSuccess
2025-05-07T09:04:16.811829Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/SomeDatabase" PathDescription { Self { Name: "SomeDatabase" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 9 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 9 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SubDomainStateVersion: 2 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "fast" Kind: "fast_kind" } StoragePools { Name: "large" Kind: "large_kind" } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } StoragePoolsUsage { PoolKind: "large_kind" TotalSize: 0 DataSize: 0 IndexSize: 0 } StoragePoolsUsage { PoolKind: "fast_kind" TotalSize: 0 DataSize: 0 IndexSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 DatabaseQuotas { data_size_hard_quota: 2800 data_size_soft_quota: 2200 storage_quotas { unit_kind: "fast_kind" data_size_hard_quota: 600 data_size_soft_quota: 500 } storage_quotas { unit_kind: "large_kind" data_size_hard_quota: 2200 data_size_soft_quota: 1700 } } SecurityState { } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::ConsistentCopyRejects [GOOD]
Test command err:
Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140]
IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140]
Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:128:2058] recipient: [1:108:2140]
2025-05-07T09:04:14.748144Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1
2025-05-07T09:04:14.748223Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10
2025-05-07T09:04:14.748270Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s
2025-05-07T09:04:14.748313Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration
2025-05-07T09:04:14.748371Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000
2025-05-07T09:04:14.748396Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000
2025-05-07T09:04:14.748449Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10
2025-05-07T09:04:14.748522Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false
2025-05-07T09:04:14.749265Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources#
2025-05-07T09:04:14.749653Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute
2025-05-07T09:04:14.834534Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs
2025-05-07T09:04:14.834598Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded
2025-05-07T09:04:14.848480Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete
2025-05-07T09:04:14.848578Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute
2025-05-07T09:04:14.848741Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944
2025-05-07T09:04:14.857637Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete
2025-05-07T09:04:14.858195Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0
2025-05-07T09:04:14.858683Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944
2025-05-07T09:04:14.858941Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944
2025-05-07T09:04:14.861370Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944
2025-05-07T09:04:14.862737Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944
2025-05-07T09:04:14.862789Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944
2025-05-07T09:04:14.862844Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute
2025-05-07T09:04:14.862875Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1]
2025-05-07T09:04:14.862963Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete
2025-05-07T09:04:14.863119Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944
2025-05-07T09:04:14.869601Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0
Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:239:2058] recipient: [1:15:2062]
2025-05-07T09:04:14.968260Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944
2025-05-07T09:04:14.968415Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944
2025-05-07T09:04:14.968574Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0
2025-05-07T09:04:14.968791Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944
2025-05-07T09:04:14.968855Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944
2025-05-07T09:04:14.970578Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944
2025-05-07T09:04:14.970692Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot
2025-05-07T09:04:14.970841Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944
2025-05-07T09:04:14.970914Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944
2025-05-07T09:04:14.970955Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state
2025-05-07T09:04:14.970980Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3
2025-05-07T09:04:14.972425Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944
2025-05-07T09:04:14.972485Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944
2025-05-07T09:04:14.972517Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128
2025-05-07T09:04:14.973733Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944
2025-05-07T09:04:14.973764Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944
2025-05-07T09:04:14.973812Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944
2025-05-07T09:04:14.973859Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1
2025-05-07T09:04:14.977427Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545
2025-05-07T09:04:14.979103Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816
2025-05-07T09:04:14.979252Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545
FAKE_COORDINATOR: Add transaction: 1 at step: 5000001
FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0
FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001
2025-05-07T09:04:14.980200Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944
2025-05-07T09:04:14.980339Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 130 RawX2: 4294969450 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944
2025-05-07T09:04:14.980386Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944
2025-05-07T09:04:14.980645Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240
2025-05-07T09:04:14.980717Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944
2025-05-07T09:04:14.980911Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1
2025-05-07T09:04:14.980993Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944
FAKE_COORDINATOR: Erasing txId 1
2025-05-07T09:04:14.982986Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944
2025-05-07T09:04:14.983038Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1]
2025-05-07T09:04:14.983210Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944
2025-05-07T09:04:14.983247Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ...
72057594046678944 2025-05-07T09:04:16.851790Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 106:0 129 -> 240 2025-05-07T09:04:16.853771Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 106:0, at schemeshard: 72057594046678944 2025-05-07T09:04:16.853939Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 106:0, at schemeshard: 72057594046678944 2025-05-07T09:04:16.854022Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_copy_table.cpp:306: TCopyTable TCopyTableBarrier operationId: 106:0ProgressState, operation type TxCopyTable 2025-05-07T09:04:16.854072Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:1059: Set barrier, OperationId: 106:0, name: CopyTableBarrier, done: 0, blocked: 1, parts count: 1 2025-05-07T09:04:16.854111Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:1103: All parts have reached barrier, tx: 106, done: 0, blocked: 1 2025-05-07T09:04:16.854213Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_copy_table.cpp:289: TCopyTable TCopyTableBarrier operationId: 106:0 HandleReply TEvPrivate::TEvCompleteBarrier, msg: NKikimr::NSchemeShard::TEvPrivate::TEvCompleteBarrier { TxId: 106 Name: CopyTableBarrier }, at tablet# 72057594046678944 2025-05-07T09:04:16.854261Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 106:0 240 -> 240 2025-05-07T09:04:16.856628Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 106:0, at schemeshard: 72057594046678944 2025-05-07T09:04:16.856690Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046678944] TDone opId# 106:0 ProgressState 2025-05-07T09:04:16.856804Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#106:0 progress is 1/1 2025-05-07T09:04:16.856843Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 106 ready parts: 1/1 2025-05-07T09:04:16.856886Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#106:0 progress is 1/1 2025-05-07T09:04:16.856927Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 106 ready parts: 1/1 2025-05-07T09:04:16.856966Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 106, ready parts: 1/1, is published: true 2025-05-07T09:04:16.857043Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [2:635:2558] message: TxId: 106 2025-05-07T09:04:16.857107Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 106 ready parts: 1/1 2025-05-07T09:04:16.857174Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 106:0 2025-05-07T09:04:16.857212Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 106:0 2025-05-07T09:04:16.857359Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 6] was 3 2025-05-07T09:04:16.857402Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: 
DecrementPathDbRefCount reason remove txstate source path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-05-07T09:04:16.859429Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 106: got EvNotifyTxCompletionResult 2025-05-07T09:04:16.859485Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 106: satisfy waiter [2:812:2711] TestWaitNotification: OK eventTxId 106 2025-05-07T09:04:16.860192Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0/table" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T09:04:16.860431Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0/table" took 273us result status StatusSuccess 2025-05-07T09:04:16.860782Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0/table" PathDescription { Self { Name: "table" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 150 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "table" Columns { Name: "RowId" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "RowId" KeyColumnIds: 1 TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:04:16.861436Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0/dst" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 
2025-05-07T09:04:16.861637Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0/dst" took 214us result status StatusSuccess 2025-05-07T09:04:16.862020Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0/dst" PathDescription { Self { Name: "dst" PathId: 6 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 106 CreateStep: 200 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "dst" Columns { Name: "RowId" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "RowId" KeyColumnIds: 1 TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 6 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:04:16.862544Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T09:04:16.862679Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 161us result status StatusSuccess 2025-05-07T09:04:16.862960Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0" PathDescription { Self { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 
SecurityStateVersion: 0 } } Children { Name: "dst" PathId: 6 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 106 CreateStep: 200 ParentPathId: 2 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: "table" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 150 ParentPathId: 2 PathState: EPathStateCopying Owner: "root@builtin" ACL: "" ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 2 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TCdcStreamTests::StreamOnBuildingIndexTable [GOOD] >> TCdcStreamWithInitialScanTests::InitialScanEnabled >> TSchemeShardSubDomainTest::Redefine >> TPersQueueCommonTest::Auth_WriteSessionWithValidTokenAndACEAndThenRemoveACEAndSendWriteRequest_SessionClosedWithUnauthorizedErrorAfterSuccessfullWriteResponse [GOOD] >> TPersQueueCommonTest::Auth_MultipleInflightWriteUpdateTokenRequestWithDifferentValidToken_SessionClosedWithOverloadedError ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::CreateItemsInsideSubdomainWithStoragePools [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140] 2025-05-07T09:04:16.470183Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T09:04:16.470248Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:04:16.470274Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T09:04:16.470300Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T09:04:16.470345Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T09:04:16.470374Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T09:04:16.470425Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue 
configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:04:16.470488Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T09:04:16.471081Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T09:04:16.471391Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T09:04:16.541268Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T09:04:16.541318Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:04:16.557363Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T09:04:16.557596Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T09:04:16.557774Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T09:04:16.563512Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T09:04:16.563779Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T09:04:16.564370Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:16.564536Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T09:04:16.567223Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:16.568557Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:16.568626Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:16.568684Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T09:04:16.568717Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:16.568810Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T09:04:16.568986Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T09:04:16.574673Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T09:04:16.692250Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T09:04:16.692480Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:16.692688Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T09:04:16.692946Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T09:04:16.693003Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:16.698849Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:16.699015Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T09:04:16.699212Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:16.699330Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T09:04:16.699372Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T09:04:16.699407Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T09:04:16.701174Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:16.701233Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T09:04:16.701277Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T09:04:16.702910Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:16.702979Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:16.703021Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:16.703098Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 
1/1 2025-05-07T09:04:16.712298Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T09:04:16.714536Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T09:04:16.714785Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T09:04:16.715883Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:16.716029Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:04:16.716089Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:16.716442Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T09:04:16.716503Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:16.716686Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T09:04:16.716781Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T09:04:16.719218Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:16.719300Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:16.719483Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:16.719522Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 
2025-05-07T09:04:17.209361Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-05-07T09:04:17.209396Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#103:0 progress is 1/1 2025-05-07T09:04:17.209424Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-05-07T09:04:17.209456Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 103, ready parts: 1/1, is published: true 2025-05-07T09:04:17.209537Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:485:2442] message: TxId: 103 2025-05-07T09:04:17.209587Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-05-07T09:04:17.209623Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 103:0 2025-05-07T09:04:17.209662Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 103:0 2025-05-07T09:04:17.209773Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 3 2025-05-07T09:04:17.211685Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-05-07T09:04:17.211737Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [1:486:2443] TestWaitNotification: OK eventTxId 103 2025-05-07T09:04:17.212316Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T09:04:17.212560Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 262us result status StatusSuccess 2025-05-07T09:04:17.213156Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0" PathDescription { Self { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 8 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 8 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 6 SubDomainVersion: 1 SecurityStateVersion: 0 } } Children { Name: "dir_0" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 102 CreateStep: 150 ParentPathId: 2 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: true } Children { Name: "table_0" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 150 ParentPathId: 2 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 
ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "name_USER_0_kind_hdd-1" Kind: "hdd-1" } StoragePools { Name: "name_USER_0_kind_hdd-2" Kind: "hdd-2" } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 3 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:04:17.213818Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0/table_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T09:04:17.214059Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0/table_0" took 256us result status StatusSuccess 2025-05-07T09:04:17.214492Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0/table_0" PathDescription { Self { Name: "table_0" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 150 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "table_0" Columns { Name: "RowId" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "RowId" KeyColumnIds: 1 TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 3 PathOwnerId: 
72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:04:17.215109Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0/dir_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T09:04:17.215318Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0/dir_0" took 179us result status StatusSuccess 2025-05-07T09:04:17.215676Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0/dir_0" PathDescription { Self { Name: "dir_0" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 102 CreateStep: 150 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 4 } ChildrenExist: true } Children { Name: "table_1" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 103 CreateStep: 200 ParentPathId: 4 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 4 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:04:17.216203Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0/dir_0/table_1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T09:04:17.216402Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0/dir_0/table_1" took 223us result status StatusSuccess 2025-05-07T09:04:17.216798Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0/dir_0/table_1" PathDescription { Self { Name: "table_1" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 103 CreateStep: 200 ParentPathId: 4 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "table_1" Columns { Name: "RowId" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Value" Type: "Utf8" TypeId: 4608 Id: 2 
NotNull: false IsBuildInProgress: false } KeyColumnNames: "RowId" KeyColumnIds: 1 TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 5 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TExportToS3Tests::ShouldCheckQuotasExportsLimited [GOOD] >> TExportToS3Tests::ShouldCheckQuotasChildrenLimited |92.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut/unittest |92.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut/unittest |92.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut/unittest |92.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut/unittest >> TGroupMapperTest::NonUniformClusterMirror3dc >> TGroupMapperTest::NonUniformClusterDifferentSlotsPerDisk [GOOD] >> TGroupMapperTest::ReassignGroupTest3dc >> TGroupMapperTest::MakeDisksNonoperational [GOOD] >> TGroupMapperTest::Mirror3dc3Nodes [GOOD] >> TMultiversionObjectMap::MonteCarlo >> TGroupMapperTest::NonUniformCluster >> TGroupMapperTest::Mirror3dc >> TGroupMapperTest::MonteCarlo >> TGroupMapperTest::InterlacedRacksWithoutInterlacedNodes [GOOD] >> TGroupMapperTest::MakeDisksForbidden [GOOD] >> TGroupMapperTest::NonUniformClusterMirror3dcWithUnusableDomain >> TGroupMapperTest::Block42_1disk |92.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut/unittest >> TCdcStreamWithInitialScanTests::InitialScanEnabled [GOOD] >> TCdcStreamWithInitialScanTests::InitialScanDisabled |92.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut/unittest >> TGroupMapperTest::Mirror3dc3Nodes [GOOD] |92.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut/unittest >> TGroupMapperTest::NonUniformClusterDifferentSlotsPerDisk [GOOD] |92.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut/unittest >> TGroupMapperTest::MakeDisksForbidden [GOOD] >> TSchemeShardSubDomainTest::Redefine [GOOD] >> TBlobStorageControllerGrouperTest::TestGroupFromCandidatesEmpty [GOOD] >> TGroupMapperTest::MapperSequentialCalls |92.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut/unittest >> TGroupMapperTest::InterlacedRacksWithoutInterlacedNodes [GOOD] |92.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut/unittest >> TGroupMapperTest::MakeDisksNonoperational [GOOD] >> 
TGroupMapperTest::NonUniformClusterMirror3dcWithUnusableDomain [GOOD] >> TGroupMapperTest::Mirror3dc [GOOD] >> TGroupMapperTest::CheckNotToBreakFailModel [GOOD] >> TGroupMapperTest::NonUniformClusterMirror3dc [GOOD] |92.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut/unittest >> TBlobStorageControllerGrouperTest::TestGroupFromCandidatesEmpty [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::Redefine [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:128:2058] recipient: [1:108:2140] 2025-05-07T09:04:18.417605Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T09:04:18.417708Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:04:18.417751Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T09:04:18.417788Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T09:04:18.417837Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T09:04:18.417868Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T09:04:18.417939Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:04:18.418082Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T09:04:18.418878Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T09:04:18.419284Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T09:04:18.510071Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T09:04:18.510147Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:04:18.526339Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T09:04:18.526500Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T09:04:18.526661Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T09:04:18.553134Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T09:04:18.553777Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T09:04:18.554541Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:18.554850Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T09:04:18.557324Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:18.558899Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:18.558963Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:18.559022Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T09:04:18.559095Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:18.559217Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T09:04:18.559397Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T09:04:18.570513Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:239:2058] recipient: [1:15:2062] 2025-05-07T09:04:18.708492Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T09:04:18.708834Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:18.709051Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T09:04:18.709308Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T09:04:18.709383Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:18.712007Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at 
schemeshard: 72057594046678944 2025-05-07T09:04:18.712189Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T09:04:18.712413Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:18.712492Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T09:04:18.712537Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T09:04:18.712576Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T09:04:18.714796Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:18.714879Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T09:04:18.714930Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T09:04:18.718031Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:18.718088Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:18.718138Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:18.718206Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T09:04:18.722244Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T09:04:18.724438Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T09:04:18.724619Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T09:04:18.725660Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:18.725828Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { 
RawX1: 130 RawX2: 4294969450 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:04:18.725889Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:18.726268Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T09:04:18.726342Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:18.726530Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T09:04:18.726609Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T09:04:18.728956Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:18.729018Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:18.729208Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:18.729280Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 
9:04:18.999753Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 104 2025-05-07T09:04:18.999819Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 104, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 7 2025-05-07T09:04:18.999872Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-05-07T09:04:19.000656Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 104 2025-05-07T09:04:19.000739Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 104 2025-05-07T09:04:19.000772Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 104 2025-05-07T09:04:19.000811Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 104, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 18446744073709551615 2025-05-07T09:04:19.000840Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5 2025-05-07T09:04:19.000917Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 104, subscribers: 0 2025-05-07T09:04:19.003906Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:1 hive 72057594037968897 at ss 72057594046678944 2025-05-07T09:04:19.003967Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:3 hive 72057594037968897 at ss 72057594046678944 2025-05-07T09:04:19.003994Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:2 hive 72057594037968897 at ss 72057594046678944 2025-05-07T09:04:19.005680Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 2025-05-07T09:04:19.006074Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 1 TxId_Deprecated: 1 TabletID: 72075186233409546 Forgetting tablet 72075186233409546 2025-05-07T09:04:19.007298Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5898: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 1 ShardOwnerId: 72057594046678944 ShardLocalIdx: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:19.007638Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-05-07T09:04:19.007947Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 2025-05-07T09:04:19.008181Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 3 TxId_Deprecated: 3 TabletID: 72075186233409548 2025-05-07T09:04:19.008418Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 2 TxId_Deprecated: 2 TabletID: 72075186233409547 2025-05-07T09:04:19.009373Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5898: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 3 ShardOwnerId: 72057594046678944 ShardLocalIdx: 3, at schemeshard: 72057594046678944 2025-05-07T09:04:19.009611Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 Forgetting tablet 72075186233409548 Forgetting tablet 72075186233409547 2025-05-07T09:04:19.010382Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5898: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 2 ShardOwnerId: 72057594046678944 ShardLocalIdx: 2, at schemeshard: 72057594046678944 2025-05-07T09:04:19.010597Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-05-07T09:04:19.011713Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:205: TTxCleanDroppedSubDomains Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-05-07T09:04:19.011786Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: PersistRemoveSubDomain for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-05-07T09:04:19.011922Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-05-07T09:04:19.012215Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-05-07T09:04:19.012272Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-05-07T09:04:19.012351Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T09:04:19.013879Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:1 2025-05-07T09:04:19.013952Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:1 tabletId 72075186233409546 2025-05-07T09:04:19.017064Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:3 2025-05-07T09:04:19.017117Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:3 tabletId 72075186233409548 2025-05-07T09:04:19.017214Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2 
2025-05-07T09:04:19.017252Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:2 tabletId 72075186233409547 2025-05-07T09:04:19.017458Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-05-07T09:04:19.017581Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 104, wait until txId: 104 TestWaitNotification wait txId: 104 2025-05-07T09:04:19.017880Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 104: send EvNotifyTxCompletion 2025-05-07T09:04:19.017927Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 104 2025-05-07T09:04:19.018480Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 104, at schemeshard: 72057594046678944 2025-05-07T09:04:19.018583Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 104: got EvNotifyTxCompletionResult 2025-05-07T09:04:19.018620Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 104: satisfy waiter [1:578:2533] TestWaitNotification: OK eventTxId 104 2025-05-07T09:04:19.019288Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T09:04:19.019541Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 264us result status StatusPathDoesNotExist 2025-05-07T09:04:19.019730Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/USER_0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-05-07T09:04:19.020290Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T09:04:19.020478Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 186us result status StatusSuccess 2025-05-07T09:04:19.020817Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: 
schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TExportToS3Tests::ShouldCheckQuotasChildrenLimited [GOOD] |92.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut/unittest >> TGroupMapperTest::NonUniformClusterMirror3dcWithUnusableDomain [GOOD] >> TCdcStreamWithInitialScanTests::InitialScanDisabled [GOOD] >> TCdcStreamWithInitialScanTests::InitialScanProgress |92.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut/unittest >> TGroupMapperTest::Mirror3dc [GOOD] |92.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut/unittest >> TGroupMapperTest::NonUniformClusterMirror3dc [GOOD] |92.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut/unittest >> TGroupMapperTest::CheckNotToBreakFailModel [GOOD] >> TSchemeShardSubDomainTest::DiskSpaceUsage [GOOD] >> TBlobStorageControllerGrouperTest::when_one_server_per_rack_in_4_racks_then_can_construct_group_with_4_domains [GOOD] >> TPersQueueTest::DirectReadBadCases [GOOD] >> TPersQueueTest::DirectReadStop >> TGroupMapperTest::NonUniformCluster2 >> TBlobStorageControllerGrouperTest::TestGroupFromCandidatesTrivial [GOOD] >> TGroupMapperTest::SanitizeGroupTest3dc |92.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut/unittest >> TPersQueueTest::Cache [GOOD] >> TPersQueueTest::CacheHead |92.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut/unittest >> TBlobStorageControllerGrouperTest::when_one_server_per_rack_in_4_racks_then_can_construct_group_with_4_domains [GOOD] |92.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::DiskSpaceUsage [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140] 2025-05-07T09:04:07.949192Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: 
BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T09:04:07.949257Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:04:07.949302Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-05-07T09:04:07.949327Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T09:04:07.949367Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T09:04:07.949393Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T09:04:07.949443Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:04:07.949509Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T09:04:07.950157Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T09:04:07.950438Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T09:04:08.028282Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T09:04:08.028329Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:04:08.043490Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T09:04:08.043660Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T09:04:08.043823Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T09:04:08.049336Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T09:04:08.049611Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T09:04:08.050234Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:08.050416Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T09:04:08.052933Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:08.054196Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 
2025-05-07T09:04:08.054259Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:08.054315Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T09:04:08.054346Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:08.054428Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T09:04:08.054624Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T09:04:08.059994Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T09:04:08.163929Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T09:04:08.164097Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:08.164259Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T09:04:08.164423Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T09:04:08.164472Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:08.166144Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:08.166252Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T09:04:08.166405Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:08.166445Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T09:04:08.166469Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T09:04:08.166491Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T09:04:08.167955Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:08.168003Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T09:04:08.168038Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T09:04:08.169387Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:08.169427Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:08.169460Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:08.169504Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T09:04:08.172189Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T09:04:08.173528Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T09:04:08.173703Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T09:04:08.174596Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:08.174706Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:04:08.174737Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:08.174961Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T09:04:08.175000Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:08.175121Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T09:04:08.175168Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T09:04:08.176946Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:08.176987Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:08.177124Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:08.177155Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [ ... ghtLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T09:04:19.714597Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:04:19.714647Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-05-07T09:04:19.714704Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T09:04:19.714758Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T09:04:19.714866Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T09:04:19.714932Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:04:19.715021Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T09:04:19.715824Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T09:04:19.716148Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T09:04:19.730447Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T09:04:19.731930Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T09:04:19.732101Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T09:04:19.732217Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T09:04:19.732244Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:04:19.732380Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T09:04:19.733113Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1383: 
TTxInit for Paths, read records: 3, at schemeshard: 72057594046678944 2025-05-07T09:04:19.733193Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:319: AttachChild: child attached as only one child to the parent, parent id: [OwnerId: 72057594046678944, LocalPathId: 1], parent name: MyRoot, child name: Table1, child id: [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-05-07T09:04:19.733232Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:319: AttachChild: child attached as only one child to the parent, parent id: [OwnerId: 72057594046678944, LocalPathId: 1], parent name: MyRoot, child name: Table2, child id: [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-05-07T09:04:19.733282Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1457: TTxInit for UserAttributes, read records: 0, at schemeshard: 72057594046678944 2025-05-07T09:04:19.733361Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1483: TTxInit for UserAttributesAlterData, read records: 0, at schemeshard: 72057594046678944 2025-05-07T09:04:19.733686Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1785: TTxInit for Tables, read records: 2, at schemeshard: 72057594046678944 2025-05-07T09:04:19.733796Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 0 2025-05-07T09:04:19.733843Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 0 2025-05-07T09:04:19.733914Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__root_data_erasure_manager.cpp:450: [RootDataErasureManager] Restore: Generation# 0, Status# 0, WakeupInterval# 604800 s, NumberDataErasureTenantsInRunning# 0 2025-05-07T09:04:19.734162Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2011: TTxInit for Columns, read records: 4, at schemeshard: 72057594046678944 2025-05-07T09:04:19.734288Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2071: TTxInit for ColumnsAlters, read records: 0, at schemeshard: 72057594046678944 2025-05-07T09:04:19.734382Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2129: TTxInit for Shards, read records: 3, at schemeshard: 72057594046678944 2025-05-07T09:04:19.734412Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-05-07T09:04:19.734448Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-05-07T09:04:19.734476Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-05-07T09:04:19.734585Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2215: TTxInit for TablePartitions, read records: 3, at schemeshard: 72057594046678944 2025-05-07T09:04:19.734759Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2281: TTxInit for TableShardPartitionConfigs, read records: 0, at schemeshard: 72057594046678944 2025-05-07T09:04:19.735035Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2431: TTxInit for ChannelsBinding, read records: 9, at schemeshard: 72057594046678944 2025-05-07T09:04:19.735296Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2810: TTxInit for TableIndexes, read 
records: 0, at schemeshard: 72057594046678944 2025-05-07T09:04:19.735399Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2889: TTxInit for TableIndexKeys, read records: 0, at schemeshard: 72057594046678944 2025-05-07T09:04:19.735777Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3387: TTxInit for KesusInfos, read records: 0, at schemeshard: 72057594046678944 2025-05-07T09:04:19.735844Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3423: TTxInit for KesusAlters, read records: 0, at schemeshard: 72057594046678944 2025-05-07T09:04:19.736044Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3637: TTxInit for TxShards, read records: 0, at schemeshard: 72057594046678944 2025-05-07T09:04:19.736112Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3782: TTxInit for ShardToDelete, read records: 0, at schemeshard: 72057594046678944 2025-05-07T09:04:19.736199Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3799: TTxInit for BackupSettings, read records: 0, at schemeshard: 72057594046678944 2025-05-07T09:04:19.736354Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3959: TTxInit for ShardBackupStatus, read records: 0, at schemeshard: 72057594046678944 2025-05-07T09:04:19.736432Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3975: TTxInit for CompletedBackup, read records: 0, at schemeshard: 72057594046678944 2025-05-07T09:04:19.736576Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4260: TTxInit for Publications, read records: 0, at schemeshard: 72057594046678944 2025-05-07T09:04:19.736745Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4558: IndexBuild , records: 0, at schemeshard: 72057594046678944 2025-05-07T09:04:19.736881Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4705: SnapshotTables: snapshots: 0 tables: 0, at schemeshard: 72057594046678944 2025-05-07T09:04:19.736925Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4732: SnapshotSteps: snapshots: 0, at schemeshard: 72057594046678944 2025-05-07T09:04:19.736974Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4759: LongLocks: records: 0, at schemeshard: 72057594046678944 2025-05-07T09:04:19.743907Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:19.743987Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:19.744912Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T09:04:19.744972Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:19.745041Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T09:04:19.746318Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594046678944 is [1:754:2672] sender: [1:810:2058] recipient: [1:15:2062] 2025-05-07T09:04:19.778986Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { 
ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T09:04:19.779232Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 254us result status StatusSuccess 2025-05-07T09:04:19.779740Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Table1" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: "Table2" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 102 CreateStep: 5000003 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 2 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 1752 DataSize: 1752 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 |92.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut/unittest >> TBlobStorageControllerGrouperTest::TestGroupFromCandidatesTrivial [GOOD] >> TGroupMapperTest::MakeDisksUnusable [GOOD] >> TBlobStorageControllerGrouperTest::when_one_server_per_rack_in_4_racks_then_can_construct_group_with_4_domains_and_one_small_node [GOOD] >> DataShardVolatile::DistributedWriteThenReadIterator [GOOD] >> DataShardVolatile::DistributedWriteThenReadIteratorStream |92.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut/unittest >> TGroupMapperTest::MakeDisksUnusable [GOOD] >> TCdcStreamWithInitialScanTests::InitialScanProgress [GOOD] >> TCdcStreamWithInitialScanTests::WithoutPqTransactions ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_export/unittest >> TExportToS3Tests::ShouldCheckQuotasChildrenLimited [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140] 2025-05-07T09:03:46.200018Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T09:03:46.200125Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:03:46.200164Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T09:03:46.200195Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T09:03:46.201397Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T09:03:46.201455Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T09:03:46.201522Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:03:46.201586Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T09:03:46.202268Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T09:03:46.205649Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T09:03:46.274713Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T09:03:46.274795Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:03:46.290548Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T09:03:46.290729Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T09:03:46.290891Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T09:03:46.298373Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T09:03:46.298664Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T09:03:46.303796Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T09:03:46.303986Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T09:03:46.312023Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:03:46.321082Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: 
TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:03:46.321148Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:03:46.321210Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T09:03:46.321270Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:03:46.321328Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T09:03:46.322465Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T09:03:46.327811Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T09:03:46.431116Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T09:03:46.431347Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:03:46.431573Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T09:03:46.431848Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T09:03:46.431908Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:03:46.434105Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T09:03:46.434235Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T09:03:46.434415Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:03:46.434480Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T09:03:46.434532Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T09:03:46.434568Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change 
state for txid 1:0 2 -> 3 2025-05-07T09:03:46.436189Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:03:46.436244Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T09:03:46.436279Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T09:03:46.437669Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:03:46.437707Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:03:46.437756Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:03:46.437807Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T09:03:46.440678Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T09:03:46.442366Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T09:03:46.442549Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T09:03:46.443547Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T09:03:46.443673Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:03:46.443730Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:03:46.443997Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T09:03:46.444051Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:03:46.444223Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 
2025-05-07T09:03:46.444336Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T09:03:46.445869Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:03:46.445910Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:03:46.446070Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:03:46.446122Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... T09:04:19.764732Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 281474976720762 2025-05-07T09:04:19.764769Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 281474976720762, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 7 2025-05-07T09:04:19.764806Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-05-07T09:04:19.764891Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 281474976720762, ready parts: 0/1, is published: true 2025-05-07T09:04:19.765928Z node 4 :EXPORT DEBUG: schemeshard_export__create.cpp:329: TExport::TTxProgress: DoComplete 2025-05-07T09:04:19.766215Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 281474976720762, at schemeshard: 72057594046678944 2025-05-07T09:04:19.766276Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 281474976720762, ready parts: 0/1, is published: true 2025-05-07T09:04:19.766324Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 281474976720762, at schemeshard: 72057594046678944 2025-05-07T09:04:19.767821Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 281474976720762:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:281474976720762 msg type: 269090816 2025-05-07T09:04:19.767972Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 281474976720762, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 281474976720762 at step: 5000007 FAKE_COORDINATOR: advance: minStep5000007 State->FrontStep: 5000006 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976720762 at step: 5000007 2025-05-07T09:04:19.768242Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976720762 2025-05-07T09:04:19.768504Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep 
Execute, stepId: 5000007, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:19.768626Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 281474976720762 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 17179871340 } } Step: 5000007 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:04:19.768683Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_rmdir.cpp:129: TRmDir HandleReply TEvOperationPlan, opId: 281474976720762:0, step: 5000007, at schemeshard: 72057594046678944 2025-05-07T09:04:19.768819Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_rmdir.cpp:180: RmDir is done, opId: 281474976720762:0, at schemeshard: 72057594046678944 2025-05-07T09:04:19.768886Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976720762:0 progress is 1/1 2025-05-07T09:04:19.768929Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 281474976720762 ready parts: 1/1 2025-05-07T09:04:19.768982Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976720762:0 progress is 1/1 2025-05-07T09:04:19.769016Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 281474976720762 ready parts: 1/1 2025-05-07T09:04:19.769096Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-05-07T09:04:19.769178Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-05-07T09:04:19.769224Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 281474976720762, ready parts: 1/1, is published: false 2025-05-07T09:04:19.769294Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 281474976720762 ready parts: 1/1 2025-05-07T09:04:19.769343Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 281474976720762:0 2025-05-07T09:04:19.769379Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 281474976720762:0 2025-05-07T09:04:19.769462Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-05-07T09:04:19.769508Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 281474976720762, publications: 2, subscribers: 1 2025-05-07T09:04:19.769550Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 281474976720762, [OwnerId: 72057594046678944, LocalPathId: 1], 11 2025-05-07T09:04:19.769586Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 281474976720762, [OwnerId: 72057594046678944, LocalPathId: 3], 18446744073709551615 2025-05-07T09:04:19.771484Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976720762 
2025-05-07T09:04:19.773099Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:19.773141Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 281474976720762, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:19.773272Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 281474976720762, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-05-07T09:04:19.773351Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:19.773375Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [4:336:2312], at schemeshard: 72057594046678944, txId: 281474976720762, path id: 1 2025-05-07T09:04:19.773404Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [4:336:2312], at schemeshard: 72057594046678944, txId: 281474976720762, path id: 3 FAKE_COORDINATOR: Erasing txId 281474976720762 2025-05-07T09:04:19.774114Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 3 LocalPathId: 1 Version: 11 PathOwnerId: 72057594046678944, cookie: 281474976720762 2025-05-07T09:04:19.774214Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 3 LocalPathId: 1 Version: 11 PathOwnerId: 72057594046678944, cookie: 281474976720762 2025-05-07T09:04:19.774291Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 281474976720762 2025-05-07T09:04:19.774334Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 281474976720762, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 11 2025-05-07T09:04:19.774381Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-05-07T09:04:19.774899Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 3 LocalPathId: 3 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 281474976720762 2025-05-07T09:04:19.774956Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 3 LocalPathId: 3 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 281474976720762 2025-05-07T09:04:19.774985Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 281474976720762 2025-05-07T09:04:19.775006Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 
72057594046678944, txId: 281474976720762, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 18446744073709551615 2025-05-07T09:04:19.775030Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-05-07T09:04:19.775110Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 281474976720762, subscribers: 1 2025-05-07T09:04:19.775155Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:212: TTxAckPublishToSchemeBoard Notify send TEvNotifyTxCompletionResult, at schemeshard: 72057594046678944, to actorId: [4:287:2274] 2025-05-07T09:04:19.777961Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976720762 2025-05-07T09:04:19.778395Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976720762 2025-05-07T09:04:19.778472Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6706: Handle: TEvNotifyTxCompletionResult: txId# 281474976720762 2025-05-07T09:04:19.778517Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6708: Message: TxId: 281474976720762 2025-05-07T09:04:19.778552Z node 4 :EXPORT DEBUG: schemeshard_export__create.cpp:309: TExport::TTxProgress: DoExecute 2025-05-07T09:04:19.778574Z node 4 :EXPORT DEBUG: schemeshard_export__create.cpp:1239: TExport::TTxProgress: OnNotifyResult: txId# 281474976720762 2025-05-07T09:04:19.778600Z node 4 :EXPORT DEBUG: schemeshard_export__create.cpp:1270: TExport::TTxProgress: OnNotifyResult: txId# 281474976720762, id# 102, itemIdx# 4294967295 2025-05-07T09:04:19.780172Z node 4 :EXPORT DEBUG: schemeshard_export__create.cpp:329: TExport::TTxProgress: DoComplete 2025-05-07T09:04:19.780257Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-05-07T09:04:19.780296Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [4:707:2647] TestWaitNotification: OK eventTxId 102 |92.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut/unittest >> TBlobStorageControllerGrouperTest::when_one_server_per_rack_in_4_racks_then_can_construct_group_with_4_domains_and_one_small_node [GOOD] >> TopicService::UnknownConsumer [GOOD] >> TBlobStorageControllerGrouperTest::TestGroupFromCandidatesHuge >> DataShardVolatile::DistributedWriteShardRestartBeforePlan+UseSink [GOOD] >> DataShardVolatile::DistributedWriteShardRestartBeforePlan-UseSink >> TBlobStorageControllerGrouperTest::TestGroupFromCandidatesHuge [GOOD] >> TPersQueueTest::SchemeOperationsTest [GOOD] >> TPersQueueTest::SchemeOperationFirstClassCitizen |92.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut/unittest >> TBlobStorageControllerGrouperTest::TestGroupFromCandidatesHuge [GOOD] >> TopicService::UnknownTopic >> BackupRestore::RestoreViewReferenceTable [GOOD] >> BackupRestore::RestoreViewToDifferentDatabase >> TCdcStreamWithInitialScanTests::WithoutPqTransactions [GOOD] >> TCdcStreamWithInitialScanTests::WithPqTransactions >> TGroupMapperTest::NonUniformCluster2 [GOOD] |92.9%| [TM] {asan, default-linux-x86_64, release} 
ydb/core/mind/bscontroller/ut/unittest >> TGroupMapperTest::NonUniformCluster2 [GOOD] >> BackupRestore::TestAllSchemeObjectTypes-EPathTypeReplication [GOOD] >> BackupRestore::TestAllSchemeObjectTypes-EPathTypeView >> TStoragePoolsQuotasTest::DifferentQuotasInteraction-EnableSeparateQuotas [GOOD] >> GenericFederatedQuery::IcebergHadoopTokenSelectAll >> TStoragePoolsQuotasTest::DifferentQuotasInteraction-IsExternalSubdomain-EnableSeparateQuotas [GOOD] >> GenericFederatedQuery::ClickHouseManagedSelectAll >> GenericFederatedQuery::PostgreSQLOnPremSelectAll >> TCdcStreamWithInitialScanTests::WithPqTransactions [GOOD] >> TCdcStreamWithInitialScanTests::AlterStream |92.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/service/ut_topic_reader/unittest |92.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/service/ut_topic_reader/unittest |92.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/service/ut_topic_reader/unittest |92.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/service/ut_topic_reader/unittest |93.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/service/ut_topic_reader/unittest |93.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/service/ut_topic_reader/unittest >> TPersQueueTest::WriteEmptyData [GOOD] >> TPersQueueTest::WriteNonExistingPartition |93.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/service/ut_topic_reader/unittest |93.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/service/ut_topic_reader/unittest |93.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/service/ut_topic_reader/unittest >> RemoteTopicReader::ReadTopic ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TStoragePoolsQuotasTest::DifferentQuotasInteraction-EnableSeparateQuotas [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140] 2025-05-07T09:04:11.885466Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T09:04:11.885548Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:04:11.885588Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-05-07T09:04:11.885621Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T09:04:11.885681Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T09:04:11.885711Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T09:04:11.885761Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue 
configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:04:11.885830Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T09:04:11.886620Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T09:04:11.886996Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T09:04:11.973017Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T09:04:11.973081Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:04:11.993592Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T09:04:11.993788Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T09:04:11.994266Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T09:04:12.000748Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T09:04:12.001104Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T09:04:12.001764Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:12.001991Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T09:04:12.005149Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:12.006690Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:12.006755Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:12.006838Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T09:04:12.006889Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:12.007036Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T09:04:12.007280Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T09:04:12.018656Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T09:04:12.185495Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T09:04:12.185721Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:12.185954Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T09:04:12.186202Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T09:04:12.186261Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:12.188438Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:12.188589Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T09:04:12.188789Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:12.188876Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T09:04:12.188910Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T09:04:12.188940Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T09:04:12.190753Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:12.190811Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T09:04:12.190930Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T09:04:12.192743Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:12.192797Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:12.192837Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:12.192904Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 
1/1 2025-05-07T09:04:12.196413Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T09:04:12.198314Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T09:04:12.198512Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T09:04:12.199505Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:12.199627Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:04:12.199677Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:12.199954Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T09:04:12.200010Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:12.200194Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T09:04:12.200277Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T09:04:12.202233Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:12.202291Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:12.202456Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:12.202497Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [ ... 
94046678944, LocalPathId: 2] was 4 2025-05-07T09:04:24.308164Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-05-07T09:04:24.310367Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 103:0, at schemeshard: 72057594046678944 2025-05-07T09:04:24.311696Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 103:0, at schemeshard: 72057594046678944 2025-05-07T09:04:24.311920Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:24.311957Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 103, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-05-07T09:04:24.312172Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 103, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-05-07T09:04:24.312349Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:24.312393Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:205:2207], at schemeshard: 72057594046678944, txId: 103, path id: 2 2025-05-07T09:04:24.312439Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:205:2207], at schemeshard: 72057594046678944, txId: 103, path id: 3 2025-05-07T09:04:24.312940Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 103:0, at schemeshard: 72057594046678944 2025-05-07T09:04:24.313001Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1036: NTableState::TProposedWaitParts operationId# 103:0 ProgressState at tablet: 72057594046678944 2025-05-07T09:04:24.313089Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:674: all shard schema changes has been received, operationId: 103:0, at schemeshard: 72057594046678944 2025-05-07T09:04:24.313125Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:686: send schema changes ack message, operation: 103:0, datashard: 72075186233409548, at schemeshard: 72057594046678944 2025-05-07T09:04:24.313159Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 103:0 129 -> 240 2025-05-07T09:04:24.313995Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 10 PathOwnerId: 72057594046678944, cookie: 103 2025-05-07T09:04:24.314114Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 10 PathOwnerId: 72057594046678944, cookie: 103 2025-05-07T09:04:24.314155Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 103 2025-05-07T09:04:24.314214Z node 
1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 103, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 10 2025-05-07T09:04:24.314253Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5 2025-05-07T09:04:24.315476Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 103 2025-05-07T09:04:24.315563Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 103 2025-05-07T09:04:24.315590Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 103 2025-05-07T09:04:24.315621Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 103, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 18446744073709551615 2025-05-07T09:04:24.315650Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4 2025-05-07T09:04:24.315717Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 103, ready parts: 0/1, is published: true 2025-05-07T09:04:24.317685Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 103:0, at schemeshard: 72057594046678944 2025-05-07T09:04:24.317732Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_table.cpp:414: TDropTable TProposedDeletePart operationId: 103:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T09:04:24.318028Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove table for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-05-07T09:04:24.318180Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#103:0 progress is 1/1 2025-05-07T09:04:24.318207Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-05-07T09:04:24.318254Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#103:0 progress is 1/1 2025-05-07T09:04:24.318300Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-05-07T09:04:24.318339Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 103, ready parts: 1/1, is published: true 2025-05-07T09:04:24.318392Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:406:2374] message: TxId: 103 2025-05-07T09:04:24.318424Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-05-07T09:04:24.318455Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 103:0 2025-05-07T09:04:24.318480Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 103:0 2025-05-07T09:04:24.318553Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-05-07T09:04:24.319408Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:24.319437Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 0, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-05-07T09:04:24.319962Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-05-07T09:04:24.321261Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-05-07T09:04:24.322336Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:24.322376Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:205:2207], at schemeshard: 72057594046678944, txId: 0, path id: 2 2025-05-07T09:04:24.322627Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-05-07T09:04:24.322659Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [1:1335:3263] 2025-05-07T09:04:24.323171Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 11 PathOwnerId: 72057594046678944, cookie: 0 TestWaitNotification: OK eventTxId 103 2025-05-07T09:04:24.325990Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/SomeDatabase" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T09:04:24.326159Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/SomeDatabase" took 212us result status StatusSuccess 2025-05-07T09:04:24.326514Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/SomeDatabase" PathDescription { Self { Name: "SomeDatabase" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 11 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 11 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SubDomainStateVersion: 4 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 
ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "fast" Kind: "fast_kind" } StoragePools { Name: "large" Kind: "large_kind" } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } StoragePoolsUsage { PoolKind: "large_kind" TotalSize: 0 DataSize: 0 IndexSize: 0 } StoragePoolsUsage { PoolKind: "fast_kind" TotalSize: 0 DataSize: 0 IndexSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 DatabaseQuotas { data_size_hard_quota: 2800 data_size_soft_quota: 2200 storage_quotas { unit_kind: "fast_kind" data_size_hard_quota: 600 data_size_soft_quota: 500 } storage_quotas { unit_kind: "large_kind" data_size_hard_quota: 2200 data_size_soft_quota: 1700 } } SecurityState { } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TStoragePoolsQuotasTest::DifferentQuotasInteraction-IsExternalSubdomain-EnableSeparateQuotas [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140] 2025-05-07T09:04:11.829464Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T09:04:11.829567Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:04:11.829609Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-05-07T09:04:11.829649Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T09:04:11.829693Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T09:04:11.829728Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T09:04:11.829804Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:04:11.829902Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T09:04:11.830816Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T09:04:11.831239Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T09:04:11.919340Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T09:04:11.919408Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:04:11.937308Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T09:04:11.937528Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T09:04:11.937731Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T09:04:11.943933Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T09:04:11.944287Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T09:04:11.945049Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:11.945260Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T09:04:11.948340Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:11.949887Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:11.949957Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:11.950075Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T09:04:11.950131Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:11.950244Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T09:04:11.950496Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T09:04:11.957213Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T09:04:12.117905Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T09:04:12.118221Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: 
//MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:12.118478Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T09:04:12.118761Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T09:04:12.118830Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:12.121398Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:12.121569Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T09:04:12.121802Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:12.121876Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T09:04:12.121920Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T09:04:12.121959Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T09:04:12.124070Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:12.124141Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T09:04:12.124204Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T09:04:12.126041Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:12.126098Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:12.126147Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:12.126224Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T09:04:12.130554Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T09:04:12.132698Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet 
strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T09:04:12.133002Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T09:04:12.134208Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:12.134386Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:04:12.134457Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:12.134800Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T09:04:12.134868Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:12.135139Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T09:04:12.135250Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T09:04:12.137816Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:12.137868Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:12.138093Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:12.138157Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [ ... 
publish path for pathId [OwnerId: 72075186233409546, LocalPathId: 1] was 4 2025-05-07T09:04:24.478249Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72075186233409546, LocalPathId: 2] was 3 2025-05-07T09:04:24.479820Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 104:0, at schemeshard: 72075186233409546 2025-05-07T09:04:24.481772Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 104:0, at schemeshard: 72075186233409546 2025-05-07T09:04:24.483922Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72075186233409546 2025-05-07T09:04:24.483987Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72075186233409546, txId: 104, path id: [OwnerId: 72075186233409546, LocalPathId: 1] 2025-05-07T09:04:24.484225Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72075186233409546, txId: 104, path id: [OwnerId: 72075186233409546, LocalPathId: 2] 2025-05-07T09:04:24.484426Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72075186233409546 2025-05-07T09:04:24.484475Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:443:2395], at schemeshard: 72075186233409546, txId: 104, path id: 1 2025-05-07T09:04:24.484526Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:443:2395], at schemeshard: 72075186233409546, txId: 104, path id: 2 2025-05-07T09:04:24.484653Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 104:0, at schemeshard: 72075186233409546 2025-05-07T09:04:24.484717Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1036: NTableState::TProposedWaitParts operationId# 104:0 ProgressState at tablet: 72075186233409546 2025-05-07T09:04:24.484827Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:674: all shard schema changes has been received, operationId: 104:0, at schemeshard: 72075186233409546 2025-05-07T09:04:24.484874Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:686: send schema changes ack message, operation: 104:0, datashard: 72075186233409549, at schemeshard: 72075186233409546 2025-05-07T09:04:24.484937Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 104:0 129 -> 240 2025-05-07T09:04:24.486496Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72075186233409546, msg: Owner: 72075186233409546 Generation: 2 LocalPathId: 1 Version: 11 PathOwnerId: 72075186233409546, cookie: 104 2025-05-07T09:04:24.486617Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72075186233409546, msg: Owner: 72075186233409546 Generation: 2 LocalPathId: 1 Version: 11 PathOwnerId: 72075186233409546, cookie: 104 2025-05-07T09:04:24.486677Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72075186233409546, 
txId: 104 2025-05-07T09:04:24.486727Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72075186233409546, txId: 104, pathId: [OwnerId: 72075186233409546, LocalPathId: 1], version: 11 2025-05-07T09:04:24.486786Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72075186233409546, LocalPathId: 1] was 5 2025-05-07T09:04:24.488387Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72075186233409546, msg: Owner: 72075186233409546 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72075186233409546, cookie: 104 2025-05-07T09:04:24.488488Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72075186233409546, msg: Owner: 72075186233409546 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72075186233409546, cookie: 104 2025-05-07T09:04:24.488523Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72075186233409546, txId: 104 2025-05-07T09:04:24.488562Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72075186233409546, txId: 104, pathId: [OwnerId: 72075186233409546, LocalPathId: 2], version: 18446744073709551615 2025-05-07T09:04:24.488600Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72075186233409546, LocalPathId: 2] was 4 2025-05-07T09:04:24.488693Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 104, ready parts: 0/1, is published: true 2025-05-07T09:04:24.492351Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 104:0, at schemeshard: 72075186233409546 2025-05-07T09:04:24.492432Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_table.cpp:414: TDropTable TProposedDeletePart operationId: 104:0 ProgressState, at schemeshard: 72075186233409546 2025-05-07T09:04:24.492873Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove table for pathId [OwnerId: 72075186233409546, LocalPathId: 2] was 3 2025-05-07T09:04:24.493101Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#104:0 progress is 1/1 2025-05-07T09:04:24.493150Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-05-07T09:04:24.493198Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#104:0 progress is 1/1 2025-05-07T09:04:24.493242Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-05-07T09:04:24.493288Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 104, ready parts: 1/1, is published: true 2025-05-07T09:04:24.493368Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:552:2491] message: TxId: 104 2025-05-07T09:04:24.493475Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-05-07T09:04:24.493525Z 
node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 104:0 2025-05-07T09:04:24.493565Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 104:0 2025-05-07T09:04:24.493681Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72075186233409546, LocalPathId: 2] was 2 2025-05-07T09:04:24.494503Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72075186233409546 2025-05-07T09:04:24.494552Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72075186233409546, txId: 0, path id: [OwnerId: 72075186233409546, LocalPathId: 1] 2025-05-07T09:04:24.494972Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72075186233409546, cookie: 104 2025-05-07T09:04:24.497432Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72075186233409546, cookie: 104 2025-05-07T09:04:24.498954Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72075186233409546 2025-05-07T09:04:24.499017Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:443:2395], at schemeshard: 72075186233409546, txId: 0, path id: 1 2025-05-07T09:04:24.499573Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 104: got EvNotifyTxCompletionResult 2025-05-07T09:04:24.499624Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 104: satisfy waiter [1:1422:3331] 2025-05-07T09:04:24.500259Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72075186233409546, msg: Owner: 72075186233409546 Generation: 2 LocalPathId: 1 Version: 12 PathOwnerId: 72075186233409546, cookie: 0 TestWaitNotification: OK eventTxId 104 2025-05-07T09:04:24.505289Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/SomeDatabase" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72075186233409546 2025-05-07T09:04:24.505582Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72075186233409546 describe path "/MyRoot/SomeDatabase" took 344us result status StatusSuccess 2025-05-07T09:04:24.506134Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/SomeDatabase" PathDescription { Self { Name: "MyRoot/SomeDatabase" PathId: 1 SchemeshardId: 72075186233409546 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 12 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 12 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 2 SubDomainStateVersion: 4 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 
72057594046678944 PathId_Depricated: 2 ProcessingParams { Version: 2 PlanResolution: 50 Coordinators: 72075186233409547 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409548 SchemeShard: 72075186233409546 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "fast" Kind: "fast_kind" } StoragePools { Name: "large" Kind: "large_kind" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } StoragePoolsUsage { PoolKind: "large_kind" TotalSize: 0 DataSize: 0 IndexSize: 0 } StoragePoolsUsage { PoolKind: "fast_kind" TotalSize: 0 DataSize: 0 IndexSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 DatabaseQuotas { data_size_hard_quota: 2800 data_size_soft_quota: 2200 storage_quotas { unit_kind: "fast_kind" data_size_hard_quota: 600 data_size_soft_quota: 500 } storage_quotas { unit_kind: "large_kind" data_size_hard_quota: 2200 data_size_soft_quota: 1700 } } SecurityState { Audience: "/MyRoot/SomeDatabase" } } } PathId: 1 PathOwnerId: 72075186233409546, at schemeshard: 72075186233409546
>> TGroupMapperTest::MapperSequentialCalls [GOOD]
>> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_ttl_Uint64-pk_types10-all_types10-index10-Uint64--]
>> TCdcStreamWithInitialScanTests::AlterStream [GOOD]
>> TCdcStreamWithInitialScanTests::DropStream
|93.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut/unittest >> TGroupMapperTest::MapperSequentialCalls [GOOD]
>> DataShardVolatile::DistributedWriteThenReadIteratorStream [GOOD]
>> DataShardVolatile::DistributedWriteThenScanQuery
>> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_ttl_Timestamp-pk_types12-all_types12-index12-Timestamp--]
|93.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/dump_restore/py3test
>> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_index_2__SYNC-pk_types2-all_types2-index2---SYNC]
>> TExportToS3Tests::ShouldRetryAtFinalStage [GOOD]
>> TSchemeShardSubDomainTest::TopicDiskSpaceQuotas [GOOD]
>> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_index_1__SYNC-pk_types3-all_types3-index3---SYNC]
>> TCdcStreamWithInitialScanTests::DropStream [GOOD]
>> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_all_types-pk_types7-all_types7-index7---]
>> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_ttl_Datetime-pk_types11-all_types11-index11-Datetime--]
>> DataShardVolatile::DistributedWriteShardRestartBeforePlan-UseSink [GOOD]
|93.0%| [TA] $(B)/ydb/tests/olap/oom/test-results/py3test/{meta.json ... results_accumulator.log}
>> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_ttl_Date-pk_types13-all_types13-index13-Date--]
>> DemoTx::Scenario_4 [GOOD]
>> TPersQueueTest::StreamReadCreateAndDestroyMsgs [GOOD]
>> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_index_1__ASYNC-pk_types5-all_types5-index5---ASYNC]
>> test_common.py::TestCommonYandexWithPath::test_private_create_queue[tables_format_v0-fifo]
>> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_index_0__ASYNC-pk_types6-all_types6-index6---ASYNC]
>> TGroupMapperTest::SanitizeGroupTest3dc [GOOD]
>> TSchemeShardSubDomainTest::TableDiskSpaceQuotas [GOOD]
>> BackupRestore::TestAllSchemeObjectTypes-EPathTypeView [GOOD]
>> TopicService::UnknownTopic [GOOD]
>> TPersQueueTest::DirectReadStop [GOOD]
>> TPersQueueTest::TopicServiceReadBudget [GOOD]
>> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_fifo_groups_with_dlq_in_cloud[tables_format_v1]
>> test_common.py::TestCommonYandexWithPath::test_private_queue_recreation[tables_format_v1-fifo]
>> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_cloud_double_create_queue[std-tables_format_v0]
>> DataShardVolatile::DistributedWriteShardRestartAfterExpectation+UseSink
>> TCdcStreamWithInitialScanTests::RacyAlterStreamAndRestart
>> TPersQueueTest::StreamReadCommitAndStatusMsgs
>> DemoTx::Scenario_5
|93.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/async_replication/py3test >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_index_2__SYNC-pk_types2-all_types2-index2---SYNC] [GOOD]
|93.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/dump_restore/py3test
|93.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/dump_restore/py3test
|93.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/dump_restore/py3test
>> TPersQueueTest::DirectReadCleanCache
>> TPersQueueTest::TopicServiceSimpleHappyWrites
>> BackupRestore::TestAllSchemeObjectTypes-EPathTypeResourcePool [GOOD]
>> BackupRestore::TestAllSchemeObjectTypes-EPathTypeTransfer [GOOD]
>> BackupRestore::TestAllSchemeObjectTypes-EPathTypeSysView [GOOD]
>> RemoteTopicReader::ReadTopic [GOOD]
>> DataShardVolatile::DistributedWriteThenScanQuery [GOOD]
>> TPersQueueCommonTest::Auth_MultipleInflightWriteUpdateTokenRequestWithDifferentValidToken_SessionClosedWithOverloadedError [GOOD]
>> BackupRestore::RestoreViewToDifferentDatabase [GOOD]
>> TopicService::UseDoubleSlashInTopicPath
>> DataShardVolatile::DistributedWriteShardRestartAfterExpectation+UseSink [GOOD]
>> DataShardVolatile::DistributedWriteShardRestartAfterExpectation-UseSink
>> TPersQueueTest::SchemeOperationFirstClassCitizen [GOOD]
>> YdbOlapStore::LogTsRangeDescending [GOOD]
>> TCdcStreamWithInitialScanTests::RacyAlterStreamAndRestart [GOOD]
>> BackupRestore::RestoreViewDependentOnAnotherView
>> TPersQueueCommonTest::Auth_WriteUpdateTokenRequestWithInvalidToken_SessionClosedWithUnauthenticatedError
>> TPersQueueTest::SchemeOperationsCheckPropValues
>> DataShardVolatile::DistributedWriteWithAsyncIndex
>> TCdcStreamWithInitialScanTests::MeteringServerless
>> test_common.py::TestCommonYandexWithPath::test_private_queue_recreation[tables_format_v0-fifo]
>> GenericFederatedQuery::PostgreSQLOnPremSelectAll [GOOD]
>> GenericFederatedQuery::IcebergHadoopTokenSelectAll [GOOD]
>> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_queues_count_over_limit[tables_format_v0]
>> GenericFederatedQuery::IcebergHadoopTokenSelectConstant
>> GenericFederatedQuery::PostgreSQLOnPremSelectConstant
>> TCdcStreamWithInitialScanTests::MeteringServerless [GOOD]
>> TCdcStreamWithInitialScanTests::MeteringDedicated
>> TPersQueueTest::WriteNonExistingPartition [GOOD]
>> TPersQueueTest::WriteNonExistingTopic
|93.0%| [TA] {RESULT} $(B)/ydb/tests/olap/oom/test-results/py3test/{meta.json ... results_accumulator.log}
|93.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut/unittest >> TGroupMapperTest::SanitizeGroupTest3dc [GOOD]
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest >> ColumnShardTiers::TTLUsage
Test command err:
2025-05-07T09:03:58.195052Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:105:2151], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests }
2025-05-07T09:03:58.195203Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError;
2025-05-07T09:03:58.195443Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError;
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0022d3/r3tmp/tmpHt6Jag/pdisk_1.dat
TServer::EnableGrpc on GrpcPort 21901, node 1
TClient is connected to server localhost:27214
2025-05-07T09:03:58.654888Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480
2025-05-07T09:03:58.690971Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded
2025-05-07T09:03:58.694743Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe)
2025-05-07T09:03:58.694793Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe)
2025-05-07T09:03:58.694823Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe)
2025-05-07T09:03:58.695037Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration
2025-05-07T09:03:58.740009Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2025-05-07T09:03:58.740136Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting
2025-05-07T09:03:58.751384Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected
2025-05-07T09:03:58.857104Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnStore, opId: 281474976715657:0, at schemeshard: 72057594046644480
Status: 53 TxId: 281474976715657 SchemeShardStatus: 1 SchemeShardTabletId: 72057594046644480 PathId: 2
2025-05-07T09:03:58.932079Z node 1 :TX_COLUMNSHARD TRACE: columnshard_impl.h:399: StateInit, received event# 268828672, Sender [1:687:2579], Recipient [1:744:2625]: NKikimr::TEvTablet::TEvBoot
2025-05-07T09:03:58.933185Z node 1 :TX_COLUMNSHARD TRACE: columnshard_impl.h:399: StateInit, received event# 268828673, Sender [1:687:2579], Recipient [1:744:2625]: NKikimr::TEvTablet::TEvRestored
2025-05-07T09:03:58.933449Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=72075186224037888;self_id=[1:744:2625];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor;
2025-05-07T09:03:58.949519Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=72075186224037888;self_id=[1:744:2625];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished;
2025-05-07T09:03:58.949792Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 72075186224037888
2025-05-07T09:03:58.956417Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:744:2625];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0;
2025-05-07T09:03:58.956642Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:744:2625];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules;
2025-05-07T09:03:58.956877Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:744:2625];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks;
2025-05-07T09:03:58.956955Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:744:2625];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner;
2025-05-07T09:03:58.957026Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:744:2625];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId;
2025-05-07T09:03:58.957117Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:744:2625];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanInsertionDedup;
2025-05-07T09:03:58.957216Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:744:2625];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer;
2025-05-07T09:03:58.957290Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:744:2625];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks;
2025-05-07T09:03:58.957387Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:744:2625];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2;
2025-05-07T09:03:58.957475Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:744:2625];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks;
2025-05-07T09:03:58.957556Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:744:2625];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot;
2025-05-07T09:03:58.957654Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:744:2625];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta;
2025-05-07T09:03:58.972730Z node 1 :TX_COLUMNSHARD TRACE: columnshard_impl.h:399: StateInit, received event# 268828684, Sender [1:687:2579], Recipient [1:744:2625]: NKikimr::TEvTablet::TEvFollowerSyncComplete
2025-05-07T09:03:58.974004Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 72075186224037888
2025-05-07T09:03:58.974238Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:137;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=11;current_normalizer=CLASS_NAME=Granules;
2025-05-07T09:03:58.974307Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules;
2025-05-07T09:03:58.974453Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found;
2025-05-07T09:03:58.974600Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1;
2025-05-07T09:03:58.974675Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks;
2025-05-07T09:03:58.974732Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks;
2025-05-07T09:03:58.974895Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found;
2025-05-07T09:03:58.974973Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2;
2025-05-07T09:03:58.975023Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner;
2025-05-07T09:03:58.975055Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner;
2025-05-07T09:03:58.975193Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found;
2025-05-07T09:03:58.975248Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4;
2025-05-07T09:03:58.975286Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId;
2025-05-07T09:03:58.975307Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId;
2025-05-07T09:03:58.975386Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found;
2025-05-07T09:03:58.975428Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6;
2025-05-07T09:03:58.975454Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanInsertionDedup;id=CleanInsertionDedup;
2025-05-07T09:03:58.975473Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=8;type=CleanInsertionDedup;
2025-05-07T09:03:58.975536Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanInsertionDedup;id=8;
2025-05-07T09:03:58.975578Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer;
2025-05-07T09:03:58.975601Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer;
2025-05-07T09:03:58.975636Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9;
2025-05 ... 07T09:04:27.340260Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037888;self_id=[1:744:2625];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=constructor_meta.cpp:54;memory_size=78;data_size=50;sum=2097;count=53;
2025-05-07T09:04:27.340304Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037888;self_id=[1:744:2625];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=constructor_meta.cpp:75;memory_size=270;data_size=258;sum=6897;count=54;size_of_meta=144;
2025-05-07T09:04:27.340349Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037888;self_id=[1:744:2625];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=constructor_portion.cpp:40;memory_size=342;data_size=330;sum=8841;count=27;size_of_portion=216;
2025-05-07T09:04:27.340491Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxWriteIndex[28] (CS::GENERAL) apply at tablet 72075186224037888
2025-05-07T09:04:27.345519Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager on execute at tablet 72075186224037888 Save Batch GenStep: 1:18 Blob count: 2
2025-05-07T09:04:27.345691Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Index: tables 1 inserted {blob_bytes=2912376;raw_bytes=96858227;count=2;records=82491} compacted {blob_bytes=0;raw_bytes=0;count=0;records=0} s-compacted {blob_bytes=13924104;raw_bytes=469237864;count=7;records=393915} inactive {blob_bytes=23454448;raw_bytes=787207821;count=16;records=666063} evicted {blob_bytes=0;raw_bytes=0;count=0;records=0} at tablet 72075186224037888
TEvBlobStorage::TEvPut tId=72075186224037888;c=1;:74/0:size=2443;count=18;;1:size=62454;count=9;;2:size=0;count=0;;3:size=1466448;count=1;;4:size=1479208;count=1;;5:size=1458600;count=1;;6:size=1445448;count=1;;7:size=1445920;count=1;;8:size=808584;count=1;;9:size=2711224;count=4;;10:size=1025584;count=1;;11:size=1445744;count=1;;12:size=1445408;count=1;;13:size=1445360;count=1;;14:size=2573720;count=4;;15:size=989536;count=1;;16:size=1445928;count=1;;17:size=1445608;count=1;;18:size=1445400;count=1;;19:size=2194408;count=3;;20:size=1574288;count=2;;21:size=0;count=0;;22:size=0;count=0;;23:size=0;count=0;;24:size=0;count=0;;25:size=0;count=0;;26:size=0;count=0;;27:size=0;count=0;;28:size=0;count=0;;29:size=0;count=0;;30:size=0;count=0;;31:size=0;count=0;;32:size=0;count=0;;33:size=0;count=0;;34:size=0;count=0;;35:size=0;count=0;;36:size=0;count=0;;37:size=0;count=0;;38:size=0;count=0;;39:size=0;count=0;;40:size=0;count=0;;41:size=0;count=0;;42:size=0;count=0;;43:size=0;count=0;;44:size=0;count=0;;45:size=0;count=0;;46:size=0;count=0;;47:size=0;count=0;;48:size=0;count=0;;49:size=0;count=0;;50:size=0;count=0;;51:size=0;count=0;;52:size=0;count=0;;53:size=0;count=0;;54:size=0;count=0;;55:size=0;count=0;;56:size=0;count=0;;57:size=0;count=0;;58:size=0;count=0;;59:size=0;count=0;;60:size=0;count=0;;61:size=0;count=0;;62:size=0;count=0;;63:size=0;count=0;;64:size=0;count=0;;65:size=0;count=0;;
TEvBlobStorage::TEvPut tId=72075186224037888;c=0;:74/0:size=2512;count=19;;1:size=62454;count=9;;2:size=0;count=0;;3:size=1466448;count=1;;4:size=1479208;count=1;;5:size=1458600;count=1;;6:size=1445448;count=1;;7:size=1445920;count=1;;8:size=808584;count=1;;9:size=2711224;count=4;;10:size=1025584;count=1;;11:size=1445744;count=1;;12:size=1445408;count=1;;13:size=1445360;count=1;;14:size=2573720;count=4;;15:size=989536;count=1;;16:size=1445928;count=1;;17:size=1445608;count=1;;18:size=1445400;count=1;;19:size=2194408;count=3;;20:size=1574288;count=2;;21:size=0;count=0;;22:size=0;count=0;;23:size=0;count=0;;24:size=0;count=0;;25:size=0;count=0;;26:size=0;count=0;;27:size=0;count=0;;28:size=0;count=0;;29:size=0;count=0;;30:size=0;count=0;;31:size=0;count=0;;32:size=0;count=0;;33:size=0;count=0;;34:size=0;count=0;;35:size=0;count=0;;36:size=0;count=0;;37:size=0;count=0;;38:size=0;count=0;;39:size=0;count=0;;40:size=0;count=0;;41:size=0;count=0;;42:size=0;count=0;;43:size=0;count=0;;44:size=0;count=0;;45:size=0;count=0;;46:size=0;count=0;;47:size=0;count=0;;48:size=0;count=0;;49:size=0;count=0;;50:size=0;count=0;;51:size=0;count=0;;52:size=0;count=0;;53:size=0;count=0;;54:size=0;count=0;;55:size=0;count=0;;56:size=0;count=0;;57:size=0;count=0;;58:size=0;count=0;;59:size=0;count=0;;60:size=0;count=0;;61:size=0;count=0;;62:size=0;count=0;;63:size=0;count=0;;64:size=0;count=0;;65:size=0;count=0;;
2025-05-07T09:04:27.358068Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037888;task_id=46642b54-2b2211f0-be0c3459-dfbceee6;fline=abstract.cpp:53;event=WriteIndexComplete;type=CS::GENERAL;success=1;
2025-05-07T09:04:27.358144Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037888;task_id=46642b54-2b2211f0-be0c3459-dfbceee6;fline=with_appended.cpp:65;portions=26,27,;task_id=46642b54-2b2211f0-be0c3459-dfbceee6;
2025-05-07T09:04:27.358498Z node 1 :TX_COLUMNSHARD TRACE: log.cpp:784: tablet_id=72075186224037888;task_id=46642b54-2b2211f0-be0c3459-dfbceee6;fline=granule.cpp:19;event=upsert_portion;portion=(portion_id:26;path_id:3;records_count:55488;schema_version:1;level:0;;column_size:1954792;index_size:0;meta:((produced=SPLIT_COMPACTED;)););path_id=3;
2025-05-07T09:04:27.358778Z node 1 :TX_COLUMNSHARD TRACE: log.cpp:784: tablet_id=72075186224037888;task_id=46642b54-2b2211f0-be0c3459-dfbceee6;fline=tiering.cpp:49;tiering_info=__DEFAULT/0.000000s;$$DELETE/266252.000000s;;
2025-05-07T09:04:27.358930Z node 1 :TX_COLUMNSHARD TRACE: log.cpp:784: tablet_id=72075186224037888;task_id=46642b54-2b2211f0-be0c3459-dfbceee6;fline=granule.cpp:19;event=upsert_portion;portion=(portion_id:27;path_id:3;records_count:55480;schema_version:1;level:0;;column_size:1954512;index_size:0;meta:((produced=SPLIT_COMPACTED;)););path_id=3;
2025-05-07T09:04:27.359142Z node 1 :TX_COLUMNSHARD TRACE: log.cpp:784: tablet_id=72075186224037888;task_id=46642b54-2b2211f0-be0c3459-dfbceee6;fline=tiering.cpp:49;tiering_info=__DEFAULT/0.000000s;$$DELETE/321732.000000s;;
2025-05-07T09:04:27.359223Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037888;task_id=46642b54-2b2211f0-be0c3459-dfbceee6;fline=manager.cpp:15;event=unlock;process_id=CS::GENERAL::46642b54-2b2211f0-be0c3459-dfbceee6;
2025-05-07T09:04:27.359297Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037888;task_id=46642b54-2b2211f0-be0c3459-dfbceee6;fline=granule.cpp:101;event=OnCompactionFinished;info=(granule:3;path_id:3;size:16841008;portions_count:27;);
2025-05-07T09:04:27.359350Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037888;task_id=46642b54-2b2211f0-be0c3459-dfbceee6;tablet_id=72075186224037888;fline=columnshard_impl.cpp:516;event=EnqueueBackgroundActivities;periodic=0;
2025-05-07T09:04:27.359416Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037888;task_id=46642b54-2b2211f0-be0c3459-dfbceee6;tablet_id=72075186224037888;fline=columnshard_impl.cpp:785;event=start_indexation_tasks;insert_overload_size=0;
2025-05-07T09:04:27.359477Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037888;task_id=46642b54-2b2211f0-be0c3459-dfbceee6;tablet_id=72075186224037888;fline=column_engine_logs.cpp:246;event=StartCleanup;portions_count=1;
2025-05-07T09:04:27.359547Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037888;task_id=46642b54-2b2211f0-be0c3459-dfbceee6;tablet_id=72075186224037888;fline=column_engine_logs.cpp:288;event=StartCleanupStop;snapshot=plan_step=0;tx_id=18446744073709551615;;current_snapshot_ts=21000;
2025-05-07T09:04:27.359595Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037888;task_id=46642b54-2b2211f0-be0c3459-dfbceee6;tablet_id=72075186224037888;fline=column_engine_logs.cpp:321;event=StartCleanup;portions_count=1;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000;
2025-05-07T09:04:27.359648Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037888;task_id=46642b54-2b2211f0-be0c3459-dfbceee6;tablet_id=72075186224037888;fline=columnshard_impl.cpp:1061;background=cleanup;skip_reason=no_changes;
2025-05-07T09:04:27.359690Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037888;task_id=46642b54-2b2211f0-be0c3459-dfbceee6;tablet_id=72075186224037888;fline=columnshard_impl.cpp:1093;background=cleanup;skip_reason=no_changes;
2025-05-07T09:04:27.359762Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037888;task_id=46642b54-2b2211f0-be0c3459-dfbceee6;tablet_id=72075186224037888;queue=ttl;external_count=0;fline=granule.cpp:168;event=skip_actualization;waiting=0.599000s;
2025-05-07T09:04:27.359815Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037888;task_id=46642b54-2b2211f0-be0c3459-dfbceee6;tablet_id=72075186224037888;fline=columnshard_impl.cpp:1002;background=ttl;skip_reason=no_changes;
2025-05-07T09:04:27.360053Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 72075186224037888 Save Batch GenStep: 1:18 Blob count: 2
VERIFY failed (2025-05-07T09:04:27.360279Z): tablet_id=72075186224037888;task_id=46642b54-2b2211f0-be0c3459-dfbceee6;verification=CompactionsLimit.Dec() >= 0;fline=ro_controller.cpp:39;
ydb/library/actors/core/log.cpp:800 ~TVerifyFormattedRecordWriter(): requirement false failed
NPrivate::InternalPanicImpl(int, char const*, char const*, int, int, int, TBasicStringBuf>, char const*, unsigned long)+873 (0x190AF859)
NPrivate::Panic(NPrivate::TStaticBuf const&, int, char const*, char const*, char const*, ...)+571 (0x1909DEEB)
NActors::TVerifyFormattedRecordWriter::~TVerifyFormattedRecordWriter()+326 (0x1A3B5AE6)
NKikimr::NYDBTest::NColumnShard::TReadOnlyController::DoOnWriteIndexComplete(NKikimr::NOlap::TColumnEngineChanges const&, NKikimr::NColumnShard::TColumnShard const&)+4577 (0x48D1D041)
NKikimr::NColumnShard::TTxWriteIndex::Complete(NActors::TActorContext const&)+4797 (0x306D5DAD)
NKikimr::NTabletFlatExecutor::TSeat::Complete(NActors::TActorContext const&, bool)+899 (0x1EE15643)
NKikimr::NTabletFlatExecutor::TLogicRedo::Confirm(unsigned int, NActors::TActorContext const&, NActors::TActorId const&)+3856 (0x1ECF9C30)
NKikimr::NTabletFlatExecutor::TExecutor::Handle(TAutoPtr, TDelete>&, NActors::TActorContext const&)+1521 (0x1EB46261)
NKikimr::NTabletFlatExecutor::TExecutor::StateWork(TAutoPtr&)+3039 (0x1EAE234F)
NActors::IActor::Receive(TAutoPtr&)+237 (0x1A2E6A4D)
NActors::TTestActorRuntimeBase::SendInternal(TAutoPtr, unsigned int, bool)+3557 (0x35E258E5)
NActors::TTestActorRuntimeBase::DispatchEventsInternal(NActors::TDispatchOptions const&, TInstant)+12602 (0x35E1E15A)
NActors::TTestActorRuntimeBase::DispatchEvents(NActors::TDispatchOptions const&)+49 (0x35E1AF01)
NKikimr::Tests::NCS::THelperSchemaless::SendDataViaActorSystem(TBasicString>, std::__y1::shared_ptr, Ydb::StatusIds_StatusCode const&) const+7904 (0x367ABF80)
NKikimr::NTestSuiteColumnShardTiers::TTestCaseTTLUsage::Execute_(NUnitTest::TTestContext&)+4568 (0x18C948C8)
std::__y1::__function::__func, void ()>::operator()()+280 (0x18CA6BB8)
TColoredProcessor::Run(std::__y1::function, TBasicString> const&, char const*, bool)+534 (0x1955C726)
NUnitTest::TTestBase::Run(std::__y1::function, TBasicString> const&, char const*, bool)+505 (0x1952C0A9)
NKikimr::NTestSuiteColumnShardTiers::TCurrentTest::Execute()+1204 (0x18CA5B64)
NUnitTest::TTestFactory::Execute()+2438 (0x1952D976)
NUnitTest::RunMain(int, char**)+5213 (0x19556C9D)
??+0 (0x7F6746EA1D90)
__libc_start_main+128 (0x7F6746EA1E40)
_start+41 (0x1661D029)
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::TopicDiskSpaceQuotas [GOOD]
Test command err:
Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140]
IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140]
Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140]
2025-05-07T09:04:11.729647Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1
2025-05-07T09:04:11.729760Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10
2025-05-07T09:04:11.729814Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s
2025-05-07T09:04:11.729852Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration
2025-05-07T09:04:11.729931Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000
2025-05-07T09:04:11.729992Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000
2025-05-07T09:04:11.730060Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10
2025-05-07T09:04:11.730138Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false
2025-05-07T09:04:11.731042Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources#
2025-05-07T09:04:11.731438Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute
2025-05-07T09:04:11.819609Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs
2025-05-07T09:04:11.819675Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded
2025-05-07T09:04:11.836950Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete
2025-05-07T09:04:11.837128Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute
2025-05-07T09:04:11.837326Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944
2025-05-07T09:04:11.844875Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete
2025-05-07T09:04:11.845224Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0
2025-05-07T09:04:11.846091Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944
2025-05-07T09:04:11.846317Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944
2025-05-07T09:04:11.849562Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944
2025-05-07T09:04:11.851112Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944
2025-05-07T09:04:11.851176Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944
2025-05-07T09:04:11.851259Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute
2025-05-07T09:04:11.851319Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1]
2025-05-07T09:04:11.851440Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete
2025-05-07T09:04:11.851710Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944
2025-05-07T09:04:11.858602Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0
Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062]
2025-05-07T09:04:12.015656Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944
2025-05-07T09:04:12.015935Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944
2025-05-07T09:04:12.016203Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0
2025-05-07T09:04:12.016495Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944
2025-05-07T09:04:12.016559Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944
2025-05-07T09:04:12.019251Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944
2025-05-07T09:04:12.019425Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot
2025-05-07T09:04:12.019669Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944
2025-05-07T09:04:12.019737Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944
2025-05-07T09:04:12.019822Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state
2025-05-07T09:04:12.019862Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3
2025-05-07T09:04:12.022270Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944
2025-05-07T09:04:12.022353Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944
2025-05-07T09:04:12.022420Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128
2025-05-07T09:04:12.025183Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944
2025-05-07T09:04:12.025259Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944
2025-05-07T09:04:12.025313Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944
2025-05-07T09:04:12.025397Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1
2025-05-07T09:04:12.029685Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545
2025-05-07T09:04:12.032154Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816
2025-05-07T09:04:12.032404Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545
FAKE_COORDINATOR: Add transaction: 1 at step: 5000001
FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0
FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001
2025-05-07T09:04:12.033564Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944
2025-05-07T09:04:12.033719Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944
2025-05-07T09:04:12.033773Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944
2025-05-07T09:04:12.034121Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240
2025-05-07T09:04:12.034183Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944
2025-05-07T09:04:12.034386Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1
2025-05-07T09:04:12.034501Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944
FAKE_COORDINATOR: Erasing txId 1
2025-05-07T09:04:12.036977Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944
2025-05-07T09:04:12.037044Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1]
2025-05-07T09:04:12.037284Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944
2025-05-07T09:04:12.037340Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [ ...
T09:04:27.707963Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 103, path id: [OwnerId: 72057594046678944, LocalPathId: 2]
2025-05-07T09:04:27.708077Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 103, path id: [OwnerId: 72057594046678944, LocalPathId: 2]
2025-05-07T09:04:27.708134Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 103, path id: [OwnerId: 72057594046678944, LocalPathId: 3]
2025-05-07T09:04:27.708235Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944
2025-05-07T09:04:27.708268Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:205:2207], at schemeshard: 72057594046678944, txId: 103, path id: 2
2025-05-07T09:04:27.708311Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:205:2207], at schemeshard: 72057594046678944, txId: 103, path id: 2
2025-05-07T09:04:27.708328Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:205:2207], at schemeshard: 72057594046678944, txId: 103, path id: 3
2025-05-07T09:04:27.708373Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 103:0, at schemeshard: 72057594046678944
2025-05-07T09:04:27.708406Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046678944] TDone opId# 103:0 ProgressState
2025-05-07T09:04:27.708498Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#103:0 progress is 1/1
2025-05-07T09:04:27.708529Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 103 ready parts: 1/1
2025-05-07T09:04:27.708567Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#103:0 progress is 1/1
2025-05-07T09:04:27.708623Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 103 ready parts: 1/1
2025-05-07T09:04:27.708654Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 103, ready parts: 1/1, is published: false
2025-05-07T09:04:27.708690Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 103 ready parts: 1/1
2025-05-07T09:04:27.708721Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 103:0
2025-05-07T09:04:27.708746Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 103:0
2025-05-07T09:04:27.708877Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2
2025-05-07T09:04:27.708926Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 103, publications: 2, subscribers: 0
2025-05-07T09:04:27.708957Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 103, [OwnerId: 72057594046678944, LocalPathId: 2], 9
2025-05-07T09:04:27.708983Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 103, [OwnerId: 72057594046678944, LocalPathId: 3], 18446744073709551615
2025-05-07T09:04:27.710188Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 3 TxId_Deprecated: 3
2025-05-07T09:04:27.710361Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 103
2025-05-07T09:04:27.710464Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 103
2025-05-07T09:04:27.710504Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 103
2025-05-07T09:04:27.710553Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 103, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 18446744073709551615
2025-05-07T09:04:27.710609Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1
2025-05-07T09:04:27.711052Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 4 TxId_Deprecated: 4
2025-05-07T09:04:27.711205Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5898: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 3 ShardOwnerId: 72057594046678944 ShardLocalIdx: 3, at schemeshard: 72057594046678944
2025-05-07T09:04:27.711807Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944
2025-05-07T09:04:27.711862Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944
2025-05-07T09:04:27.711936Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5
2025-05-07T09:04:27.712438Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5898: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 4 ShardOwnerId: 72057594046678944 ShardLocalIdx: 4, at schemeshard: 72057594046678944
2025-05-07T09:04:27.712675Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 9 PathOwnerId: 72057594046678944, cookie: 103
2025-05-07T09:04:27.712747Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 9 PathOwnerId: 72057594046678944, cookie: 103
2025-05-07T09:04:27.712782Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 103
2025-05-07T09:04:27.712815Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 103, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 9
2025-05-07T09:04:27.712849Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4
2025-05-07T09:04:27.712951Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 103, subscribers: 0
2025-05-07T09:04:27.716558Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103
2025-05-07T09:04:27.719026Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:3
2025-05-07T09:04:27.719150Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944
2025-05-07T09:04:27.719241Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:4
2025-05-07T09:04:27.719381Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103
TestModificationResult got TxId: 103, wait until txId: 103
TestWaitNotification wait txId: 103
2025-05-07T09:04:27.719744Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 103: send EvNotifyTxCompletion
2025-05-07T09:04:27.719790Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 103
2025-05-07T09:04:27.720437Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 103, at schemeshard: 72057594046678944
2025-05-07T09:04:27.720575Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult
2025-05-07T09:04:27.720615Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [1:768:2683]
TestWaitNotification: OK eventTxId 103
2025-05-07T09:04:28.223870Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944
2025-05-07T09:04:28.224075Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_1" took 272us result status StatusSuccess
2025-05-07T09:04:28.224423Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_1" PathDescription { Self { Name: "USER_1" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 9 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 9 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SubDomainStateVersion: 2 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "name_USER_0_kind_hdd-1" Kind: "hdd-1" } StoragePools { Name: "name_USER_0_kind_hdd-2" Kind: "hdd-2" } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 DatabaseQuotas { data_size_hard_quota: 1 } SecurityState { } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944
------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/backup_ut/unittest >> BackupRestore::TestAllSchemeObjectTypes-EPathTypeSysView [GOOD]
Test command err:
2025-05-07T09:03:34.380289Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501626876185812993:2074];send_to=[0:7307199536658146131:7762515];
2025-05-07T09:03:34.380374Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message;
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0020ce/r3tmp/tmptTpVXC/pdisk_1.dat
2025-05-07T09:03:35.495155Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError;
2025-05-07T09:03:35.675884Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded
2025-05-07T09:03:35.716086Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2025-05-07T09:03:35.726161Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting
2025-05-07T09:03:35.738054Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected
TServer::EnableGrpc on GrpcPort 27015, node 1
2025-05-07T09:03:37.042023Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe)
2025-05-07T09:03:37.042046Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe)
2025-05-07T09:03:37.042057Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe)
2025-05-07T09:03:37.042177Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration
TClient is connected to server localhost:26026
WaitRootIsUp 'Root'...
TClient::Ls request: Root
TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED)
WaitRootIsUp 'Root' success.
2025-05-07T09:03:39.112933Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480
waiting...
2025-05-07T09:03:39.316564Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501626897660650573:2340], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T09:03:39.316659Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T09:03:39.380602Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501626876185812993:2074];send_to=[0:7307199536658146131:7762515];
2025-05-07T09:03:39.380663Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout;
2025-05-07T09:03:39.852000Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480
2025-05-07T09:03:40.009751Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501626901955618060:2352], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T09:03:40.009846Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501626901955618065:2355], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T09:03:40.009858Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T09:03:40.019300Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480
2025-05-07T09:03:40.035856Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501626901955618067:2356], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking }
2025-05-07T09:03:40.124037Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501626901955618147:2818] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 }
2025-05-07T09:03:40.483973Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710661. Ctx: { TraceId: 01jtmzs0f80msyf2b71jv67e7g, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NmYxMzEyYTEtZTI4NDYyODAtNDYwZTM5MjgtNjlkZmY5Zjc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root
2025-05-07T09:03:40.846199Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710662. Ctx: { TraceId: 01jtmzs10828zz4sa97qazbmx9, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NmYxMzEyYTEtZTI4NDYyODAtNDYwZTM5MjgtNjlkZmY5Zjc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root
Backup "/Root" to "/home/runner/.ya/build/build_root/zvgn/0020ce/r3tmp/tmpCUyJxq/"
Create temporary directory "/Root/~backup_20250507T090340" in database
Process "/home/runner/.ya/build/build_root/zvgn/0020ce/r3tmp/tmpCUyJxq/table"
Copy tables: { src: "/Root/table", dst: "/Root/~backup_20250507T090340/table" }
Backup table "/Root/~backup_20250507T090340/table" to "/home/runner/.ya/build/build_root/zvgn/0020ce/r3tmp/tmpCUyJxq/table"
Describe table "/Root/~backup_20250507T090340/table"
Write scheme into "/home/runner/.ya/build/build_root/zvgn/0020ce/r3tmp/tmpCUyJxq/table/scheme.pb"
Describe table "/Root/table"
Write ACL into "/home/runner/.ya/build/build_root/zvgn/0020ce/r3tmp/tmpCUyJxq/table/permissions.pb"
Read table "/Root/~backup_20250507T090340/table"
Write data into "/home/runner/.ya/build/build_root/zvgn/0020ce/r3tmp/tmpCUyJxq/table/data_00.csv"
Drop table "/Root/~backup_20250507T090340/table"
Remove temporary directory "/Root/~backup_20250507T090340" in database
2025-05-07T09:03:41.221192Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037889 not found
2025-05-07T09:03:41.236902Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpRmDir, opId: 281474976710668:0, at schemeshard: 72057594046644480
Backup completed successfully
2025-05-07T09:03:41.318852Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037888 not found
Restore "/home/runner/.ya/build/build_root/zvgn/0020ce/r3tmp/tmpCUyJxq/" to "/Root"
Resolved db base path: "/Root"
Restore folder "/home/runner/.ya/build/build_root/zvgn/0020ce/r3tmp/tmpCUyJxq/" to "/Root"
Process "/home/runner/.ya/build/build_root/zvgn/0020ce/r3tmp/tmpCUyJxq/table"
Read scheme from "/home/runner/.ya/build/build_root/zvgn/0020ce/r3tmp/tmpCUyJxq/table/scheme.pb"
Restore table "/home/runner/.ya/build/build_root/zvgn/0020ce/r3tmp/tmpCUyJxq/table" to "/Root/table"
2025-05-07T09:03:41.368379Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480
Created "/Root/table"
Read data from "/home/runner/.ya/build/build_root/zvgn/0020ce/r3tmp/tmpCUyJxq/table/data_00.csv"
2025-05-07T09:03:41.523778Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710671. Ctx: { TraceId: 01jtmzs1vj6sh4brayjett8pnh, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MThjNzZkZTUtM2RiYzU3YTQtODk3OWFhNTEtODIxMzRmYzM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root
Restore ACL "/home/runner/.ya/build/build_root/zvgn/0020ce/r3tmp/tmpCUyJxq/table" to "/Root/table"
Read ACL from "/home/runner/.ya/build/build_root/zvgn/0020ce/r3tmp/tmpCUyJxq/table/permissions.pb"
2025-05-07T09:03:41.550805Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710672:0, at schemeshard: 72057594046644480
Restore completed successfully
2025-05-07T09:03:41.650142Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710673. Ctx: { TraceId: 01jtmzs1zxakagyk4akr8k44p9, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NmYxMzEyYTEtZTI4NDYyODAtNDYwZTM5MjgtNjlkZmY5Zjc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root
2025-05-07T09:03:42.830827Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7501626914611081720:2073];send_to=[0:7307199536658146131:7762515];
2025-05-07T09:03:42.830911Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message;
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0020ce/r3tmp/tmpnJfjhm/pdisk_1.dat
2025-05-07T09:03:42.944219Z node 4 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded
2025-05-07T09:03:42.981847Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2025-05-07T09:03:42.981952Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting
2025-05-07T09:03:42.985582Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected
TS ...
Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710746:0, at schemeshard: 72057594046644480
2025-05-07T09:04:17.481229Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710751:0, at schemeshard: 72057594046644480
2025-05-07T09:04:18.142025Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710758:0, at schemeshard: 72057594046644480
2025-05-07T09:04:18.740565Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7261: Cannot get console configs
2025-05-07T09:04:18.740601Z node 16 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded
2025-05-07T09:04:21.618124Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateReplication, opId: 281474976710780:0, at schemeshard: 72057594046644480
Created "/Root/replication"
Restore ACL "/home/runner/.ya/build/build_root/zvgn/0020ce/r3tmp/tmp7zRNh2/replication" to "/Root/replication"
Read ACL from "/home/runner/.ya/build/build_root/zvgn/0020ce/r3tmp/tmp7zRNh2/replication/permissions.pb"
2025-05-07T09:04:21.650585Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710781:0, at schemeshard: 72057594046644480
Process "/home/runner/.ya/build/build_root/zvgn/0020ce/r3tmp/tmp7zRNh2/replica"
Read scheme from "/home/runner/.ya/build/build_root/zvgn/0020ce/r3tmp/tmp7zRNh2/replica/scheme.pb"
Restore table "/home/runner/.ya/build/build_root/zvgn/0020ce/r3tmp/tmp7zRNh2/replica" to "/Root/replica"
2025-05-07T09:04:21.661559Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710782:0, at schemeshard: 72057594046644480
Created "/Root/replica"
Read data from "/home/runner/.ya/build/build_root/zvgn/0020ce/r3tmp/tmp7zRNh2/replica/data_00.csv"
Restore ACL "/home/runner/.ya/build/build_root/zvgn/0020ce/r3tmp/tmp7zRNh2/replica" to "/Root/replica"
Read ACL from "/home/runner/.ya/build/build_root/zvgn/0020ce/r3tmp/tmp7zRNh2/replica/permissions.pb"
2025-05-07T09:04:21.727659Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710783:0, at schemeshard: 72057594046644480
Restore completed successfully
2025-05-07T09:04:21.980972Z node 16 :CHANGE_EXCHANGE WARN: change_sender_cdc_stream.cpp:398: [CdcChangeSenderMain][72075186224037892:1][16:7501627078202291024:3359] Failed entry at 'ResolveTopic': entry# { Path: TableId: [72057594046644480:28:0] RequestType: ByTableId Operation: OpTopic RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }
2025-05-07T09:04:21.997126Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976710788:0, at schemeshard: 72057594046644480
2025-05-07T09:04:22.041279Z node 16 :REPLICATION_CONTROLLER ERROR: dst_creator.cpp:594: [DstCreator][rid 1][tid 1] Error: status# StatusSchemeError, reason# Empty replication config 2025-05-07T09:04:22.041496Z node 16 :REPLICATION_CONTROLLER ERROR: tx_create_dst_result.cpp:70: [controller 72075186224037903][TxCreateDstResult] Create dst error: rid# 1, tid# 1, StatusSchemeError, Empty replication config 2025-05-07T09:04:24.714136Z node 19 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[19:7501627094304252413:2073];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:04:24.714220Z node 19 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0020ce/r3tmp/tmpHbLw1m/pdisk_1.dat 2025-05-07T09:04:24.890099Z node 19 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:04:24.946443Z node 19 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(19, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:04:24.946578Z node 19 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(19, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:04:24.950039Z node 19 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(19, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 16174, node 19 2025-05-07T09:04:25.027163Z node 19 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:04:25.027192Z node 19 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:04:25.027210Z node 19 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:04:25.027387Z node 19 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:12218 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:04:25.403931Z node 19 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T09:04:29.191286Z node 19 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [19:7501627115779089976:2339], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:04:29.191383Z node 19 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [19:7501627115779089984:2342], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:04:29.191464Z node 19 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:04:29.195731Z node 19 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480 2025-05-07T09:04:29.236563Z node 19 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [19:7501627115779089990:2343], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-05-07T09:04:29.300334Z node 19 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [19:7501627115779090063:2686] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:04:29.714572Z node 19 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715661. Ctx: { TraceId: 01jtmztgqddm1cn9fh8kb8d6mt, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=19&id=ZWUxMzY3MTMtMjliYWQyZmQtNTFhYzU4NDktNjIwMDE3MTg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:04:29.730369Z node 19 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[19:7501627094304252413:2073];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:04:29.732028Z node 19 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Backup "/Root" to "/home/runner/.ya/build/build_root/zvgn/0020ce/r3tmp/tmpThYvJT/"Create temporary directory "/Root/~backup_20250507T090429" in databaseProcess "/home/runner/.ya/build/build_root/zvgn/0020ce/r3tmp/tmpThYvJT/view"Backup view "/Root/view" to "/home/runner/.ya/build/build_root/zvgn/0020ce/r3tmp/tmpThYvJT/view"Write view into "/home/runner/.ya/build/build_root/zvgn/0020ce/r3tmp/tmpThYvJT/view/create_view.sql"Write ACL into "/home/runner/.ya/build/build_root/zvgn/0020ce/r3tmp/tmpThYvJT/view/permissions.pb"Remove temporary directory "/Root/~backup_20250507T090429" in database2025-05-07T09:04:29.869877Z node 19 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpRmDir, opId: 281474976715663:0, at schemeshard: 72057594046644480 Backup completed successfullyRestore "/home/runner/.ya/build/build_root/zvgn/0020ce/r3tmp/tmpThYvJT/" to "/Root"Resolved db base path: "/Root"Restore folder "/home/runner/.ya/build/build_root/zvgn/0020ce/r3tmp/tmpThYvJT/" to "/Root"Process "/home/runner/.ya/build/build_root/zvgn/0020ce/r3tmp/tmpThYvJT/view"Restore view "/home/runner/.ya/build/build_root/zvgn/0020ce/r3tmp/tmpThYvJT/view" to "/Root/view"Read view from "/home/runner/.ya/build/build_root/zvgn/0020ce/r3tmp/tmpThYvJT/view/create_view.sql"Created "/Root/view"Restore ACL "/home/runner/.ya/build/build_root/zvgn/0020ce/r3tmp/tmpThYvJT/view" to "/Root/view"Read ACL from "/home/runner/.ya/build/build_root/zvgn/0020ce/r3tmp/tmpThYvJT/view/permissions.pb"2025-05-07T09:04:30.115843Z node 19 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715666:0, at schemeshard: 72057594046644480 Restore completed successfully2025-05-07T09:04:30.331291Z node 19 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715667. Ctx: { TraceId: 01jtmzthdk7pvwfgqz879kjpp8, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=19&id=ZWUxMzY3MTMtMjliYWQyZmQtNTFhYzU4NDktNjIwMDE3MTg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/service/ut_topic_reader/unittest >> RemoteTopicReader::ReadTopic [GOOD] Test command err: 2025-05-07T09:04:25.679066Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501627098445482982:2061];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:04:25.679183Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00417e/r3tmp/tmpuOmEho/pdisk_1.dat 2025-05-07T09:04:26.147398Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:04:26.200000Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:04:26.202200Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:04:26.207036Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:10778 TServer::EnableGrpc on GrpcPort 19110, node 1 2025-05-07T09:04:26.635082Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:04:26.635114Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:04:26.635122Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:04:26.635272Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:10778 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:04:27.232048Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T09:04:27.509136Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:2, at schemeshard: 72057594046644480 2025-05-07T09:04:28.904536Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501627111330385779:2363], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:04:28.904655Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501627111330385795:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:04:28.904735Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:04:28.905211Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501627111330385794:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:04:28.910286Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710660:2, at schemeshard: 72057594046644480 2025-05-07T09:04:28.922950Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501627111330385801:2439] txid# 281474976710661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exists but creating right now (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateCreate)" severity: 1 } 2025-05-07T09:04:28.923002Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501627111330385799:2371], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710660 completed, doublechecking } 2025-05-07T09:04:28.923362Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710660, at schemeshard: 72057594046644480 2025-05-07T09:04:28.923496Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501627111330385798:2370], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710660 completed, doublechecking } 2025-05-07T09:04:28.977243Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501627111330385848:2470] txid# 281474976710662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:04:28.995649Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501627111330385866:2478] txid# 281474976710663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:04:30.551058Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T09:04:30.679470Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501627098445482982:2061];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:04:30.679551Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:04:31.030514Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480 2025-05-07T09:04:31.626959Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976710676:0, at schemeshard: 72057594046644480 2025-05-07T09:04:32.097091Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976710681:0, at schemeshard: 72057594046644480 2025-05-07T09:04:32.586360Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710686:0, at schemeshard: 72057594046644480 2025-05-07T09:04:33.621293Z node 1 :REPLICATION_SERVICE DEBUG: topic_reader.cpp:32: [RemoteTopicReader][/Root/topic][0][1:7501627132805223062:2778] Handshake: worker# [1:7501627107035418188:2290] 2025-05-07T09:04:33.624435Z node 1 :REPLICATION_SERVICE DEBUG: topic_reader.cpp:41: [RemoteTopicReader][/Root/topic][0][1:7501627132805223062:2778] Create read session: session# [1:7501627132805223063:2289] 2025-05-07T09:04:33.624878Z node 1 :REPLICATION_SERVICE DEBUG: topic_reader.cpp:48: [RemoteTopicReader][/Root/topic][0][1:7501627132805223062:2778] Handle NKikimr::NReplication::NService::TEvWorker::TEvPoll { SkipCommit: 0 } 2025-05-07T09:04:33.652548Z node 1 :REPLICATION_SERVICE DEBUG: topic_reader.cpp:79: [RemoteTopicReader][/Root/topic][0][1:7501627132805223062:2778] Handle NKikimr::NReplication::TEvYdbProxy::TEvStartTopicReadingSession { Result: { ReadSessionId: consumer_1_1_16257821085010785800_v1 } } 2025-05-07T09:04:33.663407Z node 1 :REPLICATION_SERVICE DEBUG: topic_reader.cpp:58: 
[RemoteTopicReader][/Root/topic][0][1:7501627132805223062:2778] Handle NKikimr::NReplication::TEvYdbProxy::TEvReadTopicResponse { Result: { PartitionId: 0 Messages [{ Codec: RAW Data: 9b Offset: 0 SeqNo: 1 CreateTime: 2025-05-07T09:04:33.511000Z MessageGroupId: producer ProducerId: producer }] } } 2025-05-07T09:04:33.668241Z node 1 :REPLICATION_SERVICE DEBUG: topic_reader.cpp:48: [RemoteTopicReader][/Root/topic][0][1:7501627132805223062:2778] Handle NKikimr::NReplication::NService::TEvWorker::TEvPoll { SkipCommit: 0 } 2025-05-07T09:04:33.804824Z node 1 :REPLICATION_SERVICE DEBUG: topic_reader.cpp:58: [RemoteTopicReader][/Root/topic][0][1:7501627132805223062:2778] Handle NKikimr::NReplication::TEvYdbProxy::TEvReadTopicResponse { Result: { PartitionId: 0 Messages [{ Codec: RAW Data: 9b Offset: 1 SeqNo: 2 CreateTime: 2025-05-07T09:04:33.769000Z MessageGroupId: producer ProducerId: producer }] } } 2025-05-07T09:04:33.878965Z node 1 :REPLICATION_SERVICE DEBUG: topic_reader.cpp:32: [RemoteTopicReader][/Root/topic][0][1:7501627132805223173:2812] Handshake: worker# [1:7501627107035418188:2290] 2025-05-07T09:04:33.886183Z node 1 :REPLICATION_SERVICE DEBUG: topic_reader.cpp:41: [RemoteTopicReader][/Root/topic][0][1:7501627132805223173:2812] Create read session: session# [1:7501627132805223174:2289] 2025-05-07T09:04:33.886636Z node 1 :REPLICATION_SERVICE DEBUG: topic_reader.cpp:48: [RemoteTopicReader][/Root/topic][0][1:7501627132805223173:2812] Handle NKikimr::NReplication::NService::TEvWorker::TEvPoll { SkipCommit: 0 } 2025-05-07T09:04:33.910415Z node 1 :REPLICATION_SERVICE DEBUG: topic_reader.cpp:79: [RemoteTopicReader][/Root/topic][0][1:7501627132805223173:2812] Handle NKikimr::NReplication::TEvYdbProxy::TEvStartTopicReadingSession { Result: { ReadSessionId: consumer_1_2_7225918593025331151_v1 } } 2025-05-07T09:04:33.913286Z node 1 :REPLICATION_SERVICE DEBUG: topic_reader.cpp:58: [RemoteTopicReader][/Root/topic][0][1:7501627132805223173:2812] Handle NKikimr::NReplication::TEvYdbProxy::TEvReadTopicResponse { Result: { PartitionId: 0 Messages [{ Codec: RAW Data: 9b Offset: 1 SeqNo: 2 CreateTime: 2025-05-07T09:04:33.769000Z MessageGroupId: producer ProducerId: producer }] } } ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::TableDiskSpaceQuotas [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140] 2025-05-07T09:04:17.065449Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T09:04:17.065529Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:04:17.065574Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-05-07T09:04:17.065605Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 
2025-05-07T09:04:17.065651Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T09:04:17.065676Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T09:04:17.065711Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:04:17.065775Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T09:04:17.066522Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T09:04:17.066921Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T09:04:17.132616Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T09:04:17.132667Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:04:17.147992Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T09:04:17.148198Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T09:04:17.148381Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T09:04:17.154393Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T09:04:17.154635Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T09:04:17.155278Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:17.155447Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T09:04:17.157985Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:17.159183Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:17.159229Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:17.159282Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T09:04:17.159315Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:17.159405Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 
2025-05-07T09:04:17.159612Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T09:04:17.165218Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T09:04:17.277558Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T09:04:17.277738Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:17.277908Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T09:04:17.278136Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T09:04:17.278178Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:17.280237Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:17.280362Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T09:04:17.280536Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:17.280583Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T09:04:17.280610Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T09:04:17.280635Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T09:04:17.282410Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:17.282464Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T09:04:17.282517Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T09:04:17.284119Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:17.284164Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: 
NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:17.284192Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:17.284237Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T09:04:17.287086Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T09:04:17.288583Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T09:04:17.288750Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T09:04:17.289584Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:17.289704Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:04:17.289741Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:17.289956Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T09:04:17.290042Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:17.290188Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T09:04:17.290244Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T09:04:17.291874Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:17.291919Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:17.292054Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard 
DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:17.292104Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [ ... ntPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-05-07T09:04:30.274174Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 3 2025-05-07T09:04:30.277030Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 107:0, at schemeshard: 72057594046678944 2025-05-07T09:04:30.278653Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 107:0, at schemeshard: 72057594046678944 2025-05-07T09:04:30.279021Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:30.279112Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 107, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-05-07T09:04:30.279310Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 107, path id: [OwnerId: 72057594046678944, LocalPathId: 4] 2025-05-07T09:04:30.279453Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:30.279499Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:205:2207], at schemeshard: 72057594046678944, txId: 107, path id: 2 2025-05-07T09:04:30.279742Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:205:2207], at schemeshard: 72057594046678944, txId: 107, path id: 4 2025-05-07T09:04:30.279837Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 107:0, at schemeshard: 72057594046678944 2025-05-07T09:04:30.279883Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1036: NTableState::TProposedWaitParts operationId# 107:0 ProgressState at tablet: 72057594046678944 2025-05-07T09:04:30.279977Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:674: all shard schema changes has been received, operationId: 107:0, at schemeshard: 72057594046678944 2025-05-07T09:04:30.280022Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:686: send schema changes ack message, operation: 107:0, datashard: 72075186233409549, at schemeshard: 72057594046678944 2025-05-07T09:04:30.280068Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 107:0 129 -> 240 2025-05-07T09:04:30.281651Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 14 PathOwnerId: 72057594046678944, cookie: 107 2025-05-07T09:04:30.281762Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 
LocalPathId: 2 Version: 14 PathOwnerId: 72057594046678944, cookie: 107 2025-05-07T09:04:30.281801Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 107 2025-05-07T09:04:30.281859Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 107, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 14 2025-05-07T09:04:30.281910Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5 2025-05-07T09:04:30.282771Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 107 2025-05-07T09:04:30.282869Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 107 2025-05-07T09:04:30.282895Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 107 2025-05-07T09:04:30.282921Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 107, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], version: 18446744073709551615 2025-05-07T09:04:30.282947Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 4 2025-05-07T09:04:30.283008Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 107, ready parts: 0/1, is published: true 2025-05-07T09:04:30.299387Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 107:0, at schemeshard: 72057594046678944 2025-05-07T09:04:30.299492Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_table.cpp:414: TDropTable TProposedDeletePart operationId: 107:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T09:04:30.299781Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove table for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 3 2025-05-07T09:04:30.299968Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#107:0 progress is 1/1 2025-05-07T09:04:30.300004Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 107 ready parts: 1/1 2025-05-07T09:04:30.300043Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#107:0 progress is 1/1 2025-05-07T09:04:30.300101Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 107 ready parts: 1/1 2025-05-07T09:04:30.300135Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 107, ready parts: 1/1, is published: true 2025-05-07T09:04:30.300174Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 107 
ready parts: 1/1 2025-05-07T09:04:30.300215Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 107:0 2025-05-07T09:04:30.300248Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 107:0 2025-05-07T09:04:30.300339Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 2 2025-05-07T09:04:30.300893Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:30.300948Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 0, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-05-07T09:04:30.301912Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 107 2025-05-07T09:04:30.308467Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 107 2025-05-07T09:04:30.311788Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:30.311865Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:205:2207], at schemeshard: 72057594046678944, txId: 0, path id: 2 2025-05-07T09:04:30.312758Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 15 PathOwnerId: 72057594046678944, cookie: 0 TestWaitNotification wait txId: 107 2025-05-07T09:04:30.313394Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 107: send EvNotifyTxCompletion 2025-05-07T09:04:30.313443Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 107 2025-05-07T09:04:30.314154Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 107, at schemeshard: 72057594046678944 2025-05-07T09:04:30.314274Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 107: got EvNotifyTxCompletionResult 2025-05-07T09:04:30.314336Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 107: satisfy waiter [1:980:2908] TestWaitNotification: OK eventTxId 107 2025-05-07T09:04:30.315281Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-05-07T09:04:30.315498Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 250us result status StatusSuccess 2025-05-07T09:04:30.315962Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0" 
PathDescription { Self { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 15 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 15 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 9 SubDomainVersion: 1 SubDomainStateVersion: 4 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "name_USER_0_kind_hdd-1" Kind: "hdd-1" } StoragePools { Name: "name_USER_0_kind_hdd-2" Kind: "hdd-2" } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 DatabaseQuotas { data_size_hard_quota: 1 } SecurityState { } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_export/unittest >> TExportToS3Tests::ShouldRetryAtFinalStage [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140] 2025-05-07T09:03:46.200007Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T09:03:46.200102Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:03:46.200146Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T09:03:46.200184Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T09:03:46.201467Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T09:03:46.201536Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T09:03:46.201609Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:03:46.201676Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, 
DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T09:03:46.202325Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T09:03:46.205648Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T09:03:46.290314Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T09:03:46.290365Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:03:46.302215Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T09:03:46.302346Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T09:03:46.302499Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T09:03:46.306991Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T09:03:46.307254Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T09:03:46.307715Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T09:03:46.307841Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T09:03:46.311487Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:03:46.321100Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:03:46.321165Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:03:46.321219Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T09:03:46.321275Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:03:46.321324Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T09:03:46.322488Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T09:03:46.327869Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T09:03:46.440199Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at 
schemeshard: 72057594046678944 2025-05-07T09:03:46.440383Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:03:46.440575Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T09:03:46.440843Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T09:03:46.440901Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:03:46.442813Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T09:03:46.442931Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T09:03:46.443128Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:03:46.443187Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T09:03:46.443257Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T09:03:46.443287Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T09:03:46.444912Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:03:46.444954Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T09:03:46.444997Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T09:03:46.446386Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:03:46.446432Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:03:46.446478Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:03:46.446536Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T09:03:46.449989Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 
IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T09:03:46.451656Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T09:03:46.451837Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T09:03:46.452845Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T09:03:46.453002Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:03:46.453057Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:03:46.453365Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T09:03:46.453425Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:03:46.453588Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T09:03:46.453670Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T09:03:46.455629Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:03:46.455673Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:03:46.455875Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:03:46.455932Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 
d: 72057594046678944, LocalPathId: 4] state 'Ready' dataSize 0 rowCount 0 cpuUsage 0.0019 2025-05-07T09:04:21.104653Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:588: Started TEvPersistStats at tablet 72057594046678944, queue size# 2 2025-05-07T09:04:21.104823Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 2 shard idx 72057594046678944:1 data size 70 row count 2 2025-05-07T09:04:21.104915Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186233409546 maps to shardIdx: 72057594046678944:1 followerId=0, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], pathId map=Table, is column=0, is olap=0, RowCount 2, DataSize 70 2025-05-07T09:04:21.105029Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:483: Do not want to split tablet 72075186233409546 2025-05-07T09:04:21.105086Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 4 shard idx 72057594046678944:2 data size 0 row count 0 2025-05-07T09:04:21.105133Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186233409547 maps to shardIdx: 72057594046678944:2 followerId=0, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], pathId map=0, is column=0, is olap=0, RowCount 0, DataSize 0, with borrowed parts 2025-05-07T09:04:21.105173Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:483: Do not want to split tablet 72075186233409547 2025-05-07T09:04:21.115618Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:588: Started TEvPersistStats at tablet 72057594046678944, queue size# 0 2025-05-07T09:04:24.605511Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:561: Got periodic table stats at tablet 72057594046678944 from shard 72075186233409546 followerId 0 pathId [OwnerId: 72057594046678944, LocalPathId: 2] state 'Ready' dataSize 70 rowCount 2 cpuUsage 0.0017 2025-05-07T09:04:24.627519Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:561: Got periodic table stats at tablet 72057594046678944 from shard 72075186233409547 followerId 0 pathId [OwnerId: 72057594046678944, LocalPathId: 4] state 'Ready' dataSize 0 rowCount 0 cpuUsage 0.0017 2025-05-07T09:04:24.672487Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:588: Started TEvPersistStats at tablet 72057594046678944, queue size# 2 2025-05-07T09:04:24.672676Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 2 shard idx 72057594046678944:1 data size 70 row count 2 2025-05-07T09:04:24.672741Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186233409546 maps to shardIdx: 72057594046678944:1 followerId=0, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], pathId map=Table, is column=0, is olap=0, RowCount 2, DataSize 70 2025-05-07T09:04:24.672839Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:483: Do not want to split tablet 72075186233409546 2025-05-07T09:04:24.672901Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 4 shard idx 72057594046678944:2 data size 0 row count 0 2025-05-07T09:04:24.672940Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: 
TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186233409547 maps to shardIdx: 72057594046678944:2 followerId=0, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], pathId map=0, is column=0, is olap=0, RowCount 0, DataSize 0, with borrowed parts 2025-05-07T09:04:24.672970Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:483: Do not want to split tablet 72075186233409547 2025-05-07T09:04:24.683415Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:588: Started TEvPersistStats at tablet 72057594046678944, queue size# 0 2025-05-07T09:04:28.165429Z node 4 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:783: [Export] [s3] Bootstrap: self# [4:573:2531], attempt# 1 2025-05-07T09:04:28.190914Z node 4 :DATASHARD_BACKUP DEBUG: export_scan.cpp:118: [Export] [scanner] Handle TEvExportScan::TEvReset: self# [4:572:2530] 2025-05-07T09:04:28.201067Z node 4 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:427: [Export] [s3] Handle TEvExportScan::TEvReady: self# [4:573:2531], sender# [4:572:2530] 2025-05-07T09:04:28.201321Z node 4 :DATASHARD_BACKUP DEBUG: export_scan.cpp:130: [Export] [scanner] Handle TEvExportScan::TEvFeed: self# [4:572:2530] 2025-05-07T09:04:28.201563Z node 4 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:445: [Export] [s3] Handle TEvExportScan::TEvBuffer: self# [4:573:2531], sender# [4:572:2530], msg# NKikimr::NDataShard::TEvExportScan::TEvBuffer { Last: 0 Checksum: } 2025-05-07T09:04:28.201800Z node 4 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:512: [Export] [s3] Handle TEvDataShard::TEvS3Upload: self# [4:573:2531], upload# { Id: 1 Status: Complete Error: (empty maybe) Parts: [6e3e0a41fdab8add833862f1bd2954c3,1d8dd09e584ce6a47582a31b591900e2,d41d8cd98f00b204e9800998ecf8427e] } REQUEST: POST /data_00.csv?uploadId=1 HTTP/1.1 HEADERS: Host: localhost:25533 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 594A3059-A9DA-49A4-86BC-5A8446F797B6 amz-sdk-request: attempt=1 content-length: 459 content-type: application/xml user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-api-version: 2006-03-01 S3_MOCK::HttpServeAction: 4 / /data_00.csv / uploadId=1 2025-05-07T09:04:28.207802Z node 4 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:609: [Export] [s3] Handle TEvExternalStorage::TEvCompleteMultipartUploadResponse: self# [4:573:2531], result# 2025-05-07T09:04:28.208052Z node 4 :DATASHARD_BACKUP DEBUG: export_scan.cpp:144: [Export] [scanner] Handle TEvExportScan::TEvFinish: self# [4:572:2530], msg# NKikimr::NDataShard::TEvExportScan::TEvFinish { Success: 1 Error: } 2025-05-07T09:04:28.229227Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5472: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 443 RawX2: 17179871596 } Origin: 72075186233409547 State: 2 TxId: 281474976710759 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10 RowsProcessed: 1 } 2025-05-07T09:04:28.229634Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation FindRelatedPartByTabletId, TxId: 281474976710759, tablet: 72075186233409547, partId: 0 2025-05-07T09:04:28.229884Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 281474976710759:0, at schemeshard: 72057594046678944, message: Source { RawX1: 443 RawX2: 17179871596 } Origin: 72075186233409547 State: 2 TxId: 281474976710759 
Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10 RowsProcessed: 1 } 2025-05-07T09:04:28.230047Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:233: TBackup TProposedWaitParts, opId: 281474976710759:0 HandleReply TEvSchemaChanged at tablet# 72057594046678944 message# Source { RawX1: 443 RawX2: 17179871596 } Origin: 72075186233409547 State: 2 TxId: 281474976710759 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10 RowsProcessed: 1 } 2025-05-07T09:04:28.230136Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:655: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 281474976710759:0, shardIdx: 72057594046678944:2, datashard: 72075186233409547, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:28.230184Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:674: all shard schema changes has been received, operationId: 281474976710759:0, at schemeshard: 72057594046678944 2025-05-07T09:04:28.230233Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:686: send schema changes ack message, operation: 281474976710759:0, datashard: 72075186233409547, at schemeshard: 72057594046678944 2025-05-07T09:04:28.230287Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 281474976710759:0 129 -> 240 2025-05-07T09:04:28.230540Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:116: Unable to make a bill: kind# TBackup, opId# 281474976710759:0, reason# domain is not a serverless db, domain# /MyRoot, domainPathId# [OwnerId: 72057594046678944, LocalPathId: 1], IsDomainSchemeShard: 1, ParentDomainId: [OwnerId: 72057594046678944, LocalPathId: 1], ResourcesDomainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:28.235406Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 281474976710759:0, at schemeshard: 72057594046678944 2025-05-07T09:04:28.235997Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 281474976710759:0, at schemeshard: 72057594046678944 2025-05-07T09:04:28.236072Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046678944] TDone opId# 281474976710759:0 ProgressState 2025-05-07T09:04:28.236316Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976710759:0 progress is 1/1 2025-05-07T09:04:28.236371Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 281474976710759 ready parts: 1/1 2025-05-07T09:04:28.236421Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976710759:0 progress is 1/1 2025-05-07T09:04:28.236475Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 281474976710759 ready parts: 1/1 2025-05-07T09:04:28.236533Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 281474976710759, ready parts: 1/1, is published: true 2025-05-07T09:04:28.236625Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [4:124:2150] message: TxId: 281474976710759 2025-05-07T09:04:28.236691Z node 4 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 281474976710759 ready parts: 1/1 2025-05-07T09:04:28.236746Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 281474976710759:0 2025-05-07T09:04:28.236782Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 281474976710759:0 2025-05-07T09:04:28.236918Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 3 2025-05-07T09:04:28.240906Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6706: Handle: TEvNotifyTxCompletionResult: txId# 281474976710759 2025-05-07T09:04:28.241018Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6708: Message: TxId: 281474976710759 2025-05-07T09:04:28.243536Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-05-07T09:04:28.243612Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [4:593:2548] TestWaitNotification: OK eventTxId 102 |93.0%| [TA] $(B)/ydb/core/tx/replication/service/ut_topic_reader/test-results/unittest/{meta.json ... results_accumulator.log} |93.0%| [TA] {RESULT} $(B)/ydb/core/tx/replication/service/ut_topic_reader/test-results/unittest/{meta.json ... results_accumulator.log} >> GenericFederatedQuery::ClickHouseManagedSelectAll [GOOD] >> GenericFederatedQuery::ClickHouseManagedSelectConstant |93.0%| [TA] $(B)/ydb/core/tx/schemeshard/ut_export/test-results/unittest/{meta.json ... results_accumulator.log} |93.0%| [TA] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_export/test-results/unittest/{meta.json ... 
results_accumulator.log} ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> YdbOlapStore::LogTsRangeDescending [GOOD] Test command err: 2025-05-07T09:00:17.480129Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501626032816725586:2217];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:00:17.480395Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002887/r3tmp/tmpTWdWiD/pdisk_1.dat 2025-05-07T09:00:18.048657Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:00:18.079886Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:00:18.079969Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:00:18.092042Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 15140, node 1 2025-05-07T09:00:18.582804Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:00:18.582837Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:00:18.582845Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:00:18.582988Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:7612 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:00:19.302286Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 
TClient is connected to server localhost:7612 2025-05-07T09:00:20.035239Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/Root" OperationType: ESchemeOpCreateColumnStore CreateColumnStore { Name: "OlapStore" ColumnShardCount: 4 SchemaPresets { Name: "default" Schema { Columns { Name: "message" Type: "Utf8" } Columns { Name: "json_payload" Type: "JsonDocument" } Columns { Name: "resource_id" Type: "Utf8" NotNull: true } Columns { Name: "uid" Type: "Utf8" NotNull: true } Columns { Name: "timestamp" Type: "Timestamp" NotNull: true } Columns { Name: "resource_type" Type: "Utf8" NotNull: true } Columns { Name: "level" Type: "Int32" } Columns { Name: "ingested_at" Type: "Timestamp" } Columns { Name: "saved_at" Type: "Timestamp" } Columns { Name: "request_id" Type: "Utf8" } KeyColumnNames: "timestamp" KeyColumnNames: "resource_type" KeyColumnNames: "resource_id" KeyColumnNames: "uid" } } } } TxId: 281474976710658 TabletId: 72057594046644480 PeerName: "" , at schemeshard: 72057594046644480 2025-05-07T09:00:20.035706Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: create_store.cpp:331: TCreateOlapStore Propose, path: /Root/OlapStore, opId: 281474976710658:0, at schemeshard: 72057594046644480 2025-05-07T09:00:20.036200Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:319: AttachChild: child attached as only one child to the parent, parent id: [OwnerId: 72057594046644480, LocalPathId: 1], parent name: Root, child name: OlapStore, child id: [OwnerId: 72057594046644480, LocalPathId: 2], at schemeshard: 72057594046644480 2025-05-07T09:00:20.036240Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 0 2025-05-07T09:00:20.036314Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason new shard created for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 1 2025-05-07T09:00:20.036404Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason new shard created for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 2 2025-05-07T09:00:20.036470Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason new shard created for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 3 2025-05-07T09:00:20.036588Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason new shard created for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 4 2025-05-07T09:00:20.036885Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 5 2025-05-07T09:00:20.039960Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 281474976710658:0 1 -> 2 2025-05-07T09:00:20.040254Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 281474976710658:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-05-07T09:00:20.040283Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnStore, opId: 281474976710658:0, at schemeshard: 72057594046644480 2025-05-07T09:00:20.040443Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for 
pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 1 2025-05-07T09:00:20.040512Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 6 2025-05-07T09:00:20.042985Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 281474976710658, response: Status: StatusAccepted TxId: 281474976710658 SchemeshardId: 72057594046644480 PathId: 2, at schemeshard: 72057594046644480 2025-05-07T09:00:20.043131Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976710658, database: /Root, subject: , status: StatusAccepted, operation: CREATE COLUMN STORE, path: /Root/OlapStore 2025-05-07T09:00:20.043373Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046644480 2025-05-07T09:00:20.043424Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046644480, txId: 281474976710658, path id: [OwnerId: 72057594046644480, LocalPathId: 1] 2025-05-07T09:00:20.043570Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046644480, txId: 281474976710658, path id: [OwnerId: 72057594046644480, LocalPathId: 2] 2025-05-07T09:00:20.043654Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046644480 2025-05-07T09:00:20.043670Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:7501626037111693372:2389], at schemeshard: 72057594046644480, txId: 281474976710658, path id: 1 2025-05-07T09:00:20.043682Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:7501626037111693372:2389], at schemeshard: 72057594046644480, txId: 281474976710658, path id: 2 2025-05-07T09:00:20.043714Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 281474976710658:0, at schemeshard: 72057594046644480 2025-05-07T09:00:20.043809Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 281474976710658:0 ProgressState, operation type: TxCreateOlapStore, at tablet# 72057594046644480 2025-05-07T09:00:20.044535Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:357: TCreateParts opId# 281474976710658:0 CreateRequest Event to Hive: 72057594037968897 msg: Owner: 72057594046644480 OwnerIdx: 1 TabletType: ColumnShard ObjectDomain { SchemeShard: 72057594046644480 PathId: 1 } ObjectId: 2 BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } 
BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StorageP ... dq_compute_actor_channels.cpp:658: TxId: 281474976710670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 40, seqNo: [1] 2025-05-07T09:04:33.848114Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976710670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 41, seqNo: [1] 2025-05-07T09:04:33.848131Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976710670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 42, seqNo: [1] 2025-05-07T09:04:33.848148Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976710670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 43, seqNo: [1] 2025-05-07T09:04:33.848164Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976710670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 44, seqNo: [1] 2025-05-07T09:04:33.848181Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976710670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 45, seqNo: [1] 2025-05-07T09:04:33.848199Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976710670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 46, seqNo: [1] 2025-05-07T09:04:33.848218Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976710670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 47, seqNo: [1] 2025-05-07T09:04:33.848234Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976710670, task: 65. 
Tasks execution finished, don't wait for ack delivery in input channelId: 48, seqNo: [1] 2025-05-07T09:04:33.848250Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976710670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 49, seqNo: [1] 2025-05-07T09:04:33.848267Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976710670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 50, seqNo: [1] 2025-05-07T09:04:33.848283Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976710670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 51, seqNo: [1] 2025-05-07T09:04:33.848300Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976710670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 52, seqNo: [1] 2025-05-07T09:04:33.848318Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976710670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 53, seqNo: [1] 2025-05-07T09:04:33.848335Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976710670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 54, seqNo: [1] 2025-05-07T09:04:33.848353Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976710670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 55, seqNo: [1] 2025-05-07T09:04:33.848369Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976710670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 56, seqNo: [1] 2025-05-07T09:04:33.848385Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976710670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 57, seqNo: [1] 2025-05-07T09:04:33.848401Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976710670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 58, seqNo: [1] 2025-05-07T09:04:33.848419Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976710670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 59, seqNo: [1] 2025-05-07T09:04:33.848434Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976710670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 60, seqNo: [1] 2025-05-07T09:04:33.848451Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976710670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 61, seqNo: [1] 2025-05-07T09:04:33.848468Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976710670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 62, seqNo: [1] 2025-05-07T09:04:33.848485Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976710670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 63, seqNo: [1] 2025-05-07T09:04:33.848501Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976710670, task: 65. 
Tasks execution finished, don't wait for ack delivery in input channelId: 64, seqNo: [1] 2025-05-07T09:04:33.848535Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:675: TxId: 281474976710670, task: 65. Tasks execution finished 2025-05-07T09:04:33.848571Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:510: SelfId: [28:7501627132703176205:3195], TxId: 281474976710670, task: 65. Ctx: { TraceId : 01jtmztky07k7xwafyf4sgcb2q. CustomerSuppliedId : . SessionId : ydb://session/3?node_id=28&id=ZDJjNDM4NDctYjExMGI3MzgtYjhhM2I4MjItMzg1MWU5NGE=. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Compute state finished. All channels and sinks finished 2025-05-07T09:04:33.848798Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:494: TxId: 281474976710670, task: 65. pass away 2025-05-07T09:04:33.849048Z node 28 :KQP_COMPUTE DEBUG: log.cpp:784: fline=kqp_compute_actor_factory.cpp:66;problem=finish_compute_actor;tx_id=281474976710670;task_id=65;success=1;message={
: Error: COMPUTE_STATE_FINISHED }; 2025-05-07T09:04:33.849884Z node 28 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:433: ActorId: [28:7501627132703176116:3122] TxId: 281474976710670. Ctx: { TraceId: 01jtmztky07k7xwafyf4sgcb2q, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=28&id=ZDJjNDM4NDctYjExMGI3MzgtYjhhM2I4MjItMzg1MWU5NGE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, got execution state from compute actor: [28:7501627132703176205:3195], task: 65, state: COMPUTE_STATE_FINISHED, stats: { CpuTimeUs: 12906 Tasks { TaskId: 65 StageId: 1 CpuTimeUs: 481 FinishTimeMs: 1746608673847 ComputeCpuTimeUs: 116 BuildCpuTimeUs: 365 HostName: "ghrun-sykirh5vua" NodeId: 28 CreateTimeMs: 1746608673682 UpdateTimeMs: 1746608673848 } MaxMemoryUsage: 1048576 } 2025-05-07T09:04:33.849981Z node 28 :KQP_EXECUTER INFO: kqp_planner.cpp:688: TxId: 281474976710670. Ctx: { TraceId: 01jtmztky07k7xwafyf4sgcb2q, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=28&id=ZDJjNDM4NDctYjExMGI3MzgtYjhhM2I4MjItMzg1MWU5NGE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Compute actor has finished execution: [28:7501627132703176205:3195] 2025-05-07T09:04:33.850150Z node 28 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:2140: ActorId: [28:7501627132703176116:3122] TxId: 281474976710670. Ctx: { TraceId: 01jtmztky07k7xwafyf4sgcb2q, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=28&id=ZDJjNDM4NDctYjExMGI3MzgtYjhhM2I4MjItMzg1MWU5NGE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. terminate execution. 2025-05-07T09:04:33.850237Z node 28 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:838: ActorId: [28:7501627132703176116:3122] TxId: 281474976710670. Ctx: { TraceId: 01jtmztky07k7xwafyf4sgcb2q, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=28&id=ZDJjNDM4NDctYjExMGI3MzgtYjhhM2I4MjItMzg1MWU5NGE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Resource usage for last stat interval: ComputeTime: 0.140015s ReadRows: 0 ReadBytes: 0 ru: 93 rate limiter was not found force flag: 1 2025-05-07T09:04:33.850361Z node 28 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1699: SessionId: ydb://session/3?node_id=28&id=ZDJjNDM4NDctYjExMGI3MzgtYjhhM2I4MjItMzg1MWU5NGE=, ActorId: [28:7501627128408208770:3122], ActorState: ExecuteState, TraceId: 01jtmztky07k7xwafyf4sgcb2q, TEvTxResponse, CurrentTx: 2/2 response.status: SUCCESS 2025-05-07T09:04:33.850893Z node 28 :KQP_SESSION INFO: kqp_session_actor.cpp:1958: SessionId: ydb://session/3?node_id=28&id=ZDJjNDM4NDctYjExMGI3MzgtYjhhM2I4MjItMzg1MWU5NGE=, ActorId: [28:7501627128408208770:3122], ActorState: ExecuteState, TraceId: 01jtmztky07k7xwafyf4sgcb2q, txInfo Status: Active Kind: ReadOnly TotalDuration: 0 ServerDuration: 262.905 QueriesCount: 1 2025-05-07T09:04:33.850996Z node 28 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2113: SessionId: ydb://session/3?node_id=28&id=ZDJjNDM4NDctYjExMGI3MzgtYjhhM2I4MjItMzg1MWU5NGE=, ActorId: [28:7501627128408208770:3122], ActorState: ExecuteState, TraceId: 01jtmztky07k7xwafyf4sgcb2q, Create QueryResponse for action: QUERY_ACTION_EXECUTE with SUCCESS status 2025-05-07T09:04:33.851161Z node 28 :KQP_SESSION INFO: kqp_session_actor.cpp:2473: SessionId: ydb://session/3?node_id=28&id=ZDJjNDM4NDctYjExMGI3MzgtYjhhM2I4MjItMzg1MWU5NGE=, ActorId: [28:7501627128408208770:3122], ActorState: ExecuteState, TraceId: 01jtmztky07k7xwafyf4sgcb2q, Cleanup start, isFinal: 1 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-05-07T09:04:33.851224Z node 28 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2534: SessionId: ydb://session/3?node_id=28&id=ZDJjNDM4NDctYjExMGI3MzgtYjhhM2I4MjItMzg1MWU5NGE=, ActorId: [28:7501627128408208770:3122], ActorState: ExecuteState, TraceId: 01jtmztky07k7xwafyf4sgcb2q, EndCleanup, isFinal: 1 2025-05-07T09:04:33.851328Z node 28 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2270: SessionId: ydb://session/3?node_id=28&id=ZDJjNDM4NDctYjExMGI3MzgtYjhhM2I4MjItMzg1MWU5NGE=, ActorId: [28:7501627128408208770:3122], ActorState: ExecuteState, TraceId: 01jtmztky07k7xwafyf4sgcb2q, Sent query response back to proxy, proxyRequestId: 5, proxyId: [28:7501627072573630069:2280] 2025-05-07T09:04:33.851386Z node 28 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2546: SessionId: ydb://session/3?node_id=28&id=ZDJjNDM4NDctYjExMGI3MzgtYjhhM2I4MjItMzg1MWU5NGE=, ActorId: [28:7501627128408208770:3122], ActorState: unknown state, TraceId: 01jtmztky07k7xwafyf4sgcb2q, Cleanup temp tables: 0 RESULT: [] --------------------- STATS: total CPU: 3236 duration: 1643 usec cpu: 1643 usec duration: 249909 usec cpu: 266197 usec { name: "/Root/OlapStore/log1" } 2025-05-07T09:04:33.856311Z node 28 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1746608673000, txId: 18446744073709551615] shutting down 2025-05-07T09:04:33.856543Z node 28 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2637: SessionId: ydb://session/3?node_id=28&id=ZDJjNDM4NDctYjExMGI3MzgtYjhhM2I4MjItMzg1MWU5NGE=, ActorId: [28:7501627128408208770:3122], ActorState: unknown state, TraceId: 01jtmztky07k7xwafyf4sgcb2q, Session actor destroyed 2025-05-07T09:04:33.871162Z node 28 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037891;parent=[28:7501627081163565520:2326];fline=actor.cpp:33;event=skip_flush_writing; 2025-05-07T09:04:33.959652Z node 28 :TX_COLUMNSHARD DEBUG: log.cpp:784: 
tablet_id=72075186224037889;parent=[28:7501627081163565514:2324];fline=actor.cpp:33;event=skip_flush_writing; 2025-05-07T09:04:33.959752Z node 28 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037890;parent=[28:7501627081163565539:2327];fline=actor.cpp:33;event=skip_flush_writing; |93.0%| [TA] $(B)/ydb/core/tx/schemeshard/ut_subdomain/test-results/unittest/{meta.json ... results_accumulator.log} >> TCdcStreamWithInitialScanTests::MeteringDedicated [GOOD] |93.0%| [TA] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_subdomain/test-results/unittest/{meta.json ... results_accumulator.log} >> DataShardVolatile::DistributedWriteWithAsyncIndex [GOOD] >> DataShardVolatile::DistributedWriteThenLateWriteReadCommit ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_cdc_stream/unittest >> TCdcStreamWithInitialScanTests::MeteringDedicated [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140] 2025-05-07T09:04:04.258185Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T09:04:04.258392Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:04:04.258469Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T09:04:04.258539Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T09:04:04.262045Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T09:04:04.262137Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T09:04:04.262239Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:04:04.262319Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T09:04:04.263196Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T09:04:04.267061Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T09:04:04.353241Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T09:04:04.353310Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:04:04.379846Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 
2025-05-07T09:04:04.380076Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T09:04:04.380272Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T09:04:04.389271Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T09:04:04.389686Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T09:04:04.396541Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:04.402199Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T09:04:04.424071Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:04.439618Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:04.440228Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:04.440353Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T09:04:04.440404Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:04.440540Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T09:04:04.441946Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T09:04:04.448953Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T09:04:04.574843Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T09:04:04.576934Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:04.577874Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T09:04:04.579901Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T09:04:04.579981Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part 
proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:04.585214Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:04.585344Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T09:04:04.585566Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:04.585695Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T09:04:04.585733Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T09:04:04.585766Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T09:04:04.587989Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:04.588066Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T09:04:04.588112Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T09:04:04.589900Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:04.589950Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:04:04.590011Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:04.590082Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T09:04:04.597665Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T09:04:04.599943Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T09:04:04.600803Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T09:04:04.601862Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T09:04:04.602059Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:04:04.602112Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:04.603462Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T09:04:04.603522Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:04:04.603748Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T09:04:04.603825Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T09:04:04.606285Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:04:04.606350Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:04:04.606538Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:04:04.606576Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 
LocalPathId: 3] was 4 2025-05-07T09:04:34.844077Z node 19 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5794: Handle TEvUpdateAck, at schemeshard: 72075186233409546, msg: Owner: 72075186233409546 Generation: 2 LocalPathId: 2 Version: 5 PathOwnerId: 72075186233409546, cookie: 281474976715657 2025-05-07T09:04:34.844204Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72075186233409546, msg: Owner: 72075186233409546 Generation: 2 LocalPathId: 2 Version: 5 PathOwnerId: 72075186233409546, cookie: 281474976715657 2025-05-07T09:04:34.844240Z node 19 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72075186233409546, txId: 281474976715657 2025-05-07T09:04:34.844277Z node 19 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72075186233409546, txId: 281474976715657, pathId: [OwnerId: 72075186233409546, LocalPathId: 2], version: 5 2025-05-07T09:04:34.844330Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72075186233409546, LocalPathId: 2] was 6 2025-05-07T09:04:34.844448Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 281474976715657, ready parts: 2/3, is published: true 2025-05-07T09:04:34.851396Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72075186233409546, cookie: 281474976715657 2025-05-07T09:04:34.851974Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72075186233409546, cookie: 281474976715657 2025-05-07T09:04:34.866636Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6245: Handle TEvProposeTransactionResult, at schemeshard: 72075186233409546, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409552 Status: COMPLETE TxId: 281474976715657 Step: 250 OrderId: 281474976715657 ExecLatency: 0 ProposeLatency: 5 DomainCoordinators: 72075186233409547 TxStats { PerShardStats { ShardId: 72075186233409552 CpuTimeUsec: 1508 } } 2025-05-07T09:04:34.866711Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation FindRelatedPartByTabletId, TxId: 281474976715657, tablet: 72075186233409552, partId: 1 2025-05-07T09:04:34.866894Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 281474976715657:1, at schemeshard: 72075186233409546, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409552 Status: COMPLETE TxId: 281474976715657 Step: 250 OrderId: 281474976715657 ExecLatency: 0 ProposeLatency: 5 DomainCoordinators: 72075186233409547 TxStats { PerShardStats { ShardId: 72075186233409552 CpuTimeUsec: 1508 } } 2025-05-07T09:04:34.867051Z node 19 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_part.cpp:108: HandleReply TEvDataShard::TEvProposeTransactionResult Ignore message: tablet# 72075186233409546, ev# TxKind: TX_KIND_SCHEME Origin: 72075186233409552 Status: COMPLETE TxId: 281474976715657 Step: 250 OrderId: 281474976715657 ExecLatency: 0 ProposeLatency: 5 DomainCoordinators: 72075186233409547 TxStats { PerShardStats { ShardId: 72075186233409552 CpuTimeUsec: 1508 } } 2025-05-07T09:04:34.868346Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5472: Handle TEvSchemaChanged, tabletId: 72075186233409546, at 
schemeshard: 72075186233409546, message: Source { RawX1: 749 RawX2: 81604381262 } Origin: 72075186233409552 State: 2 TxId: 281474976715657 Step: 0 Generation: 2 2025-05-07T09:04:34.868434Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation FindRelatedPartByTabletId, TxId: 281474976715657, tablet: 72075186233409552, partId: 1 2025-05-07T09:04:34.868678Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 281474976715657:1, at schemeshard: 72075186233409546, message: Source { RawX1: 749 RawX2: 81604381262 } Origin: 72075186233409552 State: 2 TxId: 281474976715657 Step: 0 Generation: 2 2025-05-07T09:04:34.868783Z node 19 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1005: NTableState::TProposedWaitParts operationId# 281474976715657:1 HandleReply TEvSchemaChanged at tablet: 72075186233409546 2025-05-07T09:04:34.868947Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1009: NTableState::TProposedWaitParts operationId# 281474976715657:1 HandleReply TEvSchemaChanged at tablet: 72075186233409546 message: Source { RawX1: 749 RawX2: 81604381262 } Origin: 72075186233409552 State: 2 TxId: 281474976715657 Step: 0 Generation: 2 2025-05-07T09:04:34.869069Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:655: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 281474976715657:1, shardIdx: 72075186233409546:4, datashard: 72075186233409552, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72075186233409546 2025-05-07T09:04:34.869144Z node 19 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:674: all shard schema changes has been received, operationId: 281474976715657:1, at schemeshard: 72075186233409546 2025-05-07T09:04:34.869226Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:686: send schema changes ack message, operation: 281474976715657:1, datashard: 72075186233409552, at schemeshard: 72075186233409546 2025-05-07T09:04:34.869309Z node 19 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 281474976715657:1 129 -> 240 2025-05-07T09:04:34.878155Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 281474976715657:1, at schemeshard: 72075186233409546 2025-05-07T09:04:34.879833Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 281474976715657:1, at schemeshard: 72075186233409546 2025-05-07T09:04:34.880405Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 281474976715657:1, at schemeshard: 72075186233409546 2025-05-07T09:04:34.880486Z node 19 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72075186233409546] TDone opId# 281474976715657:1 ProgressState 2025-05-07T09:04:34.880714Z node 19 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976715657:1 progress is 3/3 2025-05-07T09:04:34.880781Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 281474976715657 ready parts: 3/3 2025-05-07T09:04:34.880876Z node 19 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976715657:1 progress is 3/3 2025-05-07T09:04:34.880937Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation 
IsReadyToDone TxId: 281474976715657 ready parts: 3/3 2025-05-07T09:04:34.881006Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 281474976715657, ready parts: 3/3, is published: true 2025-05-07T09:04:34.881078Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 281474976715657 ready parts: 3/3 2025-05-07T09:04:34.881150Z node 19 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 281474976715657:0 2025-05-07T09:04:34.881217Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 281474976715657:0 2025-05-07T09:04:34.881330Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72075186233409546, LocalPathId: 3] was 3 2025-05-07T09:04:34.881385Z node 19 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 281474976715657:1 2025-05-07T09:04:34.881409Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 281474976715657:1 2025-05-07T09:04:34.881499Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72075186233409546, LocalPathId: 2] was 5 2025-05-07T09:04:34.881536Z node 19 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 281474976715657:2 2025-05-07T09:04:34.881557Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 281474976715657:2 2025-05-07T09:04:34.881587Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72075186233409546, LocalPathId: 2] was 4 2025-05-07T09:04:37.981430Z node 19 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: PathId: 4 SchemeshardId: 72075186233409546, at schemeshard: 72075186233409546 2025-05-07T09:04:37.981882Z node 19 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:44: Tablet 72075186233409546 describe pathId 4 took 478us result status StatusNameConflict 2025-05-07T09:04:37.982148Z node 19 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusNameConflict Reason: "Check failed: path: \'/MyRoot/Shared/Table/Stream/streamImpl\', error: path is not a common path (id: [OwnerId: 72075186233409546, LocalPathId: 4], type: EPathTypePersQueueGroup, state: EPathStateNoChanges)" Path: "/MyRoot/Shared/Table/Stream/streamImpl" PathId: 4 LastExistedPrefixPath: "/MyRoot/Shared/Table/Stream/streamImpl" LastExistedPrefixPathId: 4 LastExistedPrefixDescription { Self { Name: "streamImpl" PathId: 4 SchemeshardId: 72075186233409546 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 106 CreateStep: 200 ParentPathId: 3 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeStreamImpl ChildrenExist: false BalancerTabletID: 72075186233409554 } } PathOwnerId: 72075186233409546, at schemeshard: 72075186233409546 2025-05-07T09:04:40.722788Z node 19 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: PathId: 4 SchemeshardId: 72075186233409546, at schemeshard: 72075186233409546 
2025-05-07T09:04:40.723265Z node 19 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:44: Tablet 72075186233409546 describe pathId 4 took 493us result status StatusNameConflict
2025-05-07T09:04:40.723492Z node 19 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusNameConflict Reason: "Check failed: path: \'/MyRoot/Shared/Table/Stream/streamImpl\', error: path is not a common path (id: [OwnerId: 72075186233409546, LocalPathId: 4], type: EPathTypePersQueueGroup, state: EPathStateNoChanges)" Path: "/MyRoot/Shared/Table/Stream/streamImpl" PathId: 4 LastExistedPrefixPath: "/MyRoot/Shared/Table/Stream/streamImpl" LastExistedPrefixPathId: 4 LastExistedPrefixDescription { Self { Name: "streamImpl" PathId: 4 SchemeshardId: 72075186233409546 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 106 CreateStep: 200 ParentPathId: 3 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeStreamImpl ChildrenExist: false BalancerTabletID: 72075186233409554 } } PathOwnerId: 72075186233409546, at schemeshard: 72075186233409546
>> DataShardVolatile::DistributedWriteShardRestartAfterExpectation-UseSink [GOOD]
>> DataShardVolatile::DistributedWriteEarlierSnapshotNotBlocked
>> BackupRestore::RestoreViewDependentOnAnotherView [GOOD]
>> BackupRestore::RestoreKesusResources
|93.0%| [TA] $(B)/ydb/core/tx/schemeshard/ut_cdc_stream/test-results/unittest/{meta.json ... results_accumulator.log}
|93.0%| [TA] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_cdc_stream/test-results/unittest/{meta.json ... results_accumulator.log}
>> TMultiversionObjectMap::MonteCarlo [GOOD]
>> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_mechanics_in_cloud[tables_format_v0-tables_format_v0-fifo]
>> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_setup_in_cloud[tables_format_v0-fifo]
>> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_cloud_queues_with_iam_token[tables_format_v0-fifo]
>> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_list_queues_for_unknown_cloud[tables_format_v0]
>> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_not_throttling_with_custom_queue_name[fifo-tables_format_v0]
>> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_ymq_expiring_counters
>> test_yandex_cloud_queue_counters.py::TestYmqQueueCounters::test_counters_when_sending_duplicates
>> test_yandex_cloud_queue_counters.py::TestYmqQueueCounters::test_sqs_action_counters
|93.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut/unittest >> TMultiversionObjectMap::MonteCarlo [GOOD]
>> TPersQueueTest::DirectReadCleanCache [GOOD]
>> TPersQueueTest::DirectReadRestartPQRB
>> DemoTx::Scenario_5 [GOOD]
>> test_common.py::TestCommonSqsYandexCloudMode::test_private_create_queue[tables_format_v0-fifo]
>> TopicService::UseDoubleSlashInTopicPath [GOOD]
>> test_common.py::TestCommonYandexWithPath::test_private_create_queue[tables_format_v0-fifo] [GOOD]
>> test_common.py::TestCommonYandexWithPath::test_private_create_queue[tables_format_v0-std]
>> TGroupMapperTest::NonUniformCluster [GOOD]
|93.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut/unittest >> TGroupMapperTest::NonUniformCluster [GOOD]
>> TPersQueueTest::StreamReadCommitAndStatusMsgs [GOOD]
>> TPersQueueTest::StreamReadManyUpdateTokenAndRead
>> test_common.py::TestCommonYandexWithTenant::test_private_create_queue[tables_format_v0-fifo]
>> TopicService::RelativePath
>> TPersQueueTest::TopicServiceSimpleHappyWrites [GOOD]
>> TPersQueueTest::WhenDisableNodeAndCreateTopic_ThenAllPartitionsAreOnOtherNode
>> TFstClassSrcIdPQTest::TestTableCreated
>> DataShardVolatile::DistributedWriteThenLateWriteReadCommit [GOOD]
>> DataShardVolatile::TwoAppendsMustBeVolatile+UseSink
>> GenericFederatedQuery::PostgreSQLOnPremSelectConstant [GOOD]
>> GenericFederatedQuery::PostgreSQLSelectCount
>> GenericFederatedQuery::IcebergHadoopTokenSelectConstant [GOOD]
>> GenericFederatedQuery::IcebergHadoopTokenSelectCount
>> test_common.py::TestCommonYandexWithTenant::test_private_queue_recreation[tables_format_v1-fifo]
>> TPersQueueCommonTest::Auth_WriteUpdateTokenRequestWithInvalidToken_SessionClosedWithUnauthenticatedError [GOOD]
>> TPersQueueCommonTest::Auth_WriteUpdateTokenRequestWithValidTokenButWithoutACL_SessionClosedWithUnauthorizedError
>> DataShardVolatile::DistributedWriteEarlierSnapshotNotBlocked [GOOD]
>> DataShardVolatile::DistributedWriteLaterSnapshotBlockedThenCommit+UseSink
>> BackupRestore::RestoreKesusResources [GOOD]
>> BackupRestore::RestoreReplicationWithoutSecret
>> TPersQueueTest::CacheHead [GOOD]
>> TPersQueueTest::CheckACLForGrpcWrite
>> TPersQueueTest::SchemeOperationsCheckPropValues [GOOD]
>> TPersQueueTest::ReadRuleServiceType
>> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_cloud_double_create_queue[fifo-tables_format_v0]
>> TPersQueueTest::WriteNonExistingTopic [GOOD]
>> TPersQueueTest::WriteAfterAlter
>> GenericFederatedQuery::ClickHouseManagedSelectConstant [GOOD]
>> GenericFederatedQuery::ClickHouseSelectCount
>> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_cloud_double_create_queue[std-tables_format_v0] [GOOD]
>> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_cloud_double_create_queue[std-tables_format_v1]
>> TGroupMapperTest::ReassignGroupTest3dc [GOOD]
>> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_cloud_double_create_queue[std-tables_format_v1] [GOOD]
>> DataShardVolatile::DistributedWriteLaterSnapshotBlockedThenCommit+UseSink [GOOD]
>> DataShardVolatile::DistributedWriteLaterSnapshotBlockedThenCommit-UseSink
>> TopicService::RelativePath [GOOD]
|93.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut/unittest >> TGroupMapperTest::ReassignGroupTest3dc [GOOD]
>> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_fifo_groups_with_dlq_in_cloud[tables_format_v1] [GOOD]
>> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_list_clouds
>> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_list_clouds [GOOD]
>> test_common.py::TestCommonYandexWithPath::test_private_queue_recreation[tables_format_v1-fifo] [GOOD]
>> test_common.py::TestCommonYandexWithPath::test_private_queue_recreation[tables_format_v1-std]
>> TopicService::AccessRights
>> test_common.py::TestCommonYandexWithPath::test_private_create_queue[tables_format_v1-fifo] [GOOD]
>> test_common.py::TestCommonYandexWithPath::test_private_create_queue[tables_format_v1-std]
>> GenericFederatedQuery::PostgreSQLSelectCount [GOOD]
>> GenericFederatedQuery::PostgreSQLFilterPushdown
>> GenericFederatedQuery::IcebergHadoopTokenSelectCount [GOOD]
>> GenericFederatedQuery::IcebergHadoopTokenFilterPushdown
>> TPersQueueTest::WhenDisableNodeAndCreateTopic_ThenAllPartitionsAreOnOtherNode [GOOD]
>> TPersQueueTest::WhenTheTopicIsDeletedAfterDecompressingTheData_Compressed
>> test_common.py::TestCommonYandexWithPath::test_private_create_queue[tables_format_v1-std] [GOOD]
>> DataShardVolatile::TwoAppendsMustBeVolatile+UseSink [GOOD]
>> DataShardVolatile::TwoAppendsMustBeVolatile-UseSink
>> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_message_counters_in_cloud[tables_format_v1-fifo]
>> TPersQueueTest::CheckACLForGrpcWrite [GOOD]
>> TPersQueueTest::CheckACLForGrpcRead
>> TPersQueueTest::StreamReadManyUpdateTokenAndRead [GOOD]
>> TPersQueueTest::SetupWriteSession
>> TGroupMapperTest::Block42_1disk [GOOD]
>> test_common.py::TestCommonYandexWithPath::test_private_queue_recreation[tables_format_v0-fifo] [GOOD]
>> test_common.py::TestCommonYandexWithPath::test_private_queue_recreation[tables_format_v0-std]
>> TPersQueueTest::ReadRuleServiceType [GOOD]
>> TPersQueueTest::ReadRuleServiceTypeLimit
>> DataShardVolatile::DistributedWriteLaterSnapshotBlockedThenCommit-UseSink [GOOD]
>> DataShardVolatile::DistributedWriteLaterSnapshotBlockedThenAbort
|93.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut/unittest >> TGroupMapperTest::Block42_1disk [GOOD]
>> TPersQueueCommonTest::Auth_WriteUpdateTokenRequestWithValidTokenButWithoutACL_SessionClosedWithUnauthorizedError [GOOD]
>> TPersQueueCommonTest::TestWriteWithRateLimiterWithBlobsRateLimit [GOOD]
>> TPersQueueCommonTest::TestWriteWithRateLimiterWithUserPayloadRateLimit
>> test_yandex_cloud_queue_counters.py::TestYmqQueueCounters::test_sqs_action_counters [GOOD]
>> GenericFederatedQuery::ClickHouseSelectCount [GOOD]
>> GenericFederatedQuery::ClickHouseFilterPushdown
>> test_yandex_cloud_queue_counters.py::TestYmqQueueCounters::test_counters_when_sending_duplicates [GOOD]
>> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_cloud_queues_with_iam_token[tables_format_v0-fifo] [GOOD]
>> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_list_queues_for_unknown_cloud[tables_format_v0] [GOOD]
>> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_cloud_queues_with_iam_token[tables_format_v0-std]
>> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_not_throttling_with_custom_queue_name[fifo-tables_format_v0] [GOOD]
>> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_list_queues_for_unknown_cloud[tables_format_v1]
>> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_not_throttling_with_custom_queue_name[fifo-tables_format_v1]
>> TFstClassSrcIdPQTest::TestTableCreated [GOOD]
>> TFstClassSrcIdPQTest::NoMapping
>> test_common.py::TestCommonYandexWithTenant::test_private_create_queue[tables_format_v0-fifo] [GOOD]
>> test_common.py::TestCommonYandexWithTenant::test_private_create_queue[tables_format_v0-std]
>> TPersQueueTest::WriteAfterAlter [GOOD]
>> TPersQueueTest::WhenTheTopicIsDeletedBeforeDataIsDecompressed_Compressed
>> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_not_throttling_with_custom_queue_name[fifo-tables_format_v1] [GOOD]
>> test_yandex_cloud_queue_counters.py::TestYmqQueueCounters::test_ymq_send_read_delete
>> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_list_queues_for_unknown_cloud[tables_format_v1] [GOOD]
|93.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/cloud/py3test >> test_common.py::TestCommonYandexWithPath::test_private_create_queue[tables_format_v0-std] [GOOD]
>> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_setup_in_cloud[tables_format_v0-fifo] [GOOD]
>>
test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_cloud_queues_with_iam_token[tables_format_v0-std] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_setup_in_cloud[tables_format_v0-std] >> TopicService::AccessRights [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_ymq_expiring_counters [GOOD] |93.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_cloud_double_create_queue[std-tables_format_v1] [GOOD] >> BackupRestore::RestoreExternalDataSourceWithoutSecret >> test_common.py::TestCommonYandexWithPath::test_private_queue_recreation[tables_format_v1-std] [GOOD] >> GenericFederatedQuery::IcebergHadoopTokenFilterPushdown [GOOD] >> DataShardVolatile::DistributedWriteLaterSnapshotBlockedThenAbort [GOOD] >> DataShardVolatile::DistributedWriteAsymmetricExecute >> TopicService::ThereAreGapsInTheOffsetRanges >> test_common.py::TestCommonSqsYandexCloudMode::test_private_create_queue[tables_format_v0-fifo] [GOOD] >> GenericFederatedQuery::PostgreSQLFilterPushdown [GOOD] >> test_common.py::TestCommonSqsYandexCloudMode::test_private_create_queue[tables_format_v0-std] ------- [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/federated_query/generic_ut/unittest >> GenericFederatedQuery::IcebergHadoopTokenFilterPushdown [GOOD] 2025-05-07 09:05:19,719 WARNING devtools.ya.test.programs.test_tool.run_test.run_test: Wrapper execution timed out 2025-05-07 09:05:19,923 WARNING devtools.ya.test.programs.test_tool.run_test.run_test: Wrapper has overrun 60 secs timeout. Process tree before termination: pid rss ref pdirt 368818 45.9M 46.0M 23.1M test_tool run_ut @/home/runner/.ya/build/build_root/zvgn/00485a/ydb/core/kqp/ut/federated_query/generic_ut/test-results/unittest/testing_out_stuff/chunk3/testing_out_stuff/tes 369764 1.5G 1.5G 1015M └─ ydb-core-kqp-ut-federated_query-generic_ut --trace-path-append /home/runner/.ya/build/build_root/zvgn/00485a/ydb/core/kqp/ut/federated_query/generic_ut/test-results/unit 398702 1.5G 0b 0b └─ ydb-core-kqp-ut-federated_query-generic_ut --trace-path-append /home/runner/.ya/build/build_root/zvgn/00485a/ydb/core/kqp/ut/federated_query/generic_ut/test-results/u Test command err: Trying to start YDB, gRPC: 24187, MsgBus: 9763 2025-05-07T09:04:25.089609Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501627098916507543:2063];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:04:25.089675Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00485a/r3tmp/tmpac1MxB/pdisk_1.dat 2025-05-07T09:04:25.518604Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:04:25.565203Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:04:25.565326Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:04:25.571431Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 24187, node 1 2025-05-07T09:04:25.792438Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: 
distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:04:25.792462Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:04:25.792470Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:04:25.792639Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:9763 TClient is connected to server localhost:9763 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:04:26.595806Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:04:26.614190Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T09:04:28.257703Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501627111801410093:2331], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:04:28.258050Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:04:28.757150Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:2, at schemeshard: 72057594046644480 2025-05-07T09:04:28.894292Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501627111801410215:2344], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:04:28.894386Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501627111801410220:2347], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:04:28.894425Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:04:28.897444Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:2, at schemeshard: 72057594046644480 2025-05-07T09:04:28.908418Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501627111801410222:2348], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-05-07T09:04:28.977428Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501627111801410262:2396] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:04:29.880733Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T09:04:30.090061Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501627098916507543:2063];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:04:30.090159Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:04:30.404191Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:1, at schemeshard: 72057594046644480 2025-05-07T09:04:30.915506Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480 2025-05-07T09:04:31.426624Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710678:0, at schemeshard: 72057594046644480 2025-05-07T09:04:31.992777Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710683:0, at schemeshard: 72057594046644480 2025-05-07T09:04:32.466927Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976715758:2, at schemeshard: 72057594046644480 2025-05-07T09:04:32.516256Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976715759:0, at schemeshard: 72057594046644480 2025-05-07T09:04:34.344076Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976710706:0, at schemeshard: 72057594046644480 2025-05-07T09:04:34.388485Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710707:0, at schemeshard: 72057594046644480 2025-05-07T09:04:34.390826Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710708:0, at schemeshard: 72057594046644480 2025-05-07T09:04:34.392316Z node 1 :FLAT_TX_SCHEMESHARD WARN: 
schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710709:0, at schemeshard: 72057594046644480 Call DescribeTable. data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Expected: data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Actual: data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } DescribeTable result. GRpcStatusCode: 0 schema { columns { name: "col1" type { type_id: UINT16 } } } error { status: SUCCESS } Call ListSplits. selects { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } from { table: "example_1" } } CRAB Expected: selects { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_regio ... 
4480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:05:10.965999Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7501627269553991771:2229];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:05:10.966101Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:05:11.318269Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-05-07T09:05:12.054732Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:1, at schemeshard: 72057594046644480 2025-05-07T09:05:13.007205Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715675:0, at schemeshard: 72057594046644480 2025-05-07T09:05:13.888028Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715682:0, at schemeshard: 72057594046644480 2025-05-07T09:05:14.720757Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715687:0, at schemeshard: 72057594046644480 2025-05-07T09:05:15.473575Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710758:2, at schemeshard: 72057594046644480 2025-05-07T09:05:15.534063Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710759:0, at schemeshard: 72057594046644480 2025-05-07T09:05:17.830502Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976715712:0, at schemeshard: 72057594046644480 Call DescribeTable. 
data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Expected: data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Actual: data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } DescribeTable result. GRpcStatusCode: 0 schema { columns { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } columns { name: "data_column" type { optional_type { item { type_id: STRING } } } } } error { status: SUCCESS } Call ListSplits. selects { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } from { table: "example_1" } } CRAB Expected: selects { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } from { table: "example_1" } } CRAB Actual: selects { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } from { table: "example_1" } } ListSplits result. GRpcStatusCode: 0 Call ReadSplits. 
splits { select { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } what { items { column { name: "data_column" type { optional_type { item { type_id: STRING } } } } } items { column { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } } } from { table: "example_1" } where { filter_typed { comparison { operation: EQ left_value { column: "filtered_column" } right_value { typed_value { type { type_id: INT32 } value { int32_value: 42 } } } } } } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL CRAB Expected: splits { select { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } what { items { column { name: "data_column" type { optional_type { item { type_id: STRING } } } } } items { column { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } } } from { table: "example_1" } where { filter_typed { comparison { operation: EQ left_value { column: "filtered_column" } right_value { typed_value { type { type_id: INT32 } value { int32_value: 42 } } } } } } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL CRAB Actual: splits { select { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } what { items { column { name: "data_column" type { optional_type { item { type_id: STRING } } } } } items { column { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } } } from { table: "example_1" } where { filter_typed { comparison { operation: EQ left_value { column: "filtered_column" } right_value { typed_value { type { type_id: INT32 } value { int32_value: 42 } } } } } } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL ReadSplits result. 
GRpcStatusCode: 0
Traceback (most recent call last):
  File "library/python/testing/yatest_common/yatest/common/process.py", line 384, in wait
    wait_for(
  File "library/python/testing/yatest_common/yatest/common/process.py", line 764, in wait_for
    raise TimeoutError(truncate(message, MAX_MESSAGE_LEN))
yatest.common.process.TimeoutError: 60 second(s) wait timeout has expired: Command '['/home/runner/.ya/tools/v4/8580453620/test_tool', 'run_ut', '@/home/runner/.ya/build/build_root/zvgn/00485a/ydb/core/kqp/ut/federated_query/generic_ut/test-results/unittest/testing_out_stuff/chunk3/testing_out_stuff/test_tool.args']' stopped by 60 seconds timeout
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
  File "devtools/ya/test/programs/test_tool/run_test/run_test.py", line 1738, in main
    res.wait(check_exit_code=False, timeout=run_timeout, on_timeout=timeout_callback)
  File "library/python/testing/yatest_common/yatest/common/process.py", line 398, in wait
    raise ExecutionTimeoutError(self, str(e))
yatest.common.process.ExecutionTimeoutError: (("60 second(s) wait timeout has expired: Command '['/home/runner/.ya/tools/v4/8580453620/test_tool', 'run_ut', '@/home/runner/.ya/build/build_root/zvgn/00485a/ydb/core/kqp/ut/federated_query/generic_ut/test-results/unittest/testing_out_stuff/chunk3/testing_out_stuff/test_tool.args']' stopped by 60 seconds timeout",), {})
|93.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_list_clouds [GOOD]
>> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_cloud_double_create_queue[fifo-tables_format_v0] [GOOD]
>> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_cloud_double_create_queue[fifo-tables_format_v1]
>> TPersQueueTest::DirectReadRestartPQRB [GOOD]
>> TPersQueueTest::DirectReadRestartTablet
------- [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/federated_query/generic_ut/unittest >> GenericFederatedQuery::PostgreSQLFilterPushdown [GOOD]
2025-05-07 09:05:21,662 WARNING devtools.ya.test.programs.test_tool.run_test.run_test: Wrapper execution timed out
2025-05-07 09:05:21,718 WARNING devtools.ya.test.programs.test_tool.run_test.run_test: Wrapper has overrun 60 secs timeout.
Process tree before termination: pid rss ref pdirt 369302 45.9M 46.2M 23.1M test_tool run_ut @/home/runner/.ya/build/build_root/zvgn/004852/ydb/core/kqp/ut/federated_query/generic_ut/test-results/unittest/testing_out_stuff/chunk7/testing_out_stuff/tes Test command err: Trying to start YDB, gRPC: 8465, MsgBus: 65079 2025-05-07T09:04:25.091752Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501627097383498062:2061];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:04:25.091985Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/004852/r3tmp/tmp1qoTQ9/pdisk_1.dat 2025-05-07T09:04:25.515611Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:04:25.564074Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:04:25.565295Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:04:25.571538Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 8465, node 1 2025-05-07T09:04:25.798859Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:04:25.798889Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:04:25.798903Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:04:25.799113Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:65079 TClient is connected to server localhost:65079 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:04:26.613398Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:04:28.240568Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501627110268400617:2331], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:04:28.240688Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:04:28.758890Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:2, at schemeshard: 72057594046644480 2025-05-07T09:04:28.897578Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501627110268400740:2344], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:04:28.897646Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:04:28.897790Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501627110268400745:2347], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:04:28.901529Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:2, at schemeshard: 72057594046644480 2025-05-07T09:04:28.910206Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501627110268400747:2348], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-05-07T09:04:28.995770Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501627110268400787:2397] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:04:29.880726Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T09:04:30.094100Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501627097383498062:2061];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:04:30.094164Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:04:30.383910Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:1, at schemeshard: 72057594046644480 2025-05-07T09:04:30.863325Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480 2025-05-07T09:04:31.481263Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710678:0, at schemeshard: 72057594046644480 2025-05-07T09:04:31.903300Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710681:0, at schemeshard: 72057594046644480 2025-05-07T09:04:32.358120Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976715758:2, at schemeshard: 72057594046644480 2025-05-07T09:04:32.449272Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976715759:0, at schemeshard: 72057594046644480 2025-05-07T09:04:34.356431Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976710706:0, at schemeshard: 72057594046644480 2025-05-07T09:04:34.396147Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710708:0, at schemeshard: 72057594046644480 2025-05-07T09:04:34.398209Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710707:0, at schemeshard: 72057594046644480 2025-05-07T09:04:34.399606Z node 1 :FLAT_TX_SCHEMESHARD WARN: 
schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710709:0, at schemeshard: 72057594046644480 Call DescribeTable. data_source_instance { kind: POSTGRESQL endpoint { host: "localhost" port: 5432 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE pg_options { schema: "public" } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Expected: data_source_instance { kind: POSTGRESQL endpoint { host: "localhost" port: 5432 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE pg_options { schema: "public" } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Actual: data_source_instance { kind: POSTGRESQL endpoint { host: "localhost" port: 5432 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE pg_options { schema: "public" } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } DescribeTable result. GRpcStatusCode: 0 schema { columns { name: "col1" type { type_id: UINT16 } } } error { status: SUCCESS } Call ListSplits. selects { data_source_instance { kind: POSTGRESQL endpoint { host: "localhost" port: 5432 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE pg_options { schema: "public" } } from { table: "example_1" } } CRAB Expected: selects { data_source_instance { kind: POSTGRESQL endpoint { host: "localhost" port: 5432 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE pg_options { schema: "public" } } from { table: "example_1" } } CRAB Actual: selects { data_source_instance { kind: POSTGRESQL endpoint { host: "localhost" port: 5432 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE pg_options { schema: "public" } } from { table: "example_1" } } ListSplits result. GRpcStatusCode: 0 Call ReadSplits. splits { select { data_source_instance { kind: POSTGRESQL endpoint { host: "localhost" port: 5432 } database: "pgdb" credentials { basic { username: "crab" ... ou don't have access permissions } 2025-05-07T09:05:11.841818Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:05:11.842281Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7501627295122338928:2351], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:05:11.847123Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:2, at schemeshard: 72057594046644480 2025-05-07T09:05:11.858342Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7501627295122338931:2352], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-05-07T09:05:11.936988Z node 4 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [4:7501627295122338971:2403] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:05:12.915580Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T09:05:13.660137Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:1, at schemeshard: 72057594046644480 2025-05-07T09:05:14.404255Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480 2025-05-07T09:05:15.207615Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710678:0, at schemeshard: 72057594046644480 2025-05-07T09:05:15.901197Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710681:0, at schemeshard: 72057594046644480 2025-05-07T09:05:16.600084Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976715758:2, at schemeshard: 72057594046644480 2025-05-07T09:05:16.657055Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976715759:0, at schemeshard: 72057594046644480 2025-05-07T09:05:19.261322Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976710702:0, at schemeshard: 72057594046644480 Call DescribeTable. data_source_instance { kind: POSTGRESQL endpoint { host: "localhost" port: 5432 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE pg_options { schema: "public" } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Expected: data_source_instance { kind: POSTGRESQL endpoint { host: "localhost" port: 5432 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE pg_options { schema: "public" } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Actual: data_source_instance { kind: POSTGRESQL endpoint { host: "localhost" port: 5432 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE pg_options { schema: "public" } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } DescribeTable result. 
GRpcStatusCode: 0 schema { columns { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } columns { name: "data_column" type { optional_type { item { type_id: STRING } } } } } error { status: SUCCESS } Call ListSplits. selects { data_source_instance { kind: POSTGRESQL endpoint { host: "localhost" port: 5432 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE pg_options { schema: "public" } } from { table: "example_1" } } CRAB Expected: selects { data_source_instance { kind: POSTGRESQL endpoint { host: "localhost" port: 5432 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE pg_options { schema: "public" } } from { table: "example_1" } } CRAB Actual: selects { data_source_instance { kind: POSTGRESQL endpoint { host: "localhost" port: 5432 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE pg_options { schema: "public" } } from { table: "example_1" } } ListSplits result. GRpcStatusCode: 0 Call ReadSplits. splits { select { data_source_instance { kind: POSTGRESQL endpoint { host: "localhost" port: 5432 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE pg_options { schema: "public" } } what { items { column { name: "data_column" type { optional_type { item { type_id: STRING } } } } } items { column { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } } } from { table: "example_1" } where { filter_typed { comparison { operation: EQ left_value { column: "filtered_column" } right_value { typed_value { type { type_id: INT32 } value { int32_value: 42 } } } } } } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL CRAB Expected: splits { select { data_source_instance { kind: POSTGRESQL endpoint { host: "localhost" port: 5432 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE pg_options { schema: "public" } } what { items { column { name: "data_column" type { optional_type { item { type_id: STRING } } } } } items { column { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } } } from { table: "example_1" } where { filter_typed { comparison { operation: EQ left_value { column: "filtered_column" } right_value { typed_value { type { type_id: INT32 } value { int32_value: 42 } } } } } } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL CRAB Actual: splits { select { data_source_instance { kind: POSTGRESQL endpoint { host: "localhost" port: 5432 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE pg_options { schema: "public" } } what { items { column { name: "data_column" type { optional_type { item { type_id: STRING } } } } } items { column { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } } } from { table: "example_1" } where { filter_typed { comparison { operation: EQ left_value { column: "filtered_column" } right_value { typed_value { type { type_id: INT32 } value { int32_value: 42 } } } } } } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL ReadSplits result. 
GRpcStatusCode: 0 Traceback (most recent call last): File "library/python/testing/yatest_common/yatest/common/process.py", line 384, in wait wait_for( File "library/python/testing/yatest_common/yatest/common/process.py", line 764, in wait_for raise TimeoutError(truncate(message, MAX_MESSAGE_LEN)) yatest.common.process.TimeoutError: 60 second(s) wait timeout has expired: Command '['/home/runner/.ya/tools/v4/8580453620/test_tool', 'run_ut', '@/home/runner/.ya/build/build_root/zvgn/004852/ydb/core/kqp/ut/federated_query/generic_ut/test-results/unittest/testing_out_stuff/chunk7/testing_out_stuff/test_tool.args']' stopped by 60 seconds timeout During handling of the above exception, another exception occurred: Traceback (most recent call last): File "devtools/ya/test/programs/test_tool/run_test/run_test.py", line 1738, in main res.wait(check_exit_code=False, timeout=run_timeout, on_timeout=timeout_callback) File "library/python/testing/yatest_common/yatest/common/process.py", line 398, in wait raise ExecutionTimeoutError(self, str(e)) yatest.common.process.ExecutionTimeoutError: (("60 second(s) wait timeout has expired: Command '['/home/runner/.ya/tools/v4/8580453620/test_tool', 'run_ut', '@/home/runner/.ya/build/build_root/zvgn/004852/ydb/core/kqp/ut/federated_query/generic_ut/test-results/unittest/testing_out_stuff/chunk7/testing_out_stuff/test_tool.args']' stopped by 60 seconds timeout",), {}) 2025-05-07 09:05:22,275 WARNING library.python.cores: Core dump dir doesn't exist: /coredumps 2025-05-07 09:05:22,275 WARNING library.python.cores: Core dump dir doesn't exist: /var/tmp/cores >> test_common.py::TestCommonYandexWithTenant::test_private_create_queue[tables_format_v0-std] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_cloud_double_create_queue[fifo-tables_format_v1] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_cloud_queues_with_iam_token[tables_format_v1-fifo] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_setup_in_cloud[tables_format_v1-fifo] >> DataShardVolatile::TwoAppendsMustBeVolatile-UseSink [GOOD] >> DataShardVolatile::VolatileCommitOnBlobStorageFailure+UseSink ------- [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/federated_query/generic_ut/unittest >> GenericFederatedQuery::ClickHouseFilterPushdown 2025-05-07 09:05:20,123 WARNING devtools.ya.test.programs.test_tool.run_test.run_test: Wrapper execution timed out 2025-05-07 09:05:20,376 WARNING devtools.ya.test.programs.test_tool.run_test.run_test: Wrapper has overrun 60 secs timeout. 
Process tree before termination: pid rss ref pdirt 368924 46.2M 46.2M 23.3M test_tool run_ut @/home/runner/.ya/build/build_root/zvgn/004856/ydb/core/kqp/ut/federated_query/generic_ut/test-results/unittest/testing_out_stuff/chunk0/testing_out_stuff/tes 369771 1.6G 1.6G 1.1G └─ ydb-core-kqp-ut-federated_query-generic_ut --trace-path-append /home/runner/.ya/build/build_root/zvgn/004856/ydb/core/kqp/ut/federated_query/generic_ut/test-results/unit Test command err: Trying to start YDB, gRPC: 13696, MsgBus: 21313 2025-05-07T09:04:25.089597Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501627096235533209:2063];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:04:25.089662Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/004856/r3tmp/tmpwJXVYl/pdisk_1.dat 2025-05-07T09:04:25.529794Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:04:25.568131Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:04:25.568279Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:04:25.571617Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 13696, node 1 2025-05-07T09:04:25.791972Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:04:25.791991Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:04:25.791996Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:04:25.792077Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:21313 TClient is connected to server localhost:21313 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:04:26.585332Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T09:04:26.613546Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T09:04:28.251768Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501627109120435763:2331], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:04:28.251886Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:04:28.756435Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:2, at schemeshard: 72057594046644480 2025-05-07T09:04:28.884768Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501627109120435884:2344], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:04:28.884854Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:04:28.885177Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501627109120435889:2347], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:04:28.890925Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:2, at schemeshard: 72057594046644480 2025-05-07T09:04:28.902873Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501627109120435891:2348], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-05-07T09:04:29.003220Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501627109120435931:2396] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:04:29.885117Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T09:04:30.089773Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501627096235533209:2063];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:04:30.089856Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:04:30.398853Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:1, at schemeshard: 72057594046644480 2025-05-07T09:04:30.909408Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480 2025-05-07T09:04:31.450399Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710678:0, at schemeshard: 72057594046644480 2025-05-07T09:04:31.938083Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710683:0, at schemeshard: 72057594046644480 2025-05-07T09:04:32.417711Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976715758:2, at schemeshard: 72057594046644480 2025-05-07T09:04:32.504147Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976715759:0, at schemeshard: 72057594046644480 2025-05-07T09:04:36.070241Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976710721:0, at schemeshard: 72057594046644480 2025-05-07T09:04:36.100187Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710724:0, at schemeshard: 72057594046644480 2025-05-07T09:04:36.110635Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710722:0, at schemeshard: 72057594046644480 2025-05-07T09:04:36.112228Z node 1 :FLAT_TX_SCHEMESHARD WARN: 
schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710723:0, at schemeshard: 72057594046644480 Call DescribeTable. data_source_instance { kind: CLICKHOUSE endpoint { host: "rc1a-d6dv17lv47v5mcop.db.yandex.net" port: 8443 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: HTTP } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Expected: data_source_instance { kind: CLICKHOUSE endpoint { host: "rc1a-d6dv17lv47v5mcop.db.yandex.net" port: 8443 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: HTTP } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Actual: data_source_instance { kind: CLICKHOUSE endpoint { host: "rc1a-d6dv17lv47v5mcop.db.yandex.net" port: 8443 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: HTTP } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } DescribeTable result. GRpcStatusCode: 0 schema { columns { name: "col1" type { type_id: UINT16 } } } error { status: SUCCESS } Call ListSplits. selects { data_source_instance { kind: CLICKHOUSE endpoint { host: "rc1a-d6dv17lv47v5mcop.db.yandex.net" port: 8443 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: HTTP } from { table: "example_1" } } CRAB Expected: selects { data_source_instance { kind: CLICKHOUSE endpoint { host: "rc1a-d6dv17lv47v5mcop.db.yandex.net" port: 8443 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: HTTP } from { table: "example_1" } } CRAB Actual: selects { data_source_instance { kind: CLICKHOUSE endpoint { host: "rc1a-d6dv17lv47v5mcop.db.yandex.net" port: 8443 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: HTTP } from { table: "example_1" } } ListSplits result. GRpcStatusCode: 0 Call ReadSplits. splits { select { data_source_instance { kind: CLICKHOUSE endpoint { host: "rc1a-d6dv17lv47v5mcop.db.yandex.net" port: ... "qwerty12345" } } use_tls: true protocol: HTTP } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Actual: data_source_instance { kind: CLICKHOUSE endpoint { host: "rc1a-d6dv17lv47v5mcop.db.yandex.net" port: 8443 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: HTTP } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } DescribeTable result. GRpcStatusCode: 0 schema { columns { name: "col1" type { type_id: UINT16 } } columns { name: "col2" type { type_id: DOUBLE } } } error { status: SUCCESS } Call ListSplits. 
selects { data_source_instance { kind: CLICKHOUSE endpoint { host: "rc1a-d6dv17lv47v5mcop.db.yandex.net" port: 8443 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: HTTP } from { table: "example_1" } } CRAB Expected: selects { data_source_instance { kind: CLICKHOUSE endpoint { host: "rc1a-d6dv17lv47v5mcop.db.yandex.net" port: 8443 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: HTTP } from { table: "example_1" } } CRAB Actual: selects { data_source_instance { kind: CLICKHOUSE endpoint { host: "rc1a-d6dv17lv47v5mcop.db.yandex.net" port: 8443 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: HTTP } from { table: "example_1" } } ListSplits result. GRpcStatusCode: 0 Call ReadSplits. splits { select { data_source_instance { kind: CLICKHOUSE endpoint { host: "rc1a-d6dv17lv47v5mcop.db.yandex.net" port: 8443 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: HTTP } what { } from { table: "example_1" } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL CRAB Expected: splits { select { data_source_instance { kind: CLICKHOUSE endpoint { host: "rc1a-d6dv17lv47v5mcop.db.yandex.net" port: 8443 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: HTTP } what { } from { table: "example_1" } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL CRAB Actual: splits { select { data_source_instance { kind: CLICKHOUSE endpoint { host: "rc1a-d6dv17lv47v5mcop.db.yandex.net" port: 8443 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: HTTP } what { } from { table: "example_1" } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL ReadSplits result. 
GRpcStatusCode: 0 Trying to start YDB, gRPC: 7972, MsgBus: 16933 2025-05-07T09:05:12.709790Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7501627300996869838:2061];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:05:12.709843Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/004856/r3tmp/tmpfO7Px4/pdisk_1.dat 2025-05-07T09:05:13.006333Z node 4 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:05:13.050977Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:05:13.051085Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:05:13.053561Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 7972, node 4 2025-05-07T09:05:13.174810Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:05:13.174837Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:05:13.174848Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:05:13.175002Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:16933 TClient is connected to server localhost:16933 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:05:14.016821Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T09:05:14.029963Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T09:05:17.714109Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7501627300996869838:2061];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:05:17.714205Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:05:17.961069Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7501627322471706978:2333], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:05:17.961222Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:05:17.981718Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:2, at schemeshard: 72057594046644480 2025-05-07T09:05:18.040825Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7501627326766674395:2346], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:05:18.040929Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:05:18.041205Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7501627326766674400:2349], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:05:18.045759Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:2, at schemeshard: 72057594046644480 2025-05-07T09:05:18.060323Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7501627326766674402:2350], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-05-07T09:05:18.117014Z node 4 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [4:7501627326766674443:2399] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:05:18.790517Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T09:05:19.561388Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:1, at schemeshard: 72057594046644480 Traceback (most recent call last): File "library/python/testing/yatest_common/yatest/common/process.py", line 384, in wait wait_for( File "library/python/testing/yatest_common/yatest/common/process.py", line 764, in wait_for raise TimeoutError(truncate(message, MAX_MESSAGE_LEN)) yatest.common.process.TimeoutError: 60 second(s) wait timeout has expired: Command '['/home/runner/.ya/tools/v4/8580453620/test_tool', 'run_ut', '@/home/runner/.ya/build/build_root/zvgn/004856/ydb/core/kqp/ut/federated_query/generic_ut/test-results/unittest/testing_out_stuff/chunk0/testing_out_stuff/test_tool.args']' stopped by 60 seconds timeout During handling of the above exception, another exception occurred: Traceback (most recent call last): File "devtools/ya/test/programs/test_tool/run_test/run_test.py", line 1738, in main res.wait(check_exit_code=False, timeout=run_timeout, on_timeout=timeout_callback) File "library/python/testing/yatest_common/yatest/common/process.py", line 398, in wait raise ExecutionTimeoutError(self, str(e)) yatest.common.process.ExecutionTimeoutError: (("60 second(s) wait timeout has expired: Command '['/home/runner/.ya/tools/v4/8580453620/test_tool', 'run_ut', '@/home/runner/.ya/build/build_root/zvgn/004856/ydb/core/kqp/ut/federated_query/generic_ut/test-results/unittest/testing_out_stuff/chunk0/testing_out_stuff/test_tool.args']' stopped by 60 seconds timeout",), {}) >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_setup_in_cloud[tables_format_v0-std] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_retryable_iam_error[tables_format_v0] |93.0%| [TA] $(B)/ydb/core/kqp/ut/federated_query/generic_ut/test-results/unittest/{meta.json ... results_accumulator.log} |93.0%| [TA] {RESULT} $(B)/ydb/core/kqp/ut/federated_query/generic_ut/test-results/unittest/{meta.json ... 
results_accumulator.log} >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_queue_counters_are_in_folder[tables_format_v0] >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_ttl_Uint64-pk_types10-all_types10-index10-Uint64--] [GOOD] >> TPersQueueTest::ReadRuleServiceTypeLimit [GOOD] >> TPersQueueTest::ReadRuleDisallowDefaultServiceType >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_ttl_Timestamp-pk_types12-all_types12-index12-Timestamp--] [GOOD] >> TPersQueueTest::CheckACLForGrpcRead [GOOD] >> TPersQueueTest::CheckKillBalancer >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_ttl_Date-pk_types13-all_types13-index13-Date--] [GOOD] >> test_common.py::TestCommonSqsYandexCloudMode::test_private_create_queue[tables_format_v1-fifo] >> DataShardVolatile::DistributedWriteAsymmetricExecute [GOOD] >> DataShardVolatile::DistributedWriteThenDropTable >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_ttl_Datetime-pk_types11-all_types11-index11-Datetime--] [GOOD] >> TPersQueueTest::WhenTheTopicIsDeletedAfterDecompressingTheData_Compressed [GOOD] >> TPersQueueTest::WhenTheTopicIsDeletedAfterDecompressingTheData_Uncompressed |93.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/cloud/py3test >> test_common.py::TestCommonYandexWithPath::test_private_create_queue[tables_format_v1-std] [GOOD] >> test_common.py::TestCommonYandexWithPath::test_private_queue_recreation[tables_format_v0-std] [GOOD] >> test_common.py::TestCommonSqsYandexCloudMode::test_private_create_queue[tables_format_v0-std] [GOOD] |93.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_queue_counters.py::TestYmqQueueCounters::test_sqs_action_counters [GOOD] >> test_common.py::TestCommonYandexWithTenant::test_private_queue_recreation[tables_format_v0-fifo] |93.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_queue_counters.py::TestYmqQueueCounters::test_counters_when_sending_duplicates [GOOD] >> TFstClassSrcIdPQTest::NoMapping [GOOD] >> TFstClassSrcIdPQTest::ProperPartitionSelected >> TPersQueueTest::SetupWriteSession [GOOD] >> TPersQueueTest::StoreNoMoreThanXSourceIDs >> TopicService::ThereAreGapsInTheOffsetRanges [GOOD] >> test_common.py::TestCommonYandexWithTenant::test_private_queue_recreation[tables_format_v1-fifo] [GOOD] >> test_common.py::TestCommonYandexWithTenant::test_private_queue_recreation[tables_format_v1-std] >> DataShardVolatile::VolatileCommitOnBlobStorageFailure+UseSink [GOOD] >> DataShardVolatile::VolatileCommitOnBlobStorageFailure-UseSink |93.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_not_throttling_with_custom_queue_name[fifo-tables_format_v1] [GOOD] >> TopicService::OnePartitionAndNoGapsInTheOffsets |93.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_list_queues_for_unknown_cloud[tables_format_v1] [GOOD] |93.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/dump_restore/py3test >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_ttl_Uint64-pk_types10-all_types10-index10-Uint64--] [GOOD] |93.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/dump_restore/py3test >> 
test_dump_restore.py::TestDumpRestore::test_dump_restore[table_ttl_Timestamp-pk_types12-all_types12-index12-Timestamp--] [GOOD] >> test_common.py::TestCommonSqsYandexCloudMode::test_private_queue_recreation[tables_format_v1-fifo] >> test_yandex_cloud_queue_counters.py::TestYmqQueueCounters::test_counters_when_reading_from_empty_queue >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_empty_access_key_id[tables_format_v0] |93.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_cloud_queues_with_iam_token[tables_format_v0-std] [GOOD] >> BackupRestore::RestoreExternalDataSourceWithoutSecret [GOOD] >> BackupRestore::TestAllIndexTypes-EIndexTypeGlobal >> TPersQueueTest::WhenTheTopicIsDeletedBeforeDataIsDecompressed_Compressed [GOOD] >> TPersQueueTest::WhenTheTopicIsDeletedAfterReadingTheData_Compressed |93.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/cloud/py3test >> test_common.py::TestCommonYandexWithPath::test_private_queue_recreation[tables_format_v1-std] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_mechanics_in_cloud[tables_format_v0-tables_format_v1-fifo] >> DataShardVolatile::DistributedWriteThenDropTable [GOOD] >> DataShardVolatile::DistributedWriteThenCopyTable >> test_yandex_cloud_queue_counters.py::TestYmqQueueCounters::test_ymq_send_read_delete [GOOD] >> TPersQueueCommonTest::TestWriteWithRateLimiterWithUserPayloadRateLimit [GOOD] >> TPersQueueCommonTest::TestLimiterLimitsWithBlobsRateLimit >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_count_queues[tables_format_v0] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_message_counters_in_cloud[tables_format_v0-fifo] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_empty_auth_header >> test_common.py::TestCommonYandexWithTenant::test_private_create_queue[tables_format_v1-fifo] |93.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/cloud/py3test >> test_common.py::TestCommonYandexWithTenant::test_private_create_queue[tables_format_v0-std] [GOOD] |93.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/dump_restore/py3test >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_ttl_Datetime-pk_types11-all_types11-index11-Datetime--] [GOOD] >> test_common.py::TestCommonSqsYandexCloudMode::test_private_queue_recreation[tables_format_v0-fifo] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_message_counters_in_cloud[tables_format_v1-fifo] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_message_counters_in_cloud[tables_format_v1-std] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_not_throttling_with_custom_queue_name[std-tables_format_v0] >> DataShardVolatile::VolatileCommitOnBlobStorageFailure-UseSink [GOOD] >> DataShardVolatile::VolatileTxAbortedOnSplit >> BackupRestore::TestAllIndexTypes-EIndexTypeGlobal [GOOD] >> BackupRestore::PrefixedVectorIndex >> TPersQueueTest::ReadRuleDisallowDefaultServiceType [GOOD] >> TPersQueueTest::ReadRuleServiceTypeMigration >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_mechanics_in_cloud[tables_format_v0-tables_format_v0-fifo] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_mechanics_in_cloud[tables_format_v0-tables_format_v0-std] |93.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/dump_restore/py3test >> 
test_dump_restore.py::TestDumpRestore::test_dump_restore[table_ttl_Date-pk_types13-all_types13-index13-Date--] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_cloud_queues_with_iam_token[tables_format_v1-fifo] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_cloud_queues_with_iam_token[tables_format_v1-std] |93.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_cloud_double_create_queue[fifo-tables_format_v1] [GOOD] >> ColumnShardTiers::DSConfigsStub [GOOD] >> test_yandex_cloud_queue_counters.py::TestYmqQueueCounters::test_purge_queue_counters >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_mechanics_in_cloud[tables_format_v1-tables_format_v1-fifo] >> DataShardVolatile::DistributedWriteThenCopyTable [GOOD] >> DataShardVolatile::DistributedWriteThenBulkUpsert ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest >> ColumnShardTiers::DSConfigsStub [GOOD] Test command err: 2025-05-07T09:03:57.573139Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:105:2151], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T09:03:57.573268Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T09:03:57.573424Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002325/r3tmp/tmpLAErR2/pdisk_1.dat TServer::EnableGrpc on GrpcPort 28536, node 1 TClient is connected to server localhost:24255 2025-05-07T09:03:58.442754Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-05-07T09:03:58.478651Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:03:58.487135Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:03:58.487191Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:03:58.487221Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:03:58.487444Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-05-07T09:03:58.535938Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:03:58.536886Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:03:58.550310Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:03:58.682695Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnStore, opId: 281474976715657:0, at schemeshard: 72057594046644480 Status: 53 TxId: 281474976715657 SchemeShardStatus: 1 SchemeShardTabletId: 72057594046644480 PathId: 2 2025-05-07T09:03:58.807734Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:744:2625];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-05-07T09:03:58.808035Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:744:2625];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-05-07T09:03:58.808294Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:744:2625];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-05-07T09:03:58.808380Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:744:2625];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-05-07T09:03:58.808448Z node 1 :TX_COLUMNSHARD WARN: 
log.cpp:784: tablet_id=72075186224037888;self_id=[1:744:2625];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-05-07T09:03:58.808544Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:744:2625];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanInsertionDedup; 2025-05-07T09:03:58.808633Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:744:2625];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-05-07T09:03:58.808750Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:744:2625];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-05-07T09:03:58.808836Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:744:2625];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-05-07T09:03:58.808911Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:744:2625];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-05-07T09:03:58.808986Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:744:2625];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-05-07T09:03:58.809062Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:744:2625];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-05-07T09:03:58.828499Z node 1 :TX_TIERING INFO: log.cpp:784: fline=manager.cpp:128;event=start_subscribing_metadata; 2025-05-07T09:03:58.830697Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-05-07T09:03:58.830804Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-05-07T09:03:58.830968Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-05-07T09:03:58.831044Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-05-07T09:03:58.831270Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-05-07T09:03:58.831320Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-05-07T09:03:58.831435Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-05-07T09:03:58.831483Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanInsertionDedup;id=CleanInsertionDedup; 2025-05-07T09:03:58.831624Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanInsertionDedup;id=8; 2025-05-07T09:03:58.831682Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-05-07T09:03:58.831754Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-05-07T09:03:58.831813Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-05-07T09:03:58.832059Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-05-07T09:03:58.832170Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-05-07T09:03:58.832400Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-05-07T09:03:58.832448Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-05-07T09:03:58.832609Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-05-07T09:03:58.832659Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-05-07T09:03:58.832759Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-05-07T09:03:58.832862Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-05-07T09:03:58.832922Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-05-07T09:03:58.834823Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-05-07T09:03:58.834895Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-05-07T09:03:58.859246Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:748:2628];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-05-07T09:03:58.859326Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:748:2628];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-05-07T09:03:58.859491Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:748:2628];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-05-07T09:03:5 ... :secretKey;}; Initialization finished REQUEST=DROP EXTERNAL DATA SOURCE `/Root/tier1`;EXPECTATION=0;WAITING=1 2025-05-07T09:05:08.720804Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:3545:4682] txid# 281474976715753, issues: { message: "Other entities depend on this data source, please remove them at the beginning: /Root/olapStore/olapTable" severity: 1 } REQUEST=DROP EXTERNAL DATA SOURCE `/Root/tier1`;RESULT=
: Error: Execution, code: 1060
:1:27: Error: Executing DROP OBJECT EXTERNAL_DATA_SOURCE
: Error:
: Error: Other entities depend on this data source, please remove them at the beginning: /Root/olapStore/olapTable, code: 2003 , code: 2003 ;EXPECTATION=0 FINISHED_REQUEST=DROP EXTERNAL DATA SOURCE `/Root/tier1`;EXPECTATION=0;WAITING=1 REQUEST=DROP TABLE `/Root/olapStore/olapTable`;EXPECTATION=1;WAITING=1 2025-05-07T09:05:20.721156Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpDropTable, opId: 281474976715764:0, at schemeshard: 72057594046644480 2025-05-07T09:05:22.030863Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715764;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715764; 2025-05-07T09:05:22.031242Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715764;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715764; 2025-05-07T09:05:22.031791Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037890;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715764;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715764; REQUEST=DROP TABLE `/Root/olapStore/olapTable`;RESULT=
: Info: Execution, code: 1060
:1:12: Info: Executing DROP TABLE
: Info: Success, code: 4 ;EXPECTATION=1 FINISHED_REQUEST=DROP TABLE `/Root/olapStore/olapTable`;EXPECTATION=1;WAITING=1 REQUEST=DROP EXTERNAL DATA SOURCE `/Root/tier1`;EXPECTATION=1;WAITING=1 2025-05-07T09:05:32.551617Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=fetcher.h:149;event=object_deleted;path=/Root/tier1; 2025-05-07T09:05:32.551713Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=fetcher.h:149;event=object_deleted;path=/Root/tier1; 2025-05-07T09:05:32.551753Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=fetcher.h:149;event=object_deleted;path=/Root/tier1; 2025-05-07T09:05:32.552088Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=fetcher.h:149;event=object_deleted;path=/Root/tier1; 2025-05-07T09:05:32.552596Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:94;component=tiering_manager;event=object_deleted;name=/Root/tier1; 2025-05-07T09:05:32.552662Z node 1 :TX_TIERING INFO: log.cpp:784: fline=manager.cpp:279;event=update_tier_config;name=/Root/tier1;tablet=0;has_config=0; 2025-05-07T09:05:32.552724Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier1' stopped at tablet 0 2025-05-07T09:05:32.552786Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:142 :Restarting tier '/Root/tier2' at tablet 0 2025-05-07T09:05:32.552837Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier2' stopped at tablet 0 2025-05-07T09:05:32.552913Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:162 :Tier '/Root/tier2' started at tablet 0 2025-05-07T09:05:32.552987Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:205;event=configs_updated;configs=TIERS={id=/Root/tier2;has_config=1};SECRETS={USId:root@builtin:accessKey;USId:root@builtin:secretKey;}; 2025-05-07T09:05:32.553528Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:94;component=tiering_manager;event=object_deleted;name=/Root/tier1; 2025-05-07T09:05:32.553579Z node 1 :TX_TIERING INFO: log.cpp:784: fline=manager.cpp:279;event=update_tier_config;name=/Root/tier1;tablet=72075186224037888;has_config=0; 2025-05-07T09:05:32.553617Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier1' stopped at tablet 72075186224037888 2025-05-07T09:05:32.553655Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:142 :Restarting tier '/Root/tier2' at tablet 72075186224037888 2025-05-07T09:05:32.553688Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier2' stopped at tablet 72075186224037888 2025-05-07T09:05:32.553736Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:162 :Tier '/Root/tier2' started at tablet 72075186224037888 2025-05-07T09:05:32.553788Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:205;event=configs_updated;configs=TIERS={id=/Root/tier2;has_config=1};SECRETS={USId:root@builtin:accessKey;USId:root@builtin:secretKey;}; 2025-05-07T09:05:32.553827Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:94;component=tiering_manager;event=object_deleted;name=/Root/tier1; 2025-05-07T09:05:32.553855Z node 1 :TX_TIERING INFO: log.cpp:784: fline=manager.cpp:279;event=update_tier_config;name=/Root/tier1;tablet=72075186224037889;has_config=0; 2025-05-07T09:05:32.553886Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier1' stopped at tablet 72075186224037889 2025-05-07T09:05:32.553917Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:142 :Restarting tier '/Root/tier2' at tablet 72075186224037889 2025-05-07T09:05:32.553943Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier2' stopped at tablet 72075186224037889 
2025-05-07T09:05:32.554003Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:162 :Tier '/Root/tier2' started at tablet 72075186224037889 2025-05-07T09:05:32.554045Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:205;event=configs_updated;configs=TIERS={id=/Root/tier2;has_config=1};SECRETS={USId:root@builtin:accessKey;USId:root@builtin:secretKey;}; 2025-05-07T09:05:32.554084Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:94;component=tiering_manager;event=object_deleted;name=/Root/tier1; 2025-05-07T09:05:32.554114Z node 1 :TX_TIERING INFO: log.cpp:784: fline=manager.cpp:279;event=update_tier_config;name=/Root/tier1;tablet=72075186224037890;has_config=0; 2025-05-07T09:05:32.554148Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier1' stopped at tablet 72075186224037890 2025-05-07T09:05:32.554183Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:142 :Restarting tier '/Root/tier2' at tablet 72075186224037890 2025-05-07T09:05:32.554211Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier2' stopped at tablet 72075186224037890 2025-05-07T09:05:32.554248Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:162 :Tier '/Root/tier2' started at tablet 72075186224037890 2025-05-07T09:05:32.554289Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:205;event=configs_updated;configs=TIERS={id=/Root/tier2;has_config=1};SECRETS={USId:root@builtin:accessKey;USId:root@builtin:secretKey;}; 2025-05-07T09:05:32.555207Z node 1 :TX_TIERING DEBUG: log.cpp:784: tablet_id=72075186224037888;self_id=[1:744:2625];ev=NKikimr::NColumnShard::TEvPrivate::TEvTieringModified;fline=tiering.cpp:242;event=refresh_tiering;has_tiering=0;tiers=0;had_tiering_before=1; 2025-05-07T09:05:32.555327Z node 1 :TX_TIERING DEBUG: log.cpp:784: tablet_id=72075186224037889;self_id=[1:748:2628];ev=NKikimr::NColumnShard::TEvPrivate::TEvTieringModified;fline=tiering.cpp:242;event=refresh_tiering;has_tiering=0;tiers=0;had_tiering_before=1; 2025-05-07T09:05:32.555495Z node 1 :TX_TIERING DEBUG: log.cpp:784: tablet_id=72075186224037890;self_id=[1:761:2635];ev=NKikimr::NColumnShard::TEvPrivate::TEvTieringModified;fline=tiering.cpp:242;event=refresh_tiering;has_tiering=0;tiers=0;had_tiering_before=1; REQUEST=DROP EXTERNAL DATA SOURCE `/Root/tier1`;RESULT=;EXPECTATION=1 FINISHED_REQUEST=DROP EXTERNAL DATA SOURCE `/Root/tier1`;EXPECTATION=1;WAITING=1 REQUEST=DROP EXTERNAL DATA SOURCE `/Root/tier2`;EXPECTATION=1;WAITING=1 2025-05-07T09:05:44.091719Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=fetcher.h:149;event=object_deleted;path=/Root/tier2; 2025-05-07T09:05:44.092315Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=fetcher.h:149;event=object_deleted;path=/Root/tier2; 2025-05-07T09:05:44.092374Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=fetcher.h:149;event=object_deleted;path=/Root/tier2; 2025-05-07T09:05:44.092415Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=fetcher.h:149;event=object_deleted;path=/Root/tier2; 2025-05-07T09:05:44.093079Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:94;component=tiering_manager;event=object_deleted;name=/Root/tier2; 2025-05-07T09:05:44.093144Z node 1 :TX_TIERING INFO: log.cpp:784: fline=manager.cpp:279;event=update_tier_config;name=/Root/tier2;tablet=72075186224037888;has_config=0; 2025-05-07T09:05:44.093203Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier2' stopped at tablet 72075186224037888 2025-05-07T09:05:44.093298Z node 1 :TX_TIERING DEBUG: log.cpp:784: 
fline=manager.cpp:205;event=configs_updated;configs=TIERS=;SECRETS={USId:root@builtin:accessKey;USId:root@builtin:secretKey;}; 2025-05-07T09:05:44.093345Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:94;component=tiering_manager;event=object_deleted;name=/Root/tier2; 2025-05-07T09:05:44.093366Z node 1 :TX_TIERING INFO: log.cpp:784: fline=manager.cpp:279;event=update_tier_config;name=/Root/tier2;tablet=72075186224037889;has_config=0; 2025-05-07T09:05:44.093390Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier2' stopped at tablet 72075186224037889 2025-05-07T09:05:44.093421Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:205;event=configs_updated;configs=TIERS=;SECRETS={USId:root@builtin:accessKey;USId:root@builtin:secretKey;}; 2025-05-07T09:05:44.093555Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:94;component=tiering_manager;event=object_deleted;name=/Root/tier2; 2025-05-07T09:05:44.093577Z node 1 :TX_TIERING INFO: log.cpp:784: fline=manager.cpp:279;event=update_tier_config;name=/Root/tier2;tablet=72075186224037890;has_config=0; 2025-05-07T09:05:44.093600Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier2' stopped at tablet 72075186224037890 2025-05-07T09:05:44.093650Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:205;event=configs_updated;configs=TIERS=;SECRETS={USId:root@builtin:accessKey;USId:root@builtin:secretKey;}; 2025-05-07T09:05:44.094195Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:94;component=tiering_manager;event=object_deleted;name=/Root/tier2; 2025-05-07T09:05:44.094245Z node 1 :TX_TIERING INFO: log.cpp:784: fline=manager.cpp:279;event=update_tier_config;name=/Root/tier2;tablet=0;has_config=0; 2025-05-07T09:05:44.094287Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier2' stopped at tablet 0 2025-05-07T09:05:44.094359Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:205;event=configs_updated;configs=TIERS=;SECRETS={USId:root@builtin:accessKey;USId:root@builtin:secretKey;}; 2025-05-07T09:05:44.095013Z node 1 :TX_TIERING DEBUG: log.cpp:784: tablet_id=72075186224037888;self_id=[1:744:2625];ev=NKikimr::NColumnShard::TEvPrivate::TEvTieringModified;fline=tiering.cpp:242;event=refresh_tiering;has_tiering=0;tiers=0;had_tiering_before=0; 2025-05-07T09:05:44.095122Z node 1 :TX_TIERING DEBUG: log.cpp:784: tablet_id=72075186224037889;self_id=[1:748:2628];ev=NKikimr::NColumnShard::TEvPrivate::TEvTieringModified;fline=tiering.cpp:242;event=refresh_tiering;has_tiering=0;tiers=0;had_tiering_before=0; 2025-05-07T09:05:44.095183Z node 1 :TX_TIERING DEBUG: log.cpp:784: tablet_id=72075186224037890;self_id=[1:761:2635];ev=NKikimr::NColumnShard::TEvPrivate::TEvTieringModified;fline=tiering.cpp:242;event=refresh_tiering;has_tiering=0;tiers=0;had_tiering_before=0; REQUEST=DROP EXTERNAL DATA SOURCE `/Root/tier2`;RESULT=;EXPECTATION=1 FINISHED_REQUEST=DROP EXTERNAL DATA SOURCE `/Root/tier2`;EXPECTATION=1;WAITING=1 >> TFstClassSrcIdPQTest::ProperPartitionSelected [GOOD] >> TPQCompatTest::DiscoverTopics |93.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/cloud/py3test >> test_common.py::TestCommonSqsYandexCloudMode::test_private_create_queue[tables_format_v0-std] [GOOD] |93.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_setup_in_cloud[tables_format_v0-std] [GOOD] >> TopicService::OnePartitionAndNoGapsInTheOffsets [GOOD] >> 
test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_cloud_queues_with_iam_token[tables_format_v1-std] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_setup_in_cloud[tables_format_v1-fifo] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_setup_in_cloud[tables_format_v1-std] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_queue_counters_are_in_folder[tables_format_v0] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_queue_counters_are_in_folder[tables_format_v1] >> test_common.py::TestCommonYandexWithTenant::test_private_queue_recreation[tables_format_v1-std] [GOOD] >> TPersQueueTest::WhenTheTopicIsDeletedAfterDecompressingTheData_Uncompressed [GOOD] >> TPersQueueTest::TestWriteStat >> TopicService::MultiplePartitionsAndNoGapsInTheOffsets >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_mechanics_in_cloud[tables_format_v1-tables_format_v0-fifo] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_queue_counters_are_in_folder[tables_format_v1] [GOOD] >> DataShardVolatile::VolatileTxAbortedOnSplit [GOOD] >> DataShardVolatile::VolatileTxAbortedOnDrop >> BackupRestore::PrefixedVectorIndex [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_yc_events_processor[tables_format_v0] |93.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/cloud/py3test >> test_common.py::TestCommonYandexWithPath::test_private_queue_recreation[tables_format_v0-std] [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/backup_ut/unittest >> BackupRestore::PrefixedVectorIndex [GOOD] Test command err: 2025-05-07T09:03:34.380607Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501626879927061621:2075];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:03:34.380669Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0020ae/r3tmp/tmpP2goGG/pdisk_1.dat 2025-05-07T09:03:35.479799Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T09:03:35.699786Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:03:35.716091Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:03:35.726164Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:03:35.738290Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 26330, node 1 2025-05-07T09:03:37.041982Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:03:37.042002Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:03:37.042009Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:03:37.042136Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable 
configuration TClient is connected to server localhost:2123 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:03:39.113187Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:03:39.317596Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501626901401899198:2340], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:03:39.317678Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:03:39.381003Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501626879927061621:2075];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:03:39.381062Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:03:39.851987Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 Backup "/Root" to "/home/runner/.ya/build/build_root/zvgn/0020ae/r3tmp/tmpzhRPuf/"Create temporary directory "/Root/~backup_20250507T090340" in databaseProcess "/home/runner/.ya/build/build_root/zvgn/0020ae/r3tmp/tmpzhRPuf/table"Copy tables: { src: "/Root/table", dst: "/Root/~backup_20250507T090340/table" }Backup table "/Root/~backup_20250507T090340/table" to "/home/runner/.ya/build/build_root/zvgn/0020ae/r3tmp/tmpzhRPuf/table"Describe table "/Root/~backup_20250507T090340/table"Write scheme into "/home/runner/.ya/build/build_root/zvgn/0020ae/r3tmp/tmpzhRPuf/table/scheme.pb"Describe table "/Root/table"Write ACL into "/home/runner/.ya/build/build_root/zvgn/0020ae/r3tmp/tmpzhRPuf/table/permissions.pb"Read table "/Root/~backup_20250507T090340/table"Write data into "/home/runner/.ya/build/build_root/zvgn/0020ae/r3tmp/tmpzhRPuf/table/data_00.csv"Drop table "/Root/~backup_20250507T090340/table"2025-05-07T09:03:40.370810Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037889 not found Remove temporary directory "/Root/~backup_20250507T090340" in database2025-05-07T09:03:40.396242Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpRmDir, opId: 281474976710664:0, at schemeshard: 72057594046644480 Backup completed successfully2025-05-07T09:03:40.419767Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501626905696867097:2391], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:03:40.419851Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } Restore "/home/runner/.ya/build/build_root/zvgn/0020ae/r3tmp/tmpzhRPuf/" to "/Root"2025-05-07T09:03:40.504355Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037888 not found Resolved db base path: "/Root"Restore folder "/home/runner/.ya/build/build_root/zvgn/0020ae/r3tmp/tmpzhRPuf/" to "/Root"Process "/home/runner/.ya/build/build_root/zvgn/0020ae/r3tmp/tmpzhRPuf/table"Read scheme from "/home/runner/.ya/build/build_root/zvgn/0020ae/r3tmp/tmpzhRPuf/table/scheme.pb"Restore table "/home/runner/.ya/build/build_root/zvgn/0020ae/r3tmp/tmpzhRPuf/table" to "/Root/table"2025-05-07T09:03:40.556733Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 Created "/Root/table"Read data from "/home/runner/.ya/build/build_root/zvgn/0020ae/r3tmp/tmpzhRPuf/table/data_00.csv"Restore ACL "/home/runner/.ya/build/build_root/zvgn/0020ae/r3tmp/tmpzhRPuf/table" to "/Root/table"Read ACL from "/home/runner/.ya/build/build_root/zvgn/0020ae/r3tmp/tmpzhRPuf/table/permissions.pb"2025-05-07T09:03:40.645926Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710667:0, at schemeshard: 72057594046644480 Restore completed successfully 2025-05-07T09:03:41.993177Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7501626907513641378:2073];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:03:41.993254Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0020ae/r3tmp/tmpf4ssrI/pdisk_1.dat 2025-05-07T09:03:42.072807Z node 4 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:03:42.094511Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:03:42.094577Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:03:42.096745Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 25193, node 4 2025-05-07T09:03:42.132786Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:03:42.132812Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:03:42.132825Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:03:42.132941Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26795 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:03:42.286687Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:03:44.220306Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7501626920398544271:2336], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:03:44.220382Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:03:44.237612Z node 4 :FLAT_TX_SCHEMESHARD WAR ... tsActor;event=timeout;self_id=[4:7501627403624115455:2076];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:05:41.591048Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:05:41.606558Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710759:0, at schemeshard: 72057594046644480 Restore ACL "/home/runner/.ya/build/build_root/zvgn/0020ae/r3tmp/tmpSOeTM1/table" to "/Root/table"Read ACL from "/home/runner/.ya/build/build_root/zvgn/0020ae/r3tmp/tmpSOeTM1/table/permissions.pb"2025-05-07T09:05:41.843182Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715669:0, at schemeshard: 72057594046644480 Restore completed successfully 2025-05-07T09:05:43.899196Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7501627431122296432:2078];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:05:43.899262Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0020ae/r3tmp/tmpKJgYOf/pdisk_1.dat 2025-05-07T09:05:44.245327Z node 7 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:05:44.311920Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:05:44.312012Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:05:44.315688Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 8565, node 7 2025-05-07T09:05:44.541879Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:05:44.541936Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:05:44.541945Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:05:44.542120Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:9373 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:05:44.948324Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:05:48.771810Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7501627452597133998:2338], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:05:48.771921Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:05:48.812713Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-05-07T09:05:48.935310Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[7:7501627431122296432:2078];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:05:48.935444Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Backup "/Root" to "/home/runner/.ya/build/build_root/zvgn/0020ae/r3tmp/tmpPMaADO/"Create temporary directory "/Root/~backup_20250507T090549" in databaseProcess "/home/runner/.ya/build/build_root/zvgn/0020ae/r3tmp/tmpPMaADO/table"Copy tables: { src: "/Root/table", dst: "/Root/~backup_20250507T090549/table" }Backup table "/Root/~backup_20250507T090549/table" to "/home/runner/.ya/build/build_root/zvgn/0020ae/r3tmp/tmpPMaADO/table"Describe table "/Root/~backup_20250507T090549/table"Write scheme into "/home/runner/.ya/build/build_root/zvgn/0020ae/r3tmp/tmpPMaADO/table/scheme.pb"Describe table "/Root/table"Write ACL into "/home/runner/.ya/build/build_root/zvgn/0020ae/r3tmp/tmpPMaADO/table/permissions.pb"Read table "/Root/~backup_20250507T090549/table"Write data into "/home/runner/.ya/build/build_root/zvgn/0020ae/r3tmp/tmpPMaADO/table/data_00.csv"Drop table "/Root/~backup_20250507T090549/table"Remove temporary directory "/Root/~backup_20250507T090549" in database2025-05-07T09:05:49.748127Z node 7 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 7, TabletId: 72075186224037892 not found 2025-05-07T09:05:49.748167Z node 7 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 7, TabletId: 72075186224037893 not found 2025-05-07T09:05:49.748187Z node 7 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 7, TabletId: 72075186224037894 not found 2025-05-07T09:05:49.764404Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpRmDir, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-05-07T09:05:49.772781Z node 7 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 7, TabletId: 72075186224037895 not found Backup completed successfully2025-05-07T09:05:49.792662Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7501627456892102596:2414], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:05:49.792780Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } Restore "/home/runner/.ya/build/build_root/zvgn/0020ae/r3tmp/tmpPMaADO/" to "/Root"2025-05-07T09:05:49.976227Z node 7 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 7, TabletId: 72075186224037890 not found 2025-05-07T09:05:49.976265Z node 7 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 7, TabletId: 72075186224037889 not found Resolved db base path: "/Root"2025-05-07T09:05:49.982492Z node 7 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 7, TabletId: 72075186224037888 not found 2025-05-07T09:05:49.989598Z node 7 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 7, TabletId: 72075186224037891 not found Restore folder "/home/runner/.ya/build/build_root/zvgn/0020ae/r3tmp/tmpPMaADO/" to "/Root"Process "/home/runner/.ya/build/build_root/zvgn/0020ae/r3tmp/tmpPMaADO/table"Read scheme from "/home/runner/.ya/build/build_root/zvgn/0020ae/r3tmp/tmpPMaADO/table/scheme.pb"Restore table "/home/runner/.ya/build/build_root/zvgn/0020ae/r3tmp/tmpPMaADO/table" to "/Root/table"2025-05-07T09:05:50.022642Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 Created "/Root/table"Read data from "/home/runner/.ya/build/build_root/zvgn/0020ae/r3tmp/tmpPMaADO/table/data_00.csv"Restore index "byValue" on "/Root/table"2025-05-07T09:05:50.192858Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710758:2, at schemeshard: 72057594046644480 2025-05-07T09:05:50.355871Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710759:0, at schemeshard: 72057594046644480 2025-05-07T09:05:50.467883Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710760:0, at schemeshard: 72057594046644480 2025-05-07T09:05:50.649473Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710763:0, at schemeshard: 72057594046644480 2025-05-07T09:05:50.658433Z node 7 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 7, TabletId: 72075186224037900 not found 2025-05-07T09:05:50.736274Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710765:0, at schemeshard: 72057594046644480 2025-05-07T09:05:50.831558Z node 7 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 7, TabletId: 72075186224037901 not found 2025-05-07T09:05:50.834043Z node 7 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 7, TabletId: 72075186224037902 not found Restore 
ACL "/home/runner/.ya/build/build_root/zvgn/0020ae/r3tmp/tmpPMaADO/table" to "/Root/table"Read ACL from "/home/runner/.ya/build/build_root/zvgn/0020ae/r3tmp/tmpPMaADO/table/permissions.pb"2025-05-07T09:05:50.976256Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715669:0, at schemeshard: 72057594046644480 Restore completed successfully >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_setup_in_cloud[tables_format_v1-std] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_message_counters_in_cloud[tables_format_v1-std] [GOOD] |93.1%| [TA] $(B)/ydb/services/ydb/backup_ut/test-results/unittest/{meta.json ... results_accumulator.log} |93.1%| [TA] {RESULT} $(B)/ydb/services/ydb/backup_ut/test-results/unittest/{meta.json ... results_accumulator.log} >> data_migration_when_alter_ttl.py::TestDataMigrationWhenAlterTtl::test >> tier_delete.py::TestTierDelete::test_delete_s3_ttl >> data_correctness.py::TestDataCorrectness::test >> TPersQueueTest::CheckKillBalancer [GOOD] >> TPersQueueTest::CheckDeleteTopic |93.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_queue_counters.py::TestYmqQueueCounters::test_ymq_send_read_delete [GOOD] >> test_common.py::TestCommonSqsYandexCloudMode::test_private_create_queue[tables_format_v1-fifo] [GOOD] >> test_common.py::TestCommonSqsYandexCloudMode::test_private_create_queue[tables_format_v1-std] >> DataShardVolatile::DistributedWriteThenBulkUpsert [GOOD] >> DataShardVolatile::DistributedWriteThenBulkUpsertWithCdc >> ColumnShardTiers::DSConfigs [GOOD] >> ColumnShardTiers::DSConfigsWithQueryServiceDdl [GOOD] >> test_update_script_tables.py::TestUpdateScriptTablesYdb::test_recreate_tables[ALTER TABLE {} DROP COLUMN syntax-`.metadata/script_executions`] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest >> ColumnShardTiers::DSConfigs [GOOD] Test command err: 2025-05-07T09:03:58.000300Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:105:2151], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T09:03:58.000418Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T09:03:58.000569Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0022ff/r3tmp/tmpBV3N8o/pdisk_1.dat TServer::EnableGrpc on GrpcPort 7895, node 1 TClient is connected to server localhost:30398 2025-05-07T09:03:58.469020Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-05-07T09:03:58.500810Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:03:58.505734Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:03:58.505808Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:03:58.505850Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:03:58.506129Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-05-07T09:03:58.551426Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:03:58.551557Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:03:58.562777Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected Initialization finished REQUEST= UPSERT OBJECT `accessKey` (TYPE SECRET) WITH (value = `secretAccessKey`); UPSERT OBJECT `secretKey` (TYPE SECRET) WITH (value = `fakeSecret`); ;EXPECTATION=1;WAITING=1 2025-05-07T09:04:10.564382Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:753:2631], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:04:10.564520Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:04:10.567286Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:2, at schemeshard: 72057594046644480 2025-05-07T09:04:10.713513Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:870:2709], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:04:10.713627Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:04:10.713888Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:875:2714], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:04:10.717862Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:2, at schemeshard: 72057594046644480 2025-05-07T09:04:10.848402Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:877:2716], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-05-07T09:04:11.133112Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:971:2781] txid# 281474976715661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:04:11.645750Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-05-07T09:04:12.071937Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:1, at schemeshard: 72057594046644480 2025-05-07T09:04:12.796664Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715670:0, at schemeshard: 72057594046644480 2025-05-07T09:04:13.518810Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715673:0, at schemeshard: 72057594046644480 2025-05-07T09:04:13.967160Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715676:0, at schemeshard: 72057594046644480 2025-05-07T09:04:15.115725Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710758:2, at schemeshard: 72057594046644480 2025-05-07T09:04:15.409080Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710759:0, at schemeshard: 72057594046644480 REQUEST= UPSERT OBJECT `accessKey` (TYPE SECRET) WITH (value = `secretAccessKey`); UPSERT OBJECT `secretKey` (TYPE SECRET) WITH (value = `fakeSecret`); ;RESULT=;EXPECTATION=1 FINISHED_REQUEST= UPSERT OBJECT `accessKey` (TYPE SECRET) WITH (value = `secretAccessKey`); UPSERT OBJECT `secretKey` (TYPE SECRET) WITH (value = `fakeSecret`); ;EXPECTATION=1;WAITING=1 REQUEST= CREATE EXTERNAL DATA SOURCE `/Root/tier1` WITH ( SOURCE_TYPE="ObjectStorage", LOCATION="http://fake.fake/abc1", AUTH_METHOD="AWS", AWS_ACCESS_KEY_ID_SECRET_NAME="accessKey", AWS_SECRET_ACCESS_KEY_SECRET_NAME="secretKey", AWS_REGION="ru-central1" ); ;EXPECTATION=1;WAITING=1 2025-05-07T09:04:30.082670Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976715702:0, at schemeshard: 72057594046644480 REQUEST= CREATE EXTERNAL DATA SOURCE `/Root/tier1` WITH ( SOURCE_TYPE="ObjectStorage", LOCATION="http://fake.fake/abc1", AUTH_METHOD="AWS", AWS_ACCESS_KEY_ID_SECRET_NAME="accessKey", AWS_SECRET_ACCESS_KEY_SECRET_NAME="secretKey", AWS_REGION="ru-central1" ); ;RESULT=;EXPECTATION=1 2025-05-07T09:04:30.603287Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7261: Cannot get 
console configs 2025-05-07T09:04:30.603374Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded FINISHED_REQUEST= CREATE EXTERNAL DATA SOURCE `/Root/tier1` WITH ( SOURCE_TYPE="ObjectStorage", LOCATION="http://fake.fake/abc1", AUTH_METHOD="AWS", AWS_ACCESS_KEY_ID_SECRET_NAME="accessKey", AWS_SECRET_ACCESS_KEY_SECRET_NAME="secretKey", AWS_REGION="ru-central1" ); ;EXPECTATION=1;WAITING=1 2025-05-07T09:04:32.180763Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:215;event=skip_tier_manager_start;tier=/Root/tier1;has_secrets=1;tier_config=0; 2025-05-07T09:04:32.180855Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:196;event=skip_tier_manager_reloading;tier=/Root/tier1;has_secrets=1;found_tier_config=1; 2025-05-07T09:04:32.180913Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:205;event=configs_updated;configs=TIERS={id=/Root/tier1;has_config=0};SECRETS={}; 2025-05-07T09:04:32.181007Z node 1 :TX_TIERING INFO: log.cpp:784: fline=manager.cpp:128;event=start_subscribing_metadata; 2025-05-07T09:04:32.181150Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=fetcher.h:154;event=watch_scheme_objects;names=/Root/tier1; 2025-05-07T09:04:32.181459Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:62;event=TEvRefreshSubscriberData;snapshot=secrets; 2025-05-07T09:04:32.181508Z node 1 :TX_TIERING INFO: log.cpp:784: fline=manager.cpp:271;event=update_secrets;tablet=0; 2025-05-07T09:04:32.181558Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:196;event=skip_tier_manager_reloading;tier=/Root/tier1;has_secrets=1;found_tier_config=1; 2025-05-07T09:04:32.181631Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:205;event=configs_updated;configs=TIERS={id=/Root/tier1;has_config=0};SECRETS={USId:root@builtin:accessKey;USId:root@builtin:secretKey;}; 2025-05-07T09:04:32.183224Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=fetcher.h:111;component=TSchemeObjectWatcher;event=NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult;path=Root/tier1; 2025-05-07T09:04:32.184521Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=fetcher.h:140;event=object_fetched;path=/Root/tier1; 2025-05-07T09:04:32.184694Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:75;component=tiering_manager;event=object_updated;path=/Root/tier1; 2025-05-07T09:04:32.184802Z node 1 :TX_TIERING INFO: log.cpp:784: fline=manager.cpp:279;event=update_tier_config;name=/Root/tier1;tablet=0;has_config=1; 2025-05-07T09:04:32.184903Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:162 :Tier '/Root/tier1' started at tablet 0 2025-05-07T09:04:32.184971Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:205;event=configs_updated;configs=TIERS={id=/Root/tier1;has_config=1};SECRETS={USId:root@builtin:accessKey;USId:root@builtin:secretKey;}; REQUEST= CREATE EXTERNAL DATA SOURCE `/Root/tier2` WITH ( SOURCE_TYPE="ObjectStorage", LOCATION="http://fake.fake/abc2", AUTH_METHOD="AWS", AWS_ACCESS_KEY_ID_SECRET_NAME="accessKey", AWS_SECRET_ACCESS_KEY_SECRET_NAME="secretKey", AWS_REGION="ru-central1" ); ;EXPECTATION=1;WAITING=1 2025-05-07T09:04:43.703 ... 
ect_deleted;path=/Root/tier2; 2025-05-07T09:05:45.907398Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=fetcher.h:149;event=object_deleted;path=/Root/tier2; 2025-05-07T09:05:45.907576Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:94;component=tiering_manager;event=object_deleted;name=/Root/tier2; 2025-05-07T09:05:45.907620Z node 1 :TX_TIERING INFO: log.cpp:784: fline=manager.cpp:279;event=update_tier_config;name=/Root/tier2;tablet=0;has_config=0; 2025-05-07T09:05:45.907684Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier2' stopped at tablet 0 2025-05-07T09:05:45.907745Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:142 :Restarting tier '/Root/tier1' at tablet 0 2025-05-07T09:05:45.907792Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier1' stopped at tablet 0 2025-05-07T09:05:45.907867Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:162 :Tier '/Root/tier1' started at tablet 0 2025-05-07T09:05:45.907941Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:205;event=configs_updated;configs=TIERS={id=/Root/tier1;has_config=1};SECRETS={USId:root@builtin:accessKey;USId:root@builtin:secretKey;}; 2025-05-07T09:05:45.908112Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:94;component=tiering_manager;event=object_deleted;name=/Root/tier2; 2025-05-07T09:05:45.908144Z node 1 :TX_TIERING INFO: log.cpp:784: fline=manager.cpp:279;event=update_tier_config;name=/Root/tier2;tablet=72075186224037892;has_config=0; 2025-05-07T09:05:45.908180Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier2' stopped at tablet 72075186224037892 2025-05-07T09:05:45.908219Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:142 :Restarting tier '/Root/tier1' at tablet 72075186224037892 2025-05-07T09:05:45.908249Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier1' stopped at tablet 72075186224037892 2025-05-07T09:05:45.908289Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:162 :Tier '/Root/tier1' started at tablet 72075186224037892 2025-05-07T09:05:45.908337Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:205;event=configs_updated;configs=TIERS={id=/Root/tier1;has_config=1};SECRETS={USId:root@builtin:accessKey;USId:root@builtin:secretKey;}; 2025-05-07T09:05:45.908375Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:94;component=tiering_manager;event=object_deleted;name=/Root/tier2; 2025-05-07T09:05:45.908402Z node 1 :TX_TIERING INFO: log.cpp:784: fline=manager.cpp:279;event=update_tier_config;name=/Root/tier2;tablet=72075186224037893;has_config=0; 2025-05-07T09:05:45.908435Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier2' stopped at tablet 72075186224037893 2025-05-07T09:05:45.908466Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:142 :Restarting tier '/Root/tier1' at tablet 72075186224037893 2025-05-07T09:05:45.908493Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier1' stopped at tablet 72075186224037893 2025-05-07T09:05:45.908528Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:162 :Tier '/Root/tier1' started at tablet 72075186224037893 2025-05-07T09:05:45.908571Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:205;event=configs_updated;configs=TIERS={id=/Root/tier1;has_config=1};SECRETS={USId:root@builtin:accessKey;USId:root@builtin:secretKey;}; 2025-05-07T09:05:45.908606Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:94;component=tiering_manager;event=object_deleted;name=/Root/tier2; 2025-05-07T09:05:45.908635Z node 1 
:TX_TIERING INFO: log.cpp:784: fline=manager.cpp:279;event=update_tier_config;name=/Root/tier2;tablet=72075186224037894;has_config=0; 2025-05-07T09:05:45.908670Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier2' stopped at tablet 72075186224037894 2025-05-07T09:05:45.908699Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:142 :Restarting tier '/Root/tier1' at tablet 72075186224037894 2025-05-07T09:05:45.908745Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier1' stopped at tablet 72075186224037894 2025-05-07T09:05:45.908780Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:162 :Tier '/Root/tier1' started at tablet 72075186224037894 2025-05-07T09:05:45.908816Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:205;event=configs_updated;configs=TIERS={id=/Root/tier1;has_config=1};SECRETS={USId:root@builtin:accessKey;USId:root@builtin:secretKey;}; 2025-05-07T09:05:45.908861Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:94;component=tiering_manager;event=object_deleted;name=/Root/tier2; 2025-05-07T09:05:45.908881Z node 1 :TX_TIERING INFO: log.cpp:784: fline=manager.cpp:279;event=update_tier_config;name=/Root/tier2;tablet=0;has_config=0; 2025-05-07T09:05:45.908908Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier2' stopped at tablet 0 2025-05-07T09:05:45.908936Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:142 :Restarting tier '/Root/tier1' at tablet 0 2025-05-07T09:05:45.908960Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier1' stopped at tablet 0 2025-05-07T09:05:45.908996Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:162 :Tier '/Root/tier1' started at tablet 0 2025-05-07T09:05:45.909036Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:205;event=configs_updated;configs=TIERS={id=/Root/tier1;has_config=1};SECRETS={USId:root@builtin:accessKey;USId:root@builtin:secretKey;}; 2025-05-07T09:05:45.909481Z node 1 :TX_TIERING DEBUG: log.cpp:784: tablet_id=72075186224037892;self_id=[1:2978:4257];ev=NKikimr::NColumnShard::TEvPrivate::TEvTieringModified;fline=tiering.cpp:242;event=refresh_tiering;has_tiering=0;tiers=0;had_tiering_before=1; 2025-05-07T09:05:45.909610Z node 1 :TX_TIERING DEBUG: log.cpp:784: tablet_id=72075186224037893;self_id=[1:2986:4260];ev=NKikimr::NColumnShard::TEvPrivate::TEvTieringModified;fline=tiering.cpp:242;event=refresh_tiering;has_tiering=0;tiers=0;had_tiering_before=1; 2025-05-07T09:05:45.909679Z node 1 :TX_TIERING DEBUG: log.cpp:784: tablet_id=72075186224037894;self_id=[1:2989:4263];ev=NKikimr::NColumnShard::TEvPrivate::TEvTieringModified;fline=tiering.cpp:242;event=refresh_tiering;has_tiering=0;tiers=0;had_tiering_before=1; REQUEST=DROP EXTERNAL DATA SOURCE `/Root/tier2`;RESULT=;EXPECTATION=1 FINISHED_REQUEST=DROP EXTERNAL DATA SOURCE `/Root/tier2`;EXPECTATION=1;WAITING=1 REQUEST=DROP EXTERNAL DATA SOURCE `/Root/tier1`;EXPECTATION=1;WAITING=1 2025-05-07T09:05:57.245363Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=fetcher.h:149;event=object_deleted;path=/Root/tier1; 2025-05-07T09:05:57.245459Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=fetcher.h:149;event=object_deleted;path=/Root/tier1; 2025-05-07T09:05:57.245495Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=fetcher.h:149;event=object_deleted;path=/Root/tier1; 2025-05-07T09:05:57.245777Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:94;component=tiering_manager;event=object_deleted;name=/Root/tier1; 2025-05-07T09:05:57.245855Z node 1 :TX_TIERING INFO: log.cpp:784: 
fline=manager.cpp:279;event=update_tier_config;name=/Root/tier1;tablet=72075186224037892;has_config=0; 2025-05-07T09:05:57.245918Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier1' stopped at tablet 72075186224037892 2025-05-07T09:05:57.246186Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:205;event=configs_updated;configs=TIERS=;SECRETS={USId:root@builtin:accessKey;USId:root@builtin:secretKey;}; 2025-05-07T09:05:57.246247Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:94;component=tiering_manager;event=object_deleted;name=/Root/tier1; 2025-05-07T09:05:57.246298Z node 1 :TX_TIERING INFO: log.cpp:784: fline=manager.cpp:279;event=update_tier_config;name=/Root/tier1;tablet=72075186224037893;has_config=0; 2025-05-07T09:05:57.246336Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier1' stopped at tablet 72075186224037893 2025-05-07T09:05:57.246391Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:205;event=configs_updated;configs=TIERS=;SECRETS={USId:root@builtin:accessKey;USId:root@builtin:secretKey;}; 2025-05-07T09:05:57.246489Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:94;component=tiering_manager;event=object_deleted;name=/Root/tier1; 2025-05-07T09:05:57.246528Z node 1 :TX_TIERING INFO: log.cpp:784: fline=manager.cpp:279;event=update_tier_config;name=/Root/tier1;tablet=72075186224037894;has_config=0; 2025-05-07T09:05:57.246558Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier1' stopped at tablet 72075186224037894 2025-05-07T09:05:57.246601Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:205;event=configs_updated;configs=TIERS=;SECRETS={USId:root@builtin:accessKey;USId:root@builtin:secretKey;}; 2025-05-07T09:05:57.246678Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=fetcher.h:149;event=object_deleted;path=/Root/tier1; 2025-05-07T09:05:57.246882Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=fetcher.h:149;event=object_deleted;path=/Root/tier1; 2025-05-07T09:05:57.247509Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=fetcher.h:149;event=object_deleted;path=/Root/tier1; 2025-05-07T09:05:57.247644Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:94;component=tiering_manager;event=object_deleted;name=/Root/tier1; 2025-05-07T09:05:57.247681Z node 1 :TX_TIERING INFO: log.cpp:784: fline=manager.cpp:279;event=update_tier_config;name=/Root/tier1;tablet=0;has_config=0; 2025-05-07T09:05:57.247715Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier1' stopped at tablet 0 2025-05-07T09:05:57.247765Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:205;event=configs_updated;configs=TIERS=;SECRETS={USId:root@builtin:accessKey;USId:root@builtin:secretKey;}; 2025-05-07T09:05:57.248034Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:94;component=tiering_manager;event=object_deleted;name=/Root/tier1; 2025-05-07T09:05:57.248066Z node 1 :TX_TIERING INFO: log.cpp:784: fline=manager.cpp:279;event=update_tier_config;name=/Root/tier1;tablet=0;has_config=0; 2025-05-07T09:05:57.248098Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier1' stopped at tablet 0 2025-05-07T09:05:57.248141Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:205;event=configs_updated;configs=TIERS=;SECRETS={USId:root@builtin:accessKey;USId:root@builtin:secretKey;}; 2025-05-07T09:05:57.248784Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:94;component=tiering_manager;event=object_deleted;name=/Root/tier1; 2025-05-07T09:05:57.248825Z node 1 
:TX_TIERING INFO: log.cpp:784: fline=manager.cpp:279;event=update_tier_config;name=/Root/tier1;tablet=0;has_config=0; 2025-05-07T09:05:57.248858Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier1' stopped at tablet 0 2025-05-07T09:05:57.248910Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:205;event=configs_updated;configs=TIERS=;SECRETS={USId:root@builtin:accessKey;USId:root@builtin:secretKey;}; 2025-05-07T09:05:57.249213Z node 1 :TX_TIERING DEBUG: log.cpp:784: tablet_id=72075186224037892;self_id=[1:2978:4257];ev=NKikimr::NColumnShard::TEvPrivate::TEvTieringModified;fline=tiering.cpp:242;event=refresh_tiering;has_tiering=0;tiers=0;had_tiering_before=0; 2025-05-07T09:05:57.249301Z node 1 :TX_TIERING DEBUG: log.cpp:784: tablet_id=72075186224037893;self_id=[1:2986:4260];ev=NKikimr::NColumnShard::TEvPrivate::TEvTieringModified;fline=tiering.cpp:242;event=refresh_tiering;has_tiering=0;tiers=0;had_tiering_before=0; 2025-05-07T09:05:57.249359Z node 1 :TX_TIERING DEBUG: log.cpp:784: tablet_id=72075186224037894;self_id=[1:2989:4263];ev=NKikimr::NColumnShard::TEvPrivate::TEvTieringModified;fline=tiering.cpp:242;event=refresh_tiering;has_tiering=0;tiers=0;had_tiering_before=0; REQUEST=DROP EXTERNAL DATA SOURCE `/Root/tier1`;RESULT=;EXPECTATION=1 FINISHED_REQUEST=DROP EXTERNAL DATA SOURCE `/Root/tier1`;EXPECTATION=1;WAITING=1 >> TPersQueueTest::WhenTheTopicIsDeletedAfterReadingTheData_Compressed [GOOD] >> TPersQueueTest::WhenTheTopicIsDeletedBeforeDataIsDecompressed_Uncompressed |93.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/script_execution/py3test ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest >> ColumnShardTiers::DSConfigsWithQueryServiceDdl [GOOD] Test command err: 2025-05-07T09:03:57.471351Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:105:2151], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T09:03:57.471489Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T09:03:57.471720Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0025aa/r3tmp/tmp9iQat2/pdisk_1.dat TServer::EnableGrpc on GrpcPort 1698, node 1 TClient is connected to server localhost:27109 2025-05-07T09:03:58.436343Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-05-07T09:03:58.478678Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:03:58.487120Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:03:58.487184Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:03:58.487217Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:03:58.487468Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-05-07T09:03:58.535938Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:03:58.536834Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:03:58.550296Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected Initialization finished REQUEST= UPSERT OBJECT `accessKey` (TYPE SECRET) WITH (value = `secretAccessKey`); UPSERT OBJECT `secretKey` (TYPE SECRET) WITH (value = `fakeSecret`); ;EXPECTATION=1;WAITING=1 2025-05-07T09:04:10.296538Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:745:2625], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:04:10.296642Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:756:2630], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:04:10.297067Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:04:10.301153Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715657:3, at schemeshard: 72057594046644480 2025-05-07T09:04:10.316839Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:759:2633], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715657 completed, doublechecking } 2025-05-07T09:04:10.365226Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:810:2665] txid# 281474976715658, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:04:10.625432Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:1, at schemeshard: 72057594046644480 2025-05-07T09:04:11.497904Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-05-07T09:04:11.881376Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:1, at schemeshard: 72057594046644480 2025-05-07T09:04:12.760373Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715670:0, at schemeshard: 72057594046644480 2025-05-07T09:04:13.541902Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715673:0, at schemeshard: 72057594046644480 2025-05-07T09:04:14.048098Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715676:0, at schemeshard: 72057594046644480 2025-05-07T09:04:15.086178Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710758:2, at schemeshard: 72057594046644480 2025-05-07T09:04:15.407490Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710759:0, at schemeshard: 72057594046644480 REQUEST= UPSERT OBJECT `accessKey` (TYPE SECRET) WITH (value = `secretAccessKey`); UPSERT OBJECT `secretKey` (TYPE SECRET) WITH (value = `fakeSecret`); ;RESULT=;EXPECTATION=1 FINISHED_REQUEST= UPSERT OBJECT `accessKey` (TYPE SECRET) WITH (value = `secretAccessKey`); UPSERT OBJECT `secretKey` (TYPE SECRET) WITH (value = `fakeSecret`); ;EXPECTATION=1;WAITING=1 REQUEST= CREATE EXTERNAL DATA SOURCE `/Root/tier1` WITH ( SOURCE_TYPE="ObjectStorage", LOCATION="http://fake.fake/abc1", AUTH_METHOD="AWS", AWS_ACCESS_KEY_ID_SECRET_NAME="accessKey", AWS_SECRET_ACCESS_KEY_SECRET_NAME="secretKey", AWS_REGION="ru-central1" ); ;EXPECTATION=1;WAITING=1 2025-05-07T09:04:30.115047Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976715702:0, at schemeshard: 72057594046644480 REQUEST= CREATE EXTERNAL DATA SOURCE `/Root/tier1` WITH ( SOURCE_TYPE="ObjectStorage", LOCATION="http://fake.fake/abc1", 
AUTH_METHOD="AWS", AWS_ACCESS_KEY_ID_SECRET_NAME="accessKey", AWS_SECRET_ACCESS_KEY_SECRET_NAME="secretKey", AWS_REGION="ru-central1" ); ;RESULT=;EXPECTATION=1 2025-05-07T09:04:32.029838Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7261: Cannot get console configs 2025-05-07T09:04:32.029912Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded FINISHED_REQUEST= CREATE EXTERNAL DATA SOURCE `/Root/tier1` WITH ( SOURCE_TYPE="ObjectStorage", LOCATION="http://fake.fake/abc1", AUTH_METHOD="AWS", AWS_ACCESS_KEY_ID_SECRET_NAME="accessKey", AWS_SECRET_ACCESS_KEY_SECRET_NAME="secretKey", AWS_REGION="ru-central1" ); ;EXPECTATION=1;WAITING=1 2025-05-07T09:04:32.386940Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:215;event=skip_tier_manager_start;tier=/Root/tier1;has_secrets=1;tier_config=0; 2025-05-07T09:04:32.387030Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:196;event=skip_tier_manager_reloading;tier=/Root/tier1;has_secrets=1;found_tier_config=1; 2025-05-07T09:04:32.387092Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:205;event=configs_updated;configs=TIERS={id=/Root/tier1;has_config=0};SECRETS={}; 2025-05-07T09:04:32.387170Z node 1 :TX_TIERING INFO: log.cpp:784: fline=manager.cpp:128;event=start_subscribing_metadata; 2025-05-07T09:04:32.387322Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=fetcher.h:154;event=watch_scheme_objects;names=/Root/tier1; 2025-05-07T09:04:32.387647Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:62;event=TEvRefreshSubscriberData;snapshot=secrets; 2025-05-07T09:04:32.387709Z node 1 :TX_TIERING INFO: log.cpp:784: fline=manager.cpp:271;event=update_secrets;tablet=0; 2025-05-07T09:04:32.387765Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:196;event=skip_tier_manager_reloading;tier=/Root/tier1;has_secrets=1;found_tier_config=1; 2025-05-07T09:04:32.387842Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:205;event=configs_updated;configs=TIERS={id=/Root/tier1;has_config=0};SECRETS={USId:root@builtin:accessKey;USId:root@builtin:secretKey;}; 2025-05-07T09:04:32.389474Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=fetcher.h:111;component=TSchemeObjectWatcher;event=NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult;path=Root/tier1; 2025-05-07T09:04:32.393952Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=fetcher.h:140;event=object_fetched;path=/Root/tier1; 2025-05-07T09:04:32.394184Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:75;component=tiering_manager;event=object_updated;path=/Root/tier1; 2025-05-07T09:04:32.394312Z node 1 :TX_TIERING INFO: log.cpp:784: fline=manager.cpp:279;event=update_tier_config;name=/Root/tier1;tablet=0;has_config=1; 2025-05-07T09:04:32.394427Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:162 :Tier '/Root/tier1' started at tablet 0 2025-05-07T09:04:32.394524Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:205;event=configs_updated;configs=TIERS={id=/Root/tier1;has_config=1};SECRETS={USId:root@builtin:accessKey;USId:root@builtin:secretKey;}; REQUEST= CREATE EXTERNAL DATA SOURCE `/Root/tier2` WITH ( SOURCE_TYPE="ObjectStorage", LOCATION="http://fake.fake/abc2", AUTH_METHOD="AWS", AWS_ACCESS_KEY_ID_SECRET_NAME="accessKey", AWS_SECRET_ACCESS_KEY_SECRET_NAME="secretKey", AWS_REGION="ru-central1" ); ;EXPECTATION=1;WAITING=1 2025-05-07T09:04:43.967800Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpCreateExternalDataSource, opId: 281474976715717:0, at schemeshard: 72057594046644480 REQUEST= CREATE EXTERNAL DATA SOURCE `/Root/tier2` WITH ( SOURCE_TYPE="ObjectStorage", LOCATION="http://fake.fake/abc2", AUTH_METHOD="AWS", AWS_ACCESS_KEY_ID_SECRET_NAME="accessKey", AWS_SECRET_ACCESS_KEY_SECRET_NAME="secretKey", AWS_REGION="ru-c ... ect_deleted;path=/Root/tier2; 2025-05-07T09:05:46.460900Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:94;component=tiering_manager;event=object_deleted;name=/Root/tier2; 2025-05-07T09:05:46.460957Z node 1 :TX_TIERING INFO: log.cpp:784: fline=manager.cpp:279;event=update_tier_config;name=/Root/tier2;tablet=72075186224037892;has_config=0; 2025-05-07T09:05:46.461011Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier2' stopped at tablet 72075186224037892 2025-05-07T09:05:46.461073Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:142 :Restarting tier '/Root/tier1' at tablet 72075186224037892 2025-05-07T09:05:46.461122Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier1' stopped at tablet 72075186224037892 2025-05-07T09:05:46.461194Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:162 :Tier '/Root/tier1' started at tablet 72075186224037892 2025-05-07T09:05:46.461270Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:205;event=configs_updated;configs=TIERS={id=/Root/tier1;has_config=1};SECRETS={USId:root@builtin:accessKey;USId:root@builtin:secretKey;}; 2025-05-07T09:05:46.461319Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:94;component=tiering_manager;event=object_deleted;name=/Root/tier2; 2025-05-07T09:05:46.461347Z node 1 :TX_TIERING INFO: log.cpp:784: fline=manager.cpp:279;event=update_tier_config;name=/Root/tier2;tablet=72075186224037893;has_config=0; 2025-05-07T09:05:46.461377Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier2' stopped at tablet 72075186224037893 2025-05-07T09:05:46.461406Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:142 :Restarting tier '/Root/tier1' at tablet 72075186224037893 2025-05-07T09:05:46.461430Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier1' stopped at tablet 72075186224037893 2025-05-07T09:05:46.461465Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:162 :Tier '/Root/tier1' started at tablet 72075186224037893 2025-05-07T09:05:46.461500Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:205;event=configs_updated;configs=TIERS={id=/Root/tier1;has_config=1};SECRETS={USId:root@builtin:accessKey;USId:root@builtin:secretKey;}; 2025-05-07T09:05:46.461533Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:94;component=tiering_manager;event=object_deleted;name=/Root/tier2; 2025-05-07T09:05:46.461559Z node 1 :TX_TIERING INFO: log.cpp:784: fline=manager.cpp:279;event=update_tier_config;name=/Root/tier2;tablet=72075186224037894;has_config=0; 2025-05-07T09:05:46.461588Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier2' stopped at tablet 72075186224037894 2025-05-07T09:05:46.461614Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:142 :Restarting tier '/Root/tier1' at tablet 72075186224037894 2025-05-07T09:05:46.461637Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier1' stopped at tablet 72075186224037894 2025-05-07T09:05:46.461667Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:162 :Tier '/Root/tier1' started at tablet 72075186224037894 2025-05-07T09:05:46.461703Z node 1 :TX_TIERING DEBUG: log.cpp:784: 
fline=manager.cpp:205;event=configs_updated;configs=TIERS={id=/Root/tier1;has_config=1};SECRETS={USId:root@builtin:accessKey;USId:root@builtin:secretKey;}; 2025-05-07T09:05:46.461762Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:94;component=tiering_manager;event=object_deleted;name=/Root/tier2; 2025-05-07T09:05:46.461787Z node 1 :TX_TIERING INFO: log.cpp:784: fline=manager.cpp:279;event=update_tier_config;name=/Root/tier2;tablet=0;has_config=0; 2025-05-07T09:05:46.461815Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier2' stopped at tablet 0 2025-05-07T09:05:46.461846Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:142 :Restarting tier '/Root/tier1' at tablet 0 2025-05-07T09:05:46.461870Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier1' stopped at tablet 0 2025-05-07T09:05:46.461902Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:162 :Tier '/Root/tier1' started at tablet 0 2025-05-07T09:05:46.461938Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:205;event=configs_updated;configs=TIERS={id=/Root/tier1;has_config=1};SECRETS={USId:root@builtin:accessKey;USId:root@builtin:secretKey;}; 2025-05-07T09:05:46.463115Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=fetcher.h:149;event=object_deleted;path=/Root/tier2; 2025-05-07T09:05:46.464102Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:94;component=tiering_manager;event=object_deleted;name=/Root/tier2; 2025-05-07T09:05:46.464148Z node 1 :TX_TIERING INFO: log.cpp:784: fline=manager.cpp:279;event=update_tier_config;name=/Root/tier2;tablet=0;has_config=0; 2025-05-07T09:05:46.464188Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier2' stopped at tablet 0 2025-05-07T09:05:46.464225Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:142 :Restarting tier '/Root/tier1' at tablet 0 2025-05-07T09:05:46.464253Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier1' stopped at tablet 0 2025-05-07T09:05:46.464303Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:162 :Tier '/Root/tier1' started at tablet 0 2025-05-07T09:05:46.464345Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:205;event=configs_updated;configs=TIERS={id=/Root/tier1;has_config=1};SECRETS={USId:root@builtin:accessKey;USId:root@builtin:secretKey;}; 2025-05-07T09:05:46.464810Z node 1 :TX_TIERING DEBUG: log.cpp:784: tablet_id=72075186224037892;self_id=[1:3007:4280];ev=NKikimr::NColumnShard::TEvPrivate::TEvTieringModified;fline=tiering.cpp:242;event=refresh_tiering;has_tiering=0;tiers=0;had_tiering_before=1; 2025-05-07T09:05:46.464908Z node 1 :TX_TIERING DEBUG: log.cpp:784: tablet_id=72075186224037893;self_id=[1:3016:4283];ev=NKikimr::NColumnShard::TEvPrivate::TEvTieringModified;fline=tiering.cpp:242;event=refresh_tiering;has_tiering=0;tiers=0;had_tiering_before=1; 2025-05-07T09:05:46.464976Z node 1 :TX_TIERING DEBUG: log.cpp:784: tablet_id=72075186224037894;self_id=[1:3020:4286];ev=NKikimr::NColumnShard::TEvPrivate::TEvTieringModified;fline=tiering.cpp:242;event=refresh_tiering;has_tiering=0;tiers=0;had_tiering_before=1; REQUEST=DROP EXTERNAL DATA SOURCE `/Root/tier2`;RESULT=;EXPECTATION=1 FINISHED_REQUEST=DROP EXTERNAL DATA SOURCE `/Root/tier2`;EXPECTATION=1;WAITING=1 REQUEST=DROP EXTERNAL DATA SOURCE `/Root/tier1`;EXPECTATION=1;WAITING=1 2025-05-07T09:05:57.914771Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=fetcher.h:149;event=object_deleted;path=/Root/tier1; 2025-05-07T09:05:57.914857Z node 1 :TX_TIERING DEBUG: log.cpp:784: 
fline=fetcher.h:149;event=object_deleted;path=/Root/tier1; 2025-05-07T09:05:57.914896Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=fetcher.h:149;event=object_deleted;path=/Root/tier1; 2025-05-07T09:05:57.914957Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=fetcher.h:149;event=object_deleted;path=/Root/tier1; 2025-05-07T09:05:57.915283Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=fetcher.h:149;event=object_deleted;path=/Root/tier1; 2025-05-07T09:05:57.915366Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:94;component=tiering_manager;event=object_deleted;name=/Root/tier1; 2025-05-07T09:05:57.915417Z node 1 :TX_TIERING INFO: log.cpp:784: fline=manager.cpp:279;event=update_tier_config;name=/Root/tier1;tablet=72075186224037892;has_config=0; 2025-05-07T09:05:57.915475Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier1' stopped at tablet 72075186224037892 2025-05-07T09:05:57.915568Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:205;event=configs_updated;configs=TIERS=;SECRETS={USId:root@builtin:accessKey;USId:root@builtin:secretKey;}; 2025-05-07T09:05:57.915618Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:94;component=tiering_manager;event=object_deleted;name=/Root/tier1; 2025-05-07T09:05:57.915648Z node 1 :TX_TIERING INFO: log.cpp:784: fline=manager.cpp:279;event=update_tier_config;name=/Root/tier1;tablet=72075186224037893;has_config=0; 2025-05-07T09:05:57.915676Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier1' stopped at tablet 72075186224037893 2025-05-07T09:05:57.915716Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:205;event=configs_updated;configs=TIERS=;SECRETS={USId:root@builtin:accessKey;USId:root@builtin:secretKey;}; 2025-05-07T09:05:57.915748Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:94;component=tiering_manager;event=object_deleted;name=/Root/tier1; 2025-05-07T09:05:57.915773Z node 1 :TX_TIERING INFO: log.cpp:784: fline=manager.cpp:279;event=update_tier_config;name=/Root/tier1;tablet=72075186224037894;has_config=0; 2025-05-07T09:05:57.915799Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier1' stopped at tablet 72075186224037894 2025-05-07T09:05:57.915839Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:205;event=configs_updated;configs=TIERS=;SECRETS={USId:root@builtin:accessKey;USId:root@builtin:secretKey;}; 2025-05-07T09:05:57.915972Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:94;component=tiering_manager;event=object_deleted;name=/Root/tier1; 2025-05-07T09:05:57.916002Z node 1 :TX_TIERING INFO: log.cpp:784: fline=manager.cpp:279;event=update_tier_config;name=/Root/tier1;tablet=0;has_config=0; 2025-05-07T09:05:57.916033Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier1' stopped at tablet 0 2025-05-07T09:05:57.916077Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:205;event=configs_updated;configs=TIERS=;SECRETS={USId:root@builtin:accessKey;USId:root@builtin:secretKey;}; 2025-05-07T09:05:57.916140Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=fetcher.h:149;event=object_deleted;path=/Root/tier1; 2025-05-07T09:05:57.916408Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:94;component=tiering_manager;event=object_deleted;name=/Root/tier1; 2025-05-07T09:05:57.916439Z node 1 :TX_TIERING INFO: log.cpp:784: fline=manager.cpp:279;event=update_tier_config;name=/Root/tier1;tablet=0;has_config=0; 2025-05-07T09:05:57.916468Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier1' 
stopped at tablet 0 2025-05-07T09:05:57.916514Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:205;event=configs_updated;configs=TIERS=;SECRETS={USId:root@builtin:accessKey;USId:root@builtin:secretKey;}; 2025-05-07T09:05:57.916684Z node 1 :TX_TIERING DEBUG: log.cpp:784: tablet_id=72075186224037892;self_id=[1:3007:4280];ev=NKikimr::NColumnShard::TEvPrivate::TEvTieringModified;fline=tiering.cpp:242;event=refresh_tiering;has_tiering=0;tiers=0;had_tiering_before=0; 2025-05-07T09:05:57.916762Z node 1 :TX_TIERING DEBUG: log.cpp:784: tablet_id=72075186224037893;self_id=[1:3016:4283];ev=NKikimr::NColumnShard::TEvPrivate::TEvTieringModified;fline=tiering.cpp:242;event=refresh_tiering;has_tiering=0;tiers=0;had_tiering_before=0; 2025-05-07T09:05:57.916815Z node 1 :TX_TIERING DEBUG: log.cpp:784: tablet_id=72075186224037894;self_id=[1:3020:4286];ev=NKikimr::NColumnShard::TEvPrivate::TEvTieringModified;fline=tiering.cpp:242;event=refresh_tiering;has_tiering=0;tiers=0;had_tiering_before=0; 2025-05-07T09:05:57.916928Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:94;component=tiering_manager;event=object_deleted;name=/Root/tier1; 2025-05-07T09:05:57.916953Z node 1 :TX_TIERING INFO: log.cpp:784: fline=manager.cpp:279;event=update_tier_config;name=/Root/tier1;tablet=0;has_config=0; 2025-05-07T09:05:57.916982Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier1' stopped at tablet 0 2025-05-07T09:05:57.917026Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:205;event=configs_updated;configs=TIERS=;SECRETS={USId:root@builtin:accessKey;USId:root@builtin:secretKey;}; REQUEST=DROP EXTERNAL DATA SOURCE `/Root/tier1`;RESULT=;EXPECTATION=1 FINISHED_REQUEST=DROP EXTERNAL DATA SOURCE `/Root/tier1`;EXPECTATION=1;WAITING=1 >> DataShardVolatile::VolatileTxAbortedOnDrop [GOOD] >> DataShardVolatile::UpsertNoLocksArbiter+UseSink >> test_yandex_cloud_queue_counters.py::TestYmqQueueCounters::test_counters_when_reading_from_empty_queue [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_empty_access_key_id[tables_format_v0] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_empty_access_key_id[tables_format_v1] >> TPersQueueCommonTest::TestLimiterLimitsWithBlobsRateLimit [GOOD] >> TPersQueueCommonTest::TestLimiterLimitsWithUserPayloadRateLimit |93.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/script_execution/py3test >> TopicService::MultiplePartitionsAndNoGapsInTheOffsets [GOOD] >> test_common.py::TestCommonSqsYandexCloudMode::test_private_create_queue[tables_format_v1-std] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_empty_auth_header [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_empty_access_key_id[tables_format_v1] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_fifo_groups_with_dlq_in_cloud[tables_format_v0] >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_index_1__SYNC-pk_types3-all_types3-index3---SYNC] [GOOD] >> TPersQueueTest::ReadRuleServiceTypeMigration [GOOD] >> TPersQueueTest::ReadRuleServiceTypeMigrationWithDisallowDefault >> TTopicYqlTest::DropTopicYql |93.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_cloud_queues_with_iam_token[tables_format_v1-std] [GOOD] |93.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/script_execution/py3test >> 
test_common.py::TestCommonYandexWithTenant::test_private_create_queue[tables_format_v1-fifo] [GOOD] >> test_common.py::TestCommonYandexWithTenant::test_private_create_queue[tables_format_v1-std] |93.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/cloud/py3test >> test_common.py::TestCommonYandexWithTenant::test_private_queue_recreation[tables_format_v1-std] [GOOD] >> ttl_delete_s3.py::TestDeleteS3Ttl::test_data_unchanged_after_ttl_change >> TPQCompatTest::DiscoverTopics [GOOD] >> TPQCompatTest::SetupLockSession >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_queues_count_over_limit[tables_format_v0] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_queues_count_over_limit[tables_format_v1] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_not_throttling_with_custom_queue_name[std-tables_format_v0] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_not_throttling_with_custom_queue_name[std-tables_format_v1] |93.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |93.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_common.py::TestCommonYandexWithTenant::test_private_create_queue[tables_format_v1-std] [GOOD] >> DataShardVolatile::UpsertNoLocksArbiter+UseSink [GOOD] >> DataShardVolatile::UpsertNoLocksArbiter-UseSink >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_not_throttling_with_custom_queue_name[std-tables_format_v1] [GOOD] >> DataShardVolatile::DistributedWriteThenBulkUpsertWithCdc [GOOD] >> DataShardVolatile::DistributedWriteLostPlanThenDrop |93.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_setup_in_cloud[tables_format_v1-std] [GOOD] |93.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_message_counters_in_cloud[tables_format_v1-std] [GOOD] >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_index_0__ASYNC-pk_types6-all_types6-index6---ASYNC] [GOOD] >> unstable_connection.py::TestUnstableConnection::test >> test_common.py::TestCommonYandexWithTenant::test_private_queue_recreation[tables_format_v0-fifo] [GOOD] >> test_common.py::TestCommonYandexWithTenant::test_private_queue_recreation[tables_format_v0-std] |93.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/script_execution/py3test |93.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |93.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/dump_restore/py3test >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_index_1__SYNC-pk_types3-all_types3-index3---SYNC] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_fifo_groups_with_dlq_in_cloud[tables_format_v0] [GOOD] |93.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_queue_counters_are_in_folder[tables_format_v1] [GOOD] >> ttl_unavailable_s3.py::TestUnavailableS3::test >> test_common.py::TestCommonSqsYandexCloudMode::test_private_queue_recreation[tables_format_v1-fifo] [GOOD] >> test_yandex_cloud_queue_counters.py::TestYmqQueueCounters::test_purge_queue_counters [GOOD] >> test_common.py::TestCommonSqsYandexCloudMode::test_private_queue_recreation[tables_format_v1-std] |93.1%| [TM] {asan, default-linux-x86_64, release} 
ydb/tests/functional/audit/py3test >> test_update_script_tables.py::TestUpdateScriptTablesYdb::test_recreate_tables[ALTER TABLE {} DROP COLUMN syntax, DROP COLUMN ast-`.metadata/script_executions`] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_count_queues[tables_format_v0] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_count_queues[tables_format_v1] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_message_counters_in_cloud[tables_format_v0-fifo] [GOOD] |93.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/script_execution/py3test >> TPersQueueTest::DirectReadRestartTablet [GOOD] >> TPersQueueTest::EachMessageGetsExactlyOneAcknowledgementInCorrectOrder >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_message_counters_in_cloud[tables_format_v0-std] |93.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |93.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> ttl_delete_s3.py::TestDeleteTtl::test_ttl_delete |93.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_queue_counters.py::TestYmqQueueCounters::test_counters_when_reading_from_empty_queue [GOOD] >> DataShardVolatile::UpsertNoLocksArbiter-UseSink [GOOD] >> DataShardVolatile::UpsertBrokenLockArbiter+UseSink |93.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> DataShardVolatile::DistributedWriteLostPlanThenDrop [GOOD] >> DataShardVolatile::DistributedWriteLostPlanThenSplit >> TPersQueueTest::WhenTheTopicIsDeletedBeforeDataIsDecompressed_Uncompressed [GOOD] >> TPersQueueTest::WhenTheTopicIsDeletedAfterReadingTheData_Uncompressed |93.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |93.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/cloud/py3test >> test_common.py::TestCommonSqsYandexCloudMode::test_private_create_queue[tables_format_v1-std] [GOOD] >> test_update_script_tables.py::TestUpdateScriptTablesYdb::test_recreate_tables[DROP TABLE {}-`.metadata/script_executions`] >> TPersQueueTest::CheckDeleteTopic [GOOD] >> TPersQueueTest::CheckDecompressionTasksWithoutSession >> TTopicYqlTest::DropTopicYql [GOOD] >> TTopicYqlTest::CreateTopicYqlBackCompatibility |93.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> TPersQueueTest::StoreNoMoreThanXSourceIDs [GOOD] >> TPersQueueTest::SetupWriteSessionOnDisabledCluster |93.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_mechanics_in_cloud[tables_format_v0-tables_format_v0-std] [GOOD] >> test_common.py::TestCommonSqsYandexCloudMode::test_private_queue_recreation[tables_format_v0-fifo] [GOOD] >> test_common.py::TestCommonSqsYandexCloudMode::test_private_queue_recreation[tables_format_v0-std] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_count_queues[tables_format_v1] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_queues_count_over_limit[tables_format_v1] [GOOD] |93.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/dump_restore/py3test >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_index_0__ASYNC-pk_types6-all_types6-index6---ASYNC] [GOOD] >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_index_1__ASYNC-pk_types5-all_types5-index5---ASYNC] [GOOD] |93.1%| [TM] {asan, default-linux-x86_64, release} 
ydb/tests/functional/audit/py3test |93.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/olap/ttl_tiering/py3test >> TPersQueueCommonTest::TestLimiterLimitsWithUserPayloadRateLimit [GOOD] >> TPersQueueTest::AllEqual [GOOD] >> TPersQueueTest::BadSids |93.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_empty_access_key_id[tables_format_v1] [GOOD] >> TPersQueueTest::ReadRuleServiceTypeMigrationWithDisallowDefault [GOOD] >> TPersQueueTest::SetMeteringMode |93.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |93.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> TPQCompatTest::SetupLockSession [GOOD] >> TPQCompatTest::BadTopics >> test_update_script_tables.py::TestUpdateScriptTablesYdb::test_recreate_tables[ALTER TABLE {} DROP COLUMN syntax, DROP COLUMN ast, DROP COLUMN stats-`.metadata/script_executions`] |93.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/cloud/py3test >> test_common.py::TestCommonYandexWithTenant::test_private_create_queue[tables_format_v1-std] [GOOD] >> DataShardVolatile::UpsertBrokenLockArbiter+UseSink [GOOD] >> DataShardVolatile::UpsertBrokenLockArbiter-UseSink >> ColumnShardTiers::TieringUsage [GOOD] >> DataShardVolatile::DistributedWriteLostPlanThenSplit [GOOD] >> DataShardVolatile::DistributedOutOfOrderFollowerConsistency |93.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |93.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_fifo_groups_with_dlq_in_cloud[tables_format_v0] [GOOD] >> test_auditlog.py::test_dml_requests_logged_when_unauthorized |93.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest >> ColumnShardTiers::TieringUsage [GOOD] Test command err: 2025-05-07T09:03:58.291291Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:105:2151], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T09:03:58.291402Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T09:03:58.291552Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0022be/r3tmp/tmpc1jVER/pdisk_1.dat TServer::EnableGrpc on GrpcPort 8503, node 1 TClient is connected to server localhost:13232 2025-05-07T09:03:58.717392Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-05-07T09:03:58.748292Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:03:58.751739Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:03:58.751779Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:03:58.751806Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:03:58.751989Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-05-07T09:03:58.796665Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:03:58.796795Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:03:58.807910Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected REQUEST= UPSERT OBJECT `accessKey` (TYPE SECRET) WITH (value = `secretAccessKey`); UPSERT OBJECT `secretKey` (TYPE SECRET) WITH (value = `fakeSecret`); ;EXPECTATION=1;WAITING=1 2025-05-07T09:04:09.390123Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:682:2573], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:04:09.390276Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:04:09.520873Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:2, at schemeshard: 72057594046644480 2025-05-07T09:04:09.856185Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:820:2659], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:04:09.856316Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:04:09.856562Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:825:2664], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:04:09.862123Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:2, at schemeshard: 72057594046644480 2025-05-07T09:04:09.996869Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:827:2666], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-05-07T09:04:10.462341Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:919:2729] txid# 281474976715661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:04:11.112478Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-05-07T09:04:11.535477Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:1, at schemeshard: 72057594046644480 2025-05-07T09:04:12.244624Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715670:0, at schemeshard: 72057594046644480 2025-05-07T09:04:12.987895Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715673:0, at schemeshard: 72057594046644480 2025-05-07T09:04:13.365655Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715676:0, at schemeshard: 72057594046644480 2025-05-07T09:04:14.816819Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710758:2, at schemeshard: 72057594046644480 2025-05-07T09:04:15.137025Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710759:0, at schemeshard: 72057594046644480 REQUEST= UPSERT OBJECT `accessKey` (TYPE SECRET) WITH (value = `secretAccessKey`); UPSERT OBJECT `secretKey` (TYPE SECRET) WITH (value = `fakeSecret`); ;RESULT=;EXPECTATION=1 FINISHED_REQUEST= UPSERT OBJECT `accessKey` (TYPE SECRET) WITH (value = `secretAccessKey`); UPSERT OBJECT `secretKey` (TYPE SECRET) WITH (value = `fakeSecret`); ;EXPECTATION=1;WAITING=1 REQUEST= CREATE EXTERNAL DATA SOURCE `/Root/tier1` WITH ( SOURCE_TYPE="ObjectStorage", LOCATION="http://fake.fake/fake", AUTH_METHOD="AWS", AWS_ACCESS_KEY_ID_SECRET_NAME="accessKey", AWS_SECRET_ACCESS_KEY_SECRET_NAME="secretKey", AWS_REGION="ru-central1" ); ;EXPECTATION=1;WAITING=1 2025-05-07T09:04:29.830116Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976715702:0, at schemeshard: 72057594046644480 REQUEST= CREATE EXTERNAL DATA SOURCE `/Root/tier1` WITH ( SOURCE_TYPE="ObjectStorage", LOCATION="http://fake.fake/fake", AUTH_METHOD="AWS", AWS_ACCESS_KEY_ID_SECRET_NAME="accessKey", AWS_SECRET_ACCESS_KEY_SECRET_NAME="secretKey", AWS_REGION="ru-central1" ); ;RESULT=;EXPECTATION=1 FINISHED_REQUEST= CREATE EXTERNAL DATA SOURCE `/Root/tier1` WITH ( SOURCE_TYPE="ObjectStorage", 
LOCATION="http://fake.fake/fake", AUTH_METHOD="AWS", AWS_ACCESS_KEY_ID_SECRET_NAME="accessKey", AWS_SECRET_ACCESS_KEY_SECRET_NAME="secretKey", AWS_REGION="ru-central1" ); ;EXPECTATION=1;WAITING=1 REQUEST= CREATE EXTERNAL DATA SOURCE `/Root/tier2` WITH ( SOURCE_TYPE="ObjectStorage", LOCATION="http://fake.fake/fake", AUTH_METHOD="AWS", AWS_ACCESS_KEY_ID_SECRET_NAME="accessKey", AWS_SECRET_ACCESS_KEY_SECRET_NAME="secretKey", AWS_REGION="ru-central1" ); ;EXPECTATION=1;WAITING=1 2025-05-07T09:04:41.966852Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976715709:0, at schemeshard: 72057594046644480 REQUEST= CREATE EXTERNAL DATA SOURCE `/Root/tier2` WITH ( SOURCE_TYPE="ObjectStorage", LOCATION="http://fake.fake/fake", AUTH_METHOD="AWS", AWS_ACCESS_KEY_ID_SECRET_NAME="accessKey", AWS_SECRET_ACCESS_KEY_SECRET_NAME="secretKey", AWS_REGION="ru-central1" ); ;RESULT=;EXPECTATION=1 FINISHED_REQUEST= CREATE EXTERNAL DATA SOURCE `/Root/tier2` WITH ( SOURCE_TYPE="ObjectStorage", LOCATION="http://fake.fake/fake", AUTH_METHOD="AWS", AWS_ACCESS_KEY_ID_SECRET_NAME="accessKey", AWS_SECRET_ACCESS_KEY_SECRET_NAME="secretKey", AWS_REGION="ru-central1" ); ;EXPECTATION=1;WAITING=1 2025-05-07T09:04:44.666815Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnStore, opId: 281474976715726:0, at schemeshard: 72057594046644480 Status: 53 TxId: 281474976715726 SchemeShardStatus: 1 SchemeShardTabletId: 72057594046644480 PathId: 15 2025-05-07T09:04:45.011316Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=72075186224037892;self_id=[1:2805:4116];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-05-07T09:04:45.040357Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=72075186224037892;self_id=[1:2805:4116];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-05-07T09:04:45.040724Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 72075186224037892 2025-05-07T09:04:45.048792Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:2805:4116];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-05-07T09:04:45.049091Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:2805:4116];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-05-07T09:04:45.049383Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:2805:4116];tablet_id=72075186224037892;process=TTxInitSchema::Exec ... 
0;mem=504;external_task_id=90e7fa66-2b2211f0-96da8fae-51c28804;type=CS::TTL;priority=0;; 2025-05-07T09:06:30.852200Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=72075186224037892;self_id=[1:2805:4116];ev=NKikimr::NColumnShard::TEvPrivate::TEvStartCompaction;path_id=16;fline=storage.cpp:87;event=granule_compaction_weight;priority=(10,19999998864); 2025-05-07T09:06:30.852336Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=72075186224037892;self_id=[1:2805:4116];ev=NKikimr::NColumnShard::TEvPrivate::TEvStartCompaction;path_id=16;fline=optimizer.h:900;stop_instant=NO_VALUE_OPTIONAL;size=2656;next=;count=2;info={bytes=1136;count=1;records=1};event=start_optimization;stop_point=;main_portion=19; 2025-05-07T09:06:30.852624Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037892;self_id=[1:2805:4116];ev=NKikimr::NColumnShard::TEvPrivate::TEvStartCompaction;fline=manager.cpp:10;event=lock;process_id=CS::GENERAL::90e87e78-2b2211f0-8c72b2c3-3185036f; 2025-05-07T09:06:30.852957Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037892;parent=[1:2805:4116];ev_type=NKikimr::NOlap::NResourceBroker::NSubscribe::TEvStartTask;fline=actor.cpp:38;event=ask_resources;task=cpu=0;mem=5382;external_task_id=90e87e78-2b2211f0-8c72b2c3-3185036f;type=CS::GENERAL;priority=0;; 2025-05-07T09:06:30.853473Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037892;self_id=[1:2805:4116];ev=NKikimr::NColumnShard::TEvPrivate::TEvStartCompaction;fline=storage.cpp:67;event=granule_locked;path_id=16;lock_id=CS::GENERAL::90e87e78-2b2211f0-8c72b2c3-3185036f; 2025-05-07T09:06:30.853514Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037892;self_id=[1:2805:4116];ev=NKikimr::NColumnShard::TEvPrivate::TEvStartCompaction;fline=storage.cpp:82;event=no_granules; 2025-05-07T09:06:30.853568Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037892;self_id=[1:2805:4116];ev=NKikimr::NColumnShard::TEvPrivate::TEvStartCompaction;fline=column_engine_logs.cpp:208;event=no granules for start compaction; 2025-05-07T09:06:30.853594Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Compaction not started: cannot prepare compaction at tablet 72075186224037892 2025-05-07T09:06:30.853670Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037892;parent=[1:2805:4116];ev_type=NKikimr::NResourceBroker::TEvResourceBroker::TEvResourceAllocated;fline=actor.cpp:29;event=result_resources;task_id=20;task=cpu=0;mem=504;external_task_id=90e7fa66-2b2211f0-96da8fae-51c28804;type=CS::TTL;priority=0;; 2025-05-07T09:06:30.853705Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037892;parent=[1:2805:4116];ev_type=NKikimr::NResourceBroker::TEvResourceBroker::TEvResourceAllocated;fline=task.cpp:9;event=resource_allocated;external_task_id=90e7fa66-2b2211f0-96da8fae-51c28804;mem=504;cpu=0; 2025-05-07T09:06:30.853740Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037892;parent=[1:2805:4116];ev_type=NKikimr::NResourceBroker::TEvResourceBroker::TEvResourceAllocated;fline=task.cpp:40;event=allocate_resources;external_task_id=90e7fa66-2b2211f0-96da8fae-51c28804;task_id=20;mem=504;cpu=0; 2025-05-07T09:06:30.854738Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037892;parent=[1:2805:4116];ev_type=NKikimr::NResourceBroker::TEvResourceBroker::TEvResourceAllocated;fline=actor.cpp:29;event=result_resources;task_id=21;task=cpu=0;mem=5382;external_task_id=90e87e78-2b2211f0-8c72b2c3-3185036f;type=CS::GENERAL;priority=0;; 
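The TX_COLUMNSHARD entries above show the resource broker granting memory to a CS::TTL task and a CS::GENERAL (compaction) task, and the engine logging per-portion statistics. The same portion state can also be inspected from YQL through the column-table system views; a minimal sketch, assuming the `.sys/primary_index_stats` view path and a placeholder store/table name (neither is taken from this run):

    -- Hypothetical: inspect portion/compaction state of a column table
    -- (`/Root/olapStore/olapTable` is a placeholder, not from this log)
    SELECT *
    FROM `/Root/olapStore/olapTable/.sys/primary_index_stats`
    LIMIT 10;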
2025-05-07T09:06:30.854779Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037892;parent=[1:2805:4116];ev_type=NKikimr::NResourceBroker::TEvResourceBroker::TEvResourceAllocated;fline=task.cpp:9;event=resource_allocated;external_task_id=90e87e78-2b2211f0-8c72b2c3-3185036f;mem=5382;cpu=0; 2025-05-07T09:06:30.854813Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037892;parent=[1:2805:4116];ev_type=NKikimr::NResourceBroker::TEvResourceBroker::TEvResourceAllocated;fline=task.cpp:40;event=allocate_resources;external_task_id=90e87e78-2b2211f0-8c72b2c3-3185036f;task_id=21;mem=5382;cpu=0; 2025-05-07T09:06:30.854882Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: self_id=[1:2841:4135];tablet_id=72075186224037892;parent=[1:2805:4116];fline=manager.cpp:82;event=ask_data;request=request_id=41;16={portions_count=2};; 2025-05-07T09:06:30.856123Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: self_id=[1:2841:4135];tablet_id=72075186224037892;parent=[1:2805:4116];fline=columnshard_impl.cpp:1035;background=cleanup;changes_info=type=CS::CLEANUP::PORTIONS;details=(drop 2 portions(portion_id:18;path_id:16;records_count:1;schema_version:1;level:0;cs:plan_step=1737970526500;tx_id=18446744073709551615;;wi:12;;column_size:1136;index_size:0;meta:((produced=INSERTED;));remove_snapshot:(plan_step=1737970526500;tx_id=18446744073709551615;);)(portion_id:17;path_id:16;records_count:1;schema_version:1;level:0;;column_size:1520;index_size:0;meta:((produced=SPLIT_COMPACTED;));remove_snapshot:(plan_step=1737970526500;tx_id=18446744073709551615;);));; 2025-05-07T09:06:30.856289Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: WriteIndex at tablet 72075186224037892 2025-05-07T09:06:30.856443Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxWriteIndex[45] (CS::CLEANUP::PORTIONS) apply at tablet 72075186224037892 2025-05-07T09:06:30.856994Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Index: tables 1 inserted {blob_bytes=4064;raw_bytes=58838;count=2;records=52} compacted {blob_bytes=0;raw_bytes=0;count=0;records=0} s-compacted {blob_bytes=110016;raw_bytes=3580887;count=2;records=2980} inactive {blob_bytes=2656;raw_bytes=2178;count=2;records=2} evicted {blob_bytes=0;raw_bytes=0;count=0;records=0} at tablet 72075186224037892 2025-05-07T09:06:30.857229Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: self_id=[1:2841:4135];tablet_id=72075186224037892;parent=[1:2805:4116];fline=manager.cpp:82;event=ask_data;request=request_id=42;16={portions_count=2};; 2025-05-07T09:06:30.857401Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: self_id=[1:2841:4135];tablet_id=72075186224037892;parent=[1:2805:4116];fline=columnshard_impl.cpp:881;event=compaction;external_task_id=90e87e78-2b2211f0-8c72b2c3-3185036f; 2025-05-07T09:06:30.857481Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: self_id=[1:2841:4135];tablet_id=72075186224037892;parent=[1:2805:4116];fline=columnshard_impl.cpp:620;event=start_changes;type=CS::GENERAL;task_id=90e87e78-2b2211f0-8c72b2c3-3185036f; 2025-05-07T09:06:30.857712Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: external_task_id=90e87e78-2b2211f0-8c72b2c3-3185036f;fline=actor.cpp:48;task=agents_waiting=1;additional_info=();; 2025-05-07T09:06:30.858731Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: event_type=NKikimr::NBlobCache::TEvBlobCache::TEvReadBlobRangeResult;fline=task.cpp:110;event=OnDataReady;task=agents_waiting=0;additional_info=();;external_task_id=90e87e78-2b2211f0-8c72b2c3-3185036f; 2025-05-07T09:06:30.864861Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: 
tablet_id=72075186224037892;parent_id=[1:2805:4116];fline=general_compaction.cpp:133;event=blobs_created_diff;appended=0;;column_id:5;chunk_idx:0;blob_range:[NO_BLOB:0:264];;column_id:3;chunk_idx:0;blob_range:[NO_BLOB:264:256];;column_id:2;chunk_idx:0;blob_range:[NO_BLOB:520:232];;column_id:4294967040;chunk_idx:0;blob_range:[NO_BLOB:752:192];;column_id:1;chunk_idx:0;blob_range:[NO_BLOB:944:192];;column_id:4294967041;chunk_idx:0;blob_range:[NO_BLOB:1136:192];;column_id:4;chunk_idx:0;blob_range:[NO_BLOB:1328:192];;;;switched=(portion_id:20;path_id:16;records_count:1;schema_version:1;level:0;cs:plan_step=1737970526500;tx_id=18446744073709551615;;wi:13;;column_size:1136;index_size:0;meta:((produced=INSERTED;)););(portion_id:19;path_id:16;records_count:1;schema_version:1;level:0;;column_size:1520;index_size:0;meta:((produced=SPLIT_COMPACTED;)););; 2025-05-07T09:06:30.864942Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=72075186224037892;parent_id=[1:2805:4116];fline=general_compaction.cpp:135;event=blobs_created;appended=1;switched=2; 2025-05-07T09:06:30.865202Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037892;self_id=[1:2805:4116];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=columnshard__write_index.cpp:50;event=TEvWriteIndex;count=1; 2025-05-07T09:06:30.865558Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: WriteIndex at tablet 72075186224037892 2025-05-07T09:06:30.865655Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037892;self_id=[1:2805:4116];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=constructor_meta.cpp:54;memory_size=94;data_size=70;sum=2726;count=57; 2025-05-07T09:06:30.865728Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037892;self_id=[1:2805:4116];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=constructor_meta.cpp:75;memory_size=254;data_size=246;sum=7366;count=58;size_of_meta=144; 2025-05-07T09:06:30.865789Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037892;self_id=[1:2805:4116];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=constructor_portion.cpp:40;memory_size=326;data_size=318;sum=9454;count=29;size_of_portion=216; 2025-05-07T09:06:30.865956Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxWriteIndex[47] (CS::GENERAL) apply at tablet 72075186224037892 2025-05-07T09:06:30.867926Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager on execute at tablet 72075186224037892 Save Batch GenStep: 1:18 Blob count: 1 2025-05-07T09:06:30.868064Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Index: tables 1 inserted {blob_bytes=4064;raw_bytes=58838;count=2;records=52} compacted {blob_bytes=0;raw_bytes=0;count=0;records=0} s-compacted {blob_bytes=110016;raw_bytes=3580887;count=2;records=2980} inactive {blob_bytes=2656;raw_bytes=2178;count=2;records=2} evicted {blob_bytes=0;raw_bytes=0;count=0;records=0} at tablet 72075186224037892 Cleaning waiting... 
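Taken together, the REQUEST=/RESULT= markers in this dump trace one full tiering lifecycle: secrets are upserted, external data sources `/Root/tier1` and `/Root/tier2` are created, the tiering manager starts each tier on every tablet, and both sources are dropped again. A condensed sketch of that sequence in YQL (the UPSERT/CREATE/DROP statements are copied from the log; the commented ALTER TABLE clause is an assumption about current YQL tiering syntax and does not appear in this run):

    -- Secrets referenced by the data sources (from the log)
    UPSERT OBJECT `accessKey` (TYPE SECRET) WITH (value = `secretAccessKey`);
    UPSERT OBJECT `secretKey`  (TYPE SECRET) WITH (value = `fakeSecret`);

    -- Tier endpoint (from the log; fake.fake is the test's stub object storage)
    CREATE EXTERNAL DATA SOURCE `/Root/tier1` WITH (
        SOURCE_TYPE = "ObjectStorage",
        LOCATION = "http://fake.fake/fake",
        AUTH_METHOD = "AWS",
        AWS_ACCESS_KEY_ID_SECRET_NAME = "accessKey",
        AWS_SECRET_ACCESS_KEY_SECRET_NAME = "secretKey",
        AWS_REGION = "ru-central1"
    );

    -- Hypothetical: attach the tier to a column table via TTL (not shown in this log)
    -- ALTER TABLE `/Root/olapStore/olapTable`
    --     SET (TTL = Interval("PT1H") TO EXTERNAL DATA SOURCE `/Root/tier1` ON ts);

    -- Teardown (from the log)
    DROP EXTERNAL DATA SOURCE `/Root/tier1`;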
Fake storage clean FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/Root/tier1' stopped at tablet 0 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/Root/tier2' stopped at tablet 0 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/Root/tier2' stopped at tablet 0 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/Root/tier1' stopped at tablet 0 FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=abstract.cpp:105;event=AbortEmergency;reason=TTxWriteIndex destructor withno CompleteReady flag;prev_reason=; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=manager.cpp:51;message=aborted data locks manager; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=abstract.cpp:105;event=AbortEmergency;reason=TTxWriteIndex destructor withno CompleteReady flag;prev_reason=; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=manager.cpp:51;message=aborted data locks manager; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/Root/tier1' stopped at tablet 72075186224037892 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/Root/tier2' stopped at tablet 72075186224037892 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/Root/tier2' stopped at tablet 72075186224037892 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/Root/tier1' stopped at tablet 72075186224037892 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/Root/tier1' stopped at tablet 72075186224037893 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/Root/tier2' stopped at tablet 72075186224037893 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/Root/tier2' stopped at tablet 72075186224037893 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/Root/tier1' stopped at tablet 72075186224037893 |93.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_common.py::TestCommonSqsYandexCloudMode::test_private_queue_recreation[tables_format_v1-std] [GOOD] >> test_auditlog.py::test_single_dml_query_logged[update] |93.2%| [TA] $(B)/ydb/core/tx/tiering/ut/test-results/unittest/{meta.json ... results_accumulator.log} |93.2%| [TA] {RESULT} $(B)/ydb/core/tx/tiering/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} |93.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_common.py::TestCommonYandexWithTenant::test_private_queue_recreation[tables_format_v0-std] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_message_counters_in_cloud[tables_format_v0-std] [GOOD] |93.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_queue_counters.py::TestYmqQueueCounters::test_purge_queue_counters [GOOD] |93.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_not_throttling_with_custom_queue_name[std-tables_format_v1] [GOOD] |93.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/olap/ttl_tiering/py3test |93.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> TPersQueueTest::EachMessageGetsExactlyOneAcknowledgementInCorrectOrder [GOOD] >> TPersQueueTest::Delete |93.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |93.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |93.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/script_execution/py3test |93.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_cloud_ids_are_logged[attrs1] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_mechanics_in_cloud[tables_format_v0-tables_format_v1-fifo] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_mechanics_in_cloud[tables_format_v0-tables_format_v1-std] |93.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/olap/ttl_tiering/py3test |93.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |93.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> DataShardVolatile::UpsertBrokenLockArbiter-UseSink [GOOD] >> DataShardVolatile::UpsertNoLocksArbiterRestart+UseSink >> test_auditlog.py::test_broken_dynconfig[_client_session_pool_bad_auth-_bad_dynconfig] |93.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_mechanics_in_cloud[tables_format_v1-tables_format_v1-fifo] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_mechanics_in_cloud[tables_format_v1-tables_format_v1-std] >> TTopicYqlTest::CreateTopicYqlBackCompatibility [GOOD] |93.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_broken_dynconfig[_client_session_pool_no_auth-_good_dynconfig] >> DataShardVolatile::DistributedOutOfOrderFollowerConsistency [GOOD] >> DataShardVolatile::DistributedWriteRSNotAckedBeforeCommit >> TPersQueueTest::CheckDecompressionTasksWithoutSession [GOOD] >> TPersQueueTest::Codecs_InitWriteSession_DefaultTopicSupportedCodecsInInitResponse |93.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test ------- [TM] {asan, default-linux-x86_64, release} ydb/services/persqueue_v1/ut/unittest >> TTopicYqlTest::CreateTopicYqlBackCompatibility [GOOD] Test command err: 2025-05-07T09:03:38.287372Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501626896124829372:2069];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:03:38.287557Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: 
fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-05-07T09:03:38.349965Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7501626894248224335:2072];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:03:38.350178Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-05-07T09:03:38.501746Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-05-07T09:03:38.501139Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002607/r3tmp/tmpfA5xHO/pdisk_1.dat 2025-05-07T09:03:38.742631Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:03:38.742723Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:03:38.744202Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:03:38.744264Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:03:38.746540Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:03:38.747008Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:03:38.749330Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-05-07T09:03:38.749758Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 20819, node 1 2025-05-07T09:03:38.837024Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/zvgn/002607/r3tmp/yandexoevvxV.tmp 2025-05-07T09:03:38.837077Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/zvgn/002607/r3tmp/yandexoevvxV.tmp 2025-05-07T09:03:38.837219Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/zvgn/002607/r3tmp/yandexoevvxV.tmp 2025-05-07T09:03:38.837346Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-05-07T09:03:38.877773Z INFO: TTestServer started on Port 7247 GrpcPort 20819 TClient is connected to server localhost:7247 PQClient connected to localhost:20819 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:03:39.091461Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-05-07T09:03:39.137623Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... waiting... waiting... 2025-05-07T09:03:41.451618Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501626909009732362:2338], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:03:41.451622Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501626909009732391:2342], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:03:41.451711Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:03:41.455395Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710662:3, at schemeshard: 72057594046644480 2025-05-07T09:03:41.458753Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501626909009732434:2346], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:03:41.458830Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:03:41.473797Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501626909009732400:2343], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710662 completed, doublechecking } 2025-05-07T09:03:41.717039Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501626909009732483:2766] txid# 281474976710663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:03:41.737541Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T09:03:41.805371Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7501626909009732503:2349], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-05-07T09:03:41.806887Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2147: SessionId: ydb://session/3?node_id=1&id=YmNjMDI4YzItNTU3ZGJjOWItYzJjMTg2MDYtODAyYTE0ZTA=, ActorId: [1:7501626909009732358:2335], ActorState: ExecuteState, TraceId: 01jtmzs1w102b7yzcckvbhvqvw, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-05-07T09:03:41.808627Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-05-07T09:03:41.814433Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T09:03:41.893257Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); 2025-05-07T09:03:42.046420Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710667. Ctx: { TraceId: 01jtmzs2c4awnmqyva5fd84pdx, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MzVlZGI5NDItZGQzYzhlYWItOWQ3NGFkZWMtYzE0NDI1OGE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root === CheckClustersList. Subcribe to ClusterTracker from [1:7501626913304700241:3097] 2025-05-07T09:03:43.287278Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501626896124829372:2069];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:03:43.287377Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:03:43.350221Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7501626894248224335:2072];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:03:43.350304Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; === CheckClustersList. 
Ok 2025-05-07T09:03:48.000637Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1353: Session closed, sessionId: ydb://session/3?node_id=1&id=MWE4OGFjZmQtNTA1Mzg1YzUtZTM1Y2M5NDItNjA4NjY5ODY=, workerId: [1:7501626934779536971:2445], local sessions count: 0 2025-05-07T09:03:48.001361Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2663: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7501626896124829653:2129], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2025- ... t/PQ/rt3.dc1--legacy--topic1" YcCloudId: "" YcFolderId: "" YdbDatabaseId: "" YdbDatabasePath: "/Root" Partitions { PartitionId: 1 KeyRange { FromBound: "\177\377\377\377\377\377\377\377\377\377\377\377\377\377\377\376" } Status: Active CreateVersion: 1 TabletId: 0 } ReadRuleGenerations: 0 PartitionStrategy { MinPartitionCount: 2 MaxPartitionCount: 5 ScaleThresholdSeconds: 300 ScaleUpPartitionWriteSpeedThresholdPercent: 90 ScaleDownPartitionWriteSpeedThresholdPercent: 30 PartitionStrategyType: CAN_SPLIT } AllPartitions { PartitionId: 1 KeyRange { FromBound: "\177\377\377\377\377\377\377\377\377\377\377\377\377\377\377\376" } Status: Active CreateVersion: 1 TabletId: 0 } Consumers { Name: "c1" ReadFromTimestampsMs: 0 FormatVersion: 0 Codec { } ServiceType: "data-streams" Version: 0 Generation: 0 } 2025-05-07T09:06:39.070516Z node 26 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72075186224037892] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-05-07T09:06:39.070638Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:4499: [PQ: 72075186224037892] delete partitions for TxId 281474976715678 2025-05-07T09:06:39.070673Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:4214: [PQ: 72075186224037892] TxId 281474976715678, NewState EXECUTED 2025-05-07T09:06:39.070703Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:4249: [PQ: 72075186224037892] TxId 281474976715678 moved from EXECUTING to EXECUTED 2025-05-07T09:06:39.070733Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:3804: [PQ: 72075186224037892] write key for TxId 281474976715678 2025-05-07T09:06:39.070982Z node 25 :PERSQUEUE DEBUG: pq_impl.cpp:1225: [PQ: 72075186224037893] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-05-07T09:06:39.071039Z node 25 :PERSQUEUE DEBUG: pq_impl.cpp:4279: [PQ: 72075186224037893] Try execute txs with state EXECUTED 2025-05-07T09:06:39.071203Z node 26 :PERSQUEUE DEBUG: transaction.cpp:374: [TxId: 281474976715678] save tx TxId: 281474976715678 State: EXECUTED MinStep: 1746608798000 MaxStep: 18446744073709551615 Step: 1746608799012 Predicate: true Kind: KIND_CONFIG TabletConfig { PartitionConfig { MaxCountInPartition: 2147483647 LifetimeSeconds: 86400 SourceIdLifetimeSeconds: 1382400 WriteSpeedInBytesPerSecond: 1048576 BurstSize: 1048576 TotalPartitions: 2 ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } SourceIdMaxCounts: 6000000 } 
PartitionIds: 1 TopicName: "rt3.dc1--legacy--topic1" Version: 0 LocalDC: true RequireAuthWrite: true RequireAuthRead: true Producer: "legacy" Ident: "legacy" Topic: "topic1" DC: "dc1" FormatVersion: 0 Codecs { } TopicPath: "/Root/PQ/rt3.dc1--legacy--topic1" YcCloudId: "" YcFolderId: "" YdbDatabaseId: "" YdbDatabasePath: "/Root" Partitions { PartitionId: 1 KeyRange { FromBound: "\177\377\377\377\377\377\377\377\377\377\377\377\377\377\377\376" } Status: Active CreateVersion: 1 TabletId: 0 } ReadRuleGenerations: 0 PartitionStrategy { MinPartitionCount: 2 MaxPartitionCount: 5 ScaleThresholdSeconds: 300 ScaleUpPartitionWriteSpeedThresholdPercent: 90 ScaleDownPartitionWriteSpeedThresholdPercent: 30 PartitionStrategyType: CAN_SPLIT } AllPartitions { PartitionId: 1 KeyRange { FromBound: "\177\377\377\377\377\377\377\377\377\377\377\377\377\377\377\376" } Status: Active CreateVersion: 1 TabletId: 0 } Consumers { Name: "c1" ReadFromTimestampsMs: 0 FormatVersion: 0 Codec { } ServiceType: "data-streams" Version: 0 Generation: 0 } } BootstrapConfig { } SourceActor { RawX1: 7501627615729598038 RawX2: 107374184601 } Partitions { Partition { PartitionId: 1 } } 2025-05-07T09:06:39.071507Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:3621: [PQ: 72075186224037892] Send TEvKeyValue::TEvRequest (WRITE_TX_COOKIE) 2025-05-07T09:06:39.071060Z node 25 :PERSQUEUE DEBUG: pq_impl.cpp:4324: [PQ: 72075186224037893] TxId 281474976715678, State EXECUTED 2025-05-07T09:06:39.071091Z node 25 :PERSQUEUE DEBUG: pq_impl.cpp:4271: [PQ: 72075186224037893] TxId 281474976715678 State EXECUTED FrontTxId 281474976715678 2025-05-07T09:06:39.071112Z node 25 :PERSQUEUE DEBUG: pq_impl.cpp:3975: [PQ: 72075186224037893] TPersQueue::SendEvReadSetAckToSenders 2025-05-07T09:06:39.071151Z node 25 :PERSQUEUE DEBUG: pq_impl.cpp:4214: [PQ: 72075186224037893] TxId 281474976715678, NewState WAIT_RS_ACKS 2025-05-07T09:06:39.071172Z node 25 :PERSQUEUE DEBUG: pq_impl.cpp:4249: [PQ: 72075186224037893] TxId 281474976715678 moved from EXECUTED to WAIT_RS_ACKS 2025-05-07T09:06:39.071209Z node 25 :PERSQUEUE DEBUG: transaction.cpp:366: [TxId: 281474976715678] PredicateAcks: 0/0 2025-05-07T09:06:39.071220Z node 25 :PERSQUEUE DEBUG: pq_impl.cpp:4525: [PQ: 72075186224037893] HaveAllRecipientsReceive 1, AllSupportivePartitionsHaveBeenDeleted 1 2025-05-07T09:06:39.071238Z node 25 :PERSQUEUE DEBUG: transaction.cpp:366: [TxId: 281474976715678] PredicateAcks: 0/0 2025-05-07T09:06:39.071259Z node 25 :PERSQUEUE DEBUG: pq_impl.cpp:4586: [PQ: 72075186224037893] add an TxId 281474976715678 to the list for deletion 2025-05-07T09:06:39.071287Z node 25 :PERSQUEUE DEBUG: pq_impl.cpp:4214: [PQ: 72075186224037893] TxId 281474976715678, NewState DELETING 2025-05-07T09:06:39.071326Z node 25 :PERSQUEUE DEBUG: pq_impl.cpp:3820: [PQ: 72075186224037893] delete key for TxId 281474976715678 2025-05-07T09:06:39.071390Z node 25 :PERSQUEUE DEBUG: pq_impl.cpp:3621: [PQ: 72075186224037893] Send TEvKeyValue::TEvRequest (WRITE_TX_COOKIE) 2025-05-07T09:06:39.082559Z node 25 :PERSQUEUE DEBUG: pq_impl.cpp:1225: [PQ: 72075186224037893] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-05-07T09:06:39.082610Z node 25 :PERSQUEUE DEBUG: pq_impl.cpp:4279: [PQ: 72075186224037893] Try execute txs with state DELETING 2025-05-07T09:06:39.082632Z node 25 :PERSQUEUE DEBUG: pq_impl.cpp:4324: [PQ: 72075186224037893] TxId 281474976715678, State DELETING 2025-05-07T09:06:39.082666Z node 25 :PERSQUEUE DEBUG: pq_impl.cpp:4536: [PQ: 72075186224037893] delete TxId 281474976715678 2025-05-07T09:06:39.092565Z node 
26 :PERSQUEUE DEBUG: pq_impl.cpp:1225: [PQ: 72075186224037892] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-05-07T09:06:39.092609Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:4279: [PQ: 72075186224037892] Try execute txs with state EXECUTED 2025-05-07T09:06:39.092628Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:4324: [PQ: 72075186224037892] TxId 281474976715678, State EXECUTED 2025-05-07T09:06:39.092653Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:4271: [PQ: 72075186224037892] TxId 281474976715678 State EXECUTED FrontTxId 281474976715678 2025-05-07T09:06:39.092676Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:3975: [PQ: 72075186224037892] TPersQueue::SendEvReadSetAckToSenders 2025-05-07T09:06:39.092702Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:4214: [PQ: 72075186224037892] TxId 281474976715678, NewState WAIT_RS_ACKS 2025-05-07T09:06:39.092723Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:4249: [PQ: 72075186224037892] TxId 281474976715678 moved from EXECUTED to WAIT_RS_ACKS 2025-05-07T09:06:39.092755Z node 26 :PERSQUEUE DEBUG: transaction.cpp:366: [TxId: 281474976715678] PredicateAcks: 0/0 2025-05-07T09:06:39.092765Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:4525: [PQ: 72075186224037892] HaveAllRecipientsReceive 1, AllSupportivePartitionsHaveBeenDeleted 1 2025-05-07T09:06:39.092784Z node 26 :PERSQUEUE DEBUG: transaction.cpp:366: [TxId: 281474976715678] PredicateAcks: 0/0 2025-05-07T09:06:39.092805Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:4586: [PQ: 72075186224037892] add an TxId 281474976715678 to the list for deletion 2025-05-07T09:06:39.092835Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:4214: [PQ: 72075186224037892] TxId 281474976715678, NewState DELETING 2025-05-07T09:06:39.092868Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:3820: [PQ: 72075186224037892] delete key for TxId 281474976715678 2025-05-07T09:06:39.092940Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:3621: [PQ: 72075186224037892] Send TEvKeyValue::TEvRequest (WRITE_TX_COOKIE) 2025-05-07T09:06:39.183464Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:1225: [PQ: 72075186224037892] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-05-07T09:06:39.183514Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:4279: [PQ: 72075186224037892] Try execute txs with state DELETING 2025-05-07T09:06:39.183549Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:4324: [PQ: 72075186224037892] TxId 281474976715678, State DELETING 2025-05-07T09:06:39.183575Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:4536: [PQ: 72075186224037892] delete TxId 281474976715678 TClient::Ls request: /Root/PQ/rt3.dc1--legacy--topic1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "rt3.dc1--legacy--topic1" PathId: 13 SchemeshardId: 72057594046644480 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 281474976715678 CreateStep: 1746608799012 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 1 } ChildrenExist: false BalancerTabletID: 72075186224037894 } PersQueueGroup { Name: "rt3.dc1--legacy--topic1" PathId: 13 TotalGroupCount: 2 PartitionPerTablet: 1 PQTabletConfig { PartitionConfig { MaxCountInPartition: 2147483647 LifetimeSeconds: 86400 SourceIdLifetimeSeconds: 1382400 WriteSpeedInBytesPerSecond: 10... 
(TRUNCATED) === PATH DESCRIPTION: Name: "rt3.dc1--legacy--topic1" PathId: 13 TotalGroupCount: 2 PartitionPerTablet: 1 PQTabletConfig { PartitionConfig { MaxCountInPartition: 2147483647 LifetimeSeconds: 86400 SourceIdLifetimeSeconds: 1382400 WriteSpeedInBytesPerSecond: 1048576 BurstSize: 1048576 ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } SourceIdMaxCounts: 6000000 } LocalDC: true RequireAuthWrite: true RequireAuthRead: true Producer: "legacy" Ident: "legacy" Topic: "topic1" DC: "dc1" FormatVersion: 0 Codecs { } YdbDatabasePath: "/Root" PartitionStrategy { MinPartitionCount: 2 MaxPartitionCount: 5 ScaleThresholdSeconds: 300 ScaleUpPartitionWriteSpeedThresholdPercent: 90 ScaleDownPartitionWriteSpeedThresholdPercent: 30 PartitionStrategyType: CAN_SPLIT } Consumers { Name: "c1" ReadFromTimestampsMs: 0 FormatVersion: 0 Codec { } ServiceType: "data-streams" Version: 0 } } Partitions { PartitionId: 0 TabletId: 72075186224037893 KeyRange { ToBound: "\177\377\377\377\377\377\377\377\377\377\377\377\377\377\377\376" } Status: Active } Partitions { PartitionId: 1 TabletId: 72075186224037892 KeyRange { FromBound: "\177\377\377\377\377\377\377\377\377\377\377\377\377\377\377\376" } Status: Active } AlterVersion: 1 BalancerTabletID: 72075186224037894 NextPartitionId: 2 |93.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_mechanics_in_cloud[tables_format_v0-tables_format_v0-std] [GOOD] >> test_update_script_tables.py::TestUpdateScriptTablesYdb::test_recreate_tables[ALTER TABLE {} DROP COLUMN syntax-`.metadata/script_executions`] [GOOD] |93.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |93.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_count_queues[tables_format_v1] [GOOD] |93.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/dump_restore/py3test >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_index_1__ASYNC-pk_types5-all_types5-index5---ASYNC] [GOOD] |93.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> TPersQueueTest::WhenTheTopicIsDeletedAfterReadingTheData_Uncompressed [GOOD] >> TTopicYqlTest::CreateAndAlterTopicYql |93.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> TPersQueueTest::BadSids [GOOD] |93.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |93.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_common.py::TestCommonSqsYandexCloudMode::test_private_queue_recreation[tables_format_v0-std] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_mechanics_in_cloud[tables_format_v1-tables_format_v0-fifo] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_mechanics_in_cloud[tables_format_v1-tables_format_v0-std] ------- [TM] {asan, default-linux-x86_64, release} 
ydb/services/persqueue_v1/ut/unittest >> TPersQueueTest::BadSids [GOOD] Test command err: 2025-05-07T09:03:38.455835Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-05-07T09:03:38.455911Z node 1 :PERSQUEUE INFO: pq_impl.cpp:794: [PQ: 72057594037927937] doesn't have tx writes info 2025-05-07T09:03:39.019982Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-05-07T09:03:39.020062Z node 2 :PERSQUEUE INFO: pq_impl.cpp:794: [PQ: 72057594037927937] doesn't have tx writes info === Server->StartServer(false); 2025-05-07T09:03:39.491879Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7501626900911573539:2073];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:03:39.491958Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-05-07T09:03:39.515866Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7501626898232907738:2071];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:03:39.515963Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002626/r3tmp/tmpO2eGEx/pdisk_1.dat 2025-05-07T09:03:39.663072Z node 4 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-05-07T09:03:39.663341Z node 3 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-05-07T09:03:39.818312Z node 3 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:03:39.832525Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:03:39.832684Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:03:39.839243Z node 3 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 4 Cookie 4 2025-05-07T09:03:39.840880Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:03:39.873616Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:03:39.873724Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:03:39.876904Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 14397, node 3 2025-05-07T09:03:39.911572Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/zvgn/002626/r3tmp/yandexP7XxCU.tmp 2025-05-07T09:03:39.911597Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/zvgn/002626/r3tmp/yandexP7XxCU.tmp 2025-05-07T09:03:39.911766Z node 3 
:NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/zvgn/002626/r3tmp/yandexP7XxCU.tmp 2025-05-07T09:03:39.911889Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-05-07T09:03:39.961409Z INFO: TTestServer started on Port 25077 GrpcPort 14397 TClient is connected to server localhost:25077 PQClient connected to localhost:14397 === TenantModeEnabled() = 0 === Init PQ - start server on port 14397 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:03:40.300142Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "Root" StoragePools { Name: "/Root:test" Kind: "test" } } } TxId: 281474976715657 TabletId: 72057594046644480 PeerName: "" , at schemeshard: 72057594046644480 2025-05-07T09:03:40.300295Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //Root, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-05-07T09:03:40.300480Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 0 2025-05-07T09:03:40.300728Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 281474976715657:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-05-07T09:03:40.300780Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-05-07T09:03:40.303425Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 281474976715657, response: Status: StatusAccepted TxId: 281474976715657 SchemeshardId: 72057594046644480 PathId: 1, at schemeshard: 72057594046644480 2025-05-07T09:03:40.303562Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976715657, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //Root 2025-05-07T09:03:40.303759Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 281474976715657:0, at schemeshard: 72057594046644480 2025-05-07T09:03:40.303830Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 281474976715657:0 ProgressState, operation type: 
TxAlterSubDomain, at tablet# 72057594046644480 2025-05-07T09:03:40.303880Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 281474976715657:0 ProgressState no shards to create, do next state 2025-05-07T09:03:40.303920Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 281474976715657:0 2 -> 3 waiting... 2025-05-07T09:03:40.307344Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 281474976715657, at schemeshard: 72057594046644480 2025-05-07T09:03:40.307375Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 281474976715657, ready parts: 0/1, is published: true 2025-05-07T09:03:40.307418Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 281474976715657, at schemeshard: 72057594046644480 2025-05-07T09:03:40.309424Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 281474976715657:0, at schemeshard: 72057594046644480 2025-05-07T09:03:40.309472Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 281474976715657:0 ProgressState, at schemeshard: 72057594046644480 2025-05-07T09:03:40.309578Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 281474976715657:0 3 -> 128 2025-05-07T09:03:40.311917Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 281474976715657:0, at schemeshard: 72057594046644480 2025-05-07T09:03:40.311971Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 281474976715657:0, at schemeshard: 72057594046644480 2025-05-07T09:03:40.311997Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 281474976715657:0, at tablet# 72057594046644480 2025-05-07T09:03:40.312022Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 281474976715657 ready parts: 1/1 2025-05-07T09:03:40.316497Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046644480 Flags: 2 } ExecLevel: 0 TxId: 281474976715657 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T09:03:40.318978Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 281474976715657:4294967295 from tablet: 72057594046644480 to tablet: 72057594046316545 cookie: 0:281474976715657 msg type: 269090816 2025-05-07T09:03:40.319184Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 281474976715657, partId: 4294967295, tablet: 72057594046316545 2025-05-07T09:03:40.322570Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 1746608620365, transactions count in step: 1, at schemeshard: 72057594046644480 2025-05-07T09:03:40.322741Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 281474976715657 AckTo { RawX1: 0 
RawX2: 0 } } Step: 1746608620365 MediatorID: 72057594046382081 TabletID: 72057594046644480, at schemeshard: 72057594046644480 2025-05-07T09:03:40.322778Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 281474976715657:0, at tablet# 72057594046644480 2025-05-07T09:03:40.323190Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 281474976715657:0 128 -> 240 2025-05-07T09:03:40.323242Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 281474976715657:0, at tablet# 72057594046644480 2025-05-07T09:03:40.323494Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 1 2025-05-07T09:03:40.323561Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046644480, LocalPathId: 1], at schemeshard: 72057594046644480 2025-05-07T09:03:40.325851Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_boa ... CurrentSessionLifetimeMs: 1746608804465 BytesWritten: 0 MessagesWritten: 0 BytesWrittenCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-05-07T09:06:44.465773Z :INFO: [] MessageGroupId [base64:aa] SessionId [] Write session established. Init response: session_id: "base64:aa|3d47a67e-510bc15d-588c8dd3-56cbde2_0" topic: "topic1" cluster: "dc1" partition_id: 5 supported_codecs: CODEC_RAW supported_codecs: CODEC_GZIP supported_codecs: CODEC_LZOP 2025-05-07T09:06:44.466067Z :DEBUG: [] MessageGroupId [base64:aa] SessionId [base64:aa|3d47a67e-510bc15d-588c8dd3-56cbde2_0] Write 1 messages with Id from 1 to 1 2025-05-07T09:06:44.466227Z :INFO: [] MessageGroupId [base64:aa] SessionId [base64:aa|3d47a67e-510bc15d-588c8dd3-56cbde2_0] Write session: close. 
Timeout = 18446744073709551 ms 2025-05-07T09:06:44.470598Z :DEBUG: [] MessageGroupId [base64:aa] SessionId [base64:aa|3d47a67e-510bc15d-588c8dd3-56cbde2_0] Write session: try to update token 2025-05-07T09:06:44.470685Z :DEBUG: [] MessageGroupId [base64:aa] SessionId [base64:aa|3d47a67e-510bc15d-588c8dd3-56cbde2_0] Send 1 message(s) (0 left), first sequence number is 1 2025-05-07T09:06:44.472921Z node 21 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 5 sessionId: base64:aa|3d47a67e-510bc15d-588c8dd3-56cbde2_0 grpc read done: success: 1 data: write_request[data omitted] 2025-05-07T09:06:44.473326Z node 21 :PQ_WRITE_PROXY DEBUG: writer.cpp:538: TPartitionWriter 72075186224037893 (partition=5) Received event: NKikimr::NPQ::TEvPartitionWriter::TEvWriteRequest 2025-05-07T09:06:44.474356Z node 22 :PERSQUEUE DEBUG: pq_impl.cpp:342: Handle TEvRequest topic: 'rt3.dc1--topic1' requestId: 2025-05-07T09:06:44.474414Z node 22 :PERSQUEUE DEBUG: pq_impl.cpp:2788: [PQ: 72075186224037893] got client message batch for topic 'rt3.dc1--topic1' partition 5 2025-05-07T09:06:44.474524Z node 22 :PERSQUEUE DEBUG: pq_impl.cpp:377: Answer ok topic: 'rt3.dc1--topic1' partition: 5 messageNo: 0 requestId: cookie: 1 2025-05-07T09:06:44.477288Z node 22 :PERSQUEUE DEBUG: pq_impl.cpp:342: Handle TEvRequest topic: 'rt3.dc1--topic1' requestId: 2025-05-07T09:06:44.477339Z node 22 :PERSQUEUE DEBUG: pq_impl.cpp:2788: [PQ: 72075186224037893] got client message batch for topic 'rt3.dc1--topic1' partition 5 2025-05-07T09:06:44.477758Z node 22 :PERSQUEUE DEBUG: pq_impl.cpp:2192: [PQ: 72075186224037893] got client message topic: rt3.dc1--topic1 partition: 5 SourceId: '\0base64:aa' SeqNo: 1 partNo : 0 messageNo: 1 size 92 offset: -1 2025-05-07T09:06:44.476724Z node 21 :PQ_WRITE_PROXY DEBUG: writer.cpp:538: TPartitionWriter 72075186224037893 (partition=5) Received event: NActors::IEventHandle 2025-05-07T09:06:44.478022Z node 22 :PERSQUEUE DEBUG: partition_write.cpp:1233: [PQ: 72075186224037893, Partition: 5, State: StateIdle] Topic 'rt3.dc1--topic1' partition 5 part blob processing sourceId '\0base64:aa' seqNo 1 partNo 0 2025-05-07T09:06:44.478985Z node 22 :PERSQUEUE DEBUG: partition_write.cpp:1333: [PQ: 72075186224037893, Partition: 5, State: StateIdle] Topic 'rt3.dc1--topic1' partition 5 part blob complete sourceId '\0base64:aa' seqNo 1 partNo 0 FormedBlobsCount 0 NewHead: Offset 0 PartNo 0 PackedSize 169 count 1 nextOffset 1 batches 1 2025-05-07T09:06:44.479793Z node 22 :PERSQUEUE DEBUG: partition_write.cpp:1623: [PQ: 72075186224037893, Partition: 5, State: StateIdle] Add new write blob: topic 'rt3.dc1--topic1' partition 5 compactOffset 0,1 HeadOffset 0 endOffset 0 curOffset 1 d0000000005_00000000000000000000_00000_0000000001_00000| size 157 WTime 1746608804479 2025-05-07T09:06:44.479985Z node 22 :PERSQUEUE DEBUG: partition.cpp:2182: [PQ: 72075186224037893, Partition: 5, State: StateIdle] === DumpKeyValueRequest === 2025-05-07T09:06:44.480030Z node 22 :PERSQUEUE DEBUG: partition.cpp:2183: [PQ: 72075186224037893, Partition: 5, State: StateIdle] --- delete ---------------- 2025-05-07T09:06:44.480077Z node 22 :PERSQUEUE DEBUG: partition.cpp:2189: [PQ: 72075186224037893, Partition: 5, State: StateIdle] [x0000000005, x0000000006) 2025-05-07T09:06:44.480117Z node 22 :PERSQUEUE DEBUG: partition.cpp:2191: [PQ: 72075186224037893, Partition: 5, State: StateIdle] --- write ----------------- 2025-05-07T09:06:44.480159Z node 22 :PERSQUEUE DEBUG: partition.cpp:2194: [PQ: 72075186224037893, Partition: 5, State: StateIdle] 
m0000000005pbase64:aa 2025-05-07T09:06:44.480176Z node 22 :PERSQUEUE DEBUG: partition.cpp:2194: [PQ: 72075186224037893, Partition: 5, State: StateIdle] d0000000005_00000000000000000000_00000_0000000001_00000| 2025-05-07T09:06:44.480191Z node 22 :PERSQUEUE DEBUG: partition.cpp:2194: [PQ: 72075186224037893, Partition: 5, State: StateIdle] i0000000005 2025-05-07T09:06:44.480234Z node 22 :PERSQUEUE DEBUG: partition.cpp:2196: [PQ: 72075186224037893, Partition: 5, State: StateIdle] --- rename ---------------- 2025-05-07T09:06:44.480273Z node 22 :PERSQUEUE DEBUG: partition.cpp:2201: [PQ: 72075186224037893, Partition: 5, State: StateIdle] =========================== 2025-05-07T09:06:44.482974Z node 22 :PERSQUEUE DEBUG: read.h:262: CacheProxy. Passthrough write request to KV 2025-05-07T09:06:44.483091Z node 22 :PERSQUEUE DEBUG: read.h:300: CacheProxy. Passthrough blob. Partition 5 offset 0 partNo 0 count 1 size 157 2025-05-07T09:06:44.497244Z node 22 :PERSQUEUE DEBUG: cache_eviction.h:315: Caching head blob in L1. Partition 5 offset 0 count 1 size 157 actorID [22:7501627690391420683:2418] 2025-05-07T09:06:44.497430Z node 22 :PERSQUEUE DEBUG: partition_write.cpp:524: [PQ: 72075186224037893, Partition: 5, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 102 WriteNewSizeFromSupportivePartitions# 0 2025-05-07T09:06:44.497498Z node 22 :PERSQUEUE DEBUG: partition_write.cpp:58: [PQ: 72075186224037893, Partition: 5, State: StateIdle] TPartition::ReplyWrite. Partition: 5 2025-05-07T09:06:44.497569Z node 22 :PERSQUEUE DEBUG: partition_write.cpp:324: [PQ: 72075186224037893, Partition: 5, State: StateIdle] Answering for message sourceid: '\0base64:aa', Topic: 'rt3.dc1--topic1', Partition: 5, SeqNo: 1, partNo: 0, Offset: 0 is stored on disk 2025-05-07T09:06:44.497848Z node 22 :PERSQUEUE DEBUG: partition_read.cpp:774: [PQ: 72075186224037893, Partition: 5, State: StateIdle] Topic 'rt3.dc1--topic1' partition 5 user user readTimeStamp for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 0 rrg 0 2025-05-07T09:06:44.497892Z node 22 :PERSQUEUE DEBUG: partition_read.cpp:816: [PQ: 72075186224037893, Partition: 5, State: StateIdle] Topic 'rt3.dc1--topic1' partition 5 user user send read request for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 1 rrg 0 2025-05-07T09:06:44.498038Z node 22 :PERSQUEUE DEBUG: pq_impl.cpp:377: Answer ok topic: 'rt3.dc1--topic1' partition: 5 messageNo: 1 requestId: cookie: 1 2025-05-07T09:06:44.498799Z node 21 :PQ_WRITE_PROXY DEBUG: writer.cpp:538: TPartitionWriter 72075186224037893 (partition=5) Received event: NActors::IEventHandle 2025-05-07T09:06:44.498206Z node 22 :PERSQUEUE DEBUG: partition_read.cpp:731: [PQ: 72075186224037893, Partition: 5, State: StateIdle] read cookie 0 Topic 'rt3.dc1--topic1' partition 5 user user offset 0 count 1 size 1024000 endOffset 1 max time lag 0ms effective offset 0 2025-05-07T09:06:44.498241Z node 22 :PERSQUEUE DEBUG: partition_read.cpp:931: [PQ: 72075186224037893, Partition: 5, State: StateIdle] read cookie 0 added 0 blobs, size 0 count 0 last offset 0, current partition end offset: 1 2025-05-07T09:06:44.498289Z node 22 :PERSQUEUE DEBUG: partition_read.cpp:948: [PQ: 72075186224037893, Partition: 5, State: StateIdle] Reading cookie 0. All data is from uncompacted head. 
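
The === DumpKeyValueRequest === block above shows the shape of the batch a partition sends to the KV tablet before a write is acknowledged: a delete section (key ranges to drop), a write section (here a source-id key m..., a data blob key d..., and a partition info key i...), and a rename section, after which CacheProxy passes the blob through and caches the head in L1. A minimal sketch of that batch shape, using hypothetical Python names purely to mirror the dump format (not the actual pq_impl.cpp types):

    from dataclasses import dataclass, field

    @dataclass
    class KeyValueBatch:
        """Illustrative model of the --- delete / write / rename --- sections."""
        deletes: list[tuple[str, str]] = field(default_factory=list)   # [from, to) key ranges
        writes: list[str] = field(default_factory=list)                # keys to persist
        renames: list[tuple[str, str]] = field(default_factory=list)   # (old, new) pairs

    batch = KeyValueBatch()
    batch.deletes.append(("x0000000005", "x0000000006"))   # range seen under --- delete ---
    batch.writes += [
        "m0000000005pbase64:aa",                                       # source-id marker key
        "d0000000005_00000000000000000000_00000_0000000001_00000|",    # data blob key
        "i0000000005",                                                 # partition info key
    ]
    # An empty --- rename --- section simply means no keys change name in this batch.

Bundling deletes, writes, and renames into a single request suggests the partition treats the blob write, the source-id bookkeeping, and head cleanup as one unit of work, acknowledged by the single TEvKeyValue::TEvResponse seen in the surrounding entries.
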
2025-05-07T09:06:44.498316Z node 22 :PERSQUEUE DEBUG: partition_read.cpp:420: FormAnswer for 0 blobs 2025-05-07T09:06:44.498392Z node 22 :PERSQUEUE DEBUG: partition_read.cpp:856: Topic 'rt3.dc1--topic1' partition 5 user user readTimeStamp done, result 1746608804477 queuesize 0 startOffset 0 2025-05-07T09:06:44.498602Z node 22 :PERSQUEUE DEBUG: pq_l2_cache.cpp:120: PQ Cache (L2). Adding blob. Tablet '72075186224037893' partition 5 offset 0 partno 0 count 1 parts 0 size 157 2025-05-07T09:06:44.503386Z :DEBUG: [] MessageGroupId [base64:aa] SessionId [base64:aa|3d47a67e-510bc15d-588c8dd3-56cbde2_0] Write session got write response: sequence_numbers: 1 offsets: 0 already_written: false partition_id: 5 write_statistics { persist_duration_ms: 15 } 2025-05-07T09:06:44.503449Z :DEBUG: [] MessageGroupId [base64:aa] SessionId [base64:aa|3d47a67e-510bc15d-588c8dd3-56cbde2_0] Write session: acknoledged message 1 2025-05-07T09:06:44.567927Z :INFO: [] MessageGroupId [base64:aa] SessionId [base64:aa|3d47a67e-510bc15d-588c8dd3-56cbde2_0] Write session will now close 2025-05-07T09:06:44.568045Z :DEBUG: [] MessageGroupId [base64:aa] SessionId [base64:aa|3d47a67e-510bc15d-588c8dd3-56cbde2_0] Write session: aborting 2025-05-07T09:06:44.568665Z :INFO: [] MessageGroupId [base64:aa] SessionId [base64:aa|3d47a67e-510bc15d-588c8dd3-56cbde2_0] Write session: gracefully shut down, all writes complete 2025-05-07T09:06:44.568735Z :DEBUG: [] MessageGroupId [base64:aa] SessionId [base64:aa|3d47a67e-510bc15d-588c8dd3-56cbde2_0] Write session: destroy 2025-05-07T09:06:44.571370Z node 21 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 5 sessionId: base64:aa|3d47a67e-510bc15d-588c8dd3-56cbde2_0 grpc read done: success: 0 data: 2025-05-07T09:06:44.571399Z node 21 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 5 sessionId: base64:aa|3d47a67e-510bc15d-588c8dd3-56cbde2_0 grpc read failed 2025-05-07T09:06:44.571448Z node 21 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 5 sessionId: base64:aa|3d47a67e-510bc15d-588c8dd3-56cbde2_0 grpc closed 2025-05-07T09:06:44.571478Z node 21 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 5 sessionId: base64:aa|3d47a67e-510bc15d-588c8dd3-56cbde2_0 is DEAD 2025-05-07T09:06:44.573076Z node 21 :PQ_WRITE_PROXY DEBUG: writer.cpp:538: TPartitionWriter 72075186224037893 (partition=5) Received event: NActors::TEvents::TEvPoison 2025-05-07T09:06:44.575012Z node 22 :PERSQUEUE DEBUG: pq_impl.cpp:2899: [PQ: 72075186224037893] server disconnected, pipe [21:7501627694645411694:2536] destroyed 2025-05-07T09:06:44.575084Z node 22 :PERSQUEUE DEBUG: partition_write.cpp:138: [PQ: 72075186224037893, Partition: 5, State: StateIdle] TPartition::DropOwner. 2025-05-07T09:06:44.757545Z node 21 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1073: TxId: 281474976715686, task: 1, CA Id [21:7501627694645411723:2540]. Got EvDeliveryProblem, TabletId: 72075186224037891, NotDelivered: 0 2025-05-07T09:06:44.793057Z node 21 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1073: TxId: 281474976715686, task: 1, CA Id [21:7501627694645411723:2540]. Got EvDeliveryProblem, TabletId: 72075186224037891, NotDelivered: 1 2025-05-07T09:06:44.850373Z node 21 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1073: TxId: 281474976715686, task: 1, CA Id [21:7501627694645411723:2540]. 
Got EvDeliveryProblem, TabletId: 72075186224037891, NotDelivered: 1 2025-05-07T09:06:44.910784Z node 21 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1073: TxId: 281474976715686, task: 1, CA Id [21:7501627694645411723:2540]. Got EvDeliveryProblem, TabletId: 72075186224037891, NotDelivered: 1 2025-05-07T09:06:45.022717Z node 21 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1073: TxId: 281474976715686, task: 1, CA Id [21:7501627694645411723:2540]. Got EvDeliveryProblem, TabletId: 72075186224037891, NotDelivered: 1 2025-05-07T09:06:45.216906Z node 21 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1073: TxId: 281474976715686, task: 1, CA Id [21:7501627694645411723:2540]. Got EvDeliveryProblem, TabletId: 72075186224037891, NotDelivered: 1 2025-05-07T09:06:45.551016Z node 21 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1073: TxId: 281474976715686, task: 1, CA Id [21:7501627694645411723:2540]. Got EvDeliveryProblem, TabletId: 72075186224037891, NotDelivered: 1 |93.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> TPersQueueTest::SetupWriteSessionOnDisabledCluster [GOOD] >> TPersQueueTest::SetupReadSession |93.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |93.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |93.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |93.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |93.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_queues_count_over_limit[tables_format_v1] [GOOD] |93.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> DataShardVolatile::UpsertNoLocksArbiterRestart+UseSink [GOOD] >> DataShardVolatile::UpsertNoLocksArbiterRestart-UseSink |93.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> TPQCompatTest::BadTopics [GOOD] >> TPQCompatTest::CommitOffsets |93.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |93.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |93.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |93.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_dml_requests_logged_when_unauthorized [GOOD] |93.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/cloud/py3test >> test_common.py::TestCommonYandexWithTenant::test_private_queue_recreation[tables_format_v0-std] [GOOD] >> test_auditlog.py::test_single_dml_query_logged[update] [GOOD] |93.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> DataShardVolatile::DistributedWriteRSNotAckedBeforeCommit [GOOD] >> DataShardVolatile::DistributedUpsertRestartBeforePrepare+UseSink |93.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_broken_dynconfig[_client_session_pool_bad_auth-_good_dynconfig] |93.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |93.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |93.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |93.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |93.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> 
test_auditlog.py::test_cloud_ids_are_logged[attrs1] [GOOD] >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_index_2__SYNC-pk_types2-all_types2-index2---SYNC] [GOOD] >> test_auditlog.py::test_dml_requests_arent_logged_when_anonymous >> TPersQueueTest::Delete [GOOD] >> TPersQueueTest::FetchRequest |93.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |93.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |93.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/cloud/py3test >> test_common.py::TestCommonSqsYandexCloudMode::test_private_queue_recreation[tables_format_v1-std] [GOOD] |93.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_broken_dynconfig[_client_session_pool_bad_auth-_bad_dynconfig] [GOOD] |93.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |93.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |93.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_message_counters_in_cloud[tables_format_v0-std] [GOOD] |93.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_update_script_tables.py::TestUpdateScriptTablesYdb::test_recreate_tables[ALTER TABLE {} DROP COLUMN syntax, DROP COLUMN ast-`.metadata/script_executions`] [GOOD] |93.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |93.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_single_dml_query_logged[select] >> test_auditlog.py::test_single_dml_query_logged[insert] |93.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_broken_dynconfig[_client_session_pool_no_auth-_good_dynconfig] [GOOD] |93.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> DataShardVolatile::UpsertNoLocksArbiterRestart-UseSink [GOOD] >> DataShardVolatile::UpsertBrokenLockArbiterRestart+UseSink |93.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> TPersQueueTest::SetMeteringMode [GOOD] >> TPersQueueTest::ReadWithoutConsumerFederation >> TPersQueueTest::Codecs_InitWriteSession_DefaultTopicSupportedCodecsInInitResponse [GOOD] >> TPersQueueTest::Codecs_WriteMessageWithDefaultCodecs_MessagesAreAcknowledged |93.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> TTopicYqlTest::CreateAndAlterTopicYql [GOOD] >> TTopicYqlTest::AlterAutopartitioning |93.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |93.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |93.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |93.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |93.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> DataShardVolatile::DistributedUpsertRestartBeforePrepare+UseSink [GOOD] >> DataShardVolatile::DistributedUpsertRestartBeforePrepare-UseSink >> test_update_script_tables.py::TestUpdateScriptTablesYdb::test_recreate_tables[DROP TABLE {}-`.metadata/script_executions`] [GOOD] |93.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |93.3%| [TM] {asan, default-linux-x86_64, release} 
ydb/tests/functional/sqs/cloud/py3test >> test_common.py::TestCommonSqsYandexCloudMode::test_private_queue_recreation[tables_format_v0-std] [GOOD] |93.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |93.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |93.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |93.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |93.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/script_execution/py3test >> test_update_script_tables.py::TestUpdateScriptTablesYdb::test_recreate_tables[ALTER TABLE {} DROP COLUMN syntax-`.metadata/script_executions`] [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_cloud_ids_are_logged[attrs1] [GOOD] Test command err: AAA /home/runner/.ya/build/build_root/zvgn/00453c/ydb/tests/functional/audit/test-results/py3test/testing_out_stuff/test_auditlog/chunk9/testing_out_stuff/test_auditlog.py.test_cloud_ids_are_logged.attrs1/audit.txt 2025-05-07T09:06:56.553618Z: {"tx_id":"{none}","database":"/Root/test_auditlog.py","end_time":"2025-05-07T09:06:56.553535Z","sanitized_token":"**** (B6C6F477)","begin_tx":"1","remote_address":"127.0.0.1","commit_tx":"1","status":"SUCCESS","query_text":"update `/Root/test_auditlog.py/test-table` set value = 0 where id = 1","start_time":"2025-05-07T09:06:56.375481Z","subject":"root@builtin","detailed_status":"SUCCESS","operation":"ExecuteDataQueryRequest","folder_id":"folder-id-B","component":"grpc-proxy"} ------- [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_dml_requests_logged_when_unauthorized [GOOD] Test command err: AAA /home/runner/.ya/build/build_root/zvgn/004547/ydb/tests/functional/audit/test-results/py3test/testing_out_stuff/test_auditlog/chunk14/testing_out_stuff/test_auditlog.py.test_dml_requests_logged_when_unauthorized/audit.txt 2025-05-07T09:06:52.086587Z: {"database":"/Root/test_auditlog.py","end_time":"2025-05-07T09:06:52.086537Z","sanitized_token":"**** (C877DF61)","begin_tx":"1","remote_address":"127.0.0.1","commit_tx":"1","status":"ERROR","query_text":"insert into `/Root/test_auditlog.py/test-table` (id, value) values (100, 100), (101, 101)","start_time":"2025-05-07T09:06:52.062385Z","subject":"__bad__@builtin","detailed_status":"SCHEME_ERROR","operation":"ExecuteDataQueryRequest","component":"grpc-proxy"} 2025-05-07T09:06:52.232070Z: {"database":"/Root/test_auditlog.py","end_time":"2025-05-07T09:06:52.232035Z","sanitized_token":"**** (C877DF61)","begin_tx":"1","remote_address":"127.0.0.1","commit_tx":"1","status":"ERROR","query_text":"delete from `/Root/test_auditlog.py/test-table` where id = 100 or id = 101","start_time":"2025-05-07T09:06:52.204118Z","subject":"__bad__@builtin","detailed_status":"SCHEME_ERROR","operation":"ExecuteDataQueryRequest","component":"grpc-proxy"} 2025-05-07T09:06:52.365192Z: {"database":"/Root/test_auditlog.py","end_time":"2025-05-07T09:06:52.365151Z","sanitized_token":"**** (C877DF61)","begin_tx":"1","remote_address":"127.0.0.1","commit_tx":"1","status":"ERROR","query_text":"select id from `/Root/test_auditlog.py/test-table`","start_time":"2025-05-07T09:06:52.351413Z","subject":"__bad__@builtin","detailed_status":"SCHEME_ERROR","operation":"ExecuteDataQueryRequest","component":"grpc-proxy"} 2025-05-07T09:06:52.501774Z: 
{"database":"/Root/test_auditlog.py","end_time":"2025-05-07T09:06:52.501736Z","sanitized_token":"**** (C877DF61)","begin_tx":"1","remote_address":"127.0.0.1","commit_tx":"1","status":"ERROR","query_text":"update `/Root/test_auditlog.py/test-table` set value = 0 where id = 1","start_time":"2025-05-07T09:06:52.474966Z","subject":"__bad__@builtin","detailed_status":"SCHEME_ERROR","operation":"ExecuteDataQueryRequest","component":"grpc-proxy"} 2025-05-07T09:06:52.642497Z: {"database":"/Root/test_auditlog.py","end_time":"2025-05-07T09:06:52.642458Z","sanitized_token":"**** (C877DF61)","begin_tx":"1","remote_address":"127.0.0.1","commit_tx":"1","status":"ERROR","query_text":"replace into `/Root/test_auditlog.py/test-table` (id, value) values (2, 3), (3, 3)","start_time":"2025-05-07T09:06:52.615533Z","subject":"__bad__@builtin","detailed_status":"SCHEME_ERROR","operation":"ExecuteDataQueryRequest","component":"grpc-proxy"} 2025-05-07T09:06:52.789623Z: {"database":"/Root/test_auditlog.py","end_time":"2025-05-07T09:06:52.789584Z","sanitized_token":"**** (C877DF61)","begin_tx":"1","remote_address":"127.0.0.1","commit_tx":"1","status":"ERROR","query_text":"upsert into `/Root/test_auditlog.py/test-table` (id, value) values (4, 4), (5, 5)","start_time":"2025-05-07T09:06:52.764819Z","subject":"__bad__@builtin","detailed_status":"SCHEME_ERROR","operation":"ExecuteDataQueryRequest","component":"grpc-proxy"} |93.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |93.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |93.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |93.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test ------- [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_single_dml_query_logged[update] [GOOD] Test command err: AAA /home/runner/.ya/build/build_root/zvgn/004544/ydb/tests/functional/audit/test-results/py3test/testing_out_stuff/test_auditlog/chunk20/testing_out_stuff/test_auditlog.py.test_single_dml_query_logged.update/audit.txt 2025-05-07T09:06:53.756137Z: {"tx_id":"{none}","database":"/Root/test_auditlog.py","end_time":"2025-05-07T09:06:53.756088Z","sanitized_token":"**** (B6C6F477)","begin_tx":"1","remote_address":"127.0.0.1","commit_tx":"1","status":"SUCCESS","query_text":"update `/Root/test_auditlog.py/test-table` set value = 0 where id = 1","start_time":"2025-05-07T09:06:53.558266Z","subject":"root@builtin","detailed_status":"SUCCESS","operation":"ExecuteDataQueryRequest","component":"grpc-proxy"} |93.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_mechanics_in_cloud[tables_format_v1-tables_format_v1-std] [GOOD] |93.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |93.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |93.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |93.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> TPQCompatTest::CommitOffsets [GOOD] >> TPQCompatTest::LongProducerAndLongMessageGroupId >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_yc_events_processor[tables_format_v0] [FAIL] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_yc_events_processor[tables_format_v1] >> 
|93.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test
|93.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> DataShardVolatile::UpsertBrokenLockArbiterRestart+UseSink [GOOD]
>> DataShardVolatile::UpsertBrokenLockArbiterRestart-UseSink
|93.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test
|93.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_broken_dynconfig[_client_session_pool_with_auth_other-_good_dynconfig]
>> test_update_script_tables.py::TestUpdateScriptTablesYdb::test_recreate_tables[ALTER TABLE {} DROP COLUMN syntax, DROP COLUMN ast, DROP COLUMN stats-`.metadata/script_executions`] [GOOD]
|93.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test
------- [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_broken_dynconfig[_client_session_pool_bad_auth-_bad_dynconfig] [GOOD]
Test command err: AAA /home/runner/.ya/build/build_root/zvgn/00452f/ydb/tests/functional/audit/test-results/py3test/testing_out_stuff/test_auditlog/chunk0/testing_out_stuff/test_auditlog.py.test_broken_dynconfig._client_session_pool_bad_auth-_bad_dynconfig/audit.txt
2025-05-07T09:06:58.102481Z: {"reason":"ydb/library/fyamlcpp/fyamlcpp.cpp:1068: \n6:12 plain scalar cannot start with '%'","sanitized_token":"**** (C877DF61)","remote_address":"127.0.0.1","status":"ERROR","subject":"__bad__@builtin","operation":"REPLACE DYNCONFIG","new_config":"\n---\n123metadata:\n kind: MainConfig\n cluster: \"\"\n version: %s\nconfig:\n yaml_config_enabled: true\nallowed_labels:\n node_id:\n type: string\n host:\n type: string\n tenant:\n type: string\nselector_config: []\n ","component":"console"}
|93.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test
|93.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test
|93.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test
|93.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test
>> DataShardVolatile::DistributedUpsertRestartBeforePrepare-UseSink [GOOD]
>> DataShardVolatile::DistributedUpsertRestartAfterPrepare+UseSink
|93.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_mechanics_in_cloud[tables_format_v0-tables_format_v1-std] [GOOD]
|93.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test
|93.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test
|93.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test
|93.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/dump_restore/py3test >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_index_2__SYNC-pk_types2-all_types2-index2---SYNC] [GOOD]
|93.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_dml_requests_arent_logged_when_anonymous [GOOD]
|93.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test
------- [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_broken_dynconfig[_client_session_pool_no_auth-_good_dynconfig] [GOOD]
Test command err: AAA /home/runner/.ya/build/build_root/zvgn/004525/ydb/tests/functional/audit/test-results/py3test/testing_out_stuff/test_auditlog/chunk3/testing_out_stuff/test_auditlog.py.test_broken_dynconfig._client_session_pool_no_auth-_good_dynconfig/audit.txt
2025-05-07T09:07:01.609555Z: {"sanitized_token":"{none}","subject":"{none}","new_config":"\n---\nmetadata:\n kind: MainConfig\n cluster: \"\"\n version: 0\nconfig:\n yaml_config_enabled: true\nallowed_labels:\n node_id:\n type: string\n host:\n type: string\n tenant:\n type: string\nselector_config: []\n ","status":"SUCCESS","component":"console","operation":"REPLACE DYNCONFIG","remote_address":"127.0.0.1"}
|93.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test
|93.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_yc_events_processor[tables_format_v1] [GOOD]
>> test_auditlog.py::test_single_dml_query_logged[insert] [GOOD]
|93.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test
|93.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_mechanics_in_cloud[tables_format_v1-tables_format_v0-std] [GOOD]
>> test_auditlog.py::test_single_dml_query_logged[select] [GOOD]
|93.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test
|93.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test
|93.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test
|93.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test
|93.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test
>> TPersQueueTest::FetchRequest [GOOD]
>> TPersQueueTest::EventBatching
|93.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test
|93.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test
>> ExternalIndex::Simple [GOOD]
|93.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test
|93.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test
|93.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test
|93.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test
>> TTopicYqlTest::AlterAutopartitioning [GOOD]
>> TTopicYqlTest::BadRequests
>> TPersQueueTest::Codecs_WriteMessageWithDefaultCodecs_MessagesAreAcknowledged [GOOD]
>> TPersQueueTest::Codecs_WriteMessageWithNonDefaultCodecThatHasToBeConfiguredAdditionally_SessionClosedWithBadRequestError
>> TPersQueueTest::SetupReadSession [GOOD]
>> TPersQueueTest::TestBigMessage
|93.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/script_execution/py3test >> test_update_script_tables.py::TestUpdateScriptTablesYdb::test_recreate_tables[ALTER TABLE {} DROP COLUMN syntax, DROP COLUMN ast-`.metadata/script_executions`] [GOOD]
|93.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test
|93.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test
|93.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test
|93.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test
>> DataShardVolatile::UpsertBrokenLockArbiterRestart-UseSink [GOOD]
>> DataShardVolatile::UpsertDependenciesShardsRestart+UseSink
------- [TM] {asan,
default-linux-x86_64, release} ydb/services/ext_index/ut/unittest >> ExternalIndex::Simple [GOOD] Test command err: 2025-05-07T09:02:21.826915Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:100:2146], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T09:02:21.827846Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T09:02:21.827966Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T09:02:21.828123Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/cs_index/external;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00356a/r3tmp/tmpHFI4Ov/pdisk_1.dat TServer::EnableGrpc on GrpcPort 25045, node 1 TClient is connected to server localhost:26015 2025-05-07T09:02:27.574800Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:392: actor# [1:60:2107] Handle TEvGetProxyServicesRequest 2025-05-07T09:02:27.575649Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:392: actor# [1:60:2107] Handle TEvGetProxyServicesRequest 2025-05-07T09:02:27.644148Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-05-07T09:02:27.811892Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:02:27.845181Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:02:27.845294Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:02:27.845339Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:02:27.873403Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-05-07T09:02:27.919882Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:60:2107] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-05-07T09:02:27.929946Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:02:27.930227Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:02:27.930490Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 2025-05-07T09:02:27.942282Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:02:28.148572Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:60:2107] Handle TEvProposeTransaction 2025-05-07T09:02:28.148666Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:60:2107] TxId# 281474976715657 ProcessProposeTransaction 2025-05-07T09:02:28.159397Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:60:2107] Cookie# 0 userReqId# "" txid# 281474976715657 SEND to# [1:676:2569] 2025-05-07T09:02:28.274226Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1520: Actor# [1:676:2569] txid# 281474976715657 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/Root" 
OperationType: ESchemeOpCreateColumnStore CreateColumnStore { Name: "olapStore" ColumnShardCount: 4 SchemaPresets { Name: "default" Schema { Columns { Name: "timestamp" Type: "Timestamp" NotNull: true } Columns { Name: "resource_id" Type: "Utf8" DataAccessorConstructor { ClassName: "SPARSED" } } Columns { Name: "uid" Type: "Utf8" NotNull: true StorageId: "__MEMORY" } Columns { Name: "level" Type: "Int32" } Columns { Name: "message" Type: "Utf8" StorageId: "__MEMORY" } Columns { Name: "json_payload" Type: "JsonDocument" } KeyColumnNames: "timestamp" KeyColumnNames: "uid" } } } } } ExecTimeoutPeriod: 18446744073709551615 2025-05-07T09:02:28.274311Z node 1 :TX_PROXY DEBUG: schemereq.cpp:563: Actor# [1:676:2569] txid# 281474976715657 Bootstrap, UserSID: CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-05-07T09:02:28.274784Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1585: Actor# [1:676:2569] txid# 281474976715657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P6 2025-05-07T09:02:28.274892Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1575: Actor# [1:676:2569] txid# 281474976715657 TEvNavigateKeySet requested from SchemeCache 2025-05-07T09:02:28.275155Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1408: Actor# [1:676:2569] txid# 281474976715657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-05-07T09:02:28.275397Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1455: Actor# [1:676:2569] HANDLE EvNavigateKeySetResult, txid# 281474976715657 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-05-07T09:02:28.275465Z node 1 :TX_PROXY DEBUG: schemereq.cpp:102: Actor# [1:676:2569] txid# 281474976715657 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715657 TabletId# 72057594046644480} 2025-05-07T09:02:28.275732Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1310: Actor# [1:676:2569] txid# 281474976715657 HANDLE EvClientConnected 2025-05-07T09:02:28.322971Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnStore, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-05-07T09:02:28.340372Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1332: Actor# [1:676:2569] txid# 281474976715657 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715657} 2025-05-07T09:02:28.340482Z node 1 :TX_PROXY DEBUG: schemereq.cpp:543: Actor# [1:676:2569] txid# 281474976715657 SEND to# [1:675:2568] Source {TEvProposeTransactionStatus txid# 281474976715657 Status# 53} Status: 53 TxId: 281474976715657 SchemeShardStatus: 1 SchemeShardTabletId: 72057594046644480 PathId: 2 2025-05-07T09:02:28.507057Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=72075186224037888;self_id=[1:750:2630];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-05-07T09:02:28.524801Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=72075186224037888;self_id=[1:750:2630];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-05-07T09:02:28.525125Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 72075186224037888 2025-05-07T09:02:28.547206Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037888;self_id=[1:750:2630];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-05-07T09:02:28.547474Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:750:2630];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-05-07T09:02:28.547794Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:750:2630];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-05-07T09:02:28.547926Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:750:2630];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-05-07T09:02:28.548064Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:750:2630];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-05-07T09:02:28.548229Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:750:2630];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanInsertionDedup; 2025-05-07T09:02:28.548395Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:750:2630];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-05-07T09:02:28.548512Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:750:2630];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-05-07T09:02:28.548633Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:750:2630];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-05-07T09:02:28.548778Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:750:2630];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-05-07T09:02:28.548923Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:750:2630];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-05-07T09:02:28.549065Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:750:2630];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-05-07T09:02:28.570048Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 72075186224037888 2025-05-07T09:02:28.570484Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:137;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=11;current_normalizer=CLASS_NAME=Granules; 2025-05-07T09:02:28.570581Z node 1 :TX_COLUMNSHARD 
NOTICE: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-05-07T09:02:28.570825Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-05-07T09:02:28.571012Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-05-07T09:02:28.571106Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-05-07T09:02:28.571169Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-05-07T09:02:28.571273Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-05-07 ... nsform.cpp:33: PhysicalPeepholeTransformer: ( (let $1 (KqpTable '"//Root/.metadata/initialization/migrations" '"72057594046644480:6" '"" '1)) (let $2 '('"componentId" '"instant" '"modificationId")) (let $3 (Uint64 '"1001")) (let $4 (KqpRowsSourceSettings $1 $2 '('('"ItemsLimit" $3) '('"Sequential" '1)) (Void) '())) (let $5 (OptionalType (DataType 'Utf8))) (let $6 (StructType '('"componentId" $5) '('"instant" (OptionalType (DataType 'Uint32))) '('"modificationId" $5))) (let $7 '('('"_logical_id" '338) '('"_id" '"ccadf9b4-bd5c2be1-34a3b917-861ce93") '('"_wide_channels" $6))) (let $8 (DqPhyStage '((DqSource (DataSource '"KqpReadRangesSource") $4)) (lambda '($12) (block '( (let $13 (lambda '($14) (Member $14 '"componentId") (Member $14 '"instant") (Member $14 '"modificationId"))) (return (FromFlow (ExpandMap (Take (ToFlow $12) $3) $13))) ))) $7)) (let $9 (DqCnUnionAll (TDqOutput $8 '"0"))) (let $10 (DqPhyStage '($9) (lambda '($15) (FromFlow (NarrowMap (Take (ToFlow $15) $3) (lambda '($16 $17 $18) (AsStruct '('"componentId" $16) '('"instant" $17) '('"modificationId" $18)))))) '('('"_logical_id" '351) '('"_id" '"2e1b890-e95e0166-42ee3ab7-b5c5d393")))) (let $11 (DqCnResult (TDqOutput $10 '"0") '())) (return (KqpPhysicalQuery '((KqpPhysicalTx '($8 $10) '($11) '() '('('"type" '"data")))) '((KqpTxResultBinding (ListType $6) '"0" '"0")) '('('"type" '"data_query")))) ) 2025-05-07T09:07:19.570921Z node 1 :KQP_YQL INFO: log.cpp:67: TraceId: 01jtmzzpkxe4vdrmj91sn7mhtb, SessionId: CompileActor 2025-05-07 09:07:19.570 INFO ydb-services-ext_index-ut(pid=341487, tid=0x00007F40945C5D00) [core exec] yql_execution.cpp:466: Register async execution for node #268 2025-05-07T09:07:19.571065Z node 1 :KQP_YQL TRACE: log.cpp:67: TraceId: 01jtmzzpkxe4vdrmj91sn7mhtb, SessionId: CompileActor 2025-05-07 09:07:19.571 TRACE ydb-services-ext_index-ut(pid=341487, tid=0x00007F40945C5D00) [core exec] yql_execution.cpp:387: {3}, callable #277 2025-05-07T09:07:19.571166Z node 1 :KQP_YQL INFO: log.cpp:67: TraceId: 01jtmzzpkxe4vdrmj91sn7mhtb, SessionId: CompileActor 2025-05-07 09:07:19.571 INFO ydb-services-ext_index-ut(pid=341487, tid=0x00007F40945C5D00) [core exec] yql_execution.cpp:577: Node #277 finished execution 2025-05-07T09:07:19.571229Z node 1 :KQP_YQL INFO: log.cpp:67: TraceId: 01jtmzzpkxe4vdrmj91sn7mhtb, 
SessionId: CompileActor 2025-05-07 09:07:19.571 INFO ydb-services-ext_index-ut(pid=341487, tid=0x00007F40945C5D00) [core exec] yql_execution.cpp:594: Node #277 created 0 trackable nodes: 2025-05-07T09:07:19.571309Z node 1 :KQP_YQL INFO: log.cpp:67: TraceId: 01jtmzzpkxe4vdrmj91sn7mhtb, SessionId: CompileActor 2025-05-07 09:07:19.571 INFO ydb-services-ext_index-ut(pid=341487, tid=0x00007F40945C5D00) [core exec] yql_execution.cpp:87: Finish, output #280, status: Async 2025-05-07T09:07:19.572074Z node 1 :KQP_YQL INFO: log.cpp:67: TraceId: 01jtmzzpkxe4vdrmj91sn7mhtb, SessionId: CompileActor 2025-05-07 09:07:19.572 INFO ydb-services-ext_index-ut(pid=341487, tid=0x00007F40945C5D00) [core exec] yql_execution.cpp:133: Completed async execution for node #268 2025-05-07T09:07:19.572150Z node 1 :KQP_YQL INFO: log.cpp:67: TraceId: 01jtmzzpkxe4vdrmj91sn7mhtb, SessionId: CompileActor 2025-05-07 09:07:19.572 INFO ydb-services-ext_index-ut(pid=341487, tid=0x00007F40945C5D00) [core exec] yql_execution.cpp:153: State is ExecutionRequired after apply async changes for node #268 2025-05-07T09:07:19.572215Z node 1 :KQP_YQL INFO: log.cpp:67: TraceId: 01jtmzzpkxe4vdrmj91sn7mhtb, SessionId: CompileActor 2025-05-07 09:07:19.572 INFO ydb-services-ext_index-ut(pid=341487, tid=0x00007F40945C5D00) [core exec] yql_execution.cpp:59: Begin, root #280 2025-05-07T09:07:19.572269Z node 1 :KQP_YQL INFO: log.cpp:67: TraceId: 01jtmzzpkxe4vdrmj91sn7mhtb, SessionId: CompileActor 2025-05-07 09:07:19.572 INFO ydb-services-ext_index-ut(pid=341487, tid=0x00007F40945C5D00) [core exec] yql_execution.cpp:72: Collect unused nodes for root #280, status: Ok 2025-05-07T09:07:19.572319Z node 1 :KQP_YQL TRACE: log.cpp:67: TraceId: 01jtmzzpkxe4vdrmj91sn7mhtb, SessionId: CompileActor 2025-05-07 09:07:19.572 TRACE ydb-services-ext_index-ut(pid=341487, tid=0x00007F40945C5D00) [core exec] yql_execution.cpp:387: {0}, callable #280 2025-05-07T09:07:19.572375Z node 1 :KQP_YQL TRACE: log.cpp:67: TraceId: 01jtmzzpkxe4vdrmj91sn7mhtb, SessionId: CompileActor 2025-05-07 09:07:19.572 TRACE ydb-services-ext_index-ut(pid=341487, tid=0x00007F40945C5D00) [core exec] yql_execution.cpp:387: {1}, callable #279 2025-05-07T09:07:19.572430Z node 1 :KQP_YQL TRACE: log.cpp:67: TraceId: 01jtmzzpkxe4vdrmj91sn7mhtb, SessionId: CompileActor 2025-05-07 09:07:19.572 TRACE ydb-services-ext_index-ut(pid=341487, tid=0x00007F40945C5D00) [core exec] yql_execution.cpp:387: {2}, callable #278 2025-05-07T09:07:19.572537Z node 1 :KQP_YQL TRACE: log.cpp:67: TraceId: 01jtmzzpkxe4vdrmj91sn7mhtb, SessionId: CompileActor 2025-05-07 09:07:19.572 TRACE ydb-services-ext_index-ut(pid=341487, tid=0x00007F40945C5D00) [core exec] yql_execution.cpp:387: {3}, callable #275 2025-05-07T09:07:19.572591Z node 1 :KQP_YQL TRACE: log.cpp:67: TraceId: 01jtmzzpkxe4vdrmj91sn7mhtb, SessionId: CompileActor 2025-05-07 09:07:19.572 TRACE ydb-services-ext_index-ut(pid=341487, tid=0x00007F40945C5D00) [core exec] yql_execution.cpp:387: {4}, callable #268 2025-05-07T09:07:19.572756Z node 1 :KQP_YQL INFO: log.cpp:67: TraceId: 01jtmzzpkxe4vdrmj91sn7mhtb, SessionId: CompileActor 2025-05-07 09:07:19.572 INFO ydb-services-ext_index-ut(pid=341487, tid=0x00007F40945C5D00) [core exec] yql_execution.cpp:577: Node #268 finished execution 2025-05-07T09:07:19.572818Z node 1 :KQP_YQL INFO: log.cpp:67: TraceId: 01jtmzzpkxe4vdrmj91sn7mhtb, SessionId: CompileActor 2025-05-07 09:07:19.572 INFO ydb-services-ext_index-ut(pid=341487, tid=0x00007F40945C5D00) [core exec] yql_execution.cpp:594: Node #268 created 0 trackable nodes: 
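
Note: alongside the YQL trace above, the TX_COLUMNSHARD records in this dump carry a ';'-separated key=value tail (tablet_id=...;fline=...;event=...;description=...). A minimal sketch of splitting such a tail into fields, assuming only the format visible in this log (parse_fline_record is an invented helper name, not a YDB utility):

def parse_fline_record(message: str) -> dict:
    # Split the ';'-separated key=value tail used by the columnshard
    # lines above. Values may themselves contain '=' (e.g.
    # description=CLASS_NAME=Granules), so only the first '=' in each
    # segment is treated as the separator.
    fields = {}
    for segment in message.split(";"):
        segment = segment.strip()
        if not segment or "=" not in segment:
            continue
        key, _, value = segment.partition("=")
        fields[key] = value
    return fields

# Example taken verbatim from a normalizer line above:
rec = parse_fline_record(
    "fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules;"
)
assert rec["event"] == "normalizer_register"
assert rec["description"] == "CLASS_NAME=Granules"
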
2025-05-07T09:07:19.572872Z node 1 :KQP_YQL TRACE: log.cpp:67: TraceId: 01jtmzzpkxe4vdrmj91sn7mhtb, SessionId: CompileActor 2025-05-07 09:07:19.572 TRACE ydb-services-ext_index-ut(pid=341487, tid=0x00007F40945C5D00) [core exec] yql_execution.cpp:387: {3}, callable #275 2025-05-07T09:07:19.572931Z node 1 :KQP_YQL INFO: log.cpp:67: TraceId: 01jtmzzpkxe4vdrmj91sn7mhtb, SessionId: CompileActor 2025-05-07 09:07:19.572 INFO ydb-services-ext_index-ut(pid=341487, tid=0x00007F40945C5D00) [core exec] yql_execution.cpp:577: Node #275 finished execution 2025-05-07T09:07:19.572999Z node 1 :KQP_YQL TRACE: log.cpp:67: TraceId: 01jtmzzpkxe4vdrmj91sn7mhtb, SessionId: CompileActor 2025-05-07 09:07:19.572 TRACE ydb-services-ext_index-ut(pid=341487, tid=0x00007F40945C5D00) [core exec] yql_execution.cpp:387: {2}, callable #278 2025-05-07T09:07:19.573217Z node 1 :KQP_YQL INFO: log.cpp:67: TraceId: 01jtmzzpkxe4vdrmj91sn7mhtb, SessionId: CompileActor 2025-05-07 09:07:19.573 INFO ydb-services-ext_index-ut(pid=341487, tid=0x00007F40945C5D00) [core exec] yql_execution.cpp:577: Node #278 finished execution 2025-05-07T09:07:19.573280Z node 1 :KQP_YQL INFO: log.cpp:67: TraceId: 01jtmzzpkxe4vdrmj91sn7mhtb, SessionId: CompileActor 2025-05-07 09:07:19.573 INFO ydb-services-ext_index-ut(pid=341487, tid=0x00007F40945C5D00) [core exec] yql_execution.cpp:594: Node #278 created 0 trackable nodes: 2025-05-07T09:07:19.573343Z node 1 :KQP_YQL TRACE: log.cpp:67: TraceId: 01jtmzzpkxe4vdrmj91sn7mhtb, SessionId: CompileActor 2025-05-07 09:07:19.573 TRACE ydb-services-ext_index-ut(pid=341487, tid=0x00007F40945C5D00) [core exec] yql_execution.cpp:387: {1}, callable #279 2025-05-07T09:07:19.573418Z node 1 :KQP_YQL INFO: log.cpp:67: TraceId: 01jtmzzpkxe4vdrmj91sn7mhtb, SessionId: CompileActor 2025-05-07 09:07:19.573 INFO ydb-services-ext_index-ut(pid=341487, tid=0x00007F40945C5D00) [core exec] yql_execution.cpp:577: Node #279 finished execution 2025-05-07T09:07:19.573476Z node 1 :KQP_YQL INFO: log.cpp:67: TraceId: 01jtmzzpkxe4vdrmj91sn7mhtb, SessionId: CompileActor 2025-05-07 09:07:19.573 INFO ydb-services-ext_index-ut(pid=341487, tid=0x00007F40945C5D00) [core exec] yql_execution.cpp:594: Node #279 created 0 trackable nodes: 2025-05-07T09:07:19.573535Z node 1 :KQP_YQL TRACE: log.cpp:67: TraceId: 01jtmzzpkxe4vdrmj91sn7mhtb, SessionId: CompileActor 2025-05-07 09:07:19.573 TRACE ydb-services-ext_index-ut(pid=341487, tid=0x00007F40945C5D00) [core exec] yql_execution.cpp:387: {0}, callable #280 2025-05-07T09:07:19.573598Z node 1 :KQP_YQL INFO: log.cpp:67: TraceId: 01jtmzzpkxe4vdrmj91sn7mhtb, SessionId: CompileActor 2025-05-07 09:07:19.573 INFO ydb-services-ext_index-ut(pid=341487, tid=0x00007F40945C5D00) [core exec] yql_execution.cpp:577: Node #280 finished execution 2025-05-07T09:07:19.573653Z node 1 :KQP_YQL INFO: log.cpp:67: TraceId: 01jtmzzpkxe4vdrmj91sn7mhtb, SessionId: CompileActor 2025-05-07 09:07:19.573 INFO ydb-services-ext_index-ut(pid=341487, tid=0x00007F40945C5D00) [core exec] yql_execution.cpp:594: Node #280 created 0 trackable nodes: 2025-05-07T09:07:19.573705Z node 1 :KQP_YQL INFO: log.cpp:67: TraceId: 01jtmzzpkxe4vdrmj91sn7mhtb, SessionId: CompileActor 2025-05-07 09:07:19.573 INFO ydb-services-ext_index-ut(pid=341487, tid=0x00007F40945C5D00) [core exec] yql_execution.cpp:87: Finish, output #280, status: Ok 2025-05-07T09:07:19.573756Z node 1 :KQP_YQL INFO: log.cpp:67: TraceId: 01jtmzzpkxe4vdrmj91sn7mhtb, SessionId: CompileActor 2025-05-07 09:07:19.573 INFO ydb-services-ext_index-ut(pid=341487, tid=0x00007F40945C5D00) [core 
exec] yql_execution.cpp:93: Creating finalizing transformer, output #280 2025-05-07T09:07:19.593247Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:353: actor# [1:60:2107] Handle TEvExecuteKqpTransaction 2025-05-07T09:07:19.593319Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:342: actor# [1:60:2107] TxId# 281474976716232 ProcessProposeKqpTransaction 2025-05-07T09:07:19.604240Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:353: actor# [1:60:2107] Handle TEvExecuteKqpTransaction 2025-05-07T09:07:19.604302Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:342: actor# [1:60:2107] TxId# 281474976716233 ProcessProposeKqpTransaction 2025-05-07T09:07:19.771464Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037888;parent=[1:750:2630];fline=actor.cpp:33;event=skip_flush_writing; 2025-05-07T09:07:19.771718Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037889;parent=[1:754:2633];fline=actor.cpp:33;event=skip_flush_writing; 2025-05-07T09:07:19.771777Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037891;parent=[1:757:2636];fline=actor.cpp:33;event=skip_flush_writing; 2025-05-07T09:07:19.771831Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037890;parent=[1:761:2639];fline=actor.cpp:33;event=skip_flush_writing; 2025-05-07T09:07:19.784227Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037888;self_id=[1:750:2630];ev=NActors::TEvents::TEvWakeup;fline=columnshard.cpp:253;event=TEvPrivate::TEvPeriodicWakeup::MANUAL;tablet_id=72075186224037888; 2025-05-07T09:07:19.784361Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037889;self_id=[1:754:2633];ev=NActors::TEvents::TEvWakeup;fline=columnshard.cpp:253;event=TEvPrivate::TEvPeriodicWakeup::MANUAL;tablet_id=72075186224037889; 2025-05-07T09:07:19.784410Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037891;self_id=[1:757:2636];ev=NActors::TEvents::TEvWakeup;fline=columnshard.cpp:253;event=TEvPrivate::TEvPeriodicWakeup::MANUAL;tablet_id=72075186224037891; 2025-05-07T09:07:19.784458Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037890;self_id=[1:761:2639];ev=NActors::TEvents::TEvWakeup;fline=columnshard.cpp:253;event=TEvPrivate::TEvPeriodicWakeup::MANUAL;tablet_id=72075186224037890; REQUEST=SELECT COUNT(*) FROM `/Root/.metadata/initialization/migrations`;EXPECTATION=1 |93.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> TPersQueueTest::ReadWithoutConsumerFederation [GOOD] >> TPersQueueTest::ReadWithoutConsumerFirstClassCitizen >> DataShardVolatile::DistributedUpsertRestartAfterPrepare+UseSink [GOOD] >> DataShardVolatile::DistributedUpsertRestartAfterPrepare-UseSink >> test_auditlog.py::test_broken_dynconfig[_client_session_pool_no_auth-_bad_dynconfig] |93.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |93.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_dynconfig >> test_auditlog.py::test_broken_dynconfig[_client_session_pool_with_auth_other-_bad_dynconfig] |93.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |93.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |93.4%| [TA] $(B)/ydb/services/ext_index/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} |93.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |93.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |93.4%| [TA] {RESULT} $(B)/ydb/services/ext_index/ut/test-results/unittest/{meta.json ... results_accumulator.log} ------- [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_broken_dynconfig[_client_session_pool_bad_auth-_good_dynconfig] [GOOD] Test command err: AAA /home/runner/.ya/build/build_root/zvgn/004501/ydb/tests/functional/audit/test-results/py3test/testing_out_stuff/test_auditlog/chunk1/testing_out_stuff/test_auditlog.py.test_broken_dynconfig._client_session_pool_bad_auth-_good_dynconfig/audit.txt 2025-05-07T09:07:11.895033Z: {"sanitized_token":"**** (C877DF61)","subject":"__bad__@builtin","new_config":"\n---\nmetadata:\n kind: MainConfig\n cluster: \"\"\n version: 0\nconfig:\n yaml_config_enabled: true\nallowed_labels:\n node_id:\n type: string\n host:\n type: string\n tenant:\n type: string\nselector_config: []\n ","status":"SUCCESS","component":"console","operation":"REPLACE DYNCONFIG","remote_address":"127.0.0.1"} |93.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |93.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_mechanics_in_cloud[tables_format_v1-tables_format_v1-std] [GOOD] |93.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |93.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/script_execution/py3test >> test_update_script_tables.py::TestUpdateScriptTablesYdb::test_recreate_tables[DROP TABLE {}-`.metadata/script_executions`] [GOOD] |93.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |93.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |93.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |93.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |93.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_broken_dynconfig[_client_session_pool_with_auth_other-_good_dynconfig] [GOOD] |93.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |93.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test ------- [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_dml_requests_arent_logged_when_anonymous [GOOD] Test command err: AAA /home/runner/.ya/build/build_root/zvgn/0044f5/ydb/tests/functional/audit/test-results/py3test/testing_out_stuff/test_auditlog/chunk11/testing_out_stuff/test_auditlog.py.test_dml_requests_arent_logged_when_anonymous/audit.txt >> TCheckpointStorageTest::ShouldCreateCheckpoint >> TStateStorageTest::ShouldIssueErrorOnWrongGetStateParams |93.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_mechanics_in_cloud[tables_format_v0-tables_format_v1-std] [GOOD] >> test_auditlog.py::test_single_dml_query_logged[upsert] >> TPQCompatTest::LongProducerAndLongMessageGroupId [GOOD] >> TPQCompatTest::ReadWriteSessions >> TStateStorageTest::ShouldIssueErrorOnWrongGetStateParams [GOOD] >> TStateStorageTest::ShouldIssueErrorOnNonExistentState |93.5%| [TM] {asan, 
default-linux-x86_64, release} ydb/tests/functional/audit/py3test ------- [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_single_dml_query_logged[insert] [GOOD] Test command err: AAA /home/runner/.ya/build/build_root/zvgn/0044c9/ydb/tests/functional/audit/test-results/py3test/testing_out_stuff/test_auditlog/chunk17/testing_out_stuff/test_auditlog.py.test_single_dml_query_logged.insert/audit.txt 2025-05-07T09:07:18.480877Z: {"tx_id":"{none}","database":"/Root/test_auditlog.py","end_time":"2025-05-07T09:07:18.480803Z","sanitized_token":"**** (B6C6F477)","begin_tx":"1","remote_address":"127.0.0.1","commit_tx":"1","status":"SUCCESS","query_text":"insert into `/Root/test_auditlog.py/test-table` (id, value) values (100, 100), (101, 101)","start_time":"2025-05-07T09:07:18.389166Z","subject":"root@builtin","detailed_status":"SUCCESS","operation":"ExecuteDataQueryRequest","component":"grpc-proxy"} |93.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |93.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> TStateStorageTest::ShouldSaveGetOldSmallState2Tasks |93.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> DataShardVolatile::UpsertDependenciesShardsRestart+UseSink [GOOD] >> DataShardVolatile::UpsertDependenciesShardsRestart-UseSink >> DataShardVolatile::DistributedUpsertRestartAfterPrepare-UseSink [GOOD] >> DataShardVolatile::DistributedUpsertRestartAfterPlan >> TStorageServiceTest::ShouldRegister ------- [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_single_dml_query_logged[select] [GOOD] Test command err: AAA /home/runner/.ya/build/build_root/zvgn/0044c7/ydb/tests/functional/audit/test-results/py3test/testing_out_stuff/test_auditlog/chunk19/testing_out_stuff/test_auditlog.py.test_single_dml_query_logged.select/audit.txt 2025-05-07T09:07:19.767979Z: {"tx_id":"{none}","database":"/Root/test_auditlog.py","end_time":"2025-05-07T09:07:19.767937Z","sanitized_token":"**** (B6C6F477)","begin_tx":"1","remote_address":"127.0.0.1","commit_tx":"1","status":"SUCCESS","query_text":"select id from `/Root/test_auditlog.py/test-table`","start_time":"2025-05-07T09:07:19.650528Z","subject":"root@builtin","detailed_status":"SUCCESS","operation":"ExecuteDataQueryRequest","component":"grpc-proxy"} |93.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> TCheckpointStorageTest::ShouldCreateCheckpoint [GOOD] >> TCheckpointStorageTest::ShouldCreateGetCheckpoints >> TStorageServiceTest::ShouldNotCreateCheckpointAfterGenerationChanged |93.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/script_execution/py3test >> test_update_script_tables.py::TestUpdateScriptTablesYdb::test_recreate_tables[ALTER TABLE {} DROP COLUMN syntax, DROP COLUMN ast, DROP COLUMN stats-`.metadata/script_executions`] [GOOD] |93.5%| [TA] $(B)/ydb/tests/functional/script_execution/test-results/py3test/{meta.json ... results_accumulator.log} |93.5%| [TA] {RESULT} $(B)/ydb/tests/functional/script_execution/test-results/py3test/{meta.json ... 
results_accumulator.log} |93.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |93.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |93.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |93.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> TStateStorageTest::ShouldSaveGetOldSmallState2Tasks [GOOD] >> TStorageServiceTest::ShouldCreateCheckpoint >> TStateStorageTest::ShouldIssueErrorOnNonExistentState [GOOD] >> TCheckpointStorageTest::ShouldRegisterCoordinator >> TStateStorageTest::ShouldLoadLastSnapshot |93.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> TStateStorageTest::ShouldDeleteNoCheckpoints >> TStorageServiceTest::ShouldRegister [GOOD] >> TStorageServiceTest::ShouldRegisterNextGeneration |93.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delete_message_batch_deduplicates_receipt_handle[tables_format_v0-std] >> TCheckpointStorageTest::ShouldUpdateCheckpointStatusForCheckpointsWithTheSameGenAndNo >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_create_queue_by_nonexistent_user_fails[tables_format_v1] >> TPersQueueTest::EventBatching [GOOD] >> TPersQueueTest::DisableWrongSettings |93.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> TTopicYqlTest::BadRequests [GOOD] |93.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_write_read_delete_many_groups[tables_format_v0] >> TStateStorageTest::ShouldLoadLastSnapshot [GOOD] >> TStateStorageTest::ShouldNotGetNonExistendSnaphotState >> TCheckpointStorageTest::ShouldRegisterCoordinator [GOOD] >> TCheckpointStorageTest::ShouldGetCoordinators >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_visibility_timeout_works[tables_format_v1] >> TStateStorageTest::ShouldSaveGetOldSmallState |93.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |93.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_yc_events_processor[tables_format_v1] [GOOD] >> test_auditlog.py::test_broken_dynconfig[_client_session_pool_no_auth-_bad_dynconfig] [GOOD] >> TStorageServiceTest::ShouldRegisterNextGeneration [GOOD] >> TStorageServiceTest::ShouldPendingAndCompleteCheckpoint >> TPersQueueTest::Codecs_WriteMessageWithNonDefaultCodecThatHasToBeConfiguredAdditionally_SessionClosedWithBadRequestError [GOOD] >> TPersQueueTest::CreateTopicWithMeteringMode >> TStorageServiceTest::ShouldNotCreateCheckpointAfterGenerationChanged [GOOD] >> TStorageServiceTest::ShouldNotCompleteCheckpointWithoutCreation ------- [TM] {asan, default-linux-x86_64, release} ydb/services/persqueue_v1/ut/unittest >> TTopicYqlTest::BadRequests [GOOD] Test command err: 2025-05-07T09:03:38.378121Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501626896417636543:2075];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:03:38.378712Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-05-07T09:03:38.415593Z node 2 
:METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7501626896804116109:2085];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:03:38.416207Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0025f8/r3tmp/tmpFdPvlS/pdisk_1.dat 2025-05-07T09:03:38.543342Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-05-07T09:03:38.544673Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-05-07T09:03:38.742999Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:03:38.743148Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:03:38.743969Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:03:38.744072Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:03:38.747371Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:03:38.747783Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-05-07T09:03:38.748499Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:03:38.770413Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 13408, node 1 2025-05-07T09:03:38.802431Z node 1 :GRPC_SERVER WARN: grpc_request_proxy.cpp:509: SchemeBoardDelete /Root Strong=0 2025-05-07T09:03:38.802496Z node 1 :GRPC_SERVER WARN: grpc_request_proxy.cpp:509: SchemeBoardDelete /Root Strong=0 2025-05-07T09:03:38.849661Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/zvgn/0025f8/r3tmp/yandexBLXpZY.tmp 2025-05-07T09:03:38.849690Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/zvgn/0025f8/r3tmp/yandexBLXpZY.tmp 2025-05-07T09:03:38.849871Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/zvgn/0025f8/r3tmp/yandexBLXpZY.tmp 2025-05-07T09:03:38.850051Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-05-07T09:03:38.899926Z INFO: TTestServer started on Port 6905 GrpcPort 13408 TClient is connected to server localhost:6905 PQClient connected to localhost:13408 WaitRootIsUp 'Root'... 
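
Note: TClient::Ls below is the C++ test harness's scheme lookup against the freshly started server. For orientation only, a rough equivalent using the public ydb Python SDK would look like the sketch below; this assumes the SDK is installed, reuses the endpoint and database reported in the startup lines above (GrpcPort 13408, root database /Root), and is not how the test itself performs the call:

import ydb

# Endpoint/database as reported by TTestServer above; illustrative only.
driver = ydb.Driver(endpoint="grpc://localhost:13408", database="/Root")
driver.wait(timeout=5)

# Describe the root path, analogous to the TClient::Ls request below.
entry = driver.scheme_client.describe_path("/Root")
print(entry.name, entry.type)
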
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:03:39.162282Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-05-07T09:03:39.214230Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... waiting... waiting... 2025-05-07T09:03:41.370312Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7501626909689018373:2315], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:03:41.370404Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7501626909689018350:2312], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:03:41.370477Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:03:41.375956Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715657:3, at schemeshard: 72057594046644480 2025-05-07T09:03:41.393868Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7501626909689018379:2316], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715657 completed, doublechecking } 2025-05-07T09:03:41.457680Z node 2 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [2:7501626909689018408:2174] txid# 281474976715658, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:03:41.768517Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T09:03:41.786692Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7501626909302539588:2344], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-05-07T09:03:41.788404Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2147: SessionId: ydb://session/3?node_id=1&id=ZjVkYTk5NDQtYzcxZjFjZTEtZmU1NDU2MS1iNjAyYWU2NA==, ActorId: [1:7501626909302539547:2337], ActorState: ExecuteState, TraceId: 01jtmzs1yjfsrwjzmtp58936v2, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-05-07T09:03:41.790414Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7501626909689018422:2321], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-05-07T09:03:41.790549Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-05-07T09:03:41.790614Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2147: SessionId: ydb://session/3?node_id=2&id=YWZmYzRiZGQtOGYwMDVjM2YtMzg2MDU5MmMtNjg5ODQzMjI=, ActorId: [2:7501626909689018348:2311], ActorState: ExecuteState, TraceId: 01jtmzs1sqftwew93fbrhm3d48, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-05-07T09:03:41.790957Z node 2 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-05-07T09:03:41.840566Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T09:03:41.912917Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); 2025-05-07T09:03:42.065316Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710665. Ctx: { TraceId: 01jtmzs2cmav20d7ry7nnvyzhy, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MjUxZDgxYzYtZjBkOTk4Yy02MmM1ZDdmMC03NTU4Yzc0Ng==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root === CheckClustersList. Subcribe to ClusterTracker from [1:7501626913597507325:3067] 2025-05-07T09:03:43.372331Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501626896417636543:2075];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:03:43.372452Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:03:43.415836Z node 2 :METADATA_PROVIDER ERROR: ... 
T09:07:35.562124Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:3956: [PQ: 72075186224037892] Send TEvTxProcessing::TEvReadSet to 0 receivers. Wait TEvTxProcessing::TEvReadSet from 0 senders. 2025-05-07T09:07:35.562172Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:4447: [PQ: 72075186224037892] HaveParticipantsDecision 1 2025-05-07T09:07:35.562268Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:4214: [PQ: 72075186224037892] TxId 281474976715676, NewState EXECUTING 2025-05-07T09:07:35.562298Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:4249: [PQ: 72075186224037892] TxId 281474976715676 moved from WAIT_RS to EXECUTING 2025-05-07T09:07:35.562322Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:4477: [PQ: 72075186224037892] Received 0, Expected 1 2025-05-07T09:07:35.562466Z node 26 :PERSQUEUE DEBUG: partition.cpp:1170: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCommit Step 1746608855551, TxId 281474976715676 2025-05-07T09:07:35.563169Z node 26 :PERSQUEUE DEBUG: partition.cpp:2182: [PQ: 72075186224037892, Partition: 0, State: StateIdle] === DumpKeyValueRequest === 2025-05-07T09:07:35.563217Z node 26 :PERSQUEUE DEBUG: partition.cpp:2183: [PQ: 72075186224037892, Partition: 0, State: StateIdle] --- delete ---------------- 2025-05-07T09:07:35.563246Z node 26 :PERSQUEUE DEBUG: partition.cpp:2191: [PQ: 72075186224037892, Partition: 0, State: StateIdle] --- write ----------------- 2025-05-07T09:07:35.563281Z node 26 :PERSQUEUE DEBUG: partition.cpp:2194: [PQ: 72075186224037892, Partition: 0, State: StateIdle] i0000000000 2025-05-07T09:07:35.563296Z node 26 :PERSQUEUE DEBUG: partition.cpp:2194: [PQ: 72075186224037892, Partition: 0, State: StateIdle] I0000000000 2025-05-07T09:07:35.563313Z node 26 :PERSQUEUE DEBUG: partition.cpp:2194: [PQ: 72075186224037892, Partition: 0, State: StateIdle] m0000000000cc1 2025-05-07T09:07:35.563328Z node 26 :PERSQUEUE DEBUG: partition.cpp:2194: [PQ: 72075186224037892, Partition: 0, State: StateIdle] m0000000000uc1 2025-05-07T09:07:35.563343Z node 26 :PERSQUEUE DEBUG: partition.cpp:2194: [PQ: 72075186224037892, Partition: 0, State: StateIdle] m0000000000cc2 2025-05-07T09:07:35.563358Z node 26 :PERSQUEUE DEBUG: partition.cpp:2194: [PQ: 72075186224037892, Partition: 0, State: StateIdle] m0000000000uc2 2025-05-07T09:07:35.563373Z node 26 :PERSQUEUE DEBUG: partition.cpp:2194: [PQ: 72075186224037892, Partition: 0, State: StateIdle] _config_0 2025-05-07T09:07:35.563407Z node 26 :PERSQUEUE DEBUG: partition.cpp:2196: [PQ: 72075186224037892, Partition: 0, State: StateIdle] --- rename ---------------- 2025-05-07T09:07:35.563444Z node 26 :PERSQUEUE DEBUG: partition.cpp:2201: [PQ: 72075186224037892, Partition: 0, State: StateIdle] =========================== 2025-05-07T09:07:35.563491Z node 26 :PERSQUEUE DEBUG: read.h:262: CacheProxy. 
Passthrough write request to KV 2025-05-07T09:07:35.573125Z node 26 :PERSQUEUE DEBUG: partition_write.cpp:524: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0 2025-05-07T09:07:35.573378Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:3521: [PQ: 72075186224037892] Handle TEvPQ::TEvTxCommitDone Step 1746608855551, TxId 281474976715676, Partition 0 2025-05-07T09:07:35.573426Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:4279: [PQ: 72075186224037892] Try execute txs with state EXECUTING 2025-05-07T09:07:35.573461Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:4324: [PQ: 72075186224037892] TxId 281474976715676, State EXECUTING 2025-05-07T09:07:35.573490Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:4271: [PQ: 72075186224037892] TxId 281474976715676 State EXECUTING FrontTxId 281474976715676 2025-05-07T09:07:35.573510Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:4477: [PQ: 72075186224037892] Received 1, Expected 1 2025-05-07T09:07:35.573543Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:4150: [PQ: 72075186224037892] TxId: 281474976715676 send TEvPersQueue::TEvProposeTransactionResult(COMPLETE) 2025-05-07T09:07:35.573577Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:4481: [PQ: 72075186224037892] complete TxId 281474976715676 2025-05-07T09:07:35.574178Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:585: [PQ: 72075186224037892] Apply new config PartitionConfig { MaxCountInPartition: 2147483647 LifetimeSeconds: 86400 SourceIdLifetimeSeconds: 1382400 WriteSpeedInBytesPerSecond: 1048576 BurstSize: 1048576 TotalPartitions: 1 ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } SourceIdMaxCounts: 6000000 } PartitionIds: 0 TopicName: "rt3.dc1--legacy--topic1" Version: 0 LocalDC: true RequireAuthWrite: true RequireAuthRead: true Producer: "legacy" Ident: "legacy" Topic: "topic1" DC: "dc1" FormatVersion: 0 Codecs { } TopicPath: "/Root/PQ/rt3.dc1--legacy--topic1" YcCloudId: "" YcFolderId: "" YdbDatabaseId: "" YdbDatabasePath: "/Root" Partitions { PartitionId: 0 Status: Active CreateVersion: 1 TabletId: 0 } ReadRuleGenerations: 0 ReadRuleGenerations: 0 AllPartitions { PartitionId: 0 Status: Active CreateVersion: 1 TabletId: 0 } Consumers { Name: "c1" ReadFromTimestampsMs: 0 FormatVersion: 0 Codec { } ServiceType: "data-streams" Version: 0 Generation: 0 } Consumers { Name: "c2" ReadFromTimestampsMs: 0 FormatVersion: 0 Codec { } ServiceType: "data-streams" Version: 0 Generation: 0 } 2025-05-07T09:07:35.574354Z node 26 :PERSQUEUE NOTICE: pq_impl.cpp:1093: [PQ: 72075186224037892] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-05-07T09:07:35.574504Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:4499: [PQ: 72075186224037892] delete partitions for TxId 281474976715676 2025-05-07T09:07:35.574539Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:4214: [PQ: 72075186224037892] TxId 281474976715676, NewState EXECUTED 2025-05-07T09:07:35.574572Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:4249: [PQ: 72075186224037892] TxId 281474976715676 moved from EXECUTING to EXECUTED 2025-05-07T09:07:35.574609Z 
node 26 :PERSQUEUE DEBUG: pq_impl.cpp:3804: [PQ: 72075186224037892] write key for TxId 281474976715676 2025-05-07T09:07:35.575181Z node 26 :PERSQUEUE DEBUG: transaction.cpp:374: [TxId: 281474976715676] save tx TxId: 281474976715676 State: EXECUTED MinStep: 1746608855000 MaxStep: 18446744073709551615 Step: 1746608855551 Predicate: true Kind: KIND_CONFIG TabletConfig { PartitionConfig { MaxCountInPartition: 2147483647 LifetimeSeconds: 86400 SourceIdLifetimeSeconds: 1382400 WriteSpeedInBytesPerSecond: 1048576 BurstSize: 1048576 TotalPartitions: 1 ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } SourceIdMaxCounts: 6000000 } PartitionIds: 0 TopicName: "rt3.dc1--legacy--topic1" Version: 0 LocalDC: true RequireAuthWrite: true RequireAuthRead: true Producer: "legacy" Ident: "legacy" Topic: "topic1" DC: "dc1" FormatVersion: 0 Codecs { } TopicPath: "/Root/PQ/rt3.dc1--legacy--topic1" YcCloudId: "" YcFolderId: "" YdbDatabaseId: "" YdbDatabasePath: "/Root" Partitions { PartitionId: 0 Status: Active CreateVersion: 1 TabletId: 0 } ReadRuleGenerations: 0 ReadRuleGenerations: 0 AllPartitions { PartitionId: 0 Status: Active CreateVersion: 1 TabletId: 0 } Consumers { Name: "c1" ReadFromTimestampsMs: 0 FormatVersion: 0 Codec { } ServiceType: "data-streams" Version: 0 Generation: 0 } Consumers { Name: "c2" ReadFromTimestampsMs: 0 FormatVersion: 0 Codec { } ServiceType: "data-streams" Version: 0 Generation: 0 } } BootstrapConfig { } SourceActor { RawX1: 7501627856863218384 RawX2: 107374184610 } Partitions { Partition { PartitionId: 0 } } 2025-05-07T09:07:35.575533Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:3621: [PQ: 72075186224037892] Send TEvKeyValue::TEvRequest (WRITE_TX_COOKIE) 2025-05-07T09:07:35.590570Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:1225: [PQ: 72075186224037892] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-05-07T09:07:35.590622Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:4279: [PQ: 72075186224037892] Try execute txs with state EXECUTED 2025-05-07T09:07:35.590648Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:4324: [PQ: 72075186224037892] TxId 281474976715676, State EXECUTED 2025-05-07T09:07:35.590689Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:4271: [PQ: 72075186224037892] TxId 281474976715676 State EXECUTED FrontTxId 281474976715676 2025-05-07T09:07:35.590722Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:3975: [PQ: 72075186224037892] TPersQueue::SendEvReadSetAckToSenders 2025-05-07T09:07:35.590754Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:4214: [PQ: 72075186224037892] TxId 281474976715676, NewState WAIT_RS_ACKS 2025-05-07T09:07:35.590777Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:4249: [PQ: 72075186224037892] TxId 281474976715676 moved from EXECUTED to WAIT_RS_ACKS 2025-05-07T09:07:35.590818Z node 26 :PERSQUEUE DEBUG: transaction.cpp:366: [TxId: 281474976715676] PredicateAcks: 0/0 2025-05-07T09:07:35.590830Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:4525: [PQ: 72075186224037892] HaveAllRecipientsReceive 1, AllSupportivePartitionsHaveBeenDeleted 1 2025-05-07T09:07:35.590854Z node 26 :PERSQUEUE DEBUG: transaction.cpp:366: [TxId: 281474976715676] 
PredicateAcks: 0/0 2025-05-07T09:07:35.590884Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:4586: [PQ: 72075186224037892] add an TxId 281474976715676 to the list for deletion 2025-05-07T09:07:35.590924Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:4214: [PQ: 72075186224037892] TxId 281474976715676, NewState DELETING 2025-05-07T09:07:35.590956Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:3820: [PQ: 72075186224037892] delete key for TxId 281474976715676 2025-05-07T09:07:35.591060Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:3621: [PQ: 72075186224037892] Send TEvKeyValue::TEvRequest (WRITE_TX_COOKIE) 2025-05-07T09:07:35.605957Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:1225: [PQ: 72075186224037892] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-05-07T09:07:35.606025Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:4279: [PQ: 72075186224037892] Try execute txs with state DELETING 2025-05-07T09:07:35.606049Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:4324: [PQ: 72075186224037892] TxId 281474976715676, State DELETING 2025-05-07T09:07:35.606081Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:4536: [PQ: 72075186224037892] delete TxId 281474976715676 2025-05-07T09:07:36.984965Z node 25 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1937: ActorId: [25:7501627916992762426:2519] TxId: 281474976715681. Ctx: { TraceId: 01jtn007w78eev1e02d9ha86ta, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=25&id=MjQ5N2FjMDQtZDhiOGM1MjYtNGEyMWY0ZDUtN2VhNDZhNTY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. UNAVAILABLE: Failed to send EvStartKqpTasksRequest because node is unavailable: 26 2025-05-07T09:07:36.985129Z node 25 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1216: SelfId: [25:7501627916992762431:2519], TxId: 281474976715681, task: 2. Ctx: { TraceId : 01jtn007w78eev1e02d9ha86ta. CustomerSuppliedId : . SessionId : ydb://session/3?node_id=25&id=MjQ5N2FjMDQtZDhiOGM1MjYtNGEyMWY0ZDUtN2VhNDZhNTY=. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Handle abort execution event from: [25:7501627916992762426:2519], status: UNAVAILABLE, reason: {
: Error: Terminate execution } >> TStorageServiceTest::ShouldCreateCheckpoint [GOOD] >> TStorageServiceTest::ShouldGetCheckpoints >> TStateStorageTest::ShouldNotGetNonExistendSnaphotState [GOOD] >> TStateStorageTest::ShouldLoadIncrementSnapshot |93.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |93.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |93.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_mechanics_in_cloud[tables_format_v1-tables_format_v0-std] [GOOD] >> TStateStorageTest::ShouldDeleteNoCheckpoints [GOOD] >> TStateStorageTest::ShouldDeleteNoCheckpoints2 >> TCheckpointStorageTest::ShouldCreateGetCheckpoints [GOOD] >> TCheckpointStorageTest::ShouldGetCheckpointsEmpty >> test_auditlog.py::test_dml_begin_commit_logged >> TPersQueueTest::TestBigMessage [GOOD] >> TPersQueueTest::TClusterTrackerTest >> TCheckpointStorageTest::ShouldGetCoordinators [GOOD] >> TCheckpointStorageTest::ShouldMarkCheckpointsGc >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_does_not_change_visibility_for_deleted_message[tables_format_v1-std] |93.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> TStateStorageTest::ShouldSaveGetOldSmallState [GOOD] >> TStateStorageTest::ShouldSaveGetOldBigState |93.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |93.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_dynconfig [GOOD] >> TCheckpointStorageTest::ShouldUpdateCheckpointStatusForCheckpointsWithTheSameGenAndNo [GOOD] >> TGcTest::ShouldRemovePreviousCheckpoints >> TStorageServiceTest::ShouldNotCompleteCheckpointWithoutCreation [GOOD] >> TStorageServiceTest::ShouldNotAbortCheckpointWithoutCreation >> test_auditlog.py::test_broken_dynconfig[_client_session_pool_with_auth_other-_bad_dynconfig] [GOOD] >> TCheckpointStorageTest::ShouldGetCheckpointsEmpty [GOOD] >> TCheckpointStorageTest::ShouldDeleteGraph >> TStateStorageTest::ShouldDeleteNoCheckpoints2 [GOOD] >> TStateStorageTest::ShouldDeleteCheckpoints >> TStateStorageTest::ShouldSaveGetOldBigState [GOOD] >> TStateStorageTest::ShouldSaveGetIncrementSmallState >> TStorageServiceTest::ShouldNotRegisterPrevGeneration >> TStateStorageTest::ShouldLoadIncrementSnapshot [GOOD] |93.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |93.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |93.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_dml_requests_arent_logged_when_sid_is_expected >> TPersQueueTest::ReadWithoutConsumerFirstClassCitizen [GOOD] >> TStorageServiceTest::ShouldGetCheckpoints [GOOD] >> TStorageServiceTest::ShouldAbortCheckpoint >> DataShardVolatile::DistributedUpsertRestartAfterPlan [GOOD] >> DataShardVolatile::CompactedVolatileChangesCommit >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_all_types-pk_types7-all_types7-index7---] [GOOD] >> TStateStorageTest::ShouldSaveGetIncrementSmallState [GOOD] >> TStateStorageTest::ShouldSaveGetIncrementBigState >> TStorageServiceTest::ShouldNotAbortCheckpointWithoutCreation [GOOD] >> TStorageServiceTest::ShouldNotCompleteCheckpointWithoutPending >> test_auditlog.py::test_broken_dynconfig[_client_session_pool_with_auth_root-_good_dynconfig] >> 
DataShardVolatile::UpsertDependenciesShardsRestart-UseSink [GOOD] >> DataShardVolatile::NotCachingAbortingDeletes+UseSink |93.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_dml_requests_logged_when_sid_is_unexpected ------- [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_broken_dynconfig[_client_session_pool_with_auth_other-_good_dynconfig] [GOOD] Test command err: AAA /home/runner/.ya/build/build_root/zvgn/004498/ydb/tests/functional/audit/test-results/py3test/testing_out_stuff/test_auditlog/chunk5/testing_out_stuff/test_auditlog.py.test_broken_dynconfig._client_session_pool_with_auth_other-_good_dynconfig/audit.txt 2025-05-07T09:07:31.858700Z: {"sanitized_token":"othe****ltin (27F910A9)","subject":"other-user@builtin","new_config":"\n---\nmetadata:\n kind: MainConfig\n cluster: \"\"\n version: 0\nconfig:\n yaml_config_enabled: true\nallowed_labels:\n node_id:\n type: string\n host:\n type: string\n tenant:\n type: string\nselector_config: []\n ","status":"SUCCESS","component":"console","operation":"REPLACE DYNCONFIG","remote_address":"127.0.0.1"} |93.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test ------- [TM] {asan, default-linux-x86_64, release} ydb/services/persqueue_v1/ut/unittest >> TPersQueueTest::ReadWithoutConsumerFirstClassCitizen [GOOD] Test command err: 2025-05-07T09:03:38.261234Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501626897359037419:2060];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:03:38.261498Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-05-07T09:03:38.464315Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0025ee/r3tmp/tmpkAn896/pdisk_1.dat 2025-05-07T09:03:38.697681Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:03:38.697795Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 30065, node 1 2025-05-07T09:03:38.700568Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:03:38.723412Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:03:38.723880Z node 1 :GRPC_SERVER WARN: grpc_request_proxy.cpp:509: SchemeBoardDelete /Root Strong=0 2025-05-07T09:03:38.723969Z node 1 :GRPC_SERVER WARN: grpc_request_proxy.cpp:509: SchemeBoardDelete /Root Strong=0 2025-05-07T09:03:38.832329Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/zvgn/0025ee/r3tmp/yandexZnLWEi.tmp 2025-05-07T09:03:38.832370Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/zvgn/0025ee/r3tmp/yandexZnLWEi.tmp 2025-05-07T09:03:38.832492Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/zvgn/0025ee/r3tmp/yandexZnLWEi.tmp 2025-05-07T09:03:38.832595Z node 1 
:NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-05-07T09:03:38.874341Z INFO: TTestServer started on Port 28233 GrpcPort 30065 TClient is connected to server localhost:28233 PQClient connected to localhost:30065 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:03:39.104998Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:03:39.116906Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:03:39.134518Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... waiting... waiting... 2025-05-07T09:03:40.960078Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501626905948972829:2340], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:03:40.960199Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501626905948972825:2337], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:03:40.960269Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:03:40.964653Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710662:3, at schemeshard: 72057594046644480 2025-05-07T09:03:40.977870Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501626905948972840:2341], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710662 completed, doublechecking } 2025-05-07T09:03:41.242181Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501626910243940200:2439] txid# 281474976710663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:03:41.274740Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T09:03:41.313941Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T09:03:41.371418Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7501626910243940215:2347], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:5:17: Error: At function: KiReadTable!
:5:17: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Versions]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-05-07T09:03:41.371728Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2147: SessionId: ydb://session/3?node_id=1&id=NDdkMDAxZTEtOTZhMjM1OGItZTMxMTc5Y2QtMzFkMzU2YjE=, ActorId: [1:7501626905948972822:2335], ActorState: ExecuteState, TraceId: 01jtmzs1cc3ettd45txpk6seg1, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-05-07T09:03:41.373781Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 5 column: 17 } message: "At function: KiReadTable!" end_position { row: 5 column: 17 } severity: 1 issues { position { row: 5 column: 17 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Versions]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 5 column: 17 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-05-07T09:03:41.432650Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); 2025-05-07T09:03:41.624593Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710667. Ctx: { TraceId: 01jtmzs1xm5fr643mqacs1syva, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZjlkOTllOTEtNGFjMjFlYzQtNmMzMDFlMzUtNzA3OWFmNDg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root === CheckClustersList. Subcribe to ClusterTracker from [1:7501626910243940509:2618] 2025-05-07T09:03:43.261104Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501626897359037419:2060];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:03:43.261186Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; === CheckClustersList. 
Ok PQ Client: create topic: rt3.dc1--topic1 with 2 partitions CallPersQueueGRPC request to localhost:30065 MetaRequest { CmdGetTopicMetadata { Topic: "rt3.dc1--topic1" } } 2025-05-07T09:03:47.323370Z node 1 :PERSQUEUE INFO: msgbus_server_persqueue.cpp:1531: proxy answer CallPersQueueGRPC response: Status: 128 ErrorReason: "the following topics are not created: rt3.dc1--topic1, Marker# PQ95" ErrorCode: UNKNOWN_TOPIC CallPersQueueGRPC request to localhost:30065 MetaRequest { CmdCreateTopic { Topic: "rt3.dc1--topic1" NumPartitions: 2 Config { PartitionConfig { LifetimeSeconds: 86400 LowWatermark: 8388608 SourceIdLifetimeSeconds: 86400 WriteSpeedInBytesPerSecond: 20000000 BurstSize: 20000000 SourceIdMaxCounts: 6000000 } LocalDC: true ReadRules: "user" ReadFromTimestampsMs: 0 ConsumerFormatVersions: 0 ConsumerCodecs { } Codecs { Ids: 0 Ids: 1 Ids: 2 Codecs: "raw" Codecs: "gzip" Codecs: "lzop" } ReadRuleVersions: 0 } } } CallPersQueueGRPC response: Status: 129 ProxyErrorCode: 53 SchemeStatus: 1 FlatTxId { TxId: 281474976710679 SchemeShardTabletId: 72057594046644480 PathId: 13 } ErrorCode: OK AddTopic: rt3.dc1--topic1 ===Run query:``DECLARE $version as Int64; DECLARE $path AS Utf8; DECLARE $cluster as Utf8; UPSERT INTO `/Root/PQ/Config/V2/Topics` (path, dc) VALUES ($path, $cluster); UPSERT INTO `/Root/PQ/Config/V2/Versions` (name, version) VALUES ("Topics", $version);`` with topic = topic1, dc = dc1 2025-05-07T09:03:47.365810Z node 1 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72075186224037893][] pipe [1:7501626936013744572:2728] connected; active server actors: 1 2025-05-07T09:03:47.366297Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1516: [72075186224037893][rt3.dc1--topic1] updating configuration. Deleted partitions []. Added partitions [1, 0] 2025-05-07T09:03:47.366795Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:1040: [72075186224037893][rt3.dc1--topic1] Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at RB 72075186224037893 2025-05-07T09:03:47.366911Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:138: [72075186224037893][rt3.dc1--topic1] BALANCER INIT DONE for rt3.dc1--topic1: (0, 72075186224037892) (1, 72075186224037892) 2025-05-07T09:03:47.367327Z ... 0 readOffset 35 EndOffset 40 ClientCommitOffset 0 committedOffset 0 Guid 704f62ff-2700f920-2a09d583-4d047d8c 2025-05-07T09:07:43.501223Z node 27 :PERSQUEUE DEBUG: pq_impl.cpp:342: Handle TEvRequest topic: 'rt3.dc1--topic1' requestId: 2025-05-07T09:07:43.501282Z node 27 :PERSQUEUE DEBUG: pq_impl.cpp:2788: [PQ: 72075186224037892] got client message batch for topic 'rt3.dc1--topic1' partition 4 2025-05-07T09:07:43.501436Z node 27 :PERSQUEUE DEBUG: partition_read.cpp:731: [PQ: 72075186224037892, Partition: 4, State: StateIdle] read cookie 33 Topic 'rt3.dc1--topic1' partition 4 user $without_consumer offset 35 count 6 size 286 endOffset 40 max time lag 0ms effective offset 35 2025-05-07T09:07:43.501488Z node 27 :PERSQUEUE DEBUG: partition_read.cpp:931: [PQ: 72075186224037892, Partition: 4, State: StateIdle] read cookie 33 added 0 blobs, size 0 count 0 last offset 35, current partition end offset: 40 2025-05-07T09:07:43.501594Z node 27 :PERSQUEUE DEBUG: partition_read.cpp:948: [PQ: 72075186224037892, Partition: 4, State: StateIdle] Reading cookie 33. All data is from uncompacted head. 
2025-05-07T09:07:43.501628Z node 27 :PERSQUEUE DEBUG: partition_read.cpp:420: FormAnswer for 0 blobs 2025-05-07T09:07:43.501798Z node 27 :PERSQUEUE DEBUG: pq_impl.cpp:377: Answer ok topic: 'rt3.dc1--topic1' partition: 4 messageNo: 0 requestId: cookie: 35 2025-05-07T09:07:43.502770Z node 26 :PQ_READ_PROXY DEBUG: partition_actor.cpp:652: session cookie 1 consumer session _26_1_5662643438318755780_v1 TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 4(assignId:5) initDone 1 event { CmdReadResult { MaxOffset: 40 Result { Offset: 35 Data: "... 94 bytes ..." SourceId: "\000source" SeqNo: 37 WriteTimestampMS: 1746608863207 CreateTimestampMS: 1746608863204 UncompressedSize: 6 PartitionKey: "" ExplicitHash: "" } Result { Offset: 36 Data: "... 94 bytes ..." SourceId: "\000source" SeqNo: 38 WriteTimestampMS: 1746608863303 CreateTimestampMS: 1746608863302 UncompressedSize: 6 PartitionKey: "" ExplicitHash: "" } Result { Offset: 37 Data: "... 94 bytes ..." SourceId: "\000source" SeqNo: 39 WriteTimestampMS: 1746608863315 CreateTimestampMS: 1746608863314 UncompressedSize: 6 PartitionKey: "" ExplicitHash: "" } BlobsFromDisk: 0 BlobsFromCache: 0 SizeLag: 372 RealReadOffset: 37 WaitQuotaTimeMs: 0 EndOffset: 40 StartOffset: 0 } Cookie: 35 } 2025-05-07T09:07:43.502949Z node 26 :PQ_READ_PROXY DEBUG: partition_actor.cpp:948: session cookie 1 consumer session _26_1_5662643438318755780_v1 TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 4(assignId:5) ready for read with readOffset 38 endOffset 40 2025-05-07T09:07:43.502985Z node 26 :PQ_READ_PROXY DEBUG: partition_actor.cpp:880: session cookie 1 consumer session _26_1_5662643438318755780_v1 after read state TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 4(assignId:5) EndOffset 40 ReadOffset 38 ReadGuid 704f62ff-2700f920-2a09d583-4d047d8c has messages 1 2025-05-07T09:07:43.503034Z node 26 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2309: session cookie 1 consumer session _26_1_5662643438318755780_v1 partition ready for read: partition# TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 4(assignId:5), readOffset# 38, endOffset# 40, WTime# 1746608863315, sizeLag# 372 2025-05-07T09:07:43.503046Z node 26 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2320: session cookie 1 consumer session _26_1_5662643438318755780_v1TEvPartitionReady. Aval parts: 0 2025-05-07T09:07:43.503074Z node 26 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:1917: session cookie 1 consumer session _26_1_5662643438318755780_v1 read done: guid# 704f62ff-2700f920-2a09d583-4d047d8c, partition# TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 4(assignId:5), size# 520 2025-05-07T09:07:43.503092Z node 26 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2079: session cookie 1 consumer session _26_1_5662643438318755780_v1 response to read: guid# 704f62ff-2700f920-2a09d583-4d047d8c 2025-05-07T09:07:43.503283Z node 26 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2122: session cookie 1 consumer session _26_1_5662643438318755780_v1 Process answer. 
Aval parts: 1 Bytes readed: 520 Offset: 35 from session 5 Offset: 36 from session 5 Offset: 37 from session 5 2025-05-07T09:07:43.506171Z node 26 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 1 consumer session _26_1_5662643438318755780_v1 grpc read done: success# 1, data# { read_request { bytes_size: 400 } } 2025-05-07T09:07:43.506305Z node 26 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:1816: session cookie 1 consumer session _26_1_5662643438318755780_v1 got read request: guid# 1c52ddac-f1f02ce9-e320a0fe-355f1bf3 2025-05-07T09:07:43.506359Z node 26 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2243: session cookie 1 consumer session _26_1_5662643438318755780_v1 performing read request: guid# 73d75451-2bcfba98-191aa053-3e5081cf, from# TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 4(assignId:5), count# 2, size# 166, partitionsAsked# 1, maxTimeLag# 0ms 2025-05-07T09:07:43.506448Z node 26 :PQ_READ_PROXY DEBUG: partition_actor.cpp:1369: session cookie 1 consumer session _26_1_5662643438318755780_v1 READ FROM TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 4(assignId:5)maxCount 2 maxSize 166 maxTimeLagMs 0 readTimestampMs 0 readOffset 38 EndOffset 40 ClientCommitOffset 0 committedOffset 0 Guid 73d75451-2bcfba98-191aa053-3e5081cf 2025-05-07T09:07:43.507566Z node 27 :PERSQUEUE DEBUG: pq_impl.cpp:342: Handle TEvRequest topic: 'rt3.dc1--topic1' requestId: 2025-05-07T09:07:43.507651Z node 27 :PERSQUEUE DEBUG: pq_impl.cpp:2788: [PQ: 72075186224037892] got client message batch for topic 'rt3.dc1--topic1' partition 4 2025-05-07T09:07:43.507834Z node 27 :PERSQUEUE DEBUG: partition_read.cpp:731: [PQ: 72075186224037892, Partition: 4, State: StateIdle] read cookie 34 Topic 'rt3.dc1--topic1' partition 4 user $without_consumer offset 38 count 2 size 166 endOffset 40 max time lag 0ms effective offset 38 2025-05-07T09:07:43.507889Z node 27 :PERSQUEUE DEBUG: partition_read.cpp:931: [PQ: 72075186224037892, Partition: 4, State: StateIdle] read cookie 34 added 0 blobs, size 0 count 0 last offset 38, current partition end offset: 40 2025-05-07T09:07:43.507980Z node 27 :PERSQUEUE DEBUG: partition_read.cpp:948: [PQ: 72075186224037892, Partition: 4, State: StateIdle] Reading cookie 34. All data is from uncompacted head. 2025-05-07T09:07:43.508019Z node 27 :PERSQUEUE DEBUG: partition_read.cpp:420: FormAnswer for 0 blobs 2025-05-07T09:07:43.508226Z node 27 :PERSQUEUE DEBUG: pq_impl.cpp:377: Answer ok topic: 'rt3.dc1--topic1' partition: 4 messageNo: 0 requestId: cookie: 38 2025-05-07T09:07:43.509660Z node 26 :PQ_READ_PROXY DEBUG: partition_actor.cpp:652: session cookie 1 consumer session _26_1_5662643438318755780_v1 TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 4(assignId:5) initDone 1 event { CmdReadResult { MaxOffset: 40 Result { Offset: 38 Data: "... 94 bytes ..." SourceId: "\000source" SeqNo: 40 WriteTimestampMS: 1746608863327 CreateTimestampMS: 1746608863326 UncompressedSize: 6 PartitionKey: "" ExplicitHash: "" } Result { Offset: 39 Data: "... 94 bytes ..." 
SourceId: "\000source" SeqNo: 41 WriteTimestampMS: 1746608863358 CreateTimestampMS: 1746608863358 UncompressedSize: 6 PartitionKey: "" ExplicitHash: "" } BlobsFromDisk: 0 BlobsFromCache: 0 SizeLag: 40 RealReadOffset: 39 WaitQuotaTimeMs: 0 EndOffset: 40 StartOffset: 0 } Cookie: 38 } 2025-05-07T09:07:43.509876Z node 26 :PQ_READ_PROXY DEBUG: partition_actor.cpp:1252: session cookie 1 consumer session _26_1_5662643438318755780_v1 TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 4(assignId:5) wait data in partition inited, cookie 1 from offset40 2025-05-07T09:07:43.509927Z node 26 :PQ_READ_PROXY DEBUG: partition_actor.cpp:880: session cookie 1 consumer session _26_1_5662643438318755780_v1 after read state TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 4(assignId:5) EndOffset 40 ReadOffset 40 ReadGuid 73d75451-2bcfba98-191aa053-3e5081cf has messages 1 2025-05-07T09:07:43.510116Z node 26 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:1917: session cookie 1 consumer session _26_1_5662643438318755780_v1 read done: guid# 73d75451-2bcfba98-191aa053-3e5081cf, partition# TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 4(assignId:5), size# 352 2025-05-07T09:07:43.510151Z node 26 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2079: session cookie 1 consumer session _26_1_5662643438318755780_v1 response to read: guid# 73d75451-2bcfba98-191aa053-3e5081cf 2025-05-07T09:07:43.510412Z node 26 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2122: session cookie 1 consumer session _26_1_5662643438318755780_v1 Process answer. Aval parts: 0 Bytes readed: 352 Offset: 38 from session 5 Offset: 39 from session 5 2025-05-07T09:07:43.511445Z node 26 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 1 consumer session _26_1_5662643438318755780_v1 grpc read done: success# 1, data# { commit_offset_request { commit_offsets { partition_session_id: 5 offsets { end: 39 } } } } 2025-05-07T09:07:43.511482Z node 26 :PQ_READ_PROXY INFO: read_session_actor.cpp:1640: session cookie 1 consumer session _26_1_5662643438318755780_v1 closed with error: reason# can't commit when reading without a consumer 2025-05-07T09:07:43.511747Z node 26 :PQ_READ_PROXY INFO: read_session_actor.cpp:377: session cookie 1 consumer session _26_1_5662643438318755780_v1 is DEAD 2025-05-07T09:07:43.513153Z node 27 :PERSQUEUE DEBUG: pq_impl.cpp:2433: [PQ: 72075186224037892] Destroy direct read session _26_1_5662643438318755780_v1 2025-05-07T09:07:43.513227Z node 27 :PERSQUEUE DEBUG: pq_impl.cpp:2899: [PQ: 72075186224037892] server disconnected, pipe [26:7501627949729978927:2579] destroyed 2025-05-07T09:07:43.513251Z node 27 :PERSQUEUE DEBUG: pq_impl.cpp:2433: [PQ: 72075186224037892] Destroy direct read session _26_1_5662643438318755780_v1 2025-05-07T09:07:43.513273Z node 27 :PERSQUEUE DEBUG: pq_impl.cpp:2899: [PQ: 72075186224037892] server disconnected, pipe [26:7501627949729978926:2578] destroyed 2025-05-07T09:07:43.513290Z node 27 :PERSQUEUE DEBUG: pq_impl.cpp:2433: [PQ: 72075186224037892] Destroy direct read session _26_1_5662643438318755780_v1 2025-05-07T09:07:43.513312Z node 27 :PERSQUEUE DEBUG: pq_impl.cpp:2899: [PQ: 72075186224037892] server disconnected, pipe [26:7501627949729978925:2577] destroyed 2025-05-07T09:07:43.513331Z node 27 :PERSQUEUE DEBUG: pq_impl.cpp:2433: [PQ: 72075186224037892] Destroy direct read session _26_1_5662643438318755780_v1 2025-05-07T09:07:43.513354Z node 27 :PERSQUEUE DEBUG: pq_impl.cpp:2899: [PQ: 72075186224037892] server disconnected, pipe 
[26:7501627949729978924:2576] destroyed 2025-05-07T09:07:43.513372Z node 27 :PERSQUEUE DEBUG: pq_impl.cpp:2433: [PQ: 72075186224037892] Destroy direct read session _26_1_5662643438318755780_v1 2025-05-07T09:07:43.513389Z node 27 :PERSQUEUE DEBUG: pq_impl.cpp:2899: [PQ: 72075186224037892] server disconnected, pipe [26:7501627949729978928:2580] destroyed 2025-05-07T09:07:43.513455Z node 27 :PQ_READ_PROXY DEBUG: caching_service.cpp:138: Direct read cache: server session deregistered: _26_1_5662643438318755780_v1 2025-05-07T09:07:43.513498Z node 27 :PQ_READ_PROXY DEBUG: caching_service.cpp:138: Direct read cache: server session deregistered: _26_1_5662643438318755780_v1 2025-05-07T09:07:43.513517Z node 27 :PQ_READ_PROXY DEBUG: caching_service.cpp:138: Direct read cache: server session deregistered: _26_1_5662643438318755780_v1 2025-05-07T09:07:43.513535Z node 27 :PQ_READ_PROXY DEBUG: caching_service.cpp:138: Direct read cache: server session deregistered: _26_1_5662643438318755780_v1 2025-05-07T09:07:43.513551Z node 27 :PQ_READ_PROXY DEBUG: caching_service.cpp:138: Direct read cache: server session deregistered: _26_1_5662643438318755780_v1 |93.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> TStorageServiceTest::ShouldPendingAndCompleteCheckpoint [GOOD] >> TStorageServiceTest::ShouldSaveState >> TStateStorageTest::ShouldDeleteCheckpoints [GOOD] >> TStateStorageTest::ShouldDeleteGraph >> TStorageServiceTest::ShouldNotRegisterPrevGeneration [GOOD] >> TStorageServiceTest::ShouldNotCreateCheckpointWhenUnregistered >> TStateStorageTest::ShouldSaveGetIncrementBigState [GOOD] >> TStateStorageTest::ShouldNotGetNonExistendState >> TCheckpointStorageTest::ShouldMarkCheckpointsGc [GOOD] >> TCheckpointStorageTest::ShouldNotDeleteUnmarkedCheckpoints |93.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |93.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |93.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> TStateStorageTest::ShouldDeleteGraph [GOOD] >> TStateStorageTest::ShouldGetMultipleStates >> TStateStorageTest::ShouldNotGetNonExistendState [GOOD] |93.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/checkpoint_storage/ut/unittest >> TStateStorageTest::ShouldLoadIncrementSnapshot [GOOD] >> TStorageServiceTest::ShouldNotCompleteCheckpointWithoutPending [GOOD] >> TStorageServiceTest::ShouldNotCompleteCheckpointGenerationChanged >> TCheckpointStorageTest::ShouldDeleteGraph [GOOD] >> TCheckpointStorageTest::ShouldDeleteMarkedCheckpoints |93.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> TStorageServiceTest::ShouldNotCreateCheckpointWhenUnregistered [GOOD] >> TStorageServiceTest::ShouldNotCreateCheckpointTwice |93.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |93.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> TStorageServiceTest::ShouldSaveState [GOOD] >> TStorageServiceTest::ShouldUseGc >> test_auditlog.py::test_single_dml_query_logged[upsert] [GOOD] >> TStorageServiceTest::ShouldAbortCheckpoint [GOOD] >> TStorageServiceTest::ShouldGetState |93.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delete_message_works[tables_format_v0] |93.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> 
test_auditlog.py::test_single_dml_query_logged[delete] |93.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |93.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> TPQCompatTest::ReadWriteSessions [GOOD] >> TStorageServiceTest::ShouldNotCreateCheckpointTwice [GOOD] >> TStorageServiceTest::ShouldNotPendingCheckpointWithoutCreation |93.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/checkpoint_storage/ut/unittest >> TStateStorageTest::ShouldNotGetNonExistendState [GOOD] |93.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |93.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_broken_dynconfig[_client_session_pool_with_auth_root-_bad_dynconfig] >> TStorageServiceTest::ShouldNotCompleteCheckpointGenerationChanged [GOOD] >> TGcTest::ShouldRemovePreviousCheckpoints [GOOD] >> TGcTest::ShouldIgnoreIncrementCheckpoint >> TStateStorageTest::ShouldGetMultipleStates [GOOD] |93.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> TCheckpointStorageTest::ShouldNotDeleteUnmarkedCheckpoints [GOOD] >> TCheckpointStorageTest::ShouldRetryOnExistingGraphDescId ------- [TM] {asan, default-linux-x86_64, release} ydb/services/persqueue_v1/ut/unittest >> TPQCompatTest::ReadWriteSessions [GOOD] Test command err: 2025-05-07T09:03:38.365623Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501626897285893151:2075];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:03:38.365748Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-05-07T09:03:38.393197Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7501626893387757104:2073];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:03:38.393896Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00263a/r3tmp/tmpW3zFQC/pdisk_1.dat 2025-05-07T09:03:38.522462Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-05-07T09:03:38.525956Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-05-07T09:03:38.724564Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:03:38.724725Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:03:38.725442Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:03:38.725513Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:03:38.730329Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:03:38.730991Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-05-07T09:03:38.731679Z 
node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:03:38.759932Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 1983, node 1 2025-05-07T09:03:38.765890Z node 1 :GRPC_SERVER WARN: grpc_request_proxy.cpp:509: SchemeBoardDelete /Root Strong=0 2025-05-07T09:03:38.765920Z node 1 :GRPC_SERVER WARN: grpc_request_proxy.cpp:509: SchemeBoardDelete /Root Strong=0 2025-05-07T09:03:38.849104Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/zvgn/00263a/r3tmp/yandexcaTpTw.tmp 2025-05-07T09:03:38.849129Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/zvgn/00263a/r3tmp/yandexcaTpTw.tmp 2025-05-07T09:03:38.849257Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/zvgn/00263a/r3tmp/yandexcaTpTw.tmp 2025-05-07T09:03:38.849386Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-05-07T09:03:38.889978Z INFO: TTestServer started on Port 25745 GrpcPort 1983 TClient is connected to server localhost:25745 PQClient connected to localhost:1983 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:03:39.121042Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-05-07T09:03:39.168917Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... waiting... waiting... 2025-05-07T09:03:41.397802Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501626910170796127:2339], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:03:41.397915Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:03:41.398030Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501626910170796143:2342], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:03:41.400839Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710662:3, at schemeshard: 72057594046644480 2025-05-07T09:03:41.432456Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501626910170796180:2345], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:03:41.432676Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:03:41.432952Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710662, at schemeshard: 72057594046644480 2025-05-07T09:03:41.433107Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501626910170796148:2343], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710662 completed, doublechecking } 2025-05-07T09:03:41.613103Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501626910170796222:2758] txid# 281474976710663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:03:41.637305Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T09:03:41.640431Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7501626910170796241:2349], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-05-07T09:03:41.640522Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7501626906272659395:2318], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-05-07T09:03:41.640729Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2147: SessionId: ydb://session/3?node_id=2&id=ZmM2ZWUwMjctMmYzODFkNjItMTFkYjgxMjQtYjhiYTA3ZDQ=, ActorId: [2:7501626906272659348:2312], ActorState: ExecuteState, TraceId: 01jtmzs1xa6vt65rhejfzec93j, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-05-07T09:03:41.640753Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2147: SessionId: ydb://session/3?node_id=1&id=N2UwODJkZGEtMzJiYTA0YjQtYTFiNGUyOTYtNTM1OGYzOGE=, ActorId: [1:7501626910170796116:2337], ActorState: ExecuteState, TraceId: 01jtmzs1tbfrhaghrth7zmy607, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-05-07T09:03:41.642497Z node 2 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-05-07T09:03:41.642504Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-05-07T09:03:41.714642Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T09:03:41.790400Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); 2025-05-07T09:03:41.956493Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710667. Ctx: { TraceId: 01j ... 
got 1 requests infly, db = "Root/LbCommunal" 2025-05-07T09:07:52.103777Z node 27 :PQ_METACACHE DEBUG: msgbus_server_pq_metacache.cpp:548: Handle SchemeCache response: result# { ErrorCount: 0 DatabaseName: Root/LbCommunal DomainOwnerId: 0 Instant: 12 ResultSet [{ Path: Root/LbCommunal/account/topic2-mirrored-from-dc2 TableId: [72057594046644480:18:0] RequestType: ByPath Operation: OpList RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Ok Kind: KindTopic DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-05-07T09:07:52.103905Z node 27 :PQ_METACACHE DEBUG: msgbus_server_pq_metacache.cpp:613: Got describe topics SC response 2025-05-07T09:07:52.103955Z node 27 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:131: session cookie 6 consumer shared/user session shared/user_27_6_4364915672942482623_v1 Handle describe topics response 2025-05-07T09:07:52.104183Z node 27 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:68: session cookie 6 consumer shared/user session shared/user_27_6_4364915672942482623_v1 auth is DEAD 2025-05-07T09:07:52.104211Z node 27 :PQ_READ_PROXY INFO: read_session_actor.cpp:1033: session cookie 6 consumer shared/user session shared/user_27_6_4364915672942482623_v1 auth ok: topics# 1, initDone# 0 2025-05-07T09:07:52.105262Z node 27 :PQ_READ_PROXY INFO: read_session_actor.cpp:1196: session cookie 6 consumer shared/user session shared/user_27_6_4364915672942482623_v1 register session: topic# rt3.dc2--account--topic2 2025-05-07T09:07:52.106023Z node 27 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72075186224037899][topic2-mirrored-from-dc2] pipe [27:7501627986504651708:2676] connected; active server actors: 1 2025-05-07T09:07:52.106235Z node 27 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1699: [72075186224037899][topic2-mirrored-from-dc2] consumer "user" register session for pipe [27:7501627986504651708:2676] session shared/user_27_6_4364915672942482623_v1 2025-05-07T09:07:52.106314Z node 27 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:635: [72075186224037899][topic2-mirrored-from-dc2] consumer user register readable partition 0 2025-05-07T09:07:52.106434Z node 27 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:665: [72075186224037899][topic2-mirrored-from-dc2] consumer user family created family=1 (Status=Free, Partitions=[0]) 2025-05-07T09:07:52.106518Z node 27 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:867: [72075186224037899][topic2-mirrored-from-dc2] consumer user register reading session ReadingSession "shared/user_27_6_4364915672942482623_v1" (Sender=[27:7501627986504651705:2676], Pipe=[27:7501627986504651708:2676], Partitions=[], ActiveFamilyCount=0) 2025-05-07T09:07:52.106585Z node 27 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1183: [72075186224037899][topic2-mirrored-from-dc2] consumer user rebalancing was scheduled 2025-05-07T09:07:52.106679Z node 27 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1255: [72075186224037899][topic2-mirrored-from-dc2] consumer user balancing. 
Sessions=1, Families=1, UnradableFamilies=1 [1 (0), ], RequireBalancing=0 [] 2025-05-07T09:07:52.106799Z node 27 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1302: [72075186224037899][topic2-mirrored-from-dc2] consumer user balancing family=1 (Status=Free, Partitions=[0]) for ReadingSession "shared/user_27_6_4364915672942482623_v1" (Sender=[27:7501627986504651705:2676], Pipe=[27:7501627986504651708:2676], Partitions=[], ActiveFamilyCount=0) 2025-05-07T09:07:52.106926Z node 27 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:545: [72075186224037899][topic2-mirrored-from-dc2] consumer user family 1 status Active partitions [0] session "shared/user_27_6_4364915672942482623_v1" sender [27:7501627986504651705:2676] lock partition 0 for ReadingSession "shared/user_27_6_4364915672942482623_v1" (Sender=[27:7501627986504651705:2676], Pipe=[27:7501627986504651708:2676], Partitions=[], ActiveFamilyCount=1) generation 1 step 3 2025-05-07T09:07:52.107035Z node 27 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1322: [72075186224037899][topic2-mirrored-from-dc2] consumer user start rebalancing. familyCount=1, sessionCount=1, desiredFamilyCount=1, allowPlusOne=0 2025-05-07T09:07:52.107097Z node 27 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1399: [72075186224037899][topic2-mirrored-from-dc2] consumer user balancing duration: 0.000361s ===Got response: status: SUCCESS init_response { session_id: "shared/user_27_6_4364915672942482623_v1" } 2025-05-07T09:07:52.111461Z node 28 :PERSQUEUE DEBUG: pq_impl.cpp:2874: [PQ: 72075186224037898] server connected, pipe [27:7501627986504651711:2679], now have 1 active actors on pipe 2025-05-07T09:07:52.112144Z node 28 :PERSQUEUE DEBUG: pq_impl.cpp:342: Handle TEvRequest topic: 'topic2-mirrored-from-dc2' requestId: 2025-05-07T09:07:52.112201Z node 28 :PERSQUEUE DEBUG: pq_impl.cpp:2788: [PQ: 72075186224037898] got client message batch for topic 'rt3.dc2--account--topic2' partition 0 2025-05-07T09:07:52.112265Z node 28 :PERSQUEUE DEBUG: pq_impl.cpp:1932: [PQ: 72075186224037898] Created session shared/user_27_6_4364915672942482623_v1 on pipe: [27:7501627986504651711:2679] 2025-05-07T09:07:52.112371Z node 28 :PQ_READ_PROXY DEBUG: caching_service.cpp:282: Direct read cache: registered server session: shared/user_27_6_4364915672942482623_v1:1 with generation 1 2025-05-07T09:07:52.112483Z node 28 :PERSQUEUE DEBUG: partition.cpp:3264: [PQ: 72075186224037898, Partition: 0, State: StateIdle] Topic 'rt3.dc2--account--topic2' partition 0 user user session is set to 0 (startOffset 0) session shared/user_27_6_4364915672942482623_v1 2025-05-07T09:07:52.112725Z node 28 :PERSQUEUE DEBUG: partition.cpp:2182: [PQ: 72075186224037898, Partition: 0, State: StateIdle] === DumpKeyValueRequest === 2025-05-07T09:07:52.112773Z node 28 :PERSQUEUE DEBUG: partition.cpp:2183: [PQ: 72075186224037898, Partition: 0, State: StateIdle] --- delete ---------------- 2025-05-07T09:07:52.112807Z node 28 :PERSQUEUE DEBUG: partition.cpp:2191: [PQ: 72075186224037898, Partition: 0, State: StateIdle] --- write ----------------- 2025-05-07T09:07:52.112844Z node 28 :PERSQUEUE DEBUG: partition.cpp:2194: [PQ: 72075186224037898, Partition: 0, State: StateIdle] i0000000000 2025-05-07T09:07:52.112861Z node 28 :PERSQUEUE DEBUG: partition.cpp:2194: [PQ: 72075186224037898, Partition: 0, State: StateIdle] m0000000000cuser 2025-05-07T09:07:52.112876Z node 28 :PERSQUEUE DEBUG: partition.cpp:2194: [PQ: 72075186224037898, Partition: 0, State: StateIdle] m0000000000uuser 
2025-05-07T09:07:52.112914Z node 28 :PERSQUEUE DEBUG: partition.cpp:2196: [PQ: 72075186224037898, Partition: 0, State: StateIdle] --- rename ---------------- 2025-05-07T09:07:52.112950Z node 28 :PERSQUEUE DEBUG: partition.cpp:2201: [PQ: 72075186224037898, Partition: 0, State: StateIdle] =========================== 2025-05-07T09:07:52.112989Z node 28 :PERSQUEUE DEBUG: read.h:262: CacheProxy. Passthrough write request to KV 2025-05-07T09:07:52.108095Z node 27 :PQ_READ_PROXY INFO: read_session_actor.cpp:1315: session cookie 6 consumer shared/user session shared/user_27_6_4364915672942482623_v1 assign: record# { Partition: 0 TabletId: 72075186224037898 Topic: "topic2-mirrored-from-dc2" Generation: 1 Step: 3 Session: "shared/user_27_6_4364915672942482623_v1" ClientId: "user" PipeClient { RawX1: 7501627986504651708 RawX2: 4503715591490164 } Path: "/Root/LbCommunal/account/topic2-mirrored-from-dc2" } 2025-05-07T09:07:52.108242Z node 27 :PQ_READ_PROXY INFO: partition_actor.cpp:1122: session cookie 6 consumer shared/user session shared/user_27_6_4364915672942482623_v1 INITING TopicId: Topic topic2-mirrored-from-dc2 in dc dc2 in database: Root, partition 0(assignId:1) 2025-05-07T09:07:52.111615Z node 27 :PQ_READ_PROXY INFO: partition_actor.cpp:962: session cookie 6 consumer shared/user session shared/user_27_6_4364915672942482623_v1 TopicId: Topic topic2-mirrored-from-dc2 in dc dc2 in database: Root, partition 0(assignId:1) pipe restart attempt 0 pipe creation result: OK TabletId: 72075186224037898 Generation: 1, pipe: [27:7501627986504651711:2679] 2025-05-07T09:07:52.120448Z node 28 :PERSQUEUE DEBUG: pq_impl.cpp:377: Answer ok topic: 'topic2-mirrored-from-dc2' partition: 0 messageNo: 0 requestId: cookie: 18446744073709551615 2025-05-07T09:07:52.120492Z node 28 :PERSQUEUE DEBUG: partition_write.cpp:524: [PQ: 72075186224037898, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0 2025-05-07T09:07:52.126562Z node 27 :PQ_READ_PROXY DEBUG: partition_actor.cpp:652: session cookie 6 consumer shared/user session shared/user_27_6_4364915672942482623_v1 TopicId: Topic topic2-mirrored-from-dc2 in dc dc2 in database: Root, partition 0(assignId:1) initDone 0 event { CmdGetClientOffsetResult { Offset: 0 EndOffset: 0 SizeLag: 0 WriteTimestampEstimateMS: 0 ClientHasAnyCommits: false } Cookie: 18446744073709551615 } 2025-05-07T09:07:52.126653Z node 27 :PQ_READ_PROXY INFO: partition_actor.cpp:683: session cookie 6 consumer shared/user session shared/user_27_6_4364915672942482623_v1 INIT DONE TopicId: Topic topic2-mirrored-from-dc2 in dc dc2 in database: Root, partition 0(assignId:1) EndOffset 0 readOffset 0 committedOffset 0 2025-05-07T09:07:52.126766Z node 27 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:1413: session cookie 6 consumer shared/user session shared/user_27_6_4364915672942482623_v1 sending to client partition status ===Got response: status: SUCCESS start_partition_session_request { partition_session { partition_session_id: 1 path: "account/topic2-mirrored-from-dc2" } partition_offsets { } } 2025-05-07T09:07:52.132902Z node 28 :PERSQUEUE DEBUG: pq_impl.cpp:2433: [PQ: 72075186224037898] Destroy direct read session shared/user_27_6_4364915672942482623_v1 2025-05-07T09:07:52.132967Z node 28 :PERSQUEUE DEBUG: pq_impl.cpp:2899: [PQ: 72075186224037898] server disconnected, pipe [27:7501627986504651711:2679] destroyed 2025-05-07T09:07:52.133044Z node 28 :PQ_READ_PROXY DEBUG: caching_service.cpp:138: Direct read cache: server session 
deregistered: shared/user_27_6_4364915672942482623_v1 2025-05-07T09:07:52.131522Z node 27 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 6 consumer shared/user session shared/user_27_6_4364915672942482623_v1 grpc read done: success# 0, data# { } 2025-05-07T09:07:52.131547Z node 27 :PQ_READ_PROXY INFO: read_session_actor.cpp:125: session cookie 6 consumer shared/user session shared/user_27_6_4364915672942482623_v1 grpc read failed 2025-05-07T09:07:52.131574Z node 27 :PQ_READ_PROXY INFO: read_session_actor.cpp:92: session cookie 6 consumer shared/user session shared/user_27_6_4364915672942482623_v1 grpc closed 2025-05-07T09:07:52.131611Z node 27 :PQ_READ_PROXY INFO: read_session_actor.cpp:377: session cookie 6 consumer shared/user session shared/user_27_6_4364915672942482623_v1 is DEAD 2025-05-07T09:07:52.132549Z node 27 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037899][topic2-mirrored-from-dc2] pipe [27:7501627986504651708:2676] disconnected; active server actors: 1 2025-05-07T09:07:52.132583Z node 27 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1673: [72075186224037899][topic2-mirrored-from-dc2] pipe [27:7501627986504651708:2676] client user disconnected session shared/user_27_6_4364915672942482623_v1 2025-05-07T09:07:52.601318Z node 27 :PQ_METACACHE DEBUG: msgbus_server_pq_metacache.cpp:743: Check version rescan 2025-05-07T09:07:52.614473Z node 27 :PQ_METACACHE DEBUG: msgbus_server_pq_metacache.cpp:137: Metacache: reset >> TStorageServiceTest::ShouldNotPendingCheckpointWithoutCreation [GOOD] >> TStorageServiceTest::ShouldNotPendingCheckpointGenerationChanged |93.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test ------- [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_broken_dynconfig[_client_session_pool_no_auth-_bad_dynconfig] [GOOD] Test command err: AAA /home/runner/.ya/build/build_root/zvgn/00441f/ydb/tests/functional/audit/test-results/py3test/testing_out_stuff/test_auditlog/chunk2/testing_out_stuff/test_auditlog.py.test_broken_dynconfig._client_session_pool_no_auth-_bad_dynconfig/audit.txt 2025-05-07T09:07:41.571008Z: {"reason":"ydb/library/fyamlcpp/fyamlcpp.cpp:1068: \n6:12 plain scalar cannot start with '%'","sanitized_token":"{none}","remote_address":"127.0.0.1","status":"ERROR","subject":"{none}","operation":"REPLACE DYNCONFIG","new_config":"\n---\n123metadata:\n kind: MainConfig\n cluster: \"\"\n version: %s\nconfig:\n yaml_config_enabled: true\nallowed_labels:\n node_id:\n type: string\n host:\n type: string\n tenant:\n type: string\nselector_config: []\n ","component":"console"} |93.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> TStorageServiceTest::ShouldGetState [GOOD] >> DataShardVolatile::CompactedVolatileChangesCommit [GOOD] >> DataShardVolatile::CompactedVolatileChangesAbort >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_does_not_change_visibility_for_deleted_message[tables_format_v1-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_does_not_change_visibility_not_in_flight[tables_format_v0-fifo] >> TPersQueueTest::DisableWrongSettings [GOOD] >> TPersQueueTest::DisableDeduplication |93.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |93.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_single_dml_query_logged[replace] >> 
TCheckpointStorageTest::ShouldDeleteMarkedCheckpoints [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_dynconfig [GOOD] Test command err: AAA /home/runner/.ya/build/build_root/zvgn/004418/ydb/tests/functional/audit/test-results/py3test/testing_out_stuff/test_auditlog/chunk15/testing_out_stuff/test_auditlog.py.test_dynconfig/audit.txt 2025-05-07T09:07:44.032266Z: {"sanitized_token":"**** (B6C6F477)","subject":"root@builtin","new_config":"\n---\nmetadata:\n kind: MainConfig\n cluster: \"\"\n version: 0\nconfig:\n yaml_config_enabled: true\nallowed_labels:\n node_id:\n type: string\n host:\n type: string\n tenant:\n type: string\nselector_config: []\n ","status":"SUCCESS","component":"console","operation":"REPLACE DYNCONFIG","remote_address":"127.0.0.1"} >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_set_very_big_visibility_timeout[tables_format_v1] >> DataShardVolatile::NotCachingAbortingDeletes+UseSink [GOOD] >> DataShardVolatile::NotCachingAbortingDeletes-UseSink ------- [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_broken_dynconfig[_client_session_pool_with_auth_other-_bad_dynconfig] [GOOD] Test command err: AAA /home/runner/.ya/build/build_root/zvgn/00440f/ydb/tests/functional/audit/test-results/py3test/testing_out_stuff/test_auditlog/chunk4/testing_out_stuff/test_auditlog.py.test_broken_dynconfig._client_session_pool_with_auth_other-_bad_dynconfig/audit.txt 2025-05-07T09:07:44.641884Z: {"reason":"ydb/library/fyamlcpp/fyamlcpp.cpp:1068: \n6:12 plain scalar cannot start with '%'","sanitized_token":"othe****ltin (27F910A9)","remote_address":"127.0.0.1","status":"ERROR","subject":"other-user@builtin","operation":"REPLACE DYNCONFIG","new_config":"\n---\n123metadata:\n kind: MainConfig\n cluster: \"\"\n version: %s\nconfig:\n yaml_config_enabled: true\nallowed_labels:\n node_id:\n type: string\n host:\n type: string\n tenant:\n type: string\nselector_config: []\n ","component":"console"} |93.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/dump_restore/py3test >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_all_types-pk_types7-all_types7-index7---] [GOOD] >> TStorageServiceTest::ShouldUseGc [GOOD] |93.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/checkpoint_storage/ut/unittest >> TStateStorageTest::ShouldGetMultipleStates [GOOD] |93.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test ------- [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/checkpoint_storage/ut/unittest >> TStorageServiceTest::ShouldNotCompleteCheckpointGenerationChanged [GOOD] Test command err: 2025-05-07T09:07:38.254258Z node 1 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:178: Successfully bootstrapped TStorageProxy [1:7501627917182584830:2048] with connection to localhost:13249:local 2025-05-07T09:07:38.254368Z node 1 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:185: [graph_graphich.17] Got TEvRegisterCoordinatorRequest 2025-05-07T09:07:38.998107Z node 1 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:199: [graph_graphich.17] Graph registered 2025-05-07T09:07:38.998132Z node 1 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:201: [graph_graphich.17] Send TEvRegisterCoordinatorResponse 2025-05-07T09:07:38.998544Z node 1 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:209: [graph_graphich.17] [17:1] Got TEvCreateCheckpointRequest 2025-05-07T09:07:40.563512Z node 1 
:STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:251: [graph_graphich.17] [17:1] Checkpoint created 2025-05-07T09:07:40.563548Z node 1 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:253: [graph_graphich.17] [17:1] Send TEvCreateCheckpointResponse 2025-05-07T09:07:40.566494Z node 1 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:185: [graph_graphich.18] Got TEvRegisterCoordinatorRequest 2025-05-07T09:07:41.075087Z node 1 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:199: [graph_graphich.18] Graph registered 2025-05-07T09:07:41.075122Z node 1 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:201: [graph_graphich.18] Send TEvRegisterCoordinatorResponse 2025-05-07T09:07:41.078369Z node 1 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:209: [graph_graphich.17] [17:2] Got TEvCreateCheckpointRequest 2025-05-07T09:07:41.544520Z node 1 :STREAMS_STORAGE_SERVICE WARN: storage_proxy.cpp:249: [graph_graphich.17] [17:2] Failed to create checkpoint:
: Warning: Table: local/TStorageServiceTestShouldNotCreateCheckpointAfterGenerationChanged/coordinators_sync, pk: graph_graphich, current generation: 18, expected/new generation: 17, operation: Check, code: 400130 2025-05-07T09:07:41.544567Z node 1 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:253: [graph_graphich.17] [17:2] Send TEvCreateCheckpointResponse 2025-05-07T09:07:43.461386Z node 2 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:178: Successfully bootstrapped TStorageProxy [2:7501627945252369449:2048] with connection to localhost:13249:local 2025-05-07T09:07:43.461499Z node 2 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:185: [graph_graphich.17] Got TEvRegisterCoordinatorRequest 2025-05-07T09:07:43.837177Z node 2 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:199: [graph_graphich.17] Graph registered 2025-05-07T09:07:43.837213Z node 2 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:201: [graph_graphich.17] Send TEvRegisterCoordinatorResponse 2025-05-07T09:07:43.870084Z node 2 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:286: [graph_graphich.17] [17:1] Got TEvCompleteCheckpointRequest 2025-05-07T09:07:44.370521Z node 2 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:302: [graph_graphich.17] [17:1] Failed to set 'Completed' status:
: Warning: Failed to select checkpoint '17:1', code: 400080 2025-05-07T09:07:44.370557Z node 2 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:311: [graph_graphich.17] [17:1] Send TEvCompleteCheckpointResponse 2025-05-07T09:07:46.218292Z node 3 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:178: Successfully bootstrapped TStorageProxy [3:7501627953047985510:2048] with connection to localhost:13249:local 2025-05-07T09:07:46.218407Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:185: [graph_graphich.17] Got TEvRegisterCoordinatorRequest 2025-05-07T09:07:46.630264Z node 3 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:199: [graph_graphich.17] Graph registered 2025-05-07T09:07:46.630301Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:201: [graph_graphich.17] Send TEvRegisterCoordinatorResponse 2025-05-07T09:07:46.634046Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:319: [graph_graphich.17] [17:1] Got TEvAbortCheckpointRequest 2025-05-07T09:07:47.181404Z node 3 :STREAMS_STORAGE_SERVICE WARN: storage_proxy.cpp:331: [graph_graphich.17] [17:1] Failed to abort checkpoint:
: Warning: Failed to select checkpoint '17:1', code: 400080 2025-05-07T09:07:47.181446Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:335: [graph_graphich.17] [17:1] Send TEvAbortCheckpointResponse 2025-05-07T09:07:48.540823Z node 4 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:178: Successfully bootstrapped TStorageProxy [4:7501627966695100470:2048] with connection to localhost:13249:local 2025-05-07T09:07:48.540924Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:185: [graph_graphich.17] Got TEvRegisterCoordinatorRequest 2025-05-07T09:07:49.016902Z node 4 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:199: [graph_graphich.17] Graph registered 2025-05-07T09:07:49.016940Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:201: [graph_graphich.17] Send TEvRegisterCoordinatorResponse 2025-05-07T09:07:49.022494Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:209: [graph_graphich.17] [17:1] Got TEvCreateCheckpointRequest 2025-05-07T09:07:50.109394Z node 4 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:251: [graph_graphich.17] [17:1] Checkpoint created 2025-05-07T09:07:50.109436Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:253: [graph_graphich.17] [17:1] Send TEvCreateCheckpointResponse 2025-05-07T09:07:50.109783Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:286: [graph_graphich.17] [17:1] Got TEvCompleteCheckpointRequest 2025-05-07T09:07:50.579295Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:302: [graph_graphich.17] [17:1] Failed to set 'Completed' status:
: Warning: Selected checkpoint '17:1' with status Pending, while expected PendingCommit, code: 400080 2025-05-07T09:07:50.579330Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:311: [graph_graphich.17] [17:1] Send TEvCompleteCheckpointResponse 2025-05-07T09:07:52.606336Z node 5 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:178: Successfully bootstrapped TStorageProxy [5:7501627982826609837:2048] with connection to localhost:13249:local 2025-05-07T09:07:52.606435Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:185: [graph_graphich.17] Got TEvRegisterCoordinatorRequest 2025-05-07T09:07:52.978813Z node 5 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:199: [graph_graphich.17] Graph registered 2025-05-07T09:07:52.978844Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:201: [graph_graphich.17] Send TEvRegisterCoordinatorResponse 2025-05-07T09:07:52.984012Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:209: [graph_graphich.17] [17:1] Got TEvCreateCheckpointRequest 2025-05-07T09:07:54.402121Z node 5 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:251: [graph_graphich.17] [17:1] Checkpoint created 2025-05-07T09:07:54.402184Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:253: [graph_graphich.17] [17:1] Send TEvCreateCheckpointResponse 2025-05-07T09:07:54.403080Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:261: [graph_graphich.17] [17:1] Got TEvSetCheckpointPendingCommitStatusRequest 2025-05-07T09:07:55.070215Z node 5 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:276: [graph_graphich.17] [17:1] Status updated to 'PendingCommit' 2025-05-07T09:07:55.070257Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:278: [graph_graphich.17] [17:1] Send TEvSetCheckpointPendingCommitStatusResponse 2025-05-07T09:07:55.072419Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:185: [graph_graphich.18] Got TEvRegisterCoordinatorRequest 2025-05-07T09:07:55.432378Z node 5 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:199: [graph_graphich.18] Graph registered 2025-05-07T09:07:55.432415Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:201: [graph_graphich.18] Send TEvRegisterCoordinatorResponse 2025-05-07T09:07:55.438454Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:286: [graph_graphich.17] [17:1] Got TEvCompleteCheckpointRequest 2025-05-07T09:07:55.695680Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:302: [graph_graphich.17] [17:1] Failed to set 'Completed' status:
: Warning: Table: local/TStorageServiceTestShouldNotPendingCheckpointGenerationChanged/coordinators_sync, pk: graph_graphich, current generation: 18, expected/new generation: 17, operation: Check, code: 400130 2025-05-07T09:07:55.695720Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:311: [graph_graphich.17] [17:1] Send TEvCompleteCheckpointResponse >> TStorageServiceTest::ShouldNotPendingCheckpointGenerationChanged [GOOD] >> TCheckpointStorageTest::ShouldRetryOnExistingGraphDescId [GOOD] |93.6%| [TA] $(B)/ydb/tests/datashard/dump_restore/test-results/py3test/{meta.json ... results_accumulator.log} |93.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |93.6%| [TA] {RESULT} $(B)/ydb/tests/datashard/dump_restore/test-results/py3test/{meta.json ... results_accumulator.log} |93.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_dml_begin_commit_logged [GOOD] >> test_auditlog.py::test_cloud_ids_are_logged[attrs0] >> test_fifo_messaging.py::TestSqsFifoMicroBatchesWithPath::test_micro_batch_read[tables_format_v0] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_does_not_change_visibility_not_in_flight[tables_format_v0-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_does_not_change_visibility_not_in_flight[tables_format_v0-std] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/checkpoint_storage/ut/unittest >> TStorageServiceTest::ShouldGetState [GOOD] Test command err: 2025-05-07T09:07:39.863187Z node 1 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:178: Successfully bootstrapped TStorageProxy [1:7501627927859521482:2048] with connection to localhost:16906:local 2025-05-07T09:07:39.863310Z node 1 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:185: [graph_graphich.17] Got TEvRegisterCoordinatorRequest 2025-05-07T09:07:40.340868Z node 1 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:199: [graph_graphich.17] Graph registered 2025-05-07T09:07:40.340901Z node 1 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:201: [graph_graphich.17] Send TEvRegisterCoordinatorResponse 2025-05-07T09:07:40.342232Z node 1 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:209: [graph_graphich.17] [17:1] Got TEvCreateCheckpointRequest 2025-05-07T09:07:41.888120Z node 1 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:251: [graph_graphich.17] [17:1] Checkpoint created 2025-05-07T09:07:41.888154Z node 1 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:253: [graph_graphich.17] [17:1] Send TEvCreateCheckpointResponse 2025-05-07T09:07:43.307762Z node 2 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:178: Successfully bootstrapped TStorageProxy [2:7501627943341444041:2048] with connection to localhost:16906:local 2025-05-07T09:07:43.307859Z node 2 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:185: [graph_graphich.17] Got TEvRegisterCoordinatorRequest 2025-05-07T09:07:43.763756Z node 2 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:199: [graph_graphich.17] Graph registered 2025-05-07T09:07:43.763787Z node 2 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:201: [graph_graphich.17] Send TEvRegisterCoordinatorResponse 2025-05-07T09:07:43.770087Z node 2 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:209: [graph_graphich.17] [17:1] Got TEvCreateCheckpointRequest 2025-05-07T09:07:44.934437Z node 2 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:251: [graph_graphich.17] [17:1] Checkpoint created 2025-05-07T09:07:44.934467Z node 2 
:STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:253: [graph_graphich.17] [17:1] Send TEvCreateCheckpointResponse 2025-05-07T09:07:44.942082Z node 2 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:209: [graph_graphich.17] [17:2] Got TEvCreateCheckpointRequest 2025-05-07T09:07:45.424494Z node 2 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:251: [graph_graphich.17] [17:2] Checkpoint created 2025-05-07T09:07:45.424524Z node 2 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:253: [graph_graphich.17] [17:2] Send TEvCreateCheckpointResponse 2025-05-07T09:07:45.427105Z node 2 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:209: [graph_graphich.17] [17:3] Got TEvCreateCheckpointRequest 2025-05-07T09:07:45.942364Z node 2 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:251: [graph_graphich.17] [17:3] Checkpoint created 2025-05-07T09:07:45.942397Z node 2 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:253: [graph_graphich.17] [17:3] Send TEvCreateCheckpointResponse 2025-05-07T09:07:45.943153Z node 2 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:343: [graph_graphich] Got TEvGetCheckpointsMetadataRequest 2025-05-07T09:07:46.340167Z node 2 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:356: [graph_graphich] Send TEvGetCheckpointsMetadataResponse 2025-05-07T09:07:47.891024Z node 3 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:178: Successfully bootstrapped TStorageProxy [3:7501627961216201780:2048] with connection to localhost:16906:local 2025-05-07T09:07:47.891115Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:185: [graph_graphich.17] Got TEvRegisterCoordinatorRequest 2025-05-07T09:07:48.224581Z node 3 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:199: [graph_graphich.17] Graph registered 2025-05-07T09:07:48.224609Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:201: [graph_graphich.17] Send TEvRegisterCoordinatorResponse 2025-05-07T09:07:48.224975Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:209: [graph_graphich.17] [17:1] Got TEvCreateCheckpointRequest 2025-05-07T09:07:49.506309Z node 3 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:251: [graph_graphich.17] [17:1] Checkpoint created 2025-05-07T09:07:49.506361Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:253: [graph_graphich.17] [17:1] Send TEvCreateCheckpointResponse 2025-05-07T09:07:49.507056Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:261: [graph_graphich.17] [17:1] Got TEvSetCheckpointPendingCommitStatusRequest 2025-05-07T09:07:50.203654Z node 3 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:276: [graph_graphich.17] [17:1] Status updated to 'PendingCommit' 2025-05-07T09:07:50.203687Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:278: [graph_graphich.17] [17:1] Send TEvSetCheckpointPendingCommitStatusResponse 2025-05-07T09:07:50.206229Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:209: [graph_graphich.17] [17:2] Got TEvCreateCheckpointRequest 2025-05-07T09:07:50.720222Z node 3 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:251: [graph_graphich.17] [17:2] Checkpoint created 2025-05-07T09:07:50.720250Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:253: [graph_graphich.17] [17:2] Send TEvCreateCheckpointResponse 2025-05-07T09:07:50.720842Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:261: [graph_graphich.17] [17:2] Got TEvSetCheckpointPendingCommitStatusRequest 2025-05-07T09:07:51.208484Z node 3 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:276: [graph_graphich.17] [17:2] Status updated to 'PendingCommit' 
2025-05-07T09:07:51.208520Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:278: [graph_graphich.17] [17:2] Send TEvSetCheckpointPendingCommitStatusResponse 2025-05-07T09:07:51.218377Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:286: [graph_graphich.17] [17:2] Got TEvCompleteCheckpointRequest 2025-05-07T09:07:51.543381Z node 3 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:304: [graph_graphich.17] [17:2] Status updated to 'Completed' 2025-05-07T09:07:51.543410Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:311: [graph_graphich.17] [17:2] Send TEvCompleteCheckpointResponse 2025-05-07T09:07:51.543993Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:319: [graph_graphich.17] [17:1] Got TEvAbortCheckpointRequest 2025-05-07T09:07:51.901297Z node 3 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:333: [graph_graphich.17] [17:1] Checkpoint aborted 2025-05-07T09:07:51.902017Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:335: [graph_graphich.17] [17:1] Send TEvAbortCheckpointResponse 2025-05-07T09:07:51.910077Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:319: [graph_graphich.17] [17:2] Got TEvAbortCheckpointRequest 2025-05-07T09:07:52.272422Z node 3 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:333: [graph_graphich.17] [17:2] Checkpoint aborted 2025-05-07T09:07:52.272458Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:335: [graph_graphich.17] [17:2] Send TEvAbortCheckpointResponse 2025-05-07T09:07:52.283560Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:343: [graph_graphich] Got TEvGetCheckpointsMetadataRequest 2025-05-07T09:07:52.641051Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:356: [graph_graphich] Send TEvGetCheckpointsMetadataResponse 2025-05-07T09:07:54.412225Z node 4 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:178: Successfully bootstrapped TStorageProxy [4:7501627990851812594:2048] with connection to localhost:16906:local 2025-05-07T09:07:54.412309Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:185: [graph_graphich.17] Got TEvRegisterCoordinatorRequest 2025-05-07T09:07:54.841073Z node 4 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:199: [graph_graphich.17] Graph registered 2025-05-07T09:07:54.841109Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:201: [graph_graphich.17] Send TEvRegisterCoordinatorResponse 2025-05-07T09:07:54.841847Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:209: [graph_graphich.17] [17:1] Got TEvCreateCheckpointRequest 2025-05-07T09:07:56.316320Z node 4 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:251: [graph_graphich.17] [17:1] Checkpoint created 2025-05-07T09:07:56.316357Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:253: [graph_graphich.17] [17:1] Send TEvCreateCheckpointResponse 2025-05-07T09:07:56.318619Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:365: [graph_graphich] [17:1] Got TEvSaveTaskState: task 1317 2025-05-07T09:07:56.529020Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:389: [graph_graphich] [17:1] TEvSaveTaskState Apply: task: 1317 2025-05-07T09:07:56.529085Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:404: [graph_graphich] [17:1] Send TEvSaveTaskStateResult: task: 1317 2025-05-07T09:07:56.530246Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:413: [graph_graphich] [17:1] Got TEvGetTaskState: tasks {1317} 2025-05-07T09:07:56.530339Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: ydb_state_storage.cpp:532: [graph_graphich] [17:1] GetState, tasks: 1317 
2025-05-07T09:07:57.134405Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: ydb_state_storage.cpp:667: [graph_graphich] [17:1] ListOfStates results: 2025-05-07T09:07:57.134506Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: ydb_state_storage.cpp:688: [graph_graphich] [17:1] taskId 1317 checkpoint id: 17:1, rows count: 1 2025-05-07T09:07:57.134567Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: ydb_state_storage.cpp:920: [graph_graphich] [17:1] SkipStatesInFuture, skip 0 checkpoints 2025-05-07T09:07:57.134928Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: ydb_state_storage.cpp:812: [graph_graphich] [17:1] SelectState: task_id 1317, seq_no 1, blob_seq_num 0 2025-05-07T09:07:57.669812Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: ydb_state_storage.cpp:423: [graph_graphich] [17:1] DeserializeState, task id 1317, blob size 49 2025-05-07T09:07:57.669880Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: ydb_state_storage.cpp:979: [graph_graphich] [17:1] ApplyIncrements 2025-05-07T09:07:57.676828Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:432: [graph_graphich] [{ Id: 1 Generation: 17 }] Send TEvGetTaskStateResult: tasks: {1317} |93.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_create_q_twice[tables_format_v0-fifo] |93.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_create_queue_by_nonexistent_user_fails[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delay_message_batch[tables_format_v0-fifo] >> test_auditlog.py::test_broken_dynconfig[_client_session_pool_with_auth_root-_good_dynconfig] [GOOD] |93.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/checkpoint_storage/ut/unittest >> TCheckpointStorageTest::ShouldDeleteMarkedCheckpoints [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_write_read_delete_many_groups[tables_format_v0] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_write_read_delete_many_groups[tables_format_v1] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_and_read_message[tables_format_v1-fifo] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_create_queue_by_nonexistent_user_fails[tables_format_v1] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/checkpoint_storage/ut/unittest >> TStorageServiceTest::ShouldUseGc [GOOD] Test command err: 2025-05-07T09:07:37.767989Z node 1 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:178: Successfully bootstrapped TStorageProxy [1:7501627911215123644:2048] with connection to localhost:7886:local 2025-05-07T09:07:37.786128Z node 1 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:185: [graph_graphich.17] Got TEvRegisterCoordinatorRequest 2025-05-07T09:07:38.813333Z node 1 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:199: [graph_graphich.17] Graph registered 2025-05-07T09:07:38.813368Z node 1 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:201: [graph_graphich.17] Send TEvRegisterCoordinatorResponse 2025-05-07T09:07:40.707879Z node 2 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:178: Successfully bootstrapped TStorageProxy [2:7501627928812990458:2048] with connection to localhost:7886:local 2025-05-07T09:07:40.707975Z node 2 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:185: [graph_graphich.17] Got TEvRegisterCoordinatorRequest 2025-05-07T09:07:40.991532Z node 2 :STREAMS_STORAGE_SERVICE INFO: 
storage_proxy.cpp:199: [graph_graphich.17] Graph registered 2025-05-07T09:07:40.991562Z node 2 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:201: [graph_graphich.17] Send TEvRegisterCoordinatorResponse 2025-05-07T09:07:40.998696Z node 2 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:185: [graph_graphich.18] Got TEvRegisterCoordinatorRequest 2025-05-07T09:07:41.470222Z node 2 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:199: [graph_graphich.18] Graph registered 2025-05-07T09:07:41.470254Z node 2 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:201: [graph_graphich.18] Send TEvRegisterCoordinatorResponse 2025-05-07T09:07:41.481600Z node 2 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:185: [graph_graphich.17] Got TEvRegisterCoordinatorRequest 2025-05-07T09:07:41.690537Z node 2 :STREAMS_STORAGE_SERVICE WARN: storage_proxy.cpp:197: [graph_graphich.17] Failed to register graph:
: Warning: Table: local/TStorageServiceTestShouldRegisterNextGeneration/coordinators_sync, pk: graph_graphich, current generation: 18, expected/new generation: 17, operation: RegisterCheck, code: 400130 2025-05-07T09:07:41.690586Z node 2 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:201: [graph_graphich.17] Send TEvRegisterCoordinatorResponse 2025-05-07T09:07:43.322295Z node 3 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:178: Successfully bootstrapped TStorageProxy [3:7501627941918110581:2048] with connection to localhost:7886:local 2025-05-07T09:07:43.322391Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:185: [graph_graphich.17] Got TEvRegisterCoordinatorRequest 2025-05-07T09:07:43.674196Z node 3 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:199: [graph_graphich.17] Graph registered 2025-05-07T09:07:43.674226Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:201: [graph_graphich.17] Send TEvRegisterCoordinatorResponse 2025-05-07T09:07:43.678716Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:209: [graph_graphich.17] [17:1] Got TEvCreateCheckpointRequest 2025-05-07T09:07:45.262909Z node 3 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:251: [graph_graphich.17] [17:1] Checkpoint created 2025-05-07T09:07:45.262961Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:253: [graph_graphich.17] [17:1] Send TEvCreateCheckpointResponse 2025-05-07T09:07:45.264874Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:261: [graph_graphich.17] [17:1] Got TEvSetCheckpointPendingCommitStatusRequest 2025-05-07T09:07:45.981232Z node 3 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:276: [graph_graphich.17] [17:1] Status updated to 'PendingCommit' 2025-05-07T09:07:45.981269Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:278: [graph_graphich.17] [17:1] Send TEvSetCheckpointPendingCommitStatusResponse 2025-05-07T09:07:45.982273Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:209: [graph_graphich.17] [17:2] Got TEvCreateCheckpointRequest 2025-05-07T09:07:46.716194Z node 3 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:251: [graph_graphich.17] [17:2] Checkpoint created 2025-05-07T09:07:46.716226Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:253: [graph_graphich.17] [17:2] Send TEvCreateCheckpointResponse 2025-05-07T09:07:46.716795Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:261: [graph_graphich.17] [17:2] Got TEvSetCheckpointPendingCommitStatusRequest 2025-05-07T09:07:47.278752Z node 3 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:276: [graph_graphich.17] [17:2] Status updated to 'PendingCommit' 2025-05-07T09:07:47.278788Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:278: [graph_graphich.17] [17:2] Send TEvSetCheckpointPendingCommitStatusResponse 2025-05-07T09:07:47.279314Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:286: [graph_graphich.17] [17:2] Got TEvCompleteCheckpointRequest 2025-05-07T09:07:47.630269Z node 3 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:304: [graph_graphich.17] [17:2] Status updated to 'Completed' 2025-05-07T09:07:47.630310Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:311: [graph_graphich.17] [17:2] Send TEvCompleteCheckpointResponse 2025-05-07T09:07:47.630719Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:343: [graph_graphich] Got TEvGetCheckpointsMetadataRequest 2025-05-07T09:07:47.908377Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:356: [graph_graphich] Send TEvGetCheckpointsMetadataResponse 2025-05-07T09:07:49.511120Z 
node 4 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:178: Successfully bootstrapped TStorageProxy [4:7501627971093987051:2048] with connection to localhost:7886:local 2025-05-07T09:07:49.511217Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:185: [graph_graphich.17] Got TEvRegisterCoordinatorRequest 2025-05-07T09:07:49.798827Z node 4 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:199: [graph_graphich.17] Graph registered 2025-05-07T09:07:49.798856Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:201: [graph_graphich.17] Send TEvRegisterCoordinatorResponse 2025-05-07T09:07:49.799497Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:209: [graph_graphich.17] [17:1] Got TEvCreateCheckpointRequest 2025-05-07T09:07:51.414222Z node 4 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:251: [graph_graphich.17] [17:1] Checkpoint created 2025-05-07T09:07:51.414255Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:253: [graph_graphich.17] [17:1] Send TEvCreateCheckpointResponse 2025-05-07T09:07:51.423871Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:365: [graph_graphich] [17:1] Got TEvSaveTaskState: task 1317 2025-05-07T09:07:51.670432Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:389: [graph_graphich] [17:1] TEvSaveTaskState Apply: task: 1317 2025-05-07T09:07:51.671960Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:404: [graph_graphich] [17:1] Send TEvSaveTaskStateResult: task: 1317 2025-05-07T09:07:53.354234Z node 5 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:178: Successfully bootstrapped TStorageProxy [5:7501627984845241921:2048] with connection to localhost:7886:local 2025-05-07T09:07:53.354285Z node 5 :STREAMS_STORAGE_SERVICE INFO: gc.cpp:83: Successfully bootstrapped storage GC [5:7501627989140209321:2130] 2025-05-07T09:07:53.354330Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:185: [graph_graphich.17] Got TEvRegisterCoordinatorRequest 2025-05-07T09:07:53.661781Z node 5 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:199: [graph_graphich.17] Graph registered 2025-05-07T09:07:53.661816Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:201: [graph_graphich.17] Send TEvRegisterCoordinatorResponse 2025-05-07T09:07:53.662134Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:209: [graph_graphich.17] [17:1] Got TEvCreateCheckpointRequest 2025-05-07T09:07:54.946235Z node 5 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:251: [graph_graphich.17] [17:1] Checkpoint created 2025-05-07T09:07:54.946267Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:253: [graph_graphich.17] [17:1] Send TEvCreateCheckpointResponse 2025-05-07T09:07:54.950244Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:261: [graph_graphich.17] [17:1] Got TEvSetCheckpointPendingCommitStatusRequest 2025-05-07T09:07:55.990236Z node 5 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:276: [graph_graphich.17] [17:1] Status updated to 'PendingCommit' 2025-05-07T09:07:55.990266Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:278: [graph_graphich.17] [17:1] Send TEvSetCheckpointPendingCommitStatusResponse 2025-05-07T09:07:55.990578Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:286: [graph_graphich.17] [17:1] Got TEvCompleteCheckpointRequest 2025-05-07T09:07:56.347773Z node 5 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:304: [graph_graphich.17] [17:1] Status updated to 'Completed' 2025-05-07T09:07:56.347811Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:307: [graph_graphich.17] [17:1] 
Send TEvNewCheckpointSucceeded 2025-05-07T09:07:56.347837Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:311: [graph_graphich.17] [17:1] Send TEvCompleteCheckpointResponse 2025-05-07T09:07:56.347982Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: gc.cpp:93: GC received upperbound checkpoint 17:1 for graph 'graph_graphich' 2025-05-07T09:07:56.386093Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:209: [graph_graphich.17] [17:2] Got TEvCreateCheckpointRequest 2025-05-07T09:07:56.649640Z node 5 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:251: [graph_graphich.17] [17:2] Checkpoint created 2025-05-07T09:07:56.649669Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:253: [graph_graphich.17] [17:2] Send TEvCreateCheckpointResponse 2025-05-07T09:07:56.650085Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:261: [graph_graphich.17] [17:2] Got TEvSetCheckpointPendingCommitStatusRequest 2025-05-07T09:07:56.838889Z node 5 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:276: [graph_graphich.17] [17:2] Status updated to 'PendingCommit' 2025-05-07T09:07:56.838924Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:278: [graph_graphich.17] [17:2] Send TEvSetCheckpointPendingCommitStatusResponse 2025-05-07T09:07:56.839235Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:286: [graph_graphich.17] [17:2] Got TEvCompleteCheckpointRequest 2025-05-07T09:07:57.026287Z node 5 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:304: [graph_graphich.17] [17:2] Status updated to 'Completed' 2025-05-07T09:07:57.026321Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:307: [graph_graphich.17] [17:2] Send TEvNewCheckpointSucceeded 2025-05-07T09:07:57.026348Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:311: [graph_graphich.17] [17:2] Send TEvCompleteCheckpointResponse 2025-05-07T09:07:57.028968Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: gc.cpp:93: GC received upperbound checkpoint 17:2 for graph 'graph_graphich' 2025-05-07T09:07:57.030434Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:209: [graph_graphich.17] [17:3] Got TEvCreateCheckpointRequest 2025-05-07T09:07:57.098509Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: gc.cpp:170: GC deleted checkpoints of graph 'graph_graphich' up to 17:1 2025-05-07T09:07:57.106821Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: gc.cpp:170: GC deleted checkpoints of graph 'graph_graphich' up to 17:2 2025-05-07T09:07:57.290927Z node 5 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:251: [graph_graphich.17] [17:3] Checkpoint created 2025-05-07T09:07:57.290962Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:253: [graph_graphich.17] [17:3] Send TEvCreateCheckpointResponse 2025-05-07T09:07:57.291323Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:261: [graph_graphich.17] [17:3] Got TEvSetCheckpointPendingCommitStatusRequest 2025-05-07T09:07:57.506454Z node 5 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:276: [graph_graphich.17] [17:3] Status updated to 'PendingCommit' 2025-05-07T09:07:57.506485Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:278: [graph_graphich.17] [17:3] Send TEvSetCheckpointPendingCommitStatusResponse 2025-05-07T09:07:57.510366Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:286: [graph_graphich.17] [17:3] Got TEvCompleteCheckpointRequest 2025-05-07T09:07:57.829065Z node 5 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:304: [graph_graphich.17] [17:3] Status updated to 'Completed' 2025-05-07T09:07:57.829116Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: 
storage_proxy.cpp:307: [graph_graphich.17] [17:3] Send TEvNewCheckpointSucceeded 2025-05-07T09:07:57.829146Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:311: [graph_graphich.17] [17:3] Send TEvCompleteCheckpointResponse 2025-05-07T09:07:57.830167Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: gc.cpp:93: GC received upperbound checkpoint 17:3 for graph 'graph_graphich' 2025-05-07T09:07:57.830827Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:343: [graph_graphich] Got TEvGetCheckpointsMetadataRequest 2025-05-07T09:07:58.095424Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: gc.cpp:170: GC deleted checkpoints of graph 'graph_graphich' up to 17:3 2025-05-07T09:07:58.138143Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:356: [graph_graphich] Send TEvGetCheckpointsMetadataResponse 2025-05-07T09:07:58.239122Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:343: [graph_graphich] Got TEvGetCheckpointsMetadataRequest 2025-05-07T09:07:58.276965Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:356: [graph_graphich] Send TEvGetCheckpointsMetadataResponse 2025-05-07T09:07:58.377932Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:343: [graph_graphich] Got TEvGetCheckpointsMetadataRequest 2025-05-07T09:07:58.391747Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:356: [graph_graphich] Send TEvGetCheckpointsMetadataResponse 2025-05-07T09:07:58.493478Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:343: [graph_graphich] Got TEvGetCheckpointsMetadataRequest 2025-05-07T09:07:58.507435Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:356: [graph_graphich] Send TEvGetCheckpointsMetadataResponse 2025-05-07T09:07:58.610924Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:343: [graph_graphich] Got TEvGetCheckpointsMetadataRequest 2025-05-07T09:07:58.634505Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:356: [graph_graphich] Send TEvGetCheckpointsMetadataResponse 2025-05-07T09:07:58.736061Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:343: [graph_graphich] Got TEvGetCheckpointsMetadataRequest 2025-05-07T09:07:58.750809Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:356: [graph_graphich] Send TEvGetCheckpointsMetadataResponse 2025-05-07T09:07:58.854103Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:343: [graph_graphich] Got TEvGetCheckpointsMetadataRequest 2025-05-07T09:07:58.873641Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:356: [graph_graphich] Send TEvGetCheckpointsMetadataResponse 2025-05-07T09:07:58.975031Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:343: [graph_graphich] Got TEvGetCheckpointsMetadataRequest 2025-05-07T09:07:58.991974Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:356: [graph_graphich] Send TEvGetCheckpointsMetadataResponse 2025-05-07T09:07:59.093982Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:343: [graph_graphich] Got TEvGetCheckpointsMetadataRequest 2025-05-07T09:07:59.110281Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:356: [graph_graphich] Send TEvGetCheckpointsMetadataResponse 2025-05-07T09:07:59.213860Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:343: [graph_graphich] Got TEvGetCheckpointsMetadataRequest 2025-05-07T09:07:59.230985Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:356: [graph_graphich] Send TEvGetCheckpointsMetadataResponse 2025-05-07T09:07:59.334790Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:343: [graph_graphich] Got 
TEvGetCheckpointsMetadataRequest 2025-05-07T09:07:59.350712Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:356: [graph_graphich] Send TEvGetCheckpointsMetadataResponse 2025-05-07T09:07:59.458081Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:343: [graph_graphich] Got TEvGetCheckpointsMetadataRequest 2025-05-07T09:07:59.473792Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:356: [graph_graphich] Send TEvGetCheckpointsMetadataResponse 2025-05-07T09:07:59.582154Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:343: [graph_graphich] Got TEvGetCheckpointsMetadataRequest 2025-05-07T09:07:59.597706Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:356: [graph_graphich] Send TEvGetCheckpointsMetadataResponse 2025-05-07T09:07:59.698798Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:343: [graph_graphich] Got TEvGetCheckpointsMetadataRequest 2025-05-07T09:07:59.715342Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:356: [graph_graphich] Send TEvGetCheckpointsMetadataResponse 2025-05-07T09:07:59.818366Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:343: [graph_graphich] Got TEvGetCheckpointsMetadataRequest 2025-05-07T09:07:59.831596Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:356: [graph_graphich] Send TEvGetCheckpointsMetadataResponse 2025-05-07T09:07:59.934679Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:343: [graph_graphich] Got TEvGetCheckpointsMetadataRequest 2025-05-07T09:07:59.966432Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:356: [graph_graphich] Send TEvGetCheckpointsMetadataResponse 2025-05-07T09:08:00.090037Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:343: [graph_graphich] Got TEvGetCheckpointsMetadataRequest 2025-05-07T09:08:00.123689Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:356: [graph_graphich] Send TEvGetCheckpointsMetadataResponse 2025-05-07T09:08:00.229480Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:343: [graph_graphich] Got TEvGetCheckpointsMetadataRequest 2025-05-07T09:08:00.346762Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:356: [graph_graphich] Send TEvGetCheckpointsMetadataResponse >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_get_queue_attributes_only_attributes_table[tables_format_v1-std] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_visibility_change_disables_receive_attempt_id[tables_format_v0-with_change_visibility] |93.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test ------- [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/checkpoint_storage/ut/unittest >> TStorageServiceTest::ShouldNotPendingCheckpointGenerationChanged [GOOD] Test command err: 2025-05-07T09:07:47.194060Z node 1 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:178: Successfully bootstrapped TStorageProxy [1:7501627957206263738:2048] with connection to localhost:63235:local 2025-05-07T09:07:47.194179Z node 1 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:185: [graph_graphich.17] Got TEvRegisterCoordinatorRequest 2025-05-07T09:07:48.274312Z node 1 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:199: [graph_graphich.17] Graph registered 2025-05-07T09:07:48.274343Z node 1 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:201: [graph_graphich.17] Send TEvRegisterCoordinatorResponse 2025-05-07T09:07:48.288241Z node 1 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:185: [graph_graphich.16] Got TEvRegisterCoordinatorRequest 
2025-05-07T09:07:48.551668Z node 1 :STREAMS_STORAGE_SERVICE WARN: storage_proxy.cpp:197: [graph_graphich.16] Failed to register graph:
: Warning: Table: local/TStorageServiceTestShouldNotRegisterPrevGeneration/coordinators_sync, pk: graph_graphich, current generation: 17, expected/new generation: 16, operation: RegisterCheck, code: 400130 2025-05-07T09:07:48.551696Z node 1 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:201: [graph_graphich.16] Send TEvRegisterCoordinatorResponse 2025-05-07T09:07:50.232637Z node 2 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:178: Successfully bootstrapped TStorageProxy [2:7501627975450837329:2048] with connection to localhost:63235:local 2025-05-07T09:07:50.232702Z node 2 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:209: [graph_graphich.17] [17:1] Got TEvCreateCheckpointRequest 2025-05-07T09:07:51.083920Z node 2 :STREAMS_STORAGE_SERVICE WARN: storage_proxy.cpp:249: [graph_graphich.17] [17:1] Failed to create checkpoint:
: Warning: Table: local/TStorageServiceTestShouldNotCreateCheckpointWhenUnregistered/coordinators_sync, pk: graph_graphich, current generation: 0, expected/new generation: 17, operation: Check, code: 400130 2025-05-07T09:07:51.083959Z node 2 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:253: [graph_graphich.17] [17:1] Send TEvCreateCheckpointResponse 2025-05-07T09:07:52.870246Z node 3 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:178: Successfully bootstrapped TStorageProxy [3:7501627983165546250:2048] with connection to localhost:63235:local 2025-05-07T09:07:52.870340Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:185: [graph_graphich.17] Got TEvRegisterCoordinatorRequest 2025-05-07T09:07:53.198003Z node 3 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:199: [graph_graphich.17] Graph registered 2025-05-07T09:07:53.198032Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:201: [graph_graphich.17] Send TEvRegisterCoordinatorResponse 2025-05-07T09:07:53.202142Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:209: [graph_graphich.17] [17:1] Got TEvCreateCheckpointRequest 2025-05-07T09:07:54.478254Z node 3 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:251: [graph_graphich.17] [17:1] Checkpoint created 2025-05-07T09:07:54.478288Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:253: [graph_graphich.17] [17:1] Send TEvCreateCheckpointResponse 2025-05-07T09:07:54.479962Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:209: [graph_graphich.17] [17:1] Got TEvCreateCheckpointRequest 2025-05-07T09:07:54.913341Z node 3 :STREAMS_STORAGE_SERVICE WARN: storage_proxy.cpp:249: [graph_graphich.17] [17:1] Failed to create checkpoint:
: Error: Constraint violated. Table: `local/TStorageServiceTestShouldNotCreateCheckpointTwice/checkpoints_metadata`., code: 2012
: Error: Conflict with existing key., code: 2012 2025-05-07T09:07:54.913373Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:253: [graph_graphich.17] [17:1] Send TEvCreateCheckpointResponse 2025-05-07T09:07:56.143866Z node 4 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:178: Successfully bootstrapped TStorageProxy [4:7501627997553675384:2048] with connection to localhost:63235:local 2025-05-07T09:07:56.143935Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:185: [graph_graphich.17] Got TEvRegisterCoordinatorRequest 2025-05-07T09:07:56.444596Z node 4 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:199: [graph_graphich.17] Graph registered 2025-05-07T09:07:56.444619Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:201: [graph_graphich.17] Send TEvRegisterCoordinatorResponse 2025-05-07T09:07:56.444903Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:261: [graph_graphich.17] [17:1] Got TEvSetCheckpointPendingCommitStatusRequest 2025-05-07T09:07:56.868406Z node 4 :STREAMS_STORAGE_SERVICE WARN: storage_proxy.cpp:274: [graph_graphich.17] [17:1] Failed to set 'PendingCommit' status:
: Warning: Failed to select checkpoint '17:1', code: 400080 2025-05-07T09:07:56.868448Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:278: [graph_graphich.17] [17:1] Send TEvSetCheckpointPendingCommitStatusResponse 2025-05-07T09:07:58.797194Z node 5 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:178: Successfully bootstrapped TStorageProxy [5:7501628006874825877:2048] with connection to localhost:63235:local 2025-05-07T09:07:58.797287Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:185: [graph_graphich.17] Got TEvRegisterCoordinatorRequest 2025-05-07T09:07:59.232937Z node 5 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:199: [graph_graphich.17] Graph registered 2025-05-07T09:07:59.232967Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:201: [graph_graphich.17] Send TEvRegisterCoordinatorResponse 2025-05-07T09:07:59.233785Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:209: [graph_graphich.17] [17:1] Got TEvCreateCheckpointRequest 2025-05-07T09:08:00.342215Z node 5 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:251: [graph_graphich.17] [17:1] Checkpoint created 2025-05-07T09:08:00.342252Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:253: [graph_graphich.17] [17:1] Send TEvCreateCheckpointResponse 2025-05-07T09:08:00.342734Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:185: [graph_graphich.18] Got TEvRegisterCoordinatorRequest 2025-05-07T09:08:00.630282Z node 5 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:199: [graph_graphich.18] Graph registered 2025-05-07T09:08:00.630307Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:201: [graph_graphich.18] Send TEvRegisterCoordinatorResponse 2025-05-07T09:08:00.630620Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:261: [graph_graphich.17] [17:1] Got TEvSetCheckpointPendingCommitStatusRequest 2025-05-07T09:08:00.975173Z node 5 :STREAMS_STORAGE_SERVICE WARN: storage_proxy.cpp:274: [graph_graphich.17] [17:1] Failed to set 'PendingCommit' status:
: Warning: Table: local/TStorageServiceTestShouldNotPendingCheckpointGenerationChanged/coordinators_sync, pk: graph_graphich, current generation: 18, expected/new generation: 17, operation: Check, code: 400130 2025-05-07T09:08:00.975256Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:278: [graph_graphich.17] [17:1] Send TEvSetCheckpointPendingCommitStatusResponse >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_delete_message_works[tables_format_v0] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delete_message_batch_deduplicates_receipt_handle[tables_format_v0-std] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_read_dont_stall[tables_format_v1] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_does_not_change_visibility_not_in_flight[tables_format_v0-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_does_not_change_visibility_not_in_flight[tables_format_v1-fifo] >> TGcTest::ShouldIgnoreIncrementCheckpoint [GOOD] >> TStateStorageTest::ShouldCountStates |93.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/checkpoint_storage/ut/unittest >> TCheckpointStorageTest::ShouldRetryOnExistingGraphDescId [GOOD] >> test_auditlog.py::test_dml_requests_logged_when_sid_is_unexpected [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delay_one_message[tables_format_v0-fifo] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delete_message_batch_deduplicates_receipt_handle[tables_format_v0-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delete_message_batch_deduplicates_receipt_handle[tables_format_v1-fifo] >> test_auditlog.py::test_dml_requests_arent_logged_when_sid_is_expected [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_single_dml_query_logged[upsert] [GOOD] Test command err: AAA /home/runner/.ya/build/build_root/zvgn/0043da/ydb/tests/functional/audit/test-results/py3test/testing_out_stuff/test_auditlog/chunk21/testing_out_stuff/test_auditlog.py.test_single_dml_query_logged.upsert/audit.txt 2025-05-07T09:07:52.395626Z: {"tx_id":"{none}","database":"/Root/test_auditlog.py","end_time":"2025-05-07T09:07:52.395576Z","sanitized_token":"**** (B6C6F477)","begin_tx":"1","remote_address":"127.0.0.1","commit_tx":"1","status":"SUCCESS","query_text":"upsert into `/Root/test_auditlog.py/test-table` (id, value) values (4, 4), (5, 5)","start_time":"2025-05-07T09:07:52.295302Z","subject":"root@builtin","detailed_status":"SUCCESS","operation":"ExecuteDataQueryRequest","component":"grpc-proxy"} >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_and_read_message[tables_format_v1-fifo] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_does_not_change_visibility_for_deleted_message[tables_format_v1-std] >> TPersQueueTest::TClusterTrackerTest [GOOD] >> TPersQueueTest::TestReadPartitionByGroupId >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delay_one_message[tables_format_v0-fifo] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_read_dont_stall[tables_format_v1] >> TStateStorageTest::ShouldCountStates [GOOD] >> TStateStorageTest::ShouldCountStatesNonExistentCheckpoint >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_zero_visibility_timeout_works[tables_format_v0-fifo] >> 
test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delay_message_batch[tables_format_v0-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delay_message_batch[tables_format_v0-std] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_does_not_change_visibility_not_in_flight[tables_format_v1-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_does_not_change_visibility_not_in_flight[tables_format_v1-std] >> TStateStorageTest::ShouldCountStatesNonExistentCheckpoint [GOOD] >> DataShardVolatile::CompactedVolatileChangesAbort [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_can_read_new_written_data_on_visibility_timeout[tables_format_v1] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_can_read_from_different_groups[tables_format_v0] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_message_batch[tables_format_v1-fifo] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_receive_attempt_reloads_same_messages[tables_format_v1-after_crutch_batch] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_visibility_timeout_works[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_wrong_attribute_name[tables_format_v0] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_does_not_change_visibility_not_in_flight[tables_format_v1-std] [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_volatile/unittest >> DataShardVolatile::CompactedVolatileChangesAbort [GOOD] Test command err: 2025-05-07T09:04:05.352201Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:105:2151], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests }
2025-05-07T09:04:05.352431Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError;
2025-05-07T09:04:05.352853Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError;
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0028bf/r3tmp/tmphoyMEu/pdisk_1.dat
2025-05-07T09:04:05.987653Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480
2025-05-07T09:04:06.032077Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded
2025-05-07T09:04:06.088581Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:59:2106] HANDLE TEvClientConnected success connect from tablet# 72057594046447617
2025-05-07T09:04:06.089479Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617
2025-05-07T09:04:06.091174Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2025-05-07T09:04:06.092192Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting
2025-05-07T09:04:06.105408Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected
2025-05-07T09:04:06.291094Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:59:2106] Handle TEvProposeTransaction
2025-05-07T09:04:06.291186Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:59:2106] TxId# 281474976715657 ProcessProposeTransaction
2025-05-07T09:04:06.291362Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:59:2106] Cookie# 0 userReqId# "" txid# 281474976715657 SEND to# [1:640:2548]
2025-05-07T09:04:06.423628Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1520: Actor# [1:640:2548] txid# 281474976715657 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/Root" OperationType: ESchemeOpCreateTable CreateTable { Name: "table-1" Columns { Name: "key" Type: "Uint32" FamilyName: "" NotNull: false } Columns { Name: "value" Type: "Uint32" FamilyName: "" NotNull: false } KeyColumnNames: "key" UniformPartitionsCount: 1 } } } ExecTimeoutPeriod: 18446744073709551615
2025-05-07T09:04:06.423756Z node 1 :TX_PROXY DEBUG: schemereq.cpp:563: Actor# [1:640:2548] txid# 281474976715657 Bootstrap, UserSID: CheckAdministrator: 0 CheckDatabaseAdministrator: 0
2025-05-07T09:04:06.424349Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1585: Actor# [1:640:2548] txid# 281474976715657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P6
2025-05-07T09:04:06.424446Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1575: Actor# [1:640:2548] txid# 281474976715657 TEvNavigateKeySet requested from SchemeCache
2025-05-07T09:04:06.424758Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1408: Actor# [1:640:2548] txid# 281474976715657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0
2025-05-07T09:04:06.424959Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1455: Actor# [1:640:2548] HANDLE EvNavigateKeySetResult, txid# 281474976715657 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 1000 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true
2025-05-07T09:04:06.425063Z node 1 :TX_PROXY DEBUG: schemereq.cpp:102: Actor# [1:640:2548] txid# 281474976715657 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715657 TabletId# 72057594046644480}
2025-05-07T09:04:06.426833Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480
2025-05-07T09:04:06.427294Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1310: Actor# [1:640:2548] txid# 281474976715657 HANDLE EvClientConnected
2025-05-07T09:04:06.427934Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1332: Actor# [1:640:2548] txid# 281474976715657 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715657}
2025-05-07T09:04:06.428001Z node 1 :TX_PROXY DEBUG: schemereq.cpp:543: Actor# [1:640:2548] txid# 281474976715657 SEND to# [1:594:2519] Source {TEvProposeTransactionStatus txid# 281474976715657 Status# 53}
2025-05-07T09:04:06.470302Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3098: StateInit, received event# 268828672, Sender [1:656:2563], Recipient [1:665:2569]: NKikimr::TEvTablet::TEvBoot
2025-05-07T09:04:06.471431Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3098: StateInit, received event# 268828673, Sender [1:656:2563], Recipient [1:665:2569]: NKikimr::TEvTablet::TEvRestored
2025-05-07T09:04:06.471979Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:665:2569]
2025-05-07T09:04:06.472254Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute
2025-05-07T09:04:06.523110Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3111: StateInactive, received event# 268828684, Sender [1:656:2563], Recipient [1:665:2569]: NKikimr::TEvTablet::TEvFollowerSyncComplete
2025-05-07T09:04:06.523840Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete
2025-05-07T09:04:06.523960Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute
2025-05-07T09:04:06.526586Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888
2025-05-07T09:04:06.526717Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888
2025-05-07T09:04:06.526779Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888
2025-05-07T09:04:06.528551Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete
2025-05-07T09:04:06.528727Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute
2025-05-07T09:04:06.528852Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:681:2569] in generation 1
2025-05-07T09:04:06.539693Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete
2025-05-07T09:04:06.583688Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888
2025-05-07T09:04:06.583863Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params
2025-05-07T09:04:06.583945Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:683:2579]
2025-05-07T09:04:06.583983Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888
2025-05-07T09:04:06.584019Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme
2025-05-07T09:04:06.584057Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888
2025-05-07T09:04:06.584269Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 2146435072, Sender [1:665:2569], Recipient [1:665:2569]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction
2025-05-07T09:04:06.584980Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3155: StateWork, processing event TEvPrivate::TEvProgressTransaction
2025-05-07T09:04:06.586120Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888
2025-05-07T09:04:06.586247Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888
2025-05-07T09:04:06.586328Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888
2025-05-07T09:04:06.586377Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0
2025-05-07T09:04:06.586435Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888
2025-05-07T09:04:06.586477Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations
2025-05-07T09:04:06.586521Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888
2025-05-07T09:04:06.586557Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0
2025-05-07T09:04:06.586614Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888
2025-05-07T09:04:06.586745Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269877761, Sender [1:670:2571], Recipient [1:665:2569]: NKikimr::TEvTabletPipe::TEvServerConnected
2025-05-07T09:04:06.586786Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3166: StateWork, processing event TEvTabletPipe::TEvServerConnected
2025-05-07T09:04:06.586844Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:661:2566], serverId# [1:670:2571], sessionId# [0:0:0]
2025-05-07T09:04:06.588034Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269549568, Sender [1:410:2405], Recipient [1:670:2571]
2025-05-07T09:04:06.588089Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3136: StateWork, processing event TEvDataShard::TEvProposeTransaction
2025-05-07T09:04:06.588201Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888
2025-05-07T09:04:06.588514Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx
2025-05-07T09:04:06.588575Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1
2025-05-07T09:04:06.588659Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888
2025-05-07T09:04:06.588797Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts
2025-05-07T09:04:06.588836Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx
2025-05-07T09:04:06.588872Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx
2025-05-07T09:04:06.588908Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx
2025-05-07T09:04:06.589186Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 ... 9:08:09.218682Z node 26 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:332: TClient[72075186224037888] shutdown pipe due to pending shutdown request [26:967:2772]
2025-05-07T09:08:09.218788Z node 26 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:505: TClient[72075186224037888] notify reset [26:967:2772]
2025-05-07T09:08:09.219126Z node 26 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269553210, Sender [26:966:2771], Recipient [26:696:2584]: NKikimrTxDataShard.TEvCompactTable PathId { OwnerId: 72057594046644480 LocalId: 2 } CompactBorrowed: false
2025-05-07T09:08:09.219289Z node 26 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:16} Tx{28, NKikimr::NDataShard::TDataShard::TTxCompactTable} queued, type NKikimr::NDataShard::TDataShard::TTxCompactTable
2025-05-07T09:08:09.219433Z node 26 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:16} Tx{28, NKikimr::NDataShard::TDataShard::TTxCompactTable} took 4194304b of static mem, Memory{4194304 dyn 0}
2025-05-07T09:08:09.219605Z node 26 :TABLET_EXECUTOR DEBUG: TCompactionLogic PrepareForceCompaction for 72075186224037888 table 1001, mode Full, forced state None, forced mode Full
2025-05-07T09:08:09.219816Z node 26 :TX_DATASHARD INFO: datashard__compaction.cpp:141: Started background compaction# 1 of 72075186224037888 tableId# 2 localTid# 1001, requested from [26:966:2771], partsCount# 0, memtableSize# 656, memtableWaste# 3952, memtableRows# 2
2025-05-07T09:08:09.220002Z node 26 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:16} Tx{28, NKikimr::NDataShard::TDataShard::TTxCompactTable} hope 1 -> done Change{16, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb}
2025-05-07T09:08:09.223636Z node 26 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:16} Tx{28, NKikimr::NDataShard::TDataShard::TTxCompactTable} release 4194304b of static, Memory{0 dyn 0}
2025-05-07T09:08:09.224241Z node 26 :TABLET_EXECUTOR DEBUG: TGenCompactionStrategy PrepareCompaction for 72075186224037888: task 1, edge 9223372036854775807/0, generation 0
2025-05-07T09:08:09.224373Z node 26 :TABLET_EXECUTOR INFO: Leader{72075186224037888:1:16} starting compaction
2025-05-07T09:08:09.224942Z node 26 :TABLET_EXECUTOR INFO: Leader{72075186224037888:1:17} starting Scan{1 on 1001, Compact{72075186224037888.1.16, eph 1}}
2025-05-07T09:08:09.225154Z node 26 :TABLET_EXECUTOR INFO: Leader{72075186224037888:1:17} started compaction 1
2025-05-07T09:08:09.225277Z node 26 :TABLET_EXECUTOR DEBUG: TGenCompactionStrategy PrepareCompaction for 72075186224037888 started compaction 1 generation 0
... blocking NKikimr::TEvBlobStorage::TEvPut from TABLET_REQ_WRITE_LOG to BS_PROXY_ACTOR cookie 3713319701547938349
2025-05-07T09:08:09.229569Z node 26 :TABLET_EXECUTOR INFO: Leader{72075186224037888:1:17} Compact 1 on TGenCompactionParams{1001: gen 0 epoch +inf, 0 parts} step 16, product {tx status + 1 parts epoch 2} done
2025-05-07T09:08:09.230370Z node 26 :TABLET_EXECUTOR DEBUG: TGenCompactionStrategy CompactionFinished for 72075186224037888: compaction 1, generation 0
2025-05-07T09:08:09.230571Z node 26 :TABLET_EXECUTOR DEBUG: TGenCompactionStrategy CheckGeneration for 72075186224037888 generation 1, state Free, final id 0, final level 0
2025-05-07T09:08:09.230683Z node 26 :TABLET_EXECUTOR DEBUG: TGenCompactionStrategy CheckGeneration for 72075186224037888 generation 3, state Free, final id 0, final level 0
2025-05-07T09:08:09.231335Z node 26 :TX_DATASHARD DEBUG: datashard__compaction.cpp:203: CompactionComplete of tablet# 72075186224037888, table# 1001, finished edge# 1, ts 1970-01-01T00:00:01.521666Z
2025-05-07T09:08:09.231557Z node 26 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:18} Tx{29, NKikimr::NDataShard::TDataShard::TTxPersistFullCompactionTs} queued, type NKikimr::NDataShard::TDataShard::TTxPersistFullCompactionTs
2025-05-07T09:08:09.231709Z node 26 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:18} Tx{29, NKikimr::NDataShard::TDataShard::TTxPersistFullCompactionTs} took 4194304b of static mem, Memory{4194304 dyn 0}
2025-05-07T09:08:09.231855Z node 26 :TX_DATASHARD DEBUG: datashard__compaction.cpp:240: ReplyCompactionWaiters of tablet# 72075186224037888, table# 1001, finished edge# 1, front# 1
2025-05-07T09:08:09.232000Z node 26 :TX_DATASHARD DEBUG: datashard__compaction.cpp:260: ReplyCompactionWaiters of tablet# 72075186224037888, table# 1001 sending TEvCompactTableResult to# [26:966:2771]pathId# [OwnerId: 72057594046644480, LocalPathId: 2]
2025-05-07T09:08:09.232782Z node 26 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:18} Tx{29, NKikimr::NDataShard::TDataShard::TTxPersistFullCompactionTs} hope 1 -> done Change{17, redo 83b alter 0b annex 0, ~{ 27 } -{ }, 0 gb}
2025-05-07T09:08:09.232966Z node 26 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:18} Tx{29, NKikimr::NDataShard::TDataShard::TTxPersistFullCompactionTs} release 4194304b of static, Memory{0 dyn 0}
... blocking NKikimr::TEvBlobStorage::TEvPut from TABLET_REQ_WRITE_LOG to BS_PROXY_ACTOR cookie 15727042008390786331
... blocking NKikimr::TEvBlobStorage::TEvPut from TABLET_REQ_WRITE_LOG to BS_PROXY_ACTOR cookie 1128639805682493824
========= Starting an immediate read =========
2025-05-07T09:08:09.473218Z node 26 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715663. Ctx: { TraceId: 01jtn017cw0g6jde9ddv3ze3f4, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=26&id=YTBkMTZkNWItZTRmOWQ1M2QtYzBjMGM5YTEtYjE0MmNlYWI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root
2025-05-07T09:08:09.492985Z node 26 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:155: TClient[72075186224037888] send [26:900:2718]
2025-05-07T09:08:09.493133Z node 26 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:629: TClient[72075186224037888] push event to server [26:900:2718]
2025-05-07T09:08:09.493690Z node 26 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269553215, Sender [26:992:2779], Recipient [26:696:2584]: NKikimrTxDataShard.TEvRead ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 ResultFormat: FORMAT_CELLVEC MaxRows: 32767 MaxBytes: 5242880 Reverse: false KeysSize: 1
2025-05-07T09:08:09.494034Z node 26 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:19} Tx{30, NKikimr::NDataShard::TDataShard::TTxReadViaPipeline} queued, type NKikimr::NDataShard::TDataShard::TTxReadViaPipeline
2025-05-07T09:08:09.494201Z node 26 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:19} Tx{30, NKikimr::NDataShard::TDataShard::TTxReadViaPipeline} took 4194304b of static mem, Memory{4194304 dyn 0}
2025-05-07T09:08:09.494441Z node 26 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2435: TTxReadViaPipeline execute: at tablet# 72075186224037888, FollowerId 0
2025-05-07T09:08:09.494578Z node 26 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 72075186224037888 CompleteEdge# v1522/281474976715662 IncompleteEdge# v{min} UnprotectedReadEdge# v{min} ImmediateWriteEdge# v{min} ImmediateWriteEdgeReplied# v{min}
2025-05-07T09:08:09.494708Z node 26 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2538: 72075186224037888 changed HEAD read to non-repeatable v1522/18446744073709551615
2025-05-07T09:08:09.494870Z node 26 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:5] at 72075186224037888 on unit CheckRead
2025-05-07T09:08:09.495093Z node 26 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:5] at 72075186224037888 is Executed
2025-05-07T09:08:09.495204Z node 26 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:5] at 72075186224037888 executing on unit CheckRead
2025-05-07T09:08:09.495310Z node 26 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:5] at 72075186224037888 to execution unit BuildAndWaitDependencies
2025-05-07T09:08:09.495400Z node 26 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:5] at 72075186224037888 on unit BuildAndWaitDependencies
2025-05-07T09:08:09.495487Z node 26 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:5] at 72075186224037888
2025-05-07T09:08:09.495595Z node 26 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:5] at 72075186224037888 is Executed
2025-05-07T09:08:09.495636Z node 26 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:5] at 72075186224037888 executing on unit BuildAndWaitDependencies
2025-05-07T09:08:09.495668Z node 26 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:5] at 72075186224037888 to execution unit ExecuteRead
2025-05-07T09:08:09.495702Z node 26 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:5] at 72075186224037888 on unit ExecuteRead
2025-05-07T09:08:09.495946Z node 26 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1566: 72075186224037888 Execute read# 1, request: { ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 ResultFormat: FORMAT_CELLVEC MaxRows: 32767 MaxBytes: 5242880 Reverse: false }
2025-05-07T09:08:09.496300Z node 26 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:5] at 72075186224037888 is DelayComplete
2025-05-07T09:08:09.496369Z node 26 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:5] at 72075186224037888 executing on unit ExecuteRead
2025-05-07T09:08:09.496478Z node 26 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:5] at 72075186224037888 to execution unit CompletedOperations
2025-05-07T09:08:09.496564Z node 26 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:5] at 72075186224037888 on unit CompletedOperations
2025-05-07T09:08:09.496633Z node 26 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:5] at 72075186224037888 is Executed
2025-05-07T09:08:09.496665Z node 26 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:5] at 72075186224037888 executing on unit CompletedOperations
2025-05-07T09:08:09.496719Z node 26 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:5] at 72075186224037888 has finished
2025-05-07T09:08:09.496832Z node 26 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2670: TTxReadViaPipeline(69) Execute with status# Executed at tablet# 72075186224037888
2025-05-07T09:08:09.497049Z node 26 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:19} Tx{30, NKikimr::NDataShard::TDataShard::TTxReadViaPipeline} hope 1 -> done Change{18, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb}
2025-05-07T09:08:09.497231Z node 26 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:19} Tx{30, NKikimr::NDataShard::TDataShard::TTxReadViaPipeline} release 4194304b of static, Memory{0 dyn 0}
2025-05-07T09:08:09.620752Z node 26 :TABLET_EXECUTOR DEBUG: Leader{72057594046316545:2:13} Tx{20, NKikimr::NFlatTxCoordinator::TTxCoordinator::TTxPlanStep} queued, type NKikimr::NFlatTxCoordinator::TTxCoordinator::TTxPlanStep
2025-05-07T09:08:09.620969Z node 26 :TABLET_EXECUTOR DEBUG: Leader{72057594046316545:2:13} Tx{20, NKikimr::NFlatTxCoordinator::TTxCoordinator::TTxPlanStep} took 4194304b of static mem, Memory{4194304 dyn 0}
2025-05-07T09:08:09.621309Z node 26 :TABLET_EXECUTOR DEBUG: Leader{72057594046316545:2:13} Tx{20, NKikimr::NFlatTxCoordinator::TTxCoordinator::TTxPlanStep} hope 1 -> done Change{12, redo 134b alter 0b annex 0, ~{ 2 } -{ }, 0 gb}
2025-05-07T09:08:09.621468Z node 26 :TABLET_EXECUTOR DEBUG: Leader{72057594046316545:2:13} Tx{20, NKikimr::NFlatTxCoordinator::TTxCoordinator::TTxPlanStep} release 4194304b of static, Memory{0 dyn 0}
2025-05-07T09:08:09.622519Z node 26 :TABLET_EXECUTOR DEBUG: Leader{72057594046316545:2:14} commited cookie 1 for step 13
2025-05-07T09:08:09.622905Z node 26 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:155: TClient[72057594046382081] send [26:539:2480]
2025-05-07T09:08:09.623015Z node 26 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:629: TClient[72057594046382081] push event to server [26:539:2480]
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delete_message_batch_deduplicates_receipt_handle[tables_format_v1-fifo] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delete_message_batch_deduplicates_receipt_handle[tables_format_v1-std]
>> DataShardVolatile::NotCachingAbortingDeletes-UseSink [GOOD]
>> DataShardVolatile::GracefulShardRestartNoEarlyReadSetAck
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_message_batch[tables_format_v1-fifo]
>> test_auditlog.py::test_single_dml_query_logged[delete] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_wrong_attribute_name[tables_format_v0] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_wrong_attribute_name[tables_format_v1]
>> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_write_read_delete_many_groups[tables_format_v1] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delay_message_batch[tables_format_v0-std] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delay_message_batch[tables_format_v1-fifo]
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_wrong_attribute_name[tables_format_v1] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_wrong_delete_fails[tables_format_v0]
------- [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/checkpoint_storage/ut/unittest >> TStateStorageTest::ShouldCountStatesNonExistentCheckpoint [GOOD]
Test command err: 2025-05-07T09:07:50.482094Z node 1 :STREAMS_STORAGE_SERVICE INFO: gc.cpp:83: Successfully bootstrapped storage GC [1:36:2083]
Count graph descriptions query:
--!syntax_v1
PRAGMA TablePathPrefix("local/TGcTestShouldRemovePreviousCheckpoints");
SELECT * FROM checkpoints_graphs_description;
2025-05-07T09:07:50.996367Z node 1 :STREAMS_STORAGE_SERVICE DEBUG: gc.cpp:93: GC received upperbound checkpoint 11:3 for graph 'graph'
2025-05-07T09:07:52.015237Z node 1 :STREAMS_STORAGE_SERVICE DEBUG: gc.cpp:170: GC deleted checkpoints of graph 'graph' up to 11:3
Count graph descriptions query:
--!syntax_v1
PRAGMA TablePathPrefix("local/TGcTestShouldRemovePreviousCheckpoints");
SELECT * FROM checkpoints_graphs_description;
2025-05-07T09:08:02.983306Z node 2 :STREAMS_STORAGE_SERVICE INFO: gc.cpp:83: Successfully bootstrapped storage GC [2:36:2083]
Count graph descriptions query:
--!syntax_v1
PRAGMA TablePathPrefix("local/ShouldIgnoreIncrementCheckpoint");
SELECT * FROM checkpoints_graphs_description;
2025-05-07T09:08:03.518204Z node 2 :STREAMS_STORAGE_SERVICE DEBUG: gc.cpp:93: GC received upperbound checkpoint 11:3 for graph 'graph'
2025-05-07T09:08:03.520500Z node 2 :STREAMS_STORAGE_SERVICE DEBUG: gc.cpp:96: GC skip increment checkpoint for graph 'graph'
>> test_auditlog.py::test_broken_dynconfig[_client_session_pool_with_auth_root-_bad_dynconfig] [GOOD]
|93.6%| [TA] $(B)/ydb/core/fq/libs/checkpoint_storage/ut/test-results/unittest/{meta.json ... results_accumulator.log}
|93.6%| [TA] {RESULT} $(B)/ydb/core/fq/libs/checkpoint_storage/ut/test-results/unittest/{meta.json ... results_accumulator.log}
------- [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_dml_begin_commit_logged [GOOD]
Test command err: AAA /home/runner/.ya/build/build_root/zvgn/0043ab/ydb/tests/functional/audit/test-results/py3test/testing_out_stuff/test_auditlog/chunk10/testing_out_stuff/test_auditlog.py.test_dml_begin_commit_logged/audit.txt
2025-05-07T09:08:01.841557Z: {"tx_id":"01jtn0105aawhbsahp5fb4p1t6","database":"/Root/test_auditlog.py","end_time":"2025-05-07T09:08:01.841498Z","sanitized_token":"**** (B6C6F477)","remote_address":"127.0.0.1","status":"SUCCESS","start_time":"2025-05-07T09:08:01.834341Z","subject":"root@builtin","detailed_status":"SUCCESS","operation":"BeginTransactionRequest","component":"grpc-proxy"}
2025-05-07T09:08:02.044942Z: {"tx_id":"01jtn0105aawhbsahp5fb4p1t6","database":"/Root/test_auditlog.py","end_time":"2025-05-07T09:08:02.044895Z","sanitized_token":"**** (B6C6F477)","remote_address":"127.0.0.1","commit_tx":"0","status":"SUCCESS","query_text":"update `/Root/test_auditlog.py/test-table` set value = 0 where id = 1","start_time":"2025-05-07T09:08:01.849148Z","subject":"root@builtin","detailed_status":"SUCCESS","operation":"ExecuteDataQueryRequest","component":"grpc-proxy"}
2025-05-07T09:08:02.069463Z: {"tx_id":"01jtn0105aawhbsahp5fb4p1t6","database":"/Root/test_auditlog.py","end_time":"2025-05-07T09:08:02.069402Z","sanitized_token":"**** (B6C6F477)","remote_address":"127.0.0.1","status":"SUCCESS","start_time":"2025-05-07T09:08:02.051794Z","subject":"root@builtin","detailed_status":"SUCCESS","operation":"CommitTransactionRequest","component":"grpc-proxy"}
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_visibility_timeout_works[tables_format_v1]
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_wrong_delete_fails[tables_format_v0] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_wrong_delete_fails[tables_format_v1]
>> TPersQueueTest::CreateTopicWithMeteringMode [GOOD]
>> TPersQueueTest::DefaultMeteringMode
>> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_retryable_iam_error[tables_format_v0] [GOOD]
>> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_retryable_iam_error[tables_format_v1]
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delete_message_batch_deduplicates_receipt_handle[tables_format_v1-std] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delete_message_batch_works[tables_format_v0]
>> test_fifo_messaging.py::TestSqsFifoMicroBatchesWithPath::test_micro_batch_read[tables_format_v0] [GOOD]
>> test_fifo_messaging.py::TestSqsFifoMicroBatchesWithPath::test_micro_batch_read[tables_format_v1]
------- [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_broken_dynconfig[_client_session_pool_with_auth_root-_good_dynconfig] [GOOD]
Test command err: AAA /home/runner/.ya/build/build_root/zvgn/00439e/ydb/tests/functional/audit/test-results/py3test/testing_out_stuff/test_auditlog/chunk7/testing_out_stuff/test_auditlog.py.test_broken_dynconfig._client_session_pool_with_auth_root-_good_dynconfig/audit.txt
2025-05-07T09:08:04.122812Z: {"sanitized_token":"**** (B6C6F477)","subject":"root@builtin","new_config":"\n---\nmetadata:\n kind: MainConfig\n cluster: \"\"\n version: 0\nconfig:\n yaml_config_enabled: true\nallowed_labels:\n node_id:\n type: string\n host:\n type: string\n tenant:\n type: string\nselector_config: []\n ","status":"SUCCESS","component":"console","operation":"REPLACE DYNCONFIG","remote_address":"127.0.0.1"}
>> test_auditlog.py::test_single_dml_query_logged[replace] [GOOD]
>> TPersQueueTest::DisableDeduplication [GOOD]
>> TPersQueueTest::InflightLimit
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_wrong_delete_fails[tables_format_v1] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_create_q_twice[tables_format_v0-fifo] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_create_q_twice[tables_format_v0-std]
>> test_fifo_messaging.py::TestSqsFifoMicroBatchesWithPath::test_micro_batch_read[tables_format_v1] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delay_message_batch[tables_format_v1-fifo] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delay_message_batch[tables_format_v1-std]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_create_queue_by_nonexistent_user_fails[tables_format_v1] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delay_message_batch[tables_format_v0-fifo]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_get_queue_attributes_only_attributes_table[tables_format_v1-std] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_get_queue_attributes_only_runtime_attributes[tables_format_v0-fifo]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_can_read_new_written_data_on_visibility_timeout[tables_format_v1]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_create_q_twice[tables_format_v0-std] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_create_q_twice[tables_format_v1-fifo]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_get_queue_attributes_only_runtime_attributes[tables_format_v0-fifo] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_get_queue_attributes_only_runtime_attributes[tables_format_v0-std]
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_queue_attributes[tables_format_v1-fifo]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_and_read_message[tables_format_v1-fifo] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_and_read_message[tables_format_v1-std]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_create_q_twice[tables_format_v1-fifo] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_create_q_twice[tables_format_v1-std]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_read_dont_stall[tables_format_v1] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_receive_with_very_big_visibility_timeout[tables_format_v0]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_and_read_message[tables_format_v1-std] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_and_read_multiple_messages[tables_format_v0]
>> test_auditlog.py::test_cloud_ids_are_logged[attrs0] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delete_message_batch_works[tables_format_v0] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delete_message_batch_works[tables_format_v1]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_create_q_twice[tables_format_v1-std] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_create_queue_by_nonexistent_user_fails[tables_format_v0]
>> TPersQueueTest::TestWriteStat [GOOD]
>> TPersQueueTest::TestWriteSessionsConflicts
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_create_queue_by_nonexistent_user_fails[tables_format_v0] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_get_queue_attributes_only_runtime_attributes[tables_format_v0-std] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_get_queue_attributes_only_runtime_attributes[tables_format_v1-fifo]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_empty_queue_url[tables_format_v0]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delete_message_batch_deduplicates_receipt_handle[tables_format_v0-std] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delete_message_batch_deduplicates_receipt_handle[tables_format_v1-fifo]
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_set_very_big_visibility_timeout[tables_format_v1] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_validates_message_attribute_value[tables_format_v0]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_receive_with_very_big_visibility_timeout[tables_format_v0] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_receive_with_very_big_visibility_timeout[tables_format_v1]
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delay_message_batch[tables_format_v1-std] [GOOD]
------- [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_dml_requests_logged_when_sid_is_unexpected [GOOD]
Test command err: AAA /home/runner/.ya/build/build_root/zvgn/00439d/ydb/tests/functional/audit/test-results/py3test/testing_out_stuff/test_auditlog/chunk13/testing_out_stuff/test_auditlog.py.test_dml_requests_logged_when_sid_is_unexpected/audit.txt
2025-05-07T09:08:05.660671Z: {"tx_id":"{none}","database":"/Root/test_auditlog.py","end_time":"2025-05-07T09:08:05.660622Z","sanitized_token":"othe****ltin (27F910A9)","begin_tx":"1","remote_address":"127.0.0.1","commit_tx":"1","status":"SUCCESS","query_text":"insert into `/Root/test_auditlog.py/test-table` (id, value) values (100, 100), (101, 101)","start_time":"2025-05-07T09:08:05.512427Z","subject":"other-user@builtin","detailed_status":"SUCCESS","operation":"ExecuteDataQueryRequest","component":"grpc-proxy"}
2025-05-07T09:08:05.988936Z: {"tx_id":"{none}","database":"/Root/test_auditlog.py","end_time":"2025-05-07T09:08:05.988893Z","sanitized_token":"othe****ltin (27F910A9)","begin_tx":"1","remote_address":"127.0.0.1","commit_tx":"1","status":"SUCCESS","query_text":"delete from `/Root/test_auditlog.py/test-table` where id = 100 or id = 101","start_time":"2025-05-07T09:08:05.775202Z","subject":"other-user@builtin","detailed_status":"SUCCESS","operation":"ExecuteDataQueryRequest","component":"grpc-proxy"}
2025-05-07T09:08:06.406851Z: {"tx_id":"{none}","database":"/Root/test_auditlog.py","end_time":"2025-05-07T09:08:06.406812Z","sanitized_token":"othe****ltin (27F910A9)","begin_tx":"1","remote_address":"127.0.0.1","commit_tx":"1","status":"SUCCESS","query_text":"select id from `/Root/test_auditlog.py/test-table`","start_time":"2025-05-07T09:08:06.098872Z","subject":"other-user@builtin","detailed_status":"SUCCESS","operation":"ExecuteDataQueryRequest","component":"grpc-proxy"}
2025-05-07T09:08:06.715673Z: {"tx_id":"{none}","database":"/Root/test_auditlog.py","end_time":"2025-05-07T09:08:06.715633Z","sanitized_token":"othe****ltin (27F910A9)","begin_tx":"1","remote_address":"127.0.0.1","commit_tx":"1","status":"SUCCESS","query_text":"update `/Root/test_auditlog.py/test-table` set value = 0 where id = 1","start_time":"2025-05-07T09:08:06.517489Z","subject":"other-user@builtin","detailed_status":"SUCCESS","operation":"ExecuteDataQueryRequest","component":"grpc-proxy"}
2025-05-07T09:08:06.924483Z: {"tx_id":"{none}","database":"/Root/test_auditlog.py","end_time":"2025-05-07T09:08:06.924441Z","sanitized_token":"othe****ltin (27F910A9)","begin_tx":"1","remote_address":"127.0.0.1","commit_tx":"1","status":"SUCCESS","query_text":"replace into `/Root/test_auditlog.py/test-table` (id, value) values (2, 3), (3, 3)","start_time":"2025-05-07T09:08:06.827449Z","subject":"other-user@builtin","detailed_status":"SUCCESS","operation":"ExecuteDataQueryRequest","component":"grpc-proxy"}
2025-05-07T09:08:07.106764Z: {"tx_id":"{none}","database":"/Root/test_auditlog.py","end_time":"2025-05-07T09:08:07.106722Z","sanitized_token":"othe****ltin (27F910A9)","begin_tx":"1","remote_address":"127.0.0.1","commit_tx":"1","status":"SUCCESS","query_text":"upsert into `/Root/test_auditlog.py/test-table` (id, value) values (4, 4), (5, 5)","start_time":"2025-05-07T09:08:07.035307Z","subject":"other-user@builtin","detailed_status":"SUCCESS","operation":"ExecuteDataQueryRequest","component":"grpc-proxy"}
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_get_queue_attributes_only_runtime_attributes[tables_format_v1-fifo] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_get_queue_attributes_only_runtime_attributes[tables_format_v1-std]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delay_message_batch[tables_format_v0-fifo] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delay_message_batch[tables_format_v0-std]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_receive_with_very_big_visibility_timeout[tables_format_v1] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_and_read_message[tables_format_v0-fifo]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_get_queue_attributes_only_runtime_attributes[tables_format_v1-std] [GOOD]
------- [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_dml_requests_arent_logged_when_sid_is_expected [GOOD]
Test command err: AAA /home/runner/.ya/build/build_root/zvgn/0043a2/ydb/tests/functional/audit/test-results/py3test/testing_out_stuff/test_auditlog/chunk12/testing_out_stuff/test_auditlog.py.test_dml_requests_arent_logged_when_sid_is_expected/audit.txt
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_validates_message_attribute_value[tables_format_v0] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_validates_message_attribute_value[tables_format_v1]
>> DataShardVolatile::GracefulShardRestartNoEarlyReadSetAck [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_and_read_message[tables_format_v0-fifo] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_and_read_message[tables_format_v0-std]
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delete_message_works[tables_format_v0] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delete_message_works[tables_format_v1]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_message_batch[tables_format_v1-fifo] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_message_batch[tables_format_v1-std]
------- [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_single_dml_query_logged[delete] [GOOD]
Test command err: AAA /home/runner/.ya/build/build_root/zvgn/004388/ydb/tests/functional/audit/test-results/py3test/testing_out_stuff/test_auditlog/chunk16/testing_out_stuff/test_auditlog.py.test_single_dml_query_logged.delete/audit.txt
2025-05-07T09:08:15.008317Z: {"tx_id":"{none}","database":"/Root/test_auditlog.py","end_time":"2025-05-07T09:08:15.008277Z","sanitized_token":"**** (B6C6F477)","begin_tx":"1","remote_address":"127.0.0.1","commit_tx":"1","status":"SUCCESS","query_text":"delete from `/Root/test_auditlog.py/test-table` where id = 100 or id = 101","start_time":"2025-05-07T09:08:14.775042Z","subject":"root@builtin","detailed_status":"SUCCESS","operation":"ExecuteDataQueryRequest","component":"grpc-proxy"}
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_validates_message_attribute_value[tables_format_v1] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_validates_message_attributes[tables_format_v0]
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delete_message_batch_works[tables_format_v1] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delay_one_message[tables_format_v0-fifo] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delay_one_message[tables_format_v0-std]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_message_batch[tables_format_v1-std] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_to_nonexistent_queue[tables_format_v0]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_and_read_message[tables_format_v0-std] [GOOD]
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_volatile/unittest >> DataShardVolatile::GracefulShardRestartNoEarlyReadSetAck [GOOD]
Test command err: 2025-05-07T09:04:05.352161Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:105:2151], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T09:04:05.352376Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T09:04:05.352863Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0028b6/r3tmp/tmpwXwlOh/pdisk_1.dat 2025-05-07T09:04:05.982680Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-05-07T09:04:06.030270Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:04:06.091920Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:59:2106] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-05-07T09:04:06.092702Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 2025-05-07T09:04:06.092978Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:04:06.093064Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:04:06.106026Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:04:06.285556Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:59:2106] Handle TEvProposeTransaction 2025-05-07T09:04:06.285652Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:59:2106] TxId# 281474976715657 ProcessProposeTransaction 2025-05-07T09:04:06.286765Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:59:2106] Cookie# 0 userReqId# "" txid# 281474976715657 SEND to# [1:640:2548] 2025-05-07T09:04:06.417596Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1520: Actor# [1:640:2548] txid# 281474976715657 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/Root" OperationType: ESchemeOpCreateTable CreateTable { Name: "table-1" Columns { Name: "key" Type: "Uint32" FamilyName: "" NotNull: false } Columns { Name: "value" Type: "Uint32" FamilyName: "" NotNull: false } Columns { Name: "value2" Type: "Uint32" FamilyName: "" NotNull: false } KeyColumnNames: "key" UniformPartitionsCount: 1 } } } ExecTimeoutPeriod: 18446744073709551615 2025-05-07T09:04:06.417707Z node 1 :TX_PROXY DEBUG: schemereq.cpp:563: Actor# [1:640:2548] txid# 281474976715657 Bootstrap, UserSID: CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-05-07T09:04:06.419300Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1585: Actor# [1:640:2548] txid# 281474976715657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P6 2025-05-07T09:04:06.419403Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1575: Actor# [1:640:2548] txid# 281474976715657 TEvNavigateKeySet requested from SchemeCache 2025-05-07T09:04:06.419729Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1408: Actor# [1:640:2548] txid# 281474976715657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-05-07T09:04:06.419894Z node 1 
:TX_PROXY DEBUG: schemereq.cpp:1455: Actor# [1:640:2548] HANDLE EvNavigateKeySetResult, txid# 281474976715657 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 1000 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-05-07T09:04:06.420012Z node 1 :TX_PROXY DEBUG: schemereq.cpp:102: Actor# [1:640:2548] txid# 281474976715657 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715657 TabletId# 72057594046644480} 2025-05-07T09:04:06.423717Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-05-07T09:04:06.424171Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1310: Actor# [1:640:2548] txid# 281474976715657 HANDLE EvClientConnected 2025-05-07T09:04:06.426094Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1332: Actor# [1:640:2548] txid# 281474976715657 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715657} 2025-05-07T09:04:06.426172Z node 1 :TX_PROXY DEBUG: schemereq.cpp:543: Actor# [1:640:2548] txid# 281474976715657 SEND to# [1:594:2519] Source {TEvProposeTransactionStatus txid# 281474976715657 Status# 53} 2025-05-07T09:04:06.473303Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3098: StateInit, received event# 268828672, Sender [1:656:2563], Recipient [1:665:2569]: NKikimr::TEvTablet::TEvBoot 2025-05-07T09:04:06.474168Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3098: StateInit, received event# 268828673, Sender [1:656:2563], Recipient [1:665:2569]: NKikimr::TEvTablet::TEvRestored 2025-05-07T09:04:06.474637Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:665:2569] 2025-05-07T09:04:06.474887Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-05-07T09:04:06.523760Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3111: StateInactive, received event# 268828684, Sender [1:656:2563], Recipient [1:665:2569]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-05-07T09:04:06.524409Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-05-07T09:04:06.524506Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-05-07T09:04:06.526568Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-05-07T09:04:06.526667Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-05-07T09:04:06.526735Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-05-07T09:04:06.528558Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-05-07T09:04:06.528730Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-05-07T09:04:06.528853Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:681:2569] in generation 1 2025-05-07T09:04:06.539691Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-05-07T09:04:06.581918Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 
2025-05-07T09:04:06.583462Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-05-07T09:04:06.583641Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:683:2579] 2025-05-07T09:04:06.583693Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-05-07T09:04:06.583726Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-05-07T09:04:06.583763Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:04:06.584012Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 2146435072, Sender [1:665:2569], Recipient [1:665:2569]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-05-07T09:04:06.584958Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3155: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-05-07T09:04:06.586135Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-05-07T09:04:06.586244Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-05-07T09:04:06.586342Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T09:04:06.586394Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-05-07T09:04:06.586437Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-05-07T09:04:06.586476Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-05-07T09:04:06.586507Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-05-07T09:04:06.586559Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-05-07T09:04:06.586603Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T09:04:06.586737Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269877761, Sender [1:670:2571], Recipient [1:665:2569]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-05-07T09:04:06.586774Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3166: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-05-07T09:04:06.586829Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:661:2566], serverId# [1:670:2571], sessionId# [0:0:0] 2025-05-07T09:04:06.588038Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269549568, Sender [1:410:2405], Recipient [1:670:2571] 2025-05-07T09:04:06.588100Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3136: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-05-07T09:04:06.588212Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-05-07T09:04:06.588520Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute 
[0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-05-07T09:04:06.588597Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-05-07T09:04:06.588674Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-05-07T09:04:06.588731Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-05-07T09:04:06.588772Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx 2025-05-07T09:04:06.588807Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx 2025-05-07T09:04:06.588843Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-05-07T09:04:06.589174Z node 1 :TX_DATASHARD TRACE: datashard_pipeline ... pp:1916: Add [0:7] at 72075186224037889 to execution unit ExecuteRead 2025-05-07T09:08:28.758165Z node 28 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:7] at 72075186224037889 on unit ExecuteRead 2025-05-07T09:08:28.758271Z node 28 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1566: 72075186224037889 Execute read# 1, request: { ReadId: 1 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 Snapshot { Step: 1536 TxId: 18446744073709551615 } ResultFormat: FORMAT_CELLVEC MaxRows: 1000 MaxBytes: 5242880 Reverse: false TotalRowsLimit: 1000 } 2025-05-07T09:08:28.758484Z node 28 :TX_DATASHARD TRACE: datashard.cpp:2476: PromoteImmediatePostExecuteEdges at 72075186224037889 promoting UnprotectedReadEdge to v1536/18446744073709551615 2025-05-07T09:08:28.758528Z node 28 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2146: 72075186224037889 Complete read# {[28:1089:2858], 1} after executionsCount# 1 2025-05-07T09:08:28.758573Z node 28 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2120: 72075186224037889 read iterator# {[28:1089:2858], 1} sends rowCount# 1, bytes# 32, quota rows left# 999, quota bytes left# 5242848, hasUnreadQueries# 0, total queries# 1, firstUnprocessed# 0 2025-05-07T09:08:28.758652Z node 28 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2171: 72075186224037889 read iterator# {[28:1089:2858], 1} finished in read 2025-05-07T09:08:28.758705Z node 28 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:7] at 72075186224037889 is Executed 2025-05-07T09:08:28.758737Z node 28 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:7] at 72075186224037889 executing on unit ExecuteRead 2025-05-07T09:08:28.758765Z node 28 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:7] at 72075186224037889 to execution unit CompletedOperations 2025-05-07T09:08:28.758793Z node 28 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:7] at 72075186224037889 on unit CompletedOperations 2025-05-07T09:08:28.758840Z node 28 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:7] at 72075186224037889 is Executed 2025-05-07T09:08:28.758867Z node 28 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:7] at 72075186224037889 executing on unit CompletedOperations 
2025-05-07T09:08:28.758898Z node 28 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:7] at 72075186224037889 has finished
2025-05-07T09:08:28.758930Z node 28 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2670: TTxReadViaPipeline(69) Execute with status# Executed at tablet# 72075186224037889
2025-05-07T09:08:28.759025Z node 28 :TABLET_EXECUTOR DEBUG: Leader{72075186224037889:1:16} Tx{33, NKikimr::NDataShard::TDataShard::TTxReadViaPipeline} hope 1 -> done Change{16, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb}
2025-05-07T09:08:28.759080Z node 28 :TABLET_EXECUTOR DEBUG: Leader{72075186224037889:1:16} Tx{33, NKikimr::NDataShard::TDataShard::TTxReadViaPipeline} release 4194304b of static, Memory{0 dyn 0}
2025-05-07T09:08:28.759120Z node 28 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2719: TTxReadViaPipeline(69) Complete: at tablet# 72075186224037889
2025-05-07T09:08:28.760073Z node 28 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72075186224037890] ::Bootstrap [28:1092:2861]
2025-05-07T09:08:28.760140Z node 28 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72075186224037890] lookup [28:1092:2861]
2025-05-07T09:08:28.760192Z node 28 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:155: TClient[72075186224037889] send [28:939:2746]
2025-05-07T09:08:28.760223Z node 28 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:629: TClient[72075186224037889] push event to server [28:939:2746]
2025-05-07T09:08:28.760344Z node 28 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269553219, Sender [28:1089:2858], Recipient [28:704:2588]: NKikimrTxDataShard.TEvReadCancel ReadId: 1
2025-05-07T09:08:28.760394Z node 28 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3392: 72075186224037889 ReadCancel: { ReadId: 1 }
2025-05-07T09:08:28.760494Z node 28 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:149: TClient[72075186224037890] queue send [28:1092:2861]
2025-05-07T09:08:28.760558Z node 28 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:190: TClient[72075186224037890] forward result local node, try to connect [28:1092:2861]
2025-05-07T09:08:28.760601Z node 28 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:671: TClient[72075186224037890]::SendEvent [28:1092:2861]
2025-05-07T09:08:28.760772Z node 28 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269877761, Sender [28:1093:2862], Recipient [28:1045:2830]: NKikimr::TEvTabletPipe::TEvServerConnected
2025-05-07T09:08:28.760809Z node 28 :TX_DATASHARD TRACE: datashard_impl.h:3166: StateWork, processing event TEvTabletPipe::TEvServerConnected
2025-05-07T09:08:28.760850Z node 28 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037890, clientId# [28:1092:2861], serverId# [28:1093:2862], sessionId# [0:0:0]
2025-05-07T09:08:28.760894Z node 28 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:310: TClient[72075186224037890] connected with status OK role: Leader [28:1092:2861]
2025-05-07T09:08:28.760934Z node 28 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:325: TClient[72075186224037890] send queued [28:1092:2861]
2025-05-07T09:08:28.760963Z node 28 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:629: TClient[72075186224037890] push event to server [28:1092:2861]
2025-05-07T09:08:28.761144Z node 28 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269553215, Sender [28:1089:2858], Recipient [28:1045:2830]: NKikimrTxDataShard.TEvRead ReadId: 2 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 Snapshot { Step: 1536 TxId: 18446744073709551615 } ResultFormat: FORMAT_CELLVEC MaxRows: 999 MaxBytes: 5242880 Reverse: false TotalRowsLimit: 999 RangesSize: 1
2025-05-07T09:08:28.761242Z node 28 :TABLET_EXECUTOR DEBUG: Leader{72075186224037890:2:4} Tx{13, NKikimr::NDataShard::TDataShard::TTxReadViaPipeline} queued, type NKikimr::NDataShard::TDataShard::TTxReadViaPipeline
2025-05-07T09:08:28.761294Z node 28 :TABLET_EXECUTOR DEBUG: Leader{72075186224037890:2:4} Tx{13, NKikimr::NDataShard::TDataShard::TTxReadViaPipeline} took 4194304b of static mem, Memory{4194304 dyn 0}
2025-05-07T09:08:28.761386Z node 28 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2435: TTxReadViaPipeline execute: at tablet# 72075186224037890, FollowerId 0
2025-05-07T09:08:28.761448Z node 28 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 72075186224037890 on unit CheckRead
2025-05-07T09:08:28.761513Z node 28 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 72075186224037890 is Executed
2025-05-07T09:08:28.761541Z node 28 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 72075186224037890 executing on unit CheckRead
2025-05-07T09:08:28.761567Z node 28 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 72075186224037890 to execution unit BuildAndWaitDependencies
2025-05-07T09:08:28.761602Z node 28 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 72075186224037890 on unit BuildAndWaitDependencies
2025-05-07T09:08:28.761655Z node 28 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:1] at 72075186224037890
2025-05-07T09:08:28.761696Z node 28 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 72075186224037890 is Executed
2025-05-07T09:08:28.761721Z node 28 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 72075186224037890 executing on unit BuildAndWaitDependencies
2025-05-07T09:08:28.761747Z node 28 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 72075186224037890 to execution unit ExecuteRead
2025-05-07T09:08:28.761774Z node 28 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 72075186224037890 on unit ExecuteRead
2025-05-07T09:08:28.761885Z node 28 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1566: 72075186224037890 Execute read# 1, request: { ReadId: 2 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 Snapshot { Step: 1536 TxId: 18446744073709551615 } ResultFormat: FORMAT_CELLVEC MaxRows: 999 MaxBytes: 5242880 Reverse: false TotalRowsLimit: 999 }
2025-05-07T09:08:28.762147Z node 28 :TX_DATASHARD TRACE: datashard.cpp:2476: PromoteImmediatePostExecuteEdges at 72075186224037890 promoting UnprotectedReadEdge to v1536/18446744073709551615
2025-05-07T09:08:28.762193Z node 28 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2146: 72075186224037890 Complete read# {[28:1089:2858], 2} after executionsCount# 1
2025-05-07T09:08:28.762231Z node 28 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2120: 72075186224037890 read iterator# {[28:1089:2858], 2} sends rowCount# 1, bytes# 32, quota rows left# 998, quota bytes left# 5242848, hasUnreadQueries# 0, total queries# 1, firstUnprocessed# 0
2025-05-07T09:08:28.762292Z node 28 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2171: 72075186224037890 read iterator# {[28:1089:2858], 2} finished in read
2025-05-07T09:08:28.762344Z node 28 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 72075186224037890 is Executed
2025-05-07T09:08:28.762372Z node 28 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 72075186224037890 executing on unit ExecuteRead
2025-05-07T09:08:28.762398Z node 28 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 72075186224037890 to execution unit CompletedOperations
2025-05-07T09:08:28.762426Z node 28 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 72075186224037890 on unit CompletedOperations
2025-05-07T09:08:28.762469Z node 28 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 72075186224037890 is Executed
2025-05-07T09:08:28.762493Z node 28 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 72075186224037890 executing on unit CompletedOperations
2025-05-07T09:08:28.762518Z node 28 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:1] at 72075186224037890 has finished
2025-05-07T09:08:28.762549Z node 28 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2670: TTxReadViaPipeline(69) Execute with status# Executed at tablet# 72075186224037890
2025-05-07T09:08:28.762641Z node 28 :TABLET_EXECUTOR DEBUG: Leader{72075186224037890:2:4} Tx{13, NKikimr::NDataShard::TDataShard::TTxReadViaPipeline} hope 1 -> done Change{17, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb}
2025-05-07T09:08:28.762693Z node 28 :TABLET_EXECUTOR DEBUG: Leader{72075186224037890:2:4} Tx{13, NKikimr::NDataShard::TDataShard::TTxReadViaPipeline} release 4194304b of static, Memory{0 dyn 0}
2025-05-07T09:08:28.762729Z node 28 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2719: TTxReadViaPipeline(69) Complete: at tablet# 72075186224037890
2025-05-07T09:08:28.763320Z node 28 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:155: TClient[72075186224037890] send [28:1092:2861]
2025-05-07T09:08:28.763357Z node 28 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:629: TClient[72075186224037890] push event to server [28:1092:2861]
2025-05-07T09:08:28.763474Z node 28 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269553219, Sender [28:1089:2858], Recipient [28:1045:2830]: NKikimrTxDataShard.TEvReadCancel ReadId: 2
2025-05-07T09:08:28.763519Z node 28 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3392: 72075186224037890 ReadCancel: { ReadId: 2 }
{ items { uint32_value: 1 } items { uint32_value: 1 } }, { items { uint32_value: 11 } items { uint32_value: 111 } }, { items { uint32_value: 21 } items { uint32_value: 21 } }
------- [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_broken_dynconfig[_client_session_pool_with_auth_root-_bad_dynconfig] [GOOD]
Test command err: AAA /home/runner/.ya/build/build_root/zvgn/00437b/ydb/tests/functional/audit/test-results/py3test/testing_out_stuff/test_auditlog/chunk6/testing_out_stuff/test_auditlog.py.test_broken_dynconfig._client_session_pool_with_auth_root-_bad_dynconfig/audit.txt
2025-05-07T09:08:16.852323Z: {"reason":"ydb/library/fyamlcpp/fyamlcpp.cpp:1068: \n6:12 plain scalar cannot start with '%'","sanitized_token":"**** (B6C6F477)","remote_address":"127.0.0.1","status":"ERROR","subject":"root@builtin","operation":"REPLACE DYNCONFIG","new_config":"\n---\n123metadata:\n kind: MainConfig\n cluster: \"\"\n version: %s\nconfig:\n yaml_config_enabled: true\nallowed_labels:\n node_id:\n type: string\n host:\n type: string\n tenant:\n type: string\nselector_config: []\n ","component":"console"}
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delete_message_batch_deduplicates_receipt_handle[tables_format_v1-fifo] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delete_message_batch_deduplicates_receipt_handle[tables_format_v1-std]
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_validates_message_attributes[tables_format_v0] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_validates_message_attributes[tables_format_v1]
>> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_can_read_from_different_groups[tables_format_v0]
|93.7%| [TA] $(B)/ydb/core/tx/datashard/ut_volatile/test-results/unittest/{meta.json ... results_accumulator.log}
>> data_correctness.py::TestDataCorrectness::test [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_and_read_multiple_messages[tables_format_v0] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_and_read_multiple_messages[tables_format_v1]
|93.7%| [TA] {RESULT} $(B)/ydb/core/tx/datashard/ut_volatile/test-results/unittest/{meta.json ... results_accumulator.log}
>> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_can_read_from_different_groups[tables_format_v0] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_to_nonexistent_queue[tables_format_v0] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_validates_message_attributes[tables_format_v1] [GOOD]
>> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_can_read_from_different_groups[tables_format_v1]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_to_nonexistent_queue[tables_format_v1]
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_get_queue_attributes_only_attributes_table[tables_format_v1-std]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delay_message_batch[tables_format_v0-std] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delay_message_batch[tables_format_v1-fifo]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_to_nonexistent_queue[tables_format_v1] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_set_very_big_visibility_timeout[tables_format_v0]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_validates_message_body[tables_format_v0]
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delete_message_works[tables_format_v1] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_does_not_change_visibility_for_deleted_message[tables_format_v0-fifo]
>> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_visibility_change_disables_receive_attempt_id[tables_format_v0-with_change_visibility] [GOOD]
>> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_visibility_change_disables_receive_attempt_id[tables_format_v0-with_delete_message]
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_and_read_message[tables_format_v1-fifo] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_does_not_change_visibility_for_deleted_message[tables_format_v1-std] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_and_read_message[tables_format_v1-std]
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_does_not_change_visibility_not_in_flight[tables_format_v0-fifo]
------- [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_single_dml_query_logged[replace] [GOOD]
Test command err: AAA /home/runner/.ya/build/build_root/zvgn/004371/ydb/tests/functional/audit/test-results/py3test/testing_out_stuff/test_auditlog/chunk18/testing_out_stuff/test_auditlog.py.test_single_dml_query_logged.replace/audit.txt
2025-05-07T09:08:20.397021Z: {"tx_id":"{none}","database":"/Root/test_auditlog.py","end_time":"2025-05-07T09:08:20.396969Z","sanitized_token":"**** (B6C6F477)","begin_tx":"1","remote_address":"127.0.0.1","commit_tx":"1","status":"SUCCESS","query_text":"replace into `/Root/test_auditlog.py/test-table` (id, value) values (2, 3), (3, 3)","start_time":"2025-05-07T09:08:20.270471Z","subject":"root@builtin","detailed_status":"SUCCESS","operation":"ExecuteDataQueryRequest","component":"grpc-proxy"}
|93.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_does_not_change_visibility_not_in_flight[tables_format_v1-std] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_set_very_big_visibility_timeout[tables_format_v0] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_and_read_message[tables_format_v1-std] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_and_read_multiple_messages[tables_format_v0]
>> test_generic_messaging.py::TestYandexAttributesPrefix::test_allows_yandex_message_attribute_prefix[tables_format_v1]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delete_message_batch_deduplicates_receipt_handle[tables_format_v1-std] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delete_message_batch_works[tables_format_v0]
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_does_not_change_visibility_for_deleted_message[tables_format_v0-fifo] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_does_not_change_visibility_for_deleted_message[tables_format_v0-std]
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_zero_visibility_timeout_works[tables_format_v0-fifo] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_zero_visibility_timeout_works[tables_format_v0-std]
>> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_visibility_change_disables_receive_attempt_id[tables_format_v0-with_delete_message] [GOOD]
>> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_visibility_change_disables_receive_attempt_id[tables_format_v1-with_change_visibility]
>> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_can_read_from_different_groups[tables_format_v1] [GOOD]
>> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_crutch_groups_selection_algorithm_selects_second_group_batch[tables_format_v0]
>> test_fifo_messaging.py::TestSqsFifoMicroBatchesWithTenant::test_micro_batch_read[tables_format_v0]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_change_visibility_to_zero_works[tables_format_v1-std]
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_read_dont_stall[tables_format_v1] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_receive_with_very_big_visibility_timeout[tables_format_v0]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_zero_visibility_timeout_works[tables_format_v0-fifo]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delay_one_message[tables_format_v0-std] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delay_one_message[tables_format_v1-fifo]
>> TPersQueueTest::TestReadPartitionByGroupId [GOOD]
>> TPersQueueTest::SrcIdCompatibility
|93.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_wrong_delete_fails[tables_format_v1] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delay_message_batch[tables_format_v1-fifo] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delay_message_batch[tables_format_v1-std]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_and_read_multiple_messages[tables_format_v1] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_message[tables_format_v0-fifo]
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_does_not_change_visibility_not_in_flight[tables_format_v0-fifo] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_does_not_change_visibility_not_in_flight[tables_format_v0-std]
>> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_visibility_change_disables_receive_attempt_id[tables_format_v1-with_change_visibility] [GOOD]
>> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_visibility_change_disables_receive_attempt_id[tables_format_v1-with_delete_message]
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_does_not_change_visibility_for_deleted_message[tables_format_v0-std] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_does_not_change_visibility_for_deleted_message[tables_format_v1-fifo]
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_zero_visibility_timeout_works[tables_format_v0-std] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_zero_visibility_timeout_works[tables_format_v1-fifo]
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delay_one_message[tables_format_v0-fifo] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delay_one_message[tables_format_v0-std]
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_receive_with_very_big_visibility_timeout[tables_format_v0] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_receive_with_very_big_visibility_timeout[tables_format_v1]
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_does_not_change_visibility_for_deleted_message[tables_format_v1-fifo] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_zero_visibility_timeout_works[tables_format_v1-fifo] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_zero_visibility_timeout_works[tables_format_v1-std]
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_receive_with_very_big_visibility_timeout[tables_format_v1] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_and_read_message[tables_format_v0-fifo]
------- [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_cloud_ids_are_logged[attrs0] [GOOD]
Test command err: AAA /home/runner/.ya/build/build_root/zvgn/004367/ydb/tests/functional/audit/test-results/py3test/testing_out_stuff/test_auditlog/chunk8/testing_out_stuff/test_auditlog.py.test_cloud_ids_are_logged.attrs0/audit.txt
2025-05-07T09:08:26.164836Z: {"tx_id":"{none}","database":"/Root/test_auditlog.py","cloud_id":"cloud-id-A","end_time":"2025-05-07T09:08:26.164775Z","sanitized_token":"**** (B6C6F477)","begin_tx":"1","remote_address":"127.0.0.1","commit_tx":"1","status":"SUCCESS","query_text":"update `/Root/test_auditlog.py/test-table` set value = 0 where id = 1","start_time":"2025-05-07T09:08:25.906327Z","subject":"root@builtin","detailed_status":"SUCCESS","resource_id":"database-id-C","operation":"ExecuteDataQueryRequest","folder_id":"folder-id-B","component":"grpc-proxy"}
>> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_visibility_change_disables_receive_attempt_id[tables_format_v1-with_delete_message] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delete_message_works[tables_format_v0]
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_message_batch[tables_format_v1-fifo] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_message_batch[tables_format_v1-std]
>> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_visibility_timeout_works[tables_format_v0]
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_empty_queue_url[tables_format_v0]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delete_message_batch_works[tables_format_v0] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_visibility_timeout_works[tables_format_v1] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_wrong_attribute_name[tables_format_v0]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delete_message_batch_works[tables_format_v1]
>> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_delete_message_works[tables_format_v0] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_empty_queue_url[tables_format_v0] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_empty_queue_url[tables_format_v1]
>> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_delete_message_works[tables_format_v1]
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_zero_visibility_timeout_works[tables_format_v1-std] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_empty_queue_url[tables_format_v1] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_get_queue_attributes_only_attributes_table[tables_format_v0-fifo]
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_and_read_message[tables_format_v0-fifo] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_and_read_message[tables_format_v0-std]
|93.7%| [TA] $(B)/ydb/tests/functional/audit/test-results/py3test/{meta.json ... results_accumulator.log}
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_message_batch[tables_format_v1-std] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_to_nonexistent_queue[tables_format_v0]
|93.7%| [TA] {RESULT} $(B)/ydb/tests/functional/audit/test-results/py3test/{meta.json ... results_accumulator.log}
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_wrong_attribute_name[tables_format_v0] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_wrong_attribute_name[tables_format_v1]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_get_queue_attributes_only_attributes_table[tables_format_v0-fifo] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_get_queue_attributes_only_attributes_table[tables_format_v0-std]
|93.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_create_queue_by_nonexistent_user_fails[tables_format_v0] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delay_message_batch[tables_format_v1-std] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_does_not_change_visibility_not_in_flight[tables_format_v0-std] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_wrong_attribute_name[tables_format_v1] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_does_not_change_visibility_not_in_flight[tables_format_v1-fifo]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_wrong_delete_fails[tables_format_v0]
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_and_read_multiple_messages[tables_format_v0] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_and_read_multiple_messages[tables_format_v1]
>> unstable_connection.py::TestUnstableConnection::test [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_change_visibility_batch_works[tables_format_v1-fifo]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delay_one_message[tables_format_v1-fifo] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delay_one_message[tables_format_v1-std]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_message[tables_format_v0-fifo] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delete_message_batch_works[tables_format_v1] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_and_read_message[tables_format_v0-std] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_to_nonexistent_queue[tables_format_v0] [GOOD]
|93.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delay_message_batch[tables_format_v1-std] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_to_nonexistent_queue[tables_format_v1]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_get_queue_attributes_only_attributes_table[tables_format_v0-std] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_get_queue_attributes_only_attributes_table[tables_format_v1-fifo]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_wrong_delete_fails[tables_format_v0] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_wrong_delete_fails[tables_format_v1]
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_to_nonexistent_queue[tables_format_v1] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_set_very_big_visibility_timeout[tables_format_v0]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_get_queue_attributes_only_attributes_table[tables_format_v1-fifo] [GOOD]
|93.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_get_queue_attributes_only_runtime_attributes[tables_format_v1-std] [GOOD]
>> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_deduplication[tables_format_v1-by_deduplication_id]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_wrong_delete_fails[tables_format_v1] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delay_one_message[tables_format_v0-std] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delay_one_message[tables_format_v1-fifo]
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_does_not_change_visibility_not_in_flight[tables_format_v1-fifo] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_does_not_change_visibility_not_in_flight[tables_format_v1-std]
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_set_very_big_visibility_timeout[tables_format_v0] [GOOD]
|93.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delete_message_batch_works[tables_format_v1] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_queue_attributes[tables_format_v1-fifo] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_queue_attributes[tables_format_v1-std]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_queue_attributes[tables_format_v1-fifo]
>> TPersQueueTest::TestWriteSessionsConflicts [GOOD]
>> TPersQueueTest::TestReadRuleServiceTypePassword
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_queue_attributes[tables_format_v1-std] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_queue_attributes_batch[tables_format_v0]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_validates_message_body[tables_format_v0] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_validates_message_body[tables_format_v1]
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_and_read_multiple_messages[tables_format_v1] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_message[tables_format_v0-fifo]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_multi_read_dont_stall[tables_format_v1]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delay_one_message[tables_format_v1-std] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delete_message_batch_deduplicates_receipt_handle[tables_format_v0-fifo]
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_does_not_change_visibility_not_in_flight[tables_format_v1-std] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_validates_message_body[tables_format_v1] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_visibility_timeout_expires_on_wait_timeout[tables_format_v0]
|93.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_validates_message_attributes[tables_format_v1] [GOOD]
>> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_delete_message_works[tables_format_v1] [GOOD]
>> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_fifo_read_delete_single_message
>> test_generic_messaging.py::TestYandexAttributesPrefix::test_allows_yandex_message_attribute_prefix[tables_format_v1] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_invalid_queue_url[tables_format_v0]
>> YdbYqlClient::SimpleColumnFamilies [GOOD]
>> YdbYqlClient::TableKeyRangesSinglePartition
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_change_visibility_to_zero_works[tables_format_v1-std] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_change_visibility_works[tables_format_v0-fifo]
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delay_one_message[tables_format_v1-fifo] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delay_one_message[tables_format_v1-std]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_zero_visibility_timeout_works[tables_format_v0-fifo] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_zero_visibility_timeout_works[tables_format_v0-std]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_invalid_queue_url[tables_format_v0]
|93.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_set_very_big_visibility_timeout[tables_format_v0] [GOOD]
>> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_fifo_read_delete_single_message [GOOD]
>> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_only_single_read_infly_from_fifo
|93.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_and_read_message[tables_format_v0-std] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_zero_visibility_timeout_works[tables_format_v0-std] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_zero_visibility_timeout_works[tables_format_v1-fifo]
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_message[tables_format_v0-fifo] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_can_read_new_written_data_on_visibility_timeout[tables_format_v1] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_change_message_visibility_with_very_big_timeout[tables_format_v0]
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_can_read_new_written_data_on_visibility_timeout[tables_format_v1] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_change_message_visibility_with_very_big_timeout[tables_format_v0]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delete_message_batch_deduplicates_receipt_handle[tables_format_v0-fifo] [GOOD]
>> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_crutch_groups_selection_algorithm_selects_second_group_batch[tables_format_v1]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_zero_visibility_timeout_works[tables_format_v1-fifo] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_zero_visibility_timeout_works[tables_format_v1-std]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_zero_visibility_timeout_works[tables_format_v1-std] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_get_queue_attributes_only_attributes_table[tables_format_v1-std] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_get_queue_attributes_only_runtime_attributes[tables_format_v0-fifo]
>> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_crutch_groups_selection_algorithm_selects_second_group_batch[tables_format_v0] [GOOD]
>> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_crutch_groups_selection_algorithm_selects_second_group_batch[tables_format_v1]
>> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_only_single_read_infly_from_fifo [GOOD]
>> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_queue_attributes[tables_format_v0]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_change_message_visibility_with_very_big_timeout[tables_format_v0] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_change_message_visibility_with_very_big_timeout[tables_format_v1]
>> test_generic_messaging.py::TestYandexAttributesPrefix::test_allows_yandex_message_attribute_prefix[tables_format_v0]
>> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_visibility_timeout_works[tables_format_v0] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_get_queue_attributes_only_runtime_attributes[tables_format_v0-fifo] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_get_queue_attributes_only_runtime_attributes[tables_format_v0-std]
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_change_message_visibility_with_very_big_timeout[tables_format_v0] [GOOD]
>> TPersQueueTest::DefaultMeteringMode [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_change_message_visibility_with_very_big_timeout[tables_format_v1]
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_validates_message_body[tables_format_v0]
>> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_queue_attributes[tables_format_v0] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_change_message_visibility_with_very_big_timeout[tables_format_v1] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_change_visibility_batch_works[tables_format_v0-fifo]
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delay_one_message[tables_format_v1-std] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delete_message_batch_deduplicates_receipt_handle[tables_format_v0-fifo]
>> YdbYqlClient::TableKeyRangesSinglePartition [GOOD]
>> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_receive_attempt_reloads_same_messages[tables_format_v1-after_crutch_batch] [GOOD]
>> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_receive_attempt_reloads_same_messages[tables_format_v1-standard_mode]
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_message[tables_format_v0-std]
>> TPersQueueTest::SrcIdCompatibility [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_change_message_visibility_with_very_big_timeout[tables_format_v1] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_change_visibility_batch_works[tables_format_v0-fifo]
>> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_can_read_from_different_groups[tables_format_v0] [GOOD]
>> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_can_read_from_different_groups[tables_format_v1]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_change_visibility_works[tables_format_v0-fifo] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_change_visibility_works[tables_format_v0-std]
------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> YdbYqlClient::TableKeyRangesSinglePartition [GOOD]
Test command err:
2025-05-07T09:01:49.041060Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501626428168264331:2275];send_to=[0:7307199536658146131:7762515];
2025-05-07T09:01:49.041134Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message;
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00283d/r3tmp/tmpAPgWrS/pdisk_1.dat
2025-05-07T09:01:49.639210Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2025-05-07T09:01:49.639322Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting
2025-05-07T09:01:49.649238Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected
2025-05-07T09:01:49.709273Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded
TServer::EnableGrpc on GrpcPort 62108, node 1
2025-05-07T09:01:49.782482Z node 1 :GRPC_SERVER WARN: grpc_request_proxy.cpp:509: SchemeBoardDelete /Root Strong=0
2025-05-07T09:01:49.782534Z node 1 :GRPC_SERVER WARN: grpc_request_proxy.cpp:509: SchemeBoardDelete /Root Strong=0
2025-05-07T09:01:49.934586Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe)
2025-05-07T09:01:49.934609Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe)
2025-05-07T09:01:49.938086Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe)
2025-05-07T09:01:49.938239Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration
TClient is connected to server localhost:15161
WaitRootIsUp 'Root'...
TClient::Ls request: Root
TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED)
WaitRootIsUp 'Root' success.
2025-05-07T09:01:50.524071Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480
waiting...
2025-05-07T09:01:53.088032Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501626445348134402:2342], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T09:01:53.088133Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T09:01:53.585703Z node 1 :TX_PROXY ERROR: schemereq.cpp:1030: Actor# [1:7501626445348134423:2644] txid# 281474976710658, Access denied for badguy@builtin on path /Root, with access CreateTable
2025-05-07T09:01:53.585907Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501626445348134423:2644] txid# 281474976710658, issues: { message: "Access denied for badguy@builtin on path /Root" issue_code: 200000 severity: 1 }
2025-05-07T09:01:53.758116Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501626445348134435:2350], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T09:01:53.758203Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T09:01:53.771922Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480
2025-05-07T09:01:54.041951Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501626428168264331:2275];send_to=[0:7307199536658146131:7762515];
2025-05-07T09:01:54.042036Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout;
2025-05-07T09:01:56.352063Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7501626457070546015:2074];send_to=[0:7307199536658146131:7762515];
2025-05-07T09:01:56.362368Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message;
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00283d/r3tmp/tmpgQWfKL/pdisk_1.dat
2025-05-07T09:01:56.582880Z node 4 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded
TServer::EnableGrpc on GrpcPort 6050, node 4
2025-05-07T09:01:56.689231Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2025-05-07T09:01:56.689387Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting
2025-05-07T09:01:56.760279Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected
2025-05-07T09:01:56.794621Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe)
2025-05-07T09:01:56.794651Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe)
2025-05-07T09:01:56.794660Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe)
2025-05-07T09:01:56.794799Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration
TClient is connected to server localhost:3302
WaitRootIsUp 'Root'...
TClient::Ls request: Root
TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED)
WaitRootIsUp 'Root' success.
waiting...
2025-05-07T09:01:56.943705Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480
2025-05-07T09:01:59.538202Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7501626469955448954:2341], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T09:01:59.538314Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T09:01:59.567055Z node 4 :TX_PROXY ERROR: schemereq.cpp:1030: Actor# [4:7501626469955448975:2618] txid# 281474976715658, Access denied for badguy@builtin on path /Root, with access CreateTable
2025-05-07T09:01:59.567207Z node 4 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [4:7501626469955448975:2618] txid# 281474976715658, issues: { message: "Access denied for badguy@builtin on path /Root" issue_code: 200000 severity: 1 }
2025-05-07T09:01:59.689694Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7501626469955448987:2349], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T09:01:59.689809Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T09:01:59.705433Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480
2025-05-07T09:02:01.770247Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7501626479718426216:2073];send_to=[0:7307199536658146131:7762515];
2025-05-07T09:02:01.770301Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message;
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00283d/r3tmp/tmpDEDu3e/pdisk_1.dat
2025-05-07T09:02:02.026409Z node 7 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded
TServer::EnableGrpc on GrpcPort 11105, node 7
2025-05-07T09:02:02.134409Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2025-05-07T09:02:02.134486Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting
2025-05-07T09:02:02.158569Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected
2025-05-07T09:02:02.181837Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is ... : , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root
2025-05-07T09:08:49.103143Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976719647. Ctx: { TraceId: 01jtn02e6vc2z3551x3nc95xa7, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=NDhkNjJmM2EtY2E3MjNjN2QtZjAzZmY4MjItM2I4NmQ3NTY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root
2025-05-07T09:08:49.207587Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976719648. Ctx: { TraceId: 01jtn02eb80m6ar8edgyhzsj7m, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=NDhkNjJmM2EtY2E3MjNjN2QtZjAzZmY4MjItM2I4NmQ3NTY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root
2025-05-07T09:08:49.331744Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976719649. Ctx: { TraceId: 01jtn02eecacj1y3wzv5vb712b, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=NDhkNjJmM2EtY2E3MjNjN2QtZjAzZmY4MjItM2I4NmQ3NTY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root
2025-05-07T09:08:49.430933Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976719650. Ctx: { TraceId: 01jtn02ej42s5q1741d98tf0c6, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=NDhkNjJmM2EtY2E3MjNjN2QtZjAzZmY4MjItM2I4NmQ3NTY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root
2025-05-07T09:08:49.535057Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976719651. Ctx: { TraceId: 01jtn02enqap1jn2z7c89b1ewj, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=NDhkNjJmM2EtY2E3MjNjN2QtZjAzZmY4MjItM2I4NmQ3NTY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root
2025-05-07T09:08:49.627124Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976719652. Ctx: { TraceId: 01jtn02ergdew3f0p1ssz683ak, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=NDhkNjJmM2EtY2E3MjNjN2QtZjAzZmY4MjItM2I4NmQ3NTY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root
2025-05-07T09:08:49.762056Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976719653. Ctx: { TraceId: 01jtn02evf06hpdhzc7vpnwj9m, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=NDhkNjJmM2EtY2E3MjNjN2QtZjAzZmY4MjItM2I4NmQ3NTY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root
2025-05-07T09:08:49.860509Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976719654. Ctx: { TraceId: 01jtn02ezz52tp9fcj6r388s3t, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=NDhkNjJmM2EtY2E3MjNjN2QtZjAzZmY4MjItM2I4NmQ3NTY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root
2025-05-07T09:08:49.986462Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976719655. Ctx: { TraceId: 01jtn02f34096jg3fvj9bpz7s8, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=NDhkNjJmM2EtY2E3MjNjN2QtZjAzZmY4MjItM2I4NmQ3NTY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root
2025-05-07T09:08:50.087658Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976719656. Ctx: { TraceId: 01jtn02f6k0d00a8wft1gy2552, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=NDhkNjJmM2EtY2E3MjNjN2QtZjAzZmY4MjItM2I4NmQ3NTY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root
2025-05-07T09:08:50.192519Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976719657. Ctx: { TraceId: 01jtn02f9t36grfhbba6czzx5x, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=NDhkNjJmM2EtY2E3MjNjN2QtZjAzZmY4MjItM2I4NmQ3NTY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root
2025-05-07T09:08:50.314932Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976719658. Ctx: { TraceId: 01jtn02fd0f9mgy25b3c4xyqzg, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=NDhkNjJmM2EtY2E3MjNjN2QtZjAzZmY4MjItM2I4NmQ3NTY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root
2025-05-07T09:08:50.498926Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976719659. Ctx: { TraceId: 01jtn02fhhegbza8k3ryg374ax, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=NDhkNjJmM2EtY2E3MjNjN2QtZjAzZmY4MjItM2I4NmQ3NTY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root
2025-05-07T09:08:50.642039Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976719660. Ctx: { TraceId: 01jtn02fq87ybpmszc9tygzsk0, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=NDhkNjJmM2EtY2E3MjNjN2QtZjAzZmY4MjItM2I4NmQ3NTY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root
2025-05-07T09:08:50.745915Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976719661. Ctx: { TraceId: 01jtn02fv865yezywdr6z3fy5d, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=NDhkNjJmM2EtY2E3MjNjN2QtZjAzZmY4MjItM2I4NmQ3NTY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root
2025-05-07T09:08:50.857847Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976719662. Ctx: { TraceId: 01jtn02fys5zzw9jprx42x5p8b, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=NDhkNjJmM2EtY2E3MjNjN2QtZjAzZmY4MjItM2I4NmQ3NTY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root
2025-05-07T09:08:50.970781Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976719663. Ctx: { TraceId: 01jtn02g27degvk2x62par1ycp, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=NDhkNjJmM2EtY2E3MjNjN2QtZjAzZmY4MjItM2I4NmQ3NTY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root
2025-05-07T09:08:51.075023Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976719664. Ctx: { TraceId: 01jtn02g5b8pd55vb12658r6eb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=NDhkNjJmM2EtY2E3MjNjN2QtZjAzZmY4MjItM2I4NmQ3NTY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root
2025-05-07T09:08:51.184472Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976719665. Ctx: { TraceId: 01jtn02g8mewr5664r4v1syxqr, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=NDhkNjJmM2EtY2E3MjNjN2QtZjAzZmY4MjItM2I4NmQ3NTY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root
2025-05-07T09:08:51.279856Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976719666. Ctx: { TraceId: 01jtn02gc46d8z6tzwzpygk6dr, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=NDhkNjJmM2EtY2E3MjNjN2QtZjAzZmY4MjItM2I4NmQ3NTY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root
2025-05-07T09:08:51.378892Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976719667. Ctx: { TraceId: 01jtn02gf12dtdy2fwfvmq8be9, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=NDhkNjJmM2EtY2E3MjNjN2QtZjAzZmY4MjItM2I4NmQ3NTY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root
2025-05-07T09:08:51.515610Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976719668. Ctx: { TraceId: 01jtn02gjm52jm2hvjhw6q1z1c, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=NDhkNjJmM2EtY2E3MjNjN2QtZjAzZmY4MjItM2I4NmQ3NTY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root
2025-05-07T09:08:51.542792Z node 7 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 9
2025-05-07T09:08:51.543525Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Connected -> Disconnected
2025-05-07T09:08:54.129739Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7501628252464868276:2071];send_to=[0:7307199536658146131:7762515];
2025-05-07T09:08:54.129830Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message;
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00283d/r3tmp/tmpIj2iYi/pdisk_1.dat
2025-05-07T09:08:54.357109Z node 10 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded
2025-05-07T09:08:54.456579Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2025-05-07T09:08:54.456708Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting
2025-05-07T09:08:54.460587Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected
TServer::EnableGrpc on GrpcPort 12102, node 10
2025-05-07T09:08:54.682963Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe)
2025-05-07T09:08:54.683006Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe)
2025-05-07T09:08:54.683028Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe)
2025-05-07T09:08:54.683242Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration
TClient is connected to server localhost:20683
WaitRootIsUp 'Root'...
TClient::Ls request: Root
TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED)
WaitRootIsUp 'Root' success.
2025-05-07T09:08:55.007077Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480
waiting...
2025-05-07T09:08:59.132012Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[10:7501628252464868276:2071];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:08:59.132110Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:08:59.228514Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 ------- [TM] {asan, default-linux-x86_64, release} ydb/services/persqueue_v1/ut/unittest >> TPersQueueTest::DefaultMeteringMode [GOOD] Test command err: 2025-05-07T09:03:38.295951Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501626896017759706:2072];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:03:38.296097Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-05-07T09:03:38.349844Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7501626896272178618:2072];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:03:38.349926Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-05-07T09:03:38.472488Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-05-07T09:03:38.475924Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00262e/r3tmp/tmpugfetD/pdisk_1.dat 2025-05-07T09:03:38.689587Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:03:38.689675Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:03:38.695031Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:03:38.695106Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:03:38.721178Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-05-07T09:03:38.723154Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:03:38.724607Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:03:38.740055Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 16411, node 1 2025-05-07T09:03:38.755485Z node 1 :GRPC_SERVER WARN: grpc_request_proxy.cpp:509: SchemeBoardDelete /Root Strong=0 2025-05-07T09:03:38.755698Z node 1 :GRPC_SERVER WARN: grpc_request_proxy.cpp:509: SchemeBoardDelete /Root Strong=0 2025-05-07T09:03:38.832926Z node 1 :NET_CLASSIFIER WARN: 
net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/zvgn/00262e/r3tmp/yandexnZ4rE6.tmp 2025-05-07T09:03:38.832951Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/zvgn/00262e/r3tmp/yandexnZ4rE6.tmp 2025-05-07T09:03:38.833069Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/zvgn/00262e/r3tmp/yandexnZ4rE6.tmp 2025-05-07T09:03:38.833158Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-05-07T09:03:38.871993Z INFO: TTestServer started on Port 15178 GrpcPort 16411 TClient is connected to server localhost:15178 PQClient connected to localhost:16411 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:03:39.076595Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-05-07T09:03:39.129711Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... waiting... waiting... 2025-05-07T09:03:41.400090Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7501626909157080831:2312], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:03:41.400189Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7501626909157080798:2308], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:03:41.400605Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:03:41.407562Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715657:3, at schemeshard: 72057594046644480 2025-05-07T09:03:41.426122Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7501626909157080836:2313], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715657 completed, doublechecking } 2025-05-07T09:03:41.484600Z node 2 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [2:7501626909157080864:2130] txid# 281474976715658, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:03:41.787818Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T09:03:41.791899Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7501626908902662841:2348], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-05-07T09:03:41.792132Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2147: SessionId: ydb://session/3?node_id=1&id=MjhmMjE1OGYtMWVjYzI1YmMtN2E5M2Y3NjQtMjZiZDg5NDY=, ActorId: [1:7501626908902662816:2341], ActorState: ExecuteState, TraceId: 01jtmzs1y00vpeg0t3q6dktr46, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-05-07T09:03:41.792556Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7501626909157080879:2317], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-05-07T09:03:41.792694Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2147: SessionId: ydb://session/3?node_id=2&id=YzU3YzcwMDItZmJiNGQ3MmMtNGFlZmNhMTEtNDUzZjQ3OTM=, ActorId: [2:7501626909157080796:2307], ActorState: ExecuteState, TraceId: 01jtmzs1tmbsckathfv7kq0xfz, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-05-07T09:03:41.794032Z node 2 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-05-07T09:03:41.794032Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-05-07T09:03:41.863258Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T09:03:41.938541Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); 2025-05-07T09:03:42.101864Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710665. Ctx: { TraceId: 01jtmzs2dq3wfyp0j5jfw8rr92, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YmE2NDIxYjctYTc4MjA5MzgtY2RiNjRjM2QtM2RlODE5Mzk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root === CheckClustersList. Subcribe to ClusterTracker from [1:7501626913197630506:3066] 2025-05-07T09:03:43.295845Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501626896017759706:2072];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:03:43.295912Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:03:43.350130Z node 2 :METADATA_PROVIDER ERRO ... 
tChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } SourceIdMaxCounts: 6000000 } PartitionIds: 0 TopicName: "ttt" Version: 0 RequireAuthWrite: true RequireAuthRead: true FormatVersion: 0 Codecs { } TopicPath: "/Root/PQ/ttt" YcCloudId: "" YcFolderId: "" YdbDatabaseId: "" YdbDatabasePath: "/Root" Partitions { PartitionId: 0 Status: Active CreateVersion: 1 TabletId: 0 } MeteringMode: METERING_MODE_REQUEST_UNITS AllPartitions { PartitionId: 0 Status: Active CreateVersion: 1 TabletId: 0 } } BootstrapConfig { } SourceActor { RawX1: 7501628200659163242 RawX2: 124554053779 } Partitions { Partition { PartitionId: 0 } } 2025-05-07T09:08:58.256699Z node 30 :PERSQUEUE DEBUG: pq_impl.cpp:3621: [PQ: 72075186224037892] Send TEvKeyValue::TEvRequest (WRITE_TX_COOKIE) 2025-05-07T09:08:58.264953Z node 30 :PERSQUEUE DEBUG: pq_impl.cpp:1225: [PQ: 72075186224037892] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-05-07T09:08:58.264993Z node 30 :PERSQUEUE DEBUG: pq_impl.cpp:4279: [PQ: 72075186224037892] Try execute txs with state CALCULATED 2025-05-07T09:08:58.265032Z node 30 :PERSQUEUE DEBUG: pq_impl.cpp:4324: [PQ: 72075186224037892] TxId 281474976720672, State CALCULATED 2025-05-07T09:08:58.265063Z node 30 :PERSQUEUE DEBUG: pq_impl.cpp:4271: [PQ: 72075186224037892] TxId 281474976720672 State CALCULATED FrontTxId 281474976720672 2025-05-07T09:08:58.265095Z node 30 :PERSQUEUE DEBUG: pq_impl.cpp:4214: [PQ: 72075186224037892] TxId 281474976720672, NewState WAIT_RS 2025-05-07T09:08:58.265128Z node 30 :PERSQUEUE DEBUG: pq_impl.cpp:4249: [PQ: 72075186224037892] TxId 281474976720672 moved from CALCULATED to WAIT_RS 2025-05-07T09:08:58.265192Z node 30 :PERSQUEUE DEBUG: pq_impl.cpp:3956: [PQ: 72075186224037892] Send TEvTxProcessing::TEvReadSet to 0 receivers. Wait TEvTxProcessing::TEvReadSet from 0 senders. 
2025-05-07T09:08:58.265234Z node 30 :PERSQUEUE DEBUG: pq_impl.cpp:4447: [PQ: 72075186224037892] HaveParticipantsDecision 1 2025-05-07T09:08:58.265314Z node 30 :PERSQUEUE DEBUG: pq_impl.cpp:4214: [PQ: 72075186224037892] TxId 281474976720672, NewState EXECUTING 2025-05-07T09:08:58.265342Z node 30 :PERSQUEUE DEBUG: pq_impl.cpp:4249: [PQ: 72075186224037892] TxId 281474976720672 moved from WAIT_RS to EXECUTING 2025-05-07T09:08:58.265362Z node 30 :PERSQUEUE DEBUG: pq_impl.cpp:4477: [PQ: 72075186224037892] Received 0, Expected 1 2025-05-07T09:08:58.265490Z node 30 :PERSQUEUE DEBUG: partition.cpp:1170: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCommit Step 1746608938291, TxId 281474976720672 2025-05-07T09:08:58.265943Z node 30 :PERSQUEUE DEBUG: partition.cpp:2182: [PQ: 72075186224037892, Partition: 0, State: StateIdle] === DumpKeyValueRequest === 2025-05-07T09:08:58.266002Z node 30 :PERSQUEUE DEBUG: partition.cpp:2183: [PQ: 72075186224037892, Partition: 0, State: StateIdle] --- delete ---------------- 2025-05-07T09:08:58.266045Z node 30 :PERSQUEUE DEBUG: partition.cpp:2191: [PQ: 72075186224037892, Partition: 0, State: StateIdle] --- write ----------------- 2025-05-07T09:08:58.266095Z node 30 :PERSQUEUE DEBUG: partition.cpp:2194: [PQ: 72075186224037892, Partition: 0, State: StateIdle] i0000000000 2025-05-07T09:08:58.266112Z node 30 :PERSQUEUE DEBUG: partition.cpp:2194: [PQ: 72075186224037892, Partition: 0, State: StateIdle] I0000000000 2025-05-07T09:08:58.266128Z node 30 :PERSQUEUE DEBUG: partition.cpp:2194: [PQ: 72075186224037892, Partition: 0, State: StateIdle] _config_0 2025-05-07T09:08:58.266167Z node 30 :PERSQUEUE DEBUG: partition.cpp:2196: [PQ: 72075186224037892, Partition: 0, State: StateIdle] --- rename ---------------- 2025-05-07T09:08:58.266225Z node 30 :PERSQUEUE DEBUG: partition.cpp:2201: [PQ: 72075186224037892, Partition: 0, State: StateIdle] =========================== 2025-05-07T09:08:58.266282Z node 30 :PERSQUEUE DEBUG: read.h:262: CacheProxy. 
Passthrough write request to KV 2025-05-07T09:08:58.270340Z node 30 :PERSQUEUE DEBUG: partition_write.cpp:524: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0 2025-05-07T09:08:58.270518Z node 30 :PERSQUEUE DEBUG: pq_impl.cpp:3521: [PQ: 72075186224037892] Handle TEvPQ::TEvTxCommitDone Step 1746608938291, TxId 281474976720672, Partition 0 2025-05-07T09:08:58.270567Z node 30 :PERSQUEUE DEBUG: pq_impl.cpp:4279: [PQ: 72075186224037892] Try execute txs with state EXECUTING 2025-05-07T09:08:58.270600Z node 30 :PERSQUEUE DEBUG: pq_impl.cpp:4324: [PQ: 72075186224037892] TxId 281474976720672, State EXECUTING 2025-05-07T09:08:58.270639Z node 30 :PERSQUEUE DEBUG: pq_impl.cpp:4271: [PQ: 72075186224037892] TxId 281474976720672 State EXECUTING FrontTxId 281474976720672 2025-05-07T09:08:58.270665Z node 30 :PERSQUEUE DEBUG: pq_impl.cpp:4477: [PQ: 72075186224037892] Received 1, Expected 1 2025-05-07T09:08:58.270706Z node 30 :PERSQUEUE DEBUG: pq_impl.cpp:4150: [PQ: 72075186224037892] TxId: 281474976720672 send TEvPersQueue::TEvProposeTransactionResult(COMPLETE) 2025-05-07T09:08:58.270746Z node 30 :PERSQUEUE DEBUG: pq_impl.cpp:4481: [PQ: 72075186224037892] complete TxId 281474976720672 2025-05-07T09:08:58.271093Z node 30 :PERSQUEUE DEBUG: pq_impl.cpp:585: [PQ: 72075186224037892] Apply new config PartitionConfig { MaxCountInPartition: 2147483647 LifetimeSeconds: 86400 SourceIdLifetimeSeconds: 1382400 WriteSpeedInBytesPerSecond: 1048576 BurstSize: 1048576 TotalPartitions: 1 ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } SourceIdMaxCounts: 6000000 } PartitionIds: 0 TopicName: "ttt" Version: 0 RequireAuthWrite: true RequireAuthRead: true FormatVersion: 0 Codecs { } TopicPath: "/Root/PQ/ttt" YcCloudId: "" YcFolderId: "" YdbDatabaseId: "" YdbDatabasePath: "/Root" Partitions { PartitionId: 0 Status: Active CreateVersion: 1 TabletId: 0 } MeteringMode: METERING_MODE_REQUEST_UNITS AllPartitions { PartitionId: 0 Status: Active CreateVersion: 1 TabletId: 0 } 2025-05-07T09:08:58.271163Z node 30 :PERSQUEUE NOTICE: pq_impl.cpp:1113: [PQ: 72075186224037892] metering mode METERING_MODE_REQUEST_UNITS 2025-05-07T09:08:58.271280Z node 30 :PERSQUEUE DEBUG: pq_impl.cpp:4499: [PQ: 72075186224037892] delete partitions for TxId 281474976720672 2025-05-07T09:08:58.271310Z node 30 :PERSQUEUE DEBUG: pq_impl.cpp:4214: [PQ: 72075186224037892] TxId 281474976720672, NewState EXECUTED 2025-05-07T09:08:58.271338Z node 30 :PERSQUEUE DEBUG: pq_impl.cpp:4249: [PQ: 72075186224037892] TxId 281474976720672 moved from EXECUTING to EXECUTED 2025-05-07T09:08:58.271372Z node 30 :PERSQUEUE DEBUG: pq_impl.cpp:3804: [PQ: 72075186224037892] write key for TxId 281474976720672 2025-05-07T09:08:58.271771Z node 30 :PERSQUEUE DEBUG: transaction.cpp:374: [TxId: 281474976720672] save tx TxId: 281474976720672 State: EXECUTED MinStep: 1746608938004 MaxStep: 18446744073709551615 Step: 1746608938291 Predicate: true Kind: KIND_CONFIG TabletConfig { PartitionConfig { MaxCountInPartition: 
2147483647 LifetimeSeconds: 86400 SourceIdLifetimeSeconds: 1382400 WriteSpeedInBytesPerSecond: 1048576 BurstSize: 1048576 TotalPartitions: 1 ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } SourceIdMaxCounts: 6000000 } PartitionIds: 0 TopicName: "ttt" Version: 0 RequireAuthWrite: true RequireAuthRead: true FormatVersion: 0 Codecs { } TopicPath: "/Root/PQ/ttt" YcCloudId: "" YcFolderId: "" YdbDatabaseId: "" YdbDatabasePath: "/Root" Partitions { PartitionId: 0 Status: Active CreateVersion: 1 TabletId: 0 } MeteringMode: METERING_MODE_REQUEST_UNITS AllPartitions { PartitionId: 0 Status: Active CreateVersion: 1 TabletId: 0 } } BootstrapConfig { } SourceActor { RawX1: 7501628200659163242 RawX2: 124554053779 } Partitions { Partition { PartitionId: 0 } } 2025-05-07T09:08:58.272089Z node 30 :PERSQUEUE DEBUG: pq_impl.cpp:3621: [PQ: 72075186224037892] Send TEvKeyValue::TEvRequest (WRITE_TX_COOKIE) 2025-05-07T09:08:58.282740Z node 30 :PERSQUEUE DEBUG: pq_impl.cpp:1225: [PQ: 72075186224037892] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-05-07T09:08:58.282792Z node 30 :PERSQUEUE DEBUG: pq_impl.cpp:4279: [PQ: 72075186224037892] Try execute txs with state EXECUTED 2025-05-07T09:08:58.282844Z node 30 :PERSQUEUE DEBUG: pq_impl.cpp:4324: [PQ: 72075186224037892] TxId 281474976720672, State EXECUTED 2025-05-07T09:08:58.282884Z node 30 :PERSQUEUE DEBUG: pq_impl.cpp:4271: [PQ: 72075186224037892] TxId 281474976720672 State EXECUTED FrontTxId 281474976720672 2025-05-07T09:08:58.282919Z node 30 :PERSQUEUE DEBUG: pq_impl.cpp:3975: [PQ: 72075186224037892] TPersQueue::SendEvReadSetAckToSenders 2025-05-07T09:08:58.282950Z node 30 :PERSQUEUE DEBUG: pq_impl.cpp:4214: [PQ: 72075186224037892] TxId 281474976720672, NewState WAIT_RS_ACKS 2025-05-07T09:08:58.282981Z node 30 :PERSQUEUE DEBUG: pq_impl.cpp:4249: [PQ: 72075186224037892] TxId 281474976720672 moved from EXECUTED to WAIT_RS_ACKS 2025-05-07T09:08:58.283033Z node 30 :PERSQUEUE DEBUG: transaction.cpp:366: [TxId: 281474976720672] PredicateAcks: 0/0 2025-05-07T09:08:58.283048Z node 30 :PERSQUEUE DEBUG: pq_impl.cpp:4525: [PQ: 72075186224037892] HaveAllRecipientsReceive 1, AllSupportivePartitionsHaveBeenDeleted 1 2025-05-07T09:08:58.283077Z node 30 :PERSQUEUE DEBUG: transaction.cpp:366: [TxId: 281474976720672] PredicateAcks: 0/0 2025-05-07T09:08:58.283104Z node 30 :PERSQUEUE DEBUG: pq_impl.cpp:4586: [PQ: 72075186224037892] add an TxId 281474976720672 to the list for deletion 2025-05-07T09:08:58.283145Z node 30 :PERSQUEUE DEBUG: pq_impl.cpp:4214: [PQ: 72075186224037892] TxId 281474976720672, NewState DELETING 2025-05-07T09:08:58.283190Z node 30 :PERSQUEUE DEBUG: pq_impl.cpp:3820: [PQ: 72075186224037892] delete key for TxId 281474976720672 2025-05-07T09:08:58.283295Z node 30 :PERSQUEUE DEBUG: pq_impl.cpp:3621: [PQ: 72075186224037892] Send TEvKeyValue::TEvRequest (WRITE_TX_COOKIE) 2025-05-07T09:08:58.291060Z node 30 :PERSQUEUE DEBUG: pq_impl.cpp:1225: [PQ: 72075186224037892] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-05-07T09:08:58.291109Z node 30 :PERSQUEUE DEBUG: 
pq_impl.cpp:4279: [PQ: 72075186224037892] Try execute txs with state DELETING 2025-05-07T09:08:58.291149Z node 30 :PERSQUEUE DEBUG: pq_impl.cpp:4324: [PQ: 72075186224037892] TxId 281474976720672, State DELETING 2025-05-07T09:08:58.291179Z node 30 :PERSQUEUE DEBUG: pq_impl.cpp:4536: [PQ: 72075186224037892] delete TxId 281474976720672 2025-05-07T09:08:58.302752Z node 29 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:150: new Describe topic request 2025-05-07T09:08:58.302858Z node 29 :PQ_READ_PROXY DEBUG: schema_actors.cpp:455: TDescribeTopicActor for request operation_params { } path: "/Root/PQ/ttt" 2025-05-07T09:08:58.302943Z node 29 :PQ_READ_PROXY DEBUG: schema_actors.cpp:1189: Describe topic actor for path /Root/PQ/ttt |93.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delay_message_batch[tables_format_v1-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_get_queue_attributes_only_runtime_attributes[tables_format_v0-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_get_queue_attributes_only_runtime_attributes[tables_format_v1-fifo] >> test_fifo_messaging.py::TestSqsFifoMicroBatchesWithTenant::test_micro_batch_read[tables_format_v0] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMicroBatchesWithTenant::test_micro_batch_read[tables_format_v1] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_get_queue_attributes_only_runtime_attributes[tables_format_v1-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_get_queue_attributes_only_runtime_attributes[tables_format_v1-std] |93.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delete_message_batch_works[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_visibility_timeout_expires_on_wait_timeout[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_visibility_timeout_expires_on_wait_timeout[tables_format_v1] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_receive_attempt_reloads_same_messages[tables_format_v1-standard_mode] [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/services/persqueue_v1/ut/unittest >> TPersQueueTest::SrcIdCompatibility [GOOD] Test command err: === Start server === Server->StartServer(false); 2025-05-07T09:03:38.300148Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501626894561752732:2074];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:03:38.300458Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-05-07T09:03:38.350266Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7501626895741100297:2072];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:03:38.350474Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-05-07T09:03:38.495350Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 
2025-05-07T09:03:38.507309Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002622/r3tmp/tmpcvWgzk/pdisk_1.dat 2025-05-07T09:03:38.734672Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:03:38.734776Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:03:38.735373Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:03:38.735415Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:03:38.737576Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-05-07T09:03:38.737858Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:03:38.738522Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 64162, node 1 2025-05-07T09:03:38.762636Z node 1 :GRPC_SERVER WARN: grpc_request_proxy.cpp:509: SchemeBoardDelete /Root Strong=0 2025-05-07T09:03:38.762709Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:03:38.762807Z node 1 :GRPC_SERVER WARN: grpc_request_proxy.cpp:509: SchemeBoardDelete /Root Strong=0 2025-05-07T09:03:38.836571Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/zvgn/002622/r3tmp/yandexYewQJm.tmp 2025-05-07T09:03:38.836601Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/zvgn/002622/r3tmp/yandexYewQJm.tmp 2025-05-07T09:03:38.836800Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/zvgn/002622/r3tmp/yandexYewQJm.tmp 2025-05-07T09:03:38.836964Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-05-07T09:03:38.879409Z INFO: TTestServer started on Port 10454 GrpcPort 64162 TClient is connected to server localhost:10454 PQClient connected to localhost:64162 === TenantModeEnabled() = 0 === Init PQ - start server on port 64162 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-05-07T09:03:39.158070Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "Root" StoragePools { Name: "/Root:test" Kind: "test" } } } TxId: 281474976710657 TabletId: 72057594046644480 PeerName: "" , at schemeshard: 72057594046644480 2025-05-07T09:03:39.158299Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //Root, opId: 281474976710657:0, at schemeshard: 72057594046644480 2025-05-07T09:03:39.158517Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 0 2025-05-07T09:03:39.159964Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 281474976710657:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-05-07T09:03:39.160018Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 2025-05-07T09:03:39.162038Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 281474976710657, response: Status: StatusAccepted TxId: 281474976710657 SchemeshardId: 72057594046644480 PathId: 1, at schemeshard: 72057594046644480 2025-05-07T09:03:39.162170Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976710657, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //Root 2025-05-07T09:03:39.162319Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 281474976710657:0, at schemeshard: 72057594046644480 2025-05-07T09:03:39.162363Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 281474976710657:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046644480 2025-05-07T09:03:39.162378Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 281474976710657:0 ProgressState no shards to create, do next state 2025-05-07T09:03:39.162391Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 281474976710657:0 2 -> 3 waiting... 
2025-05-07T09:03:39.164436Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 281474976710657:0, at schemeshard: 72057594046644480 2025-05-07T09:03:39.164474Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 281474976710657:0 ProgressState, at schemeshard: 72057594046644480 2025-05-07T09:03:39.164491Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 281474976710657:0 3 -> 128 2025-05-07T09:03:39.164949Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T09:03:39.164978Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 281474976710657, ready parts: 0/1, is published: true 2025-05-07T09:03:39.164995Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T09:03:39.170515Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 281474976710657:0, at schemeshard: 72057594046644480 2025-05-07T09:03:39.170546Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 281474976710657:0, at schemeshard: 72057594046644480 2025-05-07T09:03:39.170569Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 281474976710657:0, at tablet# 72057594046644480 2025-05-07T09:03:39.170597Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 281474976710657 ready parts: 1/1 2025-05-07T09:03:39.175489Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046644480 Flags: 2 } ExecLevel: 0 TxId: 281474976710657 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T09:03:39.177297Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 281474976710657:4294967295 from tablet: 72057594046644480 to tablet: 72057594046316545 cookie: 0:281474976710657 msg type: 269090816 2025-05-07T09:03:39.177418Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 281474976710657, partId: 4294967295, tablet: 72057594046316545 2025-05-07T09:03:39.180161Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 1746608619224, transactions count in step: 1, at schemeshard: 72057594046644480 2025-05-07T09:03:39.180301Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 281474976710657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1746608619224 MediatorID: 72057594046382081 TabletID: 72057594046644480, at schemeshard: 72057594046644480 2025-05-07T09:03:39.180348Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 281474976710657:0, at tablet# 72057594046644480 2025-05-07T09:03:39.180639Z 
node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 281474976710657:0 128 -> 240 2025-05-07T09:03:39.180680Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 281474976710657:0, at tablet# 72057594046644480 2025-05-07T09:03:39.180838Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 1 2025-05-07T09:03:39.180897Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046644480, LocalPathId: 1], at schemeshard: 72057594046644480 2025-05-07T09:03:39.183576Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046644480 2025-05-07T09:03:39.183621Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046644480, txId: 281474976710657, path id: [OwnerId: 72057594046644480, LocalPathId: ... Ms: 1746608939374 BytesWritten: 0 MessagesWritten: 0 BytesWrittenCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-05-07T09:08:59.374551Z :INFO: [] MessageGroupId [test-src-id-compat2] SessionId [] Write session established. Init response: session_id: "test-src-id-compat2|3b8cb84-1787d827-b39750c8-371ffd80_0" topic: "account/topic100" cluster: "dc1" partition_id: 7 supported_codecs: CODEC_RAW supported_codecs: CODEC_GZIP supported_codecs: CODEC_LZOP 2025-05-07T09:08:59.374958Z :DEBUG: [] MessageGroupId [test-src-id-compat2] SessionId [test-src-id-compat2|3b8cb84-1787d827-b39750c8-371ffd80_0] Write 1 messages with Id from 1 to 1 2025-05-07T09:08:59.378998Z :DEBUG: [] MessageGroupId [test-src-id-compat2] SessionId [test-src-id-compat2|3b8cb84-1787d827-b39750c8-371ffd80_0] Write session: try to update token 2025-05-07T09:08:59.379086Z :DEBUG: [] MessageGroupId [test-src-id-compat2] SessionId [test-src-id-compat2|3b8cb84-1787d827-b39750c8-371ffd80_0] Send 1 message(s) (0 left), first sequence number is 1 2025-05-07T09:08:59.380385Z node 25 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 5 sessionId: test-src-id-compat2|3b8cb84-1787d827-b39750c8-371ffd80_0 grpc read done: success: 1 data: write_request[data omitted] 2025-05-07T09:08:59.380769Z node 25 :PQ_WRITE_PROXY DEBUG: writer.cpp:538: TPartitionWriter 72075186224037910 (partition=7) Received event: NKikimr::NPQ::TEvPartitionWriter::TEvWriteRequest 2025-05-07T09:08:59.381096Z node 25 :PERSQUEUE DEBUG: pq_impl.cpp:342: Handle TEvRequest topic: 'rt3.dc1--account--topic100' requestId: 2025-05-07T09:08:59.381142Z node 25 :PERSQUEUE DEBUG: pq_impl.cpp:2788: [PQ: 72075186224037910] got client message batch for topic 'rt3.dc1--account--topic100' partition 7 2025-05-07T09:08:59.381249Z node 25 :PERSQUEUE DEBUG: pq_impl.cpp:377: Answer ok topic: 'rt3.dc1--account--topic100' partition: 7 messageNo: 0 requestId: cookie: 1 2025-05-07T09:08:59.381352Z node 25 :PQ_WRITE_PROXY DEBUG: writer.cpp:538: TPartitionWriter 72075186224037910 (partition=7) Received event: NKikimr::TEvPersQueue::TEvResponse 2025-05-07T09:08:59.381677Z node 25 :PERSQUEUE DEBUG: pq_impl.cpp:342: Handle TEvRequest topic: 'rt3.dc1--account--topic100' requestId: 2025-05-07T09:08:59.381712Z node 25 :PERSQUEUE 
DEBUG: pq_impl.cpp:2788: [PQ: 72075186224037910] got client message batch for topic 'rt3.dc1--account--topic100' partition 7 2025-05-07T09:08:59.381790Z node 25 :PERSQUEUE DEBUG: pq_impl.cpp:2192: [PQ: 72075186224037910] got client message topic: rt3.dc1--account--topic100 partition: 7 SourceId: '\0test-src-id-compat2' SeqNo: 1 partNo : 0 messageNo: 1 size 102 offset: -1 2025-05-07T09:08:59.382072Z node 25 :PERSQUEUE DEBUG: partition_write.cpp:1233: [PQ: 72075186224037910, Partition: 7, State: StateIdle] Topic 'rt3.dc1--account--topic100' partition 7 part blob processing sourceId '\0test-src-id-compat2' seqNo 1 partNo 0 2025-05-07T09:08:59.451478Z node 25 :PERSQUEUE DEBUG: partition_write.cpp:1333: [PQ: 72075186224037910, Partition: 7, State: StateIdle] Topic 'rt3.dc1--account--topic100' partition 7 part blob complete sourceId '\0test-src-id-compat2' seqNo 1 partNo 0 FormedBlobsCount 0 NewHead: Offset 0 PartNo 0 PackedSize 189 count 1 nextOffset 1 batches 1 2025-05-07T09:08:59.452544Z node 25 :PERSQUEUE DEBUG: partition_write.cpp:1623: [PQ: 72075186224037910, Partition: 7, State: StateIdle] Add new write blob: topic 'rt3.dc1--account--topic100' partition 7 compactOffset 0,1 HeadOffset 0 endOffset 0 curOffset 1 d0000000007_00000000000000000000_00000_0000000001_00000| size 177 WTime 1746608939452 2025-05-07T09:08:59.452747Z node 25 :PERSQUEUE DEBUG: partition.cpp:2182: [PQ: 72075186224037910, Partition: 7, State: StateIdle] === DumpKeyValueRequest === 2025-05-07T09:08:59.452773Z node 25 :PERSQUEUE DEBUG: partition.cpp:2183: [PQ: 72075186224037910, Partition: 7, State: StateIdle] --- delete ---------------- 2025-05-07T09:08:59.452802Z node 25 :PERSQUEUE DEBUG: partition.cpp:2189: [PQ: 72075186224037910, Partition: 7, State: StateIdle] [x0000000007, x0000000008) 2025-05-07T09:08:59.452827Z node 25 :PERSQUEUE DEBUG: partition.cpp:2191: [PQ: 72075186224037910, Partition: 7, State: StateIdle] --- write ----------------- 2025-05-07T09:08:59.452852Z node 25 :PERSQUEUE DEBUG: partition.cpp:2194: [PQ: 72075186224037910, Partition: 7, State: StateIdle] m0000000007ptest-src-id-compat2 2025-05-07T09:08:59.452865Z node 25 :PERSQUEUE DEBUG: partition.cpp:2194: [PQ: 72075186224037910, Partition: 7, State: StateIdle] d0000000007_00000000000000000000_00000_0000000001_00000| 2025-05-07T09:08:59.452878Z node 25 :PERSQUEUE DEBUG: partition.cpp:2194: [PQ: 72075186224037910, Partition: 7, State: StateIdle] i0000000007 2025-05-07T09:08:59.452901Z node 25 :PERSQUEUE DEBUG: partition.cpp:2196: [PQ: 72075186224037910, Partition: 7, State: StateIdle] --- rename ---------------- 2025-05-07T09:08:59.452927Z node 25 :PERSQUEUE DEBUG: partition.cpp:2201: [PQ: 72075186224037910, Partition: 7, State: StateIdle] =========================== 2025-05-07T09:08:59.452998Z node 25 :PERSQUEUE DEBUG: read.h:262: CacheProxy. Passthrough write request to KV 2025-05-07T09:08:59.453114Z node 25 :PERSQUEUE DEBUG: read.h:300: CacheProxy. Passthrough blob. Partition 7 offset 0 partNo 0 count 1 size 177 2025-05-07T09:08:59.457364Z node 25 :PERSQUEUE DEBUG: cache_eviction.h:315: Caching head blob in L1. 
Partition 7 offset 0 count 1 size 177 actorID [25:7501628262210957653:2503] 2025-05-07T09:08:59.457503Z node 25 :PERSQUEUE DEBUG: partition_write.cpp:524: [PQ: 72075186224037910, Partition: 7, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 122 WriteNewSizeFromSupportivePartitions# 0 2025-05-07T09:08:59.457565Z node 25 :PERSQUEUE DEBUG: partition_write.cpp:58: [PQ: 72075186224037910, Partition: 7, State: StateIdle] TPartition::ReplyWrite. Partition: 7 2025-05-07T09:08:59.457639Z node 25 :PERSQUEUE DEBUG: partition_write.cpp:324: [PQ: 72075186224037910, Partition: 7, State: StateIdle] Answering for message sourceid: '\0test-src-id-compat2', Topic: 'rt3.dc1--account--topic100', Partition: 7, SeqNo: 1, partNo: 0, Offset: 0 is stored on disk 2025-05-07T09:08:59.457904Z node 25 :PERSQUEUE DEBUG: partition_read.cpp:774: [PQ: 72075186224037910, Partition: 7, State: StateIdle] Topic 'rt3.dc1--account--topic100' partition 7 user user readTimeStamp for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 0 rrg 0 2025-05-07T09:08:59.457942Z node 25 :PERSQUEUE DEBUG: partition_read.cpp:816: [PQ: 72075186224037910, Partition: 7, State: StateIdle] Topic 'rt3.dc1--account--topic100' partition 7 user user send read request for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 1 rrg 0 2025-05-07T09:08:59.458134Z node 25 :PERSQUEUE DEBUG: partition_read.cpp:731: [PQ: 72075186224037910, Partition: 7, State: StateIdle] read cookie 0 Topic 'rt3.dc1--account--topic100' partition 7 user user offset 0 count 1 size 1024000 endOffset 1 max time lag 0ms effective offset 0 2025-05-07T09:08:59.458167Z node 25 :PERSQUEUE DEBUG: partition_read.cpp:931: [PQ: 72075186224037910, Partition: 7, State: StateIdle] read cookie 0 added 0 blobs, size 0 count 0 last offset 0, current partition end offset: 1 2025-05-07T09:08:59.458218Z node 25 :PERSQUEUE DEBUG: partition_read.cpp:948: [PQ: 72075186224037910, Partition: 7, State: StateIdle] Reading cookie 0. All data is from uncompacted head. 2025-05-07T09:08:59.458244Z node 25 :PERSQUEUE DEBUG: partition_read.cpp:420: FormAnswer for 0 blobs 2025-05-07T09:08:59.458325Z node 25 :PERSQUEUE DEBUG: partition_read.cpp:856: Topic 'rt3.dc1--account--topic100' partition 7 user user readTimeStamp done, result 1746608939381 queuesize 0 startOffset 0 2025-05-07T09:08:59.458414Z node 25 :PERSQUEUE DEBUG: pq_impl.cpp:377: Answer ok topic: 'rt3.dc1--account--topic100' partition: 7 messageNo: 1 requestId: cookie: 1 2025-05-07T09:08:59.458509Z node 25 :PQ_WRITE_PROXY DEBUG: writer.cpp:538: TPartitionWriter 72075186224037910 (partition=7) Received event: NKikimr::TEvPersQueue::TEvResponse 2025-05-07T09:08:59.459384Z node 25 :PERSQUEUE DEBUG: pq_l2_cache.cpp:120: PQ Cache (L2). Adding blob. Tablet '72075186224037910' partition 7 offset 0 partno 0 count 1 parts 0 size 177 2025-05-07T09:08:59.462494Z :DEBUG: [] MessageGroupId [test-src-id-compat2] SessionId [test-src-id-compat2|3b8cb84-1787d827-b39750c8-371ffd80_0] Write session got write response: sequence_numbers: 1 offsets: 0 already_written: false partition_id: 7 write_statistics { persist_duration_ms: 6 queued_in_partition_duration_ms: 69 } 2025-05-07T09:08:59.462586Z :DEBUG: [] MessageGroupId [test-src-id-compat2] SessionId [test-src-id-compat2|3b8cb84-1787d827-b39750c8-371ffd80_0] Write session: acknoledged message 1 2025-05-07T09:08:59.464671Z :INFO: [] MessageGroupId [test-src-id-compat2] SessionId [test-src-id-compat2|3b8cb84-1787d827-b39750c8-371ffd80_0] Write session: close. 
Timeout = 0 ms 2025-05-07T09:08:59.464794Z :INFO: [] MessageGroupId [test-src-id-compat2] SessionId [test-src-id-compat2|3b8cb84-1787d827-b39750c8-371ffd80_0] Write session will now close 2025-05-07T09:08:59.464876Z :DEBUG: [] MessageGroupId [test-src-id-compat2] SessionId [test-src-id-compat2|3b8cb84-1787d827-b39750c8-371ffd80_0] Write session: aborting 2025-05-07T09:08:59.465555Z :INFO: [] MessageGroupId [test-src-id-compat2] SessionId [test-src-id-compat2|3b8cb84-1787d827-b39750c8-371ffd80_0] Write session: gracefully shut down, all writes complete 2025-05-07T09:08:59.465626Z :DEBUG: [] MessageGroupId [test-src-id-compat2] SessionId [test-src-id-compat2|3b8cb84-1787d827-b39750c8-371ffd80_0] Write session: destroy 2025-05-07T09:08:59.466964Z node 25 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 5 sessionId: test-src-id-compat2|3b8cb84-1787d827-b39750c8-371ffd80_0 grpc read done: success: 0 data: 2025-05-07T09:08:59.467014Z node 25 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 5 sessionId: test-src-id-compat2|3b8cb84-1787d827-b39750c8-371ffd80_0 grpc read failed 2025-05-07T09:08:59.467070Z node 25 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 5 sessionId: test-src-id-compat2|3b8cb84-1787d827-b39750c8-371ffd80_0 grpc closed 2025-05-07T09:08:59.467103Z node 25 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 5 sessionId: test-src-id-compat2|3b8cb84-1787d827-b39750c8-371ffd80_0 is DEAD 2025-05-07T09:08:59.468120Z node 25 :PQ_WRITE_PROXY DEBUG: writer.cpp:538: TPartitionWriter 72075186224037910 (partition=7) Received event: NActors::TEvents::TEvPoison 2025-05-07T09:08:59.471163Z node 25 :PERSQUEUE DEBUG: pq_impl.cpp:2899: [PQ: 72075186224037910] server disconnected, pipe [25:7501628275095861106:2748] destroyed 2025-05-07T09:08:59.471244Z node 25 :PERSQUEUE DEBUG: partition_write.cpp:138: [PQ: 72075186224037910, Partition: 7, State: StateIdle] TPartition::DropOwner. 2025-05-07T09:09:00.737898Z node 25 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1937: ActorId: [25:7501628279390828467:2754] TxId: 281474976720699. Ctx: { TraceId: 01jtn02s184qdyem04s9jp9xej, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=25&id=NDBhY2M0YTAtNDJhMDBmODgtODhmYjI3NGMtZjE5ZjRhN2E=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. UNAVAILABLE: Failed to send EvStartKqpTasksRequest because node is unavailable: 26 2025-05-07T09:09:00.739024Z node 25 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1216: SelfId: [25:7501628279390828471:2754], TxId: 281474976720699, task: 3. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=25&id=NDBhY2M0YTAtNDJhMDBmODgtODhmYjI3NGMtZjE5ZjRhN2E=. TraceId : 01jtn02s184qdyem04s9jp9xej. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Handle abort execution event from: [25:7501628279390828467:2754], status: UNAVAILABLE, reason: {
: Error: Terminate execution } >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_send_and_read_multiple_messages[tables_format_v0] |93.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_and_read_message[tables_format_v0-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_get_queue_attributes_only_runtime_attributes[tables_format_v1-std] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMicroBatchesWithTenant::test_micro_batch_read[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_empty_queue_url[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_empty_queue_url[tables_format_v1] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_queue_attributes[tables_format_v1-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_queue_attributes[tables_format_v1-std] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_empty_queue_url[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_get_queue_attributes_only_attributes_table[tables_format_v0-fifo] |93.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_wrong_delete_fails[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_queue_attributes[tables_format_v1-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_queue_attributes_batch[tables_format_v0] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delete_message_works[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_queue_attributes_batch[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delete_message_works[tables_format_v1] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_queue_attributes_batch[tables_format_v1] >> ttl_unavailable_s3.py::TestUnavailableS3::test [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_get_queue_attributes_only_attributes_table[tables_format_v0-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_get_queue_attributes_only_attributes_table[tables_format_v0-std] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_deduplication[tables_format_v1-by_deduplication_id] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_deduplication[tables_format_v1-content_based] |93.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_set_very_big_visibility_timeout[tables_format_v0] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_can_read_from_different_groups[tables_format_v1] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_crutch_groups_selection_algorithm_selects_second_group_batch[tables_format_v0] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_multi_read_dont_stall[tables_format_v1] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_get_queue_attributes_only_attributes_table[tables_format_v0-std] [GOOD] >> 
test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_get_queue_attributes_only_attributes_table[tables_format_v1-fifo] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_visibility_change_disables_receive_attempt_id[tables_format_v1-with_change_visibility] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_get_queue_attributes_only_attributes_table[tables_format_v1-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_change_visibility_to_zero_works[tables_format_v1-std] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delete_message_batch_deduplicates_receipt_handle[tables_format_v0-fifo] [GOOD] |93.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_message[tables_format_v0-fifo] [GOOD] |93.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/olap/ttl_tiering/py3test >> data_correctness.py::TestDataCorrectness::test [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_message[tables_format_v0-std] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_invalid_queue_url[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_invalid_queue_url[tables_format_v1] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_visibility_timeout_works[tables_format_v1] |93.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_does_not_change_visibility_not_in_flight[tables_format_v1-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delete_message_works[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_does_not_change_visibility_for_deleted_message[tables_format_v0-fifo] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_invalid_queue_url[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_list_queues_of_nonexistent_user[tables_format_v0] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_create_q_twice[tables_format_v0-fifo] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_list_queues_of_nonexistent_user[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_queue_attributes_batch[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_list_queues_of_nonexistent_user[tables_format_v1] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_read_dont_stall[tables_format_v0] |93.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_get_queue_attributes_only_attributes_table[tables_format_v1-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_list_queues_of_nonexistent_user[tables_format_v1] [GOOD] |93.7%| [TA] $(B)/ydb/services/ydb/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_multi_read_dont_stall[tables_format_v0] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_set_very_big_visibility_timeout[tables_format_v1] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_deduplication[tables_format_v1-content_based] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_delete_message_works[tables_format_v0] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_does_not_change_visibility_for_deleted_message[tables_format_v0-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_does_not_change_visibility_for_deleted_message[tables_format_v0-std] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_send_and_read_multiple_messages[tables_format_v0] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_validates_group_id[tables_format_v0] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_send_and_read_multiple_messages[tables_format_v1] |93.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/olap/ttl_tiering/py3test >> unstable_connection.py::TestUnstableConnection::test [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_change_visibility_works[tables_format_v0-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_change_visibility_works[tables_format_v1-fifo] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_multi_read_dont_stall[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_partial_delete_works[tables_format_v0] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_does_not_change_visibility_for_deleted_message[tables_format_v0-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_does_not_change_visibility_for_deleted_message[tables_format_v1-fifo] |93.7%| [TA] {RESULT} $(B)/ydb/services/ydb/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_read_dont_stall[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_does_not_change_visibility_for_deleted_message[tables_format_v1-fifo] [GOOD] >> test_polling.py::TestSqsPolling::test_receive_message_with_polling[tables_format_v0-long_polling-fifo] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_queue_attributes[tables_format_v1] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_receive_attempt_reloads_same_messages[tables_format_v1-after_crutch_batch] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_change_visibility_batch_works[tables_format_v0-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_change_visibility_batch_works[tables_format_v0-std] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_validates_deduplication_id[tables_format_v1] |93.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_message[tables_format_v0-fifo] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_queue_attributes[tables_format_v0] >> test_generic_messaging.py::TestYandexAttributesPrefix::test_allows_yandex_message_attribute_prefix[tables_format_v0] [GOOD] >> TPersQueueTest::InflightLimit [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_visibility_timeout_expires_on_wait_timeout[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_invalid_queue_url[tables_format_v0] [GOOD] |93.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delete_message_batch_deduplicates_receipt_handle[tables_format_v0-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_visibility_timeout_works[tables_format_v0] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_invalid_queue_url[tables_format_v1] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_invalid_queue_url[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_list_queues_of_nonexistent_user[tables_format_v0] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_change_visibility_batch_works[tables_format_v0-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_change_visibility_batch_works[tables_format_v0-std] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_list_queues_of_nonexistent_user[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_list_queues_of_nonexistent_user[tables_format_v1] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_list_queues_of_nonexistent_user[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_multi_read_dont_stall[tables_format_v0] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_send_and_read_multiple_messages[tables_format_v1] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_validates_deduplication_id[tables_format_v0] |93.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> 
test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_visibility_timeout_works[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_queue_attributes_batch[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_queue_attributes_batch[tables_format_v1] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_crutch_groups_selection_algorithm_selects_second_group_batch[tables_format_v1] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_deduplication[tables_format_v0-by_deduplication_id] |93.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_queue_attributes[tables_format_v0] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_validates_deduplication_id[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_change_visibility_batch_works[tables_format_v1-fifo] |93.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_does_not_change_visibility_for_deleted_message[tables_format_v1-fifo] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_delete_message_works[tables_format_v0] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_delete_message_works[tables_format_v1] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_multi_read_dont_stall[tables_format_v0] [GOOD]
------- [TM] {asan, default-linux-x86_64, release} ydb/tests/olap/ttl_tiering/py3test >> ttl_unavailable_s3.py::TestUnavailableS3::test [GOOD]
Test command err:
!!! simulating S3 hang up -- sending SIGSTOP !!!
simulating S3 recovery -- sending SIGCONT >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_change_visibility_works[tables_format_v1-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_change_visibility_works[tables_format_v1-std] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_can_read_new_written_data_on_visibility_timeout[tables_format_v0] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_change_visibility_batch_works[tables_format_v1-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_change_visibility_batch_works[tables_format_v1-std] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_ttl_Date-pk_types18-all_types18-index18-Date--] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_can_read_new_written_data_on_visibility_timeout[tables_format_v0] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_queue_attributes_batch[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_read_dont_stall[tables_format_v0] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_ttl_Uint64-pk_types15-all_types15-index15-Uint64--] |93.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_get_queue_attributes_only_runtime_attributes[tables_format_v1-std] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Interval-pk_types35-all_types35-index35---] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_partial_delete_works[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_partial_delete_works[tables_format_v1] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_deduplication[tables_format_v0-by_deduplication_id] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_deduplication[tables_format_v0-content_based] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_validates_message_body[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_validates_message_body[tables_format_v1] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_visibility_timeout_works[tables_format_v0] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_visibility_change_disables_receive_attempt_id[tables_format_v1-with_change_visibility] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Utf8-pk_types30-all_types30-index30---] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_visibility_change_disables_receive_attempt_id[tables_format_v1-with_delete_message] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Decimal229-pk_types26-all_types26-index26---] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_validates_group_id[tables_format_v0] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_validates_group_id[tables_format_v1] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_read_dont_stall[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_set_very_big_visibility_timeout[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_validates_message_attribute_value[tables_format_v0] >> 
test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_validates_message_body[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_visibility_timeout_expires_on_wait_timeout[tables_format_v0] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Uint32-pk_types23-all_types23-index23---] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_validates_group_id[tables_format_v1] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_validates_receive_attempt_id[tables_format_v0] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_visibility_change_disables_receive_attempt_id[tables_format_v1-with_delete_message] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_multi_read_dont_stall[tables_format_v0] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_visibility_timeout_works[tables_format_v0] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_validates_message_attribute_value[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_validates_message_attribute_value[tables_format_v1] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_validates_receive_attempt_id[tables_format_v0] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_validates_receive_attempt_id[tables_format_v1] |93.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_get_queue_attributes_only_attributes_table[tables_format_v1-fifo] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_validates_receive_attempt_id[tables_format_v1] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_visibility_change_disables_receive_attempt_id[tables_format_v0-with_change_visibility] |93.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delete_message_batch_deduplicates_receipt_handle[tables_format_v0-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_validates_message_attribute_value[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_validates_message_attributes[tables_format_v0] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_delete_message_works[tables_format_v1] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_fifo_read_delete_single_message >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_change_visibility_works[tables_format_v1-std] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_deduplication[tables_format_v0-content_based] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_validates_message_attributes[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_validates_message_attributes[tables_format_v1] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_message[tables_format_v0-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_message[tables_format_v1-fifo] |93.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_change_visibility_batch_works[tables_format_v0-std] [GOOD] >> 
TPersQueueTest::TestReadRuleServiceTypePassword [GOOD] >> TPersQueueTest::TestReadPartitionStatus >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_crutch_groups_selection_algorithm_selects_second_group_batch[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_message[tables_format_v1-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_message[tables_format_v1-std] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_message[tables_format_v1-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_message_batch[tables_format_v0-fifo] >> test_polling.py::TestSqsPolling::test_receive_message_with_polling[tables_format_v0-long_polling-fifo] [GOOD] >> test_polling.py::TestSqsPolling::test_receive_message_with_polling[tables_format_v0-long_polling-std] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_message[tables_format_v0-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_message[tables_format_v1-fifo] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_queue_attributes[tables_format_v0] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_visibility_change_disables_receive_attempt_id[tables_format_v0-with_change_visibility] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_visibility_change_disables_receive_attempt_id[tables_format_v0-with_delete_message] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_queue_attributes[tables_format_v1] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_validates_message_attributes[tables_format_v1] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_fifo_read_delete_single_message [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_only_single_read_infly_from_fifo >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_change_visibility_batch_works[tables_format_v0-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_message[tables_format_v1-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_message[tables_format_v1-std] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_queue_attributes[tables_format_v1] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_read_dont_stall[tables_format_v0] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Datetime64-pk_types37-all_types37-index37---] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_4__SYNC-pk_types5-all_types5-index5---SYNC] |93.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_read_dont_stall[tables_format_v0] [GOOD] |93.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_does_not_change_visibility_for_deleted_message[tables_format_v1-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_message[tables_format_v1-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_message_batch[tables_format_v0-fifo] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_partial_delete_works[tables_format_v1] [GOOD] >> 
test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_queue_attributes[tables_format_v0-fifo] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_change_visibility_to_zero_works[tables_format_v1-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_change_visibility_works[tables_format_v0-fifo] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_visibility_change_disables_receive_attempt_id[tables_format_v0-with_delete_message] [GOOD] >> test_polling.py::TestSqsPolling::test_receive_message_with_polling[tables_format_v0-long_polling-std] [GOOD] >> test_polling.py::TestSqsPolling::test_receive_message_with_polling[tables_format_v1-long_polling-fifo] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_create_q_twice[tables_format_v0-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_create_q_twice[tables_format_v0-std] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_queue_attributes[tables_format_v0-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_queue_attributes[tables_format_v0-std] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_change_visibility_batch_works[tables_format_v1-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_change_visibility_to_zero_works[tables_format_v0-fifo] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_only_single_read_infly_from_fifo [GOOD] |93.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> test_polling.py::TestSqsPolling::test_receive_message_with_polling[tables_format_v1-long_polling-fifo] [GOOD] >> test_polling.py::TestSqsPolling::test_receive_message_with_polling[tables_format_v1-long_polling-std] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_create_q_twice[tables_format_v0-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_create_q_twice[tables_format_v1-fifo] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_multi_read_dont_stall[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_partial_delete_works[tables_format_v0] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_queue_attributes[tables_format_v0-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_change_visibility_to_zero_works[tables_format_v0-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_change_visibility_to_zero_works[tables_format_v0-std] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_create_q_twice[tables_format_v1-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_create_q_twice[tables_format_v1-std] |93.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_validates_deduplication_id[tables_format_v0] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Timestamp-pk_types34-all_types34-index34---] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_message_batch[tables_format_v0-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_message_batch[tables_format_v0-std] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_ttl_DyNumber-pk_types13-all_types13-index13-DyNumber--] >> 
test_polling.py::TestSqsPolling::test_receive_message_with_polling[tables_format_v1-long_polling-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_create_q_twice[tables_format_v1-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_create_queue_by_nonexistent_user_fails[tables_format_v0] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_visibility_timeout_expires_on_wait_timeout[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_visibility_timeout_expires_on_wait_timeout[tables_format_v1] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_create_queue_by_nonexistent_user_fails[tables_format_v0] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_queue_attributes[tables_format_v1] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_read_dont_stall[tables_format_v0] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_change_visibility_to_zero_works[tables_format_v0-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_change_visibility_to_zero_works[tables_format_v1-fifo] |93.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestYandexAttributesPrefix::test_allows_yandex_message_attribute_prefix[tables_format_v0] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_validates_deduplication_id[tables_format_v1] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_validates_group_id[tables_format_v0] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_change_visibility_to_zero_works[tables_format_v1-fifo] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_validates_group_id[tables_format_v0] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_validates_group_id[tables_format_v1] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_message_batch[tables_format_v0-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_message_batch[tables_format_v0-std] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Int32-pk_types20-all_types20-index20---] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_validates_group_id[tables_format_v1] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_validates_receive_attempt_id[tables_format_v0] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_crutch_groups_selection_algorithm_selects_second_group_batch[tables_format_v1] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_validates_receive_attempt_id[tables_format_v0] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_validates_receive_attempt_id[tables_format_v1] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_deduplication[tables_format_v0-by_deduplication_id] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_validates_receive_attempt_id[tables_format_v1] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_ttl_Timestamp-pk_types17-all_types17-index17-Timestamp--] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_change_visibility_works[tables_format_v0-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_change_visibility_works[tables_format_v0-std] >> 
test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_visibility_timeout_works[tables_format_v0] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_visibility_timeout_works[tables_format_v1] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_String-pk_types29-all_types29-index29---] |93.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_multi_read_dont_stall[tables_format_v0] [GOOD] |93.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_multi_read_dont_stall[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_message_batch[tables_format_v0-std] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_receive_attempt_reloads_same_messages[tables_format_v1-after_crutch_batch] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_receive_attempt_reloads_same_messages[tables_format_v1-standard_mode] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_deduplication[tables_format_v0-by_deduplication_id] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_deduplication[tables_format_v0-content_based]
------- [TM] {asan, default-linux-x86_64, release} ydb/services/persqueue_v1/ut/unittest >> TPersQueueTest::InflightLimit [GOOD]
Test command err:
=== Server->StartServer(false);
2025-05-07T09:03:38.265212Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501626895606424693:2060];send_to=[0:7307199536658146131:7762515];
2025-05-07T09:03:38.265444Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message;
2025-05-07T09:03:38.489738Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002632/r3tmp/tmpP3rwwO/pdisk_1.dat
TServer::EnableGrpc on GrpcPort 22013, node 1
2025-05-07T09:03:38.714090Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2025-05-07T09:03:38.714254Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting
2025-05-07T09:03:38.716028Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected
2025-05-07T09:03:38.723166Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded
2025-05-07T09:03:38.835077Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/zvgn/002632/r3tmp/yandexNiySJW.tmp
2025-05-07T09:03:38.835101Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/zvgn/002632/r3tmp/yandexNiySJW.tmp
2025-05-07T09:03:38.835236Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/zvgn/002632/r3tmp/yandexNiySJW.tmp
2025-05-07T09:03:38.835316Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration
2025-05-07T09:03:38.877697Z INFO: TTestServer started on Port 20864 GrpcPort 22013
TClient is connected to server localhost:20864
PQClient connected to localhost:22013
=== TenantModeEnabled() = 0
=== Init PQ - start server on port 22013
WaitRootIsUp 'Root'...
TClient::Ls request: Root
TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED)
WaitRootIsUp 'Root' success.
2025-05-07T09:03:39.222260Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "Root" StoragePools { Name: "/Root:test" Kind: "test" } } } TxId: 281474976710657 TabletId: 72057594046644480 PeerName: "" , at schemeshard: 72057594046644480
2025-05-07T09:03:39.222434Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //Root, opId: 281474976710657:0, at schemeshard: 72057594046644480
2025-05-07T09:03:39.222569Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 0
2025-05-07T09:03:39.222775Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 281474976710657:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480
2025-05-07T09:03:39.222839Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480
waiting...
2025-05-07T09:03:39.223291Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 281474976710657, response: Status: StatusAccepted TxId: 281474976710657 SchemeshardId: 72057594046644480 PathId: 1, at schemeshard: 72057594046644480
2025-05-07T09:03:39.223431Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976710657, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //Root
2025-05-07T09:03:39.223588Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 281474976710657:0, at schemeshard: 72057594046644480
2025-05-07T09:03:39.223618Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 281474976710657:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046644480
2025-05-07T09:03:39.223631Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 281474976710657:0 ProgressState no shards to create, do next state
2025-05-07T09:03:39.223645Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 281474976710657:0 2 -> 3
2025-05-07T09:03:39.224192Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 281474976710657:0, at schemeshard: 72057594046644480
2025-05-07T09:03:39.224267Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 281474976710657:0 ProgressState, at schemeshard: 72057594046644480
2025-05-07T09:03:39.224289Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 281474976710657:0 3 -> 128
2025-05-07T09:03:39.224749Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 281474976710657, at schemeshard: 72057594046644480
2025-05-07T09:03:39.224770Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 281474976710657, ready parts: 0/1, is published: true
2025-05-07T09:03:39.224784Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 281474976710657, at schemeshard: 72057594046644480
2025-05-07T09:03:39.224887Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 281474976710657:0, at schemeshard: 72057594046644480
2025-05-07T09:03:39.224909Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 281474976710657:0, at schemeshard: 72057594046644480
2025-05-07T09:03:39.224949Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 281474976710657:0, at tablet# 72057594046644480
2025-05-07T09:03:39.224969Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 281474976710657 ready parts: 1/1
2025-05-07T09:03:39.228158Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046644480 Flags: 2 } ExecLevel: 0 TxId: 281474976710657 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545
2025-05-07T09:03:39.228547Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 281474976710657:4294967295 from tablet: 72057594046644480 to tablet: 72057594046316545 cookie: 0:281474976710657 msg type: 269090816
2025-05-07T09:03:39.228672Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 281474976710657, partId: 4294967295, tablet: 72057594046316545
2025-05-07T09:03:39.229727Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 1746608619273, transactions count in step: 1, at schemeshard: 72057594046644480
2025-05-07T09:03:39.229814Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 281474976710657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1746608619273 MediatorID: 72057594046382081 TabletID: 72057594046644480, at schemeshard: 72057594046644480
2025-05-07T09:03:39.229849Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 281474976710657:0, at tablet# 72057594046644480
2025-05-07T09:03:39.230082Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 281474976710657:0 128 -> 240
2025-05-07T09:03:39.230110Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 281474976710657:0, at tablet# 72057594046644480
2025-05-07T09:03:39.230294Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 1
2025-05-07T09:03:39.230347Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046644480, LocalPathId: 1], at schemeshard: 72057594046644480
2025-05-07T09:03:39.230818Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046644480
2025-05-07T09:03:39.230846Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046644480, txId: 281474976710657, path id: [OwnerId: 72057594046644480, LocalPathId: 1]
2025-05-07T09:03:39.231001Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046644480
2025-05-07T09:03:39.231014Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:7501626895606425229:2252], at schemeshard: 72057594046644480, txId: 281474976710657, path id: 1
2025-05-07T09:03:39.231082Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 281474976710657:0, at schemeshard: 72057594046644480
2025-05-07T09:03:39.231129Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046644480] TDone opId# 281474976710657:0 ProgressState
2025-05-07T09:03:39.231199Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976710657:0 progress is 1/1
2025-05-07T09:03:39.231210Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 281474976710657 ready parts: 1/1
2025-05-07T09:03:39.231236Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976710657:0 progress is 1/1
2025-05-07T09:03:39.231242Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDo ... :Pointer &> /-S/contrib/libs/cxxsupp/libcxx/include/variant:706:1
#7 0x1a6bc11d in __union<2UL, const grpc_core::ChannelArgs::Pointer &> /-S/contrib/libs/cxxsupp/libcxx/include/variant:706:1
#8 0x1a6bc11d in construct_at >, grpc_core::ChannelArgs::Pointer>, const std::__y1::in_place_index_t<2UL> &, const grpc_core::ChannelArgs::Pointer &, std::__y1::__variant_detail::__union<(std::__y1::__variant_detail::_Trait)1, 0UL, int, TBasicString >, grpc_core::ChannelArgs::Pointer> *> /-S/contrib/libs/cxxsupp/libcxx/include/__memory/construct_at.h:41:46
#9 0x1a6bc11d in __construct_at >, grpc_core::ChannelArgs::Pointer>, const std::__y1::in_place_index_t<2UL> &, const grpc_core::ChannelArgs::Pointer &, std::__y1::__variant_detail::__union<(std::__y1::__variant_detail::_Trait)1, 0UL, int, TBasicString >, grpc_core::ChannelArgs::Pointer> *> /-S/contrib/libs/cxxsupp/libcxx/include/__memory/construct_at.h:49:10
#10 0x1a6bc11d in operator() &> /-S/contrib/libs/cxxsupp/libcxx/include/variant:816:13
#11 0x1a6bc11d in __invoke<(lambda at /-S/contrib/libs/cxxsupp/libcxx/include/variant:815:11), const std::__y1::__variant_detail::__alt<2UL, grpc_core::ChannelArgs::Pointer> &> /-S/contrib/libs/cxxsupp/libcxx/include/__type_traits/invoke.h:149:25
#12 0x1a6bc11d in decltype(auto) std::__y1::__variant_detail::__visitation::__base::__dispatcher<2ul>::__dispatch[abi:fe200000]>, grpc_core::ChannelArgs::Pointer>>::__generic_construct[abi:fe200000]>, grpc_core::ChannelArgs::Pointer>, (std::__y1::__variant_detail::_Trait)1> const&>(std::__y1::__variant_detail::__ctor>, grpc_core::ChannelArgs::Pointer>>&, std::__y1::__variant_detail::__copy_constructor>, grpc_core::ChannelArgs::Pointer>, (std::__y1::__variant_detail::_Trait)1> const&)::'lambda'(std::__y1::__variant_detail::__copy_constructor>, grpc_core::ChannelArgs::Pointer>, (std::__y1::__variant_detail::_Trait)1> const&)&&, std::__y1::__variant_detail::__base<(std::__y1::__variant_detail::_Trait)1, int, TBasicString>, grpc_core::ChannelArgs::Pointer> const&>(std::__y1::__variant_detail::__copy_constructor>, grpc_core::ChannelArgs::Pointer>, (std::__y1::__variant_detail::_Trait)1> const&, std::__y1::__variant_detail::__base<(std::__y1::__variant_detail::_Trait)1, int, TBasicString>, grpc_core::ChannelArgs::Pointer> const&) /-S/contrib/libs/cxxsupp/libcxx/include/variant:531:14
#13 0x1a6b7878 in __visit_alt_at<(lambda at /-S/contrib/libs/cxxsupp/libcxx/include/variant:815:11), const std::__y1::__variant_detail::__copy_constructor >, grpc_core::ChannelArgs::Pointer>, (std::__y1::__variant_detail::_Trait)1> &> /-S/contrib/libs/cxxsupp/libcxx/include/variant:493:12
#14 0x1a6b7878 in __generic_construct >, grpc_core::ChannelArgs::Pointer>, (std::__y1::__variant_detail::_Trait)1> &> /-S/contrib/libs/cxxsupp/libcxx/include/variant:813:7
#15 0x1a6b7878 in __copy_constructor /-S/contrib/libs/cxxsupp/libcxx/include/variant:888:1
#16 0x1a6b7878 in __assignment /-S/contrib/libs/cxxsupp/libcxx/include/variant:900:28
#17 0x1a6b7878 in __move_assignment /-S/contrib/libs/cxxsupp/libcxx/include/variant:986:1
#18 0x1a6b7878 in __copy_assignment /-S/contrib/libs/cxxsupp/libcxx/include/variant:1016:1
#19 0x1a6b7878 in __impl /-S/contrib/libs/cxxsupp/libcxx/include/variant:1036:25
#20 0x1a6b7878 in variant /-S/contrib/libs/cxxsupp/libcxx/include/variant:1183:35
#21 0x1a6b7878 in grpc_core::AVL>, std::__y1::variant>, grpc_core::ChannelArgs::Pointer>>::Rebalance(TBasicString>, std::__y1::variant>, grpc_core::ChannelArgs::Pointer>, std::__y1::shared_ptr>, std::__y1::variant>, grpc_core::ChannelArgs::Pointer>>::Node> const&, std::__y1::shared_ptr>, std::__y1::variant>, grpc_core::ChannelArgs::Pointer>>::Node> const&) /-S/contrib/libs/grpc/src/core/lib/avl/avl.h:252:30
#22 0x1a6b699a in grpc_core::AVL>, std::__y1::variant>, grpc_core::ChannelArgs::Pointer>>::AddKey(std::__y1::shared_ptr>, std::__y1::variant>, grpc_core::ChannelArgs::Pointer>>::Node> const&, TBasicString>, std::__y1::variant>, grpc_core::ChannelArgs::Pointer>) /-S/contrib/libs/grpc/src/core/lib/avl/avl.h:265:14
#23 0x1a6b6969 in grpc_core::AVL>, std::__y1::variant>, grpc_core::ChannelArgs::Pointer>>::AddKey(std::__y1::shared_ptr>, std::__y1::variant>, grpc_core::ChannelArgs::Pointer>>::Node> const&, TBasicString>, std::__y1::variant>, grpc_core::ChannelArgs::Pointer>) /-S/contrib/libs/grpc/src/core/lib/avl/avl.h:266:24
#24 0x1a6ae877 in grpc_core::AVL>, std::__y1::variant>, grpc_core::ChannelArgs::Pointer>>::Add(TBasicString>, std::__y1::variant>, grpc_core::ChannelArgs::Pointer>) const /-S/contrib/libs/grpc/src/core/lib/avl/avl.h:36:16
#25 0x1a6ae25a in grpc_core::ChannelArgs::Set(std::__y1::basic_string_view>, std::__y1::variant>, grpc_core::ChannelArgs::Pointer>) const /-S/contrib/libs/grpc/src/core/lib/channel/channel_args.cc:158:28
#26 0x1a6ad925 in grpc_core::ChannelArgs::Set(std::__y1::basic_string_view>, grpc_core::ChannelArgs::Pointer) const /-S/contrib/libs/grpc/src/core/lib/channel/channel_args.cc:150:10
#27 0x1a717b16 in grpc_core::Channel::Create(char const*, grpc_core::ChannelArgs, grpc_channel_stack_type, grpc_transport*) /-S/contrib/libs/grpc/src/core/lib/surface/channel.cc:218:19
#28 0x1aa9dc81 in CreateChannel /-S/contrib/libs/grpc/src/core/ext/transport/chttp2/client/chttp2_connector.cc:323:10
#29 0x1aa9dc81 in grpc_channel_create /-S/contrib/libs/grpc/src/core/ext/transport/chttp2/client/chttp2_connector.cc:365:14
#30 0x1b301230 in grpc::(anonymous namespace)::InsecureChannelCredentialsImpl::CreateChannelWithInterceptors(TBasicString> const&, grpc::ChannelArguments const&, std::__y1::vector>, std::__y1::allocator>>>) /-S/contrib/libs/grpc/src/cpp/client/insecure_credentials.cc:55:13
#31 0x1b30100b in grpc::(anonymous namespace)::InsecureChannelCredentialsImpl::CreateChannelImpl(TBasicString> const&, grpc::ChannelArguments const&) /-S/contrib/libs/grpc/src/cpp/client/insecure_credentials.cc:40:12
#32 0x1b2f97c4 in grpc::CreateCustomChannel(TBasicString> const&, std::__y1::shared_ptr const&, grpc::ChannelArguments const&) /-S/contrib/libs/grpc/src/cpp/client/create_channel.cc:50:25
#33 0x192457b6 in NKikimr::NPersQueueTests::NTestSuiteTPersQueueTest::TDirectReadTestSetup::Connect(NKikimr::NPersQueueTests::TPersQueueV1TestServer&) /-S/ydb/services/persqueue_v1/persqueue_ut.cpp:824:23
#34 0x18f41822 in NKikimr::NPersQueueTests::NTestSuiteTPersQueueTest::TDirectReadTestSetup::TDirectReadTestSetup(NKikimr::NPersQueueTests::TPersQueueV1TestServer&) /-S/ydb/services/persqueue_v1/persqueue_ut.cpp:806:13
#35 0x18f3bc0e in NKikimr::NPersQueueTests::NTestSuiteTPersQueueTest::TTestCaseDirectReadPreCached::Execute_(NUnitTest::TTestContext&) /-S/ydb/services/persqueue_v1/persqueue_ut.cpp:1124:30
#36 0x192244a7 in operator() /-S/ydb/services/persqueue_v1/persqueue_ut.cpp:134:1
#37 0x192244a7 in __invoke<(lambda at /-S/ydb/services/persqueue_v1/persqueue_ut.cpp:134:1) &> /-S/contrib/libs/cxxsupp/libcxx/include/__type_traits/invoke.h:149:25
#38 0x192244a7 in __call<(lambda at /-S/ydb/services/persqueue_v1/persqueue_ut.cpp:134:1) &> /-S/contrib/libs/cxxsupp/libcxx/include/__type_traits/invoke.h:224:5
#39 0x192244a7 in operator() /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:169:12
#40 0x192244a7 in std::__y1::__function::__func, void ()>::operator()() /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:314:10
#41 0x19cc2fd5 in operator() /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:431:12
#42 0x19cc2fd5 in operator() /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:990:10
#43 0x19cc2fd5 in TColoredProcessor::Run(std::__y1::function, TBasicString> const&, char const*, bool) /-S/library/cpp/testing/unittest/utmain.cpp:525:20
#44 0x19c92978 in NUnitTest::TTestBase::Run(std::__y1::function, TBasicString> const&, char const*, bool) /-S/library/cpp/testing/unittest/registar.cpp:373:18
#45 0x19223453 in NKikimr::NPersQueueTests::NTestSuiteTPersQueueTest::TCurrentTest::Execute() /-S/ydb/services/persqueue_v1/persqueue_ut.cpp:134:1
#46 0x19c94245 in NUnitTest::TTestFactory::Execute() /-S/library/cpp/testing/unittest/registar.cpp:494:19
#47 0x19cbd54c in NUnitTest::RunMain(int, char**) /-S/library/cpp/testing/unittest/utmain.cpp:872:44
#48 0x7f01edabfd8f (/lib/x86_64-linux-gnu/libc.so.6+0x29d8f) (BuildId: cd410b710f0f094c6832edd95931006d883af48e)
SUMMARY: AddressSanitizer: 7216333 byte(s) leaked in 1617 allocation(s).
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_partial_delete_works[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_partial_delete_works[tables_format_v1] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_change_visibility_batch_works[tables_format_v1-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_change_visibility_batch_works[tables_format_v1-std] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_read_dont_stall[tables_format_v0] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_read_dont_stall[tables_format_v1] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_receive_attempt_reloads_same_messages[tables_format_v1-standard_mode] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_send_and_read_multiple_messages[tables_format_v0] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_message_batch[tables_format_v0-std] [GOOD] |93.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_visibility_timeout_works[tables_format_v0] [GOOD] |93.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_crutch_groups_selection_algorithm_selects_second_group_batch[tables_format_v0] [GOOD] |93.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_read_dont_stall[tables_format_v0] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Timestamp64-pk_types38-all_types38-index38---] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_ttl_Uint32-pk_types14-all_types14-index14-Uint32--] >>
test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_visibility_timeout_works[tables_format_v1] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_write_and_read_to_different_groups[tables_format_v0] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_visibility_timeout_expires_on_wait_timeout[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_visibility_timeout_works[tables_format_v0] |93.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_change_visibility_batch_works[tables_format_v0-std] [GOOD] |93.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_validates_message_attributes[tables_format_v1] [GOOD] >> TPersQueueTest::TestReadPartitionStatus [GOOD] >> TPersQueueTest::TxCounters >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_1__SYNC-pk_types8-all_types8-index8---SYNC] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_deduplication[tables_format_v0-content_based] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_deduplication[tables_format_v1-by_deduplication_id] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_write_and_read_to_different_groups[tables_format_v0] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_write_and_read_to_different_groups[tables_format_v1] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_change_visibility_works[tables_format_v0-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_change_visibility_works[tables_format_v1-fifo] |93.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_deduplication[tables_format_v0-content_based] [GOOD] |93.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_change_visibility_works[tables_format_v1-std] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_UUID-pk_types31-all_types31-index31---] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_4_UNIQUE_SYNC-pk_types0-all_types0-index0--UNIQUE-SYNC] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_write_and_read_to_different_groups[tables_format_v1] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_write_read_delete_many_groups[tables_format_v0] |93.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_change_visibility_batch_works[tables_format_v0-std] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_read_dont_stall[tables_format_v0] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_read_dont_stall[tables_format_v1] |93.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_send_and_read_multiple_messages[tables_format_v0] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_send_and_read_multiple_messages[tables_format_v1] |93.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test 
>> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_only_single_read_infly_from_fifo [GOOD]
>> test_split_merge.py::TestSplitMerge::test_merge_split[table_ttl_Datetime-pk_types16-all_types16-index16-Datetime--]
>> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_deduplication[tables_format_v1-by_deduplication_id] [GOOD]
>> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_deduplication[tables_format_v1-content_based]
|93.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_can_read_new_written_data_on_visibility_timeout[tables_format_v0] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_partial_delete_works[tables_format_v1] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_queue_attributes[tables_format_v0-fifo]
>> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_2_UNIQUE_SYNC-pk_types2-all_types2-index2--UNIQUE-SYNC]
>> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_0__ASYNC-pk_types11-all_types11-index11---ASYNC]
|93.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_create_queue_by_nonexistent_user_fails[tables_format_v0] [GOOD]
|93.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_visibility_change_disables_receive_attempt_id[tables_format_v0-with_delete_message] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_queue_attributes[tables_format_v0-fifo] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_queue_attributes[tables_format_v0-std]
>> test_split_merge.py::TestSplitMerge::test_merge_split[table_Decimal150-pk_types25-all_types25-index25---]
>> test_split_merge.py::TestSplitMerge::test_merge_split[table_Interval64-pk_types39-all_types39-index39---]
|93.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_change_visibility_to_zero_works[tables_format_v1-fifo] [GOOD]
>> test_split_merge.py::TestSplitMerge::test_merge_split[table_Uint64-pk_types22-all_types22-index22---]
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_visibility_timeout_works[tables_format_v0] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_queue_attributes[tables_format_v0-std] [GOOD]
>> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_visibility_timeout_works[tables_format_v1] [GOOD]
>> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_write_and_read_to_different_groups[tables_format_v0]
>> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_send_and_read_multiple_messages[tables_format_v1] [GOOD]
>> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_validates_deduplication_id[tables_format_v0]
>> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_deduplication[tables_format_v1-content_based] [GOOD]
>> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_2__SYNC-pk_types7-all_types7-index7---SYNC]
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_change_visibility_works[tables_format_v1-fifo] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_change_visibility_works[tables_format_v1-std]
|93.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_queue_attributes[tables_format_v0-std] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_change_visibility_batch_works[tables_format_v1-std] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_change_visibility_to_zero_works[tables_format_v0-fifo]
>> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_validates_deduplication_id[tables_format_v0] [GOOD]
>> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_validates_deduplication_id[tables_format_v1]
|93.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_validates_receive_attempt_id[tables_format_v1] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_can_read_new_written_data_on_visibility_timeout[tables_format_v0] [GOOD]
>> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_write_and_read_to_different_groups[tables_format_v0] [GOOD]
>> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_write_and_read_to_different_groups[tables_format_v1]
>> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_validates_deduplication_id[tables_format_v1] [GOOD]
>> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_read_dont_stall[tables_format_v1] [GOOD]
>> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_receive_attempt_reloads_same_messages[tables_format_v0-after_crutch_batch]
|93.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_polling.py::TestSqsPolling::test_receive_message_with_polling[tables_format_v1-long_polling-std] [GOOD]
>> test_split_merge.py::TestSplitMerge::test_merge_split[table_Datetime-pk_types33-all_types33-index33---]
>> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_1_UNIQUE_SYNC-pk_types3-all_types3-index3--UNIQUE-SYNC]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_change_visibility_to_zero_works[tables_format_v0-fifo] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_change_visibility_to_zero_works[tables_format_v0-std]
>> test_split_merge.py::TestSplitMerge::test_merge_split[table_DyNumber-pk_types28-all_types28-index28---]
>> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_write_and_read_to_different_groups[tables_format_v1] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_change_visibility_to_zero_works[tables_format_v0-std] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_change_visibility_to_zero_works[tables_format_v1-fifo]
>> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_write_read_delete_many_groups[tables_format_v0] [GOOD]
>> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_write_read_delete_many_groups[tables_format_v1]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_change_visibility_to_zero_works[tables_format_v1-fifo] [GOOD]
>> test_split_merge.py::TestSplitMerge::test_merge_split[table_Int64-pk_types19-all_types19-index19---]
>> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_1__ASYNC-pk_types10-all_types10-index10---ASYNC]
|93.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_message_batch[tables_format_v0-std] [GOOD]
>> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_3__SYNC-pk_types6-all_types6-index6---SYNC]
>> TPersQueueTest::TxCounters [GOOD]
>> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_read_dont_stall[tables_format_v1] [GOOD]
>> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_receive_attempt_reloads_same_messages[tables_format_v0-after_crutch_batch]
|93.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_message_batch[tables_format_v0-std] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_change_visibility_works[tables_format_v1-std] [GOOD]
------- [TM] {asan, default-linux-x86_64, release} ydb/services/persqueue_v1/ut/unittest >> TPersQueueTest::TxCounters [GOOD]
Test command err:
=== Server->StartServer(false);
2025-05-07T09:03:38.298549Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501626895397982250:2072];send_to=[0:7307199536658146131:7762515];
2025-05-07T09:03:38.298656Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message;
2025-05-07T09:03:38.352629Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7501626896733482817:2074];send_to=[0:7307199536658146131:7762515];
2025-05-07T09:03:38.352709Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message;
2025-05-07T09:03:38.480193Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002613/r3tmp/tmpv6UVOZ/pdisk_1.dat
2025-05-07T09:03:38.492003Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created
2025-05-07T09:03:38.713108Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2025-05-07T09:03:38.713221Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting
2025-05-07T09:03:38.714418Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2025-05-07T09:03:38.714481Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting
2025-05-07T09:03:38.718140Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2
2025-05-07T09:03:38.718678Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected
2025-05-07T09:03:38.718985Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected
2025-05-07T09:03:38.744110Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded
TServer::EnableGrpc on GrpcPort 20592, node 1
2025-05-07T09:03:38.749028Z node 1 :GRPC_SERVER WARN: grpc_request_proxy.cpp:509: SchemeBoardDelete /Root Strong=0
2025-05-07T09:03:38.749077Z node 1 :GRPC_SERVER WARN: grpc_request_proxy.cpp:509: SchemeBoardDelete /Root Strong=0
2025-05-07T09:03:38.844121Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/zvgn/002613/r3tmp/yandex5bSWpA.tmp
2025-05-07T09:03:38.844154Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/zvgn/002613/r3tmp/yandex5bSWpA.tmp
2025-05-07T09:03:38.844324Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/zvgn/002613/r3tmp/yandex5bSWpA.tmp
2025-05-07T09:03:38.844457Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration
2025-05-07T09:03:38.887550Z INFO: TTestServer started on Port 7560 GrpcPort 20592
TClient is connected to server localhost:7560
PQClient connected to localhost:20592
=== TenantModeEnabled() = 0
=== Init PQ - start server on port 20592
WaitRootIsUp 'Root'...
TClient::Ls request: Root
TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED)
WaitRootIsUp 'Root' success.
2025-05-07T09:03:39.254176Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "Root" StoragePools { Name: "/Root:test" Kind: "test" } } } TxId: 281474976710657 TabletId: 72057594046644480 PeerName: "" , at schemeshard: 72057594046644480
2025-05-07T09:03:39.254391Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //Root, opId: 281474976710657:0, at schemeshard: 72057594046644480
2025-05-07T09:03:39.254606Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 0
2025-05-07T09:03:39.254849Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 281474976710657:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480
2025-05-07T09:03:39.254897Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480
2025-05-07T09:03:39.258894Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 281474976710657, response: Status: StatusAccepted TxId: 281474976710657 SchemeshardId: 72057594046644480 PathId: 1, at schemeshard: 72057594046644480
2025-05-07T09:03:39.259019Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976710657, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //Root
2025-05-07T09:03:39.259194Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 281474976710657:0, at schemeshard: 72057594046644480
2025-05-07T09:03:39.259240Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 281474976710657:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046644480
2025-05-07T09:03:39.259257Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 281474976710657:0 ProgressState no shards to create, do next state
2025-05-07T09:03:39.259297Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 281474976710657:0 2 -> 3
waiting...
2025-05-07T09:03:39.262203Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 281474976710657:0, at schemeshard: 72057594046644480
2025-05-07T09:03:39.262248Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 281474976710657:0 ProgressState, at schemeshard: 72057594046644480
2025-05-07T09:03:39.262269Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 281474976710657:0 3 -> 128
2025-05-07T09:03:39.265334Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 281474976710657:0, at schemeshard: 72057594046644480
2025-05-07T09:03:39.265371Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 281474976710657:0, at schemeshard: 72057594046644480
2025-05-07T09:03:39.265458Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 281474976710657:0, at tablet# 72057594046644480
2025-05-07T09:03:39.265502Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 281474976710657 ready parts: 1/1
2025-05-07T09:03:39.270520Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046644480 Flags: 2 } ExecLevel: 0 TxId: 281474976710657 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545
2025-05-07T09:03:39.274625Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 281474976710657, at schemeshard: 72057594046644480
2025-05-07T09:03:39.274659Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 281474976710657, ready parts: 0/1, is published: true
2025-05-07T09:03:39.274678Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 281474976710657, at schemeshard: 72057594046644480
2025-05-07T09:03:39.274797Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 281474976710657:4294967295 from tablet: 72057594046644480 to tablet: 72057594046316545 cookie: 0:281474976710657 msg type: 269090816
2025-05-07T09:03:39.274954Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 281474976710657, partId: 4294967295, tablet: 72057594046316545
2025-05-07T09:03:39.277797Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 1746608619322, transactions count in step: 1, at schemeshard: 72057594046644480
2025-05-07T09:03:39.277927Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 281474976710657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1746608619322 MediatorID: 72057594046382081 TabletID: 72057594046644480, at schemeshard: 72057594046644480
2025-05-07T09:03:39.277961Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 281474976710657:0, at tablet# 72057594046644480
2025-05-07T09:03:39.278262Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 281474976710657:0 128 -> 240
2025-05-07T09:03:39.278304Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 281474976710657:0, at tablet# 72057594046644480
2025-05-07T09:03:39.278522Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 1
2025-05-07T09:03:39.278575Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046644480, LocalPathId: 1], at schemeshard: 72057594046644480
2025-05-07T09:03:39.282360Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046644480
2025-05-07T09:03:39.282392Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046644480, txId: 281474976710657, path id: [OwnerId: 72057594046644480, LocalPathId: 1]
2025-05-07T09:03 ... f5-bcf438ae-de625bc3-b5f150ef_0
2025-05-07T09:10:05.226638Z node 32 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:168: new Describe partition request
2025-05-07T09:10:05.226888Z node 32 :PQ_READ_PROXY DEBUG: schema_actors.cpp:1212: TDescribePartitionActor for request path: "/Root/topic" include_location: true
2025-05-07T09:10:05.227002Z node 32 :PQ_READ_PROXY DEBUG: schema_actors.cpp:1222: TDescribePartitionActor[32:7501628557268728285:2493]: Bootstrap
2025-05-07T09:10:05.229860Z node 32 :PQ_READ_PROXY DEBUG: schema_actors.cpp:657: DescribeTopicImpl [32:7501628557268728285:2493]: Request location
2025-05-07T09:10:05.230148Z node 32 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72075186224037893][topic] pipe [32:7501628557268728294:2494] connected; active server actors: 1
2025-05-07T09:10:05.230259Z node 32 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037893][topic] addPartitionToResponse tabletId 72075186224037892, partitionId 0, NodeId 32, Generation 1
2025-05-07T09:10:05.230356Z node 32 :PQ_READ_PROXY DEBUG: schema_actors.cpp:750: DescribeTopicImpl [32:7501628557268728285:2493]: Got location
2025-05-07T09:10:05.230520Z node 32 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037893][topic] pipe [32:7501628557268728294:2494] disconnected; active server actors: 1
2025-05-07T09:10:05.230576Z node 32 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1688: [72075186224037893][topic] pipe [32:7501628557268728294:2494] disconnected no session
2025-05-07T09:10:05.238578Z node 32 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 2 sessionId: 123|7c08abf5-bcf438ae-de625bc3-b5f150ef_0 grpc read done: success: 0 data:
2025-05-07T09:10:05.238618Z node 32 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 2 sessionId: 123|7c08abf5-bcf438ae-de625bc3-b5f150ef_0 grpc read failed
2025-05-07T09:10:05.239474Z node 32 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:818: session v1 closed cookie: 2 sessionId: 123|7c08abf5-bcf438ae-de625bc3-b5f150ef_0
2025-05-07T09:10:05.239523Z node 32 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 2 sessionId: 123|7c08abf5-bcf438ae-de625bc3-b5f150ef_0 is DEAD
2025-05-07T09:10:05.240320Z node 32 :PQ_WRITE_PROXY DEBUG: writer.cpp:538: TPartitionWriter 72075186224037892 (partition=0) Received event: NActors::TEvents::TEvPoison
2025-05-07T09:10:05.241596Z node 32 :PQ_WRITE_PROXY DEBUG: grpc_pq_write.h:107: new grpc connection
2025-05-07T09:10:05.241656Z node 32 :PQ_WRITE_PROXY DEBUG: grpc_pq_write.h:141: new session created cookie 3
2025-05-07T09:10:05.243157Z node 32 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 3 sessionId: grpc read done: success: 1 data: init_request { path: "topic" producer_id: "123" partition_with_generation { generation: 1 } }
2025-05-07T09:10:05.243375Z node 32 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:442: session request cookie: 3 path: "topic" producer_id: "123" partition_with_generation { generation: 1 } from ipv6:[::1]:56690
2025-05-07T09:10:05.243416Z node 32 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:1535: write session: cookie=3 sessionId= userAgent="topic server" ip=ipv6:[::1]:56690 proto=topic topic=topic durationSec=0
2025-05-07T09:10:05.243433Z node 32 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:566: init check schema
2025-05-07T09:10:05.243489Z node 32 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:475: session to partition: 0, generation: 1
2025-05-07T09:10:05.244893Z node 32 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:627: session v1 cookie: 3 sessionId: describe result for acl check
2025-05-07T09:10:05.245225Z node 32 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:62: TTableHelper SelectQuery: --!syntax_v1 DECLARE $Hash AS Uint64; DECLARE $Topic AS Utf8; DECLARE $SourceId AS Utf8; SELECT Partition, CreateTime, AccessTime, SeqNo FROM `//Root/.metadata/TopicPartitionsMapping` WHERE Hash == $Hash AND Topic == $Topic AND ProducerId == $SourceId;
2025-05-07T09:10:05.245252Z node 32 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:63: TTableHelper UpdateQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint64; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64; DECLARE $SeqNo AS Uint64; UPSERT INTO `//Root/.metadata/TopicPartitionsMapping` (Hash, Topic, ProducerId, CreateTime, AccessTime, Partition, SeqNo) VALUES ($Hash, $Topic, $SourceId, $CreateTime, $AccessTime, $Partition, $SeqNo);
2025-05-07T09:10:05.245283Z node 32 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:64: TTableHelper UpdateAccessTimeQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint64; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64; UPDATE `//Root/.metadata/TopicPartitionsMapping` SET AccessTime = $AccessTime WHERE Hash = $Hash AND Topic = $Topic AND ProducerId = $SourceId AND Partition = $Partition;
2025-05-07T09:10:05.245307Z node 32 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:305: TPartitionChooser [32:7501628557268728299:2496] (SourceId=123, PreferedPartition=0) ReplyResult: Partition=0, SeqNo=0
2025-05-07T09:10:05.245329Z node 32 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:689: ProceedPartition. session cookie: 3 sessionId: partition: 0 expectedGeneration: 1
2025-05-07T09:10:05.246004Z node 32 :PQ_WRITE_PROXY DEBUG: writer.cpp:798: TPartitionWriter 72075186224037892 (partition=0) TEvClientConnected Status OK, TabletId: 72075186224037892, NodeId 32, Generation: 1
2025-05-07T09:10:05.246146Z node 32 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie 123|8a0d2c0c-35f635f3-16ba3729-51c61883_0 generated for partition 0 topic 'topic' owner 123
2025-05-07T09:10:05.246743Z node 32 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:865: session inited cookie: 3 partition: 0 MaxSeqNo: 0 sessionId: 123|8a0d2c0c-35f635f3-16ba3729-51c61883_0
2025-05-07T09:10:05.248637Z node 32 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 3 sessionId: 123|8a0d2c0c-35f635f3-16ba3729-51c61883_0 grpc read done: success: 1 data: write_request[data omitted]
2025-05-07T09:10:05.249140Z node 32 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 3 sessionId: 123|8a0d2c0c-35f635f3-16ba3729-51c61883_0 grpc read done: success: 1 data: write_request[data omitted]
2025-05-07T09:10:05.249791Z node 32 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 3 sessionId: 123|8a0d2c0c-35f635f3-16ba3729-51c61883_0 grpc read done: success: 1 data: write_request[data omitted]
2025-05-07T09:10:05.250049Z node 32 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 3 sessionId: 123|8a0d2c0c-35f635f3-16ba3729-51c61883_0 grpc read done: success: 1 data: write_request[data omitted]
2025-05-07T09:10:05.250113Z node 32 :PQ_WRITE_PROXY DEBUG: writer.cpp:798: TPartitionWriter 72075186224037892 (partition=0) TEvClientConnected Status OK, TabletId: 72075186224037892, NodeId 32, Generation: 1
2025-05-07T09:10:05.251372Z node 32 :PQ_WRITE_PROXY DEBUG: writer.cpp:289: SessionId: ydb://session/3?node_id=32&id=MWY1ZDYwM2UtZThjMjdjODAtODQwY2Q3YjctNTIzOGVlZDU= TxId: 01jtn04rgacqmtv5ef7kgmvama WriteId: {32, 281474976715673}
2025-05-07T09:10:05.255189Z node 32 :PERSQUEUE INFO: partition_init.cpp:879: [PQ: 72075186224037892, Partition: {0, {32, 281474976715673}, 100000}, State: StateInit] bootstrapping {0, {32, 281474976715673}, 100000} [32:7501628557268728311:2498]
2025-05-07T09:10:05.258590Z node 32 :PERSQUEUE INFO: partition_init.cpp:773: [topic:{0, {32, 281474976715673}, 100000}:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized.
2025-05-07T09:10:05.258708Z node 32 :PERSQUEUE INFO: partition.cpp:557: [PQ: 72075186224037892, Partition: {0, {32, 281474976715673}, 100000}, State: StateInit] init complete for topic 'topic' partition {0, {32, 281474976715673}, 100000} generation 1 [32:7501628557268728311:2498]
2025-05-07T09:10:05.259138Z node 32 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie 123|9eb15d43-565920f2-aaa1538a-b02a4ae5_0 generated for partition {0, {32, 281474976715673}, 100000} topic 'topic' owner 123
2025-05-07T09:10:05.261699Z node 32 :PQ_WRITE_PROXY DEBUG: writer.cpp:538: TPartitionWriter 72075186224037892 (partition=0) Received event: NKikimr::TEvPersQueue::TEvResponse
2025-05-07T09:10:05.261783Z node 32 :PQ_WRITE_PROXY DEBUG: writer.cpp:538: TPartitionWriter 72075186224037892 (partition=0) Received event: NKikimr::TEvPersQueue::TEvResponse
2025-05-07T09:10:05.261817Z node 32 :PQ_WRITE_PROXY DEBUG: writer.cpp:538: TPartitionWriter 72075186224037892 (partition=0) Received event: NKikimr::TEvPersQueue::TEvResponse
2025-05-07T09:10:05.261852Z node 32 :PQ_WRITE_PROXY DEBUG: writer.cpp:538: TPartitionWriter 72075186224037892 (partition=0) Received event: NKikimr::TEvPersQueue::TEvResponse
2025-05-07T09:10:05.267396Z node 32 :PQ_WRITE_PROXY DEBUG: writer.cpp:538: TPartitionWriter 72075186224037892 (partition=0) Received event: NKikimr::TEvPersQueue::TEvResponse
2025-05-07T09:10:05.339950Z node 32 :PQ_WRITE_PROXY DEBUG: writer.cpp:538: TPartitionWriter 72075186224037892 (partition=0) Received event: NKikimr::TEvPersQueue::TEvResponse
2025-05-07T09:10:05.340015Z node 32 :PQ_WRITE_PROXY DEBUG: writer.cpp:538: TPartitionWriter 72075186224037892 (partition=0) Received event: NKikimr::TEvPersQueue::TEvResponse
2025-05-07T09:10:05.340043Z node 32 :PQ_WRITE_PROXY DEBUG: writer.cpp:538: TPartitionWriter 72075186224037892 (partition=0) Received event: NKikimr::TEvPersQueue::TEvResponse
2025-05-07T09:10:05.354687Z node 32 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 3 sessionId: 123|8a0d2c0c-35f635f3-16ba3729-51c61883_0 grpc read done: success: 0 data:
2025-05-07T09:10:05.354742Z node 32 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 3 sessionId: 123|8a0d2c0c-35f635f3-16ba3729-51c61883_0 grpc read failed
2025-05-07T09:10:05.355225Z node 32 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:818: session v1 closed cookie: 3 sessionId: 123|8a0d2c0c-35f635f3-16ba3729-51c61883_0
2025-05-07T09:10:05.355244Z node 32 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 3 sessionId: 123|8a0d2c0c-35f635f3-16ba3729-51c61883_0 is DEAD
2025-05-07T09:10:05.355935Z node 32 :PQ_WRITE_PROXY DEBUG: writer.cpp:538: TPartitionWriter 72075186224037892 (partition=0) Received event: NActors::TEvents::TEvPoison
2025-05-07T09:10:05.355978Z node 32 :PQ_WRITE_PROXY DEBUG: writer.cpp:538: TPartitionWriter 72075186224037892 (partition=0) Received event: NActors::TEvents::TEvPoison
2025-05-07T09:10:05.373383Z node 32 :PERSQUEUE WARN: pq_impl.cpp:4195: [PQ: 72075186224037892] Unknown transaction 0
Counters: ================================
name=api.grpc.topic.stream_write.bytes: 20796
name=api.grpc.topic.stream_write.messages: 4
name=topic.write.bytes: 20796
name=topic.write.discarded_bytes: 0
name=topic.write.discarded_messages: 0
name=topic.write.messages: 4
name=topic.write.uncompressed_bytes: 16
name=topic.write.lag_milliseconds:
    bin=100: 0
    bin=1000: 0
    bin=10000: 0
    bin=180000: 0
    bin=200: 0
    bin=2000: 3
    bin=30000: 0
    bin=500: 0
    bin=5000: 1
    bin=60000: 0
    bin=999999: 0
name=topic.write.message_size_bytes:
    bin=1024: 1
    bin=10240: 2
    bin=102400: 0
    bin=1048576: 0
    bin=10485760: 0
    bin=20480: 1
    bin=204800: 0
    bin=2097152: 0
    bin=5120: 0
    bin=51200: 0
    bin=524288: 0
    bin=5242880: 0
    bin=67108864: 0
    bin=99999999: 0
name=topic.write.partition_throttled_milliseconds:
    bin=0: 4
    bin=1: 0
    bin=10: 0
    bin=100: 0
    bin=1000: 0
    bin=10000: 0
    bin=20: 0
    bin=2500: 0
    bin=5: 0
    bin=50: 0
    bin=500: 0
    bin=5000: 0
    bin=999999: 0
|93.8%| [TA] $(B)/ydb/services/persqueue_v1/ut/test-results/unittest/{meta.json ... results_accumulator.log}
>> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_0_UNIQUE_SYNC-pk_types4-all_types4-index4--UNIQUE-SYNC]
|93.8%| [TA] {RESULT} $(B)/ydb/services/persqueue_v1/ut/test-results/unittest/{meta.json ... results_accumulator.log}
>> test_split_merge.py::TestSplitMerge::test_merge_split[table_Uint8-pk_types24-all_types24-index24---]
>> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_3_UNIQUE_SYNC-pk_types1-all_types1-index1--UNIQUE-SYNC]
|93.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_can_read_new_written_data_on_visibility_timeout[tables_format_v0] [GOOD]
>> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_write_read_delete_many_groups[tables_format_v1] [GOOD]
|93.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_visibility_timeout_works[tables_format_v0] [GOOD]
|93.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_queue_attributes[tables_format_v0-std] [GOOD]
|93.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_deduplication[tables_format_v1-content_based] [GOOD]
>> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_receive_attempt_reloads_same_messages[tables_format_v0-after_crutch_batch] [GOOD]
>> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_receive_attempt_reloads_same_messages[tables_format_v0-standard_mode]
>> test_split_merge.py::TestSplitMerge::test_merge_split[table_Int8-pk_types21-all_types21-index21---]
|93.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_can_read_new_written_data_on_visibility_timeout[tables_format_v0] [GOOD]
>> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_receive_attempt_reloads_same_messages[tables_format_v0-standard_mode] [GOOD]
>> test_split_merge.py::TestSplitMerge::test_merge_split[table_Date-pk_types32-all_types32-index32---]
|93.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_write_and_read_to_different_groups[tables_format_v1] [GOOD]
>> test_split_merge.py::TestSplitMerge::test_merge_split[table_Decimal3510-pk_types27-all_types27-index27---]
>> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_0__SYNC-pk_types9-all_types9-index9---SYNC]
>> test_split_merge.py::TestSplitMerge::test_merge_split[table_Date32-pk_types36-all_types36-index36---]
|93.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_validates_deduplication_id[tables_format_v1] [GOOD]
>> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_receive_attempt_reloads_same_messages[tables_format_v0-after_crutch_batch] [GOOD]
>> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_receive_attempt_reloads_same_messages[tables_format_v0-standard_mode]
>> test_split_merge.py::TestSplitMerge::test_merge_split[table_all_types-pk_types12-all_types12-index12---]
|93.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_change_visibility_works[tables_format_v1-std] [GOOD]
|93.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_change_visibility_to_zero_works[tables_format_v1-fifo] [GOOD]
>> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_receive_attempt_reloads_same_messages[tables_format_v0-standard_mode] [GOOD]
|93.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test
|93.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_write_read_delete_many_groups[tables_format_v1] [GOOD]
>> test_s3.py::TestYdbS3TTL::test_s3[table_all_types-pk_types7-all_types7-index7---]
>> test_s3.py::TestYdbS3TTL::test_s3[table_index_3__SYNC-pk_types1-all_types1-index1---SYNC]
>> test_s3.py::TestYdbS3TTL::test_s3[table_ttl_Timestamp-pk_types12-all_types12-index12-Timestamp--]
>> test_cp_ic.py::TestCpIc::test_discovery
|93.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_receive_attempt_reloads_same_messages[tables_format_v0-standard_mode] [GOOD]
|93.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_receive_attempt_reloads_same_messages[tables_format_v0-standard_mode] [GOOD]
>> test_cp_ic.py::TestCpIc::test_discovery [GOOD]
>> test_s3.py::TestYdbS3TTL::test_s3[table_index_0__SYNC-pk_types4-all_types4-index4---SYNC]
|93.8%| [TA] $(B)/ydb/tests/functional/sqs/messaging/test-results/py3test/{meta.json ... results_accumulator.log}
|93.8%| [TA] {RESULT} $(B)/ydb/tests/functional/sqs/messaging/test-results/py3test/{meta.json ... results_accumulator.log}
|93.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/s3/py3test
>> test_s3.py::TestYdbS3TTL::test_s3[table_ttl_DyNumber-pk_types8-all_types8-index8-DyNumber--]
>> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_retryable_iam_error[tables_format_v1] [GOOD]
|93.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/fq/multi_plane/py3test >> test_cp_ic.py::TestCpIc::test_discovery [GOOD]
>> test_s3.py::TestYdbS3TTL::test_s3[table_ttl_Datetime-pk_types11-all_types11-index11-Datetime--]
|93.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_retryable_iam_error[tables_format_v1] [GOOD]
|93.8%| [TA] $(B)/ydb/tests/functional/sqs/cloud/test-results/py3test/{meta.json ... results_accumulator.log}
|93.9%| [TA] {RESULT} $(B)/ydb/tests/functional/sqs/cloud/test-results/py3test/{meta.json ... results_accumulator.log}
>> test_s3.py::TestYdbS3TTL::test_s3[table_index_0__ASYNC-pk_types6-all_types6-index6---ASYNC]
>> tier_delete.py::TestTierDelete::test_delete_s3_ttl [GOOD]
>> test_s3.py::TestYdbS3TTL::test_s3[table_ttl_Timestamp-pk_types12-all_types12-index12-Timestamp--] [GOOD]
------- [TM] {asan, default-linux-x86_64, release} ydb/tests/olap/ttl_tiering/py3test >> tier_delete.py::TestTierDelete::test_delete_s3_ttl [GOOD]
Test command err:
rows by tier: {'__DEFAULT': 90000}, portions: 4
rows by tier: {'__DEFAULT': 90000}, portions: 4
rows by tier: {'__DEFAULT': 90000}, portions: 4
rows by tier: {'__DEFAULT': 90000}, portions: 4
rows by tier: {'__DEFAULT': 90000}, portions: 4
rows by tier: {'__DEFAULT': 90000}, portions: 4
rows by tier: {'__DEFAULT': 90000}, portions: 4
rows by tier: {'__DEFAULT': 90000}, portions: 4
rows by tier: {'__DEFAULT': 90000}, portions: 4
rows by tier: {'__DEFAULT': 90000}, portions: 4
rows by tier: {'__DEFAULT': 90000}, portions: 4
rows by tier: {'__DEFAULT': 90000}, portions: 4
rows by tier: {'__DEFAULT': 90000}, portions: 4
rows by tier: {'__DEFAULT': 90000}, portions: 4
rows by tier: {'__DEFAULT': 90000}, portions: 4
rows by tier: {'__DEFAULT': 90000}, portions: 4
rows by tier: {'__DEFAULT': 90000}, portions: 4
rows by tier: {'__DEFAULT': 90000}, portions: 4
rows by tier: {'__DEFAULT': 90000}, portions: 4
rows by tier: {'__DEFAULT': 90000}, portions: 4
rows by tier: {'__DEFAULT': 90000}, portions: 4
rows by tier: {'__DEFAULT': 90000}, portions: 4
rows by tier: {'__DEFAULT': 90000}, portions: 4
rows by tier: {'__DEFAULT': 90000}, portions: 4
rows by tier: {'__DEFAULT': 90000}, portions: 4
rows by tier: {'__DEFAULT': 90000}, portions: 4
rows by tier: {'__DEFAULT': 90000}, portions: 4
rows by tier: {'__DEFAULT': 90000}, portions: 4
rows by tier: {'__DEFAULT': 90000}, portions: 4
rows by tier: {'__DEFAULT': 90000}, portions: 4
rows by tier: {'__DEFAULT': 90000}, portions: 4
rows by tier: {'__DEFAULT': 90000}, portions: 4
rows by tier: {'__DEFAULT': 90000}, portions: 4
rows by tier: {'__DEFAULT': 90000}, portions: 4
rows by tier: {'__DEFAULT': 90000}, portions: 4
rows by tier: {'__DEFAULT': 90000}, portions: 4
rows by tier: {'__DEFAULT': 90000}, portions: 4
rows by tier: {'__DEFAULT': 90000}, portions: 4
rows by tier: {'__DEFAULT': 90000}, portions: 4
rows by tier: {'__DEFAULT': 100000}, portions: 5
|93.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/s3/py3test >> test_s3.py::TestYdbS3TTL::test_s3[table_ttl_Timestamp-pk_types12-all_types12-index12-Timestamp--] [GOOD]
|93.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/s3/py3test
>> test_s3.py::TestYdbS3TTL::test_s3[table_ttl_Uint64-pk_types10-all_types10-index10-Uint64--]
>> test_s3.py::TestYdbS3TTL::test_s3[table_index_2__SYNC-pk_types2-all_types2-index2---SYNC]
>> test_s3.py::TestYdbS3TTL::test_s3[table_ttl_DyNumber-pk_types8-all_types8-index8-DyNumber--] [GOOD]
>> test_s3.py::TestYdbS3TTL::test_s3[table_ttl_Datetime-pk_types11-all_types11-index11-Datetime--] [GOOD]
|93.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/s3/py3test >> test_s3.py::TestYdbS3TTL::test_s3[table_ttl_DyNumber-pk_types8-all_types8-index8-DyNumber--] [GOOD]
|93.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/s3/py3test >> test_s3.py::TestYdbS3TTL::test_s3[table_ttl_Datetime-pk_types11-all_types11-index11-Datetime--] [GOOD]
>> test_s3.py::TestYdbS3TTL::test_s3[table_index_1__ASYNC-pk_types5-all_types5-index5---ASYNC]
>> test_s3.py::TestYdbS3TTL::test_s3[table_index_0__SYNC-pk_types4-all_types4-index4---SYNC] [GOOD]
|93.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/s3/py3test >> test_s3.py::TestYdbS3TTL::test_s3[table_index_0__SYNC-pk_types4-all_types4-index4---SYNC] [GOOD]
>> test_dispatch.py::TestMapping::test_mapping
|93.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/s3/py3test
>> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_create_path_with_long_name_failed
>> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_create_path_with_long_name_failed [GOOD]
>> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_create_table_and_path_with_name_clash_unsuccessful
>> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_create_table_and_path_with_name_clash_unsuccessful [GOOD]
>> test_s3.py::TestYdbS3TTL::test_s3[table_ttl_Uint64-pk_types10-all_types10-index10-Uint64--] [GOOD]
>> test_s3.py::TestYdbS3TTL::test_s3[table_index_3__SYNC-pk_types1-all_types1-index1---SYNC] [GOOD]
>> ttl_delete_s3.py::TestDeleteS3Ttl::test_data_unchanged_after_ttl_change [GOOD]
>> ttl_delete_s3.py::TestDeleteS3Ttl::test_delete_s3_tiering
>> TGroupMapperTest::MonteCarlo [GOOD]
|93.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut/unittest >> TGroupMapperTest::MonteCarlo [GOOD]
|93.9%| [TA] $(B)/ydb/core/mind/bscontroller/ut/test-results/unittest/{meta.json ... results_accumulator.log}
|93.9%| [TA] {RESULT} $(B)/ydb/core/mind/bscontroller/ut/test-results/unittest/{meta.json ... results_accumulator.log}
>> test_s3.py::TestYdbS3TTL::test_s3[table_index_0__ASYNC-pk_types6-all_types6-index6---ASYNC] [GOOD]
|93.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/s3/py3test >> test_s3.py::TestYdbS3TTL::test_s3[table_ttl_Uint64-pk_types10-all_types10-index10-Uint64--] [GOOD]
|93.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/s3/py3test
|93.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/s3/py3test >> test_s3.py::TestYdbS3TTL::test_s3[table_index_3__SYNC-pk_types1-all_types1-index1---SYNC] [GOOD]
>> test_s3.py::TestYdbS3TTL::test_s3[table_ttl_Uint32-pk_types9-all_types9-index9-Uint32--]
>> test_s3.py::TestYdbS3TTL::test_s3[table_index_4__SYNC-pk_types0-all_types0-index0---SYNC]
|93.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/s3/py3test >> test_s3.py::TestYdbS3TTL::test_s3[table_index_0__ASYNC-pk_types6-all_types6-index6---ASYNC] [GOOD]
>> test_alter_ops.py::TestSchemeShardAlterTest::test_alter_table_cant_add_existing_column
>> test_dispatch.py::TestMapping::test_mapping [GOOD]
|93.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/scheme_shard/py3test >> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_create_table_and_path_with_name_clash_unsuccessful [GOOD]
>> test_retry.py::TestRetry::test_fail_first[kikimr0]
>> test_s3.py::TestYdbS3TTL::test_s3[table_index_1__SYNC-pk_types3-all_types3-index3---SYNC]
>> test_alter_ops.py::TestSchemeShardAlterTest::test_alter_table_cant_add_existing_column [GOOD]
>> test_dispatch.py::TestMapping::test_idle
>> test_s3.py::TestYdbS3TTL::test_s3[table_all_types-pk_types7-all_types7-index7---] [GOOD]
|93.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/s3/py3test >> test_s3.py::TestYdbS3TTL::test_s3[table_all_types-pk_types7-all_types7-index7---] [GOOD]
|93.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/scheme_shard/py3test >> test_alter_ops.py::TestSchemeShardAlterTest::test_alter_table_cant_add_existing_column [GOOD]
>> test_split_merge.py::TestSplitMerge::test_merge_split[table_ttl_Uint64-pk_types15-all_types15-index15-Uint64--] [GOOD]
>> test_split_merge.py::TestSplitMerge::test_merge_split[table_ttl_Date-pk_types18-all_types18-index18-Date--] [GOOD]
|93.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/s3/py3test >> test_s3.py::TestYdbS3TTL::test_s3[table_index_1__ASYNC-pk_types5-all_types5-index5---ASYNC] [GOOD]
>> test_split_merge.py::TestSplitMerge::test_merge_split[table_Datetime64-pk_types37-all_types37-index37---] [GOOD]
>> test_s3.py::TestYdbS3TTL::test_s3[table_ttl_Date-pk_types13-all_types13-index13-Date--]
>> test_split_merge.py::TestSplitMerge::test_merge_split[table_Timestamp-pk_types34-all_types34-index34---] [GOOD]
|93.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_ttl_Uint64-pk_types15-all_types15-index15-Uint64--] [GOOD]
>> test_split_merge.py::TestSplitMerge::test_merge_split[table_ttl_DyNumber-pk_types13-all_types13-index13-DyNumber--] [GOOD]
>> test_retry_high_rate.py::TestRetry::test_high_rate[kikimr0]
>> test_split_merge.py::TestSplitMerge::test_merge_split[table_Int32-pk_types20-all_types20-index20---] [GOOD]
>> test_alter_ops.py::TestSchemeShardAlterTest::test_alter_table_by_not_single_key_column_failure
|93.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_ttl_Date-pk_types18-all_types18-index18-Date--] [GOOD]
|93.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Datetime64-pk_types37-all_types37-index37---] [GOOD]
>> test_split_merge.py::TestSplitMerge::test_merge_split[table_Timestamp64-pk_types38-all_types38-index38---] [GOOD]
|93.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/s3/py3test >> test_s3.py::TestYdbS3TTL::test_s3[table_index_1__ASYNC-pk_types5-all_types5-index5---ASYNC] [GOOD]
>> test_s3.py::TestYdbS3TTL::test_s3[table_ttl_Uint32-pk_types9-all_types9-index9-Uint32--] [GOOD]
>> test_split_merge.py::TestSplitMerge::test_merge_split[table_Uint32-pk_types23-all_types23-index23---] [GOOD]
>> test_split_merge.py::TestSplitMerge::test_merge_split[table_String-pk_types29-all_types29-index29---] [GOOD]
>> test_split_merge.py::TestSplitMerge::test_merge_split[table_ttl_Timestamp-pk_types17-all_types17-index17-Timestamp--] [GOOD]
>> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_ydb_create_and_remove_directory_success
>> test_s3.py::TestYdbS3TTL::test_s3[table_index_2__SYNC-pk_types2-all_types2-index2---SYNC] [GOOD]
>> test_split_merge.py::TestSplitMerge::test_merge_split[table_Decimal229-pk_types26-all_types26-index26---] [GOOD]
>> test_split_merge.py::TestSplitMerge::test_merge_split[table_Interval-pk_types35-all_types35-index35---] [GOOD]
>> test_split_merge.py::TestSplitMerge::test_merge_split[table_ttl_Uint32-pk_types14-all_types14-index14-Uint32--] [GOOD]
|93.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/scheme_shard/py3test
>> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_given_table_when_drop_table_and_create_with_same_scheme_then_ok
|93.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Timestamp-pk_types34-all_types34-index34---] [GOOD]
|93.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_ttl_DyNumber-pk_types13-all_types13-index13-DyNumber--] [GOOD]
>> test_split_merge.py::TestSplitMerge::test_merge_split[table_Utf8-pk_types30-all_types30-index30---] [GOOD]
>> test_split_merge.py::TestSplitMerge::test_merge_split[table_Uint64-pk_types22-all_types22-index22---] [GOOD]
>> test_split_merge.py::TestSplitMerge::test_merge_split[table_ttl_Datetime-pk_types16-all_types16-index16-Datetime--] [GOOD]
>> test_split_merge.py::TestSplitMerge::test_merge_split[table_Decimal150-pk_types25-all_types25-index25---] [GOOD]
>> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_when_delete_path_with_folder_then_get_error_response
|93.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Int32-pk_types20-all_types20-index20---] [GOOD]
>> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_ydb_create_and_remove_directory_success [GOOD]
>> test_dispatch.py::TestMapping::test_idle [GOOD]
>> test_split_merge.py::TestSplitMerge::test_merge_split[table_UUID-pk_types31-all_types31-index31---] [GOOD]
>> test_alter_ops.py::TestSchemeShardAlterTest::test_alter_table_can_change_compaction_policy_options
>> test_alter_ops.py::TestSchemeShardAlterTest::test_alter_table_can_change_partition_config_options
>> test_alter_ops.py::TestSchemeShardAlterTest::test_alter_table_by_not_single_key_column_failure [GOOD]
|93.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/s3/py3test >> test_s3.py::TestYdbS3TTL::test_s3[table_ttl_Uint32-pk_types9-all_types9-index9-Uint32--] [GOOD]
>> test_split_merge.py::TestSplitMerge::test_merge_split[table_Interval64-pk_types39-all_types39-index39---] [GOOD]
|93.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Decimal229-pk_types26-all_types26-index26---] [GOOD]
|93.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Timestamp64-pk_types38-all_types38-index38---] [GOOD]
>> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_delete_directory_from_leaf_success
|93.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Uint32-pk_types23-all_types23-index23---] [GOOD]
|93.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_String-pk_types29-all_types29-index29---] [GOOD]
>> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_when_delete_path_with_folder_then_get_error_response [GOOD]
|93.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_ttl_Timestamp-pk_types17-all_types17-index17-Timestamp--] [GOOD]
>> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_given_table_when_drop_table_and_create_with_other_keys_then_ok
>> test_alter_ops.py::TestSchemeShardAlterTest::test_alter_table_decreasing_number_of_generations_it_is_raise_error
|93.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Interval-pk_types35-all_types35-index35---] [GOOD]
>> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_given_table_when_drop_table_and_create_with_same_scheme_then_ok [GOOD]
|93.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/s3/py3test >> test_s3.py::TestYdbS3TTL::test_s3[table_index_2__SYNC-pk_types2-all_types2-index2---SYNC] [GOOD]
|93.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/scheme_shard/py3test
|93.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Utf8-pk_types30-all_types30-index30---] [GOOD]
|93.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/scheme_shard/py3test
>> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_1__SYNC-pk_types8-all_types8-index8---SYNC] [GOOD]
|93.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/scheme_shard/py3test >> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_delete_directory_from_leaf_success [GOOD]
>> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_delete_table_that_doesnt_exist_failure
>> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_delete_table_that_doesnt_exist_failure [GOOD]
|93.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/fq/multi_plane/py3test >> test_dispatch.py::TestMapping::test_idle [GOOD]
>> test_alter_ops.py::TestSchemeShardAlterTest::test_alter_table_can_change_compaction_policy_options [GOOD]
|93.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Uint64-pk_types22-all_types22-index22---] [GOOD]
|93.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/scheme_shard/py3test
|93.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Decimal150-pk_types25-all_types25-index25---] [GOOD]
|93.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_ttl_Uint32-pk_types14-all_types14-index14-Uint32--] [GOOD]
|93.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/scheme_shard/py3test
>> test_alter_ops.py::TestSchemeShardAlterTest::test_alter_table_add_column_after_table_creation_with_data_and_success
>> test_split_merge.py::TestSplitMerge::test_merge_split[table_DyNumber-pk_types28-all_types28-index28---] [GOOD]
|93.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_UUID-pk_types31-all_types31-index31---] [GOOD]
>> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_given_table_when_drop_table_and_create_with_other_keys_then_ok [GOOD]
>> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_ydb_remove_directory_that_does_not_exist_failure
>> test_alter_ops.py::TestSchemeShardAlterTest::test_alter_table_can_change_partition_config_options [GOOD]
>> test_split_merge.py::TestSplitMerge::test_merge_split[table_Int64-pk_types19-all_types19-index19---] [GOOD]
>> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_when_create_path_second_time_then_it_is_ok
>> test_alter_ops.py::TestSchemeShardAlterTest::test_alter_table_decreasing_number_of_generations_it_is_raise_error [GOOD]
>> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_given_table_when_drop_table_and_create_with_same_primary_key_and_other_scheme_then_ok
>> test_copy_ops.py::TestSchemeShardCopyOps::test_when_copy_table_partition_config
>> test_alter_ops.py::TestSchemeShardAlterTest::test_alter_table_add_and_remove_column_many_times_success
|93.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/scheme_shard/py3test
>> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_create_and_drop_table_many_times_in_range
|94.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_ttl_Datetime-pk_types16-all_types16-index16-Datetime--] [GOOD]
|94.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/scheme_shard/py3test
|94.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/scheme_shard/py3test
>> test_split_merge.py::TestSplitMerge::test_merge_split[table_Uint8-pk_types24-all_types24-index24---] [GOOD]
>> test_copy_ops.py::TestSchemeShardCopyOps::test_given_table_when_create_copy_of_it_then_ok
>> test_alter_ops.py::TestSchemeShardAlterTest::test_alter_table_after_create_table_it_is_success
|94.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Interval64-pk_types39-all_types39-index39---] [GOOD]
>> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_ydb_remove_directory_that_does_not_exist_failure [GOOD]
>> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_when_create_path_second_time_then_it_is_ok [GOOD]
|94.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_DyNumber-pk_types28-all_types28-index28---] [GOOD]
>> test_alter_ops.py::TestSchemeShardAlterTest::test_alter_table_by_single_key_column_failure
|94.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/scheme_shard/py3test >> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_ydb_create_and_remove_directory_success [GOOD]
>> test_split_merge.py::TestSplitMerge::test_merge_split[table_Int8-pk_types21-all_types21-index21---] [GOOD]
|94.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/scheme_shard/py3test >> test_alter_ops.py::TestSchemeShardAlterTest::test_alter_table_by_not_single_key_column_failure [GOOD]
|94.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/scheme_shard/py3test >> test_alter_ops.py::TestSchemeShardAlterTest::test_alter_table_can_change_compaction_policy_options [GOOD]
>> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_given_table_when_drop_table_and_create_with_same_primary_key_and_other_scheme_then_ok [GOOD]
>> test_alter_ops.py::TestSchemeShardAlterTest::test_alter_table_add_column_after_table_creation_with_data_and_success [GOOD]
|94.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/scheme_shard/py3test >> test_alter_ops.py::TestSchemeShardAlterTest::test_alter_table_can_change_partition_config_options [GOOD]
|94.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Int64-pk_types19-all_types19-index19---] [GOOD]
>> test_split_merge.py::TestSplitMerge::test_merge_split[table_Decimal3510-pk_types27-all_types27-index27---] [GOOD]
|94.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/scheme_shard/py3test >> test_alter_ops.py::TestSchemeShardAlterTest::test_alter_table_decreasing_number_of_generations_it_is_raise_error [GOOD]
|94.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/scheme_shard/py3test >> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_when_delete_path_with_folder_then_get_error_response [GOOD]
|94.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_1__SYNC-pk_types8-all_types8-index8---SYNC] [GOOD]
>> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_create_and_drop_table_many_times_in_range [GOOD]
>> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_create_many_directories_success
>> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_create_many_directories_success [GOOD]
|94.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Uint8-pk_types24-all_types24-index24---] [GOOD]
>> test_split_merge.py::TestSplitMerge::test_merge_split[table_Datetime-pk_types33-all_types33-index33---] [GOOD]
>> test_alter_ops.py::TestSchemeShardAlterTest::test_alter_table_after_create_table_it_is_success [GOOD]
>> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_0__ASYNC-pk_types11-all_types11-index11---ASYNC] [GOOD]
>> test_split_merge.py::TestSplitMerge::test_merge_split[table_Date32-pk_types36-all_types36-index36---] [GOOD]
>> YdbSdkSessions::MultiThreadSessionPoolLimitSyncTableClient
>> YdbSdkSessions::TestSdkFreeSessionAfterBadSessionQueryService [SKIPPED]
>> test_copy_ops.py::TestSchemeShardCopyOps::test_when_copy_table_partition_config [GOOD]
>> test_split_merge.py::TestSplitMerge::test_merge_split[table_Date-pk_types32-all_types32-index32---] [GOOD]
>> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_1_UNIQUE_SYNC-pk_types3-all_types3-index3--UNIQUE-SYNC] [GOOD]
>> test_alter_ops.py::TestSchemeShardAlterTest::test_alter_table_by_single_key_column_failure [GOOD]
>> TFqYdbTest::ShouldStatusToIssuesProcessExceptions
>> TFqYdbTest::ShouldStatusToIssuesProcessExceptions [GOOD]
>> TFqYdbTest::ShouldStatusToIssuesProcessEmptyIssues [GOOD]
>> test_alter_ops.py::TestSchemeShardAlterTest::test_alter_table_add_and_remove_column_many_times_success [GOOD]
>> YdbSdkSessions::CloseSessionWithSessionPoolExplicitDriverStopOnly
|94.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Decimal3510-pk_types27-all_types27-index27---] [GOOD]
>> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_4__SYNC-pk_types5-all_types5-index5---SYNC] [GOOD]
------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/integration/sessions/gtest >> YdbSdkSessions::TestSdkFreeSessionAfterBadSessionQueryService [SKIPPED]
Test command err:
ydb/public/sdk/cpp/tests/integration/sessions/main.cpp:195: Test is failing right now
|94.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Int8-pk_types21-all_types21-index21---] [GOOD]
>> TRegisterCheckTest::ShouldNotRegisterCheckPrevGeneration
>> test_s3.py::TestYdbS3TTL::test_s3[table_index_1__SYNC-pk_types3-all_types3-index3---SYNC] [GOOD]
>> YdbSdkSessions::TestSessionPool
>> YdbSdkSessions::MultiThreadSync
>> YdbSdkSessions::TestMultipleSessions
|94.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/scheme_shard/py3test >> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_delete_table_that_doesnt_exist_failure [GOOD]
>> TRegisterCheckTest::ShouldRegisterCheckNewGeneration
>> TRegisterCheckTest::ShouldNotRegisterCheckPrevGeneration [GOOD]
>> test_s3.py::TestYdbS3TTL::test_s3[table_ttl_Date-pk_types13-all_types13-index13-Date--] [GOOD]
>> YdbSdkSessions::MultiThreadSync [GOOD]
>> YdbSdkSessions::SessionsServerLimit [SKIPPED]
>> YdbSdkSessions::TestMultipleSessions [GOOD]
>> YdbSdkSessions::TestActiveSessionCountAfterTransportError
|94.0%| [TS] {asan, default-linux-x86_64, release} ydb/core/fq/libs/ydb/ut/unittest >> TFqYdbTest::ShouldStatusToIssuesProcessExceptions [GOOD]
|94.0%| [TS] {asan, default-linux-x86_64, release} ydb/core/fq/libs/ydb/ut/unittest >> TFqYdbTest::ShouldStatusToIssuesProcessEmptyIssues [GOOD]
>> YdbSdkSessions::TestSessionPool [GOOD]
|94.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/scheme_shard/py3test >> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_given_table_when_drop_table_and_create_with_same_scheme_then_ok [GOOD]
>> YdbSdkSessions::TestActiveSessionCountAfterBadSession
>> TRegisterCheckTest::ShouldRegisterCheckNewGeneration [GOOD]
|94.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Date-pk_types32-all_types32-index32---] [GOOD]
>> TRegisterCheckTest::ShouldNotRegisterCheckPrevGeneration2
>> YdbSdkSessions::TestActiveSessionCountAfterBadSession [GOOD]
>> YdbSdkSessions::SessionsServerLimitWithSessionPool [SKIPPED]
|94.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/scheme_shard/py3test >> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_given_table_when_drop_table_and_create_with_other_keys_then_ok [GOOD]
>> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_2__SYNC-pk_types7-all_types7-index7---SYNC] [GOOD]
>> YdbSdkSessions::MultiThreadSessionPoolLimitSyncTableClient [GOOD]
>> YdbSdkSessions::MultiThreadSessionPoolLimitSyncQueryClient
>> YdbSdkSessions::TestSdkFreeSessionAfterBadSessionQueryServiceStreamCall [SKIPPED]
>> TRegisterCheckTest::ShouldNotRegisterCheckPrevGeneration2 [GOOD]
|94.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/scheme_shard/py3test >> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_ydb_remove_directory_that_does_not_exist_failure [GOOD]
>> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_0_UNIQUE_SYNC-pk_types4-all_types4-index4--UNIQUE-SYNC] [GOOD]
------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/integration/sessions/gtest >> YdbSdkSessions::SessionsServerLimit [SKIPPED]
Test command err:
ydb/public/sdk/cpp/tests/integration/sessions/main.cpp:543: Enable after accepting a pull request with merging configs
|94.0%| [TS] {asan, default-linux-x86_64, release} ydb/core/fq/libs/ydb/ut/unittest >> TRegisterCheckTest::ShouldNotRegisterCheckPrevGeneration [GOOD]
|94.0%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/integration/sessions/gtest >> YdbSdkSessions::TestSessionPool [GOOD]
|94.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/scheme_shard/py3test >> test_alter_ops.py::TestSchemeShardAlterTest::test_alter_table_after_create_table_it_is_success [GOOD]
>> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_0__SYNC-pk_types9-all_types9-index9---SYNC] [GOOD]
|94.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/scheme_shard/py3test >> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_when_create_path_second_time_then_it_is_ok [GOOD]
|94.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Date32-pk_types36-all_types36-index36---] [GOOD]
>> YdbSdkSessions::MultiThreadMultipleRequestsOnSharedSessionsTableClient
>> YdbSdkSessions::TestActiveSessionCountAfterTransportError [GOOD]
>> test_copy_ops.py::TestSchemeShardCopyOps::test_given_table_when_create_copy_of_it_then_ok [GOOD]
|94.0%| [TS] {asan, default-linux-x86_64, release} ydb/core/fq/libs/ydb/ut/unittest >> TRegisterCheckTest::ShouldRegisterCheckNewGeneration [GOOD]
------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/integration/sessions/gtest >> YdbSdkSessions::SessionsServerLimitWithSessionPool [SKIPPED]
Test command err:
ydb/public/sdk/cpp/tests/integration/sessions/main.cpp:583: Enable after accepting a pull request with merging configs
>> test_ttl.py::TestTTLOnIndexedTable::test_case
------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/integration/sessions/gtest >> YdbSdkSessions::TestSdkFreeSessionAfterBadSessionQueryServiceStreamCall [SKIPPED]
Test command err:
ydb/public/sdk/cpp/tests/integration/sessions/main.cpp:243: Test is failing right now
|94.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/ttl/py3test
|94.0%| [TS] {asan, default-linux-x86_64, release} ydb/core/fq/libs/ydb/ut/unittest >> TRegisterCheckTest::ShouldNotRegisterCheckPrevGeneration2 [GOOD]
|94.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/ttl/py3test
>> YdbSdkSessions::CloseSessionAfterDriverDtorWithoutSessionPool
|94.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_1_UNIQUE_SYNC-pk_types3-all_types3-index3--UNIQUE-SYNC] [GOOD]
|94.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/ttl/py3test
>> TRegisterCheckTest::ShouldRegisterCheckNextGeneration
>> TRegisterCheckTest::ShouldRegisterCheckSameGeneration
>> test_ttl.py::TestTTLAlterSettings::test_case
>> test_ttl.py::TestTTLDefaultEnv::test_case
|94.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_4__SYNC-pk_types5-all_types5-index5---SYNC] [GOOD]
>> test_ttl.py::TestTTLValueSinceUnixEpoch::test_case
|94.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Datetime-pk_types33-all_types33-index33---] [GOOD]
|94.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/ttl/py3test
|94.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_0__ASYNC-pk_types11-all_types11-index11---ASYNC] [GOOD]
|94.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/ttl/py3test |94.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/ttl/py3test >> TRegisterCheckTest::ShouldRegisterCheckSameGeneration [GOOD] >> TRegisterCheckTest::ShouldRegisterCheckNextGeneration [GOOD] >> YdbSdkSessions::CloseSessionAfterDriverDtorWithoutSessionPool [GOOD] >> YdbSdkSessions::CloseSessionWithSessionPoolExplicit >> YdbSdkSessions::CloseSessionWithSessionPoolExplicitDriverStopOnly [GOOD] >> YdbSdkSessions::CloseSessionWithSessionPoolFromDtors >> TCheckGenerationTest::ShouldRollbackTransactionWhenCheckFails >> TRegisterCheckTest::ShouldRegisterCheckNewGenerationAndTransact |94.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/s3/py3test >> test_s3.py::TestYdbS3TTL::test_s3[table_index_1__SYNC-pk_types3-all_types3-index3---SYNC] [GOOD] |94.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_0__SYNC-pk_types9-all_types9-index9---SYNC] [GOOD] >> YdbSdkSessions::MultiThreadSessionPoolLimitSyncQueryClient [GOOD] >> TCheckGenerationTest::ShouldRollbackTransactionWhenCheckFails [GOOD] >> TCheckGenerationTest::ShouldRollbackTransactionWhenCheckFails2 >> TRegisterCheckTest::ShouldRegisterCheckSameGenerationAndTransact >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_3__SYNC-pk_types6-all_types6-index6---SYNC] [GOOD] >> TCheckGenerationTest::ShouldRollbackTransactionWhenCheckFails2 [GOOD] |94.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/scheme_shard/py3test >> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_given_table_when_drop_table_and_create_with_same_primary_key_and_other_scheme_then_ok [GOOD] >> TRegisterCheckTest::ShouldRegisterCheckNewGenerationAndTransact [GOOD] |94.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/scheme_shard/py3test >> test_alter_ops.py::TestSchemeShardAlterTest::test_alter_table_add_column_after_table_creation_with_data_and_success [GOOD] |94.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/s3/py3test >> test_s3.py::TestYdbS3TTL::test_s3[table_ttl_Date-pk_types13-all_types13-index13-Date--] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_2_UNIQUE_SYNC-pk_types2-all_types2-index2--UNIQUE-SYNC] [GOOD] |94.1%| [TS] {asan, default-linux-x86_64, release} ydb/core/fq/libs/ydb/ut/unittest >> TRegisterCheckTest::ShouldRegisterCheckSameGeneration [GOOD] >> YdbSdkSessions::MultiThreadMultipleRequestsOnSharedSessionsTableClient [GOOD] >> YdbSdkSessions::MultiThreadMultipleRequestsOnSharedSessionsQueryClient [SKIPPED] >> test_disk.py::TestSafeDiskBreak::test_erase_method |94.1%| [TS] {asan, default-linux-x86_64, release} ydb/core/fq/libs/ydb/ut/unittest >> TRegisterCheckTest::ShouldRegisterCheckNextGeneration [GOOD] >> TRegisterCheckTest::ShouldRegisterCheckSameGenerationAndTransact [GOOD] |94.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_2__SYNC-pk_types7-all_types7-index7---SYNC] [GOOD] |94.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_0_UNIQUE_SYNC-pk_types4-all_types4-index4--UNIQUE-SYNC] [GOOD] |94.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/tools/nemesis/ut/py3test |94.1%| [TM] {asan, 
default-linux-x86_64, release} ydb/tests/tools/nemesis/ut/py3test |94.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/tools/nemesis/ut/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_4_UNIQUE_SYNC-pk_types0-all_types0-index0--UNIQUE-SYNC] [GOOD] >> test_ttl.py::TestTTLAlterSettings::test_case [GOOD] |94.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/tools/nemesis/ut/py3test >> YdbSdkSessionsPool::WaitQueue/0 >> YdbSdkSessionsPool::StressTestSync/1 |94.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/scheme_shard/py3test >> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_create_many_directories_success [GOOD] |94.1%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/integration/sessions/gtest >> YdbSdkSessions::MultiThreadSessionPoolLimitSyncQueryClient [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_1__ASYNC-pk_types10-all_types10-index10---ASYNC] [GOOD] >> test_tablet.py::TestMassiveKills::test_tablets_are_ok_after_many_kills >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_3_UNIQUE_SYNC-pk_types1-all_types1-index1--UNIQUE-SYNC] [GOOD] >> YdbSdkSessionsPool1Session::GetSession/0 |94.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/scheme_shard/py3test >> test_alter_ops.py::TestSchemeShardAlterTest::test_alter_table_by_single_key_column_failure [GOOD] >> YdbSdkSessionsPool::WaitQueue/0 [GOOD] |94.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/scheme_shard/py3test >> test_alter_ops.py::TestSchemeShardAlterTest::test_alter_table_add_and_remove_column_many_times_success [GOOD] >> YdbSdkSessionsPool1Session::GetSession/0 [GOOD] |94.1%| [TS] {asan, default-linux-x86_64, release} ydb/core/fq/libs/ydb/ut/unittest >> TCheckGenerationTest::ShouldRollbackTransactionWhenCheckFails2 [GOOD] |94.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/tools/nemesis/ut/py3test >> YdbSdkSessionsPool::PeriodicTask/0 |94.1%| [TS] {asan, default-linux-x86_64, release} ydb/core/fq/libs/ydb/ut/unittest >> TRegisterCheckTest::ShouldRegisterCheckNewGenerationAndTransact [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/integration/sessions/gtest >> YdbSdkSessions::MultiThreadMultipleRequestsOnSharedSessionsQueryClient [SKIPPED] Test command err: ydb/public/sdk/cpp/tests/integration/sessions/main.cpp:534: Enable after interactive tx support |94.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/tools/nemesis/ut/py3test |94.1%| [TS] {asan, default-linux-x86_64, release} ydb/core/fq/libs/ydb/ut/unittest >> TRegisterCheckTest::ShouldRegisterCheckSameGenerationAndTransact [GOOD] |94.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/tools/nemesis/ut/py3test |94.1%| [TA] $(B)/ydb/core/fq/libs/ydb/ut/test-results/unittest/{meta.json ... results_accumulator.log} |94.1%| [TA] {RESULT} $(B)/ydb/core/fq/libs/ydb/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} |94.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/tools/nemesis/ut/py3test |94.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/tools/nemesis/ut/py3test >> YdbSdkSessionsPool1Session::RunSmallPlan/0 >> YdbSdkSessionsPool::WaitQueue/1 |94.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/tools/nemesis/ut/py3test >> YdbSdkSessionsPool1Session::FailTest/0 >> YdbSdkSessionsPool1Session::RunSmallPlan/0 [GOOD] >> YdbSdkSessionsPool1Session::FailTest/0 [GOOD] >> YdbSdkSessionsPool::StressTestAsync/0 |94.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/tools/nemesis/ut/py3test |94.1%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/integration/sessions_pool/gtest >> YdbSdkSessionsPool::WaitQueue/0 [GOOD] |94.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/tools/nemesis/ut/py3test |94.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/tools/nemesis/ut/py3test |94.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/tools/nemesis/ut/py3test |94.1%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/integration/sessions_pool/gtest >> YdbSdkSessionsPool1Session::GetSession/0 [GOOD] |94.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/tools/nemesis/ut/py3test >> YdbSdkSessionsPool1Session::CustomPlan/0 |94.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/tools/nemesis/ut/py3test >> YdbSdkSessionsPool::StressTestSync/0 >> YdbSdkSessionsPool::WaitQueue/1 [GOOD] |94.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/tools/nemesis/ut/py3test |94.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/tools/nemesis/ut/py3test |94.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/scheme_shard/py3test >> test_copy_ops.py::TestSchemeShardCopyOps::test_when_copy_table_partition_config [GOOD] >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_other_requests_rate[tables_format_v0] >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_does_actions_with_queue[tables_format_v1-std] |94.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_1__ASYNC-pk_types10-all_types10-index10---ASYNC] [GOOD] |94.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_3__SYNC-pk_types6-all_types6-index6---SYNC] [GOOD] |94.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test |94.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test |94.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test |94.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test |94.1%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/integration/sessions_pool/gtest >> YdbSdkSessionsPool1Session::RunSmallPlan/0 [GOOD] >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_other_requests_rate[tables_format_v1] |94.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test |94.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test |94.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test >> YdbSdkSessionsPool::StressTestSync/1 [GOOD] |94.2%| [TM] {asan, default-linux-x86_64, release} 
ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_2_UNIQUE_SYNC-pk_types2-all_types2-index2--UNIQUE-SYNC] [GOOD] |94.2%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/integration/sessions_pool/gtest >> YdbSdkSessionsPool1Session::FailTest/0 [GOOD] |94.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_does_actions_with_queue[tables_format_v0-fifo] |94.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_create_queue_rate[tables_format_v0] |94.2%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/integration/sessions_pool/gtest >> YdbSdkSessionsPool::WaitQueue/1 [GOOD] |94.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/ttl/py3test >> test_ttl.py::TestTTLAlterSettings::test_case [GOOD] |94.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_send_message_rate[tables_format_v0] |94.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test |94.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test |94.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test |94.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test |94.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test >> test_quoting.py::TestSqsQuotingWithKesus::test_properly_creates_and_deletes_queue[tables_format_v0-std] >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_send_message_rate[tables_format_v1] |94.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_3_UNIQUE_SYNC-pk_types1-all_types1-index1--UNIQUE-SYNC] [GOOD] >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_does_actions_with_queue[tables_format_v1-fifo] >> test_quoting.py::TestSqsQuotingWithKesus::test_properly_creates_and_deletes_queue[tables_format_v1-std] |94.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test |94.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test |94.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test |94.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test |94.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test |94.2%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/integration/sessions_pool/gtest >> YdbSdkSessionsPool::StressTestSync/1 [GOOD] |94.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test |94.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test |94.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test >> test_quoting.py::TestSqsQuotingWithKesus::test_creates_quoter |94.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test |94.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test >> YdbSdkSessionsPool1Session::CustomPlan/0 
[GOOD] |94.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test >> test_quoting.py::TestSqsQuotingWithKesus::test_properly_creates_and_deletes_queue[tables_format_v0-fifo] >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_create_queue_rate[tables_format_v1] >> test_s3.py::TestYdbS3TTL::test_s3[table_index_4__SYNC-pk_types0-all_types0-index0---SYNC] [GOOD] >> YdbSdkSessionsPool::StressTestSync/0 [GOOD] >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_does_not_create_kesus |94.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_4_UNIQUE_SYNC-pk_types0-all_types0-index0--UNIQUE-SYNC] [GOOD] |94.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test |94.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test |94.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test |94.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test |94.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test |94.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/scheme_shard/py3test >> test_copy_ops.py::TestSchemeShardCopyOps::test_given_table_when_create_copy_of_it_then_ok [GOOD] |94.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test >> test_quoting.py::TestSqsQuotingWithKesus::test_properly_creates_and_deletes_queue[tables_format_v1-fifo] |94.2%| [TA] $(B)/ydb/tests/functional/scheme_shard/test-results/py3test/{meta.json ... results_accumulator.log} |94.2%| [TA] {RESULT} $(B)/ydb/tests/functional/scheme_shard/test-results/py3test/{meta.json ... 
results_accumulator.log} |94.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test |94.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test |94.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test |94.2%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/integration/sessions_pool/gtest >> YdbSdkSessionsPool1Session::CustomPlan/0 [GOOD] |94.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test |94.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test |94.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test >> YdbSdkSessionsPool::PeriodicTask/0 [GOOD] >> YdbSdkSessionsPool::PeriodicTask/1 |94.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test |94.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_does_actions_with_queue[tables_format_v0-std] |94.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_does_actions_with_queue[tables_format_v1-std] [GOOD] |94.2%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/integration/sessions_pool/gtest >> YdbSdkSessionsPool::StressTestSync/0 [GOOD] >> YdbSdkSessions::CloseSessionWithSessionPoolExplicit [GOOD] |94.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test |94.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test >> KqpPrefixedVectorIndexes::OrderByCosineSimilarityNotNullableLevel1 >> KqpIndexes::UpsertMultipleUniqIndexes |94.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_does_actions_with_queue[tables_format_v0-fifo] [GOOD] >> test_copy_table.py::TestCopyTable::test_copy_table[table_ttl_Uint64-pk_types15-all_types15-index15-Uint64--] >> test_copy_table.py::TestCopyTable::test_copy_table[table_ttl_Uint32-pk_types14-all_types14-index14-Uint32--] >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_1__SYNC-pk_types8-all_types8-index8---SYNC] >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_1_UNIQUE_SYNC-pk_types3-all_types3-index3--UNIQUE-SYNC] >> test_copy_table.py::TestCopyTable::test_copy_table[table_all_types-pk_types12-all_types12-index12---] >> test_copy_table.py::TestCopyTable::test_copy_table[table_ttl_Timestamp-pk_types17-all_types17-index17-Timestamp--] >> test_copy_table.py::TestCopyTable::test_copy_table[table_ttl_DyNumber-pk_types13-all_types13-index13-DyNumber--] >> YdbSdkSessions::CloseSessionWithSessionPoolFromDtors [GOOD] >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_2__SYNC-pk_types7-all_types7-index7---SYNC] >> test_copy_table.py::TestCopyTable::test_copy_table[table_ttl_Datetime-pk_types16-all_types16-index16-Datetime--] >> test_copy_table.py::TestCopyTable::test_copy_table[table_ttl_Date-pk_types18-all_types18-index18-Date--] >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_0__ASYNC-pk_types11-all_types11-index11---ASYNC] >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_4__SYNC-pk_types5-all_types5-index5---SYNC] |94.3%| [TM] {asan, default-linux-x86_64, release} 
ydb/services/metadata/secret/ut/unittest >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_3__SYNC-pk_types6-all_types6-index6---SYNC] |94.3%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/integration/sessions/gtest >> YdbSdkSessions::CloseSessionWithSessionPoolExplicit [GOOD] >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_other_requests_rate[tables_format_v0] [GOOD] >> TCmsTest::WalleTasks >> TCmsTest::ActionIssuePartialPermissions >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_does_actions_with_queue[tables_format_v1-fifo] [GOOD] >> test_quoting.py::TestSqsQuotingWithKesus::test_properly_creates_and_deletes_queue[tables_format_v0-std] [GOOD] >> test_quoting.py::TestSqsQuotingWithKesus::test_properly_creates_and_deletes_queue[tables_format_v1-std] [GOOD] >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_other_requests_rate[tables_format_v1] [GOOD] >> KqpMultishardIndex::SecondaryIndexSelectNull |94.3%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/integration/sessions/gtest >> YdbSdkSessions::CloseSessionWithSessionPoolFromDtors [GOOD] >> test_retry_high_rate.py::TestRetry::test_high_rate[kikimr0] [GOOD] |94.3%| [TA] $(B)/ydb/public/sdk/cpp/tests/integration/sessions/test-results/gtest/{meta.json ... results_accumulator.log} |94.3%| [TA] {RESULT} $(B)/ydb/public/sdk/cpp/tests/integration/sessions/test-results/gtest/{meta.json ... results_accumulator.log} >> TCmsTest::ActionIssuePartialPermissions [GOOD] >> TCmsTest::ActionWithZeroDuration >> test_quoting.py::TestSqsQuotingWithKesus::test_creates_quoter [GOOD] >> test_disk.py::TestSafeDiskBreak::test_erase_method [GOOD] >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_index_4__SYNC-pk_types0-all_types0-index0---SYNC] >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_does_not_create_kesus [GOOD] |94.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_proxy/ut_encrypted_storage/unittest >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_send_message_rate[tables_format_v0] [GOOD] >> TStorageTenantTest::CreateTableOutsideDatabaseFailToStartTabletsButDropIsOk [GOOD] >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_send_message_rate[tables_format_v1] [GOOD] |94.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_proxy/ut_encrypted_storage/unittest >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_create_queue_rate[tables_format_v0] [GOOD] |94.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_proxy/ut_encrypted_storage/unittest >> TStorageTenantTest::CreateTableOutsideDatabaseFailToStartTabletsButDropIsOk [GOOD] >> test_quoting.py::TestSqsQuotingWithKesus::test_properly_creates_and_deletes_queue[tables_format_v0-fifo] [GOOD] >> TCmsTest::ActionWithZeroDuration [GOOD] >> TCmsTest::CheckUnreplicatedDiskPreventsRestart >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_create_queue_rate[tables_format_v1] [GOOD] |94.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_proxy/ut_encrypted_storage/unittest |94.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/s3/py3test >> test_s3.py::TestYdbS3TTL::test_s3[table_index_4__SYNC-pk_types0-all_types0-index0---SYNC] [GOOD] >> KqpUniqueIndex::InsertNullInPk >> TCmsTest::WalleTasks [GOOD] >> TCmsTest::WalleTasksWithNodeLimit >> TStorageTenantTest::Empty [GOOD] >> TCmsTest::CheckUnreplicatedDiskPreventsRestart [GOOD] >> TCmsTest::AllVDisksEvictionInRack |94.3%| [TA] 
$(B)/ydb/tests/datashard/s3/test-results/py3test/{meta.json ... results_accumulator.log} |94.3%| [TA] {RESULT} $(B)/ydb/tests/datashard/s3/test-results/py3test/{meta.json ... results_accumulator.log} |94.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_proxy/ut_encrypted_storage/unittest |94.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_proxy/ut_encrypted_storage/unittest >> TStorageTenantTest::Empty [GOOD] >> test_quoting.py::TestSqsQuotingWithKesus::test_properly_creates_and_deletes_queue[tables_format_v1-fifo] [GOOD] >> KqpIndexes::UpsertMultipleUniqIndexes [GOOD] >> KqpIndexes::UpsertWithNullKeysComplex >> TCmsTest::RequestReplaceDevices |94.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_does_actions_with_queue[tables_format_v1-std] [GOOD] >> TCmsTest::AllVDisksEvictionInRack [GOOD] |94.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_proxy/ut_encrypted_storage/unittest >> KqpMultishardIndex::YqWorksFineAfterAlterIndexTableDirectly ------- [TM] {asan, default-linux-x86_64, release} ydb/core/cms/ut/unittest >> TCmsTest::AllVDisksEvictionInRack [GOOD] Test command err: 2025-05-07T09:15:45.828795Z node 25 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-05-07T09:15:45.828921Z node 25 :CMS DEBUG: cms_tx_update_downtimes.cpp:26: TTxUpdateDowntimes Complete 2025-05-07T09:15:45.829092Z node 25 :CMS DEBUG: cluster_info.cpp:966: Timestamp: 1970-01-01T00:02:00Z 2025-05-07T09:15:45.839980Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvClusterStateRequest { }, response# NKikimr::NCms::TEvCms::TEvClusterStateResponse { Status { Code: OK } State { Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120027512 } Devices { Name: "vdisk-0-1-0-0-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-1-1-0-0-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-2-1-0-0-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-3-1-0-0-0" State: UP Timestamp: 120027512 } Devices { Name: "pdisk-25-25" State: UP Timestamp: 120027512 } Timestamp: 120027512 NodeId: 25 InterconnectPort: 12001 Location { Rack: "1" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120027512 } Devices { Name: "vdisk-0-1-0-1-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-1-1-0-1-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-2-1-0-1-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-3-1-0-1-0" State: UP Timestamp: 120027512 } Devices { Name: "pdisk-26-26" State: UP Timestamp: 120027512 } Timestamp: 120027512 NodeId: 26 InterconnectPort: 12002 Location { Rack: "1" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120027512 } Devices { Name: "vdisk-0-1-0-2-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-1-1-0-2-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-2-1-0-2-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-3-1-0-2-0" State: UP Timestamp: 120027512 } Devices { Name: "pdisk-27-27" State: UP Timestamp: 120027512 } Timestamp: 120027512 NodeId: 27 InterconnectPort: 12003 Location { Rack: "2" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120027512 } Devices { Name: "vdisk-0-1-0-3-0" State: UP 
Timestamp: 120027512 } Devices { Name: "vdisk-1-1-0-3-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-2-1-0-3-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-3-1-0-3-0" State: UP Timestamp: 120027512 } Devices { Name: "pdisk-28-28" State: UP Timestamp: 120027512 } Timestamp: 120027512 NodeId: 28 InterconnectPort: 12004 Location { Rack: "2" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120027512 } Devices { Name: "vdisk-0-1-0-4-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-1-1-0-4-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-2-1-0-4-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-3-1-0-4-0" State: UP Timestamp: 120027512 } Devices { Name: "pdisk-29-29" State: UP Timestamp: 120027512 } Timestamp: 120027512 NodeId: 29 InterconnectPort: 12005 Location { Rack: "3" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120027512 } Devices { Name: "vdisk-0-1-0-5-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-1-1-0-5-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-2-1-0-5-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-3-1-0-5-0" State: UP Timestamp: 120027512 } Devices { Name: "pdisk-30-30" State: UP Timestamp: 120027512 } Timestamp: 120027512 NodeId: 30 InterconnectPort: 12006 Location { Rack: "3" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120027512 } Devices { Name: "vdisk-0-1-0-6-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-1-1-0-6-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-2-1-0-6-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-3-1-0-6-0" State: UP Timestamp: 120027512 } Devices { Name: "pdisk-31-31" State: UP Timestamp: 120027512 } Timestamp: 120027512 NodeId: 31 InterconnectPort: 12007 Location { Rack: "4" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120027512 } Devices { Name: "vdisk-0-1-0-7-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-1-1-0-7-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-2-1-0-7-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-3-1-0-7-0" State: UP Timestamp: 120027512 } Devices { Name: "pdisk-32-32" State: UP Timestamp: 120027512 } Timestamp: 120027512 NodeId: 32 InterconnectPort: 12008 Location { Rack: "4" } StartTimeSeconds: 0 } Timestamp: 120027512 } } 2025-05-07T09:15:45.844487Z node 25 :CMS DEBUG: sentinel.cpp:486: [Sentinel] [ConfigUpdater] Handle TEvCms::TEvClusterStateResponse: response# Status { Code: OK } State { Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120027512 } Devices { Name: "vdisk-0-1-0-0-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-1-1-0-0-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-2-1-0-0-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-3-1-0-0-0" State: UP Timestamp: 120027512 } Devices { Name: "pdisk-25-25" State: UP Timestamp: 120027512 } Timestamp: 120027512 NodeId: 25 InterconnectPort: 12001 Location { Rack: "1" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120027512 } Devices { Name: "vdisk-0-1-0-1-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-1-1-0-1-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-2-1-0-1-0" State: UP 
Timestamp: 120027512 } Devices { Name: "vdisk-3-1-0-1-0" State: UP Timestamp: 120027512 } Devices { Name: "pdisk-26-26" State: UP Timestamp: 120027512 } Timestamp: 120027512 NodeId: 26 InterconnectPort: 12002 Location { Rack: "1" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120027512 } Devices { Name: "vdisk-0-1-0-2-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-1-1-0-2-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-2-1-0-2-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-3-1-0-2-0" State: UP Timestamp: 120027512 } Devices { Name: "pdisk-27-27" State: UP Timestamp: 120027512 } Timestamp: 120027512 NodeId: 27 InterconnectPort: 12003 Location { Rack: "2" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120027512 } Devices { Name: "vdisk-0-1-0-3-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-1-1-0-3-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-2-1-0-3-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-3-1-0-3-0" State: UP Timestamp: 120027512 } Devices { Name: "pdisk-28-28" State: UP Timestamp: 120027512 } Timestamp: 120027512 NodeId: 28 InterconnectPort: 12004 Location { Rack: "2" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120027512 } Devices { Name: "vdisk-0-1-0-4-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-1-1-0-4-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-2-1-0-4-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-3-1-0-4-0" State: UP Timestamp: 120027512 } Devices { Name: "pdisk-29-29" State: UP Timestamp: 120027512 } Timestamp: 120027512 NodeId: 29 InterconnectPort: 12005 Location { Rack: "3" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120027512 } Devices { Name: "vdisk-0-1-0-5-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-1-1-0-5-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-2-1-0-5-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-3-1-0-5-0" State: UP Timestamp: 120027512 } Devices { Name: "pdisk-30-30" State: UP Timestamp: 120027512 } Timestamp: 120027512 NodeId: 30 InterconnectPort: 12006 Location { Rack: "3" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120027512 } Devices { Name: "vdisk-0-1-0-6-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-1-1-0-6-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-2-1-0-6-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-3-1-0-6-0" State: UP Timestamp: 120027512 } Devices { Name: "pdisk-31-31" State: UP Timestamp: 120027512 } Timestamp: 120027512 NodeId: 31 InterconnectPort: 12007 Location { Rack: "4" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120027512 } Devices { Name: "vdisk-0-1-0-7-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-1-1-0-7-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-2-1-0-7-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-3-1-0-7-0" State: UP Timestamp: 120027512 } Devices { Name: "pdisk-32-32" State: UP Timestamp: 120027512 } Timestamp: 120027512 NodeId: 32 InterconnectPort: 12008 Location { Rack: "4" } StartTimeSeconds: 0 } Timestamp: 120027512 } 2025-05-07T09:15:45.844884Z node 25 :CMS DEBUG: 
sentinel.cpp:944: [Sentinel] [Main] Config was updated in 120.003512s 2025-05-07T09:15:45.844951Z node 25 :CMS DEBUG: sentinel.cpp:884: [Sentinel] [Main] Start StateUpdater 2025-05-07T09:15:45.845213Z node 25 :CMS INFO: cms.cpp:347: Check request: User: "user" Actions { Type: RESTART_SERVICES Host: "25" Services: "storage" Duration: 600000000 } PartialPermissionAllowed: false Schedule: false DryRun: false EvictVDisks: true 2025-05-07T09:15:45.845297Z node 25 :CMS DEBUG: cms.cpp:379: Checking action: Type: RESTART_SERVICES Host: "25" Services: "storage" Duration: 600000000 2025-05-07T09:15:45.845363Z node 25 :CMS DEBUG: cms.cpp:398: Result: DISALLOW_TEMP (reason: VDisks eviction from host 25 has not yet been completed) 2025-05-07T09:15:45.845564Z node 25 :CMS DEBUG: cms_tx_store_permissions.cpp:26: TTxStorePermissions Execute 2025-05-07T09:15:45.845830Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Store request: id# user-r-1, owner# user, order# 1, priority# 0, body# User: "user" Actions { Type: RESTART_SERVICES Host: "25" Services: "storage" Duration: 600000000 Issue { Type: GENERIC Message: "VDisks eviction from host 25 has not yet been completed" } } PartialPermissionAllowed: false Schedule: false Reason: "" TenantPolicy: DEFAULT AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: true 2025-05-07T09:15:45.845896Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Add host marker: host# 25, marker# MARKER_DISK_FAULTY 2025-05-07T09:15:45.846316Z node 25 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 25, wbId# [25:8388350642965737326:1634689637] 2025-05-07T09:15:45.846396Z node 25 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 26, wbId# [26:8388350642965737326:1634689637] 2025-05-07T09:15:45.846451Z node 25 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 27, wbId# [27:8388350642965737326:1634689637] 2025-05-07T09:15:45.846489Z node 25 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 28, wbId# [28:8388350642965737326:1634689637] 2025-05-07T09:15:45.846521Z node 25 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 29, wbId# [29:8388350642965737326:1634689637] 2025-05-07T09:15:45.846551Z node 25 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 30, wbId# [30:8388350642965737326:1634689637] 2025-05-07T09:15:45.846579Z node 25 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] R ... 
dle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 32, response# PDiskStateInfo { PDiskId: 32 CreateTime: 0 ChangeTime: 0 Path: "/32/pdisk-32.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 180027 2025-05-07T09:15:46.104242Z node 25 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 26, response# PDiskStateInfo { PDiskId: 26 CreateTime: 0 ChangeTime: 0 Path: "/26/pdisk-26.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 180027 2025-05-07T09:15:46.104362Z node 25 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 27, response# PDiskStateInfo { PDiskId: 27 CreateTime: 0 ChangeTime: 0 Path: "/27/pdisk-27.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 180027 2025-05-07T09:15:46.104438Z node 25 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 28, response# PDiskStateInfo { PDiskId: 28 CreateTime: 0 ChangeTime: 0 Path: "/28/pdisk-28.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 180027 2025-05-07T09:15:46.104518Z node 25 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 29, response# PDiskStateInfo { PDiskId: 29 CreateTime: 0 ChangeTime: 0 Path: "/29/pdisk-29.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 180027 2025-05-07T09:15:46.104587Z node 25 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 30, response# PDiskStateInfo { PDiskId: 30 CreateTime: 0 ChangeTime: 0 Path: "/30/pdisk-30.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 180027 2025-05-07T09:15:46.104643Z node 25 :CMS DEBUG: sentinel.cpp:960: [Sentinel] [Main] State was updated in 0.000000s 2025-05-07T09:15:46.104852Z node 25 :CMS NOTICE: sentinel.cpp:1039: [Sentinel] [Main] PDisk status changed: pdiskId# 26:26, status# ACTIVE, required status# FAULTY, reason# Forced status, dry run# 0 2025-05-07T09:15:46.104925Z node 25 :CMS NOTICE: sentinel.cpp:1039: [Sentinel] [Main] PDisk status changed: pdiskId# 25:25, status# ACTIVE, required status# FAULTY, reason# Forced status, dry run# 0 2025-05-07T09:15:46.104970Z node 25 :CMS DEBUG: sentinel.cpp:1076: [Sentinel] [Main] Change pdisk status: requestsSize# 2 2025-05-07T09:15:46.105213Z node 25 :CMS DEBUG: cms_tx_log_and_send.cpp:19: TTxLogAndSend Execute 2025-05-07T09:15:46.105430Z node 25 :CMS DEBUG: cms_tx_log_and_send.cpp:19: TTxLogAndSend Execute 2025-05-07T09:15:46.105565Z node 25 :CMS DEBUG: sentinel.cpp:1202: [Sentinel] [Main] Handle TEvBlobStorage::TEvControllerConfigResponse: response# Status { Success: true } Status { Success: true } Success: true, cookie# 1 2025-05-07T09:15:46.105634Z node 25 :CMS NOTICE: sentinel.cpp:1226: [Sentinel] [Main] PDisk status has been changed: pdiskId# 25:25 2025-05-07T09:15:46.105695Z node 25 :CMS NOTICE: sentinel.cpp:1226: [Sentinel] [Main] PDisk status has been changed: pdiskId# 26:26 2025-05-07T09:15:46.123279Z node 25 :CMS DEBUG: cms_tx_log_and_send.cpp:27: TTxLogAndSend Complete 2025-05-07T09:15:46.123381Z node 25 :CMS DEBUG: cms_tx_log_and_send.cpp:27: TTxLogAndSend Complete 2025-05-07T09:15:46.140028Z node 25 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes 
Execute 2025-05-07T09:15:46.140154Z node 25 :CMS DEBUG: cms_tx_update_downtimes.cpp:26: TTxUpdateDowntimes Complete 2025-05-07T09:15:46.140231Z node 25 :CMS DEBUG: cluster_info.cpp:966: Timestamp: 1970-01-01T00:03:00Z 2025-05-07T09:15:46.141117Z node 25 :CMS INFO: cms.cpp:347: Check request: User: "user" Actions { Type: RESTART_SERVICES Host: "25" Services: "storage" Duration: 600000000 Issue { Type: GENERIC Message: "VDisks eviction from host 25 has not yet been completed" } } PartialPermissionAllowed: false Schedule: false Reason: "" TenantPolicy: DEFAULT AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: true 2025-05-07T09:15:46.141235Z node 25 :CMS DEBUG: cms.cpp:379: Checking action: Type: RESTART_SERVICES Host: "25" Services: "storage" Duration: 600000000 Issue { Type: GENERIC Message: "VDisks eviction from host 25 has not yet been completed" } 2025-05-07T09:15:46.141315Z node 25 :CMS DEBUG: node_checkers.cpp:101: [Nodes Counter] Checking Node: 25, with state: Up, with limit: 0, with ratio limit: 0, locked nodes: 0, down nodes: 0 2025-05-07T09:15:46.141381Z node 25 :CMS DEBUG: cms.cpp:728: Ring: 0; State: Ok 2025-05-07T09:15:46.141409Z node 25 :CMS DEBUG: cms.cpp:728: Ring: 1; State: Ok 2025-05-07T09:15:46.141428Z node 25 :CMS DEBUG: cms.cpp:728: Ring: 2; State: Ok 2025-05-07T09:15:46.141480Z node 25 :CMS DEBUG: cms.cpp:387: Result: ALLOW 2025-05-07T09:15:46.141647Z node 25 :CMS DEBUG: cms.cpp:1035: Accepting permission: id# user-p-1, requestId# user-r-1, owner# user 2025-05-07T09:15:46.141723Z node 25 :CMS INFO: cluster_info.cpp:777: Adding lock for Host ::1:12001 (25) (permission user-p-1 until 1970-01-01T00:13:00Z) 2025-05-07T09:15:46.141813Z node 25 :CMS DEBUG: cms_tx_store_permissions.cpp:26: TTxStorePermissions Execute 2025-05-07T09:15:46.142442Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Store permission: id# user-p-1, validity# 1970-01-01T00:13:00.127512Z, action# Type: RESTART_SERVICES Host: "25" Services: "storage" Duration: 600000000 2025-05-07T09:15:46.142597Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Store request: id# user-r-1, owner# user, order# 1, priority# 0, body# User: "user" PartialPermissionAllowed: false Schedule: false Reason: "" TenantPolicy: DEFAULT AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: true 2025-05-07T09:15:46.160907Z node 25 :CMS DEBUG: cms_tx_store_permissions.cpp:137: TTxStorePermissions complete 2025-05-07T09:15:46.161246Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvCheckRequest { User: "user" RequestId: "user-r-1" DryRun: false AvailabilityMode: MODE_MAX_AVAILABILITY }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: ALLOW } RequestId: "user-r-1" Permissions { Id: "user-p-1" Action { Type: RESTART_SERVICES Host: "25" Services: "storage" Duration: 600000000 } Deadline: 780127512 Extentions { Type: HostInfo Hosts { Name: "::1" State: UP NodeId: 25 InterconnectPort: 12001 } } } } 2025-05-07T09:15:46.161337Z node 25 :CMS DEBUG: cms.cpp:1063: Schedule cleanup at 1970-01-01T00:33:00.127512Z 2025-05-07T09:15:46.178593Z node 25 :CMS INFO: cluster_info.cpp:777: Adding lock for Host ::1:12001 (25) (permission user-p-1 until 1970-01-01T00:13:00Z) 2025-05-07T09:15:46.179004Z node 25 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-05-07T09:15:46.179088Z node 25 :CMS DEBUG: cms_tx_update_downtimes.cpp:26: TTxUpdateDowntimes Complete 2025-05-07T09:15:46.179147Z node 25 :CMS DEBUG: 
cluster_info.cpp:966: Timestamp: 1970-01-01T00:03:00Z 2025-05-07T09:15:46.180054Z node 25 :CMS INFO: cms.cpp:347: Check request: User: "user" Actions { Type: RESTART_SERVICES Host: "26" Services: "storage" Duration: 600000000 Issue { Type: GENERIC Message: "VDisks eviction from host 26 has not yet been completed" } } PartialPermissionAllowed: false Schedule: false Reason: "" TenantPolicy: DEFAULT AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: true 2025-05-07T09:15:46.180151Z node 25 :CMS DEBUG: cms.cpp:379: Checking action: Type: RESTART_SERVICES Host: "26" Services: "storage" Duration: 600000000 Issue { Type: GENERIC Message: "VDisks eviction from host 26 has not yet been completed" } 2025-05-07T09:15:46.180227Z node 25 :CMS DEBUG: node_checkers.cpp:101: [Nodes Counter] Checking Node: 26, with state: Up, with limit: 0, with ratio limit: 0, locked nodes: 1, down nodes: 0 2025-05-07T09:15:46.180281Z node 25 :CMS DEBUG: cms.cpp:387: Result: ALLOW 2025-05-07T09:15:46.180432Z node 25 :CMS DEBUG: cms.cpp:1035: Accepting permission: id# user-p-2, requestId# user-r-2, owner# user 2025-05-07T09:15:46.180530Z node 25 :CMS INFO: cluster_info.cpp:777: Adding lock for Host ::1:12002 (26) (permission user-p-2 until 1970-01-01T00:13:00Z) 2025-05-07T09:15:46.180616Z node 25 :CMS DEBUG: cms_tx_store_permissions.cpp:26: TTxStorePermissions Execute 2025-05-07T09:15:46.180810Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Store permission: id# user-p-2, validity# 1970-01-01T00:13:00.229024Z, action# Type: RESTART_SERVICES Host: "26" Services: "storage" Duration: 600000000 2025-05-07T09:15:46.180914Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Store request: id# user-r-2, owner# user, order# 2, priority# 0, body# User: "user" PartialPermissionAllowed: false Schedule: false Reason: "" TenantPolicy: DEFAULT AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: true 2025-05-07T09:15:46.198086Z node 25 :CMS DEBUG: cms_tx_store_permissions.cpp:137: TTxStorePermissions complete 2025-05-07T09:15:46.198441Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvCheckRequest { User: "user" RequestId: "user-r-2" DryRun: false AvailabilityMode: MODE_MAX_AVAILABILITY }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: ALLOW } RequestId: "user-r-2" Permissions { Id: "user-p-2" Action { Type: RESTART_SERVICES Host: "26" Services: "storage" Duration: 600000000 } Deadline: 780229024 Extentions { Type: HostInfo Hosts { Name: "::1" State: UP NodeId: 26 InterconnectPort: 12002 } } } } 2025-05-07T09:15:46.199147Z node 25 :CMS INFO: cms.cpp:1325: User user is done with permissions user-p-1 2025-05-07T09:15:46.199207Z node 25 :CMS DEBUG: cms.cpp:1348: Resulting status: OK 2025-05-07T09:15:46.199301Z node 25 :CMS DEBUG: cms_tx_remove_permissions.cpp:28: TTxRemovePermissions Execute 2025-05-07T09:15:46.199407Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reset host markers: host# 25 2025-05-07T09:15:46.199547Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Remove request: id# user-r-1, reason# permission user-p-1 was removed 2025-05-07T09:15:46.199615Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Remove permission: id# user-p-1, reason# explicit remove 2025-05-07T09:15:46.219017Z node 25 :CMS DEBUG: cms_tx_remove_permissions.cpp:79: TTxRemovePermissions Complete 2025-05-07T09:15:46.219327Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# 
NKikimr::NCms::TEvCms::TEvManagePermissionRequest { User: "user" Command: DONE Permissions: "user-p-1" DryRun: false }, response# NKikimr::NCms::TEvCms::TEvManagePermissionResponse { Status { Code: OK } } 2025-05-07T09:15:46.219946Z node 25 :CMS INFO: cms.cpp:1325: User user is done with permissions user-p-2 2025-05-07T09:15:46.220016Z node 25 :CMS DEBUG: cms.cpp:1348: Resulting status: OK 2025-05-07T09:15:46.220106Z node 25 :CMS DEBUG: cms_tx_remove_permissions.cpp:28: TTxRemovePermissions Execute 2025-05-07T09:15:46.220218Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reset host markers: host# 26 2025-05-07T09:15:46.220327Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Remove request: id# user-r-2, reason# permission user-p-2 was removed 2025-05-07T09:15:46.220369Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Remove permission: id# user-p-2, reason# explicit remove 2025-05-07T09:15:46.235157Z node 25 :CMS DEBUG: cms_tx_remove_permissions.cpp:79: TTxRemovePermissions Complete 2025-05-07T09:15:46.235413Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvManagePermissionRequest { User: "user" Command: DONE Permissions: "user-p-2" DryRun: false }, response# NKikimr::NCms::TEvCms::TEvManagePermissionResponse { Status { Code: OK } } |94.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test >> test_quoting.py::TestSqsQuotingWithKesus::test_creates_quoter [GOOD] >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_does_actions_with_queue[tables_format_v0-std] [GOOD] |94.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/tools/nemesis/ut/py3test >> test_disk.py::TestSafeDiskBreak::test_erase_method [GOOD] |94.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_proxy/ut_encrypted_storage/unittest >> TCmsTest::RequestReplaceDevices [GOOD] >> TCmsTest::RequestReplaceDevicePDisk >> YdbSdkSessionsPool::PeriodicTask/1 [GOOD] >> KqpIndexes::InnerJoinWithNonIndexWherePredicate |94.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_does_actions_with_queue[tables_format_v0-fifo] [GOOD] |94.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_proxy/ut_encrypted_storage/unittest >> TCmsTest::WalleTasksWithNodeLimit [GOOD] >> TCmsTest::WalleTasksDifferentPriorities >> KqpMultishardIndex::SecondaryIndexSelectNull [GOOD] >> KqpMultishardIndex::SecondaryIndexSelect |94.3%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest |94.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_proxy/ut_encrypted_storage/unittest >> TCmsTest::RequestReplaceDevicePDisk [GOOD] >> TCmsTest::RequestReplaceDevicePDiskByPath >> TDowntimeTest::SetIgnoredDowntimeGap [GOOD] >> TMaintenanceApiTest::CompositeActionGroupSameStorageGroup |94.3%| [TA] $(B)/ydb/core/tx/tx_proxy/ut_encrypted_storage/test-results/unittest/{meta.json ... results_accumulator.log} |94.3%| [TA] {RESULT} $(B)/ydb/core/tx/tx_proxy/ut_encrypted_storage/test-results/unittest/{meta.json ... 
results_accumulator.log} >> KqpVectorIndexes::OrderByCosineDistanceNotNullableLevel1 >> TCmsTest::WalleTasksDifferentPriorities [GOOD] >> KqpUniqueIndex::InsertNullInPk [GOOD] >> KqpUniqueIndex::InsertNullInFk |94.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_does_not_create_kesus [GOOD] |94.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/cms/ut/unittest >> TCmsTest::WalleTasksDifferentPriorities [GOOD] |94.3%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/integration/sessions_pool/gtest >> YdbSdkSessionsPool::PeriodicTask/1 [GOOD] >> TCmsTest::RequestReplaceDevicePDiskByPath [GOOD] >> TCmsTest::RequestReplaceManyDevicesOnOneNode >> TMaintenanceApiTest::CompositeActionGroupSameStorageGroup [GOOD] >> TMaintenanceApiTest::ActionReason |94.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_other_requests_rate[tables_format_v0] [GOOD] >> KqpIndexes::UpsertWithNullKeysComplex [GOOD] >> KqpIndexes::UpsertNoIndexColumns >> KqpIndexes::CreateTableWithImplicitSyncIndexSQL >> KqpMultishardIndex::YqWorksFineAfterAlterIndexTableDirectly [GOOD] >> KqpPrefixedVectorIndexes::OrderByCosineDistanceNotNullableLevel1 |94.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_does_actions_with_queue[tables_format_v1-fifo] [GOOD] |94.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test >> test_quoting.py::TestSqsQuotingWithKesus::test_properly_creates_and_deletes_queue[tables_format_v1-std] [GOOD] >> TCmsTest::StateStorageNodesFromOneRing >> TMaintenanceApiTest::ActionReason [GOOD] |94.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_other_requests_rate[tables_format_v1] [GOOD] |94.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/fq/multi_plane/py3test >> test_retry_high_rate.py::TestRetry::test_high_rate[kikimr0] [GOOD] |94.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/cms/ut/unittest >> TMaintenanceApiTest::ActionReason [GOOD] >> KqpIndexes::SecondaryIndexSelectUsingScripting >> TCmsTest::StateStorageNodesFromOneRing [GOOD] >> TCmsTest::StateStorageTwoBrokenRings >> test_ttl.py::TestTTLOnIndexedTable::test_case [GOOD] >> TCmsTest::RequestReplaceManyDevicesOnOneNode [GOOD] >> TLocksTest::GoodLock >> UpsertLoad::ShouldCreateTable |94.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_send_message_rate[tables_format_v1] [GOOD] >> UpsertLoad::ShouldWriteKqpUpsert2 >> UpsertLoad::ShouldWriteDataBulkUpsert >> UpsertLoad::ShouldWriteKqpUpsertKeyFrom >> UpsertLoad::ShouldWriteDataBulkUpsertLocalMkqlKeyFrom >> ReadLoad::ShouldReadIterate >> KqpIndexes::UpdateDeletePlan-UseSink |94.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_send_message_rate[tables_format_v0] [GOOD] |94.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/cms/ut/unittest >> TCmsTest::RequestReplaceManyDevicesOnOneNode [GOOD] >> UpsertLoad::ShouldWriteDataBulkUpsertLocalMkql >> KqpIndexes::InnerJoinWithNonIndexWherePredicate [GOOD] >> 
KqpIndexes::InnerJoinSecondaryIndexLookupAndRightTablePredicateNonIndexColumn |94.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_create_queue_rate[tables_format_v0] [GOOD] |94.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test >> test_quoting.py::TestSqsQuotingWithKesus::test_properly_creates_and_deletes_queue[tables_format_v0-fifo] [GOOD] >> test_ttl.py::TestTTLDefaultEnv::test_case [GOOD] >> TCmsTest::StateStorageTwoBrokenRings [GOOD] >> TCmsTest::StateStorageRollingRestart |94.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test >> test_quoting.py::TestSqsQuotingWithKesus::test_properly_creates_and_deletes_queue[tables_format_v0-std] [GOOD] |94.3%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest >> UpsertLoad::ShouldWriteDataBulkUpsertBatch >> TCmsTest::StateRequest >> test_ttl.py::TestTTLValueSinceUnixEpoch::test_case [GOOD] >> KqpMultishardIndex::SecondaryIndexSelect [GOOD] >> KqpPrefixedVectorIndexes::OrderByCosineDistanceNullableLevel1 |94.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test >> test_quoting.py::TestSqsQuotingWithKesus::test_properly_creates_and_deletes_queue[tables_format_v1-fifo] [GOOD] >> UpsertLoad::ShouldWriteKqpUpsert ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpMultishardIndex::SecondaryIndexSelect [GOOD] Test command err: Trying to start YDB, gRPC: 63590, MsgBus: 27415 2025-05-07T09:15:39.411224Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501629991218027064:2070];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:15:39.411279Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003bf8/r3tmp/tmpGJB0Pr/pdisk_1.dat 2025-05-07T09:15:40.112944Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:15:40.157246Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:15:40.157345Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:15:40.172447Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 63590, node 1 2025-05-07T09:15:40.490568Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:15:40.490591Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:15:40.490597Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:15:40.490706Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:27415 TClient is connected to server localhost:27415 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:15:41.670908Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:15:41.710936Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T09:15:41.732756Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:15:42.021058Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-05-07T09:15:42.506412Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 2025-05-07T09:15:42.658075Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:15:44.414128Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501629991218027064:2070];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:15:44.414212Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:15:45.131351Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501630016987832481:2409], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:15:45.131532Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:15:45.511002Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T09:15:45.556911Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T09:15:45.635592Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T09:15:45.681031Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T09:15:45.743746Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T09:15:45.826671Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T09:15:45.921245Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T09:15:46.027089Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501630021282800444:2474], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:15:46.027201Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:15:46.027789Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501630021282800449:2477], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:15:46.031976Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-05-07T09:15:46.056898Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501630021282800451:2478], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T09:15:46.166931Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501630021282800504:3430] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:15:47.500212Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480 waiting... Trying to start YDB, gRPC: 27342, MsgBus: 23083 2025-05-07T09:15:51.550986Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7501630041477635752:2067];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:15:51.551105Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003bf8/r3tmp/tmp2H28KD/pdisk_1.dat 2025-05-07T09:15:51.710936Z node 2 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 27342, node 2 2025-05-07T09:15:51.731873Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:15:51.731986Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:15:51.736939Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:15:51.792112Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:15:51.792143Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:15:51.792154Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:15:51.792275Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23083 TClient is connected to server localhost:23083 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 
2025-05-07T09:15:52.318620Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 2025-05-07T09:15:52.336267Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:15:52.463768Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:15:52.654670Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:15:52.735790Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:15:55.367176Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7501630058657506560:2406], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:15:55.367303Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:15:55.436264Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T09:15:55.474226Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T09:15:55.506937Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T09:15:55.539225Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T09:15:55.580374Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T09:15:55.656669Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T09:15:55.760244Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T09:15:55.863988Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7501630058657507232:2470], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:15:55.864082Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:15:55.864569Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7501630058657507237:2473], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:15:55.869138Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-05-07T09:15:55.917744Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7501630058657507239:2474], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T09:15:56.007509Z node 2 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [2:7501630062952474586:3417] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:15:56.561419Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7501630041477635752:2067];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:15:56.561489Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:15:57.079114Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480 waiting... >> KqpUniqueIndex::InsertNullInFk [GOOD] >> KqpUniqueIndex::InsertNullInComplexFk >> TCmsTest::StateRequest [GOOD] >> TCmsTest::ScheduledEmergencyDuringRollingRestart >> KqpIndexes::CreateTableWithImplicitSyncIndexSQL [GOOD] >> KqpIndexes::CreateTableWithExplicitSyncIndexSQL >> UpsertLoad::ShouldWriteDataBulkUpsertLocalMkqlKeyFrom [GOOD] >> UpsertLoad::ShouldWriteDataBulkUpsert [GOOD] >> UpsertLoad::ShouldWriteDataBulkUpsert2 >> UpsertLoad::ShouldWriteDataBulkUpsertLocalMkql [GOOD] >> UpsertLoad::ShouldWriteDataBulkUpsertLocalMkql2 >> KqpPrefixedVectorIndexes::OrderByCosineSimilarityNotNullableLevel1 [GOOD] >> KqpPrefixedVectorIndexes::OrderByCosineDistanceNullableLevel2 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpUniqueIndex::InsertNullInFk [GOOD] Test command err: Trying to start YDB, gRPC: 28721, MsgBus: 22792 2025-05-07T09:15:44.904659Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501630013407251365:2080];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:15:44.904735Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003bf6/r3tmp/tmpIYjo4T/pdisk_1.dat 2025-05-07T09:15:45.562326Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:15:45.634941Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:15:45.635039Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:15:45.639426Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 28721, node 1 2025-05-07T09:15:45.860235Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:15:45.860258Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:15:45.860264Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize 
from file: (empty maybe) 2025-05-07T09:15:45.860431Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:22792 TClient is connected to server localhost:22792 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:15:46.586378Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:15:46.617189Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T09:15:46.639923Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:15:46.810927Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:15:47.119770Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:15:47.227212Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:15:49.208101Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501630034882089470:2407], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:15:49.208296Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:15:49.606515Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T09:15:49.690262Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T09:15:49.737053Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T09:15:49.788736Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T09:15:49.829516Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T09:15:49.885484Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T09:15:49.908497Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501630013407251365:2080];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:15:49.908552Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:15:49.929658Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T09:15:50.012529Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501630039177057425:2471], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:15:50.012606Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:15:50.012691Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501630039177057430:2474], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:15:50.016908Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-05-07T09:15:50.033253Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501630039177057432:2475], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T09:15:50.134755Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501630039177057483:3421] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:15:51.378361Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:15:53.396891Z node 1 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:678: SelfId: [1:7501630052061960682:2640], TxId: 281474976710680, task: 1. Ctx: { TraceId : 01jtn0fc9m14kjwpsk7d323fyb. SessionId : ydb://session/3?node_id=1&id=ZGIzN2JmOTAtMmMxZmUyYi1jYjg3Yjc0Mi00OTliNGQ0NQ==. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. InternalError: PRECONDITION_FAILED KIKIMR_CONSTRAINT_VIOLATION: {
: Error: Conflict with existing key., code: 2012 }. 2025-05-07T09:15:53.397439Z node 1 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1216: SelfId: [1:7501630052061960683:2641], TxId: 281474976710680, task: 2. Ctx: { CustomerSuppliedId : . TraceId : 01jtn0fc9m14kjwpsk7d323fyb. SessionId : ydb://session/3?node_id=1&id=ZGIzN2JmOTAtMmMxZmUyYi1jYjg3Yjc0Mi00OTliNGQ0NQ==. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Handle abort execution event from: [1:7501630052061960679:2574], status: PRECONDITION_FAILED, reason: {
: Error: Terminate execution } 2025-05-07T09:15:53.397881Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2578: SessionId: ydb://session/3?node_id=1&id=ZGIzN2JmOTAtMmMxZmUyYi1jYjg3Yjc0Mi00OTliNGQ0NQ==, ActorId: [1:7501630043472025798:2574], ActorState: ExecuteState, TraceId: 01jtn0fc9m14kjwpsk7d323fyb, Create QueryResponse for error on request, msg: Trying to start YDB, gRPC: 6895, MsgBus: 15429 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003bf6/r3tmp/tmp7a0MJo/pdisk_1.dat 2025-05-07T09:15:54.390425Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T09:15:54.457876Z node 2 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:15:54.481271Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:15:54.481532Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:15:54.483170Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 6895, node 2 2025-05-07T09:15:54.688296Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:15:54.688319Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:15:54.688327Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:15:54.688449Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:15429 TClient is connected to server localhost:15429 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:15:55.365523Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T09:15:55.374945Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-05-07T09:15:55.384358Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:15:55.470260Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:15:55.649172Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:15:55.759130Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:15:58.698229Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7501630075108778955:2407], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:15:58.698371Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:15:58.767812Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-05-07T09:15:58.820450Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-05-07T09:15:58.871474Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-05-07T09:15:58.949696Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-05-07T09:15:59.001256Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-05-07T09:15:59.065623Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-05-07T09:15:59.143936Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-05-07T09:15:59.231784Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7501630079403746918:2470], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:15:59.231905Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:15:59.232361Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7501630079403746923:2473], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:15:59.238144Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-05-07T09:15:59.255987Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7501630079403746925:2474], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-05-07T09:15:59.346934Z node 2 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [2:7501630079403746977:3423] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:16:00.539480Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480 waiting... >> UpsertLoad::ShouldCreateTable [GOOD] >> UpsertLoad::ShouldDropCreateTable >> ReadLoad::ShouldReadIterate [GOOD] >> ReadLoad::ShouldReadIterateMoreThanRows |94.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_create_queue_rate[tables_format_v1] [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/load_test/ut_ycsb/unittest >> UpsertLoad::ShouldWriteDataBulkUpsertLocalMkqlKeyFrom [GOOD] Test command err: 2025-05-07T09:16:04.032066Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:105:2151], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T09:16:04.032271Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T09:16:04.032547Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0038ad/r3tmp/tmpoAyUCf/pdisk_1.dat 2025-05-07T09:16:04.474670Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-05-07T09:16:04.533103Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:16:04.587701Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:16:04.587829Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:16:04.603097Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:16:04.727032Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-05-07T09:16:05.165666Z node 1 :DS_LOAD_TEST DEBUG: test_load_actor.cpp:425: TLoad# 0 created load actor of type# kUpsertLocalMkqlStart with tag# 1, proto# NotifyWhenFinished: true TargetShard { TabletId: 72075186224037888 TableId: 2 TableName: "usertable" } UpsertLocalMkqlStart { RowCount: 10 Inflight: 3 KeyFrom: 12345 } 2025-05-07T09:16:05.165882Z node 1 :DS_LOAD_TEST NOTICE: bulk_mkql_upsert.cpp:157: Id# {Tag: 0, parent: [1:732:2614], subTag: 2} TUpsertActor Bootstrap called: RowCount: 10 Inflight: 3 KeyFrom: 12345 with type# 1, target# TabletId: 72075186224037888 TableId: 2 TableName: "usertable" 2025-05-07T09:16:05.420836Z node 1 :DS_LOAD_TEST NOTICE: bulk_mkql_upsert.cpp:255: Id# {Tag: 0, parent: [1:732:2614], subTag: 2} TUpsertActor finished in 0.254381s, errors=0 2025-05-07T09:16:05.420918Z node 1 :DS_LOAD_TEST INFO: test_load_actor.cpp:447: TLoad# 0 received finished from actor# [1:733:2615] with tag# 2 |94.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_does_actions_with_queue[tables_format_v0-std] [GOOD] >> TCmsTest::StateStorageRollingRestart [GOOD] >> TCmsTest::StateStorageLockedNodes |94.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/ttl/py3test >> test_ttl.py::TestTTLOnIndexedTable::test_case [GOOD] >> UpsertLoad::ShouldWriteKqpUpsertKeyFrom [GOOD] >> UpsertLoad::ShouldWriteKqpUpsert2 [GOOD] >> TCmsTest::ScheduledEmergencyDuringRollingRestart [GOOD] >> TCmsTest::ScheduledWalleRequestDuringRollingRestart >> KqpIndexes::SecondaryIndexSelectUsingScripting [GOOD] >> KqpIndexes::SecondaryIndexReplace+UseSink >> UpsertLoad::ShouldWriteDataBulkUpsertBatch [GOOD] >> UpsertLoad::ShouldWriteDataBulkUpsertKeyFrom >> TCmsTest::CollectInfo |94.4%| [TA] 
$(B)/ydb/tests/functional/sqs/with_quotas/test-results/py3test/{meta.json ... results_accumulator.log} >> KqpIndexes::UpsertNoIndexColumns [GOOD] |94.4%| [TA] {RESULT} $(B)/ydb/tests/functional/sqs/with_quotas/test-results/py3test/{meta.json ... results_accumulator.log} ------- [TM] {asan, default-linux-x86_64, release} ydb/core/load_test/ut_ycsb/unittest >> UpsertLoad::ShouldWriteKqpUpsert2 [GOOD] Test command err: 2025-05-07T09:16:03.842305Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:105:2151], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T09:16:03.842508Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T09:16:03.842823Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0038a4/r3tmp/tmpqJdtjR/pdisk_1.dat 2025-05-07T09:16:04.467632Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-05-07T09:16:04.524920Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:16:04.582166Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:16:04.584713Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:16:04.597655Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:16:04.723220Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-05-07T09:16:05.159680Z node 1 :DS_LOAD_TEST DEBUG: test_load_actor.cpp:425: TLoad# 0 created load actor of type# kUpsertKqpStart with tag# 1, proto# NotifyWhenFinished: true TargetShard { TabletId: 72075186224037888 TableId: 2 WorkingDir: "/Root" TableName: "JustTable" } UpsertKqpStart { RowCount: 20 Inflight: 5 } 2025-05-07T09:16:05.159867Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:298: TKqpUpsertActorMultiSession# {Tag: 0, parent: [1:732:2614], subTag: 2} Bootstrap called: RowCount: 20 Inflight: 5 2025-05-07T09:16:05.172574Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:361: TKqpUpsertActorMultiSession# {Tag: 0, parent: [1:732:2614], subTag: 2} started# 5 actors each with inflight# 4 2025-05-07T09:16:05.172678Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:116: TKqpUpsertActor# {Tag: 0, parent: [1:733:2615], subTag: 1} Bootstrap called: RowCount: 4 Inflight: 1 2025-05-07T09:16:05.172758Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:116: TKqpUpsertActor# {Tag: 0, parent: [1:733:2615], subTag: 2} Bootstrap called: RowCount: 4 Inflight: 1 2025-05-07T09:16:05.172789Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:116: TKqpUpsertActor# {Tag: 0, parent: [1:733:2615], subTag: 3} Bootstrap called: RowCount: 4 Inflight: 1 2025-05-07T09:16:05.172839Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:116: TKqpUpsertActor# {Tag: 0, parent: [1:733:2615], subTag: 4} Bootstrap called: RowCount: 4 Inflight: 1 2025-05-07T09:16:05.172868Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:116: TKqpUpsertActor# {Tag: 0, parent: [1:733:2615], subTag: 5} Bootstrap called: RowCount: 4 Inflight: 1 2025-05-07T09:16:05.178584Z node 1 :DS_LOAD_TEST DEBUG: kqp_upsert.cpp:207: TKqpUpsertActor# {Tag: 0, parent: [1:733:2615], subTag: 1} session: ydb://session/3?node_id=1&id=M2E3Njc1MTUtMWQyMDk4ODYtMzk0YWFhZWItM2ZlZmI3ZmY= 
2025-05-07T09:16:05.178669Z node 1 :DS_LOAD_TEST DEBUG: kqp_upsert.cpp:207: TKqpUpsertActor# {Tag: 0, parent: [1:733:2615], subTag: 2} session: ydb://session/3?node_id=1&id=MjM1NzNjLWI0MGRhMTIzLTkxYjIwYzMzLTM2NTkyYjhi 2025-05-07T09:16:05.181296Z node 1 :DS_LOAD_TEST DEBUG: kqp_upsert.cpp:207: TKqpUpsertActor# {Tag: 0, parent: [1:733:2615], subTag: 3} session: ydb://session/3?node_id=1&id=M2I0YjE4Ni1mNmJjMTZiMS0xZjQ1NDM3Yi1hNTUzODk3Mw== 2025-05-07T09:16:05.183282Z node 1 :DS_LOAD_TEST DEBUG: kqp_upsert.cpp:207: TKqpUpsertActor# {Tag: 0, parent: [1:733:2615], subTag: 4} session: ydb://session/3?node_id=1&id=MTJlMmVlYTQtMmM3YzhlZjAtY2QxMjM3MDctMTE2NmVjOTk= 2025-05-07T09:16:05.185505Z node 1 :DS_LOAD_TEST DEBUG: kqp_upsert.cpp:207: TKqpUpsertActor# {Tag: 0, parent: [1:733:2615], subTag: 5} session: ydb://session/3?node_id=1&id=MTVhZjdjZTktZGNlZDIzNzgtNjY2ZjJlODctNzJlZGYxYzk= 2025-05-07T09:16:05.190504Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:746:2628], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:05.190721Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:775:2651], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:05.190787Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:776:2652], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:05.190940Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:05.191228Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:772:2648], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:05.191301Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:773:2649], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:05.191366Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:774:2650], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:05.205807Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480 2025-05-07T09:16:05.274701Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:795:2671] txid# 281474976715661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exists but creating right now (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateCreate)" severity: 1 } 2025-05-07T09:16:05.276444Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:796:2672] txid# 281474976715662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exists but creating right now (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateCreate)" severity: 1 } 2025-05-07T09:16:05.277247Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:790:2666] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exists but creating right now (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateCreate)" severity: 1 } 2025-05-07T09:16:05.278053Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:791:2667] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exists but creating right now (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateCreate)" severity: 1 } 2025-05-07T09:16:05.463700Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:787:2663], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-05-07T09:16:05.463855Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:788:2664], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-05-07T09:16:05.463930Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:789:2665], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-05-07T09:16:05.464170Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:784:2660], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-05-07T09:16:05.464241Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:785:2661], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-05-07T09:16:05.505797Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:890:2731] txid# 281474976715663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:16:06.364173Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:190: TKqpUpsertActor# {Tag: 0, parent: [1:733:2615], subTag: 3} finished in 1746609366.364104s, errors=0 2025-05-07T09:16:06.364703Z node 1 :DS_LOAD_TEST INFO: kqp_upsert.cpp:376: kqp# {Tag: 0, parent: [1:732:2614], subTag: 2} finished: 3 { Tag: 3 DurationMs: 1746609366364 OperationsOK: 4 OperationsError: 0 } 2025-05-07T09:16:06.379380Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:963:2769] txid# 281474976715668, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:16:06.449847Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:190: TKqpUpsertActor# {Tag: 0, parent: [1:733:2615], subTag: 1} finished in 1746609366.449813s, errors=0 2025-05-07T09:16:06.450162Z node 1 :DS_LOAD_TEST INFO: kqp_upsert.cpp:376: kqp# {Tag: 0, parent: [1:732:2614], subTag: 2} finished: 1 { Tag: 1 DurationMs: 1746609366449 OperationsOK: 4 OperationsError: 0 } 2025-05-07T09:16:06.464048Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:1014:2791] txid# 281474976715673, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:16:06.535778Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:190: TKqpUpsertActor# {Tag: 0, parent: [1:733:2615], subTag: 2} finished in 1746609366.535710s, errors=0 2025-05-07T09:16:06.536119Z node 1 :DS_LOAD_TEST INFO: kqp_upsert.cpp:376: kqp# {Tag: 0, parent: [1:732:2614], subTag: 2} finished: 2 { Tag: 2 DurationMs: 1746609366535 OperationsOK: 4 OperationsError: 0 } 2025-05-07T09:16:06.551920Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:1065:2813] txid# 281474976715678, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:16:06.620472Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:190: TKqpUpsertActor# {Tag: 0, parent: [1:733:2615], subTag: 4} finished in 1746609366.620433s, errors=0 2025-05-07T09:16:06.620791Z node 1 :DS_LOAD_TEST INFO: kqp_upsert.cpp:376: kqp# {Tag: 0, parent: [1:732:2614], subTag: 2} finished: 4 { Tag: 4 DurationMs: 1746609366620 OperationsOK: 4 OperationsError: 0 } 2025-05-07T09:16:06.634735Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:1116:2835] txid# 281474976715683, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:16:06.701632Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:190: TKqpUpsertActor# {Tag: 0, parent: 
[1:733:2615], subTag: 5} finished in 1746609366.701589s, errors=0 2025-05-07T09:16:06.701998Z node 1 :DS_LOAD_TEST INFO: kqp_upsert.cpp:376: kqp# {Tag: 0, parent: [1:732:2614], subTag: 2} finished: 5 { Tag: 5 DurationMs: 1746609366701 OperationsOK: 4 OperationsError: 0 } 2025-05-07T09:16:06.702077Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:395: TKqpUpsertActorMultiSession# {Tag: 0, parent: [1:732:2614], subTag: 2} finished in 1.529795s, oks# 20, errors# 0 2025-05-07T09:16:06.702209Z node 1 :DS_LOAD_TEST INFO: test_load_actor.cpp:447: TLoad# 0 received finished from actor# [1:733:2615] with tag# 2 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/load_test/ut_ycsb/unittest >> UpsertLoad::ShouldWriteKqpUpsertKeyFrom [GOOD] Test command err: 2025-05-07T09:16:04.318483Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:105:2151], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T09:16:04.318770Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T09:16:04.319117Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0038c0/r3tmp/tmpvxNQrX/pdisk_1.dat 2025-05-07T09:16:04.794854Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-05-07T09:16:04.843046Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:16:04.901954Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:16:04.902146Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:16:04.913990Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:16:05.007176Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-05-07T09:16:05.403632Z node 1 :DS_LOAD_TEST DEBUG: test_load_actor.cpp:425: TLoad# 0 created load actor of type# kUpsertKqpStart with tag# 1, proto# NotifyWhenFinished: true TargetShard { TabletId: 72075186224037888 TableId: 2 WorkingDir: "/Root" TableName: "usertable" } UpsertKqpStart { RowCount: 20 Inflight: 5 KeyFrom: 12345 } 2025-05-07T09:16:05.403801Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:298: TKqpUpsertActorMultiSession# {Tag: 0, parent: [1:732:2614], subTag: 2} Bootstrap called: RowCount: 20 Inflight: 5 KeyFrom: 12345 2025-05-07T09:16:05.408141Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:361: TKqpUpsertActorMultiSession# {Tag: 0, parent: [1:732:2614], subTag: 2} started# 5 actors each with inflight# 4 2025-05-07T09:16:05.408236Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:116: TKqpUpsertActor# {Tag: 0, parent: [1:733:2615], subTag: 1} Bootstrap called: RowCount: 4 Inflight: 1 KeyFrom: 12345 2025-05-07T09:16:05.408314Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:116: TKqpUpsertActor# {Tag: 0, parent: [1:733:2615], subTag: 2} Bootstrap called: RowCount: 4 Inflight: 1 KeyFrom: 12345 2025-05-07T09:16:05.408357Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:116: TKqpUpsertActor# {Tag: 0, parent: [1:733:2615], subTag: 3} Bootstrap called: RowCount: 4 Inflight: 1 KeyFrom: 12345 2025-05-07T09:16:05.408394Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:116: TKqpUpsertActor# {Tag: 0, parent: [1:733:2615], subTag: 4} Bootstrap called: RowCount: 4 Inflight: 1 KeyFrom: 12345 2025-05-07T09:16:05.408424Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:116: TKqpUpsertActor# {Tag: 0, parent: [1:733:2615], subTag: 5} Bootstrap called: RowCount: 4 Inflight: 1 KeyFrom: 12345 2025-05-07T09:16:05.415651Z node 1 :DS_LOAD_TEST DEBUG: kqp_upsert.cpp:207: TKqpUpsertActor# {Tag: 0, parent: 
[1:733:2615], subTag: 1} session: ydb://session/3?node_id=1&id=ZWM3MTIwOTgtOGRmY2Q3YTItNTQzM2ZhNzQtYjFmYTNjYmM= 2025-05-07T09:16:05.415738Z node 1 :DS_LOAD_TEST DEBUG: kqp_upsert.cpp:207: TKqpUpsertActor# {Tag: 0, parent: [1:733:2615], subTag: 2} session: ydb://session/3?node_id=1&id=NmVhNTAyZjItM2U1OWQzZmEtZDlhYjFkNmYtNTlhODAxNTU= 2025-05-07T09:16:05.418060Z node 1 :DS_LOAD_TEST DEBUG: kqp_upsert.cpp:207: TKqpUpsertActor# {Tag: 0, parent: [1:733:2615], subTag: 3} session: ydb://session/3?node_id=1&id=NmRlNWFiNTYtYTNjZTg0YTgtN2NkNmE4M2YtZmIwMzFiMGI= 2025-05-07T09:16:05.419822Z node 1 :DS_LOAD_TEST DEBUG: kqp_upsert.cpp:207: TKqpUpsertActor# {Tag: 0, parent: [1:733:2615], subTag: 4} session: ydb://session/3?node_id=1&id=ZWE2MmNlMDYtMTE1MzdmZGEtN2U3MjI1NzEtZDdjMGM0ZDY= 2025-05-07T09:16:05.422010Z node 1 :DS_LOAD_TEST DEBUG: kqp_upsert.cpp:207: TKqpUpsertActor# {Tag: 0, parent: [1:733:2615], subTag: 5} session: ydb://session/3?node_id=1&id=MTU2NWFlYzctYWMxMWEyYjAtZjQwZTE4ZDItODA1ODQ0ZjE= 2025-05-07T09:16:05.426077Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:772:2648], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:05.426214Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:774:2650], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:05.426266Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:775:2651], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:05.426397Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:746:2628], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:05.426682Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:773:2649], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:05.426749Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:776:2652], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:05.427281Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:05.433899Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480 2025-05-07T09:16:05.503225Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:795:2671] txid# 281474976715661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exists but creating right now (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateCreate)" severity: 1 } 2025-05-07T09:16:05.504422Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:796:2672] txid# 281474976715662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exists but creating right now (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateCreate)" severity: 1 } 2025-05-07T09:16:05.505010Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:790:2666] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exists but creating right now (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateCreate)" severity: 1 } 2025-05-07T09:16:05.505613Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:791:2667] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exists but creating right now (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateCreate)" severity: 1 } 2025-05-07T09:16:05.675475Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:786:2662], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-05-07T09:16:05.675580Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:788:2664], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-05-07T09:16:05.675669Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:789:2665], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-05-07T09:16:05.675859Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:784:2660], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-05-07T09:16:05.675929Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:785:2661], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-05-07T09:16:05.717581Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:890:2731] txid# 281474976715663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:16:06.364139Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:190: TKqpUpsertActor# {Tag: 0, parent: [1:733:2615], subTag: 2} finished in 1746609366.364068s, errors=0 2025-05-07T09:16:06.364630Z node 1 :DS_LOAD_TEST INFO: kqp_upsert.cpp:376: kqp# {Tag: 0, parent: [1:732:2614], subTag: 2} finished: 2 { Tag: 2 DurationMs: 1746609366364 OperationsOK: 4 OperationsError: 0 } 2025-05-07T09:16:06.380502Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:963:2769] txid# 281474976715668, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:16:06.455496Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:190: TKqpUpsertActor# {Tag: 0, parent: [1:733:2615], subTag: 1} finished in 1746609366.455450s, errors=0 2025-05-07T09:16:06.455778Z node 1 :DS_LOAD_TEST INFO: kqp_upsert.cpp:376: kqp# {Tag: 0, parent: [1:732:2614], subTag: 2} finished: 1 { Tag: 1 DurationMs: 1746609366455 OperationsOK: 4 OperationsError: 0 } 2025-05-07T09:16:06.473217Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:1014:2791] txid# 281474976715673, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:16:06.552294Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:190: TKqpUpsertActor# {Tag: 0, parent: [1:733:2615], subTag: 4} finished in 1746609366.552245s, errors=0 2025-05-07T09:16:06.552710Z node 1 :DS_LOAD_TEST INFO: kqp_upsert.cpp:376: kqp# {Tag: 0, parent: [1:732:2614], subTag: 2} finished: 4 { Tag: 4 DurationMs: 1746609366552 OperationsOK: 4 OperationsError: 0 } 2025-05-07T09:16:06.566728Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:1065:2813] txid# 281474976715678, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:16:06.586596Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:1082:2822] txid# 281474976715680, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:16:06.681333Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:190: TKqpUpsertActor# {Tag: 0, parent: [1:733:2615], subTag: 3} finished in 1746609366.681288s, errors=0 2025-05-07T09:16:06.681878Z node 1 :DS_LOAD_TEST INFO: kqp_upsert.cpp:376: kqp# {Tag: 0, parent: [1:732:2614], subTag: 2} finished: 3 { Tag: 3 DurationMs: 1746609366681 OperationsOK: 4 OperationsError: 0 } 2025-05-07T09:16:06.681932Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:190: TKqpUpsertActor# {Tag: 0, parent: 
[1:733:2615], subTag: 5} finished in 1746609366.681918s, errors=0 2025-05-07T09:16:06.682052Z node 1 :DS_LOAD_TEST INFO: kqp_upsert.cpp:376: kqp# {Tag: 0, parent: [1:732:2614], subTag: 2} finished: 5 { Tag: 5 DurationMs: 1746609366681 OperationsOK: 4 OperationsError: 0 } 2025-05-07T09:16:06.682116Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:395: TKqpUpsertActorMultiSession# {Tag: 0, parent: [1:732:2614], subTag: 2} finished in 1.274213s, oks# 20, errors# 0 2025-05-07T09:16:06.682626Z node 1 :DS_LOAD_TEST INFO: test_load_actor.cpp:447: TLoad# 0 received finished from actor# [1:733:2615] with tag# 2 >> KqpIndexes::NullInIndexTableNoDataRead >> KqpIndexes::UpdateDeletePlan-UseSink [GOOD] >> KqpIndexes::UpdateIndexSubsetPk |94.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/ttl/py3test >> test_ttl.py::TestTTLDefaultEnv::test_case [GOOD] >> KqpIndexes::CheckUpsertNonEquatableType+NotNull >> TCmsTest::ScheduledWalleRequestDuringRollingRestart [GOOD] >> TCmsTest::SamePriorityRequest2 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpIndexes::UpsertNoIndexColumns [GOOD] Test command err: Trying to start YDB, gRPC: 19913, MsgBus: 16862 2025-05-07T09:15:31.721466Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501629958242413989:2196];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:15:31.721930Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003c09/r3tmp/tmpgoraxW/pdisk_1.dat 2025-05-07T09:15:32.526028Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:15:32.526119Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:15:32.527785Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:15:32.530386Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 19913, node 1 2025-05-07T09:15:32.809272Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:15:32.809294Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:15:32.809301Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:15:32.809431Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:16862 TClient is connected to server localhost:16862 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:15:33.784072Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:15:33.823634Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T09:15:33.838085Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:15:34.078360Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:15:34.374546Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:15:34.479716Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:15:36.700898Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501629958242413989:2196];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:15:36.700976Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:15:37.279443Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501629984012219292:2409], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:15:37.279624Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:15:38.004788Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T09:15:38.115941Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T09:15:38.246187Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T09:15:38.335511Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T09:15:38.442639Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T09:15:38.513093Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T09:15:38.579477Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T09:15:38.684039Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501629988307187261:2475], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:15:38.684119Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:15:38.684712Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501629988307187266:2478], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:15:38.689431Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-05-07T09:15:38.708708Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501629988307187268:2479], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T09:15:38.779606Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501629988307187321:3434] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:15:40.147104Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269877761, Sender [1:7501629996897122215:3612], Recipient [1:7501629962537381598:2206]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-05-07T09:15:40.147156Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4937: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-05-07T09:15:40.147169Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5713: Pipe server connected, at tablet: 72057594046644480 2025-05-07T09:15:40.147230Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271122432, Sender [1:7501629996897122211:3609], Recipient [1:7501629962537381598:2206]: {TEvModifySchemeTransaction txid# 281474976710672 TabletId# 72057594046644480} 2025-05-07T09:15:40.147246Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4851: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-05-07T09:15:40.198520Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/Root" OperationType: ESchemeOpCreateIndexedTable CreateIndexedTable { TableDescription { Name: "TestTable" Columns { Name: "k1" Type: "String" NotNull: false } Columns { Name: "fk1" Type: "String" NotNull: false } Columns { Name: "fk2" Type: "Int32" NotNull: false } Columns { Name: "fk3" Type: "Uint64" NotNull: false } Columns { Name: "Value" Type: "String" NotNull: false } KeyColumnNames: "k1" PartitionConfig { ColumnFamilies { Id: 0 StorageConfig { SysLog { PreferredPoolKind: "test" } Log { PreferredPoolKind: "test" } Data { PreferredPoolKind: "test" } } } } Temporary: false } IndexDescription { Name: "Index12" KeyColumnNames: "fk1" KeyColumnNames: "fk2" Type: EIndexTypeGlobalUnique IndexImplTableDescriptions { PartitionConfig { } } DataColumnNames: "Value" } IndexDescription { Name: "Index3" KeyColumnNames: "fk3" Type: EIndexTypeGlobalUnique IndexImplTableDescriptions { PartitionConfig { } } } } } TxId: 281474976710672 TabletId: 72057594046644480 PeerName: "ipv6:[::1]:39064" , at schemeshard: 72057594046644480 2025-05-07T09:15:40.199129Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_create_indexed_table.cpp:101: TCreateTableIndex construct operation table path: /Root/TestTable domain path id: [OwnerId: 72057594046644480, LocalPathId: 1] domain path: /Root shardsToCreate: 3 GetShardsInside: 34 MaxShards: 200000 2025-05-07T09:15:40.199771Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_table.cpp:426: TCreateTable Propose, path: /Root/TestTable, opId: 281474976710672:0, at schemeshard: 72057594046644480 2025-05-07T09:15:40.199937Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshar ... 
76715674:1 2025-05-07T09:16:03.254395Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 24] was 3 2025-05-07T09:16:03.254403Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 281474976715674:2 2025-05-07T09:16:03.254409Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 281474976715674:2 2025-05-07T09:16:03.254441Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 25] was 3 2025-05-07T09:16:03.256950Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-05-07T09:16:03.257061Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269877764, Sender [3:7501630095211241587:3827], Recipient [3:7501630065146467847:2149]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-05-07T09:16:03.257081Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4938: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-05-07T09:16:03.257093Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5761: Server pipe is reset, at schemeshard: 72057594046644480 2025-05-07T09:16:03.257121Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269877764, Sender [3:7501630095211241589:3829], Recipient [3:7501630065146467847:2149]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-05-07T09:16:03.257132Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4938: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-05-07T09:16:03.257139Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5761: Server pipe is reset, at schemeshard: 72057594046644480 2025-05-07T09:16:03.257306Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-05-07T09:16:03.257374Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:630: Send to actor: [3:7501630095211241492:2541] msg type: 271124998 msg: NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976715674 at schemeshard: 72057594046644480 2025-05-07T09:16:03.263304Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269877764, Sender [3:7501630095211241508:3778], Recipient [3:7501630065146467847:2149]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-05-07T09:16:03.263364Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4938: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-05-07T09:16:03.263378Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5761: Server pipe is reset, at schemeshard: 72057594046644480 2025-05-07T09:16:03.566161Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271125000, Sender [0:0:0], Recipient [3:7501630065146467847:2149]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T09:16:03.566203Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4848: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T09:16:03.566250Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271124999, 
Sender [3:7501630065146467847:2149], Recipient [3:7501630065146467847:2149]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T09:16:03.566268Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4847: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T09:16:04.566585Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271125000, Sender [0:0:0], Recipient [3:7501630065146467847:2149]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T09:16:04.566635Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4848: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T09:16:04.566693Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271124999, Sender [3:7501630065146467847:2149], Recipient [3:7501630065146467847:2149]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T09:16:04.566714Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4847: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T09:16:05.570100Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271125000, Sender [0:0:0], Recipient [3:7501630065146467847:2149]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T09:16:05.570147Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4848: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T09:16:05.570186Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271124999, Sender [3:7501630065146467847:2149], Recipient [3:7501630065146467847:2149]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T09:16:05.570203Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4847: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T09:16:06.570317Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271125000, Sender [0:0:0], Recipient [3:7501630065146467847:2149]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T09:16:06.570368Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4848: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T09:16:06.570429Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271124999, Sender [3:7501630065146467847:2149], Recipient [3:7501630065146467847:2149]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T09:16:06.570449Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4847: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T09:16:07.334432Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269877761, Sender [3:7501630112391111193:3986], Recipient [3:7501630065146467847:2149]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-05-07T09:16:07.334471Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4937: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-05-07T09:16:07.334497Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5713: Pipe server connected, at tablet: 72057594046644480 2025-05-07T09:16:07.334907Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269553162, 
Sender [3:7501630069441435429:2314], Recipient [3:7501630065146467847:2149]: NKikimrTxDataShard.TEvPeriodicTableStats DatashardId: 72075186224037888 TableLocalId: 2 Generation: 1 Round: 0 TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 InMemSize: 0 LastAccessTime: 1746609362832 LastUpdateTime: 1746609362832 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 3 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 0 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { Memory: 82488 } ShardState: 2 UserTablePartOwners: 72075186224037888 NodeId: 3 StartTime: 1746609357270 TableOwnerId: 72057594046644480 FollowerId: 0 2025-05-07T09:16:07.334924Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4877: StateWork, processing event TEvDataShard::TEvPeriodicTableStats 2025-05-07T09:16:07.334955Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:561: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037888 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 2] state 'Ready' dataSize 0 rowCount 0 cpuUsage 0 2025-05-07T09:16:07.335043Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:568: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037888 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 2] raw table stats: DataSize: 0 RowCount: 0 IndexSize: 0 InMemSize: 0 LastAccessTime: 1746609362832 LastUpdateTime: 1746609362832 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 3 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 0 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 2025-05-07T09:16:07.335074Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:608: Will delay TTxStoreTableStats on# 0.099993s, queue# 1 2025-05-07T09:16:07.342882Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269877761, Sender [3:7501630112391111195:3987], Recipient [3:7501630065146467847:2149]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-05-07T09:16:07.342922Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4937: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-05-07T09:16:07.342936Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5713: Pipe server connected, at tablet: 72057594046644480 2025-05-07T09:16:07.343350Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269553162, Sender [3:7501630069441435430:2315], Recipient [3:7501630065146467847:2149]: NKikimrTxDataShard.TEvPeriodicTableStats DatashardId: 72075186224037889 TableLocalId: 2 Generation: 1 Round: 0 TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 InMemSize: 0 LastAccessTime: 1746609362831 LastUpdateTime: 1746609362831 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 3 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 0 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } 
TabletMetrics { Memory: 82488 } ShardState: 2 UserTablePartOwners: 72075186224037889 NodeId: 3 StartTime: 1746609357270 TableOwnerId: 72057594046644480 FollowerId: 0 2025-05-07T09:16:07.343377Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4877: StateWork, processing event TEvDataShard::TEvPeriodicTableStats 2025-05-07T09:16:07.343411Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:561: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037889 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 2] state 'Ready' dataSize 0 rowCount 0 cpuUsage 0 2025-05-07T09:16:07.343514Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:568: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037889 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 2] raw table stats: DataSize: 0 RowCount: 0 IndexSize: 0 InMemSize: 0 LastAccessTime: 1746609362831 LastUpdateTime: 1746609362831 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 3 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 0 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 >> TCmsTest::StateStorageLockedNodes [GOOD] |94.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/ttl/py3test >> test_ttl.py::TestTTLValueSinceUnixEpoch::test_case [GOOD] >> TCmsTest::CollectInfo [GOOD] >> TCmsTest::DynamicConfig >> UpsertLoad::ShouldWriteKqpUpsert [GOOD] >> TSchemeshardCompactionQueueTest::EnqueueEmptyShard [GOOD] >> TSchemeshardCompactionQueueTest::EnqueueSinglePartedShard [GOOD] >> TSchemeshardCompactionQueueTest::EnqueueSinglePartedShardWhenEnabled [GOOD] >> TSchemeshardBackgroundCompactionTest::SchemeshardShouldRequestCompactionsSchemeshardRestart >> TSchemeshardCompactionQueueTest::UpdateBelowThreshold [GOOD] >> TSchemeshardCompactionQueueTest::UpdateWithEmptyShard [GOOD] >> TSchemeshardCompactionQueueTest::EnqueueBelowSearchHeightThreshold [GOOD] >> TSchemeshardCompactionQueueTest::EnqueueBelowRowDeletesThreshold [GOOD] >> TSchemeshardCompactionQueueTest::CheckOrderWhenAllQueues [GOOD] >> TSchemeshardBorrowedCompactionTest::SchemeshardShouldCompactBorrowedBeforeSplit >> TSchemeshardBackgroundCompactionTest::ShouldNotCompactServerless >> TSchemeshardCompactionQueueTest::ShouldNotEnqueueEmptyShard [GOOD] >> TSchemeshardCompactionQueueTest::RemoveLastShardFromSubQueues [GOOD] |94.4%| [TA] $(B)/ydb/tests/functional/ttl/test-results/py3test/{meta.json ... results_accumulator.log} |94.4%| [TA] {RESULT} $(B)/ydb/tests/functional/ttl/test-results/py3test/{meta.json ... 
results_accumulator.log} >> UpsertLoad::ShouldWriteDataBulkUpsert2 [GOOD] |94.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/cms/ut/unittest >> TCmsTest::StateStorageLockedNodes [GOOD] >> UpsertLoad::ShouldWriteDataBulkUpsertLocalMkql2 [GOOD] >> TCmsTest::DynamicConfig [GOOD] >> TCmsTest::DisabledEvictVDisks |94.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_compaction/unittest >> TSchemeshardCompactionQueueTest::EnqueueSinglePartedShardWhenEnabled [GOOD] |94.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_compaction/unittest >> TSchemeshardCompactionQueueTest::UpdateWithEmptyShard [GOOD] |94.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_compaction/unittest >> TSchemeshardCompactionQueueTest::RemoveLastShardFromSubQueues [GOOD] |94.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_compaction/unittest >> TSchemeshardCompactionQueueTest::CheckOrderWhenAllQueues [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/load_test/ut_ycsb/unittest >> UpsertLoad::ShouldWriteKqpUpsert [GOOD] Test command err: 2025-05-07T09:16:07.744431Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:105:2151], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T09:16:07.744622Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T09:16:07.744877Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0037fe/r3tmp/tmp2OoYRY/pdisk_1.dat 2025-05-07T09:16:08.251188Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-05-07T09:16:08.294847Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:16:08.359467Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:16:08.359595Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:16:08.371080Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:16:08.455201Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-05-07T09:16:08.864486Z node 1 :DS_LOAD_TEST DEBUG: test_load_actor.cpp:425: TLoad# 0 created load actor of type# kUpsertKqpStart with tag# 1, proto# NotifyWhenFinished: true TargetShard { TabletId: 72075186224037888 TableId: 2 WorkingDir: "/Root" TableName: "usertable" } UpsertKqpStart { RowCount: 20 Inflight: 5 } 2025-05-07T09:16:08.864634Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:298: TKqpUpsertActorMultiSession# {Tag: 0, parent: [1:732:2614], subTag: 2} Bootstrap called: RowCount: 20 Inflight: 5 2025-05-07T09:16:08.868656Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:361: TKqpUpsertActorMultiSession# {Tag: 0, parent: [1:732:2614], subTag: 2} started# 5 actors each with inflight# 4 2025-05-07T09:16:08.868744Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:116: TKqpUpsertActor# {Tag: 0, parent: [1:733:2615], subTag: 1} Bootstrap called: RowCount: 4 Inflight: 1 2025-05-07T09:16:08.868825Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:116: TKqpUpsertActor# {Tag: 0, parent: [1:733:2615], subTag: 2} Bootstrap called: RowCount: 4 Inflight: 1 2025-05-07T09:16:08.868855Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:116: TKqpUpsertActor# {Tag: 0, parent: [1:733:2615], subTag: 3} Bootstrap called: RowCount: 4 Inflight: 1 2025-05-07T09:16:08.868891Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:116: TKqpUpsertActor# {Tag: 0, parent: [1:733:2615], subTag: 4} Bootstrap called: RowCount: 4 Inflight: 1 2025-05-07T09:16:08.868924Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:116: TKqpUpsertActor# {Tag: 0, parent: [1:733:2615], subTag: 5} Bootstrap called: RowCount: 4 Inflight: 1 2025-05-07T09:16:08.873723Z node 1 :DS_LOAD_TEST DEBUG: kqp_upsert.cpp:207: TKqpUpsertActor# {Tag: 0, parent: [1:733:2615], subTag: 1} session: ydb://session/3?node_id=1&id=OGY1YmU1MjktOWMyOGRkZmEtY2M2ZjgwYWMtNTFlMmQ4N2Y= 
2025-05-07T09:16:08.873802Z node 1 :DS_LOAD_TEST DEBUG: kqp_upsert.cpp:207: TKqpUpsertActor# {Tag: 0, parent: [1:733:2615], subTag: 2} session: ydb://session/3?node_id=1&id=Y2ViZTc0YzEtMmZmMGUwZjEtNWJhMjZjYi03YWM5MjRiYg== 2025-05-07T09:16:08.875765Z node 1 :DS_LOAD_TEST DEBUG: kqp_upsert.cpp:207: TKqpUpsertActor# {Tag: 0, parent: [1:733:2615], subTag: 3} session: ydb://session/3?node_id=1&id=M2RiN2M1NzEtNjAzYTdmMDMtMWIwMDYxZWItY2Y4YWEwNGY= 2025-05-07T09:16:08.877465Z node 1 :DS_LOAD_TEST DEBUG: kqp_upsert.cpp:207: TKqpUpsertActor# {Tag: 0, parent: [1:733:2615], subTag: 4} session: ydb://session/3?node_id=1&id=OGFjNWQwMTctYzNhMjg5NTEtNmM2YWE5NTItYTI3MjE3MzU= 2025-05-07T09:16:08.879293Z node 1 :DS_LOAD_TEST DEBUG: kqp_upsert.cpp:207: TKqpUpsertActor# {Tag: 0, parent: [1:733:2615], subTag: 5} session: ydb://session/3?node_id=1&id=NTBkZGM4MGMtMTM2MzMyNjMtMmJiN2JkZmUtOWE3ZGZlZTQ= 2025-05-07T09:16:08.883449Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:772:2648], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:08.883587Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:774:2650], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:08.883637Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:775:2651], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:08.883764Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:746:2628], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:08.884043Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:773:2649], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:08.884098Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:776:2652], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:08.884622Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:08.895500Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480 2025-05-07T09:16:08.974389Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:795:2671] txid# 281474976715661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exists but creating right now (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateCreate)" severity: 1 } 2025-05-07T09:16:08.975506Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:796:2672] txid# 281474976715662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exists but creating right now (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateCreate)" severity: 1 } 2025-05-07T09:16:08.976018Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:790:2666] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exists but creating right now (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateCreate)" severity: 1 } 2025-05-07T09:16:08.976579Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:791:2667] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exists but creating right now (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateCreate)" severity: 1 } 2025-05-07T09:16:09.155669Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:786:2662], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-05-07T09:16:09.155796Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:788:2664], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-05-07T09:16:09.155873Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:789:2665], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-05-07T09:16:09.156095Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:784:2660], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-05-07T09:16:09.156151Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:785:2661], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-05-07T09:16:09.195879Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:890:2731] txid# 281474976715663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:16:09.718909Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:190: TKqpUpsertActor# {Tag: 0, parent: [1:733:2615], subTag: 5} finished in 1746609369.718831s, errors=0 2025-05-07T09:16:09.719425Z node 1 :DS_LOAD_TEST INFO: kqp_upsert.cpp:376: kqp# {Tag: 0, parent: [1:732:2614], subTag: 2} finished: 5 { Tag: 5 DurationMs: 1746609369718 OperationsOK: 4 OperationsError: 0 } 2025-05-07T09:16:09.733842Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:963:2769] txid# 281474976715668, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:16:09.808233Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:190: TKqpUpsertActor# {Tag: 0, parent: [1:733:2615], subTag: 2} finished in 1746609369.808181s, errors=0 2025-05-07T09:16:09.808598Z node 1 :DS_LOAD_TEST INFO: kqp_upsert.cpp:376: kqp# {Tag: 0, parent: [1:732:2614], subTag: 2} finished: 2 { Tag: 2 DurationMs: 1746609369808 OperationsOK: 4 OperationsError: 0 } 2025-05-07T09:16:09.822984Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:1014:2791] txid# 281474976715673, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:16:09.903918Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:190: TKqpUpsertActor# {Tag: 0, parent: [1:733:2615], subTag: 1} finished in 1746609369.903840s, errors=0 2025-05-07T09:16:09.904318Z node 1 :DS_LOAD_TEST INFO: kqp_upsert.cpp:376: kqp# {Tag: 0, parent: [1:732:2614], subTag: 2} finished: 1 { Tag: 1 DurationMs: 1746609369903 OperationsOK: 4 OperationsError: 0 } 2025-05-07T09:16:09.922487Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:1065:2813] txid# 281474976715678, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:16:09.950197Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:1082:2822] txid# 281474976715680, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:16:10.036544Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:190: TKqpUpsertActor# {Tag: 0, parent: [1:733:2615], subTag: 3} finished in 1746609370.036486s, errors=0 2025-05-07T09:16:10.037081Z node 1 :DS_LOAD_TEST INFO: kqp_upsert.cpp:376: kqp# {Tag: 0, parent: [1:732:2614], subTag: 2} finished: 3 { Tag: 3 DurationMs: 1746609370036 OperationsOK: 4 OperationsError: 0 } 2025-05-07T09:16:10.037140Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:190: TKqpUpsertActor# {Tag: 0, parent: 
[1:733:2615], subTag: 4} finished in 1746609370.037123s, errors=0 2025-05-07T09:16:10.037229Z node 1 :DS_LOAD_TEST INFO: kqp_upsert.cpp:376: kqp# {Tag: 0, parent: [1:732:2614], subTag: 2} finished: 4 { Tag: 4 DurationMs: 1746609370037 OperationsOK: 4 OperationsError: 0 } 2025-05-07T09:16:10.037277Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:395: TKqpUpsertActorMultiSession# {Tag: 0, parent: [1:732:2614], subTag: 2} finished in 1.168852s, oks# 20, errors# 0 2025-05-07T09:16:10.037426Z node 1 :DS_LOAD_TEST INFO: test_load_actor.cpp:447: TLoad# 0 received finished from actor# [1:733:2615] with tag# 2 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/load_test/ut_ycsb/unittest >> UpsertLoad::ShouldWriteDataBulkUpsert2 [GOOD] Test command err: 2025-05-07T09:16:04.099680Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:105:2151], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T09:16:04.099907Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T09:16:04.100188Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00384a/r3tmp/tmp3m5eJv/pdisk_1.dat 2025-05-07T09:16:04.559087Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-05-07T09:16:04.628614Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:16:04.683208Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:16:04.683392Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:16:04.695052Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:16:04.784630Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-05-07T09:16:05.150494Z node 1 :DS_LOAD_TEST DEBUG: test_load_actor.cpp:425: TLoad# 0 created load actor of type# kUpsertBulkStart with tag# 1, proto# NotifyWhenFinished: true TargetShard { TabletId: 72075186224037888 TableId: 2 TableName: "usertable" } UpsertBulkStart { RowCount: 10 Inflight: 3 } 2025-05-07T09:16:05.150732Z node 1 :DS_LOAD_TEST NOTICE: bulk_mkql_upsert.cpp:157: Id# {Tag: 0, parent: [1:732:2614], subTag: 2} TUpsertActor Bootstrap called: RowCount: 10 Inflight: 3 with type# 0, target# TabletId: 72075186224037888 TableId: 2 TableName: "usertable" 2025-05-07T09:16:05.241242Z node 1 :DS_LOAD_TEST NOTICE: bulk_mkql_upsert.cpp:255: Id# {Tag: 0, parent: [1:732:2614], subTag: 2} TUpsertActor finished in 0.089917s, errors=0 2025-05-07T09:16:05.241358Z node 1 :DS_LOAD_TEST INFO: test_load_actor.cpp:447: TLoad# 0 received finished from actor# [1:733:2615] with tag# 2 2025-05-07T09:16:09.361594Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:311:2354], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T09:16:09.361883Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T09:16:09.362054Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00384a/r3tmp/tmpHiuM5m/pdisk_1.dat 2025-05-07T09:16:09.667731Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-05-07T09:16:09.711816Z node 2 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:16:09.767697Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:16:09.767837Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:16:09.783314Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:16:09.865841Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-05-07T09:16:10.152986Z node 2 :DS_LOAD_TEST DEBUG: test_load_actor.cpp:425: TLoad# 0 created load actor of type# kUpsertBulkStart with tag# 1, proto# NotifyWhenFinished: true TargetShard { TabletId: 72075186224037888 TableId: 2 TableName: "JustTable" } UpsertBulkStart { RowCount: 10 Inflight: 3 } 2025-05-07T09:16:10.153136Z node 2 :DS_LOAD_TEST NOTICE: bulk_mkql_upsert.cpp:157: Id# {Tag: 0, parent: [2:732:2614], subTag: 2} TUpsertActor Bootstrap called: RowCount: 10 Inflight: 3 with type# 0, target# TabletId: 72075186224037888 TableId: 2 TableName: "JustTable" 2025-05-07T09:16:10.222050Z node 2 :DS_LOAD_TEST NOTICE: bulk_mkql_upsert.cpp:255: Id# {Tag: 0, parent: [2:732:2614], subTag: 2} TUpsertActor finished in 0.068170s, errors=0 2025-05-07T09:16:10.222182Z node 2 :DS_LOAD_TEST INFO: test_load_actor.cpp:447: TLoad# 0 received finished from actor# [2:733:2615] with tag# 2 >> ReadLoad::ShouldReadIterateMoreThanRows [GOOD] >> UpsertLoad::ShouldWriteDataBulkUpsertKeyFrom [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/tests/olap/ttl_tiering/py3test >> data_migration_when_alter_ttl.py::TestDataMigrationWhenAlterTtl::test 2025-05-07 09:15:49,076 WARNING devtools.ya.test.programs.test_tool.run_test.run_test: Wrapper execution timed out 2025-05-07 09:15:49,585 WARNING devtools.ya.test.programs.test_tool.run_test.run_test: Wrapper has overrun 600 secs timeout. 
Process tree before termination:
pid     rss   ref   pdirt
407876  716M  672M  633M  ydb-tests-olap-ttl_tiering --basetemp /home/runner/.ya/build/build_root/zvgn/0045e3/tmp --capture no -c pkg:library.python.pytest:pytest.yatest.ini -p no:factor --doctest-modu
412458  2.7G  2.7G  2.2G  ├─ ydbd server --suppress-version-check --yaml-config=/home/runner/.ya/build/build_root/zvgn/0045e3/ydb/tests/olap/ttl_tiering/test-results/py3test/testing_out_stuff/chunk1
415356  438M  426M  406M  └─ moto_server s3 --port 6132
Test command err:
  File "library/python/pytest/main.py", line 101, in main
    rc = pytest.main(
  File "contrib/python/pytest/py3/_pytest/config/__init__.py", line 175, in main
    ret: Union[ExitCode, int] = config.hook.pytest_cmdline_main(
  File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 513, in __call__
    return self._hookexec(self.name, self._hookimpls.copy(), kwargs, firstresult)
  File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120, in _hookexec
    return self._inner_hookexec(hook_name, methods, kwargs, firstresult)
  File "contrib/python/pluggy/py3/pluggy/_callers.py", line 103, in _multicall
    res = hook_impl.function(*args)
  File "contrib/python/pytest/py3/_pytest/main.py", line 320, in pytest_cmdline_main
    return wrap_session(config, _main)
  File "contrib/python/pytest/py3/_pytest/main.py", line 273, in wrap_session
    session.exitstatus = doit(config, session) or 0
  File "contrib/python/pytest/py3/_pytest/main.py", line 327, in _main
    config.hook.pytest_runtestloop(session=session)
  File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 513, in __call__
    return self._hookexec(self.name, self._hookimpls.copy(), kwargs, firstresult)
  File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120, in _hookexec
    return self._inner_hookexec(hook_name, methods, kwargs, firstresult)
  File "contrib/python/pluggy/py3/pluggy/_callers.py", line 103, in _multicall
    res = hook_impl.function(*args)
  File "contrib/python/pytest/py3/_pytest/main.py", line 352, in pytest_runtestloop
    item.config.hook.pytest_runtest_protocol(item=item, nextitem=nextitem)
  File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 513, in __call__
    return self._hookexec(self.name, self._hookimpls.copy(), kwargs, firstresult)
  File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120, in _hookexec
    return self._inner_hookexec(hook_name, methods, kwargs, firstresult)
  File "contrib/python/pluggy/py3/pluggy/_callers.py", line 103, in _multicall
    res = hook_impl.function(*args)
  File "contrib/python/pytest/py3/_pytest/runner.py", line 115, in pytest_runtest_protocol
    runtestprotocol(item, nextitem=nextitem)
  File "contrib/python/pytest/py3/_pytest/runner.py", line 134, in runtestprotocol
    reports.append(call_and_report(item, "call", log))
  File "contrib/python/pytest/py3/_pytest/runner.py", line 223, in call_and_report
    call = call_runtest_hook(item, when, **kwds)
  File "contrib/python/pytest/py3/_pytest/runner.py", line 262, in call_runtest_hook
    return CallInfo.from_call(
  File "contrib/python/pytest/py3/_pytest/runner.py", line 342, in from_call
    result: Optional[TResult] = func()
  File "contrib/python/pytest/py3/_pytest/runner.py", line 263, in <lambda>
    lambda: ihook(item=item, **kwds), when=when, reraise=reraise
  File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 513, in __call__
    return self._hookexec(self.name, self._hookimpls.copy(), kwargs, firstresult)
  File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120, in _hookexec
    return self._inner_hookexec(hook_name, methods, kwargs, firstresult)
  File "contrib/python/pluggy/py3/pluggy/_callers.py", line 103, in _multicall
    res = hook_impl.function(*args)
  File "contrib/python/pytest/py3/_pytest/runner.py", line 170, in pytest_runtest_call
    item.runtest()
  File "contrib/python/pytest/py3/_pytest/python.py", line 1844, in runtest
    self.ihook.pytest_pyfunc_call(pyfuncitem=self)
  File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 513, in __call__
    return self._hookexec(self.name, self._hookimpls.copy(), kwargs, firstresult)
  File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120, in _hookexec
    return self._inner_hookexec(hook_name, methods, kwargs, firstresult)
  File "contrib/python/pluggy/py3/pluggy/_callers.py", line 103, in _multicall
    res = hook_impl.function(*args)
  File "library/python/pytest/plugins/ya.py", line 563, in pytest_pyfunc_call
    pyfuncitem.retval = testfunction(**testargs)
  File "ydb/tests/olap/ttl_tiering/data_migration_when_alter_ttl.py", line 171, in test
    if not self.wait_for(
  File "ydb/tests/olap/ttl_tiering/base.py", line 88, in wait_for
    time.sleep(1)
  File "library/python/pytest/plugins/ya.py", line 344, in _graceful_shutdown
    traceback.print_stack(file=sys.stderr)
Traceback (most recent call last):
  File "library/python/testing/yatest_common/yatest/common/process.py", line 384, in wait
    wait_for(
  File "library/python/testing/yatest_common/yatest/common/process.py", line 764, in wait_for
    raise TimeoutError(truncate(message, MAX_MESSAGE_LEN))
yatest.common.process.TimeoutError: ...d_root/zvgn/0045e3/tmp', '--capture', 'no', '-c', 'pkg:library.python.pytest:pytest.yatest.ini', '-p', 'no:factor', '--doctest-modules', '--ya-trace', '/home/runner/.ya/build/build_root/zvgn/0045e3/ydb/tests/olap/ttl_tiering/test-results/py3test/testing_out_stuff/chunk1/ytest.report.trace', '--build-root', '/home/runner/.ya/build/build_root/zvgn/0045e3', '--source-root', '/home/runner/.ya/build/build_root/zvgn/0045e3/environment/arcadia', '--output-dir', '/home/runner/.ya/build/build_root/zvgn/0045e3/ydb/tests/olap/ttl_tiering/test-results/py3test/testing_out_stuff/chunk1/testing_out_stuff', '--durations', '0', '--project-path', 'ydb/tests/olap/ttl_tiering', '--test-tool-bin', '/home/runner/.ya/tools/v4/8580453620/test_tool', '--ya-version', '2', '--collect-cores', '--sanitizer-extra-checks', '--build-type', 'release', '--tb', 'short', '--modulo', '10', '--modulo-index', '1', '--partition-mode', 'SEQUENTIAL', '--dep-root', 'ydb/tests/olap/ttl_tiering', '--flags', 'APPLE_SDK_LOCAL=yes', '--flags', 'CFLAGS=-fno-omit-frame-pointer -Wno-unknown-argument', '--flags', 'DEBUGINFO_LINES_ONLY=yes', '--flags', 'DISABLE_FLAKE8_MIGRATIONS=yes', '--flags', 'OPENSOURCE=yes', '--flags', 'SANITIZER_TYPE=address', '--flags', 'TESTS_REQUESTED=yes', '--flags', 'USE_AIO=static', '--flags', 'USE_CLANG_CL=yes', '--flags', 'USE_EAT_MY_DATA=yes', '--flags', 'USE_ICONV=static', '--flags', 'USE_IDN=static', '--flags', 'USE_PREBUILT_TOOLS=no', '--sanitize', 'address']' stopped by 600 seconds timeout
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
  File "devtools/ya/test/programs/test_tool/run_test/run_test.py", line 1738, in main
    res.wait(check_exit_code=False, timeout=run_timeout, on_timeout=timeout_callback)
  File "library/python/testing/yatest_common/yatest/common/process.py", line 398, in wait
    raise ExecutionTimeoutError(self, str(e))
yatest.common.process.ExecutionTimeoutError: (("...d_root/zvgn/0045e3/tmp', '--capture', 'no', '-c', 'pkg:library.python.pytest:pytest.yatest.ini', '-p', 'no:factor', '--doctest-modules', '--ya-trace', '/home/runner/.ya/build/build_root/zvgn/0045e3/ydb/tests/olap/ttl_tiering/test-results/py3test/testing_out_stuff/chunk1/ytest.report.trace', '--build-root', '/home/runner/.ya/build/build_root/zvgn/0045e3', '--source-root', '/home/runner/.ya/build/build_root/zvgn/0045e3/environment/arcadia', '--output-dir', '/home/runner/.ya/build/build_root/zvgn/0045e3/ydb/tests/olap/ttl_tiering/test-results/py3test/testing_out_stuff/chunk1/testing_out_stuff', '--durations', '0', '--project-path', 'ydb/tests/olap/ttl_tiering', '--test-tool-bin', '/home/runner/.ya/tools/v4/8580453620/test_tool', '--ya-version', '2', '--collect-cores', '--sanitizer-extra-checks', '--build-type', 'release', '--tb', 'short', '--modulo', '10', '--modulo-index', '1', '--partition-mode', 'SEQUENTIAL', '--dep-root', 'ydb/tests/olap/ttl_tiering', '--flags', 'APPLE_SDK_LOCAL=yes', '--flags', 'CFLAGS=-fno-omit-frame-pointer -Wno-unknown-argument', '--flags', 'DEBUGINFO_LINES_ONLY=yes', '--flags', 'DISABLE_FLAKE8_MIGRATIONS=yes', '--flags', 'OPENSOURCE=yes', '--flags', 'SANITIZER_TYPE=address', '--flags', 'TESTS_REQUESTED=yes', '--flags', 'USE_AIO=static', '--flags', 'USE_CLANG_CL=yes', '--flags', 'USE_EAT_MY_DATA=yes', '--flags', 'USE_ICONV=static', '--flags', 'USE_IDN=static', '--flags', 'USE_PREBUILT_TOOLS=no', '--sanitize', 'address']' stopped by 600 seconds timeout",), {})
>> UpsertLoad::ShouldDropCreateTable [GOOD]
>> TCmsTest::SamePriorityRequest2 [GOOD]
------- [TM] {asan, default-linux-x86_64, release} ydb/core/load_test/ut_ycsb/unittest >> UpsertLoad::ShouldWriteDataBulkUpsertLocalMkql2 [GOOD]
Test command err:
2025-05-07T09:16:04.035904Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:105:2151], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T09:16:04.036078Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T09:16:04.036347Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00382f/r3tmp/tmpNvhdTT/pdisk_1.dat 2025-05-07T09:16:04.468316Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-05-07T09:16:04.549483Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:16:04.628351Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:16:04.628516Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:16:04.643493Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:16:04.732765Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-05-07T09:16:05.155974Z node 1 :DS_LOAD_TEST DEBUG: test_load_actor.cpp:425: TLoad# 0 created load actor of type# kUpsertLocalMkqlStart with tag# 1, proto# NotifyWhenFinished: true TargetShard { TabletId: 72075186224037888 TableId: 2 TableName: "usertable" } UpsertLocalMkqlStart { RowCount: 10 Inflight: 3 } 2025-05-07T09:16:05.156202Z node 1 :DS_LOAD_TEST NOTICE: bulk_mkql_upsert.cpp:157: Id# {Tag: 0, parent: [1:732:2614], subTag: 2} TUpsertActor Bootstrap called: RowCount: 10 Inflight: 3 with type# 1, target# TabletId: 72075186224037888 TableId: 2 TableName: "usertable" 2025-05-07T09:16:05.420531Z node 1 :DS_LOAD_TEST NOTICE: bulk_mkql_upsert.cpp:255: Id# {Tag: 0, parent: [1:732:2614], subTag: 2} TUpsertActor finished in 0.263794s, errors=0 2025-05-07T09:16:05.420636Z node 1 :DS_LOAD_TEST INFO: test_load_actor.cpp:447: TLoad# 0 received finished from actor# [1:733:2615] with tag# 2 2025-05-07T09:16:09.492120Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:311:2354], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T09:16:09.492419Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T09:16:09.492528Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00382f/r3tmp/tmpBmlsRJ/pdisk_1.dat 2025-05-07T09:16:09.825102Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-05-07T09:16:09.866165Z node 2 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:16:09.913897Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:16:09.914045Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:16:09.926401Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:16:10.019058Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-05-07T09:16:10.354794Z node 2 :DS_LOAD_TEST DEBUG: test_load_actor.cpp:425: TLoad# 0 created load actor of type# kUpsertLocalMkqlStart with tag# 1, proto# NotifyWhenFinished: true TargetShard { TabletId: 72075186224037888 TableId: 2 TableName: "JustTable" } UpsertLocalMkqlStart { RowCount: 10 Inflight: 3 } 2025-05-07T09:16:10.354965Z node 2 :DS_LOAD_TEST NOTICE: bulk_mkql_upsert.cpp:157: Id# {Tag: 0, parent: [2:732:2614], subTag: 2} TUpsertActor Bootstrap called: RowCount: 10 Inflight: 3 with type# 1, target# TabletId: 72075186224037888 TableId: 2 TableName: "JustTable" 2025-05-07T09:16:10.502471Z node 2 :DS_LOAD_TEST NOTICE: bulk_mkql_upsert.cpp:255: Id# {Tag: 0, parent: [2:732:2614], subTag: 2} TUpsertActor finished in 0.147003s, errors=0 2025-05-07T09:16:10.502591Z node 2 :DS_LOAD_TEST INFO: test_load_actor.cpp:447: TLoad# 0 received finished from actor# [2:733:2615] with tag# 2 >> KqpIndexes::CreateTableWithExplicitSyncIndexSQL [GOOD] >> KqpIndexes::DeleteByIndex |94.4%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest |94.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/cms/ut/unittest >> TCmsTest::SamePriorityRequest2 [GOOD] >> TCmsTest::VDisksEvictionShouldFailWhileSentinelIsDisabled >> KqpMultishardIndex::DataColumnUpsertMixedSemantic >> TSchemeshardBackgroundCompactionTest::SchemeshardShouldNotCompactBackups ------- [TM] {asan, default-linux-x86_64, release} ydb/core/load_test/ut_ycsb/unittest >> ReadLoad::ShouldReadIterateMoreThanRows [GOOD] Test command err: 2025-05-07T09:16:03.973274Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:105:2151], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T09:16:03.973455Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T09:16:03.973706Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003870/r3tmp/tmpq2zsaY/pdisk_1.dat 2025-05-07T09:16:04.469659Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-05-07T09:16:04.524993Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:16:04.585516Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:16:04.585647Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:16:04.599277Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:16:04.720469Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-05-07T09:16:05.159828Z node 1 :DS_LOAD_TEST INFO: test_load_actor.cpp:346: TLoad# 0 warmups table# usertable in dir# /Root with rows# 1000 2025-05-07T09:16:05.163541Z node 1 :DS_LOAD_TEST NOTICE: bulk_mkql_upsert.cpp:157: Id# {Tag: 0, parent: [1:732:2614], subTag: 1} TUpsertActor Bootstrap called: RowCount: 1000 Inflight: 100 BatchSize: 100 with type# 0, target# TabletId: 72075186224037888 TableId: 2 WorkingDir: "/Root" TableName: "usertable" 2025-05-07T09:16:05.243446Z node 1 :DS_LOAD_TEST NOTICE: bulk_mkql_upsert.cpp:255: Id# {Tag: 0, parent: [1:732:2614], subTag: 1} TUpsertActor finished in 0.079407s, errors=0 2025-05-07T09:16:05.244176Z node 1 :DS_LOAD_TEST DEBUG: test_load_actor.cpp:425: TLoad# 0 created load actor of type# kReadIteratorStart with tag# 2, proto# NotifyWhenFinished: true TableSetup { WorkingDir: "/Root" TableName: "usertable" } TargetShard { TabletId: 72075186224037888 TableId: 2 WorkingDir: "/Root" TableName: "usertable" } ReadIteratorStart { RowCount: 1000 Inflights: 1 Chunks: 0 Chunks: 1 Chunks: 10 } 2025-05-07T09:16:05.244350Z node 1 :DS_LOAD_TEST NOTICE: test_load_read_iterator.cpp:334: ReadIteratorLoadScenario# [1:741:2623] with id# {Tag: 0, parent: [1:732:2614], subTag: 3} Bootstrap called: RowCount: 1000 Inflights: 1 Chunks: 0 Chunks: 1 Chunks: 10 2025-05-07T09:16:05.245757Z node 1 :DS_LOAD_TEST INFO: test_load_read_iterator.cpp:396: ReadIteratorLoadScenario# {Tag: 0, parent: [1:732:2614], subTag: 3} will work with tablet# 72075186224037888 with ownerId# 72057594046644480 with tableId# 2 resolved for path# /Root/usertable with columnsCount# 11, keyColumnCount# 1 2025-05-07T09:16:05.245924Z node 1 :DS_LOAD_TEST INFO: test_load_read_iterator.cpp:437: started fullscan actor# [1:744:2626] 2025-05-07T09:16:05.246430Z node 1 :DS_LOAD_TEST INFO: common.cpp:52: 
ReadIteratorScan# {Tag: 0, parent: [1:741:2623], subTag: 1} Bootstrap called, sample# 0 2025-05-07T09:16:05.246496Z node 1 :DS_LOAD_TEST DEBUG: common.cpp:61: ReadIteratorScan# {Tag: 0, parent: [1:741:2623], subTag: 1} Connect to# 72075186224037888 called 2025-05-07T09:16:05.247562Z node 1 :DS_LOAD_TEST DEBUG: common.cpp:75: ReadIteratorScan# {Tag: 0, parent: [1:741:2623], subTag: 1} Handle TEvClientConnected called, Status# OK 2025-05-07T09:16:05.256036Z node 1 :DS_LOAD_TEST NOTICE: common.cpp:147: ReadIteratorScan# {Tag: 0, parent: [1:741:2623], subTag: 1} finished in 0.008401s, read# 1000 2025-05-07T09:16:05.256640Z node 1 :DS_LOAD_TEST NOTICE: test_load_read_iterator.cpp:456: fullscan actor# [1:744:2626] with chunkSize# 0 finished: 0 { DurationMs: 8 OperationsOK: 1000 OperationsError: 0 } 2025-05-07T09:16:05.256810Z node 1 :DS_LOAD_TEST INFO: test_load_read_iterator.cpp:437: started fullscan actor# [1:747:2629] 2025-05-07T09:16:05.256871Z node 1 :DS_LOAD_TEST INFO: common.cpp:52: ReadIteratorScan# {Tag: 0, parent: [1:741:2623], subTag: 2} Bootstrap called, sample# 0 2025-05-07T09:16:05.256909Z node 1 :DS_LOAD_TEST DEBUG: common.cpp:61: ReadIteratorScan# {Tag: 0, parent: [1:741:2623], subTag: 2} Connect to# 72075186224037888 called 2025-05-07T09:16:05.257234Z node 1 :DS_LOAD_TEST DEBUG: common.cpp:75: ReadIteratorScan# {Tag: 0, parent: [1:741:2623], subTag: 2} Handle TEvClientConnected called, Status# OK 2025-05-07T09:16:05.585298Z node 1 :DS_LOAD_TEST NOTICE: common.cpp:147: ReadIteratorScan# {Tag: 0, parent: [1:741:2623], subTag: 2} finished in 0.327993s, read# 1000 2025-05-07T09:16:05.585484Z node 1 :DS_LOAD_TEST NOTICE: test_load_read_iterator.cpp:456: fullscan actor# [1:747:2629] with chunkSize# 1 finished: 0 { DurationMs: 327 OperationsOK: 1000 OperationsError: 0 } 2025-05-07T09:16:05.585600Z node 1 :DS_LOAD_TEST INFO: test_load_read_iterator.cpp:437: started fullscan actor# [1:750:2632] 2025-05-07T09:16:05.585652Z node 1 :DS_LOAD_TEST INFO: common.cpp:52: ReadIteratorScan# {Tag: 0, parent: [1:741:2623], subTag: 3} Bootstrap called, sample# 0 2025-05-07T09:16:05.585685Z node 1 :DS_LOAD_TEST DEBUG: common.cpp:61: ReadIteratorScan# {Tag: 0, parent: [1:741:2623], subTag: 3} Connect to# 72075186224037888 called 2025-05-07T09:16:05.585996Z node 1 :DS_LOAD_TEST DEBUG: common.cpp:75: ReadIteratorScan# {Tag: 0, parent: [1:741:2623], subTag: 3} Handle TEvClientConnected called, Status# OK 2025-05-07T09:16:05.662183Z node 1 :DS_LOAD_TEST NOTICE: common.cpp:147: ReadIteratorScan# {Tag: 0, parent: [1:741:2623], subTag: 3} finished in 0.076112s, read# 1000 2025-05-07T09:16:05.662401Z node 1 :DS_LOAD_TEST NOTICE: test_load_read_iterator.cpp:456: fullscan actor# [1:750:2632] with chunkSize# 10 finished: 0 { DurationMs: 76 OperationsOK: 1000 OperationsError: 0 } 2025-05-07T09:16:05.662558Z node 1 :DS_LOAD_TEST INFO: test_load_read_iterator.cpp:437: started fullscan actor# [1:753:2635] 2025-05-07T09:16:05.662638Z node 1 :DS_LOAD_TEST INFO: common.cpp:52: ReadIteratorScan# {Tag: 0, parent: [1:741:2623], subTag: 4} Bootstrap called, sample# 1000 2025-05-07T09:16:05.662679Z node 1 :DS_LOAD_TEST DEBUG: common.cpp:61: ReadIteratorScan# {Tag: 0, parent: [1:741:2623], subTag: 4} Connect to# 72075186224037888 called 2025-05-07T09:16:05.662979Z node 1 :DS_LOAD_TEST DEBUG: common.cpp:75: ReadIteratorScan# {Tag: 0, parent: [1:741:2623], subTag: 4} Handle TEvClientConnected called, Status# OK 2025-05-07T09:16:05.667405Z node 1 :DS_LOAD_TEST NOTICE: common.cpp:137: ReadIteratorScan# {Tag: 0, parent: 
[1:741:2623], subTag: 4} finished in 0.003564s, sampled# 1000, iter finished# 1, oks# 1000 2025-05-07T09:16:05.667610Z node 1 :DS_LOAD_TEST INFO: test_load_read_iterator.cpp:506: ReadIteratorLoadScenario# {Tag: 0, parent: [1:732:2614], subTag: 3} received keyCount# 1000 2025-05-07T09:16:05.667815Z node 1 :DS_LOAD_TEST DEBUG: test_load_read_iterator.cpp:551: ReadIteratorLoadScenario# {Tag: 0, parent: [1:732:2614], subTag: 3} started read actor with id# [1:756:2638] 2025-05-07T09:16:05.667883Z node 1 :DS_LOAD_TEST NOTICE: test_load_read_iterator.cpp:79: TReadIteratorPoints# {Tag: 0, parent: [1:741:2623], subTag: 5} Bootstrap called, will read keys# 1000 2025-05-07T09:16:06.101339Z node 1 :DS_LOAD_TEST DEBUG: test_load_read_iterator.cpp:559: ReadIteratorLoadScenario# {Tag: 0, parent: [1:732:2614], subTag: 3} received point times# 1000, Inflight left# 0 2025-05-07T09:16:06.101557Z node 1 :DS_LOAD_TEST INFO: test_load_read_iterator.cpp:482: headread with inflight# 1 finished: 0 { DurationMs: 433 OperationsOK: 1000 OperationsError: 0 Info: "single row head read hist (ms):\n50%: 1\n95%: 1\n99%: 1\n99.9%: 26\n" } 2025-05-07T09:16:06.101728Z node 1 :DS_LOAD_TEST NOTICE: test_load_read_iterator.cpp:616: ReadIteratorLoadScenario# {Tag: 0, parent: [1:732:2614], subTag: 3} finished in 0.857209s with report: { DurationMs: 8 OperationsOK: 1000 OperationsError: 0 PrefixInfo: "Test run# 1, type# FullScan with chunk# inf" } { DurationMs: 327 OperationsOK: 1000 OperationsError: 0 PrefixInfo: "Test run# 2, type# FullScan with chunk# 1" } { DurationMs: 76 OperationsOK: 1000 OperationsError: 0 PrefixInfo: "Test run# 3, type# FullScan with chunk# 10" } { DurationMs: 433 OperationsOK: 1000 OperationsError: 0 Info: "single row head read hist (ms):\n50%: 1\n95%: 1\n99%: 1\n99.9%: 26\n" PrefixInfo: "Test run# 4, type# ReadHeadPoints with inflight# 1" } 2025-05-07T09:16:06.102264Z node 1 :DS_LOAD_TEST INFO: test_load_actor.cpp:447: TLoad# 0 received finished from actor# [1:741:2623] with tag# 3 2025-05-07T09:16:10.245456Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:311:2354], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T09:16:10.245753Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T09:16:10.245872Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003870/r3tmp/tmp4fclzq/pdisk_1.dat 2025-05-07T09:16:10.631472Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-05-07T09:16:10.667746Z node 2 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:16:10.718637Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:16:10.718774Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:16:10.731230Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:16:10.828117Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-05-07T09:16:11.140947Z node 2 :DS_LOAD_TEST INFO: test_load_actor.cpp:346: TLoad# 0 warmups table# usertable in dir# /Root with rows# 10 2025-05-07T09:16:11.141295Z node 2 :DS_LOAD_TEST NOTICE: bulk_mkql_upsert.cpp:157: Id# {Tag: 0, parent: [2:732:2614], subTag: 1} TUpsertActor Bootstrap called: RowCount: 10 Inflight: 100 BatchSize: 100 with type# 0, target# TabletId: 72075186224037888 TableId: 2 WorkingDir: "/Root" TableName: "usertable" 2025-05-07T09:16:11.164966Z node 2 :DS_LOAD_TEST NOTICE: bulk_mkql_upsert.cpp:255: Id# {Tag: 0, parent: [2:732:2614], subTag: 1} TUpsertActor finished in 0.023386s, errors=0 2025-05-07T09:16:11.165651Z node 2 :DS_LOAD_TEST DEBUG: test_load_actor.cpp:425: TLoad# 0 created load actor of type# kReadIteratorStart with tag# 2, proto# NotifyWhenFinished: true TableSetup { WorkingDir: "/Root" TableName: "usertable" } TargetShard { TabletId: 72075186224037888 TableId: 2 WorkingDir: "/Root" TableName: "usertable" } ReadIteratorStart { RowCount: 10 ReadCount: 1000 Inflights: 1 Chunks: 0 Chunks: 1 Chunks: 10 } 2025-05-07T09:16:11.165797Z node 2 :DS_LOAD_TEST NOTICE: test_load_read_iterator.cpp:334: ReadIteratorLoadScenario# [2:741:2623] with id# {Tag: 0, parent: [2:732:2614], subTag: 3} Bootstrap called: RowCount: 10 ReadCount: 1000 Inflights: 1 Chunks: 0 Chunks: 1 Chunks: 10 2025-05-07T09:16:11.167207Z node 2 :DS_LOAD_TEST INFO: test_load_read_iterator.cpp:396: ReadIteratorLoadScenario# {Tag: 0, parent: [2:732:2614], subTag: 3} will work with tablet# 72075186224037888 with ownerId# 72057594046644480 with tableId# 2 resolved for path# /Root/usertable with columnsCount# 11, keyColumnCount# 1 2025-05-07T09:16:11.167348Z node 2 :DS_LOAD_TEST INFO: test_load_read_iterator.cpp:437: started fullscan actor# [2:744:2626] 2025-05-07T09:16:11.167480Z node 2 
:DS_LOAD_TEST INFO: common.cpp:52: ReadIteratorScan# {Tag: 0, parent: [2:741:2623], subTag: 1} Bootstrap called, sample# 0 2025-05-07T09:16:11.167527Z node 2 :DS_LOAD_TEST DEBUG: common.cpp:61: ReadIteratorScan# {Tag: 0, parent: [2:741:2623], subTag: 1} Connect to# 72075186224037888 called 2025-05-07T09:16:11.167865Z node 2 :DS_LOAD_TEST DEBUG: common.cpp:75: ReadIteratorScan# {Tag: 0, parent: [2:741:2623], subTag: 1} Handle TEvClientConnected called, Status# OK 2025-05-07T09:16:11.168936Z node 2 :DS_LOAD_TEST NOTICE: common.cpp:147: ReadIteratorScan# {Tag: 0, parent: [2:741:2623], subTag: 1} finished in 0.001008s, read# 10 2025-05-07T09:16:11.169128Z node 2 :DS_LOAD_TEST NOTICE: test_load_read_iterator.cpp:456: fullscan actor# [2:744:2626] with chunkSize# 0 finished: 0 { DurationMs: 1 OperationsOK: 10 OperationsError: 0 } 2025-05-07T09:16:11.169272Z node 2 :DS_LOAD_TEST INFO: test_load_read_iterator.cpp:437: started fullscan actor# [2:747:2629] 2025-05-07T09:16:11.169333Z node 2 :DS_LOAD_TEST INFO: common.cpp:52: ReadIteratorScan# {Tag: 0, parent: [2:741:2623], subTag: 2} Bootstrap called, sample# 0 2025-05-07T09:16:11.169368Z node 2 :DS_LOAD_TEST DEBUG: common.cpp:61: ReadIteratorScan# {Tag: 0, parent: [2:741:2623], subTag: 2} Connect to# 72075186224037888 called 2025-05-07T09:16:11.169686Z node 2 :DS_LOAD_TEST DEBUG: common.cpp:75: ReadIteratorScan# {Tag: 0, parent: [2:741:2623], subTag: 2} Handle TEvClientConnected called, Status# OK 2025-05-07T09:16:11.172243Z node 2 :DS_LOAD_TEST NOTICE: common.cpp:147: ReadIteratorScan# {Tag: 0, parent: [2:741:2623], subTag: 2} finished in 0.002509s, read# 10 2025-05-07T09:16:11.172386Z node 2 :DS_LOAD_TEST NOTICE: test_load_read_iterator.cpp:456: fullscan actor# [2:747:2629] with chunkSize# 1 finished: 0 { DurationMs: 2 OperationsOK: 10 OperationsError: 0 } 2025-05-07T09:16:11.172500Z node 2 :DS_LOAD_TEST INFO: test_load_read_iterator.cpp:437: started fullscan actor# [2:750:2632] 2025-05-07T09:16:11.172546Z node 2 :DS_LOAD_TEST INFO: common.cpp:52: ReadIteratorScan# {Tag: 0, parent: [2:741:2623], subTag: 3} Bootstrap called, sample# 0 2025-05-07T09:16:11.172595Z node 2 :DS_LOAD_TEST DEBUG: common.cpp:61: ReadIteratorScan# {Tag: 0, parent: [2:741:2623], subTag: 3} Connect to# 72075186224037888 called 2025-05-07T09:16:11.172876Z node 2 :DS_LOAD_TEST DEBUG: common.cpp:75: ReadIteratorScan# {Tag: 0, parent: [2:741:2623], subTag: 3} Handle TEvClientConnected called, Status# OK 2025-05-07T09:16:11.173636Z node 2 :DS_LOAD_TEST NOTICE: common.cpp:147: ReadIteratorScan# {Tag: 0, parent: [2:741:2623], subTag: 3} finished in 0.000715s, read# 10 2025-05-07T09:16:11.173754Z node 2 :DS_LOAD_TEST NOTICE: test_load_read_iterator.cpp:456: fullscan actor# [2:750:2632] with chunkSize# 10 finished: 0 { DurationMs: 0 OperationsOK: 10 OperationsError: 0 } 2025-05-07T09:16:11.173868Z node 2 :DS_LOAD_TEST INFO: test_load_read_iterator.cpp:437: started fullscan actor# [2:753:2635] 2025-05-07T09:16:11.173921Z node 2 :DS_LOAD_TEST INFO: common.cpp:52: ReadIteratorScan# {Tag: 0, parent: [2:741:2623], subTag: 4} Bootstrap called, sample# 10 2025-05-07T09:16:11.173951Z node 2 :DS_LOAD_TEST DEBUG: common.cpp:61: ReadIteratorScan# {Tag: 0, parent: [2:741:2623], subTag: 4} Connect to# 72075186224037888 called 2025-05-07T09:16:11.174277Z node 2 :DS_LOAD_TEST DEBUG: common.cpp:75: ReadIteratorScan# {Tag: 0, parent: [2:741:2623], subTag: 4} Handle TEvClientConnected called, Status# OK 2025-05-07T09:16:11.174759Z node 2 :DS_LOAD_TEST NOTICE: common.cpp:137: ReadIteratorScan# {Tag: 0, 
parent: [2:741:2623], subTag: 4} finished in 0.000415s, sampled# 10, iter finished# 1, oks# 10 2025-05-07T09:16:11.174848Z node 2 :DS_LOAD_TEST INFO: test_load_read_iterator.cpp:506: ReadIteratorLoadScenario# {Tag: 0, parent: [2:732:2614], subTag: 3} received keyCount# 10 2025-05-07T09:16:11.174987Z node 2 :DS_LOAD_TEST DEBUG: test_load_read_iterator.cpp:551: ReadIteratorLoadScenario# {Tag: 0, parent: [2:732:2614], subTag: 3} started read actor with id# [2:756:2638] 2025-05-07T09:16:11.175051Z node 2 :DS_LOAD_TEST NOTICE: test_load_read_iterator.cpp:79: TReadIteratorPoints# {Tag: 0, parent: [2:741:2623], subTag: 5} Bootstrap called, will read keys# 10 2025-05-07T09:16:11.649804Z node 2 :DS_LOAD_TEST DEBUG: test_load_read_iterator.cpp:559: ReadIteratorLoadScenario# {Tag: 0, parent: [2:732:2614], subTag: 3} received point times# 1000, Inflight left# 0 2025-05-07T09:16:11.650114Z node 2 :DS_LOAD_TEST INFO: test_load_read_iterator.cpp:482: headread with inflight# 1 finished: 0 { DurationMs: 474 OperationsOK: 1000 OperationsError: 0 Info: "single row head read hist (ms):\n50%: 1\n95%: 1\n99%: 1\n99.9%: 30\n" } 2025-05-07T09:16:11.650378Z node 2 :DS_LOAD_TEST NOTICE: test_load_read_iterator.cpp:616: ReadIteratorLoadScenario# {Tag: 0, parent: [2:732:2614], subTag: 3} finished in 0.484362s with report: { DurationMs: 1 OperationsOK: 10 OperationsError: 0 PrefixInfo: "Test run# 1, type# FullScan with chunk# inf" } { DurationMs: 2 OperationsOK: 10 OperationsError: 0 PrefixInfo: "Test run# 2, type# FullScan with chunk# 1" } { DurationMs: 0 OperationsOK: 10 OperationsError: 0 PrefixInfo: "Test run# 3, type# FullScan with chunk# 10" } { DurationMs: 474 OperationsOK: 1000 OperationsError: 0 Info: "single row head read hist (ms):\n50%: 1\n95%: 1\n99%: 1\n99.9%: 30\n" PrefixInfo: "Test run# 4, type# ReadHeadPoints with inflight# 1" } 2025-05-07T09:16:11.650533Z node 2 :DS_LOAD_TEST INFO: test_load_actor.cpp:447: TLoad# 0 received finished from actor# [2:741:2623] with tag# 3 >> TFlatTest::Mix_DML_DDL ------- [TM] {asan, default-linux-x86_64, release} ydb/core/load_test/ut_ycsb/unittest >> UpsertLoad::ShouldWriteDataBulkUpsertKeyFrom [GOOD] Test command err: 2025-05-07T09:16:05.713832Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:105:2151], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T09:16:05.714056Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T09:16:05.714327Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00381c/r3tmp/tmp4A60bG/pdisk_1.dat 2025-05-07T09:16:06.143582Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-05-07T09:16:06.193211Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:16:06.253344Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:16:06.253488Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:16:06.267448Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:16:06.370566Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-05-07T09:16:06.800918Z node 1 :DS_LOAD_TEST DEBUG: test_load_actor.cpp:425: TLoad# 0 created load actor of type# kUpsertBulkStart with tag# 1, proto# NotifyWhenFinished: true TargetShard { TabletId: 72075186224037888 TableId: 2 TableName: "usertable" } UpsertBulkStart { RowCount: 100 Inflight: 3 BatchSize: 7 } 2025-05-07T09:16:06.801092Z node 1 :DS_LOAD_TEST NOTICE: bulk_mkql_upsert.cpp:157: Id# {Tag: 0, parent: [1:732:2614], subTag: 2} TUpsertActor Bootstrap called: RowCount: 100 Inflight: 3 BatchSize: 7 with type# 0, target# TabletId: 72075186224037888 TableId: 2 TableName: "usertable" 2025-05-07T09:16:06.899276Z node 1 :DS_LOAD_TEST NOTICE: bulk_mkql_upsert.cpp:255: Id# {Tag: 0, parent: [1:732:2614], subTag: 2} TUpsertActor finished in 0.097720s, errors=0 2025-05-07T09:16:06.899401Z node 1 :DS_LOAD_TEST INFO: test_load_actor.cpp:447: TLoad# 0 received finished from actor# [1:733:2615] with tag# 2 2025-05-07T09:16:10.932887Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:311:2354], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T09:16:10.933149Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T09:16:10.933279Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00381c/r3tmp/tmpWfjA3e/pdisk_1.dat 2025-05-07T09:16:11.279930Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-05-07T09:16:11.323769Z node 2 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:16:11.375611Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:16:11.375718Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:16:11.389570Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:16:11.477945Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-05-07T09:16:11.771997Z node 2 :DS_LOAD_TEST DEBUG: test_load_actor.cpp:425: TLoad# 0 created load actor of type# kUpsertBulkStart with tag# 1, proto# NotifyWhenFinished: true TargetShard { TabletId: 72075186224037888 TableId: 2 TableName: "usertable" } UpsertBulkStart { RowCount: 10 Inflight: 3 KeyFrom: 12345 } 2025-05-07T09:16:11.772154Z node 2 :DS_LOAD_TEST NOTICE: bulk_mkql_upsert.cpp:157: Id# {Tag: 0, parent: [2:732:2614], subTag: 2} TUpsertActor Bootstrap called: RowCount: 10 Inflight: 3 KeyFrom: 12345 with type# 0, target# TabletId: 72075186224037888 TableId: 2 TableName: "usertable" 2025-05-07T09:16:11.853663Z node 2 :DS_LOAD_TEST NOTICE: bulk_mkql_upsert.cpp:255: Id# {Tag: 0, parent: [2:732:2614], subTag: 2} TUpsertActor finished in 0.081064s, errors=0 2025-05-07T09:16:11.853799Z node 2 :DS_LOAD_TEST INFO: test_load_actor.cpp:447: TLoad# 0 received finished from actor# [2:733:2615] with tag# 2 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/load_test/ut_ycsb/unittest >> UpsertLoad::ShouldDropCreateTable [GOOD] Test command err: 2025-05-07T09:16:03.842313Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:105:2151], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T09:16:03.842499Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T09:16:03.842808Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003896/r3tmp/tmpwnBHXu/pdisk_1.dat 2025-05-07T09:16:04.467662Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-05-07T09:16:04.527505Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:16:04.588148Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:16:04.588320Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:16:04.600126Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:16:04.719397Z node 1 :DS_LOAD_TEST NOTICE: test_load_actor.cpp:194: TLoad# 0 creates table# BrandNewTable in dir# /Root 2025-05-07T09:16:05.399585Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:643:2551], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T09:16:05.399793Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:05.561404Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-05-07T09:16:05.962441Z node 1 :DS_LOAD_TEST INFO: test_load_actor.cpp:346: TLoad# 0 warmups table# BrandNewTable in dir# /Root with rows# 10 2025-05-07T09:16:05.964266Z node 1 :DS_LOAD_TEST NOTICE: bulk_mkql_upsert.cpp:157: Id# {Tag: 0, parent: [1:640:2548], subTag: 1} TUpsertActor Bootstrap called: RowCount: 10 Inflight: 100 BatchSize: 100 with type# 0, target# TabletId: 72075186224037888 TableId: 2 WorkingDir: "/Root" TableName: "BrandNewTable" 2025-05-07T09:16:05.987120Z node 1 :DS_LOAD_TEST NOTICE: bulk_mkql_upsert.cpp:255: Id# {Tag: 0, parent: [1:640:2548], subTag: 1} TUpsertActor finished in 0.022465s, errors=0 2025-05-07T09:16:05.987495Z node 1 :DS_LOAD_TEST DEBUG: test_load_actor.cpp:425: TLoad# 0 created load actor of type# kUpsertBulkStart with tag# 2, proto# NotifyWhenFinished: true TableSetup { WorkingDir: "/Root" TableName: "BrandNewTable" CreateTable: true MinParts: 11 MaxParts: 13 MaxPartSizeMb: 1234 } TargetShard { TabletId: 72075186224037888 TableId: 2 WorkingDir: "/Root" TableName: "BrandNewTable" } UpsertBulkStart { RowCount: 10 Inflight: 3 } 2025-05-07T09:16:05.987664Z node 1 :DS_LOAD_TEST NOTICE: bulk_mkql_upsert.cpp:157: Id# {Tag: 0, parent: [1:640:2548], subTag: 3} TUpsertActor Bootstrap called: RowCount: 10 Inflight: 3 with type# 0, target# TabletId: 72075186224037888 TableId: 2 WorkingDir: "/Root" TableName: "BrandNewTable" 2025-05-07T09:16:06.047009Z node 1 :DS_LOAD_TEST NOTICE: bulk_mkql_upsert.cpp:255: Id# {Tag: 0, parent: [1:640:2548], subTag: 3} TUpsertActor finished in 0.058972s, errors=0 2025-05-07T09:16:06.047112Z node 1 :DS_LOAD_TEST INFO: test_load_actor.cpp:447: TLoad# 0 received finished from actor# [1:749:2624] with tag# 3 2025-05-07T09:16:09.966571Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:311:2354], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T09:16:09.966844Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T09:16:09.966958Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003896/r3tmp/tmpDTBSIr/pdisk_1.dat 2025-05-07T09:16:10.280735Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-05-07T09:16:10.311712Z node 2 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:16:10.362795Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:16:10.362972Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:16:10.374797Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:16:10.475598Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-05-07T09:16:10.780825Z node 2 :DS_LOAD_TEST DEBUG: test_load_actor.cpp:425: TLoad# 0 created load actor of type# kUpsertBulkStart with tag# 1, proto# NotifyWhenFinished: true TargetShard { TabletId: 72075186224037888 TableId: 2 } UpsertBulkStart { RowCount: 100 Inflight: 3 } 2025-05-07T09:16:10.780915Z node 2 :DS_LOAD_TEST NOTICE: bulk_mkql_upsert.cpp:157: Id# {Tag: 0, parent: [2:732:2614], subTag: 2} TUpsertActor Bootstrap called: RowCount: 100 Inflight: 3 with type# 0, target# TabletId: 72075186224037888 TableId: 2 2025-05-07T09:16:11.254974Z node 2 :DS_LOAD_TEST NOTICE: bulk_mkql_upsert.cpp:255: Id# {Tag: 0, parent: [2:732:2614], subTag: 2} TUpsertActor finished in 0.473705s, errors=0 2025-05-07T09:16:11.255088Z node 2 :DS_LOAD_TEST INFO: test_load_actor.cpp:447: TLoad# 0 received finished from actor# [2:733:2615] with tag# 2 2025-05-07T09:16:11.270575Z node 2 :DS_LOAD_TEST NOTICE: test_load_actor.cpp:174: TLoad# 0 drops table# table in dir# /Root 2025-05-07T09:16:11.289852Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:775:2656], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T09:16:11.290077Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T09:16:11.569619Z node 2 :DS_LOAD_TEST NOTICE: test_load_actor.cpp:194: TLoad# 0 creates table# table in dir# /Root
2025-05-07T09:16:11.586429Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:838:2699], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T09:16:11.586537Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:11.598834Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 2025-05-07T09:16:11.648740Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037888 not found 2025-05-07T09:16:11.896768Z node 2 :DS_LOAD_TEST INFO: test_load_actor.cpp:346: TLoad# 0 warmups table# table in dir# /Root with rows# 10 2025-05-07T09:16:11.897074Z node 2 :DS_LOAD_TEST NOTICE: bulk_mkql_upsert.cpp:157: Id# {Tag: 0, parent: [2:771:2653], subTag: 1} TUpsertActor Bootstrap called: RowCount: 10 Inflight: 100 BatchSize: 100 with type# 0, target# TabletId: 72075186224037889 TableId: 3 WorkingDir: "/Root" TableName: "table" 2025-05-07T09:16:11.910862Z node 2 :DS_LOAD_TEST NOTICE: bulk_mkql_upsert.cpp:255: Id# {Tag: 0, parent: [2:771:2653], subTag: 1} TUpsertActor finished in 0.013481s, errors=0 2025-05-07T09:16:11.911176Z node 2 :DS_LOAD_TEST DEBUG: test_load_actor.cpp:425: TLoad# 0 created load actor of type# kUpsertBulkStart with tag# 2, proto# NotifyWhenFinished: true TableSetup { WorkingDir: "/Root" TableName: "table" DropTable: true } TargetShard { TabletId: 72075186224037889 TableId: 3 WorkingDir: "/Root" TableName: "table" } UpsertBulkStart { RowCount: 10 Inflight: 3 } 2025-05-07T09:16:11.911345Z node 2 :DS_LOAD_TEST NOTICE: bulk_mkql_upsert.cpp:157: Id# {Tag: 0, parent: [2:771:2653], subTag: 3} TUpsertActor Bootstrap called: RowCount: 10 Inflight: 3 with type# 0, target# TabletId: 72075186224037889 TableId: 3 WorkingDir: "/Root" TableName: "table" 2025-05-07T09:16:11.978728Z node 2 :DS_LOAD_TEST NOTICE: bulk_mkql_upsert.cpp:255: Id# {Tag: 0, parent: [2:771:2653], subTag: 3} TUpsertActor finished in 0.067084s, errors=0 2025-05-07T09:16:11.978824Z node 2 :DS_LOAD_TEST INFO: test_load_actor.cpp:447: TLoad# 0 received finished from actor# [2:929:2771] with tag# 3 >> KqpUniqueIndex::ReplaceFkAlreadyExist >> TCmsTest::DisabledEvictVDisks [GOOD] >> TCmsTest::EmergencyDuringRollingRestart |94.4%| [TA] $(B)/ydb/core/load_test/ut_ycsb/test-results/unittest/{meta.json ... results_accumulator.log} |94.4%| [TA] {RESULT} $(B)/ydb/core/load_test/ut_ycsb/test-results/unittest/{meta.json ... 
results_accumulator.log} >> TSchemeshardBorrowedCompactionTest::SchemeshardShouldNotCompactBorrowedAfterSplitMergeWhenDisabled >> TCmsTest::TestForceRestartMode >> KqpUniqueIndex::InsertNullInComplexFk [GOOD] >> KqpUniqueIndex::InsertNullInComplexFkDuplicate >> KqpIndexes::SecondaryIndexUsingInJoin+UseStreamJoin >> TCmsTest::VDisksEvictionShouldFailWhileSentinelIsDisabled [GOOD] >> TCmsTest::VDisksEvictionShouldFailOnUnsupportedAction >> KqpMultishardIndex::DataColumnWriteNull >> KqpIndexes::SecondaryIndexReplace+UseSink [GOOD] >> KqpIndexes::SecondaryIndexReplace-UseSink >> TCmsTest::EmergencyDuringRollingRestart [GOOD] >> TCmsTest::TestForceRestartMode [GOOD] >> TCmsTest::TestForceRestartModeDisconnects >> YdbSdkSessionsPool::StressTestAsync/0 [GOOD] >> YdbSdkSessionsPool::StressTestAsync/1 >> KqpIndexes::CheckUpsertNonEquatableType+NotNull [GOOD] >> KqpIndexes::CheckUpsertNonEquatableType-NotNull |94.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest |94.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/cms/ut/unittest >> TCmsTest::EmergencyDuringRollingRestart [GOOD] Test command err: 2025-05-07T09:16:11.945654Z node 10 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvGetConfigRequest { }, response# NKikimr::NCms::TEvCms::TEvGetConfigResponse { Status { Code: OK } Config { DefaultRetryTime: 300000000 DefaultPermissionDuration: 300000000 TenantLimits { DisabledNodesRatioLimit: 0 } ClusterLimits { DisabledNodesRatioLimit: 0 } InfoCollectionTimeout: 15000000 LogConfig { DefaultLevel: ENABLED TTL: 1209600000000 } SentinelConfig { Enable: true UpdateConfigInterval: 3600000000 RetryUpdateConfig: 60000000 UpdateStateInterval: 60000000 UpdateStateTimeout: 45000000 RetryChangeStatus: 10000000 ChangeStatusRetries: 5 DefaultStateLimit: 60 DataCenterRatio: 50 RoomRatio: 70 RackRatio: 90 DryRun: false EvictVDisksStatus: FAULTY GoodStateLimit: 5 FaultyPDisksThresholdPerNode: 0 } } } 2025-05-07T09:16:11.946228Z node 10 :CMS DEBUG: cms_tx_update_config.cpp:23: TTxUpdateConfig Execute 2025-05-07T09:16:11.985482Z node 10 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-05-07T09:16:11.985691Z node 10 :CMS DEBUG: cluster_info.cpp:966: Timestamp: 1970-01-01T00:02:00Z 2025-05-07T09:16:11.987905Z node 10 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvClusterStateRequest { }, response# NKikimr::NCms::TEvCms::TEvClusterStateResponse { Status { Code: OK } State { Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120027512 } Devices { Name: "vdisk-0-1-0-7-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-1-1-0-7-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-2-1-0-7-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-3-1-0-7-0" State: UP Timestamp: 120027512 } Devices { Name: "pdisk-17-17" State: UP Timestamp: 120027512 } Timestamp: 120027512 NodeId: 17 InterconnectPort: 12008 Location { DataCenter: "1" Module: "8" Rack: "8" Unit: "8" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120027512 } Devices { Name: "vdisk-0-1-0-0-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-1-1-0-0-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-2-1-0-0-0" State: UP Timestamp: 120027512 } Devices { Name: 
"vdisk-3-1-0-0-0" State: UP Timestamp: 120027512 } Devices { Name: "pdisk-10-10" State: UP Timestamp: 120027512 } Timestamp: 120027512 NodeId: 10 InterconnectPort: 12001 Location { DataCenter: "1" Module: "1" Rack: "1" Unit: "1" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120027512 } Devices { Name: "vdisk-0-1-0-1-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-1-1-0-1-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-2-1-0-1-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-3-1-0-1-0" State: UP Timestamp: 120027512 } Devices { Name: "pdisk-11-11" State: UP Timestamp: 120027512 } Timestamp: 120027512 NodeId: 11 InterconnectPort: 12002 Location { DataCenter: "1" Module: "2" Rack: "2" Unit: "2" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120027512 } Devices { Name: "vdisk-0-1-0-2-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-1-1-0-2-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-2-1-0-2-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-3-1-0-2-0" State: UP Timestamp: 120027512 } Devices { Name: "pdisk-12-12" State: UP Timestamp: 120027512 } Timestamp: 120027512 NodeId: 12 InterconnectPort: 12003 Location { DataCenter: "1" Module: "3" Rack: "3" Unit: "3" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120027512 } Devices { Name: "vdisk-0-1-0-3-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-1-1-0-3-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-2-1-0-3-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-3-1-0-3-0" State: UP Timestamp: 120027512 } Devices { Name: "pdisk-13-13" State: UP Timestamp: 120027512 } Timestamp: 120027512 NodeId: 13 InterconnectPort: 12004 Location { DataCenter: "1" Module: "4" Rack: "4" Unit: "4" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120027512 } Devices { Name: "vdisk-0-1-0-4-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-1-1-0-4-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-2-1-0-4-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-3-1-0-4-0" State: UP Timestamp: 120027512 } Devices { Name: "pdisk-14-14" State: UP Timestamp: 120027512 } Timestamp: 120027512 NodeId: 14 InterconnectPort: 12005 Location { DataCenter: "1" Module: "5" Rack: "5" Unit: "5" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120027512 } Devices { Name: "vdisk-0-1-0-5-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-1-1-0-5-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-2-1-0-5-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-3-1-0-5-0" State: UP Timestamp: 120027512 } Devices { Name: "pdisk-15-15" State: UP Timestamp: 120027512 } Timestamp: 120027512 NodeId: 15 InterconnectPort: 12006 Location { DataCenter: "1" Module: "6" Rack: "6" Unit: "6" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120027512 } Devices { Name: "vdisk-0-1-0-6-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-1-1-0-6-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-2-1-0-6-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-3-1-0-6-0" State: UP Timestamp: 120027512 } Devices { Name: "pdisk-16-16" State: UP Timestamp: 
120027512 } Timestamp: 120027512 NodeId: 16 InterconnectPort: 12007 Location { DataCenter: "1" Module: "7" Rack: "7" Unit: "7" } StartTimeSeconds: 0 } Timestamp: 120027512 } } 2025-05-07T09:16:11.988846Z node 10 :CMS DEBUG: sentinel.cpp:486: [Sentinel] [ConfigUpdater] Handle TEvCms::TEvClusterStateResponse: response# Status { Code: OK } State { Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120027512 } Devices { Name: "vdisk-0-1-0-7-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-1-1-0-7-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-2-1-0-7-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-3-1-0-7-0" State: UP Timestamp: 120027512 } Devices { Name: "pdisk-17-17" State: UP Timestamp: 120027512 } Timestamp: 120027512 NodeId: 17 InterconnectPort: 12008 Location { DataCenter: "1" Module: "8" Rack: "8" Unit: "8" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120027512 } Devices { Name: "vdisk-0-1-0-0-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-1-1-0-0-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-2-1-0-0-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-3-1-0-0-0" State: UP Timestamp: 120027512 } Devices { Name: "pdisk-10-10" State: UP Timestamp: 120027512 } Timestamp: 120027512 NodeId: 10 InterconnectPort: 12001 Location { DataCenter: "1" Module: "1" Rack: "1" Unit: "1" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120027512 } Devices { Name: "vdisk-0-1-0-1-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-1-1-0-1-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-2-1-0-1-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-3-1-0-1-0" State: UP Timestamp: 120027512 } Devices { Name: "pdisk-11-11" State: UP Timestamp: 120027512 } Timestamp: 120027512 NodeId: 11 InterconnectPort: 12002 Location { DataCenter: "1" Module: "2" Rack: "2" Unit: "2" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120027512 } Devices { Name: "vdisk-0-1-0-2-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-1-1-0-2-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-2-1-0-2-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-3-1-0-2-0" State: UP Timestamp: 120027512 } Devices { Name: "pdisk-12-12" State: UP Timestamp: 120027512 } Timestamp: 120027512 NodeId: 12 InterconnectPort: 12003 Location { DataCenter: "1" Module: "3" Rack: "3" Unit: "3" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120027512 } Devices { Name: "vdisk-0-1-0-3-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-1-1-0-3-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-2-1-0-3-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-3-1-0-3-0" State: UP Timestamp: 120027512 } Devices { Name: "pdisk-13-13" State: UP Timestamp: 120027512 } Timestamp: 120027512 NodeId: 13 InterconnectPort: 12004 Location { DataCenter: "1" Module: "4" Rack: "4" Unit: "4" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120027512 } Devices { Name: "vdisk-0-1-0-4-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-1-1-0-4-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-2-1-0-4-0" State: UP Timestamp: 120027512 } Devices { Name: 
"vdisk-3-1-0-4-0" State: UP Timestamp: 120027512 } Devices { Name: "pdisk-14-14" State: UP Timestamp: 120027512 } Timestamp: 120027512 NodeId: 14 InterconnectPort: 12005 Location { DataCenter: "1" Module: "5" Rack: "5" Unit: "5" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120027512 } Devices { Name: "vdisk-0-1-0-5-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-1-1-0-5-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-2-1-0-5-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-3-1-0-5-0" State: UP Timestamp: 120027512 } Devices { Name: "pdisk-15-15" State: UP Timestamp: 120027512 } Timestamp: 120027512 NodeId: 15 InterconnectPort: 12006 Location { DataCenter: "1" Module: "6" Rack: "6" Unit: "6" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120027512 } Devices { Name: "vdisk-0-1-0-6-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-1-1-0-6-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-2-1-0-6-0" State: UP Timestamp: 120027512 } Devices { Name: "vdisk-3-1-0-6-0" State: UP Timestamp: 120027512 } Devices { Name: "pdisk-16-16" State: UP Timestamp: 120027512 } Timestamp: 120027512 NodeId: 16 InterconnectPort: 12007 Location { DataCenter: "1" Module: "7" Rack: "7" Unit: "7" } StartTimeSeconds: 0 } Timestamp: 120027512 } 2025-05-07T09:16:11.989142Z node 10 :CMS DEBUG: sentinel.cpp:944: [Sentinel] [Main] Config was updated in 120.003512s 2025-05-07T09:16:11.989206Z node 10 :CMS DEBUG: sentinel.cpp:884: [Sentinel] [Main] Start StateUpdater 2025-05-07T09:16:11.989327Z node 10 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 10, wbId# [10:8388350642965737326:1634689637] 2025-05-07T09:16:11.989417Z node 10 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 11, wbId# [11:8388350642965737326:1634689637] 2025-05-07T09:16:11.989475Z node 10 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 12, wbId# [12:8388350642965737326:1634689637] 2025-05-07T09:16:11.989511Z node 10 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 13, wbId# [13:8388350642965737326:1634689637] 2025-05-07T09:16:11.989556Z node 10 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 14, wbId# [14:8388350642965737326:1634689637] 2025-05-07T09:16:11.989590Z node 10 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 15, wbId# ... 
de 10 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 17, wbId# [17:8388350642965737326:1634689637] 2025-05-07T09:16:12.323602Z node 10 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 10, response# PDiskStateInfo { PDiskId: 10 CreateTime: 0 ChangeTime: 0 Path: "/10/pdisk-10.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 240027 2025-05-07T09:16:12.324450Z node 10 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 11, response# PDiskStateInfo { PDiskId: 11 CreateTime: 0 ChangeTime: 0 Path: "/11/pdisk-11.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 240027 2025-05-07T09:16:12.324560Z node 10 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 12, response# PDiskStateInfo { PDiskId: 12 CreateTime: 0 ChangeTime: 0 Path: "/12/pdisk-12.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 240027 2025-05-07T09:16:12.324632Z node 10 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 13, response# PDiskStateInfo { PDiskId: 13 CreateTime: 0 ChangeTime: 0 Path: "/13/pdisk-13.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 240027 2025-05-07T09:16:12.324701Z node 10 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 14, response# PDiskStateInfo { PDiskId: 14 CreateTime: 0 ChangeTime: 0 Path: "/14/pdisk-14.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 240027 2025-05-07T09:16:12.324766Z node 10 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 15, response# PDiskStateInfo { PDiskId: 15 CreateTime: 0 ChangeTime: 0 Path: "/15/pdisk-15.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 240027 2025-05-07T09:16:12.324827Z node 10 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 16, response# PDiskStateInfo { PDiskId: 16 CreateTime: 0 ChangeTime: 0 Path: "/16/pdisk-16.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 240027 2025-05-07T09:16:12.324888Z node 10 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 17, response# PDiskStateInfo { PDiskId: 17 CreateTime: 0 ChangeTime: 0 Path: "/17/pdisk-17.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 240027 2025-05-07T09:16:12.324944Z node 10 :CMS DEBUG: sentinel.cpp:960: [Sentinel] [Main] State was updated in 0.000000s 2025-05-07T09:16:12.325244Z node 10 :CMS NOTICE: sentinel.cpp:1039: [Sentinel] [Main] PDisk status changed: pdiskId# 10:10, status# FAULTY, required status# ACTIVE, reason# PrevState# Normal State# Normal StateCounter# 3 StateLimit# 1, dry run# 0 2025-05-07T09:16:12.325324Z node 10 :CMS DEBUG: sentinel.cpp:1076: [Sentinel] [Main] Change pdisk status: requestsSize# 1 2025-05-07T09:16:12.325578Z node 10 :CMS DEBUG: cms_tx_log_and_send.cpp:19: TTxLogAndSend Execute 2025-05-07T09:16:12.325871Z node 10 :CMS DEBUG: sentinel.cpp:1202: [Sentinel] [Main] Handle 
TEvBlobStorage::TEvControllerConfigResponse: response# Status { Success: true } Success: true, cookie# 2 2025-05-07T09:16:12.325933Z node 10 :CMS NOTICE: sentinel.cpp:1226: [Sentinel] [Main] PDisk status has been changed: pdiskId# 10:10 2025-05-07T09:16:12.340449Z node 10 :CMS DEBUG: cms_tx_log_and_send.cpp:27: TTxLogAndSend Complete 2025-05-07T09:16:12.361323Z node 10 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-05-07T09:16:12.361444Z node 10 :CMS DEBUG: cms_tx_update_downtimes.cpp:26: TTxUpdateDowntimes Complete 2025-05-07T09:16:12.361519Z node 10 :CMS DEBUG: cluster_info.cpp:966: Timestamp: 1970-01-01T00:04:00Z 2025-05-07T09:16:12.362661Z node 10 :CMS INFO: cms.cpp:347: Check request: User: "user" Actions { Type: RESTART_SERVICES Host: "10" Services: "storage" Duration: 600000000 Issue { Type: GENERIC Message: "VDisks eviction from host 10 has not yet been completed" } } PartialPermissionAllowed: false Schedule: false Reason: "" TenantPolicy: DEFAULT AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: true 2025-05-07T09:16:12.362783Z node 10 :CMS DEBUG: cms.cpp:379: Checking action: Type: RESTART_SERVICES Host: "10" Services: "storage" Duration: 600000000 Issue { Type: GENERIC Message: "VDisks eviction from host 10 has not yet been completed" } 2025-05-07T09:16:12.362838Z node 10 :CMS DEBUG: cms.cpp:398: Result: ERROR (reason: Evict vdisks is disabled in Sentinel (self heal)) 2025-05-07T09:16:12.363004Z node 10 :CMS DEBUG: cms_tx_store_permissions.cpp:26: TTxStorePermissions Execute 2025-05-07T09:16:12.365937Z node 10 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Store request: id# user-r-1, owner# user, order# 1, priority# 0, body# User: "user" PartialPermissionAllowed: false Schedule: false Reason: "" TenantPolicy: DEFAULT AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: true 2025-05-07T09:16:12.378997Z node 10 :CMS DEBUG: cms_tx_store_permissions.cpp:137: TTxStorePermissions complete 2025-05-07T09:16:12.379316Z node 10 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvCheckRequest { User: "user" RequestId: "user-r-1" DryRun: false AvailabilityMode: MODE_MAX_AVAILABILITY }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: ERROR Reason: "Evict vdisks is disabled in Sentinel (self heal)" } RequestId: "user-r-1" } 2025-05-07T09:16:12.380155Z node 10 :CMS DEBUG: cms_tx_update_config.cpp:23: TTxUpdateConfig Execute 2025-05-07T09:16:12.394837Z node 10 :CMS DEBUG: cms_tx_update_config.cpp:37: TTxUpdateConfig Complete 2025-05-07T09:16:12.395161Z node 10 :CMS DEBUG: cms_tx_update_config.cpp:44: Updated config: DefaultRetryTime: 300000000 DefaultPermissionDuration: 300000000 TenantLimits { DisabledNodesRatioLimit: 0 } ClusterLimits { DisabledNodesRatioLimit: 0 } InfoCollectionTimeout: 15000000 LogConfig { DefaultLevel: ENABLED TTL: 1209600000000 } SentinelConfig { Enable: true UpdateConfigInterval: 3600000000 RetryUpdateConfig: 60000000 UpdateStateInterval: 60000000 UpdateStateTimeout: 45000000 RetryChangeStatus: 10000000 ChangeStatusRetries: 5 DefaultStateLimit: 1 DataCenterRatio: 50 RoomRatio: 70 RackRatio: 90 DryRun: false EvictVDisksStatus: FAULTY GoodStateLimit: 5 FaultyPDisksThresholdPerNode: 0 } 2025-05-07T09:16:12.466446Z node 10 :CMS DEBUG: sentinel.cpp:955: [Sentinel] [Main] UpdateState 2025-05-07T09:16:12.466535Z node 10 :CMS DEBUG: sentinel.cpp:884: [Sentinel] [Main] Start StateUpdater 2025-05-07T09:16:12.466852Z node 10 :CMS DEBUG: cms.cpp:1146: Running 
CleanupWalleTasks 2025-05-07T09:16:12.467136Z node 10 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 10, wbId# [10:8388350642965737326:1634689637] 2025-05-07T09:16:12.467195Z node 10 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 11, wbId# [11:8388350642965737326:1634689637] 2025-05-07T09:16:12.467226Z node 10 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 12, wbId# [12:8388350642965737326:1634689637] 2025-05-07T09:16:12.467257Z node 10 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 13, wbId# [13:8388350642965737326:1634689637] 2025-05-07T09:16:12.467289Z node 10 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 14, wbId# [14:8388350642965737326:1634689637] 2025-05-07T09:16:12.467322Z node 10 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 15, wbId# [15:8388350642965737326:1634689637] 2025-05-07T09:16:12.467355Z node 10 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 16, wbId# [16:8388350642965737326:1634689637] 2025-05-07T09:16:12.467416Z node 10 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 17, wbId# [17:8388350642965737326:1634689637] 2025-05-07T09:16:12.467807Z node 10 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 10, response# PDiskStateInfo { PDiskId: 10 CreateTime: 0 ChangeTime: 0 Path: "/10/pdisk-10.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 300027 2025-05-07T09:16:12.468332Z node 10 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 15, response# PDiskStateInfo { PDiskId: 15 CreateTime: 0 ChangeTime: 0 Path: "/15/pdisk-15.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 300027 2025-05-07T09:16:12.468485Z node 10 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 16, response# PDiskStateInfo { PDiskId: 16 CreateTime: 0 ChangeTime: 0 Path: "/16/pdisk-16.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 300027 2025-05-07T09:16:12.468557Z node 10 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 17, response# PDiskStateInfo { PDiskId: 17 CreateTime: 0 ChangeTime: 0 Path: "/17/pdisk-17.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 300027 2025-05-07T09:16:12.468702Z node 10 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 12, response# PDiskStateInfo { PDiskId: 12 CreateTime: 0 ChangeTime: 0 Path: "/12/pdisk-12.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 300027 2025-05-07T09:16:12.468781Z node 10 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 13, response# PDiskStateInfo { PDiskId: 13 CreateTime: 0 ChangeTime: 0 Path: "/13/pdisk-13.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 300027 2025-05-07T09:16:12.468855Z node 10 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 14, response# 
PDiskStateInfo { PDiskId: 14 CreateTime: 0 ChangeTime: 0 Path: "/14/pdisk-14.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 300027 2025-05-07T09:16:12.468939Z node 10 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 11, response# PDiskStateInfo { PDiskId: 11 CreateTime: 0 ChangeTime: 0 Path: "/11/pdisk-11.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 300027 2025-05-07T09:16:12.468992Z node 10 :CMS DEBUG: sentinel.cpp:960: [Sentinel] [Main] State was updated in 0.000000s 2025-05-07T09:16:12.469268Z node 10 :CMS NOTICE: sentinel.cpp:1039: [Sentinel] [Main] PDisk status changed: pdiskId# 10:10, status# ACTIVE, required status# FAULTY, reason# Forced status, dry run# 0 2025-05-07T09:16:12.469344Z node 10 :CMS DEBUG: sentinel.cpp:1076: [Sentinel] [Main] Change pdisk status: requestsSize# 1 2025-05-07T09:16:12.469558Z node 10 :CMS DEBUG: cms_tx_log_and_send.cpp:19: TTxLogAndSend Execute 2025-05-07T09:16:12.469835Z node 10 :CMS DEBUG: sentinel.cpp:1202: [Sentinel] [Main] Handle TEvBlobStorage::TEvControllerConfigResponse: response# Status { Success: true } Success: true, cookie# 3 2025-05-07T09:16:12.469909Z node 10 :CMS NOTICE: sentinel.cpp:1226: [Sentinel] [Main] PDisk status has been changed: pdiskId# 10:10
>> KqpIndexes::NullInIndexTableNoDataRead [GOOD]
>> KqpIndexes::NullInIndexTable
>> TFlatTest::Mix_DML_DDL [GOOD]
>> TFlatTest::OutOfDiskSpace [GOOD]
>> KqpIndexes::UpdateIndexSubsetPk [GOOD]
>> KqpIndexes::UpdateOnReadColumns
>> TCmsTest::VDisksEvictionShouldFailOnUnsupportedAction [GOOD]
>> TCmsTest::VDisksEvictionShouldFailOnMultipleActions
>> test_tablet.py::TestMassiveKills::test_tablets_are_ok_after_many_kills [GOOD]
>> TCmsTest::StateRequestNode
>> TCmsTest::TestForceRestartModeDisconnects [GOOD]
>> TCmsTest::StateStorageTwoRings
|94.4%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest
------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TFlatTest::OutOfDiskSpace [GOOD]
Test command err: 2025-05-07T09:16:13.814357Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501630138947299549:2201];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:16:13.814743Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/004a18/r3tmp/tmpeRFlwr/pdisk_1.dat 2025-05-07T09:16:14.464985Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:16:14.470433Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:16:14.470532Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:16:14.473561Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:22862 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-05-07T09:16:14.822403Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:14.852901Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:15.039854Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... proxy error code: Unknown error:
: Error: Resolve failed for table: /dc-1/Table, error: column 'value' not exist, code: 200400 2025-05-07T09:16:15.077734Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:15.101639Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:15.133721Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 waiting... proxy error code: Unknown error:
:5:24: Error: At function: AsList
:5:32: Error: At function: SetResult
:4:27: Error: At function: SelectRow
:4:27: Error: Mismatch of key columns count for table [/dc-1/Table], expected: 2, but got 1., code: 2028
>> AsyncIndexChangeCollector::UpsertToSameKey
>> CdcStreamChangeCollector::InsertSingleRow
>> CdcStreamChangeCollector::UpsertToSameKey
>> TCmsTest::VDisksEvictionShouldFailOnMultipleActions [GOOD]
>> SystemView::Nodes
>> DbCounters::TabletsSimple
>> SystemView::ShowCreateTableDefaultLiteral
>> KqpIndexes::InnerJoinSecondaryIndexLookupAndRightTablePredicateNonIndexColumn [GOOD]
>> KqpIndexes::IndexTopSortPushDown
>> TCmsTest::StateStorageTwoRings [GOOD]
>> TCmsTest::SysTabletsNode
|94.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/cms/ut/unittest >> TCmsTest::VDisksEvictionShouldFailOnMultipleActions [GOOD]
>> KqpIndexes::DoUpsertWithoutIndexUpdate+UniqIndex-UseSink
------- [TM] {asan, default-linux-x86_64, release} ydb/tests/olap/ttl_tiering/py3test >> ttl_delete_s3.py::TestDeleteS3Ttl::test_delete_s3_tiering
2025-05-07 09:16:02,375 WARNING devtools.ya.test.programs.test_tool.run_test.run_test: Wrapper execution timed out
2025-05-07 09:16:02,972 WARNING devtools.ya.test.programs.test_tool.run_test.run_test: Wrapper has overrun 600 secs timeout. Process tree before termination:
pid rss ref pdirt
413831 812M 806M 728M ydb-tests-olap-ttl_tiering --basetemp /home/runner/.ya/build/build_root/zvgn/00458b/tmp --capture no -c pkg:library.python.pytest:pytest.yatest.ini -p no:factor --doctest-modu
415669 2.9G 2.9G 2.3G ├─ ydbd server --suppress-version-check --yaml-config=/home/runner/.ya/build/build_root/zvgn/00458b/ydb/tests/olap/ttl_tiering/test-results/py3test/testing_out_stuff/chunk3
416897 497M 484M 464M └─ moto_server s3 --port 11518
Test command err:
  File "library/python/pytest/main.py", line 101, in main
    rc = pytest.main(
  File "contrib/python/pytest/py3/_pytest/config/__init__.py", line 175, in main
    ret: Union[ExitCode, int] = config.hook.pytest_cmdline_main(
  File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 513, in __call__
    return self._hookexec(self.name, self._hookimpls.copy(), kwargs, firstresult)
  File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120, in _hookexec
    return self._inner_hookexec(hook_name, methods, kwargs, firstresult)
  File "contrib/python/pluggy/py3/pluggy/_callers.py", line 103, in _multicall
    res = hook_impl.function(*args)
  File "contrib/python/pytest/py3/_pytest/main.py", line 320, in pytest_cmdline_main
    return wrap_session(config, _main)
  File "contrib/python/pytest/py3/_pytest/main.py", line 273, in wrap_session
    session.exitstatus = doit(config, session) or 0
  File "contrib/python/pytest/py3/_pytest/main.py", line 327, in _main
    config.hook.pytest_runtestloop(session=session)
  File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 513, in __call__
    return self._hookexec(self.name, self._hookimpls.copy(), kwargs, firstresult)
  File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120, in _hookexec
    return self._inner_hookexec(hook_name, methods, kwargs, firstresult)
  File "contrib/python/pluggy/py3/pluggy/_callers.py", line 103, in _multicall
    res = hook_impl.function(*args)
  File "contrib/python/pytest/py3/_pytest/main.py", line 352, in pytest_runtestloop
    item.config.hook.pytest_runtest_protocol(item=item, nextitem=nextitem)
  File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 513, in __call__
    return self._hookexec(self.name, self._hookimpls.copy(), kwargs, firstresult)
  File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120, in _hookexec
    return self._inner_hookexec(hook_name, methods, kwargs, firstresult)
  File "contrib/python/pluggy/py3/pluggy/_callers.py", line 103, in _multicall
    res = hook_impl.function(*args)
  File "contrib/python/pytest/py3/_pytest/runner.py", line 115, in pytest_runtest_protocol
    runtestprotocol(item, nextitem=nextitem)
  File "contrib/python/pytest/py3/_pytest/runner.py", line 134, in runtestprotocol
    reports.append(call_and_report(item, "call", log))
  File "contrib/python/pytest/py3/_pytest/runner.py", line 223, in call_and_report
    call = call_runtest_hook(item, when, **kwds)
  File "contrib/python/pytest/py3/_pytest/runner.py", line 262, in call_runtest_hook
    return CallInfo.from_call(
  File "contrib/python/pytest/py3/_pytest/runner.py", line 342, in from_call
    result: Optional[TResult] = func()
  File "contrib/python/pytest/py3/_pytest/runner.py", line 263, in <lambda>
    lambda: ihook(item=item, **kwds), when=when, reraise=reraise
  File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 513, in __call__
    return self._hookexec(self.name, self._hookimpls.copy(), kwargs, firstresult)
  File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120, in _hookexec
    return self._inner_hookexec(hook_name, methods, kwargs, firstresult)
  File "contrib/python/pluggy/py3/pluggy/_callers.py", line 103, in _multicall
    res = hook_impl.function(*args)
  File "contrib/python/pytest/py3/_pytest/runner.py", line 170, in pytest_runtest_call
    item.runtest()
  File "contrib/python/pytest/py3/_pytest/python.py", line 1844, in runtest
    self.ihook.pytest_pyfunc_call(pyfuncitem=self)
  File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 513, in __call__
    return self._hookexec(self.name, self._hookimpls.copy(), kwargs, firstresult)
  File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120, in _hookexec
    return self._inner_hookexec(hook_name, methods, kwargs, firstresult)
  File "contrib/python/pluggy/py3/pluggy/_callers.py", line 103, in _multicall
    res = hook_impl.function(*args)
  File "library/python/pytest/plugins/ya.py", line 563, in pytest_pyfunc_call
    pyfuncitem.retval = testfunction(**testargs)
  File "ydb/tests/olap/ttl_tiering/ttl_delete_s3.py", line 252, in test_delete_s3_tiering
    self.ydb_client.query("""
  File "ydb/tests/olap/common/ydb_client.py", line 24, in query
    return self.session_pool.execute_with_retries(statement)
  File "contrib/python/ydb/py3/ydb/query/pool.py", line 204, in execute_with_retries
    return retry_operation_sync(wrapped_callee, retry_settings)
  File "contrib/python/ydb/py3/ydb/retries.py", line 133, in retry_operation_sync
    for next_opt in opt_generator:
  File "contrib/python/ydb/py3/ydb/retries.py", line 94, in retry_operation_impl
    result = YdbRetryOperationFinalResult(callee(*args, **kwargs))
  File "contrib/python/ydb/py3/ydb/query/pool.py", line 202, in wrapped_callee
    return [result_set for result_set in it]
  File "contrib/python/ydb/py3/ydb/_utilities.py", line 173, in __next__
    return self._next()
  File "contrib/python/ydb/py3/ydb/_utilities.py", line 164, in _next
    res = self.wrapper(next(self.it))
  File "contrib/python/grpcio/py3/grpc/_channel.py", line 475, in __next__
    return self._next()
  File "contrib/python/grpcio/py3/grpc/_channel.py", line 872, in _next
    _common.wait(self._state.condition.wait, _response_ready)
  File "contrib/python/grpcio/py3/grpc/_common.py", line 150, in wait
    _wait_once(wait_fn, MAXIMUM_WAIT_TIMEOUT, spin_cb)
  File "contrib/python/grpcio/py3/grpc/_common.py", line 112, in _wait_once
    wait_fn(timeout=timeout)
  File "contrib/tools/python3/Lib/threading.py", line 359, in wait
    gotit = waiter.acquire(True, timeout)
  File "library/python/pytest/plugins/ya.py", line 344, in _graceful_shutdown
    traceback.print_stack(file=sys.stderr)
Traceback (most recent call last):
  File "library/python/testing/yatest_common/yatest/common/process.py", line 384, in wait
    wait_for(
  File "library/python/testing/yatest_common/yatest/common/process.py", line 764, in wait_for
    raise TimeoutError(truncate(message, MAX_MESSAGE_LEN))
yatest.common.process.TimeoutError: ...d_root/zvgn/00458b/tmp', '--capture', 'no', '-c', 'pkg:library.python.pytest:pytest.yatest.ini', '-p', 'no:factor', '--doctest-modules', '--ya-trace', '/home/runner/.ya/build/build_root/zvgn/00458b/ydb/tests/olap/ttl_tiering/test-results/py3test/testing_out_stuff/chunk3/ytest.report.trace', '--build-root', '/home/runner/.ya/build/build_root/zvgn/00458b', '--source-root', '/home/runner/.ya/build/build_root/zvgn/00458b/environment/arcadia', '--output-dir', '/home/runner/.ya/build/build_root/zvgn/00458b/ydb/tests/olap/ttl_tiering/test-results/py3test/testing_out_stuff/chunk3/testing_out_stuff', '--durations', '0', '--project-path', 'ydb/tests/olap/ttl_tiering', '--test-tool-bin', '/home/runner/.ya/tools/v4/8580453620/test_tool', '--ya-version', '2', '--collect-cores', '--sanitizer-extra-checks', '--build-type', 'release', '--tb', 'short', '--modulo', '10', '--modulo-index', '3', '--partition-mode', 'SEQUENTIAL', '--dep-root', 'ydb/tests/olap/ttl_tiering', '--flags', 'APPLE_SDK_LOCAL=yes', '--flags', 'CFLAGS=-fno-omit-frame-pointer -Wno-unknown-argument', '--flags', 'DEBUGINFO_LINES_ONLY=yes', '--flags', 'DISABLE_FLAKE8_MIGRATIONS=yes', '--flags', 'OPENSOURCE=yes', '--flags', 'SANITIZER_TYPE=address', '--flags', 'TESTS_REQUESTED=yes', '--flags', 'USE_AIO=static', '--flags', 'USE_CLANG_CL=yes', '--flags', 'USE_EAT_MY_DATA=yes', '--flags', 'USE_ICONV=static', '--flags', 'USE_IDN=static', '--flags', 'USE_PREBUILT_TOOLS=no', '--sanitize', 'address']' stopped by 600 seconds timeout
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
  File "devtools/ya/test/programs/test_tool/run_test/run_test.py", line 1738, in main
    res.wait(check_exit_code=False, timeout=run_timeout, on_timeout=timeout_callback)
  File "library/python/testing/yatest_common/yatest/common/process.py", line 398, in wait
    raise ExecutionTimeoutError(self, str(e))
yatest.common.process.ExecutionTimeoutError: (("...d_root/zvgn/00458b/tmp', '--capture', 'no', '-c', 'pkg:library.python.pytest:pytest.yatest.ini', '-p', 'no:factor', '--doctest-modules', '--ya-trace', '/home/runner/.ya/build/build_root/zvgn/00458b/ydb/tests/olap/ttl_tiering/test-results/py3test/testing_out_stuff/chunk3/ytest.report.trace', '--build-root', '/home/runner/.ya/build/build_root/zvgn/00458b', '--source-root', '/home/runner/.ya/build/build_root/zvgn/00458b/environment/arcadia', '--output-dir', '/home/runner/.ya/build/build_root/zvgn/00458b/ydb/tests/olap/ttl_tiering/test-results/py3test/testing_out_stuff/chunk3/testing_out_stuff', '--durations', '0', '--project-path', 'ydb/tests/olap/ttl_tiering', '--test-tool-bin', '/home/runner/.ya/tools/v4/8580453620/test_tool', '--ya-version', '2', '--collect-cores', '--sanitizer-extra-checks', '--build-type', 'release', '--tb', 'short', '--modulo', '10', '--modulo-index', '3', '--partition-mode', 'SEQUENTIAL', '--dep-root', 'ydb/tests/olap/ttl_tiering', '--flags', 'APPLE_SDK_LOCAL=yes', '--flags', 'CFLAGS=-fno-omit-frame-pointer -Wno-unknown-argument', '--flags', 'DEBUGINFO_LINES_ONLY=yes', '--flags', 'DISABLE_FLAKE8_MIGRATIONS=yes', '--flags', 'OPENSOURCE=yes', '--flags', 'SANITIZER_TYPE=address', '--flags', 'TESTS_REQUESTED=yes', '--flags', 'USE_AIO=static', '--flags', 'USE_CLANG_CL=yes', '--flags', 'USE_EAT_MY_DATA=yes', '--flags', 'USE_ICONV=static', '--flags', 'USE_IDN=static', '--flags', 'USE_PREBUILT_TOOLS=no', '--sanitize', 'address']' stopped by 600 seconds timeout",), {})
>> TLocksTest::GoodLock [GOOD]
>> TLocksTest::GoodNullLock
>> ShowCreateView::WithTablePathPrefix
>> TCmsTest::StateRequestNode [GOOD]
>> TCmsTest::StateRequestUnknownNode
>> KqpMultishardIndex::DataColumnUpsertMixedSemantic [GOOD]
>> KqpMultishardIndex::DataColumnWrite+UseSink
>> KqpUniqueIndex::InsertNullInComplexFkDuplicate [GOOD]
>> KqpIndexes::DeleteByIndex [GOOD]
>> SystemView::AuthGroups_Access
>> KqpUniqueIndex::ReplaceFkAlreadyExist [GOOD]
>> KqpUniqueIndex::ReplaceFkDuplicate
>> TCmsTest::SysTabletsNode [GOOD]
>> KqpUniqueIndex::UpdateOnHidenChanges-DataColumn
>> KqpIndexes::CheckUpsertNonEquatableType-NotNull [GOOD]
>> KqpIndexes::CreateTableWithExplicitAsyncIndexSQL
------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpUniqueIndex::InsertNullInComplexFkDuplicate [GOOD]
Test command err: Trying to start YDB, gRPC: 15958, MsgBus: 10645 2025-05-07T09:16:04.872406Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501630097741716303:2147];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:16:04.888102Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003bda/r3tmp/tmpm2cm5N/pdisk_1.dat 2025-05-07T09:16:05.462853Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:16:05.469034Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:16:05.469168Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:16:05.475996Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 15958, node 1 2025-05-07T09:16:05.684276Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:16:05.684293Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:16:05.684301Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:16:05.684401Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:10645 TClient is connected to server localhost:10645 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:16:06.505396Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:06.523925Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T09:16:06.533041Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:06.765310Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:06.986122Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:07.076417Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:09.084514Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501630119216554358:2408], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:09.084661Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:09.437142Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T09:16:09.479904Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T09:16:09.566167Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T09:16:09.616405Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T09:16:09.664775Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T09:16:09.712406Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T09:16:09.792491Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T09:16:09.872039Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501630097741716303:2147];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:16:09.872133Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:16:09.880293Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501630119216555020:2471], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:09.880354Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:09.880820Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501630119216555025:2474], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:09.884865Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-05-07T09:16:09.896948Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501630119216555027:2475], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T09:16:09.975379Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501630119216555080:3425] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:16:11.357171Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480 waiting... Trying to start YDB, gRPC: 26556, MsgBus: 11154 2025-05-07T09:16:15.035336Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7501630146748431972:2059];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:16:15.035375Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003bda/r3tmp/tmpHiQo40/pdisk_1.dat 2025-05-07T09:16:15.175279Z node 2 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:16:15.190557Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:16:15.190634Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:16:15.192377Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 26556, node 2 2025-05-07T09:16:15.316461Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:16:15.316488Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:16:15.316497Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:16:15.316619Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:11154 TClient is connected to server localhost:11154 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-05-07T09:16:15.803326Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:15.811549Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-05-07T09:16:15.824486Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:15.911273Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-05-07T09:16:16.130020Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 2025-05-07T09:16:16.217535Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:18.802133Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7501630159633335524:2406], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:18.802249Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:18.862524Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-05-07T09:16:18.901454Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-05-07T09:16:18.944925Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-05-07T09:16:18.996746Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-05-07T09:16:19.036319Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-05-07T09:16:19.073963Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-05-07T09:16:19.114484Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-05-07T09:16:19.187458Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7501630163928303474:2470], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:19.187547Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:19.198433Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7501630163928303479:2473], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:19.202747Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-05-07T09:16:19.217160Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7501630163928303481:2474], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-05-07T09:16:19.316855Z node 2 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [2:7501630163928303532:3418] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:16:20.038261Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7501630146748431972:2059];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:16:20.038325Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:16:20.369834Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480 waiting... >> TCmsTest::StateRequestUnknownNode [GOOD] >> TCmsTest::StateRequestUnknownMultipleNodes |94.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/cms/ut/unittest >> TCmsTest::SysTabletsNode [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpIndexes::DeleteByIndex [GOOD] Test command err: Trying to start YDB, gRPC: 4548, MsgBus: 27314 2025-05-07T09:15:56.887644Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501630067025819630:2131];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:15:56.899687Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003bef/r3tmp/tmpYKzraJ/pdisk_1.dat 2025-05-07T09:15:57.342743Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:15:57.347057Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:15:57.347167Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:15:57.351145Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 4548, node 1 2025-05-07T09:15:57.538571Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:15:57.538598Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:15:57.538607Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:15:57.538740Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:27314 TClient is connected to server localhost:27314 WaitRootIsUp 'Root'... 
TClient::Ls request: Root
TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED)
WaitRootIsUp 'Root' success.
2025-05-07T09:15:58.385198Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480
waiting...
2025-05-07T09:15:58.409573Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480
2025-05-07T09:15:58.418870Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480
waiting...
2025-05-07T09:15:58.617772Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480
waiting...
2025-05-07T09:15:58.816921Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480
waiting...
2025-05-07T09:15:58.918721Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480
waiting...
2025-05-07T09:16:01.186879Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501630088500657686:2407], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T09:16:01.186986Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T09:16:01.667601Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480
2025-05-07T09:16:01.712098Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480
2025-05-07T09:16:01.784362Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480
2025-05-07T09:16:01.835277Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480
2025-05-07T09:16:01.890151Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501630067025819630:2131];send_to=[0:7307199536658146131:7762515];
2025-05-07T09:16:01.890228Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout;
2025-05-07T09:16:01.917593Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480
2025-05-07T09:16:01.959482Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480
2025-05-07T09:16:02.012870Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480
2025-05-07T09:16:02.091964Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501630092795625642:2471], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T09:16:02.092079Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T09:16:02.092362Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501630092795625647:2474], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T09:16:02.096967Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480
2025-05-07T09:16:02.110082Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501630092795625649:2475], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking }
2025-05-07T09:16:02.167794Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501630092795625703:3426] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 }
2025-05-07T09:16:03.341020Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480
2025-05-07T09:16:03.471282Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501630097090593537:3745] txid# 281474976710673, issues: { message: "Check failed: path: \'/Root/TestTable\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 17], type: EPathTypeTable, state: EPathStateNoChanges)" severity: 1 }
2025-05-07T09:16:04.556496Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill
2025-05-07T09:16:04.591725Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill
Trying to start YDB, gRPC: 3418, MsgBus: 4303
2025-05-07T09:16:05.470924Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7501630104369094563:2220];send_to=[0:7307199536658146131:7762515];
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003bef/r3tmp/tmpNgMCoO/pdisk_1.dat
2025-05-07T09:16:05.528241Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message;
2025-05-07T09:16:05.609756Z node 2 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded
2025-05-07T09:16:05.627941Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2025-05-07T09:16:05.628017Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting
2025-05-07T09:16:05.629656Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected
TServer::EnableGrpc on GrpcPort 3418, node 2
2025-05-07T09:16:05.798614Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe)
2025-05-07T09:16:05.798637Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe)
2025-05-07T09:16:05.798645Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe)
2025-05-07T09:16:05.798764Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration
TClient is connected to server localhost:4303
TClient is connected to server localhost:4303
WaitRootIsUp 'Root'...
TClient::Ls request: Root
TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus ...
deEffects ApplyOnComplete at tablet# 72057594046644480 2025-05-07T09:16:19.513112Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 281474976715672:2, at schemeshard: 72057594046644480 2025-05-07T09:16:19.513132Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-05-07T09:16:19.513144Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:275: Activate send for 281474976715672:2 2025-05-07T09:16:19.513220Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:630: Send to actor: [3:7501630162532352851:2523] msg type: 269552132 msg: NKikimrTxDataShard.TEvSchemaChangedResult TxId: 281474976715672 at schemeshard: 72057594046644480 2025-05-07T09:16:19.513290Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 281474976715672:0, at schemeshard: 72057594046644480 2025-05-07T09:16:19.513302Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-05-07T09:16:19.513311Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:275: Activate send for 281474976715672:0 2025-05-07T09:16:19.513338Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:630: Send to actor: [3:7501630162532352849:2522] msg type: 269552132 msg: NKikimrTxDataShard.TEvSchemaChangedResult TxId: 281474976715672 at schemeshard: 72057594046644480 2025-05-07T09:16:19.513388Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 2146435072, Sender [3:7501630136762546727:2149], Recipient [3:7501630136762546727:2149]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-05-07T09:16:19.513408Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4857: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-05-07T09:16:19.513439Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 281474976715672:2, at schemeshard: 72057594046644480 2025-05-07T09:16:19.513468Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046644480] TDone opId# 281474976715672:2 ProgressState 2025-05-07T09:16:19.513537Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-05-07T09:16:19.513556Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976715672:2 progress is 2/3 2025-05-07T09:16:19.513571Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 281474976715672 ready parts: 2/3 2025-05-07T09:16:19.513588Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976715672:2 progress is 2/3 2025-05-07T09:16:19.513603Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 281474976715672 ready parts: 2/3 2025-05-07T09:16:19.513621Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 281474976715672, ready parts: 2/3, is published: true 2025-05-07T09:16:19.513762Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 2146435072, Sender [3:7501630136762546727:2149], Recipient 
[3:7501630136762546727:2149]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-05-07T09:16:19.513781Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4857: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-05-07T09:16:19.513807Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 281474976715672:0, at schemeshard: 72057594046644480 2025-05-07T09:16:19.513820Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046644480] TDone opId# 281474976715672:0 ProgressState 2025-05-07T09:16:19.513865Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-05-07T09:16:19.513875Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976715672:0 progress is 3/3 2025-05-07T09:16:19.513883Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 281474976715672 ready parts: 3/3 2025-05-07T09:16:19.513898Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976715672:0 progress is 3/3 2025-05-07T09:16:19.513906Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 281474976715672 ready parts: 3/3 2025-05-07T09:16:19.513916Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 281474976715672, ready parts: 3/3, is published: true 2025-05-07T09:16:19.514529Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [3:7501630162532352812:2519] message: TxId: 281474976715672 2025-05-07T09:16:19.514564Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 281474976715672 ready parts: 3/3 2025-05-07T09:16:19.514588Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 281474976715672:0 2025-05-07T09:16:19.514598Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 281474976715672:0 2025-05-07T09:16:19.514705Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 17] was 4 2025-05-07T09:16:19.514720Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 281474976715672:1 2025-05-07T09:16:19.514727Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 281474976715672:1 2025-05-07T09:16:19.514743Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 18] was 3 2025-05-07T09:16:19.514751Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 281474976715672:2 2025-05-07T09:16:19.514756Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 281474976715672:2 2025-05-07T09:16:19.514781Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 19] was 3 2025-05-07T09:16:19.515040Z node 3 
:FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-05-07T09:16:19.515115Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269877764, Sender [3:7501630162532352919:3663], Recipient [3:7501630136762546727:2149]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-05-07T09:16:19.515128Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4938: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-05-07T09:16:19.515137Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5761: Server pipe is reset, at schemeshard: 72057594046644480 2025-05-07T09:16:19.515159Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269877764, Sender [3:7501630162532352924:3668], Recipient [3:7501630136762546727:2149]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-05-07T09:16:19.515165Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4938: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-05-07T09:16:19.515170Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5761: Server pipe is reset, at schemeshard: 72057594046644480 2025-05-07T09:16:19.515181Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-05-07T09:16:19.515238Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:630: Send to actor: [3:7501630162532352812:2519] msg type: 271124998 msg: NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976715672 at schemeshard: 72057594046644480 2025-05-07T09:16:19.517000Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269877764, Sender [3:7501630162532352830:3600], Recipient [3:7501630136762546727:2149]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-05-07T09:16:19.517017Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4938: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-05-07T09:16:19.517026Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5761: Server pipe is reset, at schemeshard: 72057594046644480 2025-05-07T09:16:19.539687Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271125000, Sender [0:0:0], Recipient [3:7501630136762546727:2149]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T09:16:19.539729Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4848: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T09:16:19.539771Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271124999, Sender [3:7501630136762546727:2149], Recipient [3:7501630136762546727:2149]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T09:16:19.539791Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4847: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T09:16:20.540447Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271125000, Sender [0:0:0], Recipient [3:7501630136762546727:2149]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T09:16:20.540492Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4848: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T09:16:20.540534Z node 3 
:FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271124999, Sender [3:7501630136762546727:2149], Recipient [3:7501630136762546727:2149]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T09:16:20.540552Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4847: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T09:16:21.542294Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271125000, Sender [0:0:0], Recipient [3:7501630136762546727:2149]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T09:16:21.542328Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4848: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T09:16:21.542371Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271124999, Sender [3:7501630136762546727:2149], Recipient [3:7501630136762546727:2149]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T09:16:21.542397Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4847: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime >> KqpIndexes::SecondaryIndexUsingInJoin+UseStreamJoin [GOOD] >> KqpIndexes::SecondaryIndexUsingInJoin-UseStreamJoin >> TSchemeshardBackgroundCompactionTest::ShouldNotCompactServerless [GOOD] >> TSchemeshardBackgroundCompactionTest::ShouldCompactServerless >> KqpIndexes::SecondaryIndexReplace-UseSink [GOOD] >> KqpMultishardIndex::DataColumnWriteNull [GOOD] >> KqpMultishardIndex::DuplicateUpsert >> TCmsTest::TestTwoOrMoreDisksFromGroupAtTheSameRequestBlock42 >> SystemView::VSlotsFields >> TCmsTest::StateRequestUnknownMultipleNodes [GOOD] >> TCmsTest::StateStorageAvailabilityMode >> KqpIndexes::NullInIndexTable [GOOD] >> KqpIndexes::MultipleSecondaryIndexWithSameComulns-UseSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpIndexes::SecondaryIndexReplace-UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 18011, MsgBus: 4660 2025-05-07T09:15:59.125463Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501630076690524416:2062];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:15:59.125605Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003bdf/r3tmp/tmpDlursB/pdisk_1.dat 2025-05-07T09:15:59.847012Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:15:59.849195Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:15:59.849296Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:15:59.857519Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 18011, node 1 2025-05-07T09:16:00.038783Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:16:00.038814Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to 
initialize from file: (empty maybe) 2025-05-07T09:16:00.038830Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:16:00.039021Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:4660 TClient is connected to server localhost:4660 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:16:00.819169Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:00.862311Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T09:16:00.876524Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:01.058733Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:01.293745Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:01.383414Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:03.727157Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501630093870395249:2407], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:03.727333Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:04.109045Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T09:16:04.122958Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501630076690524416:2062];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:16:04.123043Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:16:04.161463Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T09:16:04.218673Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T09:16:04.257757Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T09:16:04.297298Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T09:16:04.344171Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T09:16:04.386380Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T09:16:04.478146Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501630098165363205:2471], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:04.478253Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:04.482371Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501630098165363210:2474], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:04.491232Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-05-07T09:16:04.513026Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501630098165363212:2475], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T09:16:04.610345Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501630098165363265:3424] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:16:05.843701Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269877761, Sender [1:7501630102460330853:3605], Recipient [1:7501630076690524851:2198]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-05-07T09:16:05.843749Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4937: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-05-07T09:16:05.843761Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5713: Pipe server connected, at tablet: 72057594046644480 2025-05-07T09:16:05.843824Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271122432, Sender [1:7501630102460330849:3602], Recipient [1:7501630076690524851:2198]: {TEvModifySchemeTransaction txid# 281474976710672 TabletId# 72057594046644480} 2025-05-07T09:16:05.843839Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4851: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-05-07T09:16:05.897920Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/Root" OperationType: ESchemeOpCreateIndexedTable CreateIndexedTable { TableDescription { Name: "SharedHouseholds" Columns { Name: "guest_huid" Type: "Uint64" NotNull: false } Columns { Name: "guest_id" Type: "Uint64" NotNull: false } Columns { Name: "owner_huid" Type: "Uint64" NotNull: false } Columns { Name: "owner_id" Type: "Uint64" NotNull: false } Columns { Name: "household_id" Type: "String" NotNull: false } KeyColumnNames: "guest_huid" KeyColumnNames: "owner_huid" KeyColumnNames: "household_id" PartitionConfig { ColumnFamilies { Id: 0 StorageConfig { SysLog { PreferredPoolKind: "test" } Log { PreferredPoolKind: "test" } Data { PreferredPoolKind: "test" } } } } Temporary: false } IndexDescription { Name: "shared_households_owner_huid" KeyColumnNames: "owner_huid" Type: EIndexTypeGlobal State: EIndexStateReady } } } TxId: 281474976710672 TabletId: 72057594046644480 PeerName: "" , at schemeshard: 72057594046644480 2025-05-07T09:16:05.898648Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_create_indexed_table.cpp:101: TCreateTableIndex construct operation table path: /Root/SharedHouseholds domain path id: [OwnerId: 72057594046644480, LocalPathId: 1] domain path: /Root shardsToCreate: 2 GetShardsInside: 34 MaxShards: 200000 2025-05-07T09:16:05.899143Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_table.cpp:426: TCreateTable Propose, path: /Root/SharedHouseholds, opId: 281474976710672:0, at schemeshard: 72057594046644480 2025-05-07T09:16:05.899313Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_create_table.cpp:433: TCreateTable Propose, path: /Root/SharedHouseholds, ... 
hemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-05-07T09:16:22.064457Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 281474976715672:0, at schemeshard: 72057594046644480 2025-05-07T09:16:22.064470Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-05-07T09:16:22.064550Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 281474976715672:2, at schemeshard: 72057594046644480 2025-05-07T09:16:22.064559Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-05-07T09:16:22.064775Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 281474976715672:2, at schemeshard: 72057594046644480 2025-05-07T09:16:22.064786Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-05-07T09:16:22.064797Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:275: Activate send for 281474976715672:2 2025-05-07T09:16:22.064856Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:630: Send to actor: [3:7501630176197844320:2509] msg type: 269552132 msg: NKikimrTxDataShard.TEvSchemaChangedResult TxId: 281474976715672 at schemeshard: 72057594046644480 2025-05-07T09:16:22.064947Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 2146435072, Sender [3:7501630150428038278:2153], Recipient [3:7501630150428038278:2153]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-05-07T09:16:22.064964Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4857: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-05-07T09:16:22.065010Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 281474976715672:2, at schemeshard: 72057594046644480 2025-05-07T09:16:22.065035Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046644480] TDone opId# 281474976715672:2 ProgressState 2025-05-07T09:16:22.065112Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-05-07T09:16:22.065126Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976715672:2 progress is 2/3 2025-05-07T09:16:22.065139Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 281474976715672 ready parts: 2/3 2025-05-07T09:16:22.065157Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976715672:2 progress is 2/3 2025-05-07T09:16:22.065168Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 281474976715672 ready parts: 2/3 2025-05-07T09:16:22.065183Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 281474976715672, ready parts: 2/3, is published: true 2025-05-07T09:16:22.065437Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 281474976715672:0, at schemeshard: 72057594046644480 
2025-05-07T09:16:22.065446Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-05-07T09:16:22.065455Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:275: Activate send for 281474976715672:0 2025-05-07T09:16:22.065498Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:630: Send to actor: [3:7501630176197844307:2508] msg type: 269552132 msg: NKikimrTxDataShard.TEvSchemaChangedResult TxId: 281474976715672 at schemeshard: 72057594046644480 2025-05-07T09:16:22.065581Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 2146435072, Sender [3:7501630150428038278:2153], Recipient [3:7501630150428038278:2153]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-05-07T09:16:22.065605Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4857: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-05-07T09:16:22.065657Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 281474976715672:0, at schemeshard: 72057594046644480 2025-05-07T09:16:22.065674Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046644480] TDone opId# 281474976715672:0 ProgressState 2025-05-07T09:16:22.065737Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-05-07T09:16:22.065747Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976715672:0 progress is 3/3 2025-05-07T09:16:22.065756Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 281474976715672 ready parts: 3/3 2025-05-07T09:16:22.065774Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976715672:0 progress is 3/3 2025-05-07T09:16:22.065783Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 281474976715672 ready parts: 3/3 2025-05-07T09:16:22.065794Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 281474976715672, ready parts: 3/3, is published: true 2025-05-07T09:16:22.065833Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [3:7501630171902876990:2505] message: TxId: 281474976715672 2025-05-07T09:16:22.065856Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 281474976715672 ready parts: 3/3 2025-05-07T09:16:22.065879Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 281474976715672:0 2025-05-07T09:16:22.065889Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 281474976715672:0 2025-05-07T09:16:22.068045Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 17] was 4 2025-05-07T09:16:22.068089Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 281474976715672:1 2025-05-07T09:16:22.068098Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 
281474976715672:1 2025-05-07T09:16:22.068120Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 18] was 3 2025-05-07T09:16:22.068129Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 281474976715672:2 2025-05-07T09:16:22.068137Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 281474976715672:2 2025-05-07T09:16:22.068179Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 19] was 3 2025-05-07T09:16:22.068550Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-05-07T09:16:22.068656Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269877764, Sender [3:7501630176197844391:3662], Recipient [3:7501630150428038278:2153]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-05-07T09:16:22.068675Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4938: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-05-07T09:16:22.068688Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5761: Server pipe is reset, at schemeshard: 72057594046644480 2025-05-07T09:16:22.068720Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269877764, Sender [3:7501630176197844390:3661], Recipient [3:7501630150428038278:2153]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-05-07T09:16:22.068731Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4938: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-05-07T09:16:22.068739Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5761: Server pipe is reset, at schemeshard: 72057594046644480 2025-05-07T09:16:22.068754Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-05-07T09:16:22.068817Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:630: Send to actor: [3:7501630171902876990:2505] msg type: 271124998 msg: NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976715672 at schemeshard: 72057594046644480 2025-05-07T09:16:22.069570Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269877764, Sender [3:7501630176197844301:3598], Recipient [3:7501630150428038278:2153]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-05-07T09:16:22.069597Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4938: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-05-07T09:16:22.069608Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5761: Server pipe is reset, at schemeshard: 72057594046644480 2025-05-07T09:16:22.323044Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271125000, Sender [0:0:0], Recipient [3:7501630150428038278:2153]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T09:16:22.323078Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4848: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T09:16:22.323112Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 
271124999, Sender [3:7501630150428038278:2153], Recipient [3:7501630150428038278:2153]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T09:16:22.323124Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4847: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T09:16:23.326599Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271125000, Sender [0:0:0], Recipient [3:7501630150428038278:2153]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T09:16:23.326637Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4848: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T09:16:23.326697Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271124999, Sender [3:7501630150428038278:2153], Recipient [3:7501630150428038278:2153]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T09:16:23.326713Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4847: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime >> CdcStreamChangeCollector::InsertSingleRow [GOOD] >> CdcStreamChangeCollector::InsertSingleUuidRow >> CdcStreamChangeCollector::UpsertToSameKey [GOOD] >> CdcStreamChangeCollector::UpsertToSameKeyWithImages >> AsyncIndexChangeCollector::UpsertToSameKey [GOOD] >> AsyncIndexChangeCollector::UpsertWithoutIndexedValue >> SystemView::ShowCreateTablePartitionSettings >> TCmsTest::TestTwoOrMoreDisksFromGroupAtTheSameRequestBlock42 [GOOD] >> TCmsTest::TestTwoOrMoreDisksFromGroupAtTheSameRequestMirror3dc >> TSchemeshardBackgroundCompactionTest::SchemeshardShouldRequestCompactionsSchemeshardRestart [GOOD] >> TSchemeshardBackgroundCompactionTest::SchemeshardShouldRequestCompactionsConfigRequest >> KqpIndexes::MultipleSecondaryIndex+UseSink >> test_copy_table.py::TestCopyTable::test_copy_table[table_ttl_Uint32-pk_types14-all_types14-index14-Uint32--] [GOOD] >> TCmsTest::StateStorageAvailabilityMode [GOOD] >> KqpVectorIndexes::OrderByCosineDistanceNotNullableLevel1 [GOOD] >> KqpVectorIndexes::OrderByCosineDistanceNotNullableLevel2 >> KqpPrefixedVectorIndexes::OrderByCosineDistanceNotNullableLevel1 [GOOD] |94.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/cms/ut/unittest >> TCmsTest::StateStorageAvailabilityMode [GOOD] >> test_copy_table.py::TestCopyTable::test_copy_table[table_ttl_DyNumber-pk_types13-all_types13-index13-DyNumber--] [GOOD] >> test_copy_table.py::TestCopyTable::test_copy_table[table_ttl_Timestamp-pk_types17-all_types17-index17-Timestamp--] [GOOD] >> test_copy_table.py::TestCopyTable::test_copy_table[table_ttl_Uint64-pk_types15-all_types15-index15-Uint64--] [GOOD] >> test_copy_table.py::TestCopyTable::test_copy_table[table_ttl_Date-pk_types18-all_types18-index18-Date--] [GOOD] >> test_copy_table.py::TestCopyTable::test_copy_table[table_ttl_Datetime-pk_types16-all_types16-index16-Datetime--] [GOOD] >> TCmsTest::TestTwoOrMoreDisksFromGroupAtTheSameRequestMirror3dc [GOOD] >> TCmsTest::VDisksEviction ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpPrefixedVectorIndexes::OrderByCosineDistanceNotNullableLevel1 [GOOD] Test command err: Trying to start YDB, gRPC: 17360, MsgBus: 3629 2025-05-07T09:15:48.303096Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: 
fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501630030627216318:2065];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:15:48.303160Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003bf5/r3tmp/tmpF7dtPU/pdisk_1.dat 2025-05-07T09:15:48.911437Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:15:48.911550Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:15:48.916059Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:15:48.979261Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 17360, node 1 2025-05-07T09:15:49.076745Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:15:49.076769Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:15:49.076777Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:15:49.076927Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3629 TClient is connected to server localhost:3629 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:15:49.828773Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:15:49.846712Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T09:15:49.869677Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T09:15:50.086234Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:15:50.336867Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:15:50.439857Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:15:52.331735Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501630047807087149:2407], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:15:52.331884Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:15:52.722159Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T09:15:52.796117Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T09:15:52.842853Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T09:15:52.906781Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T09:15:52.951892Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T09:15:53.032555Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T09:15:53.079656Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T09:15:53.163082Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501630052102055109:2470], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:15:53.163186Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:15:53.163488Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501630052102055114:2473], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:15:53.167721Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-05-07T09:15:53.180659Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501630052102055116:2474], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T09:15:53.259727Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501630052102055167:3423] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:15:53.306649Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501630030627216318:2065];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:15:53.306772Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:15:54.621711Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:15:55.641863Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976710675:1, at schemeshard: 72057594046644480 waiting... Trying to start YDB, gRPC: 24127, MsgBus: 24318 2025-05-07T09:15:56.842745Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7501630064502358649:2060];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:15:56.842776Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003bf5/r3tmp/tmp845W20/pdisk_1.dat 2025-05-07T09:15:57.021342Z node 2 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:15:57.041248Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:15:57.041325Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:15:57.042916Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 24127, node 2 2025-05-07T09:15:57.120818Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:15:57.120844Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:15:57.120853Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:15:57.120971Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:24318 TClient is connected to server localhost:24318 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" Path ... CE: schemeshard_impl.cpp:4877: StateWork, processing event TEvDataShard::TEvPeriodicTableStats 2025-05-07T09:16:28.408519Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:561: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037907 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 5] state 'Ready' dataSize 0 rowCount 0 cpuUsage 0.0109 2025-05-07T09:16:28.408582Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:568: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037907 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 5] raw table stats: DataSize: 0 RowCount: 0 IndexSize: 0 InMemSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 0 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 2025-05-07T09:16:28.506138Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 2146435092, Sender [0:0:0], Recipient [2:7501630064502359002:2150]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats 2025-05-07T09:16:28.506182Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5015: StateWork, processing event TEvPrivate::TEvPersistTableStats 2025-05-07T09:16:28.506198Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:588: Started TEvPersistStats at tablet 72057594046644480, queue size# 8 2025-05-07T09:16:28.506249Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:599: Will execute TTxStoreStats, queue# 8 2025-05-07T09:16:28.506263Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:608: Will delay TTxStoreTableStats on# 0.000000s, queue# 8 2025-05-07T09:16:28.506322Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 5 shard idx 72057594046644480:19 data size 0 row count 0 2025-05-07T09:16:28.506380Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037906 maps to shardIdx: 72057594046644480:19 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 5], pathId map=BatchUpload, is column=0, is olap=0, RowCount 0, DataSize 0 2025-05-07T09:16:28.506392Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037906, followerId 0 2025-05-07T09:16:28.506448Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:19 with partCount# 0, rowCount# 0, searchHeight# 0, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-05-07T09:16:28.506486Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:483: Do not want to split tablet 72075186224037906 2025-05-07T09:16:28.506518Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 5 shard idx 72057594046644480:21 data size 0 row count 0 2025-05-07T09:16:28.506543Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037908 maps to shardIdx: 72057594046644480:21 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 5], pathId map=BatchUpload, is column=0, is olap=0, RowCount 0, DataSize 0 2025-05-07T09:16:28.506551Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037908, followerId 0 2025-05-07T09:16:28.506576Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:21 with partCount# 0, rowCount# 0, searchHeight# 0, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-05-07T09:16:28.506589Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:483: Do not want to split tablet 72075186224037908 2025-05-07T09:16:28.506606Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 5 shard idx 72057594046644480:17 data size 0 row count 0 2025-05-07T09:16:28.506632Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037904 maps to shardIdx: 72057594046644480:17 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 5], pathId map=BatchUpload, is column=0, is olap=0, RowCount 0, DataSize 0 2025-05-07T09:16:28.506640Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037904, followerId 0 2025-05-07T09:16:28.506663Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:17 with partCount# 0, rowCount# 0, searchHeight# 0, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-05-07T09:16:28.506672Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:483: Do not want to split tablet 72075186224037904 2025-05-07T09:16:28.506688Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 5 shard idx 72057594046644480:23 data size 0 row count 0 2025-05-07T09:16:28.506716Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037910 maps to shardIdx: 72057594046644480:23 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 5], pathId map=BatchUpload, is column=0, is olap=0, RowCount 0, DataSize 0 2025-05-07T09:16:28.506723Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037910, followerId 0 2025-05-07T09:16:28.506746Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:23 with partCount# 0, rowCount# 0, searchHeight# 0, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-05-07T09:16:28.506755Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:483: Do not want to split tablet 72075186224037910 2025-05-07T09:16:28.506770Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for 
pathId 5 shard idx 72057594046644480:16 data size 0 row count 0 2025-05-07T09:16:28.506792Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037903 maps to shardIdx: 72057594046644480:16 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 5], pathId map=BatchUpload, is column=0, is olap=0, RowCount 0, DataSize 0 2025-05-07T09:16:28.506800Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037903, followerId 0 2025-05-07T09:16:28.506825Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:16 with partCount# 0, rowCount# 0, searchHeight# 0, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-05-07T09:16:28.506835Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:483: Do not want to split tablet 72075186224037903 2025-05-07T09:16:28.506851Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 5 shard idx 72057594046644480:14 data size 0 row count 0 2025-05-07T09:16:28.506875Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037901 maps to shardIdx: 72057594046644480:14 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 5], pathId map=BatchUpload, is column=0, is olap=0, RowCount 0, DataSize 0 2025-05-07T09:16:28.506883Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037901, followerId 0 2025-05-07T09:16:28.506906Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:14 with partCount# 0, rowCount# 0, searchHeight# 0, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-05-07T09:16:28.506915Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:483: Do not want to split tablet 72075186224037901 2025-05-07T09:16:28.506931Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 5 shard idx 72057594046644480:18 data size 0 row count 0 2025-05-07T09:16:28.506954Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037905 maps to shardIdx: 72057594046644480:18 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 5], pathId map=BatchUpload, is column=0, is olap=0, RowCount 0, DataSize 0 2025-05-07T09:16:28.506962Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037905, followerId 0 2025-05-07T09:16:28.506989Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:18 with partCount# 0, rowCount# 0, searchHeight# 0, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-05-07T09:16:28.506999Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:483: Do not want to split tablet 72075186224037905 2025-05-07T09:16:28.507016Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 5 shard idx 72057594046644480:20 data size 0 row 
count 0 2025-05-07T09:16:28.507038Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037907 maps to shardIdx: 72057594046644480:20 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 5], pathId map=BatchUpload, is column=0, is olap=0, RowCount 0, DataSize 0 2025-05-07T09:16:28.507045Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037907, followerId 0 2025-05-07T09:16:28.507065Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:20 with partCount# 0, rowCount# 0, searchHeight# 0, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-05-07T09:16:28.507075Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:483: Do not want to split tablet 72075186224037907 2025-05-07T09:16:28.507126Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-05-07T09:16:28.507247Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 2146435092, Sender [0:0:0], Recipient [2:7501630064502359002:2150]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats 2025-05-07T09:16:28.507261Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5015: StateWork, processing event TEvPrivate::TEvPersistTableStats 2025-05-07T09:16:28.507270Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:588: Started TEvPersistStats at tablet 72057594046644480, queue size# 0 >> KqpIndexes::DoUpsertWithoutIndexUpdate+UniqIndex-UseSink [GOOD] >> KqpIndexes::DoUpsertWithoutIndexUpdate-UniqIndex+UseSink >> KqpIndexes::CreateTableWithExplicitAsyncIndexSQL [GOOD] >> AsyncIndexChangeCollector::UpsertWithoutIndexedValue [GOOD] >> CdcStreamChangeCollector::DeleteNothing |94.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/tools/nemesis/ut/py3test >> test_tablet.py::TestMassiveKills::test_tablets_are_ok_after_many_kills [GOOD] >> CdcStreamChangeCollector::InsertSingleUuidRow [GOOD] >> CdcStreamChangeCollector::IndexAndStreamUpsert >> CdcStreamChangeCollector::UpsertToSameKeyWithImages [GOOD] >> CdcStreamChangeCollector::UpsertModifyDelete >> KqpMultishardIndex::DuplicateUpsert [GOOD] >> KqpIndexes::IndexTopSortPushDown [GOOD] |94.4%| [TA] $(B)/ydb/tests/tools/nemesis/ut/test-results/py3test/{meta.json ... results_accumulator.log} |94.4%| [TA] {RESULT} $(B)/ydb/tests/tools/nemesis/ut/test-results/py3test/{meta.json ... results_accumulator.log} ------- [TM] {asan, default-linux-x86_64, release} ydb/tests/olap/ttl_tiering/py3test >> ttl_delete_s3.py::TestDeleteTtl::test_ttl_delete 2025-05-07 09:16:14,778 WARNING devtools.ya.test.programs.test_tool.run_test.run_test: Wrapper execution timed out 2025-05-07 09:16:15,396 WARNING devtools.ya.test.programs.test_tool.run_test.run_test: Wrapper has overrun 600 secs timeout. 
Process tree before termination:
pid     rss   ref   pdirt
416330  645M  640M  560M   ydb-tests-olap-ttl_tiering --basetemp /home/runner/.ya/build/build_root/zvgn/004561/tmp --capture no -c pkg:library.python.pytest:pytest.yatest.ini -p no:factor --doctest-modu
417353  4.6G  4.6G  4.1G   ├─ ydbd server --suppress-version-check --yaml-config=/home/runner/.ya/build/build_root/zvgn/004561/ydb/tests/olap/ttl_tiering/test-results/py3test/testing_out_stuff/chunk4
419052  391M  386M  359M   └─ moto_server s3 --port 21585
Test command err:
  File "library/python/pytest/main.py", line 101, in main
    rc = pytest.main(
  File "contrib/python/pytest/py3/_pytest/config/__init__.py", line 175, in main
    ret: Union[ExitCode, int] = config.hook.pytest_cmdline_main(
  File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 513, in __call__
    return self._hookexec(self.name, self._hookimpls.copy(), kwargs, firstresult)
  File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120, in _hookexec
    return self._inner_hookexec(hook_name, methods, kwargs, firstresult)
  File "contrib/python/pluggy/py3/pluggy/_callers.py", line 103, in _multicall
    res = hook_impl.function(*args)
  File "contrib/python/pytest/py3/_pytest/main.py", line 320, in pytest_cmdline_main
    return wrap_session(config, _main)
  File "contrib/python/pytest/py3/_pytest/main.py", line 273, in wrap_session
    session.exitstatus = doit(config, session) or 0
  File "contrib/python/pytest/py3/_pytest/main.py", line 327, in _main
    config.hook.pytest_runtestloop(session=session)
  File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 513, in __call__
    return self._hookexec(self.name, self._hookimpls.copy(), kwargs, firstresult)
  File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120, in _hookexec
    return self._inner_hookexec(hook_name, methods, kwargs, firstresult)
  File "contrib/python/pluggy/py3/pluggy/_callers.py", line 103, in _multicall
    res = hook_impl.function(*args)
  File "contrib/python/pytest/py3/_pytest/main.py", line 352, in pytest_runtestloop
    item.config.hook.pytest_runtest_protocol(item=item, nextitem=nextitem)
  File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 513, in __call__
    return self._hookexec(self.name, self._hookimpls.copy(), kwargs, firstresult)
  File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120, in _hookexec
    return self._inner_hookexec(hook_name, methods, kwargs, firstresult)
  File "contrib/python/pluggy/py3/pluggy/_callers.py", line 103, in _multicall
    res = hook_impl.function(*args)
  File "contrib/python/pytest/py3/_pytest/runner.py", line 115, in pytest_runtest_protocol
    runtestprotocol(item, nextitem=nextitem)
  File "contrib/python/pytest/py3/_pytest/runner.py", line 134, in runtestprotocol
    reports.append(call_and_report(item, "call", log))
  File "contrib/python/pytest/py3/_pytest/runner.py", line 223, in call_and_report
    call = call_runtest_hook(item, when, **kwds)
  File "contrib/python/pytest/py3/_pytest/runner.py", line 262, in call_runtest_hook
    return CallInfo.from_call(
  File "contrib/python/pytest/py3/_pytest/runner.py", line 342, in from_call
    result: Optional[TResult] = func()
  File "contrib/python/pytest/py3/_pytest/runner.py", line 263, in <lambda>
    lambda: ihook(item=item, **kwds), when=when, reraise=reraise
  File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 513, in __call__
    return self._hookexec(self.name, self._hookimpls.copy(), kwargs, firstresult)
  File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120, in _hookexec
    return self._inner_hookexec(hook_name, methods, kwargs, firstresult)
  File "contrib/python/pluggy/py3/pluggy/_callers.py", line 103, in _multicall
    res = hook_impl.function(*args)
  File "contrib/python/pytest/py3/_pytest/runner.py", line 170, in pytest_runtest_call
    item.runtest()
  File "contrib/python/pytest/py3/_pytest/python.py", line 1844, in runtest
    self.ihook.pytest_pyfunc_call(pyfuncitem=self)
  File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 513, in __call__
    return self._hookexec(self.name, self._hookimpls.copy(), kwargs, firstresult)
  File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120, in _hookexec
    return self._inner_hookexec(hook_name, methods, kwargs, firstresult)
  File "contrib/python/pluggy/py3/pluggy/_callers.py", line 103, in _multicall
    res = hook_impl.function(*args)
  File "library/python/pytest/plugins/ya.py", line 563, in pytest_pyfunc_call
    pyfuncitem.retval = testfunction(**testargs)
  File "ydb/tests/olap/ttl_tiering/ttl_delete_s3.py", line 361, in test_ttl_delete
    self.ydb_client.query("""
  File "ydb/tests/olap/common/ydb_client.py", line 24, in query
    return self.session_pool.execute_with_retries(statement)
  File "contrib/python/ydb/py3/ydb/query/pool.py", line 204, in execute_with_retries
    return retry_operation_sync(wrapped_callee, retry_settings)
  File "contrib/python/ydb/py3/ydb/retries.py", line 133, in retry_operation_sync
    for next_opt in opt_generator:
  File "contrib/python/ydb/py3/ydb/retries.py", line 94, in retry_operation_impl
    result = YdbRetryOperationFinalResult(callee(*args, **kwargs))
  File "contrib/python/ydb/py3/ydb/query/pool.py", line 202, in wrapped_callee
    return [result_set for result_set in it]
  File "contrib/python/ydb/py3/ydb/_utilities.py", line 173, in __next__
    return self._next()
  File "contrib/python/ydb/py3/ydb/_utilities.py", line 164, in _next
    res = self.wrapper(next(self.it))
  File "contrib/python/grpcio/py3/grpc/_channel.py", line 475, in __next__
    return self._next()
  File "contrib/python/grpcio/py3/grpc/_channel.py", line 872, in _next
    _common.wait(self._state.condition.wait, _response_ready)
  File "contrib/python/grpcio/py3/grpc/_common.py", line 150, in wait
    _wait_once(wait_fn, MAXIMUM_WAIT_TIMEOUT, spin_cb)
  File "contrib/python/grpcio/py3/grpc/_common.py", line 112, in _wait_once
    wait_fn(timeout=timeout)
  File "contrib/tools/python3/Lib/threading.py", line 359, in wait
    gotit = waiter.acquire(True, timeout)
  File "library/python/pytest/plugins/ya.py", line 344, in _graceful_shutdown
    traceback.print_stack(file=sys.stderr)
Traceback (most recent call last):
  File "library/python/testing/yatest_common/yatest/common/process.py", line 384, in wait
    wait_for(
  File "library/python/testing/yatest_common/yatest/common/process.py", line 764, in wait_for
    raise TimeoutError(truncate(message, MAX_MESSAGE_LEN))
yatest.common.process.TimeoutError: ...d_root/zvgn/004561/tmp', '--capture', 'no', '-c', 'pkg:library.python.pytest:pytest.yatest.ini', '-p', 'no:factor', '--doctest-modules', '--ya-trace', '/home/runner/.ya/build/build_root/zvgn/004561/ydb/tests/olap/ttl_tiering/test-results/py3test/testing_out_stuff/chunk4/ytest.report.trace', '--build-root', '/home/runner/.ya/build/build_root/zvgn/004561', '--source-root', '/home/runner/.ya/build/build_root/zvgn/004561/environment/arcadia', '--output-dir', '/home/runner/.ya/build/build_root/zvgn/004561/ydb/tests/olap/ttl_tiering/test-results/py3test/testing_out_stuff/chunk4/testing_out_stuff', '--durations', '0', '--project-path', 'ydb/tests/olap/ttl_tiering', '--test-tool-bin', '/home/runner/.ya/tools/v4/8580453620/test_tool', '--ya-version', '2', '--collect-cores', '--sanitizer-extra-checks', '--build-type', 'release', '--tb', 'short', '--modulo', '10', '--modulo-index', '4', '--partition-mode', 'SEQUENTIAL', '--dep-root', 'ydb/tests/olap/ttl_tiering', '--flags', 'APPLE_SDK_LOCAL=yes', '--flags', 'CFLAGS=-fno-omit-frame-pointer -Wno-unknown-argument', '--flags', 'DEBUGINFO_LINES_ONLY=yes', '--flags', 'DISABLE_FLAKE8_MIGRATIONS=yes', '--flags', 'OPENSOURCE=yes', '--flags', 'SANITIZER_TYPE=address', '--flags', 'TESTS_REQUESTED=yes', '--flags', 'USE_AIO=static', '--flags', 'USE_CLANG_CL=yes', '--flags', 'USE_EAT_MY_DATA=yes', '--flags', 'USE_ICONV=static', '--flags', 'USE_IDN=static', '--flags', 'USE_PREBUILT_TOOLS=no', '--sanitize', 'address']' stopped by 600 seconds timeout

During handling of the above exception, another exception occurred:

Traceback (most recent call last):
  File "devtools/ya/test/programs/test_tool/run_test/run_test.py", line 1738, in main
    res.wait(check_exit_code=False, timeout=run_timeout, on_timeout=timeout_callback)
  File "library/python/testing/yatest_common/yatest/common/process.py", line 398, in wait
    raise ExecutionTimeoutError(self, str(e))
yatest.common.process.ExecutionTimeoutError: (("...d_root/zvgn/004561/tmp', '--capture', 'no', '-c', 'pkg:library.python.pytest:pytest.yatest.ini', '-p', 'no:factor', '--doctest-modules', '--ya-trace', '/home/runner/.ya/build/build_root/zvgn/004561/ydb/tests/olap/ttl_tiering/test-results/py3test/testing_out_stuff/chunk4/ytest.report.trace', '--build-root', '/home/runner/.ya/build/build_root/zvgn/004561', '--source-root', '/home/runner/.ya/build/build_root/zvgn/004561/environment/arcadia', '--output-dir', '/home/runner/.ya/build/build_root/zvgn/004561/ydb/tests/olap/ttl_tiering/test-results/py3test/testing_out_stuff/chunk4/testing_out_stuff', '--durations', '0', '--project-path', 'ydb/tests/olap/ttl_tiering', '--test-tool-bin', '/home/runner/.ya/tools/v4/8580453620/test_tool', '--ya-version', '2', '--collect-cores', '--sanitizer-extra-checks', '--build-type', 'release', '--tb', 'short', '--modulo', '10', '--modulo-index', '4', '--partition-mode', 'SEQUENTIAL', '--dep-root', 'ydb/tests/olap/ttl_tiering', '--flags', 'APPLE_SDK_LOCAL=yes', '--flags', 'CFLAGS=-fno-omit-frame-pointer -Wno-unknown-argument', '--flags', 'DEBUGINFO_LINES_ONLY=yes', '--flags', 'DISABLE_FLAKE8_MIGRATIONS=yes', '--flags', 'OPENSOURCE=yes', '--flags', 'SANITIZER_TYPE=address', '--flags', 'TESTS_REQUESTED=yes', '--flags', 'USE_AIO=static', '--flags', 'USE_CLANG_CL=yes', '--flags', 'USE_EAT_MY_DATA=yes', '--flags', 'USE_ICONV=static', '--flags', 'USE_IDN=static', '--flags', 'USE_PREBUILT_TOOLS=no', '--sanitize', 'address']' stopped by 600 seconds timeout",), {})
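The stack above bottoms out in the YDB Python SDK: the test's ydb_client.query() (ydb/tests/olap/common/ydb_client.py) delegates to QuerySessionPool.execute_with_retries, which was still blocked on the underlying gRPC result stream when the 600-second wrapper timeout fired. A minimal sketch of that call pattern, assuming a local endpoint, database path, and placeholder query (none of these values are taken from the test source):

    import ydb

    # Assumed connection parameters; the real test wires these up itself.
    driver = ydb.Driver(endpoint="grpc://localhost:2136", database="/Root")
    driver.wait(timeout=5)  # raises if the server never becomes ready

    pool = ydb.QuerySessionPool(driver)
    try:
        # execute_with_retries drives retry_operation_sync (both visible in
        # the stack above) and drains every result set from the server-side
        # stream before returning, so a stalled stream blocks exactly here.
        result_sets = pool.execute_with_retries("SELECT 1 AS x;")
        print(result_sets[0].rows[0].x)
    finally:
        pool.stop()
        driver.stop()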
------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpIndexes::CreateTableWithExplicitAsyncIndexSQL [GOOD] Test command err: Trying to start YDB, gRPC: 7072, MsgBus: 23118 2025-05-07T09:16:09.504602Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501630119652857980:2129];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:16:09.510798Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003bd5/r3tmp/tmpL1Gfpa/pdisk_1.dat 2025-05-07T09:16:10.042486Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:16:10.042624Z node 1 :HIVE
WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:16:10.045744Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:16:10.065035Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 7072, node 1 2025-05-07T09:16:10.238608Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:16:10.238633Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:16:10.238648Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:16:10.238750Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23118 TClient is connected to server localhost:23118 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:16:11.093186Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:11.128736Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:11.371421Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:11.602322Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:11.695645Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 
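For orientation, this block is the output of KqpIndexes::CreateTableWithExplicitAsyncIndexSQL, and the ESchemeOpCreateTable suboperations above are its table setup. A hedged YQL sketch of the kind of DDL the test name refers to, an index declared GLOBAL ASYNC inside the CREATE TABLE statement itself (column names are invented; only the /Root/TestTable path actually appears later in this log), reusing the pool from the earlier sketch:

    # Hypothetical columns; GLOBAL ASYNC marks the secondary index as
    # asynchronously maintained rather than updated in the write transaction.
    pool.execute_with_retries("""
        CREATE TABLE `/Root/TestTable` (
            Key Uint64,
            Value String,
            PRIMARY KEY (Key),
            INDEX ValueIndex GLOBAL ASYNC ON (Value)
        );
    """)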
2025-05-07T09:16:13.818932Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501630136832728748:2407], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:13.819116Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:14.148296Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T09:16:14.191873Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T09:16:14.245896Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T09:16:14.317016Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T09:16:14.358427Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T09:16:14.439340Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T09:16:14.490171Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T09:16:14.510883Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501630119652857980:2129];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:16:14.510943Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:16:14.562887Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501630141127696709:2471], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:14.562978Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:14.563279Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501630141127696714:2474], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:14.567286Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-05-07T09:16:14.579900Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501630141127696716:2475], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T09:16:14.682882Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501630141127696767:3424] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:16:15.881139Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480 2025-05-07T09:16:16.382135Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-05-07T09:16:16.402208Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill Trying to start YDB, gRPC: 4264, MsgBus: 18501 2025-05-07T09:16:17.248439Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7501630154397877724:2186];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003bd5/r3tmp/tmpxdgINs/pdisk_1.dat 2025-05-07T09:16:17.290806Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-05-07T09:16:17.417175Z node 2 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:16:17.432038Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:16:17.432146Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:16:17.433635Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 4264, node 2 2025-05-07T09:16:17.482636Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:16:17.482660Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:16:17.482668Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:16:17.482786Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:18501 TClient is connected to server localhost:18501 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 Securit ... ] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:21.102893Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7501630171577749054:2472], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:21.107799Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-05-07T09:16:21.128875Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7501630171577749056:2473], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T09:16:21.223552Z node 2 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [2:7501630171577749107:3412] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:16:22.246167Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7501630154397877724:2186];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:16:22.246251Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:16:22.430761Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480 2025-05-07T09:16:22.939725Z node 2 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill Trying to start YDB, gRPC: 25983, MsgBus: 31425 2025-05-07T09:16:23.818055Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7501630182188599621:2061];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:16:23.818202Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003bd5/r3tmp/tmpHDCVDd/pdisk_1.dat 2025-05-07T09:16:23.942599Z node 3 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 25983, node 3 2025-05-07T09:16:23.972154Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:16:23.972246Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:16:24.002274Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:16:24.048514Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:16:24.048535Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:16:24.048543Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:16:24.048713Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:31425 TClient is connected to server localhost:31425 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:16:24.562665Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:24.576555Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:24.656006Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:24.853735Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:24.953409Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:27.454148Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7501630199368470459:2406], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:27.454246Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:27.517055Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-05-07T09:16:27.602126Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-05-07T09:16:27.641670Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-05-07T09:16:27.690602Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-05-07T09:16:27.723571Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-05-07T09:16:27.813750Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-05-07T09:16:27.895160Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-05-07T09:16:28.011898Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7501630203663438421:2470], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:28.011987Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:28.012398Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7501630203663438426:2473], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:28.017056Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-05-07T09:16:28.030882Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7501630203663438428:2474], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-05-07T09:16:28.120810Z node 3 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [3:7501630203663438479:3413] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:16:28.814094Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7501630182188599621:2061];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:16:28.814159Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:16:29.392389Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480 2025-05-07T09:16:29.526792Z node 3 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [3:7501630207958406317:3740] txid# 281474976715673, issues: { message: "Check failed: path: \'/Root/TestTable\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 17], type: EPathTypeTable, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:16:30.722564Z node 3 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill >> KqpUniqueIndex::UpdateOnHidenChanges-DataColumn [GOOD] >> KqpUniqueIndex::UpdateOnNullInComplexFk >> KqpPrefixedVectorIndexes::OrderByCosineDistanceNullableLevel1 [GOOD] >> KqpPrefixedVectorIndexes::OrderByCosineDistanceNotNullableLevel4 >> KqpIndexes::SecondaryIndexUsingInJoin-UseStreamJoin [GOOD] >> KqpIndexes::SecondaryIndexUsingInJoin2+UseStreamJoin >> SystemView::VSlotsFields [GOOD] >> SystemView::TopPartitionsByCpuTables ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpMultishardIndex::DuplicateUpsert [GOOD] Test command err: Trying to start YDB, gRPC: 65512, MsgBus: 15892 2025-05-07T09:16:16.106966Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501630149126849518:2068];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:16:16.107018Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003bca/r3tmp/tmpfxNIih/pdisk_1.dat 2025-05-07T09:16:16.459947Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 65512, node 1 2025-05-07T09:16:16.528708Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:16:16.528964Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:16:16.538771Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:16:16.574842Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 
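The section that begins above is KqpMultishardIndex::DuplicateUpsert. As a hedged illustration of the operation class that name describes (table and values invented, not from the test source): YQL UPSERT is last-writer-wins by primary key, so writing the same key twice must leave exactly one row, and on an indexed multishard table the old secondary-index entry has to be replaced rather than duplicated.

    # Hypothetical table; the second UPSERT overwrites the first row and any
    # secondary-index row derived from it, leaving one entry for Key = 1.
    pool.execute_with_retries("""
        UPSERT INTO `/Root/MultiShardTable` (Key, Value) VALUES (1u, "first");
        UPSERT INTO `/Root/MultiShardTable` (Key, Value) VALUES (1u, "second");
    """)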
2025-05-07T09:16:16.574873Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:16:16.574886Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:16:16.575062Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:15892 TClient is connected to server localhost:15892 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:16:17.228199Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:17.255606Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:17.270824Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 2025-05-07T09:16:17.448371Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:17.628483Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:17.715366Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:19.610767Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501630162011753042:2406], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:19.610894Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:20.013353Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T09:16:20.047481Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T09:16:20.086347Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T09:16:20.117772Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T09:16:20.190542Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T09:16:20.236218Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T09:16:20.311159Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T09:16:20.420147Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501630166306721009:2470], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:20.420293Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:20.421088Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501630166306721014:2473], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:20.424928Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-05-07T09:16:20.436275Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501630166306721016:2474], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T09:16:20.499516Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501630166306721067:3425] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:16:21.110112Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501630149126849518:2068];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:16:21.110193Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:16:21.750187Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480 waiting... Trying to start YDB, gRPC: 21738, MsgBus: 16179 2025-05-07T09:16:24.958233Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7501630186716094952:2057];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:16:24.958304Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003bca/r3tmp/tmpHwC1XJ/pdisk_1.dat 2025-05-07T09:16:25.088994Z node 2 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:16:25.110495Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:16:25.110588Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:16:25.116959Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 21738, node 2 2025-05-07T09:16:25.164922Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:16:25.164947Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:16:25.164958Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:16:25.165085Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:16179 TClient is connected to server localhost:16179 WaitRootIsUp 'Root'... 
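The TClient::Ls request/response that follows is the C++ test client dumping the Root scheme entry as protobuf text. A rough Python-SDK equivalent, assuming the driver from the first sketch (the SDK returns structured objects rather than the protobuf text shown below):

    # Lists the children of /Root, roughly what TClient::Ls renders below.
    root_dir = driver.scheme_client.list_directory("/Root")
    for entry in root_dir.children:
        print(entry.name, entry.type)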
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:16:25.632493Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:25.637703Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-05-07T09:16:25.653191Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:25.738664Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-05-07T09:16:25.894744Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 2025-05-07T09:16:25.984389Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:28.633602Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7501630203895965782:2406], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:28.633682Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:28.696855Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-05-07T09:16:28.731192Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-05-07T09:16:28.766444Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-05-07T09:16:28.836022Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-05-07T09:16:28.889473Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-05-07T09:16:28.981636Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-05-07T09:16:29.055544Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-05-07T09:16:29.165040Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7501630208190933748:2470], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:29.165123Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:29.165360Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7501630208190933753:2473], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:29.169327Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-05-07T09:16:29.181670Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7501630208190933755:2474], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-05-07T09:16:29.264626Z node 2 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [2:7501630208190933806:3417] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:16:29.962117Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7501630186716094952:2057];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:16:29.971236Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:16:30.309041Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480 waiting... >> KqpPrefixedVectorIndexes::OrderByCosineDistanceNotNullableLevel2 >> SystemView::Nodes [GOOD] >> SystemView::PartitionStatsFields ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpIndexes::IndexTopSortPushDown [GOOD] Test command err: Trying to start YDB, gRPC: 28970, MsgBus: 2014 2025-05-07T09:15:50.552865Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501630039476355277:2128];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:15:50.552902Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003bf3/r3tmp/tmpWrFmvC/pdisk_1.dat 2025-05-07T09:15:51.126768Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:15:51.126875Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:15:51.129770Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:15:51.150903Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 28970, node 1 2025-05-07T09:15:51.206725Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:15:51.206764Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:15:51.206775Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:15:51.206900Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:2014 TClient is connected to server localhost:2014 WaitRootIsUp 'Root'... 
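The KqpIndexes::IndexTopSortPushDown output that follows eventually proposes an ESchemeOpCreateIndexedTable for /Root/SecondaryKeys with a global secondary index named Index on column Fk (the schema dump appears further down in this log and is partly truncated). A rough YQL equivalent of that schema, sketched without the PartitionConfig/storage-pool settings since those are clipped in the dump:

    -- Approximation of the SecondaryKeys schema proposed below; storage and
    -- partitioning settings are omitted because the log dump is truncated.
    CREATE TABLE `/Root/SecondaryKeys` (
        Key Int32,
        Fk Int32,
        Value String,
        PRIMARY KEY (Key),
        INDEX Index GLOBAL ON (Fk)  -- global secondary index on the Fk column
    );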
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:15:51.773376Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:15:51.788478Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T09:15:51.799696Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:15:52.009316Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:15:52.212766Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:15:52.302541Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:15:54.250900Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501630056656226047:2406], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:15:54.251077Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:15:54.607776Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T09:15:54.640012Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T09:15:54.687796Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T09:15:54.727129Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T09:15:54.767532Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T09:15:54.806646Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T09:15:54.859435Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T09:15:54.932295Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501630056656226705:2470], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:15:54.932375Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:15:54.932663Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501630056656226710:2473], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:15:54.936520Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-05-07T09:15:54.953188Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501630056656226712:2474], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T09:15:55.053576Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501630060951194059:3419] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:15:55.554072Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501630039476355277:2128];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:15:55.554142Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:15:56.340222Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269877761, Sender [1:7501630065246161666:3605], Recipient [1:7501630043771322908:2182]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-05-07T09:15:56.340248Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4937: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-05-07T09:15:56.340256Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5713: Pipe server connected, at tablet: 72057594046644480 2025-05-07T09:15:56.340282Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271122432, Sender [1:7501630065246161662:3602], Recipient [1:7501630043771322908:2182]: {TEvModifySchemeTransaction txid# 281474976710672 TabletId# 72057594046644480} 2025-05-07T09:15:56.340290Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4851: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-05-07T09:15:56.429109Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/Root" OperationType: ESchemeOpCreateIndexedTable CreateIndexedTable { TableDescription { Name: "SecondaryKeys" Columns { Name: "Key" Type: "Int32" NotNull: false } Columns { Name: "Fk" Type: "Int32" NotNull: false } Columns { Name: "Value" Type: "String" NotNull: false } KeyColumnNames: "Key" PartitionConfig { ColumnFamilies { Id: 0 StorageConfig { SysLog { PreferredPoolKind: "test" } Log { PreferredPoolKind: "test" } Data { PreferredPoolKind: "test" } } } } Temporary: false } IndexDescription { Name: "Index" KeyColumnNames: "Fk" Type: EIndexTypeGlobal State: EIndexStateReady } } } TxId: 281474976710672 TabletId: 72057594046644480 PeerName: "" , at schemeshard: 72057594046644480 2025-05-07T09:15:56.429691Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_create_indexed_table.cpp:101: TCreateTableIndex construct operation table path: /Root/SecondaryKeys domain path id: [OwnerId: 72057594046644480, LocalPathId: 1] domain path: /Root shardsToCreate: 2 GetShardsInside: 34 MaxShards: 200000 2025-05-07T09:15:56.430318Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_table.cpp:426: TCreateTable Propose, path: /Root/SecondaryKeys, opId: 281474976710672:0, at schemeshard: 72057594046644480 2025-05-07T09:15:56.430479Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_create_table.cpp:433: TCreateTable Propose, path: /Root/SecondaryKeys, opId: 281474976710672:0, schema: Name: "SecondaryKeys" Columns { Name: "Key" Type: "Int32" NotNull: false } Columns { Name: "Fk" Type: "Int32" 
NotNull: false } Columns { Name: "Value" Type: "String" NotNull: false } KeyColumnNames: "Key" PartitionConfi ... 630083726963963:2141], Recipient [2:7501630083726963963:2141]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T09:16:17.880879Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4847: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T09:16:18.881376Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271125000, Sender [0:0:0], Recipient [2:7501630083726963963:2141]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T09:16:18.881420Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4848: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T09:16:18.881465Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271124999, Sender [2:7501630083726963963:2141], Recipient [2:7501630083726963963:2141]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T09:16:18.881484Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4847: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T09:16:19.882281Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271125000, Sender [0:0:0], Recipient [2:7501630083726963963:2141]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T09:16:19.882321Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4848: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T09:16:19.882370Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271124999, Sender [2:7501630083726963963:2141], Recipient [2:7501630083726963963:2141]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T09:16:19.882386Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4847: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime Trying to start YDB, gRPC: 26743, MsgBus: 13417 2025-05-07T09:16:20.826326Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7501630169935326944:2130];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:16:20.840300Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003bf3/r3tmp/tmpC50BAC/pdisk_1.dat 2025-05-07T09:16:21.068925Z node 3 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:16:21.124621Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:16:21.124738Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:16:21.133653Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 26743, node 3 2025-05-07T09:16:21.297921Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:16:21.297955Z node 3 :NET_CLASSIFIER WARN: 
net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:16:21.297983Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:16:21.298144Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:13417 TClient is connected to server localhost:13417 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:16:22.055163Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:22.066611Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:22.083596Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:22.174300Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 2025-05-07T09:16:22.436745Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-05-07T09:16:22.550267Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 2025-05-07T09:16:25.682484Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7501630191410165005:2407], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:25.682604Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:25.752000Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T09:16:25.800076Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T09:16:25.826021Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7501630169935326944:2130];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:16:25.826141Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:16:25.855707Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T09:16:25.905265Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T09:16:25.983636Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T09:16:26.057830Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T09:16:26.102269Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T09:16:26.223620Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7501630195705132962:2471], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:26.223729Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:26.224121Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7501630195705132967:2474], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:26.228681Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-05-07T09:16:26.245589Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7501630195705132969:2475], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T09:16:26.304580Z node 3 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [3:7501630195705133020:3419] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:16:27.531295Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480 2025-05-07T09:16:27.598104Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480 2025-05-07T09:16:27.642362Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710674:0, at schemeshard: 72057594046644480 >> TCmsTest::VDisksEviction [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/cms/ut/unittest >> TCmsTest::VDisksEviction [GOOD] Test command err: 2025-05-07T09:16:31.510899Z node 18 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-05-07T09:16:31.511012Z node 18 :CMS DEBUG: cms_tx_update_downtimes.cpp:26: TTxUpdateDowntimes Complete 2025-05-07T09:16:31.511169Z node 18 :CMS DEBUG: cluster_info.cpp:966: Timestamp: 1970-01-01T00:02:00Z 2025-05-07T09:16:31.513216Z node 18 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvClusterStateRequest { }, response# NKikimr::NCms::TEvCms::TEvClusterStateResponse { Status { Code: OK } State { Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120027000 } Devices { Name: "vdisk-0-1-0-0-0" State: UP Timestamp: 120027000 } Devices { Name: "vdisk-1-1-0-0-0" State: UP Timestamp: 120027000 } Devices { Name: "vdisk-2-1-0-0-0" State: UP Timestamp: 120027000 } Devices { Name: "vdisk-3-1-0-0-0" State: UP Timestamp: 120027000 } Devices { Name: "pdisk-18-18" State: UP Timestamp: 120027000 } Timestamp: 120027000 NodeId: 18 InterconnectPort: 12001 Location { DataCenter: "1" Module: "1" Rack: "1" Unit: "1" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120027000 } Devices { Name: "vdisk-0-1-0-1-0" State: UP Timestamp: 120027000 } Devices { Name: "vdisk-1-1-0-1-0" State: UP Timestamp: 120027000 } Devices { Name: "vdisk-2-1-0-1-0" State: UP Timestamp: 120027000 } Devices { Name: "vdisk-3-1-0-1-0" State: UP Timestamp: 120027000 } Devices { Name: "pdisk-19-19" State: UP Timestamp: 120027000 } Timestamp: 120027000 NodeId: 19 InterconnectPort: 12002 Location { DataCenter: "1" Module: "2" Rack: "2" Unit: "2" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120027000 } Devices { Name: "vdisk-0-1-0-2-0" State: UP Timestamp: 120027000 } Devices { Name: "vdisk-1-1-0-2-0" State: UP Timestamp: 120027000 } Devices { Name: "vdisk-2-1-0-2-0" State: UP Timestamp: 120027000 } Devices { Name: "vdisk-3-1-0-2-0" State: UP Timestamp: 120027000 } Devices { Name: "pdisk-20-20" State: UP Timestamp: 120027000 } Timestamp: 120027000 
NodeId: 20 InterconnectPort: 12003 Location { DataCenter: "1" Module: "3" Rack: "3" Unit: "3" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120027000 } Devices { Name: "vdisk-0-1-0-3-0" State: UP Timestamp: 120027000 } Devices { Name: "vdisk-1-1-0-3-0" State: UP Timestamp: 120027000 } Devices { Name: "vdisk-2-1-0-3-0" State: UP Timestamp: 120027000 } Devices { Name: "vdisk-3-1-0-3-0" State: UP Timestamp: 120027000 } Devices { Name: "pdisk-21-21" State: UP Timestamp: 120027000 } Timestamp: 120027000 NodeId: 21 InterconnectPort: 12004 Location { DataCenter: "1" Module: "4" Rack: "4" Unit: "4" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120027000 } Devices { Name: "vdisk-0-1-0-4-0" State: UP Timestamp: 120027000 } Devices { Name: "vdisk-1-1-0-4-0" State: UP Timestamp: 120027000 } Devices { Name: "vdisk-2-1-0-4-0" State: UP Timestamp: 120027000 } Devices { Name: "vdisk-3-1-0-4-0" State: UP Timestamp: 120027000 } Devices { Name: "pdisk-22-22" State: UP Timestamp: 120027000 } Timestamp: 120027000 NodeId: 22 InterconnectPort: 12005 Location { DataCenter: "1" Module: "5" Rack: "5" Unit: "5" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120027000 } Devices { Name: "vdisk-0-1-0-5-0" State: UP Timestamp: 120027000 } Devices { Name: "vdisk-1-1-0-5-0" State: UP Timestamp: 120027000 } Devices { Name: "vdisk-2-1-0-5-0" State: UP Timestamp: 120027000 } Devices { Name: "vdisk-3-1-0-5-0" State: UP Timestamp: 120027000 } Devices { Name: "pdisk-23-23" State: UP Timestamp: 120027000 } Timestamp: 120027000 NodeId: 23 InterconnectPort: 12006 Location { DataCenter: "1" Module: "6" Rack: "6" Unit: "6" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120027000 } Devices { Name: "vdisk-0-1-0-6-0" State: UP Timestamp: 120027000 } Devices { Name: "vdisk-1-1-0-6-0" State: UP Timestamp: 120027000 } Devices { Name: "vdisk-2-1-0-6-0" State: UP Timestamp: 120027000 } Devices { Name: "vdisk-3-1-0-6-0" State: UP Timestamp: 120027000 } Devices { Name: "pdisk-24-24" State: UP Timestamp: 120027000 } Timestamp: 120027000 NodeId: 24 InterconnectPort: 12007 Location { DataCenter: "1" Module: "7" Rack: "7" Unit: "7" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120027000 } Devices { Name: "vdisk-0-1-0-7-0" State: UP Timestamp: 120027000 } Devices { Name: "vdisk-1-1-0-7-0" State: UP Timestamp: 120027000 } Devices { Name: "vdisk-2-1-0-7-0" State: UP Timestamp: 120027000 } Devices { Name: "vdisk-3-1-0-7-0" State: UP Timestamp: 120027000 } Devices { Name: "pdisk-25-25" State: UP Timestamp: 120027000 } Timestamp: 120027000 NodeId: 25 InterconnectPort: 12008 Location { DataCenter: "1" Module: "8" Rack: "8" Unit: "8" } StartTimeSeconds: 0 } Timestamp: 120027000 } } 2025-05-07T09:16:31.513932Z node 18 :CMS DEBUG: sentinel.cpp:486: [Sentinel] [ConfigUpdater] Handle TEvCms::TEvClusterStateResponse: response# Status { Code: OK } State { Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120027000 } Devices { Name: "vdisk-0-1-0-0-0" State: UP Timestamp: 120027000 } Devices { Name: "vdisk-1-1-0-0-0" State: UP Timestamp: 120027000 } Devices { Name: "vdisk-2-1-0-0-0" State: UP Timestamp: 120027000 } Devices { Name: "vdisk-3-1-0-0-0" State: UP Timestamp: 120027000 
} Devices { Name: "pdisk-18-18" State: UP Timestamp: 120027000 } Timestamp: 120027000 NodeId: 18 InterconnectPort: 12001 Location { DataCenter: "1" Module: "1" Rack: "1" Unit: "1" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120027000 } Devices { Name: "vdisk-0-1-0-1-0" State: UP Timestamp: 120027000 } Devices { Name: "vdisk-1-1-0-1-0" State: UP Timestamp: 120027000 } Devices { Name: "vdisk-2-1-0-1-0" State: UP Timestamp: 120027000 } Devices { Name: "vdisk-3-1-0-1-0" State: UP Timestamp: 120027000 } Devices { Name: "pdisk-19-19" State: UP Timestamp: 120027000 } Timestamp: 120027000 NodeId: 19 InterconnectPort: 12002 Location { DataCenter: "1" Module: "2" Rack: "2" Unit: "2" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120027000 } Devices { Name: "vdisk-0-1-0-2-0" State: UP Timestamp: 120027000 } Devices { Name: "vdisk-1-1-0-2-0" State: UP Timestamp: 120027000 } Devices { Name: "vdisk-2-1-0-2-0" State: UP Timestamp: 120027000 } Devices { Name: "vdisk-3-1-0-2-0" State: UP Timestamp: 120027000 } Devices { Name: "pdisk-20-20" State: UP Timestamp: 120027000 } Timestamp: 120027000 NodeId: 20 InterconnectPort: 12003 Location { DataCenter: "1" Module: "3" Rack: "3" Unit: "3" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120027000 } Devices { Name: "vdisk-0-1-0-3-0" State: UP Timestamp: 120027000 } Devices { Name: "vdisk-1-1-0-3-0" State: UP Timestamp: 120027000 } Devices { Name: "vdisk-2-1-0-3-0" State: UP Timestamp: 120027000 } Devices { Name: "vdisk-3-1-0-3-0" State: UP Timestamp: 120027000 } Devices { Name: "pdisk-21-21" State: UP Timestamp: 120027000 } Timestamp: 120027000 NodeId: 21 InterconnectPort: 12004 Location { DataCenter: "1" Module: "4" Rack: "4" Unit: "4" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120027000 } Devices { Name: "vdisk-0-1-0-4-0" State: UP Timestamp: 120027000 } Devices { Name: "vdisk-1-1-0-4-0" State: UP Timestamp: 120027000 } Devices { Name: "vdisk-2-1-0-4-0" State: UP Timestamp: 120027000 } Devices { Name: "vdisk-3-1-0-4-0" State: UP Timestamp: 120027000 } Devices { Name: "pdisk-22-22" State: UP Timestamp: 120027000 } Timestamp: 120027000 NodeId: 22 InterconnectPort: 12005 Location { DataCenter: "1" Module: "5" Rack: "5" Unit: "5" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120027000 } Devices { Name: "vdisk-0-1-0-5-0" State: UP Timestamp: 120027000 } Devices { Name: "vdisk-1-1-0-5-0" State: UP Timestamp: 120027000 } Devices { Name: "vdisk-2-1-0-5-0" State: UP Timestamp: 120027000 } Devices { Name: "vdisk-3-1-0-5-0" State: UP Timestamp: 120027000 } Devices { Name: "pdisk-23-23" State: UP Timestamp: 120027000 } Timestamp: 120027000 NodeId: 23 InterconnectPort: 12006 Location { DataCenter: "1" Module: "6" Rack: "6" Unit: "6" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120027000 } Devices { Name: "vdisk-0-1-0-6-0" State: UP Timestamp: 120027000 } Devices { Name: "vdisk-1-1-0-6-0" State: UP Timestamp: 120027000 } Devices { Name: "vdisk-2-1-0-6-0" State: UP Timestamp: 120027000 } Devices { Name: "vdisk-3-1-0-6-0" State: UP Timestamp: 120027000 } Devices { Name: "pdisk-24-24" State: UP Timestamp: 120027000 } Timestamp: 120027000 NodeId: 24 
InterconnectPort: 12007 Location { DataCenter: "1" Module: "7" Rack: "7" Unit: "7" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120027000 } Devices { Name: "vdisk-0-1-0-7-0" State: UP Timestamp: 120027000 } Devices { Name: "vdisk-1-1-0-7-0" State: UP Timestamp: 120027000 } Devices { Name: "vdisk-2-1-0-7-0" State: UP Timestamp: 120027000 } Devices { Name: "vdisk-3-1-0-7-0" State: UP Timestamp: 120027000 } Devices { Name: "pdisk-25-25" State: UP Timestamp: 120027000 } Timestamp: 120027000 NodeId: 25 InterconnectPort: 12008 Location { DataCenter: "1" Module: "8" Rack: "8" Unit: "8" } StartTimeSeconds: 0 } Timestamp: 120027000 } 2025-05-07T09:16:31.514397Z node 18 :CMS DEBUG: sentinel.cpp:944: [Sentinel] [Main] Config was updated in 120.003000s 2025-05-07T09:16:31.514454Z node 18 :CMS DEBUG: sentinel.cpp:884: [Sentinel] [Main] Start StateUpdater 2025-05-07T09:16:31.514668Z node 18 :CMS INFO: cms.cpp:347: Check request: User: "user" Actions { Type: RESTART_SERVICES Host: "18" Services: "storage" Duration: 600000000 } PartialPermissionAllowed: false Schedule: false DryRun: false EvictVDisks: true 2025-05-07T09:16:31.514757Z node 18 :CMS DEBUG: cms.cpp:379: Checking action: Type: RESTART_SERVICES Host: "18" Services: "storage" Duration: 600000000 2025-05-07T09:16:31.514842Z node 18 :CMS DEBUG: cms.cpp:398: Result: DISALLOW_TEMP (reason: VDisks eviction from host 18 has not yet been completed) 2025-05-07T09:16:31.514995Z node 18 :CMS DEBUG: cms_tx_store_permissions.cpp:26: TTxStorePermissions Execute 2025-05-07T09:16:31.515228Z node 18 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Store request: id# user-r-1, owner# user, order# 1, priority# 0, body# User: "user" Actions { Type: RESTART_SERVICES Host: "18" Services: "storage" Duration: 600000000 Issue { Type: GENERIC Message: "VDisks eviction from host 18 has not yet been completed" } } PartialPermissionAllowed: false Schedule: false Reason: "" TenantPolicy: DEFAULT AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: true 2025-05-07T09:16:31.515286Z node 18 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Add host marker: host# 18, marker# MARKER_DISK_FAULTY 2025-05-07T09:16:31.515591Z node 18 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 18, wbId# [18:8388350642965737326:1634689637] 2025-05-07T09:16:31.515647Z node 18 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 19, wbId# [19:8388350642965737326:1634689637] 2025-05-07T09:16:31.515679Z node 18 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 20, wbId# [20:8388350642965737326:1 ... 
"storage" State: UP Version: "-1" Timestamp: 120539048 } Devices { Name: "vdisk-0-1-0-0-0" State: UP Timestamp: 120539048 } Devices { Name: "vdisk-1-1-0-0-0" State: UP Timestamp: 120539048 } Devices { Name: "vdisk-2-1-0-0-0" State: UP Timestamp: 120539048 } Devices { Name: "vdisk-3-1-0-0-0" State: UP Timestamp: 120539048 } Devices { Name: "pdisk-18-18" State: UP Timestamp: 120539048 } Timestamp: 120539048 NodeId: 18 InterconnectPort: 12001 Location { DataCenter: "1" Module: "1" Rack: "1" Unit: "1" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120539048 } Devices { Name: "vdisk-0-1-0-1-0" State: UP Timestamp: 120539048 } Devices { Name: "vdisk-1-1-0-1-0" State: UP Timestamp: 120539048 } Devices { Name: "vdisk-2-1-0-1-0" State: UP Timestamp: 120539048 } Devices { Name: "vdisk-3-1-0-1-0" State: UP Timestamp: 120539048 } Devices { Name: "pdisk-19-19" State: UP Timestamp: 120539048 } Timestamp: 120539048 NodeId: 19 InterconnectPort: 12002 Location { DataCenter: "1" Module: "2" Rack: "2" Unit: "2" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120539048 } Devices { Name: "vdisk-0-1-0-2-0" State: UP Timestamp: 120539048 } Devices { Name: "vdisk-1-1-0-2-0" State: UP Timestamp: 120539048 } Devices { Name: "vdisk-2-1-0-2-0" State: UP Timestamp: 120539048 } Devices { Name: "vdisk-3-1-0-2-0" State: UP Timestamp: 120539048 } Devices { Name: "pdisk-20-20" State: UP Timestamp: 120539048 } Timestamp: 120539048 NodeId: 20 InterconnectPort: 12003 Location { DataCenter: "1" Module: "3" Rack: "3" Unit: "3" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120539048 } Devices { Name: "vdisk-0-1-0-3-0" State: UP Timestamp: 120539048 } Devices { Name: "vdisk-1-1-0-3-0" State: UP Timestamp: 120539048 } Devices { Name: "vdisk-2-1-0-3-0" State: UP Timestamp: 120539048 } Devices { Name: "vdisk-3-1-0-3-0" State: UP Timestamp: 120539048 } Devices { Name: "pdisk-21-21" State: UP Timestamp: 120539048 } Timestamp: 120539048 NodeId: 21 InterconnectPort: 12004 Location { DataCenter: "1" Module: "4" Rack: "4" Unit: "4" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120539048 } Devices { Name: "vdisk-0-1-0-4-0" State: UP Timestamp: 120539048 } Devices { Name: "vdisk-1-1-0-4-0" State: UP Timestamp: 120539048 } Devices { Name: "vdisk-2-1-0-4-0" State: UP Timestamp: 120539048 } Devices { Name: "vdisk-3-1-0-4-0" State: UP Timestamp: 120539048 } Devices { Name: "pdisk-22-22" State: UP Timestamp: 120539048 } Timestamp: 120539048 NodeId: 22 InterconnectPort: 12005 Location { DataCenter: "1" Module: "5" Rack: "5" Unit: "5" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120539048 } Devices { Name: "vdisk-0-1-0-5-0" State: UP Timestamp: 120539048 } Devices { Name: "vdisk-1-1-0-5-0" State: UP Timestamp: 120539048 } Devices { Name: "vdisk-2-1-0-5-0" State: UP Timestamp: 120539048 } Devices { Name: "vdisk-3-1-0-5-0" State: UP Timestamp: 120539048 } Devices { Name: "pdisk-23-23" State: UP Timestamp: 120539048 } Timestamp: 120539048 NodeId: 23 InterconnectPort: 12006 Location { DataCenter: "1" Module: "6" Rack: "6" Unit: "6" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120539048 } Devices { Name: "vdisk-0-1-0-6-0" State: UP 
Timestamp: 120539048 } Devices { Name: "vdisk-1-1-0-6-0" State: UP Timestamp: 120539048 } Devices { Name: "vdisk-2-1-0-6-0" State: UP Timestamp: 120539048 } Devices { Name: "vdisk-3-1-0-6-0" State: UP Timestamp: 120539048 } Devices { Name: "pdisk-24-24" State: UP Timestamp: 120539048 } Timestamp: 120539048 NodeId: 24 InterconnectPort: 12007 Location { DataCenter: "1" Module: "7" Rack: "7" Unit: "7" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120539048 } Devices { Name: "vdisk-0-1-0-7-0" State: UP Timestamp: 120539048 } Devices { Name: "vdisk-1-1-0-7-0" State: UP Timestamp: 120539048 } Devices { Name: "vdisk-2-1-0-7-0" State: UP Timestamp: 120539048 } Devices { Name: "vdisk-3-1-0-7-0" State: UP Timestamp: 120539048 } Devices { Name: "pdisk-25-25" State: UP Timestamp: 120539048 } Timestamp: 120539048 NodeId: 25 InterconnectPort: 12008 Location { DataCenter: "1" Module: "8" Rack: "8" Unit: "8" } StartTimeSeconds: 0 } Timestamp: 120539048 } 2025-05-07T09:16:31.896782Z node 18 :CMS INFO: cms.cpp:347: Check request: User: "user" Actions { Type: RESTART_SERVICES Host: "18" Services: "storage" Duration: 600000000 } PartialPermissionAllowed: false Schedule: false DryRun: false EvictVDisks: true 2025-05-07T09:16:31.896859Z node 18 :CMS DEBUG: cms.cpp:379: Checking action: Type: RESTART_SERVICES Host: "18" Services: "storage" Duration: 600000000 2025-05-07T09:16:31.896915Z node 18 :CMS DEBUG: cms.cpp:398: Result: DISALLOW_TEMP (reason: VDisks eviction from host 18 has not yet been completed) 2025-05-07T09:16:31.897060Z node 18 :CMS DEBUG: cms_tx_store_permissions.cpp:26: TTxStorePermissions Execute 2025-05-07T09:16:31.897255Z node 18 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Store request: id# user-r-3, owner# user, order# 3, priority# 0, body# User: "user" Actions { Type: RESTART_SERVICES Host: "18" Services: "storage" Duration: 600000000 Issue { Type: GENERIC Message: "VDisks eviction from host 18 has not yet been completed" } } PartialPermissionAllowed: false Schedule: false Reason: "" TenantPolicy: DEFAULT AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: true 2025-05-07T09:16:31.897302Z node 18 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Add host marker: host# 18, marker# MARKER_DISK_FAULTY 2025-05-07T09:16:31.897556Z node 18 :CMS DEBUG: sentinel.cpp:944: [Sentinel] [Main] Config was updated in 0.100000s 2025-05-07T09:16:31.897622Z node 18 :CMS DEBUG: sentinel.cpp:884: [Sentinel] [Main] Start StateUpdater 2025-05-07T09:16:31.897720Z node 18 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 18, wbId# [18:8388350642965737326:1634689637] 2025-05-07T09:16:31.897768Z node 18 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 19, wbId# [19:8388350642965737326:1634689637] 2025-05-07T09:16:31.897854Z node 18 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 20, wbId# [20:8388350642965737326:1634689637] 2025-05-07T09:16:31.897895Z node 18 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 21, wbId# [21:8388350642965737326:1634689637] 2025-05-07T09:16:31.897933Z node 18 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 22, wbId# [22:8388350642965737326:1634689637] 2025-05-07T09:16:31.897960Z node 18 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 23, wbId# [23:8388350642965737326:1634689637] 
2025-05-07T09:16:31.898096Z node 18 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 24, wbId# [24:8388350642965737326:1634689637] 2025-05-07T09:16:31.898126Z node 18 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 25, wbId# [25:8388350642965737326:1634689637] 2025-05-07T09:16:31.898346Z node 18 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 18, response# PDiskStateInfo { PDiskId: 18 CreateTime: 120440560 ChangeTime: 120440560 Path: "/18/pdisk-18.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 120539 2025-05-07T09:16:31.898875Z node 18 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 25, response# PDiskStateInfo { PDiskId: 25 CreateTime: 120440560 ChangeTime: 120440560 Path: "/25/pdisk-25.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 120539 2025-05-07T09:16:31.899007Z node 18 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 24, response# PDiskStateInfo { PDiskId: 24 CreateTime: 120440560 ChangeTime: 120440560 Path: "/24/pdisk-24.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 120539 2025-05-07T09:16:31.899192Z node 18 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 19, response# PDiskStateInfo { PDiskId: 19 CreateTime: 120440560 ChangeTime: 120440560 Path: "/19/pdisk-19.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 120539 2025-05-07T09:16:31.899261Z node 18 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 20, response# PDiskStateInfo { PDiskId: 20 CreateTime: 120440560 ChangeTime: 120440560 Path: "/20/pdisk-20.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 120539 2025-05-07T09:16:31.899322Z node 18 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 21, response# PDiskStateInfo { PDiskId: 21 CreateTime: 120440560 ChangeTime: 120440560 Path: "/21/pdisk-21.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 120539 2025-05-07T09:16:31.899395Z node 18 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 22, response# PDiskStateInfo { PDiskId: 22 CreateTime: 120440560 ChangeTime: 120440560 Path: "/22/pdisk-22.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 120539 2025-05-07T09:16:31.899466Z node 18 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 23, response# PDiskStateInfo { PDiskId: 23 CreateTime: 120440560 ChangeTime: 120440560 Path: "/23/pdisk-23.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 120539 2025-05-07T09:16:31.899532Z node 18 :CMS DEBUG: sentinel.cpp:960: [Sentinel] [Main] State was updated in 0.000000s 2025-05-07T09:16:31.912060Z node 18 :CMS DEBUG: cms_tx_store_permissions.cpp:137: TTxStorePermissions complete 2025-05-07T09:16:31.912312Z node 18 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvPermissionRequest { 
User: "user" Actions { Type: RESTART_SERVICES Host: "18" Services: "storage" Duration: 600000000 } PartialPermissionAllowed: false Schedule: false DryRun: false EvictVDisks: true }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: DISALLOW_TEMP Reason: "VDisks eviction from host 18 has not yet been completed" } RequestId: "user-r-3" Deadline: 0 } 2025-05-07T09:16:31.912881Z node 18 :CMS INFO: cms.cpp:1403: User user removes request user-r-3 2025-05-07T09:16:31.912931Z node 18 :CMS DEBUG: cms.cpp:1426: Resulting status: OK 2025-05-07T09:16:31.912993Z node 18 :CMS DEBUG: cms_tx_remove_request.cpp:21: TTxRemoveRequest Execute 2025-05-07T09:16:31.913037Z node 18 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reset host markers: host# 18 2025-05-07T09:16:31.913162Z node 18 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Remove request: id# user-r-3, reason# explicit remove 2025-05-07T09:16:31.926013Z node 18 :CMS DEBUG: cms_tx_remove_request.cpp:45: TTxRemoveRequest Complete 2025-05-07T09:16:31.926204Z node 18 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvManageRequestRequest { User: "user" Command: REJECT RequestId: "user-r-3" DryRun: false }, response# NKikimr::NCms::TEvCms::TEvManageRequestResponse { Status { Code: OK } } >> KqpUniqueIndex::ReplaceFkDuplicate [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpUniqueIndex::ReplaceFkDuplicate [GOOD] Test command err: Trying to start YDB, gRPC: 13837, MsgBus: 7335 2025-05-07T09:16:14.016036Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501630140677140632:2206];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:16:14.016433Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003bcf/r3tmp/tmpk3X7mg/pdisk_1.dat 2025-05-07T09:16:14.577128Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:16:14.577240Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:16:14.579654Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:16:14.609963Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 13837, node 1 2025-05-07T09:16:14.724532Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:16:14.724557Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:16:14.724569Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:16:14.724725Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:7335 TClient is connected to server localhost:7335 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:16:15.433587Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:15.466837Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:15.629628Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:15.790699Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:15.866497Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:17.699550Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501630153562044022:2406], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:17.699696Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:18.056611Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T09:16:18.095608Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T09:16:18.128901Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T09:16:18.186022Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T09:16:18.225302Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T09:16:18.274490Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T09:16:18.306439Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T09:16:18.403653Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501630157857011984:2470], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:18.403761Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:18.403902Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501630157857011989:2473], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:18.408043Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-05-07T09:16:18.426559Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501630157857011991:2474], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T09:16:18.504204Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501630157857012042:3424] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:16:19.008897Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501630140677140632:2206];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:16:19.008973Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:16:19.642386Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:21.548691Z node 1 :KQP_EXECUTER ERROR: kqp_literal_executer.cpp:107: ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: 01jtn0g7eh8yeg8nzg8eqkayn6, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NjFlODBhZjEtNjVlN2M2Y2YtNWNhMDcwYWItOGE1Mzg0MzQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. TKqpLiteralExecuter, TKqpEnsure failed. 2025-05-07T09:16:21.559600Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2578: SessionId: ydb://session/3?node_id=1&id=NjFlODBhZjEtNjVlN2M2Y2YtNWNhMDcwYWItOGE1Mzg0MzQ=, ActorId: [1:7501630162151980367:2572], ActorState: ExecuteState, TraceId: 01jtn0g7eh8yeg8nzg8eqkayn6, Create QueryResponse for error on request, msg: 2025-05-07T09:16:22.281153Z node 1 :KQP_EXECUTER ERROR: kqp_literal_executer.cpp:107: ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: 01jtn0g864c75ts52e6ghax4wa, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NjFlODBhZjEtNjVlN2M2Y2YtNWNhMDcwYWItOGE1Mzg0MzQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. TKqpLiteralExecuter, TKqpEnsure failed. 
2025-05-07T09:16:22.281366Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2578: SessionId: ydb://session/3?node_id=1&id=NjFlODBhZjEtNjVlN2M2Y2YtNWNhMDcwYWItOGE1Mzg0MzQ=, ActorId: [1:7501630162151980367:2572], ActorState: ExecuteState, TraceId: 01jtn0g864c75ts52e6ghax4wa, Create QueryResponse for error on request, msg: Trying to start YDB, gRPC: 30096, MsgBus: 7094 2025-05-07T09:16:23.370381Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7501630180202485177:2276];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:16:23.370696Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003bcf/r3tmp/tmpLkCzVS/pdisk_1.dat 2025-05-07T09:16:23.587010Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:16:23.587100Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:16:23.588855Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:16:23.591867Z node 2 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 30096, node 2 2025-05-07T09:16:23.667397Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:16:23.667413Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:16:23.667419Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:16:23.667540Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:7094 TClient is connected to server localhost:7094 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:16:24.227692Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T09:16:24.236727Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T09:16:24.252670Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:24.331266Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:24.564882Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:24.669421Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:27.080217Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7501630197382355791:2406], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:27.080424Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:27.142394Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T09:16:27.191685Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T09:16:27.240406Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T09:16:27.284961Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T09:16:27.324745Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T09:16:27.400223Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T09:16:27.448331Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T09:16:27.558862Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7501630197382356453:2470], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:27.558953Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:27.559437Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7501630197382356458:2473], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:27.563884Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-05-07T09:16:27.582566Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7501630197382356460:2474], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T09:16:27.664798Z node 2 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [2:7501630197382356511:3416] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:16:28.349528Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7501630180202485177:2276];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:16:28.349593Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:16:28.682271Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:30.709029Z node 2 :KQP_EXECUTER ERROR: kqp_literal_executer.cpp:107: ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: 01jtn0ggaeb485w007ne2zt5hc, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YzQzMTIyOWQtZDYzNzA2ODMtMWIyN2ExMWEtYTQ2M2RiZDk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. TKqpLiteralExecuter, TKqpEnsure failed. 2025-05-07T09:16:30.709330Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2578: SessionId: ydb://session/3?node_id=2&id=YzQzMTIyOWQtZDYzNzA2ODMtMWIyN2ExMWEtYTQ2M2RiZDk=, ActorId: [2:7501630201677324832:2571], ActorState: ExecuteState, TraceId: 01jtn0ggaeb485w007ne2zt5hc, Create QueryResponse for error on request, msg: 2025-05-07T09:16:31.454444Z node 2 :KQP_EXECUTER ERROR: kqp_literal_executer.cpp:107: ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: 01jtn0gh40ftfwfdykav1v682z, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YzQzMTIyOWQtZDYzNzA2ODMtMWIyN2ExMWEtYTQ2M2RiZDk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. TKqpLiteralExecuter, TKqpEnsure failed. 
2025-05-07T09:16:31.454703Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2578: SessionId: ydb://session/3?node_id=2&id=YzQzMTIyOWQtZDYzNzA2ODMtMWIyN2ExMWEtYTQ2M2RiZDk=, ActorId: [2:7501630201677324832:2571], ActorState: ExecuteState, TraceId: 01jtn0gh40ftfwfdykav1v682z, Create QueryResponse for error on request, msg: >> TestDataErasure::SimpleDataErasureTestForAllSupportedObjects >> TestDataErasure::SimpleDataErasureTestForTopic >> TestDataErasure::DataErasureWithSplit >> TestDataErasure::DataErasureManualLaunch >> TestDataErasure::DataErasureWithMerge >> TestDataErasure::DataErasureRun3CyclesForAllSupportedObjects >> TestDataErasure::SimpleDataErasureTestForTables >> KqpIndexes::MultipleSecondaryIndex+UseSink [GOOD] >> KqpIndexes::MultipleSecondaryIndex-UseSink >> TSchemeshardBackgroundCompactionTest::ShouldCompactServerless [GOOD] >> TSchemeshardBackgroundCompactionTest::ShouldNotCompactServerlessAfterDisable >> SystemView::AuthGroups_Access [GOOD] >> SystemView::AuthGroups_ResultOrder >> CdcStreamChangeCollector::IndexAndStreamUpsert [GOOD] >> CdcStreamChangeCollector::NewImage >> CdcStreamChangeCollector::DeleteNothing [GOOD] >> CdcStreamChangeCollector::DeleteSingleRow >> CdcStreamChangeCollector::UpsertModifyDelete [GOOD] >> TestDataErasure::DataErasureRun3CyclesForTopics >> KqpIndexes::DoUpsertWithoutIndexUpdate-UniqIndex+UseSink [GOOD] >> KqpIndexes::DoUpsertWithoutIndexUpdate+UniqIndex+UseSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_change_collector/unittest >> CdcStreamChangeCollector::UpsertModifyDelete [GOOD] Test command err: 2025-05-07T09:16:23.254153Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:105:2151], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T09:16:23.254507Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T09:16:23.254803Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003258/r3tmp/tmpXA667j/pdisk_1.dat 2025-05-07T09:16:23.776097Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-05-07T09:16:23.832816Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:16:23.847283Z node 1 :TABLET_SAUSAGECACHE NOTICE: shared_sausagecache.cpp:1187: Update config MemoryLimit: 33554432 2025-05-07T09:16:23.900730Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:16:23.900877Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:16:23.916148Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:16:24.040074Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-05-07T09:16:24.125319Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:665:2569] 2025-05-07T09:16:24.125612Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-05-07T09:16:24.200502Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-05-07T09:16:24.200653Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-05-07T09:16:24.202656Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-05-07T09:16:24.202748Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-05-07T09:16:24.202812Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-05-07T09:16:24.203230Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-05-07T09:16:24.203397Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-05-07T09:16:24.203509Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:681:2569] in generation 1 2025-05-07T09:16:24.214474Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-05-07T09:16:24.239048Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-05-07T09:16:24.239282Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing 
params 2025-05-07T09:16:24.239408Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:683:2579] 2025-05-07T09:16:24.239471Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-05-07T09:16:24.239513Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-05-07T09:16:24.239551Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:16:24.243614Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-05-07T09:16:24.243776Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-05-07T09:16:24.243890Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T09:16:24.243956Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-05-07T09:16:24.244053Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-05-07T09:16:24.244145Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T09:16:24.244270Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:661:2566], serverId# [1:672:2573], sessionId# [0:0:0] 2025-05-07T09:16:24.244887Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-05-07T09:16:24.245399Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-05-07T09:16:24.245513Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-05-07T09:16:24.247659Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-05-07T09:16:24.258587Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-05-07T09:16:24.258728Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-05-07T09:16:24.435573Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:697:2587], serverId# [1:699:2589], sessionId# [0:0:0] 2025-05-07T09:16:24.447093Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715657 at step 1000 at tablet 72075186224037888 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1000 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2025-05-07T09:16:24.447186Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:16:24.447559Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T09:16:24.447606Z node 1 
:TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-05-07T09:16:24.447676Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000:281474976715657] in PlanQueue unit at 72075186224037888 2025-05-07T09:16:24.447909Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1000:281474976715657 keys extracted: 0 2025-05-07T09:16:24.448098Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-05-07T09:16:24.448758Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T09:16:24.448850Z node 1 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-05-07T09:16:24.451403Z node 1 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-05-07T09:16:24.452042Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-05-07T09:16:24.454095Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-05-07T09:16:24.454141Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:16:24.454831Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-05-07T09:16:24.454915Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T09:16:24.455943Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T09:16:24.455981Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-05-07T09:16:24.456047Z node 1 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-05-07T09:16:24.456132Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [1:410:2405], exec latency: 0 ms, propose latency: 0 ms 2025-05-07T09:16:24.456182Z node 1 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-05-07T09:16:24.456260Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:16:24.460219Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-05-07T09:16:24.462635Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-05-07T09:16:24.462877Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 
2025-05-07T09:16:24.462955Z node 1 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-05-07T09:16:24.498679Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-05-07T09:16:24.499343Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-05-07T09:16:24.499546Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715658 ssId 72057594046644480 seqNo 2:2 2025-05-07T09:16:24.499629Z node 1 :TX_DATASHARD INFO: check_scheme_tx_unit.cpp:234: Check scheme tx, proposed scheme version# 2 current version# 1 expected version# 2 at tablet# 72075186224037888 txId# 281474976715658 2025-05-07T09:16:24.499691Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715658 at tablet 72075186224037888 2025-05-07T09:16:24.525847Z node 1 :TX_DATASHARD DEBUG: datashard__pro ... DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-05-07T09:16:35.843922Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-05-07T09:16:35.843994Z node 3 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-05-07T09:16:35.857171Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-05-07T09:16:35.857395Z node 3 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715658 ssId 72057594046644480 seqNo 2:2 2025-05-07T09:16:35.857454Z node 3 :TX_DATASHARD INFO: check_scheme_tx_unit.cpp:234: Check scheme tx, proposed scheme version# 2 current version# 1 expected version# 2 at tablet# 72075186224037888 txId# 281474976715658 2025-05-07T09:16:35.857491Z node 3 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715658 at tablet 72075186224037888 2025-05-07T09:16:35.857928Z node 3 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-05-07T09:16:35.883099Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-05-07T09:16:36.100708Z node 3 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715658 at step 1500 at tablet 72075186224037888 { Transactions { TxId: 281474976715658 AckTo { RawX1: 0 RawX2: 0 } } Step: 1500 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2025-05-07T09:16:36.100795Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:16:36.101065Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T09:16:36.101108Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-05-07T09:16:36.101156Z node 3 
:TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1500:281474976715658] in PlanQueue unit at 72075186224037888 2025-05-07T09:16:36.101406Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1500:281474976715658 keys extracted: 0 2025-05-07T09:16:36.101539Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-05-07T09:16:36.101865Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T09:16:36.102723Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-05-07T09:16:36.154680Z node 3 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1500} 2025-05-07T09:16:36.154815Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T09:16:36.154858Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T09:16:36.154904Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:16:36.154990Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1500 : 281474976715658] from 72075186224037888 at tablet 72075186224037888 send result to client [3:419:2412], exec latency: 0 ms, propose latency: 0 ms 2025-05-07T09:16:36.155054Z node 3 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715658 state Ready TxInFly 0 2025-05-07T09:16:36.155156Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:16:36.158050Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715658 datashard 72075186224037888 state Ready 2025-05-07T09:16:36.158157Z node 3 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-05-07T09:16:36.165525Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:876:2714], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:36.165642Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:887:2719], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:36.165726Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:36.171679Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480 2025-05-07T09:16:36.183363Z node 3 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-05-07T09:16:36.369690Z node 3 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-05-07T09:16:36.373202Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:890:2722], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-05-07T09:16:36.409546Z node 3 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [3:946:2759] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:16:36.471384Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715661. Ctx: { TraceId: 01jtn0gpe34n4q0mx4zmeaa67a, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=NTFjYTJhZGUtM2E3MDcxNmItMTFmMzA5MjItOGI1OTBmNDY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:16:36.473994Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [3:977:2776], serverId# [3:978:2777], sessionId# [0:0:0] 2025-05-07T09:16:36.474439Z node 3 :TX_DATASHARD DEBUG: execute_write_unit.cpp:245: Executing write operation for [0:3] at 72075186224037888 2025-05-07T09:16:36.474748Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:874: PersistChangeRecord: record: { Order: 1 Group: 1746609396474639 Step: 2000 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] Kind: CdcDataChange Source: Unspecified Body: 34b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 2 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037888 2025-05-07T09:16:36.474962Z node 3 :TX_DATASHARD DEBUG: execute_write_unit.cpp:410: Executed write operation for [0:3] at 72075186224037888, row count=1 2025-05-07T09:16:36.486734Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:1170: EnqueueChangeRecords: at tablet: 72075186224037888, records: { Order: 1 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] BodySize: 34 TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 2 } 2025-05-07T09:16:36.486817Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:16:36.557512Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715662. Ctx: { TraceId: 01jtn0gpreb0ds1hcjyta2xm1p, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=ZWY2Yzk5ZmMtMTgxZmJiMmMtOWU2MGViZWYtOTExZDBkYTA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-05-07T09:16:36.559727Z node 3 :TX_DATASHARD DEBUG: execute_write_unit.cpp:245: Executing write operation for [0:4] at 72075186224037888 2025-05-07T09:16:36.560011Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:874: PersistChangeRecord: record: { Order: 2 Group: 1746609396559922 Step: 2000 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] Kind: CdcDataChange Source: Unspecified Body: 50b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 2 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037888 2025-05-07T09:16:36.560145Z node 3 :TX_DATASHARD DEBUG: execute_write_unit.cpp:410: Executed write operation for [0:4] at 72075186224037888, row count=1 2025-05-07T09:16:36.571233Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:1170: EnqueueChangeRecords: at tablet: 72075186224037888, records: { Order: 2 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] BodySize: 50 TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 2 } 2025-05-07T09:16:36.571303Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:16:36.652118Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715663. Ctx: { TraceId: 01jtn0gpty64ywhszhsn5wrmdv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=OTUyZDgzY2YtOGVkODgzNzYtYzY0ZTQyM2QtOGNjMjFmYmE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:16:36.654410Z node 3 :TX_DATASHARD DEBUG: execute_write_unit.cpp:245: Executing write operation for [0:5] at 72075186224037888 2025-05-07T09:16:36.654745Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:874: PersistChangeRecord: record: { Order: 3 Group: 1746609396654620 Step: 2000 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] Kind: CdcDataChange Source: Unspecified Body: 34b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 2 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037888 2025-05-07T09:16:36.654900Z node 3 :TX_DATASHARD DEBUG: execute_write_unit.cpp:410: Executed write operation for [0:5] at 72075186224037888, row count=1 2025-05-07T09:16:36.670794Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:1170: EnqueueChangeRecords: at tablet: 72075186224037888, records: { Order: 3 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] BodySize: 34 TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 2 } 2025-05-07T09:16:36.670901Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:16:36.672734Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [3:1024:2806], serverId# [3:1025:2807], sessionId# [0:0:0] 2025-05-07T09:16:36.679260Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [3:1026:2808], serverId# [3:1027:2809], sessionId# [0:0:0] >> ShowCreateView::WithTablePathPrefix [GOOD] >> ShowCreateView::WithSingleQuotedTablePathPrefix >> KqpMultishardIndex::DataColumnWrite+UseSink [GOOD] >> KqpMultishardIndex::DataColumnWrite-UseSink >> TestDataErasure::SimpleDataErasureTestForTopic [GOOD] >> TestDataErasure::DataErasureManualLaunch [GOOD] >> TestDataErasure::DataErasureManualLaunch3Cycles >> TestDataErasure::SimpleDataErasureTestForTables [GOOD] >> KqpIndexes::SecondaryIndexUsingInJoin2+UseStreamJoin [GOOD] >> 
KqpIndexes::MultipleSecondaryIndexWithSameComulns-UseSink [GOOD] >> TSchemeshardBackgroundCompactionTest::SchemeshardShouldNotCompactBackups [GOOD] >> TSchemeshardBackgroundCompactionTest::SchemeshardShouldNotCompactBorrowed >> TestDataErasure::DataErasureRun3CyclesForTables |94.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/copy_table/py3test >> test_copy_table.py::TestCopyTable::test_copy_table[table_ttl_Uint32-pk_types14-all_types14-index14-Uint32--] [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_data_erasure/unittest >> TestDataErasure::SimpleDataErasureTestForTopic [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140] 2025-05-07T09:16:36.968766Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T09:16:36.968862Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:16:36.968926Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T09:16:36.968960Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T09:16:36.969027Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T09:16:36.969059Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T09:16:36.969110Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:16:36.969178Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T09:16:36.969862Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T09:16:36.970478Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T09:16:37.055023Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T09:16:37.055073Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:16:37.068077Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T09:16:37.068239Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T09:16:37.068412Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as 
Uninitialized, schemeshardId: 72057594046678944 2025-05-07T09:16:37.073232Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T09:16:37.073522Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T09:16:37.074064Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T09:16:37.074258Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T09:16:37.077002Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:16:37.078497Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:16:37.078564Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:16:37.078646Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T09:16:37.078690Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:16:37.078740Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T09:16:37.078950Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T09:16:37.085862Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T09:16:37.225224Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T09:16:37.225433Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:16:37.225672Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T09:16:37.225912Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T09:16:37.227812Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:16:37.230374Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: 
TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T09:16:37.230505Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T09:16:37.230688Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:16:37.230762Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T09:16:37.230805Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T09:16:37.230848Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T09:16:37.233141Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:16:37.233202Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T09:16:37.233242Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T09:16:37.235593Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:16:37.235646Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:16:37.235685Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:16:37.235753Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T09:16:37.245077Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T09:16:37.246889Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T09:16:37.247053Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T09:16:37.247860Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T09:16:37.247963Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:16:37.247995Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:16:37.248228Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T09:16:37.248268Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:16:37.248441Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T09:16:37.248517Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T09:16:37.250155Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:16:37.250196Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:16:37.250383Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:16:37.250413Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 
ger] SendRequestToBSC: Generation# 1 2025-05-07T09:16:40.405290Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 268637738, Sender [1:297:2279], Recipient [1:289:2273]: NKikimrBlobStorage.TEvControllerShredResponse CurrentGeneration: 1 Completed: false Progress10k: 5000 2025-05-07T09:16:40.405324Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5038: StateWork, processing event TEvBlobStorage::TEvControllerShredResponse 2025-05-07T09:16:40.405362Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:7757: Handle TEvControllerShredResponse, at schemeshard: 72057594046678944 2025-05-07T09:16:40.405448Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:628: TTxCompleteDataErasureBSC Execute at schemeshard: 72057594046678944 2025-05-07T09:16:40.405494Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__root_data_erasure_manager.cpp:644: TTxCompleteDataErasureBSC: Progress data shred in BSC 50% 2025-05-07T09:16:40.405592Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:652: TTxCompleteDataErasureBSC Complete at schemeshard: 72057594046678944, NeedScheduleRequestToBSC# true 2025-05-07T09:16:40.405643Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:346: [RootDataErasureManager] ScheduleRequestToBSC: Interval# 1.000000s 2025-05-07T09:16:40.470786Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:457:2410]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T09:16:40.470855Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4848: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T09:16:40.470990Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271124999, Sender [1:457:2410], Recipient [1:457:2410]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T09:16:40.471017Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4847: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T09:16:40.471185Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271122945, Sender [1:640:2556], Recipient [1:457:2410]: NKikimrSchemeOp.TDescribePath PathId: 2 SchemeshardId: 72075186233409546 2025-05-07T09:16:40.471216Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4852: StateWork, processing event TEvSchemeShard::TEvDescribeScheme 2025-05-07T09:16:40.471299Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: PathId: 2 SchemeshardId: 72075186233409546, at schemeshard: 72075186233409546 2025-05-07T09:16:40.471493Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:44: Tablet 72075186233409546 describe pathId 2 took 167us result status StatusSuccess 2025-05-07T09:16:40.471895Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Database1/Topic1" PathDescription { Self { Name: "Topic1" PathId: 2 SchemeshardId: 72075186233409546 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 103 CreateStep: 200 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 
UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 1 } ChildrenExist: false BalancerTabletID: 72075186233409550 } PersQueueGroup { Name: "Topic1" PathId: 2 TotalGroupCount: 1 PartitionPerTablet: 1 PQTabletConfig { PartitionConfig { LifetimeSeconds: 10 } YdbDatabasePath: "/MyRoot/Database1" } Partitions { PartitionId: 0 TabletId: 72075186233409549 Status: Active } AlterVersion: 1 BalancerTabletID: 72075186233409550 NextPartitionId: 1 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 2 ProcessingParams { Version: 2 PlanResolution: 50 Coordinators: 72075186233409547 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409548 SchemeShard: 72075186233409546 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 5 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 1 PQPartitionsLimit: 1000000 } } PathId: 2 PathOwnerId: 72075186233409546, at schemeshard: 72075186233409546 2025-05-07T09:16:40.533833Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:890:2764]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T09:16:40.533895Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4848: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T09:16:40.534058Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271124999, Sender [1:890:2764], Recipient [1:890:2764]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T09:16:40.534108Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4847: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T09:16:40.534235Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271122945, Sender [1:1069:2906], Recipient [1:890:2764]: NKikimrSchemeOp.TDescribePath PathId: 2 SchemeshardId: 72075186233409551 2025-05-07T09:16:40.534262Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4852: StateWork, processing event TEvSchemeShard::TEvDescribeScheme 2025-05-07T09:16:40.534364Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: PathId: 2 SchemeshardId: 72075186233409551, at schemeshard: 72075186233409551 2025-05-07T09:16:40.534527Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:44: Tablet 72075186233409551 describe pathId 2 took 133us result status StatusSuccess 2025-05-07T09:16:40.534923Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Database2/Topic1" PathDescription { Self { Name: "Topic1" PathId: 2 SchemeshardId: 72075186233409551 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 106 CreateStep: 300 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 1 } ChildrenExist: false BalancerTabletID: 72075186233409555 } PersQueueGroup { Name: "Topic1" PathId: 2 TotalGroupCount: 1 PartitionPerTablet: 1 PQTabletConfig { 
PartitionConfig { LifetimeSeconds: 10 } YdbDatabasePath: "/MyRoot/Database2" } Partitions { PartitionId: 0 TabletId: 72075186233409554 Status: Active } AlterVersion: 1 BalancerTabletID: 72075186233409555 NextPartitionId: 1 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 3 ProcessingParams { Version: 2 PlanResolution: 50 Coordinators: 72075186233409552 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409553 SchemeShard: 72075186233409551 } DomainKey { SchemeShard: 72057594046678944 PathId: 3 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 5 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 3 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 1 PQPartitionsLimit: 1000000 } } PathId: 2 PathOwnerId: 72075186233409551, at schemeshard: 72075186233409551 2025-05-07T09:16:40.853537Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:289:2273]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T09:16:40.853628Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4848: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T09:16:40.853732Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271124999, Sender [1:289:2273], Recipient [1:289:2273]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T09:16:40.853762Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4847: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T09:16:40.874337Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271125517, Sender [0:0:0], Recipient [1:289:2273]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToRunDataErasureBSC 2025-05-07T09:16:40.874407Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5039: StateWork, processing event TEvSchemeShard::TEvWakeupToRunDataErasureBSC 2025-05-07T09:16:40.874438Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__root_data_erasure_manager.cpp:352: [RootDataErasureManager] SendRequestToBSC: Generation# 1 2025-05-07T09:16:40.874641Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 268637738, Sender [1:297:2279], Recipient [1:289:2273]: NKikimrBlobStorage.TEvControllerShredResponse CurrentGeneration: 1 Completed: true Progress10k: 10000 2025-05-07T09:16:40.874674Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5038: StateWork, processing event TEvBlobStorage::TEvControllerShredResponse 2025-05-07T09:16:40.874773Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:7757: Handle TEvControllerShredResponse, at schemeshard: 72057594046678944 2025-05-07T09:16:40.874841Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:628: TTxCompleteDataErasureBSC Execute at schemeshard: 72057594046678944 2025-05-07T09:16:40.874874Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__root_data_erasure_manager.cpp:640: TTxCompleteDataErasureBSC: Data shred in BSC is completed 2025-05-07T09:16:40.874955Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:168: [RootDataErasureManager] ScheduleDataErasureWakeup: Interval# 0.979000s, Timestamp# 1970-01-01T00:00:05.066000Z 2025-05-07T09:16:40.874996Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__root_data_erasure_manager.cpp:376: [RootDataErasureManager] Complete: Generation# 1, duration# 2 s 2025-05-07T09:16:40.877122Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:652: TTxCompleteDataErasureBSC Complete at schemeshard: 72057594046678944, NeedScheduleRequestToBSC# false 2025-05-07T09:16:40.877687Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269877761, Sender [1:1358:3163], Recipient [1:289:2273]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-05-07T09:16:40.877751Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4937: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-05-07T09:16:40.877787Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5713: Pipe server connected, at tablet: 72057594046678944 2025-05-07T09:16:40.877949Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271125519, Sender [1:273:2264], Recipient [1:289:2273]: NKikimrScheme.TEvDataErasureInfoRequest 2025-05-07T09:16:40.878004Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5036: StateWork, processing event TEvSchemeShard::TEvDataErasureInfoRequest 2025-05-07T09:16:40.878043Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:7712: Handle TEvDataErasureInfoRequest, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_data_erasure/unittest >> TestDataErasure::SimpleDataErasureTestForTables [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140] 2025-05-07T09:16:36.966769Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T09:16:36.966847Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:16:36.966888Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T09:16:36.966922Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T09:16:36.967001Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T09:16:36.967038Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T09:16:36.967092Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:16:36.967155Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 
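Each data-shred polling round in the log has the same shape: SendRequestToBSC, a TEvControllerShredResponse carrying Progress10k (a 0..10000 fixed-point fraction, so 5000 is logged as 50%), and, while Completed is false, a rescheduled request after a roughly one-second interval (Interval# 1.000000s). A minimal sketch of that loop under those assumptions; the struct and function names are hypothetical stand-ins, not the real YDB API.

```cpp
#include <cstdint>
#include <cstdio>

// Hypothetical stand-in for the response seen in the log:
// "NKikimrBlobStorage.TEvControllerShredResponse CurrentGeneration: 1
//  Completed: false Progress10k: 5000".
struct TShredResponse {
    uint64_t CurrentGeneration = 1;
    bool Completed = false;
    uint32_t Progress10k = 0; // 0..10000, i.e. percent * 100
};

// Mirrors what TTxCompleteDataErasureBSC logs: report progress and decide
// whether another request to BSC must be scheduled.
bool HandleShredResponse(const TShredResponse& resp) {
    if (resp.Completed) {
        std::printf("Data shred in BSC is completed\n");
        return false; // NeedScheduleRequestToBSC# false
    }
    std::printf("Progress data shred in BSC %u%%\n", resp.Progress10k / 100);
    return true; // NeedScheduleRequestToBSC# true, retry after ~1s
}

int main() {
    // The three responses observed in this run: 0%, 50%, then completed.
    TShredResponse rounds[] = {
        {1, false, 0}, {1, false, 5000}, {1, true, 10000},
    };
    for (const auto& r : rounds) {
        if (!HandleShredResponse(r)) break;
        // Real code would schedule a wakeup here (ScheduleRequestToBSC).
    }
    return 0;
}
```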
2025-05-07T09:16:36.967909Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T09:16:36.968264Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T09:16:37.047187Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T09:16:37.047251Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:16:37.064185Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T09:16:37.064425Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T09:16:37.064621Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T09:16:37.070635Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T09:16:37.070943Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T09:16:37.071666Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T09:16:37.071879Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T09:16:37.074607Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:16:37.076109Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:16:37.076169Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:16:37.076237Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T09:16:37.076283Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:16:37.076333Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T09:16:37.076547Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T09:16:37.083714Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T09:16:37.222343Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T09:16:37.222572Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:16:37.222844Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T09:16:37.223095Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T09:16:37.223159Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:16:37.225607Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T09:16:37.225732Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T09:16:37.225909Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:16:37.226008Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T09:16:37.226053Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T09:16:37.226112Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T09:16:37.228222Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:16:37.228280Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T09:16:37.228328Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T09:16:37.230158Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:16:37.230207Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:16:37.230249Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:16:37.230311Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T09:16:37.234372Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T09:16:37.237030Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T09:16:37.237247Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T09:16:37.238276Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T09:16:37.238402Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:16:37.238475Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:16:37.238736Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T09:16:37.238785Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:16:37.238981Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T09:16:37.239071Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T09:16:37.241599Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:16:37.241662Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:16:37.241911Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:16:37.241955Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 
anager.cpp:352: [RootDataErasureManager] SendRequestToBSC: Generation# 1 2025-05-07T09:16:40.144569Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269877760, Sender [1:1957:3626], Recipient [1:289:2273]: NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594037932033 Status: OK ServerId: [1:1958:3627] Leader: 1 Dead: 0 Generation: 2 VersionInfo: } 2025-05-07T09:16:40.144608Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4935: StateWork, processing event TEvTabletPipe::TEvClientConnected 2025-05-07T09:16:40.144635Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5663: Handle TEvClientConnected, tabletId: 72057594037932033, status: OK, at schemeshard: 72057594046678944 2025-05-07T09:16:40.144769Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 268637738, Sender [1:297:2279], Recipient [1:289:2273]: NKikimrBlobStorage.TEvControllerShredResponse CurrentGeneration: 1 Completed: false Progress10k: 0 2025-05-07T09:16:40.144804Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5038: StateWork, processing event TEvBlobStorage::TEvControllerShredResponse 2025-05-07T09:16:40.144839Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:7757: Handle TEvControllerShredResponse, at schemeshard: 72057594046678944 2025-05-07T09:16:40.144904Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:628: TTxCompleteDataErasureBSC Execute at schemeshard: 72057594046678944 2025-05-07T09:16:40.144960Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__root_data_erasure_manager.cpp:644: TTxCompleteDataErasureBSC: Progress data shred in BSC 0% 2025-05-07T09:16:40.145064Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:652: TTxCompleteDataErasureBSC Complete at schemeshard: 72057594046678944, NeedScheduleRequestToBSC# true 2025-05-07T09:16:40.145126Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:346: [RootDataErasureManager] ScheduleRequestToBSC: Interval# 1.000000s 2025-05-07T09:16:40.590817Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:457:2410]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T09:16:40.590885Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4848: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T09:16:40.590963Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:835:2718]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T09:16:40.590987Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4848: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T09:16:40.591036Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:289:2273]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T09:16:40.591057Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4848: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T09:16:40.591107Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271124999, Sender [1:457:2410], Recipient [1:457:2410]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 
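The TRACE pairs above ("StateWork, received event# <id>" followed by "StateWork, processing event <type>") are classic actor-style dispatch: a numeric event type is matched and routed to its handler. A minimal sketch of that dispatch shape, using the numeric IDs taken from the log; the symbolic names are inferred from the "processing event ..." messages and the routing is illustrative, not the actual schemeshard code.

```cpp
#include <cstdint>
#include <cstdio>

// Event type constants as they appear in the log; the symbolic names are
// assumptions based on the "processing event ..." lines.
constexpr uint32_t EvWakeupToMeasureSelfResponseTime = 271125000;
constexpr uint32_t EvMeasureSelfResponseTime         = 271124999;
constexpr uint32_t EvWakeupToRunDataErasureBSC       = 271125517;
constexpr uint32_t EvControllerShredResponse         = 268637738;

// One dispatch step: log receipt, then route by numeric event type,
// mirroring the "received event# / processing event" trace pairs.
void StateWork(uint32_t eventType) {
    std::printf("StateWork, received event# %u\n", eventType);
    switch (eventType) {
        case EvWakeupToMeasureSelfResponseTime:
            std::printf("processing event TEvWakeupToMeasureSelfResponseTime\n");
            break;
        case EvMeasureSelfResponseTime:
            std::printf("processing event TEvMeasureSelfResponseTime\n");
            break;
        case EvWakeupToRunDataErasureBSC:
            std::printf("processing event TEvWakeupToRunDataErasureBSC\n");
            break;
        case EvControllerShredResponse:
            std::printf("processing event TEvBlobStorage::TEvControllerShredResponse\n");
            break;
        default:
            std::printf("unhandled event\n");
            break;
    }
}

int main() {
    StateWork(EvWakeupToRunDataErasureBSC);
    StateWork(EvControllerShredResponse);
    return 0;
}
```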
2025-05-07T09:16:40.591137Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4847: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T09:16:40.591199Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271124999, Sender [1:835:2718], Recipient [1:835:2718]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T09:16:40.591221Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4847: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T09:16:40.591271Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271124999, Sender [1:289:2273], Recipient [1:289:2273]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T09:16:40.591294Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4847: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T09:16:40.632355Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271125517, Sender [0:0:0], Recipient [1:289:2273]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToRunDataErasureBSC 2025-05-07T09:16:40.632414Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5039: StateWork, processing event TEvSchemeShard::TEvWakeupToRunDataErasureBSC 2025-05-07T09:16:40.632460Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__root_data_erasure_manager.cpp:352: [RootDataErasureManager] SendRequestToBSC: Generation# 1 2025-05-07T09:16:40.632667Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 268637738, Sender [1:297:2279], Recipient [1:289:2273]: NKikimrBlobStorage.TEvControllerShredResponse CurrentGeneration: 1 Completed: false Progress10k: 5000 2025-05-07T09:16:40.632697Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5038: StateWork, processing event TEvBlobStorage::TEvControllerShredResponse 2025-05-07T09:16:40.632726Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:7757: Handle TEvControllerShredResponse, at schemeshard: 72057594046678944 2025-05-07T09:16:40.632793Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:628: TTxCompleteDataErasureBSC Execute at schemeshard: 72057594046678944 2025-05-07T09:16:40.632835Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__root_data_erasure_manager.cpp:644: TTxCompleteDataErasureBSC: Progress data shred in BSC 50% 2025-05-07T09:16:40.632894Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:652: TTxCompleteDataErasureBSC Complete at schemeshard: 72057594046678944, NeedScheduleRequestToBSC# true 2025-05-07T09:16:40.632932Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:346: [RootDataErasureManager] ScheduleRequestToBSC: Interval# 1.000000s 2025-05-07T09:16:40.992482Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:835:2718]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T09:16:40.992555Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4848: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T09:16:40.992644Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:289:2273]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T09:16:40.992670Z node 1 
:FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4848: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T09:16:40.992712Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:457:2410]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T09:16:40.992736Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4848: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T09:16:40.992795Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271124999, Sender [1:457:2410], Recipient [1:457:2410]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T09:16:40.992868Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4847: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T09:16:40.992951Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271124999, Sender [1:835:2718], Recipient [1:835:2718]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T09:16:40.992976Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4847: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T09:16:40.993027Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271124999, Sender [1:289:2273], Recipient [1:289:2273]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T09:16:40.993049Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4847: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T09:16:41.034158Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271125517, Sender [0:0:0], Recipient [1:289:2273]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToRunDataErasureBSC 2025-05-07T09:16:41.034229Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5039: StateWork, processing event TEvSchemeShard::TEvWakeupToRunDataErasureBSC 2025-05-07T09:16:41.034263Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__root_data_erasure_manager.cpp:352: [RootDataErasureManager] SendRequestToBSC: Generation# 1 2025-05-07T09:16:41.034551Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 268637738, Sender [1:297:2279], Recipient [1:289:2273]: NKikimrBlobStorage.TEvControllerShredResponse CurrentGeneration: 1 Completed: true Progress10k: 10000 2025-05-07T09:16:41.034593Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5038: StateWork, processing event TEvBlobStorage::TEvControllerShredResponse 2025-05-07T09:16:41.034625Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:7757: Handle TEvControllerShredResponse, at schemeshard: 72057594046678944 2025-05-07T09:16:41.034688Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:628: TTxCompleteDataErasureBSC Execute at schemeshard: 72057594046678944 2025-05-07T09:16:41.034719Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__root_data_erasure_manager.cpp:640: TTxCompleteDataErasureBSC: Data shred in BSC is completed 2025-05-07T09:16:41.034930Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:168: [RootDataErasureManager] ScheduleDataErasureWakeup: Interval# 0.938000s, Timestamp# 1970-01-01T00:00:05.107000Z 2025-05-07T09:16:41.034984Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__root_data_erasure_manager.cpp:376: [RootDataErasureManager] Complete: Generation# 1, duration# 2 s 2025-05-07T09:16:41.037136Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:652: TTxCompleteDataErasureBSC Complete at schemeshard: 72057594046678944, NeedScheduleRequestToBSC# false 2025-05-07T09:16:41.037783Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269877761, Sender [1:1979:3648], Recipient [1:289:2273]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-05-07T09:16:41.037838Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4937: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-05-07T09:16:41.037881Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5713: Pipe server connected, at tablet: 72057594046678944 2025-05-07T09:16:41.038072Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271125519, Sender [1:273:2264], Recipient [1:289:2273]: NKikimrScheme.TEvDataErasureInfoRequest 2025-05-07T09:16:41.038114Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5036: StateWork, processing event TEvSchemeShard::TEvDataErasureInfoRequest 2025-05-07T09:16:41.038152Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:7712: Handle TEvDataErasureInfoRequest, at schemeshard: 72057594046678944 >> CdcStreamChangeCollector::NewImage [GOOD] >> CdcStreamChangeCollector::DeleteSingleRow [GOOD] >> SystemView::PartitionStatsFields [GOOD] >> SystemView::ConcurrentScans ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpIndexes::MultipleSecondaryIndexWithSameComulns-UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 19929, MsgBus: 3756 2025-05-07T09:16:09.148413Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501630119786357081:2125];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:16:09.148529Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003bd6/r3tmp/tmptSbbgW/pdisk_1.dat 2025-05-07T09:16:09.671227Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:16:09.675635Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:16:09.675745Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:16:09.678871Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 19929, node 1 2025-05-07T09:16:09.850451Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:16:09.850471Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:16:09.850478Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:16:09.850578Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3756 TClient is connected to server localhost:3756 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:16:10.673917Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:10.697708Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T09:16:10.711149Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:10.892598Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:11.095314Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:11.192711Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:13.046088Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501630136966227853:2406], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:13.046184Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:13.383974Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T09:16:13.431788Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T09:16:13.487563Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T09:16:13.543836Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T09:16:13.601251Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T09:16:13.659586Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T09:16:13.728164Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T09:16:13.825153Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501630136966228518:2470], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:13.825210Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:13.825281Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501630136966228523:2473], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:13.829747Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-05-07T09:16:13.843485Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501630136966228525:2474], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T09:16:13.936971Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501630136966228576:3419] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:16:14.151296Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501630119786357081:2125];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:16:14.151369Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:16:15.027129Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480 2025-05-07T09:16:15.072520Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480 2025-05-07T09:16:15.108073Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710674:0, at schemeshard: 72057594046644480 Trying to start YDB, gRPC: 5301, MsgBus: 2878 2025-05-07T09:16:17.803880Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7501630154828737044:2202];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:16:17.841202Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003bd6/r3tmp/tmplubx2T/pdisk_1.dat 2025-05-07T09:16:17.935423Z node 2 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 5301, node 2 2025-05-07T09:16:17.961293Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:16:17.961372Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:16:17.978084Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:16:18.013128Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:16:18.013147Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:16:18.013153Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:16:18.013233Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:2878 TClient is connected to server localhost:2878 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 Schemeshard ... eteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 7 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 1 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 2025-05-07T09:16:40.425054Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269877761, Sender [3:7501630252934856949:4382], Recipient [3:7501630192805310928:2137]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-05-07T09:16:40.425061Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4937: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-05-07T09:16:40.425067Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5713: Pipe server connected, at tablet: 72057594046644480 2025-05-07T09:16:40.425159Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269553162, Sender [3:7501630209985182006:2460], Recipient [3:7501630192805310928:2137]: NKikimrTxDataShard.TEvPeriodicTableStats DatashardId: 72075186224037920 TableLocalId: 12 Generation: 1 Round: 0 TableStats { DataSize: 928 RowCount: 4 IndexSize: 0 InMemSize: 928 LastAccessTime: 1746609391940 LastUpdateTime: 1746609391940 ImmediateTxCompleted: 0 PlannedTxCompleted: 2 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 4 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 1 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { Memory: 82488 } ShardState: 2 UserTablePartOwners: 72075186224037920 NodeId: 3 StartTime: 1746609390399 TableOwnerId: 72057594046644480 FollowerId: 0 2025-05-07T09:16:40.425166Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4877: StateWork, processing event TEvDataShard::TEvPeriodicTableStats 2025-05-07T09:16:40.425175Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:561: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037920 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 12] state 'Ready' dataSize 928 rowCount 4 cpuUsage 0 2025-05-07T09:16:40.425224Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:568: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037920 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 12] raw table stats: DataSize: 928 RowCount: 4 IndexSize: 0 InMemSize: 928 LastAccessTime: 1746609391940 LastUpdateTime: 1746609391940 ImmediateTxCompleted: 0 PlannedTxCompleted: 2 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 4 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 1 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 2025-05-07T09:16:40.434519Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269877761, Sender [3:7501630252934856951:4383], Recipient [3:7501630192805310928:2137]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-05-07T09:16:40.434553Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4937: StateWork, processing event TEvTabletPipe::TEvServerConnected 
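The TEvPeriodicTableStats flow around this point reduces each shard to a handful of numbers ("data size 928 row count 4"), persists them, and then decides against splitting ("Do not want to split tablet"). A minimal sketch of that kind of size-gated decision, assuming a simple data-size threshold; the threshold value and the structure are invented for illustration, since the real split policy also weighs partitioning config, load, and more.

```cpp
#include <cstdint>
#include <cstdio>

// Illustrative per-shard stats, reduced to the two fields the log reports
// ("data size 928 row count 4").
struct TShardStats {
    uint64_t DataSize = 0;
    uint64_t RowCount = 0;
};

// Hypothetical split check: only split once a shard exceeds a size budget.
// The 1 GiB default is an assumption for the example, not YDB's setting.
bool WantToSplit(const TShardStats& s,
                 uint64_t maxDataSize = 1ull << 30) {
    return s.DataSize > maxDataSize;
}

int main() {
    TShardStats stats{928, 4}; // values from the log for one datashard
    if (!WantToSplit(stats)) {
        // With a few hundred bytes per shard, the check fails and the
        // schemeshard logs "Do not want to split tablet".
        std::printf("Do not want to split tablet\n");
    }
    return 0;
}
```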
2025-05-07T09:16:40.434564Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5713: Pipe server connected, at tablet: 72057594046644480 2025-05-07T09:16:40.434811Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269553162, Sender [3:7501630209985182022:2462], Recipient [3:7501630192805310928:2137]: NKikimrTxDataShard.TEvPeriodicTableStats DatashardId: 72075186224037919 TableLocalId: 12 Generation: 1 Round: 0 TableStats { DataSize: 1032 RowCount: 5 IndexSize: 0 InMemSize: 1032 LastAccessTime: 1746609391936 LastUpdateTime: 1746609391936 ImmediateTxCompleted: 0 PlannedTxCompleted: 2 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 5 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 1 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { Memory: 82488 } ShardState: 2 UserTablePartOwners: 72075186224037919 NodeId: 3 StartTime: 1746609390400 TableOwnerId: 72057594046644480 FollowerId: 0 2025-05-07T09:16:40.434834Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4877: StateWork, processing event TEvDataShard::TEvPeriodicTableStats 2025-05-07T09:16:40.434863Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:561: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037919 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 12] state 'Ready' dataSize 1032 rowCount 5 cpuUsage 0 2025-05-07T09:16:40.434945Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:568: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037919 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 12] raw table stats: DataSize: 1032 RowCount: 5 IndexSize: 0 InMemSize: 1032 LastAccessTime: 1746609391936 LastUpdateTime: 1746609391936 ImmediateTxCompleted: 0 PlannedTxCompleted: 2 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 5 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 1 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 2025-05-07T09:16:40.436777Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 2146435092, Sender [0:0:0], Recipient [3:7501630192805310928:2137]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats 2025-05-07T09:16:40.436829Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5015: StateWork, processing event TEvPrivate::TEvPersistTableStats 2025-05-07T09:16:40.436848Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:588: Started TEvPersistStats at tablet 72057594046644480, queue size# 4 2025-05-07T09:16:40.436890Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:599: Will execute TTxStoreStats, queue# 4 2025-05-07T09:16:40.436903Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:608: Will delay TTxStoreTableStats on# 0.000000s, queue# 4 2025-05-07T09:16:40.436952Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 11 shard idx 72057594046644480:31 data size 960 row count 4 2025-05-07T09:16:40.436999Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037918 maps to 
shardIdx: 72057594046644480:31 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 11], pathId map=Join2, is column=0, is olap=0, RowCount 4, DataSize 960 2025-05-07T09:16:40.437018Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037918, followerId 0 2025-05-07T09:16:40.437068Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:31 with partCount# 0, rowCount# 4, searchHeight# 1, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-05-07T09:16:40.437129Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:483: Do not want to split tablet 72075186224037918 2025-05-07T09:16:40.437154Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 12 shard idx 72057594046644480:34 data size 1240 row count 7 2025-05-07T09:16:40.437201Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037921 maps to shardIdx: 72057594046644480:34 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 12], pathId map=TuplePrimaryDescending, is column=0, is olap=0, RowCount 7, DataSize 1240 2025-05-07T09:16:40.437213Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037921, followerId 0 2025-05-07T09:16:40.437240Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:34 with partCount# 0, rowCount# 7, searchHeight# 1, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-05-07T09:16:40.437262Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:483: Do not want to split tablet 72075186224037921 2025-05-07T09:16:40.437283Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 12 shard idx 72057594046644480:33 data size 928 row count 4 2025-05-07T09:16:40.437329Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037920 maps to shardIdx: 72057594046644480:33 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 12], pathId map=TuplePrimaryDescending, is column=0, is olap=0, RowCount 4, DataSize 928 2025-05-07T09:16:40.437346Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037920, followerId 0 2025-05-07T09:16:40.437374Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:33 with partCount# 0, rowCount# 4, searchHeight# 1, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-05-07T09:16:40.437398Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:483: Do not want to split tablet 72075186224037920 2025-05-07T09:16:40.437417Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 12 shard idx 72057594046644480:32 data size 1032 row count 5 2025-05-07T09:16:40.437442Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037919 maps to shardIdx: 
72057594046644480:32 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 12], pathId map=TuplePrimaryDescending, is column=0, is olap=0, RowCount 5, DataSize 1032 2025-05-07T09:16:40.437456Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037919, followerId 0 2025-05-07T09:16:40.437478Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:32 with partCount# 0, rowCount# 5, searchHeight# 1, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-05-07T09:16:40.437488Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:483: Do not want to split tablet 72075186224037919 2025-05-07T09:16:40.437531Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-05-07T09:16:40.437653Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 2146435092, Sender [0:0:0], Recipient [3:7501630192805310928:2137]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats 2025-05-07T09:16:40.437672Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5015: StateWork, processing event TEvPrivate::TEvPersistTableStats 2025-05-07T09:16:40.437684Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:588: Started TEvPersistStats at tablet 72057594046644480, queue size# 0
------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpIndexes::SecondaryIndexUsingInJoin2+UseStreamJoin [GOOD]
Test command err:
Trying to start YDB, gRPC: 21992, MsgBus: 17368
2025-05-07T09:16:15.582372Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501630144691091675:2065];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:16:15.582612Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003bcc/r3tmp/tmpSPUW4E/pdisk_1.dat 2025-05-07T09:16:16.077455Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:16:16.083145Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:16:16.083243Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:16:16.086567Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 21992, node 1 2025-05-07T09:16:16.170405Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:16:16.170436Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:16:16.170445Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:16:16.170599Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:17368 TClient is connected to server localhost:17368 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:16:16.726677Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:16.746212Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:16.753715Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 2025-05-07T09:16:16.883616Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-05-07T09:16:17.074111Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:17.144523Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 2025-05-07T09:16:18.943974Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501630157575995198:2406], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:18.944113Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:19.280659Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T09:16:19.316734Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T09:16:19.347316Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T09:16:19.381506Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T09:16:19.415791Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T09:16:19.500460Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T09:16:19.539714Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T09:16:19.631491Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501630161870963157:2470], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:19.631571Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:19.631778Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501630161870963162:2473], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:19.635467Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-05-07T09:16:19.647443Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501630161870963164:2474], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T09:16:19.717956Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501630161870963215:3416] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:16:20.576098Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501630144691091675:2065];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:16:20.585598Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:16:20.773632Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269877761, Sender [1:7501630166165930786:3592], Recipient [1:7501630144691092094:2197]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-05-07T09:16:20.773672Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4937: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-05-07T09:16:20.773701Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5713: Pipe server connected, at tablet: 72057594046644480 2025-05-07T09:16:20.773753Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271122432, Sender [1:7501630166165930782:3589], Recipient [1:7501630144691092094:2197]: {TEvModifySchemeTransaction txid# 281474976710672 TabletId# 72057594046644480} 2025-05-07T09:16:20.773773Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4851: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-05-07T09:16:20.835735Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/Root" OperationType: ESchemeOpCreateIndexedTable CreateIndexedTable { TableDescription { Name: "TestTable1" Columns { Name: "Key" Type: "String" NotNull: false } Columns { Name: "Value" Type: "Int64" NotNull: false } KeyColumnNames: "Key" PartitionConfig { ColumnFamilies { Id: 0 StorageConfig { SysLog { PreferredPoolKind: "test" } Log { PreferredPoolKind: "test" } Data { PreferredPoolKind: "test" } } } } Temporary: false } IndexDescription { Name: "Index1" KeyColumnNames: "Value" Type: EIndexTypeGlobal IndexImplTableDescriptions { PartitionConfig { } } } } } TxId: 281474976710672 TabletId: 72057594046644480 PeerName: "ipv6:[::1]:51856" , at schemeshard: 72057594046644480 2025-05-07T09:16:20.836098Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_create_indexed_table.cpp:101: TCreateTableIndex construct operation table path: /Root/TestTable1 domain path id: [OwnerId: 72057594046644480, LocalPathId: 1] domain path: /Root shardsToCreate: 2 GetShardsInside: 34 MaxShards: 200000 2025-05-07T09:16:20.836422Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_table.cpp:426: TCreateTable Propose, path: /Root/TestTable1, opId: 281474976710672:0, at schemeshard: 72057594046644480 2025-05-07T09:16:20.836500Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_create_table.cpp:433: TCreateTable Propose, path: /Root/TestTable1, opId: 281474976710672:0, schema: Name: "TestTable1" Columns { Name: "Key" Type: "String" NotNull: false } Columns { Name: "Value" Type: "Int64" NotNull: false } 
KeyColumnNames: "Key" PartitionConfig { ColumnFamilies { Id: 0 StorageConfig { SysLog { PreferredPoolKind ... hemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-05-07T09:16:38.555254Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 281474976715673:0, at schemeshard: 72057594046644480 2025-05-07T09:16:38.555279Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-05-07T09:16:38.555366Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 281474976715673:2, at schemeshard: 72057594046644480 2025-05-07T09:16:38.555380Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-05-07T09:16:38.555639Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 281474976715673:0, at schemeshard: 72057594046644480 2025-05-07T09:16:38.555651Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-05-07T09:16:38.555661Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:275: Activate send for 281474976715673:0 2025-05-07T09:16:38.555715Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:630: Send to actor: [3:7501630246076360538:2525] msg type: 269552132 msg: NKikimrTxDataShard.TEvSchemaChangedResult TxId: 281474976715673 at schemeshard: 72057594046644480 2025-05-07T09:16:38.555777Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 281474976715673:2, at schemeshard: 72057594046644480 2025-05-07T09:16:38.555784Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-05-07T09:16:38.555792Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:275: Activate send for 281474976715673:2 2025-05-07T09:16:38.555816Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:630: Send to actor: [3:7501630246076360536:2524] msg type: 269552132 msg: NKikimrTxDataShard.TEvSchemaChangedResult TxId: 281474976715673 at schemeshard: 72057594046644480 2025-05-07T09:16:38.555874Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 2146435072, Sender [3:7501630224601521633:2152], Recipient [3:7501630224601521633:2152]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-05-07T09:16:38.555890Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4857: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-05-07T09:16:38.555928Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 281474976715673:0, at schemeshard: 72057594046644480 2025-05-07T09:16:38.555949Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046644480] TDone opId# 281474976715673:0 ProgressState 2025-05-07T09:16:38.556016Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-05-07T09:16:38.556029Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976715673:0 
progress is 2/3 2025-05-07T09:16:38.556039Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 281474976715673 ready parts: 2/3 2025-05-07T09:16:38.556055Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976715673:0 progress is 2/3 2025-05-07T09:16:38.556065Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 281474976715673 ready parts: 2/3 2025-05-07T09:16:38.556083Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 281474976715673, ready parts: 2/3, is published: true 2025-05-07T09:16:38.556240Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 2146435072, Sender [3:7501630224601521633:2152], Recipient [3:7501630224601521633:2152]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-05-07T09:16:38.556254Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4857: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-05-07T09:16:38.556286Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 281474976715673:2, at schemeshard: 72057594046644480 2025-05-07T09:16:38.556301Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:482: [72057594046644480] TDone opId# 281474976715673:2 ProgressState 2025-05-07T09:16:38.556346Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-05-07T09:16:38.556358Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976715673:2 progress is 3/3 2025-05-07T09:16:38.556368Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 281474976715673 ready parts: 3/3 2025-05-07T09:16:38.556384Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976715673:2 progress is 3/3 2025-05-07T09:16:38.556393Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 281474976715673 ready parts: 3/3 2025-05-07T09:16:38.556405Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1594: TOperation IsReadyToNotify, TxId: 281474976715673, ready parts: 3/3, is published: true 2025-05-07T09:16:38.556451Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1617: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [3:7501630246076360509:2522] message: TxId: 281474976715673 2025-05-07T09:16:38.556469Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1629: TOperation IsReadyToDone TxId: 281474976715673 ready parts: 3/3 2025-05-07T09:16:38.556495Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 281474976715673:0 2025-05-07T09:16:38.556505Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 281474976715673:0 2025-05-07T09:16:38.556618Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 20] was 4 2025-05-07T09:16:38.556635Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 281474976715673:1 
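The "progress is 2/3 ... ready parts: 3/3" entries above show how a multi-part schema operation (here an indexed-table creation with three suboperations: table, index, and index impl table) is tracked: the transaction is reported done and TEvNotifyTxCompletionResult is sent only once every part has completed. A minimal illustrative sketch of that bookkeeping in plain C++; the type and method names below are invented for illustration and are not YDB's actual TOperation code:

    #include <cstddef>
    #include <iostream>
    #include <vector>

    // Hypothetical tracker for a multi-part operation: the whole
    // transaction is "ready to notify" only when all parts are done,
    // mirroring the "progress is N/M ... IsReadyToDone ...
    // IsReadyToNotify" lines in the trace above.
    struct TMultiPartOperation {
        std::vector<bool> PartDone;

        explicit TMultiPartOperation(size_t parts) : PartDone(parts, false) {}

        void MarkPartDone(size_t part) { PartDone.at(part) = true; }

        size_t DoneParts() const {
            size_t done = 0;
            for (bool d : PartDone) done += d ? 1 : 0;
            return done;
        }

        bool IsReadyToNotify() const { return DoneParts() == PartDone.size(); }
    };

    int main() {
        TMultiPartOperation op(3);           // e.g. table + index + impl table
        op.MarkPartDone(0);
        op.MarkPartDone(2);
        std::cout << "progress is " << op.DoneParts() << "/3\n";  // 2/3
        op.MarkPartDone(1);
        std::cout << "ready: " << op.IsReadyToNotify() << "\n";   // 1
    }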
2025-05-07T09:16:38.556641Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 281474976715673:1 2025-05-07T09:16:38.556655Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 21] was 3 2025-05-07T09:16:38.556662Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 281474976715673:2 2025-05-07T09:16:38.556668Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5136: RemoveTx for txid 281474976715673:2 2025-05-07T09:16:38.556699Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:486: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 22] was 3 2025-05-07T09:16:38.557336Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-05-07T09:16:38.557397Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-05-07T09:16:38.557456Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:630: Send to actor: [3:7501630246076360509:2522] msg type: 271124998 msg: NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976715673 at schemeshard: 72057594046644480 2025-05-07T09:16:38.557642Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269877764, Sender [3:7501630246076360608:3742], Recipient [3:7501630224601521633:2152]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-05-07T09:16:38.557664Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4938: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-05-07T09:16:38.557683Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5761: Server pipe is reset, at schemeshard: 72057594046644480 2025-05-07T09:16:38.557715Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269877764, Sender [3:7501630246076360609:3743], Recipient [3:7501630224601521633:2152]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-05-07T09:16:38.557725Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4938: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-05-07T09:16:38.557731Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5761: Server pipe is reset, at schemeshard: 72057594046644480 2025-05-07T09:16:38.558290Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269877764, Sender [3:7501630246076360520:3680], Recipient [3:7501630224601521633:2152]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-05-07T09:16:38.558316Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4938: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-05-07T09:16:38.558326Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5761: Server pipe is reset, at schemeshard: 72057594046644480 2025-05-07T09:16:39.473691Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271125000, Sender [0:0:0], Recipient [3:7501630224601521633:2152]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T09:16:39.473729Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4848: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 
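The TEvWakeupToMeasureSelfResponseTime / TEvMeasureSelfResponseTime pair in the surrounding trace is a self-latency probe: the schemeshard periodically sends an event to itself and observes how long its own mailbox takes to deliver it back. A hedged stand-in for the idea using a plain queue and a thread instead of the actor runtime; everything below is invented for illustration, not YDB's implementation:

    #include <chrono>
    #include <condition_variable>
    #include <iostream>
    #include <mutex>
    #include <queue>
    #include <thread>

    // Illustrative only: measure how long a timestamped "message to
    // self" waits in a queue before being processed, a proxy for how
    // busy the processor is, analogous to TEvMeasureSelfResponseTime.
    using Clock = std::chrono::steady_clock;

    int main() {
        std::queue<Clock::time_point> mailbox;
        std::mutex m;
        std::condition_variable cv;

        std::thread worker([&] {
            std::unique_lock<std::mutex> lk(m);
            cv.wait(lk, [&] { return !mailbox.empty(); });
            auto sent = mailbox.front();
            mailbox.pop();
            auto waited = std::chrono::duration_cast<std::chrono::microseconds>(
                Clock::now() - sent);
            std::cout << "self response time: " << waited.count() << "us\n";
        });

        {
            std::lock_guard<std::mutex> lk(m);
            mailbox.push(Clock::now());  // the "wakeup" event, stamped on send
        }
        cv.notify_one();
        worker.join();
    }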
2025-05-07T09:16:39.473786Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271124999, Sender [3:7501630224601521633:2152], Recipient [3:7501630224601521633:2152]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T09:16:39.473802Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4847: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T09:16:40.474237Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271125000, Sender [0:0:0], Recipient [3:7501630224601521633:2152]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T09:16:40.474278Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4848: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T09:16:40.474329Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271124999, Sender [3:7501630224601521633:2152], Recipient [3:7501630224601521633:2152]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T09:16:40.474345Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4847: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime
>> TestDataErasure::SimpleDataErasureTestForAllSupportedObjects [GOOD]
|94.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/copy_table/py3test >> test_copy_table.py::TestCopyTable::test_copy_table[table_ttl_Timestamp-pk_types17-all_types17-index17-Timestamp--] [GOOD]
|94.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/copy_table/py3test >> test_copy_table.py::TestCopyTable::test_copy_table[table_ttl_Date-pk_types18-all_types18-index18-Date--] [GOOD]
>> TSchemeshardBackgroundCompactionTest::SchemeshardShouldRequestCompactionsConfigRequest [GOOD]
>> TSchemeshardBackgroundCompactionTest::SchemeshardShouldNotRequestCompactionsAfterDisable
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_change_collector/unittest >> CdcStreamChangeCollector::NewImage [GOOD]
Test command err:
2025-05-07T09:16:23.167907Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:105:2151], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T09:16:23.168096Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T09:16:23.168412Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003235/r3tmp/tmp3CtZLV/pdisk_1.dat 2025-05-07T09:16:23.776272Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-05-07T09:16:23.832759Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:16:23.841058Z node 1 :TABLET_SAUSAGECACHE NOTICE: shared_sausagecache.cpp:1187: Update config MemoryLimit: 33554432 2025-05-07T09:16:23.902804Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:16:23.902923Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:16:23.915114Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:16:24.039200Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-05-07T09:16:24.116281Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:665:2569] 2025-05-07T09:16:24.116575Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-05-07T09:16:24.185870Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-05-07T09:16:24.186132Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-05-07T09:16:24.188278Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-05-07T09:16:24.188382Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-05-07T09:16:24.188456Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-05-07T09:16:24.188918Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-05-07T09:16:24.189103Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-05-07T09:16:24.189224Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:681:2569] in generation 1 2025-05-07T09:16:24.201629Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-05-07T09:16:24.252765Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-05-07T09:16:24.253045Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing 
params 2025-05-07T09:16:24.253202Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:683:2579] 2025-05-07T09:16:24.253247Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-05-07T09:16:24.253291Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-05-07T09:16:24.253334Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:16:24.253864Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-05-07T09:16:24.254124Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-05-07T09:16:24.254268Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T09:16:24.254345Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-05-07T09:16:24.254421Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-05-07T09:16:24.254493Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T09:16:24.254645Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:661:2566], serverId# [1:672:2573], sessionId# [0:0:0] 2025-05-07T09:16:24.255177Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-05-07T09:16:24.255526Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-05-07T09:16:24.255643Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-05-07T09:16:24.257570Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-05-07T09:16:24.270542Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-05-07T09:16:24.270655Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-05-07T09:16:24.425226Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:697:2587], serverId# [1:699:2589], sessionId# [0:0:0] 2025-05-07T09:16:24.430320Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715657 at step 1000 at tablet 72075186224037888 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1000 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2025-05-07T09:16:24.430423Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:16:24.430804Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T09:16:24.430858Z node 1 
:TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-05-07T09:16:24.430966Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000:281474976715657] in PlanQueue unit at 72075186224037888 2025-05-07T09:16:24.431252Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1000:281474976715657 keys extracted: 0 2025-05-07T09:16:24.431571Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-05-07T09:16:24.432463Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T09:16:24.432558Z node 1 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-05-07T09:16:24.437467Z node 1 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-05-07T09:16:24.439088Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-05-07T09:16:24.441564Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-05-07T09:16:24.441634Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:16:24.442424Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-05-07T09:16:24.442504Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T09:16:24.443771Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T09:16:24.443821Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-05-07T09:16:24.443902Z node 1 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-05-07T09:16:24.443982Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [1:410:2405], exec latency: 0 ms, propose latency: 0 ms 2025-05-07T09:16:24.444041Z node 1 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-05-07T09:16:24.444171Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:16:24.469393Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-05-07T09:16:24.479952Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-05-07T09:16:24.480211Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 
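A few entries below, "Check scheme tx, proposed scheme version# 2 current version# 1 expected version# 2" shows the datashard validating that a proposed schema transaction advances the table schema to exactly the next version before preparing it. A hypothetical sketch of such a monotonic version check, assuming (as the logged values suggest, not as a confirmed YDB rule) that the shard expects current + 1:

    #include <cstdint>
    #include <iostream>

    // Hypothetical check in the spirit of the "Check scheme tx,
    // proposed scheme version# P current version# C expected
    // version# E" lines: prepare the transaction only when it moves
    // the schema forward by exactly one version.
    bool CheckSchemeTx(uint64_t proposed, uint64_t current) {
        const uint64_t expected = current + 1;
        if (proposed != expected) {
            std::cout << "reject: proposed " << proposed
                      << " expected " << expected << "\n";
            return false;
        }
        return true;
    }

    int main() {
        std::cout << CheckSchemeTx(2, 1) << "\n";  // 1: version 1 -> 2 is ok
        std::cout << CheckSchemeTx(4, 1) << "\n";  // 0: skips versions
    }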
2025-05-07T09:16:24.480339Z node 1 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-05-07T09:16:24.506610Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-05-07T09:16:24.507083Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-05-07T09:16:24.507221Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715658 ssId 72057594046644480 seqNo 2:2 2025-05-07T09:16:24.507303Z node 1 :TX_DATASHARD INFO: check_scheme_tx_unit.cpp:234: Check scheme tx, proposed scheme version# 2 current version# 1 expected version# 2 at tablet# 72075186224037888 txId# 281474976715658 2025-05-07T09:16:24.507367Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715658 at tablet 72075186224037888 2025-05-07T09:16:24.546714Z node 1 :TX_DATASHARD DEBUG: datashard__pro ... D DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-05-07T09:16:40.907613Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T09:16:40.908826Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T09:16:40.908866Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-05-07T09:16:40.908910Z node 4 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-05-07T09:16:40.908982Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [4:410:2405], exec latency: 0 ms, propose latency: 0 ms 2025-05-07T09:16:40.909035Z node 4 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-05-07T09:16:40.909116Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:16:40.910551Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-05-07T09:16:40.912272Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-05-07T09:16:40.912408Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-05-07T09:16:40.912473Z node 4 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-05-07T09:16:40.922708Z node 4 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-05-07T09:16:40.922842Z node 4 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715658 ssId 72057594046644480 seqNo 2:2 2025-05-07T09:16:40.922880Z node 4 :TX_DATASHARD INFO: 
check_scheme_tx_unit.cpp:234: Check scheme tx, proposed scheme version# 2 current version# 1 expected version# 2 at tablet# 72075186224037888 txId# 281474976715658 2025-05-07T09:16:40.922931Z node 4 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715658 at tablet 72075186224037888 2025-05-07T09:16:40.923489Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-05-07T09:16:40.947602Z node 4 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-05-07T09:16:41.204099Z node 4 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715658 at step 1500 at tablet 72075186224037888 { Transactions { TxId: 281474976715658 AckTo { RawX1: 0 RawX2: 0 } } Step: 1500 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2025-05-07T09:16:41.204164Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:16:41.204503Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T09:16:41.204555Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-05-07T09:16:41.204594Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1500:281474976715658] in PlanQueue unit at 72075186224037888 2025-05-07T09:16:41.204783Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1500:281474976715658 keys extracted: 0 2025-05-07T09:16:41.204885Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-05-07T09:16:41.205088Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T09:16:41.205710Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-05-07T09:16:41.258710Z node 4 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1500} 2025-05-07T09:16:41.258827Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T09:16:41.258868Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T09:16:41.258916Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:16:41.258994Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1500 : 281474976715658] from 72075186224037888 at tablet 72075186224037888 send result to client [4:410:2405], exec latency: 0 ms, propose latency: 0 ms 2025-05-07T09:16:41.259058Z node 4 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715658 state Ready TxInFly 0 2025-05-07T09:16:41.259147Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:16:41.261246Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle 
TEvSchemaChangedResult 281474976715658 datashard 72075186224037888 state Ready 2025-05-07T09:16:41.261340Z node 4 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-05-07T09:16:41.273049Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:876:2714], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:41.273170Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:887:2719], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:41.273277Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:41.284093Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480 2025-05-07T09:16:41.292211Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-05-07T09:16:41.466915Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-05-07T09:16:41.470794Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:890:2722], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-05-07T09:16:41.498081Z node 4 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [4:946:2759] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:16:41.562542Z node 4 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715661. Ctx: { TraceId: 01jtn0gvdq4b1m2y8exfdkk13m, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=4&id=YjQwODA5ODktNTgzNDhkYzYtYjcwNzdiOWUtZTlmNjkzYjc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:16:41.565091Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [4:977:2776], serverId# [4:978:2777], sessionId# [0:0:0] 2025-05-07T09:16:41.565527Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:245: Executing write operation for [0:3] at 72075186224037888 2025-05-07T09:16:41.565847Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:874: PersistChangeRecord: record: { Order: 1 Group: 1746609401565739 Step: 2000 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] Kind: CdcDataChange Source: Unspecified Body: 40b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 2 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037888 2025-05-07T09:16:41.566083Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:410: Executed write operation for [0:3] at 72075186224037888, row count=1 2025-05-07T09:16:41.590746Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1170: EnqueueChangeRecords: at tablet: 72075186224037888, records: { Order: 1 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] BodySize: 40 TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 2 } 2025-05-07T09:16:41.590835Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:16:41.673540Z node 4 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715662. Ctx: { TraceId: 01jtn0gvqvbe2a7sjp3msw3pgw, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=4&id=YjNhZWU2ZGMtNjhlNGRkMWYtOGZiZTU1ZmMtNDQ1Y2Y4ODM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-05-07T09:16:41.675680Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:245: Executing write operation for [0:4] at 72075186224037888 2025-05-07T09:16:41.676000Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:874: PersistChangeRecord: record: { Order: 2 Group: 1746609401675893 Step: 2000 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] Kind: CdcDataChange Source: Unspecified Body: 18b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 2 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037888 2025-05-07T09:16:41.676193Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:410: Executed write operation for [0:4] at 72075186224037888, row count=1 2025-05-07T09:16:41.687185Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1170: EnqueueChangeRecords: at tablet: 72075186224037888, records: { Order: 2 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] BodySize: 18 TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 2 } 2025-05-07T09:16:41.687272Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:16:41.689076Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [4:1005:2795], serverId# [4:1006:2796], sessionId# [0:0:0] 2025-05-07T09:16:41.695399Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [4:1007:2797], serverId# [4:1008:2798], sessionId# [0:0:0]
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_change_collector/unittest >> CdcStreamChangeCollector::DeleteSingleRow [GOOD]
Test command err:
2025-05-07T09:16:23.165202Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:105:2151], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T09:16:23.165407Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T09:16:23.165734Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/00324b/r3tmp/tmpGGVZGO/pdisk_1.dat 2025-05-07T09:16:23.778046Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-05-07T09:16:23.832843Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:16:23.899924Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:16:23.900087Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:16:23.918763Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:16:24.039370Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-05-07T09:16:24.113713Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:675:2577] 2025-05-07T09:16:24.114133Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-05-07T09:16:24.166271Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-05-07T09:16:24.166516Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-05-07T09:16:24.169474Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-05-07T09:16:24.169563Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-05-07T09:16:24.169618Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-05-07T09:16:24.171874Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-05-07T09:16:24.172257Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-05-07T09:16:24.172321Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:702:2577] in generation 1 2025-05-07T09:16:24.173787Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037889 actor [1:679:2579] 2025-05-07T09:16:24.173999Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-05-07T09:16:24.184650Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-05-07T09:16:24.184794Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-05-07T09:16:24.186637Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: 
QueueSize: 0, at tablet: 72075186224037889 2025-05-07T09:16:24.186715Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037889 2025-05-07T09:16:24.186764Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037889 2025-05-07T09:16:24.187101Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-05-07T09:16:24.187249Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-05-07T09:16:24.187317Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037889 persisting started state actor id [1:710:2579] in generation 1 2025-05-07T09:16:24.198483Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-05-07T09:16:24.237112Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-05-07T09:16:24.238409Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-05-07T09:16:24.238888Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:713:2598] 2025-05-07T09:16:24.238987Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-05-07T09:16:24.239038Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-05-07T09:16:24.239082Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:16:24.240674Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-05-07T09:16:24.240747Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037889 2025-05-07T09:16:24.240822Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037889 not sending time cast registration request in state WaitScheme: missing processing params 2025-05-07T09:16:24.240905Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037889, actorId: [1:714:2599] 2025-05-07T09:16:24.240945Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037889 2025-05-07T09:16:24.240976Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037889, state: WaitScheme 2025-05-07T09:16:24.241010Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-05-07T09:16:24.241462Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-05-07T09:16:24.241648Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-05-07T09:16:24.241847Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T09:16:24.241894Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-05-07T09:16:24.242023Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-05-07T09:16:24.242127Z node 1 
:TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T09:16:24.242197Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037889 2025-05-07T09:16:24.242287Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037889 2025-05-07T09:16:24.242460Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:669:2573], serverId# [1:689:2584], sessionId# [0:0:0] 2025-05-07T09:16:24.242531Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037889 2025-05-07T09:16:24.242564Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 active 0 active planned 0 immediate 0 planned 0 2025-05-07T09:16:24.242620Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037889 TxInFly 0 2025-05-07T09:16:24.242659Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-05-07T09:16:24.248204Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-05-07T09:16:24.248544Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-05-07T09:16:24.248658Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-05-07T09:16:24.249213Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [1:670:2574], serverId# [1:697:2591], sessionId# [0:0:0] 2025-05-07T09:16:24.249434Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037889 2025-05-07T09:16:24.249583Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037889 txId 281474976715657 ssId 72057594046644480 seqNo 2:2 2025-05-07T09:16:24.249634Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037889 2025-05-07T09:16:24.251662Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-05-07T09:16:24.251767Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037889 2025-05-07T09:16:24.263159Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-05-07T09:16:24.263331Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-05-07T09:16:24.264001Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037889 2025-05-07T09:16:24.264080Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037889 not sending time cast registration request in state WaitScheme 2025-05-07T09:16:24.432196Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 
72075186224037889, clientId# [1:731:2610], serverId# [1:734:2613], sessionId# [0:0:0] 2025-05-07T09:16:24.432652Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:733:2612], serverId# [1:735:2614], sessionId# [0:0:0] 2025-05-07T09:16:24.441629Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715657 at step 1000 at tablet 72075186224037889 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1000 MediatorID: 72057594046382081 TabletID: 72075186224037889 } 2025-05-07T09:16:24.441719Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-05-07T09:16:24.442673Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037889 2025-05-07T09:16:24.442736Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 active 0 active planned 0 immediate 0 planned 1 2025-05-07T09:16:24.442835Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000:2814749767 ... D DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-05-07T09:16:41.064670Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T09:16:41.066099Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T09:16:41.066166Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-05-07T09:16:41.066240Z node 4 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-05-07T09:16:41.066314Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [4:410:2405], exec latency: 0 ms, propose latency: 0 ms 2025-05-07T09:16:41.066374Z node 4 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-05-07T09:16:41.066461Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:16:41.067991Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-05-07T09:16:41.069937Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-05-07T09:16:41.070180Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-05-07T09:16:41.070265Z node 4 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-05-07T09:16:41.083254Z node 4 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-05-07T09:16:41.083427Z node 4 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715658 ssId 72057594046644480 seqNo 2:2 2025-05-07T09:16:41.083474Z node 4 :TX_DATASHARD INFO: 
check_scheme_tx_unit.cpp:234: Check scheme tx, proposed scheme version# 2 current version# 1 expected version# 2 at tablet# 72075186224037888 txId# 281474976715658 2025-05-07T09:16:41.083517Z node 4 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715658 at tablet 72075186224037888 2025-05-07T09:16:41.084261Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-05-07T09:16:41.109593Z node 4 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-05-07T09:16:41.320519Z node 4 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715658 at step 1500 at tablet 72075186224037888 { Transactions { TxId: 281474976715658 AckTo { RawX1: 0 RawX2: 0 } } Step: 1500 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2025-05-07T09:16:41.320605Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:16:41.320953Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T09:16:41.321006Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-05-07T09:16:41.321058Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1500:281474976715658] in PlanQueue unit at 72075186224037888 2025-05-07T09:16:41.321261Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1500:281474976715658 keys extracted: 0 2025-05-07T09:16:41.321408Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-05-07T09:16:41.321698Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-05-07T09:16:41.322554Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-05-07T09:16:41.418384Z node 4 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1500} 2025-05-07T09:16:41.418512Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T09:16:41.418557Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-05-07T09:16:41.418608Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:16:41.418691Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1500 : 281474976715658] from 72075186224037888 at tablet 72075186224037888 send result to client [4:410:2405], exec latency: 0 ms, propose latency: 0 ms 2025-05-07T09:16:41.418762Z node 4 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715658 state Ready TxInFly 0 2025-05-07T09:16:41.418863Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:16:41.421028Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle 
TEvSchemaChangedResult 281474976715658 datashard 72075186224037888 state Ready 2025-05-07T09:16:41.421123Z node 4 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-05-07T09:16:41.429681Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:876:2714], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:41.429813Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:887:2719], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:41.429904Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:41.435795Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480 2025-05-07T09:16:41.442374Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-05-07T09:16:41.615374Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-05-07T09:16:41.619806Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:890:2722], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-05-07T09:16:41.657757Z node 4 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [4:946:2759] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:16:41.719857Z node 4 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715661. Ctx: { TraceId: 01jtn0gvjm9db5bybq44n941z4, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=4&id=MTlmZmZhZjAtMzk0ZmExNS05ZjUzMGU4NC01Njk3NDdiOA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:16:41.722426Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [4:977:2776], serverId# [4:978:2777], sessionId# [0:0:0] 2025-05-07T09:16:41.722897Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:245: Executing write operation for [0:3] at 72075186224037888 2025-05-07T09:16:41.723213Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:874: PersistChangeRecord: record: { Order: 1 Group: 1746609401723103 Step: 2000 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] Kind: CdcDataChange Source: Unspecified Body: 34b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 2 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037888 2025-05-07T09:16:41.723419Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:410: Executed write operation for [0:3] at 72075186224037888, row count=1 2025-05-07T09:16:41.734596Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1170: EnqueueChangeRecords: at tablet: 72075186224037888, records: { Order: 1 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] BodySize: 34 TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 2 } 2025-05-07T09:16:41.734695Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:16:41.806766Z node 4 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715662. Ctx: { TraceId: 01jtn0gvwbet2h53h7d5wpaaq5, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=4&id=OTYzMWVmMzgtN2EyZmZiZTItNjU4MzMzMmMtMzVhNjVlMjk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-05-07T09:16:41.808965Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:245: Executing write operation for [0:4] at 72075186224037888 2025-05-07T09:16:41.809286Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:874: PersistChangeRecord: record: { Order: 2 Group: 1746609401809172 Step: 2000 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] Kind: CdcDataChange Source: Unspecified Body: 34b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 2 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037888 2025-05-07T09:16:41.809448Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:410: Executed write operation for [0:4] at 72075186224037888, row count=1 2025-05-07T09:16:41.820533Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1170: EnqueueChangeRecords: at tablet: 72075186224037888, records: { Order: 2 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] BodySize: 34 TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 2 } 2025-05-07T09:16:41.820607Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-05-07T09:16:41.822561Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [4:1005:2795], serverId# [4:1006:2796], sessionId# [0:0:0] 2025-05-07T09:16:41.828827Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [4:1007:2797], serverId# [4:1008:2798], sessionId# [0:0:0] |94.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/copy_table/py3test >> test_copy_table.py::TestCopyTable::test_copy_table[table_ttl_Uint64-pk_types15-all_types15-index15-Uint64--] [GOOD] |94.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/copy_table/py3test >> test_copy_table.py::TestCopyTable::test_copy_table[table_ttl_Datetime-pk_types16-all_types16-index16-Datetime--] [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_data_erasure/unittest >> TestDataErasure::SimpleDataErasureTestForAllSupportedObjects [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:128:2058] recipient: [1:108:2140] 2025-05-07T09:16:36.963243Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T09:16:36.963329Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:16:36.963371Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T09:16:36.963402Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T09:16:36.963472Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T09:16:36.963517Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T09:16:36.963577Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:16:36.963661Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T09:16:36.964417Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T09:16:36.964744Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T09:16:37.038369Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T09:16:37.038471Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:16:37.054838Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T09:16:37.054965Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T09:16:37.055158Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T09:16:37.075251Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T09:16:37.075907Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T09:16:37.076722Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T09:16:37.076986Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T09:16:37.079071Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:16:37.080630Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:16:37.080689Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:16:37.080747Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T09:16:37.080812Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:16:37.080862Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T09:16:37.081001Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T09:16:37.087607Z node 1 :HIVE INFO: 
tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:239:2058] recipient: [1:15:2062] 2025-05-07T09:16:37.234747Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T09:16:37.235017Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:16:37.235271Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T09:16:37.235550Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T09:16:37.235618Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:16:37.238018Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T09:16:37.238163Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T09:16:37.238407Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:16:37.238498Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T09:16:37.238545Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T09:16:37.238592Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T09:16:37.240954Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:16:37.241023Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T09:16:37.241078Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T09:16:37.243367Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:16:37.243427Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:16:37.243477Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: 
NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:16:37.243595Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T09:16:37.247541Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T09:16:37.249694Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T09:16:37.249893Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T09:16:37.250986Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T09:16:37.251155Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 130 RawX2: 4294969450 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:16:37.251206Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:16:37.251518Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T09:16:37.251603Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:16:37.251806Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T09:16:37.251890Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T09:16:37.253962Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:16:37.254036Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:16:37.254232Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:16:37.254311Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 
ger] SendRequestToBSC: Generation# 1 2025-05-07T09:16:42.050377Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 268637738, Sender [1:298:2280], Recipient [1:290:2274]: NKikimrBlobStorage.TEvControllerShredResponse CurrentGeneration: 1 Completed: false Progress10k: 5000 2025-05-07T09:16:42.050415Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5038: StateWork, processing event TEvBlobStorage::TEvControllerShredResponse 2025-05-07T09:16:42.050439Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:7757: Handle TEvControllerShredResponse, at schemeshard: 72057594046678944 2025-05-07T09:16:42.050493Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:628: TTxCompleteDataErasureBSC Execute at schemeshard: 72057594046678944 2025-05-07T09:16:42.050541Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__root_data_erasure_manager.cpp:644: TTxCompleteDataErasureBSC: Progress data shred in BSC 50% 2025-05-07T09:16:42.050607Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:652: TTxCompleteDataErasureBSC Complete at schemeshard: 72057594046678944, NeedScheduleRequestToBSC# true 2025-05-07T09:16:42.050654Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:346: [RootDataErasureManager] ScheduleRequestToBSC: Interval# 1.000000s 2025-05-07T09:16:42.092126Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:459:2411]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T09:16:42.092201Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4848: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T09:16:42.092291Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271124999, Sender [1:459:2411], Recipient [1:459:2411]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T09:16:42.092321Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4847: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T09:16:42.092566Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271122945, Sender [1:793:2682], Recipient [1:459:2411]: NKikimrSchemeOp.TDescribePath PathId: 3 SchemeshardId: 72075186233409546 2025-05-07T09:16:42.092601Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4852: StateWork, processing event TEvSchemeShard::TEvDescribeScheme 2025-05-07T09:16:42.092686Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: PathId: 3 SchemeshardId: 72075186233409546, at schemeshard: 72075186233409546 2025-05-07T09:16:42.092890Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:44: Tablet 72075186233409546 describe pathId 3 took 168us result status StatusSuccess 2025-05-07T09:16:42.093335Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Database1/Topic1" PathDescription { Self { Name: "Topic1" PathId: 3 SchemeshardId: 72075186233409546 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 104 CreateStep: 300 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 
UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 1 } ChildrenExist: false BalancerTabletID: 72075186233409552 } PersQueueGroup { Name: "Topic1" PathId: 3 TotalGroupCount: 1 PartitionPerTablet: 1 PQTabletConfig { PartitionConfig { LifetimeSeconds: 10 } YdbDatabasePath: "/MyRoot/Database1" } Partitions { PartitionId: 0 TabletId: 72075186233409551 Status: Active } AlterVersion: 1 BalancerTabletID: 72075186233409552 NextPartitionId: 1 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 2 ProcessingParams { Version: 2 PlanResolution: 50 Coordinators: 72075186233409547 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409548 SchemeShard: 72075186233409546 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 7 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 1 PQPartitionsLimit: 1000000 } } PathId: 3 PathOwnerId: 72075186233409546, at schemeshard: 72075186233409546 2025-05-07T09:16:42.166066Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:955:2814]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T09:16:42.166145Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4848: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T09:16:42.166233Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271124999, Sender [1:955:2814], Recipient [1:955:2814]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T09:16:42.166273Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4847: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T09:16:42.166416Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271122945, Sender [1:1370:3157], Recipient [1:955:2814]: NKikimrSchemeOp.TDescribePath PathId: 3 SchemeshardId: 72075186233409553 2025-05-07T09:16:42.166443Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4852: StateWork, processing event TEvSchemeShard::TEvDescribeScheme 2025-05-07T09:16:42.166529Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: PathId: 3 SchemeshardId: 72075186233409553, at schemeshard: 72075186233409553 2025-05-07T09:16:42.166692Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:44: Tablet 72075186233409553 describe pathId 3 took 132us result status StatusSuccess 2025-05-07T09:16:42.167120Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Database2/Topic1" PathDescription { Self { Name: "Topic1" PathId: 3 SchemeshardId: 72075186233409553 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 108 CreateStep: 350 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 1 } ChildrenExist: false BalancerTabletID: 72075186233409559 } PersQueueGroup { Name: "Topic1" PathId: 3 TotalGroupCount: 1 PartitionPerTablet: 1 PQTabletConfig { 
PartitionConfig { LifetimeSeconds: 10 } YdbDatabasePath: "/MyRoot/Database2" } Partitions { PartitionId: 0 TabletId: 72075186233409558 Status: Active } AlterVersion: 1 BalancerTabletID: 72075186233409559 NextPartitionId: 1 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 3 ProcessingParams { Version: 2 PlanResolution: 50 Coordinators: 72075186233409554 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409555 SchemeShard: 72075186233409553 } DomainKey { SchemeShard: 72057594046678944 PathId: 3 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 7 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 3 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 1 PQPartitionsLimit: 1000000 } } PathId: 3 PathOwnerId: 72075186233409553, at schemeshard: 72075186233409553 2025-05-07T09:16:42.551848Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:290:2274]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T09:16:42.551934Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4848: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T09:16:42.552005Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271124999, Sender [1:290:2274], Recipient [1:290:2274]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T09:16:42.552043Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4847: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T09:16:42.605788Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271125517, Sender [0:0:0], Recipient [1:290:2274]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToRunDataErasureBSC 2025-05-07T09:16:42.605848Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5039: StateWork, processing event TEvSchemeShard::TEvWakeupToRunDataErasureBSC 2025-05-07T09:16:42.605876Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__root_data_erasure_manager.cpp:352: [RootDataErasureManager] SendRequestToBSC: Generation# 1 2025-05-07T09:16:42.606105Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 268637738, Sender [1:298:2280], Recipient [1:290:2274]: NKikimrBlobStorage.TEvControllerShredResponse CurrentGeneration: 1 Completed: true Progress10k: 10000 2025-05-07T09:16:42.606140Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5038: StateWork, processing event TEvBlobStorage::TEvControllerShredResponse 2025-05-07T09:16:42.606171Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:7757: Handle TEvControllerShredResponse, at schemeshard: 72057594046678944 2025-05-07T09:16:42.606238Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:628: TTxCompleteDataErasureBSC Execute at schemeshard: 72057594046678944 2025-05-07T09:16:42.606271Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__root_data_erasure_manager.cpp:640: TTxCompleteDataErasureBSC: Data shred in BSC is completed 2025-05-07T09:16:42.606355Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:168: [RootDataErasureManager] ScheduleDataErasureWakeup: Interval# 0.922000s, Timestamp# 1970-01-01T00:00:05.123000Z 2025-05-07T09:16:42.606422Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__root_data_erasure_manager.cpp:376: [RootDataErasureManager] Complete: Generation# 1, duration# 2 s 2025-05-07T09:16:42.609200Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:652: TTxCompleteDataErasureBSC Complete at schemeshard: 72057594046678944, NeedScheduleRequestToBSC# false 2025-05-07T09:16:42.609874Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269877761, Sender [1:2417:4023], Recipient [1:290:2274]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-05-07T09:16:42.609933Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4937: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-05-07T09:16:42.609997Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5713: Pipe server connected, at tablet: 72057594046678944 2025-05-07T09:16:42.610167Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271125519, Sender [1:274:2265], Recipient [1:290:2274]: NKikimrScheme.TEvDataErasureInfoRequest 2025-05-07T09:16:42.610201Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5036: StateWork, processing event TEvSchemeShard::TEvDataErasureInfoRequest 2025-05-07T09:16:42.610264Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:7712: Handle TEvDataErasureInfoRequest, at schemeshard: 72057594046678944 |94.5%| [TA] $(B)/ydb/core/tx/datashard/ut_change_collector/test-results/unittest/{meta.json ... results_accumulator.log} |94.5%| [TA] {RESULT} $(B)/ydb/core/tx/datashard/ut_change_collector/test-results/unittest/{meta.json ... results_accumulator.log} >> KqpIndexes::MultipleSecondaryIndex-UseSink [GOOD] >> KqpIndexes::MultipleSecondaryIndexWithSameComulns+UseSink >> TestDataErasure::DataErasureWithMerge [GOOD] >> TestDataErasure::DataErasureWithSplit [GOOD] >> TestDataErasure::DataErasureRun3CyclesForTopics [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_data_erasure/unittest >> TestDataErasure::DataErasureWithMerge [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:65:2058] recipient: [1:59:2100] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:65:2058] recipient: [1:59:2100] Leader for TabletID 72057594046678944 is [1:69:2104] sender: [1:73:2058] recipient: [1:59:2100] 2025-05-07T09:16:36.843628Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T09:16:36.843765Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:16:36.843826Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T09:16:36.843871Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T09:16:36.850068Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T09:16:36.850168Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 
2025-05-07T09:16:36.850264Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:16:36.850395Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T09:16:36.851211Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T09:16:36.853907Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T09:16:36.967863Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T09:16:36.967918Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:16:36.969374Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T09:16:36.969737Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T09:16:36.969932Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T09:16:36.977954Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T09:16:36.978288Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T09:16:36.979014Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T09:16:36.979349Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T09:16:36.993792Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:16:37.001237Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:16:37.001347Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:16:37.001581Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T09:16:37.001653Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:16:37.001761Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T09:16:37.002042Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T09:16:37.004944Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:69:2104] sender: 
[1:148:2058] recipient: [1:16:2063] 2025-05-07T09:16:37.144140Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T09:16:37.144384Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:16:37.146457Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T09:16:37.147534Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T09:16:37.147611Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:16:37.149205Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T09:16:37.149353Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T09:16:37.149557Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:16:37.149695Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T09:16:37.149739Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T09:16:37.149786Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T09:16:37.150387Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:16:37.150451Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T09:16:37.150498Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T09:16:37.150987Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:16:37.151026Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:16:37.151069Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:16:37.151124Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T09:16:37.161678Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T09:16:37.164369Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T09:16:37.164565Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T09:16:37.165661Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T09:16:37.165833Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 74 RawX2: 4294969404 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:16:37.165886Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:16:37.178869Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T09:16:37.178947Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:16:37.179162Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T09:16:37.179253Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T09:16:37.180054Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:16:37.180103Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:16:37.180286Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:16:37.180323Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:123:2 ... 
EMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271124999, Sender [1:185:2178], Recipient [1:185:2178]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T09:16:44.194596Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4847: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T09:16:44.205011Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:277:2238]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T09:16:44.205090Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4848: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T09:16:44.205179Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271124999, Sender [1:277:2238], Recipient [1:277:2238]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T09:16:44.205209Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4847: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T09:16:44.237326Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:185:2178]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T09:16:44.237419Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4848: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T09:16:44.237507Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271124999, Sender [1:185:2178], Recipient [1:185:2178]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T09:16:44.237540Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4847: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T09:16:44.247969Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:277:2238]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T09:16:44.248075Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4848: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T09:16:44.248187Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271124999, Sender [1:277:2238], Recipient [1:277:2238]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T09:16:44.248218Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4847: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T09:16:44.280458Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:185:2178]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T09:16:44.280533Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4848: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T09:16:44.281290Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271124999, Sender [1:185:2178], Recipient [1:185:2178]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T09:16:44.281338Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4847: StateWork, processing event 
TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T09:16:44.292330Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269553162, Sender [1:1203:3016], Recipient [1:277:2238]: NKikimrTxDataShard.TEvPeriodicTableStats DatashardId: 72075186233409551 TableLocalId: 2 Generation: 2 Round: 2 TableStats { DataSize: 10141461 RowCount: 99 IndexSize: 4463 InMemSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 SearchHeight: 1 LastFullCompactionTs: 0 HasLoanedParts: false Channels { Channel: 1 DataSize: 10141461 IndexSize: 4463 } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 2379 Memory: 94331 Storage: 10148063 } ShardState: 2 UserTablePartOwners: 72075186233409551 NodeId: 1 StartTime: 50000 TableOwnerId: 72075186233409546 IsDstSplit: true FollowerId: 0 2025-05-07T09:16:44.292403Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4877: StateWork, processing event TEvDataShard::TEvPeriodicTableStats 2025-05-07T09:16:44.292456Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:561: Got periodic table stats at tablet 72075186233409546 from shard 72075186233409551 followerId 0 pathId [OwnerId: 72075186233409546, LocalPathId: 2] state 'Ready' dataSize 10141461 rowCount 99 cpuUsage 0.2379 2025-05-07T09:16:44.292569Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:568: Got periodic table stats at tablet 72075186233409546 from shard 72075186233409551 followerId 0 pathId [OwnerId: 72075186233409546, LocalPathId: 2] raw table stats: DataSize: 10141461 RowCount: 99 IndexSize: 4463 InMemSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 SearchHeight: 1 LastFullCompactionTs: 0 HasLoanedParts: false Channels { Channel: 1 DataSize: 10141461 IndexSize: 4463 } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 2025-05-07T09:16:44.292615Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:608: Will delay TTxStoreTableStats on# 0.100000s, queue# 1 2025-05-07T09:16:44.313582Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:277:2238]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T09:16:44.313657Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4848: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T09:16:44.313721Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271124999, Sender [1:277:2238], Recipient [1:277:2238]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T09:16:44.313747Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4847: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T09:16:44.324220Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271125517, Sender [0:0:0], Recipient [1:185:2178]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToRunDataErasureBSC 2025-05-07T09:16:44.324324Z node 1 
:FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5039: StateWork, processing event TEvSchemeShard::TEvWakeupToRunDataErasureBSC 2025-05-07T09:16:44.324363Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__root_data_erasure_manager.cpp:352: [RootDataErasureManager] SendRequestToBSC: Generation# 1 2025-05-07T09:16:44.324480Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 2146435092, Sender [0:0:0], Recipient [1:277:2238]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats 2025-05-07T09:16:44.324517Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5015: StateWork, processing event TEvPrivate::TEvPersistTableStats 2025-05-07T09:16:44.324550Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:588: Started TEvPersistStats at tablet 72075186233409546, queue size# 1 2025-05-07T09:16:44.324619Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:599: Will execute TTxStoreStats, queue# 1 2025-05-07T09:16:44.324657Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:608: Will delay TTxStoreTableStats on# 0.000000s, queue# 1 2025-05-07T09:16:44.324885Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 268637738, Sender [1:184:2177], Recipient [1:185:2178]: NKikimrBlobStorage.TEvControllerShredResponse CurrentGeneration: 1 Completed: true Progress10k: 10000 2025-05-07T09:16:44.324925Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5038: StateWork, processing event TEvBlobStorage::TEvControllerShredResponse 2025-05-07T09:16:44.324954Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:7757: Handle TEvControllerShredResponse, at schemeshard: 72057594046678944 2025-05-07T09:16:44.325014Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:628: TTxCompleteDataErasureBSC Execute at schemeshard: 72057594046678944 2025-05-07T09:16:44.325057Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__root_data_erasure_manager.cpp:640: TTxCompleteDataErasureBSC: Data shred in BSC is completed 2025-05-07T09:16:44.325114Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:168: [RootDataErasureManager] ScheduleDataErasureWakeup: Interval# 19.899500s, Timestamp# 1970-01-01T00:01:20.100500Z 2025-05-07T09:16:44.325159Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__root_data_erasure_manager.cpp:376: [RootDataErasureManager] Complete: Generation# 1, duration# 30 s 2025-05-07T09:16:44.325442Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 2 shard idx 72075186233409546:6 data size 10141461 row count 99 2025-05-07T09:16:44.325526Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186233409551 maps to shardIdx: 72075186233409546:6 followerId=0, pathId: [OwnerId: 72075186233409546, LocalPathId: 2], pathId map=Simple, is column=0, is olap=0, RowCount 99, DataSize 10141461 2025-05-07T09:16:44.325607Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72075186233409546:6 with partCount# 1, rowCount# 99, searchHeight# 1, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72075186233409546 2025-05-07T09:16:44.325683Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:483: Do not want to split tablet 72075186233409551 2025-05-07T09:16:44.325757Z node 1 :FLAT_TX_SCHEMESHARD TRACE: 
schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72075186233409546
2025-05-07T09:16:44.326239Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:652: TTxCompleteDataErasureBSC Complete at schemeshard: 72057594046678944, NeedScheduleRequestToBSC# false
2025-05-07T09:16:44.329421Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269877761, Sender [1:1378:3168], Recipient [1:185:2178]: NKikimr::TEvTabletPipe::TEvServerConnected
2025-05-07T09:16:44.329491Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4937: StateWork, processing event TEvTabletPipe::TEvServerConnected
2025-05-07T09:16:44.329531Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5713: Pipe server connected, at tablet: 72057594046678944
2025-05-07T09:16:44.329677Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271125519, Sender [1:169:2169], Recipient [1:185:2178]: NKikimrScheme.TEvDataErasureInfoRequest
2025-05-07T09:16:44.329717Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5036: StateWork, processing event TEvSchemeShard::TEvDataErasureInfoRequest
2025-05-07T09:16:44.329755Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:7712: Handle TEvDataErasureInfoRequest, at schemeshard: 72057594046678944
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_data_erasure/unittest >> TestDataErasure::DataErasureRun3CyclesForTopics [GOOD]
Test command err:
Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140]
IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140]
Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:128:2058] recipient: [1:108:2140]
2025-05-07T09:16:37.966541Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1
2025-05-07T09:16:37.966635Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10
2025-05-07T09:16:37.966681Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s
2025-05-07T09:16:37.966724Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration
2025-05-07T09:16:37.966774Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000
2025-05-07T09:16:37.966804Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000
2025-05-07T09:16:37.966855Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10
2025-05-07T09:16:37.966957Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false
2025-05-07T09:16:37.967791Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T09:16:37.968127Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T09:16:38.055203Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T09:16:38.055262Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:16:38.074896Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T09:16:38.074978Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T09:16:38.075128Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T09:16:38.085464Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T09:16:38.086049Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T09:16:38.086832Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T09:16:38.087171Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T09:16:38.089584Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:16:38.091276Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:16:38.091341Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:16:38.091399Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T09:16:38.091482Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:16:38.091557Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T09:16:38.091743Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T09:16:38.098865Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:239:2058] recipient: [1:15:2062] 2025-05-07T09:16:38.223786Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T09:16:38.223996Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:16:38.224225Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T09:16:38.224460Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T09:16:38.224522Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:16:38.226939Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T09:16:38.227082Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T09:16:38.227279Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:16:38.227359Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T09:16:38.227400Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T09:16:38.227437Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T09:16:38.229421Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:16:38.229474Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T09:16:38.229507Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T09:16:38.231449Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:16:38.231526Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:16:38.231580Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:16:38.231666Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T09:16:38.242115Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T09:16:38.244491Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T09:16:38.244672Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T09:16:38.245689Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T09:16:38.245876Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 130 RawX2: 4294969450 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:16:38.245931Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:16:38.246264Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T09:16:38.246334Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:16:38.246533Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T09:16:38.246655Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T09:16:38.249064Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:16:38.249116Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:16:38.249340Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:16:38.249405Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 
ger] SendRequestToBSC: Generation# 3 2025-05-07T09:16:43.989202Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 268637738, Sender [1:298:2280], Recipient [1:290:2274]: NKikimrBlobStorage.TEvControllerShredResponse CurrentGeneration: 3 Completed: false Progress10k: 5000 2025-05-07T09:16:43.989234Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5038: StateWork, processing event TEvBlobStorage::TEvControllerShredResponse 2025-05-07T09:16:43.989283Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:7757: Handle TEvControllerShredResponse, at schemeshard: 72057594046678944 2025-05-07T09:16:43.989349Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:628: TTxCompleteDataErasureBSC Execute at schemeshard: 72057594046678944 2025-05-07T09:16:43.989385Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__root_data_erasure_manager.cpp:644: TTxCompleteDataErasureBSC: Progress data shred in BSC 50% 2025-05-07T09:16:43.989430Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:652: TTxCompleteDataErasureBSC Complete at schemeshard: 72057594046678944, NeedScheduleRequestToBSC# true 2025-05-07T09:16:43.989467Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:346: [RootDataErasureManager] ScheduleRequestToBSC: Interval# 1.000000s 2025-05-07T09:16:44.061999Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:459:2411]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T09:16:44.062072Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4848: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T09:16:44.062234Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271124999, Sender [1:459:2411], Recipient [1:459:2411]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T09:16:44.062271Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4847: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T09:16:44.062458Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271122945, Sender [1:635:2552], Recipient [1:459:2411]: NKikimrSchemeOp.TDescribePath PathId: 2 SchemeshardId: 72075186233409546 2025-05-07T09:16:44.062494Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4852: StateWork, processing event TEvSchemeShard::TEvDescribeScheme 2025-05-07T09:16:44.062582Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: PathId: 2 SchemeshardId: 72075186233409546, at schemeshard: 72075186233409546 2025-05-07T09:16:44.062808Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:44: Tablet 72075186233409546 describe pathId 2 took 142us result status StatusSuccess 2025-05-07T09:16:44.063144Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Database1/Topic1" PathDescription { Self { Name: "Topic1" PathId: 2 SchemeshardId: 72075186233409546 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 103 CreateStep: 200 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 
UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 1 } ChildrenExist: false BalancerTabletID: 72075186233409550 } PersQueueGroup { Name: "Topic1" PathId: 2 TotalGroupCount: 1 PartitionPerTablet: 1 PQTabletConfig { PartitionConfig { LifetimeSeconds: 10 } YdbDatabasePath: "/MyRoot/Database1" } Partitions { PartitionId: 0 TabletId: 72075186233409549 Status: Active } AlterVersion: 1 BalancerTabletID: 72075186233409550 NextPartitionId: 1 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 2 ProcessingParams { Version: 2 PlanResolution: 50 Coordinators: 72075186233409547 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409548 SchemeShard: 72075186233409546 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 5 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 1 PQPartitionsLimit: 1000000 } } PathId: 2 PathOwnerId: 72075186233409546, at schemeshard: 72075186233409546 2025-05-07T09:16:44.146382Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:886:2761]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T09:16:44.146459Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4848: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T09:16:44.146609Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271124999, Sender [1:886:2761], Recipient [1:886:2761]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T09:16:44.146636Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4847: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T09:16:44.146757Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271122945, Sender [1:1061:2900], Recipient [1:886:2761]: NKikimrSchemeOp.TDescribePath PathId: 2 SchemeshardId: 72075186233409551 2025-05-07T09:16:44.146784Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4852: StateWork, processing event TEvSchemeShard::TEvDescribeScheme 2025-05-07T09:16:44.146875Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: PathId: 2 SchemeshardId: 72075186233409551, at schemeshard: 72075186233409551 2025-05-07T09:16:44.147091Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:44: Tablet 72075186233409551 describe pathId 2 took 153us result status StatusSuccess 2025-05-07T09:16:44.147515Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Database2/Topic1" PathDescription { Self { Name: "Topic1" PathId: 2 SchemeshardId: 72075186233409551 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 106 CreateStep: 300 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 1 } ChildrenExist: false BalancerTabletID: 72075186233409555 } PersQueueGroup { Name: "Topic1" PathId: 2 TotalGroupCount: 1 PartitionPerTablet: 1 PQTabletConfig { 
PartitionConfig { LifetimeSeconds: 10 } YdbDatabasePath: "/MyRoot/Database2" } Partitions { PartitionId: 0 TabletId: 72075186233409554 Status: Active } AlterVersion: 1 BalancerTabletID: 72075186233409555 NextPartitionId: 1 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 3 ProcessingParams { Version: 2 PlanResolution: 50 Coordinators: 72075186233409552 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409553 SchemeShard: 72075186233409551 } DomainKey { SchemeShard: 72057594046678944 PathId: 3 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 5 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 3 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 1 PQPartitionsLimit: 1000000 } } PathId: 2 PathOwnerId: 72075186233409551, at schemeshard: 72075186233409551 2025-05-07T09:16:44.489447Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:290:2274]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T09:16:44.489533Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4848: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T09:16:44.489638Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271124999, Sender [1:290:2274], Recipient [1:290:2274]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T09:16:44.489698Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4847: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T09:16:44.500270Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271125517, Sender [0:0:0], Recipient [1:290:2274]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToRunDataErasureBSC 2025-05-07T09:16:44.500364Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5039: StateWork, processing event TEvSchemeShard::TEvWakeupToRunDataErasureBSC 2025-05-07T09:16:44.500402Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__root_data_erasure_manager.cpp:352: [RootDataErasureManager] SendRequestToBSC: Generation# 3 2025-05-07T09:16:44.500605Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 268637738, Sender [1:298:2280], Recipient [1:290:2274]: NKikimrBlobStorage.TEvControllerShredResponse CurrentGeneration: 3 Completed: true Progress10k: 10000 2025-05-07T09:16:44.500638Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5038: StateWork, processing event TEvBlobStorage::TEvControllerShredResponse 2025-05-07T09:16:44.500684Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:7757: Handle TEvControllerShredResponse, at schemeshard: 72057594046678944 2025-05-07T09:16:44.500775Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:628: TTxCompleteDataErasureBSC Execute at schemeshard: 72057594046678944 2025-05-07T09:16:44.500811Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__root_data_erasure_manager.cpp:640: TTxCompleteDataErasureBSC: Data shred in BSC is completed 2025-05-07T09:16:44.500860Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:168: [RootDataErasureManager] ScheduleDataErasureWakeup: Interval# 0.978000s, Timestamp# 1970-01-01T00:00:11.067000Z 2025-05-07T09:16:44.500914Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__root_data_erasure_manager.cpp:376: [RootDataErasureManager] Complete: Generation# 3, duration# 2 s
2025-05-07T09:16:44.503523Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:652: TTxCompleteDataErasureBSC Complete at schemeshard: 72057594046678944, NeedScheduleRequestToBSC# false
2025-05-07T09:16:44.504221Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269877761, Sender [1:1529:3319], Recipient [1:290:2274]: NKikimr::TEvTabletPipe::TEvServerConnected
2025-05-07T09:16:44.504301Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4937: StateWork, processing event TEvTabletPipe::TEvServerConnected
2025-05-07T09:16:44.504376Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5713: Pipe server connected, at tablet: 72057594046678944
2025-05-07T09:16:44.504543Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271125519, Sender [1:274:2265], Recipient [1:290:2274]: NKikimrScheme.TEvDataErasureInfoRequest
2025-05-07T09:16:44.504583Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5036: StateWork, processing event TEvSchemeShard::TEvDataErasureInfoRequest
2025-05-07T09:16:44.504635Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:7712: Handle TEvDataErasureInfoRequest, at schemeshard: 72057594046678944
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_data_erasure/unittest >> TestDataErasure::DataErasureWithSplit [GOOD]
Test command err:
Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:65:2058] recipient: [1:59:2100]
IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:65:2058] recipient: [1:59:2100]
Leader for TabletID 72057594046678944 is [1:69:2104] sender: [1:73:2058] recipient: [1:59:2100]
2025-05-07T09:16:36.843601Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1
2025-05-07T09:16:36.843742Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10
2025-05-07T09:16:36.843796Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s
2025-05-07T09:16:36.843850Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration
2025-05-07T09:16:36.850059Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000
2025-05-07T09:16:36.850141Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000
2025-05-07T09:16:36.850239Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10
2025-05-07T09:16:36.850328Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false
2025-05-07T09:16:36.851207Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T09:16:36.854434Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T09:16:36.955393Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T09:16:36.955464Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:16:36.957026Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T09:16:36.957254Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T09:16:36.958542Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T09:16:36.971079Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T09:16:36.971474Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T09:16:36.978322Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T09:16:36.978987Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T09:16:36.990714Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:16:37.002043Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:16:37.002150Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:16:37.002408Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T09:16:37.002468Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:16:37.002521Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T09:16:37.002770Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T09:16:37.005476Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:69:2104] sender: [1:148:2058] recipient: [1:16:2063] 2025-05-07T09:16:37.142188Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T09:16:37.146224Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:16:37.146487Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T09:16:37.147929Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T09:16:37.148002Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:16:37.151904Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T09:16:37.152051Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T09:16:37.152223Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:16:37.152295Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T09:16:37.152333Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T09:16:37.152374Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T09:16:37.152873Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:16:37.152923Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T09:16:37.152964Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T09:16:37.153375Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:16:37.153414Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:16:37.153451Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:16:37.153498Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T09:16:37.157019Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T09:16:37.157641Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T09:16:37.162079Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T09:16:37.163381Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T09:16:37.163570Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 74 RawX2: 4294969404 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:16:37.163625Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:16:37.176062Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T09:16:37.176162Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:16:37.176369Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T09:16:37.176462Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T09:16:37.177454Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:16:37.177505Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:16:37.177705Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:16:37.177751Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:123:2 ... 
d_impl.cpp:4840: StateWork, received event# 271124999, Sender [1:185:2178], Recipient [1:185:2178]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T09:16:44.270963Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4847: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T09:16:44.282286Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269553162, Sender [1:998:2862], Recipient [1:277:2238]: NKikimrTxDataShard.TEvPeriodicTableStats DatashardId: 72075186233409550 TableLocalId: 2 Generation: 2 Round: 2 TableStats { DataSize: 5019511 RowCount: 49 IndexSize: 2213 InMemSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 SearchHeight: 1 LastFullCompactionTs: 0 HasLoanedParts: false Channels { Channel: 1 DataSize: 5019511 IndexSize: 2213 } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 2243 Memory: 93107 Storage: 5022813 } ShardState: 2 UserTablePartOwners: 72075186233409550 NodeId: 1 StartTime: 50000 TableOwnerId: 72075186233409546 IsDstSplit: true FollowerId: 0 2025-05-07T09:16:44.282344Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4877: StateWork, processing event TEvDataShard::TEvPeriodicTableStats 2025-05-07T09:16:44.282392Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:561: Got periodic table stats at tablet 72075186233409546 from shard 72075186233409550 followerId 0 pathId [OwnerId: 72075186233409546, LocalPathId: 2] state 'Ready' dataSize 5019511 rowCount 49 cpuUsage 0.2243 2025-05-07T09:16:44.282493Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:568: Got periodic table stats at tablet 72075186233409546 from shard 72075186233409550 followerId 0 pathId [OwnerId: 72075186233409546, LocalPathId: 2] raw table stats: DataSize: 5019511 RowCount: 49 IndexSize: 2213 InMemSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 SearchHeight: 1 LastFullCompactionTs: 0 HasLoanedParts: false Channels { Channel: 1 DataSize: 5019511 IndexSize: 2213 } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 2025-05-07T09:16:44.282529Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:608: Will delay TTxStoreTableStats on# 0.100000s, queue# 1 2025-05-07T09:16:44.282767Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269553162, Sender [1:1003:2864], Recipient [1:277:2238]: NKikimrTxDataShard.TEvPeriodicTableStats DatashardId: 72075186233409551 TableLocalId: 2 Generation: 2 Round: 2 TableStats { DataSize: 5121950 RowCount: 50 IndexSize: 2258 InMemSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 SearchHeight: 1 LastFullCompactionTs: 0 HasLoanedParts: false Channels { Channel: 1 DataSize: 5121950 IndexSize: 2258 } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 
LocksBroken: 0 } TabletMetrics { CPU: 1305 Memory: 93131 Storage: 5125318 } ShardState: 2 UserTablePartOwners: 72075186233409551 NodeId: 1 StartTime: 50000 TableOwnerId: 72075186233409546 IsDstSplit: true FollowerId: 0 2025-05-07T09:16:44.282803Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4877: StateWork, processing event TEvDataShard::TEvPeriodicTableStats 2025-05-07T09:16:44.282834Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:561: Got periodic table stats at tablet 72075186233409546 from shard 72075186233409551 followerId 0 pathId [OwnerId: 72075186233409546, LocalPathId: 2] state 'Ready' dataSize 5121950 rowCount 50 cpuUsage 0.1305 2025-05-07T09:16:44.282915Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:568: Got periodic table stats at tablet 72075186233409546 from shard 72075186233409551 followerId 0 pathId [OwnerId: 72075186233409546, LocalPathId: 2] raw table stats: DataSize: 5121950 RowCount: 50 IndexSize: 2258 InMemSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 SearchHeight: 1 LastFullCompactionTs: 0 HasLoanedParts: false Channels { Channel: 1 DataSize: 5121950 IndexSize: 2258 } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 2025-05-07T09:16:44.293397Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:277:2238]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T09:16:44.293459Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4848: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T09:16:44.293535Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271124999, Sender [1:277:2238], Recipient [1:277:2238]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T09:16:44.293562Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4847: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T09:16:44.314651Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 2146435092, Sender [0:0:0], Recipient [1:277:2238]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats 2025-05-07T09:16:44.314730Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5015: StateWork, processing event TEvPrivate::TEvPersistTableStats 2025-05-07T09:16:44.314762Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:588: Started TEvPersistStats at tablet 72075186233409546, queue size# 2 2025-05-07T09:16:44.314828Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:599: Will execute TTxStoreStats, queue# 2 2025-05-07T09:16:44.314857Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:608: Will delay TTxStoreTableStats on# 0.000000s, queue# 2 2025-05-07T09:16:44.314956Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271125517, Sender [0:0:0], Recipient [1:185:2178]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToRunDataErasureBSC 2025-05-07T09:16:44.314993Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5039: StateWork, processing event TEvSchemeShard::TEvWakeupToRunDataErasureBSC 2025-05-07T09:16:44.315020Z node 1 :FLAT_TX_SCHEMESHARD 
INFO: schemeshard__root_data_erasure_manager.cpp:352: [RootDataErasureManager] SendRequestToBSC: Generation# 1 2025-05-07T09:16:44.315242Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 268637738, Sender [1:184:2177], Recipient [1:185:2178]: NKikimrBlobStorage.TEvControllerShredResponse CurrentGeneration: 1 Completed: true Progress10k: 10000 2025-05-07T09:16:44.315274Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5038: StateWork, processing event TEvBlobStorage::TEvControllerShredResponse 2025-05-07T09:16:44.315299Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:7757: Handle TEvControllerShredResponse, at schemeshard: 72057594046678944 2025-05-07T09:16:44.315349Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:628: TTxCompleteDataErasureBSC Execute at schemeshard: 72057594046678944 2025-05-07T09:16:44.315380Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__root_data_erasure_manager.cpp:640: TTxCompleteDataErasureBSC: Data shred in BSC is completed 2025-05-07T09:16:44.315437Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:168: [RootDataErasureManager] ScheduleDataErasureWakeup: Interval# 19.899500s, Timestamp# 1970-01-01T00:01:20.100500Z 2025-05-07T09:16:44.315474Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__root_data_erasure_manager.cpp:376: [RootDataErasureManager] Complete: Generation# 1, duration# 30 s 2025-05-07T09:16:44.315769Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 2 shard idx 72075186233409546:5 data size 5019511 row count 49 2025-05-07T09:16:44.315845Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186233409550 maps to shardIdx: 72075186233409546:5 followerId=0, pathId: [OwnerId: 72075186233409546, LocalPathId: 2], pathId map=Simple, is column=0, is olap=0, RowCount 49, DataSize 5019511 2025-05-07T09:16:44.315921Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72075186233409546:5 with partCount# 1, rowCount# 49, searchHeight# 1, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72075186233409546 2025-05-07T09:16:44.315994Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:483: Do not want to split tablet 72075186233409550 2025-05-07T09:16:44.316043Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 2 shard idx 72075186233409546:6 data size 5121950 row count 50 2025-05-07T09:16:44.316082Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186233409551 maps to shardIdx: 72075186233409546:6 followerId=0, pathId: [OwnerId: 72075186233409546, LocalPathId: 2], pathId map=Simple, is column=0, is olap=0, RowCount 50, DataSize 5121950 2025-05-07T09:16:44.316126Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72075186233409546:6 with partCount# 1, rowCount# 50, searchHeight# 1, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72075186233409546 2025-05-07T09:16:44.316157Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:483: Do not want to split tablet 72075186233409551 2025-05-07T09:16:44.316208Z node 1 :FLAT_TX_SCHEMESHARD TRACE: 
schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72075186233409546
2025-05-07T09:16:44.316681Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:652: TTxCompleteDataErasureBSC Complete at schemeshard: 72057594046678944, NeedScheduleRequestToBSC# false
2025-05-07T09:16:44.320490Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269877761, Sender [1:1175:3009], Recipient [1:185:2178]: NKikimr::TEvTabletPipe::TEvServerConnected
2025-05-07T09:16:44.320558Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4937: StateWork, processing event TEvTabletPipe::TEvServerConnected
2025-05-07T09:16:44.320597Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5713: Pipe server connected, at tablet: 72057594046678944
2025-05-07T09:16:44.320797Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271125519, Sender [1:169:2169], Recipient [1:185:2178]: NKikimrScheme.TEvDataErasureInfoRequest
2025-05-07T09:16:44.320835Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5036: StateWork, processing event TEvSchemeShard::TEvDataErasureInfoRequest
2025-05-07T09:16:44.320871Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:7712: Handle TEvDataErasureInfoRequest, at schemeshard: 72057594046678944
>> TLocksTest::GoodNullLock [GOOD]
>> KqpPrefixedVectorIndexes::OrderByCosineDistanceNullableLevel2 [GOOD]
>> SystemView::AuthGroups_ResultOrder [GOOD]
>> SystemView::AuthGroups_TableRange
>> test_retry.py::TestRetry::test_fail_first[kikimr0] [GOOD]
>> KqpIndexes::DoUpsertWithoutIndexUpdate+UniqIndex+UseSink [GOOD]
>> KqpQuery::CurrentUtcTimestamp
>> KqpExplain::UpdateSecondaryConditional-UseSink
------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpPrefixedVectorIndexes::OrderByCosineDistanceNullableLevel2 [GOOD]
Test command err:
Trying to start YDB, gRPC: 62375, MsgBus: 5846
2025-05-07T09:15:31.436007Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501629958748955002:2063];send_to=[0:7307199536658146131:7762515];
2025-05-07T09:15:31.436955Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message;
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003c1a/r3tmp/tmpWIOrQI/pdisk_1.dat
2025-05-07T09:15:32.068260Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded
2025-05-07T09:15:32.086892Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2025-05-07T09:15:32.087018Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting
2025-05-07T09:15:32.092738Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected
TServer::EnableGrpc on GrpcPort 62375, node 1
2025-05-07T09:15:32.246500Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe)
2025-05-07T09:15:32.246527Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe)
2025-05-07T09:15:32.246552Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe)
2025-05-07T09:15:32.246683Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:5846 TClient is connected to server localhost:5846 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:15:32.964346Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:15:32.984013Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T09:15:32.990407Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:15:33.150657Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:15:33.430358Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:15:33.536634Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:15:35.735968Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501629975928825844:2407], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:15:35.736073Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:15:36.105732Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T09:15:36.166864Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T09:15:36.258303Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T09:15:36.308661Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T09:15:36.391216Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T09:15:36.436908Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501629958748955002:2063];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:15:36.437152Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:15:36.484206Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T09:15:36.563793Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T09:15:36.686528Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501629980223793808:2471], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:15:36.686621Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:15:36.686874Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501629980223793813:2474], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:15:36.691230Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-05-07T09:15:36.710589Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501629980223793815:2475], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T09:15:36.809877Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501629980223793869:3426] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:15:38.596776Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269877761, Sender [1:7501629988813728756:3606], Recipient [1:7501629958748955426:2194]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-05-07T09:15:38.596830Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4937: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-05-07T09:15:38.596866Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5713: Pipe server connected, at tablet: 72057594046644480 2025-05-07T09:15:38.596921Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271122432, Sender [1:7501629988813728752:3603], Recipient [1:7501629958748955426:2194]: {TEvModifySchemeTransaction txid# 281474976710672 TabletId# 72057594046644480} 2025-05-07T09:15:38.596936Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4851: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-05-07T09:15:38.674884Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/Root" OperationType: ESchemeOpCreateTable CreateTable { Name: "TestTable" Columns { Name: "pk" Type: "Int64" NotNull: true } Columns { Name: "user" Type: "String" NotNull: true } Columns { Name: "emb" Type: "String" NotNull: true } Columns { Name: "data" Type: "String" NotNull: true } KeyColumnNames: "pk" PartitionConfig { PartitioningPolicy { MinPartitionsCount: 3 } ColumnFamilies { Id: 0 StorageConfig { SysLog { PreferredPoolKind: "test" } Log { PreferredPoolKind: "test" } Data { PreferredPoolKind: "test" } } } } SplitBoundary { KeyPrefix { Tuple { Optional { Int64: 40 } } } } SplitBoundary { KeyPrefix { Tuple { Optional { Int64: 60 } } } } Temporary: false } CreateIndexedTable { } } TxId: 281474976710672 TabletId: 72057594046644480 PeerName: "ipv6:[::1]:35010" , at schemeshard: 72057594046644480 2025-05-07T09:15:38.675350Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_table.cpp:426: TCreateTable Propose, path: /Root/TestTable, opId: 281474976710672:0, at schemeshard: 72057594046644480 2025-05-07T09:15:38.675535Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_create_table.cpp:433: TCreateTable Propose, path: /Root/TestTable, opId: 281474976710672:0, schema: Name: "TestTable" Columns { Name: "pk" Type: "Int64" NotNull: true } Columns { Name: "user" Type: "String" NotNull: true } Columns { Name: "emb" Type: "String" NotNull: true } Columns { Name: "data" Type: "String" NotNull: true } KeyColumnNames: "pk" PartitionConfig { PartitioningPolicy { MinPartitionsCount: 3 } ColumnFamilies { Id: 0 StorageConfig { SysLog { PreferredPoolKind: "test" } Log { PreferredPoolKind: "test" } Data ... 
nedTxCompleted: 2 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 18 RowDeletes: 0 RowReads: 0 RangeReads: 33 PartCount: 1 RangeReadRows: 66 SearchHeight: 1 LastFullCompactionTs: 0 HasLoanedParts: false Channels { Channel: 1 DataSize: 842 IndexSize: 0 } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 2025-05-07T09:16:43.298091Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:608: Will delay TTxStoreTableStats on# 0.099996s, queue# 1 2025-05-07T09:16:43.298243Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269553162, Sender [2:7501630138693523609:2544], Recipient [2:7501630108628749935:2141]: NKikimrTxDataShard.TEvPeriodicTableStats DatashardId: 72075186224037927 TableLocalId: 20 Generation: 1 Round: 2 TableStats { DataSize: 710 RowCount: 30 IndexSize: 0 InMemSize: 0 LastAccessTime: 1746609402380 LastUpdateTime: 1746609373703 ImmediateTxCompleted: 0 PlannedTxCompleted: 2 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 30 RowDeletes: 0 RowReads: 0 RangeReads: 22 PartCount: 1 RangeReadRows: 77 SearchHeight: 1 LastFullCompactionTs: 0 HasLoanedParts: false Channels { Channel: 1 DataSize: 710 IndexSize: 0 } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 921 Memory: 119581 Storage: 818 } ShardState: 2 UserTablePartOwners: 72075186224037927 NodeId: 2 StartTime: 1746609373256 TableOwnerId: 72057594046644480 FollowerId: 0 2025-05-07T09:16:43.298277Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4877: StateWork, processing event TEvDataShard::TEvPeriodicTableStats 2025-05-07T09:16:43.298295Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:561: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037927 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 20] state 'Ready' dataSize 710 rowCount 30 cpuUsage 0.0921 2025-05-07T09:16:43.298372Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:568: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037927 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 20] raw table stats: DataSize: 710 RowCount: 30 IndexSize: 0 InMemSize: 0 LastAccessTime: 1746609402380 LastUpdateTime: 1746609373703 ImmediateTxCompleted: 0 PlannedTxCompleted: 2 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 30 RowDeletes: 0 RowReads: 0 RangeReads: 22 PartCount: 1 RangeReadRows: 77 SearchHeight: 1 LastFullCompactionTs: 0 HasLoanedParts: false Channels { Channel: 1 DataSize: 710 IndexSize: 0 } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 2025-05-07T09:16:43.298509Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269553162, Sender [2:7501630138693523583:2542], Recipient [2:7501630108628749935:2141]: NKikimrTxDataShard.TEvPeriodicTableStats DatashardId: 72075186224037925 TableLocalId: 21 Generation: 1 Round: 2 TableStats { DataSize: 107 RowCount: 3 IndexSize: 0 InMemSize: 0 LastAccessTime: 1746609402372 LastUpdateTime: 1746609373580 ImmediateTxCompleted: 0 PlannedTxCompleted: 2 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 3 RowDeletes: 0 RowReads: 0 RangeReads: 11 PartCount: 1 RangeReadRows: 11 SearchHeight: 1 
LastFullCompactionTs: 0 HasLoanedParts: false Channels { Channel: 1 DataSize: 107 IndexSize: 0 } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 1884 Memory: 119579 Storage: 228 } ShardState: 2 UserTablePartOwners: 72075186224037925 NodeId: 2 StartTime: 1746609373248 TableOwnerId: 72057594046644480 FollowerId: 0 2025-05-07T09:16:43.298531Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4877: StateWork, processing event TEvDataShard::TEvPeriodicTableStats 2025-05-07T09:16:43.298545Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:561: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037925 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 21] state 'Ready' dataSize 107 rowCount 3 cpuUsage 0.1884 2025-05-07T09:16:43.298639Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:568: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037925 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 21] raw table stats: DataSize: 107 RowCount: 3 IndexSize: 0 InMemSize: 0 LastAccessTime: 1746609402372 LastUpdateTime: 1746609373580 ImmediateTxCompleted: 0 PlannedTxCompleted: 2 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 3 RowDeletes: 0 RowReads: 0 RangeReads: 11 PartCount: 1 RangeReadRows: 11 SearchHeight: 1 LastFullCompactionTs: 0 HasLoanedParts: false Channels { Channel: 1 DataSize: 107 IndexSize: 0 } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 2025-05-07T09:16:43.398274Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 2146435092, Sender [0:0:0], Recipient [2:7501630108628749935:2141]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats 2025-05-07T09:16:43.398328Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5015: StateWork, processing event TEvPrivate::TEvPersistTableStats 2025-05-07T09:16:43.398353Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:588: Started TEvPersistStats at tablet 72057594046644480, queue size# 3 2025-05-07T09:16:43.398402Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:599: Will execute TTxStoreStats, queue# 3 2025-05-07T09:16:43.398420Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:608: Will delay TTxStoreTableStats on# 0.000000s, queue# 3 2025-05-07T09:16:43.398480Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 19 shard idx 72057594046644480:38 data size 842 row count 18 2025-05-07T09:16:43.398553Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037926 maps to shardIdx: 72057594046644480:38 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 19], pathId map=indexImplLevelTable, is column=0, is olap=0, RowCount 18, DataSize 842 2025-05-07T09:16:43.398574Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037926, followerId 0 2025-05-07T09:16:43.398647Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:38 with partCount# 1, rowCount# 18, searchHeight# 1, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-05-07T09:16:43.398695Z node 2 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:483: Do not want to split tablet 72075186224037926 2025-05-07T09:16:43.398734Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 20 shard idx 72057594046644480:39 data size 710 row count 30 2025-05-07T09:16:43.398769Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037927 maps to shardIdx: 72057594046644480:39 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 20], pathId map=indexImplPostingTable, is column=0, is olap=0, RowCount 30, DataSize 710 2025-05-07T09:16:43.398778Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037927, followerId 0 2025-05-07T09:16:43.398815Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:39 with partCount# 1, rowCount# 30, searchHeight# 1, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-05-07T09:16:43.398835Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:483: Do not want to split tablet 72075186224037927 2025-05-07T09:16:43.398858Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 21 shard idx 72057594046644480:40 data size 107 row count 3 2025-05-07T09:16:43.398890Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037925 maps to shardIdx: 72057594046644480:40 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 21], pathId map=indexImplPrefixTable, is column=0, is olap=0, RowCount 3, DataSize 107 2025-05-07T09:16:43.398900Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037925, followerId 0 2025-05-07T09:16:43.398934Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:40 with partCount# 1, rowCount# 3, searchHeight# 1, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-05-07T09:16:43.398951Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:483: Do not want to split tablet 72075186224037925 2025-05-07T09:16:43.399007Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-05-07T09:16:43.399128Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 2146435092, Sender [0:0:0], Recipient [2:7501630108628749935:2141]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats 2025-05-07T09:16:43.399144Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5015: StateWork, processing event TEvPrivate::TEvPersistTableStats 2025-05-07T09:16:43.399162Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:588: Started TEvPersistStats at tablet 72057594046644480, queue size# 0 2025-05-07T09:16:43.421864Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271125000, Sender [0:0:0], Recipient [2:7501630108628749935:2141]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T09:16:43.421914Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4848: StateWork, 
processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T09:16:43.421965Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271124999, Sender [2:7501630108628749935:2141], Recipient [2:7501630108628749935:2141]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T09:16:43.422001Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4847: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T09:16:44.422254Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271125000, Sender [0:0:0], Recipient [2:7501630108628749935:2141]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T09:16:44.422301Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4848: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T09:16:44.422352Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271124999, Sender [2:7501630108628749935:2141], Recipient [2:7501630108628749935:2141]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T09:16:44.422370Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4847: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime
>> KqpLimits::QSReplySizeEnsureMemoryLimits-useSink
>> KqpParams::ExplicitSameParameterTypesQueryCacheCheck
>> TestDataErasure::DataErasureRun3CyclesForAllSupportedObjects [GOOD]
>> KqpQuery::QueryClientTimeout
>> KqpLimits::CancelAfterRwTx+useSink
>> KqpQuery::QueryTimeout
>> KqpExplain::LimitOffset
>> KqpQuery::RandomNumber
>> KqpExplain::UpdateSecondaryConditionalSecondaryKey-UseSink
>> KqpStats::JoinNoStatsYql
>> KqpParams::CheckQueryCacheForPreparedQuery
>> KqpQuery::YqlSyntaxV0
>> KqpExplain::SortStage

------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TLocksTest::GoodNullLock [GOOD]
Test command err:
2025-05-07T09:16:00.057684Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501630083887532207:2196];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:16:00.072785Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/004a19/r3tmp/tmp5ZAMjd/pdisk_1.dat 2025-05-07T09:16:00.885616Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:16:00.899258Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:16:00.899438Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:16:00.907655Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:12504 WaitRootIsUp 'dc-1'...
TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-05-07T09:16:01.351406Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-05-07T09:16:01.379203Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-05-07T09:16:01.385183Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:01.523358Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:01.599822Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:04.395443Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7501630101391873510:2063];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:16:04.397431Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/004a19/r3tmp/tmps2iLnf/pdisk_1.dat 2025-05-07T09:16:04.606235Z node 2 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:16:04.630626Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:16:04.630741Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:16:04.633110Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:63033 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-05-07T09:16:04.924928Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:04.933245Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:04.951112Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-05-07T09:16:04.956804Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:05.063390Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:05.132880Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:08.693905Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7501630117533372593:2061];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:16:08.693986Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/004a19/r3tmp/tmpMOgxbh/pdisk_1.dat 2025-05-07T09:16:08.918315Z node 3 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:16:08.939370Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:16:08.939433Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:16:08.940847Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:17521 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-05-07T09:16:09.184157Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:09.198291Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:09.216181Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:09.282069Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:09.339280Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:13.235678Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7501630137762061533:2059];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:16:13.235872Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/004a19/r3tmp/tmpLcG8NM/pdisk_1.dat 2025-05-07T09:16:13.389138Z node 4 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:16:13.396232Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:16:13.396318Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:16:13.401875Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:62599 WaitRootIsUp 'dc-1'... TCl ... { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 
2025-05-07T09:16:26.980816Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:26.991820Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:27.004110Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-05-07T09:16:27.011404Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:27.132802Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:27.216260Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:31.158772Z node 8 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[8:7501630213631666250:2059];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:16:31.158847Z node 8 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/004a19/r3tmp/tmpIXIjKb/pdisk_1.dat 2025-05-07T09:16:31.287179Z node 8 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:16:31.318971Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:16:31.319093Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:16:31.323005Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:12958 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 
2025-05-07T09:16:31.550950Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-05-07T09:16:31.574746Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:31.657774Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:31.721914Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:35.898643Z node 9 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[9:7501630234132192274:2062];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:16:35.898964Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/004a19/r3tmp/tmpVtj0jP/pdisk_1.dat 2025-05-07T09:16:36.017756Z node 9 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:16:36.052431Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:16:36.052533Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:16:36.054387Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:61281 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-05-07T09:16:36.336431Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T09:16:36.345184Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:36.355201Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-05-07T09:16:36.360919Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:36.446660Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:36.521821Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:40.778391Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7501630253346247729:2066];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:16:40.778575Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/004a19/r3tmp/tmpCD9W0A/pdisk_1.dat 2025-05-07T09:16:40.899481Z node 10 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:16:40.915080Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:16:40.915201Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:16:40.916614Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:25742 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 
2025-05-07T09:16:41.167665Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-05-07T09:16:41.191287Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-05-07T09:16:41.329964Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 2025-05-07T09:16:41.409785Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting...
>> KqpLimits::ComputeActorMemoryAllocationFailure+useSink

------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpIndexes::DoUpsertWithoutIndexUpdate+UniqIndex+UseSink [GOOD]
Test command err:
Trying to start YDB, gRPC: 29843, MsgBus: 20073
2025-05-07T09:16:21.526538Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501630172099537138:2195];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:16:21.530374Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003bc9/r3tmp/tmpuWV3ZU/pdisk_1.dat 2025-05-07T09:16:22.199379Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:16:22.199521Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:16:22.201809Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:16:22.263113Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 29843, node 1 2025-05-07T09:16:22.382732Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:16:22.382761Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:16:22.382775Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:16:22.382955Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:20073 TClient is connected to server localhost:20073 WaitRootIsUp 'Root'...
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:16:23.109365Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:23.140137Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:23.306445Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:23.464357Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:23.564853Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:25.587683Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501630189279407847:2406], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:25.588071Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:25.837946Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T09:16:25.911913Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T09:16:25.943806Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T09:16:26.050750Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T09:16:26.096048Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T09:16:26.145462Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T09:16:26.220101Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T09:16:26.303602Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501630193574375810:2470], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:26.303679Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:26.303958Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501630193574375815:2473], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:26.307653Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-05-07T09:16:26.328967Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501630193574375817:2474], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T09:16:26.435233Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501630193574375868:3424] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:16:26.571540Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501630172099537138:2195];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:16:26.571653Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:16:27.746491Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269877761, Sender [1:7501630197869343406:3604], Recipient [1:7501630176394504739:2197]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-05-07T09:16:27.746541Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4937: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-05-07T09:16:27.746554Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5713: Pipe server connected, at tablet: 72057594046644480 2025-05-07T09:16:27.746592Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271122432, Sender [1:7501630197869343402:3601], Recipient [1:7501630176394504739:2197]: {TEvModifySchemeTransaction txid# 281474976710672 TabletId# 72057594046644480} 2025-05-07T09:16:27.746608Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4851: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-05-07T09:16:27.823057Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/Root" OperationType: ESchemeOpCreateIndexedTable CreateIndexedTable { TableDescription { Name: "TestTable" Columns { Name: "Key" Type: "String" NotNull: false } Columns { Name: "fk1" Type: "String" NotNull: false } Columns { Name: "fk2" Type: "Int32" NotNull: false } Columns { Name: "fk3" Type: "Uint64" NotNull: false } Columns { Name: "Value" Type: "String" NotNull: false } KeyColumnNames: "Key" PartitionConfig { ColumnFamilies { Id: 0 StorageConfig { SysLog { PreferredPoolKind: "test" } Log { PreferredPoolKind: "test" } Data { PreferredPoolKind: "test" } } } } Temporary: false } IndexDescription { Name: "Index" KeyColumnNames: "fk1" KeyColumnNames: "fk2" KeyColumnNames: "fk3" Type: EIndexTypeGlobalUnique IndexImplTableDescriptions { PartitionConfig { } } } } } TxId: 281474976710672 TabletId: 72057594046644480 PeerName: "ipv6:[::1]:45056" , at schemeshard: 72057594046644480 2025-05-07T09:16:27.823664Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_create_indexed_table.cpp:101: TCreateTableIndex construct operation table path: /Root/TestTable domain path id: [OwnerId: 72057594046644480, LocalPathId: 1] domain path: /Root shardsToCreate: 2 GetShardsInside: 34 MaxShards: 200000 2025-05-07T09:16:27.824200Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_table.cpp:426: TCreateTable Propose, path: /Root/TestTable, opId: 281474976710672:0, at schemeshard: 72057594046644480 2025-05-07T09:16:27.824359Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_create_table.cpp:433: 
TCreateTable Propose, path: /Root/TestTable, opId: 281474976710672:0, schema: Name: "TestTable" Columns { Name: "Key" Type: "String" NotNull: false } Columns { Name: "fk1" Type: "String" NotNull: false } Columns { Name: "fk2" Type: "Int32" NotNull: false } Columns { Name: "fk3" Type: "Uint64" N ... Not (Contains %kqp%tx_result_binding_4_2 $153)))) (lambda \'($154) $71))) \'(\'(\'\"_logical_id\" \'4689) \'(\'\"_id\" \'\"6a9933c5-1cf6bb6c-9986560c-c1c647f5\"))))\n(let $73 (DqCnUnionAll (TDqOutput $72 \'0)))\n(let $74 (Bool \'true))\n(let $75 (DqPhyStage \'($73) (lambda \'($155) (block \'(\n (let $156 (lambda \'($157 $158) $71))\n (return (FromFlow (Condense (ToFlow $155) $74 $156 $156)))\n))) \'(\'(\'\"_logical_id\" \'4669) \'(\'\"_id\" \'\"5c97eb82-64f04a0f-d59157f7-1d59b66d\"))))\n(let $76 \'($67 $72 $75))\n(let $77 (DqCnValue (TDqOutput $75 \'0)))\n(let $78 (KqpTxResultBinding $57 \'\"4\" \'2))\n(let $79 (KqpTxResultBinding $55 \'\"4\" \'\"3\"))\n(let $80 (KqpPhysicalTx $76 \'($77) \'(\'($70 $78) \'($66 $79)) $41))\n(let $81 \'\"%kqp%tx_result_binding_4_1\")\n(let $82 \'\"%kqp%tx_result_binding_5_0\")\n(let $83 \'\"%kqp%tx_result_binding_4_0\")\n(let $84 (DqPhyStage \'() (lambda \'() (block \'(\n (let $159 (KqpEnsure $74 %kqp%tx_result_binding_4_1 \'\"2012\" (Utf8 \'\"Duplicated keys found.\")))\n (let $160 (KqpEnsure $74 %kqp%tx_result_binding_5_0 \'\"2012\" (Utf8 \'\"Conflict with existing key.\")))\n (let $161 (If (And $159 $160) %kqp%tx_result_binding_4_0 (List $11)))\n (return (ToStream (Just (PartitionByKey $161 (lambda \'($162) (Member $162 \'\"Key\")) (Void) (Void) (lambda \'($163) (FlatMap $163 (lambda \'($164) (Last (ForwardList (Nth $164 \'1))))))))))\n))) \'(\'(\'\"_logical_id\" \'5031) \'(\'\"_id\" \'\"f48e05e6-8336d163-197936f5-a4fc8916\"))))\n(let $85 (DqCnValue (TDqOutput $84 \'0)))\n(let $86 (KqpTxResultBinding $11 \'\"4\" \'0))\n(let $87 (KqpTxResultBinding $56 \'\"4\" \'1))\n(let $88 (KqpTxResultBinding $56 \'\"5\" \'0))\n(let $89 \'(\'($83 $86) \'($81 $87) \'($82 $88)))\n(let $90 (KqpPhysicalTx \'($84) \'($85) $89 $3))\n(let $91 \'\"%kqp%tx_result_binding_6_0\")\n(let $92 %kqp%tx_result_binding_6_0)\n(let $93 (DqPhyStage \'() (lambda \'() (Iterator (AsList (ToDict (FlatMap (Map $92 (lambda \'($165) (AsStruct \'(\'\"Key\" (Member $165 \'\"Key\")) \'(\'\"fk1\" (Member $165 \'\"fk1\")) \'(\'\"fk3\" (Member $165 \'\"fk3\"))))) (lambda \'($166) (block \'(\n (let $167 (AsStruct \'(\'\"Key\" (Member $166 \'\"Key\"))))\n (return (IfPresent (Lookup $46 $167) (lambda \'($168) (Just \'($167 $168 (Or (AggrNotEquals (Member $166 \'\"fk1\") (Member $168 \'\"fk1\")) (AggrNotEquals (Member $166 \'\"fk3\") (Member $168 \'\"fk3\")))))) (Nothing (OptionalType (TupleType $19 $44 $56)))))\n)))) (lambda \'($169) (Nth $169 \'0)) (lambda \'($170) \'((Nth $170 \'1) (Nth $170 \'2))) $35)))) \'(\'(\'\"_logical_id\" \'5182) \'(\'\"_id\" \'\"ffe7a574-9ae9f879-2782d4f2-f89478d0\"))))\n(let $94 (DqCnValue (TDqOutput $93 \'0)))\n(let $95 (KqpTxResultBinding $11 \'\"6\" \'0))\n(let $96 \'($91 $95))\n(let $97 (KqpPhysicalTx \'($93) \'($94) \'($51 $96) $3))\n(let $98 (DataSink \'\"KqpTableSink\" \'\"db\"))\n(let $99 (KqpTableSinkSettings $22 \'false \'\"upsert\" \'0 \'false \'false \'()))\n(let $100 (DqPhyStage \'() (lambda \'() (Iterator $92)) \'(\'(\'\"_logical_id\" \'5730) \'(\'\"_id\" \'\"17c88887-f9a27d9c-a243bc2b-276aa17d\")) \'((DqSink \'0 $98 $99))))\n(let $101 \'\"%kqp%tx_result_binding_7_0\")\n(let $102 (DictType $19 (TupleType $44 $56)))\n(let $103 %kqp%tx_result_binding_7_0)\n(let $104 
\'(\'(\'\"_logical_id\" \'5758) \'(\'\"_id\" \'\"dd2cea4b-538979eb-f2d7b8b-2b8d08a1\") $30))\n(let $105 (DqPhyStage \'() (lambda \'() (block \'(\n (let $171 (lambda \'($173) (block \'(\n (let $174 (Nth $173 \'1))\n (let $175 (Nth $174 \'0))\n (return (Member (Nth $173 \'0) \'\"Key\") (Member $175 \'\"fk1\") (Member $175 \'\"fk2\") (Member $175 \'\"fk3\") (Nth $174 \'1))\n ))))\n (let $172 (lambda \'($181 $182 $183 $184 $185) $181 $182 $183 $184))\n (return (FromFlow (WideMap (WideFilter (ExpandMap (ToFlow (DictItems $103)) $171) (lambda \'($176 $177 $178 $179 $180) $180)) $172)))\n))) $104))\n(let $106 (DqCnUnionAll (TDqOutput $105 \'0)))\n(let $107 (lambda \'($186) (FromFlow (NarrowMap (ToFlow $186) $34))))\n(let $108 (KqpTableSinkSettings $68 \'false \'\"delete\" \'1 \'false \'false \'()))\n(let $109 (DqPhyStage \'($106) $107 \'(\'(\'\"_logical_id\" \'5744) \'(\'\"_id\" \'\"b4f902e-79fe7887-7c95072d-91fa5531\")) \'((DqSink \'0 $98 $108))))\n(let $110 \'(\'(\'\"_logical_id\" \'5810) \'(\'\"_id\" \'\"f4609e48-b8431072-661f474f-5849b212\") $30))\n(let $111 (DqPhyStage \'() (lambda \'() (FromFlow (ExpandMap (FlatMap (Map (ToFlow $92) (lambda \'($187) (AsStruct \'(\'\"Key\" (Member $187 \'\"Key\")) \'(\'\"fk1\" (Member $187 \'\"fk1\")) \'(\'\"fk3\" (Member $187 \'\"fk3\"))))) (lambda \'($188) (block \'(\n (let $189 \'(\'\"Key\" (Member $188 \'\"Key\")))\n (let $190 \'(\'\"fk1\" (Member $188 \'\"fk1\")))\n (let $191 \'(\'\"fk3\" (Member $188 \'\"fk3\")))\n (return (IfPresent (Lookup $103 (AsStruct $189)) (lambda \'($192) (If (Nth $192 \'1) (Just (AsStruct $189 $190 \'(\'\"fk2\" (Member (Nth $192 \'0) \'\"fk2\")) $191)) (Nothing (OptionalType $29)))) (Just (AsStruct $189 $190 $47 $191))))\n)))) $26))) $110))\n(let $112 (DqCnUnionAll (TDqOutput $111 \'0)))\n(let $113 (KqpTableSinkSettings $68 \'false \'\"\" \'2 \'false \'false \'()))\n(let $114 (DqPhyStage \'($112) $107 \'(\'(\'\"_logical_id\" \'5772) \'(\'\"_id\" \'\"4a9fcbd7-a6f1fb43-90c14257-cb159a14\")) \'((DqSink \'0 $98 $113))))\n(let $115 \'($100 $105 $109 $111 $114))\n(let $116 (KqpTxResultBinding $102 \'\"7\" \'0))\n(let $117 (KqpPhysicalTx $115 \'() \'($96 \'($101 $116)) \'($40 \'(\'\"with_effects\"))))\n(let $118 \'($4 $17 $42 $52 $65 $80 $90 $97 $117))\n(return (KqpPhysicalQuery $118 \'() \'(\'(\'\"type\" \'\"data_query\"))))\n)\n" total_duration_us: 1100569 total_cpu_time_us: 1080271 query_meta: 
"{\"query_database\":\"/Root\",\"query_parameter_types\":{},\"table_metadata\":[\"{\\\"DoesExist\\\":true,\\\"Cluster\\\":\\\"db\\\",\\\"Name\\\":\\\"/Root/TestTable\\\",\\\"SysView\\\":\\\"\\\",\\\"PathId\\\":{\\\"OwnerId\\\":72057594046644480,\\\"TableId\\\":17},\\\"SchemaVersion\\\":1,\\\"Kind\\\":1,\\\"Columns\\\":[{\\\"Name\\\":\\\"Key\\\",\\\"Id\\\":1,\\\"Type\\\":\\\"String\\\",\\\"TypeId\\\":4097,\\\"NotNull\\\":false,\\\"DefaultFromSequence\\\":\\\"\\\",\\\"DefaultKind\\\":0,\\\"DefaultFromLiteral\\\":{},\\\"IsBuildInProgress\\\":false,\\\"DefaultFromSequencePathId\\\":{\\\"OwnerId\\\":18446744073709551615,\\\"TableId\\\":18446744073709551615}},{\\\"Name\\\":\\\"Value\\\",\\\"Id\\\":5,\\\"Type\\\":\\\"String\\\",\\\"TypeId\\\":4097,\\\"NotNull\\\":false,\\\"DefaultFromSequence\\\":\\\"\\\",\\\"DefaultKind\\\":0,\\\"DefaultFromLiteral\\\":{},\\\"IsBuildInProgress\\\":false,\\\"DefaultFromSequencePathId\\\":{\\\"OwnerId\\\":18446744073709551615,\\\"TableId\\\":18446744073709551615}},{\\\"Name\\\":\\\"fk1\\\",\\\"Id\\\":2,\\\"Type\\\":\\\"String\\\",\\\"TypeId\\\":4097,\\\"NotNull\\\":false,\\\"DefaultFromSequence\\\":\\\"\\\",\\\"DefaultKind\\\":0,\\\"DefaultFromLiteral\\\":{},\\\"IsBuildInProgress\\\":false,\\\"DefaultFromSequencePathId\\\":{\\\"OwnerId\\\":18446744073709551615,\\\"TableId\\\":18446744073709551615}},{\\\"Name\\\":\\\"fk2\\\",\\\"Id\\\":3,\\\"Type\\\":\\\"Int32\\\",\\\"TypeId\\\":1,\\\"NotNull\\\":false,\\\"DefaultFromSequence\\\":\\\"\\\",\\\"DefaultKind\\\":0,\\\"DefaultFromLiteral\\\":{},\\\"IsBuildInProgress\\\":false,\\\"DefaultFromSequencePathId\\\":{\\\"OwnerId\\\":18446744073709551615,\\\"TableId\\\":18446744073709551615}},{\\\"Name\\\":\\\"fk3\\\",\\\"Id\\\":4,\\\"Type\\\":\\\"Uint64\\\",\\\"TypeId\\\":4,\\\"NotNull\\\":false,\\\"DefaultFromSequence\\\":\\\"\\\",\\\"DefaultKind\\\":0,\\\"DefaultFromLiteral\\\":{},\\\"IsBuildInProgress\\\":false,\\\"DefaultFromSequencePathId\\\":{\\\"OwnerId\\\":18446744073709551615,\\\"TableId\\\":18446744073709551615}}],\\\"KeyColunmNames\\\":[\\\"Key\\\"],\\\"Indexes\\\":[{\\\"Name\\\":\\\"Index\\\",\\\"Type\\\":2,\\\"State\\\":1,\\\"SchemaVersion\\\":1,\\\"LocalPathId\\\":18,\\\"PathOwnerId\\\":8716544,\\\"KeyColumns\\\":[\\\"fk1\\\",\\\"fk2\\\",\\\"fk3\\\"]}],\\\"SecondaryGlobalIndexMetadata\\\":[{\\\"DoesExist\\\":true,\\\"Cluster\\\":\\\"db\\\",\\\"Name\\\":\\\"/Root/TestTable/Index/indexImplTable\\\",\\\"SysView\\\":\\\"\\\",\\\"PathId\\\":{\\\"OwnerId\\\":72057594046644480,\\\"TableId\\\":19},\\\"SchemaVersion\\\":1,\\\"Kind\\\":1,\\\"Columns\\\":[{\\\"Name\\\":\\\"Key\\\",\\\"Id\\\":4,\\\"Type\\\":\\\"String\\\",\\\"TypeId\\\":4097,\\\"NotNull\\\":false,\\\"DefaultFromSequence\\\":\\\"\\\",\\\"DefaultKind\\\":0,\\\"DefaultFromLiteral\\\":{},\\\"IsBuildInProgress\\\":false,\\\"DefaultFromSequencePathId\\\":{\\\"OwnerId\\\":18446744073709551615,\\\"TableId\\\":18446744073709551615}},{\\\"Name\\\":\\\"fk1\\\",\\\"Id\\\":1,\\\"Type\\\":\\\"String\\\",\\\"TypeId\\\":4097,\\\"NotNull\\\":false,\\\"DefaultFromSequence\\\":\\\"\\\",\\\"DefaultKind\\\":0,\\\"DefaultFromLiteral\\\":{},\\\"IsBuildInProgress\\\":false,\\\"DefaultFromSequencePathId\\\":{\\\"OwnerId\\\":18446744073709551615,\\\"TableId\\\":18446744073709551615}},{\\\"Name\\\":\\\"fk2\\\",\\\"Id\\\":2,\\\"Type\\\":\\\"Int32\\\",\\\"TypeId\\\":1,\\\"NotNull\\\":false,\\\"DefaultFromSequence\\\":\\\"\\\",\\\"DefaultKind\\\":0,\\\"DefaultFromLiteral\\\":{},\\\"IsBuildInProgress\\\":false,\\\"DefaultFromSequencePathId\\\":{\\\"OwnerId\\\":18446744073709551615,\\\"Tabl
eId\\\":18446744073709551615}},{\\\"Name\\\":\\\"fk3\\\",\\\"Id\\\":3,\\\"Type\\\":\\\"Uint64\\\",\\\"TypeId\\\":4,\\\"NotNull\\\":false,\\\"DefaultFromSequence\\\":\\\"\\\",\\\"DefaultKind\\\":0,\\\"DefaultFromLiteral\\\":{},\\\"IsBuildInProgress\\\":false,\\\"DefaultFromSequencePathId\\\":{\\\"OwnerId\\\":18446744073709551615,\\\"TableId\\\":18446744073709551615}}],\\\"KeyColunmNames\\\":[\\\"fk1\\\",\\\"fk2\\\",\\\"fk3\\\",\\\"Key\\\"],\\\"RecordsCount\\\":0,\\\"DataSize\\\":0,\\\"StatsLoaded\\\":false}],\\\"RecordsCount\\\":0,\\\"DataSize\\\":0,\\\"StatsLoaded\\\":false}\"],\"table_meta_serialization_type\":2,\"created_at\":\"1746609404\",\"query_type\":\"QUERY_TYPE_SQL_DML\",\"query_syntax\":\"1\",\"query_cluster\":\"db\",\"query_id\":\"60ccb9f2-a4f1df8d-38d14b02-423b8b03\",\"version\":\"1.0\"}" 2025-05-07T09:16:44.640548Z node 3 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-05-07T09:16:44.647752Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271125000, Sender [0:0:0], Recipient [3:7501630244958403830:2133]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T09:16:44.647796Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4848: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T09:16:44.647852Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271124999, Sender [3:7501630244958403830:2133], Recipient [3:7501630244958403830:2133]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T09:16:44.647867Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4847: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T09:16:45.648225Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271125000, Sender [0:0:0], Recipient [3:7501630244958403830:2133]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T09:16:45.648267Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4848: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T09:16:45.648308Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271124999, Sender [3:7501630244958403830:2133], Recipient [3:7501630244958403830:2133]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T09:16:45.648323Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4847: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T09:16:45.754754Z node 3 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill |94.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/copy_table/py3test >> test_copy_table.py::TestCopyTable::test_copy_table[table_ttl_DyNumber-pk_types13-all_types13-index13-DyNumber--] [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_data_erasure/unittest >> TestDataErasure::DataErasureRun3CyclesForAllSupportedObjects [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:128:2058] recipient: [1:108:2140] 2025-05-07T09:16:36.967803Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T09:16:36.967904Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:16:36.967944Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T09:16:36.967982Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T09:16:36.968026Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T09:16:36.968058Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T09:16:36.968110Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:16:36.968184Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T09:16:36.968958Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T09:16:36.969304Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T09:16:37.059766Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T09:16:37.059838Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:16:37.075182Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T09:16:37.075320Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T09:16:37.075493Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T09:16:37.085347Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T09:16:37.086371Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T09:16:37.087137Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T09:16:37.087467Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T09:16:37.089722Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:16:37.091394Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at 
schemeshard: 72057594046678944 2025-05-07T09:16:37.091459Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:16:37.091536Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T09:16:37.091603Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:16:37.091662Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T09:16:37.091845Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T09:16:37.099615Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:125:2151] sender: [1:239:2058] recipient: [1:15:2062] 2025-05-07T09:16:37.246832Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T09:16:37.247066Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:16:37.247302Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T09:16:37.247585Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T09:16:37.247649Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:16:37.259222Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T09:16:37.259379Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T09:16:37.259611Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:16:37.259700Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T09:16:37.259738Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T09:16:37.259784Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 
2025-05-07T09:16:37.267971Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:16:37.268040Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T09:16:37.268087Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T09:16:37.279080Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:16:37.279147Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:16:37.279187Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:16:37.279260Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T09:16:37.283708Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T09:16:37.286115Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T09:16:37.286313Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T09:16:37.287298Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T09:16:37.287455Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 130 RawX2: 4294969450 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:16:37.287501Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:16:37.287792Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T09:16:37.287843Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:16:37.288040Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T09:16:37.288113Z 
node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T09:16:37.290811Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:16:37.290871Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:16:37.291044Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:16:37.291091Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... ger] SendRequestToBSC: Generation# 3 2025-05-07T09:16:46.257354Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 268637738, Sender [1:298:2280], Recipient [1:290:2274]: NKikimrBlobStorage.TEvControllerShredResponse CurrentGeneration: 3 Completed: false Progress10k: 5000 2025-05-07T09:16:46.257379Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5038: StateWork, processing event TEvBlobStorage::TEvControllerShredResponse 2025-05-07T09:16:46.257404Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:7757: Handle TEvControllerShredResponse, at schemeshard: 72057594046678944 2025-05-07T09:16:46.257464Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:628: TTxCompleteDataErasureBSC Execute at schemeshard: 72057594046678944 2025-05-07T09:16:46.257503Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__root_data_erasure_manager.cpp:644: TTxCompleteDataErasureBSC: Progress data shred in BSC 50% 2025-05-07T09:16:46.257553Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:652: TTxCompleteDataErasureBSC Complete at schemeshard: 72057594046678944, NeedScheduleRequestToBSC# true 2025-05-07T09:16:46.257597Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:346: [RootDataErasureManager] ScheduleRequestToBSC: Interval# 1.000000s 2025-05-07T09:16:46.343366Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:459:2411]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T09:16:46.343444Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4848: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T09:16:46.343582Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271124999, Sender [1:459:2411], Recipient [1:459:2411]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T09:16:46.343615Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4847: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T09:16:46.343784Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271122945, Sender [1:793:2682], Recipient [1:459:2411]: NKikimrSchemeOp.TDescribePath PathId: 3 SchemeshardId: 72075186233409546 2025-05-07T09:16:46.343818Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4852: StateWork, processing event 
TEvSchemeShard::TEvDescribeScheme 2025-05-07T09:16:46.343915Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: PathId: 3 SchemeshardId: 72075186233409546, at schemeshard: 72075186233409546 2025-05-07T09:16:46.344097Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:44: Tablet 72075186233409546 describe pathId 3 took 151us result status StatusSuccess 2025-05-07T09:16:46.344498Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Database1/Topic1" PathDescription { Self { Name: "Topic1" PathId: 3 SchemeshardId: 72075186233409546 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 104 CreateStep: 300 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 1 } ChildrenExist: false BalancerTabletID: 72075186233409552 } PersQueueGroup { Name: "Topic1" PathId: 3 TotalGroupCount: 1 PartitionPerTablet: 1 PQTabletConfig { PartitionConfig { LifetimeSeconds: 10 } YdbDatabasePath: "/MyRoot/Database1" } Partitions { PartitionId: 0 TabletId: 72075186233409551 Status: Active } AlterVersion: 1 BalancerTabletID: 72075186233409552 NextPartitionId: 1 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 2 ProcessingParams { Version: 2 PlanResolution: 50 Coordinators: 72075186233409547 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409548 SchemeShard: 72075186233409546 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 7 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 1 PQPartitionsLimit: 1000000 } } PathId: 3 PathOwnerId: 72075186233409546, at schemeshard: 72075186233409546 2025-05-07T09:16:46.464958Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:955:2814]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T09:16:46.465033Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4848: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T09:16:46.465162Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271124999, Sender [1:955:2814], Recipient [1:955:2814]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T09:16:46.465198Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4847: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T09:16:46.465376Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271122945, Sender [1:1370:3157], Recipient [1:955:2814]: NKikimrSchemeOp.TDescribePath PathId: 3 SchemeshardId: 72075186233409553 2025-05-07T09:16:46.465411Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4852: StateWork, processing event TEvSchemeShard::TEvDescribeScheme 2025-05-07T09:16:46.465521Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: PathId: 3 SchemeshardId: 72075186233409553, at 
schemeshard: 72075186233409553 2025-05-07T09:16:46.465721Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:44: Tablet 72075186233409553 describe pathId 3 took 172us result status StatusSuccess 2025-05-07T09:16:46.466154Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Database2/Topic1" PathDescription { Self { Name: "Topic1" PathId: 3 SchemeshardId: 72075186233409553 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 108 CreateStep: 350 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 1 } ChildrenExist: false BalancerTabletID: 72075186233409559 } PersQueueGroup { Name: "Topic1" PathId: 3 TotalGroupCount: 1 PartitionPerTablet: 1 PQTabletConfig { PartitionConfig { LifetimeSeconds: 10 } YdbDatabasePath: "/MyRoot/Database2" } Partitions { PartitionId: 0 TabletId: 72075186233409558 Status: Active } AlterVersion: 1 BalancerTabletID: 72075186233409559 NextPartitionId: 1 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 3 ProcessingParams { Version: 2 PlanResolution: 50 Coordinators: 72075186233409554 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409555 SchemeShard: 72075186233409553 } DomainKey { SchemeShard: 72057594046678944 PathId: 3 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 7 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 3 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 1 PQPartitionsLimit: 1000000 } } PathId: 3 PathOwnerId: 72075186233409553, at schemeshard: 72075186233409553 2025-05-07T09:16:46.863988Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:290:2274]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T09:16:46.864094Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4848: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T09:16:46.864199Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271124999, Sender [1:290:2274], Recipient [1:290:2274]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T09:16:46.864236Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4847: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T09:16:46.905939Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271125517, Sender [0:0:0], Recipient [1:290:2274]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToRunDataErasureBSC 2025-05-07T09:16:46.906051Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5039: StateWork, processing event TEvSchemeShard::TEvWakeupToRunDataErasureBSC 2025-05-07T09:16:46.906095Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__root_data_erasure_manager.cpp:352: [RootDataErasureManager] SendRequestToBSC: Generation# 3 2025-05-07T09:16:46.906362Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 268637738, Sender [1:298:2280], Recipient [1:290:2274]: NKikimrBlobStorage.TEvControllerShredResponse CurrentGeneration: 3 
Completed: true Progress10k: 10000 2025-05-07T09:16:46.906403Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5038: StateWork, processing event TEvBlobStorage::TEvControllerShredResponse 2025-05-07T09:16:46.906439Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:7757: Handle TEvControllerShredResponse, at schemeshard: 72057594046678944 2025-05-07T09:16:46.906516Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:628: TTxCompleteDataErasureBSC Execute at schemeshard: 72057594046678944 2025-05-07T09:16:46.906550Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__root_data_erasure_manager.cpp:640: TTxCompleteDataErasureBSC: Data shred in BSC is completed 2025-05-07T09:16:46.906603Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:168: [RootDataErasureManager] ScheduleDataErasureWakeup: Interval# 0.920000s, Timestamp# 1970-01-01T00:00:11.125000Z 2025-05-07T09:16:46.906639Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__root_data_erasure_manager.cpp:376: [RootDataErasureManager] Complete: Generation# 3, duration# 2 s 2025-05-07T09:16:46.908909Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:652: TTxCompleteDataErasureBSC Complete at schemeshard: 72057594046678944, NeedScheduleRequestToBSC# false 2025-05-07T09:16:46.909587Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269877761, Sender [1:4087:5365], Recipient [1:290:2274]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-05-07T09:16:46.909649Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4937: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-05-07T09:16:46.909690Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5713: Pipe server connected, at tablet: 72057594046678944 2025-05-07T09:16:46.909846Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271125519, Sender [1:274:2265], Recipient [1:290:2274]: NKikimrScheme.TEvDataErasureInfoRequest 2025-05-07T09:16:46.909883Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5036: StateWork, processing event TEvSchemeShard::TEvDataErasureInfoRequest 2025-05-07T09:16:46.909928Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:7712: Handle TEvDataErasureInfoRequest, at schemeshard: 72057594046678944 >> TestDataErasure::DataErasureManualLaunch3Cycles [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_data_erasure/unittest >> TestDataErasure::DataErasureManualLaunch3Cycles [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140] 2025-05-07T09:16:36.972339Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T09:16:36.972435Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:16:36.972501Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, 
StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T09:16:36.972548Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T09:16:36.972596Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T09:16:36.972627Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T09:16:36.972689Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:16:36.972764Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T09:16:36.973537Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T09:16:36.973914Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T09:16:37.064008Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T09:16:37.064073Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:16:37.082707Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T09:16:37.082931Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T09:16:37.083141Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T09:16:37.090421Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T09:16:37.090731Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T09:16:37.091450Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T09:16:37.091677Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T09:16:37.094964Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:16:37.096393Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:16:37.096463Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:16:37.096537Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T09:16:37.096582Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 
72057594046678944, LocalPathId: 1] 2025-05-07T09:16:37.096634Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T09:16:37.096855Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T09:16:37.104231Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T09:16:37.267757Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T09:16:37.267997Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:16:37.268238Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T09:16:37.268534Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T09:16:37.268599Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:16:37.271251Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T09:16:37.271411Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T09:16:37.271655Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:16:37.271731Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T09:16:37.271778Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T09:16:37.271824Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T09:16:37.274121Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:16:37.274200Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T09:16:37.274250Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T09:16:37.276451Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: 
TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:16:37.276510Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:16:37.276550Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:16:37.276621Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T09:16:37.280676Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T09:16:37.282886Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T09:16:37.283107Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T09:16:37.284227Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T09:16:37.284348Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:16:37.284392Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:16:37.284672Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T09:16:37.284728Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:16:37.284901Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T09:16:37.284972Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T09:16:37.287033Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:16:37.287093Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, 
txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:16:37.287267Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:16:37.287324Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... ard_impl.cpp:5039: StateWork, processing event TEvSchemeShard::TEvWakeupToRunDataErasureBSC 2025-05-07T09:16:47.292390Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__root_data_erasure_manager.cpp:352: [RootDataErasureManager] SendRequestToBSC: Generation# 3 2025-05-07T09:16:47.292662Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 268637738, Sender [2:298:2280], Recipient [2:292:2276]: NKikimrBlobStorage.TEvControllerShredResponse CurrentGeneration: 3 Completed: false Progress10k: 5000 2025-05-07T09:16:47.292696Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5038: StateWork, processing event TEvBlobStorage::TEvControllerShredResponse 2025-05-07T09:16:47.292723Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:7757: Handle TEvControllerShredResponse, at schemeshard: 72057594046678944 2025-05-07T09:16:47.292787Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:628: TTxCompleteDataErasureBSC Execute at schemeshard: 72057594046678944 2025-05-07T09:16:47.292829Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__root_data_erasure_manager.cpp:644: TTxCompleteDataErasureBSC: Progress data shred in BSC 50% 2025-05-07T09:16:47.292886Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:652: TTxCompleteDataErasureBSC Complete at schemeshard: 72057594046678944, NeedScheduleRequestToBSC# true 2025-05-07T09:16:47.292928Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:346: [RootDataErasureManager] ScheduleRequestToBSC: Interval# 1.000000s 2025-05-07T09:16:47.625923Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271125000, Sender [0:0:0], Recipient [2:455:2410]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T09:16:47.626027Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4848: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T09:16:47.626192Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271124999, Sender [2:455:2410], Recipient [2:455:2410]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T09:16:47.626229Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4847: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T09:16:47.626384Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271122945, Sender [2:793:2681], Recipient [2:455:2410]: NKikimrSchemeOp.TDescribePath PathId: 3 SchemeshardId: 72075186233409546 2025-05-07T09:16:47.626418Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4852: StateWork, processing event TEvSchemeShard::TEvDescribeScheme 2025-05-07T09:16:47.626522Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: PathId: 3 SchemeshardId: 72075186233409546, at schemeshard: 72075186233409546 2025-05-07T09:16:47.626714Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:44: Tablet 72075186233409546 describe pathId 3 
took 163us result status StatusSuccess 2025-05-07T09:16:47.627203Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Database1/Topic1" PathDescription { Self { Name: "Topic1" PathId: 3 SchemeshardId: 72075186233409546 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 104 CreateStep: 250 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 1 } ChildrenExist: false BalancerTabletID: 72075186233409552 } PersQueueGroup { Name: "Topic1" PathId: 3 TotalGroupCount: 1 PartitionPerTablet: 1 PQTabletConfig { PartitionConfig { LifetimeSeconds: 10 } YdbDatabasePath: "/MyRoot/Database1" } Partitions { PartitionId: 0 TabletId: 72075186233409551 Status: Active } AlterVersion: 1 BalancerTabletID: 72075186233409552 NextPartitionId: 1 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 2 ProcessingParams { Version: 2 PlanResolution: 50 Coordinators: 72075186233409547 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409548 SchemeShard: 72075186233409546 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 7 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 1 PQPartitionsLimit: 1000000 } } PathId: 3 PathOwnerId: 72075186233409546, at schemeshard: 72075186233409546 2025-05-07T09:16:47.714028Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271125000, Sender [0:0:0], Recipient [2:951:2811]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T09:16:47.714115Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4848: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T09:16:47.714226Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271124999, Sender [2:951:2811], Recipient [2:951:2811]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T09:16:47.714261Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4847: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T09:16:47.714429Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271122945, Sender [2:1372:3159], Recipient [2:951:2811]: NKikimrSchemeOp.TDescribePath PathId: 3 SchemeshardId: 72075186233409553 2025-05-07T09:16:47.714462Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4852: StateWork, processing event TEvSchemeShard::TEvDescribeScheme 2025-05-07T09:16:47.714568Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: PathId: 3 SchemeshardId: 72075186233409553, at schemeshard: 72075186233409553 2025-05-07T09:16:47.714760Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:44: Tablet 72075186233409553 describe pathId 3 took 159us result status StatusSuccess 2025-05-07T09:16:47.715223Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: 
"/MyRoot/Database2/Topic1" PathDescription { Self { Name: "Topic1" PathId: 3 SchemeshardId: 72075186233409553 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 108 CreateStep: 400 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 1 } ChildrenExist: false BalancerTabletID: 72075186233409559 } PersQueueGroup { Name: "Topic1" PathId: 3 TotalGroupCount: 1 PartitionPerTablet: 1 PQTabletConfig { PartitionConfig { LifetimeSeconds: 10 } YdbDatabasePath: "/MyRoot/Database2" } Partitions { PartitionId: 0 TabletId: 72075186233409558 Status: Active } AlterVersion: 1 BalancerTabletID: 72075186233409559 NextPartitionId: 1 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 3 ProcessingParams { Version: 2 PlanResolution: 50 Coordinators: 72075186233409554 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409555 SchemeShard: 72075186233409553 } DomainKey { SchemeShard: 72057594046678944 PathId: 3 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 7 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 3 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 1 PQPartitionsLimit: 1000000 } } PathId: 3 PathOwnerId: 72075186233409553, at schemeshard: 72075186233409553 2025-05-07T09:16:47.798356Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271125000, Sender [0:0:0], Recipient [2:292:2276]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T09:16:47.798446Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4848: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T09:16:47.798538Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271124999, Sender [2:292:2276], Recipient [2:292:2276]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T09:16:47.798575Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4847: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T09:16:47.886114Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271125517, Sender [0:0:0], Recipient [2:292:2276]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToRunDataErasureBSC 2025-05-07T09:16:47.886208Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5039: StateWork, processing event TEvSchemeShard::TEvWakeupToRunDataErasureBSC 2025-05-07T09:16:47.886251Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__root_data_erasure_manager.cpp:352: [RootDataErasureManager] SendRequestToBSC: Generation# 3 2025-05-07T09:16:47.886539Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 268637738, Sender [2:298:2280], Recipient [2:292:2276]: NKikimrBlobStorage.TEvControllerShredResponse CurrentGeneration: 3 Completed: true Progress10k: 10000 2025-05-07T09:16:47.886582Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5038: StateWork, processing event TEvBlobStorage::TEvControllerShredResponse 2025-05-07T09:16:47.886619Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:7757: Handle TEvControllerShredResponse, at schemeshard: 72057594046678944 2025-05-07T09:16:47.886706Z 
node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:628: TTxCompleteDataErasureBSC Execute at schemeshard: 72057594046678944 2025-05-07T09:16:47.886755Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__root_data_erasure_manager.cpp:640: TTxCompleteDataErasureBSC: Data shred in BSC is completed 2025-05-07T09:16:47.886806Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__root_data_erasure_manager.cpp:376: [RootDataErasureManager] Complete: Generation# 3, duration# 2 s 2025-05-07T09:16:47.894977Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:652: TTxCompleteDataErasureBSC Complete at schemeshard: 72057594046678944, NeedScheduleRequestToBSC# false 2025-05-07T09:16:47.895834Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269877761, Sender [2:4051:5329], Recipient [2:292:2276]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-05-07T09:16:47.895910Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4937: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-05-07T09:16:47.895958Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5713: Pipe server connected, at tablet: 72057594046678944 2025-05-07T09:16:47.896119Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271125519, Sender [2:3207:4655], Recipient [2:292:2276]: NKikimrScheme.TEvDataErasureInfoRequest 2025-05-07T09:16:47.896163Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5036: StateWork, processing event TEvSchemeShard::TEvDataErasureInfoRequest 2025-05-07T09:16:47.896240Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:7712: Handle TEvDataErasureInfoRequest, at schemeshard: 72057594046678944 >> KqpAnalyze::AnalyzeTable+ColumnStore >> test_retry.py::TestRetry::test_low_rate[kikimr0] >> SystemView::ConcurrentScans [GOOD] >> SystemView::PDisksFields >> KqpExplain::UpdateSecondaryConditional+UseSink >> KqpLimits::DatashardProgramSize+useSink >> TestDataErasure::DataErasureRun3CyclesForTables [GOOD] >> KqpTypes::UnsafeTimestampCastV0 >> KqpExplain::Explain >> SystemView::TopPartitionsByCpuTables [GOOD] >> SystemView::TopPartitionsByTliFields ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_data_erasure/unittest >> TestDataErasure::DataErasureRun3CyclesForTables [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140] 2025-05-07T09:16:41.945426Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T09:16:41.945497Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:16:41.945541Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T09:16:41.945585Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 
2025-05-07T09:16:41.945677Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T09:16:41.945706Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T09:16:41.945762Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:16:41.945825Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T09:16:41.946601Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T09:16:41.947013Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T09:16:42.031526Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T09:16:42.031607Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:16:42.048851Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T09:16:42.049106Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T09:16:42.049307Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T09:16:42.055646Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T09:16:42.055951Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T09:16:42.056570Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T09:16:42.056791Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T09:16:42.059814Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:16:42.061117Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:16:42.061166Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:16:42.061243Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T09:16:42.061307Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:16:42.061347Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 
2025-05-07T09:16:42.061562Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T09:16:42.068018Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T09:16:42.181599Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T09:16:42.181781Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:16:42.182000Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T09:16:42.182229Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T09:16:42.182288Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:16:42.184612Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T09:16:42.184753Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T09:16:42.184982Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:16:42.185049Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T09:16:42.185100Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T09:16:42.185138Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T09:16:42.187431Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:16:42.187492Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T09:16:42.187560Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T09:16:42.189421Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:16:42.189484Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: 
NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:16:42.189533Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:16:42.189590Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T09:16:42.192523Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T09:16:42.194456Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T09:16:42.194724Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T09:16:42.195820Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T09:16:42.195933Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:16:42.195984Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:16:42.196248Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T09:16:42.196306Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:16:42.196490Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T09:16:42.196577Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T09:16:42.198722Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:16:42.198779Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:16:42.198946Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard 
DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:16:42.199001Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... # [OwnerId: 72057594046678944, LocalPathId: 3] in# 66 ms, next wakeup# 593.934000s, rate# 0, in queue# 0 tenants, running# 0 tenants at schemeshard 72057594046678944 2025-05-07T09:16:48.657841Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__root_data_erasure_manager.cpp:325: [RootDataErasureManager] Data erasure in tenants is completed. Send request to BS controller 2025-05-07T09:16:48.666109Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:604: TTxCompleteDataErasureTenant Complete at schemeshard: 72057594046678944, NeedSendRequestToBSC# true 2025-05-07T09:16:48.666215Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__root_data_erasure_manager.cpp:352: [RootDataErasureManager] SendRequestToBSC: Generation# 3 2025-05-07T09:16:48.666500Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 268637738, Sender [1:297:2279], Recipient [1:289:2273]: NKikimrBlobStorage.TEvControllerShredResponse CurrentGeneration: 3 Completed: false Progress10k: 0 2025-05-07T09:16:48.666548Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5038: StateWork, processing event TEvBlobStorage::TEvControllerShredResponse 2025-05-07T09:16:48.666582Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:7757: Handle TEvControllerShredResponse, at schemeshard: 72057594046678944 2025-05-07T09:16:48.666649Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:628: TTxCompleteDataErasureBSC Execute at schemeshard: 72057594046678944 2025-05-07T09:16:48.666686Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__root_data_erasure_manager.cpp:644: TTxCompleteDataErasureBSC: Progress data shred in BSC 0% 2025-05-07T09:16:48.666746Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:652: TTxCompleteDataErasureBSC Complete at schemeshard: 72057594046678944, NeedScheduleRequestToBSC# true 2025-05-07T09:16:48.666801Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:346: [RootDataErasureManager] ScheduleRequestToBSC: Interval# 1.000000s 2025-05-07T09:16:49.205029Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:835:2718]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T09:16:49.205122Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4848: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T09:16:49.205197Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:289:2273]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T09:16:49.205223Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4848: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T09:16:49.205273Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:457:2410]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T09:16:49.205299Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4848: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T09:16:49.205352Z node 1 
:FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271124999, Sender [1:289:2273], Recipient [1:289:2273]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T09:16:49.205383Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4847: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T09:16:49.205474Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271124999, Sender [1:457:2410], Recipient [1:457:2410]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T09:16:49.205500Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4847: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T09:16:49.205553Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271124999, Sender [1:835:2718], Recipient [1:835:2718]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T09:16:49.205577Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4847: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T09:16:49.242475Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271125517, Sender [0:0:0], Recipient [1:289:2273]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToRunDataErasureBSC 2025-05-07T09:16:49.242560Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5039: StateWork, processing event TEvSchemeShard::TEvWakeupToRunDataErasureBSC 2025-05-07T09:16:49.242597Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__root_data_erasure_manager.cpp:352: [RootDataErasureManager] SendRequestToBSC: Generation# 3 2025-05-07T09:16:49.242855Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 268637738, Sender [1:297:2279], Recipient [1:289:2273]: NKikimrBlobStorage.TEvControllerShredResponse CurrentGeneration: 3 Completed: false Progress10k: 5000 2025-05-07T09:16:49.242891Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5038: StateWork, processing event TEvBlobStorage::TEvControllerShredResponse 2025-05-07T09:16:49.242923Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:7757: Handle TEvControllerShredResponse, at schemeshard: 72057594046678944 2025-05-07T09:16:49.242995Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:628: TTxCompleteDataErasureBSC Execute at schemeshard: 72057594046678944 2025-05-07T09:16:49.243048Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__root_data_erasure_manager.cpp:644: TTxCompleteDataErasureBSC: Progress data shred in BSC 50% 2025-05-07T09:16:49.243111Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:652: TTxCompleteDataErasureBSC Complete at schemeshard: 72057594046678944, NeedScheduleRequestToBSC# true 2025-05-07T09:16:49.243164Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:346: [RootDataErasureManager] ScheduleRequestToBSC: Interval# 1.000000s 2025-05-07T09:16:49.667804Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:289:2273]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T09:16:49.667877Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4848: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T09:16:49.667934Z node 1 :FLAT_TX_SCHEMESHARD TRACE: 
schemeshard_impl.cpp:4840: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:457:2410]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T09:16:49.667954Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4848: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T09:16:49.668004Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:835:2718]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T09:16:49.668026Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4848: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T09:16:49.668066Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271124999, Sender [1:289:2273], Recipient [1:289:2273]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T09:16:49.668087Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4847: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T09:16:49.668149Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271124999, Sender [1:457:2410], Recipient [1:457:2410]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T09:16:49.668173Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4847: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T09:16:49.668215Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271124999, Sender [1:835:2718], Recipient [1:835:2718]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T09:16:49.668232Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4847: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T09:16:49.706313Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271125517, Sender [0:0:0], Recipient [1:289:2273]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToRunDataErasureBSC 2025-05-07T09:16:49.706399Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5039: StateWork, processing event TEvSchemeShard::TEvWakeupToRunDataErasureBSC 2025-05-07T09:16:49.706438Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__root_data_erasure_manager.cpp:352: [RootDataErasureManager] SendRequestToBSC: Generation# 3 2025-05-07T09:16:49.706721Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 268637738, Sender [1:297:2279], Recipient [1:289:2273]: NKikimrBlobStorage.TEvControllerShredResponse CurrentGeneration: 3 Completed: true Progress10k: 10000 2025-05-07T09:16:49.706758Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5038: StateWork, processing event TEvBlobStorage::TEvControllerShredResponse 2025-05-07T09:16:49.706787Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:7757: Handle TEvControllerShredResponse, at schemeshard: 72057594046678944 2025-05-07T09:16:49.706850Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:628: TTxCompleteDataErasureBSC Execute at schemeshard: 72057594046678944 2025-05-07T09:16:49.706876Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__root_data_erasure_manager.cpp:640: TTxCompleteDataErasureBSC: Data shred in BSC is completed 2025-05-07T09:16:49.706914Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__root_data_erasure_manager.cpp:168: [RootDataErasureManager] ScheduleDataErasureWakeup: Interval# 0.933000s, Timestamp# 1970-01-01T00:00:11.112000Z 2025-05-07T09:16:49.706941Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__root_data_erasure_manager.cpp:376: [RootDataErasureManager] Complete: Generation# 3, duration# 2 s 2025-05-07T09:16:49.715040Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:652: TTxCompleteDataErasureBSC Complete at schemeshard: 72057594046678944, NeedScheduleRequestToBSC# false 2025-05-07T09:16:49.715808Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269877761, Sender [1:3580:4937], Recipient [1:289:2273]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-05-07T09:16:49.715870Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4937: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-05-07T09:16:49.715916Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5713: Pipe server connected, at tablet: 72057594046678944 2025-05-07T09:16:49.716076Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271125519, Sender [1:273:2264], Recipient [1:289:2273]: NKikimrScheme.TEvDataErasureInfoRequest 2025-05-07T09:16:49.716110Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5036: StateWork, processing event TEvSchemeShard::TEvDataErasureInfoRequest 2025-05-07T09:16:49.716165Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:7712: Handle TEvDataErasureInfoRequest, at schemeshard: 72057594046678944 >> KqpStats::MultiTxStatsFullYql |94.5%| [TA] $(B)/ydb/core/tx/schemeshard/ut_data_erasure/test-results/unittest/{meta.json ... results_accumulator.log} |94.5%| [TA] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_data_erasure/test-results/unittest/{meta.json ... 
results_accumulator.log} >> SystemView::ShowCreateTablePartitionSettings [GOOD] >> SystemView::ShowCreateTableReadReplicas >> KqpExplain::ExplainStream >> KqpLimits::ComputeActorMemoryAllocationFailure+useSink [GOOD] >> KqpLimits::ComputeActorMemoryAllocationFailure-useSink >> DbCounters::TabletsSimple [GOOD] >> LabeledDbCounters::OneTablet >> SystemView::ShowCreateTableDefaultLiteral [GOOD] >> SystemView::ShowCreateTablePartitionAtKeys >> KqpQuery::QueryTimeout [GOOD] >> KqpQuery::QueryResultsTruncated >> KqpQuery::RandomNumber [GOOD] >> KqpQuery::RandomUuid >> KqpQuery::CurrentUtcTimestamp [GOOD] >> KqpQuery::DdlInDataQuery >> KqpExplain::SortStage [GOOD] >> KqpExplain::SelfJoin3xSameLabels >> KqpExplain::LimitOffset [GOOD] >> KqpExplain::MultiUsedStage >> KqpParams::ExplicitSameParameterTypesQueryCacheCheck [GOOD] >> KqpParams::DefaultParameterValue >> KqpParams::CheckQueryCacheForPreparedQuery [GOOD] >> KqpParams::CheckQueryCacheForUnpreparedQuery >> KqpUniqueIndex::UpdateOnNullInComplexFk [GOOD] >> KqpQuery::YqlSyntaxV0 [GOOD] >> KqpQuery::YqlTableSample >> KqpStats::JoinNoStatsYql [GOOD] >> KqpStats::JoinNoStatsScan >> SystemView::PDisksFields [GOOD] >> SystemView::GroupsFields >> KqpLimits::QSReplySizeEnsureMemoryLimits-useSink [GOOD] >> KqpLimits::QueryReplySize >> KqpExplain::Explain [GOOD] >> KqpExplain::ExplainDataQuery >> KqpTypes::UnsafeTimestampCastV0 [GOOD] >> KqpTypes::UnsafeTimestampCastV1 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpUniqueIndex::UpdateOnNullInComplexFk [GOOD] Test command err: Trying to start YDB, gRPC: 21243, MsgBus: 27805 2025-05-07T09:16:23.606268Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501630179142214768:2164];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:16:23.944243Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003bbf/r3tmp/tmp6BrgU7/pdisk_1.dat 2025-05-07T09:16:24.179288Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:16:24.202884Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:16:24.203007Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:16:24.205144Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 21243, node 1 2025-05-07T09:16:24.361549Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:16:24.361573Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:16:24.361589Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:16:24.361706Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:27805 TClient is connected to server localhost:27805 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:16:25.145747Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:25.166977Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T09:16:25.173539Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:25.317516Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:25.518024Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:25.624299Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:27.590385Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501630196322085484:2406], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:27.590562Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:27.853761Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T09:16:27.897792Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T09:16:27.946401Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T09:16:28.032107Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T09:16:28.075257Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T09:16:28.158431Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T09:16:28.214508Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T09:16:28.322594Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501630200617053448:2470], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:28.322680Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:28.323040Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501630200617053453:2473], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:28.327401Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-05-07T09:16:28.344299Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501630200617053455:2474], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T09:16:28.409962Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501630200617053506:3415] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:16:28.606560Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501630179142214768:2164];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:16:28.606642Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:16:29.450766Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:31.312792Z node 1 :KQP_EXECUTER ERROR: kqp_literal_executer.cpp:107: ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: 01jtn0ggxz08sntrwkxqp7g0ta, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NjE4YWZiYzgtNGE2MDZiYTUtNTYwYThjYy1hOGNlZDgzMw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. TKqpLiteralExecuter, TKqpEnsure failed. 2025-05-07T09:16:31.322746Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2578: SessionId: ydb://session/3?node_id=1&id=NjE4YWZiYzgtNGE2MDZiYTUtNTYwYThjYy1hOGNlZDgzMw==, ActorId: [1:7501630204912021846:2573], ActorState: ExecuteState, TraceId: 01jtn0ggxz08sntrwkxqp7g0ta, Create QueryResponse for error on request, msg: Trying to start YDB, gRPC: 28728, MsgBus: 5553 2025-05-07T09:16:32.970980Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7501630220640625953:2060];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:16:32.971228Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003bbf/r3tmp/tmp1NH5BT/pdisk_1.dat 2025-05-07T09:16:33.062941Z node 2 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 28728, node 2 2025-05-07T09:16:33.106851Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:16:33.106949Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:16:33.108728Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:16:33.137090Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:16:33.137114Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:16:33.137123Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:16:33.137477Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad 
distributable configuration TClient is connected to server localhost:5553 TClient is connected to serve ... n { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:16:33.608426Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:33.623219Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:33.681118Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:33.859727Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:33.945814Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:36.252726Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7501630237820496796:2406], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:36.252813Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:36.312850Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-05-07T09:16:36.355288Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-05-07T09:16:36.399424Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-05-07T09:16:36.436676Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-05-07T09:16:36.476573Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-05-07T09:16:36.517488Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-05-07T09:16:36.590290Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-05-07T09:16:36.687987Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7501630237820497460:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:36.688079Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:36.688440Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7501630237820497465:2472], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:36.692216Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-05-07T09:16:36.708111Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7501630237820497467:2473], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-05-07T09:16:36.764326Z node 2 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [2:7501630237820497518:3416] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:16:37.842106Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:37.981534Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7501630220640625953:2060];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:16:38.023315Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:16:43.597172Z node 2 :KQP_EXECUTER ERROR: kqp_literal_executer.cpp:107: ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: 01jtn0gwsa04pcg4tm2b5tcnem, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ODBlMDYyNzEtOGRjNTNmNGQtNTljMTkzZTYtNzM1YTlkZmM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. TKqpLiteralExecuter, TKqpEnsure failed. 2025-05-07T09:16:43.597489Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2578: SessionId: ydb://session/3?node_id=2&id=ODBlMDYyNzEtOGRjNTNmNGQtNTljMTkzZTYtNzM1YTlkZmM=, ActorId: [2:7501630246410433135:2571], ActorState: ExecuteState, TraceId: 01jtn0gwsa04pcg4tm2b5tcnem, Create QueryResponse for error on request, msg: 2025-05-07T09:16:48.062251Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7261: Cannot get console configs 2025-05-07T09:16:48.062289Z node 2 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:16:48.382617Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037938 not found 2025-05-07T09:16:48.382659Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037937 not found 2025-05-07T09:16:48.395245Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037926 not found 2025-05-07T09:16:48.457244Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037940 not found 2025-05-07T09:16:48.463430Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037939 not found 2025-05-07T09:16:48.502681Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037936 not found 2025-05-07T09:16:48.503855Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037929 not found 2025-05-07T09:16:48.505902Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037931 not found 2025-05-07T09:16:48.505920Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle 
TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037935 not found 2025-05-07T09:16:48.505934Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037934 not found 2025-05-07T09:16:48.505946Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037928 not found 2025-05-07T09:16:48.513503Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037930 not found 2025-05-07T09:16:48.513544Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037932 not found 2025-05-07T09:16:48.513562Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037933 not found 2025-05-07T09:16:52.311431Z node 2 :KQP_EXECUTER ERROR: kqp_literal_executer.cpp:107: ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: 01jtn0h4ta89z4jsf7333747hp, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ODBlMDYyNzEtOGRjNTNmNGQtNTljMTkzZTYtNzM1YTlkZmM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. TKqpLiteralExecuter, TKqpEnsure failed. 2025-05-07T09:16:52.311721Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2578: SessionId: ydb://session/3?node_id=2&id=ODBlMDYyNzEtOGRjNTNmNGQtNTljMTkzZTYtNzM1YTlkZmM=, ActorId: [2:7501630246410433135:2571], ActorState: ExecuteState, TraceId: 01jtn0h4ta89z4jsf7333747hp, Create QueryResponse for error on request, msg: 2025-05-07T09:16:53.612820Z node 2 :KQP_EXECUTER ERROR: kqp_literal_executer.cpp:107: ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: 01jtn0h69pedrnkbdy4d6129nf, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ODBlMDYyNzEtOGRjNTNmNGQtNTljMTkzZTYtNzM1YTlkZmM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. TKqpLiteralExecuter, TKqpEnsure failed. 2025-05-07T09:16:53.613192Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2578: SessionId: ydb://session/3?node_id=2&id=ODBlMDYyNzEtOGRjNTNmNGQtNTljMTkzZTYtNzM1YTlkZmM=, ActorId: [2:7501630246410433135:2571], ActorState: ExecuteState, TraceId: 01jtn0h69pedrnkbdy4d6129nf, Create QueryResponse for error on request, msg: 2025-05-07T09:16:55.140669Z node 2 :KQP_EXECUTER ERROR: kqp_literal_executer.cpp:107: ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: 01jtn0h8yk98dcqxd44frf4qxn, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ODBlMDYyNzEtOGRjNTNmNGQtNTljMTkzZTYtNzM1YTlkZmM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. TKqpLiteralExecuter, TKqpEnsure failed. 
2025-05-07T09:16:55.140986Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2578: SessionId: ydb://session/3?node_id=2&id=ODBlMDYyNzEtOGRjNTNmNGQtNTljMTkzZTYtNzM1YTlkZmM=, ActorId: [2:7501630246410433135:2571], ActorState: ExecuteState, TraceId: 01jtn0h8yk98dcqxd44frf4qxn, Create QueryResponse for error on request, msg: >> KqpExplain::UpdateSecondaryConditional-UseSink [GOOD] >> KqpExplain::UpdateSecondaryConditionalPrimaryKey+UseSink >> KqpQuery::QueryClientTimeout [GOOD] >> KqpQuery::QueryClientTimeoutPrecompiled >> ShowCreateView::WithSingleQuotedTablePathPrefix [GOOD] >> ShowCreateView::WithTwoTablePathPrefixes >> KqpExplain::UpdateSecondaryConditionalSecondaryKey-UseSink [GOOD] >> KqpLimits::BigParameter >> KqpLimits::DatashardProgramSize+useSink [GOOD] >> KqpLimits::DatashardProgramSize-useSink >> KqpLimits::ComputeActorMemoryAllocationFailure-useSink [GOOD] >> KqpLimits::ComputeActorMemoryAllocationFailureQueryService+useSink |94.5%| [TA] $(B)/ydb/tests/olap/ttl_tiering/test-results/py3test/{meta.json ... results_accumulator.log} >> KqpParams::ImplicitParameterTypes >> KqpExplain::UpdateSecondaryConditional+UseSink [GOOD] >> KqpStats::MultiTxStatsFullYql [GOOD] >> SystemView::AuthGroups_TableRange [GOOD] >> KqpExplain::ExplainStream [GOOD] >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_1__SYNC-pk_types8-all_types8-index8---SYNC] [GOOD] >> KqpExplain::UpdateOnSecondary-UseSink >> SystemView::AuthOwners >> KqpParams::DefaultParameterValue [GOOD] >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_2_UNIQUE_SYNC-pk_types2-all_types2-index2--UNIQUE-SYNC] >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_0__ASYNC-pk_types11-all_types11-index11---ASYNC] [GOOD] >> KqpQuery::RandomUuid [GOOD] >> KqpStats::MultiTxStatsFullScan >> KqpIndexes::MultipleSecondaryIndexWithSameComulns+UseSink [GOOD] >> KqpQuery::DdlInDataQuery [GOOD] >> KqpQuery::CreateAsSelect_BadCases >> KqpQuery::QueryResultsTruncated [GOOD] >> KqpParams::CheckQueryCacheForUnpreparedQuery [GOOD] >> KqpQuery::QueryStats+UseSink >> KqpExplain::MultiUsedStage [GOOD] >> KqpExplain::ExplainScanQueryWithParams >> KqpParams::Decimal-QueryService-UseSink >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_0__SYNC-pk_types9-all_types9-index9---SYNC] >> KqpParams::Decimal+QueryService-UseSink >> KqpQuery::ReadOverloaded+StreamLookup >> KqpExplain::MergeConnection >> KqpTypes::UnsafeTimestampCastV1 [GOOD] >> KqpLimits::ComputeActorMemoryAllocationFailureQueryService+useSink [GOOD] >> KqpQuery::YqlTableSample [GOOD] >> KqpExplain::SelfJoin3xSameLabels [GOOD] >> KqpTypes::Time64Columns+EnableTableDatetime64 >> KqpQuery::UpdateWhereInSubquery >> KqpLimits::ComputeActorMemoryAllocationFailureQueryService-useSink >> KqpExplain::SqlIn ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpIndexes::MultipleSecondaryIndexWithSameComulns+UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 5450, MsgBus: 12930 2025-05-07T09:16:28.534794Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501630203914568106:2060];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:16:28.534858Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # 
/home/runner/.ya/build/build_root/zvgn/003bb9/r3tmp/tmpPGkDnd/pdisk_1.dat 2025-05-07T09:16:28.928461Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:16:28.952797Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:16:28.952918Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:16:28.957007Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 5450, node 1 2025-05-07T09:16:29.138423Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:16:29.138459Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:16:29.138477Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:16:29.138571Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:12930 TClient is connected to server localhost:12930 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:16:29.801275Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:29.852215Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:29.996881Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:30.165212Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T09:16:30.247994Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:32.203058Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501630221094438950:2406], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:32.203185Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:32.590218Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T09:16:32.620720Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T09:16:32.652527Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T09:16:32.682455Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T09:16:32.709480Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T09:16:32.755096Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T09:16:32.828504Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T09:16:32.910681Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501630221094439614:2470], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:32.910774Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:32.910847Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501630221094439619:2473], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:32.914358Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-05-07T09:16:32.923689Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501630221094439621:2474], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T09:16:33.029648Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501630225389406968:3420] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:16:33.535363Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501630203914568106:2060];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:16:33.535456Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:16:34.066873Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269877761, Sender [1:7501630229684374541:3598], Recipient [1:7501630203914568540:2197]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-05-07T09:16:34.066913Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4937: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-05-07T09:16:34.066924Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5713: Pipe server connected, at tablet: 72057594046644480 2025-05-07T09:16:34.066963Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271122432, Sender [1:7501630229684374537:3595], Recipient [1:7501630203914568540:2197]: {TEvModifySchemeTransaction txid# 281474976710672 TabletId# 72057594046644480} 2025-05-07T09:16:34.066982Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4851: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-05-07T09:16:34.190678Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/Root" OperationType: ESchemeOpCreateIndexedTable CreateIndexedTable { TableDescription { Name: "TestTable" Columns { Name: "Key" Type: "String" NotNull: false } Columns { Name: "Value1" Type: "String" NotNull: false } Columns { Name: "Value2" Type: "String" NotNull: false } KeyColumnNames: "Key" PartitionConfig { ColumnFamilies { Id: 0 StorageConfig { SysLog { PreferredPoolKind: "test" } Log { PreferredPoolKind: "test" } Data { PreferredPoolKind: "test" } } } } Temporary: false } IndexDescription { Name: "Index1" KeyColumnNames: "Value1" Type: EIndexTypeGlobal IndexImplTableDescriptions { PartitionConfig { } } } IndexDescription { Name: "Index2" KeyColumnNames: "Value2" Type: EIndexTypeGlobal IndexImplTableDescriptions { PartitionConfig { } } } } } TxId: 281474976710672 TabletId: 72057594046644480 PeerName: "ipv6:[::1]:33542" , at schemeshard: 72057594046644480 2025-05-07T09:16:34.191235Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_create_indexed_table.cpp:101: TCreateTableIndex construct operation table path: /Root/TestTable domain path id: [OwnerId: 72057594046644480, LocalPathId: 1] domain path: /Root shardsToCreate: 3 GetShardsInside: 34 MaxShards: 200000 2025-05-07T09:16:34.191784Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_table.cpp:426: TCreateTable Propose, path: /Root/TestTable, opId: 281474976710672:0, at schemeshard: 72057594046644480 2025-05-07T09:16:34.191927Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_create_table.cpp:433: TCreateTable 
Propose, path: /Root/TestTable, opId: 281474976710672:0, schema: Name: "TestTable" Columns { Name: "Key" Type: "String" NotNull: false } Columns { Name: "Value1" Type: "String" NotNull: false } Columns { Name: "Value2" Type: "String" NotNull: false } KeyColumnNames: "Key" PartitionConfig { ColumnF ... 0 InMemSize: 816 LastAccessTime: 1746609420362 LastUpdateTime: 1746609420362 ImmediateTxCompleted: 11 PlannedTxCompleted: 12 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 7 RowDeletes: 14 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 1 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 2025-05-07T09:17:00.501157Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:608: Will delay TTxStoreTableStats on# 0.099995s, queue# 1 2025-05-07T09:17:00.510886Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269877761, Sender [3:7501630340187712278:4423], Recipient [3:7501630271468231525:2157]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-05-07T09:17:00.510929Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4937: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-05-07T09:17:00.510942Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5713: Pipe server connected, at tablet: 72057594046644480 2025-05-07T09:17:00.510977Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269877761, Sender [3:7501630340187712279:4424], Recipient [3:7501630271468231525:2157]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-05-07T09:17:00.510988Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4937: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-05-07T09:17:00.510995Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5713: Pipe server connected, at tablet: 72057594046644480 2025-05-07T09:17:00.516665Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269553162, Sender [3:7501630297238037653:2519], Recipient [3:7501630271468231525:2157]: NKikimrTxDataShard.TEvPeriodicTableStats DatashardId: 72075186224037924 TableLocalId: 19 Generation: 1 Round: 0 TableStats { DataSize: 816 RowCount: 3 IndexSize: 0 InMemSize: 816 LastAccessTime: 1746609420362 LastUpdateTime: 1746609420362 ImmediateTxCompleted: 11 PlannedTxCompleted: 12 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 8 RowDeletes: 16 RowReads: 0 RangeReads: 1 PartCount: 0 RangeReadRows: 1 SearchHeight: 1 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { Memory: 82488 } ShardState: 2 UserTablePartOwners: 72075186224037924 NodeId: 3 StartTime: 1746609410455 TableOwnerId: 72057594046644480 FollowerId: 0 2025-05-07T09:17:00.516707Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4877: StateWork, processing event TEvDataShard::TEvPeriodicTableStats 2025-05-07T09:17:00.516738Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:561: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037924 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 19] state 'Ready' dataSize 816 rowCount 3 cpuUsage 0 2025-05-07T09:17:00.516818Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:568: Got periodic table stats at tablet 
72057594046644480 from shard 72075186224037924 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 19] raw table stats: DataSize: 816 RowCount: 3 IndexSize: 0 InMemSize: 816 LastAccessTime: 1746609420362 LastUpdateTime: 1746609420362 ImmediateTxCompleted: 11 PlannedTxCompleted: 12 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 8 RowDeletes: 16 RowReads: 0 RangeReads: 1 PartCount: 0 RangeReadRows: 1 SearchHeight: 1 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 2025-05-07T09:17:00.516971Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269553162, Sender [3:7501630297238037646:2517], Recipient [3:7501630271468231525:2157]: NKikimrTxDataShard.TEvPeriodicTableStats DatashardId: 72075186224037922 TableLocalId: 17 Generation: 1 Round: 0 TableStats { DataSize: 840 RowCount: 1 IndexSize: 0 InMemSize: 840 LastAccessTime: 1746609420361 LastUpdateTime: 1746609420361 ImmediateTxCompleted: 11 PlannedTxCompleted: 12 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 8 RowDeletes: 6 RowReads: 10 RangeReads: 2 PartCount: 0 RangeReadRows: 2 SearchHeight: 1 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 11 LocksWholeShard: 0 LocksBroken: 11 } TabletMetrics { Memory: 82488 } ShardState: 2 UserTablePartOwners: 72075186224037922 NodeId: 3 StartTime: 1746609410454 TableOwnerId: 72057594046644480 FollowerId: 0 2025-05-07T09:17:00.516983Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4877: StateWork, processing event TEvDataShard::TEvPeriodicTableStats 2025-05-07T09:17:00.516998Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:561: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037922 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 17] state 'Ready' dataSize 840 rowCount 1 cpuUsage 0 2025-05-07T09:17:00.517072Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:568: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037922 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 17] raw table stats: DataSize: 840 RowCount: 1 IndexSize: 0 InMemSize: 840 LastAccessTime: 1746609420361 LastUpdateTime: 1746609420361 ImmediateTxCompleted: 11 PlannedTxCompleted: 12 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 8 RowDeletes: 6 RowReads: 10 RangeReads: 2 PartCount: 0 RangeReadRows: 2 SearchHeight: 1 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 11 LocksWholeShard: 0 LocksBroken: 11 2025-05-07T09:17:00.604107Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 2146435092, Sender [0:0:0], Recipient [3:7501630271468231525:2157]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats 2025-05-07T09:17:00.604166Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5015: StateWork, processing event TEvPrivate::TEvPersistTableStats 2025-05-07T09:17:00.604189Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:588: Started TEvPersistStats at tablet 72057594046644480, queue size# 3 2025-05-07T09:17:00.604253Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:599: Will execute TTxStoreStats, queue# 3 2025-05-07T09:17:00.604270Z node 3 :FLAT_TX_SCHEMESHARD 
TRACE: schemeshard__table_stats.cpp:608: Will delay TTxStoreTableStats on# 0.000000s, queue# 3 2025-05-07T09:17:00.604333Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 21 shard idx 72057594046644480:37 data size 816 row count 3 2025-05-07T09:17:00.604386Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037923 maps to shardIdx: 72057594046644480:37 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 21], pathId map=indexImplTable, is column=0, is olap=0, RowCount 3, DataSize 816 2025-05-07T09:17:00.604399Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037923, followerId 0 2025-05-07T09:17:00.604467Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:37 with partCount# 0, rowCount# 3, searchHeight# 1, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-05-07T09:17:00.604513Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:483: Do not want to split tablet 72075186224037923 2025-05-07T09:17:00.604540Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 19 shard idx 72057594046644480:36 data size 816 row count 3 2025-05-07T09:17:00.604571Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037924 maps to shardIdx: 72057594046644480:36 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 19], pathId map=indexImplTable, is column=0, is olap=0, RowCount 3, DataSize 816 2025-05-07T09:17:00.604581Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037924, followerId 0 2025-05-07T09:17:00.604607Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:36 with partCount# 0, rowCount# 3, searchHeight# 1, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-05-07T09:17:00.604625Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:483: Do not want to split tablet 72075186224037924 2025-05-07T09:17:00.604642Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 17 shard idx 72057594046644480:35 data size 840 row count 1 2025-05-07T09:17:00.604671Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037922 maps to shardIdx: 72057594046644480:35 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 17], pathId map=TestTable, is column=0, is olap=0, RowCount 1, DataSize 840 2025-05-07T09:17:00.604680Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037922, followerId 0 2025-05-07T09:17:00.604710Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:35 with partCount# 0, rowCount# 1, searchHeight# 1, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-05-07T09:17:00.604722Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__table_stats.cpp:483: Do not want to split tablet 72075186224037922 2025-05-07T09:17:00.604772Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-05-07T09:17:00.611343Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 2146435092, Sender [0:0:0], Recipient [3:7501630271468231525:2157]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats 2025-05-07T09:17:00.611381Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5015: StateWork, processing event TEvPrivate::TEvPersistTableStats 2025-05-07T09:17:00.611398Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:588: Started TEvPersistStats at tablet 72057594046644480, queue size# 0 2025-05-07T09:17:00.623796Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271125000, Sender [0:0:0], Recipient [3:7501630271468231525:2157]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T09:17:00.623838Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4848: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T09:17:00.623887Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271124999, Sender [3:7501630271468231525:2157], Recipient [3:7501630271468231525:2157]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T09:17:00.623908Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4847: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime >> KqpLimits::DatashardProgramSize-useSink [GOOD] >> KqpPrefixedVectorIndexes::OrderByCosineDistanceNotNullableLevel2 [GOOD] >> KqpStats::MultiTxStatsFullScan [GOOD] >> SystemView::GroupsFields [GOOD] >> KqpStats::OneShardLocalExec+UseSink >> KqpQuery::QueryClientTimeoutPrecompiled [GOOD] >> KqpParams::ImplicitParameterTypes [GOOD] >> KqpLimits::ComputeNodeMemoryLimit >> KqpExplain::ExplainDataQuery [GOOD] >> KqpStats::JoinNoStatsScan [GOOD] >> KqpLimits::BigParameter [GOOD] >> KqpExplain::UpdateSecondaryConditionalPrimaryKey+UseSink [GOOD] >> KqpQuery::QueryExplain >> KqpExplain::ExplainScanQueryWithParams [GOOD] >> KqpExplain::FewEffects+UseSink >> KqpPrefixedVectorIndexes::OrderByCosineDistanceNotNullableLevel3 >> SystemView::Describe >> KqpLimits::QueryReplySize [GOOD] >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_1_UNIQUE_SYNC-pk_types3-all_types3-index3--UNIQUE-SYNC] [GOOD] >> KqpLimits::ComputeActorMemoryAllocationFailureQueryService-useSink [GOOD] >> KqpParams::ImplicitSameParameterTypesQueryCacheCheck >> KqpTypes::QuerySpecialTypes >> KqpLimits::AffectedShardsLimit >> KqpExplain::UpdateSecondaryConditionalPrimaryKey-UseSink >> KqpStats::JoinStatsBasicScan >> KqpExplain::ExplainDataQueryWithParams >> KqpLimits::QueryExecTimeoutCancel >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_1__ASYNC-pk_types10-all_types10-index10---ASYNC] >> KqpExplain::UpdateOnSecondary-UseSink [GOOD] >> KqpTypes::Time64Columns+EnableTableDatetime64 [GOOD] >> KqpExplain::UpdateOnSecondaryWithoutSecondaryKey+UseSink >> KqpTypes::Time64Columns-EnableTableDatetime64 >> KqpQuery::QueryStats+UseSink [GOOD] >> KqpQuery::QueryStats-UseSink |94.5%| [TA] {RESULT} $(B)/ydb/tests/olap/ttl_tiering/test-results/py3test/{meta.json ... 
results_accumulator.log} >> KqpExplain::MergeConnection [GOOD] >> KqpExplain::IdxFullscan ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/query/unittest >> KqpLimits::ComputeActorMemoryAllocationFailureQueryService-useSink [GOOD] Test command err: Trying to start YDB, gRPC: 3728, MsgBus: 9600 2025-05-07T09:16:47.639175Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501630286233564820:2202];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:16:47.639470Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/001e58/r3tmp/tmp9iOCqI/pdisk_1.dat 2025-05-07T09:16:48.344649Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:16:48.381549Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:16:48.390334Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:16:48.399395Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 3728, node 1 2025-05-07T09:16:48.544699Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:16:48.544723Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:16:48.544734Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:16:48.544837Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:9600 TClient is connected to server localhost:9600 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:16:49.256932Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T09:16:49.313631Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:51.614246Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501630303413434860:2356], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:51.614363Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501630303413434868:2359], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:51.614452Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:51.622367Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480 2025-05-07T09:16:51.637992Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501630303413434874:2360], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-05-07T09:16:51.740723Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501630303413434927:2564] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:16:52.635114Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501630286233564820:2202];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:16:52.635171Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:16:52.700939Z node 1 :KQP_COMPUTE WARN: log.cpp:784: fline=kqp_compute_actor_factory.cpp:40;problem=cannot_allocate_memory;tx_id=281474976710661;task_id=1;memory=1048576; 2025-05-07T09:16:52.700983Z node 1 :KQP_COMPUTE WARN: dq_compute_memory_quota.h:152: TxId: 281474976710661, task: 1. [Mem] memory 1048576 NOT granted 2025-05-07T09:16:52.758905Z node 1 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:678: SelfId: [1:7501630307708402267:2370], TxId: 281474976710661, task: 1. Ctx: { CustomerSuppliedId : . TraceId : 01jtn0h5gmc0jbzbhwz96kbd9p. SessionId : ydb://session/3?node_id=1&id=ZWI5NGFlYzAtODg4NzZlNWQtNzA3MGU0NjQtNWNjYmE1MjQ=. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. InternalError: OVERLOADED KIKIMR_PRECONDITION_FAILED: {
: Error: Mkql memory limit exceeded, allocated by task 1: 10, host: ghrun-sykirh5vua, canAllocateExtraMemory: 1, memory manager details for current node: TxResourcesInfo { TxId: 281474976710661, Database: /Root, PoolId: default, MemoryPoolPercent: 100.00, tx initially granted memory: 20B, tx total memory allocations: 1MiB, tx largest successful memory allocation: 1MiB, tx last failed memory allocation: 1MiB, tx total execution units: 2, started at: 2025-05-07T09:16:52.664831Z }, code: 2029 }. 2025-05-07T09:16:52.794156Z node 1 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1216: SelfId: [1:7501630307708402268:2371], TxId: 281474976710661, task: 2. Ctx: { SessionId : ydb://session/3?node_id=1&id=ZWI5NGFlYzAtODg4NzZlNWQtNzA3MGU0NjQtNWNjYmE1MjQ=. CustomerSuppliedId : . TraceId : 01jtn0h5gmc0jbzbhwz96kbd9p. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Handle abort execution event from: [1:7501630307708402256:2353], status: OVERLOADED, reason: {
: Error: Terminate execution } 2025-05-07T09:16:52.799953Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2578: SessionId: ydb://session/3?node_id=1&id=ZWI5NGFlYzAtODg4NzZlNWQtNzA3MGU0NjQtNWNjYmE1MjQ=, ActorId: [1:7501630303413434833:2353], ActorState: ExecuteState, TraceId: 01jtn0h5gmc0jbzbhwz96kbd9p, Create QueryResponse for error on request, msg:
: Error: Mkql memory limit exceeded, allocated by task 1: 10, host: ghrun-sykirh5vua, canAllocateExtraMemory: 1, memory manager details for current node: TxResourcesInfo { TxId: 281474976710661, Database: /Root, PoolId: default, MemoryPoolPercent: 100.00, tx initially granted memory: 20B, tx total memory allocations: 1MiB, tx largest successful memory allocation: 1MiB, tx last failed memory allocation: 1MiB, tx total execution units: 2, started at: 2025-05-07T09:16:52.664831Z } , code: 2029 Trying to start YDB, gRPC: 61411, MsgBus: 28729 2025-05-07T09:16:53.839070Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7501630311570184053:2208];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/001e58/r3tmp/tmpAv86O6/pdisk_1.dat 2025-05-07T09:16:53.915922Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-05-07T09:16:54.042566Z node 2 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:16:54.044645Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:16:54.044724Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:16:54.046270Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 61411, node 2 2025-05-07T09:16:54.118602Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:16:54.118624Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:16:54.118632Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:16:54.118730Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:28729 TClient is connected to server localhost:28729 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:16:54.635542Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 
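The failure above is the point of this dump: task 1 asks the memory manager for 1MiB, the grant is refused ("memory 1048576 NOT granted"), and the query fails with OVERLOADED / KIKIMR_PRECONDITION_FAILED (code 2029, "Mkql memory limit exceeded") while the session itself survives and builds a QueryResponse. A sketch of what a client would see, assuming the ydb Python SDK; the endpoint reuses the gRPC port 3728 printed above, and the query text is illustrative:

import ydb

driver = ydb.Driver(endpoint="grpc://localhost:3728", database="/Root")
driver.wait(timeout=5)
pool = ydb.SessionPool(driver)

def run_scan(session):
    return session.transaction().execute("SELECT * FROM Test;", commit_tx=True)

try:
    pool.retry_operation_sync(run_scan)
except ydb.issues.Overloaded as err:
    # The server-side refusal above surfaces as an OVERLOADED status with the
    # memory-manager details (granted size, failed allocation) in the issue text.
    print("query rejected under memory pressure:", err)
finally:
    driver.stop()
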
2025-05-07T09:16:54.647326Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T09:16:54.664948Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo u ... hemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-05-07T09:17:04.631291Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-05-07T09:17:04.732265Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-05-07T09:17:04.757721Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:17:07.735777Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7501630371868121581:2354], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:17:07.735911Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7501630371868121604:2359], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:17:07.735970Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:17:07.740112Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480 2025-05-07T09:17:07.755016Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7501630371868121606:2360], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-05-07T09:17:07.861252Z node 4 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [4:7501630371868121657:2558] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:17:08.037727Z node 4 :KQP_COMPUTE WARN: log.cpp:784: fline=kqp_compute_actor_factory.cpp:40;problem=cannot_allocate_memory;tx_id=281474976715661;task_id=2;memory=1048576; 2025-05-07T09:17:08.037769Z node 4 :KQP_COMPUTE WARN: dq_compute_memory_quota.h:152: TxId: 281474976715661, task: 2. [Mem] memory 1048576 NOT granted 2025-05-07T09:17:08.038376Z node 4 :KQP_COMPUTE WARN: log.cpp:784: fline=kqp_compute_actor_factory.cpp:40;problem=cannot_allocate_memory;tx_id=281474976715661;task_id=3;memory=1048576; 2025-05-07T09:17:08.038393Z node 4 :KQP_COMPUTE WARN: dq_compute_memory_quota.h:152: TxId: 281474976715661, task: 3. [Mem] memory 1048576 NOT granted 2025-05-07T09:17:08.038454Z node 4 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:678: SelfId: [4:7501630376163089009:2369], TxId: 281474976715661, task: 2. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=4&id=ODQ4NmRkNzgtY2QwMjdiMC1kYzdhOWIwMC1kM2Y3OGU5Nw==. TraceId : 01jtn0hjp742ayw30bgy6wt82a. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. InternalError: OVERLOADED KIKIMR_PRECONDITION_FAILED: {
: Error: Mkql memory limit exceeded, allocated by task 2: 10, host: ghrun-sykirh5vua, canAllocateExtraMemory: 1, memory manager details for current node: TxResourcesInfo { TxId: 281474976715661, Database: /Root, PoolId: default, MemoryPoolPercent: 100.00, tx initially granted memory: 50B, tx total memory allocations: 1MiB, tx largest successful memory allocation: 1MiB, tx last failed memory allocation: 1MiB, tx total execution units: 5, started at: 2025-05-07T09:17:08.035281Z }, code: 2029 }. 2025-05-07T09:17:08.038717Z node 4 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:678: SelfId: [4:7501630376163089010:2370], TxId: 281474976715661, task: 3. Ctx: { CustomerSuppliedId : . TraceId : 01jtn0hjp742ayw30bgy6wt82a. SessionId : ydb://session/3?node_id=4&id=ODQ4NmRkNzgtY2QwMjdiMC1kYzdhOWIwMC1kM2Y3OGU5Nw==. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. InternalError: OVERLOADED KIKIMR_PRECONDITION_FAILED: {
: Error: Mkql memory limit exceeded, allocated by task 3: 10, host: ghrun-sykirh5vua, canAllocateExtraMemory: 1, memory manager details for current node: TxResourcesInfo { TxId: 281474976715661, Database: /Root, PoolId: default, MemoryPoolPercent: 100.00, tx initially granted memory: 50B, tx total memory allocations: 1MiB, tx largest successful memory allocation: 1MiB, tx last failed memory allocation: 1MiB, tx total execution units: 5, started at: 2025-05-07T09:17:08.035281Z }, code: 2029 }. 2025-05-07T09:17:08.038932Z node 4 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1216: SelfId: [4:7501630376163089008:2368], TxId: 281474976715661, task: 1. Ctx: { SessionId : ydb://session/3?node_id=4&id=ODQ4NmRkNzgtY2QwMjdiMC1kYzdhOWIwMC1kM2Y3OGU5Nw==. TraceId : 01jtn0hjp742ayw30bgy6wt82a. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Handle abort execution event from: [4:7501630376163088984:2352], status: OVERLOADED, reason: {
: Error: Terminate execution } 2025-05-07T09:17:08.039906Z node 4 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1216: SelfId: [4:7501630376163089012:2372], TxId: 281474976715661, task: 5. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=4&id=ODQ4NmRkNzgtY2QwMjdiMC1kYzdhOWIwMC1kM2Y3OGU5Nw==. TraceId : 01jtn0hjp742ayw30bgy6wt82a. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Handle abort execution event from: [4:7501630376163088984:2352], status: OVERLOADED, reason: {
: Error: Terminate execution } 2025-05-07T09:17:08.040751Z node 4 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1216: SelfId: [4:7501630376163089011:2371], TxId: 281474976715661, task: 4. Ctx: { TraceId : 01jtn0hjp742ayw30bgy6wt82a. SessionId : ydb://session/3?node_id=4&id=ODQ4NmRkNzgtY2QwMjdiMC1kYzdhOWIwMC1kM2Y3OGU5Nw==. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Handle abort execution event from: [4:7501630376163088984:2352], status: OVERLOADED, reason: {
: Error: Terminate execution } 2025-05-07T09:17:08.042867Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2578: SessionId: ydb://session/3?node_id=4&id=ODQ4NmRkNzgtY2QwMjdiMC1kYzdhOWIwMC1kM2Y3OGU5Nw==, ActorId: [4:7501630371868121563:2352], ActorState: ExecuteState, TraceId: 01jtn0hjp742ayw30bgy6wt82a, Create QueryResponse for error on request, msg:
: Error: Mkql memory limit exceeded, allocated by task 2: 10, host: ghrun-sykirh5vua, canAllocateExtraMemory: 1, memory manager details for current node: TxResourcesInfo { TxId: 281474976715661, Database: /Root, PoolId: default, MemoryPoolPercent: 100.00, tx initially granted memory: 50B, tx total memory allocations: 1MiB, tx largest successful memory allocation: 1MiB, tx last failed memory allocation: 1MiB, tx total execution units: 5, started at: 2025-05-07T09:17:08.035281Z } , code: 2029 query_phases { duration_us: 14805 table_access { name: "/Root/LargeTable" partitions_count: 2 } cpu_time_us: 7484 affected_shards: 8 } compilation { duration_us: 153751 cpu_time_us: 148670 } process_cpu_time_us: 395 query_plan: "{\"Plan\":{\"Plans\":[{\"PlanNodeId\":5,\"Plans\":[{\"PlanNodeId\":4,\"Plans\":[{\"PlanNodeId\":3,\"Plans\":[{\"PlanNodeId\":2,\"Plans\":[{\"Tables\":[\"LargeTable\"],\"PlanNodeId\":1,\"Operators\":[{\"Scan\":\"Parallel\",\"E-Size\":\"No estimate\",\"ReadRanges\":[\"Key (-\342\210\236, +\342\210\236)\",\"KeyText (-\342\210\236, +\342\210\236)\"],\"Name\":\"TableFullScan\",\"Inputs\":[],\"Path\":\"\\/Root\\/LargeTable\",\"ReadRangesPointPrefixLen\":\"0\",\"E-Rows\":\"No estimate\",\"Table\":\"LargeTable\",\"ReadColumns\":[\"Data\",\"DataText\",\"Key\",\"KeyText\"],\"E-Cost\":\"No estimate\"}],\"Node Type\":\"TableFullScan\"}],\"Node Type\":\"Collect\",\"Stats\":{\"UseLlvm\":\"undefined\",\"Output\":[{\"Pop\":{},\"Name\":\"4\",\"Push\":{}}],\"MaxMemoryUsage\":{\"Count\":4,\"Sum\":2097192,\"Max\":1048586,\"Min\":10},\"Tasks\":4,\"FinishedTasks\":0,\"PhysicalStageId\":0,\"StageDurationUs\":0,\"Table\":[{\"Path\":\"\\/Root\\/LargeTable\"}],\"BaseTimeMs\":1746609428037,\"CpuTimeUs\":{\"Count\":4,\"Sum\":2224,\"Max\":1438,\"Min\":33},\"Ingress\":[{\"Pop\":{},\"External\":{},\"Name\":\"KqpReadRangesSource\",\"Ingress\":{},\"Push\":{}}],\"UpdateTimeMs\":3}}],\"Node Type\":\"UnionAll\",\"PlanNodeType\":\"Connection\"}],\"Node Type\":\"Collect\",\"Stats\":{\"UseLlvm\":\"undefined\",\"Output\":[{\"Pop\":{},\"Name\":\"RESULT\",\"Push\":{}}],\"MaxMemoryUsage\":{\"Count\":1,\"Sum\":1048586,\"Max\":1048586,\"Min\":1048586},\"Tasks\":1,\"FinishedTasks\":0,\"PhysicalStageId\":1,\"StageDurationUs\":0,\"BaseTimeMs\":1746609428037,\"CpuTimeUs\":{\"Count\":1,\"Sum\":155,\"Max\":155,\"Min\":155},\"UpdateTimeMs\":2,\"Input\":[{\"Pop\":{},\"Name\":\"2\",\"Push\":{}}]}}],\"Node Type\":\"ResultSet\",\"PlanNodeType\":\"ResultSet\"}],\"Node Type\":\"Query\",\"Stats\":{\"Compilation\":{\"FromCache\":false,\"DurationUs\":153751,\"CpuTimeUs\":148670},\"ProcessCpuTimeUs\":395,\"TotalDurationUs\":314967,\"ResourcePoolId\":\"default\",\"QueuedTimeUs\":134385},\"PlanNodeType\":\"Query\"},\"meta\":{\"version\":\"0.2\",\"type\":\"query\"},\"SimplifiedPlan\":{\"PlanNodeId\":0,\"Plans\":[{\"PlanNodeId\":1,\"Plans\":[{\"PlanNodeId\":5,\"Operators\":[{\"Scan\":\"Parallel\",\"E-Size\":\"No estimate\",\"ReadRanges\":[\"Key (-\342\210\236, +\342\210\236)\",\"KeyText (-\342\210\236, +\342\210\236)\"],\"Name\":\"TableFullScan\",\"Path\":\"\\/Root\\/LargeTable\",\"ReadRangesPointPrefixLen\":\"0\",\"E-Rows\":\"No estimate\",\"Table\":\"LargeTable\",\"ReadColumns\":[\"Data\",\"DataText\",\"Key\",\"KeyText\"],\"E-Cost\":\"No estimate\"}],\"Node Type\":\"TableFullScan\"}],\"Node Type\":\"ResultSet\",\"PlanNodeType\":\"ResultSet\"}],\"Node Type\":\"Query\",\"PlanNodeType\":\"Query\"}}" query_ast: "(\n(let $1 (KqpTable \'\"/Root/LargeTable\" \'\"72057594046644480:2\" \'\"\" \'1))\n(let $2 \'(\'\"Data\" \'\"DataText\" \'\"Key\" 
\'\"KeyText\"))\n(let $3 (KqpRowsSourceSettings $1 $2 \'() (Void) \'()))\n(let $4 (lambda \'($10) $10))\n(let $5 (DqPhyStage \'((DqSource (DataSource \'\"KqpReadRangesSource\") $3)) $4 \'(\'(\'\"_logical_id\" \'354) \'(\'\"_id\" \'\"d36cf0b-de697f0f-6f7cbb2b-a8b1583\"))))\n(let $6 (DqCnUnionAll (TDqOutput $5 \'\"0\")))\n(let $7 (DqPhyStage \'($6) $4 \'(\'(\'\"_logical_id\" \'377) \'(\'\"_id\" \'\"434bfeab-a2c0418-6a885b59-db2ab528\"))))\n(let $8 (DqCnResult (TDqOutput $7 \'\"0\") \'()))\n(let $9 (OptionalType (DataType \'String)))\n(return (KqpPhysicalQuery \'((KqpPhysicalTx \'($5 $7) \'($8) \'() \'(\'(\'\"type\" \'\"generic\")))) \'((KqpTxResultBinding (ListType (StructType \'(\'\"Data\" (OptionalType (DataType \'Int64))) \'(\'\"DataText\" $9) \'(\'\"Key\" (OptionalType (DataType \'Uint64))) \'(\'\"KeyText\" $9))) \'\"0\" \'\"0\")) \'(\'(\'\"type\" \'\"query\"))))\n)\n" total_duration_us: 314967 total_cpu_time_us: 156549 >> KqpExplain::SqlIn [GOOD] >> KqpExplain::ReadTableRangesFullScan >> KqpQuery::UpdateWhereInSubquery [GOOD] >> KqpQuery::UpdateThenDelete-UseSink >> KqpIndexes::UpdateOnReadColumns [GOOD] >> KqpParams::Decimal-QueryService-UseSink [GOOD] >> KqpParams::Decimal-QueryService+UseSink >> SystemView::TopPartitionsByTliFields [GOOD] >> ViewQuerySplit::Basic [GOOD] >> ViewQuerySplit::WithPragmaTablePathPrefix [GOOD] >> ViewQuerySplit::WithPairedPragmaTablePathPrefix [GOOD] >> ViewQuerySplit::WithComments [GOOD] >> ViewQuerySplit::Joins [GOOD] >> KqpParams::Decimal+QueryService-UseSink [GOOD] >> KqpParams::Decimal+QueryService+UseSink >> KqpExplain::SsaProgramInJsonPlan >> KqpExplain::ExplainDataQueryWithParams [GOOD] >> KqpExplain::CreateTableAs+Stats >> KqpParams::ImplicitSameParameterTypesQueryCacheCheck [GOOD] >> KqpParams::ImplicitDifferentParameterTypesQueryCacheCheck >> KqpStats::OneShardLocalExec+UseSink [GOOD] >> KqpStats::OneShardLocalExec-UseSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpIndexes::UpdateOnReadColumns [GOOD] Test command err: Trying to start YDB, gRPC: 25471, MsgBus: 12427 2025-05-07T09:16:00.213018Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501630084005437032:2205];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:16:00.226748Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003bdd/r3tmp/tmpkmfnMA/pdisk_1.dat 2025-05-07T09:16:00.892256Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:16:00.895168Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:16:00.895279Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:16:00.922447Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 25471, node 1 2025-05-07T09:16:01.122760Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:16:01.122791Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 
2025-05-07T09:16:01.122808Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:16:01.122947Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:12427 TClient is connected to server localhost:12427 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:16:02.089299Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:02.107624Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T09:16:02.123846Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:02.330429Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:02.547610Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:02.673849Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:04.764156Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501630101185307713:2407], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:04.764344Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:05.194445Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501630084005437032:2205];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:16:05.194514Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:16:05.265055Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T09:16:05.304271Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T09:16:05.373463Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T09:16:05.425783Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T09:16:05.472233Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T09:16:05.534599Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T09:16:05.630005Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T09:16:05.721923Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501630105480275680:2472], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:05.722057Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:05.722328Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501630105480275685:2475], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:05.727450Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-05-07T09:16:05.744050Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501630105480275687:2476], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T09:16:05.844541Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501630105480275738:3424] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:16:07.218061Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269877761, Sender [1:7501630114070210586:3608], Recipient [1:7501630084005437299:2192]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-05-07T09:16:07.218104Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4937: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-05-07T09:16:07.218121Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5713: Pipe server connected, at tablet: 72057594046644480 2025-05-07T09:16:07.218156Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271122432, Sender [1:7501630114070210582:3605], Recipient [1:7501630084005437299:2192]: {TEvModifySchemeTransaction txid# 281474976710672 TabletId# 72057594046644480} 2025-05-07T09:16:07.218170Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4851: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-05-07T09:16:07.339747Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/Root" OperationType: ESchemeOpCreateIndexedTable CreateIndexedTable { TableDescription { Name: "TestTable" Columns { Name: "Key" Type: "Int32" NotNull: false } Columns { Name: "Subkey" Type: "Utf8" NotNull: false } Columns { Name: "Value" Type: "Utf8" NotNull: false } KeyColumnNames: "Key" PartitionConfig { ColumnFamilies { Id: 0 StorageConfig { SysLog { PreferredPoolKind: "test" } Log { PreferredPoolKind: "test" } Data { PreferredPoolKind: "test" } } } } Temporary: false } IndexDescription { Name: "SecondaryIndex" KeyColumnNames: "Subkey" Type: EIndexTypeGlobal State: EIndexStateReady } } } TxId: 281474976710672 TabletId: 72057594046644480 PeerName: "" , at schemeshard: 72057594046644480 2025-05-07T09:16:07.340273Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_create_indexed_table.cpp:101: TCreateTableIndex construct operation table path: /Root/TestTable domain path id: [OwnerId: 72057594046644480, LocalPathId: 1] domain path: /Root shardsToCreate: 2 GetShardsInside: 34 MaxShards: 200000 2025-05-07T09:16:07.340782Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_table.cpp:426: TCreateTable Propose, path: /Root/TestTable, opId: 281474976710672:0, at schemeshard: 72057594046644480 2025-05-07T09:16:07.340946Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_create_table.cpp:433: TCreateTable Propose, path: /Root/TestTable, opId: 281474976710672:0, schema: Name: "TestTable" Columns { Name: "Key" Type: "Int32" NotNull: false } Columns { Name: "Subkey" Type: "Utf8" NotNull: false } Columns { Name: "Value" Type: "Utf8" NotNull: false } KeyColumnNames: "Key" PartitionConfig ... P_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7501630305559553882:2474], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:52.844063Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-05-07T09:16:52.863156Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [6:7501630305559553884:2475], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T09:16:52.965852Z node 6 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [6:7501630305559553935:3428] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:16:54.492002Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480 2025-05-07T09:16:54.553057Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480 2025-05-07T09:16:54.629401Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710674:0, at schemeshard: 72057594046644480 Trying to start YDB, gRPC: 23546, MsgBus: 29331 2025-05-07T09:16:59.550292Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7501630335919361256:2058];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:16:59.554327Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003bdd/r3tmp/tmpoF0WyZ/pdisk_1.dat 2025-05-07T09:16:59.833049Z node 7 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:16:59.870882Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:16:59.871017Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:16:59.874542Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 23546, node 7 2025-05-07T09:16:59.966846Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:16:59.966873Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:16:59.966886Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:16:59.967057Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:29331 TClient is connected to server localhost:29331 WaitRootIsUp 'Root'... 
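The recurring sequence above (a NOT_FOUND pool fetch, an ESchemeOpCreateResourcePool proposal, a "Scheduled retry ... doublechecking", then a "path exist, request accepts it" issue) is the workload service lazily creating the built-in pool at /Root/.metadata/workload_manager/pools/default on a session's first query; when several actors race, the losers' create lands on the already-existing path, which the request is written to tolerate. Pools can also be declared explicitly. A minimal YQL sketch, assuming a YDB build with the workload manager enabled; the pool name and limits are illustrative, not taken from this log:

    CREATE RESOURCE POOL example_pool WITH (
        CONCURRENT_QUERY_LIMIT = 10,  -- queries allowed to run at once (illustrative value)
        QUEUE_SIZE = 100              -- queries allowed to wait for a free slot (illustrative value)
    );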
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:17:00.843501Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:17:00.854645Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T09:17:00.876783Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:17:01.001609Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:17:01.263391Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:17:01.380229Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:17:04.550140Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[7:7501630335919361256:2058];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:17:04.550229Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:17:04.762199Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7501630357394199397:2408], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:17:04.762341Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:17:04.839691Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T09:17:04.932176Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T09:17:04.999393Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T09:17:05.056683Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T09:17:05.129744Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T09:17:05.185361Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T09:17:05.266202Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T09:17:05.384265Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7501630361689167366:2471], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:17:05.384472Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:17:05.385473Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7501630361689167371:2474], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:17:05.392520Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-05-07T09:17:05.408052Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7501630361689167373:2475], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T09:17:05.477701Z node 7 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [7:7501630361689167424:3429] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:17:07.320985Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480 2025-05-07T09:17:07.417314Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480 2025-05-07T09:17:07.519709Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710674:0, at schemeshard: 72057594046644480 >> KqpTypes::QuerySpecialTypes [GOOD] >> KqpTypes::DyNumberCompare ------- [TM] {asan, default-linux-x86_64, release} ydb/core/sys_view/ut/unittest >> ViewQuerySplit::Joins [GOOD] Test command err: 2025-05-07T09:16:26.098873Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501630194752546442:2269];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:16:26.098937Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/004586/r3tmp/tmpKDRO3A/pdisk_1.dat 2025-05-07T09:16:26.631251Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:16:26.647842Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:16:26.647931Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:16:26.652916Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 6114, node 1 2025-05-07T09:16:26.793664Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:16:26.793692Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:16:26.793701Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:16:26.793822Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:12781 WaitRootIsUp 'Root'... 
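The ESchemeOpCreateIndexedTable proposal logged earlier for node 1 (TestTable with the global index SecondaryIndex over Subkey) is the schemeshard's view of a single YQL DDL statement. A sketch reconstructing that statement from the schema printed in the transaction; the PartitionConfig and storage-pool details are omitted:

    CREATE TABLE `/Root/TestTable` (
        Key Int32,
        Subkey Utf8,
        Value Utf8,
        PRIMARY KEY (Key),
        INDEX SecondaryIndex GLOBAL ON (Subkey)  -- EIndexTypeGlobal in the proposal
    );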
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:16:27.163852Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:27.179226Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T09:16:29.643372Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501630207637448760:2328], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:29.643609Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:29.644582Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501630207637448787:2331], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:29.649568Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480 2025-05-07T09:16:29.667576Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501630207637448789:2332], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-05-07T09:16:29.754496Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501630207637448840:2331] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:16:30.235874Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710661. Ctx: { TraceId: 01jtn0gdqzdjq2m6dse6ff8csk, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OThlZTQyYWYtZDU5ZTIyNDAtNWRiMDc3YzEtNWU4ODVlNTI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:16:30.268549Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:45: Scan started, actor: [1:7501630211932416174:2341], owner: [1:7501630211932416170:2339], scan id: 0, table id: [72057594046644480:1:0:ds_vslots] 2025-05-07T09:16:30.270585Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:321: Scan prepared, actor: [1:7501630211932416174:2341], schemeshard id: 72057594046644480, hive id: 72057594037968897, database: /Root, database owner: root@builtin, domain key: [OwnerId: 72057594046644480, LocalPathId: 1], database node count: 1 2025-05-07T09:16:30.290920Z node 1 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:65: Sending scan batch, actor: [1:7501630211932416174:2341], row count: 1, finished: 1 2025-05-07T09:16:30.290981Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:120: Scan finished, actor: [1:7501630211932416174:2341], owner: [1:7501630211932416170:2339], scan id: 0, table id: [72057594046644480:1:0:ds_vslots] 2025-05-07T09:16:30.307642Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1746609390229, txId: 281474976710660] shutting down 2025-05-07T09:16:31.099113Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501630194752546442:2269];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:16:31.099212Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:16:31.511308Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710663. Ctx: { TraceId: 01jtn0ghqc8dz763wjdsgncbcn, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTM5MDUxZTYtMzZkYWEzM2EtYmVjNTYwMWQtZGU4YjY0ZWM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-05-07T09:16:31.514055Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:45: Scan started, actor: [1:7501630216227383514:2355], owner: [1:7501630216227383511:2353], scan id: 0, table id: [72057594046644480:1:0:ds_vslots] 2025-05-07T09:16:31.514868Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:321: Scan prepared, actor: [1:7501630216227383514:2355], schemeshard id: 72057594046644480, hive id: 72057594037968897, database: /Root, database owner: root@builtin, domain key: [OwnerId: 72057594046644480, LocalPathId: 1], database node count: 1 2025-05-07T09:16:31.515108Z node 1 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:65: Sending scan batch, actor: [1:7501630216227383514:2355], row count: 1, finished: 1 2025-05-07T09:16:31.515147Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:120: Scan finished, actor: [1:7501630216227383514:2355], owner: [1:7501630216227383511:2353], scan id: 0, table id: [72057594046644480:1:0:ds_vslots] 2025-05-07T09:16:31.517466Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1746609391510, txId: 281474976710662] shutting down 2025-05-07T09:16:32.652418Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710665. Ctx: { TraceId: 01jtn0gjwq3gw9dstyk0w469t6, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZDM4MzBhYTktZjdiNjM3ODUtNzA3YzcxY2YtMTdhNzA0ZTM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:16:32.654043Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:45: Scan started, actor: [1:7501630220522350856:2370], owner: [1:7501630220522350852:2368], scan id: 0, table id: [72057594046644480:1:0:ds_vslots] 2025-05-07T09:16:32.654410Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:321: Scan prepared, actor: [1:7501630220522350856:2370], schemeshard id: 72057594046644480, hive id: 72057594037968897, database: /Root, database owner: root@builtin, domain key: [OwnerId: 72057594046644480, LocalPathId: 1], database node count: 1 2025-05-07T09:16:32.654629Z node 1 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:65: Sending scan batch, actor: [1:7501630220522350856:2370], row count: 2, finished: 1 2025-05-07T09:16:32.654661Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:120: Scan finished, actor: [1:7501630220522350856:2370], owner: [1:7501630220522350852:2368], scan id: 0, table id: [72057594046644480:1:0:ds_vslots] 2025-05-07T09:16:32.656741Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1746609392651, txId: 281474976710664] shutting down 2025-05-07T09:16:33.737207Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7501630224249538415:2073];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:16:33.737264Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/004586/r3tmp/tmp3oNnLR/pdisk_1.dat 2025-05-07T09:16:33.858804Z node 2 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:16:33.892646Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:16:33.892705Z node 2 :HIVE WARN: 
node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:16:33.895630Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 2959, node 2 2025-05-07T09:16:33.990552Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:16:33.990573Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:16:33.990581Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:16:33.990699Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:1109 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescr ... -05-07T10:00:00.000000Z, query count# 0, persisted# 0 2025-05-07T09:17:08.529018Z node 11 :SYSTEM_VIEWS DEBUG: tx_aggregate.cpp:110: [72075186224037893] TTxAggregate::Complete 2025-05-07T09:17:08.546255Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710681. Ctx: { TraceId: 01jtn0hn1f0z3fyqvtbbf3hvfe, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=MTZjNGY0NTEtYmMzODUyZTYtNTdlY2VlNzYtZmU5MWEyMQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:17:08.550644Z node 7 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1746609428011, txId: 281474976710679] shutting down 2025-05-07T09:17:08.566098Z node 9 :SYSTEM_VIEWS DEBUG: tx_aggregate.cpp:14: [72075186224037899] TTxAggregate::Execute 2025-05-07T09:17:08.566164Z node 9 :SYSTEM_VIEWS DEBUG: processor_impl.cpp:136: [72075186224037899] PersistQueryResults: interval end# 2025-05-07T09:17:08.000000Z, query count# 0 2025-05-07T09:17:08.566189Z node 9 :SYSTEM_VIEWS DEBUG: processor_impl.cpp:105: [72075186224037899] PersistQueryTopResults: table id# 8, interval end# 2025-05-07T09:17:08.000000Z, query count# 0, persisted# 0 2025-05-07T09:17:08.566210Z node 9 :SYSTEM_VIEWS DEBUG: processor_impl.cpp:105: [72075186224037899] PersistQueryTopResults: table id# 10, interval end# 2025-05-07T09:17:08.000000Z, query count# 0, persisted# 0 2025-05-07T09:17:08.566231Z node 9 :SYSTEM_VIEWS DEBUG: processor_impl.cpp:105: [72075186224037899] PersistQueryTopResults: table id# 12, interval end# 2025-05-07T09:17:08.000000Z, query count# 0, persisted# 0 2025-05-07T09:17:08.566251Z node 9 :SYSTEM_VIEWS DEBUG: processor_impl.cpp:105: [72075186224037899] PersistQueryTopResults: table id# 14, interval end# 2025-05-07T09:17:08.000000Z, query count# 0, persisted# 0 2025-05-07T09:17:08.566274Z node 9 :SYSTEM_VIEWS DEBUG: processor_impl.cpp:105: [72075186224037899] PersistQueryTopResults: table id# 9, interval end# 2025-05-07T10:00:00.000000Z, query count# 0, persisted# 0 2025-05-07T09:17:08.566292Z node 9 :SYSTEM_VIEWS DEBUG: processor_impl.cpp:105: [72075186224037899] PersistQueryTopResults: table id# 11, interval end# 2025-05-07T10:00:00.000000Z, query count# 0, persisted# 0 2025-05-07T09:17:08.566313Z node 9 :SYSTEM_VIEWS DEBUG: processor_impl.cpp:105: [72075186224037899] PersistQueryTopResults: table id# 13, interval end# 2025-05-07T10:00:00.000000Z, query count# 0, persisted# 0 2025-05-07T09:17:08.566332Z node 9 
:SYSTEM_VIEWS DEBUG: processor_impl.cpp:105: [72075186224037899] PersistQueryTopResults: table id# 15, interval end# 2025-05-07T10:00:00.000000Z, query count# 0, persisted# 0 2025-05-07T09:17:08.598282Z node 9 :SYSTEM_VIEWS DEBUG: tx_aggregate.cpp:110: [72075186224037899] TTxAggregate::Complete 2025-05-07T09:17:08.947472Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710683. Ctx: { TraceId: 01jtn0hp2z8d6fv1wjdr31ev53, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=MzkxMDJjYjgtN2ZkMjVmY2EtZGFjODBjMmYtMmUwOTdiOWU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-05-07T09:17:08.952168Z node 7 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:45: Scan started, actor: [7:7501630373914559113:2465], owner: [7:7501630373914559110:2463], scan id: 0, table id: [72075186224037888:1:0:top_partitions_by_tli_one_minute] 2025-05-07T09:17:08.958882Z node 7 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:321: Scan prepared, actor: [7:7501630373914559113:2465], schemeshard id: 72075186224037888, hive id: 72057594037968897, database: /Root/Tenant1, database owner: root@builtin, domain key: [OwnerId: 72057594046644480, LocalPathId: 2], database node count: 2 2025-05-07T09:17:08.959437Z node 11 :SYSTEM_VIEWS DEBUG: processor_impl.cpp:641: [72075186224037893] Reply batch: range# From { IntervalEndUs: 1746609428000000 Rank: 0 } InclusiveFrom: true To { IntervalEndUs: 1746609428000000 Rank: 4294967295 } InclusiveTo: true Type: TOP_PARTITIONS_BY_TLI_ONE_MINUTE , rows# 1, bytes# 63, next# 2025-05-07T09:17:08.959858Z node 7 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:65: Sending scan batch, actor: [7:7501630373914559113:2465], row count: 1, finished: 1 2025-05-07T09:17:08.959892Z node 7 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:120: Scan finished, actor: [7:7501630373914559113:2465], owner: [7:7501630373914559110:2463], scan id: 0, table id: [72075186224037888:1:0:top_partitions_by_tli_one_minute] 2025-05-07T09:17:08.979972Z node 7 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1746609428946, txId: 281474976710682] shutting down 2025-05-07T09:17:08.995453Z node 10 :SYSTEM_VIEWS WARN: sysview_service.cpp:811: Summary delivery problem: service id# [10:7501630305927965931:2130], processor id# 72075186224037893, database# /Root/Tenant1 2025-05-07T09:17:08.999644Z node 8 :SYSTEM_VIEWS WARN: sysview_service.cpp:811: Summary delivery problem: service id# [8:7501630309230694462:2078], processor id# 72075186224037899, database# /Root/Tenant2 2025-05-07T09:17:08.999838Z node 7 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 8 2025-05-07T09:17:09.006122Z node 11 :SYSTEM_VIEWS WARN: sysview_service.cpp:811: Summary delivery problem: service id# [11:7501630303614865709:2088], processor id# 72075186224037893, database# /Root/Tenant1 2025-05-07T09:17:09.005123Z node 9 :SYSTEM_VIEWS WARN: sysview_service.cpp:811: Summary delivery problem: service id# [9:7501630310102965808:2206], processor id# 72075186224037899, database# /Root/Tenant2 2025-05-07T09:17:09.000243Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-05-07T09:17:09.008775Z node 9 :SYSTEM_VIEWS INFO: sysview_service.cpp:880: Navigate by database succeeded: service id# [9:7501630310102965808:2206], database# /Root/Tenant2, processor id# 72075186224037899 2025-05-07T09:17:09.000394Z node 7 
:HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 11 2025-05-07T09:17:09.000997Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(11, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-05-07T09:17:09.002861Z node 7 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 10 2025-05-07T09:17:08.995728Z node 10 :SYSTEM_VIEWS INFO: sysview_service.cpp:880: Navigate by database succeeded: service id# [10:7501630305927965931:2130], database# /Root/Tenant1, processor id# 72075186224037893 2025-05-07T09:17:09.018471Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-05-07T09:17:09.019063Z node 7 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 9 2025-05-07T09:17:09.019878Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-05-07T09:17:09.007448Z node 11 :SYSTEM_VIEWS INFO: sysview_service.cpp:880: Navigate by database succeeded: service id# [11:7501630303614865709:2088], database# /Root/Tenant1, processor id# 72075186224037893 2025-05-07T09:17:09.024631Z node 8 :SYSTEM_VIEWS INFO: sysview_service.cpp:880: Navigate by database succeeded: service id# [8:7501630309230694462:2078], database# /Root/Tenant2, processor id# 72075186224037899 2025-05-07T09:17:09.028040Z node 7 :HIVE WARN: hive_impl.cpp:934: HIVE#72057594037968897 THive::Handle::TEvUndelivered Sender=[9:7501630310102965796:2101], Type=268959746 2025-05-07T09:17:09.028095Z node 7 :HIVE WARN: hive_impl.cpp:934: HIVE#72057594037968897 THive::Handle::TEvUndelivered Sender=[10:7501630305927966099:2106], Type=268959746 2025-05-07T09:17:09.028129Z node 7 :HIVE WARN: hive_impl.cpp:934: HIVE#72057594037968897 THive::Handle::TEvUndelivered Sender=[10:7501630305927966099:2106], Type=268959746 2025-05-07T09:17:09.028154Z node 7 :HIVE WARN: hive_impl.cpp:934: HIVE#72057594037968897 THive::Handle::TEvUndelivered Sender=[10:7501630305927966099:2106], Type=268959746 2025-05-07T09:17:09.028186Z node 7 :HIVE WARN: hive_impl.cpp:934: HIVE#72057594037968897 THive::Handle::TEvUndelivered Sender=[10:7501630305927966099:2106], Type=268959746 2025-05-07T09:17:09.028212Z node 7 :HIVE WARN: hive_impl.cpp:934: HIVE#72057594037968897 THive::Handle::TEvUndelivered Sender=[10:7501630305927966099:2106], Type=268959746 2025-05-07T09:17:09.028237Z node 7 :HIVE WARN: hive_impl.cpp:934: HIVE#72057594037968897 THive::Handle::TEvUndelivered Sender=[10:7501630305927966099:2106], Type=268959746 2025-05-07T09:17:10.018155Z node 10 :SYSTEM_VIEWS DEBUG: sysview_service.cpp:669: Handle TEvPrivate::TEvProcessLabeledCounters: service id# [10:7501630305927965931:2130] 2025-05-07T09:17:10.210093Z node 11 :SYSTEM_VIEWS DEBUG: sysview_service.cpp:669: Handle TEvPrivate::TEvProcessLabeledCounters: service id# [11:7501630299319898318:2063] 2025-05-07T09:17:10.310102Z node 11 :SYSTEM_VIEWS DEBUG: sysview_service.cpp:658: Handle TEvPrivate::TEvProcessCounters: service id# [11:7501630299319898318:2063] 2025-05-07T09:17:10.378165Z node 11 :SYSTEM_VIEWS DEBUG: sysview_service.cpp:669: Handle TEvPrivate::TEvProcessLabeledCounters: service id# [11:7501630303614865709:2088] 2025-05-07T09:17:10.398321Z node 10 :SYSTEM_VIEWS DEBUG: sysview_service.cpp:669: Handle TEvPrivate::TEvProcessLabeledCounters: service id# [10:7501630301632998497:2063] 
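The SYSTEM_VIEWS scans above (table ids ending in ds_vslots and top_partitions_by_tli_one_minute) back SELECTs against the hidden .sys directory that the Ls output lists as a child of Root. A sketch of the kind of query that triggers such a scan, assuming current system-view names; the exact column sets vary by YDB version:

    SELECT * FROM `/Root/.sys/ds_vslots`;                  -- the storage VSlot view scanned above
    SELECT * FROM `/Root/.sys/partition_stats` LIMIT 10;   -- per-database views follow the same pattern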
2025-05-07T09:17:10.398369Z node 10 :SYSTEM_VIEWS DEBUG: sysview_service.cpp:658: Handle TEvPrivate::TEvProcessCounters: service id# [10:7501630301632998497:2063] 2025-05-07T09:17:10.538055Z node 8 :SYSTEM_VIEWS DEBUG: sysview_service.cpp:669: Handle TEvPrivate::TEvProcessLabeledCounters: service id# [8:7501630300640759782:2063] 2025-05-07T09:17:10.542108Z node 8 :SYSTEM_VIEWS DEBUG: sysview_service.cpp:669: Handle TEvPrivate::TEvProcessLabeledCounters: service id# [8:7501630309230694462:2078] 2025-05-07T09:17:10.670076Z node 8 :SYSTEM_VIEWS DEBUG: sysview_service.cpp:658: Handle TEvPrivate::TEvProcessCounters: service id# [8:7501630309230694462:2078] 2025-05-07T09:17:10.671044Z node 8 :SYSTEM_VIEWS DEBUG: sysview_service.cpp:522: Send counters: service id# [8:7501630309230694462:2078], processor id# 72075186224037899, database# /Root/Tenant2, generation# 4298807447542057630, node id# 8, is retrying# 0, is labeled# 0 2025-05-07T09:17:10.711927Z node 8 :SYSTEM_VIEWS WARN: sysview_service.cpp:811: Summary delivery problem: service id# [8:7501630309230694462:2078], processor id# 72075186224037899, database# /Root/Tenant2 2025-05-07T09:17:10.713192Z node 8 :SYSTEM_VIEWS INFO: sysview_service.cpp:880: Navigate by database succeeded: service id# [8:7501630309230694462:2078], database# /Root/Tenant2, processor id# 72075186224037899 2025-05-07T09:17:10.746274Z node 8 :SYSTEM_VIEWS DEBUG: sysview_service.cpp:658: Handle TEvPrivate::TEvProcessCounters: service id# [8:7501630300640759782:2063] 2025-05-07T09:17:10.863120Z node 10 :SYSTEM_VIEWS DEBUG: sysview_service.cpp:658: Handle TEvPrivate::TEvProcessCounters: service id# [10:7501630305927965931:2130] 2025-05-07T09:17:10.863948Z node 10 :SYSTEM_VIEWS DEBUG: sysview_service.cpp:522: Send counters: service id# [10:7501630305927965931:2130], processor id# 72075186224037893, database# /Root/Tenant1, generation# 5323100854815561535, node id# 10, is retrying# 0, is labeled# 0 2025-05-07T09:17:10.901417Z node 10 :SYSTEM_VIEWS WARN: sysview_service.cpp:811: Summary delivery problem: service id# [10:7501630305927965931:2130], processor id# 72075186224037893, database# /Root/Tenant1 2025-05-07T09:17:10.901679Z node 10 :SYSTEM_VIEWS INFO: sysview_service.cpp:880: Navigate by database succeeded: service id# [10:7501630305927965931:2130], database# /Root/Tenant1, processor id# 72075186224037893 >> SystemView::Describe [GOOD] >> SystemView::DescribeSystemFolder >> KqpExplain::FewEffects+UseSink [GOOD] >> KqpExplain::FewEffects-UseSink >> SystemView::AuthOwners [GOOD] >> SystemView::AuthOwners_Access >> KqpQuery::QueryExplain [GOOD] >> KqpQuery::QueryFromSqs >> KqpTypes::Time64Columns-EnableTableDatetime64 [GOOD] >> KqpVectorIndexes::OrderByCosineDistanceNotNullableLevel2 [GOOD] >> KqpLimits::StreamWrite+Allowed >> KqpLimits::AffectedShardsLimit [GOOD] >> KqpLimits::CancelAfterRoTx ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/query/unittest >> KqpTypes::Time64Columns-EnableTableDatetime64 [GOOD] Test command err: Trying to start YDB, gRPC: 64115, MsgBus: 23781 2025-05-07T09:16:50.459957Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501630297757705881:2271];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:16:50.460017Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # 
/home/runner/.ya/build/build_root/zvgn/001e4c/r3tmp/tmpZJXpz1/pdisk_1.dat 2025-05-07T09:16:51.058695Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:16:51.061640Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:16:51.061747Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:16:51.066593Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 64115, node 1 2025-05-07T09:16:51.274703Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:16:51.274731Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:16:51.274744Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:16:51.274884Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23781 TClient is connected to server localhost:23781 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:16:51.970383Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:51.993663Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:52.172937Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:52.369653Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 
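Among the verdicts listed above, KqpVectorIndexes::OrderByCosineDistanceNotNullableLevel2 [GOOD] exercises ordering rows by vector similarity. A sketch of the query shape such a test targets, assuming the Knn UDF module shipped with current YDB; the table and column names are hypothetical, not from this run:

    $target = Knn::ToBinaryStringFloat([1.0f, 0.0f, 0.5f]);  -- serialize the probe vector
    SELECT id
    FROM `/Root/Vectors`                      -- hypothetical table with a String column `embedding`
    ORDER BY Knn::CosineDistance(embedding, $target)
    LIMIT 10;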
2025-05-07T09:16:52.468493Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:54.471423Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501630314937576494:2406], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:54.471515Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:54.741204Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T09:16:54.818335Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T09:16:54.867668Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T09:16:54.928388Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T09:16:55.024115Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T09:16:55.078965Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T09:16:55.120063Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T09:16:55.213421Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501630319232544456:2470], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:55.213501Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:55.213873Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501630319232544461:2473], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:55.219209Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-05-07T09:16:55.236428Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501630319232544463:2474], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T09:16:55.304893Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501630319232544514:3424] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:16:55.460217Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501630297757705881:2271];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:16:55.460305Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:16:56.538847Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480
: Warning: Optimization, code: 1070
:3:29: Warning: Unsafe conversion integral value to Timestamp, consider using date types, code: 1102 Trying to start YDB, gRPC: 14563, MsgBus: 25253 2025-05-07T09:16:57.881925Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7501630327376615294:2061];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:16:57.881998Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/001e4c/r3tmp/tmpZedBu4/pdisk_1.dat 2025-05-07T09:16:58.037759Z node 2 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:16:58.038440Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:16:58.038527Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:16:58.055694Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 14563, node 2 2025-05-07T09:16:58.150918Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:16:58.150940Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:16:58.150947Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:16:58.151049Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:25253 TClient is connected to server localhost:25253 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" Pat ... 
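The warning above (code 1102, "Unsafe conversion integral value to Timestamp") and the GENERIC_ERROR below ("Unsafe timestamp cast restricted from SQL v1") both stem from writing a bare integer into a Timestamp column; SQL v1 demands that the conversion be explicit. A sketch of the rejected and accepted forms against a hypothetical table, not the one from this test:

    -- rejected under SQL v1 (the error below): bare integral into a Timestamp column
    -- UPSERT INTO `/Root/Events` (Key, Ts) VALUES (1, 1746609390229000);

    -- explicit conversions compile:
    UPSERT INTO `/Root/Events` (Key, Ts) VALUES
        (1, CAST(1746609390229000 AS Timestamp)),        -- microseconds since the epoch
        (2, Timestamp("2025-05-07T09:16:30.229000Z"));   -- ISO 8601 literal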
Actor# [2:7501630348851454154:3414] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:17:02.882465Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7501630327376615294:2061];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:17:02.882562Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:17:03.139494Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480 2025-05-07T09:17:03.238576Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7501630353146421799:2524], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:6:25: Error: At function: AsList
:6:46: Error: At function: AsStruct
:3:29: Error: At function: Just, At function: UnsafeTimestampCast
:3:29: Error: Unsafe timestamp cast restricted from SQL v1. 2025-05-07T09:17:03.242910Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2147: SessionId: ydb://session/3?node_id=2&id=OGEyZTA4YjAtZTY4YWRiMWUtNGRkNDU0MGItZjBlODQyODk=, ActorId: [2:7501630353146421720:2513], ActorState: ExecuteState, TraceId: 01jtn0hgv0bqv0sg99phxpnajb, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id:
: Error: Type annotation, code: 1030
:6:25: Error: At function: AsList
:6:46: Error: At function: AsStruct
:3:29: Error: At function: Just, At function: UnsafeTimestampCast
:3:29: Error: Unsafe timestamp cast restricted from SQL v1. Trying to start YDB, gRPC: 10837, MsgBus: 6917 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/001e4c/r3tmp/tmpUu8eQd/pdisk_1.dat 2025-05-07T09:17:04.480179Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T09:17:04.505416Z node 3 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:17:04.517807Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:17:04.517890Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:17:04.519042Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 10837, node 3 2025-05-07T09:17:04.602022Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:17:04.602046Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:17:04.602054Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:17:04.602175Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6917 TClient is connected to server localhost:6917 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:17:05.050628Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:17:05.058079Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T09:17:08.123118Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7501630373388295444:2331], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:17:08.123231Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:17:08.150058Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 2025-05-07T09:17:08.270160Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7501630373388295547:2340], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:17:08.270290Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:17:08.274654Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7501630373388295552:2343], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:17:08.280653Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480 2025-05-07T09:17:08.310266Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7501630373388295554:2344], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-05-07T09:17:08.387801Z node 3 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [3:7501630373388295605:2393] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } Trying to start YDB, gRPC: 65255, MsgBus: 20060 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/001e4c/r3tmp/tmpeydq4Z/pdisk_1.dat 2025-05-07T09:17:09.751714Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T09:17:09.828743Z node 4 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 65255, node 4 2025-05-07T09:17:09.851363Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:17:09.851463Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:17:09.854943Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:17:09.901259Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:17:09.901286Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:17:09.901294Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:17:09.901403Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:20060 TClient is connected to server localhost:20060 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-05-07T09:17:10.402551Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-05-07T09:17:13.584997Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7501630395793202190:2331], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:17:13.585156Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:17:13.601847Z node 4 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [4:7501630395793202211:2303] txid# 281474976715658, issues: { message: "Type \'Datetime64\' specified for column \'DatetimePK\', but support for new date/time 64 types is disabled (EnableTableDatetime64 feature flag is off)" severity: 1 } >> KqpStats::DeferredEffects+UseSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpVectorIndexes::OrderByCosineDistanceNotNullableLevel2 [GOOD] Test command err: Trying to start YDB, gRPC: 30726, MsgBus: 20773 2025-05-07T09:15:53.255062Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501630052008004868:2070];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:15:53.256062Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003bf2/r3tmp/tmp1HB1So/pdisk_1.dat 2025-05-07T09:15:53.769870Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:15:53.776199Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:15:53.776313Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:15:53.778135Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 30726, node 1 2025-05-07T09:15:53.894613Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:15:53.894643Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:15:53.894663Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:15:53.894796Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:20773 TClient is connected to server localhost:20773 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-05-07T09:15:54.753613Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-05-07T09:15:54.787085Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 2025-05-07T09:15:54.901054Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:15:55.101216Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-05-07T09:15:55.227083Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 2025-05-07T09:15:57.119113Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501630069187875686:2406], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:15:57.119268Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:15:57.582496Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T09:15:57.628810Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T09:15:57.662507Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T09:15:57.740162Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T09:15:57.778550Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T09:15:57.824663Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T09:15:57.861250Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T09:15:57.963023Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501630069187876355:2470], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:15:57.963150Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:15:57.963541Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501630069187876360:2473], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:15:57.968221Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-05-07T09:15:57.989473Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501630069187876362:2474], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T09:15:58.063559Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501630073482843709:3420] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:15:58.255585Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501630052008004868:2070];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:15:58.255691Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:15:59.308528Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269877761, Sender [1:7501630077777811286:3600], Recipient [1:7501630052008005267:2186]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-05-07T09:15:59.308578Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4937: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-05-07T09:15:59.308598Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5713: Pipe server connected, at tablet: 72057594046644480 2025-05-07T09:15:59.308644Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271122432, Sender [1:7501630077777811282:3597], Recipient [1:7501630052008005267:2186]: {TEvModifySchemeTransaction txid# 281474976710672 TabletId# 72057594046644480} 2025-05-07T09:15:59.308659Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4851: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-05-07T09:15:59.384337Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/Root" OperationType: ESchemeOpCreateTable CreateTable { Name: "TestTable" Columns { Name: "pk" Type: "Int64" NotNull: true } Columns { Name: "emb" Type: "String" NotNull: true } Columns { Name: "data" Type: "String" NotNull: true } KeyColumnNames: "pk" PartitionConfig { PartitioningPolicy { MinPartitionsCount: 3 } ColumnFamilies { Id: 0 StorageConfig { SysLog { PreferredPoolKind: "test" } Log { PreferredPoolKind: "test" } Data { PreferredPoolKind: "test" } } } } SplitBoundary { KeyPrefix { Tuple { Optional { Int64: 4 } } } } SplitBoundary { KeyPrefix { Tuple { Optional { Int64: 6 } } } } Temporary: false } CreateIndexedTable { } } TxId: 281474976710672 TabletId: 72057594046644480 PeerName: "ipv6:[::1]:32828" , at schemeshard: 72057594046644480 2025-05-07T09:15:59.384739Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_table.cpp:426: TCreateTable Propose, path: /Root/TestTable, opId: 281474976710672:0, at schemeshard: 72057594046644480 2025-05-07T09:15:59.384909Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_create_table.cpp:433: TCreateTable Propose, path: /Root/TestTable, opId: 281474976710672:0, schema: Name: "TestTable" Columns { Name: "pk" Type: "Int64" NotNull: true } Columns { Name: "emb" Type: "String" NotNull: true } Columns { Name: "data" Type: "String" NotNull: true } KeyColumnNames: "pk" PartitionConfig { PartitioningPolicy { MinPartitionsCount: 3 } ColumnFamilies { Id: 0 StorageConfig { SysLog { PreferredPoolKind: "test" } Log { PreferredPoolKind: "test" } Data { 
PreferredPoolKind: "test" } } } } SplitBoundary { KeyPrefix { Tuple { Optional { Int64: 4 } } } } SplitBoundary { KeyPrefix { Tuple { Optional { Int64: 6 } } } } Temporary: false, at schemeshard: 72057594046644480 2025-05-07T09:15:59.385290Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl ... : false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 147 Memory: 137784 } ShardState: 2 UserTablePartOwners: 72075186224037921 NodeId: 2 StartTime: 1746609393423 TableOwnerId: 72057594046644480 FollowerId: 0 2025-05-07T09:17:13.526391Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4877: StateWork, processing event TEvDataShard::TEvPeriodicTableStats 2025-05-07T09:17:13.526429Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:561: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037921 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 12] state 'Ready' dataSize 1240 rowCount 7 cpuUsage 0.0147 2025-05-07T09:17:13.526521Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:568: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037921 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 12] raw table stats: DataSize: 1240 RowCount: 7 IndexSize: 0 InMemSize: 1240 LastAccessTime: 1746609394741 LastUpdateTime: 1746609394741 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 7 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 1 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 2025-05-07T09:17:13.526545Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:608: Will delay TTxStoreTableStats on# 0.099995s, queue# 1 2025-05-07T09:17:13.526730Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269553162, Sender [2:7501630222988278116:2460], Recipient [2:7501630205808407050:2141]: NKikimrTxDataShard.TEvPeriodicTableStats DatashardId: 72075186224037920 TableLocalId: 12 Generation: 1 Round: 3 TableStats { DataSize: 928 RowCount: 4 IndexSize: 0 InMemSize: 928 LastAccessTime: 1746609394744 LastUpdateTime: 1746609394744 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 4 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 1 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 128 Memory: 137784 } ShardState: 2 UserTablePartOwners: 72075186224037920 NodeId: 2 StartTime: 1746609393423 TableOwnerId: 72057594046644480 FollowerId: 0 2025-05-07T09:17:13.526744Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4877: StateWork, processing event TEvDataShard::TEvPeriodicTableStats 2025-05-07T09:17:13.526760Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:561: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037920 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 12] state 'Ready' dataSize 928 rowCount 4 cpuUsage 0.0128 2025-05-07T09:17:13.526845Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:568: Got periodic table stats at tablet 72057594046644480 from shard 
72075186224037920 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 12] raw table stats: DataSize: 928 RowCount: 4 IndexSize: 0 InMemSize: 928 LastAccessTime: 1746609394744 LastUpdateTime: 1746609394744 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 4 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 1 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 2025-05-07T09:17:13.527004Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269553162, Sender [2:7501630222988278115:2459], Recipient [2:7501630205808407050:2141]: NKikimrTxDataShard.TEvPeriodicTableStats DatashardId: 72075186224037919 TableLocalId: 12 Generation: 1 Round: 3 TableStats { DataSize: 1032 RowCount: 5 IndexSize: 0 InMemSize: 1032 LastAccessTime: 1746609394740 LastUpdateTime: 1746609394740 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 5 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 1 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 152 Memory: 137784 } ShardState: 2 UserTablePartOwners: 72075186224037919 NodeId: 2 StartTime: 1746609393422 TableOwnerId: 72057594046644480 FollowerId: 0 2025-05-07T09:17:13.527016Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4877: StateWork, processing event TEvDataShard::TEvPeriodicTableStats 2025-05-07T09:17:13.527031Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:561: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037919 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 12] state 'Ready' dataSize 1032 rowCount 5 cpuUsage 0.0152 2025-05-07T09:17:13.527115Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:568: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037919 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 12] raw table stats: DataSize: 1032 RowCount: 5 IndexSize: 0 InMemSize: 1032 LastAccessTime: 1746609394740 LastUpdateTime: 1746609394740 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 5 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 1 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 2025-05-07T09:17:13.630146Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 2146435092, Sender [0:0:0], Recipient [2:7501630205808407050:2141]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats 2025-05-07T09:17:13.630195Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5015: StateWork, processing event TEvPrivate::TEvPersistTableStats 2025-05-07T09:17:13.630224Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:588: Started TEvPersistStats at tablet 72057594046644480, queue size# 3 2025-05-07T09:17:13.630276Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:599: Will execute TTxStoreStats, queue# 3 2025-05-07T09:17:13.630293Z node 2 :FLAT_TX_SCHEMESHARD TRACE: 
schemeshard__table_stats.cpp:608: Will delay TTxStoreTableStats on# 0.000000s, queue# 3 2025-05-07T09:17:13.630357Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 12 shard idx 72057594046644480:34 data size 1240 row count 7 2025-05-07T09:17:13.630415Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037921 maps to shardIdx: 72057594046644480:34 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 12], pathId map=TuplePrimaryDescending, is column=0, is olap=0, RowCount 7, DataSize 1240 2025-05-07T09:17:13.630430Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037921, followerId 0 2025-05-07T09:17:13.630490Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:34 with partCount# 0, rowCount# 7, searchHeight# 1, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-05-07T09:17:13.630542Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:483: Do not want to split tablet 72075186224037921 2025-05-07T09:17:13.630571Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 12 shard idx 72057594046644480:33 data size 928 row count 4 2025-05-07T09:17:13.630602Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037920 maps to shardIdx: 72057594046644480:33 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 12], pathId map=TuplePrimaryDescending, is column=0, is olap=0, RowCount 4, DataSize 928 2025-05-07T09:17:13.630613Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037920, followerId 0 2025-05-07T09:17:13.630643Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:33 with partCount# 0, rowCount# 4, searchHeight# 1, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-05-07T09:17:13.630655Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:483: Do not want to split tablet 72075186224037920 2025-05-07T09:17:13.630671Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 12 shard idx 72057594046644480:32 data size 1032 row count 5 2025-05-07T09:17:13.630700Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037919 maps to shardIdx: 72057594046644480:32 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 12], pathId map=TuplePrimaryDescending, is column=0, is olap=0, RowCount 5, DataSize 1032 2025-05-07T09:17:13.630708Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037919, followerId 0 2025-05-07T09:17:13.630731Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:32 with partCount# 0, rowCount# 5, searchHeight# 1, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-05-07T09:17:13.630742Z node 2 :FLAT_TX_SCHEMESHARD 
DEBUG: schemeshard__table_stats.cpp:483: Do not want to split tablet 72075186224037919 2025-05-07T09:17:13.630796Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-05-07T09:17:13.630905Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 2146435092, Sender [0:0:0], Recipient [2:7501630205808407050:2141]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats 2025-05-07T09:17:13.630921Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5015: StateWork, processing event TEvPrivate::TEvPersistTableStats 2025-05-07T09:17:13.630942Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:588: Started TEvPersistStats at tablet 72057594046644480, queue size# 0 2025-05-07T09:17:13.681865Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271125000, Sender [0:0:0], Recipient [2:7501630205808407050:2141]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T09:17:13.681910Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4848: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T09:17:13.681959Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271124999, Sender [2:7501630205808407050:2141], Recipient [2:7501630205808407050:2141]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T09:17:13.681996Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4847: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime >> KqpQuery::CreateAsSelect_BadCases [GOOD] >> KqpQuery::CreateAsSelectTypes-NotNull-IsOlap >> KqpQuery::ReadOverloaded+StreamLookup [GOOD] >> KqpQuery::ReadOverloaded-StreamLookup >> KqpStats::JoinStatsBasicScan [GOOD] >> KqpStats::DeferredEffects-UseSink >> KqpQuery::QueryStats-UseSink [GOOD] >> KqpExplain::UpdateSecondaryConditionalPrimaryKey-UseSink [GOOD] >> KqpExplain::UpdateSecondaryConditionalSecondaryKey+UseSink >> KqpQuery::OlapCreateAsSelect_Simple >> KqpMultishardIndex::DataColumnWrite-UseSink [FAIL] >> KqpExplain::ReadTableRangesFullScan [GOOD] >> KqpExplain::IdxFullscan [GOOD] >> KqpExplain::MultiJoinCteLinks >> KqpExplain::UpdateOnSecondaryWithoutSecondaryKey+UseSink [GOOD] >> KqpExplain::UpdateOnSecondaryWithoutSecondaryKey-UseSink >> KqpQuery::QueryCacheTtl >> KqpQuery::UpdateThenDelete-UseSink [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/query/unittest >> KqpQuery::QueryStats-UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 14010, MsgBus: 25248 2025-05-07T09:16:47.599819Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501630283238542117:2200];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:16:47.599979Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0020cf/r3tmp/tmpBIWPMH/pdisk_1.dat 2025-05-07T09:16:48.294193Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:16:48.294322Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 
2025-05-07T09:16:48.303167Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:16:48.322981Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 14010, node 1 2025-05-07T09:16:48.510602Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:16:48.510654Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:16:48.510662Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:16:48.510811Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:25248 TClient is connected to server localhost:25248 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:16:49.421549Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:49.443482Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T09:16:49.452784Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:49.651471Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:49.860953Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:49.976456Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T09:16:51.723062Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501630300418412822:2407], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:51.723191Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:52.248416Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T09:16:52.286811Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T09:16:52.324134Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T09:16:52.365512Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T09:16:52.420050Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T09:16:52.480435Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T09:16:52.526795Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T09:16:52.583104Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501630283238542117:2200];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:16:52.583180Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:16:52.663551Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501630304713380775:2471], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:52.663650Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:52.670119Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501630304713380780:2474], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:52.674208Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-05-07T09:16:52.696112Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710669, at schemeshard: 72057594046644480 2025-05-07T09:16:52.696389Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501630304713380782:2475], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T09:16:52.763132Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501630304713380835:3420] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:16:54.215960Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2578: SessionId: ydb://session/3?node_id=1&id=ODA1MDI1OTEtMTZkZmZjYjItZTIwZjBjMWEtMTM0OGFiMmE=, ActorId: [1:7501630313303315698:2516], ActorState: ExecuteState, TraceId: 01jtn0h80g8ff0yy0rd8wrpzfy, Create QueryResponse for error on request, msg:
: Error: Request timeout 50ms exceeded
: Error: Cancelling after 55ms during compilation Trying to start YDB, gRPC: 29410, MsgBus: 21812 2025-05-07T09:16:55.478710Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7501630317276463778:2060];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:16:55.478755Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0020cf/r3tmp/tmp1gTWrK/pdisk_1.dat 2025-05-07T09:16:55.659122Z node 2 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:16:55.661741Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:16:55.661824Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:16:55.667183Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 29410, node 2 2025-05-07T09:16:55.714517Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:16:55.714539Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:16:55.714548Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:16:55.714657Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:21812 TClient is connected to server localhost:21812 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathSta ... {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:17:07.272699Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7501630368154690616:2473], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:17:07.277140Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-05-07T09:17:07.298357Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7501630368154690618:2474], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-05-07T09:17:07.394740Z node 3 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [3:7501630368154690669:3414] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:17:07.752603Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7501630346679851857:2073];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:17:07.752714Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; query_phases { duration_us: 15467 table_access { name: "/Root/EightShard" updates { rows: 3 bytes: 47 } partitions_count: 1 } table_access { name: "/Root/TwoShard" reads { rows: 3 bytes: 35 } partitions_count: 1 } cpu_time_us: 3950 affected_shards: 2 } compilation { duration_us: 265899 cpu_time_us: 262214 } process_cpu_time_us: 415 total_duration_us: 286735 total_cpu_time_us: 266579 Trying to start YDB, gRPC: 61411, MsgBus: 29445 2025-05-07T09:17:10.004250Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7501630384209054870:2062];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:17:10.004383Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0020cf/r3tmp/tmpUxnpIV/pdisk_1.dat 2025-05-07T09:17:10.132649Z node 4 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:17:10.169863Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:17:10.169962Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:17:10.172775Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 61411, node 4 2025-05-07T09:17:10.334686Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:17:10.334720Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:17:10.334729Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:17:10.335191Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:29445 TClient is connected to server localhost:29445 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:17:10.917363Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:17:10.927137Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-05-07T09:17:10.943780Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:17:11.068783Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:17:11.254161Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:17:11.346195Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:17:14.021326Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7501630397093958401:2406], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:17:14.021456Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:17:14.071087Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-05-07T09:17:14.117472Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-05-07T09:17:14.157321Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-05-07T09:17:14.204112Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-05-07T09:17:14.252059Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-05-07T09:17:14.330985Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-05-07T09:17:14.382273Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-05-07T09:17:14.456212Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7501630401388926359:2470], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:17:14.458230Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:17:14.458684Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7501630401388926364:2473], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:17:14.463670Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-05-07T09:17:14.482675Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7501630401388926366:2474], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-05-07T09:17:14.585507Z node 4 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [4:7501630401388926417:3412] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:17:15.004725Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7501630384209054870:2062];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:17:15.004808Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; query_phases { duration_us: 5138 table_access { name: "/Root/TwoShard" reads { rows: 3 bytes: 35 } partitions_count: 1 } cpu_time_us: 3692 affected_shards: 1 } query_phases { duration_us: 9565 table_access { name: "/Root/EightShard" updates { rows: 3 bytes: 47 } partitions_count: 1 } cpu_time_us: 2180 affected_shards: 2 } compilation { duration_us: 266936 cpu_time_us: 262342 } process_cpu_time_us: 657 total_duration_us: 292447 total_cpu_time_us: 268871 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/query/unittest >> KqpExplain::ReadTableRangesFullScan [GOOD] Test command err: Trying to start YDB, gRPC: 28084, MsgBus: 10010 2025-05-07T09:16:47.620014Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501630282407839454:2070];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:16:47.620301Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/001e71/r3tmp/tmpCfGopM/pdisk_1.dat 2025-05-07T09:16:48.339518Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:16:48.339673Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:16:48.342156Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:16:48.344309Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 28084, node 1 2025-05-07T09:16:48.522631Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:16:48.522661Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:16:48.522671Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:16:48.522815Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:10010 TClient is connected to server localhost:10010 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:16:49.351195Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:49.389448Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:49.532676Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:49.746191Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:49.829804Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:51.845238Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501630299587710268:2407], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:51.845332Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:52.248559Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T09:16:52.290653Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T09:16:52.327537Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T09:16:52.386659Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T09:16:52.419743Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T09:16:52.496632Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T09:16:52.551391Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T09:16:52.622159Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501630282407839454:2070];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:16:52.622290Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:16:52.638402Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501630303882678226:2470], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:52.638496Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:52.638751Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501630303882678231:2473], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:52.643086Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-05-07T09:16:52.666489Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501630303882678233:2474], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T09:16:52.727066Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501630303882678286:3424] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } {"Plan":{"Plans":[{"PlanNodeId":4,"Plans":[{"PlanNodeId":3,"Plans":[{"PlanNodeId":2,"Plans":[{"Tables":["EightShard"],"PlanNodeId":1,"Operators":[{"Inputs":[{"InternalOperatorId":1}],"SortBy":"row.Text","Name":"Sort"},{"Scan":"Parallel","ReadRange":["Key [150, 266]"],"E-Size":"No estimate","Name":"TableRangeScan","Inputs":[],"Path":"\/Root\/EightShard","E-Rows":"No estimate","Table":"EightShard","ReadColumns":["Data","Key","Text"],"E-Cost":"No estimate"}],"Node Type":"Sort-TableRangeScan"}],"Node Type":"Merge","SortColumns":["Text (Asc)"],"PlanNodeType":"Connection"}],"Node Type":"Stage"}],"Node Type":"ResultSet","PlanNodeType":"ResultSet"}],"Node Type":"Query","Stats":{"ResourcePoolId":"default"},"PlanNodeType":"Query"},"meta":{"version":"0.2","type":"query"},"tables":[{"name":"\/Root\/EightShard","reads":[{"columns":["Data","Key","Text"],"scan_by":["Key [150, 266]"],"type":"Scan"}]}],"SimplifiedPlan":{"PlanNodeId":0,"Plans":[{"PlanNodeId":1,"Plans":[{"PlanNodeId":4,"Plans":[{"PlanNodeId":5,"Operators":[{"Scan":"Parallel","ReadRange":["Key [150, 266]"],"E-Size":"No estimate","Name":"TableRangeScan","Path":"\/Root\/EightShard","E-Rows":"No estimate","Table":"EightShard","ReadColumns":["Data","Key","Text"],"E-Cost":"No estimate"}],"Node Type":"TableRangeScan"}],"Operators":[{"SortBy":"row.Text","Name":"Sort"}],"Node Type":"Sort"}],"Node Type":"ResultSet","PlanNodeType":"ResultSet"}],"Node Type":"Query","OptimizerStats":{"EquiJoinsCount":0,"JoinsCount":0},"PlanNodeType":"Query"}} Trying to start YDB, gRPC: 9276, MsgBus: 5386 2025-05-07T09:16:55.604308Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7501630318699721273:2056];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:16:55.604370Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/001e71/r3tmp/tmprt3BOI/pdisk_1.dat 2025-05-07T09:16:55.942758Z node 2 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:16:55.964643Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:16:55.964742Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:16:55.967282Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 9276, node 2 2025-05-07T09:16:56.038622Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:16:56.038644Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file ... 
ase.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/001e71/r3tmp/tmpQsDqlU/pdisk_1.dat 2025-05-07T09:17:10.866848Z node 4 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:17:10.869071Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:17:10.869151Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:17:10.874526Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 22993, node 4 2025-05-07T09:17:10.942562Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:17:10.942592Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:17:10.942602Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:17:10.942745Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:11596 TClient is connected to server localhost:11596 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:17:11.563352Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:17:11.576680Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:17:11.659843Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:17:11.879360Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T09:17:11.963508Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:17:14.572944Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7501630400926547795:2406], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:17:14.573032Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:17:14.651879Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T09:17:14.693280Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T09:17:14.763951Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T09:17:14.802371Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T09:17:14.862578Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T09:17:14.957337Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T09:17:15.005232Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T09:17:15.089524Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7501630405221515754:2470], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:17:15.089637Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:17:15.089949Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7501630405221515759:2473], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:17:15.095551Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-05-07T09:17:15.119439Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7501630405221515761:2474], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T09:17:15.193302Z node 4 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [4:7501630405221515812:3418] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:17:15.610134Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7501630383746676969:2066];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:17:15.703011Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:17:16.422464Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480 {"Plan":{"Plans":[{"PlanNodeId":4,"Plans":[{"PlanNodeId":3,"Plans":[{"PlanNodeId":2,"Plans":[{"Tables":["TwoKeys"],"PlanNodeId":1,"Operators":[{"Scan":"Parallel","E-Size":"No estimate","ReadRanges":["Key1 (-∞, +∞)","Key2 (-∞, +∞)"],"Name":"TableFullScan","Inputs":[],"Path":"\/Root\/TwoKeys","E-Rows":"No estimate","Table":"TwoKeys","ReadColumns":["Key1","Key2","Value"],"E-Cost":"No estimate"}],"Node Type":"TableFullScan"}],"Node Type":"UnionAll","PlanNodeType":"Connection"}],"Node Type":"Collect"}],"Node Type":"ResultSet","PlanNodeType":"ResultSet"}],"Node Type":"Query","Stats":{"ResourcePoolId":"default"},"PlanNodeType":"Query"},"meta":{"version":"0.2","type":"query"},"tables":[{"name":"\/Root\/TwoKeys","reads":[{"columns":["Key1","Key2","Value"],"scan_by":["Key1 (-∞, +∞)","Key2 (-∞, +∞)"],"type":"FullScan"}]}],"SimplifiedPlan":{"PlanNodeId":0,"Plans":[{"PlanNodeId":1,"Plans":[{"PlanNodeId":4,"Operators":[{"Scan":"Parallel","E-Size":"No estimate","ReadRanges":["Key1 (-∞, +∞)","Key2 (-∞, +∞)"],"Name":"TableFullScan","Path":"\/Root\/TwoKeys","E-Rows":"No estimate","Table":"TwoKeys","ReadColumns":["Key1","Key2","Value"],"E-Cost":"No estimate"}],"Node Type":"TableFullScan"}],"Node Type":"ResultSet","PlanNodeType":"ResultSet"}],"Node Type":"Query","OptimizerStats":{"EquiJoinsCount":0,"JoinsCount":0},"PlanNodeType":"Query"}} {"Plan":{"Plans":[{"PlanNodeId":4,"Plans":[{"PlanNodeId":3,"Plans":[{"PlanNodeId":2,"Plans":[{"Tables":["TwoKeys"],"PlanNodeId":1,"Operators":[{"Inputs":[{"InternalOperatorId":1}],"E-Rows":"No estimate","Predicate":"item.Key2 \u003E 101","Name":"Filter","E-Size":"No estimate","E-Cost":"No estimate"},{"Scan":"Parallel","E-Size":"No estimate","ReadRanges":["Key1 (-∞, +∞)","Key2 (-∞, +∞)"],"Name":"TableFullScan","Inputs":[],"Path":"\/Root\/TwoKeys","E-Rows":"No estimate","Table":"TwoKeys","ReadColumns":["Key1","Key2","Value"],"E-Cost":"No estimate"}],"Node Type":"Filter-TableFullScan"}],"Node Type":"UnionAll","PlanNodeType":"Connection"}],"Node Type":"Collect"}],"Node Type":"ResultSet","PlanNodeType":"ResultSet"}],"Node Type":"Query","Stats":{"ResourcePoolId":"default"},"PlanNodeType":"Query"},"meta":{"version":"0.2","type":"query"},"tables":[{"name":"\/Root\/TwoKeys","reads":[{"columns":["Key1","Key2","Value"],"scan_by":["Key1 (-∞, +∞)","Key2 (-∞, 
+∞)"],"type":"FullScan"}]}],"SimplifiedPlan":{"PlanNodeId":0,"Plans":[{"PlanNodeId":1,"Plans":[{"PlanNodeId":4,"Plans":[{"PlanNodeId":5,"Operators":[{"Scan":"Parallel","E-Size":"No estimate","ReadRanges":["Key1 (-∞, +∞)","Key2 (-∞, +∞)"],"Name":"TableFullScan","Path":"\/Root\/TwoKeys","E-Rows":"No estimate","Table":"TwoKeys","ReadColumns":["Key1","Key2","Value"],"E-Cost":"No estimate"}],"Node Type":"TableFullScan"}],"Operators":[{"E-Rows":"No estimate","Predicate":"item.Key2 \u003E 101","Name":"Filter","E-Size":"No estimate","E-Cost":"No estimate"}],"Node Type":"Filter"}],"Node Type":"ResultSet","PlanNodeType":"ResultSet"}],"Node Type":"Query","OptimizerStats":{"EquiJoinsCount":0,"JoinsCount":0},"PlanNodeType":"Query"}} >> KqpTypes::DyNumberCompare [GOOD] >> KqpTypes::SelectNull ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/query/unittest >> KqpQuery::UpdateThenDelete-UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 24370, MsgBus: 10413 2025-05-07T09:16:47.578572Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501630283413109491:2267];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:16:47.578800Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002089/r3tmp/tmpmcOBpH/pdisk_1.dat 2025-05-07T09:16:48.180794Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:16:48.200432Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:16:48.200564Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:16:48.203398Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 24370, node 1 2025-05-07T09:16:48.463369Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:16:48.463392Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:16:48.463396Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:16:48.463644Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:10413 TClient is connected to server localhost:10413 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:16:49.317318Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:49.334871Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T09:16:49.344938Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:49.616601Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:49.897712Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:49.977495Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:51.723423Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501630300592980122:2407], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:51.723522Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:52.244322Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T09:16:52.323441Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T09:16:52.356663Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T09:16:52.387522Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T09:16:52.450206Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T09:16:52.489587Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T09:16:52.564965Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T09:16:52.571249Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501630283413109491:2267];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:16:52.571349Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:16:52.653520Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501630304887948088:2471], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:52.653581Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:52.653737Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501630304887948093:2474], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:52.658042Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-05-07T09:16:52.676002Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710669, at schemeshard: 72057594046644480 2025-05-07T09:16:52.676850Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501630304887948095:2475], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T09:16:52.745893Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501630304887948148:3423] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:16:55.143575Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7501630317772850352:2528], status: GENERIC_ERROR, issues:
:3:26: Error: mismatched input '[' expecting {'*', '(', '@', '$', ABORT, ACTION, ADD, AFTER, ALL, ALTER, ANALYZE, AND, ANSI, ANY, ARRAY, AS, ASC, ASSUME, ASYMMETRIC, ASYNC, AT, ATTACH, ATTRIBUTES, AUTOINCREMENT, BACKUP, BATCH, COLLECTION, BEFORE, BEGIN, BERNOULLI, BETWEEN, BITCAST, BY, CALLABLE, CASCADE, CASE, CAST, CHANGEFEED, CHECK, CLASSIFIER, COLLATE, COLUMN, COLUMNS, COMMIT, COMPACT, CONDITIONAL, CONFLICT, CONNECT, CONSTRAINT, CONSUMER, COVER, CREATE, CROSS, CUBE, CURRENT, CURRENT_DATE, CURRENT_TIME, CURRENT_TIMESTAMP, DATA, DATABASE, DECIMAL, DECLARE, DEFAULT, DEFERRABLE, DEFERRED, DEFINE, DELETE, DESC, DESCRIBE, DETACH, DICT, DIRECTORY, DISABLE, DISCARD, DISTINCT, DO, DROP, EACH, ELSE, EMPTY, EMPTY_ACTION, ENCRYPTED, END, ENUM, ERASE, ERROR, ESCAPE, EVALUATE, EXCEPT, EXCLUDE, EXCLUSION, EXCLUSIVE, EXISTS, EXPLAIN, EXPORT, EXTERNAL, FAIL, FAMILY, FILTER, FIRST, FLATTEN, FLOW, FOLLOWING, FOR, FOREIGN, FROM, FULL, FUNCTION, GLOB, GLOBAL, GRANT, GROUP, GROUPING, GROUPS, HASH, HAVING, HOP, IF, IGNORE, ILIKE, IMMEDIATE, IMPORT, IN, INCREMENT, INCREMENTAL, INDEX, INDEXED, INHERITS, INITIAL, INITIALLY, INNER, INSERT, INSTEAD, INTERSECT, INTO, IS, ISNULL, JOIN, JSON_EXISTS, JSON_QUERY, JSON_VALUE, KEY, LAST, LEFT, LEGACY, LIKE, LIMIT, LIST, LOCAL, LOGIN, MANAGE, MATCH, MATCHES, MATCH_RECOGNIZE, MEASURES, MICROSECONDS, MILLISECONDS, MODIFY, NANOSECONDS, NATURAL, NEXT, NO, NOLOGIN, NOT, NOTNULL, NULL, NULLS, OBJECT, OF, OFFSET, OMIT, ON, ONE, ONLY, OPTION, OPTIONAL, OR, ORDER, OTHERS, OUTER, OVER, OWNER, PARALLEL, PARTITION, PASSING, PASSWORD, PAST, PATTERN, PER, PERMUTE, PLAN, POOL, PRAGMA, PRECEDING, PRESORT, PRIMARY, PRIVILEGES, PROCESS, QUERY, QUEUE, RAISE, RANGE, REDUCE, REFERENCES, REGEXP, REINDEX, RELEASE, REMOVE, RENAME, REPLACE, REPLICATION, RESET, RESOURCE, RESPECT, RESTART, RESTORE, RESTRICT, RESULT, RETURN, RETURNING, REVERT, REVOKE, RIGHT, RLIKE, ROLLBACK, ROLLUP, ROW, ROWS, SAMPLE, SAVEPOINT, SCHEMA, SECONDS, SEEK, SELECT, SEMI, SET, SETS, SHOW, TSKIP, SEQUENCE, SOURCE, START, STREAM, STRUCT, SUBQUERY, SUBSET, SYMBOLS, SYMMETRIC, SYNC, SYSTEM, TABLE, TABLES, TABLESAMPLE, TABLESTORE, TAGGED, TEMP, TEMPORARY, THEN, TIES, TO, TOPIC, TRANSACTION, TRANSFER, TRIGGER, TUPLE, TYPE, UNBOUNDED, UNCONDITIONAL, UNION, UNIQUE, UNKNOWN, UNMATCHED, UPDATE, UPSERT, USE, USER, USING, VACUUM, VALUES, VARIANT, VIEW, VIRTUAL, WHEN, WHERE, WINDOW, WITH, WITHOUT, WRAPPER, XOR, STRI ... d ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T09:17:07.682940Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T09:17:07.771292Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T09:17:07.874138Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7501630368105221885:2470], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:17:07.874276Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:17:07.875338Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7501630368105221890:2473], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:17:07.879193Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-05-07T09:17:07.898133Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7501630368105221892:2474], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T09:17:07.956728Z node 3 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [3:7501630368105221943:3423] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:17:08.258385Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7501630350925350360:2061];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:17:08.258474Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 7726, MsgBus: 5856 2025-05-07T09:17:10.726329Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7501630384946221071:2132];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:17:10.745062Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002089/r3tmp/tmpkPfDVu/pdisk_1.dat 2025-05-07T09:17:10.940734Z node 4 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:17:10.958779Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:17:10.958873Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:17:10.965543Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 7726, node 4 2025-05-07T09:17:11.042500Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:17:11.042526Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:17:11.042537Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:17:11.042725Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:5856 TClient is connected to server localhost:5856 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:17:11.624109Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:17:11.641713Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:17:11.717494Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:17:11.912078Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:17:12.020729Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:17:14.839661Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7501630402126091830:2406], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:17:14.839797Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:17:14.936780Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T09:17:14.988790Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T09:17:15.028950Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T09:17:15.071429Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T09:17:15.116030Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T09:17:15.156594Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T09:17:15.233931Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T09:17:15.341482Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7501630406421059789:2470], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:17:15.341600Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:17:15.341980Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7501630406421059794:2473], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:17:15.347012Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-05-07T09:17:15.364240Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7501630406421059796:2474], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T09:17:15.446144Z node 4 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [4:7501630406421059847:3416] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:17:15.730072Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7501630384946221071:2132];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:17:15.730201Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; [] >> KqpParams::ImplicitDifferentParameterTypesQueryCacheCheck [GOOD] >> KqpParams::InvalidJson >> KqpLimits::TooBigQuery+useSink >> KqpParams::Decimal+QueryService+UseSink [GOOD] >> KqpStats::OneShardLocalExec-UseSink [GOOD] >> KqpExplain::PrecomputeRange >> KqpParams::Decimal-QueryService+UseSink [GOOD] >> ShowCreateView::WithTwoTablePathPrefixes [GOOD] >> SystemView::AuthGroups >> KqpExplain::FewEffects-UseSink [GOOD] >> KqpExplain::FullOuterJoin >> KqpQuery::QueryFromSqs [GOOD] >> KqpExplain::SsaProgramInJsonPlan [GOOD] >> KqpExplain::UpdateConditional+UseSink >> KqpLimits::KqpMkqlMemoryLimitException >> KqpPrefixedVectorIndexes::OrderByCosineDistanceNotNullableLevel4 [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/query/unittest >> KqpStats::OneShardLocalExec-UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 6654, MsgBus: 17220 2025-05-07T09:16:51.803617Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501630299207265391:2217];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:16:51.804021Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/001e3d/r3tmp/tmptrQo5Z/pdisk_1.dat 2025-05-07T09:16:52.423763Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:16:52.424603Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:16:52.424681Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:16:52.429501Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 6654, node 1 2025-05-07T09:16:52.577626Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:16:52.577658Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:16:52.577665Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:16:52.577782Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:17220 TClient is connected to server localhost:17220 WaitRootIsUp 
'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:16:53.453891Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:53.470827Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T09:16:53.485762Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:53.725077Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:54.028841Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:54.162800Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:55.964094Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501630316387136085:2408], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:55.964217Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:56.280675Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T09:16:56.327972Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T09:16:56.363647Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T09:16:56.423950Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T09:16:56.460038Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T09:16:56.497731Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T09:16:56.581592Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T09:16:56.657061Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501630320682104040:2471], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:56.657200Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:56.657520Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501630320682104045:2474], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:56.661398Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-05-07T09:16:56.679740Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501630320682104047:2475], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T09:16:56.777677Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501630320682104098:3424] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:16:56.800499Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501630299207265391:2217];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:16:56.800560Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:16:58.428905Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1746609418400, txId: 281474976710672] shutting down Trying to start YDB, gRPC: 18627, MsgBus: 5696 2025-05-07T09:16:59.456743Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7501630337325924837:2067];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:16:59.456801Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/001e3d/r3tmp/tmpQFvka9/pdisk_1.dat 2025-05-07T09:16:59.622419Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:16:59.622502Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:16:59.622910Z node 2 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:16:59.636232Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 18627, node 2 2025-05-07T09:16:59.750507Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:16:59.750529Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:16:59.750536Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:16:59.750658Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:5696 TClient is connected to server localhost:5696 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 184467440737095 ... 
meOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-05-07T09:17:10.289642Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-05-07T09:17:10.372662Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-05-07T09:17:10.460242Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-05-07T09:17:10.500965Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-05-07T09:17:10.569487Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7501630380876057916:2470], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:17:10.569591Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:17:10.569950Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7501630380876057921:2473], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:17:10.573711Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-05-07T09:17:10.585368Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7501630380876057923:2474], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-05-07T09:17:10.656147Z node 3 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [3:7501630380876057974:3420] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:17:11.105800Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7501630363696186409:2058];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:17:11.105882Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 19177, MsgBus: 15786 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/001e3d/r3tmp/tmp0H1b5z/pdisk_1.dat 2025-05-07T09:17:13.520609Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T09:17:13.619619Z node 4 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:17:13.646061Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:17:13.646159Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:17:13.647143Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 19177, node 4 2025-05-07T09:17:13.751947Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:17:13.751969Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:17:13.751978Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:17:13.752124Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:15786 TClient is connected to server localhost:15786 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-05-07T09:17:14.441790Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:17:14.449672Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T09:17:14.458732Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:17:14.523337Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:17:14.712788Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:17:14.822327Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:17:17.594273Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7501630411554810897:2407], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:17:17.594377Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:17:17.651464Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T09:17:17.701538Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T09:17:17.777256Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T09:17:17.858418Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T09:17:17.916062Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T09:17:17.997810Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T09:17:18.088352Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T09:17:18.194364Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7501630415849778866:2470], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:17:18.194492Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:17:18.194553Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7501630415849778871:2473], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:17:18.199005Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-05-07T09:17:18.211257Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7501630415849778873:2474], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T09:17:18.271047Z node 4 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [4:7501630415849778924:3422] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/query/unittest >> KqpParams::Decimal+QueryService+UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 26537, MsgBus: 11809 2025-05-07T09:16:47.563104Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501630285546125473:2061];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:16:47.563154Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002209/r3tmp/tmpRj6dmK/pdisk_1.dat 2025-05-07T09:16:48.366946Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:16:48.381361Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:16:48.381450Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:16:48.384420Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 26537, node 1 2025-05-07T09:16:48.618909Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:16:48.618955Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:16:48.618962Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:16:48.619128Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:11809 TClient is connected to server localhost:11809 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-05-07T09:16:49.335919Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:49.354675Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T09:16:49.373606Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:49.627673Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:49.817009Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:49.940939Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:51.921803Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501630302725996316:2408], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:51.921929Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:52.274325Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T09:16:52.356295Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T09:16:52.419167Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T09:16:52.459896Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T09:16:52.500064Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T09:16:52.567036Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501630285546125473:2061];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:16:52.568068Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:16:52.580972Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T09:16:52.670365Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T09:16:52.772286Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501630307020964285:2472], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:52.772401Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:52.773614Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501630307020964290:2475], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:52.778941Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-05-07T09:16:52.796903Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501630307020964292:2476], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T09:16:52.870750Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501630307020964345:3427] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } Trying to start YDB, gRPC: 19535, MsgBus: 6732 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002209/r3tmp/tmpB6g7WT/pdisk_1.dat 2025-05-07T09:16:56.159304Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T09:16:56.223798Z node 2 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:16:56.249452Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:16:56.249529Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:16:56.252541Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 19535, node 2 2025-05-07T09:16:56.386525Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:16:56.386546Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:16:56.386553Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:16:56.386655Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6732 TClient is connected to server localhost:6732 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:16:56.967272Z node 2 :FLAT_TX_SCHEMESHARD WARN ... 
2057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:17:12.364556Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 1185, node 4 2025-05-07T09:17:12.452553Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:17:12.452577Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:17:12.452586Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:17:12.452704Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:30010 TClient is connected to server localhost:30010 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:17:13.003777Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:17:13.020071Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:17:13.103945Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:17:13.309831Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:17:13.390065Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:17:16.139229Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7501630408756806015:2406], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:17:16.139313Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:17:16.223603Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-05-07T09:17:16.274430Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-05-07T09:17:16.383840Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-05-07T09:17:16.423531Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-05-07T09:17:16.468846Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-05-07T09:17:16.524010Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-05-07T09:17:16.589491Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-05-07T09:17:16.666501Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7501630408756806678:2470], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:17:16.666648Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:17:16.666713Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7501630408756806683:2473], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:17:16.671057Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-05-07T09:17:16.684605Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7501630408756806685:2474], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking }
2025-05-07T09:17:16.744320Z node 4 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [4:7501630408756806736:3422] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 }
2025-05-07T09:17:17.210080Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7501630391576935183:2061];send_to=[0:7307199536658146131:7762515];
2025-05-07T09:17:17.210158Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout;
2025-05-07T09:17:18.085726Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480
2025-05-07T09:17:19.382176Z node 4 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [4:7501630421641709133:2567], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:4:17: Error: At function: RemovePrefixMembers, At function: Unordered, At function: PersistableRepr, At function: OrderedSqlProject, At function: SqlProjectItem
:3:25: Error: At function: Parameter, At function: DataType
:3:25: Error: Invalid decimal precision: 99
2025-05-07T09:17:19.384012Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2147: SessionId: ydb://session/3?node_id=4&id=NzFhNmVjM2QtMjk4ZDg0NzMtYWQ2NjczOTItNzQ1NjM1OTY=, ActorId: [4:7501630421641709131:2566], ActorState: ExecuteState, TraceId: 01jtn0j0ky7pcd6yxqrkerc26q, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id:
2025-05-07T09:17:19.507525Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2578: SessionId: ydb://session/3?node_id=4&id=N2I3Y2RjN2ItN2FhNzI4ZGMtOTkxOTNjMzMtOWUzMGJiZTE=, ActorId: [4:7501630421641709137:2569], ActorState: ExecuteState, TraceId: 01jtn0j0n7ez6h0mt4m0h50ft7, Create QueryResponse for error on request, msg: ydb/core/kqp/session_actor/kqp_session_actor.cpp:1174: ydb/core/kqp/query_data/kqp_query_data.cpp:271: Parameter $value22 type mismatch, expected: { Kind: Data Data { Scheme: 4865 DecimalParams { Precision: 22 Scale: 9 } } }, actual: Type (Data), schemeType: Decimal(35,10), schemeTypeId: 4865
2025-05-07T09:17:19.558222Z node 4 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [4:7501630421641709152:2575], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:7:29: Error: At function: KiWriteTable!
:7:50: Error: Failed to convert type: Struct<'Key':Int32,'Value22':Decimal(35,10),'Value35':Decimal(35,10)> to Struct<'Key':Int32?,'Value22':Decimal(22,9)?,'Value35':Decimal(35,10)?>
:4:25: Error: Implicit decimal cast would lose precision
:7:50: Error: Failed to convert 'Value22': Decimal(35,10) to Optional
:7:50: Error: Failed to convert input columns types to scheme types, code: 2031
2025-05-07T09:17:19.560139Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2147: SessionId: ydb://session/3?node_id=4&id=ZTU1ZmExMTgtNzU0OGYxNzYtYWRiNGQxMzItODA3MzIyNzE=, ActorId: [4:7501630421641709150:2574], ActorState: ExecuteState, TraceId: 01jtn0j0s26k94j56gcbrt2kmq, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id:
2025-05-07T09:17:19.601232Z node 4 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [4:7501630421641709163:2580], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:3:29: Error: At function: KiWriteTable!
:3:50: Error: Failed to convert type: Struct<'Key':Int32,'Value22':Decimal(35,10),'Value35':Decimal(35,10)> to Struct<'Key':Int32?,'Value22':Decimal(22,9)?,'Value35':Decimal(35,10)?>
:0:14: Error: Implicit decimal cast would lose precision
:3:50: Error: Failed to convert 'Value22': Decimal(35,10) to Optional
:3:50: Error: Failed to convert input columns types to scheme types, code: 2031
2025-05-07T09:17:19.601560Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2147: SessionId: ydb://session/3?node_id=4&id=OWJlOTFkYi1iODM0NDg0LTNmNTAzODkwLTU1MGEwMDgy, ActorId: [4:7501630421641709161:2579], ActorState: ExecuteState, TraceId: 01jtn0j0tv78njga67h40xdd0q, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id:
>> TSchemeshardBorrowedCompactionTest::SchemeshardShouldNotCompactBorrowedAfterSplitMergeWhenDisabled [GOOD]
>> TSchemeshardBorrowedCompactionTest::SchemeshardShouldHandleDataShardReboot
>> SystemView::ShowCreateTableReadReplicas [GOOD]
>> SystemView::ShowCreateTableTtlSettings
------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpMultishardIndex::DataColumnWrite-UseSink [FAIL]
Test command err:
Trying to start YDB, gRPC: 29182, MsgBus: 19993
2025-05-07T09:16:13.679630Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501630137939341578:2144];send_to=[0:7307199536658146131:7762515];
2025-05-07T09:16:13.704915Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message;
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003bd3/r3tmp/tmpcrzWad/pdisk_1.dat
2025-05-07T09:16:14.254808Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2025-05-07T09:16:14.254896Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting
2025-05-07T09:16:14.257946Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected
2025-05-07T09:16:14.294360Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded
TServer::EnableGrpc on GrpcPort 29182, node 1
2025-05-07T09:16:14.516150Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe)
2025-05-07T09:16:14.516182Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe)
2025-05-07T09:16:14.516291Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe)
2025-05-07T09:16:14.516722Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration
TClient is connected to server localhost:19993
TClient is connected to server localhost:19993
WaitRootIsUp 'Root'...
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:16:15.143205Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:15.160215Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T09:16:15.176494Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:15.331810Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:15.503350Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:15.588912Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:17.285011Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501630155119212331:2406], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:17.285149Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:17.594895Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T09:16:17.671266Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T09:16:17.745825Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T09:16:17.795890Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T09:16:17.839068Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T09:16:17.920676Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T09:16:18.000748Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T09:16:18.097236Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501630159414180297:2470], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:18.097290Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:18.097555Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501630159414180302:2473], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:18.100342Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-05-07T09:16:18.111289Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501630159414180304:2474], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T09:16:18.185915Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501630159414180355:3422] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:16:18.682003Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501630137939341578:2144];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:16:18.682062Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:16:19.384645Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480 waiting... Trying to start YDB, gRPC: 16273, MsgBus: 11912 2025-05-07T09:16:22.055495Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7501630177578592736:2145];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003bd3/r3tmp/tmpzkQXWW/pdisk_1.dat 2025-05-07T09:16:22.130644Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-05-07T09:16:22.213823Z node 2 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:16:22.236394Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:16:22.236482Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:16:22.238264Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 16273, node 2 2025-05-07T09:16:22.362380Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:16:22.362398Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:16:22.362403Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:16:22.362502Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:11912 TClient is connected to server localhost:11912 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: ... 
suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:43.568151Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7501630266275250550:2406], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:43.568266Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:43.639675Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-05-07T09:16:43.682587Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-05-07T09:16:43.725243Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-05-07T09:16:43.767635Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-05-07T09:16:43.802095Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-05-07T09:16:43.844939Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-05-07T09:16:43.884315Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-05-07T09:16:43.955669Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7501630266275251209:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:43.955772Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7501630266275251214:2472], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:43.955780Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:43.959744Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-05-07T09:16:43.971336Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7501630266275251216:2473], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-05-07T09:16:44.045587Z node 3 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [3:7501630270570218563:3420] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:16:45.009345Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7501630253390347001:2060];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:16:45.009463Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:16:45.335510Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:55.126062Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7261: Cannot get console configs 2025-05-07T09:16:55.126093Z node 3 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:16:55.786113Z node 3 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:678: SelfId: [3:7501630317814862429:2899], TxId: 281474976715732, task: 1. Ctx: { TraceId : 01jtn0h8xddb0v30r4c40bmw5s. CustomerSuppliedId : . SessionId : ydb://session/3?node_id=3&id=YzRlZGVlZWUtMmY1MDUxNDItZTE1M2RiNDEtZjgxNDhkZGM=. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. InternalError: PRECONDITION_FAILED KIKIMR_CONSTRAINT_VIOLATION: {
: Error: Conflict with existing key., code: 2012 }.
2025-05-07T09:16:55.786680Z node 3 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1216: SelfId: [3:7501630317814862431:2900], TxId: 281474976715732, task: 2. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=3&id=YzRlZGVlZWUtMmY1MDUxNDItZTE1M2RiNDEtZjgxNDhkZGM=. TraceId : 01jtn0h8xddb0v30r4c40bmw5s. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Handle abort execution event from: [3:7501630317814862392:2504], status: PRECONDITION_FAILED, reason: {
: Error: Terminate execution }
2025-05-07T09:16:55.787312Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2578: SessionId: ydb://session/3?node_id=3&id=YzRlZGVlZWUtMmY1MDUxNDItZTE1M2RiNDEtZjgxNDhkZGM=, ActorId: [3:7501630274865186093:2504], ActorState: ExecuteState, TraceId: 01jtn0h8xddb0v30r4c40bmw5s, Create QueryResponse for error on request, msg:
2025-05-07T09:16:55.941216Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037934 not found
2025-05-07T09:16:55.941282Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037935 not found
2025-05-07T09:16:56.002483Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037929 not found
2025-05-07T09:16:56.002526Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037930 not found
2025-05-07T09:16:56.002542Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037927 not found
2025-05-07T09:16:56.002564Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037926 not found
2025-05-07T09:16:56.013460Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037931 not found
2025-05-07T09:16:56.025916Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037937 not found
2025-05-07T09:16:56.025957Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037939 not found
2025-05-07T09:16:56.028117Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037938 not found
2025-05-07T09:17:05.913034Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037928 not found
2025-05-07T09:17:05.913080Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037925 not found
2025-05-07T09:17:05.957362Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037933 not found
2025-05-07T09:17:05.957415Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037946 not found
2025-05-07T09:17:05.957522Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037943 not found
2025-05-07T09:17:10.891112Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037932 not found
2025-05-07T09:17:15.916421Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037942 not found
assertion failed at ydb/core/kqp/ut/common/kqp_ut_common.cpp:931, TString NKikimr::NKqp::StreamResultToYson(NYdb::NTable::TTablePartIterator &, bool, const NYdb::EStatus &): (streamPart.EOS())
: Error: Shard 72075186224037932 is overloaded, code: 2006
: Error: [WRONG_SHARD_STATE] Rejecting data TxId 281474976715734 because datashard 72075186224037932: is in process of split opId 281474976710660 state SplitSrcMakeSnapshot (wrong shard state)
: Error: Table /Root/MultiShardIndexedWithDataColumn/index/indexImplTable is overloaded, code: 2006
0. /-S/util/system/backtrace.cpp:284: ?? @ 0x19DDE90B
1. /tmp//-S/library/cpp/testing/unittest/registar.cpp:46: RaiseError @ 0x1A2A3A5F
2. /tmp//-S/ydb/core/kqp/ut/common/kqp_ut_common.cpp:931: StreamResultToYson @ 0x497F5FDA
3. /tmp//-S/ydb/core/kqp/ut/common/kqp_ut_common.cpp:1122: ReadTableToYson @ 0x497FB85C
4. /tmp//-S/ydb/core/kqp/ut/indexes/kqp_indexes_multishard_ut.cpp:2021: Execute_ @ 0x1998662D
5. /tmp//-S/ydb/core/kqp/ut/indexes/kqp_indexes_multishard_ut.cpp:1354: operator() @ 0x19941B17
6. /-S/contrib/libs/cxxsupp/libcxx/include/__type_traits/invoke.h:149: __invoke<(lambda at /-S/ydb/core/kqp/ut/indexes/kqp_indexes_multishard_ut.cpp:1354:1) &> @ 0x19941B17
7. /-S/contrib/libs/cxxsupp/libcxx/include/__type_traits/invoke.h:224: __call<(lambda at /-S/ydb/core/kqp/ut/indexes/kqp_indexes_multishard_ut.cpp:1354:1) &> @ 0x19941B17
8. /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:169: operator() @ 0x19941B17
9. /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:314: operator() @ 0x19941B17
10. /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:431: operator() @ 0x1A2DAC45
11. /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:990: operator() @ 0x1A2DAC45
12. /tmp//-S/library/cpp/testing/unittest/utmain.cpp:525: Run @ 0x1A2DAC45
13. /tmp//-S/library/cpp/testing/unittest/registar.cpp:373: Run @ 0x1A2AA5E8
14. /tmp//-S/ydb/core/kqp/ut/indexes/kqp_indexes_multishard_ut.cpp:1354: Execute @ 0x19940CE3
15. /tmp//-S/library/cpp/testing/unittest/registar.cpp:494: Execute @ 0x1A2ABEB5
16. /tmp//-S/library/cpp/testing/unittest/utmain.cpp:872: RunMain @ 0x1A2D51BC
17. ??:0: ?? @ 0x7F37309DED8F
18. ??:0: ?? @ 0x7F37309DEE3F
19. ??:0: ?? @ 0x16A4D028
------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/query/unittest >> KqpParams::Decimal-QueryService+UseSink [GOOD]
Test command err:
Trying to start YDB, gRPC: 15345, MsgBus: 19623
2025-05-07T09:16:47.554983Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501630285615403645:2068];send_to=[0:7307199536658146131:7762515];
2025-05-07T09:16:47.555087Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message;
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/001ea4/r3tmp/tmpX4hqfp/pdisk_1.dat
2025-05-07T09:16:48.115765Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded
2025-05-07T09:16:48.137187Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2025-05-07T09:16:48.137305Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting
2025-05-07T09:16:48.162274Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected
TServer::EnableGrpc on GrpcPort 15345, node 1
2025-05-07T09:16:48.464414Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe)
2025-05-07T09:16:48.464436Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe)
2025-05-07T09:16:48.464442Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe)
2025-05-07T09:16:48.464523Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration
TClient is connected to server localhost:19623
TClient is connected to server localhost:19623
WaitRootIsUp 'Root'...
TClient::Ls request: Root
TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED)
WaitRootIsUp 'Root' success.
2025-05-07T09:16:49.388313Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting...
2025-05-07T09:16:49.412035Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T09:16:49.436908Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:49.739368Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:49.966602Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:50.081765Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:52.043787Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501630307090241782:2408], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:52.043932Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:52.484051Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T09:16:52.556128Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501630285615403645:2068];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:16:52.556193Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:16:52.573146Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T09:16:52.646430Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T09:16:52.680888Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T09:16:52.714481Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T09:16:52.748940Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T09:16:52.780223Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T09:16:52.851547Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501630307090242442:2472], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:52.851643Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:52.852037Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501630307090242447:2475], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:52.856397Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-05-07T09:16:52.874521Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501630307090242449:2476], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T09:16:52.959194Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501630307090242500:3418] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } Trying to start YDB, gRPC: 21441, MsgBus: 30612 2025-05-07T09:16:55.997416Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7501630318630081864:2060];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:16:55.997473Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/001ea4/r3tmp/tmpiirehV/pdisk_1.dat 2025-05-07T09:16:56.153580Z node 2 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:16:56.168205Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:16:56.168281Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:16:56.169769Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 21441, node 2 2025-05-07T09:16:56.290591Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:16:56.290612Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:16:56.290620Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:16:56.290736Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:30612 TClient is connected to server localhost:30612 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { Sc ... 
94037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:17:12.129290Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 29680, node 4 2025-05-07T09:17:12.202864Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:17:12.202894Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:17:12.202906Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:17:12.203019Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:10674 TClient is connected to server localhost:10674 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:17:12.860860Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:17:12.878451Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:17:12.983855Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:17:13.202603Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:17:13.323936Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:17:16.179186Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7501630408740280253:2407], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:17:16.179283Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:17:16.242578Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T09:17:16.294768Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T09:17:16.339375Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T09:17:16.377596Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T09:17:16.442998Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T09:17:16.524086Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T09:17:16.610772Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T09:17:16.706475Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7501630408740280916:2470], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:17:16.706570Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:17:16.707124Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7501630408740280921:2473], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:17:16.712315Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-05-07T09:17:16.726319Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7501630408740280923:2474], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T09:17:16.791361Z node 4 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [4:7501630408740280974:3422] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:17:16.974292Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7501630387265442108:2062];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:17:16.974382Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:17:18.177133Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480 2025-05-07T09:17:19.721644Z node 4 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [4:7501630421625183359:2560], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:4:17: Error: At function: RemovePrefixMembers, At function: Unordered, At function: PersistableRepr, At function: OrderedSqlProject, At function: SqlProjectItem
:3:25: Error: At function: Parameter, At function: DataType
:3:25: Error: Invalid decimal precision: 99 2025-05-07T09:17:19.722202Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2147: SessionId: ydb://session/3?node_id=4&id=Zjc5YjEyZTQtNjk4MjYzMTItNmJlYzAwOS02MzJiZWJhZA==, ActorId: [4:7501630417330215844:2515], ActorState: ExecuteState, TraceId: 01jtn0j0yw26g94aqrr4syvv7y, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: 2025-05-07T09:17:19.837551Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2578: SessionId: ydb://session/3?node_id=4&id=Zjc5YjEyZTQtNjk4MjYzMTItNmJlYzAwOS02MzJiZWJhZA==, ActorId: [4:7501630417330215844:2515], ActorState: ExecuteState, TraceId: 01jtn0j0zr4s9k5tewg7nr074k, Create QueryResponse for error on request, msg: ydb/core/kqp/session_actor/kqp_session_actor.cpp:1174: ydb/core/kqp/query_data/kqp_query_data.cpp:271: Parameter $value22 type mismatch, expected: { Kind: Data Data { Scheme: 4865 DecimalParams { Precision: 22 Scale: 9 } } }, actual: Type (Data), schemeType: Decimal(35,10), schemeTypeId: 4865 2025-05-07T09:17:19.873235Z node 4 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [4:7501630421625183374:2566], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:7:29: Error: At function: KiWriteTable!
:7:50: Error: Failed to convert type: Struct<'Key':Int32,'Value22':Decimal(35,10),'Value35':Decimal(35,10)> to Struct<'Key':Int32?,'Value22':Decimal(22,9)?,'Value35':Decimal(35,10)?>
:4:25: Error: Implicit decimal cast would lose precision
:7:50: Error: Failed to convert 'Value22': Decimal(35,10) to Optional
:7:50: Error: Failed to convert input columns types to scheme types, code: 2031 2025-05-07T09:17:19.875464Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2147: SessionId: ydb://session/3?node_id=4&id=Zjc5YjEyZTQtNjk4MjYzMTItNmJlYzAwOS02MzJiZWJhZA==, ActorId: [4:7501630417330215844:2515], ActorState: ExecuteState, TraceId: 01jtn0j13c0n7yk72wsj907svn, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: 2025-05-07T09:17:19.916014Z node 4 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [4:7501630421625183383:2570], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:3:29: Error: At function: KiWriteTable!
:3:50: Error: Failed to convert type: Struct<'Key':Int32,'Value22':Decimal(35,10),'Value35':Decimal(35,10)> to Struct<'Key':Int32?,'Value22':Decimal(22,9)?,'Value35':Decimal(35,10)?>
:0:14: Error: Implicit decimal cast would lose precision
:3:50: Error: Failed to convert 'Value22': Decimal(35,10) to Optional
:3:50: Error: Failed to convert input columns types to scheme types, code: 2031 2025-05-07T09:17:19.917337Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2147: SessionId: ydb://session/3?node_id=4&id=Zjc5YjEyZTQtNjk4MjYzMTItNmJlYzAwOS02MzJiZWJhZA==, ActorId: [4:7501630417330215844:2515], ActorState: ExecuteState, TraceId: 01jtn0j14taw2y9snzkhbkytfk, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: >> KqpQuery::CreateAsSelectTypes-NotNull-IsOlap [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/query/unittest >> KqpQuery::QueryFromSqs [GOOD] Test command err: Trying to start YDB, gRPC: 1782, MsgBus: 2911 2025-05-07T09:16:47.566936Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501630285976835245:2128];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:16:47.567007Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/001e6d/r3tmp/tmpL4cUXb/pdisk_1.dat 2025-05-07T09:16:48.311971Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:16:48.323661Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:16:48.323763Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:16:48.332625Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 1782, node 1 2025-05-07T09:16:48.460723Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:16:48.460752Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:16:48.460759Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:16:48.460877Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:2911 TClient is connected to server localhost:2911 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
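The compile errors above are KqpParams::Decimal's expected negative cases: a parameter declared with decimal precision 99 fails type annotation (Decimal(35,10) is accepted elsewhere in the same test, so 35 is in range while 99 is not), and binding a Decimal(35,10) value where Decimal(22,9) is expected is rejected rather than narrowed implicitly, both as a parameter mismatch ($value22 in the log) and as a struct conversion on write. A minimal YQL sketch of both failures; the table name is hypothetical, $value22 matches the parameter name in the log, and Value22 is assumed to be a Decimal(22,9) column per the expected type in the mismatch message:

    -- Case 1 (run on its own): precision out of range,
    -- fails with "Invalid decimal precision: 99"
    DECLARE $bad AS Decimal(99, 9);

    -- Case 2: lossy narrowing is rejected with
    -- "Implicit decimal cast would lose precision"
    DECLARE $value22 AS Decimal(35, 10);
    UPSERT INTO DecimalTest (Key, Value22)  -- hypothetical table
    VALUES (1, $value22);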
2025-05-07T09:16:49.409728Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:49.442271Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T09:16:49.462511Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:49.706926Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:49.924459Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:50.051904Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:51.783420Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501630303156706012:2408], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:51.783540Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:52.244295Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T09:16:52.295772Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T09:16:52.364334Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T09:16:52.404442Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T09:16:52.452369Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T09:16:52.511140Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T09:16:52.555039Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T09:16:52.591220Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501630285976835245:2128];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:16:52.591328Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:16:52.659103Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501630307451673974:2472], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:52.659177Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:52.659787Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501630307451673979:2475], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:52.664122Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-05-07T09:16:52.685172Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710669, at schemeshard: 72057594046644480 2025-05-07T09:16:52.686213Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501630307451673981:2476], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T09:16:52.770895Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501630307451674034:3419] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:16:54.164642Z node 1 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000340280] received request Name# ExecuteDataQuery ok# true data# session_id: "ydb://session/3?node_id=1&id=NmEyYTFhYjctZWY2MWU4MjgtM2FhNzgwZDgtOTUyNWZkODM=" tx_control { begin_tx { serializable_read_write { } } commit_tx: true } query { yql_text: "\n SELECT * FROM `/Root/TwoShard`;\n " } query_cache_policy { } operation_params { } peer# ipv6:%5B::1%5D:51090 2025-05-07T09:16:54.164701Z node 1 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000340980] created request Name# ExecuteDataQuery 2025-05-07T09:16:54.164842Z node 1 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000340280] received request without user token Name# ExecuteDataQuery data# session_id: "ydb://session/3?node_id=1&id=NmEyYTFhYjctZWY2MWU4MjgtM2FhNzgwZDgtOTUyNWZkODM=" tx_control { begin_tx { serializable_read_write { } } commit_tx: true } query { yql_text: "\n SELECT * FROM `/Root/TwoShard`;\n " } query_cache_policy { } operation_params { } peer# ipv6:%5B::1%5D:51090 database# /Root 2025-05-07T09:16:54.166187Z node 1 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:575: Got grpc request# ExecuteDataQueryRequest, traceId# 01jtn0h80maqvhs4xx47gczggt, sdkBuildInfo# ydb-cpp-sdk/dev, state# AS_NOT_PERFORMED, database# /Root, peer# ipv6:[::1]:51090, grpcInfo# grpc-c++/1.54.3 grpc-c/31.0.0 (linux; chttp2), timeout# 2.983824s
: Error: GRpc error: (4): Deadline Exceeded
: Error: Grpc error response on endpoint localhost:1782 2025-05-07T09:16:57.156089Z node 1 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1944: ActorId: [1:7501630316041608912:2517] TxId: 281474976710672. Ctx: { TraceId: 01jtn0h80maqvhs4xx47gczggt, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NmEyYTFhYjctZWY2MWU4MjgtM2FhNzgwZDgtOTUyNWZkODM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ABORTED: {
: Error: Client lost } 2025-05-07T09:16:57.156768Z node 1 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1216: SelfId: [1:7501630316041608919:2526], TxId: 281474976710672, task: 1. Ctx: { TraceId : 01jtn0h80maqvhs4xx47gczggt. SessionId : ydb://session/3?node_id=1&id=NmEyYTFhYjctZWY2MWU4MjgtM2FhNzgwZDgtOTUyNWZkODM=. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Handle abort execution event from: [1:7501630316041608912:2517], status: ABORTED, reason: {
: Error: Terminate execution } 2025-05-07T09:16:57.157166Z node 1 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1216: SelfId: [1:7501630316041608920:2527], TxId: 28147497671 ... ($15) (Member $15 '"Amount") (Member $15 '"Comment") (Member $15 '"Group") (Member $15 '"Name"))) (return (FromFlow (ExpandMap (Take (ToFlow $13) $3) $14))) ))) $8)) (let $10 (DqCnUnionAll (TDqOutput $9 '"0"))) (let $11 (DqPhyStage '($10) (lambda '($16) (FromFlow (NarrowMap (Take (ToFlow $16) $3) (lambda '($17 $18 $19 $20) (AsStruct '('"Amount" $17) '('"Comment" $18) '('"Group" $19) '('"Name" $20)))))) '('('"_logical_id" '723) '('"_id" '"c40a65c4-1747f3c2-9ba06ca4-8f70ea4")))) (let $12 (DqCnResult (TDqOutput $11 '"0") '())) (return (KqpPhysicalQuery '((KqpPhysicalTx '($9 $11) '($12) '() '('('"type" '"data")))) '((KqpTxResultBinding (ListType $7) '"0" '"0")) '('('"type" '"data_query")))) ) Plan: {"Plan":{"Plans":[{"PlanNodeId":5,"Plans":[{"PlanNodeId":4,"Plans":[{"PlanNodeId":3,"Plans":[{"PlanNodeId":2,"Plans":[{"Tables":["Test"],"PlanNodeId":1,"Operators":[{"Scan":"Sequential","ReadRange":["Group (1)","Name (Name, +∞)"],"E-Size":"No estimate","ReadLimit":"1001","Name":"TableRangeScan","Inputs":[],"Path":"\/Root\/Test","E-Rows":"No estimate","Table":"Test","ReadColumns":["Amount","Comment","Group","Name"],"E-Cost":"No estimate"}],"Node Type":"TableRangeScan"}],"Operators":[{"Inputs":[{"ExternalPlanNodeId":1}],"Name":"Limit","Limit":"1001"}],"Node Type":"Limit"}],"Node Type":"UnionAll","PlanNodeType":"Connection"}],"Operators":[{"Inputs":[{"ExternalPlanNodeId":3}],"Name":"Limit","Limit":"1001"}],"Node Type":"Limit"}],"Node Type":"ResultSet","PlanNodeType":"ResultSet"}],"Node Type":"Query","Stats":{"ResourcePoolId":"default"},"PlanNodeType":"Query"},"meta":{"version":"0.2","type":"query"},"tables":[{"name":"\/Root\/Test","reads":[{"lookup_by":["Group (1)"],"columns":["Amount","Comment","Group","Name"],"scan_by":["Name (Name, +∞)"],"limit":"1001","type":"Scan"}]}],"SimplifiedPlan":{"PlanNodeId":0,"Plans":[{"PlanNodeId":1,"Plans":[{"PlanNodeId":2,"Plans":[{"PlanNodeId":4,"Plans":[{"PlanNodeId":5,"Operators":[{"Scan":"Sequential","ReadRange":["Group (1)","Name (Name, +∞)"],"E-Size":"No estimate","ReadLimit":"1001","Name":"TableRangeScan","Path":"\/Root\/Test","E-Rows":"No estimate","Table":"Test","ReadColumns":["Amount","Comment","Group","Name"],"E-Cost":"No estimate"}],"Node Type":"TableRangeScan"}],"Operators":[{"Name":"Limit","Limit":"1001"}],"Node Type":"Limit"}],"Operators":[{"Name":"Limit","Limit":"1001"}],"Node Type":"Limit"}],"Node Type":"ResultSet","PlanNodeType":"ResultSet"}],"Node Type":"Query","OptimizerStats":{"EquiJoinsCount":0,"JoinsCount":0},"PlanNodeType":"Query"}} Trying to start YDB, gRPC: 5232, MsgBus: 6099 2025-05-07T09:17:14.464907Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7501630402148786675:2208];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/001e6d/r3tmp/tmpAE5eME/pdisk_1.dat 2025-05-07T09:17:14.511882Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-05-07T09:17:14.640559Z node 4 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:17:14.688018Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 
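The QueryFromSqs timeout above is client-driven: the ExecuteDataQuery request arrives with a serializable read-write tx_control, commit_tx: true, and roughly a 3 s deadline (timeout# 2.983824s), so the client gives up first, the endpoint reports Deadline Exceeded, and the executer aborts the still-running query with "Client lost" / "Terminate execution". The query itself, verbatim from the request's yql_text:

    SELECT * FROM `/Root/TwoShard`;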
2025-05-07T09:17:14.688106Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:17:14.689946Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 5232, node 4 2025-05-07T09:17:14.856873Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:17:14.856894Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:17:14.856901Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:17:14.857035Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6099 TClient is connected to server localhost:6099 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:17:15.415636Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:17:15.423786Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T09:17:15.444215Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:17:15.533078Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:17:15.722148Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:17:15.806724Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T09:17:18.291731Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7501630419328657359:2406], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:17:18.291822Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:17:18.356214Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T09:17:18.395302Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T09:17:18.450379Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T09:17:18.490689Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T09:17:18.537493Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T09:17:18.599037Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T09:17:18.654452Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T09:17:18.739496Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7501630419328658018:2470], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:17:18.739614Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:17:18.740042Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7501630419328658023:2473], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:17:18.745661Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-05-07T09:17:18.763865Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7501630419328658025:2474], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T09:17:18.847435Z node 4 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [4:7501630419328658076:3420] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:17:19.458034Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7501630402148786675:2208];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:17:19.458103Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:17:20.154121Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:1, at schemeshard: 72057594046644480 >> KqpStats::DeferredEffects+UseSink [GOOD] >> KqpStats::DataQueryWithEffects+UseSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpPrefixedVectorIndexes::OrderByCosineDistanceNotNullableLevel4 [GOOD] Test command err: Trying to start YDB, gRPC: 29467, MsgBus: 13880 2025-05-07T09:16:03.286637Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501630095263963700:2059];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:16:03.286752Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/003bdc/r3tmp/tmpKyCGF7/pdisk_1.dat 2025-05-07T09:16:03.848834Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:16:03.848948Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:16:03.852030Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 29467, node 1 2025-05-07T09:16:03.902082Z node 1 :GRPC_SERVER WARN: grpc_request_proxy.cpp:509: SchemeBoardDelete /Root Strong=0 2025-05-07T09:16:03.902651Z node 1 :GRPC_SERVER WARN: grpc_request_proxy.cpp:509: SchemeBoardDelete /Root Strong=0 2025-05-07T09:16:03.918784Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:16:04.134603Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:16:04.134626Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:16:04.134633Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:16:04.134779Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:13880 TClient is connected to server localhost:13880 WaitRootIsUp 'Root'... 
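KqpPrefixedVectorIndexes::OrderByCosineDistance* exercises ordering by cosine distance over a prefixed vector index on the TestTable created further below (columns user and emb). A hedged sketch of the query shape such a test runs, assuming the Knn UDF from YDB's vector-search support; the literal prefix value and LIMIT are illustrative:

    DECLARE $target AS String;  -- a packed embedding, e.g. from Knn::ToBinaryStringFloat
    SELECT pk, data
    FROM TestTable
    WHERE `user` = "user_1"     -- equality on the prefix column of the prefixed index
    ORDER BY Knn::CosineDistance(emb, $target)
    LIMIT 3;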
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:16:04.875987Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:04.905661Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:05.120556Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:05.386567Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:05.517742Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:07.579333Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501630112443834536:2407], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:07.579470Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:07.870639Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T09:16:07.923822Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T09:16:07.968116Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T09:16:08.047885Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T09:16:08.092438Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T09:16:08.161251Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T09:16:08.238278Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T09:16:08.287389Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501630095263963700:2059];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:16:08.287446Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:16:08.314933Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501630116738802493:2470], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:08.315187Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:08.315748Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501630116738802498:2473], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:08.322056Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-05-07T09:16:08.337157Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501630116738802500:2474], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T09:16:08.424644Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501630116738802553:3421] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:16:09.686400Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269877761, Sender [1:7501630121033770129:3600], Recipient [1:7501630095263964156:2205]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-05-07T09:16:09.686443Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4937: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-05-07T09:16:09.686458Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5713: Pipe server connected, at tablet: 72057594046644480 2025-05-07T09:16:09.686491Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271122432, Sender [1:7501630121033770125:3597], Recipient [1:7501630095263964156:2205]: {TEvModifySchemeTransaction txid# 281474976710672 TabletId# 72057594046644480} 2025-05-07T09:16:09.686503Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4851: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-05-07T09:16:09.757701Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/Root" OperationType: ESchemeOpCreateTable CreateTable { Name: "TestTable" Columns { Name: "pk" Type: "Int64" NotNull: false } Columns { Name: "user" Type: "String" NotNull: false } Columns { Name: "emb" Type: "String" NotNull: false } Columns { Name: "data" Type: "String" NotNull: false } KeyColumnNames: "pk" PartitionConfig { PartitioningPolicy { MinPartitionsCount: 3 } ColumnFamilies { Id: 0 StorageConfig { SysLog { PreferredPoolKind: "test" } Log { PreferredPoolKind: "test" } Data { PreferredPoolKind: "test" } } } } SplitBoundary { KeyPrefix { Tuple { Optional { Int64: 40 } } } } SplitBoundary { KeyPrefix { Tuple { Optional { Int64: 60 } } } } Temporary: false } CreateIndexedTable { } } TxId: 281474976710672 TabletId: 72057594046644480 PeerName: "ipv6:[::1]:51962" , at schemeshard: 72057594046644480 2025-05-07T09:16:09.758498Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_table.cpp:426: TCreateTable Propose, path: /Root/TestTable, opId: 281474976710672:0, at schemeshard: 72057594046644480 2025-05-07T09:16:09.758697Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_create_table.cpp:433: TCreateTable Propose, path: /Root/TestTable, opId: 281474976710672:0, schema: Name: "TestTable" Columns { Name: "pk" Type: "Int64" NotNull: false } Columns { Name: "user" Type: "String" NotNull: false } Columns { Name: "emb" Type: "String" NotNull: false } Columns { Name: "data" Type: "String" NotNull: false } KeyColumnNames: "pk" PartitionConfig { PartitioningPolicy { MinPartitionsCount: 3 } ColumnFamilies { Id: 0 StorageConfig { SysLog { PreferredPoolK ... 
ssing event TEvPrivate::TEvPersistTableStats 2025-05-07T09:17:19.056288Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:588: Started TEvPersistStats at tablet 72057594046644480, queue size# 0 2025-05-07T09:17:19.242697Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269553162, Sender [2:7501630229262450079:2329], Recipient [2:7501630224967482292:2150]: NKikimrTxDataShard.TEvPeriodicTableStats DatashardId: 72075186224037892 TableLocalId: 3 Generation: 1 Round: 3 TableStats { DataSize: 800 RowCount: 3 IndexSize: 0 InMemSize: 800 LastAccessTime: 1746609398189 LastUpdateTime: 1746609398189 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 3 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 1 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 45 Memory: 137784 } ShardState: 2 UserTablePartOwners: 72075186224037892 NodeId: 2 StartTime: 1746609394010 TableOwnerId: 72057594046644480 FollowerId: 0 2025-05-07T09:17:19.242742Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4877: StateWork, processing event TEvDataShard::TEvPeriodicTableStats 2025-05-07T09:17:19.242776Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:561: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037892 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 3] state 'Ready' dataSize 800 rowCount 3 cpuUsage 0.0045 2025-05-07T09:17:19.242865Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:568: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037892 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 3] raw table stats: DataSize: 800 RowCount: 3 IndexSize: 0 InMemSize: 800 LastAccessTime: 1746609398189 LastUpdateTime: 1746609398189 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 3 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 1 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 2025-05-07T09:17:19.242889Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:608: Will delay TTxStoreTableStats on# 0.099995s, queue# 1 2025-05-07T09:17:19.330291Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271125000, Sender [0:0:0], Recipient [2:7501630224967482292:2150]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T09:17:19.330332Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4848: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T09:17:19.330379Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271124999, Sender [2:7501630224967482292:2150], Recipient [2:7501630224967482292:2150]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T09:17:19.330397Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4847: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T09:17:19.339835Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 2146435092, Sender [0:0:0], 
Recipient [2:7501630224967482292:2150]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats 2025-05-07T09:17:19.339874Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5015: StateWork, processing event TEvPrivate::TEvPersistTableStats 2025-05-07T09:17:19.339895Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:588: Started TEvPersistStats at tablet 72057594046644480, queue size# 1 2025-05-07T09:17:19.339946Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:599: Will execute TTxStoreStats, queue# 1 2025-05-07T09:17:19.339962Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:608: Will delay TTxStoreTableStats on# 0.002923s, queue# 1 2025-05-07T09:17:19.340027Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 3 shard idx 72057594046644480:5 data size 800 row count 3 2025-05-07T09:17:19.340081Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037892 maps to shardIdx: 72057594046644480:5 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 3], pathId map=EightShard, is column=0, is olap=0, RowCount 3, DataSize 800 2025-05-07T09:17:19.340095Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037892, followerId 0 2025-05-07T09:17:19.340162Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:5 with partCount# 0, rowCount# 3, searchHeight# 1, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-05-07T09:17:19.340205Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:483: Do not want to split tablet 72075186224037892 2025-05-07T09:17:19.340269Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-05-07T09:17:19.342878Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 2146435092, Sender [0:0:0], Recipient [2:7501630224967482292:2150]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats 2025-05-07T09:17:19.342928Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5015: StateWork, processing event TEvPrivate::TEvPersistTableStats 2025-05-07T09:17:19.342942Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:588: Started TEvPersistStats at tablet 72057594046644480, queue size# 0 2025-05-07T09:17:19.379330Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269553162, Sender [2:7501630229262450325:2348], Recipient [2:7501630224967482292:2150]: NKikimrTxDataShard.TEvPeriodicTableStats DatashardId: 72075186224037898 TableLocalId: 4 Generation: 1 Round: 3 TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 InMemSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 0 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 48 Memory: 119352 } ShardState: 2 UserTablePartOwners: 72075186224037898 NodeId: 2 StartTime: 1746609394276 TableOwnerId: 72057594046644480 FollowerId: 0 
2025-05-07T09:17:19.379369Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4877: StateWork, processing event TEvDataShard::TEvPeriodicTableStats 2025-05-07T09:17:19.379402Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:561: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037898 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 4] state 'Ready' dataSize 0 rowCount 0 cpuUsage 0.0048 2025-05-07T09:17:19.379486Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:568: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037898 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 4] raw table stats: DataSize: 0 RowCount: 0 IndexSize: 0 InMemSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 0 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 2025-05-07T09:17:19.379509Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:608: Will delay TTxStoreTableStats on# 0.099995s, queue# 1 2025-05-07T09:17:19.482628Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 2146435092, Sender [0:0:0], Recipient [2:7501630224967482292:2150]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats 2025-05-07T09:17:19.482671Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5015: StateWork, processing event TEvPrivate::TEvPersistTableStats 2025-05-07T09:17:19.482692Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:588: Started TEvPersistStats at tablet 72057594046644480, queue size# 1 2025-05-07T09:17:19.482740Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:599: Will execute TTxStoreStats, queue# 1 2025-05-07T09:17:19.482755Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:608: Will delay TTxStoreTableStats on# 0.000000s, queue# 1 2025-05-07T09:17:19.482809Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 4 shard idx 72057594046644480:11 data size 0 row count 0 2025-05-07T09:17:19.482865Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037898 maps to shardIdx: 72057594046644480:11 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 4], pathId map=Logs, is column=0, is olap=0, RowCount 0, DataSize 0 2025-05-07T09:17:19.482884Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037898, followerId 0 2025-05-07T09:17:19.482938Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:11 with partCount# 0, rowCount# 0, searchHeight# 0, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-05-07T09:17:19.482978Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:483: Do not want to split tablet 72075186224037898 2025-05-07T09:17:19.483035Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-05-07T09:17:19.483265Z node 2 :FLAT_TX_SCHEMESHARD TRACE: 
schemeshard_impl.cpp:4840: StateWork, received event# 2146435092, Sender [0:0:0], Recipient [2:7501630224967482292:2150]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats 2025-05-07T09:17:19.483282Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5015: StateWork, processing event TEvPrivate::TEvPersistTableStats 2025-05-07T09:17:19.483297Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:588: Started TEvPersistStats at tablet 72057594046644480, queue size# 0 2025-05-07T09:17:20.334168Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271125000, Sender [0:0:0], Recipient [2:7501630224967482292:2150]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T09:17:20.334215Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4848: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T09:17:20.334267Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271124999, Sender [2:7501630224967482292:2150], Recipient [2:7501630224967482292:2150]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T09:17:20.334283Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4847: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime >> KqpStats::RequestUnitForBadRequestExecute >> KqpQuery::PreparedQueryInvalidate ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/query/unittest >> KqpQuery::CreateAsSelectTypes-NotNull-IsOlap [GOOD] Test command err: Trying to start YDB, gRPC: 8870, MsgBus: 13076 2025-05-07T09:16:47.556595Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501630286198977617:2061];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:16:47.556652Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/001e62/r3tmp/tmpR3xOKu/pdisk_1.dat 2025-05-07T09:16:48.117895Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:16:48.136738Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:16:48.136849Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:16:48.151491Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 8870, node 1 2025-05-07T09:16:48.463266Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:16:48.463291Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:16:48.463300Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:16:48.463453Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:13076 TClient is connected to server localhost:13076 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:16:49.527722Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:49.570615Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T09:16:49.596966Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:49.772759Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:49.996630Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:50.086041Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:51.745486Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501630303378848448:2407], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:51.745606Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:52.244294Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T09:16:52.298336Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T09:16:52.352623Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T09:16:52.402646Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T09:16:52.440677Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T09:16:52.521293Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T09:16:52.558352Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501630286198977617:2061];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:16:52.558416Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:16:52.564353Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T09:16:52.639881Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501630307673816403:2471], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:52.639975Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:52.641338Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501630307673816408:2474], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:52.644785Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-05-07T09:16:52.656073Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501630307673816410:2475], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T09:16:52.714956Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501630307673816461:3414] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } Trying to start YDB, gRPC: 18155, MsgBus: 61731 2025-05-07T09:16:55.619380Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7501630319205982527:2058];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:16:55.619419Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/001e62/r3tmp/tmprPHJXQ/pdisk_1.dat 2025-05-07T09:16:55.862733Z node 2 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:16:55.867265Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:16:55.867354Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:16:55.870627Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 18155, node 2 2025-05-07T09:16:55.986550Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:16:55.986570Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:16:55.986577Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:16:55.986694Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:61731 TClient is connected to server localhost:61731 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { Sche ... 
7:13.363617Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037954;self_id=[3:7501630379485187388:2630];ev=NKikimr::TEvTablet::TEvTabletDead;fline=columnshard_impl.cpp:1153;event=tablet_die; 2025-05-07T09:17:13.363834Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037950;self_id=[3:7501630379485187390:2631];ev=NKikimr::TEvTablet::TEvTabletDead;fline=columnshard_impl.cpp:1153;event=tablet_die; 2025-05-07T09:17:13.365756Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037927;self_id=[3:7501630383780154883:2673];ev=NKikimr::TEvTablet::TEvTabletDead;fline=columnshard_impl.cpp:1153;event=tablet_die; 2025-05-07T09:17:13.366622Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037952;self_id=[3:7501630379485187424:2642];ev=NKikimr::TEvTablet::TEvTabletDead;fline=columnshard_impl.cpp:1153;event=tablet_die; 2025-05-07T09:17:13.368413Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037925;self_id=[3:7501630379485187511:2657];ev=NKikimr::TEvTablet::TEvTabletDead;fline=columnshard_impl.cpp:1153;event=tablet_die; 2025-05-07T09:17:13.369279Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037956;self_id=[3:7501630379485187426:2643];ev=NKikimr::TEvTablet::TEvTabletDead;fline=columnshard_impl.cpp:1153;event=tablet_die; 2025-05-07T09:17:13.370730Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037904;self_id=[3:7501630383780154899:2680];ev=NKikimr::TEvTablet::TEvTabletDead;fline=columnshard_impl.cpp:1153;event=tablet_die; 2025-05-07T09:17:13.371982Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[3:7501630379485187273:2626];ev=NKikimr::TEvTablet::TEvTabletDead;fline=columnshard_impl.cpp:1153;event=tablet_die; 2025-05-07T09:17:13.372982Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037937;self_id=[3:7501630383780154815:2660];ev=NKikimr::TEvTablet::TEvTabletDead;fline=columnshard_impl.cpp:1153;event=tablet_die; 2025-05-07T09:17:13.375156Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037959;self_id=[3:7501630379485187436:2644];ev=NKikimr::TEvTablet::TEvTabletDead;fline=columnshard_impl.cpp:1153;event=tablet_die; 2025-05-07T09:17:13.377928Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037944;self_id=[3:7501630379485187513:2658];ev=NKikimr::TEvTablet::TEvTabletDead;fline=columnshard_impl.cpp:1153;event=tablet_die; 2025-05-07T09:17:13.379291Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037946;self_id=[3:7501630379485187397:2634];ev=NKikimr::TEvTablet::TEvTabletDead;fline=columnshard_impl.cpp:1153;event=tablet_die; 2025-05-07T09:17:13.380513Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037948;self_id=[3:7501630379485187392:2632];ev=NKikimr::TEvTablet::TEvTabletDead;fline=columnshard_impl.cpp:1153;event=tablet_die; 2025-05-07T09:17:13.383095Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037942;self_id=[3:7501630379485187467:2651];ev=NKikimr::TEvTablet::TEvTabletDead;fline=columnshard_impl.cpp:1153;event=tablet_die; 2025-05-07T09:17:13.487028Z node 3 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [3:7501630396665059791:5302] txid# 281474976715683, issues: { message: "Check failed: path: \'/Root/.tmp/sessions\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeDir, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:17:13.506016Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part 
proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715685:1, at schemeshard: 72057594046644480 2025-05-07T09:17:13.649040Z node 3 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [3:7501630396665059936:5393] txid# 281474976715687, issues: { message: "Check failed: path: \'/Root/RowSrc\', error: path exist, request doesn\'t accept it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeTable, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:17:13.649818Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2578: SessionId: ydb://session/3?node_id=3&id=N2U1MTdjMmMtYzI5YWU3OTgtOTU2MWFiNzAtNDk3ZDgzNTY=, ActorId: [3:7501630396665059763:3366], ActorState: ExecuteState, TraceId: 01jtn0htv5dfkxhc38g818t2p6, Create QueryResponse for error on request, msg: 2025-05-07T09:17:13.904078Z node 3 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [3:7501630396665060013:5426] txid# 281474976715689, issues: { message: "Check failed: path: \'/Root/.tmp/sessions\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeDir, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:17:13.911955Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715690, at schemeshard: 72057594046644480 2025-05-07T09:17:13.923322Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715691:1, at schemeshard: 72057594046644480 2025-05-07T09:17:14.895587Z node 3 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [3:7501630400960027667:5581] txid# 281474976715695, issues: { message: "Check failed: path: \'/Root/.tmp/sessions\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeDir, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:17:14.909911Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715697:1, at schemeshard: 72057594046644480 Trying to start YDB, gRPC: 15499, MsgBus: 1425 2025-05-07T09:17:16.737177Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7501630409426369683:2060];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:17:16.737241Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/001e62/r3tmp/tmpUuklgS/pdisk_1.dat 2025-05-07T09:17:16.943021Z node 4 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:17:16.943259Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:17:16.943340Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:17:16.958906Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 15499, node 4 2025-05-07T09:17:17.038140Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use 
file: (empty maybe) 2025-05-07T09:17:17.038164Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:17:17.038173Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:17:17.038286Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:1425 TClient is connected to server localhost:1425 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:17:17.803417Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:17:17.814246Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T09:17:21.272662Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7501630430901206830:2334], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:17:21.272736Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7501630430901206822:2331], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:17:21.272877Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:17:21.276879Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480 2025-05-07T09:17:21.294170Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7501630430901206836:2335], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-05-07T09:17:21.362080Z node 4 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [4:7501630430901206887:2334] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:17:21.437294Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 2025-05-07T09:17:21.738287Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7501630409426369683:2060];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:17:21.738393Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:17:21.877613Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:1, at schemeshard: 72057594046644480 >> KqpQuery::UdfTerminate >> KqpQuery::SelectWhereInSubquery >> KqpParams::MissingParameter >> KqpTypes::SelectNull [GOOD] >> KqpTypes::MultipleCurrentUtcTimestamp >> KqpParams::BadParameterType >> KqpStats::DeferredEffects-UseSink [GOOD] >> KqpQuery::OlapCreateAsSelect_Simple [GOOD] >> KqpQuery::OltpCreateAsSelect_Simple >> KqpExplain::MultiJoinCteLinks [GOOD] >> KqpQuery::ExecuteDataQueryCollectMeta >> SystemView::DescribeSystemFolder [GOOD] >> SystemView::DescribeAccessDenied >> KqpParams::InvalidJson [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/query/unittest >> KqpStats::DeferredEffects-UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 25596, MsgBus: 21057 2025-05-07T09:16:47.586413Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501630282414707282:2132];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:16:47.586483Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/001e5e/r3tmp/tmpPiMkQC/pdisk_1.dat 2025-05-07T09:16:48.137274Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:16:48.163222Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:16:48.163323Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:16:48.167891Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 25596, node 1 2025-05-07T09:16:48.470503Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:16:48.470549Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 
2025-05-07T09:16:48.470561Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:16:48.470693Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:21057 TClient is connected to server localhost:21057 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:16:49.395314Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:49.420468Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:49.609550Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:49.801715Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:49.894320Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:51.612837Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501630299594578058:2406], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:51.613207Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:52.244488Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T09:16:52.325813Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T09:16:52.367333Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T09:16:52.410817Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T09:16:52.448830Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T09:16:52.515968Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T09:16:52.567356Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T09:16:52.624558Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501630282414707282:2132];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:16:52.626801Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:16:52.689505Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501630303889546015:2472], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:52.689606Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:52.690190Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501630303889546020:2475], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:52.693945Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-05-07T09:16:52.710386Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501630303889546022:2476], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T09:16:52.770469Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501630303889546073:3421] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } Trying to start YDB, gRPC: 8065, MsgBus: 11174 2025-05-07T09:16:56.870509Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7501630323137042686:2197];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:16:56.871041Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/001e5e/r3tmp/tmpQXR5vx/pdisk_1.dat 2025-05-07T09:16:57.163659Z node 2 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:16:57.169448Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:16:57.169537Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:16:57.170969Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 8065, node 2 2025-05-07T09:16:57.326592Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:16:57.326611Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:16:57.326620Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:16:57.326763Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:11174 TClient is connected to server localhost:11174 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:16:57.981758Z nod ... scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7501630387743855345:2470], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:17:11.599029Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:17:11.599343Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7501630387743855350:2473], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:17:11.603723Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-05-07T09:17:11.625327Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7501630387743855352:2474], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T09:17:11.692111Z node 3 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [3:7501630387743855403:3419] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:17:11.912912Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7501630366269016539:2063];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:17:11.912992Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:17:16.546440Z node 3 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1746609433772, txId: 281474976710672] shutting down Trying to start YDB, gRPC: 15327, MsgBus: 30850 2025-05-07T09:17:17.587838Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7501630412805154137:2057];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:17:17.587927Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/001e5e/r3tmp/tmpD6sV67/pdisk_1.dat 2025-05-07T09:17:17.789672Z node 4 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:17:17.821344Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:17:17.821445Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:17:17.823219Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 15327, node 4 2025-05-07T09:17:17.921911Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:17:17.921931Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:17:17.921940Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:17:17.922109Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:30850 TClient is connected to server localhost:30850 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:17:18.594217Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:17:18.622224Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T09:17:18.653757Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:17:18.771443Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:17:18.994504Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:17:19.122908Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:17:22.182940Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7501630434279992266:2407], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:17:22.183090Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:17:22.253758Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T09:17:22.308339Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T09:17:22.364589Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T09:17:22.411096Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T09:17:22.460671Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T09:17:22.550554Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T09:17:22.594491Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7501630412805154137:2057];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:17:22.594594Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:17:22.636682Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T09:17:22.757609Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7501630434279992930:2471], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:17:22.757729Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:17:22.758097Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7501630434279992935:2474], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:17:22.763118Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-05-07T09:17:22.778192Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7501630434279992937:2475], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T09:17:22.843270Z node 4 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [4:7501630434279992989:3421] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 }
: Warning: Type annotation, code: 1030
:3:46: Warning: At function: Coalesce
:3:58: Warning: At function: SqlIn
:3:58: Warning: IN may produce unexpected result when used with nullable arguments. Consider adding 'PRAGMA AnsiInForEmptyOrNullableItemsCollections;', code: 1108 >> SystemView::ShowCreateTablePartitionAtKeys [GOOD] >> SystemView::ShowCreateTablePartitionByHash ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/query/unittest >> KqpExplain::MultiJoinCteLinks [GOOD] Test command err: Trying to start YDB, gRPC: 19857, MsgBus: 12954 2025-05-07T09:16:47.584159Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501630283826627056:2247];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:16:47.584230Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/001e56/r3tmp/tmpues3f0/pdisk_1.dat 2025-05-07T09:16:48.193806Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:16:48.193917Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:16:48.195604Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:16:48.210356Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 19857, node 1 2025-05-07T09:16:48.458856Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:16:48.458879Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:16:48.458887Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:16:48.459069Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:12954 TClient is connected to server localhost:12954 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:16:49.314309Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 
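The SqlIn warning above (code 1108, printed at the end of the KqpStats::DeferredEffects output) is actionable: with nullable arguments, the legacy IN semantics can differ from the ANSI three-valued logic most SQL users expect, which is exactly what the warning flags. A minimal sketch of the suggested fix follows; only the pragma text is taken verbatim from the warning, while the table and predicate are made up for illustration.

    -- Suggested fix from warning 1108; table and WHERE clause are hypothetical.
    PRAGMA AnsiInForEmptyOrNullableItemsCollections;

    SELECT *
    FROM `/Root/SomeTable`           -- hypothetical table with a nullable Value column
    WHERE Value IN ("One", "Two");   -- with the pragma, IN over empty or nullable
                                     -- collections follows ANSI SQL semantics
                                     -- instead of the legacy behavior being flagged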
2025-05-07T09:16:49.356588Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:49.635904Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:49.838603Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:49.918827Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:51.667572Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501630301006497703:2408], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:51.667704Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:52.249665Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T09:16:52.310287Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T09:16:52.361265Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T09:16:52.410654Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T09:16:52.487848Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T09:16:52.564294Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T09:16:52.594821Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501630283826627056:2247];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:16:52.596289Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:16:52.627025Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T09:16:52.696772Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501630305301465674:2473], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:52.696871Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:52.697274Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501630305301465679:2476], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:52.700817Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-05-07T09:16:52.714663Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501630305301465681:2477], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T09:16:52.810632Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501630305301465732:3425] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } {"Plan":{"Plans":[{"PlanNodeId":6,"Plans":[{"PlanNodeId":5,"Plans":[{"PlanNodeId":4,"Plans":[{"PlanNodeId":3,"Plans":[{"PlanNodeId":2,"Plans":[{"Tables":["EightShard"],"PlanNodeId":1,"Operators":[{"Inputs":[{"InternalOperatorId":1}],"Name":"TopSort","Limit":"SUM(10,15)","TopSortBy":"row.Text"},{"Scan":"Parallel","E-Size":"No estimate","ReadRanges":["Key (-∞, +∞)"],"Name":"TableFullScan","Inputs":[],"Path":"\/Root\/EightShard","E-Rows":"No estimate","Table":"EightShard","ReadColumns":["Data","Key","Text"],"E-Cost":"No estimate"}],"Node Type":"TopSort-TableFullScan"}],"Node Type":"Merge","SortColumns":["Text (Asc)"],"PlanNodeType":"Connection"}],"Operators":[{"Inputs":[{"ExternalPlanNodeId":2}],"Name":"Limit","Limit":"Min(If,SUM(10,15))"}],"Node Type":"Limit"}],"Node Type":"UnionAll","PlanNodeType":"Connection"}],"Operators":[{"Inputs":[{"InternalOperatorId":1}],"Name":"Limit","Limit":"10"},{"Inputs":[{"ExternalPlanNodeId":4}],"Offset":"15","Name":"Offset"}],"Node Type":"Limit-Offset"}],"Node Type":"ResultSet","PlanNodeType":"ResultSet"}],"Node Type":"Query","Stats":{"ResourcePoolId":"default"},"PlanNodeType":"Query"},"meta":{"version":"0.2","type":"query"},"tables":[{"name":"\/Root\/EightShard","reads":[{"columns":["Data","Key","Text"],"scan_by":["Key (-∞, +∞)"],"type":"FullScan"}]}],"SimplifiedPlan":{"PlanNodeId":0,"Plans":[{"PlanNodeId":1,"Plans":[{"PlanNodeId":2,"Plans":[{"PlanNodeId":3,"Plans":[{"PlanNodeId":5,"Plans":[{"PlanNodeId":7,"Plans":[{"PlanNodeId":8,"Operators":[{"Scan":"Parallel","E-Size":"No estimate","ReadRanges":["Key (-∞, +∞)"],"Name":"TableFullScan","Path":"\/Root\/EightShard","E-Rows":"No estimate","Table":"EightShard","ReadColumns":["Data","Key","Text"],"E-Cost":"No estimate"}],"Node Type":"TableFullScan"}],"Operators":[{"Name":"TopSort","Limit":"SUM(10,15)","TopSortBy":"row.Text"}],"Node Type":"TopSort"}],"Operators":[{"Name":"Limit","Limit":"Min(If,SUM(10,15))"}],"Node Type":"Limit"}],"Operators":[{"Offset":"15","Name":"Offset"}],"Node Type":"Offset"}],"Operators":[{"Name":"Limit","Limit":"10"}],"Node Type":"Limit"}],"Node Type":"ResultSet","PlanNodeType":"ResultSet"}],"Node Type":"Query","OptimizerStats":{"EquiJoinsCount":0,"JoinsCount":0},"PlanNodeType":"Query"}} Trying to start YDB, gRPC: 61969, MsgBus: 61237 2025-05-07T09:16:55.715925Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7501630318623797529:2132];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:16:55.715982Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/001e56/r3tmp/tmp5AJzHj/pdisk_1.dat 2025-05-07T09:16:55.910379Z node 2 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:16:55.961162Z node 2 ... 
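The first EXPLAIN JSON above (node 1) is a TopSort-TableFullScan over /Root/EightShard: the TopSort reads SUM(10,15) rows by row.Text, then Offset "15" and Limit "10" trim the result. A hedged reconstruction of the query shape, not the test's literal text:

    SELECT Data, Key, Text    -- ReadColumns from the plan
    FROM EightShard
    ORDER BY Text             -- TopSortBy: row.Text
    LIMIT 10 OFFSET 15;       -- Limit "10", Offset "15"; TopSort reads 10+15 rows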
7T09:17:18.738463Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 19212, node 5 2025-05-07T09:17:18.921003Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:17:18.921039Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:17:18.921049Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:17:18.921213Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:4783 TClient is connected to server localhost:4783 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:17:19.694198Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:17:19.710367Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T09:17:19.736938Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:17:19.855353Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-05-07T09:17:20.050707Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 2025-05-07T09:17:20.233642Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:17:23.228445Z node 5 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [5:7501630438767544988:2407], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:17:23.228576Z node 5 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:17:23.308934Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T09:17:23.399660Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T09:17:23.410457Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[5:7501630417292706853:2059];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:17:23.410545Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:17:23.442734Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T09:17:23.538950Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T09:17:23.595266Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T09:17:23.684409Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T09:17:23.743066Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T09:17:23.811030Z node 5 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [5:7501630438767545653:2471], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:17:23.811123Z node 5 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:17:23.811317Z node 5 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [5:7501630438767545658:2474], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:17:23.815434Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-05-07T09:17:23.827287Z node 5 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [5:7501630438767545660:2475], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T09:17:23.884327Z node 5 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [5:7501630438767545711:3421] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } {"Plan":{"Plans":[{"PlanNodeId":12,"Plans":[{"PlanNodeId":11,"Plans":[{"PlanNodeId":10,"Plans":[{"PlanNodeId":9,"Plans":[{"E-Size":"No estimate","PlanNodeId":8,"LookupKeyColumns":["Key"],"Node Type":"TableLookup","Path":"\/Root\/EightShard","Columns":["Data","Key","Text"],"E-Rows":"No estimate","Table":"EightShard","Plans":[{"PlanNodeId":7,"Operators":[{"Inputs":[{"InternalOperatorId":1}],"Iterator":"PartitionByKey","Name":"Iterator"},{"Inputs":[],"Name":"PartitionByKey","Input":"precompute_0_0"}],"Node Type":"ConstantExpr-Aggregate","CTE Name":"precompute_0_0"}],"PlanNodeType":"Connection","E-Cost":"No estimate"}],"Operators":[{"Inputs":[{"InternalOperatorId":1}],"Name":"Limit","Limit":"1001"},{"Inputs":[{"InternalOperatorId":3},{"InternalOperatorId":2}],"E-Rows":"No estimate","Condition":"es.Key = kv.Key","Name":"InnerJoin (MapJoin)","E-Size":"No estimate","E-Cost":"No estimate"},{"Inputs":[],"ToFlow":"precompute_0_0","Name":"ToFlow"},{"Inputs":[{"ExternalPlanNodeId":8}],"E-Rows":"No estimate","Predicate":"Exist(item.Key)","Name":"Filter","E-Size":"No estimate","E-Cost":"No estimate"}],"Node Type":"Limit-InnerJoin (MapJoin)-ConstantExpr-Filter","CTE Name":"precompute_0_0"}],"Node Type":"UnionAll","PlanNodeType":"Connection"}],"Operators":[{"Inputs":[{"ExternalPlanNodeId":10}],"Name":"Limit","Limit":"1001"}],"Node Type":"Limit"}],"Node Type":"ResultSet_1","PlanNodeType":"ResultSet"},{"PlanNodeId":5,"Subplan Name":"CTE precompute_0_0","Plans":[{"PlanNodeId":4,"Plans":[{"PlanNodeId":3,"Plans":[{"PlanNodeId":2,"Plans":[{"Tables":["KeyValue"],"PlanNodeId":1,"Operators":[{"Scan":"Parallel","E-Size":"No estimate","ReadRanges":["Key (-∞, +∞)"],"Name":"TableFullScan","Inputs":[],"Path":"\/Root\/KeyValue","ReadRangesPointPrefixLen":"0","E-Rows":"No estimate","Table":"KeyValue","ReadColumns":["Key","Value"],"E-Cost":"No estimate"}],"Node Type":"TableFullScan"}],"Node Type":"Collect"}],"Node Type":"UnionAll","PlanNodeType":"Connection"}],"Node Type":"Collect"}],"Node Type":"Precompute_0","Parent Relationship":"InitPlan","PlanNodeType":"Materialize"}],"Node Type":"Query","Stats":{"ResourcePoolId":"default"},"PlanNodeType":"Query"},"meta":{"version":"0.2","type":"query"},"tables":[{"name":"\/Root\/EightShard","reads":[{"lookup_by":["Key"],"columns":["Data","Key","Text"],"type":"Lookup"}]},{"name":"\/Root\/KeyValue","reads":[{"columns":["Key","Value"],"scan_by":["Key (-∞, +∞)"],"type":"FullScan"}]}],"SimplifiedPlan":{"PlanNodeId":0,"Plans":[{"PlanNodeId":1,"Plans":[{"PlanNodeId":2,"Plans":[{"PlanNodeId":4,"Plans":[{"PlanNodeId":5,"Plans":[{"PlanNodeId":6,"Plans":[{"PlanNodeId":7,"Operators":[{"E-Rows":"No estimate","Columns":["Data","Key","Text"],"E-Size":"No estimate","E-Cost":"No estimate","Name":"TableLookup","Table":"EightShard","LookupKeyColumns":["Key"]}],"Node Type":"TableLookup","PlanNodeType":"Connection"}],"Operators":[{"E-Rows":"No estimate","Predicate":"Exist(item.Key)","Name":"Filter","E-Size":"No estimate","E-Cost":"No estimate"}],"Node Type":"Filter"},{"PlanNodeId":13,"Operators":[{"Scan":"Parallel","E-Size":"No estimate","ReadRanges":["Key (-∞, 
+∞)"],"Name":"TableFullScan","Path":"\/Root\/KeyValue","ReadRangesPointPrefixLen":"0","E-Rows":"No estimate","Table":"KeyValue","ReadColumns":["Key","Value"],"E-Cost":"No estimate"}],"Node Type":"TableFullScan"}],"Operators":[{"E-Rows":"No estimate","Condition":"es.Key = kv.Key","Name":"InnerJoin (MapJoin)","E-Size":"No estimate","E-Cost":"No estimate"}],"Node Type":"InnerJoin (MapJoin)"}],"Operators":[{"Name":"Limit","Limit":"1001"}],"Node Type":"Limit"}],"Operators":[{"Name":"Limit","Limit":"1001"}],"Node Type":"Limit"}],"Node Type":"ResultSet_1","PlanNodeType":"ResultSet"}],"Node Type":"Query","OptimizerStats":{"EquiJoinsCount":0,"JoinsCount":0},"PlanNodeType":"Query"}} >> KqpExplain::PrecomputeRange [GOOD] >> KqpExplain::PureExpr >> KqpExplain::UpdateSecondaryConditionalSecondaryKey+UseSink [GOOD] >> KqpExplain::UpdateConditional+UseSink [GOOD] >> KqpExplain::UpdateConditional-UseSink >> KqpExplain::UpdateOnSecondaryWithoutSecondaryKey-UseSink [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/query/unittest >> KqpParams::InvalidJson [GOOD] Test command err: Trying to start YDB, gRPC: 27067, MsgBus: 22779 2025-05-07T09:16:59.914526Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501630334715221294:2271];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:16:59.914575Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/001e16/r3tmp/tmpWjuygM/pdisk_1.dat 2025-05-07T09:17:00.342084Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:17:00.364150Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:17:00.364268Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:17:00.367375Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 27067, node 1 2025-05-07T09:17:00.512041Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:17:00.512059Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:17:00.512074Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:17:00.512180Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:22779 TClient is connected to server localhost:22779 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:17:01.125012Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:17:01.152187Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:17:01.294963Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-05-07T09:17:01.490480Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 2025-05-07T09:17:01.563622Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:17:03.505025Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501630351895091924:2406], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:17:03.505136Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:17:03.856392Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T09:17:03.887189Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T09:17:03.934937Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T09:17:03.969243Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T09:17:03.996721Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T09:17:04.048970Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T09:17:04.098310Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T09:17:04.190422Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501630356190059882:2470], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:17:04.190502Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:17:04.190829Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501630356190059887:2473], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:17:04.194862Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-05-07T09:17:04.207715Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501630356190059889:2474], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T09:17:04.296873Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501630356190059940:3420] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:17:04.914408Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501630334715221294:2271];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:17:04.914480Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 4535, MsgBus: 13744 2025-05-07T09:17:06.695687Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7501630367407047076:2064];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:17:06.695730Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/001e16/r3tmp/tmp5j8aJW/pdisk_1.dat 2025-05-07T09:17:06.850295Z node 2 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:17:06.852226Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:17:06.852290Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:17:06.854207Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 4535, node 2 2025-05-07T09:17:06.946401Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:17:06.946419Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:17:06.946426Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:17:06.946533Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:13744 TClient is connected to server localhost:13744 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-05-07T09:17:07. ... WorkloadService] [TPoolFetcherActor] ActorId: [3:7501630414303984446:2470], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:17:17.720690Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:17:17.721035Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7501630414303984451:2473], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:17:17.725183Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-05-07T09:17:17.740935Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7501630414303984453:2474], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-05-07T09:17:17.802768Z node 3 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [3:7501630414303984504:3418] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:17:18.298086Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7501630397124113074:2192];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:17:18.298191Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 14064, MsgBus: 30058 2025-05-07T09:17:20.651168Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7501630425276981628:2063];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:17:20.651214Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/001e16/r3tmp/tmpGniI1r/pdisk_1.dat 2025-05-07T09:17:20.895253Z node 4 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:17:20.897541Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:17:20.897651Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:17:20.908591Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 14064, node 4 2025-05-07T09:17:20.962725Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:17:20.962749Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:17:20.962757Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:17:20.962923Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:30058 TClient is connected to server localhost:30058 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:17:21.575479Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:17:21.623610Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:17:21.718834Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:17:21.947848Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:17:22.038884Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:17:24.890667Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7501630442456852457:2407], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:17:24.890820Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:17:24.949581Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-05-07T09:17:24.996182Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-05-07T09:17:25.042868Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-05-07T09:17:25.084894Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-05-07T09:17:25.162234Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-05-07T09:17:25.243575Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-05-07T09:17:25.322701Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-05-07T09:17:25.418681Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7501630446751820423:2470], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:17:25.418795Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:17:25.419429Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7501630446751820428:2473], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:17:25.426975Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-05-07T09:17:25.444426Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7501630446751820430:2474], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-05-07T09:17:25.540038Z node 4 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [4:7501630446751820481:3420] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:17:25.651441Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7501630425276981628:2063];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:17:25.651516Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:17:26.796873Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480 2025-05-07T09:17:26.982800Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2578: SessionId: ydb://session/3?node_id=4&id=ZWYxZDZiNTAtMTI4MzNkNGItYzc1MDk0MDQtMzVlMTk2M2E=, ActorId: [4:7501630451046788054:2515], ActorState: ExecuteState, TraceId: 01jtn0j7z289ny5mxrxse4p24d, Create QueryResponse for error on request, msg: ydb/core/kqp/session_actor/kqp_session_actor.cpp:1003: Invalid Json value
: Error: ydb/core/kqp/session_actor/kqp_session_actor.cpp:1003: Invalid Json value >> TSchemeshardBackgroundCompactionTest::ShouldNotCompactServerlessAfterDisable [GOOD] >> KqpQuery::QueryCacheTtl [GOOD] >> KqpQuery::QueryCachePermissionsLoss ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/query/unittest >> KqpExplain::UpdateSecondaryConditionalSecondaryKey+UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 4187, MsgBus: 3900 2025-05-07T09:16:47.555319Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501630284515766781:2062];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:16:47.555400Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/0020f1/r3tmp/tmpIL2P5v/pdisk_1.dat 2025-05-07T09:16:48.352480Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:16:48.359752Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:16:48.359859Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:16:48.363429Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 4187, node 1 2025-05-07T09:16:48.480486Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:16:48.480502Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:16:48.480506Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:16:48.480603Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3900 TClient is connected to server localhost:3900 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:16:49.338775Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 
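The "Invalid Json value" error above (kqp_session_actor.cpp:1003) is KQP's parameter validation exercised by KqpParams::InvalidJson: a value bound to a parameter of type Json must parse as JSON before execution starts. A minimal hedged sketch (table and binding are illustrative, not the test's literal query):

    DECLARE $payload AS Json;

    UPSERT INTO SomeTable (Key, Payload)   -- hypothetical table
    VALUES (1u, $payload);                 -- binding a malformed string such as
                                           -- '{"a":' for $payload is rejected
                                           -- with "Invalid Json value"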
2025-05-07T09:16:49.353601Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T09:16:49.373808Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:49.529203Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:49.761903Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:49.853292Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:51.908055Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501630301695637640:2407], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:51.908252Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:52.303536Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T09:16:52.339441Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T09:16:52.392735Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T09:16:52.424018Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T09:16:52.472327Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T09:16:52.557177Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T09:16:52.558276Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501630284515766781:2062];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:16:52.558332Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:16:52.640290Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T09:16:52.738890Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501630305990605606:2471], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:52.739040Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:52.739413Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501630305990605611:2474], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:52.743561Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-05-07T09:16:52.760874Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501630305990605613:2475], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T09:16:52.821102Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501630305990605664:3424] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:16:54.381197Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480 2025-05-07T09:16:54.420900Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480 2025-05-07T09:16:54.494974Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710674:0, at schemeshard: 72057594046644480 {"Plan":{"Plans":[{"PlanNodeId":18,"Plans":[{"Tables":["SecondaryKeys"],"PlanNodeId":17,"Operators":[{"Inputs":[{"InternalOperatorId":1}],"Path":"\/Root\/SecondaryKeys","Name":"Upsert","Table":"SecondaryKeys"},{"Inputs":[],"Iterator":"precompute_0_2","Name":"Iterator"}],"Node Type":"Upsert-ConstantExpr","CTE Name":"precompute_0_2"}],"Node Type":"Effect"},{"PlanNodeId":16,"Plans":[{"Tables":["SecondaryKeys\/Index\/indexImplTable"],"PlanNodeId":15,"Operators":[{"Inputs":[{"InternalOperatorId":1}],"Path":"\/Root\/SecondaryKeys\/Index\/indexImplTable","Name":"Delete","Table":"SecondaryKeys\/Index\/indexImplTable"},{"Inputs":[],"Iterator":"precompute_0_1","Name":"Iterator"}],"Node Type":"Delete-ConstantExpr","CTE Name":"precompute_0_1"}],"Node Type":"Effect"},{"PlanNodeId":14,"Plans":[{"Tables":["SecondaryKeys\/Index\/indexImplTable"],"PlanNodeId":13,"Operators":[{"Inputs":[{"InternalOperatorId":1}],"Path":"\/Root\/SecondaryKeys\/Index\/indexImplTable","Name":"Upsert","Table":"SecondaryKeys\/Index\/indexImplTable"},{"Inputs":[],"Iterator":"precompute_0_0","Name":"Iterator"}],"Node Type":"Upsert-ConstantExpr","CTE Name":"precompute_0_0"}],"Node Type":"Effect"},{"PlanNodeId":11,"Plans":[{"PlanNodeId":10,"Plans":[{"PlanNodeId":9,"Plans":[{"PlanNodeId":8,"Plans":[{"Tables":["SecondaryKeys"],"PlanNodeId":7,"Operators":[{"Scan":"Parallel","E-Size":"No estimate","ReadRanges":["Key (-∞, +∞)"],"Name":"TableFullScan","Inputs":[],"Path":"\/Root\/SecondaryKeys","E-Rows":"No estimate","ReadRangesPointPrefixLen":"0","Table":"SecondaryKeys","ReadColumns":["Fk","Key","Value"],"E-Cost":"No estimate"}],"Node Type":"TableFullScan"}],"Subplan Name":"CTE Stage_5","Node Type":"Stage","Parent Relationship":"InitPlan"}],"Node Type":"UnionAll","PlanNodeType":"Connection"}],"Node Type":"Collect"}],"Subplan Name":"CTE precompute_0_0","Node Type":"Precompute_0_0","Parent Relationship":"InitPlan","PlanNodeType":"Materialize"},{"PlanNodeId":6,"Plans":[{"PlanNodeId":5,"Plans":[{"PlanNodeId":4,"Node Type":"UnionAll ... 
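The truncated plan above shows what KqpExplain::UpdateSecondaryConditionalSecondaryKey verifies: an UPDATE that changes the indexed column fans out into three effects, an Upsert into /Root/SecondaryKeys plus paired Upsert and Delete against SecondaryKeys/Index/indexImplTable, all fed from precomputed row sets built off a full scan of the base table. A hedged sketch of such a statement (predicate and new value are illustrative, not the test's literal query):

    UPDATE SecondaryKeys
    SET Fk = Fk + 1     -- Fk is the indexed column (see ReadColumns in the plan)
    WHERE Fk > 0;       -- illustrative condition on the secondary key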
ting 2025-05-07T09:17:18.044562Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 17258, node 4 2025-05-07T09:17:18.162168Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:17:18.162199Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:17:18.162207Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:17:18.162363Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:18578 TClient is connected to server localhost:18578 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:17:18.796641Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:17:18.810422Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-05-07T09:17:18.828787Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:17:18.939232Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:17:19.214792Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:17:19.322720Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:17:22.417829Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7501630433181136656:2407], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:17:22.417956Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:17:22.514122Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-05-07T09:17:22.588319Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-05-07T09:17:22.626579Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-05-07T09:17:22.670872Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-05-07T09:17:22.724415Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-05-07T09:17:22.782650Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-05-07T09:17:22.822179Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7501630411706298683:2213];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:17:22.822263Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:17:22.845145Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-05-07T09:17:22.958727Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7501630433181137321:2471], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:17:22.958828Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:17:23.008834Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7501630433181137326:2474], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:17:23.015667Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-05-07T09:17:23.032534Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7501630437476104624:2475], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-05-07T09:17:23.098110Z node 4 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [4:7501630437476104676:3421] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:17:24.481430Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480 2025-05-07T09:17:24.621573Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480 2025-05-07T09:17:24.666878Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480 {"Plan":{"Plans":[{"PlanNodeId":11,"Plans":[{"Tables":["SecondaryKeys"],"PlanNodeId":10,"Operators":[{"Inputs":[],"Path":"\/Root\/SecondaryKeys","Name":"Upsert","SinkType":"KqpTableSink","Table":"SecondaryKeys"}],"Plans":[{"PlanNodeId":9,"Plans":[{"PlanNodeId":8,"Plans":[{"Tables":["SecondaryKeys\/Index\/indexImplTable"],"PlanNodeId":7,"Operators":[{"Scan":"Parallel","E-Size":"No estimate","ReadRanges":["Fk [1, 4)"],"Name":"TableRangeScan","Inputs":[],"Path":"\/Root\/SecondaryKeys\/Index\/indexImplTable","E-Rows":"No estimate","ReadRangesPointPrefixLen":"1","ReadRangesKeys":["Fk"],"Table":"SecondaryKeys\/Index\/indexImplTable","ReadColumns":["Fk","Key"],"E-Cost":"No estimate","ReadRangesExpectedSize":"3"}],"Node Type":"TableRangeScan"}],"Subplan Name":"CTE Stage_5","Node Type":"Stage","Parent Relationship":"InitPlan"}],"Node Type":"UnionAll","PlanNodeType":"Connection"}],"Node Type":"Collect-Sink"}],"Node Type":"Sink"},{"PlanNodeId":6,"Plans":[{"Tables":["SecondaryKeys\/Index\/indexImplTable"],"PlanNodeId":5,"Operators":[{"Inputs":[],"Path":"\/Root\/SecondaryKeys\/Index\/indexImplTable","Name":"Delete","SinkType":"KqpTableSink","Table":"SecondaryKeys\/Index\/indexImplTable"}],"Plans":[{"PlanNodeId":4,"Node Type":"UnionAll","CTE Name":"Stage_5","PlanNodeType":"Connection"}],"Node Type":"Collect-Sink"}],"Node Type":"Sink"},{"PlanNodeId":3,"Plans":[{"Tables":["SecondaryKeys\/Index\/indexImplTable"],"PlanNodeId":2,"Operators":[{"Inputs":[],"Path":"\/Root\/SecondaryKeys\/Index\/indexImplTable","Name":"Upsert","SinkType":"KqpTableSink","Table":"SecondaryKeys\/Index\/indexImplTable"}],"Plans":[{"PlanNodeId":1,"Node Type":"UnionAll","CTE Name":"Stage_5","PlanNodeType":"Connection"}],"Node Type":"Collect-Sink"}],"Node Type":"Sink"}],"Node Type":"Query","PlanNodeType":"Query","Stats":{"ResourcePoolId":"default"}},"meta":{"version":"0.2","type":"query"},"tables":[{"name":"\/Root\/SecondaryKeys","writes":[{"columns":["Fk","Key"],"type":"MultiUpsert"}]},{"name":"\/Root\/SecondaryKeys\/Index\/indexImplTable","reads":[{"columns":["Fk","Key"],"scan_by":["Fk [1, 
4)"],"type":"Scan"}],"writes":[{"columns":["Fk","Key"],"type":"MultiUpsert"},{"type":"MultiErase"}]}],"SimplifiedPlan":{"PlanNodeId":0,"Plans":[{"PlanNodeId":1,"Plans":[{"PlanNodeId":2,"Operators":[{"Path":"\/Root\/SecondaryKeys","Name":"Upsert","SinkType":"KqpTableSink","Table":"SecondaryKeys"}],"Node Type":"Upsert"}],"Node Type":"Sink"},{"PlanNodeId":3,"Plans":[{"PlanNodeId":4,"Operators":[{"Path":"\/Root\/SecondaryKeys\/Index\/indexImplTable","Name":"Delete","SinkType":"KqpTableSink","Table":"SecondaryKeys\/Index\/indexImplTable"}],"Node Type":"Delete"}],"Node Type":"Sink"},{"PlanNodeId":5,"Plans":[{"PlanNodeId":6,"Operators":[{"Path":"\/Root\/SecondaryKeys\/Index\/indexImplTable","Name":"Upsert","SinkType":"KqpTableSink","Table":"SecondaryKeys\/Index\/indexImplTable"}],"Node Type":"Upsert"}],"Node Type":"Sink"}],"Node Type":"Query","PlanNodeType":"Query","OptimizerStats":{"EquiJoinsCount":0,"JoinsCount":0}}} >> KqpStats::MultiTxStatsFullExpYql >> KqpLimits::OutOfSpaceBulkUpsertFail ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_compaction/unittest >> TSchemeshardBackgroundCompactionTest::ShouldNotCompactServerlessAfterDisable [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140] 2025-05-07T09:16:11.876815Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T09:16:11.876985Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:16:11.877045Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T09:16:11.877089Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T09:16:11.877146Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T09:16:11.877186Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T09:16:11.877262Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:16:11.877358Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T09:16:11.878209Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T09:16:11.879865Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T09:16:11.988079Z node 1 :FLAT_TX_SCHEMESHARD 
WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T09:16:11.988135Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:16:12.014910Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T09:16:12.015136Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-05-07T09:16:12.015346Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T09:16:12.039225Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T09:16:12.039626Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T09:16:12.040325Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T09:16:12.040517Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T09:16:12.044199Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:16:12.055267Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:16:12.055503Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:16:12.055604Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T09:16:12.055660Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:16:12.055779Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T09:16:12.058911Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T09:16:12.069532Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T09:16:12.348122Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T09:16:12.348380Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:16:12.348634Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 
2025-05-07T09:16:12.348869Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T09:16:12.348931Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:16:12.351817Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T09:16:12.351982Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T09:16:12.352209Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:16:12.352288Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T09:16:12.352339Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T09:16:12.352373Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T09:16:12.354779Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:16:12.354851Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T09:16:12.354902Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T09:16:12.357017Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:16:12.357084Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:16:12.357143Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:16:12.357201Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T09:16:12.361005Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T09:16:12.363432Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T09:16:12.363638Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation 
RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T09:16:12.364771Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T09:16:12.364922Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:16:12.364969Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:16:12.365293Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T09:16:12.365354Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:16:12.365565Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T09:16:12.365684Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T09:16:12.368123Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:16:12.368173Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:16:12.368412Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:16:12.368453Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 
9:17:28.951672Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4900: StateWork, processing event TEvPrivate::TEvRunConditionalErase 2025-05-07T09:17:28.951701Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6592: Handle: TEvRunConditionalErase, at schemeshard: 72075186233409546 2025-05-07T09:17:28.951803Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:56: TTxRunConditionalErase DoExecute: at schemeshard: 72075186233409546 2025-05-07T09:17:28.951868Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:189: TTxRunConditionalErase DoComplete: at schemeshard: 72075186233409546 2025-05-07T09:17:29.062333Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 2146435073, Sender [0:0:0], Recipient [3:768:2653]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvCleanupTransaction 2025-05-07T09:17:29.062422Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3156: StateWork, processing event TEvPrivate::TEvCleanupTransaction 2025-05-07T09:17:29.062515Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:214: No cleanup at 72075186233409552 outdated step 200 last cleanup 0 2025-05-07T09:17:29.062580Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186233409552 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-05-07T09:17:29.062624Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186233409552 2025-05-07T09:17:29.062659Z node 3 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186233409552 has no attached operations 2025-05-07T09:17:29.062693Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186233409552 2025-05-07T09:17:29.062841Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 2146435079, Sender [0:0:0], Recipient [3:768:2653]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvPeriodicWakeup 2025-05-07T09:17:29.062970Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3438: TEvPeriodicTableStats from datashard 72075186233409552, FollowerId 0, tableId 2 2025-05-07T09:17:29.063320Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269553162, Sender [3:768:2653], Recipient [3:897:2754]: NKikimrTxDataShard.TEvPeriodicTableStats DatashardId: 72075186233409552 TableLocalId: 2 Generation: 2 Round: 10 TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 InMemSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 0 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 28 Memory: 119352 } ShardState: 2 UserTablePartOwners: 72075186233409552 NodeId: 3 StartTime: 119 TableOwnerId: 72075186233409549 FollowerId: 0 2025-05-07T09:17:29.063382Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4877: StateWork, processing event TEvDataShard::TEvPeriodicTableStats 2025-05-07T09:17:29.063432Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:561: Got periodic table stats at tablet 72075186233409549 from shard 72075186233409552 followerId 0 pathId [OwnerId: 72075186233409549, LocalPathId: 2] state 'Ready' dataSize 0 rowCount 0 cpuUsage 0.0028 2025-05-07T09:17:29.063543Z node 3 :FLAT_TX_SCHEMESHARD 
TRACE: schemeshard__table_stats.cpp:568: Got periodic table stats at tablet 72075186233409549 from shard 72075186233409552 followerId 0 pathId [OwnerId: 72075186233409549, LocalPathId: 2] raw table stats: DataSize: 0 RowCount: 0 IndexSize: 0 InMemSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 0 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 2025-05-07T09:17:29.063590Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:608: Will delay TTxStoreTableStats on# 0.100000s, queue# 1 2025-05-07T09:17:29.074308Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 2146435073, Sender [0:0:0], Recipient [3:770:2654]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvCleanupTransaction 2025-05-07T09:17:29.074391Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3156: StateWork, processing event TEvPrivate::TEvCleanupTransaction 2025-05-07T09:17:29.074486Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:214: No cleanup at 72075186233409553 outdated step 200 last cleanup 0 2025-05-07T09:17:29.074557Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186233409553 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-05-07T09:17:29.074606Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186233409553 2025-05-07T09:17:29.074640Z node 3 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186233409553 has no attached operations 2025-05-07T09:17:29.074671Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186233409553 2025-05-07T09:17:29.074800Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 2146435079, Sender [0:0:0], Recipient [3:770:2654]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvPeriodicWakeup 2025-05-07T09:17:29.074927Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3438: TEvPeriodicTableStats from datashard 72075186233409553, FollowerId 0, tableId 2 2025-05-07T09:17:29.075265Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269553162, Sender [3:770:2654], Recipient [3:897:2754]: NKikimrTxDataShard.TEvPeriodicTableStats DatashardId: 72075186233409553 TableLocalId: 2 Generation: 2 Round: 10 TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 InMemSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 0 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 28 Memory: 119352 } ShardState: 2 UserTablePartOwners: 72075186233409553 NodeId: 3 StartTime: 119 TableOwnerId: 72075186233409549 FollowerId: 0 2025-05-07T09:17:29.075317Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4877: StateWork, processing event TEvDataShard::TEvPeriodicTableStats 2025-05-07T09:17:29.075368Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:561: Got periodic table stats at tablet 72075186233409549 from shard 72075186233409553 
followerId 0 pathId [OwnerId: 72075186233409549, LocalPathId: 2] state 'Ready' dataSize 0 rowCount 0 cpuUsage 0.0028 2025-05-07T09:17:29.075492Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:568: Got periodic table stats at tablet 72075186233409549 from shard 72075186233409553 followerId 0 pathId [OwnerId: 72075186233409549, LocalPathId: 2] raw table stats: DataSize: 0 RowCount: 0 IndexSize: 0 InMemSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 0 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 2025-05-07T09:17:29.087424Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271125000, Sender [0:0:0], Recipient [3:897:2754]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T09:17:29.087509Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4848: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-05-07T09:17:29.087596Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 271124999, Sender [3:897:2754], Recipient [3:897:2754]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T09:17:29.087639Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4847: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-05-07T09:17:29.098049Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 2146435096, Sender [0:0:0], Recipient [3:897:2754]: NKikimr::NSchemeShard::TEvPrivate::TEvSendBaseStatsToSA 2025-05-07T09:17:29.098136Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5024: StateWork, processing event TEvPrivate::TEvSendBaseStatsToSA 2025-05-07T09:17:29.098372Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 2146435076, Sender [0:0:0], Recipient [3:897:2754]: NKikimr::NSchemeShard::TEvPrivate::TEvRunConditionalErase 2025-05-07T09:17:29.098405Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4900: StateWork, processing event TEvPrivate::TEvRunConditionalErase 2025-05-07T09:17:29.098495Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6592: Handle: TEvRunConditionalErase, at schemeshard: 72075186233409549 2025-05-07T09:17:29.098572Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:56: TTxRunConditionalErase DoExecute: at schemeshard: 72075186233409549 2025-05-07T09:17:29.098632Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:189: TTxRunConditionalErase DoComplete: at schemeshard: 72075186233409549 2025-05-07T09:17:29.098801Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269746180, Sender [3:2019:3836], Recipient [3:897:2754]: NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult 2025-05-07T09:17:29.098841Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5023: StateWork, processing event TEvTxProxySchemeCache::TEvNavigateKeySetResult 2025-05-07T09:17:29.123228Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269877761, Sender [3:2022:3839], Recipient [3:768:2653]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-05-07T09:17:29.123328Z node 3 :TX_DATASHARD TRACE: 
datashard_impl.h:3166: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-05-07T09:17:29.123395Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186233409552, clientId# [3:2021:3838], serverId# [3:2022:3839], sessionId# [0:0:0] 2025-05-07T09:17:29.123578Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269553213, Sender [3:2020:3837], Recipient [3:768:2653]: NKikimrTxDataShard.TEvGetCompactTableStats PathId { OwnerId: 72075186233409549 LocalId: 2 } 2025-05-07T09:17:29.124274Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269877761, Sender [3:2025:3842], Recipient [3:770:2654]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-05-07T09:17:29.124316Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3166: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-05-07T09:17:29.124351Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186233409553, clientId# [3:2024:3841], serverId# [3:2025:3842], sessionId# [0:0:0] 2025-05-07T09:17:29.124573Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269553213, Sender [3:2023:3840], Recipient [3:770:2654]: NKikimrTxDataShard.TEvGetCompactTableStats PathId { OwnerId: 72075186233409549 LocalId: 2 } >> KqpStats::DataQueryWithEffects+UseSink [GOOD] >> KqpStats::DataQueryWithEffects-UseSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/query/unittest >> KqpExplain::UpdateOnSecondaryWithoutSecondaryKey-UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 2518, MsgBus: 23228 2025-05-07T09:16:49.875786Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501630294425378267:2263];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:16:49.875835Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/001e51/r3tmp/tmpsUUOAD/pdisk_1.dat 2025-05-07T09:16:50.462063Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:16:50.462210Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:16:50.516460Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:16:50.566431Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 2518, node 1 2025-05-07T09:16:50.766391Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:16:50.766411Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:16:50.766425Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:16:50.766524Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23228 TClient is connected to server localhost:23228 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:16:51.434341Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:51.485156Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:51.683931Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:51.885184Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:51.980709Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:53.936220Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501630311605248912:2407], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:53.936341Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:54.454943Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T09:16:54.497011Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T09:16:54.572466Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T09:16:54.614533Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T09:16:54.697276Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T09:16:54.780965Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T09:16:54.840124Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T09:16:54.879610Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501630294425378267:2263];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:16:54.879679Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:16:54.929136Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501630315900216877:2472], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:54.929259Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:54.929528Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501630315900216882:2475], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:54.936211Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-05-07T09:16:54.994036Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501630315900216884:2476], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T09:16:55.068451Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501630320195184231:3418] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:16:56.468004Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480 2025-05-07T09:16:56.546302Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480 2025-05-07T09:16:56.603819Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710674:0, at schemeshard: 72057594046644480 {"Plan":{"Plans":[{"PlanNodeId":11,"Plans":[{"Tables":["SecondaryKeys"],"PlanNodeId":10,"Operators":[{"Inputs":[],"Path":"\/Root\/SecondaryKeys","Name":"Upsert","SinkType":"KqpTableSink","Table":"SecondaryKeys"}],"Plans":[{"PlanNodeId":9,"Plans":[{"PlanNodeId":8,"Plans":[{"Tables":["SecondaryKeys"],"PlanNodeId":7,"Operators":[{"Scan":"Parallel","E-Size":"No estimate","ReadRanges":["Key (-∞, +∞)"],"Name":"TableFullScan","Inputs":[],"Path":"\/Root\/SecondaryKeys","E-Rows":"No estimate","ReadRangesPointPrefixLen":"0","Table":"SecondaryKeys","ReadColumns":["Fk","Key","Value"],"E-Cost":"No estimate"}],"Node Type":"TableFullScan"}],"Subplan Name":"CTE Stage_5","Node Type":"Stage","Parent Relationship":"InitPlan"}],"Node Type":"UnionAll","PlanNodeType":"Connection"}],"Node Type":"Collect-Sink"}],"Node Type":"Sink"},{"PlanNodeId":6,"Plans":[{"Tables":["SecondaryKeys\/Index\/indexImplTable"],"PlanNodeId":5,"Operators":[{"Inputs":[],"Path":"\/Root\/SecondaryKeys\/Index\/indexImplTable","Name":"Delete","SinkType":"KqpTableSink","Table":"SecondaryKeys\/Index\/indexImplTable"}],"Plans":[{"PlanNodeId":4,"Node Type":"UnionAll","CTE Name":"Stage_5","PlanNodeType":"Connection"}],"Node Type":"Collect-Sink"}],"Node Type":"Sink"},{"PlanNodeId":3,"Plans":[{"Tables":["SecondaryKeys\/Index\/indexImplTable"],"PlanNodeId":2,"Operators":[{"Inputs":[],"Path":"\/Root\/SecondaryKeys\/Index\/indexImplTable","Name":"Upsert","SinkType":"KqpTableSink","Table":"SecondaryKeys\/Index\/indexImplTable"}],"Plans":[{"PlanNodeId":1,"Node Type":"UnionAll","CTE Name":"Stage_5","PlanNodeType":"Connection"}],"Node Type":"Collect-Sink"}],"Node Type":"Sink"}],"Node Type":"Query","PlanNodeType":"Query","Stats":{"ResourcePoolId":"default"}},"meta":{"version":"0.2","type":"query"},"tables":[{"name":"\/Root\/SecondaryKeys","reads":[{"columns":["Fk","Key","Value"],"scan_by":["Key (-∞, +∞)"],"type":"FullScan"}],"writes":[{"columns":["Fk","Key"],"type":"MultiUpsert"}]},{"name":"\/Root\/SecondaryKeys\/Index\/indexImplTable","writes":[{"columns":["Fk","Key"],"type":"MultiUpsert"},{"type":"MultiErase"}]}],"SimplifiedPlan":{"PlanNodeId":0,"Plans":[{"PlanNodeId":1,"Plans":[{"PlanNodeId":2,"Operators":[{"Path":"\/Root\/ ... 
pp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/001e51/r3tmp/tmpx8AMWV/pdisk_1.dat 2025-05-07T09:17:18.851528Z node 4 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:17:18.852871Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:17:18.852948Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:17:18.857300Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 20694, node 4 2025-05-07T09:17:18.934740Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:17:18.934764Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:17:18.934775Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:17:18.934944Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:29037 TClient is connected to server localhost:29037 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:17:19.685736Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:17:19.692751Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T09:17:19.699397Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:17:19.787762Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T09:17:20.042103Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:17:20.140197Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:17:23.055298Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7501630438194418552:2407], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:17:23.055399Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:17:23.123024Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T09:17:23.175359Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T09:17:23.228453Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T09:17:23.278945Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T09:17:23.315479Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T09:17:23.364360Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T09:17:23.433131Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T09:17:23.555804Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7501630438194419207:2470], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:17:23.555902Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:17:23.556286Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7501630438194419212:2473], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:17:23.561307Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-05-07T09:17:23.578233Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7501630416719580575:2211];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:17:23.578361Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7501630438194419214:2474], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T09:17:23.578479Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:17:23.680549Z node 4 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [4:7501630438194419265:3417] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:17:25.252271Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480 2025-05-07T09:17:25.311573Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480 2025-05-07T09:17:25.395970Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710674:0, at schemeshard: 72057594046644480 {"Plan":{"Plans":[{"PlanNodeId":14,"Plans":[{"Tables":["SecondaryKeys"],"PlanNodeId":13,"Operators":[{"Inputs":[{"InternalOperatorId":1}],"Path":"\/Root\/SecondaryKeys","Name":"Upsert","Table":"SecondaryKeys"},{"Inputs":[],"Iterator":"precompute_2_0","Name":"Iterator"}],"Node Type":"Upsert-ConstantExpr","CTE Name":"precompute_2_0"}],"Node Type":"Effect"},{"PlanNodeId":11,"Plans":[{"PlanNodeId":10,"Plans":[{"PlanNodeId":9,"Plans":[{"PlanNodeId":8,"Operators":[{"Inputs":[{"InternalOperatorId":1}],"Iterator":"Filter","Name":"Iterator"},{"E-Rows":"2","Inputs":[],"Predicate":"Contains","E-Cost":"0","E-Size":"10","Name":"Filter"}],"Node Type":"ConstantExpr-Filter"}],"Node Type":"UnionAll","PlanNodeType":"Connection"}],"Node Type":"Collect"}],"Subplan Name":"CTE precompute_2_0","Node Type":"Precompute_2","Parent Relationship":"InitPlan","PlanNodeType":"Materialize"},{"PlanNodeId":6,"Plans":[{"PlanNodeId":5,"Plans":[{"PlanNodeId":4,"Plans":[{"PlanNodeId":3,"Plans":[{"E-Size":"No estimate","LookupKeyColumns":["Key"],"Node Type":"TableLookup","PlanNodeId":2,"Path":"\/Root\/SecondaryKeys","Columns":["Key"],"E-Rows":"No estimate","Plans":[{"PlanNodeId":1,"Operators":[{"Inputs":[],"Iterator":"precompute_0_1","Name":"Iterator"}],"Node Type":"ConstantExpr","CTE Name":"precompute_0_1"}],"Table":"SecondaryKeys","PlanNodeType":"Connection","E-Cost":"No estimate"}],"Node Type":"Stage"}],"Node Type":"UnionAll","PlanNodeType":"Connection"}],"Node Type":"Stage"}],"Subplan Name":"CTE precompute_1_0","Node Type":"Precompute_1","Parent Relationship":"InitPlan","PlanNodeType":"Materialize"}],"Node 
Type":"Query","PlanNodeType":"Query","Stats":{"ResourcePoolId":"default"}},"meta":{"version":"0.2","type":"query"},"tables":[{"name":"\/Root\/SecondaryKeys","reads":[{"lookup_by":["Key"],"columns":["Key"],"type":"Lookup"}],"writes":[{"columns":["Key","Value"],"type":"MultiUpsert"}]}],"SimplifiedPlan":{"PlanNodeId":0,"Plans":[{"PlanNodeId":1,"Plans":[{"PlanNodeId":2,"Operators":[{"Path":"\/Root\/SecondaryKeys","Name":"Upsert","Table":"SecondaryKeys"}],"Plans":[{"PlanNodeId":8,"Operators":[{"E-Rows":"2","Predicate":"Contains","E-Cost":"0","E-Size":"10","Name":"Filter"}],"Node Type":"Filter"}],"Node Type":"Upsert"}],"Node Type":"Effect"}],"Node Type":"Query","PlanNodeType":"Query","OptimizerStats":{"EquiJoinsCount":0,"JoinsCount":0}}} >> KqpExplain::FullOuterJoin [GOOD] >> SystemView::AuthOwners_Access [GOOD] >> SystemView::AuthOwners_ResultOrder >> KqpStats::RequestUnitForBadRequestExecute [GOOD] >> KqpStats::RequestUnitForBadRequestExplicitPrepare >> KqpLimits::QSReplySizeEnsureMemoryLimits+useSink >> KqpQuery::PreparedQueryInvalidate [GOOD] >> KqpQuery::QueryCache >> KqpQuery::OltpCreateAsSelect_Simple [GOOD] >> KqpQuery::OltpCreateAsSelect_Disable >> KqpQuery::UdfTerminate [GOOD] >> KqpQuery::UdfMemoryLimit >> TSchemeshardBackgroundCompactionTest::SchemeshardShouldNotCompactBorrowed [GOOD] >> TSchemeshardBackgroundCompactionTest::SchemeshardShouldHandleCompactionTimeouts >> KqpQuery::ReadOverloaded-StreamLookup [GOOD] >> KqpQuery::SelectWhereInSubquery [GOOD] >> KqpQuery::TableSink_ReplaceDataShardDataQuery+UseSink >> KqpParams::RowsList ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/query/unittest >> KqpExplain::FullOuterJoin [GOOD] Test command err: Trying to start YDB, gRPC: 17387, MsgBus: 64144 2025-05-07T09:16:53.314712Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501630311424076645:2067];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:16:53.314761Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/001e35/r3tmp/tmpil48lC/pdisk_1.dat 2025-05-07T09:16:53.839313Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:16:53.839407Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:16:53.844036Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-05-07T09:16:53.884513Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 17387, node 1 2025-05-07T09:16:53.963199Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:16:53.963232Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:16:53.963239Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:16:53.963376Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:64144 TClient is connected to server localhost:64144 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:16:54.620391Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:54.643018Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T09:16:54.660965Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:54.809117Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:54.965344Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:55.063233Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:56.976052Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501630324308980172:2406], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:56.976149Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:57.307215Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T09:16:57.368419Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T09:16:57.416314Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T09:16:57.455342Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T09:16:57.486802Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T09:16:57.568559Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T09:16:57.609170Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T09:16:57.716828Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501630328603948132:2470], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:57.716942Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:57.717142Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501630328603948137:2473], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:57.721044Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-05-07T09:16:57.740518Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501630328603948139:2474], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T09:16:57.803032Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501630328603948190:3418] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:16:58.315122Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501630311424076645:2067];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:16:58.315203Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; {"Plan":{"Plans":[{"PlanNodeId":9,"Plans":[{"PlanNodeId":8,"Operators":[{"Inputs":[],"Iterator":"precompute_0_0","Name":"Iterator"}],"Node Type":"ConstantExpr","CTE Name":"precompute_0_0"}],"Node Type":"ResultSet_1","PlanNodeType":"ResultSet"},{"PlanNodeId":6,"Subplan Name":"CTE precompute_0_0","Plans":[{"PlanNodeId":5,"Plans":[{"PlanNodeId":4,"Plans":[{"Tables":["EightShard"],"PlanNodeId":3,"Plans":[{"PlanNodeId":2,"Plans":[{"Tables":["KeyValue"],"PlanNodeId":1,"Operators":[{"Scan":"Parallel","E-Size":"No estimate","ReadRanges":["Key (-∞, +∞)"],"Name":"TableFullScan","Inputs":[],"Path":"\/Root\/KeyValue","E-Rows":"No estimate","Table":"KeyValue","ReadColumns":["Key"],"E-Cost":"No estimate"}],"Node Type":"TableFullScan"}],"Node Type":"Broadcast","PlanNodeType":"Connection"}],"Operators":[{"Inputs":[{"InternalOperatorId":1}],"Name":"Aggregate","Phase":"Intermediate"},{"Inputs":[{"InternalOperatorId":2},{"ExternalPlanNodeId":2}],"E-Rows":"No estimate","Condition":"t.Data = kv.Key","Name":"InnerJoin (MapJoin)","E-Size":"No estimate","E-Cost":"No estimate"},{"Inputs":[{"InternalOperatorId":3}],"E-Rows":"No estimate","Predicate":"Exist(item.Data)","Name":"Filter","E-Size":"No estimate","E-Cost":"No estimate"},{"Scan":"Parallel","E-Size":"No estimate","ReadRanges":["Key (-∞, +∞)"],"Name":"TableFullScan","Inputs":[],"Path":"\/Root\/EightShard","E-Rows":"No estimate","Table":"EightShard","ReadColumns":["Data"],"E-Cost":"No estimate"}],"Node Type":"Aggregate-InnerJoin (MapJoin)-Filter-TableFullScan"}],"Node Type":"UnionAll","PlanNodeType":"Connection"}],"Operators":[{"Inputs":[{"InternalOperatorId":1}],"Name":"Aggregate","Phase":"Final"},{"Inputs":[{"InternalOperatorId":2}],"Name":"Limit","Limit":"1"},{"Inputs":[{"ExternalPlanNodeId":4}],"Name":"Aggregate","Phase":"Final"}],"Node Type":"Aggregate-Limit-Aggregate"}],"Node Type":"Precompute_0","Parent Relationship":"InitPlan","PlanNodeType":"Materialize"}],"Node Type":"Query","Stats":{"ResourcePoolId":"default"},"PlanNodeType":"Query"},"meta":{"version":"0.2","type":"query"},"tables":[{"name":"\/Root\/EightShard","reads":[{"columns":["Data"],"scan_by":["Key (-∞, +∞)"],"type":"FullScan"}]},{"name":"\/Root\/KeyValue","reads":[{"columns":["Key"],"scan_by":["Key (-∞, +∞)"],"type":"FullScan"}]}],"SimplifiedPlan":{"PlanNodeId":0,"Plans":[{"PlanNodeId":1,"Plans":[{"PlanNodeId":4,"Plans":[{"PlanNodeId":5,"Plans":[{"PlanNodeId":6,"Plans":[{"PlanNodeId":8,"Plans":[{"PlanNodeId":9,"Plans":[{"PlanNodeId":10,"Plans":[{"PlanNodeId":11,"Operators":[{"Scan":"Parallel","E-Size":"No estimate","ReadRanges":["Key (-∞, +∞)"],"Name":"TableFullScan","Path":"\/Root\/EightShard","E-Rows":"No 
estimate","Table":"EightShard","ReadColumns":["Data"],"E-Cost":"No estimate"}],"Node Type":"TableFullScan"}],"Operators":[{"E-Ro ... +∞)"],"type":"FullScan"},{"columns":["Key"],"scan_by":["Key (350, +∞)"],"type":"Scan"},{"columns":["Data","Key"],"scan_by":["Key [100, 100]","Key [200, 200]","Key [300, 300]"],"type":"Scan"}],"writes":[{"columns":["Data","Key"],"type":"MultiUpsert"},{"columns":["Data","Key"],"type":"MultiUpsert"},{"type":"MultiErase"}]}],"SimplifiedPlan":{"PlanNodeId":0,"Plans":[{"PlanNodeId":1,"Plans":[{"PlanNodeId":2,"Operators":[{"Path":"\/Root\/EightShard","Name":"Delete","Table":"EightShard"}],"Plans":[{"PlanNodeId":8,"Operators":[{"Scan":"Parallel","E-Size":"No estimate","ReadRange":["Key (350, +∞)"],"Name":"TableRangeScan","Path":"\/Root\/EightShard","E-Rows":"No estimate","Table":"EightShard","ReadColumns":["Key"],"E-Cost":"No estimate"}],"Node Type":"TableRangeScan"}],"Node Type":"Delete"}],"Node Type":"Effect"},{"PlanNodeId":9,"Plans":[{"PlanNodeId":10,"Operators":[{"Path":"\/Root\/EightShard","Name":"Upsert","Table":"EightShard"}],"Plans":[{"PlanNodeId":16,"Operators":[{"Scan":"Parallel","E-Size":"No estimate","ReadRanges":["Key [100, 100]","Key [200, 200]","Key [300, 300]"],"Name":"TableRangeScan","Path":"\/Root\/EightShard","ReadRangesPointPrefixLen":"1","E-Rows":"No estimate","ReadRangesKeys":["Key"],"Table":"EightShard","ReadColumns":["Data","Key"],"E-Cost":"No estimate","ReadRangesExpectedSize":"3"}],"Node Type":"TableRangeScan"}],"Node Type":"Upsert"}],"Node Type":"Effect"},{"PlanNodeId":17,"Plans":[{"PlanNodeId":18,"Operators":[{"Path":"\/Root\/EightShard","Name":"Upsert","Table":"EightShard"}],"Plans":[{"PlanNodeId":24,"Operators":[{"Scan":"Parallel","E-Size":"No estimate","ReadRanges":["Key (-∞, +∞)"],"Name":"TableFullScan","Path":"\/Root\/EightShard","ReadRangesPointPrefixLen":"0","E-Rows":"No estimate","Table":"EightShard","ReadColumns":["Data","Key"],"E-Cost":"No estimate"}],"Node Type":"TableFullScan"}],"Node Type":"Upsert"}],"Node Type":"Effect"}],"Node Type":"Query","PlanNodeType":"Query","OptimizerStats":{"EquiJoinsCount":0,"JoinsCount":0}}} Trying to start YDB, gRPC: 29781, MsgBus: 25938 2025-05-07T09:17:22.060850Z node 5 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[5:7501630436399221937:2069];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:17:22.060904Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/001e35/r3tmp/tmp2jKKu8/pdisk_1.dat 2025-05-07T09:17:22.307031Z node 5 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:17:22.310304Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:17:22.310412Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:17:22.316189Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 29781, node 5 2025-05-07T09:17:22.449745Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:17:22.449776Z node 5 :NET_CLASSIFIER WARN: 
net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:17:22.449787Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:17:22.449942Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:25938 TClient is connected to server localhost:25938 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:17:23.212527Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:17:23.220669Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-05-07T09:17:23.239849Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:17:23.315329Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:17:23.519509Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:17:23.679023Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:17:26.482157Z node 5 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [5:7501630453579092747:2407], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:17:26.482271Z node 5 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:17:26.543481Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-05-07T09:17:26.632430Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-05-07T09:17:26.676117Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-05-07T09:17:26.730908Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-05-07T09:17:26.789321Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-05-07T09:17:26.855229Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-05-07T09:17:26.923580Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-05-07T09:17:27.009422Z node 5 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [5:7501630457874060695:2470], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:17:27.009621Z node 5 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:17:27.010071Z node 5 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [5:7501630457874060700:2473], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:17:27.015786Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-05-07T09:17:27.048546Z node 5 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [5:7501630457874060702:2474], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-05-07T09:17:27.061336Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[5:7501630436399221937:2069];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:17:27.061428Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:17:27.149122Z node 5 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [5:7501630457874060756:3417] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:17:28.611034Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:17:28.963052Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480 2025-05-07T09:17:29.009314Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715675:0, at schemeshard: 72057594046644480 >> KqpParams::MissingParameter [GOOD] >> KqpParams::MissingOptionalParameter+UseSink >> KqpLimits::KqpMkqlMemoryLimitException [GOOD] >> KqpLimits::LargeParametersAndMkqlFailure >> KqpQuery::Now >> KqpQuery::CreateAsSelectTypes+NotNull-IsOlap >> KqpParams::BadParameterType [GOOD] >> KqpParams::CheckQueryCacheForExecuteAndPreparedQueries >> KqpTypes::MultipleCurrentUtcTimestamp [GOOD] >> TSchemeshardBackgroundCompactionTest::SchemeshardShouldNotRequestCompactionsAfterDisable [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/query/unittest >> KqpQuery::ReadOverloaded-StreamLookup [GOOD] Test command err: Trying to start YDB, gRPC: 14404, MsgBus: 14827 2025-05-07T09:16:47.557506Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501630283047497518:2136];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:16:47.558332Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002186/r3tmp/tmpovcZfo/pdisk_1.dat 2025-05-07T09:16:48.254281Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:16:48.264621Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:16:48.264715Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:16:48.266942Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 14404, node 1 2025-05-07T09:16:48.462575Z node 1 :NET_CLASSIFIER WARN: 
net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:16:48.462604Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:16:48.462611Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:16:48.462751Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:14827 TClient is connected to server localhost:14827 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:16:49.379027Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:49.410393Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-05-07T09:16:49.434650Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:49.673415Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:16:49.872819Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-05-07T09:16:49.966824Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 2025-05-07T09:16:51.714898Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501630300227368277:2407], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:51.715012Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:52.251272Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-05-07T09:16:52.292211Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-05-07T09:16:52.350276Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-05-07T09:16:52.384009Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-05-07T09:16:52.424747Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-05-07T09:16:52.483489Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-05-07T09:16:52.556289Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-05-07T09:16:52.559104Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501630283047497518:2136];send_to=[0:7307199536658146131:7762515]; 2025-05-07T09:16:52.559176Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-05-07T09:16:52.665954Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501630304522336243:2472], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:52.666561Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:52.667223Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501630304522336248:2475], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:16:52.672662Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-05-07T09:16:52.698447Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501630304522336250:2476], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-05-07T09:16:52.797948Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501630304522336301:3424] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } Trying to start YDB, gRPC: 20491, MsgBus: 8688 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002186/r3tmp/tmpO0xphj/pdisk_1.dat 2025-05-07T09:16:55.752980Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-05-07T09:16:55.867884Z node 2 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:16:55.875800Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:16:55.875872Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:16:55.881605Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 20491, node 2 2025-05-07T09:16:56.030771Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:16:56.030791Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:16:56.030798Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:16:56.030908Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8688 TClient is connected to server localhost:8688 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:16:56.545801Z node 2 :FLAT_TX_SCHEMESHARD WARN ... yNS1mYzk0YTY1ZC04MTI4YmMyMQ==. TraceId : 01jtn0hvvrbqzaer01bpeamtdc. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Handle abort execution event from: [3:3142:4062], status: OVERLOADED, reason: {
: Error: Terminate execution } 2025-05-07T09:17:16.005589Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2578: SessionId: ydb://session/3?node_id=3&id=ZjRjYzBmOC1mYzgyMTYyNS1mYzk0YTY1ZC04MTI4YmMyMQ==, ActorId: [3:2720:4062], ActorState: ExecuteState, TraceId: 01jtn0hvvrbqzaer01bpeamtdc, Create QueryResponse for error on request, msg: Trying to start YDB, gRPC: 1697, MsgBus: 14644 2025-05-07T09:17:21.124212Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [4:101:2147], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-05-07T09:17:21.124423Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-05-07T09:17:21.124645Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/002186/r3tmp/tmpoDI5tv/pdisk_1.dat TServer::EnableGrpc on GrpcPort 1697, node 4 2025-05-07T09:17:21.733617Z node 4 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:17:21.734840Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-05-07T09:17:21.734902Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-05-07T09:17:21.734951Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-05-07T09:17:21.735494Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-05-07T09:17:21.787943Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-05-07T09:17:21.788111Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-05-07T09:17:21.803377Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:14644 TClient is connected to server localhost:14644 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-05-07T09:17:22.163469Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:17:22.277435Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 
2025-05-07T09:17:22.629198Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:17:23.126845Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:17:23.525802Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-05-07T09:17:24.221810Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:1723:3318], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:17:24.222248Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:17:24.250797Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-05-07T09:17:24.491844Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-05-07T09:17:24.757831Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-05-07T09:17:25.075594Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-05-07T09:17:25.449650Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-05-07T09:17:25.915004Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-05-07T09:17:26.226176Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-05-07T09:17:26.660288Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:2394:3814], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:17:26.660449Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:17:26.660794Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:2399:3819], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-05-07T09:17:26.673016Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-05-07T09:17:26.831814Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:2401:3821], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-05-07T09:17:26.908340Z node 4 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [4:2459:3860] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-05-07T09:17:28.334103Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480 2025-05-07T09:17:28.583312Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480 2025-05-07T09:17:28.981612Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480 2025-05-07T09:17:31.071625Z node 4 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1552: SelfId: [4:3146:4373], TxId: 281474976715675, task: 1. Ctx: { SessionId : ydb://session/3?node_id=4&id=YjMxMjNkYWItYWZjNWNkM2UtM2NmMmYzN2QtNDhjMWMzODc=. TraceId : 01jtn0jafafmy998jdgsp39b8q. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Source[0] fatal error: {
: Error: Table '/Root/SecondaryKeys' retry limit exceeded. } 2025-05-07T09:17:31.071818Z node 4 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:678: SelfId: [4:3146:4373], TxId: 281474976715675, task: 1. Ctx: { SessionId : ydb://session/3?node_id=4&id=YjMxMjNkYWItYWZjNWNkM2UtM2NmMmYzN2QtNDhjMWMzODc=. TraceId : 01jtn0jafafmy998jdgsp39b8q. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. InternalError: OVERLOADED DEFAULT_ERROR: {
: Error: Table '/Root/SecondaryKeys' retry limit exceeded. }. 2025-05-07T09:17:31.073098Z node 4 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1216: SelfId: [4:3147:4374], TxId: 281474976715675, task: 2. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=4&id=YjMxMjNkYWItYWZjNWNkM2UtM2NmMmYzN2QtNDhjMWMzODc=. TraceId : 01jtn0jafafmy998jdgsp39b8q. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Handle abort execution event from: [4:3140:4064], status: OVERLOADED, reason: {
: Error: Terminate execution } 2025-05-07T09:17:31.074260Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2578: SessionId: ydb://session/3?node_id=4&id=YjMxMjNkYWItYWZjNWNkM2UtM2NmMmYzN2QtNDhjMWMzODc=, ActorId: [4:2722:4064], ActorState: ExecuteState, TraceId: 01jtn0jafafmy998jdgsp39b8q, Create QueryResponse for error on request, msg: >> TSchemeshardBorrowedCompactionTest::SchemeshardShouldHandleDataShardReboot [GOOD] >> TSchemeshardBorrowedCompactionTest::SchemeshardShouldNotCompactAfterDrop >> KqpQuery::ExecuteDataQueryCollectMeta [GOOD] >> KqpQuery::DeleteWhereInSubquery ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_compaction/unittest >> TSchemeshardBackgroundCompactionTest::SchemeshardShouldNotRequestCompactionsAfterDisable [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:114:2058] recipient: [1:108:2140] Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:126:2058] recipient: [1:108:2140] 2025-05-07T09:16:11.876057Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7448: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-05-07T09:16:11.876166Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7476: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:16:11.876232Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7362: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-05-07T09:16:11.876276Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7378: OperationsProcessing config: using default configuration 2025-05-07T09:16:11.877648Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-05-07T09:16:11.877732Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7384: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-05-07T09:16:11.877836Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7508: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-05-07T09:16:11.877925Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-05-07T09:16:11.878678Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7578: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-05-07T09:16:11.882405Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-05-07T09:16:11.987802Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7269: Cannot subscribe to console configs 2025-05-07T09:16:11.987865Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded 2025-05-07T09:16:12.017344Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-05-07T09:16:12.017605Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: 
TTxUpgradeSchema.Execute 2025-05-07T09:16:12.017773Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-05-07T09:16:12.037723Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-05-07T09:16:12.038074Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-05-07T09:16:12.038736Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-05-07T09:16:12.038904Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-05-07T09:16:12.044327Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:16:12.056400Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:16:12.056476Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:16:12.056567Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-05-07T09:16:12.056612Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:16:12.056714Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-05-07T09:16:12.059149Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6631: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-05-07T09:16:12.066393Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2150] sender: [1:238:2058] recipient: [1:15:2062] 2025-05-07T09:16:12.212253Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-05-07T09:16:12.214270Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:16:12.216137Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-05-07T09:16:12.217636Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-05-07T09:16:12.217722Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, 
at schemeshard: 72057594046678944 2025-05-07T09:16:12.223246Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-05-07T09:16:12.223428Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-05-07T09:16:12.223624Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:16:12.223747Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-05-07T09:16:12.223790Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-05-07T09:16:12.223825Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 2 -> 3 2025-05-07T09:16:12.228690Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:16:12.228771Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-05-07T09:16:12.228846Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 3 -> 128 2025-05-07T09:16:12.235314Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:16:12.235403Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-05-07T09:16:12.235457Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:16:12.235538Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1638: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-05-07T09:16:12.241749Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1707: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-05-07T09:16:12.250810Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-05-07T09:16:12.251043Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1739: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-05-07T09:16:12.258560Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, 
transactions count in step: 1, at schemeshard: 72057594046678944 2025-05-07T09:16:12.258698Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969452 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-05-07T09:16:12.258743Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:16:12.261523Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2492: Change state for txid 1:0 128 -> 240 2025-05-07T09:16:12.261603Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-05-07T09:16:12.261781Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:475: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-05-07T09:16:12.261849Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-05-07T09:16:12.269166Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-05-07T09:16:12.269215Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-05-07T09:16:12.269413Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-05-07T09:16:12.269460Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: ... 
2025-05-07T09:17:33.054678Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186233409546
2025-05-07T09:17:33.054713Z node 3 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186233409546 has no attached operations
2025-05-07T09:17:33.054745Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186233409546
2025-05-07T09:17:33.054882Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 2146435073, Sender [0:0:0], Recipient [3:328:2310]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvCleanupTransaction
2025-05-07T09:17:33.054926Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3156: StateWork, processing event TEvPrivate::TEvCleanupTransaction
2025-05-07T09:17:33.054985Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:214: No cleanup at 72075186233409547 outdated step 5000002 last cleanup 0
2025-05-07T09:17:33.055027Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186233409547 (dry run) active 0 active planned 0 immediate 0 planned 0
2025-05-07T09:17:33.055049Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186233409547
2025-05-07T09:17:33.055074Z node 3 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186233409547 has no attached operations
2025-05-07T09:17:33.055099Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186233409547
2025-05-07T09:17:33.055215Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 2146435079, Sender [0:0:0], Recipient [3:323:2307]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvPeriodicWakeup
2025-05-07T09:17:33.055382Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3438: TEvPeriodicTableStats from datashard 72075186233409546, FollowerId 0, tableId 2
2025-05-07T09:17:33.055541Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 2146435079, Sender [0:0:0], Recipient [3:328:2310]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvPeriodicWakeup
2025-05-07T09:17:33.055628Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3438: TEvPeriodicTableStats from datashard 72075186233409547, FollowerId 0, tableId 2
2025-05-07T09:17:33.056048Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269553162, Sender [3:323:2307], Recipient [3:129:2154]: NKikimrTxDataShard.TEvPeriodicTableStats DatashardId: 72075186233409546 TableLocalId: 2 Generation: 2 Round: 10 TableStats { DataSize: 13940 RowCount: 100 IndexSize: 102 InMemSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 SearchHeight: 1 LastFullCompactionTs: 29 HasLoanedParts: false Channels { Channel: 1 DataSize: 13940 IndexSize: 102 } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 30 Memory: 124232 Storage: 14156 } ShardState: 2 UserTablePartOwners: 72075186233409546 NodeId: 3 StartTime: 44 TableOwnerId: 72057594046678944 FollowerId: 0
2025-05-07T09:17:33.056105Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4877: StateWork, processing event TEvDataShard::TEvPeriodicTableStats
2025-05-07T09:17:33.056160Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:561: Got periodic table stats at tablet 72057594046678944 from shard 72075186233409546 followerId 0 pathId [OwnerId: 72057594046678944, LocalPathId: 2] state 'Ready' dataSize 13940 rowCount 100 cpuUsage 0.003
2025-05-07T09:17:33.056275Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:568: Got periodic table stats at tablet 72057594046678944 from shard 72075186233409546 followerId 0 pathId [OwnerId: 72057594046678944, LocalPathId: 2] raw table stats: DataSize: 13940 RowCount: 100 IndexSize: 102 InMemSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 SearchHeight: 1 LastFullCompactionTs: 29 HasLoanedParts: false Channels { Channel: 1 DataSize: 13940 IndexSize: 102 } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0
2025-05-07T09:17:33.056319Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:608: Will delay TTxStoreTableStats on# 0.100000s, queue# 1
2025-05-07T09:17:33.056531Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 269553162, Sender [3:328:2310], Recipient [3:129:2154]: NKikimrTxDataShard.TEvPeriodicTableStats DatashardId: 72075186233409547 TableLocalId: 2 Generation: 2 Round: 10 TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 InMemSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 0 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 16 Memory: 119352 } ShardState: 2 UserTablePartOwners: 72075186233409547 NodeId: 3 StartTime: 44 TableOwnerId: 72057594046678944 FollowerId: 0
2025-05-07T09:17:33.056568Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4877: StateWork, processing event TEvDataShard::TEvPeriodicTableStats
2025-05-07T09:17:33.056603Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:561: Got periodic table stats at tablet 72057594046678944 from shard 72075186233409547 followerId 0 pathId [OwnerId: 72057594046678944, LocalPathId: 2] state 'Ready' dataSize 0 rowCount 0 cpuUsage 0.0016
2025-05-07T09:17:33.056710Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:568: Got periodic table stats at tablet 72057594046678944 from shard 72075186233409547 followerId 0 pathId [OwnerId: 72057594046678944, LocalPathId: 2] raw table stats: DataSize: 0 RowCount: 0 IndexSize: 0 InMemSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 0 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0
2025-05-07T09:17:33.114454Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 2146435092, Sender [0:0:0], Recipient [3:129:2154]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats
2025-05-07T09:17:33.114542Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5015: StateWork, processing event TEvPrivate::TEvPersistTableStats
2025-05-07T09:17:33.114574Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:588: Started TEvPersistStats at tablet 72057594046678944, queue size# 2
2025-05-07T09:17:33.114674Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:599: Will execute TTxStoreStats, queue# 2
2025-05-07T09:17:33.114722Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:608: Will delay TTxStoreTableStats on# 0.000000s, queue# 2
2025-05-07T09:17:33.114838Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 2 shard idx 72057594046678944:1 data size 13940 row count 100
2025-05-07T09:17:33.114916Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186233409546 maps to shardIdx: 72057594046678944:1 followerId=0, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], pathId map=Simple, is column=0, is olap=0, RowCount 100, DataSize 13940
2025-05-07T09:17:33.114995Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:219: [BackgroundCompaction] [Update] Updated shard# 72057594046678944:1 with partCount# 1, rowCount# 100, searchHeight# 1, lastFullCompaction# 1970-01-01T00:00:29.000000Z at schemeshard 72057594046678944
2025-05-07T09:17:33.115065Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:475: Do not want to split tablet 72075186233409546 by size, its table already has 2 out of 2 partitions
2025-05-07T09:17:33.115117Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 2 shard idx 72057594046678944:2 data size 0 row count 0
2025-05-07T09:17:33.115177Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186233409547 maps to shardIdx: 72057594046678944:2 followerId=0, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], pathId map=Simple, is column=0, is olap=0, RowCount 0, DataSize 0
2025-05-07T09:17:33.115225Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:219: [BackgroundCompaction] [Update] Updated shard# 72057594046678944:2 with partCount# 0, rowCount# 0, searchHeight# 0, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046678944
2025-05-07T09:17:33.115270Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:475: Do not want to split tablet 72075186233409547 by size, its table already has 2 out of 2 partitions
2025-05-07T09:17:33.115349Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944
2025-05-07T09:17:33.126363Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4840: StateWork, received event# 2146435092, Sender [0:0:0], Recipient [3:129:2154]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats
2025-05-07T09:17:33.126447Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5015: StateWork, processing event TEvPrivate::TEvPersistTableStats
2025-05-07T09:17:33.126482Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:588: Started TEvPersistStats at tablet 72057594046678944, queue size# 0
2025-05-07T09:17:33.164660Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269877761, Sender [3:1328:3250], Recipient [3:323:2307]: NKikimr::TEvTabletPipe::TEvServerConnected
2025-05-07T09:17:33.164747Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3166: StateWork, processing event TEvTabletPipe::TEvServerConnected
2025-05-07T09:17:33.164805Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186233409546, clientId# [3:1327:3249], serverId# [3:1328:3250], sessionId# [0:0:0]
2025-05-07T09:17:33.164991Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269553213, Sender [3:1326:3248], Recipient [3:323:2307]: NKikimrTxDataShard.TEvGetCompactTableStats PathId { OwnerId: 72057594046678944 LocalId: 2 }
2025-05-07T09:17:33.171869Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269877761, Sender [3:1331:3253], Recipient [3:328:2310]: NKikimr::TEvTabletPipe::TEvServerConnected
2025-05-07T09:17:33.171946Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3166: StateWork, processing event TEvTabletPipe::TEvServerConnected
2025-05-07T09:17:33.171990Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186233409547, clientId# [3:1330:3252], serverId# [3:1331:3253], sessionId# [0:0:0]
2025-05-07T09:17:33.172224Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3130: StateWork, received event# 269553213, Sender [3:1329:3251], Recipient [3:328:2310]: NKikimrTxDataShard.TEvGetCompactTableStats PathId { OwnerId: 72057594046678944 LocalId: 2 }
>> KqpExplain::UpdateConditionalKey-UseSink
>> SystemView::AuthGroups [GOOD]
>> SystemView::AuthGroupMembers
>> KqpExplain::PureExpr [GOOD]
>> KqpExplain::ReadTableRanges
------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/query/unittest >> KqpTypes::MultipleCurrentUtcTimestamp [GOOD]
Test command err:
Trying to start YDB, gRPC: 18332, MsgBus: 19403
2025-05-07T09:17:06.905749Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7501630366992251246:2064];send_to=[0:7307199536658146131:7762515];
2025-05-07T09:17:06.905800Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message;
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/001e08/r3tmp/tmpxcIr7w/pdisk_1.dat
2025-05-07T09:17:07.441934Z node 1 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded
2025-05-07T09:17:07.460766Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2025-05-07T09:17:07.460996Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting
2025-05-07T09:17:07.464783Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected
TServer::EnableGrpc on GrpcPort 18332, node 1
2025-05-07T09:17:07.624046Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe)
2025-05-07T09:17:07.624069Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe)
2025-05-07T09:17:07.624079Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe)
2025-05-07T09:17:07.626436Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration
TClient is connected to server localhost:19403
TClient is connected to server localhost:19403
WaitRootIsUp 'Root'...
TClient::Ls request: Root
TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED)
WaitRootIsUp 'Root' success.
2025-05-07T09:17:08.356755Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480
waiting...
2025-05-07T09:17:08.373652Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480
2025-05-07T09:17:08.389846Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480
waiting...
2025-05-07T09:17:08.572630Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480
waiting...
waiting...
2025-05-07T09:17:08.749378Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480
2025-05-07T09:17:08.841647Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480
waiting...
2025-05-07T09:17:10.668020Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501630384172122097:2406], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T09:17:10.668128Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T09:17:10.970211Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480
2025-05-07T09:17:11.007328Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480
2025-05-07T09:17:11.040659Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480
2025-05-07T09:17:11.077544Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480
2025-05-07T09:17:11.115453Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480
2025-05-07T09:17:11.188120Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480
2025-05-07T09:17:11.252571Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480
2025-05-07T09:17:11.336330Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501630388467090057:2470], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T09:17:11.336424Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T09:17:11.336694Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7501630388467090062:2473], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T09:17:11.340755Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480
2025-05-07T09:17:11.359054Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7501630388467090064:2474], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking }
2025-05-07T09:17:11.432941Z node 1 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [1:7501630388467090115:3426] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 }
2025-05-07T09:17:11.906289Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7501630366992251246:2064];send_to=[0:7307199536658146131:7762515];
2025-05-07T09:17:11.906394Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout;
Trying to start YDB, gRPC: 4906, MsgBus: 16335
2025-05-07T09:17:13.652778Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7501630397552445151:2210];send_to=[0:7307199536658146131:7762515];
2025-05-07T09:17:13.654108Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message;
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/001e08/r3tmp/tmp6pAvcP/pdisk_1.dat
2025-05-07T09:17:13.766206Z node 2 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded
2025-05-07T09:17:13.782430Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2025-05-07T09:17:13.782504Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting
2025-05-07T09:17:13.785788Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected
TServer::EnableGrpc on GrpcPort 4906, node 2
2025-05-07T09:17:13.858961Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe)
2025-05-07T09:17:13.858979Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe)
2025-05-07T09:17:13.858991Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe)
2025-05-07T09:17:13.859108Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration
TClient is connected to server localhost:16335
TClient is connected to server localhost:16335
WaitRootIsUp 'Root'...
TClient::Ls request: Root
TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { Sche ...
ESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480
2025-05-07T09:17:24.059697Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480
2025-05-07T09:17:24.150326Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7501630443977437690:2470], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T09:17:24.150420Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T09:17:24.150629Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7501630443977437695:2473], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T09:17:24.154955Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480
2025-05-07T09:17:24.173209Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7501630443977437697:2474], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking }
2025-05-07T09:17:24.234672Z node 3 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [3:7501630443977437748:3420] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 }
2025-05-07T09:17:25.026540Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7501630422502598907:2064];send_to=[0:7307199536658146131:7762515];
2025-05-07T09:17:25.026585Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout;
Trying to start YDB, gRPC: 10121, MsgBus: 14286
2025-05-07T09:17:26.219603Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7501630451043787887:2067];send_to=[0:7307199536658146131:7762515];
2025-05-07T09:17:26.219705Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message;
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/zvgn/001e08/r3tmp/tmpat2PyJ/pdisk_1.dat
2025-05-07T09:17:26.337740Z node 4 :IMPORT WARN: schemeshard_import.cpp:295: Table profiles were not loaded
2025-05-07T09:17:26.382379Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2025-05-07T09:17:26.382463Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting
2025-05-07T09:17:26.383572Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected
TServer::EnableGrpc on GrpcPort 10121, node 4
2025-05-07T09:17:26.481147Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe)
2025-05-07T09:17:26.481166Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe)
2025-05-07T09:17:26.481173Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe)
2025-05-07T09:17:26.481305Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration
TClient is connected to server localhost:14286
TClient is connected to server localhost:14286
WaitRootIsUp 'Root'...
TClient::Ls request: Root
TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED)
WaitRootIsUp 'Root' success.
2025-05-07T09:17:27.053287Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480
waiting...
2025-05-07T09:17:27.066479Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480
2025-05-07T09:17:27.076220Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480
waiting...
2025-05-07T09:17:27.139585Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480
waiting...
2025-05-07T09:17:27.379314Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480
waiting...
waiting...
2025-05-07T09:17:27.467871Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480
2025-05-07T09:17:29.914565Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7501630463928691406:2406], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T09:17:29.914685Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T09:17:29.989131Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480
2025-05-07T09:17:30.032725Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480
2025-05-07T09:17:30.071257Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480
2025-05-07T09:17:30.107685Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480
2025-05-07T09:17:30.150891Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480
2025-05-07T09:17:30.199234Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480
2025-05-07T09:17:30.275484Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480
2025-05-07T09:17:30.388982Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7501630468223659367:2470], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T09:17:30.389158Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T09:17:30.390069Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7501630468223659372:2473], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-05-07T09:17:30.396101Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480
2025-05-07T09:17:30.408523Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7501630468223659374:2474], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking }
2025-05-07T09:17:30.495638Z node 4 :TX_PROXY ERROR: schemereq.cpp:540: Actor# [4:7501630468223659425:3418] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 }
2025-05-07T09:17:31.222309Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7501630451043787887:2067];send_to=[0:7307199536658146131:7762515];
2025-05-07T09:17:31.222381Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout;
>> KqpLimits::TooBigKey+useSink
>> KqpExplain::UpdateConditional-UseSink [GOOD]
>> KqpExplain::UpdateConditionalKey+UseSink
>> KqpStats::RequestUnitForSuccessExplicitPrepare
>> KqpStats::StreamLookupStats+StreamLookupJoin
>> KqpQuery::OltpCreateAsSelect_Disable [GOOD]
>> KqpQuery::OlapCreateAsSelect_Complex
>> KqpStats::MultiTxStatsFullExpYql [GOOD]
>> KqpStats::MultiTxStatsFullExpScan
>> KqpStats::RequestUnitForBadRequestExplicitPrepare [GOOD]
>> KqpStats::OneShardNonLocalExec+UseSink
>> KqpStats::DataQueryWithEffects-UseSink [GOOD]
>> KqpStats::DataQueryMulti
>> KqpQuery::QueryCache [GOOD]
>> KqpQuery::QueryCacheInvalidate